From 58c9659903e12c0c9da101ea96ab20c9e50b8af9 Mon Sep 17 00:00:00 2001
From: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
Date: Fri, 20 Dec 2019 09:53:10 +0100
Subject: [PATCH] WDC patch

Signed-off-by: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
---
 Build2.sh                                     |    21 +
 .../bindings/arm/armada-370-xp-mpic.txt       |    11 +-
 .../bindings/arm/armada-370-xp-pmsu.txt       |    13 +-
 .../bindings/arm/armada-375-usb-cluster.txt   |    17 +
 .../devicetree/bindings/arm/armada-375.txt    |     9 +
 .../bindings/arm/coherency-fabric.txt         |    36 +-
 .../bindings/arm/mvebu-system-controller.txt  |     3 +-
 .../devicetree/bindings/bus/mvebu-mbus.txt    |   281 +
 .../bindings/clock/mvebu-core-clock.txt       |    14 +
 .../bindings/clock/mvebu-gated-clock.txt      |    71 +-
 .../devicetree/bindings/crypto/mvebu_cesa.txt |   140 +
 .../devicetree/bindings/i2c/i2c-mv64xxx.txt   |    25 +-
 .../interrupt-controller/interrupts.txt       |    29 +-
 .../memory-controllers/mvebu-devbus.txt       |   156 +
 .../mvebu-sdram-controller.txt                |    21 +
 .../devicetree/bindings/mmc/sdhci-pxa.txt     |    17 +-
 .../devicetree/bindings/mtd/mvebu_nfc.txt     |    59 +
 .../devicetree/bindings/mtd/pxa3xx-nand.txt   |     2 +
 .../devicetree/bindings/net/fixed-link.txt    |    26 +
 .../bindings/net/marvell-armada-370-neta.txt  |    24 +-
 .../devicetree/bindings/net/marvell-pp2.txt   |    67 +
 .../devicetree/bindings/pci/mvebu-pci.txt     |   296 +
 .../pinctrl/marvell,armada-375-pinctrl.txt    |    81 +
 .../reset/marvell,armada-cpu-reset.txt        |    28 +
 .../bindings/thermal/armada-thermal.txt       |     4 +-
 .../timer/marvell,armada-370-xp-timer.txt     |    45 +-
 .../devicetree/bindings/usb/usb-xhci.txt      |    14 +
 arch/arm/Kconfig                              |     2 +-
 arch/arm/Kconfig.debug                        |    30 +-
 arch/arm/Makefile                             |     1 +
 arch/arm/boot/compressed/.gitignore           |     1 +
 arch/arm/boot/compressed/Makefile             |     2 +-
 arch/arm/boot/compressed/head.S               |    16 +-
 arch/arm/boot/compressed/misc.c               |    74 +
 arch/arm/boot/compressed/vmlinux.lds.in       |    14 +
 arch/arm/boot/dts/Makefile                    |     4 +
 .../arm/boot/dts/YY_default/armada-385-db.dts |   157 +
 .../arm/boot/dts/YY_default/armada-385-rd.dts |   127 +
 .../arm/boot/dts/YY_default/armada-388-rd.dts |   127 +
 arch/arm/boot/dts/YY_default/armada-38x.dtsi  |   515 +
 .../dts/YY_default_T30p5/armada-385-db.dts    |   153 +
 .../dts/YY_default_T30p5/armada-385-rd.dts    |   127 +
 .../dts/YY_default_T30p5/armada-388-rd.dts    |   131 +
 .../boot/dts/YY_default_T30p5/armada-38x.dtsi |   538 +
 .../boot/dts/Yellowstone/armada-385-db.dts    |   187 +
 .../boot/dts/Yellowstone/armada-385-rd.dts    |   127 +
 .../boot/dts/Yellowstone/armada-388-rd.dts    |   131 +
 arch/arm/boot/dts/Yellowstone/armada-38x.dtsi |   538 +
 arch/arm/boot/dts/Yosemite/armada-385-db.dts  |   184 +
 arch/arm/boot/dts/Yosemite/armada-385-rd.dts  |   128 +
 arch/arm/boot/dts/Yosemite/armada-388-rd.dts  |   132 +
 arch/arm/boot/dts/Yosemite/armada-38x.dtsi    |   539 +
 arch/arm/boot/dts/armada-370-db.dts           |    67 +-
 arch/arm/boot/dts/armada-370-mirabox.dts      |    50 +-
 arch/arm/boot/dts/armada-370-rd.dts           |    79 +-
 arch/arm/boot/dts/armada-370-xp.dtsi          |   165 +-
 arch/arm/boot/dts/armada-370.dtsi             |   168 +-
 arch/arm/boot/dts/armada-375-db.dts           |   211 +
 arch/arm/boot/dts/armada-375.dtsi             |   660 +
 arch/arm/boot/dts/armada-380.dtsi             |   118 +
 arch/arm/boot/dts/armada-382-customer2.dts    |   152 +
 arch/arm/boot/dts/armada-382-db.dts           |   152 +
 arch/arm/boot/dts/armada-385-388.dtsi         |   151 +
 arch/arm/boot/dts/armada-385-customer1.dts    |   164 +
 arch/arm/boot/dts/armada-385-db-ap.dts        |   164 +
 arch/arm/boot/dts/armada-388-customer0.dts    |   133 +
 arch/arm/boot/dts/armada-388-db-gp.dts        |   133 +
 arch/arm/boot/dts/armada-38x-modular.dts      |   164 +
 arch/arm/boot/dts/armada-xp-db.dts            |   131 +-
 arch/arm/boot/dts/armada-xp-gp.dts            |   165 +-
 arch/arm/boot/dts/armada-xp-mv78230.dtsi      |   231 +-
 arch/arm/boot/dts/armada-xp-mv78260.dtsi      |   392 +-
 arch/arm/boot/dts/armada-xp-mv78460.dtsi      |   432 +-
 .../boot/dts/armada-xp-openblocks-ax3-4.dts   |    85 +-
 arch/arm/boot/dts/armada-xp.dtsi              |   105 +-
 arch/arm/boot/dts/kirkwood-6281.dtsi          |    35 +
 arch/arm/boot/dts/kirkwood-6282.dtsi          |    55 +
 arch/arm/boot/dts/kirkwood-cloudbox.dts       |     4 +-
 arch/arm/boot/dts/kirkwood-dns320.dts         |     2 +-
 arch/arm/boot/dts/kirkwood-dns325.dts         |     2 +-
 arch/arm/boot/dts/kirkwood-dnskw.dtsi         |     4 +-
 arch/arm/boot/dts/kirkwood-dockstar.dts       |     4 +-
 arch/arm/boot/dts/kirkwood-dreamplug.dts      |     5 +-
 arch/arm/boot/dts/kirkwood-goflexnet.dts      |     4 +-
 .../dts/kirkwood-guruplug-server-plus.dts     |     6 +-
 arch/arm/boot/dts/kirkwood-ib62x0.dts         |     4 +-
 arch/arm/boot/dts/kirkwood-iconnect.dts       |    15 +-
 arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts |     4 +-
 arch/arm/boot/dts/kirkwood-is2.dts            |     2 +-
 arch/arm/boot/dts/kirkwood-km_kirkwood.dts    |     4 +-
 arch/arm/boot/dts/kirkwood-lschlv2.dts        |     2 +-
 arch/arm/boot/dts/kirkwood-lsxhl.dts          |     2 +-
 arch/arm/boot/dts/kirkwood-lsxl.dtsi          |     4 +-
 arch/arm/boot/dts/kirkwood-mplcec4.dts        |    17 +-
 .../dts/kirkwood-netgear_readynas_duo_v2.dts  |    15 +-
 arch/arm/boot/dts/kirkwood-ns2-common.dtsi    |     4 +-
 arch/arm/boot/dts/kirkwood-ns2.dts            |     2 +-
 arch/arm/boot/dts/kirkwood-ns2lite.dts        |     2 +-
 arch/arm/boot/dts/kirkwood-ns2max.dts         |     2 +-
 arch/arm/boot/dts/kirkwood-ns2mini.dts        |     2 +-
 arch/arm/boot/dts/kirkwood-nsa310.dts         |    43 +-
 arch/arm/boot/dts/kirkwood-openblocks_a6.dts  |     4 +-
 arch/arm/boot/dts/kirkwood-topkick.dts        |     5 +-
 arch/arm/boot/dts/kirkwood-ts219-6281.dts     |    13 +-
 arch/arm/boot/dts/kirkwood-ts219-6282.dts     |    24 +-
 arch/arm/boot/dts/kirkwood-ts219.dtsi         |    10 +
 arch/arm/boot/dts/kirkwood.dtsi               |    20 +
 .../boot/dts/testcases/tests-interrupts.dtsi  |    41 +
 arch/arm/boot/dts/testcases/tests.dtsi        |     1 +
 arch/arm/common/mcpm_head.S                   |     2 +
 arch/arm/configs/mvebu_defconfig              |    12 +-
 arch/arm/configs/mvebu_lsp_defconfig          |   175 +
 arch/arm/include/asm/assembler.h              |     7 +
 arch/arm/include/asm/atomic.h                 |    26 +-
 arch/arm/include/asm/bug.h                    |    10 +-
 arch/arm/include/asm/cacheflush.h             |    46 +
 arch/arm/include/asm/dma-mapping.h            |     4 +
 arch/arm/include/asm/elf.h                    |     5 +
 arch/arm/include/asm/fixmap.h                 |     5 +
 arch/arm/include/asm/hardware/cache-l2x0.h    |     1 +
 arch/arm/include/asm/hardware/coresight.h     |     8 +-
 arch/arm/include/asm/io.h                     |     6 +
 arch/arm/include/asm/kgdb.h                   |     3 +-
 arch/arm/include/asm/mach/arch.h              |     5 +
 arch/arm/include/asm/mach/pci.h               |     4 +
 arch/arm/include/asm/memory.h                 |     4 +
 arch/arm/include/asm/mmu.h                    |     2 +-
 arch/arm/include/asm/page.h                   |    18 +
 arch/arm/include/asm/pgtable-2level-hwdef.h   |     8 +
 arch/arm/include/asm/pgtable-2level.h         |    14 +-
 arch/arm/include/asm/pgtable.h                |     6 +-
 arch/arm/include/asm/shmparam.h               |     4 +
 arch/arm/include/asm/smp_scu.h                |     2 +-
 arch/arm/include/asm/tlbflush.h               |    21 +
 arch/arm/include/debug/mvebu.S                |     5 +
 arch/arm/kernel/Makefile                      |     3 +-
 arch/arm/kernel/bios32.c                      |    16 +
 arch/arm/kernel/entry-armv.S                  |     5 +-
 arch/arm/kernel/entry-common.S                |    29 +-
 arch/arm/kernel/head.S                        |    12 +
 arch/arm/kernel/module.c                      |    57 +-
 arch/arm/kernel/perf_event.c                  |    14 +-
 arch/arm/kernel/perf_event_cpu.c              |    96 +-
 arch/arm/kernel/signal.c                      |    24 +-
 arch/arm/kernel/sigreturn_codes.S             |    80 +
 arch/arm/kernel/sleep.S                       |     2 +
 arch/arm/kernel/smp.c                         |    10 +
 arch/arm/kernel/smp_scu.c                     |    16 +-
 arch/arm/kernel/smp_twd.c                     |    24 +-
 arch/arm/kernel/traps.c                       |    24 +-
 arch/arm/lib/copy_page.S                      |     4 +
 arch/arm/mach-dove/common.c                   |    68 +-
 arch/arm/mach-integrator/pci_v3.c             |   278 +
 arch/arm/mach-ixp4xx/Kconfig                  |     4 -
 arch/arm/mach-kirkwood/board-dt.c             |     1 +
 arch/arm/mach-kirkwood/common.c               |    50 +-
 arch/arm/mach-kirkwood/pcie.c                 |    30 +
 arch/arm/mach-mv78xx0/pcie.c                  |    23 +-
 arch/arm/mach-mvebu/Kconfig                   |    56 +
 arch/arm/mach-mvebu/Makefile                  |    27 +-
 arch/arm/mach-mvebu/armada-370-xp.c           |    94 +-
 arch/arm/mach-mvebu/armada-370-xp.h           |    14 +-
 arch/arm/mach-mvebu/armada-375.c              |   143 +
 arch/arm/mach-mvebu/armada-375.h              |    23 +
 arch/arm/mach-mvebu/armada-380.h              |    23 +
 arch/arm/mach-mvebu/armada-38x.c              |   199 +
 arch/arm/mach-mvebu/coherency.c               |   170 +-
 arch/arm/mach-mvebu/coherency.h               |    11 +-
 arch/arm/mach-mvebu/coherency_ll.S            |   142 +-
 arch/arm/mach-mvebu/common.c                  |    46 +
 arch/arm/mach-mvebu/common.h                  |    14 +-
 arch/arm/mach-mvebu/cpu-reset.c               |    82 +
 arch/arm/mach-mvebu/dump_mv_regs.c            |   177 +
 arch/arm/mach-mvebu/headsmp-375.S             |    23 +
 arch/arm/mach-mvebu/headsmp-380.S             |    23 +
 arch/arm/mach-mvebu/headsmp.S                 |    16 +-
 arch/arm/mach-mvebu/include/mach/.gitignore   |     5 +
 arch/arm/mach-mvebu/include/mach/mvCommon.h   |   404 +
 arch/arm/mach-mvebu/include/mach/mvDebug.h    |   173 +
 arch/arm/mach-mvebu/include/mach/mvTypes.h    |   270 +
 arch/arm/mach-mvebu/linux_oss/mvOs.c          |   286 +
 arch/arm/mach-mvebu/linux_oss/mvOs.h          |   482 +
 arch/arm/mach-mvebu/mvebu-soc-id.c            |   144 +
 arch/arm/mach-mvebu/mvebu-soc-id.h            |    61 +
 arch/arm/mach-mvebu/platsmp-375.c             |    84 +
 arch/arm/mach-mvebu/platsmp-380.c             |   122 +
 arch/arm/mach-mvebu/platsmp.c                 |    40 +-
 arch/arm/mach-mvebu/pm-board.c                |   161 +
 arch/arm/mach-mvebu/pm.c                      |   231 +
 arch/arm/mach-mvebu/pmsu.c                    |   519 +-
 arch/arm/mach-mvebu/pmsu.h                    |     6 +
 arch/arm/mach-mvebu/pmsu_ll.S                 |   233 +
 arch/arm/mach-mvebu/serdes.c                  |   165 +
 arch/arm/mach-mvebu/soft-poweroff.c           |   115 +
 arch/arm/mach-mvebu/system-controller.c       |    34 +-
 arch/arm/mach-mvebu/usb-cluster.c             |    96 +
 arch/arm/mach-mvebu/usb-utmi.c                |    64 +
 arch/arm/mach-orion5x/common.c                |    36 +-
 arch/arm/mach-orion5x/common.h                |    17 +
 arch/arm/mach-orion5x/d2net-setup.c           |     6 +-
 arch/arm/mach-orion5x/db88f5281-setup.c       |    24 +-
 arch/arm/mach-orion5x/dns323-setup.c          |     6 +-
 arch/arm/mach-orion5x/edmini_v2-setup.c       |     6 +-
 arch/arm/mach-orion5x/kurobox_pro-setup.c     |    12 +-
 arch/arm/mach-orion5x/ls-chl-setup.c          |     6 +-
 arch/arm/mach-orion5x/ls_hgl-setup.c          |     6 +-
 arch/arm/mach-orion5x/lsmini-setup.c          |     6 +-
 arch/arm/mach-orion5x/mss2-setup.c            |     6 +-
 arch/arm/mach-orion5x/mv2120-setup.c          |     6 +-
 arch/arm/mach-orion5x/net2big-setup.c         |     6 +-
 arch/arm/mach-orion5x/pci.c                   |     9 +-
 arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c  |     6 +-
 arch/arm/mach-orion5x/rd88f5181l-ge-setup.c   |     6 +-
 arch/arm/mach-orion5x/rd88f5182-setup.c       |    13 +-
 .../arm/mach-orion5x/terastation_pro2-setup.c |     6 +-
 arch/arm/mach-orion5x/ts209-setup.c           |     6 +-
 arch/arm/mach-orion5x/ts409-setup.c           |     6 +-
 arch/arm/mach-orion5x/wnr854t-setup.c         |     6 +-
 arch/arm/mach-orion5x/wrt350n-v2-setup.c      |     6 +-
 arch/arm/mm/Kconfig                           |   107 +
 arch/arm/mm/abort-ev6.S                       |     5 +-
 arch/arm/mm/alignment.c                       |     9 +-
 arch/arm/mm/cache-l2x0.c                      |    26 +-
 arch/arm/mm/dma-mapping.c                     |     8 +-
 arch/arm/mm/fault.c                           |    19 +-
 arch/arm/mm/ioremap.c                         |     9 +-
 arch/arm/mm/mm.h                              |    26 +-
 arch/arm/mm/mmu.c                             |    60 +-
 arch/arm/mm/pgd.c                             |     2 +
 arch/arm/mm/proc-v6.S                         |     4 +-
 arch/arm/mm/proc-v7-2level.S                  |   109 +-
 arch/arm/mm/proc-v7.S                         |    14 +-
 arch/arm/mm/tlb-v7.S                          |     8 +
 arch/ia64/Kconfig                             |     1 -
 arch/microblaze/pci/pci-common.c              |    11 +-
 arch/mips/Kconfig                             |     2 -
 arch/mips/include/asm/pci.h                   |     5 -
 arch/mips/pci/fixup-lantiq.c                  |     7 +-
 arch/powerpc/Kconfig                          |     1 -
 arch/powerpc/include/asm/pci.h                |     5 -
 arch/powerpc/kernel/pci-common.c              |    11 +-
 .../powerpc/platforms/cell/celleb_scc_pciex.c |     7 +-
 arch/powerpc/platforms/cell/celleb_scc_sio.c  |     7 +-
 arch/powerpc/platforms/cell/spider-pic.c      |     9 +-
 arch/powerpc/platforms/cell/spu_manage.c      |    13 +-
 arch/powerpc/platforms/fsl_uli1575.c          |    12 +-
 arch/powerpc/platforms/powermac/pic.c         |    10 +-
 .../powerpc/platforms/pseries/event_sources.c |     8 +-
 arch/powerpc/sysdev/mpic_msi.c                |     8 +-
 arch/s390/Kconfig                             |     1 -
 arch/s390/include/asm/pci.h                   |     4 -
 arch/sparc/Kconfig                            |     1 -
 arch/tile/Kconfig                             |     1 -
 arch/x86/Kconfig                              |     1 -
 arch/x86/include/asm/pci.h                    |    30 -
 arch/x86/kernel/devicetree.c                  |     7 +-
 arch/x86/kernel/x86_init.c                    |    24 +
 block/partitions/mac.c                        |     4 +-
 crypto/Kconfig                                |     1 +
 crypto/Makefile                               |     2 +
 crypto/ocf/ChangeLog                          |  1957 ++
 crypto/ocf/Config.in                          |    38 +
 crypto/ocf/Kconfig                            |   135 +
 crypto/ocf/Makefile                           |   149 +
 crypto/ocf/c7108/Makefile                     |    11 +
 crypto/ocf/c7108/aes-7108.c                   |   841 +
 crypto/ocf/c7108/aes-7108.h                   |   134 +
 crypto/ocf/criov.c                            |   215 +
 crypto/ocf/crypto.c                           |  1871 ++
 crypto/ocf/cryptocteon/Makefile               |    16 +
 crypto/ocf/cryptocteon/README.txt             |    10 +
 crypto/ocf/cryptocteon/cavium_crypto.c        |  2283 ++
 crypto/ocf/cryptocteon/cryptocteon.c          |   576 +
 crypto/ocf/cryptodev.c                        |  1109 +
 crypto/ocf/cryptodev.h                        |   485 +
 crypto/ocf/cryptosoft.c                       |  1322 +
 crypto/ocf/ep80579/Makefile                   |   119 +
 crypto/ocf/ep80579/environment.mk             |    77 +
 crypto/ocf/ep80579/icp_asym.c                 |  1334 +
 crypto/ocf/ep80579/icp_common.c               |   773 +
 crypto/ocf/ep80579/icp_ocf.h                  |   376 +
 crypto/ocf/ep80579/icp_sym.c                  |  1153 +
 crypto/ocf/ep80579/linux_2.6_kernel_space.mk  |    68 +
 crypto/ocf/hifn/Makefile                      |    12 +
 crypto/ocf/hifn/hifn7751.c                    |  2954 ++
 crypto/ocf/hifn/hifn7751reg.h                 |   540 +
 crypto/ocf/hifn/hifn7751var.h                 |   368 +
 crypto/ocf/hifn/hifnHIPP.c                    |   420 +
 crypto/ocf/hifn/hifnHIPPreg.h                 |    46 +
 crypto/ocf/hifn/hifnHIPPvar.h                 |    93 +
 crypto/ocf/ixp4xx/Makefile                    |   103 +
 crypto/ocf/ixp4xx/ixp4xx.c                    |  1339 +
 crypto/ocf/kirkwood/Makefile                  |    18 +
 crypto/ocf/kirkwood/cesa/AES/mvAes.h          |    60 +
 crypto/ocf/kirkwood/cesa/AES/mvAesAlg.c       |   316 +
 crypto/ocf/kirkwood/cesa/AES/mvAesAlg.h       |    19 +
 crypto/ocf/kirkwood/cesa/AES/mvAesApi.c       |   310 +
 crypto/ocf/kirkwood/cesa/AES/mvAesBoxes.dat   |   123 +
 crypto/ocf/kirkwood/cesa/mvCesa.c             |  3126 +++
 crypto/ocf/kirkwood/cesa/mvCesa.h             |   412 +
 crypto/ocf/kirkwood/cesa/mvCesaDebug.c        |   484 +
 crypto/ocf/kirkwood/cesa/mvCesaRegs.h         |   356 +
 crypto/ocf/kirkwood/cesa/mvCesaTest.c         |  3096 ++
 crypto/ocf/kirkwood/cesa/mvLru.c              |   158 +
 crypto/ocf/kirkwood/cesa/mvLru.h              |   112 +
 crypto/ocf/kirkwood/cesa/mvMD5.c              |   365 +
 crypto/ocf/kirkwood/cesa/mvMD5.h              |    93 +
 crypto/ocf/kirkwood/cesa/mvSHA1.c             |   252 +
 crypto/ocf/kirkwood/cesa/mvSHA1.h             |    88 +
 crypto/ocf/kirkwood/cesa_ocf_drv.c            |  1302 +
 crypto/ocf/kirkwood/mvHal/common/mv802_3.h    |   213 +
 crypto/ocf/kirkwood/mvHal/common/mvCommon.c   |   275 +
 crypto/ocf/kirkwood/mvHal/common/mvCommon.h   |   308 +
 crypto/ocf/kirkwood/mvHal/common/mvDebug.c    |   325 +
 crypto/ocf/kirkwood/mvHal/common/mvDebug.h    |   177 +
 crypto/ocf/kirkwood/mvHal/common/mvDeviceId.h |   225 +
 crypto/ocf/kirkwood/mvHal/common/mvHalVer.h   |    73 +
 crypto/ocf/kirkwood/mvHal/common/mvStack.c    |   152 +
 crypto/ocf/kirkwood/mvHal/common/mvStack.h    |   191 +
 crypto/ocf/kirkwood/mvHal/common/mvTypes.h    |   244 +
 crypto/ocf/kirkwood/mvHal/dbg-trace.c         |   108 +
 crypto/ocf/kirkwood/mvHal/dbg-trace.h         |    24 +
 .../mvHal/kw_family/boardEnv/mvBoardEnvLib.c  |  2512 ++
 .../mvHal/kw_family/boardEnv/mvBoardEnvLib.h  |   376 +
 .../mvHal/kw_family/boardEnv/mvBoardEnvSpec.c |   846 +
 .../mvHal/kw_family/boardEnv/mvBoardEnvSpec.h |   262 +
 .../ocf/kirkwood/mvHal/kw_family/cpu/mvCpu.c  |   224 +
 .../ocf/kirkwood/mvHal/kw_family/cpu/mvCpu.h  |    99 +
 .../kw_family/ctrlEnv/mvCtrlEnvAddrDec.c      |   293 +
 .../kw_family/ctrlEnv/mvCtrlEnvAddrDec.h      |   203 +
 .../mvHal/kw_family/ctrlEnv/mvCtrlEnvAsm.h    |    98 +
 .../mvHal/kw_family/ctrlEnv/mvCtrlEnvLib.c    |  1823 ++
 .../mvHal/kw_family/ctrlEnv/mvCtrlEnvLib.h    |   185 +
 .../mvHal/kw_family/ctrlEnv/mvCtrlEnvRegs.h   |   419 +
 .../mvHal/kw_family/ctrlEnv/mvCtrlEnvSpec.h   |   257 +
 .../mvHal/kw_family/ctrlEnv/sys/mvAhbToMbus.c |  1047 +
 .../mvHal/kw_family/ctrlEnv/sys/mvAhbToMbus.h |   130 +
 .../kw_family/ctrlEnv/sys/mvAhbToMbusRegs.h   |   142 +
 .../mvHal/kw_family/ctrlEnv/sys/mvCpuIf.c     |  1034 +
 .../mvHal/kw_family/ctrlEnv/sys/mvCpuIf.h     |   120 +
 .../mvHal/kw_family/ctrlEnv/sys/mvCpuIfInit.S |   163 +
 .../mvHal/kw_family/ctrlEnv/sys/mvCpuIfRegs.h |   303 +
 .../mvHal/kw_family/ctrlEnv/sys/mvSysAudio.c  |   323 +
 .../mvHal/kw_family/ctrlEnv/sys/mvSysAudio.h  |   122 +
 .../mvHal/kw_family/ctrlEnv/sys/mvSysCesa.c   |   382 +
 .../mvHal/kw_family/ctrlEnv/sys/mvSysCesa.h   |   100 +
 .../mvHal/kw_family/ctrlEnv/sys/mvSysDram.c   |   347 +
 .../mvHal/kw_family/ctrlEnv/sys/mvSysDram.h   |    80 +
 .../mvHal/kw_family/ctrlEnv/sys/mvSysGbe.c    |   658 +
 .../mvHal/kw_family/ctrlEnv/sys/mvSysGbe.h    |   113 +
 .../mvHal/kw_family/ctrlEnv/sys/mvSysPex.c    |  1695 ++
 .../mvHal/kw_family/ctrlEnv/sys/mvSysPex.h    |   348 +
 .../mvHal/kw_family/ctrlEnv/sys/mvSysSata.c   |   427 +
 .../mvHal/kw_family/ctrlEnv/sys/mvSysSata.h   |   123 +
 .../mvHal/kw_family/ctrlEnv/sys/mvSysSdmmc.c  |   424 +
 .../mvHal/kw_family/ctrlEnv/sys/mvSysSdmmc.h  |   120 +
 .../mvHal/kw_family/ctrlEnv/sys/mvSysTdm.c    |   461 +
 .../mvHal/kw_family/ctrlEnv/sys/mvSysTdm.h    |   105 +
 .../mvHal/kw_family/ctrlEnv/sys/mvSysTs.c     |   591 +
 .../mvHal/kw_family/ctrlEnv/sys/mvSysTs.h     |   110 +
 .../mvHal/kw_family/ctrlEnv/sys/mvSysUsb.c    |   495 +
 .../mvHal/kw_family/ctrlEnv/sys/mvSysUsb.h    |   125 +
 .../mvHal/kw_family/ctrlEnv/sys/mvSysXor.c    |   662 +
 .../mvHal/kw_family/ctrlEnv/sys/mvSysXor.h    |   140 +
 .../mvHal/kw_family/device/mvDevice.c         |    72 +
 .../mvHal/kw_family/device/mvDevice.h         |    74 +
 .../mvHal/kw_family/device/mvDeviceRegs.h     |   101 +
 crypto/ocf/kirkwood/mvHal/linux_oss/mvOs.c    |   210 +
 crypto/ocf/kirkwood/mvHal/linux_oss/mvOs.h    |   421 +
 .../ocf/kirkwood/mvHal/linux_oss/mvOsSata.h   |   158 +
 crypto/ocf/kirkwood/mvHal/mvSysHwConfig.h     |   374 +
 .../ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmr.c |   375 +
 .../ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmr.h |   121 +
 .../kirkwood/mvHal/mv_hal/cntmr/mvCntmrRegs.h |   121 +
 .../kirkwood/mvHal/mv_hal/cpu/mvCpuCntrs.c    |   207 +
 .../kirkwood/mvHal/mv_hal/cpu/mvCpuCntrs.h    |   212 +
 .../kirkwood/mvHal/mv_hal/cpu/mvCpuL2Cntrs.c  |   143 +
 .../kirkwood/mvHal/mv_hal/cpu/mvCpuL2Cntrs.h  |   150 +
 .../ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDram.c |  1478 +
 .../ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDram.h |   191 +
 .../kirkwood/mvHal/mv_hal/ddr1_2/mvDramIf.c   |  1597 ++
 .../kirkwood/mvHal/mv_hal/ddr1_2/mvDramIf.h   |   179 +
 .../mvHal/mv_hal/ddr1_2/mvDramIfBasicInit.S   |   988 +
 .../mvHal/mv_hal/ddr1_2/mvDramIfConfig.S      |   667 +
 .../mvHal/mv_hal/ddr1_2/mvDramIfConfig.h      |   192 +
 .../mvHal/mv_hal/ddr1_2/mvDramIfRegs.h        |   306 +
 .../ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIf.c |  1854 ++
 .../ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIf.h |   172 +
 .../mvHal/mv_hal/ddr2/mvDramIfBasicInit.S     |   986 +
 .../mvHal/mv_hal/ddr2/mvDramIfConfig.S        |   527 +
 .../mvHal/mv_hal/ddr2/mvDramIfConfig.h        |   157 +
 .../kirkwood/mvHal/mv_hal/ddr2/mvDramIfRegs.h |   423 +
 .../mvHal/mv_hal/ddr2/mvDramIfStaticInit.h    |   178 +
 .../kirkwood/mvHal/mv_hal/ddr2/spd/mvSpd.c    |  1473 +
 .../kirkwood/mvHal/mv_hal/ddr2/spd/mvSpd.h    |   192 +
 .../ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEth.c |  2943 ++
 .../mvHal/mv_hal/eth/gbe/mvEthDebug.c         |   748 +
 .../mvHal/mv_hal/eth/gbe/mvEthDebug.h         |   146 +
 .../kirkwood/mvHal/mv_hal/eth/gbe/mvEthGbe.h  |   749 +
 .../kirkwood/mvHal/mv_hal/eth/gbe/mvEthRegs.h |   700 +
 crypto/ocf/kirkwood/mvHal/mv_hal/eth/mvEth.h  |   354 +
 crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGpp.c  |   360 +
 crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGpp.h  |   117 +
 .../ocf/kirkwood/mvHal/mv_hal/gpp/mvGppRegs.h |   116 +
 .../kirkwood/mvHal/mv_hal/pci-if/mvPciIf.c    |   666 +
 .../kirkwood/mvHal/mv_hal/pci-if/mvPciIf.h    |   133 +
 .../mvHal/mv_hal/pci-if/mvPciIfRegs.h         |   244 +
 .../mvHal/mv_hal/pci-if/pci_util/mvPciUtils.c |  1003 +
 .../mvHal/mv_hal/pci-if/pci_util/mvPciUtils.h |   323 +
 crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPci.c  |  1043 +
 crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPci.h  |   182 +
 .../ocf/kirkwood/mvHal/mv_hal/pci/mvPciRegs.h |   410 +
 crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPex.c  |  1140 +
 crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPex.h  |   168 +
 .../ocf/kirkwood/mvHal/mv_hal/pex/mvPexRegs.h |   749 +
 .../kirkwood/mvHal/mv_hal/pex/mvVrtBrgPex.c   |   311 +
 .../kirkwood/mvHal/mv_hal/pex/mvVrtBrgPex.h   |    82 +
 .../kirkwood/mvHal/mv_hal/sflash/mvSFlash.c   |  1521 +
 .../kirkwood/mvHal/mv_hal/sflash/mvSFlash.h   |   166 +
 .../mvHal/mv_hal/sflash/mvSFlashSpec.h        |   232 +
 crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpi.c  |   574 +
 crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpi.h  |    94 +
 .../ocf/kirkwood/mvHal/mv_hal/spi/mvSpiCmnd.c |   248 +
 .../ocf/kirkwood/mvHal/mv_hal/spi/mvSpiCmnd.h |    82 +
 .../ocf/kirkwood/mvHal/mv_hal/spi/mvSpiSpec.h |    97 +
 .../ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsi.c   |  1023 +
 .../ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsi.h   |   120 +
 .../kirkwood/mvHal/mv_hal/twsi/mvTwsiEeprom.S |   457 +
 .../kirkwood/mvHal/mv_hal/twsi/mvTwsiSpec.h   |   160 +
 crypto/ocf/ocf-bench.c                        |   514 +
 crypto/ocf/ocf-compat.h                       |   373 +
 crypto/ocf/ocfnull/Makefile                   |    11 +
 crypto/ocf/ocfnull/ocfnull.c                  |   204 +
 crypto/ocf/pasemi/Makefile                    |    11 +
 crypto/ocf/pasemi/pasemi.c                    |  1007 +
 crypto/ocf/pasemi/pasemi_fnu.h                |   410 +
 crypto/ocf/random.c                           |   316 +
 crypto/ocf/rndtest.c                          |   299 +
 crypto/ocf/rndtest.h                          |    54 +
 crypto/ocf/safe/Makefile                      |    11 +
 crypto/ocf/safe/hmachack.h                    |    36 +
 crypto/ocf/safe/md5.c                         |   308 +
 crypto/ocf/safe/md5.h                         |    76 +
 crypto/ocf/safe/safe.c                        |  2230 ++
 crypto/ocf/safe/safereg.h                     |   421 +
 crypto/ocf/safe/safevar.h                     |   229 +
 crypto/ocf/safe/sha1.c                        |   279 +
 crypto/ocf/safe/sha1.h                        |    72 +
 crypto/ocf/talitos/Makefile                   |    11 +
 crypto/ocf/talitos/talitos.c                  |  1355 +
 crypto/ocf/talitos/talitos_dev.h              |   277 +
 crypto/ocf/talitos/talitos_soft.h             |    76 +
 crypto/ocf/ubsec_ssb/Makefile                 |    11 +
 crypto/ocf/ubsec_ssb/bsdqueue.h               |   527 +
 crypto/ocf/ubsec_ssb/ubsec_ssb.c              |  2219 ++
 crypto/ocf/ubsec_ssb/ubsecreg.h               |   233 +
 crypto/ocf/ubsec_ssb/ubsecvar.h               |   227 +
 crypto/ocf/uio.h                              |    54 +
 drivers/ata/Kconfig                           |     9 +
 drivers/ata/Makefile                          |     1 +
 drivers/ata/ahci_mv.c                         |   306 +
 drivers/ata/libata-pmp.c                      |     7 +
 drivers/ata/sata_mv.c                         |    47 +-
 drivers/block/loop.c                          |     2 +-
 drivers/bus/mvebu-mbus.c                      |   926 +-
 drivers/clk/mvebu/Kconfig                     |     3 +
 drivers/clk/mvebu/Makefile                    |     1 +
 drivers/clk/mvebu/clk-core.c                  |   241 +
 drivers/clk/mvebu/clk-corediv.c               |   261 +
 drivers/clk/mvebu/clk-gating-ctrl.c           |   131 +-
 drivers/clocksource/Kconfig                   |     1 +
 drivers/clocksource/time-armada-370-xp.c      |   201 +-
 drivers/cpuidle/Kconfig                       |     6 +
 drivers/cpuidle/Makefile                      |     2 +-
 drivers/cpuidle/cpuidle-mvebu-v7.c            |   136 +
 drivers/crypto/Kconfig                        |    12 +
 drivers/crypto/Makefile                       |     1 +
 drivers/crypto/mvebu_cesa/Kconfig             |    19 +
 drivers/crypto/mvebu_cesa/Makefile            |    30 +
 .../mvebu_cesa/cesa_apps/libreswan/README     |    66 +
 .../cesa_apps/libreswan/ipsec_build.sh        |    40 +
 .../libreswan/ipsec_routing_setup.sh          |    36 +
 .../mvebu_cesa/cesa_apps/openssl/README       |    20 +
 .../mvebu_cesa/cesa_apps/openswan/README      |    69 +
 .../cesa_apps/openswan/ipsec_build.sh         |    24 +
 .../cesa_apps/openswan/ipsec_routing_setup.sh |    36 +
 drivers/crypto/mvebu_cesa/cesa_dev.c          |   314 +
 drivers/crypto/mvebu_cesa/cesa_dev.h          |    63 +
 drivers/crypto/mvebu_cesa/cesa_if.c           |   825 +
 drivers/crypto/mvebu_cesa/cesa_if.h           |   124 +
 drivers/crypto/mvebu_cesa/cesa_ocf_drv.c      |  1379 +
 drivers/crypto/mvebu_cesa/cesa_test.c         |  3165 +++
 drivers/crypto/mvebu_cesa/hal/AES/mvAes.h     |    70 +
 drivers/crypto/mvebu_cesa/hal/AES/mvAesAlg.c  |   341 +
 drivers/crypto/mvebu_cesa/hal/AES/mvAesAlg.h  |    24 +
 drivers/crypto/mvebu_cesa/hal/AES/mvAesApi.c  |   270 +
 .../crypto/mvebu_cesa/hal/AES/mvAesBoxes.dat  |   125 +
 drivers/crypto/mvebu_cesa/hal/mvCesa.c        |  3229 +++
 drivers/crypto/mvebu_cesa/hal/mvCesa.h        |   381 +
 drivers/crypto/mvebu_cesa/hal/mvCesaAddrDec.c |   289 +
 drivers/crypto/mvebu_cesa/hal/mvCesaDebug.c   |   557 +
 drivers/crypto/mvebu_cesa/hal/mvCesaRegs.h    |   384 +
 drivers/crypto/mvebu_cesa/hal/mvLru.c         |   150 +
 drivers/crypto/mvebu_cesa/hal/mvLru.h         |   113 +
 drivers/crypto/mvebu_cesa/hal/mvMD5.c         |   355 +
 drivers/crypto/mvebu_cesa/hal/mvMD5.h         |    92 +
 drivers/crypto/mvebu_cesa/hal/mvSHA1.c        |   301 +
 drivers/crypto/mvebu_cesa/hal/mvSHA1.h        |    89 +
 drivers/crypto/mvebu_cesa/hal/mvSHA256.c      |   341 +
 drivers/crypto/mvebu_cesa/hal/mvSHA256.h      |    87 +
 drivers/crypto/mvebu_cesa/mvSysCesaConfig.h   |    73 +
 drivers/dma/Kconfig                           |    23 +-
 drivers/dma/Makefile                          |     1 +
 drivers/dma/dmaengine.c                       |    76 +
 drivers/dma/iovlock.c                         |    67 +
 drivers/dma/mv_memcpy.c                       |   581 +
 drivers/dma/mv_memcpy.h                       |   129 +
 drivers/dma/mv_xor.c                          |   683 +-
 drivers/dma/mv_xor.h                          |    69 +-
 drivers/gpio/gpio-mvebu.c                     |   104 +
 drivers/i2c/busses/i2c-mv64xxx.c              |   524 +-
 drivers/irqchip/irq-armada-370-xp.c           |   427 +-
 drivers/md/bitmap.c                           |     2 +-
 drivers/md/dm-crypt.c                         |  2134 +-
 drivers/md/md.c                               |    13 +
 drivers/md/raid1.c                            |     9 +-
 drivers/md/raid5.c                            |    14 +
 drivers/memory/Kconfig                        |    10 +
 drivers/memory/Makefile                       |     1 +
 drivers/memory/mvebu-devbus.c                 |   280 +
 drivers/mmc/host/mvsdio.c                     |    10 +
 drivers/mmc/host/sdhci-pxav3.c                |   132 +-
 drivers/mtd/nand/Kconfig                      |    18 +-
 drivers/mtd/nand/Makefile                     |     1 +
 drivers/mtd/nand/mvebu_nfc/Kconfig            |    52 +
 drivers/mtd/nand/mvebu_nfc/Makefile           |    28 +
 drivers/mtd/nand/mvebu_nfc/hal/mvNfc.c        |  3278 +++
 drivers/mtd/nand/mvebu_nfc/hal/mvNfc.h        |   463 +
 drivers/mtd/nand/mvebu_nfc/hal/mvNfcRegs.h    |   313 +
 drivers/mtd/nand/mvebu_nfc/mvSysNfcConfig.h   |    43 +
 drivers/mtd/nand/mvebu_nfc/nand_nfc.c         |  1870 ++
 drivers/mtd/nand/mvebu_nfc/nand_nfc.h         |    35 +
 drivers/mtd/nand/nand_base.c                  |    90 +
 drivers/mtd/nand/nand_ids.c                   |    11 +
 drivers/mtd/nand/pxa3xx_nand.c                |  1105 +-
 drivers/net/ethernet/Kconfig                  |     1 +
 drivers/net/ethernet/Makefile                 |     3 +-
 drivers/net/ethernet/marvell/Kconfig          |    11 +
 drivers/net/ethernet/marvell/Makefile         |     1 +
 drivers/net/ethernet/marvell/mvneta.c         |    66 +-
 drivers/net/ethernet/marvell/mvpp2.c          |  6571 +++++
 drivers/net/ethernet/mvebu_net/.gitignore     |    90 +
 drivers/net/ethernet/mvebu_net/Kconfig        |    97 +
 drivers/net/ethernet/mvebu_net/Makefile       |    40 +
 .../net/ethernet/mvebu_net/common/.gitignore  |     5 +
 .../net/ethernet/mvebu_net/common/mv802_3.h   |   309 +
 .../net/ethernet/mvebu_net/common/mvCommon.c  |   417 +
 .../net/ethernet/mvebu_net/common/mvCommon.h  |   420 +
 .../ethernet/mvebu_net/common/mvCopyright.h   |    97 +
 .../net/ethernet/mvebu_net/common/mvDebug.c   |   275 +
 .../net/ethernet/mvebu_net/common/mvDebug.h   |   169 +
 .../ethernet/mvebu_net/common/mvDeviceId.h    |   470 +
 .../net/ethernet/mvebu_net/common/mvHalVer.h  |    72 +
 drivers/net/ethernet/mvebu_net/common/mvIpc.c |  1395 +
 drivers/net/ethernet/mvebu_net/common/mvIpc.h |   158 +
 .../net/ethernet/mvebu_net/common/mvList.c    |   116 +
 .../net/ethernet/mvebu_net/common/mvList.h    |   127 +
 .../net/ethernet/mvebu_net/common/mvStack.c   |    85 +
 .../net/ethernet/mvebu_net/common/mvStack.h   |   122 +
 .../net/ethernet/mvebu_net/common/mvTypes.h   |   270 +
 .../net/ethernet/mvebu_net/common/mvVideo.h   |   121 +
 .../net/ethernet/mvebu_net/linux/mv_neta.h    |   126 +
 drivers/net/ethernet/mvebu_net/linux/mv_pp2.h |   127 +
 drivers/net/ethernet/mvebu_net/mvNetConfig.h  |   136 +
 .../net/ethernet/mvebu_net/neta/.gitignore    |    97 +
 drivers/net/ethernet/mvebu_net/neta/Kconfig   |   875 +
 drivers/net/ethernet/mvebu_net/neta/Makefile  |    90 +
 .../net/ethernet/mvebu_net/neta/bm/bm_sysfs.c |   141 +
 .../ethernet/mvebu_net/neta/bm/mv_eth_bm.c    |   186 +
 .../ethernet/mvebu_net/neta/hal/.gitignore    |    97 +
 .../net/ethernet/mvebu_net/neta/hal/bm/mvBm.c |   481 +
 .../net/ethernet/mvebu_net/neta/hal/bm/mvBm.h |   135 +
 .../ethernet/mvebu_net/neta/hal/bm/mvBmRegs.h |   235 +
 .../mvebu_net/neta/hal/gbe/mvEthRegs.h        |   569 +
 .../ethernet/mvebu_net/neta/hal/gbe/mvHwf.c   |   384 +
 .../ethernet/mvebu_net/neta/hal/gbe/mvNeta.c  |  3489 +++
 .../ethernet/mvebu_net/neta/hal/gbe/mvNeta.h  |   873 +
 .../mvebu_net/neta/hal/gbe/mvNetaAddrDec.c    |   431 +
 .../mvebu_net/neta/hal/gbe/mvNetaDebug.c      |   809 +
 .../mvebu_net/neta/hal/gbe/mvNetaRegs.h       |  1256 +
 .../ethernet/mvebu_net/neta/hal/pmt/mvPmt.c   |   639 +
 .../ethernet/mvebu_net/neta/hal/pmt/mvPmt.h   |   261 +
 .../ethernet/mvebu_net/neta/hal/pnc/mvPnc.c   |  1487 +
 .../ethernet/mvebu_net/neta/hal/pnc/mvPnc.h   |   375 +
 .../mvebu_net/neta/hal/pnc/mvPncAging.c       |   315 +
 .../ethernet/mvebu_net/neta/hal/pnc/mvPncLb.c |   159 +
 .../mvebu_net/neta/hal/pnc/mvPncRxq.c         |   414 +
 .../mvebu_net/neta/hal/pnc/mvPncWol.c         |   505 +
 .../ethernet/mvebu_net/neta/hal/pnc/mvTcam.c  |   931 +
 .../ethernet/mvebu_net/neta/hal/pnc/mvTcam.h  |   390 +
 .../net/ethernet/mvebu_net/neta/hwf/hwf_bm.c  |   190 +
 .../ethernet/mvebu_net/neta/hwf/hwf_sysfs.c   |   154 +
 .../ethernet/mvebu_net/neta/l2fw/l2fw_sysfs.c |   259 +
 .../mvebu_net/neta/l2fw/mv_eth_l2fw.c         |   928 +
 .../mvebu_net/neta/l2fw/mv_eth_l2fw.h         |    76 +
 .../mvebu_net/neta/l2fw/mv_eth_l2sec.c        |   689 +
 .../mvebu_net/neta/l2fw/mv_eth_l2sec.h        |   124 +
 .../mvebu_net/neta/net_dev/mv_eth_nfp.c       |  1072 +
 .../mvebu_net/neta/net_dev/mv_eth_qos_sysfs.c |   299 +
 .../mvebu_net/neta/net_dev/mv_eth_rss_sysfs.c |   164 +
 .../mvebu_net/neta/net_dev/mv_eth_rx_sysfs.c  |   156 +
 .../mvebu_net/neta/net_dev/mv_eth_sysfs.c     |   357 +
 .../mvebu_net/neta/net_dev/mv_eth_sysfs.h     |    72 +
 .../mvebu_net/neta/net_dev/mv_eth_tool.c      |   908 +
 .../mvebu_net/neta/net_dev/mv_eth_tool.h      |    35 +
 .../neta/net_dev/mv_eth_tx_sched_sysfs.c      |   186 +
 .../mvebu_net/neta/net_dev/mv_eth_tx_sysfs.c  |   297 +
 .../mvebu_net/neta/net_dev/mv_ethernet.c      |   435 +
 .../mvebu_net/neta/net_dev/mv_netdev.c        |  7514 +++++
 .../mvebu_net/neta/net_dev/mv_netdev.h        |   891 +
 .../mvebu_net/neta/net_dev/mv_pon_sysfs.c     |   168 +
 .../ethernet/mvebu_net/neta/pmt/pmt_sysfs.c   |   289 +
 .../ethernet/mvebu_net/neta/pnc/pnc_sysfs.c   |   402 +
 .../ethernet/mvebu_net/neta/pnc/pnc_sysfs.h   |    33 +
 .../mvebu_net/neta/pnc/rxq_map_sysfs.c        |   196 +
 .../ethernet/mvebu_net/neta/pnc/wol_sysfs.c   |   190 +
 .../net/ethernet/mvebu_net/netmux/.gitignore  |    96 +
 .../net/ethernet/mvebu_net/netmux/Makefile    |    22 +
 .../ethernet/mvebu_net/netmux/mv_mux_netdev.c |  1538 +
 .../ethernet/mvebu_net/netmux/mv_mux_netdev.h |   138 +
 .../ethernet/mvebu_net/netmux/mv_mux_sysfs.c  |   252 +
 .../ethernet/mvebu_net/netmux/mv_mux_tool.c   |   241 +
 .../ethernet/mvebu_net/netmux/mv_mux_tool.h   |    40 +
 drivers/net/ethernet/mvebu_net/phy/mvEthPhy.c |   669 +
 drivers/net/ethernet/mvebu_net/phy/mvEthPhy.h |   123 +
 .../net/ethernet/mvebu_net/phy/mvEthPhyRegs.h |   176 +
 .../net/ethernet/mvebu_net/phy/phy_sysfs.c    |   172 +
 drivers/net/ethernet/mvebu_net/pp2/.gitignore |    96 +
 drivers/net/ethernet/mvebu_net/pp2/Kconfig    |   498 +
 drivers/net/ethernet/mvebu_net/pp2/Makefile   |    98 +
 .../ethernet/mvebu_net/pp2/cls/cls2_sysfs.c   |   348 +
 .../ethernet/mvebu_net/pp2/cls/cls3_sysfs.c   |   401 +
 .../ethernet/mvebu_net/pp2/cls/cls4_sysfs.c   |   286 +
 .../ethernet/mvebu_net/pp2/cls/cls_mc_sysfs.c |   191 +
 .../ethernet/mvebu_net/pp2/cls/cls_sysfs.c    |   404 +
 .../ethernet/mvebu_net/pp2/cph/mv_cph_api.c   |   636 +
 .../ethernet/mvebu_net/pp2/cph/mv_cph_api.h   |   456 +
 .../ethernet/mvebu_net/pp2/cph/mv_cph_app.c   |  1432 +
 .../ethernet/mvebu_net/pp2/cph/mv_cph_app.h   |   766 +
 .../ethernet/mvebu_net/pp2/cph/mv_cph_db.c    |  1299 +
 .../ethernet/mvebu_net/pp2/cph/mv_cph_db.h    |   529 +
 .../ethernet/mvebu_net/pp2/cph/mv_cph_dev.c   |   446 +
 .../ethernet/mvebu_net/pp2/cph/mv_cph_dev.h   |   151 +
 .../ethernet/mvebu_net/pp2/cph/mv_cph_flow.c  |  2911 ++
 .../ethernet/mvebu_net/pp2/cph/mv_cph_flow.h  |   687 +
 .../mvebu_net/pp2/cph/mv_cph_header.h         |    97 +
 .../ethernet/mvebu_net/pp2/cph/mv_cph_infra.c |   238 +
 .../ethernet/mvebu_net/pp2/cph/mv_cph_infra.h |   499 +
 .../mvebu_net/pp2/cph/mv_cph_mng_if.h         |   162 +
 .../ethernet/mvebu_net/pp2/cph/mv_cph_mod.c   |   142 +
 .../mvebu_net/pp2/cph/mv_cph_netdev.c         |   865 +
 .../mvebu_net/pp2/cph/mv_cph_netdev.h         |   205 +
 .../ethernet/mvebu_net/pp2/cph/mv_cph_sysfs.c |  1194 +
 .../ethernet/mvebu_net/pp2/cph/mv_cph_sysfs.h |   131 +
 .../ethernet/mvebu_net/pp2/dpi/dpi_sysfs.c    |   297 +
 .../net/ethernet/mvebu_net/pp2/hal/.gitignore |    97 +
 .../net/ethernet/mvebu_net/pp2/hal/bm/mvBm.c  |   935 +
 .../net/ethernet/mvebu_net/pp2/hal/bm/mvBm.h  |   225 +
 .../ethernet/mvebu_net/pp2/hal/bm/mvBmRegs.h  |   274 +
 .../mvebu_net/pp2/hal/cls/mvPp2Classifier.c   |    94 +
 .../mvebu_net/pp2/hal/cls/mvPp2Classifier.h   |   103 +
 .../mvebu_net/pp2/hal/cls/mvPp2Cls2Hw.c       |  1213 +
 .../mvebu_net/pp2/hal/cls/mvPp2Cls2Hw.h       |   307 +
 .../mvebu_net/pp2/hal/cls/mvPp2Cls3Hw.c       |  1670 ++
 .../mvebu_net/pp2/hal/cls/mvPp2Cls3Hw.h       |   523 +
 .../mvebu_net/pp2/hal/cls/mvPp2Cls4Hw.c       |   721 +
 .../mvebu_net/pp2/hal/cls/mvPp2Cls4Hw.h       |   264 +
 .../mvebu_net/pp2/hal/cls/mvPp2ClsActHw.h     |   254 +
 .../mvebu_net/pp2/hal/cls/mvPp2ClsHw.c        |  1269 +
 .../mvebu_net/pp2/hal/cls/mvPp2ClsHw.h        |   520 +
 .../mvebu_net/pp2/hal/cls/mvPp2ClsMcHw.c      |   339 +
 .../mvebu_net/pp2/hal/cls/mvPp2ClsMcHw.h      |   150 +
 .../mvebu_net/pp2/hal/common/mvPp2Common.c    |   125 +
 .../mvebu_net/pp2/hal/common/mvPp2Common.h    |   185 +
 .../mvebu_net/pp2/hal/common/mvPp2ErrCode.h   |   119 +
 .../mvebu_net/pp2/hal/dpi/mvPp2DpiHw.c        |   371 +
 .../mvebu_net/pp2/hal/dpi/mvPp2DpiHw.h        |   264 +
 .../mvebu_net/pp2/hal/gbe/mvPp2AddrDec.c      |   357 +
 .../ethernet/mvebu_net/pp2/hal/gbe/mvPp2Gbe.c |  1995 ++
 .../ethernet/mvebu_net/pp2/hal/gbe/mvPp2Gbe.h |   761 +
 .../mvebu_net/pp2/hal/gbe/mvPp2GbeDebug.c     |   629 +
 .../mvebu_net/pp2/hal/gbe/mvPp2GbeRegs.h      |  1027 +
 .../mvebu_net/pp2/hal/gmac/mvEthGmacApi.c     |   831 +
 .../mvebu_net/pp2/hal/gmac/mvEthGmacApi.h     |   196 +
 .../mvebu_net/pp2/hal/gmac/mvEthGmacRegs.h    |   347 +
 .../mvebu_net/pp2/hal/plcr/mvPp2PlcrHw.c      |   407 +
 .../mvebu_net/pp2/hal/plcr/mvPp2PlcrHw.h      |   240 +
 .../mvebu_net/pp2/hal/pme/mvPp2PmeHw.c        |   673 +
 .../mvebu_net/pp2/hal/pme/mvPp2PmeHw.h        |   303 +
 .../ethernet/mvebu_net/pp2/hal/prs/mvPp2Prs.c |  3775 +++
 .../ethernet/mvebu_net/pp2/hal/prs/mvPp2Prs.h |   248 +
 .../mvebu_net/pp2/hal/prs/mvPp2PrsHw.c        |  1273 +
 .../mvebu_net/pp2/hal/prs/mvPp2PrsHw.h        |   928 +
 .../ethernet/mvebu_net/pp2/hal/wol/mvPp2Wol.c |   322 +
 .../ethernet/mvebu_net/pp2/hal/wol/mvPp2Wol.h |   171 +
 .../ethernet/mvebu_net/pp2/l2fw/l2fw_sysfs.c  |   258 +
 .../ethernet/mvebu_net/pp2/l2fw/mv_eth_l2fw.c |  1214 +
 .../ethernet/mvebu_net/pp2/l2fw/mv_eth_l2fw.h |    78 +
 .../mvebu_net/pp2/net_dev/mv_eth_bm_sysfs.c   |   223 +
 .../mvebu_net/pp2/net_dev/mv_eth_dbg_sysfs.c  |   146 +
 .../mvebu_net/pp2/net_dev/mv_eth_hwf_sysfs.c  |   195 +
 .../mvebu_net/pp2/net_dev/mv_eth_napi_sysfs.c |   170 +
 .../mvebu_net/pp2/net_dev/mv_eth_pme_sysfs.c  |   133 +
 .../mvebu_net/pp2/net_dev/mv_eth_pon_sysfs.c  |   142 +
 .../mvebu_net/pp2/net_dev/mv_eth_qos_sysfs.c  |   155 +
 .../mvebu_net/pp2/net_dev/mv_eth_rx_sysfs.c   |   205 +
 .../mvebu_net/pp2/net_dev/mv_eth_sysfs.c      |   353 +
 .../mvebu_net/pp2/net_dev/mv_eth_sysfs.h      |   108 +
 .../mvebu_net/pp2/net_dev/mv_eth_tool.c       |  1261 +
 .../mvebu_net/pp2/net_dev/mv_eth_tool.h       |    35 +
 .../pp2/net_dev/mv_eth_tx_sched_sysfs.c       |   151 +
 .../mvebu_net/pp2/net_dev/mv_eth_tx_sysfs.c   |   241 +
 .../mvebu_net/pp2/net_dev/mv_ethernet.c       |   379 +
 .../mvebu_net/pp2/net_dev/mv_netdev.c         |  6424 +++++
 .../mvebu_net/pp2/net_dev/mv_netdev.h         |  1073 +
 .../mvebu_net/pp2/net_dev/mv_pp2_netmap.h     |   473 +
 .../ethernet/mvebu_net/pp2/plcr/plcr_sysfs.c  |   233 +
 .../ethernet/mvebu_net/pp2/pme/pme_sysfs.c    |   321 +
 .../mvebu_net/pp2/prs/prs_high_sysfs.c        |   243 +
 .../mvebu_net/pp2/prs/prs_low_sysfs.c         |   272 +
 .../ethernet/mvebu_net/pp2/prs/prs_sysfs.h    |    34 +
 .../ethernet/mvebu_net/pp2/wol/wol_sysfs.c    |   327 +
 .../net/ethernet/mvebu_net/switch/.gitignore  |    96 +
 .../net/ethernet/mvebu_net/switch/Makefile    |    28 +
 .../net/ethernet/mvebu_net/switch/mv_phy.c    |   544 +
 .../net/ethernet/mvebu_net/switch/mv_phy.h    |    46 +
 .../net/ethernet/mvebu_net/switch/mv_switch.c |  5556 ++++
 .../net/ethernet/mvebu_net/switch/mv_switch.h |   272 +
 .../mvebu_net/switch/mv_switch_sysfs.c        |   192 +
 .../mvebu_net/switch/qd-dsdt-3.3/.gitignore   |    97 +
 .../switch/qd-dsdt-3.3/Include/Copyright.h    |    60 +
 .../switch/qd-dsdt-3.3/Include/gtMad.h        |   202 +
 .../switch/qd-dsdt-3.3/Include/gtPTP.h        |   499 +
 .../Include/h/driver/gtDrvConfig.h            |   249 +
 .../Include/h/driver/gtDrvEvents.h            |    87 +
 .../Include/h/driver/gtDrvSwRegs.h            |   304 +
 .../qd-dsdt-3.3/Include/h/driver/gtHwCntl.h   |  1083 +
 .../qd-dsdt-3.3/Include/h/msApi/gtVct.h       |   172 +
 .../Include/h/msApi/msApiInternal.h           |  1820 ++
 .../Include/h/platform/gtMiiSmiIf.h           |   238 +
 .../qd-dsdt-3.3/Include/h/platform/gtSem.h    |   153 +
 .../Include/h/platform/platformDeps.h         |    30 +
 .../switch/qd-dsdt-3.3/Include/msApi.h        |    22 +
 .../switch/qd-dsdt-3.3/Include/msApiDefs.h    |  4529 +++
 .../qd-dsdt-3.3/Include/msApiPrototype.h      | 23371 ++++++++++++++++
 .../switch/qd-dsdt-3.3/Include/msApiSelect.h  |    55 +
 .../switch/qd-dsdt-3.3/Include/msApiTypes.h   |   100 +
 .../switch/qd-dsdt-3.3/Include/msApiWince.h   |   556 +
 .../switch/qd-dsdt-3.3/README_SW.txt          |   382 +
 .../mvebu_net/switch/qd-dsdt-3.3/makedefs     |    14 +
 .../switch/qd-dsdt-3.3/sample/802.1Q/802_1q.c |   332 +
 .../qd-dsdt-3.3/sample/802.1Q/readme.txt      |    46 +
 .../sample/CableTest/advCableTest.c           |   205 +
 .../qd-dsdt-3.3/sample/CableTest/cableTest.c  |   109 +
 .../qd-dsdt-3.3/sample/CableTest/readme.txt   |    13 +
 .../sample/CrossChipTrunk/crossChipTrunk.c    |   347 +
 .../sample/CrossChipTrunk/readme.txt          |    12 +
 .../qd-dsdt-3.3/sample/FlowControl/flowCtrl.c |    55 +
 .../qd-dsdt-3.3/sample/FlowControl/readme.txt |    10 +
 .../switch/qd-dsdt-3.3/sample/Header/header.c |    44 +
 .../qd-dsdt-3.3/sample/Header/readme.txt      |    19 +
 .../qd-dsdt-3.3/sample/Include/msSample.h     |   146 +
 .../qd-dsdt-3.3/sample/Include/qdSimRegs.h    |   132 +
 .../sample/Initialization/ev96122mii.c        |   213 +
 .../sample/Initialization/msApiInit.c         |   117 +
 .../sample/Initialization/msSample.h          |   157 +
 .../qd-dsdt-3.3/sample/Initialization/osSem.c |   233 +
 .../qd-dsdt-3.3/sample/Initialization/qdSim.c |  2004 ++
 .../sample/Initialization/readme.txt          |    85 +
 .../qd-dsdt-3.3/sample/Interrupt/qdInt.c      |   386 +
 .../qd-dsdt-3.3/sample/Interrupt/readme.txt   |    13 +
 .../sample/LoadBalance/loadBalance.c          |    57 +
 .../qd-dsdt-3.3/sample/LoadBalance/readme.txt |     9 +
 .../qd-dsdt-3.3/sample/MACAddress/macAddr.c   |   223 +
 .../qd-dsdt-3.3/sample/MACAddress/readme.txt  |    16 +
 .../MinimizeCPUTraffic/minimizeCPUTraffic.c   |   184 +
 .../sample/MinimizeCPUTraffic/readme.txt      |    22 +
 .../sample/MultiDevice/msApiInit.c            |   271 +
 .../switch/qd-dsdt-3.3/sample/PIRL/pirl.c     |    92 +
 .../switch/qd-dsdt-3.3/sample/PIRL/pirl2.c    |   199 +
 .../switch/qd-dsdt-3.3/sample/PIRL/readme.txt |    16 +
 .../switch/qd-dsdt-3.3/sample/PTP/ptp.c       |   281 +
 .../switch/qd-dsdt-3.3/sample/PTP/readme.txt  |     7 +
 .../sample/PktGen/phyPktGenSample.c           |    86 +
 .../qd-dsdt-3.3/sample/PktGen/readme.txt      |    12 +
 .../sample/PortMonitor/portMonitor.c          |   107 +
 .../qd-dsdt-3.3/sample/PortMonitor/readme.txt |    21 +
 .../switch/qd-dsdt-3.3/sample/QoSSetup/qos.c  |   195 +
 .../qd-dsdt-3.3/sample/QoSSetup/readme.txt    |    32 +
 .../switch/qd-dsdt-3.3/sample/README          |     2 +
 .../switch/qd-dsdt-3.3/sample/RMON/readme.txt |    28 +
 .../switch/qd-dsdt-3.3/sample/RMON/rmon.c     |   237 +
 .../qd-dsdt-3.3/sample/Trailer/readme.txt     |    12 +
 .../qd-dsdt-3.3/sample/Trailer/trailer.c      |    50 +
 .../qd-dsdt-3.3/sample/VlanSetup/hgVlan.c     |   135 +
 .../qd-dsdt-3.3/sample/VlanSetup/readme.txt   |    31 +
 .../switch/qd-dsdt-3.3/sample/makefile        |    53 +
 .../switch/qd-dsdt-3.3/sample/tcam/readme.txt |     7 +
 .../switch/qd-dsdt-3.3/sample/tcam/tcam.c     |   408 +
 .../qd-dsdt-3.3/src/driver/gtDrvConfig.c      |   963 +
 .../qd-dsdt-3.3/src/driver/gtDrvEvents.c      |    94 +
 .../switch/qd-dsdt-3.3/src/driver/gtHwCntl.c  |  2931 ++
 .../switch/qd-dsdt-3.3/src/driver/makefile    |    33 +
 .../mvebu_net/switch/qd-dsdt-3.3/src/makefile |   106 +
 .../switch/qd-dsdt-3.3/src/msapi/gtAVB.c      |  4018 +++
 .../switch/qd-dsdt-3.3/src/msapi/gtAdvVct.c   |  1876 ++
 .../qd-dsdt-3.3/src/msapi/gtAdvVct_mad.c      |   227 +
 .../switch/qd-dsdt-3.3/src/msapi/gtBrgFdb.c   |  3289 +++
 .../switch/qd-dsdt-3.3/src/msapi/gtBrgStp.c   |   333 +
 .../switch/qd-dsdt-3.3/src/msapi/gtBrgStu.c   |   848 +
 .../switch/qd-dsdt-3.3/src/msapi/gtBrgVlan.c  |  1312 +
 .../switch/qd-dsdt-3.3/src/msapi/gtBrgVtu.c   |  1592 ++
 .../switch/qd-dsdt-3.3/src/msapi/gtCCPVT.c    |   438 +
 .../switch/qd-dsdt-3.3/src/msapi/gtEvents.c   |  1583 ++
 .../switch/qd-dsdt-3.3/src/msapi/gtMisc.c     |  2188 ++
 .../switch/qd-dsdt-3.3/src/msapi/gtPCSCtrl.c  |  1937 ++
 .../switch/qd-dsdt-3.3/src/msapi/gtPIRL.c     |  1827 ++
 .../switch/qd-dsdt-3.3/src/msapi/gtPIRL2.c    |  1833 ++
 .../switch/qd-dsdt-3.3/src/msapi/gtPTP.c      |  5681 ++++
 .../qd-dsdt-3.3/src/msapi/gtPTPHidden.c       |   269 +
 .../qd-dsdt-3.3/src/msapi/gtPage2Access.c     |  1175 +
 .../switch/qd-dsdt-3.3/src/msapi/gtPhyCtrl.c  |  3752 +++
 .../qd-dsdt-3.3/src/msapi/gtPhyCtrl_mad.c     |  2045 ++
 .../switch/qd-dsdt-3.3/src/msapi/gtPhyInt.c   |   301 +
 .../qd-dsdt-3.3/src/msapi/gtPhyInt_mad.c      |   272 +
 .../switch/qd-dsdt-3.3/src/msapi/gtPolicy.c   |   279 +
 .../switch/qd-dsdt-3.3/src/msapi/gtPortCtrl.c |  7290 +++++
 .../switch/qd-dsdt-3.3/src/msapi/gtPortLed.c  |   840 +
 .../switch/qd-dsdt-3.3/src/msapi/gtPortPav.c  |   269 +
 .../qd-dsdt-3.3/src/msapi/gtPortRateCtrl.c    |  2818 ++
 .../switch/qd-dsdt-3.3/src/msapi/gtPortRmon.c |  1244 +
 .../switch/qd-dsdt-3.3/src/msapi/gtPortStat.c |   391 +
 .../qd-dsdt-3.3/src/msapi/gtPortStatus.c      |  2235 ++
 .../switch/qd-dsdt-3.3/src/msapi/gtPriTable.c |  1110 +
 .../switch/qd-dsdt-3.3/src/msapi/gtQosMap.c   |  2423 ++
 .../qd-dsdt-3.3/src/msapi/gtSerdesCtrl.c      |   287 +
 .../qd-dsdt-3.3/src/msapi/gtSysConfig.c       |  1304 +
 .../switch/qd-dsdt-3.3/src/msapi/gtSysCtrl.c  |  9236 ++++++
 .../qd-dsdt-3.3/src/msapi/gtSysStatus.c       |   262 +
 .../switch/qd-dsdt-3.3/src/msapi/gtTCAM.c     |  1226 +
 .../switch/qd-dsdt-3.3/src/msapi/gtUtils.c    |   209 +
 .../switch/qd-dsdt-3.3/src/msapi/gtVct.c      |  1248 +
 .../switch/qd-dsdt-3.3/src/msapi/gtVct_mad.c  |   224 +
 .../switch/qd-dsdt-3.3/src/msapi/gtVersion.c  |    59 +
 .../switch/qd-dsdt-3.3/src/msapi/gtWeight.c   |   479 +
 .../switch/qd-dsdt-3.3/src/msapi/makefile     |    49 +
 .../switch/qd-dsdt-3.3/src/platform/gtDebug.c |    78 +
 .../qd-dsdt-3.3/src/platform/gtMiiSmiIf.c     |   835 +
 .../switch/qd-dsdt-3.3/src/platform/gtSem.c   |   148 +
 .../switch/qd-dsdt-3.3/src/platform/makefile  |    33 +
 .../qd-dsdt-3.3/src/platform/platformDeps.c   |    90 +
 .../switch/qd-dsdt-3.3/tools/make.defs        |   105 +
 .../switch/qd-dsdt-3.3/tools/make.rules       |    54 +
 .../switch/qd-dsdt-3.3/tools/makelnx.defs     |    62 +
 .../switch/qd-dsdt-3.3/tools/makelnx.rules    |    57 +
 .../switch/qd-dsdt-3.3/tools/makewce.defs     |    94 +
 .../switch/qd-dsdt-3.3/tools/makewce.rules    |    49 +
 .../mvebu_net/switch/qd-dsdt-3.3/tools/setenv |    14 +
 .../switch/qd-dsdt-3.3/tools/setenv.bat       |    55 +
 drivers/net/phy/fixed.c                       |     2 +
 drivers/of/address.c                          |    77 +-
 drivers/of/base.c                             |    75 +-
 drivers/of/irq.c                              |   172 +-
 drivers/of/of_mdio.c                          |    36 +-
 drivers/of/of_pci.c                           |   104 +-
 drivers/of/of_pci_irq.c                       |    15 +-
 drivers/of/selftest.c                         |   161 +-
 drivers/pci/Kconfig                           |     6 +-
 drivers/pci/Makefile                          |     3 +
 drivers/pci/host/Kconfig                      |     8 +
 drivers/pci/host/Makefile                     |     1 +
 drivers/pci/host/pci-mvebu.c                  |  1006 +
 drivers/pci/msi.c                             |    69 +-
 drivers/pci/probe.c                           |     1 +
 drivers/pinctrl/mvebu/Kconfig                 |     8 +
 drivers/pinctrl/mvebu/Makefile                |     2 +
 drivers/pinctrl/mvebu/pinctrl-armada-375.c    |   441 +
 drivers/pinctrl/mvebu/pinctrl-armada-38x.c    |   449 +
 drivers/pinctrl/mvebu/pinctrl-mvebu.c         |    34 +
 drivers/pinctrl/mvebu/pinctrl-mvebu.h         |     2 +
 drivers/reset/core.c                          |    47 +-
 drivers/rtc/Makefile                          |     1 +
 drivers/rtc/rtc-mv.c                          |    12 +
 drivers/rtc/rtc-mvebu.c                       |   489 +
 drivers/scsi/scsi_error.c                     |     4 +-
 drivers/scsi/sd.c                             |    25 +
 drivers/spi/spi-orion.c                       |    35 +-
 drivers/target/iscsi/iscsi_target.c           |     4 +
 drivers/target/iscsi/iscsi_target_auth.c      |   109 +-
 drivers/target/iscsi/iscsi_target_configfs.c  |   242 +-
 drivers/target/iscsi/iscsi_target_login.c     |    28 +
 drivers/target/iscsi/iscsi_target_nego.c      |    30 +-
 drivers/target/iscsi/iscsi_target_nego.h      |     3 +
 .../target/iscsi/iscsi_target_nodeattrib.c    |    21 +
 .../target/iscsi/iscsi_target_nodeattrib.h    |     4 +
 .../target/iscsi/iscsi_target_parameters.h    |     4 +
 drivers/target/iscsi/iscsi_target_tpg.c       |    16 +
 drivers/target/iscsi/iscsi_target_tpg.h       |     4 +
 drivers/target/target_core_device.c           |    50 +
 drivers/target/target_core_fabric_configfs.c  |    56 +
 drivers/target/target_core_internal.h         |     4 +
 drivers/target/target_core_pr.c               |    24 +-
 drivers/target/target_core_sbc.c              |    33 +-
 drivers/target/target_core_spc.c              |    89 +-
 drivers/target/target_core_tpg.c              |   123 +
 drivers/target/target_core_transport.c        |    19 +
 drivers/thermal/armada_thermal.c              |   175 +-
 drivers/usb/core/hub.c                        |    59 +-
 drivers/usb/host/Kconfig                      |     7 +
 drivers/usb/host/Makefile                     |     1 +
 drivers/usb/host/ehci-orion.c                 |   102 +-
 drivers/usb/host/xhci-hub.c                   |    31 +-
 drivers/usb/host/xhci-mem.c                   |    34 +
 drivers/usb/host/xhci-mvebu.c                 |   123 +
 drivers/usb/host/xhci-mvebu.h                 |    23 +
 drivers/usb/host/xhci-plat.c                  |   148 +-
 drivers/usb/host/xhci-ring.c                  |     6 +
 drivers/usb/host/xhci.c                       |    25 +-
 drivers/usb/host/xhci.h                       |     4 +
 fs/btrfs/disk-io.c                            |    44 +
 fs/btrfs/disk-io.h                            |     2 +
 fs/btrfs/file-item.c                          |    56 +-
 fs/btrfs/free-space-cache.c                   |    24 +-
 fs/buffer.c                                   |     7 +-
 fs/cifs/file.c                                |     6 +-
 fs/ext2/dir.c                                 |     4 +-
 fs/ext4/file.c                                |     1 +
 fs/ext4/mballoc.c                             |    32 +-
 fs/fuse/dev.c                                 |     2 +-
 fs/notify/inode_mark.c                        |    17 +-
 fs/notify/mark.c                              |    12 +-
 fs/read_write.c                               |     2 +
 fs/splice.c                                   |   282 +-
 fs/squashfs/file.c                            |     2 +-
 fs/squashfs/symlink.c                         |     2 +-
 fs/ubifs/debug.c                              |     4 +-
 fs/ubifs/file.c                               |    14 +-
 include/linux/dmaengine.h                     |     9 +
 include/linux/fs.h                            |     5 +
 include/linux/fsnotify_backend.h              |     1 +
 include/linux/irqdesc.h                       |     8 +
 include/linux/mbus.h                          |    19 +-
 include/linux/miscdevice.h                    |     2 +
 include/linux/mm.h                            |     1 +
 include/linux/msi.h                           |    21 +-
 include/linux/mtd/bbm.h                       |     5 +
 include/linux/mtd/nand.h                      |    62 +-
 include/linux/mvebu-v7-cpuidle.h              |    28 +
 include/linux/of.h                            |    27 +
 include/linux/of_address.h                    |    48 +
 include/linux/of_irq.h                        |    57 +-
 include/linux/of_mdio.h                       |    10 +
 include/linux/of_pci.h                        |    18 +-
 include/linux/pci.h                           |     1 +
 include/linux/platform_data/mtd-nand-pxa3xx.h |    16 +-
 include/linux/platform_data/pxa_sdhci.h       |     2 +
 include/linux/radix-tree.h                    |    44 +-
 include/linux/reset.h                         |     2 +
 include/linux/skbuff.h                        |    38 +
 include/linux/socket.h                        |     1 +
 include/linux/types.h                         |     2 +-
 include/target/target_core_backend.h          |     3 +
 include/target/target_core_base.h             |     3 +
 include/target/target_core_fabric.h           |     3 +
 include/uapi/linux/loop.h                     |     5 +-
 kernel/irq/irqdomain.c                        |    15 +-
 lib/radix-tree.c                              |    91 +-
 mkimage                                       |   Bin 0 -> 12796 bytes
 mm/memory.c                                   |    16 +-
 mm/mmap.c                                     |     8 +
 mm/page_alloc.c                               |     2 +-
 mm/percpu.c                                   |     2 +-
 net/core/datagram.c                           |    59 +
 net/core/dev.c                                |    11 +-
 net/core/iovec.c                              |    20 +
 net/core/skbuff.c                             |    75 +
 net/ipv4/tcp.c                                |    34 +-
 net/socket.c                                  |     1 +
 patch/ITR-99092.diff                          |    78 +
 scripts/checkpatch.pl                         |    26 +-
 scripts/mkcompile_h                           |     3 +
 scripts/mkuboot.sh                            |     6 +-
 tools/Makefile                                |    11 +-
 tools/cesa/Makefile                           |    19 +
 tools/cesa/mv_cesa_tool.c                     |   307 +
 tools/nas/nas_init.sh                         |   873 +
 xbuild.sh                                     |   172 +
 xbuild_forGPL.sh                              |    77 +
 998 files changed, 371154 insertions(+), 4376 deletions(-)
 create mode 100755 Build2.sh
 create mode 100644 Documentation/devicetree/bindings/arm/armada-375-usb-cluster.txt
 create mode 100644 Documentation/devicetree/bindings/arm/armada-375.txt
 create mode 100644 Documentation/devicetree/bindings/bus/mvebu-mbus.txt
 create mode 100644 Documentation/devicetree/bindings/crypto/mvebu_cesa.txt
 create mode 100644 Documentation/devicetree/bindings/memory-controllers/mvebu-devbus.txt
 create mode 100644 Documentation/devicetree/bindings/memory-controllers/mvebu-sdram-controller.txt
 create mode 100644 Documentation/devicetree/bindings/mtd/mvebu_nfc.txt
 create mode 100644 Documentation/devicetree/bindings/net/fixed-link.txt
 create mode 100644 Documentation/devicetree/bindings/net/marvell-pp2.txt
 create mode 100644 Documentation/devicetree/bindings/pci/mvebu-pci.txt
 create mode 100644 Documentation/devicetree/bindings/pinctrl/marvell,armada-375-pinctrl.txt
 create mode 100644 Documentation/devicetree/bindings/reset/marvell,armada-cpu-reset.txt
 create mode 100644 Documentation/devicetree/bindings/usb/usb-xhci.txt
 mode change 100644 => 100755 arch/arm/boot/compressed/misc.c
 create mode 100644 arch/arm/boot/dts/YY_default/armada-385-db.dts
 create mode 100644 arch/arm/boot/dts/YY_default/armada-385-rd.dts
 create mode 100644 arch/arm/boot/dts/YY_default/armada-388-rd.dts
 create mode 100644 arch/arm/boot/dts/YY_default/armada-38x.dtsi
 create mode 100644 arch/arm/boot/dts/YY_default_T30p5/armada-385-db.dts
 create mode 100644 arch/arm/boot/dts/YY_default_T30p5/armada-385-rd.dts
 create mode 100755 arch/arm/boot/dts/YY_default_T30p5/armada-388-rd.dts
 create mode 100644 arch/arm/boot/dts/YY_default_T30p5/armada-38x.dtsi
 create mode 100755 arch/arm/boot/dts/Yellowstone/armada-385-db.dts
 create mode 100644 arch/arm/boot/dts/Yellowstone/armada-385-rd.dts
 create mode 100755 arch/arm/boot/dts/Yellowstone/armada-388-rd.dts
 create mode 100755 arch/arm/boot/dts/Yellowstone/armada-38x.dtsi
 create mode 100755 arch/arm/boot/dts/Yosemite/armada-385-db.dts
 create mode 100755 arch/arm/boot/dts/Yosemite/armada-385-rd.dts
 create mode 100755 arch/arm/boot/dts/Yosemite/armada-388-rd.dts
 create mode 100755 arch/arm/boot/dts/Yosemite/armada-38x.dtsi
 create mode 100644 arch/arm/boot/dts/armada-375-db.dts
 create mode 100644 arch/arm/boot/dts/armada-375.dtsi
 create mode 100644 arch/arm/boot/dts/armada-380.dtsi
 create mode 100644 arch/arm/boot/dts/armada-382-customer2.dts
 create mode 100644 arch/arm/boot/dts/armada-382-db.dts
 create mode 100644 arch/arm/boot/dts/armada-385-388.dtsi
 create mode 100644 arch/arm/boot/dts/armada-385-customer1.dts
 create mode 100644 arch/arm/boot/dts/armada-385-db-ap.dts
 create mode 100644 arch/arm/boot/dts/armada-388-customer0.dts
 create mode 100644 arch/arm/boot/dts/armada-388-db-gp.dts
 create mode 100644 arch/arm/boot/dts/armada-38x-modular.dts
 create mode 100644 arch/arm/boot/dts/testcases/tests-interrupts.dtsi
 create mode 100644 arch/arm/configs/mvebu_lsp_defconfig
 create mode 100644 arch/arm/kernel/sigreturn_codes.S
 create mode 100644 arch/arm/mach-mvebu/armada-375.c
 create mode 100644 arch/arm/mach-mvebu/armada-375.h
 create mode 100644 arch/arm/mach-mvebu/armada-380.h
 create mode 100644 arch/arm/mach-mvebu/armada-38x.c
 create mode 100644 arch/arm/mach-mvebu/common.c
 create mode 100644 arch/arm/mach-mvebu/cpu-reset.c
 create mode 100644 arch/arm/mach-mvebu/dump_mv_regs.c
 create mode 100644 arch/arm/mach-mvebu/headsmp-375.S
 create mode 100644 arch/arm/mach-mvebu/headsmp-380.S
 create mode 100644 arch/arm/mach-mvebu/include/mach/.gitignore
 create mode 100644 arch/arm/mach-mvebu/include/mach/mvCommon.h
 create mode 100644 arch/arm/mach-mvebu/include/mach/mvDebug.h
 create mode 100644 arch/arm/mach-mvebu/include/mach/mvTypes.h
 create mode 100644 arch/arm/mach-mvebu/linux_oss/mvOs.c
 create mode 100644 arch/arm/mach-mvebu/linux_oss/mvOs.h
 create mode 100644 arch/arm/mach-mvebu/mvebu-soc-id.c
 create mode 100644 arch/arm/mach-mvebu/mvebu-soc-id.h
 create mode 100644 arch/arm/mach-mvebu/platsmp-375.c
 create mode 100644 arch/arm/mach-mvebu/platsmp-380.c
 create mode 100644 arch/arm/mach-mvebu/pm-board.c
 create mode 100644 arch/arm/mach-mvebu/pm.c
 create mode 100644 arch/arm/mach-mvebu/pmsu_ll.S
 create mode 100644 arch/arm/mach-mvebu/serdes.c
 create mode 100644 arch/arm/mach-mvebu/soft-poweroff.c
 create mode 100644 arch/arm/mach-mvebu/usb-cluster.c
 create mode 100644 arch/arm/mach-mvebu/usb-utmi.c
 create mode 100644 crypto/ocf/ChangeLog
 create mode 100644 crypto/ocf/Config.in
 create mode 100644 crypto/ocf/Kconfig
 create mode 100644 crypto/ocf/Makefile
 create mode 100644 crypto/ocf/c7108/Makefile
 create mode 100644 crypto/ocf/c7108/aes-7108.c
 create mode 100644 crypto/ocf/c7108/aes-7108.h
 create mode 100644 crypto/ocf/criov.c
 create mode 100644 crypto/ocf/crypto.c
 create mode 100644 crypto/ocf/cryptocteon/Makefile
 create mode 100644 crypto/ocf/cryptocteon/README.txt
 create mode 100644 crypto/ocf/cryptocteon/cavium_crypto.c
 create mode 100644 crypto/ocf/cryptocteon/cryptocteon.c
 create mode 100644 crypto/ocf/cryptodev.c
 create mode 100644 crypto/ocf/cryptodev.h
 create mode 100644 crypto/ocf/cryptosoft.c
 create mode 100644 crypto/ocf/ep80579/Makefile
 create mode 100644 crypto/ocf/ep80579/environment.mk
 create mode 100644 crypto/ocf/ep80579/icp_asym.c
 create mode 100644 crypto/ocf/ep80579/icp_common.c
 create mode 100644 crypto/ocf/ep80579/icp_ocf.h
 create mode 100644 crypto/ocf/ep80579/icp_sym.c
 create mode 100644 crypto/ocf/ep80579/linux_2.6_kernel_space.mk
 create mode 100644 crypto/ocf/hifn/Makefile
 create mode 100644 crypto/ocf/hifn/hifn7751.c
 create mode 100644 crypto/ocf/hifn/hifn7751reg.h
 create mode 100644 crypto/ocf/hifn/hifn7751var.h
 create mode 100644 crypto/ocf/hifn/hifnHIPP.c
 create mode 100644 crypto/ocf/hifn/hifnHIPPreg.h
 create mode 100644 crypto/ocf/hifn/hifnHIPPvar.h
 create mode 100644 crypto/ocf/ixp4xx/Makefile
 create mode 100644 crypto/ocf/ixp4xx/ixp4xx.c
 create mode 100644 crypto/ocf/kirkwood/Makefile
 create mode 100644 crypto/ocf/kirkwood/cesa/AES/mvAes.h
 create mode 100644 crypto/ocf/kirkwood/cesa/AES/mvAesAlg.c
 create mode 100644 crypto/ocf/kirkwood/cesa/AES/mvAesAlg.h
 create mode 100644 crypto/ocf/kirkwood/cesa/AES/mvAesApi.c
 create mode 100644 crypto/ocf/kirkwood/cesa/AES/mvAesBoxes.dat
 create mode 100644 crypto/ocf/kirkwood/cesa/mvCesa.c
 create mode 100644 crypto/ocf/kirkwood/cesa/mvCesa.h
 create mode 100644 crypto/ocf/kirkwood/cesa/mvCesaDebug.c
 create mode 100644 crypto/ocf/kirkwood/cesa/mvCesaRegs.h
 create mode 100644 crypto/ocf/kirkwood/cesa/mvCesaTest.c
 create mode 100644 crypto/ocf/kirkwood/cesa/mvLru.c
 create mode 100644 crypto/ocf/kirkwood/cesa/mvLru.h
 create mode 100644 crypto/ocf/kirkwood/cesa/mvMD5.c
 create mode 100644 crypto/ocf/kirkwood/cesa/mvMD5.h
 create mode 100644 crypto/ocf/kirkwood/cesa/mvSHA1.c
 create mode 100644 crypto/ocf/kirkwood/cesa/mvSHA1.h
 create mode 100644 crypto/ocf/kirkwood/cesa_ocf_drv.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/common/mv802_3.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/common/mvCommon.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/common/mvCommon.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/common/mvDebug.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/common/mvDebug.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/common/mvDeviceId.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/common/mvHalVer.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/common/mvStack.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/common/mvStack.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/common/mvTypes.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/dbg-trace.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/dbg-trace.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvLib.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvLib.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvSpec.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvSpec.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/cpu/mvCpu.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/cpu/mvCpu.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAddrDec.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAddrDec.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAsm.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvLib.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvLib.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvRegs.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvSpec.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbus.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbus.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbusRegs.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIf.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIf.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIfInit.S
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIfRegs.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysAudio.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysAudio.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysCesa.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysCesa.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysDram.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysDram.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysGbe.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysGbe.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysPex.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysPex.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSata.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSata.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSdmmc.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSdmmc.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTdm.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTdm.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTs.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTs.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysUsb.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysUsb.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysXor.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysXor.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/device/mvDevice.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/device/mvDevice.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/kw_family/device/mvDeviceRegs.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/linux_oss/mvOs.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/linux_oss/mvOs.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/linux_oss/mvOsSata.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mvSysHwConfig.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmr.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmr.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmrRegs.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuCntrs.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuCntrs.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuL2Cntrs.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuL2Cntrs.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDram.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDram.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIf.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIf.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfBasicInit.S
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfConfig.S
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfConfig.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfRegs.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIf.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIf.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfBasicInit.S
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfConfig.S
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfConfig.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfRegs.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfStaticInit.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/spd/mvSpd.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/spd/mvSpd.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEth.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthDebug.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthDebug.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthGbe.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthRegs.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/eth/mvEth.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGpp.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGpp.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGppRegs.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIf.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIf.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIfRegs.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/pci_util/mvPciUtils.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/pci_util/mvPciUtils.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPci.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPci.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPciRegs.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPex.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPex.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPexRegs.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvVrtBrgPex.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvVrtBrgPex.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlash.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlash.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlashSpec.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpi.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpi.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiCmnd.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiCmnd.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiSpec.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsi.c
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsi.h
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsiEeprom.S
 create mode 100644 crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsiSpec.h
 create mode 100644 crypto/ocf/ocf-bench.c
 create mode 100644 crypto/ocf/ocf-compat.h
 create mode 100644 crypto/ocf/ocfnull/Makefile
 create mode 100644 crypto/ocf/ocfnull/ocfnull.c
 create mode 100644 crypto/ocf/pasemi/Makefile
 create mode 100644 crypto/ocf/pasemi/pasemi.c
 create mode 100644 crypto/ocf/pasemi/pasemi_fnu.h
 create mode 100644 crypto/ocf/random.c
 create mode 100644 crypto/ocf/rndtest.c
 create mode 100644 crypto/ocf/rndtest.h
 create mode 100644 crypto/ocf/safe/Makefile
 create mode 100644 crypto/ocf/safe/hmachack.h
 create mode 100644 crypto/ocf/safe/md5.c
 create mode 100644 crypto/ocf/safe/md5.h
 create mode 100644 crypto/ocf/safe/safe.c
 create mode 100644 crypto/ocf/safe/safereg.h
 create mode 100644 crypto/ocf/safe/safevar.h
 create mode 100644 crypto/ocf/safe/sha1.c
 create mode 100644 crypto/ocf/safe/sha1.h
 create mode 100644 crypto/ocf/talitos/Makefile
 create mode 100644 crypto/ocf/talitos/talitos.c
 create mode 100644 crypto/ocf/talitos/talitos_dev.h
 create mode 100644 crypto/ocf/talitos/talitos_soft.h
 create mode 100644 crypto/ocf/ubsec_ssb/Makefile
 create mode 100644 crypto/ocf/ubsec_ssb/bsdqueue.h
 create mode 100644 crypto/ocf/ubsec_ssb/ubsec_ssb.c
 create mode 100644 crypto/ocf/ubsec_ssb/ubsecreg.h
 create mode 100644 crypto/ocf/ubsec_ssb/ubsecvar.h
 create mode 100644 crypto/ocf/uio.h
 create mode 100644 drivers/ata/ahci_mv.c
 create mode 100644 drivers/clk/mvebu/clk-corediv.c
 create mode 100644 drivers/cpuidle/cpuidle-mvebu-v7.c
 create mode 100644 drivers/crypto/mvebu_cesa/Kconfig
 create mode 100755 drivers/crypto/mvebu_cesa/Makefile
 create mode 100644 drivers/crypto/mvebu_cesa/cesa_apps/libreswan/README
 create mode 100755 drivers/crypto/mvebu_cesa/cesa_apps/libreswan/ipsec_build.sh
 create mode 100644 drivers/crypto/mvebu_cesa/cesa_apps/libreswan/ipsec_routing_setup.sh
 create mode 100644 drivers/crypto/mvebu_cesa/cesa_apps/openssl/README
 create mode 100644 drivers/crypto/mvebu_cesa/cesa_apps/openswan/README
 create mode 100755 drivers/crypto/mvebu_cesa/cesa_apps/openswan/ipsec_build.sh
 create mode 100644 drivers/crypto/mvebu_cesa/cesa_apps/openswan/ipsec_routing_setup.sh
 create mode 100755 drivers/crypto/mvebu_cesa/cesa_dev.c
 create mode 100644 drivers/crypto/mvebu_cesa/cesa_dev.h
 create mode 100644 drivers/crypto/mvebu_cesa/cesa_if.c
 create mode 100644 drivers/crypto/mvebu_cesa/cesa_if.h
 create mode 100755 drivers/crypto/mvebu_cesa/cesa_ocf_drv.c
 create mode 100644 drivers/crypto/mvebu_cesa/cesa_test.c
 create mode 100644 drivers/crypto/mvebu_cesa/hal/AES/mvAes.h
 create mode 100644 drivers/crypto/mvebu_cesa/hal/AES/mvAesAlg.c
 create mode 100644 drivers/crypto/mvebu_cesa/hal/AES/mvAesAlg.h
 create mode 100644 drivers/crypto/mvebu_cesa/hal/AES/mvAesApi.c
 create mode 100644 drivers/crypto/mvebu_cesa/hal/AES/mvAesBoxes.dat
 create mode 100644 drivers/crypto/mvebu_cesa/hal/mvCesa.c
 create mode 100644 drivers/crypto/mvebu_cesa/hal/mvCesa.h
 create mode 100644 drivers/crypto/mvebu_cesa/hal/mvCesaAddrDec.c
 create mode 100644 drivers/crypto/mvebu_cesa/hal/mvCesaDebug.c
 create mode 100644 drivers/crypto/mvebu_cesa/hal/mvCesaRegs.h
 create mode 100644 drivers/crypto/mvebu_cesa/hal/mvLru.c
 create mode 100644 drivers/crypto/mvebu_cesa/hal/mvLru.h
 create mode 100644 drivers/crypto/mvebu_cesa/hal/mvMD5.c
 create mode 100644 drivers/crypto/mvebu_cesa/hal/mvMD5.h
 create mode 100644 drivers/crypto/mvebu_cesa/hal/mvSHA1.c
 create mode 100644 drivers/crypto/mvebu_cesa/hal/mvSHA1.h
 create mode 100644 drivers/crypto/mvebu_cesa/hal/mvSHA256.c
 create mode 100644 drivers/crypto/mvebu_cesa/hal/mvSHA256.h
 create mode 100644 drivers/crypto/mvebu_cesa/mvSysCesaConfig.h
 create mode 100644 drivers/dma/mv_memcpy.c
 create mode 100644 drivers/dma/mv_memcpy.h
 mode change 100644 => 100755 drivers/md/md.c
 mode change 100644 => 100755 drivers/md/raid1.c
 mode change 100644 => 100755 drivers/md/raid5.c
 create mode 100644 drivers/memory/mvebu-devbus.c
 create mode 100644 drivers/mtd/nand/mvebu_nfc/Kconfig
 create mode 100644 drivers/mtd/nand/mvebu_nfc/Makefile
 create mode 100644 drivers/mtd/nand/mvebu_nfc/hal/mvNfc.c
 create mode 100644 drivers/mtd/nand/mvebu_nfc/hal/mvNfc.h
 create mode 100644 drivers/mtd/nand/mvebu_nfc/hal/mvNfcRegs.h
 create mode 100644 drivers/mtd/nand/mvebu_nfc/mvSysNfcConfig.h
 create mode 100644 drivers/mtd/nand/mvebu_nfc/nand_nfc.c
 create mode 100644 drivers/mtd/nand/mvebu_nfc/nand_nfc.h
 create mode 100644 drivers/net/ethernet/marvell/mvpp2.c
 create mode 100644 drivers/net/ethernet/mvebu_net/.gitignore
 create mode 100644 drivers/net/ethernet/mvebu_net/Kconfig
 create mode 100644 drivers/net/ethernet/mvebu_net/Makefile
 create mode 100644 drivers/net/ethernet/mvebu_net/common/.gitignore
 create mode 100644 drivers/net/ethernet/mvebu_net/common/mv802_3.h
 create mode 100644 drivers/net/ethernet/mvebu_net/common/mvCommon.c
 create mode 100644 drivers/net/ethernet/mvebu_net/common/mvCommon.h
 create mode 100644 drivers/net/ethernet/mvebu_net/common/mvCopyright.h
 create mode 100644 drivers/net/ethernet/mvebu_net/common/mvDebug.c
 create mode 100644 drivers/net/ethernet/mvebu_net/common/mvDebug.h
 create mode 100755 drivers/net/ethernet/mvebu_net/common/mvDeviceId.h
 create mode 100644 drivers/net/ethernet/mvebu_net/common/mvHalVer.h
 create mode 100644 drivers/net/ethernet/mvebu_net/common/mvIpc.c
 create mode 100644 drivers/net/ethernet/mvebu_net/common/mvIpc.h
 create mode 100644 drivers/net/ethernet/mvebu_net/common/mvList.c
 create mode 100644 drivers/net/ethernet/mvebu_net/common/mvList.h
 create mode 100644 drivers/net/ethernet/mvebu_net/common/mvStack.c
 create mode 100644 drivers/net/ethernet/mvebu_net/common/mvStack.h
 create mode 100644 drivers/net/ethernet/mvebu_net/common/mvTypes.h
 create mode 100644 drivers/net/ethernet/mvebu_net/common/mvVideo.h
 create mode 100644 drivers/net/ethernet/mvebu_net/linux/mv_neta.h
 create mode 100644 drivers/net/ethernet/mvebu_net/linux/mv_pp2.h
 create mode 100644 drivers/net/ethernet/mvebu_net/mvNetConfig.h
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/.gitignore
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/Kconfig
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/Makefile
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/bm/bm_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/bm/mv_eth_bm.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/hal/.gitignore
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/hal/bm/mvBm.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/hal/bm/mvBm.h
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/hal/bm/mvBmRegs.h
 create mode 100755 drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvEthRegs.h
 create mode 100755 drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvHwf.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvNeta.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvNeta.h
 create mode 100755 drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvNetaAddrDec.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvNetaDebug.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvNetaRegs.h
 create mode 100755 drivers/net/ethernet/mvebu_net/neta/hal/pmt/mvPmt.c
 create mode 100755 drivers/net/ethernet/mvebu_net/neta/hal/pmt/mvPmt.h
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPnc.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPnc.h
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPncAging.c
 create mode 100755 drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPncLb.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPncRxq.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPncWol.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvTcam.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvTcam.h
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/hwf/hwf_bm.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/hwf/hwf_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/l2fw/l2fw_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/l2fw/mv_eth_l2fw.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/l2fw/mv_eth_l2fw.h
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/l2fw/mv_eth_l2sec.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/l2fw/mv_eth_l2sec.h
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_nfp.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_qos_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_rss_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_rx_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_sysfs.h
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_tool.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_tool.h
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_tx_sched_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_tx_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/net_dev/mv_ethernet.c
 create mode 100755 drivers/net/ethernet/mvebu_net/neta/net_dev/mv_netdev.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/net_dev/mv_netdev.h
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/net_dev/mv_pon_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/pmt/pmt_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/pnc/pnc_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/pnc/pnc_sysfs.h
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/pnc/rxq_map_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/neta/pnc/wol_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/netmux/.gitignore
 create mode 100644 drivers/net/ethernet/mvebu_net/netmux/Makefile
 create mode 100644 drivers/net/ethernet/mvebu_net/netmux/mv_mux_netdev.c
 create mode 100644 drivers/net/ethernet/mvebu_net/netmux/mv_mux_netdev.h
 create mode 100644 drivers/net/ethernet/mvebu_net/netmux/mv_mux_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/netmux/mv_mux_tool.c
 create mode 100644 drivers/net/ethernet/mvebu_net/netmux/mv_mux_tool.h
 create mode 100644 drivers/net/ethernet/mvebu_net/phy/mvEthPhy.c
 create mode 100644 drivers/net/ethernet/mvebu_net/phy/mvEthPhy.h
 create mode 100644 drivers/net/ethernet/mvebu_net/phy/mvEthPhyRegs.h
 create mode 100644 drivers/net/ethernet/mvebu_net/phy/phy_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/.gitignore
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/Kconfig
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/Makefile
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cls/cls2_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cls/cls3_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cls/cls4_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cls/cls_mc_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cls/cls_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_api.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_api.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_app.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_app.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_db.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_db.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_dev.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_dev.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_flow.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_flow.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_header.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_infra.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_infra.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_mng_if.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_mod.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_netdev.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_netdev.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_sysfs.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/dpi/dpi_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/.gitignore
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/bm/mvBm.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/bm/mvBm.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/bm/mvBmRegs.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Classifier.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Classifier.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls2Hw.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls2Hw.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls3Hw.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls3Hw.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls4Hw.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls4Hw.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2ClsActHw.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2ClsHw.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2ClsHw.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2ClsMcHw.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2ClsMcHw.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/common/mvPp2Common.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/common/mvPp2Common.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/common/mvPp2ErrCode.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/dpi/mvPp2DpiHw.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/dpi/mvPp2DpiHw.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/gbe/mvPp2AddrDec.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/gbe/mvPp2Gbe.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/gbe/mvPp2Gbe.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/gbe/mvPp2GbeDebug.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/gbe/mvPp2GbeRegs.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/gmac/mvEthGmacApi.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/gmac/mvEthGmacApi.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/gmac/mvEthGmacRegs.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/plcr/mvPp2PlcrHw.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/plcr/mvPp2PlcrHw.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/pme/mvPp2PmeHw.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/pme/mvPp2PmeHw.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/prs/mvPp2Prs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/prs/mvPp2Prs.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/prs/mvPp2PrsHw.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/prs/mvPp2PrsHw.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/wol/mvPp2Wol.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/hal/wol/mvPp2Wol.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/l2fw/l2fw_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/l2fw/mv_eth_l2fw.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/l2fw/mv_eth_l2fw.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_bm_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_dbg_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_hwf_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_napi_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_pme_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_pon_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_qos_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_rx_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_sysfs.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_tool.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_tool.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_tx_sched_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_tx_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_ethernet.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_netdev.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_netdev.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_pp2_netmap.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/plcr/plcr_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/pme/pme_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/prs/prs_high_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/prs/prs_low_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/prs/prs_sysfs.h
 create mode 100644 drivers/net/ethernet/mvebu_net/pp2/wol/wol_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/.gitignore
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/Makefile
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/mv_phy.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/mv_phy.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/mv_switch.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/mv_switch.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/mv_switch_sysfs.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/.gitignore
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/Copyright.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/gtMad.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/gtPTP.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/driver/gtDrvConfig.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/driver/gtDrvEvents.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/driver/gtDrvSwRegs.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/driver/gtHwCntl.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/msApi/gtVct.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/msApi/msApiInternal.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/platform/gtMiiSmiIf.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/platform/gtSem.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/platform/platformDeps.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApi.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApiDefs.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApiPrototype.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApiSelect.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApiTypes.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApiWince.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/README_SW.txt
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/makedefs
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/802.1Q/802_1q.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/802.1Q/readme.txt
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/CableTest/advCableTest.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/CableTest/cableTest.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/CableTest/readme.txt
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/CrossChipTrunk/crossChipTrunk.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/CrossChipTrunk/readme.txt
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/FlowControl/flowCtrl.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/FlowControl/readme.txt
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Header/header.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Header/readme.txt
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Include/msSample.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Include/qdSimRegs.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/ev96122mii.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/msApiInit.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/msSample.h
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/osSem.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/qdSim.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/readme.txt
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Interrupt/qdInt.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Interrupt/readme.txt
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/LoadBalance/loadBalance.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/LoadBalance/readme.txt
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/MACAddress/macAddr.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/MACAddress/readme.txt
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/MinimizeCPUTraffic/minimizeCPUTraffic.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/MinimizeCPUTraffic/readme.txt
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/MultiDevice/msApiInit.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PIRL/pirl.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PIRL/pirl2.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PIRL/readme.txt
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PTP/ptp.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PTP/readme.txt
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PktGen/phyPktGenSample.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PktGen/readme.txt
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PortMonitor/portMonitor.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PortMonitor/readme.txt
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/QoSSetup/qos.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/QoSSetup/readme.txt
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/README
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/RMON/readme.txt
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/RMON/rmon.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Trailer/readme.txt
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Trailer/trailer.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/VlanSetup/hgVlan.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/VlanSetup/readme.txt
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/makefile
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/tcam/readme.txt
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/tcam/tcam.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/driver/gtDrvConfig.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/driver/gtDrvEvents.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/driver/gtHwCntl.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/driver/makefile
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/makefile
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtAVB.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtAdvVct.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtAdvVct_mad.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtBrgFdb.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtBrgStp.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtBrgStu.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtBrgVlan.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtBrgVtu.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtCCPVT.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtEvents.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtMisc.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPCSCtrl.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPIRL.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPIRL2.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPTP.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPTPHidden.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPage2Access.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPhyCtrl.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPhyCtrl_mad.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPhyInt.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPhyInt_mad.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPolicy.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortCtrl.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortLed.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortPav.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortRateCtrl.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortRmon.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortStat.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortStatus.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPriTable.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtQosMap.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtSerdesCtrl.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtSysConfig.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtSysCtrl.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtSysStatus.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtTCAM.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtUtils.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtVct.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtVct_mad.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtVersion.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtWeight.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/makefile
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/platform/gtDebug.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/platform/gtMiiSmiIf.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/platform/gtSem.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/platform/makefile
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/platform/platformDeps.c
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/make.defs
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/make.rules
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/makelnx.defs
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/makelnx.rules
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/makewce.defs
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/makewce.rules
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/setenv
 create mode 100644 drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/setenv.bat
 create mode 100644 drivers/pci/host/Kconfig
 create mode 100644 drivers/pci/host/Makefile
 create mode 100644 drivers/pci/host/pci-mvebu.c
 create mode 100644 drivers/pinctrl/mvebu/pinctrl-armada-375.c
 create mode 100644 drivers/pinctrl/mvebu/pinctrl-armada-38x.c
 create mode 100755 drivers/rtc/rtc-mvebu.c
 mode change 100644 => 100755 drivers/scsi/scsi_error.c
 mode change 100644 => 100755 drivers/scsi/sd.c
 mode change 100644 => 100755 drivers/target/iscsi/iscsi_target.c
 mode change 100644 => 100755 drivers/target/iscsi/iscsi_target_auth.c
 mode change 100644 => 100755 drivers/target/iscsi/iscsi_target_configfs.c
 mode change 100644 => 100755 drivers/target/iscsi/iscsi_target_login.c
 mode change 100644 => 100755 drivers/target/iscsi/iscsi_target_nego.c
 mode change 100644 => 100755 drivers/target/iscsi/iscsi_target_nego.h
 mode change 100644 => 100755 drivers/target/iscsi/iscsi_target_nodeattrib.c
 mode change 100644 => 100755 drivers/target/iscsi/iscsi_target_nodeattrib.h
 mode change 100644 => 100755 drivers/target/iscsi/iscsi_target_parameters.h
 mode change 100644 => 100755 drivers/target/iscsi/iscsi_target_tpg.c
 mode change 100644 => 100755 drivers/target/iscsi/iscsi_target_tpg.h
 mode change 100644 => 100755 drivers/target/target_core_device.c
 mode change 100644 => 100755 drivers/target/target_core_fabric_configfs.c
 mode change 100644 => 100755 drivers/target/target_core_internal.h
 mode change 100644 => 100755 drivers/target/target_core_pr.c
 mode change 100644 => 100755 drivers/target/target_core_sbc.c
 mode change 100644 => 100755 drivers/target/target_core_spc.c
 mode change 100644 => 100755 drivers/target/target_core_tpg.c
 mode change 100644 => 100755 drivers/target/target_core_transport.c
 create mode 100644 drivers/usb/host/xhci-mvebu.c
 create mode 100644 drivers/usb/host/xhci-mvebu.h
 mode change 100644 => 100755 fs/btrfs/free-space-cache.c
 mode change 100644 => 100755 fs/ext4/mballoc.c
 create mode 100644 include/linux/mvebu-v7-cpuidle.h
 mode change 100644 => 100755 include/target/target_core_backend.h
 mode change 100644 => 100755 include/target/target_core_base.h
 mode change 100644 => 100755 include/target/target_core_fabric.h
 mode change 100644 => 100755 include/uapi/linux/loop.h
 create mode 100755 mkimage
 create mode 100755 patch/ITR-99092.diff
 create mode 100644 tools/cesa/Makefile
 create mode 100644 tools/cesa/mv_cesa_tool.c
 create mode 100755 tools/nas/nas_init.sh
 create mode 100755 xbuild.sh
 create mode 100755 xbuild_forGPL.sh

diff --git a/Build2.sh b/Build2.sh
new file mode 100755
index 000000000000..3f353076027c
--- /dev/null
+++ b/Build2.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+
+
+export ARCH=arm
+export CROSS_COMPILE=arm-marvell-linux-gnueabi-
+export PATH=/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+#export PATH=$PATH:/Marvell/A375/TOOLS/CrossCompiler_SDK2013Q3_SFP_LE
+export PATH=/opt_gccarm/armv7-marvell-linux-gnueabi-softfp_i686_64K_Dev_20131002/bin:/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+
+make mrproper 
+
+make mvebu_lsp_defconfig
+
+make menuconfig 
+
+make zImage 
+
+#make device-tree-file.dtb
+
+make armada-388-rd.dtb
diff --git a/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt b/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt
index 61df564c0d23..c911216ad05b 100644
--- a/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt
+++ b/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt
@@ -1,9 +1,11 @@
-Marvell Armada 370 and Armada XP Interrupt Controller
+Marvell Armada 370, 375, 38x, XP Interrupt Controller
 -----------------------------------------------------
 
 Required properties:
 - compatible: Should be "marvell,mpic"
 - interrupt-controller: Identifies the node as an interrupt controller.
+- msi-controller: Identifies the node as an PCI Message Signaled
+  Interrupt controller.
 - #interrupt-cells: The number of cells to define the interrupts. Should be 1.
   The cell is the IRQ number
 
@@ -14,7 +16,13 @@ Required properties:
   automatically map to the interrupt controller registers of the
   current CPU)
 
+Optional properties:
 
+- interrupts: If defined, then it indicates that this MPIC is
+  connected as a slave to another interrupt controller. This is
+  typically the case on Armada 375 and Armada 380, where the MPIC is
+  connected as a slave to the Cortex-A9 GIC. The provided interrupts
+  indicates to which GIC interrupt the MPIC output is connected.
 
 Example:
 
@@ -24,6 +32,7 @@ Example:
               #address-cells = <1>;
               #size-cells = <1>;
               interrupt-controller;
+              msi-controller;
               reg = <0xd0020a00 0x1d0>,
                     <0xd0021070 0x58>;
         };
diff --git a/Documentation/devicetree/bindings/arm/armada-370-xp-pmsu.txt b/Documentation/devicetree/bindings/arm/armada-370-xp-pmsu.txt
index 926b4d6aae7e..f9dafd998d49 100644
--- a/Documentation/devicetree/bindings/arm/armada-370-xp-pmsu.txt
+++ b/Documentation/devicetree/bindings/arm/armada-370-xp-pmsu.txt
@@ -1,20 +1,17 @@
 Power Management Service Unit(PMSU)
 -----------------------------------
-Available on Marvell SOCs: Armada 370 and Armada XP
+
+Available on Marvell SOCs: Armada XP
 
 Required properties:
 
 - compatible: "marvell,armada-370-xp-pmsu"
 
-- reg: Should contain PMSU registers location and length. First pair
-  for the per-CPU SW Reset Control registers, second pair for the
-  Power Management Service Unit.
+- reg: Should contain the location and length of the PMSU registers.
 
 Example:
 
-armada-370-xp-pmsu@d0022000 {
+armada-370-xp-pmsu@22100 {
 	compatible = "marvell,armada-370-xp-pmsu";
-	reg = <0xd0022100 0x430>,
-	      <0xd0020800 0x20>;
+	reg = <0x22100 0x400>;
 };
-
diff --git a/Documentation/devicetree/bindings/arm/armada-375-usb-cluster.txt b/Documentation/devicetree/bindings/arm/armada-375-usb-cluster.txt
new file mode 100644
index 000000000000..71feb8fb4434
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/armada-375-usb-cluster.txt
@@ -0,0 +1,17 @@
+Armada 375 USB cluster
+----------------------
+
+Armada 375 comes with an USB2 host and device controller and an USB3
+controller. The USB cluster control register allows to manage common
+features of both USB controllers.
+
+Required properties:
+
+- compatible: "marvell,armada-375-usb-cluster"
+- reg: Should contain usb cluster register location and length.
+
+Example:
+	usb-cluster@18400 {
+		compatible = "marvell,armada-375-usb-cluster";
+		reg = <0x18400 0x4>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/armada-375.txt b/Documentation/devicetree/bindings/arm/armada-375.txt
new file mode 100644
index 000000000000..867d0b80cb8f
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/armada-375.txt
@@ -0,0 +1,9 @@
+Marvell Armada 375 Platforms Device Tree Bindings
+-------------------------------------------------
+
+Boards with a SoC of the Marvell Armada 375 family shall have the
+following property:
+
+Required root node property:
+
+compatible: must contain "marvell,armada375"
diff --git a/Documentation/devicetree/bindings/arm/coherency-fabric.txt b/Documentation/devicetree/bindings/arm/coherency-fabric.txt
index 17d8cd107559..1fdccb746ff9 100644
--- a/Documentation/devicetree/bindings/arm/coherency-fabric.txt
+++ b/Documentation/devicetree/bindings/arm/coherency-fabric.txt
@@ -1,21 +1,45 @@
 Coherency fabric
 ----------------
-Available on Marvell SOCs: Armada 370 and Armada XP
+Available on Marvell SOCs: Armada 370, Armada 375 and Armada XP
 
 Required properties:
 
-- compatible: "marvell,coherency-fabric"
+- compatible: the possible values are:
+
+ * "marvell,coherency-fabric", kept for backward compatibility reasons
+   only. To be used for the coherency fabric of the Armada 370 and
+   Armada XP. It is recommended to use
+   "marvell,armada-370-coherency-fabric" instead.
+
+ * "marvell,armada-370-coherency-fabric", for the Armada 370 and
+   Armada XP coherency fabric.
+
+ * "marvell,armada-375-coherency-fabric", for the Armada 375 coherency
+   fabric.
 
 - reg: Should contain coherency fabric registers location and
-  length. First pair for the coherency fabric registers, second pair
-  for the per-CPU fabric registers registers.
+  length.
+
+ * For "marvell,coherency-fabric" and
+   "marvell,armada-370-coherency-fabric", the first pair for the
+   coherency fabric registers, second pair for the per-CPU fabric
+   registers.
 
-Example:
+ * For "marvell,armada-375-coherency-fabric", only one pair is needed
+   for the per-CPU fabric registers.
+
+
+Examples:
 
 coherency-fabric@d0020200 {
-	compatible = "marvell,coherency-fabric";
+	compatible = "marvell,armada-370-coherency-fabric";
 	reg = <0xd0020200 0xb0>,
 		<0xd0021810 0x1c>;
 
 };
 
+coherency-fabric@21810 {
+	compatible = "marvell,armada-375-coherency-fabric";
+	reg = <0x21810 0x1c>;
+};
+
diff --git a/Documentation/devicetree/bindings/arm/mvebu-system-controller.txt b/Documentation/devicetree/bindings/arm/mvebu-system-controller.txt
index 081c6a786c8a..d24ab2ebf8a7 100644
--- a/Documentation/devicetree/bindings/arm/mvebu-system-controller.txt
+++ b/Documentation/devicetree/bindings/arm/mvebu-system-controller.txt
@@ -1,12 +1,13 @@
 MVEBU System Controller
 -----------------------
-MVEBU (Marvell SOCs: Armada 370/XP, Dove, mv78xx0, Kirkwood, Orion5x)
+MVEBU (Marvell SOCs: Armada 370/375/XP, Dove, mv78xx0, Kirkwood, Orion5x)
 
 Required properties:
 
 - compatible: one of:
 	- "marvell,orion-system-controller"
 	- "marvell,armada-370-xp-system-controller"
+	- "marvell,armada-375-system-controller"
 - reg: Should contain system controller registers location and length.
 
 Example:
diff --git a/Documentation/devicetree/bindings/bus/mvebu-mbus.txt b/Documentation/devicetree/bindings/bus/mvebu-mbus.txt
new file mode 100644
index 000000000000..636fcf761af0
--- /dev/null
+++ b/Documentation/devicetree/bindings/bus/mvebu-mbus.txt
@@ -0,0 +1,281 @@
+
+* Marvell MBus
+
+Required properties:
+
+- compatible:	 Should be set to one of the following:
+		 marvell,armada370-mbus
+		 marvell,armadaxp-mbus
+		 marvell,armada370-mbus
+		 marvell,armada375-mbus
+		 marvell,armada380-mbus
+		 marvell,armadaxp-mbus
+		 marvell,kirkwood-mbus
+		 marvell,dove-mbus
+		 marvell,orion5x-88f5281-mbus
+		 marvell,orion5x-88f5182-mbus
+		 marvell,orion5x-88f5181-mbus
+		 marvell,orion5x-88f6183-mbus
+		 marvell,mv78xx0-mbus
+
+- address-cells: Must be '2'. The first cell for the MBus ID encoding,
+                 the second cell for the address offset within the window.
+
+- size-cells:    Must be '1'.
+
+- ranges:        Must be set up to provide a proper translation for each child.
+	         See the examples below.
+
+- controller:    Contains a single phandle referring to the MBus controller
+                 node. This allows to specify the node that contains the
+		 registers that control the MBus, which is typically contained
+		 within the internal register window (see below).
+
+Optional properties:
+
+- pcie-mem-aperture:	This optional property contains the aperture for
+			the memory region of the PCIe driver.
+			If it's defined, it must encode the base address and
+			size for the address decoding windows allocated for
+			the PCIe memory region.
+
+- pcie-io-aperture:	Just as explained for the above property, this
+			optional property contains the aperture for the
+			I/O region of the PCIe driver.
+
+* Marvell MBus controller
+
+Required properties:
+
+- compatible:	Should be set to "marvell,mbus-controller".
+
+- reg:          Device's register space.
+		Two or three entries are expected (see the examples below):
+		the first one controls the devices decoding window,
+		the second one controls the SDRAM decoding window and
+		the third controls the MBus bridge (only with the
+		marvell,armada370-mbus and marvell,armadaxp-mbus
+		compatible strings)
+
+Example:
+
+	soc {
+		compatible = "marvell,armada370-mbus", "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <1>;
+		controller = <&mbusc>;
+		pcie-mem-aperture = <0xe0000000 0x8000000>;
+		pcie-io-aperture  = <0xe8000000 0x100000>;
+
+		internal-regs {
+			compatible = "simple-bus";
+
+			mbusc: mbus-controller@20000 {
+				compatible = "marvell,mbus-controller";
+				reg = <0x20000 0x100>, <0x20180 0x20>, <0x20250 0x8>;
+			};
+
+			/* more children ...*/
+		};
+	};
+
+** MBus address decoding window specification
+
+The MBus children address space is comprised of two cells: the first one for
+the window ID and the second one for the offset within the window.
+In order to allow to describe valid and non-valid window entries, the
+following encoding is used:
+
+  0xSIAA0000 0x00oooooo
+
+Where:
+
+  S = 0x0 for a MBus valid window
+  S = 0xf for a non-valid window (see below)
+
+If S = 0x0, then:
+
+   I = 4-bit window target ID
+  AA = windpw attribute
+
+If S = 0xf, then:
+
+   I = don't care
+   AA = 1 for internal register
+
+Following the above encoding, for each ranges entry for a MBus valid window
+(S = 0x0), an address decoding window is allocated. On the other side,
+entries for translation that do not correspond to valid windows (S = 0xf)
+are skipped.
+
+	soc {
+		compatible = "marvell,armada370-mbus", "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <1>;
+		controller = <&mbusc>;
+
+		ranges = <0xf0010000 0 0 0xd0000000 0x100000
+			  0x01e00000 0 0 0xfff00000 0x100000>;
+
+		bootrom {
+			compatible = "marvell,bootrom";
+			reg = <0x01e00000 0 0x100000>;
+		};
+
+		/* other children */
+		...
+
+		internal-regs {
+			compatible = "simple-bus";
+			ranges = <0 0xf0010000 0 0x100000>;
+
+			mbusc: mbus-controller@20000 {
+				compatible = "marvell,mbus-controller";
+				reg = <0x20000 0x100>, <0x20180 0x20>, <0x20250 0x8>;
+			};
+
+			/* more children ...*/
+		};
+	};
+
+In the shown example, the translation entry in the 'ranges' property is what
+makes the MBus driver create a static decoding window for the corresponding
+given child device. Note that the binding does not require child nodes to be
+present. Of course, child nodes are needed to probe the devices.
+
+Since each window is identified by its target ID and attribute ID there's
+a special macro that can be use to simplify the translation entries:
+
+#define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16))
+
+Using this macro, the above example would be:
+
+	soc {
+		compatible = "marvell,armada370-mbus", "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <1>;
+		controller = <&mbusc>;
+
+		ranges = < MBUS_ID(0xf0, 0x01) 0 0 0xd0000000 0x100000
+			   MBUS_ID(0x01, 0xe0) 0 0 0xfff00000 0x100000>;
+
+		bootrom {
+			compatible = "marvell,bootrom";
+			reg = <MBUS_ID(0x01, 0xe0) 0 0x100000>;
+		};
+
+		/* other children */
+		...
+
+		internal-regs {
+			compatible = "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 MBUS_ID(0xf0, 0x01) 0 0x100000>;
+
+			mbusc: mbus-controller@20000 {
+				compatible = "marvell,mbus-controller";
+				reg = <0x20000 0x100>, <0x20180 0x20>, <0x20250 0x8>;
+			};
+
+			/* other children */
+			...
+		};
+	};
+
+
+** About the window base address
+
+Remember the MBus controller allows a great deal of flexibility for choosing
+the decoding window base address. When planning the device tree layout it's
+possible to choose any address as the base address, provided of course there's
+a region large enough available, and with the required alignment.
+
+Yet in other words: there's nothing preventing us from setting a base address
+of 0xf0000000, or 0xd0000000 for the NOR device shown above, if such region is
+unused.
+
+** Window allocation policy
+
+The mbus-node ranges property defines a set of mbus windows that are expected
+to be set by the operating system and that are guaranteed to be free of overlaps
+with one another or with the system memory ranges.
+
+Each entry in the property refers to exactly one window. If the operating system
+choses to use a different set of mbus windows, it must ensure that any address
+translations performed from downstream devices are adapted accordingly.
+
+The operating system may insert additional mbus windows that do not conflict
+with the ones listed in the ranges, e.g. for mapping PCIe devices.
+As a special case, the internal register window must be set up by the boot
+loader at the address listed in the ranges property, since access to that region
+is needed to set up the other windows.
+
+** Example
+
+See the example below, where a more complete device tree is shown:
+
+	soc {
+		compatible = "marvell,armadaxp-mbus", "simple-bus";
+		controller = <&mbusc>;
+
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xd0000000 0x100000   /* internal-regs */
+			  MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
+			  MBUS_ID(0x01, 0x2f) 0 0 0xf0000000 0x8000000>;
+
+		bootrom {
+			compatible = "marvell,bootrom";
+			reg = <MBUS_ID(0x01, 0x1d) 0 0x100000>;
+		};
+
+		devbus-bootcs {
+			status = "okay";
+			ranges = <0 MBUS_ID(0x01, 0x2f) 0 0x8000000>;
+
+			/* NOR */
+			nor {
+				compatible = "cfi-flash";
+				reg = <0 0x8000000>;
+				bank-width = <2>;
+			};
+		};
+
+		pcie-controller {
+			compatible = "marvell,armada-xp-pcie";
+			status = "okay";
+			device_type = "pci";
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+
+			ranges =
+			       <0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000   /* Port 0.0 registers */
+				0x82000000 0 0x42000 MBUS_ID(0xf0, 0x01) 0x42000 0 0x00002000   /* Port 2.0 registers */
+				0x82000000 0 0x44000 MBUS_ID(0xf0, 0x01) 0x44000 0 0x00002000   /* Port 0.1 registers */
+				0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000   /* Port 0.2 registers */
+				0x82000000 0 0x4c000 MBUS_ID(0xf0, 0x01) 0x4c000 0 0x00002000   /* Port 0.3 registers */
+				0x82000800 0 0xe0000000 MBUS_ID(0x04, 0xe8) 0xe0000000 0 0x08000000 /* Port 0.0 MEM */
+				0x81000800 0 0          MBUS_ID(0x04, 0xe0) 0xe8000000 0 0x00100000 /* Port 0.0 IO */>;
+
+
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+		};
+
+		internal-regs {
+			compatible = "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 MBUS_ID(0xf0, 0x01) 0 0x100000>;
+
+			mbusc: mbus-controller@20000 {
+				reg = <0x20000 0x100>, <0x20180 0x20>, <0x20250 0x8>;
+			};
+
+			interrupt-controller@20000 {
+			      reg = <0x20a00 0x2d0>, <0x21070 0x58>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt
index 1e662948661e..f8d4595411e7 100644
--- a/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt
+++ b/Documentation/devicetree/bindings/clock/mvebu-core-clock.txt
@@ -11,6 +11,18 @@ The following is a list of provided IDs and clock names on Armada 370/XP:
  3 = hclk    (DRAM control clock)
  4 = dramclk (DDR clock)
 
+The following is a list of provided IDs and clock names on Armada 375:
+ 0 = tclk    (Internal Bus clock)
+ 1 = cpuclk  (CPU clock)
+ 2 = l2clk   (L2 Cache clock)
+ 3 = ddrclk  (DDR clock)
+
+The following is a list of provided IDs and clock names on Armada 380:
+ 0 = tclk    (Internal Bus clock)
+ 1 = cpuclk  (CPU clock)
+ 2 = l2clk   (L2 Cache clock)
+ 3 = ddrclk  (DDR clock)
+
 The following is a list of provided IDs and clock names on Kirkwood and Dove:
  0 = tclk   (Internal Bus clock)
  1 = cpuclk (CPU0 clock)
@@ -20,6 +32,8 @@ The following is a list of provided IDs and clock names on Kirkwood and Dove:
 Required properties:
 - compatible : shall be one of the following:
 	"marvell,armada-370-core-clock" - For Armada 370 SoC core clocks
+	"marvell,armada-375-core-clock" - For Armada 375 SoC core clocks
+	"marvell,armada-380-core-clock" - For Armada 380 SoC core clocks
 	"marvell,armada-xp-core-clock" - For Armada XP SoC core clocks
 	"marvell,dove-core-clock" - for Dove SoC core clocks
 	"marvell,kirkwood-core-clock" - for Kirkwood SoC (except mv88f6180)
diff --git a/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
index cffc93d97f54..19ab6c064285 100644
--- a/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
+++ b/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt
@@ -1,10 +1,11 @@
-* Gated Clock bindings for Marvell Orion SoCs
+* Gated Clock bindings for Marvell EBU SoCs
 
-Marvell Dove and Kirkwood allow some peripheral clocks to be gated to save
-some power. The clock consumer should specify the desired clock by having
-the clock ID in its "clocks" phandle cell. The clock ID is directly mapped to
-the corresponding clock gating control bit in HW to ease manual clock lookup
-in datasheet.
+Marvell Armada 370/375/380/385/XP, Dove and Kirkwood allow some
+peripheral clocks to be gated to save some power. The clock consumer
+should specify the desired clock by having the clock ID in its
+"clocks" phandle cell. The clock ID is directly mapped to the
+corresponding clock gating control bit in HW to ease manual clock
+lookup in datasheet.
 
 The following is a list of provided IDs for Armada 370:
 ID	Clock	Peripheral
@@ -22,6 +23,60 @@ ID	Clock	Peripheral
 28	ddr	DDR Cntrl
 30	sata1	SATA Host 0
 
+The following is a list of provided IDs for Armada 375:
+ID	Clock		Peripheral
+-----------------------------------
+2	mu		Management Unit
+3	pp		Packet Processor
+4	ptp		PTP
+5	pex0		PCIe 0 Clock out
+6	pex1		PCIe 1 Clock out
+8	audio		Audio Cntrl
+11	nand		Nand Flash Cntrl
+14	sata0_link	SATA 0 Link
+15	sata0_core	SATA 0 Core
+16	usb3		USB3 Host
+17	sdio		SDHCI Host
+18	usb		USB Host
+19	gop		Gigabit Ethernet MAC
+20	sata1_link	SATA 1 Link
+21	sata1_core	SATA 1 Core
+22	xor0		XOR DMA 0
+23	xor1		XOR DMA 0
+24	copro		Coprocessor
+25	tdm		Time Division Mplx
+28	crypto0_enc	Cryptographic Unit Port 0 Encryption
+29	crypto0_core	Cryptographic Unit Port 0 Core
+30	crypto1_enc	Cryptographic Unit Port 1 Encryption
+31	crypto1_core	Cryptographic Unit Port 1 Core
+
+The following is a list of provided IDs for Armada 380:
+ID	Clock		Peripheral
+-----------------------------------
+0	audio		Audio
+2	ge2		Gigabit Ethernet 2
+3	ge1		Gigabit Ethernet 1
+4	ge0		Gigabit Ethernet 0
+5	pex1		PCIe 1
+6	pex2		PCIe 2
+7	pex3		PCIe 3
+8	pex4		PCIe 0
+9	usb3h0		USB3 Host 0
+10	usb3h1		USB3 Host 1
+11	usb3d		USB3 Device
+13	bm		Buffer Management
+14	crypto0z	Cryptographic 0 Z
+15	sata0		SATA 0
+16	crypto1z	Cryptographic 1 Z
+17	sdio		SDIO
+18	usb2		USB 2
+21	crypto1		Cryptographic 1
+22	xor0		XOR 0
+23	crypto0		Cryptographic 0
+25	tdm		Time Division Multiplexing
+28	xor1		XOR 1
+30	sata1		SATA 1
+
 The following is a list of provided IDs for Armada XP:
 ID	Clock	Peripheral
 -----------------------------------
@@ -94,6 +149,10 @@ ID	Clock	Peripheral
 
 Required properties:
 - compatible : shall be one of the following:
+	"marvell,armada-370-gating-clock" - for Armada 370 SoC clock gating
+	"marvell,armada-375-gating-clock" - for Armada 375 SoC clock gating
+	"marvell,armada-380-gating-clock" - for Armada 380 SoC clock gating
+	"marvell,armada-xp-gating-clock" - for Armada XP SoC clock gating
 	"marvell,dove-gating-clock" - for Dove SoC clock gating
 	"marvell,kirkwood-gating-clock" - for Kirkwood SoC clock gating
 - reg : shall be the register address of the Clock Gating Control register
diff --git a/Documentation/devicetree/bindings/crypto/mvebu_cesa.txt b/Documentation/devicetree/bindings/crypto/mvebu_cesa.txt
new file mode 100644
index 000000000000..0c402e5c986d
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/mvebu_cesa.txt
@@ -0,0 +1,140 @@
+* Marvell Cryptographic Engines And Security Accelerator
+
+To support CESA in dts two separate nodes are required:
+	- one for CESA engine with compatible "marvell,armada-cesa"
+	- second one for internal CESA SRAM with compatible "marvell,cesa-sram"
+
+Note that Marvell chips have maximum 2 available CESA channels.
+
+======================================
+- CESA engine ("marvell,armada-cesa")
+======================================
+Required properties:
+
+- compatible : should be "marvell,armada-cesa"
+
+- reg : base physical address of the engine and length of memory mapped
+        region, followed by base physical address of tdma and length of memory
+	mapped region. For more then one CESA channels see the example bellow.
+
+- clocks : must contain an entry for each clock gate
+	- armada-38x: requires two entry for one channel ("crypto0", "crypto0z")
+	  for second channels see the example bellow
+	- armada-xp: requires one entry for whole engine
+	- armada-370: requires one entry for whole engine
+
+- clock-names : require the following entries:
+	- armada-38x: "crypto0" and "crypto0z" for channel0;
+		      "crypto1" and "crypto1z" for channel1
+	- armada-xp: permit one entry or lack of clock-names properties
+	- armada-370: permit one entry or lack of clock-names properties
+
+- cesa,channels : number of CESA channels
+	- armada-38x: has 2 available cesa channels
+	- armada-xp: has 2 available cesa channels
+	- armada-370: has 1 available cesa channel
+
+- cesa,mode : CESA can operate in one of the following mode 'ocf' or 'test'
+
+- cesa,feature : depends on chip, CESA can operate in one of the following mode:
+	- armada-38x: "chain", "int_coalescing" or "int_per_packet"(no optimization)
+	- armada-xp: "chain", "int_coalescing" or "int_per_packet"(no optimization)
+	- armada-370: "chain" or "int_per_packet"(no optimization)
+
+- cesa,threshold : valid only with "int_coalescing" feature
+	This field provides a way to minimize the number of interrupts to off
+	load the CPU. It defines the number of <AccAndTDMAInt_CM> indications
+	before asserting the <EopCoalInt> bit in the Cryptographic interrupt
+	Cause Register.
+
+- cesa,time_threshold : valid only with "int_coalescing" feature
+	This field provides a way to ensure maximum delay between
+	<AccAndTDMAInt_CM> assertion and assertion bit <EopCoalInt> in
+	Cryptographic Interrupt Cause Register (even if the number of
+	<AccAndTDMAInt_CM> indications did not reach the <EopCoalPacketTh> value).
+
+- cesa,ctrlModel : 16-bit device model ID:
+	- armada-38x: <0x6800>
+	- armada-xp: <0x7846>
+	- armada-370: <0x6710>
+
+- cesa,ctrlRev : 8-bit device revision ID
+
+- cesa,sramOffset : 16-bit SRAM offset
+
+- interrupts : interrupt number (defined per channel in child node - see
+	example bellow)
+
+Example:
+
+	crypto@9D000 {
+		compatible = "marvell,armada-cesa";
+		reg = <0x9D000 0x1000	/* cesa base reg chan 0 */
+		       0x90000 0x1000	/* tdma base reg chan 0 */
+		       0x9F000 0x1000	/* cesa base reg chan 1 */
+		       0x92000 0x1000>;	/* tdma base reg chan 1 */
+		clocks = <&gateclk 23>, <&gateclk 14>,
+		         <&gateclk 21>, <&gateclk 16>;
+		clock-names = "crypto0", "crypto0z",
+			      "crypto1", "crypto1z";
+		cesa,channels = <0x2>;
+		cesa,mode = "ocf";	/* ocf or test */
+		cesa,feature = "int_coalescing"; /* chain, int_coalescing
+					   or int_per_packet */
+
+		/* threshold and time_threshold relevant if
+		   int_coalescing in use */
+		cesa,threshold = <0x2>;
+		cesa,time_threshold = <0xfffff>;
+
+		cesa,ctrlModel = /bits/ 16 <0x6800>;
+		cesa,ctrlRev = /bits/ 8 <2>;
+		cesa,sramOffset = /bits/ 16 <0>;
+		status = "disabled";
+
+		crypto10 {
+			/* channel 0 */
+			interrupts = <0 19 0x4>;
+		};
+		crypto11 {
+			/* channel 1 */
+			interrupts = <0 20 0x4>;
+		};
+
+	};
+
+
+======================================
+- CESA SRAM ("marvell,cesa-sram")
+======================================
+
+Required properties in soc ranges:
+
+- ranges : entry in soc ranges for static decoding window configuration. For
+	more information read Documentation/devicetree/bindings/bus/mvebu-mbus.txt
+
+
+Required properties in cesa-sram node:
+
+- compatible : should be "marvell,cesa-sram"
+- reg : target ID, attribute ID, with use of MBUS_ID macro, followed by base
+	physical address and size of the SRAM, for each channel. For more
+	information read Documentation/devicetree/bindings/bus/mvebu-mbus.txt
+
+
+Example:
+
+	soc {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+			  MBUS_ID(0x09, 0x01) 0 0xf1100000 0x10000	/* CESA0: PHYS=0xf1100000
+									   size 64K */
+			  MBUS_ID(0x09, 0x05) 0 0xf1110000 0x10000>;	/* CESA1: PHYS=0xf1110000
+									   size 64K */
+
+		/* Security Accelerator SRAM (CESA) */
+		cesa-sram {
+			compatible = "marvell,cesa-sram";
+			reg = <MBUS_ID(0x09, 0x01) 0 0x10000   /*chan0*/
+			       MBUS_ID(0x09, 0x05) 0 0x10000>; /*chan1*/
+		};
+	};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt b/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
index f46d928aa73d..244977db6a2d 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
@@ -5,8 +5,20 @@ Required properties :
 
  - reg             : Offset and length of the register set for the device
  - compatible      : Should be "marvell,mv64xxx-i2c"
+                     or "marvell,mv78230-i2c" or "marvell,mv78230-a0-i2c"
+                     Note: Only use "marvell,mv78230-a0-i2c" for a very rare,
+                     initial version of the SoC which had broken offload
+                     support.  Linux auto-detects this and sets it
+                     appropriately.
  - interrupts      : The interrupt number
- - clock-frequency : Desired I2C bus clock frequency in Hz.
+
+Optional properties :
+
+ - clock-frequency : Desired I2C bus clock frequency in Hz. If not set the
+default frequency is 100kHz
+
+ - timeout-ms      : timeout value in miliseconds. If not set the default
+                     timeout is 1000ms
 
 Examples:
 
@@ -14,5 +26,16 @@ Examples:
 		compatible = "marvell,mv64xxx-i2c";
 		reg = <0x11000 0x20>;
 		interrupts = <29>;
+		timeout-ms = <1000>;
+		clock-frequency = <100000>;
+	};
+
+For the Armada XP:
+
+	i2c@11000 {
+		compatible = "marvell,mv78230-i2c", "marvell,mv64xxx-i2c";
+		reg = <0x11000 0x100>;
+		interrupts = <29>;
+		timeout-ms = <1000>;
 		clock-frequency = <100000>;
 	};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
index 72a06c0ab1db..1486497a24c1 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
@@ -4,16 +4,33 @@ Specifying interrupt information for devices
 1) Interrupt client nodes
 -------------------------
 
-Nodes that describe devices which generate interrupts must contain an
-"interrupts" property. This property must contain a list of interrupt
-specifiers, one per output interrupt. The format of the interrupt specifier is
-determined by the interrupt controller to which the interrupts are routed; see
-section 2 below for details.
+Nodes that describe devices which generate interrupts must contain an either an
+"interrupts" property or an "interrupts-extended" property. These properties
+contain a list of interrupt specifiers, one per output interrupt. The format of
+the interrupt specifier is determined by the interrupt controller to which the
+interrupts are routed; see section 2 below for details.
+
+  Example:
+	interrupt-parent = <&intc1>;
+	interrupts = <5 0>, <6 0>;
 
 The "interrupt-parent" property is used to specify the controller to which
 interrupts are routed and contains a single phandle referring to the interrupt
 controller node. This property is inherited, so it may be specified in an
-interrupt client node or in any of its parent nodes.
+interrupt client node or in any of its parent nodes. Interrupts listed in the
+"interrupts" property are always in reference to the node's interrupt parent.
+
+The "interrupts-extended" property is a special form for use when a node needs
+to reference multiple interrupt parents. Each entry in this property contains
+both the parent phandle and the interrupt specifier. "interrupts-extended"
+should only be used when a device has multiple interrupt parents.
+
+  Example:
+	interrupts-extended = <&intc1 5 1>, <&intc2 1 0>;
+
+A device node may contain either "interrupts" or "interrupts-extended", but not
+both. If both properties are present, then the operating system should log an
+error and use only the data in "interrupts".
 
 2) Interrupt controller nodes
 -----------------------------
diff --git a/Documentation/devicetree/bindings/memory-controllers/mvebu-devbus.txt b/Documentation/devicetree/bindings/memory-controllers/mvebu-devbus.txt
new file mode 100644
index 000000000000..653c90c34a71
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/mvebu-devbus.txt
@@ -0,0 +1,156 @@
+Device tree bindings for MVEBU Device Bus controllers
+
+The Device Bus controller available in some Marvell's SoC allows to control
+different types of standard memory and I/O devices such as NOR, NAND, and FPGA.
+The actual devices are instantiated from the child nodes of a Device Bus node.
+
+Required properties:
+
+ - compatible:          Currently only Armada 370/XP SoC are supported,
+                        with this compatible string:
+
+                        marvell,mvebu-devbus
+
+ - reg:                 A resource specifier for the register space.
+                        This is the base address of a chip select within
+			the controller's register space.
+                        (see the example below)
+
+ - #address-cells:      Must be set to 1
+ - #size-cells:         Must be set to 1
+ - ranges:              Must be set up to reflect the memory layout with four
+                        integer values for each chip-select line in use:
+                        0 <physical address of mapping> <size>
+
+Mandatory timing properties for child nodes:
+
+Read parameters:
+
+ - devbus,turn-off-ps:  Defines the time during which the controller does not
+                        drive the AD bus after the completion of a device read.
+                        This prevents contentions on the Device Bus after a read
+                        cycle from a slow device.
+
+ - devbus,bus-width:    Defines the bus width (e.g. <16>)
+
+ - devbus,badr-skew-ps: Defines the time delay from from A[2:0] toggle,
+                        to read data sample. This parameter is useful for
+                        synchronous pipelined devices, where the address
+                        precedes the read data by one or two cycles.
+
+ - devbus,acc-first-ps: Defines the time delay from the negation of
+                        ALE[0] to the cycle that the first read data is sampled
+                        by the controller.
+
+ - devbus,acc-next-ps:  Defines the time delay between the cycle that
+                        samples data N and the cycle that samples data N+1
+                        (in burst accesses).
+
+ - devbus,rd-setup-ps:  Defines the time delay between DEV_CSn assertion to
+			DEV_OEn assertion. If set to 0 (default),
+                        DEV_OEn and DEV_CSn are asserted at the same cycle.
+                        This parameter has no affect on <acc-first-ps> parameter
+                        (no affect on first data sample). Set <rd-setup-ps>
+                        to a value smaller than <acc-first-ps>.
+
+ - devbus,rd-hold-ps:   Defines the time between the last data sample to the
+			de-assertion of DEV_CSn. If set to 0 (default),
+			DEV_OEn and DEV_CSn are de-asserted at the same cycle
+			(the cycle of the last data sample).
+                        This parameter has no affect on DEV_OEn de-assertion.
+                        DEV_OEn is always de-asserted the next cycle after
+                        last data sampled. Also this parameter has no
+                        affect on <turn-off-ps> parameter.
+                        Set <rd-hold-ps> to a value smaller than <turn-off-ps>.
+
+Write parameters:
+
+ - devbus,ale-wr-ps:    Defines the time delay from the ALE[0] negation cycle
+			to the DEV_WEn assertion.
+
+ - devbus,wr-low-ps:    Defines the time during which DEV_WEn is active.
+                        A[2:0] and Data are kept valid as long as DEV_WEn
+                        is active. This parameter defines the setup time of
+                        address and data to DEV_WEn rise.
+
+ - devbus,wr-high-ps:   Defines the time during which DEV_WEn is kept
+                        inactive (high) between data beats of a burst write.
+                        DEV_A[2:0] and Data are kept valid (do not toggle) for
+                        <wr-high-ps> - <tick> ps.
+			This parameter defines the hold time of address and
+			data after DEV_WEn rise.
+
+ - devbus,sync-enable: Synchronous device enable.
+                       1: True
+                       0: False
+
+An example for an Armada XP GP board, with a 16 MiB NOR device as child
+is showed below. Note that the Device Bus driver is in charge of allocating
+the mbus address decoding window for each of its child devices.
+The window is created using the chip select specified in the child
+device node together with the base address and size specified in the ranges
+property. For instance, in the example below the allocated decoding window
+will start at base address 0xf0000000, with a size 0x1000000 (16 MiB)
+for chip select 0 (a.k.a DEV_BOOTCS).
+
+This address window handling is done in this mvebu-devbus only as a temporary
+solution. It will be removed when the support for mbus device tree binding is
+added.
+
+The reg property implicitly specifies the chip select as this:
+
+  0x10400: DEV_BOOTCS
+  0x10408: DEV_CS0
+  0x10410: DEV_CS1
+  0x10418: DEV_CS2
+  0x10420: DEV_CS3
+
+Example:
+
+	devbus-bootcs@d0010400 {
+		status = "okay";
+		ranges = <0 0xf0000000 0x1000000>; /* @addr 0xf0000000, size 0x1000000 */
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		/* Device Bus parameters are required */
+
+		/* Read parameters */
+		devbus,bus-width    = <8>;
+		devbus,turn-off-ps  = <60000>;
+		devbus,badr-skew-ps = <0>;
+		devbus,acc-first-ps = <124000>;
+		devbus,acc-next-ps  = <248000>;
+		devbus,rd-setup-ps  = <0>;
+		devbus,rd-hold-ps   = <0>;
+
+		/* Write parameters */
+		devbus,sync-enable = <0>;
+		devbus,wr-high-ps  = <60000>;
+		devbus,wr-low-ps   = <60000>;
+		devbus,ale-wr-ps   = <60000>;
+
+		flash@0 {
+			compatible = "cfi-flash";
+
+			/* 16 MiB */
+			reg = <0 0x1000000>;
+			bank-width = <2>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			/*
+			 * We split the 16 MiB in two partitions,
+			 * just as an example.
+			 */
+			partition@0 {
+				label = "First";
+				reg = <0 0x800000>;
+			};
+
+			partition@800000 {
+				label = "Second";
+				reg = <0x800000 0x800000>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/memory-controllers/mvebu-sdram-controller.txt b/Documentation/devicetree/bindings/memory-controllers/mvebu-sdram-controller.txt
new file mode 100644
index 000000000000..89657d1d4cd4
--- /dev/null
+++ b/Documentation/devicetree/bindings/memory-controllers/mvebu-sdram-controller.txt
@@ -0,0 +1,21 @@
+Device Tree bindings for MVEBU SDRAM controllers
+
+The Marvell EBU SoCs all have a SDRAM controller. The SDRAM controller
+differs from one SoC variant to another, but they also share a number
+of commonalities.
+
+For now, this Device Tree binding documentation only documents the
+Armada XP SDRAM controller.
+
+Required properties:
+
+ - compatible: for Armada XP, "marvell,armada-xp-sdram-controller"
+ - reg: a resource specifier for the register space, which should
+   include all SDRAM controller registers as per the datasheet.
+
+Example:
+
+sdramc@1400 {
+	compatible = "marvell,armada-xp-sdram-controller";
+	reg = <0x1400 0x500>;
+};
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-pxa.txt b/Documentation/devicetree/bindings/mmc/sdhci-pxa.txt
index dbe98a3c183a..86223c3eda90 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-pxa.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-pxa.txt
@@ -4,7 +4,14 @@ This file documents differences between the core properties in mmc.txt
 and the properties used by the sdhci-pxav2 and sdhci-pxav3 drivers.
 
 Required properties:
-- compatible: Should be "mrvl,pxav2-mmc" or "mrvl,pxav3-mmc".
+- compatible: Should be "mrvl,pxav2-mmc", "mrvl,pxav3-mmc" or
+  "marvell,armada-380-sdhci".
+- reg:
+  * for "mrvl,pxav2-mmc" and "mrvl,pxav3-mmc", one register area for
+    the SDHCI registers.
+  * for "marvell,armada-380-sdhci", two register areas. The first one
+    for the SDHCI registers themselves, and the second one for the
+    AXI/Mbus bridge registers of the SDHCI unit.
 
 Optional properties:
 - mrvl,clk-delay-cycles: Specify a number of cycles to delay for tuning.
@@ -19,3 +26,11 @@ sdhci@d4280800 {
 	non-removable;
 	mrvl,clk-delay-cycles = <31>;
 };
+
+sdhci@d8000 {
+	compatible = "marvell,armada-380-sdhci";
+	reg = <0xd8000 0x1000>, <0xdc000 0x100>;
+	interrupts = <0 25 0x4>;
+	clocks = <&gateclk 17>;
+	mrvl,clk-delay-cycles = <0x1F>;
+};
diff --git a/Documentation/devicetree/bindings/mtd/mvebu_nfc.txt b/Documentation/devicetree/bindings/mtd/mvebu_nfc.txt
new file mode 100644
index 000000000000..5f48146f1104
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/mvebu_nfc.txt
@@ -0,0 +1,59 @@
+* Marvell HAL based Nand Flash Controller driver device tree bindings
+
+Required properties:
+
+- compatible:			Should be "marvell,armada-nand"
+- reg:				The register base for the controller
+- interrupts:			The interrupt to map
+- #address-cells/#size-cells:	Set to <1> or <2> if the node includes partitions,
+				for more details see partition.txt file from
+				this folder
+- clocks:			Set clock that runs the controller
+- clock-frequency		SoC's Tclk frequency
+- nfc,nfc-dma			'0'/'1' for disable/enable DMA mode. Set always
+				to '0' - DMA mode not supported
+- nfc,nfc-width			Bus width - 8/16 bits
+- nfc,ecc-type			ECC mode options. Possible values:
+				0 - 1 bit
+				1 - 4 bit
+				2 - 8 bit
+				3 - 12 bit
+				4 - 16 bit
+				5 - disable ECC
+- nfc,num-cs			Chip-select
+
+Example:
+nfc: nand@d0000 {
+	compatible = "marvell,armada-nand";
+	interrupts = <0 84 0x4>;
+	reg = <0xd0000 0x400>;
+	clocks = <&coredivclk 0>;
+
+	#address-cells = <1>;
+	#size-cells = <1>;
+	clock-frequency = <200000000>;
+	status = "okay";
+
+	nfc,nfc-mode  = "normal";
+	nfc,nfc-dma   = <0>;
+	nfc,nfc-width = <8>;
+	nfc,ecc-type  = <1>;
+	nfc,num-cs    = <1>;
+
+	mtd0@00000000 {
+		label = "U-Boot";
+		reg = <0x00000000 0x00600000>;
+		read-only;
+	};
+
+	mtd1@00080000 {
+		label = "uImage";
+		reg = <0x00600000 0x00400000>;
+		read-only;
+	};
+
+	mtd2@00140000 {
+		label = "Root";
+		reg = <0x00a00000 0x3f600000>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt b/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt
index f1421e2bbab7..a0bcfd05ec01 100644
--- a/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt
@@ -13,6 +13,8 @@ Optional properties:
  - marvell,nand-keep-config:	Set to keep the NAND controller config as set
 				by the bootloader
  - num-cs:			Number of chipselect lines to usw
+ - nand-on-flash-bbt: 		boolean to enable on flash bbt option if
+				not present false
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/net/fixed-link.txt b/Documentation/devicetree/bindings/net/fixed-link.txt
new file mode 100644
index 000000000000..25a009a14c05
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fixed-link.txt
@@ -0,0 +1,26 @@
+Fixed link Device Tree binding
+------------------------------
+
+Some Ethernet MACs have a "fixed link", and are not connected to a
+normal MDIO-managed PHY device. For those situations, a Device Tree
+binding allows to describe a "fixed link".
+
+Such a fixed link situation is described within an Ethernet device
+Device Tree node using a 'fixed-link' property, composed of 5
+elements:
+
+ 1. A fake PHY ID, which must be unique accross all fixed-link PHYs in
+    the system.
+ 2. The duplex (1 for full-duplex, 0 for half-duplex)
+ 3. The speed (10, 100, 1000)
+ 4. The pause setting (1 for enabled, 0 for disabled)
+ 5. The asym pause setting (1 for enabled, 0 for disabled)
+
+Example:
+
+ethernet@0 {
+	...
+	fixed-link = <1 1 1000 0 0>;
+	...
+};
+
diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
index 859a6fa7569c..31e61c096be6 100644
--- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
+++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
@@ -4,13 +4,21 @@ Required properties:
 - compatible: should be "marvell,armada-370-neta".
 - reg: address and length of the register set for the device.
 - interrupts: interrupt for the device
-- phy: A phandle to a phy node defining the PHY address (as the reg
-  property, a single integer).
 - phy-mode: The interface between the SoC and the PHY (a string that
   of_get_phy_mode() can understand)
 - clocks: a pointer to the reference clock for this device.
 
-Example:
+Optional properties:
+
+- phy: A phandle to a phy node defining the PHY address (as the reg
+  property, a single integer). Note: if this property isn't present,
+  then fixed link is assumed, and the 'fixed-link' property is
+  mandatory.
+- fixed-link: A 5 elements array that describe a fixed link, see
+  fixed-link.txt for details. Note: if a 'phy' property is present,
+  this 'fixed-link' property is ignored.
+
+Examples:
 
 ethernet@d0070000 {
 	compatible = "marvell,armada-370-neta";
@@ -21,3 +29,13 @@ ethernet@d0070000 {
 	phy = <&phy0>;
 	phy-mode = "rgmii-id";
 };
+
+ethernet@d0070000 {
+	compatible = "marvell,armada-370-neta";
+	reg = <0xd0070000 0x2500>;
+	interrupts = <8>;
+	clocks = <&gate_clk 4>;
+	status = "okay";
+	fixed-link = <1 1 1000 0 0>;
+	phy-mode = "rgmii-id";
+};
diff --git a/Documentation/devicetree/bindings/net/marvell-pp2.txt b/Documentation/devicetree/bindings/net/marvell-pp2.txt
new file mode 100644
index 000000000000..e06fdcfa071c
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/marvell-pp2.txt
@@ -0,0 +1,67 @@
+* Marvell Armada 375 Ethernet Controller (PPv2)
+
+Required properties - common part:
+- compatible: should be "marvell,armada-375-pp2"
+- reg: addresses and length of the register sets for the device.
+  Minimum 3 sets are required:
+	- common controller registers
+	- LMS registers
+	- GbE MAC0 registers
+	- GbE MAC1 registers
+	- Other port's registers (the driver enables extending the list
+	  with another GbE MAC's or PON port in future)
+- phy-mode: the interface between the SoC and the PHY (a string that
+  of_get_phy_mode() can understand)
+- clocks: a pointer to the reference clocks for this device, consequently:
+	- main Packet Processor clock
+	- GOP clock
+- clock-names: names of used clocks
+
+The Ethernet/PON ports are represented by their subnodes. The presence of
+at least one is required. The order of the subnodes is not important,
+however related register sets have to be declared in the common part
+accordingly.
+
+Required properties - port part:
+- interrupts: port interrupt id
+- port-id: port is recognized by it's id field which should fit predefined
+  values in HW, e.g. ethernet MACs (id=0/1), loopback (id=2)
+- phy-mode: the interface between the SoC and the PHY (a string that
+  of_get_phy_mode() can understand)
+
+Optional properties - port part:
+
+- marvell,loopback: set GMAC to loopback mode
+- phy: a phandle to a phy node defining the PHY address (as the reg
+  property, a single integer). Note: if this property isn't present,
+  then fixed link is assumed, and the 'fixed-link' property is
+  mandatory.
+
+Examples:
+
+pp2@f0000 {
+	compatible = "marvell,armada-375-pp2";
+	reg = <0xf0000 0xa000>,
+	      <0xc0000 0x3060>,
+	      <0xc4000 0x100>,
+	      <0xc5000 0x100>;
+	clocks = <&gateclk 3>, <&gateclk 19>;
+	clock-names = "pp_clk", "gop_clk";
+	status = "ok";
+
+	eth0: ethernet@c4000 {
+		interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+		port-id = <0>;
+		status = "ok";
+		phy = <&phy0>;
+		phy-mode = "gmii";
+	};
+
+	eth1: ethernet@c5000 {
+		interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+		port-id = <1>;
+		status = "ok";
+		phy = <&phy3>;
+		phy-mode = "gmii";
+	};
+};
diff --git a/Documentation/devicetree/bindings/pci/mvebu-pci.txt b/Documentation/devicetree/bindings/pci/mvebu-pci.txt
new file mode 100644
index 000000000000..9355a85ad002
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/mvebu-pci.txt
@@ -0,0 +1,296 @@
+* Marvell EBU PCIe interfaces
+
+Mandatory properties:
+
+- compatible: one of the following values:
+    marvell,armada-370-pcie
+    marvell,armada-xp-pcie
+- #address-cells, set to <3>
+- #size-cells, set to <2>
+- #interrupt-cells, set to <1>
+- bus-range: PCI bus numbers covered
+- device_type, set to "pci"
+- ranges: ranges describing the MMIO registers to control the PCIe
+  interfaces, and ranges describing the MBus windows needed to access
+  the memory and I/O regions of each PCIe interface.
+
+- msi-parent: Link to the hardware entity that serves as the Message
+  Signaled Interrupt controller for this PCI controller.
+The ranges describing the MMIO registers have the following layout:
+
+    0x82000000 0 r MBUS_ID(0xf0, 0x01) r 0 s
+
+where:
+
+  * r is a 32-bits value that gives the offset of the MMIO
+  registers of this PCIe interface, from the base of the internal
+  registers.
+
+  * s is a 32-bits value that give the size of this MMIO
+  registers area. This range entry translates the '0x82000000 0 r' PCI
+  address into the 'MBUS_ID(0xf0, 0x01) r' CPU address, which is part
+  of the internal register window (as identified by MBUS_ID(0xf0,
+  0x01)).
+
+The ranges describing the MBus windows have the following layout:
+
+    0x8t000000 s 0     MBUS_ID(w, a) 0 1 0
+
+where:
+
+   * t is the type of the MBus window (as defined by the standard PCI DT
+   bindings), 1 for I/O and 2 for memory.
+
+   * s is the PCI slot that corresponds to this PCIe interface
+
+   * w is the 'target ID' value for the MBus window
+
+   * a the 'attribute' value for the MBus window.
+
+Since the location and size of the different MBus windows is not fixed in
+hardware, and only determined in runtime, those ranges cover the full first
+4 GB of the physical address space, and do not translate into a valid CPU
+address.
+
+In addition, the device tree node must have sub-nodes describing each
+PCIe interface, having the following mandatory properties:
+
+- reg: used only for interrupt mapping, so only the first four bytes
+  are used to refer to the correct bus number and device number.
+- assigned-addresses: reference to the MMIO registers used to control
+  this PCIe interface.
+- clocks: the clock associated to this PCIe interface
+- marvell,pcie-port: the physical PCIe port number
+- status: either "disabled" or "okay"
+- device_type, set to "pci"
+- #address-cells, set to <3>
+- #size-cells, set to <2>
+- #interrupt-cells, set to <1>
+- ranges, translating the MBus windows ranges of the parent node into
+  standard PCI addresses.
+- interrupt-map-mask and interrupt-map, standard PCI properties to
+  define the mapping of the PCIe interface to interrupt numbers.
+
+and the following optional properties:
+- marvell,pcie-lane: the physical PCIe lane number, for ports having
+  multiple lanes. If this property is not found, we assume that the
+  value is 0.
+
+Example:
+
+pcie-controller {
+	compatible = "marvell,armada-xp-pcie";
+	status = "disabled";
+	device_type = "pci";
+
+	#address-cells = <3>;
+	#size-cells = <2>;
+
+	bus-range = <0x00 0xff>;
+	msi-parent = <&mpic>;
+
+	ranges =
+	       <0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000	/* Port 0.0 registers */
+		0x82000000 0 0x42000 MBUS_ID(0xf0, 0x01) 0x42000 0 0x00002000	/* Port 2.0 registers */
+		0x82000000 0 0x44000 MBUS_ID(0xf0, 0x01) 0x44000 0 0x00002000	/* Port 0.1 registers */
+		0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000	/* Port 0.2 registers */
+		0x82000000 0 0x4c000 MBUS_ID(0xf0, 0x01) 0x4c000 0 0x00002000	/* Port 0.3 registers */
+		0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000	/* Port 1.0 registers */
+		0x82000000 0 0x82000 MBUS_ID(0xf0, 0x01) 0x82000 0 0x00002000	/* Port 3.0 registers */
+		0x82000000 0 0x84000 MBUS_ID(0xf0, 0x01) 0x84000 0 0x00002000	/* Port 1.1 registers */
+		0x82000000 0 0x88000 MBUS_ID(0xf0, 0x01) 0x88000 0 0x00002000	/* Port 1.2 registers */
+		0x82000000 0 0x8c000 MBUS_ID(0xf0, 0x01) 0x8c000 0 0x00002000	/* Port 1.3 registers */
+		0x82000000 0x1 0     MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0.0 MEM */
+		0x81000000 0x1 0     MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 IO  */
+		0x82000000 0x2 0     MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 0.1 MEM */
+		0x81000000 0x2 0     MBUS_ID(0x04, 0xd0) 0 1 0 /* Port 0.1 IO  */
+		0x82000000 0x3 0     MBUS_ID(0x04, 0xb8) 0 1 0 /* Port 0.2 MEM */
+		0x81000000 0x3 0     MBUS_ID(0x04, 0xb0) 0 1 0 /* Port 0.2 IO  */
+		0x82000000 0x4 0     MBUS_ID(0x04, 0x78) 0 1 0 /* Port 0.3 MEM */
+		0x81000000 0x4 0     MBUS_ID(0x04, 0x70) 0 1 0 /* Port 0.3 IO  */
+
+		0x82000000 0x5 0     MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 MEM */
+		0x81000000 0x5 0     MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 1.0 IO  */
+		0x82000000 0x6 0     MBUS_ID(0x08, 0xd8) 0 1 0 /* Port 1.1 MEM */
+		0x81000000 0x6 0     MBUS_ID(0x08, 0xd0) 0 1 0 /* Port 1.1 IO  */
+		0x82000000 0x7 0     MBUS_ID(0x08, 0xb8) 0 1 0 /* Port 1.2 MEM */
+		0x81000000 0x7 0     MBUS_ID(0x08, 0xb0) 0 1 0 /* Port 1.2 IO  */
+		0x82000000 0x8 0     MBUS_ID(0x08, 0x78) 0 1 0 /* Port 1.3 MEM */
+		0x81000000 0x8 0     MBUS_ID(0x08, 0x70) 0 1 0 /* Port 1.3 IO  */
+
+		0x82000000 0x9 0     MBUS_ID(0x04, 0xf8) 0 1 0 /* Port 2.0 MEM */
+		0x81000000 0x9 0     MBUS_ID(0x04, 0xf0) 0 1 0 /* Port 2.0 IO  */
+
+		0x82000000 0xa 0     MBUS_ID(0x08, 0xf8) 0 1 0 /* Port 3.0 MEM */
+		0x81000000 0xa 0     MBUS_ID(0x08, 0xf0) 0 1 0 /* Port 3.0 IO  */>;
+
+	pcie@1,0 {
+		device_type = "pci";
+		assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
+		reg = <0x0800 0 0 0 0>;
+		#address-cells = <3>;
+		#size-cells = <2>;
+		#interrupt-cells = <1>;
+		ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0
+			  0x81000000 0 0 0x81000000 0x1 0 1 0>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &mpic 58>;
+		marvell,pcie-port = <0>;
+		marvell,pcie-lane = <0>;
+		clocks = <&gateclk 5>;
+		status = "disabled";
+	};
+
+	pcie@2,0 {
+		device_type = "pci";
+		assigned-addresses = <0x82001000 0 0x44000 0 0x2000>;
+		reg = <0x1000 0 0 0 0>;
+		#address-cells = <3>;
+		#size-cells = <2>;
+		#interrupt-cells = <1>;
+		ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0
+			  0x81000000 0 0 0x81000000 0x2 0 1 0>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &mpic 59>;
+		marvell,pcie-port = <0>;
+		marvell,pcie-lane = <1>;
+		clocks = <&gateclk 6>;
+		status = "disabled";
+	};
+
+	pcie@3,0 {
+		device_type = "pci";
+		assigned-addresses = <0x82001800 0 0x48000 0 0x2000>;
+		reg = <0x1800 0 0 0 0>;
+		#address-cells = <3>;
+		#size-cells = <2>;
+		#interrupt-cells = <1>;
+		ranges = <0x82000000 0 0 0x82000000 0x3 0 1 0
+			  0x81000000 0 0 0x81000000 0x3 0 1 0>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &mpic 60>;
+		marvell,pcie-port = <0>;
+		marvell,pcie-lane = <2>;
+		clocks = <&gateclk 7>;
+		status = "disabled";
+	};
+
+	pcie@4,0 {
+		device_type = "pci";
+		assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>;
+		reg = <0x2000 0 0 0 0>;
+		#address-cells = <3>;
+		#size-cells = <2>;
+		#interrupt-cells = <1>;
+		ranges = <0x82000000 0 0 0x82000000 0x4 0 1 0
+			  0x81000000 0 0 0x81000000 0x4 0 1 0>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &mpic 61>;
+		marvell,pcie-port = <0>;
+		marvell,pcie-lane = <3>;
+		clocks = <&gateclk 8>;
+		status = "disabled";
+	};
+
+	pcie@5,0 {
+		device_type = "pci";
+		assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
+		reg = <0x2800 0 0 0 0>;
+		#address-cells = <3>;
+		#size-cells = <2>;
+		#interrupt-cells = <1>;
+		ranges = <0x82000000 0 0 0x82000000 0x5 0 1 0
+			  0x81000000 0 0 0x81000000 0x5 0 1 0>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &mpic 62>;
+		marvell,pcie-port = <1>;
+		marvell,pcie-lane = <0>;
+		clocks = <&gateclk 9>;
+		status = "disabled";
+	};
+
+	pcie@6,0 {
+		device_type = "pci";
+		assigned-addresses = <0x82003000 0 0x84000 0 0x2000>;
+		reg = <0x3000 0 0 0 0>;
+		#address-cells = <3>;
+		#size-cells = <2>;
+		#interrupt-cells = <1>;
+		ranges = <0x82000000 0 0 0x82000000 0x6 0 1 0
+			  0x81000000 0 0 0x81000000 0x6 0 1 0>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &mpic 63>;
+		marvell,pcie-port = <1>;
+		marvell,pcie-lane = <1>;
+		clocks = <&gateclk 10>;
+		status = "disabled";
+	};
+
+	pcie@7,0 {
+		device_type = "pci";
+		assigned-addresses = <0x82003800 0 0x88000 0 0x2000>;
+		reg = <0x3800 0 0 0 0>;
+		#address-cells = <3>;
+		#size-cells = <2>;
+		#interrupt-cells = <1>;
+		ranges = <0x82000000 0 0 0x82000000 0x7 0 1 0
+			  0x81000000 0 0 0x81000000 0x7 0 1 0>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &mpic 64>;
+		marvell,pcie-port = <1>;
+		marvell,pcie-lane = <2>;
+		clocks = <&gateclk 11>;
+		status = "disabled";
+	};
+
+	pcie@8,0 {
+		device_type = "pci";
+		assigned-addresses = <0x82004000 0 0x8c000 0 0x2000>;
+		reg = <0x4000 0 0 0 0>;
+		#address-cells = <3>;
+		#size-cells = <2>;
+		#interrupt-cells = <1>;
+		ranges = <0x82000000 0 0 0x82000000 0x8 0 1 0
+			  0x81000000 0 0 0x81000000 0x8 0 1 0>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &mpic 65>;
+		marvell,pcie-port = <1>;
+		marvell,pcie-lane = <3>;
+		clocks = <&gateclk 12>;
+		status = "disabled";
+	};
+
+	pcie@9,0 {
+		device_type = "pci";
+		assigned-addresses = <0x82004800 0 0x42000 0 0x2000>;
+		reg = <0x4800 0 0 0 0>;
+		#address-cells = <3>;
+		#size-cells = <2>;
+		#interrupt-cells = <1>;
+		ranges = <0x82000000 0 0 0x82000000 0x9 0 1 0
+			  0x81000000 0 0 0x81000000 0x9 0 1 0>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &mpic 99>;
+		marvell,pcie-port = <2>;
+		marvell,pcie-lane = <0>;
+		clocks = <&gateclk 26>;
+		status = "disabled";
+	};
+
+	pcie@10,0 {
+		device_type = "pci";
+		assigned-addresses = <0x82005000 0 0x82000 0 0x2000>;
+		reg = <0x5000 0 0 0 0>;
+		#address-cells = <3>;
+		#size-cells = <2>;
+		#interrupt-cells = <1>;
+		ranges = <0x82000000 0 0 0x82000000 0xa 0 1 0
+			  0x81000000 0 0 0x81000000 0xa 0 1 0>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &mpic 103>;
+		marvell,pcie-port = <3>;
+		marvell,pcie-lane = <0>;
+		clocks = <&gateclk 27>;
+		status = "disabled";
+	};
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada-375-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/marvell,armada-375-pinctrl.txt
new file mode 100644
index 000000000000..2ede59dffe1b
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada-375-pinctrl.txt
@@ -0,0 +1,81 @@
+* Marvell Armada 375 SoC pinctrl driver for mpp
+
+Please refer to marvell,mvebu-pinctrl.txt in this directory for common binding
+part and usage.
+
+Required properties:
+- compatible: "marvell,88f6720-pinctrl"
+
+Available mpp pins/groups and functions:
+Note: brackets (x) are not part of the mpp name for marvell,function and given
+only for more detailed description in this document.
+
+name          pins     functions
+================================================================================
+mpp0          0        gpio, dev(ad2), spi0(cs1), spi1(cs1)
+mpp1          1        gpio, dev(ad3), spi0(mosi), spi1(mosi)
+mpp2          2        gpio, dev(ad4), ptp(eventreq), led(c0), audio(sdi)
+mpp3          3        gpio, dev(ad5), ptp(triggen), led(p3), audio(mclk)
+mpp4          4        gpio, dev(ad6), spi0(miso), spi1(miso)
+mpp5          5        gpio, dev(ad7), spi0(cs2), spi1(cs2)
+mpp6          6        gpio, dev(ad0), led(p1), audio(rclk)
+mpp7          7        gpio, dev(ad1), ptp(clk), led(p2), audio(extclk)
+mpp8          8        gpio, dev (bootcs), spi0(cs0), spi1(cs0)
+mpp9          9        gpio, nf(wen), spi0(sck), spi1(sck)
+mpp10        10        gpio, nf(ren), dram(vttctrl), led(c1)
+mpp11        11        gpio, dev(a0), led(c2), audio(sdo)
+mpp12        12        gpio, dev(a1), audio(bclk)
+mpp13        13        gpio, dev(readyn), pcie0(rstoutn), pcie1(rstoutn)
+mpp14        14        gpio, i2c0(sda), uart1(txd)
+mpp15        15        gpio, i2c0(sck), uart1(rxd)
+mpp16        16        gpio, uart0(txd)
+mpp17        17        gpio, uart0(rxd)
+mpp18        18        gpio, tdm(intn)
+mpp19        19        gpio, tdm(rstn)
+mpp20        20        gpio, tdm(pclk)
+mpp21        21        gpio, tdm(fsync)
+mpp22        22        gpio, tdm(drx)
+mpp23        23        gpio, tdm(dtx)
+mpp24        24        gpio, led(p0), ge1(rxd0), sd(cmd), uart0(rts)
+mpp25        25        gpio, led(p2), ge1(rxd1), sd(d0), uart0(cts)
+mpp26        26        gpio, pcie0(clkreq), ge1(rxd2), sd(d2), uart1(rts)
+mpp27        27        gpio, pcie1(clkreq), ge1(rxd3), sd(d1), uart1(cts)
+mpp28        28        gpio, led(p3), ge1(txctl), sd(clk)
+mpp29        29        gpio, pcie1(clkreq), ge1(rxclk), sd(d3)
+mpp30        30        gpio, ge1(txd0), spi1(cs0)
+mpp31        31        gpio, ge1(txd1), spi1(mosi)
+mpp32        32        gpio, ge1(txd2), spi1(sck), ptp(triggen)
+mpp33        33        gpio, ge1(txd3), spi1(miso)
+mpp34        34        gpio, ge1(txclkout), spi1(sck)
+mpp35        35        gpio, ge1(rxctl), spi1(cs1), spi0(cs2)
+mpp36        36        gpio, pcie0(clkreq)
+mpp37        37        gpio, pcie0(clkreq), tdm(intn), ge(mdc)
+mpp38        38        gpio, pcie1(clkreq), ge(mdio)
+mpp39        39        gpio, ref(clkout)
+mpp40        40        gpio, uart1(txd)
+mpp41        41        gpio, uart1(rxd)
+mpp42        42        gpio, spi1(cs2), led(c0)
+mpp43        43        gpio, sata0(prsnt), dram(vttctrl)
+mpp44        44        gpio, sata0(prsnt)
+mpp45        45        gpio, spi0(cs2), pcie0(rstoutn)
+mpp46        46        gpio, led(p0), ge0(txd0), ge1(txd0)
+mpp47        47        gpio, led(p1), ge0(txd1), ge1(txd1)
+mpp48        48        gpio, led(p2), ge0(txd2), ge1(txd2)
+mpp49        49        gpio, led(p3), ge0(txd3), ge1(txd3)
+mpp50        50        gpio, led(c0), ge0(rxd0), ge1(rxd0)
+mpp51        51        gpio, led(c1), ge0(rxd1), ge1(rxd1)
+mpp52        52        gpio, led(c2), ge0(rxd2), ge1(rxd2)
+mpp53        53        gpio, pcie1(rstoutn), ge0(rxd3), ge1(rxd3)
+mpp54        54        gpio, pcie0(rstoutn), ge0(rxctl), ge1(rxctl)
+mpp55        55        gpio, ge0(rxclk), ge1(rxclk)
+mpp56        56        gpio, ge0(txclkout), ge1(txclkout)
+mpp57        57        gpio, ge0(txctl), ge1(txctl)
+mpp58        58        gpio, led(c0)
+mpp59        59        gpio, led(c1)
+mpp60        60        gpio, uart1(txd), led(c2)
+mpp61        61        gpio, i2c1(sda), uart1(rxd), spi1(cs2), led(p0)
+mpp62        62        gpio, i2c1(sck), led(p1)
+mpp63        63        gpio, ptp(triggen), led(p2)
+mpp64        64        gpio, dram(vttctrl), led(p3)
+mpp65        65        gpio, sata1(prsnt)
+mpp66        66        gpio, ptp(eventreq), spi1(cs3)
diff --git a/Documentation/devicetree/bindings/reset/marvell,armada-cpu-reset.txt b/Documentation/devicetree/bindings/reset/marvell,armada-cpu-reset.txt
new file mode 100644
index 000000000000..88a218c68ae9
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/marvell,armada-cpu-reset.txt
@@ -0,0 +1,28 @@
+Marvell Armada CPU reset controller
+===================================
+
+Please also refer to reset.txt in this directory for common reset
+controller binding usage.
+
+Required properties:
+- compatible: Should be "marvell,armada-<chip>-cpu-reset"
+- reg: should be register base and length as documented in the
+  datasheet for the CPU reset register
+- #reset-cells: 1, which allows to pass the CPU hardware ID when
+  referencing this cpu reset block. See the example below.
+
+cpurst: cpurst@20800 {
+	compatible = "marvell,armada-xp-cpu-reset";
+	reg = <0x20800 0x20>;
+	reset-cells = <1>;
+};
+
+And to associate a CPU to its reset controller:
+
+cpu@0 {
+	device_type = "cpu";
+	compatible = "marvell,sheeva-v7";
+	reg = <0>;
+	clocks = <&cpuclk 0>;
+	resets = <&cpurst 0>;
+};
diff --git a/Documentation/devicetree/bindings/thermal/armada-thermal.txt b/Documentation/devicetree/bindings/thermal/armada-thermal.txt
index fff93d5f92de..4698e0edc205 100644
--- a/Documentation/devicetree/bindings/thermal/armada-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/armada-thermal.txt
@@ -1,9 +1,11 @@
-* Marvell Armada 370/XP thermal management
+* Marvell Armada 370/375/380/XP thermal management
 
 Required properties:
 
 - compatible:	Should be set to one of the following:
 		marvell,armada370-thermal
+		marvell,armada375-thermal
+		marvell,armada380-thermal
 		marvell,armadaxp-thermal
 
 - reg:		Device's register space.
diff --git a/Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt b/Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt
index 36381129d141..59894fb972db 100644
--- a/Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt
+++ b/Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt
@@ -1,15 +1,46 @@
-Marvell Armada 370 and Armada XP Timers
----------------------------------------
+Marvell Armada 370, 375 and XP Timers
+-------------------------------------
 
 Required properties:
-- compatible: Should be "marvell,armada-370-xp-timer"
+- compatible: Should be either:
+    - "marvell,armada-370-timer"
+    - "marvell,armada-375-timer"
+    - "marvell,armada-xp-timer"
 - interrupts: Should contain the list of Global Timer interrupts and
   then local timer interrupts
 - reg: Should contain location and length for timers register. First
   pair for the Global Timer registers, second pair for the
   local/private timers.
-- clocks: clock driving the timer hardware
 
-Optional properties:
-- marvell,timer-25Mhz: Tells whether the Global timer supports the 25
-  Mhz fixed mode (available on Armada XP and not on Armada 370)
+Clocks required for compatible = "marvell,armada-370-timer":
+- clocks : Must contain a single entry describing the clock input
+
+Clocks required for compatible = "marvell,armada-375-timer":
+- clocks : Must contain a single entry describing the clock input
+
+Clocks required for compatible = "marvell,armada-xp-timer":
+- clocks : Must contain an entry for each entry in clock-names.
+- clock-names : Must include the following entries:
+  "nbclk" (L2/coherency fabric clock),
+  "fixed" (Reference 25 MHz fixed-clock).
+
+Examples:
+
+- Armada 370:
+
+	timer {
+		compatible = "marvell,armada-370-timer";
+		reg = <0x20300 0x30>, <0x21040 0x30>;
+		interrupts = <37>, <38>, <39>, <40>, <5>, <6>;
+		clocks = <&coreclk 2>;
+	};
+
+- Armada XP:
+
+	timer {
+		compatible = "marvell,armada-xp-timer";
+		reg = <0x20300 0x30>, <0x21040 0x30>;
+		interrupts = <37>, <38>, <39>, <40>, <5>, <6>;
+		clocks = <&coreclk 2>, <&refclk>;
+		clock-names = "nbclk", "fixed";
+	};
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt
new file mode 100644
index 000000000000..708be2ddaac9
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt
@@ -0,0 +1,14 @@
+USB xHCI controllers
+
+Required properties:
+  - compatible: should be "xhci-platform" or "xhci-armada-375"
+  - reg: should contain address and length of the standard XHCI
+    register set for the device.
+  - interrupts: one XHCI interrupt should be described here.
+
+Example:
+	usb@f0931000 {
+		compatible = "xhci-platform";
+		reg = <0xf0931000 0x8c8>;
+		interrupts = <0x0 0x4e 0x0>;
+	};
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 18a9f5ef643a..f6a8e37bb7aa 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -435,7 +435,6 @@ config ARCH_NETX
 config ARCH_IOP13XX
 	bool "IOP13xx-based"
 	depends on MMU
-	select ARCH_SUPPORTS_MSI
 	select CPU_XSC3
 	select NEED_MACH_MEMORY_H
 	select NEED_RET_TO_USER
@@ -474,6 +473,7 @@ config ARCH_IXP4XX
 	bool "IXP4xx-based"
 	depends on MMU
 	select ARCH_HAS_DMA_SET_COHERENT_MASK
+	select ARCH_SUPPORTS_BIG_ENDIAN
 	select ARCH_REQUIRE_GPIOLIB
 	select CLKSRC_MMIO
 	select CPU_XSCALE
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 1d41908d5cda..e6a6ab1b10d7 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -303,12 +303,37 @@ choice
 		  their output to the serial port on MSM 8960 devices.
 
 	config DEBUG_MVEBU_UART
-		bool "Kernel low-level debugging messages via MVEBU UART"
+		bool "Kernel low-level debugging messages via MVEBU UART (old bootloaders)"
 		depends on ARCH_MVEBU
 		help
 		  Say Y here if you want kernel low-level debugging support
 		  on MVEBU based platforms.
 
+		  This option should be used with the old bootloaders
+		  that left the internal registers mapped at
+		  0xd0000000. As of today, this is the case on
+		  platforms such as the Globalscale Mirabox or the
+		  Plathome OpenBlocks AX3, when using the original
+		  bootloader.
+
+		  If the wrong DEBUG_MVEBU_UART* option is selected,
+		  when u-boot hands over to the kernel, the system
+		  silently crashes, with no serial output at all.
+
+	config DEBUG_MVEBU_UART_ALTERNATE
+		bool "Kernel low-level debugging messages via MVEBU UART (new bootloaders)"
+		depends on ARCH_MVEBU
+		help
+		  Say Y here if you want kernel low-level debugging support
+		  on MVEBU based platforms.
+
+		  This option should be used with the new bootloaders
+		  that remap the internal registers at 0xf1000000.
+
+		  If the wrong DEBUG_MVEBU_UART* option is selected,
+		  when u-boot hands over to the kernel, the system
+		  silently crashes, with no serial output at all.
+
 	config DEBUG_NOMADIK_UART
 		bool "Kernel low-level debugging messages via NOMADIK UART"
 		depends on ARCH_NOMADIK
@@ -632,7 +657,8 @@ config DEBUG_LL_INCLUDE
 				 DEBUG_IMX51_UART || \
 				 DEBUG_IMX53_UART ||\
 				 DEBUG_IMX6Q_UART
-	default "debug/mvebu.S" if DEBUG_MVEBU_UART
+	default "debug/mvebu.S" if DEBUG_MVEBU_UART || \
+				   DEBUG_MVEBU_UART_ALTERNATE
 	default "debug/mxs.S" if DEBUG_IMX23_UART || DEBUG_IMX28_UART
 	default "debug/nomadik.S" if DEBUG_NOMADIK_UART
 	default "debug/omap2plus.S" if DEBUG_OMAP2PLUS_UART
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 1ba358ba16b8..70bc19e2274f 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -16,6 +16,7 @@ LDFLAGS		:=
 LDFLAGS_vmlinux	:=-p --no-undefined -X
 ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
 LDFLAGS_vmlinux	+= --be8
+LDFLAGS_MODULE	+= --be8
 endif
 
 OBJCOPYFLAGS	:=-O binary -R .comment -S
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore
index f79a08efe000..68642049ed6a 100644
--- a/arch/arm/boot/compressed/.gitignore
+++ b/arch/arm/boot/compressed/.gitignore
@@ -8,6 +8,7 @@ piggy.lzma
 piggy.xzkern
 vmlinux
 vmlinux.lds
+vmlinux.lds.S
 
 # borrowed libfdt files
 fdt.c
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 120b83bfde20..87a5bce22837 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -195,7 +195,7 @@ CFLAGS_font.o := -Dstatic=
 $(obj)/font.c: $(FONTC)
 	$(call cmd,shipped)
 
-$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
+$(obj)/vmlinux.lds.S: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
 	@sed "$(SEDFLAGS)" < $< > $@
 
 $(obj)/hyp-stub.S: $(srctree)/arch/$(SRCARCH)/kernel/hyp-stub.S
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 032a8d987148..ac7ff2a4d039 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -130,11 +130,13 @@ start:
  THUMB(		adr	r12, BSYM(1f)	)
  THUMB(		bx	r12		)
 
-		.word	0x016f2818		@ Magic numbers to help the loader
-		.word	start			@ absolute load/run zImage address
-		.word	_edata			@ zImage end address
+		.word	_magic_sig	@ Magic numbers to help the loader
+		.word	_magic_start	@ absolute load/run zImage address
+		.word	_magic_end	@ zImage end address
+
  THUMB(		.thumb			)
 1:
+ ARM_BE8(	setend	be )			@ go BE8 if compiled for BE8
 		mrs	r9, cpsr
 #ifdef CONFIG_ARM_VIRT_EXT
 		bl	__hyp_stub_install	@ get into SVC mode, reversibly
@@ -679,9 +681,7 @@ __armv4_mmu_cache_on:
 		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
 		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
 		orr	r0, r0, #0x0030
-#ifdef CONFIG_CPU_ENDIAN_BE8
-		orr	r0, r0, #1 << 25	@ big-endian page tables
-#endif
+ ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
 		bl	__common_mmu_cache_on
 		mov	r0, #0
 		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
@@ -708,9 +708,7 @@ __armv7_mmu_cache_on:
 		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
 						@ (needed for ARM1176)
 #ifdef CONFIG_MMU
-#ifdef CONFIG_CPU_ENDIAN_BE8
-		orr	r0, r0, #1 << 25	@ big-endian page tables
-#endif
+ ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
 		mrcne   p15, 0, r6, c2, c0, 2   @ read ttb control reg
 		orrne	r0, r0, #1		@ MMU enabled
 		movne	r1, #0xfffffffd		@ domain 0 = client
diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c
old mode 100644
new mode 100755
index 31bd43b82095..70fc185a1db8
--- a/arch/arm/boot/compressed/misc.c
+++ b/arch/arm/boot/compressed/misc.c
@@ -27,6 +27,22 @@ extern void error(char *x);
 
 #include CONFIG_UNCOMPRESS_INCLUDE
 
+#define MARVELL_MEMIO32_WRITE(addr, data)    \
+        ((*((volatile unsigned int *)(addr))) = ((unsigned int)(data)))
+
+#define MARVELL_MEMIO32_READ(addr)           \
+        ((*((volatile unsigned int *)(addr))))
+
+#define DELAY_TIME 4000000
+void Create_Delay(int time)
+{
+  int i=0;
+  int a=0;
+  
+  for (i=0;i<time;i++)
+    a = 10 * 100;
+}
+        
 #ifdef CONFIG_DEBUG_ICEDCC
 
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
@@ -151,4 +167,62 @@ decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p,
 		error("decompressor returned an error");
 	else
 		putstr(" done, booting the kernel.\n");
+  
+  #if defined(Yosemite)
+  putstr("Modify USB signal\n");	    
+	//Yosemite SRDS3 USB3_Host1			    
+	MARVELL_MEMIO32_WRITE(0xF10A1904, 0x00003008);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xF10A1a88, 0x00000C20);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xF10A1a90, 0x00008f17);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xF10A1E20, 0x00000293);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xF10A18F8, 0x00004560);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xf10a1840, 0x00000BD2);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xf10a1C48, 0x000000EF);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xf10a1E00, 0x00001001);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xf10a1F08, 0x00000004);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xf10a195c, 0x00000081);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xf10a195c, 0x00000001);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xf10a195c, 0x00004001);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xf10a195c, 0x00000001);
+  Create_Delay(DELAY_TIME);
+  
+	//Yosemite SRDS3 USB3_Host0			    
+	MARVELL_MEMIO32_WRITE(0xf10a2104, 0x00003008);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xf10a2288, 0x00000C20);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xf10a2290, 0x00008f17);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xf10a2620, 0x00000293);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xf10a20F8, 0x00004560);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xf10a2040, 0x00000BD2);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xf10a2448, 0x000000EF);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xf10a2600, 0x00001001);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xf10a2708, 0x00000004);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xf10a215c, 0x00000081);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xf10a215c, 0x00000001);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xf10a215c, 0x00004001);
+	Create_Delay(DELAY_TIME);
+	MARVELL_MEMIO32_WRITE(0xf10a215c, 0x00000001);
+	#endif
 }
diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in
index 4919f2ac8b89..60162231c7ea 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.in
+++ b/arch/arm/boot/compressed/vmlinux.lds.in
@@ -7,6 +7,16 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
+#ifdef CONFIG_CPU_ENDIAN_BE8
+#define ZIMAGE_MAGIC(x) ( (((x) >> 24) & 0x000000ff) | \
+			  (((x) >>  8) & 0x0000ff00) | \
+			  (((x) <<  8) & 0x00ff0000) | \
+			  (((x) << 24) & 0xff000000) )
+#else
+#define ZIMAGE_MAGIC(x) (x)
+#endif
+
 OUTPUT_ARCH(arm)
 ENTRY(_start)
 SECTIONS
@@ -57,6 +67,10 @@ SECTIONS
   .pad			: { BYTE(0); . = ALIGN(8); }
   _edata = .;
 
+  _magic_sig = ZIMAGE_MAGIC(0x016f2818);
+  _magic_start = ZIMAGE_MAGIC(_start);
+  _magic_end = ZIMAGE_MAGIC(_edata);
+
   . = BSS_START;
   __bss_start = .;
   .bss			: { *(.bss) }
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index f0895c581a89..fa8e988b9b85 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -94,6 +94,10 @@ dtb-$(CONFIG_ARCH_MSM) += msm8660-surf.dtb \
 dtb-$(CONFIG_ARCH_MVEBU) += armada-370-db.dtb \
 	armada-370-mirabox.dtb \
 	armada-370-rd.dtb \
+	armada-375-db.dtb \
+	armada-385-db.dtb \
+	armada-385-rd.dtb \
+	armada-388-rd.dtb \
 	armada-xp-db.dtb \
 	armada-xp-gp.dtb \
 	armada-xp-openblocks-ax3-4.dtb
diff --git a/arch/arm/boot/dts/YY_default/armada-385-db.dts b/arch/arm/boot/dts/YY_default/armada-385-db.dts
new file mode 100644
index 000000000000..91ecd2e85d9a
--- /dev/null
+++ b/arch/arm/boot/dts/YY_default/armada-385-db.dts
@@ -0,0 +1,157 @@
+/*
+ * Device Tree file for Marvell Armada 385 evaluation board
+ * (DB-88F6820)
+ *
+ *  Copyright (C) 2013 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "armada-385-388.dtsi"
+
+/ {
+	model = "Marvell Armada 385 Development Board";
+	compatible = "marvell,a385-db", "marvell,armada385", "marvell,armada38x";
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlyprintk";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>; /* 256 MB */
+	};
+
+	soc {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+			  MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
+			  MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000	/* CESA0: PHYS=0xf1100000
+									   size 64K */
+			  MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;	/* CESA1: PHYS=0xf1110000
+									   size 64K */
+
+		internal-regs {
+			ethernet@70000 {
+				status = "okay";
+				phy = <&phy0>;
+				phy-mode = "rgmii";
+			};
+
+			ethernet@30000 {
+				status = "okay";
+				phy = <&phy1>;
+				phy-mode = "rgmii";
+			};
+
+			i2c0: i2c@11000 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			i2c1: i2c@11100 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			mdio {
+				phy0: ethernet-phy@0 {
+					reg = <0>;
+				};
+
+				phy1: ethernet-phy@1 {
+					reg = <1>;
+				};
+			};
+
+			sata@a8000 {
+				status = "okay";
+			};
+
+			sata@e0000 {
+				status = "okay";
+			};
+
+			sdhci@d8000 {
+				broken-cd;
+				wp-inverted;
+				bus-width = <8>;
+				status = "okay";
+			};
+
+			serial@12000 {
+				status = "okay";
+			};
+
+			spi0: spi@10600 {
+				status = "okay";
+
+				spi-flash@0 {
+					#address-cells = <1>;
+					#size-cells = <1>;
+					compatible = "w25q32";
+					reg = <0>; /* Chip select 0 */
+					spi-max-frequency = <108000000>;
+				};
+			};
+
+			/*
+			 * 1GB Flash via NFC NAND controller
+			 * by defeault disabled, because NFC
+			 * shares same pins with SPI0 and
+			 * requires SLM-1358 jumper
+			 */
+			nfc: nand@d0000 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+
+				nfc,nfc-mode  = "normal";	/* normal or ganged */
+				nfc,nfc-dma   = <0>;		/* 0 for no, 1 for dma */
+				nfc,nfc-width = <8>;
+				nfc,ecc-type  = <1>;		/* 4 bit */
+				nfc,num-cs    = <1>;
+
+				mtd0@00000000 {
+					label = "U-Boot";
+					reg = <0x00000000 0x00600000>;
+					read-only;
+				};
+
+				mtd1@00080000 {
+					label = "uImage";
+					reg = <0x00600000 0x00400000>;
+					read-only;
+				};
+
+				mtd2@00140000 {
+					label = "Root";
+					reg = <0x00a00000 0x3f600000>;
+				};
+			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+			/*
+			 * The two PCIe units are accessible through
+			 * standard PCIe slots on the board.
+			 */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "okay";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/YY_default/armada-385-rd.dts b/arch/arm/boot/dts/YY_default/armada-385-rd.dts
new file mode 100644
index 000000000000..7bb7c60a2348
--- /dev/null
+++ b/arch/arm/boot/dts/YY_default/armada-385-rd.dts
@@ -0,0 +1,127 @@
+/*
+ * Device Tree file for Marvell Armada 385 Reference Design board
+ * (RD-88F6820-AP)
+ *
+ *  Copyright (C) 2013 Marvell
+ *
+ * Nadav Haklai <nadavh@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "armada-385-388.dtsi"
+
+/ {
+	model = "Marvell Armada 385 Reference Design";
+	compatible = "marvell,a385-rd", "marvell,armada385", "marvell,armada38x";
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlyprintk";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>; /* 256 MB */
+	};
+
+	soc {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+			  MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
+			  MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000	/* CESA0: PHYS=0xf1100000
+									   size 64K */
+			  MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;	/* CESA1: PHYS=0xf1110000
+									   size 64K */
+
+		internal-regs {
+			ethernet@70000 {
+				status = "okay";
+				phy = <&phy0>;
+				phy-mode = "rgmii";
+			};
+
+			ethernet@30000 {
+				status = "okay";
+				phy = <&phy1>;
+				phy-mode = "rgmii";
+			};
+
+			i2c0: i2c@11000 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			i2c1: i2c@11100 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			mdio {
+				phy0: ethernet-phy@1 {
+					reg = <1>;
+				};
+
+				phy1: ethernet-phy@0 {
+					reg = <0>;
+				};
+			};
+
+			sata@a8000 {
+				status = "okay";
+			};
+
+			sata@e0000 {
+				status = "okay";
+			};
+
+			sdhci@d8000 {
+				broken-cd;
+				wp-inverted;
+				bus-width = <8>;
+				status = "okay";
+			};
+
+			serial@12000 {
+				status = "okay";
+			};
+
+			spi0: spi@10600 {
+				status = "okay";
+
+				spi-flash@0 {
+					#address-cells = <1>;
+					#size-cells = <1>;
+					compatible = "w25q32";
+					reg = <0>; /* Chip select 0 */
+					spi-max-frequency = <108000000>;
+				};
+			};
+
+			usb3@f8000 {
+				status = "disabled";
+			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+			/*
+			 * The two PCIe units are accessible through
+			 * standard PCIe slots on the board.
+			 */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "okay";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/YY_default/armada-388-rd.dts b/arch/arm/boot/dts/YY_default/armada-388-rd.dts
new file mode 100644
index 000000000000..69c45f3a2ee9
--- /dev/null
+++ b/arch/arm/boot/dts/YY_default/armada-388-rd.dts
@@ -0,0 +1,127 @@
+/*
+ * Device Tree file for Marvell Armada 388 Reference Design board
+ * (RB-88F6828-NAS)
+ *
+ *  Copyright (C) 2013 Marvell
+ *
+ * Nadav Haklai <nadavh@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "armada-385-388.dtsi"
+
+/ {
+	model = "Marvell Armada 388 Reference Design";
+	compatible = "marvell,a388-rd", "marvell,armada388", "marvell,armada38x";
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlyprintk";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>; /* 256 MB */
+	};
+
+	soc {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+			  MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
+			  MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000	/* CESA0: PHYS=0xf1100000
+									   size 64K */
+			  MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;	/* CESA1: PHYS=0xf1110000
+									   size 64K */
+
+		internal-regs {
+			ethernet@70000 {
+				status = "okay";
+				phy = <&phy0>;
+				phy-mode = "rgmii";
+			};
+
+			ethernet@30000 {
+				status = "okay";
+				phy = <&phy1>;
+				phy-mode = "rgmii";
+			};
+
+			i2c0: i2c@11000 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			i2c1: i2c@11100 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			mdio {
+				phy0: ethernet-phy@1 {
+					reg = <1>;
+				};
+
+				phy1: ethernet-phy@0 {
+					reg = <0>;
+				};
+			};
+
+			sata@a8000 {
+				status = "okay";
+			};
+
+			sata@e0000 {
+				status = "okay";
+			};
+
+			sdhci@d8000 {
+				broken-cd;
+				wp-inverted;
+				bus-width = <8>;
+				status = "okay";
+			};
+
+			serial@12000 {
+				status = "okay";
+			};
+
+			spi0: spi@10600 {
+				status = "okay";
+
+				spi-flash@0 {
+					#address-cells = <1>;
+					#size-cells = <1>;
+					compatible = "w25q32";
+					reg = <0>; /* Chip select 0 */
+					spi-max-frequency = <108000000>;
+				};
+			};
+
+			usb3@f8000 {
+				status = "disabled";
+			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+			/*
+			 * The two PCIe units are accessible through
+			 * standard PCIe slots on the board.
+			 */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "disabled";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/YY_default/armada-38x.dtsi b/arch/arm/boot/dts/YY_default/armada-38x.dtsi
new file mode 100644
index 000000000000..b06a360898bf
--- /dev/null
+++ b/arch/arm/boot/dts/YY_default/armada-38x.dtsi
@@ -0,0 +1,515 @@
+/*
+ * Device Tree Include file for Marvell Armada 38x family of SoCs.
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include "skeleton.dtsi"
+
+#define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16))
+
+/ {
+	model = "Marvell Armada 38x family SoC";
+	compatible = "marvell,armada38x";
+
+	aliases {
+		gpio0 = &gpio0;
+		gpio1 = &gpio1;
+	};
+
+	soc {
+		compatible = "marvell,armada380-mbus", "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <1>;
+		controller = <&mbusc>;
+		interrupt-parent = <&gic>;
+		pcie-mem-aperture = <0xe0000000 0x8000000>;
+		pcie-io-aperture  = <0xe8000000 0x100000>;
+
+		bootrom {
+			compatible = "marvell,bootrom";
+			reg = <MBUS_ID(0x01, 0x1d) 0 0x200000>;
+		};
+
+		/* Security Accelerator SRAM (CESA) */
+		cesa-sram {
+			compatible = "marvell,cesa-sram";
+			reg = <MBUS_ID(0x09, 0x19) 0 0x10000   /*chan0*/
+			       MBUS_ID(0x09, 0x15) 0 0x10000>; /*chan1*/
+		};
+
+		devbus-bootcs {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10400 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x2f) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs0 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10408 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x3e) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs1 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10410 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x3d) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs2 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10418 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x3b) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs3 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10420 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x37) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		internal-regs {
+			compatible = "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 MBUS_ID(0xf0, 0x01) 0 0x100000>;
+
+			coherency-fabric@21010 {
+				compatible = "marvell,armada-380-coherency-fabric";
+				reg = <0x21010 0x1c>;
+			};
+
+			coreclk: mvebu-sar@18600 {
+				compatible = "marvell,armada-380-core-clock";
+				reg = <0x18600 0x04>;
+				#clock-cells = <1>;
+			};
+
+			coredivclk: corediv-clock@e4250 {
+				compatible = "marvell,armada-38x-corediv-clock";
+				reg = <0xe4250 0x8>;
+				#clock-cells = <1>;
+				clocks = <&mainpll>;
+				clock-output-names = "nand";
+			};
+
+			cpurst: cpurst@20800 {
+				compatible = "marvell,armada-380-cpu-reset";
+				reg = <0x20800 0x10>;
+				#reset-cells = <1>;
+			};
+
+			mpcore-soc-ctrl@20d20 {
+				compatible = "marvell,armada-380-mpcore-soc-ctrl";
+				reg = <0x20d20 0x6c>;
+			};
+
+			mdio {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "marvell,orion-mdio";
+				reg = <0x72004 0x4>;
+			};
+
+			eth0: ethernet@70000 {
+				compatible = "marvell,neta";
+				reg = <0x70000 0x4000>;
+				interrupts-extended = <&mpic 8>;
+				clocks = <&gateclk 4>;
+				tx-csum-limit = <9800>;
+				status = "disabled";
+				mac-address = [ 00 50 43 02 02 01 ];
+				eth,port-num    = <0>;
+				eth,port-mtu    = <1500>;
+			};
+
+			eth1: ethernet@30000 {
+				compatible = "marvell,neta";
+				reg = <0x30000 0x4000>;
+				interrupts-extended = <&mpic 10>;
+				clocks = <&gateclk 3>;
+				tx-csum-limit = <2048>;
+				status = "disabled";
+				mac-address = [ 00 50 43 02 02 02 ];
+				eth,port-num    = <1>;
+				eth,port-mtu    = <1500>;
+			};
+
+			eth2: ethernet@34000 {
+				compatible = "marvell,neta";
+				reg = <0x34000 0x4000>;
+				interrupts-extended = <&mpic 12>;
+				clocks = <&gateclk 2>;
+				tx-csum-limit = <2048>;
+				status = "disabled";
+				mac-address = [ 00 50 43 02 02 03 ];
+				eth,port-num    = <2>;
+				eth,port-mtu    = <1500>;
+			};
+
+			gateclk: clock-gating-control@18220 {
+				compatible = "marvell,armada-380-gating-clock";
+				reg = <0x18220 0x4>;
+				clocks = <&coreclk 0>;
+				#clock-cells = <1>;
+			};
+
+			gpio0: gpio@18100 {
+				compatible = "marvell,orion-gpio";
+				reg = <0x18100 0x40>;
+				ngpios = <32>;
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				interrupts = <0 53 0x4>, <0 54 0x4>,
+					     <0 55 0x4>, <0 56 0x4>;
+			};
+
+			gpio1: gpio@18140 {
+				compatible = "marvell,orion-gpio";
+				reg = <0x18140 0x40>;
+				ngpios = <28>;
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				interrupts = <0 58 0x4>, <0 59 0x4>,
+					     <0 60 0x4>, <0 61 0x4>;
+			};
+
+			i2c0: i2c@11000 {
+				compatible = "marvell,mv64xxx-i2c";
+				reg = <0x11000 0x20>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				interrupts = <0 2 0x4>;
+				timeout-ms = <1000>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			i2c1: i2c@11100 {
+				compatible = "marvell,mv64xxx-i2c";
+				reg = <0x11100 0x20>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				interrupts = <0 3 0x4>;
+				timeout-ms = <1000>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			gic: interrupt-controller@1e001000 {
+				compatible = "arm,cortex-a9-gic";
+				#interrupt-cells = <3>;
+				#size-cells = <0>;
+				interrupt-controller;
+				reg = <0xd000 0x1000>,
+				      <0xc100 0x100>;
+			};
+
+			L2: cache-controller {
+				compatible = "arm,pl310-cache";
+				reg = <0x8000 0x1000>;
+				cache-unified;
+				cache-level = <2>;
+			};
+
+			mbusc: mbus-controller@20000 {
+				compatible = "marvell,mbus-controller";
+				reg = <0x20000 0x100>, <0x20180 0x20>;
+			};
+
+			mpic: interrupt-controller@20000 {
+				compatible = "marvell,mpic";
+				reg = <0x20a00 0x2d0>, <0x21070 0x58>;
+				#interrupt-cells = <1>;
+				#size-cells = <1>;
+				interrupt-controller;
+				msi-controller;
+				interrupts = <1 15 0x4>;
+			};
+
+			pinctrl {
+				compatible = "marvell,mv88f6820-pinctrl";
+				reg = <0x18000 0x20>;
+			};
+
+			pmsu@22000 {
+				compatible = "marvell,armada-380-pmsu";
+				reg = <0x22000 0x1000>;
+			};
+
+			pm {
+				compatible = "marvell,armada-380-pm";
+				reg = <0x1400 0x310>, <0x18000 0x200>;
+			};
+
+			rtc@a3800 {
+				compatible = "marvell,mvebu-rtc";
+				reg = <0xa3800 0x20>, <0x184a0 0xc>;
+				interrupts = <0 21 0x4>;
+			};
+
+			sata@a8000 {
+				compatible = "marvell,ahci-sata";
+				reg = <0xa8000 0x2000>;
+				interrupts = <0 26 0x4>;
+				clocks = <&gateclk 15>;
+				status = "disabled";
+			};
+
+			sata@e0000 {
+				compatible = "marvell,ahci-sata";
+				reg = <0xe0000 0x2000>;
+				interrupts = <0 28 0x4>;
+				clocks = <&gateclk 30>;
+				status = "disabled";
+			};
+
+			sdhci@d8000 {
+				compatible = "marvell,armada-380-sdhci";
+				reg = <0xd8000 0x1000>, <0xdc000 0x100>;
+				interrupts = <0 25 0x4>;
+				clocks = <&gateclk 17>;
+				mrvl,clk-delay-cycles = <0x1F>;
+				status = "disabled";
+			};
+
+			scu@c000 {
+				compatible = "arm,cortex-a9-scu";
+				reg = <0xc000 0x58>;
+			};
+
+			serial@12000 {
+				compatible = "snps,dw-apb-uart";
+				reg = <0x12000 0x100>;
+				reg-shift = <2>;
+				interrupts = <0 12 4>;
+				reg-io-width = <1>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			serial@12100 {
+				compatible = "snps,dw-apb-uart";
+				reg = <0x12100 0x100>;
+				reg-shift = <2>;
+				interrupts = <0 13 4>;
+				reg-io-width = <1>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			thermal@e8078 {
+				compatible = "marvell,armada380-thermal";
+				reg = <0xe4078 0x4>, <0xe4074 0x4>;
+				status = "okay";
+			};
+
+			spi0: spi@10600 {
+				compatible = "marvell,orion-spi";
+				reg = <0x10600 0x50>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				cell-index = <0>;
+				interrupts = <0 1 0x4>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			spi1: spi@10680 {
+				compatible = "marvell,orion-spi";
+				reg = <0x10680 0x50>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				cell-index = <1>;
+				interrupts = <0 63 0x4>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			nfc: nand@d0000 {
+				compatible = "marvell,armada-nand";
+				interrupts = <0 84 0x4>;
+				reg = <0xd0000 0x400>;
+				clocks = <&coredivclk 0>;
+				clock-names = "ecc_clk";
+				status = "disabled";
+			};
+
+			system-controller@18200 {
+				compatible = "marvell,armada-380-system-controller";
+				reg = <0x18200 0x100>;
+			};
+
+			timer@c600 {
+				compatible = "arm,cortex-a9-twd-timer";
+				reg = <0xc600 0x20>;
+				interrupts = <1 13 0x301>;
+				clocks = <&coreclk 2>;
+			};
+
+			usb@50000 {
+				compatible = "marvell,orion-ehci";
+				reg = <0x58000 0x500>;
+				interrupts = <0 18 4>;
+				clocks = <&gateclk 18>;
+			};
+
+			usb3@f0000 {
+				compatible = "marvell,xhci-armada-380";
+				reg = <0xf0000 0x3fff>,<0xf4000 0x3fff>;
+				interrupts = <0 16 0x4>;
+				clocks = <&gateclk 9>;
+			};
+
+			usb3@f8000 {
+				compatible = "marvell,xhci-armada-380";
+				reg = <0xf8000 0x3fff>,<0xfc000 0x3fff>;
+				interrupts = <0 17 0x4>;
+				clocks = <&gateclk 10>;
+			};
+
+			usb3-utmi@1842 {
+				compatible = "marvell,armada-380-usb-utmi";
+				reg = <0x18420 0x74>,<0xc0000 0x10000>;
+			};
+
+			timer@20300 {
+				compatible = "marvell,armada-380-timer";
+				reg = <0x20300 0x30>, <0x21040 0x30>;
+				interrupts-extended = <&gic  0  8 4>,
+						      <&gic  0  9 4>,
+						      <&gic  0 10 4>,
+						      <&gic  0 11 4>,
+						      <&mpic 5>,
+						      <&mpic 6>;
+				clocks = <&coreclk 2>, <&refclk>;
+				clock-names = "nbclk", "fixed";
+			};
+
+			xor@60800 {
+				compatible = "marvell,orion-xor";
+				reg = <0x60800 0x100
+				       0x60a00 0x100>;
+				clocks = <&gateclk 22>;
+				status = "okay";
+
+				xor00 {
+					interrupts = <0 22 0x4>;
+					dmacap,memcpy;
+					dmacap,xor;
+					dmacap,interrupt;
+				};
+				xor01 {
+					interrupts = <0 23 0x4>;
+					dmacap,crc32c;
+				};
+			};
+
+			xor@60900 {
+				compatible = "marvell,orion-xor";
+				reg = <0x60900 0x100
+				       0x60b00 0x100>;
+				clocks = <&gateclk 28>;
+				status = "okay";
+
+				xor10 {
+					interrupts = <0 65 0x4>;
+					dmacap,memcpy;
+					dmacap,xor;
+					dmacap,interrupt;
+				};
+				xor11 {
+					interrupts = <0 66 0x4>;
+					dmacap,crc32c;
+				};
+			};
+
+			crypto@9D000 {
+				compatible = "marvell,armada-cesa";
+				reg = <0x9D000 0x1000	/* cesa base reg chan 0 */
+				       0x90000 0x1000	/* tdma base reg chan 0 */
+				       0x9F000 0x1000	/* cesa base reg chan 1 */
+				       0x92000 0x1000>;	/* tdma base reg chan 1 */
+				clocks = <&gateclk 23>, <&gateclk 14>,
+				         <&gateclk 21>, <&gateclk 16>;
+				clock-names = "crypto0", "crypto0z",
+					      "crypto1", "crypto1z";
+				cesa,channels = <0x2>;
+				cesa,mode = "ocf";	/* ocf or test */
+				cesa,feature = "int_coalescing"; /* chain, int_coalescing
+							   or int_per_packet */
+
+				/* threshold and time_threshold relevant if
+				   int_coalescing in use */
+				cesa,threshold = <0x2>;
+				cesa,time_threshold = <0xfffff>;
+
+				cesa,ctrlModel = /bits/ 16 <0x6800>;
+				cesa,ctrlRev = /bits/ 8 <2>;
+				cesa,sramOffset = /bits/ 16 <0x40>;
+				status = "disabled";
+
+				crypto10 {
+					/* channel 0 */
+					interrupts = <0 19 0x4>;
+				};
+				crypto11 {
+					/* channel 1 */
+					interrupts = <0 20 0x4>;
+				};
+			};
+
+			pmu {
+				compatible = "arm,cortex-a9-pmu";
+				interrupts-extended = <&mpic 3>;
+			};
+		};
+	};
+
+	clocks {
+		/* 25 MHz reference crystal */
+		refclk: oscillator {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <25000000>;
+		};
+		/* 2 GHz fixed main PLL */
+		mainpll: mainpll {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <2000000000>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/YY_default_T30p5/armada-385-db.dts b/arch/arm/boot/dts/YY_default_T30p5/armada-385-db.dts
new file mode 100644
index 000000000000..7a6a50ecd24c
--- /dev/null
+++ b/arch/arm/boot/dts/YY_default_T30p5/armada-385-db.dts
@@ -0,0 +1,153 @@
+/*
+ * Device Tree file for Marvell Armada 385 evaluation board
+ * (DB-88F6820)
+ *
+ *  Copyright (C) 2013 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "armada-385-388.dtsi"
+
+/ {
+	model = "Marvell Armada 385 Development Board";
+	compatible = "marvell,a385-db", "marvell,armada385", "marvell,armada38x";
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlyprintk";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>; /* 256 MB */
+	};
+
+	soc {
+
+		internal-regs {
+			ethernet@70000 {
+				status = "okay";
+				phy = <&phy0>;
+				phy-mode = "rgmii";
+			};
+
+			ethernet@30000 {
+				status = "okay";
+				phy = <&phy1>;
+				phy-mode = "rgmii";
+			};
+
+			i2c0: i2c@11000 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			i2c1: i2c@11100 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			mdio {
+				phy0: ethernet-phy@0 {
+					reg = <0>;
+				};
+
+				phy1: ethernet-phy@1 {
+					reg = <1>;
+				};
+			};
+
+			sata@a8000 {
+				status = "okay";
+			};
+
+			sdhci@d8000 {
+				broken-cd;
+				wp-inverted;
+				bus-width = <8>;
+				status = "okay";
+				no-1-8-v;
+			};
+
+			serial@12000 {
+				status = "okay";
+			};
+
+			spi0: spi@10600 {
+				status = "okay";
+
+				spi-flash@0 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					compatible = "w25q32";
+					reg = <0>; /* Chip select 0 */
+					spi-max-frequency = <108000000>;
+				};
+			};
+
+			/*
+			 * 1GB Flash via NFC NAND controller
+			 * should be disabled when the board boots
+			 * from SPI flash, since NFC shares the same
+			 * pins with SPI0 and requires SLM-1358 jumper.
+			 * However the u-boot DTB parser will
+			 * handle this situation and disable/remove
+			 * unnessesary devices according to board
+			 * boot-up configuration.
+			 */
+			nfc: nand@d0000 {
+				status = "okay";
+				#address-cells = <1>;
+				#size-cells = <1>;
+
+				nfc,nfc-mode  = "normal";	/* normal or ganged */
+				nfc,nfc-dma   = <0>;		/* 0 for no, 1 for dma */
+				nfc,nfc-width = <8>;
+				nfc,ecc-type  = <1>;		/* 4 bit */
+				nfc,num-cs    = <1>;
+
+				mtd0@00000000 {
+					label = "U-Boot";
+					reg = <0x00000000 0x00600000>;
+					read-only;
+				};
+
+				mtd1@00080000 {
+					label = "uImage";
+					reg = <0x00600000 0x00400000>;
+					read-only;
+				};
+
+				mtd2@00140000 {
+					label = "Root";
+					reg = <0x00a00000 0x3f600000>;
+				};
+			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+			/*
+			 * The two PCIe units are accessible through
+			 * standard PCIe slots on the board.
+			 */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "okay";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/YY_default_T30p5/armada-385-rd.dts b/arch/arm/boot/dts/YY_default_T30p5/armada-385-rd.dts
new file mode 100644
index 000000000000..7bb7c60a2348
--- /dev/null
+++ b/arch/arm/boot/dts/YY_default_T30p5/armada-385-rd.dts
@@ -0,0 +1,127 @@
+/*
+ * Device Tree file for Marvell Armada 385 Reference Design board
+ * (RD-88F6820-AP)
+ *
+ *  Copyright (C) 2013 Marvell
+ *
+ * Nadav Haklai <nadavh@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "armada-385-388.dtsi"
+
+/ {
+	model = "Marvell Armada 385 Reference Design";
+	compatible = "marvell,a385-rd", "marvell,armada385", "marvell,armada38x";
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlyprintk";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>; /* 256 MB */
+	};
+
+	soc {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+			  MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
+			  MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000	/* CESA0: PHYS=0xf1100000
+									   size 64K */
+			  MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;	/* CESA1: PHYS=0xf1110000
+									   size 64K */
+
+		internal-regs {
+			ethernet@70000 {
+				status = "okay";
+				phy = <&phy0>;
+				phy-mode = "rgmii";
+			};
+
+			ethernet@30000 {
+				status = "okay";
+				phy = <&phy1>;
+				phy-mode = "rgmii";
+			};
+
+			i2c0: i2c@11000 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			i2c1: i2c@11100 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			mdio {
+				phy0: ethernet-phy@1 {
+					reg = <1>;
+				};
+
+				phy1: ethernet-phy@0 {
+					reg = <0>;
+				};
+			};
+
+			sata@a8000 {
+				status = "okay";
+			};
+
+			sata@e0000 {
+				status = "okay";
+			};
+
+			sdhci@d8000 {
+				broken-cd;
+				wp-inverted;
+				bus-width = <8>;
+				status = "okay";
+			};
+
+			serial@12000 {
+				status = "okay";
+			};
+
+			spi0: spi@10600 {
+				status = "okay";
+
+				spi-flash@0 {
+					#address-cells = <1>;
+					#size-cells = <1>;
+					compatible = "w25q32";
+					reg = <0>; /* Chip select 0 */
+					spi-max-frequency = <108000000>;
+				};
+			};
+
+			usb3@f8000 {
+				status = "disabled";
+			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+			/*
+			 * The two PCIe units are accessible through
+			 * standard PCIe slots on the board.
+			 */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "okay";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/YY_default_T30p5/armada-388-rd.dts b/arch/arm/boot/dts/YY_default_T30p5/armada-388-rd.dts
new file mode 100755
index 000000000000..09adbd2e8097
--- /dev/null
+++ b/arch/arm/boot/dts/YY_default_T30p5/armada-388-rd.dts
@@ -0,0 +1,131 @@
+/*
+ * Device Tree file for Marvell Armada 388 Reference Design board
+ * (RB-88F6828-NAS)
+ *
+ *  Copyright (C) 2013 Marvell
+ *
+ * Nadav Haklai <nadavh@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "armada-385-388.dtsi"
+
+/ {
+	model = "Marvell Armada 388 Reference Design";
+	compatible = "marvell,a388-rd", "marvell,armada388", "marvell,armada38x";
+
+	chosen {
+		bootargs = "root=/dev/ram console=ttyS0,115200 earlyprintk";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>; /* 256 MB */
+	};
+
+	soc {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+			  MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
+			  MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000	/* CESA0: PHYS=0xf1100000
+									   size 64K */
+			  MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;	/* CESA1: PHYS=0xf1110000
+									   size 64K */
+
+		internal-regs {
+			ethernet@70000 {
+				status = "okay";
+				phy = <&phy0>;
+				phy-mode = "rgmii";
+			};
+
+			ethernet@30000 {
+				status = "okay";
+				phy = <&phy1>;
+				phy-mode = "rgmii";
+			};
+
+			i2c0: i2c@11000 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			i2c1: i2c@11100 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			mdio {
+				phy0: ethernet-phy@1 {
+					reg = <1>;
+				};
+
+				phy1: ethernet-phy@0 {
+					reg = <0>;
+				};
+			};
+
+			sata@a8000 {
+				status = "okay";
+			};
+
+			sata@e0000 {
+				status = "okay";
+			};
+
+			sdhci@d8000 {
+				broken-cd;
+				wp-inverted;
+				bus-width = <8>;
+				status = "okay";
+			};
+
+			serial@12000 {
+				status = "okay";
+			};
+
+      serial@12100 {
+				status = "okay";
+			};
+			
+			spi0: spi@10600 {
+				status = "okay";
+
+				spi-flash@0 {
+					#address-cells = <1>;
+					#size-cells = <1>;
+					compatible = "w25q32";
+					reg = <0>; /* Chip select 0 */
+					spi-max-frequency = <108000000>;
+				};
+			};
+
+			usb3@f8000 {
+				status = "disabled";
+			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+			/*
+			 * The two PCIe units are accessible through
+			 * standard PCIe slots on the board.
+			 */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "disabled";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/YY_default_T30p5/armada-38x.dtsi b/arch/arm/boot/dts/YY_default_T30p5/armada-38x.dtsi
new file mode 100644
index 000000000000..61d03e111390
--- /dev/null
+++ b/arch/arm/boot/dts/YY_default_T30p5/armada-38x.dtsi
@@ -0,0 +1,538 @@
+/*
+ * Device Tree Include file for Marvell Armada 38x family of SoCs.
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include "skeleton.dtsi"
+
+#define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16))
+
+/ {
+	model = "Marvell Armada 38x family SoC";
+	compatible = "marvell,armada38x";
+
+	aliases {
+		gpio0 = &gpio0;
+		gpio1 = &gpio1;
+	};
+
+	soc {
+		compatible = "marvell,armada380-mbus", "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <1>;
+		controller = <&mbusc>;
+		interrupt-parent = <&gic>;
+		pcie-mem-aperture = <0xe0000000 0x8000000>;
+		pcie-io-aperture  = <0xe8000000 0x100000>;
+
+		ranges = <
+			  MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+			  MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
+			  MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000	/* CESA0: PHYS=0xf1100000
+									   size 64K */
+			  MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000	/* CESA1: PHYS=0xf1110000 */
+			  MBUS_ID(0x0c, 0x04) 0 0xf1200000 0x100000	/* BM: PHYS=0xf1200000 size 1M */
+			  MBUS_ID(0x0b, 0x04) 0 0xf1300000 0x100000>;	/* PNC: PHYS=0xf1300000 size 1M */
+
+		bootrom {
+			compatible = "marvell,bootrom";
+			reg = <MBUS_ID(0x01, 0x1d) 0 0x200000>;
+		};
+
+		/* Security Accelerator SRAM (CESA) */
+		cesa-sram {
+			compatible = "marvell,cesa-sram";
+			reg = <MBUS_ID(0x09, 0x19) 0 0x10000   /*chan0*/
+			       MBUS_ID(0x09, 0x15) 0 0x10000>; /*chan1*/
+		};
+
+		devbus-bootcs {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10400 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x2f) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs0 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10408 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x3e) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs1 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10410 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x3d) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs2 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10418 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x3b) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs3 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10420 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x37) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		internal-regs {
+			compatible = "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 MBUS_ID(0xf0, 0x01) 0 0x100000>;
+
+			coherency-fabric@21010 {
+				compatible = "marvell,armada-380-coherency-fabric";
+				reg = <0x21010 0x1c>;
+			};
+
+			coreclk: mvebu-sar@18600 {
+				compatible = "marvell,armada-380-core-clock";
+				reg = <0x18600 0x04>;
+				#clock-cells = <1>;
+			};
+
+			coredivclk: corediv-clock@e4250 {
+				compatible = "marvell,armada-38x-corediv-clock";
+				reg = <0xe4250 0x8>;
+				#clock-cells = <1>;
+				clocks = <&mainpll>;
+				clock-output-names = "nand";
+			};
+
+			cpurst: cpurst@20800 {
+				compatible = "marvell,armada-380-cpu-reset";
+				reg = <0x20800 0x10>;
+				#reset-cells = <1>;
+			};
+
+			mpcore-soc-ctrl@20d20 {
+				compatible = "marvell,armada-380-mpcore-soc-ctrl";
+				reg = <0x20d20 0x6c>;
+			};
+
+			mdio {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "marvell,orion-mdio";
+				reg = <0x72004 0x4>;
+			};
+
+			/* PnC and BM */
+			bm_pnc@c0000 {
+				compatible = "marvell,neta_bm_pnc";
+				reg = <0xc8000 0xAC 0xb8000 0x48>;
+				clocks = <&gateclk 13>, <&gateclk 29>;
+				/*neta_cap_bm, bitmap of NETA dynamic capabilities, such as PNC, BM, HWF and PME
+				  PNC--0x1, BM--0x2, HWF--0x4, PME--0x8*/
+				neta_cap_bm = <0x3>;
+				pnc_tcam_size = <1024>;
+			};
+
+			eth0: ethernet@70000 {
+				compatible = "marvell,neta";
+				reg = <0x70000 0x4000>;
+				interrupts-extended = <&mpic 8>;
+				clocks = <&gateclk 4>;
+				tx-csum-limit = <9800>;
+				status = "disabled";
+				mac-address = [ 00 50 43 02 02 01 ];
+				eth,port-num    = <0>;
+				eth,port-mtu    = <1500>;
+			};
+
+			eth1: ethernet@30000 {
+				compatible = "marvell,neta";
+				reg = <0x30000 0x4000>;
+				interrupts-extended = <&mpic 10>;
+				clocks = <&gateclk 3>;
+				tx-csum-limit = <2048>;
+				status = "disabled";
+				mac-address = [ 00 50 43 02 02 02 ];
+				eth,port-num    = <1>;
+				eth,port-mtu    = <1500>;
+			};
+
+			eth2: ethernet@34000 {
+				compatible = "marvell,neta";
+				reg = <0x34000 0x4000>;
+				interrupts-extended = <&mpic 12>;
+				clocks = <&gateclk 2>;
+				tx-csum-limit = <2048>;
+				status = "disabled";
+				mac-address = [ 00 50 43 02 02 03 ];
+				eth,port-num    = <2>;
+				eth,port-mtu    = <1500>;
+			};
+
+			gateclk: clock-gating-control@18220 {
+				compatible = "marvell,armada-380-gating-clock";
+				reg = <0x18220 0x4>;
+				clocks = <&coreclk 0>;
+				#clock-cells = <1>;
+			};
+
+			gpio0: gpio@18100 {
+				compatible = "marvell,orion-gpio";
+				reg = <0x18100 0x40>;
+				ngpios = <32>;
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				interrupts = <0 53 0x4>, <0 54 0x4>,
+					     <0 55 0x4>, <0 56 0x4>;
+			};
+
+			gpio1: gpio@18140 {
+				compatible = "marvell,orion-gpio";
+				reg = <0x18140 0x40>;
+				ngpios = <28>;
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				interrupts = <0 58 0x4>, <0 59 0x4>,
+					     <0 60 0x4>, <0 61 0x4>;
+			};
+
+			i2c0: i2c@11000 {
+				compatible = "marvell,mv64xxx-i2c";
+				reg = <0x11000 0x20>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				interrupts = <0 2 0x4>;
+				timeout-ms = <1000>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			i2c1: i2c@11100 {
+				compatible = "marvell,mv64xxx-i2c";
+				reg = <0x11100 0x20>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				interrupts = <0 3 0x4>;
+				timeout-ms = <1000>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			gic: interrupt-controller@1e001000 {
+				compatible = "arm,cortex-a9-gic";
+				#interrupt-cells = <3>;
+				#size-cells = <0>;
+				interrupt-controller;
+				reg = <0xd000 0x1000>,
+				      <0xc100 0x100>;
+			};
+
+			L2: cache-controller {
+				compatible = "arm,pl310-cache";
+				reg = <0x8000 0x1000>;
+				cache-unified;
+				cache-level = <2>;
+			};
+
+			mbusc: mbus-controller@20000 {
+				compatible = "marvell,mbus-controller";
+				reg = <0x20000 0x100>, <0x20180 0x20>, <0x20250 0x8>;
+			};
+
+			mpic: interrupt-controller@20000 {
+				compatible = "marvell,mpic";
+				reg = <0x20a00 0x2d0>, <0x21070 0x58>, <0x21870 0x190>;
+				#interrupt-cells = <1>;
+				#size-cells = <1>;
+				interrupt-controller;
+				msi-controller;
+				interrupts = <1 15 0x4>;
+			};
+
+			pinctrl {
+				compatible = "marvell,mv88f6820-pinctrl";
+				reg = <0x18000 0x20>;
+			};
+
+			pmsu@22000 {
+				compatible = "marvell,armada-380-pmsu";
+				reg = <0x22000 0x1000>;
+			};
+
+			sdramc@1400 {
+				compatible = "marvell,armada-xp-sdram-controller";
+				reg = <0x1400 0x500>;
+			};
+
+			rtc@a3800 {
+				compatible = "marvell,mvebu-rtc";
+				reg = <0xa3800 0x20>, <0x184a0 0xc>;
+				interrupts = <0 21 0x4>;
+			};
+
+			sata@a8000 {
+				compatible = "marvell,ahci-sata";
+				reg = <0xa8000 0x2000>;
+				interrupts = <0 26 0x4>;
+				clocks = <&gateclk 15>;
+				status = "disabled";
+			};
+
+			sata@e0000 {
+				compatible = "marvell,ahci-sata";
+				reg = <0xe0000 0x2000>;
+				interrupts = <0 28 0x4>;
+				clocks = <&gateclk 30>;
+				status = "disabled";
+			};
+
+			sdhci@d8000 {
+				compatible = "marvell,armada-380-sdhci";
+				reg = <0xd8000 0x1000>, <0xdc000 0x100>,
+				      <0x18454 0x4>;
+				interrupts = <0 25 0x4>;
+				clocks = <&gateclk 17>;
+				mrvl,clk-delay-cycles = <0x1F>;
+				status = "disabled";
+			};
+
+			scu@c000 {
+				compatible = "arm,cortex-a9-scu";
+				reg = <0xc000 0x58>;
+			};
+
+			serial@12000 {
+				compatible = "snps,dw-apb-uart";
+				reg = <0x12000 0x100>;
+				reg-shift = <2>;
+				interrupts = <0 12 4>;
+				reg-io-width = <1>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			serial@12100 {
+				compatible = "snps,dw-apb-uart";
+				reg = <0x12100 0x100>;
+				reg-shift = <2>;
+				interrupts = <0 13 4>;
+				reg-io-width = <1>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			thermal@e8078 {
+				compatible = "marvell,armada380-thermal";
+				reg = <0xe4078 0x4>, <0xe4074 0x4>;
+				status = "okay";
+			};
+
+			spi0: spi@10600 {
+				compatible = "marvell,orion-spi";
+				reg = <0x10600 0x50>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				cell-index = <0>;
+				interrupts = <0 1 0x4>;
+				clocks = <&coreclk 0>;
+				num-cs = <4>;
+				status = "disabled";
+			};
+
+			spi1: spi@10680 {
+				compatible = "marvell,orion-spi";
+				reg = <0x10680 0x50>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				cell-index = <1>;
+				interrupts = <0 63 0x4>;
+				clocks = <&coreclk 0>;
+				num-cs = <4>;
+				status = "disabled";
+			};
+
+			nfc: nand@d0000 {
+				compatible = "marvell,armada-nand";
+				interrupts = <0 84 0x4>;
+				reg = <0xd0000 0x400>;
+				clocks = <&coredivclk 0>;
+				clock-names = "ecc_clk";
+				status = "disabled";
+			};
+
+			system-controller@18200 {
+				compatible = "marvell,armada-380-system-controller";
+				reg = <0x18200 0x100>;
+			};
+
+			timer@c600 {
+				compatible = "arm,cortex-a9-twd-timer";
+				reg = <0xc600 0x20>;
+				interrupts = <1 13 0x301>;
+				clocks = <&coreclk 2>;
+			};
+
+			usb@58000 {
+				compatible = "marvell,orion-ehci";
+				reg = <0x58000 0x500>;
+				interrupts = <0 18 4>;
+				clocks = <&gateclk 18>;
+			};
+
+			usb3@f0000 {
+				compatible = "marvell,xhci-armada-380";
+				reg = <0xf0000 0x3fff>,<0xf4000 0x3fff>;
+				interrupts = <0 16 0x4>;
+				clocks = <&gateclk 9>;
+			};
+
+			usb3@f8000 {
+				compatible = "marvell,xhci-armada-380";
+				reg = <0xf8000 0x3fff>,<0xfc000 0x3fff>;
+				interrupts = <0 17 0x4>;
+				clocks = <&gateclk 10>;
+			};
+
+			usb3-utmi@1842 {
+				compatible = "marvell,armada-380-usb-utmi";
+				reg = <0x18420 0x74>,<0xc0000 0x10000>;
+			};
+
+			timer@20300 {
+				compatible = "marvell,armada-380-timer";
+				reg = <0x20300 0x30>, <0x21040 0x30>;
+				interrupts-extended = <&gic  0  8 4>,
+						      <&gic  0  9 4>,
+						      <&gic  0 10 4>,
+						      <&gic  0 11 4>,
+						      <&mpic 5>,
+						      <&mpic 6>;
+				clocks = <&coreclk 2>, <&refclk>;
+				clock-names = "nbclk", "fixed";
+			};
+
+			xor@60800 {
+				compatible = "marvell,orion-xor";
+				reg = <0x60800 0x100
+				       0x60a00 0x100>;
+				clocks = <&gateclk 22>;
+				status = "okay";
+
+				xor00 {
+					interrupts = <0 22 0x4>;
+					dmacap,memcpy;
+					dmacap,xor;
+					dmacap,interrupt;
+				};
+				xor01 {
+					interrupts = <0 23 0x4>;
+					dmacap,crc32c;
+				};
+			};
+
+			xor@60900 {
+				compatible = "marvell,orion-xor";
+				reg = <0x60900 0x100
+				       0x60b00 0x100>;
+				clocks = <&gateclk 28>;
+				status = "okay";
+
+				xor10 {
+					interrupts = <0 65 0x4>;
+					dmacap,memcpy;
+					dmacap,xor;
+					dmacap,interrupt;
+				};
+				xor11 {
+					interrupts = <0 66 0x4>;
+					dmacap,crc32c;
+				};
+			};
+
+			crypto@9D000 {
+				compatible = "marvell,armada-cesa";
+				reg = <0x9D000 0x1000	/* cesa base reg chan 0 */
+				       0x90000 0x1000	/* tdma base reg chan 0 */
+				       0x9F000 0x1000	/* cesa base reg chan 1 */
+				       0x92000 0x1000>;	/* tdma base reg chan 1 */
+				clocks = <&gateclk 23>, <&gateclk 14>,
+				         <&gateclk 21>, <&gateclk 16>;
+				clock-names = "crypto0", "crypto0z",
+					      "crypto1", "crypto1z";
+				cesa,channels = <0x2>;
+				cesa,mode = "ocf";	/* ocf or test */
+				cesa,feature = "int_coalescing"; /* chain, int_coalescing
+							   or int_per_packet */
+
+				/* threshold and time_threshold relevant if
+				   int_coalescing in use */
+				cesa,threshold = <0x2>;
+				cesa,time_threshold = <0xff>;
+
+				cesa,ctrlModel = /bits/ 16 <0x6800>;
+				cesa,ctrlRev = /bits/ 8 <2>;
+				cesa,sramOffset = /bits/ 16 <0x40>;
+				status = "disabled";
+
+				crypto10 {
+					/* channel 0 */
+					interrupts = <0 19 0x4>;
+				};
+				crypto11 {
+					/* channel 1 */
+					interrupts = <0 20 0x4>;
+				};
+			};
+
+			pmu {
+				compatible = "arm,cortex-a9-pmu";
+				interrupts-extended = <&mpic 3>;
+			};
+		};
+	};
+
+	clocks {
+		/* 25 MHz reference crystal */
+		refclk: oscillator {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <25000000>;
+		};
+		/* 2 GHz fixed main PLL */
+		mainpll: mainpll {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <2000000000>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/Yellowstone/armada-385-db.dts b/arch/arm/boot/dts/Yellowstone/armada-385-db.dts
new file mode 100755
index 000000000000..5fa687d99f85
--- /dev/null
+++ b/arch/arm/boot/dts/Yellowstone/armada-385-db.dts
@@ -0,0 +1,187 @@
+/*
+ * Device Tree file for Marvell Armada 385 evaluation board
+ * (DB-88F6820)
+ *
+ *  Copyright (C) 2013 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "armada-385-388.dtsi"
+
+/ {
+	model = "Marvell Armada 385 Development Board";
+	compatible = "marvell,a385-db", "marvell,armada385", "marvell,armada38x";
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlyprintk";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>; /* 256 MB */
+	};
+
+	soc {
+
+		internal-regs {
+			ethernet@70000 {
+				status = "okay";
+				phy = <&phy0>;
+				phy-mode = "rgmii";
+			};
+
+			ethernet@30000 {
+				status = "okay";
+				phy = <&phy1>;
+				phy-mode = "sgmii";
+			};
+
+			i2c0: i2c@11000 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			i2c1: i2c@11100 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			mdio {
+				phy0: ethernet-phy@0 {
+					reg = <0>;
+				};
+
+				phy1: ethernet-phy@1 {
+					reg = <1>;
+				};
+			};
+
+			sata@a8000 {
+				status = "okay";
+			};
+
+			sata@e0000 {
+				status = "okay";
+			};
+
+			sdhci@d8000 {
+				broken-cd;
+				wp-inverted;
+				bus-width = <8>;
+				status = "okay";
+				no-1-8-v;
+			};
+
+      serial@12000 {
+				status = "okay";
+			};
+			
+			serial@12100 {
+				status = "okay";
+			};
+			
+			spi0: spi@10600 {
+				status = "okay";
+
+				spi-flash@0 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					compatible = "w25q32";
+					reg = <0>; /* Chip select 0 */
+					spi-max-frequency = <108000000>;
+				};
+			};
+
+			/*
+			 * 1GB Flash via NFC NAND controller
+			 * should be disabled when the board boots
+			 * from SPI flash, since NFC shares the same
+			 * pins with SPI0 and requires SLM-1358 jumper.
+			 * However the u-boot DTB parser will
+			 * handle this situation and disable/remove
+			 * unnessesary devices according to board
+			 * boot-up configuration.
+			 */
+			nfc: nand@d0000 {
+				status = "okay";
+				#address-cells = <1>;
+				#size-cells = <1>;
+
+				nfc,nfc-mode  = "normal";	/* normal or ganged */
+				nfc,nfc-dma   = <0>;		/* 0 for no, 1 for dma */
+				nfc,nfc-width = <8>;
+				nfc,ecc-type  = <1>;		/* 4 bit */
+				nfc,num-cs    = <1>;
+        
+				mtd0@00000000 {
+					label = "U-Boot";
+					reg = <0x00000000 0x00500000>;
+					read-only;
+				};
+
+				mtd1@00500000 {
+					label = "uImage";
+					reg = <0x00500000 0x00500000>;
+				};
+
+				mtd2@00a00000 {
+					label = "uRamdisk";
+					reg = <0x00a00000 0x00500000>; //BLUE_ADD for Alpha 512MB NAND
+				};
+				
+				mtd3@00f00000 {
+					label = "image.cfs";
+					reg = <0x00f00000 0x1b900000>;
+				};
+				
+				mtd4@1d200000 {
+					label = "rescue fw";
+					reg = <0x1c800000 0x00f00000>;
+				};
+				
+				mtd5@1e10000 {
+					label = "config";
+					reg = <0x1d700000 0x01400000>; 
+				};
+				
+				mtd6@1f500000 {
+					label = "reserve1";
+					reg = <0x1eb00000 0x00a00000>;
+				};
+				
+				mtd7@1fa00000 {
+					label = "reserve2";
+					reg = <0x1f500000 0x00a00000>;
+				};
+				
+				
+			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+			/*
+			 * The two PCIe units are accessible through
+			 * standard PCIe slots on the board.
+			 */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "okay";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/Yellowstone/armada-385-rd.dts b/arch/arm/boot/dts/Yellowstone/armada-385-rd.dts
new file mode 100644
index 000000000000..7bb7c60a2348
--- /dev/null
+++ b/arch/arm/boot/dts/Yellowstone/armada-385-rd.dts
@@ -0,0 +1,127 @@
+/*
+ * Device Tree file for Marvell Armada 385 Reference Design board
+ * (RD-88F6820-AP)
+ *
+ *  Copyright (C) 2013 Marvell
+ *
+ * Nadav Haklai <nadavh@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "armada-385-388.dtsi"
+
+/ {
+	model = "Marvell Armada 385 Reference Design";
+	compatible = "marvell,a385-rd", "marvell,armada385", "marvell,armada38x";
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlyprintk";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>; /* 256 MB */
+	};
+
+	soc {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+			  MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
+			  MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000	/* CESA0: PHYS=0xf1100000
+									   size 64K */
+			  MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;	/* CESA1: PHYS=0xf1110000
+									   size 64K */
+
+		internal-regs {
+			ethernet@70000 {
+				status = "okay";
+				phy = <&phy0>;
+				phy-mode = "rgmii";
+			};
+
+			ethernet@30000 {
+				status = "okay";
+				phy = <&phy1>;
+				phy-mode = "rgmii";
+			};
+
+			i2c0: i2c@11000 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			i2c1: i2c@11100 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			mdio {
+				phy0: ethernet-phy@1 {
+					reg = <1>;
+				};
+
+				phy1: ethernet-phy@0 {
+					reg = <0>;
+				};
+			};
+
+			sata@a8000 {
+				status = "okay";
+			};
+
+			sata@e0000 {
+				status = "okay";
+			};
+
+			sdhci@d8000 {
+				broken-cd;
+				wp-inverted;
+				bus-width = <8>;
+				status = "okay";
+			};
+
+			serial@12000 {
+				status = "okay";
+			};
+
+			spi0: spi@10600 {
+				status = "okay";
+
+				spi-flash@0 {
+					#address-cells = <1>;
+					#size-cells = <1>;
+					compatible = "w25q32";
+					reg = <0>; /* Chip select 0 */
+					spi-max-frequency = <108000000>;
+				};
+			};
+
+			usb3@f8000 {
+				status = "disabled";
+			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+			/*
+			 * The two PCIe units are accessible through
+			 * standard PCIe slots on the board.
+			 */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "okay";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/Yellowstone/armada-388-rd.dts b/arch/arm/boot/dts/Yellowstone/armada-388-rd.dts
new file mode 100755
index 000000000000..09adbd2e8097
--- /dev/null
+++ b/arch/arm/boot/dts/Yellowstone/armada-388-rd.dts
@@ -0,0 +1,131 @@
+/*
+ * Device Tree file for Marvell Armada 388 Reference Design board
+ * (RB-88F6828-NAS)
+ *
+ *  Copyright (C) 2013 Marvell
+ *
+ * Nadav Haklai <nadavh@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "armada-385-388.dtsi"
+
+/ {
+	model = "Marvell Armada 388 Reference Design";
+	compatible = "marvell,a388-rd", "marvell,armada388", "marvell,armada38x";
+
+	chosen {
+		bootargs = "root=/dev/ram console=ttyS0,115200 earlyprintk";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>; /* 256 MB */
+	};
+
+	soc {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+			  MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
+			  MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000	/* CESA0: PHYS=0xf1100000
+									   size 64K */
+			  MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;	/* CESA1: PHYS=0xf1110000
+									   size 64K */
+
+		internal-regs {
+			ethernet@70000 {
+				status = "okay";
+				phy = <&phy0>;
+				phy-mode = "rgmii";
+			};
+
+			ethernet@30000 {
+				status = "okay";
+				phy = <&phy1>;
+				phy-mode = "rgmii";
+			};
+
+			i2c0: i2c@11000 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			i2c1: i2c@11100 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			mdio {
+				phy0: ethernet-phy@1 {
+					reg = <1>;
+				};
+
+				phy1: ethernet-phy@0 {
+					reg = <0>;
+				};
+			};
+
+			sata@a8000 {
+				status = "okay";
+			};
+
+			sata@e0000 {
+				status = "okay";
+			};
+
+			sdhci@d8000 {
+				broken-cd;
+				wp-inverted;
+				bus-width = <8>;
+				status = "okay";
+			};
+
+			serial@12000 {
+				status = "okay";
+			};
+
+      serial@12100 {
+				status = "okay";
+			};
+			
+			spi0: spi@10600 {
+				status = "okay";
+
+				spi-flash@0 {
+					#address-cells = <1>;
+					#size-cells = <1>;
+					compatible = "w25q32";
+					reg = <0>; /* Chip select 0 */
+					spi-max-frequency = <108000000>;
+				};
+			};
+
+			usb3@f8000 {
+				status = "disabled";
+			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+			/*
+			 * The two PCIe units are accessible through
+			 * standard PCIe slots on the board.
+			 */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "disabled";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/Yellowstone/armada-38x.dtsi b/arch/arm/boot/dts/Yellowstone/armada-38x.dtsi
new file mode 100755
index 000000000000..a32b4ef19d3b
--- /dev/null
+++ b/arch/arm/boot/dts/Yellowstone/armada-38x.dtsi
@@ -0,0 +1,538 @@
+/*
+ * Device Tree Include file for Marvell Armada 38x family of SoCs.
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include "skeleton.dtsi"
+
+#define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16))
+
+/ {
+	model = "Marvell Armada 38x family SoC";
+	compatible = "marvell,armada38x";
+
+	aliases {
+		gpio0 = &gpio0;
+		gpio1 = &gpio1;
+	};
+
+	soc {
+		compatible = "marvell,armada380-mbus", "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <1>;
+		controller = <&mbusc>;
+		interrupt-parent = <&gic>;
+		pcie-mem-aperture = <0xe0000000 0x8000000>;
+		pcie-io-aperture  = <0xe8000000 0x100000>;
+
+		ranges = <
+			  MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+			  MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
+			  MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000	/* CESA0: PHYS=0xf1100000
+									   size 64K */
+			  MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000	/* CESA1: PHYS=0xf1110000 */
+			  MBUS_ID(0x0c, 0x04) 0 0xf1200000 0x100000	/* BM: PHYS=0xf1200000 size 1M */
+			  MBUS_ID(0x0b, 0x04) 0 0xf1300000 0x100000>;	/* PNC: PHYS=0xf1300000 size 1M */
+
+		bootrom {
+			compatible = "marvell,bootrom";
+			reg = <MBUS_ID(0x01, 0x1d) 0 0x200000>;
+		};
+
+		/* Security Accelerator SRAM (CESA) */
+		cesa-sram {
+			compatible = "marvell,cesa-sram";
+			reg = <MBUS_ID(0x09, 0x19) 0 0x10000   /*chan0*/
+			       MBUS_ID(0x09, 0x15) 0 0x10000>; /*chan1*/
+		};
+
+		devbus-bootcs {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10400 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x2f) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs0 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10408 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x3e) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs1 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10410 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x3d) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs2 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10418 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x3b) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs3 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10420 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x37) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		internal-regs {
+			compatible = "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 MBUS_ID(0xf0, 0x01) 0 0x100000>;
+
+			coherency-fabric@21010 {
+				compatible = "marvell,armada-380-coherency-fabric";
+				reg = <0x21010 0x1c>;
+			};
+
+			coreclk: mvebu-sar@18600 {
+				compatible = "marvell,armada-380-core-clock";
+				reg = <0x18600 0x04>;
+				#clock-cells = <1>;
+			};
+
+			coredivclk: corediv-clock@e4250 {
+				compatible = "marvell,armada-38x-corediv-clock";
+				reg = <0xe4250 0x8>;
+				#clock-cells = <1>;
+				clocks = <&mainpll>;
+				clock-output-names = "nand";
+			};
+
+			cpurst: cpurst@20800 {
+				compatible = "marvell,armada-380-cpu-reset";
+				reg = <0x20800 0x10>;
+				#reset-cells = <1>;
+			};
+
+			mpcore-soc-ctrl@20d20 {
+				compatible = "marvell,armada-380-mpcore-soc-ctrl";
+				reg = <0x20d20 0x6c>;
+			};
+
+			mdio {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "marvell,orion-mdio";
+				reg = <0x72004 0x4>;
+			};
+
+			/* PnC and BM */
+			bm_pnc@c0000 {
+				compatible = "marvell,neta_bm_pnc";
+				reg = <0xc8000 0xAC 0xb8000 0x48>;
+				clocks = <&gateclk 13>, <&gateclk 29>;
+				/*neta_cap_bm, bitmap of NETA dynamic capabilities, such as PNC, BM, HWF and PME
+				  PNC--0x1, BM--0x2, HWF--0x4, PME--0x8*/
+				neta_cap_bm = <0x3>;
+				pnc_tcam_size = <1024>;
+			};
+
+			eth0: ethernet@70000 {
+				compatible = "marvell,neta";
+				reg = <0x70000 0x4000>;
+				interrupts-extended = <&mpic 8>;
+				clocks = <&gateclk 4>;
+				tx-csum-limit = <9800>;
+				status = "disabled";
+				mac-address = [ 00 50 43 02 02 01 ];
+				eth,port-num    = <0>;
+				eth,port-mtu    = <1500>;
+			};
+
+			eth1: ethernet@30000 {
+				compatible = "marvell,neta";
+				reg = <0x30000 0x4000>;
+				interrupts-extended = <&mpic 10>;
+				clocks = <&gateclk 3>;
+				tx-csum-limit = <2048>;
+				status = "disabled";
+				mac-address = [ 00 50 43 02 02 02 ];
+				eth,port-num    = <1>;
+				eth,port-mtu    = <1500>;
+			};
+
+			eth2: ethernet@34000 {
+				compatible = "marvell,neta";
+				reg = <0x34000 0x4000>;
+				interrupts-extended = <&mpic 12>;
+				clocks = <&gateclk 2>;
+				tx-csum-limit = <2048>;
+				status = "disabled";
+				mac-address = [ 00 50 43 02 02 03 ];
+				eth,port-num    = <2>;
+				eth,port-mtu    = <1500>;
+			};
+
+			gateclk: clock-gating-control@18220 {
+				compatible = "marvell,armada-380-gating-clock";
+				reg = <0x18220 0x4>;
+				clocks = <&coreclk 0>;
+				#clock-cells = <1>;
+			};
+
+			gpio0: gpio@18100 {
+				compatible = "marvell,orion-gpio";
+				reg = <0x18100 0x40>;
+				ngpios = <32>;
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				interrupts = <0 53 0x4>, <0 54 0x4>,
+					     <0 55 0x4>, <0 56 0x4>;
+			};
+
+			gpio1: gpio@18140 {
+				compatible = "marvell,orion-gpio";
+				reg = <0x18140 0x40>;
+				ngpios = <28>;
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				interrupts = <0 58 0x4>, <0 59 0x4>,
+					     <0 60 0x4>, <0 61 0x4>;
+			};
+
+			i2c0: i2c@11000 {
+				compatible = "marvell,mv64xxx-i2c";
+				reg = <0x11000 0x20>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				interrupts = <0 2 0x4>;
+				timeout-ms = <1000>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			i2c1: i2c@11100 {
+				compatible = "marvell,mv64xxx-i2c";
+				reg = <0x11100 0x20>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				interrupts = <0 3 0x4>;
+				timeout-ms = <1000>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			gic: interrupt-controller@1e001000 {
+				compatible = "arm,cortex-a9-gic";
+				#interrupt-cells = <3>;
+				#size-cells = <0>;
+				interrupt-controller;
+				reg = <0xd000 0x1000>,
+				      <0xc100 0x100>;
+			};
+
+			L2: cache-controller {
+				compatible = "arm,pl310-cache";
+				reg = <0x8000 0x1000>;
+				cache-unified;
+				cache-level = <2>;
+			};
+
+			mbusc: mbus-controller@20000 {
+				compatible = "marvell,mbus-controller";
+				reg = <0x20000 0x100>, <0x20180 0x20>, <0x20250 0x8>;
+			};
+
+			mpic: interrupt-controller@20000 {
+				compatible = "marvell,mpic";
+				reg = <0x20a00 0x2d0>, <0x21070 0x58>, <0x21870 0x190>;
+				#interrupt-cells = <1>;
+				#size-cells = <1>;
+				interrupt-controller;
+				msi-controller;
+				interrupts = <1 15 0x4>;
+			};
+
+			pinctrl {
+				compatible = "marvell,mv88f6820-pinctrl";
+				reg = <0x18000 0x20>;
+			};
+
+			pmsu@22000 {
+				compatible = "marvell,armada-380-pmsu";
+				reg = <0x22000 0x1000>;
+			};
+
+			sdramc@1400 {
+				compatible = "marvell,armada-xp-sdram-controller";
+				reg = <0x1400 0x500>;
+			};
+
+			rtc@a3800 {
+				compatible = "marvell,mvebu-rtc";
+				reg = <0xa3800 0x20>, <0x184a0 0xc>;
+				interrupts = <0 21 0x4>;
+			};
+
+			sata@a8000 {
+				compatible = "marvell,ahci-sata";
+				reg = <0xa8000 0x2000>;
+				interrupts = <0 26 0x4>;
+				clocks = <&gateclk 15>;
+				status = "disabled";
+			};
+
+			sata@e0000 {
+				compatible = "marvell,ahci-sata";
+				reg = <0xe0000 0x2000>;
+				interrupts = <0 28 0x4>;
+				clocks = <&gateclk 30>;
+				status = "disabled";
+			};
+
+			sdhci@d8000 {
+				compatible = "marvell,armada-380-sdhci";
+				reg = <0xd8000 0x1000>, <0xdc000 0x100>,
+				      <0x18454 0x4>;
+				interrupts = <0 25 0x4>;
+				clocks = <&gateclk 17>;
+				mrvl,clk-delay-cycles = <0x1F>;
+				status = "disabled";
+			};
+
+			scu@c000 {
+				compatible = "arm,cortex-a9-scu";
+				reg = <0xc000 0x58>;
+			};
+
+			serial@12000 {
+				compatible = "snps,dw-apb-uart";
+				reg = <0x12000 0x100>;
+				reg-shift = <2>;
+				interrupts = <0 12 4>;
+				reg-io-width = <1>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			serial@12100 {
+				compatible = "snps,dw-apb-uart";
+				reg = <0x12100 0x100>;
+				reg-shift = <2>;
+				interrupts = <0 13 4>;
+				reg-io-width = <1>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			thermal@e8078 {
+				compatible = "marvell,armada380-thermal";
+				reg = <0xe4078 0x4>, <0xe4074 0x4>;
+				status = "okay";
+			};
+
+			spi0: spi@10600 {
+				compatible = "marvell,orion-spi";
+				reg = <0x10600 0x50>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				cell-index = <0>;
+				interrupts = <0 1 0x4>;
+				clocks = <&coreclk 0>;
+				num-cs = <4>;
+				status = "disabled";
+			};
+
+			spi1: spi@10680 {
+				compatible = "marvell,orion-spi";
+				reg = <0x10680 0x50>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				cell-index = <1>;
+				interrupts = <0 63 0x4>;
+				clocks = <&coreclk 0>;
+				num-cs = <4>;
+				status = "disabled";
+			};
+
+			nfc: nand@d0000 {
+				compatible = "marvell,armada-nand";
+				interrupts = <0 84 0x4>;
+				reg = <0xd0000 0x400>;
+				clocks = <&coredivclk 0>;
+				clock-names = "ecc_clk";
+				status = "disabled";
+			};
+
+			system-controller@18200 {
+				compatible = "marvell,armada-380-system-controller";
+				reg = <0x18200 0x100>;
+			};
+
+			timer@c600 {
+				compatible = "arm,cortex-a9-twd-timer";
+				reg = <0xc600 0x20>;
+				interrupts = <1 13 0x301>;
+				clocks = <&coreclk 2>;
+			};
+
+			usb@58000 {
+				compatible = "marvell,orion-ehci";
+				reg = <0x58000 0x500>;
+				interrupts = <0 18 4>;
+				clocks = <&gateclk 18>;
+			};
+
+			usb3@f0000 {
+				compatible = "marvell,xhci-armada-380";
+				reg = <0xf0000 0x3fff>,<0xf4000 0x3fff>;
+				interrupts = <0 16 0x4>;
+				clocks = <&gateclk 9>;
+			};
+
+/*			usb3@f8000 {
+				compatible = "marvell,xhci-armada-380";
+				reg = <0xf8000 0x3fff>,<0xfc000 0x3fff>;
+				interrupts = <0 17 0x4>;
+				clocks = <&gateclk 10>;
+			};
+*/
+			usb3-utmi@1842 {
+				compatible = "marvell,armada-380-usb-utmi";
+				reg = <0x18420 0x74>,<0xc0000 0x10000>;
+			};
+
+			timer@20300 {
+				compatible = "marvell,armada-380-timer";
+				reg = <0x20300 0x30>, <0x21040 0x30>;
+				interrupts-extended = <&gic  0  8 4>,
+						      <&gic  0  9 4>,
+						      <&gic  0 10 4>,
+						      <&gic  0 11 4>,
+						      <&mpic 5>,
+						      <&mpic 6>;
+				clocks = <&coreclk 2>, <&refclk>;
+				clock-names = "nbclk", "fixed";
+			};
+
+			xor@60800 {
+				compatible = "marvell,orion-xor";
+				reg = <0x60800 0x100
+				       0x60a00 0x100>;
+				clocks = <&gateclk 22>;
+				status = "okay";
+
+				xor00 {
+					interrupts = <0 22 0x4>;
+					dmacap,memcpy;
+					dmacap,xor;
+					dmacap,interrupt;
+				};
+				xor01 {
+					interrupts = <0 23 0x4>;
+					dmacap,crc32c;
+				};
+			};
+
+			xor@60900 {
+				compatible = "marvell,orion-xor";
+				reg = <0x60900 0x100
+				       0x60b00 0x100>;
+				clocks = <&gateclk 28>;
+				status = "okay";
+
+				xor10 {
+					interrupts = <0 65 0x4>;
+					dmacap,memcpy;
+					dmacap,xor;
+					dmacap,interrupt;
+				};
+				xor11 {
+					interrupts = <0 66 0x4>;
+					dmacap,crc32c;
+				};
+			};
+
+			crypto@9D000 {
+				compatible = "marvell,armada-cesa";
+				reg = <0x9D000 0x1000	/* cesa base reg chan 0 */
+				       0x90000 0x1000	/* tdma base reg chan 0 */
+				       0x9F000 0x1000	/* cesa base reg chan 1 */
+				       0x92000 0x1000>;	/* tdma base reg chan 1 */
+				clocks = <&gateclk 23>, <&gateclk 14>,
+				         <&gateclk 21>, <&gateclk 16>;
+				clock-names = "crypto0", "crypto0z",
+					      "crypto1", "crypto1z";
+				cesa,channels = <0x2>;
+				cesa,mode = "ocf";	/* ocf or test */
+				cesa,feature = "chain"; /* chain, int_coalescing
+							   or int_per_packet */
+
+				/* threshold and time_threshold relevant if
+				   int_coalescing in use */
+				cesa,threshold = <0x2>;
+				cesa,time_threshold = <0xff>;
+
+				cesa,ctrlModel = /bits/ 16 <0x6800>;
+				cesa,ctrlRev = /bits/ 8 <2>;
+				cesa,sramOffset = /bits/ 16 <0x40>;
+				status = "disabled";
+
+				crypto10 {
+					/* channel 0 */
+					interrupts = <0 19 0x4>;
+				};
+				crypto11 {
+					/* channel 1 */
+					interrupts = <0 20 0x4>;
+				};
+			};
+
+			pmu {
+				compatible = "arm,cortex-a9-pmu";
+				interrupts-extended = <&mpic 3>;
+			};
+		};
+	};
+
+	clocks {
+		/* 25 MHz reference crystal */
+		refclk: oscillator {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <25000000>;
+		};
+		/* 2 GHz fixed main PLL */
+		mainpll: mainpll {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <2000000000>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/Yosemite/armada-385-db.dts b/arch/arm/boot/dts/Yosemite/armada-385-db.dts
new file mode 100755
index 000000000000..3cc651a9b7df
--- /dev/null
+++ b/arch/arm/boot/dts/Yosemite/armada-385-db.dts
@@ -0,0 +1,184 @@
+/*
+ * Device Tree file for Marvell Armada 385 evaluation board
+ * (DB-88F6820)
+ *
+ *  Copyright (C) 2013 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "armada-385-388.dtsi"
+
+/ {
+	model = "Marvell Armada 385 Development Board";
+	compatible = "marvell,a385-db", "marvell,armada385", "marvell,armada38x";
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlyprintk";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>; /* 256 MB */
+	};
+
+	soc {
+
+		internal-regs {
+			ethernet@70000 {
+				status = "okay";
+				phy = <&phy0>;
+				phy-mode = "rgmii";
+			};
+
+			ethernet@34000 {
+				status = "okay";
+				phy = <&phy1>;
+				phy-mode = "sgmii";
+			};
+
+			i2c0: i2c@11000 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			i2c1: i2c@11100 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			mdio {
+				phy0: ethernet-phy@0 {
+					reg = <0>;
+				};
+
+				phy1: ethernet-phy@1 {
+					reg = <1>;
+				};
+			};
+
+			sata@a8000 {
+				status = "okay";
+			};
+			
+			sdhci@d8000 {
+				broken-cd;
+				wp-inverted;
+				bus-width = <8>;
+				status = "okay";
+				no-1-8-v;
+			};
+
+			serial@12100 {
+				status = "okay";
+			};
+      
+			serial@12000 {
+				status = "okay";
+			};
+
+			spi0: spi@10600 {
+				status = "okay";
+
+				spi-flash@0 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					compatible = "w25q32";
+					reg = <0>; /* Chip select 0 */
+					spi-max-frequency = <108000000>;
+				};
+			};
+
+			/*
+			 * 1GB Flash via NFC NAND controller
+			 * should be disabled when the board boots
+			 * from SPI flash, since NFC shares the same
+			 * pins with SPI0 and requires SLM-1358 jumper.
+			 * However the u-boot DTB parser will
+			 * handle this situation and disable/remove
+			 * unnessesary devices according to board
+			 * boot-up configuration.
+			 */
+			nfc: nand@d0000 {
+				status = "okay";
+				#address-cells = <1>;
+				#size-cells = <1>;
+
+				nfc,nfc-mode  = "normal";	/* normal or ganged */
+				nfc,nfc-dma   = <0>;		/* 0 for no, 1 for dma */
+				nfc,nfc-width = <8>;
+				nfc,ecc-type  = <1>;		/* 4 bit */
+				nfc,num-cs    = <1>;
+        status = "okay"; //BLUE_ADD... 
+
+				mtd0@00000000 {
+					label = "U-Boot";
+					reg = <0x00000000 0x00500000>;
+					read-only;
+				};
+
+				mtd1@00500000 {
+					label = "uImage";
+					reg = <0x00500000 0x00500000>;
+				};
+
+				mtd2@00a00000 {
+					label = "uRamdisk";
+					reg = <0x00a00000 0x00500000>; //BLUE_ADD for Alpha 512MB NAND
+				};
+				
+				mtd3@00f00000 {
+					label = "image.cfs";
+					reg = <0x00f00000 0x1b900000>;
+				};
+
+				mtd4@1d200000 {
+					label = "rescue fw";
+					reg = <0x1c800000 0x00f00000>;
+				};
+				
+				mtd5@1e10000 {
+					label = "config";
+					reg = <0x1d700000 0x01400000>; 
+				};
+				
+				mtd6@1f500000 {
+					label = "reserve1";
+					reg = <0x1eb00000 0x00a00000>;
+				};
+				
+				mtd7@1fa00000 {
+					label = "reserve2";
+					reg = <0x1f500000 0x00a00000>;
+				};
+				
+				
+			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+			/*
+			 * The two PCIe units are accessible through
+			 * standard PCIe slots on the board.
+			 */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "okay";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/Yosemite/armada-385-rd.dts b/arch/arm/boot/dts/Yosemite/armada-385-rd.dts
new file mode 100755
index 000000000000..758967f097ef
--- /dev/null
+++ b/arch/arm/boot/dts/Yosemite/armada-385-rd.dts
@@ -0,0 +1,128 @@
+/*
+ * Device Tree file for Marvell Armada 385 Reference Design board
+ * (RD-88F6820-AP)
+ *
+ *  Copyright (C) 2013 Marvell
+ *
+ * Nadav Haklai <nadavh@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "armada-385-388.dtsi"
+
+/ {
+	model = "Marvell Armada 385 Reference Design";
+	compatible = "marvell,a385-rd", "marvell,armada385", "marvell,armada38x";
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlyprintk";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>; /* 256 MB */
+	};
+
+	soc {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+			  MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
+			  MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000	/* CESA0: PHYS=0xf1100000
+									   size 64K */
+			  MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;	/* CESA1: PHYS=0xf1110000
+									   size 64K */
+
+		internal-regs {
+			ethernet@70000 {
+				status = "okay";
+				phy = <&phy0>;
+				phy-mode = "rgmii";
+			};
+
+			ethernet@30000 {
+				status = "okay";
+				phy = <&phy1>;
+				phy-mode = "rgmii";
+			};
+
+			i2c0: i2c@11000 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			i2c1: i2c@11100 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			mdio {
+				phy0: ethernet-phy@1 {
+					reg = <1>;
+				};
+
+				phy1: ethernet-phy@0 {
+					reg = <0>;
+				};
+			};
+
+			sata@a8000 {
+				status = "okay";
+			};
+/*
+HWALOCK_ADD
+			sata@e0000 {
+				status = "okay";
+			};
+*/
+			sdhci@d8000 {
+				broken-cd;
+				wp-inverted;
+				bus-width = <8>;
+				status = "okay";
+			};
+
+			serial@12000 {
+				status = "okay";
+			};
+
+			spi0: spi@10600 {
+				status = "okay";
+
+				spi-flash@0 {
+					#address-cells = <1>;
+					#size-cells = <1>;
+					compatible = "w25q32";
+					reg = <0>; /* Chip select 0 */
+					spi-max-frequency = <108000000>;
+				};
+			};
+
+			usb3@f8000 {
+				status = "disabled";
+			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+			/*
+			 * The two PCIe units are accessible through
+			 * standard PCIe slots on the board.
+			 */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "okay";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/Yosemite/armada-388-rd.dts b/arch/arm/boot/dts/Yosemite/armada-388-rd.dts
new file mode 100755
index 000000000000..64bbf97171f8
--- /dev/null
+++ b/arch/arm/boot/dts/Yosemite/armada-388-rd.dts
@@ -0,0 +1,132 @@
+/*
+ * Device Tree file for Marvell Armada 388 Reference Design board
+ * (RB-88F6828-NAS)
+ *
+ *  Copyright (C) 2013 Marvell
+ *
+ * Nadav Haklai <nadavh@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "armada-385-388.dtsi"
+
+/ {
+	model = "Marvell Armada 388 Reference Design";
+	compatible = "marvell,a388-rd", "marvell,armada388", "marvell,armada38x";
+
+	chosen {
+		bootargs = "root=/dev/ram console=ttyS0,115200 earlyprintk";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>; /* 256 MB */
+	};
+
+	soc {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+			  MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
+			  MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000	/* CESA0: PHYS=0xf1100000
+									   size 64K */
+			  MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;	/* CESA1: PHYS=0xf1110000
+									   size 64K */
+
+		internal-regs {
+			ethernet@70000 {
+				status = "okay";
+				phy = <&phy0>;
+				phy-mode = "rgmii";
+			};
+
+			ethernet@30000 {
+				status = "okay";
+				phy = <&phy1>;
+				phy-mode = "rgmii";
+			};
+
+			i2c0: i2c@11000 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			i2c1: i2c@11100 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			mdio {
+				phy0: ethernet-phy@1 {
+					reg = <1>;
+				};
+
+				phy1: ethernet-phy@0 {
+					reg = <0>;
+				};
+			};
+
+			sata@a8000 {
+				status = "okay";
+			};
+/*
+HWALOCK_ADD
+			sata@e0000 {
+				status = "okay";
+			};
+*/
+			sdhci@d8000 {
+				broken-cd;
+				wp-inverted;
+				bus-width = <8>;
+				status = "okay";
+			};
+
+			serial@12000 {
+				status = "okay";
+			};
+
+      serial@12100 {
+				status = "okay";
+			};
+			
+			spi0: spi@10600 {
+				status = "okay";
+
+				spi-flash@0 {
+					#address-cells = <1>;
+					#size-cells = <1>;
+					compatible = "w25q32";
+					reg = <0>; /* Chip select 0 */
+					spi-max-frequency = <108000000>;
+				};
+			};
+
+			usb3@f8000 {
+				status = "disabled";
+			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+			/*
+			 * The two PCIe units are accessible through
+			 * standard PCIe slots on the board.
+			 */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "disabled";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/Yosemite/armada-38x.dtsi b/arch/arm/boot/dts/Yosemite/armada-38x.dtsi
new file mode 100755
index 000000000000..d6c3db8a7404
--- /dev/null
+++ b/arch/arm/boot/dts/Yosemite/armada-38x.dtsi
@@ -0,0 +1,539 @@
+/*
+ * Device Tree Include file for Marvell Armada 38x family of SoCs.
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include "skeleton.dtsi"
+
+#define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16))
+
+/ {
+	model = "Marvell Armada 38x family SoC";
+	compatible = "marvell,armada38x";
+
+	aliases {
+		gpio0 = &gpio0;
+		gpio1 = &gpio1;
+	};
+
+	soc {
+		compatible = "marvell,armada380-mbus", "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <1>;
+		controller = <&mbusc>;
+		interrupt-parent = <&gic>;
+		pcie-mem-aperture = <0xe0000000 0x8000000>;
+		pcie-io-aperture  = <0xe8000000 0x100000>;
+
+		ranges = <
+			  MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+			  MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
+			  MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000	/* CESA0: PHYS=0xf1100000
+									   size 64K */
+			  MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000	/* CESA1: PHYS=0xf1110000 */
+			  MBUS_ID(0x0c, 0x04) 0 0xf1200000 0x100000	/* BM: PHYS=0xf1200000 size 1M */
+			  MBUS_ID(0x0b, 0x04) 0 0xf1300000 0x100000>;	/* PNC: PHYS=0xf1300000 size 1M */
+
+		bootrom {
+			compatible = "marvell,bootrom";
+			reg = <MBUS_ID(0x01, 0x1d) 0 0x200000>;
+		};
+
+		/* Security Accelerator SRAM (CESA) */
+		cesa-sram {
+			compatible = "marvell,cesa-sram";
+			reg = <MBUS_ID(0x09, 0x19) 0 0x10000   /*chan0*/
+			       MBUS_ID(0x09, 0x15) 0 0x10000>; /*chan1*/
+		};
+
+		devbus-bootcs {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10400 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x2f) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs0 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10408 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x3e) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs1 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10410 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x3d) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs2 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10418 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x3b) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs3 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10420 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x37) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		internal-regs {
+			compatible = "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 MBUS_ID(0xf0, 0x01) 0 0x100000>;
+
+			coherency-fabric@21010 {
+				compatible = "marvell,armada-380-coherency-fabric";
+				reg = <0x21010 0x1c>;
+			};
+
+			coreclk: mvebu-sar@18600 {
+				compatible = "marvell,armada-380-core-clock";
+				reg = <0x18600 0x04>;
+				#clock-cells = <1>;
+			};
+
+			coredivclk: corediv-clock@e4250 {
+				compatible = "marvell,armada-38x-corediv-clock";
+				reg = <0xe4250 0x8>;
+				#clock-cells = <1>;
+				clocks = <&mainpll>;
+				clock-output-names = "nand";
+			};
+
+			cpurst: cpurst@20800 {
+				compatible = "marvell,armada-380-cpu-reset";
+				reg = <0x20800 0x10>;
+				#reset-cells = <1>;
+			};
+
+			mpcore-soc-ctrl@20d20 {
+				compatible = "marvell,armada-380-mpcore-soc-ctrl";
+				reg = <0x20d20 0x6c>;
+			};
+
+			mdio {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "marvell,orion-mdio";
+				reg = <0x72004 0x4>;
+			};
+
+			/* PnC and BM */
+			bm_pnc@c0000 {
+				compatible = "marvell,neta_bm_pnc";
+				reg = <0xc8000 0xAC 0xb8000 0x48>;
+				clocks = <&gateclk 13>, <&gateclk 29>;
+				/*neta_cap_bm, bitmap of NETA dynamic capabilities, such as PNC, BM, HWF and PME
+				  PNC--0x1, BM--0x2, HWF--0x4, PME--0x8*/
+				neta_cap_bm = <0x3>;
+				pnc_tcam_size = <1024>;
+			};
+
+			eth0: ethernet@70000 {
+				compatible = "marvell,neta";
+				reg = <0x70000 0x4000>;
+				interrupts-extended = <&mpic 8>;
+				clocks = <&gateclk 4>;
+				tx-csum-limit = <9800>;
+				status = "disabled";
+				mac-address = [ 00 50 43 02 02 01 ];
+				eth,port-num    = <0>;
+				eth,port-mtu    = <1500>;
+			};
+
+			eth1: ethernet@30000 {
+				compatible = "marvell,neta";
+				reg = <0x30000 0x4000>;
+				interrupts-extended = <&mpic 10>;
+				clocks = <&gateclk 3>;
+				tx-csum-limit = <2048>;
+				status = "disabled";
+				mac-address = [ 00 50 43 02 02 02 ];
+				eth,port-num    = <1>;
+				eth,port-mtu    = <1500>;
+			};
+
+			eth2: ethernet@34000 {
+				compatible = "marvell,neta";
+				reg = <0x34000 0x4000>;
+				interrupts-extended = <&mpic 12>;
+				clocks = <&gateclk 2>;
+				tx-csum-limit = <2048>;
+				status = "disabled";
+				mac-address = [ 00 50 43 02 02 03 ];
+				eth,port-num    = <2>;
+				eth,port-mtu    = <1500>;
+			};
+
+			gateclk: clock-gating-control@18220 {
+				compatible = "marvell,armada-380-gating-clock";
+				reg = <0x18220 0x4>;
+				clocks = <&coreclk 0>;
+				#clock-cells = <1>;
+			};
+
+			gpio0: gpio@18100 {
+				compatible = "marvell,orion-gpio";
+				reg = <0x18100 0x40>;
+				ngpios = <32>;
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				interrupts = <0 53 0x4>, <0 54 0x4>,
+					     <0 55 0x4>, <0 56 0x4>;
+			};
+
+			gpio1: gpio@18140 {
+				compatible = "marvell,orion-gpio";
+				reg = <0x18140 0x40>;
+				ngpios = <28>;
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				interrupts = <0 58 0x4>, <0 59 0x4>,
+					     <0 60 0x4>, <0 61 0x4>;
+			};
+
+			i2c0: i2c@11000 {
+				compatible = "marvell,mv64xxx-i2c";
+				reg = <0x11000 0x20>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				interrupts = <0 2 0x4>;
+				timeout-ms = <1000>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			i2c1: i2c@11100 {
+				compatible = "marvell,mv64xxx-i2c";
+				reg = <0x11100 0x20>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				interrupts = <0 3 0x4>;
+				timeout-ms = <1000>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			gic: interrupt-controller@1e001000 {
+				compatible = "arm,cortex-a9-gic";
+				#interrupt-cells = <3>;
+				#size-cells = <0>;
+				interrupt-controller;
+				reg = <0xd000 0x1000>,
+				      <0xc100 0x100>;
+			};
+
+			L2: cache-controller {
+				compatible = "arm,pl310-cache";
+				reg = <0x8000 0x1000>;
+				cache-unified;
+				cache-level = <2>;
+			};
+
+			mbusc: mbus-controller@20000 {
+				compatible = "marvell,mbus-controller";
+				reg = <0x20000 0x100>, <0x20180 0x20>, <0x20250 0x8>;
+			};
+
+			mpic: interrupt-controller@20000 {
+				compatible = "marvell,mpic";
+				reg = <0x20a00 0x2d0>, <0x21070 0x58>, <0x21870 0x190>;
+				#interrupt-cells = <1>;
+				#size-cells = <1>;
+				interrupt-controller;
+				msi-controller;
+				interrupts = <1 15 0x4>;
+			};
+
+			pinctrl {
+				compatible = "marvell,mv88f6820-pinctrl";
+				reg = <0x18000 0x20>;
+			};
+
+			pmsu@22000 {
+				compatible = "marvell,armada-380-pmsu";
+				reg = <0x22000 0x1000>;
+			};
+
+			sdramc@1400 {
+				compatible = "marvell,armada-xp-sdram-controller";
+				reg = <0x1400 0x500>;
+			};
+
+			rtc@a3800 {
+				compatible = "marvell,mvebu-rtc";
+				reg = <0xa3800 0x20>, <0x184a0 0xc>;
+				interrupts = <0 21 0x4>;
+			};
+
+			sata@a8000 {
+				compatible = "marvell,ahci-sata";
+				reg = <0xa8000 0x2000>;
+				interrupts = <0 26 0x4>;
+				clocks = <&gateclk 15>;
+				status = "disabled";
+			};
+/*
+HWALOCK_ADD
+			sata@e0000 {
+				compatible = "marvell,ahci-sata";
+				reg = <0xe0000 0x2000>;
+				interrupts = <0 28 0x4>;
+				clocks = <&gateclk 30>;
+				status = "disabled";
+			};
+
+			sdhci@d8000 {
+				compatible = "marvell,armada-380-sdhci";
+				reg = <0xd8000 0x1000>, <0xdc000 0x100>,
+				      <0x18454 0x4>;
+				interrupts = <0 25 0x4>;
+				clocks = <&gateclk 17>;
+				mrvl,clk-delay-cycles = <0x1F>;
+				status = "disabled";
+			};
+*/
+			scu@c000 {
+				compatible = "arm,cortex-a9-scu";
+				reg = <0xc000 0x58>;
+			};
+
+			serial@12000 {
+				compatible = "snps,dw-apb-uart";
+				reg = <0x12000 0x100>;
+				reg-shift = <2>;
+				interrupts = <0 12 4>;
+				reg-io-width = <1>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			serial@12100 {
+				compatible = "snps,dw-apb-uart";
+				reg = <0x12100 0x100>;
+				reg-shift = <2>;
+				interrupts = <0 13 4>;
+				reg-io-width = <1>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			thermal@e8078 {
+				compatible = "marvell,armada380-thermal";
+				reg = <0xe4078 0x4>, <0xe4074 0x4>;
+				status = "okay";
+			};
+/*
+			spi0: spi@10600 {
+				compatible = "marvell,orion-spi";
+				reg = <0x10600 0x50>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				cell-index = <0>;
+				interrupts = <0 1 0x4>;
+				clocks = <&coreclk 0>;
+				num-cs = <4>;
+				status = "disabled";
+			};
+
+			spi1: spi@10680 {
+				compatible = "marvell,orion-spi";
+				reg = <0x10680 0x50>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				cell-index = <1>;
+				interrupts = <0 63 0x4>;
+				clocks = <&coreclk 0>;
+				num-cs = <4>;
+				status = "disabled";
+			};
+*/
+			nfc: nand@d0000 {
+				compatible = "marvell,armada-nand";
+				interrupts = <0 84 0x4>;
+				reg = <0xd0000 0x400>;
+				clocks = <&coredivclk 0>;
+				clock-names = "ecc_clk";
+				status = "disabled";
+			};
+
+			system-controller@18200 {
+				compatible = "marvell,armada-380-system-controller";
+				reg = <0x18200 0x100>;
+			};
+
+			timer@c600 {
+				compatible = "arm,cortex-a9-twd-timer";
+				reg = <0xc600 0x20>;
+				interrupts = <1 13 0x301>;
+				clocks = <&coreclk 2>;
+			};
+
+			usb@58000 {
+				compatible = "marvell,orion-ehci";
+				reg = <0x58000 0x500>;
+				interrupts = <0 18 4>;
+				clocks = <&gateclk 18>;
+			};
+
+			usb3@f0000 {
+				compatible = "marvell,xhci-armada-380";
+				reg = <0xf0000 0x3fff>,<0xf4000 0x3fff>;
+				interrupts = <0 16 0x4>;
+				clocks = <&gateclk 9>;
+			};
+
+			usb3@f8000 {
+				compatible = "marvell,xhci-armada-380";
+				reg = <0xf8000 0x3fff>,<0xfc000 0x3fff>;
+				interrupts = <0 17 0x4>;
+				clocks = <&gateclk 10>;
+			};
+
+			usb3-utmi@1842 {
+				compatible = "marvell,armada-380-usb-utmi";
+				reg = <0x18420 0x74>,<0xc0000 0x10000>;
+			};
+
+			timer@20300 {
+				compatible = "marvell,armada-380-timer";
+				reg = <0x20300 0x30>, <0x21040 0x30>;
+				interrupts-extended = <&gic  0  8 4>,
+						      <&gic  0  9 4>,
+						      <&gic  0 10 4>,
+						      <&gic  0 11 4>,
+						      <&mpic 5>,
+						      <&mpic 6>;
+				clocks = <&coreclk 2>, <&refclk>;
+				clock-names = "nbclk", "fixed";
+			};
+
+			xor@60800 {
+				compatible = "marvell,orion-xor";
+				reg = <0x60800 0x100
+				       0x60a00 0x100>;
+				clocks = <&gateclk 22>;
+				status = "okay";
+
+				xor00 {
+					interrupts = <0 22 0x4>;
+					dmacap,memcpy;
+					dmacap,xor;
+					dmacap,interrupt;
+				};
+				xor01 {
+					interrupts = <0 23 0x4>;
+					dmacap,crc32c;
+				};
+			};
+
+			xor@60900 {
+				compatible = "marvell,orion-xor";
+				reg = <0x60900 0x100
+				       0x60b00 0x100>;
+				clocks = <&gateclk 28>;
+				status = "okay";
+
+				xor10 {
+					interrupts = <0 65 0x4>;
+					dmacap,memcpy;
+					dmacap,xor;
+					dmacap,interrupt;
+				};
+				xor11 {
+					interrupts = <0 66 0x4>;
+					dmacap,crc32c;
+				};
+			};
+
+			crypto@9D000 {
+				compatible = "marvell,armada-cesa";
+				reg = <0x9D000 0x1000	/* cesa base reg chan 0 */
+				       0x90000 0x1000	/* tdma base reg chan 0 */
+				       0x9F000 0x1000	/* cesa base reg chan 1 */
+				       0x92000 0x1000>;	/* tdma base reg chan 1 */
+				clocks = <&gateclk 23>, <&gateclk 14>,
+				         <&gateclk 21>, <&gateclk 16>;
+				clock-names = "crypto0", "crypto0z",
+					      "crypto1", "crypto1z";
+				cesa,channels = <0x2>;
+				cesa,mode = "ocf";	/* ocf or test */
+				cesa,feature = "chain"; /* chain, int_coalescing
+							   or int_per_packet */
+
+				/* threshold and time_threshold relevant if
+				   int_coalescing in use */
+				cesa,threshold = <0x2>;
+				cesa,time_threshold = <0xff>;
+
+				cesa,ctrlModel = /bits/ 16 <0x6800>;
+				cesa,ctrlRev = /bits/ 8 <2>;
+				cesa,sramOffset = /bits/ 16 <0x40>;
+				status = "disabled";
+
+				crypto10 {
+					/* channel 0 */
+					interrupts = <0 19 0x4>;
+				};
+				crypto11 {
+					/* channel 1 */
+					interrupts = <0 20 0x4>;
+				};
+			};
+
+			pmu {
+				compatible = "arm,cortex-a9-pmu";
+				interrupts-extended = <&mpic 3>;
+			};
+		};
+	};
+
+	clocks {
+		/* 25 MHz reference crystal */
+		refclk: oscillator {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <25000000>;
+		};
+		/* 2 GHz fixed main PLL */
+		mainpll: mainpll {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <2000000000>;
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts
index 2353b1f13704..5a1384a443ca 100644
--- a/arch/arm/boot/dts/armada-370-db.dts
+++ b/arch/arm/boot/dts/armada-370-db.dts
@@ -14,7 +14,7 @@
  */
 
 /dts-v1/;
-/include/ "armada-370.dtsi"
+#include "armada-370.dtsi"
 
 / {
 	model = "Marvell Armada 370 Evaluation Board";
@@ -30,9 +30,13 @@
 	};
 
 	soc {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+			  MBUS_ID(0x01, 0xe0) 0 0xfff00000 0x100000
+			  MBUS_ID(0x09, 0x01) 0 0xf1100000 0x10000>;	/* CESA0: PHYS=0xf1100000
+									   size 64K */
+
 		internal-regs {
 			serial@12000 {
-				clock-frequency = <200000000>;
 				status = "okay";
 			};
 			sata@a0000 {
@@ -74,6 +78,7 @@
 				 */
 				status = "disabled";
 				/* No CD or WP GPIOs */
+				broken-cd;
 			};
 
 			usb@50000 {
@@ -96,22 +101,56 @@
 				};
 			};
 
-			pcie-controller {
+			/* 1GB Flash via NFC NAND controller */
+			nfc: nand-flash@d0000 {
+				#address-cells = <1>;
+				#size-cells = <1>;
 				status = "okay";
-				/*
-				 * The two PCIe units are accessible through
-				 * both standard PCIe slots and mini-PCIe
-				 * slots on the board.
-				 */
-				pcie@1,0 {
-					/* Port 0, Lane 0 */
-					status = "okay";
+
+				nfc,nfc-mode  = "normal";       /* normal or ganged */
+				nfc,nfc-dma   = <0>;            /* 0 for no, 1 for dma */
+				nfc,nfc-width = <8>;
+				nfc,ecc-type  = <1>;            /* 4 bit */
+				nfc,num-cs    = <1>;
+
+				mtd0@00000000 {
+					label = "U-Boot";
+					reg = <0x00000000 0x00300000>;
+					read-only;
 				};
-				pcie@2,0 {
-					/* Port 1, Lane 0 */
-					status = "okay";
+
+				mtd1@00080000 {
+					label = "uImage";
+					reg = <0x00300000 0x00400000>;
+					read-only;
+				};
+
+				mtd2@00140000 {
+					label = "Root";
+					reg = <0x00700000 0x3f900000>;
 				};
 			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+			/*
+			 * The two PCIe units are accessible through
+			 * both standard PCIe slots and mini-PCIe
+			 * slots on the board.
+			 */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "okay";
+			};
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/armada-370-mirabox.dts b/arch/arm/boot/dts/armada-370-mirabox.dts
index 14e36e19d515..f5cdab86315d 100644
--- a/arch/arm/boot/dts/armada-370-mirabox.dts
+++ b/arch/arm/boot/dts/armada-370-mirabox.dts
@@ -9,7 +9,7 @@
  */
 
 /dts-v1/;
-/include/ "armada-370.dtsi"
+#include "armada-370.dtsi"
 
 / {
 	model = "Globalscale Mirabox";
@@ -25,9 +25,27 @@
 	};
 
 	soc {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xd0000000 0x100000
+			  MBUS_ID(0x01, 0xe0) 0 0xfff00000 0x100000>;
+
+		pcie-controller {
+			status = "okay";
+
+			/* Internal mini-PCIe connector */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+
+			/* Connected on the PCB to a USB 3.0 XHCI controller */
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "okay";
+			};
+		};
+
 		internal-regs {
 			serial@12000 {
-				clock-frequency = <200000000>;
 				status = "okay";
 			};
 			timer@20300 {
@@ -99,6 +117,7 @@
 				 * No CD or WP GPIOs: SDIO interface used for
 				 * Wifi/Bluetooth chip
 				 */
+				 broken-cd;
 			};
 
 			usb@50000 {
@@ -120,19 +139,24 @@
 				};
 			};
 
-			pcie-controller {
+			nand@d0000 {
 				status = "okay";
-
-				/* Internal mini-PCIe connector */
-				pcie@1,0 {
-					/* Port 0, Lane 0 */
-					status = "okay";
+				num-cs = <1>;
+				marvell,nand-keep-config;
+				marvell,nand-enable-arbiter;
+				nand-on-flash-bbt;
+
+				partition@0 {
+					label = "U-Boot";
+					reg = <0 0x400000>;
 				};
-
-				/* Connected on the PCB to a USB 3.0 XHCI controller */
-				pcie@2,0 {
-					/* Port 1, Lane 0 */
-					status = "okay";
+				partition@400000 {
+					label = "Linux";
+					reg = <0x400000 0x400000>;
+				};
+				partition@800000 {
+					label = "Filesystem";
+					reg = <0x800000 0x3f800000>;
 				};
 			};
 		};
diff --git a/arch/arm/boot/dts/armada-370-rd.dts b/arch/arm/boot/dts/armada-370-rd.dts
index 130f8390a7e4..4017885b8dd7 100644
--- a/arch/arm/boot/dts/armada-370-rd.dts
+++ b/arch/arm/boot/dts/armada-370-rd.dts
@@ -12,7 +12,7 @@
  */
 
 /dts-v1/;
-/include/ "armada-370.dtsi"
+#include "armada-370.dtsi"
 
 / {
 	model = "Marvell Armada 370 Reference Design";
@@ -28,9 +28,29 @@
 	};
 
 	soc {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
+			  MBUS_ID(0x01, 0xe0) 0 0xfff00000 0x100000
+			  MBUS_ID(0x09, 0x01) 0 0xf1100000 0x10000>;	/* CESA0: PHYS=0xf1100000
+									   size 64K */
+
+		pcie-controller {
+			status = "okay";
+
+			/* Internal mini-PCIe connector */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+
+			/* Internal mini-PCIe connector */
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "okay";
+			};
+		};
+
 		internal-regs {
 			serial@12000 {
-				clock-frequency = <200000000>;
 				status = "okay";
 			};
 			sata@a0000 {
@@ -64,6 +84,7 @@
 				pinctrl-names = "default";
 				status = "okay";
 				/* No CD or WP GPIOs */
+				broken-cd;
 			};
 
 			usb@50000 {
@@ -84,6 +105,60 @@
 					gpios = <&gpio0 6 1>;
 				};
 			};
+
+			nand@d0000 {
+				num-cs = <1>;
+				marvell,nand-keep-config;
+				marvell,nand-enable-arbiter;
+				nand-on-flash-bbt;
+
+				partition@0 {
+					label = "U-Boot";
+					reg = <0 0x800000>;
+				};
+				partition@800000 {
+					label = "Linux";
+					reg = <0x800000 0x800000>;
+				};
+				partition@1000000 {
+					label = "Filesystem";
+					reg = <0x1000000 0x3f000000>;
+				};
+			};
+
+			/* 1GB Flash via NFC NAND controller */
+			nfc: nand-flash@d0000 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+				status = "okay";
+
+				nfc,nfc-mode  = "normal";       /* normal or ganged */
+				nfc,nfc-dma   = <0>;            /* 0 for no, 1 for dma */
+				nfc,nfc-width = <8>;
+				nfc,ecc-type  = <1>;            /* 4 bit */
+				nfc,num-cs    = <1>;
+
+				mtd0@00000000 {
+					label = "U-Boot";
+					reg = <0x00000000 0x00300000>;
+					read-only;
+				};
+
+				mtd1@00080000 {
+					label = "uImage";
+					reg = <0x00300000 0x00400000>;
+					read-only;
+				};
+
+				mtd2@00140000 {
+					label = "Root";
+					reg = <0x00700000 0x3f900000>;
+				};
+			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
 		};
 	};
  };
diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
index ddd068bb1457..4a633ed09367 100644
--- a/arch/arm/boot/dts/armada-370-xp.dtsi
+++ b/arch/arm/boot/dts/armada-370-xp.dtsi
@@ -18,10 +18,17 @@
 
 /include/ "skeleton64.dtsi"
 
+#define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16))
+
 / {
 	model = "Marvell Armada 370 and XP SoC";
 	compatible = "marvell,armada-370-xp";
 
+	aliases {
+		eth0 = &eth0;
+		eth1 = &eth1;
+	};
+
 	cpus {
 		cpu@0 {
 			compatible = "marvell,sheeva-v7";
@@ -29,24 +36,81 @@
 	};
 
 	soc {
-		#address-cells = <1>;
+		#address-cells = <2>;
 		#size-cells = <1>;
-		compatible = "simple-bus";
+		controller = <&mbusc>;
 		interrupt-parent = <&mpic>;
-		ranges = <0          0 0xd0000000 0x0100000 /* internal registers */
-			  0xe0000000 0 0xe0000000 0x8100000 /* PCIe */>;
+		pcie-mem-aperture = <0xf8000000 0x7e00000>;
+		pcie-io-aperture  = <0xffe00000 0x100000>;
+
+		devbus-bootcs {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10400 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x2f) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs0 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10408 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x3e) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs1 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10410 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x3d) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs2 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10418 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x3b) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs3 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10420 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x37) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
 
 		internal-regs {
 			compatible = "simple-bus";
 			#address-cells = <1>;
 			#size-cells = <1>;
-			ranges;
+			ranges = <0 MBUS_ID(0xf0, 0x01) 0 0x100000>;
+
+			mbusc: mbus-controller@20000 {
+				compatible = "marvell,mbus-controller";
+				reg = <0x20000 0x100>, <0x20180 0x20>,
+				      <0x20250 0x8>;
+			};
 
 			mpic: interrupt-controller@20000 {
 				compatible = "marvell,mpic";
 				#interrupt-cells = <1>;
 				#size-cells = <1>;
 				interrupt-controller;
+				msi-controller;
 			};
 
 			coherency-fabric@20200 {
@@ -60,6 +124,7 @@
 				reg-shift = <2>;
 				interrupts = <41>;
 				reg-io-width = <1>;
+				clocks = <&coreclk 0>;
 				status = "disabled";
 			};
 			serial@12100 {
@@ -68,19 +133,26 @@
 				reg-shift = <2>;
 				interrupts = <42>;
 				reg-io-width = <1>;
+				clocks = <&coreclk 0>;
 				status = "disabled";
 			};
 
+			coredivclk: corediv-clock@18740 {
+				compatible = "marvell,armada-370-corediv-clock";
+				reg = <0x18740 0xc>;
+				#clock-cells = <1>;
+				clocks = <&mainpll>;
+				clock-output-names = "nand";
+			};
+
 			timer@20300 {
-				compatible = "marvell,armada-370-xp-timer";
 				reg = <0x20300 0x30>, <0x21040 0x30>;
 				interrupts = <37>, <38>, <39>, <40>, <5>, <6>;
-				clocks = <&coreclk 2>;
 			};
 
 			sata@a0000 {
 				compatible = "marvell,orion-sata";
-				reg = <0xa0000 0x2400>;
+				reg = <0xa0000 0x5000>;
 				interrupts = <55>;
 				clocks = <&gateclk 15>, <&gateclk 30>;
 				clock-names = "0", "1";
@@ -95,20 +167,26 @@
 				clocks = <&gateclk 4>;
 			};
 
-			ethernet@70000 {
-				compatible = "marvell,armada-370-neta";
-				reg = <0x70000 0x2500>;
+			eth0: ethernet@70000 {
+				compatible = "marvell,neta";
+				reg = <0x70000 0x4000>;
 				interrupts = <8>;
 				clocks = <&gateclk 4>;
 				status = "disabled";
+				mac-address = [ 00 50 43 02 02 00 ];
+				eth,port-num    = <0>;
+				eth,port-mtu    = <1500>;
 			};
 
-			ethernet@74000 {
-				compatible = "marvell,armada-370-neta";
-				reg = <0x74000 0x2500>;
+			eth1: ethernet@74000 {
+				compatible = "marvell,neta";
+				reg = <0x74000 0x4000>;
 				interrupts = <10>;
 				clocks = <&gateclk 3>;
 				status = "disabled";
+				mac-address = [ 00 50 43 02 02 01 ];
+				eth,port-num    = <1>;
+				eth,port-mtu    = <1500>;
 			};
 
 			i2c0: i2c@11000 {
@@ -144,6 +222,10 @@
 				reg = <0xd4000 0x200>;
 				interrupts = <54>;
 				clocks = <&gateclk 17>;
+				bus-width = <4>;
+				cap-sdio-irq;
+				cap-sd-highspeed;
+				cap-mmc-highspeed;
 				status = "disabled";
 			};
 
@@ -183,50 +265,33 @@
 				status = "disabled";
 			};
 
-			devbus-bootcs@10400 {
-				compatible = "marvell,mvebu-devbus";
-				reg = <0x10400 0x8>;
-				#address-cells = <1>;
-				#size-cells = <1>;
-				clocks = <&coreclk 0>;
-				status = "disabled";
-			};
-
-			devbus-cs0@10408 {
-				compatible = "marvell,mvebu-devbus";
-				reg = <0x10408 0x8>;
-				#address-cells = <1>;
-				#size-cells = <1>;
-				clocks = <&coreclk 0>;
-				status = "disabled";
-			};
-
-			devbus-cs1@10410 {
-				compatible = "marvell,mvebu-devbus";
-				reg = <0x10410 0x8>;
+			nand@d0000 {
+				compatible = "marvell,armada370-nand";
+				reg = <0xd0000 0x54>;
 				#address-cells = <1>;
 				#size-cells = <1>;
-				clocks = <&coreclk 0>;
+				interrupts = <113>;
+				clocks = <&coredivclk 0>;
 				status = "disabled";
 			};
 
-			devbus-cs2@10418 {
-				compatible = "marvell,mvebu-devbus";
-				reg = <0x10418 0x8>;
-				#address-cells = <1>;
-				#size-cells = <1>;
-				clocks = <&coreclk 0>;
+			nfc: nand-flash@d0000 {
+				compatible = "marvell,armada-nand";
+				interrupts = <113>;
+				reg = <0xd0000 0x400>;
 				status = "disabled";
+				clocks = <&coredivclk 0>;
+				clock-names = "ecc_clk";
 			};
+		};
+	};
 
-			devbus-cs3@10420 {
-				compatible = "marvell,mvebu-devbus";
-				reg = <0x10420 0x8>;
-				#address-cells = <1>;
-				#size-cells = <1>;
-				clocks = <&coreclk 0>;
-				status = "disabled";
-			};
+	clocks {
+		/* 2 GHz fixed main PLL */
+		mainpll: mainpll {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <2000000000>;
 		};
 	};
  };
diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
index aee2b1866ce2..b94f6f2175a1 100644
--- a/arch/arm/boot/dts/armada-370.dtsi
+++ b/arch/arm/boot/dts/armada-370.dtsi
@@ -15,7 +15,7 @@
  * common to all Armada SoCs.
  */
 
-/include/ "armada-370-xp.dtsi"
+#include "armada-370-xp.dtsi"
 /include/ "skeleton.dtsi"
 
 / {
@@ -28,15 +28,98 @@
 		gpio2 = &gpio2;
 	};
 
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		cpu@0 {
+			device_type = "cpu";
+			compatible = "marvell,sheeva-v7";
+			reg = <0>;
+			resets = <&cpurst 0>;
+		};
+	};
+
 	soc {
-		ranges = <0          0xd0000000 0x0100000 /* internal registers */
-			  0xe0000000 0xe0000000 0x8100000 /* PCIe */>;
+		compatible = "marvell,armada370-mbus", "simple-bus";
+
+		bootrom {
+			compatible = "marvell,bootrom";
+			reg = <MBUS_ID(0x01, 0xe0) 0 0x100000>;
+		};
+
+		/* Security Accelerator SRAM (CESA) */
+		cesa-sram {
+			compatible = "marvell,cesa-sram";
+			reg = <MBUS_ID(0x09, 0x01) 0 0x10000>;   /*chan0*/
+		};
+
+		pcie-controller {
+			compatible = "marvell,armada-370-pcie";
+			status = "disabled";
+			device_type = "pci";
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+
+			msi-parent = <&mpic>;
+			bus-range = <0x00 0xff>;
+
+			ranges =
+			       <0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000
+				0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000
+				0x82000000 0x1 0     MBUS_ID(0x04, 0xe8) 0       1 0 /* Port 0.0 MEM */
+				0x81000000 0x1 0     MBUS_ID(0x04, 0xe0) 0       1 0 /* Port 0.0 IO  */
+				0x82000000 0x2 0     MBUS_ID(0x08, 0xe8) 0       1 0 /* Port 1.0 MEM */
+				0x81000000 0x2 0     MBUS_ID(0x08, 0xe0) 0       1 0 /* Port 1.0 IO  */>;
+
+			pcie@1,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
+				reg = <0x0800 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+                                ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0
+                                          0x81000000 0 0 0x81000000 0x1 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 58>;
+				marvell,pcie-port = <0>;
+				marvell,pcie-lane = <0>;
+				clocks = <&gateclk 5>;
+				status = "disabled";
+			};
+
+			pcie@2,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
+				reg = <0x1000 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+                                ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0
+                                          0x81000000 0 0 0x81000000 0x2 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 62>;
+				marvell,pcie-port = <1>;
+				marvell,pcie-lane = <0>;
+				clocks = <&gateclk 9>;
+				status = "disabled";
+			};
+		};
+
 		internal-regs {
 			system-controller@18200 {
 				compatible = "marvell,armada-370-xp-system-controller";
 				reg = <0x18200 0x100>;
 			};
 
+			cpurst: cpurst@20800 {
+				compatible = "marvell,armada-370-cpu-reset";
+				reg = <0x20800 0x8>;
+				#reset-cells = <1>;
+			};
+
 			L2: l2-cache {
 				compatible = "marvell,aurora-outer-cache";
 				reg = <0x08000 0x1000>;
@@ -78,7 +161,7 @@
 				gpio-controller;
 				#gpio-cells = <2>;
 				interrupt-controller;
-				#interrupts-cells = <2>;
+				#interrupt-cells = <2>;
 				interrupts = <82>, <83>, <84>, <85>;
 			};
 
@@ -89,7 +172,7 @@
 				gpio-controller;
 				#gpio-cells = <2>;
 				interrupt-controller;
-				#interrupts-cells = <2>;
+				#interrupt-cells = <2>;
 				interrupts = <87>, <88>, <89>, <90>;
 			};
 
@@ -100,10 +183,15 @@
 				gpio-controller;
 				#gpio-cells = <2>;
 				interrupt-controller;
-				#interrupts-cells = <2>;
+				#interrupt-cells = <2>;
 				interrupts = <91>;
 			};
 
+			timer@20300 {
+				compatible = "marvell,armada-370-timer";
+				clocks = <&coreclk 2>;
+			};
+
 			coreclk: mvebu-sar@18230 {
 				compatible = "marvell,armada-370-core-clock";
 				reg = <0x18230 0x08>;
@@ -117,6 +205,13 @@
 				#clock-cells = <1>;
 			};
 
+			memcpy@60800 {
+				compatible = "marvell,orion-memcpy";
+				reg = <0x60800 0x100>;
+				marvell,coalescing = <1>;
+				status = "okay";
+			};
+
 			xor@60800 {
 				compatible = "marvell,orion-xor";
 				reg = <0x60800 0x100
@@ -169,56 +264,23 @@
 					0x18304 0x4>;
 				status = "okay";
 			};
+			crypto@9D000 {
+				compatible = "marvell,armada-cesa";
+				reg = <0x9D000 0x1000	/* cesa base reg chan 0 */
+				       0x90000 0x1000>;	/* tdma base reg chan 0 */
+				clocks = <&gateclk 23>;
+				cesa,channels = <0x1>;
+				cesa,mode = "ocf";	/* ocf or test */
+				cesa,feature = "chain"; /* chain or int_per_packet */
 
-			pcie-controller {
-				compatible = "marvell,armada-370-pcie";
+				cesa,ctrlModel = /bits/ 16 <0x6710>;
+				cesa,ctrlRev = /bits/ 8 <2>;
+				cesa,sramOffset = /bits/ 16 <0>;
 				status = "disabled";
-				device_type = "pci";
-
-				#address-cells = <3>;
-				#size-cells = <2>;
-
-				bus-range = <0x00 0xff>;
-
-				reg = <0x40000 0x2000>, <0x80000 0x2000>;
-
-				reg-names = "pcie0.0", "pcie1.0";
-
-				ranges = <0x82000000 0 0x40000 0x40000 0 0x00002000   /* Port 0.0 registers */
-					0x82000000 0 0x80000 0x80000 0 0x00002000   /* Port 1.0 registers */
-					0x82000000 0 0xe0000000 0xe0000000 0 0x08000000   /* non-prefetchable memory */
-					0x81000000 0 0          0xe8000000 0 0x00100000>; /* downstream I/O */
-
-				pcie@1,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
-					reg = <0x0800 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 58>;
-					marvell,pcie-port = <0>;
-					marvell,pcie-lane = <0>;
-					clocks = <&gateclk 5>;
-					status = "disabled";
-				};
 
-				pcie@2,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
-					reg = <0x1000 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 62>;
-					marvell,pcie-port = <1>;
-					marvell,pcie-lane = <0>;
-					clocks = <&gateclk 9>;
-					status = "disabled";
+				crypto10 {
+					/* channel 0 */
+					interrupts = <48>;
 				};
 			};
 		};
diff --git a/arch/arm/boot/dts/armada-375-db.dts b/arch/arm/boot/dts/armada-375-db.dts
new file mode 100644
index 000000000000..d620fdf95239
--- /dev/null
+++ b/arch/arm/boot/dts/armada-375-db.dts
@@ -0,0 +1,211 @@
+/*
+ * Device Tree file for Marvell Armada 375 evaluation board
+ * (DB-88F6720)
+ *
+ *  Copyright (C) 2013 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "armada-375.dtsi"
+
+/ {
+	model = "Marvell Armada 375 Development Board";
+	compatible = "marvell,a375-db", "marvell,armada375";
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlyprintk";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x40000000>; /* 1 GB */
+	};
+
+	soc {
+
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000 /* internal regs */
+			  MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000  /* bootrom */
+			  MBUS_ID(0x09, 0x09) 0 0xf1100000 0x10000   /* CESA0: PHYS=0xf1100000
+									   size 64K */
+			  MBUS_ID(0x09, 0x05) 0 0xf1110000 0x10000>; /* CESA1: PHYS=0xf1110000
+									   size 64K */
+
+		internal-regs {
+			/* The mainline node is not used in hal driver, reserved just for reference in future enable  */
+			/*pp2@f0000 {
+				status = "okay";
+
+				ethernet@c4000 {
+					status = "okay";
+					phy = <&phy0>;
+					phy-mode = "rgmii-id";
+				};
+
+				ethernet@c5000 {
+					status = "okay";
+					phy = <&phy3>;
+					phy-mode = "gmii";
+				};
+			};*/
+
+			ethernet@c4000 {
+				status = "okay";
+				phy = <&phy0>;
+				phy-mode = "mii";
+			};
+
+			ethernet@c5000 {
+				status = "okay";
+				phy = <&phy3>;
+				phy-mode = "mii";
+			};
+
+			mdio {
+				phy0: ethernet-phy@0 {
+					reg = <0>;
+				};
+
+				phy3: ethernet-phy@3 {
+					reg = <3>;
+				};
+			};
+
+			i2c0: i2c@11000 {
+				status = "okay";
+				clock-frequency = <100000>;
+				pinctrl-0 = <&i2c0_pins>;
+				pinctrl-names = "default";
+			};
+
+			i2c1: i2c@11100 {
+				status = "okay";
+				clock-frequency = <100000>;
+				pinctrl-0 = <&i2c1_pins>;
+				pinctrl-names = "default";
+			};
+
+			mvsdio@d4000 {
+				pinctrl-0 = <&sdio_pins &sdio_st_pins>;
+				pinctrl-names = "default";
+				status = "okay";
+				cd-gpios = <&gpio1 12 0>;
+				wp-gpios = <&gpio1 13 0>;
+			};
+
+			pinctrl {
+				sdio_st_pins: sdio-st-pins {
+					marvell,pins = "mpp44", "mpp45";
+					marvell,function = "gpio";
+				};
+
+				sata_sd_pins: sata-sd-pins {
+					marvell,pins = "mpp63", "mpp66";
+					marvell,function = "gpio";
+				};
+			};
+
+			sata@a0000 {
+				nr-ports = <2>;
+				status = "okay";
+				pinctrl-0 = <&sata_sd_pins>;
+				pinctrl-names = "default";
+				status = "okay";
+				sd-gpios = <&gpio1 31 0>, <&gpio2 2 0>;
+			};
+
+			serial@12000 {
+				status = "okay";
+			};
+
+			spi0: spi@10600 {
+				pinctrl-0 = <&spi0_pins>;
+				pinctrl-names = "default";
+				/*
+				 * SPI conflicts with NAND, so we disable it
+				 * here, and select NAND as the enabled device
+				 * by default.
+				 */
+				status = "okay";
+
+				spi-flash@0 {
+					#address-cells = <1>;
+					#size-cells = <1>;
+					compatible = "n25q128a13";
+					reg = <0>; /* Chip select 0 */
+					spi-max-frequency = <108000000>;
+				};
+			};
+
+			nand: nand@d0000 {
+				pinctrl-0 = <&nand_pins>;
+				pinctrl-names = "default";
+				status = "okay";
+
+				nfc,nfc-mode  = "normal";	/* normal or ganged */
+				nfc,nfc-dma   = <0>;		/* 0 for no, 1 for dma */
+				nfc,nfc-width = <8>;
+				nfc,ecc-type  = <1>;		/* 4 bit */
+				nfc,num-cs    = <1>;
+
+				partition@0 {
+					label = "U-Boot";
+					reg = <0 0x800000>;
+					read-only;
+				};
+
+				partition@800000 {
+					label = "Linux";
+					reg = <0x800000 0x800000>;
+					read-only;
+				};
+
+				partition@1000000 {
+					label = "Filesystem";
+					reg = <0x1000000 0x3f000000>;
+				};
+			};
+
+			usb@50000 {
+				status = "disabled";
+			};
+
+			usb@54000 {
+				status = "okay";
+			};
+
+			usb3@58000 {
+				status = "okay";
+			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
+
+			common-phy@18310 {
+				status = "okay";
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+			/*
+			 * The two PCIe units are accessible through
+			 * standard PCIe slots on the board.
+			 */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "okay";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
new file mode 100644
index 000000000000..870d989d9be1
--- /dev/null
+++ b/arch/arm/boot/dts/armada-375.dtsi
@@ -0,0 +1,660 @@
+/*
+ * Device Tree Include file for Marvell Armada 375 family SoC
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Contains definitions specific to the Armada 375 SoC that are not
+ * common to all Armada SoCs.
+ */
+
+/include/ "skeleton.dtsi"
+
+#define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16))
+
+/ {
+	model = "Marvell Armada 375 family SoC";
+	compatible = "marvell,armada375";
+
+	aliases {
+		gpio0 = &gpio0;
+		gpio1 = &gpio1;
+		gpio2 = &gpio2;
+	};
+
+	clocks {
+	/* 25 MHz reference crystal */
+		refclk: oscillator {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <25000000>;
+		};
+		/* 2 GHz fixed main PLL */
+		mainpll: mainpll {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <2000000000>;
+		};
+	};
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <0>;
+			resets = <&cpurst 0>;
+		};
+		cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <1>;
+			resets = <&cpurst 1>;
+		};
+	};
+
+	soc {
+		compatible = "marvell,armada375-mbus", "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <1>;
+		controller = <&mbusc>;
+		interrupt-parent = <&gic>;
+		pcie-mem-aperture = <0xe0000000 0x8000000>;
+		pcie-io-aperture  = <0xe8000000 0x100000>;
+
+		bootrom {
+			compatible = "marvell,bootrom";
+			reg = <MBUS_ID(0x01, 0x1d) 0 0x100000>;
+		};
+
+		/* Security Accelerator SRAM (CESA) */
+		cesa-sram {
+			compatible = "marvell,cesa-sram";
+			reg = <MBUS_ID(0x09, 0x09) 0 0x10000	/*chan0*/
+			       MBUS_ID(0x09, 0x05) 0 0x10000>;	/*chan1*/
+		};
+
+		devbus-bootcs {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10400 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x2f) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs0 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10408 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x3e) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs1 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10410 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x3d) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs2 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10418 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x3b) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		devbus-cs3 {
+			compatible = "marvell,mvebu-devbus";
+			reg = <MBUS_ID(0xf0, 0x01) 0x10420 0x8>;
+			ranges = <0 MBUS_ID(0x01, 0x37) 0 0xffffffff>;
+			#address-cells = <1>;
+			#size-cells = <1>;
+			clocks = <&coreclk 0>;
+			status = "disabled";
+		};
+
+		internal-regs {
+			compatible = "simple-bus";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			ranges = <0 MBUS_ID(0xf0, 0x01) 0 0x100000>;
+
+			coherency-fabric@21010 {
+				compatible = "marvell,armada-375-coherency-fabric";
+				reg = <0x21010 0x1c>;
+			};
+
+			coreclk: mvebu-sar@e8204 {
+				compatible = "marvell,armada-375-core-clock";
+				reg = <0xe8204 0x04>;
+				#clock-cells = <1>;
+			};
+
+			cpurst: cpurst@20800 {
+				compatible = "marvell,armada-375-cpu-reset";
+				reg = <0x20800 0x10>;
+				#reset-cells = <1>;
+			};
+
+			gic: interrupt-controller@1e001000 {
+				compatible = "arm,cortex-a9-gic";
+				#interrupt-cells = <3>;
+				#size-cells = <0>;
+				interrupt-controller;
+				reg = <0xd000 0x1000>,
+				      <0xc100 0x100>;
+			};
+
+			gateclk: clock-gating-control@18220 {
+				compatible = "marvell,armada-375-gating-clock";
+				reg = <0x18220 0x4>;
+				clocks = <&coreclk 0>;
+				#clock-cells = <1>;
+			};
+
+
+			common-phy@18310 {
+				compatible = "marvell,armada-375-common-phy-configuration";
+				reg = <0x18310 0x34>;
+				status = "disabled";
+				phy-count = <4>;
+			};
+
+			ip-configuration@18400 {
+				compatible = "marvell,armada-375-ip-configuration";
+				reg = <0x18400 0x100>;
+			};
+
+			pm-override-configuration0@34308 {
+				compatible = "marvell,armada-375-serdes-pipe-configuration";
+				reg = <0x34308 0x4>, <0x36308 0x4>, <0x38308 0x4>, <0x3a308 0x4>;
+			};
+
+			coredivclk: corediv-clock@e8250 {
+				compatible = "marvell,armada-375-corediv-clock";
+				reg = <0xe8250 0xc>;
+				#clock-cells = <1>;
+				clocks = <&mainpll>;
+				clock-output-names = "nand";
+			};
+
+			mdio {
+				#address-cells = <1>;
+				#size-cells = <0>;
+				compatible = "marvell,orion-mdio";
+				reg = <0xC0054 0x4>;
+			};
+
+/* Network controller. The mainline node is not used in hal driver, reserved just for reference in future enable
+			pp2@f0000 {
+				compatible = "marvell,armada-375-pp2";
+				reg = <0xf0000 0xa000>,
+				      <0xc0000 0x3060>,
+				      <0xc4000 0x100>,
+				      <0xc5000 0x100>;
+				clocks = <&gateclk 3>, <&gateclk 19>;
+				clock-names = "pp_clk", "gop_clk";
+				status = "disabled";
+
+				ethernet@c4000 {
+					interrupts = <0 37 1>;
+					port-id = <0>;
+					status = "disabled";
+				};
+
+				ethernet@c5000 {
+					interrupts = <0 41 1>;
+					port-id = <1>;
+					status = "disabled";
+				};
+			};
+*/
+			gpio0: gpio@18100 {
+				compatible = "marvell,orion-gpio";
+				reg = <0x18100 0x40>;
+				ngpios = <32>;
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				interrupts = <0 53 0x4>, <0 54 0x4>,
+					     <0 55 0x4>, <0 56 0x4>;
+			};
+
+			gpio1: gpio@18140 {
+				compatible = "marvell,orion-gpio";
+				reg = <0x18140 0x40>;
+				ngpios = <32>;
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				interrupts = <0 58 0x4>, <0 59 0x4>,
+					     <0 60 0x4>, <0 61 0x4>;
+			};
+
+			gpio2: gpio@18180 {
+				compatible = "marvell,orion-gpio";
+				reg = <0x18180 0x40>;
+				ngpios = <3>;
+				gpio-controller;
+				#gpio-cells = <2>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				interrupts = <0 62 0x4>;
+			};
+
+			/* Packet Processor */
+			ppv2@f0000 {
+				compatible = "marvell,packet_processor_v2";
+				reg = <0xf0000 0xa000>;
+				clocks = <&gateclk 3>;
+			};
+
+			/* ETH LMS regs */
+			eth_lms@c0000 {
+				compatible = "marvell,eth_lms";
+				reg = <0xc0000 0x3060>;
+			};
+
+			/* GbE MAC's */
+			eth0: ethernet@c4000 {
+				compatible = "marvell,pp2";
+				reg = <0xc4000 0x100>;
+				interrupts = <0 37 1>;
+				clocks = <&gateclk 19>;
+				mac-address = [ 00 50 43 02 02 00 ];
+				eth,port-num    = <0>;
+				eth,port-mtu    = <1500>;
+				status = "disabled";
+			};
+
+			eth1: ethernet@c5000 {
+				compatible = "marvell,pp2";
+				reg = <0xc5000 0x100>;
+				interrupts = <0 41 1>;
+				clocks = <&gateclk 19>;
+				mac-address = [ 00 50 43 02 02 01 ];
+				eth,port-num    = <1>;
+				eth,port-mtu    = <1500>;
+				status = "disabled";
+			};
+
+			i2c0: i2c@11000 {
+				compatible = "marvell,mv64xxx-i2c";
+				reg = <0x11000 0x20>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				interrupts = <0 2 0x4>;
+				timeout-ms = <1000>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			i2c1: i2c@11100 {
+				compatible = "marvell,mv64xxx-i2c";
+				reg = <0x11100 0x20>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				interrupts = <0 3 0x4>;
+				timeout-ms = <1000>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			L2: cache-controller {
+				compatible = "arm,pl310-cache";
+				reg = <0x8000 0x1000>;
+				cache-unified;
+				cache-level = <2>;
+			};
+
+			mbusc: mbus-controller@20000 {
+				compatible = "marvell,mbus-controller";
+				reg = <0x20000 0x100>, <0x20180 0x20>;
+			};
+
+			mpic: interrupt-controller@20000 {
+				compatible = "marvell,mpic";
+				reg = <0x20a00 0x2d0>, <0x21070 0x58>, <0x21870 0x190>;
+				#interrupt-cells = <1>;
+				#size-cells = <1>;
+				interrupt-controller;
+				msi-controller;
+				interrupts = <1 15 0x4>;
+			};
+
+			mvsdio@d4000 {
+				compatible = "marvell,orion-sdio";
+				reg = <0xd4000 0x200>;
+				interrupts = <0 25 0x4>;
+				clocks = <&gateclk 17>;
+				bus-width = <4>;
+				cap-sdio-irq;
+				cap-sd-highspeed;
+				cap-mmc-highspeed;
+				status = "disabled";
+			};
+
+			pinctrl {
+				compatible = "marvell,mv88f6720-pinctrl";
+				reg = <0x18000 0x24>;
+
+				i2c0_pins: i2c0-pins {
+					marvell,pins = "mpp14",  "mpp15";
+					marvell,function = "i2c0";
+				};
+
+				i2c1_pins: i2c1-pins {
+					marvell,pins = "mpp61",  "mpp62";
+					marvell,function = "i2c1";
+				};
+
+				sdio_pins: sdio-pins {
+					marvell,pins = "mpp24",  "mpp25", "mpp26",
+						     "mpp27", "mpp28", "mpp29";
+					marvell,function = "sd";
+				};
+
+				spi0_pins: spi0-pins {
+					marvell,pins = "mpp0",  "mpp1", "mpp4",
+						     "mpp5", "mpp8", "mpp9";
+					marvell,function = "spi0";
+				};
+
+				nand_pins: nand-pins {
+					marvell,pins = "mpp0", "mpp1", "mpp2",
+							"mpp3", "mpp4", "mpp5",
+							"mpp6", "mpp7", "mpp8",
+							"mpp9", "mpp10", "mpp11",
+							"mpp12", "mpp13";
+					marvell,function = "nand";
+				};
+			};
+
+			rtc@10300 {
+				compatible = "marvell,orion-rtc";
+				reg = <0x10300 0x20>;
+				interrupts = <0 21 0x4>;
+			};
+
+			sata@a0000 {
+				compatible = "marvell,armada-370-sata";
+				reg = <0xa0000 0x5000>;
+				interrupts = <0 26 0x4>;
+				clocks = <&gateclk 14>, <&gateclk 20>;
+				clock-names = "0", "1";
+				status = "disabled";
+			};
+
+			scu@c000 {
+				compatible = "arm,cortex-a9-scu";
+				reg = <0xc000 0x58>;
+			};
+
+			serial@12000 {
+				compatible = "snps,dw-apb-uart";
+				reg = <0x12000 0x100>;
+				reg-shift = <2>;
+				interrupts = <0 12 4>;
+				reg-io-width = <1>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			serial@12100 {
+				compatible = "snps,dw-apb-uart";
+				reg = <0x12100 0x100>;
+				reg-shift = <2>;
+				interrupts = <0 13 4>;
+				reg-io-width = <1>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			spi0: spi@10600 {
+				compatible = "marvell,orion-spi";
+				reg = <0x10600 0x50>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				cell-index = <0>;
+				interrupts = <0 1 0x4>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			spi1: spi@10680 {
+				compatible = "marvell,orion-spi";
+				reg = <0x10680 0x50>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				cell-index = <1>;
+				interrupts = <0 63 0x4>;
+				clocks = <&coreclk 0>;
+				status = "disabled";
+			};
+
+			system-controller@18200 {
+				compatible = "marvell,armada-375-system-controller";
+				reg = <0x18200 0x100>;
+			};
+
+			thermal@e8078 {
+				compatible = "marvell,armada375-thermal";
+				reg = <0xe8078 0x4>, <0xe807c 0x8>;
+				status = "okay";
+			};
+
+			timer@c600 {
+				compatible = "arm,cortex-a9-twd-timer";
+				reg = <0xc600 0x20>;
+				interrupts = <1 13 0x301>;
+				clocks = <&coreclk 2>;
+			};
+
+			timer@20300 {
+				compatible = "marvell,armada-375-timer";
+				reg = <0x20300 0x30>, <0x21040 0x30>;
+				interrupts-extended = <&gic  0  8 4>,
+						      <&gic  0  9 4>,
+						      <&gic  0 10 4>,
+						      <&gic  0 11 4>,
+						      <&mpic 5>,
+						      <&mpic 6>;
+				clocks = <&coreclk 2>, <&refclk>;
+				clock-names = "nbclk", "fixed";
+			};
+
+			/*
+			 * On Armada 375, USB2 host controller #0 and
+			 * USB3 host controller are incompatible. That
+			 * means that in the dts of your board, you
+			 * can either select the first USB2 controller:
+			 * marvell,orion-ehci (@0x50000) or the USB3 controller:
+			 * marvell,xhci-armada-375, but not both. If
+			 * both controllers are selected, then the
+			 * kernel will select the USB3 by default.
+			 */
+			usb@50000 {
+				compatible = "marvell,orion-ehci";
+				reg = <0x50000 0x500>;
+				interrupts = <0 17 4>;
+				clocks = <&gateclk 18>;
+				status = "disabled";
+			};
+
+			usb@54000 {
+				compatible = "marvell,orion-ehci";
+				reg = <0x54000 0x500>;
+				interrupts = <0 18 4>;
+				clocks = <&gateclk 26>;
+				status = "disabled";
+			};
+
+			usb3@58000 {
+				compatible = "marvell,xhci-armada-375";
+				reg = <0x58000 0x4000>,<0x5b880 0x80>;
+				interrupts = <0 16 4>;
+				clocks = <&gateclk 16>;
+				status = "disabled";
+			};
+
+			usb-cluster@18400 {
+				compatible = "marvell,armada-375-usb-cluster";
+				reg = <0x18400 0x4>;
+			};
+
+			xor@60800 {
+				compatible = "marvell,orion-xor";
+				reg = <0x60800 0x100
+				       0x60A00 0x100>;
+				clocks = <&gateclk 22>;
+				status = "okay";
+
+				xor00 {
+					interrupts = <0 22 0x4>;
+					dmacap,memcpy;
+					dmacap,xor;
+					dmacap,interrupt;
+				};
+			};
+
+			xor@60900 {
+				compatible = "marvell,orion-xor";
+				reg = <0x60900 0x100
+				       0x60b00 0x100>;
+				clocks = <&gateclk 23>;
+				status = "okay";
+
+				xor10 {
+					interrupts = <0 65 0x4>;
+					dmacap,memcpy;
+					dmacap,xor;
+					dmacap,interrupt;
+				};
+			};
+
+			nand: nand@d0000 {
+				compatible = "marvell,armada-375-nand";
+				reg = <0xd0000 0x54>;
+				#address-cells = <1>;
+				#size-cells = <1>;
+				interrupts = <0 84 0x4>;
+				clocks = <&coredivclk 0>, <&gateclk 11>;
+				clock-names = "ecc_clk", "gateclk";
+				status = "disabled";
+			};
+
+			crypto@9D000 {
+				compatible = "marvell,armada-cesa";
+				reg = <0x9D000 0x1000	/* cesa base reg chan 0 */
+				       0x90000 0x1000	/* tdma base reg chan 0 */
+				       0x9F000 0x1000	/* cesa base reg chan 1 */
+				       0x92000 0x1000>;	/* tdma base reg chan 1 */
+
+				clocks = <&gateclk 30>, <&gateclk 28>,
+					 <&gateclk 31>, <&gateclk 29>;
+				clock-names = "crypto0", "crypto0z",
+					      "crypto1", "crypto1z";
+
+				cesa,channels = <0x2>;
+				cesa,mode = "ocf";	/* ocf or test */
+				cesa,feature = "int_coalescing"; /* chain, int_coalescing
+							   or int_per_packet */
+
+				/* threshold and time_threshold relevant if
+				   int_coalescing in use */
+				cesa,threshold = <0x2>;
+				cesa,time_threshold = <0xff>;
+
+				cesa,ctrlModel = /bits/ 16 <0x6720>;
+				cesa,ctrlRev = /bits/ 8 <2>;
+				cesa,sramOffset = /bits/ 16 <0>;
+				status = "disabled";
+
+				crypto10 {
+					/* channel 0 */
+					interrupts = <0 19 0x4>;
+				};
+				crypto11 {
+					/* channel 1 */
+					interrupts = <0 20 0x4>;
+				};
+			};
+		};
+
+		pcie-controller {
+			compatible = "marvell,armada-375-pcie";
+			status = "disabled";
+			device_type = "pci";
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+
+			msi-parent = <&mpic>;
+			bus-range = <0x00 0xff>;
+
+			ranges =
+			       <0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000
+				0x82000000 0 0x44000 MBUS_ID(0xf0, 0x01) 0x44000 0 0x00002000
+				0x82000000 0x1 0       MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0 MEM */
+				0x81000000 0x1 0       MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0 IO  */
+				0x82000000 0x2 0       MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 1 MEM */
+				0x81000000 0x2 0       MBUS_ID(0x04, 0xd0) 0 1 0 /* Port 1 IO  */>;
+
+			pcie@1,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
+				reg = <0x0800 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0
+					  0x81000000 0 0 0x81000000 0x1 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &gic 0 29 0x4>;
+				marvell,pcie-port = <0>;
+				marvell,pcie-lane = <0>;
+				clocks = <&gateclk 5>;
+				status = "disabled";
+			};
+
+			pcie@2,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
+				reg = <0x1000 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0
+					  0x81000000 0 0 0x81000000 0x2 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &gic 0 33 0x4>;
+				marvell,pcie-port = <0>;
+				marvell,pcie-lane = <1>;
+				clocks = <&gateclk 6>;
+				status = "disabled";
+			};
+
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/armada-380.dtsi b/arch/arm/boot/dts/armada-380.dtsi
new file mode 100644
index 000000000000..221f8d458a49
--- /dev/null
+++ b/arch/arm/boot/dts/armada-380.dtsi
@@ -0,0 +1,118 @@
+/*
+ * Device Tree Include file for Marvell Armada 380 SoC.
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include "armada-38x.dtsi"
+
+/ {
+	model = "Marvell Armada 380 family SoC";
+	compatible = "marvell,armada380", "marvell,armada38x";
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <0>;
+			resets = <&cpurst 0>;
+		};
+	};
+
+	soc {
+		internal-regs {
+			pinctrl {
+				compatible = "marvell,mv88f6810-pinctrl";
+				reg = <0x18000 0x20>;
+			};
+		};
+
+		pcie-controller {
+			compatible = "marvell,armada-38x-pcie";
+			status = "disabled";
+			device_type = "pci";
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+
+			msi-parent = <&mpic>;
+			bus-range = <0x00 0xff>;
+
+			ranges =
+			       <0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000
+				0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000
+				0x82000000 0 0x44000 MBUS_ID(0xf0, 0x01) 0x44000 0 0x00002000
+				0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000
+				0x82000000 0x1 0     MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 0 MEM */
+				0x81000000 0x1 0     MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 0 IO  */
+				0x82000000 0x2 0     MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 1 MEM */
+				0x81000000 0x2 0     MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 1 IO  */
+				0x82000000 0x3 0     MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 2 MEM */
+				0x81000000 0x3 0     MBUS_ID(0x04, 0xd0) 0 1 0 /* Port 2 IO  */>;
+
+			/* x1 port */
+			pcie@1,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
+				reg = <0x0800 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0
+					  0x81000000 0 0 0x81000000 0x1 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &gic 0 29 0x4>;
+				marvell,pcie-port = <0>;
+				marvell,pcie-lane = <0>;
+				clocks = <&gateclk 8>;
+				status = "disabled";
+			};
+
+			/* x1 port */
+			pcie@2,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
+				reg = <0x1000 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0
+					  0x81000000 0 0 0x81000000 0x2 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &gic 0 33 0x4>;
+				marvell,pcie-port = <1>;
+				marvell,pcie-lane = <0>;
+				clocks = <&gateclk 5>;
+				status = "disabled";
+			};
+
+			/* x1 port */
+			pcie@3,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
+				reg = <0x1800 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x3 0 1 0
+					  0x81000000 0 0 0x81000000 0x3 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &gic 0 70 0x4>;
+				marvell,pcie-port = <2>;
+				marvell,pcie-lane = <0>;
+				clocks = <&gateclk 6>;
+				status = "disabled";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/armada-382-customer2.dts b/arch/arm/boot/dts/armada-382-customer2.dts
new file mode 100644
index 000000000000..b6626aabc173
--- /dev/null
+++ b/arch/arm/boot/dts/armada-382-customer2.dts
@@ -0,0 +1,152 @@
+/*
+ * Device Tree file for Marvell Armada 382 Development Board Purpose board
+ * (DB-88F6821-BP)
+ *
+ *  Copyright (C) 2014 Marvell
+ *
+ * Nadav Haklai <nadavh@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "armada-380.dtsi"
+
+/ {
+	model = "Marvell Armada 382 Development Board";
+	compatible = "marvell,a382-db", "marvell,armada382", "marvell,armada38x";
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlyprintk";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>; /* 256 MB */
+	};
+
+	soc {
+
+		internal-regs {
+			ethernet@70000 {
+				status = "okay";
+				phy = <&phy0>;
+				phy-mode = "rgmii";
+			};
+
+			ethernet@30000 {
+				status = "okay";
+				phy = <&phy1>;
+				phy-mode = "rgmii";
+			};
+
+			i2c0: i2c@11000 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			i2c1: i2c@11100 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			mdio {
+				phy0: ethernet-phy@0 {
+					reg = <0>;
+				};
+
+				phy1: ethernet-phy@1 {
+					reg = <1>;
+				};
+			};
+
+			sata@a8000 {
+				status = "okay";
+			};
+
+			sdhci@d8000 {
+				broken-cd;
+				wp-inverted;
+				bus-width = <8>;
+				status = "okay";
+			};
+
+			serial@12000 {
+				status = "okay";
+			};
+
+			spi0: spi@10600 {
+				status = "okay";
+
+				spi-flash@0 {
+					#address-cells = <1>;
+					#size-cells = <1>;
+					compatible = "w25q32";
+					reg = <0>; /* Chip select 0 */
+					spi-max-frequency = <108000000>;
+				};
+			};
+
+			/*
+			 * 1GB Flash via NFC NAND controller
+			 * should be disabled when the board boots
+			 * from SPI flash, since NFC shares the same
+			 * pins with SPI0 and requires SLM-1358 jumper.
+			 * However the u-boot DTB parser will
+			 * handle this situation and disable/remove
+			 * unnessesary devices according to board
+			 * boot-up configuration.
+			 */
+			nfc: nand@d0000 {
+				status = "okay";
+				#address-cells = <1>;
+				#size-cells = <1>;
+
+				nfc,nfc-mode  = "normal";	/* normal or ganged */
+				nfc,nfc-dma   = <0>;		/* 0 for no, 1 for dma */
+				nfc,nfc-width = <8>;
+				nfc,ecc-type  = <1>;		/* 4 bit */
+				nfc,num-cs    = <1>;
+
+				mtd0@00000000 {
+					label = "U-Boot";
+					reg = <0x00000000 0x00600000>;
+					read-only;
+				};
+
+				mtd1@00080000 {
+					label = "uImage";
+					reg = <0x00600000 0x00400000>;
+					read-only;
+				};
+
+				mtd2@00140000 {
+					label = "Root";
+					reg = <0x00a00000 0x3f600000>;
+				};
+			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+			/*
+			 * The two PCIe units are accessible through
+			 * standard PCIe slots on the board.
+			 */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "okay";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/armada-382-db.dts b/arch/arm/boot/dts/armada-382-db.dts
new file mode 100644
index 000000000000..b6626aabc173
--- /dev/null
+++ b/arch/arm/boot/dts/armada-382-db.dts
@@ -0,0 +1,152 @@
+/*
+ * Device Tree file for Marvell Armada 382 Development Board Purpose board
+ * (DB-88F6821-BP)
+ *
+ *  Copyright (C) 2014 Marvell
+ *
+ * Nadav Haklai <nadavh@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include "armada-380.dtsi"
+
+/ {
+	model = "Marvell Armada 382 Development Board";
+	compatible = "marvell,a382-db", "marvell,armada382", "marvell,armada38x";
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlyprintk";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>; /* 256 MB */
+	};
+
+	soc {
+
+		internal-regs {
+			ethernet@70000 {
+				status = "okay";
+				phy = <&phy0>;
+				phy-mode = "rgmii";
+			};
+
+			ethernet@30000 {
+				status = "okay";
+				phy = <&phy1>;
+				phy-mode = "rgmii";
+			};
+
+			i2c0: i2c@11000 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			i2c1: i2c@11100 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			mdio {
+				phy0: ethernet-phy@0 {
+					reg = <0>;
+				};
+
+				phy1: ethernet-phy@1 {
+					reg = <1>;
+				};
+			};
+
+			sata@a8000 {
+				status = "okay";
+			};
+
+			sdhci@d8000 {
+				broken-cd;
+				wp-inverted;
+				bus-width = <8>;
+				status = "okay";
+			};
+
+			serial@12000 {
+				status = "okay";
+			};
+
+			spi0: spi@10600 {
+				status = "okay";
+
+				spi-flash@0 {
+					#address-cells = <1>;
+					#size-cells = <1>;
+					compatible = "w25q32";
+					reg = <0>; /* Chip select 0 */
+					spi-max-frequency = <108000000>;
+				};
+			};
+
+			/*
+			 * 1GB Flash via NFC NAND controller
+			 * should be disabled when the board boots
+			 * from SPI flash, since NFC shares the same
+			 * pins with SPI0 and requires SLM-1358 jumper.
+			 * However the u-boot DTB parser will
+			 * handle this situation and disable/remove
+			 * unnessesary devices according to board
+			 * boot-up configuration.
+			 */
+			nfc: nand@d0000 {
+				status = "okay";
+				#address-cells = <1>;
+				#size-cells = <1>;
+
+				nfc,nfc-mode  = "normal";	/* normal or ganged */
+				nfc,nfc-dma   = <0>;		/* 0 for no, 1 for dma */
+				nfc,nfc-width = <8>;
+				nfc,ecc-type  = <1>;		/* 4 bit */
+				nfc,num-cs    = <1>;
+
+				mtd0@00000000 {
+					label = "U-Boot";
+					reg = <0x00000000 0x00600000>;
+					read-only;
+				};
+
+				mtd1@00080000 {
+					label = "uImage";
+					reg = <0x00600000 0x00400000>;
+					read-only;
+				};
+
+				mtd2@00140000 {
+					label = "Root";
+					reg = <0x00a00000 0x3f600000>;
+				};
+			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+			/*
+			 * The two PCIe units are accessible through
+			 * standard PCIe slots on the board.
+			 */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "okay";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/armada-385-388.dtsi b/arch/arm/boot/dts/armada-385-388.dtsi
new file mode 100644
index 000000000000..9be81c859829
--- /dev/null
+++ b/arch/arm/boot/dts/armada-385-388.dtsi
@@ -0,0 +1,151 @@
+/*
+ * Device Tree Include file for Marvell Armada 385 SoC.
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include "armada-38x.dtsi"
+
+/ {
+	model = "Marvell Armada 385 family SoC";
+	compatible = "marvell,armada385", "marvell,armada38x";
+
+	cpus {
+		#address-cells = <1>;
+		#size-cells = <0>;
+		cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <0>;
+			resets = <&cpurst 0>;
+		};
+		cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,cortex-a9";
+			reg = <1>;
+			resets = <&cpurst 1>;
+		};
+	};
+
+	soc {
+		internal-regs {
+			pinctrl {
+				compatible = "marvell,mv88f6820-pinctrl";
+				reg = <0x18000 0x20>;
+			};
+		};
+
+		pcie-controller {
+			compatible = "marvell,armada-38x-pcie";
+			status = "disabled";
+			device_type = "pci";
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+
+			msi-parent = <&mpic>;
+			bus-range = <0x00 0xff>;
+
+			ranges =
+			       <0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000
+				0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000
+				0x82000000 0 0x44000 MBUS_ID(0xf0, 0x01) 0x44000 0 0x00002000
+				0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000
+				0x82000000 0x1 0     MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 0 MEM */
+				0x81000000 0x1 0     MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 0 IO  */
+				0x82000000 0x2 0     MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 1 MEM */
+				0x81000000 0x2 0     MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 1 IO  */
+				0x82000000 0x3 0     MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 2 MEM */
+				0x81000000 0x3 0     MBUS_ID(0x04, 0xd0) 0 1 0 /* Port 2 IO  */
+				0x82000000 0x4 0     MBUS_ID(0x04, 0xb8) 0 1 0 /* Port 3 MEM */
+				0x81000000 0x4 0     MBUS_ID(0x04, 0xb0) 0 1 0 /* Port 3 IO  */>;
+
+			/*
+			 * This port can be either x4 or x1. When
+			 * configured in x4 by the bootloader, then
+			 * pcie@4,0 is not available.
+			 */
+			pcie@1,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
+				reg = <0x0800 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0
+					  0x81000000 0 0 0x81000000 0x1 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &gic 0 29 0x4>;
+				marvell,pcie-port = <0>;
+				marvell,pcie-lane = <0>;
+				clocks = <&gateclk 8>;
+				status = "disabled";
+			};
+
+			/* x1 port */
+			pcie@2,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
+				reg = <0x1000 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0
+					  0x81000000 0 0 0x81000000 0x2 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &gic 0 33 0x4>;
+				marvell,pcie-port = <1>;
+				marvell,pcie-lane = <0>;
+				clocks = <&gateclk 5>;
+				status = "disabled";
+			};
+
+			/* x1 port */
+			pcie@3,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
+				reg = <0x1800 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x3 0 1 0
+					  0x81000000 0 0 0x81000000 0x3 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &gic 0 70 0x4>;
+				marvell,pcie-port = <2>;
+				marvell,pcie-lane = <0>;
+				clocks = <&gateclk 6>;
+				status = "disabled";
+			};
+
+			/*
+			 * x1 port only available when pcie@1,0 is
+			 * configured as a x1 port
+			 */
+			pcie@4,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
+				reg = <0x2000 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x4 0 1 0
+					  0x81000000 0 0 0x81000000 0x4 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &gic 0 71 0x4>;
+				marvell,pcie-port = <3>;
+				marvell,pcie-lane = <0>;
+				clocks = <&gateclk 7>;
+				status = "disabled";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/armada-385-customer1.dts b/arch/arm/boot/dts/armada-385-customer1.dts
new file mode 100644
index 000000000000..a8bb0a1b9a3d
--- /dev/null
+++ b/arch/arm/boot/dts/armada-385-customer1.dts
@@ -0,0 +1,164 @@
+/*
+ * Device Tree file for Marvell Armada 385 Access Point Development board
+ * (DB-88F6820-AP)
+ *
+ *  Copyright (C) 2014 Marvell
+ *
+ * Nadav Haklai <nadavh@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+#include "armada-385-388.dtsi"
+
+/ {
+	model = "Marvell Armada 385 Access Point Development Board";
+	compatible = "marvell,a385-db-ap", "marvell,armada385", "marvell,armada38x";
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlyprintk";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>; /* 256 MB */
+	};
+
+	soc {
+
+		internal-regs {
+			ethernet@70000 {
+				status = "okay";
+				phy = <&phy0>;
+				phy-mode = "rgmii";
+			};
+
+			ethernet@30000 {
+				status = "okay";
+				phy = <&phy1>;
+				phy-mode = "sgmii";
+			};
+
+			ethernet@34000 {
+				status = "okay";
+				phy = <&phy2>;
+				phy-mode = "sgmii";
+			};
+
+			i2c0: i2c@11000 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			i2c1: i2c@11100 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			mdio {
+				phy0: ethernet-phy@0 {
+					reg = <1>;
+				};
+
+				phy1: ethernet-phy@1 {
+					reg = <6>;
+				};
+
+				phy2: ethernet-phy@2 {
+					reg = <4>;
+				};
+			};
+
+			serial@12100 {
+				status = "okay";
+			};
+
+			spi0: spi@10600 {
+				status = "okay";
+
+				spi-flash@0 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					compatible = "w25q32";
+					reg = <0>; /* Chip select 0 */
+					spi-max-frequency = <108000000>;
+				};
+			};
+
+			nfc: nand@d0000 {
+				status = "okay";
+				#address-cells = <1>;
+				#size-cells = <1>;
+
+				nfc,nfc-mode  = "normal";	/* normal or ganged */
+				nfc,nfc-dma   = <0>;		/* 0 for no, 1 for dma */
+				nfc,nfc-width = <8>;
+				nfc,ecc-type  = <1>;		/* 4 bit */
+				nfc,num-cs    = <1>;
+
+				mtd0@00000000 {
+					label = "U-Boot";
+					reg = <0x00000000 0x00600000>;
+					read-only;
+				};
+
+				mtd1@00080000 {
+					label = "uImage";
+					reg = <0x00600000 0x00400000>;
+					read-only;
+				};
+
+				mtd2@00140000 {
+					label = "Root";
+					reg = <0x00a00000 0x3f600000>;
+				};
+			};
+
+			usb3@f8000 {
+				status = "disabled";
+			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
+
+			pm_pic {
+				pinctrl-0 = <&pic_pins>;
+				pinctrl-names = "default";
+				ctrl-gpios = <&gpio0 21 GPIO_ACTIVE_LOW>,
+					     <&gpio1 15 GPIO_ACTIVE_LOW>;
+			};
+
+			pinctrl {
+				pic_pins: pic-pins-0 {
+					marvell,pins = "mpp21", "mpp47";
+					marvell,function = "gpio";
+				};
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+			/*
+			 * The two PCIe units are accessible through
+			 * standard PCIe slots on the board.
+			 */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "okay";
+			};
+			pcie@3,0 {
+				/* Port 2, Lane 0 */
+				status = "okay";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/armada-385-db-ap.dts b/arch/arm/boot/dts/armada-385-db-ap.dts
new file mode 100644
index 000000000000..a8bb0a1b9a3d
--- /dev/null
+++ b/arch/arm/boot/dts/armada-385-db-ap.dts
@@ -0,0 +1,164 @@
+/*
+ * Device Tree file for Marvell Armada 385 Access Point Development board
+ * (DB-88F6820-AP)
+ *
+ *  Copyright (C) 2014 Marvell
+ *
+ * Nadav Haklai <nadavh@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+#include "armada-385-388.dtsi"
+
+/ {
+	model = "Marvell Armada 385 Access Point Development Board";
+	compatible = "marvell,a385-db-ap", "marvell,armada385", "marvell,armada38x";
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlyprintk";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>; /* 256 MB */
+	};
+
+	soc {
+
+		internal-regs {
+			ethernet@70000 {
+				status = "okay";
+				phy = <&phy0>;
+				phy-mode = "rgmii";
+			};
+
+			ethernet@30000 {
+				status = "okay";
+				phy = <&phy1>;
+				phy-mode = "sgmii";
+			};
+
+			ethernet@34000 {
+				status = "okay";
+				phy = <&phy2>;
+				phy-mode = "sgmii";
+			};
+
+			i2c0: i2c@11000 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			i2c1: i2c@11100 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			mdio {
+				phy0: ethernet-phy@0 {
+					reg = <1>;
+				};
+
+				phy1: ethernet-phy@1 {
+					reg = <6>;
+				};
+
+				phy2: ethernet-phy@2 {
+					reg = <4>;
+				};
+			};
+
+			serial@12100 {
+				status = "okay";
+			};
+
+			spi0: spi@10600 {
+				status = "okay";
+
+				spi-flash@0 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					compatible = "w25q32";
+					reg = <0>; /* Chip select 0 */
+					spi-max-frequency = <108000000>;
+				};
+			};
+
+			nfc: nand@d0000 {
+				status = "okay";
+				#address-cells = <1>;
+				#size-cells = <1>;
+
+				nfc,nfc-mode  = "normal";	/* normal or ganged */
+				nfc,nfc-dma   = <0>;		/* 0 for no, 1 for dma */
+				nfc,nfc-width = <8>;
+				nfc,ecc-type  = <1>;		/* 4 bit */
+				nfc,num-cs    = <1>;
+
+				mtd0@00000000 {
+					label = "U-Boot";
+					reg = <0x00000000 0x00600000>;
+					read-only;
+				};
+
+				mtd1@00080000 {
+					label = "uImage";
+					reg = <0x00600000 0x00400000>;
+					read-only;
+				};
+
+				mtd2@00140000 {
+					label = "Root";
+					reg = <0x00a00000 0x3f600000>;
+				};
+			};
+
+			usb3@f8000 {
+				status = "disabled";
+			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
+
+			pm_pic {
+				pinctrl-0 = <&pic_pins>;
+				pinctrl-names = "default";
+				ctrl-gpios = <&gpio0 21 GPIO_ACTIVE_LOW>,
+					     <&gpio1 15 GPIO_ACTIVE_LOW>;
+			};
+
+			pinctrl {
+				pic_pins: pic-pins-0 {
+					marvell,pins = "mpp21", "mpp47";
+					marvell,function = "gpio";
+				};
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+			/*
+			 * The two PCIe units are accessible through
+			 * standard PCIe slots on the board.
+			 */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "okay";
+			};
+			pcie@3,0 {
+				/* Port 2, Lane 0 */
+				status = "okay";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/armada-388-customer0.dts b/arch/arm/boot/dts/armada-388-customer0.dts
new file mode 100644
index 000000000000..c80c0ef09519
--- /dev/null
+++ b/arch/arm/boot/dts/armada-388-customer0.dts
@@ -0,0 +1,133 @@
+/*
+ * Device Tree file for Marvell Armada 388 Development General Purpose board
+ * (DB-88F6828-GP)
+ *
+ *  Copyright (C) 2014 Marvell
+ *
+ * Nadav Haklai <nadavh@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+#include "armada-385-388.dtsi"
+
+/ {
+	model = "Marvell Armada 388 Development General Purpose";
+	compatible = "marvell,a388-db-gp", "marvell,armada388", "marvell,armada38x";
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlyprintk";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>; /* 256 MB */
+	};
+
+	soc {
+		internal-regs {
+			ethernet@70000 {
+				status = "okay";
+				phy = <&phy0>;
+				phy-mode = "rgmii";
+			};
+
+			ethernet@30000 {
+				status = "okay";
+				phy = <&phy1>;
+				phy-mode = "rgmii";
+			};
+
+			i2c0: i2c@11000 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			i2c1: i2c@11100 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			mdio {
+				phy0: ethernet-phy@1 {
+					reg = <1>;
+				};
+
+				phy1: ethernet-phy@0 {
+					reg = <0>;
+				};
+			};
+
+			sata@a8000 {
+				status = "okay";
+			};
+
+			sata@e0000 {
+				status = "okay";
+			};
+
+			sdhci@d8000 {
+				broken-cd;
+				wp-inverted;
+				bus-width = <8>;
+				status = "okay";
+				no-1-8-v;
+			};
+
+			serial@12000 {
+				status = "okay";
+			};
+
+			spi0: spi@10600 {
+				status = "okay";
+
+				spi-flash@0 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					compatible = "w25q32";
+					reg = <0>; /* Chip select 0 */
+					spi-max-frequency = <108000000>;
+				};
+			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
+
+			pm_pic {
+				pinctrl-0 = <&pic_pins>;
+				pinctrl-names = "default";
+				ctrl-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>,
+					     <&gpio1 2 GPIO_ACTIVE_LOW>,
+					     <&gpio1 3 GPIO_ACTIVE_LOW>;
+			};
+
+			pinctrl {
+				pic_pins: pic-pins-0 {
+					marvell,pins = "mpp33", "mpp34", "mpp35";
+					marvell,function = "gpio";
+				};
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+			/*
+			 * The two PCIe units are accessible through
+			 * standard PCIe slots on the board.
+			 */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "disabled";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/armada-388-db-gp.dts b/arch/arm/boot/dts/armada-388-db-gp.dts
new file mode 100644
index 000000000000..c80c0ef09519
--- /dev/null
+++ b/arch/arm/boot/dts/armada-388-db-gp.dts
@@ -0,0 +1,133 @@
+/*
+ * Device Tree file for Marvell Armada 388 Development General Purpose board
+ * (DB-88F6828-GP)
+ *
+ *  Copyright (C) 2014 Marvell
+ *
+ * Nadav Haklai <nadavh@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+#include "armada-385-388.dtsi"
+
+/ {
+	model = "Marvell Armada 388 Development General Purpose";
+	compatible = "marvell,a388-db-gp", "marvell,armada388", "marvell,armada38x";
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlyprintk";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>; /* 256 MB */
+	};
+
+	soc {
+		internal-regs {
+			ethernet@70000 {
+				status = "okay";
+				phy = <&phy0>;
+				phy-mode = "rgmii";
+			};
+
+			ethernet@30000 {
+				status = "okay";
+				phy = <&phy1>;
+				phy-mode = "rgmii";
+			};
+
+			i2c0: i2c@11000 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			i2c1: i2c@11100 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			mdio {
+				phy0: ethernet-phy@1 {
+					reg = <1>;
+				};
+
+				phy1: ethernet-phy@0 {
+					reg = <0>;
+				};
+			};
+
+			sata@a8000 {
+				status = "okay";
+			};
+
+			sata@e0000 {
+				status = "okay";
+			};
+
+			sdhci@d8000 {
+				broken-cd;
+				wp-inverted;
+				bus-width = <8>;
+				status = "okay";
+				no-1-8-v;
+			};
+
+			serial@12000 {
+				status = "okay";
+			};
+
+			spi0: spi@10600 {
+				status = "okay";
+
+				spi-flash@0 {
+					#address-cells = <1>;
+					#size-cells = <0>;
+					compatible = "w25q32";
+					reg = <0>; /* Chip select 0 */
+					spi-max-frequency = <108000000>;
+				};
+			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
+
+			pm_pic {
+				pinctrl-0 = <&pic_pins>;
+				pinctrl-names = "default";
+				ctrl-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>,
+					     <&gpio1 2 GPIO_ACTIVE_LOW>,
+					     <&gpio1 3 GPIO_ACTIVE_LOW>;
+			};
+
+			pinctrl {
+				pic_pins: pic-pins-0 {
+					marvell,pins = "mpp33", "mpp34", "mpp35";
+					marvell,function = "gpio";
+				};
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+			/*
+			 * The two PCIe units are accessible through
+			 * standard PCIe slots on the board.
+			 */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+			pcie@2,0 {
+				/* Port 1, Lane 0 */
+				status = "disabled";
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/armada-38x-modular.dts b/arch/arm/boot/dts/armada-38x-modular.dts
new file mode 100644
index 000000000000..23cf4262b3f2
--- /dev/null
+++ b/arch/arm/boot/dts/armada-38x-modular.dts
@@ -0,0 +1,164 @@
+/*
+ * Device Tree file for Marvell Armada 38x boards: Modular file to be updated by U-Boot board-setup
+ * (DB-88F6820)
+ *
+ *  Copyright (C) 2013 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
+#include "armada-385-388.dtsi"
+
+/ {
+	model = "Marvell Armada 38x Modular Device Tree";
+	compatible = "marvell,a385-db", "marvell,armada385", "marvell,armada38x";
+
+	chosen {
+		bootargs = "console=ttyS0,115200 earlyprintk";
+	};
+
+	memory {
+		device_type = "memory";
+		reg = <0x00000000 0x10000000>; /* 256 MB */
+	};
+
+	soc {
+
+		internal-regs {
+			ethernet@70000 {
+				phy = <&phy0>;
+				phy-mode = "rgmii";	/* RGMII for DB-BP, DB-GP, & DB-381 */
+			};
+
+			ethernet@30000 {
+				phy = <&phy1>;
+				phy-mode = "rgmii";	/* RGMII for DB-BP, DB-GP, & DB-381 */
+			};
+
+			ethernet@34000 {
+				phy = <&phy2>;
+				phy-mode = "sgmii";	/* SGMII for DB-AP */
+			};
+
+			i2c0: i2c@11000 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			i2c1: i2c@11100 {
+				status = "okay";
+				clock-frequency = <100000>;
+			};
+
+			mdio {
+				phy0: ethernet-phy@0 {
+					reg = <0>;	/* PHY address = 0x0 for DB-BP, DB-GP, & DB-381 */
+				};
+
+				phy1: ethernet-phy@1 {
+					reg = <1>;	/* PHY address = 0x1 for DB-BP, DB-GP, & DB-381 */
+				};
+
+				phy2: ethernet-phy@2 {
+					reg = <4>;	/* PHY address = 0x4 for DB-AP */
+				};
+			};
+
+			sdhci@d8000 {
+				broken-cd;
+				wp-inverted;
+				bus-width = <8>;
+				no-1-8-v;
+			};
+
+			spi0: spi@10600 {
+				spi-flash@0 {
+					#address-cells = <1>;
+					#size-cells = <1>;
+					compatible = "w25q32";
+					reg = <0>; /* Chip select 0 */
+					spi-max-frequency = <108000000>;
+				};
+			};
+
+			/*
+			 * 1GB Flash via NFC NAND controller
+			 * should be disabled when the board boots
+			 * from SPI flash, since NFC shares the same
+			 * pins with SPI0 and requires SLM-1358 jumper.
+			 * However the u-boot DTB parser will
+			 * handle this situation and disable/remove
+			 * unnessesary devices according to board
+			 * boot-up configuration.
+			 */
+			nfc: nand@d0000 {
+				#address-cells = <1>;
+				#size-cells = <1>;
+
+				nfc,nfc-mode  = "normal";	/* normal or ganged */
+				nfc,nfc-dma   = <0>;		/* 0 for no, 1 for dma */
+				nfc,nfc-width = <8>;
+				nfc,ecc-type  = <1>;		/* 4 bit */
+				nfc,num-cs    = <1>;
+
+				mtd0@00000000 {
+					label = "U-Boot";
+					reg = <0x00000000 0x00600000>;
+					read-only;
+				};
+
+				mtd1@00080000 {
+					label = "uImage";
+					reg = <0x00600000 0x00400000>;
+					read-only;
+				};
+
+				mtd2@00140000 {
+					label = "Root";
+					reg = <0x00a00000 0x3f600000>;
+				};
+			};
+
+			usb@58000 {
+				status = "disabled";
+			};
+
+			usb3@f0000 {
+				status = "disabled";
+			};
+
+			usb3@f8000 {
+				status = "disabled";
+			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
+
+			pm_pic {
+				pinctrl-0 = <&pic_pins>;
+				pinctrl-names = "default";
+				ctrl-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>,	/* mpp33: default for DB-GP */
+					     <&gpio1 2 GPIO_ACTIVE_LOW>,	/* mpp34: default for DB-GP */
+					     <&gpio1 3 GPIO_ACTIVE_LOW>;	/* mpp35: default for DB-GP */
+			};
+
+			pinctrl {
+				pic_pins: pic-pins-0 {
+					marvell,pins = "mpp33", "mpp34", "mpp35";	/* default for DB-GP */
+					marvell,function = "gpio";
+				};
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts
index d6cc8bf8272e..ab48be83d7a8 100644
--- a/arch/arm/boot/dts/armada-xp-db.dts
+++ b/arch/arm/boot/dts/armada-xp-db.dts
@@ -14,7 +14,7 @@
  */
 
 /dts-v1/;
-/include/ "armada-xp-mv78460.dtsi"
+#include "armada-xp-mv78460.dtsi"
 
 / {
 	model = "Marvell Armada XP Evaluation Board";
@@ -30,21 +30,88 @@
 	};
 
 	soc {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
+			  MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
+			  MBUS_ID(0x01, 0x2f) 0 0 0xf0000000 0x1000000
+			  MBUS_ID(0x09, 0x01) 0 0 0xf1100000 0x10000	/* CESA0: PHYS=0xf1100000
+									   size 64K */
+			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000	/* CESA1: PHYS=0xf1110000
+									   size 64K */
+			  MBUS_ID(0x0c, 0x04) 0 0 0xf1200000 0x100000>;	/* PNC_BM: PHYS=0xf1200000
+									   size 1M, (PnC 512KB, BM 512KB) */
+
+		devbus-bootcs {
+			status = "okay";
+
+			/* Device Bus parameters are required */
+
+			/* Read parameters */
+			devbus,bus-width    = <8>;
+			devbus,turn-off-ps  = <60000>;
+			devbus,badr-skew-ps = <0>;
+			devbus,acc-first-ps = <124000>;
+			devbus,acc-next-ps  = <248000>;
+			devbus,rd-setup-ps  = <0>;
+			devbus,rd-hold-ps   = <0>;
+
+			/* Write parameters */
+			devbus,sync-enable = <0>;
+			devbus,wr-high-ps  = <60000>;
+			devbus,wr-low-ps   = <60000>;
+			devbus,ale-wr-ps   = <60000>;
+
+			/* NOR 16 MiB */
+			nor@0 {
+				compatible = "cfi-flash";
+				reg = <0 0x1000000>;
+				bank-width = <2>;
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+
+			/*
+			 * All 6 slots are physically present as
+			 * standard PCIe slots on the board.
+			 */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+			pcie@2,0 {
+				/* Port 0, Lane 1 */
+				status = "okay";
+			};
+			pcie@3,0 {
+				/* Port 0, Lane 2 */
+				status = "okay";
+			};
+			pcie@4,0 {
+				/* Port 0, Lane 3 */
+				status = "okay";
+			};
+			pcie@9,0 {
+				/* Port 2, Lane 0 */
+				status = "okay";
+			};
+			pcie@10,0 {
+				/* Port 3, Lane 0 */
+				status = "okay";
+			};
+		};
+
 		internal-regs {
 			serial@12000 {
-				clock-frequency = <250000000>;
 				status = "okay";
 			};
 			serial@12100 {
-				clock-frequency = <250000000>;
 				status = "okay";
 			};
 			serial@12200 {
-				clock-frequency = <250000000>;
 				status = "okay";
 			};
 			serial@12300 {
-				clock-frequency = <250000000>;
 				status = "okay";
 			};
 
@@ -97,6 +164,7 @@
 				pinctrl-names = "default";
 				status = "okay";
 				/* No CD or WP GPIOs */
+				broken-cd;
 			};
 
 			usb@50000 {
@@ -123,38 +191,39 @@
 				};
 			};
 
-			pcie-controller {
+			/* 1GB Flash via NFC NAND controller */
+			nfc: nand-flash@d0000 {
+				#address-cells = <1>;
+				#size-cells = <1>;
 				status = "okay";
 
-				/*
-				 * All 6 slots are physically present as
-				 * standard PCIe slots on the board.
-				 */
-				pcie@1,0 {
-					/* Port 0, Lane 0 */
-					status = "okay";
-				};
-				pcie@2,0 {
-					/* Port 0, Lane 1 */
-					status = "okay";
-				};
-				pcie@3,0 {
-					/* Port 0, Lane 2 */
-					status = "okay";
-				};
-				pcie@4,0 {
-					/* Port 0, Lane 3 */
-					status = "okay";
+				nfc,nfc-mode  = "normal";       /* normal or ganged */
+				nfc,nfc-dma   = <0>;            /* 0 for no, 1 for dma */
+				nfc,nfc-width = <8>;
+				nfc,ecc-type  = <1>;            /* 4 bit */
+				nfc,num-cs    = <1>;
+
+				mtd0@00000000 {
+					label = "U-Boot";
+					reg = <0x00000000 0x00300000>;
+					read-only;
 				};
-				pcie@9,0 {
-					/* Port 2, Lane 0 */
-					status = "okay";
+
+				mtd1@00080000 {
+					label = "uImage";
+					reg = <0x00300000 0x00400000>;
+					read-only;
 				};
-				pcie@10,0 {
-					/* Port 3, Lane 0 */
-					status = "okay";
+
+				mtd2@00140000 {
+					label = "Root";
+					reg = <0x00700000 0x3f900000>;
 				};
 			};
+
+			crypto@9D000 {
+				status = "okay";
+			};
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
index 76db557adbe7..681a784dd220 100644
--- a/arch/arm/boot/dts/armada-xp-gp.dts
+++ b/arch/arm/boot/dts/armada-xp-gp.dts
@@ -14,7 +14,8 @@
  */
 
 /dts-v1/;
-/include/ "armada-xp-mv78460.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include "armada-xp-mv78460.dtsi"
 
 / {
 	model = "Marvell Armada XP Development Board DB-MV784MP-GP";
@@ -38,29 +39,96 @@
 		      <0x00000001 0x00000000 0x00000001 0x00000000>;
 	};
 
+	cpus {
+		pm_pic {
+			ctrl-gpios = <&gpio0 16 GPIO_ACTIVE_LOW>,
+				     <&gpio0 17 GPIO_ACTIVE_LOW>,
+				     <&gpio0 18 GPIO_ACTIVE_LOW>;
+		};
+	};
+
 	soc {
-		ranges = <0          0 0xd0000000 0x100000  /* Internal registers 1MiB */
-			  0xe0000000 0 0xe0000000 0x8100000 /* PCIe */
-			  0xf0000000 0 0xf0000000 0x1000000 /* Device Bus, NOR 16MiB  */>;
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000
+			  MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
+			  MBUS_ID(0x01, 0x2f) 0 0 0xf0000000 0x1000000
+			  MBUS_ID(0x09, 0x01) 0 0 0xf1100000 0x10000	/* CESA0: PHYS=0xf1100000
+									   size 64K */
+			  MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000	/* CESA1: PHYS=0xf1110000
+									   size 64K */
+			  MBUS_ID(0x0c, 0x04) 0 0 0xf1200000 0x100000>;	/* PNC_BM: PHYS=0xf1200000
+									   size 1M, (PnC 512KB, BM 512KB) */
+
+		devbus-bootcs {
+			status = "okay";
+
+			/* Device Bus parameters are required */
+
+			/* Read parameters */
+			devbus,bus-width    = <8>;
+			devbus,turn-off-ps  = <60000>;
+			devbus,badr-skew-ps = <0>;
+			devbus,acc-first-ps = <124000>;
+			devbus,acc-next-ps  = <248000>;
+			devbus,rd-setup-ps  = <0>;
+			devbus,rd-hold-ps   = <0>;
+
+			/* Write parameters */
+			devbus,sync-enable = <0>;
+			devbus,wr-high-ps  = <60000>;
+			devbus,wr-low-ps   = <60000>;
+			devbus,ale-wr-ps   = <60000>;
+
+			/* NOR 16 MiB */
+			nor@0 {
+				compatible = "cfi-flash";
+				reg = <0 0x1000000>;
+				bank-width = <2>;
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+
+			/*
+			 * The 3 slots are physically present as
+			 * standard PCIe slots on the board.
+			 */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+			pcie@9,0 {
+				/* Port 2, Lane 0 */
+				status = "okay";
+			};
+			pcie@10,0 {
+				/* Port 3, Lane 0 */
+				status = "okay";
+			};
+		};
 
 		internal-regs {
 			serial@12000 {
-				clock-frequency = <250000000>;
 				status = "okay";
 			};
 			serial@12100 {
-				clock-frequency = <250000000>;
 				status = "okay";
 			};
 			serial@12200 {
-				clock-frequency = <250000000>;
 				status = "okay";
 			};
 			serial@12300 {
-				clock-frequency = <250000000>;
 				status = "okay";
 			};
-
+			pinctrl {
+				pinctrl-0 = <&pic_pins>;
+				pinctrl-names = "default";
+				pic_pins: pic-pins-0 {
+					marvell,pins = "mpp16", "mpp17",
+						       "mpp18";
+					marvell,function = "gpio";
+				};
+			};
 			sata@a0000 {
 				nr-ports = <2>;
 				status = "okay";
@@ -105,6 +173,16 @@
 				phy-mode = "rgmii-id";
 			};
 
+			/* Front-side USB slot */
+			usb@50000 {
+				status = "okay";
+			};
+
+			/* Back-side USB slot */
+			usb@51000 {
+				status = "okay";
+			};
+
 			spi0: spi@10600 {
 				status = "okay";
 
@@ -117,54 +195,45 @@
 				};
 			};
 
-			devbus-bootcs@10400 {
+			nand@d0000 {
+				num-cs = <1>;
+				marvell,nand-keep-config;
+				marvell,nand-enable-arbiter;
+				nand-on-flash-bbt;
+			};
+
+			/* 1GB Flash via NFC NAND controller */
+			nfc: nand-flash@d0000 {
+				#address-cells = <1>;
+				#size-cells = <1>;
 				status = "okay";
-				ranges = <0 0xf0000000 0x1000000>; /* @addr 0xf000000, size 0x1000000 */
 
-				/* Device Bus parameters are required */
+				nfc,nfc-mode  = "normal";       /* normal or ganged */
+				nfc,nfc-dma   = <0>;            /* 0 for no, 1 for dma */
+				nfc,nfc-width = <8>;
+				nfc,ecc-type  = <1>;            /* 4 bit */
+				nfc,num-cs    = <1>;
 
-				/* Read parameters */
-				devbus,bus-width    = <8>;
-				devbus,turn-off-ps  = <60000>;
-				devbus,badr-skew-ps = <0>;
-				devbus,acc-first-ps = <124000>;
-				devbus,acc-next-ps  = <248000>;
-				devbus,rd-setup-ps  = <0>;
-				devbus,rd-hold-ps   = <0>;
+				mtd0@00000000 {
+					label = "U-Boot";
+					reg = <0x00000000 0x00300000>;
+					read-only;
+				};
 
-				/* Write parameters */
-				devbus,sync-enable = <0>;
-				devbus,wr-high-ps  = <60000>;
-				devbus,wr-low-ps   = <60000>;
-				devbus,ale-wr-ps   = <60000>;
+				mtd1@00080000 {
+					label = "uImage";
+					reg = <0x00300000 0x00400000>;
+					read-only;
+				};
 
-				/* NOR 16 MiB */
-				nor@0 {
-					compatible = "cfi-flash";
-					reg = <0 0x1000000>;
-					bank-width = <2>;
+				mtd2@00140000 {
+					label = "Root";
+					reg = <0x00700000 0x3f900000>;
 				};
 			};
 
-			pcie-controller {
+			crypto@9D000 {
 				status = "okay";
-
-				/*
-				 * The 3 slots are physically present as
-				 * standard PCIe slots on the board.
-				 */
-				pcie@1,0 {
-					/* Port 0, Lane 0 */
-					status = "okay";
-				};
-				pcie@9,0 {
-					/* Port 2, Lane 0 */
-					status = "okay";
-				};
-				pcie@10,0 {
-					/* Port 3, Lane 0 */
-					status = "okay";
-				};
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
index f94cdbc579cb..b9f6e420e913 100644
--- a/arch/arm/boot/dts/armada-xp-mv78230.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78230.dtsi
@@ -13,7 +13,7 @@
  * common to all Armada XP SoCs.
  */
 
-/include/ "armada-xp.dtsi"
+#include "armada-xp.dtsi"
 
 / {
 	model = "Marvell Armada XP MV78230 SoC";
@@ -33,6 +33,7 @@
 			compatible = "marvell,sheeva-v7";
 			reg = <0>;
 			clocks = <&cpuclk 0>;
+			resets = <&cpurst 0>;
 		};
 
 		cpu@1 {
@@ -40,10 +41,130 @@
 			compatible = "marvell,sheeva-v7";
 			reg = <1>;
 			clocks = <&cpuclk 1>;
+			resets = <&cpurst 1>;
 		};
 	};
 
 	soc {
+		/*
+		 * MV78230 has 2 PCIe units Gen2.0: One unit can be
+		 * configured as x4 or quad x1 lanes. One unit is
+		 * x1 only.
+		 */
+		pcie-controller {
+			compatible = "marvell,armada-xp-pcie";
+			status = "disabled";
+			device_type = "pci";
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+
+				msi-parent = <&mpic>;
+			bus-range = <0x00 0xff>;
+
+			ranges =
+			       <0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000   /* Port 0.0 registers */
+				0x82000000 0 0x44000 MBUS_ID(0xf0, 0x01) 0x44000 0 0x00002000   /* Port 0.1 registers */
+				0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000   /* Port 0.2 registers */
+				0x82000000 0 0x4c000 MBUS_ID(0xf0, 0x01) 0x4c000 0 0x00002000   /* Port 0.3 registers */
+				0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000   /* Port 1.0 registers */
+				0x82000000 0x1 0       MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0.0 MEM */
+				0x81000000 0x1 0       MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 IO  */
+				0x82000000 0x2 0       MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 0.1 MEM */
+				0x81000000 0x2 0       MBUS_ID(0x04, 0xd0) 0 1 0 /* Port 0.1 IO  */
+				0x82000000 0x3 0       MBUS_ID(0x04, 0xb8) 0 1 0 /* Port 0.2 MEM */
+				0x81000000 0x3 0       MBUS_ID(0x04, 0xb0) 0 1 0 /* Port 0.2 IO  */
+				0x82000000 0x4 0       MBUS_ID(0x04, 0x78) 0 1 0 /* Port 0.3 MEM */
+				0x81000000 0x4 0       MBUS_ID(0x04, 0x70) 0 1 0 /* Port 0.3 IO  */
+				0x82000000 0x5 0       MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 MEM */
+				0x81000000 0x5 0       MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 1.0 IO  */>;
+
+			pcie@1,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
+				reg = <0x0800 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0
+					  0x81000000 0 0 0x81000000 0x1 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 58>;
+				marvell,pcie-port = <0>;
+				marvell,pcie-lane = <0>;
+				clocks = <&gateclk 5>;
+				status = "disabled";
+			};
+
+			pcie@2,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
+				reg = <0x1000 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0
+					  0x81000000 0 0 0x81000000 0x2 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 59>;
+				marvell,pcie-port = <0>;
+				marvell,pcie-lane = <1>;
+				clocks = <&gateclk 6>;
+				status = "disabled";
+			};
+
+			pcie@3,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
+				reg = <0x1800 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x3 0 1 0
+					  0x81000000 0 0 0x81000000 0x3 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 60>;
+				marvell,pcie-port = <0>;
+				marvell,pcie-lane = <2>;
+				clocks = <&gateclk 7>;
+				status = "disabled";
+			};
+
+			pcie@4,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>;
+				reg = <0x2000 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x4 0 1 0
+					  0x81000000 0 0 0x81000000 0x4 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 61>;
+				marvell,pcie-port = <0>;
+				marvell,pcie-lane = <3>;
+				clocks = <&gateclk 8>;
+				status = "disabled";
+			};
+
+			pcie@5,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
+				reg = <0x2800 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x5 0 1 0
+					  0x81000000 0 0 0x81000000 0x5 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 62>;
+				marvell,pcie-port = <1>;
+				marvell,pcie-lane = <0>;
+				clocks = <&gateclk 9>;
+				status = "disabled";
+			};
+		};
+
 		internal-regs {
 			pinctrl {
 				compatible = "marvell,mv78230-pinctrl";
@@ -63,7 +184,7 @@
 				gpio-controller;
 				#gpio-cells = <2>;
 				interrupt-controller;
-				#interrupts-cells = <2>;
+				#interrupt-cells = <2>;
 				interrupts = <82>, <83>, <84>, <85>;
 			};
 
@@ -74,113 +195,9 @@
 				gpio-controller;
 				#gpio-cells = <2>;
 				interrupt-controller;
-				#interrupts-cells = <2>;
+				#interrupt-cells = <2>;
 				interrupts = <87>, <88>, <89>;
 			};
-
-			/*
-			 * MV78230 has 2 PCIe units Gen2.0: One unit can be
-			 * configured as x4 or quad x1 lanes. One unit is
-			 * x1 only.
-			 */
-			pcie-controller {
-				compatible = "marvell,armada-xp-pcie";
-				status = "disabled";
-				device_type = "pci";
-
-#address-cells = <3>;
-#size-cells = <2>;
-
-				bus-range = <0x00 0xff>;
-
-				ranges = <0x82000000 0 0x40000 0x40000 0 0x00002000   /* Port 0.0 registers */
-					0x82000000 0 0x44000 0x44000 0 0x00002000   /* Port 0.1 registers */
-					0x82000000 0 0x48000 0x48000 0 0x00002000   /* Port 0.2 registers */
-					0x82000000 0 0x4c000 0x4c000 0 0x00002000   /* Port 0.3 registers */
-					0x82000000 0 0x80000 0x80000 0 0x00002000   /* Port 1.0 registers */
-					0x82000000 0 0xe0000000 0xe0000000 0 0x08000000   /* non-prefetchable memory */
-					0x81000000 0 0	  0xe8000000 0 0x00100000>; /* downstream I/O */
-
-				pcie@1,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
-					reg = <0x0800 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 58>;
-					marvell,pcie-port = <0>;
-					marvell,pcie-lane = <0>;
-					clocks = <&gateclk 5>;
-					status = "disabled";
-				};
-
-				pcie@2,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
-					reg = <0x1000 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 59>;
-					marvell,pcie-port = <0>;
-					marvell,pcie-lane = <1>;
-					clocks = <&gateclk 6>;
-					status = "disabled";
-				};
-
-				pcie@3,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
-					reg = <0x1800 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 60>;
-					marvell,pcie-port = <0>;
-					marvell,pcie-lane = <2>;
-					clocks = <&gateclk 7>;
-					status = "disabled";
-				};
-
-				pcie@4,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>;
-					reg = <0x2000 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 61>;
-					marvell,pcie-port = <0>;
-					marvell,pcie-lane = <3>;
-					clocks = <&gateclk 8>;
-					status = "disabled";
-				};
-
-				pcie@5,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
-					reg = <0x2800 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 62>;
-					marvell,pcie-port = <1>;
-					marvell,pcie-lane = <0>;
-					clocks = <&gateclk 9>;
-					status = "disabled";
-				};
-			};
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
index 55cdd58c155f..34276c23b31c 100644
--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
@@ -13,7 +13,7 @@
  * common to all Armada XP SoCs.
  */
 
-/include/ "armada-xp.dtsi"
+#include "armada-xp.dtsi"
 
 / {
 	model = "Marvell Armada XP MV78260 SoC";
@@ -34,6 +34,7 @@
 			compatible = "marvell,sheeva-v7";
 			reg = <0>;
 			clocks = <&cpuclk 0>;
+			resets = <&cpurst 0>;
 		};
 
 		cpu@1 {
@@ -41,10 +42,212 @@
 			compatible = "marvell,sheeva-v7";
 			reg = <1>;
 			clocks = <&cpuclk 1>;
+			resets = <&cpurst 1>;
 		};
 	};
 
 	soc {
+		/*
+		 * MV78260 has 3 PCIe units Gen2.0: Two units can be
+		 * configured as x4 or quad x1 lanes. One unit is
+		 * x4 only.
+		 */
+		pcie-controller {
+			compatible = "marvell,armada-xp-pcie";
+			status = "disabled";
+			device_type = "pci";
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+
+				msi-parent = <&mpic>;
+			bus-range = <0x00 0xff>;
+
+			ranges =
+			       <0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000   /* Port 0.0 registers */
+				0x82000000 0 0x42000 MBUS_ID(0xf0, 0x01) 0x42000 0 0x00002000   /* Port 2.0 registers */
+				0x82000000 0 0x44000 MBUS_ID(0xf0, 0x01) 0x44000 0 0x00002000   /* Port 0.1 registers */
+				0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000   /* Port 0.2 registers */
+				0x82000000 0 0x4c000 MBUS_ID(0xf0, 0x01) 0x4c000 0 0x00002000   /* Port 0.3 registers */
+				0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000   /* Port 1.0 registers */
+				0x82000000 0 0x84000 MBUS_ID(0xf0, 0x01) 0x84000 0 0x00002000   /* Port 1.1 registers */
+				0x82000000 0 0x88000 MBUS_ID(0xf0, 0x01) 0x88000 0 0x00002000   /* Port 1.2 registers */
+				0x82000000 0 0x8c000 MBUS_ID(0xf0, 0x01) 0x8c000 0 0x00002000   /* Port 1.3 registers */
+				0x82000000 0x1 0     MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0.0 MEM */
+				0x81000000 0x1 0     MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 IO  */
+				0x82000000 0x2 0     MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 0.1 MEM */
+				0x81000000 0x2 0     MBUS_ID(0x04, 0xd0) 0 1 0 /* Port 0.1 IO  */
+				0x82000000 0x3 0     MBUS_ID(0x04, 0xb8) 0 1 0 /* Port 0.2 MEM */
+				0x81000000 0x3 0     MBUS_ID(0x04, 0xb0) 0 1 0 /* Port 0.2 IO  */
+				0x82000000 0x4 0     MBUS_ID(0x04, 0x78) 0 1 0 /* Port 0.3 MEM */
+				0x81000000 0x4 0     MBUS_ID(0x04, 0x70) 0 1 0 /* Port 0.3 IO  */
+
+				0x82000000 0x5 0     MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 MEM */
+				0x81000000 0x5 0     MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 1.0 IO  */
+				0x82000000 0x6 0     MBUS_ID(0x08, 0xd8) 0 1 0 /* Port 1.1 MEM */
+				0x81000000 0x6 0     MBUS_ID(0x08, 0xd0) 0 1 0 /* Port 1.1 IO  */
+				0x82000000 0x7 0     MBUS_ID(0x08, 0xb8) 0 1 0 /* Port 1.2 MEM */
+				0x81000000 0x7 0     MBUS_ID(0x08, 0xb0) 0 1 0 /* Port 1.2 IO  */
+				0x82000000 0x8 0     MBUS_ID(0x08, 0x78) 0 1 0 /* Port 1.3 MEM */
+				0x81000000 0x8 0     MBUS_ID(0x08, 0x70) 0 1 0 /* Port 1.3 IO  */
+
+				0x82000000 0x9 0     MBUS_ID(0x04, 0xf8) 0 1 0 /* Port 2.0 MEM */
+				0x81000000 0x9 0     MBUS_ID(0x04, 0xf0) 0 1 0 /* Port 2.0 IO  */>;
+
+			pcie@1,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
+				reg = <0x0800 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0
+					  0x81000000 0 0 0x81000000 0x1 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 58>;
+				marvell,pcie-port = <0>;
+				marvell,pcie-lane = <0>;
+				clocks = <&gateclk 5>;
+				status = "disabled";
+			};
+
+			pcie@2,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
+				reg = <0x1000 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0
+					  0x81000000 0 0 0x81000000 0x2 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 59>;
+				marvell,pcie-port = <0>;
+				marvell,pcie-lane = <1>;
+				clocks = <&gateclk 6>;
+				status = "disabled";
+			};
+
+			pcie@3,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
+				reg = <0x1800 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x3 0 1 0
+					  0x81000000 0 0 0x81000000 0x3 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 60>;
+				marvell,pcie-port = <0>;
+				marvell,pcie-lane = <2>;
+				clocks = <&gateclk 7>;
+				status = "disabled";
+			};
+
+			pcie@4,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>;
+				reg = <0x2000 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x4 0 1 0
+					  0x81000000 0 0 0x81000000 0x4 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 61>;
+				marvell,pcie-port = <0>;
+				marvell,pcie-lane = <3>;
+				clocks = <&gateclk 8>;
+				status = "disabled";
+			};
+
+			pcie@5,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
+				reg = <0x2800 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x5 0 1 0
+					  0x81000000 0 0 0x81000000 0x5 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 62>;
+				marvell,pcie-port = <1>;
+				marvell,pcie-lane = <0>;
+				clocks = <&gateclk 9>;
+				status = "disabled";
+			};
+
+			pcie@6,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x84000 0 0x2000>;
+				reg = <0x3000 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x6 0 1 0
+					  0x81000000 0 0 0x81000000 0x6 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 63>;
+				marvell,pcie-port = <1>;
+				marvell,pcie-lane = <1>;
+				clocks = <&gateclk 10>;
+				status = "disabled";
+			};
+
+			pcie@7,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x88000 0 0x2000>;
+				reg = <0x3800 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x7 0 1 0
+					  0x81000000 0 0 0x81000000 0x7 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 64>;
+				marvell,pcie-port = <1>;
+				marvell,pcie-lane = <2>;
+				clocks = <&gateclk 11>;
+				status = "disabled";
+			};
+
+			pcie@8,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x8c000 0 0x2000>;
+				reg = <0x4000 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x8 0 1 0
+					  0x81000000 0 0 0x81000000 0x8 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 65>;
+				marvell,pcie-port = <1>;
+				marvell,pcie-lane = <3>;
+				clocks = <&gateclk 12>;
+				status = "disabled";
+			};
+
+			pcie@9,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x42000 0 0x2000>;
+				reg = <0x4800 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x9 0 1 0
+					  0x81000000 0 0 0x81000000 0x9 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 99>;
+				marvell,pcie-port = <2>;
+				marvell,pcie-lane = <0>;
+				clocks = <&gateclk 26>;
+				status = "disabled";
+			};
+		};
+
 		internal-regs {
 			pinctrl {
 				compatible = "marvell,mv78260-pinctrl";
@@ -64,7 +267,7 @@
 				gpio-controller;
 				#gpio-cells = <2>;
 				interrupt-controller;
-				#interrupts-cells = <2>;
+				#interrupt-cells = <2>;
 				interrupts = <82>, <83>, <84>, <85>;
 			};
 
@@ -75,7 +278,7 @@
 				gpio-controller;
 				#gpio-cells = <2>;
 				interrupt-controller;
-				#interrupts-cells = <2>;
+				#interrupt-cells = <2>;
 				interrupts = <87>, <88>, <89>, <90>;
 			};
 
@@ -86,188 +289,19 @@
 				gpio-controller;
 				#gpio-cells = <2>;
 				interrupt-controller;
-				#interrupts-cells = <2>;
+				#interrupt-cells = <2>;
 				interrupts = <91>;
 			};
 
-			ethernet@34000 {
-				compatible = "marvell,armada-370-neta";
-				reg = <0x34000 0x2500>;
+			eth3: ethernet@34000 {
+				compatible = "marvell,neta";
+				reg = <0x34000 0x4000>;
 				interrupts = <14>;
 				clocks = <&gateclk 1>;
 				status = "disabled";
-			};
-
-			/*
-			 * MV78260 has 3 PCIe units Gen2.0: Two units can be
-			 * configured as x4 or quad x1 lanes. One unit is
-			 * x4 only.
-			 */
-			pcie-controller {
-				compatible = "marvell,armada-xp-pcie";
-				status = "disabled";
-				device_type = "pci";
-
-				#address-cells = <3>;
-				#size-cells = <2>;
-
-				bus-range = <0x00 0xff>;
-
-				ranges = <0x82000000 0 0x40000 0x40000 0 0x00002000   /* Port 0.0 registers */
-					0x82000000 0 0x42000 0x42000 0 0x00002000   /* Port 2.0 registers */
-					0x82000000 0 0x44000 0x44000 0 0x00002000   /* Port 0.1 registers */
-					0x82000000 0 0x48000 0x48000 0 0x00002000   /* Port 0.2 registers */
-					0x82000000 0 0x4c000 0x4c000 0 0x00002000   /* Port 0.3 registers */
-					0x82000000 0 0x80000 0x80000 0 0x00002000   /* Port 1.0 registers */
-					0x82000000 0 0x84000 0x84000 0 0x00002000   /* Port 1.1 registers */
-					0x82000000 0 0x88000 0x88000 0 0x00002000   /* Port 1.2 registers */
-					0x82000000 0 0x8c000 0x8c000 0 0x00002000   /* Port 1.3 registers */
-					0x82000000 0 0xe0000000 0xe0000000 0 0x08000000   /* non-prefetchable memory */
-					0x81000000 0 0	  0xe8000000 0 0x00100000>; /* downstream I/O */
-
-				pcie@1,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
-					reg = <0x0800 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 58>;
-					marvell,pcie-port = <0>;
-					marvell,pcie-lane = <0>;
-					clocks = <&gateclk 5>;
-					status = "disabled";
-				};
-
-				pcie@2,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82000800 0 0x44000 0 0x2000>;
-					reg = <0x1000 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 59>;
-					marvell,pcie-port = <0>;
-					marvell,pcie-lane = <1>;
-					clocks = <&gateclk 6>;
-					status = "disabled";
-				};
-
-				pcie@3,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82000800 0 0x48000 0 0x2000>;
-					reg = <0x1800 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 60>;
-					marvell,pcie-port = <0>;
-					marvell,pcie-lane = <2>;
-					clocks = <&gateclk 7>;
-					status = "disabled";
-				};
-
-				pcie@4,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82000800 0 0x4c000 0 0x2000>;
-					reg = <0x2000 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 61>;
-					marvell,pcie-port = <0>;
-					marvell,pcie-lane = <3>;
-					clocks = <&gateclk 8>;
-					status = "disabled";
-				};
-
-				pcie@5,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82000800 0 0x80000 0 0x2000>;
-					reg = <0x2800 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 62>;
-					marvell,pcie-port = <1>;
-					marvell,pcie-lane = <0>;
-					clocks = <&gateclk 9>;
-					status = "disabled";
-				};
-
-				pcie@6,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82000800 0 0x84000 0 0x2000>;
-					reg = <0x3000 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 63>;
-					marvell,pcie-port = <1>;
-					marvell,pcie-lane = <1>;
-					clocks = <&gateclk 10>;
-					status = "disabled";
-				};
-
-				pcie@7,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82000800 0 0x88000 0 0x2000>;
-					reg = <0x3800 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 64>;
-					marvell,pcie-port = <1>;
-					marvell,pcie-lane = <2>;
-					clocks = <&gateclk 11>;
-					status = "disabled";
-				};
-
-				pcie@8,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82000800 0 0x8c000 0 0x2000>;
-					reg = <0x4000 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 65>;
-					marvell,pcie-port = <1>;
-					marvell,pcie-lane = <3>;
-					clocks = <&gateclk 12>;
-					status = "disabled";
-				};
-
-				pcie@9,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82000800 0 0x42000 0 0x2000>;
-					reg = <0x4800 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 99>;
-					marvell,pcie-port = <2>;
-					marvell,pcie-lane = <0>;
-					clocks = <&gateclk 26>;
-					status = "disabled";
-				};
+				mac-address = [ 00 50 43 02 02 03 ];
+				eth,port-num    = <3>;
+				eth,port-mtu    = <1500>;
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/armada-xp-mv78460.dtsi b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
index 6ab56bd35de9..2baeac624544 100644
--- a/arch/arm/boot/dts/armada-xp-mv78460.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
@@ -13,7 +13,7 @@
  * common to all Armada XP SoCs.
  */
 
-/include/ "armada-xp.dtsi"
+#include "armada-xp.dtsi"
 
 / {
 	model = "Marvell Armada XP MV78460 SoC";
@@ -23,6 +23,7 @@
 		gpio0 = &gpio0;
 		gpio1 = &gpio1;
 		gpio2 = &gpio2;
+		eth3 = &eth3;
 	};
 
 
@@ -35,6 +36,7 @@
 			compatible = "marvell,sheeva-v7";
 			reg = <0>;
 			clocks = <&cpuclk 0>;
+			resets = <&cpurst 0>;
 		};
 
 		cpu@1 {
@@ -42,6 +44,7 @@
 			compatible = "marvell,sheeva-v7";
 			reg = <1>;
 			clocks = <&cpuclk 1>;
+			resets = <&cpurst 1>;
 		};
 
 		cpu@2 {
@@ -49,6 +52,7 @@
 			compatible = "marvell,sheeva-v7";
 			reg = <2>;
 			clocks = <&cpuclk 2>;
+			resets = <&cpurst 2>;
 		};
 
 		cpu@3 {
@@ -56,10 +60,233 @@
 			compatible = "marvell,sheeva-v7";
 			reg = <3>;
 			clocks = <&cpuclk 3>;
+			resets = <&cpurst 3>;
 		};
 	};
 
 	soc {
+		/*
+		 * MV78460 has 4 PCIe units Gen2.0: Two units can be
+		 * configured as x4 or quad x1 lanes. Two units are
+		 * x4/x1.
+		 */
+		pcie-controller {
+			compatible = "marvell,armada-xp-pcie";
+			status = "disabled";
+			device_type = "pci";
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+
+				msi-parent = <&mpic>;
+			bus-range = <0x00 0xff>;
+
+			ranges =
+			       <0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000   /* Port 0.0 registers */
+				0x82000000 0 0x42000 MBUS_ID(0xf0, 0x01) 0x42000 0 0x00002000   /* Port 2.0 registers */
+				0x82000000 0 0x44000 MBUS_ID(0xf0, 0x01) 0x44000 0 0x00002000   /* Port 0.1 registers */
+				0x82000000 0 0x48000 MBUS_ID(0xf0, 0x01) 0x48000 0 0x00002000   /* Port 0.2 registers */
+				0x82000000 0 0x4c000 MBUS_ID(0xf0, 0x01) 0x4c000 0 0x00002000   /* Port 0.3 registers */
+				0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000   /* Port 1.0 registers */
+				0x82000000 0 0x82000 MBUS_ID(0xf0, 0x01) 0x82000 0 0x00002000   /* Port 3.0 registers */
+				0x82000000 0 0x84000 MBUS_ID(0xf0, 0x01) 0x84000 0 0x00002000   /* Port 1.1 registers */
+				0x82000000 0 0x88000 MBUS_ID(0xf0, 0x01) 0x88000 0 0x00002000   /* Port 1.2 registers */
+				0x82000000 0 0x8c000 MBUS_ID(0xf0, 0x01) 0x8c000 0 0x00002000   /* Port 1.3 registers */
+				0x82000000 0x1 0     MBUS_ID(0x04, 0xe8) 0 1 0 /* Port 0.0 MEM */
+				0x81000000 0x1 0     MBUS_ID(0x04, 0xe0) 0 1 0 /* Port 0.0 IO  */
+				0x82000000 0x2 0     MBUS_ID(0x04, 0xd8) 0 1 0 /* Port 0.1 MEM */
+				0x81000000 0x2 0     MBUS_ID(0x04, 0xd0) 0 1 0 /* Port 0.1 IO  */
+				0x82000000 0x3 0     MBUS_ID(0x04, 0xb8) 0 1 0 /* Port 0.2 MEM */
+				0x81000000 0x3 0     MBUS_ID(0x04, 0xb0) 0 1 0 /* Port 0.2 IO  */
+				0x82000000 0x4 0     MBUS_ID(0x04, 0x78) 0 1 0 /* Port 0.3 MEM */
+				0x81000000 0x4 0     MBUS_ID(0x04, 0x70) 0 1 0 /* Port 0.3 IO  */
+
+				0x82000000 0x5 0     MBUS_ID(0x08, 0xe8) 0 1 0 /* Port 1.0 MEM */
+				0x81000000 0x5 0     MBUS_ID(0x08, 0xe0) 0 1 0 /* Port 1.0 IO  */
+				0x82000000 0x6 0     MBUS_ID(0x08, 0xd8) 0 1 0 /* Port 1.1 MEM */
+				0x81000000 0x6 0     MBUS_ID(0x08, 0xd0) 0 1 0 /* Port 1.1 IO  */
+				0x82000000 0x7 0     MBUS_ID(0x08, 0xb8) 0 1 0 /* Port 1.2 MEM */
+				0x81000000 0x7 0     MBUS_ID(0x08, 0xb0) 0 1 0 /* Port 1.2 IO  */
+				0x82000000 0x8 0     MBUS_ID(0x08, 0x78) 0 1 0 /* Port 1.3 MEM */
+				0x81000000 0x8 0     MBUS_ID(0x08, 0x70) 0 1 0 /* Port 1.3 IO  */
+
+				0x82000000 0x9 0     MBUS_ID(0x04, 0xf8) 0 1 0 /* Port 2.0 MEM */
+				0x81000000 0x9 0     MBUS_ID(0x04, 0xf0) 0 1 0 /* Port 2.0 IO  */
+
+				0x82000000 0xa 0     MBUS_ID(0x08, 0xf8) 0 1 0 /* Port 3.0 MEM */
+				0x81000000 0xa 0     MBUS_ID(0x08, 0xf0) 0 1 0 /* Port 3.0 IO  */>;
+
+			pcie@1,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
+				reg = <0x0800 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0
+					  0x81000000 0 0 0x81000000 0x1 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 58>;
+				marvell,pcie-port = <0>;
+				marvell,pcie-lane = <0>;
+				clocks = <&gateclk 5>;
+				status = "disabled";
+			};
+
+			pcie@2,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82001000 0 0x44000 0 0x2000>;
+				reg = <0x1000 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0
+					  0x81000000 0 0 0x81000000 0x2 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 59>;
+				marvell,pcie-port = <0>;
+				marvell,pcie-lane = <1>;
+				clocks = <&gateclk 6>;
+				status = "disabled";
+			};
+
+			pcie@3,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82001800 0 0x48000 0 0x2000>;
+				reg = <0x1800 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x3 0 1 0
+					  0x81000000 0 0 0x81000000 0x3 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 60>;
+				marvell,pcie-port = <0>;
+				marvell,pcie-lane = <2>;
+				clocks = <&gateclk 7>;
+				status = "disabled";
+			};
+
+			pcie@4,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>;
+				reg = <0x2000 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x4 0 1 0
+					  0x81000000 0 0 0x81000000 0x4 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 61>;
+				marvell,pcie-port = <0>;
+				marvell,pcie-lane = <3>;
+				clocks = <&gateclk 8>;
+				status = "disabled";
+			};
+
+			pcie@5,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
+				reg = <0x2800 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x5 0 1 0
+					  0x81000000 0 0 0x81000000 0x5 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 62>;
+				marvell,pcie-port = <1>;
+				marvell,pcie-lane = <0>;
+				clocks = <&gateclk 9>;
+				status = "disabled";
+			};
+
+			pcie@6,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82003000 0 0x84000 0 0x2000>;
+				reg = <0x3000 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x6 0 1 0
+					  0x81000000 0 0 0x81000000 0x6 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 63>;
+				marvell,pcie-port = <1>;
+				marvell,pcie-lane = <1>;
+				clocks = <&gateclk 10>;
+				status = "disabled";
+			};
+
+			pcie@7,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82003800 0 0x88000 0 0x2000>;
+				reg = <0x3800 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x7 0 1 0
+					  0x81000000 0 0 0x81000000 0x7 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 64>;
+				marvell,pcie-port = <1>;
+				marvell,pcie-lane = <2>;
+				clocks = <&gateclk 11>;
+				status = "disabled";
+			};
+
+			pcie@8,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82004000 0 0x8c000 0 0x2000>;
+				reg = <0x4000 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x8 0 1 0
+					  0x81000000 0 0 0x81000000 0x8 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 65>;
+				marvell,pcie-port = <1>;
+				marvell,pcie-lane = <3>;
+				clocks = <&gateclk 12>;
+				status = "disabled";
+			};
+
+			pcie@9,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82004800 0 0x42000 0 0x2000>;
+				reg = <0x4800 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x9 0 1 0
+					  0x81000000 0 0 0x81000000 0x9 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 99>;
+				marvell,pcie-port = <2>;
+				marvell,pcie-lane = <0>;
+				clocks = <&gateclk 26>;
+				status = "disabled";
+			};
+
+			pcie@10,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82005000 0 0x82000 0 0x2000>;
+				reg = <0x5000 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0xa 0 1 0
+					  0x81000000 0 0 0x81000000 0xa 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &mpic 103>;
+				marvell,pcie-port = <3>;
+				marvell,pcie-lane = <0>;
+				clocks = <&gateclk 27>;
+				status = "disabled";
+			};
+		};
+
 		internal-regs {
 			pinctrl {
 				compatible = "marvell,mv78460-pinctrl";
@@ -79,7 +306,7 @@
 				gpio-controller;
 				#gpio-cells = <2>;
 				interrupt-controller;
-				#interrupts-cells = <2>;
+				#interrupt-cells = <2>;
 				interrupts = <82>, <83>, <84>, <85>;
 			};
 
@@ -90,7 +317,7 @@
 				gpio-controller;
 				#gpio-cells = <2>;
 				interrupt-controller;
-				#interrupts-cells = <2>;
+				#interrupt-cells = <2>;
 				interrupts = <87>, <88>, <89>, <90>;
 			};
 
@@ -101,204 +328,19 @@
 				gpio-controller;
 				#gpio-cells = <2>;
 				interrupt-controller;
-				#interrupts-cells = <2>;
+				#interrupt-cells = <2>;
 				interrupts = <91>;
 			};
 
-			ethernet@34000 {
-				compatible = "marvell,armada-370-neta";
-				reg = <0x34000 0x2500>;
+			eth3: ethernet@34000 {
+				compatible = "marvell,neta";
+				reg = <0x34000 0x4000>;
 				interrupts = <14>;
 				clocks = <&gateclk 1>;
 				status = "disabled";
-			};
-
-			/*
-			 * MV78460 has 4 PCIe units Gen2.0: Two units can be
-			 * configured as x4 or quad x1 lanes. Two units are
-			 * x4/x1.
-			 */
-			pcie-controller {
-				compatible = "marvell,armada-xp-pcie";
-				status = "disabled";
-				device_type = "pci";
-
-				#address-cells = <3>;
-				#size-cells = <2>;
-
-				bus-range = <0x00 0xff>;
-
-				ranges = <0x82000000 0 0x40000 0x40000 0 0x00002000   /* Port 0.0 registers */
-					0x82000000 0 0x42000 0x42000 0 0x00002000   /* Port 2.0 registers */
-					0x82000000 0 0x44000 0x44000 0 0x00002000   /* Port 0.1 registers */
-					0x82000000 0 0x48000 0x48000 0 0x00002000   /* Port 0.2 registers */
-					0x82000000 0 0x4c000 0x4c000 0 0x00002000   /* Port 0.3 registers */
-					0x82000000 0 0x80000 0x80000 0 0x00002000   /* Port 1.0 registers */
-					0x82000000 0 0x82000 0x82000 0 0x00002000   /* Port 3.0 registers */
-					0x82000000 0 0x84000 0x84000 0 0x00002000   /* Port 1.1 registers */
-					0x82000000 0 0x88000 0x88000 0 0x00002000   /* Port 1.2 registers */
-					0x82000000 0 0x8c000 0x8c000 0 0x00002000   /* Port 1.3 registers */
-					0x82000000 0 0xe0000000 0xe0000000 0 0x08000000   /* non-prefetchable memory */
-					0x81000000 0 0	  0xe8000000 0 0x00100000>; /* downstream I/O */
-
-				pcie@1,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82000800 0 0x40000 0 0x2000>;
-					reg = <0x0800 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 58>;
-					marvell,pcie-port = <0>;
-					marvell,pcie-lane = <0>;
-					clocks = <&gateclk 5>;
-					status = "disabled";
-				};
-
-				pcie@2,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82001000 0 0x44000 0 0x2000>;
-					reg = <0x1000 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 59>;
-					marvell,pcie-port = <0>;
-					marvell,pcie-lane = <1>;
-					clocks = <&gateclk 6>;
-					status = "disabled";
-				};
-
-				pcie@3,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82001800 0 0x48000 0 0x2000>;
-					reg = <0x1800 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 60>;
-					marvell,pcie-port = <0>;
-					marvell,pcie-lane = <2>;
-					clocks = <&gateclk 7>;
-					status = "disabled";
-				};
-
-				pcie@4,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82002000 0 0x4c000 0 0x2000>;
-					reg = <0x2000 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 61>;
-					marvell,pcie-port = <0>;
-					marvell,pcie-lane = <3>;
-					clocks = <&gateclk 8>;
-					status = "disabled";
-				};
-
-				pcie@5,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82002800 0 0x80000 0 0x2000>;
-					reg = <0x2800 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 62>;
-					marvell,pcie-port = <1>;
-					marvell,pcie-lane = <0>;
-					clocks = <&gateclk 9>;
-					status = "disabled";
-				};
-
-				pcie@6,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82003000 0 0x84000 0 0x2000>;
-					reg = <0x3000 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 63>;
-					marvell,pcie-port = <1>;
-					marvell,pcie-lane = <1>;
-					clocks = <&gateclk 10>;
-					status = "disabled";
-				};
-
-				pcie@7,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82003800 0 0x88000 0 0x2000>;
-					reg = <0x3800 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 64>;
-					marvell,pcie-port = <1>;
-					marvell,pcie-lane = <2>;
-					clocks = <&gateclk 11>;
-					status = "disabled";
-				};
-
-				pcie@8,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82004000 0 0x8c000 0 0x2000>;
-					reg = <0x4000 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 65>;
-					marvell,pcie-port = <1>;
-					marvell,pcie-lane = <3>;
-					clocks = <&gateclk 12>;
-					status = "disabled";
-				};
-				pcie@9,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82004800 0 0x42000 0 0x2000>;
-					reg = <0x4800 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 99>;
-					marvell,pcie-port = <2>;
-					marvell,pcie-lane = <0>;
-					clocks = <&gateclk 26>;
-					status = "disabled";
-				};
-
-				pcie@10,0 {
-					device_type = "pci";
-					assigned-addresses = <0x82005000 0 0x82000 0 0x2000>;
-					reg = <0x5000 0 0 0 0>;
-					#address-cells = <3>;
-					#size-cells = <2>;
-					#interrupt-cells = <1>;
-					ranges;
-					interrupt-map-mask = <0 0 0 0>;
-					interrupt-map = <0 0 0 0 &mpic 103>;
-					marvell,pcie-port = <3>;
-					marvell,pcie-lane = <0>;
-					clocks = <&gateclk 27>;
-					status = "disabled";
-				};
+				mac-address = [ 00 50 43 02 02 03 ];
+				eth,port-num    = <3>;
+				eth,port-mtu    = <1500>;
 			};
 		};
 	};
diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
index fdea75c73411..9c87d2753197 100644
--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
@@ -11,7 +11,7 @@
  */
 
 /dts-v1/;
-/include/ "armada-xp-mv78260.dtsi"
+#include "armada-xp-mv78260.dtsi"
 
 / {
 	model = "PlatHome OpenBlocks AX3-4 board";
@@ -27,17 +27,52 @@
 	};
 
 	soc {
-		ranges = <0          0 0xd0000000 0x100000	/* Internal registers 1MiB */
-			  0xe0000000 0 0xe0000000 0x8100000     /* PCIe */
-			  0xf0000000 0 0xf0000000 0x8000000     /* Device Bus, NOR 128MiB   */>;
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xd0000000 0x100000
+			  MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000
+			  MBUS_ID(0x01, 0x2f) 0 0 0xf0000000 0x8000000>;
+
+		devbus-bootcs {
+			status = "okay";
+
+			/* Device Bus parameters are required */
+
+			/* Read parameters */
+			devbus,bus-width    = <8>;
+			devbus,turn-off-ps  = <60000>;
+			devbus,badr-skew-ps = <0>;
+			devbus,acc-first-ps = <124000>;
+			devbus,acc-next-ps  = <248000>;
+			devbus,rd-setup-ps  = <0>;
+			devbus,rd-hold-ps   = <0>;
+
+			/* Write parameters */
+			devbus,sync-enable = <0>;
+			devbus,wr-high-ps  = <60000>;
+			devbus,wr-low-ps   = <60000>;
+			devbus,ale-wr-ps   = <60000>;
+
+			/* NOR 128 MiB */
+			nor@0 {
+				compatible = "cfi-flash";
+				reg = <0 0x8000000>;
+				bank-width = <2>;
+			};
+		};
+
+		pcie-controller {
+			status = "okay";
+			/* Internal mini-PCIe connector */
+			pcie@1,0 {
+				/* Port 0, Lane 0 */
+				status = "okay";
+			};
+		};
 
 		internal-regs {
 			serial@12000 {
-				clock-frequency = <250000000>;
 				status = "okay";
 			};
 			serial@12100 {
-				clock-frequency = <250000000>;
 				status = "okay";
 			};
 			pinctrl {
@@ -144,44 +179,6 @@
 			usb@51000 {
 				status = "okay";
 			};
-
-			devbus-bootcs@10400 {
-				status = "okay";
-				ranges = <0 0xf0000000 0x8000000>; /* @addr 0xf000000, size 0x8000000 */
-
-				/* Device Bus parameters are required */
-
-				/* Read parameters */
-				devbus,bus-width    = <8>;
-				devbus,turn-off-ps  = <60000>;
-				devbus,badr-skew-ps = <0>;
-				devbus,acc-first-ps = <124000>;
-				devbus,acc-next-ps  = <248000>;
-				devbus,rd-setup-ps  = <0>;
-				devbus,rd-hold-ps   = <0>;
-
-				/* Write parameters */
-				devbus,sync-enable = <0>;
-				devbus,wr-high-ps  = <60000>;
-				devbus,wr-low-ps   = <60000>;
-				devbus,ale-wr-ps   = <60000>;
-
-				/* NOR 128 MiB */
-				nor@0 {
-					compatible = "cfi-flash";
-					reg = <0 0x8000000>;
-					bank-width = <2>;
-				};
-			};
-
-			pcie-controller {
-				status = "okay";
-				/* Internal mini-PCIe connector */
-				pcie@1,0 {
-					/* Port 0, Lane 0 */
-					status = "okay";
-				};
-			};
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
index 5b902f9a3af2..17eb320ceff1 100644
--- a/arch/arm/boot/dts/armada-xp.dtsi
+++ b/arch/arm/boot/dts/armada-xp.dtsi
@@ -16,14 +16,37 @@
  * common to all Armada SoCs.
  */
 
-/include/ "armada-370-xp.dtsi"
+#include "armada-370-xp.dtsi"
 
 / {
 	model = "Marvell Armada XP family SoC";
 	compatible = "marvell,armadaxp", "marvell,armada-370-xp";
 
+	aliases {
+		eth2 = &eth2;
+	};
+
 	soc {
+		compatible = "marvell,armadaxp-mbus", "simple-bus";
+
+		bootrom {
+			compatible = "marvell,bootrom";
+			reg = <MBUS_ID(0x01, 0x1d) 0 0x100000>;
+		};
+
+		/* Security Accelerator SRAM (CESA) */
+		cesa-sram {
+			compatible = "marvell,cesa-sram";
+			reg = <MBUS_ID(0x09, 0x01) 0 0x10000   /*chan0*/
+			       MBUS_ID(0x09, 0x05) 0 0x10000>; /*chan1*/
+		};
+
 		internal-regs {
+			sdramc@1400 {
+				compatible = "marvell,armada-xp-sdram-controller";
+				reg = <0x1400 0x500>;
+			};
+
 			L2: l2-cache {
 				compatible = "marvell,aurora-system-cache";
 				reg = <0x08000 0x1000>;
@@ -32,12 +55,18 @@
 			};
 
 			interrupt-controller@20000 {
-			      reg = <0x20a00 0x2d0>, <0x21070 0x58>;
+			      reg = <0x20a00 0x2d0>, <0x21070 0x58>, <0x21870 0x390>;
 			};
 
 			armada-370-xp-pmsu@22000 {
 				compatible = "marvell,armada-370-xp-pmsu";
-				reg = <0x22100 0x430>, <0x20800 0x20>;
+				reg = <0x22100 0x400>;
+			};
+
+			cpurst: cpurst@20800 {
+				compatible = "marvell,armada-xp-cpu-reset";
+				reg = <0x20800 0x20>;
+				#reset-cells = <1>;
 			};
 
 			serial@12200 {
@@ -46,6 +75,7 @@
 				reg-shift = <2>;
 				interrupts = <43>;
 				reg-io-width = <1>;
+				clocks = <&coreclk 0>;
 				status = "disabled";
 			};
 			serial@12300 {
@@ -54,11 +84,14 @@
 				reg-shift = <2>;
 				interrupts = <44>;
 				reg-io-width = <1>;
+				clocks = <&coreclk 0>;
 				status = "disabled";
 			};
 
 			timer@20300 {
-				marvell,timer-25Mhz;
+				compatible = "marvell,armada-xp-timer";
+				clocks = <&coreclk 2>, <&refclk>;
+				clock-names = "nbclk", "fixed";
 			};
 
 			coreclk: mvebu-sar@18230 {
@@ -70,7 +103,7 @@
 			cpuclk: clock-complex@18700 {
 				#clock-cells = <1>;
 				compatible = "marvell,armada-xp-cpu-clock";
-				reg = <0x18700 0xA0>;
+				reg = <0x18700 0x24>;
 				clocks = <&coreclk 1>;
 			};
 
@@ -86,12 +119,26 @@
 				reg = <0x18200 0x500>;
 			};
 
-			ethernet@30000 {
-				compatible = "marvell,armada-370-neta";
-				reg = <0x30000 0x2500>;
+			/* PnC and BM */
+			bm_pnc@c0000 {
+				compatible = "marvell,neta_bm_pnc";
+				reg = <0xc0000 0xAC 0xc8000 0x48>;
+				clocks = <&gateclk 13>;
+				/*neta_cap_bm, bitmap of NETA dynamic capabilities, such as PNC, BM, HWF and PME
+				  PNC--0x1, BM--0x2, HWF--0x4, PME--0x8*/
+				neta_cap_bm = <0x3>;
+				pnc_tcam_size = <1024>;
+			};
+
+			eth2: ethernet@30000 {
+				compatible = "marvell,neta";
+				reg = <0x30000 0x4000>;
 				interrupts = <12>;
 				clocks = <&gateclk 2>;
 				status = "disabled";
+				mac-address = [ 00 50 43 02 02 02 ];
+				eth,port-num    = <2>;
+				eth,port-mtu    = <1500>;
 			};
 
 			xor@60900 {
@@ -156,6 +203,48 @@
 					0x184d0 0x4>;
 				status = "okay";
 			};
+
+			crypto@9D000 {
+				compatible = "marvell,armada-cesa";
+				reg = <0x9D000 0x1000	/* cesa base reg chan 0 */
+				       0x90000 0x1000	/* tdma base reg chan 0 */
+				       0x9F000 0x1000	/* cesa base reg chan 1 */
+				       0x92000 0x1000>;	/* tdma base reg chan 1 */
+				clocks = <&gateclk 23>;
+				cesa,channels = <0x2>;
+				cesa,mode = "ocf";	/* ocf or test */
+				cesa,feature = "int_coalescing"; /* chain, int_coalescing
+							   or int_per_packet */
+
+				/* threshold and time_threshold relevant if
+				   int_coalescing in use */
+				cesa,threshold = <0x2>;
+				cesa,time_threshold = <0xff>;
+
+				cesa,ctrlModel = /bits/ 16 <0x7846>;
+				cesa,ctrlRev = /bits/ 8 <2>;
+				cesa,sramOffset = /bits/ 16 <0>;
+				status = "disabled";
+
+				crypto10 {
+					/* channel 0 */
+					interrupts = <48>;
+				};
+				crypto11 {
+					/* channel 1 */
+					interrupts = <49>;
+				};
+			};
+
+		};
+	};
+
+	clocks {
+		/* 25 MHz reference crystal */
+		refclk: oscillator {
+			compatible = "fixed-clock";
+			#clock-cells = <0>;
+			clock-frequency = <25000000>;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/kirkwood-6281.dtsi b/arch/arm/boot/dts/kirkwood-6281.dtsi
index d6c9d65cbaeb..3653ee20de29 100644
--- a/arch/arm/boot/dts/kirkwood-6281.dtsi
+++ b/arch/arm/boot/dts/kirkwood-6281.dtsi
@@ -1,4 +1,39 @@
 / {
+	mbus {
+		pcie-controller {
+			compatible = "marvell,kirkwood-pcie";
+			status = "disabled";
+			device_type = "pci";
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+
+			bus-range = <0x00 0xff>;
+
+			ranges =
+			       <0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000
+				0x82000000 0x1 0     MBUS_ID(0x04, 0xe8) 0       1 0 /* Port 0.0 MEM */
+				0x81000000 0x1 0     MBUS_ID(0x04, 0xe0) 0       1 0 /* Port 0.0 IO  */>;
+
+			pcie@1,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x00040000 0 0x2000>;
+				reg = <0x0800 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0
+					  0x81000000 0 0 0x81000000 0x1 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &intc 9>;
+				marvell,pcie-port = <0>;
+				marvell,pcie-lane = <0>;
+				clocks = <&gate_clk 2>;
+				status = "disabled";
+			};
+		};
+	};
+
 	ocp@f1000000 {
 		pinctrl: pinctrl@10000 {
 			compatible = "marvell,88f6281-pinctrl";
diff --git a/arch/arm/boot/dts/kirkwood-6282.dtsi b/arch/arm/boot/dts/kirkwood-6282.dtsi
index 23991e45bc55..82741a7d0f53 100644
--- a/arch/arm/boot/dts/kirkwood-6282.dtsi
+++ b/arch/arm/boot/dts/kirkwood-6282.dtsi
@@ -1,4 +1,59 @@
 / {
+	mbus {
+		pcie-controller {
+			compatible = "marvell,kirkwood-pcie";
+			status = "disabled";
+			device_type = "pci";
+
+			#address-cells = <3>;
+			#size-cells = <2>;
+
+			bus-range = <0x00 0xff>;
+
+			ranges =
+			       <0x82000000 0 0x40000 MBUS_ID(0xf0, 0x01) 0x40000 0 0x00002000
+			        0x82000000 0 0x44000 MBUS_ID(0xf0, 0x01) 0x44000 0 0x00002000
+				0x82000000 0 0x80000 MBUS_ID(0xf0, 0x01) 0x80000 0 0x00002000
+				0x82000000 0x1 0     MBUS_ID(0x04, 0xe8) 0       1 0 /* Port 0.0 MEM */
+				0x81000000 0x1 0     MBUS_ID(0x04, 0xe0) 0       1 0 /* Port 0.0 IO  */
+				0x82000000 0x2 0     MBUS_ID(0x04, 0xd8) 0       1 0 /* Port 1.0 MEM */
+				0x81000000 0x2 0     MBUS_ID(0x04, 0xd0) 0       1 0 /* Port 1.0 IO  */>;
+
+			pcie@1,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82000800 0 0x00040000 0 0x2000>;
+				reg = <0x0800 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x1 0 1 0
+					  0x81000000 0 0 0x81000000 0x1 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &intc 9>;
+				marvell,pcie-port = <0>;
+				marvell,pcie-lane = <0>;
+				clocks = <&gate_clk 2>;
+				status = "disabled";
+			};
+
+			pcie@2,0 {
+				device_type = "pci";
+				assigned-addresses = <0x82001000 0 0x00044000 0 0x2000>;
+				reg = <0x1000 0 0 0 0>;
+				#address-cells = <3>;
+				#size-cells = <2>;
+				#interrupt-cells = <1>;
+				ranges = <0x82000000 0 0 0x82000000 0x2 0 1 0
+					  0x81000000 0 0 0x81000000 0x2 0 1 0>;
+				interrupt-map-mask = <0 0 0 0>;
+				interrupt-map = <0 0 0 0 &intc 10>;
+				marvell,pcie-port = <1>;
+				marvell,pcie-lane = <0>;
+				clocks = <&gate_clk 18>;
+				status = "disabled";
+			};
+		};
+	};
 	ocp@f1000000 {
 
 		pinctrl: pinctrl@10000 {
diff --git a/arch/arm/boot/dts/kirkwood-cloudbox.dts b/arch/arm/boot/dts/kirkwood-cloudbox.dts
index 5f21d4e427b0..56d417d9e740 100644
--- a/arch/arm/boot/dts/kirkwood-cloudbox.dts
+++ b/arch/arm/boot/dts/kirkwood-cloudbox.dts
@@ -1,7 +1,7 @@
 /dts-v1/;
 
-/include/ "kirkwood.dtsi"
-/include/ "kirkwood-6281.dtsi"
+#include "kirkwood.dtsi"
+#include "kirkwood-6281.dtsi"
 
 / {
 	model = "LaCie CloudBox";
diff --git a/arch/arm/boot/dts/kirkwood-dns320.dts b/arch/arm/boot/dts/kirkwood-dns320.dts
index c9c44b2f62d7..a0439afa34f4 100644
--- a/arch/arm/boot/dts/kirkwood-dns320.dts
+++ b/arch/arm/boot/dts/kirkwood-dns320.dts
@@ -1,6 +1,6 @@
 /dts-v1/;
 
-/include/ "kirkwood-dnskw.dtsi"
+#include "kirkwood-dnskw.dtsi"
 
 / {
 	model = "D-Link DNS-320 NAS (Rev A1)";
diff --git a/arch/arm/boot/dts/kirkwood-dns325.dts b/arch/arm/boot/dts/kirkwood-dns325.dts
index e4e4930dc5cf..e101ebd8f323 100644
--- a/arch/arm/boot/dts/kirkwood-dns325.dts
+++ b/arch/arm/boot/dts/kirkwood-dns325.dts
@@ -1,6 +1,6 @@
 /dts-v1/;
 
-/include/ "kirkwood-dnskw.dtsi"
+#include "kirkwood-dnskw.dtsi"
 
 / {
 	model = "D-Link DNS-325 NAS (Rev A1)";
diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
index 6875ac00c174..2951128c3de6 100644
--- a/arch/arm/boot/dts/kirkwood-dnskw.dtsi
+++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
@@ -1,5 +1,5 @@
-/include/ "kirkwood.dtsi"
-/include/ "kirkwood-6281.dtsi"
+#include "kirkwood.dtsi"
+#include "kirkwood-6281.dtsi"
 
 / {
 	model = "D-Link DNS NASes (kirkwood-based)";
diff --git a/arch/arm/boot/dts/kirkwood-dockstar.dts b/arch/arm/boot/dts/kirkwood-dockstar.dts
index 0196cf6b0ef2..5a3780dc6b33 100644
--- a/arch/arm/boot/dts/kirkwood-dockstar.dts
+++ b/arch/arm/boot/dts/kirkwood-dockstar.dts
@@ -1,7 +1,7 @@
 /dts-v1/;
 
-/include/ "kirkwood.dtsi"
-/include/ "kirkwood-6281.dtsi"
+#include "kirkwood.dtsi"
+#include "kirkwood-6281.dtsi"
 
 / {
 	model = "Seagate FreeAgent Dockstar";
diff --git a/arch/arm/boot/dts/kirkwood-dreamplug.dts b/arch/arm/boot/dts/kirkwood-dreamplug.dts
index 289e51d86372..f4f4651f6a5e 100644
--- a/arch/arm/boot/dts/kirkwood-dreamplug.dts
+++ b/arch/arm/boot/dts/kirkwood-dreamplug.dts
@@ -1,7 +1,7 @@
 /dts-v1/;
 
-/include/ "kirkwood.dtsi"
-/include/ "kirkwood-6281.dtsi"
+#include "kirkwood.dtsi"
+#include "kirkwood-6281.dtsi"
 
 / {
 	model = "Globalscale Technologies Dreamplug";
@@ -79,6 +79,7 @@
 			pinctrl-names = "default";
 			status = "okay";
 			/* No CD or WP GPIOs */
+			broken-cd;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/kirkwood-goflexnet.dts b/arch/arm/boot/dts/kirkwood-goflexnet.dts
index c3573be7b92c..92abf473945f 100644
--- a/arch/arm/boot/dts/kirkwood-goflexnet.dts
+++ b/arch/arm/boot/dts/kirkwood-goflexnet.dts
@@ -1,7 +1,7 @@
 /dts-v1/;
 
-/include/ "kirkwood.dtsi"
-/include/ "kirkwood-6281.dtsi"
+#include "kirkwood.dtsi"
+#include "kirkwood-6281.dtsi"
 
 / {
 	model = "Seagate GoFlex Net";
diff --git a/arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts b/arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts
index 44fd97dfc1f3..f43d3b0eb8e1 100644
--- a/arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts
+++ b/arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts
@@ -1,7 +1,7 @@
 /dts-v1/;
 
-/include/ "kirkwood.dtsi"
-/include/ "kirkwood-6281.dtsi"
+#include "kirkwood.dtsi"
+#include "kirkwood-6281.dtsi"
 
 / {
 	model = "Globalscale Technologies Guruplug Server Plus";
@@ -72,6 +72,8 @@
 
 		mvsdio@90000 {
 			status = "okay";
+			/* No CD or WP GPIOs */
+			broken-cd;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/kirkwood-ib62x0.dts b/arch/arm/boot/dts/kirkwood-ib62x0.dts
index 5335b1aa8601..f0dc12e265ae 100644
--- a/arch/arm/boot/dts/kirkwood-ib62x0.dts
+++ b/arch/arm/boot/dts/kirkwood-ib62x0.dts
@@ -1,7 +1,7 @@
 /dts-v1/;
 
-/include/ "kirkwood.dtsi"
-/include/ "kirkwood-6281.dtsi"
+#include "kirkwood.dtsi"
+#include "kirkwood-6281.dtsi"
 
 / {
 	model = "RaidSonic ICY BOX IB-NAS62x0 (Rev B)";
diff --git a/arch/arm/boot/dts/kirkwood-iconnect.dts b/arch/arm/boot/dts/kirkwood-iconnect.dts
index 12ccf74ac3c4..4b95e4a005b0 100644
--- a/arch/arm/boot/dts/kirkwood-iconnect.dts
+++ b/arch/arm/boot/dts/kirkwood-iconnect.dts
@@ -1,7 +1,7 @@
 /dts-v1/;
 
-/include/ "kirkwood.dtsi"
-/include/ "kirkwood-6281.dtsi"
+#include "kirkwood.dtsi"
+#include "kirkwood-6281.dtsi"
 
 / {
 	model = "Iomega Iconnect";
@@ -18,6 +18,17 @@
 		linux,initrd-end   = <0x4800000>;
 	};
 
+	mbus {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000>;
+		pcie-controller {
+			status = "okay";
+
+			pcie@1,0 {
+				status = "okay";
+			};
+		};
+	};
+
 	ocp@f1000000 {
 		pinctrl: pinctrl@10000 {
 
diff --git a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
index 3694e94f6e99..a426cf3cdd3f 100644
--- a/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
+++ b/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
@@ -1,7 +1,7 @@
 /dts-v1/;
 
-/include/ "kirkwood.dtsi"
-/include/ "kirkwood-6281.dtsi"
+#include "kirkwood.dtsi"
+#include "kirkwood-6281.dtsi"
 
 / {
 	model = "Iomega StorCenter ix2-200";
diff --git a/arch/arm/boot/dts/kirkwood-is2.dts b/arch/arm/boot/dts/kirkwood-is2.dts
index 0bdce0ad7277..2d6403cedfec 100644
--- a/arch/arm/boot/dts/kirkwood-is2.dts
+++ b/arch/arm/boot/dts/kirkwood-is2.dts
@@ -1,6 +1,6 @@
 /dts-v1/;
 
-/include/ "kirkwood-ns2-common.dtsi"
+#include "kirkwood-ns2-common.dtsi"
 
 / {
 	model = "LaCie Internet Space v2";
diff --git a/arch/arm/boot/dts/kirkwood-km_kirkwood.dts b/arch/arm/boot/dts/kirkwood-km_kirkwood.dts
index 5bbd0542cdd3..43743656aa2c 100644
--- a/arch/arm/boot/dts/kirkwood-km_kirkwood.dts
+++ b/arch/arm/boot/dts/kirkwood-km_kirkwood.dts
@@ -1,7 +1,7 @@
 /dts-v1/;
 
-/include/ "kirkwood.dtsi"
-/include/ "kirkwood-98dx4122.dtsi"
+#include "kirkwood.dtsi"
+#include "kirkwood-98dx4122.dtsi"
 
 / {
 	model = "Keymile Kirkwood Reference Design";
diff --git a/arch/arm/boot/dts/kirkwood-lschlv2.dts b/arch/arm/boot/dts/kirkwood-lschlv2.dts
index 9f55d95f35f5..e2fa368aef25 100644
--- a/arch/arm/boot/dts/kirkwood-lschlv2.dts
+++ b/arch/arm/boot/dts/kirkwood-lschlv2.dts
@@ -1,6 +1,6 @@
 /dts-v1/;
 
-/include/ "kirkwood-lsxl.dtsi"
+#include "kirkwood-lsxl.dtsi"
 
 / {
 	model = "Buffalo Linkstation LS-CHLv2";
diff --git a/arch/arm/boot/dts/kirkwood-lsxhl.dts b/arch/arm/boot/dts/kirkwood-lsxhl.dts
index 5c84c118ed8d..8d89cdf8d6bf 100644
--- a/arch/arm/boot/dts/kirkwood-lsxhl.dts
+++ b/arch/arm/boot/dts/kirkwood-lsxhl.dts
@@ -1,6 +1,6 @@
 /dts-v1/;
 
-/include/ "kirkwood-lsxl.dtsi"
+#include "kirkwood-lsxl.dtsi"
 
 / {
 	model = "Buffalo Linkstation LS-XHL";
diff --git a/arch/arm/boot/dts/kirkwood-lsxl.dtsi b/arch/arm/boot/dts/kirkwood-lsxl.dtsi
index 37d45c4f88fb..948d2fd2a8ce 100644
--- a/arch/arm/boot/dts/kirkwood-lsxl.dtsi
+++ b/arch/arm/boot/dts/kirkwood-lsxl.dtsi
@@ -1,5 +1,5 @@
-/include/ "kirkwood.dtsi"
-/include/ "kirkwood-6281.dtsi"
+#include "kirkwood.dtsi"
+#include "kirkwood-6281.dtsi"
 
 / {
 	chosen {
diff --git a/arch/arm/boot/dts/kirkwood-mplcec4.dts b/arch/arm/boot/dts/kirkwood-mplcec4.dts
index 758824118a9a..6bff584584ba 100644
--- a/arch/arm/boot/dts/kirkwood-mplcec4.dts
+++ b/arch/arm/boot/dts/kirkwood-mplcec4.dts
@@ -1,7 +1,7 @@
 /dts-v1/;
 
-/include/ "kirkwood.dtsi"
-/include/ "kirkwood-6281.dtsi"
+#include "kirkwood.dtsi"
+#include "kirkwood-6281.dtsi"
 
 / {
 	model = "MPL CEC4";
@@ -16,6 +16,17 @@
                 bootargs = "console=ttyS0,115200n8 earlyprintk";
         };
 
+	mbus {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000>;
+		pcie-controller {
+			status = "okay";
+
+			pcie@1,0 {
+				status = "okay";
+			};
+		};
+	};
+
 	ocp@f1000000 {
 		pinctrl: pinctrl@10000 {
 
@@ -136,7 +147,7 @@
 			pinctrl-0 = <&pmx_sdio &pmx_sdio_cd>;
 			pinctrl-names = "default";
 			status = "okay";
-			cd-gpios = <&gpio1 15 0>;
+			cd-gpios = <&gpio1 15 1>;
 			/* No WP GPIO */
 		};
 	};
diff --git a/arch/arm/boot/dts/kirkwood-netgear_readynas_duo_v2.dts b/arch/arm/boot/dts/kirkwood-netgear_readynas_duo_v2.dts
index 1ca66ab83ad6..2dbc3463226d 100644
--- a/arch/arm/boot/dts/kirkwood-netgear_readynas_duo_v2.dts
+++ b/arch/arm/boot/dts/kirkwood-netgear_readynas_duo_v2.dts
@@ -1,7 +1,7 @@
 /dts-v1/;
 
-/include/ "kirkwood.dtsi"
-/include/ "kirkwood-6282.dtsi"
+#include "kirkwood.dtsi"
+#include "kirkwood-6282.dtsi"
 
 / {
 	model = "NETGEAR ReadyNAS Duo v2";
@@ -16,6 +16,17 @@
 		bootargs = "console=ttyS0,115200n8 earlyprintk";
 	};
 
+	mbus {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000>;
+		pcie-controller {
+			status = "okay";
+
+			pcie@1,0 {
+				status = "okay";
+			};
+		};
+	};
+
 	ocp@f1000000 {
 		pinctrl: pinctrl@10000 {
 
diff --git a/arch/arm/boot/dts/kirkwood-ns2-common.dtsi b/arch/arm/boot/dts/kirkwood-ns2-common.dtsi
index 6affd924fe11..5984e03384a0 100644
--- a/arch/arm/boot/dts/kirkwood-ns2-common.dtsi
+++ b/arch/arm/boot/dts/kirkwood-ns2-common.dtsi
@@ -1,5 +1,5 @@
-/include/ "kirkwood.dtsi"
-/include/ "kirkwood-6281.dtsi"
+#include "kirkwood.dtsi"
+#include "kirkwood-6281.dtsi"
 
 / {
 	chosen {
diff --git a/arch/arm/boot/dts/kirkwood-ns2.dts b/arch/arm/boot/dts/kirkwood-ns2.dts
index f2d36ecf36d8..94c1c79d1257 100644
--- a/arch/arm/boot/dts/kirkwood-ns2.dts
+++ b/arch/arm/boot/dts/kirkwood-ns2.dts
@@ -1,6 +1,6 @@
 /dts-v1/;
 
-/include/ "kirkwood-ns2-common.dtsi"
+#include "kirkwood-ns2-common.dtsi"
 
 / {
 	model = "LaCie Network Space v2";
diff --git a/arch/arm/boot/dts/kirkwood-ns2lite.dts b/arch/arm/boot/dts/kirkwood-ns2lite.dts
index b02eb4ea1bb4..5afdf6863a11 100644
--- a/arch/arm/boot/dts/kirkwood-ns2lite.dts
+++ b/arch/arm/boot/dts/kirkwood-ns2lite.dts
@@ -1,6 +1,6 @@
 /dts-v1/;
 
-/include/ "kirkwood-ns2-common.dtsi"
+#include "kirkwood-ns2-common.dtsi"
 
 / {
 	model = "LaCie Network Space Lite v2";
diff --git a/arch/arm/boot/dts/kirkwood-ns2max.dts b/arch/arm/boot/dts/kirkwood-ns2max.dts
index bcec4d6cada7..39703ff6d0b0 100644
--- a/arch/arm/boot/dts/kirkwood-ns2max.dts
+++ b/arch/arm/boot/dts/kirkwood-ns2max.dts
@@ -1,6 +1,6 @@
 /dts-v1/;
 
-/include/ "kirkwood-ns2-common.dtsi"
+#include "kirkwood-ns2-common.dtsi"
 
 / {
 	model = "LaCie Network Space Max v2";
diff --git a/arch/arm/boot/dts/kirkwood-ns2mini.dts b/arch/arm/boot/dts/kirkwood-ns2mini.dts
index adab1ab25733..d810c6eb54d8 100644
--- a/arch/arm/boot/dts/kirkwood-ns2mini.dts
+++ b/arch/arm/boot/dts/kirkwood-ns2mini.dts
@@ -1,6 +1,6 @@
 /dts-v1/;
 
-/include/ "kirkwood-ns2-common.dtsi"
+#include "kirkwood-ns2-common.dtsi"
 
 / {
 	/* This machine is embedded in the first LaCie CloudBox product. */
diff --git a/arch/arm/boot/dts/kirkwood-nsa310.dts b/arch/arm/boot/dts/kirkwood-nsa310.dts
index a7412b937a8a..bd7f05f6aa96 100644
--- a/arch/arm/boot/dts/kirkwood-nsa310.dts
+++ b/arch/arm/boot/dts/kirkwood-nsa310.dts
@@ -1,6 +1,7 @@
 /dts-v1/;
 
-/include/ "kirkwood.dtsi"
+#include "kirkwood.dtsi"
+#include "kirkwood-6281.dtsi"
 
 / {
 	model = "ZyXEL NSA310";
@@ -15,24 +16,20 @@
 		bootargs = "console=ttyS0,115200";
 	};
 
+	mbus {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000>;
+		pcie-controller {
+			status = "okay";
+
+			pcie@1,0 {
+				status = "okay";
+			};
+		};
+	};
+
 	ocp@f1000000 {
 		pinctrl: pinctrl@10000 {
-			pinctrl-0 = < &pmx_led_esata_green
-				      &pmx_led_esata_red
-				      &pmx_led_usb_green
-				      &pmx_led_usb_red
-				      &pmx_usb_power_off
-				      &pmx_led_sys_green
-				      &pmx_led_sys_red
-				      &pmx_btn_reset
-				      &pmx_btn_copy
-				      &pmx_led_copy_green
-				      &pmx_led_copy_red
-				      &pmx_led_hdd_green
-				      &pmx_led_hdd_red
-				      &pmx_unknown
-				      &pmx_btn_power
-				      &pmx_pwr_off >;
+			pinctrl-0 = <&pmx_unknown>;
 			pinctrl-names = "default";
 
 			pmx_led_esata_green: pmx-led-esata-green {
@@ -182,6 +179,8 @@
 		compatible = "gpio-keys";
 		#address-cells = <1>;
 		#size-cells = <0>;
+		pinctrl-0 = <&pmx_btn_reset &pmx_btn_copy &pmx_btn_power>;
+		pinctrl-names = "default";
 
 		button@1 {
 			label = "Power Button";
@@ -202,6 +201,12 @@
 
 	gpio-leds {
 		compatible = "gpio-leds";
+		pinctrl-0 = <&pmx_led_esata_green &pmx_led_esata_red
+			     &pmx_led_usb_green &pmx_led_usb_red
+			     &pmx_led_sys_green &pmx_led_sys_red
+			     &pmx_led_copy_green &pmx_led_copy_red
+			     &pmx_led_hdd_green &pmx_led_hdd_red>;
+		pinctrl-names = "default";
 
 		green-sys {
 			label = "nsa310:green:sys";
@@ -247,6 +252,8 @@
 
 	gpio_poweroff {
 		compatible = "gpio-poweroff";
+		pinctrl-0 = <&pmx_pwr_off>;
+		pinctrl-names = "default";
 		gpios = <&gpio1 16 0>;
 	};
 
@@ -254,6 +261,8 @@
 		compatible = "simple-bus";
 		#address-cells = <1>;
 		#size-cells = <0>;
+		pinctrl-0 = <&pmx_usb_power_off>;
+		pinctrl-names = "default";
 
 		usb0_power_off: regulator@1 {
 			compatible = "regulator-fixed";
diff --git a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
index d27f7245f8e7..8f867685daf0 100644
--- a/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
+++ b/arch/arm/boot/dts/kirkwood-openblocks_a6.dts
@@ -1,7 +1,7 @@
 /dts-v1/;
 
-/include/ "kirkwood.dtsi"
-/include/ "kirkwood-6282.dtsi"
+#include "kirkwood.dtsi"
+#include "kirkwood-6282.dtsi"
 
 / {
 	model = "Plat'Home OpenBlocksA6";
diff --git a/arch/arm/boot/dts/kirkwood-topkick.dts b/arch/arm/boot/dts/kirkwood-topkick.dts
index 66eb45b00b25..3e0b6032e725 100644
--- a/arch/arm/boot/dts/kirkwood-topkick.dts
+++ b/arch/arm/boot/dts/kirkwood-topkick.dts
@@ -1,7 +1,7 @@
 /dts-v1/;
 
-/include/ "kirkwood.dtsi"
-/include/ "kirkwood-6282.dtsi"
+#include "kirkwood.dtsi"
+#include "kirkwood-6282.dtsi"
 
 / {
 	model = "Univeral Scientific Industrial Co. Topkick-1281P2";
@@ -154,6 +154,7 @@
 			pinctrl-names = "default";
 			status = "okay";
 			/* No CD or WP GPIOs */
+			broken-cd;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/kirkwood-ts219-6281.dts b/arch/arm/boot/dts/kirkwood-ts219-6281.dts
index 8295c833887f..3867ae3030be 100644
--- a/arch/arm/boot/dts/kirkwood-ts219-6281.dts
+++ b/arch/arm/boot/dts/kirkwood-ts219-6281.dts
@@ -1,16 +1,14 @@
 /dts-v1/;
 
-/include/ "kirkwood-ts219.dtsi"
-/include/ "kirkwood-6281.dtsi"
+#include "kirkwood.dtsi"
+#include "kirkwood-6281.dtsi"
+#include "kirkwood-ts219.dtsi"
 
 / {
 	ocp@f1000000 {
 		pinctrl: pinctrl@10000 {
 
-			pinctrl-0 = < &pmx_uart0 &pmx_uart1 &pmx_spi
-				      &pmx_twsi0 &pmx_sata0 &pmx_sata1
-				      &pmx_ram_size &pmx_reset_button
-				      &pmx_USB_copy_button &pmx_board_id>;
+			pinctrl-0 = <&pmx_ram_size &pmx_board_id>;
 			pinctrl-names = "default";
 
 			pmx_ram_size: pmx-ram-size {
@@ -38,6 +36,9 @@
 		compatible = "gpio-keys";
 		#address-cells = <1>;
 		#size-cells = <0>;
+		pinctrl-0 = <&pmx_reset_button &pmx_USB_copy_button>;
+		pinctrl-names = "default";
+
 		button@1 {
 			label = "USB Copy";
 			linux,code = <133>;
diff --git a/arch/arm/boot/dts/kirkwood-ts219-6282.dts b/arch/arm/boot/dts/kirkwood-ts219-6282.dts
index df3f95dfba33..04f6fe106bb5 100644
--- a/arch/arm/boot/dts/kirkwood-ts219-6282.dts
+++ b/arch/arm/boot/dts/kirkwood-ts219-6282.dts
@@ -1,16 +1,25 @@
 /dts-v1/;
 
-/include/ "kirkwood-ts219.dtsi"
-/include/ "kirkwood-6282.dtsi"
+#include "kirkwood.dtsi"
+#include "kirkwood-6282.dtsi"
+#include "kirkwood-ts219.dtsi"
 
 / {
+	mbus {
+		ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000>;
+		pcie-controller {
+			status = "okay";
+
+			pcie@2,0 {
+				status = "okay";
+			};
+		};
+	};
+
 	ocp@f1000000 {
 		pinctrl: pinctrl@10000 {
 
-			pinctrl-0 = < &pmx_uart0 &pmx_uart1 &pmx_spi
-				      &pmx_twsi0 &pmx_sata0 &pmx_sata1
-				      &pmx_ram_size &pmx_reset_button
-				      &pmx_USB_copy_button &pmx_board_id>;
+			pinctrl-0 = <&pmx_ram_size &pmx_board_id>;
 			pinctrl-names = "default";
 
 			pmx_ram_size: pmx-ram-size {
@@ -38,6 +47,9 @@
 		compatible = "gpio-keys";
 		#address-cells = <1>;
 		#size-cells = <0>;
+		pinctrl-0 = <&pmx_reset_button &pmx_USB_copy_button>;
+		pinctrl-names = "default";
+
 		button@1 {
 			label = "USB Copy";
 			linux,code = <133>;
diff --git a/arch/arm/boot/dts/kirkwood-ts219.dtsi b/arch/arm/boot/dts/kirkwood-ts219.dtsi
index 64ea27cb3298..eb506ad847c6 100644
--- a/arch/arm/boot/dts/kirkwood-ts219.dtsi
+++ b/arch/arm/boot/dts/kirkwood-ts219.dtsi
@@ -13,6 +13,16 @@
 		bootargs = "console=ttyS0,115200n8";
 	};
 
+	mbus {
+		pcie-controller {
+			status = "okay";
+
+			pcie@1,0 {
+				status = "okay";
+			};
+		};
+	};
+
 	ocp@f1000000 {
 		i2c@11000 {
 			status = "okay";
diff --git a/arch/arm/boot/dts/kirkwood.dtsi b/arch/arm/boot/dts/kirkwood.dtsi
index fada7e6d24d8..399aed26564d 100644
--- a/arch/arm/boot/dts/kirkwood.dtsi
+++ b/arch/arm/boot/dts/kirkwood.dtsi
@@ -1,5 +1,7 @@
 /include/ "skeleton.dtsi"
 
+#define MBUS_ID(target,attributes) (((target) << 24) | ((attributes) << 16))
+
 / {
 	compatible = "marvell,kirkwood";
 	interrupt-parent = <&intc>;
@@ -16,6 +18,15 @@
 		      <0xf1020214 0x04>;
 	};
 
+	mbus {
+		compatible = "marvell,kirkwood-mbus", "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <1>;
+		controller = <&mbusc>;
+		pcie-mem-aperture = <0xe0000000 0x10000000>; /* 256 MiB memory space */
+		pcie-io-aperture  = <0xf2000000 0x100000>;   /*   1 MiB    I/O space */
+	};
+
 	ocp@f1000000 {
 		compatible = "simple-bus";
 		ranges = <0x00000000 0xf1000000 0x4000000
@@ -23,6 +34,11 @@
 		#address-cells = <1>;
 		#size-cells = <1>;
 
+		mbusc: mbus-controller@20000 {
+			compatible = "marvell,mbus-controller";
+			reg = <0x20000 0x80>, <0x1500 0x20>;
+		};
+
 		core_clk: core-clocks@10030 {
 			compatible = "marvell,kirkwood-core-clock";
 			reg = <0x10030 0x4>;
@@ -200,6 +216,10 @@
 			reg = <0x90000 0x200>;
 			interrupts = <28>;
 			clocks = <&gate_clk 4>;
+			bus-width = <4>;
+			cap-sdio-irq;
+			cap-sd-highspeed;
+			cap-mmc-highspeed;
 			status = "disabled";
 		};
 	};
diff --git a/arch/arm/boot/dts/testcases/tests-interrupts.dtsi b/arch/arm/boot/dts/testcases/tests-interrupts.dtsi
new file mode 100644
index 000000000000..6ecda716e9d4
--- /dev/null
+++ b/arch/arm/boot/dts/testcases/tests-interrupts.dtsi
@@ -0,0 +1,41 @@
+
+/ {
+	testcase-data {
+		interrupts {
+			#address-cells = <0>;
+			test_intc0: intc0 {
+				interrupt-controller;
+				#interrupt-cells = <1>;
+			};
+
+			test_intc1: intc1 {
+				interrupt-controller;
+				#interrupt-cells = <3>;
+			};
+
+			test_intc2: intc2 {
+				interrupt-controller;
+				#interrupt-cells = <2>;
+			};
+
+			test_intmap0: intmap0 {
+				#interrupt-cells = <1>;
+				#address-cells = <0>;
+				interrupt-map = <1 &test_intc0 9>,
+						<2 &test_intc1 10 11 12>,
+						<3 &test_intc2 13 14>,
+						<4 &test_intc2 15 16>;
+			};
+
+			interrupts0 {
+				interrupt-parent = <&test_intc0>;
+				interrupts = <1>, <2>, <3>, <4>;
+			};
+
+			interrupts1 {
+				interrupt-parent = <&test_intmap0>;
+				interrupts = <1>, <2>, <3>, <4>;
+			};
+		};
+	};
+};
diff --git a/arch/arm/boot/dts/testcases/tests.dtsi b/arch/arm/boot/dts/testcases/tests.dtsi
index a7c5067622e8..3f123ecc9dd7 100644
--- a/arch/arm/boot/dts/testcases/tests.dtsi
+++ b/arch/arm/boot/dts/testcases/tests.dtsi
@@ -1 +1,2 @@
 /include/ "tests-phandle.dtsi"
+/include/ "tests-interrupts.dtsi"
diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S
index 8178705c4b24..4ad68cbfce42 100644
--- a/arch/arm/common/mcpm_head.S
+++ b/arch/arm/common/mcpm_head.S
@@ -15,6 +15,7 @@
 
 #include <linux/linkage.h>
 #include <asm/mcpm.h>
+#include <asm/assembler.h>
 
 #include "vlock.h"
 
@@ -47,6 +48,7 @@
 
 ENTRY(mcpm_entry_point)
 
+ ARM_BE8(setend        be)
  THUMB(	adr	r12, BSYM(1f)	)
  THUMB(	bx	r12		)
  THUMB(	.thumb			)
diff --git a/arch/arm/configs/mvebu_defconfig b/arch/arm/configs/mvebu_defconfig
index f3e8ae001ff1..6852c3d9b2fa 100644
--- a/arch/arm/configs/mvebu_defconfig
+++ b/arch/arm/configs/mvebu_defconfig
@@ -10,9 +10,13 @@ CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_ARCH_MVEBU=y
 CONFIG_MACH_ARMADA_370=y
+CONFIG_MACH_ARMADA_375=y
+CONFIG_MACH_ARMADA_380=y
 CONFIG_MACH_ARMADA_XP=y
 # CONFIG_CACHE_L2X0 is not set
 # CONFIG_SWP_EMULATE is not set
+CONFIG_PCI=y
+CONFIG_PCI_MVEBU=y
 CONFIG_SMP=y
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
@@ -51,6 +55,8 @@ CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_CFI_STAA=y
 CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_PXA3xx=y
 CONFIG_SERIAL_8250_DW=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
@@ -60,7 +66,11 @@ CONFIG_USB_SUPPORT=y
 CONFIG_USB=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_MVEBU=y
 CONFIG_MMC=y
+CONFIG_MMC_SDHCI_PXAV3=y
 CONFIG_MMC_MVSDIO=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=m
@@ -96,5 +106,3 @@ CONFIG_TIMER_STATS=y
 # CONFIG_DEBUG_BUGVERBOSE is not set
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
-CONFIG_EARLY_PRINTK=y
diff --git a/arch/arm/configs/mvebu_lsp_defconfig b/arch/arm/configs/mvebu_lsp_defconfig
new file mode 100644
index 000000000000..c2fff2ec0426
--- /dev/null
+++ b/arch/arm/configs/mvebu_lsp_defconfig
@@ -0,0 +1,175 @@
+CONFIG_SYSVIPC=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_MACH_ARMADA_370=y
+CONFIG_MACH_ARMADA_375=y
+CONFIG_MACH_ARMADA_380=y
+CONFIG_MACH_ARMADA_XP=y
+# CONFIG_SWP_EMULATE is not set
+CONFIG_PCI=y
+CONFIG_PCI_MVEBU=y
+CONFIG_SMP=y
+CONFIG_SCHED_MC=y
+CONFIG_AEABI=y
+CONFIG_HIGHMEM=y
+# CONFIG_COMPACTION is not set
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
+CONFIG_CPU_IDLE=y
+CONFIG_VFP=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_IPV6=y
+CONFIG_BRIDGE=y
+CONFIG_VLAN_8021Q=y
+CONFIG_BT=y
+CONFIG_BT_MRVL=y
+CONFIG_BT_MRVL_SDIO=y
+CONFIG_CFG80211=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_PXA3xx=y
+CONFIG_MTD_NAND_MVEBU_NFC=y
+CONFIG_MTD_UBI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_MV=y
+CONFIG_SATA_MV=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_RAID0=y
+CONFIG_MD_RAID1=y
+CONFIG_MD_RAID456=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_MVNETA=y
+CONFIG_MVPP2=y
+CONFIG_MV_ETH_DEBUG_CODE=y
+CONFIG_MV_ETH_NETA=y
+CONFIG_MV_ETH_PORTS_NUM=4
+CONFIG_MV_ETH_PNC_WOL=y
+# CONFIG_MV_ETH_HWF is not set
+CONFIG_MV_ETH_RXQ=8
+CONFIG_MV_ETH_TXQ=8
+CONFIG_MV_ETH_RXQ_DESC=256
+CONFIG_MV_ETH_GRO_DEF=y
+CONFIG_MV_ETH_TSO_DEF=y
+CONFIG_MV_NETA_DEBUG_CODE=y
+CONFIG_MV_ETH_STAT_DBG=y
+CONFIG_MV_NETA_SKB_RECYCLE_DEF=0
+CONFIG_MV_ETH_NAPI_GROUPS=4
+CONFIG_MV_ETH_PP2=y
+CONFIG_MV_ETH_PP2_1=y
+CONFIG_MV_PP2_DEBUG_CODE=y
+CONFIG_MV_PP2_STAT_DBG=y
+CONFIG_MV_PP2_SKB_RECYCLE_DEF=0
+CONFIG_MARVELL_PHY=y
+CONFIG_MWIFIEX=y
+CONFIG_MWIFIEX_SDIO=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MV64XXX=y
+CONFIG_SPI=y
+CONFIG_SPI_ORION=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_THERMAL=y
+CONFIG_ARMADA_THERMAL=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_MVEBU=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI_PXAV3=y
+CONFIG_MMC_MVSDIO=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=m
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_S35390A=y
+CONFIG_RTC_DRV_MV=y
+CONFIG_DMADEVICES=y
+CONFIG_MV_XOR=y
+CONFIG_ASYNC_TX_DMA=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_MEMORY=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
+CONFIG_BTRFS_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_SQUASHFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V4=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_UTF8=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_LL=y
+CONFIG_DEBUG_MVEBU_UART_ALTERNATE=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_CRYPTO_PCBC=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_DES=y
+CONFIG_MV_INCLUDE_CESA=y
+CONFIG_OCF_OCF=y
+CONFIG_OCF_CRYPTODEV=y
+CONFIG_OCF_DM_CRYPT=y
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 05ee9eebad6b..e780afbcee54 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -53,6 +53,13 @@
 #define put_byte_3      lsl #0
 #endif
 
+/* Select code for any configuration running in BE8 mode */
+#ifdef CONFIG_CPU_ENDIAN_BE8
+#define ARM_BE8(code...) code
+#else
+#define ARM_BE8(code...)
+#endif
+
 /*
  * Data preload for architectures that support it
  */
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index da1c77d39327..6447a0b7b127 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -301,8 +301,8 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 
 	__asm__ __volatile__("@ atomic64_add\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
-"	adds	%0, %0, %4\n"
-"	adc	%H0, %H0, %H4\n"
+"	adds	%Q0, %Q0, %Q4\n"
+"	adc	%R0, %R0, %R4\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
@@ -320,8 +320,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
 
 	__asm__ __volatile__("@ atomic64_add_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
-"	adds	%0, %0, %4\n"
-"	adc	%H0, %H0, %H4\n"
+"	adds	%Q0, %Q0, %Q4\n"
+"	adc	%R0, %R0, %R4\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
@@ -341,8 +341,8 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 
 	__asm__ __volatile__("@ atomic64_sub\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
-"	subs	%0, %0, %4\n"
-"	sbc	%H0, %H0, %H4\n"
+"	subs	%Q0, %Q0, %Q4\n"
+"	sbc	%R0, %R0, %R4\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
@@ -360,8 +360,8 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
 
 	__asm__ __volatile__("@ atomic64_sub_return\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
-"	subs	%0, %0, %4\n"
-"	sbc	%H0, %H0, %H4\n"
+"	subs	%Q0, %Q0, %Q4\n"
+"	sbc	%R0, %R0, %R4\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
 "	bne	1b"
@@ -428,9 +428,9 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
 
 	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
 "1:	ldrexd	%0, %H0, [%3]\n"
-"	subs	%0, %0, #1\n"
-"	sbc	%H0, %H0, #0\n"
-"	teq	%H0, #0\n"
+"	subs	%Q0, %Q0, #1\n"
+"	sbc	%R0, %R0, #0\n"
+"	teq	%R0, #0\n"
 "	bmi	2f\n"
 "	strexd	%1, %0, %H0, [%3]\n"
 "	teq	%1, #0\n"
@@ -459,8 +459,8 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 "	teqeq	%H0, %H5\n"
 "	moveq	%1, #0\n"
 "	beq	2f\n"
-"	adds	%0, %0, %6\n"
-"	adc	%H0, %H0, %H6\n"
+"	adds	%Q0, %Q0, %Q6\n"
+"	adc	%R0, %R0, %R6\n"
 "	strexd	%2, %0, %H0, [%4]\n"
 "	teq	%2, #0\n"
 "	bne	1b\n"
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 7af5c6c3653a..b274bde24905 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -2,6 +2,8 @@
 #define _ASMARM_BUG_H
 
 #include <linux/linkage.h>
+#include <linux/types.h>
+#include <asm/opcodes.h>
 
 #ifdef CONFIG_BUG
 
@@ -12,10 +14,10 @@
  */
 #ifdef CONFIG_THUMB2_KERNEL
 #define BUG_INSTR_VALUE 0xde02
-#define BUG_INSTR_TYPE ".hword "
+#define BUG_INSTR(__value) __inst_thumb16(__value)
 #else
 #define BUG_INSTR_VALUE 0xe7f001f2
-#define BUG_INSTR_TYPE ".word "
+#define BUG_INSTR(__value) __inst_arm(__value)
 #endif
 
 
@@ -33,7 +35,7 @@
 
 #define __BUG(__file, __line, __value)				\
 do {								\
-	asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n"	\
+	asm volatile("1:\t" BUG_INSTR(__value) "\n"  \
 		".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
 		"2:\t.asciz " #__file "\n" 			\
 		".popsection\n" 				\
@@ -48,7 +50,7 @@ do {								\
 
 #define __BUG(__file, __line, __value)				\
 do {								\
-	asm volatile(BUG_INSTR_TYPE #__value);			\
+	asm volatile(BUG_INSTR(__value) "\n");			\
 	unreachable();						\
 } while (0)
 #endif  /* CONFIG_DEBUG_BUGVERBOSE */
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index a25e62d2de6e..2059f019bef4 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -437,4 +437,50 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
 #define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
 #define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
 
+/*
+ * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
+ * To do so we must:
+ *
+ * - Clear the SCTLR.C bit to prevent further cache allocations
+ * - Flush the desired level of cache
+ * - Clear the ACTLR "SMP" bit to disable local coherency
+ *
+ * ... and so without any intervening memory access in between those steps,
+ * not even to the stack.
+ *
+ * WARNING -- After this has been called:
+ *
+ * - No ldrex/strex (and similar) instructions must be used.
+ * - The CPU is obviously no longer coherent with the other CPUs.
+ * - This is unlikely to work as expected if Linux is running non-secure.
+ *
+ * Note:
+ *
+ * - This is known to apply to several ARMv7 processor implementations,
+ *   however some exceptions may exist.  Caveat emptor.
+ *
+ * - The clobber list is dictated by the call to v7_flush_dcache_*.
+ *   fp is preserved to the stack explicitly prior disabling the cache
+ *   since adding it to the clobber list is incompatible with having
+ *   CONFIG_FRAME_POINTER=y.  ip is saved as well if ever r12-clobbering
+ *   trampoline are inserted by the linker and to keep sp 64-bit aligned.
+ */
+#define v7_exit_coherency_flush(level) \
+	asm volatile( \
+	"stmfd	sp!, {fp, ip} \n\t" \
+	"mrc	p15, 0, r0, c1, c0, 0	@ get SCTLR \n\t" \
+	"bic	r0, r0, #"__stringify(CR_C)" \n\t" \
+	"mcr	p15, 0, r0, c1, c0, 0	@ set SCTLR \n\t" \
+	"isb	\n\t" \
+	"bl	v7_flush_dcache_"__stringify(level)" \n\t" \
+	"clrex	\n\t" \
+	"mrc	p15, 0, r0, c1, c0, 1	@ get ACTLR \n\t" \
+	"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t" \
+	"mcr	p15, 0, r0, c1, c0, 1	@ set ACTLR \n\t" \
+	"isb	\n\t" \
+	"dsb	\n\t" \
+	"ldmfd	sp!, {fp, ip}" \
+	: : : "r0","r1","r2","r3","r4","r5","r6","r7", \
+	      "r9","r10","lr","memory" )
+
 #endif
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 5b579b951503..0d5383d0f329 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -127,6 +127,8 @@ extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);
  */
 extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 			   gfp_t gfp, struct dma_attrs *attrs);
+extern void *arm_coherent_dma_alloc(struct device *dev, size_t size,
+				    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
 
 #define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
 
@@ -159,6 +161,8 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
  */
 extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 			 dma_addr_t handle, struct dma_attrs *attrs);
+extern void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
+				  dma_addr_t handle, struct dma_attrs *attrs);
 
 #define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
 
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 56211f2084ef..cb1b89f7461f 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -109,7 +109,12 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 #define ELF_CORE_COPY_TASK_REGS dump_task_regs
 
 #define CORE_DUMP_USE_REGSET
+
+#ifdef CONFIG_MV_LARGE_PAGE_SUPPORT
+#define ELF_EXEC_PAGESIZE	PAGE_SIZE
+#else
 #define ELF_EXEC_PAGESIZE	4096
+#endif
 
 /* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
    use of this is to invoke "./ld.so someprog" to test out a new version of
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index bbae919bceb4..34e107ff85c8 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -13,8 +13,13 @@
  * 0xfffe0000 and 0xfffeffff.
  */
 
+#if defined(CONFIG_MV_LARGE_PAGE_SUPPORT) && defined(CONFIG_HIGHMEM)
+#define FIXADDR_START		0xffc00000UL
+#define FIXADDR_TOP		0xfff00000UL
+#else
 #define FIXADDR_START		0xfff00000UL
 #define FIXADDR_TOP		0xfffe0000UL
+#endif
 #define FIXADDR_SIZE		(FIXADDR_TOP - FIXADDR_START)
 
 #define FIX_KMAP_BEGIN		0
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 3b2c40b5bfa2..bb8beefaa74d 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -110,6 +110,7 @@
 extern void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask);
 #if defined(CONFIG_CACHE_L2X0) && defined(CONFIG_OF)
 extern int l2x0_of_init(u32 aux_val, u32 aux_mask);
+extern int l2x0_of_init_coherent(u32 aux_val, u32 aux_mask);
 #else
 static inline int l2x0_of_init(u32 aux_val, u32 aux_mask)
 {
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
index 0cf7a6b842ff..ad774f37c47c 100644
--- a/arch/arm/include/asm/hardware/coresight.h
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -24,8 +24,8 @@
 #define TRACER_TIMEOUT 10000
 
 #define etm_writel(t, v, x) \
-	(__raw_writel((v), (t)->etm_regs + (x)))
-#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x)))
+	(writel_relaxed((v), (t)->etm_regs + (x)))
+#define etm_readl(t, x) (readl_relaxed((t)->etm_regs + (x)))
 
 /* CoreSight Management Registers */
 #define CSMR_LOCKACCESS 0xfb0
@@ -142,8 +142,8 @@
 #define ETBFF_TRIGFL		BIT(10)
 
 #define etb_writel(t, v, x) \
-	(__raw_writel((v), (t)->etb_regs + (x)))
-#define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x)))
+	(writel_relaxed((v), (t)->etb_regs + (x)))
+#define etb_readl(t, x) (readl_relaxed((t)->etb_regs + (x)))
 
 #define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
 #define etm_unlock(t) \
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index d070741b2b37..60a4f2a307b3 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -171,6 +171,12 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
 /* PCI fixed i/o mapping */
 #define PCI_IO_VIRT_BASE	0xfee00000
 
+#if defined(CONFIG_PCI)
+void pci_ioremap_set_mem_type(int mem_type);
+#else
+static inline void pci_ioremap_set_mem_type(int mem_type) {}
+#endif
+
 extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
 
 /*
diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
index 48066ce9ea34..0a9d5dd93294 100644
--- a/arch/arm/include/asm/kgdb.h
+++ b/arch/arm/include/asm/kgdb.h
@@ -11,6 +11,7 @@
 #define __ARM_KGDB_H__
 
 #include <linux/ptrace.h>
+#include <asm/opcodes.h>
 
 /*
  * GDB assumes that we're a user process being debugged, so
@@ -41,7 +42,7 @@
 
 static inline void arch_kgdb_breakpoint(void)
 {
-	asm(".word 0xe7ffdeff");
+	asm(__inst_arm(0xe7ffdeff));
 }
 
 extern void kgdb_handle_bus_error(void);
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 308ad7d6f98b..3e08c6bd8c89 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -20,6 +20,10 @@ struct smp_operations;
 #define smp_ops(ops) (struct smp_operations *)NULL
 #endif
 
+/* Possible flags in struct machine_desc */
+#define MACHINE_NEEDS_CPOLICY_WRITEALLOC BIT(0)
+#define MACHINE_NEEDS_SHAREABLE_PAGES    BIT(1)
+
 struct machine_desc {
 	unsigned int		nr;		/* architecture number	*/
 	const char		*name;		/* architecture name	*/
@@ -39,6 +43,7 @@ struct machine_desc {
 	unsigned char		reserve_lp0 :1;	/* never has lp0	*/
 	unsigned char		reserve_lp1 :1;	/* never has lp1	*/
 	unsigned char		reserve_lp2 :1;	/* never has lp2	*/
+	unsigned long           flags;
 	char			restart_mode;	/* default restart mode	*/
 	struct smp_operations	*smp;		/* SMP operations	*/
 	void			(*fixup)(struct tag *, char **,
diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h
index 7d2c3c843801..0279cd4ea92a 100644
--- a/arch/arm/include/asm/mach/pci.h
+++ b/arch/arm/include/asm/mach/pci.h
@@ -35,6 +35,8 @@ struct hw_pci {
 					  resource_size_t start,
 					  resource_size_t size,
 					  resource_size_t align);
+	void		(*add_bus)(struct pci_bus *bus);
+	void		(*remove_bus)(struct pci_bus *bus);
 };
 
 /*
@@ -62,6 +64,8 @@ struct pci_sys_data {
 					  resource_size_t start,
 					  resource_size_t size,
 					  resource_size_t align);
+	void		(*add_bus)(struct pci_bus *bus);
+	void		(*remove_bus)(struct pci_bus *bus);
 	void		*private_data;	/* platform controller private data	*/
 };
 
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 57870ab313c5..b7200f910981 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -80,7 +80,11 @@
  */
 #define IOREMAP_MAX_ORDER	24
 
+#if defined(CONFIG_MV_LARGE_PAGE_SUPPORT) && defined(CONFIG_HIGHMEM)
+#define CONSISTENT_END		(0xffc00000UL)
+#else
 #define CONSISTENT_END		(0xffe00000UL)
+#endif
 
 #else /* CONFIG_MMU */
 
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 6f18da09668b..64fd15159b7d 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -16,7 +16,7 @@ typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
 #define ASID_BITS	8
 #define ASID_MASK	((~0ULL) << ASID_BITS)
-#define ASID(mm)	((mm)->context.id.counter & ~ASID_MASK)
+#define ASID(mm)	((unsigned int)((mm)->context.id.counter & ~ASID_MASK))
 #else
 #define ASID(mm)	(0)
 #endif
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index cbdc7a21f869..daf3a35ef212 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -11,7 +11,25 @@
 #define _ASMARM_PAGE_H
 
 /* PAGE_SHIFT determines the page size */
+#ifdef CONFIG_MV_8KB_SW_PAGE_SIZE_SUPPORT
+#define PAGE_SHIFT		13
+#define MV_PAGE_SIZE_STR	"8KB SW Page Size"
+#elif defined(CONFIG_MV_16KB_SW_PAGE_SIZE_SUPPORT)
+#define PAGE_SHIFT		14
+#define MV_PAGE_SIZE_STR	"16KB SW Page Size"
+#elif defined(CONFIG_MV_32KB_SW_PAGE_SIZE_SUPPORT)
+#define PAGE_SHIFT		15
+#define MV_PAGE_SIZE_STR	"32KB SW Page Size"
+#elif defined(CONFIG_MV_64KB_SW_PAGE_SIZE_SUPPORT)
+#define PAGE_SHIFT		16
+#define MV_PAGE_SIZE_STR	"64KB SW Page Size"
+#elif defined(CONFIG_MV_64KB_MMU_PAGE_SIZE_SUPPORT)
+#define PAGE_SHIFT		16
+#define MV_PAGE_SIZE_STR	"64KB MMU Page Size"
+#else
 #define PAGE_SHIFT		12
+#endif
+
 #define PAGE_SIZE		(_AC(1,UL) << PAGE_SHIFT)
 #define PAGE_MASK		(~(PAGE_SIZE-1))
 
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 5cfba15cb401..66e5b66cd197 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -65,7 +65,11 @@
 /*
  *   - extended small page/tiny page
  */
+#ifdef CONFIG_MV_64KB_MMU_PAGE_SIZE_SUPPORT
+#define PTE_EXT_XN		(_AT(pteval_t, 1) << 15)	/* v6 */
+#else
 #define PTE_EXT_XN		(_AT(pteval_t, 1) << 0)		/* v6 */
+#endif
 #define PTE_EXT_AP_MASK		(_AT(pteval_t, 3) << 4)
 #define PTE_EXT_AP0		(_AT(pteval_t, 1) << 4)
 #define PTE_EXT_AP1		(_AT(pteval_t, 2) << 4)
@@ -73,7 +77,11 @@
 #define PTE_EXT_AP_UNO_SRW	(PTE_EXT_AP0)
 #define PTE_EXT_AP_URO_SRW	(PTE_EXT_AP1)
 #define PTE_EXT_AP_URW_SRW	(PTE_EXT_AP1|PTE_EXT_AP0)
+#ifdef CONFIG_MV_64KB_MMU_PAGE_SIZE_SUPPORT
+#define PTE_EXT_TEX(x)		(_AT(pteval_t, (x)) << 12)	/* Large Page */
+#else
 #define PTE_EXT_TEX(x)		(_AT(pteval_t, (x)) << 6)	/* v5 */
+#endif
 #define PTE_EXT_APX		(_AT(pteval_t, 1) << 9)		/* v6 */
 #define PTE_EXT_COHERENT	(_AT(pteval_t, 1) << 9)		/* XScale3 */
 #define PTE_EXT_SHARED		(_AT(pteval_t, 1) << 10)	/* v6 */
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index c98c9c89b95c..50da27226d6d 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -68,13 +68,21 @@
  * until either the TLB entry is evicted under pressure, or a context
  * switch which changes the user space mapping occurs.
  */
+
+
+#ifdef CONFIG_MV_LARGE_PAGE_SUPPORT
+#define PTRS_PER_PTE		(512 >> (PAGE_SHIFT-12))
+#define PTE_HWTABLE_PTRS	(512)
+#else
 #define PTRS_PER_PTE		512
+#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)
+#endif
+
 #define PTRS_PER_PMD		1
 #define PTRS_PER_PGD		2048
 
-#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)
-#define PTE_HWTABLE_OFF		(PTE_HWTABLE_PTRS * sizeof(pte_t))
-#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u32))
+#define PTE_HWTABLE_OFF		(512 * sizeof(pte_t))
+#define PTE_HWTABLE_SIZE	(PTE_HWTABLE_PTRS * sizeof(u32))
 
 /*
  * PMD_SHIFT determines the size of the area a second-level page table can map
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 5aac06fcc97e..e709706f0ad4 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -40,7 +40,7 @@
  */
 #define VMALLOC_OFFSET		(8*1024*1024)
 #define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define VMALLOC_END		0xff000000UL
+#define VMALLOC_END		0xfff00000UL
 
 #define LIBRARY_TEXT_START	0x0c000000
 
@@ -58,7 +58,11 @@ extern void __pgd_error(const char *file, int line, pgd_t);
  * mapping to be mapped at.  This is particularly important for
  * non-high vector CPUs.
  */
+#ifndef CONFIG_MV_LARGE_PAGE_SUPPORT
 #define FIRST_USER_ADDRESS	(PAGE_SIZE * 2)
+#else
+#define FIRST_USER_ADDRESS	PAGE_SIZE
+#endif
 
 /*
  * Use TASK_SIZE as the ceiling argument for free_pgtables() and
diff --git a/arch/arm/include/asm/shmparam.h b/arch/arm/include/asm/shmparam.h
index a5223b3a9bf9..131fb28fddd1 100644
--- a/arch/arm/include/asm/shmparam.h
+++ b/arch/arm/include/asm/shmparam.h
@@ -6,7 +6,11 @@
  * or page size, whichever is greater since the cache aliases
  * every size/ways bytes.
  */
+#ifdef CONFIG_MV_LARGE_PAGE_SUPPORT
+#define	SHMLBA	(16 << 10)		 /* attach addr a multiple of (4 * 4096) */
+#else
 #define	SHMLBA	(4 * PAGE_SIZE)		 /* attach addr a multiple of this */
+#endif
 
 /*
  * Enforce SHMLBA in shmat
diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h
index 18d169373612..d638e64f30ab 100644
--- a/arch/arm/include/asm/smp_scu.h
+++ b/arch/arm/include/asm/smp_scu.h
@@ -26,7 +26,7 @@ static inline unsigned long scu_a9_get_base(void)
 unsigned int scu_get_core_count(void __iomem *);
 int scu_power_mode(void __iomem *, unsigned int);
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_HAVE_ARM_SCU)
 void scu_enable(void __iomem *scu_base);
 #else
 static inline void scu_enable(void __iomem *scu_base) {}
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index a3625d141c1d..643c050f3137 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -372,9 +372,19 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 static inline void
 local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 {
+#if !defined(CONFIG_MV_LARGE_PAGE_SUPPORT) || defined(CONFIG_MV_64KB_MMU_PAGE_SIZE_SUPPORT)
 	const int zero = 0;
+#endif
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
+#if defined(CONFIG_MV_LARGE_PAGE_SUPPORT) && !defined(CONFIG_MV_64KB_MMU_PAGE_SIZE_SUPPORT)
+	if (tlb_flag(TLB_WB))
+		dsb();
+
+	uaddr = (uaddr & PAGE_MASK);
+	__cpu_flush_user_tlb_range(uaddr, uaddr + PAGE_SIZE, vma);
+
+#else
 	uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
 
 	if (tlb_flag(TLB_WB))
@@ -398,17 +408,27 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", uaddr);
 #endif
 
+#endif /* CONFIG_MV_LARGE_PAGE_SUPPORT && !CONFIG_MV_64KB_MMU_PAGE_SIZE_SUPPORT */
 	if (tlb_flag(TLB_BARRIER))
 		dsb();
 }
 
 static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
 {
+#if !defined(CONFIG_MV_LARGE_PAGE_SUPPORT) || defined(CONFIG_MV_64KB_MMU_PAGE_SIZE_SUPPORT)
 	const int zero = 0;
+#endif
 	const unsigned int __tlb_flag = __cpu_tlb_flags;
 
 	kaddr &= PAGE_MASK;
 
+#if defined(CONFIG_MV_LARGE_PAGE_SUPPORT) && !defined(CONFIG_MV_64KB_MMU_PAGE_SIZE_SUPPORT)
+	if (tlb_flag(TLB_WB))
+		dsb();
+
+	__cpu_flush_kern_tlb_range(kaddr, kaddr + PAGE_SIZE);
+#else
+
 	if (tlb_flag(TLB_WB))
 		dsb();
 
@@ -422,6 +442,7 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
 	tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr);
 	tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr);
 	tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr);
+#endif /* CONFIG_MV_LARGE_PAGE_SUPPORT && !CONFIG_MV_64KB_MMU_PAGE_SIZE_SUPPORT */
 
 	if (tlb_flag(TLB_BARRIER)) {
 		dsb();
diff --git a/arch/arm/include/debug/mvebu.S b/arch/arm/include/debug/mvebu.S
index df191afa3be1..6517311a1c91 100644
--- a/arch/arm/include/debug/mvebu.S
+++ b/arch/arm/include/debug/mvebu.S
@@ -11,7 +11,12 @@
  * published by the Free Software Foundation.
 */
 
+#ifdef CONFIG_DEBUG_MVEBU_UART_ALTERNATE
+#define ARMADA_370_XP_REGS_PHYS_BASE	0xf1000000
+#else
 #define ARMADA_370_XP_REGS_PHYS_BASE	0xd0000000
+#endif
+
 #define ARMADA_370_XP_REGS_VIRT_BASE	0xfec00000
 
 	.macro	addruart, rp, rv, tmp
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5f3338eacad2..3f239a176a69 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -17,7 +17,8 @@ CFLAGS_REMOVE_return_address.o = -pg
 
 obj-y		:= elf.o entry-armv.o entry-common.o irq.o opcodes.o \
 		   process.o ptrace.o return_address.o sched_clock.o \
-		   setup.o signal.o stacktrace.o sys_arm.o time.o traps.o
+		   setup.o signal.o sigreturn_codes.o stacktrace.o \
+		   sys_arm.o time.o traps.o
 
 obj-$(CONFIG_ATAGS)		+= atags_parse.o
 obj-$(CONFIG_ATAGS_PROC)	+= atags_proc.o
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index b2ed73c45489..b0024b209319 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -363,6 +363,20 @@ void pcibios_fixup_bus(struct pci_bus *bus)
 }
 EXPORT_SYMBOL(pcibios_fixup_bus);
 
+void pcibios_add_bus(struct pci_bus *bus)
+{
+	struct pci_sys_data *sys = bus->sysdata;
+	if (sys->add_bus)
+		sys->add_bus(bus);
+}
+
+void pcibios_remove_bus(struct pci_bus *bus)
+{
+	struct pci_sys_data *sys = bus->sysdata;
+	if (sys->remove_bus)
+		sys->remove_bus(bus);
+}
+
 /*
  * Swizzle the device pin each time we cross a bridge.  If a platform does
  * not provide a swizzle function, we perform the standard PCI swizzling.
@@ -463,6 +477,8 @@ static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 		sys->swizzle = hw->swizzle;
 		sys->map_irq = hw->map_irq;
 		sys->align_resource = hw->align_resource;
+		sys->add_bus = hw->add_bus;
+		sys->remove_bus = hw->remove_bus;
 		INIT_LIST_HEAD(&sys->resources);
 
 		if (hw->private_data)
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 32640ae7750f..45a68d6bb2a3 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -416,9 +416,8 @@ __und_usr:
 	bne	__und_usr_thumb
 	sub	r4, r2, #4			@ ARM instr at LR - 4
 1:	ldrt	r0, [r4]
-#ifdef CONFIG_CPU_ENDIAN_BE8
-	rev	r0, r0				@ little endian instruction
-#endif
+ ARM_BE8(rev	r0, r0)				@ little endian instruction
+
 	@ r0 = 32-bit ARM instruction which caused the exception
 	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
 	@ r4 = PC value for the faulting instruction
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index bc5bc0a97131..54cdf06ce3e1 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -379,9 +379,7 @@ ENTRY(vector_swi)
 #else
 	ldr	r10, [lr, #-4]			@ get SWI instruction
 #endif
-#ifdef CONFIG_CPU_ENDIAN_BE8
-	rev	r10, r10			@ little endian instruction
-#endif
+ ARM_BE8(rev	r10, r10)			@ little endian instruction
 
 #elif defined(CONFIG_AEABI)
 
@@ -542,10 +540,31 @@ ENDPROC(sys_fstatfs64_wrapper)
  * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
  * offset, we return EINVAL.
  */
+
+#ifdef CONFIG_MV_8KB_SW_PAGE_SIZE_SUPPORT
+#define PGOFF_MASK 0x1
+#define PGOFF_SHIFT 1
+#elif defined(CONFIG_MV_16KB_SW_PAGE_SIZE_SUPPORT)
+#define PGOFF_MASK 0x3
+#define PGOFF_SHIFT 2
+#elif defined(CONFIG_MV_32KB_SW_PAGE_SIZE_SUPPORT)
+#define PGOFF_MASK 0x7
+#define PGOFF_SHIFT 3
+#elif defined(CONFIG_MV_64KB_SW_PAGE_SIZE_SUPPORT)
+#define PGOFF_MASK 0xF
+#define PGOFF_SHIFT 4
+#elif defined(CONFIG_MV_64KB_MMU_PAGE_SIZE_SUPPORT)
+#define PGOFF_MASK 0xF
+#define PGOFF_SHIFT 4
+#else
+#define PGOFF_MASK 0x0
+#define PGOFF_SHIFT 0
+#endif
+
 sys_mmap2:
-#if PAGE_SHIFT > 12
+#ifdef CONFIG_MV_LARGE_PAGE_SUPPORT
 		tst	r5, #PGOFF_MASK
-		moveq	r5, r5, lsr #PAGE_SHIFT - 12
+		moveq	r5, r5, lsr #PGOFF_SHIFT
 		streq	r5, [sp, #4]
 		beq	sys_mmap_pgoff
 		mov	r0, #-EINVAL
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 8bac553fe213..a20d6c8ab5bb 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -77,6 +77,7 @@
 
 	__HEAD
 ENTRY(stext)
+ ARM_BE8(setend	be )			@ ensure we are in BE8 mode
 
  THUMB(	adr	r9, BSYM(1f)	)	@ Kernel is always entered in ARM.
  THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
@@ -351,6 +352,9 @@ ENTRY(secondary_startup)
 	 * the processor type - there is no need to check the machine type
 	 * as it has already been validated by the primary processor.
 	 */
+
+ ARM_BE8(setend	be)				@ ensure we are in BE8 mode
+
 #ifdef CONFIG_ARM_VIRT_EXT
 	bl	__hyp_stub_install_secondary
 #endif
@@ -584,8 +588,10 @@ __fixup_a_pv_table:
 	b	2f
 1:	add     r7, r3
 	ldrh	ip, [r7, #2]
+ARM_BE8(rev16	ip, ip)
 	and	ip, 0x8f00
 	orr	ip, r6	@ mask in offset bits 31-24
+ARM_BE8(rev16	ip, ip)
 	strh	ip, [r7, #2]
 2:	cmp	r4, r5
 	ldrcc	r7, [r4], #4	@ use branch for delay slot
@@ -594,8 +600,14 @@ __fixup_a_pv_table:
 #else
 	b	2f
 1:	ldr	ip, [r7, r3]
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	@ in BE8, we load data in BE, but instructions still in LE
+	bic	ip, ip, #0xff000000
+	orr	ip, ip, r6, lsl#24
+#else
 	bic	ip, ip, #0x000000ff
 	orr	ip, ip, r6	@ mask in offset bits 31-24
+#endif
 	str	ip, [r7, r3]
 2:	cmp	r4, r5
 	ldrcc	r7, [r4], #4	@ use branch for delay slot
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 1e9be5d25e56..7e137873083d 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -24,6 +24,7 @@
 #include <asm/sections.h>
 #include <asm/smp_plat.h>
 #include <asm/unwind.h>
+#include <asm/opcodes.h>
 
 #ifdef CONFIG_XIP_KERNEL
 /*
@@ -60,6 +61,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 		Elf32_Sym *sym;
 		const char *symname;
 		s32 offset;
+		u32 tmp;
 #ifdef CONFIG_THUMB2_KERNEL
 		u32 upper, lower, sign, j1, j2;
 #endif
@@ -95,7 +97,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 		case R_ARM_PC24:
 		case R_ARM_CALL:
 		case R_ARM_JUMP24:
-			offset = (*(u32 *)loc & 0x00ffffff) << 2;
+			offset = __mem_to_opcode_arm(*(u32 *)loc);
+			offset = (offset & 0x00ffffff) << 2;
 			if (offset & 0x02000000)
 				offset -= 0x04000000;
 
@@ -111,9 +114,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			}
 
 			offset >>= 2;
+			offset &= 0x00ffffff;
 
-			*(u32 *)loc &= 0xff000000;
-			*(u32 *)loc |= offset & 0x00ffffff;
+			*(u32 *)loc &= __opcode_to_mem_arm(0xff000000);
+			*(u32 *)loc |= __opcode_to_mem_arm(offset);
 			break;
 
 	       case R_ARM_V4BX:
@@ -121,8 +125,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			* other bits to re-code instruction as
 			* MOV PC,Rm.
 			*/
-		       *(u32 *)loc &= 0xf000000f;
-		       *(u32 *)loc |= 0x01a0f000;
+		       *(u32 *)loc &= __opcode_to_mem_arm(0xf000000f);
+		       *(u32 *)loc |= __opcode_to_mem_arm(0x01a0f000);
 		       break;
 
 		case R_ARM_PREL31:
@@ -132,7 +136,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 
 		case R_ARM_MOVW_ABS_NC:
 		case R_ARM_MOVT_ABS:
-			offset = *(u32 *)loc;
+			offset = tmp = __mem_to_opcode_arm(*(u32 *)loc);
 			offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff);
 			offset = (offset ^ 0x8000) - 0x8000;
 
@@ -140,16 +144,18 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS)
 				offset >>= 16;
 
-			*(u32 *)loc &= 0xfff0f000;
-			*(u32 *)loc |= ((offset & 0xf000) << 4) |
-					(offset & 0x0fff);
+			tmp &= 0xfff0f000;
+			tmp |= ((offset & 0xf000) << 4) |
+				(offset & 0x0fff);
+
+			*(u32 *)loc = __opcode_to_mem_arm(tmp);
 			break;
 
 #ifdef CONFIG_THUMB2_KERNEL
 		case R_ARM_THM_CALL:
 		case R_ARM_THM_JUMP24:
-			upper = *(u16 *)loc;
-			lower = *(u16 *)(loc + 2);
+			upper = __mem_to_opcode_thumb16(*(u16 *)loc);
+			lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
 
 			/*
 			 * 25 bit signed address range (Thumb-2 BL and B.W
@@ -198,17 +204,20 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			sign = (offset >> 24) & 1;
 			j1 = sign ^ (~(offset >> 23) & 1);
 			j2 = sign ^ (~(offset >> 22) & 1);
-			*(u16 *)loc = (u16)((upper & 0xf800) | (sign << 10) |
+			upper = (u16)((upper & 0xf800) | (sign << 10) |
 					    ((offset >> 12) & 0x03ff));
-			*(u16 *)(loc + 2) = (u16)((lower & 0xd000) |
-						  (j1 << 13) | (j2 << 11) |
-						  ((offset >> 1) & 0x07ff));
+			lower = (u16)((lower & 0xd000) |
+				      (j1 << 13) | (j2 << 11) |
+				      ((offset >> 1) & 0x07ff));
+
+			*(u16 *)loc = __opcode_to_mem_thumb16(upper);
+			*(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower);
 			break;
 
 		case R_ARM_THM_MOVW_ABS_NC:
 		case R_ARM_THM_MOVT_ABS:
-			upper = *(u16 *)loc;
-			lower = *(u16 *)(loc + 2);
+			upper = __mem_to_opcode_thumb16(*(u16 *)loc);
+			lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
 
 			/*
 			 * MOVT/MOVW instructions encoding in Thumb-2:
@@ -229,12 +238,14 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
 			if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS)
 				offset >>= 16;
 
-			*(u16 *)loc = (u16)((upper & 0xfbf0) |
-					    ((offset & 0xf000) >> 12) |
-					    ((offset & 0x0800) >> 1));
-			*(u16 *)(loc + 2) = (u16)((lower & 0x8f00) |
-						  ((offset & 0x0700) << 4) |
-						  (offset & 0x00ff));
+			upper = (u16)((upper & 0xfbf0) |
+				      ((offset & 0xf000) >> 12) |
+				      ((offset & 0x0800) >> 1));
+			lower = (u16)((lower & 0x8f00) |
+				      ((offset & 0x0700) << 4) |
+				      (offset & 0x00ff));
+			*(u16 *)loc = __opcode_to_mem_thumb16(upper);
+			*(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower);
 			break;
 #endif
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index e19edc6f2d15..57f5c492a157 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -16,6 +16,8 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/uaccess.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
 
 #include <asm/irq_regs.h>
 #include <asm/pmu.h>
@@ -300,9 +302,15 @@ validate_group(struct perf_event *event)
 
 static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
 {
-	struct arm_pmu *armpmu = (struct arm_pmu *) dev;
-	struct platform_device *plat_device = armpmu->plat_device;
-	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
+	struct arm_pmu *armpmu;
+	struct platform_device *plat_device;
+	struct arm_pmu_platdata *plat;
+
+	if (irq_is_percpu(irq))
+		dev = *(void **)dev;
+	armpmu = dev;
+	plat_device = armpmu->plat_device;
+	plat = dev_get_platdata(&plat_device->dev);
 
 	if (plat && plat->handle_irq)
 		return plat->handle_irq(irq, dev, armpmu->handle_irq);
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 1f2740e3dbc0..46b50a9bee17 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -25,6 +25,8 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
 
 #include <asm/cputype.h>
 #include <asm/irq_regs.h>
@@ -33,6 +35,7 @@
 /* Set at runtime when we know what CPU type we are. */
 static struct arm_pmu *cpu_pmu;
 
+static DEFINE_PER_CPU(struct arm_pmu *, percpu_pmu);
 static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
 static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
 static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
@@ -71,6 +74,26 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
 	return &__get_cpu_var(cpu_hw_events);
 }
 
+static void cpu_pmu_enable_percpu_irq(void *data)
+{
+	struct arm_pmu *cpu_pmu = data;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	int irq = platform_get_irq(pmu_device, 0);
+
+	enable_percpu_irq(irq, IRQ_TYPE_NONE);
+	cpumask_set_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
+}
+
+static void cpu_pmu_disable_percpu_irq(void *data)
+{
+	struct arm_pmu *cpu_pmu = data;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+	int irq = platform_get_irq(pmu_device, 0);
+
+	cpumask_clear_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
+	disable_percpu_irq(irq);
+}
+
 static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 {
 	int i, irq, irqs;
@@ -78,12 +101,18 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 
 	irqs = min(pmu_device->num_resources, num_possible_cpus());
 
-	for (i = 0; i < irqs; ++i) {
-		if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
-			continue;
-		irq = platform_get_irq(pmu_device, i);
-		if (irq >= 0)
-			free_irq(irq, cpu_pmu);
+	irq = platform_get_irq(pmu_device, 0);
+	if (irq >= 0 && irq_is_percpu(irq)) {
+		on_each_cpu(cpu_pmu_disable_percpu_irq, cpu_pmu, 1);
+		free_percpu_irq(irq, &percpu_pmu);
+	} else {
+		for (i = 0; i < irqs; ++i) {
+			if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
+				continue;
+			irq = platform_get_irq(pmu_device, i);
+			if (irq >= 0)
+				free_irq(irq, cpu_pmu);
+		}
 	}
 }
 
@@ -101,32 +130,44 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 		return -ENODEV;
 	}
 
-	for (i = 0; i < irqs; ++i) {
-		err = 0;
-		irq = platform_get_irq(pmu_device, i);
-		if (irq < 0)
-			continue;
-
-		/*
-		 * If we have a single PMU interrupt that we can't shift,
-		 * assume that we're running on a uniprocessor machine and
-		 * continue. Otherwise, continue without this interrupt.
-		 */
-		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
-			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
-				    irq, i);
-			continue;
-		}
-
-		err = request_irq(irq, handler, IRQF_NOBALANCING, "arm-pmu",
-				  cpu_pmu);
+	irq = platform_get_irq(pmu_device, 0);
+	if (irq >= 0 && irq_is_percpu(irq)) {
+		err = request_percpu_irq(irq, handler, "arm-pmu", &percpu_pmu);
 		if (err) {
 			pr_err("unable to request IRQ%d for ARM PMU counters\n",
 				irq);
 			return err;
 		}
-
-		cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+		on_each_cpu(cpu_pmu_enable_percpu_irq, cpu_pmu, 1);
+	} else {
+		for (i = 0; i < irqs; ++i) {
+			err = 0;
+			irq = platform_get_irq(pmu_device, i);
+			if (irq < 0)
+				continue;
+
+			/*
+			 * If we have a single PMU interrupt that we can't shift,
+			 * assume that we're running on a uniprocessor machine and
+			 * continue. Otherwise, continue without this interrupt.
+			 */
+			if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+				pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+					    irq, i);
+				continue;
+			}
+
+			err = request_irq(irq, handler,
+					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
+					  cpu_pmu);
+			if (err) {
+				pr_err("unable to request IRQ%d for ARM PMU counters\n",
+					irq);
+				return err;
+			}
+
+			cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+		}
 	}
 
 	return 0;
@@ -140,6 +181,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
 		events->events = per_cpu(hw_events, cpu);
 		events->used_mask = per_cpu(used_mask, cpu);
 		raw_spin_lock_init(&events->pmu_lock);
+		per_cpu(percpu_pmu, cpu) = cpu_pmu;
 	}
 
 	cpu_pmu->get_hw_events	= cpu_pmu_get_cpu_events;
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 5a42c12767af..3c23086dc8e2 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -21,29 +21,7 @@
 #include <asm/unistd.h>
 #include <asm/vfp.h>
 
-/*
- * For ARM syscalls, we encode the syscall number into the instruction.
- */
-#define SWI_SYS_SIGRETURN	(0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
-#define SWI_SYS_RT_SIGRETURN	(0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
-
-/*
- * With EABI, the syscall number has to be loaded into r7.
- */
-#define MOV_R7_NR_SIGRETURN	(0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
-#define MOV_R7_NR_RT_SIGRETURN	(0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
-
-/*
- * For Thumb syscalls, we pass the syscall number via r7.  We therefore
- * need two 16-bit instructions.
- */
-#define SWI_THUMB_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
-#define SWI_THUMB_RT_SIGRETURN	(0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
-
-static const unsigned long sigreturn_codes[7] = {
-	MOV_R7_NR_SIGRETURN,    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
-	MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
-};
+extern const unsigned long sigreturn_codes[7];
 
 static unsigned long signal_return_offset;
 
diff --git a/arch/arm/kernel/sigreturn_codes.S b/arch/arm/kernel/sigreturn_codes.S
new file mode 100644
index 000000000000..3c5d0f2170fd
--- /dev/null
+++ b/arch/arm/kernel/sigreturn_codes.S
@@ -0,0 +1,80 @@
+/*
+ * sigreturn_codes.S - code sinpets for sigreturn syscalls
+ *
+ * Created by:	Victor Kamensky, 2013-08-13
+ * Copyright:	(C) 2013  Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/unistd.h>
+
+/*
+ * For ARM syscalls, we encode the syscall number into the instruction.
+ * With EABI, the syscall number has to be loaded into r7. As result
+ * ARM syscall sequence snippet will have move and svc in .arm encoding
+ *
+ * For Thumb syscalls, we pass the syscall number via r7.  We therefore
+ * need two 16-bit instructions in .thumb encoding
+ *
+ * Please note sigreturn_codes code are not executed in place. Instead
+ * they just copied by kernel into appropriate places. Code inside of
+ * arch/arm/kernel/signal.c is very sensitive to layout of these code
+ * snippets.
+ */
+
+#if __LINUX_ARM_ARCH__ <= 4
+	/*
+	 * Note we manually set minimally required arch that supports
+	 * required thumb opcodes for early arch versions. It is OK
+	 * for this file to be used in combination with other
+	 * lower arch variants, since these code snippets are only
+	 * used as input data.
+	 */
+	.arch armv4t
+#endif
+
+	.section .rodata
+	.global sigreturn_codes
+	.type	sigreturn_codes, #object
+
+	.arm
+
+sigreturn_codes:
+
+	/* ARM sigreturn syscall code snippet */
+	mov	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
+	swi	#(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+
+	/* Thumb sigreturn syscall code snippet */
+	.thumb
+	movs	r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
+	swi	#0
+
+	/* ARM sigreturn_rt syscall code snippet */
+	.arm
+	mov	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
+	swi	#(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+
+	/* Thumb sigreturn_rt syscall code snippet */
+	.thumb
+	movs	r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
+	swi	#0
+
+	/*
+	 * Note on addtional space: setup_return in signal.c
+	 * algorithm uses two words copy regardless whether
+	 * it is thumb case or not, so we need additional
+	 * word after real last entry.
+	 */
+	.arm
+	.space	4
+
+	.size	sigreturn_codes, . - sigreturn_codes
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 987dcf33415c..bf43f66e9677 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -81,6 +81,7 @@ ENDPROC(cpu_resume_after_mmu)
 	.data
 	.align
 ENTRY(cpu_resume)
+ARM_BE8(setend be)			@ ensure we are in BE mode
 #ifdef CONFIG_SMP
 	adr	r0, sleep_save_sp
 	ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
@@ -98,6 +99,7 @@ THUMB(	mov	sp, r2			)
 THUMB(	bx	r3			)
 ENDPROC(cpu_resume)
 
+	.globl	sleep_save_sp
 sleep_save_sp:
 	.rept	CONFIG_NR_CPUS
 	.long	0				@ preserve stack phys ptr here
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5919eb451bb9..ea9c02059061 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -584,8 +584,18 @@ static void ipi_cpu_stop(unsigned int cpu)
 	local_fiq_disable();
 	local_irq_disable();
 
+
+	/*
+	 * If Soft power off is enabled, instead of relaxing the CPU upon IPI_CPU_STOP event,
+	 * the CPU will enter WFI state
+	 */
 	while (1)
+#ifdef CONFIG_MVEBU_SOFT_POWEROFF
+		wfi();
+#else
 		cpu_relax();
+#endif
+
 }
 
 /*
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 5bc1a63284e3..09d21b063154 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -22,13 +22,12 @@
 #define SCU_INVALIDATE		0x0c
 #define SCU_FPGA_REVISION	0x10
 
-#ifdef CONFIG_SMP
 /*
  * Get the number of CPU cores from the SCU configuration
  */
 unsigned int __init scu_get_core_count(void __iomem *scu_base)
 {
-	unsigned int ncores = __raw_readl(scu_base + SCU_CONFIG);
+	unsigned int ncores = readl_relaxed(scu_base + SCU_CONFIG);
 	return (ncores & 0x03) + 1;
 }
 
@@ -42,19 +41,19 @@ void scu_enable(void __iomem *scu_base)
 #ifdef CONFIG_ARM_ERRATA_764369
 	/* Cortex-A9 only */
 	if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090) {
-		scu_ctrl = __raw_readl(scu_base + 0x30);
+		scu_ctrl = readl_relaxed(scu_base + 0x30);
 		if (!(scu_ctrl & 1))
-			__raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
+			writel_relaxed(scu_ctrl | 0x1, scu_base + 0x30);
 	}
 #endif
 
-	scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
+	scu_ctrl = readl_relaxed(scu_base + SCU_CTRL);
 	/* already enabled? */
 	if (scu_ctrl & 1)
 		return;
 
 	scu_ctrl |= 1;
-	__raw_writel(scu_ctrl, scu_base + SCU_CTRL);
+	writel_relaxed(scu_ctrl, scu_base + SCU_CTRL);
 
 	/*
 	 * Ensure that the data accessed by CPU0 before the SCU was
@@ -62,7 +61,6 @@ void scu_enable(void __iomem *scu_base)
 	 */
 	flush_cache_all();
 }
-#endif
 
 /*
  * Set the executing CPUs power mode as defined.  This will be in
@@ -80,9 +78,9 @@ int scu_power_mode(void __iomem *scu_base, unsigned int mode)
 	if (mode > 3 || mode == 1 || cpu > 3)
 		return -EINVAL;
 
-	val = __raw_readb(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
+	val = readb_relaxed(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
 	val |= mode;
-	__raw_writeb(val, scu_base + SCU_CPU_STATUS + cpu);
+	writeb_relaxed(val, scu_base + SCU_CPU_STATUS + cpu);
 
 	return 0;
 }
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index f6fd1d4398c6..4971ccf012ca 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -45,7 +45,7 @@ static void twd_set_mode(enum clock_event_mode mode,
 	case CLOCK_EVT_MODE_PERIODIC:
 		ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
 			| TWD_TIMER_CONTROL_PERIODIC;
-		__raw_writel(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
+		writel_relaxed(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
 			twd_base + TWD_TIMER_LOAD);
 		break;
 	case CLOCK_EVT_MODE_ONESHOT:
@@ -58,18 +58,18 @@ static void twd_set_mode(enum clock_event_mode mode,
 		ctrl = 0;
 	}
 
-	__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+	writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL);
 }
 
 static int twd_set_next_event(unsigned long evt,
 			struct clock_event_device *unused)
 {
-	unsigned long ctrl = __raw_readl(twd_base + TWD_TIMER_CONTROL);
+	unsigned long ctrl = readl_relaxed(twd_base + TWD_TIMER_CONTROL);
 
 	ctrl |= TWD_TIMER_CONTROL_ENABLE;
 
-	__raw_writel(evt, twd_base + TWD_TIMER_COUNTER);
-	__raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+	writel_relaxed(evt, twd_base + TWD_TIMER_COUNTER);
+	writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL);
 
 	return 0;
 }
@@ -82,8 +82,8 @@ static int twd_set_next_event(unsigned long evt,
  */
 static int twd_timer_ack(void)
 {
-	if (__raw_readl(twd_base + TWD_TIMER_INTSTAT)) {
-		__raw_writel(1, twd_base + TWD_TIMER_INTSTAT);
+	if (readl_relaxed(twd_base + TWD_TIMER_INTSTAT)) {
+		writel_relaxed(1, twd_base + TWD_TIMER_INTSTAT);
 		return 1;
 	}
 
@@ -209,15 +209,15 @@ static void __cpuinit twd_calibrate_rate(void)
 		waitjiffies += 5;
 
 				 /* enable, no interrupt or reload */
-		__raw_writel(0x1, twd_base + TWD_TIMER_CONTROL);
+		writel_relaxed(0x1, twd_base + TWD_TIMER_CONTROL);
 
 				 /* maximum value */
-		__raw_writel(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);
+		writel_relaxed(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);
 
 		while (get_jiffies_64() < waitjiffies)
 			udelay(10);
 
-		count = __raw_readl(twd_base + TWD_TIMER_COUNTER);
+		count = readl_relaxed(twd_base + TWD_TIMER_COUNTER);
 
 		twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
 
@@ -275,7 +275,7 @@ static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
 	 * bother with the below.
 	 */
 	if (per_cpu(percpu_setup_called, cpu)) {
-		__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+		writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
 		clockevents_register_device(*__this_cpu_ptr(twd_evt));
 		enable_percpu_irq(clk->irq, 0);
 		return 0;
@@ -288,7 +288,7 @@ static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
 	 * The following is done once per CPU the first time .setup() is
 	 * called.
 	 */
-	__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+	writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
 
 	clk->name = "local_timer";
 	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index d6a0fdb6c2ee..b4fd850c34b2 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -34,6 +34,7 @@
 #include <asm/unwind.h>
 #include <asm/tls.h>
 #include <asm/system_misc.h>
+#include <asm/opcodes.h>
 
 static const char *handler[]= {
 	"prefetch abort",
@@ -347,15 +348,17 @@ void arm_notify_die(const char *str, struct pt_regs *regs,
 int is_valid_bugaddr(unsigned long pc)
 {
 #ifdef CONFIG_THUMB2_KERNEL
-	unsigned short bkpt;
+	u16 bkpt;
+	u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
 #else
-	unsigned long bkpt;
+	u32 bkpt;
+	u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
 #endif
 
 	if (probe_kernel_address((unsigned *)pc, bkpt))
 		return 0;
 
-	return bkpt == BUG_INSTR_VALUE;
+	return bkpt == insn;
 }
 
 #endif
@@ -408,25 +411,28 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 	if (processor_mode(regs) == SVC_MODE) {
 #ifdef CONFIG_THUMB2_KERNEL
 		if (thumb_mode(regs)) {
-			instr = ((u16 *)pc)[0];
+			instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]);
 			if (is_wide_instruction(instr)) {
-				instr <<= 16;
-				instr |= ((u16 *)pc)[1];
+				u16 inst2;
+				inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]);
+				instr = __opcode_thumb32_compose(instr, inst2);
 			}
 		} else
 #endif
-			instr = *(u32 *) pc;
+			instr = __mem_to_opcode_arm(*(u32 *) pc);
 	} else if (thumb_mode(regs)) {
 		if (get_user(instr, (u16 __user *)pc))
 			goto die_sig;
+		instr = __mem_to_opcode_thumb16(instr);
 		if (is_wide_instruction(instr)) {
 			unsigned int instr2;
 			if (get_user(instr2, (u16 __user *)pc+1))
 				goto die_sig;
-			instr <<= 16;
-			instr |= instr2;
+			instr2 = __mem_to_opcode_thumb16(instr2);
+			instr = __opcode_thumb32_compose(instr, instr2);
 		}
 	} else if (get_user(instr, (u32 __user *)pc)) {
+		instr = __mem_to_opcode_arm(instr);
 		goto die_sig;
 	}
 
diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
index 6ee2f6706f86..86365f9c632e 100644
--- a/arch/arm/lib/copy_page.S
+++ b/arch/arm/lib/copy_page.S
@@ -28,7 +28,11 @@ ENTRY(copy_page)
 		stmfd	sp!, {r4, lr}			@	2
 	PLD(	pld	[r1, #0]		)
 	PLD(	pld	[r1, #L1_CACHE_BYTES]		)
+#ifdef CONFIG_MV_LARGE_PAGE_SUPPORT
+		ldr	r2, =COPY_COUNT
+#else
 		mov	r2, #COPY_COUNT			@	1
+#endif
 		ldmia	r1!, {r3, r4, ip, lr}		@	4+1
 1:	PLD(	pld	[r1, #2 * L1_CACHE_BYTES])
 	PLD(	pld	[r1, #3 * L1_CACHE_BYTES])
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index e2b5da031f96..71645e6066dd 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -28,6 +28,22 @@
 #include <plat/time.h>
 #include "common.h"
 
+/* These can go away once Dove uses the mvebu-mbus DT binding */
+#define DOVE_MBUS_PCIE0_MEM_TARGET    0x4
+#define DOVE_MBUS_PCIE0_MEM_ATTR      0xe8
+#define DOVE_MBUS_PCIE0_IO_TARGET     0x4
+#define DOVE_MBUS_PCIE0_IO_ATTR       0xe0
+#define DOVE_MBUS_PCIE1_MEM_TARGET    0x8
+#define DOVE_MBUS_PCIE1_MEM_ATTR      0xe8
+#define DOVE_MBUS_PCIE1_IO_TARGET     0x8
+#define DOVE_MBUS_PCIE1_IO_ATTR       0xe0
+#define DOVE_MBUS_CESA_TARGET         0x3
+#define DOVE_MBUS_CESA_ATTR           0x1
+#define DOVE_MBUS_BOOTROM_TARGET      0x1
+#define DOVE_MBUS_BOOTROM_ATTR        0xfd
+#define DOVE_MBUS_SCRATCHPAD_TARGET   0xd
+#define DOVE_MBUS_SCRATCHPAD_ATTR     0x0
+
 /*****************************************************************************
  * I/O Address Mapping
  ****************************************************************************/
@@ -333,34 +349,40 @@ void __init dove_setup_cpu_wins(void)
 {
 	/*
 	 * The PCIe windows will no longer be statically allocated
-	 * here once Dove is migrated to the pci-mvebu driver.
+	 * here once Dove is migrated to the pci-mvebu driver. The
+	 * non-PCIe windows will no longer be created here once Dove
+	 * fully moves to DT.
 	 */
-	mvebu_mbus_add_window_remap_flags("pcie0.0",
+	mvebu_mbus_add_window_remap_by_id(DOVE_MBUS_PCIE0_IO_TARGET,
+					  DOVE_MBUS_PCIE0_IO_ATTR,
 					  DOVE_PCIE0_IO_PHYS_BASE,
 					  DOVE_PCIE0_IO_SIZE,
-					  DOVE_PCIE0_IO_BUS_BASE,
-					  MVEBU_MBUS_PCI_IO);
-	mvebu_mbus_add_window_remap_flags("pcie1.0",
+					  DOVE_PCIE0_IO_BUS_BASE);
+	mvebu_mbus_add_window_remap_by_id(DOVE_MBUS_PCIE1_IO_TARGET,
+					  DOVE_MBUS_PCIE1_IO_ATTR,
 					  DOVE_PCIE1_IO_PHYS_BASE,
 					  DOVE_PCIE1_IO_SIZE,
-					  DOVE_PCIE1_IO_BUS_BASE,
-					  MVEBU_MBUS_PCI_IO);
-	mvebu_mbus_add_window_remap_flags("pcie0.0",
-					  DOVE_PCIE0_MEM_PHYS_BASE,
-					  DOVE_PCIE0_MEM_SIZE,
-					  MVEBU_MBUS_NO_REMAP,
-					  MVEBU_MBUS_PCI_MEM);
-	mvebu_mbus_add_window_remap_flags("pcie1.0",
-					  DOVE_PCIE1_MEM_PHYS_BASE,
-					  DOVE_PCIE1_MEM_SIZE,
-					  MVEBU_MBUS_NO_REMAP,
-					  MVEBU_MBUS_PCI_MEM);
-	mvebu_mbus_add_window("cesa", DOVE_CESA_PHYS_BASE,
-			      DOVE_CESA_SIZE);
-	mvebu_mbus_add_window("bootrom", DOVE_BOOTROM_PHYS_BASE,
-			      DOVE_BOOTROM_SIZE);
-	mvebu_mbus_add_window("scratchpad", DOVE_SCRATCHPAD_PHYS_BASE,
-			      DOVE_SCRATCHPAD_SIZE);
+					  DOVE_PCIE1_IO_BUS_BASE);
+	mvebu_mbus_add_window_by_id(DOVE_MBUS_PCIE0_MEM_TARGET,
+				    DOVE_MBUS_PCIE0_MEM_ATTR,
+				    DOVE_PCIE0_MEM_PHYS_BASE,
+				    DOVE_PCIE0_MEM_SIZE);
+	mvebu_mbus_add_window_by_id(DOVE_MBUS_PCIE1_MEM_TARGET,
+				    DOVE_MBUS_PCIE1_MEM_ATTR,
+				    DOVE_PCIE1_MEM_PHYS_BASE,
+				    DOVE_PCIE1_MEM_SIZE);
+	mvebu_mbus_add_window_by_id(DOVE_MBUS_CESA_TARGET,
+				    DOVE_MBUS_CESA_ATTR,
+				    DOVE_CESA_PHYS_BASE,
+				    DOVE_CESA_SIZE);
+	mvebu_mbus_add_window_by_id(DOVE_MBUS_BOOTROM_TARGET,
+				    DOVE_MBUS_BOOTROM_ATTR,
+				    DOVE_BOOTROM_PHYS_BASE,
+				    DOVE_BOOTROM_SIZE);
+	mvebu_mbus_add_window_by_id(DOVE_MBUS_SCRATCHPAD_TARGET,
+				    DOVE_MBUS_SCRATCHPAD_ATTR,
+				    DOVE_SCRATCHPAD_PHYS_BASE,
+				    DOVE_SCRATCHPAD_SIZE);
 }
 
 void __init dove_init(void)
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c
index e7fcea7f3300..a3661598fdcd 100644
--- a/arch/arm/mach-integrator/pci_v3.c
+++ b/arch/arm/mach-integrator/pci_v3.c
@@ -610,3 +610,281 @@ void __init pci_v3_postinit(void)
 
 	register_isa_ports(PHYS_PCI_MEM_BASE, PHYS_PCI_IO_BASE, 0);
 }
+
+/*
+ * A small note about bridges and interrupts.  The DECchip 21050 (and
+ * later) adheres to the PCI-PCI bridge specification.  This says that
+ * the interrupts on the other side of a bridge are swizzled in the
+ * following manner:
+ *
+ * Dev    Interrupt   Interrupt
+ *        Pin on      Pin on
+ *        Device      Connector
+ *
+ *   4    A           A
+ *        B           B
+ *        C           C
+ *        D           D
+ *
+ *   5    A           B
+ *        B           C
+ *        C           D
+ *        D           A
+ *
+ *   6    A           C
+ *        B           D
+ *        C           A
+ *        D           B
+ *
+ *   7    A           D
+ *        B           A
+ *        C           B
+ *        D           C
+ *
+ * Where A = pin 1, B = pin 2 and so on and pin=0 = default = A.
+ * Thus, each swizzle is ((pin-1) + (device#-4)) % 4
+ */
+
+/*
+ * This routine handles multiple bridges.
+ */
+static u8 __init pci_v3_swizzle(struct pci_dev *dev, u8 *pinp)
+{
+	if (*pinp == 0)
+		*pinp = 1;
+
+	return pci_common_swizzle(dev, pinp);
+}
+
+static int irq_tab[4] __initdata = {
+	IRQ_AP_PCIINT0,	IRQ_AP_PCIINT1,	IRQ_AP_PCIINT2,	IRQ_AP_PCIINT3
+};
+
+/*
+ * map the specified device/slot/pin to an IRQ.  This works out such
+ * that slot 9 pin 1 is INT0, pin 2 is INT1, and slot 10 pin 1 is INT1.
+ */
+static int __init pci_v3_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+	int intnr = ((slot - 9) + (pin - 1)) & 3;
+
+	return irq_tab[intnr];
+}
+
+static struct hw_pci pci_v3 __initdata = {
+	.swizzle		= pci_v3_swizzle,
+	.setup			= pci_v3_setup,
+	.nr_controllers		= 1,
+	.ops			= &pci_v3_ops,
+	.preinit		= pci_v3_preinit,
+	.postinit		= pci_v3_postinit,
+};
+
+#ifdef CONFIG_OF
+
+static int __init pci_v3_map_irq_dt(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+	struct of_phandle_args oirq;
+	int ret;
+
+	ret = of_irq_parse_pci(dev, &oirq);
+	if (ret) {
+		dev_err(&dev->dev, "of_irq_parse_pci() %d\n", ret);
+		/* Proper return code 0 == NO_IRQ */
+		return 0;
+	}
+
+	return irq_create_of_mapping(&oirq);
+}
+
+static int __init pci_v3_dtprobe(struct platform_device *pdev,
+				struct device_node *np)
+{
+	struct of_pci_range_parser parser;
+	struct of_pci_range range;
+	struct resource *res;
+	int irq, ret;
+
+	if (of_pci_range_parser_init(&parser, np))
+		return -EINVAL;
+
+	/* Get base for bridge registers */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "unable to obtain PCIv3 base\n");
+		return -ENODEV;
+	}
+	pci_v3_base = devm_ioremap(&pdev->dev, res->start,
+				   resource_size(res));
+	if (!pci_v3_base) {
+		dev_err(&pdev->dev, "unable to remap PCIv3 base\n");
+		return -ENODEV;
+	}
+
+	/* Get and request error IRQ resource */
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_err(&pdev->dev, "unable to obtain PCIv3 error IRQ\n");
+		return -ENODEV;
+	}
+	ret = devm_request_irq(&pdev->dev, irq, v3_irq, 0,
+			"PCIv3 error", NULL);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "unable to request PCIv3 error IRQ %d (%d)\n", irq, ret);
+		return ret;
+	}
+
+	for_each_of_pci_range(&parser, &range) {
+		if (!range.flags) {
+			of_pci_range_to_resource(&range, np, &conf_mem);
+			conf_mem.name = "PCIv3 config";
+		}
+		if (range.flags & IORESOURCE_IO) {
+			of_pci_range_to_resource(&range, np, &io_mem);
+			io_mem.name = "PCIv3 I/O";
+		}
+		if ((range.flags & IORESOURCE_MEM) &&
+			!(range.flags & IORESOURCE_PREFETCH)) {
+			non_mem_pci = range.pci_addr;
+			non_mem_pci_sz = range.size;
+			of_pci_range_to_resource(&range, np, &non_mem);
+			non_mem.name = "PCIv3 non-prefetched mem";
+		}
+		if ((range.flags & IORESOURCE_MEM) &&
+			(range.flags & IORESOURCE_PREFETCH)) {
+			pre_mem_pci = range.pci_addr;
+			pre_mem_pci_sz = range.size;
+			of_pci_range_to_resource(&range, np, &pre_mem);
+			pre_mem.name = "PCIv3 prefetched mem";
+		}
+	}
+
+	if (!conf_mem.start || !io_mem.start ||
+	    !non_mem.start || !pre_mem.start) {
+		dev_err(&pdev->dev, "missing ranges in device node\n");
+		return -EINVAL;
+	}
+
+	pci_v3.map_irq = pci_v3_map_irq_dt;
+	pci_common_init_dev(&pdev->dev, &pci_v3);
+
+	return 0;
+}
+
+#else
+
+static inline int pci_v3_dtprobe(struct platform_device *pdev,
+				  struct device_node *np)
+{
+	return -EINVAL;
+}
+
+#endif
+
+static int __init pci_v3_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	int ret;
+
+	/* Remap the Integrator system controller */
+	ap_syscon_base = ioremap(INTEGRATOR_SC_BASE, 0x100);
+	if (!ap_syscon_base) {
+		dev_err(&pdev->dev, "unable to remap the AP syscon for PCIv3\n");
+		return -ENODEV;
+	}
+
+	/* Device tree probe path */
+	if (np)
+		return pci_v3_dtprobe(pdev, np);
+
+	pci_v3_base = devm_ioremap(&pdev->dev, PHYS_PCI_V3_BASE, SZ_64K);
+	if (!pci_v3_base) {
+		dev_err(&pdev->dev, "unable to remap PCIv3 base\n");
+		return -ENODEV;
+	}
+
+	ret = devm_request_irq(&pdev->dev, IRQ_AP_V3INT, v3_irq, 0, "V3", NULL);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to grab PCI error interrupt: %d\n",
+			ret);
+		return -ENODEV;
+	}
+
+	conf_mem.name = "PCIv3 config";
+	conf_mem.start = PHYS_PCI_CONFIG_BASE;
+	conf_mem.end = PHYS_PCI_CONFIG_BASE + SZ_16M - 1;
+	conf_mem.flags = IORESOURCE_MEM;
+
+	io_mem.name = "PCIv3 I/O";
+	io_mem.start = PHYS_PCI_IO_BASE;
+	io_mem.end = PHYS_PCI_IO_BASE + SZ_16M - 1;
+	io_mem.flags = IORESOURCE_MEM;
+
+	non_mem_pci = 0x00000000;
+	non_mem_pci_sz = SZ_256M;
+	non_mem.name = "PCIv3 non-prefetched mem";
+	non_mem.start = PHYS_PCI_MEM_BASE;
+	non_mem.end = PHYS_PCI_MEM_BASE + SZ_256M - 1;
+	non_mem.flags = IORESOURCE_MEM;
+
+	pre_mem_pci = 0x10000000;
+	pre_mem_pci_sz = SZ_256M;
+	pre_mem.name = "PCIv3 prefetched mem";
+	pre_mem.start = PHYS_PCI_PRE_BASE + SZ_256M;
+	pre_mem.end = PHYS_PCI_PRE_BASE + SZ_256M - 1;
+	pre_mem.flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
+
+	pci_v3.map_irq = pci_v3_map_irq;
+
+	pci_common_init_dev(&pdev->dev, &pci_v3);
+
+	return 0;
+}
+
+static const struct of_device_id pci_ids[] = {
+	{ .compatible = "v3,v360epc-pci", },
+	{},
+};
+
+static struct platform_driver pci_v3_driver = {
+	.driver = {
+		.name = "pci-v3",
+		.of_match_table = pci_ids,
+	},
+};
+
+static int __init pci_v3_init(void)
+{
+	return platform_driver_probe(&pci_v3_driver, pci_v3_probe);
+}
+
+subsys_initcall(pci_v3_init);
+
+/*
+ * Static mappings for the PCIv3 bridge
+ *
+ * e8000000	40000000	PCI memory		PHYS_PCI_MEM_BASE	(max 512M)
+ * ec000000	61000000	PCI config space	PHYS_PCI_CONFIG_BASE	(max 16M)
+ * fee00000	60000000	PCI IO			PHYS_PCI_IO_BASE	(max 16M)
+ */
+static struct map_desc pci_v3_io_desc[] __initdata __maybe_unused = {
+	{
+		.virtual	= (unsigned long)PCI_MEMORY_VADDR,
+		.pfn		= __phys_to_pfn(PHYS_PCI_MEM_BASE),
+		.length		= SZ_16M,
+		.type		= MT_DEVICE
+	}, {
+		.virtual	= (unsigned long)PCI_CONFIG_VADDR,
+		.pfn		= __phys_to_pfn(PHYS_PCI_CONFIG_BASE),
+		.length		= SZ_16M,
+		.type		= MT_DEVICE
+	}
+};
+
+int __init pci_v3_early_init(void)
+{
+	iotable_init(pci_v3_io_desc, ARRAY_SIZE(pci_v3_io_desc));
+	vga_base = (unsigned long)PCI_MEMORY_VADDR;
+	pci_map_io_early(__phys_to_pfn(PHYS_PCI_IO_BASE));
+	return 0;
+}
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index 73a2d905af8a..72de05f09cb8 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -1,9 +1,5 @@
 if ARCH_IXP4XX
 
-config ARCH_SUPPORTS_BIG_ENDIAN
-	bool
-	default y
-
 menu "Intel IXP4xx Implementation Options"
 
 comment "IXP4xx Platforms"
diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c
index e9647b80cb59..62061f75c492 100644
--- a/arch/arm/mach-kirkwood/board-dt.c
+++ b/arch/arm/mach-kirkwood/board-dt.c
@@ -93,6 +93,7 @@ static void __init kirkwood_dt_init(void)
 	 */
 	writel(readl(CPU_CONFIG) & ~CPU_CONFIG_ERROR_PROP, CPU_CONFIG);
 
+	BUG_ON(mvebu_mbus_dt_init(false));
 	kirkwood_setup_wins();
 
 	kirkwood_l2_init();
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index f38922897563..c074c3725b0f 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -36,6 +36,12 @@
 #include <linux/platform_data/dma-mv_xor.h>
 #include "common.h"
 
+/* These can go away once Kirkwood uses the mvebu-mbus DT binding */
+#define KIRKWOOD_MBUS_NAND_TARGET 0x01
+#define KIRKWOOD_MBUS_NAND_ATTR   0x2f
+#define KIRKWOOD_MBUS_SRAM_TARGET 0x03
+#define KIRKWOOD_MBUS_SRAM_ATTR   0x01
+
 /*****************************************************************************
  * I/O Address Mapping
  ****************************************************************************/
@@ -527,10 +533,6 @@ void __init kirkwood_cpuidle_init(void)
 void __init kirkwood_init_early(void)
 {
 	orion_time_set_base(TIMER_VIRT_BASE);
-
-	mvebu_mbus_init("marvell,kirkwood-mbus",
-			BRIDGE_WINS_BASE, BRIDGE_WINS_SZ,
-			DDR_WINDOW_CPU_BASE, DDR_WINDOW_CPU_SZ);
 }
 
 int kirkwood_tclk;
@@ -648,34 +650,14 @@ char * __init kirkwood_id(void)
 
 void __init kirkwood_setup_wins(void)
 {
-	/*
-	 * The PCIe windows will no longer be statically allocated
-	 * here once Kirkwood is migrated to the pci-mvebu driver.
-	 */
-	mvebu_mbus_add_window_remap_flags("pcie0.0",
-					  KIRKWOOD_PCIE_IO_PHYS_BASE,
-					  KIRKWOOD_PCIE_IO_SIZE,
-					  KIRKWOOD_PCIE_IO_BUS_BASE,
-					  MVEBU_MBUS_PCI_IO);
-	mvebu_mbus_add_window_remap_flags("pcie0.0",
-					  KIRKWOOD_PCIE_MEM_PHYS_BASE,
-					  KIRKWOOD_PCIE_MEM_SIZE,
-					  MVEBU_MBUS_NO_REMAP,
-					  MVEBU_MBUS_PCI_MEM);
-	mvebu_mbus_add_window_remap_flags("pcie1.0",
-					  KIRKWOOD_PCIE1_IO_PHYS_BASE,
-					  KIRKWOOD_PCIE1_IO_SIZE,
-					  KIRKWOOD_PCIE1_IO_BUS_BASE,
-					  MVEBU_MBUS_PCI_IO);
-	mvebu_mbus_add_window_remap_flags("pcie1.0",
-					  KIRKWOOD_PCIE1_MEM_PHYS_BASE,
-					  KIRKWOOD_PCIE1_MEM_SIZE,
-					  MVEBU_MBUS_NO_REMAP,
-					  MVEBU_MBUS_PCI_MEM);
-	mvebu_mbus_add_window("nand", KIRKWOOD_NAND_MEM_PHYS_BASE,
-			      KIRKWOOD_NAND_MEM_SIZE);
-	mvebu_mbus_add_window("sram", KIRKWOOD_SRAM_PHYS_BASE,
-			      KIRKWOOD_SRAM_SIZE);
+	mvebu_mbus_add_window_by_id(KIRKWOOD_MBUS_NAND_TARGET,
+				    KIRKWOOD_MBUS_NAND_ATTR,
+				    KIRKWOOD_NAND_MEM_PHYS_BASE,
+				    KIRKWOOD_NAND_MEM_SIZE);
+	mvebu_mbus_add_window_by_id(KIRKWOOD_MBUS_SRAM_TARGET,
+				    KIRKWOOD_MBUS_SRAM_ATTR,
+				    KIRKWOOD_SRAM_PHYS_BASE,
+				    KIRKWOOD_SRAM_SIZE);
 }
 
 void __init kirkwood_l2_init(void)
@@ -703,6 +685,10 @@ void __init kirkwood_init(void)
 	 */
 	writel(readl(CPU_CONFIG) & ~CPU_CONFIG_ERROR_PROP, CPU_CONFIG);
 
+	BUG_ON(mvebu_mbus_init("marvell,kirkwood-mbus",
+			BRIDGE_WINS_BASE, BRIDGE_WINS_SZ,
+			DDR_WINDOW_CPU_BASE, DDR_WINDOW_CPU_SZ));
+
 	kirkwood_setup_wins();
 
 	kirkwood_l2_init();
diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c
index 7f43e6c2f8c0..12d86f39f380 100644
--- a/arch/arm/mach-kirkwood/pcie.c
+++ b/arch/arm/mach-kirkwood/pcie.c
@@ -12,6 +12,7 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/clk.h>
+#include <linux/mbus.h>
 #include <video/vga.h>
 #include <asm/irq.h>
 #include <asm/mach/pci.h>
@@ -19,6 +20,16 @@
 #include <mach/bridge-regs.h>
 #include "common.h"
 
+/* These can go away once Kirkwood uses the mvebu-mbus DT binding */
+#define KIRKWOOD_MBUS_PCIE0_MEM_TARGET    0x4
+#define KIRKWOOD_MBUS_PCIE0_MEM_ATTR      0xe8
+#define KIRKWOOD_MBUS_PCIE0_IO_TARGET     0x4
+#define KIRKWOOD_MBUS_PCIE0_IO_ATTR       0xe0
+#define KIRKWOOD_MBUS_PCIE1_MEM_TARGET    0x4
+#define KIRKWOOD_MBUS_PCIE1_MEM_ATTR      0xd8
+#define KIRKWOOD_MBUS_PCIE1_IO_TARGET     0x4
+#define KIRKWOOD_MBUS_PCIE1_IO_ATTR       0xd0
+
 static void kirkwood_enable_pcie_clk(const char *port)
 {
 	struct clk *clk;
@@ -253,6 +264,25 @@ static void __init add_pcie_port(int index, void __iomem *base)
 
 void __init kirkwood_pcie_init(unsigned int portmask)
 {
+	mvebu_mbus_add_window_remap_by_id(KIRKWOOD_MBUS_PCIE0_IO_TARGET,
+					  KIRKWOOD_MBUS_PCIE0_IO_ATTR,
+					  KIRKWOOD_PCIE_IO_PHYS_BASE,
+					  KIRKWOOD_PCIE_IO_SIZE,
+					  KIRKWOOD_PCIE_IO_BUS_BASE);
+	mvebu_mbus_add_window_by_id(KIRKWOOD_MBUS_PCIE0_MEM_TARGET,
+				    KIRKWOOD_MBUS_PCIE0_MEM_ATTR,
+				    KIRKWOOD_PCIE_MEM_PHYS_BASE,
+				    KIRKWOOD_PCIE_MEM_SIZE);
+	mvebu_mbus_add_window_remap_by_id(KIRKWOOD_MBUS_PCIE1_IO_TARGET,
+					  KIRKWOOD_MBUS_PCIE1_IO_ATTR,
+					  KIRKWOOD_PCIE1_IO_PHYS_BASE,
+					  KIRKWOOD_PCIE1_IO_SIZE,
+					  KIRKWOOD_PCIE1_IO_BUS_BASE);
+	mvebu_mbus_add_window_by_id(KIRKWOOD_MBUS_PCIE1_MEM_TARGET,
+				    KIRKWOOD_MBUS_PCIE1_MEM_ATTR,
+				    KIRKWOOD_PCIE1_MEM_PHYS_BASE,
+				    KIRKWOOD_PCIE1_MEM_SIZE);
+
 	vga_base = KIRKWOOD_PCIE_MEM_PHYS_BASE;
 
 	if (portmask & KW_PCIE0)
diff --git a/arch/arm/mach-mv78xx0/pcie.c b/arch/arm/mach-mv78xx0/pcie.c
index dc26a654c496..445e553f4a28 100644
--- a/arch/arm/mach-mv78xx0/pcie.c
+++ b/arch/arm/mach-mv78xx0/pcie.c
@@ -18,6 +18,11 @@
 #include <mach/mv78xx0.h>
 #include "common.h"
 
+#define MV78XX0_MBUS_PCIE_MEM_TARGET(port, lane) ((port) ? 8 : 4)
+#define MV78XX0_MBUS_PCIE_MEM_ATTR(port, lane)   (0xf8 & ~(0x10 << (lane)))
+#define MV78XX0_MBUS_PCIE_IO_TARGET(port, lane)  ((port) ? 8 : 4)
+#define MV78XX0_MBUS_PCIE_IO_ATTR(port, lane)    (0xf0 & ~(0x10 << (lane)))
+
 struct pcie_port {
 	u8			maj;
 	u8			min;
@@ -71,7 +76,6 @@ static void __init mv78xx0_pcie_preinit(void)
 	start = MV78XX0_PCIE_MEM_PHYS_BASE;
 	for (i = 0; i < num_pcie_ports; i++) {
 		struct pcie_port *pp = pcie_port + i;
-		char winname[MVEBU_MBUS_MAX_WINNAME_SZ];
 
 		snprintf(pp->mem_space_name, sizeof(pp->mem_space_name),
 			"PCIe %d.%d MEM", pp->maj, pp->min);
@@ -85,17 +89,12 @@ static void __init mv78xx0_pcie_preinit(void)
 		if (request_resource(&iomem_resource, &pp->res))
 			panic("can't allocate PCIe MEM sub-space");
 
-		snprintf(winname, sizeof(winname), "pcie%d.%d",
-			 pp->maj, pp->min);
-
-		mvebu_mbus_add_window_remap_flags(winname,
-						  pp->res.start,
-						  resource_size(&pp->res),
-						  MVEBU_MBUS_NO_REMAP,
-						  MVEBU_MBUS_PCI_MEM);
-		mvebu_mbus_add_window_remap_flags(winname,
-						  i * SZ_64K, SZ_64K,
-						  0, MVEBU_MBUS_PCI_IO);
+		mvebu_mbus_add_window_by_id(MV78XX0_MBUS_PCIE_MEM_TARGET(pp->maj, pp->min),
+					    MV78XX0_MBUS_PCIE_MEM_ATTR(pp->maj, pp->min),
+					    pp->res.start, resource_size(&pp->res));
+		mvebu_mbus_add_window_remap_by_id(MV78XX0_MBUS_PCIE_IO_TARGET(pp->maj, pp->min),
+						  MV78XX0_MBUS_PCIE_IO_ATTR(pp->maj, pp->min),
+						  i * SZ_64K, SZ_64K, 0);
 	}
 }
 
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 80a8bcacd9d5..f9fd6e0e36ba 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -1,5 +1,6 @@
 config ARCH_MVEBU
 	bool "Marvell SOCs with Device Tree support" if ARCH_MULTI_V7
+	select ARCH_SUPPORTS_BIG_ENDIAN
 	select CLKSRC_MMIO
 	select COMMON_CLK
 	select GENERIC_CLOCKEVENTS
@@ -13,9 +14,21 @@ config ARCH_MVEBU
 	select MVEBU_CLK_CORE
 	select MVEBU_CLK_CPU
 	select MVEBU_CLK_GATING
+	select MVEBU_CLK_COREDIV
 	select MVEBU_MBUS
 	select ZONE_DMA if ARM_LPAE
 	select ARCH_REQUIRE_GPIOLIB
+	select MIGHT_HAVE_PCI
+	select PCI_QUIRKS if PCI
+	select RESET_CONTROLLER
+
+config MVEBU_SOFT_POWEROFF
+	bool "Soft Power Off"
+	depends on ARCH_MVEBU
+	default y
+	help
+	   Say 'Y' here if you want your kernel to support soft power off with WFI.
+	   usage: 'echo 1 > /sys/devices/platform/mv_power_wol/soft_power_idle'
 
 if ARCH_MVEBU
 
@@ -26,6 +39,7 @@ config MACH_ARMADA_370_XP
 	select ARMADA_370_XP_TIMER
 	select HAVE_SMP
 	select CACHE_L2X0
+	select ARM_CPU_SUSPEND
 	select CPU_PJ4B
 
 config MACH_ARMADA_370
@@ -36,6 +50,47 @@ config MACH_ARMADA_370
 	  Say 'Y' here if you want your kernel to support boards based
 	  on the Marvell Armada 370 SoC with device tree.
 
+config MACH_ARMADA_375
+	bool "Marvell Armada 375 boards"
+	select ARM_ERRATA_720789
+	select ARM_ERRATA_753970
+	select ARM_NEEDS_SMP_COHERENCY_ON_UP
+	select ARM_GIC
+	select ARMADA_370_XP_TIMER
+	select CACHE_L2X0
+	select ARM_CPU_SUSPEND
+	select CPU_V7
+	select HAVE_SMP
+	select HAVE_ARM_SCU
+	select HAVE_ARM_TWD if LOCAL_TIMERS
+	select NEON
+	select PINCTRL_ARMADA_375
+	select USB_ARCH_HAS_XHCI
+	help
+	  Say 'Y' here if you want your kernel to support boards based
+	  on the Marvell Armada 375 SoC with device tree.
+
+config MACH_ARMADA_380
+	bool "Marvell Armada 380/385 boards"
+	select USB_ARCH_HAS_XHCI
+	select ARM_ERRATA_720789
+	select ARM_ERRATA_753970
+	select ARM_NEEDS_SMP_COHERENCY_ON_UP
+	select ARM_GIC
+	select ARMADA_370_XP_TIMER
+	select CACHE_L2X0
+	select ARM_CPU_SUSPEND
+	select CPU_V7
+	select HAVE_SMP
+	select HAVE_ARM_SCU
+	select HAVE_ARM_TWD if LOCAL_TIMERS
+	select NEON
+	select PINCTRL_ARMADA_38X
+	select USB_ARCH_HAS_XHCI
+	help
+	  Say 'Y' here if you want your kernel to support boards based
+	  on the Marvell Armada 380/385 SoC with device tree.
+
 config MACH_ARMADA_XP
 	bool "Marvell Armada XP boards"
 	select MACH_ARMADA_370_XP
@@ -44,6 +99,7 @@ config MACH_ARMADA_XP
 	  Say 'Y' here if you want your kernel to support boards based
 	  on the Marvell Armada XP SoC with device tree.
 
+
 endmenu
 
 endif
diff --git a/arch/arm/mach-mvebu/Makefile b/arch/arm/mach-mvebu/Makefile
index 2d04f0e21870..61315bb4d3c1 100644
--- a/arch/arm/mach-mvebu/Makefile
+++ b/arch/arm/mach-mvebu/Makefile
@@ -1,10 +1,31 @@
+ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
+ENDIAN      = BE
+else
+ENDIAN      = LE
+endif
+
 ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
 	-I$(srctree)/arch/arm/plat-orion/include
 
 AFLAGS_coherency_ll.o		:= -Wa,-march=armv7-a
 
-obj-y				 += system-controller.o
+obj-y				 += system-controller.o mvebu-soc-id.o
 obj-$(CONFIG_MACH_ARMADA_370_XP) += armada-370-xp.o
-obj-$(CONFIG_ARCH_MVEBU)	 += coherency.o coherency_ll.o pmsu.o
-obj-$(CONFIG_SMP)                += platsmp.o headsmp.o
+obj-$(CONFIG_MACH_ARMADA_375) += armada-375.o
+obj-$(CONFIG_MACH_ARMADA_380) += armada-38x.o
+obj-$(CONFIG_ARCH_MVEBU)	 += \
+	coherency.o coherency_ll.o pmsu.o pmsu_ll.o cpu-reset.o \
+	common.o usb-cluster.o usb-utmi.o serdes.o pm.o pm-board.o
+ifeq ($(CONFIG_SMP),y)
+obj-$(CONFIG_MACH_ARMADA_XP)	 += platsmp.o headsmp.o
+obj-$(CONFIG_MACH_ARMADA_375)	 += platsmp-375.o headsmp-375.o
+obj-$(CONFIG_MACH_ARMADA_380)	 += platsmp-380.o headsmp-380.o
+endif
 obj-$(CONFIG_HOTPLUG_CPU)        += hotplug.o
+obj-$(CONFIG_PROC_FS)            += dump_mv_regs.o
+obj-$(CONFIG_MVEBU_SOFT_POWEROFF)      += soft-poweroff.o
+obj-y	+= linux_oss/mvOs.o
+
+
+COMMON_DIR	:= $(srctree)/arch/arm/mach-mvebu/include/mach
+ccflags-y	+= -I$(COMMON_DIR) -DMV_CPU_$(ENDIAN)
diff --git a/arch/arm/mach-mvebu/armada-370-xp.c b/arch/arm/mach-mvebu/armada-370-xp.c
index 1c48890bb72b..041fb3a80db3 100644
--- a/arch/arm/mach-mvebu/armada-370-xp.c
+++ b/arch/arm/mach-mvebu/armada-370-xp.c
@@ -14,13 +14,15 @@
 
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/io.h>
-#include <linux/time-armada-370-xp.h>
+#include <linux/clocksource.h>
 #include <linux/clk/mvebu.h>
 #include <linux/dma-mapping.h>
 #include <linux/mbus.h>
 #include <linux/irqchip.h>
+#include <linux/slab.h>
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -28,69 +30,89 @@
 #include "armada-370-xp.h"
 #include "common.h"
 #include "coherency.h"
+#include "mvebu-soc-id.h"
 
-static struct map_desc armada_370_xp_io_desc[] __initdata = {
-	{
-		.virtual	= (unsigned long) ARMADA_370_XP_REGS_VIRT_BASE,
-		.pfn		= __phys_to_pfn(ARMADA_370_XP_REGS_PHYS_BASE),
-		.length		= ARMADA_370_XP_REGS_SIZE,
-		.type		= MT_DEVICE,
-	},
-};
-
-void __init armada_370_xp_map_io(void)
+static void __init armada_370_xp_map_io(void)
 {
-	iotable_init(armada_370_xp_io_desc, ARRAY_SIZE(armada_370_xp_io_desc));
+	debug_ll_io_init();
 }
 
-void __init armada_370_xp_timer_and_clk_init(void)
+static void __init armada_370_xp_timer_and_clk_init(void)
 {
+	pr_notice("\n  LSP version: %s\n\n", LSP_VERSION);
+
 	mvebu_clocks_init();
-	armada_370_xp_timer_init();
+	clocksource_of_init();
+	coherency_init();
+	BUG_ON(mvebu_mbus_dt_init(coherency_available()));
+#ifdef CONFIG_CACHE_L2X0
+	l2x0_of_init(0, ~0UL);
+#endif
 }
 
-void __init armada_370_xp_init_early(void)
+static void __init i2c_quirk(void)
 {
-	char *mbus_soc_name;
+	struct device_node *np;
+	u32 dev, rev;
 
 	/*
-	 * This initialization will be replaced by a DT-based
-	 * initialization once the mvebu-mbus driver gains DT support.
+	 * Only revisons more recent than A0 support the offload
+	 * mechanism. We can exit only if we are sure that we can
+	 * get the SoC revision and it is more recent than A0.
 	 */
-	if (of_machine_is_compatible("marvell,armada370"))
-		mbus_soc_name = "marvell,armada370-mbus";
-	else
-		mbus_soc_name = "marvell,armadaxp-mbus";
+	if (mvebu_get_soc_id(&rev, &dev) == 0 && dev > MV78XX0_A0_REV)
+		return;
 
-	mvebu_mbus_init(mbus_soc_name,
-			ARMADA_370_XP_MBUS_WINS_BASE,
-			ARMADA_370_XP_MBUS_WINS_SIZE,
-			ARMADA_370_XP_SDRAM_WINS_BASE,
-			ARMADA_370_XP_SDRAM_WINS_SIZE);
+	for_each_compatible_node(np, NULL, "marvell,mv78230-i2c") {
+		struct property *new_compat;
 
-#ifdef CONFIG_CACHE_L2X0
-	l2x0_of_init(0, ~0UL);
-#endif
+		new_compat = kzalloc(sizeof(*new_compat), GFP_KERNEL);
+
+		new_compat->name = kstrdup("compatible", GFP_KERNEL);
+		new_compat->length = sizeof("marvell,mv78230-a0-i2c");
+		new_compat->value = kstrdup("marvell,mv78230-a0-i2c",
+						GFP_KERNEL);
+
+		of_update_property(np, new_compat);
+	}
+	return;
 }
 
 static void __init armada_370_xp_dt_init(void)
 {
+	if (of_machine_is_compatible("plathome,openblocks-ax3-4"))
+		i2c_quirk();
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-	coherency_init();
 }
 
-static const char * const armada_370_xp_dt_compat[] = {
-	"marvell,armada-370-xp",
+static const char * const armada_370_dt_compat[] = {
+	"marvell,armada370",
+	NULL,
+};
+
+DT_MACHINE_START(ARMADA_370_DT, "Marvell Armada 370 (Device Tree)")
+	.init_machine	= armada_370_xp_dt_init,
+	.map_io		= armada_370_xp_map_io,
+	.init_irq	= irqchip_init,
+	.init_time	= armada_370_xp_timer_and_clk_init,
+	.restart	= mvebu_restart,
+	.dt_compat	= armada_370_dt_compat,
+	.flags          = MACHINE_NEEDS_CPOLICY_WRITEALLOC,
+MACHINE_END
+
+static const char * const armada_xp_dt_compat[] = {
+	"marvell,armadaxp",
 	NULL,
 };
 
-DT_MACHINE_START(ARMADA_XP_DT, "Marvell Armada 370/XP (Device Tree)")
+DT_MACHINE_START(ARMADA_XP_DT, "Marvell Armada XP (Device Tree)")
 	.smp		= smp_ops(armada_xp_smp_ops),
 	.init_machine	= armada_370_xp_dt_init,
 	.map_io		= armada_370_xp_map_io,
-	.init_early	= armada_370_xp_init_early,
 	.init_irq	= irqchip_init,
 	.init_time	= armada_370_xp_timer_and_clk_init,
 	.restart	= mvebu_restart,
-	.dt_compat	= armada_370_xp_dt_compat,
+	.dt_compat	= armada_xp_dt_compat,
+	.flags          = (MACHINE_NEEDS_CPOLICY_WRITEALLOC |
+			   MACHINE_NEEDS_SHAREABLE_PAGES),
 MACHINE_END
diff --git a/arch/arm/mach-mvebu/armada-370-xp.h b/arch/arm/mach-mvebu/armada-370-xp.h
index 2070e1b4f342..237c86b83390 100644
--- a/arch/arm/mach-mvebu/armada-370-xp.h
+++ b/arch/arm/mach-mvebu/armada-370-xp.h
@@ -15,21 +15,15 @@
 #ifndef __MACH_ARMADA_370_XP_H
 #define __MACH_ARMADA_370_XP_H
 
-#define ARMADA_370_XP_REGS_PHYS_BASE	0xd0000000
-#define ARMADA_370_XP_REGS_VIRT_BASE	IOMEM(0xfec00000)
-#define ARMADA_370_XP_REGS_SIZE		SZ_1M
-
-/* These defines can go away once mvebu-mbus has a DT binding */
-#define ARMADA_370_XP_MBUS_WINS_BASE    (ARMADA_370_XP_REGS_PHYS_BASE + 0x20000)
-#define ARMADA_370_XP_MBUS_WINS_SIZE    0x100
-#define ARMADA_370_XP_SDRAM_WINS_BASE   (ARMADA_370_XP_REGS_PHYS_BASE + 0x20180)
-#define ARMADA_370_XP_SDRAM_WINS_SIZE   0x20
-
 #ifdef CONFIG_SMP
 #include <linux/cpumask.h>
 
+#define ARMADA_XP_MAX_CPUS 4
+
 void armada_mpic_send_doorbell(const struct cpumask *mask, unsigned int irq);
 void armada_xp_mpic_smp_cpu_init(void);
+void armada_xp_secondary_startup(void);
+extern struct smp_operations armada_xp_smp_ops;
 #endif
 
 #endif /* __MACH_ARMADA_370_XP_H */
diff --git a/arch/arm/mach-mvebu/armada-375.c b/arch/arm/mach-mvebu/armada-375.c
new file mode 100644
index 000000000000..bb6c6d6d3914
--- /dev/null
+++ b/arch/arm/mach-mvebu/armada-375.c
@@ -0,0 +1,143 @@
+/*
+ * Device Tree support for Armada 375 platforms.
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/clocksource.h>
+#include <linux/io.h>
+#include <linux/clk/mvebu.h>
+#include <linux/dma-mapping.h>
+#include <linux/mbus.h>
+#include <linux/irqchip.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/smp_scu.h>
+#include <asm/mach/time.h>
+#include "armada-375.h"
+#include "common.h"
+#include "coherency.h"
+
+extern void __iomem *scu_base;
+
+static struct of_device_id of_scu_table[] = {
+	{ .compatible = "arm,cortex-a9-scu" },
+	{ },
+};
+
+#define SCU_CTRL		0x00
+
+static void __init armada_375_scu_enable(void)
+{
+	u32 scu_ctrl;
+
+	struct device_node *np = of_find_matching_node(NULL, of_scu_table);
+	if (np) {
+		scu_base = of_iomap(np, 0);
+
+		scu_ctrl = readl_relaxed(scu_base + SCU_CTRL);
+		/* already enabled? */
+		if (!(scu_ctrl & 1)) {
+			/* Enable SCU Speculative linefills to L2 */
+			scu_ctrl |= (1 << 3);
+			writel_relaxed(scu_ctrl, scu_base + SCU_CTRL);
+		}
+		scu_enable(scu_base);
+	}
+}
+
+void __init armada_375_l2_enable(void)
+{
+	void __iomem *l2x0_base;
+	struct device_node *np;
+	unsigned int val;
+
+	np = of_find_compatible_node(NULL, NULL, "arm,pl310-cache");
+	if (!np)
+		goto out;
+
+	l2x0_base = of_iomap(np, 0);
+	if (!l2x0_base) {
+		of_node_put(np);
+		goto out;
+	}
+
+	/* Configure the L2 PREFETCH and POWER registers */
+	val = 0x58800000;
+	/*
+	*  Support the following configuration:
+	*  Incr double linefill enable
+	*  Data prefetch enable
+	*  Double linefill enable
+	*  Double linefill on WRAP disable
+	*  NO prefetch drop enable
+	*/
+	writel_relaxed(val, l2x0_base + L2X0_PREFETCH_CTRL);
+	val = L2X0_DYNAMIC_CLK_GATING_EN;
+	writel_relaxed(val, l2x0_base + L2X0_POWER_CTRL);
+
+	iounmap(l2x0_base);
+	of_node_put(np);
+out:
+	if (coherency_available())
+		l2x0_of_init_coherent(0, ~0UL);
+	else
+		l2x0_of_init(0, ~0UL);
+}
+
+static void __iomem *
+armada_375_ioremap_caller(unsigned long phys_addr, size_t size,
+			  unsigned int mtype, void *caller)
+{
+	struct resource pcie_mem;
+
+	mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
+
+	if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
+		mtype = MT_MEMORY_SO;
+
+	return __arm_ioremap_caller(phys_addr, size, mtype, caller);
+}
+
+static void __init armada_375_timer_and_clk_init(void)
+{
+	pr_notice("\n  LSP version: %s\n\n", LSP_VERSION);
+
+	mvebu_clocks_init();
+	clocksource_of_init();
+	armada_375_scu_enable();
+	BUG_ON(mvebu_mbus_dt_init(coherency_available()));
+	arch_ioremap_caller = armada_375_ioremap_caller;
+	pci_ioremap_set_mem_type(MT_MEMORY_SO);
+	coherency_init();
+	armada_375_l2_enable();
+}
+
+static const char * const armada_375_dt_compat[] = {
+	"marvell,armada375",
+	NULL,
+};
+
+DT_MACHINE_START(ARMADA_375_DT, "Marvell Armada 375 (Device Tree)")
+	.smp		= smp_ops(armada_375_smp_ops),
+	.map_io		= debug_ll_io_init,
+	.init_irq	= irqchip_init,
+	.init_time	= armada_375_timer_and_clk_init,
+	.restart	= mvebu_restart,
+	.dt_compat	= armada_375_dt_compat,
+	.flags          = (MACHINE_NEEDS_CPOLICY_WRITEALLOC |
+			   MACHINE_NEEDS_SHAREABLE_PAGES),
+MACHINE_END
diff --git a/arch/arm/mach-mvebu/armada-375.h b/arch/arm/mach-mvebu/armada-375.h
new file mode 100644
index 000000000000..7eaecd25719f
--- /dev/null
+++ b/arch/arm/mach-mvebu/armada-375.h
@@ -0,0 +1,23 @@
+/*
+ * Generic definitions for Marvell Armada 375 SoCs
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_ARMADA_375_H
+#define __MACH_ARMADA_375_H
+
+#ifdef CONFIG_SMP
+#define ARMADA_375_MAX_CPUS 2
+
+void armada_375_set_bootaddr(void *boot_addr);
+extern struct smp_operations armada_375_smp_ops;
+#endif
+
+#endif /* __MACH_ARMADA_375_H */
diff --git a/arch/arm/mach-mvebu/armada-380.h b/arch/arm/mach-mvebu/armada-380.h
new file mode 100644
index 000000000000..fb79f7951b79
--- /dev/null
+++ b/arch/arm/mach-mvebu/armada-380.h
@@ -0,0 +1,23 @@
+/*
+ * Generic definitions for Marvell Armada 380 SoCs
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_ARMADA_380_H
+#define __MACH_ARMADA_380_H
+
+#ifdef CONFIG_SMP
+#define ARMADA_380_MAX_CPUS 2
+extern struct smp_operations armada_380_smp_ops;
+#endif
+
+#define IRQ_PRIV_MPIC_PPI_IRQ 31
+
+#endif /* __MACH_ARMADA_380_H */
diff --git a/arch/arm/mach-mvebu/armada-38x.c b/arch/arm/mach-mvebu/armada-38x.c
new file mode 100644
index 000000000000..8d4c689ae7ff
--- /dev/null
+++ b/arch/arm/mach-mvebu/armada-38x.c
@@ -0,0 +1,199 @@
+/*
+ * Device Tree support for Armada 380/385 platforms.
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#include <linux/clocksource.h>
+#include <linux/io.h>
+#include <linux/clk/mvebu.h>
+#include <linux/dma-mapping.h>
+#include <linux/memblock.h>
+#include <linux/mbus.h>
+#include <linux/irqchip.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/smp_scu.h>
+#include <asm/mach/time.h>
+#include <asm/signal.h>
+#include "common.h"
+#include "coherency.h"
+#include "armada-380.h"
+
+extern void __iomem *scu_base;
+
+static struct of_device_id of_scu_table[] = {
+	{ .compatible = "arm,cortex-a9-scu" },
+	{ },
+};
+
+#define SCU_CTRL		0x00
+
+void armada_380_scu_enable(void)
+{
+	u32 scu_ctrl;
+
+	struct device_node *np = of_find_matching_node(NULL, of_scu_table);
+	if (np) {
+		scu_base = of_iomap(np, 0);
+
+		scu_ctrl = readl_relaxed(scu_base + SCU_CTRL);
+		/* already enabled? */
+		if (!(scu_ctrl & 1)) {
+			/* Enable SCU Speculative linefills to L2 */
+			scu_ctrl |= (1 << 3);
+			writel_relaxed(scu_ctrl, scu_base + SCU_CTRL);
+		}
+		scu_enable(scu_base);
+	}
+}
+
+void __init armada_380_l2_enable(void)
+{
+	void __iomem *l2x0_base;
+	struct device_node *np;
+	unsigned int val;
+
+	np = of_find_compatible_node(NULL, NULL, "arm,pl310-cache");
+	if (!np)
+		goto out;
+
+	l2x0_base = of_iomap(np, 0);
+	if (!l2x0_base) {
+		of_node_put(np);
+		goto out;
+	}
+
+	/* Configure the L2 PREFETCH and POWER registers */
+	val = 0x58800000;
+	/*
+	*  Support the following configuration:
+	*  Incr double linefill enable
+	*  Data prefetch enable
+	*  Double linefill enable
+	*  Double linefill on WRAP disable
+	*  NO prefetch drop enable
+	 */
+	writel_relaxed(val, l2x0_base + L2X0_PREFETCH_CTRL);
+	val = L2X0_DYNAMIC_CLK_GATING_EN;
+	writel_relaxed(val, l2x0_base + L2X0_POWER_CTRL);
+
+	iounmap(l2x0_base);
+	of_node_put(np);
+out:
+	if (coherency_available())
+		l2x0_of_init_coherent(0, ~0UL);
+	else
+		l2x0_of_init(0, ~0UL);
+}
+
+static void __iomem *
+armada_380_ioremap_caller(unsigned long phys_addr, size_t size,
+			  unsigned int mtype, void *caller)
+{
+	struct resource pcie_mem;
+
+	mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
+
+	if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
+		mtype = MT_MEMORY_SO;
+
+	return __arm_ioremap_caller(phys_addr, size, mtype, caller);
+}
+
+static void __init armada_380_timer_and_clk_init(void)
+{
+	pr_notice("\n  LSP version: %s\n\n", LSP_VERSION);
+
+	mvebu_clocks_init();
+	clocksource_of_init();
+	armada_380_scu_enable();
+	BUG_ON(mvebu_mbus_dt_init(coherency_available()));
+	arch_ioremap_caller = armada_380_ioremap_caller;
+	pci_ioremap_set_mem_type(MT_MEMORY_SO);
+	coherency_init();
+	armada_380_l2_enable();
+}
+
+static const char * const armada_380_dt_compat[] = {
+	"marvell,armada380",
+	"marvell,armada381",
+	"marvell,armada382",
+	"marvell,armada385",
+	"marvell,armada388",
+	NULL,
+};
+
+/*
+ * When returning from suspend, the platform goes through the
+ * bootloader, which executes its DDR3 training code. This code has
+ * the unfortunate idea of using the first 10 KB of each DRAM bank to
+ * exercise the RAM and calculate the optimal timings. Therefore, this
+ * area of RAM is overwritten, and shouldn't be used by the kernel if
+ * suspend/resume is supported.
+ */
+
+#ifdef CONFIG_SUSPEND
+#define MVEBU_DDR_TRAINING_AREA_SZ (10 * SZ_1K)
+static int __init mvebu_scan_mem(unsigned long node, const char *uname,
+				 int depth, void *data)
+{
+	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+	const __be32 *reg, *endp;
+	int l;
+
+	if (type == NULL || strcmp(type, "memory"))
+		return 0;
+
+	reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
+	if (reg == NULL)
+		reg = of_get_flat_dt_prop(node, "reg", &l);
+	if (reg == NULL)
+		return 0;
+
+	endp = reg + (l / sizeof(__be32));
+	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
+		u64 base, size;
+
+		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
+		size = dt_mem_next_cell(dt_root_size_cells, &reg);
+
+		memblock_reserve(base, MVEBU_DDR_TRAINING_AREA_SZ);
+	}
+
+	return 0;
+}
+
+static void __init mvebu_memblock_reserve(void)
+{
+	of_scan_flat_dt(mvebu_scan_mem, NULL);
+}
+#else
+static void __init mvebu_memblock_reserve(void) {}
+#endif
+
+
+
+DT_MACHINE_START(ARMADA_XP_DT, "Marvell Armada 380/381/382/385/388 (Device Tree)")
+	.smp		= smp_ops(armada_380_smp_ops),
+	.map_io		= debug_ll_io_init,
+	.init_irq	= irqchip_init,
+	.init_time	= armada_380_timer_and_clk_init,
+	.restart	= mvebu_restart,
+	.reserve        = mvebu_memblock_reserve,
+	.dt_compat	= armada_380_dt_compat,
+	.flags          = (MACHINE_NEEDS_CPOLICY_WRITEALLOC |
+			   MACHINE_NEEDS_SHAREABLE_PAGES),
+MACHINE_END
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 8278960066c3..6b2ef454f950 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -24,53 +24,67 @@
 #include <linux/smp.h>
 #include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
+#include <linux/pci.h>
 #include <asm/smp_plat.h>
+#include <asm/cacheflush.h>
 #include "armada-370-xp.h"
 
-/*
- * Some functions in this file are called very early during SMP
- * initialization. At that time the device tree framework is not yet
- * ready, and it is not possible to get the register address to
- * ioremap it. That's why the pointer below is given with an initial
- * value matching its virtual mapping
- */
-static void __iomem *coherency_base = ARMADA_370_XP_REGS_VIRT_BASE + 0x20200;
+extern void armada_380_scu_enable(void);
+static int coherency_type(void);
+unsigned long coherency_phys_base;
+void __iomem *coherency_base;
 static void __iomem *coherency_cpu_base;
+bool coherency_hard_mode;
 
 /* Coherency fabric registers */
 #define COHERENCY_FABRIC_CFG_OFFSET		   0x4
 
 #define IO_SYNC_BARRIER_CTL_OFFSET		   0x0
 
+enum {
+	COHERENCY_FABRIC_TYPE_NONE,
+	COHERENCY_FABRIC_TYPE_ARMADA_370_XP,
+	COHERENCY_FABRIC_TYPE_ARMADA_375,
+	COHERENCY_FABRIC_TYPE_ARMADA_380,
+};
+
+/*
+ * The "marvell,coherency-fabric" compatible string is kept for
+ * backward compatibility reasons, and is equivalent to
+ * "marvell,armada-370-coherency-fabric".
+ */
 static struct of_device_id of_coherency_table[] = {
-	{.compatible = "marvell,coherency-fabric"},
+	{.compatible = "marvell,coherency-fabric",
+	 .data = (void*) COHERENCY_FABRIC_TYPE_ARMADA_370_XP },
+	{.compatible = "marvell,armada-370-coherency-fabric",
+	 .data = (void*) COHERENCY_FABRIC_TYPE_ARMADA_370_XP },
+	{.compatible = "marvell,armada-375-coherency-fabric",
+	 .data = (void*) COHERENCY_FABRIC_TYPE_ARMADA_375 },
+	{.compatible = "marvell,armada-380-coherency-fabric",
+	 .data = (void*) COHERENCY_FABRIC_TYPE_ARMADA_380 },
 	{ /* end of list */ },
 };
 
-#ifdef CONFIG_SMP
-int coherency_get_cpu_count(void)
-{
-	int reg, cnt;
+/* Functions defined in coherency_ll.S */
+int ll_enable_coherency(void);
+void ll_add_cpu_to_smp_group(void);
 
-	reg = readl(coherency_base + COHERENCY_FABRIC_CFG_OFFSET);
-	cnt = (reg & 0xF) + 1;
-
-	return cnt;
-}
-#endif
-
-/* Function defined in coherency_ll.S */
-int ll_set_cpu_coherent(void __iomem *base_addr, unsigned int hw_cpu_id);
-
-int set_cpu_coherent(unsigned int hw_cpu_id, int smp_group_id)
+int set_cpu_coherent(void)
 {
-	if (!coherency_base) {
-		pr_warn("Can't make CPU %d cache coherent.\n", hw_cpu_id);
-		pr_warn("Coherency fabric is not initialized\n");
-		return 1;
-	}
+	int type = coherency_type();
+
+	if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP) {
+		if (!coherency_base) {
+			pr_warn("Can't make current CPU cache coherent.\n");
+			pr_warn("Coherency fabric is not initialized\n");
+			return 1;
+		}
+		ll_add_cpu_to_smp_group();
+		return ll_enable_coherency();
+	} else if (type == COHERENCY_FABRIC_TYPE_ARMADA_380)
+		armada_380_scu_enable();
 
-	return ll_set_cpu_coherent(coherency_base, hw_cpu_id);
+	return 0;
 }
 
 static inline void mvebu_hwcc_sync_io_barrier(void)
@@ -106,8 +120,8 @@ static void mvebu_hwcc_dma_sync(struct device *dev, dma_addr_t dma_handle,
 }
 
 static struct dma_map_ops mvebu_hwcc_dma_ops = {
-	.alloc			= arm_dma_alloc,
-	.free			= arm_dma_free,
+	.alloc			= arm_coherent_dma_alloc,
+	.free			= arm_coherent_dma_free,
 	.mmap			= arm_dma_mmap,
 	.map_page		= mvebu_hwcc_dma_map_page,
 	.unmap_page		= mvebu_hwcc_dma_unmap_page,
@@ -137,19 +151,97 @@ static struct notifier_block mvebu_hwcc_platform_nb = {
 	.notifier_call = mvebu_hwcc_platform_notifier,
 };
 
-int __init coherency_init(void)
+static void __init armada_370_coherency_init(struct device_node *np)
+{
+	struct resource res;
+	of_address_to_resource(np, 0, &res);
+	coherency_phys_base = res.start;
+	/*
+	 * Ensure secondary CPUs will see the updated value,
+	 * which they read before they join the coherency
+	 * fabric, and therefore before they are coherent with
+	 * the boot CPU cache.
+	 */
+	sync_cache_w(&coherency_phys_base);
+	coherency_base = of_iomap(np, 0);
+	coherency_cpu_base = of_iomap(np, 1);
+	set_cpu_coherent();
+}
+
+static void __init armada_375_coherency_init(struct device_node *np)
+{
+	coherency_cpu_base = of_iomap(np, 0);
+}
+
+static void __init armada_380_coherency_init(struct device_node *np)
+{
+	coherency_cpu_base = of_iomap(np, 0);
+}
+
+static int coherency_type(void)
 {
 	struct device_node *np;
 
 	np = of_find_matching_node(NULL, of_coherency_table);
 	if (np) {
-		pr_info("Initializing Coherency fabric\n");
-		coherency_base = of_iomap(np, 0);
-		coherency_cpu_base = of_iomap(np, 1);
-		set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
-		bus_register_notifier(&platform_bus_type,
-					&mvebu_hwcc_platform_nb);
+                const struct of_device_id *match =
+                    of_match_node(of_coherency_table, np);
+		int type;
+
+		type = (int) match->data;
+
+		if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP ||
+			type == COHERENCY_FABRIC_TYPE_ARMADA_375    ||
+			type == COHERENCY_FABRIC_TYPE_ARMADA_380)
+			return type;
 	}
 
+	return COHERENCY_FABRIC_TYPE_NONE;
+}
+
+int coherency_available(void)
+{
+	return coherency_type() != COHERENCY_FABRIC_TYPE_NONE;
+}
+
+int __init coherency_init(void)
+{
+	int type = coherency_type();
+	struct device_node *np;
+
+	if (type != COHERENCY_FABRIC_TYPE_NONE)
+		coherency_hard_mode = true;
+	else
+		coherency_hard_mode = false;
+
+	np = of_find_matching_node(NULL, of_coherency_table);
+
+	if (type == COHERENCY_FABRIC_TYPE_ARMADA_370_XP)
+		armada_370_coherency_init(np);
+	else if (type == COHERENCY_FABRIC_TYPE_ARMADA_375)
+		armada_375_coherency_init(np);
+	else if (type == COHERENCY_FABRIC_TYPE_ARMADA_380)
+		armada_380_coherency_init(np);
+
+	return 0;
+}
+
+static int __init coherency_late_init(void)
+{
+	if (coherency_available())
+		bus_register_notifier(&platform_bus_type,
+				      &mvebu_hwcc_platform_nb);
 	return 0;
 }
+
+postcore_initcall(coherency_late_init);
+
+static int __init coherency_pci_notify_init(void)
+{
+	if (coherency_available())
+		bus_register_notifier(&pci_bus_type,
+				       &mvebu_hwcc_platform_nb);
+	return 0;
+}
+
+arch_initcall(coherency_pci_notify_init);
diff --git a/arch/arm/mach-mvebu/coherency.h b/arch/arm/mach-mvebu/coherency.h
index 2f428137f6fe..9f6b4779191a 100644
--- a/arch/arm/mach-mvebu/coherency.h
+++ b/arch/arm/mach-mvebu/coherency.h
@@ -14,11 +14,14 @@
 #ifndef __MACH_370_XP_COHERENCY_H
 #define __MACH_370_XP_COHERENCY_H
 
-#ifdef CONFIG_SMP
-int coherency_get_cpu_count(void);
-#endif
 
-int set_cpu_coherent(int cpu_id, int smp_group_id);
+extern unsigned long coherency_phys_base;
+extern bool coherency_hard_mode;
+
+#define COHERENCY_FABRIC_HARD_MODE() coherency_hard_mode
+int set_cpu_coherent(void);
+
 int coherency_init(void);
+int coherency_available(void);
 
 #endif	/* __MACH_370_XP_COHERENCY_H */
diff --git a/arch/arm/mach-mvebu/coherency_ll.S b/arch/arm/mach-mvebu/coherency_ll.S
index 5476669ba905..510c29e079ca 100644
--- a/arch/arm/mach-mvebu/coherency_ll.S
+++ b/arch/arm/mach-mvebu/coherency_ll.S
@@ -20,36 +20,130 @@
 #define ARMADA_XP_CFB_CTL_REG_OFFSET 0x0
 #define ARMADA_XP_CFB_CFG_REG_OFFSET 0x4
 
+#include <asm/assembler.h>
+#include <asm/cp15.h>
+
 	.text
+/* Returns the coherency base address in r1 (r0 is untouched) */
+ENTRY(ll_get_coherency_base)
+	mrc	p15, 0, r1, c1, c0, 0
+	tst	r1, #CR_M @ Check MMU bit enabled
+	bne	1f
+
+	/*
+	 * MMU is disabled, use the physical address of the coherency
+	 * base address.
+	 */
+	adr	r1, 3f
+	ldr	r3, [r1]
+	ldr	r1, [r1, r3]
+	b	2f
+1:
+	/*
+	 * MMU is enabled, use the virtual address of the coherency
+	 * base address.
+	 */
+	ldr	r1, =coherency_base
+	ldr	r1, [r1]
+2:
+	mov	pc, lr
+ENDPROC(ll_get_coherency_base)
+
 /*
- * r0: Coherency fabric base register address
- * r1: HW CPU id
+ * Returns the coherency CPU mask in r3 (r0 is untouched). This
+ * coherency CPU mask can be used with the coherency fabric
+ * configuration and control registers. Note that the mask is already
+ * endian-swapped as appropriate so that the calling functions do not
+ * have to care about endianness issues while accessing the coherency
+ * fabric registers
  */
-ENTRY(ll_set_cpu_coherent)
-	/* Create bit by cpu index */
-	mov	r3, #(1 << 24)
-	lsl	r1, r3, r1
+ENTRY(ll_get_coherency_cpumask)
+	mrc	15, 0, r3, cr0, cr0, 5
+	and	r3, r3, #15
+	mov	r2, #(1 << 24)
+	lsl	r3, r2, r3
+ARM_BE8(rev	r3, r3)
+	mov	pc, lr
+ENDPROC(ll_get_coherency_cpumask)
 
-	/* Add CPU to SMP group - Atomic */
-	add	r3, r0, #ARMADA_XP_CFB_CTL_REG_OFFSET
-1:
-	ldrex	r2, [r3]
-	orr	r2, r2, r1
-	strex 	r0, r2, [r3]
-	cmp	r0, #0
-	bne 1b
-
-	/* Enable coherency on CPU - Atomic */
-	add	r3, r3, #ARMADA_XP_CFB_CFG_REG_OFFSET
+/*
+ * ll_add_cpu_to_smp_group(), ll_enable_coherency() and
+ * ll_disable_coherency() use the strex/ldrex instructions while the
+ * MMU can be disabled. The Armada XP SoC has an exclusive monitor
+ * that tracks transactions to Device and/or SO memory and thanks to
+ * that, exclusive transactions are functional even when the MMU is
+ * disabled.
+ */
+
+ENTRY(ll_add_cpu_to_smp_group)
+	/*
+	 * As r0 is not modified by ll_get_coherency_base() and
+	 * ll_get_coherency_cpumask(), we use it to temporarly save lr
+	 * and avoid it being modified by the branch and link
+	 * calls. This function is used very early in the secondary
+	 * CPU boot, and no stack is available at this point.
+	 */
+	mov 	r0, lr
+	bl	ll_get_coherency_base
+	bl	ll_get_coherency_cpumask
+	mov 	lr, r0
+	add	r0, r1, #ARMADA_XP_CFB_CFG_REG_OFFSET
 1:
-	ldrex	r2, [r3]
-	orr	r2, r2, r1
-	strex	r0, r2, [r3]
-	cmp	r0, #0
-	bne 1b
+	ldrex	r2, [r0]
+	orr	r2, r2, r3
+	strex	r1, r2, [r0]
+	cmp	r1, #0
+	bne	1b
+	mov	pc, lr
+ENDPROC(ll_add_cpu_to_smp_group)
 
+ENTRY(ll_enable_coherency)
+	/*
+	 * As r0 is not modified by ll_get_coherency_base() and
+	 * ll_get_coherency_cpumask(), we use it to temporarly save lr
+	 * and avoid it being modified by the branch and link
+	 * calls. This function is used very early in the secondary
+	 * CPU boot, and no stack is available at this point.
+	 */
+	mov r0, lr
+	bl	ll_get_coherency_base
+	bl	ll_get_coherency_cpumask
+	mov lr, r0
+	add	r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
+1:
+	ldrex	r2, [r0]
+	orr	r2, r2, r3
+	strex	r1, r2, [r0]
+	cmp	r1, #0
+	bne	1b
 	dsb
-
 	mov	r0, #0
 	mov	pc, lr
-ENDPROC(ll_set_cpu_coherent)
+ENDPROC(ll_enable_coherency)
+
+ENTRY(ll_disable_coherency)
+	/*
+	 * As r0 is not modified by ll_get_coherency_base() and
+	 * ll_get_coherency_cpumask(), we use it to temporarly save lr
+	 * and avoid it being modified by the branch and link
+	 * calls. This function is used very early in the secondary
+	 * CPU boot, and no stack is available at this point.
+	 */
+	mov 	r0, lr
+	bl	ll_get_coherency_base
+	bl	ll_get_coherency_cpumask
+	mov 	lr, r0
+	add	r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
+1:
+	ldrex	r2, [r0]
+	bic	r2, r2, r3
+	strex	r1, r2, [r0]
+	cmp	r1, #0
+	bne	1b
+	dsb
+	mov	pc, lr
+ENDPROC(ll_disable_coherency)
+
+	.align 2
+3:
+	.long	coherency_phys_base - .
diff --git a/arch/arm/mach-mvebu/common.c b/arch/arm/mach-mvebu/common.c
new file mode 100644
index 000000000000..ab1ae515d86a
--- /dev/null
+++ b/arch/arm/mach-mvebu/common.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2013 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/reset.h>
+#include <asm/smp_plat.h>
+
+/*
+ * This function uses the reset framework to deassert a CPU, which
+ * makes it boot.
+ */
+int mvebu_boot_cpu(int cpu)
+{
+	struct device_node *np;
+	int hw_cpu = cpu_logical_map(cpu);
+
+	for_each_node_by_type(np, "cpu") {
+		struct reset_control *rstc;
+		int icpu;
+
+		of_property_read_u32(np, "reg", &icpu);
+		if (icpu != hw_cpu)
+			continue;
+
+		rstc = of_reset_control_get(np, NULL);
+		if (IS_ERR(rstc)) {
+			pr_err("Cannot get reset for CPU%d\n", cpu);
+			return PTR_ERR(rstc);
+		}
+
+		reset_control_deassert(rstc);
+		reset_control_put(rstc);
+		return 0;
+	}
+
+	return -EINVAL;
+}
diff --git a/arch/arm/mach-mvebu/common.h b/arch/arm/mach-mvebu/common.h
index aa27bc2ffb60..4e1bc534ee90 100644
--- a/arch/arm/mach-mvebu/common.h
+++ b/arch/arm/mach-mvebu/common.h
@@ -15,14 +15,14 @@
 #ifndef __ARCH_MVEBU_COMMON_H
 #define __ARCH_MVEBU_COMMON_H
 
-void mvebu_restart(char mode, const char *cmd);
+#define LSP_VERSION    "linux-3.10.39-2014_T3.0p5"
 
-void armada_370_xp_init_irq(void);
-void armada_370_xp_handle_irq(struct pt_regs *regs);
+void mvebu_restart(char mode, const char *cmd);
 
 void armada_xp_cpu_die(unsigned int cpu);
-int armada_370_xp_coherency_init(void);
-int armada_370_xp_pmsu_init(void);
-void armada_xp_secondary_startup(void);
-extern struct smp_operations armada_xp_smp_ops;
+void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr);
+int mvebu_boot_cpu(int cpu);
+
+int mvebu_pm_init(void (*board_pm_enter)(void __iomem *sdram_reg, u32 srcmd));
+
 #endif
diff --git a/arch/arm/mach-mvebu/cpu-reset.c b/arch/arm/mach-mvebu/cpu-reset.c
new file mode 100644
index 000000000000..865adc5793de
--- /dev/null
+++ b/arch/arm/mach-mvebu/cpu-reset.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2013 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/reset-controller.h>
+
+static struct of_device_id of_cpu_reset_table[] = {
+	{.compatible = "marvell,armada-370-cpu-reset", .data = (void*) 1 },
+	{.compatible = "marvell,armada-xp-cpu-reset",  .data = (void*) 4 },
+	{.compatible = "marvell,armada-375-cpu-reset", .data = (void*) 2 },
+	{.compatible = "marvell,armada-380-cpu-reset", .data = (void*) 2 },
+	{ /* end of list */ },
+};
+
+static void __iomem *cpu_reset_base;
+
+#define CPU_RESET_OFFSET(cpu) (cpu * 0x8)
+#define CPU_RESET_ASSERT      BIT(0)
+
+static int mvebu_cpu_reset_assert(struct reset_controller_dev *rcdev,
+				  unsigned long idx)
+{
+	u32 reg;
+
+	reg = readl(cpu_reset_base + CPU_RESET_OFFSET(idx));
+	reg |= CPU_RESET_ASSERT;
+	writel(reg, cpu_reset_base + CPU_RESET_OFFSET(idx));
+
+	return 0;
+}
+
+static int mvebu_cpu_reset_deassert(struct reset_controller_dev *rcdev,
+				    unsigned long idx)
+{
+	u32 reg;
+
+	reg = readl(cpu_reset_base + CPU_RESET_OFFSET(idx));
+	reg &= ~CPU_RESET_ASSERT;
+	writel(reg, cpu_reset_base + CPU_RESET_OFFSET(idx));
+
+	return 0;
+}
+
+static struct reset_control_ops mvebu_cpu_reset_ops = {
+	.assert = mvebu_cpu_reset_assert,
+	.deassert = mvebu_cpu_reset_deassert,
+};
+
+static struct reset_controller_dev mvebu_cpu_reset_dev = {
+	.ops = &mvebu_cpu_reset_ops,
+};
+
+int __init mvebu_cpu_reset_init(void)
+{
+	struct device_node *np;
+	const struct of_device_id *match;
+
+	np = of_find_matching_node_and_match(NULL, of_cpu_reset_table,
+					     &match);
+	if (np) {
+		pr_info("Initializing CPU Reset module\n");
+		cpu_reset_base = of_iomap(np, 0);
+		mvebu_cpu_reset_dev.of_node = np;
+		mvebu_cpu_reset_dev.nr_resets =
+			(unsigned int) match->data;
+		reset_controller_register(&mvebu_cpu_reset_dev);
+	}
+
+	return 0;
+}
+
+early_initcall(mvebu_cpu_reset_init);
diff --git a/arch/arm/mach-mvebu/dump_mv_regs.c b/arch/arm/mach-mvebu/dump_mv_regs.c
new file mode 100644
index 000000000000..1f5a3e9dec0a
--- /dev/null
+++ b/arch/arm/mach-mvebu/dump_mv_regs.c
@@ -0,0 +1,177 @@
+/*
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <asm/hardware/cache-l2x0.h>
+
+static int proc_dump_cp15_read(struct seq_file *m, void *p)
+{
+	unsigned int value;
+
+	asm volatile ("mrc p15, 0, %0, c0, c0, 0" : "=r" (value));
+	seq_printf(m, "Main ID: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c0, c0, 1" : "=r" (value));
+	seq_printf(m, "Cache Type: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c0, c0, 2" : "=r" (value));
+	seq_printf(m, "TCM Type: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c0, c0, 3" : "=r" (value));
+	seq_printf(m, "TLB Type: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (value));
+	seq_printf(m, "Microprocessor Affinity: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c0, c1, 0" : "=r" (value));
+	seq_printf(m, "Processor Feature 0: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c0, c1, 1" : "=r" (value));
+	seq_printf(m, "Processor Feature 1: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c0, c1, 2" : "=r" (value));
+	seq_printf(m, "Debug Feature 0: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c0, c1, 3" : "=r" (value));
+	seq_printf(m, "Auxiliary Feature 0: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c0, c1, 4" : "=r" (value));
+	seq_printf(m, "Memory Model Feature 0: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c0, c1, 5" : "=r" (value));
+	seq_printf(m, "Memory Model Feature 1: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c0, c1, 6" : "=r" (value));
+	seq_printf(m, "Memory Model Feature 2: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c0, c1, 7" : "=r" (value));
+	seq_printf(m, "Memory Model Feature 3: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c0, c2, 0" : "=r" (value));
+	seq_printf(m, "Set Attribute 0: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c0, c2, 1" : "=r" (value));
+	seq_printf(m, "Set Attribute 1: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c0, c2, 2" : "=r" (value));
+	seq_printf(m, "Set Attribute 2: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c0, c2, 3" : "=r" (value));
+	seq_printf(m, "Set Attribute 3: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c0, c2, 4" : "=r" (value));
+	seq_printf(m, "Set Attribute 4: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 1, %0, c0, c0, 0" : "=r" (value));
+	seq_printf(m, "Current Cache Size ID: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 1, %0, c0, c0, 1" : "=r" (value));
+	seq_printf(m, "Current Cache Level ID: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 1, %0, c0, c0, 7" : "=r" (value));
+	seq_printf(m, "Auxiliary ID: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 2, %0, c0, c0, 0" : "=r" (value));
+	seq_printf(m, "Cache Size Selection: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c1, c0, 0" : "=r" (value));
+	seq_printf(m, "Control : 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (value));
+	seq_printf(m, "Auxiliary Control : 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c1, c0, 2" : "=r" (value));
+	seq_printf(m, "Coprocessor Access Control : 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c1, c1, 0" : "=r" (value));
+	seq_printf(m, "Secure Configuration : 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c2, c0, 0" : "=r" (value));
+	seq_printf(m, "Translation Table Base 0 : 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c2, c0, 1" : "=r" (value));
+	seq_printf(m, "Translation Table Base 1 : 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c2, c0, 2" : "=r" (value));
+	seq_printf(m, "Translation Table Control : 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c3, c0, 0" : "=r" (value));
+	seq_printf(m, "Domain Access Control : 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c5, c0, 0" : "=r" (value));
+	seq_printf(m, "Data Fault Status : 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c5, c0, 1" : "=r" (value));
+	seq_printf(m, "Instruction Fault Status : 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c5, c1, 0" : "=r" (value));
+	seq_printf(m, "Auxiliary Data Fault Status : 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c5, c1, 1" : "=r" (value));
+	seq_printf(m, "Auxiliary Instruction Fault Status : 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c6, c0, 0" : "=r" (value));
+	seq_printf(m, "Data Fault Address : 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c6, c0, 2" : "=r" (value));
+	seq_printf(m, "Instruction Fault Address : 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 4, %0, c15, c0, 0" : "=r" (value));
+	seq_printf(m, "Configuration Base Address: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c10, c2, 0" : "=r" (value));
+	seq_printf(m, "Memory Attribute PRRR: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c10, c2, 1" : "=r" (value));
+	seq_printf(m, "Memory Attribute NMRR: 0x%08x\n", value);
+
+	asm volatile ("mrc p15, 0, %0, c15, c0, 0" : "=r" (value));
+	seq_printf(m, "Power Control Register: 0x%08x\n", value);
+
+	return 0;
+}
+
+static int proc_dump_cp15_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, proc_dump_cp15_read, NULL);
+}
+
+static const struct file_operations proc_dump_cp15_fops = {
+	.owner		= THIS_MODULE,
+	.open		= proc_dump_cp15_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static struct proc_dir_entry *proc_mv_dump_dir;
+
+int dump_init_module(void)
+{
+	proc_mv_dump_dir = proc_mkdir("mv_dump", NULL);
+	proc_create("cp15", 0444, proc_mv_dump_dir, &proc_dump_cp15_fops);
+
+	return 0;
+}
+
+void dump_cleanup_module(void)
+{
+	proc_remove(proc_mv_dump_dir);
+}
+
+module_init(dump_init_module);
+module_exit(dump_cleanup_module);
diff --git a/arch/arm/mach-mvebu/headsmp-375.S b/arch/arm/mach-mvebu/headsmp-375.S
new file mode 100644
index 000000000000..392131bfa308
--- /dev/null
+++ b/arch/arm/mach-mvebu/headsmp-375.S
@@ -0,0 +1,23 @@
+/*
+ * SMP support: Entry point for secondary CPUs
+ *
+ * Copyright (C) 2013  Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/assembler.h>
+
+	__CPUINIT
+
+ENTRY(a375_secondary_startup)
+ARM_BE8(setend be)
+	bl      v7_invalidate_l1
+	b	secondary_startup
+ENDPROC(a375_secondary_startup)
diff --git a/arch/arm/mach-mvebu/headsmp-380.S b/arch/arm/mach-mvebu/headsmp-380.S
new file mode 100644
index 000000000000..5c3c73bbe5d4
--- /dev/null
+++ b/arch/arm/mach-mvebu/headsmp-380.S
@@ -0,0 +1,23 @@
+/*
+ * SMP support: Entry point for secondary CPUs
+ *
+ * Copyright (C) 2013  Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/assembler.h>
+
+	__CPUINIT
+ENTRY(a380_secondary_startup)
+ARM_BE8(setend be)
+	bl      v7_invalidate_l1
+	b	secondary_startup
+ENDPROC(a380_secondary_startup)
diff --git a/arch/arm/mach-mvebu/headsmp.S b/arch/arm/mach-mvebu/headsmp.S
index a06e0ede8c08..f6d9eebff38e 100644
--- a/arch/arm/mach-mvebu/headsmp.S
+++ b/arch/arm/mach-mvebu/headsmp.S
@@ -21,29 +21,21 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 
-/*
- * At this stage the secondary CPUs don't have acces yet to the MMU, so
- * we have to provide physical addresses
- */
-#define ARMADA_XP_CFB_BASE	     0xD0020200
+#include <asm/assembler.h>
 
 	__CPUINIT
-
 /*
  * Armada XP specific entry point for secondary CPUs.
  * We add the CPU to the coherency fabric and then jump to secondary
  * startup
  */
 ENTRY(armada_xp_secondary_startup)
+ ARM_BE8(setend	be )			@ go BE8 if entered LE
 
-	/* Read CPU id */
-	mrc     p15, 0, r1, c0, c0, 5
-	and     r1, r1, #0xF
+	bl	ll_add_cpu_to_smp_group
 
-	/* Add CPU to coherency fabric */
-	ldr     r0, =ARMADA_XP_CFB_BASE
+	bl	ll_enable_coherency
 
-	bl	ll_set_cpu_coherent
 	b	secondary_startup
 
 ENDPROC(armada_xp_secondary_startup)
diff --git a/arch/arm/mach-mvebu/include/mach/.gitignore b/arch/arm/mach-mvebu/include/mach/.gitignore
new file mode 100644
index 000000000000..b33b8f2fab2e
--- /dev/null
+++ b/arch/arm/mach-mvebu/include/mach/.gitignore
@@ -0,0 +1,5 @@
+*.o
+*.o.*
+*.rej
+*.orig
+*.su
diff --git a/arch/arm/mach-mvebu/include/mach/mvCommon.h b/arch/arm/mach-mvebu/include/mach/mvCommon.h
new file mode 100644
index 000000000000..2bad66efa1e1
--- /dev/null
+++ b/arch/arm/mach-mvebu/include/mach/mvCommon.h
@@ -0,0 +1,404 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvCommonh
+#define __INCmvCommonh
+
+#ifdef __cplusplus
+extern "C" {
+#endif	/* __cplusplus */
+
+#include "mvTypes.h"
+#ifndef MV_ASMLANGUAGE
+
+/* The golden ration: an arbitrary value */
+#define MV_JHASH_GOLDEN_RATIO           0x9e3779b9
+
+#define MV_JHASH_MIX(a, b, c)        \
+{                                   \
+	a -= b; a -= c; a ^= (c>>13);   \
+	b -= c; b -= a; b ^= (a<<8);    \
+	c -= a; c -= b; c ^= (b>>13);   \
+	a -= b; a -= c; a ^= (c>>12);   \
+	b -= c; b -= a; b ^= (a<<16);   \
+	c -= a; c -= b; c ^= (b>>5);    \
+	a -= b; a -= c; a ^= (c>>3);    \
+	b -= c; b -= a; b ^= (a<<10);   \
+	c -= a; c -= b; c ^= (b>>15);   \
+}
+
+#ifdef MV_VXWORKS
+static __inline__ MV_U32 mv_jhash_3words(MV_U32 a, MV_U32 b, MV_U32 c, MV_U32 initval)
+#else
+static inline MV_U32 mv_jhash_3words(MV_U32 a, MV_U32 b, MV_U32 c, MV_U32 initval)
+
+#endif
+{
+	a += MV_JHASH_GOLDEN_RATIO;
+	b += MV_JHASH_GOLDEN_RATIO;
+	c += initval;
+	MV_JHASH_MIX(a, b, c);
+
+	return c;
+}
+#endif
+
+
+/* Swap tool */
+
+/* 16bit nibble swap. For example 0x1234 -> 0x2143                          */
+#define MV_NIBBLE_SWAP_16BIT(X)        (((X&0xf) << 4) |     \
+					((X&0xf0) >> 4) |    \
+					((X&0xf00) << 4) |   \
+					((X&0xf000) >> 4))
+
+/* 32bit nibble swap. For example 0x12345678 -> 0x21436587                  */
+#define MV_NIBBLE_SWAP_32BIT(X)		(((X&0xf) << 4) |       \
+					((X&0xf0) >> 4) |      \
+					((X&0xf00) << 4) |     \
+					((X&0xf000) >> 4) |    \
+					((X&0xf0000) << 4) |   \
+					((X&0xf00000) >> 4) |  \
+					((X&0xf000000) << 4) | \
+					((X&0xf0000000) >> 4))
+
+/* 16bit byte swap. For example 0x1234->0x3412                             */
+#define MV_BYTE_SWAP_16BIT(X) ((((X)&0xff)<<8) | (((X)&0xff00)>>8))
+
+/* 32bit byte swap. For example 0x12345678->0x78563412                    */
+#define MV_BYTE_SWAP_32BIT(X)  ((((X)&0xff)<<24) |                       \
+				(((X)&0xff00)<<8) |                      \
+				(((X)&0xff0000)>>8) |                    \
+				(((X)&0xff000000)>>24))
+
+/* 64bit byte swap. For example 0x11223344.55667788 -> 0x88776655.44332211  */
+#define MV_BYTE_SWAP_64BIT(X) ((l64) ((((X)&0xffULL)<<56) |             \
+				      (((X)&0xff00ULL)<<40) |           \
+				      (((X)&0xff0000ULL)<<24) |         \
+				      (((X)&0xff000000ULL)<<8) |        \
+				      (((X)&0xff00000000ULL)>>8) |      \
+				      (((X)&0xff0000000000ULL)>>24) |   \
+				      (((X)&0xff000000000000ULL)>>40) | \
+				      (((X)&0xff00000000000000ULL)>>56)))
+
+/* Endianess macros.                                                        */
+#if defined(MV_CPU_LE)
+#define MV_16BIT_LE(X)  (X)
+#define MV_32BIT_LE(X)  (X)
+#define MV_64BIT_LE(X)  (X)
+#define MV_16BIT_BE(X)  MV_BYTE_SWAP_16BIT(X)
+#define MV_32BIT_BE(X)  MV_BYTE_SWAP_32BIT(X)
+#define MV_64BIT_BE(X)  MV_BYTE_SWAP_64BIT(X)
+#elif defined(MV_CPU_BE)
+#define MV_16BIT_LE(X)  MV_BYTE_SWAP_16BIT(X)
+#define MV_32BIT_LE(X)  MV_BYTE_SWAP_32BIT(X)
+#define MV_64BIT_LE(X)  MV_BYTE_SWAP_64BIT(X)
+#define MV_16BIT_BE(X)  (X)
+#define MV_32BIT_BE(X)  (X)
+#define MV_64BIT_BE(X)  (X)
+#else
+#error "CPU endianess isn't defined!\n"
+#endif
+
+/* Bit field definitions */
+#define NO_BIT      0x00000000
+
+/* avoid redefinition of bits */
+#ifndef BIT0
+
+#define BIT0        0x00000001
+#define BIT1        0x00000002
+#define BIT2        0x00000004
+#define BIT3        0x00000008
+#define BIT4        0x00000010
+#define BIT5        0x00000020
+#define BIT6        0x00000040
+#define BIT7        0x00000080
+#define BIT8        0x00000100
+#define BIT9        0x00000200
+#define BIT10       0x00000400
+#define BIT11       0x00000800
+#define BIT12       0x00001000
+#define BIT13       0x00002000
+#define BIT14       0x00004000
+#define BIT15       0x00008000
+#define BIT16       0x00010000
+#define BIT17       0x00020000
+#define BIT18       0x00040000
+#define BIT19       0x00080000
+#define BIT20       0x00100000
+#define BIT21       0x00200000
+#define BIT22       0x00400000
+#define BIT23       0x00800000
+#define BIT24       0x01000000
+#define BIT25       0x02000000
+#define BIT26       0x04000000
+#define BIT27       0x08000000
+#define BIT28       0x10000000
+#define BIT29       0x20000000
+#define BIT30       0x40000000
+#define BIT31       0x80000000
+
+#endif /* BIT0 */
+/* Handy sizes */
+#define _1K         0x00000400
+#define _2K         0x00000800
+#define _4K         0x00001000
+#define _8K         0x00002000
+#define _16K        0x00004000
+#define _32K        0x00008000
+#define _64K        0x00010000
+#define _128K       0x00020000
+#define _256K       0x00040000
+#define _512K       0x00080000
+
+#define _1M         0x00100000
+#define _2M         0x00200000
+#define _4M         0x00400000
+#define _8M         0x00800000
+#define _16M        0x01000000
+#define _32M        0x02000000
+#define _64M        0x04000000
+#define _128M       0x08000000
+#define _256M       0x10000000
+#define _512M       0x20000000
+
+#define _1G         0x40000000
+#define _2G         0x80000000
+
+/* Tclock and Sys clock define */
+#define _100MHz     100000000
+#define _125MHz     125000000
+#define _133MHz     133333334
+#define _150MHz     150000000
+#define _160MHz     160000000
+#define _166MHz     166666667
+#define _175MHz     175000000
+#define _178MHz     178000000
+#define _183MHz     183333334
+#define _187MHz     187000000
+#define _192MHz     192000000
+#define _194MHz     194000000
+#define _200MHz     200000000
+#define _233MHz     233333334
+#define _250MHz     250000000
+#define _266MHz     266666667
+#define _300MHz     300000000
+#define _800MHz     800000000
+#define _1GHz       1000000000UL
+#define _2GHz       2000000000UL
+
+/* Supported clocks */
+#define MV_BOARD_TCLK_100MHZ	100000000
+#define MV_BOARD_TCLK_125MHZ	125000000
+#define MV_BOARD_TCLK_133MHZ	133333333
+#define MV_BOARD_TCLK_150MHZ	150000000
+#define MV_BOARD_TCLK_166MHZ	166666667
+#define MV_BOARD_TCLK_200MHZ	200000000
+#define MV_BOARD_TCLK_250MHZ	250000000
+
+#define MV_BOARD_SYSCLK_100MHZ	100000000
+#define MV_BOARD_SYSCLK_125MHZ	125000000
+#define MV_BOARD_SYSCLK_133MHZ	133333333
+#define MV_BOARD_SYSCLK_150MHZ	150000000
+#define MV_BOARD_SYSCLK_166MHZ	166666667
+#define MV_BOARD_SYSCLK_200MHZ	200000000
+#define MV_BOARD_SYSCLK_233MHZ	233333333
+#define MV_BOARD_SYSCLK_250MHZ	250000000
+#define MV_BOARD_SYSCLK_267MHZ	266666667
+#define MV_BOARD_SYSCLK_300MHZ	300000000
+#define MV_BOARD_SYSCLK_333MHZ	333333334
+#define MV_BOARD_SYSCLK_400MHZ	400000000
+
+#define MV_BOARD_REFCLK_25MHZ	 25000000
+
+/* For better address window table readability */
+#define EN			MV_TRUE
+#define DIS			MV_FALSE
+#define N_A			-1	/* Not applicable */
+
+/* Cache configuration options for memory (DRAM, SRAM, ... ) */
+
+/* Memory uncached, HW or SW cache coherency is not needed */
+#define MV_UNCACHED             0
+/* Memory cached, HW cache coherency supported in WriteThrough mode */
+#define MV_CACHE_COHER_HW_WT    1
+/* Memory cached, HW cache coherency supported in WriteBack mode */
+#define MV_CACHE_COHER_HW_WB    2
+/* Memory cached, No HW cache coherency, Cache coherency must be in SW */
+#define MV_CACHE_COHER_SW       3
+
+/* Macro for testing aligment. Positive if number is NOT aligned   */
+#define MV_IS_NOT_ALIGN(number, align)      ((number) & ((align) - 1))
+
+/* Macro for alignment up. For example, MV_ALIGN_UP(0x0330, 0x20) = 0x0340   */
+#define MV_ALIGN_UP(number, align)                                          \
+(((number) & ((align) - 1)) ? (((number) + (align)) & ~((align)-1)) : (number))
+
+/* Macro for alignment down. For example, MV_ALIGN_UP(0x0330, 0x20) = 0x0320 */
+#define MV_ALIGN_DOWN(number, align) ((number) & ~((align)-1))
+
+/* This macro returns absolute value                                        */
+#define MV_ABS(number)  (((int)(number) < 0) ? -(int)(number) : (int)(number))
+
+/* Bit fields manipulation macros                                           */
+
+/* An integer word which its 'x' bit is set                                 */
+#define MV_BIT_MASK(bitNum)         (1 << (bitNum))
+
+/* Checks wheter bit 'x' in integer word is set                             */
+#define MV_BIT_CHECK(word, bitNum)  ((word) & MV_BIT_MASK(bitNum))
+
+/* Clear (reset) bit 'x' in integer word (RMW - Read-Modify-Write)          */
+#define MV_BIT_CLEAR(word, bitNum)  ((word) &= ~(MV_BIT_MASK(bitNum)))
+
+/* Set bit 'x' in integer word (RMW)                                        */
+#define MV_BIT_SET(word, bitNum)    ((word) |= MV_BIT_MASK(bitNum))
+
+/* Invert bit 'x' in integer word (RMW)                                     */
+#define MV_BIT_INV(word, bitNum)    ((word) ^= MV_BIT_MASK(bitNum))
+
+/* Get the min between 'a' or 'b'                                           */
+#define MV_MIN(a, b)    (((a) < (b)) ? (a) : (b))
+
+/* Get the max between 'a' or 'b'                                           */
+#define MV_MAX(a, b)    (((a) < (b)) ? (b) : (a))
+
+#define mvOsDivide(num, div)	\
+({				\
+	int i = 0, rem = (num);	\
+	while (rem >= (div)) {	\
+		rem -= (div);	\
+		i++;		\
+	}			\
+	(i);			\
+})
+
+#define mvOsReminder(num, div)	\
+({				\
+	int rem = (num);	\
+	while (rem >= (div))	\
+		rem -= (div);	\
+	(rem);			\
+})
+
+#define MV_MACQUAD_FMT "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x"
+
+#define MV_MACQUAD(addr) \
+	(((unsigned char *)addr)[0], \
+	((unsigned char *)addr)[1], \
+	((unsigned char *)addr)[2], \
+	((unsigned char *)addr)[3], \
+	((unsigned char *)addr)[4], \
+	((unsigned char *)addr)[5])
+
+#define MV_IPQUAD_FMT         "%u.%u.%u.%u"
+#define MV_IPQUAD(ip)         (ip[0], ip[1], ip[2], ip[3])
+
+#define MV_IP_QUAD(ipAddr)    (((ipAddr >> 24) & 0xFF), ((ipAddr >> 16) & 0xFF), \
+				((ipAddr >> 8) & 0xFF), ((ipAddr >> 0) & 0xFF))
+
+#define MV_IP6_FMT		"%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x"
+#define MV_IP6_ARG(L3)		(L3[0], L3[1], L3[2], L3[3],	\
+				L3[4], L3[5], L3[6], L3[7],	\
+				L3[8], L3[9], L3[10], L3[11],	\
+				L3[12], L3[13], L3[14], L3[15])
+
+#define MV_IS_POWER_OF_2(num) ((num != 0) && ((num & (num - 1)) == 0))
+
+#define MV_GET_BIT(word, bitNum) (((word) & (1 << (bitNum))) >> (bitNum))
+
+#define MV_SET_BIT(word, bitNum, bitVal) (((word) & ~(1 << (bitNum))) | (bitVal << bitNum))
+
+#define MV_ARRAY_SIZE(a)                    ((sizeof(a)) / (sizeof(a[0])))
+
+#ifndef MV_ASMLANGUAGE
+/* mvCommon API list */
+
+int mvCharToHex(char ch);
+int mvCharToDigit(char ch);
+
+MV_VOID mvHexToBin(const char *pHexStr, MV_U8 *pBin, int size);
+void mvAsciiToHex(const char *asciiStr, char *hexStr);
+void mvBinToHex(const MV_U8 *bin, char *hexStr, int size);
+void mvBinToAscii(const MV_U8 *bin, char *asciiStr, int size);
+MV_U8 mvReverseBits(MV_U8 num);
+MV_U32 mvCountMaskBits(MV_U8 mask);
+
+MV_STATUS mvMacStrToHex(const char *macStr, MV_U8 *macHex);
+MV_STATUS mvMacHexToStr(MV_U8 *macHex, char *macStr);
+void mvSizePrint(MV_U64);
+
+MV_U32 mvLog2(MV_U32 num);
+
+MV_STATUS mvWinOverlapTest(MV_ADDR_WIN *pAddrWin1, MV_ADDR_WIN *pAddrWin2);
+MV_STATUS mvWinWithinWinTest(MV_ADDR_WIN *pAddrWin1, MV_ADDR_WIN *pAddrWin2);
+
+#endif /* MV_ASMLANGUAGE */
+
+#ifdef __cplusplus
+}
+#endif	/* __cplusplus */
+
+#endif /* __INCmvCommonh */
diff --git a/arch/arm/mach-mvebu/include/mach/mvDebug.h b/arch/arm/mach-mvebu/include/mach/mvDebug.h
new file mode 100644
index 000000000000..07e55648351e
--- /dev/null
+++ b/arch/arm/mach-mvebu/include/mach/mvDebug.h
@@ -0,0 +1,173 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvDebugh
+#define __INCmvDebugh
+
+/* includes */
+#include "mvTypes.h"
+
+typedef enum {
+	MV_MODULE_INVALID = -1,
+	MV_MODULE_ETH = 0,
+	MV_MODULE_IDMA,
+	MV_MODULE_XOR,
+	MV_MODULE_TWASI,
+	MV_MODULE_MGI,
+	MV_MODULE_USB,
+	MV_MODULE_CESA,
+
+	MV_MODULE_MAX
+} MV_MODULE_ID;
+
+/* Define generic flags useful for most of modules */
+#define MV_DEBUG_FLAG_ALL   (0)
+#define MV_DEBUG_FLAG_INIT  (1 << 0)
+#define MV_DEBUG_FLAG_RX    (1 << 1)
+#define MV_DEBUG_FLAG_TX    (1 << 2)
+#define MV_DEBUG_FLAG_ERR   (1 << 3)
+#define MV_DEBUG_FLAG_TRACE (1 << 4)
+#define MV_DEBUG_FLAG_DUMP  (1 << 5)
+#define MV_DEBUG_FLAG_CACHE (1 << 6)
+#define MV_DEBUG_FLAG_IOCTL (1 << 7)
+#define MV_DEBUG_FLAG_STATS (1 << 8)
+
+extern MV_U32 mvDebug;
+extern MV_U32 mvDebugModules[MV_MODULE_MAX];
+
+#ifdef MV_DEBUG
+# define MV_DEBUG_PRINT(module, flags, msg)     mvOsPrintf(msg)
+# define MV_DEBUG_CODE(module, flags, code)     code
+#elif defined(MV_RT_DEBUG)
+# define MV_DEBUG_PRINT(module, flags, msg)			\
+do {								\
+	if ((mvDebug & (1<<(module))) &&			\
+	    ((mvDebugModules[(module)] & (flags)) == (flags)))	\
+		mvOsPrintf(msg)					\
+} while (0)
+# define MV_DEBUG_CODE(module, flags, code)			\
+do {								\
+	if ((mvDebug & (1<<(module))) &&			\
+	    ((mvDebugModules[(module)] & (flags)) == (flags)))	\
+		(code)						\
+} while (0)
+#else
+# define MV_DEBUG_PRINT(module, flags, msg)
+# define MV_DEBUG_CODE(module, flags, code)
+#endif
+
+/* typedefs */
+
+/*  time measurement structure used to check how much time pass between
+ *  two points
+ */
+typedef struct {
+	char name[20];		/* name of the entry */
+	unsigned long begin;	/* time measured on begin point */
+	unsigned long end;	/* time measured on end point */
+	unsigned long total;	/* Accumulated time */
+	unsigned long left;	/* The rest measurement actions */
+	unsigned long count;	/* Maximum measurement actions */
+	unsigned long min;	/* Minimum time from begin to end */
+	unsigned long max;	/* Maximum time from begin to end */
+} MV_DEBUG_TIMES;
+
+/* mvDebug.h API list */
+
+/****** Error Recording ******/
+
+/* Dump memory in specific format:
+ * address: X1X1X1X1 X2X2X2X2 ... X8X8X8X8
+ */
+void mvDebugMemDump(void *addr, int size, int access);
+
+void mvDebugPrintBufInfo(BUF_INFO *pBufInfo, int size, int access);
+
+void mvDebugPrintPktInfo(MV_PKT_INFO *pPktInfo, int size, int access);
+
+void mvDebugPrintIpAddr(MV_U32 ipAddr);
+
+void mvDebugPrintMacAddr(const MV_U8 *pMacAddr);
+
+/**** There are three functions deals with MV_DEBUG_TIMES structure ****/
+
+/* Reset MV_DEBUG_TIMES entry */
+void mvDebugResetTimeEntry(MV_DEBUG_TIMES *pTimeEntry, int count, char *name);
+
+/* Update MV_DEBUG_TIMES entry */
+void mvDebugUpdateTimeEntry(MV_DEBUG_TIMES *pTimeEntry);
+
+/* Print out MV_DEBUG_TIMES entry */
+void mvDebugPrintTimeEntry(MV_DEBUG_TIMES *pTimeEntry, MV_BOOL isTitle);
+
+/******** General ***********/
+
+/* Change value of mvDebugPrint global variable */
+
+void mvDebugInit(void);
+void mvDebugModuleEnable(MV_MODULE_ID module, MV_BOOL isEnable);
+void mvDebugModuleSetFlags(MV_MODULE_ID module, MV_U32 flags);
+void mvDebugModuleClearFlags(MV_MODULE_ID module, MV_U32 flags);
+
+#endif /* __INCmvDebug.h */
diff --git a/arch/arm/mach-mvebu/include/mach/mvTypes.h b/arch/arm/mach-mvebu/include/mach/mvTypes.h
new file mode 100644
index 000000000000..f0f70c9afe10
--- /dev/null
+++ b/arch/arm/mach-mvebu/include/mach/mvTypes.h
@@ -0,0 +1,270 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvTypesh
+#define __INCmvTypesh
+
+/* Defines */
+
+/* The following is a list of Marvell status    */
+#define MV_ERROR		    (-1)
+#define MV_OK			    (0)	/* Operation succeeded                   */
+#define MV_FAIL			    (1)	/* Operation failed                      */
+#define MV_BAD_VALUE        (2)	/* Illegal value (general)               */
+#define MV_OUT_OF_RANGE     (3)	/* The value is out of range             */
+#define MV_BAD_PARAM        (4)	/* Illegal parameter in function called  */
+#define MV_BAD_PTR          (5)	/* Illegal pointer value                 */
+#define MV_BAD_SIZE         (6)	/* Illegal size                          */
+#define MV_BAD_STATE        (7)	/* Illegal state of state machine        */
+#define MV_SET_ERROR        (8)	/* Set operation failed                  */
+#define MV_GET_ERROR        (9)	/* Get operation failed                  */
+#define MV_CREATE_ERROR     (10)	/* Fail while creating an item           */
+#define MV_NOT_FOUND        (11)	/* Item not found                        */
+#define MV_NO_MORE          (12)	/* No more items found                   */
+#define MV_NO_SUCH          (13)	/* No such item                          */
+#define MV_TIMEOUT          (14)	/* Time Out                              */
+#define MV_NO_CHANGE        (15)	/* Parameter(s) is already in this value */
+#define MV_NOT_SUPPORTED    (16)	/* This request is not support           */
+#define MV_NOT_IMPLEMENTED  (17)	/* Request supported but not implemented */
+#define MV_NOT_INITIALIZED  (18)	/* The item is not initialized           */
+#define MV_NO_RESOURCE      (19)	/* Resource not available (memory ...)   */
+#define MV_FULL             (20)	/* Item is full (Queue or table etc...)  */
+#define MV_EMPTY            (21)	/* Item is empty (Queue or table etc...) */
+#define MV_INIT_ERROR       (22)	/* Error occured while INIT process      */
+#define MV_HW_ERROR         (23)	/* Hardware error                        */
+#define MV_TX_ERROR         (24)	/* Transmit operation not succeeded      */
+#define MV_RX_ERROR         (25)	/* Recieve operation not succeeded       */
+#define MV_NOT_READY	    (26)	/* The other side is not ready yet       */
+#define MV_ALREADY_EXIST    (27)	/* Tried to create existing item         */
+#define MV_OUT_OF_CPU_MEM   (28)	/* Cpu memory allocation failed.         */
+#define MV_NOT_STARTED      (29)	/* Not started yet                       */
+#define MV_BUSY             (30)	/* Item is busy.                         */
+#define MV_TERMINATE        (31)	/* Item terminates it's work.            */
+#define MV_NOT_ALIGNED      (32)	/* Wrong alignment                       */
+#define MV_NOT_ALLOWED      (33)	/* Operation NOT allowed                 */
+#define MV_WRITE_PROTECT    (34)	/* Write protected                       */
+#define MV_DROPPED          (35)	/* Packet dropped                        */
+#define MV_STOLEN           (36)	/* Packet stolen */
+#define MV_CONTINUE         (37)        /* Continue */
+#define MV_RETRY		    (38)	/* Operation failed need retry           */
+
+#define MV_INVALID  (int)(-1)
+
+#define MV_FALSE	0
+#define MV_TRUE     (!(MV_FALSE))
+
+#ifndef NULL
+#define NULL ((void *)0)
+#endif
+
+#ifndef MV_ASMLANGUAGE
+/* typedefs */
+
+typedef char MV_8;
+typedef unsigned char MV_U8;
+
+typedef int MV_32;
+typedef unsigned int MV_U32;
+
+typedef short MV_16;
+typedef unsigned short MV_U16;
+
+#ifdef MV_PPC64
+typedef long MV_64;
+typedef unsigned long MV_U64;
+#else
+typedef long long MV_64;
+typedef unsigned long long MV_U64;
+#endif
+
+typedef long MV_LONG;		/* 32/64 */
+typedef unsigned long MV_ULONG;	/* 32/64 */
+
+typedef int MV_STATUS;
+typedef int MV_BOOL;
+typedef void MV_VOID;
+typedef float MV_FLOAT;
+
+typedef int (*MV_FUNCPTR) (void);	/* ptr to function returning int   */
+typedef void (*MV_VOIDFUNCPTR) (void);	/* ptr to function returning void  */
+typedef double (*MV_DBLFUNCPTR) (void);	/* ptr to function returning double */
+typedef float (*MV_FLTFUNCPTR) (void);	/* ptr to function returning float */
+
+typedef MV_U32 MV_KHZ;
+typedef MV_U32 MV_MHZ;
+typedef MV_U32 MV_HZ;
+
+/* This enumerator describes the set of commands that can be applied on		*/
+/* an engine (e.g. IDMA, XOR). Appling a comman depends on the current		*/
+/* status (see MV_STATE enumerator)						*/
+/* Start can be applied only when status is IDLE				*/
+/* Stop can be applied only when status is IDLE, ACTIVE or PAUSED		*/
+/* Pause can be applied only when status is ACTIVE				*/
+/* Restart can be applied only when status is PAUSED				*/
+typedef enum _mvCommand {
+	MV_START,		/* Star */
+	MV_STOP,		/* Stop */
+	MV_PAUSE,		/* Pause */
+	MV_RESTART		/* Restart */
+} MV_COMMAND;
+
+/* This enumerator describes the set of state conditions.					*/
+/* Moving from one state to other is stricted.							*/
+typedef enum _mvState {
+	MV_IDLE,
+	MV_ACTIVE,
+	MV_PAUSED,
+	MV_UNDEFINED_STATE
+} MV_STATE;
+
+typedef enum {
+	ETH_MAC_SPEED_10M,
+	ETH_MAC_SPEED_100M,
+	ETH_MAC_SPEED_1000M,
+	ETH_MAC_SPEED_AUTO
+
+} MV_ETH_MAC_SPEED;
+
+/* This structure describes address space window. Window base can be        */
+/* 64 bit, window size up to 4GB                                            */
+typedef struct _mvAddrWin {
+	MV_U32 baseLow;		/* 32bit base low       */
+	MV_U32 baseHigh;	/* 32bit base high      */
+	MV_U64 size;		/* 64bit size           */
+} MV_ADDR_WIN;
+
+/* This binary enumerator describes protection attribute status		*/
+typedef enum _mvProtRight {
+	ALLOWED,		/* Protection attribute allowed		*/
+	FORBIDDEN		/* Protection attribute forbidden	*/
+} MV_PROT_RIGHT;
+
+/* Unified struct for Rx and Tx packet operations. The user is required to	*/
+/* be familier only with Tx/Rx descriptor command status.			*/
+typedef struct _bufInfo {
+	MV_U32 cmdSts;		/* Tx/Rx command status			*/
+	MV_U16 byteCnt;		/* Size of valid data in the buffer	*/
+	MV_U16 bufSize;		/* Total size of the buffer		*/
+	MV_U8 *pBuff;		/* Pointer to Buffer			*/
+	MV_U8 *pData;		/* Pointer to data in the Buffer	*/
+	MV_U32 userInfo1;	/* Tx/Rx attached user information 1	*/
+	MV_U32 userInfo2;	/* Tx/Rx attached user information 2	*/
+	struct _bufInfo *pNextBufInfo;	/* Next buffer in packet	*/
+} BUF_INFO;
+
+/* This structure contains information describing one of buffers
+ * (fragments) they are built Ethernet packet.
+ */
+typedef struct {
+	MV_U8 *bufVirtPtr;
+	MV_ULONG bufPhysAddr;
+	MV_U32 bufSize;
+	MV_U32 dataSize;
+	MV_U32 memHandle;
+	MV_32 bufAddrShift;
+} MV_BUF_INFO;
+
+/* This structure contains information describing Ethernet packet.
+ * The packet can be divided for few buffers (fragments)
+ */
+typedef struct {
+	MV_ULONG osInfo;
+	MV_BUF_INFO *pFrags;
+	MV_U32 status;
+	MV_U16 pktSize;
+	MV_U16 numFrags;
+	MV_U32 ownerId;
+	MV_U32 fragIP;
+	MV_U32 txq;
+} MV_PKT_INFO;
+
+/* This structure describes SoC units address decode window	*/
+typedef struct {
+	MV_ADDR_WIN addrWin;	/* An address window */
+	MV_BOOL enable;		/* Address decode window is enabled/disabled    */
+	MV_U8 attrib;		/* chip select attributes */
+	MV_U8 targetId;		/* Target Id of this MV_TARGET */
+} MV_UNIT_WIN_INFO;
+
+/* This structure describes access rights for Access protection windows     */
+/* that can be found in IDMA, XOR, Ethernet and MPSC units.                 */
+/* Note that the permission enumerator coresponds to its register format.   */
+/* For example, Read only premission is presented as "1" in register field. */
+typedef enum _mvAccessRights {
+	NO_ACCESS_ALLOWED = 0,	/* No access allowed            */
+	READ_ONLY = 1,		/* Read only permission         */
+	ACC_RESERVED = 2,	/* Reserved access right                */
+	FULL_ACCESS = 3,	/* Read and Write permission    */
+	MAX_ACC_RIGHTS
+} MV_ACCESS_RIGHTS;
+
+typedef struct _mvDecRegs {
+	MV_U32 baseReg;
+	MV_U32 baseRegHigh;
+	MV_U32 ctrlReg;
+} MV_DEC_REGS;
+
+#endif /* MV_ASMLANGUAGE */
+
+#endif /* __INCmvTypesh */
diff --git a/arch/arm/mach-mvebu/linux_oss/mvOs.c b/arch/arm/mach-mvebu/linux_oss/mvOs.c
new file mode 100644
index 000000000000..a5c93e0339ef
--- /dev/null
+++ b/arch/arm/mach-mvebu/linux_oss/mvOs.c
@@ -0,0 +1,286 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/*******************************************************************************
+* mvOsCpuArchLib.c - Marvell CPU architecture library
+*
+* DESCRIPTION:
+*       This library introduce Marvell API for OS dependent CPU architecture
+*       APIs. This library introduce single CPU architecture services APKI
+*       cross OS.
+*
+* DEPENDENCIES:
+*       None.
+*
+*******************************************************************************/
+
+/* includes */
+#include "mvOs.h"
+
+static MV_U32 read_p15_c0(void);
+static MV_U32 read_p15_c1(void);
+
+/* defines  */
+#define ARM_ID_REVISION_OFFS	0
+#define ARM_ID_REVISION_MASK	(0xf << ARM_ID_REVISION_OFFS)
+
+#define ARM_ID_PART_NUM_OFFS	4
+#define ARM_ID_PART_NUM_MASK	(0xfff << ARM_ID_PART_NUM_OFFS)
+
+#define ARM_ID_ARCH_OFFS	16
+#define ARM_ID_ARCH_MASK	(0xf << ARM_ID_ARCH_OFFS)
+
+#define ARM_ID_VAR_OFFS		20
+#define ARM_ID_VAR_MASK		(0xf << ARM_ID_VAR_OFFS)
+
+#define ARM_ID_ASCII_OFFS	24
+#define ARM_ID_ASCII_MASK	(0xff << ARM_ID_ASCII_OFFS)
+
+#define ARM_FEATURE_THUMBEE_OFFS	12
+#define ARM_FEATURE_THUMBEE_MASK	(0xf << ARM_FEATURE_THUMBEE_OFFS)
+
+
+void *mvOsIoCachedMalloc(void *osHandle, MV_U32 size, MV_ULONG *pPhyAddr,
+			  MV_U32 *memHandle)
+{
+	void *p = kmalloc(size, GFP_ATOMIC);
+	dma_addr_t dma_addr;
+	dma_addr = dma_map_single(osHandle, p, 0, DMA_BIDIRECTIONAL);
+	*pPhyAddr = (MV_ULONG)(dma_addr & 0xFFFFFFFF);
+	return p;
+}
+void *mvOsIoUncachedMalloc(void *osHandle, MV_U32 size, MV_ULONG *pPhyAddr,
+			    MV_U32 *memHandle)
+{
+	dma_addr_t dma_addr;
+	void *ptr = dma_alloc_coherent(osHandle, size, &dma_addr, GFP_KERNEL);
+	*pPhyAddr = (MV_ULONG)(dma_addr & 0xFFFFFFFF);
+	return ptr;
+}
+
+void mvOsIoUncachedFree(void *osHandle, MV_U32 size, MV_ULONG phyAddr, void *pVirtAddr,
+			 MV_U32 memHandle)
+{
+	dma_free_coherent(osHandle, size, pVirtAddr, (dma_addr_t)phyAddr);
+}
+
+void mvOsIoCachedFree(void *osHandle, MV_U32 size, MV_ULONG phyAddr, void *pVirtAddr,
+		       MV_U32 memHandle)
+{
+	return kfree(pVirtAddr);
+}
+
+int mvOsRand(void)
+{
+	int rand;
+	get_random_bytes(&rand, sizeof(rand));
+	return rand;
+}
+
+/*******************************************************************************
+* mvOsCpuVerGet() -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit CPU Revision
+*
+*******************************************************************************/
+MV_U32 mvOsCpuRevGet(MV_VOID)
+{
+	return (read_p15_c0() & ARM_ID_REVISION_MASK) >> ARM_ID_REVISION_OFFS;
+}
+/*******************************************************************************
+* mvOsCpuPartGet() -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit CPU Part number
+*
+*******************************************************************************/
+MV_U32 mvOsCpuPartGet(MV_VOID)
+{
+	return (read_p15_c0() & ARM_ID_PART_NUM_MASK) >> ARM_ID_PART_NUM_OFFS;
+}
+/*******************************************************************************
+* mvOsCpuArchGet() -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit CPU Architicture number
+*
+*******************************************************************************/
+MV_U32 mvOsCpuArchGet(MV_VOID)
+{
+	return (read_p15_c0() & ARM_ID_ARCH_MASK) >> ARM_ID_ARCH_OFFS;
+}
+/*******************************************************************************
+* mvOsCpuVarGet() -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit CPU Variant number
+*
+*******************************************************************************/
+MV_U32 mvOsCpuVarGet(MV_VOID)
+{
+	return (read_p15_c0() & ARM_ID_VAR_MASK) >> ARM_ID_VAR_OFFS;
+}
+/*******************************************************************************
+* mvOsCpuAsciiGet() -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit CPU Variant number
+*
+*******************************************************************************/
+MV_U32 mvOsCpuAsciiGet(MV_VOID)
+{
+	return (read_p15_c0() & ARM_ID_ASCII_MASK) >> ARM_ID_ASCII_OFFS;
+}
+
+/*******************************************************************************
+* mvOsCpuThumbEEGet() -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit CPU Variant number
+*
+*******************************************************************************/
+MV_U32 mvOsCpuThumbEEGet(MV_VOID)
+{
+	return (read_p15_c1() & ARM_FEATURE_THUMBEE_MASK) >> ARM_FEATURE_THUMBEE_OFFS;
+}
+
+/*
+static unsigned long read_p15_c0 (void)
+*/
+/* read co-processor 15, register #0 (ID register) */
+static MV_U32 read_p15_c0 (void)
+{
+	MV_U32 value;
+
+	__asm__ __volatile__(
+		"mrc	p15, 0, %0, c0, c0, 0   @ read control reg\n"
+		: "=r" (value)
+		:
+		: "memory");
+
+	return value;
+}
+
+/* read co-processor 15, register #1 (Feature 0) */
+static MV_U32 read_p15_c1 (void)
+{
+	MV_U32 value;
+
+	__asm__ __volatile__(
+						 "mrc	p15, 0, %0, c0, c1, 0   @ read feature0 reg\n"
+	: "=r" (value)
+	:
+	: "memory");
+
+	return value;
+}
diff --git a/arch/arm/mach-mvebu/linux_oss/mvOs.h b/arch/arm/mach-mvebu/linux_oss/mvOs.h
new file mode 100644
index 000000000000..e9c09175c743
--- /dev/null
+++ b/arch/arm/mach-mvebu/linux_oss/mvOs.h
@@ -0,0 +1,482 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef _MV_OS_LNX_H_
+#define _MV_OS_LNX_H_
+
+#ifdef __KERNEL__
+/* for kernel space */
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/hardirq.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <linux/module.h>
+
+#include <linux/random.h>
+
+#define arch_is_coherent()      true
+
+extern void mv_early_printk(char *fmt, ...);
+
+#define MV_ASM              (__asm__ __volatile__)
+#define INLINE              inline
+#define _INIT               __init
+#define mvOsPrintf          printk
+#define mvOsEarlyPrintf	    mv_early_printk
+#define mvOsOutput          printk
+#define mvOsSPrintf         sprintf
+#define mvOsSNPrintf        snprintf
+#define mvOsMalloc(_size_)  kmalloc(_size_, GFP_ATOMIC)
+#define mvOsFree            kfree
+#define mvOsMemcpy          memcpy
+#define mvOsMemset          memset
+#define mvOsSleep(_mils_)   mdelay(_mils_)
+#define mvOsTaskLock()
+#define mvOsTaskUnlock()
+#define strtol              simple_strtoul
+#define mvOsDelay(x)        mdelay(x)
+#define mvOsUDelay(x)       udelay(x)
+#define mvCopyFromOs        copy_from_user
+#define mvCopyToOs          copy_to_user
+#define mvOsWarning()       WARN_ON(1)
+#define mvOsGetTicks()      jiffies
+#define mvOsGetTicksFreq()  HZ
+
+#include "mvTypes.h"
+#include "mvCommon.h"
+#include "../coherency.h"
+
+#ifdef MV_NDEBUG
+#define mvOsAssert(cond)
+#else
+#define mvOsAssert(cond) { do { if (!(cond)) { BUG(); } } while (0); }
+#endif /* MV_NDEBUG */
+
+#else /* __KERNEL__ */
+
+/* for user space applications */
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#define INLINE inline
+#define mvOsPrintf printf
+#define mvOsOutput printf
+#define mvOsMalloc(_size_) malloc(_size_)
+#define mvOsFree free
+#define mvOsAssert(cond) assert(cond)
+
+#endif /* __KERNEL__ */
+
+#define mvOsIoVirtToPhy(pDev, pVirtAddr)        virt_to_dma((pDev), (pVirtAddr))
+
+#define mvOsIoVirtToPhys(pDev, pVirtAddr)       virt_to_dma((pDev), (pVirtAddr))
+
+#define mvOsCacheFlush(pDev, p, size)                              \
+	(COHERENCY_FABRIC_HARD_MODE() ? virt_to_phys((p)) : dma_map_single((pDev), (p), (size), DMA_TO_DEVICE))
+
+#define mvOsCacheInvalidate(pDev, p, size)                          \
+	(COHERENCY_FABRIC_HARD_MODE() ? virt_to_phys((p)) : dma_map_single((pDev), (p), (size), DMA_FROM_DEVICE))
+
+#define mvOsCacheUnmap(pDev, phys, size)                          \
+	dma_unmap_single((pDev), (dma_addr_t)(phys), (size), DMA_FROM_DEVICE)
+
+#define CPU_PHY_MEM(x)              ((MV_U32)x)
+#define CPU_MEMIO_CACHED_ADDR(x)    ((void *)x)
+#define CPU_MEMIO_UNCACHED_ADDR(x)  ((void *)x)
+
+
+/* CPU architecture dependent 32, 16, 8 bit read/write IO addresses */
+#define MV_MEMIO32_WRITE(addr, data)    \
+	((*((volatile unsigned int *)(addr))) = ((unsigned int)(data)))
+
+#define MV_MEMIO32_READ(addr)           \
+	((*((volatile unsigned int *)(addr))))
+
+#define MV_MEMIO16_WRITE(addr, data)    \
+	((*((volatile unsigned short *)(addr))) = ((unsigned short)(data)))
+
+#define MV_MEMIO16_READ(addr)           \
+	((*((volatile unsigned short *)(addr))))
+
+#define MV_MEMIO8_WRITE(addr, data)     \
+	((*((volatile unsigned char *)(addr))) = ((unsigned char)(data)))
+
+#define MV_MEMIO8_READ(addr)            \
+	((*((volatile unsigned char *)(addr))))
+
+
+/* No Fast Swap implementation (in assembler) for ARM */
+#define MV_32BIT_LE_FAST(val)            MV_32BIT_LE(val)
+#define MV_16BIT_LE_FAST(val)            MV_16BIT_LE(val)
+#define MV_32BIT_BE_FAST(val)            MV_32BIT_BE(val)
+#define MV_16BIT_BE_FAST(val)            MV_16BIT_BE(val)
+
+/* 32 and 16 bit read/write in big/little endian mode */
+
+/* 16bit write in little endian mode */
+#define MV_MEMIO_LE16_WRITE(addr, data) \
+	MV_MEMIO16_WRITE(addr, MV_16BIT_LE_FAST(data))
+
+/* 16bit read in little endian mode */
+static inline MV_U16 MV_MEMIO_LE16_READ(MV_U32 addr)
+{
+	MV_U16 data;
+
+	data = (MV_U16)MV_MEMIO16_READ(addr);
+
+	return (MV_U16)MV_16BIT_LE_FAST(data);
+}
+
+/* 32bit write in little endian mode */
+#define MV_MEMIO_LE32_WRITE(addr, data) \
+	MV_MEMIO32_WRITE(addr, MV_32BIT_LE_FAST(data))
+
+/* 32bit read in little endian mode */
+static inline MV_U32 MV_MEMIO_LE32_READ(MV_U32 addr)
+{
+	MV_U32 data;
+
+	data = (MV_U32)MV_MEMIO32_READ(addr);
+
+	return (MV_U32)MV_32BIT_LE_FAST(data);
+}
+
+static inline void mvOsBCopy(char *srcAddr, char *dstAddr, int byteCount)
+{
+	while (byteCount != 0) {
+		*dstAddr = *srcAddr;
+		dstAddr++;
+		srcAddr++;
+		byteCount--;
+	}
+}
+
+static INLINE MV_U64 mvOsDivMod64(MV_U64 divided, MV_U64 divisor, MV_U64 *modulu)
+{
+	MV_U64  division = 0;
+
+	if (divisor == 1)
+		return divided;
+
+	while (divided >= divisor) {
+		division++;
+		divided -= divisor;
+	}
+	if (modulu != NULL)
+		*modulu = divided;
+
+	return division;
+}
+
+/* Flash APIs */
+#define MV_FL_8_READ            MV_MEMIO8_READ
+#define MV_FL_16_READ           MV_MEMIO_LE16_READ
+#define MV_FL_32_READ           MV_MEMIO_LE32_READ
+#define MV_FL_8_DATA_READ       MV_MEMIO8_READ
+#define MV_FL_16_DATA_READ      MV_MEMIO16_READ
+#define MV_FL_32_DATA_READ      MV_MEMIO32_READ
+#define MV_FL_8_WRITE           MV_MEMIO8_WRITE
+#define MV_FL_16_WRITE          MV_MEMIO_LE16_WRITE
+#define MV_FL_32_WRITE          MV_MEMIO_LE32_WRITE
+#define MV_FL_8_DATA_WRITE      MV_MEMIO8_WRITE
+#define MV_FL_16_DATA_WRITE     MV_MEMIO16_WRITE
+#define MV_FL_32_DATA_WRITE     MV_MEMIO32_WRITE
+
+
+/* CPU cache information */
+#define CPU_I_CACHE_LINE_SIZE   32    /* 2do: replace 32 with linux core macro */
+#define CPU_D_CACHE_LINE_SIZE   32    /* 2do: replace 32 with linux core macro */
+
+#if defined(CONFIG_SHEEVA_ERRATA_ARM_CPU_4413)
+#define	 DSBWA_4413(x)	dmb() 		/* replaced dsb() for optimization */
+#else
+#define  DSBWA_4413(x)
+#endif
+
+#if defined(CONFIG_SHEEVA_ERRATA_ARM_CPU_4611)
+#define	 DSBWA_4611(x)	dmb()		/* replaced dsb() for optimization */
+#else
+#define  DSBWA_4611(x)
+#endif
+
+#define MV_OS_CACHE_MULTI_THRESH	256
+
+static inline void mvOsCacheIoSync(void *handle)
+{
+	if (likely(COHERENCY_FABRIC_HARD_MODE()))
+		dma_sync_single_for_cpu(handle, (dma_addr_t) NULL,
+					CPU_D_CACHE_LINE_SIZE, DMA_FROM_DEVICE);
+}
+
+static inline void mvOsCacheLineFlush(void *handle, void *addr)
+{
+	if (unlikely(!COHERENCY_FABRIC_HARD_MODE()))
+		dma_sync_single_for_device(handle, virt_to_dma(handle, addr), CPU_D_CACHE_LINE_SIZE, DMA_TO_DEVICE);
+}
+
+static inline void mvOsCacheLineInv(void *handle, void *addr)
+{
+	if (unlikely(!COHERENCY_FABRIC_HARD_MODE()))
+		dma_sync_single_for_device(handle, virt_to_dma(handle, addr), CPU_D_CACHE_LINE_SIZE, DMA_FROM_DEVICE);
+}
+
+/* Flush multiple cache lines using mvOsCacheLineFlush to improve performance.              */
+/* addr is the pointer to start the flush operation from. It will be aligned to             */
+/* the beginning of the cache line automatically and the size will be adjusted accordingly. */
+static inline void mvOsCacheMultiLineFlush(void *handle, void *addr, int size)
+{
+	dma_map_single(handle, addr, size, DMA_TO_DEVICE);
+}
+
+/* Invalidate multiple cache lines using mvOsCacheLineInv to improve performance.           */
+/* addr is the pointer to start the invalidate operation from. It will be aligned to        */
+/* the beginning of the cache line automatically and the size will be adjusted accordingly. */
+/* IMPORTANT: this function assumes the invalidate operation on partial lines does not      */
+/* interfere with the data written there.                                                   */
+/* DO NOT USE this function unless you are certain of this!                                 */
+static inline void mvOsCacheMultiLineInv(void *handle, void *addr, int size)
+{
+	dma_map_single(handle, addr, size, DMA_FROM_DEVICE);
+}
+
+/* Flush and invalidate multiple cache lines using mvOsCacheLineFlushInv to improve performance. */
+/* addr is the pointer to start the flush and invalidate operation from. It will be aligned to   */
+/* the beginning of the cache line automatically and the size will be adjusted accordingly.      */
+static inline void mvOsCacheMultiLineFlushInv(void *handle, void *addr, int size)
+{
+	dma_map_single(handle, addr, size, DMA_BIDIRECTIONAL);
+}
+
+/* register manipulations  */
+
+/******************************************************************************
+* This debug function enable the write of each register that u-boot access to
+* to an array in the DRAM, the function record only MV_REG_WRITE access.
+* The function could not be operate when booting from flash.
+* In order to print the array we use the printreg command.
+******************************************************************************/
+/* #define REG_DEBUG */
+#if defined(REG_DEBUG)
+extern int reg_arry[2048][2];
+extern int reg_arry_index;
+#endif
+
+/* Marvell controller register read/write macros */
+#define MV_REG_VALUE(offset)          \
+	(MV_MEMIO32_READ((INTER_REGS_VIRT_BASE | (offset))))
+
+/* PPv2 specific reg read/write */
+#ifdef CONFIG_OF
+#define MV_PP2_CPU0_REG_READ(offset)             \
+	(MV_MEMIO_LE32_READ(offset))
+#define MV_PP2_CPU0_REG_WRITE(offset, val)    \
+	MV_MEMIO_LE32_WRITE((offset), (val))
+
+#define MV_PP2_CPU1_REG_READ(offset)             \
+	(MV_MEMIO_LE32_READ(offset))
+#define MV_PP2_CPU1_REG_WRITE(offset, val)    \
+	MV_MEMIO_LE32_WRITE((offset), (val))
+#else
+#define MV_PP2_CPU0_REG_READ(offset)             \
+	(MV_MEMIO_LE32_READ(PP2_CPU0_VIRT_BASE | (offset & 0xffff)))
+#define MV_PP2_CPU0_REG_WRITE(offset, val)    \
+	MV_MEMIO_LE32_WRITE((PP2_CPU0_VIRT_BASE | (offset & 0xffff)), (val))
+
+#define MV_PP2_CPU1_REG_READ(offset)             \
+	(MV_MEMIO_LE32_READ(PP2_CPU1_VIRT_BASE | (offset & 0xffff)))
+#define MV_PP2_CPU1_REG_WRITE(offset, val)    \
+	MV_MEMIO_LE32_WRITE((PP2_CPU1_VIRT_BASE | (offset & 0xffff)), (val))
+#endif
+
+#ifdef CONFIG_SMP
+#define MV_PP2_REG_READ(offset)	\
+	((smp_processor_id() == 0) ? MV_PP2_CPU0_REG_READ(offset) : MV_PP2_CPU1_REG_READ(offset))
+
+#define MV_PP2_REG_WRITE(offset, val)	\
+	((smp_processor_id() == 0) ? MV_PP2_CPU0_REG_WRITE(offset, val) : MV_PP2_CPU1_REG_WRITE(offset, val))
+#else
+#define MV_PP2_REG_READ(offset)	\
+	MV_PP2_CPU0_REG_READ(offset)
+
+#define MV_PP2_REG_WRITE(offset, val)	\
+	MV_PP2_CPU0_REG_WRITE(offset, val)
+#endif
+
+#define MV_REG_READ(offset)			MV_MEMIO_LE32_READ(offset)
+
+#if defined(REG_DEBUG)
+#define MV_REG_WRITE(offset, val)    \
+	MV_MEMIO_LE32_WRITE(((offset)), (val)); \
+	{ \
+		reg_arry[reg_arry_index][0] = (offset);\
+		reg_arry[reg_arry_index][1] = (val);\
+		reg_arry_index++;\
+	}
+#else
+#define MV_REG_WRITE(offset, val)	MV_MEMIO_LE32_WRITE((offset), (val))
+#endif
+
+#define MV_REG_BYTE_READ(offset)	MV_MEMIO8_READ((offset))
+
+#if defined(REG_DEBUG)
+#define MV_REG_BYTE_WRITE(offset, val)  \
+	MV_MEMIO8_WRITE((offset), (val)); \
+	{ \
+		reg_arry[reg_arry_index][0] = (offset);\
+		reg_arry[reg_arry_index][1] = (val);\
+		reg_arry_index++;\
+	}
+#else
+#define MV_REG_BYTE_WRITE(offset, val)  \
+	MV_MEMIO8_WRITE((offset), (val))
+#endif
+
+#if defined(REG_DEBUG)
+#define MV_REG_BIT_SET(offset, bitMask)                 \
+	(MV_MEMIO32_WRITE((offset), \
+	(MV_MEMIO32_READ(offset) | \
+	MV_32BIT_LE_FAST(bitMask)))); \
+	{ \
+		reg_arry[reg_arry_index][0] = (offset);\
+		reg_arry[reg_arry_index][1] = (MV_MEMIO32_READ(offset));\
+		reg_arry_index++;\
+	}
+#else
+#define MV_REG_BIT_SET(offset, bitMask)                 \
+	(MV_MEMIO32_WRITE((offset), \
+	(MV_MEMIO32_READ(offset) | \
+	MV_32BIT_LE_FAST(bitMask))))
+#endif
+
+#if defined(REG_DEBUG)
+#define MV_REG_BIT_RESET(offset, bitMask)                \
+	(MV_MEMIO32_WRITE((offset), \
+	(MV_MEMIO32_READ(offset) & \
+	MV_32BIT_LE_FAST(~bitMask)))); \
+	{ \
+		reg_arry[reg_arry_index][0] = (offset);\
+		reg_arry[reg_arry_index][1] = (MV_MEMIO32_READ(offset));\
+		reg_arry_index++;\
+	}
+#else
+#define MV_REG_BIT_RESET(offset, bitMask)                \
+	(MV_MEMIO32_WRITE((offset), \
+	(MV_MEMIO32_READ(offset) & \
+	MV_32BIT_LE_FAST(~bitMask))))
+#endif
+
+/* Assembly functions */
+
+/*
+** MV_ASM_READ_CPU_EXTRA_FEATURES
+** Read Marvell extra features register.
+*/
+#define MV_ASM_READ_EXTRA_FEATURES(x) __asm__ volatile("mrc  p15, 1, %0, c15, c1, 0" : "=r" (x));
+
+/*
+** MV_ASM_WAIT_FOR_INTERRUPT
+** Wait for interrupt.
+*/
+#define MV_ASM_WAIT_FOR_INTERRUPT      __asm__ volatile("mcr  p15, 0, r0, c7, c0, 4");
+
+
+/* ARM architecture APIs */
+MV_U32  mvOsCpuRevGet(MV_VOID);
+MV_U32  mvOsCpuPartGet(MV_VOID);
+MV_U32  mvOsCpuArchGet(MV_VOID);
+MV_U32  mvOsCpuVarGet(MV_VOID);
+MV_U32  mvOsCpuAsciiGet(MV_VOID);
+MV_U32  mvOsCpuThumbEEGet(MV_VOID);
+
+/*  Other APIs  */
+void *mvOsIoCachedMalloc(void *osHandle, MV_U32 size, MV_ULONG *pPhyAddr, MV_U32 *memHandle);
+void *mvOsIoUncachedMalloc(void *osHandle, MV_U32 size, MV_ULONG *pPhyAddr, MV_U32 *memHandle);
+void mvOsIoUncachedFree(void *osHandle, MV_U32 size, MV_ULONG phyAddr, void *pVirtAddr, MV_U32 memHandle);
+void mvOsIoCachedFree(void *osHandle, MV_U32 size, MV_ULONG phyAddr, void *pVirtAddr, MV_U32 memHandle);
+int  mvOsRand(void);
+
+#endif /* _MV_OS_LNX_H_ */
diff --git a/arch/arm/mach-mvebu/mvebu-soc-id.c b/arch/arm/mach-mvebu/mvebu-soc-id.c
new file mode 100644
index 000000000000..2cd9ef408ec8
--- /dev/null
+++ b/arch/arm/mach-mvebu/mvebu-soc-id.c
@@ -0,0 +1,144 @@
+/*
+ * ID and revision information for mvebu SoCs
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * All the mvebu SoCs have information related to their variant and
+ * revision that can be read from the PCI control register. This is
+ * done before the PCI initialization to avoid any conflict. Once the
+ * ID and revision are retrieved, the mapping is freed.
+ */
+
+#define pr_fmt(fmt) "mvebu-soc-id: " fmt
+
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include "mvebu-soc-id.h"
+
+#define PCIE_DEV_ID_OFF		0x0
+#define PCIE_DEV_REV_OFF	0x8
+#define A38X_DEV_ID_OFF		0x38
+#define A38X_DEV_REV_OFF	0x3C
+
+#define SOC_ID_MASK	    0xFFFF0000
+#define SOC_REV_MASK	    0xFF
+#define A38X_REV_MASK	    0xF
+
+static u32 soc_dev_id;
+static u32 soc_rev;
+static bool is_id_valid;
+
+static const struct of_device_id mvebu_pcie_of_match_table[] = {
+	{ .compatible = "marvell,armada-xp-pcie", },
+	{ .compatible = "marvell,armada-370-pcie", },
+	{},
+};
+
+static const struct of_device_id mvebu_a38x_of_match_table[] = {
+	{ .compatible = "marvell,armada-380-system-controller", },
+	{},
+};
+
+
+int mvebu_get_soc_id(u32 *dev, u32 *rev)
+{
+	if (is_id_valid) {
+		*dev = soc_dev_id;
+		*rev = soc_rev;
+		return 0;
+	} else
+		return -1;
+}
+
+static int __init mvebu_soc_id_init(void)
+{
+	struct device_node *np;
+	int ret = 0;
+	void __iomem *reg_base;
+	struct clk *clk;
+	struct device_node *child;
+	bool is_pcie_id;
+
+	np = of_find_matching_node(NULL, mvebu_pcie_of_match_table);
+	if (!np) {/* If no pcie for soc-id, try A38x deicated register */
+		np = of_find_matching_node(NULL, mvebu_a38x_of_match_table);
+		if (!np)
+			return ret;
+		is_pcie_id = false;
+	} else {
+		is_pcie_id = true;
+
+		/*
+		 * ID and revision are available from any port, so we
+		 * just pick the first one
+		 */
+		child = of_get_next_child(np, NULL);
+		if (child == NULL) {
+			pr_err("cannot get pci node\n");
+			ret = -ENOMEM;
+			goto clk_err;
+		}
+
+		clk = of_clk_get_by_name(child, NULL);
+		if (IS_ERR(clk)) {
+			pr_err("cannot get clock\n");
+			ret = -ENOMEM;
+			goto clk_err;
+		}
+
+		ret = clk_prepare_enable(clk);
+		if (ret) {
+			pr_err("cannot enable clock\n");
+			goto clk_err;
+		}
+	}
+	if (is_pcie_id == true)
+		reg_base = of_iomap(child, 0);
+	else
+		reg_base = of_iomap(np, 0);
+	if (IS_ERR(reg_base)) {
+		pr_err("cannot map registers\n");
+		ret = -ENOMEM;
+		goto res_ioremap;
+	}
+
+	if (is_pcie_id == true) {
+		/* SoC ID */
+		soc_dev_id = readl(reg_base + PCIE_DEV_ID_OFF) >> 16;
+		/* SoC revision */
+		soc_rev = readl(reg_base + PCIE_DEV_REV_OFF) & SOC_REV_MASK;
+	} else {
+		/* SoC ID */
+		soc_dev_id = readl(reg_base + A38X_DEV_ID_OFF) >> 16;
+		/* SoC revision */
+		soc_rev = (readl(reg_base + A38X_DEV_REV_OFF) >> 8) & A38X_REV_MASK;
+	}
+
+	is_id_valid = true;
+
+	pr_info("MVEBU SoC ID=0x%X, Rev=0x%X\n", soc_dev_id, soc_rev);
+
+	iounmap(reg_base);
+
+res_ioremap:
+	if (is_pcie_id == true)
+		clk_disable_unprepare(clk);
+
+clk_err:
+	if (is_pcie_id == true)
+		of_node_put(child);
+	of_node_put(np);
+
+	return ret;
+}
+core_initcall(mvebu_soc_id_init);
diff --git a/arch/arm/mach-mvebu/mvebu-soc-id.h b/arch/arm/mach-mvebu/mvebu-soc-id.h
new file mode 100644
index 000000000000..8bc0effb838c
--- /dev/null
+++ b/arch/arm/mach-mvebu/mvebu-soc-id.h
@@ -0,0 +1,61 @@
+/*
+ * Marvell EBU SoC ID and revision definitions.
+ *
+ * Copyright (C) 2014 Marvell Semiconductor
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __LINUX_MVEBU_SOC_ID_H
+#define __LINUX_MVEBU_SOC_ID_H
+
+/* Armada 370 ID */
+#define MV6710_DEV_ID		0x6710
+#define MV6707_DEV_ID		0x6707
+
+/* Armada 370 Revision */
+#define MV6710_A1_REV		0x1
+
+/* Armada XP ID */
+#define MV78230_DEV_ID		0x7823
+#define MV78260_DEV_ID		0x7826
+#define MV78460_DEV_ID		0x7846
+
+/* Armada XP Revision */
+#define MV78XX0_A0_REV		0x1
+#define MV78XX0_B0_REV		0x2
+
+/* Armada A375 ID */
+#define MV88F6720_DEV_ID	0x6720
+
+/* Armada A375 Revision */
+#define MV88F6720_A0_REV	0x1
+
+/* Armada A38x ID */
+#define MV88F6810_DEV_ID	0x6810
+#define MV88F6811_DEV_ID	0x6811
+#define MV88F6820_DEV_ID	0x6820
+#define MV88F6828_DEV_ID	0x6828
+
+/* Armada A38x Revision */
+#define MV88F68xx_Z1_REV	0x0
+#define MV88F68xx_A0_REV	0x4
+
+/* Armada KW2 ID */
+#define MV88F6510_DEV_ID	0x6510
+#define MV88F6530_DEV_ID	0x6530
+#define MV88F6560_DEV_ID	0x6560
+#define MV88F6601_DEV_ID	0x6601
+
+#ifdef CONFIG_ARCH_MVEBU
+int mvebu_get_soc_id(u32 *dev, u32 *rev);
+#else
+static inline int mvebu_get_soc_id(u32 *dev, u32 *rev)
+{
+	return -1;
+}
+#endif
+
+#endif /* __LINUX_MVEBU_SOC_ID_H */
diff --git a/arch/arm/mach-mvebu/platsmp-375.c b/arch/arm/mach-mvebu/platsmp-375.c
new file mode 100644
index 000000000000..0bd2d41725d7
--- /dev/null
+++ b/arch/arm/mach-mvebu/platsmp-375.c
@@ -0,0 +1,84 @@
+/*
+ * Symmetric Multi Processing (SMP) support for Armada 375
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+#include <asm/smp_scu.h>
+#include "armada-375.h"
+#include "common.h"
+
+#include "pmsu.h"
+
+extern void a375_secondary_startup(void);
+
+static int __cpuinit armada_375_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+	/*
+	 * Write the address of secondary startup into the system-wide
+	 * flags register. The boot monitor waits until it receives a
+	 * soft interrupt, and then the secondary CPU branches to this
+	 * address.
+	 */
+	armada_375_set_bootaddr(a375_secondary_startup);
+	mvebu_boot_cpu(cpu);
+	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+	return 0;
+}
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+static void __init armada_375_smp_init_cpus(void)
+{
+	struct device_node *np;
+	unsigned int i, ncores;
+
+	np = of_find_node_by_name(NULL, "cpus");
+	if (!np)
+		panic("No 'cpus' node found\n");
+
+	ncores = of_get_child_count(np);
+	if (ncores == 0 || ncores > ARMADA_375_MAX_CPUS)
+		panic("Invalid number of CPUs in DT\n");
+
+	/* Limit possible CPUs to defconfig */
+	if (ncores > nr_cpu_ids) {
+		pr_warn("SMP: %d CPUs physically present. Only %d configured.",
+			ncores, nr_cpu_ids);
+		pr_warn("Clipping CPU count to %d\n", nr_cpu_ids);
+		ncores = nr_cpu_ids;
+	}
+
+	for (i = 0; i < ncores; ++i)
+		set_cpu_possible(i, true);
+}
+
+static void __init armada_375_smp_prepare_cpus(unsigned int max_cpus)
+{
+	int i;
+
+	for (i = 0; i < max_cpus; i++)
+		set_cpu_present(i, true);
+}
+
+struct smp_operations armada_375_smp_ops __initdata = {
+	.smp_init_cpus		= armada_375_smp_init_cpus,
+	.smp_prepare_cpus	= armada_375_smp_prepare_cpus,
+	.smp_boot_secondary	= armada_375_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+	.cpu_die		= armada_xp_cpu_die,
+#endif
+};
diff --git a/arch/arm/mach-mvebu/platsmp-380.c b/arch/arm/mach-mvebu/platsmp-380.c
new file mode 100644
index 000000000000..a4f5271db590
--- /dev/null
+++ b/arch/arm/mach-mvebu/platsmp-380.c
@@ -0,0 +1,122 @@
+/*
+ * Symmetric Multi Processing (SMP) support for Armada 380/385
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/irq.h>
+#include <asm/smp_scu.h>
+#include "armada-380.h"
+#include "common.h"
+
+#include "pmsu.h"
+
+extern void a380_secondary_startup(void);
+static struct notifier_block armada_380_secondary_cpu_notifier;
+
+static int __cpuinit armada_380_boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+	mvebu_pmsu_set_cpu_boot_addr(cpu, a380_secondary_startup);
+	mvebu_boot_cpu(cpu);
+	arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+	return 0;
+}
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+static void __init armada_380_smp_init_cpus(void)
+{
+	struct device_node *np;
+	unsigned int i, ncores;
+
+	np = of_find_node_by_name(NULL, "cpus");
+	if (!np)
+		panic("No 'cpus' node found\n");
+
+	ncores = of_get_child_count(np);
+	if (ncores == 0 || ncores > ARMADA_380_MAX_CPUS)
+		panic("Invalid number of CPUs in DT\n");
+
+	/* Limit possible CPUs to defconfig */
+	if (ncores > nr_cpu_ids) {
+		pr_warn("SMP: %d CPUs physically present. Only %d configured.",
+			ncores, nr_cpu_ids);
+		pr_warn("Clipping CPU count to %d\n", nr_cpu_ids);
+		ncores = nr_cpu_ids;
+	}
+
+	for (i = 0; i < ncores; ++i)
+		set_cpu_possible(i, true);
+}
+
+static void __init armada_380_smp_prepare_cpus(unsigned int max_cpus)
+{
+	int i;
+
+	for (i = 0; i < max_cpus; i++)
+		set_cpu_present(i, true);
+
+	/*
+	 * Register notifier to unmask SOC Private Peripheral Interrupt on
+	 * second core. It have to be done here because the interrupt
+	 * cannot be enabled from another CPU.
+	 */
+	register_cpu_notifier(&armada_380_secondary_cpu_notifier);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void armada_38x_cpu_die(unsigned int cpu)
+{
+	/*
+	 * CPU hotplug is implemented by putting offline CPUs into the
+	 * deep idle sleep state.
+	 */
+	armada_38x_do_cpu_suspend(true);
+}
+#endif
+
+struct smp_operations armada_380_smp_ops __initdata = {
+	.smp_init_cpus		= armada_380_smp_init_cpus,
+	.smp_prepare_cpus	= armada_380_smp_prepare_cpus,
+	.smp_boot_secondary	= armada_380_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+	.cpu_die		= armada_38x_cpu_die,
+#endif
+};
+
+/*
+ * CPU Notifier for enabling the SOC Private Peripheral Interrupts on CPU1.
+ */
+static int __cpuinit armada_380_secondary_init(struct notifier_block *nfb,
+					       unsigned long action, void *hcpu)
+{
+	struct irq_data *irqd;
+
+	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) {
+		irqd = irq_get_irq_data(IRQ_PRIV_MPIC_PPI_IRQ);
+		if (irqd && irqd->chip && irqd->chip->irq_unmask)
+			irqd->chip->irq_unmask(irqd);
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata armada_380_secondary_cpu_notifier = {
+	.notifier_call = armada_380_secondary_init,
+	.priority = INT_MIN,
+};
diff --git a/arch/arm/mach-mvebu/platsmp.c b/arch/arm/mach-mvebu/platsmp.c
index 875ea748391c..8d10c9a7fae2 100644
--- a/arch/arm/mach-mvebu/platsmp.c
+++ b/arch/arm/mach-mvebu/platsmp.c
@@ -21,6 +21,7 @@
 #include <linux/smp.h>
 #include <linux/clk.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/mbus.h>
 #include <asm/cacheflush.h>
 #include <asm/smp_plat.h>
@@ -29,6 +30,9 @@
 #include "pmsu.h"
 #include "coherency.h"
 
+#define AXP_BOOTROM_BASE 0xfff00000
+#define AXP_BOOTROM_SIZE 0x100000
+
 void __init set_secondary_cpus_clock(void)
 {
 	int thiscpu;
@@ -81,15 +85,24 @@ static int __cpuinit armada_xp_boot_secondary(unsigned int cpu,
 {
 	pr_info("Booting CPU %d\n", cpu);
 
-	armada_xp_boot_cpu(cpu, armada_xp_secondary_startup);
+	mvebu_pmsu_set_cpu_boot_addr(cpu, armada_xp_secondary_startup);
+	mvebu_boot_cpu(cpu);
 
 	return 0;
 }
 
 static void __init armada_xp_smp_init_cpus(void)
 {
+	struct device_node *np;
 	unsigned int i, ncores;
-	ncores = coherency_get_cpu_count();
+
+	np = of_find_node_by_name(NULL, "cpus");
+	if (!np)
+		panic("No 'cpus' node found\n");
+
+	ncores = of_get_child_count(np);
+	if (ncores == 0 || ncores > ARMADA_XP_MAX_CPUS)
+		panic("Invalid number of CPUs in DT\n");
 
 	/* Limit possible CPUs to defconfig */
 	if (ncores > nr_cpu_ids) {
@@ -107,10 +120,29 @@ static void __init armada_xp_smp_init_cpus(void)
 
 void __init armada_xp_smp_prepare_cpus(unsigned int max_cpus)
 {
+	struct device_node *node;
+	struct resource res;
+	int err;
+
 	set_secondary_cpus_clock();
 	flush_cache_all();
-	set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
-	mvebu_mbus_add_window("bootrom", 0xfff00000, SZ_1M);
+	set_cpu_coherent();
+
+	/*
+	 * In order to boot the secondary CPUs we need to ensure
+	 * the bootROM is mapped at the correct address.
+	 */
+	node = of_find_compatible_node(NULL, NULL, "marvell,bootrom");
+	if (!node)
+		panic("Cannot find 'marvell,bootrom' compatible node");
+
+	err = of_address_to_resource(node, 0, &res);
+	if (err < 0)
+		panic("Cannot get 'bootrom' node address");
+
+	if (res.start != AXP_BOOTROM_BASE ||
+	    resource_size(&res) != AXP_BOOTROM_SIZE)
+		panic("The address for the BootROM is incorrect");
 }
 
 struct smp_operations armada_xp_smp_ops __initdata = {
diff --git a/arch/arm/mach-mvebu/pm-board.c b/arch/arm/mach-mvebu/pm-board.c
new file mode 100644
index 000000000000..043f1b8ab2cf
--- /dev/null
+++ b/arch/arm/mach-mvebu/pm-board.c
@@ -0,0 +1,161 @@
+/*
+ * Board-level suspend/resume support.
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/slab.h>
+#include "common.h"
+
+#define ARMADA_XP_GP_PIC_NR_GPIOS 3
+
+static void __iomem *gpio_ctrl[ARMADA_XP_GP_PIC_NR_GPIOS];
+static void __iomem *gpio_ctrl_addr[ARMADA_XP_GP_PIC_NR_GPIOS];
+static int pic_gpios[ARMADA_XP_GP_PIC_NR_GPIOS];
+static int pic_raw_gpios[ARMADA_XP_GP_PIC_NR_GPIOS];
+static int pic_gpios_num;
+
+static void mvebu_armada_xp_gp_pm_enter(void __iomem *sdram_reg, u32 srcmd)
+{
+	u32 reg, ackcmd;
+	int i;
+
+	/* Put 001 as value on the GPIOs */
+	for (i = 0; i < pic_gpios_num; i++) {
+		reg = readl(gpio_ctrl[i]);
+		reg &= ~BIT(pic_raw_gpios[i]);
+		if (i == 0)
+			reg |= BIT(pic_raw_gpios[0]);
+		writel(reg, gpio_ctrl[i]);
+	}
+
+	/* Prepare writing 111 to the GPIOs */
+	/* This code assumes that the ack bits (#1 and possibly #2) belong to the same GPIO group */
+	ackcmd = readl(gpio_ctrl[pic_gpios_num-1]);
+	for (i = 0; i < pic_gpios_num; i++) {
+		if (gpio_ctrl[i] == gpio_ctrl[pic_gpios_num-1])
+			ackcmd |= BIT(pic_raw_gpios[i]);
+	}
+
+	/*
+	 * Wait a while, the PIC needs quite a bit of time between the
+	 * two GPIO commands.
+	 */
+	mdelay(3000);
+
+	asm volatile (
+		/* Align to a cache line */
+		".balign 32\n\t"
+
+		/* Enter self refresh */
+		"str %[srcmd], [%[sdram_reg]]\n\t"
+
+		/*
+		 * Wait 100 cycles for DDR to enter self refresh, by
+		 * doing 50 times two instructions.
+		 */
+		"mov r1, #50\n\t"
+		"1: subs r1, r1, #1\n\t"
+		"bne 1b\n\t"
+
+		/* Issue the command ACK */
+		"str %[ackcmd], [%[gpio_ctrl]]\n\t"
+
+		/* Trap the processor */
+		"b .\n\t"
+		: : [srcmd] "r" (srcmd), [sdram_reg] "r" (sdram_reg),
+		  [ackcmd] "r" (ackcmd), [gpio_ctrl] "r" (gpio_ctrl[pic_gpios_num-1]) : "r1");
+}
+
+static int mvebu_armada_xp_gp_pm_init(void)
+{
+	struct device_node *np;
+	struct device_node *gpio_ctrl_np;
+	int ret = 0, i;
+
+	if (!of_machine_is_compatible("marvell,axp-gp") &&
+		!of_machine_is_compatible("marvell,a388-db-gp") &&
+		!of_machine_is_compatible("marvell,a385-db-ap"))
+		return -ENODEV;
+
+	np = of_find_node_by_name(NULL, "pm_pic");
+	if (!np)
+		return -ENODEV;
+
+	pic_gpios_num = of_gpio_named_count(np, "ctrl-gpios");
+	if (pic_gpios_num < 1)
+		return -ENODEV;
+
+	for (i = 0; i < pic_gpios_num; i++) {
+		char *name;
+		struct of_phandle_args args;
+
+		pic_gpios[i] = of_get_named_gpio(np, "ctrl-gpios", i);
+		if (pic_gpios[i] < 0) {
+			ret = -ENODEV;
+			goto out;
+		}
+
+		name = kasprintf(GFP_KERNEL, "pic-pin%d", i);
+		if (!name) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		ret = gpio_request(pic_gpios[i], name);
+		if (ret < 0) {
+			kfree(name);
+			goto out;
+		}
+
+		ret = gpio_direction_output(pic_gpios[i], 0);
+		if (ret < 0) {
+			gpio_free(pic_gpios[i]);
+			kfree(name);
+			goto out;
+		}
+
+		ret = of_parse_phandle_with_fixed_args(np, "ctrl-gpios", 2,
+						       i, &args);
+		if (ret < 0) {
+			gpio_free(pic_gpios[i]);
+			kfree(name);
+			goto out;
+		}
+
+		gpio_ctrl_np = args.np;
+		pic_raw_gpios[i] = args.args[0];
+		gpio_ctrl_addr[i] = of_get_address(gpio_ctrl_np, 0, NULL, NULL);
+
+		if ((i == 0) || (i > 0 && gpio_ctrl_addr[i] != gpio_ctrl_addr[i-1]))
+			gpio_ctrl[i] = of_iomap(gpio_ctrl_np, 0);
+		else
+			gpio_ctrl[i] = gpio_ctrl[i-1];
+
+		if (!gpio_ctrl[i]) {
+			ret = -ENOMEM;
+			goto out;
+		}
+	}
+
+	mvebu_pm_init(mvebu_armada_xp_gp_pm_enter);
+
+out:
+	of_node_put(np);
+	return ret;
+}
+
+late_initcall(mvebu_armada_xp_gp_pm_init);
diff --git a/arch/arm/mach-mvebu/pm.c b/arch/arm/mach-mvebu/pm.c
new file mode 100644
index 000000000000..60d66984dbb5
--- /dev/null
+++ b/arch/arm/mach-mvebu/pm.c
@@ -0,0 +1,231 @@
+/*
+ * Suspend/resume support. Currently supporting Armada XP only.
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/cpu_pm.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mbus.h>
+#include <linux/of_address.h>
+#include <linux/suspend.h>
+#include <asm/cacheflush.h>
+#include <asm/outercache.h>
+#include <asm/suspend.h>
+
+#include "coherency.h"
+#include "pmsu.h"
+
+#define SDRAM_CONFIG_OFFS                  0x0
+#define  SDRAM_CONFIG_SR_MODE_BIT          BIT(24)
+#define SDRAM_OPERATION_OFFS               0x18
+#define  SDRAM_OPERATION_SELF_REFRESH      0x7
+#define SDRAM_DLB_EVICTION_OFFS            0x30c
+#define  SDRAM_DLB_EVICTION_THRESHOLD_MASK 0xff
+
+extern void armada_38x_cpu_mem_resume(void);
+
+static void (*mvebu_board_pm_enter)(void __iomem *sdram_reg, u32 srcmd);
+static void __iomem *sdram_ctrl;
+
+static int mvebu_pm_powerdown(unsigned long data)
+{
+	u32 reg, srcmd;
+
+	flush_cache_all();
+	outer_flush_all();
+
+	/*
+	 * Issue a Data Synchronization Barrier instruction to ensure
+	 * that all state saving has been completed.
+	 */
+	dsb();
+
+	/* Flush the DLB and wait ~7 usec */
+	reg = readl(sdram_ctrl + SDRAM_DLB_EVICTION_OFFS);
+	reg &= ~SDRAM_DLB_EVICTION_THRESHOLD_MASK;
+	writel(reg, sdram_ctrl + SDRAM_DLB_EVICTION_OFFS);
+
+	udelay(7);
+
+	/* Set DRAM in battery backup mode */
+	reg = readl(sdram_ctrl + SDRAM_CONFIG_OFFS);
+	reg &= ~SDRAM_CONFIG_SR_MODE_BIT;
+	writel(reg, sdram_ctrl + SDRAM_CONFIG_OFFS);
+
+	/* Prepare to go to self-refresh */
+
+	srcmd = readl(sdram_ctrl + SDRAM_OPERATION_OFFS);
+	srcmd &= ~0x1F;
+	srcmd |= SDRAM_OPERATION_SELF_REFRESH;
+
+	mvebu_board_pm_enter(sdram_ctrl + SDRAM_OPERATION_OFFS, srcmd);
+
+	return 0;
+}
+
+#define BOOT_INFO_ADDR      0x3000
+#define BOOT_MAGIC_WORD	    0xdeadb002
+#define BOOT_MAGIC_LIST_END 0xffffffff
+
+/*
+ * Those registers are accessed before switching the internal register
+ * base, which is why we hardcode the 0xd0000000 base address, the one
+ * used by the SoC out of reset.
+ */
+#define MBUS_WINDOW_12_CTRL       0xd00200b0
+#define MBUS_INTERNAL_REG_ADDRESS 0xd0020080
+
+#define SDRAM_WIN_BASE_REG(x)	(0x20180 + (0x8*x))
+#define SDRAM_WIN_CTRL_REG(x)	(0x20184 + (0x8*x))
+
+static phys_addr_t mvebu_internal_reg_base(void)
+{
+	struct device_node *np;
+	__be32 in_addr[2];
+
+	np = of_find_node_by_name(NULL, "internal-regs");
+	BUG_ON(!np);
+
+	/*
+	 * Ask the DT what is the internal register address on this
+	 * platform. In the mvebu-mbus DT binding, 0xf0010000
+	 * corresponds to the internal register window.
+	 */
+	in_addr[0] = cpu_to_be32(0xf0010000);
+	in_addr[1] = 0x0;
+
+	return of_translate_address(np, in_addr);
+}
+
+static void mvebu_pm_store_bootinfo(void)
+{
+	u32 *store_addr;
+	phys_addr_t resume_pc;
+
+	store_addr = phys_to_virt(BOOT_INFO_ADDR);
+	/* TBD - Fix support for Armada XP */
+	/* resume_pc = virt_to_phys(armada_370_xp_cpu_resume); */
+	resume_pc = virt_to_phys(armada_38x_cpu_mem_resume);
+
+	/*
+	 * The bootloader expects the first two words to be a magic
+	 * value (BOOT_MAGIC_WORD), followed by the address of the
+	 * resume code to jump to. Then, it expects a sequence of
+	 * (address, value) pairs, which can be used to restore the
+	 * value of certain registers. This sequence must end with the
+	 * BOOT_MAGIC_LIST_END magic value.
+	 */
+
+	writel(BOOT_MAGIC_WORD, store_addr++);
+	writel(resume_pc, store_addr++);
+
+	/*
+	 * Some platforms remap their internal register base address
+	 * to 0xf1000000. However, out of reset, window 12 starts at
+	 * 0xf0000000 and ends at 0xf7ffffff, which would overlap with
+	 * the internal registers. Therefore, disable window 12.
+	 */
+	writel(MBUS_WINDOW_12_CTRL, store_addr++);
+	writel(0x0, store_addr++);
+
+	/*
+	 * Set the internal register base address to the value
+	 * expected by Linux, as read from the Device Tree.
+	 */
+	/* TBD - Fix support for Armada XP */
+	/* Skip this part for now for Armada 38x - will be done in Linux resume function */
+#if 0
+	writel(MBUS_INTERNAL_REG_ADDRESS, store_addr++); */
+	writel(mvebu_internal_reg_base(), store_addr++);
+#endif
+
+	/*
+	 * Ask the mvebu-mbus driver to store the SDRAM window
+	 * configuration, which has to be restored by the bootloader
+	 * before re-entering the kernel on resume.
+	 */
+	store_addr += mvebu_mbus_save_cpu_target(store_addr);
+
+	writel(BOOT_MAGIC_LIST_END, store_addr);
+}
+
+static int mvebu_pm_enter(suspend_state_t state)
+{
+	if (state != PM_SUSPEND_MEM)
+		return -EINVAL;
+
+	cpu_pm_enter();
+
+	mvebu_pm_store_bootinfo();
+
+	outer_flush_all();
+	outer_disable();
+
+	cpu_suspend(0, mvebu_pm_powerdown);
+
+	outer_resume();
+
+	mvebu_v7_pmsu_idle_exit();
+
+	set_cpu_coherent();
+
+	cpu_pm_exit();
+
+	return 0;
+}
+
+static const struct platform_suspend_ops mvebu_pm_ops = {
+	.enter = mvebu_pm_enter,
+	.valid = suspend_valid_only_mem,
+};
+
+int mvebu_pm_init(void (*board_pm_enter)(void __iomem *sdram_reg, u32 srcmd))
+{
+	struct device_node *np;
+	struct resource res;
+
+	if (!of_machine_is_compatible("marvell,armadaxp") &&
+		!of_machine_is_compatible("marvell,armada38x"))
+		return -ENODEV;
+
+	np = of_find_compatible_node(NULL, NULL,
+				     "marvell,armada-xp-sdram-controller");
+	if (!np)
+		return -ENODEV;
+
+	if (of_address_to_resource(np, 0, &res)) {
+		of_node_put(np);
+		return -ENODEV;
+	}
+
+	if (!request_mem_region(res.start, resource_size(&res),
+				np->full_name)) {
+		of_node_put(np);
+		return -EBUSY;
+	}
+
+	sdram_ctrl = ioremap(res.start, resource_size(&res));
+	if (!sdram_ctrl) {
+		release_mem_region(res.start, resource_size(&res));
+		of_node_put(np);
+		return -ENOMEM;
+	}
+
+	of_node_put(np);
+
+	mvebu_board_pm_enter = board_pm_enter;
+
+	suspend_set_ops(&mvebu_pm_ops);
+
+	return 0;
+}
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index 3cc4bef6401c..eebee3759607 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -16,60 +16,527 @@
  * other SOC units
  */
 
-#include <linux/kernel.h>
+#define pr_fmt(fmt) "mvebu-pmsu: " fmt
+
+#include <linux/cpu_pm.h>
+#include <linux/cpuidle.h>
 #include <linux/init.h>
-#include <linux/of_address.h>
 #include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mbus.h>
+#include <linux/mvebu-v7-cpuidle.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
 #include <linux/smp.h>
+#include <asm/cacheflush.h>
+#include <asm/cp15.h>
+#include <asm/smp_scu.h>
 #include <asm/smp_plat.h>
+#include <asm/suspend.h>
+#include <asm/tlbflush.h>
+#include "common.h"
+
+
+#define PMSU_BASE_OFFSET    0x100
+#define PMSU_REG_SIZE	    0x1000
+
+/* PMSU MP registers */
+#define PMSU_CONTROL_AND_CONFIG(cpu)	    ((cpu * 0x100) + 0x104)
+#define PMSU_CONTROL_AND_CONFIG_DFS_REQ		BIT(18)
+#define PMSU_CONTROL_AND_CONFIG_PWDDN_REQ	BIT(16)
+#define PMSU_CONTROL_AND_CONFIG_L2_PWDDN	BIT(20)
+
+#define PMSU_CPU_POWER_DOWN_CONTROL(cpu)    ((cpu * 0x100) + 0x108)
+
+#define PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP	BIT(0)
+
+#define PMSU_STATUS_AND_MASK(cpu)	    ((cpu * 0x100) + 0x10c)
+#define PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT	BIT(16)
+#define PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT	BIT(17)
+#define PMSU_STATUS_AND_MASK_IRQ_WAKEUP		BIT(20)
+#define PMSU_STATUS_AND_MASK_FIQ_WAKEUP		BIT(21)
+#define PMSU_STATUS_AND_MASK_DBG_WAKEUP		BIT(22)
+#define PMSU_STATUS_AND_MASK_IRQ_MASK		BIT(24)
+#define PMSU_STATUS_AND_MASK_FIQ_MASK		BIT(25)
+
+#define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu) ((cpu * 0x100) + 0x124)
+
+/* PMSU fabric registers */
+#define L2C_NFABRIC_PM_CTL		    0x4
+#define L2C_NFABRIC_PM_CTL_PWR_DOWN		BIT(20)
+
+/* PMSU delay registers */
+#define PMSU_POWERDOWN_DELAY		    0xF04
+#define PMSU_POWERDOWN_DELAY_PMU		BIT(1)
+#define PMSU_POWERDOWN_DELAY_MASK		0xFFFE
+#define PMSU_DFLT_ARMADA38X_DELAY	        0x64
+
+/* CA9 MPcore SoC Control registers */
+
+#define MPCORE_RESET_CTL		    0x64
+#define MPCORE_RESET_CTL_L2			BIT(0)
+#define MPCORE_RESET_CTL_DEBUG			BIT(16)
+
+#define SRAM_PHYS_BASE  0xFFFF0000
+#define BOOTROM_BASE    0xFFF00000
+#define BOOTROM_SIZE    0x100000
+
+#define ARMADA_370_CRYPT0_ENG_TARGET   0x9
+#define ARMADA_370_CRYPT0_ENG_ATTR     0x1
+
+extern void ll_disable_coherency(void);
+extern void ll_enable_coherency(void);
 
+extern void armada_370_xp_cpu_resume(void);
+extern void armada_38x_cpu_resume(void);
+
+void __iomem *scu_base;
+
+static phys_addr_t pmsu_mp_phys_base;
 static void __iomem *pmsu_mp_base;
-static void __iomem *pmsu_reset_base;
 
-#define PMSU_BOOT_ADDR_REDIRECT_OFFSET(cpu)	((cpu * 0x100) + 0x24)
-#define PMSU_RESET_CTL_OFFSET(cpu)		(cpu * 0x8)
+static void *mvebu_cpu_resume;
+static void __iomem *sram_wa_virt_base[2];
 
 static struct of_device_id of_pmsu_table[] = {
-	{.compatible = "marvell,armada-370-xp-pmsu"},
+	{ .compatible = "marvell,armada-370-pmsu", },
+	{ .compatible = "marvell,armada-370-xp-pmsu", },
+	{ .compatible = "marvell,armada-380-pmsu", },
 	{ /* end of list */ },
 };
 
-#ifdef CONFIG_SMP
-int armada_xp_boot_cpu(unsigned int cpu_id, void *boot_addr)
+void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
+{
+	writel(virt_to_phys(boot_addr), pmsu_mp_base +
+		PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
+}
+
+extern unsigned char mvebu_boot_wa_start;
+extern unsigned char mvebu_boot_wa_end;
+extern long sleep_save_sp[CONFIG_NR_CPUS];
+/*
+ * This function sets up the boot address workaround needed for SMP
+ * boot on Armada 375 Z1 and cpuidle on Armada 370. It unmaps the
+ * BootROM Mbus window, and instead remaps a crypto SRAM into which a
+ * custom piece of code is copied to replace the problematic BootROM.
+ */
+int mvebu_setup_boot_addr_wa(unsigned int crypto_eng_target,
+			     unsigned int crypto_eng_attribute,
+			     phys_addr_t resume_addr_reg)
 {
-	int reg, hw_cpu;
+	void __iomem *sram_virt_base;
+	u32 code_len = &mvebu_boot_wa_end - &mvebu_boot_wa_start;
 
-	if (!pmsu_mp_base || !pmsu_reset_base) {
-		pr_warn("Can't boot CPU. PMSU is uninitialized\n");
-		return 1;
+	mvebu_mbus_del_window(BOOTROM_BASE, BOOTROM_SIZE);
+	mvebu_mbus_add_window_by_id(crypto_eng_target, crypto_eng_attribute,
+				    SRAM_PHYS_BASE, SZ_64K);
+
+	sram_virt_base = ioremap(SRAM_PHYS_BASE, SZ_64K);
+	if (!sram_virt_base) {
+		pr_err("Unable to map SRAM to setup the boot address WA\n");
+		return -ENOMEM;
 	}
 
-	hw_cpu = cpu_logical_map(cpu_id);
+	memcpy(sram_virt_base, &mvebu_boot_wa_start, code_len);
 
-	writel(virt_to_phys(boot_addr), pmsu_mp_base +
-			PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
+	/*
+	 * The last word of the code copied in SRAM must contain the
+	 * physical base address of the PMSU register. We
+	 * intentionally store this address in the native endianness
+	 * of the system.
+	 */
+	__raw_writel((unsigned long)resume_addr_reg,
+		     sram_virt_base + code_len - 4);
 
-	/* Release CPU from reset by clearing reset bit*/
-	reg = readl(pmsu_reset_base + PMSU_RESET_CTL_OFFSET(hw_cpu));
-	reg &= (~0x1);
-	writel(reg, pmsu_reset_base + PMSU_RESET_CTL_OFFSET(hw_cpu));
+	iounmap(sram_virt_base);
 
 	return 0;
 }
-#endif
 
-int __init armada_370_xp_pmsu_init(void)
+/*
+ * Due to a known ARM architecture bug which is related to the PCIe bus,
+ * the L2 cache cannot be cleaned during the cpuidle suspend flow under active PCIe traffic.
+ * Therefore we copy the cpuidle saved data to the CESA SRAM instead of cleaning it to the DRAM
+ * and copy it to the DRAM when we return from the idle state
+ */
+int armada_38x_cpuidle_wa(void)
+{
+	u32 *tmp;
+	unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
+
+	tmp = sram_wa_virt_base[hw_cpu];
+	*tmp = virt_to_phys(&sleep_save_sp[hw_cpu]);	/* Save address */
+	tmp++;
+	*tmp = sleep_save_sp[hw_cpu];			/* Save pointer address */
+	tmp++;
+	*tmp = 0x2C;					/* Save size */
+	tmp++;
+	memcpy(tmp, sleep_save_sp[hw_cpu] + 0xC0000000, 0x2C); /* Copy from virtual address (mmu enabled) */
+}
+
+static int __init mvebu_v7_pmsu_init(void)
 {
 	struct device_node *np;
+	struct resource res;
+	int ret = 0;
 
 	np = of_find_matching_node(NULL, of_pmsu_table);
-	if (np) {
-		pr_info("Initializing Power Management Service Unit\n");
-		pmsu_mp_base = of_iomap(np, 0);
-		pmsu_reset_base = of_iomap(np, 1);
+	if (!np)
+		return 0;
+
+	pr_info("Initializing Power Management Service Unit\n");
+
+	if (of_address_to_resource(np, 0, &res)) {
+		pr_err("unable to get resource\n");
+		ret = -ENOENT;
+		goto out;
+	}
+
+	if (of_device_is_compatible(np, "marvell,armada-370-xp-pmsu")) {
+		pr_warn(FW_WARN "deprecated pmsu binding\n");
+		res.start = res.start - PMSU_BASE_OFFSET;
+		res.end = res.start + PMSU_REG_SIZE - 1;
+	}
+
+	if (!request_mem_region(res.start, resource_size(&res),
+				np->full_name)) {
+		pr_err("unable to request region\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	pmsu_mp_phys_base = res.start;
+
+	pmsu_mp_base = ioremap(res.start, resource_size(&res));
+	if (!pmsu_mp_base) {
+		pr_err("unable to map registers\n");
+		release_mem_region(res.start, resource_size(&res));
+		ret = -ENOMEM;
+		goto out;
+	}
+
+ out:
+	of_node_put(np);
+	return ret;
+}
+
+static void mvebu_v7_pmsu_enable_l2_powerdown_onidle(void)
+{
+	u32 reg;
+
+	if (pmsu_mp_base == NULL)
+		return;
+
+	/* Enable L2 & Fabric powerdown in Deep-Idle mode - Fabric */
+	reg = readl(pmsu_mp_base + L2C_NFABRIC_PM_CTL);
+	reg |= L2C_NFABRIC_PM_CTL_PWR_DOWN;
+	writel(reg, pmsu_mp_base + L2C_NFABRIC_PM_CTL);
+}
+
+enum pmsu_idle_prepare_flags {
+	PMSU_PREPARE_NORMAL = 0,
+	PMSU_PREPARE_DEEP_IDLE = BIT(0),
+	PMSU_PREPARE_SNOOP_DISABLE = BIT(1),
+};
+
+/* No locking is needed because we only access per-CPU registers */
+static int mvebu_v7_pmsu_idle_prepare(unsigned long flags)
+{
+	unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
+	u32 reg;
+
+	if (pmsu_mp_base == NULL)
+		return -EINVAL;
+
+	/*
+	 * Adjust the PMSU configuration to wait for WFI signal, enable
+	 * IRQ and FIQ as wakeup events, set wait for snoop queue empty
+	 * indication and mask IRQ and FIQ from CPU
+	 */
+	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
+	reg |= PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT    |
+	       PMSU_STATUS_AND_MASK_IRQ_WAKEUP       |
+	       PMSU_STATUS_AND_MASK_FIQ_WAKEUP       |
+	       PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT |
+	       PMSU_STATUS_AND_MASK_IRQ_MASK         |
+	       PMSU_STATUS_AND_MASK_FIQ_MASK;
+	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
+
+	reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
+	/* ask HW to power down the L2 Cache if needed */
+	if (flags & PMSU_PREPARE_DEEP_IDLE)
+		reg |= PMSU_CONTROL_AND_CONFIG_L2_PWDDN;
+
+	/* request power down */
+	reg |= PMSU_CONTROL_AND_CONFIG_PWDDN_REQ;
+	writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
+
+	if (flags & PMSU_PREPARE_SNOOP_DISABLE) {
+		/* Disable snoop disable by HW - SW is taking care of it */
+		reg = readl(pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu));
+		reg |= PMSU_CPU_POWER_DOWN_DIS_SNP_Q_SKIP;
+		writel(reg, pmsu_mp_base + PMSU_CPU_POWER_DOWN_CONTROL(hw_cpu));
+	}
+
+	return 0;
+}
+
+int armada_370_xp_pmsu_idle_enter(unsigned long deepidle)
+{
+	unsigned long flags = PMSU_PREPARE_SNOOP_DISABLE;
+	int ret;
+
+	if (deepidle)
+		flags |= PMSU_PREPARE_DEEP_IDLE;
+
+	ret = mvebu_v7_pmsu_idle_prepare(flags);
+	if (ret)
+		return ret;
+
+	v7_exit_coherency_flush(all);
+
+	ll_disable_coherency();
+
+	dsb();
+
+	wfi();
+
+	/* If we are here, wfi failed. As processors run out of
+	 * coherency for some time, tlbs might be stale, so flush them
+	 */
+	local_flush_tlb_all();
+
+	ll_enable_coherency();
+
+	/* Test the CR_C bit and set it if it was cleared */
+	asm volatile(
+	"mrc	p15, 0, r0, c1, c0, 0 \n\t"
+	"tst	r0, #(1 << 2) \n\t"
+	"orreq	r0, r0, #(1 << 2) \n\t"
+	"mcreq	p15, 0, r0, c1, c0, 0 \n\t"
+	"isb	"
+	: : : "r0");
+
+	pr_debug("Failed to suspend the system\n");
+
+	return 0;
+}
+
+static int armada_370_xp_cpu_suspend(unsigned long deepidle)
+{
+	return cpu_suspend(deepidle, armada_370_xp_pmsu_idle_enter);
+}
+
+int armada_38x_do_cpu_suspend(unsigned long deepidle)
+{
+	unsigned long flags = 0;
+
+	armada_38x_cpuidle_wa();
+
+	if (deepidle)
+		flags |= PMSU_PREPARE_DEEP_IDLE;
+
+	mvebu_v7_pmsu_idle_prepare(flags);
+	/*
+	 * Already flushed cache, but do it again as the outer cache
+	 * functions dirty the cache with spinlocks
+	 */
+	v7_exit_coherency_flush(louis);
+
+	scu_power_mode(scu_base, SCU_PM_POWEROFF);
+
+	cpu_do_idle();
+
+	return 1;
+}
+
+static int armada_38x_cpu_suspend(unsigned long deepidle)
+{
+	return cpu_suspend(false, armada_38x_do_cpu_suspend);
+}
+
+/* No locking is needed because we only access per-CPU registers */
+void mvebu_v7_pmsu_idle_exit(void)
+{
+	unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
+	u32 reg;
+
+	if (pmsu_mp_base == NULL)
+		return;
+	/* cancel ask HW to power down the L2 Cache if possible */
+	reg = readl(pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
+	reg &= ~PMSU_CONTROL_AND_CONFIG_L2_PWDDN;
+	writel(reg, pmsu_mp_base + PMSU_CONTROL_AND_CONFIG(hw_cpu));
+
+	/* cancel Enable wakeup events and mask interrupts */
+	reg = readl(pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
+	reg &= ~(PMSU_STATUS_AND_MASK_IRQ_WAKEUP | PMSU_STATUS_AND_MASK_FIQ_WAKEUP);
+	reg &= ~PMSU_STATUS_AND_MASK_CPU_IDLE_WAIT;
+	reg &= ~PMSU_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT;
+	reg &= ~(PMSU_STATUS_AND_MASK_IRQ_MASK | PMSU_STATUS_AND_MASK_FIQ_MASK);
+	writel(reg, pmsu_mp_base + PMSU_STATUS_AND_MASK(hw_cpu));
+}
+
+static int mvebu_v7_cpu_pm_notify(struct notifier_block *self,
+				    unsigned long action, void *hcpu)
+{
+	if (action == CPU_PM_ENTER) {
+		unsigned int hw_cpu = cpu_logical_map(smp_processor_id());
+		mvebu_pmsu_set_cpu_boot_addr(hw_cpu, mvebu_cpu_resume);
+	} else if (action == CPU_PM_EXIT) {
+		mvebu_v7_pmsu_idle_exit();
 	}
 
+	return NOTIFY_OK;
+}
+
+static struct notifier_block mvebu_v7_cpu_pm_notifier = {
+	.notifier_call = mvebu_v7_cpu_pm_notify,
+};
+
+static struct mvebu_v7_cpuidle armada_370_cpuidle = {
+	.type = CPUIDLE_ARMADA_370,
+	.cpu_suspend = armada_370_xp_cpu_suspend,
+};
+
+static struct mvebu_v7_cpuidle armada_38x_cpuidle = {
+	.type = CPUIDLE_ARMADA_38X,
+	.cpu_suspend = armada_38x_cpu_suspend,
+};
+
+static struct mvebu_v7_cpuidle armada_xp_cpuidle = {
+	.type = CPUIDLE_ARMADA_XP,
+	.cpu_suspend = armada_370_xp_cpu_suspend,
+};
+
+static struct platform_device mvebu_v7_cpuidle_device = {
+	.name = "cpuidle-mvebu-v7",
+};
+
+static __init int armada_370_cpuidle_init(void)
+{
+	struct device_node *np;
+	phys_addr_t redirect_reg;
+
+	np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
+	if (!np)
+		return -ENODEV;
+	of_node_put(np);
+
+	/*
+	 * On Armada 370, there is "a slow exit process from the deep
+	 * idle state due to heavy L1/L2 cache cleanup operations
+	 * performed by the BootROM software". To avoid this, we
+	 * replace the restart code of the bootrom by a a simple jump
+	 * to the boot address. Then the code located at this boot
+	 * address will take care of the initialization.
+	 */
+	redirect_reg = pmsu_mp_phys_base + PMSU_BOOT_ADDR_REDIRECT_OFFSET(0);
+	mvebu_setup_boot_addr_wa(ARMADA_370_CRYPT0_ENG_TARGET,
+				 ARMADA_370_CRYPT0_ENG_ATTR,
+				 redirect_reg);
+
+	mvebu_cpu_resume = armada_370_xp_cpu_resume;
+	mvebu_v7_cpuidle_device.dev.platform_data = &armada_370_cpuidle;
+
+	return 0;
+}
+
+int armada_38x_cpuidle_init(void)
+{
+	struct device_node *np;
+	void __iomem *mpsoc_base;
+	u32 reg;
+
+	np = of_find_compatible_node(NULL, NULL,
+				     "marvell,armada-380-coherency-fabric");
+	if (!np)
+		return -ENODEV;
+	of_node_put(np);
+
+	np = of_find_compatible_node(NULL, NULL,
+				     "marvell,armada-380-mpcore-soc-ctrl");
+	if (!np)
+		return -ENODEV;
+	mpsoc_base = of_iomap(np, 0);
+	BUG_ON(!mpsoc_base);
+	of_node_put(np);
+
+	/* Set up reset mask when powering down the cpus */
+	reg = readl(mpsoc_base + MPCORE_RESET_CTL);
+	reg |= MPCORE_RESET_CTL_L2;
+	reg |= MPCORE_RESET_CTL_DEBUG;
+	writel(reg, mpsoc_base + MPCORE_RESET_CTL);
+	iounmap(mpsoc_base);
+
+	/* Set up delay */
+	reg = readl(pmsu_mp_base + PMSU_POWERDOWN_DELAY);
+	reg &= ~PMSU_POWERDOWN_DELAY_MASK;
+	reg |= PMSU_DFLT_ARMADA38X_DELAY;
+	reg |= PMSU_POWERDOWN_DELAY_PMU;
+	writel(reg, pmsu_mp_base + PMSU_POWERDOWN_DELAY);
+
+	mvebu_cpu_resume = armada_38x_cpu_resume;
+	mvebu_v7_cpuidle_device.dev.platform_data = &armada_38x_cpuidle;
+
+	/* PCIe deadlock WA for Armada 38x cpuidle */
+	/* CESA SRAM remap */
+	sram_wa_virt_base[0] = ioremap(0xf1100000, SZ_64);
+	sram_wa_virt_base[1] = ioremap(0xf1110000, SZ_64);
+
+	/* Disable the L2 cache clean function as it is being used in the cpu_suspend
+	 * Flow and violates the PCIe deadlock WA.
+	 * We cannot disable this function in the L2 cache driver as it will break SMP boot */
+	/* TBD - Disable cpuidle outer cache WA as it affects Suspend to RAM */
+#if 0
+	outer_cache.clean_range = NULL;
+#endif
+	return 0;
+}
+
+static __init int armada_xp_cpuidle_init(void)
+{
+	struct device_node *np;
+	np = of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric");
+	if (!np)
+		return -ENODEV;
+	of_node_put(np);
+
+	mvebu_cpu_resume = armada_370_xp_cpu_resume;
+	mvebu_v7_cpuidle_device.dev.platform_data = &armada_xp_cpuidle;
+
+	return 0;
+}
+
+static int __init mvebu_v7_cpu_pm_init(void)
+{
+	struct device_node *np;
+	int ret;
+
+	np = of_find_matching_node(NULL, of_pmsu_table);
+	if (!np)
+		return 0;
+	of_node_put(np);
+
+	if (of_machine_is_compatible("marvell,armadaxp"))
+		ret = armada_xp_cpuidle_init();
+	else if (of_machine_is_compatible("marvell,armada370"))
+		ret = armada_370_cpuidle_init();
+	else if (of_machine_is_compatible("marvell,armada38x"))
+		ret = armada_38x_cpuidle_init();
+	else
+		return 0;
+
+	if (ret)
+		return ret;
+
+	mvebu_v7_pmsu_enable_l2_powerdown_onidle();
+	platform_device_register(&mvebu_v7_cpuidle_device);
+	cpu_pm_register_notifier(&mvebu_v7_cpu_pm_notifier);
+
 	return 0;
 }
 
-early_initcall(armada_370_xp_pmsu_init);
+arch_initcall(mvebu_v7_cpu_pm_init);
+early_initcall(mvebu_v7_pmsu_init);
diff --git a/arch/arm/mach-mvebu/pmsu.h b/arch/arm/mach-mvebu/pmsu.h
index 07a737c6b95d..221f0dc02897 100644
--- a/arch/arm/mach-mvebu/pmsu.h
+++ b/arch/arm/mach-mvebu/pmsu.h
@@ -12,5 +12,11 @@
 #define __MACH_MVEBU_PMSU_H
 
 int armada_xp_boot_cpu(unsigned int cpu_id, void *phys_addr);
+int mvebu_setup_boot_addr_wa(unsigned int crypto_eng_target,
+				unsigned int crypto_eng_attribute,
+				phys_addr_t resume_addr_reg);
 
+void mvebu_v7_pmsu_idle_exit(void);
+void armada_370_xp_cpu_resume(void);
+int armada_38x_do_cpu_suspend(unsigned long deepidle);
 #endif	/* __MACH_370_XP_PMSU_H */
diff --git a/arch/arm/mach-mvebu/pmsu_ll.S b/arch/arm/mach-mvebu/pmsu_ll.S
new file mode 100644
index 000000000000..a87564f553ce
--- /dev/null
+++ b/arch/arm/mach-mvebu/pmsu_ll.S
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2014 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ * Gregory Clement <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+
+/*
+ * This is the entry point through which CPUs exiting cpuidle deep
+ * idle state are going.
+ */
+ENTRY(armada_370_xp_cpu_resume)
+ARM_BE8(setend	be )			@ go BE8 if entered LE
+	/*
+	 * Disable the MMU that might have been enabled in BootROM if
+	 * this code is used in the resume path of a suspend/resume
+	 * cycle.
+	 */
+	mrc	p15, 0, r1, c1, c0, 0
+	bic	r1, #1
+	mcr	p15, 0, r1, c1, c0, 0
+	bl	ll_add_cpu_to_smp_group
+	bl	ll_enable_coherency
+	b	cpu_resume
+ENDPROC(armada_370_xp_cpu_resume)
+
+ENTRY(armada_38x_cpu_resume)
+	/* do we need it for Armada 38x*/
+ARM_BE8(setend	be )			@ go BE8 if entered LE
+	bl	v7_invalidate_l1
+	mrc     p15, 4, r1, c15, c0	@ get SCU base address
+	orr	r1, r1, #0x8		@ SCU CPU Power Status Register
+	mrc	15, 0, r0, cr0, cr0, 5	@ get the CPU ID
+	and	r0, r0, #15
+	add	r1, r1, r0
+	mov	r2, #0x0
+	strb	r2, [r1]		@ switch SCU power state to Normal mode
+
+	/*
+	 * PCIe deadlock WA for Armada 38x cpuidle:
+	 * The saved data is copied back to the DRAM according to the saved pointer
+	 */
+	mov	r2, #0x10000
+	mul	r1, r0, r2
+	ldr	r0, =0xF1100000
+	add	r0, r0, r1		@ add CPU offset
+	ldr	r1, [r0], #4		@ get address
+	ldr	r2, [r0], #4		@ get pointer address
+	str	r2, [r1]		@ update pointer
+	ldr	r3, [r0],#4		@ ptr size
+	lsr	r3, r3 , #0x2
+1:
+	ldr	r1, [r0], #4
+	str	r1, [r2], #4
+	subs	r3, r3, #0x1
+	bne	1b
+	b	cpu_resume
+ENDPROC(armada_38x_cpu_resume)
+
+ENTRY(armada_38x_cpu_mem_resume)
+	ARM_BE8(setend	be)
+
+	/* Disable MMU that was enabled in bootROM */
+	mrc  p15, 0, r1, c1, c0, 0
+	bic  r1, #0x1
+	mcr  p15, 0, r1, c1, c0, 0
+
+	/* Restore internal registers Base address @ 0xF1000000 */
+	ldr  r1, =0xf1000000
+ARM_BE8(rev	r1, r1)
+	ldr  r0, =0xD0020080
+	str  r1, [r0]
+
+	/* Update SCU offset CP15 register */
+	ldr  r2, = 0xC000		/* SCU offset = 0xC000 */
+	add  r1, r1, r2			/* r1 = INTER_REG_BASE + SCU_OFFSET */
+	mcr  p15, 4, r1, c15, c0, 0 	/* Write SCU base register */
+
+	bl	v7_invalidate_l1
+	mrc     p15, 4, r1, c15, c0	@ get SCU base address
+	orr	r1, r1, #0x8		@ SCU CPU Power Status Register
+	mrc	15, 0, r0, cr0, cr0, 5	@ get the CPU ID
+	and	r0, r0, #15
+	add	r1, r1, r0
+	mov	r2, #0x0
+	strb	r2, [r1]		@ switch SCU power state to Normal mode
+
+	b	cpu_resume
+
+ENDPROC(armada_38x_cpu_mem_resume)
+
+#define GPIO_32_47_DATA_OUT_EN_CTRL_REG_ADDR	(0x144)	/* 0x18144 */
+#define GPIO_32_47_DATA_OUT_REG_ADDR		(0x140) /* 0x18140 */
+#define GPIO_CMD_VALUE				(0x2)
+#define GPIO_ACK_VALUE				(0xE)
+#define GPIO_PIN_MASK				(0xFFFFFFFE)
+#define SDRAM_DLB_EVICT_OFFS_REG		(0x30C)	/* 0x170C */
+#define SDRAM_OPERATION_REG			(0x18)	/* 0x1418 */
+#define SDRAM_CONFIG_REG			(0x00)	/* 0x1400 */
+
+ENTRY(enter_mem_suspend)
+	/* Save ARM registers */
+	stmfd	sp!, {r4-r12, lr}		@ save registers on stack
+	/*
+	* Issue a Data Synchronization Barrier instruction to ensure
+	* that all state saving has been completed.
+	*/
+	dsb
+
+	/* Flush the DLB and wait ~7 usec */
+	/* Clear bits 7:0 in 'DLB Eviction Control Register', 0x170C */
+	/* Flush the DLB and wait ~7 usec*/
+	ldr r2, =SDRAM_DLB_EVICT_OFFS_REG
+	orr	r2, r2, r0
+	ldr	r3, [r2]
+ARM_BE8(rev	r3, r3)
+	bic     r3, #0x000000FF
+ARM_BE8(rev	r3, r3)
+	str     r3, [r2]
+
+	/* Wait ~7 us */
+	ldr r2, = 6000  /* WC freq =  1.6 Ghz, 2 cycles per loop */
+1:	subs r2,r2,#1
+	bne 1b
+
+	/* Set DRAM in battery backup mode */
+	/* Clear bit 24 in 'SDRAM Configuration Register', 0x1400 */
+	ldr 	r2, =SDRAM_CONFIG_REG
+	orr	r2, r2, r0
+	ldr	r3, [r2]
+ARM_BE8(rev	r3, r3)
+	bic     r3, #0x01000000
+ARM_BE8(rev	r3, r3)
+	str     r3, [r2]
+
+	/* Prepare to go to self-refresh */
+	/* Involves writing 0x7 to 'SDRAM Operation Register', 0x1418 */
+	ldr	r2, =(SDRAM_OPERATION_REG)
+	orr	r2, r2, r0
+	ldr	r3, [r2]
+ARM_BE8(rev	r3, r3)
+	ldr	r4, =0x00000007
+	orr	r3, r3, r4
+ARM_BE8(rev	r3, r3)
+
+	/* Configure GPIOs 33-35 for communicating with PIC */
+	/* Prepare command value for GPIOs 33-35 */
+	ldr r4, =(GPIO_32_47_DATA_OUT_REG_ADDR)
+	orr r4, r4, r1
+	ldr r5, =(GPIO_PIN_MASK)
+	ldr r6, =(GPIO_CMD_VALUE)
+	ldr r7, [r4]
+ARM_BE8(rev r7, r7)
+	and r7, r7, r5
+	orr r7, r7, r6
+ARM_BE8(rev r7, r7)
+
+	/* Set GPIO 33-35 as out */
+	ldr r0, =(GPIO_32_47_DATA_OUT_EN_CTRL_REG_ADDR)
+	orr r0, r0, r1
+	ldr r6, [r0]
+ARM_BE8(rev r6, r6)
+	and r6, r6, r5
+ARM_BE8(rev r6, r6)
+	str r6, [r0]
+
+	/* Issue the cmd */
+	str r7, [r4]
+
+	/* Prepare the cmd ack */
+	ldr r6, =(GPIO_ACK_VALUE)
+ARM_BE8(rev r7, r7)
+	orr r7, r7, r6
+ARM_BE8(rev r7, r7)
+
+	/*
+	 * Wait between cmd (0x1) and cmd ack (0x7)
+	 */
+	ldr r1, =1000000000
+1:	subs r1,r1,#1
+	bne 1b
+
+	/*
+	 * Put Dram into self refresh. From here on we can perform
+	 * 8 instructions to ensure execution from I-Cache
+	 */
+	.align 5
+	/* Enter self-refresh */
+	str	r3, [r2]
+
+	/* Wait 100 cycles for DDR to enter self refresh */
+	ldr r1, = 50
+1:	subs r1,r1,#1
+	bne 1b
+
+	/* Issue the cmd ack. This will turn of the board */
+	str r7, [r4]
+
+	/* trap the processor */
+	b .
+	/* Wait a while */
+ENDPROC(enter_mem_suspend)
+
+.global mvebu_boot_wa_start
+.global mvebu_boot_wa_end
+
+/* The following code will be executed from SRAM */
+ENTRY(mvebu_boot_wa_start)
+mvebu_boot_wa_start:
+ARM_BE8(setend	be)
+	adr	r0, 1f
+	ldr	r0, [r0]		@ load the address of the
+					@ resume register
+	ldr	r0, [r0]		@ load the value in the
+					@ resume register
+ARM_BE8(rev	r0, r0)			@ the value is stored LE
+	mov	pc, r0			@ jump to this value
+/*
+ * the last word of this piece of code will be filled by the physical
+ * address of the boot address register just after being copied in SRAM
+ */
+1:
+	.long   .
+mvebu_boot_wa_end:
+ENDPROC(mvebu_boot_wa_end)
diff --git a/arch/arm/mach-mvebu/serdes.c b/arch/arm/mach-mvebu/serdes.c
new file mode 100644
index 000000000000..0b37b5d31941
--- /dev/null
+++ b/arch/arm/mach-mvebu/serdes.c
@@ -0,0 +1,165 @@
+/*
+ * SerDes Shutdown platform driver .
+ *
+ * Power down all related SerDes interfaces: SerDes Lanes, PCIe/USB3.0 Pipes, PCIe Ref Clocks, and PON SerDes
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Omri Itach <omrii@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ */
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+
+static struct of_device_id of_ip_config_table[] = {
+	{.compatible = "marvell,armada-375-ip-configuration"},
+	{ /* end of list */ },
+};
+
+#define PCIE_REF_CLK_CTRL_REG		0xf0
+#define REF_CLK_REG_PCIE0_POWERUP	BIT(0)
+#define REF_CLK_REG_PCIE1_POWERUP	BIT(1)
+
+#define GPON_PHY_CTRL0_REG		0xf4
+#define GPON_CFG_REG_PLL		BIT(0)
+#define GPON_CFG_REG_RX			BIT(1)
+#define GPON_CFG_REG_TX			BIT(2)
+#define GPON_CFG_REG_PLL_RX_TX_MASK	(GPON_CFG_REG_PLL | GPON_CFG_REG_RX | GPON_CFG_REG_TX)
+#define GPON_CFG_REG_RESET_PIN		BIT(3)
+#define GPON_CFG_REG_RESET_TX		BIT(4)
+#define GPON_CFG_REG_RESET_CORE		BIT(5)
+#define GPON_CFG_REG_RESET_MASK		(GPON_CFG_REG_RESET_PIN | GPON_CFG_REG_RESET_TX | GPON_CFG_REG_RESET_CORE)
+
+/* IP configuration: Power down PCIe0/1 Ref clock & PON PHY */
+static void mvebu_powerdown_ip_configuration(void)
+{
+	struct device_node *np = NULL;
+	static void __iomem *ip_base;
+	u32 ip_cfg;
+
+	np = of_find_matching_node(NULL, of_ip_config_table);
+	if (!np)
+		return;
+
+	ip_base = of_iomap(np, 0);
+	BUG_ON(!ip_base);
+
+	/* PCie0/1 Ref CLK */
+	ip_cfg = readl(ip_base + PCIE_REF_CLK_CTRL_REG);
+	ip_cfg &= ~(REF_CLK_REG_PCIE0_POWERUP | REF_CLK_REG_PCIE1_POWERUP);	/* Power down PCIe-0/1 Ref clocks */
+	writel(ip_cfg, ip_base + PCIE_REF_CLK_CTRL_REG);
+
+	/* PON PHY */
+	ip_cfg = readl(ip_base + GPON_PHY_CTRL0_REG);
+	ip_cfg &= ~GPON_CFG_REG_PLL_RX_TX_MASK;	/* Power down PLL, TX, RX */
+	ip_cfg |= GPON_CFG_REG_RESET_MASK;	/* PHY Reset, TX reset, Reset Core */
+	writel(ip_cfg, ip_base + GPON_PHY_CTRL0_REG);
+
+	iounmap(ip_base);
+}
+
+
+static struct of_device_id of_serdes_pipe_config_table[] = {
+	{.compatible = "marvell,armada-375-serdes-pipe-configuration"},
+	{ /* end of list */ },
+};
+
+#define PIPE_PM_OVERRIDE		BIT(1)
+
+/* Power down SerDes through Pipes (PCIe/USB3.0): PM-OVERRIDE */
+static void mvebu_powerdown_pcie_usb_serdes(int phy_number)
+{
+	struct device_node *np = NULL;
+	static void __iomem *pipe_base;
+
+	/* find pipe configuration node for requested PHY*/
+	np = of_find_matching_node(np, of_serdes_pipe_config_table);
+	if (!np)
+		return;
+
+	pipe_base = of_iomap(np, phy_number);
+	BUG_ON(!pipe_base);
+
+	writel((readl(pipe_base) | PIPE_PM_OVERRIDE) , pipe_base);
+	iounmap(pipe_base);
+}
+
+
+static struct of_device_id of_phy_config_table[] = {
+	{.compatible = "marvell,armada-375-common-phy-configuration"},
+	{ /* end of list */ },
+};
+
+#define COMMON_PHY_CFG_REG(phy_num)	(0x4 * phy_num)
+#define PHY_CFG_REG_POWER_UP_IVREF	BIT(1)
+#define PHY_CFG_REG_PIPE_SELECT		BIT(2)
+#define PHY_CFG_REG_POWER_UP_PLL	BIT(16)
+#define PHY_CFG_REG_POWER_UP_TX		BIT(17)
+#define PHY_CFG_REG_POWER_UP_RX		BIT(18)
+#define PHY_CFG_REG_POWER_UP_MASK	(PHY_CFG_REG_POWER_UP_IVREF | PHY_CFG_REG_POWER_UP_PLL |\
+					PHY_CFG_REG_POWER_UP_TX | PHY_CFG_REG_POWER_UP_RX)
+
+/* Power down SerDes Lanes & relevant Pipes: COMMON-PHY configuration */
+static void mvebu_powerdown_serdes_lanes(void)
+{
+	struct device_node *np = NULL;
+	static void __iomem *phy_cfg_base;
+	u32 phy_cfg, phy_count, i;
+
+	np = of_find_matching_node(NULL, of_phy_config_table);
+	if (!np)
+		return;
+
+	phy_cfg_base = of_iomap(np, 0);
+	BUG_ON(!phy_cfg_base);
+	of_property_read_u32(np, "phy-count", &phy_count);
+
+	for (i = 0; i < phy_count; i++) {
+		phy_cfg = readl(phy_cfg_base + COMMON_PHY_CFG_REG(i));
+
+		/* if Lane is Piped to USB3.0 / PCIe */
+		if (phy_cfg & PHY_CFG_REG_PIPE_SELECT)
+			mvebu_powerdown_pcie_usb_serdes(i);
+
+		phy_cfg &= ~PHY_CFG_REG_POWER_UP_MASK; /* Power down: IVREF, PLL, RX, TX */
+		writel(phy_cfg, phy_cfg_base + COMMON_PHY_CFG_REG(i));
+	}
+	iounmap(phy_cfg_base);
+}
+
+void mvebu_serdes_shutdown(struct platform_device *pdev)
+{
+	mvebu_powerdown_serdes_lanes();
+	mvebu_powerdown_ip_configuration();
+}
+
+static struct platform_driver mvebu_serdes_driver = {
+	.shutdown = mvebu_serdes_shutdown,
+	.driver = {
+		.name = "common-phy-configuration",
+		.owner	= THIS_MODULE,
+		.of_match_table = of_match_ptr(of_phy_config_table),
+	},
+};
+
+static int __init mvebu_serdes_init(void)
+{
+	/* if common-phy node exists, register driver (needed for SerDes configuration) */
+	if (!of_find_matching_node(NULL, of_phy_config_table))
+		return 0;
+
+	return platform_driver_register(&mvebu_serdes_driver);
+}
+
+static void __exit mvebu_serdes_exit(void)
+{
+	platform_driver_unregister(&mvebu_serdes_driver);
+}
+
+module_init(mvebu_serdes_init);
+module_exit(mvebu_serdes_exit);
diff --git a/arch/arm/mach-mvebu/soft-poweroff.c b/arch/arm/mach-mvebu/soft-poweroff.c
new file mode 100644
index 000000000000..15fc1a4c817b
--- /dev/null
+++ b/arch/arm/mach-mvebu/soft-poweroff.c
@@ -0,0 +1,115 @@
+/*
+ * Soft power off support for Armada 375 platforms.
+ *
+ * this file adds WFI state support, with wakeup via UART/NETA IRQ and reset machine
+ * usage : 'echo 1 > /sys/devices/platform/mv_power_wol/soft_power_idle'
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Omri Itach <omrii@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ */
+
+#include <linux/of_platform.h>
+#include <linux/reboot.h>
+#include <linux/perf_event.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/irq.h>
+
+#define IRQ_PRIV_GIC_UART_IRQ		44
+#define IRQ_PRIV_GIC_GBE0_WOL_IRQ	112
+#define NR_GIC_IRQ			118
+
+static void soft_poweroff_unmask_irq(unsigned int irq_number)
+{
+	struct irq_data *irqd = irq_get_irq_data(irq_number);
+
+	if (irqd && irqd->chip && irqd->chip->irq_unmask)
+		irqd->chip->irq_unmask(irqd);
+}
+
+static void soft_poweroff_mask_all_irq(void)
+{
+	struct irq_data *irqd;
+	int i;
+
+	for (i = 0; i < NR_GIC_IRQ; i++) {
+		irqd = irq_get_irq_data(i);
+		if (irqd && irqd->chip && irqd->chip->irq_unmask)
+			irqd->chip->irq_mask(irqd);
+	}
+}
+
+static void soft_poweroff_pm_power_off(void)
+{
+
+	local_irq_disable();
+	soft_poweroff_mask_all_irq();
+	soft_poweroff_unmask_irq(IRQ_PRIV_GIC_UART_IRQ);
+	soft_poweroff_unmask_irq(IRQ_PRIV_GIC_GBE0_WOL_IRQ);
+
+	pr_info("\nEntering WFI state..\n");
+	wfi();
+	pr_info("\nRecovered from WFI state..\nRestarting machine.\n");
+
+	machine_restart(NULL);
+}
+
+static ssize_t soft_poweroff(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	int a = 0, err = 0, ret = 0;
+	struct pid_namespace *pid_ns = task_active_pid_ns(current);
+
+	sscanf(buf, "%x", &a);
+
+	if (a == 1) {
+		pr_info("\n pm_power_off() Enabled.. simulating Syscall 'poweroff' and entering WFI state");
+		pm_power_off = soft_poweroff_pm_power_off;
+		ret = reboot_pid_ns(pid_ns, LINUX_REBOOT_CMD_POWER_OFF);
+		if (ret)
+			return ret;
+		kernel_power_off();
+		do_exit(0);
+	} else if (a == 2) {
+		pr_info("\n pm_power_off() Enabled --> use 'poweroff' Syscall to enter WFI state");
+		pm_power_off = soft_poweroff_pm_power_off;
+	}
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(soft_poweroff, 00200 , NULL, soft_poweroff);
+
+static struct attribute *mv_power_wol_attrs[] = {
+	&dev_attr_soft_poweroff.attr,
+	NULL
+};
+
+static struct attribute_group mv_power_wol_group = {
+	.name = "mv_power_wol",
+	.attrs = mv_power_wol_attrs,
+};
+
+int __init power_idle_sysfs_init(void)
+{
+		int err;
+		struct device *pd;
+
+		pd = &platform_bus;
+
+		err = sysfs_create_group(&pd->kobj, &mv_power_wol_group);
+		if (err) {
+			pr_info("power idle sysfs group failed %d\n", err);
+			goto out;
+		}
+out:
+		return err;
+}
+
+module_init(power_idle_sysfs_init);
diff --git a/arch/arm/mach-mvebu/system-controller.c b/arch/arm/mach-mvebu/system-controller.c
index b8079df8c986..addfc8835861 100644
--- a/arch/arm/mach-mvebu/system-controller.c
+++ b/arch/arm/mach-mvebu/system-controller.c
@@ -35,6 +35,8 @@ struct mvebu_system_controller {
 
 	u32 rstoutn_mask_reset_out_en;
 	u32 system_soft_reset;
+
+	u32 resume_boot_addr;
 };
 static struct mvebu_system_controller *mvebu_sc;
 
@@ -45,6 +47,14 @@ const struct mvebu_system_controller armada_370_xp_system_controller = {
 	.system_soft_reset = 0x1,
 };
 
+const struct mvebu_system_controller armada_375_system_controller = {
+	.rstoutn_mask_offset = 0x54,
+	.system_soft_reset_offset = 0x58,
+	.rstoutn_mask_reset_out_en = 0x1,
+	.system_soft_reset = 0x1,
+	.resume_boot_addr = 0xd4,
+};
+
 const struct mvebu_system_controller orion_system_controller = {
 	.rstoutn_mask_offset = 0x108,
 	.system_soft_reset_offset = 0x10c,
@@ -59,6 +69,17 @@ static struct of_device_id of_system_controller_table[] = {
 	}, {
 		.compatible = "marvell,armada-370-xp-system-controller",
 		.data = (void *) &armada_370_xp_system_controller,
+	}, {
+		.compatible = "marvell,armada-375-system-controller",
+		.data = (void *) &armada_375_system_controller,
+	}, {
+		/*
+		 * As far as RSTOUTn and System soft reset registers
+		 * are concerned, Armada 38x is similar to Armada
+		 * 370/XP
+		 */
+		.compatible = "marvell,armada-380-system-controller",
+		.data = (void *) &armada_370_xp_system_controller,
 	},
 	{ /* end of list */ },
 };
@@ -86,6 +107,15 @@ void mvebu_restart(char mode, const char *cmd)
 		;
 }
 
+#ifdef CONFIG_SMP
+void armada_375_set_bootaddr(void *boot_addr)
+{
+	WARN_ON(system_controller_base == NULL);
+	writel(virt_to_phys(boot_addr), system_controller_base +
+	       mvebu_sc->resume_boot_addr);
+}
+#endif
+
 static int __init mvebu_system_controller_init(void)
 {
 	struct device_node *np;
@@ -93,7 +123,7 @@ static int __init mvebu_system_controller_init(void)
 	np = of_find_matching_node(NULL, of_system_controller_table);
 	if (np) {
 		const struct of_device_id *match =
-		    of_match_node(of_system_controller_table, np);
+			of_match_node(of_system_controller_table, np);
 		BUG_ON(!match);
 		system_controller_base = of_iomap(np, 0);
 		mvebu_sc = (struct mvebu_system_controller *)match->data;
@@ -102,4 +132,4 @@ static int __init mvebu_system_controller_init(void)
 	return 0;
 }
 
-arch_initcall(mvebu_system_controller_init);
+early_initcall(mvebu_system_controller_init);
diff --git a/arch/arm/mach-mvebu/usb-cluster.c b/arch/arm/mach-mvebu/usb-cluster.c
new file mode 100644
index 000000000000..0fa03ce634cf
--- /dev/null
+++ b/arch/arm/mach-mvebu/usb-cluster.c
@@ -0,0 +1,96 @@
+/*
+ * USB cluster support for Armada 375 platform.
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Armada 375 comes with an USB2 host and device controller and an
+ * USB3 controller. The USB cluster control register allows to manage
+ * common features of both USB controller.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#define USB2_PHY_CONFIG_ENABLE BIT(0) /* active low */
+
+static struct of_device_id of_usb_cluster_table[] = {
+	{ .compatible = "marvell,armada-375-usb-cluster", },
+	{ /* end of list */ },
+};
+
+static int __init mvebu_usb_cluster_init(void)
+{
+	struct device_node *np;
+
+	np = of_find_matching_node(NULL, of_usb_cluster_table);
+	if (np) {
+		void __iomem *usb_cluster_base;
+		u32 reg;
+		struct device_node *ehci_node, *xhci_node;
+		struct property *ehci_status;
+		bool use_usb3 = false;
+
+		usb_cluster_base = of_iomap(np, 0);
+		BUG_ON(!usb_cluster_base);
+
+		xhci_node = of_find_compatible_node(NULL, NULL,
+						"marvell,xhci-armada-375");
+
+		if (xhci_node && of_device_is_available(xhci_node))
+			use_usb3 = true;
+
+		ehci_node = of_find_compatible_node(NULL, NULL,
+						"marvell,orion-ehci");
+
+		if (ehci_node && of_device_is_available(ehci_node)
+			&& use_usb3) {
+			/*
+			 * We can't use the first usb2 unit and usb3 in the same time, so let's
+			 * disbale usb2 and complain about it to the user asking
+			 * to fix the device tree.
+			 */
+
+			ehci_status = kzalloc(sizeof(struct property),
+					GFP_KERNEL);
+			WARN_ON(!ehci_status);
+
+			ehci_status->value = kstrdup("disabled", GFP_KERNEL);
+			WARN_ON(!ehci_status->value);
+
+			ehci_status->length = 8;
+			ehci_status->name = kstrdup("status", GFP_KERNEL);
+			WARN_ON(!ehci_status->name);
+
+			of_update_property(ehci_node, ehci_status);
+			pr_err("%s: xhci-armada-375 and orion-ehci are incompatible for this SoC.\n",
+				__func__);
+			pr_err("Please fix your dts!\n");
+			pr_err("orion-ehci have been disabled by default...\n");
+
+		}
+
+		reg = readl(usb_cluster_base);
+		if (use_usb3)
+			reg |= USB2_PHY_CONFIG_ENABLE;
+		else
+			reg &= ~USB2_PHY_CONFIG_ENABLE;
+		writel(reg, usb_cluster_base);
+
+		of_node_put(ehci_node);
+		of_node_put(xhci_node);
+		of_node_put(np);
+		iounmap(usb_cluster_base);
+	}
+
+	return 0;
+}
+postcore_initcall(mvebu_usb_cluster_init);
diff --git a/arch/arm/mach-mvebu/usb-utmi.c b/arch/arm/mach-mvebu/usb-utmi.c
new file mode 100644
index 000000000000..ec67fa5f66f6
--- /dev/null
+++ b/arch/arm/mach-mvebu/usb-utmi.c
@@ -0,0 +1,64 @@
+/*
+ * USB UTMI support for Armada 38x platform.
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#define USB_UTMI_PHY_CTRL_STATUS(i)	(0x20+i*4)
+#define		USB_UTMI_TX_BITSTUFF_EN BIT(1)
+#define		USB_UTMI_PU_PHY			BIT(5)
+#define		USB_UTMI_VBUS_ON_PHY	BIT(6)
+
+static struct of_device_id of_usb_utmi_table[] = {
+	{ .compatible = "marvell,armada-380-usb-utmi", },
+	{ /* end of list */ },
+};
+
+static int __init mvebu_usb_utmi_init(void)
+{
+	struct device_node *np;
+
+	np = of_find_matching_node(NULL, of_usb_utmi_table);
+	if (np) {
+		void __iomem *usb_utmi_base, *utmi_base;
+
+		usb_utmi_base = of_iomap(np, 0);
+		BUG_ON(!usb_utmi_base);
+
+		utmi_base = of_iomap(np, 1);
+		BUG_ON(!utmi_base);
+
+		writel(USB_UTMI_TX_BITSTUFF_EN |
+			   USB_UTMI_PU_PHY |
+			   USB_UTMI_VBUS_ON_PHY,
+			   usb_utmi_base + USB_UTMI_PHY_CTRL_STATUS(0));
+
+		/*
+		 * Magic init... the registers and their value are not
+		 * documented
+		 */
+		writel(0x40605205, utmi_base);
+		writel(0x409, utmi_base + 4);
+		writel(0x1be7f6f, utmi_base + 0xc);
+
+		of_node_put(np);
+		iounmap(utmi_base);
+		iounmap(usb_utmi_base);
+	}
+
+	return 0;
+}
+postcore_initcall(mvebu_usb_utmi_init);
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index f8a6db9239bf..78985692135d 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -174,8 +174,10 @@ void __init orion5x_xor_init(void)
  ****************************************************************************/
 static void __init orion5x_crypto_init(void)
 {
-	mvebu_mbus_add_window("sram", ORION5X_SRAM_PHYS_BASE,
-			      ORION5X_SRAM_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_SRAM_TARGET,
+				    ORION_MBUS_SRAM_ATTR,
+				    ORION5X_SRAM_PHYS_BASE,
+				    ORION5X_SRAM_SIZE);
 	orion_crypto_init(ORION5X_CRYPTO_PHYS_BASE, ORION5X_SRAM_PHYS_BASE,
 			  SZ_8K, IRQ_ORION5X_CESA);
 }
@@ -222,22 +224,24 @@ void orion5x_setup_wins(void)
 	 * The PCIe windows will no longer be statically allocated
 	 * here once Orion5x is migrated to the pci-mvebu driver.
 	 */
-	mvebu_mbus_add_window_remap_flags("pcie0.0", ORION5X_PCIE_IO_PHYS_BASE,
+	mvebu_mbus_add_window_remap_by_id(ORION_MBUS_PCIE_IO_TARGET,
+					  ORION_MBUS_PCIE_IO_ATTR,
+					  ORION5X_PCIE_IO_PHYS_BASE,
 					  ORION5X_PCIE_IO_SIZE,
-					  ORION5X_PCIE_IO_BUS_BASE,
-					  MVEBU_MBUS_PCI_IO);
-	mvebu_mbus_add_window_remap_flags("pcie0.0", ORION5X_PCIE_MEM_PHYS_BASE,
-					  ORION5X_PCIE_MEM_SIZE,
-					  MVEBU_MBUS_NO_REMAP,
-					  MVEBU_MBUS_PCI_MEM);
-	mvebu_mbus_add_window_remap_flags("pci0.0", ORION5X_PCI_IO_PHYS_BASE,
+					  ORION5X_PCIE_IO_BUS_BASE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_PCIE_MEM_TARGET,
+				    ORION_MBUS_PCIE_MEM_ATTR,
+				    ORION5X_PCIE_MEM_PHYS_BASE,
+				    ORION5X_PCIE_MEM_SIZE);
+	mvebu_mbus_add_window_remap_by_id(ORION_MBUS_PCI_IO_TARGET,
+					  ORION_MBUS_PCI_IO_ATTR,
+					  ORION5X_PCI_IO_PHYS_BASE,
 					  ORION5X_PCI_IO_SIZE,
-					  ORION5X_PCI_IO_BUS_BASE,
-					  MVEBU_MBUS_PCI_IO);
-	mvebu_mbus_add_window_remap_flags("pci0.0", ORION5X_PCI_MEM_PHYS_BASE,
-					  ORION5X_PCI_MEM_SIZE,
-					  MVEBU_MBUS_NO_REMAP,
-					  MVEBU_MBUS_PCI_MEM);
+					  ORION5X_PCI_IO_BUS_BASE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_PCI_MEM_TARGET,
+				    ORION_MBUS_PCI_MEM_ATTR,
+				    ORION5X_PCI_MEM_PHYS_BASE,
+				    ORION5X_PCI_MEM_SIZE);
 }
 
 int orion5x_tclk;
diff --git a/arch/arm/mach-orion5x/common.h b/arch/arm/mach-orion5x/common.h
index cdaa01f3d186..127157c5a1a2 100644
--- a/arch/arm/mach-orion5x/common.h
+++ b/arch/arm/mach-orion5x/common.h
@@ -5,6 +5,23 @@ struct dsa_platform_data;
 struct mv643xx_eth_platform_data;
 struct mv_sata_platform_data;
 
+#define ORION_MBUS_PCIE_MEM_TARGET    0x04
+#define ORION_MBUS_PCIE_MEM_ATTR      0x59
+#define ORION_MBUS_PCIE_IO_TARGET     0x04
+#define ORION_MBUS_PCIE_IO_ATTR       0x51
+#define ORION_MBUS_PCIE_WA_TARGET     0x04
+#define ORION_MBUS_PCIE_WA_ATTR       0x79
+#define ORION_MBUS_PCI_MEM_TARGET     0x03
+#define ORION_MBUS_PCI_MEM_ATTR       0x59
+#define ORION_MBUS_PCI_IO_TARGET      0x03
+#define ORION_MBUS_PCI_IO_ATTR        0x51
+#define ORION_MBUS_DEVBUS_BOOT_TARGET 0x01
+#define ORION_MBUS_DEVBUS_BOOT_ATTR   0x0f
+#define ORION_MBUS_DEVBUS_TARGET(cs)  0x01
+#define ORION_MBUS_DEVBUS_ATTR(cs)    (~(1 << cs))
+#define ORION_MBUS_SRAM_TARGET        0x00
+#define ORION_MBUS_SRAM_ATTR          0x00
+
 /*
  * Basic Orion init functions used early by machine-setup.
  */
diff --git a/arch/arm/mach-orion5x/d2net-setup.c b/arch/arm/mach-orion5x/d2net-setup.c
index 16c88bbabc98..8f68b745c1d5 100644
--- a/arch/arm/mach-orion5x/d2net-setup.c
+++ b/arch/arm/mach-orion5x/d2net-setup.c
@@ -317,8 +317,10 @@ static void __init d2net_init(void)
 	d2net_sata_power_init();
 	orion5x_sata_init(&d2net_sata_data);
 
-	mvebu_mbus_add_window("devbus-boot", D2NET_NOR_BOOT_BASE,
-			      D2NET_NOR_BOOT_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
+				    ORION_MBUS_DEVBUS_BOOT_ATTR,
+				    D2NET_NOR_BOOT_BASE,
+				    D2NET_NOR_BOOT_SIZE);
 	platform_device_register(&d2net_nor_flash);
 
 	platform_device_register(&d2net_gpio_buttons);
diff --git a/arch/arm/mach-orion5x/db88f5281-setup.c b/arch/arm/mach-orion5x/db88f5281-setup.c
index 4e1263da38bb..4b2aefd1d961 100644
--- a/arch/arm/mach-orion5x/db88f5281-setup.c
+++ b/arch/arm/mach-orion5x/db88f5281-setup.c
@@ -340,19 +340,27 @@ static void __init db88f5281_init(void)
 	orion5x_uart0_init();
 	orion5x_uart1_init();
 
-	mvebu_mbus_add_window("devbus-boot", DB88F5281_NOR_BOOT_BASE,
-			      DB88F5281_NOR_BOOT_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
+				    ORION_MBUS_DEVBUS_BOOT_ATTR,
+				    DB88F5281_NOR_BOOT_BASE,
+				    DB88F5281_NOR_BOOT_SIZE);
 	platform_device_register(&db88f5281_boot_flash);
 
-	mvebu_mbus_add_window("devbus-cs0", DB88F5281_7SEG_BASE,
-			      DB88F5281_7SEG_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_TARGET(0),
+				    ORION_MBUS_DEVBUS_ATTR(0),
+				    DB88F5281_7SEG_BASE,
+				    DB88F5281_7SEG_SIZE);
 
-	mvebu_mbus_add_window("devbus-cs1", DB88F5281_NOR_BASE,
-			      DB88F5281_NOR_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_TARGET(1),
+				    ORION_MBUS_DEVBUS_ATTR(1),
+				    DB88F5281_NOR_BASE,
+				    DB88F5281_NOR_SIZE);
 	platform_device_register(&db88f5281_nor_flash);
 
-	mvebu_mbus_add_window("devbus-cs2", DB88F5281_NAND_BASE,
-			      DB88F5281_NAND_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_TARGET(2),
+				    ORION_MBUS_DEVBUS_ATTR(2),
+				    DB88F5281_NAND_BASE,
+				    DB88F5281_NAND_SIZE);
 	platform_device_register(&db88f5281_nand_flash);
 
 	i2c_register_board_info(0, &db88f5281_i2c_rtc, 1);
diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
index 9e6baf581ed3..70974732cbf0 100644
--- a/arch/arm/mach-orion5x/dns323-setup.c
+++ b/arch/arm/mach-orion5x/dns323-setup.c
@@ -611,8 +611,10 @@ static void __init dns323_init(void)
 	/* setup flash mapping
 	 * CS3 holds a 8 MB Spansion S29GL064M90TFIR4
 	 */
-	mvebu_mbus_add_window("devbus-boot", DNS323_NOR_BOOT_BASE,
-			      DNS323_NOR_BOOT_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
+				    ORION_MBUS_DEVBUS_BOOT_ATTR,
+				    DNS323_NOR_BOOT_BASE,
+				    DNS323_NOR_BOOT_SIZE);
 	platform_device_register(&dns323_nor_flash);
 
 	/* Sort out LEDs, Buttons and i2c devices */
diff --git a/arch/arm/mach-orion5x/edmini_v2-setup.c b/arch/arm/mach-orion5x/edmini_v2-setup.c
index 147615510dd0..0fc33c56cbb7 100644
--- a/arch/arm/mach-orion5x/edmini_v2-setup.c
+++ b/arch/arm/mach-orion5x/edmini_v2-setup.c
@@ -154,8 +154,10 @@ void __init edmini_v2_init(void)
 	orion5x_ehci0_init();
 	orion5x_eth_init(&edmini_v2_eth_data);
 
-	mvebu_mbus_add_window("devbus-boot", EDMINI_V2_NOR_BOOT_BASE,
-			      EDMINI_V2_NOR_BOOT_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
+				    ORION_MBUS_DEVBUS_BOOT_ATTR,
+				    EDMINI_V2_NOR_BOOT_BASE,
+				    EDMINI_V2_NOR_BOOT_SIZE);
 	platform_device_register(&edmini_v2_nor_flash);
 
 	pr_notice("edmini_v2: USB device port, flash write and power-off "
diff --git a/arch/arm/mach-orion5x/kurobox_pro-setup.c b/arch/arm/mach-orion5x/kurobox_pro-setup.c
index aae10e4a917c..fe6a48a325e8 100644
--- a/arch/arm/mach-orion5x/kurobox_pro-setup.c
+++ b/arch/arm/mach-orion5x/kurobox_pro-setup.c
@@ -359,13 +359,17 @@ static void __init kurobox_pro_init(void)
 	orion5x_uart1_init();
 	orion5x_xor_init();
 
-	mvebu_mbus_add_window("devbus-boot", KUROBOX_PRO_NOR_BOOT_BASE,
-			      KUROBOX_PRO_NOR_BOOT_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
+				    ORION_MBUS_DEVBUS_BOOT_ATTR,
+				    KUROBOX_PRO_NOR_BOOT_BASE,
+				    KUROBOX_PRO_NOR_BOOT_SIZE);
 	platform_device_register(&kurobox_pro_nor_flash);
 
 	if (machine_is_kurobox_pro()) {
-		mvebu_mbus_add_window("devbus-cs0", KUROBOX_PRO_NAND_BASE,
-				      KUROBOX_PRO_NAND_SIZE);
+		mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_TARGET(0),
+					    ORION_MBUS_DEVBUS_ATTR(0),
+					    KUROBOX_PRO_NAND_BASE,
+					    KUROBOX_PRO_NAND_SIZE);
 		platform_device_register(&kurobox_pro_nand_flash);
 	}
 
diff --git a/arch/arm/mach-orion5x/ls-chl-setup.c b/arch/arm/mach-orion5x/ls-chl-setup.c
index 24f4e14e5893..75cf340581b3 100644
--- a/arch/arm/mach-orion5x/ls-chl-setup.c
+++ b/arch/arm/mach-orion5x/ls-chl-setup.c
@@ -294,8 +294,10 @@ static void __init lschl_init(void)
 	orion5x_uart0_init();
 	orion5x_xor_init();
 
-	mvebu_mbus_add_window("devbus-boot", LSCHL_NOR_BOOT_BASE,
-			      LSCHL_NOR_BOOT_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
+				    ORION_MBUS_DEVBUS_BOOT_ATTR,
+				    LSCHL_NOR_BOOT_BASE,
+				    LSCHL_NOR_BOOT_SIZE);
 	platform_device_register(&lschl_nor_flash);
 
 	platform_device_register(&lschl_leds);
diff --git a/arch/arm/mach-orion5x/ls_hgl-setup.c b/arch/arm/mach-orion5x/ls_hgl-setup.c
index fc653bb41e78..013310a3bed8 100644
--- a/arch/arm/mach-orion5x/ls_hgl-setup.c
+++ b/arch/arm/mach-orion5x/ls_hgl-setup.c
@@ -243,8 +243,10 @@ static void __init ls_hgl_init(void)
 	orion5x_uart0_init();
 	orion5x_xor_init();
 
-	mvebu_mbus_add_window("devbus-boot", LS_HGL_NOR_BOOT_BASE,
-			      LS_HGL_NOR_BOOT_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
+				    ORION_MBUS_DEVBUS_BOOT_ATTR,
+				    LS_HGL_NOR_BOOT_BASE,
+				    LS_HGL_NOR_BOOT_SIZE);
 	platform_device_register(&ls_hgl_nor_flash);
 
 	platform_device_register(&ls_hgl_button_device);
diff --git a/arch/arm/mach-orion5x/lsmini-setup.c b/arch/arm/mach-orion5x/lsmini-setup.c
index 18e66e617dc2..e34c32658f01 100644
--- a/arch/arm/mach-orion5x/lsmini-setup.c
+++ b/arch/arm/mach-orion5x/lsmini-setup.c
@@ -244,8 +244,10 @@ static void __init lsmini_init(void)
 	orion5x_uart0_init();
 	orion5x_xor_init();
 
-	mvebu_mbus_add_window("devbus-boot", LSMINI_NOR_BOOT_BASE,
-			      LSMINI_NOR_BOOT_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
+				    ORION_MBUS_DEVBUS_BOOT_ATTR,
+				    LSMINI_NOR_BOOT_BASE,
+				    LSMINI_NOR_BOOT_SIZE);
 	platform_device_register(&lsmini_nor_flash);
 
 	platform_device_register(&lsmini_button_device);
diff --git a/arch/arm/mach-orion5x/mss2-setup.c b/arch/arm/mach-orion5x/mss2-setup.c
index 827acbafc9dc..e105130ba51c 100644
--- a/arch/arm/mach-orion5x/mss2-setup.c
+++ b/arch/arm/mach-orion5x/mss2-setup.c
@@ -241,8 +241,10 @@ static void __init mss2_init(void)
 	orion5x_uart0_init();
 	orion5x_xor_init();
 
-	mvebu_mbus_add_window("devbus-boot", MSS2_NOR_BOOT_BASE,
-			      MSS2_NOR_BOOT_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
+				    ORION_MBUS_DEVBUS_BOOT_ATTR,
+				    MSS2_NOR_BOOT_BASE,
+				    MSS2_NOR_BOOT_SIZE);
 	platform_device_register(&mss2_nor_flash);
 
 	platform_device_register(&mss2_button_device);
diff --git a/arch/arm/mach-orion5x/mv2120-setup.c b/arch/arm/mach-orion5x/mv2120-setup.c
index 92600ae2b4b6..e032f01da49e 100644
--- a/arch/arm/mach-orion5x/mv2120-setup.c
+++ b/arch/arm/mach-orion5x/mv2120-setup.c
@@ -204,8 +204,10 @@ static void __init mv2120_init(void)
 	orion5x_uart0_init();
 	orion5x_xor_init();
 
-	mvebu_mbus_add_window("devbus-boot", MV2120_NOR_BOOT_BASE,
-			      MV2120_NOR_BOOT_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
+				    ORION_MBUS_DEVBUS_BOOT_ATTR,
+				    MV2120_NOR_BOOT_BASE,
+				    MV2120_NOR_BOOT_SIZE);
 	platform_device_register(&mv2120_nor_flash);
 
 	platform_device_register(&mv2120_button_device);
diff --git a/arch/arm/mach-orion5x/net2big-setup.c b/arch/arm/mach-orion5x/net2big-setup.c
index dd0641a0d074..ba73dc7ffb9e 100644
--- a/arch/arm/mach-orion5x/net2big-setup.c
+++ b/arch/arm/mach-orion5x/net2big-setup.c
@@ -397,8 +397,10 @@ static void __init net2big_init(void)
 	net2big_sata_power_init();
 	orion5x_sata_init(&net2big_sata_data);
 
-	mvebu_mbus_add_window("devbus-boot", NET2BIG_NOR_BOOT_BASE,
-			      NET2BIG_NOR_BOOT_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
+				    ORION_MBUS_DEVBUS_BOOT_ATTR,
+				    NET2BIG_NOR_BOOT_BASE,
+				    NET2BIG_NOR_BOOT_SIZE);
 	platform_device_register(&net2big_nor_flash);
 
 	platform_device_register(&net2big_gpio_buttons);
diff --git a/arch/arm/mach-orion5x/pci.c b/arch/arm/mach-orion5x/pci.c
index 503368023bb1..7fab67053030 100644
--- a/arch/arm/mach-orion5x/pci.c
+++ b/arch/arm/mach-orion5x/pci.c
@@ -157,11 +157,10 @@ static int __init pcie_setup(struct pci_sys_data *sys)
 	if (dev == MV88F5181_DEV_ID || dev == MV88F5182_DEV_ID) {
 		printk(KERN_NOTICE "Applying Orion-1/Orion-NAS PCIe config "
 				   "read transaction workaround\n");
-		mvebu_mbus_add_window_remap_flags("pcie0.0",
-						  ORION5X_PCIE_WA_PHYS_BASE,
-						  ORION5X_PCIE_WA_SIZE,
-						  MVEBU_MBUS_NO_REMAP,
-						  MVEBU_MBUS_PCI_WA);
+		mvebu_mbus_add_window_by_id(ORION_MBUS_PCIE_WA_TARGET,
+					    ORION_MBUS_PCIE_WA_ATTR,
+					    ORION5X_PCIE_WA_PHYS_BASE,
+					    ORION5X_PCIE_WA_SIZE);
 		pcie_ops.read = pcie_rd_conf_wa;
 	}
 
diff --git a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
index 1c4498bf650a..213b3e143c57 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
@@ -123,8 +123,10 @@ static void __init rd88f5181l_fxo_init(void)
 	orion5x_eth_switch_init(&rd88f5181l_fxo_switch_plat_data, NO_IRQ);
 	orion5x_uart0_init();
 
-	mvebu_mbus_add_window("devbus-boot", RD88F5181L_FXO_NOR_BOOT_BASE,
-			      RD88F5181L_FXO_NOR_BOOT_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
+				    ORION_MBUS_DEVBUS_BOOT_ATTR,
+				    RD88F5181L_FXO_NOR_BOOT_BASE,
+				    RD88F5181L_FXO_NOR_BOOT_SIZE);
 	platform_device_register(&rd88f5181l_fxo_nor_boot_flash);
 }
 
diff --git a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
index adabe34c4fc6..594800e1d691 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
@@ -130,8 +130,10 @@ static void __init rd88f5181l_ge_init(void)
 	orion5x_i2c_init();
 	orion5x_uart0_init();
 
-	mvebu_mbus_add_window("devbus-boot", RD88F5181L_GE_NOR_BOOT_BASE,
-			      RD88F5181L_GE_NOR_BOOT_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
+				    ORION_MBUS_DEVBUS_BOOT_ATTR,
+				    RD88F5181L_GE_NOR_BOOT_BASE,
+				    RD88F5181L_GE_NOR_BOOT_SIZE);
 	platform_device_register(&rd88f5181l_ge_nor_boot_flash);
 
 	i2c_register_board_info(0, &rd88f5181l_ge_i2c_rtc, 1);
diff --git a/arch/arm/mach-orion5x/rd88f5182-setup.c b/arch/arm/mach-orion5x/rd88f5182-setup.c
index 66e77ec91532..b1cf68493ffc 100644
--- a/arch/arm/mach-orion5x/rd88f5182-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5182-setup.c
@@ -264,11 +264,14 @@ static void __init rd88f5182_init(void)
 	orion5x_uart0_init();
 	orion5x_xor_init();
 
-	mvebu_mbus_add_window("devbus-boot", RD88F5182_NOR_BOOT_BASE,
-			      RD88F5182_NOR_BOOT_SIZE);
-
-	mvebu_mbus_add_window("devbus-cs1", RD88F5182_NOR_BASE,
-			      RD88F5182_NOR_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
+				    ORION_MBUS_DEVBUS_BOOT_ATTR,
+				    RD88F5182_NOR_BOOT_BASE,
+				    RD88F5182_NOR_BOOT_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_TARGET(1),
+				    ORION_MBUS_DEVBUS_ATTR(1),
+				    RD88F5182_NOR_BASE,
+				    RD88F5182_NOR_SIZE);
 	platform_device_register(&rd88f5182_nor_flash);
 	platform_device_register(&rd88f5182_gpio_leds);
 
diff --git a/arch/arm/mach-orion5x/terastation_pro2-setup.c b/arch/arm/mach-orion5x/terastation_pro2-setup.c
index a0bfa53e7556..7e9064844698 100644
--- a/arch/arm/mach-orion5x/terastation_pro2-setup.c
+++ b/arch/arm/mach-orion5x/terastation_pro2-setup.c
@@ -329,8 +329,10 @@ static void __init tsp2_init(void)
 	/*
 	 * Configure peripherals.
 	 */
-	mvebu_mbus_add_window("devbus-boot", TSP2_NOR_BOOT_BASE,
-			      TSP2_NOR_BOOT_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
+				    ORION_MBUS_DEVBUS_BOOT_ATTR,
+				    TSP2_NOR_BOOT_BASE,
+				    TSP2_NOR_BOOT_SIZE);
 	platform_device_register(&tsp2_nor_flash);
 
 	orion5x_ehci0_init();
diff --git a/arch/arm/mach-orion5x/ts209-setup.c b/arch/arm/mach-orion5x/ts209-setup.c
index 80174f0f168e..e90c0618fdad 100644
--- a/arch/arm/mach-orion5x/ts209-setup.c
+++ b/arch/arm/mach-orion5x/ts209-setup.c
@@ -286,8 +286,10 @@ static void __init qnap_ts209_init(void)
 	/*
 	 * Configure peripherals.
 	 */
-	mvebu_mbus_add_window("devbus-boot", QNAP_TS209_NOR_BOOT_BASE,
-			      QNAP_TS209_NOR_BOOT_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
+				    ORION_MBUS_DEVBUS_BOOT_ATTR,
+				    QNAP_TS209_NOR_BOOT_BASE,
+				    QNAP_TS209_NOR_BOOT_SIZE);
 	platform_device_register(&qnap_ts209_nor_flash);
 
 	orion5x_ehci0_init();
diff --git a/arch/arm/mach-orion5x/ts409-setup.c b/arch/arm/mach-orion5x/ts409-setup.c
index 92592790d6da..5c079d312015 100644
--- a/arch/arm/mach-orion5x/ts409-setup.c
+++ b/arch/arm/mach-orion5x/ts409-setup.c
@@ -277,8 +277,10 @@ static void __init qnap_ts409_init(void)
 	/*
 	 * Configure peripherals.
 	 */
-	mvebu_mbus_add_window("devbus-boot", QNAP_TS409_NOR_BOOT_BASE,
-			      QNAP_TS409_NOR_BOOT_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
+				    ORION_MBUS_DEVBUS_BOOT_ATTR,
+				    QNAP_TS409_NOR_BOOT_BASE,
+				    QNAP_TS409_NOR_BOOT_SIZE);
 	platform_device_register(&qnap_ts409_nor_flash);
 
 	orion5x_ehci0_init();
diff --git a/arch/arm/mach-orion5x/wnr854t-setup.c b/arch/arm/mach-orion5x/wnr854t-setup.c
index 6b84863c018d..80a56ee245b3 100644
--- a/arch/arm/mach-orion5x/wnr854t-setup.c
+++ b/arch/arm/mach-orion5x/wnr854t-setup.c
@@ -127,8 +127,10 @@ static void __init wnr854t_init(void)
 	orion5x_eth_switch_init(&wnr854t_switch_plat_data, NO_IRQ);
 	orion5x_uart0_init();
 
-	mvebu_mbus_add_window("devbus-boot", WNR854T_NOR_BOOT_BASE,
-			      WNR854T_NOR_BOOT_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
+				    ORION_MBUS_DEVBUS_BOOT_ATTR,
+				    WNR854T_NOR_BOOT_BASE,
+				    WNR854T_NOR_BOOT_SIZE);
 	platform_device_register(&wnr854t_nor_flash);
 }
 
diff --git a/arch/arm/mach-orion5x/wrt350n-v2-setup.c b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
index fae684bc54f2..670e30dc0d1b 100644
--- a/arch/arm/mach-orion5x/wrt350n-v2-setup.c
+++ b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
@@ -213,8 +213,10 @@ static void __init wrt350n_v2_init(void)
 	orion5x_eth_switch_init(&wrt350n_v2_switch_plat_data, NO_IRQ);
 	orion5x_uart0_init();
 
-	mvebu_mbus_add_window("devbus-boot", WRT350N_V2_NOR_BOOT_BASE,
-			      WRT350N_V2_NOR_BOOT_SIZE);
+	mvebu_mbus_add_window_by_id(ORION_MBUS_DEVBUS_BOOT_TARGET,
+				    ORION_MBUS_DEVBUS_BOOT_ATTR,
+				    WRT350N_V2_NOR_BOOT_BASE,
+				    WRT350N_V2_NOR_BOOT_SIZE);
 	platform_device_register(&wrt350n_v2_nor_flash);
 	platform_device_register(&wrt350n_v2_leds);
 	platform_device_register(&wrt350n_v2_button_device);
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c21082d664ed..b06374843b42 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -861,6 +861,107 @@ config MIGHT_HAVE_CACHE_L2X0
 	  instead of this option, thus preventing the user from
 	  inadvertently configuring a broken kernel.
 
+config MV_LARGE_PAGE_SUPPORT
+	bool
+
+choice
+	prompt "Kernel Large Page Support"
+	default MV_NO_LARGE_PAGE_SUPPORT
+	help
+	  Support kennel large pages (> 4KB), this includes MMU large pages
+	  (64KB) and software emulation of large pages (using 4KB MMU pages).
+	  Select one of the page sizes below. Notice that using 64KB modes
+	  will require building the file-system with 64KB enabled compiler
+	  (basically changing the default prodocued elf alignment to 64KB)
+
+config MV_NO_LARGE_PAGE_SUPPORT
+	bool "Disabled - Use default"
+	help
+	  Use kernel default page size (4KB).
+	  If you are not sure, select this option.
+	  This option does not make any changes to default kernel page size
+	  MMU management.
+
+config MV_8KB_SW_PAGE_SIZE_SUPPORT
+	bool "8KB software page size support"
+#	depends on !HIGHMEM
+	select MV_LARGE_PAGE_SUPPORT
+	help
+	  The kernel uses 8KB pages, MMU page table will still use 4KB pages.
+	  This feature provides higher performance (specifically in NAS
+	  applications) at the expense of higher memory fragmentation. This also
+	  enables support for large storage volumes up to 32TB.
+
+config MV_16KB_SW_PAGE_SIZE_SUPPORT
+	bool "16KB software page size support"
+#	depends on !HIGHMEM
+	select MV_LARGE_PAGE_SUPPORT
+	help
+	  The kernel uses 16KB pages, MMU page table will still use 4KB pages.
+	  This feature provides higher performance (specifically in NAS
+	  applications) at the expense of higher memory fragmentation. This also
+	  enables support for large storage volumes up to 64TB.
+
+config MV_32KB_SW_PAGE_SIZE_SUPPORT
+	bool "32KB software page size support"
+#	depends on !HIGHMEM
+	select MV_LARGE_PAGE_SUPPORT
+	help
+	  The kernel uses 32KB pages, MMU page table will still use 4KB pages.
+	  This feature provides higher performance (specifically in NAS
+	  applications) at the expense of higher memory fragmentation. This also
+	  enables support for large storage volumes up to 64TB.
+
+config MV_64KB_SW_PAGE_SIZE_SUPPORT
+	bool "64KB software page size support"
+#	depends on !HIGHMEM
+	select MV_LARGE_PAGE_SUPPORT
+	help
+	  The kernel uses 64KB pages, MMU page table will still use 4KB pages.
+	  This feature provides higher performance (specifically in NAS
+	  applications) at the expense of higher memory fragmentation. This also
+	  enables support for large storage volumes up to 128TB.
+	  If you need 64KB pages, consider using the MV_64KB_MMU_PAGE_SIZE_SUPPORT
+	  option.
+
+config MV_64KB_MMU_PAGE_SIZE_SUPPORT
+	bool "64KB MMU page size support"
+	select MV_LARGE_PAGE_SUPPORT
+	help
+	  The kernel uses 64KB pages. The page-table will use large-pages (64KB)
+	  as well.
+	  This feature provides higher performance (specifically in NAS
+	  applications) at the expense of higher memory fragmentation. This also
+	  enables support for large storage volumes up to 128TB.
+
+endchoice
+
+config CPU_DYNAMIC_CLOCK_GATING_ENABLE
+	bool "Enable CPU Dynamic Clock Gating - Power optimization"
+	depends on CPU_V7
+	default y if CPU_V7 && ARCH_MVEBU
+	help
+	  Say Y here to enable CPU Dynamic Clock Gating.
+	  If unsure, say N.
+	  When dynamic clock gating is enabled, the clock of the system control block is cut
+	  in the following cases:
+	  - there are no system control coprocessor instructions being executed
+	  - there are no system control coprocessor instructions present in the pipeline
+	  - performance events are not enabled
+	  When dynamic clock gating is enabled, the clock of the data engine is cut when there is no
+	  data engine instruction in the data engine and no data engine instruction in the pipeline.
+	  CPU Dynamic Clock Gating is performance feature,
+
+config CPU_L1_CACHE_PREF_ENABLE
+	bool "Enable CPU L1 Cache Prefetch"
+	depends on CPU_V7
+	default y if CPU_V7 && ARCH_MVEBU
+	help
+	  Say Y here to enable L1 cache prefetch.
+	  If unsure, say N.
+	  L1 cache prefetch is performance feature,
+	  impact mainly cpu read operations.
+
 config CACHE_L2X0
 	bool "Enable the L2x0 outer cache controller" if MIGHT_HAVE_CACHE_L2X0
 	default MIGHT_HAVE_CACHE_L2X0
@@ -931,3 +1032,9 @@ config ARCH_HAS_BARRIERS
 	help
 	  This option allows the use of custom mandatory barriers
 	  included via the mach/barriers.h file.
+
+config ARCH_SUPPORTS_BIG_ENDIAN
+	bool
+	help
+	  This option specifies the architecture can support big endian
+	  operation.
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 80741992a9fc..3815a8262af0 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -38,9 +38,8 @@ ENTRY(v6_early_abort)
 	bne	do_DataAbort
 	bic	r1, r1, #1 << 11		@ clear bit 11 of FSR
 	ldr	r3, [r4]			@ read aborted ARM instruction
-#ifdef CONFIG_CPU_ENDIAN_BE8
-	rev	r3, r3
-#endif
+ ARM_BE8(rev	r3, r3)
+
 	do_ldrd_abort tmp=ip, insn=r3
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 6f4585b89078..924036473b16 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -25,6 +25,7 @@
 #include <asm/cp15.h>
 #include <asm/system_info.h>
 #include <asm/unaligned.h>
+#include <asm/opcodes.h>
 
 #include "fault.h"
 
@@ -762,21 +763,25 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (thumb_mode(regs)) {
 		u16 *ptr = (u16 *)(instrptr & ~1);
 		fault = probe_kernel_address(ptr, tinstr);
+		tinstr = __mem_to_opcode_thumb16(tinstr);
 		if (!fault) {
 			if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
 			    IS_T32(tinstr)) {
 				/* Thumb-2 32-bit */
 				u16 tinst2 = 0;
 				fault = probe_kernel_address(ptr + 1, tinst2);
-				instr = (tinstr << 16) | tinst2;
+				tinst2 = __mem_to_opcode_thumb16(tinst2);
+				instr = __opcode_thumb32_compose(tinstr, tinst2);
 				thumb2_32b = 1;
 			} else {
 				isize = 2;
 				instr = thumb2arm(tinstr);
 			}
 		}
-	} else
+	} else {
 		fault = probe_kernel_address(instrptr, instr);
+		instr = __mem_to_opcode_arm(instr);
+	}
 
 	if (fault) {
 		type = TYPE_FAULT;
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index c465faca51b0..2bbac56d52bc 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -776,7 +776,7 @@ static const struct of_device_id l2x0_ids[] __initconst = {
 	{}
 };
 
-int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
+int __init l2x0_of_init_common(u32 aux_val, u32 aux_mask, bool is_coherent)
 {
 	struct device_node *np;
 	const struct l2x0_of_data *data;
@@ -813,8 +813,32 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 
 	of_init = true;
 	memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
+
+	/*
+	 * PL310 newer than r3p2 don't need an outer cache sync
+	 * operation when the system is operating with hardware
+	 * coherency enabled, as it is done directly in hardware.
+	 */
+	if (of_device_is_compatible(np, "arm,pl310-cache")) {
+		u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
+			L2X0_CACHE_ID_RTL_MASK;
+		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P2 && is_coherent)
+			outer_cache.sync = NULL;
+	}
+
 	l2x0_init(l2x0_base, aux_val, aux_mask);
 
 	return 0;
 }
+
+int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
+{
+	return l2x0_of_init_common(aux_val, aux_mask, false);
+}
+
+int __init l2x0_of_init_coherent(u32 aux_val, u32 aux_mask)
+{
+	return l2x0_of_init_common(aux_val, aux_mask, true);
+}
+
 #endif
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 6c9d7054d997..58302fb777a7 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -141,9 +141,9 @@ struct dma_map_ops arm_dma_ops = {
 };
 EXPORT_SYMBOL(arm_dma_ops);
 
-static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
+void *arm_coherent_dma_alloc(struct device *dev, size_t size,
 	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
-static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
+void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
 				  dma_addr_t handle, struct dma_attrs *attrs);
 
 struct dma_map_ops arm_coherent_dma_ops = {
@@ -698,7 +698,7 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 			   __builtin_return_address(0));
 }
 
-static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
+void *arm_coherent_dma_alloc(struct device *dev, size_t size,
 	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
 	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
@@ -777,7 +777,7 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
 }
 
-static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
+void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
 				  dma_addr_t handle, struct dma_attrs *attrs)
 {
 	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 5dbf13f954f6..ec4a006355e3 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -112,9 +112,26 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 		pte = pte_offset_map(pmd, addr);
 		printk(", *pte=%08llx", (long long)pte_val(*pte));
 #ifndef CONFIG_ARM_LPAE
+#ifdef CONFIG_MV_LARGE_PAGE_SUPPORT
+		{
+			unsigned long pte_ptr = (unsigned long)pte;
+			unsigned long tmp = pte_ptr;
+			unsigned long shift_bits;
+			unsigned long mask;
+
+			shift_bits = PAGE_SHIFT - 12;
+			mask = 0x7FC & (~((shift_bits-1) << 7));
+			pte_ptr += (PTE_HWTABLE_PTRS * sizeof(void *));
+			pte_ptr &= ~0x7FC;
+			tmp &= mask;
+			pte_ptr += (tmp << shift_bits);
+			printk(", *ppte=%08llx", (long long unsigned int)pte_val((pte_t *)pte_ptr));
+		}
+#else
 		printk(", *ppte=%08llx",
-		       (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
+				(long long)pte_val(pte[PTE_HWTABLE_PTRS]));
 #endif
+#endif /* CONFIG_ARM_LPAE */
 		pte_unmap(pte);
 	} while(0);
 
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index f123d6eb074b..f88c93ea0d53 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -438,6 +438,13 @@ void __arm_iounmap(volatile void __iomem *io_addr)
 EXPORT_SYMBOL(__arm_iounmap);
 
 #ifdef CONFIG_PCI
+static int pci_ioremap_mem_type = MT_DEVICE;
+
+void pci_ioremap_set_mem_type(int mem_type)
+{
+	pci_ioremap_mem_type = mem_type;
+}
+
 int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
 {
 	BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
@@ -445,7 +452,7 @@ int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
 	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
 				  PCI_IO_VIRT_BASE + offset + SZ_64K,
 				  phys_addr,
-				  __pgprot(get_mem_type(MT_DEVICE)->prot_pte));
+				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
 }
 EXPORT_SYMBOL_GPL(pci_ioremap_io);
 #endif
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d5a4e9ad8f0f..2e66c0404c4c 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -18,22 +18,34 @@ extern pmd_t *top_pmd;
 /* PFN alias flushing, for VIPT caches */
 #define FLUSH_ALIAS_START	0xffff4000
 
+static inline pmd_t *pmd_off_k(unsigned long virt)
+{
+	return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
+}
+
 static inline void set_top_pte(unsigned long va, pte_t pte)
 {
-	pte_t *ptep = pte_offset_kernel(top_pmd, va);
+	pte_t *ptep;
+#ifdef CONFIG_MV_LARGE_PAGE_SUPPORT
+	ptep = pte_offset_kernel(pmd_off_k(va), va);
+#else
+	ptep = pte_offset_kernel(top_pmd, va);
+#endif
 	set_pte_ext(ptep, pte, 0);
 	local_flush_tlb_kernel_page(va);
 }
 
 static inline pte_t get_top_pte(unsigned long va)
 {
-	pte_t *ptep = pte_offset_kernel(top_pmd, va);
-	return *ptep;
-}
+	pte_t *ptep;
 
-static inline pmd_t *pmd_off_k(unsigned long virt)
-{
-	return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
+#ifdef CONFIG_MV_LARGE_PAGE_SUPPORT
+	ptep = pte_offset_kernel(pmd_off_k(va), va);
+#else
+	ptep = pte_offset_kernel(top_pmd, va);
+#endif
+
+	return *ptep;
 }
 
 struct mem_type {
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index fb3c446af9e5..ffb1cea7e72d 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -29,6 +29,10 @@
 #include <asm/system_info.h>
 #include <asm/traps.h>
 
+#if defined(CONFIG_MV_LARGE_PAGE_SUPPORT) && defined(CONFIG_HIGHMEM)
+#include <asm/fixmap.h>
+#endif
+
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 #include <asm/mach/pci.h>
@@ -338,7 +342,7 @@ EXPORT_SYMBOL(get_mem_type);
 /*
  * Adjust the PMD section entries according to the CPU in use.
  */
-static void __init build_mem_type_table(void)
+static void __init build_mem_type_table(const struct machine_desc *mdesc)
 {
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
@@ -361,7 +365,7 @@ static void __init build_mem_type_table(void)
 			cachepolicy = CPOLICY_WRITEBACK;
 		ecc_mask = 0;
 	}
-	if (is_smp())
+	if (is_smp() || (mdesc->flags & MACHINE_NEEDS_CPOLICY_WRITEALLOC))
 		cachepolicy = CPOLICY_WRITEALLOC;
 
 	/*
@@ -482,7 +486,23 @@ static void __init build_mem_type_table(void)
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 #endif
 
-		if (is_smp()) {
+		/*
+		 * On Cortex-A9 systems, configured in !SMP, proc-v7.S
+		 * has not set the SMP bit and the TLB broadcast
+		 * bit. However, these are needed to use the shareable
+		 * attribute on page tables, which in turn is needed
+		 * for certain systems that provide hardware I/O
+		 * coherency.
+		 */
+		if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9 &&
+		    !is_smp() && (mdesc->flags & MACHINE_NEEDS_SHAREABLE_PAGES)) {
+			u32 reg;
+			asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (reg));
+			reg |= (1 << 6) | (1 << 0);
+			asm("mcr p15, 0, %0, c1, c0, 1" : : "r" (reg));
+		}
+
+		if (is_smp() || (mdesc->flags & MACHINE_NEEDS_SHAREABLE_PAGES)) {
 			/*
 			 * Mark memory with the "shared" attribute
 			 * for SMP systems
@@ -608,7 +628,11 @@ static void __init *early_alloc(unsigned long sz)
 static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
 {
 	if (pmd_none(*pmd)) {
+#ifdef CONFIG_MV_LARGE_PAGE_SUPPORT
+		pte_t *pte = early_alloc(PAGE_SIZE);
+#else
 		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
+#endif
 		__pmd_populate(pmd, __pa(pte), prot);
 	}
 	BUG_ON(pmd_bad(*pmd));
@@ -966,6 +990,30 @@ void __init debug_ll_io_init(void)
 static void * __initdata vmalloc_min =
 	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
 
+#if defined(CONFIG_MV_LARGE_PAGE_SUPPORT) && defined(CONFIG_HIGHMEM)
+/* Create L1 Mapping for High-Mem pages. */
+static void __init map_highmem_pages(void)
+{
+	struct map_desc map;
+	unsigned long addr;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	for (addr = FIXADDR_START; addr < FIXADDR_TOP; addr += SZ_1M) {
+		map.pfn = __phys_to_pfn(virt_to_phys((void*)addr));
+		map.virtual = addr;
+		map.length = PAGE_SIZE;
+		map.type = MT_DEVICE;
+		create_mapping(&map);
+
+		/* Clear the L2 entry. */
+		pmd = pmd_offset(pgd_offset_k(addr), addr);
+		pte = pte_offset_kernel(pmd, addr);
+		set_pte_ext(pte, __pte(0), 0);
+	}
+}
+#endif
+
 /*
  * vmalloc=size forces the vmalloc area to be exactly 'size'
  * bytes. This can be used to increase (or decrease) the vmalloc
@@ -1251,6 +1299,10 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	map.type = MT_LOW_VECTORS;
 	create_mapping(&map);
 
+#if defined(CONFIG_MV_LARGE_PAGE_SUPPORT) && defined(CONFIG_HIGHMEM)
+	map_highmem_pages();
+#endif
+
 	/*
 	 * Ask the machine support to map in the statically mapped devices.
 	 */
@@ -1313,7 +1365,7 @@ void __init paging_init(struct machine_desc *mdesc)
 
 	memblock_set_current_limit(arm_lowmem_limit);
 
-	build_mem_type_table();
+	build_mem_type_table(mdesc);
 	prepare_page_table();
 	map_lowmem();
 	dma_contiguous_remap();
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 1046b373d1ae..d5eac05e28ea 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -88,7 +88,9 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 		init_pmd = pmd_offset(init_pud, 0);
 		init_pte = pte_offset_map(init_pmd, 0);
 		set_pte_ext(new_pte + 0, init_pte[0], 0);
+#ifndef CONFIG_MV_LARGE_PAGE_SUPPORT
 		set_pte_ext(new_pte + 1, init_pte[1], 0);
+#endif
 		pte_unmap(init_pte);
 		pte_unmap(new_pte);
 	}
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index d07352819580..b96c6e64943e 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -219,9 +219,7 @@ __v6_setup:
 						@ complete invalidations
 	adr	r5, v6_crval
 	ldmia	r5, {r5, r6}
-#ifdef CONFIG_CPU_ENDIAN_BE8
-	orr	r6, r6, #1 << 25		@ big-endian page tables
-#endif
+ ARM_BE8(orr	r6, r6, #1 << 25)		@ big-endian page tables
 	mrc	p15, 0, r0, c1, c0, 0		@ read control register
 	bic	r0, r0, r5			@ clear bits them
 	orr	r0, r0, r6			@ set them
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index bb20ba0f7bc7..c7c3bbcb293d 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -62,6 +62,10 @@ ENTRY(cpu_v7_switch_mm)
 	mov	pc, lr
 ENDPROC(cpu_v7_switch_mm)
 
+.macro flush_pte adr
+	mcr	p15, 0, \adr, c7, c10, 1	@ flush_pte
+.endm
+
 /*
  *	cpu_v7_set_pte_ext(ptep, pte)
  *
@@ -76,10 +80,44 @@ ENTRY(cpu_v7_set_pte_ext)
 #ifdef CONFIG_MMU
 	str	r1, [r0]			@ linux version
 
+	/* Calc HW PTE Entry Offset */
+#ifdef CONFIG_MV_LARGE_PAGE_SUPPORT
+#ifdef CONFIG_MV_8KB_SW_PAGE_SIZE_SUPPORT
+	mov	r3, #0x3FC
+	and	r3, r3, r0
+	mov	r3, r3, lsl#1
+#elif defined(CONFIG_MV_16KB_SW_PAGE_SIZE_SUPPORT)
+	mov	r3, #0x1FC
+	and	r3, r3, r0
+	mov	r3, r3, lsl#2
+#elif defined(CONFIG_MV_32KB_SW_PAGE_SIZE_SUPPORT)
+	mov	r3, #0xFC
+	and	r3, r3, r0
+	mov	r3, r3, lsl#3
+#elif defined(CONFIG_MV_64KB_SW_PAGE_SIZE_SUPPORT)
+	mov	r3, #0x7C
+	and	r3, r3, r0
+	mov	r3, r3, lsl#4
+#elif defined(CONFIG_MV_64KB_MMU_PAGE_SIZE_SUPPORT)
+	mov	r3, #0x7C
+	and	r3, r3, r0
+	mov	r3, r3, lsl#4
+#endif /* CONFIG_MV_8KB_SW_PAGE_SIZE_SUPPORT */
+	bic	r0, r0, #0x3FC
+	bic	r0, r0, #0x400
+	orr	r0, r0, r3
+#endif /* CONFIG_MV_LARGE_PAGE_SUPPORT */
 	bic	r3, r1, #0x000003f0
+#ifdef CONFIG_MV_64KB_MMU_PAGE_SIZE_SUPPORT
+	bic	r3, r3, #0x00000F000
+#endif
 	bic	r3, r3, #PTE_TYPE_MASK
 	orr	r3, r3, r2
+#ifdef CONFIG_MV_64KB_MMU_PAGE_SIZE_SUPPORT
+	orr	r3, r3, #PTE_EXT_AP0 | 1
+#else
 	orr	r3, r3, #PTE_EXT_AP0 | 2
+#endif
 
 	tst	r1, #1 << 4
 	orrne	r3, r3, #PTE_EXT_TEX(1)
@@ -103,9 +141,74 @@ ENTRY(cpu_v7_set_pte_ext)
  ARM(	str	r3, [r0, #2048]! )
  THUMB(	add	r0, r0, #2048 )
  THUMB(	str	r3, [r0] )
-	ALT_SMP(W(nop))
-	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)		@ flush_pte
-#endif
+#ifdef CONFIG_MV_LARGE_PAGE_SUPPORT
+#ifdef CONFIG_MV_64KB_MMU_PAGE_SIZE_SUPPORT
+	@ Need to duplicate the entry 16 times because of overlapping in PTE index bits.
+	str	r3, [r0, #4]
+	str	r3, [r0, #8]
+	str	r3, [r0, #12]
+	str	r3, [r0, #16]
+	str	r3, [r0, #20]
+	str	r3, [r0, #24]
+	str	r3, [r0, #28]
+	flush_pte r0
+	add	r0, r0, #32
+	str	r3, [r0]
+	str	r3, [r0, #4]
+	str	r3, [r0, #8]
+	str	r3, [r0, #12]
+	str	r3, [r0, #16]
+	str	r3, [r0, #20]
+	str	r3, [r0, #24]
+	str	r3, [r0, #28]
+	flush_pte r0
+#else
+#if PAGE_SHIFT > 12 		// >= 8KB
+	add	r3, r3, #0x1000
+	str	r3, [r0, #4]
+#endif /* PAGE_SHIFT > 12 */
+#if PAGE_SHIFT > 13		// >= 16KB
+	add	r3, r3, #0x1000
+	str	r3, [r0, #8]
+	add	r3, r3, #0x1000
+	str	r3, [r0, #12]
+#endif /* PAGE_SHIFT > 13 */
+#if PAGE_SHIFT > 14		// >= 32KB
+	add	r3, r3, #0x1000
+	str	r3, [r0, #16]
+	add	r3, r3, #0x1000
+	str	r3, [r0, #20]
+	add	r3, r3, #0x1000
+	str	r3, [r0, #24]
+	add	r3, r3, #0x1000
+	str	r3, [r0, #28]
+#endif /* PAGE_SHIFT > 14 */
+	flush_pte r0
+#if PAGE_SHIFT > 15		// >= 64KB
+	add	r0, r0, #32
+	add	r3, r3, #0x1000
+	str	r3, [r0]
+	add	r3, r3, #0x1000
+	str	r3, [r0, #4]
+	add	r3, r3, #0x1000
+	str	r3, [r0, #8]
+	add	r3, r3, #0x1000
+	str	r3, [r0, #12]
+	add	r3, r3, #0x1000
+	str	r3, [r0, #16]
+	add	r3, r3, #0x1000
+	str	r3, [r0, #20]
+	add	r3, r3, #0x1000
+	str	r3, [r0, #24]
+	add	r3, r3, #0x1000
+	str	r3, [r0, #28]
+	flush_pte r0
+#endif /* PAGE_SHIFT > 15 */
+#endif /* CONFIG_MV_64KB_MMU_PAGE_SIZE_SUPPORT */
+#else /* CONFIG_MV_LARGE_PAGE_SUPPORT */
+	flush_pte	r0
+#endif /* CONFIG_MV_LARGE_PAGE_SUPPORT */
+#endif /* CONFIG_MMU */
 	mov	pc, lr
 ENDPROC(cpu_v7_set_pte_ext)
 
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 19da84172cc3..818a79a5dbd2 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -180,6 +180,14 @@ ENDPROC(cpu_pj4b_do_idle)
 __v7_ca5mp_setup:
 __v7_ca9mp_setup:
 	mov	r10, #(1 << 0)			@ TLB ops broadcasting
+#ifdef CONFIG_CPU_L1_CACHE_PREF_ENABLE
+	orr	r10, r10, #(1 << 2)
+#endif
+#ifdef CONFIG_CPU_DYNAMIC_CLOCK_GATING_ENABLE
+	mrc	p15, 0,	r0, c15, c0, 0
+	orr     r0, r0, #(1 << 0)
+	mcr	p15, 0,	r0, c15, c0, 0
+#endif
 	b	1f
 __v7_ca7mp_setup:
 __v7_ca15mp_setup:
@@ -201,7 +209,6 @@ __v7_pj4b_setup:
 /* Auxiliary Debug Modes Control 1 Register */
 #define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */
 #define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */
-#define PJ4B_BCK_OFF_STREX (1 << 5) /* Enable the back off of STREX instr */
 #define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */
 
 /* Auxiliary Debug Modes Control 2 Register */
@@ -224,7 +231,6 @@ __v7_pj4b_setup:
 	/* Auxiliary Debug Modes Control 1 Register */
 	mrc	p15, 1,	r0, c15, c1, 1
 	orr     r0, r0, #PJ4B_CLEAN_LINE
-	orr     r0, r0, #PJ4B_BCK_OFF_STREX
 	orr     r0, r0, #PJ4B_INTER_PARITY
 	bic	r0, r0, #PJ4B_STATIC_BP
 	mcr	p15, 1,	r0, c15, c1, 1
@@ -352,9 +358,7 @@ __v7_setup:
 #endif
 	adr	r5, v7_crval
 	ldmia	r5, {r5, r6}
-#ifdef CONFIG_CPU_ENDIAN_BE8
-	orr	r6, r6, #1 << 25		@ big-endian page tables
-#endif
+ ARM_BE8(orr	r6, r6, #1 << 25)		@ big-endian page tables
 #ifdef CONFIG_SWP_EMULATE
 	orr     r5, r5, #(1 << 10)              @ set SW bit in "clear"
 	bic     r6, r6, #(1 << 10)              @ clear it in "mmuset"
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index ea94765acf9a..24aa3f02484d 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -53,7 +53,11 @@ ENTRY(v7wbi_flush_user_tlb_range)
 #endif
 	ALT_UP(mcr	p15, 0, r0, c8, c7, 1)	@ TLB invalidate U MVA
 
+#if defined(CONFIG_MV_LARGE_PAGE_SUPPORT) && !defined(CONFIG_MV_64KB_MMU_PAGE_SIZE_SUPPORT)
+	add	r0, r0, #0x1000
+#else
 	add	r0, r0, #PAGE_SZ
+#endif /* CONFIG_MV_LARGE_PAGE_SUPPORT && !CONFIG_MV_64KB_MMU_PAGE_SIZE_SUPPORT */
 	cmp	r0, r1
 	blo	1b
 	dsb
@@ -81,7 +85,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
 	ALT_SMP(mcr	p15, 0, r0, c8, c3, 1)	@ TLB invalidate U MVA (shareable)
 #endif
 	ALT_UP(mcr	p15, 0, r0, c8, c7, 1)	@ TLB invalidate U MVA
+#if defined(CONFIG_MV_LARGE_PAGE_SUPPORT) && !defined(CONFIG_MV_64KB_MMU_PAGE_SIZE_SUPPORT)
+	add	r0, r0, #0x1000
+#else
 	add	r0, r0, #PAGE_SZ
+#endif /* CONFIG_MV_LARGE_PAGE_SUPPORT && !CONFIG_MV_64KB_MMU_PAGE_SIZE_SUPPORT */
 	cmp	r0, r1
 	blo	1b
 	dsb
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1a2b7749b047..943d425b7302 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -9,7 +9,6 @@ config IA64
 	select PCI if (!IA64_HP_SIM)
 	select ACPI if (!IA64_HP_SIM)
 	select PM if (!IA64_HP_SIM)
-	select ARCH_SUPPORTS_MSI
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select HAVE_IDE
 	select HAVE_OPROFILE
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index bdb8ea100e73..91f56691cfa1 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -199,7 +199,7 @@ void pcibios_set_master(struct pci_dev *dev)
  */
 int pci_read_irq_line(struct pci_dev *pci_dev)
 {
-	struct of_irq oirq;
+	struct of_phandle_args oirq;
 	unsigned int virq;
 
 	/* The current device-tree that iSeries generates from the HV
@@ -217,7 +217,7 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
 	memset(&oirq, 0xff, sizeof(oirq));
 #endif
 	/* Try to get a mapping from the device-tree */
-	if (of_irq_map_pci(pci_dev, &oirq)) {
+	if (of_irq_parse_pci(pci_dev, &oirq)) {
 		u8 line, pin;
 
 		/* If that fails, lets fallback to what is in the config
@@ -243,11 +243,10 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
 			irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
 	} else {
 		pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
-			 oirq.size, oirq.specifier[0], oirq.specifier[1],
-			 of_node_full_name(oirq.controller));
+			 oirq.args_count, oirq.args[0], oirq.args[1],
+			 of_node_full_name(oirq.np));
 
-		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
-					     oirq.size);
+		virq = irq_create_of_mapping(&oirq);
 	}
 	if (!virq) {
 		pr_debug(" Failed to map !\n");
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index e53e2b40d695..7e6db40c2a52 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -764,7 +764,6 @@ config CAVIUM_OCTEON_REFERENCE_BOARD
 	select SYS_HAS_CPU_CAVIUM_OCTEON
 	select SWAP_IO_SPACE
 	select HW_HAS_PCI
-	select ARCH_SUPPORTS_MSI
 	select ZONE_DMA32
 	select USB_ARCH_HAS_OHCI
 	select USB_ARCH_HAS_EHCI
@@ -800,7 +799,6 @@ config NLM_XLR_BOARD
 	select CEVT_R4K
 	select CSRC_R4K
 	select IRQ_CPU
-	select ARCH_SUPPORTS_MSI
 	select ZONE_DMA32 if 64BIT
 	select SYNC_R4K
 	select SYS_HAS_EARLY_PRINTK
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index b8e24fd4cbc5..031f4c1cc9b6 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -137,11 +137,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 	return channel ? 15 : 14;
 }
 
-#ifdef CONFIG_CPU_CAVIUM_OCTEON
-/* MSI arch hook for OCTEON */
-#define arch_setup_msi_irqs arch_setup_msi_irqs
-#endif
-
 extern char * (*pcibios_plat_setup)(char *str);
 
 #ifdef CONFIG_OF
diff --git a/arch/mips/pci/fixup-lantiq.c b/arch/mips/pci/fixup-lantiq.c
index 6c829df28dc7..aef60e75003e 100644
--- a/arch/mips/pci/fixup-lantiq.c
+++ b/arch/mips/pci/fixup-lantiq.c
@@ -25,16 +25,15 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
 
 int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
-	struct of_irq dev_irq;
+	struct of_phandle_args dev_irq;
 	int irq;
 
-	if (of_irq_map_pci(dev, &dev_irq)) {
+	if (of_irq_parse_pci(dev, &dev_irq)) {
 		dev_err(&dev->dev, "trying to map irq for unknown slot:%d pin:%d\n",
 			slot, pin);
 		return 0;
 	}
-	irq = irq_create_of_mapping(dev_irq.controller, dev_irq.specifier,
-					dev_irq.size);
+	irq = irq_create_of_mapping(&dev_irq);
 	dev_info(&dev->dev, "SLOT:%d PIN:%d IRQ:%d\n", slot, pin, irq);
 	return irq;
 }
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index fe404e77246e..b4697de2cf96 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -734,7 +734,6 @@ config PCI
 	default y if !40x && !CPM2 && !8xx && !PPC_83xx \
 		&& !PPC_85xx && !PPC_86xx && !GAMECUBE_COMMON
 	default PCI_QSPAN if !4xx && !CPM2 && 8xx
-	select ARCH_SUPPORTS_MSI
 	select GENERIC_PCI_IOMAP
 	help
 	  Find out whether your system includes a PCI bus. PCI is the name of
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 6653f2743c4e..95145a15c708 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -113,11 +113,6 @@ extern int pci_domain_nr(struct pci_bus *bus);
 /* Decide whether to display the domain number in /proc */
 extern int pci_proc_domain(struct pci_bus *bus);
 
-/* MSI arch hooks */
-#define arch_setup_msi_irqs arch_setup_msi_irqs
-#define arch_teardown_msi_irqs arch_teardown_msi_irqs
-#define arch_msi_check_device arch_msi_check_device
-
 struct vm_area_struct;
 /* Map a range of PCI memory or I/O space for a device into user space */
 int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index f46914a0f33e..2a404fe6975a 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -228,7 +228,7 @@ int pcibios_add_platform_entries(struct pci_dev *pdev)
  */
 static int pci_read_irq_line(struct pci_dev *pci_dev)
 {
-	struct of_irq oirq;
+	struct of_phandle_args oirq;
 	unsigned int virq;
 
 	pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
@@ -237,7 +237,7 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
 	memset(&oirq, 0xff, sizeof(oirq));
 #endif
 	/* Try to get a mapping from the device-tree */
-	if (of_irq_map_pci(pci_dev, &oirq)) {
+	if (of_irq_parse_pci(pci_dev, &oirq)) {
 		u8 line, pin;
 
 		/* If that fails, lets fallback to what is in the config
@@ -263,11 +263,10 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
 			irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
 	} else {
 		pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
-			 oirq.size, oirq.specifier[0], oirq.specifier[1],
-			 of_node_full_name(oirq.controller));
+			 oirq.args_count, oirq.args[0], oirq.args[1],
+			 of_node_full_name(oirq.np));
 
-		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
-					     oirq.size);
+		virq = irq_create_of_mapping(&oirq);
 	}
 	if(virq == NO_IRQ) {
 		pr_debug(" Failed to map !\n");
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
index 14be2bd358b8..b3ea96db5b06 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
@@ -486,7 +486,7 @@ static __init int celleb_setup_pciex(struct device_node *node,
 				     struct pci_controller *phb)
 {
 	struct resource	r;
-	struct of_irq oirq;
+	struct of_phandle_args oirq;
 	int virq;
 
 	/* SMMIO registers; used inside this file */
@@ -507,12 +507,11 @@ static __init int celleb_setup_pciex(struct device_node *node,
 	phb->ops = &scc_pciex_pci_ops;
 
 	/* internal interrupt handler */
-	if (of_irq_map_one(node, 1, &oirq)) {
+	if (of_irq_parse_one(node, 1, &oirq)) {
 		pr_err("PCIEXC:Failed to map irq\n");
 		goto error;
 	}
-	virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
-				     oirq.size);
+	virq = irq_create_of_mapping(&oirq);
 	if (request_irq(virq, pciex_handle_internal_irq,
 			0, "pciex", (void *)phb)) {
 		pr_err("PCIEXC:Failed to request irq\n");
diff --git a/arch/powerpc/platforms/cell/celleb_scc_sio.c b/arch/powerpc/platforms/cell/celleb_scc_sio.c
index 9c339ec646f5..c8eb57193826 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_sio.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_sio.c
@@ -45,7 +45,7 @@ static int __init txx9_serial_init(void)
 	struct device_node *node;
 	int i;
 	struct uart_port req;
-	struct of_irq irq;
+	struct of_phandle_args irq;
 	struct resource res;
 
 	for_each_compatible_node(node, "serial", "toshiba,sio-scc") {
@@ -53,7 +53,7 @@ static int __init txx9_serial_init(void)
 			if (!(txx9_serial_bitmap & (1<<i)))
 				continue;
 
-			if (of_irq_map_one(node, i, &irq))
+			if (of_irq_parse_one(node, i, &irq))
 				continue;
 			if (of_address_to_resource(node,
 				txx9_scc_tab[i].index, &res))
@@ -66,8 +66,7 @@ static int __init txx9_serial_init(void)
 #ifdef CONFIG_SERIAL_TXX9_CONSOLE
 			req.membase = ioremap(req.mapbase, 0x24);
 #endif
-			req.irq = irq_create_of_mapping(irq.controller,
-				irq.specifier, irq.size);
+			req.irq = irq_create_of_mapping(&irq);
 			req.flags |= UPF_IOREMAP | UPF_BUGGY_UART
 				/*HAVE_CTS_LINE*/;
 			req.uartclk = 83300000;
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 8e299447127e..d20680446174 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -235,12 +235,9 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic)
 	/* First, we check whether we have a real "interrupts" in the device
 	 * tree in case the device-tree is ever fixed
 	 */
-	struct of_irq oirq;
-	if (of_irq_map_one(pic->host->of_node, 0, &oirq) == 0) {
-		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
-					     oirq.size);
-		return virq;
-	}
+	struct of_phandle_args oirq;
+	if (of_irq_parse_one(pic->host->of_node, 0, &oirq) == 0)
+		return irq_create_of_mapping(&oirq);
 
 	/* Now do the horrible hacks */
 	tmp = of_get_property(pic->host->of_node, "#interrupt-cells", NULL);
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index 2bb6977c0a5a..c3327f3d8cf7 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -177,21 +177,20 @@ out:
 
 static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
 {
-	struct of_irq oirq;
+	struct of_phandle_args oirq;
 	int ret;
 	int i;
 
 	for (i=0; i < 3; i++) {
-		ret = of_irq_map_one(np, i, &oirq);
+		ret = of_irq_parse_one(np, i, &oirq);
 		if (ret) {
 			pr_debug("spu_new: failed to get irq %d\n", i);
 			goto err;
 		}
 		ret = -EINVAL;
-		pr_debug("  irq %d no 0x%x on %s\n", i, oirq.specifier[0],
-			 oirq.controller->full_name);
-		spu->irqs[i] = irq_create_of_mapping(oirq.controller,
-					oirq.specifier, oirq.size);
+		pr_debug("  irq %d no 0x%x on %s\n", i, oirq.args[0],
+			 oirq.np->full_name);
+		spu->irqs[i] = irq_create_of_mapping(&oirq);
 		if (spu->irqs[i] == NO_IRQ) {
 			pr_debug("spu_new: failed to map it !\n");
 			goto err;
@@ -200,7 +199,7 @@ static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
 	return 0;
 
 err:
-	pr_debug("failed to map irq %x for spu %s\n", *oirq.specifier,
+	pr_debug("failed to map irq %x for spu %s\n", *oirq.args,
 		spu->name);
 	for (; i >= 0; i--) {
 		if (spu->irqs[i] != NO_IRQ)
diff --git a/arch/powerpc/platforms/fsl_uli1575.c b/arch/powerpc/platforms/fsl_uli1575.c
index 92ac9b52b32d..b97f6f3d3c5b 100644
--- a/arch/powerpc/platforms/fsl_uli1575.c
+++ b/arch/powerpc/platforms/fsl_uli1575.c
@@ -321,8 +321,7 @@ static void hpcd_final_uli5288(struct pci_dev *dev)
 {
 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
 	struct device_node *hosenode = hose ? hose->dn : NULL;
-	struct of_irq oirq;
-	int virq, pin = 2;
+	struct of_phandle_args oirq;
 	u32 laddr[3];
 
 	if (!machine_is(mpc86xx_hpcd))
@@ -331,12 +330,13 @@ static void hpcd_final_uli5288(struct pci_dev *dev)
 	if (!hosenode)
 		return;
 
+	oirq.np = hosenode;
+	oirq.args[0] = 2;
+	oirq.args_count = 1;
 	laddr[0] = (hose->first_busno << 16) | (PCI_DEVFN(31, 0) << 8);
 	laddr[1] = laddr[2] = 0;
-	of_irq_map_raw(hosenode, &pin, 1, laddr, &oirq);
-	virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
-				     oirq.size);
-	dev->irq = virq;
+	of_irq_parse_raw(laddr, &oirq);
+	dev->irq = irq_create_of_mapping(&oirq);
 }
 
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x1575, hpcd_quirk_uli1575);
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 31036b56670e..4c24bf60d39d 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -393,8 +393,8 @@ static void __init pmac_pic_probe_oldstyle(void)
 #endif
 }
 
-int of_irq_map_oldworld(struct device_node *device, int index,
-			struct of_irq *out_irq)
+int of_irq_parse_oldworld(struct device_node *device, int index,
+			struct of_phandle_args *out_irq)
 {
 	const u32 *ints = NULL;
 	int intlen;
@@ -422,9 +422,9 @@ int of_irq_map_oldworld(struct device_node *device, int index,
 	if (index >= intlen)
 		return -EINVAL;
 
-	out_irq->controller = NULL;
-	out_irq->specifier[0] = ints[index];
-	out_irq->size = 1;
+	out_irq->np = NULL;
+	out_irq->args[0] = ints[index];
+	out_irq->args_count = 1;
 
 	return 0;
 }
diff --git a/arch/powerpc/platforms/pseries/event_sources.c b/arch/powerpc/platforms/pseries/event_sources.c
index 2605c310166a..18380e8f6dfe 100644
--- a/arch/powerpc/platforms/pseries/event_sources.c
+++ b/arch/powerpc/platforms/pseries/event_sources.c
@@ -25,7 +25,7 @@ void request_event_sources_irqs(struct device_node *np,
 				const char *name)
 {
 	int i, index, count = 0;
-	struct of_irq oirq;
+	struct of_phandle_args oirq;
 	const u32 *opicprop;
 	unsigned int opicplen;
 	unsigned int virqs[16];
@@ -55,13 +55,11 @@ void request_event_sources_irqs(struct device_node *np,
 	/* Else use normal interrupt tree parsing */
 	else {
 		/* First try to do a proper OF tree parsing */
-		for (index = 0; of_irq_map_one(np, index, &oirq) == 0;
+		for (index = 0; of_irq_parse_one(np, index, &oirq) == 0;
 		     index++) {
 			if (count > 15)
 				break;
-			virqs[count] = irq_create_of_mapping(oirq.controller,
-							    oirq.specifier,
-							    oirq.size);
+			virqs[count] = irq_create_of_mapping(&oirq);
 			if (virqs[count] == NO_IRQ) {
 				pr_err("event-sources: Unable to allocate "
 				       "interrupt number for %s\n",
diff --git a/arch/powerpc/sysdev/mpic_msi.c b/arch/powerpc/sysdev/mpic_msi.c
index bbf342c88314..7dc39f35a4cc 100644
--- a/arch/powerpc/sysdev/mpic_msi.c
+++ b/arch/powerpc/sysdev/mpic_msi.c
@@ -35,7 +35,7 @@ static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
 	const struct irq_domain_ops *ops = mpic->irqhost->ops;
 	struct device_node *np;
 	int flags, index, i;
-	struct of_irq oirq;
+	struct of_phandle_args oirq;
 
 	pr_debug("mpic: found U3, guessing msi allocator setup\n");
 
@@ -63,9 +63,9 @@ static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
 		pr_debug("mpic: mapping hwirqs for %s\n", np->full_name);
 
 		index = 0;
-		while (of_irq_map_one(np, index++, &oirq) == 0) {
-			ops->xlate(mpic->irqhost, NULL, oirq.specifier,
-						oirq.size, &hwirq, &flags);
+		while (of_irq_parse_one(np, index++, &oirq) == 0) {
+			ops->xlate(mpic->irqhost, NULL, oirq.args,
+						oirq.args_count, &hwirq, &flags);
 			msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, hwirq);
 		}
 	}
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index d8d6eeca56b0..27c73fd6819d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -431,7 +431,6 @@ menuconfig PCI
 	bool "PCI support"
 	default n
 	depends on 64BIT
-	select ARCH_SUPPORTS_MSI
 	select PCI_MSI
 	help
 	  Enable PCI support.
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 6c1801235db9..8641e8dfedf9 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -21,10 +21,6 @@ void pci_iounmap(struct pci_dev *, void __iomem *);
 int pci_domain_nr(struct pci_bus *);
 int pci_proc_domain(struct pci_bus *);
 
-/* MSI arch hooks */
-#define arch_setup_msi_irqs	arch_setup_msi_irqs
-#define arch_teardown_msi_irqs	arch_teardown_msi_irqs
-
 #define ZPCI_BUS_NR			0	/* default bus number */
 #define ZPCI_DEVFN			0	/* default device number */
 
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 2668b3142fa2..aeecf2921132 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -52,7 +52,6 @@ config SPARC32
 
 config SPARC64
 	def_bool 64BIT
-	select ARCH_SUPPORTS_MSI
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_GRAPH_FP_TEST
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 3aa37669ff8c..d2d519ca8ef9 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -379,7 +379,6 @@ config PCI
 	select PCI_DOMAINS
 	select GENERIC_PCI_IOMAP
 	select TILE_GXIO_TRIO if TILEGX
-	select ARCH_SUPPORTS_MSI if TILEGX
 	select PCI_MSI if TILEGX
 	---help---
 	  Enable PCI root complex support, so PCIe endpoint devices can
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index fe120da25625..814c7f3ed056 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1999,7 +1999,6 @@ menu "Bus options (PCI etc.)"
 config PCI
 	bool "PCI support"
 	default y
-	select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC)
 	---help---
 	  Find out whether you have a PCI motherboard. PCI is the name of a
 	  bus system, i.e. the way the CPU talks to the other stuff inside
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index d9e9e6c7ed32..7d7443283a9d 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -100,29 +100,6 @@ static inline void early_quirks(void) { }
 extern void pci_iommu_alloc(void);
 
 #ifdef CONFIG_PCI_MSI
-/* MSI arch specific hooks */
-static inline int x86_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
-	return x86_msi.setup_msi_irqs(dev, nvec, type);
-}
-
-static inline void x86_teardown_msi_irqs(struct pci_dev *dev)
-{
-	x86_msi.teardown_msi_irqs(dev);
-}
-
-static inline void x86_teardown_msi_irq(unsigned int irq)
-{
-	x86_msi.teardown_msi_irq(irq);
-}
-static inline void x86_restore_msi_irqs(struct pci_dev *dev, int irq)
-{
-	x86_msi.restore_msi_irqs(dev, irq);
-}
-#define arch_setup_msi_irqs x86_setup_msi_irqs
-#define arch_teardown_msi_irqs x86_teardown_msi_irqs
-#define arch_teardown_msi_irq x86_teardown_msi_irq
-#define arch_restore_msi_irqs x86_restore_msi_irqs
 /* implemented in arch/x86/kernel/apic/io_apic. */
 struct msi_desc;
 int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
@@ -130,16 +107,9 @@ void native_teardown_msi_irq(unsigned int irq);
 void native_restore_msi_irqs(struct pci_dev *dev, int irq);
 int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
 		  unsigned int irq_base, unsigned int irq_offset);
-/* default to the implementation in drivers/lib/msi.c */
-#define HAVE_DEFAULT_MSI_TEARDOWN_IRQS
-#define HAVE_DEFAULT_MSI_RESTORE_IRQS
-void default_teardown_msi_irqs(struct pci_dev *dev);
-void default_restore_msi_irqs(struct pci_dev *dev, int irq);
 #else
 #define native_setup_msi_irqs		NULL
 #define native_teardown_msi_irq		NULL
-#define default_teardown_msi_irqs	NULL
-#define default_restore_msi_irqs	NULL
 #endif
 
 #define PCI_DMA_BUS_IS_PHYS (dma_ops->is_phys)
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index b1581527a236..0171604becb5 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -106,7 +106,7 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
 
 static int x86_of_pci_irq_enable(struct pci_dev *dev)
 {
-	struct of_irq oirq;
+	struct of_phandle_args oirq;
 	u32 virq;
 	int ret;
 	u8 pin;
@@ -117,12 +117,11 @@ static int x86_of_pci_irq_enable(struct pci_dev *dev)
 	if (!pin)
 		return 0;
 
-	ret = of_irq_map_pci(dev, &oirq);
+	ret = of_irq_parse_pci(dev, &oirq);
 	if (ret)
 		return ret;
 
-	virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
-			oirq.size);
+	virq = irq_create_of_mapping(&oirq);
 	if (virq == 0)
 		return -EINVAL;
 	dev->irq = virq;
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 45a14dbbddaf..5587f991d111 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -107,6 +107,8 @@ struct x86_platform_ops x86_platform = {
 };
 
 EXPORT_SYMBOL_GPL(x86_platform);
+
+#if defined(CONFIG_PCI_MSI)
 struct x86_msi_ops x86_msi = {
 	.setup_msi_irqs		= native_setup_msi_irqs,
 	.compose_msi_msg	= native_compose_msi_msg,
@@ -116,6 +118,28 @@ struct x86_msi_ops x86_msi = {
 	.setup_hpet_msi		= default_setup_hpet_msi,
 };
 
+/* MSI arch specific hooks */
+int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+	return x86_msi.setup_msi_irqs(dev, nvec, type);
+}
+
+void arch_teardown_msi_irqs(struct pci_dev *dev)
+{
+	x86_msi.teardown_msi_irqs(dev);
+}
+
+void arch_teardown_msi_irq(unsigned int irq)
+{
+	x86_msi.teardown_msi_irq(irq);
+}
+
+void arch_restore_msi_irqs(struct pci_dev *dev, int irq)
+{
+	x86_msi.restore_msi_irqs(dev, irq);
+}
+#endif
+
 struct x86_io_apic_ops x86_io_apic_ops = {
 	.init			= native_io_apic_init_mappings,
 	.read			= native_io_apic_read,
diff --git a/block/partitions/mac.c b/block/partitions/mac.c
index 76d8ba6379a9..4824af990f90 100644
--- a/block/partitions/mac.c
+++ b/block/partitions/mac.c
@@ -32,7 +32,9 @@ int mac_partition(struct parsed_partitions *state)
 	Sector sect;
 	unsigned char *data;
 	int slot, blocks_in_map;
-	unsigned secsize;
+	//For ITR HFS+J > 2TB Filesystems usb device mount is read-only
+	//unsigned secsize;
+	sector_t secsize;
 #ifdef CONFIG_PPC_PMAC
 	int found_root = 0;
 	int found_root_goodness = 0;
diff --git a/crypto/Kconfig b/crypto/Kconfig
index bf8148e74e73..df93b8266090 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1397,5 +1397,6 @@ config CRYPTO_USER_API_SKCIPHER
 
 source "drivers/crypto/Kconfig"
 source crypto/asymmetric_keys/Kconfig
+source "crypto/ocf/Kconfig"
 
 endif	# if CRYPTO
diff --git a/crypto/Makefile b/crypto/Makefile
index a8e9b0fefbe9..5a39dcfaadf3 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -95,6 +95,8 @@ obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
 obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
 obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
 
+obj-$(CONFIG_OCF_OCF) += ocf/
+
 #
 # generic algorithms and the async_tx api
 #
diff --git a/crypto/ocf/ChangeLog b/crypto/ocf/ChangeLog
new file mode 100644
index 000000000000..b477010d00d8
--- /dev/null
+++ b/crypto/ocf/ChangeLog
@@ -0,0 +1,1957 @@
+2008-09-18 01:27  davidm
+
+	* Makefile, README, README.sglinux,
+	patches/linux-2.6.26-natt.patch, patches/linux-2.6.26-ocf.patch,
+	patches/openssl-0.9.8g.patch, patches/openssl-0.9.8i.patch:
+
+	Updates for a new OCF release with openssl-0.9.8i and linux-2.6.26
+	support.
+
+2008-09-18 00:19  davidm
+
+	* Config.in, Kconfig, Makefile, ep80579/Makefile,
+	ep80579/environment.mk, ep80579/icp_asym.c, ep80579/icp_common.c,
+	ep80579/icp_ocf.h, ep80579/icp_sym.c,
+	ep80579/linux_2.6_kernel_space.mk:
+
+	A new driver from Intel for their Intel QuickAssist enabled EP80579
+	Integrated Processor Product Line.
+
+	Adrian Hoban  Brad Vrabete
+
+
+2008-07-25 01:01  gerg
+
+	* ocf-compat.h:
+
+	From linux-2.6.26 onwards there is now a linux/fdtable.h include
+	that contains the file_fdtable() definition.
+
+2008-07-05 01:20  davidm
+
+	* Makefile, patches/linux-2.6.25-natt.patch,
+	patches/linux-2.6.25-ocf.patch:
+
+	A new ocf-linux release and some patches to send to the OS guys.
+
+2008-07-03 21:21  davidm
+
+	* crypto.c:
+
+	Clean up a some possible deadlock/busy wait issues with
+	locking/sleeping.  This has greatly improved openswan 2.6.14
+	reliability ;-)
+
+	Make more of our state available in /sys for debugging.
+
+2008-06-23 20:38  davidm
+
+	* Config.in, Kconfig:
+
+	Some settings should be bool's John Gumb
+
+2008-05-28 03:43  philipc
+
+	* ixp4xx/ixp4xx.c:  Revert previous checkin since the
+	callbacks are meant to complete the crypto request.  Before
+	reverting this, CONFIG_DEBUG_SLAB gives: slab error in
+	verify_redzone_free(): cache `ixp4xx_q': double free detected when
+	trying to ssh to the device.
+
+2008-04-27 19:31  davidm
+
+	* ixp4xx/ixp4xx.c:
+
+	We were not completing a crypto request under some error
+	conditions.
+
+2008-04-02 01:51  davidm
+
+	* talitos/talitos.c:
+
+	This patch is for crypto/ocf/talitos for use on linux 2.6.23.  It
+	is applied to the ocf-linux-20071215 release.
+
+	Signed-off-by: Lee Nipper
+
+2008-02-29 00:43  davidm
+
+	* crypto.c, ocf-compat.h, ixp4xx/ixp4xx.c:
+
+	freshen  up the 2.4 support,  some recent OCF changes and openswan
+	changes are not that old-os friendly.
+
+	Force OCF to select HW/SW,  otherwise it may get stuck on the first
+	device.  This change means we will favour HW over SW,  but we will
+	use both as required.  Passing in a crid of 0 to crypto_newsession
+	effectively meant we were stuck on the first device registered with
+	OCF,  not good.  This only applied to ipsec,  cryptodev already did
+	the right thing.
+
+2008-01-31 07:37  gerg
+
+	* hifn/hifn7751.c:
+
+	The linux-2.6.24 modules build fails if the pci ID table doesn't
+	have a NULL entry at the end. So add one.
+
+2008-01-29 09:16  gerg
+
+	* cryptosoft.c, ocf-compat.h:
+
+	Added some compatability macros for scatterlist changes from 2.6.24
+	onwards.
+
+2007-12-16 07:31  davidm
+
+	* Makefile:
+
+	missed an openssl patch name change
+
+2007-12-16 07:27  davidm
+
+	* Makefile, README, README.sglinux, patches/linux-2.6.23-ocf.patch,
+	patches/openssl-0.9.8e.patch, patches/openssl-0.9.8g.patch,
+	patches/ssl.patch:
+
+	updates for a new ocf release and associated bits
+
+2007-12-16 06:36  davidm
+
+	* crypto.c:
+
+	Be very careful what you do while potentially in an driver unload
+	state or we will call through NULL pointers.
+
+	Reported by Nawang Chhetan .
+
+2007-12-14 22:32  davidm
+
+	* cryptodev.c:
+
+	Add in an unlock_ioctl when available to help SMP systems a lot.
+	Otherwise all ioctls get a BKL :-(
+
+	Problem found by Egor N. Martovetsky
+
+2007-12-14 18:29  davidm
+
+	* cryptosoft.c:
+
+	reformat the alg table to make it easier to read.
+
+2007-12-14 18:29  davidm
+
+	* crypto.c:
+
+	Fix more driver locking/sleeping bugs report by Nawang Chhetan
+
+
+2007-12-12 21:36  davidm
+
+	* Config.in, Kconfig, Makefile, pasemi/Makefile, pasemi/pasemi.c,
+	pasemi/pasemi_fnu.h:
+
+	Here I'm including my PA Semi driver patch to OCF.  Please consider
+	it for inclusion into next OCF release.
+
+	Egor N. Martovetsky
+
+2007-12-05 00:37  davidm
+
+	* patches/: linux-2.4.35-ocf.patch, linux-2.6.22-ocf.patch:
+
+	More correct count setting if we get a signal
+
+	Adrian Hoban
+
+2007-12-05 00:02  davidm
+
+	* random.c:
+
+	OCF has a static array for holding random data. The random number
+	generator I have can write directly into physically contiguous
+	memory. Static memory comes from the heap and isn't physically
+	contiguous. I could use kmalloc'd memory and then copy into the OCF
+	static buf but I'd like to avoid a memory copy. The following patch
+	(Physically_Contig_Rand.patch) allows me to avoid a memory copy and
+	should not impact the other OCF drivers:
+
+	Adrian Hoban
+
+2007-12-05 00:01  davidm
+
+	* Kconfig:
+
+	Fix a typo in the Kconfig
+
+2007-11-23 19:15  davidm
+
+	* talitos/talitos_dev.h:
+
+	fix the DPRINTF macro so that it actually compiles.
+
+2007-11-22 19:41  davidm
+
+	* ocf-compat.h, talitos/talitos.c, talitos/talitos_dev.h:
+
+	Various updates to get talitos compiling and work on real-world (ie
+	ubuntu) kernels.
+
+2007-11-08 02:21  davidm
+
+	* crypto.c:
+
+	keep and eye on us being completely blocked.  If we have Q's to
+	process, but all the requests are blocked,  sleep.  We do not want
+	to busy loop until a driver unblocks as it uses valuable CPU
+	resources that could be doing something much more important ;-)
+
+2007-11-07 19:04  davidm
+
+	* hifn/hifn7751.c:
+
+	hifn driver was failing to unblock itself under some "out of
+	resources" conditions.	It would return ERESTART to signal it was
+	full but never call crypto_unblock to start things moving again.
+
+2007-11-06 02:09  davidm
+
+	* hifn/hifn7751.c:
+
+	Remove some bogus trace left in the driver for the overflow (too
+	busy) case.
+
+2007-10-12 21:10  gerg
+
+	* crypto.c, ixp4xx/ixp4xx.c:
+
+	Fix up use of kmem_cache_create() - it takes one less argument in
+	2.6.23 onwards.
+
+2007-10-03 02:41  gerg
+
+	* ixp4xx/Makefile:
+
+	The directory locations for includes in CSR-2.4 is different.  Need
+	to modify the CFLAGS accordingly if using CSR-2.4.
+
+2007-09-22 00:39  philipc
+
+	* ixp4xx/Makefile:  linux 2.4 make dep was failing.  This is
+	a quick fix to get it building, need to double check this.
+
+2007-09-19 00:13  mmccreat
+
+	* Config.in:  Add config option CONFIG_OCF_IXP4XX_SHA1_MD5,
+	that enables SHA1 and MD5 hashing to be done by the IXP4xx crypto
+	accelerator (although it is much slower than using cryptosoft).
+
+2007-09-18 21:45  mmccreat
+
+	* Makefile, random.c:  - Force the inclusion of autoconf.h,
+	which contains #defines for CONFIG_xxx	 options for OCF.  -
+	Removing additional -D option, now that we are including the
+	CONFIG_xxx   #defines.
+
+2007-09-18 21:44  mmccreat
+
+	* Kconfig:  Add config option CONFIG_OCF_IXP4XX_SHA1_MD5,
+	that enables SHA1 and MD5 hashing to be done by the IXP4xx crypto
+	accelerator (although it is much slower than using cryptosoft).
+
+2007-09-18 21:37  mmccreat
+
+	* cryptodev.h:  The CRYPTO_MAX_DATA_LEN limit should be
+	0xFFFF ie 64K - 1.
+
+2007-09-18 21:19  mmccreat
+
+	* ixp4xx/ixp4xx.c:  - Rework the code so that the correct IXP
+	function, ixCryptoAccHashPerform(), is	 used to calculate SHA1 and
+	MD5 hashes.    NB: The performance of using the IXP4xx hardware is
+	really, really poor    compared to using cryptosoft (and the kernel
+	crypto).  - Only support SHA1 and MD5 hashing if the
+	CONFIG_OCF_IXP4XX_SHA1_MD5 is	enabled.
+
+2007-08-30 21:42  davidm
+
+	* Makefile:
+
+	do not archive build files in the crypto-tools archive
+
+2007-08-22 19:19  mmccreat
+
+	* cryptodev.c:  Fix up the checking for key lengths, when the
+	key can be of unlimited size.
+
+2007-08-16 01:50  davidm
+
+	* Makefile:
+
+	Better 2.4 compat for "make dep" now working with fastdep.
+
+2007-07-28 08:25  davidm
+
+	* Makefile, README, README.sglinux, patches/crypto-tools.patch,
+	patches/linux-2.4.29-ocf.patch, patches/linux-2.4.35-ocf.patch,
+	patches/linux-2.6.11-ocf.patch, patches/linux-2.6.22-ocf.patch,
+	patches/ssl.patch:
+
+	Update all the patches and put the patch making target back into
+	the Makefile.
+
+2007-07-28 08:25  davidm
+
+	* hifn/hifn7751.c:
+
+	fix an unused variable warning when HARVESTING is disabled
+
+2007-07-27 21:33  davidm
+
+	* hifn/hifn7751.c, ixp4xx/ixp4xx.c, safe/safe.c, talitos/talitos.c:
+
+
+	Remove all the random code if OCF does not have radom harvesting
+	enabled.
+
+2007-07-26 00:36  davidm
+
+	* Kconfig, hifn/hifnHIPP.c, hifn/hifnHIPPvar.h:
+
+	Changes to get the hifn HIPP stub driver to build.
+
+2007-07-25 21:25  davidm
+
+	* Makefile, hifn/Makefile, hifn/hifnHIPP.c, hifn/hifnHIPPreg.h,
+	hifn/hifnHIPPvar.h, ixp4xx/Makefile, ocfnull/Makefile,
+	safe/Makefile, talitos/Makefile:
+
+	Bring in the hifnHIPP driver written by Xelerance.  This is the
+	super hifn chip with full protocol offload.
+
+	Switch to much more traditional Makefile/subdir building.  The
+	Makefiles are nicer now,  but still not beautiful,  2.6 and 2.4
+	capable builds result in a certain amount of uglyiness.
+
+2007-07-24 21:46  davidm
+
+	* cryptodev.c:
+
+	Clean up all the driver id checking and session management so
+	adding/removing drivers all continues to run cleanly.
+
+2007-07-24 20:14  davidm
+
+	* talitos/talitos.c:
+
+	From: Ahsan Kabir
+
+	less than .1% packet corruption was detected using the talitos
+	driver. It turns out we don't need the cipher iv out len/ptr field
+	to do ESP IPsec. Therefore we set the len field as 0, which tells
+	the SEC not to do anything with this len/ptr field.
+
+	Signed-off-by: Ahsan Kabir
+	Signed-off-by: Kim Phillips
+
+2007-07-24 08:25  davidm
+
+	* cryptosoft.c, ocf-bench.c, hifn/hifn7751.c, ixp4xx/ixp4xx.c,
+	safe/safe.c, talitos/talitos.c:
+
+	Switch the remaining GFP_ATOMIC to the newer and not deprecated
+	SLAB_ATOMIC
+
+2007-07-23 22:16  mmccreat
+
+	* ixp4xx/ixp4xx.c:  Add missing ";" from end of dprintk()
+	call!
+
+2007-07-21 01:16  davidm
+
+	* ocf-compat.h, rndtest.c, hifn/hifn7751.c, safe/safe.c:
+
+	pci_register_driver is nothing like it is depending on the kernel,
+	so we need a compat function to fix it up for all kernels before
+	2.6.10, and in different ways for older and not so older versions.
+
+2007-07-20 21:54  davidm
+
+	* safe/safe.c:
+
+	Make the debug macro ';' safe so you do not get compiler warnings
+
+2007-07-20 21:53  davidm
+
+	* talitos/: talitos.c, talitos_dev.h, talitos_soft.h:
+
+	update to the latest FreeBSD driver structure and fix up the code
+	as required.
+
+2007-07-20 03:07  davidm
+
+	* rndtest.c:
+
+	more headers needed to compile on 2.4
+
+2007-07-20 03:00  davidm
+
+	* cryptosoft.c:
+
+	Put in the 2.4 stubs to support compression
+
+2007-07-20 02:53  davidm
+
+	* crypto.c, ocf-compat.h:
+
+	move some more compat stuff into the compat header.
+
+2007-07-20 02:47  davidm
+
+	* talitos/talitos.c:
+
+	support of_platform_driver for newer, ARCH=powerpc based kernels.
+	Signed-off-by: Kim Phillips
+
+2007-07-20 02:46  davidm
+
+	* talitos/talitos.c:
+
+	From: Ahsan Kabir
+
+	When Talitos completes job both the channel and execution unit are
+	capable of generating interrupts.  Talitos used to take two
+	interrupts per request - one for channel completion and the other
+	for execution unit completion. This patch ensures that Talitos
+	takes interrupt only for channel completion. Execution unit will
+	generate interrupt only when there is error and the error
+	interrupts for execution units are not masked.
+
+	Signed-off-by: Ahsan Kabir
+	Signed-off-by: Kim Phillips
+
+2007-07-20 02:37  davidm
+
+	* cryptodev.c:
+
+	Unless the user specifies,  select from both HW and SW.
+
+	Clean up some debug to report the actual ioctl name.
+
+	Compiler warning on newer compilers.
+
+2007-07-20 02:35  davidm
+
+	* cryptodev.h:
+
+	moved dprintk to the compat code,  seemed nicer in there.
+
+2007-07-20 02:35  davidm
+
+	* rndtest.c:
+
+	Need to clean up some warnings etc,  more includes
+
+2007-07-20 02:34  davidm
+
+	* ocf-compat.h, hifn/hifn7751.c, safe/safe.c:
+
+	new shared IRQ flags for 2.6.22 and a safer version of the debug
+	macro
+
+2007-07-20 00:52  davidm
+
+	* cryptosoft.c:
+
+	Implement compression based on the code from the openswan guys.
+
+2007-07-20 00:52  davidm
+
+	* criov.c:
+
+	Fix compiler warning on non-value returning void func.
+
+2007-07-18 22:55  davidm
+
+	* hifn/hifn7751.c, safe/safe.c:
+
+	Use pci_register_driver rather than pci_module_init.
+	pci_module_init has been dropped in 2.6.22 yet pci_register_driver
+	has always existed and used to do some crazy hotplug junk.
+
+2007-07-18 21:55  gerg
+
+	* ixp4xx/ixp4xx.c:
+
+	Added a missing ";" at the end of the ixp_kproces() prototype.	It
+	is IXP465 specific, only showed up when generateing for SG720.
+
+2007-07-17 00:37  davidm
+
+	* Makefile, cryptodev.c, random.c, rndtest.c, hifn/hifn7751.c,
+	ocfnull/ocfnull.c, safe/safe.c:
+
+	Fixup all the debug support for 2.4 kernels,  clean up the entropy
+	harvester to be far more robust.
+
+2007-07-14 02:19  davidm
+
+	* talitos/talitos.c:
+
+	Old patch that had not been applied Kim Phillips
+
+
+2007-07-14 01:12  davidm
+
+	* Config.in, Kconfig, Makefile, cryptodev.c, cryptodev.h, random.c,
+	syscall.h:
+
+	Finally ditched all the syscall stuff.	You can now enable/disable
+	the random harvestor.  Pulled in most of random.c from openswan
+	project and fixed some obvious bugs (that were always there).
+
+2007-07-13 21:59  davidm
+
+	* ocf-compat.h:
+
+	Better error printing and checking for drivers
+
+2007-07-13 21:56  davidm
+
+	* cryptosoft.c:
+
+	Fix some incorrect debug (reporting wrong error type)
+
+2007-07-13 21:55  davidm
+
+	* hifn/hifn7751.c, safe/safe.c:
+
+	Make the code more similar to Free-BSD by reverting to the same
+	debug macros
+
+2007-07-13 21:53  davidm
+
+	* ocfnull/ocfnull.c:
+
+	Fix up the null driver to work again in the new framework.
+
+2007-07-06 23:54  mmccreat
+
+	* cryptodev.c, cryptodev.h, cryptosoft.c:  - Update OCF to
+	work with new Crypto API introduced in 2.6.19 kerneli, and add
+	macros so it work with older kernels.  - Add support for SHA256,
+	SHA384 and SHA512 HASH and HMAC algorithms.  - Cryptosoft: Only
+	register algorithms that the kernel has implementations for.
+
+2007-07-03 19:52  davidm
+
+	* Kconfig, README, README.sglinux, criov.c, crypto.c, cryptodev.c,
+	cryptodev.h, cryptosoft.c, ocf-bench.c, ocf-compat.h, random.c,
+	rndtest.c, uio.h, hifn/hifn7751.c, hifn/hifn7751reg.h,
+	hifn/hifn7751var.h, ixp4xx/ixp4xx.c, ocfnull/ocfnull.c,
+	safe/safe.c, safe/safevar.h, talitos/talitos.c:
+
+	Updated OCF to the lastest FreeBSD version.
+
+	There was a lot of change in here,  some of which will help FIP's,
+	some which won't.
+
+	Did lots of cleaning and diff reduction against the freebsd code.
+	Still more cleaning to do.
+
+2007-06-01 21:58  gerg
+
+	* Config.in:
+
+	Put the regular old Config.in back, needed for puclic releases.
+
+2007-06-01 21:58  gerg
+
+	* Kconfig:
+
+	Change the CONFIG_OCF_IXP400 dependencies to be the same as they
+	where in the Config.in file.
+
+2007-05-28 21:40  gerg
+
+	* Config.in, Kconfig:
+
+	Switch all module configuration over to new style Kconfigs.
+
+2007-05-24 18:49  davidm
+
+	* cryptodev.c, random.c:
+
+	Work around some problems on redhat systems with errno redefinition
+
+2007-04-30 21:09  gerg
+
+	* cryptosoft.c:
+
+	The CRYPTO_TFM_MODE_ family of defines no longer exists from 2.6.21
+	onwards. As far as I can tell you don't need to pass it to the
+	crypto_alloc_tfm() function anymore.
+
+	So define it to be 0 if it doesn't exist.
+
+2007-04-03 02:13  gerg
+
+	* syscall.h:
+
+	Added syscall macros for SH architecture. Just temporary, 'till
+	Dave fixes the OCF code to not use syscalls from the modules :-)
+
+2007-02-16 23:10  davidm
+
+	* syscall.h:
+
+	ensure the temprary 2.6 fix doesn't break 2.4
+
+2007-02-07 22:23  gerg
+
+	* cryptodev.c, random.c, syscall.h:
+
+	Temporary fix for new 2.6 kernels no longer defining in-kernel
+	system call functions. Define them locally for now until we fix
+	properly.
+
+2007-02-07 03:10  gerg
+
+	* ixp4xx/ixp4xx.c:
+
+	Changes to support the different INIT_WORK() mechanism from kernels
+	2.6.20 onwards.
+
+2007-02-06 02:38  gerg
+
+	* crypto.c:
+
+	Cleaned up use of kmem_cache_t and use of SLAB_ATOMIC.
+
+2006-12-05 20:50  cpascoe
+
+	* hifn/hifn7751.c, safe/safe.c, talitos/talitos.c:
+
+	Remove pt_regs from OCF interrupt handlers for 2.6.19+
+
+2006-12-02 03:36  gerg
+
+	* criov.c, crypto.c, cryptodev.c, cryptosoft.c, ocf-bench.c,
+	random.c, rndtest.c, hifn/hifn7751.c, ixp4xx/ixp4xx.c,
+	ocfnull/ocfnull.c, safe/safe.c, talitos/talitos.c:
+
+	Can no longer include linux/config.h as of 2.6.19 kernels.  Need to
+	conditionally include it based on AUTOCONF_INCLUDED not being
+	defined.
+
+2006-10-13 21:52  cpascoe
+
+	* random.c:
+
+	Remove another race condition that may result in us running more
+	than one random thread if modules are reloaded during heavy system
+	load.
+
+2006-10-13 21:18  cpascoe
+
+	* random.c:
+
+	- Permit delivery of SIGKILL to the random thread.  - Do not exit
+	prematurely if poll() is interrupted.  - Improve exit conditions so
+	that we quit immediately, rather than loop   infinitely, if the
+	last RNG source is removed while we are trying to   fill our
+	buffer.
+
+2006-10-03 20:28  cpascoe
+
+	* crypto.c, crypto.c:  AutoMerged
+	>
+	> Zero the empty half of "new driver" buffer, and not past the end
+	of the old
+	> buffer that we are about to free.
+
+2006-10-03 20:28  cpascoe
+
+	* crypto.c:
+
+	Zero the empty half of "new driver" buffer, and not past the end of
+	the old buffer that we are about to free.
+
+2006-08-25 23:57  davidm
+
+	* cryptosoft.c:
+
+	Do not print errors for failed ALG requests unless debugging
+
+2006-07-14 21:44  davidm
+
+	* cryptodev.h:
+
+	2.6.11 and earlier did not have the files_fdtable macro.
+
+2006-06-21 21:26  gerg
+
+	* cryptodev.h, hifn/hifn7751.c, ocfnull/ocfnull.c, safe/safe.c,
+	talitos/talitos.c:
+
+	Fixed up more occurrances of MODULE_PARM() needing to be converted
+	to module_param() (as of 2.6.17 and onwards).
+
+2006-06-21 00:28  gerg
+
+	* cryptosoft.c:
+
+	Change use of MODULE_PARM to module_param() for 2.6.17+ kernels.
+
+2006-06-20 22:13  gerg
+
+	* crypto.c, cryptodev.c, cryptodev.h, ocf-bench.c, ixp4xx/ixp4xx.c:
+
+
+	As of 2.6.17 and onwards module_param is used in place of
+	MODULE_PARM.
+
+2006-06-06 00:31  gerg
+
+	* Makefile:
+
+	Fix Makefile to find includes is using CSR-2.1.
+
+2006-05-31 01:44  gerg
+
+	* cryptodev.h:
+
+	The vars "crypto_usercrypto", "crypto_userasymcrypto" are declared
+	as extern in the header file, but static in the c file. I guessed
+	that they should probably be truely static, so removed the extern
+	declarations from the header.
+
+2006-05-25 21:06  davidm
+
+	* talitos/talitos.c:
+
+	This fixes a situation that I never provably experienced, where a
+	descriptor in memory may be reserved within the proper lock, and
+	freed immediately after, only for a few cycles, right outside the
+	lock.  Kim Phillips
+
+2006-05-15 19:49  davidm
+
+	* criov.c, crypto.c, cryptodev.c, cryptodev.h, cryptosoft.c,
+	ocf-bench.c, random.c, rndtest.c, uio.h, ixp4xx/ixp4xx.c:
+
+	Remove the "all rights reserved" from the Intel copyrights.
+
+2006-05-12 21:19  davidm
+
+	* Config.in, Kconfig, Makefile, cryptodev.c, ocf-bench.c,
+	ocfnull/ocfnull.c:
+
+	Add in a null OCF driver that does nothing at all,  useful for
+	measuring the cost of various parts of the ipsec stack.
+
+2006-05-12 21:17  davidm
+
+	* ixp4xx/ixp4xx.c:
+
+	make sure we do not overwrite a correctly set error type.
+
+2006-05-12 06:52  davidm
+
+	* crypto.c:
+
+	Fix a problem where a driver would return ERESTART (full) but then
+	unblock itself before the upper layer had marked it as blocked.
+	This caused the code to get stuck in crypto_proc and process no
+	more requests.
+
+2006-05-12 06:47  davidm
+
+	* cryptosoft.c:
+
+	Implement CRD_F_KEY_EXPLICIT for cryptosoft so keys can be changed
+	on an active session.
+
+2006-05-10 20:09  davidm
+
+	* README, criov.c, crypto.c, cryptodev.h, cryptosoft.c,
+	ocf-bench.c, random.c, rndtest.c, uio.h, hifn/hifn7751.c,
+	ixp4xx/ixp4xx.c, safe/safe.c, safe/safevar.h:
+
+	update email addresses and other house cleaning
+
+2006-05-10 20:08  davidm
+
+	* cryptodev.c:
+
+	pull in better error checking from openswan modifications
+
+2006-05-10 19:11  davidm
+
+	* cryptosoft.c:
+
+	Fix an unused variable warning when various options are disabled.
+
+2006-05-10 19:10  davidm
+
+	* cryptodev.h:
+
+	Add support for 2.4 kernels for the new FD cloning operation
+
+2006-05-09 19:48  davidm
+
+	* hifn/hifn7751.c:
+
+	remove the hifn 7855 support,  this driver will never work with
+	that chip.
+
+2006-05-08 23:34  davidm
+
+	* hifn/hifn7751var.h:
+
+	Contiguous buffer support so that ocf-bench can run properly.
+
+2006-05-05 23:21  davidm
+
+	* hifn/hifn7751.c:
+
+	Add in contiguous buffer support so that ocf-bench and run on the
+	driver.
+
+2006-05-05 23:14  davidm
+
+	* ocf-bench.c:
+
+	Our requests were out of order,  need to do crypto then auth on
+	encrypt requests.  Some drivers enforce this.
+
+2006-05-04 23:21  davidm
+
+	* crypto.c:
+
+	Do not run "newsession" with lock,  since newsession may sleep on
+	some targets.  Handle the accounting so that things are not pulled
+	from underneath us.
+
+2006-05-04 23:20  davidm
+
+	* cryptodev.c:
+
+	Switch to a less optimal (marginally) solution for creating a new
+	fd that appears to work in far more versions of the kernel
+	including 64bit versions.
+
+2006-05-04 18:54  davidm
+
+	* ocf-bench.c:
+
+	Turn off the IXP access lib benchmarking by default as most people
+	don't have it.
+
+	Paul Wouters
+
+2006-04-01 08:23  davidm
+
+	* Makefile:
+
+	Remove more temp files when cleaning
+
+2006-04-01 08:12  davidm
+
+	* hifn/hifn7751reg.h:
+
+	7855 PCI id's as yet untested
+
+2006-04-01 08:08  davidm
+
+	* hifn/hifn7751.c:
+
+	add PCI id's for the 7855 and AES support,  card is untested still
+	as it requires 128MB of PCI memory !
+
+2006-03-31 08:38  davidm
+
+	* README.sglinux:
+
+	small update to instructions with corrected patch name
+
+2006-03-31 00:23  davidm
+
+	* Config.in:
+
+	Add the Talitos driver to the 2.4 config,  even though it probably
+	won't compile.
+
+2006-03-30 07:48  davidm
+
+	* Kconfig, Makefile, talitos/talitos.c, talitos/talitos_dev.h,
+	talitos/talitos_soft.h:
+
+	Please find attached the freescale SEC driver for OCF.	It's been
+	(most recently) tested on an SEC2.0 based MPC8541E
+	(cryptographically identical to the MPC8555E) under 2.6.15.2, with
+	openssl-0.9.8a and openswan-2.4.3 (2.4.5rc5 won't keep the security
+	association up for me for some reason).
+
+	Please feel free to add it to your next release of OCF-Linux :-)
+
+	Kim Phillips
+
+2006-03-20 19:34  davidm
+
+	* safe/: safe.c, safevar.h:
+
+	Safenet 1141 v1.0 chips have a DMA lockup error if you access the
+	chip while DMA'ing.  As a work around you need to limit your DMA to
+	256 byte chunks using scatter/gather descriptors.  All the SG/SME
+	products have v1.0 chips and would lockup with more than two
+	outstanding packets at one time.
+
+	Fix the KASSERT macro
+
+	Add some more exhaustive initialisation.
+
+2006-03-15 21:58  davidm
+
+	* cryptodev.h, random.c:
+
+	Switch random support to "ints" since that is what the kernel uses
+	and not using the same thing is 64bit wise a bad idea.
+
+	Fix FIP's code to ensure correct amount of data is passed in.
+
+	Add work around for broken 64bit OS RNG support (disable it)
+
+	General code cleanups.
+
+2006-03-15 21:55  davidm
+
+	* hifn/hifn7751.c:
+
+	Fixes for 64bit OS's,  make sure PCI address are within bus space,
+	make sure we order writes to the bus so that chip functions
+	correctly.  Some small cleanups.
+
+2006-03-15 21:48  davidm
+
+	* hifn/hifn7751var.h:
+
+	Remove unused field from structure
+
+2006-03-15 21:47  davidm
+
+	* safe/safe.c:
+
+	Make the code more 64bit OS compatible,  force PCI address space
+	and so on.
+
+2006-03-09 20:42  davidm
+
+	* ixp4xx/ixp4xx.c:
+
+	If we call ixpCryptoAccInit() and it fails,  just assume that it
+	has already been called.  This allows our "rc" scripts to be
+	openswan and freeswan compatible ore easily.
+
+2006-02-28 23:21  davidm
+
+	* README:
+
+	generalise it a bit so rel-dates don't get in the way
+
+2006-02-28 01:52  davidm
+
+	* README, patches/ssh.patch, patches/ssl.patch:
+
+	Updated the README and patches for a release
+
+2006-02-25 09:21  davidm
+
+	* README, README.sglinux:
+
+	updates for a new release of OCF,  ssl patches and so on.
+
+2006-02-25 08:44  davidm
+
+	* crypto.c:
+
+	We were calling the "process"routines with interrupts disabled.  I
+	can see no good reason for this and it provokes badness warnings in
+	2.6 under some conditions.
+
+	I am going to run with the Q's unlocked for processing,  and
+	hopefully it will allow the system to be more responsive.  It
+	hasn't affected ipsec throughput in any way.  Userland throughput
+	(multi threaded) may have improved significantly though,  but it
+	needs more testing.
+
+2006-02-24 23:32  davidm
+
+	* cryptodev.c:
+
+	Whoa,  set the segments with uninitted values can't be good.  Clean
+	out the rest of the old code that was accidently left in.
+
+2006-02-22 01:02  davidm
+
+	* cryptodev.c:
+
+	Still not sure about this one,	but this is working for all the
+	cases I can see so far.  If it gets weird,  I am going to switch to
+	a simple clone and chain the sessions rather than a new fcr per fd.
+
+2006-02-20 22:12  davidm
+
+	* cryptodev.c:
+
+	Error handling case could free data that was not allocated
+
+	Ronen Shitrit  Marvell Semiconductor Israel
+	Ltd
+
+2006-02-20 21:57  davidm
+
+	* cryptosoft.c:
+
+	Proper SHA/MD5 (non hmac) implementation,  remove some retrictions
+	for hashes (which only applied to crypto) and fix over zealous
+	error checking.
+
+	Ronen Shitrit  Marvell Semiconductor Israel
+	Ltd
+
+2006-02-09 21:15  davidm
+
+	* cryptodev.c:
+
+	The code wasn't quite right and needed some fixing for proper file
+	accounting.
+
+2006-01-24 20:08  davidm
+
+	* crypto.c, cryptodev.c, hifn/hifn7751.c:
+
+	We implement our own open in the CRIOGET ioctl now which does
+	pretty much the same thing as the BSD version,	rather than use an
+	open system call which gets caught out by chroot.
+
+2006-01-06 00:42  gerg
+
+	* cryptosoft.c:
+
+	Moved "{" block marker inside the set of
+	"defined(CONFIG_CRYPTO_HMAC) || defined(CONFIG_CRYPTO_HMAC_MODULE)"
+	code (in function swcr_process) so that it compiled if these where
+	not defined.
+
+2005-11-11 01:44  davidm
+
+	* Makefile, README:
+
+	cleanups for a general release of OCF
+
+2005-11-11 01:18  davidm
+
+	* patches/ssl.patch:
+
+	Updated the ssl patch to the latest and greatest changed we have
+	made.
+
+2005-11-10 21:41  davidm
+
+	* Makefile, ixp4xx/ixp4xx.c:
+
+	Better debug for bad input.
+
+	Have make clean do more cleaning and less talking.
+
+2005-11-03 20:53  davidm
+
+	* cryptosoft.c:
+
+	clean up some compilation errors with various options on/off
+
+2005-10-25 00:25  davidm
+
+	* Makefile:
+
+	Add a patch target that generates full kernel patches to add OCF
+	into either a 2.4 or 2.6 kernel as a single patch.
+
+2005-10-25 00:24  davidm
+
+	* Kconfig:
+
+	Make sure all OCF settings depend on OCF_OCF
+
+2005-09-23 02:45  davidm
+
+	* README, README.sglinux:
+
+	new crypto-tools archive to keep the tools up to date
+
+2005-09-23 02:08  davidm
+
+	* Makefile, README, README.sglinux:
+
+	updates for doing OCF releases
+
+2005-09-23 01:59  davidm
+
+	* patches/: ssh.patch, ssl.patch:
+
+	updated the patches for the latest in fixes etc to ssh/ssl for a
+	new OCF release before the openswan 2.4.1 merge.
+
+2005-09-21 00:57  davidm
+
+	* Makefile, cryptosoft.c, hifn/hifn7751.c, ixp4xx/ixp4xx.c:
+
+	Fixes for building cleanly under 2.6
+
+2005-09-13 23:11  davidm
+
+	* ocf-bench.c:
+
+	Add an exit function for cleaner 2.6 support.  Patch from Ronen
+	Shitrit
+
+2005-07-30 00:23  davidm
+
+	* cryptosoft.c:
+
+	Add proper hooks for MD5/SHA and their HMAC countrparts processing
+	from cryptodev.
+
+2005-07-29 01:50  davidm
+
+	* cryptodev.c:
+
+	cryptodev did not support MD5 and SHA1,  only the HMAC versions
+
+2005-07-29 01:05  davidm
+
+	* ixp4xx/ixp4xx.c:
+
+	register for MD5,  return 16 bytes for MD5 and 12 for MD5_HMAC,
+	likewise for SHA1 (only 20 is not HMAC).
+
+2005-07-28 21:52  davidm
+
+	* ixp4xx/ixp4xx.c:
+
+	First pass fix of userland MD5 processing.  We now do as well as
+	the safenet does,  I think.
+
+2005-07-27 06:23  davidm
+
+	* cryptodev.c:
+
+	udelay doesn't give up the current thread, thus the kernel will get
+	locked if a process is killed but the hardware never completes the
+	crypto request.
+
+2005-07-22 02:07  davidm
+
+	* crypto.c, cryptodev.h:
+
+	Implement queuing limits for input/output and OCF requests.
+
+	Implement 2.6 style work queues instead of the 2.4 task_queues.
+
+2005-07-21 20:42  davidm
+
+	* cryptodev.h, ocf-bench.c, ixp4xx/ixp4xx.c:
+
+	Fix OCF to use work queues,  add 2.4 version of work queues to
+	cryptodev.h for use in ipsec and elsewhere.
+
+	Problem and initial patch provided by David Vrabel
+	.  Cleaned up and 2.4 support added by me.
+
+2005-07-21 19:08  davidm
+
+	* random.c:
+
+	Fix spinlock initialisation, problem reported by Andy @
+	windsorcarclub.co.uk.
+
+2005-07-20 20:24  davidm
+
+	* cryptodev.c:
+
+	fix a silly spelling mistake
+
+2005-07-08 00:56  gerg
+
+	* Makefile:
+
+	Only build ocf-bench when CONFIG_OCF_BENCH is acrually enabled.
+
+2005-06-27 20:29  davidm
+
+	* ixp4xx/ixp4xx.c:
+
+	Ok,  we need a simple implentation here or we go too slow for UDP
+	tests.	For now,  if the Q is full just ditch the packet,  someone
+	has to do it.
+
+2005-06-25 01:13  davidm
+
+	* safe/safe.c:
+
+	bytes swapping etc all seems wrong for safenet on BE,  these fixes
+	see both MD5 and SHA1 working with OpenSwan.
+
+2005-06-22 23:10  davidm
+
+	* random.c:
+
+	clean up some ifdef code a little
+
+2005-06-22 21:28  davidm
+
+	* ixp4xx/ixp4xx.c:
+
+	Make sure we do not call blocking functions from ISR callable
+	routines.  In this case we were calling ixCryptoAccCtxUnregister.
+
+	Run all the random numbers through SHA1 process to ensure more
+	uniform distribution of bits (NOTE:  it is not more random in any
+	way)
+
+2005-06-21 00:11  davidm
+
+	* ixp4xx/ixp4xx.c:
+
+	do not process requests from the register callback, gets the AES
+	code all messed up.
+
+	Align caches on HW cache boundaries ot improve speed.
+
+	More tracking or potential errors.
+
+2005-06-15 01:55  davidm
+
+	* ixp4xx/ixp4xx.c:
+
+	technically ixCryptoAccCtxRegister cannot be called from IRQ
+	context, so run it from the immediate BH.
+
+2005-06-14 23:13  davidm
+
+	* ocf-bench.c:
+
+	Fix some compile warnings/errors
+
+2005-06-14 20:52  davidm
+
+	* Config.in, Kconfig, Makefile, ocf-bench.c:
+
+	Add in kernel benchmark driver
+
+	Support for building under CSR 1.4 and 2.0 is now complete
+
+2005-06-14 20:51  davidm
+
+	* crypto.c:
+
+	Do not use immediate processing for SW drivers otherwise we hold a
+	lock for too long.  Instead force BATCH processing.
+
+	Problem found by David Vrabel
+
+2005-06-14 20:46  davidm
+
+	* cryptodev.c:
+
+	Fix up AES minimum key size
+
+	Make some more variables static
+
+2005-06-14 20:36  davidm
+
+	* ixp4xx/ixp4xx.c:
+
+	Use kernel caches as they are more efficient and faster to obtain.
+
+	Fix some spelling
+
+	Tune PKE to only use the space required.  Turn off go fast options
+	to reduce any speed-related cracking.
+
+	Only zero data that needs to be zeroed (save some cycles)
+
+2005-06-02 21:42  davidm
+
+	* ixp4xx/ixp4xx.c:
+
+	Added optimisations ideas from Intel the improve the PKE
+	performance for 512 and 1024 bits operations.
+
+2005-06-01 02:13  davidm
+
+	* ixp4xx/ixp4xx.c:
+
+	Looks like I found the ixp bug.  Using OSAL buffer routines on
+	non-OSAL buffers is a very very bad thing to do.  Must double check
+	all the API's I am using (ie., PKE) just to be sure.
+
+2005-05-31 21:38  davidm
+
+	* Config.in, Kconfig:
+
+	Updated/Added the menu wrapper for the config options
+
+2005-05-31 21:18  gerg
+
+	* Config.in:
+
+	Reworked the config.in so that each sub-module has its own
+	Config.in.  That way it is easier to make release trees with some
+	modules left in.
+
+2005-05-30 19:46  davidm
+
+	* ixp4xx/ixp4xx.c:
+
+	Not all the Pke code was appropriately ifdef'd
+
+2005-05-28 01:49  davidm
+
+	* cryptosoft.c:
+
+	We were not injecting data in the right places nor calling the
+	cipher code the best was under all situations.	We are now good
+	enought to do ESP/AH processing with 3DES and SHA1-HMAC.
+
+2005-05-28 01:42  davidm
+
+	* hifn/hifn7751.c:
+
+	Non atomic kmallocs at IRQ time are bad and cause lockups
+
+2005-05-21 08:31  davidm
+
+	* README, README.sglinux:
+
+	Some small updates to email and patches that no longer exist
+
+2005-05-21 08:25  davidm
+
+	* cryptosoft.c:
+
+	better error message on failure,  a lot of the error check we do
+	sems to break apps like cryptotest, not sure who is right yet.
+
+2005-05-21 00:55  davidm
+
+	* criov.c, crypto.c, cryptodev.c, cryptodev.h, cryptosoft.c,
+	random.c, rndtest.c, uio.h, hifn/hifn7751.c, ixp4xx/ixp4xx.c,
+	safe/safe.c, safe/safevar.h:
+
+	Convert to CyberGuard email addresses for OCF files.
+
+2005-05-21 00:28  davidm
+
+	* crypto.c, cryptodev.c, cryptosoft.c, hifn/hifn7751.c,
+	ixp4xx/ixp4xx.c, safe/safe.c:
+
+	final round of naming changes,	hifn driver also confirmed working
+	in SG710
+
+2005-05-20 23:50  davidm
+
+	* cryptosoft.c, random.c, uio.h, ixp4xx/ixp4xx.c:
+
+	more copyright/author updates etc for Intel
+
+2005-05-20 23:48  davidm
+
+	* criov.c, crypto.c, cryptodev.c, cryptodev.h, cryptosoft.c,
+	random.c, rndtest.c, uio.h, ixp4xx/ixp4xx.c, safe/safe.c,
+	safe/safevar.h:
+
+	updated copyrights to reflect Intels interest/investment in the
+	project
+
+2005-05-20 20:31  davidm
+
+	* hifn/: hifn7751.c, hifn7751var.h:
+
+	Most of a working SKB implementation now
+
+2005-05-20 20:30  davidm
+
+	* safe/: md5.c, md5.h, safe.c, sha1.c, sha1.h:
+
+	Full AH/ESP processing now working,  just added BSD sha/md5
+	routines as needed for now.
+
+2005-05-20 20:30  davidm
+
+	* random.c:
+
+	use the new BSD rndtest code rather than the old GPL fips code
+
+2005-05-20 20:28  davidm
+
+	* Makefile, fips.c, fips.h, rndtest.c, rndtest.h:
+
+	Get rid of the GPL version of the fips test and use a BSD licensed
+	version so no one can get upset and cry derivative :-)
+
+2005-05-20 08:19  davidm
+
+	* ixp4xx/ixp4xx.c:
+
+	High throughput improvements. Can now handle multiple simultaneous
+	requests.
+
+2005-05-20 00:55  davidm
+
+	* ixp4xx/ixp4xx.c:
+
+	works with openswan/OCF for ipsec receive.
+
+	fixes ESP/AUTH processing (esp. HMAC)
+
+	This driver needs a rework for kernel IPSEC as it's current packet
+	at a time processing is not up to the task.
+
+2005-05-20 00:53  davidm
+
+	* cryptosoft.c:
+
+	working ESP/AUTH code now impleneted.  Can do RX processing for
+	openswan KLIPS.
+
+	Fixes numerous problems in skb processing
+
+	Fixes broken HMAC code and IV processing
+
+2005-05-14 01:44  davidm
+
+	* cryptodev.c, ixp4xx/ixp4xx.c:
+
+	Cleanup IXP key processing to guarantee only a single outstanding
+	request rather than relying on the intel driver to get it right.
+	Stops us losing requests.
+
+	Tighten up the cryptodev response to "no answer" so that we don't
+	take all the CPU.  Only happens if there is a driver bug.
+
+2005-05-14 00:07  davidm
+
+	* Config.in, Kconfig, Makefile, crypto.c, fips.c, fips.h, random.c,
+	ixp4xx/ixp4xx.c, tools/bench-ocf:
+
+	PKE and RNG support running on the ixp driver,	added PKE bench
+	marking to script.  Still some multi-thread problems in the PKE
+	code.
+
+	Added FIP RNG checking option to config and code.
+
+2005-05-10 19:18  davidm
+
+	* Makefile, ixp4xx/ixp4xx.c:
+
+	Get the OCF stuff building for the 465 and CSR-2.0
+
+2005-04-27 19:18  davidm
+
+	* cryptodev.h, random.c, hifn/hifn7751.c, safe/safe.c:
+	cleanup the random number interface some more
+
+2005-04-27 00:57  davidm
+
+	* cryptodev.h, cryptosoft.c, random.c, hifn/hifn7751.c,
+	ixp4xx/ixp4xx.c, patches/linux-2.4.29-add_true_randomness.patch,
+	patches/linux-2.6.11-add_true_randomness.patch, safe/safe.c,
+	safe/safevar.h:
+
+	Switch to a more "user" like random number handling.  Drivers no
+	longer poll for RNG data,  we pull it as needed to fill
+	/dev/random's entropy.
+
+	Implement sk_buff handling within the OCF framework.
+
+	fixup IV handling in cryptosoft.
+
+2005-04-27 00:41  davidm
+
+	* crypto.c:
+
+	Fix a race condition with the starting of kernel threads.  The
+	threads were running before the pid assignment in the parent.
+
+2005-03-24 23:57  davidm
+
+	* Makefile:
+	include crypto-tools.patch in the release file
+
+
+
+2005-03-24 20:14  davidm
+
+	* safe/safe.c:
+	Fixup compile time warnings due to some left over BSDisms
+
+
+2005-03-24 00:53  davidm
+
+	* Makefile, README, README.sglinux, patches/linux-2.4.29-ocf.patch,
+	patches/linux-2.6.11-ocf.patch, patches/ocf-linux-2.4.29.patch,
+	patches/ocf-linux-2.6.11.patch:
+	added cleaner patch names and a tarball target to aid releases
+
+
+2005-03-24 00:28  davidm
+
+	* patches/crypto-tools.patch,
+	patches/linux-2.4.29-add_true_randomness.patch,
+	patches/linux-2.6.11-add_true_randomness.patch,
+	patches/ocf-linux-2.4.29.patch, patches/ocf-linux-2.6.11.patch,
+	patches/ssh.patch, patches/ssl.patch, tools/bench-ocf:
+	move all the release file patches into CVS for simplicity
+
+
+2005-03-23 20:37  davidm
+
+	* safe/safe.c:
+	remove excessive debug from RNG routines so that you can turn on
+	debug and live through it
+
+
+
+2005-03-23 02:23  davidm
+
+	* safe/safe.c:
+	fix memory corruption for mod_exp and the safenet,  we were copying
+	back more than the space available.
+
+
+
+2005-03-22 21:45  davidm
+
+	* crypto.c, cryptodev.c, cryptodev.h, safe/safe.c:
+	fixup a major sync issues with key processing (callback called
+	before sleeping).  Improve its performance while we are there with
+	a CBIMM (callback immediate) option.
+
+
+
+2005-03-19 00:33  davidm
+
+	* random.c:
+	A new randomness function for both 2.4 and 2.6 that replaces out
+	previous old solution for the hifn driver with more generic code
+	that works on both kernels.
+
+		add_true_randomness(u_int32_t *buf, int nwords);
+
+
+
+2005-03-18 21:01  davidm
+
+	* Makefile, cryptodev.h, random.c, hifn/hifn7751.c, safe/safe.c:
+	RNG support in both the safenet and the hifn plus the required
+	kernel support.
+
+	"hd /dev/random" runs much much faster now :-)
+
+
+
+2005-03-17 23:29  toby
+
+	* cryptodev.c:  Make sure the CIOCASYMFEAT ioctl on
+	/dev/crypto copies out the capable features.
+
+2005-03-17 01:19  davidm
+
+	* safe/: safe.c, safereg.h, safevar.h:
+	hardware PK acceleration on the safenet (CRK_MOD_EXP only)
+
+
+2005-03-16 04:28  davidm
+
+	* criov.c, crypto.c, cryptodev.c, safe/safe.c:
+	fixup the FBSD id stuff to compile :-)
+
+2005-03-16 04:02  davidm
+
+	* README, README.sglinux, TODO:
+	Updated with versions,	removed tabs,  new kernel versions, web
+	site etc
+
+2005-03-16 03:45  davidm
+
+	* criov.c, crypto.c, cryptodev.c, cryptodev.h:
+	more license formatting and version id's to help keep in touch with
+	FreeBSD
+
+2005-03-16 03:16  davidm
+
+	* safe/safe.c:
+	up to date with FreeBSD versioning, no changes to merge
+
+2005-03-16 03:15  davidm
+
+	* safe/safe.c:
+	include FreeBSD version info so I can track changes better
+
+
+2005-03-16 03:11  davidm
+
+	* hifn/: hifn7751.c, hifn7751reg.h, hifn7751var.h:
+	incorporate latest updates from FreeBSD:
+
+	| Update support for 795x parts: | o rework pll setup code to
+	follow h/w specification | o add hint.hifn.X.pllconfig to specify
+	reference clock setup |   requirements; default is pci66 which
+	means the clock is |   derived from the PCI bus clock and the card
+	resides in a |	 66MHz slot | | Tested on 7955 and 7956 cards;
+	support for 7954 cards not enabled | since we have no cards to test
+	against.
+
+
+
+2005-03-16 02:50  davidm
+
+	* Config.in, Kconfig, README, crypto.c, cryptodev.c, cryptodev.h,
+	hifn/hifn7751.c, hifn/hifn7751var.h, safe/safe.c, safe/safevar.h:
+	cleanups to build and run on 2.6.11 and 2.4.29 for a public release
+
+	included configs locally to reduce patch to kernel and required
+	user effort.
+
+	pci_dma_sync_single fix from Michele Baldessari
+	,	with modifications by me to work on all
+	kernels.
+
+
+
+2005-03-11 23:30  davidm
+
+	* Makefile:
+	fix up the compiling again, I had outstanding commits for this one
+	:-)
+
+2005-03-11 21:56  danield
+
+	* Makefile: Get ocf modules building again.
+
+2004-12-25 07:12  davidm
+
+	* TODO:
+	safenet is all good now AKAIK
+
+2004-12-25 07:02  davidm
+
+	* Makefile, crypto.c, cryptodev.c, cryptosoft.c, uio.h:
+	2.6 port of the OCF framework,	tested on Xscale and compiled for
+	x86
+
+
+
+2004-12-10 02:41  davidm
+
+	* hifn/hifn7751.c:
+	Fix compilation as l_flags was not defined for LOCK in pci_remove
+
+
+2004-12-06 19:15  davidm
+
+	* hifn/hifn7751.c, safe/safe.c:
+	* Put locking into hifn_remove to ensure interrupts are not running
+	while we   remove the driver
+
+	  Use del_timer_sync (need to ensure timer is not running on
+	another CPU
+	* when we delete it).
+
+	Improvements suggested by Evgeniy Polyakov <johnpol@2ka.mipt.ru>
+
+
+2004-12-02 09:16  davidm
+
+	* README, README.sglinux:
+	* Fix up all tabs to be spaces * explain how to update the ocf
+	support in the patch to the current version.
+
+
+
+2004-12-02 09:11  davidm
+
+	* README:
+	* fix some spelling/grammar * add more info on the ssl.patch file
+	and what it contains
+
+2004-12-02 09:08  davidm
+
+	* README.sglinux:
+	give some more instructions on the shar archive and extracting the
+	release.
+
+2004-12-02 09:03  davidm
+
+	* README.sglinux:
+	how to include crypto-tools into 3.1.6
+
+
+
+2004-12-02 08:48  davidm
+
+	* README.sglinux:
+	updated for a new crypto patch for openssl-0.9.7e
+
+2004-12-02 08:04  davidm
+
+	* README, README.sglinux:
+	Clean up the README's to use the new SG Linux alpha and a specific
+	2.4.28 patch.
+
+
+
+2004-12-01 23:11  davidm
+
+	* Makefile, criov.c, crypto.c, cryptodev.c, cryptosoft.c,
+	hifn/Makefile, hifn/hifn7751.c, ixp4xx/Makefile, ixp4xx/ixp4xx.c,
+	safe/Makefile, safe/safe.c:
+	Re-worked the Makefiles so that including the code into a standard
+	non-SG kernel is really easy.
+
+	Fixed a non-initialised bug that was introduce into cryptosoft.c
+	with the additional error checking.
+
+
+
+2004-12-01 01:54  davidm
+
+	* TODO:
+	updates based on things that have been fixed.
+
+2004-12-01 01:53  davidm
+
+	* safe/: safe.c, safevar.h:
+	Cleanup some old hacks based on the much cleaner port of the hifn
+	driver.
+
+2004-12-01 01:53  davidm
+
+	* cryptosoft.c:
+	fix some crashes due to bad buffer sizes etc.
+
+2004-11-27 09:16  davidm
+
+	* crypto.c, cryptodev.c, ixp4xx/ixp4xx.c:
+	Fixed some nasty problems that were stopping reliable behaviour:
+
+	1) we were not initialising some of our lists/wait queues,  which
+	 meant they appears to have things in them when in fact they did
+	not.	 This actually didn't seems to cause any problems but is
+	extremely bogus.
+
+	2) While a process was waiting for the crypto engine,  if
+	itreceived a	signal we would lose sync with the engine and end
+	up allowing out-of-band    actions that were invalid (ie.,
+	unregistering a context that was still	  active in a crypto
+	operation).
+
+	   Of source the CryptACC should probably deal with this as well
+	;-)
+
+
+
+2004-11-26 01:33  davidm
+
+	* TODO:
+	We should also hook in the random number generators to linux
+
+
+2004-11-26 01:33  davidm
+
+	* hifn/hifn7751.c:
+	Changed all the accesses to DMS descriptors to not set the valid
+	bit until after everything else was set.  This got the driver
+	running smoothly, along with a fixup to the pci_map_uio which
+	wasn't settings lengths correctly.
+
+
+
+2004-11-25 21:15  davidm
+
+	* ixp4xx/ixp4xx.c:
+	document why using a new context for each packet is actually a
+	better idea than allocating one (actually two) per session.
+
+
+
+2004-11-25 08:48  davidm
+
+	* README, README.sglinux:
+	updates from email with Intel to fix a few little things
+
+
+2004-11-25 00:02  davidm
+
+	* README, README.sglinux, safe/safe.c:
+	Some cleanups of doc and so on for Intel/General use
+
+2004-11-23 07:58  davidm
+
+	* TODO:
+	safenet is working on big endian machines now
+
+
+
+2004-11-23 07:56  davidm
+
+	* ixp4xx/: Makefile, ixp4xx.c:
+	Reference the Intel library,  cleanup the IV sizes and turn on AES.
+	 Fix Makefile for new config options.
+
+
+
+2004-11-23 07:52  davidm
+
+	* Makefile:
+	Complete the changes of ixp to ixp4xx (IXP4xx) as appropriate
+
+
+2004-11-23 00:33  davidm
+
+	* Makefile, README, ixp4xx/Makefile:
+	Changes all references to ixp to be ixp4xx at Intels request
+
+
+2004-11-20 01:07  davidm
+
+	* safe/: safe.c, safereg.h:
+	fully working safenet on BE machines
+
+2004-11-19 01:03  davidm
+
+	* Makefile, hifn/Makefile, hifn/hifn7751.c, hifn/hifn7751reg.h,
+	hifn/hifn7751var.h:
+	hifn driver code complete and compiling,  needs a test now ;-)
+
+
+2004-11-18 21:45  davidm
+
+	* hifn/: Makefile, hifn7751.c, hifn7751var.h:
+	Makefile for building the hifn driver
+
+2004-11-18 21:44  davidm
+
+	* hifn/: hifn7751.c, hifn7751reg.h, hifn7751var.h:
+	Checkin the orginal freebsd source for the hifn driver as a
+	reference in the future.
+
+
+
+2004-11-18 10:05  davidm
+
+	* Makefile, README, TODO, criov.c, crypto.c, cryptodev.c,
+	cryptodev.h, cryptosoft.c, uio.h, ixp4xx/Makefile, ixp4xx/ixp4xx.c,
+	safe/Makefile, safe/safe.c, safe/safevar.h:
+	Clean up license and copyright info to be more acceptable (and
+	present) Clean up debug and trace Fixup memory freeing etc on
+	safenet Fix compiler warnings (some were bugs)
+
+
+
+2004-11-17 02:23  davidm
+
+	* safe/safe.c:
+	working on Xscale (big endian) now but packet data is getting
+	stuffed up due to endian problems (at least now we are talking to
+	the chip correctly for BE).  Good enough to test packet throughput,
+	 no good for testing scp.
+
+
+
+2004-11-17 02:21  davidm
+
+	* criov.c:
+	Make sure public symbols are exported by including the correct
+	header files
+
+2004-11-17 02:15  davidm
+
+	* crypto.c:
+	even better cleanup of kernel threads on exit
+
+2004-11-17 02:15  davidm
+
+	* cryptosoft.c:
+	return some trace to debug so it doesn't appear
+
+2004-11-17 02:14  davidm
+
+	* Makefile:
+	Make sure all drivers get built, not just IXP
+
+2004-11-16 21:31  davidm
+
+	* crypto.c:
+	Fix problem with reboots and driver not unloading cleanly,  we were
+	not handling signals correctly in the kernel threads,
+
+2004-11-10 10:46  davidm
+
+	* ixp4xx/ixp4xx.c:
+	fix serious context leak,  itturns out the context is still
+	considered busy while it is calling the perform callback,  so we
+	cleanup on closing the session and on allocating the next context
+	to work around this.
+
+
+
+2004-11-10 05:26  davidm
+
+	* crypto.c, cryptodev.c, ixp4xx/ixp4xx.c, safe/safe.c:
+	cleaned out some debug,  found MAX tunnels bug,  traced it various
+	other cleanups.
+
+
+
+2004-11-10 04:02  davidm
+
+	* cryptodev.c, cryptosoft.c, ixp4xx/ixp4xx.c, safe/Makefile,
+	safe/safe.c:
+	Fix up kmalloc usage to always zero resulting buffer everywhere
+	(stops crashes in ixp)
+
+	Add some function debug to ixp so you can see it working
+
+	Fix safe driver to build and install in a real tree.
+
+
+
+2004-11-10 02:27  davidm
+
+	* Makefile, criov.c, crypto.c, cryptosoft.c, ixp4xx/Makefile,
+	ixp4xx/ixp4xx.c:
+	Compiling OCF modules for the IXP crypto, needs testing now
+
+
+2004-11-09 19:16  davidm
+
+	* criov.c, crypto.c, cryptodev.c, cryptodev.h, cryptosoft.c,
+	cryptosoft.h, uio.h, safe/safe.c, safe/safevar.h:
+	The linux port of OCF with working safenet and software modules.
+	Still some bugs with mutiple crypto threads using the safenet
+	driver.
+
+
+
+2004-11-09 18:49  davidm
+
+	* Makefile, criov.c, crypto.c, cryptodev.c, cryptodev.h,
+	cryptosoft.c, cryptosoft.h, uio.h, safe/Makefile, safe/safe.c,
+	safe/safereg.h, safe/safevar.h:
+	Check in the orginal free-bsd sources for the OCF support.  This
+	allows us to diff against it later to see if we botched anything
+	major league.
diff --git a/crypto/ocf/Config.in b/crypto/ocf/Config.in
new file mode 100644
index 000000000000..652f76e90743
--- /dev/null
+++ b/crypto/ocf/Config.in
@@ -0,0 +1,38 @@
+#############################################################################
+
+mainmenu_option next_comment
+comment 'OCF Configuration'
+tristate 'OCF (Open Cryptograhic Framework)' CONFIG_OCF_OCF
+dep_mbool '  enable fips RNG checks (fips check on RNG data before use)' \
+				CONFIG_OCF_FIPS $CONFIG_OCF_OCF
+dep_mbool '  enable harvesting entropy for /dev/random' \
+				CONFIG_OCF_RANDOMHARVEST $CONFIG_OCF_OCF
+dep_tristate '  cryptodev (user space support)' \
+				CONFIG_OCF_CRYPTODEV $CONFIG_OCF_OCF
+dep_tristate '  cryptosoft (software crypto engine)' \
+				CONFIG_OCF_CRYPTOSOFT $CONFIG_OCF_OCF
+dep_tristate '  safenet (HW crypto engine)' \
+				CONFIG_OCF_SAFE $CONFIG_OCF_OCF
+dep_tristate '  IXP4xx (HW crypto engine)' \
+				CONFIG_OCF_IXP4XX $CONFIG_OCF_OCF
+dep_mbool    '  Enable IXP4xx HW to perform SHA1 and MD5 hashing (very slow)' \
+				CONFIG_OCF_IXP4XX_SHA1_MD5 $CONFIG_OCF_IXP4XX
+dep_tristate '  hifn (HW crypto engine)' \
+				CONFIG_OCF_HIFN $CONFIG_OCF_OCF
+dep_tristate '  talitos (HW crypto engine)' \
+				CONFIG_OCF_TALITOS $CONFIG_OCF_OCF
+dep_tristate '  pasemi (HW crypto engine)' \
+				CONFIG_OCF_PASEMI $CONFIG_OCF_OCF
+dep_tristate '  ep80579 (HW crypto engine)' \
+				CONFIG_OCF_EP80579 $CONFIG_OCF_OCF
+dep_tristate '  Micronas c7108 (HW crypto engine)' \
+				CONFIG_OCF_C7108 $CONFIG_OCF_OCF
+dep_tristate '  uBsec BCM5365 (HW crypto engine)'
+				CONFIG_OCF_UBSEC_SSB $CONFIG_OCF_OCF
+dep_tristate '  ocfnull (does no crypto)' \
+				CONFIG_OCF_OCFNULL $CONFIG_OCF_OCF
+dep_tristate '  ocf-bench (HW crypto in-kernel benchmark)' \
+				CONFIG_OCF_BENCH $CONFIG_OCF_OCF
+endmenu
+
+#############################################################################
diff --git a/crypto/ocf/Kconfig b/crypto/ocf/Kconfig
new file mode 100644
index 000000000000..47f4e21fae03
--- /dev/null
+++ b/crypto/ocf/Kconfig
@@ -0,0 +1,135 @@
+menu "OCF Configuration"
+
+config OCF_OCF
+	tristate "OCF (Open Cryptograhic Framework)"
+	help
+	  A linux port of the OpenBSD/FreeBSD crypto framework.
+
+config OCF_RANDOMHARVEST
+	bool "crypto random --- harvest entropy for /dev/random"
+	depends on OCF_OCF
+	help
+	  Includes code to harvest random numbers from devices that support it.
+
+config OCF_FIPS
+	bool "enable fips RNG checks"
+	depends on OCF_OCF && OCF_RANDOMHARVEST
+	help
+	  Run all RNG provided data through a fips check before
+	  adding it /dev/random's entropy pool.
+
+config OCF_CRYPTODEV
+	tristate "cryptodev (user space support)"
+	depends on OCF_OCF
+	help
+	  The user space API to access crypto hardware.
+
+config OCF_CRYPTOSOFT
+	tristate "cryptosoft (software crypto engine)"
+	depends on OCF_OCF
+	help
+	  A software driver for the OCF framework that uses
+	  the kernel CryptoAPI.
+
+config OCF_DM_CRYPT
+	bool "OCF dm_crypt"
+	depends on OCF_OCF && DM_CRYPT
+	help
+	  The dm_crypt device mapper will use the OCF for encryption/decryption,
+	  in case of essiv, the essiv generation will use the kernel crypto APIs.
+	  When using the OCF dm_crypt, only the following encryption algorithms
+	  are supported:
+		DES-CBC, 3DES-CBC and AES-CBC.
+
+config OCF_SAFE
+	tristate "safenet (HW crypto engine)"
+	depends on OCF_OCF
+	help
+	  A driver for a number of the safenet Excel crypto accelerators.
+	  Currently tested and working on the 1141 and 1741.
+
+config OCF_IXP4XX
+	tristate "IXP4xx (HW crypto engine)"
+	depends on OCF_OCF
+	help
+	  XScale IXP4xx crypto accelerator driver.  Requires the
+	  Intel Access library.
+
+config OCF_IXP4XX_SHA1_MD5
+	bool "IXP4xx SHA1 and MD5 Hashing"
+	depends on OCF_IXP4XX
+	help
+	  Allows the IXP4xx crypto accelerator to perform SHA1 and MD5 hashing.
+	  Note: this is MUCH slower than using cryptosoft (software crypto engine).
+
+config OCF_HIFN
+	tristate "hifn (HW crypto engine)"
+	depends on OCF_OCF
+	help
+	  OCF driver for various HIFN based crypto accelerators.
+	  (7951, 7955, 7956, 7751, 7811)
+
+config OCF_HIFNHIPP
+	tristate "Hifn HIPP (HW packet crypto engine)"
+	depends on OCF_OCF
+	help
+	  OCF driver for various HIFN (HIPP) based crypto accelerators
+	  (7855)
+
+config OCF_TALITOS
+	tristate "talitos (HW crypto engine)"
+	depends on OCF_OCF
+	help
+	  OCF driver for Freescale's security engine (SEC/talitos).
+
+config OCF_PASEMI
+	tristate "pasemi (HW crypto engine)"
+	depends on OCF_OCF && PPC_PASEMI
+	help
+	  OCF driver for the PA Semi PWRficient DMA Engine
+
+config OCF_EP80579
+	tristate "ep80579 (HW crypto engine)"
+	depends on OCF_OCF
+	help
+	  OCF driver for the Intel EP80579 Integrated Processor Product Line.
+
+config OCF_CRYPTOCTEON
+	tristate "cryptocteon (HW crypto engine)"
+	depends on OCF_OCF
+	help
+	  OCF driver for the Cavium OCTEON Processors.
+
+config OCF_KIRKWOOD
+	tristate "kirkwood (HW crypto engine)"
+	depends on OCF_OCF
+	help
+	  OCF driver for the Marvell Kirkwood (88F6xxx) Processors.
+
+config OCF_C7108
+	tristate "Micronas 7108 (HW crypto engine)"
+	depends on OCF_OCF
+	help
+	  OCF driver for the Microna 7108 Cipher processors.
+
+config OCF_UBSEC_SSB
+	tristate "uBsec BCM5365 (HW crypto engine)"
+	depends on OCF_OCF
+	help
+	  OCF driver for uBsec BCM5365 hardware crypto accelerator.
+
+config OCF_OCFNULL
+	tristate "ocfnull (fake crypto engine)"
+	depends on OCF_OCF
+	help
+	  OCF driver for measuring ipsec overheads (does no crypto)
+
+config OCF_BENCH
+	tristate "ocf-bench (HW crypto in-kernel benchmark)"
+	depends on OCF_OCF
+	help
+	  A very simple encryption test for the in-kernel interface
+	  of OCF.  Also includes code to benchmark the IXP Access library
+	  for comparison.
+
+endmenu
diff --git a/crypto/ocf/Makefile b/crypto/ocf/Makefile
new file mode 100644
index 000000000000..b9110fc2adf1
--- /dev/null
+++ b/crypto/ocf/Makefile
@@ -0,0 +1,149 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+OCF_OBJS = crypto.o criov.o
+
+ifdef CONFIG_OCF_RANDOMHARVEST
+	OCF_OBJS += random.o
+endif
+
+ifdef CONFIG_OCF_FIPS
+	OCF_OBJS += rndtest.o
+endif
+
+# Add in autoconf.h to get #defines for CONFIG_xxx
+AUTOCONF_H=$(ROOTDIR)/modules/autoconf.h
+ifeq ($(AUTOCONF_H), $(wildcard $(AUTOCONF_H)))
+	EXTRA_CFLAGS += -include $(AUTOCONF_H)
+	export EXTRA_CFLAGS
+endif
+
+ifndef obj
+	obj ?= .
+	_obj = subdir
+	mod-subdirs := safe hifn ixp4xx talitos ocfnull
+	export-objs += crypto.o criov.o random.o
+	list-multi += ocf.o
+	_slash :=
+else
+	_obj = obj
+	_slash := /
+endif
+
+EXTRA_CFLAGS += -I$(obj)/.
+ccflags-$(CONFIG_OF)	+= -I$(srctree)/drivers/crypto/mvebu_cesa
+ccflags-$(CONFIG_OF)	+= -I$(srctree)/arch/arm/mach-mvebu/include/mach
+
+obj-$(CONFIG_OCF_OCF)         += ocf.o
+obj-$(CONFIG_OCF_CRYPTODEV)   += cryptodev.o
+obj-$(CONFIG_OCF_CRYPTOSOFT)  += cryptosoft.o
+obj-$(CONFIG_OCF_BENCH)       += ocf-bench.o
+
+$(_obj)-$(CONFIG_OCF_SAFE)    += safe$(_slash)
+$(_obj)-$(CONFIG_OCF_HIFN)    += hifn$(_slash)
+$(_obj)-$(CONFIG_OCF_IXP4XX)  += ixp4xx$(_slash)
+$(_obj)-$(CONFIG_OCF_TALITOS) += talitos$(_slash)
+$(_obj)-$(CONFIG_OCF_PASEMI)  += pasemi$(_slash)
+$(_obj)-$(CONFIG_OCF_EP80579) += ep80579$(_slash)
+$(_obj)-$(CONFIG_OCF_CRYPTOCTEON) += cryptocteon$(_slash)
+$(_obj)-$(CONFIG_OCF_KIRKWOOD) += kirkwood$(_slash)
+$(_obj)-$(CONFIG_OCF_OCFNULL) += ocfnull$(_slash)
+$(_obj)-$(CONFIG_OCF_C7108) += c7108$(_slash)
+$(_obj)-$(CONFIG_OCF_UBSEC_SSB) += ubsec_ssb$(_slash)
+
+ocf-objs := $(OCF_OBJS)
+
+dummy:
+	@echo "Please consult the README for how to build OCF."
+	@echo "If you can't wait then the following should do it:"
+	@echo ""
+	@echo "    make ocf_modules"
+	@echo "    sudo make ocf_install"
+	@echo ""
+	@exit 1
+
+$(list-multi) dummy1: $(ocf-objs)
+	$(LD) -r -o $@ $(ocf-objs)
+
+.PHONY:
+clean:
+	rm -f *.o *.ko .*.o.flags .*.ko.cmd .*.o.cmd .*.mod.o.cmd *.mod.c
+	rm -f */*.o */*.ko */.*.o.cmd */.*.ko.cmd */.*.mod.o.cmd */*.mod.c */.*.o.flags
+	rm -f */modules.order */modules.builtin modules.order modules.builtin
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
+
+#
+# targets to build easily on the current machine
+#
+
+ocf_make:
+	make -C /lib/modules/$(shell uname -r)/build M=`pwd` $(OCF_TARGET) CONFIG_OCF_OCF=m
+	make -C /lib/modules/$(shell uname -r)/build M=`pwd` $(OCF_TARGET) CONFIG_OCF_OCF=m CONFIG_OCF_CRYPTOSOFT=m
+	-make -C /lib/modules/$(shell uname -r)/build M=`pwd` $(OCF_TARGET) CONFIG_OCF_OCF=m CONFIG_OCF_BENCH=m
+	-make -C /lib/modules/$(shell uname -r)/build M=`pwd` $(OCF_TARGET) CONFIG_OCF_OCF=m CONFIG_OCF_OCFNULL=m
+	-make -C /lib/modules/$(shell uname -r)/build M=`pwd` $(OCF_TARGET) CONFIG_OCF_OCF=m CONFIG_OCF_HIFN=m
+
+ocf_modules:
+	$(MAKE) ocf_make OCF_TARGET=modules
+
+ocf_install:
+	$(MAKE) ocf_make OCF_TARGET="modules modules_install"
+	depmod
+	mkdir -p /usr/include/crypto
+	cp cryptodev.h /usr/include/crypto/.
+
+#
+# generate full kernel patches for 2.4 and 2.6 kernels to make patching
+# your kernel easier
+#
+
+.PHONY: patch
+patch:
+	patchbase=.; \
+		[ -d $$patchbase/patches ] || patchbase=..; \
+		patch=ocf-linux-base.patch; \
+		patch24=ocf-linux-24.patch; \
+		patch26=ocf-linux-26.patch; \
+		patch3=ocf-linux-3.patch; \
+		( \
+			find . -name Makefile; \
+			find . -name Config.in; \
+			find . -name Kconfig; \
+			find . -name README; \
+			find . -name '*.[ch]' | grep -v '.mod.c'; \
+		) | while read t; do \
+			diff -Nau /dev/null $$t | sed 's?^+++ \./?+++ linux/crypto/ocf/?'; \
+		done > $$patch; \
+		cat $$patchbase/patches/linux-2.4.35-ocf.patch $$patch > $$patch24; \
+		cat $$patchbase/patches/linux-2.6.38-ocf.patch $$patch > $$patch26; \
+		cat $$patchbase/patches/linux-3.2.1-ocf.patch $$patch > $$patch3; \
+
+
+#
+# this target probably does nothing for anyone but me - davidm
+#
+
+.PHONY: release
+release:
+	REL=`date +%Y%m%d`; RELDIR=/tmp/ocf-linux-$$REL; \
+		CURDIR=`pwd`; \
+		rm -rf /tmp/ocf-linux-$$REL*; \
+		mkdir -p $$RELDIR/ocf; \
+		mkdir -p $$RELDIR/patches; \
+		mkdir -p $$RELDIR/crypto-tools; \
+		cp README* $$RELDIR/.; \
+		cp patches/[!C]* $$RELDIR/patches/.; \
+		cp tools/[!C]* $$RELDIR/crypto-tools/.; \
+		cp -r [!C]* Config.in $$RELDIR/ocf/.; \
+		rm -rf $$RELDIR/ocf/patches $$RELDIR/ocf/tools; \
+		rm -f $$RELDIR/ocf/README*; \
+		cp $$CURDIR/../../user/crypto-tools/[!C]* $$RELDIR/crypto-tools/.; \
+		make -C $$RELDIR/crypto-tools clean; \
+		make -C $$RELDIR/ocf clean; \
+		find $$RELDIR/ocf -name CVS | xargs rm -rf; \
+		cd $$RELDIR/..; \
+		tar cvf ocf-linux-$$REL.tar ocf-linux-$$REL; \
+		gzip -9 ocf-linux-$$REL.tar
diff --git a/crypto/ocf/c7108/Makefile b/crypto/ocf/c7108/Makefile
new file mode 100644
index 000000000000..dd9af557357b
--- /dev/null
+++ b/crypto/ocf/c7108/Makefile
@@ -0,0 +1,11 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+obj-$(CONFIG_OCF_C7108) += aes-7108.o
+
+obj ?= .
+EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
diff --git a/crypto/ocf/c7108/aes-7108.c b/crypto/ocf/c7108/aes-7108.c
new file mode 100644
index 000000000000..f39666d8e940
--- /dev/null
+++ b/crypto/ocf/c7108/aes-7108.c
@@ -0,0 +1,841 @@
+/*
+ * Copyright (C) 2006 Micronas USA
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+
+//#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/crypto.h>
+#include <linux/mm.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
+#include <asm/io.h>
+#include <asm/delay.h>
+//#include <asm/scatterlist.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <cryptodev.h>
+#include <uio.h>
+#include <aes-7108.h>
+
+/* Runtime mode */
+static int c7108_crypto_mode = C7108_AES_CTRL_MODE_CTR;
+//static int c7108_crypto_mode = C7108_AES_CTRL_MODE_CBC;
+
+static int32_t c7108_id = -1;
+static struct cipher_7108 **c7108_sessions = NULL;
+static u_int32_t c7108_sesnum = 0;
+static unsigned long iobar;
+
+/* Crypto entry points */
+static	int c7108_process(void *, struct cryptop *, int);
+static	int c7108_newsession(void *, u_int32_t *, struct cryptoini *);
+static	int c7108_freesession(void *, u_int64_t);
+
+/* Globals */
+static int debug = 0;
+static spinlock_t csr_mutex;
+
+/* Generic controller-based lock */
+#define AES_LOCK()\
+          spin_lock(&csr_mutex)
+#define AES_UNLOCK()\
+          spin_unlock(&csr_mutex)
+
+/* 7108 AES register access */
+#define c7108_reg_wr8(a,d)   iowrite8(d, (void*)(iobar+(a)))
+#define c7108_reg_wr16(a,d)  iowrite16(d, (void*)(iobar+(a)))
+#define c7108_reg_wr32(a,d)  iowrite32(d, (void*)(iobar+(a)))
+#define c7108_reg_rd8(a)     ioread8((void*)(iobar+(a)))
+#define c7108_reg_rd16(a)    ioread16((void*)(iobar+(a)))
+#define c7108_reg_rd32(a)    ioread32((void*)(iobar+(a)))
+
+static int
+c7108_xlate_key(int klen, u8* k8ptr, u32* k32ptr)
+{
+        int i, nw=0;
+	nw = ((klen >= 256) ? 8 : (klen >= 192) ? 6 : 4);
+	for ( i = 0; i < nw; i++) {
+	    k32ptr[i] =    (k8ptr[i+3] << 24) | (k8ptr[i+2] << 16) |
+		           (k8ptr[i+1] << 8)  | k8ptr[i];
+
+	}
+	return 0;
+}
+
+static int
+c7108_cache_key(int klen, u32* k32ptr, u8* k8ptr)
+{
+        int i, nb=0;
+	u8* ptr = (u8*)k32ptr;
+	nb = ((klen >= 256) ? 32 : (klen >= 192) ? 24 : 16);
+	for ( i = 0; i < nb; i++)
+	    k8ptr[i] = ptr[i];
+	return 0;
+}
+
+static int
+c7108_aes_setup_dma(u32 src, u32 dst, u32 len)
+{
+        if (len < 16) {
+	    printk("len < 16\n");
+	    return -10;
+	}
+	if (len % 16) {
+	    printk("len not multiple of 16\n");
+	    return -11;
+	}
+	c7108_reg_wr16(C7108_AES_DMA_SRC0_LO, (u16) src);
+	c7108_reg_wr16(C7108_AES_DMA_SRC0_HI, (u16)((src & 0xffff0000) >> 16));
+	c7108_reg_wr16(C7108_AES_DMA_DST0_LO, (u16) dst);
+	c7108_reg_wr16(C7108_AES_DMA_DST0_HI, (u16)((dst & 0xffff0000) >> 16));
+	c7108_reg_wr16(C7108_AES_DMA_LEN, (u16) ((len / 16) - 1));
+
+	return 0;
+}
+
+static int
+c7108_aes_set_hw_iv(u8 iv[16])
+{
+        c7108_reg_wr16(C7108_AES_IV0_LO, (u16) ((iv[1] << 8) | iv[0]));
+	c7108_reg_wr16(C7108_AES_IV0_HI, (u16) ((iv[3] << 8) | iv[2]));
+	c7108_reg_wr16(C7108_AES_IV1_LO, (u16) ((iv[5] << 8) | iv[4]));
+	c7108_reg_wr16(C7108_AES_IV1_HI, (u16) ((iv[7] << 8) | iv[6]));
+	c7108_reg_wr16(C7108_AES_IV2_LO, (u16) ((iv[9] << 8) | iv[8]));
+	c7108_reg_wr16(C7108_AES_IV2_HI, (u16) ((iv[11] << 8) | iv[10]));
+	c7108_reg_wr16(C7108_AES_IV3_LO, (u16) ((iv[13] << 8) | iv[12]));
+	c7108_reg_wr16(C7108_AES_IV3_HI, (u16) ((iv[15] << 8) | iv[14]));
+
+    return 0;
+}
+
+static void
+c7108_aes_read_dkey(u32 * dkey)
+{
+        dkey[0] = (c7108_reg_rd16(C7108_AES_EKEY0_HI) << 16) |
+	           c7108_reg_rd16(C7108_AES_EKEY0_LO);
+	dkey[1] = (c7108_reg_rd16(C7108_AES_EKEY1_HI) << 16) |
+	           c7108_reg_rd16(C7108_AES_EKEY1_LO);
+	dkey[2] = (c7108_reg_rd16(C7108_AES_EKEY2_HI) << 16) |
+	           c7108_reg_rd16(C7108_AES_EKEY2_LO);
+	dkey[3] = (c7108_reg_rd16(C7108_AES_EKEY3_HI) << 16) |
+	           c7108_reg_rd16(C7108_AES_EKEY3_LO);
+	dkey[4] = (c7108_reg_rd16(C7108_AES_EKEY4_HI) << 16) |
+                   c7108_reg_rd16(C7108_AES_EKEY4_LO);
+	dkey[5] = (c7108_reg_rd16(C7108_AES_EKEY5_HI) << 16) |
+                   c7108_reg_rd16(C7108_AES_EKEY5_LO);
+	dkey[6] = (c7108_reg_rd16(C7108_AES_EKEY6_HI) << 16) |
+                   c7108_reg_rd16(C7108_AES_EKEY6_LO);
+	dkey[7] = (c7108_reg_rd16(C7108_AES_EKEY7_HI) << 16) |
+                   c7108_reg_rd16(C7108_AES_EKEY7_LO);
+}
+
+static int
+c7108_aes_cipher(int op,
+		 u32 dst,
+		 u32 src,
+		 u32 len,
+		 int klen,
+		 u16 mode,
+		 u32 key[8],
+		 u8 iv[16])
+{
+        int rv = 0, cnt=0;
+	u16 ctrl = 0, stat = 0;
+
+	AES_LOCK();
+
+	/* Setup key length */
+	if (klen == 128) {
+	    ctrl |= C7108_AES_KEY_LEN_128;
+	} else if (klen == 192) {
+	    ctrl |= C7108_AES_KEY_LEN_192;
+	} else if (klen == 256) {
+	    ctrl |= C7108_AES_KEY_LEN_256;
+	} else {
+	    AES_UNLOCK();
+	    return -3;
+	}
+
+	/* Check opcode */
+	if (C7108_AES_ENCRYPT == op) {
+	    ctrl |= C7108_AES_ENCRYPT;
+	} else if (C7108_AES_DECRYPT == op) {
+	    ctrl |= C7108_AES_DECRYPT;
+	} else {
+	    AES_UNLOCK();
+	    return -4;
+	}
+
+	/* check mode */
+	if ( (mode != C7108_AES_CTRL_MODE_CBC) &&
+	     (mode != C7108_AES_CTRL_MODE_CFB) &&
+	     (mode != C7108_AES_CTRL_MODE_OFB) &&
+	     (mode != C7108_AES_CTRL_MODE_CTR) &&
+	     (mode != C7108_AES_CTRL_MODE_ECB) ) {
+	    AES_UNLOCK();
+	    return -5;
+	}
+
+	/* Now set mode */
+	ctrl |= mode;
+
+	/* For CFB, OFB, and CTR, neither backward key
+	 * expansion nor key inversion is required.
+	 */
+	if ( (C7108_AES_DECRYPT == op) &&
+	     (C7108_AES_CTRL_MODE_CBC == mode ||
+	      C7108_AES_CTRL_MODE_ECB == mode ) ){
+
+	    /* Program Key */
+	    c7108_reg_wr16(C7108_AES_KEY0_LO, (u16) key[4]);
+	    c7108_reg_wr16(C7108_AES_KEY0_HI, (u16) (key[4] >> 16));
+	    c7108_reg_wr16(C7108_AES_KEY1_LO, (u16) key[5]);
+	    c7108_reg_wr16(C7108_AES_KEY1_HI, (u16) (key[5] >> 16));
+	    c7108_reg_wr16(C7108_AES_KEY2_LO, (u16) key[6]);
+	    c7108_reg_wr16(C7108_AES_KEY2_HI, (u16) (key[6] >> 16));
+	    c7108_reg_wr16(C7108_AES_KEY3_LO, (u16) key[7]);
+	    c7108_reg_wr16(C7108_AES_KEY3_HI, (u16) (key[7] >> 16));
+	    c7108_reg_wr16(C7108_AES_KEY6_LO, (u16) key[2]);
+	    c7108_reg_wr16(C7108_AES_KEY6_HI, (u16) (key[2] >> 16));
+	    c7108_reg_wr16(C7108_AES_KEY7_LO, (u16) key[3]);
+	    c7108_reg_wr16(C7108_AES_KEY7_HI, (u16) (key[3] >> 16));
+
+
+	    if (192 == klen) {
+		c7108_reg_wr16(C7108_AES_KEY4_LO, (u16) key[7]);
+		c7108_reg_wr16(C7108_AES_KEY4_HI, (u16) (key[7] >> 16));
+		c7108_reg_wr16(C7108_AES_KEY5_LO, (u16) key[7]);
+		c7108_reg_wr16(C7108_AES_KEY5_HI, (u16) (key[7] >> 16));
+
+	    } else if (256 == klen) {
+		/* 256 */
+		c7108_reg_wr16(C7108_AES_KEY4_LO, (u16) key[0]);
+		c7108_reg_wr16(C7108_AES_KEY4_HI, (u16) (key[0] >> 16));
+		c7108_reg_wr16(C7108_AES_KEY5_LO, (u16) key[1]);
+		c7108_reg_wr16(C7108_AES_KEY5_HI, (u16) (key[1] >> 16));
+
+	    }
+
+	} else {
+	    /* Program Key */
+	    c7108_reg_wr16(C7108_AES_KEY0_LO, (u16) key[0]);
+	    c7108_reg_wr16(C7108_AES_KEY0_HI, (u16) (key[0] >> 16));
+	    c7108_reg_wr16(C7108_AES_KEY1_LO, (u16) key[1]);
+	    c7108_reg_wr16(C7108_AES_KEY1_HI, (u16) (key[1] >> 16));
+	    c7108_reg_wr16(C7108_AES_KEY2_LO, (u16) key[2]);
+	    c7108_reg_wr16(C7108_AES_KEY2_HI, (u16) (key[2] >> 16));
+	    c7108_reg_wr16(C7108_AES_KEY3_LO, (u16) key[3]);
+	    c7108_reg_wr16(C7108_AES_KEY3_HI, (u16) (key[3] >> 16));
+	    c7108_reg_wr16(C7108_AES_KEY4_LO, (u16) key[4]);
+	    c7108_reg_wr16(C7108_AES_KEY4_HI, (u16) (key[4] >> 16));
+	    c7108_reg_wr16(C7108_AES_KEY5_LO, (u16) key[5]);
+	    c7108_reg_wr16(C7108_AES_KEY5_HI, (u16) (key[5] >> 16));
+	    c7108_reg_wr16(C7108_AES_KEY6_LO, (u16) key[6]);
+	    c7108_reg_wr16(C7108_AES_KEY6_HI, (u16) (key[6] >> 16));
+	    c7108_reg_wr16(C7108_AES_KEY7_LO, (u16) key[7]);
+	    c7108_reg_wr16(C7108_AES_KEY7_HI, (u16) (key[7] >> 16));
+
+	}
+
+	/* Set IV always */
+	c7108_aes_set_hw_iv(iv);
+
+	/* Program DMA addresses */
+	if ((rv = c7108_aes_setup_dma(src, dst, len)) < 0) {
+	    AES_UNLOCK();
+	    return rv;
+	}
+
+
+	/* Start AES cipher */
+	c7108_reg_wr16(C7108_AES_CTRL, ctrl | C7108_AES_GO);
+
+	//printk("Ctrl: 0x%x\n", ctrl | C7108_AES_GO);
+	do {
+	    /* TODO: interrupt mode */
+	    //        printk("aes_stat=0x%x\n", stat);
+	    //udelay(100);
+	} while ((cnt++ < 1000000) &&
+		 !((stat=c7108_reg_rd16(C7108_AES_CTRL))&C7108_AES_OP_DONE));
+
+
+	if ((mode == C7108_AES_CTRL_MODE_ECB)||
+	    (mode == C7108_AES_CTRL_MODE_CBC)) {
+	    /* Save out key when the lock is held ... */
+	    c7108_aes_read_dkey(key);
+	}
+
+	AES_UNLOCK();
+	return 0;
+
+}
+
+/*
+ * Generate a new crypto device session.
+ */
+static int
+c7108_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
+{
+	struct cipher_7108 **swd;
+	u_int32_t i;
+	char *algo;
+	int mode, xfm_type;
+
+	dprintk("%s()\n", __FUNCTION__);
+	if (sid == NULL || cri == NULL) {
+		dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
+		return EINVAL;
+	}
+
+	if (c7108_sessions) {
+		for (i = 1; i < c7108_sesnum; i++)
+			if (c7108_sessions[i] == NULL)
+				break;
+	} else
+		i = 1;		/* NB: to silence compiler warning */
+
+	if (c7108_sessions == NULL || i == c7108_sesnum) {
+	    if (c7108_sessions == NULL) {
+		i = 1; /* We leave c7108_sessions[0] empty */
+		c7108_sesnum = CRYPTO_SW_SESSIONS;
+	    } else
+		c7108_sesnum *= 2;
+
+	    swd = kmalloc(c7108_sesnum * sizeof(struct cipher_7108 *),
+			  GFP_ATOMIC);
+	    if (swd == NULL) {
+		/* Reset session number */
+		if (c7108_sesnum == CRYPTO_SW_SESSIONS)
+		    c7108_sesnum = 0;
+		else
+		    c7108_sesnum /= 2;
+		dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
+		return ENOBUFS;
+	    }
+	    memset(swd, 0, c7108_sesnum * sizeof(struct cipher_7108 *));
+
+	    /* Copy existing sessions */
+	    if (c7108_sessions) {
+		memcpy(swd, c7108_sessions,
+		       (c7108_sesnum / 2) * sizeof(struct cipher_7108 *));
+		kfree(c7108_sessions);
+	    }
+
+	    c7108_sessions = swd;
+
+	}
+
+	swd = &c7108_sessions[i];
+	*sid = i;
+
+	while (cri) {
+		*swd = (struct cipher_7108 *)
+		    kmalloc(sizeof(struct cipher_7108), GFP_ATOMIC);
+		if (*swd == NULL) {
+		    c7108_freesession(NULL, i);
+		    dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		    return ENOBUFS;
+		}
+		memset(*swd, 0, sizeof(struct cipher_7108));
+
+		algo = NULL;
+		mode = 0;
+		xfm_type = HW_TYPE_CIPHER;
+
+		switch (cri->cri_alg) {
+
+		case CRYPTO_AES_CBC:
+			algo = "aes";
+			mode = CRYPTO_TFM_MODE_CBC;
+			c7108_crypto_mode = C7108_AES_CTRL_MODE_CBC;
+			break;
+#if 0
+		case CRYPTO_AES_CTR:
+			algo = "aes_ctr";
+			mode = CRYPTO_TFM_MODE_CBC;
+			c7108_crypto_mode = C7108_AES_CTRL_MODE_CTR;
+			break;
+		case CRYPTO_AES_ECB:
+			algo = "aes_ecb";
+			mode = CRYPTO_TFM_MODE_CBC;
+			c7108_crypto_mode = C7108_AES_CTRL_MODE_ECB;
+			break;
+		case CRYPTO_AES_OFB:
+			algo = "aes_ofb";
+			mode = CRYPTO_TFM_MODE_CBC;
+			c7108_crypto_mode = C7108_AES_CTRL_MODE_OFB;
+			break;
+		case CRYPTO_AES_CFB:
+			algo = "aes_cfb";
+			mode = CRYPTO_TFM_MODE_CBC;
+			c7108_crypto_mode = C7108_AES_CTRL_MODE_CFB;
+			break;
+#endif
+		default:
+		        printk("unsupported crypto algorithm: %d\n",
+			       cri->cri_alg);
+			return -EINVAL;
+			break;
+		}
+
+
+		if (!algo || !*algo) {
+		    printk("cypher_7108_crypto: Unknown algo 0x%x\n",
+			   cri->cri_alg);
+		    c7108_freesession(NULL, i);
+		    return EINVAL;
+		}
+
+		if (xfm_type == HW_TYPE_CIPHER) {
+		    if (debug) {
+			dprintk("%s key:", __FUNCTION__);
+			for (i = 0; i < (cri->cri_klen + 7) / 8; i++)
+			    dprintk("%s0x%02x", (i % 8) ? " " : "\n    ",
+				    cri->cri_key[i]);
+			dprintk("\n");
+		    }
+
+		} else if (xfm_type == SW_TYPE_HMAC ||
+			   xfm_type == SW_TYPE_HASH) {
+		    printk("cypher_7108_crypto: HMAC unsupported!\n");
+		    return -EINVAL;
+		    c7108_freesession(NULL, i);
+		} else {
+		    printk("cypher_7108_crypto: "
+			   "Unhandled xfm_type %d\n", xfm_type);
+		    c7108_freesession(NULL, i);
+		    return EINVAL;
+		}
+
+		(*swd)->cri_alg = cri->cri_alg;
+		(*swd)->xfm_type = xfm_type;
+
+		cri = cri->cri_next;
+		swd = &((*swd)->next);
+	}
+	return 0;
+}
+
+/*
+ * Free a session.
+ */
+static int
+c7108_freesession(void *arg, u_int64_t tid)
+{
+	struct cipher_7108 *swd;
+	u_int32_t sid = CRYPTO_SESID2LID(tid);
+
+	dprintk("%s()\n", __FUNCTION__);
+	if (sid > c7108_sesnum || c7108_sessions == NULL ||
+			c7108_sessions[sid] == NULL) {
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		return(EINVAL);
+	}
+
+	/* Silently accept and return */
+	if (sid == 0)
+		return(0);
+
+	while ((swd = c7108_sessions[sid]) != NULL) {
+		c7108_sessions[sid] = swd->next;
+		kfree(swd);
+	}
+	return 0;
+}
+
+/*
+ * Process a hardware request.
+ */
+static int
+c7108_process(void *arg, struct cryptop *crp, int hint)
+{
+	struct cryptodesc *crd;
+	struct cipher_7108 *sw;
+	u_int32_t lid;
+	int type;
+	u32 hwkey[8];
+
+#define SCATTERLIST_MAX 16
+	struct scatterlist sg[SCATTERLIST_MAX];
+	int sg_num, sg_len, skip;
+	struct sk_buff *skb = NULL;
+	struct uio *uiop = NULL;
+
+	dprintk("%s()\n", __FUNCTION__);
+	/* Sanity check */
+	if (crp == NULL) {
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		return EINVAL;
+	}
+
+	crp->crp_etype = 0;
+
+	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		crp->crp_etype = EINVAL;
+		goto done;
+	}
+
+	lid = crp->crp_sid & 0xffffffff;
+	if (lid >= c7108_sesnum || lid == 0 || c7108_sessions == NULL ||
+			c7108_sessions[lid] == NULL) {
+		crp->crp_etype = ENOENT;
+		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
+		goto done;
+	}
+
+	/*
+	 * do some error checking outside of the loop for SKB and IOV
+	 * processing this leaves us with valid skb or uiop pointers
+	 * for later
+	 */
+	if (crp->crp_flags & CRYPTO_F_SKBUF) {
+		skb = (struct sk_buff *) crp->crp_buf;
+		if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
+			printk("%s,%d: %d nr_frags > SCATTERLIST_MAX",
+			       __FILE__, __LINE__,
+			       skb_shinfo(skb)->nr_frags);
+			goto done;
+		}
+	} else if (crp->crp_flags & CRYPTO_F_IOV) {
+		uiop = (struct uio *) crp->crp_buf;
+		if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
+			printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX",
+			       __FILE__, __LINE__,
+			       uiop->uio_iovcnt);
+			goto done;
+		}
+	}
+
+	/* Go through crypto descriptors, processing as we go */
+	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
+	    /*
+	     * Find the crypto context.
+	     *
+	     * XXX Note that the logic here prevents us from having
+	     * XXX the same algorithm multiple times in a session
+	     * XXX (or rather, we can but it won't give us the right
+	     * XXX results). To do that, we'd need some way of differentiating
+	     * XXX between the various instances of an algorithm (so we can
+	     * XXX locate the correct crypto context).
+	     */
+	    for (sw = c7108_sessions[lid];
+		 sw && sw->cri_alg != crd->crd_alg;
+		 sw = sw->next)
+		;
+
+	    /* No such context ? */
+	    if (sw == NULL) {
+		crp->crp_etype = EINVAL;
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		goto done;
+	    }
+
+	    skip = crd->crd_skip;
+
+	    /*
+	     * setup the SG list skip from the start of the buffer
+	     */
+	    memset(sg, 0, sizeof(sg));
+	    if (crp->crp_flags & CRYPTO_F_SKBUF) {
+		int i, len;
+		type = CRYPTO_BUF_SKBUF;
+
+		sg_num = 0;
+		sg_len = 0;
+
+		if (skip < skb_headlen(skb)) {
+		    //sg[sg_num].page   = virt_to_page(skb->data + skip);
+			//sg[sg_num].offset = offset_in_page(skb->data + skip);
+		    len = skb_headlen(skb) - skip;
+		    if (len + sg_len > crd->crd_len)
+			len = crd->crd_len - sg_len;
+		    //sg[sg_num].length = len;
+		    sg_set_page(&sg[sg_num], virt_to_page(skb->data + skip), len, offset_in_page(skb->data + skip));
+			sg_len += sg[sg_num].length;
+		    sg_num++;
+		    skip = 0;
+		} else
+		    skip -= skb_headlen(skb);
+
+		for (i = 0; sg_len < crd->crd_len &&
+			 i < skb_shinfo(skb)->nr_frags &&
+			 sg_num < SCATTERLIST_MAX; i++) {
+		    if (skip < skb_shinfo(skb)->frags[i].size) {
+			//sg[sg_num].page   = skb_frag_page(&skb_shinfo(skb)->frags[i]);
+			//sg[sg_num].offset = skb_shinfo(skb)->frags[i].page_offset + skip;
+			len = skb_shinfo(skb)->frags[i].size - skip;
+			if (len + sg_len > crd->crd_len)
+			    len = crd->crd_len - sg_len;
+			//sg[sg_num].length = len;
+			sg_set_page(&sg[sg_num], skb_frag_page(&skb_shinfo(skb)->frags[i]), len, skb_shinfo(skb)->frags[i].page_offset + skip);
+			sg_len += sg[sg_num].length;
+			sg_num++;
+			skip = 0;
+		    } else
+			skip -= skb_shinfo(skb)->frags[i].size;
+		}
+	    } else if (crp->crp_flags & CRYPTO_F_IOV) {
+		int len;
+		type = CRYPTO_BUF_IOV;
+		sg_len = 0;
+		for (sg_num = 0; sg_len < crd->crd_len &&
+			 sg_num < uiop->uio_iovcnt &&
+			 sg_num < SCATTERLIST_MAX; sg_num++) {
+		    if (skip < uiop->uio_iov[sg_num].iov_len) {
+			//sg[sg_num].page   =			    virt_to_page(uiop->uio_iov[sg_num].iov_base+skip);
+			//sg[sg_num].offset =			   offset_in_page(uiop->uio_iov[sg_num].iov_base+skip);
+			len = uiop->uio_iov[sg_num].iov_len - skip;
+			if (len + sg_len > crd->crd_len)
+			    len = crd->crd_len - sg_len;
+			//sg[sg_num].length = len;
+			sg_set_page(&sg[sg_num], virt_to_page(uiop->uio_iov[sg_num].iov_base+skip), len, offset_in_page(uiop->uio_iov[sg_num].iov_base+skip));
+			sg_len += sg[sg_num].length;
+			skip = 0;
+		    } else
+			skip -= uiop->uio_iov[sg_num].iov_len;
+		}
+	    } else {
+		type = CRYPTO_BUF_CONTIG;
+		//sg[0].page   = virt_to_page(crp->crp_buf + skip);
+		//sg[0].offset = offset_in_page(crp->crp_buf + skip);
+		sg_len = (crp->crp_ilen - skip);
+		if (sg_len > crd->crd_len)
+		    sg_len = crd->crd_len;
+		//sg[0].length = sg_len;
+		sg_set_page(&sg[0], virt_to_page(crp->crp_buf + skip), sg_len, offset_in_page(crp->crp_buf + skip));
+		sg_num = 1;
+	    }
+	    if (sg_num > 0)
+		sg_mark_end(&sg[sg_num-1]);
+
+
+	    switch (sw->xfm_type) {
+
+	    case HW_TYPE_CIPHER: {
+
+		unsigned char iv[64];
+		unsigned char *ivp = iv;
+		int i;
+		int ivsize = 16;    /* fixed for AES */
+		int blocksize = 16; /* fixed for AES */
+
+		if (sg_len < blocksize) {
+		    crp->crp_etype = EINVAL;
+		    dprintk("%s,%d: EINVAL len %d < %d\n",
+			    __FILE__, __LINE__,
+			    sg_len,
+			    blocksize);
+		    goto done;
+		}
+
+		if (ivsize > sizeof(iv)) {
+		    crp->crp_etype = EINVAL;
+		    dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		    goto done;
+		}
+
+		if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
+
+		    if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
+			ivp = crd->crd_iv;
+		    } else {
+			get_random_bytes(ivp, ivsize);
+		    }
+		    /*
+		     * do we have to copy the IV back to the buffer ?
+		     */
+		    if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
+			    crypto_copyback(crp->crp_buf,
+					  crd->crd_inject,
+					  ivsize,
+					  (caddr_t)ivp);
+		    }
+
+		    c7108_xlate_key(crd->crd_klen,
+				    (u8*)crd->crd_key, (u32*)hwkey);
+
+		    /* Encrypt SG list */
+		    for (i = 0; i < sg_num; i++) {
+			sg[i].dma_address =
+			    dma_map_single(NULL,
+					   kmap(sg_page(&sg[i])) + sg[i].offset, sg_len, DMA_BIDIRECTIONAL);
+#if 0
+			printk("sg[%d]:0x%08x, off 0x%08x "
+			       "kmap 0x%08x phys 0x%08x\n",
+			       i, sg[i].page, sg[i].offset,
+			       kmap(sg[i].page) + sg[i].offset,
+			       sg[i].dma_address);
+#endif
+			c7108_aes_cipher(C7108_AES_ENCRYPT,
+					 sg[i].dma_address,
+					 sg[i].dma_address,
+					 sg_len,
+					 crd->crd_klen,
+					 c7108_crypto_mode,
+					 hwkey,
+					 ivp);
+
+			if ((c7108_crypto_mode == C7108_AES_CTRL_MODE_CBC)||
+			    (c7108_crypto_mode == C7108_AES_CTRL_MODE_ECB)) {
+			    /* Read back expanded key and cache it in key
+			     * context.
+			     * NOTE: for ECB/CBC modes only (not CTR, CFB, OFB)
+			     *       where you set the key once.
+			     */
+			    c7108_cache_key(crd->crd_klen,
+					    (u32*)hwkey, (u8*)crd->crd_key);
+#if 0
+			    printk("%s expanded key:", __FUNCTION__);
+			    for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
+				printk("%s0x%02x", (i % 8) ? " " : "\n    ",
+				       crd->crd_key[i]);
+			    printk("\n");
+#endif
+			}
+		    }
+		}
+		else { /*decrypt */
+
+		    if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
+			ivp = crd->crd_iv;
+		    } else {
+			crypto_copydata(crp->crp_buf, crd->crd_inject,
+				  ivsize, (caddr_t)ivp);
+		    }
+
+		    c7108_xlate_key(crd->crd_klen,
+				    (u8*)crd->crd_key, (u32*)hwkey);
+
+		    /* Decrypt SG list */
+		    for (i = 0; i < sg_num; i++) {
+			sg[i].dma_address =
+			    dma_map_single(NULL,
+					   kmap(sg_page(&sg[i])) + sg[i].offset,
+					   sg_len, DMA_BIDIRECTIONAL);
+
+#if 0
+			printk("sg[%d]:0x%08x, off 0x%08x "
+			       "kmap 0x%08x phys 0x%08x\n",
+			       i, sg[i].page, sg[i].offset,
+			       kmap(sg[i].page) + sg[i].offset,
+			       sg[i].dma_address);
+#endif
+			c7108_aes_cipher(C7108_AES_DECRYPT,
+					 sg[i].dma_address,
+					 sg[i].dma_address,
+					 sg_len,
+					 crd->crd_klen,
+					 c7108_crypto_mode,
+					 hwkey,
+					 ivp);
+		    }
+		}
+	    } break;
+	    case SW_TYPE_HMAC:
+	    case SW_TYPE_HASH:
+		crp->crp_etype = EINVAL;
+		goto done;
+		break;
+
+	    case SW_TYPE_COMP:
+		crp->crp_etype = EINVAL;
+		goto done;
+		break;
+
+	    default:
+		/* Unknown/unsupported algorithm */
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		crp->crp_etype = EINVAL;
+		goto done;
+	    }
+	}
+
+done:
+	crypto_done(crp);
+	return 0;
+}
+
+static struct {
+	softc_device_decl sc_dev;
+} a7108dev;
+
+static device_method_t a7108_methods = {
+/* crypto device methods */
+	DEVMETHOD(cryptodev_newsession, c7108_newsession),
+	DEVMETHOD(cryptodev_freesession, c7108_freesession),
+	DEVMETHOD(cryptodev_process, c7108_process),
+	DEVMETHOD(cryptodev_kprocess, NULL)
+};
+
+static int
+cypher_7108_crypto_init(void)
+{
+	dprintk("%s(%p)\n", __FUNCTION__, cypher_7108_crypto_init);
+
+	iobar = (unsigned long)ioremap(CCU_AES_REG_BASE, 0x4000);
+	printk("7108: AES @ 0x%08x (0x%08x phys) %s mode\n",
+	       iobar, CCU_AES_REG_BASE,
+	       c7108_crypto_mode & C7108_AES_CTRL_MODE_CBC ? "CBC" :
+	       c7108_crypto_mode & C7108_AES_CTRL_MODE_ECB ? "ECB" :
+	       c7108_crypto_mode & C7108_AES_CTRL_MODE_CTR ? "CTR" :
+	       c7108_crypto_mode & C7108_AES_CTRL_MODE_CFB ? "CFB" :
+	       c7108_crypto_mode & C7108_AES_CTRL_MODE_OFB ? "OFB" : "???");
+	csr_mutex  = SPIN_LOCK_UNLOCKED;
+
+	memset(&a7108dev, 0, sizeof(a7108dev));
+	softc_device_init(&a7108dev, "aes7108", 0, a7108_methods);
+
+	c7108_id = crypto_get_driverid(softc_get_device(&a7108dev), CRYPTOCAP_F_HARDWARE);
+	if (c7108_id < 0)
+		panic("7108: crypto device cannot initialize!");
+
+//	crypto_register(c7108_id, CRYPTO_AES_CBC, 0, 0, c7108_newsession, c7108_freesession, c7108_process, NULL);
+	crypto_register(c7108_id, CRYPTO_AES_CBC, 0, 0);
+
+	return(0);
+}
+
+static void
+cypher_7108_crypto_exit(void)
+{
+	dprintk("%s()\n", __FUNCTION__);
+	crypto_unregister_all(c7108_id);
+	c7108_id = -1;
+}
+
+module_init(cypher_7108_crypto_init);
+module_exit(cypher_7108_crypto_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Cypher 7108 Crypto (OCF module for kernel crypto)");
diff --git a/crypto/ocf/c7108/aes-7108.h b/crypto/ocf/c7108/aes-7108.h
new file mode 100644
index 000000000000..48711b41083d
--- /dev/null
+++ b/crypto/ocf/c7108/aes-7108.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2006 Micronas USA
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+
+#ifndef __AES_7108_H__
+#define __AES_7108_H__
+
+/* Cypher 7108 AES Controller Hardware */
+#define CCU_REG_BASE       0x1b500000
+#define CCU_AES_REG_BASE   (CCU_REG_BASE + 0x100)
+#define C7108_AES_KEY0_LO        (0x0000)
+#define C7108_AES_KEY0_HI        (0x0004)
+#define C7108_AES_KEY1_LO        (0x0008)
+#define C7108_AES_KEY1_HI        (0x000c)
+#define C7108_AES_KEY2_LO        (0x0010)
+#define C7108_AES_KEY2_HI        (0x0014)
+#define C7108_AES_KEY3_LO        (0x0018)
+#define C7108_AES_KEY3_HI        (0x001c)
+#define C7108_AES_KEY4_LO        (0x0020)
+#define C7108_AES_KEY4_HI        (0x0024)
+#define C7108_AES_KEY5_LO        (0x0028)
+#define C7108_AES_KEY5_HI        (0x002c)
+#define C7108_AES_KEY6_LO        (0x0030)
+#define C7108_AES_KEY6_HI        (0x0034)
+#define C7108_AES_KEY7_LO        (0x0038)
+#define C7108_AES_KEY7_HI        (0x003c)
+#define C7108_AES_IV0_LO         (0x0040)
+#define C7108_AES_IV0_HI         (0x0044)
+#define C7108_AES_IV1_LO         (0x0048)
+#define C7108_AES_IV1_HI         (0x004c)
+#define C7108_AES_IV2_LO         (0x0050)
+#define C7108_AES_IV2_HI         (0x0054)
+#define C7108_AES_IV3_LO         (0x0058)
+#define C7108_AES_IV3_HI         (0x005c)
+
+#define C7108_AES_DMA_SRC0_LO    (0x0068) /* Bits 0:15 */
+#define C7108_AES_DMA_SRC0_HI    (0x006c) /* Bits 27:16 */
+#define C7108_AES_DMA_DST0_LO    (0x0070) /* Bits 0:15 */
+#define C7108_AES_DMA_DST0_HI    (0x0074) /* Bits 27:16 */
+#define C7108_AES_DMA_LEN        (0x0078)  /*Bytes:(Count+1)x16 */
+
+/* AES/Copy engine control register */
+#define C7108_AES_CTRL           (0x007c) /* AES control */
+#define C7108_AES_CTRL_RS        (1<<0)     /* Which set of src/dst to use */
+
+/* AES Cipher mode, controlled by setting Bits 2:0 */
+#define C7108_AES_CTRL_MODE_CBC     0
+#define C7108_AES_CTRL_MODE_CFB     (1<<0)
+#define C7108_AES_CTRL_MODE_OFB     (1<<1)
+#define C7108_AES_CTRL_MODE_CTR     ((1<<0)|(1<<1))
+#define C7108_AES_CTRL_MODE_ECB     (1<<2)
+
+/* AES Key length , Bits 5:4 */
+#define C7108_AES_KEY_LEN_128         0       /* 00 */
+#define C7108_AES_KEY_LEN_192         (1<<4)  /* 01 */
+#define C7108_AES_KEY_LEN_256         (1<<5)  /* 10 */
+
+/* AES Operation (crypt/decrypt), Bit 3 */
+#define C7108_AES_DECRYPT             (1<<3)   /* Clear for encrypt */
+#define C7108_AES_ENCRYPT              0
+#define C7108_AES_INTR                (1<<13) /* Set on done trans from 0->1*/
+#define C7108_AES_GO                  (1<<14) /* Run */
+#define C7108_AES_OP_DONE             (1<<15) /* Set when complete */
+
+
+/* Expanded key registers */
+#define C7108_AES_EKEY0_LO            (0x0080)
+#define C7108_AES_EKEY0_HI            (0x0084)
+#define C7108_AES_EKEY1_LO            (0x0088)
+#define C7108_AES_EKEY1_HI            (0x008c)
+#define C7108_AES_EKEY2_LO            (0x0090)
+#define C7108_AES_EKEY2_HI            (0x0094)
+#define C7108_AES_EKEY3_LO            (0x0098)
+#define C7108_AES_EKEY3_HI            (0x009c)
+#define C7108_AES_EKEY4_LO            (0x00a0)
+#define C7108_AES_EKEY4_HI            (0x00a4)
+#define C7108_AES_EKEY5_LO            (0x00a8)
+#define C7108_AES_EKEY5_HI            (0x00ac)
+#define C7108_AES_EKEY6_LO            (0x00b0)
+#define C7108_AES_EKEY6_HI            (0x00b4)
+#define C7108_AES_EKEY7_LO            (0x00b8)
+#define C7108_AES_EKEY7_HI            (0x00bc)
+#define C7108_AES_OK                  (0x00fc) /* Reset: "OK" */
+
+#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
+
+/* Software session entry */
+
+#define HW_TYPE_CIPHER	0
+#define SW_TYPE_HMAC	1
+#define SW_TYPE_AUTH2	2
+#define SW_TYPE_HASH	3
+#define SW_TYPE_COMP	4
+
+struct cipher_7108 {
+	int			xfm_type;
+	int			cri_alg;
+	union {
+		struct {
+			char sw_key[HMAC_BLOCK_LEN];
+			int  sw_klen;
+			int  sw_authlen;
+		} hmac;
+	} u;
+	struct cipher_7108	*next;
+};
+
+
+
+#endif /* __C7108_AES_7108_H__ */
diff --git a/crypto/ocf/criov.c b/crypto/ocf/criov.c
new file mode 100644
index 000000000000..7ad2266d9e51
--- /dev/null
+++ b/crypto/ocf/criov.c
@@ -0,0 +1,215 @@
+/*      $OpenBSD: criov.c,v 1.9 2002/01/29 15:48:29 jason Exp $	*/
+
+/*
+ * Linux port done by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2006-2010 David McCullough
+ * Copyright (C) 2004-2005 Intel Corporation.
+ * The license and original author are listed below.
+ *
+ * Copyright (c) 1999 Theo de Raadt
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+__FBSDID("$FreeBSD: src/sys/opencrypto/criov.c,v 1.5 2006/06/04 22:15:13 pjd Exp $");
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/uio.h>
+#include <linux/skbuff.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <asm/io.h>
+
+#include <uio.h>
+#include <cryptodev.h>
+
+/*
+ * This macro is only for avoiding code duplication, as we need to skip
+ * given number of bytes in the same way in three functions below.
+ */
+#define	CUIO_SKIP()	do {						\
+	KASSERT(off >= 0, ("%s: off %d < 0", __func__, off));		\
+	KASSERT(len >= 0, ("%s: len %d < 0", __func__, len));		\
+	while (off > 0) {						\
+		KASSERT(iol >= 0, ("%s: empty in skip", __func__));	\
+		if (off < iov->iov_len)					\
+			break;						\
+		off -= iov->iov_len;					\
+		iol--;							\
+		iov++;							\
+	}								\
+} while (0)
+
+void
+cuio_copydata(struct uio* uio, int off, int len, caddr_t cp)
+{
+	struct iovec *iov = uio->uio_iov;
+	int iol = uio->uio_iovcnt;
+	unsigned count;
+
+	CUIO_SKIP();
+	while (len > 0) {
+		KASSERT(iol >= 0, ("%s: empty", __func__));
+		count = min((int)(iov->iov_len - off), len);
+		memcpy(cp, ((caddr_t)iov->iov_base) + off, count);
+		len -= count;
+		cp += count;
+		off = 0;
+		iol--;
+		iov++;
+	}
+}
+
+void
+cuio_copyback(struct uio* uio, int off, int len, caddr_t cp)
+{
+	struct iovec *iov = uio->uio_iov;
+	int iol = uio->uio_iovcnt;
+	unsigned count;
+
+	CUIO_SKIP();
+	while (len > 0) {
+		KASSERT(iol >= 0, ("%s: empty", __func__));
+		count = min((int)(iov->iov_len - off), len);
+		memcpy(((caddr_t)iov->iov_base) + off, cp, count);
+		len -= count;
+		cp += count;
+		off = 0;
+		iol--;
+		iov++;
+	}
+}
+
+/*
+ * Return a pointer to iov/offset of location in iovec list.
+ */
+struct iovec *
+cuio_getptr(struct uio *uio, int loc, int *off)
+{
+	struct iovec *iov = uio->uio_iov;
+	int iol = uio->uio_iovcnt;
+
+	while (loc >= 0) {
+		/* Normal end of search */
+		if (loc < iov->iov_len) {
+			*off = loc;
+			return (iov);
+		}
+
+		loc -= iov->iov_len;
+		if (iol == 0) {
+			if (loc == 0) {
+				/* Point at the end of valid data */
+				*off = iov->iov_len;
+				return (iov);
+			} else
+				return (NULL);
+		} else {
+			iov++, iol--;
+		}
+	}
+
+	return (NULL);
+}
+
+EXPORT_SYMBOL(cuio_copyback);
+EXPORT_SYMBOL(cuio_copydata);
+EXPORT_SYMBOL(cuio_getptr);
+
+
+static void
+skb_copy_bits_back(struct sk_buff *skb, int offset, caddr_t cp, int len)
+{
+	int i;
+	if (offset < skb_headlen(skb)) {
+		memcpy(skb->data + offset, cp, min_t(int, skb_headlen(skb), len));
+		len -= skb_headlen(skb);
+		cp += skb_headlen(skb);
+	}
+	offset -= skb_headlen(skb);
+	for (i = 0; len > 0 && i < skb_shinfo(skb)->nr_frags; i++) {
+		if (offset < skb_shinfo(skb)->frags[i].size) {
+			memcpy(page_address(skb_frag_page(&skb_shinfo(skb)->frags[i])) +
+					skb_shinfo(skb)->frags[i].page_offset,
+					cp, min_t(int, skb_shinfo(skb)->frags[i].size, len));
+			len -= skb_shinfo(skb)->frags[i].size;
+			cp += skb_shinfo(skb)->frags[i].size;
+		}
+		offset -= skb_shinfo(skb)->frags[i].size;
+	}
+}
+
+void
+crypto_copyback(int flags, caddr_t buf, int off, int size, caddr_t in)
+{
+
+	if ((flags & CRYPTO_F_SKBUF) != 0)
+		skb_copy_bits_back((struct sk_buff *)buf, off, in, size);
+	else if ((flags & CRYPTO_F_IOV) != 0)
+		cuio_copyback((struct uio *)buf, off, size, in);
+	else
+		bcopy(in, buf + off, size);
+}
+
+void
+crypto_copydata(int flags, caddr_t buf, int off, int size, caddr_t out)
+{
+
+	if ((flags & CRYPTO_F_SKBUF) != 0)
+		skb_copy_bits((struct sk_buff *)buf, off, out, size);
+	else if ((flags & CRYPTO_F_IOV) != 0)
+		cuio_copydata((struct uio *)buf, off, size, out);
+	else
+		bcopy(buf + off, out, size);
+}
+
+int
+crypto_apply(int flags, caddr_t buf, int off, int len,
+    int (*f)(void *, void *, u_int), void *arg)
+{
+#if 0
+	int error;
+
+	if ((flags & CRYPTO_F_SKBUF) != 0)
+		error = XXXXXX((struct mbuf *)buf, off, len, f, arg);
+	else if ((flags & CRYPTO_F_IOV) != 0)
+		error = cuio_apply((struct uio *)buf, off, len, f, arg);
+	else
+		error = (*f)(arg, buf + off, len);
+	return (error);
+#else
+	KASSERT(0, ("crypto_apply not implemented!\n"));
+#endif
+	return 0;
+}
+
+EXPORT_SYMBOL(crypto_copyback);
+EXPORT_SYMBOL(crypto_copydata);
+EXPORT_SYMBOL(crypto_apply);
diff --git a/crypto/ocf/crypto.c b/crypto/ocf/crypto.c
new file mode 100644
index 000000000000..74da325bcb10
--- /dev/null
+++ b/crypto/ocf/crypto.c
@@ -0,0 +1,1871 @@
+/*-
+ * Linux port done by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2006-2010 David McCullough
+ * Copyright (C) 2004-2005 Intel Corporation.
+ * The license and original author are listed below.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
+ *
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if 0
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.27 2007/03/21 03:42:51 sam Exp $");
+#endif
+
+/*
+ * Cryptographic Subsystem.
+ *
+ * This code is derived from the Openbsd Cryptographic Framework (OCF)
+ * that has the copyright shown below.  Very little of the original
+ * code remains.
+ */
+/*-
+ * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
+ *
+ * This code was written by Angelos D. Keromytis in Athens, Greece, in
+ * February 2000. Network Security Technologies Inc. (NSTI) kindly
+ * supported the development of this code.
+ *
+ * Copyright (c) 2000, 2001 Angelos D. Keromytis
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all source code copies of any software which is or includes a copy or
+ * modification of this software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ *
+__FBSDID("$FreeBSD: src/sys/opencrypto/crypto.c,v 1.16 2005/01/07 02:29:16 imp Exp $");
+ */
+
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,4)
+#include <linux/kthread.h>
+#endif
+#include <cryptodev.h>
+
+/*
+ * keep track of whether or not we have been initialised, a big
+ * issue if we are linked into the kernel and a driver gets started before
+ * us
+ */
+static int crypto_initted = 0;
+
+/*
+ * Crypto drivers register themselves by allocating a slot in the
+ * crypto_drivers table with crypto_get_driverid() and then registering
+ * each algorithm they support with crypto_register() and crypto_kregister().
+ */
+
+/*
+ * lock on driver table
+ * we track its state as spin_is_locked does not do anything on non-SMP boxes
+ */
+static spinlock_t	crypto_drivers_lock;
+static int			crypto_drivers_locked;		/* for non-SMP boxes */
+
+#define	CRYPTO_DRIVER_LOCK() \
+			({ \
+				spin_lock_irqsave(&crypto_drivers_lock, d_flags); \
+				crypto_drivers_locked = 1; \
+				dprintk("%s,%d: DRIVER_LOCK()\n", __FILE__, __LINE__); \
+			 })
+#define	CRYPTO_DRIVER_UNLOCK() \
+			({ \
+				dprintk("%s,%d: DRIVER_UNLOCK()\n", __FILE__, __LINE__); \
+				crypto_drivers_locked = 0; \
+				spin_unlock_irqrestore(&crypto_drivers_lock, d_flags); \
+			 })
+#define	CRYPTO_DRIVER_ASSERT() \
+			({ \
+				if (!crypto_drivers_locked) { \
+					dprintk("%s,%d: DRIVER_ASSERT!\n", __FILE__, __LINE__); \
+				} \
+			 })
+
+/*
+ * Crypto device/driver capabilities structure.
+ *
+ * Synchronization:
+ * (d) - protected by CRYPTO_DRIVER_LOCK()
+ * (q) - protected by CRYPTO_Q_LOCK()
+ * Not tagged fields are read-only.
+ */
+struct cryptocap {
+	device_t	cc_dev;			/* (d) device/driver */
+	u_int32_t	cc_sessions;		/* (d) # of sessions */
+	u_int32_t	cc_koperations;		/* (d) # os asym operations */
+	/*
+	 * Largest possible operator length (in bits) for each type of
+	 * encryption algorithm. XXX not used
+	 */
+	u_int16_t	cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
+	u_int8_t	cc_alg[CRYPTO_ALGORITHM_MAX + 1];
+	u_int8_t	cc_kalg[CRK_ALGORITHM_MAX + 1];
+
+	int		cc_flags;		/* (d) flags */
+#define CRYPTOCAP_F_CLEANUP	0x80000000	/* needs resource cleanup */
+	int		cc_qblocked;		/* (q) symmetric q blocked */
+	int		cc_kqblocked;		/* (q) asymmetric q blocked */
+
+	int		cc_unqblocked;		/* (q) symmetric q blocked */
+	int		cc_unkqblocked;		/* (q) asymmetric q blocked */
+};
+static struct cryptocap *crypto_drivers = NULL;
+static int crypto_drivers_num = 0;
+
+/*
+ * There are two queues for crypto requests; one for symmetric (e.g.
+ * cipher) operations and one for asymmetric (e.g. MOD)operations.
+ * A single mutex is used to lock access to both queues.  We could
+ * have one per-queue but having one simplifies handling of block/unblock
+ * operations.
+ */
+static LIST_HEAD(crp_q);		/* crypto request queue */
+static LIST_HEAD(crp_kq);		/* asym request queue */
+
+static spinlock_t crypto_q_lock;
+
+int crypto_all_qblocked = 0;  /* protect with Q_LOCK */
+module_param(crypto_all_qblocked, int, 0444);
+MODULE_PARM_DESC(crypto_all_qblocked, "Are all crypto queues blocked");
+
+int crypto_all_kqblocked = 0; /* protect with Q_LOCK */
+module_param(crypto_all_kqblocked, int, 0444);
+MODULE_PARM_DESC(crypto_all_kqblocked, "Are all asym crypto queues blocked");
+
+#define	CRYPTO_Q_LOCK() \
+			({ \
+				spin_lock_irqsave(&crypto_q_lock, q_flags); \
+				dprintk("%s,%d: Q_LOCK()\n", __FILE__, __LINE__); \
+			 })
+#define	CRYPTO_Q_UNLOCK() \
+			({ \
+				dprintk("%s,%d: Q_UNLOCK()\n", __FILE__, __LINE__); \
+				spin_unlock_irqrestore(&crypto_q_lock, q_flags); \
+			 })
+
+/*
+ * There are two queues for processing completed crypto requests; one
+ * for the symmetric and one for the asymmetric ops.  We only need one
+ * but have two to avoid type futzing (cryptop vs. cryptkop).  A single
+ * mutex is used to lock access to both queues.  Note that this lock
+ * must be separate from the lock on request queues to insure driver
+ * callbacks don't generate lock order reversals.
+ */
+static LIST_HEAD(crp_ret_q);		/* callback queues */
+static LIST_HEAD(crp_ret_kq);
+
+static spinlock_t crypto_ret_q_lock;
+#define	CRYPTO_RETQ_LOCK() \
+			({ \
+				spin_lock_irqsave(&crypto_ret_q_lock, r_flags); \
+				dprintk("%s,%d: RETQ_LOCK\n", __FILE__, __LINE__); \
+			 })
+#define	CRYPTO_RETQ_UNLOCK() \
+			({ \
+				dprintk("%s,%d: RETQ_UNLOCK\n", __FILE__, __LINE__); \
+				spin_unlock_irqrestore(&crypto_ret_q_lock, r_flags); \
+			 })
+#define	CRYPTO_RETQ_EMPTY()	(list_empty(&crp_ret_q) && list_empty(&crp_ret_kq))
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+static kmem_cache_t *cryptop_zone;
+static kmem_cache_t *cryptodesc_zone;
+#else
+static struct kmem_cache *cryptop_zone;
+static struct kmem_cache *cryptodesc_zone;
+#endif
+
+#define debug crypto_debug
+int crypto_debug = 0;
+module_param(crypto_debug, int, 0644);
+MODULE_PARM_DESC(crypto_debug, "Enable debug");
+EXPORT_SYMBOL(crypto_debug);
+
+/*
+ * Maximum number of outstanding crypto requests before we start
+ * failing requests.  We need this to prevent DOS when too many
+ * requests are arriving for us to keep up.  Otherwise we will
+ * run the system out of memory.  Since crypto is slow,  we are
+ * usually the bottleneck that needs to say, enough is enough.
+ *
+ * We cannot print errors when this condition occurs,  we are already too
+ * slow,  printing anything will just kill us
+ */
+
+static int crypto_q_cnt = 0;
+module_param(crypto_q_cnt, int, 0444);
+MODULE_PARM_DESC(crypto_q_cnt,
+		"Current number of outstanding crypto requests");
+
+static int crypto_q_max = 1000;
+module_param(crypto_q_max, int, 0644);
+MODULE_PARM_DESC(crypto_q_max,
+		"Maximum number of outstanding crypto requests");
+
+#define bootverbose crypto_verbose
+static int crypto_verbose = 0;
+module_param(crypto_verbose, int, 0644);
+MODULE_PARM_DESC(crypto_verbose,
+		"Enable verbose crypto startup");
+
+int	crypto_usercrypto = 1;	/* userland may do crypto reqs */
+module_param(crypto_usercrypto, int, 0644);
+MODULE_PARM_DESC(crypto_usercrypto,
+	   "Enable/disable user-mode access to crypto support");
+
+int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
+module_param(crypto_userasymcrypto, int, 0644);
+MODULE_PARM_DESC(crypto_userasymcrypto,
+	   "Enable/disable user-mode access to asymmetric crypto support");
+
+int	crypto_devallowsoft = 0;	/* only use hardware crypto */
+module_param(crypto_devallowsoft, int, 0644);
+MODULE_PARM_DESC(crypto_devallowsoft,
+	   "Enable/disable use of software crypto support");
+
+/*
+ * This parameter controls the maximum number of crypto operations to
+ * do consecutively in the crypto kernel thread before scheduling to allow
+ * other processes to run. Without it, it is possible to get into a
+ * situation where the crypto thread never allows any other processes to run.
+ * Default to 1000 which should be less than one second.
+ */
+static int crypto_max_loopcount = 1000;
+module_param(crypto_max_loopcount, int, 0644);
+MODULE_PARM_DESC(crypto_max_loopcount,
+	   "Maximum number of crypto ops to do before yielding to other processes");
+
+#ifndef CONFIG_NR_CPUS
+#define CONFIG_NR_CPUS 1
+#endif
+
+static struct task_struct *cryptoproc[CONFIG_NR_CPUS];
+static struct task_struct *cryptoretproc[CONFIG_NR_CPUS];
+static DECLARE_WAIT_QUEUE_HEAD(cryptoproc_wait);
+static DECLARE_WAIT_QUEUE_HEAD(cryptoretproc_wait);
+
+static	int crypto_proc(void *arg);
+static	int crypto_ret_proc(void *arg);
+static	int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
+static	int crypto_kinvoke(struct cryptkop *krp, int flags);
+static	void crypto_exit(void);
+int crypto_init(void);
+
+static	struct cryptostats cryptostats;
+
+static struct cryptocap *
+crypto_checkdriver(u_int32_t hid)
+{
+	if (crypto_drivers == NULL)
+		return NULL;
+	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
+}
+
+/*
+ * Compare a driver's list of supported algorithms against another
+ * list; return non-zero if all algorithms are supported.
+ */
+static int
+driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
+{
+	const struct cryptoini *cr;
+
+	/* See if all the algorithms are supported. */
+	for (cr = cri; cr; cr = cr->cri_next)
+		if (cap->cc_alg[cr->cri_alg] == 0)
+			return 0;
+	return 1;
+}
+
+/*
+ * Select a driver for a new session that supports the specified
+ * algorithms and, optionally, is constrained according to the flags.
+ * The algorithm we use here is pretty stupid; just use the
+ * first driver that supports all the algorithms we need. If there
+ * are multiple drivers we choose the driver with the fewest active
+ * sessions.  We prefer hardware-backed drivers to software ones.
+ *
+ * XXX We need more smarts here (in real life too, but that's
+ * XXX another story altogether).
+ */
+static struct cryptocap *
+crypto_select_driver(const struct cryptoini *cri, int flags)
+{
+	struct cryptocap *cap, *best;
+	int match, hid;
+
+	CRYPTO_DRIVER_ASSERT();
+
+	/*
+	 * Look first for hardware crypto devices if permitted.
+	 */
+	if (flags & CRYPTOCAP_F_HARDWARE)
+		match = CRYPTOCAP_F_HARDWARE;
+	else
+		match = CRYPTOCAP_F_SOFTWARE;
+	best = NULL;
+again:
+	for (hid = 0; hid < crypto_drivers_num; hid++) {
+		cap = &crypto_drivers[hid];
+		/*
+		 * If it's not initialized, is in the process of
+		 * going away, or is not appropriate (hardware
+		 * or software based on match), then skip.
+		 */
+		if (cap->cc_dev == NULL ||
+		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
+		    (cap->cc_flags & match) == 0)
+			continue;
+
+		/* verify all the algorithms are supported. */
+		if (driver_suitable(cap, cri)) {
+			if (best == NULL ||
+			    cap->cc_sessions < best->cc_sessions)
+				best = cap;
+		}
+	}
+	if (best != NULL)
+		return best;
+	if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
+		/* sort of an Algol 68-style for loop */
+		match = CRYPTOCAP_F_SOFTWARE;
+		goto again;
+	}
+	return best;
+}
+
+/*
+ * Create a new session.  The crid argument specifies a crypto
+ * driver to use or constraints on a driver to select (hardware
+ * only, software only, either).  Whatever driver is selected
+ * must be capable of the requested crypto algorithms.
+ */
+int
+crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid)
+{
+	struct cryptocap *cap;
+	u_int32_t hid, lid;
+	int err;
+	unsigned long d_flags;
+
+	CRYPTO_DRIVER_LOCK();
+	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
+		/*
+		 * Use specified driver; verify it is capable.
+		 */
+		cap = crypto_checkdriver(crid);
+		if (cap != NULL && !driver_suitable(cap, cri))
+			cap = NULL;
+	} else {
+		/*
+		 * No requested driver; select based on crid flags.
+		 */
+		cap = crypto_select_driver(cri, crid);
+		/*
+		 * if NULL then can't do everything in one session.
+		 * XXX Fix this. We need to inject a "virtual" session
+		 * XXX layer right about here.
+		 */
+	}
+	if (cap != NULL) {
+		/* Call the driver initialization routine. */
+		hid = cap - crypto_drivers;
+		lid = hid;		/* Pass the driver ID. */
+		cap->cc_sessions++;
+		CRYPTO_DRIVER_UNLOCK();
+		err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri);
+		CRYPTO_DRIVER_LOCK();
+		if (err == 0) {
+			(*sid) = (cap->cc_flags & 0xff000000)
+			       | (hid & 0x00ffffff);
+			(*sid) <<= 32;
+			(*sid) |= (lid & 0xffffffff);
+		} else
+			cap->cc_sessions--;
+	} else
+		err = EINVAL;
+	CRYPTO_DRIVER_UNLOCK();
+	return err;
+}
+
+static void
+crypto_remove(struct cryptocap *cap)
+{
+	CRYPTO_DRIVER_ASSERT();
+	if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
+		bzero(cap, sizeof(*cap));
+}
+
+/*
+ * Delete an existing session (or a reserved session on an unregistered
+ * driver).
+ */
+int
+crypto_freesession(u_int64_t sid)
+{
+	struct cryptocap *cap;
+	u_int32_t hid;
+	int err = 0;
+	unsigned long d_flags;
+
+	dprintk("%s()\n", __FUNCTION__);
+	CRYPTO_DRIVER_LOCK();
+
+	if (crypto_drivers == NULL) {
+		err = EINVAL;
+		goto done;
+	}
+
+	/* Determine two IDs. */
+	hid = CRYPTO_SESID2HID(sid);
+
+	if (hid >= crypto_drivers_num) {
+		dprintk("%s - INVALID DRIVER NUM %d\n", __FUNCTION__, hid);
+		err = ENOENT;
+		goto done;
+	}
+	cap = &crypto_drivers[hid];
+
+	if (cap->cc_dev) {
+		CRYPTO_DRIVER_UNLOCK();
+		/* Call the driver cleanup routine, if available, unlocked. */
+		err = CRYPTODEV_FREESESSION(cap->cc_dev, sid);
+		CRYPTO_DRIVER_LOCK();
+	}
+
+	if (cap->cc_sessions)
+		cap->cc_sessions--;
+
+	if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
+		crypto_remove(cap);
+
+done:
+	CRYPTO_DRIVER_UNLOCK();
+	return err;
+}
+
+/*
+ * Return an unused driver id.  Used by drivers prior to registering
+ * support for the algorithms they handle.
+ */
+int32_t
+crypto_get_driverid(device_t dev, int flags)
+{
+	struct cryptocap *newdrv;
+	int i;
+	unsigned long d_flags;
+
+	if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
+		printf("%s: no flags specified when registering driver\n",
+		    device_get_nameunit(dev));
+		return -1;
+	}
+
+	CRYPTO_DRIVER_LOCK();
+
+	for (i = 0; i < crypto_drivers_num; i++) {
+		if (crypto_drivers[i].cc_dev == NULL &&
+		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
+			break;
+		}
+	}
+
+	/* Out of entries, allocate some more. */
+	if (i == crypto_drivers_num) {
+		/* Be careful about wrap-around. */
+		if (2 * crypto_drivers_num <= crypto_drivers_num) {
+			CRYPTO_DRIVER_UNLOCK();
+			printk("crypto: driver count wraparound!\n");
+			return -1;
+		}
+
+		newdrv = kmalloc(2 * crypto_drivers_num * sizeof(struct cryptocap),
+				GFP_KERNEL);
+		if (newdrv == NULL) {
+			CRYPTO_DRIVER_UNLOCK();
+			printk("crypto: no space to expand driver table!\n");
+			return -1;
+		}
+
+		memcpy(newdrv, crypto_drivers,
+				crypto_drivers_num * sizeof(struct cryptocap));
+		memset(&newdrv[crypto_drivers_num], 0,
+				crypto_drivers_num * sizeof(struct cryptocap));
+
+		crypto_drivers_num *= 2;
+
+		kfree(crypto_drivers);
+		crypto_drivers = newdrv;
+	}
+
+	/* NB: state is zero'd on free */
+	crypto_drivers[i].cc_sessions = 1;	/* Mark */
+	crypto_drivers[i].cc_dev = dev;
+	crypto_drivers[i].cc_flags = flags;
+	if (bootverbose)
+		printf("crypto: assign %s driver id %u, flags %u\n",
+		    device_get_nameunit(dev), i, flags);
+
+	CRYPTO_DRIVER_UNLOCK();
+
+	return i;
+}
+
+/*
+ * Lookup a driver by name.  We match against the full device
+ * name and unit, and against just the name.  The latter gives
+ * us a simple widlcarding by device name.  On success return the
+ * driver/hardware identifier; otherwise return -1.
+ */
+int
+crypto_find_driver(const char *match)
+{
+	int i, len = strlen(match);
+	unsigned long d_flags;
+
+	CRYPTO_DRIVER_LOCK();
+	for (i = 0; i < crypto_drivers_num; i++) {
+		device_t dev = crypto_drivers[i].cc_dev;
+		if (dev == NULL ||
+		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
+			continue;
+		if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
+		    strncmp(match, device_get_name(dev), len) == 0)
+			break;
+	}
+	CRYPTO_DRIVER_UNLOCK();
+	return i < crypto_drivers_num ? i : -1;
+}
+
+/*
+ * Return the device_t for the specified driver or NULL
+ * if the driver identifier is invalid.
+ */
+device_t
+crypto_find_device_byhid(int hid)
+{
+	struct cryptocap *cap = crypto_checkdriver(hid);
+	return cap != NULL ? cap->cc_dev : NULL;
+}
+
+/*
+ * Return the device/driver capabilities.
+ */
+int
+crypto_getcaps(int hid)
+{
+	struct cryptocap *cap = crypto_checkdriver(hid);
+	return cap != NULL ? cap->cc_flags : 0;
+}
+
+/*
+ * Register support for a key-related algorithm.  This routine
+ * is called once for each algorithm supported a driver.
+ */
+int
+crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
+{
+	struct cryptocap *cap;
+	int err;
+	unsigned long d_flags;
+
+	dprintk("%s()\n", __FUNCTION__);
+	CRYPTO_DRIVER_LOCK();
+
+	cap = crypto_checkdriver(driverid);
+	if (cap != NULL &&
+	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
+		/*
+		 * XXX Do some performance testing to determine placing.
+		 * XXX We probably need an auxiliary data structure that
+		 * XXX describes relative performances.
+		 */
+
+		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
+		if (bootverbose)
+			printf("crypto: %s registers key alg %u flags %u\n"
+				, device_get_nameunit(cap->cc_dev)
+				, kalg
+				, flags
+			);
+		err = 0;
+	} else
+		err = EINVAL;
+
+	CRYPTO_DRIVER_UNLOCK();
+	return err;
+}
+
+/*
+ * Register support for a non-key-related algorithm.  This routine
+ * is called once for each such algorithm supported by a driver.
+ */
+int
+crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
+    u_int32_t flags)
+{
+	struct cryptocap *cap;
+	int err;
+	unsigned long d_flags;
+
+	dprintk("%s(id=0x%x, alg=%d, maxoplen=%d, flags=0x%x)\n", __FUNCTION__,
+			driverid, alg, maxoplen, flags);
+
+	CRYPTO_DRIVER_LOCK();
+
+	cap = crypto_checkdriver(driverid);
+	/* NB: algorithms are in the range [1..max] */
+	if (cap != NULL &&
+	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
+		/*
+		 * XXX Do some performance testing to determine placing.
+		 * XXX We probably need an auxiliary data structure that
+		 * XXX describes relative performances.
+		 */
+
+		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
+		cap->cc_max_op_len[alg] = maxoplen;
+		if (bootverbose)
+			printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
+				, device_get_nameunit(cap->cc_dev)
+				, alg
+				, flags
+				, maxoplen
+			);
+		cap->cc_sessions = 0;		/* Unmark */
+		err = 0;
+	} else
+		err = EINVAL;
+
+	CRYPTO_DRIVER_UNLOCK();
+	return err;
+}
+
+static void
+driver_finis(struct cryptocap *cap)
+{
+	u_int32_t ses, kops;
+
+	CRYPTO_DRIVER_ASSERT();
+
+	ses = cap->cc_sessions;
+	kops = cap->cc_koperations;
+	bzero(cap, sizeof(*cap));
+	if (ses != 0 || kops != 0) {
+		/*
+		 * If there are pending sessions,
+		 * just mark as invalid.
+		 */
+		cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
+		cap->cc_sessions = ses;
+		cap->cc_koperations = kops;
+	}
+}
+
+/*
+ * Unregister a crypto driver. If there are pending sessions using it,
+ * leave enough information around so that subsequent calls using those
+ * sessions will correctly detect the driver has been unregistered and
+ * reroute requests.
+ */
+int
+crypto_unregister(u_int32_t driverid, int alg)
+{
+	struct cryptocap *cap;
+	int i, err;
+	unsigned long d_flags;
+
+	dprintk("%s()\n", __FUNCTION__);
+	CRYPTO_DRIVER_LOCK();
+
+	cap = crypto_checkdriver(driverid);
+	if (cap != NULL &&
+	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
+	    cap->cc_alg[alg] != 0) {
+		cap->cc_alg[alg] = 0;
+		cap->cc_max_op_len[alg] = 0;
+
+		/* Was this the last algorithm ? */
+		for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
+			if (cap->cc_alg[i] != 0)
+				break;
+
+		if (i == CRYPTO_ALGORITHM_MAX + 1)
+			driver_finis(cap);
+		err = 0;
+	} else
+		err = EINVAL;
+	CRYPTO_DRIVER_UNLOCK();
+	return err;
+}
+
+/*
+ * Unregister all algorithms associated with a crypto driver.
+ * If there are pending sessions using it, leave enough information
+ * around so that subsequent calls using those sessions will
+ * correctly detect the driver has been unregistered and reroute
+ * requests.
+ */
+int
+crypto_unregister_all(u_int32_t driverid)
+{
+	struct cryptocap *cap;
+	int err;
+	unsigned long d_flags;
+
+	dprintk("%s()\n", __FUNCTION__);
+	CRYPTO_DRIVER_LOCK();
+	cap = crypto_checkdriver(driverid);
+	if (cap != NULL) {
+		driver_finis(cap);
+		err = 0;
+	} else
+		err = EINVAL;
+	CRYPTO_DRIVER_UNLOCK();
+
+	return err;
+}
+
+/*
+ * Clear blockage on a driver.  The what parameter indicates whether
+ * the driver is now ready for cryptop's and/or cryptokop's.
+ */
+int
+crypto_unblock(u_int32_t driverid, int what)
+{
+	struct cryptocap *cap;
+	int err;
+	unsigned long q_flags;
+
+	CRYPTO_Q_LOCK();
+	cap = crypto_checkdriver(driverid);
+	if (cap != NULL) {
+		if (what & CRYPTO_SYMQ) {
+			cap->cc_qblocked = 0;
+			cap->cc_unqblocked = 0;
+			crypto_all_qblocked = 0;
+		}
+		if (what & CRYPTO_ASYMQ) {
+			cap->cc_kqblocked = 0;
+			cap->cc_unkqblocked = 0;
+			crypto_all_kqblocked = 0;
+		}
+		wake_up_interruptible(&cryptoproc_wait);
+		err = 0;
+	} else
+		err = EINVAL;
+	CRYPTO_Q_UNLOCK(); //DAVIDM should this be a driver lock
+
+	return err;
+}
+
+/*
+ * Add a crypto request to a queue, to be processed by the kernel thread.
+ */
+int
+crypto_dispatch(struct cryptop *crp)
+{
+	struct cryptocap *cap;
+	int result = -1;
+	unsigned long q_flags;
+
+	dprintk("%s()\n", __FUNCTION__);
+	cryptostats.cs_ops++;
+
+	CRYPTO_Q_LOCK();
+	if (crypto_q_cnt >= crypto_q_max) {
+		cryptostats.cs_drops++;
+		CRYPTO_Q_UNLOCK();
+		return ENOMEM;
+	}
+	crypto_q_cnt++;
+
+	/* make sure we are starting a fresh run on this crp. */
+	crp->crp_flags &= ~CRYPTO_F_DONE;
+	crp->crp_etype = 0;
+
+#if defined(CONFIG_MV_CESA_OCF) || defined(CONFIG_MV_CESA_OCF_KW2)
+
+	CRYPTO_Q_UNLOCK();
+
+	/* warning: We are using the CRYPTO_F_BATCH to mark processing by HW,
+	   it should be disabled for software encryption */
+	if ((crp->crp_flags & CRYPTO_F_BATCH)) {
+		int hid = CRYPTO_SESID2HID(crp->crp_sid);
+		cap = crypto_checkdriver(hid);
+		/* Driver cannot disappear when there is an active session. */
+		KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
+
+		result = crypto_invoke(cap, crp, 0);
+		if (result != 0) {
+			CRYPTO_Q_LOCK();
+			crypto_q_cnt--;
+			cryptostats.cs_drops++;
+			CRYPTO_Q_UNLOCK();
+		}
+	} else {
+		CRYPTO_Q_LOCK();
+		TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
+		result = 0;
+		wake_up_interruptible(&cryptoproc_wait);
+		CRYPTO_Q_UNLOCK();
+	}
+
+#elif CONFIG_OF
+	if (mv_cesa_mode == CESA_OCF_M) {
+		dprintk("%s:cesa mode %d\n", __func__, mv_cesa_mode);
+
+		CRYPTO_Q_UNLOCK();
+
+		/* warning: We are using the CRYPTO_F_BATCH to mark processing by HW,
+		   it should be disabled for software encryption */
+		if ((crp->crp_flags & CRYPTO_F_BATCH)) {
+			int hid = CRYPTO_SESID2HID(crp->crp_sid);
+			cap = crypto_checkdriver(hid);
+			/* Driver cannot disappear when there is an active session. */
+			KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
+
+			result = crypto_invoke(cap, crp, 0);
+			if (result != 0) {
+				CRYPTO_Q_LOCK();
+				crypto_q_cnt--;
+				cryptostats.cs_drops++;
+				CRYPTO_Q_UNLOCK();
+			}
+		} else {
+			CRYPTO_Q_LOCK();
+			TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
+			result = 0;
+			wake_up_interruptible(&cryptoproc_wait);
+			CRYPTO_Q_UNLOCK();
+		}
+
+	} else {
+
+		/*
+		 * Caller marked the request to be processed immediately; dispatch
+		 * it directly to the driver unless the driver is currently blocked.
+		 */
+		if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
+			int hid = CRYPTO_SESID2HID(crp->crp_sid);
+			cap = crypto_checkdriver(hid);
+			/* Driver cannot disappear when there is an active session. */
+			KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
+			if (!cap->cc_qblocked) {
+				crypto_all_qblocked = 0;
+				crypto_drivers[hid].cc_unqblocked = 1;
+				CRYPTO_Q_UNLOCK();
+				result = crypto_invoke(cap, crp, 0);
+				CRYPTO_Q_LOCK();
+				if (result == ERESTART)
+					if (crypto_drivers[hid].cc_unqblocked)
+						crypto_drivers[hid].cc_qblocked = 1;
+				crypto_drivers[hid].cc_unqblocked = 0;
+			}
+		}
+		if (result == ERESTART) {
+			/*
+			 * The driver ran out of resources, mark the
+			 * driver ``blocked'' for cryptop's and put
+			 * the request back in the queue.  It would
+			 * best to put the request back where we got
+			 * it but that's hard so for now we put it
+			 * at the front.  This should be ok; putting
+			 * it at the end does not work.
+			 */
+			list_add(&crp->crp_next, &crp_q);
+			cryptostats.cs_blocks++;
+			result = 0;
+		} else if (result == -1) {
+			TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
+			result = 0;
+		}
+		wake_up_interruptible(&cryptoproc_wait);
+		CRYPTO_Q_UNLOCK();
+	}
+#else
+	/*
+	 * Caller marked the request to be processed immediately; dispatch
+	 * it directly to the driver unless the driver is currently blocked.
+	 */
+	if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
+		int hid = CRYPTO_SESID2HID(crp->crp_sid);
+		cap = crypto_checkdriver(hid);
+		/* Driver cannot disappear when there is an active session. */
+		KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
+		if (!cap->cc_qblocked) {
+			crypto_all_qblocked = 0;
+			crypto_drivers[hid].cc_unqblocked = 1;
+			CRYPTO_Q_UNLOCK();
+			result = crypto_invoke(cap, crp, 0);
+			CRYPTO_Q_LOCK();
+			if (result == ERESTART)
+				if (crypto_drivers[hid].cc_unqblocked)
+					crypto_drivers[hid].cc_qblocked = 1;
+			crypto_drivers[hid].cc_unqblocked = 0;
+		}
+	}
+	if (result == ERESTART) {
+		/*
+		 * The driver ran out of resources, mark the
+		 * driver ``blocked'' for cryptop's and put
+		 * the request back in the queue.  It would
+		 * best to put the request back where we got
+		 * it but that's hard so for now we put it
+		 * at the front.  This should be ok; putting
+		 * it at the end does not work.
+		 */
+		list_add(&crp->crp_next, &crp_q);
+		cryptostats.cs_blocks++;
+		result = 0;
+	} else if (result == -1) {
+		TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
+		result = 0;
+	}
+	wake_up_interruptible(&cryptoproc_wait);
+	CRYPTO_Q_UNLOCK();
+#endif
+
+	return result;
+}
+
+/*
+ * Add an asymetric crypto request to a queue,
+ * to be processed by the kernel thread.
+ */
+int
+crypto_kdispatch(struct cryptkop *krp)
+{
+	int error;
+	unsigned long q_flags;
+
+	cryptostats.cs_kops++;
+
+	error = crypto_kinvoke(krp, krp->krp_crid);
+	if (error == ERESTART) {
+		CRYPTO_Q_LOCK();
+		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
+		wake_up_interruptible(&cryptoproc_wait);
+		CRYPTO_Q_UNLOCK();
+		error = 0;
+	}
+	return error;
+}
+
+/*
+ * Verify a driver is suitable for the specified operation.
+ */
+static __inline int
+kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
+{
+	return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
+}
+
+/*
+ * Select a driver for an asym operation.  The driver must
+ * support the necessary algorithm.  The caller can constrain
+ * which device is selected with the flags parameter.  The
+ * algorithm we use here is pretty stupid; just use the first
+ * driver that supports the algorithms we need. If there are
+ * multiple suitable drivers we choose the driver with the
+ * fewest active operations.  We prefer hardware-backed
+ * drivers to software ones when either may be used.
+ */
+static struct cryptocap *
+crypto_select_kdriver(const struct cryptkop *krp, int flags)
+{
+	struct cryptocap *cap, *best, *blocked;
+	int match, hid;
+
+	CRYPTO_DRIVER_ASSERT();
+
+	/*
+	 * Look first for hardware crypto devices if permitted.
+	 */
+	if (flags & CRYPTOCAP_F_HARDWARE)
+		match = CRYPTOCAP_F_HARDWARE;
+	else
+		match = CRYPTOCAP_F_SOFTWARE;
+	best = NULL;
+	blocked = NULL;
+again:
+	for (hid = 0; hid < crypto_drivers_num; hid++) {
+		cap = &crypto_drivers[hid];
+		/*
+		 * If it's not initialized, is in the process of
+		 * going away, or is not appropriate (hardware
+		 * or software based on match), then skip.
+		 */
+		if (cap->cc_dev == NULL ||
+		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
+		    (cap->cc_flags & match) == 0)
+			continue;
+
+		/* verify all the algorithms are supported. */
+		if (kdriver_suitable(cap, krp)) {
+			if (best == NULL ||
+			    cap->cc_koperations < best->cc_koperations)
+				best = cap;
+		}
+	}
+	if (best != NULL)
+		return best;
+	if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
+		/* sort of an Algol 68-style for loop */
+		match = CRYPTOCAP_F_SOFTWARE;
+		goto again;
+	}
+	return best;
+}
+
+/*
+ * Dispatch an assymetric crypto request.
+ */
+static int
+crypto_kinvoke(struct cryptkop *krp, int crid)
+{
+	struct cryptocap *cap = NULL;
+	int error;
+	unsigned long d_flags;
+
+	KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
+	KASSERT(krp->krp_callback != NULL,
+	    ("%s: krp->crp_callback == NULL", __func__));
+
+	CRYPTO_DRIVER_LOCK();
+	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
+		cap = crypto_checkdriver(crid);
+		if (cap != NULL) {
+			/*
+			 * Driver present, it must support the necessary
+			 * algorithm and, if s/w drivers are excluded,
+			 * it must be registered as hardware-backed.
+			 */
+			if (!kdriver_suitable(cap, krp) ||
+			    (!crypto_devallowsoft &&
+			     (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
+				cap = NULL;
+		}
+	} else {
+		/*
+		 * No requested driver; select based on crid flags.
+		 */
+		if (!crypto_devallowsoft)	/* NB: disallow s/w drivers */
+			crid &= ~CRYPTOCAP_F_SOFTWARE;
+		cap = crypto_select_kdriver(krp, crid);
+	}
+	if (cap != NULL && !cap->cc_kqblocked) {
+		krp->krp_hid = cap - crypto_drivers;
+		cap->cc_koperations++;
+		CRYPTO_DRIVER_UNLOCK();
+		error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
+		CRYPTO_DRIVER_LOCK();
+		if (error == ERESTART) {
+			cap->cc_koperations--;
+			CRYPTO_DRIVER_UNLOCK();
+			return (error);
+		}
+		/* return the actual device used */
+		krp->krp_crid = krp->krp_hid;
+	} else {
+		/*
+		 * NB: cap is !NULL if device is blocked; in
+		 *     that case return ERESTART so the operation
+		 *     is resubmitted if possible.
+		 */
+		error = (cap == NULL) ? ENODEV : ERESTART;
+	}
+	CRYPTO_DRIVER_UNLOCK();
+
+	if (error) {
+		krp->krp_status = error;
+		crypto_kdone(krp);
+	}
+	return 0;
+}
+
+
+/*
+ * Dispatch a crypto request to the appropriate crypto devices.
+ */
+static int
+crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
+{
+	KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
+	KASSERT(crp->crp_callback != NULL,
+	    ("%s: crp->crp_callback == NULL", __func__));
+	KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));
+
+	dprintk("%s()\n", __FUNCTION__);
+
+#ifdef CRYPTO_TIMING
+	if (crypto_timing)
+		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
+#endif
+	if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
+		struct cryptodesc *crd;
+		u_int64_t nid;
+
+		/*
+		 * Driver has unregistered; migrate the session and return
+		 * an error to the caller so they'll resubmit the op.
+		 *
+		 * XXX: What if there are more already queued requests for this
+		 *      session?
+		 */
+		crypto_freesession(crp->crp_sid);
+
+		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
+			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
+
+		/* XXX propagate flags from initial session? */
+		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI),
+		    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
+			crp->crp_sid = nid;
+
+		crp->crp_etype = EAGAIN;
+		crypto_done(crp);
+		return 0;
+	} else {
+		/*
+		 * Invoke the driver to process the request.
+		 */
+		return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
+	}
+}
+
+/*
+ * Release a set of crypto descriptors.
+ */
+void
+crypto_freereq(struct cryptop *crp)
+{
+	struct cryptodesc *crd;
+
+	if (crp == NULL)
+		return;
+
+#ifdef DIAGNOSTIC
+	{
+		struct cryptop *crp2;
+		unsigned long q_flags;
+
+		CRYPTO_Q_LOCK();
+		TAILQ_FOREACH(crp2, &crp_q, crp_next) {
+			KASSERT(crp2 != crp,
+			    ("Freeing cryptop from the crypto queue (%p).",
+			    crp));
+		}
+		CRYPTO_Q_UNLOCK();
+		CRYPTO_RETQ_LOCK();
+		TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
+			KASSERT(crp2 != crp,
+			    ("Freeing cryptop from the return queue (%p).",
+			    crp));
+		}
+		CRYPTO_RETQ_UNLOCK();
+	}
+#endif
+
+	while ((crd = crp->crp_desc) != NULL) {
+		crp->crp_desc = crd->crd_next;
+		kmem_cache_free(cryptodesc_zone, crd);
+	}
+	kmem_cache_free(cryptop_zone, crp);
+}
+
+/*
+ * Acquire a set of crypto descriptors.
+ */
+struct cryptop *
+crypto_getreq(int num)
+{
+	struct cryptodesc *crd;
+	struct cryptop *crp;
+
+	crp = kmem_cache_alloc(cryptop_zone, SLAB_ATOMIC);
+	if (crp != NULL) {
+		memset(crp, 0, sizeof(*crp));
+		INIT_LIST_HEAD(&crp->crp_next);
+		init_waitqueue_head(&crp->crp_waitq);
+		while (num--) {
+			crd = kmem_cache_alloc(cryptodesc_zone, SLAB_ATOMIC);
+			if (crd == NULL) {
+				crypto_freereq(crp);
+				return NULL;
+			}
+			memset(crd, 0, sizeof(*crd));
+			crd->crd_next = crp->crp_desc;
+			crp->crp_desc = crd;
+		}
+	}
+	return crp;
+}
+
+/*
+ * Invoke the callback on behalf of the driver.
+ */
+void
+crypto_done(struct cryptop *crp)
+{
+	unsigned long q_flags;
+
+	dprintk("%s()\n", __FUNCTION__);
+	if ((crp->crp_flags & CRYPTO_F_DONE) == 0) {
+		crp->crp_flags |= CRYPTO_F_DONE;
+		CRYPTO_Q_LOCK();
+		crypto_q_cnt--;
+		CRYPTO_Q_UNLOCK();
+	} else
+		printk("crypto: crypto_done op already done, flags 0x%x",
+				crp->crp_flags);
+	if (crp->crp_etype != 0)
+		cryptostats.cs_errs++;
+	/*
+	 * CBIMM means unconditionally do the callback immediately;
+	 * CBIFSYNC means do the callback immediately only if the
+	 * operation was done synchronously.  Both are used to avoid
+	 * doing extraneous context switches; the latter is mostly
+	 * used with the software crypto driver.
+	 */
+	if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
+	    ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
+	     (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
+		/*
+		 * Do the callback directly.  This is ok when the
+		 * callback routine does very little (e.g. the
+		 * /dev/crypto callback method just does a wakeup).
+		 */
+		crp->crp_callback(crp);
+	} else {
+		unsigned long r_flags;
+		/*
+		 * Normal case; queue the callback for the thread.
+		 */
+		CRYPTO_RETQ_LOCK();
+		wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
+		TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
+		CRYPTO_RETQ_UNLOCK();
+	}
+}
+
+/*
+ * Invoke the callback on behalf of the driver.
+ */
+void
+crypto_kdone(struct cryptkop *krp)
+{
+	struct cryptocap *cap;
+	unsigned long d_flags;
+
+	if ((krp->krp_flags & CRYPTO_KF_DONE) != 0)
+		printk("crypto: crypto_kdone op already done, flags 0x%x",
+				krp->krp_flags);
+	krp->krp_flags |= CRYPTO_KF_DONE;
+	if (krp->krp_status != 0)
+		cryptostats.cs_kerrs++;
+
+	CRYPTO_DRIVER_LOCK();
+	/* XXX: What if driver is loaded in the meantime? */
+	if (krp->krp_hid < crypto_drivers_num) {
+		cap = &crypto_drivers[krp->krp_hid];
+		cap->cc_koperations--;
+		KASSERT(cap->cc_koperations >= 0, ("cc_koperations < 0"));
+		if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
+			crypto_remove(cap);
+	}
+	CRYPTO_DRIVER_UNLOCK();
+
+	/*
+	 * CBIMM means unconditionally do the callback immediately;
+	 * This is used to avoid doing extraneous context switches
+	 */
+	if ((krp->krp_flags & CRYPTO_KF_CBIMM)) {
+		/*
+		 * Do the callback directly.  This is ok when the
+		 * callback routine does very little (e.g. the
+		 * /dev/crypto callback method just does a wakeup).
+		 */
+		krp->krp_callback(krp);
+	} else {
+		unsigned long r_flags;
+		/*
+		 * Normal case; queue the callback for the thread.
+		 */
+		CRYPTO_RETQ_LOCK();
+		wake_up_interruptible(&cryptoretproc_wait);/* shared wait channel */
+		TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
+		CRYPTO_RETQ_UNLOCK();
+	}
+}
+
+int
+crypto_getfeat(int *featp)
+{
+	int hid, kalg, feat = 0;
+	unsigned long d_flags;
+
+	CRYPTO_DRIVER_LOCK();
+	for (hid = 0; hid < crypto_drivers_num; hid++) {
+		const struct cryptocap *cap = &crypto_drivers[hid];
+
+		if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
+		    !crypto_devallowsoft) {
+			continue;
+		}
+		for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
+			if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED)
+				feat |=  1 << kalg;
+	}
+	CRYPTO_DRIVER_UNLOCK();
+	*featp = feat;
+	return (0);
+}
+
+/*
+ * Crypto thread, dispatches crypto requests.
+ */
+static int
+crypto_proc(void *arg)
+{
+	struct cryptop *crp, *submit;
+	struct cryptkop *krp, *krpp;
+	struct cryptocap *cap;
+	u_int32_t hid;
+	int result, hint;
+	unsigned long q_flags, wait_flags;
+	int loopcount = 0;
+
+	set_current_state(TASK_INTERRUPTIBLE);
+
+	CRYPTO_Q_LOCK();
+	for (;;) {
+		/*
+		 * we need to make sure we don't get into a busy loop with nothing
+		 * to do,  the two crypto_all_*blocked vars help us find out when
+		 * we are all full and can do nothing on any driver or Q.  If so we
+		 * wait for an unblock.
+		 */
+		crypto_all_qblocked  = !list_empty(&crp_q);
+
+		/*
+		 * Find the first element in the queue that can be
+		 * processed and look-ahead to see if multiple ops
+		 * are ready for the same driver.
+		 */
+		submit = NULL;
+		hint = 0;
+		list_for_each_entry(crp, &crp_q, crp_next) {
+			hid = CRYPTO_SESID2HID(crp->crp_sid);
+			cap = crypto_checkdriver(hid);
+			/*
+			 * Driver cannot disappear when there is an active
+			 * session.
+			 */
+			KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
+			    __func__, __LINE__));
+			if (cap == NULL || cap->cc_dev == NULL) {
+				/* Op needs to be migrated, process it. */
+				if (submit == NULL)
+					submit = crp;
+				break;
+			}
+			if (!cap->cc_qblocked) {
+				if (submit != NULL) {
+					/*
+					 * We stop on finding another op,
+					 * regardless whether its for the same
+					 * driver or not.  We could keep
+					 * searching the queue but it might be
+					 * better to just use a per-driver
+					 * queue instead.
+					 */
+					if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
+						hint = CRYPTO_HINT_MORE;
+					break;
+				} else {
+					submit = crp;
+					if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
+						break;
+					/* keep scanning for more are q'd */
+				}
+			}
+		}
+		if (submit != NULL) {
+			hid = CRYPTO_SESID2HID(submit->crp_sid);
+			crypto_all_qblocked = 0;
+			list_del(&submit->crp_next);
+			crypto_drivers[hid].cc_unqblocked = 1;
+			cap = crypto_checkdriver(hid);
+			CRYPTO_Q_UNLOCK();
+			KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
+			    __func__, __LINE__));
+			result = crypto_invoke(cap, submit, hint);
+			CRYPTO_Q_LOCK();
+			if (result == ERESTART) {
+				/*
+				 * The driver ran out of resources, mark the
+				 * driver ``blocked'' for cryptop's and put
+				 * the request back in the queue.  It would
+				 * best to put the request back where we got
+				 * it but that's hard so for now we put it
+				 * at the front.  This should be ok; putting
+				 * it at the end does not work.
+				 */
+				/* XXX validate sid again? */
+				list_add(&submit->crp_next, &crp_q);
+				cryptostats.cs_blocks++;
+				if (crypto_drivers[hid].cc_unqblocked)
+					crypto_drivers[hid].cc_qblocked=0;
+				crypto_drivers[hid].cc_unqblocked=0;
+			}
+			crypto_drivers[hid].cc_unqblocked = 0;
+		}
+
+		crypto_all_kqblocked = !list_empty(&crp_kq);
+
+		/* As above, but for key ops */
+		krp = NULL;
+		list_for_each_entry(krpp, &crp_kq, krp_next) {
+			cap = crypto_checkdriver(krpp->krp_hid);
+			if (cap == NULL || cap->cc_dev == NULL) {
+				/*
+				 * Operation needs to be migrated, invalidate
+				 * the assigned device so it will reselect a
+				 * new one below.  Propagate the original
+				 * crid selection flags if supplied.
+				 */
+				krp->krp_hid = krp->krp_crid &
+				    (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
+				if (krp->krp_hid == 0)
+					krp->krp_hid =
+				    CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
+				break;
+			}
+			if (!cap->cc_kqblocked) {
+				krp = krpp;
+				break;
+			}
+		}
+		if (krp != NULL) {
+			crypto_all_kqblocked = 0;
+			list_del(&krp->krp_next);
+			crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
+			CRYPTO_Q_UNLOCK();
+			result = crypto_kinvoke(krp, krp->krp_hid);
+			CRYPTO_Q_LOCK();
+			if (result == ERESTART) {
+				/*
+				 * The driver ran out of resources, mark the
+				 * driver ``blocked'' for cryptkop's and put
+				 * the request back in the queue.  It would
+				 * best to put the request back where we got
+				 * it but that's hard so for now we put it
+				 * at the front.  This should be ok; putting
+				 * it at the end does not work.
+				 */
+				/* XXX validate sid again? */
+				list_add(&krp->krp_next, &crp_kq);
+				cryptostats.cs_kblocks++;
+			} else
+				crypto_drivers[krp->krp_hid].cc_kqblocked = 0;
+		}
+
+		if (submit == NULL && krp == NULL) {
+			/*
+			 * Nothing more to be processed.  Sleep until we're
+			 * woken because there are more ops to process.
+			 * This happens either by submission or by a driver
+			 * becoming unblocked and notifying us through
+			 * crypto_unblock.  Note that when we wakeup we
+			 * start processing each queue again from the
+			 * front. It's not clear that it's important to
+			 * preserve this ordering since ops may finish
+			 * out of order if dispatched to different devices
+			 * and some become blocked while others do not.
+			 */
+			dprintk("%s - sleeping (qe=%d qb=%d kqe=%d kqb=%d)\n",
+					__FUNCTION__,
+					list_empty(&crp_q), crypto_all_qblocked,
+					list_empty(&crp_kq), crypto_all_kqblocked);
+			loopcount = 0;
+			CRYPTO_Q_UNLOCK();
+			spin_lock_irqsave(&cryptoproc_wait.lock, wait_flags);
+			wait_event_interruptible_locked_irq(cryptoproc_wait,
+					!(list_empty(&crp_q) || crypto_all_qblocked) ||
+					!(list_empty(&crp_kq) || crypto_all_kqblocked) ||
+					kthread_should_stop());
+			spin_unlock_irqrestore(&cryptoproc_wait.lock, wait_flags);
+			if (signal_pending (current)) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+				spin_lock_irq(&current->sigmask_lock);
+#endif
+				flush_signals(current);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+				spin_unlock_irq(&current->sigmask_lock);
+#endif
+			}
+			CRYPTO_Q_LOCK();
+			dprintk("%s - awake\n", __FUNCTION__);
+			if (kthread_should_stop())
+				break;
+			cryptostats.cs_intrs++;
+		} else if (loopcount > crypto_max_loopcount) {
+			/*
+			 * Give other processes a chance to run if we've
+			 * been using the CPU exclusively for a while.
+			 */
+			loopcount = 0;
+			CRYPTO_Q_UNLOCK();
+			schedule();
+			CRYPTO_Q_LOCK();
+		}
+		loopcount++;
+	}
+	CRYPTO_Q_UNLOCK();
+	return 0;
+}
+
+/*
+ * Crypto returns thread, does callbacks for processed crypto requests.
+ * Callbacks are done here, rather than in the crypto drivers, because
+ * callbacks typically are expensive and would slow interrupt handling.
+ */
+static int
+crypto_ret_proc(void *arg)
+{
+	struct cryptop *crpt;
+	struct cryptkop *krpt;
+	unsigned long  r_flags, wait_flags;
+
+	set_current_state(TASK_INTERRUPTIBLE);
+
+	CRYPTO_RETQ_LOCK();
+	for (;;) {
+		/* Harvest return q's for completed ops */
+		crpt = NULL;
+		if (!list_empty(&crp_ret_q))
+			crpt = list_entry(crp_ret_q.next, typeof(*crpt), crp_next);
+		if (crpt != NULL)
+			list_del(&crpt->crp_next);
+
+		krpt = NULL;
+		if (!list_empty(&crp_ret_kq))
+			krpt = list_entry(crp_ret_kq.next, typeof(*krpt), krp_next);
+		if (krpt != NULL)
+			list_del(&krpt->krp_next);
+
+		if (crpt != NULL || krpt != NULL) {
+			CRYPTO_RETQ_UNLOCK();
+			/*
+			 * Run callbacks unlocked.
+			 */
+			if (crpt != NULL)
+				crpt->crp_callback(crpt);
+			if (krpt != NULL)
+				krpt->krp_callback(krpt);
+			CRYPTO_RETQ_LOCK();
+		} else {
+			/*
+			 * Nothing more to be processed.  Sleep until we're
+			 * woken because there are more returns to process.
+			 */
+			dprintk("%s - sleeping\n", __FUNCTION__);
+			CRYPTO_RETQ_UNLOCK();
+			spin_lock_irqsave(&cryptoretproc_wait.lock, wait_flags);
+			wait_event_interruptible_locked_irq(cryptoretproc_wait,
+					!list_empty(&crp_ret_q) ||
+					!list_empty(&crp_ret_kq) ||
+					kthread_should_stop());
+			spin_unlock_irqrestore(&cryptoretproc_wait.lock, wait_flags);
+			if (signal_pending (current)) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+				spin_lock_irq(&current->sigmask_lock);
+#endif
+				flush_signals(current);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+				spin_unlock_irq(&current->sigmask_lock);
+#endif
+			}
+			CRYPTO_RETQ_LOCK();
+			dprintk("%s - awake\n", __FUNCTION__);
+			if (kthread_should_stop()) {
+				dprintk("%s - EXITING!\n", __FUNCTION__);
+				break;
+			}
+			cryptostats.cs_rets++;
+		}
+	}
+	CRYPTO_RETQ_UNLOCK();
+	return 0;
+}
+
+
+#if 0 /* should put this into /proc or something */
+static void
+db_show_drivers(void)
+{
+	int hid;
+
+	db_printf("%12s %4s %4s %8s %2s %2s\n"
+		, "Device"
+		, "Ses"
+		, "Kops"
+		, "Flags"
+		, "QB"
+		, "KB"
+	);
+	for (hid = 0; hid < crypto_drivers_num; hid++) {
+		const struct cryptocap *cap = &crypto_drivers[hid];
+		if (cap->cc_dev == NULL)
+			continue;
+		db_printf("%-12s %4u %4u %08x %2u %2u\n"
+		    , device_get_nameunit(cap->cc_dev)
+		    , cap->cc_sessions
+		    , cap->cc_koperations
+		    , cap->cc_flags
+		    , cap->cc_qblocked
+		    , cap->cc_kqblocked
+		);
+	}
+}
+
+DB_SHOW_COMMAND(crypto, db_show_crypto)
+{
+	struct cryptop *crp;
+
+	db_show_drivers();
+	db_printf("\n");
+
+	db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
+	    "HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
+	    "Desc", "Callback");
+	TAILQ_FOREACH(crp, &crp_q, crp_next) {
+		db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
+		    , (int) CRYPTO_SESID2HID(crp->crp_sid)
+		    , (int) CRYPTO_SESID2CAPS(crp->crp_sid)
+		    , crp->crp_ilen, crp->crp_olen
+		    , crp->crp_etype
+		    , crp->crp_flags
+		    , crp->crp_desc
+		    , crp->crp_callback
+		);
+	}
+	if (!TAILQ_EMPTY(&crp_ret_q)) {
+		db_printf("\n%4s %4s %4s %8s\n",
+		    "HID", "Etype", "Flags", "Callback");
+		TAILQ_FOREACH(crp, &crp_ret_q, crp_next) {
+			db_printf("%4u %4u %04x %8p\n"
+			    , (int) CRYPTO_SESID2HID(crp->crp_sid)
+			    , crp->crp_etype
+			    , crp->crp_flags
+			    , crp->crp_callback
+			);
+		}
+	}
+}
+
+DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
+{
+	struct cryptkop *krp;
+
+	db_show_drivers();
+	db_printf("\n");
+
+	db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
+	    "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
+	TAILQ_FOREACH(krp, &crp_kq, krp_next) {
+		db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
+		    , krp->krp_op
+		    , krp->krp_status
+		    , krp->krp_iparams, krp->krp_oparams
+		    , krp->krp_crid, krp->krp_hid
+		    , krp->krp_callback
+		);
+	}
+	if (!TAILQ_EMPTY(&crp_ret_q)) {
+		db_printf("%4s %5s %8s %4s %8s\n",
+		    "Op", "Status", "CRID", "HID", "Callback");
+		TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) {
+			db_printf("%4u %5u %08x %4u %8p\n"
+			    , krp->krp_op
+			    , krp->krp_status
+			    , krp->krp_crid, krp->krp_hid
+			    , krp->krp_callback
+			);
+		}
+	}
+}
+#endif
+
+
+int
+crypto_init(void)
+{
+	int error;
+	unsigned long cpu;
+
+	dprintk("%s(%p)\n", __FUNCTION__, (void *) crypto_init);
+
+	if (crypto_initted)
+		return 0;
+	crypto_initted = 1;
+
+	spin_lock_init(&crypto_drivers_lock);
+	spin_lock_init(&crypto_q_lock);
+	spin_lock_init(&crypto_ret_q_lock);
+
+	cryptop_zone = kmem_cache_create("cryptop", sizeof(struct cryptop),
+				       0, SLAB_HWCACHE_ALIGN, NULL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+				       , NULL
+#endif
+					);
+
+	cryptodesc_zone = kmem_cache_create("cryptodesc", sizeof(struct cryptodesc),
+				       0, SLAB_HWCACHE_ALIGN, NULL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+				       , NULL
+#endif
+					);
+
+	if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
+		printk("crypto: crypto_init cannot setup crypto zones\n");
+		error = ENOMEM;
+		goto bad;
+	}
+
+	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
+	crypto_drivers = kmalloc(crypto_drivers_num * sizeof(struct cryptocap),
+			GFP_KERNEL);
+	if (crypto_drivers == NULL) {
+		printk("crypto: crypto_init cannot setup crypto drivers\n");
+		error = ENOMEM;
+		goto bad;
+	}
+
+	memset(crypto_drivers, 0, crypto_drivers_num * sizeof(struct cryptocap));
+
+	ocf_for_each_cpu(cpu) {
+		cryptoproc[cpu] = kthread_create(crypto_proc, (void *) cpu,
+									"ocf_%d", (int) cpu);
+		if (IS_ERR(cryptoproc[cpu])) {
+			error = PTR_ERR(cryptoproc[cpu]);
+			printk("crypto: crypto_init cannot start crypto thread; error %d",
+				error);
+			goto bad;
+		}
+		kthread_bind(cryptoproc[cpu], cpu);
+		wake_up_process(cryptoproc[cpu]);
+
+		cryptoretproc[cpu] = kthread_create(crypto_ret_proc, (void *) cpu,
+									"ocf_ret_%d", (int) cpu);
+		if (IS_ERR(cryptoretproc[cpu])) {
+			error = PTR_ERR(cryptoretproc[cpu]);
+			printk("crypto: crypto_init cannot start cryptoret thread; error %d",
+					error);
+			goto bad;
+		}
+		kthread_bind(cryptoretproc[cpu], cpu);
+		wake_up_process(cryptoretproc[cpu]);
+	}
+
+	return 0;
+bad:
+	crypto_exit();
+	return error;
+}
+
+
+static void
+crypto_exit(void)
+{
+	int cpu;
+
+	dprintk("%s()\n", __FUNCTION__);
+
+	/*
+	 * Terminate any crypto threads.
+	 */
+	ocf_for_each_cpu(cpu) {
+		kthread_stop(cryptoproc[cpu]);
+		kthread_stop(cryptoretproc[cpu]);
+	}
+
+	/*
+	 * Reclaim dynamically allocated resources.
+	 */
+	if (crypto_drivers != NULL)
+		kfree(crypto_drivers);
+
+	if (cryptodesc_zone != NULL)
+		kmem_cache_destroy(cryptodesc_zone);
+	if (cryptop_zone != NULL)
+		kmem_cache_destroy(cryptop_zone);
+}
+
+
+EXPORT_SYMBOL(crypto_newsession);
+EXPORT_SYMBOL(crypto_freesession);
+EXPORT_SYMBOL(crypto_get_driverid);
+EXPORT_SYMBOL(crypto_kregister);
+EXPORT_SYMBOL(crypto_register);
+EXPORT_SYMBOL(crypto_unregister);
+EXPORT_SYMBOL(crypto_unregister_all);
+EXPORT_SYMBOL(crypto_unblock);
+EXPORT_SYMBOL(crypto_dispatch);
+EXPORT_SYMBOL(crypto_kdispatch);
+EXPORT_SYMBOL(crypto_freereq);
+EXPORT_SYMBOL(crypto_getreq);
+EXPORT_SYMBOL(crypto_done);
+EXPORT_SYMBOL(crypto_kdone);
+EXPORT_SYMBOL(crypto_getfeat);
+EXPORT_SYMBOL(crypto_userasymcrypto);
+EXPORT_SYMBOL(crypto_getcaps);
+EXPORT_SYMBOL(crypto_find_driver);
+EXPORT_SYMBOL(crypto_find_device_byhid);
+EXPORT_SYMBOL(crypto_init);
+
+module_init(crypto_init);
+module_exit(crypto_exit);
+
+MODULE_LICENSE("BSD");
+MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
+MODULE_DESCRIPTION("OCF (OpenBSD Cryptographic Framework)");
diff --git a/crypto/ocf/cryptocteon/Makefile b/crypto/ocf/cryptocteon/Makefile
new file mode 100644
index 000000000000..7e8da193d2d1
--- /dev/null
+++ b/crypto/ocf/cryptocteon/Makefile
@@ -0,0 +1,16 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+obj-$(CONFIG_OCF_CRYPTOCTEON) += cryptocteon.o
+
+obj ?= .
+EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
+
+ifdef CONFIG_OCF_CRYPTOCTEON
+# you need the cavium crypto component installed
+EXTRA_CFLAGS += -I$(ROOTDIR)/prop/include
+endif
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
diff --git a/crypto/ocf/cryptocteon/README.txt b/crypto/ocf/cryptocteon/README.txt
new file mode 100644
index 000000000000..e6941cb61c4b
--- /dev/null
+++ b/crypto/ocf/cryptocteon/README.txt
@@ -0,0 +1,10 @@
+
+You will need the CRYPTO package installed to build this driver,  and
+potentially the ADK.
+
+cavium_crypto sourced from:
+
+	adk/components/source/cavium_ipsec_kame/cavium_ipsec.c
+
+and significantly modified to suit use with OCF.  All original
+copyright/ownership headers retained.
diff --git a/crypto/ocf/cryptocteon/cavium_crypto.c b/crypto/ocf/cryptocteon/cavium_crypto.c
new file mode 100644
index 000000000000..0254b9bd02c7
--- /dev/null
+++ b/crypto/ocf/cryptocteon/cavium_crypto.c
@@ -0,0 +1,2283 @@
+/*
+ * Copyright (c) 2009 David McCullough <david.mccullough@securecomputing.com>
+ *
+ * Copyright (c) 2003-2007 Cavium Networks (support@cavium.com). All rights
+ * reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Cavium Networks
+ * 4. Cavium Networks' name may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * This Software, including technical data, may be subject to U.S. export
+ * control laws, including the U.S. Export Administration Act and its
+ * associated regulations, and may be subject to export or import regulations
+ * in other countries. You warrant that You will comply strictly in all
+ * respects with all such regulations and acknowledge that you have the
+ * responsibility to obtain licenses to export, re-export or import the
+ * Software.
+ *
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" AND
+ * WITH ALL FAULTS AND CAVIUM MAKES NO PROMISES, REPRESENTATIONS OR WARRANTIES,
+ * EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO THE
+ * SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+*/
+/****************************************************************************/
+
+#include <linux/scatterlist.h>
+#include <asm/octeon/octeon.h>
+#include "octeon-asm.h"
+
+/****************************************************************************/
+
+extern unsigned long octeon_crypto_enable(struct octeon_cop2_state *);
+extern void octeon_crypto_disable(struct octeon_cop2_state *, unsigned long);
+
+#define SG_INIT(s, p, i, l) \
+	{ \
+	    (i) = 0; \
+	    (l) = (s)[0].length; \
+	    (p) = (typeof(p)) sg_virt((s)); \
+		CVMX_PREFETCH0((p)); \
+	}
+
+#define SG_CONSUME(s, p, i, l) \
+	{ \
+		(p)++; \
+		(l) -= sizeof(*(p)); \
+		if ((l) < 0) { \
+			dprintk("%s, %d: l = %d\n", __FILE__, __LINE__, l); \
+		} else if ((l) == 0) { \
+		    (i)++; \
+		    (l) = (s)[0].length; \
+		    (p) = (typeof(p)) sg_virt(s); \
+			CVMX_PREFETCH0((p)); \
+		} \
+	}
+
+#define ESP_HEADER_LENGTH     8
+#define DES_CBC_IV_LENGTH     8
+#define AES_CBC_IV_LENGTH     16
+#define ESP_HMAC_LEN          12
+
+#define ESP_HEADER_LENGTH 8
+#define DES_CBC_IV_LENGTH 8
+
+/****************************************************************************/
+
+#define CVM_LOAD_SHA_UNIT(dat, next)  { \
+   if (next == 0) {                     \
+      next = 1;                         \
+      CVMX_MT_HSH_DAT (dat, 0);         \
+   } else if (next == 1) {              \
+      next = 2;                         \
+      CVMX_MT_HSH_DAT (dat, 1);         \
+   } else if (next == 2) {              \
+      next = 3;                    \
+      CVMX_MT_HSH_DAT (dat, 2);         \
+   } else if (next == 3) {              \
+      next = 4;                         \
+      CVMX_MT_HSH_DAT (dat, 3);         \
+   } else if (next == 4) {              \
+      next = 5;                           \
+      CVMX_MT_HSH_DAT (dat, 4);         \
+   } else if (next == 5) {              \
+      next = 6;                         \
+      CVMX_MT_HSH_DAT (dat, 5);         \
+   } else if (next == 6) {              \
+      next = 7;                         \
+      CVMX_MT_HSH_DAT (dat, 6);         \
+   } else {                             \
+     CVMX_MT_HSH_STARTSHA (dat);        \
+     next = 0;                          \
+   }                                    \
+}
+
+#define CVM_LOAD2_SHA_UNIT(dat1, dat2, next)  { \
+   if (next == 0) {                      \
+      CVMX_MT_HSH_DAT (dat1, 0);         \
+      CVMX_MT_HSH_DAT (dat2, 1);         \
+      next = 2;                          \
+   } else if (next == 1) {               \
+      CVMX_MT_HSH_DAT (dat1, 1);         \
+      CVMX_MT_HSH_DAT (dat2, 2);         \
+      next = 3;                          \
+   } else if (next == 2) {               \
+      CVMX_MT_HSH_DAT (dat1, 2);         \
+      CVMX_MT_HSH_DAT (dat2, 3);         \
+      next = 4;                          \
+   } else if (next == 3) {               \
+      CVMX_MT_HSH_DAT (dat1, 3);         \
+      CVMX_MT_HSH_DAT (dat2, 4);         \
+      next = 5;                          \
+   } else if (next == 4) {               \
+      CVMX_MT_HSH_DAT (dat1, 4);         \
+      CVMX_MT_HSH_DAT (dat2, 5);         \
+      next = 6;                          \
+   } else if (next == 5) {               \
+      CVMX_MT_HSH_DAT (dat1, 5);         \
+      CVMX_MT_HSH_DAT (dat2, 6);         \
+      next = 7;                          \
+   } else if (next == 6) {               \
+      CVMX_MT_HSH_DAT (dat1, 6);         \
+      CVMX_MT_HSH_STARTSHA (dat2);       \
+      next = 0;                          \
+   } else {                              \
+     CVMX_MT_HSH_STARTSHA (dat1);        \
+     CVMX_MT_HSH_DAT (dat2, 0);          \
+     next = 1;                           \
+   }                                     \
+}
+
+/****************************************************************************/
+
+#define CVM_LOAD_MD5_UNIT(dat, next)  { \
+   if (next == 0) {                     \
+      next = 1;                         \
+      CVMX_MT_HSH_DAT (dat, 0);         \
+   } else if (next == 1) {              \
+      next = 2;                         \
+      CVMX_MT_HSH_DAT (dat, 1);         \
+   } else if (next == 2) {              \
+      next = 3;                    \
+      CVMX_MT_HSH_DAT (dat, 2);         \
+   } else if (next == 3) {              \
+      next = 4;                         \
+      CVMX_MT_HSH_DAT (dat, 3);         \
+   } else if (next == 4) {              \
+      next = 5;                           \
+      CVMX_MT_HSH_DAT (dat, 4);         \
+   } else if (next == 5) {              \
+      next = 6;                         \
+      CVMX_MT_HSH_DAT (dat, 5);         \
+   } else if (next == 6) {              \
+      next = 7;                         \
+      CVMX_MT_HSH_DAT (dat, 6);         \
+   } else {                             \
+     CVMX_MT_HSH_STARTMD5 (dat);        \
+     next = 0;                          \
+   }                                    \
+}
+
+#define CVM_LOAD2_MD5_UNIT(dat1, dat2, next)  { \
+   if (next == 0) {                      \
+      CVMX_MT_HSH_DAT (dat1, 0);         \
+      CVMX_MT_HSH_DAT (dat2, 1);         \
+      next = 2;                          \
+   } else if (next == 1) {               \
+      CVMX_MT_HSH_DAT (dat1, 1);         \
+      CVMX_MT_HSH_DAT (dat2, 2);         \
+      next = 3;                          \
+   } else if (next == 2) {               \
+      CVMX_MT_HSH_DAT (dat1, 2);         \
+      CVMX_MT_HSH_DAT (dat2, 3);         \
+      next = 4;                          \
+   } else if (next == 3) {               \
+      CVMX_MT_HSH_DAT (dat1, 3);         \
+      CVMX_MT_HSH_DAT (dat2, 4);         \
+      next = 5;                          \
+   } else if (next == 4) {               \
+      CVMX_MT_HSH_DAT (dat1, 4);         \
+      CVMX_MT_HSH_DAT (dat2, 5);         \
+      next = 6;                          \
+   } else if (next == 5) {               \
+      CVMX_MT_HSH_DAT (dat1, 5);         \
+      CVMX_MT_HSH_DAT (dat2, 6);         \
+      next = 7;                          \
+   } else if (next == 6) {               \
+      CVMX_MT_HSH_DAT (dat1, 6);         \
+      CVMX_MT_HSH_STARTMD5 (dat2);       \
+      next = 0;                          \
+   } else {                              \
+     CVMX_MT_HSH_STARTMD5 (dat1);        \
+     CVMX_MT_HSH_DAT (dat2, 0);          \
+     next = 1;                           \
+   }                                     \
+}
+
+/****************************************************************************/
+
+static inline uint64_t
+swap64(uint64_t a)
+{
+    return ((a >> 56) |
+       (((a >> 48) & 0xfful) << 8) |
+       (((a >> 40) & 0xfful) << 16) |
+       (((a >> 32) & 0xfful) << 24) |
+       (((a >> 24) & 0xfful) << 32) |
+       (((a >> 16) & 0xfful) << 40) |
+       (((a >> 8) & 0xfful) << 48) | (((a >> 0) & 0xfful) << 56));
+}
+
+/****************************************************************************/
+
+void
+octo_calc_hash(__u8 auth, unsigned char *key, uint64_t *inner, uint64_t *outer)
+{
+    uint8_t hash_key[64];
+    uint64_t *key1;
+    register uint64_t xor1 = 0x3636363636363636ULL;
+    register uint64_t xor2 = 0x5c5c5c5c5c5c5c5cULL;
+    struct octeon_cop2_state state;
+    unsigned long flags;
+
+    dprintk("%s()\n", __FUNCTION__);
+
+    memset(hash_key, 0, sizeof(hash_key));
+    memcpy(hash_key, (uint8_t *) key, (auth ? 20 : 16));
+    key1 = (uint64_t *) hash_key;
+    flags = octeon_crypto_enable(&state);
+    if (auth) {
+       CVMX_MT_HSH_IV(0x67452301EFCDAB89ULL, 0);
+       CVMX_MT_HSH_IV(0x98BADCFE10325476ULL, 1);
+       CVMX_MT_HSH_IV(0xC3D2E1F000000000ULL, 2);
+    } else {
+       CVMX_MT_HSH_IV(0x0123456789ABCDEFULL, 0);
+       CVMX_MT_HSH_IV(0xFEDCBA9876543210ULL, 1);
+    }
+
+    CVMX_MT_HSH_DAT((*key1 ^ xor1), 0);
+    key1++;
+    CVMX_MT_HSH_DAT((*key1 ^ xor1), 1);
+    key1++;
+    CVMX_MT_HSH_DAT((*key1 ^ xor1), 2);
+    key1++;
+    CVMX_MT_HSH_DAT((*key1 ^ xor1), 3);
+    key1++;
+    CVMX_MT_HSH_DAT((*key1 ^ xor1), 4);
+    key1++;
+    CVMX_MT_HSH_DAT((*key1 ^ xor1), 5);
+    key1++;
+    CVMX_MT_HSH_DAT((*key1 ^ xor1), 6);
+    key1++;
+    if (auth)
+	CVMX_MT_HSH_STARTSHA((*key1 ^ xor1));
+    else
+	CVMX_MT_HSH_STARTMD5((*key1 ^ xor1));
+
+    CVMX_MF_HSH_IV(inner[0], 0);
+    CVMX_MF_HSH_IV(inner[1], 1);
+    if (auth) {
+	inner[2] = 0;
+	CVMX_MF_HSH_IV(((uint64_t *) inner)[2], 2);
+    }
+
+    memset(hash_key, 0, sizeof(hash_key));
+    memcpy(hash_key, (uint8_t *) key, (auth ? 20 : 16));
+    key1 = (uint64_t *) hash_key;
+    if (auth) {
+      CVMX_MT_HSH_IV(0x67452301EFCDAB89ULL, 0);
+      CVMX_MT_HSH_IV(0x98BADCFE10325476ULL, 1);
+      CVMX_MT_HSH_IV(0xC3D2E1F000000000ULL, 2);
+    } else {
+      CVMX_MT_HSH_IV(0x0123456789ABCDEFULL, 0);
+      CVMX_MT_HSH_IV(0xFEDCBA9876543210ULL, 1);
+    }
+
+    CVMX_MT_HSH_DAT((*key1 ^ xor2), 0);
+    key1++;
+    CVMX_MT_HSH_DAT((*key1 ^ xor2), 1);
+    key1++;
+    CVMX_MT_HSH_DAT((*key1 ^ xor2), 2);
+    key1++;
+    CVMX_MT_HSH_DAT((*key1 ^ xor2), 3);
+    key1++;
+    CVMX_MT_HSH_DAT((*key1 ^ xor2), 4);
+    key1++;
+    CVMX_MT_HSH_DAT((*key1 ^ xor2), 5);
+    key1++;
+    CVMX_MT_HSH_DAT((*key1 ^ xor2), 6);
+    key1++;
+    if (auth)
+       CVMX_MT_HSH_STARTSHA((*key1 ^ xor2));
+    else
+       CVMX_MT_HSH_STARTMD5((*key1 ^ xor2));
+
+    CVMX_MF_HSH_IV(outer[0], 0);
+    CVMX_MF_HSH_IV(outer[1], 1);
+    if (auth) {
+      outer[2] = 0;
+      CVMX_MF_HSH_IV(outer[2], 2);
+    }
+    octeon_crypto_disable(&state, flags);
+    return;
+}
+
+/****************************************************************************/
+/* DES functions */
+
+int
+octo_des_cbc_encrypt(
+    struct octo_sess *od,
+    struct scatterlist *sg, int sg_len,
+    int auth_off, int auth_len,
+    int crypt_off, int crypt_len,
+    int icv_off, uint8_t *ivp)
+{
+    uint64_t *data;
+    int data_i, data_l;
+    struct octeon_cop2_state state;
+    unsigned long flags;
+
+    dprintk("%s()\n", __FUNCTION__);
+
+    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+	    (crypt_off & 0x7) || (crypt_off + crypt_len > sg_len))) {
+	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+	return -EINVAL;
+    }
+
+    SG_INIT(sg, data, data_i, data_l);
+
+    CVMX_PREFETCH0(ivp);
+    CVMX_PREFETCH0(od->octo_enckey);
+
+    flags = octeon_crypto_enable(&state);
+
+    /* load 3DES Key */
+    CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+    if (od->octo_encklen == 24) {
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+    } else if (od->octo_encklen == 8) {
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 1);
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 2);
+    } else {
+	octeon_crypto_disable(&state, flags);
+	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+	return -EINVAL;
+    }
+
+    CVMX_MT_3DES_IV(* (uint64_t *) ivp);
+
+    while (crypt_off > 0) {
+	SG_CONSUME(sg, data, data_i, data_l);
+	crypt_off -= 8;
+    }
+
+    while (crypt_len > 0) {
+	CVMX_MT_3DES_ENC_CBC(*data);
+	CVMX_MF_3DES_RESULT(*data);
+	SG_CONSUME(sg, data, data_i, data_l);
+	crypt_len -= 8;
+    }
+
+    octeon_crypto_disable(&state, flags);
+    return 0;
+}
+
+
+int
+octo_des_cbc_decrypt(
+    struct octo_sess *od,
+    struct scatterlist *sg, int sg_len,
+    int auth_off, int auth_len,
+    int crypt_off, int crypt_len,
+    int icv_off, uint8_t *ivp)
+{
+    uint64_t *data;
+    int data_i, data_l;
+    struct octeon_cop2_state state;
+    unsigned long flags;
+
+    dprintk("%s()\n", __FUNCTION__);
+
+    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+	    (crypt_off & 0x7) || (crypt_off + crypt_len > sg_len))) {
+	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+	return -EINVAL;
+    }
+
+    SG_INIT(sg, data, data_i, data_l);
+
+    CVMX_PREFETCH0(ivp);
+    CVMX_PREFETCH0(od->octo_enckey);
+
+    flags = octeon_crypto_enable(&state);
+
+    /* load 3DES Key */
+    CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+    if (od->octo_encklen == 24) {
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+    } else if (od->octo_encklen == 8) {
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 1);
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 2);
+    } else {
+	octeon_crypto_disable(&state, flags);
+	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+	return -EINVAL;
+    }
+
+    CVMX_MT_3DES_IV(* (uint64_t *) ivp);
+
+    while (crypt_off > 0) {
+	SG_CONSUME(sg, data, data_i, data_l);
+	crypt_off -= 8;
+    }
+
+    while (crypt_len > 0) {
+	CVMX_MT_3DES_DEC_CBC(*data);
+	CVMX_MF_3DES_RESULT(*data);
+	SG_CONSUME(sg, data, data_i, data_l);
+	crypt_len -= 8;
+    }
+
+    octeon_crypto_disable(&state, flags);
+    return 0;
+}
+
+/****************************************************************************/
+/* AES functions */
+
+int
+octo_aes_cbc_encrypt(
+    struct octo_sess *od,
+    struct scatterlist *sg, int sg_len,
+    int auth_off, int auth_len,
+    int crypt_off, int crypt_len,
+    int icv_off, uint8_t *ivp)
+{
+    uint64_t *data, *pdata;
+    int data_i, data_l;
+    struct octeon_cop2_state state;
+    unsigned long flags;
+
+    dprintk("%s()\n", __FUNCTION__);
+
+    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+	    (crypt_off & 0x7) || (crypt_off + crypt_len > sg_len))) {
+	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+	return -EINVAL;
+    }
+
+    SG_INIT(sg, data, data_i, data_l);
+
+    CVMX_PREFETCH0(ivp);
+    CVMX_PREFETCH0(od->octo_enckey);
+
+    flags = octeon_crypto_enable(&state);
+
+    /* load AES Key */
+    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+
+    if (od->octo_encklen == 16) {
+	CVMX_MT_AES_KEY(0x0, 2);
+	CVMX_MT_AES_KEY(0x0, 3);
+    } else if (od->octo_encklen == 24) {
+	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+	CVMX_MT_AES_KEY(0x0, 3);
+    } else if (od->octo_encklen == 32) {
+	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[3], 3);
+    } else {
+	octeon_crypto_disable(&state, flags);
+	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+	return -EINVAL;
+    }
+    CVMX_MT_AES_KEYLENGTH(od->octo_encklen / 8 - 1);
+
+    CVMX_MT_AES_IV(((uint64_t *) ivp)[0], 0);
+    CVMX_MT_AES_IV(((uint64_t *) ivp)[1], 1);
+
+    while (crypt_off > 0) {
+	SG_CONSUME(sg, data, data_i, data_l);
+	crypt_off -= 8;
+    }
+
+    while (crypt_len > 0) {
+	pdata = data;
+	CVMX_MT_AES_ENC_CBC0(*data);
+	SG_CONSUME(sg, data, data_i, data_l);
+	CVMX_MT_AES_ENC_CBC1(*data);
+	CVMX_MF_AES_RESULT(*pdata, 0);
+	CVMX_MF_AES_RESULT(*data, 1);
+	SG_CONSUME(sg, data, data_i, data_l);
+	crypt_len -= 16;
+    }
+
+    octeon_crypto_disable(&state, flags);
+    return 0;
+}
+
+
+int
+octo_aes_cbc_decrypt(
+    struct octo_sess *od,
+    struct scatterlist *sg, int sg_len,
+    int auth_off, int auth_len,
+    int crypt_off, int crypt_len,
+    int icv_off, uint8_t *ivp)
+{
+    uint64_t *data, *pdata;
+    int data_i, data_l;
+    struct octeon_cop2_state state;
+    unsigned long flags;
+
+    dprintk("%s()\n", __FUNCTION__);
+
+    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+	    (crypt_off & 0x7) || (crypt_off + crypt_len > sg_len))) {
+	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+	return -EINVAL;
+    }
+
+    SG_INIT(sg, data, data_i, data_l);
+
+    CVMX_PREFETCH0(ivp);
+    CVMX_PREFETCH0(od->octo_enckey);
+
+    flags = octeon_crypto_enable(&state);
+
+    /* load AES Key */
+    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+
+    if (od->octo_encklen == 16) {
+	CVMX_MT_AES_KEY(0x0, 2);
+	CVMX_MT_AES_KEY(0x0, 3);
+    } else if (od->octo_encklen == 24) {
+	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+	CVMX_MT_AES_KEY(0x0, 3);
+    } else if (od->octo_encklen == 32) {
+	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[3], 3);
+    } else {
+	octeon_crypto_disable(&state, flags);
+	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+	return -EINVAL;
+    }
+    CVMX_MT_AES_KEYLENGTH(od->octo_encklen / 8 - 1);
+
+    CVMX_MT_AES_IV(((uint64_t *) ivp)[0], 0);
+    CVMX_MT_AES_IV(((uint64_t *) ivp)[1], 1);
+
+    while (crypt_off > 0) {
+	SG_CONSUME(sg, data, data_i, data_l);
+	crypt_off -= 8;
+    }
+
+    while (crypt_len > 0) {
+	pdata = data;
+	CVMX_MT_AES_DEC_CBC0(*data);
+	SG_CONSUME(sg, data, data_i, data_l);
+	CVMX_MT_AES_DEC_CBC1(*data);
+	CVMX_MF_AES_RESULT(*pdata, 0);
+	CVMX_MF_AES_RESULT(*data, 1);
+	SG_CONSUME(sg, data, data_i, data_l);
+	crypt_len -= 16;
+    }
+
+    octeon_crypto_disable(&state, flags);
+    return 0;
+}
+
+/****************************************************************************/
+/* MD5 */
+
+int
+octo_null_md5_encrypt(
+    struct octo_sess *od,
+    struct scatterlist *sg, int sg_len,
+    int auth_off, int auth_len,
+    int crypt_off, int crypt_len,
+    int icv_off, uint8_t *ivp)
+{
+    register int next = 0;
+    uint64_t *data;
+    uint64_t tmp1, tmp2;
+    int data_i, data_l, alen = auth_len;
+    struct octeon_cop2_state state;
+    unsigned long flags;
+
+    dprintk("%s()\n", __FUNCTION__);
+
+    if (unlikely(od == NULL || sg==NULL || sg_len==0 ||
+	    (auth_off & 0x7) || (auth_off + auth_len > sg_len))) {
+	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+	return -EINVAL;
+    }
+
+    SG_INIT(sg, data, data_i, data_l);
+
+    flags = octeon_crypto_enable(&state);
+
+    /* Load MD5 IV */
+    CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
+    CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
+
+    while (auth_off > 0) {
+	SG_CONSUME(sg, data, data_i, data_l);
+	auth_off -= 8;
+    }
+
+    while (auth_len > 0) {
+	CVM_LOAD_MD5_UNIT(*data, next);
+	auth_len -= 8;
+	SG_CONSUME(sg, data, data_i, data_l);
+    }
+
+    /* finish the hash */
+    CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+    if (unlikely(inplen)) {
+	uint64_t tmp = 0;
+	uint8_t *p = (uint8_t *) & tmp;
+	p[inplen] = 0x80;
+	do {
+	    inplen--;
+	    p[inplen] = ((uint8_t *) data)[inplen];
+	} while (inplen);
+	CVM_LOAD_MD5_UNIT(tmp, next);
+    } else {
+	CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+    }
+#else
+    CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+#endif
+
+    /* Finish Inner hash */
+    while (next != 7) {
+	CVM_LOAD_MD5_UNIT(((uint64_t) 0x0ULL), next);
+    }
+    CVMX_ES64(tmp1, ((alen + 64) << 3));
+    CVM_LOAD_MD5_UNIT(tmp1, next);
+
+    /* Get the inner hash of HMAC */
+    CVMX_MF_HSH_IV(tmp1, 0);
+    CVMX_MF_HSH_IV(tmp2, 1);
+
+    /* Initialize hash unit */
+    CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
+    CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
+
+    CVMX_MT_HSH_DAT(tmp1, 0);
+    CVMX_MT_HSH_DAT(tmp2, 1);
+    CVMX_MT_HSH_DAT(0x8000000000000000ULL, 2);
+    CVMX_MT_HSH_DATZ(3);
+    CVMX_MT_HSH_DATZ(4);
+    CVMX_MT_HSH_DATZ(5);
+    CVMX_MT_HSH_DATZ(6);
+    CVMX_ES64(tmp1, ((64 + 16) << 3));
+    CVMX_MT_HSH_STARTMD5(tmp1);
+
+    /* save the HMAC */
+    SG_INIT(sg, data, data_i, data_l);
+    while (icv_off > 0) {
+	SG_CONSUME(sg, data, data_i, data_l);
+	icv_off -= 8;
+    }
+    CVMX_MF_HSH_IV(*data, 0);
+    SG_CONSUME(sg, data, data_i, data_l);
+    CVMX_MF_HSH_IV(tmp1, 1);
+    *(uint32_t *)data = (uint32_t) (tmp1 >> 32);
+
+    octeon_crypto_disable(&state, flags);
+    return 0;
+}
+
+/****************************************************************************/
+/* SHA1 */
+
+int
+octo_null_sha1_encrypt(
+    struct octo_sess *od,
+    struct scatterlist *sg, int sg_len,
+    int auth_off, int auth_len,
+    int crypt_off, int crypt_len,
+    int icv_off, uint8_t *ivp)
+{
+    register int next = 0;
+    uint64_t *data;
+    uint64_t tmp1, tmp2, tmp3;
+    int data_i, data_l, alen = auth_len;
+    struct octeon_cop2_state state;
+    unsigned long flags;
+
+    dprintk("%s()\n", __FUNCTION__);
+
+    if (unlikely(od == NULL || sg==NULL || sg_len==0 ||
+	    (auth_off & 0x7) || (auth_off + auth_len > sg_len))) {
+	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+	return -EINVAL;
+    }
+
+    SG_INIT(sg, data, data_i, data_l);
+
+    flags = octeon_crypto_enable(&state);
+
+    /* Load SHA1 IV */
+    CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
+    CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
+    CVMX_MT_HSH_IV(od->octo_hminner[2], 2);
+
+    while (auth_off > 0) {
+	SG_CONSUME(sg, data, data_i, data_l);
+	auth_off -= 8;
+    }
+
+    while (auth_len > 0) {
+	CVM_LOAD_SHA_UNIT(*data, next);
+	auth_len -= 8;
+	SG_CONSUME(sg, data, data_i, data_l);
+    }
+
+    /* finish the hash */
+    CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+    if (unlikely(inplen)) {
+	uint64_t tmp = 0;
+	uint8_t *p = (uint8_t *) & tmp;
+	p[inplen] = 0x80;
+	do {
+	    inplen--;
+	    p[inplen] = ((uint8_t *) data)[inplen];
+	} while (inplen);
+	CVM_LOAD_MD5_UNIT(tmp, next);
+    } else {
+	CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+    }
+#else
+    CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
+#endif
+
+    /* Finish Inner hash */
+    while (next != 7) {
+	CVM_LOAD_SHA_UNIT(((uint64_t) 0x0ULL), next);
+    }
+    CVM_LOAD_SHA_UNIT((uint64_t) ((alen + 64) << 3), next);
+
+    /* Get the inner hash of HMAC */
+    CVMX_MF_HSH_IV(tmp1, 0);
+    CVMX_MF_HSH_IV(tmp2, 1);
+    tmp3 = 0;
+    CVMX_MF_HSH_IV(tmp3, 2);
+
+    /* Initialize hash unit */
+    CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
+    CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
+    CVMX_MT_HSH_IV(od->octo_hmouter[2], 2);
+
+    CVMX_MT_HSH_DAT(tmp1, 0);
+    CVMX_MT_HSH_DAT(tmp2, 1);
+    tmp3 |= 0x0000000080000000;
+    CVMX_MT_HSH_DAT(tmp3, 2);
+    CVMX_MT_HSH_DATZ(3);
+    CVMX_MT_HSH_DATZ(4);
+    CVMX_MT_HSH_DATZ(5);
+    CVMX_MT_HSH_DATZ(6);
+    CVMX_MT_HSH_STARTSHA((uint64_t) ((64 + 20) << 3));
+
+    /* save the HMAC */
+    SG_INIT(sg, data, data_i, data_l);
+    while (icv_off > 0) {
+	SG_CONSUME(sg, data, data_i, data_l);
+	icv_off -= 8;
+    }
+    CVMX_MF_HSH_IV(*data, 0);
+    SG_CONSUME(sg, data, data_i, data_l);
+    CVMX_MF_HSH_IV(tmp1, 1);
+    *(uint32_t *)data = (uint32_t) (tmp1 >> 32);
+
+    octeon_crypto_disable(&state, flags);
+    return 0;
+}
+
+/****************************************************************************/
+/* DES MD5 */
+
+int
+octo_des_cbc_md5_encrypt(
+    struct octo_sess *od,
+    struct scatterlist *sg, int sg_len,
+    int auth_off, int auth_len,
+    int crypt_off, int crypt_len,
+    int icv_off, uint8_t *ivp)
+{
+    register int next = 0;
+    union {
+	uint32_t data32[2];
+	uint64_t data64[1];
+    } mydata;
+    uint64_t *data = &mydata.data64[0];
+    uint32_t *data32;
+    uint64_t tmp1, tmp2;
+    int data_i, data_l, alen = auth_len;
+    struct octeon_cop2_state state;
+    unsigned long flags;
+
+    dprintk("%s()\n", __FUNCTION__);
+
+    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+	    (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) ||
+	    (crypt_len  & 0x7) ||
+	    (auth_len  & 0x7) ||
+	    (auth_off & 0x3) || (auth_off + auth_len > sg_len))) {
+	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+	return -EINVAL;
+    }
+
+    SG_INIT(sg, data32, data_i, data_l);
+
+    CVMX_PREFETCH0(ivp);
+    CVMX_PREFETCH0(od->octo_enckey);
+
+    flags = octeon_crypto_enable(&state);
+
+    /* load 3DES Key */
+    CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+    if (od->octo_encklen == 24) {
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+    } else if (od->octo_encklen == 8) {
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 1);
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 2);
+    } else {
+	octeon_crypto_disable(&state, flags);
+	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+	return -EINVAL;
+    }
+
+    CVMX_MT_3DES_IV(* (uint64_t *) ivp);
+
+    /* Load MD5 IV */
+    CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
+    CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
+
+    while (crypt_off > 0 && auth_off > 0) {
+	SG_CONSUME(sg, data32, data_i, data_l);
+	crypt_off -= 4;
+	auth_off -= 4;
+    }
+
+    while (crypt_len > 0 || auth_len > 0) {
+	uint32_t *first = data32;
+	mydata.data32[0] = *first;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	mydata.data32[1] = *data32;
+	if (crypt_off <= 0) {
+	    if (crypt_len > 0) {
+		CVMX_MT_3DES_ENC_CBC(*data);
+		CVMX_MF_3DES_RESULT(*data);
+		crypt_len -= 8;
+	    }
+	} else
+	    crypt_off -= 8;
+	if (auth_off <= 0) {
+	    if (auth_len > 0) {
+		CVM_LOAD_MD5_UNIT(*data, next);
+		auth_len -= 8;
+	    }
+	} else
+	    auth_off -= 8;
+	*first = mydata.data32[0];
+	*data32 = mydata.data32[1];
+	SG_CONSUME(sg, data32, data_i, data_l);
+    }
+
+    /* finish the hash */
+    CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+    if (unlikely(inplen)) {
+	uint64_t tmp = 0;
+	uint8_t *p = (uint8_t *) & tmp;
+	p[inplen] = 0x80;
+	do {
+	    inplen--;
+	    p[inplen] = ((uint8_t *) data)[inplen];
+	} while (inplen);
+	CVM_LOAD_MD5_UNIT(tmp, next);
+    } else {
+	CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+    }
+#else
+    CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+#endif
+
+    /* Finish Inner hash */
+    while (next != 7) {
+	CVM_LOAD_MD5_UNIT(((uint64_t) 0x0ULL), next);
+    }
+    CVMX_ES64(tmp1, ((alen + 64) << 3));
+    CVM_LOAD_MD5_UNIT(tmp1, next);
+
+    /* Get the inner hash of HMAC */
+    CVMX_MF_HSH_IV(tmp1, 0);
+    CVMX_MF_HSH_IV(tmp2, 1);
+
+    /* Initialize hash unit */
+    CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
+    CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
+
+    CVMX_MT_HSH_DAT(tmp1, 0);
+    CVMX_MT_HSH_DAT(tmp2, 1);
+    CVMX_MT_HSH_DAT(0x8000000000000000ULL, 2);
+    CVMX_MT_HSH_DATZ(3);
+    CVMX_MT_HSH_DATZ(4);
+    CVMX_MT_HSH_DATZ(5);
+    CVMX_MT_HSH_DATZ(6);
+    CVMX_ES64(tmp1, ((64 + 16) << 3));
+    CVMX_MT_HSH_STARTMD5(tmp1);
+
+    /* save the HMAC */
+    SG_INIT(sg, data32, data_i, data_l);
+    while (icv_off > 0) {
+	SG_CONSUME(sg, data32, data_i, data_l);
+	icv_off -= 4;
+    }
+    CVMX_MF_HSH_IV(tmp1, 0);
+    *data32 = (uint32_t) (tmp1 >> 32);
+    SG_CONSUME(sg, data32, data_i, data_l);
+    *data32 = (uint32_t) tmp1;
+    SG_CONSUME(sg, data32, data_i, data_l);
+    CVMX_MF_HSH_IV(tmp1, 1);
+    *data32 = (uint32_t) (tmp1 >> 32);
+
+    octeon_crypto_disable(&state, flags);
+    return 0;
+}
+
+int
+octo_des_cbc_md5_decrypt(
+    struct octo_sess *od,
+    struct scatterlist *sg, int sg_len,
+    int auth_off, int auth_len,
+    int crypt_off, int crypt_len,
+    int icv_off, uint8_t *ivp)
+{
+    register int next = 0;
+    union {
+	uint32_t data32[2];
+	uint64_t data64[1];
+    } mydata;
+    uint64_t *data = &mydata.data64[0];
+    uint32_t *data32;
+    uint64_t tmp1, tmp2;
+    int data_i, data_l, alen = auth_len;
+    struct octeon_cop2_state state;
+    unsigned long flags;
+
+    dprintk("%s()\n", __FUNCTION__);
+
+    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+	    (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) ||
+	    (crypt_len  & 0x7) ||
+	    (auth_len  & 0x7) ||
+	    (auth_off & 0x3) || (auth_off + auth_len > sg_len))) {
+	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+	return -EINVAL;
+    }
+
+    SG_INIT(sg, data32, data_i, data_l);
+
+    CVMX_PREFETCH0(ivp);
+    CVMX_PREFETCH0(od->octo_enckey);
+
+    flags = octeon_crypto_enable(&state);
+
+    /* load 3DES Key */
+    CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+    if (od->octo_encklen == 24) {
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+    } else if (od->octo_encklen == 8) {
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 1);
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 2);
+    } else {
+	octeon_crypto_disable(&state, flags);
+	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+	return -EINVAL;
+    }
+
+    CVMX_MT_3DES_IV(* (uint64_t *) ivp);
+
+    /* Load MD5 IV */
+    CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
+    CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
+
+    while (crypt_off > 0 && auth_off > 0) {
+	SG_CONSUME(sg, data32, data_i, data_l);
+	crypt_off -= 4;
+	auth_off -= 4;
+    }
+
+    while (crypt_len > 0 || auth_len > 0) {
+	uint32_t *first = data32;
+	mydata.data32[0] = *first;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	mydata.data32[1] = *data32;
+	if (auth_off <= 0) {
+	    if (auth_len > 0) {
+		CVM_LOAD_MD5_UNIT(*data, next);
+		auth_len -= 8;
+	    }
+	} else
+	    auth_off -= 8;
+	if (crypt_off <= 0) {
+	    if (crypt_len > 0) {
+		CVMX_MT_3DES_DEC_CBC(*data);
+		CVMX_MF_3DES_RESULT(*data);
+		crypt_len -= 8;
+	    }
+	} else
+	    crypt_off -= 8;
+	*first = mydata.data32[0];
+	*data32 = mydata.data32[1];
+	SG_CONSUME(sg, data32, data_i, data_l);
+    }
+
+    /* finish the hash */
+    CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+    if (unlikely(inplen)) {
+	uint64_t tmp = 0;
+	uint8_t *p = (uint8_t *) & tmp;
+	p[inplen] = 0x80;
+	do {
+	    inplen--;
+	    p[inplen] = ((uint8_t *) data)[inplen];
+	} while (inplen);
+	CVM_LOAD_MD5_UNIT(tmp, next);
+    } else {
+	CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+    }
+#else
+    CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+#endif
+
+    /* Finish Inner hash */
+    while (next != 7) {
+	CVM_LOAD_MD5_UNIT(((uint64_t) 0x0ULL), next);
+    }
+    CVMX_ES64(tmp1, ((alen + 64) << 3));
+    CVM_LOAD_MD5_UNIT(tmp1, next);
+
+    /* Get the inner hash of HMAC */
+    CVMX_MF_HSH_IV(tmp1, 0);
+    CVMX_MF_HSH_IV(tmp2, 1);
+
+    /* Initialize hash unit */
+    CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
+    CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
+
+    CVMX_MT_HSH_DAT(tmp1, 0);
+    CVMX_MT_HSH_DAT(tmp2, 1);
+    CVMX_MT_HSH_DAT(0x8000000000000000ULL, 2);
+    CVMX_MT_HSH_DATZ(3);
+    CVMX_MT_HSH_DATZ(4);
+    CVMX_MT_HSH_DATZ(5);
+    CVMX_MT_HSH_DATZ(6);
+    CVMX_ES64(tmp1, ((64 + 16) << 3));
+    CVMX_MT_HSH_STARTMD5(tmp1);
+
+    /* save the HMAC */
+    SG_INIT(sg, data32, data_i, data_l);
+    while (icv_off > 0) {
+	SG_CONSUME(sg, data32, data_i, data_l);
+	icv_off -= 4;
+    }
+    CVMX_MF_HSH_IV(tmp1, 0);
+    *data32 = (uint32_t) (tmp1 >> 32);
+    SG_CONSUME(sg, data32, data_i, data_l);
+    *data32 = (uint32_t) tmp1;
+    SG_CONSUME(sg, data32, data_i, data_l);
+    CVMX_MF_HSH_IV(tmp1, 1);
+    *data32 = (uint32_t) (tmp1 >> 32);
+
+    octeon_crypto_disable(&state, flags);
+    return 0;
+}
+
+/****************************************************************************/
+/* DES SHA */
+
+int
+octo_des_cbc_sha1_encrypt(
+    struct octo_sess *od,
+    struct scatterlist *sg, int sg_len,
+    int auth_off, int auth_len,
+    int crypt_off, int crypt_len,
+    int icv_off, uint8_t *ivp)
+{
+    register int next = 0;
+    union {
+	uint32_t data32[2];
+	uint64_t data64[1];
+    } mydata;
+    uint64_t *data = &mydata.data64[0];
+    uint32_t *data32;
+    uint64_t tmp1, tmp2, tmp3;
+    int data_i, data_l, alen = auth_len;
+    struct octeon_cop2_state state;
+    unsigned long flags;
+
+    dprintk("%s()\n", __FUNCTION__);
+
+    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+	    (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) ||
+	    (crypt_len  & 0x7) ||
+	    (auth_len  & 0x7) ||
+	    (auth_off & 0x3) || (auth_off + auth_len > sg_len))) {
+	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+	return -EINVAL;
+    }
+
+    SG_INIT(sg, data32, data_i, data_l);
+
+    CVMX_PREFETCH0(ivp);
+    CVMX_PREFETCH0(od->octo_enckey);
+
+    flags = octeon_crypto_enable(&state);
+
+    /* load 3DES Key */
+    CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+    if (od->octo_encklen == 24) {
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+    } else if (od->octo_encklen == 8) {
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 1);
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 2);
+    } else {
+	octeon_crypto_disable(&state, flags);
+	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+	return -EINVAL;
+    }
+
+    CVMX_MT_3DES_IV(* (uint64_t *) ivp);
+
+    /* Load SHA1 IV */
+    CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
+    CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
+    CVMX_MT_HSH_IV(od->octo_hminner[2], 2);
+
+    while (crypt_off > 0 && auth_off > 0) {
+	SG_CONSUME(sg, data32, data_i, data_l);
+	crypt_off -= 4;
+	auth_off -= 4;
+    }
+
+    while (crypt_len > 0 || auth_len > 0) {
+	uint32_t *first = data32;
+	mydata.data32[0] = *first;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	mydata.data32[1] = *data32;
+	if (crypt_off <= 0) {
+	    if (crypt_len > 0) {
+		CVMX_MT_3DES_ENC_CBC(*data);
+		CVMX_MF_3DES_RESULT(*data);
+		crypt_len -= 8;
+	    }
+	} else
+	    crypt_off -= 8;
+	if (auth_off <= 0) {
+	    if (auth_len > 0) {
+		CVM_LOAD_SHA_UNIT(*data, next);
+		auth_len -= 8;
+	    }
+	} else
+	    auth_off -= 8;
+	*first = mydata.data32[0];
+	*data32 = mydata.data32[1];
+	SG_CONSUME(sg, data32, data_i, data_l);
+    }
+
+    /* finish the hash */
+    CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+    if (unlikely(inplen)) {
+	uint64_t tmp = 0;
+	uint8_t *p = (uint8_t *) & tmp;
+	p[inplen] = 0x80;
+	do {
+	    inplen--;
+	    p[inplen] = ((uint8_t *) data)[inplen];
+	} while (inplen);
+	CVM_LOAD_SHA_UNIT(tmp, next);
+    } else {
+	CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
+    }
+#else
+    CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
+#endif
+
+    /* Finish Inner hash */
+    while (next != 7) {
+	CVM_LOAD_SHA_UNIT(((uint64_t) 0x0ULL), next);
+    }
+	CVM_LOAD_SHA_UNIT((uint64_t) ((alen + 64) << 3), next);
+
+    /* Get the inner hash of HMAC */
+    CVMX_MF_HSH_IV(tmp1, 0);
+    CVMX_MF_HSH_IV(tmp2, 1);
+    tmp3 = 0;
+    CVMX_MF_HSH_IV(tmp3, 2);
+
+    /* Initialize hash unit */
+    CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
+    CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
+    CVMX_MT_HSH_IV(od->octo_hmouter[2], 2);
+
+    CVMX_MT_HSH_DAT(tmp1, 0);
+    CVMX_MT_HSH_DAT(tmp2, 1);
+    tmp3 |= 0x0000000080000000;
+    CVMX_MT_HSH_DAT(tmp3, 2);
+    CVMX_MT_HSH_DATZ(3);
+    CVMX_MT_HSH_DATZ(4);
+    CVMX_MT_HSH_DATZ(5);
+    CVMX_MT_HSH_DATZ(6);
+    CVMX_MT_HSH_STARTSHA((uint64_t) ((64 + 20) << 3));
+
+    /* save the HMAC */
+    SG_INIT(sg, data32, data_i, data_l);
+    while (icv_off > 0) {
+	SG_CONSUME(sg, data32, data_i, data_l);
+	icv_off -= 4;
+    }
+    CVMX_MF_HSH_IV(tmp1, 0);
+    *data32 = (uint32_t) (tmp1 >> 32);
+    SG_CONSUME(sg, data32, data_i, data_l);
+    *data32 = (uint32_t) tmp1;
+    SG_CONSUME(sg, data32, data_i, data_l);
+    CVMX_MF_HSH_IV(tmp1, 1);
+    *data32 = (uint32_t) (tmp1 >> 32);
+
+    octeon_crypto_disable(&state, flags);
+    return 0;
+}
+
+int
+octo_des_cbc_sha1_decrypt(
+    struct octo_sess *od,
+    struct scatterlist *sg, int sg_len,
+    int auth_off, int auth_len,
+    int crypt_off, int crypt_len,
+    int icv_off, uint8_t *ivp)
+{
+    register int next = 0;
+    union {
+	uint32_t data32[2];
+	uint64_t data64[1];
+    } mydata;
+    uint64_t *data = &mydata.data64[0];
+    uint32_t *data32;
+    uint64_t tmp1, tmp2, tmp3;
+    int data_i, data_l, alen = auth_len;
+    struct octeon_cop2_state state;
+    unsigned long flags;
+
+    dprintk("%s()\n", __FUNCTION__);
+
+    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+	    (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) ||
+	    (crypt_len  & 0x7) ||
+	    (auth_len  & 0x7) ||
+	    (auth_off & 0x3) || (auth_off + auth_len > sg_len))) {
+	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+	return -EINVAL;
+    }
+
+    SG_INIT(sg, data32, data_i, data_l);
+
+    CVMX_PREFETCH0(ivp);
+    CVMX_PREFETCH0(od->octo_enckey);
+
+    flags = octeon_crypto_enable(&state);
+
+    /* load 3DES Key */
+    CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+    if (od->octo_encklen == 24) {
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+    } else if (od->octo_encklen == 8) {
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 1);
+	CVMX_MT_3DES_KEY(((uint64_t *) od->octo_enckey)[0], 2);
+    } else {
+	octeon_crypto_disable(&state, flags);
+	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+	return -EINVAL;
+    }
+
+    CVMX_MT_3DES_IV(* (uint64_t *) ivp);
+
+    /* Load SHA1 IV */
+    CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
+    CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
+    CVMX_MT_HSH_IV(od->octo_hminner[2], 2);
+
+    while (crypt_off > 0 && auth_off > 0) {
+	SG_CONSUME(sg, data32, data_i, data_l);
+	crypt_off -= 4;
+	auth_off -= 4;
+    }
+
+    while (crypt_len > 0 || auth_len > 0) {
+	uint32_t *first = data32;
+	mydata.data32[0] = *first;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	mydata.data32[1] = *data32;
+	if (auth_off <= 0) {
+	    if (auth_len > 0) {
+		CVM_LOAD_SHA_UNIT(*data, next);
+		auth_len -= 8;
+	    }
+	} else
+	    auth_off -= 8;
+	if (crypt_off <= 0) {
+	    if (crypt_len > 0) {
+		CVMX_MT_3DES_DEC_CBC(*data);
+		CVMX_MF_3DES_RESULT(*data);
+		crypt_len -= 8;
+	    }
+	} else
+	    crypt_off -= 8;
+	*first = mydata.data32[0];
+	*data32 = mydata.data32[1];
+	SG_CONSUME(sg, data32, data_i, data_l);
+    }
+
+    /* finish the hash */
+    CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+    if (unlikely(inplen)) {
+	uint64_t tmp = 0;
+	uint8_t *p = (uint8_t *) & tmp;
+	p[inplen] = 0x80;
+	do {
+	    inplen--;
+	    p[inplen] = ((uint8_t *) data)[inplen];
+	} while (inplen);
+	CVM_LOAD_SHA_UNIT(tmp, next);
+    } else {
+	CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
+    }
+#else
+    CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
+#endif
+
+    /* Finish Inner hash */
+    while (next != 7) {
+	CVM_LOAD_SHA_UNIT(((uint64_t) 0x0ULL), next);
+    }
+    CVM_LOAD_SHA_UNIT((uint64_t) ((alen + 64) << 3), next);
+
+    /* Get the inner hash of HMAC */
+    CVMX_MF_HSH_IV(tmp1, 0);
+    CVMX_MF_HSH_IV(tmp2, 1);
+    tmp3 = 0;
+    CVMX_MF_HSH_IV(tmp3, 2);
+
+    /* Initialize hash unit */
+    CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
+    CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
+    CVMX_MT_HSH_IV(od->octo_hmouter[2], 2);
+
+    CVMX_MT_HSH_DAT(tmp1, 0);
+    CVMX_MT_HSH_DAT(tmp2, 1);
+    tmp3 |= 0x0000000080000000;
+    CVMX_MT_HSH_DAT(tmp3, 2);
+    CVMX_MT_HSH_DATZ(3);
+    CVMX_MT_HSH_DATZ(4);
+    CVMX_MT_HSH_DATZ(5);
+    CVMX_MT_HSH_DATZ(6);
+    CVMX_MT_HSH_STARTSHA((uint64_t) ((64 + 20) << 3));
+    /* save the HMAC */
+    SG_INIT(sg, data32, data_i, data_l);
+    while (icv_off > 0) {
+	SG_CONSUME(sg, data32, data_i, data_l);
+	icv_off -= 4;
+    }
+    CVMX_MF_HSH_IV(tmp1, 0);
+    *data32 = (uint32_t) (tmp1 >> 32);
+    SG_CONSUME(sg, data32, data_i, data_l);
+    *data32 = (uint32_t) tmp1;
+    SG_CONSUME(sg, data32, data_i, data_l);
+    CVMX_MF_HSH_IV(tmp1, 1);
+    *data32 = (uint32_t) (tmp1 >> 32);
+
+    octeon_crypto_disable(&state, flags);
+    return 0;
+}
+
+/****************************************************************************/
+/* AES MD5 */
+
+int
+octo_aes_cbc_md5_encrypt(
+    struct octo_sess *od,
+    struct scatterlist *sg, int sg_len,
+    int auth_off, int auth_len,
+    int crypt_off, int crypt_len,
+    int icv_off, uint8_t *ivp)
+{
+    register int next = 0;
+    union {
+	uint32_t data32[2];
+	uint64_t data64[1];
+    } mydata[2];
+    uint64_t *pdata = &mydata[0].data64[0];
+    uint64_t *data =  &mydata[1].data64[0];
+    uint32_t *data32;
+    uint64_t tmp1, tmp2;
+    int data_i, data_l, alen = auth_len;
+    struct octeon_cop2_state state;
+    unsigned long flags;
+
+    dprintk("%s()\n", __FUNCTION__);
+
+    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+	    (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) ||
+	    (crypt_len  & 0x7) ||
+	    (auth_len  & 0x7) ||
+	    (auth_off & 0x3) || (auth_off + auth_len > sg_len))) {
+	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+	return -EINVAL;
+    }
+
+    SG_INIT(sg, data32, data_i, data_l);
+
+    CVMX_PREFETCH0(ivp);
+    CVMX_PREFETCH0(od->octo_enckey);
+
+    flags = octeon_crypto_enable(&state);
+
+    /* load AES Key */
+    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+
+    if (od->octo_encklen == 16) {
+	CVMX_MT_AES_KEY(0x0, 2);
+	CVMX_MT_AES_KEY(0x0, 3);
+    } else if (od->octo_encklen == 24) {
+	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+	CVMX_MT_AES_KEY(0x0, 3);
+    } else if (od->octo_encklen == 32) {
+	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[3], 3);
+    } else {
+	octeon_crypto_disable(&state, flags);
+	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+	return -EINVAL;
+    }
+    CVMX_MT_AES_KEYLENGTH(od->octo_encklen / 8 - 1);
+
+    CVMX_MT_AES_IV(((uint64_t *) ivp)[0], 0);
+    CVMX_MT_AES_IV(((uint64_t *) ivp)[1], 1);
+
+    /* Load MD5 IV */
+    CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
+    CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
+
+    while (crypt_off > 0 && auth_off > 0) {
+	SG_CONSUME(sg, data32, data_i, data_l);
+	crypt_off -= 4;
+	auth_off -= 4;
+    }
+
+    /* align auth and crypt */
+    while (crypt_off > 0 && auth_len > 0) {
+	mydata[0].data32[0] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	mydata[0].data32[1] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	CVM_LOAD_MD5_UNIT(*pdata, next);
+	crypt_off -= 8;
+	auth_len -= 8;
+    }
+
+    while (crypt_len > 0) {
+	uint32_t *pdata32[3];
+
+	pdata32[0] = data32;
+	mydata[0].data32[0] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+
+	pdata32[1] = data32;
+	mydata[0].data32[1] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+
+	pdata32[2] = data32;
+	mydata[1].data32[0] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+
+	mydata[1].data32[1] = *data32;
+
+	CVMX_MT_AES_ENC_CBC0(*pdata);
+	CVMX_MT_AES_ENC_CBC1(*data);
+	CVMX_MF_AES_RESULT(*pdata, 0);
+	CVMX_MF_AES_RESULT(*data, 1);
+	crypt_len -= 16;
+
+	if (auth_len > 0) {
+	    CVM_LOAD_MD5_UNIT(*pdata, next);
+	    auth_len -= 8;
+	}
+	if (auth_len > 0) {
+	    CVM_LOAD_MD5_UNIT(*data, next);
+	    auth_len -= 8;
+	}
+
+	*pdata32[0] = mydata[0].data32[0];
+	*pdata32[1] = mydata[0].data32[1];
+	*pdata32[2] = mydata[1].data32[0];
+	*data32     = mydata[1].data32[1];
+
+	SG_CONSUME(sg, data32, data_i, data_l);
+    }
+
+    /* finish any left over hashing */
+    while (auth_len > 0) {
+	mydata[0].data32[0] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	mydata[0].data32[1] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	CVM_LOAD_MD5_UNIT(*pdata, next);
+	auth_len -= 8;
+    }
+
+    /* finish the hash */
+    CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+    if (unlikely(inplen)) {
+	uint64_t tmp = 0;
+	uint8_t *p = (uint8_t *) & tmp;
+	p[inplen] = 0x80;
+	do {
+	    inplen--;
+	    p[inplen] = ((uint8_t *) data)[inplen];
+	} while (inplen);
+	CVM_LOAD_MD5_UNIT(tmp, next);
+    } else {
+	CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+    }
+#else
+    CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+#endif
+
+    /* Finish Inner hash */
+    while (next != 7) {
+	CVM_LOAD_MD5_UNIT(((uint64_t) 0x0ULL), next);
+    }
+    CVMX_ES64(tmp1, ((alen + 64) << 3));
+    CVM_LOAD_MD5_UNIT(tmp1, next);
+
+    /* Get the inner hash of HMAC */
+    CVMX_MF_HSH_IV(tmp1, 0);
+    CVMX_MF_HSH_IV(tmp2, 1);
+
+    /* Initialize hash unit */
+    CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
+    CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
+
+    CVMX_MT_HSH_DAT(tmp1, 0);
+    CVMX_MT_HSH_DAT(tmp2, 1);
+    CVMX_MT_HSH_DAT(0x8000000000000000ULL, 2);
+    CVMX_MT_HSH_DATZ(3);
+    CVMX_MT_HSH_DATZ(4);
+    CVMX_MT_HSH_DATZ(5);
+    CVMX_MT_HSH_DATZ(6);
+    CVMX_ES64(tmp1, ((64 + 16) << 3));
+    CVMX_MT_HSH_STARTMD5(tmp1);
+
+    /* save the HMAC */
+    SG_INIT(sg, data32, data_i, data_l);
+    while (icv_off > 0) {
+	SG_CONSUME(sg, data32, data_i, data_l);
+	icv_off -= 4;
+    }
+    CVMX_MF_HSH_IV(tmp1, 0);
+    *data32 = (uint32_t) (tmp1 >> 32);
+    SG_CONSUME(sg, data32, data_i, data_l);
+    *data32 = (uint32_t) tmp1;
+    SG_CONSUME(sg, data32, data_i, data_l);
+    CVMX_MF_HSH_IV(tmp1, 1);
+    *data32 = (uint32_t) (tmp1 >> 32);
+
+    octeon_crypto_disable(&state, flags);
+    return 0;
+}
+
+int
+octo_aes_cbc_md5_decrypt(
+    struct octo_sess *od,
+    struct scatterlist *sg, int sg_len,
+    int auth_off, int auth_len,
+    int crypt_off, int crypt_len,
+    int icv_off, uint8_t *ivp)
+{
+    register int next = 0;
+    union {
+	uint32_t data32[2];
+	uint64_t data64[1];
+    } mydata[2];
+    uint64_t *pdata = &mydata[0].data64[0];
+    uint64_t *data =  &mydata[1].data64[0];
+    uint32_t *data32;
+    uint64_t tmp1, tmp2;
+    int data_i, data_l, alen = auth_len;
+    struct octeon_cop2_state state;
+    unsigned long flags;
+
+    dprintk("%s()\n", __FUNCTION__);
+
+    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+	    (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) ||
+	    (crypt_len  & 0x7) ||
+	    (auth_len  & 0x7) ||
+	    (auth_off & 0x3) || (auth_off + auth_len > sg_len))) {
+	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+	return -EINVAL;
+    }
+
+    SG_INIT(sg, data32, data_i, data_l);
+
+    CVMX_PREFETCH0(ivp);
+    CVMX_PREFETCH0(od->octo_enckey);
+
+    flags = octeon_crypto_enable(&state);
+
+    /* load AES Key */
+    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+
+    if (od->octo_encklen == 16) {
+	CVMX_MT_AES_KEY(0x0, 2);
+	CVMX_MT_AES_KEY(0x0, 3);
+    } else if (od->octo_encklen == 24) {
+	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+	CVMX_MT_AES_KEY(0x0, 3);
+    } else if (od->octo_encklen == 32) {
+	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[3], 3);
+    } else {
+	octeon_crypto_disable(&state, flags);
+	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+	return -EINVAL;
+    }
+    CVMX_MT_AES_KEYLENGTH(od->octo_encklen / 8 - 1);
+
+    CVMX_MT_AES_IV(((uint64_t *) ivp)[0], 0);
+    CVMX_MT_AES_IV(((uint64_t *) ivp)[1], 1);
+
+    /* Load MD5 IV */
+    CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
+    CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
+
+    while (crypt_off > 0 && auth_off > 0) {
+	SG_CONSUME(sg, data32, data_i, data_l);
+	crypt_off -= 4;
+	auth_off -= 4;
+    }
+
+    /* align auth and crypt */
+    while (crypt_off > 0 && auth_len > 0) {
+	mydata[0].data32[0] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	mydata[0].data32[1] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	CVM_LOAD_MD5_UNIT(*pdata, next);
+	crypt_off -= 8;
+	auth_len -= 8;
+    }
+
+    while (crypt_len > 0) {
+	uint32_t *pdata32[3];
+
+	pdata32[0] = data32;
+	mydata[0].data32[0] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	pdata32[1] = data32;
+	mydata[0].data32[1] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	pdata32[2] = data32;
+	mydata[1].data32[0] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	mydata[1].data32[1] = *data32;
+
+	if (auth_len > 0) {
+	    CVM_LOAD_MD5_UNIT(*pdata, next);
+	    auth_len -= 8;
+	}
+
+	if (auth_len > 0) {
+	    CVM_LOAD_MD5_UNIT(*data, next);
+	    auth_len -= 8;
+	}
+
+	CVMX_MT_AES_DEC_CBC0(*pdata);
+	CVMX_MT_AES_DEC_CBC1(*data);
+	CVMX_MF_AES_RESULT(*pdata, 0);
+	CVMX_MF_AES_RESULT(*data, 1);
+	crypt_len -= 16;
+
+	*pdata32[0] = mydata[0].data32[0];
+	*pdata32[1] = mydata[0].data32[1];
+	*pdata32[2] = mydata[1].data32[0];
+	*data32     = mydata[1].data32[1];
+
+	SG_CONSUME(sg, data32, data_i, data_l);
+    }
+
+    /* finish left over hash if any */
+    while (auth_len > 0) {
+	mydata[0].data32[0] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	mydata[0].data32[1] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	CVM_LOAD_MD5_UNIT(*pdata, next);
+	auth_len -= 8;
+    }
+
+
+    /* finish the hash */
+    CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+    if (unlikely(inplen)) {
+	uint64_t tmp = 0;
+	uint8_t *p = (uint8_t *) & tmp;
+	p[inplen] = 0x80;
+	do {
+	    inplen--;
+	    p[inplen] = ((uint8_t *) data)[inplen];
+	} while (inplen);
+	CVM_LOAD_MD5_UNIT(tmp, next);
+    } else {
+	CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+    }
+#else
+    CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+#endif
+
+    /* Finish Inner hash */
+    while (next != 7) {
+	CVM_LOAD_MD5_UNIT(((uint64_t) 0x0ULL), next);
+    }
+    CVMX_ES64(tmp1, ((alen + 64) << 3));
+    CVM_LOAD_MD5_UNIT(tmp1, next);
+
+    /* Get the inner hash of HMAC */
+    CVMX_MF_HSH_IV(tmp1, 0);
+    CVMX_MF_HSH_IV(tmp2, 1);
+
+    /* Initialize hash unit */
+    CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
+    CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
+
+    CVMX_MT_HSH_DAT(tmp1, 0);
+    CVMX_MT_HSH_DAT(tmp2, 1);
+    CVMX_MT_HSH_DAT(0x8000000000000000ULL, 2);
+    CVMX_MT_HSH_DATZ(3);
+    CVMX_MT_HSH_DATZ(4);
+    CVMX_MT_HSH_DATZ(5);
+    CVMX_MT_HSH_DATZ(6);
+    CVMX_ES64(tmp1, ((64 + 16) << 3));
+    CVMX_MT_HSH_STARTMD5(tmp1);
+
+    /* save the HMAC */
+    SG_INIT(sg, data32, data_i, data_l);
+    while (icv_off > 0) {
+	SG_CONSUME(sg, data32, data_i, data_l);
+	icv_off -= 4;
+    }
+    CVMX_MF_HSH_IV(tmp1, 0);
+    *data32 = (uint32_t) (tmp1 >> 32);
+    SG_CONSUME(sg, data32, data_i, data_l);
+    *data32 = (uint32_t) tmp1;
+    SG_CONSUME(sg, data32, data_i, data_l);
+    CVMX_MF_HSH_IV(tmp1, 1);
+    *data32 = (uint32_t) (tmp1 >> 32);
+
+    octeon_crypto_disable(&state, flags);
+    return 0;
+}
+
+/****************************************************************************/
+/* AES SHA1 */
+
+int
+octo_aes_cbc_sha1_encrypt(
+    struct octo_sess *od,
+    struct scatterlist *sg, int sg_len,
+    int auth_off, int auth_len,
+    int crypt_off, int crypt_len,
+    int icv_off, uint8_t *ivp)
+{
+    register int next = 0;
+    union {
+	uint32_t data32[2];
+	uint64_t data64[1];
+    } mydata[2];
+    uint64_t *pdata = &mydata[0].data64[0];
+    uint64_t *data =  &mydata[1].data64[0];
+    uint32_t *data32;
+    uint64_t tmp1, tmp2, tmp3;
+    int data_i, data_l, alen = auth_len;
+    struct octeon_cop2_state state;
+    unsigned long flags;
+
+    dprintk("%s(a_off=%d a_len=%d c_off=%d c_len=%d icv_off=%d)\n",
+			__FUNCTION__, auth_off, auth_len, crypt_off, crypt_len, icv_off);
+
+    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+	    (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) ||
+	    (crypt_len  & 0x7) ||
+	    (auth_len  & 0x7) ||
+	    (auth_off & 0x3) || (auth_off + auth_len > sg_len))) {
+	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+	return -EINVAL;
+    }
+
+    SG_INIT(sg, data32, data_i, data_l);
+
+    CVMX_PREFETCH0(ivp);
+    CVMX_PREFETCH0(od->octo_enckey);
+
+    flags = octeon_crypto_enable(&state);
+
+    /* load AES Key */
+    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+
+    if (od->octo_encklen == 16) {
+	CVMX_MT_AES_KEY(0x0, 2);
+	CVMX_MT_AES_KEY(0x0, 3);
+    } else if (od->octo_encklen == 24) {
+	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+	CVMX_MT_AES_KEY(0x0, 3);
+    } else if (od->octo_encklen == 32) {
+	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[3], 3);
+    } else {
+	octeon_crypto_disable(&state, flags);
+	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+	return -EINVAL;
+    }
+    CVMX_MT_AES_KEYLENGTH(od->octo_encklen / 8 - 1);
+
+    CVMX_MT_AES_IV(((uint64_t *) ivp)[0], 0);
+    CVMX_MT_AES_IV(((uint64_t *) ivp)[1], 1);
+
+    /* Load SHA IV */
+    CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
+    CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
+    CVMX_MT_HSH_IV(od->octo_hminner[2], 2);
+
+    while (crypt_off > 0 && auth_off > 0) {
+	SG_CONSUME(sg, data32, data_i, data_l);
+	crypt_off -= 4;
+	auth_off -= 4;
+    }
+
+    /* align auth and crypt */
+    while (crypt_off > 0 && auth_len > 0) {
+	mydata[0].data32[0] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	mydata[0].data32[1] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	CVM_LOAD_SHA_UNIT(*pdata, next);
+	crypt_off -= 8;
+	auth_len -= 8;
+    }
+
+    while (crypt_len > 0) {
+	uint32_t *pdata32[3];
+
+	pdata32[0] = data32;
+	mydata[0].data32[0] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	pdata32[1] = data32;
+	mydata[0].data32[1] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	pdata32[2] = data32;
+	mydata[1].data32[0] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	mydata[1].data32[1] = *data32;
+
+	CVMX_MT_AES_ENC_CBC0(*pdata);
+	CVMX_MT_AES_ENC_CBC1(*data);
+	CVMX_MF_AES_RESULT(*pdata, 0);
+	CVMX_MF_AES_RESULT(*data, 1);
+	crypt_len -= 16;
+
+	if (auth_len > 0) {
+	    CVM_LOAD_SHA_UNIT(*pdata, next);
+	    auth_len -= 8;
+	}
+	if (auth_len > 0) {
+	    CVM_LOAD_SHA_UNIT(*data, next);
+	    auth_len -= 8;
+	}
+
+	*pdata32[0] = mydata[0].data32[0];
+	*pdata32[1] = mydata[0].data32[1];
+	*pdata32[2] = mydata[1].data32[0];
+	*data32     = mydata[1].data32[1];
+
+	SG_CONSUME(sg, data32, data_i, data_l);
+    }
+
+    /* finish and hashing */
+    while (auth_len > 0) {
+	mydata[0].data32[0] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	mydata[0].data32[1] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	CVM_LOAD_SHA_UNIT(*pdata, next);
+	auth_len -= 8;
+    }
+
+    /* finish the hash */
+    CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+    if (unlikely(inplen)) {
+	uint64_t tmp = 0;
+	uint8_t *p = (uint8_t *) & tmp;
+	p[inplen] = 0x80;
+	do {
+	    inplen--;
+	    p[inplen] = ((uint8_t *) data)[inplen];
+	} while (inplen);
+	CVM_LOAD_SHA_UNIT(tmp, next);
+    } else {
+	CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
+    }
+#else
+    CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
+#endif
+
+    /* Finish Inner hash */
+    while (next != 7) {
+	CVM_LOAD_SHA_UNIT(((uint64_t) 0x0ULL), next);
+    }
+    CVM_LOAD_SHA_UNIT((uint64_t) ((alen + 64) << 3), next);
+
+    /* Get the inner hash of HMAC */
+    CVMX_MF_HSH_IV(tmp1, 0);
+    CVMX_MF_HSH_IV(tmp2, 1);
+    tmp3 = 0;
+    CVMX_MF_HSH_IV(tmp3, 2);
+
+    /* Initialize hash unit */
+    CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
+    CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
+    CVMX_MT_HSH_IV(od->octo_hmouter[2], 2);
+
+    CVMX_MT_HSH_DAT(tmp1, 0);
+    CVMX_MT_HSH_DAT(tmp2, 1);
+    tmp3 |= 0x0000000080000000;
+    CVMX_MT_HSH_DAT(tmp3, 2);
+    CVMX_MT_HSH_DATZ(3);
+    CVMX_MT_HSH_DATZ(4);
+    CVMX_MT_HSH_DATZ(5);
+    CVMX_MT_HSH_DATZ(6);
+    CVMX_MT_HSH_STARTSHA((uint64_t) ((64 + 20) << 3));
+
+    /* finish the hash */
+    CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+    if (unlikely(inplen)) {
+	uint64_t tmp = 0;
+	uint8_t *p = (uint8_t *) & tmp;
+	p[inplen] = 0x80;
+	do {
+	    inplen--;
+	    p[inplen] = ((uint8_t *) data)[inplen];
+	} while (inplen);
+	CVM_LOAD_MD5_UNIT(tmp, next);
+    } else {
+	CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+    }
+#else
+    CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+#endif
+
+    /* save the HMAC */
+    SG_INIT(sg, data32, data_i, data_l);
+    while (icv_off > 0) {
+	SG_CONSUME(sg, data32, data_i, data_l);
+	icv_off -= 4;
+    }
+    CVMX_MF_HSH_IV(tmp1, 0);
+    *data32 = (uint32_t) (tmp1 >> 32);
+    SG_CONSUME(sg, data32, data_i, data_l);
+    *data32 = (uint32_t) tmp1;
+    SG_CONSUME(sg, data32, data_i, data_l);
+    CVMX_MF_HSH_IV(tmp1, 1);
+    *data32 = (uint32_t) (tmp1 >> 32);
+
+    octeon_crypto_disable(&state, flags);
+    return 0;
+}
+
+int
+octo_aes_cbc_sha1_decrypt(
+    struct octo_sess *od,
+    struct scatterlist *sg, int sg_len,
+    int auth_off, int auth_len,
+    int crypt_off, int crypt_len,
+    int icv_off, uint8_t *ivp)
+{
+    register int next = 0;
+    union {
+	uint32_t data32[2];
+	uint64_t data64[1];
+    } mydata[2];
+    uint64_t *pdata = &mydata[0].data64[0];
+    uint64_t *data =  &mydata[1].data64[0];
+    uint32_t *data32;
+    uint64_t tmp1, tmp2, tmp3;
+    int data_i, data_l, alen = auth_len;
+    struct octeon_cop2_state state;
+    unsigned long flags;
+
+    dprintk("%s(a_off=%d a_len=%d c_off=%d c_len=%d icv_off=%d)\n",
+			__FUNCTION__, auth_off, auth_len, crypt_off, crypt_len, icv_off);
+
+    if (unlikely(od == NULL || sg==NULL || sg_len==0 || ivp==NULL ||
+	    (crypt_off & 0x3) || (crypt_off + crypt_len > sg_len) ||
+	    (crypt_len  & 0x7) ||
+	    (auth_len  & 0x7) ||
+	    (auth_off & 0x3) || (auth_off + auth_len > sg_len))) {
+	dprintk("%s: Bad parameters od=%p sg=%p sg_len=%d "
+		"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
+		"icv_off=%d ivp=%p\n", __FUNCTION__, od, sg, sg_len,
+		auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+	return -EINVAL;
+    }
+
+    SG_INIT(sg, data32, data_i, data_l);
+
+    CVMX_PREFETCH0(ivp);
+    CVMX_PREFETCH0(od->octo_enckey);
+
+    flags = octeon_crypto_enable(&state);
+
+    /* load AES Key */
+    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
+    CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[1], 1);
+
+    if (od->octo_encklen == 16) {
+	CVMX_MT_AES_KEY(0x0, 2);
+	CVMX_MT_AES_KEY(0x0, 3);
+    } else if (od->octo_encklen == 24) {
+	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+	CVMX_MT_AES_KEY(0x0, 3);
+    } else if (od->octo_encklen == 32) {
+	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
+	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[3], 3);
+    } else {
+	octeon_crypto_disable(&state, flags);
+	dprintk("%s: Bad key length %d\n", __FUNCTION__, od->octo_encklen);
+	return -EINVAL;
+    }
+    CVMX_MT_AES_KEYLENGTH(od->octo_encklen / 8 - 1);
+
+    CVMX_MT_AES_IV(((uint64_t *) ivp)[0], 0);
+    CVMX_MT_AES_IV(((uint64_t *) ivp)[1], 1);
+
+    /* Load SHA1 IV */
+    CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
+    CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
+    CVMX_MT_HSH_IV(od->octo_hminner[2], 2);
+
+    while (crypt_off > 0 && auth_off > 0) {
+	SG_CONSUME(sg, data32, data_i, data_l);
+	crypt_off -= 4;
+	auth_off -= 4;
+    }
+
+    /* align auth and crypt */
+    while (crypt_off > 0 && auth_len > 0) {
+	mydata[0].data32[0] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	mydata[0].data32[1] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	CVM_LOAD_SHA_UNIT(*pdata, next);
+	crypt_off -= 8;
+	auth_len -= 8;
+    }
+
+    while (crypt_len > 0) {
+	uint32_t *pdata32[3];
+
+	pdata32[0] = data32;
+	mydata[0].data32[0] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	pdata32[1] = data32;
+	mydata[0].data32[1] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	pdata32[2] = data32;
+	mydata[1].data32[0] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	mydata[1].data32[1] = *data32;
+
+	if (auth_len > 0) {
+	    CVM_LOAD_SHA_UNIT(*pdata, next);
+	    auth_len -= 8;
+	}
+	if (auth_len > 0) {
+	    CVM_LOAD_SHA_UNIT(*data, next);
+	    auth_len -= 8;
+	}
+
+	CVMX_MT_AES_DEC_CBC0(*pdata);
+	CVMX_MT_AES_DEC_CBC1(*data);
+	CVMX_MF_AES_RESULT(*pdata, 0);
+	CVMX_MF_AES_RESULT(*data, 1);
+	crypt_len -= 16;
+
+	*pdata32[0] = mydata[0].data32[0];
+	*pdata32[1] = mydata[0].data32[1];
+	*pdata32[2] = mydata[1].data32[0];
+	*data32     = mydata[1].data32[1];
+
+	SG_CONSUME(sg, data32, data_i, data_l);
+    }
+
+    /* finish and leftover hashing */
+    while (auth_len > 0) {
+	mydata[0].data32[0] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	mydata[0].data32[1] = *data32;
+	SG_CONSUME(sg, data32, data_i, data_l);
+	CVM_LOAD_SHA_UNIT(*pdata, next);
+	auth_len -= 8;
+    }
+
+    /* finish the hash */
+    CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+    if (unlikely(inplen)) {
+	uint64_t tmp = 0;
+	uint8_t *p = (uint8_t *) & tmp;
+	p[inplen] = 0x80;
+	do {
+	    inplen--;
+	    p[inplen] = ((uint8_t *) data)[inplen];
+	} while (inplen);
+	CVM_LOAD_SHA_UNIT(tmp, next);
+    } else {
+	CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
+    }
+#else
+    CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
+#endif
+
+    /* Finish Inner hash */
+    while (next != 7) {
+	CVM_LOAD_SHA_UNIT(((uint64_t) 0x0ULL), next);
+    }
+	CVM_LOAD_SHA_UNIT((uint64_t) ((alen + 64) << 3), next);
+
+    /* Get the inner hash of HMAC */
+    CVMX_MF_HSH_IV(tmp1, 0);
+    CVMX_MF_HSH_IV(tmp2, 1);
+    tmp3 = 0;
+    CVMX_MF_HSH_IV(tmp3, 2);
+
+    /* Initialize hash unit */
+    CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
+    CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
+    CVMX_MT_HSH_IV(od->octo_hmouter[2], 2);
+
+    CVMX_MT_HSH_DAT(tmp1, 0);
+    CVMX_MT_HSH_DAT(tmp2, 1);
+    tmp3 |= 0x0000000080000000;
+    CVMX_MT_HSH_DAT(tmp3, 2);
+    CVMX_MT_HSH_DATZ(3);
+    CVMX_MT_HSH_DATZ(4);
+    CVMX_MT_HSH_DATZ(5);
+    CVMX_MT_HSH_DATZ(6);
+    CVMX_MT_HSH_STARTSHA((uint64_t) ((64 + 20) << 3));
+
+    /* finish the hash */
+    CVMX_PREFETCH0(od->octo_hmouter);
+#if 0
+    if (unlikely(inplen)) {
+	uint64_t tmp = 0;
+	uint8_t *p = (uint8_t *) & tmp;
+	p[inplen] = 0x80;
+	do {
+	    inplen--;
+	    p[inplen] = ((uint8_t *) data)[inplen];
+	} while (inplen);
+	CVM_LOAD_MD5_UNIT(tmp, next);
+    } else {
+	CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+    }
+#else
+    CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
+#endif
+
+    /* save the HMAC */
+    SG_INIT(sg, data32, data_i, data_l);
+    while (icv_off > 0) {
+	SG_CONSUME(sg, data32, data_i, data_l);
+	icv_off -= 4;
+    }
+    CVMX_MF_HSH_IV(tmp1, 0);
+    *data32 = (uint32_t) (tmp1 >> 32);
+    SG_CONSUME(sg, data32, data_i, data_l);
+    *data32 = (uint32_t) tmp1;
+    SG_CONSUME(sg, data32, data_i, data_l);
+    CVMX_MF_HSH_IV(tmp1, 1);
+    *data32 = (uint32_t) (tmp1 >> 32);
+
+    octeon_crypto_disable(&state, flags);
+    return 0;
+}
+
+/****************************************************************************/
diff --git a/crypto/ocf/cryptocteon/cryptocteon.c b/crypto/ocf/cryptocteon/cryptocteon.c
new file mode 100644
index 000000000000..0168ad321950
--- /dev/null
+++ b/crypto/ocf/cryptocteon/cryptocteon.c
@@ -0,0 +1,576 @@
+/*
+ * Octeon Crypto for OCF
+ *
+ * Written by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2009-2010 David McCullough
+ *
+ * LICENSE TERMS
+ *
+ * The free distribution and use of this software in both source and binary
+ * form is allowed (with or without changes) provided that:
+ *
+ *   1. distributions of this source code include the above copyright
+ *      notice, this list of conditions and the following disclaimer;
+ *
+ *   2. distributions in binary form include the above copyright
+ *      notice, this list of conditions and the following disclaimer
+ *      in the documentation and/or other associated materials;
+ *
+ *   3. the copyright holder's name is not used to endorse products
+ *      built using this software without specific written permission.
+ *
+ * DISCLAIMER
+ *
+ * This software is provided 'as is' with no explicit or implied warranties
+ * in respect of its properties, including, but not limited to, correctness
+ * and/or fitness for purpose.
+ * ---------------------------------------------------------------------------
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/crypto.h>
+#include <linux/mm.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+
+#include <cryptodev.h>
+#include <uio.h>
+
+struct {
+	softc_device_decl	sc_dev;
+} octo_softc;
+
+#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
+
+struct octo_sess {
+	int					 octo_encalg;
+	#define MAX_CIPHER_KEYLEN	64
+	char				 octo_enckey[MAX_CIPHER_KEYLEN];
+	int					 octo_encklen;
+
+	int					 octo_macalg;
+	#define MAX_HASH_KEYLEN	64
+	char				 octo_mackey[MAX_HASH_KEYLEN];
+	int					 octo_macklen;
+	int					 octo_mackey_set;
+
+	int					 octo_mlen;
+	int					 octo_ivsize;
+
+	int					(*octo_encrypt)(struct octo_sess *od,
+	                      struct scatterlist *sg, int sg_len,
+						  int auth_off, int auth_len,
+						  int crypt_off, int crypt_len,
+						  int icv_off, uint8_t *ivp);
+	int					(*octo_decrypt)(struct octo_sess *od,
+	                      struct scatterlist *sg, int sg_len,
+						  int auth_off, int auth_len,
+						  int crypt_off, int crypt_len,
+						  int icv_off, uint8_t *ivp);
+
+	uint64_t			 octo_hminner[3];
+	uint64_t			 octo_hmouter[3];
+};
+
+int32_t octo_id = -1;
+module_param(octo_id, int, 0444);
+MODULE_PARM_DESC(octo_id, "Read-Only OCF ID for cryptocteon driver");
+
+static struct octo_sess **octo_sessions = NULL;
+static u_int32_t octo_sesnum = 0;
+
+static	int octo_process(device_t, struct cryptop *, int);
+static	int octo_newsession(device_t, u_int32_t *, struct cryptoini *);
+static	int octo_freesession(device_t, u_int64_t);
+
+static device_method_t octo_methods = {
+	/* crypto device methods */
+	DEVMETHOD(cryptodev_newsession,	octo_newsession),
+	DEVMETHOD(cryptodev_freesession,octo_freesession),
+	DEVMETHOD(cryptodev_process,	octo_process),
+};
+
+#define debug octo_debug
+int octo_debug = 0;
+module_param(octo_debug, int, 0644);
+MODULE_PARM_DESC(octo_debug, "Enable debug");
+
+
+#include "cavium_crypto.c"
+
+
+/*
+ * Generate a new octo session.  We artifically limit it to a single
+ * hash/cipher or hash-cipher combo just to make it easier, most callers
+ * do not expect more than this anyway.
+ */
+static int
+octo_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
+{
+	struct cryptoini *c, *encini = NULL, *macini = NULL;
+	struct octo_sess **ocd;
+	int i;
+
+	dprintk("%s()\n", __FUNCTION__);
+	if (sid == NULL || cri == NULL) {
+		dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
+		return EINVAL;
+	}
+
+	/*
+	 * To keep it simple, we only handle hash, cipher or hash/cipher in a
+	 * session,  you cannot currently do multiple ciphers/hashes in one
+	 * session even though it would be possibel to code this driver to
+	 * handle it.
+	 */
+	for (i = 0, c = cri; c && i < 2; i++) {
+		if (c->cri_alg == CRYPTO_MD5_HMAC ||
+				c->cri_alg == CRYPTO_SHA1_HMAC ||
+				c->cri_alg == CRYPTO_NULL_HMAC) {
+			if (macini) {
+				break;
+			}
+			macini = c;
+		}
+		if (c->cri_alg == CRYPTO_DES_CBC ||
+				c->cri_alg == CRYPTO_3DES_CBC ||
+				c->cri_alg == CRYPTO_AES_CBC ||
+				c->cri_alg == CRYPTO_NULL_CBC) {
+			if (encini) {
+				break;
+			}
+			encini = c;
+		}
+		c = c->cri_next;
+	}
+	if (!macini && !encini) {
+		dprintk("%s,%d - EINVAL bad cipher/hash or combination\n",
+				__FILE__, __LINE__);
+		return EINVAL;
+	}
+	if (c) {
+		dprintk("%s,%d - EINVAL cannot handle chained cipher/hash combos\n",
+				__FILE__, __LINE__);
+		return EINVAL;
+	}
+
+	/*
+	 * So we have something we can do, lets setup the session
+	 */
+
+	if (octo_sessions) {
+		for (i = 1; i < octo_sesnum; i++)
+			if (octo_sessions[i] == NULL)
+				break;
+	} else
+		i = 1;		/* NB: to silence compiler warning */
+
+	if (octo_sessions == NULL || i == octo_sesnum) {
+		if (octo_sessions == NULL) {
+			i = 1; /* We leave octo_sessions[0] empty */
+			octo_sesnum = CRYPTO_SW_SESSIONS;
+		} else
+			octo_sesnum *= 2;
+
+		ocd = kmalloc(octo_sesnum * sizeof(struct octo_sess *), SLAB_ATOMIC);
+		if (ocd == NULL) {
+			/* Reset session number */
+			if (octo_sesnum == CRYPTO_SW_SESSIONS)
+				octo_sesnum = 0;
+			else
+				octo_sesnum /= 2;
+			dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
+			return ENOBUFS;
+		}
+		memset(ocd, 0, octo_sesnum * sizeof(struct octo_sess *));
+
+		/* Copy existing sessions */
+		if (octo_sessions) {
+			memcpy(ocd, octo_sessions,
+			    (octo_sesnum / 2) * sizeof(struct octo_sess *));
+			kfree(octo_sessions);
+		}
+
+		octo_sessions = ocd;
+	}
+
+	ocd = &octo_sessions[i];
+	*sid = i;
+
+
+	*ocd = (struct octo_sess *) kmalloc(sizeof(struct octo_sess), SLAB_ATOMIC);
+	if (*ocd == NULL) {
+		octo_freesession(NULL, i);
+		dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
+		return ENOBUFS;
+	}
+	memset(*ocd, 0, sizeof(struct octo_sess));
+
+	if (encini && encini->cri_key) {
+		(*ocd)->octo_encklen = (encini->cri_klen + 7) / 8;
+		memcpy((*ocd)->octo_enckey, encini->cri_key, (*ocd)->octo_encklen);
+	}
+
+	if (macini && macini->cri_key) {
+		(*ocd)->octo_macklen = (macini->cri_klen + 7) / 8;
+		memcpy((*ocd)->octo_mackey, macini->cri_key, (*ocd)->octo_macklen);
+	}
+
+	(*ocd)->octo_mlen = 0;
+	if (encini && encini->cri_mlen)
+		(*ocd)->octo_mlen = encini->cri_mlen;
+	else if (macini && macini->cri_mlen)
+		(*ocd)->octo_mlen = macini->cri_mlen;
+	else
+		(*ocd)->octo_mlen = 12;
+
+	/*
+	 * point c at the enc if it exists, otherwise the mac
+	 */
+	c = encini ? encini : macini;
+
+	switch (c->cri_alg) {
+	case CRYPTO_DES_CBC:
+	case CRYPTO_3DES_CBC:
+		(*ocd)->octo_ivsize  = 8;
+		switch (macini ? macini->cri_alg : -1) {
+		case CRYPTO_MD5_HMAC:
+			(*ocd)->octo_encrypt = octo_des_cbc_md5_encrypt;
+			(*ocd)->octo_decrypt = octo_des_cbc_md5_decrypt;
+			octo_calc_hash(0, macini->cri_key, (*ocd)->octo_hminner,
+					(*ocd)->octo_hmouter);
+			break;
+		case CRYPTO_SHA1_HMAC:
+			(*ocd)->octo_encrypt = octo_des_cbc_sha1_encrypt;
+			(*ocd)->octo_decrypt = octo_des_cbc_sha1_decrypt;
+			octo_calc_hash(1, macini->cri_key, (*ocd)->octo_hminner,
+					(*ocd)->octo_hmouter);
+			break;
+		case -1:
+			(*ocd)->octo_encrypt = octo_des_cbc_encrypt;
+			(*ocd)->octo_decrypt = octo_des_cbc_decrypt;
+			break;
+		default:
+			octo_freesession(NULL, i);
+			dprintk("%s,%d: EINVALn", __FILE__, __LINE__);
+			return EINVAL;
+		}
+		break;
+	case CRYPTO_AES_CBC:
+		(*ocd)->octo_ivsize  = 16;
+		switch (macini ? macini->cri_alg : -1) {
+		case CRYPTO_MD5_HMAC:
+			(*ocd)->octo_encrypt = octo_aes_cbc_md5_encrypt;
+			(*ocd)->octo_decrypt = octo_aes_cbc_md5_decrypt;
+			octo_calc_hash(0, macini->cri_key, (*ocd)->octo_hminner,
+					(*ocd)->octo_hmouter);
+			break;
+		case CRYPTO_SHA1_HMAC:
+			(*ocd)->octo_encrypt = octo_aes_cbc_sha1_encrypt;
+			(*ocd)->octo_decrypt = octo_aes_cbc_sha1_decrypt;
+			octo_calc_hash(1, macini->cri_key, (*ocd)->octo_hminner,
+					(*ocd)->octo_hmouter);
+			break;
+		case -1:
+			(*ocd)->octo_encrypt = octo_aes_cbc_encrypt;
+			(*ocd)->octo_decrypt = octo_aes_cbc_decrypt;
+			break;
+		default:
+			octo_freesession(NULL, i);
+			dprintk("%s,%d: EINVALn", __FILE__, __LINE__);
+			return EINVAL;
+		}
+		break;
+	case CRYPTO_MD5_HMAC:
+		(*ocd)->octo_encrypt = octo_null_md5_encrypt;
+		(*ocd)->octo_decrypt = octo_null_md5_encrypt; /* encrypt == decrypt */
+		octo_calc_hash(0, macini->cri_key, (*ocd)->octo_hminner,
+				(*ocd)->octo_hmouter);
+		break;
+	case CRYPTO_SHA1_HMAC:
+		(*ocd)->octo_encrypt = octo_null_sha1_encrypt;
+		(*ocd)->octo_decrypt = octo_null_sha1_encrypt; /* encrypt == decrypt */
+		octo_calc_hash(1, macini->cri_key, (*ocd)->octo_hminner,
+				(*ocd)->octo_hmouter);
+		break;
+	default:
+		octo_freesession(NULL, i);
+		dprintk("%s,%d: EINVALn", __FILE__, __LINE__);
+		return EINVAL;
+	}
+
+	(*ocd)->octo_encalg = encini ? encini->cri_alg : -1;
+	(*ocd)->octo_macalg = macini ? macini->cri_alg : -1;
+
+	return 0;
+}
+
+/*
+ * Free a session.
+ */
+static int
+octo_freesession(device_t dev, u_int64_t tid)
+{
+	u_int32_t sid = CRYPTO_SESID2LID(tid);
+
+	dprintk("%s()\n", __FUNCTION__);
+	if (sid > octo_sesnum || octo_sessions == NULL ||
+			octo_sessions[sid] == NULL) {
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		return(EINVAL);
+	}
+
+	/* Silently accept and return */
+	if (sid == 0)
+		return(0);
+
+	if (octo_sessions[sid])
+		kfree(octo_sessions[sid]);
+	octo_sessions[sid] = NULL;
+	return 0;
+}
+
+/*
+ * Process a request.
+ */
+static int
+octo_process(device_t dev, struct cryptop *crp, int hint)
+{
+	struct cryptodesc *crd;
+	struct octo_sess *od;
+	u_int32_t lid;
+#define SCATTERLIST_MAX 16
+	struct scatterlist sg[SCATTERLIST_MAX];
+	int sg_num, sg_len;
+	struct sk_buff *skb = NULL;
+	struct uio *uiop = NULL;
+	struct cryptodesc *enccrd = NULL, *maccrd = NULL;
+	unsigned char *ivp = NULL;
+	unsigned char iv_data[HASH_MAX_LEN];
+	int auth_off = 0, auth_len = 0, crypt_off = 0, crypt_len = 0, icv_off = 0;
+
+	dprintk("%s()\n", __FUNCTION__);
+	/* Sanity check */
+	if (crp == NULL) {
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		return EINVAL;
+	}
+
+	crp->crp_etype = 0;
+
+	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		crp->crp_etype = EINVAL;
+		goto done;
+	}
+
+	lid = crp->crp_sid & 0xffffffff;
+	if (lid >= octo_sesnum || lid == 0 || octo_sessions == NULL ||
+			octo_sessions[lid] == NULL) {
+		crp->crp_etype = ENOENT;
+		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
+		goto done;
+	}
+	od = octo_sessions[lid];
+
+	/*
+	 * do some error checking outside of the loop for SKB and IOV processing
+	 * this leaves us with valid skb or uiop pointers for later
+	 */
+	if (crp->crp_flags & CRYPTO_F_SKBUF) {
+		skb = (struct sk_buff *) crp->crp_buf;
+		if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
+			printk("%s,%d: %d nr_frags > SCATTERLIST_MAX", __FILE__, __LINE__,
+					skb_shinfo(skb)->nr_frags);
+			goto done;
+		}
+	} else if (crp->crp_flags & CRYPTO_F_IOV) {
+		uiop = (struct uio *) crp->crp_buf;
+		if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
+			printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX", __FILE__, __LINE__,
+					uiop->uio_iovcnt);
+			goto done;
+		}
+	}
+
+	/* point our enccrd and maccrd appropriately */
+	crd = crp->crp_desc;
+	if (crd->crd_alg == od->octo_encalg) enccrd = crd;
+	if (crd->crd_alg == od->octo_macalg) maccrd = crd;
+	crd = crd->crd_next;
+	if (crd) {
+		if (crd->crd_alg == od->octo_encalg) enccrd = crd;
+		if (crd->crd_alg == od->octo_macalg) maccrd = crd;
+		crd = crd->crd_next;
+	}
+	if (crd) {
+		crp->crp_etype = EINVAL;
+		dprintk("%s,%d: ENOENT - descriptors do not match session\n",
+				__FILE__, __LINE__);
+		goto done;
+	}
+
+	if (enccrd) {
+		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
+			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
+				ivp = enccrd->crd_iv;
+			else
+				read_random((ivp = iv_data), od->octo_ivsize);
+			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
+				crypto_copyback(crp->crp_flags, crp->crp_buf,
+						enccrd->crd_inject, od->octo_ivsize, ivp);
+		} else {
+			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
+				ivp = enccrd->crd_iv;
+			} else {
+				ivp = iv_data;
+				crypto_copydata(crp->crp_flags, crp->crp_buf,
+						enccrd->crd_inject, od->octo_ivsize, (caddr_t) ivp);
+			}
+		}
+
+		if (maccrd) {
+			auth_off = maccrd->crd_skip;
+			auth_len = maccrd->crd_len;
+			icv_off  = maccrd->crd_inject;
+		}
+
+		crypt_off = enccrd->crd_skip;
+		crypt_len = enccrd->crd_len;
+	} else { /* if (maccrd) */
+		auth_off = maccrd->crd_skip;
+		auth_len = maccrd->crd_len;
+		icv_off  = maccrd->crd_inject;
+	}
+
+
+	/*
+	 * setup the SG list to cover the buffer
+	 */
+	memset(sg, 0, sizeof(sg));
+	if (crp->crp_flags & CRYPTO_F_SKBUF) {
+		int i, len;
+
+		sg_num = 0;
+		sg_len = 0;
+
+		len = skb_headlen(skb);
+		sg_set_page(&sg[sg_num], virt_to_page(skb->data), len,
+				offset_in_page(skb->data));
+		sg_len += len;
+		sg_num++;
+
+		for (i = 0; i < skb_shinfo(skb)->nr_frags && sg_num < SCATTERLIST_MAX;
+				i++) {
+			len = skb_shinfo(skb)->frags[i].size;
+			sg_set_page(&sg[sg_num], skb_frag_page(&skb_shinfo(skb)->frags[i]),
+					len, skb_shinfo(skb)->frags[i].page_offset);
+			sg_len += len;
+			sg_num++;
+		}
+	} else if (crp->crp_flags & CRYPTO_F_IOV) {
+		int len;
+
+		sg_len = 0;
+		for (sg_num = 0; sg_len < crp->crp_ilen &&
+				sg_num < uiop->uio_iovcnt &&
+				sg_num < SCATTERLIST_MAX; sg_num++) {
+			len = uiop->uio_iov[sg_num].iov_len;
+			sg_set_page(&sg[sg_num],
+					virt_to_page(uiop->uio_iov[sg_num].iov_base), len,
+					offset_in_page(uiop->uio_iov[sg_num].iov_base));
+			sg_len += len;
+		}
+	} else {
+		sg_len = crp->crp_ilen;
+		sg_set_page(&sg[0], virt_to_page(crp->crp_buf), sg_len,
+				offset_in_page(crp->crp_buf));
+		sg_num = 1;
+	}
+	if (sg_num > 0)
+		sg_mark_end(&sg[sg_num-1]);
+
+	/*
+	 * setup a new explicit key
+	 */
+	if (enccrd) {
+		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
+			od->octo_encklen = (enccrd->crd_klen + 7) / 8;
+			memcpy(od->octo_enckey, enccrd->crd_key, od->octo_encklen);
+		}
+	}
+	if (maccrd) {
+		if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
+			od->octo_macklen = (maccrd->crd_klen + 7) / 8;
+			memcpy(od->octo_mackey, maccrd->crd_key, od->octo_macklen);
+			od->octo_mackey_set = 0;
+		}
+		if (!od->octo_mackey_set) {
+			octo_calc_hash(maccrd->crd_alg == CRYPTO_MD5_HMAC ? 0 : 1,
+				maccrd->crd_key, od->octo_hminner, od->octo_hmouter);
+			od->octo_mackey_set = 1;
+		}
+	}
+
+
+	if (!enccrd || (enccrd->crd_flags & CRD_F_ENCRYPT))
+		(*od->octo_encrypt)(od, sg, sg_len,
+				auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+	else
+		(*od->octo_decrypt)(od, sg, sg_len,
+				auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+
+done:
+	crypto_done(crp);
+	return 0;
+}
+
+static int
+cryptocteon_init(void)
+{
+	dprintk("%s(%p)\n", __FUNCTION__, cryptocteon_init);
+
+	softc_device_init(&octo_softc, "cryptocteon", 0, octo_methods);
+
+	octo_id = crypto_get_driverid(softc_get_device(&octo_softc),
+			CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SYNC);
+	if (octo_id < 0) {
+		printk("Cryptocteon device cannot initialize!");
+		return -ENODEV;
+	}
+
+	crypto_register(octo_id, CRYPTO_MD5_HMAC, 0,0);
+	crypto_register(octo_id, CRYPTO_SHA1_HMAC, 0,0);
+	//crypto_register(octo_id, CRYPTO_MD5, 0,0);
+	//crypto_register(octo_id, CRYPTO_SHA1, 0,0);
+	crypto_register(octo_id, CRYPTO_DES_CBC, 0,0);
+	crypto_register(octo_id, CRYPTO_3DES_CBC, 0,0);
+	crypto_register(octo_id, CRYPTO_AES_CBC, 0,0);
+
+	return(0);
+}
+
+static void
+cryptocteon_exit(void)
+{
+	dprintk("%s()\n", __FUNCTION__);
+	crypto_unregister_all(octo_id);
+	octo_id = -1;
+}
+
+module_init(cryptocteon_init);
+module_exit(cryptocteon_exit);
+
+MODULE_LICENSE("BSD");
+MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
+MODULE_DESCRIPTION("Cryptocteon (OCF module for Cavium OCTEON crypto)");
diff --git a/crypto/ocf/cryptodev.c b/crypto/ocf/cryptodev.c
new file mode 100644
index 000000000000..35d727520ee0
--- /dev/null
+++ b/crypto/ocf/cryptodev.c
@@ -0,0 +1,1109 @@
+/*	$OpenBSD: cryptodev.c,v 1.52 2002/06/19 07:22:46 deraadt Exp $	*/
+
+/*-
+ * Linux port done by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2006-2010 David McCullough
+ * Copyright (C) 2004-2005 Intel Corporation.
+ * The license and original author are listed below.
+ *
+ * Copyright (c) 2001 Theo de Raadt
+ * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+__FBSDID("$FreeBSD: src/sys/opencrypto/cryptodev.c,v 1.34 2007/05/09 19:37:02 gnn Exp $");
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/unistd.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/dcache.h>
+#include <linux/file.h>
+#include <linux/mount.h>
+#include <linux/miscdevice.h>
+#include <asm/uaccess.h>
+
+#include <cryptodev.h>
+#include <uio.h>
+
+extern asmlinkage long sys_dup(unsigned int fildes);
+
+#define debug cryptodev_debug
+int cryptodev_debug = 0;
+module_param(cryptodev_debug, int, 0644);
+MODULE_PARM_DESC(cryptodev_debug, "Enable cryptodev debug");
+
+struct csession_info {
+	u_int16_t	blocksize;
+	u_int16_t	minkey, maxkey;
+
+	u_int16_t	keysize;
+	/* u_int16_t	hashsize;  */
+	u_int16_t	authsize;
+	u_int16_t	authkey;
+	/* u_int16_t	ctxsize; */
+};
+
+struct csession {
+	struct list_head	list;
+	u_int64_t	sid;
+	u_int32_t	ses;
+
+	wait_queue_head_t waitq;
+
+	u_int32_t	cipher;
+
+	u_int32_t	mac;
+
+	caddr_t		key;
+	int		keylen;
+	u_char		tmp_iv[EALG_MAX_BLOCK_LEN];
+
+	caddr_t		mackey;
+	int		mackeylen;
+
+	struct csession_info info;
+
+	struct iovec	iovec;
+	struct uio	uio;
+	int		error;
+};
+
+struct fcrypt {
+	struct list_head	csessions;
+	int		sesn;
+};
+
+static struct csession *csefind(struct fcrypt *, u_int);
+static int csedelete(struct fcrypt *, struct csession *);
+static struct csession *cseadd(struct fcrypt *, struct csession *);
+static struct csession *csecreate(struct fcrypt *, u_int64_t,
+		struct cryptoini *crie, struct cryptoini *cria, struct csession_info *);
+static int csefree(struct csession *);
+
+static	int cryptodev_op(struct csession *, struct crypt_op *);
+static	int cryptodev_key(struct crypt_kop *);
+static	int cryptodev_find(struct crypt_find_op *);
+
+static int cryptodev_cb(void *);
+static int cryptodev_open(struct inode *inode, struct file *filp);
+
+/*
+ * lock on driver table
+ * we track its state as spin_is_locked does not do anything on non-SMP boxes
+ */
+static spinlock_t	cryptodev_drivers_lock;
+static int		cryptodev_drivers_locked;		/* for non-SMP boxes */
+
+#define	CRYPTODEV_DRIVER_LOCK() \
+			({ \
+				spin_lock_irqsave(&cryptodev_drivers_lock, d_flags); \
+				cryptodev_drivers_locked = 1; \
+				dprintk("%s,%d: DRIVER_LOCK()\n", __FILE__, __LINE__); \
+			})
+#define	CRYPTODEV_DRIVER_UNLOCK() \
+			({ \
+				dprintk("%s,%d: DRIVER_UNLOCK()\n", __FILE__, __LINE__); \
+				cryptodev_drivers_locked = 0; \
+				spin_unlock_irqrestore(&cryptodev_drivers_lock, d_flags); \
+			})
+#define	CRYPTODEV_DRIVER_ASSERT() \
+			({ \
+				if (!cryptodev_drivers_locked) { \
+					dprintk("%s,%d: DRIVER_ASSERT!\n", __FILE__, __LINE__); \
+				} \
+			})
+
+/*
+ * Check a crypto identifier to see if it requested
+ * a valid crid and it's capabilities match.
+ */
+static int
+checkcrid(int crid)
+{
+	int hid = crid & ~(CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
+	int typ = crid & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
+	int caps = 0;
+
+	/* if the user hasn't selected a driver, then just call newsession */
+	if (hid == 0 && typ != 0)
+		return 0;
+
+	caps = crypto_getcaps(hid);
+
+	/* didn't find anything with capabilities */
+	if (caps == 0) {
+		dprintk("%s: hid=%x typ=%x not matched\n", __FUNCTION__, hid, typ);
+		return EINVAL;
+	}
+
+	/* the user didn't specify SW or HW, so the driver is ok */
+	if (typ == 0)
+		return 0;
+
+	/* if the type specified didn't match */
+	if (typ != (caps & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE))) {
+		dprintk("%s: hid=%x typ=%x caps=%x not matched\n", __FUNCTION__,
+				hid, typ, caps);
+		return EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+cryptodev_op(struct csession *cse, struct crypt_op *cop)
+{
+	struct cryptop *crp = NULL;
+	struct cryptodesc *crde = NULL, *crda = NULL;
+	int error = 0;
+
+	dprintk("%s()\n", __FUNCTION__);
+	if (cop->len > CRYPTO_MAX_DATA_LEN) {
+		dprintk("%s: %d > %d\n", __FUNCTION__, cop->len, CRYPTO_MAX_DATA_LEN);
+		return (E2BIG);
+	}
+
+	if (cse->info.blocksize && (cop->len % cse->info.blocksize) != 0) {
+		dprintk("%s: blocksize=%d len=%d\n", __FUNCTION__, cse->info.blocksize,
+				cop->len);
+		return (EINVAL);
+	}
+
+	cse->uio.uio_iov = &cse->iovec;
+	cse->uio.uio_iovcnt = 1;
+	cse->uio.uio_offset = 0;
+#if 0
+	cse->uio.uio_resid = cop->len;
+	cse->uio.uio_segflg = UIO_SYSSPACE;
+	cse->uio.uio_rw = UIO_WRITE;
+	cse->uio.uio_td = td;
+#endif
+	cse->uio.uio_iov[0].iov_len = cop->len;
+	if (cse->info.authsize)
+		cse->uio.uio_iov[0].iov_len += cse->info.authsize;
+	cse->uio.uio_iov[0].iov_base = kmalloc(cse->uio.uio_iov[0].iov_len,
+			GFP_KERNEL);
+
+	if (cse->uio.uio_iov[0].iov_base == NULL) {
+		dprintk("%s: iov_base kmalloc(%d) failed\n", __FUNCTION__,
+				(int)cse->uio.uio_iov[0].iov_len);
+		return (ENOMEM);
+	}
+
+	crp = crypto_getreq((cse->info.blocksize != 0) + (cse->info.authsize != 0));
+	if (crp == NULL) {
+		dprintk("%s: ENOMEM\n", __FUNCTION__);
+		error = ENOMEM;
+		goto bail;
+	}
+
+	if (cse->info.authsize && cse->info.blocksize) {
+		if (cop->op == COP_ENCRYPT) {
+			crde = crp->crp_desc;
+			crda = crde->crd_next;
+		} else {
+			crda = crp->crp_desc;
+			crde = crda->crd_next;
+		}
+	} else if (cse->info.authsize) {
+		crda = crp->crp_desc;
+	} else if (cse->info.blocksize) {
+		crde = crp->crp_desc;
+	} else {
+		dprintk("%s: bad request\n", __FUNCTION__);
+		error = EINVAL;
+		goto bail;
+	}
+
+	if ((error = copy_from_user(cse->uio.uio_iov[0].iov_base, cop->src,
+					cop->len))) {
+		dprintk("%s: bad copy\n", __FUNCTION__);
+		goto bail;
+	}
+
+	if (crda) {
+		crda->crd_skip = 0;
+		crda->crd_len = cop->len;
+		crda->crd_inject = cop->len;
+
+		crda->crd_alg = cse->mac;
+		crda->crd_key = cse->mackey;
+		crda->crd_klen = cse->mackeylen * 8;
+	}
+
+	if (crde) {
+		if (cop->op == COP_ENCRYPT)
+			crde->crd_flags |= CRD_F_ENCRYPT;
+		else
+			crde->crd_flags &= ~CRD_F_ENCRYPT;
+		crde->crd_len = cop->len;
+		crde->crd_inject = 0;
+
+		crde->crd_alg = cse->cipher;
+		crde->crd_key = cse->key;
+		crde->crd_klen = cse->keylen * 8;
+	}
+
+	crp->crp_ilen = cse->uio.uio_iov[0].iov_len;
+	crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM
+		       | (cop->flags & COP_F_BATCH);
+	crp->crp_buf = (caddr_t)&cse->uio;
+	crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_cb;
+	crp->crp_sid = cse->sid;
+	crp->crp_opaque = (void *)cse;
+
+	if (cop->iv) {
+		if (crde == NULL) {
+			error = EINVAL;
+			dprintk("%s no crde\n", __FUNCTION__);
+			goto bail;
+		}
+		if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
+			error = EINVAL;
+			dprintk("%s arc4 with IV\n", __FUNCTION__);
+			goto bail;
+		}
+		if ((error = copy_from_user(cse->tmp_iv, cop->iv,
+						cse->info.blocksize))) {
+			dprintk("%s bad iv copy\n", __FUNCTION__);
+			goto bail;
+		}
+		memcpy(crde->crd_iv, cse->tmp_iv, cse->info.blocksize);
+		crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
+		crde->crd_skip = 0;
+	} else if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
+		crde->crd_skip = 0;
+	} else if (crde) {
+		crde->crd_flags |= CRD_F_IV_PRESENT;
+		crde->crd_skip = cse->info.blocksize;
+		crde->crd_len -= cse->info.blocksize;
+	}
+
+	if (cop->mac && crda == NULL) {
+		error = EINVAL;
+		dprintk("%s no crda\n", __FUNCTION__);
+		goto bail;
+	}
+
+	/*
+	 * Let the dispatch run unlocked, then, interlock against the
+	 * callback before checking if the operation completed and going
+	 * to sleep.  This insures drivers don't inherit our lock which
+	 * results in a lock order reversal between crypto_dispatch forced
+	 * entry and the crypto_done callback into us.
+	 */
+	error = crypto_dispatch(crp);
+	if (error) {
+		dprintk("%s error in crypto_dispatch\n", __FUNCTION__);
+		goto bail;
+	}
+
+	dprintk("%s about to WAIT\n", __FUNCTION__);
+	/*
+	 * we really need to wait for driver to complete to maintain
+	 * state,  luckily interrupts will be remembered
+	 */
+	do {
+		error = wait_event_interruptible(crp->crp_waitq,
+				((crp->crp_flags & CRYPTO_F_DONE) != 0));
+		/*
+		 * we can't break out of this loop or we will leave behind
+		 * a huge mess,  however,  staying here means if your driver
+		 * is broken user applications can hang and not be killed.
+		 * The solution,  fix your driver :-)
+		 */
+		if (error) {
+			schedule();
+			error = 0;
+		}
+	} while ((crp->crp_flags & CRYPTO_F_DONE) == 0);
+	dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error);
+
+	if (crp->crp_etype != 0) {
+		error = crp->crp_etype;
+		dprintk("%s error in crp processing\n", __FUNCTION__);
+		goto bail;
+	}
+
+	if (cse->error) {
+		error = cse->error;
+		dprintk("%s error in cse processing\n", __FUNCTION__);
+		goto bail;
+	}
+
+	if (cop->dst && (error = copy_to_user(cop->dst,
+					cse->uio.uio_iov[0].iov_base, cop->len))) {
+		dprintk("%s bad dst copy\n", __FUNCTION__);
+		goto bail;
+	}
+
+	if (cop->mac &&
+			(error=copy_to_user(cop->mac,
+				(caddr_t)cse->uio.uio_iov[0].iov_base + cop->len,
+				cse->info.authsize))) {
+		dprintk("%s bad mac copy\n", __FUNCTION__);
+		goto bail;
+	}
+
+bail:
+	if (crp)
+		crypto_freereq(crp);
+	if (cse->uio.uio_iov[0].iov_base)
+		kfree(cse->uio.uio_iov[0].iov_base);
+
+	return (error);
+}
+
+static int
+cryptodev_cb(void *op)
+{
+	struct cryptop *crp = (struct cryptop *) op;
+	struct csession *cse = (struct csession *)crp->crp_opaque;
+	int error;
+
+	dprintk("%s()\n", __FUNCTION__);
+	error = crp->crp_etype;
+	if (error == EAGAIN) {
+		crp->crp_flags &= ~CRYPTO_F_DONE;
+#ifdef NOTYET
+		/*
+		 * DAVIDM I am fairly sure that we should turn this into a batch
+		 * request to stop bad karma/lockup, revisit
+		 */
+		crp->crp_flags |= CRYPTO_F_BATCH;
+#endif
+		return crypto_dispatch(crp);
+	}
+	if (error != 0 || (crp->crp_flags & CRYPTO_F_DONE)) {
+		cse->error = error;
+		wake_up_interruptible(&crp->crp_waitq);
+	}
+	return (0);
+}
+
+static int
+cryptodevkey_cb(void *op)
+{
+	struct cryptkop *krp = (struct cryptkop *) op;
+	dprintk("%s()\n", __FUNCTION__);
+	wake_up_interruptible(&krp->krp_waitq);
+	return (0);
+}
+
+static int
+cryptodev_key(struct crypt_kop *kop)
+{
+	struct cryptkop *krp = NULL;
+	int error = EINVAL;
+	int in, out, size, i;
+
+	dprintk("%s()\n", __FUNCTION__);
+	if (kop->crk_iparams + kop->crk_oparams > CRK_MAXPARAM) {
+		dprintk("%s params too big\n", __FUNCTION__);
+		return (EFBIG);
+	}
+
+	in = kop->crk_iparams;
+	out = kop->crk_oparams;
+	switch (kop->crk_op) {
+	case CRK_MOD_EXP:
+		if (in == 3 && out == 1)
+			break;
+		return (EINVAL);
+	case CRK_MOD_EXP_CRT:
+		if (in == 6 && out == 1)
+			break;
+		return (EINVAL);
+	case CRK_DSA_SIGN:
+		if (in == 5 && out == 2)
+			break;
+		return (EINVAL);
+	case CRK_DSA_VERIFY:
+		if (in == 7 && out == 0)
+			break;
+		return (EINVAL);
+	case CRK_DH_COMPUTE_KEY:
+		if (in == 3 && out == 1)
+			break;
+		return (EINVAL);
+	default:
+		return (EINVAL);
+	}
+
+	krp = (struct cryptkop *)kmalloc(sizeof *krp, GFP_KERNEL);
+	if (!krp)
+		return (ENOMEM);
+	bzero(krp, sizeof *krp);
+	krp->krp_op = kop->crk_op;
+	krp->krp_status = kop->crk_status;
+	krp->krp_iparams = kop->crk_iparams;
+	krp->krp_oparams = kop->crk_oparams;
+	krp->krp_crid = kop->crk_crid;
+	krp->krp_status = 0;
+	krp->krp_flags = CRYPTO_KF_CBIMM;
+	krp->krp_callback = (int (*) (struct cryptkop *)) cryptodevkey_cb;
+	init_waitqueue_head(&krp->krp_waitq);
+
+	for (i = 0; i < CRK_MAXPARAM; i++)
+		krp->krp_param[i].crp_nbits = kop->crk_param[i].crp_nbits;
+	for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) {
+		size = (krp->krp_param[i].crp_nbits + 7) / 8;
+		if (size == 0)
+			continue;
+		krp->krp_param[i].crp_p = (caddr_t) kmalloc(size, GFP_KERNEL);
+		if (i >= krp->krp_iparams)
+			continue;
+		error = copy_from_user(krp->krp_param[i].crp_p,
+				kop->crk_param[i].crp_p, size);
+		if (error)
+			goto fail;
+	}
+
+	error = crypto_kdispatch(krp);
+	if (error)
+		goto fail;
+
+	do {
+		error = wait_event_interruptible(krp->krp_waitq,
+				((krp->krp_flags & CRYPTO_KF_DONE) != 0));
+		/*
+		 * we can't break out of this loop or we will leave behind
+		 * a huge mess,  however,  staying here means if your driver
+		 * is broken user applications can hang and not be killed.
+		 * The solution,  fix your driver :-)
+		 */
+		if (error) {
+			schedule();
+			error = 0;
+		}
+	} while ((krp->krp_flags & CRYPTO_KF_DONE) == 0);
+
+	dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error);
+
+	kop->crk_crid = krp->krp_crid;		/* device that did the work */
+	if (krp->krp_status != 0) {
+		error = krp->krp_status;
+		goto fail;
+	}
+
+	for (i = krp->krp_iparams; i < krp->krp_iparams + krp->krp_oparams; i++) {
+		size = (krp->krp_param[i].crp_nbits + 7) / 8;
+		if (size == 0)
+			continue;
+		error = copy_to_user(kop->crk_param[i].crp_p, krp->krp_param[i].crp_p,
+				size);
+		if (error)
+			goto fail;
+	}
+
+fail:
+	if (krp) {
+		kop->crk_status = krp->krp_status;
+		for (i = 0; i < CRK_MAXPARAM; i++) {
+			if (krp->krp_param[i].crp_p)
+				kfree(krp->krp_param[i].crp_p);
+		}
+		kfree(krp);
+	}
+	return (error);
+}
+
+static int
+cryptodev_find(struct crypt_find_op *find)
+{
+	device_t dev;
+
+	if (find->crid != -1) {
+		dev = crypto_find_device_byhid(find->crid);
+		if (dev == NULL)
+			return (ENOENT);
+		strlcpy(find->name, device_get_nameunit(dev),
+		    sizeof(find->name));
+	} else {
+		find->crid = crypto_find_driver(find->name);
+		if (find->crid == -1)
+			return (ENOENT);
+	}
+	return (0);
+}
+
+static struct csession *
+csefind(struct fcrypt *fcr, u_int ses)
+{
+	struct csession *cse;
+
+	dprintk("%s()\n", __FUNCTION__);
+	list_for_each_entry(cse, &fcr->csessions, list)
+		if (cse->ses == ses)
+			return (cse);
+	return (NULL);
+}
+
+static int
+csedelete(struct fcrypt *fcr, struct csession *cse_del)
+{
+	struct csession *cse;
+
+	dprintk("%s()\n", __FUNCTION__);
+	list_for_each_entry(cse, &fcr->csessions, list) {
+		if (cse == cse_del) {
+			list_del(&cse->list);
+			return (1);
+		}
+	}
+	return (0);
+}
+
+static struct csession *
+cseadd(struct fcrypt *fcr, struct csession *cse)
+{
+	dprintk("%s()\n", __FUNCTION__);
+	list_add_tail(&cse->list, &fcr->csessions);
+	cse->ses = fcr->sesn++;
+	return (cse);
+}
+
+static struct csession *
+csecreate(struct fcrypt *fcr, u_int64_t sid, struct cryptoini *crie,
+	struct cryptoini *cria, struct csession_info *info)
+{
+	struct csession *cse;
+
+	dprintk("%s()\n", __FUNCTION__);
+	cse = (struct csession *) kmalloc(sizeof(struct csession), GFP_KERNEL);
+	if (cse == NULL)
+		return NULL;
+	memset(cse, 0, sizeof(struct csession));
+
+	INIT_LIST_HEAD(&cse->list);
+	init_waitqueue_head(&cse->waitq);
+
+	cse->key = crie->cri_key;
+	cse->keylen = crie->cri_klen/8;
+	cse->mackey = cria->cri_key;
+	cse->mackeylen = cria->cri_klen/8;
+	cse->sid = sid;
+	cse->cipher = crie->cri_alg;
+	cse->mac = cria->cri_alg;
+	cse->info = *info;
+	cseadd(fcr, cse);
+	return (cse);
+}
+
+static int
+csefree(struct csession *cse)
+{
+	int error;
+
+	dprintk("%s()\n", __FUNCTION__);
+	error = crypto_freesession(cse->sid);
+	if (cse->key)
+		kfree(cse->key);
+	if (cse->mackey)
+		kfree(cse->mackey);
+	kfree(cse);
+	return(error);
+}
+
+static int
+cryptodev_ioctl(
+	struct inode *inode,
+	struct file *filp,
+	unsigned int cmd,
+	unsigned long arg)
+{
+	struct cryptoini cria, crie;
+	struct fcrypt *fcr = filp->private_data;
+	struct csession *cse;
+	struct csession_info info;
+	struct session2_op sop;
+	struct crypt_op cop;
+	struct crypt_kop kop;
+	struct crypt_find_op fop;
+	u_int64_t sid;
+	u_int32_t ses = 0;
+	int feat, fd, error = 0, crid;
+	mm_segment_t fs;
+	unsigned long d_flags;
+
+	dprintk("%s(cmd=%x arg=%lx)\n", __FUNCTION__, cmd, arg);
+
+	switch (cmd) {
+
+	case CRIOGET: {
+		dprintk("%s(CRIOGET)\n", __FUNCTION__);
+		fs = get_fs();
+		set_fs(get_ds());
+		for (fd = 0; fd < files_fdtable(current->files)->max_fds; fd++)
+			if (files_fdtable(current->files)->fd[fd] == filp)
+				break;
+		fd = sys_dup(fd);
+		set_fs(fs);
+		put_user(fd, (int *) arg);
+		return IS_ERR_VALUE(fd) ? fd : 0;
+		}
+
+#define	CIOCGSESSSTR	(cmd == CIOCGSESSION ? "CIOCGSESSION" : "CIOCGSESSION2")
+	case CIOCGSESSION:
+	case CIOCGSESSION2:
+		dprintk("%s(%s)\n", __FUNCTION__, CIOCGSESSSTR);
+		memset(&crie, 0, sizeof(crie));
+		memset(&cria, 0, sizeof(cria));
+		memset(&info, 0, sizeof(info));
+		memset(&sop, 0, sizeof(sop));
+
+		if (copy_from_user(&sop, (void*)arg, (cmd == CIOCGSESSION) ?
+					sizeof(struct session_op) : sizeof(sop))) {
+			dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
+			error = EFAULT;
+			goto bail;
+		}
+
+		switch (sop.cipher) {
+		case 0:
+			dprintk("%s(%s) - no cipher\n", __FUNCTION__, CIOCGSESSSTR);
+			break;
+		case CRYPTO_NULL_CBC:
+			info.blocksize = NULL_BLOCK_LEN;
+			info.minkey = NULL_MIN_KEY_LEN;
+			info.maxkey = NULL_MAX_KEY_LEN;
+			break;
+		case CRYPTO_DES_CBC:
+			info.blocksize = DES_BLOCK_LEN;
+			info.minkey = DES_MIN_KEY_LEN;
+			info.maxkey = DES_MAX_KEY_LEN;
+			break;
+		case CRYPTO_3DES_CBC:
+			info.blocksize = DES3_BLOCK_LEN;
+			info.minkey = DES3_MIN_KEY_LEN;
+			info.maxkey = DES3_MAX_KEY_LEN;
+			break;
+		case CRYPTO_BLF_CBC:
+			info.blocksize = BLOWFISH_BLOCK_LEN;
+			info.minkey = BLOWFISH_MIN_KEY_LEN;
+			info.maxkey = BLOWFISH_MAX_KEY_LEN;
+			break;
+		case CRYPTO_CAST_CBC:
+			info.blocksize = CAST128_BLOCK_LEN;
+			info.minkey = CAST128_MIN_KEY_LEN;
+			info.maxkey = CAST128_MAX_KEY_LEN;
+			break;
+		case CRYPTO_SKIPJACK_CBC:
+			info.blocksize = SKIPJACK_BLOCK_LEN;
+			info.minkey = SKIPJACK_MIN_KEY_LEN;
+			info.maxkey = SKIPJACK_MAX_KEY_LEN;
+			break;
+		case CRYPTO_AES_CBC:
+			info.blocksize = AES_BLOCK_LEN;
+			info.minkey = AES_MIN_KEY_LEN;
+			info.maxkey = AES_MAX_KEY_LEN;
+			break;
+		case CRYPTO_ARC4:
+			info.blocksize = ARC4_BLOCK_LEN;
+			info.minkey = ARC4_MIN_KEY_LEN;
+			info.maxkey = ARC4_MAX_KEY_LEN;
+			break;
+		case CRYPTO_CAMELLIA_CBC:
+			info.blocksize = CAMELLIA_BLOCK_LEN;
+			info.minkey = CAMELLIA_MIN_KEY_LEN;
+			info.maxkey = CAMELLIA_MAX_KEY_LEN;
+			break;
+		default:
+			dprintk("%s(%s) - bad cipher\n", __FUNCTION__, CIOCGSESSSTR);
+			error = EINVAL;
+			goto bail;
+		}
+
+		switch (sop.mac) {
+		case 0:
+			dprintk("%s(%s) - no mac\n", __FUNCTION__, CIOCGSESSSTR);
+			break;
+		case CRYPTO_NULL_HMAC:
+			info.authsize = NULL_HASH_LEN;
+			break;
+		case CRYPTO_MD5:
+			info.authsize = MD5_HASH_LEN;
+			break;
+		case CRYPTO_SHA1:
+			info.authsize = SHA1_HASH_LEN;
+			break;
+		case CRYPTO_SHA2_256:
+			info.authsize = SHA2_256_HASH_LEN;
+			break;
+		case CRYPTO_SHA2_384:
+			info.authsize = SHA2_384_HASH_LEN;
+			break;
+		case CRYPTO_SHA2_512:
+			info.authsize = SHA2_512_HASH_LEN;
+			break;
+		case CRYPTO_RIPEMD160:
+			info.authsize = RIPEMD160_HASH_LEN;
+			break;
+		case CRYPTO_MD5_HMAC:
+			info.authsize = MD5_HASH_LEN;
+			info.authkey = 16;
+			break;
+		case CRYPTO_SHA1_HMAC:
+			info.authsize = SHA1_HASH_LEN;
+			info.authkey = 20;
+			break;
+		case CRYPTO_SHA2_256_HMAC:
+			info.authsize = SHA2_256_HASH_LEN;
+			info.authkey = 32;
+			break;
+		case CRYPTO_SHA2_384_HMAC:
+			info.authsize = SHA2_384_HASH_LEN;
+			info.authkey = 48;
+			break;
+		case CRYPTO_SHA2_512_HMAC:
+			info.authsize = SHA2_512_HASH_LEN;
+			info.authkey = 64;
+			break;
+		case CRYPTO_RIPEMD160_HMAC:
+			info.authsize = RIPEMD160_HASH_LEN;
+			info.authkey = 20;
+			break;
+		default:
+			dprintk("%s(%s) - bad mac\n", __FUNCTION__, CIOCGSESSSTR);
+			error = EINVAL;
+			goto bail;
+		}
+
+		if (info.blocksize) {
+			crie.cri_alg = sop.cipher;
+			crie.cri_klen = sop.keylen * 8;
+			if ((info.maxkey && sop.keylen > info.maxkey) ||
+					sop.keylen < info.minkey) {
+				dprintk("%s(%s) - bad key\n", __FUNCTION__, CIOCGSESSSTR);
+				error = EINVAL;
+				goto bail;
+			}
+
+			crie.cri_key = (u_int8_t *) kmalloc(crie.cri_klen/8+1, GFP_KERNEL);
+			if (copy_from_user(crie.cri_key, sop.key,
+							crie.cri_klen/8)) {
+				dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
+				error = EFAULT;
+				goto bail;
+			}
+			if (info.authsize)
+				crie.cri_next = &cria;
+		}
+
+		if (info.authsize) {
+			cria.cri_alg = sop.mac;
+			cria.cri_klen = sop.mackeylen * 8;
+			if (info.authkey && sop.mackeylen != info.authkey) {
+				dprintk("%s(%s) - mackeylen %d != %d\n", __FUNCTION__,
+						CIOCGSESSSTR, sop.mackeylen, info.authkey);
+				error = EINVAL;
+				goto bail;
+			}
+
+			if (cria.cri_klen) {
+				cria.cri_key = (u_int8_t *) kmalloc(cria.cri_klen/8,GFP_KERNEL);
+				if (copy_from_user(cria.cri_key, sop.mackey,
+								cria.cri_klen / 8)) {
+					dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
+					error = EFAULT;
+					goto bail;
+				}
+			}
+		}
+
+		/* NB: CIOGSESSION2 has the crid */
+		if (cmd == CIOCGSESSION2) {
+			crid = sop.crid;
+			error = checkcrid(crid);
+			if (error) {
+				dprintk("%s(%s) - checkcrid %x\n", __FUNCTION__,
+						CIOCGSESSSTR, error);
+				goto bail;
+			}
+		} else {
+			/* allow either HW or SW to be used */
+			crid = CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
+		}
+		CRYPTODEV_DRIVER_LOCK();
+		error = crypto_newsession(&sid, (info.blocksize ? &crie : &cria), crid);
+		if (error) {
+			dprintk("%s(%s) - newsession %d\n",__FUNCTION__,CIOCGSESSSTR,error);
+			CRYPTODEV_DRIVER_UNLOCK();
+			goto bail;
+		}
+
+		cse = csecreate(fcr, sid, &crie, &cria, &info);
+		if (cse == NULL) {
+			crypto_freesession(sid);
+			error = EINVAL;
+			dprintk("%s(%s) - csecreate failed\n", __FUNCTION__, CIOCGSESSSTR);
+			CRYPTODEV_DRIVER_UNLOCK();
+			goto bail;
+		}
+		sop.ses = cse->ses;
+
+		if (cmd == CIOCGSESSION2) {
+			/* return hardware/driver id */
+			sop.crid = CRYPTO_SESID2HID(cse->sid);
+		}
+		CRYPTODEV_DRIVER_UNLOCK();
+
+		if (copy_to_user((void*)arg, &sop, (cmd == CIOCGSESSION) ?
+					sizeof(struct session_op) : sizeof(sop))) {
+			dprintk("%s(%s) - bad copy\n", __FUNCTION__, CIOCGSESSSTR);
+			error = EFAULT;
+		}
+bail:
+		if (error) {
+			dprintk("%s(%s) - bail %d\n", __FUNCTION__, CIOCGSESSSTR, error);
+			if (crie.cri_key)
+				kfree(crie.cri_key);
+			if (cria.cri_key)
+				kfree(cria.cri_key);
+		}
+		break;
+	case CIOCFSESSION:
+		dprintk("%s(CIOCFSESSION)\n", __FUNCTION__);
+		get_user(ses, (uint32_t*)arg);
+		CRYPTODEV_DRIVER_LOCK();
+		cse = csefind(fcr, ses);
+		if (cse == NULL) {
+			error = EINVAL;
+			dprintk("%s(CIOCFSESSION) - Fail %d\n", __FUNCTION__, error);
+			CRYPTODEV_DRIVER_UNLOCK();
+			break;
+		}
+		csedelete(fcr, cse);
+		error = csefree(cse);
+		CRYPTODEV_DRIVER_UNLOCK();
+		break;
+	case CIOCCRYPT:
+		dprintk("%s(CIOCCRYPT)\n", __FUNCTION__);
+		if(copy_from_user(&cop, (void*)arg, sizeof(cop))) {
+			dprintk("%s(CIOCCRYPT) - bad copy\n", __FUNCTION__);
+			error = EFAULT;
+			goto bail;
+		}
+		CRYPTODEV_DRIVER_LOCK();
+		cse = csefind(fcr, cop.ses);
+		if (cse == NULL) {
+			error = EINVAL;
+			dprintk("%s(CIOCCRYPT) - Fail %d\n", __FUNCTION__, error);
+			CRYPTODEV_DRIVER_UNLOCK();
+			break;
+		}
+		CRYPTODEV_DRIVER_UNLOCK();
+		error = cryptodev_op(cse, &cop);
+		if(copy_to_user((void*)arg, &cop, sizeof(cop))) {
+			dprintk("%s(CIOCCRYPT) - bad return copy\n", __FUNCTION__);
+			error = EFAULT;
+			goto bail;
+		}
+		break;
+	case CIOCKEY:
+	case CIOCKEY2:
+		dprintk("%s(CIOCKEY)\n", __FUNCTION__);
+		if (!crypto_userasymcrypto)
+			return (EPERM);		/* XXX compat? */
+		if(copy_from_user(&kop, (void*)arg, sizeof(kop))) {
+			dprintk("%s(CIOCKEY) - bad copy\n", __FUNCTION__);
+			error = EFAULT;
+			goto bail;
+		}
+		if (cmd == CIOCKEY) {
+			/* NB: crypto core enforces s/w driver use */
+			kop.crk_crid =
+			    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE;
+		}
+		error = cryptodev_key(&kop);
+		if(copy_to_user((void*)arg, &kop, sizeof(kop))) {
+			dprintk("%s(CIOCGKEY) - bad return copy\n", __FUNCTION__);
+			error = EFAULT;
+			goto bail;
+		}
+		break;
+	case CIOCASYMFEAT:
+		dprintk("%s(CIOCASYMFEAT)\n", __FUNCTION__);
+		if (!crypto_userasymcrypto) {
+			/*
+			 * NB: if user asym crypto operations are
+			 * not permitted return "no algorithms"
+			 * so well-behaved applications will just
+			 * fallback to doing them in software.
+			 */
+			feat = 0;
+		} else
+			error = crypto_getfeat(&feat);
+		if (!error) {
+		  error = copy_to_user((void*)arg, &feat, sizeof(feat));
+		}
+		break;
+	case CIOCFINDDEV:
+		if (copy_from_user(&fop, (void*)arg, sizeof(fop))) {
+			dprintk("%s(CIOCFINDDEV) - bad copy\n", __FUNCTION__);
+			error = EFAULT;
+			goto bail;
+		}
+		error = cryptodev_find(&fop);
+		if (copy_to_user((void*)arg, &fop, sizeof(fop))) {
+			dprintk("%s(CIOCFINDDEV) - bad return copy\n", __FUNCTION__);
+			error = EFAULT;
+			goto bail;
+		}
+		break;
+	default:
+		dprintk("%s(unknown ioctl 0x%x)\n", __FUNCTION__, cmd);
+		error = EINVAL;
+		break;
+	}
+	return(-error);
+}
+
+#ifdef HAVE_UNLOCKED_IOCTL
+static long
+cryptodev_unlocked_ioctl(
+	struct file *filp,
+	unsigned int cmd,
+	unsigned long arg)
+{
+	return cryptodev_ioctl(NULL, filp, cmd, arg);
+}
+#endif
+
+static int
+cryptodev_open(struct inode *inode, struct file *filp)
+{
+	struct fcrypt *fcr;
+
+	dprintk("%s()\n", __FUNCTION__);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
+	/*
+	 * on 2.6.35 private_data points to a miscdevice structure, we override
+	 * it,  which is currently safe to do.
+	 */
+	if (filp->private_data) {
+		printk("cryptodev: Private data already exists - %p!\n", filp->private_data);
+		return(-ENODEV);
+	}
+#endif
+
+	fcr = kmalloc(sizeof(*fcr), GFP_KERNEL);
+	if (!fcr) {
+		dprintk("%s() - malloc failed\n", __FUNCTION__);
+		return(-ENOMEM);
+	}
+	memset(fcr, 0, sizeof(*fcr));
+
+	INIT_LIST_HEAD(&fcr->csessions);
+	filp->private_data = fcr;
+	return(0);
+}
+
+static int
+cryptodev_release(struct inode *inode, struct file *filp)
+{
+	struct fcrypt *fcr = filp->private_data;
+	struct csession *cse, *tmp;
+	unsigned long d_flags;
+
+	dprintk("%s()\n", __FUNCTION__);
+	if (!filp) {
+		printk("cryptodev: No private data on release\n");
+		return(0);
+	}
+
+	CRYPTODEV_DRIVER_LOCK();
+	list_for_each_entry_safe(cse, tmp, &fcr->csessions, list) {
+		list_del(&cse->list);
+		(void)csefree(cse);
+	}
+	filp->private_data = NULL;
+	CRYPTODEV_DRIVER_UNLOCK();
+	kfree(fcr);
+	return(0);
+}
+
+static struct file_operations cryptodev_fops = {
+	.owner = THIS_MODULE,
+	.open = cryptodev_open,
+	.release = cryptodev_release,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
+	.ioctl = cryptodev_ioctl,
+#endif
+#ifdef HAVE_UNLOCKED_IOCTL
+	.unlocked_ioctl = cryptodev_unlocked_ioctl,
+#endif
+};
+
+static struct miscdevice cryptodev = {
+	.minor = CRYPTODEV_MINOR,
+	.name = "crypto",
+	.fops = &cryptodev_fops,
+};
+
+static int __init
+cryptodev_init(void)
+{
+	int rc;
+
+	dprintk("%s(%p)\n", __FUNCTION__, cryptodev_init);
+	rc = misc_register(&cryptodev);
+	if (rc) {
+		printk(KERN_ERR "cryptodev: registration of /dev/crypto failed\n");
+		return(rc);
+	}
+
+	return(0);
+}
+
+static void __exit
+cryptodev_exit(void)
+{
+	dprintk("%s()\n", __FUNCTION__);
+	misc_deregister(&cryptodev);
+}
+
+module_init(cryptodev_init);
+module_exit(cryptodev_exit);
+
+MODULE_LICENSE("BSD");
+MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
+MODULE_DESCRIPTION("Cryptodev (user interface to OCF)");
diff --git a/crypto/ocf/cryptodev.h b/crypto/ocf/cryptodev.h
new file mode 100644
index 000000000000..06915897970a
--- /dev/null
+++ b/crypto/ocf/cryptodev.h
@@ -0,0 +1,485 @@
+/*	$FreeBSD: src/sys/opencrypto/cryptodev.h,v 1.25 2007/05/09 19:37:02 gnn Exp $	*/
+/*	$OpenBSD: cryptodev.h,v 1.31 2002/06/11 11:14:29 beck Exp $	*/
+
+/*-
+ * Linux port done by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2006-2010 David McCullough
+ * Copyright (C) 2004-2005 Intel Corporation.
+ * The license and original author are listed below.
+ *
+ * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
+ * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
+ *
+ * This code was written by Angelos D. Keromytis in Athens, Greece, in
+ * February 2000. Network Security Technologies Inc. (NSTI) kindly
+ * supported the development of this code.
+ *
+ * Copyright (c) 2000 Angelos D. Keromytis
+ *
+ * Permission to use, copy, and modify this software with or without fee
+ * is hereby granted, provided that this entire notice is included in
+ * all source code copies of any software which is or includes a copy or
+ * modification of this software.
+ *
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
+ * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
+ * PURPOSE.
+ *
+ * Copyright (c) 2001 Theo de Raadt
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+
+#ifndef _CRYPTO_CRYPTO_H_
+#define _CRYPTO_CRYPTO_H_
+
+#ifdef CONFIG_OF
+#include "../../arch/arm/mach-mvebu/include/mach/mvTypes.h"
+#include "../../drivers/crypto/mvebu_cesa/mvSysCesaConfig.h"
+#endif
+
+/* Some initial values */
+#define CRYPTO_DRIVERS_INITIAL	4
+#define CRYPTO_SW_SESSIONS	32
+
+/* Hash values */
+#define NULL_HASH_LEN		0
+#define MD5_HASH_LEN		16
+#define SHA1_HASH_LEN		20
+#define RIPEMD160_HASH_LEN	20
+#define SHA2_256_HASH_LEN	32
+#define SHA2_384_HASH_LEN	48
+#define SHA2_512_HASH_LEN	64
+#define MD5_KPDK_HASH_LEN	16
+#define SHA1_KPDK_HASH_LEN	20
+/* Maximum hash algorithm result length */
+#define HASH_MAX_LEN		SHA2_512_HASH_LEN /* Keep this updated */
+
+/* HMAC values */
+#define NULL_HMAC_BLOCK_LEN			1
+#define MD5_HMAC_BLOCK_LEN			64
+#define SHA1_HMAC_BLOCK_LEN			64
+#define RIPEMD160_HMAC_BLOCK_LEN	64
+#define SHA2_256_HMAC_BLOCK_LEN		64
+#define SHA2_384_HMAC_BLOCK_LEN		128
+#define SHA2_512_HMAC_BLOCK_LEN		128
+/* Maximum HMAC block length */
+#define HMAC_MAX_BLOCK_LEN		SHA2_512_HMAC_BLOCK_LEN /* Keep this updated */
+#define HMAC_IPAD_VAL			0x36
+#define HMAC_OPAD_VAL			0x5C
+
+/* Encryption algorithm block sizes */
+#define NULL_BLOCK_LEN			1
+#define DES_BLOCK_LEN			8
+#define DES3_BLOCK_LEN			8
+#define BLOWFISH_BLOCK_LEN		8
+#define SKIPJACK_BLOCK_LEN		8
+#define CAST128_BLOCK_LEN		8
+#define RIJNDAEL128_BLOCK_LEN	16
+#define AES_BLOCK_LEN			RIJNDAEL128_BLOCK_LEN
+#define CAMELLIA_BLOCK_LEN		16
+#define ARC4_BLOCK_LEN			1
+#define EALG_MAX_BLOCK_LEN		AES_BLOCK_LEN /* Keep this updated */
+
+/* Encryption algorithm min and max key sizes */
+#define NULL_MIN_KEY_LEN		0
+#define NULL_MAX_KEY_LEN		0
+#define DES_MIN_KEY_LEN			8
+#define DES_MAX_KEY_LEN			8
+#define DES3_MIN_KEY_LEN		24
+#define DES3_MAX_KEY_LEN		24
+#define BLOWFISH_MIN_KEY_LEN	4
+#define BLOWFISH_MAX_KEY_LEN	56
+#define SKIPJACK_MIN_KEY_LEN	10
+#define SKIPJACK_MAX_KEY_LEN	10
+#define CAST128_MIN_KEY_LEN		5
+#define CAST128_MAX_KEY_LEN		16
+#define RIJNDAEL128_MIN_KEY_LEN	16
+#define RIJNDAEL128_MAX_KEY_LEN	32
+#define AES_MIN_KEY_LEN			RIJNDAEL128_MIN_KEY_LEN
+#define AES_MAX_KEY_LEN			RIJNDAEL128_MAX_KEY_LEN
+#define CAMELLIA_MIN_KEY_LEN	16
+#define CAMELLIA_MAX_KEY_LEN	32
+#define ARC4_MIN_KEY_LEN		1
+#define ARC4_MAX_KEY_LEN		256
+
+/* Max size of data that can be processed */
+#define CRYPTO_MAX_DATA_LEN		64*1024 - 1
+
+#define CRYPTO_ALGORITHM_MIN	1
+#define CRYPTO_DES_CBC			1
+#define CRYPTO_3DES_CBC			2
+#define CRYPTO_BLF_CBC			3
+#define CRYPTO_CAST_CBC			4
+#define CRYPTO_SKIPJACK_CBC		5
+#define CRYPTO_MD5_HMAC			6
+#define CRYPTO_SHA1_HMAC		7
+#define CRYPTO_RIPEMD160_HMAC	8
+#define CRYPTO_MD5_KPDK			9
+#define CRYPTO_SHA1_KPDK		10
+#define CRYPTO_RIJNDAEL128_CBC	11 /* 128 bit blocksize */
+#define CRYPTO_AES_CBC			11 /* 128 bit blocksize -- the same as above */
+#define CRYPTO_ARC4				12
+#define CRYPTO_MD5				13
+#define CRYPTO_SHA1				14
+#define CRYPTO_NULL_HMAC		15
+#define CRYPTO_NULL_CBC			16
+#define CRYPTO_DEFLATE_COMP		17 /* Deflate compression algorithm */
+#define CRYPTO_SHA2_256_HMAC	18
+#define CRYPTO_SHA2_384_HMAC	19
+#define CRYPTO_SHA2_512_HMAC	20
+#define CRYPTO_CAMELLIA_CBC		21
+#define CRYPTO_SHA2_256			22
+#define CRYPTO_SHA2_384			23
+#define CRYPTO_SHA2_512			24
+#define CRYPTO_RIPEMD160		25
+#define	CRYPTO_LZS_COMP			26
+#define CRYPTO_ALGORITHM_MAX	26 /* Keep updated - see above */
+
+/* Algorithm flags */
+#define CRYPTO_ALG_FLAG_SUPPORTED	0x01 /* Algorithm is supported */
+#define CRYPTO_ALG_FLAG_RNG_ENABLE	0x02 /* Has HW RNG for DH/DSA */
+#define CRYPTO_ALG_FLAG_DSA_SHA		0x04 /* Can do SHA on msg */
+
+/*
+ * Crypto driver/device flags.  They can set in the crid
+ * parameter when creating a session or submitting a key
+ * op to affect the device/driver assigned.  If neither
+ * of these are specified then the crid is assumed to hold
+ * the driver id of an existing (and suitable) device that
+ * must be used to satisfy the request.
+ */
+#define CRYPTO_FLAG_HARDWARE	0x01000000	/* hardware accelerated */
+#define CRYPTO_FLAG_SOFTWARE	0x02000000	/* software implementation */
+
+/* NB: deprecated */
+struct session_op {
+	u_int32_t	cipher;		/* ie. CRYPTO_DES_CBC */
+	u_int32_t	mac;		/* ie. CRYPTO_MD5_HMAC */
+
+	u_int32_t	keylen;		/* cipher key */
+	caddr_t		key;
+	int		mackeylen;	/* mac key */
+	caddr_t		mackey;
+
+	u_int32_t	ses;		/* returns: session # */
+};
+
+struct session2_op {
+	u_int32_t	cipher;		/* ie. CRYPTO_DES_CBC */
+	u_int32_t	mac;		/* ie. CRYPTO_MD5_HMAC */
+
+	u_int32_t	keylen;		/* cipher key */
+	caddr_t		key;
+	int		mackeylen;	/* mac key */
+	caddr_t		mackey;
+
+	u_int32_t	ses;		/* returns: session # */
+	int		crid;		/* driver id + flags (rw) */
+	int		pad[4];		/* for future expansion */
+};
+
+struct crypt_op {
+	u_int32_t	ses;
+	u_int16_t	op;		/* i.e. COP_ENCRYPT */
+#define COP_NONE	0
+#define COP_ENCRYPT	1
+#define COP_DECRYPT	2
+	u_int16_t	flags;
+#define	COP_F_BATCH	0x0008		/* Batch op if possible */
+	u_int		len;
+	caddr_t		src, dst;	/* become iov[] inside kernel */
+	caddr_t		mac;		/* must be big enough for chosen MAC */
+	caddr_t		iv;
+};
+
+/*
+ * Parameters for looking up a crypto driver/device by
+ * device name or by id.  The latter are returned for
+ * created sessions (crid) and completed key operations.
+ */
+struct crypt_find_op {
+	int		crid;		/* driver id + flags */
+	char		name[32];	/* device/driver name */
+};
+
+/* bignum parameter, in packed bytes, ... */
+struct crparam {
+	caddr_t		crp_p;
+	u_int		crp_nbits;
+};
+
+#define CRK_MAXPARAM	8
+
+struct crypt_kop {
+	u_int		crk_op;		/* ie. CRK_MOD_EXP or other */
+	u_int		crk_status;	/* return status */
+	u_short		crk_iparams;	/* # of input parameters */
+	u_short		crk_oparams;	/* # of output parameters */
+	u_int		crk_crid;	/* NB: only used by CIOCKEY2 (rw) */
+	struct crparam	crk_param[CRK_MAXPARAM];
+};
+#define CRK_ALGORITM_MIN	0
+#define CRK_MOD_EXP		0
+#define CRK_MOD_EXP_CRT		1
+#define CRK_DSA_SIGN		2
+#define CRK_DSA_VERIFY		3
+#define CRK_DH_COMPUTE_KEY	4
+#define CRK_ALGORITHM_MAX	4 /* Keep updated - see below */
+
+#define CRF_MOD_EXP		(1 << CRK_MOD_EXP)
+#define CRF_MOD_EXP_CRT		(1 << CRK_MOD_EXP_CRT)
+#define CRF_DSA_SIGN		(1 << CRK_DSA_SIGN)
+#define CRF_DSA_VERIFY		(1 << CRK_DSA_VERIFY)
+#define CRF_DH_COMPUTE_KEY	(1 << CRK_DH_COMPUTE_KEY)
+
+/*
+ * done against open of /dev/crypto, to get a cloned descriptor.
+ * Please use F_SETFD against the cloned descriptor.
+ */
+#define CRIOGET		_IOWR('c', 100, u_int32_t)
+#define CRIOASYMFEAT	CIOCASYMFEAT
+#define CRIOFINDDEV	CIOCFINDDEV
+
+/* the following are done against the cloned descriptor */
+#define CIOCGSESSION	_IOWR('c', 101, struct session_op)
+#define CIOCFSESSION	_IOW('c', 102, u_int32_t)
+#define CIOCCRYPT	_IOWR('c', 103, struct crypt_op)
+#define CIOCKEY		_IOWR('c', 104, struct crypt_kop)
+#define CIOCASYMFEAT	_IOR('c', 105, u_int32_t)
+#define CIOCGSESSION2	_IOWR('c', 106, struct session2_op)
+#define CIOCKEY2	_IOWR('c', 107, struct crypt_kop)
+#define CIOCFINDDEV	_IOWR('c', 108, struct crypt_find_op)
+
+struct cryptotstat {
+	struct timespec	acc;		/* total accumulated time */
+	struct timespec	min;		/* min time */
+	struct timespec	max;		/* max time */
+	u_int32_t	count;		/* number of observations */
+};
+
+struct cryptostats {
+	u_int32_t	cs_ops;		/* symmetric crypto ops submitted */
+	u_int32_t	cs_errs;	/* symmetric crypto ops that failed */
+	u_int32_t	cs_kops;	/* asymetric/key ops submitted */
+	u_int32_t	cs_kerrs;	/* asymetric/key ops that failed */
+	u_int32_t	cs_intrs;	/* crypto swi thread activations */
+	u_int32_t	cs_rets;	/* crypto return thread activations */
+	u_int32_t	cs_blocks;	/* symmetric op driver block */
+	u_int32_t	cs_kblocks;	/* symmetric op driver block */
+	/*
+	 * When CRYPTO_TIMING is defined at compile time and the
+	 * sysctl debug.crypto is set to 1, the crypto system will
+	 * accumulate statistics about how long it takes to process
+	 * crypto requests at various points during processing.
+	 */
+	struct cryptotstat cs_invoke;	/* crypto_dipsatch -> crypto_invoke */
+	struct cryptotstat cs_done;	/* crypto_invoke -> crypto_done */
+	struct cryptotstat cs_cb;	/* crypto_done -> callback */
+	struct cryptotstat cs_finis;	/* callback -> callback return */
+
+	u_int32_t	cs_drops;		/* crypto ops dropped due to congestion */
+};
+
+#ifdef __KERNEL__
+
+/* Standard initialization structure beginning */
+struct cryptoini {
+	int		cri_alg;	/* Algorithm to use */
+	int		cri_klen;	/* Key length, in bits */
+	int		cri_mlen;	/* Number of bytes we want from the
+					   entire hash. 0 means all. */
+	caddr_t		cri_key;	/* key to use */
+	u_int8_t	cri_iv[EALG_MAX_BLOCK_LEN];	/* IV to use */
+	struct cryptoini *cri_next;
+};
+
+/* Describe boundaries of a single crypto operation */
+struct cryptodesc {
+	int		crd_skip;	/* How many bytes to ignore from start */
+	int		crd_len;	/* How many bytes to process */
+	int		crd_inject;	/* Where to inject results, if applicable */
+	int		crd_flags;
+
+#define CRD_F_ENCRYPT		0x01	/* Set when doing encryption */
+#define CRD_F_IV_PRESENT	0x02	/* When encrypting, IV is already in
+					   place, so don't copy. */
+#define CRD_F_IV_EXPLICIT	0x04	/* IV explicitly provided */
+#define CRD_F_DSA_SHA_NEEDED	0x08	/* Compute SHA-1 of buffer for DSA */
+#define CRD_F_KEY_EXPLICIT	0x10	/* Key explicitly provided */
+#define CRD_F_COMP		0x0f    /* Set when doing compression */
+
+	struct cryptoini	CRD_INI; /* Initialization/context data */
+#define crd_iv		CRD_INI.cri_iv
+#define crd_key		CRD_INI.cri_key
+#define crd_alg		CRD_INI.cri_alg
+#define crd_klen	CRD_INI.cri_klen
+#define crd_mlen	CRD_INI.cri_mlen
+
+	struct cryptodesc *crd_next;
+};
+
+/* Structure describing complete operation */
+struct cryptop {
+	struct list_head crp_next;
+	wait_queue_head_t crp_waitq;
+
+	u_int64_t	crp_sid;	/* Session ID */
+	int		crp_ilen;	/* Input data total length */
+	int		crp_olen;	/* Result total length */
+
+	int		crp_etype;	/*
+					 * Error type (zero means no error).
+					 * All error codes except EAGAIN
+					 * indicate possible data corruption (as in,
+					 * the data have been touched). On all
+					 * errors, the crp_sid may have changed
+					 * (reset to a new one), so the caller
+					 * should always check and use the new
+					 * value on future requests.
+					 */
+	int		crp_flags;
+
+#define CRYPTO_F_SKBUF		0x0001	/* Input/output are skbuf chains */
+#define CRYPTO_F_IOV		0x0002	/* Input/output are uio */
+#define CRYPTO_F_REL		0x0004	/* Must return data in same place */
+#define CRYPTO_F_BATCH		0x0008	/* Batch op if possible */
+#define CRYPTO_F_CBIMM		0x0010	/* Do callback immediately */
+#define CRYPTO_F_DONE		0x0020	/* Operation completed */
+#define CRYPTO_F_CBIFSYNC	0x0040	/* Do CBIMM if op is synchronous */
+
+	caddr_t		crp_buf;	/* Data to be processed */
+	caddr_t		crp_opaque;	/* Opaque pointer, passed along */
+	struct cryptodesc *crp_desc;	/* Linked list of processing descriptors */
+
+	int (*crp_callback)(struct cryptop *); /* Callback function */
+};
+
+#define CRYPTO_BUF_CONTIG	0x0
+#define CRYPTO_BUF_IOV		0x1
+#define CRYPTO_BUF_SKBUF		0x2
+
+#define CRYPTO_OP_DECRYPT	0x0
+#define CRYPTO_OP_ENCRYPT	0x1
+
+/*
+ * Hints passed to process methods.
+ */
+#define CRYPTO_HINT_MORE	0x1	/* more ops coming shortly */
+
+struct cryptkop {
+	struct list_head krp_next;
+	wait_queue_head_t krp_waitq;
+
+	int		krp_flags;
+#define CRYPTO_KF_DONE		0x0001	/* Operation completed */
+#define CRYPTO_KF_CBIMM		0x0002	/* Do callback immediately */
+
+	u_int		krp_op;		/* ie. CRK_MOD_EXP or other */
+	u_int		krp_status;	/* return status */
+	u_short		krp_iparams;	/* # of input parameters */
+	u_short		krp_oparams;	/* # of output parameters */
+	u_int		krp_crid;	/* desired device, etc. */
+	u_int32_t	krp_hid;
+	struct crparam	krp_param[CRK_MAXPARAM];	/* kvm */
+	int		(*krp_callback)(struct cryptkop *);
+};
+
+#include "ocf-compat.h"
+
+/*
+ * Session ids are 64 bits.  The lower 32 bits contain a "local id" which
+ * is a driver-private session identifier.  The upper 32 bits contain a
+ * "hardware id" used by the core crypto code to identify the driver and
+ * a copy of the driver's capabilities that can be used by client code to
+ * optimize operation.
+ */
+#define CRYPTO_SESID2HID(_sid)	(((_sid) >> 32) & 0x00ffffff)
+#define CRYPTO_SESID2CAPS(_sid)	(((_sid) >> 32) & 0xff000000)
+#define CRYPTO_SESID2LID(_sid)	(((u_int32_t) (_sid)) & 0xffffffff)
+
+extern	int crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard);
+extern	int crypto_freesession(u_int64_t sid);
+#define CRYPTOCAP_F_HARDWARE	CRYPTO_FLAG_HARDWARE
+#define CRYPTOCAP_F_SOFTWARE	CRYPTO_FLAG_SOFTWARE
+#define CRYPTOCAP_F_SYNC	0x04000000	/* operates synchronously */
+extern	int32_t crypto_get_driverid(device_t dev, int flags);
+extern	int crypto_find_driver(const char *);
+extern	device_t crypto_find_device_byhid(int hid);
+extern	int crypto_getcaps(int hid);
+extern	int crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
+	    u_int32_t flags);
+extern	int crypto_kregister(u_int32_t, int, u_int32_t);
+extern	int crypto_unregister(u_int32_t driverid, int alg);
+extern	int crypto_unregister_all(u_int32_t driverid);
+extern	int crypto_dispatch(struct cryptop *crp);
+extern	int crypto_kdispatch(struct cryptkop *);
+#define CRYPTO_SYMQ	0x1
+#define CRYPTO_ASYMQ	0x2
+extern	int crypto_unblock(u_int32_t, int);
+extern	void crypto_done(struct cryptop *crp);
+extern	void crypto_kdone(struct cryptkop *);
+extern	int crypto_getfeat(int *);
+
+extern	void crypto_freereq(struct cryptop *crp);
+extern	struct cryptop *crypto_getreq(int num);
+
+extern  int crypto_usercrypto;      /* userland may do crypto requests */
+extern  int crypto_userasymcrypto;  /* userland may do asym crypto reqs */
+extern  int crypto_devallowsoft;    /* only use hardware crypto */
+
+/*
+ * random number support,  crypto_unregister_all will unregister
+ */
+extern int crypto_rregister(u_int32_t driverid,
+		int (*read_random)(void *arg, u_int32_t *buf, int len), void *arg);
+extern int crypto_runregister_all(u_int32_t driverid);
+
+/*
+ * Crypto-related utility routines used mainly by drivers.
+ *
+ * XXX these don't really belong here; but for now they're
+ *     kept apart from the rest of the system.
+ */
+struct uio;
+extern	void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp);
+extern	void cuio_copyback(struct uio* uio, int off, int len, caddr_t cp);
+extern	struct iovec *cuio_getptr(struct uio *uio, int loc, int *off);
+
+extern	void crypto_copyback(int flags, caddr_t buf, int off, int size,
+	    caddr_t in);
+extern	void crypto_copydata(int flags, caddr_t buf, int off, int size,
+	    caddr_t out);
+extern	int crypto_apply(int flags, caddr_t buf, int off, int len,
+	    int (*f)(void *, void *, u_int), void *arg);
+
+#endif /* __KERNEL__ */
+#endif /* _CRYPTO_CRYPTO_H_ */
diff --git a/crypto/ocf/cryptosoft.c b/crypto/ocf/cryptosoft.c
new file mode 100644
index 000000000000..d6b5f739a1e2
--- /dev/null
+++ b/crypto/ocf/cryptosoft.c
@@ -0,0 +1,1322 @@
+/*
+ * An OCF module that uses the linux kernel cryptoapi, based on the
+ * original cryptosoft for BSD by Angelos D. Keromytis (angelos@cis.upenn.edu)
+ * but is mostly unrecognisable,
+ *
+ * Written by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2004-2011 David McCullough
+ * Copyright (C) 2004-2005 Intel Corporation.
+ *
+ * LICENSE TERMS
+ *
+ * The free distribution and use of this software in both source and binary
+ * form is allowed (with or without changes) provided that:
+ *
+ *   1. distributions of this source code include the above copyright
+ *      notice, this list of conditions and the following disclaimer;
+ *
+ *   2. distributions in binary form include the above copyright
+ *      notice, this list of conditions and the following disclaimer
+ *      in the documentation and/or other associated materials;
+ *
+ *   3. the copyright holder's name is not used to endorse products
+ *      built using this software without specific written permission.
+ *
+ * ALTERNATIVELY, provided that this notice is retained in full, this product
+ * may be distributed under the terms of the GNU General Public License (GPL),
+ * in which case the provisions of the GPL apply INSTEAD OF those given above.
+ *
+ * DISCLAIMER
+ *
+ * This software is provided 'as is' with no explicit or implied warranties
+ * in respect of its properties, including, but not limited to, correctness
+ * and/or fitness for purpose.
+ * ---------------------------------------------------------------------------
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/crypto.h>
+#include <linux/mm.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
+#include <linux/scatterlist.h>
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
+#include <crypto/hash.h>
+#endif
+
+#include <cryptodev.h>
+#include <uio.h>
+
+struct {
+	softc_device_decl	sc_dev;
+} swcr_softc;
+
+#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
+
+#define SW_TYPE_CIPHER		0x01
+#define SW_TYPE_HMAC		0x02
+#define SW_TYPE_HASH		0x04
+#define SW_TYPE_COMP		0x08
+#define SW_TYPE_BLKCIPHER	0x10
+#define SW_TYPE_ALG_MASK	0x1f
+
+#define SW_TYPE_ASYNC		0x8000
+
+#define SW_TYPE_INUSE		0x10000000
+
+/* We change some of the above if we have an async interface */
+
+#define SW_TYPE_ALG_AMASK	(SW_TYPE_ALG_MASK | SW_TYPE_ASYNC)
+
+#define SW_TYPE_ABLKCIPHER	(SW_TYPE_BLKCIPHER | SW_TYPE_ASYNC)
+#define SW_TYPE_AHASH		(SW_TYPE_HASH | SW_TYPE_ASYNC)
+#define SW_TYPE_AHMAC		(SW_TYPE_HMAC | SW_TYPE_ASYNC)
+
+#define SCATTERLIST_MAX 16
+
+struct swcr_data {
+	struct work_struct  workq;
+	int					sw_type;
+	int					sw_alg;
+	struct crypto_tfm	*sw_tfm;
+	spinlock_t			sw_tfm_lock;
+	union {
+		struct {
+			char *sw_key;
+			int  sw_klen;
+			int  sw_mlen;
+		} hmac;
+		void *sw_comp_buf;
+	} u;
+	struct swcr_data	*sw_next;
+};
+
+struct swcr_req {
+	struct swcr_data	*sw_head;
+	struct swcr_data	*sw;
+	struct cryptop		*crp;
+	struct cryptodesc	*crd;
+	struct scatterlist	 sg[SCATTERLIST_MAX];
+	unsigned char		 iv[EALG_MAX_BLOCK_LEN];
+	char				 result[HASH_MAX_LEN];
+	void				*crypto_req;
+};
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+static kmem_cache_t *swcr_req_cache;
+#else
+static struct kmem_cache *swcr_req_cache;
+#endif
+
+#ifndef CRYPTO_TFM_MODE_CBC
+/*
+ * As of linux-2.6.21 this is no longer defined, and presumably no longer
+ * needed to be passed into the crypto core code.
+ */
+#define	CRYPTO_TFM_MODE_CBC	0
+#define	CRYPTO_TFM_MODE_ECB	0
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+	/*
+	 * Linux 2.6.19 introduced a new Crypto API, setup macro's to convert new
+	 * API into old API.
+	 */
+
+	/* Symmetric/Block Cipher */
+	struct blkcipher_desc
+	{
+		struct crypto_tfm *tfm;
+		void *info;
+	};
+	#define ecb(X)								#X , CRYPTO_TFM_MODE_ECB
+	#define cbc(X)								#X , CRYPTO_TFM_MODE_CBC
+	#define crypto_has_blkcipher(X, Y, Z)		crypto_alg_available(X, 0)
+	#define crypto_blkcipher_cast(X)			X
+	#define crypto_blkcipher_tfm(X)				X
+	#define crypto_alloc_blkcipher(X, Y, Z)		crypto_alloc_tfm(X, mode)
+	#define crypto_blkcipher_ivsize(X)			crypto_tfm_alg_ivsize(X)
+	#define crypto_blkcipher_blocksize(X)		crypto_tfm_alg_blocksize(X)
+	#define crypto_blkcipher_setkey(X, Y, Z)	crypto_cipher_setkey(X, Y, Z)
+	#define crypto_blkcipher_encrypt_iv(W, X, Y, Z)	\
+				crypto_cipher_encrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
+	#define crypto_blkcipher_decrypt_iv(W, X, Y, Z)	\
+				crypto_cipher_decrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
+	#define crypto_blkcipher_set_flags(x, y)	/* nop */
+	#define crypto_free_blkcipher(x)			crypto_free_tfm(x)
+	#define crypto_free_comp					crypto_free_tfm
+	#define crypto_free_hash					crypto_free_tfm
+
+	/* Hash/HMAC/Digest */
+	struct hash_desc
+	{
+		struct crypto_tfm *tfm;
+	};
+	#define hmac(X)							#X , 0
+	#define crypto_has_hash(X, Y, Z)		crypto_alg_available(X, 0)
+	#define crypto_hash_cast(X)				X
+	#define crypto_hash_tfm(X)				X
+	#define crypto_alloc_hash(X, Y, Z)		crypto_alloc_tfm(X, mode)
+	#define crypto_hash_digestsize(X)		crypto_tfm_alg_digestsize(X)
+	#define crypto_hash_digest(W, X, Y, Z)	\
+				crypto_digest_digest((W)->tfm, X, sg_num, Z)
+
+	/* Asymmetric Cipher */
+	#define crypto_has_cipher(X, Y, Z)		crypto_alg_available(X, 0)
+
+	/* Compression */
+	#define crypto_has_comp(X, Y, Z)		crypto_alg_available(X, 0)
+	#define crypto_comp_tfm(X)				X
+	#define crypto_comp_cast(X)				X
+	#define crypto_alloc_comp(X, Y, Z)		crypto_alloc_tfm(X, mode)
+	#define plain(X)	#X , 0
+#else
+	#define ecb(X)	"ecb(" #X ")" , 0
+	#define cbc(X)	"cbc(" #X ")" , 0
+	#define hmac(X)	"hmac(" #X ")" , 0
+	#define plain(X)	#X , 0
+#endif /* if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
+/* no ablkcipher in older kernels */
+#define crypto_alloc_ablkcipher(a,b,c)		(NULL)
+#define crypto_ablkcipher_tfm(x)			((struct crypto_tfm *)(x))
+#define crypto_ablkcipher_set_flags(a, b)	/* nop */
+#define crypto_ablkcipher_setkey(x, y, z)	(-EINVAL)
+#define	crypto_has_ablkcipher(a,b,c)		(0)
+#else
+#define	HAVE_ABLKCIPHER
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
+/* no ahash in older kernels */
+#define crypto_ahash_tfm(x)					((struct crypto_tfm *)(x))
+#define	crypto_alloc_ahash(a,b,c)			(NULL)
+#define	crypto_ahash_digestsize(x)			0
+#else
+#define	HAVE_AHASH
+#endif
+
+struct crypto_details {
+	char *alg_name;
+	int mode;
+	int sw_type;
+};
+
+static struct crypto_details crypto_details[] = {
+	[CRYPTO_DES_CBC]         = { cbc(des),          SW_TYPE_BLKCIPHER, },
+	[CRYPTO_3DES_CBC]        = { cbc(des3_ede),     SW_TYPE_BLKCIPHER, },
+	[CRYPTO_BLF_CBC]         = { cbc(blowfish),     SW_TYPE_BLKCIPHER, },
+	[CRYPTO_CAST_CBC]        = { cbc(cast5),        SW_TYPE_BLKCIPHER, },
+	[CRYPTO_SKIPJACK_CBC]    = { cbc(skipjack),     SW_TYPE_BLKCIPHER, },
+	[CRYPTO_MD5_HMAC]        = { hmac(md5),         SW_TYPE_HMAC, },
+	[CRYPTO_SHA1_HMAC]       = { hmac(sha1),        SW_TYPE_HMAC, },
+	[CRYPTO_RIPEMD160_HMAC]  = { hmac(ripemd160),   SW_TYPE_HMAC, },
+	[CRYPTO_MD5_KPDK]        = { plain(md5-kpdk),   SW_TYPE_HASH, },
+	[CRYPTO_SHA1_KPDK]       = { plain(sha1-kpdk),  SW_TYPE_HASH, },
+	[CRYPTO_AES_CBC]         = { cbc(aes),          SW_TYPE_BLKCIPHER, },
+	[CRYPTO_ARC4]            = { ecb(arc4),         SW_TYPE_BLKCIPHER, },
+	[CRYPTO_MD5]             = { plain(md5),        SW_TYPE_HASH, },
+	[CRYPTO_SHA1]            = { plain(sha1),       SW_TYPE_HASH, },
+	[CRYPTO_NULL_HMAC]       = { hmac(digest_null), SW_TYPE_HMAC, },
+	[CRYPTO_NULL_CBC]        = { cbc(cipher_null),  SW_TYPE_BLKCIPHER, },
+	[CRYPTO_DEFLATE_COMP]    = { plain(deflate),    SW_TYPE_COMP, },
+	[CRYPTO_SHA2_256_HMAC]   = { hmac(sha256),      SW_TYPE_HMAC, },
+	[CRYPTO_SHA2_384_HMAC]   = { hmac(sha384),      SW_TYPE_HMAC, },
+	[CRYPTO_SHA2_512_HMAC]   = { hmac(sha512),      SW_TYPE_HMAC, },
+	[CRYPTO_CAMELLIA_CBC]    = { cbc(camellia),     SW_TYPE_BLKCIPHER, },
+	[CRYPTO_SHA2_256]        = { plain(sha256),     SW_TYPE_HASH, },
+	[CRYPTO_SHA2_384]        = { plain(sha384),     SW_TYPE_HASH, },
+	[CRYPTO_SHA2_512]        = { plain(sha512),     SW_TYPE_HASH, },
+	[CRYPTO_RIPEMD160]       = { plain(ripemd160),  SW_TYPE_HASH, },
+};
+
+int32_t swcr_id = -1;
+module_param(swcr_id, int, 0444);
+MODULE_PARM_DESC(swcr_id, "Read-Only OCF ID for cryptosoft driver");
+
+int swcr_fail_if_compression_grows = 1;
+module_param(swcr_fail_if_compression_grows, int, 0644);
+MODULE_PARM_DESC(swcr_fail_if_compression_grows,
+                "Treat compression that results in more data as a failure");
+
+int swcr_no_ahash = 0;
+module_param(swcr_no_ahash, int, 0644);
+MODULE_PARM_DESC(swcr_no_ahash,
+                "Do not use async hash/hmac even if available");
+
+int swcr_no_ablk = 0;
+module_param(swcr_no_ablk, int, 0644);
+MODULE_PARM_DESC(swcr_no_ablk,
+                "Do not use async blk ciphers even if available");
+
+static struct swcr_data **swcr_sessions = NULL;
+static u_int32_t swcr_sesnum = 0;
+
+static	int swcr_process(device_t, struct cryptop *, int);
+static	int swcr_newsession(device_t, u_int32_t *, struct cryptoini *);
+static	int swcr_freesession(device_t, u_int64_t);
+
+static device_method_t swcr_methods = {
+	/* crypto device methods */
+	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
+	DEVMETHOD(cryptodev_freesession,swcr_freesession),
+	DEVMETHOD(cryptodev_process,	swcr_process),
+};
+
+#define debug swcr_debug
+int swcr_debug = 0;
+module_param(swcr_debug, int, 0644);
+MODULE_PARM_DESC(swcr_debug, "Enable debug");
+
+static void swcr_process_req(struct swcr_req *req);
+
+/*
+ * somethings just need to be run with user context no matter whether
+ * the kernel compression libs use vmalloc/vfree for example.
+ */
+
+typedef struct {
+	struct work_struct wq;
+	void	(*func)(void *arg);
+	void	*arg;
+} execute_later_t;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+static void
+doing_it_now(struct work_struct *wq)
+{
+	execute_later_t *w = container_of(wq, execute_later_t, wq);
+	(w->func)(w->arg);
+	kfree(w);
+}
+#else
+static void
+doing_it_now(void *arg)
+{
+	execute_later_t *w = (execute_later_t *) arg;
+	(w->func)(w->arg);
+	kfree(w);
+}
+#endif
+
+static void
+execute_later(void (fn)(void *), void *arg)
+{
+	execute_later_t *w;
+
+	w = (execute_later_t *) kmalloc(sizeof(execute_later_t), SLAB_ATOMIC);
+	if (w) {
+		memset(w, '\0', sizeof(w));
+		w->func = fn;
+		w->arg = arg;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+		INIT_WORK(&w->wq, doing_it_now);
+#else
+		INIT_WORK(&w->wq, doing_it_now, w);
+#endif
+		schedule_work(&w->wq);
+	}
+}
+
+/*
+ * Generate a new software session.
+ */
+static int
+swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
+{
+	struct swcr_data **swd;
+	u_int32_t i;
+	int error;
+	char *algo;
+	int mode;
+
+	dprintk("%s()\n", __FUNCTION__);
+	if (sid == NULL || cri == NULL) {
+		dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
+		return EINVAL;
+	}
+
+	if (swcr_sessions) {
+		for (i = 1; i < swcr_sesnum; i++)
+			if (swcr_sessions[i] == NULL)
+				break;
+	} else
+		i = 1;		/* NB: to silence compiler warning */
+
+	if (swcr_sessions == NULL || i == swcr_sesnum) {
+		if (swcr_sessions == NULL) {
+			i = 1; /* We leave swcr_sessions[0] empty */
+			swcr_sesnum = CRYPTO_SW_SESSIONS;
+		} else
+			swcr_sesnum *= 2;
+
+		swd = kmalloc(swcr_sesnum * sizeof(struct swcr_data *), SLAB_ATOMIC);
+		if (swd == NULL) {
+			/* Reset session number */
+			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
+				swcr_sesnum = 0;
+			else
+				swcr_sesnum /= 2;
+			dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
+			return ENOBUFS;
+		}
+		memset(swd, 0, swcr_sesnum * sizeof(struct swcr_data *));
+
+		/* Copy existing sessions */
+		if (swcr_sessions) {
+			memcpy(swd, swcr_sessions,
+			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
+			kfree(swcr_sessions);
+		}
+
+		swcr_sessions = swd;
+	}
+
+	swd = &swcr_sessions[i];
+	*sid = i;
+
+	while (cri) {
+		*swd = (struct swcr_data *) kmalloc(sizeof(struct swcr_data),
+				SLAB_ATOMIC);
+		if (*swd == NULL) {
+			swcr_freesession(NULL, i);
+			dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
+			return ENOBUFS;
+		}
+		memset(*swd, 0, sizeof(struct swcr_data));
+
+		if (cri->cri_alg < 0 ||
+				cri->cri_alg>=sizeof(crypto_details)/sizeof(crypto_details[0])){
+			printk("cryptosoft: Unknown algorithm 0x%x\n", cri->cri_alg);
+			swcr_freesession(NULL, i);
+			return EINVAL;
+		}
+
+		algo = crypto_details[cri->cri_alg].alg_name;
+		if (!algo || !*algo) {
+			printk("cryptosoft: Unsupported algorithm 0x%x\n", cri->cri_alg);
+			swcr_freesession(NULL, i);
+			return EINVAL;
+		}
+
+		mode = crypto_details[cri->cri_alg].mode;
+		(*swd)->sw_type = crypto_details[cri->cri_alg].sw_type;
+		(*swd)->sw_alg = cri->cri_alg;
+
+		spin_lock_init(&(*swd)->sw_tfm_lock);
+
+		/* Algorithm specific configuration */
+		switch (cri->cri_alg) {
+		case CRYPTO_NULL_CBC:
+			cri->cri_klen = 0; /* make it work with crypto API */
+			break;
+		default:
+			break;
+		}
+
+		if ((*swd)->sw_type & SW_TYPE_BLKCIPHER) {
+			dprintk("%s crypto_alloc_*blkcipher(%s, 0x%x)\n", __FUNCTION__,
+					algo, mode);
+
+			/* try async first */
+			(*swd)->sw_tfm = swcr_no_ablk ? NULL :
+					crypto_ablkcipher_tfm(crypto_alloc_ablkcipher(algo, 0, 0));
+			if ((*swd)->sw_tfm && !IS_ERR((*swd)->sw_tfm)) {
+				dprintk("%s %s cipher is async\n", __FUNCTION__, algo);
+				(*swd)->sw_type |= SW_TYPE_ASYNC;
+			} else {
+				(*swd)->sw_tfm = crypto_blkcipher_tfm(
+						crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC));
+				if ((*swd)->sw_tfm && !IS_ERR((*swd)->sw_tfm))
+					dprintk("%s %s cipher is sync\n", __FUNCTION__, algo);
+			}
+			if (!(*swd)->sw_tfm || IS_ERR((*swd)->sw_tfm)) {
+				int err;
+				dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s, 0x%x)\n",
+						algo,mode);
+				err = IS_ERR((*swd)->sw_tfm) ? -(PTR_ERR((*swd)->sw_tfm)) : EINVAL;
+				(*swd)->sw_tfm = NULL; /* ensure NULL */
+				swcr_freesession(NULL, i);
+				return err;
+			}
+
+			if (debug) {
+				dprintk("%s key:cri->cri_klen=%d,(cri->cri_klen + 7)/8=%d",
+						__FUNCTION__, cri->cri_klen, (cri->cri_klen + 7) / 8);
+				for (i = 0; i < (cri->cri_klen + 7) / 8; i++)
+					dprintk("%s0x%x", (i % 8) ? " " : "\n    ",
+							cri->cri_key[i] & 0xff);
+				dprintk("\n");
+			}
+			if ((*swd)->sw_type & SW_TYPE_ASYNC) {
+				/* OCF doesn't enforce keys */
+				crypto_ablkcipher_set_flags(
+						__crypto_ablkcipher_cast((*swd)->sw_tfm),
+							CRYPTO_TFM_REQ_WEAK_KEY);
+				error = crypto_ablkcipher_setkey(
+							__crypto_ablkcipher_cast((*swd)->sw_tfm),
+								cri->cri_key, (cri->cri_klen + 7) / 8);
+			} else {
+				/* OCF doesn't enforce keys */
+				crypto_blkcipher_set_flags(
+						crypto_blkcipher_cast((*swd)->sw_tfm),
+							CRYPTO_TFM_REQ_WEAK_KEY);
+				error = crypto_blkcipher_setkey(
+							crypto_blkcipher_cast((*swd)->sw_tfm),
+								cri->cri_key, (cri->cri_klen + 7) / 8);
+			}
+			if (error) {
+				printk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n", error,
+						(*swd)->sw_tfm->crt_flags);
+				swcr_freesession(NULL, i);
+				return error;
+			}
+		} else if ((*swd)->sw_type & (SW_TYPE_HMAC | SW_TYPE_HASH)) {
+			dprintk("%s crypto_alloc_*hash(%s, 0x%x)\n", __FUNCTION__,
+					algo, mode);
+
+			/* try async first */
+			(*swd)->sw_tfm = swcr_no_ahash ? NULL :
+					crypto_ahash_tfm(crypto_alloc_ahash(algo, 0, 0));
+			if ((*swd)->sw_tfm) {
+				dprintk("%s %s hash is async\n", __FUNCTION__, algo);
+				(*swd)->sw_type |= SW_TYPE_ASYNC;
+			} else {
+				dprintk("%s %s hash is sync\n", __FUNCTION__, algo);
+				(*swd)->sw_tfm = crypto_hash_tfm(
+						crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC));
+			}
+
+			if (!(*swd)->sw_tfm) {
+				dprintk("cryptosoft: crypto_alloc_hash failed(%s,0x%x)\n",
+						algo, mode);
+				swcr_freesession(NULL, i);
+				return EINVAL;
+			}
+
+			(*swd)->u.hmac.sw_klen = (cri->cri_klen + 7) / 8;
+			(*swd)->u.hmac.sw_key = (char *)kmalloc((*swd)->u.hmac.sw_klen,
+					SLAB_ATOMIC);
+			if ((*swd)->u.hmac.sw_key == NULL) {
+				swcr_freesession(NULL, i);
+				dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
+				return ENOBUFS;
+			}
+			memcpy((*swd)->u.hmac.sw_key, cri->cri_key, (*swd)->u.hmac.sw_klen);
+			if (cri->cri_mlen) {
+				(*swd)->u.hmac.sw_mlen = cri->cri_mlen;
+			} else if ((*swd)->sw_type & SW_TYPE_ASYNC) {
+				(*swd)->u.hmac.sw_mlen = crypto_ahash_digestsize(
+						__crypto_ahash_cast((*swd)->sw_tfm));
+			} else  {
+				(*swd)->u.hmac.sw_mlen = crypto_hash_digestsize(
+						crypto_hash_cast((*swd)->sw_tfm));
+			}
+		} else if ((*swd)->sw_type & SW_TYPE_COMP) {
+			(*swd)->sw_tfm = crypto_comp_tfm(
+					crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC));
+			if (!(*swd)->sw_tfm) {
+				dprintk("cryptosoft: crypto_alloc_comp failed(%s,0x%x)\n",
+						algo, mode);
+				swcr_freesession(NULL, i);
+				return EINVAL;
+			}
+			(*swd)->u.sw_comp_buf = kmalloc(CRYPTO_MAX_DATA_LEN, SLAB_ATOMIC);
+			if ((*swd)->u.sw_comp_buf == NULL) {
+				swcr_freesession(NULL, i);
+				dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
+				return ENOBUFS;
+			}
+		} else {
+			printk("cryptosoft: Unhandled sw_type %d\n", (*swd)->sw_type);
+			swcr_freesession(NULL, i);
+			return EINVAL;
+		}
+
+		cri = cri->cri_next;
+		swd = &((*swd)->sw_next);
+	}
+	return 0;
+}
+
+/*
+ * Free a session.
+ */
+static int
+swcr_freesession(device_t dev, u_int64_t tid)
+{
+	struct swcr_data *swd;
+	u_int32_t sid = CRYPTO_SESID2LID(tid);
+
+	dprintk("%s()\n", __FUNCTION__);
+	if (sid > swcr_sesnum || swcr_sessions == NULL ||
+			swcr_sessions[sid] == NULL) {
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		return(EINVAL);
+	}
+
+	/* Silently accept and return */
+	if (sid == 0)
+		return(0);
+
+	while ((swd = swcr_sessions[sid]) != NULL) {
+		swcr_sessions[sid] = swd->sw_next;
+		if (swd->sw_tfm) {
+			switch (swd->sw_type & SW_TYPE_ALG_AMASK) {
+#ifdef HAVE_AHASH
+			case SW_TYPE_AHMAC:
+			case SW_TYPE_AHASH:
+				crypto_free_ahash(__crypto_ahash_cast(swd->sw_tfm));
+				break;
+#endif
+#ifdef HAVE_ABLKCIPHER
+			case SW_TYPE_ABLKCIPHER:
+				crypto_free_ablkcipher(__crypto_ablkcipher_cast(swd->sw_tfm));
+				break;
+#endif
+			case SW_TYPE_BLKCIPHER:
+				crypto_free_blkcipher(crypto_blkcipher_cast(swd->sw_tfm));
+				break;
+			case SW_TYPE_HMAC:
+			case SW_TYPE_HASH:
+				crypto_free_hash(crypto_hash_cast(swd->sw_tfm));
+				break;
+			case SW_TYPE_COMP:
+				if (in_interrupt())
+					execute_later((void (*)(void *))crypto_free_comp, (void *)crypto_comp_cast(swd->sw_tfm));
+				else
+					crypto_free_comp(crypto_comp_cast(swd->sw_tfm));
+				break;
+			default:
+				crypto_free_tfm(swd->sw_tfm);
+				break;
+			}
+			swd->sw_tfm = NULL;
+		}
+		if (swd->sw_type & SW_TYPE_COMP) {
+			if (swd->u.sw_comp_buf)
+				kfree(swd->u.sw_comp_buf);
+		} else {
+			if (swd->u.hmac.sw_key)
+				kfree(swd->u.hmac.sw_key);
+		}
+		kfree(swd);
+	}
+	return 0;
+}
+
+static void swcr_process_req_complete(struct swcr_req *req)
+{
+	dprintk("%s()\n", __FUNCTION__);
+
+	if (req->sw->sw_type & SW_TYPE_INUSE) {
+		unsigned long flags;
+		spin_lock_irqsave(&req->sw->sw_tfm_lock, flags);
+		req->sw->sw_type &= ~SW_TYPE_INUSE;
+		spin_unlock_irqrestore(&req->sw->sw_tfm_lock, flags);
+	}
+
+	if (req->crp->crp_etype)
+		goto done;
+
+	switch (req->sw->sw_type & SW_TYPE_ALG_AMASK) {
+#if defined(HAVE_AHASH)
+	case SW_TYPE_AHMAC:
+	case SW_TYPE_AHASH:
+		crypto_copyback(req->crp->crp_flags, req->crp->crp_buf,
+				req->crd->crd_inject, req->sw->u.hmac.sw_mlen, req->result);
+		ahash_request_free(req->crypto_req);
+		break;
+#endif
+#if defined(HAVE_ABLKCIPHER)
+	case SW_TYPE_ABLKCIPHER:
+		ablkcipher_request_free(req->crypto_req);
+		break;
+#endif
+	case SW_TYPE_CIPHER:
+	case SW_TYPE_HMAC:
+	case SW_TYPE_HASH:
+	case SW_TYPE_COMP:
+	case SW_TYPE_BLKCIPHER:
+		break;
+	default:
+		req->crp->crp_etype = EINVAL;
+		goto done;
+	}
+
+	req->crd = req->crd->crd_next;
+	if (req->crd) {
+		swcr_process_req(req);
+		return;
+	}
+
+done:
+	dprintk("%s crypto_done %p\n", __FUNCTION__, req);
+	crypto_done(req->crp);
+	kmem_cache_free(swcr_req_cache, req);
+}
+
+#if defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH)
+static void swcr_process_callback(struct crypto_async_request *creq, int err)
+{
+	struct swcr_req *req = creq->data;
+
+	dprintk("%s()\n", __FUNCTION__);
+	if (err) {
+		if (err == -EINPROGRESS)
+			return;
+		dprintk("%s() fail %d\n", __FUNCTION__, -err);
+		req->crp->crp_etype = -err;
+	}
+
+	swcr_process_req_complete(req);
+}
+#endif /* defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH) */
+
+
+static void swcr_process_req(struct swcr_req *req)
+{
+	struct swcr_data *sw;
+	struct cryptop *crp = req->crp;
+	struct cryptodesc *crd = req->crd;
+	struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
+	struct uio *uiop = (struct uio *) crp->crp_buf;
+	int sg_num, sg_len, skip;
+
+	dprintk("%s()\n", __FUNCTION__);
+
+	/*
+	 * Find the crypto context.
+	 *
+	 * XXX Note that the logic here prevents us from having
+	 * XXX the same algorithm multiple times in a session
+	 * XXX (or rather, we can but it won't give us the right
+	 * XXX results). To do that, we'd need some way of differentiating
+	 * XXX between the various instances of an algorithm (so we can
+	 * XXX locate the correct crypto context).
+	 */
+	for (sw = req->sw_head; sw && sw->sw_alg != crd->crd_alg; sw = sw->sw_next)
+		;
+
+	/* No such context ? */
+	if (sw == NULL) {
+		crp->crp_etype = EINVAL;
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		goto done;
+	}
+
+	/*
+	 * for some types we need to ensure only one user as info is stored in
+	 * the tfm during an operation that can get corrupted
+	 */
+	switch (sw->sw_type & SW_TYPE_ALG_AMASK) {
+#ifdef HAVE_AHASH
+	case SW_TYPE_AHMAC:
+	case SW_TYPE_AHASH:
+#endif
+	case SW_TYPE_HMAC:
+	case SW_TYPE_HASH: {
+		unsigned long flags;
+		spin_lock_irqsave(&sw->sw_tfm_lock, flags);
+		if (sw->sw_type & SW_TYPE_INUSE) {
+			spin_unlock_irqrestore(&sw->sw_tfm_lock, flags);
+			execute_later((void (*)(void *))swcr_process_req, (void *)req);
+			return;
+		}
+		sw->sw_type |= SW_TYPE_INUSE;
+		spin_unlock_irqrestore(&sw->sw_tfm_lock, flags);
+		} break;
+	}
+
+	req->sw = sw;
+	skip = crd->crd_skip;
+
+	/*
+	 * setup the SG list skip from the start of the buffer
+	 */
+	memset(req->sg, 0, sizeof(req->sg));
+	sg_init_table(req->sg, SCATTERLIST_MAX);
+	if (crp->crp_flags & CRYPTO_F_SKBUF) {
+		int i, len;
+
+		sg_num = 0;
+		sg_len = 0;
+
+		if (skip < skb_headlen(skb)) {
+			len = skb_headlen(skb) - skip;
+			if (len + sg_len > crd->crd_len)
+				len = crd->crd_len - sg_len;
+			sg_set_page(&req->sg[sg_num],
+				virt_to_page(skb->data + skip), len,
+				offset_in_page(skb->data + skip));
+			sg_len += len;
+			sg_num++;
+			skip = 0;
+		} else
+			skip -= skb_headlen(skb);
+
+		for (i = 0; sg_len < crd->crd_len &&
+					i < skb_shinfo(skb)->nr_frags &&
+					sg_num < SCATTERLIST_MAX; i++) {
+			if (skip < skb_shinfo(skb)->frags[i].size) {
+				len = skb_shinfo(skb)->frags[i].size - skip;
+				if (len + sg_len > crd->crd_len)
+					len = crd->crd_len - sg_len;
+				sg_set_page(&req->sg[sg_num],
+					skb_frag_page(&skb_shinfo(skb)->frags[i]),
+					len,
+					skb_shinfo(skb)->frags[i].page_offset + skip);
+				sg_len += len;
+				sg_num++;
+				skip = 0;
+			} else
+				skip -= skb_shinfo(skb)->frags[i].size;
+		}
+	} else if (crp->crp_flags & CRYPTO_F_IOV) {
+		int len;
+
+		sg_len = 0;
+		for (sg_num = 0; sg_len < crd->crd_len &&
+				sg_num < uiop->uio_iovcnt &&
+				sg_num < SCATTERLIST_MAX; sg_num++) {
+			if (skip <= uiop->uio_iov[sg_num].iov_len) {
+				len = uiop->uio_iov[sg_num].iov_len - skip;
+				if (len + sg_len > crd->crd_len)
+					len = crd->crd_len - sg_len;
+				sg_set_page(&req->sg[sg_num],
+					virt_to_page(uiop->uio_iov[sg_num].iov_base+skip),
+					len,
+					offset_in_page(uiop->uio_iov[sg_num].iov_base+skip));
+				sg_len += len;
+				skip = 0;
+			} else
+				skip -= uiop->uio_iov[sg_num].iov_len;
+		}
+	} else {
+		sg_len = (crp->crp_ilen - skip);
+		if (sg_len > crd->crd_len)
+			sg_len = crd->crd_len;
+		sg_set_page(&req->sg[0], virt_to_page(crp->crp_buf + skip),
+			sg_len, offset_in_page(crp->crp_buf + skip));
+		sg_num = 1;
+	}
+	if (sg_num > 0)
+		sg_mark_end(&req->sg[sg_num-1]);
+
+	switch (sw->sw_type & SW_TYPE_ALG_AMASK) {
+
+#ifdef HAVE_AHASH
+	case SW_TYPE_AHMAC:
+	case SW_TYPE_AHASH:
+		{
+		int ret;
+
+		/* check we have room for the result */
+		if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
+			dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
+					"digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
+					crd->crd_inject, sw->u.hmac.sw_mlen);
+			crp->crp_etype = EINVAL;
+			goto done;
+		}
+
+		req->crypto_req =
+				ahash_request_alloc(__crypto_ahash_cast(sw->sw_tfm),GFP_ATOMIC);
+		if (!req->crypto_req) {
+			crp->crp_etype = ENOMEM;
+			dprintk("%s,%d: ENOMEM ahash_request_alloc", __FILE__, __LINE__);
+			goto done;
+		}
+
+		ahash_request_set_callback(req->crypto_req,
+				CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);
+
+		memset(req->result, 0, sizeof(req->result));
+
+		if (sw->sw_type & SW_TYPE_AHMAC)
+			crypto_ahash_setkey(__crypto_ahash_cast(sw->sw_tfm),
+					sw->u.hmac.sw_key, sw->u.hmac.sw_klen);
+		ahash_request_set_crypt(req->crypto_req, req->sg, req->result, sg_len);
+		ret = crypto_ahash_digest(req->crypto_req);
+		switch (ret) {
+		case -EINPROGRESS:
+		case -EBUSY:
+			return;
+		default:
+		case 0:
+			dprintk("hash OP %s %d\n", ret ? "failed" : "success", ret);
+			crp->crp_etype = ret;
+			goto done;
+		}
+		} break;
+#endif /* HAVE_AHASH */
+
+#ifdef HAVE_ABLKCIPHER
+	case SW_TYPE_ABLKCIPHER: {
+		int ret;
+		unsigned char *ivp = req->iv;
+		int ivsize =
+			crypto_ablkcipher_ivsize(__crypto_ablkcipher_cast(sw->sw_tfm));
+
+		if (sg_len < crypto_ablkcipher_blocksize(
+				__crypto_ablkcipher_cast(sw->sw_tfm))) {
+			crp->crp_etype = EINVAL;
+			dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
+					sg_len, crypto_ablkcipher_blocksize(
+						__crypto_ablkcipher_cast(sw->sw_tfm)));
+			goto done;
+		}
+
+		if (ivsize > sizeof(req->iv)) {
+			crp->crp_etype = EINVAL;
+			dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+			goto done;
+		}
+
+		req->crypto_req = ablkcipher_request_alloc(
+				__crypto_ablkcipher_cast(sw->sw_tfm), GFP_ATOMIC);
+		if (!req->crypto_req) {
+			crp->crp_etype = ENOMEM;
+			dprintk("%s,%d: ENOMEM ablkcipher_request_alloc",
+					__FILE__, __LINE__);
+			goto done;
+		}
+
+		ablkcipher_request_set_callback(req->crypto_req,
+				CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);
+
+		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
+			int i, error;
+
+			if (debug) {
+				dprintk("%s key:", __FUNCTION__);
+				for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
+					dprintk("%s0x%x", (i % 8) ? " " : "\n    ",
+							crd->crd_key[i] & 0xff);
+				dprintk("\n");
+			}
+			/* OCF doesn't enforce keys */
+			crypto_ablkcipher_set_flags(__crypto_ablkcipher_cast(sw->sw_tfm),
+					CRYPTO_TFM_REQ_WEAK_KEY);
+			error = crypto_ablkcipher_setkey(
+						__crypto_ablkcipher_cast(sw->sw_tfm), crd->crd_key,
+						(crd->crd_klen + 7) / 8);
+			if (error) {
+				dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
+						error, sw->sw_tfm->crt_flags);
+				crp->crp_etype = -error;
+			}
+		}
+
+		if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
+
+			if (crd->crd_flags & CRD_F_IV_EXPLICIT)
+				ivp = crd->crd_iv;
+			else
+				get_random_bytes(ivp, ivsize);
+			/*
+			 * do we have to copy the IV back to the buffer ?
+			 */
+			if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
+				crypto_copyback(crp->crp_flags, crp->crp_buf,
+						crd->crd_inject, ivsize, (caddr_t)ivp);
+			}
+			ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
+					sg_len, ivp);
+			ret = crypto_ablkcipher_encrypt(req->crypto_req);
+
+		} else { /*decrypt */
+
+			if (crd->crd_flags & CRD_F_IV_EXPLICIT)
+				ivp = crd->crd_iv;
+			else
+				crypto_copydata(crp->crp_flags, crp->crp_buf,
+						crd->crd_inject, ivsize, (caddr_t)ivp);
+			ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
+					sg_len, ivp);
+			ret = crypto_ablkcipher_decrypt(req->crypto_req);
+		}
+
+		switch (ret) {
+		case -EINPROGRESS:
+		case -EBUSY:
+			return;
+		default:
+		case 0:
+			dprintk("crypto OP %s %d\n", ret ? "failed" : "success", ret);
+			crp->crp_etype = ret;
+			goto done;
+		}
+		} break;
+#endif /* HAVE_ABLKCIPHER */
+
+	case SW_TYPE_BLKCIPHER: {
+		unsigned char iv[EALG_MAX_BLOCK_LEN];
+		unsigned char *ivp = iv;
+		struct blkcipher_desc desc;
+		int ivsize = crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));
+
+		if (sg_len < crypto_blkcipher_blocksize(
+				crypto_blkcipher_cast(sw->sw_tfm))) {
+			crp->crp_etype = EINVAL;
+			dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
+					sg_len, crypto_blkcipher_blocksize(
+						crypto_blkcipher_cast(sw->sw_tfm)));
+			goto done;
+		}
+
+		if (ivsize > sizeof(iv)) {
+			crp->crp_etype = EINVAL;
+			dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+			goto done;
+		}
+
+		if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
+			int i, error;
+
+			if (debug) {
+				dprintk("%s key:", __FUNCTION__);
+				for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
+					dprintk("%s0x%x", (i % 8) ? " " : "\n    ",
+							crd->crd_key[i] & 0xff);
+				dprintk("\n");
+			}
+			/* OCF doesn't enforce keys */
+			crypto_blkcipher_set_flags(crypto_blkcipher_cast(sw->sw_tfm),
+					CRYPTO_TFM_REQ_WEAK_KEY);
+			error = crypto_blkcipher_setkey(
+						crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
+						(crd->crd_klen + 7) / 8);
+			if (error) {
+				dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
+						error, sw->sw_tfm->crt_flags);
+				crp->crp_etype = -error;
+			}
+		}
+
+		memset(&desc, 0, sizeof(desc));
+		desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);
+
+		if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
+
+			if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
+				ivp = crd->crd_iv;
+			} else {
+				get_random_bytes(ivp, ivsize);
+			}
+			/*
+			 * do we have to copy the IV back to the buffer ?
+			 */
+			if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
+				crypto_copyback(crp->crp_flags, crp->crp_buf,
+						crd->crd_inject, ivsize, (caddr_t)ivp);
+			}
+			desc.info = ivp;
+			crypto_blkcipher_encrypt_iv(&desc, req->sg, req->sg, sg_len);
+
+		} else { /*decrypt */
+
+			if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
+				ivp = crd->crd_iv;
+			} else {
+				crypto_copydata(crp->crp_flags, crp->crp_buf,
+						crd->crd_inject, ivsize, (caddr_t)ivp);
+			}
+			desc.info = ivp;
+			crypto_blkcipher_decrypt_iv(&desc, req->sg, req->sg, sg_len);
+		}
+		} break;
+
+	case SW_TYPE_HMAC:
+	case SW_TYPE_HASH:
+		{
+		char result[HASH_MAX_LEN];
+		struct hash_desc desc;
+
+		/* check we have room for the result */
+		if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
+			dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
+					"digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
+					crd->crd_inject, sw->u.hmac.sw_mlen);
+			crp->crp_etype = EINVAL;
+			goto done;
+		}
+
+		memset(&desc, 0, sizeof(desc));
+		desc.tfm = crypto_hash_cast(sw->sw_tfm);
+
+		memset(result, 0, sizeof(result));
+
+		if (sw->sw_type & SW_TYPE_HMAC) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+			crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
+					req->sg, sg_num, result);
+#else
+			crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
+					sw->u.hmac.sw_klen);
+			crypto_hash_digest(&desc, req->sg, sg_len, result);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
+
+		} else { /* SW_TYPE_HASH */
+			crypto_hash_digest(&desc, req->sg, sg_len, result);
+		}
+
+		crypto_copyback(crp->crp_flags, crp->crp_buf,
+				crd->crd_inject, sw->u.hmac.sw_mlen, result);
+		}
+		break;
+
+	case SW_TYPE_COMP: {
+		void *ibuf = NULL;
+		void *obuf = sw->u.sw_comp_buf;
+		int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
+		int ret = 0;
+
+		/*
+		 * we need to use an additional copy if there is more than one
+		 * input chunk since the kernel comp routines do not handle
+		 * SG yet.  Otherwise we just use the input buffer as is.
+		 * Rather than allocate another buffer we just split the tmp
+		 * buffer we already have.
+		 * Perhaps we should just use zlib directly ?
+		 */
+		if (sg_num > 1) {
+			int blk;
+
+			ibuf = obuf;
+			for (blk = 0; blk < sg_num; blk++) {
+				memcpy(obuf, sg_virt(&req->sg[blk]),
+						req->sg[blk].length);
+				obuf += req->sg[blk].length;
+			}
+			olen -= sg_len;
+		} else
+			ibuf = sg_virt(&req->sg[0]);
+
+		if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
+			ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
+					ibuf, ilen, obuf, &olen);
+			if (!ret && olen > crd->crd_len) {
+				dprintk("cryptosoft: ERANGE compress %d into %d\n",
+						crd->crd_len, olen);
+				if (swcr_fail_if_compression_grows)
+					ret = ERANGE;
+			}
+		} else { /* decompress */
+			ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
+					ibuf, ilen, obuf, &olen);
+			if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
+				dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
+						"space for %d,at offset %d\n",
+						crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
+				ret = ETOOSMALL;
+			}
+		}
+		if (ret)
+			dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);
+
+		/*
+		 * on success copy result back,
+		 * linux crpyto API returns -errno,  we need to fix that
+		 */
+		crp->crp_etype = ret < 0 ? -ret : ret;
+		if (ret == 0) {
+			/* copy back the result and return it's size */
+			crypto_copyback(crp->crp_flags, crp->crp_buf,
+					crd->crd_inject, olen, obuf);
+			crp->crp_olen = olen;
+		}
+		} break;
+
+	default:
+		/* Unknown/unsupported algorithm */
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		crp->crp_etype = EINVAL;
+		goto done;
+	}
+
+done:
+	swcr_process_req_complete(req);
+}
+
+
+/*
+ * Process a crypto request.
+ */
+static int
+swcr_process(device_t dev, struct cryptop *crp, int hint)
+{
+	struct swcr_req *req = NULL;
+	u_int32_t lid;
+
+	dprintk("%s()\n", __FUNCTION__);
+	/* Sanity check */
+	if (crp == NULL) {
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		return EINVAL;
+	}
+
+	crp->crp_etype = 0;
+
+	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		crp->crp_etype = EINVAL;
+		goto done;
+	}
+
+	lid = crp->crp_sid & 0xffffffff;
+	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL ||
+			swcr_sessions[lid] == NULL) {
+		crp->crp_etype = ENOENT;
+		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
+		goto done;
+	}
+
+	/*
+	 * do some error checking outside of the loop for SKB and IOV processing
+	 * this leaves us with valid skb or uiop pointers for later
+	 */
+	if (crp->crp_flags & CRYPTO_F_SKBUF) {
+		struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
+		if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
+			printk("%s,%d: %d nr_frags > SCATTERLIST_MAX", __FILE__, __LINE__,
+					skb_shinfo(skb)->nr_frags);
+			goto done;
+		}
+	} else if (crp->crp_flags & CRYPTO_F_IOV) {
+		struct uio *uiop = (struct uio *) crp->crp_buf;
+		if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
+			printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX", __FILE__, __LINE__,
+					uiop->uio_iovcnt);
+			goto done;
+		}
+	}
+
+	/*
+	 * setup a new request ready for queuing
+	 */
+	req = kmem_cache_alloc(swcr_req_cache, SLAB_ATOMIC);
+	if (req == NULL) {
+		dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
+		crp->crp_etype = ENOMEM;
+		goto done;
+	}
+	memset(req, 0, sizeof(*req));
+
+	req->sw_head = swcr_sessions[lid];
+	req->crp = crp;
+	req->crd = crp->crp_desc;
+
+	swcr_process_req(req);
+	return 0;
+
+done:
+	crypto_done(crp);
+	if (req)
+		kmem_cache_free(swcr_req_cache, req);
+	return 0;
+}
+
+
+static int
+cryptosoft_init(void)
+{
+	int i, sw_type, mode;
+	char *algo;
+
+	dprintk("%s(%p)\n", __FUNCTION__, cryptosoft_init);
+
+	swcr_req_cache = kmem_cache_create("cryptosoft_req",
+				sizeof(struct swcr_req), 0, SLAB_HWCACHE_ALIGN, NULL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+				, NULL
+#endif
+				);
+	if (!swcr_req_cache) {
+		printk("cryptosoft: failed to create request cache\n");
+		return -ENOENT;
+	}
+
+	softc_device_init(&swcr_softc, "cryptosoft", 0, swcr_methods);
+
+	swcr_id = crypto_get_driverid(softc_get_device(&swcr_softc),
+			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
+	if (swcr_id < 0) {
+		printk("cryptosoft: Software crypto device cannot initialize!");
+		return -ENODEV;
+	}
+
+#define	REGISTER(alg) \
+		crypto_register(swcr_id, alg, 0,0)
+
+	for (i = 0; i < sizeof(crypto_details)/sizeof(crypto_details[0]); i++) {
+		int found;
+
+		algo = crypto_details[i].alg_name;
+		if (!algo || !*algo) {
+			dprintk("%s:Algorithm %d not supported\n", __FUNCTION__, i);
+			continue;
+		}
+
+		mode = crypto_details[i].mode;
+		sw_type = crypto_details[i].sw_type;
+
+		found = 0;
+		switch (sw_type & SW_TYPE_ALG_MASK) {
+		case SW_TYPE_CIPHER:
+			found = crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC);
+			break;
+		case SW_TYPE_HMAC:
+			found = crypto_has_hash(algo, 0, swcr_no_ahash?CRYPTO_ALG_ASYNC:0);
+			break;
+		case SW_TYPE_HASH:
+			found = crypto_has_hash(algo, 0, swcr_no_ahash?CRYPTO_ALG_ASYNC:0);
+			break;
+		case SW_TYPE_COMP:
+			found = crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC);
+			break;
+		case SW_TYPE_BLKCIPHER:
+			found = crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
+			if (!found && !swcr_no_ablk)
+				found = crypto_has_ablkcipher(algo, 0, 0);
+			break;
+		}
+		if (found) {
+			REGISTER(i);
+		} else {
+			dprintk("%s:Algorithm Type %d not supported (algorithm %d:'%s')\n",
+					__FUNCTION__, sw_type, i, algo);
+		}
+	}
+	return 0;
+}
+
+static void
+cryptosoft_exit(void)
+{
+	dprintk("%s()\n", __FUNCTION__);
+	crypto_unregister_all(swcr_id);
+	swcr_id = -1;
+	kmem_cache_destroy(swcr_req_cache);
+}
+
+late_initcall(cryptosoft_init);
+module_exit(cryptosoft_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
+MODULE_DESCRIPTION("Cryptosoft (OCF module for kernel crypto)");
diff --git a/crypto/ocf/ep80579/Makefile b/crypto/ocf/ep80579/Makefile
new file mode 100644
index 000000000000..e488374c0c93
--- /dev/null
+++ b/crypto/ocf/ep80579/Makefile
@@ -0,0 +1,119 @@
+#########################################################################
+#
+#  Targets supported
+#  all     - builds everything and installs
+#  install - identical to all
+#  depend  - build dependencies
+#  clean   - clears derived objects except the .depend files
+#  distclean- clears all derived objects and the .depend file
+#
+# @par
+# This file is provided under a dual BSD/GPLv2 license.  When using or
+#   redistributing this file, you may do so under either license.
+#
+#   GPL LICENSE SUMMARY
+#
+#   Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
+#
+#   This program is free software; you can redistribute it and/or modify
+#   it under the terms of version 2 of the GNU General Public License as
+#   published by the Free Software Foundation.
+#
+#   This program is distributed in the hope that it will be useful, but
+#   WITHOUT ANY WARRANTY; without even the implied warranty of
+#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#   General Public License for more details.
+#
+#   You should have received a copy of the GNU General Public License
+#   along with this program; if not, write to the Free Software
+#   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#   The full GNU General Public License is included in this distribution
+#   in the file called LICENSE.GPL.
+#
+#   Contact Information:
+#   Intel Corporation
+#
+#   BSD LICENSE
+#
+#   Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
+#   All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+#
+#  version: Security.L.1.0.2-229
+############################################################################
+
+
+####################Common variables and definitions########################
+
+ifndef ICP_ROOT
+$(warning ICP_ROOT is undefined. Please set the path to EP80579 release package directory \
+        "-> setenv ICP_ROOT <path>")
+all fastdep:
+	:
+else
+
+ifndef KERNEL_SOURCE_ROOT
+$(error KERNEL_SOURCE_ROOT is undefined. Please set the path to the kernel source directory \
+        "-> setenv KERNEL_SOURCE_ROOT <path>")
+endif
+
+# Ensure The ENV_DIR environmental var is defined.
+ifndef ICP_ENV_DIR
+$(error ICP_ENV_DIR is undefined. Please set the path to EP80579 driver environment.mk file \
+        "-> setenv ICP_ENV_DIR <path>")
+endif
+
+#Add your project environment Makefile
+include ${ICP_ENV_DIR}/environment.mk
+
+#include the makefile with all the default and common Make variable definitions
+include ${ICP_BUILDSYSTEM_PATH}/build_files/common.mk
+
+#Add the name for the executable, Library or Module output definitions
+OUTPUT_NAME= icp_ocf
+
+# List of Source Files to be compiled
+SOURCES= icp_common.c icp_sym.c icp_asym.c icp_ocf_linux.c
+
+#common includes between all supported OSes
+INCLUDES= -I ${ICP_API_DIR} -I${ICP_LAC_API} \
+-I${ICP_OCF_SRC_DIR}
+
+# The location of the os level makefile needs to be changed.
+include ${ICP_ENV_DIR}/${ICP_OS}_${ICP_OS_LEVEL}.mk
+
+# On the line directly below list the outputs you wish to build for,
+# e.g "lib_static lib_shared exe module" as shown below
+install: module
+
+###################Include rules makefiles########################
+include ${ICP_BUILDSYSTEM_PATH}/build_files/rules.mk
+###################End of Rules inclusion#########################
+
+endif
diff --git a/crypto/ocf/ep80579/environment.mk b/crypto/ocf/ep80579/environment.mk
new file mode 100644
index 000000000000..177721f1a5fd
--- /dev/null
+++ b/crypto/ocf/ep80579/environment.mk
@@ -0,0 +1,77 @@
+ ###########################################################################
+ #
+# This file is provided under a dual BSD/GPLv2 license.  When using or
+#   redistributing this file, you may do so under either license.
+#
+#   GPL LICENSE SUMMARY
+#
+#   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
+#
+#   This program is free software; you can redistribute it and/or modify
+#   it under the terms of version 2 of the GNU General Public License as
+#   published by the Free Software Foundation.
+#
+#   This program is distributed in the hope that it will be useful, but
+#   WITHOUT ANY WARRANTY; without even the implied warranty of
+#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#   General Public License for more details.
+#
+#   You should have received a copy of the GNU General Public License
+#   along with this program; if not, write to the Free Software
+#   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#   The full GNU General Public License is included in this distribution
+#   in the file called LICENSE.GPL.
+#
+#   Contact Information:
+#   Intel Corporation
+#
+#   BSD LICENSE
+#
+#   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
+#   All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+#
+#  version: Security.L.1.0.130
+ #
+ ###########################################################################
+
+
+ICP_LAC_API=$(ICP_ROOT)/Acceleration/include/lac
+ICP_BTR_API=$(ICP_ROOT)/Acceleration/include/btr
+ICP_API_DIR=$(ICP_ROOT)/Acceleration/include
+ICP_OCF_SHIM_DIR?=$(KERNEL_SOURCE_ROOT)/crypto/ocf/
+ifeq ($(wildcard $(ICP_OCF_SHIM_DIR)),)
+ICP_OCF_SHIM_DIR?=$(ROOTDIR)/modules/ocf/
+endif
+
+ICP_OS_LEVEL?=kernel_space
+
+ICP_OS?=linux_2.6
+
+ICP_CORE?=ia
diff --git a/crypto/ocf/ep80579/icp_asym.c b/crypto/ocf/ep80579/icp_asym.c
new file mode 100644
index 000000000000..ebdddc1f371f
--- /dev/null
+++ b/crypto/ocf/ep80579/icp_asym.c
@@ -0,0 +1,1334 @@
+/***************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *   The full GNU General Public License is included in this distribution
+ *   in the file called LICENSE.GPL.
+ *
+ *   Contact Information:
+ *   Intel Corporation
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ *  version: Security.L.1.0.2-229
+ *
+ ***************************************************************************/
+
+#include "icp_ocf.h"
+
+/*The following define values (containing the word 'INDEX') are used to find
+the index of each input buffer of the crypto_kop struct (see OCF cryptodev.h).
+These values were found through analysis of the OCF OpenSSL patch. If the
+calling program uses different input buffer positions, these defines will have
+to be changed.*/
+
+/*DIFFIE HELLMAN buffer index values*/
+#define ICP_DH_KRP_PARAM_PRIME_INDEX                            (0)
+#define ICP_DH_KRP_PARAM_BASE_INDEX                             (1)
+#define ICP_DH_KRP_PARAM_PRIVATE_VALUE_INDEX                    (2)
+#define ICP_DH_KRP_PARAM_RESULT_INDEX                           (3)
+
+/*MOD EXP buffer index values*/
+#define ICP_MOD_EXP_KRP_PARAM_BASE_INDEX                        (0)
+#define ICP_MOD_EXP_KRP_PARAM_EXPONENT_INDEX                    (1)
+#define ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX                     (2)
+#define ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX                      (3)
+
+/*MOD EXP CRT buffer index values*/
+#define ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_P_INDEX                 (0)
+#define ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_Q_INDEX                 (1)
+#define ICP_MOD_EXP_CRT_KRP_PARAM_I_INDEX                       (2)
+#define ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DP_INDEX             (3)
+#define ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DQ_INDEX             (4)
+#define ICP_MOD_EXP_CRT_KRP_PARAM_COEFF_QINV_INDEX              (5)
+#define ICP_MOD_EXP_CRT_KRP_PARAM_RESULT_INDEX                  (6)
+
+/*DSA sign buffer index values*/
+#define ICP_DSA_SIGN_KRP_PARAM_DGST_INDEX                       (0)
+#define ICP_DSA_SIGN_KRP_PARAM_PRIME_P_INDEX                    (1)
+#define ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX                    (2)
+#define ICP_DSA_SIGN_KRP_PARAM_G_INDEX                          (3)
+#define ICP_DSA_SIGN_KRP_PARAM_X_INDEX                          (4)
+#define ICP_DSA_SIGN_KRP_PARAM_R_RESULT_INDEX                   (5)
+#define ICP_DSA_SIGN_KRP_PARAM_S_RESULT_INDEX                   (6)
+
+/*DSA verify buffer index values*/
+#define ICP_DSA_VERIFY_KRP_PARAM_DGST_INDEX                     (0)
+#define ICP_DSA_VERIFY_KRP_PARAM_PRIME_P_INDEX                  (1)
+#define ICP_DSA_VERIFY_KRP_PARAM_PRIME_Q_INDEX                  (2)
+#define ICP_DSA_VERIFY_KRP_PARAM_G_INDEX                        (3)
+#define ICP_DSA_VERIFY_KRP_PARAM_PUBKEY_INDEX                   (4)
+#define ICP_DSA_VERIFY_KRP_PARAM_SIG_R_INDEX                    (5)
+#define ICP_DSA_VERIFY_KRP_PARAM_SIG_S_INDEX                    (6)
+
+/*DSA sign prime Q vs random number K size check values*/
+#define DONT_RUN_LESS_THAN_CHECK                                (0)
+#define FAIL_A_IS_GREATER_THAN_B                                (1)
+#define FAIL_A_IS_EQUAL_TO_B                                    (1)
+#define SUCCESS_A_IS_LESS_THAN_B                                (0)
+#define DSA_SIGN_RAND_GEN_VAL_CHECK_MAX_ITERATIONS              (500)
+
+/* We need to set a cryptokp success value just in case it is set or allocated
+   and not set to zero outside of this module */
+#define CRYPTO_OP_SUCCESS                                       (0)
+
+/*Function to compute Diffie Hellman (DH) phase 1 or phase 2 key values*/
+static int icp_ocfDrvDHComputeKey(struct cryptkop *krp);
+
+/*Function to compute a Modular Exponentiation (Mod Exp)*/
+static int icp_ocfDrvModExp(struct cryptkop *krp);
+
+/*Function to compute a Mod Exp using the Chinease Remainder Theorem*/
+static int icp_ocfDrvModExpCRT(struct cryptkop *krp);
+
+/*Helper function to compute whether the first big number argument is less than
+ the second big number argument */
+static int
+icp_ocfDrvCheckALessThanB(CpaFlatBuffer * pK, CpaFlatBuffer * pQ, int *doCheck);
+
+/*Function to sign an input with DSA R and S keys*/
+static int icp_ocfDrvDsaSign(struct cryptkop *krp);
+
+/*Function to Verify a DSA buffer signature*/
+static int icp_ocfDrvDsaVerify(struct cryptkop *krp);
+
+/*Callback function for DH operation*/
+static void
+icp_ocfDrvDhP1CallBack(void *callbackTag,
+		       CpaStatus status,
+		       void *pOpData, CpaFlatBuffer * pLocalOctetStringPV);
+
+/*Callback function for ME operation*/
+static void
+icp_ocfDrvModExpCallBack(void *callbackTag,
+			 CpaStatus status,
+			 void *pOpData, CpaFlatBuffer * pResult);
+
+/*Callback function for ME CRT operation*/
+static void
+icp_ocfDrvModExpCRTCallBack(void *callbackTag,
+			    CpaStatus status,
+			    void *pOpData, CpaFlatBuffer * pOutputData);
+
+/*Callback function for DSA sign operation*/
+static void
+icp_ocfDrvDsaRSSignCallBack(void *callbackTag,
+			    CpaStatus status,
+			    void *pOpData,
+			    CpaBoolean protocolStatus,
+			    CpaFlatBuffer * pR, CpaFlatBuffer * pS);
+
+/*Callback function for DSA Verify operation*/
+static void
+icp_ocfDrvDsaVerifyCallBack(void *callbackTag,
+			    CpaStatus status,
+			    void *pOpData, CpaBoolean verifyStatus);
+
+/* Name        : icp_ocfDrvPkeProcess
+ *
+ * Description : This function will choose which PKE process to follow
+ * based on the input arguments
+ */
+int icp_ocfDrvPkeProcess(icp_device_t dev, struct cryptkop *krp, int hint)
+{
+	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
+
+	if (NULL == krp) {
+		DPRINTK("%s(): Invalid input parameters, cryptkop = %p\n",
+			__FUNCTION__, krp);
+		return EINVAL;
+	}
+
+	if (CPA_TRUE == icp_atomic_read(&icp_ocfDrvIsExiting)) {
+		krp->krp_status = ECANCELED;
+		return ECANCELED;
+	}
+
+	switch (krp->krp_op) {
+	case CRK_DH_COMPUTE_KEY:
+		DPRINTK("%s() doing DH_COMPUTE_KEY\n", __FUNCTION__);
+		lacStatus = icp_ocfDrvDHComputeKey(krp);
+		if (CPA_STATUS_SUCCESS != lacStatus) {
+			EPRINTK("%s(): icp_ocfDrvDHComputeKey failed "
+				"(%d).\n", __FUNCTION__, lacStatus);
+			krp->krp_status = ECANCELED;
+			return ECANCELED;
+		}
+
+		break;
+
+	case CRK_MOD_EXP:
+		DPRINTK("%s() doing MOD_EXP \n", __FUNCTION__);
+		lacStatus = icp_ocfDrvModExp(krp);
+		if (CPA_STATUS_SUCCESS != lacStatus) {
+			EPRINTK("%s(): icp_ocfDrvModExp failed (%d).\n",
+				__FUNCTION__, lacStatus);
+			krp->krp_status = ECANCELED;
+			return ECANCELED;
+		}
+
+		break;
+
+	case CRK_MOD_EXP_CRT:
+		DPRINTK("%s() doing MOD_EXP_CRT \n", __FUNCTION__);
+		lacStatus = icp_ocfDrvModExpCRT(krp);
+		if (CPA_STATUS_SUCCESS != lacStatus) {
+			EPRINTK("%s(): icp_ocfDrvModExpCRT "
+				"failed (%d).\n", __FUNCTION__, lacStatus);
+			krp->krp_status = ECANCELED;
+			return ECANCELED;
+		}
+
+		break;
+
+	case CRK_DSA_SIGN:
+		DPRINTK("%s() doing DSA_SIGN \n", __FUNCTION__);
+		lacStatus = icp_ocfDrvDsaSign(krp);
+		if (CPA_STATUS_SUCCESS != lacStatus) {
+			EPRINTK("%s(): icp_ocfDrvDsaSign "
+				"failed (%d).\n", __FUNCTION__, lacStatus);
+			krp->krp_status = ECANCELED;
+			return ECANCELED;
+		}
+
+		break;
+
+	case CRK_DSA_VERIFY:
+		DPRINTK("%s() doing DSA_VERIFY \n", __FUNCTION__);
+		lacStatus = icp_ocfDrvDsaVerify(krp);
+		if (CPA_STATUS_SUCCESS != lacStatus) {
+			EPRINTK("%s(): icp_ocfDrvDsaVerify "
+				"failed (%d).\n", __FUNCTION__, lacStatus);
+			krp->krp_status = ECANCELED;
+			return ECANCELED;
+		}
+
+		break;
+
+	default:
+		EPRINTK("%s(): Asymettric function not "
+			"supported (%d).\n", __FUNCTION__, krp->krp_op);
+		krp->krp_status = EOPNOTSUPP;
+		return EOPNOTSUPP;
+	}
+
+	return ICP_OCF_DRV_STATUS_SUCCESS;
+}
+
+/* Name        : icp_ocfDrvSwapBytes
+ *
+ * Description : This function is used to swap the byte order of a buffer.
+ * It has been seen that in general we are passed little endian byte order
+ * buffers, but LAC only accepts big endian byte order buffers.
+ */
+static void inline icp_ocfDrvSwapBytes(u_int8_t * num, u_int32_t buff_len_bytes)
+{
+
+	int i;
+	u_int8_t *end_ptr;
+	u_int8_t hold_val;
+
+	end_ptr = num + (buff_len_bytes - 1);
+	buff_len_bytes = buff_len_bytes >> 1;
+	for (i = 0; i < buff_len_bytes; i++) {
+		hold_val = *num;
+		*num = *end_ptr;
+		num++;
+		*end_ptr = hold_val;
+		end_ptr--;
+	}
+}
+
+/* Name        : icp_ocfDrvDHComputeKey
+ *
+ * Description : This function will map Diffie Hellman calls from OCF
+ * to the LAC API. OCF uses this function for Diffie Hellman Phase1 and
+ * Phase2. LAC has a separate Diffie Hellman Phase2 call, however both phases
+ * break down to a modular exponentiation.
+ */
+static int icp_ocfDrvDHComputeKey(struct cryptkop *krp)
+{
+	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
+	void *callbackTag = NULL;
+	CpaCyDhPhase1KeyGenOpData *pPhase1OpData = NULL;
+	CpaFlatBuffer *pLocalOctetStringPV = NULL;
+	uint32_t dh_prime_len_bytes = 0, dh_prime_len_bits = 0;
+
+	/* Input checks - check prime is a multiple of 8 bits to allow for
+	   allocation later */
+	dh_prime_len_bits =
+	    (krp->krp_param[ICP_DH_KRP_PARAM_PRIME_INDEX].crp_nbits);
+
+	/* LAC can reject prime lengths based on prime key sizes, we just
+	   need to make sure we can allocate space for the base and
+	   exponent buffers correctly */
+	if ((dh_prime_len_bits % NUM_BITS_IN_BYTE) != 0) {
+		APRINTK("%s(): Warning Prime number buffer size is not a "
+			"multiple of 8 bits\n", __FUNCTION__);
+	}
+
+	/* Result storage space should be the same size as the prime as this
+	   value can take up the same amount of storage space */
+	if (dh_prime_len_bits !=
+	    krp->krp_param[ICP_DH_KRP_PARAM_RESULT_INDEX].crp_nbits) {
+		DPRINTK("%s(): Return Buffer must be the same size "
+			"as the Prime buffer\n", __FUNCTION__);
+		krp->krp_status = EINVAL;
+		return EINVAL;
+	}
+	/* Switch to size in bytes */
+	BITS_TO_BYTES(dh_prime_len_bytes, dh_prime_len_bits);
+
+	callbackTag = krp;
+
+/*All allocations are set to ICP_M_NOWAIT due to the possibility of getting
+called in interrupt context*/
+	pPhase1OpData = icp_kmem_cache_zalloc(drvDH_zone, ICP_M_NOWAIT);
+	if (NULL == pPhase1OpData) {
+		APRINTK("%s():Failed to get memory for key gen data\n",
+			__FUNCTION__);
+		krp->krp_status = ENOMEM;
+		return ENOMEM;
+	}
+
+	pLocalOctetStringPV =
+	    icp_kmem_cache_zalloc(drvFlatBuffer_zone, ICP_M_NOWAIT);
+	if (NULL == pLocalOctetStringPV) {
+		APRINTK("%s():Failed to get memory for pLocalOctetStringPV\n",
+			__FUNCTION__);
+		ICP_CACHE_FREE(drvDH_zone, pPhase1OpData);
+		krp->krp_status = ENOMEM;
+		return ENOMEM;
+	}
+
+	/* Link parameters */
+	pPhase1OpData->primeP.pData =
+	    krp->krp_param[ICP_DH_KRP_PARAM_PRIME_INDEX].crp_p;
+
+	pPhase1OpData->primeP.dataLenInBytes = dh_prime_len_bytes;
+
+	icp_ocfDrvSwapBytes(pPhase1OpData->primeP.pData, dh_prime_len_bytes);
+
+	pPhase1OpData->baseG.pData =
+	    krp->krp_param[ICP_DH_KRP_PARAM_BASE_INDEX].crp_p;
+
+	BITS_TO_BYTES(pPhase1OpData->baseG.dataLenInBytes,
+		      krp->krp_param[ICP_DH_KRP_PARAM_BASE_INDEX].crp_nbits);
+
+	icp_ocfDrvSwapBytes(pPhase1OpData->baseG.pData,
+			    pPhase1OpData->baseG.dataLenInBytes);
+
+	pPhase1OpData->privateValueX.pData =
+	    krp->krp_param[ICP_DH_KRP_PARAM_PRIVATE_VALUE_INDEX].crp_p;
+
+	BITS_TO_BYTES(pPhase1OpData->privateValueX.dataLenInBytes,
+		      krp->krp_param[ICP_DH_KRP_PARAM_PRIVATE_VALUE_INDEX].
+		      crp_nbits);
+
+	icp_ocfDrvSwapBytes(pPhase1OpData->privateValueX.pData,
+			    pPhase1OpData->privateValueX.dataLenInBytes);
+
+	/* Output parameters */
+	pLocalOctetStringPV->pData =
+	    krp->krp_param[ICP_DH_KRP_PARAM_RESULT_INDEX].crp_p;
+
+	BITS_TO_BYTES(pLocalOctetStringPV->dataLenInBytes,
+		      krp->krp_param[ICP_DH_KRP_PARAM_RESULT_INDEX].crp_nbits);
+
+	lacStatus = cpaCyDhKeyGenPhase1(CPA_INSTANCE_HANDLE_SINGLE,
+					icp_ocfDrvDhP1CallBack,
+					callbackTag, pPhase1OpData,
+					pLocalOctetStringPV);
+
+	if (CPA_STATUS_SUCCESS != lacStatus) {
+		EPRINTK("%s(): DH Phase 1 Key Gen failed (%d).\n",
+			__FUNCTION__, lacStatus);
+		icp_ocfDrvFreeFlatBuffer(pLocalOctetStringPV);
+		ICP_CACHE_FREE(drvDH_zone, pPhase1OpData);
+	}
+
+	return lacStatus;
+}
+
+/* Name        : icp_ocfDrvModExp
+ *
+ * Description : This function will map ordinary Modular Exponentiation calls
+ * from OCF to the LAC API.
+ *
+ */
+static int icp_ocfDrvModExp(struct cryptkop *krp)
+{
+	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
+	void *callbackTag = NULL;
+	CpaCyLnModExpOpData *pModExpOpData = NULL;
+	CpaFlatBuffer *pResult = NULL;
+
+	if ((krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].crp_nbits %
+	     NUM_BITS_IN_BYTE) != 0) {
+		DPRINTK("%s(): Warning - modulus buffer size (%d) is not a "
+			"multiple of 8 bits\n", __FUNCTION__,
+			krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].
+			crp_nbits);
+	}
+
+	/* Result storage space should be the same size as the prime as this
+	   value can take up the same amount of storage space */
+	if (krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].crp_nbits >
+	    krp->krp_param[ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX].crp_nbits) {
+		APRINTK("%s(): Return Buffer size must be the same or"
+			" greater than the Modulus buffer\n", __FUNCTION__);
+		krp->krp_status = EINVAL;
+		return EINVAL;
+	}
+
+	callbackTag = krp;
+
+	pModExpOpData = icp_kmem_cache_zalloc(drvLnModExp_zone, ICP_M_NOWAIT);
+	if (NULL == pModExpOpData) {
+		APRINTK("%s():Failed to get memory for key gen data\n",
+			__FUNCTION__);
+		krp->krp_status = ENOMEM;
+		return ENOMEM;
+	}
+
+	pResult = icp_kmem_cache_zalloc(drvFlatBuffer_zone, ICP_M_NOWAIT);
+	if (NULL == pResult) {
+		APRINTK("%s():Failed to get memory for ModExp result\n",
+			__FUNCTION__);
+		ICP_CACHE_FREE(drvLnModExp_zone, pModExpOpData);
+		krp->krp_status = ENOMEM;
+		return ENOMEM;
+	}
+
+	/* Link parameters */
+	pModExpOpData->modulus.pData =
+	    krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].crp_p;
+	BITS_TO_BYTES(pModExpOpData->modulus.dataLenInBytes,
+		      krp->krp_param[ICP_MOD_EXP_KRP_PARAM_MODULUS_INDEX].
+		      crp_nbits);
+
+	icp_ocfDrvSwapBytes(pModExpOpData->modulus.pData,
+			    pModExpOpData->modulus.dataLenInBytes);
+
+	DPRINTK("%s : base (%d)\n", __FUNCTION__, krp->
+		krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].crp_nbits);
+	pModExpOpData->base.pData =
+	    krp->krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].crp_p;
+	BITS_TO_BYTES(pModExpOpData->base.dataLenInBytes,
+		      krp->krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].
+		      crp_nbits);
+	icp_ocfDrvSwapBytes(pModExpOpData->base.pData,
+			    pModExpOpData->base.dataLenInBytes);
+
+	pModExpOpData->exponent.pData =
+	    krp->krp_param[ICP_MOD_EXP_KRP_PARAM_EXPONENT_INDEX].crp_p;
+	BITS_TO_BYTES(pModExpOpData->exponent.dataLenInBytes,
+		      krp->krp_param[ICP_MOD_EXP_KRP_PARAM_EXPONENT_INDEX].
+		      crp_nbits);
+
+	icp_ocfDrvSwapBytes(pModExpOpData->exponent.pData,
+			    pModExpOpData->exponent.dataLenInBytes);
+	/* Output parameters */
+	pResult->pData =
+	    krp->krp_param[ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX].crp_p,
+	    BITS_TO_BYTES(pResult->dataLenInBytes,
+			  krp->krp_param[ICP_MOD_EXP_KRP_PARAM_RESULT_INDEX].
+			  crp_nbits);
+
+	lacStatus = cpaCyLnModExp(CPA_INSTANCE_HANDLE_SINGLE,
+				  icp_ocfDrvModExpCallBack,
+				  callbackTag, pModExpOpData, pResult);
+
+	if (CPA_STATUS_SUCCESS != lacStatus) {
+		EPRINTK("%s(): Mod Exp Operation failed (%d).\n",
+			__FUNCTION__, lacStatus);
+		krp->krp_status = ECANCELED;
+		icp_ocfDrvFreeFlatBuffer(pResult);
+		ICP_CACHE_FREE(drvLnModExp_zone, pModExpOpData);
+	}
+
+	return lacStatus;
+}
+
+/* Name        : icp_ocfDrvModExpCRT
+ *
+ * Description : This function will map ordinary Modular Exponentiation Chinese
+ * Remainder Theorem implementaion calls from OCF to the LAC API.
+ *
+ * Note : Mod Exp CRT for this driver is accelerated through LAC RSA type 2
+ * decrypt operation. Therefore P and Q input values must always be prime
+ * numbers. Although basic primality checks are done in LAC, it is up to the
+ * user to do any correct prime number checking before passing the inputs.
+ */
+static int icp_ocfDrvModExpCRT(struct cryptkop *krp)
+{
+	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
+	CpaCyRsaDecryptOpData *rsaDecryptOpData = NULL;
+	void *callbackTag = NULL;
+	CpaFlatBuffer *pOutputData = NULL;
+
+	/*Parameter input checks are all done by LAC, no need to repeat
+	   them here. */
+	callbackTag = krp;
+
+	rsaDecryptOpData =
+	    icp_kmem_cache_zalloc(drvRSADecrypt_zone, ICP_M_NOWAIT);
+	if (NULL == rsaDecryptOpData) {
+		APRINTK("%s():Failed to get memory"
+			" for MOD EXP CRT Op data struct\n", __FUNCTION__);
+		krp->krp_status = ENOMEM;
+		return ENOMEM;
+	}
+
+	rsaDecryptOpData->pRecipientPrivateKey
+	    = icp_kmem_cache_zalloc(drvRSAPrivateKey_zone, ICP_M_NOWAIT);
+	if (NULL == rsaDecryptOpData->pRecipientPrivateKey) {
+		APRINTK("%s():Failed to get memory for MOD EXP CRT"
+			" private key values struct\n", __FUNCTION__);
+		ICP_CACHE_FREE(drvRSADecrypt_zone, rsaDecryptOpData);
+		krp->krp_status = ENOMEM;
+		return ENOMEM;
+	}
+
+	rsaDecryptOpData->pRecipientPrivateKey->
+	    version = CPA_CY_RSA_VERSION_TWO_PRIME;
+	rsaDecryptOpData->pRecipientPrivateKey->
+	    privateKeyRepType = CPA_CY_RSA_PRIVATE_KEY_REP_TYPE_2;
+
+	pOutputData = icp_kmem_cache_zalloc(drvFlatBuffer_zone, ICP_M_NOWAIT);
+	if (NULL == pOutputData) {
+		APRINTK("%s():Failed to get memory"
+			" for MOD EXP CRT output data\n", __FUNCTION__);
+		ICP_CACHE_FREE(drvRSAPrivateKey_zone,
+			       rsaDecryptOpData->pRecipientPrivateKey);
+		ICP_CACHE_FREE(drvRSADecrypt_zone, rsaDecryptOpData);
+		krp->krp_status = ENOMEM;
+		return ENOMEM;
+	}
+
+	rsaDecryptOpData->pRecipientPrivateKey->
+	    version = CPA_CY_RSA_VERSION_TWO_PRIME;
+	rsaDecryptOpData->pRecipientPrivateKey->
+	    privateKeyRepType = CPA_CY_RSA_PRIVATE_KEY_REP_TYPE_2;
+
+	/* Link parameters */
+	rsaDecryptOpData->inputData.pData =
+	    krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_I_INDEX].crp_p;
+	BITS_TO_BYTES(rsaDecryptOpData->inputData.dataLenInBytes,
+		      krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_I_INDEX].
+		      crp_nbits);
+
+	icp_ocfDrvSwapBytes(rsaDecryptOpData->inputData.pData,
+			    rsaDecryptOpData->inputData.dataLenInBytes);
+
+	rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.prime1P.pData =
+	    krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_P_INDEX].crp_p;
+	BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.
+		      prime1P.dataLenInBytes,
+		      krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_P_INDEX].
+		      crp_nbits);
+
+	icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
+			    privateKeyRep2.prime1P.pData,
+			    rsaDecryptOpData->pRecipientPrivateKey->
+			    privateKeyRep2.prime1P.dataLenInBytes);
+
+	rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.prime2Q.pData =
+	    krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_Q_INDEX].crp_p;
+	BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.
+		      prime2Q.dataLenInBytes,
+		      krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_PRIME_Q_INDEX].
+		      crp_nbits);
+
+	icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
+			    privateKeyRep2.prime2Q.pData,
+			    rsaDecryptOpData->pRecipientPrivateKey->
+			    privateKeyRep2.prime2Q.dataLenInBytes);
+
+	rsaDecryptOpData->pRecipientPrivateKey->
+	    privateKeyRep2.exponent1Dp.pData =
+	    krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DP_INDEX].crp_p;
+	BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->privateKeyRep2.
+		      exponent1Dp.dataLenInBytes,
+		      krp->
+		      krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DP_INDEX].
+		      crp_nbits);
+
+	icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
+			    privateKeyRep2.exponent1Dp.pData,
+			    rsaDecryptOpData->pRecipientPrivateKey->
+			    privateKeyRep2.exponent1Dp.dataLenInBytes);
+
+	rsaDecryptOpData->pRecipientPrivateKey->
+	    privateKeyRep2.exponent2Dq.pData =
+	    krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DQ_INDEX].crp_p;
+	BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->
+		      privateKeyRep2.exponent2Dq.dataLenInBytes,
+		      krp->
+		      krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_EXPONENT_DQ_INDEX].
+		      crp_nbits);
+
+	icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
+			    privateKeyRep2.exponent2Dq.pData,
+			    rsaDecryptOpData->pRecipientPrivateKey->
+			    privateKeyRep2.exponent2Dq.dataLenInBytes);
+
+	rsaDecryptOpData->pRecipientPrivateKey->
+	    privateKeyRep2.coefficientQInv.pData =
+	    krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_COEFF_QINV_INDEX].crp_p;
+	BITS_TO_BYTES(rsaDecryptOpData->pRecipientPrivateKey->
+		      privateKeyRep2.coefficientQInv.dataLenInBytes,
+		      krp->
+		      krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_COEFF_QINV_INDEX].
+		      crp_nbits);
+
+	icp_ocfDrvSwapBytes(rsaDecryptOpData->pRecipientPrivateKey->
+			    privateKeyRep2.coefficientQInv.pData,
+			    rsaDecryptOpData->pRecipientPrivateKey->
+			    privateKeyRep2.coefficientQInv.dataLenInBytes);
+
+	/* Output Parameter */
+	pOutputData->pData =
+	    krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_RESULT_INDEX].crp_p;
+	BITS_TO_BYTES(pOutputData->dataLenInBytes,
+		      krp->krp_param[ICP_MOD_EXP_CRT_KRP_PARAM_RESULT_INDEX].
+		      crp_nbits);
+
+	lacStatus = cpaCyRsaDecrypt(CPA_INSTANCE_HANDLE_SINGLE,
+				    icp_ocfDrvModExpCRTCallBack,
+				    callbackTag, rsaDecryptOpData, pOutputData);
+
+	if (CPA_STATUS_SUCCESS != lacStatus) {
+		EPRINTK("%s(): Mod Exp CRT Operation failed (%d).\n",
+			__FUNCTION__, lacStatus);
+		krp->krp_status = ECANCELED;
+		icp_ocfDrvFreeFlatBuffer(pOutputData);
+		ICP_CACHE_FREE(drvRSAPrivateKey_zone,
+			       rsaDecryptOpData->pRecipientPrivateKey);
+		ICP_CACHE_FREE(drvRSADecrypt_zone, rsaDecryptOpData);
+	}
+
+	return lacStatus;
+}
+
+/* Name        : icp_ocfDrvCheckALessThanB
+ *
+ * Description : This function will check whether the first argument is less
+ * than the second. It is used to check whether the DSA RS sign Random K
+ * value is less than the Prime Q value (as defined in the specification)
+ *
+ */
+static int
+icp_ocfDrvCheckALessThanB(CpaFlatBuffer * pK, CpaFlatBuffer * pQ, int *doCheck)
+{
+
+	uint8_t *MSB_K = pK->pData;
+	uint8_t *MSB_Q = pQ->pData;
+	uint32_t buffer_lengths_in_bytes = pQ->dataLenInBytes;
+
+	if (DONT_RUN_LESS_THAN_CHECK == *doCheck) {
+		return FAIL_A_IS_GREATER_THAN_B;
+	}
+
+/*Check MSBs
+if A == B, check next MSB
+if A > B, return A_IS_GREATER_THAN_B
+if A < B, return A_IS_LESS_THAN_B (success)
+*/
+	while (*MSB_K == *MSB_Q) {
+		MSB_K++;
+		MSB_Q++;
+
+		buffer_lengths_in_bytes--;
+		if (0 == buffer_lengths_in_bytes) {
+			DPRINTK("%s() Buffers have equal value!!\n",
+				__FUNCTION__);
+			return FAIL_A_IS_EQUAL_TO_B;
+		}
+
+	}
+
+	if (*MSB_K < *MSB_Q) {
+		return SUCCESS_A_IS_LESS_THAN_B;
+	} else {
+		return FAIL_A_IS_GREATER_THAN_B;
+	}
+
+}
+
+/* Name        : icp_ocfDrvDsaSign
+ *
+ * Description : This function will map DSA RS Sign from OCF to the LAC API.
+ *
+ * NOTE: From looking at OCF patch to OpenSSL and even the number of input
+ * parameters, OCF expects us to generate the random seed value. This value
+ * is generated and passed to LAC, however the number is discared in the
+ * callback and not returned to the user.
+ */
+static int icp_ocfDrvDsaSign(struct cryptkop *krp)
+{
+	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
+	CpaCyDsaRSSignOpData *dsaRsSignOpData = NULL;
+	void *callbackTag = NULL;
+	CpaCyRandGenOpData randGenOpData;
+	int primeQSizeInBytes = 0;
+	int doCheck = 0;
+	CpaFlatBuffer randData;
+	CpaBoolean protocolStatus = CPA_FALSE;
+	CpaFlatBuffer *pR = NULL;
+	CpaFlatBuffer *pS = NULL;
+
+	callbackTag = krp;
+
+	BITS_TO_BYTES(primeQSizeInBytes,
+		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX].
+		      crp_nbits);
+
+	if (DSA_RS_SIGN_PRIMEQ_SIZE_IN_BYTES != primeQSizeInBytes) {
+		APRINTK("%s(): DSA PRIME Q size not equal to the "
+			"FIPS defined 20bytes, = %d\n",
+			__FUNCTION__, primeQSizeInBytes);
+		krp->krp_status = EDOM;
+		return EDOM;
+	}
+
+	dsaRsSignOpData =
+	    icp_kmem_cache_zalloc(drvDSARSSign_zone, ICP_M_NOWAIT);
+	if (NULL == dsaRsSignOpData) {
+		APRINTK("%s():Failed to get memory"
+			" for DSA RS Sign Op data struct\n", __FUNCTION__);
+		krp->krp_status = ENOMEM;
+		return ENOMEM;
+	}
+
+	dsaRsSignOpData->K.pData =
+	    icp_kmem_cache_alloc(drvDSARSSignKValue_zone, ICP_M_NOWAIT);
+
+	if (NULL == dsaRsSignOpData->K.pData) {
+		APRINTK("%s():Failed to get memory"
+			" for DSA RS Sign Op Random value\n", __FUNCTION__);
+		ICP_CACHE_FREE(drvDSARSSign_zone, dsaRsSignOpData);
+		krp->krp_status = ENOMEM;
+		return ENOMEM;
+	}
+
+	pR = icp_kmem_cache_zalloc(drvFlatBuffer_zone, ICP_M_NOWAIT);
+	if (NULL == pR) {
+		APRINTK("%s():Failed to get memory"
+			" for DSA signature R\n", __FUNCTION__);
+		ICP_CACHE_FREE(drvDSARSSignKValue_zone,
+			       dsaRsSignOpData->K.pData);
+		ICP_CACHE_FREE(drvDSARSSign_zone, dsaRsSignOpData);
+		krp->krp_status = ENOMEM;
+		return ENOMEM;
+	}
+
+	pS = icp_kmem_cache_zalloc(drvFlatBuffer_zone, ICP_M_NOWAIT);
+	if (NULL == pS) {
+		APRINTK("%s():Failed to get memory"
+			" for DSA signature S\n", __FUNCTION__);
+		icp_ocfDrvFreeFlatBuffer(pR);
+		ICP_CACHE_FREE(drvDSARSSignKValue_zone,
+			       dsaRsSignOpData->K.pData);
+		ICP_CACHE_FREE(drvDSARSSign_zone, dsaRsSignOpData);
+		krp->krp_status = ENOMEM;
+		return ENOMEM;
+	}
+
+	/*link prime number parameter for ease of processing */
+	dsaRsSignOpData->P.pData =
+	    krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_P_INDEX].crp_p;
+	BITS_TO_BYTES(dsaRsSignOpData->P.dataLenInBytes,
+		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_P_INDEX].
+		      crp_nbits);
+
+	icp_ocfDrvSwapBytes(dsaRsSignOpData->P.pData,
+			    dsaRsSignOpData->P.dataLenInBytes);
+
+	dsaRsSignOpData->Q.pData =
+	    krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX].crp_p;
+	BITS_TO_BYTES(dsaRsSignOpData->Q.dataLenInBytes,
+		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_PRIME_Q_INDEX].
+		      crp_nbits);
+
+	icp_ocfDrvSwapBytes(dsaRsSignOpData->Q.pData,
+			    dsaRsSignOpData->Q.dataLenInBytes);
+
+	/*generate random number with equal buffer size to Prime value Q,
+	   but value less than Q */
+	dsaRsSignOpData->K.dataLenInBytes = dsaRsSignOpData->Q.dataLenInBytes;
+
+	randGenOpData.generateBits = CPA_TRUE;
+	randGenOpData.lenInBytes = dsaRsSignOpData->K.dataLenInBytes;
+
+	icp_ocfDrvPtrAndLenToFlatBuffer(dsaRsSignOpData->K.pData,
+					dsaRsSignOpData->K.dataLenInBytes,
+					&randData);
+
+	doCheck = 0;
+	while (icp_ocfDrvCheckALessThanB(&(dsaRsSignOpData->K),
+					 &(dsaRsSignOpData->Q), &doCheck)) {
+
+		if (CPA_STATUS_SUCCESS
+		    != cpaCyRandGen(CPA_INSTANCE_HANDLE_SINGLE,
+				    NULL, NULL, &randGenOpData, &randData)) {
+			APRINTK("%s(): ERROR - Failed to generate DSA RS Sign K"
+				"value\n", __FUNCTION__);
+			icp_ocfDrvFreeFlatBuffer(pS);
+			icp_ocfDrvFreeFlatBuffer(pR);
+			ICP_CACHE_FREE(drvDSARSSignKValue_zone,
+				       dsaRsSignOpData->K.pData);
+			ICP_CACHE_FREE(drvDSARSSign_zone, dsaRsSignOpData);
+			krp->krp_status = EAGAIN;
+			return EAGAIN;
+		}
+
+		doCheck++;
+		if (DSA_SIGN_RAND_GEN_VAL_CHECK_MAX_ITERATIONS == doCheck) {
+			APRINTK("%s(): ERROR - Failed to find DSA RS Sign K "
+				"value less than Q value\n", __FUNCTION__);
+			icp_ocfDrvFreeFlatBuffer(pS);
+			icp_ocfDrvFreeFlatBuffer(pR);
+			ICP_CACHE_FREE(drvDSARSSignKValue_zone,
+				       dsaRsSignOpData->K.pData);
+			ICP_CACHE_FREE(drvDSARSSign_zone, dsaRsSignOpData);
+			krp->krp_status = EAGAIN;
+			return EAGAIN;
+		}
+
+	}
+	/*Rand Data - no need to swap bytes for pK */
+
+	/* Link parameters */
+	dsaRsSignOpData->G.pData =
+	    krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_G_INDEX].crp_p;
+	BITS_TO_BYTES(dsaRsSignOpData->G.dataLenInBytes,
+		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_G_INDEX].crp_nbits);
+
+	icp_ocfDrvSwapBytes(dsaRsSignOpData->G.pData,
+			    dsaRsSignOpData->G.dataLenInBytes);
+
+	dsaRsSignOpData->X.pData =
+	    krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_X_INDEX].crp_p;
+	BITS_TO_BYTES(dsaRsSignOpData->X.dataLenInBytes,
+		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_X_INDEX].crp_nbits);
+	icp_ocfDrvSwapBytes(dsaRsSignOpData->X.pData,
+			    dsaRsSignOpData->X.dataLenInBytes);
+
+	/*OpenSSL dgst parameter is left in big endian byte order,
+	   therefore no byte swap is required */
+	dsaRsSignOpData->M.pData =
+	    krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_DGST_INDEX].crp_p;
+	BITS_TO_BYTES(dsaRsSignOpData->M.dataLenInBytes,
+		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_DGST_INDEX].
+		      crp_nbits);
+
+	/* Output Parameters */
+	pS->pData = krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_S_RESULT_INDEX].crp_p;
+	BITS_TO_BYTES(pS->dataLenInBytes,
+		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_S_RESULT_INDEX].
+		      crp_nbits);
+
+	pR->pData = krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_R_RESULT_INDEX].crp_p;
+	BITS_TO_BYTES(pR->dataLenInBytes,
+		      krp->krp_param[ICP_DSA_SIGN_KRP_PARAM_R_RESULT_INDEX].
+		      crp_nbits);
+
+	lacStatus = cpaCyDsaSignRS(CPA_INSTANCE_HANDLE_SINGLE,
+				   icp_ocfDrvDsaRSSignCallBack,
+				   callbackTag, dsaRsSignOpData,
+				   &protocolStatus, pR, pS);
+
+	if (CPA_STATUS_SUCCESS != lacStatus) {
+		EPRINTK("%s(): DSA RS Sign Operation failed (%d).\n",
+			__FUNCTION__, lacStatus);
+		krp->krp_status = ECANCELED;
+		icp_ocfDrvFreeFlatBuffer(pS);
+		icp_ocfDrvFreeFlatBuffer(pR);
+		ICP_CACHE_FREE(drvDSARSSignKValue_zone,
+			       dsaRsSignOpData->K.pData);
+		ICP_CACHE_FREE(drvDSARSSign_zone, dsaRsSignOpData);
+	}
+
+	return lacStatus;
+}
+
+/* Name        : icp_ocfDrvDsaVerify
+ *
+ * Description : This function will map DSA RS Verify from OCF to the LAC API.
+ *
+ */
+static int icp_ocfDrvDsaVerify(struct cryptkop *krp)
+{
+	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
+	CpaCyDsaVerifyOpData *dsaVerifyOpData = NULL;
+	void *callbackTag = NULL;
+	CpaBoolean verifyStatus = CPA_FALSE;
+
+	callbackTag = krp;
+
+	dsaVerifyOpData =
+	    icp_kmem_cache_zalloc(drvDSAVerify_zone, ICP_M_NOWAIT);
+	if (NULL == dsaVerifyOpData) {
+		APRINTK("%s():Failed to get memory"
+			" for DSA Verify Op data struct\n", __FUNCTION__);
+		krp->krp_status = ENOMEM;
+		return ENOMEM;
+	}
+
+	/* Link parameters */
+	dsaVerifyOpData->P.pData =
+	    krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_P_INDEX].crp_p;
+	BITS_TO_BYTES(dsaVerifyOpData->P.dataLenInBytes,
+		      krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_P_INDEX].
+		      crp_nbits);
+	icp_ocfDrvSwapBytes(dsaVerifyOpData->P.pData,
+			    dsaVerifyOpData->P.dataLenInBytes);
+
+	dsaVerifyOpData->Q.pData =
+	    krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_Q_INDEX].crp_p;
+	BITS_TO_BYTES(dsaVerifyOpData->Q.dataLenInBytes,
+		      krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PRIME_Q_INDEX].
+		      crp_nbits);
+	icp_ocfDrvSwapBytes(dsaVerifyOpData->Q.pData,
+			    dsaVerifyOpData->Q.dataLenInBytes);
+
+	dsaVerifyOpData->G.pData =
+	    krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_G_INDEX].crp_p;
+	BITS_TO_BYTES(dsaVerifyOpData->G.dataLenInBytes,
+		      krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_G_INDEX].
+		      crp_nbits);
+	icp_ocfDrvSwapBytes(dsaVerifyOpData->G.pData,
+			    dsaVerifyOpData->G.dataLenInBytes);
+
+	dsaVerifyOpData->Y.pData =
+	    krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PUBKEY_INDEX].crp_p;
+	BITS_TO_BYTES(dsaVerifyOpData->Y.dataLenInBytes,
+		      krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_PUBKEY_INDEX].
+		      crp_nbits);
+	icp_ocfDrvSwapBytes(dsaVerifyOpData->Y.pData,
+			    dsaVerifyOpData->Y.dataLenInBytes);
+
+	/*OpenSSL dgst parameter is left in big endian byte order,
+	   therefore no byte swap is required */
+	dsaVerifyOpData->M.pData =
+	    krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_DGST_INDEX].crp_p;
+	BITS_TO_BYTES(dsaVerifyOpData->M.dataLenInBytes,
+		      krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_DGST_INDEX].
+		      crp_nbits);
+
+	dsaVerifyOpData->R.pData =
+	    krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_R_INDEX].crp_p;
+	BITS_TO_BYTES(dsaVerifyOpData->R.dataLenInBytes,
+		      krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_R_INDEX].
+		      crp_nbits);
+	icp_ocfDrvSwapBytes(dsaVerifyOpData->R.pData,
+			    dsaVerifyOpData->R.dataLenInBytes);
+
+	dsaVerifyOpData->S.pData =
+	    krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_S_INDEX].crp_p;
+	BITS_TO_BYTES(dsaVerifyOpData->S.dataLenInBytes,
+		      krp->krp_param[ICP_DSA_VERIFY_KRP_PARAM_SIG_S_INDEX].
+		      crp_nbits);
+	icp_ocfDrvSwapBytes(dsaVerifyOpData->S.pData,
+			    dsaVerifyOpData->S.dataLenInBytes);
+
+	lacStatus = cpaCyDsaVerify(CPA_INSTANCE_HANDLE_SINGLE,
+				   icp_ocfDrvDsaVerifyCallBack,
+				   callbackTag, dsaVerifyOpData, &verifyStatus);
+
+	if (CPA_STATUS_SUCCESS != lacStatus) {
+		EPRINTK("%s(): DSA Verify Operation failed (%d).\n",
+			__FUNCTION__, lacStatus);
+		ICP_CACHE_FREE(drvDSAVerify_zone, dsaVerifyOpData);
+		krp->krp_status = ECANCELED;
+	}
+
+	return lacStatus;
+}
+
+/* Name        : icp_ocfDrvDhP1Callback
+ *
+ * Description : When this function returns it signifies that the LAC
+ * component has completed the DH operation.
+ */
+static void
+icp_ocfDrvDhP1CallBack(void *callbackTag,
+		       CpaStatus status,
+		       void *pOpData, CpaFlatBuffer * pLocalOctetStringPV)
+{
+	struct cryptkop *krp = NULL;
+	CpaCyDhPhase1KeyGenOpData *pPhase1OpData = NULL;
+
+	if (NULL == callbackTag) {
+		DPRINTK("%s(): Invalid input parameters - "
+			"callbackTag data is NULL\n", __FUNCTION__);
+		return;
+	}
+	krp = (struct cryptkop *)callbackTag;
+
+	if (NULL == pOpData) {
+		DPRINTK("%s(): Invalid input parameters - "
+			"Operation Data is NULL\n", __FUNCTION__);
+		krp->krp_status = ECANCELED;
+		crypto_kdone(krp);
+		return;
+	}
+	pPhase1OpData = (CpaCyDhPhase1KeyGenOpData *) pOpData;
+
+	if (NULL == pLocalOctetStringPV) {
+		DPRINTK("%s(): Invalid input parameters - "
+			"pLocalOctetStringPV Data is NULL\n", __FUNCTION__);
+		memset(pPhase1OpData, 0, sizeof(CpaCyDhPhase1KeyGenOpData));
+		ICP_CACHE_FREE(drvDH_zone, pPhase1OpData);
+		krp->krp_status = ECANCELED;
+		crypto_kdone(krp);
+		return;
+	}
+
+	if (CPA_STATUS_SUCCESS == status) {
+		krp->krp_status = CRYPTO_OP_SUCCESS;
+	} else {
+		APRINTK("%s(): Diffie Hellman Phase1 Key Gen failed - "
+			"Operation Status = %d\n", __FUNCTION__, status);
+		krp->krp_status = ECANCELED;
+	}
+
+	icp_ocfDrvSwapBytes(pLocalOctetStringPV->pData,
+			    pLocalOctetStringPV->dataLenInBytes);
+
+	icp_ocfDrvFreeFlatBuffer(pLocalOctetStringPV);
+	memset(pPhase1OpData, 0, sizeof(CpaCyDhPhase1KeyGenOpData));
+	ICP_CACHE_FREE(drvDH_zone, pPhase1OpData);
+
+	crypto_kdone(krp);
+
+	return;
+}
+
+/* Name        : icp_ocfDrvModExpCallBack
+ *
+ * Description : When this function returns it signifies that the LAC
+ * component has completed the Mod Exp operation.
+ */
+static void
+icp_ocfDrvModExpCallBack(void *callbackTag,
+			 CpaStatus status,
+			 void *pOpdata, CpaFlatBuffer * pResult)
+{
+	struct cryptkop *krp = NULL;
+	CpaCyLnModExpOpData *pLnModExpOpData = NULL;
+
+	if (NULL == callbackTag) {
+		DPRINTK("%s(): Invalid input parameters - "
+			"callbackTag data is NULL\n", __FUNCTION__);
+		return;
+	}
+	krp = (struct cryptkop *)callbackTag;
+
+	if (NULL == pOpdata) {
+		DPRINTK("%s(): Invalid Mod Exp input parameters - "
+			"Operation Data is NULL\n", __FUNCTION__);
+		krp->krp_status = ECANCELED;
+		crypto_kdone(krp);
+		return;
+	}
+	pLnModExpOpData = (CpaCyLnModExpOpData *) pOpdata;
+
+	if (NULL == pResult) {
+		DPRINTK("%s(): Invalid input parameters - "
+			"pResult data is NULL\n", __FUNCTION__);
+		krp->krp_status = ECANCELED;
+		memset(pLnModExpOpData, 0, sizeof(CpaCyLnModExpOpData));
+		ICP_CACHE_FREE(drvLnModExp_zone, pLnModExpOpData);
+		crypto_kdone(krp);
+		return;
+	}
+
+	if (CPA_STATUS_SUCCESS == status) {
+		krp->krp_status = CRYPTO_OP_SUCCESS;
+	} else {
+		APRINTK("%s(): LAC Mod Exp Operation failed - "
+			"Operation Status = %d\n", __FUNCTION__, status);
+		krp->krp_status = ECANCELED;
+	}
+
+	icp_ocfDrvSwapBytes(pResult->pData, pResult->dataLenInBytes);
+
+	/*switch base size value back to original */
+	if (pLnModExpOpData->base.pData ==
+	    (uint8_t *) & (krp->
+			   krp_param[ICP_MOD_EXP_KRP_PARAM_BASE_INDEX].
+			   crp_nbits)) {
+		*((uint32_t *) pLnModExpOpData->base.pData) =
+		    ntohl(*((uint32_t *) pLnModExpOpData->base.pData));
+	}
+	icp_ocfDrvFreeFlatBuffer(pResult);
+	memset(pLnModExpOpData, 0, sizeof(CpaCyLnModExpOpData));
+	ICP_CACHE_FREE(drvLnModExp_zone, pLnModExpOpData);
+
+	crypto_kdone(krp);
+
+	return;
+
+}
+
+/* Name        : icp_ocfDrvModExpCRTCallBack
+ *
+ * Description : When this function returns it signifies that the LAC
+ * component has completed the Mod Exp CRT operation.
+ */
+static void
+icp_ocfDrvModExpCRTCallBack(void *callbackTag,
+			    CpaStatus status,
+			    void *pOpData, CpaFlatBuffer * pOutputData)
+{
+	struct cryptkop *krp = NULL;
+	CpaCyRsaDecryptOpData *pDecryptData = NULL;
+
+	if (NULL == callbackTag) {
+		DPRINTK("%s(): Invalid input parameters - "
+			"callbackTag data is NULL\n", __FUNCTION__);
+		return;
+	}
+
+	krp = (struct cryptkop *)callbackTag;
+
+	if (NULL == pOpData) {
+		DPRINTK("%s(): Invalid input parameters - "
+			"Operation Data is NULL\n", __FUNCTION__);
+		krp->krp_status = ECANCELED;
+		crypto_kdone(krp);
+		return;
+	}
+	pDecryptData = (CpaCyRsaDecryptOpData *) pOpData;
+
+	if (NULL == pOutputData) {
+		DPRINTK("%s(): Invalid input parameter - "
+			"pOutputData is NULL\n", __FUNCTION__);
+		memset(pDecryptData->pRecipientPrivateKey, 0,
+		       sizeof(CpaCyRsaPrivateKey));
+		ICP_CACHE_FREE(drvRSAPrivateKey_zone,
+			       pDecryptData->pRecipientPrivateKey);
+		memset(pDecryptData, 0, sizeof(CpaCyRsaDecryptOpData));
+		ICP_CACHE_FREE(drvRSADecrypt_zone, pDecryptData);
+		krp->krp_status = ECANCELED;
+		crypto_kdone(krp);
+		return;
+	}
+
+	if (CPA_STATUS_SUCCESS == status) {
+		krp->krp_status = CRYPTO_OP_SUCCESS;
+	} else {
+		APRINTK("%s(): LAC Mod Exp CRT operation failed - "
+			"Operation Status = %d\n", __FUNCTION__, status);
+		krp->krp_status = ECANCELED;
+	}
+
+	icp_ocfDrvSwapBytes(pOutputData->pData, pOutputData->dataLenInBytes);
+
+	icp_ocfDrvFreeFlatBuffer(pOutputData);
+	memset(pDecryptData->pRecipientPrivateKey, 0,
+	       sizeof(CpaCyRsaPrivateKey));
+	ICP_CACHE_FREE(drvRSAPrivateKey_zone,
+		       pDecryptData->pRecipientPrivateKey);
+	memset(pDecryptData, 0, sizeof(CpaCyRsaDecryptOpData));
+	ICP_CACHE_FREE(drvRSADecrypt_zone, pDecryptData);
+
+	crypto_kdone(krp);
+
+	return;
+}
+
+/* Name        : icp_ocfDrvDsaRSSignCallBack
+ *
+ * Description : When this function returns it signifies that the LAC
+ * component has completed the DSA RS sign operation.
+ */
+static void
+icp_ocfDrvDsaRSSignCallBack(void *callbackTag,
+			    CpaStatus status,
+			    void *pOpData,
+			    CpaBoolean protocolStatus,
+			    CpaFlatBuffer * pR, CpaFlatBuffer * pS)
+{
+	struct cryptkop *krp = NULL;
+	CpaCyDsaRSSignOpData *pSignData = NULL;
+
+	if (NULL == callbackTag) {
+		DPRINTK("%s(): Invalid input parameters - "
+			"callbackTag data is NULL\n", __FUNCTION__);
+		return;
+	}
+
+	krp = (struct cryptkop *)callbackTag;
+
+	if (NULL == pOpData) {
+		DPRINTK("%s(): Invalid input parameters - "
+			"Operation Data is NULL\n", __FUNCTION__);
+		krp->krp_status = ECANCELED;
+		crypto_kdone(krp);
+		return;
+	}
+	pSignData = (CpaCyDsaRSSignOpData *) pOpData;
+
+	if (NULL == pR) {
+		DPRINTK("%s(): Invalid input parameter - "
+			"pR sign is NULL\n", __FUNCTION__);
+		icp_ocfDrvFreeFlatBuffer(pS);
+		ICP_CACHE_FREE(drvDSARSSign_zone, pSignData);
+		krp->krp_status = ECANCELED;
+		crypto_kdone(krp);
+		return;
+	}
+
+	if (NULL == pS) {
+		DPRINTK("%s(): Invalid input parameter - "
+			"pS sign is NULL\n", __FUNCTION__);
+		icp_ocfDrvFreeFlatBuffer(pR);
+		ICP_CACHE_FREE(drvDSARSSign_zone, pSignData);
+		krp->krp_status = ECANCELED;
+		crypto_kdone(krp);
+		return;
+	}
+
+	if (CPA_STATUS_SUCCESS != status) {
+		APRINTK("%s(): LAC DSA RS Sign operation failed - "
+			"Operation Status = %d\n", __FUNCTION__, status);
+		krp->krp_status = ECANCELED;
+	} else {
+		krp->krp_status = CRYPTO_OP_SUCCESS;
+
+		if (CPA_TRUE != protocolStatus) {
+			DPRINTK("%s(): LAC DSA RS Sign operation failed due "
+				"to protocol error\n", __FUNCTION__);
+			krp->krp_status = EIO;
+		}
+	}
+
+	/* Swap bytes only when the callback status is successful and
+	   protocolStatus is set to true */
+	if (CPA_STATUS_SUCCESS == status && CPA_TRUE == protocolStatus) {
+		icp_ocfDrvSwapBytes(pR->pData, pR->dataLenInBytes);
+		icp_ocfDrvSwapBytes(pS->pData, pS->dataLenInBytes);
+	}
+
+	icp_ocfDrvFreeFlatBuffer(pR);
+	icp_ocfDrvFreeFlatBuffer(pS);
+	memset(pSignData->K.pData, 0, pSignData->K.dataLenInBytes);
+	ICP_CACHE_FREE(drvDSARSSignKValue_zone, pSignData->K.pData);
+	memset(pSignData, 0, sizeof(CpaCyDsaRSSignOpData));
+	ICP_CACHE_FREE(drvDSARSSign_zone, pSignData);
+	crypto_kdone(krp);
+
+	return;
+}
+
+/* Name        : icp_ocfDrvDsaVerifyCallback
+ *
+ * Description : When this function returns it signifies that the LAC
+ * component has completed the DSA Verify operation.
+ */
+static void
+icp_ocfDrvDsaVerifyCallBack(void *callbackTag,
+			    CpaStatus status,
+			    void *pOpData, CpaBoolean verifyStatus)
+{
+
+	struct cryptkop *krp = NULL;
+	CpaCyDsaVerifyOpData *pVerData = NULL;
+
+	if (NULL == callbackTag) {
+		DPRINTK("%s(): Invalid input parameters - "
+			"callbackTag data is NULL\n", __FUNCTION__);
+		return;
+	}
+
+	krp = (struct cryptkop *)callbackTag;
+
+	if (NULL == pOpData) {
+		DPRINTK("%s(): Invalid input parameters - "
+			"Operation Data is NULL\n", __FUNCTION__);
+		krp->krp_status = ECANCELED;
+		crypto_kdone(krp);
+		return;
+	}
+	pVerData = (CpaCyDsaVerifyOpData *) pOpData;
+
+	if (CPA_STATUS_SUCCESS != status) {
+		APRINTK("%s(): LAC DSA Verify operation failed - "
+			"Operation Status = %d\n", __FUNCTION__, status);
+		krp->krp_status = ECANCELED;
+	} else {
+		krp->krp_status = CRYPTO_OP_SUCCESS;
+
+		if (CPA_TRUE != verifyStatus) {
+			DPRINTK("%s(): DSA signature invalid\n", __FUNCTION__);
+			krp->krp_status = EIO;
+		}
+	}
+
+	/* Swap bytes only when the callback status is successful and
+	   verifyStatus is set to true */
+	/*Just swapping back the key values for now. Possibly all
+	   swapped buffers need to be reverted */
+	if (CPA_STATUS_SUCCESS == status && CPA_TRUE == verifyStatus) {
+		icp_ocfDrvSwapBytes(pVerData->R.pData,
+				    pVerData->R.dataLenInBytes);
+		icp_ocfDrvSwapBytes(pVerData->S.pData,
+				    pVerData->S.dataLenInBytes);
+	}
+
+	memset(pVerData, 0, sizeof(CpaCyDsaVerifyOpData));
+	ICP_CACHE_FREE(drvDSAVerify_zone, pVerData);
+	crypto_kdone(krp);
+
+	return;
+}
diff --git a/crypto/ocf/ep80579/icp_common.c b/crypto/ocf/ep80579/icp_common.c
new file mode 100644
index 000000000000..50a402bf9d75
--- /dev/null
+++ b/crypto/ocf/ep80579/icp_common.c
@@ -0,0 +1,773 @@
+/*************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *   The full GNU General Public License is included in this distribution
+ *   in the file called LICENSE.GPL.
+ *
+ *   Contact Information:
+ *   Intel Corporation
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ *  version: Security.L.1.0.2-229
+ *
+ ***************************************************************************/
+
+/*
+ * An OCF module that uses IntelÂ® QuickAssist Integrated Accelerator to do the
+ * crypto.
+ *
+ * This driver requires the ICP Access Library that is available from Intel in
+ * order to operate.
+ */
+
+#include "icp_ocf.h"
+
+#define ICP_OCF_COMP_NAME                       "ICP_OCF"
+#define ICP_OCF_VER_MAIN                        (2)
+#define ICP_OCF_VER_MJR                         (1)
+#define ICP_OCF_VER_MNR                         (0)
+
+#define MAX_DEREG_RETRIES                       (100)
+#define DEFAULT_DEREG_RETRIES 			(10)
+#define DEFAULT_DEREG_DELAY_IN_JIFFIES		(10)
+
+/* This defines the maximum number of sessions possible between OCF
+   and the OCF EP80579 Driver. If set to zero, there is no limit. */
+#define DEFAULT_OCF_TO_DRV_MAX_SESSION_COUNT    (0)
+#define NUM_SUPPORTED_CAPABILITIES              (21)
+
+/*Slab zone names*/
+#define ICP_SESSION_DATA_NAME   "icp_ocf.SesDat"
+#define ICP_OP_DATA_NAME        "icp_ocf.OpDat"
+#define ICP_DH_NAME             "icp_ocf.DH"
+#define ICP_MODEXP_NAME         "icp_ocf.ModExp"
+#define ICP_RSA_DECRYPT_NAME    "icp_ocf.RSAdec"
+#define ICP_RSA_PKEY_NAME       "icp_ocf.RSApk"
+#define ICP_DSA_SIGN_NAME       "icp_ocf.DSAsg"
+#define ICP_DSA_VER_NAME        "icp_ocf.DSAver"
+#define ICP_RAND_VAL_NAME       "icp_ocf.DSArnd"
+#define ICP_FLAT_BUFF_NAME      "icp_ocf.FB"
+
+/*Slabs zones*/
+icp_kmem_cache drvSessionData_zone = NULL;
+icp_kmem_cache drvOpData_zone = NULL;
+icp_kmem_cache drvDH_zone = NULL;
+icp_kmem_cache drvLnModExp_zone = NULL;
+icp_kmem_cache drvRSADecrypt_zone = NULL;
+icp_kmem_cache drvRSAPrivateKey_zone = NULL;
+icp_kmem_cache drvDSARSSign_zone = NULL;
+icp_kmem_cache drvDSARSSignKValue_zone = NULL;
+icp_kmem_cache drvDSAVerify_zone = NULL;
+
+/*Slab zones for flatbuffers and bufferlist*/
+icp_kmem_cache drvFlatBuffer_zone = NULL;
+
+static inline int icp_cache_null_check(void)
+{
+	return (drvSessionData_zone && drvOpData_zone
+		&& drvDH_zone && drvLnModExp_zone && drvRSADecrypt_zone
+		&& drvRSAPrivateKey_zone && drvDSARSSign_zone
+		&& drvDSARSSign_zone && drvDSARSSignKValue_zone
+		&& drvDSAVerify_zone && drvFlatBuffer_zone);
+}
+
+/*Function to free all allocated slab caches before exiting the module*/
+static void icp_ocfDrvFreeCaches(void);
+
+int32_t icp_ocfDrvDriverId = INVALID_DRIVER_ID;
+
+/* Module parameter - gives the number of times LAC deregistration shall be
+   re-tried */
+int num_dereg_retries = DEFAULT_DEREG_RETRIES;
+
+/* Module parameter - gives the delay time in jiffies before a LAC session
+   shall be attempted to be deregistered again */
+int dereg_retry_delay_in_jiffies = DEFAULT_DEREG_DELAY_IN_JIFFIES;
+
+/* Module parameter - gives the maximum number of sessions possible between
+   OCF and the OCF EP80579 Driver. If set to zero, there is no limit.*/
+int max_sessions = DEFAULT_OCF_TO_DRV_MAX_SESSION_COUNT;
+
+/* This is set when the module is removed from the system, no further
+   processing can take place if this is set */
+icp_atomic_t icp_ocfDrvIsExiting = ICP_ATOMIC_INIT(0);
+
+/* This is used to show how many lac sessions were not deregistered*/
+icp_atomic_t lac_session_failed_dereg_count = ICP_ATOMIC_INIT(0);
+
+/* This is used to track the number of registered sessions between OCF and
+ * and the OCF EP80579 driver, when max_session is set to value other than
+ * zero. This ensures that the max_session set for the OCF and the driver
+ * is equal to the LAC registered sessions */
+icp_atomic_t num_ocf_to_drv_registered_sessions = ICP_ATOMIC_INIT(0);
+
+/* Head of linked list used to store session data */
+icp_drvSessionListHead_t icp_ocfDrvGlobalSymListHead;
+icp_drvSessionListHead_t icp_ocfDrvGlobalSymListHead_FreeMemList;
+
+icp_spinlock_t icp_ocfDrvSymSessInfoListSpinlock;
+
+/*Below pointer is only used in linux, FreeBSD uses the name to
+create its own variable name*/
+icp_workqueue *icp_ocfDrvFreeLacSessionWorkQ = NULL;
+ICP_WORKQUEUE_DEFINE_THREAD(icp_ocfDrvFreeLacSessionWorkQ);
+
+struct icp_drvBuffListInfo defBuffListInfo;
+
+/* Name        : icp_ocfDrvInit
+ *
+ * Description : This function will register all the symmetric and asymmetric
+ * functionality that will be accelerated by the hardware. It will also
+ * get a unique driver ID from the OCF and initialise all slab caches
+ */
+ICP_MODULE_INIT_FUNC(icp_ocfDrvInit)
+{
+	int ocfStatus = 0;
+
+	IPRINTK("=== %s ver %d.%d.%d ===\n", ICP_OCF_COMP_NAME,
+		ICP_OCF_VER_MAIN, ICP_OCF_VER_MJR, ICP_OCF_VER_MNR);
+
+	if (MAX_DEREG_RETRIES < num_dereg_retries) {
+		EPRINTK("Session deregistration retry count set to greater "
+			"than %d", MAX_DEREG_RETRIES);
+		icp_module_return_code(EINVAL);
+	}
+
+	/* Initialize and Start the Cryptographic component */
+	if (CPA_STATUS_SUCCESS !=
+	    cpaCyStartInstance(CPA_INSTANCE_HANDLE_SINGLE)) {
+		EPRINTK("Failed to initialize and start the instance "
+			"of the Cryptographic component.\n");
+		return icp_module_return_code(EINVAL);
+	}
+
+	icp_spin_lock_init(&icp_ocfDrvSymSessInfoListSpinlock);
+
+	/* Set the default size of BufferList to allocate */
+	memset(&defBuffListInfo, 0, sizeof(struct icp_drvBuffListInfo));
+	if (ICP_OCF_DRV_STATUS_SUCCESS !=
+	    icp_ocfDrvBufferListMemInfo(ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS,
+					&defBuffListInfo)) {
+		EPRINTK("Failed to get bufferlist memory info.\n");
+		return icp_module_return_code(ENOMEM);
+	}
+
+	/*Register OCF EP80579 Driver with OCF */
+	icp_ocfDrvDriverId = ICP_CRYPTO_GET_DRIVERID();
+
+	if (icp_ocfDrvDriverId < 0) {
+		EPRINTK("%s : ICP driver failed to register with OCF!\n",
+			__FUNCTION__);
+		return icp_module_return_code(ENODEV);
+	}
+
+	/*Create all the slab caches used by the OCF EP80579 Driver */
+	drvSessionData_zone =
+	    ICP_CACHE_CREATE(ICP_SESSION_DATA_NAME, struct icp_drvSessionData);
+
+	/*
+	 * Allocation of the OpData includes the allocation space for meta data.
+	 * The memory after the opData structure is reserved for this meta data.
+	 */
+	drvOpData_zone =
+	    icp_kmem_cache_create(ICP_OP_DATA_NAME,
+				  sizeof(struct icp_drvOpData) +
+				  defBuffListInfo.metaSize,
+				  ICP_KERNEL_CACHE_ALIGN,
+				  ICP_KERNEL_CACHE_NOINIT);
+
+	drvDH_zone = ICP_CACHE_CREATE(ICP_DH_NAME, CpaCyDhPhase1KeyGenOpData);
+
+	drvLnModExp_zone =
+	    ICP_CACHE_CREATE(ICP_MODEXP_NAME, CpaCyLnModExpOpData);
+
+	drvRSADecrypt_zone =
+	    ICP_CACHE_CREATE(ICP_RSA_DECRYPT_NAME, CpaCyRsaDecryptOpData);
+
+	drvRSAPrivateKey_zone =
+	    ICP_CACHE_CREATE(ICP_RSA_PKEY_NAME, CpaCyRsaPrivateKey);
+
+	drvDSARSSign_zone =
+	    ICP_CACHE_CREATE(ICP_DSA_SIGN_NAME, CpaCyDsaRSSignOpData);
+
+	/*too awkward to use a macro here */
+	drvDSARSSignKValue_zone =
+	    ICP_CACHE_CREATE(ICP_RAND_VAL_NAME,
+			     DSA_RS_SIGN_PRIMEQ_SIZE_IN_BYTES);
+
+	drvDSAVerify_zone =
+	    ICP_CACHE_CREATE(ICP_DSA_VER_NAME, CpaCyDsaVerifyOpData);
+
+	drvFlatBuffer_zone =
+	    ICP_CACHE_CREATE(ICP_FLAT_BUFF_NAME, CpaFlatBuffer);
+
+	if (0 == icp_cache_null_check()) {
+		icp_ocfDrvFreeCaches();
+		EPRINTK("%s() line %d: Not enough memory!\n",
+			__FUNCTION__, __LINE__);
+		return ENOMEM;
+	}
+
+	/* Register the ICP symmetric crypto support. */
+	ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_NULL_CBC, ocfStatus);
+	ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_DES_CBC, ocfStatus);
+	ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_3DES_CBC, ocfStatus);
+	ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_AES_CBC, ocfStatus);
+	ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_ARC4, ocfStatus);
+	ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_MD5, ocfStatus);
+	ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_MD5_HMAC, ocfStatus);
+	ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_SHA1, ocfStatus);
+	ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_SHA1_HMAC, ocfStatus);
+	ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_SHA2_256, ocfStatus);
+	ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_SHA2_256_HMAC,
+			     ocfStatus);
+	ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_SHA2_384, ocfStatus);
+	ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_SHA2_384_HMAC,
+			     ocfStatus);
+	ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_SHA2_512, ocfStatus);
+	ICP_REG_SYM_WITH_OCF(icp_ocfDrvDriverId, CRYPTO_SHA2_512_HMAC,
+			     ocfStatus);
+
+	/* Register the ICP asymmetric algorithm support */
+	ICP_REG_ASYM_WITH_OCF(icp_ocfDrvDriverId, CRK_DH_COMPUTE_KEY,
+			      ocfStatus);
+	ICP_REG_ASYM_WITH_OCF(icp_ocfDrvDriverId, CRK_MOD_EXP, ocfStatus);
+	ICP_REG_ASYM_WITH_OCF(icp_ocfDrvDriverId, CRK_MOD_EXP_CRT, ocfStatus);
+	ICP_REG_ASYM_WITH_OCF(icp_ocfDrvDriverId, CRK_DSA_SIGN, ocfStatus);
+	ICP_REG_ASYM_WITH_OCF(icp_ocfDrvDriverId, CRK_DSA_VERIFY, ocfStatus);
+
+	/* Register the ICP random number generator support */
+	ICP_REG_RAND_WITH_OCF(icp_ocfDrvDriverId,
+			      icp_ocfDrvReadRandom, NULL, ocfStatus);
+
+	if (OCF_ZERO_FUNCTIONALITY_REGISTERED == ocfStatus) {
+		DPRINTK("%s: Failed to register any device capabilities\n",
+			__FUNCTION__);
+		icp_ocfDrvFreeCaches();
+		icp_ocfDrvDriverId = INVALID_DRIVER_ID;
+		return icp_module_return_code(ECANCELED);
+	}
+
+	DPRINTK("%s: Registered %d of %d device capabilities\n",
+		__FUNCTION__, ocfStatus, NUM_SUPPORTED_CAPABILITIES);
+
+	/*Session data linked list used during module exit */
+	ICP_INIT_LIST_HEAD(&icp_ocfDrvGlobalSymListHead);
+	ICP_INIT_LIST_HEAD(&icp_ocfDrvGlobalSymListHead_FreeMemList);
+
+	ICP_WORKQUEUE_CREATE(icp_ocfDrvFreeLacSessionWorkQ, "icpwq");
+	if (ICP_WORKQUEUE_NULL_CHECK(icp_ocfDrvFreeLacSessionWorkQ)) {
+		EPRINTK("%s: Failed to create single "
+			"thread workqueue\n", __FUNCTION__);
+		icp_ocfDrvFreeCaches();
+		icp_ocfDrvDriverId = INVALID_DRIVER_ID;
+		return icp_module_return_code(ENOMEM);
+	}
+
+	return icp_module_return_code(0);
+}
+
+/* Name        : icp_ocfDrvExit
+ *
+ * Description : This function will deregister all the symmetric sessions
+ * registered with the LAC component. It will also deregister all symmetric
+ * and asymmetric functionality that can be accelerated by the hardware via OCF
+ * and random number generation if it is enabled.
+ */
+ICP_MODULE_EXIT_FUNC(icp_ocfDrvExit)
+{
+	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
+	struct icp_drvSessionData *sessionData = NULL;
+	struct icp_drvSessionData *tempSessionData = NULL;
+	int i, remaining_delay_time_in_jiffies = 0;
+
+	/* For FreeBSD the invariant macro below makes function to return     */
+	/* with EBUSY value in the case of any session which has been regi-   */
+	/* stered with LAC not being deregistered.                            */
+	/* The Linux implementation is empty since it is purely to compensate */
+	/* for a limitation of the FreeBSD 7.1 Opencrypto framework.          */
+
+    ICP_MODULE_EXIT_INV();
+
+	/* There is a possibility of a process or new session command being   */
+	/* sent before this variable is incremented. The aim of this variable */
+	/* is to stop a loop of calls creating a deadlock situation which     */
+	/* would prevent the driver from exiting.                             */
+	icp_atomic_set(&icp_ocfDrvIsExiting, 1);
+
+	/*Existing sessions will be routed to another driver after these calls */
+	crypto_unregister_all(icp_ocfDrvDriverId);
+	crypto_runregister_all(icp_ocfDrvDriverId);
+
+	if (ICP_WORKQUEUE_NULL_CHECK(icp_ocfDrvFreeLacSessionWorkQ)) {
+		DPRINTK("%s: workqueue already "
+			"destroyed, therefore module exit "
+			" function already called. Exiting.\n", __FUNCTION__);
+		return ICP_MODULE_EXIT_FUNC_RETURN_VAL;
+	}
+	/*If any sessions are waiting to be deregistered, do that. This also
+	   flushes the work queue */
+	ICP_WORKQUEUE_DESTROY(icp_ocfDrvFreeLacSessionWorkQ);
+
+	/*ENTER CRITICAL SECTION */
+	icp_spin_lockbh_lock(&icp_ocfDrvSymSessInfoListSpinlock);
+
+	ICP_LIST_FOR_EACH_ENTRY_SAFE(tempSessionData, sessionData,
+				     &icp_ocfDrvGlobalSymListHead, listNode) {
+		for (i = 0; i < num_dereg_retries; i++) {
+			/*No harm if bad input - LAC will handle error cases */
+			if (ICP_SESSION_RUNNING == tempSessionData->inUse) {
+				lacStatus =
+				    cpaCySymRemoveSession
+				    (CPA_INSTANCE_HANDLE_SINGLE,
+				     tempSessionData->sessHandle);
+				if (CPA_STATUS_SUCCESS == lacStatus) {
+					/* Succesfully deregistered */
+					break;
+				} else if (CPA_STATUS_RETRY != lacStatus) {
+					icp_atomic_inc
+					    (&lac_session_failed_dereg_count);
+					break;
+				}
+
+				/*schedule_timout returns the time left for completion if
+				 * this task is set to TASK_INTERRUPTIBLE */
+				remaining_delay_time_in_jiffies =
+				    dereg_retry_delay_in_jiffies;
+				while (0 > remaining_delay_time_in_jiffies) {
+					remaining_delay_time_in_jiffies =
+					    icp_schedule_timeout
+					    (&icp_ocfDrvSymSessInfoListSpinlock,
+					     remaining_delay_time_in_jiffies);
+				}
+
+				DPRINTK
+				    ("%s(): Retry %d to deregistrate the session\n",
+				     __FUNCTION__, i);
+			}
+		}
+
+		/*remove from current list */
+		ICP_LIST_DEL(tempSessionData, listNode);
+		/*add to free mem linked list */
+		ICP_LIST_ADD(tempSessionData,
+			     &icp_ocfDrvGlobalSymListHead_FreeMemList,
+			     listNode);
+
+	}
+
+	/*EXIT CRITICAL SECTION */
+	icp_spin_lockbh_unlock(&icp_ocfDrvSymSessInfoListSpinlock);
+
+	/*set back to initial values */
+	sessionData = NULL;
+	/*still have a reference in our list! */
+	tempSessionData = NULL;
+	/*free memory */
+
+	ICP_LIST_FOR_EACH_ENTRY_SAFE(tempSessionData, sessionData,
+				     &icp_ocfDrvGlobalSymListHead_FreeMemList,
+				     listNode) {
+
+		ICP_LIST_DEL(tempSessionData, listNode);
+		/* Free allocated CpaCySymSessionCtx */
+		if (NULL != tempSessionData->sessHandle) {
+			icp_kfree(tempSessionData->sessHandle);
+		}
+		memset(tempSessionData, 0, sizeof(struct icp_drvSessionData));
+		ICP_CACHE_FREE(drvSessionData_zone, tempSessionData);
+	}
+
+	if (0 != icp_atomic_read(&lac_session_failed_dereg_count)) {
+		DPRINTK("%s(): %d LAC sessions were not deregistered "
+			"correctly. This is not a clean exit! \n",
+			__FUNCTION__,
+			icp_atomic_read(&lac_session_failed_dereg_count));
+	}
+
+	icp_ocfDrvFreeCaches();
+	icp_ocfDrvDriverId = INVALID_DRIVER_ID;
+
+	icp_spin_lock_destroy(&icp_ocfDrvSymSessInfoListSpinlock);
+
+	/* Shutdown the Cryptographic component */
+	lacStatus = cpaCyStopInstance(CPA_INSTANCE_HANDLE_SINGLE);
+	if (CPA_STATUS_SUCCESS != lacStatus) {
+		DPRINTK("%s(): Failed to stop instance of the "
+			"Cryptographic component.(status == %d)\n",
+			__FUNCTION__, lacStatus);
+	}
+
+	return ICP_MODULE_EXIT_FUNC_RETURN_VAL;
+}
+
+/* Name        : icp_ocfDrvFreeCaches
+ *
+ * Description : This function deregisters all slab caches
+ */
+static void icp_ocfDrvFreeCaches(void)
+{
+	icp_atomic_set(&icp_ocfDrvIsExiting, 1);
+
+	/*Sym Zones */
+	ICP_CACHE_DESTROY(drvSessionData_zone);
+	ICP_CACHE_DESTROY(drvOpData_zone);
+
+	/*Asym zones */
+	ICP_CACHE_DESTROY(drvDH_zone);
+	ICP_CACHE_DESTROY(drvLnModExp_zone);
+	ICP_CACHE_DESTROY(drvRSADecrypt_zone);
+	ICP_CACHE_DESTROY(drvRSAPrivateKey_zone);
+	ICP_CACHE_DESTROY(drvDSARSSignKValue_zone);
+	ICP_CACHE_DESTROY(drvDSARSSign_zone);
+	ICP_CACHE_DESTROY(drvDSAVerify_zone);
+
+	/*FlatBuffer and BufferList Zones */
+	ICP_CACHE_DESTROY(drvFlatBuffer_zone);
+
+}
+
+/* Name        : icp_ocfDrvDeregRetry
+ *
+ * Description : This function will try to farm the session deregistration
+ * off to a work queue. If it fails, nothing more can be done and it
+ * returns an error
+ */
+int icp_ocfDrvDeregRetry(CpaCySymSessionCtx sessionToDeregister)
+{
+	struct icp_ocfDrvFreeLacSession *workstore = NULL;
+
+	DPRINTK("%s(): Retry - Deregistering session (%p)\n",
+		__FUNCTION__, sessionToDeregister);
+
+	/*make sure the session is not available to be allocated during this
+	   process */
+	icp_atomic_inc(&lac_session_failed_dereg_count);
+
+	/*Farm off to work queue */
+	workstore =
+	    icp_kmalloc(sizeof(struct icp_ocfDrvFreeLacSession), ICP_M_NOWAIT);
+	if (NULL == workstore) {
+		DPRINTK("%s(): unable to free session - no memory available "
+			"for work queue\n", __FUNCTION__);
+		return ENOMEM;
+	}
+
+	workstore->sessionToDeregister = sessionToDeregister;
+
+	icp_init_work(&(workstore->work),
+		      icp_ocfDrvDeferedFreeLacSessionTaskFn, workstore);
+
+	ICP_WORKQUEUE_ENQUEUE(icp_ocfDrvFreeLacSessionWorkQ,
+			      &(workstore->work));
+
+	return ICP_OCF_DRV_STATUS_SUCCESS;
+
+}
+
+/* Name        : icp_ocfDrvDeferedFreeLacSessionProcess
+ *
+ * Description : This function will retry (module input parameter)
+ * 'num_dereg_retries' times to deregister any symmetric session that recieves a
+ * CPA_STATUS_RETRY message from the LAC component. This function is run in
+ * Thread context because it is called from a worker thread
+ */
+void icp_ocfDrvDeferedFreeLacSessionProcess(void *arg)
+{
+	struct icp_ocfDrvFreeLacSession *workstore = NULL;
+	CpaCySymSessionCtx sessionToDeregister = NULL;
+	int i = 0;
+	int remaining_delay_time_in_jiffies = 0;
+	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
+
+	workstore = (struct icp_ocfDrvFreeLacSession *)arg;
+	if (NULL == workstore) {
+		DPRINTK("%s() function called with null parameter \n",
+			__FUNCTION__);
+		return;
+	}
+
+	sessionToDeregister = workstore->sessionToDeregister;
+	icp_kfree(workstore);
+
+	/*if exiting, give deregistration one more blast only */
+	if (icp_atomic_read(&icp_ocfDrvIsExiting) == CPA_TRUE) {
+		lacStatus = cpaCySymRemoveSession(CPA_INSTANCE_HANDLE_SINGLE,
+						  sessionToDeregister);
+
+		if (lacStatus != CPA_STATUS_SUCCESS) {
+			DPRINTK("%s() Failed to Dereg LAC session %p "
+				"during module exit\n", __FUNCTION__,
+				sessionToDeregister);
+			return;
+		}
+
+		icp_atomic_dec(&lac_session_failed_dereg_count);
+		return;
+	}
+
+	for (i = 0; i <= num_dereg_retries; i++) {
+		lacStatus = cpaCySymRemoveSession(CPA_INSTANCE_HANDLE_SINGLE,
+						  sessionToDeregister);
+
+		if (lacStatus == CPA_STATUS_SUCCESS) {
+			icp_atomic_dec(&lac_session_failed_dereg_count);
+			return;
+		}
+		if (lacStatus != CPA_STATUS_RETRY) {
+			DPRINTK("%s() Failed to deregister session - lacStatus "
+				" = %d", __FUNCTION__, lacStatus);
+			break;
+		}
+
+		/*schedule_timout returns the time left for completion if this
+		   task is set to TASK_INTERRUPTIBLE */
+		remaining_delay_time_in_jiffies = dereg_retry_delay_in_jiffies;
+		while (0 < remaining_delay_time_in_jiffies) {
+			remaining_delay_time_in_jiffies =
+			    icp_schedule_timeout(NULL,
+						 remaining_delay_time_in_jiffies);
+		}
+
+	}
+
+	DPRINTK("%s(): Unable to deregister session\n", __FUNCTION__);
+	DPRINTK("%s(): Number of unavailable LAC sessions = %d\n", __FUNCTION__,
+		icp_atomic_read(&lac_session_failed_dereg_count));
+}
+
+/* Name        : icp_ocfDrvPtrAndLenToFlatBuffer
+ *
+ * Description : This function converts a "pointer and length" buffer
+ * structure to Fredericksburg Flat Buffer (CpaFlatBuffer) format.
+ *
+ * This function assumes that the data passed in are valid.
+ */
+inline void
+icp_ocfDrvPtrAndLenToFlatBuffer(void *pData, uint32_t len,
+				CpaFlatBuffer * pFlatBuffer)
+{
+	pFlatBuffer->pData = pData;
+	pFlatBuffer->dataLenInBytes = len;
+}
+
+/* Name        : icp_ocfDrvPtrAndLenToBufferList
+ *
+ * Description : This function converts a "pointer and length" buffer
+ * structure to Fredericksburg Scatter/Gather Buffer (CpaBufferList) format.
+ *
+ * This function assumes that the data passed in are valid.
+ */
+inline void
+icp_ocfDrvPtrAndLenToBufferList(void *pDataIn, uint32_t length,
+				CpaBufferList * pBufferList)
+{
+	pBufferList->numBuffers = 1;
+	pBufferList->pBuffers->pData = pDataIn;
+	pBufferList->pBuffers->dataLenInBytes = length;
+}
+
+/* Name        : icp_ocfDrvBufferListToPtrAndLen
+ *
+ * Description : This function converts Fredericksburg Scatter/Gather Buffer
+ * (CpaBufferList) format to a "pointer and length" buffer structure.
+ *
+ * This function assumes that the data passed in are valid.
+ */
+inline void
+icp_ocfDrvBufferListToPtrAndLen(CpaBufferList * pBufferList,
+				void **ppDataOut, uint32_t * pLength)
+{
+	*ppDataOut = pBufferList->pBuffers->pData;
+	*pLength = pBufferList->pBuffers->dataLenInBytes;
+}
+
+/* Name        : icp_ocfDrvBufferListMemInfo
+ *
+ * Description : This function will set the number of flat buffers in
+ * bufferlist, the size of memory to allocate for the pPrivateMetaData
+ * member of the CpaBufferList.
+ */
+int
+icp_ocfDrvBufferListMemInfo(uint16_t numBuffers,
+			    struct icp_drvBuffListInfo *buffListInfo)
+{
+	buffListInfo->numBuffers = numBuffers;
+
+	if (CPA_STATUS_SUCCESS !=
+	    cpaCyBufferListGetMetaSize(CPA_INSTANCE_HANDLE_SINGLE,
+				       buffListInfo->numBuffers,
+				       &(buffListInfo->metaSize))) {
+		EPRINTK("%s() Failed to get buffer list meta size.\n",
+			__FUNCTION__);
+		return ICP_OCF_DRV_STATUS_FAIL;
+	}
+
+	return ICP_OCF_DRV_STATUS_SUCCESS;
+}
+
+/* Name        : icp_ocfDrvFreeFlatBuffer
+ *
+ * Description : This function will deallocate flat buffer.
+ */
+inline void icp_ocfDrvFreeFlatBuffer(CpaFlatBuffer * pFlatBuffer)
+{
+	if (pFlatBuffer != NULL) {
+		memset(pFlatBuffer, 0, sizeof(CpaFlatBuffer));
+		ICP_CACHE_FREE(drvFlatBuffer_zone, pFlatBuffer);
+	}
+}
+
+/* Name        : icp_ocfDrvAllocMetaData
+ *
+ * Description : This function will allocate memory for the
+ * pPrivateMetaData member of CpaBufferList.
+ */
+inline int
+icp_ocfDrvAllocMetaData(CpaBufferList * pBufferList,
+			struct icp_drvOpData *pOpData)
+{
+	Cpa32U metaSize = 0;
+
+	if (pBufferList->numBuffers <= ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS) {
+		uint8_t *pOpDataStartAddr = (uint8_t *) pOpData;
+
+		if (0 == defBuffListInfo.metaSize) {
+			pBufferList->pPrivateMetaData = NULL;
+			return ICP_OCF_DRV_STATUS_SUCCESS;
+		}
+		/*
+		 * The meta data allocation has been included as part of the
+		 * op data.  It has been pre-allocated in memory just after the
+		 * icp_drvOpData structure.
+		 */
+		pBufferList->pPrivateMetaData = (void *)(pOpDataStartAddr +
+							 sizeof(struct
+								icp_drvOpData));
+	} else {
+		if (CPA_STATUS_SUCCESS !=
+		    cpaCyBufferListGetMetaSize(CPA_INSTANCE_HANDLE_SINGLE,
+					       pBufferList->numBuffers,
+					       &metaSize)) {
+			EPRINTK("%s() Failed to get buffer list meta size.\n",
+				__FUNCTION__);
+			return ICP_OCF_DRV_STATUS_FAIL;
+		}
+
+		if (0 == metaSize) {
+			pBufferList->pPrivateMetaData = NULL;
+			return ICP_OCF_DRV_STATUS_SUCCESS;
+		}
+
+		pBufferList->pPrivateMetaData =
+		    icp_kmalloc(metaSize, ICP_M_NOWAIT);
+	}
+	if (NULL == pBufferList->pPrivateMetaData) {
+		EPRINTK("%s() Failed to allocate pPrivateMetaData.\n",
+			__FUNCTION__);
+		return ICP_OCF_DRV_STATUS_FAIL;
+	}
+
+	return ICP_OCF_DRV_STATUS_SUCCESS;
+}
+
+/* Name        : icp_ocfDrvFreeMetaData
+ *
+ * Description : This function will deallocate pPrivateMetaData memory.
+ */
+inline void icp_ocfDrvFreeMetaData(CpaBufferList * pBufferList)
+{
+	if (NULL == pBufferList->pPrivateMetaData) {
+		return;
+	}
+
+	/*
+	 * Only free the meta data if the BufferList has more than
+	 * ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS number of buffers.
+	 * Otherwise, the meta data shall be freed when the icp_drvOpData is
+	 * freed.
+	 */
+	if (ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS < pBufferList->numBuffers) {
+		icp_kfree(pBufferList->pPrivateMetaData);
+	}
+}
+
+/* Module declaration, init and exit functions */
+ICP_DECLARE_MODULE(icp_ocf, icp_ocfDrvInit, icp_ocfDrvExit);
+ICP_MODULE_DESCRIPTION("OCF Driver for Intel Quick Assist crypto acceleration");
+ICP_MODULE_VERSION(icp_ocf, ICP_OCF_VER_MJR);
+ICP_MODULE_LICENSE("Dual BSD/GPL");
+ICP_MODULE_AUTHOR("Intel");
+
+/* Module parameters */
+ICP_MODULE_PARAM_INT(icp_ocf, num_dereg_retries,
+		     "Number of times to retry LAC Sym Session Deregistration. "
+		     "Default 10, Max 100");
+ICP_MODULE_PARAM_INT(icp_ocf, dereg_retry_delay_in_jiffies, "Delay in jiffies "
+		     "(added to a schedule() function call) before a LAC Sym "
+		     "Session Dereg is retried. Default 10");
+ICP_MODULE_PARAM_INT(icp_ocf, max_sessions,
+		     "This sets the maximum number of sessions "
+		     "between OCF and this driver. If this value is set to zero,"
+		     "max session count checking is disabled. Default is zero(0)");
+
+/* Module dependencies */
+#define MODULE_MIN_VER	1
+#define CRYPTO_MAX_VER	3
+#define LAC_MAX_VER	2
+
+ICP_MODULE_DEPEND(icp_ocf, crypto, MODULE_MIN_VER, MODULE_MIN_VER,
+		  CRYPTO_MAX_VER);
+ICP_MODULE_DEPEND(icp_ocf, cryptodev, MODULE_MIN_VER, MODULE_MIN_VER,
+		  CRYPTO_MAX_VER);
+ICP_MODULE_DEPEND(icp_ocf, icp_crypto, MODULE_MIN_VER, MODULE_MIN_VER,
+		  LAC_MAX_VER);
diff --git a/crypto/ocf/ep80579/icp_ocf.h b/crypto/ocf/ep80579/icp_ocf.h
new file mode 100644
index 000000000000..68b479c3bda6
--- /dev/null
+++ b/crypto/ocf/ep80579/icp_ocf.h
@@ -0,0 +1,376 @@
+/***************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *   The full GNU General Public License is included in this distribution
+ *   in the file called LICENSE.GPL.
+ *
+ *   Contact Information:
+ *   Intel Corporation
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ *  version: Security.L.1.0.2-229
+ *
+ ***************************************************************************/
+
+/*
+ * OCF driver header file for the Intel ICP processor.
+ */
+
+#ifndef ICP_OCF_H_
+#define ICP_OCF_H_
+
+#include <cpa.h>
+#include <cpa_cy_im.h>
+#include <cpa_cy_sym.h>
+#include <cpa_cy_rand.h>
+#include <cpa_cy_dh.h>
+#include <cpa_cy_rsa.h>
+#include <cpa_cy_ln.h>
+#include <cpa_cy_common.h>
+#include <cpa_cy_dsa.h>
+
+#include "icp_os.h"
+
+#define NUM_BITS_IN_BYTE (8)
+#define NUM_BITS_IN_BYTE_MINUS_ONE (NUM_BITS_IN_BYTE -1)
+#define INVALID_DRIVER_ID (-1)
+#define RETURN_RAND_NUM_GEN_FAILED (-1)
+
+/*This is the max block cipher initialisation vector*/
+#define MAX_IV_LEN_IN_BYTES (20)
+/*This is used to check whether the OCF to this driver session limit has
+  been disabled*/
+#define NO_OCF_TO_DRV_MAX_SESSIONS		(0)
+
+/*OCF values mapped here*/
+#define ICP_SHA1_DIGEST_SIZE_IN_BYTES 		(SHA1_HASH_LEN)
+#define ICP_SHA256_DIGEST_SIZE_IN_BYTES 	(SHA2_256_HASH_LEN)
+#define ICP_SHA384_DIGEST_SIZE_IN_BYTES 	(SHA2_384_HASH_LEN)
+#define ICP_SHA512_DIGEST_SIZE_IN_BYTES 	(SHA2_512_HASH_LEN)
+#define ICP_MD5_DIGEST_SIZE_IN_BYTES 		(MD5_HASH_LEN)
+#define ARC4_COUNTER_LEN 			(ARC4_BLOCK_LEN)
+
+#define OCF_REGISTRATION_STATUS_SUCCESS 	(0)
+#define OCF_ZERO_FUNCTIONALITY_REGISTERED 	(0)
+#define ICP_OCF_DRV_NO_CRYPTO_PROCESS_ERROR 	(0)
+#define ICP_OCF_DRV_STATUS_SUCCESS 		(0)
+#define ICP_OCF_DRV_STATUS_FAIL 		(1)
+
+/*Turn on/off debug options*/
+#define ICP_OCF_PRINT_DEBUG_MESSAGES		(0)
+#define ICP_OCF_PRINT_KERN_ALERT		(1)
+#define ICP_OCF_PRINT_KERN_ERRS			(1)
+
+#if ICP_OCF_PRINT_DEBUG_MESSAGES == 1
+#define DPRINTK(args...)      \
+{			      \
+                ICP_IPRINTK(args); \
+}
+
+#else				//ICP_OCF_PRINT_DEBUG_MESSAGES == 1
+
+#define DPRINTK(args...)
+
+#endif				//ICP_OCF_PRINT_DEBUG_MESSAGES == 1
+
+#if ICP_OCF_PRINT_KERN_ALERT == 1
+#define APRINTK(args...)      						\
+{			      						\
+       ICP_APRINTK(args);						\
+}
+
+#else				//ICP_OCF_PRINT_KERN_ALERT == 1
+
+#define APRINTK(args...)
+
+#endif				//ICP_OCF_PRINT_KERN_ALERT == 1
+
+#if ICP_OCF_PRINT_KERN_ERRS == 1
+#define EPRINTK(args...)      \
+{			      \
+       ICP_EPRINTK(args); \
+}
+
+#else				//ICP_OCF_PRINT_KERN_ERRS == 1
+
+#define EPRINTK(args...)
+
+#endif				//ICP_OCF_PRINT_KERN_ERRS == 1
+
+#define IPRINTK(args...)      \
+{			      \
+      ICP_IPRINTK(args); \
+}
+
+/*DSA Prime Q size in bytes (as defined in the standard) */
+#define DSA_RS_SIGN_PRIMEQ_SIZE_IN_BYTES	(20)
+
+#define BITS_TO_BYTES(bytes, bits) 					\
+	bytes = (bits + NUM_BITS_IN_BYTE_MINUS_ONE) / NUM_BITS_IN_BYTE
+
+typedef enum {
+	ICP_OCF_DRV_ALG_CIPHER = 0,
+	ICP_OCF_DRV_ALG_HASH
+} icp_ocf_drv_alg_type_t;
+
+typedef ICP_LIST_HEAD(icp_drvSessionListHead_s,
+		      icp_drvSessionData) icp_drvSessionListHead_t;
+
+/*Values used to derisk chances of performs being called against
+deregistered sessions (for which the slab page has been reclaimed)
+This is not a fix - since page frames are reclaimed from a slab, one cannot
+rely on that memory not being re-used by another app.*/
+typedef enum {
+	ICP_SESSION_INITIALISED = 0x5C5C5C,
+	ICP_SESSION_RUNNING = 0x005C00,
+	ICP_SESSION_DEREGISTERED = 0xC5C5C5
+} usage_derisk;
+
+/* This struct is required for deferred session
+ deregistration as a work queue function can
+ only have one argument*/
+struct icp_ocfDrvFreeLacSession {
+	CpaCySymSessionCtx sessionToDeregister;
+	icp_workstruct work;
+};
+
+/*
+This is the OCF<->OCF_DRV session object:
+
+1.listNode
+  The first member is a listNode. These session objects are added to a linked
+  list in order to make it easier to remove them all at session exit time.
+
+2.inUse
+  The second member is used to give the session object state and derisk the
+  possibility of OCF batch calls executing against a deregistered session (as
+  described above).
+
+3.sessHandle
+  The third member is a LAC<->OCF_DRV session handle (initialised with the first
+  perform request for that session).
+
+4.lacSessCtx
+  The fourth is the LAC session context. All the parameters for this structure
+  are only known when the first perform request for this session occurs. That is
+  why the OCF EP80579 Driver only registers a new LAC session at perform time
+*/
+struct icp_drvSessionData {
+	ICP_LIST_ENTRY(icp_drvSessionData) listNode;
+	usage_derisk inUse;
+	CpaCySymSessionCtx sessHandle;
+	CpaCySymSessionSetupData lacSessCtx;
+};
+
+/* These are all defined in icp_common.c */
+extern icp_atomic_t lac_session_failed_dereg_count;
+extern icp_atomic_t icp_ocfDrvIsExiting;
+extern icp_atomic_t num_ocf_to_drv_registered_sessions;
+
+extern int32_t icp_ocfDrvDriverId;
+
+extern icp_drvSessionListHead_t icp_ocfDrvGlobalSymListHead;
+extern icp_drvSessionListHead_t icp_ocfDrvGlobalSymListHead_FreeMemList;
+extern icp_workqueue *icp_ocfDrvFreeLacSessionWorkQ;
+extern icp_spinlock_t icp_ocfDrvSymSessInfoListSpinlock;
+
+/*Slab zones for symettric functionality, instantiated in icp_common.c*/
+extern icp_kmem_cache drvSessionData_zone;
+extern icp_kmem_cache drvOpData_zone;
+
+/*Slabs zones for asymettric functionality, instantiated in icp_common.c*/
+extern icp_kmem_cache drvDH_zone;
+extern icp_kmem_cache drvLnModExp_zone;
+extern icp_kmem_cache drvRSADecrypt_zone;
+extern icp_kmem_cache drvRSAPrivateKey_zone;
+extern icp_kmem_cache drvDSARSSign_zone;
+extern icp_kmem_cache drvDSARSSignKValue_zone;
+extern icp_kmem_cache drvDSAVerify_zone;
+
+/* Module parameters defined in icp_cpmmon.c*/
+
+/* Module parameters - gives the number of times LAC deregistration shall be
+   re-tried */
+extern int num_dereg_retries;
+
+/* Module parameter - gives the delay time in jiffies before a LAC session
+   shall be attempted to be deregistered again */
+extern int dereg_retry_delay_in_jiffies;
+
+/* Module parameter - gives the maximum number of sessions possible between
+   OCF and the OCF EP80579 Driver. If set to zero, there is no limit.*/
+extern int max_sessions;
+
+/*Slab zones for flatbuffers and bufferlist*/
+extern icp_kmem_cache drvFlatBuffer_zone;
+
+#define ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS     (16)
+
+struct icp_drvBuffListInfo {
+	Cpa16U numBuffers;
+	Cpa32U metaSize;
+	Cpa32U metaOffset;
+	Cpa32U buffListSize;
+};
+
+extern struct icp_drvBuffListInfo defBuffListInfo;
+
+/* This struct is used to keep a reference to the relevant node in the list
+   of sessionData structs, to the buffer type required by OCF and to the OCF
+   provided crp struct that needs to be returned. All this info is needed in
+   the callback function.*/
+struct icp_drvOpData {
+	CpaCySymOpData lacOpData;
+	uint32_t digestSizeInBytes;
+	struct cryptop *crp;
+	uint8_t bufferType;
+	uint8_t ivData[MAX_IV_LEN_IN_BYTES];
+	uint16_t numBufferListArray;
+	CpaBufferList srcBuffer;
+	CpaFlatBuffer bufferListArray[ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS];
+	CpaBoolean verifyResult;
+};
+
+/* Create a new session between OCF and this driver*/
+int icp_ocfDrvNewSession(icp_device_t dev, uint32_t * sild,
+			 struct cryptoini *cri);
+
+/* Free a session between this driver and the Quick Assist Framework*/
+int icp_ocfDrvFreeLACSession(icp_device_t dev, uint64_t sid);
+
+/* Defer freeing a Quick Assist session*/
+void icp_ocfDrvDeferedFreeLacSessionProcess(void *arg);
+
+/* Process OCF cryptographic request for a symmetric algorithm*/
+int icp_ocfDrvSymProcess(icp_device_t dev, struct cryptop *crp, int hint);
+
+/* Process OCF cryptographic request for an asymmetric algorithm*/
+int icp_ocfDrvPkeProcess(icp_device_t dev, struct cryptkop *krp, int hint);
+
+/* Populate a buffer with random data*/
+int icp_ocfDrvReadRandom(void *arg, uint32_t * buf, int maxwords);
+
+/* Retry Quick Assist session deregistration*/
+int icp_ocfDrvDeregRetry(CpaCySymSessionCtx sessionToDeregister);
+
+/* Convert an OS scatter gather list to a CPA buffer list*/
+int icp_ocfDrvPacketBuffToBufferList(icp_packet_buffer_t * pPacketBuffer,
+				     CpaBufferList * bufferList);
+
+/* Convert a CPA buffer list to an OS scatter gather list*/
+int icp_ocfDrvBufferListToPacketBuff(CpaBufferList * bufferList,
+				     icp_packet_buffer_t ** pPacketBuffer);
+
+/* Get the number of buffers in an OS scatter gather list*/
+uint16_t icp_ocfDrvGetPacketBuffFrags(icp_packet_buffer_t * pPacketBuffer);
+
+/* Convert a single OS buffer to a CPA Flat Buffer*/
+void icp_ocfDrvSinglePacketBuffToFlatBuffer(icp_packet_buffer_t * pPacketBuffer,
+					    CpaFlatBuffer * pFlatBuffer);
+
+/* Add pointer and length to a CPA Flat Buffer structure*/
+void icp_ocfDrvPtrAndLenToFlatBuffer(void *pData, uint32_t len,
+				     CpaFlatBuffer * pFlatBuffer);
+
+/* Convert pointer and length values to a CPA buffer list*/
+void icp_ocfDrvPtrAndLenToBufferList(void *pDataIn, uint32_t length,
+				     CpaBufferList * pBufferList);
+
+/* Convert a CPA buffer list to pointer and length values*/
+void icp_ocfDrvBufferListToPtrAndLen(CpaBufferList * pBufferList,
+				     void **ppDataOut, uint32_t * pLength);
+
+/* Set the number of flat buffers in bufferlist and the size of memory
+   to allocate for the pPrivateMetaData member of the CpaBufferList.*/
+int icp_ocfDrvBufferListMemInfo(uint16_t numBuffers,
+				struct icp_drvBuffListInfo *buffListInfo);
+
+/* Find pointer position of the digest within an OS scatter gather list*/
+uint8_t *icp_ocfDrvPacketBufferDigestPointerFind(struct icp_drvOpData
+						 *drvOpData,
+						 int offsetInBytes,
+						 uint32_t digestSizeInBytes);
+
+/*This top level function is used to find a pointer to where a digest is
+  stored/needs to be inserted. */
+uint8_t *icp_ocfDrvDigestPointerFind(struct icp_drvOpData *drvOpData,
+				     struct cryptodesc *crp_desc);
+
+/* Free a CPA flat buffer*/
+void icp_ocfDrvFreeFlatBuffer(CpaFlatBuffer * pFlatBuffer);
+
+/* This function will allocate memory for the pPrivateMetaData
+   member of CpaBufferList. */
+int icp_ocfDrvAllocMetaData(CpaBufferList * pBufferList,
+			    struct icp_drvOpData *pOpData);
+
+/* Free data allocated for the pPrivateMetaData
+   member of CpaBufferList.*/
+void icp_ocfDrvFreeMetaData(CpaBufferList * pBufferList);
+
+#define ICP_CACHE_CREATE(cache_ID, cache_name) \
+	icp_kmem_cache_create(cache_ID, sizeof(cache_name),ICP_KERNEL_CACHE_ALIGN,\
+	ICP_KERNEL_CACHE_NOINIT)
+
+#define ICP_CACHE_FREE(args...) \
+	icp_kmem_cache_free (args)
+
+#define ICP_CACHE_DESTROY(slab_zone)\
+{\
+        if(NULL != slab_zone){\
+                icp_kmem_cache_destroy(slab_zone);\
+                slab_zone = NULL;\
+        }\
+}
+
+#endif
+/* ICP_OCF_H_ */
diff --git a/crypto/ocf/ep80579/icp_sym.c b/crypto/ocf/ep80579/icp_sym.c
new file mode 100644
index 000000000000..a3edc43f4e30
--- /dev/null
+++ b/crypto/ocf/ep80579/icp_sym.c
@@ -0,0 +1,1153 @@
+/***************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *   The full GNU General Public License is included in this distribution
+ *   in the file called LICENSE.GPL.
+ *
+ *   Contact Information:
+ *   Intel Corporation
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2007,2008,2009 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ *  version: Security.L.1.0.2-229
+ *
+ ***************************************************************************/
+/*
+ * An OCF module that uses the API for IntelÂ® QuickAssist Technology to do the
+ * cryptography.
+ *
+ * This driver requires the ICP Access Library that is available from Intel in
+ * order to operate.
+ */
+
+#include "icp_ocf.h"
+
+/*This is the call back function for all symmetric cryptographic processes.
+  Its main functionality is to free driver crypto operation structure and to
+  call back to OCF*/
+static void
+icp_ocfDrvSymCallBack(void *callbackTag,
+		      CpaStatus status,
+		      const CpaCySymOp operationType,
+		      void *pOpData,
+		      CpaBufferList * pDstBuffer, CpaBoolean verifyResult);
+
+/*This function is used to extract crypto processing information from the OCF
+  inputs, so as that it may be passed onto LAC*/
+static int
+icp_ocfDrvProcessDataSetup(struct icp_drvOpData *drvOpData,
+			   struct cryptodesc *crp_desc);
+
+/*This function checks whether the crp_desc argument pertains to a digest or a
+  cipher operation*/
+static int icp_ocfDrvAlgCheck(struct cryptodesc *crp_desc);
+
+/*This function copies all the passed in session context information and stores
+  it in a LAC context structure*/
+static int
+icp_ocfDrvAlgorithmSetup(struct cryptoini *cri,
+			 CpaCySymSessionSetupData * lacSessCtx);
+
+/*This function is used to free an OCF->OCF_DRV session object*/
+static void icp_ocfDrvFreeOCFSession(struct icp_drvSessionData *sessionData);
+
+/*max IOV buffs supported in a UIO structure*/
+#define NUM_IOV_SUPPORTED		(1)
+
+/* Name        : icp_ocfDrvSymCallBack
+ *
+ * Description : When this function returns it signifies that the LAC
+ * component has completed the relevant symmetric operation.
+ *
+ * Notes : The callbackTag is a pointer to an icp_drvOpData. This memory
+ * object was passed to LAC for the cryptographic processing and contains all
+ * the relevant information for cleaning up buffer handles etc. so that the
+ * OCF EP80579 Driver portion of this crypto operation can be fully completed.
+ */
+static void
+icp_ocfDrvSymCallBack(void *callbackTag,
+		      CpaStatus status,
+		      const CpaCySymOp operationType,
+		      void *pOpData,
+		      CpaBufferList * pDstBuffer, CpaBoolean verifyResult)
+{
+	struct cryptop *crp = NULL;
+	struct icp_drvOpData *temp_drvOpData =
+	    (struct icp_drvOpData *)callbackTag;
+	uint64_t *tempBasePtr = NULL;
+	uint32_t tempLen = 0;
+
+	if (NULL == temp_drvOpData) {
+		DPRINTK("%s(): The callback from the LAC component"
+			" has failed due to Null userOpaque data"
+			"(status == %d).\n", __FUNCTION__, status);
+		DPRINTK("%s(): Unable to call OCF back! \n", __FUNCTION__);
+		return;
+	}
+
+	crp = temp_drvOpData->crp;
+	crp->crp_etype = ICP_OCF_DRV_NO_CRYPTO_PROCESS_ERROR;
+
+	if (NULL == pOpData) {
+		DPRINTK("%s(): The callback from the LAC component"
+			" has failed due to Null Symmetric Op data"
+			"(status == %d).\n", __FUNCTION__, status);
+		crp->crp_etype = ECANCELED;
+		crypto_done(crp);
+		return;
+	}
+
+	if (NULL == pDstBuffer) {
+		DPRINTK("%s(): The callback from the LAC component"
+			" has failed due to Null Dst Bufferlist data"
+			"(status == %d).\n", __FUNCTION__, status);
+		crp->crp_etype = ECANCELED;
+		crypto_done(crp);
+		return;
+	}
+
+	if (CPA_STATUS_SUCCESS == status) {
+
+		if (temp_drvOpData->bufferType == ICP_CRYPTO_F_PACKET_BUF) {
+			if (ICP_OCF_DRV_STATUS_SUCCESS !=
+			    icp_ocfDrvBufferListToPacketBuff(pDstBuffer,
+							     (icp_packet_buffer_t
+							      **)
+							     & (crp->crp_buf))) {
+				EPRINTK("%s(): BufferList to SkBuff "
+					"conversion error.\n", __FUNCTION__);
+				crp->crp_etype = EPERM;
+			}
+		} else {
+			icp_ocfDrvBufferListToPtrAndLen(pDstBuffer,
+							(void **)&tempBasePtr,
+							&tempLen);
+			crp->crp_olen = (int)tempLen;
+		}
+
+	} else {
+		DPRINTK("%s(): The callback from the LAC component has failed"
+			"(status == %d).\n", __FUNCTION__, status);
+
+		crp->crp_etype = ECANCELED;
+	}
+
+	if (temp_drvOpData->numBufferListArray >
+	    ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS) {
+		icp_kfree(pDstBuffer->pBuffers);
+	}
+	icp_ocfDrvFreeMetaData(pDstBuffer);
+	ICP_CACHE_FREE(drvOpData_zone, temp_drvOpData);
+
+	/* Invoke the OCF callback function */
+	crypto_done(crp);
+
+	return;
+}
+
+/* Name        : icp_ocfDrvNewSession
+ *
+ * Description : This function will create a new Driver<->OCF session
+ *
+ * Notes : LAC session registration happens during the first perform call.
+ * That is the first time we know all information about a given session.
+ */
+int icp_ocfDrvNewSession(icp_device_t dev, uint32_t * sid,
+			 struct cryptoini *cri)
+{
+	struct icp_drvSessionData *sessionData = NULL;
+	uint32_t delete_session = 0;
+
+	/* The SID passed in should be our driver ID. We can return the     */
+	/* local ID (LID) which is a unique identifier which we can use     */
+	/* to differentiate between the encrypt/decrypt LAC session handles */
+	if (NULL == sid) {
+		EPRINTK("%s(): Invalid input parameters - NULL sid.\n",
+			__FUNCTION__);
+		return EINVAL;
+	}
+
+	if (NULL == cri) {
+		EPRINTK("%s(): Invalid input parameters - NULL cryptoini.\n",
+			__FUNCTION__);
+		return EINVAL;
+	}
+
+	if (icp_ocfDrvDriverId != *sid) {
+		EPRINTK("%s(): Invalid input parameters - bad driver ID\n",
+			__FUNCTION__);
+		EPRINTK("\t sid = 0x08%p \n \t cri = 0x08%p \n", sid, cri);
+		return EINVAL;
+	}
+
+	sessionData = icp_kmem_cache_zalloc(drvSessionData_zone, ICP_M_NOWAIT);
+	if (NULL == sessionData) {
+		DPRINTK("%s():No memory for Session Data\n", __FUNCTION__);
+		return ENOMEM;
+	}
+
+	/*ENTER CRITICAL SECTION */
+	icp_spin_lockbh_lock(&icp_ocfDrvSymSessInfoListSpinlock);
+	/*put this check in the spinlock so no new sessions can be added to the
+	   linked list when we are exiting */
+	if (CPA_TRUE == icp_atomic_read(&icp_ocfDrvIsExiting)) {
+		delete_session++;
+
+	} else if (NO_OCF_TO_DRV_MAX_SESSIONS != max_sessions) {
+		if (icp_atomic_read(&num_ocf_to_drv_registered_sessions) >=
+		    (max_sessions -
+		     icp_atomic_read(&lac_session_failed_dereg_count))) {
+			delete_session++;
+		} else {
+			icp_atomic_inc(&num_ocf_to_drv_registered_sessions);
+			/* Add to session data linked list */
+			ICP_LIST_ADD(sessionData, &icp_ocfDrvGlobalSymListHead,
+				     listNode);
+		}
+
+	} else if (NO_OCF_TO_DRV_MAX_SESSIONS == max_sessions) {
+		ICP_LIST_ADD(sessionData, &icp_ocfDrvGlobalSymListHead,
+			     listNode);
+	}
+
+	sessionData->inUse = ICP_SESSION_INITIALISED;
+
+	/*EXIT CRITICAL SECTION */
+	icp_spin_lockbh_unlock(&icp_ocfDrvSymSessInfoListSpinlock);
+
+	if (delete_session) {
+		DPRINTK("%s():No Session handles available\n", __FUNCTION__);
+		ICP_CACHE_FREE(drvSessionData_zone, sessionData);
+		return EPERM;
+	}
+
+	if (ICP_OCF_DRV_STATUS_SUCCESS !=
+	    icp_ocfDrvAlgorithmSetup(cri, &(sessionData->lacSessCtx))) {
+		DPRINTK("%s():algorithm not supported\n", __FUNCTION__);
+		icp_ocfDrvFreeOCFSession(sessionData);
+		return EINVAL;
+	}
+
+	if (cri->cri_next) {
+		if (cri->cri_next->cri_next != NULL) {
+			DPRINTK("%s():only two chained algorithms supported\n",
+				__FUNCTION__);
+			icp_ocfDrvFreeOCFSession(sessionData);
+			return EPERM;
+		}
+
+		if (ICP_OCF_DRV_STATUS_SUCCESS !=
+		    icp_ocfDrvAlgorithmSetup(cri->cri_next,
+					     &(sessionData->lacSessCtx))) {
+			DPRINTK("%s():second algorithm not supported\n",
+				__FUNCTION__);
+			icp_ocfDrvFreeOCFSession(sessionData);
+			return EINVAL;
+		}
+
+		sessionData->lacSessCtx.symOperation =
+		    CPA_CY_SYM_OP_ALGORITHM_CHAINING;
+	}
+
+	*sid = (uint32_t) sessionData;
+
+	return ICP_OCF_DRV_STATUS_SUCCESS;
+}
+
+/* Name        : icp_ocfDrvAlgorithmSetup
+ *
+ * Description : This function builds the session context data from the
+ * information supplied through OCF. Algorithm chain order and whether the
+ * session is Encrypt/Decrypt can only be found out at perform time however, so
+ * the session is registered with LAC at that time.
+ */
+static int
+icp_ocfDrvAlgorithmSetup(struct cryptoini *cri,
+			 CpaCySymSessionSetupData * lacSessCtx)
+{
+
+	lacSessCtx->sessionPriority = CPA_CY_PRIORITY_NORMAL;
+
+	switch (cri->cri_alg) {
+
+	case CRYPTO_NULL_CBC:
+		DPRINTK("%s(): NULL CBC\n", __FUNCTION__);
+		lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
+		lacSessCtx->cipherSetupData.cipherAlgorithm =
+		    CPA_CY_SYM_CIPHER_NULL;
+		lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
+		    cri->cri_klen / NUM_BITS_IN_BYTE;
+		lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
+		break;
+
+	case CRYPTO_DES_CBC:
+		DPRINTK("%s(): DES CBC\n", __FUNCTION__);
+		lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
+		lacSessCtx->cipherSetupData.cipherAlgorithm =
+		    CPA_CY_SYM_CIPHER_DES_CBC;
+		lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
+		    cri->cri_klen / NUM_BITS_IN_BYTE;
+		lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
+		break;
+
+	case CRYPTO_3DES_CBC:
+		DPRINTK("%s(): 3DES CBC\n", __FUNCTION__);
+		lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
+		lacSessCtx->cipherSetupData.cipherAlgorithm =
+		    CPA_CY_SYM_CIPHER_3DES_CBC;
+		lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
+		    cri->cri_klen / NUM_BITS_IN_BYTE;
+		lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
+		break;
+
+	case CRYPTO_AES_CBC:
+		DPRINTK("%s(): AES CBC\n", __FUNCTION__);
+		lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
+		lacSessCtx->cipherSetupData.cipherAlgorithm =
+		    CPA_CY_SYM_CIPHER_AES_CBC;
+		lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
+		    cri->cri_klen / NUM_BITS_IN_BYTE;
+		lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
+		break;
+
+	case CRYPTO_ARC4:
+		DPRINTK("%s(): ARC4\n", __FUNCTION__);
+		lacSessCtx->symOperation = CPA_CY_SYM_OP_CIPHER;
+		lacSessCtx->cipherSetupData.cipherAlgorithm =
+		    CPA_CY_SYM_CIPHER_ARC4;
+		lacSessCtx->cipherSetupData.cipherKeyLenInBytes =
+		    cri->cri_klen / NUM_BITS_IN_BYTE;
+		lacSessCtx->cipherSetupData.pCipherKey = cri->cri_key;
+		break;
+
+	case CRYPTO_SHA1:
+		DPRINTK("%s(): SHA1\n", __FUNCTION__);
+		lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
+		lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1;
+		lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
+		lacSessCtx->hashSetupData.digestResultLenInBytes =
+		    (cri->cri_mlen ?
+		     cri->cri_mlen : ICP_SHA1_DIGEST_SIZE_IN_BYTES);
+
+		break;
+
+	case CRYPTO_SHA1_HMAC:
+		DPRINTK("%s(): SHA1_HMAC\n", __FUNCTION__);
+		lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
+		lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1;
+		lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
+		lacSessCtx->hashSetupData.digestResultLenInBytes =
+		    (cri->cri_mlen ?
+		     cri->cri_mlen : ICP_SHA1_DIGEST_SIZE_IN_BYTES);
+		lacSessCtx->hashSetupData.authModeSetupData.authKey =
+		    cri->cri_key;
+		lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
+		    cri->cri_klen / NUM_BITS_IN_BYTE;
+		lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
+
+		break;
+
+	case CRYPTO_SHA2_256:
+		DPRINTK("%s(): SHA256\n", __FUNCTION__);
+		lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
+		lacSessCtx->hashSetupData.hashAlgorithm =
+		    CPA_CY_SYM_HASH_SHA256;
+		lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
+		lacSessCtx->hashSetupData.digestResultLenInBytes =
+		    (cri->cri_mlen ?
+		     cri->cri_mlen : ICP_SHA256_DIGEST_SIZE_IN_BYTES);
+
+		break;
+
+	case CRYPTO_SHA2_256_HMAC:
+		DPRINTK("%s(): SHA256_HMAC\n", __FUNCTION__);
+		lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
+		lacSessCtx->hashSetupData.hashAlgorithm =
+		    CPA_CY_SYM_HASH_SHA256;
+		lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
+		lacSessCtx->hashSetupData.digestResultLenInBytes =
+		    (cri->cri_mlen ?
+		     cri->cri_mlen : ICP_SHA256_DIGEST_SIZE_IN_BYTES);
+		lacSessCtx->hashSetupData.authModeSetupData.authKey =
+		    cri->cri_key;
+		lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
+		    cri->cri_klen / NUM_BITS_IN_BYTE;
+		lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
+
+		break;
+
+	case CRYPTO_SHA2_384:
+		DPRINTK("%s(): SHA384\n", __FUNCTION__);
+		lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
+		lacSessCtx->hashSetupData.hashAlgorithm =
+		    CPA_CY_SYM_HASH_SHA384;
+		lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
+		lacSessCtx->hashSetupData.digestResultLenInBytes =
+		    (cri->cri_mlen ?
+		     cri->cri_mlen : ICP_SHA384_DIGEST_SIZE_IN_BYTES);
+
+		break;
+
+	case CRYPTO_SHA2_384_HMAC:
+		DPRINTK("%s(): SHA384_HMAC\n", __FUNCTION__);
+		lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
+		lacSessCtx->hashSetupData.hashAlgorithm =
+		    CPA_CY_SYM_HASH_SHA384;
+		lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
+		lacSessCtx->hashSetupData.digestResultLenInBytes =
+		    (cri->cri_mlen ?
+		     cri->cri_mlen : ICP_SHA384_DIGEST_SIZE_IN_BYTES);
+		lacSessCtx->hashSetupData.authModeSetupData.authKey =
+		    cri->cri_key;
+		lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
+		    cri->cri_klen / NUM_BITS_IN_BYTE;
+		lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
+
+		break;
+
+	case CRYPTO_SHA2_512:
+		DPRINTK("%s(): SHA512\n", __FUNCTION__);
+		lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
+		lacSessCtx->hashSetupData.hashAlgorithm =
+		    CPA_CY_SYM_HASH_SHA512;
+		lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
+		lacSessCtx->hashSetupData.digestResultLenInBytes =
+		    (cri->cri_mlen ?
+		     cri->cri_mlen : ICP_SHA512_DIGEST_SIZE_IN_BYTES);
+
+		break;
+
+	case CRYPTO_SHA2_512_HMAC:
+		DPRINTK("%s(): SHA512_HMAC\n", __FUNCTION__);
+		lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
+		lacSessCtx->hashSetupData.hashAlgorithm =
+		    CPA_CY_SYM_HASH_SHA512;
+		lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
+		lacSessCtx->hashSetupData.digestResultLenInBytes =
+		    (cri->cri_mlen ?
+		     cri->cri_mlen : ICP_SHA512_DIGEST_SIZE_IN_BYTES);
+		lacSessCtx->hashSetupData.authModeSetupData.authKey =
+		    cri->cri_key;
+		lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
+		    cri->cri_klen / NUM_BITS_IN_BYTE;
+		lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
+
+		break;
+
+	case CRYPTO_MD5:
+		DPRINTK("%s(): MD5\n", __FUNCTION__);
+		lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
+		lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_MD5;
+		lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_PLAIN;
+		lacSessCtx->hashSetupData.digestResultLenInBytes =
+		    (cri->cri_mlen ?
+		     cri->cri_mlen : ICP_MD5_DIGEST_SIZE_IN_BYTES);
+
+		break;
+
+	case CRYPTO_MD5_HMAC:
+		DPRINTK("%s(): MD5_HMAC\n", __FUNCTION__);
+		lacSessCtx->symOperation = CPA_CY_SYM_OP_HASH;
+		lacSessCtx->hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_MD5;
+		lacSessCtx->hashSetupData.hashMode = CPA_CY_SYM_HASH_MODE_AUTH;
+		lacSessCtx->hashSetupData.digestResultLenInBytes =
+		    (cri->cri_mlen ?
+		     cri->cri_mlen : ICP_MD5_DIGEST_SIZE_IN_BYTES);
+		lacSessCtx->hashSetupData.authModeSetupData.authKey =
+		    cri->cri_key;
+		lacSessCtx->hashSetupData.authModeSetupData.authKeyLenInBytes =
+		    cri->cri_klen / NUM_BITS_IN_BYTE;
+		lacSessCtx->hashSetupData.authModeSetupData.aadLenInBytes = 0;
+
+		break;
+
+	default:
+		DPRINTK("%s(): ALG Setup FAIL\n", __FUNCTION__);
+		return ICP_OCF_DRV_STATUS_FAIL;
+	}
+
+	return ICP_OCF_DRV_STATUS_SUCCESS;
+}
+
+/* Name        : icp_ocfDrvFreeOCFSession
+ *
+ * Description : This function deletes all existing Session data representing
+ * the Cryptographic session established between OCF and this driver. This
+ * also includes freeing the memory allocated for the session context. The
+ * session object is also removed from the session linked list.
+ */
+static void icp_ocfDrvFreeOCFSession(struct icp_drvSessionData *sessionData)
+{
+
+	sessionData->inUse = ICP_SESSION_DEREGISTERED;
+
+	/*ENTER CRITICAL SECTION */
+	icp_spin_lockbh_lock(&icp_ocfDrvSymSessInfoListSpinlock);
+
+	if (CPA_TRUE == icp_atomic_read(&icp_ocfDrvIsExiting)) {
+		/*If the Driver is exiting, allow that process to
+		   handle any deletions */
+		/*EXIT CRITICAL SECTION */
+		icp_spin_lockbh_unlock(&icp_ocfDrvSymSessInfoListSpinlock);
+		return;
+	}
+
+	icp_atomic_dec(&num_ocf_to_drv_registered_sessions);
+
+	ICP_LIST_DEL(sessionData, listNode);
+
+	/*EXIT CRITICAL SECTION */
+	icp_spin_lockbh_unlock(&icp_ocfDrvSymSessInfoListSpinlock);
+
+	if (NULL != sessionData->sessHandle) {
+		icp_kfree(sessionData->sessHandle);
+	}
+	ICP_CACHE_FREE(drvSessionData_zone, sessionData);
+}
+
+/* Name        : icp_ocfDrvFreeLACSession
+ *
+ * Description : This attempts to deregister a LAC session. If it fails, the
+ * deregistation retry function is called.
+ */
+int icp_ocfDrvFreeLACSession(icp_device_t dev, uint64_t sid)
+{
+	CpaCySymSessionCtx sessionToDeregister = NULL;
+	struct icp_drvSessionData *sessionData = NULL;
+	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
+	int retval = 0;
+
+	sessionData = (struct icp_drvSessionData *)CRYPTO_SESID2LID(sid);
+	if (NULL == sessionData) {
+		EPRINTK("%s(): OCF Free session called with Null Session ID.\n",
+			__FUNCTION__);
+		return EINVAL;
+	}
+
+	sessionToDeregister = sessionData->sessHandle;
+
+	if ((ICP_SESSION_INITIALISED != sessionData->inUse) &&
+	    (ICP_SESSION_RUNNING != sessionData->inUse) &&
+	    (ICP_SESSION_DEREGISTERED != sessionData->inUse)) {
+		DPRINTK("%s() Session not initialised.\n", __FUNCTION__);
+		return EINVAL;
+	}
+
+	if (ICP_SESSION_RUNNING == sessionData->inUse) {
+		lacStatus = cpaCySymRemoveSession(CPA_INSTANCE_HANDLE_SINGLE,
+						  sessionToDeregister);
+		if (CPA_STATUS_RETRY == lacStatus) {
+			if (ICP_OCF_DRV_STATUS_SUCCESS !=
+			    icp_ocfDrvDeregRetry(&sessionToDeregister)) {
+				/* the retry function increments the
+				   dereg failed count */
+				DPRINTK("%s(): LAC failed to deregister the "
+					"session. (localSessionId= %p)\n",
+					__FUNCTION__, sessionToDeregister);
+				retval = EPERM;
+			}
+
+		} else if (CPA_STATUS_SUCCESS != lacStatus) {
+			DPRINTK("%s(): LAC failed to deregister the session. "
+				"localSessionId= %p, lacStatus = %d\n",
+				__FUNCTION__, sessionToDeregister, lacStatus);
+			icp_atomic_inc(&lac_session_failed_dereg_count);
+			retval = EPERM;
+		}
+	} else {
+		DPRINTK("%s() Session not registered with LAC.\n",
+			__FUNCTION__);
+	}
+
+	icp_ocfDrvFreeOCFSession(sessionData);
+	return retval;
+
+}
+
+/* Name        : icp_ocfDrvAlgCheck
+ *
+ * Description : This function checks whether the cryptodesc argument pertains
+ * to a sym or hash function
+ */
+static int icp_ocfDrvAlgCheck(struct cryptodesc *crp_desc)
+{
+
+	if (crp_desc->crd_alg == CRYPTO_3DES_CBC ||
+	    crp_desc->crd_alg == CRYPTO_AES_CBC ||
+	    crp_desc->crd_alg == CRYPTO_DES_CBC ||
+	    crp_desc->crd_alg == CRYPTO_NULL_CBC ||
+	    crp_desc->crd_alg == CRYPTO_ARC4) {
+		return ICP_OCF_DRV_ALG_CIPHER;
+	}
+
+	return ICP_OCF_DRV_ALG_HASH;
+}
+
+/* Name        : icp_ocfDrvSymProcess
+ *
+ * Description : This function will map symmetric functionality calls from OCF
+ * to the LAC API. It will also allocate memory to store the session context.
+ *
+ * Notes: If it is the first perform call for a given session, then a LAC
+ * session is registered. After the session is registered, no checks as
+ * to whether session paramaters have changed (e.g. alg chain order) are
+ * done.
+ */
+int icp_ocfDrvSymProcess(icp_device_t dev, struct cryptop *crp, int hint)
+{
+	struct icp_drvSessionData *sessionData = NULL;
+	struct icp_drvOpData *drvOpData = NULL;
+	CpaStatus lacStatus = CPA_STATUS_SUCCESS;
+	Cpa32U sessionCtxSizeInBytes = 0;
+
+	if (NULL == crp) {
+		DPRINTK("%s(): Invalid input parameters, cryptop is NULL\n",
+			__FUNCTION__);
+		return EINVAL;
+	}
+
+	if (NULL == crp->crp_desc) {
+		DPRINTK("%s(): Invalid input parameters, no crp_desc attached "
+			"to crp\n", __FUNCTION__);
+		crp->crp_etype = EINVAL;
+		return EINVAL;
+	}
+
+	if (NULL == crp->crp_buf) {
+		DPRINTK("%s(): Invalid input parameters, no buffer attached "
+			"to crp\n", __FUNCTION__);
+		crp->crp_etype = EINVAL;
+		return EINVAL;
+	}
+
+	if (CPA_TRUE == icp_atomic_read(&icp_ocfDrvIsExiting)) {
+		crp->crp_etype = EFAULT;
+		return EFAULT;
+	}
+
+	sessionData = (struct icp_drvSessionData *)
+	    (CRYPTO_SESID2LID(crp->crp_sid));
+	if (NULL == sessionData) {
+		DPRINTK("%s(): Invalid input parameters, Null Session ID \n",
+			__FUNCTION__);
+		crp->crp_etype = EINVAL;
+		return EINVAL;
+	}
+
+/*If we get a request against a deregisted session, cancel operation*/
+	if (ICP_SESSION_DEREGISTERED == sessionData->inUse) {
+		DPRINTK("%s(): Session ID %d was deregistered \n",
+			__FUNCTION__, (int)(CRYPTO_SESID2LID(crp->crp_sid)));
+		crp->crp_etype = EFAULT;
+		return EFAULT;
+	}
+
+/*If none of the session states are set, then the session structure was either
+  not initialised properly or we are reading from a freed memory area (possible
+  due to OCF batch mode not removing queued requests against deregistered
+  sessions*/
+	if (ICP_SESSION_INITIALISED != sessionData->inUse &&
+	    ICP_SESSION_RUNNING != sessionData->inUse) {
+		DPRINTK("%s(): Session - ID %d - not properly initialised or "
+			"memory freed back to the kernel \n",
+			__FUNCTION__, (int)(CRYPTO_SESID2LID(crp->crp_sid)));
+		crp->crp_etype = EINVAL;
+		return EINVAL;
+	}
+
+	/*For the below checks, remember error checking is already done in LAC.
+	   We're not validating inputs subsequent to registration */
+	if (sessionData->inUse == ICP_SESSION_INITIALISED) {
+		DPRINTK("%s(): Initialising session\n", __FUNCTION__);
+
+		if (NULL != crp->crp_desc->crd_next) {
+			if (ICP_OCF_DRV_ALG_CIPHER ==
+			    icp_ocfDrvAlgCheck(crp->crp_desc)) {
+
+				sessionData->lacSessCtx.algChainOrder =
+				    CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
+
+				if (crp->crp_desc->crd_flags & CRD_F_ENCRYPT) {
+					sessionData->lacSessCtx.cipherSetupData.
+					    cipherDirection =
+					    CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT;
+				} else {
+					sessionData->lacSessCtx.cipherSetupData.
+					    cipherDirection =
+					    CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT;
+				}
+			} else {
+				sessionData->lacSessCtx.algChainOrder =
+				    CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
+
+				if (crp->crp_desc->crd_next->crd_flags &
+				    CRD_F_ENCRYPT) {
+					sessionData->lacSessCtx.cipherSetupData.
+					    cipherDirection =
+					    CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT;
+				} else {
+					sessionData->lacSessCtx.cipherSetupData.
+					    cipherDirection =
+					    CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT;
+				}
+
+			}
+
+		} else if (ICP_OCF_DRV_ALG_CIPHER ==
+			   icp_ocfDrvAlgCheck(crp->crp_desc)) {
+			if (crp->crp_desc->crd_flags & CRD_F_ENCRYPT) {
+				sessionData->lacSessCtx.cipherSetupData.
+				    cipherDirection =
+				    CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT;
+			} else {
+				sessionData->lacSessCtx.cipherSetupData.
+				    cipherDirection =
+				    CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT;
+			}
+
+		}
+
+		/*No action required for standalone Auth here */
+
+		/* Allocate memory for SymSessionCtx before the Session Registration */
+		lacStatus =
+		    cpaCySymSessionCtxGetSize(CPA_INSTANCE_HANDLE_SINGLE,
+					      &(sessionData->lacSessCtx),
+					      &sessionCtxSizeInBytes);
+		if (CPA_STATUS_SUCCESS != lacStatus) {
+			EPRINTK("%s(): cpaCySymSessionCtxGetSize failed - %d\n",
+				__FUNCTION__, lacStatus);
+			crp->crp_etype = EINVAL;
+			return EINVAL;
+		}
+		sessionData->sessHandle =
+		    icp_kmalloc(sessionCtxSizeInBytes, ICP_M_NOWAIT);
+		if (NULL == sessionData->sessHandle) {
+			EPRINTK
+			    ("%s(): Failed to get memory for SymSessionCtx\n",
+			     __FUNCTION__);
+			crp->crp_etype = ENOMEM;
+			return ENOMEM;
+		}
+
+		lacStatus = cpaCySymInitSession(CPA_INSTANCE_HANDLE_SINGLE,
+						icp_ocfDrvSymCallBack,
+						&(sessionData->lacSessCtx),
+						sessionData->sessHandle);
+
+		if (CPA_STATUS_SUCCESS != lacStatus) {
+			EPRINTK("%s(): cpaCySymInitSession failed -%d \n",
+				__FUNCTION__, lacStatus);
+			crp->crp_etype = EFAULT;
+			return EFAULT;
+		}
+
+		sessionData->inUse = ICP_SESSION_RUNNING;
+	}
+
+	drvOpData = icp_kmem_cache_zalloc(drvOpData_zone, ICP_M_NOWAIT);
+	if (NULL == drvOpData) {
+		EPRINTK("%s():Failed to get memory for drvOpData\n",
+			__FUNCTION__);
+		crp->crp_etype = ENOMEM;
+		return ENOMEM;
+	}
+
+	drvOpData->lacOpData.pSessionCtx = sessionData->sessHandle;
+	drvOpData->digestSizeInBytes = sessionData->lacSessCtx.hashSetupData.
+	    digestResultLenInBytes;
+	drvOpData->crp = crp;
+
+	/* Set the default buffer list array memory allocation */
+	drvOpData->srcBuffer.pBuffers = drvOpData->bufferListArray;
+	drvOpData->numBufferListArray = ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS;
+
+	if (ICP_OCF_DRV_STATUS_SUCCESS !=
+	    icp_ocfDrvProcessDataSetup(drvOpData, drvOpData->crp->crp_desc)) {
+		crp->crp_etype = EINVAL;
+		goto err;
+	}
+
+	if (drvOpData->crp->crp_desc->crd_next != NULL) {
+		if (icp_ocfDrvProcessDataSetup(drvOpData, drvOpData->crp->
+					       crp_desc->crd_next)) {
+			crp->crp_etype = EINVAL;
+			goto err;
+		}
+
+	}
+
+	/*
+	 * Allocate buffer list array memory if the data fragment is more than
+	 * the default number (ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS) and not
+	 * calculated already
+	 */
+	if (crp->crp_flags & ICP_CRYPTO_F_PACKET_BUF) {
+		if (NULL == drvOpData->lacOpData.pDigestResult) {
+			drvOpData->numBufferListArray =
+			    icp_ocfDrvGetPacketBuffFrags((icp_packet_buffer_t *)
+							 crp->crp_buf);
+		}
+
+		if (ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS <
+		    drvOpData->numBufferListArray) {
+			DPRINTK("%s() numBufferListArray more than default\n",
+				__FUNCTION__);
+			drvOpData->srcBuffer.pBuffers = NULL;
+			drvOpData->srcBuffer.pBuffers =
+			    icp_kmalloc(drvOpData->numBufferListArray *
+					sizeof(CpaFlatBuffer), ICP_M_NOWAIT);
+			if (NULL == drvOpData->srcBuffer.pBuffers) {
+				EPRINTK("%s() Failed to get memory for "
+					"pBuffers\n", __FUNCTION__);
+				ICP_CACHE_FREE(drvOpData_zone, drvOpData);
+				crp->crp_etype = ENOMEM;
+				return ENOMEM;
+			}
+		}
+	}
+
+	/*
+	 * Check the type of buffer structure we got and convert it into
+	 * CpaBufferList format.
+	 */
+	if (crp->crp_flags & ICP_CRYPTO_F_PACKET_BUF) {
+		if (ICP_OCF_DRV_STATUS_SUCCESS !=
+		    icp_ocfDrvPacketBuffToBufferList((icp_packet_buffer_t *)
+						     crp->crp_buf,
+						     &(drvOpData->srcBuffer))) {
+			EPRINTK("%s():Failed to translate from packet buffer "
+				"to bufferlist\n", __FUNCTION__);
+			crp->crp_etype = EINVAL;
+			goto err;
+		}
+
+		drvOpData->bufferType = ICP_CRYPTO_F_PACKET_BUF;
+	} else if (crp->crp_flags & CRYPTO_F_IOV) {
+		/* OCF only supports IOV of one entry. */
+		if (NUM_IOV_SUPPORTED ==
+		    ((struct uio *)(crp->crp_buf))->uio_iovcnt) {
+
+			icp_ocfDrvPtrAndLenToBufferList(((struct uio *)(crp->
+									crp_buf))->
+							uio_iov[0].iov_base,
+							((struct uio *)(crp->
+									crp_buf))->
+							uio_iov[0].iov_len,
+							&(drvOpData->
+							  srcBuffer));
+
+			drvOpData->bufferType = CRYPTO_F_IOV;
+
+		} else {
+			DPRINTK("%s():Unable to handle IOVs with lengths of "
+				"greater than one!\n", __FUNCTION__);
+			crp->crp_etype = EINVAL;
+			goto err;
+		}
+
+	} else {
+		icp_ocfDrvPtrAndLenToBufferList(crp->crp_buf,
+						crp->crp_ilen,
+						&(drvOpData->srcBuffer));
+
+		drvOpData->bufferType = CRYPTO_BUF_CONTIG;
+	}
+
+	/* Allocate srcBuffer's private meta data */
+	if (ICP_OCF_DRV_STATUS_SUCCESS !=
+	    icp_ocfDrvAllocMetaData(&(drvOpData->srcBuffer), drvOpData)) {
+		EPRINTK("%s() icp_ocfDrvAllocMetaData failed\n", __FUNCTION__);
+		memset(&(drvOpData->lacOpData), 0, sizeof(CpaCySymOpData));
+		crp->crp_etype = EINVAL;
+		goto err;
+	}
+
+	/* Perform "in-place" crypto operation */
+	lacStatus = cpaCySymPerformOp(CPA_INSTANCE_HANDLE_SINGLE,
+				      (void *)drvOpData,
+				      &(drvOpData->lacOpData),
+				      &(drvOpData->srcBuffer),
+				      &(drvOpData->srcBuffer),
+				      &(drvOpData->verifyResult));
+	if (CPA_STATUS_RETRY == lacStatus) {
+		DPRINTK("%s(): cpaCySymPerformOp retry, lacStatus = %d\n",
+			__FUNCTION__, lacStatus);
+		memset(&(drvOpData->lacOpData), 0, sizeof(CpaCySymOpData));
+		crp->crp_etype = ERESTART;
+		goto err;
+	}
+	if (CPA_STATUS_SUCCESS != lacStatus) {
+		EPRINTK("%s(): cpaCySymPerformOp failed, lacStatus = %d\n",
+			__FUNCTION__, lacStatus);
+		memset(&(drvOpData->lacOpData), 0, sizeof(CpaCySymOpData));
+		crp->crp_etype = EINVAL;
+		goto err;
+	}
+
+	return 0;		//OCF success status value
+
+      err:
+	if (drvOpData->numBufferListArray > ICP_OCF_DRV_DEFAULT_BUFFLIST_ARRAYS) {
+		icp_kfree(drvOpData->srcBuffer.pBuffers);
+	}
+	icp_ocfDrvFreeMetaData(&(drvOpData->srcBuffer));
+	ICP_CACHE_FREE(drvOpData_zone, drvOpData);
+
+	return crp->crp_etype;
+}
+
+/* Name        : icp_ocfDrvProcessDataSetup
+ *
+ * Description : This function will setup all the cryptographic operation data
+ *               that is required by LAC to execute the operation.
+ */
+static int icp_ocfDrvProcessDataSetup(struct icp_drvOpData *drvOpData,
+				      struct cryptodesc *crp_desc)
+{
+	CpaCyRandGenOpData randGenOpData;
+	CpaFlatBuffer randData;
+
+	drvOpData->lacOpData.packetType = CPA_CY_SYM_PACKET_TYPE_FULL;
+
+	/* Convert from the cryptop to the ICP LAC crypto parameters */
+	switch (crp_desc->crd_alg) {
+	case CRYPTO_NULL_CBC:
+		drvOpData->lacOpData.
+		    cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
+		drvOpData->lacOpData.
+		    messageLenToCipherInBytes = crp_desc->crd_len;
+		drvOpData->verifyResult = CPA_FALSE;
+		drvOpData->lacOpData.ivLenInBytes = NULL_BLOCK_LEN;
+		break;
+	case CRYPTO_DES_CBC:
+		drvOpData->lacOpData.
+		    cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
+		drvOpData->lacOpData.
+		    messageLenToCipherInBytes = crp_desc->crd_len;
+		drvOpData->verifyResult = CPA_FALSE;
+		drvOpData->lacOpData.ivLenInBytes = DES_BLOCK_LEN;
+		break;
+	case CRYPTO_3DES_CBC:
+		drvOpData->lacOpData.
+		    cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
+		drvOpData->lacOpData.
+		    messageLenToCipherInBytes = crp_desc->crd_len;
+		drvOpData->verifyResult = CPA_FALSE;
+		drvOpData->lacOpData.ivLenInBytes = DES3_BLOCK_LEN;
+		break;
+	case CRYPTO_ARC4:
+		drvOpData->lacOpData.
+		    cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
+		drvOpData->lacOpData.
+		    messageLenToCipherInBytes = crp_desc->crd_len;
+		drvOpData->verifyResult = CPA_FALSE;
+		drvOpData->lacOpData.ivLenInBytes = ARC4_COUNTER_LEN;
+		break;
+	case CRYPTO_AES_CBC:
+		drvOpData->lacOpData.
+		    cryptoStartSrcOffsetInBytes = crp_desc->crd_skip;
+		drvOpData->lacOpData.
+		    messageLenToCipherInBytes = crp_desc->crd_len;
+		drvOpData->verifyResult = CPA_FALSE;
+		drvOpData->lacOpData.ivLenInBytes = RIJNDAEL128_BLOCK_LEN;
+		break;
+	case CRYPTO_SHA1:
+	case CRYPTO_SHA1_HMAC:
+	case CRYPTO_SHA2_256:
+	case CRYPTO_SHA2_256_HMAC:
+	case CRYPTO_SHA2_384:
+	case CRYPTO_SHA2_384_HMAC:
+	case CRYPTO_SHA2_512:
+	case CRYPTO_SHA2_512_HMAC:
+	case CRYPTO_MD5:
+	case CRYPTO_MD5_HMAC:
+		drvOpData->lacOpData.
+		    hashStartSrcOffsetInBytes = crp_desc->crd_skip;
+		drvOpData->lacOpData.
+		    messageLenToHashInBytes = crp_desc->crd_len;
+		drvOpData->lacOpData.
+		    pDigestResult =
+		    icp_ocfDrvDigestPointerFind(drvOpData, crp_desc);
+
+		if (NULL == drvOpData->lacOpData.pDigestResult) {
+			DPRINTK("%s(): ERROR - could not calculate "
+				"Digest Result memory address\n", __FUNCTION__);
+			return ICP_OCF_DRV_STATUS_FAIL;
+		}
+
+		drvOpData->lacOpData.digestVerify = CPA_FALSE;
+		break;
+	default:
+		DPRINTK("%s(): Crypto process error - algorithm not "
+			"found \n", __FUNCTION__);
+		return ICP_OCF_DRV_STATUS_FAIL;
+	}
+
+	/* Figure out what the IV is supposed to be */
+	if ((crp_desc->crd_alg == CRYPTO_DES_CBC) ||
+	    (crp_desc->crd_alg == CRYPTO_3DES_CBC) ||
+	    (crp_desc->crd_alg == CRYPTO_AES_CBC)) {
+		/*ARC4 doesn't use an IV */
+		if (crp_desc->crd_flags & CRD_F_IV_EXPLICIT) {
+			/* Explicit IV provided to OCF */
+			drvOpData->lacOpData.pIv = crp_desc->crd_iv;
+		} else {
+			/* IV is not explicitly provided to OCF */
+
+			/* Point the LAC OP Data IV pointer to our allocated
+			   storage location for this session. */
+			drvOpData->lacOpData.pIv = drvOpData->ivData;
+
+			if ((crp_desc->crd_flags & CRD_F_ENCRYPT) &&
+			    ((crp_desc->crd_flags & CRD_F_IV_PRESENT) == 0)) {
+
+				/* Encrypting - need to create IV */
+				randGenOpData.generateBits = CPA_TRUE;
+				randGenOpData.lenInBytes = MAX_IV_LEN_IN_BYTES;
+
+				icp_ocfDrvPtrAndLenToFlatBuffer((Cpa8U *)
+								drvOpData->
+								ivData,
+								MAX_IV_LEN_IN_BYTES,
+								&randData);
+
+				if (CPA_STATUS_SUCCESS !=
+				    cpaCyRandGen(CPA_INSTANCE_HANDLE_SINGLE,
+						 NULL, NULL,
+						 &randGenOpData, &randData)) {
+					DPRINTK("%s(): ERROR - Failed to"
+						" generate"
+						" Initialisation Vector\n",
+						__FUNCTION__);
+					return ICP_OCF_DRV_STATUS_FAIL;
+				}
+
+				crypto_copyback(drvOpData->crp->
+						crp_flags,
+						drvOpData->crp->crp_buf,
+						crp_desc->crd_inject,
+						drvOpData->lacOpData.
+						ivLenInBytes,
+						(caddr_t) (drvOpData->lacOpData.
+							   pIv));
+			} else {
+				/* Reading IV from buffer */
+				crypto_copydata(drvOpData->crp->
+						crp_flags,
+						drvOpData->crp->crp_buf,
+						crp_desc->crd_inject,
+						drvOpData->lacOpData.
+						ivLenInBytes,
+						(caddr_t) (drvOpData->lacOpData.
+							   pIv));
+			}
+
+		}
+
+	}
+
+	return ICP_OCF_DRV_STATUS_SUCCESS;
+}
+
+/* Name        : icp_ocfDrvDigestPointerFind
+ *
+ * Description : This function is used to find the memory address of where the
+ * digest information shall be stored in. Input buffer types are an skbuff, iov
+ * or flat buffer. The address is found using the buffer data start address and
+ * an offset.
+ *
+ * Note: In the case of a linux skbuff, the digest address may exist within
+ * a memory space linked to from the start buffer. These linked memory spaces
+ * must be traversed by the data length offset in order to find the digest start
+ * address. Whether there is enough space for the digest must also be checked.
+ */
+uint8_t *icp_ocfDrvDigestPointerFind(struct icp_drvOpData * drvOpData,
+				     struct cryptodesc * crp_desc)
+{
+
+	int offsetInBytes = crp_desc->crd_inject;
+	uint32_t digestSizeInBytes = drvOpData->digestSizeInBytes;
+	uint8_t *flat_buffer_base = NULL;
+	int flat_buffer_length = 0;
+
+	if (drvOpData->crp->crp_flags & ICP_CRYPTO_F_PACKET_BUF) {
+
+		return icp_ocfDrvPacketBufferDigestPointerFind(drvOpData,
+							       offsetInBytes,
+							       digestSizeInBytes);
+
+	} else {
+		/* IOV or flat buffer */
+		if (drvOpData->crp->crp_flags & CRYPTO_F_IOV) {
+			/*single IOV check has already been done */
+			flat_buffer_base = ((struct uio *)
+					    (drvOpData->crp->crp_buf))->
+			    uio_iov[0].iov_base;
+			flat_buffer_length = ((struct uio *)
+					      (drvOpData->crp->crp_buf))->
+			    uio_iov[0].iov_len;
+		} else {
+			flat_buffer_base = (uint8_t *) drvOpData->crp->crp_buf;
+			flat_buffer_length = drvOpData->crp->crp_ilen;
+		}
+
+		if (flat_buffer_length < (offsetInBytes + digestSizeInBytes)) {
+			DPRINTK("%s() Not enough space for Digest "
+				"(IOV/Flat Buffer) \n", __FUNCTION__);
+			return NULL;
+		} else {
+			return (uint8_t *) (flat_buffer_base + offsetInBytes);
+		}
+	}
+	DPRINTK("%s() Should not reach this point\n", __FUNCTION__);
+	return NULL;
+}
diff --git a/crypto/ocf/ep80579/linux_2.6_kernel_space.mk b/crypto/ocf/ep80579/linux_2.6_kernel_space.mk
new file mode 100644
index 000000000000..78f678728a6f
--- /dev/null
+++ b/crypto/ocf/ep80579/linux_2.6_kernel_space.mk
@@ -0,0 +1,68 @@
+###################
+# @par
+# This file is provided under a dual BSD/GPLv2 license.  When using or
+#   redistributing this file, you may do so under either license.
+#
+#   GPL LICENSE SUMMARY
+#
+#   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
+#
+#   This program is free software; you can redistribute it and/or modify
+#   it under the terms of version 2 of the GNU General Public License as
+#   published by the Free Software Foundation.
+#
+#   This program is distributed in the hope that it will be useful, but
+#   WITHOUT ANY WARRANTY; without even the implied warranty of
+#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#   General Public License for more details.
+#
+#   You should have received a copy of the GNU General Public License
+#   along with this program; if not, write to the Free Software
+#   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#   The full GNU General Public License is included in this distribution
+#   in the file called LICENSE.GPL.
+#
+#   Contact Information:
+#   Intel Corporation
+#
+#   BSD LICENSE
+#
+#   Copyright(c) 2007,2008 Intel Corporation. All rights reserved.
+#   All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+#
+#  version: Security.L.1.0.130
+###################
+
+#specific include directories in kernel space
+INCLUDES+=#e.g. -I$(OSAL_DIR)/include \
+
+#Extra Flags Specific in kernel space e.g. include path or debug flags etc. e.g to add an include path EXTRA_CFLAGS += -I$(src)/../include
+EXTRA_CFLAGS += $(INCLUDES) -O2 -Wall
+EXTRA_LDFLAGS +=-whole-archive
diff --git a/crypto/ocf/hifn/Makefile b/crypto/ocf/hifn/Makefile
new file mode 100644
index 000000000000..fb559ed52616
--- /dev/null
+++ b/crypto/ocf/hifn/Makefile
@@ -0,0 +1,12 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+obj-$(CONFIG_OCF_HIFN)     += hifn7751.o
+obj-$(CONFIG_OCF_HIFNHIPP) += hifnHIPP.o
+
+obj ?= .
+EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
diff --git a/crypto/ocf/hifn/hifn7751.c b/crypto/ocf/hifn/hifn7751.c
new file mode 100644
index 000000000000..9dac0db3fba7
--- /dev/null
+++ b/crypto/ocf/hifn/hifn7751.c
@@ -0,0 +1,2954 @@
+/*	$OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $	*/
+
+/*-
+ * Invertex AEON / Hifn 7751 driver
+ * Copyright (c) 1999 Invertex Inc. All rights reserved.
+ * Copyright (c) 1999 Theo de Raadt
+ * Copyright (c) 2000-2001 Network Security Technologies, Inc.
+ *			http://www.netsec.net
+ * Copyright (c) 2003 Hifn Inc.
+ *
+ * This driver is based on a previous driver by Invertex, for which they
+ * requested:  Please send any comments, feedback, bug-fixes, or feature
+ * requests to software@invertex.com.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ *
+__FBSDID("$FreeBSD: src/sys/dev/hifn/hifn7751.c,v 1.40 2007/03/21 03:42:49 sam Exp $");
+ */
+
+/*
+ * Driver for various Hifn encryption processors.
+ */
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <asm/io.h>
+
+#include <cryptodev.h>
+#include <uio.h>
+#include <hifn/hifn7751reg.h>
+#include <hifn/hifn7751var.h>
+
+#if 1
+#define	DPRINTF(a...)	if (hifn_debug) { \
+							printk("%s: ", sc ? \
+								device_get_nameunit(sc->sc_dev) : "hifn"); \
+							printk(a); \
+						} else
+#else
+#define	DPRINTF(a...)
+#endif
+
+static inline int
+pci_get_revid(struct pci_dev *dev)
+{
+	u8 rid = 0;
+	pci_read_config_byte(dev, PCI_REVISION_ID, &rid);
+	return rid;
+}
+
+static	struct hifn_stats hifnstats;
+
+#define	debug hifn_debug
+int hifn_debug = 0;
+module_param(hifn_debug, int, 0644);
+MODULE_PARM_DESC(hifn_debug, "Enable debug");
+
+int hifn_maxbatch = 1;
+module_param(hifn_maxbatch, int, 0644);
+MODULE_PARM_DESC(hifn_maxbatch, "max ops to batch w/o interrupt");
+
+int hifn_cache_linesize = 0x10;
+module_param(hifn_cache_linesize, int, 0444);
+MODULE_PARM_DESC(hifn_cache_linesize, "PCI config cache line size");
+
+#ifdef MODULE_PARM
+char *hifn_pllconfig = NULL;
+MODULE_PARM(hifn_pllconfig, "s");
+#else
+char hifn_pllconfig[32]; /* This setting is RO after loading */
+module_param_string(hifn_pllconfig, hifn_pllconfig, 32, 0444);
+#endif
+MODULE_PARM_DESC(hifn_pllconfig, "PLL config, ie., pci66, ext33, ...");
+
+#ifdef HIFN_VULCANDEV
+#include <sys/conf.h>
+#include <sys/uio.h>
+
+static struct cdevsw vulcanpk_cdevsw; /* forward declaration */
+#endif
+
+/*
+ * Prototypes and count for the pci_device structure
+ */
+static	int  hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static	void hifn_remove(struct pci_dev *dev);
+
+static	int hifn_newsession(device_t, u_int32_t *, struct cryptoini *);
+static	int hifn_freesession(device_t, u_int64_t);
+static	int hifn_process(device_t, struct cryptop *, int);
+
+static device_method_t hifn_methods = {
+	/* crypto device methods */
+	DEVMETHOD(cryptodev_newsession,	hifn_newsession),
+	DEVMETHOD(cryptodev_freesession,hifn_freesession),
+	DEVMETHOD(cryptodev_process,	hifn_process),
+};
+
+static	void hifn_reset_board(struct hifn_softc *, int);
+static	void hifn_reset_puc(struct hifn_softc *);
+static	void hifn_puc_wait(struct hifn_softc *);
+static	int hifn_enable_crypto(struct hifn_softc *);
+static	void hifn_set_retry(struct hifn_softc *sc);
+static	void hifn_init_dma(struct hifn_softc *);
+static	void hifn_init_pci_registers(struct hifn_softc *);
+static	int hifn_sramsize(struct hifn_softc *);
+static	int hifn_dramsize(struct hifn_softc *);
+static	int hifn_ramtype(struct hifn_softc *);
+static	void hifn_sessions(struct hifn_softc *);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
+static irqreturn_t hifn_intr(int irq, void *arg);
+#else
+static irqreturn_t hifn_intr(int irq, void *arg, struct pt_regs *regs);
+#endif
+static	u_int hifn_write_command(struct hifn_command *, u_int8_t *);
+static	u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
+static	void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
+static	int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
+static	int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
+static	int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
+static	int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
+static	int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
+static	int hifn_init_pubrng(struct hifn_softc *);
+static	void hifn_tick(unsigned long arg);
+static	void hifn_abort(struct hifn_softc *);
+static	void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
+
+static	void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
+static	void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
+
+#ifdef CONFIG_OCF_RANDOMHARVEST
+static	int hifn_read_random(void *arg, u_int32_t *buf, int len);
+#endif
+
+#define HIFN_MAX_CHIPS	8
+static struct hifn_softc *hifn_chip_idx[HIFN_MAX_CHIPS];
+
+static __inline u_int32_t
+READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
+{
+	u_int32_t v = readl(sc->sc_bar0 + reg);
+	sc->sc_bar0_lastreg = (bus_size_t) -1;
+	return (v);
+}
+#define	WRITE_REG_0(sc, reg, val)	hifn_write_reg_0(sc, reg, val)
+
+static __inline u_int32_t
+READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
+{
+	u_int32_t v = readl(sc->sc_bar1 + reg);
+	sc->sc_bar1_lastreg = (bus_size_t) -1;
+	return (v);
+}
+#define	WRITE_REG_1(sc, reg, val)	hifn_write_reg_1(sc, reg, val)
+
+/*
+ * map in a given buffer (great on some arches :-)
+ */
+
+static int
+pci_map_uio(struct hifn_softc *sc, struct hifn_operand *buf, struct uio *uio)
+{
+	struct iovec *iov = uio->uio_iov;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	buf->mapsize = 0;
+	for (buf->nsegs = 0; buf->nsegs < uio->uio_iovcnt; ) {
+		buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
+				iov->iov_base, iov->iov_len,
+				PCI_DMA_BIDIRECTIONAL);
+		buf->segs[buf->nsegs].ds_len = iov->iov_len;
+		buf->mapsize += iov->iov_len;
+		iov++;
+		buf->nsegs++;
+	}
+	/* identify this buffer by the first segment */
+	buf->map = (void *) buf->segs[0].ds_addr;
+	return(0);
+}
+
+/*
+ * map in a given sk_buff
+ */
+
+static int
+pci_map_skb(struct hifn_softc *sc,struct hifn_operand *buf,struct sk_buff *skb)
+{
+	int i;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	buf->mapsize = 0;
+
+	buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
+			skb->data, skb_headlen(skb), PCI_DMA_BIDIRECTIONAL);
+	buf->segs[0].ds_len = skb_headlen(skb);
+	buf->mapsize += buf->segs[0].ds_len;
+
+	buf->nsegs = 1;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; ) {
+		buf->segs[buf->nsegs].ds_len = skb_shinfo(skb)->frags[i].size;
+		buf->segs[buf->nsegs].ds_addr = pci_map_single(sc->sc_pcidev,
+				page_address(skb_frag_page(&skb_shinfo(skb)->frags[i])) +
+					skb_shinfo(skb)->frags[i].page_offset,
+				buf->segs[buf->nsegs].ds_len, PCI_DMA_BIDIRECTIONAL);
+		buf->mapsize += buf->segs[buf->nsegs].ds_len;
+		buf->nsegs++;
+	}
+
+	/* identify this buffer by the first segment */
+	buf->map = (void *) buf->segs[0].ds_addr;
+	return(0);
+}
+
+/*
+ * map in a given contiguous buffer
+ */
+
+static int
+pci_map_buf(struct hifn_softc *sc,struct hifn_operand *buf, void *b, int len)
+{
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	buf->mapsize = 0;
+	buf->segs[0].ds_addr = pci_map_single(sc->sc_pcidev,
+			b, len, PCI_DMA_BIDIRECTIONAL);
+	buf->segs[0].ds_len = len;
+	buf->mapsize += buf->segs[0].ds_len;
+	buf->nsegs = 1;
+
+	/* identify this buffer by the first segment */
+	buf->map = (void *) buf->segs[0].ds_addr;
+	return(0);
+}
+
+#if 0 /* not needed at this time */
+static void
+pci_sync_iov(struct hifn_softc *sc, struct hifn_operand *buf)
+{
+	int i;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+	for (i = 0; i < buf->nsegs; i++)
+		pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
+				buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
+}
+#endif
+
+static void
+pci_unmap_buf(struct hifn_softc *sc, struct hifn_operand *buf)
+{
+	int i;
+	DPRINTF("%s()\n", __FUNCTION__);
+	for (i = 0; i < buf->nsegs; i++) {
+		pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
+				buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
+		buf->segs[i].ds_addr = 0;
+		buf->segs[i].ds_len = 0;
+	}
+	buf->nsegs = 0;
+	buf->mapsize = 0;
+	buf->map = 0;
+}
+
+static const char*
+hifn_partname(struct hifn_softc *sc)
+{
+	/* XXX sprintf numbers when not decoded */
+	switch (pci_get_vendor(sc->sc_pcidev)) {
+	case PCI_VENDOR_HIFN:
+		switch (pci_get_device(sc->sc_pcidev)) {
+		case PCI_PRODUCT_HIFN_6500:	return "Hifn 6500";
+		case PCI_PRODUCT_HIFN_7751:	return "Hifn 7751";
+		case PCI_PRODUCT_HIFN_7811:	return "Hifn 7811";
+		case PCI_PRODUCT_HIFN_7951:	return "Hifn 7951";
+		case PCI_PRODUCT_HIFN_7955:	return "Hifn 7955";
+		case PCI_PRODUCT_HIFN_7956:	return "Hifn 7956";
+		}
+		return "Hifn unknown-part";
+	case PCI_VENDOR_INVERTEX:
+		switch (pci_get_device(sc->sc_pcidev)) {
+		case PCI_PRODUCT_INVERTEX_AEON:	return "Invertex AEON";
+		}
+		return "Invertex unknown-part";
+	case PCI_VENDOR_NETSEC:
+		switch (pci_get_device(sc->sc_pcidev)) {
+		case PCI_PRODUCT_NETSEC_7751:	return "NetSec 7751";
+		}
+		return "NetSec unknown-part";
+	}
+	return "Unknown-vendor unknown-part";
+}
+
+static u_int
+checkmaxmin(struct pci_dev *dev, const char *what, u_int v, u_int min, u_int max)
+{
+	struct hifn_softc *sc = pci_get_drvdata(dev);
+	if (v > max) {
+		device_printf(sc->sc_dev, "Warning, %s %u out of range, "
+			"using max %u\n", what, v, max);
+		v = max;
+	} else if (v < min) {
+		device_printf(sc->sc_dev, "Warning, %s %u out of range, "
+			"using min %u\n", what, v, min);
+		v = min;
+	}
+	return v;
+}
+
+/*
+ * Select PLL configuration for 795x parts.  This is complicated in
+ * that we cannot determine the optimal parameters without user input.
+ * The reference clock is derived from an external clock through a
+ * multiplier.  The external clock is either the host bus (i.e. PCI)
+ * or an external clock generator.  When using the PCI bus we assume
+ * the clock is either 33 or 66 MHz; for an external source we cannot
+ * tell the speed.
+ *
+ * PLL configuration is done with a string: "pci" for PCI bus, or "ext"
+ * for an external source, followed by the frequency.  We calculate
+ * the appropriate multiplier and PLL register contents accordingly.
+ * When no configuration is given we default to "pci66" since that
+ * always will allow the card to work.  If a card is using the PCI
+ * bus clock and in a 33MHz slot then it will be operating at half
+ * speed until the correct information is provided.
+ *
+ * We use a default setting of "ext66" because according to Mike Ham
+ * of HiFn, almost every board in existence has an external crystal
+ * populated at 66Mhz. Using PCI can be a problem on modern motherboards,
+ * because PCI33 can have clocks from 0 to 33Mhz, and some have
+ * non-PCI-compliant spread-spectrum clocks, which can confuse the pll.
+ */
+static void
+hifn_getpllconfig(struct pci_dev *dev, u_int *pll)
+{
+	const char *pllspec = hifn_pllconfig;
+	u_int freq, mul, fl, fh;
+	u_int32_t pllconfig;
+	char *nxt;
+
+	if (pllspec == NULL)
+		pllspec = "ext66";
+	fl = 33, fh = 66;
+	pllconfig = 0;
+	if (strncmp(pllspec, "ext", 3) == 0) {
+		pllspec += 3;
+		pllconfig |= HIFN_PLL_REF_SEL;
+		switch (pci_get_device(dev)) {
+		case PCI_PRODUCT_HIFN_7955:
+		case PCI_PRODUCT_HIFN_7956:
+			fl = 20, fh = 100;
+			break;
+#ifdef notyet
+		case PCI_PRODUCT_HIFN_7954:
+			fl = 20, fh = 66;
+			break;
+#endif
+		}
+	} else if (strncmp(pllspec, "pci", 3) == 0)
+		pllspec += 3;
+	freq = strtoul(pllspec, &nxt, 10);
+	if (nxt == pllspec)
+		freq = 66;
+	else
+		freq = checkmaxmin(dev, "frequency", freq, fl, fh);
+	/*
+	 * Calculate multiplier.  We target a Fck of 266 MHz,
+	 * allowing only even values, possibly rounded down.
+	 * Multipliers > 8 must set the charge pump current.
+	 */
+	mul = checkmaxmin(dev, "PLL divisor", (266 / freq) &~ 1, 2, 12);
+	pllconfig |= (mul / 2 - 1) << HIFN_PLL_ND_SHIFT;
+	if (mul > 8)
+		pllconfig |= HIFN_PLL_IS;
+	*pll = pllconfig;
+}
+
+/*
+ * Attach an interface that successfully probed.
+ */
+static int
+hifn_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+{
+	struct hifn_softc *sc = NULL;
+	char rbase;
+	u_int16_t ena, rev;
+	int rseg, rc;
+	unsigned long mem_start, mem_len;
+	static int num_chips = 0;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	if (pci_enable_device(dev) < 0)
+		return(-ENODEV);
+
+	if (pci_set_mwi(dev))
+		return(-ENODEV);
+
+	if (!dev->irq) {
+		printk("hifn: found device with no IRQ assigned. check BIOS settings!");
+		pci_disable_device(dev);
+		return(-ENODEV);
+	}
+
+	sc = (struct hifn_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
+	if (!sc)
+		return(-ENOMEM);
+	memset(sc, 0, sizeof(*sc));
+
+	softc_device_init(sc, "hifn", num_chips, hifn_methods);
+
+	sc->sc_pcidev = dev;
+	sc->sc_irq = -1;
+	sc->sc_cid = -1;
+	sc->sc_num = num_chips++;
+	if (sc->sc_num < HIFN_MAX_CHIPS)
+		hifn_chip_idx[sc->sc_num] = sc;
+
+	pci_set_drvdata(sc->sc_pcidev, sc);
+
+	spin_lock_init(&sc->sc_mtx);
+
+	/* XXX handle power management */
+
+	/*
+	 * The 7951 and 795x have a random number generator and
+	 * public key support; note this.
+	 */
+	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
+	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
+	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
+	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
+		sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
+	/*
+	 * The 7811 has a random number generator and
+	 * we also note it's identity 'cuz of some quirks.
+	 */
+	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
+	    pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
+		sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;
+
+	/*
+	 * The 795x parts support AES.
+	 */
+	if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
+	    (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
+	     pci_get_device(dev) == PCI_PRODUCT_HIFN_7956)) {
+		sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
+		/*
+		 * Select PLL configuration.  This depends on the
+		 * bus and board design and must be manually configured
+		 * if the default setting is unacceptable.
+		 */
+		hifn_getpllconfig(dev, &sc->sc_pllconfig);
+	}
+
+	/*
+	 * Setup PCI resources. Note that we record the bus
+	 * tag and handle for each register mapping, this is
+	 * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
+	 * and WRITE_REG_1 macros throughout the driver.
+	 */
+	mem_start = pci_resource_start(sc->sc_pcidev, 0);
+	mem_len   = pci_resource_len(sc->sc_pcidev, 0);
+	sc->sc_bar0 = (ocf_iomem_t) ioremap(mem_start, mem_len);
+	if (!sc->sc_bar0) {
+		device_printf(sc->sc_dev, "cannot map bar%d register space\n", 0);
+		goto fail;
+	}
+	sc->sc_bar0_lastreg = (bus_size_t) -1;
+
+	mem_start = pci_resource_start(sc->sc_pcidev, 1);
+	mem_len   = pci_resource_len(sc->sc_pcidev, 1);
+	sc->sc_bar1 = (ocf_iomem_t) ioremap(mem_start, mem_len);
+	if (!sc->sc_bar1) {
+		device_printf(sc->sc_dev, "cannot map bar%d register space\n", 1);
+		goto fail;
+	}
+	sc->sc_bar1_lastreg = (bus_size_t) -1;
+
+	/* fix up the bus size */
+	if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
+		device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
+		goto fail;
+	}
+	if (pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK)) {
+		device_printf(sc->sc_dev,
+				"No usable consistent DMA configuration, aborting.\n");
+		goto fail;
+	}
+
+	hifn_set_retry(sc);
+
+	/*
+	 * Setup the area where the Hifn DMA's descriptors
+	 * and associated data structures.
+	 */
+	sc->sc_dma = (struct hifn_dma *) pci_alloc_consistent(dev,
+			sizeof(*sc->sc_dma),
+			&sc->sc_dma_physaddr);
+	if (!sc->sc_dma) {
+		device_printf(sc->sc_dev, "cannot alloc sc_dma\n");
+		goto fail;
+	}
+	bzero(sc->sc_dma, sizeof(*sc->sc_dma));
+
+	/*
+	 * Reset the board and do the ``secret handshake''
+	 * to enable the crypto support.  Then complete the
+	 * initialization procedure by setting up the interrupt
+	 * and hooking in to the system crypto support so we'll
+	 * get used for system services like the crypto device,
+	 * IPsec, RNG device, etc.
+	 */
+	hifn_reset_board(sc, 0);
+
+	if (hifn_enable_crypto(sc) != 0) {
+		device_printf(sc->sc_dev, "crypto enabling failed\n");
+		goto fail;
+	}
+	hifn_reset_puc(sc);
+
+	hifn_init_dma(sc);
+	hifn_init_pci_registers(sc);
+
+	pci_set_master(sc->sc_pcidev);
+
+	/* XXX can't dynamically determine ram type for 795x; force dram */
+	if (sc->sc_flags & HIFN_IS_7956)
+		sc->sc_drammodel = 1;
+	else if (hifn_ramtype(sc))
+		goto fail;
+
+	if (sc->sc_drammodel == 0)
+		hifn_sramsize(sc);
+	else
+		hifn_dramsize(sc);
+
+	/*
+	 * Workaround for NetSec 7751 rev A: half ram size because two
+	 * of the address lines were left floating
+	 */
+	if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
+	    pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
+	    pci_get_revid(dev) == 0x61)	/*XXX???*/
+		sc->sc_ramsize >>= 1;
+
+	/*
+	 * Arrange the interrupt line.
+	 */
+	rc = request_irq(dev->irq, hifn_intr, IRQF_SHARED, "hifn", sc);
+	if (rc) {
+		device_printf(sc->sc_dev, "could not map interrupt: %d\n", rc);
+		goto fail;
+	}
+	sc->sc_irq = dev->irq;
+
+	hifn_sessions(sc);
+
+	/*
+	 * NB: Keep only the low 16 bits; this masks the chip id
+	 *     from the 7951.
+	 */
+	rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;
+
+	rseg = sc->sc_ramsize / 1024;
+	rbase = 'K';
+	if (sc->sc_ramsize >= (1024 * 1024)) {
+		rbase = 'M';
+		rseg /= 1024;
+	}
+	device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram",
+		hifn_partname(sc), rev,
+		rseg, rbase, sc->sc_drammodel ? 'd' : 's');
+	if (sc->sc_flags & HIFN_IS_7956)
+		printf(", pll=0x%x<%s clk, %ux mult>",
+			sc->sc_pllconfig,
+			sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
+			2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
+	printf("\n");
+
+	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
+	if (sc->sc_cid < 0) {
+		device_printf(sc->sc_dev, "could not get crypto driver id\n");
+		goto fail;
+	}
+
+	WRITE_REG_0(sc, HIFN_0_PUCNFG,
+	    READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
+	ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
+
+	switch (ena) {
+	case HIFN_PUSTAT_ENA_2:
+		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
+		crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
+		if (sc->sc_flags & HIFN_HAS_AES)
+			crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
+		/*FALLTHROUGH*/
+	case HIFN_PUSTAT_ENA_1:
+		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
+		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
+		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
+		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
+		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
+		break;
+	}
+
+	if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
+		hifn_init_pubrng(sc);
+
+	init_timer(&sc->sc_tickto);
+	sc->sc_tickto.function = hifn_tick;
+	sc->sc_tickto.data = (unsigned long) sc->sc_num;
+	mod_timer(&sc->sc_tickto, jiffies + HZ);
+
+	return (0);
+
+fail:
+    if (sc->sc_cid >= 0)
+        crypto_unregister_all(sc->sc_cid);
+    if (sc->sc_irq != -1)
+        free_irq(sc->sc_irq, sc);
+    if (sc->sc_dma) {
+		/* Turn off DMA polling */
+		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
+			HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
+
+        pci_free_consistent(sc->sc_pcidev,
+				sizeof(*sc->sc_dma),
+                sc->sc_dma, sc->sc_dma_physaddr);
+	}
+    kfree(sc);
+	return (-ENXIO);
+}
+
+/*
+ * Detach an interface that successfully probed.
+ */
+static void
+hifn_remove(struct pci_dev *dev)
+{
+	struct hifn_softc *sc = pci_get_drvdata(dev);
+	unsigned long l_flags;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));
+
+	/* disable interrupts */
+	HIFN_LOCK(sc);
+	WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
+	HIFN_UNLOCK(sc);
+
+	/*XXX other resources */
+	del_timer_sync(&sc->sc_tickto);
+
+	/* Turn off DMA polling */
+	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
+	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
+
+	crypto_unregister_all(sc->sc_cid);
+
+	free_irq(sc->sc_irq, sc);
+
+	pci_free_consistent(sc->sc_pcidev, sizeof(*sc->sc_dma),
+                sc->sc_dma, sc->sc_dma_physaddr);
+}
+
+
+static int
+hifn_init_pubrng(struct hifn_softc *sc)
+{
+	int i;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
+		/* Reset 7951 public key/rng engine */
+		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
+		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);
+
+		for (i = 0; i < 100; i++) {
+			DELAY(1000);
+			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
+			    HIFN_PUBRST_RESET) == 0)
+				break;
+		}
+
+		if (i == 100) {
+			device_printf(sc->sc_dev, "public key init failed\n");
+			return (1);
+		}
+	}
+
+	/* Enable the rng, if available */
+#ifdef CONFIG_OCF_RANDOMHARVEST
+	if (sc->sc_flags & HIFN_HAS_RNG) {
+		if (sc->sc_flags & HIFN_IS_7811) {
+			u_int32_t r;
+			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
+			if (r & HIFN_7811_RNGENA_ENA) {
+				r &= ~HIFN_7811_RNGENA_ENA;
+				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
+			}
+			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
+			    HIFN_7811_RNGCFG_DEFL);
+			r |= HIFN_7811_RNGENA_ENA;
+			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
+		} else
+			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
+			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
+			    HIFN_RNGCFG_ENA);
+
+		sc->sc_rngfirst = 1;
+		crypto_rregister(sc->sc_cid, hifn_read_random, sc);
+	}
+#endif
+
+	/* Enable public key engine, if available */
+	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
+		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
+		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
+		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
+#ifdef HIFN_VULCANDEV
+		sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0,
+					UID_ROOT, GID_WHEEL, 0666,
+					"vulcanpk");
+		sc->sc_pkdev->si_drv1 = sc;
+#endif
+	}
+
+	return (0);
+}
+
+#ifdef CONFIG_OCF_RANDOMHARVEST
+static int
+hifn_read_random(void *arg, u_int32_t *buf, int len)
+{
+	struct hifn_softc *sc = (struct hifn_softc *) arg;
+	u_int32_t sts;
+	int i, rc = 0;
+
+	if (len <= 0)
+		return rc;
+
+	if (sc->sc_flags & HIFN_IS_7811) {
+		/* ONLY VALID ON 7811!!!! */
+		for (i = 0; i < 5; i++) {
+			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
+			if (sts & HIFN_7811_RNGSTS_UFL) {
+				device_printf(sc->sc_dev,
+					      "RNG underflow: disabling\n");
+				/* DAVIDM perhaps return -1 */
+				break;
+			}
+			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
+				break;
+
+			/*
+			 * There are at least two words in the RNG FIFO
+			 * at this point.
+			 */
+			if (rc < len)
+				buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
+			if (rc < len)
+				buf[rc++] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
+		}
+	} else
+		buf[rc++] = READ_REG_1(sc, HIFN_1_RNG_DATA);
+
+	/* NB: discard first data read */
+	if (sc->sc_rngfirst) {
+		sc->sc_rngfirst = 0;
+		rc = 0;
+	}
+
+	return(rc);
+}
+#endif /* CONFIG_OCF_RANDOMHARVEST */
+
+static void
+hifn_puc_wait(struct hifn_softc *sc)
+{
+	int i;
+	int reg = HIFN_0_PUCTRL;
+
+	if (sc->sc_flags & HIFN_IS_7956) {
+		reg = HIFN_0_PUCTRL2;
+	}
+
+	for (i = 5000; i > 0; i--) {
+		DELAY(1);
+		if (!(READ_REG_0(sc, reg) & HIFN_PUCTRL_RESET))
+			break;
+	}
+	if (!i)
+		device_printf(sc->sc_dev, "proc unit did not reset(0x%x)\n",
+				READ_REG_0(sc, HIFN_0_PUCTRL));
+}
+
+/*
+ * Reset the processing unit.
+ */
+static void
+hifn_reset_puc(struct hifn_softc *sc)
+{
+	/* Reset processing unit */
+	int reg = HIFN_0_PUCTRL;
+
+	if (sc->sc_flags & HIFN_IS_7956) {
+		reg = HIFN_0_PUCTRL2;
+	}
+	WRITE_REG_0(sc, reg, HIFN_PUCTRL_DMAENA);
+
+	hifn_puc_wait(sc);
+}
+
+/*
+ * Set the Retry and TRDY registers; note that we set them to
+ * zero because the 7811 locks up when forced to retry (section
+ * 3.6 of "Specification Update SU-0014-04".  Not clear if we
+ * should do this for all Hifn parts, but it doesn't seem to hurt.
+ */
+static void
+hifn_set_retry(struct hifn_softc *sc)
+{
+	DPRINTF("%s()\n", __FUNCTION__);
+	/* NB: RETRY only responds to 8-bit reads/writes */
+	pci_write_config_byte(sc->sc_pcidev, HIFN_RETRY_TIMEOUT, 0);
+	pci_write_config_byte(sc->sc_pcidev, HIFN_TRDY_TIMEOUT, 0);
+	/* piggy back the cache line setting here */
+	pci_write_config_byte(sc->sc_pcidev, PCI_CACHE_LINE_SIZE, hifn_cache_linesize);
+}
+
+/*
+ * Resets the board.  Values in the regesters are left as is
+ * from the reset (i.e. initial values are assigned elsewhere).
+ */
+static void
+hifn_reset_board(struct hifn_softc *sc, int full)
+{
+	u_int32_t reg;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+	/*
+	 * Set polling in the DMA configuration register to zero.  0x7 avoids
+	 * resetting the board and zeros out the other fields.
+	 */
+	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
+	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
+
+	/*
+	 * Now that polling has been disabled, we have to wait 1 ms
+	 * before resetting the board.
+	 */
+	DELAY(1000);
+
+	/* Reset the DMA unit */
+	if (full) {
+		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
+		DELAY(1000);
+	} else {
+		WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
+		    HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
+		hifn_reset_puc(sc);
+	}
+
+	KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
+	bzero(sc->sc_dma, sizeof(*sc->sc_dma));
+
+	/* Bring dma unit out of reset */
+	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
+	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
+
+	hifn_puc_wait(sc);
+	hifn_set_retry(sc);
+
+	if (sc->sc_flags & HIFN_IS_7811) {
+		for (reg = 0; reg < 1000; reg++) {
+			if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
+			    HIFN_MIPSRST_CRAMINIT)
+				break;
+			DELAY(1000);
+		}
+		if (reg == 1000)
+			device_printf(sc->sc_dev, ": cram init timeout\n");
+	} else {
+	  /* set up DMA configuration register #2 */
+	  /* turn off all PK and BAR0 swaps */
+	  WRITE_REG_1(sc, HIFN_1_DMA_CNFG2,
+		      (3 << HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT)|
+		      (3 << HIFN_DMACNFG2_INIT_READ_BURST_SHIFT)|
+		      (2 << HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT)|
+		      (2 << HIFN_DMACNFG2_TGT_READ_BURST_SHIFT));
+	}
+}
+
+static u_int32_t
+hifn_next_signature(u_int32_t a, u_int cnt)
+{
+	int i;
+	u_int32_t v;
+
+	for (i = 0; i < cnt; i++) {
+
+		/* get the parity */
+		v = a & 0x80080125;
+		v ^= v >> 16;
+		v ^= v >> 8;
+		v ^= v >> 4;
+		v ^= v >> 2;
+		v ^= v >> 1;
+
+		a = (v & 1) ^ (a << 1);
+	}
+
+	return a;
+}
+
+
+/*
+ * Checks to see if crypto is already enabled.  If crypto isn't enable,
+ * "hifn_enable_crypto" is called to enable it.  The check is important,
+ * as enabling crypto twice will lock the board.
+ */
+static int
+hifn_enable_crypto(struct hifn_softc *sc)
+{
+	u_int32_t dmacfg, ramcfg, encl, addr, i;
+	char offtbl[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+					  0x00, 0x00, 0x00, 0x00 };
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
+	dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);
+
+	/*
+	 * The RAM config register's encrypt level bit needs to be set before
+	 * every read performed on the encryption level register.
+	 */
+	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
+
+	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
+
+	/*
+	 * Make sure we don't re-unlock.  Two unlocks kills chip until the
+	 * next reboot.
+	 */
+	if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
+#ifdef HIFN_DEBUG
+		if (hifn_debug)
+			device_printf(sc->sc_dev,
+			    "Strong crypto already enabled!\n");
+#endif
+		goto report;
+	}
+
+	if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
+#ifdef HIFN_DEBUG
+		if (hifn_debug)
+			device_printf(sc->sc_dev,
+			      "Unknown encryption level 0x%x\n", encl);
+#endif
+		return 1;
+	}
+
+	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
+	    HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
+	DELAY(1000);
+	addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
+	DELAY(1000);
+	WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
+	DELAY(1000);
+
+	for (i = 0; i <= 12; i++) {
+		addr = hifn_next_signature(addr, offtbl[i] + 0x101);
+		WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);
+
+		DELAY(1000);
+	}
+
+	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
+	encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
+
+#ifdef HIFN_DEBUG
+	if (hifn_debug) {
+		if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
+			device_printf(sc->sc_dev, "Engine is permanently "
+				"locked until next system reset!\n");
+		else
+			device_printf(sc->sc_dev, "Engine enabled "
+				"successfully!\n");
+	}
+#endif
+
+report:
+	WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
+	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
+
+	switch (encl) {
+	case HIFN_PUSTAT_ENA_1:
+	case HIFN_PUSTAT_ENA_2:
+		break;
+	case HIFN_PUSTAT_ENA_0:
+	default:
+		device_printf(sc->sc_dev, "disabled\n");
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * Give initial values to the registers listed in the "Register Space"
+ * section of the HIFN Software Development reference manual.
+ */
+static void
+hifn_init_pci_registers(struct hifn_softc *sc)
+{
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	/* write fixed values needed by the Initialization registers */
+	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
+	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
+	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);
+
+	/* write all 4 ring address registers */
+	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
+	    offsetof(struct hifn_dma, cmdr[0]));
+	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
+	    offsetof(struct hifn_dma, srcr[0]));
+	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
+	    offsetof(struct hifn_dma, dstr[0]));
+	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
+	    offsetof(struct hifn_dma, resr[0]));
+
+	DELAY(2000);
+
+	/* write status register */
+	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
+	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
+	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
+	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
+	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
+	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
+	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
+	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
+	    HIFN_DMACSR_S_WAIT |
+	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
+	    HIFN_DMACSR_C_WAIT |
+	    HIFN_DMACSR_ENGINE |
+	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
+		HIFN_DMACSR_PUBDONE : 0) |
+	    ((sc->sc_flags & HIFN_IS_7811) ?
+		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));
+
+	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
+	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
+	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
+	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
+	    ((sc->sc_flags & HIFN_IS_7811) ?
+		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
+	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
+	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
+
+
+	if (sc->sc_flags & HIFN_IS_7956) {
+		u_int32_t pll;
+
+		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
+		    HIFN_PUCNFG_TCALLPHASES |
+		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
+
+		/* turn off the clocks and insure bypass is set */
+		pll = READ_REG_1(sc, HIFN_1_PLL);
+		pll = (pll &~ (HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL))
+		  | HIFN_PLL_BP | HIFN_PLL_MBSET;
+		WRITE_REG_1(sc, HIFN_1_PLL, pll);
+		DELAY(10*1000);		/* 10ms */
+
+		/* change configuration */
+		pll = (pll &~ HIFN_PLL_CONFIG) | sc->sc_pllconfig;
+		WRITE_REG_1(sc, HIFN_1_PLL, pll);
+		DELAY(10*1000);		/* 10ms */
+
+		/* disable bypass */
+		pll &= ~HIFN_PLL_BP;
+		WRITE_REG_1(sc, HIFN_1_PLL, pll);
+		/* enable clocks with new configuration */
+		pll |= HIFN_PLL_PK_CLK_SEL | HIFN_PLL_PE_CLK_SEL;
+		WRITE_REG_1(sc, HIFN_1_PLL, pll);
+	} else {
+		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
+		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
+		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
+		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
+	}
+
+	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
+	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
+	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
+	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
+	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
+}
+
+/*
+ * The maximum number of sessions supported by the card
+ * is dependent on the amount of context ram, which
+ * encryption algorithms are enabled, and how compression
+ * is configured.  This should be configured before this
+ * routine is called.
+ */
+static void
+hifn_sessions(struct hifn_softc *sc)
+{
+	u_int32_t pucnfg;
+	int ctxsize;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
+
+	if (pucnfg & HIFN_PUCNFG_COMPSING) {
+		if (pucnfg & HIFN_PUCNFG_ENCCNFG)
+			ctxsize = 128;
+		else
+			ctxsize = 512;
+		/*
+		 * 7955/7956 has internal context memory of 32K
+		 */
+		if (sc->sc_flags & HIFN_IS_7956)
+			sc->sc_maxses = 32768 / ctxsize;
+		else
+			sc->sc_maxses = 1 +
+			    ((sc->sc_ramsize - 32768) / ctxsize);
+	} else
+		sc->sc_maxses = sc->sc_ramsize / 16384;
+
+	if (sc->sc_maxses > 2048)
+		sc->sc_maxses = 2048;
+}
+
+/*
+ * Determine ram type (sram or dram).  Board should be just out of a reset
+ * state when this is called.
+ */
+static int
+hifn_ramtype(struct hifn_softc *sc)
+{
+	u_int8_t data[8], dataexpect[8];
+	int i;
+
+	for (i = 0; i < sizeof(data); i++)
+		data[i] = dataexpect[i] = 0x55;
+	if (hifn_writeramaddr(sc, 0, data))
+		return (-1);
+	if (hifn_readramaddr(sc, 0, data))
+		return (-1);
+	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
+		sc->sc_drammodel = 1;
+		return (0);
+	}
+
+	for (i = 0; i < sizeof(data); i++)
+		data[i] = dataexpect[i] = 0xaa;
+	if (hifn_writeramaddr(sc, 0, data))
+		return (-1);
+	if (hifn_readramaddr(sc, 0, data))
+		return (-1);
+	if (bcmp(data, dataexpect, sizeof(data)) != 0) {
+		sc->sc_drammodel = 1;
+		return (0);
+	}
+
+	return (0);
+}
+
+#define	HIFN_SRAM_MAX		(32 << 20)
+#define	HIFN_SRAM_STEP_SIZE	16384
+#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)
+
+static int
+hifn_sramsize(struct hifn_softc *sc)
+{
+	u_int32_t a;
+	u_int8_t data[8];
+	u_int8_t dataexpect[sizeof(data)];
+	int32_t i;
+
+	for (i = 0; i < sizeof(data); i++)
+		data[i] = dataexpect[i] = i ^ 0x5a;
+
+	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
+		a = i * HIFN_SRAM_STEP_SIZE;
+		bcopy(&i, data, sizeof(i));
+		hifn_writeramaddr(sc, a, data);
+	}
+
+	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
+		a = i * HIFN_SRAM_STEP_SIZE;
+		bcopy(&i, dataexpect, sizeof(i));
+		if (hifn_readramaddr(sc, a, data) < 0)
+			return (0);
+		if (bcmp(data, dataexpect, sizeof(data)) != 0)
+			return (0);
+		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
+	}
+
+	return (0);
+}
+
+/*
+ * XXX For dram boards, one should really try all of the
+ * HIFN_PUCNFG_DSZ_*'s.  This just assumes that PUCNFG
+ * is already set up correctly.
+ */
+static int
+hifn_dramsize(struct hifn_softc *sc)
+{
+	u_int32_t cnfg;
+
+	if (sc->sc_flags & HIFN_IS_7956) {
+		/*
+		 * 7955/7956 have a fixed internal ram of only 32K.
+		 */
+		sc->sc_ramsize = 32768;
+	} else {
+		cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
+		    HIFN_PUCNFG_DRAMMASK;
+		sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
+	}
+	return (0);
+}
+
+static void
+hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
+{
+	struct hifn_dma *dma = sc->sc_dma;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
+		dma->cmdi = 0;
+		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
+		wmb();
+		dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
+		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
+		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+	}
+	*cmdp = dma->cmdi++;
+	dma->cmdk = dma->cmdi;
+
+	if (dma->srci == HIFN_D_SRC_RSIZE) {
+		dma->srci = 0;
+		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
+		wmb();
+		dma->srcr[HIFN_D_SRC_RSIZE].l |= htole32(HIFN_D_VALID);
+		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
+		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+	}
+	*srcp = dma->srci++;
+	dma->srck = dma->srci;
+
+	if (dma->dsti == HIFN_D_DST_RSIZE) {
+		dma->dsti = 0;
+		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
+		wmb();
+		dma->dstr[HIFN_D_DST_RSIZE].l |= htole32(HIFN_D_VALID);
+		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
+		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+	}
+	*dstp = dma->dsti++;
+	dma->dstk = dma->dsti;
+
+	if (dma->resi == HIFN_D_RES_RSIZE) {
+		dma->resi = 0;
+		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
+		wmb();
+		dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
+		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
+		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+	}
+	*resp = dma->resi++;
+	dma->resk = dma->resi;
+}
+
+static int
+hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
+{
+	struct hifn_dma *dma = sc->sc_dma;
+	hifn_base_command_t wc;
+	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
+	int r, cmdi, resi, srci, dsti;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	wc.masks = htole16(3 << 13);
+	wc.session_num = htole16(addr >> 14);
+	wc.total_source_count = htole16(8);
+	wc.total_dest_count = htole16(addr & 0x3fff);
+
+	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
+
+	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
+	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
+	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
+
+	/* build write command */
+	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
+	*(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
+	bcopy(data, &dma->test_src, sizeof(dma->test_src));
+
+	dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
+	    + offsetof(struct hifn_dma, test_src));
+	dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
+	    + offsetof(struct hifn_dma, test_dst));
+
+	dma->cmdr[cmdi].l = htole32(16 | masks);
+	dma->srcr[srci].l = htole32(8 | masks);
+	dma->dstr[dsti].l = htole32(4 | masks);
+	dma->resr[resi].l = htole32(4 | masks);
+
+	for (r = 10000; r >= 0; r--) {
+		DELAY(10);
+		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
+			break;
+	}
+	if (r == 0) {
+		device_printf(sc->sc_dev, "writeramaddr -- "
+		    "result[%d](addr %d) still valid\n", resi, addr);
+		r = -1;
+		return (-1);
+	} else
+		r = 0;
+
+	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
+	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
+	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
+
+	return (r);
+}
+
+static int
+hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
+{
+	struct hifn_dma *dma = sc->sc_dma;
+	hifn_base_command_t rc;
+	const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
+	int r, cmdi, srci, dsti, resi;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	rc.masks = htole16(2 << 13);
+	rc.session_num = htole16(addr >> 14);
+	rc.total_source_count = htole16(addr & 0x3fff);
+	rc.total_dest_count = htole16(8);
+
+	hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
+
+	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
+	    HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
+	    HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
+
+	bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
+	*(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
+
+	dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
+	    offsetof(struct hifn_dma, test_src));
+	dma->test_src = 0;
+	dma->dstr[dsti].p =  htole32(sc->sc_dma_physaddr +
+	    offsetof(struct hifn_dma, test_dst));
+	dma->test_dst = 0;
+	dma->cmdr[cmdi].l = htole32(8 | masks);
+	dma->srcr[srci].l = htole32(8 | masks);
+	dma->dstr[dsti].l = htole32(8 | masks);
+	dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
+
+	for (r = 10000; r >= 0; r--) {
+		DELAY(10);
+		if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
+			break;
+	}
+	if (r == 0) {
+		device_printf(sc->sc_dev, "readramaddr -- "
+		    "result[%d](addr %d) still valid\n", resi, addr);
+		r = -1;
+	} else {
+		r = 0;
+		bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
+	}
+
+	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
+	    HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
+	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
+
+	return (r);
+}
+
+/*
+ * Initialize the descriptor rings.
+ */
+static void
+hifn_init_dma(struct hifn_softc *sc)
+{
+	struct hifn_dma *dma = sc->sc_dma;
+	int i;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	hifn_set_retry(sc);
+
+	/* initialize static pointer values */
+	for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
+		dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
+		    offsetof(struct hifn_dma, command_bufs[i][0]));
+	for (i = 0; i < HIFN_D_RES_RSIZE; i++)
+		dma->resr[i].p = htole32(sc->sc_dma_physaddr +
+		    offsetof(struct hifn_dma, result_bufs[i][0]));
+
+	dma->cmdr[HIFN_D_CMD_RSIZE].p =
+	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
+	dma->srcr[HIFN_D_SRC_RSIZE].p =
+	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
+	dma->dstr[HIFN_D_DST_RSIZE].p =
+	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
+	dma->resr[HIFN_D_RES_RSIZE].p =
+	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));
+
+	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
+	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
+	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
+}
+
+/*
+ * Writes out the raw command buffer space.  Returns the
+ * command buffer size.
+ */
+static u_int
+hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
+{
+	struct hifn_softc *sc = NULL;
+	u_int8_t *buf_pos;
+	hifn_base_command_t *base_cmd;
+	hifn_mac_command_t *mac_cmd;
+	hifn_crypt_command_t *cry_cmd;
+	int using_mac, using_crypt, len, ivlen;
+	u_int32_t dlen, slen;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	buf_pos = buf;
+	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
+	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
+
+	base_cmd = (hifn_base_command_t *)buf_pos;
+	base_cmd->masks = htole16(cmd->base_masks);
+	slen = cmd->src_mapsize;
+	if (cmd->sloplen)
+		dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
+	else
+		dlen = cmd->dst_mapsize;
+	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
+	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
+	dlen >>= 16;
+	slen >>= 16;
+	base_cmd->session_num = htole16(
+	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
+	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
+	buf_pos += sizeof(hifn_base_command_t);
+
+	if (using_mac) {
+		mac_cmd = (hifn_mac_command_t *)buf_pos;
+		dlen = cmd->maccrd->crd_len;
+		mac_cmd->source_count = htole16(dlen & 0xffff);
+		dlen >>= 16;
+		mac_cmd->masks = htole16(cmd->mac_masks |
+		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
+		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
+		mac_cmd->reserved = 0;
+		buf_pos += sizeof(hifn_mac_command_t);
+	}
+
+	if (using_crypt) {
+		cry_cmd = (hifn_crypt_command_t *)buf_pos;
+		dlen = cmd->enccrd->crd_len;
+		cry_cmd->source_count = htole16(dlen & 0xffff);
+		dlen >>= 16;
+		cry_cmd->masks = htole16(cmd->cry_masks |
+		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
+		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
+		cry_cmd->reserved = 0;
+		buf_pos += sizeof(hifn_crypt_command_t);
+	}
+
+	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
+		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
+		buf_pos += HIFN_MAC_KEY_LENGTH;
+	}
+
+	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
+		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
+		case HIFN_CRYPT_CMD_ALG_3DES:
+			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
+			buf_pos += HIFN_3DES_KEY_LENGTH;
+			break;
+		case HIFN_CRYPT_CMD_ALG_DES:
+			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
+			buf_pos += HIFN_DES_KEY_LENGTH;
+			break;
+		case HIFN_CRYPT_CMD_ALG_RC4:
+			len = 256;
+			do {
+				int clen;
+
+				clen = MIN(cmd->cklen, len);
+				bcopy(cmd->ck, buf_pos, clen);
+				len -= clen;
+				buf_pos += clen;
+			} while (len > 0);
+			bzero(buf_pos, 4);
+			buf_pos += 4;
+			break;
+		case HIFN_CRYPT_CMD_ALG_AES:
+			/*
+			 * AES keys are variable 128, 192 and
+			 * 256 bits (16, 24 and 32 bytes).
+			 */
+			bcopy(cmd->ck, buf_pos, cmd->cklen);
+			buf_pos += cmd->cklen;
+			break;
+		}
+	}
+
+	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
+		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
+		case HIFN_CRYPT_CMD_ALG_AES:
+			ivlen = HIFN_AES_IV_LENGTH;
+			break;
+		default:
+			ivlen = HIFN_IV_LENGTH;
+			break;
+		}
+		bcopy(cmd->iv, buf_pos, ivlen);
+		buf_pos += ivlen;
+	}
+
+	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
+		bzero(buf_pos, 8);
+		buf_pos += 8;
+	}
+
+	return (buf_pos - buf);
+}
+
+static int
+hifn_dmamap_aligned(struct hifn_operand *op)
+{
+	struct hifn_softc *sc = NULL;
+	int i;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	for (i = 0; i < op->nsegs; i++) {
+		if (op->segs[i].ds_addr & 3)
+			return (0);
+		if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
+			return (0);
+	}
+	return (1);
+}
+
+static __inline int
+hifn_dmamap_dstwrap(struct hifn_softc *sc, int idx)
+{
+	struct hifn_dma *dma = sc->sc_dma;
+
+	if (++idx == HIFN_D_DST_RSIZE) {
+		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
+		    HIFN_D_MASKDONEIRQ);
+		HIFN_DSTR_SYNC(sc, idx,
+		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+		idx = 0;
+	}
+	return (idx);
+}
+
+static int
+hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
+{
+	struct hifn_dma *dma = sc->sc_dma;
+	struct hifn_operand *dst = &cmd->dst;
+	u_int32_t p, l;
+	int idx, used = 0, i;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	idx = dma->dsti;
+	for (i = 0; i < dst->nsegs - 1; i++) {
+		dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
+		dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
+		wmb();
+		dma->dstr[idx].l |= htole32(HIFN_D_VALID);
+		HIFN_DSTR_SYNC(sc, idx,
+		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+		used++;
+
+		idx = hifn_dmamap_dstwrap(sc, idx);
+	}
+
+	if (cmd->sloplen == 0) {
+		p = dst->segs[i].ds_addr;
+		l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
+		    dst->segs[i].ds_len;
+	} else {
+		p = sc->sc_dma_physaddr +
+		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
+		l = HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
+		    sizeof(u_int32_t);
+
+		if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
+			dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
+			dma->dstr[idx].l = htole32(HIFN_D_MASKDONEIRQ |
+			    (dst->segs[i].ds_len - cmd->sloplen));
+			wmb();
+			dma->dstr[idx].l |= htole32(HIFN_D_VALID);
+			HIFN_DSTR_SYNC(sc, idx,
+			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+			used++;
+
+			idx = hifn_dmamap_dstwrap(sc, idx);
+		}
+	}
+	dma->dstr[idx].p = htole32(p);
+	dma->dstr[idx].l = htole32(l);
+	wmb();
+	dma->dstr[idx].l |= htole32(HIFN_D_VALID);
+	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+	used++;
+
+	idx = hifn_dmamap_dstwrap(sc, idx);
+
+	dma->dsti = idx;
+	dma->dstu += used;
+	return (idx);
+}
+
+static __inline int
+hifn_dmamap_srcwrap(struct hifn_softc *sc, int idx)
+{
+	struct hifn_dma *dma = sc->sc_dma;
+
+	if (++idx == HIFN_D_SRC_RSIZE) {
+		dma->srcr[idx].l = htole32(HIFN_D_VALID |
+		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
+		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
+		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+		idx = 0;
+	}
+	return (idx);
+}
+
+static int
+hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
+{
+	struct hifn_dma *dma = sc->sc_dma;
+	struct hifn_operand *src = &cmd->src;
+	int idx, i;
+	u_int32_t last = 0;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	idx = dma->srci;
+	for (i = 0; i < src->nsegs; i++) {
+		if (i == src->nsegs - 1)
+			last = HIFN_D_LAST;
+
+		dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
+		dma->srcr[idx].l = htole32(src->segs[i].ds_len |
+		    HIFN_D_MASKDONEIRQ | last);
+		wmb();
+		dma->srcr[idx].l |= htole32(HIFN_D_VALID);
+		HIFN_SRCR_SYNC(sc, idx,
+		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+
+		idx = hifn_dmamap_srcwrap(sc, idx);
+	}
+	dma->srci = idx;
+	dma->srcu += src->nsegs;
+	return (idx);
+}
+
+
+static int
+hifn_crypto(
+	struct hifn_softc *sc,
+	struct hifn_command *cmd,
+	struct cryptop *crp,
+	int hint)
+{
+	struct	hifn_dma *dma = sc->sc_dma;
+	u_int32_t cmdlen, csr;
+	int cmdi, resi, err = 0;
+	unsigned long l_flags;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	/*
+	 * need 1 cmd, and 1 res
+	 *
+	 * NB: check this first since it's easy.
+	 */
+	HIFN_LOCK(sc);
+	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
+	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
+#ifdef HIFN_DEBUG
+		if (hifn_debug) {
+			device_printf(sc->sc_dev,
+				"cmd/result exhaustion, cmdu %u resu %u\n",
+				dma->cmdu, dma->resu);
+		}
+#endif
+		hifnstats.hst_nomem_cr++;
+		sc->sc_needwakeup |= CRYPTO_SYMQ;
+		HIFN_UNLOCK(sc);
+		return (ERESTART);
+	}
+
+	if (crp->crp_flags & CRYPTO_F_SKBUF) {
+		if (pci_map_skb(sc, &cmd->src, cmd->src_skb)) {
+			hifnstats.hst_nomem_load++;
+			err = ENOMEM;
+			goto err_srcmap1;
+		}
+	} else if (crp->crp_flags & CRYPTO_F_IOV) {
+		if (pci_map_uio(sc, &cmd->src, cmd->src_io)) {
+			hifnstats.hst_nomem_load++;
+			err = ENOMEM;
+			goto err_srcmap1;
+		}
+	} else {
+		if (pci_map_buf(sc, &cmd->src, cmd->src_buf, crp->crp_ilen)) {
+			hifnstats.hst_nomem_load++;
+			err = ENOMEM;
+			goto err_srcmap1;
+		}
+	}
+
+	if (hifn_dmamap_aligned(&cmd->src)) {
+		cmd->sloplen = cmd->src_mapsize & 3;
+		cmd->dst = cmd->src;
+	} else {
+		if (crp->crp_flags & CRYPTO_F_IOV) {
+			DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+			err = EINVAL;
+			goto err_srcmap;
+		} else if (crp->crp_flags & CRYPTO_F_SKBUF) {
+#ifdef NOTYET
+			int totlen, len;
+			struct mbuf *m, *m0, *mlast;
+
+			KASSERT(cmd->dst_m == cmd->src_m,
+				("hifn_crypto: dst_m initialized improperly"));
+			hifnstats.hst_unaligned++;
+			/*
+			 * Source is not aligned on a longword boundary.
+			 * Copy the data to insure alignment.  If we fail
+			 * to allocate mbufs or clusters while doing this
+			 * we return ERESTART so the operation is requeued
+			 * at the crypto later, but only if there are
+			 * ops already posted to the hardware; otherwise we
+			 * have no guarantee that we'll be re-entered.
+			 */
+			totlen = cmd->src_mapsize;
+			if (cmd->src_m->m_flags & M_PKTHDR) {
+				len = MHLEN;
+				MGETHDR(m0, M_DONTWAIT, MT_DATA);
+				if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) {
+					m_free(m0);
+					m0 = NULL;
+				}
+			} else {
+				len = MLEN;
+				MGET(m0, M_DONTWAIT, MT_DATA);
+			}
+			if (m0 == NULL) {
+				hifnstats.hst_nomem_mbuf++;
+				err = dma->cmdu ? ERESTART : ENOMEM;
+				goto err_srcmap;
+			}
+			if (totlen >= MINCLSIZE) {
+				MCLGET(m0, M_DONTWAIT);
+				if ((m0->m_flags & M_EXT) == 0) {
+					hifnstats.hst_nomem_mcl++;
+					err = dma->cmdu ? ERESTART : ENOMEM;
+					m_freem(m0);
+					goto err_srcmap;
+				}
+				len = MCLBYTES;
+			}
+			totlen -= len;
+			m0->m_pkthdr.len = m0->m_len = len;
+			mlast = m0;
+
+			while (totlen > 0) {
+				MGET(m, M_DONTWAIT, MT_DATA);
+				if (m == NULL) {
+					hifnstats.hst_nomem_mbuf++;
+					err = dma->cmdu ? ERESTART : ENOMEM;
+					m_freem(m0);
+					goto err_srcmap;
+				}
+				len = MLEN;
+				if (totlen >= MINCLSIZE) {
+					MCLGET(m, M_DONTWAIT);
+					if ((m->m_flags & M_EXT) == 0) {
+						hifnstats.hst_nomem_mcl++;
+						err = dma->cmdu ? ERESTART : ENOMEM;
+						mlast->m_next = m;
+						m_freem(m0);
+						goto err_srcmap;
+					}
+					len = MCLBYTES;
+				}
+
+				m->m_len = len;
+				m0->m_pkthdr.len += len;
+				totlen -= len;
+
+				mlast->m_next = m;
+				mlast = m;
+			}
+			cmd->dst_m = m0;
+#else
+			device_printf(sc->sc_dev,
+					"%s,%d: CRYPTO_F_SKBUF unaligned not implemented\n",
+					__FILE__, __LINE__);
+			err = EINVAL;
+			goto err_srcmap;
+#endif
+		} else {
+			device_printf(sc->sc_dev,
+					"%s,%d: unaligned contig buffers not implemented\n",
+					__FILE__, __LINE__);
+			err = EINVAL;
+			goto err_srcmap;
+		}
+	}
+
+	if (cmd->dst_map == NULL) {
+		if (crp->crp_flags & CRYPTO_F_SKBUF) {
+			if (pci_map_skb(sc, &cmd->dst, cmd->dst_skb)) {
+				hifnstats.hst_nomem_map++;
+				err = ENOMEM;
+				goto err_dstmap1;
+			}
+		} else if (crp->crp_flags & CRYPTO_F_IOV) {
+			if (pci_map_uio(sc, &cmd->dst, cmd->dst_io)) {
+				hifnstats.hst_nomem_load++;
+				err = ENOMEM;
+				goto err_dstmap1;
+			}
+		} else {
+			if (pci_map_buf(sc, &cmd->dst, cmd->dst_buf, crp->crp_ilen)) {
+				hifnstats.hst_nomem_load++;
+				err = ENOMEM;
+				goto err_dstmap1;
+			}
+		}
+	}
+
+#ifdef HIFN_DEBUG
+	if (hifn_debug) {
+		device_printf(sc->sc_dev,
+		    "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
+		    READ_REG_1(sc, HIFN_1_DMA_CSR),
+		    READ_REG_1(sc, HIFN_1_DMA_IER),
+		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
+		    cmd->src_nsegs, cmd->dst_nsegs);
+	}
+#endif
+
+#if 0
+	if (cmd->src_map == cmd->dst_map) {
+		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
+		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
+	} else {
+		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
+		    BUS_DMASYNC_PREWRITE);
+		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
+		    BUS_DMASYNC_PREREAD);
+	}
+#endif
+
+	/*
+	 * need N src, and N dst
+	 */
+	if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
+	    (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
+#ifdef HIFN_DEBUG
+		if (hifn_debug) {
+			device_printf(sc->sc_dev,
+				"src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
+				dma->srcu, cmd->src_nsegs,
+				dma->dstu, cmd->dst_nsegs);
+		}
+#endif
+		hifnstats.hst_nomem_sd++;
+		err = ERESTART;
+		goto err_dstmap;
+	}
+
+	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
+		dma->cmdi = 0;
+		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
+		wmb();
+		dma->cmdr[HIFN_D_CMD_RSIZE].l |= htole32(HIFN_D_VALID);
+		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
+		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+	}
+	cmdi = dma->cmdi++;
+	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
+	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);
+
+	/* .p for command/result already set */
+	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_LAST |
+	    HIFN_D_MASKDONEIRQ);
+	wmb();
+	dma->cmdr[cmdi].l |= htole32(HIFN_D_VALID);
+	HIFN_CMDR_SYNC(sc, cmdi,
+	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
+	dma->cmdu++;
+
+	/*
+	 * We don't worry about missing an interrupt (which a "command wait"
+	 * interrupt salvages us from), unless there is more than one command
+	 * in the queue.
+	 */
+	if (dma->cmdu > 1) {
+		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
+		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
+	}
+
+	hifnstats.hst_ipackets++;
+	hifnstats.hst_ibytes += cmd->src_mapsize;
+
+	hifn_dmamap_load_src(sc, cmd);
+
+	/*
+	 * Unlike other descriptors, we don't mask done interrupt from
+	 * result descriptor.
+	 */
+#ifdef HIFN_DEBUG
+	if (hifn_debug)
+		device_printf(sc->sc_dev, "load res\n");
+#endif
+	if (dma->resi == HIFN_D_RES_RSIZE) {
+		dma->resi = 0;
+		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_JUMP|HIFN_D_MASKDONEIRQ);
+		wmb();
+		dma->resr[HIFN_D_RES_RSIZE].l |= htole32(HIFN_D_VALID);
+		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
+		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+	}
+	resi = dma->resi++;
+	KASSERT(dma->hifn_commands[resi] == NULL,
+		("hifn_crypto: command slot %u busy", resi));
+	dma->hifn_commands[resi] = cmd;
+	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
+	if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
+		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
+		    HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
+		wmb();
+		dma->resr[resi].l |= htole32(HIFN_D_VALID);
+		sc->sc_curbatch++;
+		if (sc->sc_curbatch > hifnstats.hst_maxbatch)
+			hifnstats.hst_maxbatch = sc->sc_curbatch;
+		hifnstats.hst_totbatch++;
+	} else {
+		dma->resr[resi].l = htole32(HIFN_MAX_RESULT | HIFN_D_LAST);
+		wmb();
+		dma->resr[resi].l |= htole32(HIFN_D_VALID);
+		sc->sc_curbatch = 0;
+	}
+	HIFN_RESR_SYNC(sc, resi,
+	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+	dma->resu++;
+
+	if (cmd->sloplen)
+		cmd->slopidx = resi;
+
+	hifn_dmamap_load_dst(sc, cmd);
+
+	csr = 0;
+	if (sc->sc_c_busy == 0) {
+		csr |= HIFN_DMACSR_C_CTRL_ENA;
+		sc->sc_c_busy = 1;
+	}
+	if (sc->sc_s_busy == 0) {
+		csr |= HIFN_DMACSR_S_CTRL_ENA;
+		sc->sc_s_busy = 1;
+	}
+	if (sc->sc_r_busy == 0) {
+		csr |= HIFN_DMACSR_R_CTRL_ENA;
+		sc->sc_r_busy = 1;
+	}
+	if (sc->sc_d_busy == 0) {
+		csr |= HIFN_DMACSR_D_CTRL_ENA;
+		sc->sc_d_busy = 1;
+	}
+	if (csr)
+		WRITE_REG_1(sc, HIFN_1_DMA_CSR, csr);
+
+#ifdef HIFN_DEBUG
+	if (hifn_debug) {
+		device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
+		    READ_REG_1(sc, HIFN_1_DMA_CSR),
+		    READ_REG_1(sc, HIFN_1_DMA_IER));
+	}
+#endif
+
+	sc->sc_active = 5;
+	HIFN_UNLOCK(sc);
+	KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
+	return (err);		/* success */
+
+err_dstmap:
+	if (cmd->src_map != cmd->dst_map)
+		pci_unmap_buf(sc, &cmd->dst);
+err_dstmap1:
+err_srcmap:
+	if (crp->crp_flags & CRYPTO_F_SKBUF) {
+		if (cmd->src_skb != cmd->dst_skb)
+#ifdef NOTYET
+			m_freem(cmd->dst_m);
+#else
+			device_printf(sc->sc_dev,
+					"%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
+					__FILE__, __LINE__);
+#endif
+	}
+	pci_unmap_buf(sc, &cmd->src);
+err_srcmap1:
+	HIFN_UNLOCK(sc);
+	return (err);
+}
+
+static void
+hifn_tick(unsigned long arg)
+{
+	struct hifn_softc *sc;
+	unsigned long l_flags;
+
+	if (arg >= HIFN_MAX_CHIPS)
+		return;
+	sc = hifn_chip_idx[arg];
+	if (!sc)
+		return;
+
+	HIFN_LOCK(sc);
+	if (sc->sc_active == 0) {
+		struct hifn_dma *dma = sc->sc_dma;
+		u_int32_t r = 0;
+
+		if (dma->cmdu == 0 && sc->sc_c_busy) {
+			sc->sc_c_busy = 0;
+			r |= HIFN_DMACSR_C_CTRL_DIS;
+		}
+		if (dma->srcu == 0 && sc->sc_s_busy) {
+			sc->sc_s_busy = 0;
+			r |= HIFN_DMACSR_S_CTRL_DIS;
+		}
+		if (dma->dstu == 0 && sc->sc_d_busy) {
+			sc->sc_d_busy = 0;
+			r |= HIFN_DMACSR_D_CTRL_DIS;
+		}
+		if (dma->resu == 0 && sc->sc_r_busy) {
+			sc->sc_r_busy = 0;
+			r |= HIFN_DMACSR_R_CTRL_DIS;
+		}
+		if (r)
+			WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
+	} else
+		sc->sc_active--;
+	HIFN_UNLOCK(sc);
+	mod_timer(&sc->sc_tickto, jiffies + HZ);
+}
+
+static irqreturn_t
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
+hifn_intr(int irq, void *arg)
+#else
+hifn_intr(int irq, void *arg, struct pt_regs *regs)
+#endif
+{
+	struct hifn_softc *sc = arg;
+	struct hifn_dma *dma;
+	u_int32_t dmacsr, restart;
+	int i, u;
+	unsigned long l_flags;
+
+	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);
+
+	/* Nothing in the DMA unit interrupted */
+	if ((dmacsr & sc->sc_dmaier) == 0)
+		return IRQ_NONE;
+
+	HIFN_LOCK(sc);
+
+	dma = sc->sc_dma;
+
+#ifdef HIFN_DEBUG
+	if (hifn_debug) {
+		device_printf(sc->sc_dev,
+		    "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
+		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
+		    dma->cmdi, dma->srci, dma->dsti, dma->resi,
+		    dma->cmdk, dma->srck, dma->dstk, dma->resk,
+		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
+	}
+#endif
+
+	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);
+
+	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
+	    (dmacsr & HIFN_DMACSR_PUBDONE))
+		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
+		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);
+
+	restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
+	if (restart)
+		device_printf(sc->sc_dev, "overrun %x\n", dmacsr);
+
+	if (sc->sc_flags & HIFN_IS_7811) {
+		if (dmacsr & HIFN_DMACSR_ILLR)
+			device_printf(sc->sc_dev, "illegal read\n");
+		if (dmacsr & HIFN_DMACSR_ILLW)
+			device_printf(sc->sc_dev, "illegal write\n");
+	}
+
+	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
+	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
+	if (restart) {
+		device_printf(sc->sc_dev, "abort, resetting.\n");
+		hifnstats.hst_abort++;
+		hifn_abort(sc);
+		HIFN_UNLOCK(sc);
+		return IRQ_HANDLED;
+	}
+
+	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
+		/*
+		 * If no slots to process and we receive a "waiting on
+		 * command" interrupt, we disable the "waiting on command"
+		 * (by clearing it).
+		 */
+		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
+		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
+	}
+
+	/* clear the rings */
+	i = dma->resk; u = dma->resu;
+	while (u != 0) {
+		HIFN_RESR_SYNC(sc, i,
+		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
+			HIFN_RESR_SYNC(sc, i,
+			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+			break;
+		}
+
+		if (i != HIFN_D_RES_RSIZE) {
+			struct hifn_command *cmd;
+			u_int8_t *macbuf = NULL;
+
+			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
+			cmd = dma->hifn_commands[i];
+			KASSERT(cmd != NULL,
+				("hifn_intr: null command slot %u", i));
+			dma->hifn_commands[i] = NULL;
+
+			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
+				macbuf = dma->result_bufs[i];
+				macbuf += 12;
+			}
+
+			hifn_callback(sc, cmd, macbuf);
+			hifnstats.hst_opackets++;
+			u--;
+		}
+
+		if (++i == (HIFN_D_RES_RSIZE + 1))
+			i = 0;
+	}
+	dma->resk = i; dma->resu = u;
+
+	i = dma->srck; u = dma->srcu;
+	while (u != 0) {
+		if (i == HIFN_D_SRC_RSIZE)
+			i = 0;
+		HIFN_SRCR_SYNC(sc, i,
+		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
+			HIFN_SRCR_SYNC(sc, i,
+			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+			break;
+		}
+		i++, u--;
+	}
+	dma->srck = i; dma->srcu = u;
+
+	i = dma->cmdk; u = dma->cmdu;
+	while (u != 0) {
+		HIFN_CMDR_SYNC(sc, i,
+		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
+			HIFN_CMDR_SYNC(sc, i,
+			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+			break;
+		}
+		if (i != HIFN_D_CMD_RSIZE) {
+			u--;
+			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
+		}
+		if (++i == (HIFN_D_CMD_RSIZE + 1))
+			i = 0;
+	}
+	dma->cmdk = i; dma->cmdu = u;
+
+	HIFN_UNLOCK(sc);
+
+	if (sc->sc_needwakeup) {		/* XXX check high watermark */
+		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
+#ifdef HIFN_DEBUG
+		if (hifn_debug)
+			device_printf(sc->sc_dev,
+				"wakeup crypto (%x) u %d/%d/%d/%d\n",
+				sc->sc_needwakeup,
+				dma->cmdu, dma->srcu, dma->dstu, dma->resu);
+#endif
+		sc->sc_needwakeup &= ~wakeup;
+		crypto_unblock(sc->sc_cid, wakeup);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Allocate a new 'session' and return an encoded session id.  'sidp'
+ * contains our registration id, and should contain an encoded session
+ * id on successful allocation.
+ */
+static int
+hifn_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
+{
+	struct hifn_softc *sc = device_get_softc(dev);
+	struct cryptoini *c;
+	int mac = 0, cry = 0, sesn;
+	struct hifn_session *ses = NULL;
+	unsigned long l_flags;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	KASSERT(sc != NULL, ("hifn_newsession: null softc"));
+	if (sidp == NULL || cri == NULL || sc == NULL) {
+		DPRINTF("%s,%d: %s - EINVAL\n", __FILE__, __LINE__, __FUNCTION__);
+		return (EINVAL);
+	}
+
+	HIFN_LOCK(sc);
+	if (sc->sc_sessions == NULL) {
+		ses = sc->sc_sessions = (struct hifn_session *)kmalloc(sizeof(*ses),
+				SLAB_ATOMIC);
+		if (ses == NULL) {
+			HIFN_UNLOCK(sc);
+			return (ENOMEM);
+		}
+		sesn = 0;
+		sc->sc_nsessions = 1;
+	} else {
+		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
+			if (!sc->sc_sessions[sesn].hs_used) {
+				ses = &sc->sc_sessions[sesn];
+				break;
+			}
+		}
+
+		if (ses == NULL) {
+			sesn = sc->sc_nsessions;
+			ses = (struct hifn_session *)kmalloc((sesn + 1) * sizeof(*ses),
+					SLAB_ATOMIC);
+			if (ses == NULL) {
+				HIFN_UNLOCK(sc);
+				return (ENOMEM);
+			}
+			bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
+			bzero(sc->sc_sessions, sesn * sizeof(*ses));
+			kfree(sc->sc_sessions);
+			sc->sc_sessions = ses;
+			ses = &sc->sc_sessions[sesn];
+			sc->sc_nsessions++;
+		}
+	}
+	HIFN_UNLOCK(sc);
+
+	bzero(ses, sizeof(*ses));
+	ses->hs_used = 1;
+
+	for (c = cri; c != NULL; c = c->cri_next) {
+		switch (c->cri_alg) {
+		case CRYPTO_MD5:
+		case CRYPTO_SHA1:
+		case CRYPTO_MD5_HMAC:
+		case CRYPTO_SHA1_HMAC:
+			if (mac) {
+				DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+				return (EINVAL);
+			}
+			mac = 1;
+			ses->hs_mlen = c->cri_mlen;
+			if (ses->hs_mlen == 0) {
+				switch (c->cri_alg) {
+				case CRYPTO_MD5:
+				case CRYPTO_MD5_HMAC:
+					ses->hs_mlen = 16;
+					break;
+				case CRYPTO_SHA1:
+				case CRYPTO_SHA1_HMAC:
+					ses->hs_mlen = 20;
+					break;
+				}
+			}
+			break;
+		case CRYPTO_DES_CBC:
+		case CRYPTO_3DES_CBC:
+		case CRYPTO_AES_CBC:
+		case CRYPTO_ARC4:
+			if (cry) {
+				DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+				return (EINVAL);
+			}
+			cry = 1;
+			break;
+		default:
+			DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+			return (EINVAL);
+		}
+	}
+	if (mac == 0 && cry == 0) {
+		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+		return (EINVAL);
+	}
+
+	*sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);
+
+	return (0);
+}
+
+/*
+ * Deallocate a session.
+ * XXX this routine should run a zero'd mac/encrypt key into context ram.
+ * XXX to blow away any keys already stored there.
+ */
+static int
+hifn_freesession(device_t dev, u_int64_t tid)
+{
+	struct hifn_softc *sc = device_get_softc(dev);
+	int session, error;
+	u_int32_t sid = CRYPTO_SESID2LID(tid);
+	unsigned long l_flags;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	KASSERT(sc != NULL, ("hifn_freesession: null softc"));
+	if (sc == NULL) {
+		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+		return (EINVAL);
+	}
+
+	HIFN_LOCK(sc);
+	session = HIFN_SESSION(sid);
+	if (session < sc->sc_nsessions) {
+		bzero(&sc->sc_sessions[session], sizeof(struct hifn_session));
+		error = 0;
+	} else {
+		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+		error = EINVAL;
+	}
+	HIFN_UNLOCK(sc);
+
+	return (error);
+}
+
+static int
+hifn_process(device_t dev, struct cryptop *crp, int hint)
+{
+	struct hifn_softc *sc = device_get_softc(dev);
+	struct hifn_command *cmd = NULL;
+	int session, err, ivlen;
+	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	if (crp == NULL || crp->crp_callback == NULL) {
+		hifnstats.hst_invalid++;
+		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+		return (EINVAL);
+	}
+	session = HIFN_SESSION(crp->crp_sid);
+
+	if (sc == NULL || session >= sc->sc_nsessions) {
+		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+		err = EINVAL;
+		goto errout;
+	}
+
+	cmd = kmalloc(sizeof(struct hifn_command), SLAB_ATOMIC);
+	if (cmd == NULL) {
+		hifnstats.hst_nomem++;
+		err = ENOMEM;
+		goto errout;
+	}
+	memset(cmd, 0, sizeof(*cmd));
+
+	if (crp->crp_flags & CRYPTO_F_SKBUF) {
+		cmd->src_skb = (struct sk_buff *)crp->crp_buf;
+		cmd->dst_skb = (struct sk_buff *)crp->crp_buf;
+	} else if (crp->crp_flags & CRYPTO_F_IOV) {
+		cmd->src_io = (struct uio *)crp->crp_buf;
+		cmd->dst_io = (struct uio *)crp->crp_buf;
+	} else {
+		cmd->src_buf = crp->crp_buf;
+		cmd->dst_buf = crp->crp_buf;
+	}
+
+	crd1 = crp->crp_desc;
+	if (crd1 == NULL) {
+		DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+		err = EINVAL;
+		goto errout;
+	}
+	crd2 = crd1->crd_next;
+
+	if (crd2 == NULL) {
+		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
+		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
+		    crd1->crd_alg == CRYPTO_SHA1 ||
+		    crd1->crd_alg == CRYPTO_MD5) {
+			maccrd = crd1;
+			enccrd = NULL;
+		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
+		    crd1->crd_alg == CRYPTO_3DES_CBC ||
+		    crd1->crd_alg == CRYPTO_AES_CBC ||
+		    crd1->crd_alg == CRYPTO_ARC4) {
+			if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
+				cmd->base_masks |= HIFN_BASE_CMD_DECODE;
+			maccrd = NULL;
+			enccrd = crd1;
+		} else {
+			DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+			err = EINVAL;
+			goto errout;
+		}
+	} else {
+		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
+                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
+                     crd1->crd_alg == CRYPTO_MD5 ||
+                     crd1->crd_alg == CRYPTO_SHA1) &&
+		    (crd2->crd_alg == CRYPTO_DES_CBC ||
+		     crd2->crd_alg == CRYPTO_3DES_CBC ||
+		     crd2->crd_alg == CRYPTO_AES_CBC ||
+		     crd2->crd_alg == CRYPTO_ARC4) &&
+		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
+			cmd->base_masks = HIFN_BASE_CMD_DECODE;
+			maccrd = crd1;
+			enccrd = crd2;
+		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
+		     crd1->crd_alg == CRYPTO_ARC4 ||
+		     crd1->crd_alg == CRYPTO_3DES_CBC ||
+		     crd1->crd_alg == CRYPTO_AES_CBC) &&
+		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
+                     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
+                     crd2->crd_alg == CRYPTO_MD5 ||
+                     crd2->crd_alg == CRYPTO_SHA1) &&
+		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
+			enccrd = crd1;
+			maccrd = crd2;
+		} else {
+			/*
+			 * We cannot order the 7751 as requested
+			 */
+			DPRINTF("%s,%d: %s %d,%d,%d - EINVAL\n",__FILE__,__LINE__,__FUNCTION__, crd1->crd_alg, crd2->crd_alg, crd1->crd_flags & CRD_F_ENCRYPT);
+			err = EINVAL;
+			goto errout;
+		}
+	}
+
+	if (enccrd) {
+		cmd->enccrd = enccrd;
+		cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
+		switch (enccrd->crd_alg) {
+		case CRYPTO_ARC4:
+			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
+			break;
+		case CRYPTO_DES_CBC:
+			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
+			    HIFN_CRYPT_CMD_MODE_CBC |
+			    HIFN_CRYPT_CMD_NEW_IV;
+			break;
+		case CRYPTO_3DES_CBC:
+			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
+			    HIFN_CRYPT_CMD_MODE_CBC |
+			    HIFN_CRYPT_CMD_NEW_IV;
+			break;
+		case CRYPTO_AES_CBC:
+			cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
+			    HIFN_CRYPT_CMD_MODE_CBC |
+			    HIFN_CRYPT_CMD_NEW_IV;
+			break;
+		default:
+			DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+			err = EINVAL;
+			goto errout;
+		}
+		if (enccrd->crd_alg != CRYPTO_ARC4) {
+			ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
+				HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
+			if (enccrd->crd_flags & CRD_F_ENCRYPT) {
+				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
+					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
+				else
+					read_random(cmd->iv, ivlen);
+
+				if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
+				    == 0) {
+					crypto_copyback(crp->crp_flags,
+					    crp->crp_buf, enccrd->crd_inject,
+					    ivlen, cmd->iv);
+				}
+			} else {
+				if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
+					bcopy(enccrd->crd_iv, cmd->iv, ivlen);
+				else {
+					crypto_copydata(crp->crp_flags,
+					    crp->crp_buf, enccrd->crd_inject,
+					    ivlen, cmd->iv);
+				}
+			}
+		}
+
+		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
+			cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
+		cmd->ck = enccrd->crd_key;
+		cmd->cklen = enccrd->crd_klen >> 3;
+		cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
+
+		/*
+		 * Need to specify the size for the AES key in the masks.
+		 */
+		if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
+		    HIFN_CRYPT_CMD_ALG_AES) {
+			switch (cmd->cklen) {
+			case 16:
+				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
+				break;
+			case 24:
+				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
+				break;
+			case 32:
+				cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
+				break;
+			default:
+				DPRINTF("%s,%d: %s - EINVAL\n",__FILE__,__LINE__,__FUNCTION__);
+				err = EINVAL;
+				goto errout;
+			}
+		}
+	}
+
+	if (maccrd) {
+		cmd->maccrd = maccrd;
+		cmd->base_masks |= HIFN_BASE_CMD_MAC;
+
+		switch (maccrd->crd_alg) {
+		case CRYPTO_MD5:
+			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
+			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
+			    HIFN_MAC_CMD_POS_IPSEC;
+                       break;
+		case CRYPTO_MD5_HMAC:
+			cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
+			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
+			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
+			break;
+		case CRYPTO_SHA1:
+			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
+			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
+			    HIFN_MAC_CMD_POS_IPSEC;
+			break;
+		case CRYPTO_SHA1_HMAC:
+			cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
+			    HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
+			    HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
+			break;
+		}
+
+		if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
+		     maccrd->crd_alg == CRYPTO_MD5_HMAC) {
+			cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
+			bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
+			bzero(cmd->mac + (maccrd->crd_klen >> 3),
+			    HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
+		}
+	}
+
+	cmd->crp = crp;
+	cmd->session_num = session;
+	cmd->softc = sc;
+
+	err = hifn_crypto(sc, cmd, crp, hint);
+	if (!err) {
+		return 0;
+	} else if (err == ERESTART) {
+		/*
+		 * There weren't enough resources to dispatch the request
+		 * to the part.  Notify the caller so they'll requeue this
+		 * request and resubmit it again soon.
+		 */
+#ifdef HIFN_DEBUG
+		if (hifn_debug)
+			device_printf(sc->sc_dev, "requeue request\n");
+#endif
+		kfree(cmd);
+		sc->sc_needwakeup |= CRYPTO_SYMQ;
+		return (err);
+	}
+
+errout:
+	if (cmd != NULL)
+		kfree(cmd);
+	if (err == EINVAL)
+		hifnstats.hst_invalid++;
+	else
+		hifnstats.hst_nomem++;
+	crp->crp_etype = err;
+	crypto_done(crp);
+	return (err);
+}
+
+static void
+hifn_abort(struct hifn_softc *sc)
+{
+	struct hifn_dma *dma = sc->sc_dma;
+	struct hifn_command *cmd;
+	struct cryptop *crp;
+	int i, u;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	i = dma->resk; u = dma->resu;
+	while (u != 0) {
+		cmd = dma->hifn_commands[i];
+		KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
+		dma->hifn_commands[i] = NULL;
+		crp = cmd->crp;
+
+		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
+			/* Salvage what we can. */
+			u_int8_t *macbuf;
+
+			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
+				macbuf = dma->result_bufs[i];
+				macbuf += 12;
+			} else
+				macbuf = NULL;
+			hifnstats.hst_opackets++;
+			hifn_callback(sc, cmd, macbuf);
+		} else {
+#if 0
+			if (cmd->src_map == cmd->dst_map) {
+				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
+				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+			} else {
+				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
+				    BUS_DMASYNC_POSTWRITE);
+				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
+				    BUS_DMASYNC_POSTREAD);
+			}
+#endif
+
+			if (cmd->src_skb != cmd->dst_skb) {
+#ifdef NOTYET
+				m_freem(cmd->src_m);
+				crp->crp_buf = (caddr_t)cmd->dst_m;
+#else
+				device_printf(sc->sc_dev,
+						"%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
+						__FILE__, __LINE__);
+#endif
+			}
+
+			/* non-shared buffers cannot be restarted */
+			if (cmd->src_map != cmd->dst_map) {
+				/*
+				 * XXX should be EAGAIN, delayed until
+				 * after the reset.
+				 */
+				crp->crp_etype = ENOMEM;
+				pci_unmap_buf(sc, &cmd->dst);
+			} else
+				crp->crp_etype = ENOMEM;
+
+			pci_unmap_buf(sc, &cmd->src);
+
+			kfree(cmd);
+			if (crp->crp_etype != EAGAIN)
+				crypto_done(crp);
+		}
+
+		if (++i == HIFN_D_RES_RSIZE)
+			i = 0;
+		u--;
+	}
+	dma->resk = i; dma->resu = u;
+
+	hifn_reset_board(sc, 1);
+	hifn_init_dma(sc);
+	hifn_init_pci_registers(sc);
+}
+
+static void
+hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
+{
+	struct hifn_dma *dma = sc->sc_dma;
+	struct cryptop *crp = cmd->crp;
+	struct cryptodesc *crd;
+	int i, u;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+#if 0
+	if (cmd->src_map == cmd->dst_map) {
+		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
+		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
+	} else {
+		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
+		    BUS_DMASYNC_POSTWRITE);
+		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
+		    BUS_DMASYNC_POSTREAD);
+	}
+#endif
+
+	if (crp->crp_flags & CRYPTO_F_SKBUF) {
+		if (cmd->src_skb != cmd->dst_skb) {
+#ifdef NOTYET
+			crp->crp_buf = (caddr_t)cmd->dst_m;
+			totlen = cmd->src_mapsize;
+			for (m = cmd->dst_m; m != NULL; m = m->m_next) {
+				if (totlen < m->m_len) {
+					m->m_len = totlen;
+					totlen = 0;
+				} else
+					totlen -= m->m_len;
+			}
+			cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
+			m_freem(cmd->src_m);
+#else
+			device_printf(sc->sc_dev,
+					"%s,%d: CRYPTO_F_SKBUF src != dst not implemented\n",
+					__FILE__, __LINE__);
+#endif
+		}
+	}
+
+	if (cmd->sloplen != 0) {
+		crypto_copyback(crp->crp_flags, crp->crp_buf,
+		    cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
+		    (caddr_t)&dma->slop[cmd->slopidx]);
+	}
+
+	i = dma->dstk; u = dma->dstu;
+	while (u != 0) {
+		if (i == HIFN_D_DST_RSIZE)
+			i = 0;
+#if 0
+		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
+		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+#endif
+		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
+#if 0
+			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
+			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+#endif
+			break;
+		}
+		i++, u--;
+	}
+	dma->dstk = i; dma->dstu = u;
+
+	hifnstats.hst_obytes += cmd->dst_mapsize;
+
+	if (macbuf != NULL) {
+		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
+                        int len;
+
+			if (crd->crd_alg != CRYPTO_MD5 &&
+			    crd->crd_alg != CRYPTO_SHA1 &&
+			    crd->crd_alg != CRYPTO_MD5_HMAC &&
+			    crd->crd_alg != CRYPTO_SHA1_HMAC) {
+				continue;
+			}
+			len = cmd->softc->sc_sessions[cmd->session_num].hs_mlen;
+			crypto_copyback(crp->crp_flags, crp->crp_buf,
+			    crd->crd_inject, len, macbuf);
+			break;
+		}
+	}
+
+	if (cmd->src_map != cmd->dst_map)
+		pci_unmap_buf(sc, &cmd->dst);
+	pci_unmap_buf(sc, &cmd->src);
+	kfree(cmd);
+	crypto_done(crp);
+}
+
+/*
+ * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
+ * and Group 1 registers; avoid conditions that could create
+ * burst writes by doing a read in between the writes.
+ *
+ * NB: The read we interpose is always to the same register;
+ *     we do this because reading from an arbitrary (e.g. last)
+ *     register may not always work.
+ */
+static void
+hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
+{
+	if (sc->sc_flags & HIFN_IS_7811) {
+		if (sc->sc_bar0_lastreg == reg - 4)
+			readl(sc->sc_bar0 + HIFN_0_PUCNFG);
+		sc->sc_bar0_lastreg = reg;
+	}
+	writel(val, sc->sc_bar0 + reg);
+}
+
+static void
+hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
+{
+	if (sc->sc_flags & HIFN_IS_7811) {
+		if (sc->sc_bar1_lastreg == reg - 4)
+			readl(sc->sc_bar1 + HIFN_1_REVID);
+		sc->sc_bar1_lastreg = reg;
+	}
+	writel(val, sc->sc_bar1 + reg);
+}
+
+
+static struct pci_device_id hifn_pci_tbl[] = {
+	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7951,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7955,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7956,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+	{ PCI_VENDOR_NETSEC, PCI_PRODUCT_NETSEC_7751,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+	{ PCI_VENDOR_INVERTEX, PCI_PRODUCT_INVERTEX_AEON,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7811,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+	/*
+	 * Other vendors share this PCI ID as well, such as
+	 * http://www.powercrypt.com, and obviously they also
+	 * use the same key.
+	 */
+	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7751,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+	{ 0, 0, 0, 0, 0, 0, }
+};
+MODULE_DEVICE_TABLE(pci, hifn_pci_tbl);
+
+static struct pci_driver hifn_driver = {
+	.name         = "hifn",
+	.id_table     = hifn_pci_tbl,
+	.probe        =	hifn_probe,
+	.remove       = hifn_remove,
+	/* add PM stuff here one day */
+};
+
+static int __init hifn_init (void)
+{
+	struct hifn_softc *sc = NULL;
+	int rc;
+
+	DPRINTF("%s(%p)\n", __FUNCTION__, hifn_init);
+
+	rc = pci_register_driver(&hifn_driver);
+	pci_register_driver_compat(&hifn_driver, rc);
+
+	return rc;
+}
+
+static void __exit hifn_exit (void)
+{
+	pci_unregister_driver(&hifn_driver);
+}
+
+module_init(hifn_init);
+module_exit(hifn_exit);
+
+MODULE_LICENSE("BSD");
+MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
+MODULE_DESCRIPTION("OCF driver for hifn PCI crypto devices");
diff --git a/crypto/ocf/hifn/hifn7751reg.h b/crypto/ocf/hifn/hifn7751reg.h
new file mode 100644
index 000000000000..5a3d5aafd972
--- /dev/null
+++ b/crypto/ocf/hifn/hifn7751reg.h
@@ -0,0 +1,540 @@
+/* $FreeBSD: src/sys/dev/hifn/hifn7751reg.h,v 1.7 2007/03/21 03:42:49 sam Exp $ */
+/*	$OpenBSD: hifn7751reg.h,v 1.35 2002/04/08 17:49:42 jason Exp $	*/
+
+/*-
+ * Invertex AEON / Hifn 7751 driver
+ * Copyright (c) 1999 Invertex Inc. All rights reserved.
+ * Copyright (c) 1999 Theo de Raadt
+ * Copyright (c) 2000-2001 Network Security Technologies, Inc.
+ *			http://www.netsec.net
+ *
+ * Please send any comments, feedback, bug-fixes, or feature requests to
+ * software@invertex.com.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+#ifndef __HIFN_H__
+#define	__HIFN_H__
+
+/*
+ * Some PCI configuration space offset defines.  The names were made
+ * identical to the names used by the Linux kernel.
+ */
+#define	HIFN_BAR0		PCIR_BAR(0)	/* PUC register map */
+#define	HIFN_BAR1		PCIR_BAR(1)	/* DMA register map */
+#define	HIFN_TRDY_TIMEOUT	0x40
+#define	HIFN_RETRY_TIMEOUT	0x41
+
+/*
+ * PCI vendor and device identifiers
+ * (the names are preserved from their OpenBSD source).
+ */
+#define	PCI_VENDOR_HIFN		0x13a3		/* Hifn */
+#define	PCI_PRODUCT_HIFN_7751	0x0005		/* 7751 */
+#define	PCI_PRODUCT_HIFN_6500	0x0006		/* 6500 */
+#define	PCI_PRODUCT_HIFN_7811	0x0007		/* 7811 */
+#define	PCI_PRODUCT_HIFN_7855	0x001f		/* 7855 */
+#define	PCI_PRODUCT_HIFN_7951	0x0012		/* 7951 */
+#define	PCI_PRODUCT_HIFN_7955	0x0020		/* 7954/7955 */
+#define	PCI_PRODUCT_HIFN_7956	0x001d		/* 7956 */
+
+#define	PCI_VENDOR_INVERTEX	0x14e1		/* Invertex */
+#define	PCI_PRODUCT_INVERTEX_AEON 0x0005	/* AEON */
+
+#define	PCI_VENDOR_NETSEC	0x1660		/* NetSec */
+#define	PCI_PRODUCT_NETSEC_7751	0x7751		/* 7751 */
+
+/*
+ * The values below should multiple of 4 -- and be large enough to handle
+ * any command the driver implements.
+ *
+ * MAX_COMMAND = base command + mac command + encrypt command +
+ *			mac-key + rc4-key
+ * MAX_RESULT  = base result + mac result + mac + encrypt result
+ *
+ *
+ */
+#define	HIFN_MAX_COMMAND	(8 + 8 + 8 + 64 + 260)
+#define	HIFN_MAX_RESULT		(8 + 4 + 20 + 4)
+
+/*
+ * hifn_desc_t
+ *
+ * Holds an individual descriptor for any of the rings.
+ */
+typedef struct hifn_desc {
+	volatile u_int32_t l;		/* length and status bits */
+	volatile u_int32_t p;
+} hifn_desc_t;
+
+/*
+ * Masks for the "length" field of struct hifn_desc.
+ */
+#define	HIFN_D_LENGTH		0x0000ffff	/* length bit mask */
+#define	HIFN_D_MASKDONEIRQ	0x02000000	/* mask the done interrupt */
+#define	HIFN_D_DESTOVER		0x04000000	/* destination overflow */
+#define	HIFN_D_OVER		0x08000000	/* overflow */
+#define	HIFN_D_LAST		0x20000000	/* last descriptor in chain */
+#define	HIFN_D_JUMP		0x40000000	/* jump descriptor */
+#define	HIFN_D_VALID		0x80000000	/* valid bit */
+
+
+/*
+ * Processing Unit Registers (offset from BASEREG0)
+ */
+#define	HIFN_0_PUDATA		0x00	/* Processing Unit Data */
+#define	HIFN_0_PUCTRL		0x04	/* Processing Unit Control */
+#define	HIFN_0_PUISR		0x08	/* Processing Unit Interrupt Status */
+#define	HIFN_0_PUCNFG		0x0c	/* Processing Unit Configuration */
+#define	HIFN_0_PUIER		0x10	/* Processing Unit Interrupt Enable */
+#define	HIFN_0_PUSTAT		0x14	/* Processing Unit Status/Chip ID */
+#define	HIFN_0_FIFOSTAT		0x18	/* FIFO Status */
+#define	HIFN_0_FIFOCNFG		0x1c	/* FIFO Configuration */
+#define	HIFN_0_PUCTRL2		0x28	/* Processing Unit Control (2nd map) */
+#define	HIFN_0_MUTE1		0x80
+#define	HIFN_0_MUTE2		0x90
+#define	HIFN_0_SPACESIZE	0x100	/* Register space size */
+
+/* Processing Unit Control Register (HIFN_0_PUCTRL) */
+#define	HIFN_PUCTRL_CLRSRCFIFO	0x0010	/* clear source fifo */
+#define	HIFN_PUCTRL_STOP	0x0008	/* stop pu */
+#define	HIFN_PUCTRL_LOCKRAM	0x0004	/* lock ram */
+#define	HIFN_PUCTRL_DMAENA	0x0002	/* enable dma */
+#define	HIFN_PUCTRL_RESET	0x0001	/* Reset processing unit */
+
+/* Processing Unit Interrupt Status Register (HIFN_0_PUISR) */
+#define	HIFN_PUISR_CMDINVAL	0x8000	/* Invalid command interrupt */
+#define	HIFN_PUISR_DATAERR	0x4000	/* Data error interrupt */
+#define	HIFN_PUISR_SRCFIFO	0x2000	/* Source FIFO ready interrupt */
+#define	HIFN_PUISR_DSTFIFO	0x1000	/* Destination FIFO ready interrupt */
+#define	HIFN_PUISR_DSTOVER	0x0200	/* Destination overrun interrupt */
+#define	HIFN_PUISR_SRCCMD	0x0080	/* Source command interrupt */
+#define	HIFN_PUISR_SRCCTX	0x0040	/* Source context interrupt */
+#define	HIFN_PUISR_SRCDATA	0x0020	/* Source data interrupt */
+#define	HIFN_PUISR_DSTDATA	0x0010	/* Destination data interrupt */
+#define	HIFN_PUISR_DSTRESULT	0x0004	/* Destination result interrupt */
+
+/* Processing Unit Configuration Register (HIFN_0_PUCNFG) */
+#define	HIFN_PUCNFG_DRAMMASK	0xe000	/* DRAM size mask */
+#define	HIFN_PUCNFG_DSZ_256K	0x0000	/* 256k dram */
+#define	HIFN_PUCNFG_DSZ_512K	0x2000	/* 512k dram */
+#define	HIFN_PUCNFG_DSZ_1M	0x4000	/* 1m dram */
+#define	HIFN_PUCNFG_DSZ_2M	0x6000	/* 2m dram */
+#define	HIFN_PUCNFG_DSZ_4M	0x8000	/* 4m dram */
+#define	HIFN_PUCNFG_DSZ_8M	0xa000	/* 8m dram */
+#define	HIFN_PUNCFG_DSZ_16M	0xc000	/* 16m dram */
+#define	HIFN_PUCNFG_DSZ_32M	0xe000	/* 32m dram */
+#define	HIFN_PUCNFG_DRAMREFRESH	0x1800	/* DRAM refresh rate mask */
+#define	HIFN_PUCNFG_DRFR_512	0x0000	/* 512 divisor of ECLK */
+#define	HIFN_PUCNFG_DRFR_256	0x0800	/* 256 divisor of ECLK */
+#define	HIFN_PUCNFG_DRFR_128	0x1000	/* 128 divisor of ECLK */
+#define	HIFN_PUCNFG_TCALLPHASES	0x0200	/* your guess is as good as mine... */
+#define	HIFN_PUCNFG_TCDRVTOTEM	0x0100	/* your guess is as good as mine... */
+#define	HIFN_PUCNFG_BIGENDIAN	0x0080	/* DMA big endian mode */
+#define	HIFN_PUCNFG_BUS32	0x0040	/* Bus width 32bits */
+#define	HIFN_PUCNFG_BUS16	0x0000	/* Bus width 16 bits */
+#define	HIFN_PUCNFG_CHIPID	0x0020	/* Allow chipid from PUSTAT */
+#define	HIFN_PUCNFG_DRAM	0x0010	/* Context RAM is DRAM */
+#define	HIFN_PUCNFG_SRAM	0x0000	/* Context RAM is SRAM */
+#define	HIFN_PUCNFG_COMPSING	0x0004	/* Enable single compression context */
+#define	HIFN_PUCNFG_ENCCNFG	0x0002	/* Encryption configuration */
+
+/* Processing Unit Interrupt Enable Register (HIFN_0_PUIER) */
+#define	HIFN_PUIER_CMDINVAL	0x8000	/* Invalid command interrupt */
+#define	HIFN_PUIER_DATAERR	0x4000	/* Data error interrupt */
+#define	HIFN_PUIER_SRCFIFO	0x2000	/* Source FIFO ready interrupt */
+#define	HIFN_PUIER_DSTFIFO	0x1000	/* Destination FIFO ready interrupt */
+#define	HIFN_PUIER_DSTOVER	0x0200	/* Destination overrun interrupt */
+#define	HIFN_PUIER_SRCCMD	0x0080	/* Source command interrupt */
+#define	HIFN_PUIER_SRCCTX	0x0040	/* Source context interrupt */
+#define	HIFN_PUIER_SRCDATA	0x0020	/* Source data interrupt */
+#define	HIFN_PUIER_DSTDATA	0x0010	/* Destination data interrupt */
+#define	HIFN_PUIER_DSTRESULT	0x0004	/* Destination result interrupt */
+
+/* Processing Unit Status Register/Chip ID (HIFN_0_PUSTAT) */
+#define	HIFN_PUSTAT_CMDINVAL	0x8000	/* Invalid command interrupt */
+#define	HIFN_PUSTAT_DATAERR	0x4000	/* Data error interrupt */
+#define	HIFN_PUSTAT_SRCFIFO	0x2000	/* Source FIFO ready interrupt */
+#define	HIFN_PUSTAT_DSTFIFO	0x1000	/* Destination FIFO ready interrupt */
+#define	HIFN_PUSTAT_DSTOVER	0x0200	/* Destination overrun interrupt */
+#define	HIFN_PUSTAT_SRCCMD	0x0080	/* Source command interrupt */
+#define	HIFN_PUSTAT_SRCCTX	0x0040	/* Source context interrupt */
+#define	HIFN_PUSTAT_SRCDATA	0x0020	/* Source data interrupt */
+#define	HIFN_PUSTAT_DSTDATA	0x0010	/* Destination data interrupt */
+#define	HIFN_PUSTAT_DSTRESULT	0x0004	/* Destination result interrupt */
+#define	HIFN_PUSTAT_CHIPREV	0x00ff	/* Chip revision mask */
+#define	HIFN_PUSTAT_CHIPENA	0xff00	/* Chip enabled mask */
+#define	HIFN_PUSTAT_ENA_2	0x1100	/* Level 2 enabled */
+#define	HIFN_PUSTAT_ENA_1	0x1000	/* Level 1 enabled */
+#define	HIFN_PUSTAT_ENA_0	0x3000	/* Level 0 enabled */
+#define	HIFN_PUSTAT_REV_2	0x0020	/* 7751 PT6/2 */
+#define	HIFN_PUSTAT_REV_3	0x0030	/* 7751 PT6/3 */
+
+/* FIFO Status Register (HIFN_0_FIFOSTAT) */
+#define	HIFN_FIFOSTAT_SRC	0x7f00	/* Source FIFO available */
+#define	HIFN_FIFOSTAT_DST	0x007f	/* Destination FIFO available */
+
+/* FIFO Configuration Register (HIFN_0_FIFOCNFG) */
+#define	HIFN_FIFOCNFG_THRESHOLD	0x0400	/* must be written as this value */
+
+/*
+ * DMA Interface Registers (offset from BASEREG1)
+ */
+#define	HIFN_1_DMA_CRAR		0x0c	/* DMA Command Ring Address */
+#define	HIFN_1_DMA_SRAR		0x1c	/* DMA Source Ring Address */
+#define	HIFN_1_DMA_RRAR		0x2c	/* DMA Result Ring Address */
+#define	HIFN_1_DMA_DRAR		0x3c	/* DMA Destination Ring Address */
+#define	HIFN_1_DMA_CSR		0x40	/* DMA Status and Control */
+#define	HIFN_1_DMA_IER		0x44	/* DMA Interrupt Enable */
+#define	HIFN_1_DMA_CNFG		0x48	/* DMA Configuration */
+#define	HIFN_1_PLL		0x4c	/* 7955/7956: PLL config */
+#define	HIFN_1_7811_RNGENA	0x60	/* 7811: rng enable */
+#define	HIFN_1_7811_RNGCFG	0x64	/* 7811: rng config */
+#define	HIFN_1_7811_RNGDAT	0x68	/* 7811: rng data */
+#define	HIFN_1_7811_RNGSTS	0x6c	/* 7811: rng status */
+#define	HIFN_1_DMA_CNFG2	0x6c	/* 7955/7956: dma config #2 */
+#define	HIFN_1_7811_MIPSRST	0x94	/* 7811: MIPS reset */
+#define	HIFN_1_REVID		0x98	/* Revision ID */
+
+#define	HIFN_1_PUB_RESET	0x204	/* Public/RNG Reset */
+#define	HIFN_1_PUB_BASE		0x300	/* Public Base Address */
+#define	HIFN_1_PUB_OPLEN	0x304	/* 7951-compat Public Operand Length */
+#define	HIFN_1_PUB_OP		0x308	/* 7951-compat Public Operand */
+#define	HIFN_1_PUB_STATUS	0x30c	/* 7951-compat Public Status */
+#define	HIFN_1_PUB_IEN		0x310	/* Public Interrupt enable */
+#define	HIFN_1_RNG_CONFIG	0x314	/* RNG config */
+#define	HIFN_1_RNG_DATA		0x318	/* RNG data */
+#define	HIFN_1_PUB_MODE		0x320	/* PK mode */
+#define	HIFN_1_PUB_FIFO_OPLEN	0x380	/* first element of oplen fifo */
+#define	HIFN_1_PUB_FIFO_OP	0x384	/* first element of op fifo */
+#define	HIFN_1_PUB_MEM		0x400	/* start of Public key memory */
+#define	HIFN_1_PUB_MEMEND	0xbff	/* end of Public key memory */
+
+/* DMA Status and Control Register (HIFN_1_DMA_CSR) */
+#define	HIFN_DMACSR_D_CTRLMASK	0xc0000000	/* Destinition Ring Control */
+#define	HIFN_DMACSR_D_CTRL_NOP	0x00000000	/* Dest. Control: no-op */
+#define	HIFN_DMACSR_D_CTRL_DIS	0x40000000	/* Dest. Control: disable */
+#define	HIFN_DMACSR_D_CTRL_ENA	0x80000000	/* Dest. Control: enable */
+#define	HIFN_DMACSR_D_ABORT	0x20000000	/* Destinition Ring PCIAbort */
+#define	HIFN_DMACSR_D_DONE	0x10000000	/* Destinition Ring Done */
+#define	HIFN_DMACSR_D_LAST	0x08000000	/* Destinition Ring Last */
+#define	HIFN_DMACSR_D_WAIT	0x04000000	/* Destinition Ring Waiting */
+#define	HIFN_DMACSR_D_OVER	0x02000000	/* Destinition Ring Overflow */
+#define	HIFN_DMACSR_R_CTRL	0x00c00000	/* Result Ring Control */
+#define	HIFN_DMACSR_R_CTRL_NOP	0x00000000	/* Result Control: no-op */
+#define	HIFN_DMACSR_R_CTRL_DIS	0x00400000	/* Result Control: disable */
+#define	HIFN_DMACSR_R_CTRL_ENA	0x00800000	/* Result Control: enable */
+#define	HIFN_DMACSR_R_ABORT	0x00200000	/* Result Ring PCI Abort */
+#define	HIFN_DMACSR_R_DONE	0x00100000	/* Result Ring Done */
+#define	HIFN_DMACSR_R_LAST	0x00080000	/* Result Ring Last */
+#define	HIFN_DMACSR_R_WAIT	0x00040000	/* Result Ring Waiting */
+#define	HIFN_DMACSR_R_OVER	0x00020000	/* Result Ring Overflow */
+#define	HIFN_DMACSR_S_CTRL	0x0000c000	/* Source Ring Control */
+#define	HIFN_DMACSR_S_CTRL_NOP	0x00000000	/* Source Control: no-op */
+#define	HIFN_DMACSR_S_CTRL_DIS	0x00004000	/* Source Control: disable */
+#define	HIFN_DMACSR_S_CTRL_ENA	0x00008000	/* Source Control: enable */
+#define	HIFN_DMACSR_S_ABORT	0x00002000	/* Source Ring PCI Abort */
+#define	HIFN_DMACSR_S_DONE	0x00001000	/* Source Ring Done */
+#define	HIFN_DMACSR_S_LAST	0x00000800	/* Source Ring Last */
+#define	HIFN_DMACSR_S_WAIT	0x00000400	/* Source Ring Waiting */
+#define	HIFN_DMACSR_ILLW	0x00000200	/* Illegal write (7811 only) */
+#define	HIFN_DMACSR_ILLR	0x00000100	/* Illegal read (7811 only) */
+#define	HIFN_DMACSR_C_CTRL	0x000000c0	/* Command Ring Control */
+#define	HIFN_DMACSR_C_CTRL_NOP	0x00000000	/* Command Control: no-op */
+#define	HIFN_DMACSR_C_CTRL_DIS	0x00000040	/* Command Control: disable */
+#define	HIFN_DMACSR_C_CTRL_ENA	0x00000080	/* Command Control: enable */
+#define	HIFN_DMACSR_C_ABORT	0x00000020	/* Command Ring PCI Abort */
+#define	HIFN_DMACSR_C_DONE	0x00000010	/* Command Ring Done */
+#define	HIFN_DMACSR_C_LAST	0x00000008	/* Command Ring Last */
+#define	HIFN_DMACSR_C_WAIT	0x00000004	/* Command Ring Waiting */
+#define	HIFN_DMACSR_PUBDONE	0x00000002	/* Public op done (7951 only) */
+#define	HIFN_DMACSR_ENGINE	0x00000001	/* Command Ring Engine IRQ */
+
+/* DMA Interrupt Enable Register (HIFN_1_DMA_IER) */
+#define	HIFN_DMAIER_D_ABORT	0x20000000	/* Destination Ring PCIAbort */
+#define	HIFN_DMAIER_D_DONE	0x10000000	/* Destination Ring Done */
+#define	HIFN_DMAIER_D_LAST	0x08000000	/* Destination Ring Last */
+#define	HIFN_DMAIER_D_WAIT	0x04000000	/* Destination Ring Waiting */
+#define	HIFN_DMAIER_D_OVER	0x02000000	/* Destination Ring Overflow */
+#define	HIFN_DMAIER_R_ABORT	0x00200000	/* Result Ring PCI Abort */
+#define	HIFN_DMAIER_R_DONE	0x00100000	/* Result Ring Done */
+#define	HIFN_DMAIER_R_LAST	0x00080000	/* Result Ring Last */
+#define	HIFN_DMAIER_R_WAIT	0x00040000	/* Result Ring Waiting */
+#define	HIFN_DMAIER_R_OVER	0x00020000	/* Result Ring Overflow */
+#define	HIFN_DMAIER_S_ABORT	0x00002000	/* Source Ring PCI Abort */
+#define	HIFN_DMAIER_S_DONE	0x00001000	/* Source Ring Done */
+#define	HIFN_DMAIER_S_LAST	0x00000800	/* Source Ring Last */
+#define	HIFN_DMAIER_S_WAIT	0x00000400	/* Source Ring Waiting */
+#define	HIFN_DMAIER_ILLW	0x00000200	/* Illegal write (7811 only) */
+#define	HIFN_DMAIER_ILLR	0x00000100	/* Illegal read (7811 only) */
+#define	HIFN_DMAIER_C_ABORT	0x00000020	/* Command Ring PCI Abort */
+#define	HIFN_DMAIER_C_DONE	0x00000010	/* Command Ring Done */
+#define	HIFN_DMAIER_C_LAST	0x00000008	/* Command Ring Last */
+#define	HIFN_DMAIER_C_WAIT	0x00000004	/* Command Ring Waiting */
+#define	HIFN_DMAIER_PUBDONE	0x00000002	/* public op done (7951 only) */
+#define	HIFN_DMAIER_ENGINE	0x00000001	/* Engine IRQ */
+
+/* DMA Configuration Register (HIFN_1_DMA_CNFG) */
+#define	HIFN_DMACNFG_BIGENDIAN	0x10000000	/* big endian mode */
+#define	HIFN_DMACNFG_POLLFREQ	0x00ff0000	/* Poll frequency mask */
+#define	HIFN_DMACNFG_UNLOCK	0x00000800
+#define	HIFN_DMACNFG_POLLINVAL	0x00000700	/* Invalid Poll Scalar */
+#define	HIFN_DMACNFG_LAST	0x00000010	/* Host control LAST bit */
+#define	HIFN_DMACNFG_MODE	0x00000004	/* DMA mode */
+#define	HIFN_DMACNFG_DMARESET	0x00000002	/* DMA Reset # */
+#define	HIFN_DMACNFG_MSTRESET	0x00000001	/* Master Reset # */
+
+/* DMA Configuration Register (HIFN_1_DMA_CNFG2) */
+#define	HIFN_DMACNFG2_PKSWAP32	(1 << 19)	/* swap the OPLEN/OP reg */
+#define	HIFN_DMACNFG2_PKSWAP8	(1 << 18)	/* swap the bits of OPLEN/OP */
+#define	HIFN_DMACNFG2_BAR0_SWAP32 (1<<17)	/* swap the bytes of BAR0 */
+#define	HIFN_DMACNFG2_BAR1_SWAP8 (1<<16)	/* swap the bits  of BAR0 */
+#define	HIFN_DMACNFG2_INIT_WRITE_BURST_SHIFT 12
+#define	HIFN_DMACNFG2_INIT_READ_BURST_SHIFT 8
+#define	HIFN_DMACNFG2_TGT_WRITE_BURST_SHIFT 4
+#define	HIFN_DMACNFG2_TGT_READ_BURST_SHIFT  0
+
+/* 7811 RNG Enable Register (HIFN_1_7811_RNGENA) */
+#define	HIFN_7811_RNGENA_ENA	0x00000001	/* enable RNG */
+
+/* 7811 RNG Config Register (HIFN_1_7811_RNGCFG) */
+#define	HIFN_7811_RNGCFG_PRE1	0x00000f00	/* first prescalar */
+#define	HIFN_7811_RNGCFG_OPRE	0x00000080	/* output prescalar */
+#define	HIFN_7811_RNGCFG_DEFL	0x00000f80	/* 2 words/ 1/100 sec */
+
+/* 7811 RNG Status Register (HIFN_1_7811_RNGSTS) */
+#define	HIFN_7811_RNGSTS_RDY	0x00004000	/* two numbers in FIFO */
+#define	HIFN_7811_RNGSTS_UFL	0x00001000	/* rng underflow */
+
+/* 7811 MIPS Reset Register (HIFN_1_7811_MIPSRST) */
+#define	HIFN_MIPSRST_BAR2SIZE	0xffff0000	/* sdram size */
+#define	HIFN_MIPSRST_GPRAMINIT	0x00008000	/* gpram can be accessed */
+#define	HIFN_MIPSRST_CRAMINIT	0x00004000	/* ctxram can be accessed */
+#define	HIFN_MIPSRST_LED2	0x00000400	/* external LED2 */
+#define	HIFN_MIPSRST_LED1	0x00000200	/* external LED1 */
+#define	HIFN_MIPSRST_LED0	0x00000100	/* external LED0 */
+#define	HIFN_MIPSRST_MIPSDIS	0x00000004	/* disable MIPS */
+#define	HIFN_MIPSRST_MIPSRST	0x00000002	/* warm reset MIPS */
+#define	HIFN_MIPSRST_MIPSCOLD	0x00000001	/* cold reset MIPS */
+
+/* Public key reset register (HIFN_1_PUB_RESET) */
+#define	HIFN_PUBRST_RESET	0x00000001	/* reset public/rng unit */
+
+/* Public operation register (HIFN_1_PUB_OP) */
+#define	HIFN_PUBOP_AOFFSET	0x0000003e	/* A offset */
+#define	HIFN_PUBOP_BOFFSET	0x00000fc0	/* B offset */
+#define	HIFN_PUBOP_MOFFSET	0x0003f000	/* M offset */
+#define	HIFN_PUBOP_OP_MASK	0x003c0000	/* Opcode: */
+#define	HIFN_PUBOP_OP_NOP	0x00000000	/*  NOP */
+#define	HIFN_PUBOP_OP_ADD	0x00040000	/*  ADD */
+#define	HIFN_PUBOP_OP_ADDC	0x00080000	/*  ADD w/carry */
+#define	HIFN_PUBOP_OP_SUB	0x000c0000	/*  SUB */
+#define	HIFN_PUBOP_OP_SUBC	0x00100000	/*  SUB w/carry */
+#define	HIFN_PUBOP_OP_MODADD	0x00140000	/*  Modular ADD */
+#define	HIFN_PUBOP_OP_MODSUB	0x00180000	/*  Modular SUB */
+#define	HIFN_PUBOP_OP_INCA	0x001c0000	/*  INC A */
+#define	HIFN_PUBOP_OP_DECA	0x00200000	/*  DEC A */
+#define	HIFN_PUBOP_OP_MULT	0x00240000	/*  MULT */
+#define	HIFN_PUBOP_OP_MODMULT	0x00280000	/*  Modular MULT */
+#define	HIFN_PUBOP_OP_MODRED	0x002c0000	/*  Modular Red */
+#define	HIFN_PUBOP_OP_MODEXP	0x00300000	/*  Modular Exp */
+
+/* Public operand length register (HIFN_1_PUB_OPLEN) */
+#define	HIFN_PUBOPLEN_MODLEN	0x0000007f
+#define	HIFN_PUBOPLEN_EXPLEN	0x0003ff80
+#define	HIFN_PUBOPLEN_REDLEN	0x003c0000
+
+/* Public status register (HIFN_1_PUB_STATUS) */
+#define	HIFN_PUBSTS_DONE	0x00000001	/* operation done */
+#define	HIFN_PUBSTS_CARRY	0x00000002	/* carry */
+#define	HIFN_PUBSTS_FIFO_EMPTY	0x00000100	/* fifo empty */
+#define	HIFN_PUBSTS_FIFO_FULL	0x00000200	/* fifo full */
+#define	HIFN_PUBSTS_FIFO_OVFL	0x00000400	/* fifo overflow */
+#define	HIFN_PUBSTS_FIFO_WRITE	0x000f0000	/* fifo write */
+#define	HIFN_PUBSTS_FIFO_READ	0x0f000000	/* fifo read */
+
+/* Public interrupt enable register (HIFN_1_PUB_IEN) */
+#define	HIFN_PUBIEN_DONE	0x00000001	/* operation done interrupt */
+
+/* Random number generator config register (HIFN_1_RNG_CONFIG) */
+#define	HIFN_RNGCFG_ENA		0x00000001	/* enable rng */
+
+/*
+ * Register offsets in register set 1
+ */
+
+#define	HIFN_UNLOCK_SECRET1	0xf4
+#define	HIFN_UNLOCK_SECRET2	0xfc
+
+/*
+ * PLL config register
+ *
+ * This register is present only on 7954/7955/7956 parts. It must be
+ * programmed according to the bus interface method used by the h/w.
+ * Note that the parts require a stable clock.  Since the PCI clock
+ * may vary the reference clock must usually be used.  To avoid
+ * overclocking the core logic, setup must be done carefully, refer
+ * to the driver for details.  The exact multiplier required varies
+ * by part and system configuration; refer to the Hifn documentation.
+ */
+#define	HIFN_PLL_REF_SEL	0x00000001	/* REF/HBI clk selection */
+#define	HIFN_PLL_BP		0x00000002	/* bypass (used during setup) */
+/* bit 2 reserved */
+#define	HIFN_PLL_PK_CLK_SEL	0x00000008	/* public key clk select */
+#define	HIFN_PLL_PE_CLK_SEL	0x00000010	/* packet engine clk select */
+/* bits 5-9 reserved */
+#define	HIFN_PLL_MBSET		0x00000400	/* must be set to 1 */
+#define	HIFN_PLL_ND		0x00003800	/* Fpll_ref multiplier select */
+#define	HIFN_PLL_ND_SHIFT	11
+#define	HIFN_PLL_ND_2		0x00000000	/* 2x */
+#define	HIFN_PLL_ND_4		0x00000800	/* 4x */
+#define	HIFN_PLL_ND_6		0x00001000	/* 6x */
+#define	HIFN_PLL_ND_8		0x00001800	/* 8x */
+#define	HIFN_PLL_ND_10		0x00002000	/* 10x */
+#define	HIFN_PLL_ND_12		0x00002800	/* 12x */
+/* bits 14-15 reserved */
+#define	HIFN_PLL_IS		0x00010000	/* charge pump current select */
+/* bits 17-31 reserved */
+
+/*
+ * Board configuration specifies only these bits.
+ */
+#define	HIFN_PLL_CONFIG		(HIFN_PLL_IS|HIFN_PLL_ND|HIFN_PLL_REF_SEL)
+
+/*
+ * Public Key Engine Mode Register
+ */
+#define	HIFN_PKMODE_HOSTINVERT	(1 << 0)	/* HOST INVERT */
+#define	HIFN_PKMODE_ENHANCED	(1 << 1)	/* Enable enhanced mode */
+
+
+/*********************************************************************
+ * Structs for board commands
+ *
+ *********************************************************************/
+
+/*
+ * Structure to help build up the command data structure.
+ */
+typedef struct hifn_base_command {
+	volatile u_int16_t masks;
+	volatile u_int16_t session_num;
+	volatile u_int16_t total_source_count;
+	volatile u_int16_t total_dest_count;
+} hifn_base_command_t;
+
+#define	HIFN_BASE_CMD_MAC		0x0400
+#define	HIFN_BASE_CMD_CRYPT		0x0800
+#define	HIFN_BASE_CMD_DECODE		0x2000
+#define	HIFN_BASE_CMD_SRCLEN_M		0xc000
+#define	HIFN_BASE_CMD_SRCLEN_S		14
+#define	HIFN_BASE_CMD_DSTLEN_M		0x3000
+#define	HIFN_BASE_CMD_DSTLEN_S		12
+#define	HIFN_BASE_CMD_LENMASK_HI	0x30000
+#define	HIFN_BASE_CMD_LENMASK_LO	0x0ffff
+
+/*
+ * Structure to help build up the command data structure.
+ */
+typedef struct hifn_crypt_command {
+	volatile u_int16_t masks;
+	volatile u_int16_t header_skip;
+	volatile u_int16_t source_count;
+	volatile u_int16_t reserved;
+} hifn_crypt_command_t;
+
+#define	HIFN_CRYPT_CMD_ALG_MASK		0x0003		/* algorithm: */
+#define	HIFN_CRYPT_CMD_ALG_DES		0x0000		/*   DES */
+#define	HIFN_CRYPT_CMD_ALG_3DES		0x0001		/*   3DES */
+#define	HIFN_CRYPT_CMD_ALG_RC4		0x0002		/*   RC4 */
+#define	HIFN_CRYPT_CMD_ALG_AES		0x0003		/*   AES */
+#define	HIFN_CRYPT_CMD_MODE_MASK	0x0018		/* Encrypt mode: */
+#define	HIFN_CRYPT_CMD_MODE_ECB		0x0000		/*   ECB */
+#define	HIFN_CRYPT_CMD_MODE_CBC		0x0008		/*   CBC */
+#define	HIFN_CRYPT_CMD_MODE_CFB		0x0010		/*   CFB */
+#define	HIFN_CRYPT_CMD_MODE_OFB		0x0018		/*   OFB */
+#define	HIFN_CRYPT_CMD_CLR_CTX		0x0040		/* clear context */
+#define	HIFN_CRYPT_CMD_NEW_KEY		0x0800		/* expect new key */
+#define	HIFN_CRYPT_CMD_NEW_IV		0x1000		/* expect new iv */
+
+#define	HIFN_CRYPT_CMD_SRCLEN_M		0xc000
+#define	HIFN_CRYPT_CMD_SRCLEN_S		14
+
+#define	HIFN_CRYPT_CMD_KSZ_MASK		0x0600		/* AES key size: */
+#define	HIFN_CRYPT_CMD_KSZ_128		0x0000		/*   128 bit */
+#define	HIFN_CRYPT_CMD_KSZ_192		0x0200		/*   192 bit */
+#define	HIFN_CRYPT_CMD_KSZ_256		0x0400		/*   256 bit */
+
+/*
+ * Structure to help build up the command data structure.
+ */
+typedef struct hifn_mac_command {
+	volatile u_int16_t masks;
+	volatile u_int16_t header_skip;
+	volatile u_int16_t source_count;
+	volatile u_int16_t reserved;
+} hifn_mac_command_t;
+
+#define	HIFN_MAC_CMD_ALG_MASK		0x0001
+#define	HIFN_MAC_CMD_ALG_SHA1		0x0000
+#define	HIFN_MAC_CMD_ALG_MD5		0x0001
+#define	HIFN_MAC_CMD_MODE_MASK		0x000c
+#define	HIFN_MAC_CMD_MODE_HMAC		0x0000
+#define	HIFN_MAC_CMD_MODE_SSL_MAC	0x0004
+#define	HIFN_MAC_CMD_MODE_HASH		0x0008
+#define	HIFN_MAC_CMD_MODE_FULL		0x0004
+#define	HIFN_MAC_CMD_TRUNC		0x0010
+#define	HIFN_MAC_CMD_RESULT		0x0020
+#define	HIFN_MAC_CMD_APPEND		0x0040
+#define	HIFN_MAC_CMD_SRCLEN_M		0xc000
+#define	HIFN_MAC_CMD_SRCLEN_S		14
+
+/*
+ * MAC POS IPsec initiates authentication after encryption on encodes
+ * and before decryption on decodes.
+ */
+#define	HIFN_MAC_CMD_POS_IPSEC		0x0200
+#define	HIFN_MAC_CMD_NEW_KEY		0x0800
+
+/*
+ * The poll frequency and poll scalar defines are unshifted values used
+ * to set fields in the DMA Configuration Register.
+ */
+#ifndef HIFN_POLL_FREQUENCY
+#define	HIFN_POLL_FREQUENCY	0x1
+#endif
+
+#ifndef HIFN_POLL_SCALAR
+#define	HIFN_POLL_SCALAR	0x0
+#endif
+
+#define	HIFN_MAX_SEGLEN 	0xffff		/* maximum dma segment len */
+#define	HIFN_MAX_DMALEN		0x3ffff		/* maximum dma length */
+#endif /* __HIFN_H__ */
diff --git a/crypto/ocf/hifn/hifn7751var.h b/crypto/ocf/hifn/hifn7751var.h
new file mode 100644
index 000000000000..7b690781cc1e
--- /dev/null
+++ b/crypto/ocf/hifn/hifn7751var.h
@@ -0,0 +1,368 @@
+/* $FreeBSD: src/sys/dev/hifn/hifn7751var.h,v 1.9 2007/03/21 03:42:49 sam Exp $ */
+/*	$OpenBSD: hifn7751var.h,v 1.42 2002/04/08 17:49:42 jason Exp $	*/
+
+/*-
+ * Invertex AEON / Hifn 7751 driver
+ * Copyright (c) 1999 Invertex Inc. All rights reserved.
+ * Copyright (c) 1999 Theo de Raadt
+ * Copyright (c) 2000-2001 Network Security Technologies, Inc.
+ *			http://www.netsec.net
+ *
+ * Please send any comments, feedback, bug-fixes, or feature requests to
+ * software@invertex.com.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+
+#ifndef __HIFN7751VAR_H__
+#define __HIFN7751VAR_H__
+
+#ifdef __KERNEL__
+
+/*
+ * Some configurable values for the driver.  By default command+result
+ * descriptor rings are the same size.  The src+dst descriptor rings
+ * are sized at 3.5x the number of potential commands.  Slower parts
+ * (e.g. 7951) tend to run out of src descriptors; faster parts (7811)
+ * src+cmd/result descriptors.  It's not clear that increasing the size
+ * of the descriptor rings helps performance significantly as other
+ * factors tend to come into play (e.g. copying misaligned packets).
+ */
+#define	HIFN_D_CMD_RSIZE	24	/* command descriptors */
+#define	HIFN_D_SRC_RSIZE	((HIFN_D_CMD_RSIZE * 7) / 2)	/* source descriptors */
+#define	HIFN_D_RES_RSIZE	HIFN_D_CMD_RSIZE	/* result descriptors */
+#define	HIFN_D_DST_RSIZE	HIFN_D_SRC_RSIZE	/* destination descriptors */
+
+/*
+ *  Length values for cryptography
+ */
+#define HIFN_DES_KEY_LENGTH		8
+#define HIFN_3DES_KEY_LENGTH		24
+#define HIFN_MAX_CRYPT_KEY_LENGTH	HIFN_3DES_KEY_LENGTH
+#define HIFN_IV_LENGTH			8
+#define	HIFN_AES_IV_LENGTH		16
+#define HIFN_MAX_IV_LENGTH		HIFN_AES_IV_LENGTH
+
+/*
+ *  Length values for authentication
+ */
+#define HIFN_MAC_KEY_LENGTH		64
+#define HIFN_MD5_LENGTH			16
+#define HIFN_SHA1_LENGTH		20
+#define HIFN_MAC_TRUNC_LENGTH		12
+
+#define MAX_SCATTER 64
+
+/*
+ * Data structure to hold all 4 rings and any other ring related data.
+ */
+struct hifn_dma {
+	/*
+	 *  Descriptor rings.  We add +1 to the size to accomidate the
+	 *  jump descriptor.
+	 */
+	struct hifn_desc	cmdr[HIFN_D_CMD_RSIZE+1];
+	struct hifn_desc	srcr[HIFN_D_SRC_RSIZE+1];
+	struct hifn_desc	dstr[HIFN_D_DST_RSIZE+1];
+	struct hifn_desc	resr[HIFN_D_RES_RSIZE+1];
+
+	struct hifn_command	*hifn_commands[HIFN_D_RES_RSIZE];
+
+	u_char			command_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_COMMAND];
+	u_char			result_bufs[HIFN_D_CMD_RSIZE][HIFN_MAX_RESULT];
+	u_int32_t		slop[HIFN_D_CMD_RSIZE];
+
+	u_int64_t		test_src, test_dst;
+
+	/*
+	 *  Our current positions for insertion and removal from the desriptor
+	 *  rings.
+	 */
+	int			cmdi, srci, dsti, resi;
+	volatile int		cmdu, srcu, dstu, resu;
+	int			cmdk, srck, dstk, resk;
+};
+
+struct hifn_session {
+	int hs_used;
+	int hs_mlen;
+};
+
+#define	HIFN_RING_SYNC(sc, r, i, f)					\
+	/* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
+
+#define	HIFN_CMDR_SYNC(sc, i, f)	HIFN_RING_SYNC((sc), cmdr, (i), (f))
+#define	HIFN_RESR_SYNC(sc, i, f)	HIFN_RING_SYNC((sc), resr, (i), (f))
+#define	HIFN_SRCR_SYNC(sc, i, f)	HIFN_RING_SYNC((sc), srcr, (i), (f))
+#define	HIFN_DSTR_SYNC(sc, i, f)	HIFN_RING_SYNC((sc), dstr, (i), (f))
+
+#define	HIFN_CMD_SYNC(sc, i, f)						\
+	/* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
+
+#define	HIFN_RES_SYNC(sc, i, f)						\
+	/* DAVIDM bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_dmamap, (f)) */
+
+typedef int bus_size_t;
+
+/*
+ * Holds data specific to a single HIFN board.
+ */
+struct hifn_softc {
+	softc_device_decl		 sc_dev;
+
+	struct pci_dev		*sc_pcidev;	/* PCI device pointer */
+	spinlock_t		sc_mtx;		/* per-instance lock */
+
+	int			sc_num;		/* for multiple devs */
+
+	ocf_iomem_t		sc_bar0;
+	bus_size_t		sc_bar0_lastreg;/* bar0 last reg written */
+	ocf_iomem_t		sc_bar1;
+	bus_size_t		sc_bar1_lastreg;/* bar1 last reg written */
+
+	int			sc_irq;
+
+	u_int32_t		sc_dmaier;
+	u_int32_t		sc_drammodel;	/* 1=dram, 0=sram */
+	u_int32_t		sc_pllconfig;	/* 7954/7955/7956 PLL config */
+
+	struct hifn_dma		*sc_dma;
+	dma_addr_t		sc_dma_physaddr;/* physical address of sc_dma */
+
+	int			sc_dmansegs;
+	int32_t			sc_cid;
+	int			sc_maxses;
+	int			sc_nsessions;
+	struct hifn_session	*sc_sessions;
+	int			sc_ramsize;
+	int			sc_flags;
+#define	HIFN_HAS_RNG		0x1	/* includes random number generator */
+#define	HIFN_HAS_PUBLIC		0x2	/* includes public key support */
+#define	HIFN_HAS_AES		0x4	/* includes AES support */
+#define	HIFN_IS_7811		0x8	/* Hifn 7811 part */
+#define	HIFN_IS_7956		0x10	/* Hifn 7956/7955 don't have SDRAM */
+
+	struct timer_list	sc_tickto;	/* for managing DMA */
+
+	int			sc_rngfirst;
+	int			sc_rnghz;	/* RNG polling frequency */
+
+	int			sc_c_busy;	/* command ring busy */
+	int			sc_s_busy;	/* source data ring busy */
+	int			sc_d_busy;	/* destination data ring busy */
+	int			sc_r_busy;	/* result ring busy */
+	int			sc_active;	/* for initial countdown */
+	int			sc_needwakeup;	/* ops q'd wating on resources */
+	int			sc_curbatch;	/* # ops submitted w/o int */
+	int			sc_suspended;
+#ifdef HIFN_VULCANDEV
+	struct cdev            *sc_pkdev;
+#endif
+};
+
+#define	HIFN_LOCK(_sc)		spin_lock_irqsave(&(_sc)->sc_mtx, l_flags)
+#define	HIFN_UNLOCK(_sc)	spin_unlock_irqrestore(&(_sc)->sc_mtx, l_flags)
+
+/*
+ *  hifn_command_t
+ *
+ *  This is the control structure used to pass commands to hifn_encrypt().
+ *
+ *  flags
+ *  -----
+ *  Flags is the bitwise "or" values for command configuration.  A single
+ *  encrypt direction needs to be set:
+ *
+ *	HIFN_ENCODE or HIFN_DECODE
+ *
+ *  To use cryptography, a single crypto algorithm must be included:
+ *
+ *	HIFN_CRYPT_3DES or HIFN_CRYPT_DES
+ *
+ *  To use authentication is used, a single MAC algorithm must be included:
+ *
+ *	HIFN_MAC_MD5 or HIFN_MAC_SHA1
+ *
+ *  By default MD5 uses a 16 byte hash and SHA-1 uses a 20 byte hash.
+ *  If the value below is set, hash values are truncated or assumed
+ *  truncated to 12 bytes:
+ *
+ *	HIFN_MAC_TRUNC
+ *
+ *  Keys for encryption and authentication can be sent as part of a command,
+ *  or the last key value used with a particular session can be retrieved
+ *  and used again if either of these flags are not specified.
+ *
+ *	HIFN_CRYPT_NEW_KEY, HIFN_MAC_NEW_KEY
+ *
+ *  session_num
+ *  -----------
+ *  A number between 0 and 2048 (for DRAM models) or a number between
+ *  0 and 768 (for SRAM models).  Those who don't want to use session
+ *  numbers should leave value at zero and send a new crypt key and/or
+ *  new MAC key on every command.  If you use session numbers and
+ *  don't send a key with a command, the last key sent for that same
+ *  session number will be used.
+ *
+ *  Warning:  Using session numbers and multiboard at the same time
+ *            is currently broken.
+ *
+ *  mbuf
+ *  ----
+ *  Either fill in the mbuf pointer and npa=0 or
+ *	 fill packp[] and packl[] and set npa to > 0
+ *
+ *  mac_header_skip
+ *  ---------------
+ *  The number of bytes of the source_buf that are skipped over before
+ *  authentication begins.  This must be a number between 0 and 2^16-1
+ *  and can be used by IPsec implementers to skip over IP headers.
+ *  *** Value ignored if authentication not used ***
+ *
+ *  crypt_header_skip
+ *  -----------------
+ *  The number of bytes of the source_buf that are skipped over before
+ *  the cryptographic operation begins.  This must be a number between 0
+ *  and 2^16-1.  For IPsec, this number will always be 8 bytes larger
+ *  than the auth_header_skip (to skip over the ESP header).
+ *  *** Value ignored if cryptography not used ***
+ *
+ */
+struct hifn_operand {
+	union {
+		struct sk_buff *skb;
+		struct uio *io;
+		unsigned char *buf;
+	} u;
+	void		*map;
+	bus_size_t	mapsize;
+	int		nsegs;
+	struct {
+	    dma_addr_t  ds_addr;
+	    int         ds_len;
+	} segs[MAX_SCATTER];
+};
+
+struct hifn_command {
+	u_int16_t session_num;
+	u_int16_t base_masks, cry_masks, mac_masks;
+	u_int8_t iv[HIFN_MAX_IV_LENGTH], *ck, mac[HIFN_MAC_KEY_LENGTH];
+	int cklen;
+	int sloplen, slopidx;
+
+	struct hifn_operand src;
+	struct hifn_operand dst;
+
+	struct hifn_softc *softc;
+	struct cryptop *crp;
+	struct cryptodesc *enccrd, *maccrd;
+};
+
+#define	src_skb		src.u.skb
+#define	src_io		src.u.io
+#define	src_map		src.map
+#define	src_mapsize	src.mapsize
+#define	src_segs	src.segs
+#define	src_nsegs	src.nsegs
+#define	src_buf		src.u.buf
+
+#define	dst_skb		dst.u.skb
+#define	dst_io		dst.u.io
+#define	dst_map		dst.map
+#define	dst_mapsize	dst.mapsize
+#define	dst_segs	dst.segs
+#define	dst_nsegs	dst.nsegs
+#define	dst_buf		dst.u.buf
+
+/*
+ *  Return values for hifn_crypto()
+ */
+#define HIFN_CRYPTO_SUCCESS	0
+#define HIFN_CRYPTO_BAD_INPUT	(-1)
+#define HIFN_CRYPTO_RINGS_FULL	(-2)
+
+/**************************************************************************
+ *
+ *  Function:  hifn_crypto
+ *
+ *  Purpose:   Called by external drivers to begin an encryption on the
+ *             HIFN board.
+ *
+ *  Blocking/Non-blocking Issues
+ *  ============================
+ *  The driver cannot block in hifn_crypto (no calls to tsleep) currently.
+ *  hifn_crypto() returns HIFN_CRYPTO_RINGS_FULL if there is not enough
+ *  room in any of the rings for the request to proceed.
+ *
+ *  Return Values
+ *  =============
+ *  0 for success, negative values on error
+ *
+ *  Defines for negative error codes are:
+ *
+ *    HIFN_CRYPTO_BAD_INPUT  :  The passed in command had invalid settings.
+ *    HIFN_CRYPTO_RINGS_FULL :  All DMA rings were full and non-blocking
+ *                              behaviour was requested.
+ *
+ *************************************************************************/
+
+/*
+ * Convert back and forth from 'sid' to 'card' and 'session'
+ */
+#define HIFN_CARD(sid)		(((sid) & 0xf0000000) >> 28)
+#define HIFN_SESSION(sid)	((sid) & 0x000007ff)
+#define HIFN_SID(crd,ses)	(((crd) << 28) | ((ses) & 0x7ff))
+
+#endif /* _KERNEL */
+
+struct hifn_stats {
+	u_int64_t hst_ibytes;
+	u_int64_t hst_obytes;
+	u_int32_t hst_ipackets;
+	u_int32_t hst_opackets;
+	u_int32_t hst_invalid;
+	u_int32_t hst_nomem;		/* malloc or one of hst_nomem_* */
+	u_int32_t hst_abort;
+	u_int32_t hst_noirq;		/* IRQ for no reason */
+	u_int32_t hst_totbatch;		/* ops submitted w/o interrupt */
+	u_int32_t hst_maxbatch;		/* max ops submitted together */
+	u_int32_t hst_unaligned;	/* unaligned src caused copy */
+	/*
+	 * The following divides hst_nomem into more specific buckets.
+	 */
+	u_int32_t hst_nomem_map;	/* bus_dmamap_create failed */
+	u_int32_t hst_nomem_load;	/* bus_dmamap_load_* failed */
+	u_int32_t hst_nomem_mbuf;	/* MGET* failed */
+	u_int32_t hst_nomem_mcl;	/* MCLGET* failed */
+	u_int32_t hst_nomem_cr;		/* out of command/result descriptor */
+	u_int32_t hst_nomem_sd;		/* out of src/dst descriptors */
+};
+
+#endif /* __HIFN7751VAR_H__ */
diff --git a/crypto/ocf/hifn/hifnHIPP.c b/crypto/ocf/hifn/hifnHIPP.c
new file mode 100644
index 000000000000..f0669294a0de
--- /dev/null
+++ b/crypto/ocf/hifn/hifnHIPP.c
@@ -0,0 +1,420 @@
+/*-
+ * Driver for Hifn HIPP-I/II chipset
+ * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *   notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *   notice, this list of conditions and the following disclaimer in the
+ *   documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *   derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored by Hifn Inc.
+ *
+ */
+
+/*
+ * Driver for various Hifn encryption processors.
+ */
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <linux/uio.h>
+#include <linux/sysfs.h>
+#include <linux/miscdevice.h>
+#include <asm/io.h>
+
+#include <cryptodev.h>
+
+#include "hifnHIPPreg.h"
+#include "hifnHIPPvar.h"
+
+#if 1
+#define	DPRINTF(a...)	if (hipp_debug) { \
+							printk("%s: ", sc ? \
+								device_get_nameunit(sc->sc_dev) : "hifn"); \
+							printk(a); \
+						} else
+#else
+#define	DPRINTF(a...)
+#endif
+
+typedef int bus_size_t;
+
+static inline int
+pci_get_revid(struct pci_dev *dev)
+{
+	u8 rid = 0;
+	pci_read_config_byte(dev, PCI_REVISION_ID, &rid);
+	return rid;
+}
+
+#define debug hipp_debug
+int hipp_debug = 0;
+module_param(hipp_debug, int, 0644);
+MODULE_PARM_DESC(hipp_debug, "Enable debug");
+
+int hipp_maxbatch = 1;
+module_param(hipp_maxbatch, int, 0644);
+MODULE_PARM_DESC(hipp_maxbatch, "max ops to batch w/o interrupt");
+
+static	int  hipp_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static	void hipp_remove(struct pci_dev *dev);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
+static irqreturn_t hipp_intr(int irq, void *arg);
+#else
+static irqreturn_t hipp_intr(int irq, void *arg, struct pt_regs *regs);
+#endif
+
+static int hipp_num_chips = 0;
+static struct hipp_softc *hipp_chip_idx[HIPP_MAX_CHIPS];
+
+static	int hipp_newsession(device_t, u_int32_t *, struct cryptoini *);
+static	int hipp_freesession(device_t, u_int64_t);
+static	int hipp_process(device_t, struct cryptop *, int);
+
+static device_method_t hipp_methods = {
+	/* crypto device methods */
+	DEVMETHOD(cryptodev_newsession,	hipp_newsession),
+	DEVMETHOD(cryptodev_freesession,hipp_freesession),
+	DEVMETHOD(cryptodev_process,	hipp_process),
+};
+
+static __inline u_int32_t
+READ_REG(struct hipp_softc *sc, unsigned int barno, bus_size_t reg)
+{
+	u_int32_t v = readl(sc->sc_bar[barno] + reg);
+	//sc->sc_bar0_lastreg = (bus_size_t) -1;
+	return (v);
+}
+static __inline void
+WRITE_REG(struct hipp_softc *sc, unsigned int barno, bus_size_t reg, u_int32_t val)
+{
+	writel(val, sc->sc_bar[barno] + reg);
+}
+
+#define READ_REG_0(sc, reg)         READ_REG(sc, 0, reg)
+#define WRITE_REG_0(sc, reg, val)   WRITE_REG(sc,0, reg, val)
+#define READ_REG_1(sc, reg)         READ_REG(sc, 1, reg)
+#define WRITE_REG_1(sc, reg, val)   WRITE_REG(sc,1, reg, val)
+
+static int
+hipp_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
+{
+	return EINVAL;
+}
+
+static int
+hipp_freesession(device_t dev, u_int64_t tid)
+{
+	return EINVAL;
+}
+
+static int
+hipp_process(device_t dev, struct cryptop *crp, int hint)
+{
+	return EINVAL;
+}
+
+static const char*
+hipp_partname(struct hipp_softc *sc, char buf[128], size_t blen)
+{
+	char *n = NULL;
+
+	switch (pci_get_vendor(sc->sc_pcidev)) {
+	case PCI_VENDOR_HIFN:
+		switch (pci_get_device(sc->sc_pcidev)) {
+		case PCI_PRODUCT_HIFN_7855:	n = "Hifn 7855";
+		case PCI_PRODUCT_HIFN_8155:	n = "Hifn 8155";
+		case PCI_PRODUCT_HIFN_6500:	n = "Hifn 6500";
+		}
+	}
+
+	if(n==NULL) {
+		snprintf(buf, blen, "VID=%02x,PID=%02x",
+			 pci_get_vendor(sc->sc_pcidev),
+			 pci_get_device(sc->sc_pcidev));
+	} else {
+		buf[0]='\0';
+		strncat(buf, n, blen);
+	}
+	return buf;
+}
+
+struct hipp_fs_entry {
+	struct attribute attr;
+	/* other stuff */
+};
+
+
+static ssize_t
+cryptoid_show(struct device *dev,
+	      struct device_attribute *attr,
+	      char *buf)
+{
+	struct hipp_softc *sc;
+
+	sc = pci_get_drvdata(to_pci_dev (dev));
+	return sprintf (buf, "%d\n", sc->sc_cid);
+}
+
+struct device_attribute hipp_dev_cryptoid = __ATTR_RO(cryptoid);
+
+/*
+ * Attach an interface that successfully probed.
+ */
+static int
+hipp_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+{
+	struct hipp_softc *sc = NULL;
+	int i;
+	//char rbase;
+	//u_int16_t ena;
+	int rev;
+	//int rseg;
+	int rc;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	if (pci_enable_device(dev) < 0)
+		return(-ENODEV);
+
+	if (pci_set_mwi(dev))
+		return(-ENODEV);
+
+	if (!dev->irq) {
+		printk("hifn: found device with no IRQ assigned. check BIOS settings!");
+		pci_disable_device(dev);
+		return(-ENODEV);
+	}
+
+	sc = (struct hipp_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
+	if (!sc)
+		return(-ENOMEM);
+	memset(sc, 0, sizeof(*sc));
+
+	softc_device_init(sc, "hifn-hipp", hipp_num_chips, hipp_methods);
+
+	sc->sc_pcidev = dev;
+	sc->sc_irq = -1;
+	sc->sc_cid = -1;
+	sc->sc_num = hipp_num_chips++;
+
+	if (sc->sc_num < HIPP_MAX_CHIPS)
+		hipp_chip_idx[sc->sc_num] = sc;
+
+	pci_set_drvdata(sc->sc_pcidev, sc);
+
+	spin_lock_init(&sc->sc_mtx);
+
+	/*
+	 * Setup PCI resources.
+	 * The READ_REG_0, WRITE_REG_0, READ_REG_1,
+	 * and WRITE_REG_1 macros throughout the driver are used
+	 * to permit better debugging.
+	 */
+	for(i=0; i<4; i++) {
+		unsigned long mem_start, mem_len;
+		mem_start = pci_resource_start(sc->sc_pcidev, i);
+		mem_len   = pci_resource_len(sc->sc_pcidev, i);
+		sc->sc_barphy[i] = (caddr_t)mem_start;
+		sc->sc_bar[i] = (ocf_iomem_t) ioremap(mem_start, mem_len);
+		if (!sc->sc_bar[i]) {
+			device_printf(sc->sc_dev, "cannot map bar%d register space\n", i);
+			goto fail;
+		}
+	}
+
+	//hipp_reset_board(sc, 0);
+	pci_set_master(sc->sc_pcidev);
+
+	/*
+	 * Arrange the interrupt line.
+	 */
+	rc = request_irq(dev->irq, hipp_intr, IRQF_SHARED, "hifn", sc);
+	if (rc) {
+		device_printf(sc->sc_dev, "could not map interrupt: %d\n", rc);
+		goto fail;
+	}
+	sc->sc_irq = dev->irq;
+
+	rev = READ_REG_1(sc, HIPP_1_REVID) & 0xffff;
+
+	{
+		char b[32];
+		device_printf(sc->sc_dev, "%s, rev %u",
+			      hipp_partname(sc, b, sizeof(b)), rev);
+	}
+
+#if 0
+	if (sc->sc_flags & HIFN_IS_7956)
+		printf(", pll=0x%x<%s clk, %ux mult>",
+			sc->sc_pllconfig,
+			sc->sc_pllconfig & HIFN_PLL_REF_SEL ? "ext" : "pci",
+			2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
+#endif
+	printf("\n");
+
+	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
+	if (sc->sc_cid < 0) {
+		device_printf(sc->sc_dev, "could not get crypto driver id\n");
+		goto fail;
+	}
+
+#if 0 /* cannot work with a non-GPL module */
+	/* make a sysfs entry to let the world know what entry we got */
+	sysfs_create_file(&sc->sc_pcidev->dev.kobj, &hipp_dev_cryptoid.attr);
+#endif
+
+#if 0
+	init_timer(&sc->sc_tickto);
+	sc->sc_tickto.function = hifn_tick;
+	sc->sc_tickto.data = (unsigned long) sc->sc_num;
+	mod_timer(&sc->sc_tickto, jiffies + HZ);
+#endif
+
+#if 0 /* no code here yet ?? */
+	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
+#endif
+
+	return (0);
+
+fail:
+	if (sc->sc_cid >= 0)
+		crypto_unregister_all(sc->sc_cid);
+	if (sc->sc_irq != -1)
+		free_irq(sc->sc_irq, sc);
+
+#if 0
+	if (sc->sc_dma) {
+		/* Turn off DMA polling */
+		WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
+			    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
+
+		pci_free_consistent(sc->sc_pcidev,
+				    sizeof(*sc->sc_dma),
+				    sc->sc_dma, sc->sc_dma_physaddr);
+	}
+#endif
+	kfree(sc);
+	return (-ENXIO);
+}
+
+/*
+ * Detach an interface that successfully probed.
+ */
+static void
+hipp_remove(struct pci_dev *dev)
+{
+	struct hipp_softc *sc = pci_get_drvdata(dev);
+	unsigned long l_flags;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	/* disable interrupts */
+	HIPP_LOCK(sc);
+
+#if 0
+	WRITE_REG_1(sc, HIFN_1_DMA_IER, 0);
+	HIFN_UNLOCK(sc);
+
+	/*XXX other resources */
+	del_timer_sync(&sc->sc_tickto);
+
+	/* Turn off DMA polling */
+	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
+	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
+#endif
+
+	crypto_unregister_all(sc->sc_cid);
+
+	free_irq(sc->sc_irq, sc);
+
+#if 0
+	pci_free_consistent(sc->sc_pcidev, sizeof(*sc->sc_dma),
+                sc->sc_dma, sc->sc_dma_physaddr);
+#endif
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
+static irqreturn_t hipp_intr(int irq, void *arg)
+#else
+static irqreturn_t hipp_intr(int irq, void *arg, struct pt_regs *regs)
+#endif
+{
+	struct hipp_softc *sc = arg;
+
+	sc = sc; /* shut up compiler */
+
+	return IRQ_HANDLED;
+}
+
+static struct pci_device_id hipp_pci_tbl[] = {
+	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_7855,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+	{ PCI_VENDOR_HIFN, PCI_PRODUCT_HIFN_8155,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+};
+MODULE_DEVICE_TABLE(pci, hipp_pci_tbl);
+
+static struct pci_driver hipp_driver = {
+	.name         = "hipp",
+	.id_table     = hipp_pci_tbl,
+	.probe        =	hipp_probe,
+	.remove       = hipp_remove,
+	/* add PM stuff here one day */
+};
+
+static int __init hipp_init (void)
+{
+	struct hipp_softc *sc = NULL;
+	int rc;
+
+	DPRINTF("%s(%p)\n", __FUNCTION__, hipp_init);
+
+	rc = pci_register_driver(&hipp_driver);
+	pci_register_driver_compat(&hipp_driver, rc);
+
+	return rc;
+}
+
+static void __exit hipp_exit (void)
+{
+	pci_unregister_driver(&hipp_driver);
+}
+
+module_init(hipp_init);
+module_exit(hipp_exit);
+
+MODULE_LICENSE("BSD");
+MODULE_AUTHOR("Michael Richardson <mcr@xelerance.com>");
+MODULE_DESCRIPTION("OCF driver for hifn HIPP-I/II PCI crypto devices");
diff --git a/crypto/ocf/hifn/hifnHIPPreg.h b/crypto/ocf/hifn/hifnHIPPreg.h
new file mode 100644
index 000000000000..8c0e72038f70
--- /dev/null
+++ b/crypto/ocf/hifn/hifnHIPPreg.h
@@ -0,0 +1,46 @@
+/*-
+ * Hifn HIPP-I/HIPP-II (7855/8155) driver.
+ * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored by Hifn inc.
+ *
+ */
+
+#ifndef __HIFNHIPP_H__
+#define	__HIFNHIPP_H__
+
+/*
+ * PCI vendor and device identifiers
+ */
+#define	PCI_VENDOR_HIFN		0x13a3		/* Hifn */
+#define	PCI_PRODUCT_HIFN_6500	0x0006		/* 6500 */
+#define	PCI_PRODUCT_HIFN_7855	0x001f		/* 7855 */
+#define	PCI_PRODUCT_HIFN_8155	0x999		/* XXX 8155 */
+
+#define HIPP_1_REVID            0x01 /* BOGUS */
+
+#endif /* __HIPP_H__ */
diff --git a/crypto/ocf/hifn/hifnHIPPvar.h b/crypto/ocf/hifn/hifnHIPPvar.h
new file mode 100644
index 000000000000..dde47f7fdf32
--- /dev/null
+++ b/crypto/ocf/hifn/hifnHIPPvar.h
@@ -0,0 +1,93 @@
+/*
+ * Hifn HIPP-I/HIPP-II (7855/8155) driver.
+ * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com> *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored by Hifn inc.
+ *
+ */
+
+#ifndef __HIFNHIPPVAR_H__
+#define __HIFNHIPPVAR_H__
+
+#define HIPP_MAX_CHIPS 8
+
+/*
+ * Holds data specific to a single Hifn HIPP-I board.
+ */
+struct hipp_softc {
+	softc_device_decl		 sc_dev;
+
+	struct pci_dev		*sc_pcidev;	/* device backpointer */
+	ocf_iomem_t             sc_bar[5];
+	caddr_t		        sc_barphy[5];   /* physical address */
+	int			sc_num;		/* for multiple devs */
+	spinlock_t		sc_mtx;		/* per-instance lock */
+	int32_t			sc_cid;
+	int			sc_irq;
+
+#if 0
+
+	u_int32_t		sc_dmaier;
+	u_int32_t		sc_drammodel;	/* 1=dram, 0=sram */
+	u_int32_t		sc_pllconfig;	/* 7954/7955/7956 PLL config */
+
+	struct hifn_dma		*sc_dma;
+	dma_addr_t		sc_dma_physaddr;/* physical address of sc_dma */
+
+	int			sc_dmansegs;
+	int			sc_maxses;
+	int			sc_nsessions;
+	struct hifn_session	*sc_sessions;
+	int			sc_ramsize;
+	int			sc_flags;
+#define	HIFN_HAS_RNG		0x1	/* includes random number generator */
+#define	HIFN_HAS_PUBLIC		0x2	/* includes public key support */
+#define	HIFN_HAS_AES		0x4	/* includes AES support */
+#define	HIFN_IS_7811		0x8	/* Hifn 7811 part */
+#define	HIFN_IS_7956		0x10	/* Hifn 7956/7955 don't have SDRAM */
+
+	struct timer_list	sc_tickto;	/* for managing DMA */
+
+	int			sc_rngfirst;
+	int			sc_rnghz;	/* RNG polling frequency */
+
+	int			sc_c_busy;	/* command ring busy */
+	int			sc_s_busy;	/* source data ring busy */
+	int			sc_d_busy;	/* destination data ring busy */
+	int			sc_r_busy;	/* result ring busy */
+	int			sc_active;	/* for initial countdown */
+	int			sc_needwakeup;	/* ops q'd wating on resources */
+	int			sc_curbatch;	/* # ops submitted w/o int */
+	int			sc_suspended;
+	struct miscdevice       sc_miscdev;
+#endif
+};
+
+#define	HIPP_LOCK(_sc)		spin_lock_irqsave(&(_sc)->sc_mtx, l_flags)
+#define	HIPP_UNLOCK(_sc)	spin_unlock_irqrestore(&(_sc)->sc_mtx, l_flags)
+
+#endif /* __HIFNHIPPVAR_H__ */
diff --git a/crypto/ocf/ixp4xx/Makefile b/crypto/ocf/ixp4xx/Makefile
new file mode 100644
index 000000000000..4555b58dc5d4
--- /dev/null
+++ b/crypto/ocf/ixp4xx/Makefile
@@ -0,0 +1,103 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+#
+# You will need to point this at your Intel ixp425 includes,  this portion
+# of the Makefile only really works under SGLinux with the appropriate libs
+# installed.  They can be downloaded from http://www.snapgear.org/
+#
+ifeq ($(CONFIG_CPU_IXP46X),y)
+IXPLATFORM = ixp46X
+else
+ifeq ($(CONFIG_CPU_IXP43X),y)
+IXPLATFORM = ixp43X
+else
+IXPLATFORM = ixp42X
+endif
+endif
+
+ifdef CONFIG_IXP400_LIB_2_4
+IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.4/ixp400_xscale_sw
+OSAL_DIR     = $(ROOTDIR)/modules/ixp425/ixp400-2.4/ixp_osal
+endif
+ifdef CONFIG_IXP400_LIB_2_1
+IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.1/ixp400_xscale_sw
+OSAL_DIR     = $(ROOTDIR)/modules/ixp425/ixp400-2.1/ixp_osal
+endif
+ifdef CONFIG_IXP400_LIB_2_0
+IX_XSCALE_SW = $(ROOTDIR)/modules/ixp425/ixp400-2.0/ixp400_xscale_sw
+OSAL_DIR     = $(ROOTDIR)/modules/ixp425/ixp400-2.0/ixp_osal
+endif
+ifdef IX_XSCALE_SW
+ifdef CONFIG_IXP400_LIB_2_4
+IXP_CFLAGS = \
+	-I$(ROOTDIR)/. \
+	-I$(IX_XSCALE_SW)/src/include \
+	-I$(OSAL_DIR)/common/include/ \
+	-I$(OSAL_DIR)/common/include/modules/ \
+	-I$(OSAL_DIR)/common/include/modules/ddk/ \
+	-I$(OSAL_DIR)/common/include/modules/bufferMgt/ \
+	-I$(OSAL_DIR)/common/include/modules/ioMem/ \
+	-I$(OSAL_DIR)/common/os/linux/include/ \
+	-I$(OSAL_DIR)/common/os/linux/include/core/  \
+	-I$(OSAL_DIR)/common/os/linux/include/modules/ \
+	-I$(OSAL_DIR)/common/os/linux/include/modules/ddk/ \
+	-I$(OSAL_DIR)/common/os/linux/include/modules/bufferMgt/ \
+	-I$(OSAL_DIR)/common/os/linux/include/modules/ioMem/ \
+	-I$(OSAL_DIR)/platforms/$(IXPLATFORM)/include/ \
+	-I$(OSAL_DIR)/platforms/$(IXPLATFORM)/os/linux/include/ \
+	-DENABLE_IOMEM -DENABLE_BUFFERMGT -DENABLE_DDK \
+	-DUSE_IXP4XX_CRYPTO
+else
+IXP_CFLAGS = \
+	-I$(ROOTDIR)/. \
+	-I$(IX_XSCALE_SW)/src/include \
+	-I$(OSAL_DIR)/ \
+	-I$(OSAL_DIR)/os/linux/include/ \
+	-I$(OSAL_DIR)/os/linux/include/modules/ \
+	-I$(OSAL_DIR)/os/linux/include/modules/ioMem/ \
+	-I$(OSAL_DIR)/os/linux/include/modules/bufferMgt/ \
+	-I$(OSAL_DIR)/os/linux/include/core/  \
+	-I$(OSAL_DIR)/os/linux/include/platforms/ \
+	-I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ \
+	-I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ixp425 \
+	-I$(OSAL_DIR)/os/linux/include/platforms/ixp400/ixp465 \
+	-I$(OSAL_DIR)/os/linux/include/core/ \
+	-I$(OSAL_DIR)/include/ \
+	-I$(OSAL_DIR)/include/modules/ \
+	-I$(OSAL_DIR)/include/modules/bufferMgt/ \
+	-I$(OSAL_DIR)/include/modules/ioMem/ \
+	-I$(OSAL_DIR)/include/platforms/ \
+	-I$(OSAL_DIR)/include/platforms/ixp400/ \
+	-DUSE_IXP4XX_CRYPTO
+endif
+endif
+ifdef CONFIG_IXP400_LIB_1_4
+IXP_CFLAGS   = \
+	-I$(ROOTDIR)/. \
+	-I$(ROOTDIR)/modules/ixp425/ixp400-1.4/ixp400_xscale_sw/src/include \
+	-I$(ROOTDIR)/modules/ixp425/ixp400-1.4/ixp400_xscale_sw/src/linux \
+	-DUSE_IXP4XX_CRYPTO
+endif
+ifndef IXPDIR
+IXPDIR = ixp-version-is-not-supported
+endif
+
+ifeq ($(CONFIG_CPU_IXP46X),y)
+IXP_CFLAGS += -D__ixp46X
+else
+ifeq ($(CONFIG_CPU_IXP43X),y)
+IXP_CFLAGS += -D__ixp43X
+else
+IXP_CFLAGS += -D__ixp42X
+endif
+endif
+
+obj-$(CONFIG_OCF_IXP4XX) += ixp4xx.o
+
+obj ?= .
+EXTRA_CFLAGS += $(IXP_CFLAGS) -I$(obj)/.. -I$(obj)/.
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
diff --git a/crypto/ocf/ixp4xx/ixp4xx.c b/crypto/ocf/ixp4xx/ixp4xx.c
new file mode 100644
index 000000000000..f27b2278965c
--- /dev/null
+++ b/crypto/ocf/ixp4xx/ixp4xx.c
@@ -0,0 +1,1339 @@
+/*
+ * An OCF module that uses Intels IXP CryptACC API to do the crypto.
+ * This driver requires the IXP400 Access Library that is available
+ * from Intel in order to operate (or compile).
+ *
+ * Written by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2006-2011 David McCullough
+ * Copyright (C) 2004-2005 Intel Corporation.
+ *
+ * LICENSE TERMS
+ *
+ * The free distribution and use of this software in both source and binary
+ * form is allowed (with or without changes) provided that:
+ *
+ *   1. distributions of this source code include the above copyright
+ *      notice, this list of conditions and the following disclaimer;
+ *
+ *   2. distributions in binary form include the above copyright
+ *      notice, this list of conditions and the following disclaimer
+ *      in the documentation and/or other associated materials;
+ *
+ *   3. the copyright holder's name is not used to endorse products
+ *      built using this software without specific written permission.
+ *
+ * ALTERNATIVELY, provided that this notice is retained in full, this product
+ * may be distributed under the terms of the GNU General Public License (GPL),
+ * in which case the provisions of the GPL apply INSTEAD OF those given above.
+ *
+ * DISCLAIMER
+ *
+ * This software is provided 'as is' with no explicit or implied warranties
+ * in respect of its properties, including, but not limited to, correctness
+ * and/or fitness for purpose.
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+#include <asm/scatterlist.h>
+
+#include <IxTypes.h>
+#include <IxOsBuffMgt.h>
+#include <IxNpeDl.h>
+#include <IxCryptoAcc.h>
+#include <IxQMgr.h>
+#include <IxOsServices.h>
+#include <IxOsCacheMMU.h>
+
+#include <cryptodev.h>
+#include <uio.h>
+
+#ifndef IX_MBUF_PRIV
+#define IX_MBUF_PRIV(x) ((x)->priv)
+#endif
+
+struct ixp_data;
+
+struct ixp_q {
+	struct list_head	 ixp_q_list;
+	struct ixp_data		*ixp_q_data;
+	struct cryptop		*ixp_q_crp;
+	struct cryptodesc	*ixp_q_ccrd;
+	struct cryptodesc	*ixp_q_acrd;
+	IX_MBUF				 ixp_q_mbuf;
+	UINT8				*ixp_hash_dest; /* Location for hash in client buffer */
+	UINT8				*ixp_hash_src; /* Location of hash in internal buffer */
+	unsigned char		 ixp_q_iv_data[IX_CRYPTO_ACC_MAX_CIPHER_IV_LENGTH];
+	unsigned char		*ixp_q_iv;
+};
+
+struct ixp_data {
+	int					 ixp_registered;	/* is the context registered */
+	int					 ixp_crd_flags;		/* detect direction changes */
+
+	int					 ixp_cipher_alg;
+	int					 ixp_auth_alg;
+
+	UINT32				 ixp_ctx_id;
+	UINT32				 ixp_hash_key_id;	/* used when hashing */
+	IxCryptoAccCtx		 ixp_ctx;
+	IX_MBUF				 ixp_pri_mbuf;
+	IX_MBUF				 ixp_sec_mbuf;
+
+	struct work_struct   ixp_pending_work;
+	struct work_struct   ixp_registration_work;
+	struct list_head	 ixp_q;				/* unprocessed requests */
+};
+
+#ifdef __ixp46X
+
+#define	MAX_IOP_SIZE	64	/* words */
+#define	MAX_OOP_SIZE	128
+
+#define	MAX_PARAMS		3
+
+struct ixp_pkq {
+	struct list_head			 pkq_list;
+	struct cryptkop				*pkq_krp;
+
+	IxCryptoAccPkeEauInOperands	 pkq_op;
+	IxCryptoAccPkeEauOpResult	 pkq_result;
+
+	UINT32						 pkq_ibuf0[MAX_IOP_SIZE];
+	UINT32						 pkq_ibuf1[MAX_IOP_SIZE];
+	UINT32						 pkq_ibuf2[MAX_IOP_SIZE];
+	UINT32						 pkq_obuf[MAX_OOP_SIZE];
+};
+
+static LIST_HEAD(ixp_pkq); /* current PK wait list */
+static struct ixp_pkq *ixp_pk_cur;
+static spinlock_t ixp_pkq_lock;
+
+#endif /* __ixp46X */
+
+static int ixp_blocked = 0;
+
+static int32_t			 ixp_id = -1;
+static struct ixp_data **ixp_sessions = NULL;
+static u_int32_t		 ixp_sesnum = 0;
+
+static int ixp_process(device_t, struct cryptop *, int);
+static int ixp_newsession(device_t, u_int32_t *, struct cryptoini *);
+static int ixp_freesession(device_t, u_int64_t);
+#ifdef __ixp46X
+static int ixp_kprocess(device_t, struct cryptkop *krp, int hint);
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+static kmem_cache_t *qcache;
+#else
+static struct kmem_cache *qcache;
+#endif
+
+#define debug ixp_debug
+static int ixp_debug = 0;
+module_param(ixp_debug, int, 0644);
+MODULE_PARM_DESC(ixp_debug, "Enable debug");
+
+static int ixp_init_crypto = 1;
+module_param(ixp_init_crypto, int, 0444); /* RO after load/boot */
+MODULE_PARM_DESC(ixp_init_crypto, "Call ixCryptoAccInit (default is 1)");
+
+static void ixp_process_pending(void *arg);
+static void ixp_registration(void *arg);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+static void ixp_process_pending_wq(struct work_struct *work);
+static void ixp_registration_wq(struct work_struct *work);
+#endif
+
+/*
+ * dummy device structure
+ */
+
+static struct {
+	softc_device_decl	sc_dev;
+} ixpdev;
+
+static device_method_t ixp_methods = {
+	/* crypto device methods */
+	DEVMETHOD(cryptodev_newsession,	ixp_newsession),
+	DEVMETHOD(cryptodev_freesession,ixp_freesession),
+	DEVMETHOD(cryptodev_process,	ixp_process),
+#ifdef __ixp46X
+	DEVMETHOD(cryptodev_kprocess,	ixp_kprocess),
+#endif
+};
+
+/*
+ * Generate a new software session.
+ */
+static int
+ixp_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
+{
+	struct ixp_data *ixp;
+	u_int32_t i;
+#define AUTH_LEN(cri, def) \
+	(cri->cri_mlen ? cri->cri_mlen : (def))
+
+	dprintk("%s():alg %d\n", __FUNCTION__,cri->cri_alg);
+	if (sid == NULL || cri == NULL) {
+		dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
+		return EINVAL;
+	}
+
+	if (ixp_sessions) {
+		for (i = 1; i < ixp_sesnum; i++)
+			if (ixp_sessions[i] == NULL)
+				break;
+	} else
+		i = 1;		/* NB: to silence compiler warning */
+
+	if (ixp_sessions == NULL || i == ixp_sesnum) {
+		struct ixp_data **ixpd;
+
+		if (ixp_sessions == NULL) {
+			i = 1; /* We leave ixp_sessions[0] empty */
+			ixp_sesnum = CRYPTO_SW_SESSIONS;
+		} else
+			ixp_sesnum *= 2;
+
+		ixpd = kmalloc(ixp_sesnum * sizeof(struct ixp_data *), SLAB_ATOMIC);
+		if (ixpd == NULL) {
+			/* Reset session number */
+			if (ixp_sesnum == CRYPTO_SW_SESSIONS)
+				ixp_sesnum = 0;
+			else
+				ixp_sesnum /= 2;
+			dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
+			return ENOBUFS;
+		}
+		memset(ixpd, 0, ixp_sesnum * sizeof(struct ixp_data *));
+
+		/* Copy existing sessions */
+		if (ixp_sessions) {
+			memcpy(ixpd, ixp_sessions,
+			    (ixp_sesnum / 2) * sizeof(struct ixp_data *));
+			kfree(ixp_sessions);
+		}
+
+		ixp_sessions = ixpd;
+	}
+
+	ixp_sessions[i] = (struct ixp_data *) kmalloc(sizeof(struct ixp_data),
+			SLAB_ATOMIC);
+	if (ixp_sessions[i] == NULL) {
+		ixp_freesession(NULL, i);
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		return ENOBUFS;
+	}
+
+	*sid = i;
+
+	ixp = ixp_sessions[i];
+	memset(ixp, 0, sizeof(*ixp));
+
+	ixp->ixp_cipher_alg = -1;
+	ixp->ixp_auth_alg = -1;
+	ixp->ixp_ctx_id = -1;
+	INIT_LIST_HEAD(&ixp->ixp_q);
+
+	ixp->ixp_ctx.useDifferentSrcAndDestMbufs = 0;
+
+	while (cri) {
+		switch (cri->cri_alg) {
+		case CRYPTO_DES_CBC:
+			ixp->ixp_cipher_alg = cri->cri_alg;
+			ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_DES;
+			ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
+			ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
+			ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
+			ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
+						IX_CRYPTO_ACC_DES_IV_64;
+			memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
+					cri->cri_key, (cri->cri_klen + 7) / 8);
+			break;
+
+		case CRYPTO_3DES_CBC:
+			ixp->ixp_cipher_alg = cri->cri_alg;
+			ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
+			ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
+			ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
+			ixp->ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
+			ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen =
+						IX_CRYPTO_ACC_DES_IV_64;
+			memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
+					cri->cri_key, (cri->cri_klen + 7) / 8);
+			break;
+
+		case CRYPTO_RIJNDAEL128_CBC:
+			ixp->ixp_cipher_alg = cri->cri_alg;
+			ixp->ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_AES;
+			ixp->ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
+			ixp->ixp_ctx.cipherCtx.cipherKeyLen = (cri->cri_klen + 7) / 8;
+			ixp->ixp_ctx.cipherCtx.cipherBlockLen = 16;
+			ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen = 16;
+			memcpy(ixp->ixp_ctx.cipherCtx.key.cipherKey,
+					cri->cri_key, (cri->cri_klen + 7) / 8);
+			break;
+
+		case CRYPTO_MD5:
+		case CRYPTO_MD5_HMAC:
+			ixp->ixp_auth_alg = cri->cri_alg;
+			ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_MD5;
+			ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, MD5_HASH_LEN);
+			ixp->ixp_ctx.authCtx.aadLen = 0;
+			/* Only MD5_HMAC needs a key */
+			if (cri->cri_alg == CRYPTO_MD5_HMAC) {
+				ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
+				if (ixp->ixp_ctx.authCtx.authKeyLen >
+						sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
+					printk(
+						"ixp4xx: Invalid key length for MD5_HMAC - %d bits\n",
+							cri->cri_klen);
+					ixp_freesession(NULL, i);
+					return EINVAL;
+				}
+				memcpy(ixp->ixp_ctx.authCtx.key.authKey,
+						cri->cri_key, (cri->cri_klen + 7) / 8);
+			}
+			break;
+
+		case CRYPTO_SHA1:
+		case CRYPTO_SHA1_HMAC:
+			ixp->ixp_auth_alg = cri->cri_alg;
+			ixp->ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
+			ixp->ixp_ctx.authCtx.authDigestLen = AUTH_LEN(cri, SHA1_HASH_LEN);
+			ixp->ixp_ctx.authCtx.aadLen = 0;
+			/* Only SHA1_HMAC needs a key */
+			if (cri->cri_alg == CRYPTO_SHA1_HMAC) {
+				ixp->ixp_ctx.authCtx.authKeyLen = (cri->cri_klen + 7) / 8;
+				if (ixp->ixp_ctx.authCtx.authKeyLen >
+						sizeof(ixp->ixp_ctx.authCtx.key.authKey)) {
+					printk(
+						"ixp4xx: Invalid key length for SHA1_HMAC - %d bits\n",
+							cri->cri_klen);
+					ixp_freesession(NULL, i);
+					return EINVAL;
+				}
+				memcpy(ixp->ixp_ctx.authCtx.key.authKey,
+						cri->cri_key, (cri->cri_klen + 7) / 8);
+			}
+			break;
+
+		default:
+			printk("ixp: unknown algo 0x%x\n", cri->cri_alg);
+			ixp_freesession(NULL, i);
+			return EINVAL;
+		}
+		cri = cri->cri_next;
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+	INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending_wq);
+	INIT_WORK(&ixp->ixp_registration_work, ixp_registration_wq);
+#else
+	INIT_WORK(&ixp->ixp_pending_work, ixp_process_pending, ixp);
+	INIT_WORK(&ixp->ixp_registration_work, ixp_registration, ixp);
+#endif
+
+	return 0;
+}
+
+
+/*
+ * Free a session.
+ */
+static int
+ixp_freesession(device_t dev, u_int64_t tid)
+{
+	u_int32_t sid = CRYPTO_SESID2LID(tid);
+
+	dprintk("%s()\n", __FUNCTION__);
+	if (sid > ixp_sesnum || ixp_sessions == NULL ||
+			ixp_sessions[sid] == NULL) {
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		return EINVAL;
+	}
+
+	/* Silently accept and return */
+	if (sid == 0)
+		return 0;
+
+	if (ixp_sessions[sid]) {
+		if (ixp_sessions[sid]->ixp_ctx_id != -1) {
+			ixCryptoAccCtxUnregister(ixp_sessions[sid]->ixp_ctx_id);
+			ixp_sessions[sid]->ixp_ctx_id = -1;
+		}
+		kfree(ixp_sessions[sid]);
+	}
+	ixp_sessions[sid] = NULL;
+	if (ixp_blocked) {
+		ixp_blocked = 0;
+		crypto_unblock(ixp_id, CRYPTO_SYMQ);
+	}
+	return 0;
+}
+
+
+/*
+ * callback for when hash processing is complete
+ */
+
+static void
+ixp_hash_perform_cb(
+	UINT32 hash_key_id,
+	IX_MBUF *bufp,
+	IxCryptoAccStatus status)
+{
+	struct ixp_q *q;
+
+	dprintk("%s(%u, %p, 0x%x)\n", __FUNCTION__, hash_key_id, bufp, status);
+
+	if (bufp == NULL) {
+		printk("ixp: NULL buf in %s\n", __FUNCTION__);
+		return;
+	}
+
+	q = IX_MBUF_PRIV(bufp);
+	if (q == NULL) {
+		printk("ixp: NULL priv in %s\n", __FUNCTION__);
+		return;
+	}
+
+	if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
+		/* On success, need to copy hash back into original client buffer */
+		memcpy(q->ixp_hash_dest, q->ixp_hash_src,
+				(q->ixp_q_data->ixp_auth_alg == CRYPTO_SHA1) ?
+					SHA1_HASH_LEN : MD5_HASH_LEN);
+	}
+	else {
+		printk("ixp: hash perform failed status=%d\n", status);
+		q->ixp_q_crp->crp_etype = EINVAL;
+	}
+
+	/* Free internal buffer used for hashing */
+	kfree(IX_MBUF_MDATA(&q->ixp_q_mbuf));
+
+	crypto_done(q->ixp_q_crp);
+	kmem_cache_free(qcache, q);
+}
+
+/*
+ * setup a request and perform it
+ */
+static void
+ixp_q_process(struct ixp_q *q)
+{
+	IxCryptoAccStatus status;
+	struct ixp_data *ixp = q->ixp_q_data;
+	int auth_off = 0;
+	int auth_len = 0;
+	int crypt_off = 0;
+	int crypt_len = 0;
+	int icv_off = 0;
+	char *crypt_func;
+
+	dprintk("%s(%p)\n", __FUNCTION__, q);
+
+	if (q->ixp_q_ccrd) {
+		if (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT) {
+			if (q->ixp_q_ccrd->crd_flags & CRD_F_IV_EXPLICIT) {
+				q->ixp_q_iv = q->ixp_q_ccrd->crd_iv;
+			} else {
+				q->ixp_q_iv = q->ixp_q_iv_data;
+				read_random(q->ixp_q_iv, ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen);
+			}
+			if ((q->ixp_q_ccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
+				crypto_copyback(q->ixp_q_crp->crp_flags, q->ixp_q_crp->crp_buf,
+						q->ixp_q_ccrd->crd_inject,
+						ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen,
+						(caddr_t) q->ixp_q_iv);
+		} else {
+			if (q->ixp_q_ccrd->crd_flags & CRD_F_IV_EXPLICIT)
+				q->ixp_q_iv = q->ixp_q_ccrd->crd_iv;
+			else {
+				q->ixp_q_iv = q->ixp_q_iv_data;
+				crypto_copydata(q->ixp_q_crp->crp_flags, q->ixp_q_crp->crp_buf,
+						q->ixp_q_ccrd->crd_inject,
+						ixp->ixp_ctx.cipherCtx.cipherInitialVectorLen,
+						(caddr_t) q->ixp_q_iv);
+			}
+		}
+
+		if (q->ixp_q_acrd) {
+			auth_off = q->ixp_q_acrd->crd_skip;
+			auth_len = q->ixp_q_acrd->crd_len;
+			icv_off  = q->ixp_q_acrd->crd_inject;
+		}
+
+		crypt_off = q->ixp_q_ccrd->crd_skip;
+		crypt_len = q->ixp_q_ccrd->crd_len;
+	} else { /* if (q->ixp_q_acrd) */
+		auth_off = q->ixp_q_acrd->crd_skip;
+		auth_len = q->ixp_q_acrd->crd_len;
+		icv_off  = q->ixp_q_acrd->crd_inject;
+	}
+
+	if (q->ixp_q_crp->crp_flags & CRYPTO_F_SKBUF) {
+		struct sk_buff *skb = (struct sk_buff *) q->ixp_q_crp->crp_buf;
+		if (skb_shinfo(skb)->nr_frags) {
+			/*
+			 * DAVIDM fix this limitation one day by using
+			 * a buffer pool and chaining,  it is not currently
+			 * needed for current user/kernel space acceleration
+			 */
+			printk("ixp: Cannot handle fragmented skb's yet !\n");
+			q->ixp_q_crp->crp_etype = ENOENT;
+			goto done;
+		}
+		IX_MBUF_MLEN(&q->ixp_q_mbuf) =
+				IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) =  skb->len;
+		IX_MBUF_MDATA(&q->ixp_q_mbuf) = skb->data;
+	} else if (q->ixp_q_crp->crp_flags & CRYPTO_F_IOV) {
+		struct uio *uiop = (struct uio *) q->ixp_q_crp->crp_buf;
+		if (uiop->uio_iovcnt != 1) {
+			/*
+			 * DAVIDM fix this limitation one day by using
+			 * a buffer pool and chaining,  it is not currently
+			 * needed for current user/kernel space acceleration
+			 */
+			printk("ixp: Cannot handle more than 1 iovec yet !\n");
+			q->ixp_q_crp->crp_etype = ENOENT;
+			goto done;
+		}
+		IX_MBUF_MLEN(&q->ixp_q_mbuf) =
+				IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_len;
+		IX_MBUF_MDATA(&q->ixp_q_mbuf) = uiop->uio_iov[0].iov_base;
+	} else /* contig buffer */ {
+		IX_MBUF_MLEN(&q->ixp_q_mbuf)  =
+				IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_ilen;
+		IX_MBUF_MDATA(&q->ixp_q_mbuf) = q->ixp_q_crp->crp_buf;
+	}
+
+	IX_MBUF_PRIV(&q->ixp_q_mbuf) = q;
+
+	if (ixp->ixp_auth_alg == CRYPTO_SHA1 || ixp->ixp_auth_alg == CRYPTO_MD5) {
+		/*
+		 * For SHA1 and MD5 hash, need to create an internal buffer that is big
+		 * enough to hold the original data + the appropriate padding for the
+		 * hash algorithm.
+		 */
+		UINT8 *tbuf = NULL;
+
+		IX_MBUF_MLEN(&q->ixp_q_mbuf) = IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) =
+			((IX_MBUF_MLEN(&q->ixp_q_mbuf) * 8) + 72 + 511) / 8;
+		tbuf = kmalloc(IX_MBUF_MLEN(&q->ixp_q_mbuf), SLAB_ATOMIC);
+
+		if (IX_MBUF_MDATA(&q->ixp_q_mbuf) == NULL) {
+			printk("ixp: kmalloc(%u, SLAB_ATOMIC) failed\n",
+					IX_MBUF_MLEN(&q->ixp_q_mbuf));
+			q->ixp_q_crp->crp_etype = ENOMEM;
+			goto done;
+		}
+		memcpy(tbuf, &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off], auth_len);
+
+		/* Set location in client buffer to copy hash into */
+		q->ixp_hash_dest =
+			&(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_off + auth_len];
+
+		IX_MBUF_MDATA(&q->ixp_q_mbuf) = tbuf;
+
+		/* Set location in internal buffer for where hash starts */
+		q->ixp_hash_src = &(IX_MBUF_MDATA(&q->ixp_q_mbuf))[auth_len];
+
+		crypt_func = "ixCryptoAccHashPerform";
+		status = ixCryptoAccHashPerform(ixp->ixp_ctx.authCtx.authAlgo,
+				&q->ixp_q_mbuf, ixp_hash_perform_cb, 0, auth_len, auth_len,
+				&ixp->ixp_hash_key_id);
+	}
+	else {
+		crypt_func = "ixCryptoAccAuthCryptPerform";
+		status = ixCryptoAccAuthCryptPerform(ixp->ixp_ctx_id, &q->ixp_q_mbuf,
+			NULL, auth_off, auth_len, crypt_off, crypt_len, icv_off,
+			q->ixp_q_iv);
+	}
+
+	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
+		return;
+
+	if (IX_CRYPTO_ACC_STATUS_QUEUE_FULL == status) {
+		q->ixp_q_crp->crp_etype = ENOMEM;
+		goto done;
+	}
+
+	printk("ixp: %s failed %u\n", crypt_func, status);
+	q->ixp_q_crp->crp_etype = EINVAL;
+
+done:
+	crypto_done(q->ixp_q_crp);
+	kmem_cache_free(qcache, q);
+}
+
+
+/*
+ * because we cannot process the Q from the Register callback
+ * we do it here on a task Q.
+ */
+
+static void
+ixp_process_pending(void *arg)
+{
+	struct ixp_data *ixp = arg;
+	struct ixp_q *q = NULL;
+
+	dprintk("%s(%p)\n", __FUNCTION__, arg);
+
+	if (!ixp)
+		return;
+
+	while (!list_empty(&ixp->ixp_q)) {
+		q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
+		list_del(&q->ixp_q_list);
+		ixp_q_process(q);
+	}
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+static void
+ixp_process_pending_wq(struct work_struct *work)
+{
+	struct ixp_data *ixp = container_of(work, struct ixp_data, ixp_pending_work);
+	ixp_process_pending(ixp);
+}
+#endif
+
+/*
+ * callback for when context registration is complete
+ */
+
+static void
+ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
+{
+	int i;
+	struct ixp_data *ixp;
+	struct ixp_q *q;
+
+	dprintk("%s(%d, %p, %d)\n", __FUNCTION__, ctx_id, bufp, status);
+
+	/*
+	 * free any buffer passed in to this routine
+	 */
+	if (bufp) {
+		IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
+		kfree(IX_MBUF_MDATA(bufp));
+		IX_MBUF_MDATA(bufp) = NULL;
+	}
+
+	for (i = 0; i < ixp_sesnum; i++) {
+		ixp = ixp_sessions[i];
+		if (ixp && ixp->ixp_ctx_id == ctx_id)
+			break;
+	}
+	if (i >= ixp_sesnum) {
+		printk("ixp: invalid context id %d\n", ctx_id);
+		return;
+	}
+
+	if (IX_CRYPTO_ACC_STATUS_WAIT == status) {
+		/* this is normal to free the first of two buffers */
+		dprintk("ixp: register not finished yet.\n");
+		return;
+	}
+
+	if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
+		printk("ixp: register failed 0x%x\n", status);
+		while (!list_empty(&ixp->ixp_q)) {
+			q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
+			list_del(&q->ixp_q_list);
+			q->ixp_q_crp->crp_etype = EINVAL;
+			crypto_done(q->ixp_q_crp);
+			kmem_cache_free(qcache, q);
+		}
+		return;
+	}
+
+	/*
+	 * we are now registered,  we cannot start processing the Q here
+	 * or we get strange errors with AES (DES/3DES seem to be ok).
+	 */
+	ixp->ixp_registered = 1;
+	schedule_work(&ixp->ixp_pending_work);
+}
+
+
+/*
+ * callback for when data processing is complete
+ */
+
+static void
+ixp_perform_cb(
+	UINT32 ctx_id,
+	IX_MBUF *sbufp,
+	IX_MBUF *dbufp,
+	IxCryptoAccStatus status)
+{
+	struct ixp_q *q;
+
+	dprintk("%s(%d, %p, %p, 0x%x)\n", __FUNCTION__, ctx_id, sbufp,
+			dbufp, status);
+
+	if (sbufp == NULL) {
+		printk("ixp: NULL sbuf in ixp_perform_cb\n");
+		return;
+	}
+
+	q = IX_MBUF_PRIV(sbufp);
+	if (q == NULL) {
+		printk("ixp: NULL priv in ixp_perform_cb\n");
+		return;
+	}
+
+	if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
+		printk("ixp: perform failed status=%d\n", status);
+		q->ixp_q_crp->crp_etype = EINVAL;
+	}
+
+	crypto_done(q->ixp_q_crp);
+	kmem_cache_free(qcache, q);
+}
+
+
+/*
+ * registration is not callable at IRQ time,  so we defer
+ * to a task queue,  this routines completes the registration for us
+ * when the task queue runs
+ *
+ * Unfortunately this means we cannot tell OCF that the driver is blocked,
+ * we do that on the next request.
+ */
+
+static void
+ixp_registration(void *arg)
+{
+	struct ixp_data *ixp = arg;
+	struct ixp_q *q = NULL;
+	IX_MBUF *pri = NULL, *sec = NULL;
+	int status = IX_CRYPTO_ACC_STATUS_SUCCESS;
+
+	if (!ixp) {
+		printk("ixp: ixp_registration with no arg\n");
+		return;
+	}
+
+	if (ixp->ixp_ctx_id != -1) {
+		ixCryptoAccCtxUnregister(ixp->ixp_ctx_id);
+		ixp->ixp_ctx_id = -1;
+	}
+
+	if (list_empty(&ixp->ixp_q)) {
+		printk("ixp: ixp_registration with no Q\n");
+		return;
+	}
+
+	/*
+	 * setup the primary and secondary buffers
+	 */
+	q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
+	if (q->ixp_q_acrd) {
+		pri = &ixp->ixp_pri_mbuf;
+		sec = &ixp->ixp_sec_mbuf;
+		IX_MBUF_MLEN(pri)  = IX_MBUF_PKT_LEN(pri) = 128;
+		IX_MBUF_MDATA(pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
+		IX_MBUF_MLEN(sec)  = IX_MBUF_PKT_LEN(sec) = 128;
+		IX_MBUF_MDATA(sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
+	}
+
+	/* Only need to register if a crypt op or HMAC op */
+	if (!(ixp->ixp_auth_alg == CRYPTO_SHA1 ||
+				ixp->ixp_auth_alg == CRYPTO_MD5)) {
+		status = ixCryptoAccCtxRegister(
+					&ixp->ixp_ctx,
+					pri, sec,
+					ixp_register_cb,
+					ixp_perform_cb,
+					&ixp->ixp_ctx_id);
+	}
+	else {
+		/* Otherwise we start processing pending q */
+		schedule_work(&ixp->ixp_pending_work);
+	}
+
+	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
+		return;
+
+	if (IX_CRYPTO_ACC_STATUS_EXCEED_MAX_TUNNELS == status) {
+		printk("ixp: ixCryptoAccCtxRegister failed (out of tunnels)\n");
+		ixp_blocked = 1;
+		/* perhaps we should return EGAIN on queued ops ? */
+		return;
+	}
+
+	printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
+	ixp->ixp_ctx_id = -1;
+
+	/*
+	 * everything waiting is toasted
+	 */
+	while (!list_empty(&ixp->ixp_q)) {
+		q = list_entry(ixp->ixp_q.next, struct ixp_q, ixp_q_list);
+		list_del(&q->ixp_q_list);
+		q->ixp_q_crp->crp_etype = ENOENT;
+		crypto_done(q->ixp_q_crp);
+		kmem_cache_free(qcache, q);
+	}
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+static void
+ixp_registration_wq(struct work_struct *work)
+{
+	struct ixp_data *ixp = container_of(work, struct ixp_data,
+								ixp_registration_work);
+	ixp_registration(ixp);
+}
+#endif
+
+/*
+ * Process a request.
+ */
+static int
+ixp_process(device_t dev, struct cryptop *crp, int hint)
+{
+	struct ixp_data *ixp;
+	unsigned int lid;
+	struct ixp_q *q = NULL;
+	int status;
+
+	dprintk("%s()\n", __FUNCTION__);
+
+	/* Sanity check */
+	if (crp == NULL) {
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		return EINVAL;
+	}
+
+	crp->crp_etype = 0;
+
+	if (ixp_blocked)
+		return ERESTART;
+
+	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		crp->crp_etype = EINVAL;
+		goto done;
+	}
+
+	/*
+	 * find the session we are using
+	 */
+
+	lid = crp->crp_sid & 0xffffffff;
+	if (lid >= ixp_sesnum || lid == 0 || ixp_sessions == NULL ||
+			ixp_sessions[lid] == NULL) {
+		crp->crp_etype = ENOENT;
+		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
+		goto done;
+	}
+	ixp = ixp_sessions[lid];
+
+	/*
+	 * setup a new request ready for queuing
+	 */
+	q = kmem_cache_alloc(qcache, SLAB_ATOMIC);
+	if (q == NULL) {
+		dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
+		crp->crp_etype = ENOMEM;
+		goto done;
+	}
+	/*
+	 * save some cycles by only zeroing the important bits
+	 */
+	memset(&q->ixp_q_mbuf, 0, sizeof(q->ixp_q_mbuf));
+	q->ixp_q_ccrd = NULL;
+	q->ixp_q_acrd = NULL;
+	q->ixp_q_crp = crp;
+	q->ixp_q_data = ixp;
+
+	/*
+	 * point the cipher and auth descriptors appropriately
+	 * check that we have something to do
+	 */
+	if (crp->crp_desc->crd_alg == ixp->ixp_cipher_alg)
+		q->ixp_q_ccrd = crp->crp_desc;
+	else if (crp->crp_desc->crd_alg == ixp->ixp_auth_alg)
+		q->ixp_q_acrd = crp->crp_desc;
+	else {
+		crp->crp_etype = ENOENT;
+		dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
+		goto done;
+	}
+	if (crp->crp_desc->crd_next) {
+		if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_cipher_alg)
+			q->ixp_q_ccrd = crp->crp_desc->crd_next;
+		else if (crp->crp_desc->crd_next->crd_alg == ixp->ixp_auth_alg)
+			q->ixp_q_acrd = crp->crp_desc->crd_next;
+		else {
+			crp->crp_etype = ENOENT;
+			dprintk("%s,%d: bad desc match: ENOENT\n", __FILE__, __LINE__);
+			goto done;
+		}
+	}
+
+	/*
+	 * If there is a direction change for this context then we mark it as
+	 * unregistered and re-register is for the new direction.  This is not
+	 * a very expensive operation and currently only tends to happen when
+	 * user-space application are doing benchmarks
+	 *
+	 * DM - we should be checking for pending requests before unregistering.
+	 */
+	if (q->ixp_q_ccrd && ixp->ixp_registered &&
+			ixp->ixp_crd_flags != (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT)) {
+		dprintk("%s - detected direction change on session\n", __FUNCTION__);
+		ixp->ixp_registered = 0;
+	}
+
+	/*
+	 * if we are registered,  call straight into the perform code
+	 */
+	if (ixp->ixp_registered) {
+		ixp_q_process(q);
+		return 0;
+	}
+
+	/*
+	 * the only part of the context not set in newsession is the direction
+	 * dependent parts
+	 */
+	if (q->ixp_q_ccrd) {
+		ixp->ixp_crd_flags = (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT);
+		if (q->ixp_q_ccrd->crd_flags & CRD_F_ENCRYPT) {
+			ixp->ixp_ctx.operation = q->ixp_q_acrd ?
+					IX_CRYPTO_ACC_OP_ENCRYPT_AUTH : IX_CRYPTO_ACC_OP_ENCRYPT;
+		} else {
+			ixp->ixp_ctx.operation = q->ixp_q_acrd ?
+					IX_CRYPTO_ACC_OP_AUTH_DECRYPT : IX_CRYPTO_ACC_OP_DECRYPT;
+		}
+	} else {
+		/* q->ixp_q_acrd must be set if we are here */
+		ixp->ixp_ctx.operation = IX_CRYPTO_ACC_OP_AUTH_CALC;
+	}
+
+	status = list_empty(&ixp->ixp_q);
+	list_add_tail(&q->ixp_q_list, &ixp->ixp_q);
+	if (status)
+		schedule_work(&ixp->ixp_registration_work);
+	return 0;
+
+done:
+	if (q)
+		kmem_cache_free(qcache, q);
+	crypto_done(crp);
+	return 0;
+}
+
+
+#ifdef __ixp46X
+/*
+ * key processing support for the ixp465
+ */
+
+
+/*
+ * copy a BN (LE) into a buffer (BE) an fill out the op appropriately
+ * assume zeroed and only copy bits that are significant
+ */
+
+static int
+ixp_copy_ibuf(struct crparam *p, IxCryptoAccPkeEauOperand *op, UINT32 *buf)
+{
+	unsigned char *src = (unsigned char *) p->crp_p;
+	unsigned char *dst;
+	int len, bits = p->crp_nbits;
+
+	dprintk("%s()\n", __FUNCTION__);
+
+	if (bits > MAX_IOP_SIZE * sizeof(UINT32) * 8) {
+		dprintk("%s - ibuf too big (%d > %d)\n", __FUNCTION__,
+				bits, MAX_IOP_SIZE * sizeof(UINT32) * 8);
+		return -1;
+	}
+
+	len = (bits + 31) / 32; /* the number UINT32's needed */
+
+	dst = (unsigned char *) &buf[len];
+	dst--;
+
+	while (bits > 0) {
+		*dst-- = *src++;
+		bits -= 8;
+	}
+
+#if 0 /* no need to zero remaining bits as it is done during request alloc */
+	while (dst > (unsigned char *) buf)
+		*dst-- = '\0';
+#endif
+
+	op->pData = buf;
+	op->dataLen = len;
+	return 0;
+}
+
+/*
+ * copy out the result,  be as forgiving as we can about small output buffers
+ */
+
+static int
+ixp_copy_obuf(struct crparam *p, IxCryptoAccPkeEauOpResult *op, UINT32 *buf)
+{
+	unsigned char *dst = (unsigned char *) p->crp_p;
+	unsigned char *src = (unsigned char *) buf;
+	int len, z, bits = p->crp_nbits;
+
+	dprintk("%s()\n", __FUNCTION__);
+
+	len = op->dataLen * sizeof(UINT32);
+
+	/* skip leading zeroes to be small buffer friendly */
+	z = 0;
+	while (z < len && src[z] == '\0')
+		z++;
+
+	src += len;
+	src--;
+	len -= z;
+
+	while (len > 0 && bits > 0) {
+		*dst++ = *src--;
+		len--;
+		bits -= 8;
+	}
+
+	while (bits > 0) {
+		*dst++ = '\0';
+		bits -= 8;
+	}
+
+	if (len > 0) {
+		dprintk("%s - obuf is %d (z=%d, ob=%d) bytes too small\n",
+				__FUNCTION__, len, z, p->crp_nbits / 8);
+		return -1;
+	}
+
+	return 0;
+}
+
+
+/*
+ * the parameter offsets for exp_mod
+ */
+
+#define IXP_PARAM_BASE 0
+#define IXP_PARAM_EXP  1
+#define IXP_PARAM_MOD  2
+#define IXP_PARAM_RES  3
+
+/*
+ * key processing complete callback,  is also used to start processing
+ * by passing a NULL for pResult
+ */
+
+static void
+ixp_kperform_cb(
+	IxCryptoAccPkeEauOperation operation,
+	IxCryptoAccPkeEauOpResult *pResult,
+	BOOL carryOrBorrow,
+	IxCryptoAccStatus status)
+{
+	struct ixp_pkq *q, *tmp;
+	unsigned long flags;
+
+	dprintk("%s(0x%x, %p, %d, 0x%x)\n", __FUNCTION__, operation, pResult,
+			carryOrBorrow, status);
+
+	/* handle a completed request */
+	if (pResult) {
+		if (ixp_pk_cur && &ixp_pk_cur->pkq_result == pResult) {
+			q = ixp_pk_cur;
+			if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
+				dprintk("%s() - op failed 0x%x\n", __FUNCTION__, status);
+				q->pkq_krp->krp_status = ERANGE; /* could do better */
+			} else {
+				/* copy out the result */
+				if (ixp_copy_obuf(&q->pkq_krp->krp_param[IXP_PARAM_RES],
+						&q->pkq_result, q->pkq_obuf))
+					q->pkq_krp->krp_status = ERANGE;
+			}
+			crypto_kdone(q->pkq_krp);
+			kfree(q);
+			ixp_pk_cur = NULL;
+		} else
+			printk("%s - callback with invalid result pointer\n", __FUNCTION__);
+	}
+
+	spin_lock_irqsave(&ixp_pkq_lock, flags);
+	if (ixp_pk_cur || list_empty(&ixp_pkq)) {
+		spin_unlock_irqrestore(&ixp_pkq_lock, flags);
+		return;
+	}
+
+	list_for_each_entry_safe(q, tmp, &ixp_pkq, pkq_list) {
+
+		list_del(&q->pkq_list);
+		ixp_pk_cur = q;
+
+		spin_unlock_irqrestore(&ixp_pkq_lock, flags);
+
+		status = ixCryptoAccPkeEauPerform(
+				IX_CRYPTO_ACC_OP_EAU_MOD_EXP,
+				&q->pkq_op,
+				ixp_kperform_cb,
+				&q->pkq_result);
+
+		if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
+			dprintk("%s() - ixCryptoAccPkeEauPerform SUCCESS\n", __FUNCTION__);
+			return; /* callback will return here for callback */
+		} else if (status == IX_CRYPTO_ACC_STATUS_RETRY) {
+			printk("%s() - ixCryptoAccPkeEauPerform RETRY\n", __FUNCTION__);
+		} else {
+			printk("%s() - ixCryptoAccPkeEauPerform failed %d\n",
+					__FUNCTION__, status);
+		}
+		q->pkq_krp->krp_status = ERANGE; /* could do better */
+		crypto_kdone(q->pkq_krp);
+		kfree(q);
+		spin_lock_irqsave(&ixp_pkq_lock, flags);
+	}
+	spin_unlock_irqrestore(&ixp_pkq_lock, flags);
+}
+
+
+static int
+ixp_kprocess(device_t dev, struct cryptkop *krp, int hint)
+{
+	struct ixp_pkq *q;
+	int rc = 0;
+	unsigned long flags;
+
+	dprintk("%s l1=%d l2=%d l3=%d l4=%d\n", __FUNCTION__,
+			krp->krp_param[IXP_PARAM_BASE].crp_nbits,
+			krp->krp_param[IXP_PARAM_EXP].crp_nbits,
+			krp->krp_param[IXP_PARAM_MOD].crp_nbits,
+			krp->krp_param[IXP_PARAM_RES].crp_nbits);
+
+
+	if (krp->krp_op != CRK_MOD_EXP) {
+		krp->krp_status = EOPNOTSUPP;
+		goto err;
+	}
+
+	q = (struct ixp_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
+	if (q == NULL) {
+		krp->krp_status = ENOMEM;
+		goto err;
+	}
+
+	/*
+	 * The PKE engine does not appear to zero the output buffer
+	 * appropriately, so we need to do it all here.
+	 */
+	memset(q, 0, sizeof(*q));
+
+	q->pkq_krp = krp;
+	INIT_LIST_HEAD(&q->pkq_list);
+
+	if (ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_BASE], &q->pkq_op.modExpOpr.M,
+			q->pkq_ibuf0))
+		rc = 1;
+	if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_EXP],
+				&q->pkq_op.modExpOpr.e, q->pkq_ibuf1))
+		rc = 2;
+	if (!rc && ixp_copy_ibuf(&krp->krp_param[IXP_PARAM_MOD],
+				&q->pkq_op.modExpOpr.N, q->pkq_ibuf2))
+		rc = 3;
+
+	if (rc) {
+		kfree(q);
+		krp->krp_status = ERANGE;
+		goto err;
+	}
+
+	q->pkq_result.pData           = q->pkq_obuf;
+	q->pkq_result.dataLen         =
+			(krp->krp_param[IXP_PARAM_RES].crp_nbits + 31) / 32;
+
+	spin_lock_irqsave(&ixp_pkq_lock, flags);
+	list_add_tail(&q->pkq_list, &ixp_pkq);
+	spin_unlock_irqrestore(&ixp_pkq_lock, flags);
+
+	if (!ixp_pk_cur)
+		ixp_kperform_cb(0, NULL, 0, 0);
+	return (0);
+
+err:
+	crypto_kdone(krp);
+	return (0);
+}
+
+
+
+#ifdef CONFIG_OCF_RANDOMHARVEST
+/*
+ * We run the random number generator output through SHA so that it
+ * is FIPS compliant.
+ */
+
+static volatile int sha_done = 0;
+static unsigned char sha_digest[20];
+
+static void
+ixp_hash_cb(UINT8 *digest, IxCryptoAccStatus status)
+{
+	dprintk("%s(%p, %d)\n", __FUNCTION__, digest, status);
+	if (sha_digest != digest)
+		printk("digest error\n");
+	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
+		sha_done = 1;
+	else
+		sha_done = -status;
+}
+
+static int
+ixp_read_random(void *arg, u_int32_t *buf, int maxwords)
+{
+	IxCryptoAccStatus status;
+	int i, n, rc;
+
+	dprintk("%s(%p, %d)\n", __FUNCTION__, buf, maxwords);
+	memset(buf, 0, maxwords * sizeof(*buf));
+	status = ixCryptoAccPkePseudoRandomNumberGet(maxwords, buf);
+	if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
+		dprintk("%s: ixCryptoAccPkePseudoRandomNumberGet failed %d\n",
+				__FUNCTION__, status);
+		return 0;
+	}
+
+	/*
+	 * run the random data through SHA to make it look more random
+	 */
+
+	n = sizeof(sha_digest); /* process digest bytes at a time */
+
+	rc = 0;
+	for (i = 0; i < maxwords; i += n / sizeof(*buf)) {
+		if ((maxwords - i) * sizeof(*buf) < n)
+			n = (maxwords - i) * sizeof(*buf);
+		sha_done = 0;
+		status = ixCryptoAccPkeHashPerform(IX_CRYPTO_ACC_AUTH_SHA1,
+				(UINT8 *) &buf[i], n, ixp_hash_cb, sha_digest);
+		if (status != IX_CRYPTO_ACC_STATUS_SUCCESS) {
+			dprintk("ixCryptoAccPkeHashPerform failed %d\n", status);
+			return -EIO;
+		}
+		while (!sha_done)
+			schedule();
+		if (sha_done < 0) {
+			dprintk("ixCryptoAccPkeHashPerform failed CB %d\n", -sha_done);
+			return 0;
+		}
+		memcpy(&buf[i], sha_digest, n);
+		rc += n / sizeof(*buf);;
+	}
+
+	return rc;
+}
+#endif /* CONFIG_OCF_RANDOMHARVEST */
+
+#endif /* __ixp46X */
+
+
+
+/*
+ * our driver startup and shutdown routines
+ */
+
+static int
+ixp_init(void)
+{
+	dprintk("%s(%p)\n", __FUNCTION__, ixp_init);
+
+	if (ixp_init_crypto && ixCryptoAccInit() != IX_CRYPTO_ACC_STATUS_SUCCESS)
+		printk("ixCryptoAccInit failed, assuming already initialised!\n");
+
+	qcache = kmem_cache_create("ixp4xx_q", sizeof(struct ixp_q), 0,
+				SLAB_HWCACHE_ALIGN, NULL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+				, NULL
+#endif
+				  );
+	if (!qcache) {
+		printk("failed to create Qcache\n");
+		return -ENOENT;
+	}
+
+	memset(&ixpdev, 0, sizeof(ixpdev));
+	softc_device_init(&ixpdev, "ixp4xx", 0, ixp_methods);
+
+	ixp_id = crypto_get_driverid(softc_get_device(&ixpdev),
+				CRYPTOCAP_F_HARDWARE);
+	if (ixp_id < 0)
+		panic("IXP/OCF crypto device cannot initialize!");
+
+#define	REGISTER(alg) \
+	crypto_register(ixp_id,alg,0,0)
+
+	REGISTER(CRYPTO_DES_CBC);
+	REGISTER(CRYPTO_3DES_CBC);
+	REGISTER(CRYPTO_RIJNDAEL128_CBC);
+#ifdef CONFIG_OCF_IXP4XX_SHA1_MD5
+	REGISTER(CRYPTO_MD5);
+	REGISTER(CRYPTO_SHA1);
+#endif
+	REGISTER(CRYPTO_MD5_HMAC);
+	REGISTER(CRYPTO_SHA1_HMAC);
+#undef REGISTER
+
+#ifdef __ixp46X
+	spin_lock_init(&ixp_pkq_lock);
+	/*
+	 * we do not enable the go fast options here as they can potentially
+	 * allow timing based attacks
+	 *
+	 * http://www.openssl.org/news/secadv_20030219.txt
+	 */
+	ixCryptoAccPkeEauExpConfig(0, 0);
+	crypto_kregister(ixp_id, CRK_MOD_EXP, 0);
+#ifdef CONFIG_OCF_RANDOMHARVEST
+	crypto_rregister(ixp_id, ixp_read_random, NULL);
+#endif
+#endif
+
+	return 0;
+}
+
+static void
+ixp_exit(void)
+{
+	dprintk("%s()\n", __FUNCTION__);
+	crypto_unregister_all(ixp_id);
+	ixp_id = -1;
+	kmem_cache_destroy(qcache);
+	qcache = NULL;
+}
+
+module_init(ixp_init);
+module_exit(ixp_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("David McCullough <dmccullough@cyberguard.com>");
+MODULE_DESCRIPTION("ixp (OCF module for IXP4xx crypto)");
diff --git a/crypto/ocf/kirkwood/Makefile b/crypto/ocf/kirkwood/Makefile
new file mode 100644
index 000000000000..ae0fec27cd2b
--- /dev/null
+++ b/crypto/ocf/kirkwood/Makefile
@@ -0,0 +1,18 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+obj-$(CONFIG_OCF_KIRKWOOD) += mv_cesa.o
+
+mv_cesa-y := cesa/mvCesa.o cesa/mvLru.o cesa/mvMD5.o cesa/mvSHA1.o cesa/AES/mvAesAlg.o cesa/AES/mvAesApi.o cesa/mvCesaDebug.o cesa_ocf_drv.o
+
+# Extra objects required by the CESA driver
+mv_cesa-y += mvHal/kw_family/ctrlEnv/mvCtrlEnvLib.o mvHal/kw_family/boardEnv/mvBoardEnvLib.o mvHal/mv_hal/twsi/mvTwsi.o mvHal/kw_family/ctrlEnv/sys/mvCpuIf.o mvHal/kw_family/ctrlEnv/sys/mvAhbToMbus.o mvHal/kw_family/ctrlEnv/sys/mvSysDram.o mvHal/linux_oss/mvOs.o mvHal/kw_family/ctrlEnv/mvCtrlEnvAddrDec.o mvHal/mv_hal/gpp/mvGpp.o mvHal/kw_family/ctrlEnv/sys/mvSysPex.o mvHal/mv_hal/pex/mvPex.o mvHal/kw_family/boardEnv/mvBoardEnvSpec.o mvHal/common/mvCommon.o mvHal/common/mvDebug.o mvHal/kw_family/ctrlEnv/sys/mvSysCesa.o
+
+ifdef src
+EXTRA_CFLAGS += -I$(src)/.. -I$(src)/cesa -I$(src)/mvHal -I$(src)/mvHal/common -I$(src)/mvHal/kw_family -I$(src)/mvHal/mv_hal -I$(src)/mvHal/linux_oss -I$(src)
+endif
+
+EXTRA_CFLAGS += -DMV_LINUX -DMV_CPU_LE -DMV_ARM -DMV_INCLUDE_CESA -DMV_INCLUDE_PEX -DMV_CACHE_COHERENCY=3
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
diff --git a/crypto/ocf/kirkwood/cesa/AES/mvAes.h b/crypto/ocf/kirkwood/cesa/AES/mvAes.h
new file mode 100644
index 000000000000..d1d668782f01
--- /dev/null
+++ b/crypto/ocf/kirkwood/cesa/AES/mvAes.h
@@ -0,0 +1,60 @@
+/* mvAes.h   v2.0   August '99
+ * Reference ANSI C code
+ */
+
+/*  AES Cipher header file for ANSI C Submissions
+      Lawrence E. Bassham III
+      Computer Security Division
+      National Institute of Standards and Technology
+
+      April 15, 1998
+
+    This sample is to assist implementers developing to the Cryptographic
+API Profile for AES Candidate Algorithm Submissions.  Please consult this
+document as a cross-reference.
+
+    ANY CHANGES, WHERE APPROPRIATE, TO INFORMATION PROVIDED IN THIS FILE
+MUST BE DOCUMENTED.  CHANGES ARE ONLY APPROPRIATE WHERE SPECIFIED WITH
+THE STRING "CHANGE POSSIBLE".  FUNCTION CALLS AND THEIR PARAMETERS CANNOT
+BE CHANGED.  STRUCTURES CAN BE ALTERED TO ALLOW IMPLEMENTERS TO INCLUDE
+IMPLEMENTATION SPECIFIC INFORMATION.
+*/
+
+/*  Includes:
+	Standard include files
+*/
+
+#include "mvOs.h"
+
+
+/*  Error Codes - CHANGE POSSIBLE: inclusion of additional error codes  */
+
+/*  Key direction is invalid, e.g., unknown value */
+#define     AES_BAD_KEY_DIR        -1
+
+/*  Key material not of correct length */
+#define     AES_BAD_KEY_MAT        -2
+
+/*  Key passed is not valid  */
+#define     AES_BAD_KEY_INSTANCE   -3
+
+/*  Params struct passed to cipherInit invalid */
+#define     AES_BAD_CIPHER_MODE    -4
+
+/*  Cipher in wrong state (e.g., not initialized) */
+#define     AES_BAD_CIPHER_STATE   -5
+
+#define     AES_BAD_CIPHER_INSTANCE   -7
+
+
+/*  Function protoypes  */
+/*  CHANGED: makeKey(): parameter blockLen added
+                        this parameter is absolutely necessary if you want to
+			setup the round keys in a variable block length setting
+	     cipherInit(): parameter blockLen added (for obvious reasons)
+ */
+int     aesMakeKey(MV_U8 *expandedKey, MV_U8 *keyMaterial, int keyLen, int blockLen);
+int     aesBlockEncrypt128(MV_U8 mode, MV_U8 *IV, MV_U8 *expandedKey, int  keyLen,
+                    MV_U32 *plain, int numBlocks, MV_U32 *cipher);
+int     aesBlockDecrypt128(MV_U8 mode, MV_U8 *IV, MV_U8 *expandedKey, int  keyLen,
+                    MV_U32 *plain, int numBlocks, MV_U32 *cipher);
diff --git a/crypto/ocf/kirkwood/cesa/AES/mvAesAlg.c b/crypto/ocf/kirkwood/cesa/AES/mvAesAlg.c
new file mode 100644
index 000000000000..5213c6c38048
--- /dev/null
+++ b/crypto/ocf/kirkwood/cesa/AES/mvAesAlg.c
@@ -0,0 +1,316 @@
+/* rijndael-alg-ref.c   v2.0   August '99
+ * Reference ANSI C code
+ * authors: Paulo Barreto
+ *          Vincent Rijmen, K.U.Leuven
+ *
+ * This code is placed in the public domain.
+ */
+
+#include "mvOs.h"
+
+#include "mvAesAlg.h"
+
+#include "mvAesBoxes.dat"
+
+
+MV_U8 mul1(MV_U8 aa, MV_U8 bb);
+void KeyAddition(MV_U8 a[4][MAXBC], MV_U8 rk[4][MAXBC], MV_U8 BC);
+void ShiftRow128Enc(MV_U8 a[4][MAXBC]);
+void ShiftRow128Dec(MV_U8 a[4][MAXBC]);
+void Substitution(MV_U8 a[4][MAXBC], MV_U8 box[256]);
+void MixColumn(MV_U8 a[4][MAXBC], MV_U8 rk[4][MAXBC]);
+void InvMixColumn(MV_U8 a[4][MAXBC]);
+
+
+#define mul(aa, bb) (mask[bb] & Alogtable[aa + Logtable[bb]])
+
+MV_U8 mul1(MV_U8 aa, MV_U8 bb)
+{
+    return mask[bb] & Alogtable[aa + Logtable[bb]];
+}
+
+
+void KeyAddition(MV_U8 a[4][MAXBC], MV_U8 rk[4][MAXBC], MV_U8 BC)
+{
+	/* Exor corresponding text input and round key input bytes
+	 */
+    ((MV_U32*)(&(a[0][0])))[0] ^= ((MV_U32*)(&(rk[0][0])))[0];
+    ((MV_U32*)(&(a[1][0])))[0] ^= ((MV_U32*)(&(rk[1][0])))[0];
+    ((MV_U32*)(&(a[2][0])))[0] ^= ((MV_U32*)(&(rk[2][0])))[0];
+    ((MV_U32*)(&(a[3][0])))[0] ^= ((MV_U32*)(&(rk[3][0])))[0];
+
+}
+
+void ShiftRow128Enc(MV_U8 a[4][MAXBC]) {
+	/* Row 0 remains unchanged
+	 * The other three rows are shifted a variable amount
+	 */
+	MV_U8 tmp[MAXBC];
+
+    tmp[0] = a[1][1];
+    tmp[1] = a[1][2];
+    tmp[2] = a[1][3];
+    tmp[3] = a[1][0];
+
+    ((MV_U32*)(&(a[1][0])))[0] = ((MV_U32*)(&(tmp[0])))[0];
+     /*
+    a[1][0] = tmp[0];
+    a[1][1] = tmp[1];
+    a[1][2] = tmp[2];
+    a[1][3] = tmp[3];
+       */
+    tmp[0] = a[2][2];
+    tmp[1] = a[2][3];
+    tmp[2] = a[2][0];
+    tmp[3] = a[2][1];
+
+    ((MV_U32*)(&(a[2][0])))[0] = ((MV_U32*)(&(tmp[0])))[0];
+      /*
+    a[2][0] = tmp[0];
+    a[2][1] = tmp[1];
+    a[2][2] = tmp[2];
+    a[2][3] = tmp[3];
+    */
+    tmp[0] = a[3][3];
+    tmp[1] = a[3][0];
+    tmp[2] = a[3][1];
+    tmp[3] = a[3][2];
+
+    ((MV_U32*)(&(a[3][0])))[0] = ((MV_U32*)(&(tmp[0])))[0];
+    /*
+    a[3][0] = tmp[0];
+    a[3][1] = tmp[1];
+    a[3][2] = tmp[2];
+    a[3][3] = tmp[3];
+    */
+}
+
+void ShiftRow128Dec(MV_U8 a[4][MAXBC]) {
+	/* Row 0 remains unchanged
+	 * The other three rows are shifted a variable amount
+	 */
+	MV_U8 tmp[MAXBC];
+
+    tmp[0] = a[1][3];
+    tmp[1] = a[1][0];
+    tmp[2] = a[1][1];
+    tmp[3] = a[1][2];
+
+    ((MV_U32*)(&(a[1][0])))[0] = ((MV_U32*)(&(tmp[0])))[0];
+    /*
+    a[1][0] = tmp[0];
+    a[1][1] = tmp[1];
+    a[1][2] = tmp[2];
+    a[1][3] = tmp[3];
+    */
+
+    tmp[0] = a[2][2];
+    tmp[1] = a[2][3];
+    tmp[2] = a[2][0];
+    tmp[3] = a[2][1];
+
+    ((MV_U32*)(&(a[2][0])))[0] = ((MV_U32*)(&(tmp[0])))[0];
+    /*
+    a[2][0] = tmp[0];
+    a[2][1] = tmp[1];
+    a[2][2] = tmp[2];
+    a[2][3] = tmp[3];
+    */
+
+    tmp[0] = a[3][1];
+    tmp[1] = a[3][2];
+    tmp[2] = a[3][3];
+    tmp[3] = a[3][0];
+
+    ((MV_U32*)(&(a[3][0])))[0] = ((MV_U32*)(&(tmp[0])))[0];
+    /*
+    a[3][0] = tmp[0];
+    a[3][1] = tmp[1];
+    a[3][2] = tmp[2];
+    a[3][3] = tmp[3];
+    */
+}
+
+void Substitution(MV_U8 a[4][MAXBC], MV_U8 box[256]) {
+	/* Replace every byte of the input by the byte at that place
+	 * in the nonlinear S-box
+	 */
+	int i, j;
+
+	for(i = 0; i < 4; i++)
+		for(j = 0; j < 4; j++) a[i][j] = box[a[i][j]] ;
+}
+
+void MixColumn(MV_U8 a[4][MAXBC], MV_U8 rk[4][MAXBC]) {
+        /* Mix the four bytes of every column in a linear way
+	 */
+	MV_U8 b[4][MAXBC];
+	int i, j;
+
+	for(j = 0; j < 4; j++){
+        b[0][j] = mul(25,a[0][j]) ^ mul(1,a[1][j]) ^ a[2][j] ^ a[3][j];
+        b[1][j] = mul(25,a[1][j]) ^ mul(1,a[2][j]) ^ a[3][j] ^ a[0][j];
+        b[2][j] = mul(25,a[2][j]) ^ mul(1,a[3][j]) ^ a[0][j] ^ a[1][j];
+        b[3][j] = mul(25,a[3][j]) ^ mul(1,a[0][j]) ^ a[1][j] ^ a[2][j];
+    }
+	for(i = 0; i < 4; i++)
+		/*for(j = 0; j < BC; j++) a[i][j] = b[i][j];*/
+        ((MV_U32*)(&(a[i][0])))[0] = ((MV_U32*)(&(b[i][0])))[0] ^ ((MV_U32*)(&(rk[i][0])))[0];;
+}
+
+void InvMixColumn(MV_U8 a[4][MAXBC]) {
+        /* Mix the four bytes of every column in a linear way
+	 * This is the opposite operation of Mixcolumn
+	 */
+	MV_U8 b[4][MAXBC];
+	int i, j;
+
+	for(j = 0; j < 4; j++){
+        b[0][j] = mul(223,a[0][j]) ^ mul(104,a[1][j]) ^ mul(238,a[2][j]) ^ mul(199,a[3][j]);
+        b[1][j] = mul(223,a[1][j]) ^ mul(104,a[2][j]) ^ mul(238,a[3][j]) ^ mul(199,a[0][j]);
+        b[2][j] = mul(223,a[2][j]) ^ mul(104,a[3][j]) ^ mul(238,a[0][j]) ^ mul(199,a[1][j]);
+        b[3][j] = mul(223,a[3][j]) ^ mul(104,a[0][j]) ^ mul(238,a[1][j]) ^ mul(199,a[2][j]);
+    }
+	for(i = 0; i < 4; i++)
+		/*for(j = 0; j < BC; j++) a[i][j] = b[i][j];*/
+        ((MV_U32*)(&(a[i][0])))[0] = ((MV_U32*)(&(b[i][0])))[0];
+}
+
+int rijndaelKeySched (MV_U8 k[4][MAXKC], int keyBits, int blockBits, MV_U8 W[MAXROUNDS+1][4][MAXBC])
+{
+	/* Calculate the necessary round keys
+	 * The number of calculations depends on keyBits and blockBits
+	 */
+	int KC, BC, ROUNDS;
+	int i, j, t, rconpointer = 0;
+	MV_U8 tk[4][MAXKC];
+
+	switch (keyBits) {
+	case 128: KC = 4; break;
+	case 192: KC = 6; break;
+	case 256: KC = 8; break;
+	default : return (-1);
+	}
+
+	switch (blockBits) {
+	case 128: BC = 4; break;
+	case 192: BC = 6; break;
+	case 256: BC = 8; break;
+	default : return (-2);
+	}
+
+	switch (keyBits >= blockBits ? keyBits : blockBits) {
+	case 128: ROUNDS = 10; break;
+	case 192: ROUNDS = 12; break;
+	case 256: ROUNDS = 14; break;
+	default : return (-3); /* this cannot happen */
+	}
+
+
+	for(j = 0; j < KC; j++)
+		for(i = 0; i < 4; i++)
+			tk[i][j] = k[i][j];
+	t = 0;
+	/* copy values into round key array */
+	for(j = 0; (j < KC) && (t < (ROUNDS+1)*BC); j++, t++)
+		for(i = 0; i < 4; i++) W[t / BC][i][t % BC] = tk[i][j];
+
+	while (t < (ROUNDS+1)*BC) { /* while not enough round key material calculated */
+		/* calculate new values */
+		for(i = 0; i < 4; i++)
+			tk[i][0] ^= S[tk[(i+1)%4][KC-1]];
+		tk[0][0] ^= rcon[rconpointer++];
+
+		if (KC != 8)
+			for(j = 1; j < KC; j++)
+				for(i = 0; i < 4; i++) tk[i][j] ^= tk[i][j-1];
+		else {
+			for(j = 1; j < KC/2; j++)
+				for(i = 0; i < 4; i++) tk[i][j] ^= tk[i][j-1];
+			for(i = 0; i < 4; i++) tk[i][KC/2] ^= S[tk[i][KC/2 - 1]];
+			for(j = KC/2 + 1; j < KC; j++)
+				for(i = 0; i < 4; i++) tk[i][j] ^= tk[i][j-1];
+	}
+	/* copy values into round key array */
+	for(j = 0; (j < KC) && (t < (ROUNDS+1)*BC); j++, t++)
+		for(i = 0; i < 4; i++) W[t / BC][i][t % BC] = tk[i][j];
+	}
+
+	return 0;
+}
+
+
+
+int rijndaelEncrypt128(MV_U8 a[4][MAXBC], MV_U8 rk[MAXROUNDS+1][4][MAXBC], int rounds)
+{
+	/* Encryption of one block.
+	 */
+	int r, BC, ROUNDS;
+
+    BC = 4;
+    ROUNDS = rounds;
+
+	/* begin with a key addition
+	 */
+
+	KeyAddition(a,rk[0],BC);
+
+    /* ROUNDS-1 ordinary rounds
+	 */
+	for(r = 1; r < ROUNDS; r++) {
+		Substitution(a,S);
+		ShiftRow128Enc(a);
+		MixColumn(a, rk[r]);
+		/*KeyAddition(a,rk[r],BC);*/
+	}
+
+	/* Last round is special: there is no MixColumn
+	 */
+	Substitution(a,S);
+	ShiftRow128Enc(a);
+	KeyAddition(a,rk[ROUNDS],BC);
+
+	return 0;
+}
+
+
+int rijndaelDecrypt128(MV_U8 a[4][MAXBC], MV_U8 rk[MAXROUNDS+1][4][MAXBC], int rounds)
+{
+	int r, BC, ROUNDS;
+
+    BC = 4;
+    ROUNDS = rounds;
+
+	/* To decrypt: apply the inverse operations of the encrypt routine,
+	 *             in opposite order
+	 *
+	 * (KeyAddition is an involution: it 's equal to its inverse)
+	 * (the inverse of Substitution with table S is Substitution with the inverse table of S)
+	 * (the inverse of Shiftrow is Shiftrow over a suitable distance)
+	 */
+
+        /* First the special round:
+	 *   without InvMixColumn
+	 *   with extra KeyAddition
+	 */
+	KeyAddition(a,rk[ROUNDS],BC);
+    ShiftRow128Dec(a);
+	Substitution(a,Si);
+
+	/* ROUNDS-1 ordinary rounds
+	 */
+	for(r = ROUNDS-1; r > 0; r--) {
+		KeyAddition(a,rk[r],BC);
+		InvMixColumn(a);
+		ShiftRow128Dec(a);
+		Substitution(a,Si);
+
+	}
+
+	/* End with the extra key addition
+	 */
+
+	KeyAddition(a,rk[0],BC);
+
+	return 0;
+}
diff --git a/crypto/ocf/kirkwood/cesa/AES/mvAesAlg.h b/crypto/ocf/kirkwood/cesa/AES/mvAesAlg.h
new file mode 100644
index 000000000000..ec81e403fac0
--- /dev/null
+++ b/crypto/ocf/kirkwood/cesa/AES/mvAesAlg.h
@@ -0,0 +1,19 @@
+/* rijndael-alg-ref.h   v2.0   August '99
+ * Reference ANSI C code
+ * authors: Paulo Barreto
+ *          Vincent Rijmen, K.U.Leuven
+ */
+#ifndef __RIJNDAEL_ALG_H
+#define __RIJNDAEL_ALG_H
+
+#define MAXBC				(128/32)
+#define MAXKC				(256/32)
+#define MAXROUNDS			14
+
+
+int rijndaelKeySched (MV_U8 k[4][MAXKC], int keyBits, int blockBits, MV_U8 rk[MAXROUNDS+1][4][MAXBC]);
+
+int rijndaelEncrypt128(MV_U8 a[4][MAXBC], MV_U8 rk[MAXROUNDS+1][4][MAXBC], int rounds);
+int rijndaelDecrypt128(MV_U8 a[4][MAXBC], MV_U8 rk[MAXROUNDS+1][4][MAXBC], int rounds);
+
+#endif /* __RIJNDAEL_ALG_H */
diff --git a/crypto/ocf/kirkwood/cesa/AES/mvAesApi.c b/crypto/ocf/kirkwood/cesa/AES/mvAesApi.c
new file mode 100644
index 000000000000..bea842b2808d
--- /dev/null
+++ b/crypto/ocf/kirkwood/cesa/AES/mvAesApi.c
@@ -0,0 +1,310 @@
+/* rijndael-api-ref.c   v2.1   April 2000
+ * Reference ANSI C code
+ * authors: v2.0 Paulo Barreto
+ *               Vincent Rijmen, K.U.Leuven
+ *          v2.1 Vincent Rijmen, K.U.Leuven
+ *
+ * This code is placed in the public domain.
+ */
+#include "mvOs.h"
+
+#include "mvAes.h"
+#include "mvAesAlg.h"
+
+
+/*  Defines:
+	Add any additional defines you need
+*/
+
+#define     MODE_ECB        1    /*  Are we ciphering in ECB mode?   */
+#define     MODE_CBC        2    /*  Are we ciphering in CBC mode?   */
+#define     MODE_CFB1       3    /*  Are we ciphering in 1-bit CFB mode? */
+
+
+int     aesMakeKey(MV_U8 *expandedKey, MV_U8 *keyMaterial, int keyLen, int blockLen)
+{
+    MV_U8   W[MAXROUNDS+1][4][MAXBC];
+	MV_U8   k[4][MAXKC];
+    MV_U8   j;
+	int     i, rounds, KC;
+
+	if (expandedKey == NULL)
+    {
+		return AES_BAD_KEY_INSTANCE;
+	}
+
+	if (!((keyLen == 128) || (keyLen == 192) || (keyLen == 256)))
+    {
+		return AES_BAD_KEY_MAT;
+	}
+
+	if (keyMaterial == NULL)
+    {
+		return AES_BAD_KEY_MAT;
+	}
+
+	/* initialize key schedule: */
+	for(i=0; i<keyLen/8; i++)
+    {
+		j = keyMaterial[i];
+		k[i % 4][i / 4] = j;
+	}
+
+	rijndaelKeySched (k, keyLen, blockLen, W);
+#ifdef MV_AES_DEBUG
+    {
+        MV_U8*  pW = &W[0][0][0];
+        int     x;
+
+        mvOsPrintf("Expended Key: size = %d\n", sizeof(W));
+        for(i=0; i<sizeof(W); i++)
+        {
+            mvOsPrintf("%02x ", pW[i]);
+        }
+        for(i=0; i<MAXROUNDS+1; i++)
+        {
+            mvOsPrintf("\n Round #%02d: ", i);
+            for(x=0; x<MAXBC; x++)
+            {
+                mvOsPrintf("%02x%02x%02x%02x ",
+                    W[i][0][x], W[i][1][x], W[i][2][x], W[i][3][x]);
+            }
+            mvOsPrintf("\n");
+        }
+    }
+#endif /* MV_AES_DEBUG */
+	switch (keyLen)
+    {
+	    case 128:
+            rounds = 10;
+            KC = 4;
+            break;
+	    case 192:
+            rounds = 12;
+            KC = 6;
+            break;
+	    case 256:
+            rounds = 14;
+            KC = 8;
+            break;
+	    default :
+            return (-1);
+	}
+
+    for(i=0; i<MAXBC; i++)
+    {
+        for(j=0; j<4; j++)
+        {
+            expandedKey[i*4+j] = W[rounds][j][i];
+        }
+    }
+    for(; i<KC; i++)
+    {
+        for(j=0; j<4; j++)
+        {
+            expandedKey[i*4+j] = W[rounds-1][j][i+MAXBC-KC];
+        }
+    }
+
+
+	return 0;
+}
+
+int     aesBlockEncrypt128(MV_U8 mode, MV_U8 *IV, MV_U8 *expandedKey, int  keyLen,
+                        MV_U32 *plain, int numBlocks, MV_U32 *cipher)
+{
+	int     i, j, t;
+	MV_U8   block[4][MAXBC];
+    int     rounds;
+    char    *input, *outBuffer;
+
+    input = (char*)plain;
+    outBuffer = (char*)cipher;
+
+        /* check parameter consistency: */
+    if( (expandedKey == NULL) || ((keyLen != 128) && (keyLen != 192) && (keyLen != 256)))
+    {
+        return AES_BAD_KEY_MAT;
+    }
+    if ((mode != MODE_ECB && mode != MODE_CBC))
+    {
+        return AES_BAD_CIPHER_STATE;
+    }
+
+	switch (keyLen)
+    {
+	    case 128: rounds = 10; break;
+	    case 192: rounds = 12; break;
+	    case 256: rounds = 14; break;
+	    default : return (-3); /* this cannot happen */
+	}
+
+
+	switch (mode)
+    {
+	    case MODE_ECB:
+		    for (i = 0; i < numBlocks; i++)
+            {
+			    for (j = 0; j < 4; j++)
+                {
+				    for(t = 0; t < 4; t++)
+				        /* parse input stream into rectangular array */
+					    block[t][j] = input[16*i+4*j+t] & 0xFF;
+			    }
+			    rijndaelEncrypt128(block, (MV_U8 (*)[4][MAXBC])expandedKey, rounds);
+			    for (j = 0; j < 4; j++)
+                {
+				    /* parse rectangular array into output ciphertext bytes */
+				    for(t = 0; t < 4; t++)
+                        outBuffer[16*i+4*j+t] = (MV_U8) block[t][j];
+
+			    }
+		    }
+		    break;
+
+	    case MODE_CBC:
+		    for (j = 0; j < 4; j++)
+            {
+			    for(t = 0; t < 4; t++)
+			    /* parse initial value into rectangular array */
+					block[t][j] = IV[t+4*j] & 0xFF;
+			}
+		    for (i = 0; i < numBlocks; i++)
+            {
+			    for (j = 0; j < 4; j++)
+                {
+				    for(t = 0; t < 4; t++)
+				        /* parse input stream into rectangular array and exor with
+				        IV or the previous ciphertext */
+					    block[t][j] ^= input[16*i+4*j+t] & 0xFF;
+			    }
+			    rijndaelEncrypt128(block, (MV_U8 (*)[4][MAXBC])expandedKey, rounds);
+			    for (j = 0; j < 4; j++)
+                {
+				    /* parse rectangular array into output ciphertext bytes */
+				    for(t = 0; t < 4; t++)
+					    outBuffer[16*i+4*j+t] = (MV_U8) block[t][j];
+			    }
+		    }
+		    break;
+
+	    default: return AES_BAD_CIPHER_STATE;
+	}
+
+	return 0;
+}
+
+int     aesBlockDecrypt128(MV_U8 mode, MV_U8 *IV, MV_U8 *expandedKey, int  keyLen,
+                            MV_U32 *srcData, int numBlocks, MV_U32 *dstData)
+{
+	int     i, j, t;
+	MV_U8   block[4][MAXBC];
+    MV_U8   iv[4][MAXBC];
+    int     rounds;
+    char    *input, *outBuffer;
+
+    input = (char*)srcData;
+    outBuffer = (char*)dstData;
+
+    if (expandedKey == NULL)
+    {
+		return AES_BAD_KEY_MAT;
+	}
+
+    /* check parameter consistency: */
+    if (keyLen != 128 && keyLen != 192 && keyLen != 256)
+    {
+        return AES_BAD_KEY_MAT;
+    }
+    if ((mode != MODE_ECB && mode != MODE_CBC))
+    {
+        return AES_BAD_CIPHER_STATE;
+    }
+
+	switch (keyLen)
+    {
+	    case 128: rounds = 10; break;
+	    case 192: rounds = 12; break;
+	    case 256: rounds = 14; break;
+	    default : return (-3); /* this cannot happen */
+	}
+
+
+	switch (mode)
+    {
+	    case MODE_ECB:
+		    for (i = 0; i < numBlocks; i++)
+            {
+			    for (j = 0; j < 4; j++)
+                {
+				    for(t = 0; t < 4; t++)
+                    {
+				        /* parse input stream into rectangular array */
+					    block[t][j] = input[16*i+4*j+t] & 0xFF;
+                    }
+			    }
+			    rijndaelDecrypt128(block, (MV_U8 (*)[4][MAXBC])expandedKey, rounds);
+			    for (j = 0; j < 4; j++)
+                {
+				    /* parse rectangular array into output ciphertext bytes */
+				    for(t = 0; t < 4; t++)
+					    outBuffer[16*i+4*j+t] = (MV_U8) block[t][j];
+			    }
+		    }
+		    break;
+
+	    case MODE_CBC:
+		    /* first block */
+		    for (j = 0; j < 4; j++)
+            {
+			    for(t = 0; t < 4; t++)
+                {
+			        /* parse input stream into rectangular array */
+				    block[t][j] = input[4*j+t] & 0xFF;
+                    iv[t][j] = block[t][j];
+                }
+		    }
+		    rijndaelDecrypt128(block, (MV_U8 (*)[4][MAXBC])expandedKey, rounds);
+
+		    for (j = 0; j < 4; j++)
+            {
+			    /* exor the IV and parse rectangular array into output ciphertext bytes */
+			    for(t = 0; t < 4; t++)
+                {
+				    outBuffer[4*j+t] = (MV_U8) (block[t][j] ^ IV[t+4*j]);
+                    IV[t+4*j] = iv[t][j];
+                }
+		    }
+
+		    /* next blocks */
+		    for (i = 1; i < numBlocks; i++)
+            {
+			    for (j = 0; j < 4; j++)
+                {
+				    for(t = 0; t < 4; t++)
+                    {
+				        /* parse input stream into rectangular array */
+                        iv[t][j] = input[16*i+4*j+t] & 0xFF;
+					    block[t][j] = iv[t][j];
+                    }
+			    }
+			    rijndaelDecrypt128(block, (MV_U8 (*)[4][MAXBC])expandedKey, rounds);
+
+			    for (j = 0; j < 4; j++)
+                {
+				    /* exor previous ciphertext block and parse rectangular array
+				       into output ciphertext bytes */
+				    for(t = 0; t < 4; t++)
+                    {
+					    outBuffer[16*i+4*j+t] = (MV_U8) (block[t][j] ^ IV[t+4*j]);
+                        IV[t+4*j] = iv[t][j];
+                    }
+			    }
+		    }
+		    break;
+
+	    default: return AES_BAD_CIPHER_STATE;
+	}
+
+	return 0;
+}
diff --git a/crypto/ocf/kirkwood/cesa/AES/mvAesBoxes.dat b/crypto/ocf/kirkwood/cesa/AES/mvAesBoxes.dat
new file mode 100644
index 000000000000..6c12b06836ab
--- /dev/null
+++ b/crypto/ocf/kirkwood/cesa/AES/mvAesBoxes.dat
@@ -0,0 +1,123 @@
+static MV_U8 mask[256] = {
+0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+};
+
+static MV_U8 Logtable[256] = {
+  0,   0,  25,   1,  50,   2,  26, 198,  75, 199,  27, 104,  51, 238, 223,   3,
+100,   4, 224,  14,  52, 141, 129, 239,  76, 113,   8, 200, 248, 105,  28, 193,
+125, 194,  29, 181, 249, 185,  39, 106,  77, 228, 166, 114, 154, 201,   9, 120,
+101,  47, 138,   5,  33,  15, 225,  36,  18, 240, 130,  69,  53, 147, 218, 142,
+150, 143, 219, 189,  54, 208, 206, 148,  19,  92, 210, 241,  64,  70, 131,  56,
+102, 221, 253,  48, 191,   6, 139,  98, 179,  37, 226, 152,  34, 136, 145,  16,
+126, 110,  72, 195, 163, 182,  30,  66,  58, 107,  40,  84, 250, 133,  61, 186,
+ 43, 121,  10,  21, 155, 159,  94, 202,  78, 212, 172, 229, 243, 115, 167,  87,
+175,  88, 168,  80, 244, 234, 214, 116,  79, 174, 233, 213, 231, 230, 173, 232,
+ 44, 215, 117, 122, 235,  22,  11, 245,  89, 203,  95, 176, 156, 169,  81, 160,
+127,  12, 246, 111,  23, 196,  73, 236, 216,  67,  31,  45, 164, 118, 123, 183,
+204, 187,  62,  90, 251,  96, 177, 134,  59,  82, 161, 108, 170,  85,  41, 157,
+151, 178, 135, 144,  97, 190, 220, 252, 188, 149, 207, 205,  55,  63,  91, 209,
+ 83,  57, 132,  60,  65, 162, 109,  71,  20,  42, 158,  93,  86, 242, 211, 171,
+ 68,  17, 146, 217,  35,  32,  46, 137, 180, 124, 184,  38, 119, 153, 227, 165,
+103,  74, 237, 222, 197,  49, 254,  24,  13,  99, 140, 128, 192, 247, 112,   7,
+};
+
+static MV_U8 Alogtable[512] = {
+  1,   3,   5,  15,  17,  51,  85, 255,  26,  46, 114, 150, 161, 248,  19,  53,
+ 95, 225,  56,  72, 216, 115, 149, 164, 247,   2,   6,  10,  30,  34, 102, 170,
+229,  52,  92, 228,  55,  89, 235,  38, 106, 190, 217, 112, 144, 171, 230,  49,
+ 83, 245,   4,  12,  20,  60,  68, 204,  79, 209, 104, 184, 211, 110, 178, 205,
+ 76, 212, 103, 169, 224,  59,  77, 215,  98, 166, 241,   8,  24,  40, 120, 136,
+131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206,  73, 219, 118, 154,
+181, 196,  87, 249,  16,  48,  80, 240,  11,  29,  39, 105, 187, 214,  97, 163,
+254,  25,  43, 125, 135, 146, 173, 236,  47, 113, 147, 174, 233,  32,  96, 160,
+251,  22,  58,  78, 210, 109, 183, 194,  93, 231,  50,  86, 250,  21,  63,  65,
+195,  94, 226,  61,  71, 201,  64, 192,  91, 237,  44, 116, 156, 191, 218, 117,
+159, 186, 213, 100, 172, 239,  42, 126, 130, 157, 188, 223, 122, 142, 137, 128,
+155, 182, 193,  88, 232,  35, 101, 175, 234,  37, 111, 177, 200,  67, 197,  84,
+252,  31,  33,  99, 165, 244,   7,   9,  27,  45, 119, 153, 176, 203,  70, 202,
+ 69, 207,  74, 222, 121, 139, 134, 145, 168, 227,  62,  66, 198,  81, 243,  14,
+ 18,  54,  90, 238,  41, 123, 141, 140, 143, 138, 133, 148, 167, 242,  13,  23,
+ 57,  75, 221, 124, 132, 151, 162, 253,  28,  36, 108, 180, 199,  82, 246,   1,
+
+       3,   5,  15,  17,  51,  85, 255,  26,  46, 114, 150, 161, 248,  19,  53,
+ 95, 225,  56,  72, 216, 115, 149, 164, 247,   2,   6,  10,  30,  34, 102, 170,
+229,  52,  92, 228,  55,  89, 235,  38, 106, 190, 217, 112, 144, 171, 230,  49,
+ 83, 245,   4,  12,  20,  60,  68, 204,  79, 209, 104, 184, 211, 110, 178, 205,
+ 76, 212, 103, 169, 224,  59,  77, 215,  98, 166, 241,   8,  24,  40, 120, 136,
+131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206,  73, 219, 118, 154,
+181, 196,  87, 249,  16,  48,  80, 240,  11,  29,  39, 105, 187, 214,  97, 163,
+254,  25,  43, 125, 135, 146, 173, 236,  47, 113, 147, 174, 233,  32,  96, 160,
+251,  22,  58,  78, 210, 109, 183, 194,  93, 231,  50,  86, 250,  21,  63,  65,
+195,  94, 226,  61,  71, 201,  64, 192,  91, 237,  44, 116, 156, 191, 218, 117,
+159, 186, 213, 100, 172, 239,  42, 126, 130, 157, 188, 223, 122, 142, 137, 128,
+155, 182, 193,  88, 232,  35, 101, 175, 234,  37, 111, 177, 200,  67, 197,  84,
+252,  31,  33,  99, 165, 244,   7,   9,  27,  45, 119, 153, 176, 203,  70, 202,
+ 69, 207,  74, 222, 121, 139, 134, 145, 168, 227,  62,  66, 198,  81, 243,  14,
+ 18,  54,  90, 238,  41, 123, 141, 140, 143, 138, 133, 148, 167, 242,  13,  23,
+ 57,  75, 221, 124, 132, 151, 162, 253,  28,  36, 108, 180, 199,  82, 246,   1,
+
+};
+
+static MV_U8 S[256] = {
+ 99, 124, 119, 123, 242, 107, 111, 197,  48,   1, 103,  43, 254, 215, 171, 118,
+202, 130, 201, 125, 250,  89,  71, 240, 173, 212, 162, 175, 156, 164, 114, 192,
+183, 253, 147,  38,  54,  63, 247, 204,  52, 165, 229, 241, 113, 216,  49,  21,
+  4, 199,  35, 195,  24, 150,   5, 154,   7,  18, 128, 226, 235,  39, 178, 117,
+  9, 131,  44,  26,  27, 110,  90, 160,  82,  59, 214, 179,  41, 227,  47, 132,
+ 83, 209,   0, 237,  32, 252, 177,  91, 106, 203, 190,  57,  74,  76,  88, 207,
+208, 239, 170, 251,  67,  77,  51, 133,  69, 249,   2, 127,  80,  60, 159, 168,
+ 81, 163,  64, 143, 146, 157,  56, 245, 188, 182, 218,  33,  16, 255, 243, 210,
+205,  12,  19, 236,  95, 151,  68,  23, 196, 167, 126,  61, 100,  93,  25, 115,
+ 96, 129,  79, 220,  34,  42, 144, 136,  70, 238, 184,  20, 222,  94,  11, 219,
+224,  50,  58,  10,  73,   6,  36,  92, 194, 211, 172,  98, 145, 149, 228, 121,
+231, 200,  55, 109, 141, 213,  78, 169, 108,  86, 244, 234, 101, 122, 174,   8,
+186, 120,  37,  46,  28, 166, 180, 198, 232, 221, 116,  31,  75, 189, 139, 138,
+112,  62, 181, 102,  72,   3, 246,  14,  97,  53,  87, 185, 134, 193,  29, 158,
+225, 248, 152,  17, 105, 217, 142, 148, 155,  30, 135, 233, 206,  85,  40, 223,
+140, 161, 137,  13, 191, 230,  66, 104,  65, 153,  45,  15, 176,  84, 187,  22,
+};
+
+static MV_U8 Si[256] = {
+ 82,   9, 106, 213,  48,  54, 165,  56, 191,  64, 163, 158, 129, 243, 215, 251,
+124, 227,  57, 130, 155,  47, 255, 135,  52, 142,  67,  68, 196, 222, 233, 203,
+ 84, 123, 148,  50, 166, 194,  35,  61, 238,  76, 149,  11,  66, 250, 195,  78,
+  8,  46, 161, 102,  40, 217,  36, 178, 118,  91, 162,  73, 109, 139, 209,  37,
+114, 248, 246, 100, 134, 104, 152,  22, 212, 164,  92, 204,  93, 101, 182, 146,
+108, 112,  72,  80, 253, 237, 185, 218,  94,  21,  70,  87, 167, 141, 157, 132,
+144, 216, 171,   0, 140, 188, 211,  10, 247, 228,  88,   5, 184, 179,  69,   6,
+208,  44,  30, 143, 202,  63,  15,   2, 193, 175, 189,   3,   1,  19, 138, 107,
+ 58, 145,  17,  65,  79, 103, 220, 234, 151, 242, 207, 206, 240, 180, 230, 115,
+150, 172, 116,  34, 231, 173,  53, 133, 226, 249,  55, 232,  28, 117, 223, 110,
+ 71, 241,  26, 113,  29,  41, 197, 137, 111, 183,  98,  14, 170,  24, 190,  27,
+252,  86,  62,  75, 198, 210, 121,  32, 154, 219, 192, 254, 120, 205,  90, 244,
+ 31, 221, 168,  51, 136,   7, 199,  49, 177,  18,  16,  89,  39, 128, 236,  95,
+ 96,  81, 127, 169,  25, 181,  74,  13,  45, 229, 122, 159, 147, 201, 156, 239,
+160, 224,  59,  77, 174,  42, 245, 176, 200, 235, 187,  60, 131,  83, 153,  97,
+ 23,  43,   4, 126, 186, 119, 214,  38, 225, 105,  20,  99,  85,  33,  12, 125,
+};
+
+/*
+static MV_U8 iG[4][4] = {
+{0x0e, 0x09, 0x0d, 0x0b},
+{0x0b, 0x0e, 0x09, 0x0d},
+{0x0d, 0x0b, 0x0e, 0x09},
+{0x09, 0x0d, 0x0b, 0x0e},
+};
+*/
+static MV_U32 rcon[30] = {
+  0x01,0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, };
diff --git a/crypto/ocf/kirkwood/cesa/mvCesa.c b/crypto/ocf/kirkwood/cesa/mvCesa.c
new file mode 100644
index 000000000000..d9fbe0cf7277
--- /dev/null
+++ b/crypto/ocf/kirkwood/cesa/mvCesa.c
@@ -0,0 +1,3126 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+        this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "cesa/mvCesa.h"
+
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#undef CESA_DEBUG
+
+
+/********** Global variables **********/
+
+/*  If request size is more than MV_CESA_MAX_BUF_SIZE the
+ *  request is processed as fragmented request.
+ */
+
+MV_CESA_STATS           cesaStats;
+
+MV_BUF_INFO             cesaSramSaBuf;
+short                   cesaLastSid = -1;
+MV_CESA_SA*             pCesaSAD = NULL;
+MV_U16                  cesaMaxSA = 0;
+
+MV_CESA_REQ*            pCesaReqFirst = NULL;
+MV_CESA_REQ*            pCesaReqLast = NULL;
+MV_CESA_REQ*            pCesaReqEmpty = NULL;
+MV_CESA_REQ*            pCesaReqProcess = NULL;
+int                     cesaQueueDepth = 0;
+int                     cesaReqResources = 0;
+
+MV_CESA_SRAM_MAP*       cesaSramVirtPtr = NULL;
+MV_U32                  cesaCryptEngBase = 0;
+void			*cesaOsHandle = NULL;
+#if (MV_CESA_VERSION >= 3)
+MV_U32			cesaChainLength = 0;
+int                     chainReqNum = 0;
+MV_U32			chainIndex = 0;
+MV_CESA_REQ*  		pNextActiveChain = 0;
+MV_CESA_REQ*		pEndCurrChain = 0;
+MV_BOOL			isFirstReq = MV_TRUE;
+#endif
+
+static INLINE MV_U8*  mvCesaSramAddrGet(void)
+{
+#ifdef MV_CESA_NO_SRAM
+    return (MV_U8*)cesaSramVirtPtr;
+#else
+    return (MV_U8*)cesaCryptEngBase;
+#endif /* MV_CESA_NO_SRAM */
+}
+
+static INLINE MV_ULONG    mvCesaSramVirtToPhys(void* pDev, MV_U8* pSramVirt)
+{
+#ifdef MV_CESA_NO_SRAM
+    return (MV_ULONG)mvOsIoVirtToPhy(NULL, pSramVirt);
+#else
+    return (MV_ULONG)pSramVirt;
+#endif /* MV_CESA_NO_SRAM */
+}
+
+/* Internal Function prototypes */
+
+static INLINE void      mvCesaSramDescrBuild(MV_U32 config, int frag,
+                                 int cryptoOffset, int ivOffset, int cryptoLength,
+                                 int macOffset, int digestOffset, int macLength, int macTotalLen,
+                                 MV_CESA_REQ *pCesaReq, MV_DMA_DESC* pDmaDesc);
+
+static INLINE void      mvCesaSramSaUpdate(short sid, MV_DMA_DESC *pDmaDesc);
+
+static INLINE int       mvCesaDmaCopyPrepare(MV_CESA_MBUF* pMbuf, MV_U8* pSramBuf,
+                                MV_DMA_DESC* pDmaDesc, MV_BOOL isToMbuf,
+                                int offset, int copySize, MV_BOOL skipFlush);
+
+static void        mvCesaHmacIvGet(MV_CESA_MAC_MODE macMode, unsigned char key[], int keyLength,
+                                    unsigned char innerIV[], unsigned char outerIV[]);
+
+static MV_STATUS   mvCesaFragAuthComplete(MV_CESA_REQ* pReq, MV_CESA_SA* pSA,
+                                          int macDataSize);
+
+static MV_CESA_COMMAND*   mvCesaCtrModeInit(void);
+
+static MV_STATUS   mvCesaCtrModePrepare(MV_CESA_COMMAND *pCtrModeCmd, MV_CESA_COMMAND *pCmd);
+static MV_STATUS   mvCesaCtrModeComplete(MV_CESA_COMMAND *pOrgCmd, MV_CESA_COMMAND *pCmd);
+static void        mvCesaCtrModeFinish(MV_CESA_COMMAND *pCmd);
+
+static INLINE MV_STATUS mvCesaReqProcess(MV_CESA_REQ* pReq);
+static MV_STATUS   mvCesaFragReqProcess(MV_CESA_REQ* pReq, MV_U8 frag);
+
+static INLINE MV_STATUS mvCesaParamCheck(MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd, MV_U8* pFixOffset);
+static INLINE MV_STATUS mvCesaFragParamCheck(MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd);
+
+static INLINE void      mvCesaFragSizeFind(MV_CESA_SA* pSA, MV_CESA_REQ* pReq,
+                               int cryptoOffset, int macOffset,
+                               int* pCopySize, int* pCryptoDataSize, int* pMacDataSize);
+static MV_STATUS        mvCesaMbufCacheUnmap(MV_CESA_MBUF* pMbuf, int offset, int size);
+
+
+/* Go to the next request in the request queue */
+static INLINE MV_CESA_REQ* MV_CESA_REQ_NEXT_PTR(MV_CESA_REQ* pReq)
+{
+    if(pReq == pCesaReqLast)
+        return pCesaReqFirst;
+
+    return pReq+1;
+}
+
+#if (MV_CESA_VERSION >= 3)
+/* Go to the previous request in the request queue */
+static INLINE MV_CESA_REQ* MV_CESA_REQ_PREV_PTR(MV_CESA_REQ* pReq)
+{
+    if(pReq == pCesaReqFirst)
+        return pCesaReqLast;
+
+    return pReq-1;
+}
+
+#endif
+
+
+static INLINE void mvCesaReqProcessStart(MV_CESA_REQ* pReq)
+{
+    int frag;
+
+#if (MV_CESA_VERSION >= 3)
+    pReq->state = MV_CESA_CHAIN;
+#else
+    pReq->state = MV_CESA_PROCESS;
+#endif
+    cesaStats.startCount++;
+
+    if(pReq->fragMode == MV_CESA_FRAG_NONE)
+    {
+        frag = 0;
+    }
+    else
+    {
+        frag = pReq->frags.nextFrag;
+        pReq->frags.nextFrag++;
+    }
+#if (MV_CESA_VERSION >= 2)
+    /* Enable TDMA engine */
+    MV_REG_WRITE(MV_CESA_TDMA_CURR_DESC_PTR_REG, 0);
+    MV_REG_WRITE(MV_CESA_TDMA_NEXT_DESC_PTR_REG,
+            (MV_U32)mvCesaVirtToPhys(&pReq->dmaDescBuf, pReq->dma[frag].pDmaFirst));
+#else
+    /* Enable IDMA engine */
+    MV_REG_WRITE(IDMA_CURR_DESC_PTR_REG(0), 0);
+    MV_REG_WRITE(IDMA_NEXT_DESC_PTR_REG(0),
+            (MV_U32)mvCesaVirtToPhys(&pReq->dmaDescBuf, pReq->dma[frag].pDmaFirst));
+#endif /* MV_CESA_VERSION >= 2 */
+
+#if defined(MV_BRIDGE_SYNC_REORDER)
+    mvOsBridgeReorderWA();
+#endif
+
+    /* Start Accelerator */
+    MV_REG_WRITE(MV_CESA_CMD_REG, MV_CESA_CMD_CHAN_ENABLE_MASK);
+}
+
+
+/*******************************************************************************
+* mvCesaHalInit - Initialize the CESA driver
+*
+* DESCRIPTION:
+*       This function initialize the CESA driver.
+*       1) Session database
+*       2) Request queue
+*       4) DMA descriptor lists - one list per request. Each list
+*           has MV_CESA_MAX_DMA_DESC descriptors.
+*
+* INPUT:
+*       numOfSession    - maximum number of supported sessions
+*       queueDepth      - number of elements in the request queue.
+*       pSramBase       - virtual address of Sram
+*	osHandle	- A handle used by the OS to allocate memory for the
+*			  module (Passed to the OS Services layer)
+*
+* RETURN:
+*       MV_OK           - Success
+*       MV_NO_RESOURCE  - Fail, can't allocate resources:
+*                         Session database, request queue,
+*                         DMA descriptors list, LRU cache database.
+*       MV_NOT_ALIGNED  - Sram base address is not 8 byte aligned.
+*
+*******************************************************************************/
+MV_STATUS mvCesaHalInit (int numOfSession, int queueDepth, char* pSramBase, MV_U32 cryptEngBase,
+			 void *osHandle)
+{
+    int     i, req;
+    MV_U32  descOffsetReg, configReg;
+    MV_CESA_SRAM_SA *pSramSA;
+
+
+    mvOsPrintf("mvCesaInit: sessions=%d, queue=%d, pSram=%p\n",
+                numOfSession, queueDepth, pSramBase);
+
+    cesaOsHandle = osHandle;
+    /* Create Session database */
+    pCesaSAD = mvOsMalloc(sizeof(MV_CESA_SA)*numOfSession);
+    if(pCesaSAD == NULL)
+    {
+        mvOsPrintf("mvCesaInit: Can't allocate %u bytes for %d SAs\n",
+                    sizeof(MV_CESA_SA)*numOfSession, numOfSession);
+        mvCesaFinish();
+        return MV_NO_RESOURCE;
+    }
+    memset(pCesaSAD, 0, sizeof(MV_CESA_SA)*numOfSession);
+    cesaMaxSA = numOfSession;
+
+    /* Allocate imag of sramSA in the DRAM */
+    cesaSramSaBuf.bufSize = sizeof(MV_CESA_SRAM_SA)*numOfSession +
+                                    CPU_D_CACHE_LINE_SIZE;
+
+    cesaSramSaBuf.bufVirtPtr = mvOsIoCachedMalloc(osHandle,cesaSramSaBuf.bufSize,
+						  &cesaSramSaBuf.bufPhysAddr,
+						  &cesaSramSaBuf.memHandle);
+
+    if(cesaSramSaBuf.bufVirtPtr == NULL)
+    {
+        mvOsPrintf("mvCesaInit: Can't allocate %d bytes for sramSA structures\n",
+                    cesaSramSaBuf.bufSize);
+        mvCesaFinish();
+        return MV_NO_RESOURCE;
+    }
+    memset(cesaSramSaBuf.bufVirtPtr, 0, cesaSramSaBuf.bufSize);
+    pSramSA = (MV_CESA_SRAM_SA*)MV_ALIGN_UP((MV_ULONG)cesaSramSaBuf.bufVirtPtr,
+                                                       CPU_D_CACHE_LINE_SIZE);
+    for(i=0; i<numOfSession; i++)
+    {
+        pCesaSAD[i].pSramSA = &pSramSA[i];
+    }
+
+    /* Create request queue */
+    pCesaReqFirst = mvOsMalloc(sizeof(MV_CESA_REQ)*queueDepth);
+    if(pCesaReqFirst == NULL)
+    {
+        mvOsPrintf("mvCesaInit: Can't allocate %u bytes for %d requests\n",
+                    sizeof(MV_CESA_REQ)*queueDepth, queueDepth);
+        mvCesaFinish();
+        return MV_NO_RESOURCE;
+    }
+    memset(pCesaReqFirst, 0, sizeof(MV_CESA_REQ)*queueDepth);
+    pCesaReqEmpty = pCesaReqFirst;
+    pCesaReqLast = pCesaReqFirst + (queueDepth-1);
+    pCesaReqProcess = pCesaReqEmpty;
+    cesaQueueDepth = queueDepth;
+    cesaReqResources = queueDepth;
+#if (MV_CESA_VERSION >= 3)
+    cesaChainLength = MAX_CESA_CHAIN_LENGTH;
+#endif
+    /* pSramBase must be 8 byte aligned */
+    if( MV_IS_NOT_ALIGN((MV_ULONG)pSramBase, 8) )
+    {
+        mvOsPrintf("mvCesaInit: pSramBase (%p) must be 8 byte aligned\n",
+                pSramBase);
+        mvCesaFinish();
+        return MV_NOT_ALIGNED;
+    }
+    cesaSramVirtPtr = (MV_CESA_SRAM_MAP*)pSramBase;
+
+    cesaCryptEngBase = cryptEngBase;
+
+    /*memset(cesaSramVirtPtr, 0, sizeof(MV_CESA_SRAM_MAP));*/
+
+    /* Clear registers */
+    MV_REG_WRITE( MV_CESA_CFG_REG, 0);
+    MV_REG_WRITE( MV_CESA_ISR_CAUSE_REG, 0);
+    MV_REG_WRITE( MV_CESA_ISR_MASK_REG, 0);
+
+    /* Initialize DMA descriptor lists for all requests in Request queue */
+    descOffsetReg = configReg = 0;
+    for(req=0; req<queueDepth; req++)
+    {
+        int             frag;
+        MV_CESA_REQ*    pReq;
+        MV_DMA_DESC*    pDmaDesc;
+
+        pReq = &pCesaReqFirst[req];
+
+        pReq->cesaDescBuf.bufSize = sizeof(MV_CESA_DESC)*MV_CESA_MAX_REQ_FRAGS +
+                                        CPU_D_CACHE_LINE_SIZE;
+
+	pReq->cesaDescBuf.bufVirtPtr =
+		mvOsIoCachedMalloc(osHandle,pReq->cesaDescBuf.bufSize,
+				   &pReq->cesaDescBuf.bufPhysAddr,
+				   &pReq->cesaDescBuf.memHandle);
+
+        if(pReq->cesaDescBuf.bufVirtPtr == NULL)
+        {
+            mvOsPrintf("mvCesaInit: req=%d, Can't allocate %d bytes for CESA descriptors\n",
+                        req, pReq->cesaDescBuf.bufSize);
+                mvCesaFinish();
+                return MV_NO_RESOURCE;
+            }
+        memset(pReq->cesaDescBuf.bufVirtPtr, 0, pReq->cesaDescBuf.bufSize);
+        pReq->pCesaDesc = (MV_CESA_DESC*)MV_ALIGN_UP((MV_ULONG)pReq->cesaDescBuf.bufVirtPtr,
+                                                                CPU_D_CACHE_LINE_SIZE);
+
+        pReq->dmaDescBuf.bufSize = sizeof(MV_DMA_DESC)*MV_CESA_MAX_DMA_DESC*MV_CESA_MAX_REQ_FRAGS +
+                                    CPU_D_CACHE_LINE_SIZE;
+
+        pReq->dmaDescBuf.bufVirtPtr =
+		mvOsIoCachedMalloc(osHandle,pReq->dmaDescBuf.bufSize,
+				   &pReq->dmaDescBuf.bufPhysAddr,
+				   &pReq->dmaDescBuf.memHandle);
+
+        if(pReq->dmaDescBuf.bufVirtPtr == NULL)
+        {
+            mvOsPrintf("mvCesaInit: req=%d, Can't allocate %d bytes for DMA descriptor list\n",
+                        req, pReq->dmaDescBuf.bufSize);
+            mvCesaFinish();
+            return MV_NO_RESOURCE;
+        }
+        memset(pReq->dmaDescBuf.bufVirtPtr, 0, pReq->dmaDescBuf.bufSize);
+        pDmaDesc = (MV_DMA_DESC*)MV_ALIGN_UP((MV_ULONG)pReq->dmaDescBuf.bufVirtPtr,
+                                                       CPU_D_CACHE_LINE_SIZE);
+
+        for(frag=0; frag<MV_CESA_MAX_REQ_FRAGS; frag++)
+        {
+            MV_CESA_DMA*    pDma = &pReq->dma[frag];
+
+            pDma->pDmaFirst = pDmaDesc;
+            pDma->pDmaLast = NULL;
+
+            for(i=0; i<MV_CESA_MAX_DMA_DESC-1; i++)
+            {
+                /* link all DMA descriptors together */
+                pDma->pDmaFirst[i].phyNextDescPtr =
+                        MV_32BIT_LE(mvCesaVirtToPhys(&pReq->dmaDescBuf, &pDmaDesc[i+1]));
+            }
+            pDma->pDmaFirst[i].phyNextDescPtr = 0;
+            mvOsCacheFlush(NULL, &pDma->pDmaFirst[0], MV_CESA_MAX_DMA_DESC*sizeof(MV_DMA_DESC));
+
+            pDmaDesc += MV_CESA_MAX_DMA_DESC;
+        }
+    }
+    /*mvCesaCryptoIvSet(NULL, MV_CESA_MAX_IV_LENGTH);*/
+    descOffsetReg = (MV_U16)((MV_U8*)&cesaSramVirtPtr->desc - mvCesaSramAddrGet());
+    MV_REG_WRITE(MV_CESA_CHAN_DESC_OFFSET_REG, descOffsetReg);
+
+    configReg |= (MV_CESA_CFG_WAIT_DMA_MASK | MV_CESA_CFG_ACT_DMA_MASK);
+#if (MV_CESA_VERSION >= 3)
+    configReg |= MV_CESA_CFG_CHAIN_MODE_MASK;
+#endif
+
+#if (MV_CESA_VERSION >= 2)
+    /* Initialize TDMA engine */
+    MV_REG_WRITE(MV_CESA_TDMA_CTRL_REG, MV_CESA_TDMA_CTRL_VALUE);
+    MV_REG_WRITE(MV_CESA_TDMA_BYTE_COUNT_REG, 0);
+    MV_REG_WRITE(MV_CESA_TDMA_CURR_DESC_PTR_REG, 0);
+#else
+    /* Initialize IDMA #0 engine */
+    MV_REG_WRITE(IDMA_CTRL_LOW_REG(0), 0);
+    MV_REG_WRITE(IDMA_BYTE_COUNT_REG(0), 0);
+    MV_REG_WRITE(IDMA_CURR_DESC_PTR_REG(0), 0);
+    MV_REG_WRITE(IDMA_CTRL_HIGH_REG(0), ICCHR_ENDIAN_LITTLE
+#ifdef MV_CPU_LE
+		| ICCHR_DESC_BYTE_SWAP_EN
+#endif
+		 );
+    /* Clear Cause Byte of IDMA channel to be used */
+    MV_REG_WRITE( IDMA_CAUSE_REG, ~ICICR_CAUSE_MASK_ALL(0));
+    MV_REG_WRITE(IDMA_CTRL_LOW_REG(0), MV_CESA_IDMA_CTRL_LOW_VALUE);
+#endif /* (MV_CESA_VERSION >= 2) */
+
+    /* Set CESA configuration registers */
+    MV_REG_WRITE( MV_CESA_CFG_REG, configReg);
+    mvCesaDebugStatsClear();
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaFinish - Shutdown the CESA driver
+*
+* DESCRIPTION:
+*       This function shutdown the CESA driver and free all allocted resources.
+*
+* INPUT:    None
+*
+* RETURN:
+*       MV_OK   - Success
+*       Other   - Fail
+*
+*******************************************************************************/
+MV_STATUS   mvCesaFinish (void)
+{
+    int             req;
+    MV_CESA_REQ*    pReq;
+
+    mvOsPrintf("mvCesaFinish: \n");
+
+    cesaSramVirtPtr = NULL;
+
+    /* Free all resources: DMA list, etc. */
+    for(req=0; req<cesaQueueDepth; req++)
+    {
+	pReq = &pCesaReqFirst[req];
+        if(pReq->dmaDescBuf.bufVirtPtr != NULL)
+        {
+		mvOsIoCachedFree(cesaOsHandle,pReq->dmaDescBuf.bufSize,
+				 pReq->dmaDescBuf.bufPhysAddr,
+				 pReq->dmaDescBuf.bufVirtPtr,
+				 pReq->dmaDescBuf.memHandle);
+        }
+        if(pReq->cesaDescBuf.bufVirtPtr != NULL)
+        {
+                mvOsIoCachedFree(cesaOsHandle,pReq->cesaDescBuf.bufSize,
+				 pReq->cesaDescBuf.bufPhysAddr,
+				 pReq->cesaDescBuf.bufVirtPtr,
+				 pReq->cesaDescBuf.memHandle);
+        }
+    }
+#if (MV_CESA_VERSION < 2)
+    MV_REG_WRITE(IDMA_CTRL_LOW_REG(0), 0);
+#endif /* (MV_CESA_VERSION < 2) */
+
+    /* Free request queue */
+    if(pCesaReqFirst != NULL)
+    {
+        mvOsFree(pCesaReqFirst);
+        pCesaReqFirst = pCesaReqLast = NULL;
+        pCesaReqEmpty = pCesaReqProcess = NULL;
+        cesaQueueDepth = cesaReqResources = 0;
+    }
+    /* Free SA database */
+    if(pCesaSAD != NULL)
+    {
+        mvOsFree(pCesaSAD);
+        pCesaSAD = NULL;
+        cesaMaxSA = 0;
+    }
+    MV_REG_WRITE( MV_CESA_CFG_REG, 0);
+    MV_REG_WRITE( MV_CESA_ISR_CAUSE_REG, 0);
+    MV_REG_WRITE( MV_CESA_ISR_MASK_REG, 0);
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaCryptoIvSet - Set IV value for Crypto algorithm working in CBC mode
+*
+* DESCRIPTION:
+*    This function set IV value using by Crypto algorithms in CBC mode.
+*   Each channel has its own IV value.
+*   This function gets IV value from the caller. If no IV value passed from
+*   the caller or only part of IV passed, the function will init the rest part
+*   of IV value (or the whole IV) by random value.
+*
+* INPUT:
+*       MV_U8*  pIV     - Pointer to IV value supplied by user. If pIV==NULL
+*                       the function will generate random IV value.
+*       int     ivSize  - size (in bytes) of IV provided by user. If ivSize is
+*                       smaller than maximum IV size, the function will complete
+*                       IV by random value.
+*
+* RETURN:
+*       MV_OK   - Success
+*       Other   - Fail
+*
+*******************************************************************************/
+MV_STATUS   mvCesaCryptoIvSet(MV_U8* pIV, int ivSize)
+{
+    MV_U8*  pSramIV;
+#if defined(MV646xx)
+    mvOsPrintf("mvCesaCryptoIvSet: ERR. shouldn't use this call on MV64660\n");
+#endif
+    pSramIV = cesaSramVirtPtr->cryptoIV;
+    if(ivSize > MV_CESA_MAX_IV_LENGTH)
+    {
+        mvOsPrintf("mvCesaCryptoIvSet: ivSize (%d) is too large\n", ivSize);
+        ivSize = MV_CESA_MAX_IV_LENGTH;
+    }
+    if(pIV != NULL)
+    {
+        memcpy(pSramIV, pIV, ivSize);
+        ivSize = MV_CESA_MAX_IV_LENGTH - ivSize;
+        pSramIV += ivSize;
+    }
+
+    while(ivSize > 0)
+    {
+        int size, mv_random = mvOsRand();
+
+        size = MV_MIN(ivSize, sizeof(mv_random));
+        memcpy(pSramIV, (void*)&mv_random, size);
+
+        pSramIV += size;
+        ivSize -= size;
+    }
+/*
+    mvOsCacheFlush(NULL, cesaSramVirtPtr->cryptoIV,
+                                MV_CESA_MAX_IV_LENGTH);
+    mvOsCacheInvalidate(NULL, cesaSramVirtPtr->cryptoIV,
+                              MV_CESA_MAX_IV_LENGTH);
+*/
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaSessionOpen - Open new uni-directional crypto session
+*
+* DESCRIPTION:
+*       This function open new session.
+*
+* INPUT:
+*       MV_CESA_OPEN_SESSION *pSession - pointer to new session input parameters
+*
+* OUTPUT:
+*       short           *pSid  - session ID, should be used for all future
+*                                   requests over this session.
+*
+* RETURN:
+*       MV_OK           - Session opend successfully.
+*       MV_FULL         - All sessions are in use, no free place in
+*                       SA database.
+*       MV_BAD_PARAM    - One of session input parameters is invalid.
+*
+*******************************************************************************/
+MV_STATUS   mvCesaSessionOpen(MV_CESA_OPEN_SESSION *pSession, short* pSid)
+{
+    short       sid;
+    MV_U32      config = 0;
+    int         digestSize;
+
+    cesaStats.openedCount++;
+
+    /* Find free entry in SAD */
+    for(sid=0; sid<cesaMaxSA; sid++)
+    {
+        if(pCesaSAD[sid].valid == 0)
+        {
+            break;
+        }
+    }
+    if(sid == cesaMaxSA)
+    {
+        mvOsPrintf("mvCesaSessionOpen: SA Database is FULL\n");
+        return MV_FULL;
+    }
+
+    /* Check Input parameters for Open session */
+    if (pSession->operation >= MV_CESA_MAX_OPERATION)
+    {
+        mvOsPrintf("mvCesaSessionOpen: Unexpected operation %d\n",
+                    pSession->operation);
+        return MV_BAD_PARAM;
+    }
+    config |= (pSession->operation << MV_CESA_OPERATION_OFFSET);
+
+    if( (pSession->direction != MV_CESA_DIR_ENCODE) &&
+        (pSession->direction != MV_CESA_DIR_DECODE) )
+    {
+        mvOsPrintf("mvCesaSessionOpen: Unexpected direction %d\n",
+                    pSession->direction);
+        return MV_BAD_PARAM;
+    }
+    config |= (pSession->direction << MV_CESA_DIRECTION_BIT);
+    /* Clear SA entry */
+    /* memset(&pCesaSAD[sid], 0, sizeof(pCesaSAD[sid])); */
+
+    /* Check AUTH parameters and update SA entry */
+    if(pSession->operation != MV_CESA_CRYPTO_ONLY)
+    {
+        /* For HMAC (MD5 and SHA1) - Maximum Key size is 64 bytes */
+        if( (pSession->macMode == MV_CESA_MAC_HMAC_MD5) ||
+            (pSession->macMode == MV_CESA_MAC_HMAC_SHA1) )
+        {
+            if(pSession->macKeyLength > MV_CESA_MAX_MAC_KEY_LENGTH)
+            {
+                mvOsPrintf("mvCesaSessionOpen: macKeyLength %d is too large\n",
+                            pSession->macKeyLength);
+                return MV_BAD_PARAM;
+            }
+            mvCesaHmacIvGet(pSession->macMode, pSession->macKey, pSession->macKeyLength,
+                            pCesaSAD[sid].pSramSA->macInnerIV,
+                            pCesaSAD[sid].pSramSA->macOuterIV);
+            pCesaSAD[sid].macKeyLength = pSession->macKeyLength;
+        }
+        switch(pSession->macMode)
+        {
+            case MV_CESA_MAC_MD5:
+            case MV_CESA_MAC_HMAC_MD5:
+                digestSize = MV_CESA_MD5_DIGEST_SIZE;
+                break;
+
+            case MV_CESA_MAC_SHA1:
+            case MV_CESA_MAC_HMAC_SHA1:
+                digestSize = MV_CESA_SHA1_DIGEST_SIZE;
+                break;
+
+            default:
+                mvOsPrintf("mvCesaSessionOpen: Unexpected macMode %d\n",
+                            pSession->macMode);
+                return MV_BAD_PARAM;
+        }
+        config |= (pSession->macMode << MV_CESA_MAC_MODE_OFFSET);
+
+        /* Supported digest sizes: MD5 - 16 bytes (128 bits), */
+        /* SHA1 - 20 bytes (160 bits) or 12 bytes (96 bits) for both */
+        if( (pSession->digestSize != digestSize) && (pSession->digestSize != 12))
+        {
+            mvOsPrintf("mvCesaSessionOpen: Unexpected digest size %d\n",
+                        pSession->digestSize);
+            mvOsPrintf("\t Valid values [bytes]: MD5-16, SHA1-20, Both-12\n");
+            return MV_BAD_PARAM;
+        }
+        pCesaSAD[sid].digestSize = pSession->digestSize;
+
+        if(pCesaSAD[sid].digestSize == 12)
+        {
+            /* Set MV_CESA_MAC_DIGEST_SIZE_BIT if digest size is 96 bits */
+            config |= (MV_CESA_MAC_DIGEST_96B << MV_CESA_MAC_DIGEST_SIZE_BIT);
+        }
+    }
+
+    /* Check CRYPTO parameters and update SA entry */
+    if(pSession->operation != MV_CESA_MAC_ONLY)
+    {
+        switch(pSession->cryptoAlgorithm)
+        {
+            case MV_CESA_CRYPTO_DES:
+                pCesaSAD[sid].cryptoKeyLength = MV_CESA_DES_KEY_LENGTH;
+                pCesaSAD[sid].cryptoBlockSize = MV_CESA_DES_BLOCK_SIZE;
+                break;
+
+            case MV_CESA_CRYPTO_3DES:
+                pCesaSAD[sid].cryptoKeyLength = MV_CESA_3DES_KEY_LENGTH;
+                pCesaSAD[sid].cryptoBlockSize = MV_CESA_DES_BLOCK_SIZE;
+                /* Only EDE mode is supported */
+                config |= (MV_CESA_CRYPTO_3DES_EDE <<
+                            MV_CESA_CRYPTO_3DES_MODE_BIT);
+                break;
+
+            case MV_CESA_CRYPTO_AES:
+                switch(pSession->cryptoKeyLength)
+                {
+                    case 16:
+                        pCesaSAD[sid].cryptoKeyLength = MV_CESA_AES_128_KEY_LENGTH;
+                        config |= (MV_CESA_CRYPTO_AES_KEY_128 <<
+                                       MV_CESA_CRYPTO_AES_KEY_LEN_OFFSET);
+                        break;
+
+                    case 24:
+                        pCesaSAD[sid].cryptoKeyLength = MV_CESA_AES_192_KEY_LENGTH;
+                        config |= (MV_CESA_CRYPTO_AES_KEY_192 <<
+                                       MV_CESA_CRYPTO_AES_KEY_LEN_OFFSET);
+                        break;
+
+                    case 32:
+                    default:
+                        pCesaSAD[sid].cryptoKeyLength = MV_CESA_AES_256_KEY_LENGTH;
+                        config |= (MV_CESA_CRYPTO_AES_KEY_256 <<
+                                       MV_CESA_CRYPTO_AES_KEY_LEN_OFFSET);
+                        break;
+                }
+                pCesaSAD[sid].cryptoBlockSize = MV_CESA_AES_BLOCK_SIZE;
+                break;
+
+            default:
+                mvOsPrintf("mvCesaSessionOpen: Unexpected cryptoAlgorithm %d\n",
+                            pSession->cryptoAlgorithm);
+                return MV_BAD_PARAM;
+        }
+        config |= (pSession->cryptoAlgorithm << MV_CESA_CRYPTO_ALG_OFFSET);
+
+        if(pSession->cryptoKeyLength != pCesaSAD[sid].cryptoKeyLength)
+        {
+            mvOsPrintf("cesaSessionOpen: Wrong CryptoKeySize %d != %d\n",
+                        pSession->cryptoKeyLength, pCesaSAD[sid].cryptoKeyLength);
+            return MV_BAD_PARAM;
+        }
+
+        /* Copy Crypto key */
+        if( (pSession->cryptoAlgorithm == MV_CESA_CRYPTO_AES) &&
+            (pSession->direction == MV_CESA_DIR_DECODE))
+        {
+            /* Crypto Key for AES decode is computed from original key material */
+            /* and depend on cryptoKeyLength (128/192/256 bits) */
+            aesMakeKey(pCesaSAD[sid].pSramSA->cryptoKey, pSession->cryptoKey,
+                        pSession->cryptoKeyLength*8, MV_CESA_AES_BLOCK_SIZE*8);
+        }
+        else
+        {
+                /*panic("mvCesaSessionOpen2");*/
+                memcpy(pCesaSAD[sid].pSramSA->cryptoKey, pSession->cryptoKey,
+                    pCesaSAD[sid].cryptoKeyLength);
+
+        }
+
+        switch(pSession->cryptoMode)
+        {
+            case MV_CESA_CRYPTO_ECB:
+                pCesaSAD[sid].cryptoIvSize = 0;
+                break;
+
+            case MV_CESA_CRYPTO_CBC:
+                pCesaSAD[sid].cryptoIvSize = pCesaSAD[sid].cryptoBlockSize;
+                break;
+
+            case MV_CESA_CRYPTO_CTR:
+                /* Supported only for AES algorithm */
+                if(pSession->cryptoAlgorithm != MV_CESA_CRYPTO_AES)
+                {
+                    mvOsPrintf("mvCesaSessionOpen: CRYPTO CTR mode supported for AES only\n");
+                    return MV_BAD_PARAM;
+                }
+                pCesaSAD[sid].cryptoIvSize = 0;
+                pCesaSAD[sid].ctrMode = 1;
+                /* Replace to ECB mode for HW */
+                pSession->cryptoMode = MV_CESA_CRYPTO_ECB;
+                break;
+
+            default:
+                mvOsPrintf("mvCesaSessionOpen: Unexpected cryptoMode %d\n",
+                            pSession->cryptoMode);
+                return MV_BAD_PARAM;
+        }
+
+        config |= (pSession->cryptoMode << MV_CESA_CRYPTO_MODE_BIT);
+    }
+    pCesaSAD[sid].config = config;
+
+    mvOsCacheFlush(NULL, pCesaSAD[sid].pSramSA, sizeof(MV_CESA_SRAM_SA));
+    if(pSid != NULL)
+        *pSid = sid;
+
+    pCesaSAD[sid].valid = 1;
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaSessionClose - Close active crypto session
+*
+* DESCRIPTION:
+*       This function closes existing session
+*
+* INPUT:
+*       short sid   - Unique identifier of the session to be closed
+*
+* RETURN:
+*       MV_OK        - Session closed successfully.
+*       MV_BAD_PARAM - Session identifier is out of valid range.
+*       MV_NOT_FOUND - There is no active session with such ID.
+*
+*******************************************************************************/
+MV_STATUS mvCesaSessionClose(short sid)
+{
+    cesaStats.closedCount++;
+
+    if(sid >= cesaMaxSA)
+    {
+        mvOsPrintf("CESA Error: sid (%d) is too big\n", sid);
+        return MV_BAD_PARAM;
+    }
+    if(pCesaSAD[sid].valid == 0)
+    {
+        mvOsPrintf("CESA Warning: Session (sid=%d) is invalid\n", sid);
+        return MV_NOT_FOUND;
+    }
+    if(cesaLastSid == sid)
+        cesaLastSid = -1;
+
+    pCesaSAD[sid].valid = 0;
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaAction - Perform crypto operation
+*
+* DESCRIPTION:
+*       This function set new CESA request FIFO queue for further HW processing.
+*       The function checks request parameters before set new request to the queue.
+*       If one of the CESA channels is ready for processing the request will be
+*       passed to HW. When request processing is finished the CESA interrupt will
+*       be generated by HW. The caller should call mvCesaReadyGet() function to
+*       complete request processing and get result.
+*
+* INPUT:
+*       MV_CESA_COMMAND *pCmd   - pointer to new CESA request.
+*                               It includes pointers to Source and Destination
+*                               buffers, session identifier get from
+*                               mvCesaSessionOpen() function, pointer to caller
+*                               private data and all needed crypto parameters.
+*
+* RETURN:
+*       MV_OK             - request successfully added to request queue
+*                         and will be processed.
+*       MV_NO_MORE        - request successfully added to request queue and will
+*                         be processed, but request queue became Full and next
+*                         request will not be accepted.
+*       MV_NO_RESOURCE    - request queue is FULL and the request can not
+*                         be processed.
+*       MV_OUT_OF_CPU_MEM - memory allocation needed for request processing is
+*                         failed. Request can not be processed.
+*       MV_NOT_ALLOWED    - This mixed request (CRYPTO+MAC) can not be processed
+*                         as one request and should be splitted for two requests:
+*                         CRYPTO_ONLY and MAC_ONLY.
+*       MV_BAD_PARAM      - One of the request parameters is out of valid range.
+*                         The request can not be processed.
+*
+*******************************************************************************/
+MV_STATUS   mvCesaAction (MV_CESA_COMMAND *pCmd)
+{
+    MV_STATUS       status;
+    MV_CESA_REQ*    pReq = pCesaReqEmpty;
+    int             sid = pCmd->sessionId;
+    MV_CESA_SA*     pSA = &pCesaSAD[sid];
+#if (MV_CESA_VERSION >= 3)
+     MV_CESA_REQ*   pFromReq;
+     MV_CESA_REQ*   pToReq;
+#endif
+    cesaStats.reqCount++;
+
+    /* Check that the request queue is not FULL */
+    if(cesaReqResources == 0)
+        return MV_NO_RESOURCE;
+
+    if( (sid >= cesaMaxSA) || (!pSA->valid) )
+    {
+        mvOsPrintf("CESA Action Error: Session sid=%d is INVALID\n", sid);
+        return MV_BAD_PARAM;
+    }
+    pSA->count++;
+
+    if(pSA->ctrMode)
+    {
+        /* AES in CTR mode can't be mixed with Authentication */
+        if( (pSA->config & MV_CESA_OPERATION_MASK) !=
+            (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET) )
+        {
+            mvOsPrintf("mvCesaAction : CRYPTO CTR mode can't be mixed with AUTH\n");
+            return MV_NOT_ALLOWED;
+        }
+        /* All other request parameters should not be checked because key stream */
+        /* (not user data) processed by AES HW engine */
+        pReq->pOrgCmd = pCmd;
+        /* Allocate temporary pCmd structure for Key stream */
+        pCmd = mvCesaCtrModeInit();
+        if(pCmd == NULL)
+            return MV_OUT_OF_CPU_MEM;
+
+        /* Prepare Key stream */
+        mvCesaCtrModePrepare(pCmd, pReq->pOrgCmd);
+        pReq->fixOffset = 0;
+    }
+    else
+    {
+        /* Check request parameters and calculae fixOffset */
+        status = mvCesaParamCheck(pSA, pCmd, &pReq->fixOffset);
+        if(status != MV_OK)
+        {
+            return status;
+        }
+    }
+    pReq->pCmd = pCmd;
+
+    /* Check if the packet need fragmentation */
+    if(pCmd->pSrc->mbufSize <= sizeof(cesaSramVirtPtr->buf) )
+    {
+        /* request size is smaller than single buffer size */
+        pReq->fragMode = MV_CESA_FRAG_NONE;
+
+        /* Prepare NOT fragmented packets */
+        status = mvCesaReqProcess(pReq);
+        if(status != MV_OK)
+        {
+            mvOsPrintf("CesaReady: ReqProcess error: pReq=%p, status=0x%x\n",
+                        pReq, status);
+        }
+#if (MV_CESA_VERSION >= 3)
+	pReq->frags.numFrag = 1;
+#endif
+    }
+    else
+    {
+        MV_U8     frag = 0;
+
+        /* request size is larger than buffer size - needs fragmentation */
+
+        /* Check restrictions for processing fragmented packets */
+        status = mvCesaFragParamCheck(pSA, pCmd);
+        if(status != MV_OK)
+            return status;
+
+        pReq->fragMode = MV_CESA_FRAG_FIRST;
+        pReq->frags.nextFrag = 0;
+
+        /* Prepare Process Fragmented packets */
+        while(pReq->fragMode != MV_CESA_FRAG_LAST)
+        {
+            if(frag >= MV_CESA_MAX_REQ_FRAGS)
+            {
+                mvOsPrintf("mvCesaAction Error: Too large request frag=%d\n", frag);
+                return MV_OUT_OF_CPU_MEM;
+            }
+            status = mvCesaFragReqProcess(pReq, frag);
+            if(status == MV_OK) {
+#if (MV_CESA_VERSION >= 3)
+		if(frag) {
+			pReq->dma[frag-1].pDmaLast->phyNextDescPtr =
+				MV_32BIT_LE(mvCesaVirtToPhys(&pReq->dmaDescBuf, pReq->dma[frag].pDmaFirst));
+			mvOsCacheFlush(NULL, pReq->dma[frag-1].pDmaLast, sizeof(MV_DMA_DESC));
+		}
+#endif
+                frag++;
+        }
+        }
+        pReq->frags.numFrag = frag;
+#if (MV_CESA_VERSION >= 3)
+	if(chainReqNum) {
+		chainReqNum += pReq->frags.numFrag;
+		if(chainReqNum >= MAX_CESA_CHAIN_LENGTH)
+			chainReqNum = MAX_CESA_CHAIN_LENGTH;
+	}
+#endif
+    }
+
+    pReq->state = MV_CESA_PENDING;
+
+    pCesaReqEmpty = MV_CESA_REQ_NEXT_PTR(pReq);
+    cesaReqResources -= 1;
+
+/* #ifdef CESA_DEBUG */
+    if( (cesaQueueDepth - cesaReqResources) > cesaStats.maxReqCount)
+        cesaStats.maxReqCount = (cesaQueueDepth - cesaReqResources);
+/* #endif CESA_DEBUG */
+
+    cesaLastSid = sid;
+
+#if (MV_CESA_VERSION >= 3)
+    /* Are we within chain bounderies and follows the first request ? */
+    if((chainReqNum > 0) && (chainReqNum < MAX_CESA_CHAIN_LENGTH)) {
+	if(chainIndex) {
+		pFromReq = MV_CESA_REQ_PREV_PTR(pReq);
+		pToReq = pReq;
+		pReq->state = MV_CESA_CHAIN;
+		/* assume concatenating is possible */
+		pFromReq->dma[pFromReq->frags.numFrag-1].pDmaLast->phyNextDescPtr =
+			MV_32BIT_LE(mvCesaVirtToPhys(&pToReq->dmaDescBuf, pToReq->dma[0].pDmaFirst));
+		mvOsCacheFlush(NULL, pFromReq->dma[pFromReq->frags.numFrag-1].pDmaLast, sizeof(MV_DMA_DESC));
+
+		/* align active & next pointers */
+		if(pNextActiveChain->state != MV_CESA_PENDING)
+			pEndCurrChain = pNextActiveChain = MV_CESA_REQ_NEXT_PTR(pReq);
+	}
+	else { /* we have only one chain, start new one */
+		chainReqNum = 0;
+		chainIndex++;
+		/* align active & next pointers  */
+		if(pNextActiveChain->state != MV_CESA_PENDING)
+			pEndCurrChain = pNextActiveChain = pReq;
+	}
+    }
+    else {
+		/* In case we concatenate full chain */
+		if(chainReqNum == MAX_CESA_CHAIN_LENGTH) {
+			chainIndex++;
+			if(pNextActiveChain->state != MV_CESA_PENDING)
+				pEndCurrChain = pNextActiveChain = pReq;
+			chainReqNum = 0;
+		}
+
+		pReq = pCesaReqProcess;
+		if(pReq->state == MV_CESA_PENDING) {
+			pNextActiveChain = pReq;
+			pEndCurrChain = MV_CESA_REQ_NEXT_PTR(pReq);
+			/* Start Process new request */
+			mvCesaReqProcessStart(pReq);
+		}
+    }
+
+    chainReqNum++;
+
+    if((chainIndex < MAX_CESA_CHAIN_LENGTH) && (chainReqNum > cesaStats.maxChainUsage))
+	cesaStats.maxChainUsage = chainReqNum;
+
+#else
+
+    /* Check status of CESA channels and process requests if possible */
+    pReq = pCesaReqProcess;
+    if(pReq->state == MV_CESA_PENDING)
+    {
+        /* Start Process new request */
+        mvCesaReqProcessStart(pReq);
+    }
+#endif
+    /* If request queue became FULL - return MV_NO_MORE */
+    if(cesaReqResources == 0)
+        return MV_NO_MORE;
+
+    return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvCesaReadyGet - Get crypto request that processing is finished
+*
+* DESCRIPTION:
+*       This function complete request processing and return ready request to
+*       caller. To don't miss interrupts the caller must call this function
+*       while MV_OK or MV_TERMINATE values returned.
+*
+* INPUT:
+*   MV_U32          chanMap  - map of CESA channels finished thier job
+*                              accordingly with CESA Cause register.
+*   MV_CESA_RESULT* pResult  - pointer to structure contains information
+*                            about ready request. It includes pointer to
+*                            user private structure "pReqPrv", session identifier
+*                            for this request "sessionId" and return code.
+*                            Return code set to MV_FAIL if calculated digest value
+*                            on decode direction is different than digest value
+*                            in the packet.
+*
+* RETURN:
+*       MV_OK           - Success, ready request is returned.
+*       MV_NOT_READY    - Next request is not ready yet. New interrupt will
+*                       be generated for futher request processing.
+*       MV_EMPTY        - There is no more request for processing.
+*       MV_BUSY         - Fragmented request is not ready yet.
+*       MV_TERMINATE    - Call this function once more to complete processing
+*                       of fragmented request.
+*
+*******************************************************************************/
+MV_STATUS mvCesaReadyGet(MV_CESA_RESULT* pResult)
+{
+    MV_STATUS       status, readyStatus = MV_NOT_READY;
+    MV_U32          statusReg;
+    MV_CESA_REQ*    pReq;
+    MV_CESA_SA*     pSA;
+
+#if (MV_CESA_VERSION >= 3)
+    if(isFirstReq == MV_TRUE) {
+	if(chainIndex == 0)
+		chainReqNum = 0;
+
+	isFirstReq = MV_FALSE;
+
+	if(pNextActiveChain->state == MV_CESA_PENDING) {
+		/* Start request Process */
+		mvCesaReqProcessStart(pNextActiveChain);
+		pEndCurrChain = pNextActiveChain;
+		if(chainIndex > 0)
+			chainIndex--;
+		/* Update pNextActiveChain to next chain head */
+		   while(pNextActiveChain->state == MV_CESA_CHAIN)
+			pNextActiveChain = MV_CESA_REQ_NEXT_PTR(pNextActiveChain);
+	}
+    }
+
+    /* Check if there are more processed requests - can we remove pEndCurrChain ??? */
+    if(pCesaReqProcess == pEndCurrChain) {
+		isFirstReq = MV_TRUE;
+		pEndCurrChain = pNextActiveChain;
+#else
+    if(pCesaReqProcess->state != MV_CESA_PROCESS) {
+#endif
+        return MV_EMPTY;
+    }
+
+#ifdef CESA_DEBUG
+    statusReg = MV_REG_READ(MV_CESA_STATUS_REG);
+    if( statusReg & MV_CESA_STATUS_ACTIVE_MASK )
+    {
+        mvOsPrintf("mvCesaReadyGet: Not Ready, Status = 0x%x\n", statusReg);
+        cesaStats.notReadyCount++;
+        return MV_NOT_READY;
+    }
+#endif /* CESA_DEBUG */
+
+    cesaStats.readyCount++;
+
+    pReq = pCesaReqProcess;
+    pSA = &pCesaSAD[pReq->pCmd->sessionId];
+
+    pResult->retCode = MV_OK;
+    if(pReq->fragMode != MV_CESA_FRAG_NONE)
+    {
+        MV_U8*          pNewDigest;
+      int             frag;
+#if (MV_CESA_VERSION >= 3)
+      pReq->frags.nextFrag = 1;
+      while(pReq->frags.nextFrag <= pReq->frags.numFrag) {
+#endif
+	frag = (pReq->frags.nextFrag - 1);
+
+        /* Restore DMA descriptor list */
+        pReq->dma[frag].pDmaLast->phyNextDescPtr =
+                MV_32BIT_LE(mvCesaVirtToPhys(&pReq->dmaDescBuf, &pReq->dma[frag].pDmaLast[1]));
+        pReq->dma[frag].pDmaLast = NULL;
+
+        /* Special processing for finished fragmented request */
+        if(pReq->frags.nextFrag >= pReq->frags.numFrag)
+        {
+            mvCesaMbufCacheUnmap(pReq->pCmd->pDst, 0, pReq->pCmd->pDst->mbufSize);
+
+            /* Fragmented packet is ready */
+            if( (pSA->config & MV_CESA_OPERATION_MASK) !=
+                (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET) )
+            {
+                int  macDataSize = pReq->pCmd->macLength - pReq->frags.macSize;
+
+                if(macDataSize != 0)
+                {
+                    /* Calculate all other blocks by SW */
+                    mvCesaFragAuthComplete(pReq, pSA, macDataSize);
+                }
+
+                /* Copy new digest from SRAM to the Destination buffer */
+                pNewDigest = cesaSramVirtPtr->buf + pReq->frags.newDigestOffset;
+                status = mvCesaCopyToMbuf(pNewDigest, pReq->pCmd->pDst,
+                                   pReq->pCmd->digestOffset, pSA->digestSize);
+
+                /* For decryption: Compare new digest value with original one */
+                if((pSA->config & MV_CESA_DIRECTION_MASK) ==
+                            (MV_CESA_DIR_DECODE << MV_CESA_DIRECTION_BIT))
+                {
+                    if( memcmp(pNewDigest, pReq->frags.orgDigest, pSA->digestSize) != 0)
+                    {
+/*
+                        mvOsPrintf("Digest error: chan=%d, newDigest=%p, orgDigest=%p, status = 0x%x\n",
+                            chan, pNewDigest, pReq->frags.orgDigest, MV_REG_READ(MV_CESA_STATUS_REG));
+*/
+                        /* Signiture verification is failed */
+                        pResult->retCode = MV_FAIL;
+                    }
+                }
+            }
+            readyStatus = MV_OK;
+        }
+#if (MV_CESA_VERSION >= 3)
+	pReq->frags.nextFrag++;
+      }
+#endif
+    }
+    else
+    {
+        mvCesaMbufCacheUnmap(pReq->pCmd->pDst, 0, pReq->pCmd->pDst->mbufSize);
+
+        /* Restore DMA descriptor list */
+        pReq->dma[0].pDmaLast->phyNextDescPtr =
+                MV_32BIT_LE(mvCesaVirtToPhys(&pReq->dmaDescBuf, &pReq->dma[0].pDmaLast[1]));
+        pReq->dma[0].pDmaLast = NULL;
+        if( ((pSA->config & MV_CESA_OPERATION_MASK) !=
+                (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET) ) &&
+            ((pSA->config & MV_CESA_DIRECTION_MASK) ==
+                        (MV_CESA_DIR_DECODE << MV_CESA_DIRECTION_BIT)) )
+        {
+            /* For AUTH on decode : Check Digest result in Status register */
+            statusReg = MV_REG_READ(MV_CESA_STATUS_REG);
+            if(statusReg & MV_CESA_STATUS_DIGEST_ERR_MASK)
+            {
+/*
+                mvOsPrintf("Digest error: chan=%d, status = 0x%x\n",
+                        chan, statusReg);
+*/
+                /* Signiture verification is failed */
+                pResult->retCode = MV_FAIL;
+            }
+        }
+        readyStatus = MV_OK;
+    }
+
+    if(readyStatus == MV_OK)
+    {
+        /* If Request is ready - Prepare pResult structure */
+        pResult->pReqPrv = pReq->pCmd->pReqPrv;
+        pResult->sessionId = pReq->pCmd->sessionId;
+
+        pReq->state = MV_CESA_IDLE;
+        pCesaReqProcess = MV_CESA_REQ_NEXT_PTR(pReq);
+        cesaReqResources++;
+
+        if(pSA->ctrMode)
+        {
+            /* For AES CTR mode - complete processing and free allocated resources */
+            mvCesaCtrModeComplete(pReq->pOrgCmd, pReq->pCmd);
+            mvCesaCtrModeFinish(pReq->pCmd);
+            pReq->pOrgCmd = NULL;
+        }
+    }
+
+#if (MV_CESA_VERSION < 3)
+    if(pCesaReqProcess->state == MV_CESA_PROCESS)
+    {
+        /* Start request Process */
+        mvCesaReqProcessStart(pCesaReqProcess);
+        if(readyStatus == MV_NOT_READY)
+            readyStatus = MV_BUSY;
+    }
+    else if(pCesaReqProcess != pCesaReqEmpty)
+    {
+        /* Start process new request from the queue */
+        mvCesaReqProcessStart(pCesaReqProcess);
+    }
+#endif
+    return readyStatus;
+}
+
+/***************** Functions to work with CESA_MBUF structure ******************/
+
+/*******************************************************************************
+* mvCesaMbufOffset - Locate offset in the Mbuf structure
+*
+* DESCRIPTION:
+*       This function locates offset inside Multi-Bufeer structure.
+*       It get fragment number and place in the fragment where the offset
+*       is located.
+*
+*
+* INPUT:
+*   MV_CESA_MBUF* pMbuf  - Pointer to multi-buffer structure
+*   int           offset - Offset from the beginning of the data presented by
+*                        the Mbuf structure.
+*
+* OUTPUT:
+*   int*        pBufOffset  - Offset from the beginning of the fragment where
+*                           the offset is located.
+*
+* RETURN:
+*       int - Number of fragment, where the offset is located\
+*
+*******************************************************************************/
+int     mvCesaMbufOffset(MV_CESA_MBUF* pMbuf, int offset, int* pBufOffset)
+{
+    int frag = 0;
+
+    while(offset > 0)
+    {
+        if(frag >= pMbuf->numFrags)
+        {
+            mvOsPrintf("mvCesaMbufOffset: Error: frag (%d) > numFrags (%d)\n",
+                    frag, pMbuf->numFrags);
+            return MV_INVALID;
+        }
+        if(offset < pMbuf->pFrags[frag].bufSize)
+        {
+            break;
+        }
+        offset -= pMbuf->pFrags[frag].bufSize;
+        frag++;
+    }
+    if(pBufOffset != NULL)
+        *pBufOffset = offset;
+
+    return frag;
+}
+
+/*******************************************************************************
+* mvCesaCopyFromMbuf - Copy data from the Mbuf structure to continuous buffer
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*   MV_U8*          pDstBuf  - Pointer to continuous buffer, where data is
+*                              copied to.
+*   MV_CESA_MBUF*   pSrcMbuf - Pointer to multi-buffer structure where data is
+*                              copied from.
+*   int             offset   - Offset in the Mbuf structure where located first
+*                            byte of data should be copied.
+*   int             size     - Size of data should be copied
+*
+* RETURN:
+*       MV_OK           - Success, all data is copied successfully.
+*       MV_OUT_OF_RANGE - Failed, offset is out of Multi-buffer data range.
+*                         No data is copied.
+*       MV_EMPTY        - Multi-buffer structure has not enough data to copy
+*                       Data from the offset to end of Mbuf data is copied.
+*
+*******************************************************************************/
+MV_STATUS   mvCesaCopyFromMbuf(MV_U8* pDstBuf, MV_CESA_MBUF* pSrcMbuf,
+                               int offset, int size)
+{
+    int     frag, fragOffset, bufSize;
+    MV_U8*  pBuf;
+
+    if(size == 0)
+        return MV_OK;
+
+    frag = mvCesaMbufOffset(pSrcMbuf, offset, &fragOffset);
+    if(frag == MV_INVALID)
+    {
+        mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
+        return MV_OUT_OF_RANGE;
+    }
+
+    bufSize = pSrcMbuf->pFrags[frag].bufSize - fragOffset;
+    pBuf = pSrcMbuf->pFrags[frag].bufVirtPtr + fragOffset;
+    while(MV_TRUE)
+    {
+        if(size <= bufSize)
+        {
+            memcpy(pDstBuf, pBuf, size);
+            return MV_OK;
+        }
+        memcpy(pDstBuf, pBuf, bufSize);
+        size -= bufSize;
+        frag++;
+        pDstBuf += bufSize;
+        if(frag >= pSrcMbuf->numFrags)
+            break;
+
+        bufSize = pSrcMbuf->pFrags[frag].bufSize;
+        pBuf = pSrcMbuf->pFrags[frag].bufVirtPtr;
+    }
+    mvOsPrintf("mvCesaCopyFromMbuf: Mbuf is EMPTY - %d bytes isn't copied\n",
+                size);
+    return MV_EMPTY;
+}
+
+/*******************************************************************************
+* mvCesaCopyToMbuf - Copy data from continuous buffer to the Mbuf structure
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*   MV_U8*          pSrcBuf  - Pointer to continuous buffer, where data is
+*                              copied from.
+*   MV_CESA_MBUF*   pDstMbuf - Pointer to multi-buffer structure where data is
+*                              copied to.
+*   int             offset   - Offset in the Mbuf structure where located first
+*                            byte of data should be copied.
+*   int             size     - Size of data should be copied
+*
+* RETURN:
+*       MV_OK           - Success, all data is copied successfully.
+*       MV_OUT_OF_RANGE - Failed, offset is out of Multi-buffer data range.
+*                         No data is copied.
+*       MV_FULL         - Multi-buffer structure has not enough place to copy
+*                       all data. Data from the offset to end of Mbuf data
+*                       is copied.
+*
+*******************************************************************************/
+MV_STATUS   mvCesaCopyToMbuf(MV_U8* pSrcBuf, MV_CESA_MBUF* pDstMbuf,
+                               int offset, int size)
+{
+    int     frag, fragOffset, bufSize;
+    MV_U8*  pBuf;
+
+    if(size == 0)
+        return MV_OK;
+
+    frag = mvCesaMbufOffset(pDstMbuf, offset, &fragOffset);
+    if(frag == MV_INVALID)
+    {
+        mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
+        return MV_OUT_OF_RANGE;
+    }
+
+    bufSize = pDstMbuf->pFrags[frag].bufSize - fragOffset;
+    pBuf = pDstMbuf->pFrags[frag].bufVirtPtr + fragOffset;
+    while(MV_TRUE)
+    {
+        if(size <= bufSize)
+        {
+            memcpy(pBuf, pSrcBuf, size);
+            return MV_OK;
+        }
+        memcpy(pBuf, pSrcBuf, bufSize);
+        size -= bufSize;
+        frag++;
+        pSrcBuf += bufSize;
+        if(frag >= pDstMbuf->numFrags)
+            break;
+
+        bufSize = pDstMbuf->pFrags[frag].bufSize;
+        pBuf = pDstMbuf->pFrags[frag].bufVirtPtr;
+    }
+    mvOsPrintf("mvCesaCopyToMbuf: Mbuf is FULL - %d bytes isn't copied\n",
+                size);
+    return MV_FULL;
+}
+
+/*******************************************************************************
+* mvCesaMbufCopy - Copy data from one Mbuf structure to the other Mbuf structure
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*
+*   MV_CESA_MBUF*   pDstMbuf - Pointer to multi-buffer structure where data is
+*                              copied to.
+*   int      dstMbufOffset   - Offset in the dstMbuf structure where first byte
+*                            of data should be copied to.
+*   MV_CESA_MBUF*   pSrcMbuf - Pointer to multi-buffer structure where data is
+*                              copied from.
+*   int      srcMbufOffset   - Offset in the srcMbuf structure where first byte
+*                            of data should be copied from.
+*   int             size     - Size of data should be copied
+*
+* RETURN:
+*       MV_OK           - Success, all data is copied successfully.
+*       MV_OUT_OF_RANGE - Failed, srcMbufOffset or dstMbufOffset is out of
+*                       srcMbuf or dstMbuf structure correspondently.
+*                       No data is copied.
+*       MV_BAD_SIZE     - srcMbuf or dstMbuf structure is too small to copy
+*                       all data. Partial data is copied
+*
+*******************************************************************************/
+MV_STATUS   mvCesaMbufCopy(MV_CESA_MBUF* pMbufDst, int dstMbufOffset,
+                           MV_CESA_MBUF* pMbufSrc, int srcMbufOffset, int size)
+{
+    int     srcFrag, dstFrag, srcSize, dstSize, srcOffset, dstOffset;
+    int     copySize;
+    MV_U8   *pSrc, *pDst;
+
+    if(size == 0)
+        return MV_OK;
+
+    srcFrag = mvCesaMbufOffset(pMbufSrc, srcMbufOffset, &srcOffset);
+    if(srcFrag == MV_INVALID)
+    {
+        mvOsPrintf("CESA srcMbuf Error: offset (%d) out of range\n", srcMbufOffset);
+        return MV_OUT_OF_RANGE;
+    }
+    pSrc = pMbufSrc->pFrags[srcFrag].bufVirtPtr + srcOffset;
+    srcSize = pMbufSrc->pFrags[srcFrag].bufSize - srcOffset;
+
+    dstFrag = mvCesaMbufOffset(pMbufDst, dstMbufOffset, &dstOffset);
+    if(dstFrag == MV_INVALID)
+    {
+        mvOsPrintf("CESA dstMbuf Error: offset (%d) out of range\n", dstMbufOffset);
+        return MV_OUT_OF_RANGE;
+    }
+    pDst = pMbufDst->pFrags[dstFrag].bufVirtPtr + dstOffset;
+    dstSize = pMbufDst->pFrags[dstFrag].bufSize - dstOffset;
+
+    while(size > 0)
+    {
+        copySize = MV_MIN(srcSize, dstSize);
+        if(size <= copySize)
+        {
+            memcpy(pDst, pSrc, size);
+            return MV_OK;
+        }
+        memcpy(pDst, pSrc, copySize);
+        size -= copySize;
+        srcSize -= copySize;
+        dstSize -= copySize;
+
+        if(srcSize == 0)
+        {
+            srcFrag++;
+            if(srcFrag >= pMbufSrc->numFrags)
+                break;
+
+            pSrc = pMbufSrc->pFrags[srcFrag].bufVirtPtr;
+            srcSize = pMbufSrc->pFrags[srcFrag].bufSize;
+        }
+
+        if(dstSize == 0)
+        {
+            dstFrag++;
+            if(dstFrag >= pMbufDst->numFrags)
+                break;
+
+            pDst = pMbufDst->pFrags[dstFrag].bufVirtPtr;
+            dstSize = pMbufDst->pFrags[dstFrag].bufSize;
+        }
+    }
+    mvOsPrintf("mvCesaMbufCopy: BAD size - %d bytes isn't copied\n",
+                size);
+
+    return MV_BAD_SIZE;
+}
+
+static MV_STATUS   mvCesaMbufCacheUnmap(MV_CESA_MBUF* pMbuf, int offset, int size)
+{
+    int     frag, fragOffset, bufSize;
+    MV_U8*  pBuf;
+
+    if(size == 0)
+        return MV_OK;
+
+    frag = mvCesaMbufOffset(pMbuf, offset, &fragOffset);
+    if(frag == MV_INVALID)
+    {
+        mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
+        return MV_OUT_OF_RANGE;
+    }
+
+    bufSize = pMbuf->pFrags[frag].bufSize - fragOffset;
+    pBuf = pMbuf->pFrags[frag].bufVirtPtr + fragOffset;
+    while(MV_TRUE)
+    {
+        if(size <= bufSize)
+        {
+            mvOsCacheUnmap(NULL, mvOsIoVirtToPhy(NULL, pBuf), size);
+            return MV_OK;
+        }
+
+        mvOsCacheUnmap(NULL, mvOsIoVirtToPhy(NULL, pBuf), bufSize);
+        size -= bufSize;
+        frag++;
+        if(frag >= pMbuf->numFrags)
+            break;
+
+        bufSize = pMbuf->pFrags[frag].bufSize;
+        pBuf = pMbuf->pFrags[frag].bufVirtPtr;
+    }
+    mvOsPrintf("%s: Mbuf is FULL - %d bytes isn't Unmapped\n",
+                __FUNCTION__, size);
+    return MV_FULL;
+}
+
+
+/*************************************** Local Functions ******************************/
+
+/*******************************************************************************
+* mvCesaFragReqProcess - Process fragmented request
+*
+* DESCRIPTION:
+*       This function processes a fragment of fragmented request (First, Middle or Last)
+*
+*
+* INPUT:
+*       MV_CESA_REQ* pReq   - Pointer to the request in the request queue.
+*
+* RETURN:
+*       MV_OK        - The fragment is successfully passed to HW for processing.
+*       MV_TERMINATE - Means, that HW finished its work on this packet and no more
+*                    interrupts will be generated for this request.
+*                    Function mvCesaReadyGet() must be called to complete request
+*                    processing and get request result.
+*
+*******************************************************************************/
+static MV_STATUS   mvCesaFragReqProcess(MV_CESA_REQ* pReq, MV_U8 frag)
+{
+    int                     i, copySize, cryptoDataSize, macDataSize, sid;
+    int                     cryptoIvOffset, digestOffset;
+    MV_U32                  config;
+    MV_CESA_COMMAND*        pCmd = pReq->pCmd;
+    MV_CESA_SA*             pSA;
+    MV_CESA_MBUF*           pMbuf;
+    MV_DMA_DESC*            pDmaDesc = pReq->dma[frag].pDmaFirst;
+    MV_U8*                  pSramBuf = cesaSramVirtPtr->buf;
+    int                     macTotalLen = 0;
+    int                     fixOffset, cryptoOffset, macOffset;
+
+    cesaStats.fragCount++;
+
+    sid = pReq->pCmd->sessionId;
+
+    pSA = &pCesaSAD[sid];
+
+    cryptoIvOffset = digestOffset = 0;
+    i = macDataSize = 0;
+    cryptoDataSize = 0;
+
+    /* First fragment processing */
+    if(pReq->fragMode == MV_CESA_FRAG_FIRST)
+    {
+        /* pReq->frags monitors processing of fragmented request between fragments */
+        pReq->frags.bufOffset = 0;
+        pReq->frags.cryptoSize = 0;
+        pReq->frags.macSize = 0;
+
+        config = pSA->config | (MV_CESA_FRAG_FIRST << MV_CESA_FRAG_MODE_OFFSET);
+
+        /* fixOffset can be not equal to zero only for FIRST fragment */
+        fixOffset = pReq->fixOffset;
+        /* For FIRST fragment crypto and mac offsets are taken from pCmd */
+        cryptoOffset = pCmd->cryptoOffset;
+        macOffset = pCmd->macOffset;
+
+        copySize = sizeof(cesaSramVirtPtr->buf) - pReq->fixOffset;
+
+        /* Find fragment size: Must meet all requirements for CRYPTO and MAC
+         * cryptoDataSize   - size of data will be encrypted/decrypted in this fragment
+         * macDataSize      - size of data will be signed/verified in this fragment
+         * copySize         - size of data will be copied from srcMbuf to SRAM and
+         *                  back to dstMbuf for this fragment
+         */
+        mvCesaFragSizeFind(pSA, pReq, cryptoOffset, macOffset,
+                        &copySize, &cryptoDataSize, &macDataSize);
+
+        if( (pSA->config & MV_CESA_OPERATION_MASK) !=
+                (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET))
+        {
+            /* CryptoIV special processing */
+            if( (pSA->config & MV_CESA_CRYPTO_MODE_MASK) ==
+                (MV_CESA_CRYPTO_CBC << MV_CESA_CRYPTO_MODE_BIT) )
+            {
+                /* In CBC mode for encode direction when IV from user */
+                if( (pCmd->ivFromUser) &&
+                    ((pSA->config & MV_CESA_DIRECTION_MASK) ==
+                        (MV_CESA_DIR_ENCODE << MV_CESA_DIRECTION_BIT)) )
+                {
+
+                    /* For Crypto Encode in CBC mode HW always takes IV from SRAM IVPointer,
+                    * (not from IVBufPointer). So when ivFromUser==1, we should copy IV from user place
+                    * in the buffer to SRAM IVPointer
+                    */
+                    i += mvCesaDmaCopyPrepare(pCmd->pSrc, cesaSramVirtPtr->cryptoIV, &pDmaDesc[i],
+                                    MV_FALSE, pCmd->ivOffset, pSA->cryptoIvSize, pCmd->skipFlush);
+                }
+
+                /* Special processing when IV is not located in the first fragment */
+                if(pCmd->ivOffset > (copySize - pSA->cryptoIvSize))
+                {
+                    /* Prepare dummy place for cryptoIV in SRAM */
+                    cryptoIvOffset = cesaSramVirtPtr->tempCryptoIV - mvCesaSramAddrGet();
+
+                    /* For Decryption: Copy IV value from pCmd->ivOffset to Special SRAM place */
+                    if((pSA->config & MV_CESA_DIRECTION_MASK) ==
+                            (MV_CESA_DIR_DECODE << MV_CESA_DIRECTION_BIT))
+                    {
+                        i += mvCesaDmaCopyPrepare(pCmd->pSrc, cesaSramVirtPtr->tempCryptoIV, &pDmaDesc[i],
+                                    MV_FALSE, pCmd->ivOffset, pSA->cryptoIvSize, pCmd->skipFlush);
+                    }
+                    else
+                    {
+                        /* For Encryption when IV is NOT from User: */
+                        /* Copy IV from SRAM to buffer (pCmd->ivOffset) */
+                        if(pCmd->ivFromUser == 0)
+                        {
+                            /* copy IV value from cryptoIV to Buffer (pCmd->ivOffset) */
+                            i += mvCesaDmaCopyPrepare(pCmd->pSrc, cesaSramVirtPtr->cryptoIV, &pDmaDesc[i],
+                                    MV_TRUE, pCmd->ivOffset, pSA->cryptoIvSize, pCmd->skipFlush);
+                        }
+                    }
+                }
+                else
+                {
+                    cryptoIvOffset = pCmd->ivOffset;
+                }
+            }
+        }
+
+        if( (pSA->config & MV_CESA_OPERATION_MASK) !=
+                (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET) )
+        {
+            /* MAC digest special processing on Decode direction */
+            if((pSA->config & MV_CESA_DIRECTION_MASK) ==
+                        (MV_CESA_DIR_DECODE << MV_CESA_DIRECTION_BIT))
+            {
+                /* Save digest from pCmd->digestOffset */
+                mvCesaCopyFromMbuf(pReq->frags.orgDigest,
+                               pCmd->pSrc, pCmd->digestOffset, pSA->digestSize);
+
+                /* If pCmd->digestOffset is not located on the first */
+                if(pCmd->digestOffset > (copySize - pSA->digestSize))
+                {
+                    MV_U8  digestZero[MV_CESA_MAX_DIGEST_SIZE];
+
+                    /* Set zeros to pCmd->digestOffset (DRAM) */
+                    memset(digestZero, 0, MV_CESA_MAX_DIGEST_SIZE);
+                    mvCesaCopyToMbuf(digestZero, pCmd->pSrc, pCmd->digestOffset, pSA->digestSize);
+
+                    /* Prepare dummy place for digest in SRAM */
+                    digestOffset = cesaSramVirtPtr->tempDigest - mvCesaSramAddrGet();
+                }
+                else
+                {
+                    digestOffset = pCmd->digestOffset;
+                }
+            }
+        }
+        /* Update SA in SRAM */
+        if(cesaLastSid != sid)
+        {
+            mvCesaSramSaUpdate(sid, &pDmaDesc[i]);
+            i++;
+        }
+
+        pReq->fragMode = MV_CESA_FRAG_MIDDLE;
+    }
+    else
+    {
+        /* Continue fragment */
+        fixOffset = 0;
+        cryptoOffset = 0;
+        macOffset = 0;
+        if( (pCmd->pSrc->mbufSize - pReq->frags.bufOffset) <= sizeof(cesaSramVirtPtr->buf))
+        {
+            /* Last fragment */
+            config = pSA->config | (MV_CESA_FRAG_LAST << MV_CESA_FRAG_MODE_OFFSET);
+            pReq->fragMode = MV_CESA_FRAG_LAST;
+            copySize = pCmd->pSrc->mbufSize - pReq->frags.bufOffset;
+
+            if( (pSA->config & MV_CESA_OPERATION_MASK) !=
+                (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET) )
+            {
+                macDataSize = pCmd->macLength - pReq->frags.macSize;
+
+                /* If pCmd->digestOffset is not located on last fragment */
+                if(pCmd->digestOffset < pReq->frags.bufOffset)
+                {
+                    /* Prepare dummy place for digest in SRAM */
+                    digestOffset = cesaSramVirtPtr->tempDigest - mvCesaSramAddrGet();
+                }
+                else
+                {
+                    digestOffset = pCmd->digestOffset - pReq->frags.bufOffset;
+                }
+                pReq->frags.newDigestOffset = digestOffset;
+                macTotalLen = pCmd->macLength;
+
+                /* HW can't calculate the Digest correctly for fragmented packets
+                 * in the following cases:
+                 *  - MV88F5182                                           ||
+                 *  - MV88F5181L when total macLength more that 16 Kbytes ||
+                 *  - total macLength more that 64 Kbytes
+                 */
+                if( (mvCtrlModelGet() == MV_5182_DEV_ID) ||
+                    ( (mvCtrlModelGet() == MV_5181_DEV_ID) &&
+                      (mvCtrlRevGet() >= MV_5181L_A0_REV)  &&
+                      (pCmd->macLength >= (1 << 14)) ) )
+                {
+                    return MV_TERMINATE;
+                }
+            }
+            if( (pSA->config & MV_CESA_OPERATION_MASK) !=
+                (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET) )
+            {
+                cryptoDataSize = pCmd->cryptoLength - pReq->frags.cryptoSize;
+            }
+
+            /* cryptoIvOffset - don't care */
+        }
+        else
+        {
+            /* WA for MV88F5182 SHA1 and MD5 fragmentation mode */
+            if( (mvCtrlModelGet() == MV_5182_DEV_ID) &&
+                (((pSA->config & MV_CESA_MAC_MODE_MASK) ==
+                    (MV_CESA_MAC_MD5 << MV_CESA_MAC_MODE_OFFSET)) ||
+                ((pSA->config & MV_CESA_MAC_MODE_MASK) ==
+                    (MV_CESA_MAC_SHA1 << MV_CESA_MAC_MODE_OFFSET))) )
+            {
+                pReq->frags.newDigestOffset = cesaSramVirtPtr->tempDigest - mvCesaSramAddrGet();
+                pReq->fragMode = MV_CESA_FRAG_LAST;
+
+                return MV_TERMINATE;
+            }
+            /* Middle fragment */
+            config = pSA->config | (MV_CESA_FRAG_MIDDLE << MV_CESA_FRAG_MODE_OFFSET);
+            copySize = sizeof(cesaSramVirtPtr->buf);
+            /* digestOffset and cryptoIvOffset - don't care */
+
+            /* Find fragment size */
+            mvCesaFragSizeFind(pSA, pReq, cryptoOffset, macOffset,
+                            &copySize, &cryptoDataSize, &macDataSize);
+        }
+    }
+    /********* Prepare DMA descriptors to copy from pSrc to SRAM *********/
+    pMbuf = pCmd->pSrc;
+    i += mvCesaDmaCopyPrepare(pMbuf, pSramBuf + fixOffset, &pDmaDesc[i],
+                                MV_FALSE, pReq->frags.bufOffset, copySize, pCmd->skipFlush);
+
+    /* Prepare CESA descriptor to copy from DRAM to SRAM by DMA */
+    mvCesaSramDescrBuild(config, frag,
+                cryptoOffset + fixOffset, cryptoIvOffset + fixOffset,
+                cryptoDataSize, macOffset + fixOffset,
+                digestOffset + fixOffset, macDataSize, macTotalLen,
+                pReq, &pDmaDesc[i]);
+    i++;
+
+   /* Add special descriptor Ownership for CPU */
+    pDmaDesc[i].byteCnt = 0;
+    pDmaDesc[i].phySrcAdd = 0;
+    pDmaDesc[i].phyDestAdd = 0;
+    i++;
+
+    /********* Prepare DMA descriptors to copy from SRAM to pDst *********/
+    pMbuf = pCmd->pDst;
+    i += mvCesaDmaCopyPrepare(pMbuf, pSramBuf + fixOffset, &pDmaDesc[i],
+                                MV_TRUE, pReq->frags.bufOffset, copySize, pCmd->skipFlush);
+
+    /* Next field of Last DMA descriptor must be NULL */
+    pDmaDesc[i-1].phyNextDescPtr = 0;
+    pReq->dma[frag].pDmaLast = &pDmaDesc[i-1];
+    mvOsCacheFlush(NULL, pReq->dma[frag].pDmaFirst,
+                    i*sizeof(MV_DMA_DESC));
+
+    /*mvCesaDebugDescriptor(&cesaSramVirtPtr->desc[frag]);*/
+
+    pReq->frags.bufOffset += copySize;
+    pReq->frags.cryptoSize += cryptoDataSize;
+    pReq->frags.macSize += macDataSize;
+
+    return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvCesaReqProcess - Process regular (Non-fragmented) request
+*
+* DESCRIPTION:
+*       This function processes the whole (not fragmented) request
+*
+* INPUT:
+*       MV_CESA_REQ* pReq   - Pointer to the request in the request queue.
+*
+* RETURN:
+*       MV_OK   - The request is successfully passed to HW for processing.
+*       Other   - Failure. The request will not be processed
+*
+*******************************************************************************/
+static MV_STATUS   mvCesaReqProcess(MV_CESA_REQ* pReq)
+{
+    MV_CESA_MBUF    *pMbuf;
+    MV_DMA_DESC     *pDmaDesc;
+    MV_U8           *pSramBuf;
+    int             sid, i, fixOffset;
+    MV_CESA_SA      *pSA;
+    MV_CESA_COMMAND *pCmd = pReq->pCmd;
+
+    cesaStats.procCount++;
+
+    sid = pCmd->sessionId;
+    pSA = &pCesaSAD[sid];
+    pDmaDesc = pReq->dma[0].pDmaFirst;
+    pSramBuf = cesaSramVirtPtr->buf;
+    fixOffset = pReq->fixOffset;
+
+/*
+    mvOsPrintf("mvCesaReqProcess: sid=%d, pSA=%p, pDmaDesc=%p, pSramBuf=%p\n",
+                sid, pSA, pDmaDesc, pSramBuf);
+*/
+    i = 0;
+
+    /* Crypto IV Special processing in CBC mode for Encryption direction */
+    if( ((pSA->config & MV_CESA_OPERATION_MASK) != (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET)) &&
+        ((pSA->config & MV_CESA_CRYPTO_MODE_MASK) == (MV_CESA_CRYPTO_CBC << MV_CESA_CRYPTO_MODE_BIT)) &&
+        ((pSA->config & MV_CESA_DIRECTION_MASK) == (MV_CESA_DIR_ENCODE << MV_CESA_DIRECTION_BIT)) &&
+        (pCmd->ivFromUser) )
+    {
+        /* For Crypto Encode in CBC mode HW always takes IV from SRAM IVPointer,
+         * (not from IVBufPointer). So when ivFromUser==1, we should copy IV from user place
+         * in the buffer to SRAM IVPointer
+         */
+        i += mvCesaDmaCopyPrepare(pCmd->pSrc, cesaSramVirtPtr->cryptoIV, &pDmaDesc[i],
+                                    MV_FALSE, pCmd->ivOffset, pSA->cryptoIvSize, pCmd->skipFlush);
+    }
+
+    /* Update SA in SRAM */
+    if(cesaLastSid != sid)
+    {
+        mvCesaSramSaUpdate(sid, &pDmaDesc[i]);
+        i++;
+    }
+
+    /********* Prepare DMA descriptors to copy from pSrc to SRAM *********/
+    pMbuf = pCmd->pSrc;
+    i += mvCesaDmaCopyPrepare(pMbuf, pSramBuf + fixOffset, &pDmaDesc[i],
+                                MV_FALSE, 0, pMbuf->mbufSize, pCmd->skipFlush);
+
+    /* Prepare Security Accelerator descriptor to SRAM words 0 - 7 */
+    mvCesaSramDescrBuild(pSA->config, 0, pCmd->cryptoOffset + fixOffset,
+                        pCmd->ivOffset + fixOffset, pCmd->cryptoLength,
+                        pCmd->macOffset + fixOffset, pCmd->digestOffset + fixOffset,
+                        pCmd->macLength, pCmd->macLength, pReq, &pDmaDesc[i]);
+    i++;
+
+   /* Add special descriptor Ownership for CPU */
+    pDmaDesc[i].byteCnt = 0;
+    pDmaDesc[i].phySrcAdd = 0;
+    pDmaDesc[i].phyDestAdd = 0;
+    i++;
+
+    /********* Prepare DMA descriptors to copy from SRAM to pDst *********/
+    pMbuf = pCmd->pDst;
+    i += mvCesaDmaCopyPrepare(pMbuf, pSramBuf + fixOffset, &pDmaDesc[i],
+                                MV_TRUE, 0, pMbuf->mbufSize, pCmd->skipFlush);
+
+    /* Next field of Last DMA descriptor must be NULL */
+    pDmaDesc[i-1].phyNextDescPtr = 0;
+    pReq->dma[0].pDmaLast = &pDmaDesc[i-1];
+    mvOsCacheFlush(NULL, pReq->dma[0].pDmaFirst, i*sizeof(MV_DMA_DESC));
+
+    return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvCesaSramDescrBuild - Set CESA descriptor in SRAM
+*
+* DESCRIPTION:
+*       This function builds CESA descriptor in SRAM from all Command parameters
+*
+*
+* INPUT:
+*       int     chan            - CESA channel uses the descriptor
+*       MV_U32  config          - 32 bits of WORD_0 in CESA descriptor structure
+*       int     cryptoOffset    - Offset from the beginning of SRAM buffer where
+*                               data for encryption/decription is started.
+*       int     ivOffset        - Offset of crypto IV from the SRAM base. Valid only
+*                               for first fragment.
+*       int     cryptoLength    - Size (in bytes) of data for encryption/descryption
+*                               operation on this fragment.
+*       int     macOffset       - Offset from the beginning of SRAM buffer where
+*                               data for Authentication is started
+*       int     digestOffset    - Offset from the beginning of SRAM buffer where
+*                               digest is located. Valid for first and last fragments.
+*       int     macLength       - Size (in bytes) of data for Authentication
+*                               operation on this fragment.
+*       int     macTotalLen     - Toatl size (in bytes) of data for Authentication
+*                               operation on the whole request (packet). Valid for
+*                               last fragment only.
+*
+* RETURN:   None
+*
+*******************************************************************************/
+static void    mvCesaSramDescrBuild(MV_U32 config, int frag,
+                             int cryptoOffset, int ivOffset, int cryptoLength,
+                             int macOffset, int digestOffset, int macLength,
+                             int macTotalLen, MV_CESA_REQ* pReq, MV_DMA_DESC* pDmaDesc)
+{
+    MV_CESA_DESC*   pCesaDesc = &pReq->pCesaDesc[frag];
+    MV_CESA_DESC*   pSramDesc = pSramDesc = &cesaSramVirtPtr->desc;
+    MV_U16          sramBufOffset = (MV_U16)((MV_U8*)cesaSramVirtPtr->buf - mvCesaSramAddrGet());
+
+    pCesaDesc->config = MV_32BIT_LE(config);
+
+    if( (config & MV_CESA_OPERATION_MASK) !=
+         (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET) )
+    {
+        /* word 1 */
+        pCesaDesc->cryptoSrcOffset = MV_16BIT_LE(sramBufOffset + cryptoOffset);
+        pCesaDesc->cryptoDstOffset = MV_16BIT_LE(sramBufOffset + cryptoOffset);
+        /* word 2 */
+        pCesaDesc->cryptoDataLen = MV_16BIT_LE(cryptoLength);
+        /* word 3 */
+        pCesaDesc->cryptoKeyOffset = MV_16BIT_LE((MV_U16)(cesaSramVirtPtr->sramSA.cryptoKey -
+                                                            mvCesaSramAddrGet()));
+        /* word 4 */
+        pCesaDesc->cryptoIvOffset  = MV_16BIT_LE((MV_U16)(cesaSramVirtPtr->cryptoIV -
+                                                            mvCesaSramAddrGet()));
+        pCesaDesc->cryptoIvBufOffset = MV_16BIT_LE(sramBufOffset + ivOffset);
+    }
+
+    if( (config & MV_CESA_OPERATION_MASK) !=
+         (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET) )
+    {
+        /* word 5 */
+        pCesaDesc->macSrcOffset = MV_16BIT_LE(sramBufOffset + macOffset);
+        pCesaDesc->macTotalLen = MV_16BIT_LE(macTotalLen);
+
+        /* word 6 */
+        pCesaDesc->macDigestOffset = MV_16BIT_LE(sramBufOffset + digestOffset);
+        pCesaDesc->macDataLen = MV_16BIT_LE(macLength);
+
+        /* word 7 */
+        pCesaDesc->macInnerIvOffset = MV_16BIT_LE((MV_U16)(cesaSramVirtPtr->sramSA.macInnerIV -
+                                 mvCesaSramAddrGet()));
+        pCesaDesc->macOuterIvOffset = MV_16BIT_LE((MV_U16)(cesaSramVirtPtr->sramSA.macOuterIV -
+                                 mvCesaSramAddrGet()));
+    }
+    /* Prepare DMA descriptor to CESA descriptor from DRAM to SRAM */
+    pDmaDesc->phySrcAdd = MV_32BIT_LE(mvCesaVirtToPhys(&pReq->cesaDescBuf, pCesaDesc));
+    pDmaDesc->phyDestAdd = MV_32BIT_LE(mvCesaSramVirtToPhys(NULL, (MV_U8*)pSramDesc));
+    pDmaDesc->byteCnt = MV_32BIT_LE(sizeof(MV_CESA_DESC) | BIT31);
+
+    /* flush Source buffer */
+    mvOsCacheFlush(NULL, pCesaDesc, sizeof(MV_CESA_DESC));
+}
+
+/*******************************************************************************
+* mvCesaSramSaUpdate - Move required SA information to SRAM if needed.
+*
+* DESCRIPTION:
+*   Copy to SRAM values of the required SA.
+*
+*
+* INPUT:
+*       short       sid          - Session ID needs SRAM Cache update
+*       MV_DMA_DESC *pDmaDesc   - Pointer to DMA descriptor used to
+*                                copy SA values from DRAM to SRAM.
+*
+* RETURN:
+*       MV_OK           - Cache entry for this SA copied to SRAM.
+*       MV_NO_CHANGE    - Cache entry for this SA already exist in SRAM
+*
+*******************************************************************************/
+static INLINE void   mvCesaSramSaUpdate(short sid, MV_DMA_DESC *pDmaDesc)
+{
+    MV_CESA_SA      *pSA = &pCesaSAD[sid];
+
+    /* Prepare DMA descriptor to Copy CACHE_SA from SA database in DRAM to SRAM */
+     pDmaDesc->byteCnt = MV_32BIT_LE(sizeof(MV_CESA_SRAM_SA) | BIT31);
+    pDmaDesc->phySrcAdd = MV_32BIT_LE(mvCesaVirtToPhys(&cesaSramSaBuf, pSA->pSramSA));
+     pDmaDesc->phyDestAdd =
+          MV_32BIT_LE(mvCesaSramVirtToPhys(NULL, (MV_U8*)&cesaSramVirtPtr->sramSA));
+
+    /* Source buffer is already flushed during OpenSession*/
+    /*mvOsCacheFlush(NULL, &pSA->sramSA, sizeof(MV_CESA_SRAM_SA));*/
+}
+
+/*******************************************************************************
+* mvCesaDmaCopyPrepare - prepare DMA descriptor list to copy data presented by
+*                       Mbuf structure from DRAM to SRAM
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*       MV_CESA_MBUF*   pMbuf       - pointer to Mbuf structure contains request
+*                                   data in DRAM
+*       MV_U8*          pSramBuf    - pointer to buffer in SRAM where data should
+*                                   be copied to.
+*       MV_DMA_DESC*    pDmaDesc   - pointer to first DMA descriptor for this copy.
+*                                   The function set number of DMA descriptors needed
+*                                   to copy the copySize bytes from Mbuf.
+*       MV_BOOL         isToMbuf    - Copy direction.
+*                                   MV_TRUE means copy from SRAM buffer to Mbuf in DRAM.
+*                                   MV_FALSE means copy from Mbuf in DRAM to SRAM buffer.
+*       int             offset      - Offset in the Mbuf structure that copy should be
+*                                   started from.
+*       int             copySize    - Size of data should be copied.
+*
+* RETURN:
+*       int  - number of DMA descriptors used for the copy.
+*
+*******************************************************************************/
+#ifndef MV_NETBSD
+static INLINE int    mvCesaDmaCopyPrepare(MV_CESA_MBUF* pMbuf, MV_U8* pSramBuf,
+                        MV_DMA_DESC* pDmaDesc, MV_BOOL isToMbuf,
+                        int offset, int copySize, MV_BOOL skipFlush)
+{
+    int     bufOffset, bufSize, size, frag, i;
+    MV_U8*  pBuf;
+
+    i = 0;
+
+    /* Calculate start place for copy: fragment number and offset in the fragment */
+    frag = mvCesaMbufOffset(pMbuf, offset, &bufOffset);
+    bufSize = pMbuf->pFrags[frag].bufSize - bufOffset;
+    pBuf = pMbuf->pFrags[frag].bufVirtPtr + bufOffset;
+
+    /* Size accumulate total copy size */
+    size = 0;
+
+    /* Create DMA lists to copy mBuf from pSrc to SRAM */
+    while(size < copySize)
+    {
+        /* Find copy size for each DMA descriptor */
+        bufSize = MV_MIN(bufSize, (copySize - size));
+        pDmaDesc[i].byteCnt = MV_32BIT_LE(bufSize | BIT31);
+        if(isToMbuf)
+        {
+            pDmaDesc[i].phyDestAdd = MV_32BIT_LE(mvOsIoVirtToPhy(NULL, pBuf));
+            pDmaDesc[i].phySrcAdd  =
+                MV_32BIT_LE(mvCesaSramVirtToPhys(NULL, (pSramBuf + size)));
+            /* invalidate the buffer */
+	    if(skipFlush == MV_FALSE)
+		mvOsCacheInvalidate(NULL, pBuf, bufSize);
+        }
+        else
+        {
+            pDmaDesc[i].phySrcAdd = MV_32BIT_LE(mvOsIoVirtToPhy(NULL, pBuf));
+            pDmaDesc[i].phyDestAdd =
+                MV_32BIT_LE(mvCesaSramVirtToPhys(NULL, (pSramBuf + size)));
+            /* flush the buffer */
+	    if(skipFlush == MV_FALSE)
+		mvOsCacheFlush(NULL, pBuf, bufSize);
+        }
+
+        /* Count number of used DMA descriptors */
+        i++;
+        size += bufSize;
+
+        /* go to next fragment in the Mbuf */
+        frag++;
+        pBuf = pMbuf->pFrags[frag].bufVirtPtr;
+        bufSize = pMbuf->pFrags[frag].bufSize;
+    }
+    return i;
+}
+#else /* MV_NETBSD */
+static int    mvCesaDmaCopyPrepare(MV_CESA_MBUF* pMbuf, MV_U8* pSramBuf,
+                        MV_DMA_DESC* pDmaDesc, MV_BOOL isToMbuf,
+                        int offset, int copySize, MV_BOOL skipFlush)
+{
+	int bufOffset, bufSize, thisSize, size, frag, i;
+	MV_ULONG bufPhys, sramPhys;
+    MV_U8*  pBuf;
+
+	/*
+	 * Calculate start place for copy: fragment number and offset in
+	 * the fragment
+	 */
+    frag = mvCesaMbufOffset(pMbuf, offset, &bufOffset);
+
+	/*
+	 * Get SRAM physical address only once. We can update it in-place
+	 * as we build the descriptor chain.
+	 */
+	sramPhys = mvCesaSramVirtToPhys(NULL, pSramBuf);
+
+	/*
+	 * 'size' accumulates total copy size, 'i' counts desccriptors.
+	 */
+	size = i = 0;
+
+	/* Create DMA lists to copy mBuf from pSrc to SRAM */
+	while (size < copySize) {
+		/*
+		 * Calculate # of bytes to copy from the current fragment,
+		 * and the pointer to the start of data
+		 */
+    bufSize = pMbuf->pFrags[frag].bufSize - bufOffset;
+    pBuf = pMbuf->pFrags[frag].bufVirtPtr + bufOffset;
+		bufOffset = 0;	/* First frag may be non-zero */
+		frag++;
+
+		/*
+		 * As long as there is data in the current fragment...
+		 */
+		while (bufSize > 0) {
+			/*
+			 * Ensure we don't cross an MMU page boundary.
+			 * XXX: This is NetBSD-specific, but it is a
+			 * quick and dirty way to fix the problem.
+			 * A true HAL would rely on the OS-specific
+			 * driver to do this...
+			 */
+			thisSize = PAGE_SIZE -
+			    (((MV_ULONG)pBuf) & (PAGE_SIZE - 1));
+			thisSize = MV_MIN(bufSize, thisSize);
+			/*
+			 * Make sure we don't copy more than requested
+			 */
+			if (thisSize > (copySize - size)) {
+				thisSize = copySize - size;
+				bufSize = 0;
+			}
+
+			/*
+			 * Physicall address of this fragment
+			 */
+			bufPhys = MV_32BIT_LE(mvOsIoVirtToPhy(NULL, pBuf));
+
+			/*
+			 * Set up the descriptor
+			 */
+		        pDmaDesc[i].byteCnt = MV_32BIT_LE(thisSize | BIT31);
+		        if(isToMbuf) {
+				pDmaDesc[i].phyDestAdd = bufPhys;
+				pDmaDesc[i].phySrcAdd  = MV_32BIT_LE(sramPhys);
+            /* invalidate the buffer */
+				if(skipFlush == MV_FALSE)
+					mvOsCacheInvalidate(NULL, pBuf, thisSize);
+			} else {
+				pDmaDesc[i].phySrcAdd = bufPhys;
+				pDmaDesc[i].phyDestAdd = MV_32BIT_LE(sramPhys);
+            /* flush the buffer */
+				if(skipFlush == MV_FALSE)
+					mvOsCacheFlush(NULL, pBuf, thisSize);
+			}
+
+			pDmaDesc[i].phyNextDescPtr =
+			    MV_32BIT_LE(mvOsIoVirtToPhy(NULL,(&pDmaDesc[i+1])));
+
+        /* flush the DMA desc */
+        mvOsCacheFlush(NULL, &pDmaDesc[i], sizeof(MV_DMA_DESC));
+
+			/* Update state */
+			bufSize -= thisSize;
+			sramPhys += thisSize;
+			pBuf += thisSize;
+			size += thisSize;
+        i++;
+		}
+	}
+
+    return i;
+}
+#endif /* MV_NETBSD */
+/*******************************************************************************
+* mvCesaHmacIvGet - Calculate Inner and Outter values from HMAC key
+*
+* DESCRIPTION:
+*       This function calculate Inner and Outer values used for HMAC algorithm.
+*       This operation allows improve performance fro the whole HMAC processing.
+*
+* INPUT:
+*       MV_CESA_MAC_MODE    macMode     - Authentication mode: HMAC_MD5 or HMAC_SHA1.
+*       unsigned char       key[]       - Pointer to HMAC key.
+*       int                 keyLength   - Size of HMAC key (maximum 64 bytes)
+*
+* OUTPUT:
+*       unsigned char       innerIV[]   - HASH(key^inner)
+*       unsigned char       outerIV[]   - HASH(key^outter)
+*
+* RETURN:   None
+*
+*******************************************************************************/
+static void    mvCesaHmacIvGet(MV_CESA_MAC_MODE macMode, unsigned char key[], int keyLength,
+                     unsigned char innerIV[], unsigned char outerIV[])
+{
+    unsigned char   inner[MV_CESA_MAX_MAC_KEY_LENGTH];
+    unsigned char   outer[MV_CESA_MAX_MAC_KEY_LENGTH];
+    int             i, digestSize = 0;
+#if defined(MV_CPU_LE) || defined(MV_PPC)
+    MV_U32          swapped32, val32, *pVal32;
+#endif
+    for(i=0; i<keyLength; i++)
+    {
+        inner[i] = 0x36 ^ key[i];
+        outer[i] = 0x5c ^ key[i];
+    }
+
+    for(i=keyLength; i<MV_CESA_MAX_MAC_KEY_LENGTH; i++)
+    {
+        inner[i] = 0x36;
+        outer[i] = 0x5c;
+    }
+    if(macMode == MV_CESA_MAC_HMAC_MD5)
+    {
+        MV_MD5_CONTEXT  ctx;
+
+        mvMD5Init(&ctx);
+        mvMD5Update(&ctx, inner, MV_CESA_MAX_MAC_KEY_LENGTH);
+
+        memcpy(innerIV, ctx.buf, MV_CESA_MD5_DIGEST_SIZE);
+        memset(&ctx, 0, sizeof(ctx));
+
+        mvMD5Init(&ctx);
+        mvMD5Update(&ctx, outer, MV_CESA_MAX_MAC_KEY_LENGTH);
+        memcpy(outerIV, ctx.buf, MV_CESA_MD5_DIGEST_SIZE);
+        memset(&ctx, 0, sizeof(ctx));
+        digestSize = MV_CESA_MD5_DIGEST_SIZE;
+    }
+    else if(macMode == MV_CESA_MAC_HMAC_SHA1)
+    {
+        MV_SHA1_CTX  ctx;
+
+        mvSHA1Init(&ctx);
+        mvSHA1Update(&ctx, inner, MV_CESA_MAX_MAC_KEY_LENGTH);
+        memcpy(innerIV, ctx.state, MV_CESA_SHA1_DIGEST_SIZE);
+        memset(&ctx, 0, sizeof(ctx));
+
+        mvSHA1Init(&ctx);
+        mvSHA1Update(&ctx, outer, MV_CESA_MAX_MAC_KEY_LENGTH);
+        memcpy(outerIV, ctx.state, MV_CESA_SHA1_DIGEST_SIZE);
+        memset(&ctx, 0, sizeof(ctx));
+        digestSize = MV_CESA_SHA1_DIGEST_SIZE;
+    }
+    else
+    {
+        mvOsPrintf("hmacGetIV: Unexpected macMode %d\n", macMode);
+    }
+#if defined(MV_CPU_LE) || defined(MV_PPC)
+    /* 32 bits Swap of Inner and Outer values */
+    pVal32 = (MV_U32*)innerIV;
+    for(i=0; i<digestSize/4; i++)
+    {
+        val32 = *pVal32;
+        swapped32 = MV_BYTE_SWAP_32BIT(val32);
+        *pVal32 = swapped32;
+        pVal32++;
+    }
+    pVal32 = (MV_U32*)outerIV;
+    for(i=0; i<digestSize/4; i++)
+    {
+        val32 = *pVal32;
+        swapped32 = MV_BYTE_SWAP_32BIT(val32);
+        *pVal32 = swapped32;
+        pVal32++;
+    }
+#endif  /* defined(MV_CPU_LE) || defined(MV_PPC) */
+}
+
+
+/*******************************************************************************
+* mvCesaFragSha1Complete - Complete SHA1 authentication started by HW using SW
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*       MV_CESA_MBUF*   pMbuf           - Pointer to Mbuf structure where data
+*                                       for SHA1 is placed.
+*       int             offset          - Offset in the Mbuf structure where
+*                                       unprocessed data for SHA1 is started.
+*       MV_U8*          pOuterIV        - Pointer to OUTER for this session.
+*                                       If pOuterIV==NULL - MAC mode is HASH_SHA1
+*                                       If pOuterIV!=NULL - MAC mode is HMAC_SHA1
+*       int             macLeftSize     - Size of unprocessed data for SHA1.
+*       int             macTotalSize    - Total size of data for SHA1 in the
+*                                       request (processed + unprocessed)
+*
+* OUTPUT:
+*       MV_U8*     pDigest  - Pointer to place where calculated Digest will
+*                           be stored.
+*
+* RETURN:   None
+*
+*******************************************************************************/
+static void    mvCesaFragSha1Complete(MV_CESA_MBUF* pMbuf, int offset,
+                                      MV_U8* pOuterIV, int macLeftSize,
+                                      int macTotalSize, MV_U8* pDigest)
+{
+    MV_SHA1_CTX     ctx;
+    MV_U8           *pData;
+    int             i, frag, fragOffset, size;
+
+    /* Read temporary Digest from HW */
+    for(i=0; i<MV_CESA_SHA1_DIGEST_SIZE/4; i++)
+    {
+        ctx.state[i] = MV_REG_READ(MV_CESA_AUTH_INIT_VAL_DIGEST_REG(i));
+    }
+    /* Initialize MV_SHA1_CTX structure */
+    memset(ctx.buffer, 0, 64);
+    /* Set count[0] in bits. 32 bits is enough for 512 MBytes */
+    /* so count[1] is always 0 */
+    ctx.count[0] = ((macTotalSize - macLeftSize) * 8);
+    ctx.count[1] = 0;
+
+    /* If HMAC - add size of Inner block (64 bytes) ro count[0] */
+    if(pOuterIV != NULL)
+        ctx.count[0] += (64 * 8);
+
+    /* Get place of unprocessed data in the Mbuf structure */
+    frag = mvCesaMbufOffset(pMbuf, offset, &fragOffset);
+    if(frag == MV_INVALID)
+    {
+        mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
+        return;
+    }
+
+    pData = pMbuf->pFrags[frag].bufVirtPtr + fragOffset;
+    size = pMbuf->pFrags[frag].bufSize - fragOffset;
+
+    /* Complete Inner part */
+    while(macLeftSize > 0)
+    {
+        if(macLeftSize <= size)
+        {
+            mvSHA1Update(&ctx, pData, macLeftSize);
+            break;
+        }
+        mvSHA1Update(&ctx, pData, size);
+        macLeftSize -= size;
+        frag++;
+        pData = pMbuf->pFrags[frag].bufVirtPtr;
+        size = pMbuf->pFrags[frag].bufSize;
+    }
+    mvSHA1Final(pDigest, &ctx);
+/*
+    mvOsPrintf("mvCesaFragSha1Complete: pOuterIV=%p, macLeftSize=%d, macTotalSize=%d\n",
+                pOuterIV, macLeftSize, macTotalSize);
+    mvDebugMemDump(pDigest, MV_CESA_SHA1_DIGEST_SIZE, 1);
+*/
+
+    if(pOuterIV != NULL)
+    {
+        /* If HMAC - Complete Outer part */
+        for(i=0; i<MV_CESA_SHA1_DIGEST_SIZE/4; i++)
+        {
+#if defined(MV_CPU_LE) || defined(MV_ARM)
+            ctx.state[i] = MV_BYTE_SWAP_32BIT(((MV_U32*)pOuterIV)[i]);
+#else
+	    ctx.state[i] = ((MV_U32*)pOuterIV)[i];
+#endif
+	}
+        memset(ctx.buffer, 0, 64);
+
+        ctx.count[0] = 64*8;
+        ctx.count[1] = 0;
+        mvSHA1Update(&ctx, pDigest, MV_CESA_SHA1_DIGEST_SIZE);
+        mvSHA1Final(pDigest, &ctx);
+    }
+}
+
+/*******************************************************************************
+* mvCesaFragMd5Complete - Complete MD5 authentication started by HW using SW
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*       MV_CESA_MBUF*   pMbuf           - Pointer to Mbuf structure where data
+*                                       for SHA1 is placed.
+*       int             offset          - Offset in the Mbuf structure where
+*                                       unprocessed data for MD5 is started.
+*       MV_U8*          pOuterIV        - Pointer to OUTER for this session.
+*                                       If pOuterIV==NULL - MAC mode is HASH_MD5
+*                                       If pOuterIV!=NULL - MAC mode is HMAC_MD5
+*       int             macLeftSize     - Size of unprocessed data for MD5.
+*       int             macTotalSize    - Total size of data for MD5 in the
+*                                       request (processed + unprocessed)
+*
+* OUTPUT:
+*       MV_U8*     pDigest  - Pointer to place where calculated Digest will
+*                           be stored.
+*
+* RETURN:   None
+*
+*******************************************************************************/
+static void    mvCesaFragMd5Complete(MV_CESA_MBUF* pMbuf, int offset,
+                                     MV_U8* pOuterIV, int macLeftSize,
+                                     int macTotalSize, MV_U8* pDigest)
+{
+    MV_MD5_CONTEXT  ctx;
+    MV_U8           *pData;
+    int             i, frag, fragOffset, size;
+
+    /* Read temporary Digest from HW */
+    for(i=0; i<MV_CESA_MD5_DIGEST_SIZE/4; i++)
+    {
+        ctx.buf[i] = MV_REG_READ(MV_CESA_AUTH_INIT_VAL_DIGEST_REG(i));
+    }
+    memset(ctx.in, 0, 64);
+
+    /* Set count[0] in bits. 32 bits is enough for 512 MBytes */
+    /* so count[1] is always 0 */
+    ctx.bits[0] = ((macTotalSize - macLeftSize) * 8);
+    ctx.bits[1] = 0;
+
+    /* If HMAC - add size of Inner block (64 bytes) ro count[0] */
+    if(pOuterIV != NULL)
+        ctx.bits[0] += (64 * 8);
+
+    frag = mvCesaMbufOffset(pMbuf, offset, &fragOffset);
+    if(frag == MV_INVALID)
+    {
+        mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
+        return;
+    }
+
+    pData = pMbuf->pFrags[frag].bufVirtPtr + fragOffset;
+    size = pMbuf->pFrags[frag].bufSize - fragOffset;
+
+    /* Complete Inner part */
+    while(macLeftSize > 0)
+    {
+        if(macLeftSize <= size)
+        {
+            mvMD5Update(&ctx, pData, macLeftSize);
+            break;
+        }
+        mvMD5Update(&ctx, pData, size);
+        macLeftSize -= size;
+        frag++;
+        pData = pMbuf->pFrags[frag].bufVirtPtr;
+        size = pMbuf->pFrags[frag].bufSize;
+    }
+    mvMD5Final(pDigest, &ctx);
+
+/*
+    mvOsPrintf("mvCesaFragMd5Complete: pOuterIV=%p, macLeftSize=%d, macTotalSize=%d\n",
+                pOuterIV, macLeftSize, macTotalSize);
+    mvDebugMemDump(pDigest, MV_CESA_MD5_DIGEST_SIZE, 1);
+*/
+    if(pOuterIV != NULL)
+    {
+        /* Complete Outer part */
+        for(i=0; i<MV_CESA_MD5_DIGEST_SIZE/4; i++)
+        {
+#if defined(MV_CPU_LE) || defined(MV_ARM)
+            ctx.buf[i] = MV_BYTE_SWAP_32BIT(((MV_U32*)pOuterIV)[i]);
+#else
+	    ctx.buf[i] = ((MV_U32*)pOuterIV)[i];
+#endif
+	}
+        memset(ctx.in, 0, 64);
+
+        ctx.bits[0] = 64*8;
+        ctx.bits[1] = 0;
+        mvMD5Update(&ctx, pDigest, MV_CESA_MD5_DIGEST_SIZE);
+        mvMD5Final(pDigest, &ctx);
+    }
+}
+
+/*******************************************************************************
+* mvCesaFragAuthComplete -
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*       MV_CESA_REQ*    pReq,
+*       MV_CESA_SA*     pSA,
+*       int             macDataSize
+*
+* RETURN:
+*       MV_STATUS
+*
+*******************************************************************************/
+static MV_STATUS   mvCesaFragAuthComplete(MV_CESA_REQ* pReq, MV_CESA_SA* pSA,
+                               int macDataSize)
+{
+    MV_CESA_COMMAND*        pCmd = pReq->pCmd;
+    MV_U8*                  pDigest;
+    MV_CESA_MAC_MODE        macMode;
+    MV_U8*                  pOuterIV = NULL;
+
+    /* Copy data from Source fragment to Destination */
+    if(pCmd->pSrc != pCmd->pDst)
+    {
+        mvCesaMbufCopy(pCmd->pDst, pReq->frags.bufOffset,
+                       pCmd->pSrc, pReq->frags.bufOffset, macDataSize);
+    }
+
+/*
+    mvCesaCopyFromMbuf(cesaSramVirtPtr->buf[0], pCmd->pSrc, pReq->frags.bufOffset, macDataSize);
+    mvCesaCopyToMbuf(cesaSramVirtPtr->buf[0], pCmd->pDst, pReq->frags.bufOffset, macDataSize);
+*/
+    pDigest = (mvCesaSramAddrGet() + pReq->frags.newDigestOffset);
+
+    macMode = (pSA->config & MV_CESA_MAC_MODE_MASK) >> MV_CESA_MAC_MODE_OFFSET;
+/*
+    mvOsPrintf("macDataSize=%d, macLength=%d, digestOffset=%d, macMode=%d\n",
+            macDataSize, pCmd->macLength, pCmd->digestOffset, macMode);
+*/
+    switch(macMode)
+    {
+        case MV_CESA_MAC_HMAC_MD5:
+            pOuterIV = pSA->pSramSA->macOuterIV;
+
+        case MV_CESA_MAC_MD5:
+            mvCesaFragMd5Complete(pCmd->pDst, pReq->frags.bufOffset, pOuterIV,
+                               macDataSize, pCmd->macLength, pDigest);
+        break;
+
+        case MV_CESA_MAC_HMAC_SHA1:
+            pOuterIV = pSA->pSramSA->macOuterIV;
+
+        case MV_CESA_MAC_SHA1:
+            mvCesaFragSha1Complete(pCmd->pDst, pReq->frags.bufOffset, pOuterIV,
+                               macDataSize, pCmd->macLength, pDigest);
+        break;
+
+        default:
+            mvOsPrintf("mvCesaFragAuthComplete: Unexpected macMode %d\n", macMode);
+            return MV_BAD_PARAM;
+    }
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaCtrModeInit -
+*
+* DESCRIPTION:
+*
+*
+* INPUT: NONE
+*
+*
+* RETURN:
+*       MV_CESA_COMMAND*
+*
+*******************************************************************************/
+static MV_CESA_COMMAND*    mvCesaCtrModeInit(void)
+{
+    MV_CESA_MBUF    *pMbuf;
+    MV_U8           *pBuf;
+    MV_CESA_COMMAND *pCmd;
+
+    pBuf = mvOsMalloc(sizeof(MV_CESA_COMMAND) +
+                      sizeof(MV_CESA_MBUF) + sizeof(MV_BUF_INFO) + 100);
+    if(pBuf == NULL)
+    {
+        mvOsPrintf("mvCesaSessionOpen: Can't allocate %u bytes for CTR Mode\n",
+                    sizeof(MV_CESA_COMMAND) + sizeof(MV_CESA_MBUF) + sizeof(MV_BUF_INFO) );
+        return NULL;
+    }
+    pCmd = (MV_CESA_COMMAND*)pBuf;
+    pBuf += sizeof(MV_CESA_COMMAND);
+
+    pMbuf = (MV_CESA_MBUF*)pBuf;
+    pBuf += sizeof(MV_CESA_MBUF);
+
+    pMbuf->pFrags = (MV_BUF_INFO*)pBuf;
+
+    pMbuf->numFrags = 1;
+    pCmd->pSrc = pMbuf;
+    pCmd->pDst = pMbuf;
+/*
+    mvOsPrintf("CtrModeInit: pCmd=%p, pSrc=%p, pDst=%p, pFrags=%p\n",
+                pCmd, pCmd->pSrc, pCmd->pDst,
+                pMbuf->pFrags);
+*/
+    return pCmd;
+}
+
+/*******************************************************************************
+* mvCesaCtrModePrepare -
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*       MV_CESA_COMMAND *pCtrModeCmd, MV_CESA_COMMAND *pCmd
+*
+* RETURN:
+*       MV_STATUS
+*
+*******************************************************************************/
+static MV_STATUS    mvCesaCtrModePrepare(MV_CESA_COMMAND *pCtrModeCmd, MV_CESA_COMMAND *pCmd)
+{
+    MV_CESA_MBUF    *pMbuf;
+    MV_U8           *pBuf, *pIV;
+    MV_U32          counter, *pCounter;
+    int             cryptoSize = MV_ALIGN_UP(pCmd->cryptoLength, MV_CESA_AES_BLOCK_SIZE);
+/*
+    mvOsPrintf("CtrModePrepare: pCmd=%p, pCtrSrc=%p, pCtrDst=%p, pOrgCmd=%p, pOrgSrc=%p, pOrgDst=%p\n",
+                pCmd, pCmd->pSrc, pCmd->pDst,
+                pCtrModeCmd, pCtrModeCmd->pSrc, pCtrModeCmd->pDst);
+*/
+    pMbuf = pCtrModeCmd->pSrc;
+
+    /* Allocate buffer for Key stream */
+    pBuf = mvOsIoCachedMalloc(cesaOsHandle,cryptoSize,
+			      &pMbuf->pFrags[0].bufPhysAddr,
+			      &pMbuf->pFrags[0].memHandle);
+    if(pBuf == NULL)
+    {
+        mvOsPrintf("mvCesaCtrModePrepare: Can't allocate %d bytes\n", cryptoSize);
+        return MV_OUT_OF_CPU_MEM;
+    }
+    memset(pBuf, 0, cryptoSize);
+    mvOsCacheFlush(NULL, pBuf, cryptoSize);
+
+    pMbuf->pFrags[0].bufVirtPtr = pBuf;
+    pMbuf->mbufSize = cryptoSize;
+    pMbuf->pFrags[0].bufSize = cryptoSize;
+
+    pCtrModeCmd->pReqPrv = pCmd->pReqPrv;
+    pCtrModeCmd->sessionId = pCmd->sessionId;
+
+    /* ivFromUser and ivOffset are don't care */
+    pCtrModeCmd->cryptoOffset = 0;
+    pCtrModeCmd->cryptoLength = cryptoSize;
+
+    /* digestOffset, macOffset and macLength are don't care */
+
+    mvCesaCopyFromMbuf(pBuf, pCmd->pSrc, pCmd->ivOffset, MV_CESA_AES_BLOCK_SIZE);
+    pCounter = (MV_U32*)(pBuf + (MV_CESA_AES_BLOCK_SIZE - sizeof(counter)));
+    counter = *pCounter;
+    counter = MV_32BIT_BE(counter);
+    pIV = pBuf;
+    cryptoSize -= MV_CESA_AES_BLOCK_SIZE;
+
+    /* fill key stream */
+    while(cryptoSize > 0)
+    {
+        pBuf += MV_CESA_AES_BLOCK_SIZE;
+        memcpy(pBuf, pIV, MV_CESA_AES_BLOCK_SIZE - sizeof(counter));
+        pCounter = (MV_U32*)(pBuf + (MV_CESA_AES_BLOCK_SIZE - sizeof(counter)));
+        counter++;
+        *pCounter = MV_32BIT_BE(counter);
+        cryptoSize -= MV_CESA_AES_BLOCK_SIZE;
+    }
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaCtrModeComplete -
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*       MV_CESA_COMMAND *pOrgCmd, MV_CESA_COMMAND *pCmd
+*
+* RETURN:
+*       MV_STATUS
+*
+*******************************************************************************/
+static MV_STATUS   mvCesaCtrModeComplete(MV_CESA_COMMAND *pOrgCmd, MV_CESA_COMMAND *pCmd)
+{
+    int         srcFrag, dstFrag, srcOffset, dstOffset, keyOffset, srcSize, dstSize;
+    int         cryptoSize = pCmd->cryptoLength;
+    MV_U8       *pSrc, *pDst, *pKey;
+    MV_STATUS   status = MV_OK;
+/*
+    mvOsPrintf("CtrModeComplete: pCmd=%p, pCtrSrc=%p, pCtrDst=%p, pOrgCmd=%p, pOrgSrc=%p, pOrgDst=%p\n",
+                pCmd, pCmd->pSrc, pCmd->pDst,
+                pOrgCmd, pOrgCmd->pSrc, pOrgCmd->pDst);
+*/
+    /* XOR source data with key stream to destination data */
+    pKey = pCmd->pDst->pFrags[0].bufVirtPtr;
+    keyOffset = 0;
+
+    if( (pOrgCmd->pSrc != pOrgCmd->pDst) &&
+        (pOrgCmd->cryptoOffset > 0) )
+    {
+        /* Copy Prefix from source buffer to destination buffer */
+
+        status = mvCesaMbufCopy(pOrgCmd->pDst, 0,
+                                pOrgCmd->pSrc, 0, pOrgCmd->cryptoOffset);
+/*
+        status = mvCesaCopyFromMbuf(tempBuf, pOrgCmd->pSrc,
+                       0, pOrgCmd->cryptoOffset);
+        status = mvCesaCopyToMbuf(tempBuf, pOrgCmd->pDst,
+                       0, pOrgCmd->cryptoOffset);
+*/
+    }
+
+    srcFrag = mvCesaMbufOffset(pOrgCmd->pSrc, pOrgCmd->cryptoOffset, &srcOffset);
+    pSrc = pOrgCmd->pSrc->pFrags[srcFrag].bufVirtPtr;
+    srcSize = pOrgCmd->pSrc->pFrags[srcFrag].bufSize;
+
+    dstFrag = mvCesaMbufOffset(pOrgCmd->pDst, pOrgCmd->cryptoOffset, &dstOffset);
+    pDst = pOrgCmd->pDst->pFrags[dstFrag].bufVirtPtr;
+    dstSize = pOrgCmd->pDst->pFrags[dstFrag].bufSize;
+
+    while(cryptoSize > 0)
+    {
+        pDst[dstOffset] = (pSrc[srcOffset] ^ pKey[keyOffset]);
+
+        cryptoSize--;
+        dstOffset++;
+        srcOffset++;
+        keyOffset++;
+
+        if(srcOffset >= srcSize)
+        {
+            srcFrag++;
+            srcOffset = 0;
+            pSrc = pOrgCmd->pSrc->pFrags[srcFrag].bufVirtPtr;
+            srcSize = pOrgCmd->pSrc->pFrags[srcFrag].bufSize;
+        }
+
+        if(dstOffset >= dstSize)
+        {
+            dstFrag++;
+            dstOffset = 0;
+            pDst = pOrgCmd->pDst->pFrags[dstFrag].bufVirtPtr;
+            dstSize = pOrgCmd->pDst->pFrags[dstFrag].bufSize;
+        }
+    }
+
+    if(pOrgCmd->pSrc != pOrgCmd->pDst)
+    {
+        /* Copy Suffix from source buffer to destination buffer */
+        srcOffset = pOrgCmd->cryptoOffset + pOrgCmd->cryptoLength;
+
+        if( (pOrgCmd->pDst->mbufSize - srcOffset) > 0)
+        {
+            status = mvCesaMbufCopy(pOrgCmd->pDst, srcOffset,
+                                    pOrgCmd->pSrc, srcOffset,
+                                    pOrgCmd->pDst->mbufSize - srcOffset);
+        }
+
+/*
+        status = mvCesaCopyFromMbuf(tempBuf, pOrgCmd->pSrc,
+                                srcOffset, pOrgCmd->pSrc->mbufSize - srcOffset);
+        status = mvCesaCopyToMbuf(tempBuf, pOrgCmd->pDst,
+                       srcOffset, pOrgCmd->pDst->mbufSize - srcOffset);
+*/
+    }
+
+    /* Free buffer used for Key stream */
+    mvOsIoCachedFree(cesaOsHandle,pCmd->pDst->pFrags[0].bufSize,
+		     pCmd->pDst->pFrags[0].bufPhysAddr,
+                     pCmd->pDst->pFrags[0].bufVirtPtr,
+		     pCmd->pDst->pFrags[0].memHandle);
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaCtrModeFinish -
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*       MV_CESA_COMMAND* pCmd
+*
+* RETURN:
+*       MV_STATUS
+*
+*******************************************************************************/
+static void    mvCesaCtrModeFinish(MV_CESA_COMMAND* pCmd)
+{
+    mvOsFree(pCmd);
+}
+
+/*******************************************************************************
+* mvCesaParamCheck -
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*       MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd, MV_U8* pFixOffset
+*
+* RETURN:
+*       MV_STATUS
+*
+*******************************************************************************/
+static MV_STATUS   mvCesaParamCheck(MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd,
+                                    MV_U8* pFixOffset)
+{
+    MV_U8   fixOffset = 0xFF;
+
+    /* Check AUTH operation parameters */
+    if( ((pSA->config & MV_CESA_OPERATION_MASK) !=
+                (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET)) )
+    {
+        /* MAC offset should be at least 4 byte aligned */
+        if( MV_IS_NOT_ALIGN(pCmd->macOffset, 4) )
+        {
+            mvOsPrintf("mvCesaAction: macOffset %d must be 4 byte aligned\n",
+                    pCmd->macOffset);
+            return MV_BAD_PARAM;
+        }
+        /* Digest offset must be 4 byte aligned */
+        if( MV_IS_NOT_ALIGN(pCmd->digestOffset, 4) )
+        {
+            mvOsPrintf("mvCesaAction: digestOffset %d must be 4 byte aligned\n",
+                    pCmd->digestOffset);
+            return MV_BAD_PARAM;
+        }
+        /* In addition all offsets should be the same alignment: 8 or 4 */
+        if(fixOffset == 0xFF)
+        {
+            fixOffset = (pCmd->macOffset % 8);
+        }
+        else
+        {
+            if( (pCmd->macOffset % 8) != fixOffset)
+            {
+                mvOsPrintf("mvCesaAction: macOffset %d mod 8 must be equal %d\n",
+                                pCmd->macOffset, fixOffset);
+                return MV_BAD_PARAM;
+            }
+        }
+        if( (pCmd->digestOffset % 8) != fixOffset)
+        {
+            mvOsPrintf("mvCesaAction: digestOffset %d mod 8 must be equal %d\n",
+                                pCmd->digestOffset, fixOffset);
+            return MV_BAD_PARAM;
+        }
+    }
+    /* Check CRYPTO operation parameters */
+    if( ((pSA->config & MV_CESA_OPERATION_MASK) !=
+                (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET)) )
+    {
+        /* CryptoOffset should be at least 4 byte aligned */
+        if( MV_IS_NOT_ALIGN(pCmd->cryptoOffset, 4)  )
+        {
+            mvOsPrintf("CesaAction: cryptoOffset=%d must be 4 byte aligned\n",
+                        pCmd->cryptoOffset);
+            return MV_BAD_PARAM;
+        }
+        /* cryptoLength should be the whole number of blocks */
+        if( MV_IS_NOT_ALIGN(pCmd->cryptoLength, pSA->cryptoBlockSize) )
+        {
+            mvOsPrintf("mvCesaAction: cryptoLength=%d must be %d byte aligned\n",
+                        pCmd->cryptoLength, pSA->cryptoBlockSize);
+            return MV_BAD_PARAM;
+        }
+        if(fixOffset == 0xFF)
+        {
+            fixOffset = (pCmd->cryptoOffset % 8);
+        }
+        else
+        {
+            /* In addition all offsets should be the same alignment: 8 or 4 */
+            if( (pCmd->cryptoOffset % 8) != fixOffset)
+            {
+                mvOsPrintf("mvCesaAction: cryptoOffset %d mod 8 must be equal %d \n",
+                                pCmd->cryptoOffset, fixOffset);
+                return MV_BAD_PARAM;
+            }
+        }
+
+        /* check for CBC mode */
+        if(pSA->cryptoIvSize > 0)
+        {
+            /* cryptoIV must not be part of CryptoLength */
+            if( ((pCmd->ivOffset + pSA->cryptoIvSize) > pCmd->cryptoOffset) &&
+                (pCmd->ivOffset < (pCmd->cryptoOffset + pCmd->cryptoLength)) )
+            {
+                mvOsPrintf("mvCesaFragParamCheck: cryptoIvOffset (%d) is part of cryptoLength (%d+%d)\n",
+                        pCmd->ivOffset, pCmd->macOffset, pCmd->macLength);
+                return MV_BAD_PARAM;
+            }
+
+            /* ivOffset must be 4 byte aligned */
+            if( MV_IS_NOT_ALIGN(pCmd->ivOffset, 4) )
+            {
+                mvOsPrintf("CesaAction: ivOffset=%d must be 4 byte aligned\n",
+                            pCmd->ivOffset);
+                return MV_BAD_PARAM;
+            }
+            /* In addition all offsets should be the same alignment: 8 or 4 */
+            if( (pCmd->ivOffset % 8) != fixOffset)
+            {
+                mvOsPrintf("mvCesaAction: ivOffset %d mod 8 must be %d\n",
+                                pCmd->ivOffset, fixOffset);
+                return MV_BAD_PARAM;
+            }
+        }
+    }
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaFragParamCheck -
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*       MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd
+*
+* RETURN:
+*       MV_STATUS
+*
+*******************************************************************************/
+static MV_STATUS   mvCesaFragParamCheck(MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd)
+{
+    int     offset;
+
+    if( ((pSA->config & MV_CESA_OPERATION_MASK) !=
+                (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET)) )
+    {
+        /* macOffset must be less that SRAM buffer size */
+        if(pCmd->macOffset > (sizeof(cesaSramVirtPtr->buf) - MV_CESA_AUTH_BLOCK_SIZE))
+        {
+            mvOsPrintf("mvCesaFragParamCheck: macOffset is too large (%d)\n",
+                        pCmd->macOffset);
+            return MV_BAD_PARAM;
+        }
+        /* macOffset+macSize must be more than mbufSize - SRAM buffer size */
+        if( ((pCmd->macOffset + pCmd->macLength) > pCmd->pSrc->mbufSize) ||
+            ((pCmd->pSrc->mbufSize - (pCmd->macOffset + pCmd->macLength)) >=
+             sizeof(cesaSramVirtPtr->buf)) )
+        {
+            mvOsPrintf("mvCesaFragParamCheck: macLength is too large (%d), mbufSize=%d\n",
+                        pCmd->macLength, pCmd->pSrc->mbufSize);
+            return MV_BAD_PARAM;
+        }
+    }
+
+    if( ((pSA->config & MV_CESA_OPERATION_MASK) !=
+                (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET)) )
+    {
+        /* cryptoOffset must be less that SRAM buffer size */
+        /* 4 for possible fixOffset */
+        if( (pCmd->cryptoOffset + 4) > (sizeof(cesaSramVirtPtr->buf) - pSA->cryptoBlockSize))
+        {
+            mvOsPrintf("mvCesaFragParamCheck: cryptoOffset is too large (%d)\n",
+                        pCmd->cryptoOffset);
+            return MV_BAD_PARAM;
+        }
+
+        /* cryptoOffset+cryptoSize must be more than mbufSize - SRAM buffer size */
+        if( ((pCmd->cryptoOffset + pCmd->cryptoLength) > pCmd->pSrc->mbufSize) ||
+            ((pCmd->pSrc->mbufSize - (pCmd->cryptoOffset + pCmd->cryptoLength)) >=
+             (sizeof(cesaSramVirtPtr->buf) - pSA->cryptoBlockSize)) )
+        {
+            mvOsPrintf("mvCesaFragParamCheck: cryptoLength is too large (%d), mbufSize=%d\n",
+                        pCmd->cryptoLength, pCmd->pSrc->mbufSize);
+            return MV_BAD_PARAM;
+        }
+    }
+
+    /* When MAC_THEN_CRYPTO or CRYPTO_THEN_MAC */
+    if( ((pSA->config & MV_CESA_OPERATION_MASK) ==
+            (MV_CESA_MAC_THEN_CRYPTO << MV_CESA_OPERATION_OFFSET)) ||
+        ((pSA->config & MV_CESA_OPERATION_MASK) ==
+            (MV_CESA_CRYPTO_THEN_MAC << MV_CESA_OPERATION_OFFSET)) )
+    {
+        if( (mvCtrlModelGet() == MV_5182_DEV_ID) ||
+            ( (mvCtrlModelGet() == MV_5181_DEV_ID) &&
+              (mvCtrlRevGet() >= MV_5181L_A0_REV)  &&
+              (pCmd->macLength >= (1 << 14)) ) )
+        {
+            return MV_NOT_ALLOWED;
+        }
+
+        /* abs(cryptoOffset-macOffset) must be aligned cryptoBlockSize */
+        if(pCmd->cryptoOffset > pCmd->macOffset)
+        {
+            offset = pCmd->cryptoOffset - pCmd->macOffset;
+        }
+        else
+        {
+            offset = pCmd->macOffset - pCmd->cryptoOffset;
+        }
+
+        if( MV_IS_NOT_ALIGN(offset,  pSA->cryptoBlockSize) )
+        {
+/*
+            mvOsPrintf("mvCesaFragParamCheck: (cryptoOffset - macOffset) must be %d byte aligned\n",
+                        pSA->cryptoBlockSize);
+*/
+            return MV_NOT_ALLOWED;
+        }
+        /* Digest must not be part of CryptoLength */
+        if( ((pCmd->digestOffset + pSA->digestSize) > pCmd->cryptoOffset) &&
+            (pCmd->digestOffset < (pCmd->cryptoOffset + pCmd->cryptoLength)) )
+        {
+/*
+            mvOsPrintf("mvCesaFragParamCheck: digestOffset (%d) is part of cryptoLength (%d+%d)\n",
+                        pCmd->digestOffset, pCmd->cryptoOffset, pCmd->cryptoLength);
+*/
+            return MV_NOT_ALLOWED;
+        }
+    }
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaFragSizeFind -
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*       MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd,
+*       int cryptoOffset, int macOffset,
+*
+* OUTPUT:
+*       int* pCopySize, int* pCryptoDataSize, int* pMacDataSize
+*
+* RETURN:
+*       MV_STATUS
+*
+*******************************************************************************/
+static void   mvCesaFragSizeFind(MV_CESA_SA* pSA, MV_CESA_REQ* pReq,
+                                 int cryptoOffset, int macOffset,
+                          int* pCopySize, int* pCryptoDataSize, int* pMacDataSize)
+{
+    MV_CESA_COMMAND *pCmd = pReq->pCmd;
+    int             cryptoDataSize, macDataSize, copySize;
+
+    cryptoDataSize = macDataSize = 0;
+    copySize = *pCopySize;
+
+    if( (pSA->config & MV_CESA_OPERATION_MASK) !=
+                (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET) )
+    {
+        cryptoDataSize = MV_MIN( (copySize - cryptoOffset),
+                                 (pCmd->cryptoLength - (pReq->frags.cryptoSize + 1)) );
+
+        /* cryptoSize for each fragment must be the whole number of blocksSize */
+        if( MV_IS_NOT_ALIGN(cryptoDataSize, pSA->cryptoBlockSize) )
+        {
+            cryptoDataSize = MV_ALIGN_DOWN(cryptoDataSize, pSA->cryptoBlockSize);
+            copySize = cryptoOffset + cryptoDataSize;
+        }
+    }
+    if( (pSA->config & MV_CESA_OPERATION_MASK) !=
+             (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET) )
+    {
+        macDataSize = MV_MIN( (copySize - macOffset),
+                              (pCmd->macLength - (pReq->frags.macSize + 1)));
+
+        /* macSize for each fragment (except last) must be the whole number of blocksSize */
+        if( MV_IS_NOT_ALIGN(macDataSize, MV_CESA_AUTH_BLOCK_SIZE) )
+        {
+            macDataSize = MV_ALIGN_DOWN(macDataSize, MV_CESA_AUTH_BLOCK_SIZE);
+            copySize = macOffset + macDataSize;
+        }
+        cryptoDataSize = copySize - cryptoOffset;
+    }
+    *pCopySize = copySize;
+
+    if(pCryptoDataSize != NULL)
+        *pCryptoDataSize = cryptoDataSize;
+
+    if(pMacDataSize != NULL)
+        *pMacDataSize = macDataSize;
+}
diff --git a/crypto/ocf/kirkwood/cesa/mvCesa.h b/crypto/ocf/kirkwood/cesa/mvCesa.h
new file mode 100644
index 000000000000..65352dc471a3
--- /dev/null
+++ b/crypto/ocf/kirkwood/cesa/mvCesa.h
@@ -0,0 +1,412 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/*******************************************************************************
+* mvCesa.h - Header File for Cryptographic Engines and Security Accelerator
+*
+* DESCRIPTION:
+*       This header file contains macros typedefs and function declaration for
+*       the Marvell Cryptographic Engines and Security Accelerator.
+*
+*******************************************************************************/
+
+#ifndef __mvCesa_h__
+#define __mvCesa_h__
+
+#include "mvOs.h"
+#include "mvCommon.h"
+#include "mvDebug.h"
+
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+
+#include "cesa/mvMD5.h"
+#include "cesa/mvSHA1.h"
+
+#include "cesa/mvCesa.h"
+#include "cesa/AES/mvAes.h"
+#include "mvSysHwConfig.h"
+
+#ifdef MV_INCLUDE_IDMA
+#include "idma/mvIdma.h"
+#include "idma/mvIdmaRegs.h"
+#else
+/* Redefine MV_DMA_DESC structure */
+typedef struct _mvDmaDesc
+{
+    MV_U32 	byteCnt;        /* The total number of bytes to transfer        */
+    MV_U32 	phySrcAdd;	    /* The physical source address                  */
+    MV_U32 	phyDestAdd;     /* The physical destination address             */
+    MV_U32	phyNextDescPtr; /* If we are using chain mode DMA transfer,     */
+				            /* then this pointer should point to the        */
+                            /* physical address of the next descriptor,     */
+                            /* otherwise it should be NULL.                 */
+}MV_DMA_DESC;
+#endif /* MV_INCLUDE_IDMA */
+
+#include "cesa/mvCesaRegs.h"
+
+#define MV_CESA_AUTH_BLOCK_SIZE         64 /* bytes */
+
+#define MV_CESA_MD5_DIGEST_SIZE         16 /* bytes */
+#define MV_CESA_SHA1_DIGEST_SIZE        20 /* bytes */
+
+#define MV_CESA_MAX_DIGEST_SIZE         MV_CESA_SHA1_DIGEST_SIZE
+
+#define MV_CESA_DES_KEY_LENGTH          8   /* bytes = 64 bits */
+#define MV_CESA_3DES_KEY_LENGTH         24  /* bytes = 192 bits */
+#define MV_CESA_AES_128_KEY_LENGTH      16  /* bytes = 128 bits */
+#define MV_CESA_AES_192_KEY_LENGTH      24  /* bytes = 192 bits */
+#define MV_CESA_AES_256_KEY_LENGTH      32  /* bytes = 256 bits */
+
+#define MV_CESA_MAX_CRYPTO_KEY_LENGTH   MV_CESA_AES_256_KEY_LENGTH
+
+#define MV_CESA_DES_BLOCK_SIZE          8 /* bytes = 64 bits */
+#define MV_CESA_3DES_BLOCK_SIZE         8 /* bytes = 64 bits */
+
+#define MV_CESA_AES_BLOCK_SIZE          16 /* bytes = 128 bits */
+
+#define MV_CESA_MAX_IV_LENGTH           MV_CESA_AES_BLOCK_SIZE
+
+#define MV_CESA_MAX_MAC_KEY_LENGTH      64 /* bytes */
+
+typedef struct
+{
+	MV_U8               cryptoKey[MV_CESA_MAX_CRYPTO_KEY_LENGTH];
+	MV_U8               macKey[MV_CESA_MAX_MAC_KEY_LENGTH];
+	MV_CESA_OPERATION   operation;
+	MV_CESA_DIRECTION   direction;
+	MV_CESA_CRYPTO_ALG  cryptoAlgorithm;
+	MV_CESA_CRYPTO_MODE cryptoMode;
+	MV_U8               cryptoKeyLength;
+	MV_CESA_MAC_MODE    macMode;
+	MV_U8               macKeyLength;
+	MV_U8               digestSize;
+
+} MV_CESA_OPEN_SESSION;
+
+typedef struct
+{
+    MV_BUF_INFO *pFrags;
+	MV_U16	    numFrags;
+    MV_U16      mbufSize;
+
+} MV_CESA_MBUF;
+
+typedef struct
+{
+    void* 	pReqPrv; /* instead of reqId */
+    MV_U32 	retCode;
+    MV_16   sessionId;
+
+} MV_CESA_RESULT;
+
+typedef void    (*MV_CESA_CALLBACK) (MV_CESA_RESULT* pResult);
+
+
+typedef struct
+{
+    void*               pReqPrv;    /* instead of reqId */
+    MV_CESA_MBUF*       pSrc;
+    MV_CESA_MBUF*       pDst;
+    MV_CESA_CALLBACK*   pFuncCB;
+    MV_16               sessionId;
+    MV_U16              ivFromUser;
+    MV_U16              ivOffset;
+    MV_U16              cryptoOffset;
+    MV_U16              cryptoLength;
+    MV_U16              digestOffset;
+    MV_U16              macOffset;
+    MV_U16              macLength;
+    MV_BOOL		skipFlush;
+} MV_CESA_COMMAND;
+
+
+
+MV_STATUS   mvCesaHalInit (int numOfSession, int queueDepth, char* pSramBase, MV_U32 cryptEngBase, void *osHandle);
+MV_STATUS   mvCesaFinish (void);
+MV_STATUS   mvCesaSessionOpen(MV_CESA_OPEN_SESSION *pSession, short* pSid);
+MV_STATUS   mvCesaSessionClose(short sid);
+MV_STATUS   mvCesaCryptoIvSet(MV_U8* pIV, int ivSize);
+
+MV_STATUS   mvCesaAction (MV_CESA_COMMAND* pCmd);
+
+MV_U32      mvCesaInProcessGet(void);
+MV_STATUS   mvCesaReadyDispatch(void);
+MV_STATUS   mvCesaReadyGet(MV_CESA_RESULT* pResult);
+MV_BOOL     mvCesaIsReady(void);
+
+int     	mvCesaMbufOffset(MV_CESA_MBUF* pMbuf, int offset, int* pBufOffset);
+MV_STATUS   mvCesaCopyFromMbuf(MV_U8* pDst, MV_CESA_MBUF* pSrcMbuf,
+                               int offset, int size);
+MV_STATUS   mvCesaCopyToMbuf(MV_U8* pSrc, MV_CESA_MBUF* pDstMbuf,
+                               int offset, int size);
+MV_STATUS   mvCesaMbufCopy(MV_CESA_MBUF* pMbufDst, int dstMbufOffset,
+                           MV_CESA_MBUF* pMbufSrc, int srcMbufOffset, int size);
+
+/********** Debug functions ********/
+
+void        mvCesaDebugMbuf(const char* str, MV_CESA_MBUF *pMbuf, int offset, int size);
+void        mvCesaDebugSA(short sid, int mode);
+void        mvCesaDebugStats(void);
+void        mvCesaDebugStatsClear(void);
+void        mvCesaDebugRegs(void);
+void        mvCesaDebugStatus(void);
+void        mvCesaDebugQueue(int mode);
+void        mvCesaDebugSram(int mode);
+void        mvCesaDebugSAD(int mode);
+
+
+/********  CESA Private definitions ********/
+#if (MV_CESA_VERSION >= 2)
+#if (MV_CACHE_COHERENCY  == MV_CACHE_COHER_SW)
+#define MV_CESA_TDMA_CTRL_VALUE       MV_CESA_TDMA_DST_BURST_MASK(MV_CESA_TDMA_BURST_128B) \
+                                    | MV_CESA_TDMA_SRC_BURST_MASK(MV_CESA_TDMA_BURST_128B) \
+                                    | MV_CESA_TDMA_OUTSTAND_READ_EN_MASK                   \
+				    | MV_CESA_TDMA_NO_BYTE_SWAP_MASK			   \
+                                    | MV_CESA_TDMA_ENABLE_MASK
+#else
+#define MV_CESA_TDMA_CTRL_VALUE       MV_CESA_TDMA_DST_BURST_MASK(MV_CESA_TDMA_BURST_32B)  \
+                                    | MV_CESA_TDMA_SRC_BURST_MASK(MV_CESA_TDMA_BURST_128B) \
+                                    /*| MV_CESA_TDMA_OUTSTAND_READ_EN_MASK                   */\
+                                    | MV_CESA_TDMA_ENABLE_MASK
+
+#endif
+#else
+#define MV_CESA_IDMA_CTRL_LOW_VALUE   ICCLR_DST_BURST_LIM_128BYTE   \
+                                    | ICCLR_SRC_BURST_LIM_128BYTE   \
+                                    | ICCLR_INT_MODE_MASK           \
+                                    | ICCLR_BLOCK_MODE              \
+                                    | ICCLR_CHAN_ENABLE             \
+                                    | ICCLR_DESC_MODE_16M
+#endif /* MV_CESA_VERSION >= 2 */
+
+#define MV_CESA_MAX_PKT_SIZE        (64 * 1024)
+#define MV_CESA_MAX_MBUF_FRAGS      20
+
+#define MV_CESA_MAX_REQ_FRAGS       ( (MV_CESA_MAX_PKT_SIZE / MV_CESA_MAX_BUF_SIZE) + 1)
+
+#define MV_CESA_MAX_DMA_DESC    (MV_CESA_MAX_MBUF_FRAGS*2 + 5)
+
+#define MAX_CESA_CHAIN_LENGTH	20
+
+typedef enum
+{
+    MV_CESA_IDLE     = 0,
+    MV_CESA_PENDING,
+    MV_CESA_PROCESS,
+    MV_CESA_READY,
+#if (MV_CESA_VERSION >= 3)
+    MV_CESA_CHAIN,
+#endif
+} MV_CESA_STATE;
+
+
+/* Session database */
+
+/* Map of Key materials of the session in SRAM.
+ * Each field must be 8 byte aligned
+ * Total size: 32 + 24 + 24 = 80 bytes
+ */
+typedef struct
+{
+    MV_U8  cryptoKey[MV_CESA_MAX_CRYPTO_KEY_LENGTH];
+    MV_U8  macInnerIV[MV_CESA_MAX_DIGEST_SIZE];
+    MV_U8  reservedInner[4];
+    MV_U8  macOuterIV[MV_CESA_MAX_DIGEST_SIZE];
+    MV_U8  reservedOuter[4];
+
+} MV_CESA_SRAM_SA;
+
+typedef struct
+{
+    MV_CESA_SRAM_SA*    pSramSA;
+    MV_U32              config;
+    MV_U8               cryptoKeyLength;
+    MV_U8               cryptoIvSize;
+    MV_U8               cryptoBlockSize;
+    MV_U8               digestSize;
+    MV_U8               macKeyLength;
+    MV_U8               valid;
+    MV_U8               ctrMode;
+    MV_U32              count;
+
+} MV_CESA_SA;
+
+/* DMA list management */
+typedef struct
+{
+    MV_DMA_DESC*    pDmaFirst;
+    MV_DMA_DESC*    pDmaLast;
+
+} MV_CESA_DMA;
+
+
+typedef struct
+{
+    MV_U8               numFrag;
+    MV_U8               nextFrag;
+    int                 bufOffset;
+    int                 cryptoSize;
+    int                 macSize;
+    int                 newDigestOffset;
+    MV_U8               orgDigest[MV_CESA_MAX_DIGEST_SIZE];
+
+} MV_CESA_FRAGS;
+
+/* Request queue */
+typedef struct
+{
+    MV_U8               state;
+    MV_U8               fragMode;
+    MV_U8               fixOffset;
+    MV_CESA_COMMAND*    pCmd;
+    MV_CESA_COMMAND*    pOrgCmd;
+    MV_BUF_INFO         dmaDescBuf;
+    MV_CESA_DMA         dma[MV_CESA_MAX_REQ_FRAGS];
+    MV_BUF_INFO         cesaDescBuf;
+    MV_CESA_DESC*       pCesaDesc;
+    MV_CESA_FRAGS       frags;
+
+
+} MV_CESA_REQ;
+
+
+/* SRAM map */
+/* Total SRAM size calculation */
+/*  SRAM size =
+ *              MV_CESA_MAX_BUF_SIZE  +
+ *              sizeof(MV_CESA_DESC)  +
+ *              MV_CESA_MAX_IV_LENGTH +
+ *              MV_CESA_MAX_IV_LENGTH +
+ *              MV_CESA_MAX_DIGEST_SIZE +
+ *              sizeof(MV_CESA_SRAM_SA)
+ *            = 1600 + 32 + 16 + 16 + 24 + 80 + 280 (reserved) = 2048 bytes
+ *            = 3200 + 32 + 16 + 16 + 24 + 80 + 728 (reserved) = 4096 bytes
+ */
+typedef struct
+{
+    MV_U8               buf[MV_CESA_MAX_BUF_SIZE];
+    MV_CESA_DESC        desc;
+    MV_U8               cryptoIV[MV_CESA_MAX_IV_LENGTH];
+    MV_U8               tempCryptoIV[MV_CESA_MAX_IV_LENGTH];
+    MV_U8               tempDigest[MV_CESA_MAX_DIGEST_SIZE+4];
+    MV_CESA_SRAM_SA     sramSA;
+
+} MV_CESA_SRAM_MAP;
+
+
+typedef struct
+{
+    MV_U32  openedCount;
+    MV_U32  closedCount;
+    MV_U32  fragCount;
+    MV_U32  reqCount;
+    MV_U32  maxReqCount;
+    MV_U32  procCount;
+    MV_U32  readyCount;
+    MV_U32  notReadyCount;
+    MV_U32  startCount;
+#if (MV_CESA_VERSION >= 3)
+    MV_U32  maxChainUsage;
+#endif
+
+} MV_CESA_STATS;
+
+
+/* External variables */
+
+extern MV_CESA_STATS    cesaStats;
+extern MV_CESA_FRAGS    cesaFrags;
+
+extern MV_BUF_INFO      cesaSramSaBuf;
+
+extern MV_CESA_SA*       pCesaSAD;
+extern MV_U16            cesaMaxSA;
+
+extern MV_CESA_REQ*      pCesaReqFirst;
+extern MV_CESA_REQ*      pCesaReqLast;
+extern MV_CESA_REQ*      pCesaReqEmpty;
+extern MV_CESA_REQ*      pCesaReqProcess;
+extern int               cesaQueueDepth;
+extern int               cesaReqResources;
+#if (MV_CESA_VERSION>= 3)
+extern MV_U32		cesaChainLength;
+#endif
+
+extern MV_CESA_SRAM_MAP*  cesaSramVirtPtr;
+extern MV_U32           cesaSramPhysAddr;
+
+static INLINE MV_ULONG  mvCesaVirtToPhys(MV_BUF_INFO* pBufInfo, void* pVirt)
+{
+    return (pBufInfo->bufPhysAddr + ((MV_U8*)pVirt - pBufInfo->bufVirtPtr));
+}
+
+/* Additional DEBUG functions */
+void        mvCesaDebugSramSA(MV_CESA_SRAM_SA* pSramSA, int mode);
+void        mvCesaDebugCmd(MV_CESA_COMMAND* pCmd,  int mode);
+void        mvCesaDebugDescriptor(MV_CESA_DESC* pDesc);
+
+
+
+#endif /* __mvCesa_h__ */
diff --git a/crypto/ocf/kirkwood/cesa/mvCesaDebug.c b/crypto/ocf/kirkwood/cesa/mvCesaDebug.c
new file mode 100644
index 000000000000..0b7cb486f424
--- /dev/null
+++ b/crypto/ocf/kirkwood/cesa/mvCesaDebug.c
@@ -0,0 +1,484 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+        this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvOs.h"
+#include "mvDebug.h"
+
+#include "cesa/mvMD5.h"
+#include "cesa/mvSHA1.h"
+
+#include "cesa/mvCesa.h"
+#include "cesa/mvCesaRegs.h"
+#include "cesa/AES/mvAes.h"
+
+static const char*   mvCesaDebugStateStr(MV_CESA_STATE state)
+{
+    switch(state)
+    {
+        case MV_CESA_IDLE:
+            return "Idle";
+
+        case MV_CESA_PENDING:
+            return "Pend";
+
+        case MV_CESA_PROCESS:
+            return "Proc";
+
+        case MV_CESA_READY:
+            return "Ready";
+
+        default:
+            break;
+    }
+    return "Unknown";
+}
+
+static const char*   mvCesaDebugOperStr(MV_CESA_OPERATION oper)
+{
+    switch(oper)
+    {
+        case MV_CESA_MAC_ONLY:
+            return "MacOnly";
+
+        case MV_CESA_CRYPTO_ONLY:
+            return "CryptoOnly";
+
+        case MV_CESA_MAC_THEN_CRYPTO:
+            return "MacCrypto";
+
+        case MV_CESA_CRYPTO_THEN_MAC:
+            return "CryptoMac";
+
+        default:
+            break;
+    }
+    return "Null";
+}
+
+static const char* mvCesaDebugCryptoAlgStr(MV_CESA_CRYPTO_ALG cryptoAlg)
+{
+    switch(cryptoAlg)
+    {
+        case MV_CESA_CRYPTO_DES:
+            return "DES";
+
+        case MV_CESA_CRYPTO_3DES:
+            return "3DES";
+
+        case MV_CESA_CRYPTO_AES:
+            return "AES";
+
+        default:
+            break;
+    }
+    return "Null";
+}
+
+static const char* mvCesaDebugMacModeStr(MV_CESA_MAC_MODE macMode)
+{
+    switch(macMode)
+    {
+        case MV_CESA_MAC_MD5:
+            return "MD5";
+
+        case MV_CESA_MAC_SHA1:
+            return "SHA1";
+
+        case MV_CESA_MAC_HMAC_MD5:
+            return "HMAC-MD5";
+
+        case MV_CESA_MAC_HMAC_SHA1:
+            return "HMAC_SHA1";
+
+        default:
+            break;
+    }
+    return "Null";
+}
+
+void    mvCesaDebugCmd(MV_CESA_COMMAND* pCmd,  int mode)
+{
+    mvOsPrintf("pCmd=%p, pReqPrv=%p, pSrc=%p, pDst=%p, pCB=%p, sid=%d\n",
+                pCmd, pCmd->pReqPrv, pCmd->pSrc, pCmd->pDst,
+                pCmd->pFuncCB, pCmd->sessionId);
+    mvOsPrintf("isUser=%d, ivOffs=%d, crOffs=%d, crLen=%d, digest=%d, macOffs=%d, macLen=%d\n",
+                pCmd->ivFromUser, pCmd->ivOffset, pCmd->cryptoOffset, pCmd->cryptoLength,
+                pCmd->digestOffset, pCmd->macOffset, pCmd->macLength);
+}
+
+/* no need to use in tool */
+void     mvCesaDebugMbuf(const char* str, MV_CESA_MBUF *pMbuf, int offset, int size)
+{
+    int frag, len, fragOffset;
+
+    if(str != NULL)
+        mvOsPrintf("%s: pMbuf=%p, numFrags=%d, mbufSize=%d\n",
+                    str, pMbuf, pMbuf->numFrags, pMbuf->mbufSize);
+
+    frag = mvCesaMbufOffset(pMbuf, offset, &fragOffset);
+    if(frag == MV_INVALID)
+    {
+        mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
+        return;
+    }
+
+    for(; frag<pMbuf->numFrags; frag++)
+    {
+        mvOsPrintf("#%2d. bufVirt=%p, bufSize=%d\n",
+                    frag, pMbuf->pFrags[frag].bufVirtPtr,
+                    pMbuf->pFrags[frag].bufSize);
+        if(size > 0)
+        {
+            len = MV_MIN(pMbuf->pFrags[frag].bufSize, size);
+            mvDebugMemDump(pMbuf->pFrags[frag].bufVirtPtr+fragOffset, len, 1);
+            size -= len;
+            fragOffset = 0;
+        }
+    }
+}
+
+void    mvCesaDebugRegs(void)
+{
+    mvOsPrintf("\t CESA Registers:\n");
+
+    mvOsPrintf("MV_CESA_CMD_REG                     : 0x%X = 0x%08x\n",
+                MV_CESA_CMD_REG,
+                MV_REG_READ( MV_CESA_CMD_REG ) );
+
+    mvOsPrintf("MV_CESA_CHAN_DESC_OFFSET_REG        : 0x%X = 0x%08x\n",
+                MV_CESA_CHAN_DESC_OFFSET_REG,
+                MV_REG_READ(MV_CESA_CHAN_DESC_OFFSET_REG) );
+
+    mvOsPrintf("MV_CESA_CFG_REG                     : 0x%X = 0x%08x\n",
+                MV_CESA_CFG_REG,
+                MV_REG_READ( MV_CESA_CFG_REG ) );
+
+    mvOsPrintf("MV_CESA_STATUS_REG                  : 0x%X = 0x%08x\n",
+                MV_CESA_STATUS_REG,
+                MV_REG_READ( MV_CESA_STATUS_REG ) );
+
+    mvOsPrintf("MV_CESA_ISR_CAUSE_REG               : 0x%X = 0x%08x\n",
+                MV_CESA_ISR_CAUSE_REG,
+                MV_REG_READ( MV_CESA_ISR_CAUSE_REG ) );
+
+    mvOsPrintf("MV_CESA_ISR_MASK_REG                : 0x%X = 0x%08x\n",
+                MV_CESA_ISR_MASK_REG,
+                MV_REG_READ( MV_CESA_ISR_MASK_REG ) );
+#if (MV_CESA_VERSION >= 2)
+    mvOsPrintf("MV_CESA_TDMA_CTRL_REG               : 0x%X = 0x%08x\n",
+                MV_CESA_TDMA_CTRL_REG,
+                MV_REG_READ( MV_CESA_TDMA_CTRL_REG ) );
+
+    mvOsPrintf("MV_CESA_TDMA_BYTE_COUNT_REG         : 0x%X = 0x%08x\n",
+                MV_CESA_TDMA_BYTE_COUNT_REG,
+                MV_REG_READ( MV_CESA_TDMA_BYTE_COUNT_REG ) );
+
+    mvOsPrintf("MV_CESA_TDMA_SRC_ADDR_REG           : 0x%X = 0x%08x\n",
+                MV_CESA_TDMA_SRC_ADDR_REG,
+                MV_REG_READ( MV_CESA_TDMA_SRC_ADDR_REG ) );
+
+    mvOsPrintf("MV_CESA_TDMA_DST_ADDR_REG           : 0x%X = 0x%08x\n",
+                MV_CESA_TDMA_DST_ADDR_REG,
+                MV_REG_READ( MV_CESA_TDMA_DST_ADDR_REG ) );
+
+    mvOsPrintf("MV_CESA_TDMA_NEXT_DESC_PTR_REG      : 0x%X = 0x%08x\n",
+                MV_CESA_TDMA_NEXT_DESC_PTR_REG,
+                MV_REG_READ( MV_CESA_TDMA_NEXT_DESC_PTR_REG ) );
+
+    mvOsPrintf("MV_CESA_TDMA_CURR_DESC_PTR_REG      : 0x%X = 0x%08x\n",
+                MV_CESA_TDMA_CURR_DESC_PTR_REG,
+                MV_REG_READ( MV_CESA_TDMA_CURR_DESC_PTR_REG ) );
+
+    mvOsPrintf("MV_CESA_TDMA_ERROR_CAUSE_REG        : 0x%X = 0x%08x\n",
+                MV_CESA_TDMA_ERROR_CAUSE_REG,
+                MV_REG_READ( MV_CESA_TDMA_ERROR_CAUSE_REG ) );
+
+    mvOsPrintf("MV_CESA_TDMA_ERROR_MASK_REG         : 0x%X = 0x%08x\n",
+                MV_CESA_TDMA_ERROR_MASK_REG,
+                MV_REG_READ( MV_CESA_TDMA_ERROR_CAUSE_REG ) );
+
+#endif
+}
+
+void    mvCesaDebugStatus(void)
+{
+    mvOsPrintf("\n\t CESA Status\n\n");
+
+    mvOsPrintf("pReqQ=%p, qDepth=%d, reqSize=%ld bytes, qRes=%d, ",
+                pCesaReqFirst, cesaQueueDepth, sizeof(MV_CESA_REQ),
+                cesaReqResources);
+#if (MV_CESA_VERSION >= 3)
+    mvOsPrintf("chainLength=%u\n",cesaChainLength);
+#else
+   mvOsPrintf("\n");
+#endif
+
+    mvOsPrintf("pSAD=%p, maxSA=%d, sizeSA=%ld bytes\n",
+                pCesaSAD, cesaMaxSA, sizeof(MV_CESA_SA));
+
+    mvOsPrintf("\n");
+
+    mvCesaDebugRegs();
+    mvCesaDebugStats();
+    mvCesaDebugStatsClear();
+}
+
+void    mvCesaDebugDescriptor(MV_CESA_DESC* pDesc)
+{
+    mvOsPrintf("config=0x%08x, crSrcOffs=0x%04x, crDstOffs=0x%04x\n",
+            pDesc->config, pDesc->cryptoSrcOffset, pDesc->cryptoDstOffset);
+
+    mvOsPrintf("crLen=0x%04x, crKeyOffs=0x%04x, ivOffs=0x%04x, ivBufOffs=0x%04x\n",
+            pDesc->cryptoDataLen, pDesc->cryptoKeyOffset,
+            pDesc->cryptoIvOffset, pDesc->cryptoIvBufOffset);
+
+    mvOsPrintf("macSrc=0x%04x, digest=0x%04x, macLen=0x%04x, inIv=0x%04x, outIv=0x%04x\n",
+            pDesc->macSrcOffset, pDesc->macDigestOffset, pDesc->macDataLen,
+            pDesc->macInnerIvOffset, pDesc->macOuterIvOffset);
+}
+
+void    mvCesaDebugQueue(int mode)
+{
+    mvOsPrintf("\n\t CESA Request Queue:\n\n");
+
+    mvOsPrintf("pFirstReq=%p, pLastReq=%p, qDepth=%d, reqSize=%ld bytes\n",
+                pCesaReqFirst, pCesaReqLast, cesaQueueDepth, sizeof(MV_CESA_REQ));
+
+    mvOsPrintf("pEmpty=%p, pProcess=%p, qResources=%d\n",
+                pCesaReqEmpty, pCesaReqProcess,
+                cesaReqResources);
+
+    if(mode != 0)
+    {
+        int             count = 0;
+        MV_CESA_REQ*    pReq = pCesaReqFirst;
+
+        for(count=0; count<cesaQueueDepth; count++)
+        {
+            /* Print out requsts */
+            mvOsPrintf("%02d. pReq=%p, state=%s, frag=0x%x, pCmd=%p, pDma=%p, pDesc=%p\n",
+                count, pReq, mvCesaDebugStateStr(pReq->state),
+                pReq->fragMode, pReq->pCmd, pReq->dma[0].pDmaFirst, &pReq->pCesaDesc[0]);
+            if(pReq->fragMode != MV_CESA_FRAG_NONE)
+            {
+                int frag;
+
+                mvOsPrintf("pFrags=%p, num=%d, next=%d, bufOffset=%d, cryptoSize=%d, macSize=%d\n",
+                            &pReq->frags, pReq->frags.numFrag, pReq->frags.nextFrag,
+                            pReq->frags.bufOffset, pReq->frags.cryptoSize, pReq->frags.macSize);
+                for(frag=0; frag<pReq->frags.numFrag; frag++)
+                {
+                    mvOsPrintf("#%d: pDmaFirst=%p, pDesc=%p\n", frag,
+                                pReq->dma[frag].pDmaFirst, &pReq->pCesaDesc[frag]);
+                }
+            }
+            if(mode > 1)
+            {
+                /* Print out Command */
+                mvCesaDebugCmd(pReq->pCmd, mode);
+
+                /* Print out Descriptor */
+                mvCesaDebugDescriptor(&pReq->pCesaDesc[0]);
+            }
+            pReq++;
+        }
+    }
+}
+
+
+void    mvCesaDebugSramSA(MV_CESA_SRAM_SA* pSramSA, int mode)
+{
+    if(pSramSA == NULL)
+    {
+        mvOsPrintf("cesaSramSA: Unexpected pSramSA=%p\n", pSramSA);
+        return;
+    }
+    mvOsPrintf("pSramSA=%p, sizeSramSA=%ld bytes\n",
+                pSramSA, sizeof(MV_CESA_SRAM_SA));
+
+    if(mode != 0)
+    {
+        mvOsPrintf("cryptoKey=%p, maxCryptoKey=%d bytes\n",
+                    pSramSA->cryptoKey, MV_CESA_MAX_CRYPTO_KEY_LENGTH);
+        mvDebugMemDump(pSramSA->cryptoKey, MV_CESA_MAX_CRYPTO_KEY_LENGTH, 1);
+
+        mvOsPrintf("macInnerIV=%p, maxInnerIV=%d bytes\n",
+                    pSramSA->macInnerIV, MV_CESA_MAX_DIGEST_SIZE);
+        mvDebugMemDump(pSramSA->macInnerIV, MV_CESA_MAX_DIGEST_SIZE, 1);
+
+        mvOsPrintf("macOuterIV=%p, maxOuterIV=%d bytes\n",
+                    pSramSA->macOuterIV, MV_CESA_MAX_DIGEST_SIZE);
+        mvDebugMemDump(pSramSA->macOuterIV, MV_CESA_MAX_DIGEST_SIZE, 1);
+    }
+}
+
+void    mvCesaDebugSA(short sid, int mode)
+{
+    MV_CESA_OPERATION   oper;
+    MV_CESA_DIRECTION   dir;
+    MV_CESA_CRYPTO_ALG  cryptoAlg;
+    MV_CESA_CRYPTO_MODE cryptoMode;
+    MV_CESA_MAC_MODE    macMode;
+    MV_CESA_SA*         pSA = &pCesaSAD[sid];
+
+    if( (pSA->valid) || ((pSA->count != 0) && (mode > 0)) || (mode >= 2) )
+    {
+        mvOsPrintf("\n\nCESA SA Entry #%d (%p) - %s (count=%d)\n",
+                    sid, pSA,
+                    pSA->valid ? "Valid" : "Invalid", pSA->count);
+
+        oper = (pSA->config & MV_CESA_OPERATION_MASK) >> MV_CESA_OPERATION_OFFSET;
+        dir  = (pSA->config & MV_CESA_DIRECTION_MASK) >> MV_CESA_DIRECTION_BIT;
+        mvOsPrintf("%s - %s ", mvCesaDebugOperStr(oper),
+                    (dir == MV_CESA_DIR_ENCODE) ? "Encode" : "Decode");
+        if(oper != MV_CESA_MAC_ONLY)
+        {
+            cryptoAlg = (pSA->config & MV_CESA_CRYPTO_ALG_MASK) >> MV_CESA_CRYPTO_ALG_OFFSET;
+            cryptoMode = (pSA->config & MV_CESA_CRYPTO_MODE_MASK) >> MV_CESA_CRYPTO_MODE_BIT;
+            mvOsPrintf("- %s - %s ", mvCesaDebugCryptoAlgStr(cryptoAlg),
+                        (cryptoMode == MV_CESA_CRYPTO_ECB) ? "ECB" : "CBC");
+        }
+        if(oper != MV_CESA_CRYPTO_ONLY)
+        {
+            macMode = (pSA->config & MV_CESA_MAC_MODE_MASK) >> MV_CESA_MAC_MODE_OFFSET;
+            mvOsPrintf("- %s ", mvCesaDebugMacModeStr(macMode));
+        }
+        mvOsPrintf("\n");
+
+        if(mode > 0)
+        {
+            mvOsPrintf("config=0x%08x, cryptoKeySize=%d, digestSize=%d\n",
+                        pCesaSAD[sid].config, pCesaSAD[sid].cryptoKeyLength,
+                        pCesaSAD[sid].digestSize);
+
+            mvCesaDebugSramSA(pCesaSAD[sid].pSramSA, mode);
+        }
+    }
+}
+
+
+/**/
+void    mvCesaDebugSram(int mode)
+{
+    mvOsPrintf("\n\t SRAM contents: size=%ld, pVirt=%p\n\n",
+            sizeof(MV_CESA_SRAM_MAP), cesaSramVirtPtr);
+
+    mvOsPrintf("\n\t Sram buffer: size=%d, pVirt=%p\n",
+                    MV_CESA_MAX_BUF_SIZE, cesaSramVirtPtr->buf);
+        if(mode != 0)
+            mvDebugMemDump(cesaSramVirtPtr->buf, 64, 1);
+
+    mvOsPrintf("\n");
+    mvOsPrintf("\n\t Sram descriptor: size=%ld, pVirt=%p\n",
+                    sizeof(MV_CESA_DESC), &cesaSramVirtPtr->desc);
+    if(mode != 0)
+    {
+        mvOsPrintf("\n");
+        mvCesaDebugDescriptor(&cesaSramVirtPtr->desc);
+    }
+    mvOsPrintf("\n\t Sram IV: size=%d, pVirt=%p\n",
+                    MV_CESA_MAX_IV_LENGTH, &cesaSramVirtPtr->cryptoIV);
+    if(mode != 0)
+    {
+        mvOsPrintf("\n");
+        mvDebugMemDump(cesaSramVirtPtr->cryptoIV, MV_CESA_MAX_IV_LENGTH, 1);
+    }
+    mvOsPrintf("\n");
+    mvCesaDebugSramSA(&cesaSramVirtPtr->sramSA, 0);
+}
+
+void    mvCesaDebugSAD(int mode)
+{
+    int sid;
+
+    mvOsPrintf("\n\t Cesa SAD status: pSAD=%p, maxSA=%d\n",
+                pCesaSAD, cesaMaxSA);
+
+    for(sid=0; sid<cesaMaxSA; sid++)
+    {
+        mvCesaDebugSA(sid, mode);
+    }
+}
+
+void    mvCesaDebugStats(void)
+{
+    mvOsPrintf("\n\t Cesa Statistics\n");
+
+    mvOsPrintf("Opened=%u, Closed=%u\n",
+                cesaStats.openedCount, cesaStats.closedCount);
+    mvOsPrintf("Req=%u, maxReq=%u, frags=%u, start=%u\n",
+                cesaStats.reqCount, cesaStats.maxReqCount,
+                cesaStats.fragCount, cesaStats.startCount);
+#if (MV_CESA_VERSION >= 3)
+    mvOsPrintf("maxChainUsage=%u\n",cesaStats.maxChainUsage);
+#endif
+    mvOsPrintf("\n");
+    mvOsPrintf("proc=%u, ready=%u, notReady=%u\n",
+                cesaStats.procCount, cesaStats.readyCount, cesaStats.notReadyCount);
+}
+
+void    mvCesaDebugStatsClear(void)
+{
+    memset(&cesaStats, 0, sizeof(cesaStats));
+}
diff --git a/crypto/ocf/kirkwood/cesa/mvCesaRegs.h b/crypto/ocf/kirkwood/cesa/mvCesaRegs.h
new file mode 100644
index 000000000000..340e407b7e1c
--- /dev/null
+++ b/crypto/ocf/kirkwood/cesa/mvCesaRegs.h
@@ -0,0 +1,356 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __mvCesaRegs_h__
+#define __mvCesaRegs_h__
+
+#include "mvTypes.h"
+
+typedef struct
+{
+    /* word 0 */
+    MV_U32  config;
+    /* word 1 */
+    MV_U16  cryptoSrcOffset;
+    MV_U16  cryptoDstOffset;
+    /* word 2 */
+    MV_U16  cryptoDataLen;
+    MV_U16  reserved1;
+    /* word 3 */
+    MV_U16  cryptoKeyOffset;
+    MV_U16  reserved2;
+    /* word 4 */
+    MV_U16  cryptoIvOffset;
+    MV_U16  cryptoIvBufOffset;
+    /* word 5 */
+    MV_U16  macSrcOffset;
+    MV_U16  macTotalLen;
+    /* word 6 */
+    MV_U16  macDigestOffset;
+    MV_U16  macDataLen;
+    /* word 7 */
+    MV_U16  macInnerIvOffset;
+    MV_U16  macOuterIvOffset;
+
+} MV_CESA_DESC;
+
+/* operation */
+typedef enum
+{
+    MV_CESA_MAC_ONLY         = 0,
+    MV_CESA_CRYPTO_ONLY      = 1,
+    MV_CESA_MAC_THEN_CRYPTO  = 2,
+    MV_CESA_CRYPTO_THEN_MAC  = 3,
+
+    MV_CESA_MAX_OPERATION
+
+} MV_CESA_OPERATION;
+
+#define MV_CESA_OPERATION_OFFSET        0
+#define MV_CESA_OPERATION_MASK          (0x3 << MV_CESA_OPERATION_OFFSET)
+
+/* mac algorithm */
+typedef enum
+{
+    MV_CESA_MAC_NULL        = 0,
+    MV_CESA_MAC_MD5         = 4,
+    MV_CESA_MAC_SHA1        = 5,
+    MV_CESA_MAC_HMAC_MD5    = 6,
+    MV_CESA_MAC_HMAC_SHA1   = 7,
+
+} MV_CESA_MAC_MODE;
+
+#define MV_CESA_MAC_MODE_OFFSET         4
+#define MV_CESA_MAC_MODE_MASK           (0x7 << MV_CESA_MAC_MODE_OFFSET)
+
+typedef enum
+{
+    MV_CESA_MAC_DIGEST_FULL = 0,
+    MV_CESA_MAC_DIGEST_96B  = 1,
+
+} MV_CESA_MAC_DIGEST_SIZE;
+
+#define MV_CESA_MAC_DIGEST_SIZE_BIT     7
+#define MV_CESA_MAC_DIGEST_SIZE_MASK    (1 << MV_CESA_MAC_DIGEST_SIZE_BIT)
+
+
+typedef enum
+{
+    MV_CESA_CRYPTO_NULL = 0,
+    MV_CESA_CRYPTO_DES  = 1,
+    MV_CESA_CRYPTO_3DES = 2,
+    MV_CESA_CRYPTO_AES  = 3,
+
+} MV_CESA_CRYPTO_ALG;
+
+#define MV_CESA_CRYPTO_ALG_OFFSET       8
+#define MV_CESA_CRYPTO_ALG_MASK         (0x3 << MV_CESA_CRYPTO_ALG_OFFSET)
+
+
+/* direction */
+typedef enum
+{
+    MV_CESA_DIR_ENCODE = 0,
+    MV_CESA_DIR_DECODE = 1,
+
+} MV_CESA_DIRECTION;
+
+#define MV_CESA_DIRECTION_BIT           12
+#define MV_CESA_DIRECTION_MASK          (1 << MV_CESA_DIRECTION_BIT)
+
+/* crypto IV mode */
+typedef enum
+{
+    MV_CESA_CRYPTO_ECB = 0,
+    MV_CESA_CRYPTO_CBC = 1,
+
+    /* NO HW Support */
+    MV_CESA_CRYPTO_CTR = 10,
+
+} MV_CESA_CRYPTO_MODE;
+
+#define MV_CESA_CRYPTO_MODE_BIT         16
+#define MV_CESA_CRYPTO_MODE_MASK        (1 << MV_CESA_CRYPTO_MODE_BIT)
+
+/* 3DES mode */
+typedef enum
+{
+    MV_CESA_CRYPTO_3DES_EEE = 0,
+    MV_CESA_CRYPTO_3DES_EDE = 1,
+
+} MV_CESA_CRYPTO_3DES_MODE;
+
+#define MV_CESA_CRYPTO_3DES_MODE_BIT    20
+#define MV_CESA_CRYPTO_3DES_MODE_MASK   (1 << MV_CESA_CRYPTO_3DES_MODE_BIT)
+
+
+/* AES Key Length */
+typedef enum
+{
+    MV_CESA_CRYPTO_AES_KEY_128 = 0,
+    MV_CESA_CRYPTO_AES_KEY_192 = 1,
+    MV_CESA_CRYPTO_AES_KEY_256 = 2,
+
+} MV_CESA_CRYPTO_AES_KEY_LEN;
+
+#define MV_CESA_CRYPTO_AES_KEY_LEN_OFFSET   24
+#define MV_CESA_CRYPTO_AES_KEY_LEN_MASK     (0x3 << MV_CESA_CRYPTO_AES_KEY_LEN_OFFSET)
+
+/* Fragmentation mode */
+typedef enum
+{
+    MV_CESA_FRAG_NONE   = 0,
+    MV_CESA_FRAG_FIRST  = 1,
+    MV_CESA_FRAG_LAST   = 2,
+    MV_CESA_FRAG_MIDDLE = 3,
+
+} MV_CESA_FRAG_MODE;
+
+#define MV_CESA_FRAG_MODE_OFFSET            30
+#define MV_CESA_FRAG_MODE_MASK              (0x3 << MV_CESA_FRAG_MODE_OFFSET)
+/*---------------------------------------------------------------------------*/
+
+/********** Security Accelerator Command Register **************/
+#define MV_CESA_CMD_REG                     (MV_CESA_REG_BASE + 0xE00)
+
+#define MV_CESA_CMD_CHAN_ENABLE_BIT         0
+#define MV_CESA_CMD_CHAN_ENABLE_MASK        (1 << MV_CESA_CMD_CHAN_ENABLE_BIT)
+
+#define MV_CESA_CMD_CHAN_DISABLE_BIT        2
+#define MV_CESA_CMD_CHAN_DISABLE_MASK       (1 << MV_CESA_CMD_CHAN_DISABLE_BIT)
+
+/********** Security Accelerator Descriptor Pointers Register **********/
+#define MV_CESA_CHAN_DESC_OFFSET_REG        (MV_CESA_REG_BASE + 0xE04)
+
+/********** Security Accelerator Configuration Register **********/
+#define MV_CESA_CFG_REG                     (MV_CESA_REG_BASE + 0xE08)
+
+#define MV_CESA_CFG_STOP_DIGEST_ERR_BIT     0
+#define MV_CESA_CFG_STOP_DIGEST_ERR_MASK    (1 << MV_CESA_CFG_STOP_DIGEST_ERR_BIT)
+
+#define MV_CESA_CFG_WAIT_DMA_BIT            7
+#define MV_CESA_CFG_WAIT_DMA_MASK           (1 << MV_CESA_CFG_WAIT_DMA_BIT)
+
+#define MV_CESA_CFG_ACT_DMA_BIT             9
+#define MV_CESA_CFG_ACT_DMA_MASK            (1 << MV_CESA_CFG_ACT_DMA_BIT)
+
+#define MV_CESA_CFG_CHAIN_MODE_BIT          11
+#define MV_CESA_CFG_CHAIN_MODE_MASK         (1 << MV_CESA_CFG_CHAIN_MODE_BIT)
+
+/********** Security Accelerator Status Register ***********/
+#define MV_CESA_STATUS_REG                  (MV_CESA_REG_BASE + 0xE0C)
+
+#define MV_CESA_STATUS_ACTIVE_BIT           0
+#define MV_CESA_STATUS_ACTIVE_MASK          (1 << MV_CESA_STATUS_ACTIVE_BIT)
+
+#define MV_CESA_STATUS_DIGEST_ERR_BIT       8
+#define MV_CESA_STATUS_DIGEST_ERR_MASK      (1 << MV_CESA_STATUS_DIGEST_ERR_BIT)
+
+
+/* Cryptographic Engines and Security Accelerator Interrupt Cause Register */
+#define MV_CESA_ISR_CAUSE_REG               (MV_CESA_REG_BASE + 0xE20)
+
+/* Cryptographic Engines and Security Accelerator Interrupt Mask Register */
+#define MV_CESA_ISR_MASK_REG                (MV_CESA_REG_BASE + 0xE24)
+
+#define MV_CESA_CAUSE_AUTH_MASK             (1 << 0)
+#define MV_CESA_CAUSE_DES_MASK              (1 << 1)
+#define MV_CESA_CAUSE_AES_ENCR_MASK         (1 << 2)
+#define MV_CESA_CAUSE_AES_DECR_MASK         (1 << 3)
+#define MV_CESA_CAUSE_DES_ALL_MASK          (1 << 4)
+
+#define MV_CESA_CAUSE_ACC_BIT               5
+#define MV_CESA_CAUSE_ACC_MASK              (1 << MV_CESA_CAUSE_ACC_BIT)
+
+#define MV_CESA_CAUSE_ACC_DMA_BIT           7
+#define MV_CESA_CAUSE_ACC_DMA_MASK          (1 << MV_CESA_CAUSE_ACC_DMA_BIT)
+#define MV_CESA_CAUSE_ACC_DMA_ALL_MASK      (3 << MV_CESA_CAUSE_ACC_DMA_BIT)
+
+#define MV_CESA_CAUSE_DMA_COMPL_BIT         9
+#define MV_CESA_CAUSE_DMA_COMPL_MASK        (1 << MV_CESA_CAUSE_DMA_COMPL_BIT)
+
+#define MV_CESA_CAUSE_DMA_OWN_ERR_BIT       10
+#define MV_CESA_CAUSE_DMA_OWN_ERR_MASK      (1 < MV_CESA_CAUSE_DMA_OWN_ERR_BIT)
+
+#define MV_CESA_CAUSE_DMA_CHAIN_PKT_BIT     11
+#define MV_CESA_CAUSE_DMA_CHAIN_PKT_MASK    (1 < MV_CESA_CAUSE_DMA_CHAIN_PKT_BIT)
+
+
+#define MV_CESA_AUTH_DATA_IN_REG            (MV_CESA_REG_BASE + 0xd38)
+#define MV_CESA_AUTH_BIT_COUNT_LOW_REG      (MV_CESA_REG_BASE + 0xd20)
+#define MV_CESA_AUTH_BIT_COUNT_HIGH_REG     (MV_CESA_REG_BASE + 0xd24)
+
+#define MV_CESA_AUTH_INIT_VAL_DIGEST_REG(i) (MV_CESA_REG_BASE + 0xd00 + (i<<2))
+
+#define MV_CESA_AUTH_INIT_VAL_DIGEST_A_REG  (MV_CESA_REG_BASE + 0xd00)
+#define MV_CESA_AUTH_INIT_VAL_DIGEST_B_REG  (MV_CESA_REG_BASE + 0xd04)
+#define MV_CESA_AUTH_INIT_VAL_DIGEST_C_REG  (MV_CESA_REG_BASE + 0xd08)
+#define MV_CESA_AUTH_INIT_VAL_DIGEST_D_REG  (MV_CESA_REG_BASE + 0xd0c)
+#define MV_CESA_AUTH_INIT_VAL_DIGEST_E_REG  (MV_CESA_REG_BASE + 0xd10)
+#define MV_CESA_AUTH_COMMAND_REG            (MV_CESA_REG_BASE + 0xd18)
+
+#define MV_CESA_AUTH_ALGORITHM_BIT          0
+#define MV_CESA_AUTH_ALGORITHM_MD5          (0<<AUTH_ALGORITHM_BIT)
+#define MV_CESA_AUTH_ALGORITHM_SHA1         (1<<AUTH_ALGORITHM_BIT)
+
+#define MV_CESA_AUTH_IV_MODE_BIT            1
+#define MV_CESA_AUTH_IV_MODE_INIT           (0<<AUTH_IV_MODE_BIT)
+#define MV_CESA_AUTH_IV_MODE_CONTINUE       (1<<AUTH_IV_MODE_BIT)
+
+#define MV_CESA_AUTH_DATA_BYTE_SWAP_BIT     2
+#define MV_CESA_AUTH_DATA_BYTE_SWAP_MASK    (1<<AUTH_DATA_BYTE_SWAP_BIT)
+
+
+#define MV_CESA_AUTH_IV_BYTE_SWAP_BIT       4
+#define MV_CESA_AUTH_IV_BYTE_SWAP_MASK      (1<<AUTH_IV_BYTE_SWAP_BIT)
+
+#define MV_CESA_AUTH_TERMINATION_BIT        31
+#define MV_CESA_AUTH_TERMINATION_MASK       (1<<AUTH_TERMINATION_BIT)
+
+
+/*************** TDMA Control Register ************************************************/
+#define MV_CESA_TDMA_CTRL_REG               (MV_CESA_TDMA_REG_BASE + 0x840)
+
+#define MV_CESA_TDMA_BURST_32B              3
+#define MV_CESA_TDMA_BURST_128B             4
+
+#define MV_CESA_TDMA_DST_BURST_OFFSET       0
+#define MV_CESA_TDMA_DST_BURST_ALL_MASK     (0x7<<MV_CESA_TDMA_DST_BURST_OFFSET)
+#define MV_CESA_TDMA_DST_BURST_MASK(burst)  ((burst)<<MV_CESA_TDMA_DST_BURST_OFFSET)
+
+#define MV_CESA_TDMA_OUTSTAND_READ_EN_BIT   4
+#define MV_CESA_TDMA_OUTSTAND_READ_EN_MASK  (1<<MV_CESA_TDMA_OUTSTAND_READ_EN_BIT)
+
+#define MV_CESA_TDMA_SRC_BURST_OFFSET       6
+#define MV_CESA_TDMA_SRC_BURST_ALL_MASK     (0x7<<MV_CESA_TDMA_SRC_BURST_OFFSET)
+#define MV_CESA_TDMA_SRC_BURST_MASK(burst)  ((burst)<<MV_CESA_TDMA_SRC_BURST_OFFSET)
+
+#define MV_CESA_TDMA_CHAIN_MODE_BIT         9
+#define MV_CESA_TDMA_NON_CHAIN_MODE_MASK    (1<<MV_CESA_TDMA_CHAIN_MODE_BIT)
+
+#define MV_CESA_TDMA_BYTE_SWAP_BIT	    11
+#define MV_CESA_TDMA_BYTE_SWAP_MASK	    (0 << MV_CESA_TDMA_BYTE_SWAP_BIT)
+#define MV_CESA_TDMA_NO_BYTE_SWAP_MASK	    (1 << MV_CESA_TDMA_BYTE_SWAP_BIT)
+
+#define MV_CESA_TDMA_ENABLE_BIT		    12
+#define MV_CESA_TDMA_ENABLE_MASK            (1<<MV_CESA_TDMA_ENABLE_BIT)
+
+#define MV_CESA_TDMA_FETCH_NEXT_DESC_BIT    13
+#define MV_CESA_TDMA_FETCH_NEXT_DESC_MASK   (1<<MV_CESA_TDMA_FETCH_NEXT_DESC_BIT)
+
+#define MV_CESA_TDMA_CHAN_ACTIVE_BIT	    14
+#define MV_CESA_TDMA_CHAN_ACTIVE_MASK       (1<<MV_CESA_TDMA_CHAN_ACTIVE_BIT)
+/*------------------------------------------------------------------------------------*/
+
+#define MV_CESA_TDMA_BYTE_COUNT_REG         (MV_CESA_TDMA_REG_BASE + 0x800)
+#define MV_CESA_TDMA_SRC_ADDR_REG           (MV_CESA_TDMA_REG_BASE + 0x810)
+#define MV_CESA_TDMA_DST_ADDR_REG           (MV_CESA_TDMA_REG_BASE + 0x820)
+#define MV_CESA_TDMA_NEXT_DESC_PTR_REG      (MV_CESA_TDMA_REG_BASE + 0x830)
+#define MV_CESA_TDMA_CURR_DESC_PTR_REG      (MV_CESA_TDMA_REG_BASE + 0x870)
+
+#define MV_CESA_TDMA_ERROR_CAUSE_REG        (MV_CESA_TDMA_REG_BASE + 0x8C0)
+#define MV_CESA_TDMA_ERROR_MASK_REG         (MV_CESA_TDMA_REG_BASE + 0x8C4)
+
+
+#endif /* __mvCesaRegs_h__ */
diff --git a/crypto/ocf/kirkwood/cesa/mvCesaTest.c b/crypto/ocf/kirkwood/cesa/mvCesaTest.c
new file mode 100644
index 000000000000..50f8bc9581c9
--- /dev/null
+++ b/crypto/ocf/kirkwood/cesa/mvCesaTest.c
@@ -0,0 +1,3096 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+        this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvOs.h"
+
+#if defined(MV_VXWORKS)
+
+#include "sysLib.h"
+#include "logLib.h"
+#include "tickLib.h"
+#include "intLib.h"
+#include "config.h"
+
+
+SEM_ID      cesaSemId = NULL;
+SEM_ID      cesaWaitSemId = NULL;
+
+#define CESA_TEST_LOCK(flags)       flags = intLock()
+#define CESA_TEST_UNLOCK(flags)     intUnlock(flags)
+
+#define CESA_TEST_WAIT_INIT()       cesaWaitSemId = semBCreate(SEM_Q_PRIORITY, SEM_EMPTY)
+#define CESA_TEST_WAKE_UP()         semGive(cesaWaitSemId)
+#define CESA_TEST_WAIT(cond, ms)    semTake(cesaWaitSemId, (sysClkRateGet()*ms)/1000)
+
+#define CESA_TEST_TICK_GET()        tickGet()
+#define CESA_TEST_TICK_TO_MS(tick)  (((tick)*1000)/sysClkRateGet())
+
+#elif defined(MV_LINUX)
+
+#include <linux/wait.h>
+wait_queue_head_t   cesaTest_waitq;
+spinlock_t          cesaLock;
+
+#define CESA_TEST_LOCK(flags)       spin_lock_irqsave( &cesaLock, flags)
+#define CESA_TEST_UNLOCK(flags)     spin_unlock_irqrestore( &cesaLock, flags);
+
+#define CESA_TEST_WAIT_INIT()       init_waitqueue_head(&cesaTest_waitq)
+#define CESA_TEST_WAKE_UP()         wake_up(&cesaTest_waitq)
+#define CESA_TEST_WAIT(cond, ms)    wait_event_timeout(cesaTest_waitq, (cond), msecs_to_jiffies(ms))
+
+#define CESA_TEST_TICK_GET()        jiffies
+#define CESA_TEST_TICK_TO_MS(tick)  jiffies_to_msecs(tick)
+
+#elif defined(MV_NETBSD)
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+static int	cesaLock;
+
+#define	CESA_TEST_LOCK(flags)		flags = splnet()
+#define	CESA_TEST_UNLOCK(flags)		splx(flags)
+
+#define	CESA_TEST_WAIT_INIT()		/* nothing */
+#define	CESA_TEST_WAKE_UP()		wakeup(&cesaLock)
+#define	CESA_TEST_WAIT(cond, ms)	\
+do {					\
+	while (!(cond))			\
+		tsleep(&cesaLock, PWAIT, "cesatest",mstohz(ms)); \
+} while (/*CONSTCOND*/0)
+
+#define	CESA_TEST_TICK_GET()		hardclock_ticks
+#define	CESA_TEST_TICK_TO_MS(tick)	((1000/hz)*(tick))
+
+#define	request_irq(i,h,t,n,a)	\
+	!mv_intr_establish((i),IPL_NET,(int(*)(void *))(h),(a))
+
+#else
+#error "Only Linux, VxWorks, or NetBSD OS are supported"
+#endif
+
+#include "mvDebug.h"
+
+#include "mvSysHwConfig.h"
+#include "boardEnv/mvBoardEnvLib.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+#include "cntmr/mvCntmr.h"
+#include "cesa/mvCesa.h"
+#include "cesa/mvCesaRegs.h"
+#include "cesa/mvMD5.h"
+#include "cesa/mvSHA1.h"
+
+#if defined(CONFIG_MV646xx)
+#include "marvell_pic.h"
+#endif
+
+#define MV_CESA_USE_TIMER_ID    0
+#define CESA_DEF_BUF_SIZE       1500
+#define CESA_DEF_BUF_NUM        1
+#define CESA_DEF_SESSION_NUM    32
+
+#define CESA_DEF_ITER_NUM       100
+
+#define CESA_DEF_REQ_SIZE       256
+
+
+/* CESA Tests Debug */
+#undef CESA_TEST_DEBUG
+
+#ifdef CESA_TEST_DEBUG
+
+#   define CESA_TEST_DEBUG_PRINT(msg)   mvOsPrintf msg
+#   define CESA_TEST_DEBUG_CODE(code)   code
+
+typedef struct
+{
+    int             type;       /* 0 - isrEmpty, 1 - cesaReadyGet, 2 - cesaAction */
+    MV_U32          timeStamp;
+    MV_U32          cause;
+    MV_U32          realCause;
+    MV_U32          dmaCause;
+    int             resources;
+    MV_CESA_REQ*    pReqReady;
+    MV_CESA_REQ*    pReqEmpty;
+    MV_CESA_REQ*    pReqProcess;
+} MV_CESA_TEST_TRACE;
+
+#define MV_CESA_TEST_TRACE_SIZE      25
+
+static int cesaTestTraceIdx = 0;
+static MV_CESA_TEST_TRACE    cesaTestTrace[MV_CESA_TEST_TRACE_SIZE];
+
+static void cesaTestTraceAdd(int type, MV_U32 cause)
+{
+    cesaTestTrace[cesaTestTraceIdx].type = type;
+    cesaTestTrace[cesaTestTraceIdx].cause = cause;
+    cesaTestTrace[cesaTestTraceIdx].realCause = MV_REG_READ(MV_CESA_ISR_CAUSE_REG);
+    cesaTestTrace[cesaTestTraceIdx].dmaCause = MV_REG_READ(IDMA_CAUSE_REG);
+    cesaTestTrace[cesaTestTraceIdx].resources = cesaReqResources;
+    cesaTestTrace[cesaTestTraceIdx].pReqReady = pCesaReqReady;
+    cesaTestTrace[cesaTestTraceIdx].pReqEmpty = pCesaReqEmpty;
+    cesaTestTrace[cesaTestTraceIdx].pReqProcess = pCesaReqProcess;
+    cesaTestTrace[cesaTestTraceIdx].timeStamp = mvCntmrRead(MV_CESA_USE_TIMER_ID);
+    cesaTestTraceIdx++;
+    if(cesaTestTraceIdx == MV_CESA_TEST_TRACE_SIZE)
+        cesaTestTraceIdx = 0;
+}
+
+#else
+
+#   define CESA_TEST_DEBUG_PRINT(msg)
+#   define CESA_TEST_DEBUG_CODE(code)
+
+#endif /* CESA_TEST_DEBUG */
+
+int                 cesaExpReqId=0;
+int                 cesaCbIter=0;
+
+int                 cesaIdx;
+int                 cesaIteration;
+int                 cesaRateSize;
+int                 cesaReqSize;
+unsigned long       cesaTaskId;
+int                 cesaBufNum;
+int                 cesaBufSize;
+int                 cesaCheckOffset;
+int                 cesaCheckSize;
+int                 cesaCheckMode;
+int                 cesaTestIdx;
+int                 cesaCaseIdx;
+
+
+MV_U32      cesaTestIsrCount = 0;
+MV_U32      cesaTestIsrMissCount = 0;
+
+MV_U32      cesaCryptoError = 0;
+MV_U32      cesaReqIdError  = 0;
+MV_U32      cesaError = 0;
+
+char*       cesaHexBuffer = NULL;
+
+char*       cesaBinBuffer = NULL;
+char*       cesaExpBinBuffer = NULL;
+
+char*       cesaInputHexStr  = NULL;
+char*       cesaOutputHexStr = NULL;
+
+MV_BUF_INFO         cesaReqBufs[CESA_DEF_REQ_SIZE];
+
+MV_CESA_COMMAND*    cesaCmdRing;
+MV_CESA_RESULT      cesaResult;
+
+int                 cesaTestFull = 0;
+
+MV_BOOL             cesaIsReady = MV_FALSE;
+MV_U32              cesaCycles = 0;
+MV_U32              cesaBeginTicks = 0;
+MV_U32              cesaEndTicks = 0;
+MV_U32              cesaRate = 0;
+MV_U32              cesaRateAfterDot = 0;
+
+void 		    *cesaTestOSHandle = NULL;
+
+enum
+{
+    CESA_FAST_CHECK_MODE = 0,
+    CESA_FULL_CHECK_MODE,
+    CESA_NULL_CHECK_MODE,
+    CESA_SHOW_CHECK_MODE,
+    CESA_SW_SHOW_CHECK_MODE,
+    CESA_SW_NULL_CHECK_MODE,
+
+    CESA_MAX_CHECK_MODE
+};
+
+enum
+{
+    DES_TEST_TYPE         = 0,
+    TRIPLE_DES_TEST_TYPE  = 1,
+    AES_TEST_TYPE         = 2,
+    MD5_TEST_TYPE         = 3,
+    SHA_TEST_TYPE         = 4,
+    COMBINED_TEST_TYPE    = 5,
+
+    MAX_TEST_TYPE
+};
+
+/* Tests data base */
+typedef struct
+{
+    short           sid;
+    char            cryptoAlgorithm;    /* DES/3DES/AES */
+    char            cryptoMode;         /* ECB or CBC */
+    char            macAlgorithm;       /* MD5 / SHA1 */
+    char            operation;          /* CRYPTO/HMAC/CRYPTO+HMAC/HMAC+CRYPTO */
+    char            direction;          /* ENCODE(SIGN)/DECODE(VERIFY) */
+    unsigned char*  pCryptoKey;
+    int             cryptoKeySize;
+    unsigned char*  pMacKey;
+    int             macKeySize;
+    const char*     name;
+
+} MV_CESA_TEST_SESSION;
+
+typedef struct
+{
+    MV_CESA_TEST_SESSION*   pSessions;
+    int                     numSessions;
+
+} MV_CESA_TEST_DB_ENTRY;
+
+typedef struct
+{
+    char*           plainHexStr;
+    char*           cipherHexStr;
+    unsigned char*  pCryptoIV;
+    int             cryptoLength;
+    int             macLength;
+    int             digestOffset;
+
+} MV_CESA_TEST_CASE;
+
+typedef struct
+{
+    int     size;
+    const char* outputHexStr;
+
+} MV_CESA_SIZE_TEST;
+
+static unsigned char    cryptoKey1[] = {0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
+                                        0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
+                                        0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef};
+
+static unsigned char    cryptoKey7[] = {0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef};
+static unsigned char    iv1[]        = {0x12, 0x34, 0x56, 0x78, 0x90, 0xab, 0xcd, 0xef};
+
+
+static unsigned char    cryptoKey2[] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+                                        0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F};
+
+static unsigned char    cryptoKey3[] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+                                        0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+                                        0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17};
+
+static unsigned char    cryptoKey4[] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+                                        0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+                                        0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+                                        0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f};
+
+static unsigned char    cryptoKey5[] = {0x56, 0xe4, 0x7a, 0x38, 0xc5, 0x59, 0x89, 0x74,
+                                        0xbc, 0x46, 0x90, 0x3d, 0xba, 0x29, 0x03, 0x49};
+
+
+static unsigned char    key3des1[]   = {0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF,
+                                        0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0x01,
+                                        0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0x01, 0x23};
+
+/*  Input ASCII string: The quick brown fox jump  */
+static char  plain3des1[]           =   "54686520717566636B2062726F776E20666F78206A756D70";
+static char  cipher3des1[]          =   "A826FD8CE53B855FCCE21C8112256FE668D5C05DD9B6B900";
+
+static unsigned char    key3des2[]  = {0x62, 0x7f, 0x46, 0x0e, 0x08, 0x10, 0x4a, 0x10,
+                                       0x43, 0xcd, 0x26, 0x5d, 0x58, 0x40, 0xea, 0xf1,
+                                       0x31, 0x3e, 0xdf, 0x97, 0xdf, 0x2a, 0x8a, 0x8c};
+
+static unsigned char    iv3des2[]   = {0x8e, 0x29, 0xf7, 0x5e, 0xa7, 0x7e, 0x54, 0x75};
+
+static char  plain3des2[]           = "326a494cd33fe756";
+
+static char  cipher3desCbc2[]       = "8e29f75ea77e5475"
+                                      "b22b8d66de970692";
+
+static unsigned char    key3des3[]  = {0x37, 0xae, 0x5e, 0xbf, 0x46, 0xdf, 0xf2, 0xdc,
+                                       0x07, 0x54, 0xb9, 0x4f, 0x31, 0xcb, 0xb3, 0x85,
+                                       0x5e, 0x7f, 0xd3, 0x6d, 0xc8, 0x70, 0xbf, 0xae};
+
+static unsigned char    iv3des3[]   = {0x3d, 0x1d, 0xe3, 0xcc, 0x13, 0x2e, 0x3b, 0x65};
+
+static char  plain3des3[]           = "84401f78fe6c10876d8ea23094ea5309";
+
+static char  cipher3desCbc3[]       = "3d1de3cc132e3b65"
+                                      "7b1f7c7e3b1c948ebd04a75ffba7d2f5";
+
+static unsigned char    iv5[]        = {0x8c, 0xe8, 0x2e, 0xef, 0xbe, 0xa0, 0xda, 0x3c,
+                                        0x44, 0x69, 0x9e, 0xd7, 0xdb, 0x51, 0xb7, 0xd9};
+
+static unsigned char    aesCtrKey[]  = {0x76, 0x91, 0xBE, 0x03, 0x5E, 0x50, 0x20, 0xA8,
+                                        0xAC, 0x6E, 0x61, 0x85, 0x29, 0xF9, 0xA0, 0xDC};
+
+static unsigned char    mdKey1[]     = {0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+                                        0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b};
+
+static unsigned char    mdKey2[]     = {0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+                                        0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa};
+
+static unsigned char    shaKey1[]    = {0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+                                        0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+                                        0x0b, 0x0b, 0x0b, 0x0b};
+
+static unsigned char    shaKey2[]    = {0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+                                        0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+                                        0xaa, 0xaa, 0xaa, 0xaa};
+
+static unsigned char    mdKey4[]     = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+                                        0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10};
+
+static unsigned char    shaKey4[]    = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+                                        0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
+                                        0x11, 0x12, 0x13, 0x14};
+
+
+static MV_CESA_TEST_SESSION   desTestSessions[] =
+{
+/*000*/ {-1, MV_CESA_CRYPTO_DES, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+             MV_CESA_DIR_ENCODE,
+             cryptoKey7, sizeof(cryptoKey7)/sizeof(cryptoKey7[0]),
+             NULL, 0,
+             "DES ECB encode",
+        },
+/*001*/ {-1, MV_CESA_CRYPTO_DES, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+             MV_CESA_DIR_DECODE,
+             cryptoKey7, sizeof(cryptoKey7)/sizeof(cryptoKey7[0]),
+             NULL, 0,
+             "DES ECB decode",
+        },
+/*002*/ {-1, MV_CESA_CRYPTO_DES, MV_CESA_CRYPTO_CBC,
+             MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+             MV_CESA_DIR_ENCODE,
+             cryptoKey7, sizeof(cryptoKey7)/sizeof(cryptoKey7[0]),
+             NULL, 0,
+             "DES CBC encode"
+        },
+/*003*/ {-1, MV_CESA_CRYPTO_DES, MV_CESA_CRYPTO_CBC,
+             MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+             MV_CESA_DIR_DECODE,
+             cryptoKey7, sizeof(cryptoKey7)/sizeof(cryptoKey7[0]),
+             NULL, 0,
+             "DES CBC decode"
+        },
+/*004*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+             MV_CESA_DIR_ENCODE,
+             NULL, 0, NULL, 0,
+              "NULL Crypto Algorithm encode"
+        },
+};
+
+
+static MV_CESA_TEST_SESSION   tripleDesTestSessions[] =
+{
+/*100*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+             MV_CESA_DIR_ENCODE,
+             cryptoKey1, sizeof(cryptoKey1)/sizeof(cryptoKey1[0]),
+             NULL, 0,
+             "3DES ECB encode",
+        },
+/*101*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+             MV_CESA_DIR_DECODE,
+             cryptoKey1, sizeof(cryptoKey1)/sizeof(cryptoKey1[0]),
+             NULL, 0,
+             "3DES ECB decode",
+        },
+/*102*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_CBC,
+             MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+             MV_CESA_DIR_ENCODE,
+             cryptoKey1, sizeof(cryptoKey1)/sizeof(cryptoKey1[0]),
+             NULL, 0,
+             "3DES CBC encode"
+        },
+/*103*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_CBC,
+             MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+             MV_CESA_DIR_DECODE,
+             cryptoKey1, sizeof(cryptoKey1)/sizeof(cryptoKey1[0]),
+             NULL, 0,
+             "3DES CBC decode"
+        },
+/*104*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+             MV_CESA_DIR_ENCODE,
+             key3des1, sizeof(key3des1),
+             NULL, 0,
+             "3DES ECB encode"
+        },
+/*105*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_CBC,
+             MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+             MV_CESA_DIR_ENCODE,
+             key3des2, sizeof(key3des2),
+             NULL, 0,
+             "3DES ECB encode"
+        },
+/*106*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_CBC,
+             MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+             MV_CESA_DIR_ENCODE,
+             key3des3, sizeof(key3des3),
+             NULL, 0,
+             "3DES ECB encode"
+        },
+};
+
+
+static MV_CESA_TEST_SESSION   aesTestSessions[] =
+{
+/*200*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+             MV_CESA_DIR_ENCODE,
+             cryptoKey2, sizeof(cryptoKey2)/sizeof(cryptoKey2[0]),
+             NULL, 0,
+             "AES-128 ECB encode"
+        },
+/*201*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+             MV_CESA_DIR_DECODE,
+             cryptoKey2, sizeof(cryptoKey2)/sizeof(cryptoKey2[0]),
+             NULL, 0,
+             "AES-128 ECB decode"
+        },
+/*202*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_CBC,
+             MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+             MV_CESA_DIR_ENCODE,
+             cryptoKey5, sizeof(cryptoKey5)/sizeof(cryptoKey5[0]),
+             NULL, 0,
+             "AES-128 CBC encode"
+        },
+/*203*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_CBC,
+             MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+             MV_CESA_DIR_DECODE,
+             cryptoKey5, sizeof(cryptoKey5)/sizeof(cryptoKey5[0]),
+             NULL, 0,
+             "AES-128 CBC decode"
+        },
+/*204*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+             MV_CESA_DIR_ENCODE,
+             cryptoKey3, sizeof(cryptoKey3)/sizeof(cryptoKey3[0]),
+             NULL, 0,
+             "AES-192 ECB encode"
+        },
+/*205*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+             MV_CESA_DIR_DECODE,
+             cryptoKey3, sizeof(cryptoKey3)/sizeof(cryptoKey3[0]),
+             NULL, 0,
+             "AES-192 ECB decode"
+        },
+/*206*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+             MV_CESA_DIR_ENCODE,
+             cryptoKey4, sizeof(cryptoKey4)/sizeof(cryptoKey4[0]),
+             NULL, 0,
+             "AES-256 ECB encode"
+        },
+/*207*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+             MV_CESA_DIR_DECODE,
+             cryptoKey4, sizeof(cryptoKey4)/sizeof(cryptoKey4[0]),
+             NULL, 0,
+             "AES-256 ECB decode"
+        },
+/*208*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_CTR,
+             MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+             MV_CESA_DIR_ENCODE,
+             aesCtrKey, sizeof(aesCtrKey)/sizeof(aesCtrKey[0]),
+             NULL, 0,
+             "AES-128 CTR encode"
+        },
+};
+
+
+static MV_CESA_TEST_SESSION   md5TestSessions[] =
+{
+/*300*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_HMAC_MD5, MV_CESA_MAC_ONLY,
+             MV_CESA_DIR_ENCODE,
+             NULL, 0,
+             mdKey1, sizeof(mdKey1),
+             "HMAC-MD5 Generate Signature"
+        },
+/*301*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_HMAC_MD5, MV_CESA_MAC_ONLY,
+             MV_CESA_DIR_DECODE,
+             NULL, 0,
+             mdKey1, sizeof(mdKey1),
+             "HMAC-MD5 Verify Signature"
+        },
+/*302*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_HMAC_MD5, MV_CESA_MAC_ONLY,
+             MV_CESA_DIR_ENCODE,
+             NULL, 0,
+             mdKey2, sizeof(mdKey2),
+             "HMAC-MD5 Generate Signature"
+        },
+/*303*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_HMAC_MD5, MV_CESA_MAC_ONLY,
+             MV_CESA_DIR_DECODE,
+             NULL, 0,
+             mdKey2, sizeof(mdKey2),
+             "HMAC-MD5 Verify Signature"
+        },
+/*304*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_HMAC_MD5, MV_CESA_MAC_ONLY,
+             MV_CESA_DIR_ENCODE,
+             NULL, 0,
+             mdKey4, sizeof(mdKey4),
+             "HMAC-MD5 Generate Signature"
+        },
+/*305*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_MD5, MV_CESA_MAC_ONLY,
+             MV_CESA_DIR_ENCODE,
+             NULL, 0,
+             NULL, 0,
+             "HASH-MD5 Generate Signature"
+        },
+};
+
+
+static MV_CESA_TEST_SESSION   shaTestSessions[] =
+{
+/*400*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_HMAC_SHA1, MV_CESA_MAC_ONLY,
+             MV_CESA_DIR_ENCODE,
+             NULL, 0,
+             shaKey1, sizeof(shaKey1),
+             "HMAC-SHA1 Generate Signature"
+        },
+/*401*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_HMAC_SHA1, MV_CESA_MAC_ONLY,
+             MV_CESA_DIR_DECODE,
+             NULL, 0,
+             shaKey1, sizeof(shaKey1),
+             "HMAC-SHA1 Verify Signature"
+        },
+/*402*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_HMAC_SHA1, MV_CESA_MAC_ONLY,
+             MV_CESA_DIR_ENCODE,
+             NULL, 0,
+             shaKey2, sizeof(shaKey2),
+             "HMAC-SHA1 Generate Signature"
+        },
+/*403*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_HMAC_SHA1, MV_CESA_MAC_ONLY,
+             MV_CESA_DIR_DECODE,
+             NULL, 0,
+             shaKey2, sizeof(shaKey2),
+             "HMAC-SHA1 Verify Signature"
+        },
+/*404*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_HMAC_SHA1, MV_CESA_MAC_ONLY,
+             MV_CESA_DIR_ENCODE,
+             NULL, 0,
+             shaKey4, sizeof(shaKey4),
+             "HMAC-SHA1 Generate Signature"
+        },
+/*405*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_SHA1, MV_CESA_MAC_ONLY,
+             MV_CESA_DIR_ENCODE,
+             NULL, 0,
+             NULL, 0,
+             "HASH-SHA1 Generate Signature"
+        },
+};
+
+static MV_CESA_TEST_SESSION   combinedTestSessions[] =
+{
+/*500*/ {-1, MV_CESA_CRYPTO_DES, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_HMAC_MD5, MV_CESA_CRYPTO_THEN_MAC,
+             MV_CESA_DIR_ENCODE,
+             cryptoKey1, MV_CESA_DES_KEY_LENGTH,
+             mdKey4, sizeof(mdKey4),
+             "DES + MD5 encode"
+        },
+/*501*/ {-1, MV_CESA_CRYPTO_DES, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_HMAC_SHA1, MV_CESA_CRYPTO_THEN_MAC,
+             MV_CESA_DIR_ENCODE,
+             cryptoKey1, MV_CESA_DES_KEY_LENGTH,
+             shaKey4, sizeof(shaKey4),
+             "DES + SHA1 encode"
+        },
+/*502*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_HMAC_MD5, MV_CESA_CRYPTO_THEN_MAC,
+             MV_CESA_DIR_ENCODE,
+             cryptoKey1, sizeof(cryptoKey1)/sizeof(cryptoKey1[0]),
+             mdKey4, sizeof(mdKey4),
+             "3DES + MD5 encode"
+        },
+/*503*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_HMAC_SHA1, MV_CESA_CRYPTO_THEN_MAC,
+             MV_CESA_DIR_ENCODE,
+             cryptoKey1, sizeof(cryptoKey1)/sizeof(cryptoKey1[0]),
+             shaKey4, sizeof(shaKey4),
+             "3DES + SHA1 encode"
+        },
+/*504*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_CBC,
+             MV_CESA_MAC_HMAC_MD5, MV_CESA_CRYPTO_THEN_MAC,
+             MV_CESA_DIR_ENCODE,
+             cryptoKey1, sizeof(cryptoKey1)/sizeof(cryptoKey1[0]),
+             mdKey4, sizeof(mdKey4),
+             "3DES CBC + MD5 encode"
+        },
+/*505*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_CBC,
+             MV_CESA_MAC_HMAC_SHA1, MV_CESA_CRYPTO_THEN_MAC,
+             MV_CESA_DIR_ENCODE,
+             cryptoKey1, sizeof(cryptoKey1)/sizeof(cryptoKey1[0]),
+             shaKey4, sizeof(shaKey4),
+             "3DES CBC + SHA1 encode"
+        },
+/*506*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_CBC,
+             MV_CESA_MAC_HMAC_MD5, MV_CESA_CRYPTO_THEN_MAC,
+             MV_CESA_DIR_ENCODE,
+             cryptoKey5, sizeof(cryptoKey5)/sizeof(cryptoKey5[0]),
+             mdKey4, sizeof(mdKey4),
+             "AES-128 CBC + MD5 encode"
+        },
+/*507*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_CBC,
+             MV_CESA_MAC_HMAC_SHA1, MV_CESA_CRYPTO_THEN_MAC,
+             MV_CESA_DIR_ENCODE,
+             cryptoKey5, sizeof(cryptoKey5)/sizeof(cryptoKey5[0]),
+             shaKey4, sizeof(shaKey4),
+             "AES-128 CBC + SHA1 encode"
+        },
+/*508*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_ECB,
+             MV_CESA_MAC_HMAC_MD5, MV_CESA_MAC_THEN_CRYPTO,
+             MV_CESA_DIR_DECODE,
+             cryptoKey1, sizeof(cryptoKey1)/sizeof(cryptoKey1[0]),
+             mdKey4, sizeof(mdKey4),
+             "HMAC-MD5 + 3DES decode"
+        },
+};
+
+
+static MV_CESA_TEST_DB_ENTRY cesaTestsDB[MAX_TEST_TYPE+1] =
+{
+    { desTestSessions,       sizeof(desTestSessions)/sizeof(desTestSessions[0]) },
+    { tripleDesTestSessions, sizeof(tripleDesTestSessions)/sizeof(tripleDesTestSessions[0]) },
+    { aesTestSessions,       sizeof(aesTestSessions)/sizeof(aesTestSessions[0]) },
+    { md5TestSessions,       sizeof(md5TestSessions)/sizeof(md5TestSessions[0]) },
+    { shaTestSessions,       sizeof(shaTestSessions)/sizeof(shaTestSessions[0]) },
+    { combinedTestSessions,  sizeof(combinedTestSessions)/sizeof(combinedTestSessions[0]) },
+    { NULL,                  0 }
+};
+
+
+char  cesaNullPlainHexText[]   = "000000000000000000000000000000000000000000000000";
+
+char  cesaPlainAsciiText[]     = "Now is the time for all ";
+char  cesaPlainHexEbc[]        = "4e6f77206973207468652074696d6520666f7220616c6c20";
+char  cesaCipherHexEcb[]       = "3fa40e8a984d48156a271787ab8883f9893d51ec4b563b53";
+char  cesaPlainHexCbc[]        = "1234567890abcdef4e6f77206973207468652074696d6520666f7220616c6c20";
+char  cesaCipherHexCbc[]       = "1234567890abcdefe5c7cdde872bf27c43e934008c389c0f683788499a7c05f6";
+
+char  cesaAesPlainHexEcb[]     = "000102030405060708090a0b0c0d0e0f";
+char  cesaAes128cipherHexEcb[] = "0a940bb5416ef045f1c39458c653ea5a";
+char  cesaAes192cipherHexEcb[] = "0060bffe46834bb8da5cf9a61ff220ae";
+char  cesaAes256cipherHexEcb[] = "5a6e045708fb7196f02e553d02c3a692";
+
+char  cesaAsciiStr1[]          = "Hi There";
+char  cesaDataHexStr1[]        = "4869205468657265";
+char  cesaHmacMd5digestHex1[]  = "9294727a3638bb1c13f48ef8158bfc9d";
+char  cesaHmacSha1digestHex1[] = "b617318655057264e28bc0b6fb378c8ef146be00";
+char  cesaDataAndMd5digest1[]  = "48692054686572659294727a3638bb1c13f48ef8158bfc9d";
+char  cesaDataAndSha1digest1[] = "4869205468657265b617318655057264e28bc0b6fb378c8ef146be00";
+
+char  cesaAesPlainText[]       = "a0a1a2a3a4a5a6a7a8a9aaabacadaeaf"
+                                 "b0b1b2b3b4b5b6b7b8b9babbbcbdbebf"
+                                 "c0c1c2c3c4c5c6c7c8c9cacbcccdcecf"
+                                 "d0d1d2d3d4d5d6d7d8d9dadbdcdddedf";
+
+char  cesaAes128CipherCbc[]    = "c30e32ffedc0774e6aff6af0869f71aa"
+                                 "0f3af07a9a31a9c684db207eb0ef8e4e"
+                                 "35907aa632c3ffdf868bb7b29d3d46ad"
+                                 "83ce9f9a102ee99d49a53e87f4c3da55";
+
+char  cesaAesIvPlainText[]     = "8ce82eefbea0da3c44699ed7db51b7d9"
+                                 "a0a1a2a3a4a5a6a7a8a9aaabacadaeaf"
+                                 "b0b1b2b3b4b5b6b7b8b9babbbcbdbebf"
+                                 "c0c1c2c3c4c5c6c7c8c9cacbcccdcecf"
+                                 "d0d1d2d3d4d5d6d7d8d9dadbdcdddedf";
+
+char  cesaAes128IvCipherCbc[]  = "8ce82eefbea0da3c44699ed7db51b7d9"
+                                 "c30e32ffedc0774e6aff6af0869f71aa"
+                                 "0f3af07a9a31a9c684db207eb0ef8e4e"
+                                 "35907aa632c3ffdf868bb7b29d3d46ad"
+                                 "83ce9f9a102ee99d49a53e87f4c3da55";
+
+char  cesaAesCtrPlain[]        = "00E0017B27777F3F4A1786F000000001"
+                                 "000102030405060708090A0B0C0D0E0F"
+                                 "101112131415161718191A1B1C1D1E1F"
+                                 "20212223";
+
+char  cesaAesCtrCipher[]       = "00E0017B27777F3F4A1786F000000001"
+                                 "C1CF48A89F2FFDD9CF4652E9EFDB72D7"
+                                 "4540A42BDE6D7836D59A5CEAAEF31053"
+                                 "25B2072F";
+
+
+
+/* Input cesaHmacHex3 is '0xdd' repeated 50 times */
+char  cesaHmacMd5digestHex3[]  = "56be34521d144c88dbb8c733f0e8b3f6";
+char  cesaHmacSha1digestHex3[] = "125d7342b9ac11cd91a39af48aa17b4f63f175d3";
+char  cesaDataHexStr3[50*2+1]          = "";
+char  cesaDataAndMd5digest3[sizeof(cesaDataHexStr3)+sizeof(cesaHmacMd5digestHex3)+8*2+1] = "";
+char  cesaDataAndSha1digest3[sizeof(cesaDataHexStr3)+sizeof(cesaHmacSha1digestHex3)+8*2+1] = "";
+
+/* Ascii string is "abc" */
+char hashHexStr3[] = "616263";
+char hashMd5digest3[] = "900150983cd24fb0d6963f7d28e17f72";
+char hashSha1digest3[] = "a9993e364706816aba3e25717850c26c9cd0d89d";
+
+char hashHexStr80[]     = "31323334353637383930"
+                          "31323334353637383930"
+                          "31323334353637383930"
+                          "31323334353637383930"
+                          "31323334353637383930"
+                          "31323334353637383930"
+                          "31323334353637383930"
+                          "31323334353637383930";
+
+char hashMd5digest80[]           = "57edf4a22be3c955ac49da2e2107b67a";
+
+char tripleDesThenMd5digest80[]  = "b7726a03aad490bd6c5a452a89a1b271";
+char tripleDesThenSha1digest80[] = "b2ddeaca91030eab5b95a234ef2c0f6e738ff883";
+
+char cbc3desThenMd5digest80[]    = "6f463057e1a90e0e91ae505b527bcec0";
+char cbc3desThenSha1digest80[]   = "1b002ed050be743aa98860cf35659646bb8efcc0";
+
+char cbcAes128ThenMd5digest80[]  = "6b6e863ac5a71d15e3e9b1c86c9ba05f";
+char cbcAes128ThenSha1digest80[] = "13558472d1fc1c90dffec6e5136c7203452d509b";
+
+
+static MV_CESA_TEST_CASE  cesaTestCases[] =
+{
+ /*     plainHexStr          cipherHexStr               IV    crypto  mac     digest */
+ /*                                                           Length  Length  Offset */
+ /*0*/ { NULL,               NULL,                      NULL,   0,      0,      -1  },
+ /*1*/ { cesaPlainHexEbc,    cesaCipherHexEcb,          NULL,   24,     0,      -1  },
+ /*2*/ { cesaPlainHexCbc,    cesaCipherHexCbc,          NULL,   24,     0,      -1  },
+ /*3*/ { cesaAesPlainHexEcb, cesaAes128cipherHexEcb,    NULL,   16,     0,      -1  },
+ /*4*/ { cesaAesPlainHexEcb, cesaAes192cipherHexEcb,    NULL,   16,     0,      -1  },
+ /*5*/ { cesaAesPlainHexEcb, cesaAes256cipherHexEcb,    NULL,   16,     0,      -1  },
+ /*6*/ { cesaDataHexStr1,    cesaHmacMd5digestHex1,     NULL,   0,      8,      -1  },
+ /*7*/ { NULL,               cesaDataAndMd5digest1,     NULL,   0,      8,      -1  },
+ /*8*/ { cesaDataHexStr3,    cesaHmacMd5digestHex3,     NULL,   0,      50,     -1  },
+ /*9*/ { NULL,               cesaDataAndMd5digest3,     NULL,   0,      50,     -1  },
+/*10*/ { cesaAesPlainText,   cesaAes128IvCipherCbc,     iv5,    64,     0,      -1  },
+/*11*/ { cesaDataHexStr1,    cesaHmacSha1digestHex1,    NULL,   0,      8,      -1  },
+/*12*/ { NULL,               cesaDataAndSha1digest1,    NULL,   0,      8,      -1  },
+/*13*/ { cesaDataHexStr3,    cesaHmacSha1digestHex3,    NULL,   0,      50,     -1  },
+/*14*/ { NULL,               cesaDataAndSha1digest3,    NULL,   0,      50,     -1  },
+/*15*/ { hashHexStr3,        hashMd5digest3,            NULL,   0,      3,      -1  },
+/*16*/ { hashHexStr3,        hashSha1digest3,           NULL,   0,      3,      -1  },
+/*17*/ { hashHexStr80,       tripleDesThenMd5digest80,  NULL,   80,     80,     -1  },
+/*18*/ { hashHexStr80,       tripleDesThenSha1digest80, NULL,   80,     80,     -1  },
+/*19*/ { hashHexStr80,       cbc3desThenMd5digest80,    iv1,    80,     80,     -1  },
+/*20*/ { hashHexStr80,       cbc3desThenSha1digest80,   iv1,    80,     80,     -1  },
+/*21*/ { hashHexStr80,       cbcAes128ThenMd5digest80,  iv5,    80,     80,     -1  },
+/*22*/ { hashHexStr80,       cbcAes128ThenSha1digest80, iv5,    80,     80,     -1  },
+/*23*/ { cesaAesCtrPlain,    cesaAesCtrCipher,          NULL,   36,     0,      -1  },
+/*24*/ { cesaAesIvPlainText, cesaAes128IvCipherCbc,     NULL,   64,     0,      -1  },
+/*25*/ { plain3des1,         cipher3des1,               NULL,   0,      0,      -1  },
+/*26*/ { plain3des2,         cipher3desCbc2,            iv3des2,0,      0,      -1  },
+/*27*/ { plain3des3,         cipher3desCbc3,            iv3des3,0,      0,      -1  },
+};
+
+
+/* Key         = 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ *               0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa
+ * Input 0xdd repeated "size" times
+ */
+static MV_CESA_SIZE_TEST     mdMultiSizeTest302[] =
+{
+    { 80,   "7a031a640c14a4872814930b1ef3a5b2" },
+    { 512,  "5488e6c5a14dc72a79f28312ca5b939b" },
+    { 1000, "d00814f586a8b78a05724239d2531821" },
+    { 1001, "bf07df7b7f49d3f5b5ecacd4e9e63281" },
+    { 1002, "1ed4a1a802e87817a819d4e37bb4d0f7" },
+    { 1003, "5972ab64a4f265ee371dac2f2f137f90" },
+    { 1004, "71f95e7ec3aa7df2548e90898abdb28e" },
+    { 1005, "e082790b4857fcfc266e92e59e608814" },
+    { 1006, "9500f02fd8ac7fde8b10e4fece9a920d" },
+    { 1336, "e42edcce57d0b75b01aa09d71427948b" },
+    { 1344, "bb5454ada0deb49ba0a97ffd60f57071" },
+    { 1399, "0f44d793e744b24d53f44f295082ee8c" },
+    { 1400, "359de8a03a9b707928c6c60e0e8d79f1" },
+    { 1401, "e913858b484cbe2b384099ea88d8855b" },
+    { 1402, "d9848a164af53620e0540c1d7d87629e" },
+    { 1403, "0c9ee1c2c9ef45e9b625c26cbaf3e822" },
+    { 1404, "12edd4f609416e3c936170360561b064" },
+    { 1405, "7fc912718a05446395345009132bf562" },
+    { 1406, "882f17425e579ff0d85a91a59f308aa0" },
+    { 1407, "005cae408630a2fb5db82ad9db7e59da" },
+    { 1408, "64655f8b404b3fea7a3e3e609bc5088f" },
+    { 1409, "4a145284a7f74e01b6bb1a0ec6a0dd80" },
+    { 2048, "67caf64475650732def374ebb8bde3fd" },
+    { 2049, "6c84f11f472825f7e6cd125c2981884b" },
+    { 2050, "8999586754a73a99efbe4dbad2816d41" },
+    { 2051, "ba6946b610e098d286bc81091659dfff" },
+    { 2052, "d0afa01c92d4d13def2b024f36faed83" },
+    { 3072, "61d8beac61806afa2585d74a9a0e6974" },
+    { 3074, "f6501a28dcc24d1e4770505c51a87ed3" },
+    { 3075, "ea4a6929be67e33e61ff475369248b73" },
+    { 4048, "aa8c4d68f282a07e7385acdfa69f4bed" },
+    { 4052, "afb5ed2c0e1d430ea59e59ed5ed6b18a" },
+    { 4058, "9e8553f9bdd43aebe0bd729f0e600c99" },
+    { 6144, "f628f3e5d183fe5cdd3a5abee39cf872" },
+    { 6150, "89a3efcea9a2f25f919168ad4a1fd292" },
+    { 6400, "cdd176b7fb747873efa4da5e32bdf88f" },
+    { 6528, "b1d707b027354aca152c45ee559ccd3f" },
+    { 8192, "c600ea4429ac47f9941f09182166e51a" },
+    {16384, "16e8754bfbeb4c649218422792267a37" },
+    {18432, "0fd0607521b0aa8b52219cfbe215f63e" },
+    { 0, NULL },
+};
+
+/* Key         = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ *               0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ */
+static MV_CESA_SIZE_TEST     mdMultiSizeTest304[] =
+{
+    { 80,   "a456c4723fee6068530af5a2afa71627" },
+    { 512,  "f85c2a2344f5de68b432208ad13e5794" },
+    { 1000, "35464d6821fd4a293a41eb84e274c8c5" },
+    { 1001, "c08eedbdce60cceb54bc2d732bb32c8b" },
+    { 1002, "5664f71800c011cc311cb6943339c1b8" },
+    { 1003, "779c723b044c585dc7802b13e8501bdc" },
+    { 1004, "55e500766a2c307bc5c5fdd15e4cacd4" },
+    { 1005, "d5f978954f5c38529d1679d2b714f068" },
+    { 1006, "cd3efc827ce628b7281b72172693abf9" },
+    { 1336, "6f04479910785878ae6335b8d1e87edf" },
+    { 1344, "b6d27b50c2bce1ba2a8e1b5cc4324368" },
+    { 1399, "65f70a1d4c86e5eaeb0704c8a7816795" },
+    { 1400, "3394b5adc4cb3ff98843ca260a44a88a" },
+    { 1401, "3a06f3582033a66a4e57e0603ce94e74" },
+    { 1402, "e4d97f5ed51edc48abfa46eeb5c31752" },
+    { 1403, "3d05e40b080ee3bedf293cb87b7140e7" },
+    { 1404, "8cf294fc3cd153ab18dccb2a52cbf244" },
+    { 1405, "d1487bd42f6edd9b4dab316631159221" },
+    { 1406, "0527123b6bf6936cf5d369dc18c6c70f" },
+    { 1407, "3224a06639db70212a0cd1ae1fcc570a" },
+    { 1408, "a9e13335612c0356f5e2c27086e86c43" },
+    { 1409, "a86d1f37d1ed8a3552e9a4f04dceea98" },
+    { 2048, "396905c9b961cd0f6152abfb69c4449c" },
+    { 2049, "49f39bff85d9dcf059fadb89efc4a70f" },
+    { 2050, "3a2b4823bc4d0415656550226a63e34a" },
+    { 2051, "dec60580d406c782540f398ad0bcc7e0" },
+    { 2052, "32f76610a14310309eb748fe025081bf" },
+    { 3072, "45edc1a42bf9d708a621076b63b774da" },
+    { 3074, "9be1b333fe7c0c9f835fb369dc45f778" },
+    { 3075, "8c06fcac7bd0e7b7a17fd6508c09a549" },
+    { 4048, "0ddaef848184bf0ad98507a10f1e90e4" },
+    { 4052, "81976bcaeb274223983996c137875cb8" },
+    { 4058, "0b0a7a1c82bc7cbc64d8b7cd2dc2bb22" },
+    { 6144, "1c24056f52725ede2dff0d7f9fc9855f" },
+    { 6150, "b7f4b65681c4e43ee68ca466ca9ca4ec" },
+    { 6400, "443bbaab9f7331ddd4bf11b659cd43c8" },
+    { 6528, "216f44f23047cfee03a7a64f88f9a995" },
+    { 8192, "ac7a993b2cad54879dba1bde63e39097" },
+    { 8320, "55ed7be9682d6c0025b3221a62088d08" },
+    {16384, "c6c722087653b62007aea668277175e5" },
+    {18432, "f1faca8e907872c809e14ffbd85792d6" },
+    { 0, NULL },
+};
+
+/* HASH-MD5
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ *               repeated "size" times
+ */
+static MV_CESA_SIZE_TEST     mdMultiSizeTest305[] =
+{
+    { 80,   "57edf4a22be3c955ac49da2e2107b67a" },
+    { 512,  "c729ae8f0736cc377a9767a660eaa04e" },
+    { 1000, "f1257a8659eb92d36fe14c6bf3852a6a" },
+    { 1001, "f8a46fe8ea04fdc8c7de0e84042d3878" },
+    { 1002, "da188dd67bff87d58aa3c02af2d0cc0f" },
+    { 1003, "961753017feee04c9b93a8e51658a829" },
+    { 1004, "dd68c4338608dcc87807a711636bf2af" },
+    { 1005, "e338d567d3ce66bf69ada29658a8759b" },
+    { 1006, "443c9811e8b92599b0b149e8d7ec700a" },
+    { 1336, "89a98511706008ba4cbd0b4a24fa5646" },
+    { 1344, "335a919805f370b9e402a62c6fe01739" },
+    { 1399, "5d18d0eddcd84212fe28d812b5e80e3b" },
+    { 1400, "6b695c240d2dffd0dffc99459ca76db6" },
+    { 1401, "49590f61298a76719bc93a57a30136f5" },
+    { 1402, "94c2999fa3ef1910a683d69b2b8476f2" },
+    { 1403, "37073a02ab00ecba2645c57c228860db" },
+    { 1404, "1bcd06994fce28b624f0c5fdc2dcdd2b" },
+    { 1405, "11b93671a64c95079e8cf9e7cddc8b3d" },
+    { 1406, "4b6695772a4c66313fa4871017d05f36" },
+    { 1407, "d1539b97fbfda1c075624e958de19c5b" },
+    { 1408, "b801b9b69920907cd018e8063092ede9" },
+    { 1409, "b765f1406cfe78e238273ed01bbcaf7e" },
+    { 2048, "1d7e2c64ac29e2b3fb4c272844ed31f5" },
+    { 2049, "71d38fac49c6b1f4478d8d88447bcdd0" },
+    { 2050, "141c34a5592b1bebfa731e0b23d0cdba" },
+    { 2051, "c5e1853f21c59f5d6039bd13d4b380d8" },
+    { 2052, "dd44a0d128b63d4b5cccd967906472d7" },
+    { 3072, "37d158e33b21390822739d13db7b87fe" },
+    { 3074, "aef3b209d01d39d0597fe03634bbf441" },
+    { 3075, "335ffb428eabf210bada96d74d5a4012" },
+    { 4048, "2434c2b43d798d2819487a886261fc64" },
+    { 4052, "ac2fa84a8a33065b2e92e36432e861f8" },
+    { 4058, "856781f85616c341c3533d090c1e1e84" },
+    { 6144, "e5d134c652c18bf19833e115f7a82e9b" },
+    { 6150, "a09a353be7795fac2401dac5601872e6" },
+    { 6400, "08b9033ac6a1821398f50af75a2dbc83" },
+    { 6528, "3d47aa193a8540c091e7e02f779e6751" },
+    { 8192, "d3164e710c0626f6f395b38f20141cb7" },
+    { 8320, "b727589d9183ff4e8491dd24466974a3" },
+    {16384, "3f54d970793d2274d5b20d10a69938ac" },
+    {18432, "f558511dcf81985b7a1bb57fad970531" },
+    { 0, NULL },
+};
+
+
+/* Key         = 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ *               0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa
+ *               0xaa, 0xaa, 0xaa, 0xaa
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ */
+static MV_CESA_SIZE_TEST     shaMultiSizeTest402[] =
+{
+    { 80,   "e812f370e659705a1649940d1f78cd7af18affd3" },
+    { 512,  "e547f886b2c15d995ed76a8a924cb408c8080f66" },
+    { 1000, "239443194409f1a5342ecde1a092c8f3a3ed790a" },
+    { 1001, "f278ab9a102850a9f48dc4e9e6822afe2d0c52b5" },
+    { 1002, "8bcc667df5ab6ece988b3af361d09747c77f4e72" },
+    { 1003, "0fae6046c7dc1d3e356b25af836f6077a363f338" },
+    { 1004, "0ea48401cc92ae6bc92ae76685269cb0167fbe1a" },
+    { 1005, "ecbcd7c879b295bafcd8766cbeac58cc371e31d1" },
+    { 1006, "eb4a4a3d07d1e9a15e6f1ab8a9c47f243e27324c" },
+    { 1336, "f5950ee1d77c10e9011d2149699c9366fe52529c" },
+    { 1344, "b04263604a63c351b0b3b9cf1785b4bdba6c8838" },
+    { 1399, "8cb1cff61d5b784045974a2fc69386e3b8d24218" },
+    { 1400, "9bb2f3fcbeddb2b90f0be797cd647334a2816d51" },
+    { 1401, "23ae462a7a0cb440f7445791079a5d75a535dd33" },
+    { 1402, "832974b524a4d3f9cc2f45a3cabf5ccef65cd2aa" },
+    { 1403, "d1c683742fe404c3c20d5704a5430e7832a7ec95" },
+    { 1404, "867c79042e64f310628e219d8b85594cd0c7adc3" },
+    { 1405, "c9d81d49d13d94358f56ccfd61af02b36c69f7c3" },
+    { 1406, "0df43daab2786172f9b8d07d61f14a070cf1287a" },
+    { 1407, "0fd8f3ad7f169534b274d4c66bbddd89f759e391" },
+    { 1408, "3987511182b18473a564436003139b808fa46343" },
+    { 1409, "ef667e063c9e9f539a8987a8d0bd3066ee85d901" },
+    { 2048, "921109c99f3fedaca21727156d5f2b4460175327" },
+    { 2049, "47188600dd165eb45f27c27196d3c46f4f042c1b" },
+    { 2050, "8831939904009338de10e7fa670847041387807d" },
+    { 2051, "2f8ebb5db2997d614e767be1050366f3641e7520" },
+    { 2052, "669e51cd730dae158d3bef8adba075bd95a0d011" },
+    { 3072, "cfee66cfd83abc8451af3c96c6b35a41cc6c55f5" },
+    { 3074, "216ea26f02976a261b7d21a4dd3085157bedfabd" },
+    { 3075, "bd612ebba021fd8e012b14c3bd60c8c5161fabc0" },
+    { 4048, "c2564c1fdf2d5e9d7dde7aace2643428e90662e8" },
+    { 4052, "91ce61fe924b445dfe7b5a1dcd10a27caec16df6" },
+    { 4058, "db2a9be5ee8124f091c7ebd699266c5de223c164" },
+    { 6144, "855109903feae2ba3a7a05a326b8a171116eb368" },
+    { 6150, "37520bb3a668294d9c7b073e7e3daf8fee248a78" },
+    { 6400, "60a353c841b6d2b1a05890349dad2fa33c7536b7" },
+    { 6528, "9e53a43a69bb42d7c8522ca8bd632e421d5edb36" },
+    { 8192, "a918cb0da862eaea0a33ee0efea50243e6b4927c" },
+    { 8320, "29a5dcf55d1db29cd113fcf0572ae414f1c71329" },
+    {16384, "6fb27966138e0c8d5a0d65ace817ebd53633cee1" },
+    {18432, "ca09900d891c7c9ae2a559b10f63a217003341c1" },
+    { 0, NULL },
+};
+
+/* Key         = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ *               0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ *               0x11, 0x12, 0x13, 0x14
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ */
+static MV_CESA_SIZE_TEST     shaMultiSizeTest404[] =
+{
+    { 80,   "beaf20a34b06a87558d156c0949bc3957d40222e" },
+    { 512,  "3353955358d886bc2940a3c7f337ff7dafb59c7b" },
+    { 1000, "8737a542c5e9b2b6244b757ebb69d5bd602a829f" },
+    { 1001, "fd9e7582d8a5d3c9fe3b923e4e6a41b07a1eb4d4" },
+    { 1002, "a146d14a6fc3c274ff600568f4d75b977989e00d" },
+    { 1003, "be22601bbc027ddef2dec97d30b3dc424fd803c5" },
+    { 1004, "3e71fe99b2fe2b7bfdf4dbf0c7f3da25d7ea35e7" },
+    { 1005, "2c422735d7295408fddd76f5e8a83a2a8da13df3" },
+    { 1006, "6d875319049314b61855101a647b9ba3313428e6" },
+    { 1336, "c1631ea80bad9dc43a180712461b65a0598c711c" },
+    { 1344, "816069bf91d34581005746e2e0283d0f9c7b7605" },
+    { 1399, "4e139866dc61cfcb8b67ca2ebd637b3a538593af" },
+    { 1400, "ff2a0f8dd2b02c5417910f6f55d33a78e081a723" },
+    { 1401, "ab00c12be62336964cbce31ae97fe2a0002984d5" },
+    { 1402, "61349e7f999f3a1acc56c3e9a5060a9c4a7b05b6" },
+    { 1403, "3edbc0f61e435bc1317fa27d840076093fb79353" },
+    { 1404, "d052c6dfdbe63d45dab23ef9893e2aa4636aca1e" },
+    { 1405, "0cc16b7388d67bf0add15a31e6e6c753cfae4987" },
+    { 1406, "c96ba7eaad74253c38c22101b558d2850b1d1b90" },
+    { 1407, "3445428a40d2c6556e7c55797ad8d323b61a48d9" },
+    { 1408, "8d6444f937a09317c89834187b8ea9b8d3a8c56b" },
+    { 1409, "c700acd3ecd19014ea2bdb4d42510c467e088475" },
+    { 2048, "ee27d2a0cb77470c2f496212dfd68b5bb7b04e4b" },
+    { 2049, "683762d7a02983b26a6d046e6451d9cd82c25932" },
+    { 2050, "0fd20f1d55a9ee18363c2a6fd54aa13aee69992f" },
+    { 2051, "86c267d8cc4bc8d59090e4f8b303da960fd228b7" },
+    { 2052, "452395ae05b3ec503eea34f86fc0832485ad97c1" },
+    { 3072, "75198e3cfd0b9bcff2dabdf8e38e6fdaa33ca49a" },
+    { 3074, "4e24785ef080141ce4aab4675986d9acea624d7c" },
+    { 3075, "3a20c5978dd637ec0e809bf84f0d9ccf30bc65bf" },
+    { 4048, "3c32da256be7a7554922bf5fed51b0d2d09e59ad" },
+    { 4052, "fff898426ea16e54325ae391a32c6c9bce4c23c0" },
+    { 4058, "c800b9e562e1c91e1310116341a3c91d37f848ec" },
+    { 6144, "d91d509d0cc4376c2d05bf9a5097717a373530e6" },
+    { 6150, "d957030e0f13c5df07d9eec298542d8f94a07f12" },
+    { 6400, "bb745313c3d7dc17b3f955e5534ad500a1082613" },
+    { 6528, "77905f80d9ca82080bbb3e5654896dabfcfd1bdb" },
+    { 8192, "5237fd9a81830c974396f99f32047586612ff3c0" },
+    { 8320, "57668e28d5f2dba0839518a11db0f6af3d7e08bf" },
+    {16384, "62e093fde467f0748087beea32e9af97d5c61241" },
+    {18432, "845fb33130c7d6ea554fd5aacb9c50cf7ccb5929" },
+    { 0, NULL },
+};
+
+/* HASH-SHA1
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ *               repeated "size" times
+ */
+static MV_CESA_SIZE_TEST     shaMultiSizeTest405[] =
+{
+    { 80,   "50abf5706a150990a08b2c5ea40fa0e585554732" },
+    { 512,  "f14516a08948fa27917a974d219741a697ba0087" },
+    { 1000, "0bd18c378d5788817eb4f1e5dc07d867efa5cbf4" },
+    { 1001, "ca29b85c35db1b8aef83c977893a11159d1b7aa2" },
+    { 1002, "d83bc973eaaedb8a31437994dabbb3304b0be086" },
+    { 1003, "2cf7bbef0acd6c00536b5c58ca470df9a3a90b6c" },
+    { 1004, "e4375d09b1223385a8a393066f8209acfd936a80" },
+    { 1005, "1029b38043e027745d019ce1d2d68e3d8b9d8f99" },
+    { 1006, "deea16dcebbd8ac137e2b984deb639b9fb5e9680" },
+    { 1336, "ea031b065fff63dcfb6a41956e4777520cdbc55d" },
+    { 1344, "b52096c6445e6c0a8355995c70dc36ae186c863c" },
+    { 1399, "cde2f6f8379870db4b32cf17471dc828a8dbff2b" },
+    { 1400, "e53ff664064bc09fe5054c650806bd42d8179518" },
+    { 1401, "d1156db5ddafcace64cdb510ff0d4af9b9a8ad64" },
+    { 1402, "34ede0e9a909dd84a2ae291539105c0507b958e1" },
+    { 1403, "a772ca3536da77e6ad3251e4f9e1234a4d7b87c0" },
+    { 1404, "29740fd2b04e7a8bfd32242db6233156ad699948" },
+    { 1405, "65b17397495b70ce4865dad93bf991b74c97cce1" },
+    { 1406, "a7ee89cd0754061fdb91af7ea6abad2c69d542e3" },
+    { 1407, "3eebf82f7420188e23d328b7ce93580b279a5715" },
+    { 1408, "e08d3363a8b9a490dfb3a4c453452b8f114deeec" },
+    { 1409, "95d74df739181a4ff30b8c39e28793a36598e924" },
+    { 2048, "aa40262509c2abf84aab0197f83187fc90056d91" },
+    { 2049, "7dec28ef105bc313bade8d9a7cdeac58b99de5ea" },
+    { 2050, "d2e30f77ec81197de20f56588a156094ecb88450" },
+    { 2051, "6b22ccc874833e96551a39da0c0edcaa0d969d92" },
+    { 2052, "f843141e57875cd669af58744bc60aa9ea59549c" },
+    { 3072, "09c5fedeaa62c132e673cc3c608a00142273d086" },
+    { 3074, "b09e95eea9c7b1b007a58accec488301901a7f3d" },
+    { 3075, "e6226b77b4ada287a8c9bbcf4ed71eec5ce632dc" },
+    { 4048, "e99394894f855821951ddddf5bfc628547435f5c" },
+    { 4052, "32d2f1af38be9cfba6cd03d55a254d0b3e1eb382" },
+    { 4058, "d906552a4f2aca3a22e1fecccbcd183d7289d0ef" },
+    { 6144, "2e7f62d35a860988e1224dc0543204af19316041" },
+    { 6150, "d6b89698ee133df46fec9d552fadc328aa5a1b51" },
+    { 6400, "dff50e90c46853988fa3a4b4ce5dda6945aae976" },
+    { 6528, "9e63ec0430b96db02d38bc78357a2f63de2ab7f8" },
+    { 8192, "971eb71ed60394d5ab5abb12e88420bdd41b5992" },
+    { 8320, "91606a31b46afeaac965cecf87297e791b211013" },
+    {16384, "547f830a5ec1f5f170ce818f156b1002cabc7569" },
+    {18432, "f16f272787f3b8d539652e4dc315af6ab4fda0ef" },
+    { 0, NULL },
+};
+
+/* CryptoKey   = 0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef;
+ * MacKey      = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ *               0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ * Note: only sizes aligned to 3DES block size (8 bytes) allowed
+ */
+static MV_CESA_SIZE_TEST     tripleDesMdMultiSizeTest502[] =
+{
+    {   64, "9586962a2aaaef28803dec2e17807a7f" },
+    {   80, "b7726a03aad490bd6c5a452a89a1b271" },
+    {  352, "f1ed9563aecc3c0d2766eb2bed3b4e4c" },
+    {  512, "0f9decb11ab40fe86f4d4d9397bc020e" },
+    { 1000, "3ba69deac12cab8ff9dff7dbd9669927" },
+    { 1336, "6cf47bf1e80e03e2c1d0945bc50d37d2" },
+    { 1344, "4be388dab21ceb3fa1b8d302e9b821f7" },
+    { 1400, "a58b79fb21dd9bfc6ec93e3b99fb0ef1" },
+    { 1408, "8bc97379fc2ac3237effcdd4f7a86528" },
+    { 2048, "1339f03ab3076f25a20bc4cba16eb5bf" },
+    { 3072, "731204d2d90c4b36ae41f5e1fb874288" },
+    { 4048, "c028d998cfda5642547b7e1ed5ea16e4" },
+    { 6144, "b1b19cd910cc51bd22992f1e59f1e068" },
+    { 6400, "44e4613496ba622deb0e7cb768135a2f" },
+    { 6528, "3b06b0a86f8db9cd67f9448dfcf10549" },
+    { 8192, "d581780b7163138a0f412be681457d82" },
+    {16384, "03b8ac05527faaf1bed03df149c65ccf" },
+    {18432, "677c8a86a41dab6c5d81b85b8fb10ff6" },
+    { 0, NULL },
+};
+
+
+/* CryptoKey   = 0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef;
+ * MacKey      = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ *               0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ *               0x11, 0x12, 0x13, 0x14
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ * Note: only sizes aligned to 3DES block size (8 bytes) allowed
+ */
+static MV_CESA_SIZE_TEST     tripleDesShaMultiSizeTest503[] =
+{
+    {   64, "44a1e9bcbfc1429630d9ea68b7a48b0427a684f2" },
+    {   80, "b2ddeaca91030eab5b95a234ef2c0f6e738ff883" },
+    {  352, "4b91864c7ff629bdff75d9726421f76705452aaf" },
+    {  512, "6dd37faceeb2aa98ba74f4242ed6734a4d546af5" },
+    { 1000, "463661c30300be512a9df40904f0757cde5f1141" },
+    { 1336, "b931f831d9034fe59c65176400b039fe9c1f44a5" },
+    { 1344, "af8866b1cd4a4887d6185bfe72470ffdfb3648e1" },
+    { 1400, "49c6caf07296d5e31d2504d088bc5b20c3ee7cdb" },
+    { 1408, "fcae8deedbc6ebf0763575dc7e9de075b448a0f4" },
+    { 2048, "edece5012146c1faa0dd10f50b183ba5d2af58ac" },
+    { 3072, "5b83625adb43a488b8d64fecf39bb766818547b7" },
+    { 4048, "d2c533678d26c970293af60f14c8279dc708bfc9" },
+    { 6144, "b8f67af4f991b08b725f969b049ebf813bfacc5c" },
+    { 6400, "d9a6c7f746ac7a60ef2edbed2841cf851c25cfb0" },
+    { 6528, "376792b8c8d18161d15579fb7829e6e3a27e9946" },
+    { 8192, "d890eabdca195b34ef8724b28360cffa92ae5655" },
+    {16384, "a167ee52639ec7bf19aee9c6e8f76667c14134b9" },
+    {18432, "e4396ab56f67296b220985a12078f4a0e365d2cc" },
+    { 0, NULL },
+};
+
+/* CryptoKey   = 0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef
+ * IV          = 0x12345678, 0x90abcdef
+ * MacKey      = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ *               0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ * Note: only sizes aligned to 3DES block size (8 bytes) allowed
+ */
+static MV_CESA_SIZE_TEST     cbc3desMdMultiSizeTest504[] =
+{
+    {   64, "8d10e00802460ede0058c139ba48bd2d" },
+    {   80, "6f463057e1a90e0e91ae505b527bcec0" },
+    {  352, "4938d48bdf86aece2c6851e7c6079788" },
+    {  512, "516705d59f3cf810ebf2a13a23a7d42e" },
+    { 1000, "a5a000ee5c830e67ddc6a2d2e5644b31" },
+    { 1336, "44af60087b74ed07950088efbe3b126a" },
+    { 1344, "1f5b39e0577920af731dabbfcf6dfc2a" },
+    { 1400, "6804ea640e29b9cd39e08bc37dbce734" },
+    { 1408, "4fb436624b02516fc9d1535466574bf9" },
+    { 2048, "c909b0985c423d8d86719f701e9e83db" },
+    { 3072, "cfe0bc34ef97213ee3d3f8b10122db21" },
+    { 4048, "03ea10b5ae4ddeb20aed6af373082ed1" },
+    { 6144, "b9a0ff4f87fc14b3c2dc6f0ed0998fdf" },
+    { 6400, "6995f85d9d4985dd99e974ec7dda9dd6" },
+    { 6528, "bbbb548ce2fa3d58467f6a6a5168a0e6" },
+    { 8192, "afe101fbe745bb449ae4f50d10801456" },
+    {16384, "9741706d0b1c923340c4660ff97cacdf" },
+    {18432, "b0217becb73cb8f61fd79c7ce9d023fb" },
+    { 0, NULL },
+};
+
+
+/* CryptoKey   = 0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef;
+ * IV          = 0x12345678, 0x90abcdef
+ * MacKey      = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ *               0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ *               0x11, 0x12, 0x13, 0x14
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ * Note: only sizes aligned to 3DES block size (8 bytes) allowed
+ */
+static MV_CESA_SIZE_TEST     cbc3desShaMultiSizeTest505[] =
+{
+    {   64, "409187e5bdb0be4a7754ca3747f7433dc4f01b98" },
+    {   80, "1b002ed050be743aa98860cf35659646bb8efcc0" },
+    {  352, "6cbf7ebe50fa4fa6eecc19eca23f9eae553ccfff" },
+    {  512, "cfb5253fb4bf72b743320c30c7e48c54965853b0" },
+    { 1000, "95e04e1ca2937e7c5a9aba9e42d2bcdb8a7af21f" },
+    { 1336, "3b5c1f5eee5837ebf67b83ae01405542d77a6627" },
+    { 1344, "2b3d42ab25615437f98a1ee310b81d07a02badc2" },
+    { 1400, "7f8687df7c1af44e4baf3c934b6cca5ab6bc993e" },
+    { 1408, "473a581c5f04f7527d50793c845471ac87e86430" },
+    { 2048, "e41d20cae7ebe34e6e828ed62b1e5734019037bb" },
+    { 3072, "275664afd7a561d804e6b0d204e53939cde653ae" },
+    { 4048, "0d220cc5b34aeeb46bbbd637dde6290b5a8285a3" },
+    { 6144, "cb393ddcc8b1c206060625b7d822ef9839e67bc5" },
+    { 6400, "dd3317e2a627fc04800f74a4b05bfda00fab0347" },
+    { 6528, "8a74c3b2441ab3f5a7e08895cc432566219a7c41" },
+    { 8192, "b8e6ef3a549ed0e005bd5b8b1a5fe6689e9711a7" },
+    {16384, "55f59404008276cdac0e2ba0d193af2d40eac5ce" },
+    {18432, "86ae6c4fc72369a54cce39938e2d0296cd9c6ec5" },
+    { 0, NULL },
+};
+
+
+/* CryptoKey   = 0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef
+ * IV          = 0x12345678, 0x90abcdef
+ * MacKey      = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ *               0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ * Note: only sizes aligned to AES block size (16 bytes) allowed
+ */
+static MV_CESA_SIZE_TEST     cbcAes128md5multiSizeTest506[] =
+{
+    {   16, "7ca4c2ba866751598720c5c4aa0d6786" },
+    {   64, "7dba7fb988e80da609b1fea7254bced8" },
+    {   80, "6b6e863ac5a71d15e3e9b1c86c9ba05f" },
+    {  352, "a1ceb9c2e3021002400d525187a9f38c" },
+    {  512, "596c055c1c55db748379223164075641" },
+    { 1008, "f920989c02f3b3603f53c99d89492377" },
+    { 1344, "2e496b73759d77ed32ea222dbd2e7b41" },
+    { 1408, "7178c046b3a8d772efdb6a71c4991ea4" },
+    { 2048, "a917f0099c69eb94079a8421714b6aad" },
+    { 3072, "693cd5033d7f5391d3c958519fa9e934" },
+    { 4048, "139dca91bcff65b3c40771749052906b" },
+    { 6144, "428d9cef6df4fb70a6e9b6bbe4819e55" },
+    { 6400, "9c0b909e76daa811e12b1fc17000a0c4" },
+    { 6528, "ad876f6297186a7be1f1b907ed860eda" },
+    { 8192, "479cbbaca37dd3191ea1f3e8134a0ef4" },
+    {16384, "60fda559c74f91df538100c9842f2f15" },
+    {18432, "4a3eb1cba1fa45f3981270953f720c42" },
+    { 0, NULL },
+};
+
+
+/* CryptoKey   = 0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef;
+ * IV          = 0x12345678, 0x90abcdef
+ * MacKey      = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ *               0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ *               0x11, 0x12, 0x13, 0x14
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ * Note: only sizes aligned to AES block size (16 bytes) allowed
+ */
+static MV_CESA_SIZE_TEST     cbcAes128sha1multiSizeTest507[] =
+{
+    {   16, "9aa8dc1c45f0946daf78057fa978759c625c1fee" },
+    {   64, "9f588fc1ede851e5f8b20256abc9979465ae2189" },
+    {   80, "13558472d1fc1c90dffec6e5136c7203452d509b" },
+    {  352, "6b93518e006cfaa1f7adb24615e7291fb0a27e06" },
+    {  512, "096874951a77fbbf333e49d80c096ee2016e09bd" },
+    { 1008, "696fc203c2e4b5ae0ec5d1db3f623c490bc6dbac" },
+    { 1344, "79bf77509935ccd3528caaac6a5eb6481f74029b" },
+    { 1408, "627f9462b95fc188e8cfa7eec15119bdc5d4fcf1" },
+    { 2048, "3d50d0c005feba92fe41502d609fced9c882b4d1" },
+    { 3072, "758807e5b983e3a91c06fb218fe0f73f77111e94" },
+    { 4048, "ca90e85242e33f005da3504416a52098d0d31fb2" },
+    { 6144, "8044c1d4fd06642dfc46990b4f18b61ef1e972cf" },
+    { 6400, "166f1f4ea57409f04feba9fb1e39af0e00bd6f43" },
+    { 6528, "0389016a39485d6e330f8b4215ddf718b404f7e9" },
+    { 8192, "6df7ee2a8b61d6f7f860ce8dbf778f0c2a5b508b" },
+    {16384, "a70a6d8dfa1f91ded621c3dbaed34162bc48783f" },
+    {18432, "8dfad627922ce15df1eed10bdbed49244efa57db" },
+    { 0, NULL },
+};
+
+
+void    cesaTestPrintStatus(void);
+
+
+/*------------------------- LOCAL FUNCTIONs ---------------------------------*/
+MV_STATUS testCmd(int sid, int iter, MV_CESA_COMMAND* pCmd,
+                  MV_CESA_TEST_SESSION* pTestSession, MV_U8* pIV, int ivSize);
+MV_STATUS testClose(int idx);
+MV_STATUS testOpen(int idx);
+void close_session(int sid);
+void cesaTestCheckReady(const MV_CESA_RESULT *r);
+void cesaCheckReady(MV_CESA_RESULT* r);
+void printTestResults(int idx, MV_STATUS status, int checkMode);
+void cesaLastResult(void);
+void cesaTestPrintReq(int req, int offset, int size);
+
+void cesaTestPrintStatus(void);
+void cesaTestPrintSession(int idx);
+void sizeTest(int testIdx, int iter, int checkMode);
+void multiTest(int iter, int reqSize, int checkMode);
+void oneTest(int testIdx, int caseIdx,int iter, int reqSize, int checkMode);
+void multiSizeTest(int idx, int iter, int checkMode, char* inputData);
+void cesaTest(int iter, int reqSize, int checkMode);
+void cesaOneTest(int testIdx, int caseIdx,int iter, int reqSize, int checkMode);
+void combiTest(int iter, int reqSize, int checkMode);
+void shaTest(int iter, int reqSize, int checkMode);
+void mdTest(int iter, int reqSize, int checkMode);
+void aesTest(int iter, int reqSize, int checkMode);
+void tripleDesTest(int iter, int reqSize, int checkMode);
+void desTest(int iter, int reqSize, int checkMode);
+void cesaTestStop(void);
+MV_STATUS testRun(int idx, int caseIdx, int iter,int reqSize, int checkMode);
+void cesaTestStart(int bufNum, int bufSize);
+
+
+static MV_U32      getRate(MV_U32* remainder)
+{
+    MV_U32     kBits, milliSec, rate;
+
+    milliSec = 0;
+    if( (cesaEndTicks - cesaBeginTicks) > 0)
+    {
+        milliSec = CESA_TEST_TICK_TO_MS(cesaEndTicks - cesaBeginTicks);
+    }
+    if(milliSec == 0)
+    {
+        if(remainder != NULL)
+            *remainder = 0;
+        return 0;
+    }
+
+    kBits = (cesaIteration*cesaRateSize*8)/1000;
+    rate = kBits/milliSec;
+    if(remainder != NULL)
+        *remainder = ((kBits % milliSec)*10)/milliSec;
+
+    return rate;
+}
+
+static char*    extractMbuf(MV_CESA_MBUF *pMbuf,
+                            int offset, int size, char* hexStr)
+{
+    mvCesaCopyFromMbuf((MV_U8*)cesaBinBuffer, pMbuf, offset, size);
+    mvBinToHex((const MV_U8*)cesaBinBuffer, hexStr, size);
+
+    return hexStr;
+}
+
+static MV_BOOL  cesaCheckMbuf(MV_CESA_MBUF *pMbuf,
+                          const char* hexString, int offset,
+                          int checkSize)
+{
+    MV_BOOL     isFailed = MV_FALSE;
+    MV_STATUS   status;
+    int         size = strlen(hexString)/2;
+    int         checkedSize = 0;
+/*
+    mvOsPrintf("cesaCheckMbuf: pMbuf=%p, offset=%d, checkSize=%d, mBufSize=%d\n",
+                pMbuf, offset, checkSize, pMbuf->mbufSize);
+*/
+    if(pMbuf->mbufSize < (checkSize + offset))
+    {
+        mvOsPrintf("checkSize (%d) is too large: offset=%d, mbufSize=%d\n",
+                    checkSize, offset, pMbuf->mbufSize);
+        return MV_TRUE;
+    }
+    status = mvCesaCopyFromMbuf((MV_U8*)cesaBinBuffer, pMbuf, offset, checkSize);
+    if(status != MV_OK)
+    {
+        mvOsPrintf("CesaTest: Can't copy %d bytes from Mbuf=%p to checkBuf=%p\n",
+                    checkSize, pMbuf, cesaBinBuffer);
+        return MV_TRUE;
+    }
+/*
+    mvDebugMemDump(cesaBinBuffer, size, 1);
+*/
+    mvHexToBin(hexString, (MV_U8*)cesaExpBinBuffer, size);
+
+    /* Compare buffers */
+    while(checkSize > checkedSize)
+    {
+        size = MV_MIN(size, (checkSize - checkedSize));
+        if(memcmp(cesaExpBinBuffer, &cesaBinBuffer[checkedSize], size) != 0)
+        {
+            mvOsPrintf("CheckMbuf failed: checkSize=%d, size=%d, checkedSize=%d\n",
+                        checkSize, size, checkedSize);
+            mvDebugMemDump(&cesaBinBuffer[checkedSize], size, 1);
+            mvDebugMemDump(cesaExpBinBuffer, size, 1);
+
+            isFailed = MV_TRUE;
+            break;
+        }
+        checkedSize += size;
+    }
+
+    return isFailed;
+}
+
+static MV_STATUS    cesaSetMbuf(MV_CESA_MBUF *pMbuf,
+                        const char* hexString,
+                        int offset, int reqSize)
+{
+    MV_STATUS   status = MV_OK;
+    int         copySize, size = strlen(hexString)/2;
+
+    mvHexToBin(hexString, (MV_U8*)cesaBinBuffer, size);
+
+    copySize = 0;
+    while(reqSize > copySize)
+    {
+        size = MV_MIN(size, (reqSize - copySize));
+
+        status = mvCesaCopyToMbuf((MV_U8*)cesaBinBuffer, pMbuf, offset+copySize, size);
+        if(status != MV_OK)
+        {
+            mvOsPrintf("cesaSetMbuf Error: Copy %d of %d bytes to MBuf\n",
+                        copySize, reqSize);
+            break;
+        }
+        copySize += size;
+    }
+    pMbuf->mbufSize = offset+copySize;
+    return status;
+}
+
+static  MV_CESA_TEST_SESSION* getTestSessionDb(int idx, int* pTestIdx)
+{
+    int                 testIdx, dbIdx = idx/100;
+
+    if(dbIdx > MAX_TEST_TYPE)
+    {
+        mvOsPrintf("Wrong index %d - No such test type\n", idx);
+        return NULL;
+    }
+    testIdx = idx % 100;
+
+    if(testIdx >= cesaTestsDB[dbIdx].numSessions)
+    {
+        mvOsPrintf("Wrong index %d - No such test\n", idx);
+        return NULL;
+    }
+    if(pTestIdx != NULL)
+        *pTestIdx = testIdx;
+
+    return  cesaTestsDB[dbIdx].pSessions;
+}
+
+/* Debug */
+void    cesaTestPrintReq(int req, int offset, int size)
+{
+    MV_CESA_MBUF*   pMbuf;
+
+    mvOsPrintf("cesaTestPrintReq: req=%d, offset=%d, size=%d\n",
+                req, offset, size);
+    mvDebugMemDump(cesaCmdRing, 128, 4);
+
+    pMbuf = cesaCmdRing[req].pSrc;
+    mvCesaDebugMbuf("src", pMbuf, offset,size);
+    pMbuf = cesaCmdRing[req].pDst;
+    mvCesaDebugMbuf("dst", pMbuf, offset, size);
+
+    cesaTestPrintStatus();
+}
+
+void    cesaLastResult(void)
+{
+        mvOsPrintf("Last Result: ReqId = %d, SessionId = %d, rc = (%d)\n",
+                (MV_U32)cesaResult.pReqPrv, cesaResult.sessionId,
+                cesaResult.retCode);
+}
+
+void    printTestResults(int idx, MV_STATUS status, int checkMode)
+{
+    int                     testIdx;
+    MV_CESA_TEST_SESSION*   pTestSessions = getTestSessionDb(idx, &testIdx);
+
+    if(pTestSessions == NULL)
+        return;
+
+    mvOsPrintf("%-35s %4dx%-4d : ", pTestSessions[testIdx].name,
+            cesaIteration, cesaReqSize);
+    if( (status == MV_OK)      &&
+        (cesaCryptoError == 0) &&
+        (cesaError == 0)       &&
+        (cesaReqIdError == 0) )
+    {
+        mvOsPrintf("Passed, Rate=%3u.%u Mbps (%5u cpp)\n",
+                     cesaRate, cesaRateAfterDot, cesaEndTicks - cesaBeginTicks);
+    }
+    else
+    {
+        mvOsPrintf("Failed, Status = 0x%x\n", status);
+        if(cesaCryptoError > 0)
+            mvOsPrintf("cryptoError : %d\n", cesaCryptoError);
+        if(cesaReqIdError > 0)
+            mvOsPrintf("reqIdError  : %d\n", cesaReqIdError);
+        if(cesaError > 0)
+            mvOsPrintf("cesaError  : %d\n", cesaError);
+    }
+    if(cesaTestIsrMissCount > 0)
+        mvOsPrintf("cesaIsrMissed  : %d\n", cesaTestIsrMissCount);
+}
+
+void cesaCheckReady(MV_CESA_RESULT* r)
+{
+    int             reqId;
+    MV_CESA_MBUF    *pMbuf;
+    MV_BOOL         isFailed;
+
+    cesaResult  =  *r;
+    reqId = (int)cesaResult.pReqPrv;
+    pMbuf = cesaCmdRing[reqId].pDst;
+
+/*
+    mvOsPrintf("cesaCheckReady: reqId=%d, checkOffset=%d, checkSize=%d\n",
+                    reqId, cesaCheckOffset, cesaCheckSize);
+*/
+    /* Check expected reqId */
+    if(reqId != cesaExpReqId)
+    {
+        cesaReqIdError++;
+/*
+        mvOsPrintf("CESA reqId Error: cbIter=%d (%d), reqId=%d, expReqId=%d\n",
+                    cesaCbIter, cesaIteration, reqId, cesaExpReqId);
+*/
+    }
+    else
+    {
+        if( (cesaCheckMode == CESA_FULL_CHECK_MODE) ||
+            (cesaCheckMode == CESA_FAST_CHECK_MODE) )
+        {
+            if(cesaResult.retCode != MV_OK)
+            {
+                cesaError++;
+
+                mvOsPrintf("CESA Error: cbIter=%d (%d), reqId=%d, rc=%d\n",
+                            cesaCbIter, cesaIteration, reqId, cesaResult.retCode);
+            }
+            else
+            {
+                if( (cesaCheckSize > 0) && (cesaOutputHexStr != NULL) )
+                {
+                    /* Check expected output */
+
+                    isFailed = cesaCheckMbuf(pMbuf, cesaOutputHexStr, cesaCheckOffset, cesaCheckSize);
+                    if(isFailed)
+                    {
+                        mvOsPrintf("CESA Crypto Error: cbIter=%d (%d), reqId=%d\n",
+                                    cesaCbIter, cesaIteration, reqId);
+
+                        CESA_TEST_DEBUG_PRINT(("Error: reqId=%d, reqSize=%d, checkOffset=%d, checkSize=%d\n",
+                                    reqId, cesaReqSize, cesaCheckOffset, cesaCheckSize));
+
+                        CESA_TEST_DEBUG_PRINT(("Output str: %s\n", cesaOutputHexStr));
+
+                        CESA_TEST_DEBUG_CODE( mvCesaDebugMbuf("error", pMbuf, 0, cesaCheckOffset+cesaCheckSize) );
+
+                        cesaCryptoError++;
+                    }
+                }
+            }
+        }
+    }
+    if(cesaCheckMode == CESA_SHOW_CHECK_MODE)
+    {
+        extractMbuf(pMbuf, cesaCheckOffset, cesaCheckSize, cesaHexBuffer);
+        mvOsPrintf("%4d, %s\n", cesaCheckOffset, cesaHexBuffer);
+    }
+
+    cesaCbIter++;
+    if(cesaCbIter >= cesaIteration)
+    {
+        cesaCbIter = 0;
+        cesaExpReqId = 0;
+        cesaIsReady = MV_TRUE;
+
+        cesaEndTicks = CESA_TEST_TICK_GET();
+        cesaRate = getRate(&cesaRateAfterDot);
+    }
+    else
+    {
+        cesaExpReqId = reqId + 1;
+        if(cesaExpReqId == CESA_DEF_REQ_SIZE)
+            cesaExpReqId = 0;
+    }
+}
+
+
+#ifdef MV_NETBSD
+static int cesaTestReadyIsr(void *arg)
+#else
+#ifdef __KERNEL__
+static irqreturn_t cesaTestReadyIsr( int irq , void *dev_id)
+#endif
+#ifdef MV_VXWORKS
+void   cesaTestReadyIsr(void)
+#endif
+#endif
+{
+    MV_U32          cause;
+    MV_STATUS       status;
+    MV_CESA_RESULT  result;
+
+    cesaTestIsrCount++;
+    /* Clear cause register */
+    cause = MV_REG_READ(MV_CESA_ISR_CAUSE_REG);
+    if( (cause & MV_CESA_CAUSE_ACC_DMA_ALL_MASK) == 0)
+    {
+        mvOsPrintf("cesaTestReadyIsr: cause=0x%x\n", cause);
+#ifdef MV_NETBSD
+        return 0;
+#else
+#ifdef __KERNEL__
+        return 1;
+#else
+        return;
+#endif
+#endif
+    }
+
+    MV_REG_WRITE(MV_CESA_ISR_CAUSE_REG, 0);
+
+    while(MV_TRUE)
+    {
+        /* Get Ready requests */
+        status = mvCesaReadyGet(&result);
+        if(status == MV_OK)
+            cesaCheckReady(&result);
+
+        break;
+    }
+    if( (cesaTestFull == 1) && (status != MV_BUSY) )
+    {
+        cesaTestFull = 0;
+        CESA_TEST_WAKE_UP();
+    }
+
+#ifdef __KERNEL__
+    return 1;
+#endif
+}
+
+void
+cesaTestCheckReady(const MV_CESA_RESULT *r)
+{
+	MV_CESA_RESULT result = *r;
+
+	cesaCheckReady(&result);
+
+	if (cesaTestFull == 1) {
+		cesaTestFull = 0;
+		CESA_TEST_WAKE_UP();
+	}
+}
+
+static INLINE int   open_session(MV_CESA_OPEN_SESSION* pOs)
+{
+    MV_U16      sid;
+    MV_STATUS   status;
+
+    status = mvCesaSessionOpen(pOs, (short*)&sid);
+    if(status != MV_OK)
+    {
+        mvOsPrintf("CesaTest: Can't open new session - status = 0x%x\n",
+                    status);
+        return -1;
+    }
+
+    return  (int)sid;
+}
+
+void close_session(int sid)
+{
+    MV_STATUS   status;
+
+    status = mvCesaSessionClose(sid);
+    if(status != MV_OK)
+    {
+        mvOsPrintf("CesaTest: Can't close session %d - status = 0x%x\n",
+                    sid, status);
+    }
+}
+
+MV_STATUS testOpen(int idx)
+{
+    MV_CESA_OPEN_SESSION    os;
+    int                     sid, i, testIdx;
+    MV_CESA_TEST_SESSION*   pTestSession;
+    MV_U16          digestSize = 0;
+
+    pTestSession = getTestSessionDb(idx, &testIdx);
+    if(pTestSession == NULL)
+    {
+        mvOsPrintf("Test %d is not exist\n", idx);
+        return MV_BAD_PARAM;
+    }
+    pTestSession = &pTestSession[testIdx];
+
+    if(pTestSession->sid != -1)
+    {
+        mvOsPrintf("Session for test %d already created: sid=%d\n",
+                    idx, pTestSession->sid);
+        return MV_OK;
+    }
+
+    os.cryptoAlgorithm = pTestSession->cryptoAlgorithm;
+    os.macMode = pTestSession->macAlgorithm;
+    switch(os.macMode)
+    {
+        case MV_CESA_MAC_MD5:
+        case MV_CESA_MAC_HMAC_MD5:
+            digestSize = MV_CESA_MD5_DIGEST_SIZE;
+            break;
+
+        case MV_CESA_MAC_SHA1:
+        case MV_CESA_MAC_HMAC_SHA1:
+            digestSize = MV_CESA_SHA1_DIGEST_SIZE;
+            break;
+
+        case MV_CESA_MAC_NULL:
+            digestSize = 0;
+    }
+    os.cryptoMode = pTestSession->cryptoMode;
+    os.direction = pTestSession->direction;
+    os.operation = pTestSession->operation;
+
+    for(i=0; i<pTestSession->cryptoKeySize; i++)
+        os.cryptoKey[i] = pTestSession->pCryptoKey[i];
+
+    os.cryptoKeyLength = pTestSession->cryptoKeySize;
+
+    for(i=0; i<pTestSession->macKeySize; i++)
+        os.macKey[i] = pTestSession->pMacKey[i];
+
+    os.macKeyLength = pTestSession->macKeySize;
+    os.digestSize = digestSize;
+
+    sid = open_session(&os);
+    if(sid == -1)
+    {
+        mvOsPrintf("Can't open session for test %d: rc=0x%x\n",
+                    idx, cesaResult.retCode);
+        return cesaResult.retCode;
+    }
+    CESA_TEST_DEBUG_PRINT(("Opened session: sid = %d\n", sid));
+    pTestSession->sid = sid;
+    return MV_OK;
+}
+
+MV_STATUS   testClose(int idx)
+{
+    int                     testIdx;
+    MV_CESA_TEST_SESSION*   pTestSession;
+
+    pTestSession = getTestSessionDb(idx, &testIdx);
+    if(pTestSession == NULL)
+    {
+        mvOsPrintf("Test %d is not exist\n", idx);
+        return MV_BAD_PARAM;
+    }
+    pTestSession = &pTestSession[testIdx];
+
+    if(pTestSession->sid == -1)
+    {
+        mvOsPrintf("Test session %d is not opened\n", idx);
+        return MV_NO_SUCH;
+    }
+
+    close_session(pTestSession->sid);
+    pTestSession->sid = -1;
+
+    return MV_OK;
+}
+
+MV_STATUS testCmd(int sid, int iter, MV_CESA_COMMAND* pCmd,
+             MV_CESA_TEST_SESSION* pTestSession, MV_U8* pIV, int ivSize)
+{
+    int                 cmdReqId = 0;
+    int                 i;
+    MV_STATUS           rc = MV_OK;
+    char                ivZeroHex[] = "0000";
+
+    if(iter == 0)
+        iter = CESA_DEF_ITER_NUM;
+
+    if(pCmd == NULL)
+    {
+        mvOsPrintf("testCmd failed: pCmd=NULL\n");
+        return MV_BAD_PARAM;
+    }
+    pCmd->sessionId = sid;
+
+    cesaCryptoError = 0;
+    cesaReqIdError = 0;
+    cesaError = 0;
+    cesaTestIsrMissCount = 0;
+    cesaIsReady = MV_FALSE;
+    cesaIteration = iter;
+
+    if(cesaInputHexStr == NULL)
+        cesaInputHexStr = cesaPlainHexEbc;
+
+    for(i=0; i<CESA_DEF_REQ_SIZE; i++)
+    {
+        pCmd->pSrc = (MV_CESA_MBUF*)(cesaCmdRing[i].pSrc);
+        if(pIV != NULL)
+        {
+            /* If IV from SA - set IV in Source buffer to zeros */
+            cesaSetMbuf(pCmd->pSrc, ivZeroHex, 0, pCmd->cryptoOffset);
+            cesaSetMbuf(pCmd->pSrc, cesaInputHexStr, pCmd->cryptoOffset,
+                        (cesaReqSize - pCmd->cryptoOffset));
+        }
+        else
+        {
+            cesaSetMbuf(pCmd->pSrc, cesaInputHexStr, 0, cesaReqSize);
+        }
+        pCmd->pDst = (MV_CESA_MBUF*)(cesaCmdRing[i].pDst);
+        cesaSetMbuf(pCmd->pDst, cesaNullPlainHexText, 0, cesaReqSize);
+
+        memcpy(&cesaCmdRing[i], pCmd, sizeof(*pCmd));
+    }
+
+    if(cesaCheckMode == CESA_SW_SHOW_CHECK_MODE)
+    {
+        MV_U8   pDigest[MV_CESA_MAX_DIGEST_SIZE];
+
+        if(pTestSession->macAlgorithm == MV_CESA_MAC_MD5)
+        {
+            mvMD5(pCmd->pSrc->pFrags[0].bufVirtPtr, pCmd->macLength, pDigest);
+            mvOsPrintf("SW HASH_MD5: reqSize=%d, macLength=%d\n",
+                        cesaReqSize, pCmd->macLength);
+            mvDebugMemDump(pDigest, MV_CESA_MD5_DIGEST_SIZE, 1);
+            return MV_OK;
+        }
+        if(pTestSession->macAlgorithm == MV_CESA_MAC_SHA1)
+        {
+            mvSHA1(pCmd->pSrc->pFrags[0].bufVirtPtr, pCmd->macLength, pDigest);
+            mvOsPrintf("SW HASH_SHA1: reqSize=%d, macLength=%d\n",
+                        cesaReqSize, pCmd->macLength);
+            mvDebugMemDump(pDigest, MV_CESA_SHA1_DIGEST_SIZE, 1);
+            return MV_OK;
+        }
+    }
+
+    cesaBeginTicks = CESA_TEST_TICK_GET();
+    CESA_TEST_DEBUG_CODE( memset(cesaTestTrace, 0, sizeof(cesaTestTrace));
+                     cesaTestTraceIdx = 0;
+    );
+
+    if(cesaCheckMode == CESA_SW_NULL_CHECK_MODE)
+    {
+        volatile MV_U8   pDigest[MV_CESA_MAX_DIGEST_SIZE];
+
+        for(i=0; i<iter; i++)
+        {
+            if(pTestSession->macAlgorithm == MV_CESA_MAC_MD5)
+            {
+                mvMD5(pCmd->pSrc->pFrags[0].bufVirtPtr, pCmd->macLength, (unsigned char*)pDigest);
+            }
+            if(pTestSession->macAlgorithm == MV_CESA_MAC_SHA1)
+            {
+                mvSHA1(pCmd->pSrc->pFrags[0].bufVirtPtr, pCmd->macLength, (MV_U8 *)pDigest);
+            }
+        }
+        cesaEndTicks = CESA_TEST_TICK_GET();
+        cesaRate = getRate(&cesaRateAfterDot);
+        cesaIsReady = MV_TRUE;
+
+        return MV_OK;
+    }
+
+    /*cesaTestIsrCount = 0;*/
+    /*mvCesaDebugStatsClear();*/
+
+#ifndef MV_NETBSD
+    MV_REG_WRITE(MV_CESA_ISR_CAUSE_REG, 0);
+#endif
+
+    for(i=0; i<iter; i++)
+    {
+        unsigned long flags;
+
+        pCmd = &cesaCmdRing[cmdReqId];
+        pCmd->pReqPrv = (void*)cmdReqId;
+
+        CESA_TEST_LOCK(flags);
+
+        rc = mvCesaAction(pCmd);
+        if(rc == MV_NO_RESOURCE)
+            cesaTestFull = 1;
+
+        CESA_TEST_UNLOCK(flags);
+
+        if(rc == MV_NO_RESOURCE)
+        {
+            CESA_TEST_LOCK(flags);
+            CESA_TEST_WAIT( (cesaTestFull == 0), 100);
+            CESA_TEST_UNLOCK(flags);
+            if(cesaTestFull == 1)
+            {
+                mvOsPrintf("CESA Test timeout: i=%d, iter=%d, cesaTestFull=%d\n",
+                            i, iter, cesaTestFull);
+                cesaTestFull = 0;
+                return MV_TIMEOUT;
+            }
+
+            CESA_TEST_LOCK(flags);
+
+            rc = mvCesaAction(pCmd);
+
+            CESA_TEST_UNLOCK(flags);
+        }
+        if( (rc != MV_OK) && (rc != MV_NO_MORE) )
+        {
+            mvOsPrintf("mvCesaAction failed: rc=%d\n", rc);
+            return rc;
+        }
+
+        cmdReqId++;
+        if(cmdReqId >= CESA_DEF_REQ_SIZE)
+            cmdReqId = 0;
+
+#ifdef MV_LINUX
+        /* Reschedule each 16 requests */
+        if( (i & 0xF) == 0)
+            schedule();
+#endif
+    }
+    return MV_OK;
+}
+
+void    cesaTestStart(int bufNum, int bufSize)
+{
+    int             i, j, idx;
+    MV_CESA_MBUF    *pMbufSrc, *pMbufDst;
+    MV_BUF_INFO     *pFragsSrc, *pFragsDst;
+    char            *pBuf;
+#ifndef MV_NETBSD
+    int             numOfSessions, queueDepth;
+    char            *pSram;
+    MV_STATUS       status;
+    MV_CPU_DEC_WIN  addrDecWin;
+#endif
+
+    cesaCmdRing = mvOsMalloc(sizeof(MV_CESA_COMMAND) * CESA_DEF_REQ_SIZE);
+    if(cesaCmdRing == NULL)
+    {
+        mvOsPrintf("testStart: Can't allocate %ld bytes of memory\n",
+                sizeof(MV_CESA_COMMAND) * CESA_DEF_REQ_SIZE);
+        return;
+    }
+    memset(cesaCmdRing, 0, sizeof(MV_CESA_COMMAND) * CESA_DEF_REQ_SIZE);
+
+    if(bufNum == 0)
+        bufNum = CESA_DEF_BUF_NUM;
+
+    if(bufSize == 0)
+        bufSize = CESA_DEF_BUF_SIZE;
+
+    cesaBufNum = bufNum;
+    cesaBufSize = bufSize;
+    mvOsPrintf("CESA test started: bufNum = %d, bufSize = %d\n",
+                bufNum, bufSize);
+
+    cesaHexBuffer = mvOsMalloc(2*bufNum*bufSize);
+    if(cesaHexBuffer == NULL)
+    {
+        mvOsPrintf("testStart: Can't malloc %d bytes for cesaHexBuffer.\n",
+                    2*bufNum*bufSize);
+        return;
+    }
+    memset(cesaHexBuffer, 0, (2*bufNum*bufSize));
+
+    cesaBinBuffer = mvOsMalloc(bufNum*bufSize);
+    if(cesaBinBuffer == NULL)
+    {
+        mvOsPrintf("testStart: Can't malloc %d bytes for cesaBinBuffer\n",
+                    bufNum*bufSize);
+        return;
+    }
+    memset(cesaBinBuffer, 0, (bufNum*bufSize));
+
+    cesaExpBinBuffer = mvOsMalloc(bufNum*bufSize);
+    if(cesaExpBinBuffer == NULL)
+    {
+        mvOsPrintf("testStart: Can't malloc %d bytes for cesaExpBinBuffer\n",
+                    bufNum*bufSize);
+        return;
+    }
+    memset(cesaExpBinBuffer, 0, (bufNum*bufSize));
+
+    CESA_TEST_WAIT_INIT();
+
+    pMbufSrc = mvOsMalloc(sizeof(MV_CESA_MBUF) * CESA_DEF_REQ_SIZE);
+    pFragsSrc = mvOsMalloc(sizeof(MV_BUF_INFO) * bufNum * CESA_DEF_REQ_SIZE);
+
+    pMbufDst = mvOsMalloc(sizeof(MV_CESA_MBUF) * CESA_DEF_REQ_SIZE);
+    pFragsDst = mvOsMalloc(sizeof(MV_BUF_INFO) * bufNum * CESA_DEF_REQ_SIZE);
+
+    if( (pMbufSrc == NULL) || (pFragsSrc == NULL) ||
+        (pMbufDst == NULL) || (pFragsDst == NULL) )
+    {
+        mvOsPrintf("testStart: Can't malloc Src and Dst pMbuf and pFrags structures.\n");
+        /* !!!! Dima cesaTestCleanup();*/
+        return;
+    }
+
+    memset(pMbufSrc, 0, sizeof(MV_CESA_MBUF) * CESA_DEF_REQ_SIZE);
+    memset(pFragsSrc, 0, sizeof(MV_BUF_INFO) * bufNum * CESA_DEF_REQ_SIZE);
+
+    memset(pMbufDst, 0, sizeof(MV_CESA_MBUF) * CESA_DEF_REQ_SIZE);
+    memset(pFragsDst, 0, sizeof(MV_BUF_INFO) * bufNum * CESA_DEF_REQ_SIZE);
+
+    mvOsPrintf("Cesa Test Start: pMbufSrc=%p, pFragsSrc=%p, pMbufDst=%p, pFragsDst=%p\n",
+                pMbufSrc, pFragsSrc, pMbufDst, pFragsDst);
+
+    idx = 0;
+    for(i=0; i<CESA_DEF_REQ_SIZE; i++)
+    {
+        pBuf = mvOsIoCachedMalloc(cesaTestOSHandle,bufSize * bufNum * 2,
+				  &cesaReqBufs[i].bufPhysAddr,
+				  &cesaReqBufs[i].memHandle);
+		if(pBuf == NULL)
+	{
+		mvOsPrintf("testStart: Can't malloc %d bytes for pBuf\n",
+                    bufSize * bufNum * 2);
+		return;
+	}
+
+        memset(pBuf, 0, bufSize * bufNum * 2);
+        mvOsCacheFlush(cesaTestOSHandle,pBuf, bufSize * bufNum * 2);
+        if(pBuf == NULL)
+        {
+            mvOsPrintf("cesaTestStart: Can't allocate %d bytes for req_%d buffers\n",
+                        bufSize * bufNum * 2, i);
+            return;
+        }
+
+        cesaReqBufs[i].bufVirtPtr = (MV_U8*)pBuf;
+        cesaReqBufs[i].bufSize =  bufSize * bufNum * 2;
+
+        cesaCmdRing[i].pSrc = &pMbufSrc[i];
+        cesaCmdRing[i].pSrc->pFrags = &pFragsSrc[idx];
+        cesaCmdRing[i].pSrc->numFrags = bufNum;
+        cesaCmdRing[i].pSrc->mbufSize = 0;
+
+        cesaCmdRing[i].pDst = &pMbufDst[i];
+        cesaCmdRing[i].pDst->pFrags = &pFragsDst[idx];
+        cesaCmdRing[i].pDst->numFrags = bufNum;
+        cesaCmdRing[i].pDst->mbufSize = 0;
+
+        for(j=0; j<bufNum; j++)
+        {
+            cesaCmdRing[i].pSrc->pFrags[j].bufVirtPtr = (MV_U8*)pBuf;
+            cesaCmdRing[i].pSrc->pFrags[j].bufSize = bufSize;
+            pBuf += bufSize;
+            cesaCmdRing[i].pDst->pFrags[j].bufVirtPtr = (MV_U8*)pBuf;
+            cesaCmdRing[i].pDst->pFrags[j].bufSize = bufSize;
+            pBuf += bufSize;
+        }
+        idx += bufNum;
+    }
+
+#ifndef MV_NETBSD
+    if (mvCpuIfTargetWinGet(CRYPT_ENG, &addrDecWin) == MV_OK)
+        pSram = (char*)addrDecWin.addrWin.baseLow;
+    else
+    {
+        mvOsPrintf("mvCesaInit: ERR. mvCpuIfTargetWinGet failed\n");
+        return;
+    }
+
+#ifdef MV_CESA_NO_SRAM
+    pSram = mvOsMalloc(4*1024+8);
+    if(pSram == NULL)
+    {
+        mvOsPrintf("CesaTest: can't allocate %d bytes for SRAM simulation\n",
+                4*1024+8);
+        /* !!!! Dima cesaTestCleanup();*/
+        return;
+    }
+    pSram = (MV_U8*)MV_ALIGN_UP((MV_U32)pSram, 8);
+#endif /* MV_CESA_NO_SRAM */
+
+    numOfSessions = CESA_DEF_SESSION_NUM;
+    queueDepth = CESA_DEF_REQ_SIZE - MV_CESA_MAX_CHAN;
+
+    status = mvCesaInit(numOfSessions, queueDepth, pSram, NULL);
+    if(status != MV_OK)
+    {
+        mvOsPrintf("mvCesaInit is Failed: status = 0x%x\n", status);
+        /* !!!! Dima cesaTestCleanup();*/
+        return;
+    }
+#endif /* !MV_NETBSD */
+
+    /* Prepare data for tests */
+    for(i=0; i<50; i++)
+        strcat((char*)cesaDataHexStr3, "dd");
+
+    strcpy((char*)cesaDataAndMd5digest3,  cesaDataHexStr3);
+    strcpy((char*)cesaDataAndSha1digest3, cesaDataHexStr3);
+
+    /* Digest must be 8 byte aligned */
+    for(; i<56; i++)
+    {
+        strcat((char*)cesaDataAndMd5digest3, "00");
+        strcat((char*)cesaDataAndSha1digest3, "00");
+    }
+    strcat((char*)cesaDataAndMd5digest3,  cesaHmacMd5digestHex3);
+    strcat((char*)cesaDataAndSha1digest3, cesaHmacSha1digestHex3);
+
+#ifndef MV_NETBSD
+    MV_REG_WRITE( MV_CESA_ISR_CAUSE_REG, 0);
+    MV_REG_WRITE( MV_CESA_ISR_MASK_REG, MV_CESA_CAUSE_ACC_DMA_MASK);
+#endif
+
+#ifdef MV_VXWORKS
+    {
+        MV_STATUS       status;
+
+        status = intConnect((VOIDFUNCPTR *)INT_LVL_CESA, cesaTestReadyIsr, (int)NULL);
+        if (status != OK)
+        {
+            mvOsPrintf("CESA: Can't connect CESA (%d) interrupt, status=0x%x \n",
+                        INT_LVL_CESA, status);
+            /* !!!! Dima cesaTestCleanup();*/
+            return;
+        }
+        cesaSemId = semMCreate(SEM_Q_PRIORITY | SEM_INVERSION_SAFE | SEM_DELETE_SAFE);
+        if(cesaSemId == NULL)
+        {
+            mvOsPrintf("cesaTestStart: Can't create semaphore\n");
+            return;
+        }
+        intEnable(INT_LVL_CESA);
+    }
+#endif /* MV_VXWORKS */
+
+#if !defined(MV_NETBSD) && defined(__KERNEL__)
+        if( request_irq(CESA_IRQ, cesaTestReadyIsr, (SA_INTERRUPT) , "cesa_test", NULL ) )
+        {
+            mvOsPrintf( "cannot assign irq\n" );
+            /* !!!! Dima cesaTestCleanup();*/
+            return;
+        }
+        spin_lock_init( &cesaLock );
+#endif
+}
+
+MV_STATUS   testRun(int idx, int caseIdx, int iter,
+                    int reqSize, int checkMode)
+{
+    int                     testIdx, count, sid, digestSize;
+    int                     blockSize;
+    MV_CESA_TEST_SESSION*   pTestSession;
+    MV_CESA_COMMAND         cmd;
+    MV_STATUS               status;
+
+    memset(&cmd, 0, sizeof(cmd));
+
+    pTestSession = getTestSessionDb(idx, &testIdx);
+    if(pTestSession == NULL)
+    {
+        mvOsPrintf("Test %d is not exist\n", idx);
+        return MV_BAD_PARAM;
+    }
+    pTestSession = &pTestSession[testIdx];
+
+    sid = pTestSession->sid;
+    if(sid == -1)
+    {
+        mvOsPrintf("Test %d is not opened\n", idx);
+        return MV_BAD_STATE;
+    }
+    switch(pTestSession->cryptoAlgorithm)
+    {
+        case MV_CESA_CRYPTO_DES:
+        case MV_CESA_CRYPTO_3DES:
+            blockSize = MV_CESA_DES_BLOCK_SIZE;
+            break;
+
+        case MV_CESA_CRYPTO_AES:
+            blockSize = MV_CESA_AES_BLOCK_SIZE;
+            break;
+
+        case MV_CESA_CRYPTO_NULL:
+            blockSize = 0;
+            break;
+
+        default:
+            mvOsPrintf("cesaTestRun: Bad CryptoAlgorithm=%d\n",
+                pTestSession->cryptoAlgorithm);
+        return MV_BAD_PARAM;
+    }
+    switch(pTestSession->macAlgorithm)
+    {
+        case MV_CESA_MAC_MD5:
+        case MV_CESA_MAC_HMAC_MD5:
+            digestSize = MV_CESA_MD5_DIGEST_SIZE;
+            break;
+
+        case MV_CESA_MAC_SHA1:
+        case MV_CESA_MAC_HMAC_SHA1:
+            digestSize = MV_CESA_SHA1_DIGEST_SIZE;
+            break;
+        default:
+            digestSize = 0;
+    }
+
+    if(iter == 0)
+        iter = CESA_DEF_ITER_NUM;
+
+    if(pTestSession->direction == MV_CESA_DIR_ENCODE)
+    {
+        cesaOutputHexStr = cesaTestCases[caseIdx].cipherHexStr;
+        cesaInputHexStr = cesaTestCases[caseIdx].plainHexStr;
+    }
+    else
+    {
+        cesaOutputHexStr = cesaTestCases[caseIdx].plainHexStr;
+        cesaInputHexStr = cesaTestCases[caseIdx].cipherHexStr;
+    }
+
+    cmd.sessionId = sid;
+    if(checkMode == CESA_FAST_CHECK_MODE)
+    {
+        cmd.cryptoLength = cesaTestCases[caseIdx].cryptoLength;
+        cmd.macLength = cesaTestCases[caseIdx].macLength;
+    }
+    else
+    {
+        cmd.cryptoLength = reqSize;
+        cmd.macLength = reqSize;
+    }
+    cesaRateSize = cmd.cryptoLength;
+    cesaReqSize = cmd.cryptoLength;
+    cmd.cryptoOffset = 0;
+    if(pTestSession->operation != MV_CESA_MAC_ONLY)
+    {
+        if( (pTestSession->cryptoMode == MV_CESA_CRYPTO_CBC) ||
+            (pTestSession->cryptoMode == MV_CESA_CRYPTO_CTR) )
+        {
+            cmd.ivOffset = 0;
+            cmd.cryptoOffset = blockSize;
+            if(cesaTestCases[caseIdx].pCryptoIV == NULL)
+            {
+                cmd.ivFromUser = 1;
+            }
+            else
+            {
+                cmd.ivFromUser = 0;
+                mvCesaCryptoIvSet(cesaTestCases[caseIdx].pCryptoIV, blockSize);
+            }
+            cesaReqSize = cmd.cryptoOffset + cmd.cryptoLength;
+        }
+    }
+
+/*
+    mvOsPrintf("ivFromUser=%d, cryptoLength=%d, cesaReqSize=%d, cryptoOffset=%d\n",
+                cmd.ivFromUser, cmd.cryptoLength, cesaReqSize, cmd.cryptoOffset);
+*/
+    if(pTestSession->operation != MV_CESA_CRYPTO_ONLY)
+    {
+        cmd.macOffset = cmd.cryptoOffset;
+
+        if(cesaTestCases[caseIdx].digestOffset == -1)
+        {
+            cmd.digestOffset = cmd.macOffset + cmd.macLength;
+            cmd.digestOffset = MV_ALIGN_UP(cmd.digestOffset, 8);
+        }
+        else
+        {
+            cmd.digestOffset = cesaTestCases[caseIdx].digestOffset;
+        }
+        if( (cmd.digestOffset + digestSize) > cesaReqSize)
+            cesaReqSize = cmd.digestOffset + digestSize;
+    }
+
+    cesaCheckMode = checkMode;
+
+    if(checkMode == CESA_NULL_CHECK_MODE)
+    {
+        cesaCheckSize = 0;
+        cesaCheckOffset = 0;
+    }
+    else
+    {
+        if(pTestSession->operation == MV_CESA_CRYPTO_ONLY)
+        {
+            cesaCheckOffset = 0;
+            cesaCheckSize = cmd.cryptoLength;
+        }
+        else
+        {
+            cesaCheckSize = digestSize;
+            cesaCheckOffset = cmd.digestOffset;
+        }
+    }
+/*
+    mvOsPrintf("reqSize=%d, checkSize=%d, checkOffset=%d, checkMode=%d\n",
+                cesaReqSize, cesaCheckSize, cesaCheckOffset, cesaCheckMode);
+
+    mvOsPrintf("blockSize=%d, ivOffset=%d, ivFromUser=%d, crOffset=%d, crLength=%d\n",
+                blockSize, cmd.ivOffset, cmd.ivFromUser,
+                cmd.cryptoOffset, cmd.cryptoLength);
+
+    mvOsPrintf("macOffset=%d, digestOffset=%d, macLength=%d\n",
+                cmd.macOffset, cmd.digestOffset, cmd.macLength);
+*/
+    status = testCmd(sid, iter, &cmd, pTestSession,
+                     cesaTestCases[caseIdx].pCryptoIV, blockSize);
+
+    if(status != MV_OK)
+        return status;
+
+    /* Wait when all callbacks is received */
+    count = 0;
+    while(cesaIsReady == MV_FALSE)
+    {
+        mvOsSleep(10);
+        count++;
+        if(count > 100)
+        {
+            mvOsPrintf("testRun: Timeout occured\n");
+            return MV_TIMEOUT;
+        }
+    }
+
+    return MV_OK;
+}
+
+
+void cesaTestStop(void)
+{
+    MV_CESA_MBUF    *pMbufSrc, *pMbufDst;
+    MV_BUF_INFO     *pFragsSrc, *pFragsDst;
+    int             i;
+
+    /* Release all allocated memories */
+    pMbufSrc = (MV_CESA_MBUF*)(cesaCmdRing[0].pSrc);
+    pFragsSrc = cesaCmdRing[0].pSrc->pFrags;
+
+    pMbufDst = (MV_CESA_MBUF*)(cesaCmdRing[0].pDst);
+    pFragsDst = cesaCmdRing[0].pDst->pFrags;
+
+    mvOsFree(pMbufSrc);
+    mvOsFree(pMbufDst);
+    mvOsFree(pFragsSrc);
+    mvOsFree(pFragsDst);
+
+    for(i=0; i<CESA_DEF_REQ_SIZE; i++)
+    {
+        mvOsIoCachedFree(cesaTestOSHandle,cesaReqBufs[i].bufSize,
+			 cesaReqBufs[i].bufPhysAddr,cesaReqBufs[i].bufVirtPtr,
+			 cesaReqBufs[i].memHandle);
+    }
+    cesaDataHexStr3[0] = '\0';
+}
+
+void    desTest(int iter, int reqSize, int checkMode)
+{
+    int         mode, i;
+    MV_STATUS   status;
+
+    mode = checkMode;
+    if(checkMode == CESA_FULL_CHECK_MODE)
+        mode = CESA_FAST_CHECK_MODE;
+    i = iter;
+    if(mode != CESA_NULL_CHECK_MODE)
+        i = 1;
+
+    testOpen(0);
+    testOpen(1);
+    testOpen(2);
+    testOpen(3);
+
+/* DES / ECB mode / Encrypt only */
+    status = testRun(0, 1, iter, reqSize, checkMode);
+    printTestResults(0, status, checkMode);
+
+/* DES / ECB mode / Decrypt only */
+    status = testRun(1, 1, iter, reqSize, checkMode);
+    printTestResults(1, status, checkMode);
+
+/* DES / CBC mode / Encrypt only */
+    status = testRun(2, 2, i, reqSize, mode);
+    printTestResults(2, status, mode);
+
+/* DES / CBC mode / Decrypt only */
+    status = testRun(3, 2, iter, reqSize, mode);
+    printTestResults(3, status, mode);
+
+    testClose(0);
+    testClose(1);
+    testClose(2);
+    testClose(3);
+}
+
+void    tripleDesTest(int iter, int reqSize, int checkMode)
+{
+    int         mode, i;
+    MV_STATUS   status;
+
+    mode = checkMode;
+    if(checkMode == CESA_FULL_CHECK_MODE)
+        mode = CESA_FAST_CHECK_MODE;
+    i = iter;
+    if(mode != CESA_NULL_CHECK_MODE)
+        i = 1;
+
+    testOpen(100);
+    testOpen(101);
+    testOpen(102);
+    testOpen(103);
+
+/* 3DES / ECB mode / Encrypt only */
+    status = testRun(100, 1, iter, reqSize, checkMode);
+    printTestResults(100, status, checkMode);
+
+/* 3DES / ECB mode / Decrypt only */
+    status = testRun(101, 1, iter, reqSize, checkMode);
+    printTestResults(101, status, checkMode);
+
+/* 3DES / CBC mode / Encrypt only */
+    status = testRun(102, 2, i, reqSize, mode);
+    printTestResults(102, status, mode);
+
+/* 3DES / CBC mode / Decrypt only */
+    status = testRun(103, 2, iter, reqSize, mode);
+    printTestResults(103, status, mode);
+
+    testClose(100);
+    testClose(101);
+    testClose(102);
+    testClose(103);
+}
+
+void    aesTest(int iter, int reqSize, int checkMode)
+{
+    MV_STATUS   status;
+    int         mode, i;
+
+    mode = checkMode;
+    if(checkMode == CESA_FULL_CHECK_MODE)
+        mode = CESA_FAST_CHECK_MODE;
+
+    i = iter;
+    if(mode != CESA_NULL_CHECK_MODE)
+        i = 1;
+
+    testOpen(200);
+    testOpen(201);
+    testOpen(202);
+    testOpen(203);
+    testOpen(204);
+    testOpen(205);
+    testOpen(206);
+    testOpen(207);
+    testOpen(208);
+
+/* AES-128 Encode ECB mode */
+    status = testRun(200, 3, iter, reqSize, checkMode);
+    printTestResults(200, status, checkMode);
+
+/* AES-128 Decode ECB mode */
+    status = testRun(201, 3, iter, reqSize, checkMode);
+    printTestResults(201, status, checkMode);
+
+/* AES-128 Encode CBC mode (IV from SA) */
+    status = testRun(202, 10, i, reqSize, mode);
+    printTestResults(202, status, mode);
+
+/* AES-128 Encode CBC mode (IV from User) */
+    status = testRun(202, 24, i, reqSize, mode);
+    printTestResults(202, status, mode);
+
+/* AES-128 Decode CBC mode */
+    status = testRun(203, 24, iter, reqSize, mode);
+    printTestResults(203, status, checkMode);
+
+/* AES-192 Encode ECB mode */
+    status = testRun(204, 4, iter, reqSize, checkMode);
+    printTestResults(204, status, checkMode);
+
+/* AES-192 Decode ECB mode */
+    status = testRun(205, 4, iter, reqSize, checkMode);
+    printTestResults(205, status, checkMode);
+
+/* AES-256 Encode ECB mode */
+    status = testRun(206, 5, iter, reqSize, checkMode);
+    printTestResults(206, status, checkMode);
+
+/* AES-256 Decode ECB mode */
+    status = testRun(207, 5, iter, reqSize, checkMode);
+    printTestResults(207, status, checkMode);
+
+#if defined(MV_LINUX)
+/* AES-128 Encode CTR mode */
+    status = testRun(208, 23, iter, reqSize, mode);
+    printTestResults(208, status, checkMode);
+#endif
+    testClose(200);
+    testClose(201);
+    testClose(202);
+    testClose(203);
+    testClose(204);
+    testClose(205);
+    testClose(206);
+    testClose(207);
+    testClose(208);
+}
+
+
+void    mdTest(int iter, int reqSize, int checkMode)
+{
+    int         mode;
+    MV_STATUS   status;
+
+    if(iter == 0)
+        iter = CESA_DEF_ITER_NUM;
+
+    mode = checkMode;
+    if(checkMode == CESA_FULL_CHECK_MODE)
+        mode = CESA_FAST_CHECK_MODE;
+
+    testOpen(300);
+    testOpen(301);
+    testOpen(302);
+    testOpen(303);
+    testOpen(305);
+
+/* HMAC-MD5 Generate signature test */
+    status = testRun(300, 6, iter, reqSize, mode);
+    printTestResults(300, status, checkMode);
+
+/* HMAC-MD5 Verify Signature test */
+    status = testRun(301, 7, iter, reqSize, mode);
+    printTestResults(301, status, checkMode);
+
+/* HMAC-MD5 Generate signature test */
+    status = testRun(302, 8, iter, reqSize, mode);
+    printTestResults(302, status, checkMode);
+
+/* HMAC-MD5 Verify Signature test */
+    status = testRun(303, 9, iter, reqSize, mode);
+    printTestResults(303, status, checkMode);
+
+/* HASH-MD5 Generate signature test */
+    status = testRun(305, 15, iter, reqSize, mode);
+    printTestResults(305, status, checkMode);
+
+    testClose(300);
+    testClose(301);
+    testClose(302);
+    testClose(303);
+    testClose(305);
+}
+
+void    shaTest(int iter, int reqSize, int checkMode)
+{
+    int         mode;
+    MV_STATUS   status;
+
+    if(iter == 0)
+        iter = CESA_DEF_ITER_NUM;
+
+    mode = checkMode;
+    if(checkMode == CESA_FULL_CHECK_MODE)
+        mode = CESA_FAST_CHECK_MODE;
+
+    testOpen(400);
+    testOpen(401);
+    testOpen(402);
+    testOpen(403);
+    testOpen(405);
+
+/* HMAC-SHA1 Generate signature test */
+    status = testRun(400, 11, iter, reqSize, mode);
+    printTestResults(400, status, checkMode);
+
+/* HMAC-SHA1 Verify Signature test */
+    status = testRun(401, 12, iter, reqSize, mode);
+    printTestResults(401, status, checkMode);
+
+/* HMAC-SHA1 Generate signature test */
+    status = testRun(402, 13, iter, reqSize, mode);
+    printTestResults(402, status, checkMode);
+
+/* HMAC-SHA1 Verify Signature test */
+    status = testRun(403, 14, iter, reqSize, mode);
+    printTestResults(403, status, checkMode);
+
+/* HMAC-SHA1 Generate signature test */
+    status = testRun(405, 16, iter, reqSize, mode);
+    printTestResults(405, status, checkMode);
+
+    testClose(400);
+    testClose(401);
+    testClose(402);
+    testClose(403);
+    testClose(405);
+}
+
+void    combiTest(int iter, int reqSize, int checkMode)
+{
+    MV_STATUS   status;
+    int         mode, i;
+
+    mode = checkMode;
+    if(checkMode == CESA_FULL_CHECK_MODE)
+        mode = CESA_FAST_CHECK_MODE;
+
+    if(iter == 0)
+        iter = CESA_DEF_ITER_NUM;
+
+    i = iter;
+    if(mode != CESA_NULL_CHECK_MODE)
+        i = 1;
+
+    testOpen(500);
+    testOpen(501);
+    testOpen(502);
+    testOpen(503);
+    testOpen(504);
+    testOpen(505);
+    testOpen(506);
+    testOpen(507);
+
+/* DES ECB + MD5 encode test */
+    status = testRun(500, 17, iter, reqSize, mode);
+    printTestResults(500, status, mode);
+
+/* DES ECB + SHA1 encode test */
+    status = testRun(501, 18, iter, reqSize, mode);
+    printTestResults(501, status, mode);
+
+/* 3DES ECB + MD5 encode test */
+    status = testRun(502, 17, iter, reqSize, mode);
+    printTestResults(502, status, mode);
+
+/* 3DES ECB + SHA1 encode test */
+    status = testRun(503, 18, iter, reqSize, mode);
+    printTestResults(503, status, mode);
+
+/* 3DES CBC + MD5 encode test */
+    status = testRun(504, 19, i, reqSize, mode);
+    printTestResults(504, status, mode);
+
+/* 3DES CBC + SHA1 encode test */
+    status = testRun(505, 20, i, reqSize, mode);
+    printTestResults(505, status, mode);
+
+/* AES-128 CBC + MD5 encode test */
+    status = testRun(506, 21, i, reqSize, mode);
+    printTestResults(506, status, mode);
+
+/* AES-128 CBC + SHA1 encode test */
+    status = testRun(507, 22, i, reqSize, mode);
+    printTestResults(507, status, mode);
+
+    testClose(500);
+    testClose(501);
+    testClose(502);
+    testClose(503);
+    testClose(504);
+    testClose(505);
+    testClose(506);
+    testClose(507);
+}
+
+void    cesaOneTest(int testIdx, int caseIdx,
+                    int iter, int reqSize, int checkMode)
+{
+    MV_STATUS   status;
+
+    if(iter == 0)
+        iter = CESA_DEF_ITER_NUM;
+
+    mvOsPrintf("test=%d, case=%d, size=%d, iter=%d\n",
+                testIdx, caseIdx, reqSize, iter);
+
+    status = testOpen(testIdx);
+
+    status = testRun(testIdx, caseIdx, iter, reqSize, checkMode);
+    printTestResults(testIdx, status, checkMode);
+    status = testClose(testIdx);
+
+}
+
+void    cesaTest(int iter, int reqSize, int checkMode)
+{
+    if(iter == 0)
+        iter = CESA_DEF_ITER_NUM;
+
+    mvOsPrintf("%d iteration\n", iter);
+    mvOsPrintf("%d size\n\n", reqSize);
+
+/* DES tests */
+    desTest(iter, reqSize, checkMode);
+
+/* 3DES tests */
+    tripleDesTest(iter, reqSize, checkMode);
+
+/* AES tests */
+    aesTest(iter, reqSize, checkMode);
+
+/* MD5 tests */
+    mdTest(iter, reqSize, checkMode);
+
+/* SHA-1 tests */
+    shaTest(iter, reqSize, checkMode);
+}
+
+void    multiSizeTest(int idx, int iter, int checkMode, char* inputData)
+{
+    MV_STATUS               status;
+    int                     i;
+    MV_CESA_SIZE_TEST*      pMultiTest;
+
+    if( testOpen(idx) != MV_OK)
+        return;
+
+    if(iter == 0)
+        iter = CESA_DEF_ITER_NUM;
+
+    if(checkMode == CESA_SHOW_CHECK_MODE)
+    {
+        iter = 1;
+    }
+    else
+        checkMode = CESA_FULL_CHECK_MODE;
+
+    cesaTestCases[0].plainHexStr = inputData;
+    cesaTestCases[0].pCryptoIV = NULL;
+
+    switch(idx)
+    {
+        case 302:
+            pMultiTest = mdMultiSizeTest302;
+            if(inputData == NULL)
+                cesaTestCases[0].plainHexStr = cesaDataHexStr3;
+            break;
+
+        case 304:
+            pMultiTest = mdMultiSizeTest304;
+            if(inputData == NULL)
+                cesaTestCases[0].plainHexStr = hashHexStr80;
+            break;
+
+        case 305:
+            pMultiTest = mdMultiSizeTest305;
+            if(inputData == NULL)
+                cesaTestCases[0].plainHexStr = hashHexStr80;
+            break;
+
+        case 402:
+            pMultiTest = shaMultiSizeTest402;
+            if(inputData == NULL)
+                cesaTestCases[0].plainHexStr = hashHexStr80;
+            break;
+
+        case 404:
+            pMultiTest = shaMultiSizeTest404;
+            if(inputData == NULL)
+                cesaTestCases[0].plainHexStr = hashHexStr80;
+            break;
+
+        case 405:
+            pMultiTest = shaMultiSizeTest405;
+            if(inputData == NULL)
+                cesaTestCases[0].plainHexStr = hashHexStr80;
+            break;
+
+        case 502:
+            pMultiTest = tripleDesMdMultiSizeTest502;
+            if(inputData == NULL)
+                cesaTestCases[0].plainHexStr = hashHexStr80;
+            break;
+
+        case 503:
+            pMultiTest = tripleDesShaMultiSizeTest503;
+            if(inputData == NULL)
+                cesaTestCases[0].plainHexStr = hashHexStr80;
+            break;
+
+        case 504:
+            iter = 1;
+            pMultiTest = cbc3desMdMultiSizeTest504;
+            cesaTestCases[0].pCryptoIV = iv1;
+            if(inputData == NULL)
+                cesaTestCases[0].plainHexStr = hashHexStr80;
+            break;
+
+        case 505:
+            iter = 1;
+            pMultiTest = cbc3desShaMultiSizeTest505;
+            cesaTestCases[0].pCryptoIV = iv1;
+            if(inputData == NULL)
+                cesaTestCases[0].plainHexStr = hashHexStr80;
+            break;
+
+        case 506:
+            iter = 1;
+            pMultiTest = cbcAes128md5multiSizeTest506;
+            cesaTestCases[0].pCryptoIV = iv5;
+            if(inputData == NULL)
+                cesaTestCases[0].plainHexStr = hashHexStr80;
+            break;
+
+        case 507:
+            iter = 1;
+            pMultiTest = cbcAes128sha1multiSizeTest507;
+            cesaTestCases[0].pCryptoIV = iv5;
+            if(inputData == NULL)
+                cesaTestCases[0].plainHexStr = hashHexStr80;
+            break;
+
+        default:
+            iter = 1;
+            checkMode = CESA_SHOW_CHECK_MODE;
+            pMultiTest = mdMultiSizeTest302;
+            if(inputData == NULL)
+                cesaTestCases[0].plainHexStr = hashHexStr80;
+    }
+    i = 0;
+    while(pMultiTest[i].outputHexStr != NULL)
+    {
+        cesaTestCases[0].cipherHexStr = (char *)pMultiTest[i].outputHexStr;
+        status = testRun(idx, 0, iter, pMultiTest[i].size,
+                         checkMode);
+        if(checkMode != CESA_SHOW_CHECK_MODE)
+        {
+            cesaReqSize = pMultiTest[i].size;
+            printTestResults(idx, status, checkMode);
+        }
+        if(status != MV_OK)
+            break;
+        i++;
+    }
+    testClose(idx);
+/*
+    mvCesaDebugStatus();
+    cesaTestPrintStatus();
+*/
+}
+
+void    open_session_test(int idx, int caseIdx, int iter)
+{
+    int         reqIdError, cryptoError, openErrors, i;
+    int         openErrDisp[100];
+    MV_STATUS   status;
+
+    memset(openErrDisp, 0, sizeof(openErrDisp));
+    openErrors = 0;
+    reqIdError = 0;
+    cryptoError = 0;
+    for(i=0; i<iter; i++)
+    {
+        status = testOpen(idx);
+        if(status != MV_OK)
+        {
+            openErrors++;
+            openErrDisp[status]++;
+        }
+        else
+        {
+            testRun(idx, caseIdx, 1, 0, CESA_FAST_CHECK_MODE);
+            if(cesaCryptoError > 0)
+                cryptoError++;
+            if(cesaReqIdError > 0)
+                reqIdError++;
+
+            testClose(idx);
+        }
+    }
+    if(cryptoError > 0)
+        mvOsPrintf("cryptoError : %d\n", cryptoError);
+    if(reqIdError > 0)
+        mvOsPrintf("reqIdError  : %d\n", reqIdError);
+
+    if(openErrors > 0)
+    {
+        mvOsPrintf("Open Errors = %d\n", openErrors);
+        for(i=0; i<100; i++)
+        {
+            if(openErrDisp[i] != 0)
+                mvOsPrintf("Error %d - occurs %d times\n", i, openErrDisp[i]);
+        }
+    }
+}
+
+
+void    loopback_test(int idx, int iter, int size, char* pPlainData)
+{
+}
+
+
+#if defined(MV_VXWORKS)
+int testMode = 0;
+unsigned __TASKCONV cesaTask(void* args)
+{
+    int reqSize = cesaReqSize;
+
+    if(testMode == 0)
+    {
+        cesaOneTest(cesaTestIdx, cesaCaseIdx, cesaIteration,
+                    reqSize, cesaCheckMode);
+    }
+    else
+    {
+        if(testMode == 1)
+        {
+            cesaTest(cesaIteration, reqSize, cesaCheckMode);
+            combiTest(cesaIteration, reqSize, cesaCheckMode);
+        }
+        else
+        {
+            multiSizeTest(cesaIdx, cesaIteration, cesaCheckMode, NULL);
+        }
+    }
+    return 0;
+}
+
+void oneTest(int testIdx, int caseIdx,
+              int iter, int reqSize, int checkMode)
+{
+    long    rc;
+
+    cesaIteration = iter;
+    cesaReqSize = cesaRateSize = reqSize;
+    cesaCheckMode = checkMode;
+    testMode = 0;
+    cesaTestIdx = testIdx;
+    cesaCaseIdx = caseIdx;
+    rc = mvOsTaskCreate("CESA_T", 100, 4*1024, cesaTask, NULL, &cesaTaskId);
+    if (rc != MV_OK)
+    {
+        mvOsPrintf("hMW: Can't create CESA multiCmd test task, rc = %ld\n", rc);
+    }
+}
+
+void multiTest(int iter, int reqSize, int checkMode)
+{
+    long    rc;
+
+    cesaIteration = iter;
+    cesaCheckMode = checkMode;
+    cesaReqSize = reqSize;
+    testMode = 1;
+    rc = mvOsTaskCreate("CESA_T", 100, 4*1024, cesaTask, NULL, &cesaTaskId);
+    if (rc != MV_OK)
+    {
+        mvOsPrintf("hMW: Can't create CESA multiCmd test task, rc = %ld\n", rc);
+    }
+}
+
+void sizeTest(int testIdx, int iter, int checkMode)
+{
+    long    rc;
+
+    cesaIteration = iter;
+        cesaCheckMode = checkMode;
+        testMode = 2;
+        cesaIdx = testIdx;
+    rc = mvOsTaskCreate("CESA_T", 100, 4*1024, cesaTask, NULL, &cesaTaskId);
+    if (rc != MV_OK)
+    {
+        mvOsPrintf("hMW: Can't create CESA test task, rc = %ld\n", rc);
+    }
+}
+
+#endif /* MV_VXWORKS */
+
+extern void    mvCesaDebugSA(short sid, int mode);
+void    cesaTestPrintSession(int idx)
+{
+    int                     testIdx;
+    MV_CESA_TEST_SESSION*   pTestSession;
+
+    pTestSession = getTestSessionDb(idx, &testIdx);
+    if(pTestSession == NULL)
+    {
+        mvOsPrintf("Test %d is not exist\n", idx);
+        return;
+    }
+    pTestSession = &pTestSession[testIdx];
+
+    if(pTestSession->sid == -1)
+    {
+        mvOsPrintf("Test session %d is not opened\n", idx);
+        return;
+    }
+
+    mvCesaDebugSA(pTestSession->sid, 1);
+}
+
+void    cesaTestPrintStatus(void)
+{
+    mvOsPrintf("\n\t Cesa Test Status\n\n");
+
+    mvOsPrintf("isrCount=%d\n",
+                cesaTestIsrCount);
+
+#ifdef CESA_TEST_DEBUG
+    {
+        int i, j;
+        j = cesaTestTraceIdx;
+        mvOsPrintf("No  Type  Cause   rCause   iCause   Res     Time     pReady    pProc    pEmpty\n");
+        for(i=0; i<MV_CESA_TEST_TRACE_SIZE; i++)
+        {
+            mvOsPrintf("%02d.  %d   0x%04x  0x%04x   0x%04x   0x%02x   0x%02x   %02d   0x%06x  %p  %p  %p\n",
+                j, cesaTestTrace[j].type, cesaTestTrace[j].cause, cesaTestTrace[j].realCause,
+                cesaTestTrace[j].dmaCause, cesaTestTrace[j].resources, cesaTestTrace[j].timeStamp,
+                cesaTestTrace[j].pReqReady, cesaTestTrace[j].pReqProcess, cesaTestTrace[j].pReqEmpty);
+            j++;
+            if(j == MV_CESA_TEST_TRACE_SIZE)
+                j = 0;
+        }
+    }
+#endif /* CESA_TEST_DEBUG */
+}
diff --git a/crypto/ocf/kirkwood/cesa/mvLru.c b/crypto/ocf/kirkwood/cesa/mvLru.c
new file mode 100644
index 000000000000..4b5f877f4c46
--- /dev/null
+++ b/crypto/ocf/kirkwood/cesa/mvLru.c
@@ -0,0 +1,158 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvOs.h"
+#include "mvLru.h"
+/* LRU Cache support */
+
+
+/* Init LRU cache database */
+MV_LRU_CACHE*   mvLruCacheInit(int numOfEntries)
+{
+    int             i;
+    MV_LRU_CACHE*   pLruCache;
+
+    pLruCache = mvOsMalloc(sizeof(MV_LRU_CACHE));
+    if(pLruCache == NULL)
+    {
+        return NULL;
+    }
+    memset(pLruCache, 0, sizeof(MV_LRU_CACHE));
+
+    pLruCache->table = mvOsMalloc(numOfEntries*sizeof(MV_LRU_ENTRY));
+    if(pLruCache->table == NULL)
+    {
+        mvOsFree(pLruCache);
+        return NULL;
+    }
+    memset(pLruCache->table, 0, numOfEntries*sizeof(MV_LRU_ENTRY));
+    pLruCache->tableSize = numOfEntries;
+
+    for(i=0; i<numOfEntries; i++)
+    {
+        pLruCache->table[i].next = i+1;
+        pLruCache->table[i].prev = i-1;
+    }
+    pLruCache->least = 0;
+    pLruCache->most = numOfEntries-1;
+
+    return pLruCache;
+}
+
+void    mvLruCacheFinish(MV_LRU_CACHE* pLruCache)
+{
+    mvOsFree(pLruCache->table);
+    mvOsFree(pLruCache);
+}
+
+/* Update LRU cache database after using cache Index */
+void    mvLruCacheIdxUpdate(MV_LRU_CACHE* pLruHndl, int cacheIdx)
+{
+    int prev, next;
+
+    if(cacheIdx == pLruHndl->most)
+        return;
+
+    next = pLruHndl->table[cacheIdx].next;
+    if(cacheIdx == pLruHndl->least)
+    {
+        pLruHndl->least = next;
+    }
+    else
+    {
+        prev = pLruHndl->table[cacheIdx].prev;
+
+        pLruHndl->table[next].prev = prev;
+        pLruHndl->table[prev].next = next;
+    }
+
+    pLruHndl->table[pLruHndl->most].next = cacheIdx;
+    pLruHndl->table[cacheIdx].prev = pLruHndl->most;
+    pLruHndl->most = cacheIdx;
+}
+
+/* Delete LRU cache entry */
+void    mvLruCacheIdxDelete(MV_LRU_CACHE* pLruHndl, int cacheIdx)
+{
+    int prev, next;
+
+    if(cacheIdx == pLruHndl->least)
+        return;
+
+    prev = pLruHndl->table[cacheIdx].prev;
+    if(cacheIdx == pLruHndl->most)
+    {
+        pLruHndl->most = prev;
+    }
+    else
+    {
+        next = pLruHndl->table[cacheIdx].next;
+
+        pLruHndl->table[next].prev = prev;
+        pLruHndl->table[prev].next = next;
+    }
+    pLruHndl->table[pLruHndl->least].prev = cacheIdx;
+    pLruHndl->table[cacheIdx].next = pLruHndl->least;
+    pLruHndl->least = cacheIdx;
+}
diff --git a/crypto/ocf/kirkwood/cesa/mvLru.h b/crypto/ocf/kirkwood/cesa/mvLru.h
new file mode 100644
index 000000000000..39d2f89bae6c
--- /dev/null
+++ b/crypto/ocf/kirkwood/cesa/mvLru.h
@@ -0,0 +1,112 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+/*******************************************************************************
+* mvLru.h - Header File for Least Recently Used Cache algorithm
+*
+* DESCRIPTION:
+*       This header file contains macros typedefs and function declaration for
+*       the Least Recently Used Cache algorithm.
+*
+*******************************************************************************/
+
+#ifndef __mvLru_h__
+#define __mvLru_h__
+
+
+typedef struct
+{
+    int next;
+    int prev;
+} MV_LRU_ENTRY;
+
+typedef struct
+{
+    int             least;
+    int             most;
+    MV_LRU_ENTRY*   table;
+    int             tableSize;
+
+}MV_LRU_CACHE;
+
+
+/* Find Cache index for replacement LRU */
+static INLINE int     mvLruCacheIdxFind(MV_LRU_CACHE* pLruHndl)
+{
+    return pLruHndl->least;
+}
+
+/* Init LRU cache module */
+MV_LRU_CACHE*   mvLruCacheInit(int numOfEntries);
+
+/* Finish LRU cache module */
+void    mvLruCacheFinish(MV_LRU_CACHE* pLruHndl);
+
+/* Update LRU cache database after using cache Index */
+void    mvLruCacheIdxUpdate(MV_LRU_CACHE* pLruHndl, int cacheIdx);
+
+/* Delete LRU cache entry */
+void    mvLruCacheIdxDelete(MV_LRU_CACHE* pLruHndl, int cacheIdx);
+
+
+#endif /* __mvLru_h__ */
diff --git a/crypto/ocf/kirkwood/cesa/mvMD5.c b/crypto/ocf/kirkwood/cesa/mvMD5.c
new file mode 100644
index 000000000000..f17e8224e718
--- /dev/null
+++ b/crypto/ocf/kirkwood/cesa/mvMD5.c
@@ -0,0 +1,365 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+/*
+ * This code implements the MD5 message-digest algorithm.
+ * The algorithm is due to Ron Rivest.  This code was
+ * written by Colin Plumb in 1993, no copyright is claimed.
+ * This code is in the public domain; do with it what you wish.
+ *
+ * Equivalent code is available from RSA Data Security, Inc.
+ * This code has been tested against that, and is equivalent,
+ * except that you don't need to include two pages of legalese
+ * with every copy.
+ *
+ * To compute the message digest of a chunk of bytes, declare an
+ * MD5Context structure, pass it to MD5Init, call MD5Update as
+ * needed on buffers full of bytes, and then call MD5Final, which
+ * will fill a supplied 16-byte array with the digest.
+ */
+
+#include "mvOs.h"
+#include "mvMD5.h"
+
+static void mvMD5Transform(MV_U32 buf[4], MV_U32 const in[MV_MD5_MAC_LEN]);
+
+#ifdef  MV_CPU_LE
+#define mvByteReverse(buf, len)   /* Nothing */
+#else
+static void mvByteReverse(unsigned char *buf, unsigned longs);
+
+/*
+ * Note: this code is harmless on little-endian machines.
+ */
+static void mvByteReverse(unsigned char *buf, unsigned longs)
+{
+    MV_U32 t;
+
+    do
+    {
+        t = (MV_U32) ((unsigned) buf[3] << 8 | buf[2]) << 16 |
+                      ((unsigned) buf[1] << 8 | buf[0]);
+        *(MV_U32 *) buf = t;
+        buf += 4;
+    } while (--longs);
+}
+#endif
+
+/*
+ * Start MD5 accumulation.  Set bit count to 0 and buffer to mysterious
+ * initialization constants.
+ */
+void    mvMD5Init(MV_MD5_CONTEXT *ctx)
+{
+    ctx->buf[0] = 0x67452301;
+    ctx->buf[1] = 0xefcdab89;
+    ctx->buf[2] = 0x98badcfe;
+    ctx->buf[3] = 0x10325476;
+
+    ctx->bits[0] = 0;
+    ctx->bits[1] = 0;
+}
+
+/*
+ * Update context to reflect the concatenation of another buffer full
+ * of bytes.
+ */
+void    mvMD5Update(MV_MD5_CONTEXT *ctx, unsigned char const *buf, unsigned len)
+{
+    MV_U32 t;
+
+    /* Update bitcount */
+
+    t = ctx->bits[0];
+    if ((ctx->bits[0] = t + ((MV_U32) len << 3)) < t)
+        ctx->bits[1]++;         /* Carry from low to high */
+    ctx->bits[1] += len >> 29;
+
+    t = (t >> 3) & 0x3f;        /* Bytes already in shsInfo->data */
+
+    /* Handle any leading odd-sized chunks */
+
+    if (t)
+    {
+        unsigned char *p = (unsigned char *) ctx->in + t;
+
+        t = 64 - t;
+        if (len < t)
+        {
+            memcpy(p, buf, len);
+            return;
+        }
+        memcpy(p, buf, t);
+        mvByteReverse(ctx->in, MV_MD5_MAC_LEN);
+        mvMD5Transform(ctx->buf, (MV_U32 *) ctx->in);
+        buf += t;
+        len -= t;
+    }
+    /* Process data in 64-byte chunks */
+
+    while (len >= 64)
+    {
+        memcpy(ctx->in, buf, 64);
+        mvByteReverse(ctx->in, MV_MD5_MAC_LEN);
+        mvMD5Transform(ctx->buf, (MV_U32 *) ctx->in);
+        buf += 64;
+        len -= 64;
+    }
+
+    /* Handle any remaining bytes of data. */
+
+    memcpy(ctx->in, buf, len);
+}
+
+/*
+ * Final wrapup - pad to 64-byte boundary with the bit pattern
+ * 1 0* (64-bit count of bits processed, MSB-first)
+ */
+void    mvMD5Final(unsigned char digest[MV_MD5_MAC_LEN], MV_MD5_CONTEXT *ctx)
+{
+    unsigned count;
+    unsigned char *p;
+
+    /* Compute number of bytes mod 64 */
+    count = (ctx->bits[0] >> 3) & 0x3F;
+
+    /* Set the first char of padding to 0x80.  This is safe since there is
+       always at least one byte free */
+    p = ctx->in + count;
+    *p++ = 0x80;
+
+    /* Bytes of padding needed to make 64 bytes */
+    count = 64 - 1 - count;
+
+    /* Pad out to 56 mod 64 */
+    if (count < 8)
+    {
+        /* Two lots of padding:  Pad the first block to 64 bytes */
+        memset(p, 0, count);
+        mvByteReverse(ctx->in, MV_MD5_MAC_LEN);
+        mvMD5Transform(ctx->buf, (MV_U32 *) ctx->in);
+
+        /* Now fill the next block with 56 bytes */
+        memset(ctx->in, 0, 56);
+    }
+    else
+    {
+        /* Pad block to 56 bytes */
+        memset(p, 0, count - 8);
+    }
+    mvByteReverse(ctx->in, 14);
+
+    /* Append length in bits and transform */
+    ((MV_U32 *) ctx->in)[14] = ctx->bits[0];
+    ((MV_U32 *) ctx->in)[15] = ctx->bits[1];
+
+    mvMD5Transform(ctx->buf, (MV_U32 *) ctx->in);
+    mvByteReverse((unsigned char *) ctx->buf, 4);
+    memcpy(digest, ctx->buf, MV_MD5_MAC_LEN);
+    memset(ctx, 0, sizeof(ctx));        /* In case it's sensitive */
+}
+
+/* The four core functions - F1 is optimized somewhat */
+
+/* #define F1(x, y, z) (x & y | ~x & z) */
+#define F1(x, y, z) (z ^ (x & (y ^ z)))
+#define F2(x, y, z) F1(z, x, y)
+#define F3(x, y, z) (x ^ y ^ z)
+#define F4(x, y, z) (y ^ (x | ~z))
+
+/* This is the central step in the MD5 algorithm. */
+#define MD5STEP(f, w, x, y, z, data, s) \
+        ( w += f(x, y, z) + data,  w = w<<s | w>>(32-s),  w += x )
+
+/*
+ * The core of the MD5 algorithm, this alters an existing MD5 hash to
+ * reflect the addition of 16 longwords of new data.  MD5Update blocks
+ * the data and converts bytes into longwords for this routine.
+ */
+static void mvMD5Transform(MV_U32 buf[4], MV_U32 const in[MV_MD5_MAC_LEN])
+{
+    register MV_U32 a, b, c, d;
+
+    a = buf[0];
+    b = buf[1];
+    c = buf[2];
+    d = buf[3];
+
+    MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
+    MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
+    MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
+    MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
+    MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
+    MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
+    MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
+    MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
+    MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
+    MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
+    MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
+    MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
+    MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
+    MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
+    MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
+    MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
+
+    MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
+    MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
+    MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
+    MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
+    MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
+    MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
+    MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
+    MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
+    MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
+    MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
+    MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
+    MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
+    MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
+    MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
+    MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
+    MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
+
+    MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
+    MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
+    MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
+    MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
+    MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
+    MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
+    MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
+    MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
+    MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
+    MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
+    MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
+    MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
+    MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
+    MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
+    MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
+    MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
+
+    MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
+    MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
+    MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
+    MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
+    MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
+    MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
+    MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
+    MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
+    MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
+    MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
+    MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
+    MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
+    MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
+    MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
+    MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
+    MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
+
+    buf[0] += a;
+    buf[1] += b;
+    buf[2] += c;
+    buf[3] += d;
+}
+
+void    mvMD5(unsigned char const *buf, unsigned len, unsigned char* digest)
+{
+    MV_MD5_CONTEXT  ctx;
+
+    mvMD5Init(&ctx);
+    mvMD5Update(&ctx, buf, len);
+    mvMD5Final(digest, &ctx);
+}
+
+
+void    mvHmacMd5(unsigned char const* text, int text_len,
+                  unsigned char const* key, int key_len,
+                  unsigned char* digest)
+{
+    int             i;
+    MV_MD5_CONTEXT  ctx;
+    unsigned char   k_ipad[64+1]; /* inner padding - key XORd with ipad */
+    unsigned char   k_opad[64+1]; /* outer padding - key XORd with opad */
+
+    /* start out by storing key in pads */
+    memset(k_ipad, 0, 64);
+    memcpy(k_ipad, key, key_len);
+    memset(k_opad, 0, 64);
+    memcpy(k_opad, key, key_len);
+
+    /* XOR key with ipad and opad values */
+    for (i=0; i<64; i++)
+    {
+	    k_ipad[i] ^= 0x36;
+	    k_opad[i] ^= 0x5c;
+    }
+
+    /* perform inner MD5 */
+    mvMD5Init(&ctx);                   /* init ctx for 1st pass */
+    mvMD5Update(&ctx, k_ipad, 64);    /* start with inner pad */
+    mvMD5Update(&ctx, text, text_len); /* then text of datagram */
+    mvMD5Final(digest, &ctx);          /* finish up 1st pass */
+
+    /* perform outer MD5 */
+    mvMD5Init(&ctx);                   /* init ctx for 2nd pass */
+    mvMD5Update(&ctx, k_opad, 64);     /* start with outer pad */
+    mvMD5Update(&ctx, digest, 16);     /* then results of 1st hash */
+    mvMD5Final(digest, &ctx);          /* finish up 2nd pass */
+}
diff --git a/crypto/ocf/kirkwood/cesa/mvMD5.h b/crypto/ocf/kirkwood/cesa/mvMD5.h
new file mode 100644
index 000000000000..d20281e22b33
--- /dev/null
+++ b/crypto/ocf/kirkwood/cesa/mvMD5.h
@@ -0,0 +1,93 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __mvMD5_h__
+#define __mvMD5_h__
+
+#include "mvMD5.h"
+
+#define MV_MD5_MAC_LEN 16
+
+
+typedef struct
+{
+    MV_U32 buf[4];
+    MV_U32 bits[2];
+    MV_U8  in[64];
+
+} MV_MD5_CONTEXT;
+
+void mvMD5Init(MV_MD5_CONTEXT *context);
+void mvMD5Update(MV_MD5_CONTEXT *context, unsigned char const *buf,
+                unsigned len);
+void mvMD5Final(unsigned char digest[16], MV_MD5_CONTEXT *context);
+
+void mvMD5(unsigned char const *buf, unsigned len, unsigned char* digest);
+
+void mvHmacMd5(unsigned char const* text, int text_len,
+                  unsigned char const* key, int key_len,
+                  unsigned char* digest);
+
+
+#endif /* __mvMD5_h__ */
diff --git a/crypto/ocf/kirkwood/cesa/mvSHA1.c b/crypto/ocf/kirkwood/cesa/mvSHA1.c
new file mode 100644
index 000000000000..0b2f558efc22
--- /dev/null
+++ b/crypto/ocf/kirkwood/cesa/mvSHA1.c
@@ -0,0 +1,252 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+/*
+ * SHA1 hash implementation and interface functions
+ * Copyright (c) 2003-2005, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Alternatively, this software may be distributed under the terms of BSD
+ * license.
+ *
+ * See README and COPYING for more details.
+ */
+
+#include "mvOs.h"
+#include "mvSHA1.h"
+
+#define SHA1HANDSOFF
+
+typedef union
+{
+    MV_U8   c[64];
+    MV_U32  l[16];
+
+} CHAR64LONG16;
+
+static void mvSHA1Transform(MV_U32 state[5], const MV_U8 *buffer);
+
+#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))
+
+
+#ifdef MV_CPU_LE
+#define blk0(i) (block->l[i] = (rol(block->l[i], 24) & 0xFF00FF00) | \
+        (rol(block->l[i], 8) & 0x00FF00FF))
+#else
+#define blk0(i) block->l[i]
+#endif
+#define blk(i) (block->l[i & 15] = rol(block->l[(i + 13) & 15] ^ \
+        block->l[(i + 8) & 15] ^ block->l[(i + 2) & 15] ^ block->l[i & 15], 1))
+
+/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
+#define R0(v,w,x,y,z,i) \
+        z += ((w & (x ^ y)) ^ y) + blk0(i) + 0x5A827999 + rol(v, 5); \
+        w = rol(w, 30);
+#define R1(v,w,x,y,z,i) \
+        z += ((w & (x ^ y)) ^ y) + blk(i) + 0x5A827999 + rol(v, 5); \
+        w = rol(w, 30);
+#define R2(v,w,x,y,z,i) \
+        z += (w ^ x ^ y) + blk(i) + 0x6ED9EBA1 + rol(v, 5); w = rol(w, 30);
+#define R3(v,w,x,y,z,i) \
+        z += (((w | x) & y) | (w & x)) + blk(i) + 0x8F1BBCDC + rol(v, 5); \
+        w = rol(w, 30);
+#define R4(v,w,x,y,z,i) \
+        z += (w ^ x ^ y) + blk(i) + 0xCA62C1D6 + rol(v, 5); \
+        w=rol(w, 30);
+
+/* Hash a single 512-bit block. This is the core of the algorithm. */
+static void    mvSHA1Transform(MV_U32 state[5], const MV_U8 *buffer)
+{
+    MV_U32          a, b, c, d, e;
+    CHAR64LONG16*   block;
+
+#ifdef SHA1HANDSOFF
+    static MV_U32  workspace[16];
+
+    block = (CHAR64LONG16 *) workspace;
+    memcpy(block, buffer, 64);
+#else
+    block = (CHAR64LONG16 *) buffer;
+#endif
+    /* Copy context->state[] to working vars */
+    a = state[0];
+    b = state[1];
+    c = state[2];
+    d = state[3];
+    e = state[4];
+    /* 4 rounds of 20 operations each. Loop unrolled. */
+    R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3);
+    R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7);
+    R0(c,d,e,a,b, 8); R0(b,c,d,e,a, 9); R0(a,b,c,d,e,10); R0(e,a,b,c,d,11);
+    R0(d,e,a,b,c,12); R0(c,d,e,a,b,13); R0(b,c,d,e,a,14); R0(a,b,c,d,e,15);
+    R1(e,a,b,c,d,16); R1(d,e,a,b,c,17); R1(c,d,e,a,b,18); R1(b,c,d,e,a,19);
+    R2(a,b,c,d,e,20); R2(e,a,b,c,d,21); R2(d,e,a,b,c,22); R2(c,d,e,a,b,23);
+    R2(b,c,d,e,a,24); R2(a,b,c,d,e,25); R2(e,a,b,c,d,26); R2(d,e,a,b,c,27);
+    R2(c,d,e,a,b,28); R2(b,c,d,e,a,29); R2(a,b,c,d,e,30); R2(e,a,b,c,d,31);
+    R2(d,e,a,b,c,32); R2(c,d,e,a,b,33); R2(b,c,d,e,a,34); R2(a,b,c,d,e,35);
+    R2(e,a,b,c,d,36); R2(d,e,a,b,c,37); R2(c,d,e,a,b,38); R2(b,c,d,e,a,39);
+    R3(a,b,c,d,e,40); R3(e,a,b,c,d,41); R3(d,e,a,b,c,42); R3(c,d,e,a,b,43);
+    R3(b,c,d,e,a,44); R3(a,b,c,d,e,45); R3(e,a,b,c,d,46); R3(d,e,a,b,c,47);
+    R3(c,d,e,a,b,48); R3(b,c,d,e,a,49); R3(a,b,c,d,e,50); R3(e,a,b,c,d,51);
+    R3(d,e,a,b,c,52); R3(c,d,e,a,b,53); R3(b,c,d,e,a,54); R3(a,b,c,d,e,55);
+    R3(e,a,b,c,d,56); R3(d,e,a,b,c,57); R3(c,d,e,a,b,58); R3(b,c,d,e,a,59);
+    R4(a,b,c,d,e,60); R4(e,a,b,c,d,61); R4(d,e,a,b,c,62); R4(c,d,e,a,b,63);
+    R4(b,c,d,e,a,64); R4(a,b,c,d,e,65); R4(e,a,b,c,d,66); R4(d,e,a,b,c,67);
+    R4(c,d,e,a,b,68); R4(b,c,d,e,a,69); R4(a,b,c,d,e,70); R4(e,a,b,c,d,71);
+    R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74); R4(a,b,c,d,e,75);
+    R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79);
+    /* Add the working vars back into context.state[] */
+    state[0] += a;
+    state[1] += b;
+    state[2] += c;
+    state[3] += d;
+    state[4] += e;
+    /* Wipe variables */
+    a = b = c = d = e = 0;
+}
+
+void    mvSHA1Init(MV_SHA1_CTX* context)
+{
+    /* SHA1 initialization constants */
+    context->state[0] = 0x67452301;
+    context->state[1] = 0xEFCDAB89;
+    context->state[2] = 0x98BADCFE;
+    context->state[3] = 0x10325476;
+    context->state[4] = 0xC3D2E1F0;
+    context->count[0] = context->count[1] = 0;
+}
+
+
+/* Run your data through this. */
+void    mvSHA1Update(MV_SHA1_CTX *context, MV_U8 const *data,
+                     unsigned int len)
+{
+    MV_U32 i, j;
+
+    j = (context->count[0] >> 3) & 63;
+    if ((context->count[0] += len << 3) < (len << 3))
+            context->count[1]++;
+    context->count[1] += (len >> 29);
+    if ((j + len) > 63)
+    {
+        memcpy(&context->buffer[j], data, (i = 64-j));
+        mvSHA1Transform(context->state, context->buffer);
+        for ( ; i + 63 < len; i += 64)
+        {
+            mvSHA1Transform(context->state, &data[i]);
+        }
+        j = 0;
+    }
+    else
+    {
+        i = 0;
+    }
+    memcpy(&context->buffer[j], &data[i], len - i);
+}
+
+void    mvSHA1Final(MV_U8* digest, MV_SHA1_CTX* context)
+{
+    MV_U32  i;
+    MV_U8   finalcount[8];
+
+    for (i = 0; i < 8; i++)
+    {
+        finalcount[i] = (unsigned char)((context->count[(i >= 4 ? 0 : 1)] >>
+                      ((3-(i & 3)) * 8) ) & 255);  /* Endian independent */
+    }
+    mvSHA1Update(context, (const unsigned char *) "\200", 1);
+    while ((context->count[0] & 504) != 448)
+    {
+        mvSHA1Update(context, (const unsigned char *) "\0", 1);
+    }
+    mvSHA1Update(context, finalcount, 8);  /* Should cause a mvSHA1Transform()
+                                          */
+    for (i = 0; i < 20; i++)
+    {
+        digest[i] = (unsigned char)
+                    ((context->state[i >> 2] >> ((3 - (i & 3)) * 8)) & 255);
+    }
+    /* Wipe variables */
+    i = 0;
+    memset(context->buffer, 0, 64);
+    memset(context->state, 0, 20);
+    memset(context->count, 0, 8);
+    memset(finalcount, 0, 8);
+
+#ifdef SHA1HANDSOFF  /* make SHA1Transform overwrite it's own static vars */
+    mvSHA1Transform(context->state, context->buffer);
+#endif
+}
+
+
+void mvSHA1(MV_U8 const *buf, unsigned int len, MV_U8* digest)
+{
+    MV_SHA1_CTX  ctx;
+
+    mvSHA1Init(&ctx);
+    mvSHA1Update(&ctx, buf, len);
+    mvSHA1Final(digest, &ctx);
+}
diff --git a/crypto/ocf/kirkwood/cesa/mvSHA1.h b/crypto/ocf/kirkwood/cesa/mvSHA1.h
new file mode 100644
index 000000000000..1914f4768d1a
--- /dev/null
+++ b/crypto/ocf/kirkwood/cesa/mvSHA1.h
@@ -0,0 +1,88 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __mvSHA1_h__
+#define __mvSHA1_h__
+
+#include "mvSHA1.h"
+
+#define MV_SHA1_MAC_LEN 20
+
+
+typedef struct
+{
+    MV_U32 state[5];
+    MV_U32 count[2];
+    MV_U8  buffer[64];
+
+} MV_SHA1_CTX;
+
+void mvSHA1Init(MV_SHA1_CTX *context);
+void mvSHA1Update(MV_SHA1_CTX *context, MV_U8 const *buf, unsigned int len);
+void mvSHA1Final(MV_U8* digest, MV_SHA1_CTX *context);
+
+void mvSHA1(MV_U8 const *buf, unsigned int len, MV_U8* digest);
+
+
+#endif /* __mvSHA1_h__ */
diff --git a/crypto/ocf/kirkwood/cesa_ocf_drv.c b/crypto/ocf/kirkwood/cesa_ocf_drv.c
new file mode 100644
index 000000000000..ed929688cf4d
--- /dev/null
+++ b/crypto/ocf/kirkwood/cesa_ocf_drv.c
@@ -0,0 +1,1302 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/crypto.h>
+#include <linux/mm.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
+#include <linux/platform_device.h>
+#include <asm/scatterlist.h>
+#include <linux/spinlock.h>
+#include "ctrlEnv/sys/mvSysCesa.h"
+#include "cesa/mvCesa.h" /* moved here before cryptodev.h due to include dependencies */
+#include <cryptodev.h>
+#include <uio.h>
+#include <plat/mv_cesa.h>
+#include <linux/mbus.h>
+#include "mvDebug.h"
+
+#include "cesa/mvMD5.h"
+#include "cesa/mvSHA1.h"
+
+#include "cesa/mvCesaRegs.h"
+#include "cesa/AES/mvAes.h"
+#include "cesa/mvLru.h"
+
+#undef  RT_DEBUG
+#ifdef RT_DEBUG
+static int debug = 1;
+module_param(debug, int, 1);
+MODULE_PARM_DESC(debug, "Enable debug");
+#undef dprintk
+#define dprintk(a...)	if (debug) { printk(a); } else
+#else
+static int debug = 0;
+#undef dprintk
+#define dprintk(a...)
+#endif
+
+
+/* TDMA Regs */
+#define WINDOW_BASE(i) 0xA00 + (i << 3)
+#define WINDOW_CTRL(i) 0xA04 + (i << 3)
+
+/* interrupt handling */
+#undef CESA_OCF_POLLING
+#undef CESA_OCF_TASKLET
+
+#if defined(CESA_OCF_POLLING) && defined(CESA_OCF_TASKLET)
+#error "don't use both tasklet and polling mode"
+#endif
+
+extern int cesaReqResources;
+/* support for spliting action into 2 actions */
+#define CESA_OCF_SPLIT
+
+/* general defines */
+#define CESA_OCF_MAX_SES 128
+#define CESA_Q_SIZE	 64
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)
+#define FRAG_PAGE(f)    (f).p
+#else
+#define FRAG_PAGE(f)    (f)
+#endif
+
+/* data structures */
+struct cesa_ocf_data {
+        int                                      cipher_alg;
+        int                                      auth_alg;
+	int					 encrypt_tn_auth;
+#define  auth_tn_decrypt  encrypt_tn_auth
+	int					 ivlen;
+	int 					 digestlen;
+	short					 sid_encrypt;
+	short					 sid_decrypt;
+	/* fragment workaround sessions */
+	short					 frag_wa_encrypt;
+	short					 frag_wa_decrypt;
+	short					 frag_wa_auth;
+};
+
+/* CESA device data */
+struct cesa_dev {
+	void __iomem *sram;
+	void __iomem *reg;
+        struct mv_cesa_platform_data *plat_data;
+	int irq;
+};
+
+#define DIGEST_BUF_SIZE	32
+struct cesa_ocf_process {
+	MV_CESA_COMMAND 			cesa_cmd;
+	MV_CESA_MBUF 				cesa_mbuf;
+	MV_BUF_INFO  				cesa_bufs[MV_CESA_MAX_MBUF_FRAGS];
+	char					digest[DIGEST_BUF_SIZE];
+	int					digest_len;
+	struct cryptop 				*crp;
+	int 					need_cb;
+};
+
+/* global variables */
+static int32_t			cesa_ocf_id 		= -1;
+static struct cesa_ocf_data 	*cesa_ocf_sessions[CESA_OCF_MAX_SES];
+static spinlock_t 		cesa_lock;
+static struct cesa_dev cesa_device;
+
+/* static APIs */
+static int 		cesa_ocf_process	(device_t, struct cryptop *, int);
+static int 		cesa_ocf_newsession	(device_t, u_int32_t *, struct cryptoini *);
+static int 		cesa_ocf_freesession	(device_t, u_int64_t);
+static void 		cesa_callback		(unsigned long);
+static irqreturn_t	cesa_interrupt_handler	(int, void *);
+#ifdef CESA_OCF_POLLING
+static void cesa_interrupt_polling(void);
+#endif
+#ifdef CESA_OCF_TASKLET
+static struct tasklet_struct cesa_ocf_tasklet;
+#endif
+
+static struct timeval          tt_start;
+static struct timeval          tt_end;
+
+/*
+ * dummy device structure
+ */
+
+static struct {
+	softc_device_decl	sc_dev;
+} mv_cesa_dev;
+
+static device_method_t mv_cesa_methods = {
+	/* crypto device methods */
+	DEVMETHOD(cryptodev_newsession,	cesa_ocf_newsession),
+	DEVMETHOD(cryptodev_freesession,cesa_ocf_freesession),
+	DEVMETHOD(cryptodev_process,	cesa_ocf_process),
+	DEVMETHOD(cryptodev_kprocess,	NULL),
+};
+
+
+
+/* Add debug Trace */
+#undef CESA_OCF_TRACE_DEBUG
+#ifdef CESA_OCF_TRACE_DEBUG
+
+#define MV_CESA_USE_TIMER_ID    0
+
+typedef struct
+{
+    int             type;       /* 0 - isrEmpty, 1 - cesaReadyGet, 2 - cesaAction */
+    MV_U32          timeStamp;
+    MV_U32          cause;
+    MV_U32          realCause;
+    MV_U32          dmaCause;
+    int             resources;
+    MV_CESA_REQ*    pReqReady;
+    MV_CESA_REQ*    pReqEmpty;
+    MV_CESA_REQ*    pReqProcess;
+} MV_CESA_TEST_TRACE;
+
+#define MV_CESA_TEST_TRACE_SIZE      50
+
+static int cesaTestTraceIdx = 0;
+static MV_CESA_TEST_TRACE    cesaTestTrace[MV_CESA_TEST_TRACE_SIZE];
+
+static void cesaTestTraceAdd(int type)
+{
+    cesaTestTrace[cesaTestTraceIdx].type = type;
+    cesaTestTrace[cesaTestTraceIdx].realCause = MV_REG_READ(MV_CESA_ISR_CAUSE_REG);
+    //cesaTestTrace[cesaTestTraceIdx].idmaCause = MV_REG_READ(IDMA_CAUSE_REG);
+    cesaTestTrace[cesaTestTraceIdx].resources = cesaReqResources;
+    cesaTestTrace[cesaTestTraceIdx].pReqReady = pCesaReqReady;
+    cesaTestTrace[cesaTestTraceIdx].pReqEmpty = pCesaReqEmpty;
+    cesaTestTrace[cesaTestTraceIdx].pReqProcess = pCesaReqProcess;
+    cesaTestTrace[cesaTestTraceIdx].timeStamp = mvCntmrRead(MV_CESA_USE_TIMER_ID);
+    cesaTestTraceIdx++;
+    if(cesaTestTraceIdx == MV_CESA_TEST_TRACE_SIZE)
+        cesaTestTraceIdx = 0;
+}
+
+#else /* CESA_OCF_TRACE_DEBUG */
+
+#define cesaTestTraceAdd(x)
+
+#endif /* CESA_OCF_TRACE_DEBUG */
+
+unsigned int
+get_usec(unsigned int start)
+{
+	if(start) {
+		do_gettimeofday (&tt_start);
+		return 0;
+	}
+	else {
+		do_gettimeofday (&tt_end);
+		tt_end.tv_sec -= tt_start.tv_sec;
+		tt_end.tv_usec -= tt_start.tv_usec;
+		if (tt_end.tv_usec < 0) {
+			tt_end.tv_usec += 1000 * 1000;
+			tt_end.tv_sec -= 1;
+		}
+	}
+	printk("time taken is  %d\n", (unsigned int)(tt_end.tv_usec + tt_end.tv_sec * 1000000));
+	return (tt_end.tv_usec + tt_end.tv_sec * 1000000);
+}
+
+#ifdef RT_DEBUG
+/*
+ * check that the crp action match the current session
+ */
+static int
+ocf_check_action(struct cryptop *crp, struct cesa_ocf_data *cesa_ocf_cur_ses) {
+	int count = 0;
+	int encrypt = 0, decrypt = 0, auth = 0;
+	struct cryptodesc *crd;
+
+        /* Go through crypto descriptors, processing as we go */
+        for (crd = crp->crp_desc; crd; crd = crd->crd_next, count++) {
+		if(count > 2) {
+			printk("%s,%d: session mode is not supported.\n", __FILE__, __LINE__);
+			return 1;
+		}
+
+		/* Encryption /Decryption */
+		if(crd->crd_alg == cesa_ocf_cur_ses->cipher_alg) {
+			/* check that the action is compatible with session */
+			if(encrypt || decrypt) {
+				printk("%s,%d: session mode is not supported.\n", __FILE__, __LINE__);
+				return 1;
+			}
+
+			if(crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
+				if( (count == 2) && (cesa_ocf_cur_ses->encrypt_tn_auth) ) {
+					printk("%s,%d: sequence isn't supported by this session.\n", __FILE__, __LINE__);
+					return 1;
+				}
+				encrypt++;
+			}
+			else { 					/* decrypt */
+				if( (count == 2) && !(cesa_ocf_cur_ses->auth_tn_decrypt) ) {
+					printk("%s,%d: sequence isn't supported by this session.\n", __FILE__, __LINE__);
+					return 1;
+				}
+				decrypt++;
+			}
+
+		}
+		/* Authentication */
+		else if(crd->crd_alg == cesa_ocf_cur_ses->auth_alg) {
+			/* check that the action is compatible with session */
+			if(auth) {
+				printk("%s,%d: session mode is not supported.\n", __FILE__, __LINE__);
+				return 1;
+			}
+			if( (count == 2) && (decrypt) && (cesa_ocf_cur_ses->auth_tn_decrypt)) {
+				printk("%s,%d: sequence isn't supported by this session.\n", __FILE__, __LINE__);
+				return 1;
+			}
+			if( (count == 2) && (encrypt) && !(cesa_ocf_cur_ses->encrypt_tn_auth)) {
+				printk("%s,%d: sequence isn't supported by this session.\n", __FILE__, __LINE__);
+				return 1;
+			}
+			auth++;
+		}
+		else {
+			printk("%s,%d: Alg isn't supported by this session.\n", __FILE__, __LINE__);
+			return 1;
+		}
+	}
+	return 0;
+
+}
+#endif
+
+/*
+ * Process a request.
+ */
+static int
+cesa_ocf_process(device_t dev, struct cryptop *crp, int hint)
+{
+	struct cesa_ocf_process *cesa_ocf_cmd = NULL;
+	struct cesa_ocf_process *cesa_ocf_cmd_wa = NULL;
+	MV_CESA_COMMAND	*cesa_cmd;
+	struct cryptodesc *crd;
+	struct cesa_ocf_data *cesa_ocf_cur_ses;
+	int sid = 0, temp_len = 0, i;
+	int encrypt = 0, decrypt = 0, auth = 0;
+	int  status;
+	struct sk_buff *skb = NULL;
+	struct uio *uiop = NULL;
+	unsigned char *ivp;
+	MV_BUF_INFO *p_buf_info;
+	MV_CESA_MBUF *p_mbuf_info;
+	unsigned long flags;
+
+        dprintk("%s()\n", __FUNCTION__);
+
+	if( cesaReqResources <= 1 ) {
+                dprintk("%s,%d: ERESTART\n", __FILE__, __LINE__);
+                return ERESTART;
+	}
+
+#ifdef RT_DEBUG
+        /* Sanity check */
+        if (crp == NULL) {
+                printk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+                return EINVAL;
+        }
+
+        if (crp->crp_desc == NULL || crp->crp_buf == NULL ) {
+                printk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+                crp->crp_etype = EINVAL;
+                return EINVAL;
+        }
+
+        sid = crp->crp_sid & 0xffffffff;
+        if ((sid >= CESA_OCF_MAX_SES) || (cesa_ocf_sessions[sid] == NULL)) {
+                crp->crp_etype = ENOENT;
+                printk("%s,%d: ENOENT session %d \n", __FILE__, __LINE__, sid);
+                return EINVAL;
+        }
+#endif
+
+	sid = crp->crp_sid & 0xffffffff;
+	crp->crp_etype = 0;
+	cesa_ocf_cur_ses = cesa_ocf_sessions[sid];
+
+#ifdef RT_DEBUG
+	if(ocf_check_action(crp, cesa_ocf_cur_ses)){
+		goto p_error;
+	}
+#endif
+
+	/* malloc a new  cesa process */
+	cesa_ocf_cmd = kmalloc(sizeof(struct cesa_ocf_process), GFP_ATOMIC);
+
+        if (cesa_ocf_cmd == NULL) {
+		printk("%s,%d: ENOBUFS \n", __FILE__, __LINE__);
+		goto p_error;
+	}
+	memset(cesa_ocf_cmd, 0, sizeof(struct cesa_ocf_process));
+
+	/* init cesa_process */
+	cesa_ocf_cmd->crp = crp;
+	/* always call callback */
+	cesa_ocf_cmd->need_cb = 1;
+
+	/* init cesa_cmd for usage of the HALs */
+	cesa_cmd = &cesa_ocf_cmd->cesa_cmd;
+	cesa_cmd->pReqPrv = (void *)cesa_ocf_cmd;
+	cesa_cmd->sessionId = cesa_ocf_cur_ses->sid_encrypt; /* defualt use encrypt */
+
+	/* prepare src buffer 	*/
+	/* we send the entire buffer to the HAL, even if only part of it should be encrypt/auth.  */
+	/* if not using seesions for both encrypt and auth, then it will be wiser to to copy only */
+	/* from skip to crd_len. 								  */
+	p_buf_info = cesa_ocf_cmd->cesa_bufs;
+	p_mbuf_info = &cesa_ocf_cmd->cesa_mbuf;
+
+	p_buf_info += 2; /* save 2 first buffers for IV and digest -
+			    we won't append them to the end since, they
+			    might be places in an unaligned addresses. */
+
+	p_mbuf_info->pFrags = p_buf_info;
+	temp_len = 0;
+
+	/* handle SKB */
+	if (crp->crp_flags & CRYPTO_F_SKBUF) {
+
+		dprintk("%s,%d: handle SKB.\n", __FILE__, __LINE__);
+		skb = (struct sk_buff *) crp->crp_buf;
+
+                if (skb_shinfo(skb)->nr_frags >= (MV_CESA_MAX_MBUF_FRAGS - 1)) {
+                        printk("%s,%d: %d nr_frags > MV_CESA_MAX_MBUF_FRAGS", __FILE__, __LINE__, skb_shinfo(skb)->nr_frags);
+                        goto p_error;
+                }
+
+		p_mbuf_info->mbufSize = skb->len;
+		temp_len = skb->len;
+		/* first skb fragment */
+		p_buf_info->bufSize = skb_headlen(skb);
+		p_buf_info->bufVirtPtr = skb->data;
+		p_buf_info++;
+
+		/* now handle all other skb fragments */
+		for ( i = 0; i < skb_shinfo(skb)->nr_frags; i++ ) {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+			p_buf_info->bufSize = frag->size;
+			p_buf_info->bufVirtPtr = page_address(FRAG_PAGE(frag->page)) + frag->page_offset;
+			p_buf_info++;
+		}
+		p_mbuf_info->numFrags = skb_shinfo(skb)->nr_frags + 1;
+	}
+	/* handle UIO */
+	else if(crp->crp_flags & CRYPTO_F_IOV) {
+
+		dprintk("%s,%d: handle UIO.\n", __FILE__, __LINE__);
+		uiop = (struct uio *) crp->crp_buf;
+
+                if (uiop->uio_iovcnt > (MV_CESA_MAX_MBUF_FRAGS - 1)) {
+                        printk("%s,%d: %d uio_iovcnt > MV_CESA_MAX_MBUF_FRAGS \n", __FILE__, __LINE__, uiop->uio_iovcnt);
+                        goto p_error;
+                }
+
+		p_mbuf_info->mbufSize = crp->crp_ilen;
+		p_mbuf_info->numFrags = uiop->uio_iovcnt;
+		for(i = 0; i < uiop->uio_iovcnt; i++) {
+			p_buf_info->bufVirtPtr = uiop->uio_iov[i].iov_base;
+			p_buf_info->bufSize = uiop->uio_iov[i].iov_len;
+			temp_len += p_buf_info->bufSize;
+			dprintk("%s,%d: buf %x-> addr %x, size %x \n"
+				, __FILE__, __LINE__, i, (unsigned int)p_buf_info->bufVirtPtr, p_buf_info->bufSize);
+			p_buf_info++;
+		}
+
+	}
+	/* handle CONTIG */
+	else {
+		dprintk("%s,%d: handle CONTIG.\n", __FILE__, __LINE__);
+		p_mbuf_info->numFrags = 1;
+		p_mbuf_info->mbufSize = crp->crp_ilen;
+		p_buf_info->bufVirtPtr = crp->crp_buf;
+		p_buf_info->bufSize = crp->crp_ilen;
+		temp_len = crp->crp_ilen;
+		p_buf_info++;
+	}
+
+	/* Support up to 64K why? cause! */
+	if(crp->crp_ilen > 64*1024) {
+		printk("%s,%d: buf too big %x \n", __FILE__, __LINE__, crp->crp_ilen);
+		goto p_error;
+	}
+
+	if( temp_len != crp->crp_ilen ) {
+		printk("%s,%d: warning size don't match.(%x %x) \n", __FILE__, __LINE__, temp_len, crp->crp_ilen);
+	}
+
+	cesa_cmd->pSrc = p_mbuf_info;
+	cesa_cmd->pDst = p_mbuf_info;
+
+	/* restore p_buf_info to point to first available buf */
+	p_buf_info = cesa_ocf_cmd->cesa_bufs;
+	p_buf_info += 1;
+
+
+        /* Go through crypto descriptors, processing as we go */
+        for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
+
+		/* Encryption /Decryption */
+		if(crd->crd_alg == cesa_ocf_cur_ses->cipher_alg) {
+
+			dprintk("%s,%d: cipher", __FILE__, __LINE__);
+
+			cesa_cmd->cryptoOffset = crd->crd_skip;
+			cesa_cmd->cryptoLength = crd->crd_len;
+
+			if(crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
+				dprintk(" encrypt \n");
+				encrypt++;
+
+				/* handle IV */
+				if (crd->crd_flags & CRD_F_IV_EXPLICIT) {  /* IV from USER */
+					dprintk("%s,%d: IV from USER (offset %x) \n", __FILE__, __LINE__, crd->crd_inject);
+					cesa_cmd->ivFromUser = 1;
+					ivp = crd->crd_iv;
+
+					/*
+					 * do we have to copy the IV back to the buffer ?
+					 */
+					if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
+						dprintk("%s,%d: copy the IV back to the buffer\n", __FILE__, __LINE__);
+						cesa_cmd->ivOffset = crd->crd_inject;
+						crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, cesa_ocf_cur_ses->ivlen, ivp);
+					}
+					else {
+						dprintk("%s,%d: don't copy the IV back to the buffer \n", __FILE__, __LINE__);
+						p_mbuf_info->numFrags++;
+						p_mbuf_info->mbufSize += cesa_ocf_cur_ses->ivlen;
+						p_mbuf_info->pFrags = p_buf_info;
+
+						p_buf_info->bufVirtPtr = ivp;
+						p_buf_info->bufSize = cesa_ocf_cur_ses->ivlen;
+						p_buf_info--;
+
+						/* offsets */
+						cesa_cmd->ivOffset = 0;
+						cesa_cmd->cryptoOffset += cesa_ocf_cur_ses->ivlen;
+						if(auth) {
+							cesa_cmd->macOffset += cesa_ocf_cur_ses->ivlen;
+							cesa_cmd->digestOffset += cesa_ocf_cur_ses->ivlen;
+						}
+					}
+                                }
+				else {					/* random IV */
+					dprintk("%s,%d: random IV \n", __FILE__, __LINE__);
+					cesa_cmd->ivFromUser = 0;
+
+					/*
+					 * do we have to copy the IV back to the buffer ?
+					 */
+					/* in this mode the HAL will always copy the IV */
+					/* given by the session to the ivOffset  	*/
+					if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
+						cesa_cmd->ivOffset = crd->crd_inject;
+					}
+					else {
+						/* if IV isn't copy, then how will the user know which IV did we use??? */
+						printk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+						goto p_error;
+					}
+				}
+			}
+			else { 					/* decrypt */
+				dprintk(" decrypt \n");
+				decrypt++;
+				cesa_cmd->sessionId = cesa_ocf_cur_ses->sid_decrypt;
+
+				/* handle IV */
+				if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
+					dprintk("%s,%d: IV from USER \n", __FILE__, __LINE__);
+					/* append the IV buf to the mbuf */
+					cesa_cmd->ivFromUser = 1;
+					p_mbuf_info->numFrags++;
+					p_mbuf_info->mbufSize += cesa_ocf_cur_ses->ivlen;
+					p_mbuf_info->pFrags = p_buf_info;
+
+					p_buf_info->bufVirtPtr = crd->crd_iv;
+					p_buf_info->bufSize = cesa_ocf_cur_ses->ivlen;
+					p_buf_info--;
+
+					/* offsets */
+					cesa_cmd->ivOffset = 0;
+					cesa_cmd->cryptoOffset += cesa_ocf_cur_ses->ivlen;
+					if(auth) {
+						cesa_cmd->macOffset += cesa_ocf_cur_ses->ivlen;
+						cesa_cmd->digestOffset += cesa_ocf_cur_ses->ivlen;
+					}
+                                }
+				else {
+					dprintk("%s,%d: IV inside the buffer \n", __FILE__, __LINE__);
+					cesa_cmd->ivFromUser = 0;
+					cesa_cmd->ivOffset = crd->crd_inject;
+				}
+			}
+
+		}
+		/* Authentication */
+		else if(crd->crd_alg == cesa_ocf_cur_ses->auth_alg) {
+			dprintk("%s,%d:  Authentication \n", __FILE__, __LINE__);
+			auth++;
+			cesa_cmd->macOffset = crd->crd_skip;
+			cesa_cmd->macLength = crd->crd_len;
+
+			/* digest + mac */
+			cesa_cmd->digestOffset = crd->crd_inject;
+		}
+		else {
+			printk("%s,%d: Alg isn't supported by this session.\n", __FILE__, __LINE__);
+			goto p_error;
+		}
+	}
+
+	dprintk("\n");
+	dprintk("%s,%d: Sending Action: \n", __FILE__, __LINE__);
+	dprintk("%s,%d: IV from user: %d. IV offset %x \n",  __FILE__, __LINE__, cesa_cmd->ivFromUser, cesa_cmd->ivOffset);
+	dprintk("%s,%d: crypt offset %x len %x \n", __FILE__, __LINE__, cesa_cmd->cryptoOffset, cesa_cmd->cryptoLength);
+	dprintk("%s,%d: Auth offset %x len %x \n", __FILE__, __LINE__, cesa_cmd->macOffset, cesa_cmd->macLength);
+	dprintk("%s,%d: set digest in offset %x . \n", __FILE__, __LINE__, cesa_cmd->digestOffset);
+	if(debug) {
+		mvCesaDebugMbuf("SRC BUFFER", cesa_cmd->pSrc, 0, cesa_cmd->pSrc->mbufSize);
+	}
+
+
+	/* send action to HAL */
+	spin_lock_irqsave(&cesa_lock, flags);
+	status = mvCesaAction(cesa_cmd);
+	spin_unlock_irqrestore(&cesa_lock, flags);
+
+	/* action not allowed */
+	if(status == MV_NOT_ALLOWED) {
+#ifdef CESA_OCF_SPLIT
+		/* if both encrypt and auth try to split */
+		if(auth && (encrypt || decrypt)) {
+			MV_CESA_COMMAND	*cesa_cmd_wa;
+
+			/* malloc a new cesa process and init it */
+			cesa_ocf_cmd_wa = kmalloc(sizeof(struct cesa_ocf_process), GFP_ATOMIC);
+
+			if (cesa_ocf_cmd_wa == NULL) {
+				printk("%s,%d: ENOBUFS \n", __FILE__, __LINE__);
+				goto p_error;
+			}
+			memcpy(cesa_ocf_cmd_wa, cesa_ocf_cmd, sizeof(struct cesa_ocf_process));
+			cesa_cmd_wa = &cesa_ocf_cmd_wa->cesa_cmd;
+			cesa_cmd_wa->pReqPrv = (void *)cesa_ocf_cmd_wa;
+			cesa_ocf_cmd_wa->need_cb = 0;
+
+			/* break requests to two operation, first operation completion won't call callback */
+			if((decrypt) && (cesa_ocf_cur_ses->auth_tn_decrypt)) {
+				cesa_cmd_wa->sessionId = cesa_ocf_cur_ses->frag_wa_auth;
+				cesa_cmd->sessionId = cesa_ocf_cur_ses->frag_wa_decrypt;
+			}
+			else if((decrypt) && !(cesa_ocf_cur_ses->auth_tn_decrypt)) {
+				cesa_cmd_wa->sessionId = cesa_ocf_cur_ses->frag_wa_decrypt;
+				cesa_cmd->sessionId = cesa_ocf_cur_ses->frag_wa_auth;
+			}
+			else if((encrypt) && (cesa_ocf_cur_ses->encrypt_tn_auth)) {
+				cesa_cmd_wa->sessionId = cesa_ocf_cur_ses->frag_wa_encrypt;
+				cesa_cmd->sessionId = cesa_ocf_cur_ses->frag_wa_auth;
+			}
+			else if((encrypt) && !(cesa_ocf_cur_ses->encrypt_tn_auth)){
+				cesa_cmd_wa->sessionId = cesa_ocf_cur_ses->frag_wa_auth;
+				cesa_cmd->sessionId = cesa_ocf_cur_ses->frag_wa_encrypt;
+			}
+			else {
+				printk("%s,%d: Unsupporterd fragment wa mode \n", __FILE__, __LINE__);
+				goto p_error;
+			}
+
+			/* send the 2 actions to the HAL */
+			spin_lock_irqsave(&cesa_lock, flags);
+			status = mvCesaAction(cesa_cmd_wa);
+			spin_unlock_irqrestore(&cesa_lock, flags);
+
+			if((status != MV_NO_MORE) && (status != MV_OK)) {
+				printk("%s,%d: cesa action failed, status = 0x%x\n", __FILE__, __LINE__, status);
+				goto p_error;
+			}
+			spin_lock_irqsave(&cesa_lock, flags);
+			status = mvCesaAction(cesa_cmd);
+			spin_unlock_irqrestore(&cesa_lock, flags);
+
+		}
+		/* action not allowed and can't split */
+		else
+#endif
+		{
+			goto p_error;
+		}
+	}
+
+	/* Hal Q is full, send again. This should never happen */
+	if(status == MV_NO_RESOURCE) {
+		printk("%s,%d: cesa no more resources \n", __FILE__, __LINE__);
+		if(cesa_ocf_cmd)
+			kfree(cesa_ocf_cmd);
+		if(cesa_ocf_cmd_wa)
+			kfree(cesa_ocf_cmd_wa);
+		return ERESTART;
+	}
+	else if((status != MV_NO_MORE) && (status != MV_OK)) {
+                printk("%s,%d: cesa action failed, status = 0x%x\n", __FILE__, __LINE__, status);
+		goto p_error;
+        }
+
+
+#ifdef CESA_OCF_POLLING
+	cesa_interrupt_polling();
+#endif
+	cesaTestTraceAdd(5);
+
+	return 0;
+p_error:
+	crp->crp_etype = EINVAL;
+	if(cesa_ocf_cmd)
+		kfree(cesa_ocf_cmd);
+	if(cesa_ocf_cmd_wa)
+		kfree(cesa_ocf_cmd_wa);
+	return EINVAL;
+}
+
+/*
+ * cesa callback.
+ */
+static void
+cesa_callback(unsigned long dummy)
+{
+	struct cesa_ocf_process *cesa_ocf_cmd = NULL;
+	struct cryptop 		*crp = NULL;
+	MV_CESA_RESULT  	result[MV_CESA_MAX_CHAN];
+	int 			res_idx = 0,i;
+	MV_STATUS               status;
+
+	dprintk("%s()\n", __FUNCTION__);
+
+#ifdef CESA_OCF_TASKLET
+	disable_irq(cesa_device.irq);
+#endif
+    while(MV_TRUE) {
+
+		 /* Get Ready requests */
+		spin_lock(&cesa_lock);
+		status = mvCesaReadyGet(&result[res_idx]);
+		spin_unlock(&cesa_lock);
+
+	        cesaTestTraceAdd(2);
+
+		    if(status != MV_OK) {
+#ifdef CESA_OCF_POLLING
+		        if(status == MV_BUSY) { /* Fragment */
+			        cesa_interrupt_polling();
+			        return;
+		        }
+#endif
+		    break;
+	    }
+	        res_idx++;
+		    break;
+	    }
+
+	for(i = 0; i < res_idx; i++) {
+
+		if(!result[i].pReqPrv) {
+			printk("%s,%d: warning private is NULL\n", __FILE__, __LINE__);
+			break;
+		}
+
+		cesa_ocf_cmd = result[i].pReqPrv;
+		crp = cesa_ocf_cmd->crp;
+
+		// ignore HMAC error.
+		//if(result->retCode)
+		//	crp->crp_etype = EIO;
+
+#if  defined(CESA_OCF_POLLING)
+		if(!cesa_ocf_cmd->need_cb){
+			cesa_interrupt_polling();
+		}
+#endif
+		if(cesa_ocf_cmd->need_cb) {
+			if(debug) {
+				mvCesaDebugMbuf("DST BUFFER", cesa_ocf_cmd->cesa_cmd.pDst, 0, cesa_ocf_cmd->cesa_cmd.pDst->mbufSize);
+			}
+			crypto_done(crp);
+		}
+		kfree(cesa_ocf_cmd);
+	}
+#ifdef CESA_OCF_TASKLET
+	enable_irq(cesa_device.irq);
+#endif
+
+	cesaTestTraceAdd(3);
+
+	return;
+}
+
+#ifdef CESA_OCF_POLLING
+static void
+cesa_interrupt_polling(void)
+{
+        u32                  	cause;
+
+	dprintk("%s()\n", __FUNCTION__);
+
+	/* Read cause register */
+	do {
+		cause = MV_REG_READ(MV_CESA_ISR_CAUSE_REG);
+		cause &= MV_CESA_CAUSE_ACC_DMA_ALL_MASK;
+
+	} while (cause == 0);
+
+	/* clear interrupts */
+	MV_REG_WRITE(MV_CESA_ISR_CAUSE_REG, 0);
+
+	cesa_callback(0);
+
+	return;
+}
+
+#endif
+
+/*
+ * cesa Interrupt polling routine.
+ */
+static irqreturn_t
+cesa_interrupt_handler(int irq, void *arg)
+{
+        u32                  	cause;
+
+	dprintk("%s()\n", __FUNCTION__);
+
+	cesaTestTraceAdd(0);
+
+	/* Read cause register */
+	cause = MV_REG_READ(MV_CESA_ISR_CAUSE_REG);
+
+	if( (cause & MV_CESA_CAUSE_ACC_DMA_ALL_MASK) == 0)
+	{
+        /* Empty interrupt */
+		dprintk("%s,%d: cesaTestReadyIsr: cause=0x%x\n", __FILE__, __LINE__, cause);
+		return IRQ_HANDLED;
+	}
+
+	/* clear interrupts */
+	MV_REG_WRITE(MV_CESA_ISR_CAUSE_REG, 0);
+
+	cesaTestTraceAdd(1);
+#ifdef CESA_OCF_TASKLET
+	tasklet_hi_schedule(&cesa_ocf_tasklet);
+#else
+	cesa_callback(0);
+#endif
+	return IRQ_HANDLED;
+}
+
+/*
+ * Open a session.
+ */
+static int
+/*cesa_ocf_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)*/
+cesa_ocf_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
+{
+	u32 status = 0, i;
+	u32 count = 0, auth = 0, encrypt =0;
+	struct cesa_ocf_data *cesa_ocf_cur_ses;
+	MV_CESA_OPEN_SESSION cesa_session;
+	MV_CESA_OPEN_SESSION *cesa_ses = &cesa_session;
+
+
+        dprintk("%s()\n", __FUNCTION__);
+        if (sid == NULL || cri == NULL) {
+                printk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+                return EINVAL;
+        }
+
+	/* leave first empty like in other implementations */
+        for (i = 1; i < CESA_OCF_MAX_SES; i++) {
+		if (cesa_ocf_sessions[i] == NULL)
+			break;
+	}
+
+	if(i >= CESA_OCF_MAX_SES) {
+		printk("%s,%d: no more sessions \n", __FILE__, __LINE__);
+                return EINVAL;
+	}
+
+        cesa_ocf_sessions[i] = (struct cesa_ocf_data *) kmalloc(sizeof(struct cesa_ocf_data), GFP_ATOMIC);
+        if (cesa_ocf_sessions[i] == NULL) {
+                cesa_ocf_freesession(NULL, i);
+                printk("%s,%d: ENOBUFS \n", __FILE__, __LINE__);
+                return ENOBUFS;
+        }
+	dprintk("%s,%d: new session %d \n", __FILE__, __LINE__, i);
+
+        *sid = i;
+        cesa_ocf_cur_ses = cesa_ocf_sessions[i];
+        memset(cesa_ocf_cur_ses, 0, sizeof(struct cesa_ocf_data));
+	cesa_ocf_cur_ses->sid_encrypt = -1;
+	cesa_ocf_cur_ses->sid_decrypt = -1;
+	cesa_ocf_cur_ses->frag_wa_encrypt = -1;
+	cesa_ocf_cur_ses->frag_wa_decrypt = -1;
+	cesa_ocf_cur_ses->frag_wa_auth = -1;
+
+	/* init the session */
+	memset(cesa_ses, 0, sizeof(MV_CESA_OPEN_SESSION));
+	count = 1;
+        while (cri) {
+		if(count > 2) {
+			printk("%s,%d: don't support more then 2 operations\n", __FILE__, __LINE__);
+			goto error;
+		}
+                switch (cri->cri_alg) {
+		case CRYPTO_AES_CBC:
+			dprintk("%s,%d: (%d) AES CBC \n", __FILE__, __LINE__, count);
+			cesa_ocf_cur_ses->cipher_alg = cri->cri_alg;
+			cesa_ocf_cur_ses->ivlen = MV_CESA_AES_BLOCK_SIZE;
+			cesa_ses->cryptoAlgorithm = MV_CESA_CRYPTO_AES;
+			cesa_ses->cryptoMode = MV_CESA_CRYPTO_CBC;
+			if(cri->cri_klen/8 > MV_CESA_MAX_CRYPTO_KEY_LENGTH) {
+				printk("%s,%d: CRYPTO key too long.\n", __FILE__, __LINE__);
+				goto error;
+			}
+			memcpy(cesa_ses->cryptoKey, cri->cri_key, cri->cri_klen/8);
+			dprintk("%s,%d: key length %d \n", __FILE__, __LINE__, cri->cri_klen/8);
+			cesa_ses->cryptoKeyLength = cri->cri_klen/8;
+			encrypt += count;
+			break;
+                case CRYPTO_3DES_CBC:
+			dprintk("%s,%d: (%d) 3DES CBC \n", __FILE__, __LINE__, count);
+			cesa_ocf_cur_ses->cipher_alg = cri->cri_alg;
+			cesa_ocf_cur_ses->ivlen = MV_CESA_3DES_BLOCK_SIZE;
+			cesa_ses->cryptoAlgorithm = MV_CESA_CRYPTO_3DES;
+			cesa_ses->cryptoMode = MV_CESA_CRYPTO_CBC;
+			if(cri->cri_klen/8 > MV_CESA_MAX_CRYPTO_KEY_LENGTH) {
+				printk("%s,%d: CRYPTO key too long.\n", __FILE__, __LINE__);
+				goto error;
+			}
+			memcpy(cesa_ses->cryptoKey, cri->cri_key, cri->cri_klen/8);
+			cesa_ses->cryptoKeyLength = cri->cri_klen/8;
+			encrypt += count;
+			break;
+                case CRYPTO_DES_CBC:
+			dprintk("%s,%d: (%d) DES CBC \n", __FILE__, __LINE__, count);
+			cesa_ocf_cur_ses->cipher_alg = cri->cri_alg;
+			cesa_ocf_cur_ses->ivlen = MV_CESA_DES_BLOCK_SIZE;
+			cesa_ses->cryptoAlgorithm = MV_CESA_CRYPTO_DES;
+			cesa_ses->cryptoMode = MV_CESA_CRYPTO_CBC;
+			if(cri->cri_klen/8 > MV_CESA_MAX_CRYPTO_KEY_LENGTH) {
+				printk("%s,%d: CRYPTO key too long.\n", __FILE__, __LINE__);
+				goto error;
+			}
+			memcpy(cesa_ses->cryptoKey, cri->cri_key, cri->cri_klen/8);
+			cesa_ses->cryptoKeyLength = cri->cri_klen/8;
+			encrypt += count;
+			break;
+                case CRYPTO_MD5:
+                case CRYPTO_MD5_HMAC:
+			dprintk("%s,%d: (%d) %sMD5 CBC \n", __FILE__, __LINE__, count, (cri->cri_alg != CRYPTO_MD5)? "H-":" ");
+                        cesa_ocf_cur_ses->auth_alg = cri->cri_alg;
+			cesa_ocf_cur_ses->digestlen = (cri->cri_alg == CRYPTO_MD5)? MV_CESA_MD5_DIGEST_SIZE : 12;
+			cesa_ses->macMode = (cri->cri_alg == CRYPTO_MD5)? MV_CESA_MAC_MD5 : MV_CESA_MAC_HMAC_MD5;
+			if(cri->cri_klen/8 > MV_CESA_MAX_CRYPTO_KEY_LENGTH) {
+				printk("%s,%d: MAC key too long. \n", __FILE__, __LINE__);
+				goto error;
+			}
+			cesa_ses->macKeyLength = cri->cri_klen/8;
+			memcpy(cesa_ses->macKey, cri->cri_key, cri->cri_klen/8);
+			cesa_ses->digestSize = cesa_ocf_cur_ses->digestlen;
+			auth += count;
+			break;
+                case CRYPTO_SHA1:
+                case CRYPTO_SHA1_HMAC:
+			dprintk("%s,%d: (%d) %sSHA1 CBC \n", __FILE__, __LINE__, count, (cri->cri_alg != CRYPTO_SHA1)? "H-":" ");
+                        cesa_ocf_cur_ses->auth_alg = cri->cri_alg;
+			cesa_ocf_cur_ses->digestlen = (cri->cri_alg == CRYPTO_SHA1)? MV_CESA_SHA1_DIGEST_SIZE : 12;
+			cesa_ses->macMode = (cri->cri_alg == CRYPTO_SHA1)? MV_CESA_MAC_SHA1 : MV_CESA_MAC_HMAC_SHA1;
+			if(cri->cri_klen/8 > MV_CESA_MAX_CRYPTO_KEY_LENGTH) {
+				printk("%s,%d: MAC key too long. \n", __FILE__, __LINE__);
+				goto error;
+			}
+			cesa_ses->macKeyLength = cri->cri_klen/8;
+			memcpy(cesa_ses->macKey, cri->cri_key, cri->cri_klen/8);
+			cesa_ses->digestSize = cesa_ocf_cur_ses->digestlen;
+			auth += count;
+			break;
+                default:
+                        printk("%s,%d: unknown algo 0x%x\n", __FILE__, __LINE__, cri->cri_alg);
+                        goto error;
+                }
+                cri = cri->cri_next;
+		count++;
+        }
+
+	if((encrypt > 2) || (auth > 2)) {
+		printk("%s,%d: session mode is not supported.\n", __FILE__, __LINE__);
+                goto error;
+	}
+	/* create new sessions in HAL */
+	if(encrypt) {
+		cesa_ses->operation = MV_CESA_CRYPTO_ONLY;
+		/* encrypt session */
+		if(auth == 1) {
+			cesa_ses->operation = MV_CESA_MAC_THEN_CRYPTO;
+		}
+		else if(auth == 2) {
+			cesa_ses->operation = MV_CESA_CRYPTO_THEN_MAC;
+			cesa_ocf_cur_ses->encrypt_tn_auth = 1;
+		}
+		else {
+			cesa_ses->operation = MV_CESA_CRYPTO_ONLY;
+		}
+		cesa_ses->direction = MV_CESA_DIR_ENCODE;
+		status = mvCesaSessionOpen(cesa_ses, &cesa_ocf_cur_ses->sid_encrypt);
+		if(status != MV_OK) {
+			printk("%s,%d: Can't open new session - status = 0x%x\n", __FILE__, __LINE__, status);
+			goto error;
+		}
+		/* decrypt session */
+		if( cesa_ses->operation == MV_CESA_MAC_THEN_CRYPTO ) {
+			cesa_ses->operation = MV_CESA_CRYPTO_THEN_MAC;
+		}
+		else if( cesa_ses->operation == MV_CESA_CRYPTO_THEN_MAC ) {
+			cesa_ses->operation = MV_CESA_MAC_THEN_CRYPTO;
+		}
+		cesa_ses->direction = MV_CESA_DIR_DECODE;
+		status = mvCesaSessionOpen(cesa_ses, &cesa_ocf_cur_ses->sid_decrypt);
+		if(status != MV_OK) {
+			printk("%s,%d: Can't open new session - status = 0x%x\n", __FILE__, __LINE__, status);
+			goto error;
+		}
+
+		/* preapre one action sessions for case we will need to split an action */
+#ifdef CESA_OCF_SPLIT
+		if(( cesa_ses->operation == MV_CESA_MAC_THEN_CRYPTO ) ||
+			( cesa_ses->operation == MV_CESA_CRYPTO_THEN_MAC )) {
+			/* open one session for encode and one for decode */
+			cesa_ses->operation = MV_CESA_CRYPTO_ONLY;
+			cesa_ses->direction = MV_CESA_DIR_ENCODE;
+			status = mvCesaSessionOpen(cesa_ses, &cesa_ocf_cur_ses->frag_wa_encrypt);
+			if(status != MV_OK) {
+				printk("%s,%d: Can't open new session - status = 0x%x\n", __FILE__, __LINE__, status);
+				goto error;
+			}
+
+			cesa_ses->direction = MV_CESA_DIR_DECODE;
+			status = mvCesaSessionOpen(cesa_ses, &cesa_ocf_cur_ses->frag_wa_decrypt);
+			if(status != MV_OK) {
+				printk("%s,%d: Can't open new session - status = 0x%x\n", __FILE__, __LINE__, status);
+				goto error;
+			}
+			/* open one session for auth */
+			cesa_ses->operation = MV_CESA_MAC_ONLY;
+			cesa_ses->direction = MV_CESA_DIR_ENCODE;
+			status = mvCesaSessionOpen(cesa_ses, &cesa_ocf_cur_ses->frag_wa_auth);
+			if(status != MV_OK) {
+				printk("%s,%d: Can't open new session - status = 0x%x\n", __FILE__, __LINE__, status);
+				goto error;
+			}
+		}
+#endif
+	}
+	else { /* only auth */
+		cesa_ses->operation = MV_CESA_MAC_ONLY;
+		cesa_ses->direction = MV_CESA_DIR_ENCODE;
+		status = mvCesaSessionOpen(cesa_ses, &cesa_ocf_cur_ses->sid_encrypt);
+		if(status != MV_OK) {
+			printk("%s,%d: Can't open new session - status = 0x%x\n", __FILE__, __LINE__, status);
+			goto error;
+		}
+	}
+
+        return 0;
+error:
+	cesa_ocf_freesession(NULL, *sid);
+	return EINVAL;
+
+}
+
+
+/*
+ * Free a session.
+ */
+static int
+cesa_ocf_freesession(device_t dev, u_int64_t tid)
+{
+        struct cesa_ocf_data *cesa_ocf_cur_ses;
+        u_int32_t sid = CRYPTO_SESID2LID(tid);
+	//unsigned long flags;
+
+        dprintk("%s() %d \n", __FUNCTION__, sid);
+        if ( (sid >= CESA_OCF_MAX_SES) || (cesa_ocf_sessions[sid] == NULL) ) {
+                printk("%s,%d: EINVAL can't free session %d \n", __FILE__, __LINE__, sid);
+                return(EINVAL);
+        }
+
+        /* Silently accept and return */
+        if (sid == 0)
+                return(0);
+
+	/* release session from HAL */
+	cesa_ocf_cur_ses = cesa_ocf_sessions[sid];
+	if (cesa_ocf_cur_ses->sid_encrypt != -1) {
+		mvCesaSessionClose(cesa_ocf_cur_ses->sid_encrypt);
+	}
+	if (cesa_ocf_cur_ses->sid_decrypt != -1) {
+		mvCesaSessionClose(cesa_ocf_cur_ses->sid_decrypt);
+	}
+	if (cesa_ocf_cur_ses->frag_wa_encrypt != -1) {
+		mvCesaSessionClose(cesa_ocf_cur_ses->frag_wa_encrypt);
+	}
+	if (cesa_ocf_cur_ses->frag_wa_decrypt != -1) {
+		mvCesaSessionClose(cesa_ocf_cur_ses->frag_wa_decrypt);
+	}
+	if (cesa_ocf_cur_ses->frag_wa_auth != -1) {
+		mvCesaSessionClose(cesa_ocf_cur_ses->frag_wa_auth);
+	}
+
+	kfree(cesa_ocf_cur_ses);
+	cesa_ocf_sessions[sid] = NULL;
+
+        return 0;
+}
+
+
+/* TDMA Window setup */
+
+static void __init
+setup_tdma_mbus_windows(struct cesa_dev *dev)
+{
+    int i;
+
+    for (i = 0; i < 4; i++) {
+        writel(0, dev->reg + WINDOW_BASE(i));
+        writel(0, dev->reg + WINDOW_CTRL(i));
+    }
+
+    for (i = 0; i < dev->plat_data->dram->num_cs; i++) {
+        struct mbus_dram_window *cs = dev->plat_data->dram->cs + i;
+        writel(
+            ((cs->size - 1) & 0xffff0000) |
+            (cs->mbus_attr << 8) |
+            (dev->plat_data->dram->mbus_dram_target_id << 4) | 1,
+            dev->reg + WINDOW_CTRL(i)
+        );
+        writel(cs->base, dev->reg + WINDOW_BASE(i));
+    }
+}
+
+/*
+ * our driver startup and shutdown routines
+ */
+static int
+mv_cesa_ocf_init(struct platform_device *pdev)
+{
+#if defined(CONFIG_MV78200) || defined(CONFIG_MV632X)
+	if (MV_FALSE == mvSocUnitIsMappedToThisCpu(CESA))
+	{
+		dprintk("CESA is not mapped to this CPU\n");
+		return -ENODEV;
+	}
+#endif
+
+	dprintk("%s\n", __FUNCTION__);
+	memset(&mv_cesa_dev, 0, sizeof(mv_cesa_dev));
+	softc_device_init(&mv_cesa_dev, "MV CESA", 0, mv_cesa_methods);
+	cesa_ocf_id = crypto_get_driverid(softc_get_device(&mv_cesa_dev),CRYPTOCAP_F_HARDWARE);
+
+	if (cesa_ocf_id < 0)
+		panic("MV CESA crypto device cannot initialize!");
+
+	dprintk("%s,%d: cesa ocf device id is %d \n", __FILE__, __LINE__, cesa_ocf_id);
+
+	/* CESA unit is auto power on off */
+#if 0
+	if (MV_FALSE == mvCtrlPwrClckGet(CESA_UNIT_ID,0))
+	{
+		printk("\nWarning CESA %d is Powered Off\n",0);
+		return EINVAL;
+	}
+#endif
+
+	memset(&cesa_device, 0, sizeof(struct cesa_dev));
+	/* Get the IRQ, and crypto memory regions */
+	{
+		struct resource *res;
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+
+		if (!res)
+			return -ENXIO;
+
+		cesa_device.sram = ioremap(res->start, res->end - res->start + 1);
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+
+		if (!res) {
+		        iounmap(cesa_device.sram);
+			return -ENXIO;
+                }
+                cesa_device.reg = ioremap(res->start, res->end - res->start + 1);
+		cesa_device.irq = platform_get_irq(pdev, 0);
+		cesa_device.plat_data = pdev->dev.platform_data;
+	        setup_tdma_mbus_windows(&cesa_device);
+
+	}
+
+
+	if( MV_OK != mvCesaInit(CESA_OCF_MAX_SES*5, CESA_Q_SIZE, cesa_device.reg,
+				NULL) ) {
+		printk("%s,%d: mvCesaInit Failed. \n", __FILE__, __LINE__);
+		return EINVAL;
+	}
+
+	/* clear and unmask Int */
+	MV_REG_WRITE( MV_CESA_ISR_CAUSE_REG, 0);
+#ifndef CESA_OCF_POLLING
+    MV_REG_WRITE( MV_CESA_ISR_MASK_REG, MV_CESA_CAUSE_ACC_DMA_MASK);
+#endif
+#ifdef CESA_OCF_TASKLET
+	tasklet_init(&cesa_ocf_tasklet, cesa_callback, (unsigned int) 0);
+#endif
+	/* register interrupt */
+	if( request_irq( cesa_device.irq, cesa_interrupt_handler,
+                             (IRQF_DISABLED) , "cesa", &cesa_ocf_id) < 0) {
+		printk("%s,%d: cannot assign irq %x\n", __FILE__, __LINE__, cesa_device.reg);
+		return EINVAL;
+        }
+
+
+	memset(cesa_ocf_sessions, 0, sizeof(struct cesa_ocf_data *) * CESA_OCF_MAX_SES);
+
+#define	REGISTER(alg) \
+	crypto_register(cesa_ocf_id, alg, 0,0)
+	REGISTER(CRYPTO_AES_CBC);
+	REGISTER(CRYPTO_DES_CBC);
+	REGISTER(CRYPTO_3DES_CBC);
+	REGISTER(CRYPTO_MD5);
+	REGISTER(CRYPTO_MD5_HMAC);
+	REGISTER(CRYPTO_SHA1);
+	REGISTER(CRYPTO_SHA1_HMAC);
+#undef REGISTER
+
+	return 0;
+}
+
+static void
+mv_cesa_ocf_exit(struct platform_device *pdev)
+{
+	dprintk("%s()\n", __FUNCTION__);
+
+	crypto_unregister_all(cesa_ocf_id);
+	cesa_ocf_id = -1;
+	iounmap(cesa_device.reg);
+	iounmap(cesa_device.sram);
+	free_irq(cesa_device.irq, NULL);
+
+	/* mask and clear Int */
+	MV_REG_WRITE( MV_CESA_ISR_MASK_REG, 0);
+	MV_REG_WRITE( MV_CESA_ISR_CAUSE_REG, 0);
+
+
+	if( MV_OK != mvCesaFinish() ) {
+		printk("%s,%d: mvCesaFinish Failed. \n", __FILE__, __LINE__);
+		return;
+	}
+}
+
+
+void cesa_ocf_debug(void)
+{
+
+#ifdef CESA_OCF_TRACE_DEBUG
+    {
+        int i, j;
+        j = cesaTestTraceIdx;
+        mvOsPrintf("No  Type   rCause   iCause   Proc   Isr   Res     Time     pReady    pProc    pEmpty\n");
+        for(i=0; i<MV_CESA_TEST_TRACE_SIZE; i++)
+        {
+            mvOsPrintf("%02d.  %d   0x%04x   0x%04x   0x%02x   0x%02x   %02d   0x%06x  %p  %p  %p\n",
+                j, cesaTestTrace[j].type, cesaTestTrace[j].realCause,
+                cesaTestTrace[j].idmaCause,
+                cesaTestTrace[j].resources, cesaTestTrace[j].timeStamp,
+                cesaTestTrace[j].pReqReady, cesaTestTrace[j].pReqProcess, cesaTestTrace[j].pReqEmpty);
+            j++;
+            if(j == MV_CESA_TEST_TRACE_SIZE)
+                j = 0;
+        }
+    }
+#endif
+
+}
+
+static struct platform_driver marvell_cesa = {
+	.probe		= mv_cesa_ocf_init,
+	.remove		= mv_cesa_ocf_exit,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "mv_crypto",
+	},
+};
+
+MODULE_ALIAS("platform:mv_crypto");
+
+static int __init mv_cesa_init(void)
+{
+	return platform_driver_register(&marvell_cesa);
+}
+
+module_init(mv_cesa_init);
+
+static void __exit mv_cesa_exit(void)
+{
+	platform_driver_unregister(&marvell_cesa);
+}
+
+module_exit(mv_cesa_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ronen Shitrit");
+MODULE_DESCRIPTION("OCF module for Orion CESA crypto");
diff --git a/crypto/ocf/kirkwood/mvHal/common/mv802_3.h b/crypto/ocf/kirkwood/mvHal/common/mv802_3.h
new file mode 100644
index 000000000000..b03cdbd583c3
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/common/mv802_3.h
@@ -0,0 +1,213 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmv802_3h
+#define __INCmv802_3h
+
+
+/* includes */
+#include "mvTypes.h"
+
+/* Defines */
+#define MV_MAX_ETH_DATA     1500
+
+/* 802.3 types */
+#define MV_IP_TYPE                  0x0800
+#define MV_IP_ARP_TYPE              0x0806
+#define MV_APPLE_TALK_ARP_TYPE      0x80F3
+#define MV_NOVELL_IPX_TYPE          0x8137
+#define MV_EAPOL_TYPE				0x888e
+
+
+
+/* Encapsulation header for RFC1042 and Ethernet_tunnel */
+
+#define MV_RFC1042_SNAP_HEADER     {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00}
+
+#define MV_ETH_SNAP_LSB             0xF8
+
+
+#define	MV_MAC_ADDR_SIZE	(6)
+#define MV_MAC_STR_SIZE		(20)
+#define MV_VLAN_HLEN		(4)
+
+/* This macro checks for a multicast mac address    */
+#define MV_IS_MULTICAST_MAC(mac)  (((mac)[0] & 0x1) == 1)
+
+
+/* This macro checks for an broadcast mac address     */
+#define MV_IS_BROADCAST_MAC(mac)            \
+       (((mac)[0] == 0xFF) &&       \
+        ((mac)[1] == 0xFF) &&       \
+        ((mac)[2] == 0xFF) &&       \
+        ((mac)[3] == 0xFF) &&       \
+        ((mac)[4] == 0xFF) &&       \
+        ((mac)[5] == 0xFF))
+
+
+/* Typedefs */
+typedef struct
+{
+    MV_U8     pDA[MV_MAC_ADDR_SIZE];
+    MV_U8     pSA[MV_MAC_ADDR_SIZE];
+    MV_U16    typeOrLen;
+
+} MV_802_3_HEADER;
+
+enum {
+  MV_IP_PROTO_NULL	= 0,    /* Dummy protocol for TCP               */
+  MV_IP_PROTO_ICMP	= 1,    /* Internet Control Message Protocol    */
+  MV_IP_PROTO_IGMP	= 2,    /* Internet Group Management Protocol   */
+  MV_IP_PROTO_IPIP	= 4,    /* IPIP tunnels (older KA9Q tunnels use 94) */
+  MV_IP_PROTO_TCP	= 6,    /* Transmission Control Protocol        */
+  MV_IP_PROTO_EGP	= 8,    /* Exterior Gateway Protocol            */
+  MV_IP_PROTO_PUP	= 12,   /* PUP protocol                         */
+  MV_IP_PROTO_UDP	= 17,   /* User Datagram Protocol               */
+  MV_IP_PROTO_IDP	= 22,   /* XNS IDP protocol                     */
+  MV_IP_PROTO_DCCP	= 33,   /* Datagram Congestion Control Protocol */
+  MV_IP_PROTO_IPV6	= 41,   /* IPv6-in-IPv4 tunnelling              */
+  MV_IP_PROTO_RSVP	= 46,   /* RSVP protocol                        */
+  MV_IP_PROTO_GRE	= 47,   /* Cisco GRE tunnels (rfc 1701,1702)    */
+  MV_IP_PROTO_ESP	= 50,   /* Encapsulation Security Payload protocol */
+  MV_IP_PROTO_AH	= 51,   /* Authentication Header protocol       */
+  MV_IP_PROTO_BEETPH	= 94,   /* IP option pseudo header for BEET     */
+  MV_IP_PROTO_PIM	= 103,
+  MV_IP_PROTO_COMP	= 108,  /* Compression Header protocol          */
+  MV_IP_PROTO_ZERO_HOP	= 114,  /* Any 0 hop protocol (IANA)            */
+  MV_IP_PROTO_SCTP	= 132,  /* Stream Control Transport Protocol    */
+  MV_IP_PROTO_UDPLITE	= 136,  /* UDP-Lite (RFC 3828)                  */
+
+  MV_IP_PROTO_RAW	= 255,  /* Raw IP packets                       */
+  MV_IP_PROTO_MAX
+};
+
+typedef struct
+{
+    MV_U8   version;
+    MV_U8   tos;
+    MV_U16  totalLength;
+    MV_U16  identifier;
+    MV_U16  fragmentCtrl;
+    MV_U8   ttl;
+    MV_U8   protocol;
+    MV_U16  checksum;
+    MV_U32  srcIP;
+    MV_U32  dstIP;
+
+} MV_IP_HEADER;
+
+typedef struct
+{
+    MV_U32 spi;
+    MV_U32 seqNum;
+} MV_ESP_HEADER;
+
+#define MV_ICMP_ECHOREPLY          0       /* Echo Reply                   */
+#define MV_ICMP_DEST_UNREACH       3       /* Destination Unreachable      */
+#define MV_ICMP_SOURCE_QUENCH      4       /* Source Quench                */
+#define MV_ICMP_REDIRECT           5       /* Redirect (change route)      */
+#define MV_ICMP_ECHO               8       /* Echo Request                 */
+#define MV_ICMP_TIME_EXCEEDED      11      /* Time Exceeded                */
+#define MV_ICMP_PARAMETERPROB      12      /* Parameter Problem            */
+#define MV_ICMP_TIMESTAMP          13      /* Timestamp Request            */
+#define MV_ICMP_TIMESTAMPREPLY     14      /* Timestamp Reply              */
+#define MV_ICMP_INFO_REQUEST       15      /* Information Request          */
+#define MV_ICMP_INFO_REPLY         16      /* Information Reply            */
+#define MV_ICMP_ADDRESS            17      /* Address Mask Request         */
+#define MV_ICMP_ADDRESSREPLY       18      /* Address Mask Reply           */
+
+typedef struct
+{
+    MV_U8   type;
+    MV_U8   code;
+    MV_U16  checksum;
+    MV_U16  id;
+    MV_U16  sequence;
+
+} MV_ICMP_ECHO_HEADER;
+
+typedef struct
+{
+    MV_U16  source;
+    MV_U16  dest;
+    MV_U32  seq;
+    MV_U32  ack_seq;
+    MV_U16  flags;
+    MV_U16  window;
+    MV_U16  chksum;
+    MV_U16  urg_offset;
+
+} MV_TCP_HEADER;
+
+typedef struct
+{
+    MV_U16  source;
+    MV_U16  dest;
+    MV_U16  len;
+    MV_U16  check;
+
+} MV_UDP_HEADER;
+
+#endif /* __INCmv802_3h */
diff --git a/crypto/ocf/kirkwood/mvHal/common/mvCommon.c b/crypto/ocf/kirkwood/mvHal/common/mvCommon.c
new file mode 100644
index 000000000000..53d929244e9f
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/common/mvCommon.c
@@ -0,0 +1,275 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvOs.h"
+#include "mv802_3.h"
+#include "mvCommon.h"
+
+
+/*******************************************************************************
+* mvMacStrToHex - Convert MAC format string to hex.
+*
+* DESCRIPTION:
+*		This function convert MAC format string to hex.
+*
+* INPUT:
+*       macStr - MAC address string. Fornat of address string is
+*                uu:vv:ww:xx:yy:zz, where ":" can be any delimiter.
+*
+* OUTPUT:
+*       macHex - MAC in hex format.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_STATUS mvMacStrToHex(const char* macStr, MV_U8* macHex)
+{
+    int i;
+    char tmp[3];
+
+    for(i = 0; i < MV_MAC_ADDR_SIZE; i++)
+    {
+        tmp[0] = macStr[(i * 3) + 0];
+        tmp[1] = macStr[(i * 3) + 1];
+        tmp[2] = '\0';
+        macHex[i] = (MV_U8) (strtol(tmp, NULL, 16));
+    }
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvMacHexToStr - Convert MAC in hex format to string format.
+*
+* DESCRIPTION:
+*		This function convert MAC in hex format to string format.
+*
+* INPUT:
+*       macHex - MAC in hex format.
+*
+* OUTPUT:
+*       macStr - MAC address string. String format is uu:vv:ww:xx:yy:zz.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_STATUS mvMacHexToStr(MV_U8* macHex, char* macStr)
+{
+	int i;
+
+    for(i = 0; i < MV_MAC_ADDR_SIZE; i++)
+    {
+        mvOsSPrintf(&macStr[i * 3], "%02x:", macHex[i]);
+    }
+    macStr[(i * 3) - 1] = '\0';
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvSizePrint - Print the given size with size unit description.
+*
+* DESCRIPTION:
+*		This function print the given size with size unit description.
+*       FOr example when size paramter is 0x180000, the function prints:
+*       "size 1MB+500KB"
+*
+* INPUT:
+*       size - Size in bytes.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_VOID mvSizePrint(MV_U32 size)
+{
+    mvOsOutput("size ");
+
+    if(size >= _1G)
+    {
+        mvOsOutput("%3dGB ", size / _1G);
+        size %= _1G;
+        if(size)
+            mvOsOutput("+");
+    }
+    if(size >= _1M )
+    {
+        mvOsOutput("%3dMB ", size / _1M);
+        size %= _1M;
+        if(size)
+            mvOsOutput("+");
+    }
+    if(size >= _1K)
+    {
+        mvOsOutput("%3dKB ", size / _1K);
+        size %= _1K;
+        if(size)
+            mvOsOutput("+");
+    }
+    if(size > 0)
+    {
+        mvOsOutput("%3dB ", size);
+    }
+}
+
+/*******************************************************************************
+* mvHexToBin - Convert hex to binary
+*
+* DESCRIPTION:
+*		This function Convert hex to binary.
+*
+* INPUT:
+*       pHexStr - hex buffer pointer.
+*       size    - Size to convert.
+*
+* OUTPUT:
+*       pBin - Binary buffer pointer.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_VOID mvHexToBin(const char* pHexStr, MV_U8* pBin, int size)
+{
+	int     j, i;
+    char    tmp[3];
+    MV_U8   byte;
+
+    for(j=0, i=0; j<size; j++, i+=2)
+    {
+        tmp[0] = pHexStr[i];
+        tmp[1] = pHexStr[i+1];
+        tmp[2] = '\0';
+        byte = (MV_U8) (strtol(tmp, NULL, 16) & 0xFF);
+        pBin[j] =  byte;
+    }
+}
+
+void     mvAsciiToHex(const char* asciiStr, char* hexStr)
+{
+	int	i=0;
+
+	while(asciiStr[i] != 0)
+	{
+		mvOsSPrintf(&hexStr[i*2], "%02x", asciiStr[i]);
+		i++;
+	}
+	hexStr[i*2] = 0;
+}
+
+
+void    mvBinToHex(const MV_U8* bin, char* hexStr, int size)
+{
+	int i;
+
+    for(i=0; i<size; i++)
+    {
+        mvOsSPrintf(&hexStr[i*2], "%02x", bin[i]);
+    }
+    hexStr[i*2] = '\0';
+}
+
+void    mvBinToAscii(const MV_U8* bin, char* asciiStr, int size)
+{
+	int i;
+
+    for(i=0; i<size; i++)
+    {
+        mvOsSPrintf(&asciiStr[i*2], "%c", bin[i]);
+    }
+    asciiStr[i*2] = '\0';
+}
+
+/*******************************************************************************
+* mvLog2 -
+*
+* DESCRIPTION:
+*	Calculate the Log2 of a given number.
+*
+* INPUT:
+*       num - A number to calculate the Log2 for.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Log 2 of the input number, or 0xFFFFFFFF if input is 0.
+*
+*******************************************************************************/
+MV_U32 mvLog2(MV_U32	num)
+{
+	MV_U32 result = 0;
+	if(num == 0)
+		return 0xFFFFFFFF;
+	while(num != 1)
+	{
+		num = num >> 1;
+		result++;
+	}
+	return result;
+}
diff --git a/crypto/ocf/kirkwood/mvHal/common/mvCommon.h b/crypto/ocf/kirkwood/mvHal/common/mvCommon.h
new file mode 100644
index 000000000000..5caf47c80948
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/common/mvCommon.h
@@ -0,0 +1,308 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+
+#ifndef __INCmvCommonh
+#define __INCmvCommonh
+
+#include "mvTypes.h"
+
+/* Swap tool */
+
+/* 16bit nibble swap. For example 0x1234 -> 0x2143                          */
+#define MV_NIBBLE_SWAP_16BIT(X)    (((X&0xf) << 4) |     \
+                                    ((X&0xf0) >> 4) |    \
+                                    ((X&0xf00) << 4) |   \
+                                    ((X&0xf000) >> 4))
+
+/* 32bit nibble swap. For example 0x12345678 -> 0x21436587                  */
+#define MV_NIBBLE_SWAP_32BIT(X)    (((X&0xf) << 4) |       \
+                                    ((X&0xf0) >> 4) |      \
+                                    ((X&0xf00) << 4) |     \
+                                    ((X&0xf000) >> 4) |    \
+                                    ((X&0xf0000) << 4) |   \
+                                    ((X&0xf00000) >> 4) |  \
+                                    ((X&0xf000000) << 4) | \
+                                    ((X&0xf0000000) >> 4))
+
+/* 16bit byte swap. For example 0x1122 -> 0x2211                            */
+#define MV_BYTE_SWAP_16BIT(X) ((((X)&0xff)<<8) | (((X)&0xff00)>>8))
+
+/* 32bit byte swap. For example 0x11223344 -> 0x44332211                    */
+#define MV_BYTE_SWAP_32BIT(X) ((((X)&0xff)<<24) |                       \
+                               (((X)&0xff00)<<8) |                      \
+                               (((X)&0xff0000)>>8) |                    \
+                               (((X)&0xff000000)>>24))
+
+/* 64bit byte swap. For example 0x11223344.55667788 -> 0x88776655.44332211  */
+#define MV_BYTE_SWAP_64BIT(X) ((l64) ((((X)&0xffULL)<<56) |             \
+                                      (((X)&0xff00ULL)<<40) |           \
+                                      (((X)&0xff0000ULL)<<24) |         \
+                                      (((X)&0xff000000ULL)<<8) |        \
+                                      (((X)&0xff00000000ULL)>>8) |      \
+                                      (((X)&0xff0000000000ULL)>>24) |   \
+                                      (((X)&0xff000000000000ULL)>>40) | \
+                                      (((X)&0xff00000000000000ULL)>>56)))
+
+/* Endianess macros.                                                        */
+#if defined(MV_CPU_LE)
+    #define MV_16BIT_LE(X)  (X)
+    #define MV_32BIT_LE(X)  (X)
+    #define MV_64BIT_LE(X)  (X)
+    #define MV_16BIT_BE(X)  MV_BYTE_SWAP_16BIT(X)
+    #define MV_32BIT_BE(X)  MV_BYTE_SWAP_32BIT(X)
+    #define MV_64BIT_BE(X)  MV_BYTE_SWAP_64BIT(X)
+#elif defined(MV_CPU_BE)
+    #define MV_16BIT_LE(X)  MV_BYTE_SWAP_16BIT(X)
+    #define MV_32BIT_LE(X)  MV_BYTE_SWAP_32BIT(X)
+    #define MV_64BIT_LE(X)  MV_BYTE_SWAP_64BIT(X)
+    #define MV_16BIT_BE(X)  (X)
+    #define MV_32BIT_BE(X)  (X)
+    #define MV_64BIT_BE(X)  (X)
+#else
+    #error "CPU endianess isn't defined!\n"
+#endif
+
+
+/* Bit field definitions */
+#define NO_BIT      0x00000000
+#define BIT0        0x00000001
+#define BIT1        0x00000002
+#define BIT2        0x00000004
+#define BIT3        0x00000008
+#define BIT4        0x00000010
+#define BIT5        0x00000020
+#define BIT6        0x00000040
+#define BIT7        0x00000080
+#define BIT8        0x00000100
+#define BIT9        0x00000200
+#define BIT10       0x00000400
+#define BIT11       0x00000800
+#define BIT12       0x00001000
+#define BIT13       0x00002000
+#define BIT14       0x00004000
+#define BIT15       0x00008000
+#define BIT16       0x00010000
+#define BIT17       0x00020000
+#define BIT18       0x00040000
+#define BIT19       0x00080000
+#define BIT20       0x00100000
+#define BIT21       0x00200000
+#define BIT22       0x00400000
+#define BIT23       0x00800000
+#define BIT24       0x01000000
+#define BIT25       0x02000000
+#define BIT26       0x04000000
+#define BIT27       0x08000000
+#define BIT28       0x10000000
+#define BIT29       0x20000000
+#define BIT30       0x40000000
+#define BIT31       0x80000000
+
+/* Handy sizes */
+#define _1K         0x00000400
+#define _2K         0x00000800
+#define _4K         0x00001000
+#define _8K         0x00002000
+#define _16K        0x00004000
+#define _32K        0x00008000
+#define _64K        0x00010000
+#define _128K       0x00020000
+#define _256K       0x00040000
+#define _512K       0x00080000
+
+#define _1M         0x00100000
+#define _2M         0x00200000
+#define _4M         0x00400000
+#define _8M         0x00800000
+#define _16M        0x01000000
+#define _32M        0x02000000
+#define _64M        0x04000000
+#define _128M       0x08000000
+#define _256M       0x10000000
+#define _512M       0x20000000
+
+#define _1G         0x40000000
+#define _2G         0x80000000
+
+/* Tclock and Sys clock define */
+#define _100MHz     100000000
+#define _125MHz     125000000
+#define _133MHz     133333334
+#define _150MHz     150000000
+#define _160MHz     160000000
+#define _166MHz     166666667
+#define _175MHz     175000000
+#define _178MHz     178000000
+#define _183MHz     183333334
+#define _187MHz     187000000
+#define _192MHz     192000000
+#define _194MHz     194000000
+#define _200MHz     200000000
+#define _233MHz     233333334
+#define _250MHz     250000000
+#define _266MHz     266666667
+#define _300MHz     300000000
+
+/* For better address window table readability */
+#define EN			MV_TRUE
+#define DIS			MV_FALSE
+#define N_A			-1			/* Not applicable */
+
+/* Cache configuration options for memory (DRAM, SRAM, ... ) */
+
+/* Memory uncached, HW or SW cache coherency is not needed */
+#define MV_UNCACHED             0
+/* Memory cached, HW cache coherency supported in WriteThrough mode */
+#define MV_CACHE_COHER_HW_WT    1
+/* Memory cached, HW cache coherency supported in WriteBack mode */
+#define MV_CACHE_COHER_HW_WB    2
+/* Memory cached, No HW cache coherency, Cache coherency must be in SW */
+#define MV_CACHE_COHER_SW       3
+
+
+/* Macro for testing aligment. Positive if number is NOT aligned   */
+#define MV_IS_NOT_ALIGN(number, align)      ((number) & ((align) - 1))
+
+/* Macro for alignment up. For example, MV_ALIGN_UP(0x0330, 0x20) = 0x0340   */
+#define MV_ALIGN_UP(number, align)                                          \
+(((number) & ((align) - 1)) ? (((number) + (align)) & ~((align)-1)) : (number))
+
+/* Macro for alignment down. For example, MV_ALIGN_UP(0x0330, 0x20) = 0x0320 */
+#define MV_ALIGN_DOWN(number, align) ((number) & ~((align)-1))
+
+/* This macro returns absolute value                                        */
+#define MV_ABS(number)  (((int)(number) < 0) ? -(int)(number) : (int)(number))
+
+
+/* Bit fields manipulation macros                                           */
+
+/* An integer word which its 'x' bit is set                                 */
+#define MV_BIT_MASK(bitNum)         (1 << (bitNum) )
+
+/* Checks wheter bit 'x' in integer word is set                             */
+#define MV_BIT_CHECK(word, bitNum)  ( (word) & MV_BIT_MASK(bitNum) )
+
+/* Clear (reset) bit 'x' in integer word (RMW - Read-Modify-Write)          */
+#define MV_BIT_CLEAR(word, bitNum)  ( (word) &= ~(MV_BIT_MASK(bitNum)) )
+
+/* Set bit 'x' in integer word (RMW)                                        */
+#define MV_BIT_SET(word, bitNum)    ( (word) |= MV_BIT_MASK(bitNum) )
+
+/* Invert bit 'x' in integer word (RMW)                                     */
+#define MV_BIT_INV(word, bitNum)    ( (word) ^= MV_BIT_MASK(bitNum) )
+
+/* Get the min between 'a' or 'b'                                           */
+#define MV_MIN(a,b)    (((a) < (b)) ? (a) : (b))
+
+/* Get the max between 'a' or 'b'                                           */
+#define MV_MAX(a,b)    (((a) < (b)) ? (b) : (a))
+
+/* Temporary */
+#define mvOsDivide(num, div)        \
+({                                  \
+    int i=0, rem=(num);             \
+                                    \
+    while(rem >= (div))             \
+    {                               \
+        rem -= (div);               \
+        i++;                        \
+    }                               \
+    (i);                            \
+})
+
+/* Temporary */
+#define mvOsReminder(num, div)      \
+({                                  \
+    int rem = (num);                \
+                                    \
+    while(rem >= (div))             \
+        rem -= (div);               \
+    (rem);                          \
+})
+
+#define MV_IP_QUAD(ipAddr)    ((ipAddr >> 24) & 0xFF), ((ipAddr >> 16) & 0xFF), \
+                              ((ipAddr >> 8) & 0xFF), ((ipAddr >> 0) & 0xFF)
+
+#define MV_IS_POWER_OF_2(num) ((num != 0) && ((num & (num - 1)) == 0))
+
+#ifndef MV_ASMLANGUAGE
+/* mvCommon API list */
+
+MV_VOID     mvHexToBin(const char* pHexStr, MV_U8* pBin, int size);
+void        mvAsciiToHex(const char* asciiStr, char* hexStr);
+void        mvBinToHex(const MV_U8* bin, char* hexStr, int size);
+void        mvBinToAscii(const MV_U8* bin, char* asciiStr, int size);
+
+MV_STATUS mvMacStrToHex(const char* macStr, MV_U8* macHex);
+MV_STATUS mvMacHexToStr(MV_U8* macHex, char* macStr);
+void        mvSizePrint(MV_U32);
+
+MV_U32 mvLog2(MV_U32 num);
+
+#endif /* MV_ASMLANGUAGE */
+
+
+#endif	/* __INCmvCommonh */
diff --git a/crypto/ocf/kirkwood/mvHal/common/mvDebug.c b/crypto/ocf/kirkwood/mvHal/common/mvDebug.c
new file mode 100644
index 000000000000..c9f60e1eb338
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/common/mvDebug.c
@@ -0,0 +1,325 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+
+/* includes */
+#include "mvOs.h"
+#include "mv802_3.h"
+#include "mvCommon.h"
+#include "mvDebug.h"
+
+/* Global variables effect on behave MV_DEBUG_PRINT and MV_DEBUG_CODE macros
+ * mvDebug  - map of bits (one for each module) bit=1 means enable
+ *          debug code and messages for this module
+ * mvModuleDebug - array of 32 bits varables one for each module
+ */
+MV_U32    mvDebug = 0;
+MV_U32    mvDebugModules[MV_MODULE_MAX];
+
+/* Init mvModuleDebug array to default values */
+void    mvDebugInit(void)
+{
+    int     bit;
+
+    mvDebug = 0;
+    for(bit=0; bit<MV_MODULE_MAX; bit++)
+    {
+        mvDebugModules[bit] = MV_DEBUG_FLAG_ERR | MV_DEBUG_FLAG_STATS;
+        mvDebug |= MV_BIT_MASK(bit);
+    }
+}
+
+void    mvDebugModuleEnable(MV_MODULE_ID module, MV_BOOL isEnable)
+{
+    if (isEnable)
+    {
+       MV_BIT_SET(mvDebug, module);
+    }
+    else
+       MV_BIT_CLEAR(mvDebug, module);
+}
+
+void    mvDebugModuleSetFlags(MV_MODULE_ID module, MV_U32 flags)
+{
+    mvDebugModules[module] |= flags;
+}
+
+void    mvDebugModuleClearFlags(MV_MODULE_ID module, MV_U32 flags)
+{
+    mvDebugModules[module] &= ~flags;
+}
+
+/* Dump memory in specific format:
+ * address: X1X1X1X1 X2X2X2X2 ... X8X8X8X8
+ */
+void mvDebugMemDump(void* addr, int size, int access)
+{
+    int     i, j;
+    MV_U32  memAddr = (MV_U32)addr;
+
+    if(access == 0)
+        access = 1;
+
+    if( (access != 4) && (access != 2) && (access != 1) )
+    {
+        mvOsPrintf("%d wrong access size. Access must be 1 or 2 or 4\n",
+                    access);
+        return;
+    }
+    memAddr = MV_ALIGN_DOWN( (unsigned int)addr, 4);
+    size = MV_ALIGN_UP(size, 4);
+    addr = (void*)MV_ALIGN_DOWN( (unsigned int)addr, access);
+    while(size > 0)
+    {
+        mvOsPrintf("%08x: ", memAddr);
+        i = 0;
+        /* 32 bytes in the line */
+        while(i < 32)
+        {
+            if(memAddr >= (MV_U32)addr)
+            {
+                switch(access)
+                {
+                    case 1:
+                        if( memAddr == CPU_PHY_MEM(memAddr) )
+                        {
+                            mvOsPrintf("%02x ", MV_MEMIO8_READ(memAddr));
+                        }
+                        else
+                        {
+                            mvOsPrintf("%02x ", *((MV_U8*)memAddr));
+                        }
+                        break;
+
+                    case 2:
+                        if( memAddr == CPU_PHY_MEM(memAddr) )
+                        {
+                            mvOsPrintf("%04x ", MV_MEMIO16_READ(memAddr));
+                        }
+                        else
+                        {
+                            mvOsPrintf("%04x ", *((MV_U16*)memAddr));
+                        }
+                        break;
+
+                    case 4:
+                        if( memAddr == CPU_PHY_MEM(memAddr) )
+                        {
+                            mvOsPrintf("%08x ", MV_MEMIO32_READ(memAddr));
+                        }
+                        else
+                        {
+                            mvOsPrintf("%08x ", *((MV_U32*)memAddr));
+                        }
+                        break;
+                }
+            }
+            else
+            {
+                for(j=0; j<(access*2+1); j++)
+                    mvOsPrintf(" ");
+            }
+            i += access;
+            memAddr += access;
+            size -= access;
+            if(size <= 0)
+                break;
+        }
+        mvOsPrintf("\n");
+    }
+}
+
+void mvDebugPrintBufInfo(BUF_INFO* pBufInfo, int size, int access)
+{
+	if(pBufInfo == NULL)
+	{
+		mvOsPrintf("\n!!! pBufInfo = NULL\n");
+		return;
+	}
+    mvOsPrintf("\n*** pBufInfo=0x%x, cmdSts=0x%08x, pBuf=0x%x, bufSize=%d\n",
+               (unsigned int)pBufInfo,
+			   (unsigned int)pBufInfo->cmdSts,
+			   (unsigned int)pBufInfo->pBuff,
+			   (unsigned int)pBufInfo->bufSize);
+    mvOsPrintf("pData=0x%x, byteCnt=%d, pNext=0x%x, uInfo1=0x%x, uInfo2=0x%x\n",
+               (unsigned int)pBufInfo->pData,
+			   (unsigned int)pBufInfo->byteCnt,
+			   (unsigned int)pBufInfo->pNextBufInfo,
+               (unsigned int)pBufInfo->userInfo1,
+			   (unsigned int)pBufInfo->userInfo2);
+    if(pBufInfo->pData != NULL)
+    {
+        if(size > pBufInfo->byteCnt)
+            size = pBufInfo->byteCnt;
+        mvDebugMemDump(pBufInfo->pData, size, access);
+    }
+}
+
+void mvDebugPrintPktInfo(MV_PKT_INFO* pPktInfo, int size, int access)
+{
+    int frag, len;
+
+	if(pPktInfo == NULL)
+	{
+		mvOsPrintf("\n!!! pPktInfo = NULL\n");
+		return;
+	}
+    mvOsPrintf("\npPkt=%p, stat=0x%08x, numFr=%d, size=%d, pFr=%p, osInfo=0x%lx\n",
+                pPktInfo, pPktInfo->status, pPktInfo->numFrags, pPktInfo->pktSize,
+                pPktInfo->pFrags, pPktInfo->osInfo);
+
+    for(frag=0; frag<pPktInfo->numFrags; frag++)
+    {
+        mvOsPrintf("#%2d. bufVirt=%p, bufSize=%d\n",
+                    frag, pPktInfo->pFrags[frag].bufVirtPtr,
+                    pPktInfo->pFrags[frag].bufSize);
+        if(size > 0)
+        {
+            len = MV_MIN((int)pPktInfo->pFrags[frag].bufSize, size);
+            mvDebugMemDump(pPktInfo->pFrags[frag].bufVirtPtr, len, access);
+            size -= len;
+        }
+    }
+
+}
+
+void    mvDebugPrintIpAddr(MV_U32 ipAddr)
+{
+    mvOsPrintf("%d.%d.%d.%d", ((ipAddr >> 24) & 0xFF), ((ipAddr >> 16) & 0xFF),
+                              ((ipAddr >> 8) & 0xFF), ((ipAddr >> 0) & 0xFF));
+}
+
+void    mvDebugPrintMacAddr(const MV_U8* pMacAddr)
+{
+    int     i;
+
+    mvOsPrintf("%02x", (unsigned int)pMacAddr[0]);
+    for(i=1; i<MV_MAC_ADDR_SIZE; i++)
+    {
+        mvOsPrintf(":%02x", pMacAddr[i]);
+    }
+    /* mvOsPrintf("\n");*/
+}
+
+
+/******* There are three functions deals with MV_DEBUG_TIMES structure ********/
+
+/* Reset MV_DEBUG_TIMES entry */
+void mvDebugResetTimeEntry(MV_DEBUG_TIMES* pTimeEntry, int count, char* pName)
+{
+    pTimeEntry->begin = 0;
+    pTimeEntry->count = count;
+    pTimeEntry->end = 0;
+    pTimeEntry->left = pTimeEntry->count;
+    pTimeEntry->total = 0;
+    pTimeEntry->min = 0xFFFFFFFF;
+    pTimeEntry->max = 0x0;
+    strncpy(pTimeEntry->name, pName, sizeof(pTimeEntry->name)-1);
+    pTimeEntry->name[sizeof(pTimeEntry->name)-1] = '\0';
+}
+
+/* Print out MV_DEBUG_TIMES entry */
+void mvDebugPrintTimeEntry(MV_DEBUG_TIMES* pTimeEntry, MV_BOOL isTitle)
+{
+    int     num;
+
+    if(isTitle == MV_TRUE)
+        mvOsPrintf("Event         NumOfEvents       TotalTime         Average       Min       Max\n");
+
+    num = pTimeEntry->count-pTimeEntry->left;
+    if(num > 0)
+    {
+        mvOsPrintf("%-11s     %6u          0x%08lx        %6lu     %6lu    %6lu\n",
+                pTimeEntry->name, num, pTimeEntry->total, pTimeEntry->total/num,
+                pTimeEntry->min, pTimeEntry->max);
+    }
+}
+
+/* Update MV_DEBUG_TIMES entry */
+void mvDebugUpdateTimeEntry(MV_DEBUG_TIMES* pTimeEntry)
+{
+    MV_U32  delta;
+
+    if(pTimeEntry->left > 0)
+    {
+        if(pTimeEntry->end <= pTimeEntry->begin)
+        {
+            delta = pTimeEntry->begin - pTimeEntry->end;
+        }
+        else
+        {
+            delta = ((MV_U32)0x10000 - pTimeEntry->end) + pTimeEntry->begin;
+        }
+        pTimeEntry->total += delta;
+
+        if(delta < pTimeEntry->min)
+            pTimeEntry->min = delta;
+
+        if(delta > pTimeEntry->max)
+            pTimeEntry->max = delta;
+
+        pTimeEntry->left--;
+    }
+}
diff --git a/crypto/ocf/kirkwood/mvHal/common/mvDebug.h b/crypto/ocf/kirkwood/mvHal/common/mvDebug.h
new file mode 100644
index 000000000000..7dae01090fab
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/common/mvDebug.h
@@ -0,0 +1,177 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+
+#ifndef __INCmvDebugh
+#define __INCmvDebugh
+
+/* includes */
+#include "mvTypes.h"
+
+typedef enum
+{
+    MV_MODULE_INVALID  = -1,
+    MV_MODULE_ETH      = 0,
+    MV_MODULE_IDMA,
+    MV_MODULE_XOR,
+    MV_MODULE_TWASI,
+    MV_MODULE_MGI,
+    MV_MODULE_USB,
+    MV_MODULE_CESA,
+
+    MV_MODULE_MAX
+}MV_MODULE_ID;
+
+/* Define generic flags useful for most of modules */
+#define MV_DEBUG_FLAG_ALL   (0)
+#define MV_DEBUG_FLAG_INIT  (1 << 0)
+#define MV_DEBUG_FLAG_RX    (1 << 1)
+#define MV_DEBUG_FLAG_TX    (1 << 2)
+#define MV_DEBUG_FLAG_ERR   (1 << 3)
+#define MV_DEBUG_FLAG_TRACE (1 << 4)
+#define MV_DEBUG_FLAG_DUMP  (1 << 5)
+#define MV_DEBUG_FLAG_CACHE (1 << 6)
+#define MV_DEBUG_FLAG_IOCTL (1 << 7)
+#define MV_DEBUG_FLAG_STATS (1 << 8)
+
+extern MV_U32  mvDebug;
+extern MV_U32  mvDebugModules[MV_MODULE_MAX];
+
+#ifdef MV_DEBUG
+# define MV_DEBUG_PRINT(module, flags, msg)     mvOsPrintf msg
+# define MV_DEBUG_CODE(module, flags, code)     code
+#elif defined(MV_RT_DEBUG)
+# define MV_DEBUG_PRINT(module, flags, msg)                    \
+    if( (mvDebug & (1<<(module))) &&                           \
+        ((mvDebugModules[(module)] & (flags)) == (flags)) )    \
+        mvOsPrintf msg
+# define MV_DEBUG_CODE(module, flags, code)                    \
+    if( (mvDebug & (1<<(module))) &&                           \
+        ((mvDebugModules[(module)] & (flags)) == (flags)) )    \
+        code
+#else
+# define MV_DEBUG_PRINT(module, flags, msg)
+# define MV_DEBUG_CODE(module, flags, code)
+#endif
+
+
+
+/* typedefs */
+
+/*  time measurement structure used to check how much time pass between
+ *  two points
+ */
+typedef struct {
+    char            name[20];   /* name of the entry */
+    unsigned long   begin;      /* time measured on begin point */
+    unsigned long   end;        /* time measured on end point */
+    unsigned long   total;      /* Accumulated time */
+    unsigned long   left;       /* The rest measurement actions */
+    unsigned long   count;      /* Maximum measurement actions */
+    unsigned long   min;        /* Minimum time from begin to end */
+    unsigned long   max;        /* Maximum time from begin to end */
+} MV_DEBUG_TIMES;
+
+
+/* mvDebug.h API list */
+
+/****** Error Recording ******/
+
+/* Dump memory in specific format:
+ * address: X1X1X1X1 X2X2X2X2 ... X8X8X8X8
+ */
+void mvDebugMemDump(void* addr, int size, int access);
+
+void mvDebugPrintBufInfo(BUF_INFO* pBufInfo, int size, int access);
+
+void mvDebugPrintPktInfo(MV_PKT_INFO* pPktInfo, int size, int access);
+
+void    mvDebugPrintIpAddr(MV_U32 ipAddr);
+
+void mvDebugPrintMacAddr(const MV_U8* pMacAddr);
+
+/**** There are three functions deals with MV_DEBUG_TIMES structure ****/
+
+/* Reset MV_DEBUG_TIMES entry */
+void mvDebugResetTimeEntry(MV_DEBUG_TIMES* pTimeEntry, int count, char* name);
+
+/* Update MV_DEBUG_TIMES entry */
+void mvDebugUpdateTimeEntry(MV_DEBUG_TIMES* pTimeEntry);
+
+/* Print out MV_DEBUG_TIMES entry */
+void mvDebugPrintTimeEntry(MV_DEBUG_TIMES* pTimeEntry, MV_BOOL isTitle);
+
+
+/******** General ***********/
+
+/* Change value of mvDebugPrint global variable */
+
+void    mvDebugInit(void);
+void    mvDebugModuleEnable(MV_MODULE_ID module, MV_BOOL isEnable);
+void    mvDebugModuleSetFlags(MV_MODULE_ID module, MV_U32 flags);
+void    mvDebugModuleClearFlags(MV_MODULE_ID module, MV_U32 flags);
+
+
+#endif /* __INCmvDebug.h */
diff --git a/crypto/ocf/kirkwood/mvHal/common/mvDeviceId.h b/crypto/ocf/kirkwood/mvHal/common/mvDeviceId.h
new file mode 100644
index 000000000000..4f4eb478534a
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/common/mvDeviceId.h
@@ -0,0 +1,225 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+        this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvDeviceIdh
+#define __INCmvDeviceIdh
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* defines  */
+#define MARVELL_VEN_ID		    0x11ab
+
+/* Disco-3 */
+#define MV64460_DEV_ID          	0x6480
+#define MV64460B_DEV_ID         	0x6485
+#define MV64430_DEV_ID          	0x6420
+
+/* Disco-5 */
+#define MV64560_DEV_ID          	0x6450
+
+/* Disco-6 */
+#define MV64660_DEV_ID          	0x6460
+
+/* Orion */
+#define MV_1181_DEV_ID          	0x1181
+#define MV_5181_DEV_ID          	0x5181
+#define MV_5281_DEV_ID          	0x5281
+#define MV_5182_DEV_ID          	0x5182
+#define MV_8660_DEV_ID          	0x8660
+#define MV_5180_DEV_ID          	0x5180
+#define MV_5082_DEV_ID          	0x5082
+#define MV_1281_DEV_ID          	0x1281
+#define MV_6082_DEV_ID          	0x6082
+#define MV_6183_DEV_ID          	0x6183
+#define MV_6183L_DEV_ID          	0x6083
+
+#define MV_5281_D0_REV          	0x4
+#define MV_5281_D0_ID           	((MV_5281_DEV_ID << 16) | MV_5281_D0_REV)
+#define MV_5281_D0_NAME         "88F5281 D0"
+
+#define MV_5281_D1_REV          	0x5
+#define MV_5281_D1_ID           	((MV_5281_DEV_ID << 16) | MV_5281_D1_REV)
+#define MV_5281_D1_NAME         "88F5281 D1"
+
+#define MV_5281_D2_REV          	0x6
+#define MV_5281_D2_ID           	((MV_5281_DEV_ID << 16) | MV_5281_D2_REV)
+#define MV_5281_D2_NAME         "88F5281 D2"
+
+
+#define MV_5181L_A0_REV         	0x8 /* need for PCIE Er */
+#define MV_5181_A1_REV          	0x1 /* for USB Er ..*/
+#define MV_5181_B0_REV          	0x2
+#define MV_5181_B1_REV          	0x3
+#define MV_5182_A1_REV          	0x1
+#define MV_5180N_B1_REV         	0x3
+#define MV_5181L_A0_ID          	((MV_5181_DEV_ID << 16) | MV_5181L_A0_REV)
+
+
+
+/* kw */
+#define MV_6281_DEV_ID          	0x6281
+#define MV_6192_DEV_ID          	0x6192
+#define MV_6190_DEV_ID          	0x6190
+#define MV_6180_DEV_ID          	0x6180
+
+#define MV_6281_A0_REV         		0x2
+#define MV_6281_A0_ID          		((MV_6281_DEV_ID << 16) | MV_6281_A0_REV)
+#define MV_6281_A0_NAME         	"88F6281 A0"
+
+#define MV_6192_A0_REV         		0x2
+#define MV_6192_A0_ID          		((MV_6192_DEV_ID << 16) | MV_6192_A0_REV)
+#define MV_6192_A0_NAME         	"88F6192 A0"
+
+#define MV_6190_A0_REV         		0x2
+#define MV_6190_A0_ID          		((MV_6190_DEV_ID << 16) | MV_6190_A0_REV)
+#define MV_6190_A0_NAME         	"88F6190 A0"
+
+#define MV_6180_A0_REV         		0x2
+#define MV_6180_A0_ID          		((MV_6180_DEV_ID << 16) | MV_6180_A0_REV)
+#define MV_6180_A0_NAME         	"88F6180 A0"
+
+#define MV_6281_A1_REV              0x3
+#define MV_6281_A1_ID               ((MV_6281_DEV_ID << 16) | MV_6281_A1_REV)
+#define MV_6281_A1_NAME             "88F6281 A1"
+
+#define MV_6192_A1_REV              0x3
+#define MV_6192_A1_ID               ((MV_6192_DEV_ID << 16) | MV_6192_A1_REV)
+#define MV_6192_A1_NAME             "88F6192 A1"
+
+#define MV_6190_A1_REV              0x3
+#define MV_6190_A1_ID               ((MV_6190_DEV_ID << 16) | MV_6190_A1_REV)
+#define MV_6190_A1_NAME             "88F6190 A1"
+
+#define MV_6180_A1_REV              0x3
+#define MV_6180_A1_ID               ((MV_6180_DEV_ID << 16) | MV_6180_A1_REV)
+#define MV_6180_A1_NAME             "88F6180 A1"
+
+#define MV_88F6XXX_A0_REV         	0x2
+#define MV_88F6XXX_A1_REV         	0x3
+/* Disco-Duo */
+#define MV_78XX0_ZY_DEV_ID       0x6381
+#define MV_78XX0_ZY_NAME         "MV78X00"
+
+#define MV_78XX0_Z0_REV         0x1
+#define MV_78XX0_Z0_ID          ((MV_78XX0_ZY_DEV_ID << 16) | MV_78XX0_Z0_REV)
+#define MV_78XX0_Z0_NAME        "78X00 Z0"
+
+#define MV_78XX0_Y0_REV         0x2
+#define MV_78XX0_Y0_ID          ((MV_78XX0_ZY_DEV_ID << 16) | MV_78XX0_Y0_REV)
+#define MV_78XX0_Y0_NAME        "78X00 Y0"
+
+#define MV_78XX0_DEV_ID       	0x7800
+#define MV_78XX0_NAME         	"MV78X00"
+
+#define MV_76100_DEV_ID      	0x7610
+#define MV_78200_DEV_ID      	0x7820
+#define MV_78100_DEV_ID      	0x7810
+#define MV_78XX0_A0_REV		0x1
+#define MV_78XX0_A1_REV		0x2
+
+#define MV_76100_NAME		"MV76100"
+#define MV_78100_NAME		"MV78100"
+#define MV_78200_NAME		"MV78200"
+
+#define MV_76100_A0_ID		((MV_76100_DEV_ID << 16) | MV_78XX0_A0_REV)
+#define MV_78100_A0_ID		((MV_78100_DEV_ID << 16) | MV_78XX0_A0_REV)
+#define MV_78200_A0_ID		((MV_78200_DEV_ID << 16) | MV_78XX0_A0_REV)
+
+#define MV_76100_A1_ID		((MV_76100_DEV_ID << 16) | MV_78XX0_A1_REV)
+#define MV_78100_A1_ID		((MV_78100_DEV_ID << 16) | MV_78XX0_A1_REV)
+#define MV_78200_A1_ID		((MV_78200_DEV_ID << 16) | MV_78XX0_A1_REV)
+
+#define MV_76100_A0_NAME	"MV76100 A0"
+#define MV_78100_A0_NAME	"MV78100 A0"
+#define MV_78200_A0_NAME	"MV78200 A0"
+#define MV_78XX0_A0_NAME	"MV78XX0 A0"
+
+#define MV_76100_A1_NAME	"MV76100 A1"
+#define MV_78100_A1_NAME	"MV78100 A1"
+#define MV_78200_A1_NAME	"MV78200 A1"
+#define MV_78XX0_A1_NAME	"MV78XX0 A1"
+
+/*MV88F632X family*/
+#define MV_6321_DEV_ID      	0x6321
+#define MV_6322_DEV_ID      	0x6322
+#define MV_6323_DEV_ID      	0x6323
+
+#define MV_6321_NAME		"88F6321"
+#define MV_6322_NAME		"88F6322"
+#define MV_6323_NAME		"88F6323"
+
+#define MV_632X_A1_REV		0x2
+
+#define MV_6321_A1_ID		((MV_6321_DEV_ID << 16) | MV_632X_A1_REV)
+#define MV_6322_A1_ID		((MV_6322_DEV_ID << 16) | MV_632X_A1_REV)
+#define MV_6323_A1_ID		((MV_6323_DEV_ID << 16) | MV_632X_A1_REV)
+
+#define MV_6321_A1_NAME		"88F6321 A1"
+#define MV_6322_A1_NAME		"88F6322 A1"
+#define MV_6323_A1_NAME		"88F6323 A1"
+
+
+#endif /* __INCmvDeviceIdh */
diff --git a/crypto/ocf/kirkwood/mvHal/common/mvHalVer.h b/crypto/ocf/kirkwood/mvHal/common/mvHalVer.h
new file mode 100644
index 000000000000..184919856989
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/common/mvHalVer.h
@@ -0,0 +1,73 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvHalVerh
+#define __INCmvHalVerh
+
+/* Defines */
+#define MV_HAL_VERSION			"FEROCEON_HAL_3_1_7"
+#define MV_RELEASE_BASELINE		"SoCandControllers_FEROCEON_RELEASE_7_9_2009_KW_4_3_4_DD_2_1_4_6183_1_1_4"
+
+#endif /* __INCmvHalVerh */
\ No newline at end of file
diff --git a/crypto/ocf/kirkwood/mvHal/common/mvStack.c b/crypto/ocf/kirkwood/mvHal/common/mvStack.c
new file mode 100644
index 000000000000..4ce70128d665
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/common/mvStack.c
@@ -0,0 +1,152 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	* Redistributions of source code must retain the above copyright notice,
+	  this list of conditions and the following disclaimer.
+
+	* Redistributions in binary form must reproduce the above copyright
+	  notice, this list of conditions and the following disclaimer in the
+	  documentation and/or other materials provided with the distribution.
+
+	* Neither the name of Marvell nor the names of its contributors may be
+	  used to endorse or promote products derived from this software without
+	  specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+/********************************************************************************
+* mvQueue.c
+*
+* FILENAME:    $Workfile: mvStack.c $
+* REVISION:    $Revision: 1.1 $
+* LAST UPDATE: $Modtime:  $
+*
+* DESCRIPTION:
+*     This file implements simple Stack LIFO functionality.
+*******************************************************************************/
+
+/* includes */
+#include "mvOs.h"
+#include "mvTypes.h"
+#include "mvDebug.h"
+#include "mvStack.h"
+
+/* defines  */
+
+
+/* Public functions */
+
+
+/* Purpose: Create new stack
+ * Inputs:
+ *	- MV_U32	noOfElements	- maximum number of elements in the stack.
+ *                              Each element 4 bytes size
+ * Return: void* - pointer to created stack.
+ */
+void*   mvStackCreate(int numOfElements)
+{
+	MV_STACK*   pStack;
+    MV_U32*     pStackElements;
+
+    pStack = (MV_STACK*)mvOsMalloc(sizeof(MV_STACK));
+    pStackElements = (MV_U32*)mvOsMalloc(numOfElements*sizeof(MV_U32));
+    if( (pStack == NULL) || (pStackElements == NULL) )
+    {
+	    mvOsPrintf("mvStack: Can't create new stack\n");
+        return NULL;
+    }
+    memset(pStackElements, 0, numOfElements*sizeof(MV_U32));
+    pStack->numOfElements = numOfElements;
+    pStack->stackIdx = 0;
+    pStack->stackElements = pStackElements;
+
+	return pStack;
+}
+
+/* Purpose: Delete existing stack
+ * Inputs:
+ *	- void* 	stackHndl 	- Stack handle as returned by "mvStackCreate()" function
+ *
+ * Return: MV_STATUS  	MV_NOT_FOUND - Failure. StackHandle is not valid.
+ *						MV_OK        - Success.
+ */
+MV_STATUS   mvStackDelete(void* stackHndl)
+{
+	MV_STACK*   pStack = (MV_STACK*)stackHndl;
+
+	if( (pStack == NULL) || (pStack->stackElements == NULL) )
+		return MV_NOT_FOUND;
+
+    mvOsFree(pStack->stackElements);
+    mvOsFree(pStack);
+
+    return MV_OK;
+}
+
+
+/* PrintOut status of the stack */
+void    mvStackStatus(void* stackHndl, MV_BOOL isPrintElements)
+{
+	int			i;
+    MV_STACK*   pStack = (MV_STACK*)stackHndl;
+
+    mvOsPrintf("StackHandle=%p, pElements=%p, numElements=%d, stackIdx=%d\n",
+                stackHndl, pStack->stackElements, pStack->numOfElements,
+                pStack->stackIdx);
+    if(isPrintElements == MV_TRUE)
+    {
+        for(i=0; i<pStack->stackIdx; i++)
+        {
+            mvOsPrintf("%3d. Value=0x%x\n", i, pStack->stackElements[i]);
+        }
+    }
+}
diff --git a/crypto/ocf/kirkwood/mvHal/common/mvStack.h b/crypto/ocf/kirkwood/mvHal/common/mvStack.h
new file mode 100644
index 000000000000..f4352396bac3
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/common/mvStack.h
@@ -0,0 +1,191 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	* Redistributions of source code must retain the above copyright notice,
+	  this list of conditions and the following disclaimer.
+
+	* Redistributions in binary form must reproduce the above copyright
+	  notice, this list of conditions and the following disclaimer in the
+	  documentation and/or other materials provided with the distribution.
+
+	* Neither the name of Marvell nor the names of its contributors may be
+	  used to endorse or promote products derived from this software without
+	  specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+/********************************************************************************
+* mvStack.h - Header File for :
+*
+* FILENAME:    $Workfile: mvStack.h $
+* REVISION:    $Revision: 1.1 $
+* LAST UPDATE: $Modtime:  $
+*
+* DESCRIPTION:
+*     This file defines simple Stack (LIFO) functionality.
+*
+*******************************************************************************/
+
+#ifndef __mvStack_h__
+#define __mvStack_h__
+
+
+/* includes */
+#include "mvTypes.h"
+
+
+/* defines  */
+
+
+/* typedefs */
+/* Data structure describes general purpose Stack */
+typedef struct
+{
+    int     stackIdx;
+    int     numOfElements;
+    MV_U32* stackElements;
+} MV_STACK;
+
+static INLINE MV_BOOL mvStackIsFull(void* stackHndl)
+{
+    MV_STACK*   pStack = (MV_STACK*)stackHndl;
+
+    if(pStack->stackIdx == pStack->numOfElements)
+        return MV_TRUE;
+
+    return MV_FALSE;
+}
+
+static INLINE MV_BOOL mvStackIsEmpty(void* stackHndl)
+{
+    MV_STACK*   pStack = (MV_STACK*)stackHndl;
+
+    if(pStack->stackIdx == 0)
+        return MV_TRUE;
+
+    return MV_FALSE;
+}
+/* Purpose: Push new element to stack
+ * Inputs:
+ *	- void* 	stackHndl 	- Stack handle as returned by "mvStackCreate()" function.
+ *	- MV_U32	value		- New element.
+ *
+ * Return: MV_STATUS  	MV_FULL - Failure. Stack is full.
+ *						MV_OK   - Success. Element is put to stack.
+ */
+static INLINE void mvStackPush(void* stackHndl, MV_U32 value)
+{
+    MV_STACK*   pStack = (MV_STACK*)stackHndl;
+
+#ifdef MV_RT_DEBUG
+    if(pStack->stackIdx == pStack->numOfElements)
+    {
+        mvOsPrintf("mvStackPush: Stack is FULL\n");
+        return;
+    }
+#endif /* MV_RT_DEBUG */
+
+    pStack->stackElements[pStack->stackIdx] = value;
+    pStack->stackIdx++;
+}
+
+/* Purpose: Pop element from the top of stack and copy it to "pValue"
+ * Inputs:
+ *	- void* 	stackHndl 	- Stack handle as returned by "mvStackCreate()" function.
+ *	- MV_U32	value		- Element in the top of stack.
+ *
+ * Return: MV_STATUS  	MV_EMPTY - Failure. Stack is empty.
+ *						MV_OK    - Success. Element is removed from the stack and
+ *									copied to pValue argument
+ */
+static INLINE MV_U32   mvStackPop(void* stackHndl)
+{
+    MV_STACK*   pStack = (MV_STACK*)stackHndl;
+
+#ifdef MV_RT_DEBUG
+    if(pStack->stackIdx == 0)
+    {
+        mvOsPrintf("mvStackPop: Stack is EMPTY\n");
+        return 0;
+    }
+#endif /* MV_RT_DEBUG */
+
+    pStack->stackIdx--;
+    return pStack->stackElements[pStack->stackIdx];
+}
+
+static INLINE int       mvStackIndex(void* stackHndl)
+{
+    MV_STACK*   pStack = (MV_STACK*)stackHndl;
+
+    return pStack->stackIdx;
+}
+
+static INLINE int       mvStackFreeElements(void* stackHndl)
+{
+    MV_STACK*   pStack = (MV_STACK*)stackHndl;
+
+    return (pStack->numOfElements - pStack->stackIdx);
+}
+
+/* mvStack.h API list */
+
+/* Create new Stack */
+void*       mvStackCreate(int numOfElements);
+
+/* Delete existing stack */
+MV_STATUS   mvStackDelete(void* stackHndl);
+
+/* Print status of the stack */
+void        mvStackStatus(void* stackHndl, MV_BOOL isPrintElements);
+
+#endif /* __mvStack_h__ */
diff --git a/crypto/ocf/kirkwood/mvHal/common/mvTypes.h b/crypto/ocf/kirkwood/mvHal/common/mvTypes.h
new file mode 100644
index 000000000000..6896771c83d1
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/common/mvTypes.h
@@ -0,0 +1,244 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvTypesh
+#define __INCmvTypesh
+
+/* Defines */
+
+/* The following is a list of Marvell status    */
+#define MV_ERROR		    (-1)
+#define MV_OK			    (0x00)  /* Operation succeeded                   */
+#define MV_FAIL			    (0x01)	/* Operation failed                      */
+#define MV_BAD_VALUE        (0x02)  /* Illegal value (general)               */
+#define MV_OUT_OF_RANGE     (0x03)  /* The value is out of range             */
+#define MV_BAD_PARAM        (0x04)  /* Illegal parameter in function called  */
+#define MV_BAD_PTR          (0x05)  /* Illegal pointer value                 */
+#define MV_BAD_SIZE         (0x06)  /* Illegal size                          */
+#define MV_BAD_STATE        (0x07)  /* Illegal state of state machine        */
+#define MV_SET_ERROR        (0x08)  /* Set operation failed                  */
+#define MV_GET_ERROR        (0x09)  /* Get operation failed                  */
+#define MV_CREATE_ERROR     (0x0A)  /* Fail while creating an item           */
+#define MV_NOT_FOUND        (0x0B)  /* Item not found                        */
+#define MV_NO_MORE          (0x0C)  /* No more items found                   */
+#define MV_NO_SUCH          (0x0D)  /* No such item                          */
+#define MV_TIMEOUT          (0x0E)  /* Time Out                              */
+#define MV_NO_CHANGE        (0x0F)  /* Parameter(s) is already in this value */
+#define MV_NOT_SUPPORTED    (0x10)  /* This request is not support           */
+#define MV_NOT_IMPLEMENTED  (0x11)  /* Request supported but not implemented */
+#define MV_NOT_INITIALIZED  (0x12)  /* The item is not initialized           */
+#define MV_NO_RESOURCE      (0x13)  /* Resource not available (memory ...)   */
+#define MV_FULL             (0x14)  /* Item is full (Queue or table etc...)  */
+#define MV_EMPTY            (0x15)  /* Item is empty (Queue or table etc...) */
+#define MV_INIT_ERROR       (0x16)  /* Error occured while INIT process      */
+#define MV_HW_ERROR         (0x17)  /* Hardware error                        */
+#define MV_TX_ERROR         (0x18)  /* Transmit operation not succeeded      */
+#define MV_RX_ERROR         (0x19)  /* Recieve operation not succeeded       */
+#define MV_NOT_READY	    (0x1A)	/* The other side is not ready yet       */
+#define MV_ALREADY_EXIST    (0x1B)  /* Tried to create existing item         */
+#define MV_OUT_OF_CPU_MEM   (0x1C)  /* Cpu memory allocation failed.         */
+#define MV_NOT_STARTED      (0x1D)  /* Not started yet         */
+#define MV_BUSY             (0x1E)  /* Item is busy.                         */
+#define MV_TERMINATE        (0x1F)  /* Item terminates it's work.            */
+#define MV_NOT_ALIGNED      (0x20)  /* Wrong alignment                       */
+#define MV_NOT_ALLOWED      (0x21)  /* Operation NOT allowed                 */
+#define MV_WRITE_PROTECT    (0x22)  /* Write protected                       */
+
+
+#define MV_INVALID  (int)(-1)
+
+#define MV_FALSE	0
+#define MV_TRUE     (!(MV_FALSE))
+
+
+#ifndef NULL
+#define NULL ((void*)0)
+#endif
+
+
+#ifndef MV_ASMLANGUAGE
+/* typedefs */
+
+typedef char  MV_8;
+typedef unsigned char	MV_U8;
+
+typedef int		MV_32;
+typedef unsigned int	MV_U32;
+
+typedef short		MV_16;
+typedef unsigned short	MV_U16;
+
+#ifdef MV_PPC64
+typedef long		MV_64;
+typedef unsigned long	MV_U64;
+#else
+typedef long long		MV_64;
+typedef unsigned long long	MV_U64;
+#endif
+
+typedef long		MV_LONG;	/* 32/64 */
+typedef unsigned long	MV_ULONG;	/* 32/64 */
+
+typedef int     MV_STATUS;
+typedef int     MV_BOOL;
+typedef void    MV_VOID;
+typedef float   MV_FLOAT;
+
+typedef int 	(*MV_FUNCPTR) (void);	  /* ptr to function returning int   */
+typedef void 	(*MV_VOIDFUNCPTR) (void); /* ptr to function returning void  */
+typedef double 	(*MV_DBLFUNCPTR) (void);  /* ptr to function returning double*/
+typedef float 	(*MV_FLTFUNCPTR) (void);  /* ptr to function returning float */
+
+typedef MV_U32 MV_KHZ;
+typedef MV_U32 MV_MHZ;
+typedef MV_U32 MV_HZ;
+
+
+/* This enumerator describes the set of commands that can be applied on   	*/
+/* an engine (e.g. IDMA, XOR). Appling a comman depends on the current   	*/
+/* status (see MV_STATE enumerator)                      					*/
+/* Start can be applied only when status is IDLE                         */
+/* Stop can be applied only when status is IDLE, ACTIVE or PAUSED        */
+/* Pause can be applied only when status is ACTIVE                          */
+/* Restart can be applied only when status is PAUSED                        */
+typedef enum _mvCommand
+{
+    MV_START,              /* Start	*/
+    MV_STOP,               /* Stop     */
+    MV_PAUSE,              /* Pause    */
+    MV_RESTART             /* Restart  */
+} MV_COMMAND;
+
+/* This enumerator describes the set of state conditions.					*/
+/* Moving from one state to other is stricted.   							*/
+typedef enum _mvState
+{
+    MV_IDLE,
+    MV_ACTIVE,
+    MV_PAUSED,
+    MV_UNDEFINED_STATE
+} MV_STATE;
+
+
+/* This structure describes address space window. Window base can be        */
+/* 64 bit, window size up to 4GB                                            */
+typedef struct _mvAddrWin
+{
+    MV_U32      baseLow;    /* 32bit base low       */
+    MV_U32      baseHigh;   /* 32bit base high      */
+    MV_U32      size;       /* 32bit size           */
+}MV_ADDR_WIN;
+
+/* This binary enumerator describes protection attribute status             */
+typedef enum _mvProtRight
+{
+    ALLOWED,        /* Protection attribute allowed                         */
+    FORBIDDEN       /* Protection attribute forbidden                       */
+}MV_PROT_RIGHT;
+
+/* Unified struct for Rx and Tx packet operations. The user is required to 	*/
+/* be familier only with Tx/Rx descriptor command status.               	*/
+typedef struct _bufInfo
+{
+    MV_U32   cmdSts;        /* Tx/Rx command status                                     */
+        MV_U16   byteCnt;       /* Size of valid data in the buffer     */
+    MV_U16   bufSize;       /* Total size of the buffer             */
+    MV_U8    *pBuff;            /* Pointer to Buffer                    */
+    MV_U8    *pData;            /* Pointer to data in the Buffer        */
+    MV_U32   userInfo1;         /* Tx/Rx attached user information 1    */
+    MV_U32   userInfo2;         /* Tx/Rx attached user information 2    */
+    struct _bufInfo *pNextBufInfo;  /* Next buffer in packet            */
+} BUF_INFO;
+
+/* This structure contains information describing one of buffers
+ * (fragments) they are built Ethernet packet.
+ */
+typedef struct
+{
+     MV_U8*	    bufVirtPtr;
+     MV_ULONG	bufPhysAddr;
+     MV_U32   	bufSize;
+     MV_U32     dataSize;
+     MV_U32		memHandle;
+	 MV_32      bufAddrShift;
+} MV_BUF_INFO;
+
+/* This structure contains information describing Ethernet packet.
+ * The packet can be divided for few buffers (fragments)
+ */
+typedef struct
+{
+    MV_ULONG   	osInfo;
+    MV_BUF_INFO *pFrags;
+    MV_U32      status;
+    MV_U16      pktSize;
+    MV_U16      numFrags;
+    MV_U32      ownerId;
+    MV_U32      fragIP;
+} MV_PKT_INFO;
+
+#endif /* MV_ASMLANGUAGE */
+
+#endif /* __INCmvTypesh */
diff --git a/crypto/ocf/kirkwood/mvHal/dbg-trace.c b/crypto/ocf/kirkwood/mvHal/dbg-trace.c
new file mode 100644
index 000000000000..91df96fa0c1c
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/dbg-trace.c
@@ -0,0 +1,108 @@
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include "dbg-trace.h"
+
+#define TRACE_ARR_LEN   800
+#define STR_LEN         128
+struct trace {
+    struct timeval tv;
+        char str[STR_LEN];
+    unsigned int callback_val1;
+    unsigned int callback_val2;
+        char valid;
+};
+static unsigned int (*trc_callback1) (unsigned char) = NULL;
+static unsigned int (*trc_callback2) (unsigned char) = NULL;
+static unsigned char trc_param1 = 0;
+static unsigned char trc_param2 = 0;
+struct trace *trc_arr;
+static int trc_index;
+static int trc_active = 0;
+
+void TRC_START()
+{
+    trc_active = 1;
+}
+
+void TRC_STOP()
+{
+    trc_active = 0;
+}
+
+void TRC_INIT(void *callback1, void *callback2, unsigned char callback1_param, unsigned char callback2_param)
+{
+    printk("Marvell debug tracing is on\n");
+        trc_arr = (struct trace *)kmalloc(TRACE_ARR_LEN*sizeof(struct trace),GFP_KERNEL);
+    if(trc_arr == NULL)
+    {
+        printk("Can't allocate Debug Trace buffer\n");
+        return;
+    }
+        memset(trc_arr,0,TRACE_ARR_LEN*sizeof(struct trace));
+        trc_index = 0;
+    trc_callback1 = callback1;
+    trc_callback2 = callback2;
+    trc_param1 = callback1_param;
+    trc_param2 = callback2_param;
+}
+void TRC_REC(char *fmt,...)
+{
+    va_list args;
+        struct trace *trc = &trc_arr[trc_index];
+
+    if(trc_active == 0)
+        return;
+
+    do_gettimeofday(&trc->tv);
+    if(trc_callback1)
+        trc->callback_val1 = trc_callback1(trc_param1);
+    if(trc_callback2)
+        trc->callback_val2 = trc_callback2(trc_param2);
+    va_start(args, fmt);
+    vsprintf(trc->str,fmt,args);
+    va_end(args);
+        trc->valid = 1;
+        if((++trc_index) == TRACE_ARR_LEN) {
+                trc_index = 0;
+    }
+}
+void TRC_OUTPUT(void)
+{
+        int i,j;
+        struct trace *p;
+        printk("\n\nTrace %d items\n",TRACE_ARR_LEN);
+        for(i=0,j=trc_index; i<TRACE_ARR_LEN; i++,j++) {
+                if(j == TRACE_ARR_LEN)
+                        j = 0;
+                p = &trc_arr[j];
+                if(p->valid) {
+            unsigned long uoffs;
+            struct trace *plast;
+            if(p == &trc_arr[0])
+                plast = &trc_arr[TRACE_ARR_LEN-1];
+            else
+                plast = p-1;
+            if(p->tv.tv_sec == ((plast)->tv.tv_sec))
+                uoffs = (p->tv.tv_usec - ((plast)->tv.tv_usec));
+            else
+                uoffs = (1000000 - ((plast)->tv.tv_usec)) +
+                    ((p->tv.tv_sec - ((plast)->tv.tv_sec) - 1) * 1000000) +
+                    p->tv.tv_usec;
+                        printk("%03d: [+%ld usec]", j, (unsigned long)uoffs);
+            if(trc_callback1)
+                printk("[%u]",p->callback_val1);
+            if(trc_callback2)
+                printk("[%u]",p->callback_val2);
+            printk(": %s",p->str);
+        }
+                p->valid = 0;
+        }
+        memset(trc_arr,0,TRACE_ARR_LEN*sizeof(struct trace));
+        trc_index = 0;
+}
+void TRC_RELEASE(void)
+{
+        kfree(trc_arr);
+        trc_index = 0;
+}
diff --git a/crypto/ocf/kirkwood/mvHal/dbg-trace.h b/crypto/ocf/kirkwood/mvHal/dbg-trace.h
new file mode 100644
index 000000000000..e3dd4809dbd2
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/dbg-trace.h
@@ -0,0 +1,24 @@
+
+#ifndef _MV_DBG_TRCE_H_
+#define _MV_DBG_TRCE_H_
+
+#ifdef CONFIG_MV_DBG_TRACE
+void TRC_INIT(void *callback1, void *callback2,
+    unsigned char callback1_param, unsigned char callback2_param);
+void TRC_REC(char *fmt,...);
+void TRC_OUTPUT(void);
+void TRC_RELEASE(void);
+void TRC_START(void);
+void TRC_STOP(void);
+
+#else
+#define TRC_INIT(x1,x2,x3,x4)
+#define TRC_REC(X...)
+#define TRC_OUTPUT()
+#define TRC_RELEASE()
+#define TRC_START()
+#define TRC_STOP()
+#endif
+
+
+#endif
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvLib.c b/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvLib.c
new file mode 100644
index 000000000000..86dd2dacb83e
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvLib.c
@@ -0,0 +1,2512 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "boardEnv/mvBoardEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+#include "cpu/mvCpu.h"
+#include "cntmr/mvCntmr.h"
+#include "gpp/mvGpp.h"
+#include "twsi/mvTwsi.h"
+#include "pex/mvPex.h"
+#include "device/mvDevice.h"
+#include "eth/gbe/mvEthRegs.h"
+
+/* defines  */
+/* #define MV_DEBUG */
+#ifdef MV_DEBUG
+	#define DB(x)	x
+#else
+	#define DB(x)
+#endif
+
+extern MV_CPU_ARM_CLK _cpuARMDDRCLK[];
+
+#define CODE_IN_ROM		MV_FALSE
+#define CODE_IN_RAM		MV_TRUE
+
+extern	MV_BOARD_INFO*	boardInfoTbl[];
+#define BOARD_INFO(boardId)	boardInfoTbl[boardId - BOARD_ID_BASE]
+
+/* Locals */
+static MV_DEV_CS_INFO*  boardGetDevEntry(MV_32 devNum, MV_BOARD_DEV_CLASS devClass);
+
+MV_U32 tClkRate   = -1;
+
+
+/*******************************************************************************
+* mvBoardEnvInit - Init board
+*
+* DESCRIPTION:
+*		In this function the board environment take care of device bank
+*		initialization.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_VOID mvBoardEnvInit(MV_VOID)
+{
+	MV_U32 boardId= mvBoardIdGet();
+
+	if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+	{
+		mvOsPrintf("mvBoardEnvInit:Board unknown.\n");
+		return;
+
+	}
+
+	/* Set GPP Out value */
+	MV_REG_WRITE(GPP_DATA_OUT_REG(0), BOARD_INFO(boardId)->gppOutValLow);
+	MV_REG_WRITE(GPP_DATA_OUT_REG(1), BOARD_INFO(boardId)->gppOutValHigh);
+
+	/* set GPP polarity */
+	mvGppPolaritySet(0, 0xFFFFFFFF, BOARD_INFO(boardId)->gppPolarityValLow);
+	mvGppPolaritySet(1, 0xFFFFFFFF, BOARD_INFO(boardId)->gppPolarityValHigh);
+
+    /* Workaround for Erratum FE-MISC-70*/
+    if(mvCtrlRevGet()==MV_88F6XXX_A0_REV)
+    {
+        BOARD_INFO(boardId)->gppOutEnValLow &= 0xfffffffd;
+        BOARD_INFO(boardId)->gppOutEnValLow |= (BOARD_INFO(boardId)->gppOutEnValHigh) & 0x00000002;
+    } /*End of WA*/
+
+	/* Set GPP Out Enable*/
+	mvGppTypeSet(0, 0xFFFFFFFF, BOARD_INFO(boardId)->gppOutEnValLow);
+	mvGppTypeSet(1, 0xFFFFFFFF, BOARD_INFO(boardId)->gppOutEnValHigh);
+
+	/* Nand CE */
+	MV_REG_BIT_SET(NAND_CTRL_REG, NAND_ACTCEBOOT_BIT);
+}
+
+/*******************************************************************************
+* mvBoardModelGet - Get Board model
+*
+* DESCRIPTION:
+*       This function returns 16bit describing board model.
+*       Board model is constructed of one byte major and minor numbers in the
+*       following manner:
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       String describing board model.
+*
+*******************************************************************************/
+MV_U16 mvBoardModelGet(MV_VOID)
+{
+	return (mvBoardIdGet() >> 16);
+}
+
+/*******************************************************************************
+* mbBoardRevlGet - Get Board revision
+*
+* DESCRIPTION:
+*       This function returns a 32bit describing the board revision.
+*       Board revision is constructed of 4bytes. 2bytes describes major number
+*       and the other 2bytes describes minor munber.
+*       For example for board revision 3.4 the function will return
+*       0x00030004.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       String describing board model.
+*
+*******************************************************************************/
+MV_U16 mvBoardRevGet(MV_VOID)
+{
+	return (mvBoardIdGet() & 0xFFFF);
+}
+
+/*******************************************************************************
+* mvBoardNameGet - Get Board name
+*
+* DESCRIPTION:
+*       This function returns a string describing the board model and revision.
+*       String is extracted from board I2C EEPROM.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       pNameBuff - Buffer to contain board name string. Minimum size 32 chars.
+*
+* RETURN:
+*
+*       MV_ERROR if informantion can not be read.
+*******************************************************************************/
+MV_STATUS mvBoardNameGet(char *pNameBuff)
+{
+	MV_U32 boardId= mvBoardIdGet();
+
+	if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+	{
+		mvOsSPrintf (pNameBuff, "Board unknown.\n");
+		return MV_ERROR;
+
+	}
+
+	mvOsSPrintf (pNameBuff, "%s",BOARD_INFO(boardId)->boardName);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvBoardIsPortInSgmii -
+*
+* DESCRIPTION:
+*       This routine returns MV_TRUE for port number works in SGMII or MV_FALSE
+*	For all other options.
+*
+* INPUT:
+*       ethPortNum - Ethernet port number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE - port in SGMII.
+*       MV_FALSE - other.
+*
+*******************************************************************************/
+MV_BOOL mvBoardIsPortInSgmii(MV_U32 ethPortNum)
+{
+    MV_BOOL ethPortSgmiiSupport[BOARD_ETH_PORT_NUM] = MV_ETH_PORT_SGMII;
+
+    if(ethPortNum >= BOARD_ETH_PORT_NUM)
+    {
+	    mvOsPrintf ("Invalid portNo=%d\n", ethPortNum);
+		return MV_FALSE;
+    }
+    return ethPortSgmiiSupport[ethPortNum];
+}
+
+/*******************************************************************************
+* mvBoardIsPortInGmii -
+*
+* DESCRIPTION:
+*       This routine returns MV_TRUE for port number works in GMII or MV_FALSE
+*	For all other options.
+*
+* INPUT:
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE - port in GMII.
+*       MV_FALSE - other.
+*
+*******************************************************************************/
+MV_BOOL mvBoardIsPortInGmii(MV_VOID)
+{
+	MV_U32 devClassId, devClass = 0;
+	if (mvBoardMppGroupTypeGet(devClass) == MV_BOARD_AUTO)
+	{
+		/* Get MPP module ID */
+		devClassId = mvBoarModuleTypeGet(devClass);
+		if (MV_BOARD_MODULE_GMII_ID == devClassId)
+			return MV_TRUE;
+	}
+	else if (mvBoardMppGroupTypeGet(devClass) == MV_BOARD_GMII)
+		return MV_TRUE;
+
+    return MV_FALSE;
+}
+/*******************************************************************************
+* mvBoardPhyAddrGet - Get the phy address
+*
+* DESCRIPTION:
+*       This routine returns the Phy address of a given ethernet port.
+*
+* INPUT:
+*       ethPortNum - Ethernet port number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit describing Phy address, -1 if the port number is wrong.
+*
+*******************************************************************************/
+MV_32 mvBoardPhyAddrGet(MV_U32 ethPortNum)
+{
+	MV_U32 boardId= mvBoardIdGet();
+
+	if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+	{
+		mvOsPrintf("mvBoardPhyAddrGet: Board unknown.\n");
+		return MV_ERROR;
+	}
+
+	return BOARD_INFO(boardId)->pBoardMacInfo[ethPortNum].boardEthSmiAddr;
+}
+
+/*******************************************************************************
+* mvBoardMacSpeedGet - Get the Mac speed
+*
+* DESCRIPTION:
+*       This routine returns the Mac speed if pre define of a given ethernet port.
+*
+* INPUT:
+*       ethPortNum - Ethernet port number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BOARD_MAC_SPEED, -1 if the port number is wrong.
+*
+*******************************************************************************/
+MV_BOARD_MAC_SPEED      mvBoardMacSpeedGet(MV_U32 ethPortNum)
+{
+	MV_U32 boardId= mvBoardIdGet();
+
+	if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+	{
+		mvOsPrintf("mvBoardMacSpeedGet: Board unknown.\n");
+		return MV_ERROR;
+	}
+
+	return BOARD_INFO(boardId)->pBoardMacInfo[ethPortNum].boardMacSpeed;
+}
+
+/*******************************************************************************
+* mvBoardLinkStatusIrqGet - Get the IRQ number for the link status indication
+*
+* DESCRIPTION:
+*       This routine returns the IRQ number for the link status indication.
+*
+* INPUT:
+*       ethPortNum - Ethernet port number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       the number of the IRQ for the link status indication, -1 if the port
+*	number is wrong or if not relevant.
+*
+*******************************************************************************/
+MV_32	mvBoardLinkStatusIrqGet(MV_U32 ethPortNum)
+{
+	MV_U32 boardId = mvBoardIdGet();
+
+	if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+	{
+		mvOsPrintf("mvBoardLinkStatusIrqGet: Board unknown.\n");
+		return MV_ERROR;
+	}
+
+	return BOARD_INFO(boardId)->pSwitchInfo[ethPortNum].linkStatusIrq;
+}
+
+/*******************************************************************************
+* mvBoardSwitchPortGet - Get the mapping between the board connector and the
+* Ethernet Switch port
+*
+* DESCRIPTION:
+*       This routine returns the matching Switch port.
+*
+* INPUT:
+*       ethPortNum - Ethernet port number.
+*	boardPortNum - logical number of the connector on the board
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       the matching Switch port, -1 if the port number is wrong or if not relevant.
+*
+*******************************************************************************/
+MV_32	mvBoardSwitchPortGet(MV_U32 ethPortNum, MV_U8 boardPortNum)
+{
+	MV_U32 boardId = mvBoardIdGet();
+
+	if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+	{
+		mvOsPrintf("mvBoardSwitchPortGet: Board unknown.\n");
+		return MV_ERROR;
+	}
+	if (boardPortNum >= BOARD_ETH_SWITCH_PORT_NUM)
+	{
+		mvOsPrintf("mvBoardSwitchPortGet: Illegal board port number.\n");
+		return MV_ERROR;
+	}
+
+	return BOARD_INFO(boardId)->pSwitchInfo[ethPortNum].qdPort[boardPortNum];
+}
+
+/*******************************************************************************
+* mvBoardSwitchCpuPortGet - Get the the Ethernet Switch CPU port
+*
+* DESCRIPTION:
+*       This routine returns the Switch CPU port.
+*
+* INPUT:
+*       ethPortNum - Ethernet port number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       the Switch CPU port, -1 if the port number is wrong or if not relevant.
+*
+*******************************************************************************/
+MV_32	mvBoardSwitchCpuPortGet(MV_U32 ethPortNum)
+{
+	MV_U32 boardId = mvBoardIdGet();
+
+	if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+	{
+		mvOsPrintf("mvBoardSwitchCpuPortGet: Board unknown.\n");
+		return MV_ERROR;
+	}
+
+	return BOARD_INFO(boardId)->pSwitchInfo[ethPortNum].qdCpuPort;
+}
+
+/*******************************************************************************
+* mvBoardIsSwitchConnected - Get switch connection status
+* DESCRIPTION:
+*       This routine returns port's connection status
+*
+* INPUT:
+*       ethPortNum - Ethernet port number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       1 - if ethPortNum is connected to switch, 0 otherwise
+*
+*******************************************************************************/
+MV_32	mvBoardIsSwitchConnected(MV_U32 ethPortNum)
+{
+	MV_U32 boardId = mvBoardIdGet();
+
+	if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+	{
+		mvOsPrintf("mvBoardIsSwitchConnected: Board unknown.\n");
+		return MV_ERROR;
+	}
+
+	if(ethPortNum >= BOARD_INFO(boardId)->numBoardMacInfo)
+	{
+		mvOsPrintf("mvBoardIsSwitchConnected: Illegal port number(%u)\n", ethPortNum);
+		return MV_ERROR;
+	}
+
+	if((MV_32)(BOARD_INFO(boardId)->pSwitchInfo))
+	return (MV_32)(BOARD_INFO(boardId)->pSwitchInfo[ethPortNum].switchOnPort == ethPortNum);
+	else
+		return 0;
+}
+/*******************************************************************************
+* mvBoardSmiScanModeGet - Get Switch SMI scan mode
+*
+* DESCRIPTION:
+*       This routine returns Switch SMI scan mode.
+*
+* INPUT:
+*       ethPortNum - Ethernet port number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       1 for SMI_MANUAL_MODE, -1 if the port number is wrong or if not relevant.
+*
+*******************************************************************************/
+MV_32	mvBoardSmiScanModeGet(MV_U32 ethPortNum)
+{
+	MV_U32 boardId = mvBoardIdGet();
+
+	if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+	{
+		mvOsPrintf("mvBoardSmiScanModeGet: Board unknown.\n");
+		return MV_ERROR;
+	}
+
+	return BOARD_INFO(boardId)->pSwitchInfo[ethPortNum].smiScanMode;
+}
+/*******************************************************************************
+* mvBoardSpecInitGet -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*       None.
+*
+* RETURN: Return MV_TRUE and parameters in case board need spesific phy init,
+* 	  otherwise return MV_FALSE.
+*
+*
+*******************************************************************************/
+
+MV_BOOL mvBoardSpecInitGet(MV_U32* regOff, MV_U32* data)
+{
+	return MV_FALSE;
+}
+
+/*******************************************************************************
+* mvBoardTclkGet - Get the board Tclk (Controller clock)
+*
+* DESCRIPTION:
+*       This routine extract the controller core clock.
+*       This function uses the controller counters to make identification.
+*		Note: In order to avoid interference, make sure task context switch
+*		and interrupts will not occure during this function operation
+*
+* INPUT:
+*       countNum - Counter number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit clock cycles in Hertz.
+*
+*******************************************************************************/
+MV_U32 mvBoardTclkGet(MV_VOID)
+{
+    if(mvCtrlModelGet()==MV_6281_DEV_ID)
+    {
+#if defined(TCLK_AUTO_DETECT)
+	MV_U32 tmpTClkRate = MV_BOARD_TCLK_166MHZ;
+
+    tmpTClkRate = MV_REG_READ(MPP_SAMPLE_AT_RESET);
+    tmpTClkRate &= MSAR_TCLCK_MASK;
+
+    switch (tmpTClkRate)
+    {
+    case MSAR_TCLCK_166:
+            return MV_BOARD_TCLK_166MHZ;
+            break;
+    case MSAR_TCLCK_200:
+            return MV_BOARD_TCLK_200MHZ;
+            break;
+    }
+#else
+    return MV_BOARD_TCLK_200MHZ;
+#endif
+    }
+
+        return MV_BOARD_TCLK_166MHZ;
+
+}
+/*******************************************************************************
+* mvBoardSysClkGet - Get the board SysClk (CPU bus clock)
+*
+* DESCRIPTION:
+*       This routine extract the CPU bus clock.
+*
+* INPUT:
+*       countNum - Counter number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit clock cycles in Hertz.
+*
+*******************************************************************************/
+static MV_U32  mvBoard6180SysClkGet(MV_VOID)
+{
+	MV_U32 	sysClkRate=0;
+	MV_CPU_ARM_CLK _cpu6180_ddr_l2_CLK[] = MV_CPU6180_DDR_L2_CLCK_TBL;
+
+	sysClkRate = MV_REG_READ(MPP_SAMPLE_AT_RESET);
+	sysClkRate = sysClkRate & MSAR_CPUCLCK_MASK_6180;
+	sysClkRate = sysClkRate >> MSAR_CPUCLCK_OFFS_6180;
+
+	sysClkRate = _cpu6180_ddr_l2_CLK[sysClkRate].ddrClk;
+
+	return sysClkRate;
+
+}
+
+MV_U32  mvBoardSysClkGet(MV_VOID)
+{
+#ifdef SYSCLK_AUTO_DETECT
+	MV_U32 sysClkRate, tmp, pClkRate, indexDdrRtio;
+	MV_U32 cpuCLK[] = MV_CPU_CLCK_TBL;
+	MV_U32 ddrRtio[][2] = MV_DDR_CLCK_RTIO_TBL;
+
+	if(mvCtrlModelGet() == MV_6180_DEV_ID)
+		return mvBoard6180SysClkGet();
+
+	tmp = MV_REG_READ(MPP_SAMPLE_AT_RESET);
+	pClkRate = MSAR_CPUCLCK_EXTRACT(tmp);
+	pClkRate = cpuCLK[pClkRate];
+
+	indexDdrRtio = tmp & MSAR_DDRCLCK_RTIO_MASK;
+	indexDdrRtio = indexDdrRtio >> MSAR_DDRCLCK_RTIO_OFFS;
+    if(ddrRtio[indexDdrRtio][0] != 0)
+        sysClkRate = ((pClkRate * ddrRtio[indexDdrRtio][1]) / ddrRtio[indexDdrRtio][0]);
+    else
+        sysClkRate = 0;
+	return sysClkRate;
+#else
+	return MV_BOARD_DEFAULT_SYSCLK;
+#endif
+}
+
+
+/*******************************************************************************
+* mvBoardPexBridgeIntPinGet - Get PEX to PCI bridge interrupt pin number
+*
+* DESCRIPTION:
+*		Multi-ported PCI Express bridges that is implemented on the board
+*		collapse interrupts across multiple conventional PCI/PCI-X buses.
+*		A dual-headed PCI Express bridge would map (or "swizzle") the
+*		interrupts per the following table (in accordance with the respective
+*		logical PCI/PCI-X bridge's Device Number), collapse the INTA#-INTD#
+*		signals from its two logical PCI/PCI-X bridges, collapse the
+*		INTA#-INTD# signals from any internal sources, and convert the
+*		signals to in-band PCI Express messages. 10
+*		This function returns the upstream interrupt as it was converted by
+*		the bridge, according to board configuration and the following table:
+*					  		PCI dev num
+*			Interrupt pin 	7, 	8, 	9
+*		   			A  ->	A	D	C
+*		   			B  -> 	B	A	D
+*		   			C  -> 	C	B	A
+*		  			D  ->	D	C	B
+*
+*
+* INPUT:
+*       devNum - PCI/PCIX device number.
+*       intPin - PCI Int pin
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Int pin connected to the Interrupt controller
+*
+*******************************************************************************/
+MV_U32 mvBoardPexBridgeIntPinGet(MV_U32 devNum, MV_U32 intPin)
+{
+	MV_U32 realIntPin = ((intPin + (3 - (devNum % 4))) %4 );
+
+	if (realIntPin == 0) return 4;
+		else return realIntPin;
+
+}
+
+/*******************************************************************************
+* mvBoardDebugLedNumGet - Get number of debug Leds
+*
+* DESCRIPTION:
+* INPUT:
+*       boardId
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_U32 mvBoardDebugLedNumGet(MV_U32 boardId)
+{
+	return BOARD_INFO(boardId)->activeLedsNumber;
+}
+
+/*******************************************************************************
+* mvBoardDebugLeg - Set the board debug Leds
+*
+* DESCRIPTION: turn on/off status leds.
+* 	       Note: assume MPP leds are part of group 0 only.
+*
+* INPUT:
+*       hexNum - Number to be displied in hex by Leds.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_VOID mvBoardDebugLed(MV_U32 hexNum)
+{
+    MV_U32 val = 0,totalMask, currentBitMask = 1,i;
+    MV_U32 boardId= mvBoardIdGet();
+
+    if (BOARD_INFO(boardId)->pLedGppPin == NULL)
+	return;
+
+    totalMask = (1 << BOARD_INFO(boardId)->activeLedsNumber) -1;
+    hexNum &= totalMask;
+    totalMask = 0;
+
+    for (i = 0 ; i < BOARD_INFO(boardId)->activeLedsNumber ; i++)
+    {
+	if (hexNum & currentBitMask)
+	{
+	    val |= (1 << BOARD_INFO(boardId)->pLedGppPin[i]);
+	}
+
+	totalMask |= (1 << BOARD_INFO(boardId)->pLedGppPin[i]);
+
+	currentBitMask = (currentBitMask << 1);
+    }
+
+    if (BOARD_INFO(boardId)->ledsPolarity)
+    {
+	mvGppValueSet(0, totalMask, val);
+    }
+    else
+    {
+	mvGppValueSet(0, totalMask, ~val);
+    }
+}
+
+
+/*******************************************************************************
+* mvBoarGpioPinGet - mvBoarGpioPinGet
+*
+* DESCRIPTION:
+*
+* INPUT:
+*		class - MV_BOARD_GPP_CLASS enum.
+*
+* OUTPUT:
+*		None.
+*
+* RETURN:
+*       GPIO pin number. The function return -1 for bad parameters.
+*
+*******************************************************************************/
+MV_32 mvBoarGpioPinNumGet(MV_BOARD_GPP_CLASS class, MV_U32 index)
+{
+	MV_U32 boardId, i;
+	MV_U32 indexFound = 0;
+
+	boardId = mvBoardIdGet();
+
+	if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+	{
+		mvOsPrintf("mvBoardRTCGpioPinGet:Board unknown.\n");
+		return MV_ERROR;
+
+	}
+
+        for (i = 0; i < BOARD_INFO(boardId)->numBoardGppInfo; i++)
+		if (BOARD_INFO(boardId)->pBoardGppInfo[i].devClass == class) {
+			if (indexFound == index)
+				return (MV_U32)BOARD_INFO(boardId)->pBoardGppInfo[i].gppPinNum;
+			else
+				indexFound++;
+
+		}
+
+	return MV_ERROR;
+}
+
+
+/*******************************************************************************
+* mvBoardRTCGpioPinGet - mvBoardRTCGpioPinGet
+*
+* DESCRIPTION:
+*
+* INPUT:
+*		None.
+*
+* OUTPUT:
+*		None.
+*
+* RETURN:
+*       GPIO pin number. The function return -1 for bad parameters.
+*
+*******************************************************************************/
+MV_32 mvBoardRTCGpioPinGet(MV_VOID)
+{
+	return mvBoarGpioPinNumGet(BOARD_GPP_RTC, 0);
+}
+
+
+/*******************************************************************************
+* mvBoardReset - mvBoardReset
+*
+* DESCRIPTION:
+*			Reset the board
+* INPUT:
+*		None.
+*
+* OUTPUT:
+*		None.
+*
+* RETURN:
+*       None
+*
+*******************************************************************************/
+MV_VOID	mvBoardReset(MV_VOID)
+{
+	MV_32 resetPin;
+
+	/* Get gpp reset pin if define */
+	resetPin = mvBoardResetGpioPinGet();
+	if (resetPin != MV_ERROR)
+	{
+		MV_REG_BIT_RESET( GPP_DATA_OUT_REG(0) ,(1 << resetPin));
+		MV_REG_BIT_RESET( GPP_DATA_OUT_EN_REG(0) ,(1 << resetPin));
+
+	}
+	else
+	{
+	    /* No gpp reset pin was found, try to reset ussing
+	    system reset out */
+	    MV_REG_BIT_SET( CPU_RSTOUTN_MASK_REG , BIT2);
+	    MV_REG_BIT_SET( CPU_SYS_SOFT_RST_REG , BIT0);
+	}
+}
+
+/*******************************************************************************
+* mvBoardResetGpioPinGet - mvBoardResetGpioPinGet
+*
+* DESCRIPTION:
+*
+* INPUT:
+*		None.
+*
+* OUTPUT:
+*		None.
+*
+* RETURN:
+*       GPIO pin number. The function return -1 for bad parameters.
+*
+*******************************************************************************/
+MV_32 mvBoardResetGpioPinGet(MV_VOID)
+{
+	return mvBoarGpioPinNumGet(BOARD_GPP_RESET, 0);
+}
+/*******************************************************************************
+* mvBoardSDIOGpioPinGet - mvBoardSDIOGpioPinGet
+*
+* DESCRIPTION:
+*	used for hotswap detection
+* INPUT:
+*		None.
+*
+* OUTPUT:
+*		None.
+*
+* RETURN:
+*       GPIO pin number. The function return -1 for bad parameters.
+*
+*******************************************************************************/
+MV_32  mvBoardSDIOGpioPinGet(MV_VOID)
+{
+	return mvBoarGpioPinNumGet(BOARD_GPP_SDIO_DETECT, 0);
+}
+
+/*******************************************************************************
+* mvBoardUSBVbusGpioPinGet - return Vbus input GPP
+*
+* DESCRIPTION:
+*
+* INPUT:
+*		int  devNo.
+*
+* OUTPUT:
+*		None.
+*
+* RETURN:
+*       GPIO pin number. The function return -1 for bad parameters.
+*
+*******************************************************************************/
+MV_32 mvBoardUSBVbusGpioPinGet(MV_32 devId)
+{
+	return mvBoarGpioPinNumGet(BOARD_GPP_USB_VBUS, devId);
+}
+
+/*******************************************************************************
+* mvBoardUSBVbusEnGpioPinGet - return Vbus Enable output GPP
+*
+* DESCRIPTION:
+*
+* INPUT:
+*		int  devNo.
+*
+* OUTPUT:
+*		None.
+*
+* RETURN:
+*       GPIO pin number. The function return -1 for bad parameters.
+*
+*******************************************************************************/
+MV_32 mvBoardUSBVbusEnGpioPinGet(MV_32 devId)
+{
+	return mvBoarGpioPinNumGet(BOARD_GPP_USB_VBUS_EN, devId);
+}
+
+
+/*******************************************************************************
+* mvBoardGpioIntMaskGet - Get GPIO mask for interrupt pins
+*
+* DESCRIPTION:
+*		This function returns a 32-bit mask of GPP pins that connected to
+*		interrupt generating sources on board.
+*		For example if UART channel A is hardwired to GPP pin 8 and
+*		UART channel B is hardwired to GPP pin 4 the fuinction will return
+*		the value 0x000000110
+*
+* INPUT:
+*		None.
+*
+* OUTPUT:
+*		None.
+*
+* RETURN:
+*		See description. The function return -1 if board is not identified.
+*
+*******************************************************************************/
+MV_32 mvBoardGpioIntMaskLowGet(MV_VOID)
+{
+	MV_U32 boardId;
+
+	boardId = mvBoardIdGet();
+
+	if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+	{
+		mvOsPrintf("mvBoardGpioIntMaskGet:Board unknown.\n");
+		return MV_ERROR;
+
+	}
+
+	return BOARD_INFO(boardId)->intsGppMaskLow;
+}
+MV_32 mvBoardGpioIntMaskHighGet(MV_VOID)
+{
+	MV_U32 boardId;
+
+	boardId = mvBoardIdGet();
+
+	if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+	{
+		mvOsPrintf("mvBoardGpioIntMaskGet:Board unknown.\n");
+		return MV_ERROR;
+
+	}
+
+	return BOARD_INFO(boardId)->intsGppMaskHigh;
+}
+
+
+/*******************************************************************************
+* mvBoardMppGet - Get board dependent MPP register value
+*
+* DESCRIPTION:
+*		MPP settings are derived from board design.
+*		MPP group consist of 8 MPPs. An MPP group represent MPP
+*		control register.
+*       This function retrieves board dependend MPP register value.
+*
+* INPUT:
+*       mppGroupNum - MPP group number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit value describing MPP control register value.
+*
+*******************************************************************************/
+MV_32 mvBoardMppGet(MV_U32 mppGroupNum)
+{
+	MV_U32 boardId;
+
+	boardId = mvBoardIdGet();
+
+	if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+	{
+		mvOsPrintf("mvBoardMppGet:Board unknown.\n");
+		return MV_ERROR;
+
+	}
+
+	return BOARD_INFO(boardId)->pBoardMppConfigValue[0].mppGroup[mppGroupNum];
+}
+
+
+/*******************************************************************************
+* mvBoardMppGroupId - If MPP group type is AUTO then identify it using twsi
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_VOID mvBoardMppGroupIdUpdate(MV_VOID)
+{
+
+	MV_BOARD_MPP_GROUP_CLASS devClass;
+	MV_BOARD_MODULE_ID_CLASS devClassId;
+	MV_BOARD_MPP_TYPE_CLASS mppGroupType;
+	MV_U32 devId;
+	MV_U32 maxMppGrp = 1;
+
+	devId = mvCtrlModelGet();
+
+	switch(devId){
+		case MV_6281_DEV_ID:
+			maxMppGrp = MV_6281_MPP_MAX_MODULE;
+			break;
+		case MV_6192_DEV_ID:
+			maxMppGrp = MV_6192_MPP_MAX_MODULE;
+			break;
+        case MV_6190_DEV_ID:
+            maxMppGrp = MV_6190_MPP_MAX_MODULE;
+            break;
+		case MV_6180_DEV_ID:
+			maxMppGrp = MV_6180_MPP_MAX_MODULE;
+			break;
+	}
+
+	for (devClass = 0; devClass < maxMppGrp; devClass++)
+	{
+		/* If MPP group can be defined by the module connected to it */
+		if (mvBoardMppGroupTypeGet(devClass) == MV_BOARD_AUTO)
+		{
+			/* Get MPP module ID */
+			devClassId = mvBoarModuleTypeGet(devClass);
+			if (MV_ERROR != devClassId)
+			{
+				switch(devClassId)
+				{
+				case MV_BOARD_MODULE_TDM_ID:
+				case MV_BOARD_MODULE_TDM_5CHAN_ID:
+					mppGroupType = MV_BOARD_TDM;
+					break;
+				case MV_BOARD_MODULE_AUDIO_ID:
+					mppGroupType = MV_BOARD_AUDIO;
+					break;
+				case MV_BOARD_MODULE_RGMII_ID:
+					mppGroupType = MV_BOARD_RGMII;
+					break;
+				case MV_BOARD_MODULE_GMII_ID:
+					mppGroupType = MV_BOARD_GMII;
+					break;
+				case MV_BOARD_MODULE_TS_ID:
+					mppGroupType = MV_BOARD_TS;
+					break;
+				case MV_BOARD_MODULE_MII_ID:
+					mppGroupType = MV_BOARD_MII;
+					break;
+				default:
+					mppGroupType = MV_BOARD_OTHER;
+					break;
+				}
+			}
+			else
+				/* The module bay is empty */
+				mppGroupType = MV_BOARD_OTHER;
+
+			/* Update MPP group type */
+			mvBoardMppGroupTypeSet(devClass, mppGroupType);
+		}
+
+		/* Update MPP output voltage for RGMII 1.8V. Set port to GMII for GMII module */
+		if ((mvBoardMppGroupTypeGet(devClass) == MV_BOARD_RGMII))
+			MV_REG_BIT_SET(MPP_OUTPUT_DRIVE_REG,MPP_1_8_RGMII1_OUTPUT_DRIVE | MPP_1_8_RGMII0_OUTPUT_DRIVE);
+		else
+		{
+			if ((mvBoardMppGroupTypeGet(devClass) == MV_BOARD_GMII))
+			{
+				MV_REG_BIT_RESET(MPP_OUTPUT_DRIVE_REG, BIT7 | BIT15);
+				MV_REG_BIT_RESET(ETH_PORT_SERIAL_CTRL_1_REG(0),BIT3);
+				MV_REG_BIT_RESET(ETH_PORT_SERIAL_CTRL_1_REG(1),BIT3);
+			}
+			else if ((mvBoardMppGroupTypeGet(devClass) == MV_BOARD_MII))
+			{
+				/* Assumption that the MDC & MDIO should be 3.3V */
+				MV_REG_BIT_RESET(MPP_OUTPUT_DRIVE_REG, BIT7 | BIT15);
+				/* Assumption that only ETH1 can be MII when using modules on DB */
+				MV_REG_BIT_RESET(ETH_PORT_SERIAL_CTRL_1_REG(1),BIT3);
+			}
+		}
+	}
+}
+
+/*******************************************************************************
+* mvBoardMppGroupTypeGet
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       mppGroupClass - MPP group number 0  for MPP[35:20] or 1 for MPP[49:36].
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_BOARD_MPP_TYPE_CLASS mvBoardMppGroupTypeGet(MV_BOARD_MPP_GROUP_CLASS mppGroupClass)
+{
+	MV_U32 boardId;
+
+	boardId = mvBoardIdGet();
+
+	if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+	{
+		mvOsPrintf("mvBoardMppGet:Board unknown.\n");
+		return MV_ERROR;
+
+	}
+
+	if (mppGroupClass == MV_BOARD_MPP_GROUP_1)
+		return BOARD_INFO(boardId)->pBoardMppTypeValue[0].boardMppGroup1;
+	else
+		return BOARD_INFO(boardId)->pBoardMppTypeValue[0].boardMppGroup2;
+}
+
+/*******************************************************************************
+* mvBoardMppGroupTypeSet
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       mppGroupClass - MPP group number 0  for MPP[35:20] or 1 for MPP[49:36].
+*       mppGroupType - MPP group type for MPP[35:20] or for MPP[49:36].
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_VOID mvBoardMppGroupTypeSet(MV_BOARD_MPP_GROUP_CLASS mppGroupClass,
+						MV_BOARD_MPP_TYPE_CLASS mppGroupType)
+{
+	MV_U32 boardId;
+
+	boardId = mvBoardIdGet();
+
+	if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+	{
+		mvOsPrintf("mvBoardMppGet:Board unknown.\n");
+	}
+
+	if (mppGroupClass == MV_BOARD_MPP_GROUP_1)
+		BOARD_INFO(boardId)->pBoardMppTypeValue[0].boardMppGroup1 = mppGroupType;
+	else
+		BOARD_INFO(boardId)->pBoardMppTypeValue[0].boardMppGroup2 = mppGroupType;
+
+}
+
+/*******************************************************************************
+* mvBoardMppMuxSet - Update MPP mux
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_VOID mvBoardMppMuxSet(MV_VOID)
+{
+
+	MV_BOARD_MPP_GROUP_CLASS devClass;
+	MV_BOARD_MPP_TYPE_CLASS mppGroupType;
+	MV_U32 devId;
+	MV_U8 muxVal = 0xf;
+	MV_U32 maxMppGrp = 1;
+    MV_TWSI_SLAVE twsiSlave;
+	MV_TWSI_ADDR slave;
+
+	devId = mvCtrlModelGet();
+
+	switch(devId){
+		case MV_6281_DEV_ID:
+			maxMppGrp = MV_6281_MPP_MAX_MODULE;
+			break;
+		case MV_6192_DEV_ID:
+			maxMppGrp = MV_6192_MPP_MAX_MODULE;
+			break;
+        case MV_6190_DEV_ID:
+            maxMppGrp = MV_6190_MPP_MAX_MODULE;
+            break;
+		case MV_6180_DEV_ID:
+			maxMppGrp = MV_6180_MPP_MAX_MODULE;
+			break;
+	}
+
+	for (devClass = 0; devClass < maxMppGrp; devClass++)
+	{
+		mppGroupType = mvBoardMppGroupTypeGet(devClass);
+
+		switch(mppGroupType)
+		{
+			case MV_BOARD_TDM:
+				muxVal &= ~(devClass ? (0x2 << (devClass * 2)):0x0);
+				break;
+			case MV_BOARD_AUDIO:
+				 muxVal &= ~(devClass ? 0x7 : 0x0); /*old Z0 value 0xd:0x0*/
+				break;
+			case MV_BOARD_TS:
+				 muxVal &= ~(devClass ? (0x2 << (devClass * 2)):0x0);
+				break;
+			default:
+				muxVal |= (devClass ? 0xf : 0);
+				break;
+		}
+	}
+
+	/* TWSI init */
+	slave.type = ADDR7_BIT;
+	slave.address = 0;
+	mvTwsiInit(0, TWSI_SPEED, mvBoardTclkGet(), &slave, 0);
+
+	/* Read MPP module ID */
+	DB(mvOsPrintf("Board: twsi exp set\n"));
+	twsiSlave.slaveAddr.address = mvBoardTwsiExpAddrGet(MV_BOARD_MUX_I2C_ADDR_ENTRY);
+	twsiSlave.slaveAddr.type = mvBoardTwsiExpAddrTypeGet(MV_BOARD_MUX_I2C_ADDR_ENTRY);
+	twsiSlave.validOffset = MV_TRUE;
+	/* Offset is the first command after the address which indicate the register number to be read
+	   in next operation */
+	twsiSlave.offset = 2;
+	twsiSlave.moreThen256 = MV_FALSE;
+
+
+
+	if( MV_OK != mvTwsiWrite (0, &twsiSlave, &muxVal, 1) )
+	{
+		DB(mvOsPrintf("Board: twsi exp out val fail\n"));
+		return;
+	}
+	DB(mvOsPrintf("Board: twsi exp out val succeded\n"));
+
+	/* Change twsi exp to output */
+	twsiSlave.offset = 6;
+	muxVal = 0;
+	if( MV_OK != mvTwsiWrite (0, &twsiSlave, &muxVal, 1) )
+	{
+		DB(mvOsPrintf("Board: twsi exp change to out fail\n"));
+		return;
+	}
+	DB(mvOsPrintf("Board: twsi exp change to out succeded\n"));
+
+}
+
+/*******************************************************************************
+* mvBoardTdmMppSet - set MPPs in TDM module
+*
+* DESCRIPTION:
+*
+* INPUT: type of second telephony device
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_VOID mvBoardTdmMppSet(MV_32 chType)
+{
+
+	MV_BOARD_MPP_GROUP_CLASS devClass;
+	MV_BOARD_MPP_TYPE_CLASS mppGroupType;
+	MV_U32 devId;
+	MV_U8 muxVal = 1;
+	MV_U8 muxValMask = 1;
+	MV_U8 twsiVal;
+	MV_U32 maxMppGrp = 1;
+	MV_TWSI_SLAVE twsiSlave;
+	MV_TWSI_ADDR slave;
+
+	devId = mvCtrlModelGet();
+
+	switch(devId){
+		case MV_6281_DEV_ID:
+			maxMppGrp = MV_6281_MPP_MAX_MODULE;
+			break;
+		case MV_6192_DEV_ID:
+			maxMppGrp = MV_6192_MPP_MAX_MODULE;
+			break;
+        case MV_6190_DEV_ID:
+            maxMppGrp = MV_6190_MPP_MAX_MODULE;
+            break;
+		case MV_6180_DEV_ID:
+			maxMppGrp = MV_6180_MPP_MAX_MODULE;
+			break;
+	}
+
+	for (devClass = 0; devClass < maxMppGrp; devClass++)
+	{
+		mppGroupType = mvBoardMppGroupTypeGet(devClass);
+		if(mppGroupType == MV_BOARD_TDM)
+			break;
+	}
+
+	if(devClass == maxMppGrp)
+		return;		/* TDM module not found */
+
+	/* TWSI init */
+	slave.type = ADDR7_BIT;
+	slave.address = 0;
+	mvTwsiInit(0, TWSI_SPEED, mvBoardTclkGet(), &slave, 0);
+
+	/* Read MPP module ID */
+	DB(mvOsPrintf("Board: twsi exp set\n"));
+	twsiSlave.slaveAddr.address = mvBoardTwsiExpAddrGet(devClass);
+	twsiSlave.slaveAddr.type = ADDR7_BIT;
+	twsiSlave.validOffset = MV_TRUE;
+	/* Offset is the first command after the address which indicate the register number to be read
+	   in next operation */
+	twsiSlave.offset = 3;
+	twsiSlave.moreThen256 = MV_FALSE;
+
+	if(mvBoardIdGet() == RD_88F6281A_ID)
+	{
+		muxVal = 0xc;
+		muxValMask = 0xf3;
+	}
+
+	mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+        muxVal = (twsiVal & muxValMask) | muxVal;
+
+	if( MV_OK != mvTwsiWrite (0, &twsiSlave, &muxVal, 1) )
+	{
+		mvOsPrintf("Board: twsi exp out val fail\n");
+		return;
+	}
+	DB(mvOsPrintf("Board: twsi exp out val succeded\n"));
+
+	/* Change twsi exp to output */
+	twsiSlave.offset = 7;
+	muxVal = 0xfe;
+	if(mvBoardIdGet() == RD_88F6281A_ID)
+		muxVal = 0xf3;
+
+	mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+	muxVal = (twsiVal & muxVal);
+
+	if( MV_OK != mvTwsiWrite (0, &twsiSlave, &muxVal, 1) )
+	{
+		mvOsPrintf("Board: twsi exp change to out fail\n");
+		return;
+	}
+	DB(mvOsPrintf("Board: twsi exp change to out succeded\n"));
+	/* reset the line to 0 */
+	twsiSlave.offset = 3;
+	muxVal = 0;
+	muxValMask = 1;
+
+	if(mvBoardIdGet() == RD_88F6281A_ID) {
+		muxVal = 0x0;
+		muxValMask = 0xf3;
+	}
+
+	mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+        muxVal = (twsiVal & muxValMask) | muxVal;
+
+	if( MV_OK != mvTwsiWrite (0, &twsiSlave, &muxVal, 1) )
+	{
+		mvOsPrintf("Board: twsi exp out val fail\n");
+		return;
+	}
+	DB(mvOsPrintf("Board: twsi exp out val succeded\n"));
+
+	mvOsDelay(20);
+
+	/* set the line to 1 */
+	twsiSlave.offset = 3;
+	muxVal = 1;
+	muxValMask = 1;
+
+	if(mvBoardIdGet() == RD_88F6281A_ID)
+	{
+		muxVal = 0xc;
+		muxValMask = 0xf3;
+		if(chType) /* FXS - issue reset properly */
+		{
+			MV_REG_BIT_SET(GPP_DATA_OUT_REG(1), MV_GPP12);
+			mvOsDelay(50);
+			MV_REG_BIT_RESET(GPP_DATA_OUT_REG(1), MV_GPP12);
+		}
+		else /* FXO - issue reset via TDM_CODEC_RST*/
+		{
+		   /* change MPP44 type to TDM_CODEC_RST(0x2) */
+		   MV_REG_WRITE(MPP_CONTROL_REG5, ((MV_REG_READ(MPP_CONTROL_REG5) & 0xFFF0FFFF)  | BIT17));
+		}
+	}
+
+	mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+        muxVal = (twsiVal & muxValMask) | muxVal;
+
+	if( MV_OK != mvTwsiWrite (0, &twsiSlave, &muxVal, 1) )
+	{
+		mvOsPrintf("Board: twsi exp out val fail\n");
+		return;
+	}
+
+	/* TBD - 5 channels */
+#if defined(MV_TDM_5CHANNELS)
+	/* change MPP38 type to GPIO(0x0) & polarity for TDM_STROBE */
+	MV_REG_WRITE(MPP_CONTROL_REG4, (MV_REG_READ(MPP_CONTROL_REG4) & 0xF0FFFFFF));
+	mvGppPolaritySet(1, MV_GPP6, 0);
+
+	twsiSlave.offset = 6;
+	twsiSlave.slaveAddr.address = mvBoardTwsiExpAddrGet(2);
+
+	mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+	muxVal = (twsiVal & ~BIT2);
+
+	if( MV_OK != mvTwsiWrite (0, &twsiSlave, &muxVal, 1) )
+	{
+		mvOsPrintf("Board: twsi exp change to out fail\n");
+		return;
+	}
+
+
+	twsiSlave.offset = 2;
+
+	mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+	muxVal = (twsiVal & ~BIT2);
+
+	if( MV_OK != mvTwsiWrite (0, &twsiSlave, &muxVal, 1) )
+	{
+		mvOsPrintf("Board: twsi exp change to out fail\n");
+		return;
+	}
+#endif
+	DB(mvOsPrintf("Board: twsi exp out val succeded\n"));
+
+
+}
+/*******************************************************************************
+* mvBoardVoiceConnModeGet - return SLIC/DAA connection & interrupt modes
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*
+*******************************************************************************/
+
+MV_VOID mvBoardVoiceConnModeGet(MV_32* connMode, MV_32* irqMode)
+{
+	switch(mvBoardIdGet())
+	{
+		case RD_88F6281A_ID:
+			*connMode = DAISY_CHAIN_MODE;
+			*irqMode = INTERRUPT_TO_TDM;
+			break;
+		case DB_88F6281A_BP_ID:
+			 *connMode = DUAL_CHIP_SELECT_MODE;
+			 *irqMode = INTERRUPT_TO_TDM;
+			break;
+		case RD_88F6192A_ID:
+			*connMode = DUAL_CHIP_SELECT_MODE;
+			*irqMode = INTERRUPT_TO_TDM;
+			break;
+		case DB_88F6192A_BP_ID:
+			 *connMode = DUAL_CHIP_SELECT_MODE;
+			 *irqMode = INTERRUPT_TO_TDM;
+			break;
+		default:
+			*connMode = *irqMode = -1;
+			mvOsPrintf("mvBoardVoiceAssembleModeGet: TDM not supported(boardId=0x%x)\n",mvBoardIdGet());
+	}
+		return;
+
+}
+
+/*******************************************************************************
+* mvBoardMppModuleTypePrint - print module detect
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_VOID mvBoardMppModuleTypePrint(MV_VOID)
+{
+
+	MV_BOARD_MPP_GROUP_CLASS devClass;
+	MV_BOARD_MPP_TYPE_CLASS mppGroupType;
+	MV_U32 devId;
+	MV_U32 maxMppGrp = 1;
+
+	devId = mvCtrlModelGet();
+
+	switch(devId){
+		case MV_6281_DEV_ID:
+			maxMppGrp = MV_6281_MPP_MAX_MODULE;
+			break;
+		case MV_6192_DEV_ID:
+			maxMppGrp = MV_6192_MPP_MAX_MODULE;
+			break;
+        case MV_6190_DEV_ID:
+            maxMppGrp = MV_6190_MPP_MAX_MODULE;
+            break;
+		case MV_6180_DEV_ID:
+			maxMppGrp = MV_6180_MPP_MAX_MODULE;
+			break;
+	}
+
+	for (devClass = 0; devClass < maxMppGrp; devClass++)
+	{
+		mppGroupType = mvBoardMppGroupTypeGet(devClass);
+
+		switch(mppGroupType)
+		{
+			case MV_BOARD_TDM:
+                if(devId != MV_6190_DEV_ID)
+                    mvOsPrintf("Module %d is TDM\n", devClass);
+				break;
+			case MV_BOARD_AUDIO:
+                if(devId != MV_6190_DEV_ID)
+                    mvOsPrintf("Module %d is AUDIO\n", devClass);
+				break;
+            case MV_BOARD_RGMII:
+                if(devId != MV_6190_DEV_ID)
+                    mvOsPrintf("Module %d is RGMII\n", devClass);
+				break;
+			case MV_BOARD_GMII:
+                if(devId != MV_6190_DEV_ID)
+                    mvOsPrintf("Module %d is GMII\n", devClass);
+				break;
+			case MV_BOARD_TS:
+                if(devId != MV_6190_DEV_ID)
+                    mvOsPrintf("Module %d is TS\n", devClass);
+				break;
+			default:
+				break;
+		}
+	}
+}
+
+/* Board devices API managments */
+
+/*******************************************************************************
+* mvBoardGetDeviceNumber - Get number of device of some type on the board
+*
+* DESCRIPTION:
+*
+* INPUT:
+*		devType - The device type ( Flash,RTC , etc .. )
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       If the device is found on the board the then the functions returns the
+*		number of those devices else the function returns 0
+*
+*
+*******************************************************************************/
+MV_32 mvBoardGetDevicesNumber(MV_BOARD_DEV_CLASS devClass)
+{
+	MV_U32	foundIndex=0,devNum;
+	MV_U32 boardId= mvBoardIdGet();
+
+	if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+	{
+		mvOsPrintf("mvBoardGetDeviceNumber:Board unknown.\n");
+		return 0xFFFFFFFF;
+
+	}
+
+	for (devNum = START_DEV_CS; devNum < BOARD_INFO(boardId)->numBoardDeviceIf; devNum++)
+	{
+		if (BOARD_INFO(boardId)->pDevCsInfo[devNum].devClass == devClass)
+		{
+			foundIndex++;
+		}
+	}
+
+    return foundIndex;
+
+}
+
+/*******************************************************************************
+* mvBoardGetDeviceBaseAddr - Get base address of a device existing on the board
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       devIndex - The device sequential number on the board
+*		devType - The device type ( Flash,RTC , etc .. )
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       If the device is found on the board the then the functions returns the
+*		Base address else the function returns 0xffffffff
+*
+*
+*******************************************************************************/
+MV_32 mvBoardGetDeviceBaseAddr(MV_32 devNum, MV_BOARD_DEV_CLASS devClass)
+{
+	MV_DEV_CS_INFO* devEntry;
+	devEntry = boardGetDevEntry(devNum,devClass);
+	if (devEntry != NULL)
+	{
+		return mvCpuIfTargetWinBaseLowGet(DEV_TO_TARGET(devEntry->deviceCS));
+
+	}
+
+	return 0xFFFFFFFF;
+}
+
+/*******************************************************************************
+* mvBoardGetDeviceBusWidth - Get Bus width of a device existing on the board
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       devIndex - The device sequential number on the board
+*		devType - The device type ( Flash,RTC , etc .. )
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       If the device is found on the board the then the functions returns the
+*		Bus width else the function returns 0xffffffff
+*
+*
+*******************************************************************************/
+MV_32 mvBoardGetDeviceBusWidth(MV_32 devNum, MV_BOARD_DEV_CLASS devClass)
+{
+	MV_DEV_CS_INFO* devEntry;
+
+	devEntry = boardGetDevEntry(devNum,devClass);
+	if (devEntry != NULL)
+	{
+		return 8;
+	}
+
+	return 0xFFFFFFFF;
+
+}
+
+/*******************************************************************************
+* mvBoardGetDeviceWidth - Get dev width of a device existing on the board
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       devIndex - The device sequential number on the board
+*		devType - The device type ( Flash,RTC , etc .. )
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       If the device is found on the board the then the functions returns the
+*		dev width else the function returns 0xffffffff
+*
+*
+*******************************************************************************/
+MV_32 mvBoardGetDeviceWidth(MV_32 devNum, MV_BOARD_DEV_CLASS devClass)
+{
+	MV_DEV_CS_INFO* devEntry;
+	MV_U32 boardId= mvBoardIdGet();
+
+	if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+	{
+		mvOsPrintf("Board unknown.\n");
+		return 0xFFFFFFFF;
+	}
+
+	devEntry = boardGetDevEntry(devNum,devClass);
+	if (devEntry != NULL)
+		return devEntry->devWidth;
+
+	return MV_ERROR;
+
+}
+
+/*******************************************************************************
+* mvBoardGetDeviceWinSize - Get the window size of a device existing on the board
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       devIndex - The device sequential number on the board
+*		devType - The device type ( Flash,RTC , etc .. )
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       If the device is found on the board the then the functions returns the
+*		window size else the function returns 0xffffffff
+*
+*
+*******************************************************************************/
+MV_32 mvBoardGetDeviceWinSize(MV_32 devNum, MV_BOARD_DEV_CLASS devClass)
+{
+	MV_DEV_CS_INFO* devEntry;
+	MV_U32 boardId = mvBoardIdGet();
+
+	if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+	{
+		mvOsPrintf("Board unknown.\n");
+		return 0xFFFFFFFF;
+	}
+
+	devEntry = boardGetDevEntry(devNum,devClass);
+	if (devEntry != NULL)
+	{
+		return mvCpuIfTargetWinSizeGet(DEV_TO_TARGET(devEntry->deviceCS));
+	}
+
+	return 0xFFFFFFFF;
+}
+
+
+/*******************************************************************************
+* boardGetDevEntry - returns the entry pointer of a device on the board
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       devIndex - The device sequential number on the board
+*		devType - The device type ( Flash,RTC , etc .. )
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       If the device is found on the board the then the functions returns the
+*		dev number else the function returns 0x0
+*
+*
+*******************************************************************************/
+static MV_DEV_CS_INFO*  boardGetDevEntry(MV_32 devNum, MV_BOARD_DEV_CLASS devClass)
+{
+	MV_U32	foundIndex=0,devIndex;
+	MV_U32 boardId= mvBoardIdGet();
+
+	if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+	{
+		mvOsPrintf("boardGetDevEntry: Board unknown.\n");
+		return NULL;
+
+	}
+
+	for (devIndex = START_DEV_CS; devIndex < BOARD_INFO(boardId)->numBoardDeviceIf; devIndex++)
+	{
+		/* TBR */
+		/*if (BOARD_INFO(boardId)->pDevCsInfo[devIndex].deviceCS == MV_BOOTDEVICE_INDEX)
+		     continue;*/
+
+		if (BOARD_INFO(boardId)->pDevCsInfo[devIndex].devClass == devClass)
+		{
+			if (foundIndex == devNum)
+			{
+				return &(BOARD_INFO(boardId)->pDevCsInfo[devIndex]);
+			}
+			foundIndex++;
+		}
+	}
+
+	/* device not found */
+	return NULL;
+}
+
+/* Get device CS number */
+
+MV_U32 boardGetDevCSNum(MV_32 devNum, MV_BOARD_DEV_CLASS devClass)
+{
+	MV_DEV_CS_INFO* devEntry;
+	MV_U32 boardId= mvBoardIdGet();
+
+	if (!((boardId >= BOARD_ID_BASE)&&(boardId < MV_MAX_BOARD_ID)))
+	{
+		mvOsPrintf("Board unknown.\n");
+		return 0xFFFFFFFF;
+
+	}
+
+
+	devEntry = boardGetDevEntry(devNum,devClass);
+	if (devEntry != NULL)
+		return devEntry->deviceCS;
+
+	return 0xFFFFFFFF;
+
+}
+
+/*******************************************************************************
+* mvBoardRtcTwsiAddrTypeGet -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*
+*
+*******************************************************************************/
+MV_U8 mvBoardRtcTwsiAddrTypeGet()
+{
+	int i;
+	MV_U32 boardId= mvBoardIdGet();
+
+	for (i = 0; i < BOARD_INFO(boardId)->numBoardTwsiDev; i++)
+		if (BOARD_INFO(boardId)->pBoardTwsiDev[i].devClass == BOARD_TWSI_RTC)
+			return BOARD_INFO(boardId)->pBoardTwsiDev[i].twsiDevAddrType;
+	return (MV_ERROR);
+}
+
+/*******************************************************************************
+* mvBoardRtcTwsiAddrGet -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*
+*
+*******************************************************************************/
+MV_U8 mvBoardRtcTwsiAddrGet()
+{
+	int i;
+	MV_U32 boardId= mvBoardIdGet();
+
+	for (i = 0; i < BOARD_INFO(boardId)->numBoardTwsiDev; i++)
+		if (BOARD_INFO(boardId)->pBoardTwsiDev[i].devClass == BOARD_TWSI_RTC)
+			return BOARD_INFO(boardId)->pBoardTwsiDev[i].twsiDevAddr;
+	return (0xFF);
+}
+
+/*******************************************************************************
+* mvBoardA2DTwsiAddrTypeGet -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*
+*
+*******************************************************************************/
+MV_U8 mvBoardA2DTwsiAddrTypeGet()
+{
+	int i;
+	MV_U32 boardId= mvBoardIdGet();
+
+	for (i = 0; i < BOARD_INFO(boardId)->numBoardTwsiDev; i++)
+		if (BOARD_INFO(boardId)->pBoardTwsiDev[i].devClass == BOARD_TWSI_AUDIO_DEC)
+			return BOARD_INFO(boardId)->pBoardTwsiDev[i].twsiDevAddrType;
+	return (MV_ERROR);
+}
+
+/*******************************************************************************
+* mvBoardA2DTwsiAddrGet -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*
+*
+*******************************************************************************/
+MV_U8 mvBoardA2DTwsiAddrGet()
+{
+	int i;
+	MV_U32 boardId= mvBoardIdGet();
+
+	for (i = 0; i < BOARD_INFO(boardId)->numBoardTwsiDev; i++)
+		if (BOARD_INFO(boardId)->pBoardTwsiDev[i].devClass == BOARD_TWSI_AUDIO_DEC)
+			return BOARD_INFO(boardId)->pBoardTwsiDev[i].twsiDevAddr;
+	return (0xFF);
+}
+
+/*******************************************************************************
+* mvBoardTwsiExpAddrTypeGet -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*
+*
+*******************************************************************************/
+MV_U8 mvBoardTwsiExpAddrTypeGet(MV_U32 index)
+{
+	int i;
+	MV_U32 indexFound = 0;
+	MV_U32 boardId= mvBoardIdGet();
+
+	for (i = 0; i < BOARD_INFO(boardId)->numBoardTwsiDev; i++)
+		if (BOARD_INFO(boardId)->pBoardTwsiDev[i].devClass == BOARD_DEV_TWSI_EXP)
+		{
+			if (indexFound == index)
+				return BOARD_INFO(boardId)->pBoardTwsiDev[i].twsiDevAddrType;
+			else
+				indexFound++;
+		}
+
+	return (MV_ERROR);
+}
+
+/*******************************************************************************
+* mvBoardTwsiExpAddrGet -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*
+*
+*******************************************************************************/
+MV_U8 mvBoardTwsiExpAddrGet(MV_U32 index)
+{
+	int i;
+	MV_U32 indexFound = 0;
+	MV_U32 boardId= mvBoardIdGet();
+
+	for (i = 0; i < BOARD_INFO(boardId)->numBoardTwsiDev; i++)
+		if (BOARD_INFO(boardId)->pBoardTwsiDev[i].devClass == BOARD_DEV_TWSI_EXP)
+		{
+			if (indexFound == index)
+				return BOARD_INFO(boardId)->pBoardTwsiDev[i].twsiDevAddr;
+			else
+				indexFound++;
+		}
+
+	return (0xFF);
+}
+
+
+/*******************************************************************************
+* mvBoardTwsiSatRAddrTypeGet -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*
+*
+*******************************************************************************/
+MV_U8 mvBoardTwsiSatRAddrTypeGet(MV_U32 index)
+{
+	int i;
+	MV_U32 indexFound = 0;
+	MV_U32 boardId= mvBoardIdGet();
+
+	for (i = 0; i < BOARD_INFO(boardId)->numBoardTwsiDev; i++)
+		if (BOARD_INFO(boardId)->pBoardTwsiDev[i].devClass == BOARD_DEV_TWSI_SATR)
+		{
+			if (indexFound == index)
+				return BOARD_INFO(boardId)->pBoardTwsiDev[i].twsiDevAddrType;
+			else
+				indexFound++;
+		}
+
+	return (MV_ERROR);
+}
+
+/*******************************************************************************
+* mvBoardTwsiSatRAddrGet -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*
+*
+*******************************************************************************/
+MV_U8 mvBoardTwsiSatRAddrGet(MV_U32 index)
+{
+	int i;
+	MV_U32 indexFound = 0;
+	MV_U32 boardId= mvBoardIdGet();
+
+	for (i = 0; i < BOARD_INFO(boardId)->numBoardTwsiDev; i++)
+		if (BOARD_INFO(boardId)->pBoardTwsiDev[i].devClass == BOARD_DEV_TWSI_SATR)
+		{
+			if (indexFound == index)
+				return BOARD_INFO(boardId)->pBoardTwsiDev[i].twsiDevAddr;
+			else
+				indexFound++;
+		}
+
+	return (0xFF);
+}
+
+/*******************************************************************************
+* mvBoardNandWidthGet -
+*
+* DESCRIPTION: Get the width of the first NAND device in byte.
+*
+* INPUT:
+*
+* OUTPUT:
+*       None.
+*
+* RETURN: 1, 2, 4 or MV_ERROR
+*
+*
+*******************************************************************************/
+/*  */
+MV_32 mvBoardNandWidthGet(void)
+{
+	MV_U32 devNum;
+	MV_U32 devWidth;
+	MV_U32 boardId= mvBoardIdGet();
+
+	for (devNum = START_DEV_CS; devNum < BOARD_INFO(boardId)->numBoardDeviceIf; devNum++)
+	{
+		devWidth = mvBoardGetDeviceWidth(devNum, BOARD_DEV_NAND_FLASH);
+		if (devWidth != MV_ERROR)
+			return (devWidth / 8);
+	}
+
+	/* NAND wasn't found */
+	return MV_ERROR;
+}
+
+MV_U32 gBoardId = -1;
+
+/*******************************************************************************
+* mvBoardIdGet - Get Board model
+*
+* DESCRIPTION:
+*       This function returns board ID.
+*       Board ID is 32bit word constructed of board model (16bit) and
+*       board revision (16bit) in the following way: 0xMMMMRRRR.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit board ID number, '-1' if board is undefined.
+*
+*******************************************************************************/
+MV_U32 mvBoardIdGet(MV_VOID)
+{
+	MV_U32 tmpBoardId = -1;
+
+	if(gBoardId == -1)
+        {
+		#if defined(DB_88F6281A)
+		tmpBoardId = DB_88F6281A_BP_ID;
+		#elif defined(RD_88F6281A)
+		tmpBoardId = RD_88F6281A_ID;
+		#elif defined(DB_88F6192A)
+		tmpBoardId = DB_88F6192A_BP_ID;
+		#elif defined(DB_88F6190A)
+		tmpBoardId = DB_88F6190A_BP_ID;
+		#elif defined(RD_88F6192A)
+		tmpBoardId = RD_88F6192A_ID;
+		#elif defined(RD_88F6190A)
+		tmpBoardId = RD_88F6190A_ID;
+		#elif defined(DB_88F6180A)
+		tmpBoardId = DB_88F6180A_BP_ID;
+		#elif defined(RD_88F6281A_PCAC)
+		tmpBoardId = RD_88F6281A_PCAC_ID;
+		#elif defined(RD_88F6281A_SHEEVA_PLUG)
+		tmpBoardId = SHEEVA_PLUG_ID;
+		#elif defined(DB_CUSTOMER)
+		tmpBoardId = DB_CUSTOMER_ID;
+		#endif
+		gBoardId = tmpBoardId;
+	}
+
+	return gBoardId;
+}
+
+
+/*******************************************************************************
+* mvBoarModuleTypeGet - mvBoarModuleTypeGet
+*
+* DESCRIPTION:
+*
+* INPUT:
+*		group num - MV_BOARD_MPP_GROUP_CLASS enum
+*
+* OUTPUT:
+*		None.
+*
+* RETURN:
+*		module num - MV_BOARD_MODULE_CLASS enum
+*
+*******************************************************************************/
+MV_BOARD_MODULE_ID_CLASS mvBoarModuleTypeGet(MV_BOARD_MPP_GROUP_CLASS devClass)
+{
+	MV_TWSI_SLAVE twsiSlave;
+	MV_TWSI_ADDR slave;
+	MV_U8 data;
+
+	/* TWSI init */
+	slave.type = ADDR7_BIT;
+	slave.address = 0;
+	mvTwsiInit(0, TWSI_SPEED, mvBoardTclkGet(), &slave, 0);
+
+	/* Read MPP module ID */
+	DB(mvOsPrintf("Board: Read MPP module ID\n"));
+	twsiSlave.slaveAddr.address = mvBoardTwsiExpAddrGet(devClass);
+	twsiSlave.slaveAddr.type = mvBoardTwsiExpAddrTypeGet(devClass);
+	twsiSlave.validOffset = MV_TRUE;
+	/* Offset is the first command after the address which indicate the register number to be read
+	   in next operation */
+	twsiSlave.offset = 0;
+	twsiSlave.moreThen256 = MV_FALSE;
+
+
+
+	if( MV_OK != mvTwsiRead (0, &twsiSlave, &data, 1) )
+	{
+		DB(mvOsPrintf("Board: Read MPP module ID fail\n"));
+		return MV_ERROR;
+	}
+	DB(mvOsPrintf("Board: Read MPP module ID succeded\n"));
+
+	return data;
+}
+
+/*******************************************************************************
+* mvBoarTwsiSatRGet -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*		device num - one of three devices
+*		reg num - 0 or 1
+*
+* OUTPUT:
+*		None.
+*
+* RETURN:
+*		reg value
+*
+*******************************************************************************/
+MV_U8 mvBoarTwsiSatRGet(MV_U8 devNum, MV_U8 regNum)
+{
+	MV_TWSI_SLAVE twsiSlave;
+	MV_TWSI_ADDR slave;
+	MV_U8 data;
+
+	/* TWSI init */
+	slave.type = ADDR7_BIT;
+	slave.address = 0;
+	mvTwsiInit(0, TWSI_SPEED, mvBoardTclkGet(), &slave, 0);
+
+	/* Read MPP module ID */
+	DB(mvOsPrintf("Board: Read S@R device read\n"));
+	twsiSlave.slaveAddr.address = mvBoardTwsiSatRAddrGet(devNum);
+	twsiSlave.slaveAddr.type = mvBoardTwsiSatRAddrTypeGet(devNum);
+	twsiSlave.validOffset = MV_TRUE;
+	/* Use offset as command */
+	twsiSlave.offset = regNum;
+	twsiSlave.moreThen256 = MV_FALSE;
+
+	if( MV_OK != mvTwsiRead (0, &twsiSlave, &data, 1) )
+	{
+		DB(mvOsPrintf("Board: Read S@R fail\n"));
+		return MV_ERROR;
+	}
+	DB(mvOsPrintf("Board: Read S@R succeded\n"));
+
+	return data;
+}
+
+/*******************************************************************************
+* mvBoarTwsiSatRSet -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*		devNum - one of three devices
+*		regNum - 0 or 1
+*		regVal - value
+*
+*
+* OUTPUT:
+*		None.
+*
+* RETURN:
+*		reg value
+*
+*******************************************************************************/
+MV_STATUS mvBoarTwsiSatRSet(MV_U8 devNum, MV_U8 regNum, MV_U8 regVal)
+{
+	MV_TWSI_SLAVE twsiSlave;
+	MV_TWSI_ADDR slave;
+
+	/* TWSI init */
+	slave.type = ADDR7_BIT;
+	slave.address = 0;
+	mvTwsiInit(0, TWSI_SPEED, mvBoardTclkGet(), &slave, 0);
+
+	/* Read MPP module ID */
+	twsiSlave.slaveAddr.address = mvBoardTwsiSatRAddrGet(devNum);
+	twsiSlave.slaveAddr.type = mvBoardTwsiSatRAddrTypeGet(devNum);
+	twsiSlave.validOffset = MV_TRUE;
+	DB(mvOsPrintf("Board: Write S@R device addr %x, type %x, data %x\n", twsiSlave.slaveAddr.address,\
+								twsiSlave.slaveAddr.type, regVal));
+	/* Use offset as command */
+	twsiSlave.offset = regNum;
+	twsiSlave.moreThen256 = MV_FALSE;
+	if( MV_OK != mvTwsiWrite (0, &twsiSlave, &regVal, 1) )
+	{
+		DB(mvOsPrintf("Board: Write S@R fail\n"));
+		return MV_ERROR;
+	}
+	DB(mvOsPrintf("Board: Write S@R succeded\n"));
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvBoardSlicGpioPinGet -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*
+*
+*******************************************************************************/
+MV_32 mvBoardSlicGpioPinGet(MV_U32 slicNum)
+{
+	MV_U32 boardId;
+	boardId = mvBoardIdGet();
+
+	switch (boardId)
+	{
+	case DB_88F6281A_BP_ID:
+	case RD_88F6281A_ID:
+	default:
+		return MV_ERROR;
+		break;
+
+	}
+}
+
+/*******************************************************************************
+* mvBoardFanPowerControl - Turn on/off the fan power control on the RD-6281A
+*
+* DESCRIPTION:
+*
+* INPUT:
+*        mode - MV_TRUE = on ; MV_FALSE = off
+*
+* OUTPUT:
+*       MV_STATUS - MV_OK , MV_ERROR.
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_STATUS mvBoardFanPowerControl(MV_BOOL mode)
+{
+
+	MV_U8 val = 1, twsiVal;
+	MV_TWSI_SLAVE twsiSlave;
+	MV_TWSI_ADDR slave;
+
+	if(mvBoardIdGet() != RD_88F6281A_ID)
+        return MV_ERROR;
+
+	/* TWSI init */
+	slave.type = ADDR7_BIT;
+	slave.address = 0;
+	mvTwsiInit(0, TWSI_SPEED, mvBoardTclkGet(), &slave, 0);
+
+	/* Read MPP module ID */
+	DB(mvOsPrintf("Board: twsi exp set\n"));
+	twsiSlave.slaveAddr.address = mvBoardTwsiExpAddrGet(1);
+	twsiSlave.slaveAddr.type = ADDR7_BIT;
+	twsiSlave.validOffset = MV_TRUE;
+	/* Offset is the first command after the address which indicate the register number to be read
+	   in next operation */
+	twsiSlave.offset = 3;
+	twsiSlave.moreThen256 = MV_FALSE;
+        if(mode == MV_TRUE)
+            val = 0x1;
+        else
+            val = 0;
+        mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+        val = (twsiVal & 0xfe) | val;
+
+        if( MV_OK != mvTwsiWrite (0, &twsiSlave, &val, 1) )
+	{
+		DB(mvOsPrintf("Board: twsi exp out val fail\n"));
+		return MV_ERROR;
+	}
+	DB(mvOsPrintf("Board: twsi exp out val succeded\n"));
+
+	/* Change twsi exp to output */
+	twsiSlave.offset = 7;
+        mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+        val = (twsiVal & 0xfe);
+	if( MV_OK != mvTwsiWrite (0, &twsiSlave, &val, 1) )
+	{
+		DB(mvOsPrintf("Board: twsi exp change to out fail\n"));
+		return MV_ERROR;
+	}
+	DB(mvOsPrintf("Board: twsi exp change to out succeded\n"));
+        return MV_OK;
+}
+
+/*******************************************************************************
+* mvBoardHDDPowerControl - Turn on/off the HDD power control on the RD-6281A
+*
+* DESCRIPTION:
+*
+* INPUT:
+*        mode - MV_TRUE = on ; MV_FALSE = off
+*
+* OUTPUT:
+*       MV_STATUS - MV_OK , MV_ERROR.
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_STATUS mvBoardHDDPowerControl(MV_BOOL mode)
+{
+
+	MV_U8 val = 1, twsiVal;
+	MV_TWSI_SLAVE twsiSlave;
+	MV_TWSI_ADDR slave;
+
+	if(mvBoardIdGet() != RD_88F6281A_ID)
+        return MV_ERROR;
+
+	/* TWSI init */
+	slave.type = ADDR7_BIT;
+	slave.address = 0;
+	mvTwsiInit(0, TWSI_SPEED, mvBoardTclkGet(), &slave, 0);
+
+	/* Read MPP module ID */
+	DB(mvOsPrintf("Board: twsi exp set\n"));
+	twsiSlave.slaveAddr.address = mvBoardTwsiExpAddrGet(1);
+	twsiSlave.slaveAddr.type = ADDR7_BIT;
+	twsiSlave.validOffset = MV_TRUE;
+	/* Offset is the first command after the address which indicate the register number to be read
+	   in next operation */
+	twsiSlave.offset = 3;
+	twsiSlave.moreThen256 = MV_FALSE;
+        if(mode == MV_TRUE)
+            val = 0x2;
+        else
+            val = 0;
+        mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+        val = (twsiVal & 0xfd) | val;
+	if( MV_OK != mvTwsiWrite (0, &twsiSlave, &val, 1) )
+	{
+		DB(mvOsPrintf("Board: twsi exp out val fail\n"));
+		return MV_ERROR;
+	}
+	DB(mvOsPrintf("Board: twsi exp out val succeded\n"));
+
+	/* Change twsi exp to output */
+	twsiSlave.offset = 7;
+        mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+        val = (twsiVal & 0xfd);
+	if( MV_OK != mvTwsiWrite (0, &twsiSlave, &val, 1) )
+	{
+		DB(mvOsPrintf("Board: twsi exp change to out fail\n"));
+		return MV_ERROR;
+	}
+	DB(mvOsPrintf("Board: twsi exp change to out succeded\n"));
+        return MV_OK;
+}
+
+/*******************************************************************************
+* mvBoardSDioWPControl - Turn on/off the SDIO WP on the RD-6281A
+*
+* DESCRIPTION:
+*
+* INPUT:
+*        mode - MV_TRUE = on ; MV_FALSE = off
+*
+* OUTPUT:
+*       MV_STATUS - MV_OK , MV_ERROR.
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_STATUS mvBoardSDioWPControl(MV_BOOL mode)
+{
+
+	MV_U8 val = 1, twsiVal;
+	MV_TWSI_SLAVE twsiSlave;
+	MV_TWSI_ADDR slave;
+
+	if(mvBoardIdGet() != RD_88F6281A_ID)
+        return MV_ERROR;
+
+	/* TWSI init */
+	slave.type = ADDR7_BIT;
+	slave.address = 0;
+	mvTwsiInit(0, TWSI_SPEED, mvBoardTclkGet(), &slave, 0);
+
+	/* Read MPP module ID */
+	DB(mvOsPrintf("Board: twsi exp set\n"));
+	twsiSlave.slaveAddr.address = mvBoardTwsiExpAddrGet(0);
+	twsiSlave.slaveAddr.type = ADDR7_BIT;
+	twsiSlave.validOffset = MV_TRUE;
+	/* Offset is the first command after the address which indicate the register number to be read
+	   in next operation */
+	twsiSlave.offset = 3;
+	twsiSlave.moreThen256 = MV_FALSE;
+        if(mode == MV_TRUE)
+            val = 0x10;
+        else
+            val = 0;
+        mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+        val = (twsiVal & 0xef) | val;
+	if( MV_OK != mvTwsiWrite (0, &twsiSlave, &val, 1) )
+	{
+		DB(mvOsPrintf("Board: twsi exp out val fail\n"));
+		return MV_ERROR;
+	}
+	DB(mvOsPrintf("Board: twsi exp out val succeded\n"));
+
+	/* Change twsi exp to output */
+	twsiSlave.offset = 7;
+        mvTwsiRead(0, &twsiSlave, &twsiVal, 1);
+        val = (twsiVal & 0xef);
+	if( MV_OK != mvTwsiWrite (0, &twsiSlave, &val, 1) )
+	{
+		DB(mvOsPrintf("Board: twsi exp change to out fail\n"));
+		return MV_ERROR;
+	}
+	DB(mvOsPrintf("Board: twsi exp change to out succeded\n"));
+        return MV_OK;
+}
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvLib.h b/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvLib.h
new file mode 100644
index 000000000000..259aa59d6e5b
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvLib.h
@@ -0,0 +1,376 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#ifndef __INCmvBoardEnvLibh
+#define __INCmvBoardEnvLibh
+
+/* defines */
+/* The below constant macros defines the board I2C EEPROM data offsets */
+
+
+
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "mvSysHwConfig.h"
+#include "boardEnv/mvBoardEnvSpec.h"
+
+
+/* DUART stuff for Tclk detection only */
+#define DUART_BAUD_RATE			115200
+#define MAX_CLOCK_MARGINE		5000000	/* Maximum detected clock margine */
+
+/* Voice devices assembly modes */
+#define DAISY_CHAIN_MODE	1
+#define DUAL_CHIP_SELECT_MODE   0
+#define INTERRUPT_TO_MPP        1
+#define INTERRUPT_TO_TDM	0
+
+
+#define BOARD_ETH_PORT_NUM  MV_ETH_MAX_PORTS
+#define BOARD_ETH_SWITCH_PORT_NUM	5
+
+#define	MV_BOARD_MAX_USB_IF		1
+#define MV_BOARD_MAX_MPP		7
+#define MV_BOARD_NAME_LEN  		0x20
+
+typedef struct _boardData
+{
+   MV_U32 magic;
+   MV_U16 boardId;
+   MV_U8 boardVer;
+   MV_U8 boardRev;
+   MV_U32 reserved1;
+   MV_U32 reserved2;
+
+}BOARD_DATA;
+
+typedef enum _devBoardMppGroupClass
+{
+	MV_BOARD_MPP_GROUP_1,
+	MV_BOARD_MPP_GROUP_2,
+	MV_BOARD_MAX_MPP_GROUP
+}MV_BOARD_MPP_GROUP_CLASS;
+
+typedef enum _devBoardMppTypeClass
+{
+	MV_BOARD_AUTO,
+	MV_BOARD_TDM,
+	MV_BOARD_AUDIO,
+	MV_BOARD_RGMII,
+	MV_BOARD_GMII,
+	MV_BOARD_TS,
+	MV_BOARD_MII,
+	MV_BOARD_OTHER
+}MV_BOARD_MPP_TYPE_CLASS;
+
+typedef enum _devBoardModuleIdClass
+{
+	MV_BOARD_MODULE_TDM_ID = 1,
+	MV_BOARD_MODULE_AUDIO_ID,
+	MV_BOARD_MODULE_RGMII_ID,
+	MV_BOARD_MODULE_GMII_ID,
+	MV_BOARD_MODULE_TS_ID,
+	MV_BOARD_MODULE_MII_ID,
+	MV_BOARD_MODULE_TDM_5CHAN_ID,
+	MV_BOARD_MODULE_OTHER_ID
+}MV_BOARD_MODULE_ID_CLASS;
+
+typedef struct _boardMppTypeInfo
+{
+	MV_BOARD_MPP_TYPE_CLASS	boardMppGroup1;
+	MV_BOARD_MPP_TYPE_CLASS	boardMppGroup2;
+
+}MV_BOARD_MPP_TYPE_INFO;
+
+
+typedef enum _devBoardClass
+{
+	BOARD_DEV_NOR_FLASH,
+	BOARD_DEV_NAND_FLASH,
+	BOARD_DEV_SEVEN_SEG,
+	BOARD_DEV_FPGA,
+	BOARD_DEV_SRAM,
+	BOARD_DEV_SPI_FLASH,
+	BOARD_DEV_OTHER,
+}MV_BOARD_DEV_CLASS;
+
+typedef enum _devTwsiBoardClass
+{
+	BOARD_TWSI_RTC,
+	BOARD_DEV_TWSI_EXP,
+	BOARD_DEV_TWSI_SATR,
+	BOARD_TWSI_AUDIO_DEC,
+	BOARD_TWSI_OTHER
+}MV_BOARD_TWSI_CLASS;
+
+typedef enum _devGppBoardClass
+{
+	BOARD_GPP_RTC,
+	BOARD_GPP_MV_SWITCH,
+	BOARD_GPP_USB_VBUS,
+	BOARD_GPP_USB_VBUS_EN,
+	BOARD_GPP_USB_OC,
+	BOARD_GPP_USB_HOST_DEVICE,
+	BOARD_GPP_REF_CLCK,
+	BOARD_GPP_VOIP_SLIC,
+	BOARD_GPP_LIFELINE,
+	BOARD_GPP_BUTTON,
+	BOARD_GPP_TS_BUTTON_C,
+	BOARD_GPP_TS_BUTTON_U,
+	BOARD_GPP_TS_BUTTON_D,
+	BOARD_GPP_TS_BUTTON_L,
+	BOARD_GPP_TS_BUTTON_R,
+	BOARD_GPP_POWER_BUTTON,
+	BOARD_GPP_RESTOR_BUTTON,
+	BOARD_GPP_WPS_BUTTON,
+	BOARD_GPP_HDD0_POWER,
+	BOARD_GPP_HDD1_POWER,
+	BOARD_GPP_FAN_POWER,
+	BOARD_GPP_RESET,
+	BOARD_GPP_POWER_ON_LED,
+	BOARD_GPP_HDD_POWER,
+    BOARD_GPP_SDIO_POWER,
+    BOARD_GPP_SDIO_DETECT,
+    BOARD_GPP_SDIO_WP,
+	BOARD_GPP_SWITCH_PHY_INT,
+	BOARD_GPP_TSU_DIRCTION,
+	BOARD_GPP_OTHER
+}MV_BOARD_GPP_CLASS;
+
+
+typedef struct _devCsInfo
+{
+    MV_U8		deviceCS;
+    MV_U32		params;
+    MV_U32		devClass;	/* MV_BOARD_DEV_CLASS */
+    MV_U8		devWidth;
+
+}MV_DEV_CS_INFO;
+
+
+#define MV_BOARD_PHY_FORCE_10MB		0x0
+#define MV_BOARD_PHY_FORCE_100MB	0x1
+#define MV_BOARD_PHY_FORCE_1000MB	0x2
+#define MV_BOARD_PHY_SPEED_AUTO		0x3
+
+typedef struct _boardSwitchInfo
+{
+	MV_32	linkStatusIrq;
+	MV_32	qdPort[BOARD_ETH_SWITCH_PORT_NUM];
+	MV_32	qdCpuPort;
+	MV_32	smiScanMode; /* 1 for SMI_MANUAL_MODE, 0 otherwise */
+	MV_32	switchOnPort;
+
+}MV_BOARD_SWITCH_INFO;
+
+typedef struct _boardLedInfo
+{
+	MV_U8	activeLedsNumber;
+	MV_U8	ledsPolarity;	/* '0' or '1' to turn on led */
+	MV_U8*	gppPinNum; 	/* Pointer to GPP values */
+
+}MV_BOARD_LED_INFO;
+
+typedef struct _boardGppInfo
+{
+	MV_BOARD_GPP_CLASS	devClass;
+	MV_U8	gppPinNum;
+
+}MV_BOARD_GPP_INFO;
+
+
+typedef struct _boardTwsiInfo
+{
+	MV_BOARD_TWSI_CLASS	devClass;
+	MV_U8	twsiDevAddr;
+	MV_U8	twsiDevAddrType;
+
+}MV_BOARD_TWSI_INFO;
+
+
+typedef enum _boardMacSpeed
+{
+	BOARD_MAC_SPEED_10M,
+	BOARD_MAC_SPEED_100M,
+	BOARD_MAC_SPEED_1000M,
+	BOARD_MAC_SPEED_AUTO,
+
+}MV_BOARD_MAC_SPEED;
+
+typedef struct _boardMacInfo
+{
+	MV_BOARD_MAC_SPEED	boardMacSpeed;
+	MV_U8	boardEthSmiAddr;
+
+}MV_BOARD_MAC_INFO;
+
+typedef struct _boardMppInfo
+{
+	MV_U32		mppGroup[MV_BOARD_MAX_MPP];
+
+}MV_BOARD_MPP_INFO;
+
+typedef struct _boardInfo
+{
+	char 			   	boardName[MV_BOARD_NAME_LEN];
+	MV_U8				numBoardMppTypeValue;
+	MV_BOARD_MPP_TYPE_INFO*		pBoardMppTypeValue;
+	MV_U8				numBoardMppConfigValue;
+	MV_BOARD_MPP_INFO*		pBoardMppConfigValue;
+	MV_U32				intsGppMaskLow;
+	MV_U32				intsGppMaskHigh;
+	MV_U8				numBoardDeviceIf;
+	MV_DEV_CS_INFO*			pDevCsInfo;
+	MV_U8				numBoardTwsiDev;
+	MV_BOARD_TWSI_INFO*		pBoardTwsiDev;
+	MV_U8				numBoardMacInfo;
+	MV_BOARD_MAC_INFO*		pBoardMacInfo;
+	MV_U8				numBoardGppInfo;
+	MV_BOARD_GPP_INFO*		pBoardGppInfo;
+	MV_U8				activeLedsNumber;
+	MV_U8*				pLedGppPin;
+	MV_U8				ledsPolarity;	/* '0' or '1' to turn on led */
+	/* GPP values */
+	MV_U32				gppOutEnValLow;
+	MV_U32				gppOutEnValHigh;
+	MV_U32				gppOutValLow;
+	MV_U32				gppOutValHigh;
+	MV_U32				gppPolarityValLow;
+	MV_U32				gppPolarityValHigh;
+
+	/* Switch Configuration */
+	MV_BOARD_SWITCH_INFO*		pSwitchInfo;
+}MV_BOARD_INFO;
+
+
+
+MV_VOID 	mvBoardEnvInit(MV_VOID);
+MV_U32      	mvBoardIdGet(MV_VOID);
+MV_U16      	mvBoardModelGet(MV_VOID);
+MV_U16      	mvBoardRevGet(MV_VOID);
+MV_STATUS	mvBoardNameGet(char *pNameBuff);
+MV_32      	mvBoardPhyAddrGet(MV_U32 ethPortNum);
+MV_BOARD_MAC_SPEED      mvBoardMacSpeedGet(MV_U32 ethPortNum);
+MV_32		mvBoardLinkStatusIrqGet(MV_U32 ethPortNum);
+MV_32		mvBoardSwitchPortGet(MV_U32 ethPortNum, MV_U8 boardPortNum);
+MV_32		mvBoardSwitchCpuPortGet(MV_U32 ethPortNum);
+MV_32		mvBoardIsSwitchConnected(MV_U32 ethPortNum);
+MV_32		mvBoardSmiScanModeGet(MV_U32 ethPortNum);
+MV_BOOL     	mvBoardIsPortInSgmii(MV_U32 ethPortNum);
+MV_BOOL 	mvBoardIsPortInGmii(MV_VOID);
+MV_U32 		mvBoardTclkGet(MV_VOID);
+MV_U32      	mvBoardSysClkGet(MV_VOID);
+MV_U32 		mvBoardDebugLedNumGet(MV_U32 boardId);
+MV_VOID     	mvBoardDebugLed(MV_U32 hexNum);
+MV_32      	mvBoardMppGet(MV_U32 mppGroupNum);
+
+MV_U8		mvBoardRtcTwsiAddrTypeGet(MV_VOID);
+MV_U8		mvBoardRtcTwsiAddrGet(MV_VOID);
+
+MV_U8		mvBoardA2DTwsiAddrTypeGet(MV_VOID);
+MV_U8		mvBoardA2DTwsiAddrGet(MV_VOID);
+
+MV_U8 		mvBoardTwsiExpAddrGet(MV_U32 index);
+MV_U8 		mvBoardTwsiSatRAddrTypeGet(MV_U32 index);
+MV_U8 		mvBoardTwsiSatRAddrGet(MV_U32 index);
+MV_U8 		mvBoardTwsiExpAddrTypeGet(MV_U32 index);
+MV_BOARD_MODULE_ID_CLASS 	mvBoarModuleTypeGet(MV_BOARD_MPP_GROUP_CLASS devClass);
+MV_BOARD_MPP_TYPE_CLASS 	mvBoardMppGroupTypeGet(MV_BOARD_MPP_GROUP_CLASS mppGroupClass);
+MV_VOID 	mvBoardMppGroupTypeSet(MV_BOARD_MPP_GROUP_CLASS mppGroupClass,
+						MV_BOARD_MPP_TYPE_CLASS mppGroupType);
+MV_VOID 	mvBoardMppGroupIdUpdate(MV_VOID);
+MV_VOID 	mvBoardMppMuxSet(MV_VOID);
+MV_VOID 	mvBoardTdmMppSet(MV_32 chType);
+MV_VOID 	mvBoardVoiceConnModeGet(MV_32* connMode, MV_32* irqMode);
+
+MV_VOID 	mvBoardMppModuleTypePrint(MV_VOID);
+MV_VOID	    	mvBoardReset(MV_VOID);
+MV_U8 		mvBoarTwsiSatRGet(MV_U8 devNum, MV_U8 regNum);
+MV_STATUS 		mvBoarTwsiSatRSet(MV_U8 devNum, MV_U8 regNum, MV_U8 regVal);
+MV_BOOL 	mvBoardSpecInitGet(MV_U32* regOff, MV_U32* data);
+/* Board devices API managments */
+MV_32  	    mvBoardGetDevicesNumber(MV_BOARD_DEV_CLASS devClass);
+MV_32  	    mvBoardGetDeviceBaseAddr(MV_32 devNum, MV_BOARD_DEV_CLASS devClass);
+MV_32	    mvBoardGetDeviceBusWidth(MV_32 devNum, MV_BOARD_DEV_CLASS devClass);
+MV_32  	    mvBoardGetDeviceWidth(MV_32 devNum, MV_BOARD_DEV_CLASS devClass);
+MV_32  	    mvBoardGetDeviceWinSize(MV_32 devNum, MV_BOARD_DEV_CLASS devClass);
+MV_U32 	    boardGetDevCSNum(MV_32 devNum, MV_BOARD_DEV_CLASS devClass);
+
+/* Gpio Pin Connections API */
+MV_32 	    mvBoardUSBVbusGpioPinGet(int devId);
+MV_32 	    mvBoardUSBVbusEnGpioPinGet(int devId);
+MV_U32      mvBoardPexBridgeIntPinGet(MV_U32 devNum, MV_U32 intPin);
+
+MV_32	    mvBoardResetGpioPinGet(MV_VOID);
+MV_32 	    mvBoardRTCGpioPinGet(MV_VOID);
+MV_32 	    mvBoardGpioIntMaskLowGet(MV_VOID);
+MV_32 	    mvBoardGpioIntMaskHighGet(MV_VOID);
+MV_32 	    mvBoardSlicGpioPinGet(MV_U32 slicNum);
+
+MV_32	    mvBoardSDIOGpioPinGet(MV_VOID);
+MV_STATUS   mvBoardSDioWPControl(MV_BOOL mode);
+MV_32	    mvBoarGpioPinNumGet(MV_BOARD_GPP_CLASS class, MV_U32 index);
+
+MV_32 	    mvBoardNandWidthGet(void);
+MV_STATUS   mvBoardFanPowerControl(MV_BOOL mode);
+MV_STATUS   mvBoardHDDPowerControl(MV_BOOL mode);
+#endif /* __INCmvBoardEnvLibh */
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvSpec.c b/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvSpec.c
new file mode 100644
index 000000000000..c9260af4478c
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvSpec.c
@@ -0,0 +1,846 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#include "mvCommon.h"
+#include "mvBoardEnvLib.h"
+#include "mvBoardEnvSpec.h"
+#include "twsi/mvTwsi.h"
+
+#define DB_88F6281A_BOARD_PCI_IF_NUM            0x0
+#define DB_88F6281A_BOARD_TWSI_DEF_NUM		    0x7
+#define DB_88F6281A_BOARD_MAC_INFO_NUM		    0x2
+#define DB_88F6281A_BOARD_GPP_INFO_NUM		    0x3
+#define DB_88F6281A_BOARD_MPP_CONFIG_NUM		0x1
+#define DB_88F6281A_BOARD_MPP_GROUP_TYPE_NUM	0x1
+#if defined(MV_NAND) && defined(MV_NAND_BOOT)
+    #define DB_88F6281A_BOARD_DEVICE_CONFIG_NUM	    0x1
+#elif defined(MV_NAND) && defined(MV_SPI_BOOT)
+    #define DB_88F6281A_BOARD_DEVICE_CONFIG_NUM	    0x2
+#else
+    #define DB_88F6281A_BOARD_DEVICE_CONFIG_NUM	    0x1
+#endif
+#define DB_88F6281A_BOARD_DEBUG_LED_NUM		    0x0
+
+
+MV_BOARD_TWSI_INFO	db88f6281AInfoBoardTwsiDev[] =
+	/* {{MV_BOARD_DEV_CLASS	devClass, MV_U8	twsiDevAddr, MV_U8 twsiDevAddrType}} */
+	{
+	{BOARD_DEV_TWSI_EXP, 0x20, ADDR7_BIT},
+	{BOARD_DEV_TWSI_EXP, 0x21, ADDR7_BIT},
+	{BOARD_DEV_TWSI_EXP, 0x27, ADDR7_BIT},
+	{BOARD_DEV_TWSI_SATR, 0x4C, ADDR7_BIT},
+	{BOARD_DEV_TWSI_SATR, 0x4D, ADDR7_BIT},
+	{BOARD_DEV_TWSI_SATR, 0x4E, ADDR7_BIT},
+	{BOARD_TWSI_AUDIO_DEC, 0x4A, ADDR7_BIT}
+	};
+
+MV_BOARD_MAC_INFO db88f6281AInfoBoardMacInfo[] =
+	/* {{MV_BOARD_MAC_SPEED	boardMacSpeed, MV_U8 boardEthSmiAddr}} */
+	{
+	{BOARD_MAC_SPEED_AUTO, 0x8},
+	{BOARD_MAC_SPEED_AUTO, 0x9}
+	};
+
+MV_BOARD_MPP_TYPE_INFO db88f6281AInfoBoardMppTypeInfo[] =
+	/* {{MV_BOARD_MPP_TYPE_CLASS	boardMppGroup1,
+		MV_BOARD_MPP_TYPE_CLASS	boardMppGroup2}} */
+	{{MV_BOARD_AUTO, MV_BOARD_AUTO}
+	};
+
+MV_BOARD_GPP_INFO db88f6281AInfoBoardGppInfo[] =
+	/* {{MV_BOARD_GPP_CLASS	devClass, MV_U8	gppPinNum}} */
+	{
+	{BOARD_GPP_TSU_DIRCTION, 33}
+	/*muxed with TDM/Audio module via IOexpender
+	{BOARD_GPP_SDIO_DETECT, 38},
+	{BOARD_GPP_USB_VBUS, 49}*/
+	};
+
+MV_DEV_CS_INFO db88f6281AInfoBoardDeCsInfo[] =
+		/*{deviceCS, params, devType, devWidth}*/
+#if defined(MV_NAND) && defined(MV_NAND_BOOT)
+		 {{0, N_A, BOARD_DEV_NAND_FLASH, 8}};	   /* NAND DEV */
+#elif defined(MV_NAND) && defined(MV_SPI_BOOT)
+		 {
+         {0, N_A, BOARD_DEV_NAND_FLASH, 8},	   /* NAND DEV */
+         {1, N_A, BOARD_DEV_SPI_FLASH, 8},	   /* SPI DEV */
+         };
+#else
+	 {{1, N_A, BOARD_DEV_SPI_FLASH, 8}};	   /* SPI DEV */
+#endif
+
+MV_BOARD_MPP_INFO	db88f6281AInfoBoardMppConfigValue[] =
+	{{{
+	DB_88F6281A_MPP0_7,
+	DB_88F6281A_MPP8_15,
+	DB_88F6281A_MPP16_23,
+	DB_88F6281A_MPP24_31,
+	DB_88F6281A_MPP32_39,
+	DB_88F6281A_MPP40_47,
+	DB_88F6281A_MPP48_55
+	}}};
+
+
+MV_BOARD_INFO db88f6281AInfo = {
+	"DB-88F6281A-BP",				/* boardName[MAX_BOARD_NAME_LEN] */
+	DB_88F6281A_BOARD_MPP_GROUP_TYPE_NUM,		/* numBoardMppGroupType */
+	db88f6281AInfoBoardMppTypeInfo,
+	DB_88F6281A_BOARD_MPP_CONFIG_NUM,		/* numBoardMppConfig */
+	db88f6281AInfoBoardMppConfigValue,
+	0,						/* intsGppMaskLow */
+	0,						/* intsGppMaskHigh */
+	DB_88F6281A_BOARD_DEVICE_CONFIG_NUM,		/* numBoardDevIf */
+	db88f6281AInfoBoardDeCsInfo,
+	DB_88F6281A_BOARD_TWSI_DEF_NUM,			/* numBoardTwsiDev */
+	db88f6281AInfoBoardTwsiDev,
+	DB_88F6281A_BOARD_MAC_INFO_NUM,			/* numBoardMacInfo */
+	db88f6281AInfoBoardMacInfo,
+	DB_88F6281A_BOARD_GPP_INFO_NUM,			/* numBoardGppInfo */
+	db88f6281AInfoBoardGppInfo,
+	DB_88F6281A_BOARD_DEBUG_LED_NUM,			/* activeLedsNumber */
+	NULL,
+	0,						/* ledsPolarity */
+	DB_88F6281A_OE_LOW,				/* gppOutEnLow */
+	DB_88F6281A_OE_HIGH,				/* gppOutEnHigh */
+	DB_88F6281A_OE_VAL_LOW,				/* gppOutValLow */
+	DB_88F6281A_OE_VAL_HIGH,				/* gppOutValHigh */
+	0,						/* gppPolarityValLow */
+	BIT6, 						/* gppPolarityValHigh */
+	NULL						/* pSwitchInfo */
+};
+
+
+#define RD_88F6281A_BOARD_PCI_IF_NUM		0x0
+#define RD_88F6281A_BOARD_TWSI_DEF_NUM		0x2
+#define RD_88F6281A_BOARD_MAC_INFO_NUM		0x2
+#define RD_88F6281A_BOARD_GPP_INFO_NUM		0x5
+#define RD_88F6281A_BOARD_MPP_GROUP_TYPE_NUM	0x1
+#define RD_88F6281A_BOARD_MPP_CONFIG_NUM		0x1
+#if defined(MV_NAND) && defined(MV_NAND_BOOT)
+    #define RD_88F6281A_BOARD_DEVICE_CONFIG_NUM	    0x1
+#elif defined(MV_NAND) && defined(MV_SPI_BOOT)
+    #define RD_88F6281A_BOARD_DEVICE_CONFIG_NUM	    0x2
+#else
+    #define RD_88F6281A_BOARD_DEVICE_CONFIG_NUM	    0x1
+#endif
+#define RD_88F6281A_BOARD_DEBUG_LED_NUM		0x0
+
+MV_BOARD_MAC_INFO rd88f6281AInfoBoardMacInfo[] =
+	/* {{MV_BOARD_MAC_SPEED	boardMacSpeed, MV_U8 boardEthSmiAddr}} */
+	{{BOARD_MAC_SPEED_1000M, 0xa},
+    {BOARD_MAC_SPEED_AUTO, 0xb}
+	};
+
+MV_BOARD_SWITCH_INFO rd88f6281AInfoBoardSwitchInfo[] =
+	/* MV_32 linkStatusIrq, {MV_32 qdPort0, MV_32 qdPort1, MV_32 qdPort2, MV_32 qdPort3, MV_32 qdPort4},
+		MV_32 qdCpuPort, MV_32 smiScanMode, MV_32 switchOnPort} */
+	{{38, {0, 1, 2, 3, -1}, 5, 2, 0},
+	 {-1, {-1}, -1, -1, -1}};
+
+MV_BOARD_TWSI_INFO	rd88f6281AInfoBoardTwsiDev[] =
+	/* {{MV_BOARD_DEV_CLASS	devClass, MV_U8	twsiDevAddr, MV_U8 twsiDevAddrType}} */
+	{
+	{BOARD_DEV_TWSI_EXP, 0xFF, ADDR7_BIT}, /* dummy entry to align with modules indexes */
+	{BOARD_DEV_TWSI_EXP, 0x27, ADDR7_BIT}
+	};
+
+MV_BOARD_MPP_TYPE_INFO rd88f6281AInfoBoardMppTypeInfo[] =
+	{{MV_BOARD_RGMII, MV_BOARD_TDM}
+	};
+
+MV_DEV_CS_INFO rd88f6281AInfoBoardDeCsInfo[] =
+		/*{deviceCS, params, devType, devWidth}*/
+#if defined(MV_NAND) && defined(MV_NAND_BOOT)
+		 {{0, N_A, BOARD_DEV_NAND_FLASH, 8}};	   /* NAND DEV */
+#elif defined(MV_NAND) && defined(MV_SPI_BOOT)
+		 {
+         {0, N_A, BOARD_DEV_NAND_FLASH, 8},	   /* NAND DEV */
+         {1, N_A, BOARD_DEV_SPI_FLASH, 8},	   /* SPI DEV */
+         };
+#else
+		 {{1, N_A, BOARD_DEV_SPI_FLASH, 8}};	   /* SPI DEV */
+#endif
+
+MV_BOARD_GPP_INFO rd88f6281AInfoBoardGppInfo[] =
+	/* {{MV_BOARD_GPP_CLASS	devClass, MV_U8	gppPinNum}} */
+	{{BOARD_GPP_SDIO_DETECT, 28},
+    {BOARD_GPP_USB_OC, 29},
+    {BOARD_GPP_WPS_BUTTON, 35},
+    {BOARD_GPP_MV_SWITCH, 38},
+    {BOARD_GPP_USB_VBUS, 49}
+	};
+
+MV_BOARD_MPP_INFO	rd88f6281AInfoBoardMppConfigValue[] =
+	{{{
+	RD_88F6281A_MPP0_7,
+	RD_88F6281A_MPP8_15,
+	RD_88F6281A_MPP16_23,
+	RD_88F6281A_MPP24_31,
+	RD_88F6281A_MPP32_39,
+	RD_88F6281A_MPP40_47,
+	RD_88F6281A_MPP48_55
+	}}};
+
+MV_BOARD_INFO rd88f6281AInfo = {
+	"RD-88F6281A",				/* boardName[MAX_BOARD_NAME_LEN] */
+	RD_88F6281A_BOARD_MPP_GROUP_TYPE_NUM,		/* numBoardMppGroupType */
+	rd88f6281AInfoBoardMppTypeInfo,
+	RD_88F6281A_BOARD_MPP_CONFIG_NUM,		/* numBoardMppConfig */
+	rd88f6281AInfoBoardMppConfigValue,
+	0,						/* intsGppMaskLow */
+	(1 << 3),					/* intsGppMaskHigh */
+	RD_88F6281A_BOARD_DEVICE_CONFIG_NUM,		/* numBoardDevIf */
+	rd88f6281AInfoBoardDeCsInfo,
+	RD_88F6281A_BOARD_TWSI_DEF_NUM,			/* numBoardTwsiDev */
+	rd88f6281AInfoBoardTwsiDev,
+	RD_88F6281A_BOARD_MAC_INFO_NUM,			/* numBoardMacInfo */
+	rd88f6281AInfoBoardMacInfo,
+	RD_88F6281A_BOARD_GPP_INFO_NUM,			/* numBoardGppInfo */
+	rd88f6281AInfoBoardGppInfo,
+	RD_88F6281A_BOARD_DEBUG_LED_NUM,			/* activeLedsNumber */
+	NULL,
+	0,										/* ledsPolarity */
+	RD_88F6281A_OE_LOW,				/* gppOutEnLow */
+	RD_88F6281A_OE_HIGH,				/* gppOutEnHigh */
+	RD_88F6281A_OE_VAL_LOW,				/* gppOutValLow */
+	RD_88F6281A_OE_VAL_HIGH,				/* gppOutValHigh */
+	0,						/* gppPolarityValLow */
+	BIT6, 						/* gppPolarityValHigh */
+	rd88f6281AInfoBoardSwitchInfo			/* pSwitchInfo */
+};
+
+
+#define DB_88F6192A_BOARD_PCI_IF_NUM            0x0
+#define DB_88F6192A_BOARD_TWSI_DEF_NUM		    0x7
+#define DB_88F6192A_BOARD_MAC_INFO_NUM		    0x2
+#define DB_88F6192A_BOARD_GPP_INFO_NUM		    0x3
+#define DB_88F6192A_BOARD_MPP_GROUP_TYPE_NUM	0x1
+#define DB_88F6192A_BOARD_MPP_CONFIG_NUM		0x1
+#if defined(MV_NAND) && defined(MV_NAND_BOOT)
+    #define DB_88F6192A_BOARD_DEVICE_CONFIG_NUM	    0x1
+#elif defined(MV_NAND) && defined(MV_SPI_BOOT)
+    #define DB_88F6192A_BOARD_DEVICE_CONFIG_NUM	    0x2
+#else
+    #define DB_88F6192A_BOARD_DEVICE_CONFIG_NUM	    0x1
+#endif
+#define DB_88F6192A_BOARD_DEBUG_LED_NUM		    0x0
+
+MV_BOARD_TWSI_INFO	db88f6192AInfoBoardTwsiDev[] =
+	/* {{MV_BOARD_DEV_CLASS	devClass, MV_U8	twsiDevAddr, MV_U8 twsiDevAddrType}} */
+	{
+	{BOARD_DEV_TWSI_EXP, 0x20, ADDR7_BIT},
+	{BOARD_DEV_TWSI_EXP, 0x21, ADDR7_BIT},
+	{BOARD_DEV_TWSI_EXP, 0x27, ADDR7_BIT},
+	{BOARD_DEV_TWSI_SATR, 0x4C, ADDR7_BIT},
+	{BOARD_DEV_TWSI_SATR, 0x4D, ADDR7_BIT},
+	{BOARD_DEV_TWSI_SATR, 0x4E, ADDR7_BIT},
+	{BOARD_TWSI_AUDIO_DEC, 0x4A, ADDR7_BIT}
+	};
+
+MV_BOARD_MAC_INFO db88f6192AInfoBoardMacInfo[] =
+	/* {{MV_BOARD_MAC_SPEED	boardMacSpeed, MV_U8 boardEthSmiAddr}} */
+	{
+	{BOARD_MAC_SPEED_AUTO, 0x8},
+	{BOARD_MAC_SPEED_AUTO, 0x9}
+	};
+
+MV_BOARD_MPP_TYPE_INFO db88f6192AInfoBoardMppTypeInfo[] =
+	/* {{MV_BOARD_MPP_TYPE_CLASS	boardMppGroup1,
+		MV_BOARD_MPP_TYPE_CLASS	boardMppGroup2}} */
+	{{MV_BOARD_AUTO, MV_BOARD_OTHER}
+	};
+
+MV_DEV_CS_INFO db88f6192AInfoBoardDeCsInfo[] =
+		/*{deviceCS, params, devType, devWidth}*/
+#if defined(MV_NAND) && defined(MV_NAND_BOOT)
+		 {{0, N_A, BOARD_DEV_NAND_FLASH, 8}};	   /* NAND DEV */
+#elif defined(MV_NAND) && defined(MV_SPI_BOOT)
+		 {
+         {0, N_A, BOARD_DEV_NAND_FLASH, 8},	   /* NAND DEV */
+         {1, N_A, BOARD_DEV_SPI_FLASH, 8},	   /* SPI DEV */
+         };
+#else
+		 {{1, N_A, BOARD_DEV_SPI_FLASH, 8}};	   /* SPI DEV */
+#endif
+
+MV_BOARD_GPP_INFO db88f6192AInfoBoardGppInfo[] =
+	/* {{MV_BOARD_GPP_CLASS	devClass, MV_U8	gppPinNum}} */
+	{
+    {BOARD_GPP_SDIO_WP, 20},
+	{BOARD_GPP_USB_VBUS, 22},
+	{BOARD_GPP_SDIO_DETECT, 23},
+	};
+
+MV_BOARD_MPP_INFO	db88f6192AInfoBoardMppConfigValue[] =
+	{{{
+	DB_88F6192A_MPP0_7,
+	DB_88F6192A_MPP8_15,
+	DB_88F6192A_MPP16_23,
+	DB_88F6192A_MPP24_31,
+	DB_88F6192A_MPP32_35
+	}}};
+
+MV_BOARD_INFO db88f6192AInfo = {
+	"DB-88F6192A-BP",				/* boardName[MAX_BOARD_NAME_LEN] */
+	DB_88F6192A_BOARD_MPP_GROUP_TYPE_NUM,		/* numBoardMppGroupType */
+	db88f6192AInfoBoardMppTypeInfo,
+	DB_88F6192A_BOARD_MPP_CONFIG_NUM,		/* numBoardMppConfig */
+	db88f6192AInfoBoardMppConfigValue,
+	0,						/* intsGppMaskLow */
+	(1 << 3),					/* intsGppMaskHigh */
+	DB_88F6192A_BOARD_DEVICE_CONFIG_NUM,		/* numBoardDevIf */
+	db88f6192AInfoBoardDeCsInfo,
+	DB_88F6192A_BOARD_TWSI_DEF_NUM,			/* numBoardTwsiDev */
+	db88f6192AInfoBoardTwsiDev,
+	DB_88F6192A_BOARD_MAC_INFO_NUM,			/* numBoardMacInfo */
+	db88f6192AInfoBoardMacInfo,
+	DB_88F6192A_BOARD_GPP_INFO_NUM,			/* numBoardGppInfo */
+	db88f6192AInfoBoardGppInfo,
+	DB_88F6192A_BOARD_DEBUG_LED_NUM,			/* activeLedsNumber */
+	NULL,
+	0,										/* ledsPolarity */
+	DB_88F6192A_OE_LOW,				/* gppOutEnLow */
+	DB_88F6192A_OE_HIGH,				/* gppOutEnHigh */
+	DB_88F6192A_OE_VAL_LOW,				/* gppOutValLow */
+	DB_88F6192A_OE_VAL_HIGH,				/* gppOutValHigh */
+	0,						/* gppPolarityValLow */
+	0, 						/* gppPolarityValHigh */
+	NULL						/* pSwitchInfo */
+};
+
+#define DB_88F6190A_BOARD_MAC_INFO_NUM		0x1
+
+MV_BOARD_INFO db88f6190AInfo = {
+	"DB-88F6190A-BP",				/* boardName[MAX_BOARD_NAME_LEN] */
+	DB_88F6192A_BOARD_MPP_GROUP_TYPE_NUM,		/* numBoardMppGroupType */
+	db88f6192AInfoBoardMppTypeInfo,
+	DB_88F6192A_BOARD_MPP_CONFIG_NUM,		/* numBoardMppConfig */
+	db88f6192AInfoBoardMppConfigValue,
+	0,						/* intsGppMaskLow */
+	(1 << 3),					/* intsGppMaskHigh */
+	DB_88F6192A_BOARD_DEVICE_CONFIG_NUM,		/* numBoardDevIf */
+	db88f6192AInfoBoardDeCsInfo,
+	DB_88F6192A_BOARD_TWSI_DEF_NUM,			/* numBoardTwsiDev */
+	db88f6192AInfoBoardTwsiDev,
+	DB_88F6190A_BOARD_MAC_INFO_NUM,			/* numBoardMacInfo */
+	db88f6192AInfoBoardMacInfo,
+	DB_88F6192A_BOARD_GPP_INFO_NUM,			/* numBoardGppInfo */
+	db88f6192AInfoBoardGppInfo,
+	DB_88F6192A_BOARD_DEBUG_LED_NUM,			/* activeLedsNumber */
+	NULL,
+	0,										/* ledsPolarity */
+	DB_88F6192A_OE_LOW,				/* gppOutEnLow */
+	DB_88F6192A_OE_HIGH,				/* gppOutEnHigh */
+	DB_88F6192A_OE_VAL_LOW,				/* gppOutValLow */
+	DB_88F6192A_OE_VAL_HIGH,				/* gppOutValHigh */
+	0,						/* gppPolarityValLow */
+	0, 						/* gppPolarityValHigh */
+	NULL						/* pSwitchInfo */
+};
+
+#define RD_88F6192A_BOARD_PCI_IF_NUM		0x0
+#define RD_88F6192A_BOARD_TWSI_DEF_NUM		0x0
+#define RD_88F6192A_BOARD_MAC_INFO_NUM		0x1
+#define RD_88F6192A_BOARD_GPP_INFO_NUM		0xE
+#define RD_88F6192A_BOARD_MPP_GROUP_TYPE_NUM	0x1
+#define RD_88F6192A_BOARD_MPP_CONFIG_NUM		0x1
+#define RD_88F6192A_BOARD_DEVICE_CONFIG_NUM	0x1
+#define RD_88F6192A_BOARD_DEBUG_LED_NUM		0x3
+
+MV_U8	rd88f6192AInfoBoardDebugLedIf[] =
+	{17, 28, 29};
+
+MV_BOARD_MAC_INFO rd88f6192AInfoBoardMacInfo[] =
+	/* {{MV_BOARD_MAC_SPEED	boardMacSpeed, MV_U8 boardEthSmiAddr}} */
+	{{BOARD_MAC_SPEED_AUTO, 0x8}
+	};
+
+MV_BOARD_MPP_TYPE_INFO rd88f6192AInfoBoardMppTypeInfo[] =
+	/* {{MV_BOARD_MPP_TYPE_CLASS	boardMppGroup1,
+		MV_BOARD_MPP_TYPE_CLASS	boardMppGroup2}} */
+	{{MV_BOARD_OTHER, MV_BOARD_OTHER}
+	};
+
+MV_DEV_CS_INFO rd88f6192AInfoBoardDeCsInfo[] =
+		/*{deviceCS, params, devType, devWidth}*/
+		 {{1, N_A, BOARD_DEV_SPI_FLASH, 8}};	   /* SPI DEV */
+
+MV_BOARD_GPP_INFO rd88f6192AInfoBoardGppInfo[] =
+	/* {{MV_BOARD_GPP_CLASS	devClass, MV_U8	gppPinNum}} */
+	{
+	{BOARD_GPP_USB_VBUS_EN, 10},
+	{BOARD_GPP_USB_HOST_DEVICE, 11},
+	{BOARD_GPP_RESET, 14},
+	{BOARD_GPP_POWER_ON_LED, 15},
+	{BOARD_GPP_HDD_POWER, 16},
+	{BOARD_GPP_WPS_BUTTON, 24},
+	{BOARD_GPP_TS_BUTTON_C, 25},
+	{BOARD_GPP_USB_VBUS, 26},
+	{BOARD_GPP_USB_OC, 27},
+	{BOARD_GPP_TS_BUTTON_U, 30},
+	{BOARD_GPP_TS_BUTTON_R, 31},
+	{BOARD_GPP_TS_BUTTON_L, 32},
+	{BOARD_GPP_TS_BUTTON_D, 34},
+	{BOARD_GPP_FAN_POWER, 35}
+	};
+
+MV_BOARD_MPP_INFO	rd88f6192AInfoBoardMppConfigValue[] =
+	{{{
+	RD_88F6192A_MPP0_7,
+	RD_88F6192A_MPP8_15,
+	RD_88F6192A_MPP16_23,
+	RD_88F6192A_MPP24_31,
+	RD_88F6192A_MPP32_35
+	}}};
+
+MV_BOARD_INFO rd88f6192AInfo = {
+	"RD-88F6192A-NAS",				/* boardName[MAX_BOARD_NAME_LEN] */
+	RD_88F6192A_BOARD_MPP_GROUP_TYPE_NUM,		/* numBoardMppGroupType */
+	rd88f6192AInfoBoardMppTypeInfo,
+	RD_88F6192A_BOARD_MPP_CONFIG_NUM,		/* numBoardMppConfig */
+	rd88f6192AInfoBoardMppConfigValue,
+	0,						/* intsGppMaskLow */
+	(1 << 3),					/* intsGppMaskHigh */
+	RD_88F6192A_BOARD_DEVICE_CONFIG_NUM,		/* numBoardDevIf */
+	rd88f6192AInfoBoardDeCsInfo,
+	RD_88F6192A_BOARD_TWSI_DEF_NUM,			/* numBoardTwsiDev */
+	NULL,
+	RD_88F6192A_BOARD_MAC_INFO_NUM,			/* numBoardMacInfo */
+	rd88f6192AInfoBoardMacInfo,
+	RD_88F6192A_BOARD_GPP_INFO_NUM,			/* numBoardGppInfo */
+	rd88f6192AInfoBoardGppInfo,
+	RD_88F6192A_BOARD_DEBUG_LED_NUM,			/* activeLedsNumber */
+	rd88f6192AInfoBoardDebugLedIf,
+	0,										/* ledsPolarity */
+	RD_88F6192A_OE_LOW,				/* gppOutEnLow */
+	RD_88F6192A_OE_HIGH,				/* gppOutEnHigh */
+	RD_88F6192A_OE_VAL_LOW,				/* gppOutValLow */
+	RD_88F6192A_OE_VAL_HIGH,				/* gppOutValHigh */
+	0,						/* gppPolarityValLow */
+	0, 						/* gppPolarityValHigh */
+	NULL						/* pSwitchInfo */
+};
+
+MV_BOARD_INFO rd88f6190AInfo = {
+	"RD-88F6190A-NAS",				/* boardName[MAX_BOARD_NAME_LEN] */
+	RD_88F6192A_BOARD_MPP_GROUP_TYPE_NUM,		/* numBoardMppGroupType */
+	rd88f6192AInfoBoardMppTypeInfo,
+	RD_88F6192A_BOARD_MPP_CONFIG_NUM,		/* numBoardMppConfig */
+	rd88f6192AInfoBoardMppConfigValue,
+	0,						/* intsGppMaskLow */
+	(1 << 3),					/* intsGppMaskHigh */
+	RD_88F6192A_BOARD_DEVICE_CONFIG_NUM,		/* numBoardDevIf */
+	rd88f6192AInfoBoardDeCsInfo,
+	RD_88F6192A_BOARD_TWSI_DEF_NUM,			/* numBoardTwsiDev */
+	NULL,
+	RD_88F6192A_BOARD_MAC_INFO_NUM,			/* numBoardMacInfo */
+	rd88f6192AInfoBoardMacInfo,
+	RD_88F6192A_BOARD_GPP_INFO_NUM,			/* numBoardGppInfo */
+	rd88f6192AInfoBoardGppInfo,
+	RD_88F6192A_BOARD_DEBUG_LED_NUM,			/* activeLedsNumber */
+	rd88f6192AInfoBoardDebugLedIf,
+	0,										/* ledsPolarity */
+	RD_88F6192A_OE_LOW,				/* gppOutEnLow */
+	RD_88F6192A_OE_HIGH,				/* gppOutEnHigh */
+	RD_88F6192A_OE_VAL_LOW,				/* gppOutValLow */
+	RD_88F6192A_OE_VAL_HIGH,				/* gppOutValHigh */
+	0,						/* gppPolarityValLow */
+	0, 						/* gppPolarityValHigh */
+	NULL						/* pSwitchInfo */
+};
+
+#define DB_88F6180A_BOARD_PCI_IF_NUM		0x0
+#define DB_88F6180A_BOARD_TWSI_DEF_NUM		0x5
+#define DB_88F6180A_BOARD_MAC_INFO_NUM		0x1
+#define DB_88F6180A_BOARD_GPP_INFO_NUM		0x0
+#define DB_88F6180A_BOARD_MPP_GROUP_TYPE_NUM	0x2
+#define DB_88F6180A_BOARD_MPP_CONFIG_NUM		0x1
+#define DB_88F6180A_BOARD_DEVICE_CONFIG_NUM	    0x1
+#define DB_88F6180A_BOARD_DEBUG_LED_NUM		0x0
+
+MV_BOARD_TWSI_INFO	db88f6180AInfoBoardTwsiDev[] =
+	/* {{MV_BOARD_DEV_CLASS	devClass, MV_U8	twsiDevAddr, MV_U8 twsiDevAddrType}} */
+	{
+    {BOARD_DEV_TWSI_EXP, 0x20, ADDR7_BIT},
+    {BOARD_DEV_TWSI_EXP, 0x21, ADDR7_BIT},
+    {BOARD_DEV_TWSI_EXP, 0x27, ADDR7_BIT},
+	{BOARD_DEV_TWSI_SATR, 0x4C, ADDR7_BIT},
+	{BOARD_TWSI_AUDIO_DEC, 0x4A, ADDR7_BIT}
+	};
+
+MV_BOARD_MAC_INFO db88f6180AInfoBoardMacInfo[] =
+	/* {{MV_BOARD_MAC_SPEED	boardMacSpeed, MV_U8 boardEthSmiAddr}} */
+	{{BOARD_MAC_SPEED_AUTO, 0x8}
+	};
+
+MV_BOARD_GPP_INFO db88f6180AInfoBoardGppInfo[] =
+	/* {{MV_BOARD_GPP_CLASS	devClass, MV_U8	gppPinNum}} */
+	{
+	/* Muxed with TDM/Audio module via IOexpender
+	{BOARD_GPP_USB_VBUS, 6} */
+	};
+
+MV_BOARD_MPP_TYPE_INFO db88f6180AInfoBoardMppTypeInfo[] =
+	/* {{MV_BOARD_MPP_TYPE_CLASS	boardMppGroup1,
+		MV_BOARD_MPP_TYPE_CLASS	boardMppGroup2}} */
+	{{MV_BOARD_OTHER, MV_BOARD_AUTO}
+	};
+
+MV_DEV_CS_INFO db88f6180AInfoBoardDeCsInfo[] =
+		/*{deviceCS, params, devType, devWidth}*/
+#if defined(MV_NAND_BOOT)
+		 {{0, N_A, BOARD_DEV_NAND_FLASH, 8}};	   /* NAND DEV */
+#else
+		 {{1, N_A, BOARD_DEV_SPI_FLASH, 8}};	   /* SPI DEV */
+#endif
+
+MV_BOARD_MPP_INFO	db88f6180AInfoBoardMppConfigValue[] =
+	{{{
+	DB_88F6180A_MPP0_7,
+	DB_88F6180A_MPP8_15,
+    DB_88F6180A_MPP16_23,
+    DB_88F6180A_MPP24_31,
+    DB_88F6180A_MPP32_39,
+    DB_88F6180A_MPP40_44
+	}}};
+
+MV_BOARD_INFO db88f6180AInfo = {
+	"DB-88F6180A-BP",				/* boardName[MAX_BOARD_NAME_LEN] */
+	DB_88F6180A_BOARD_MPP_GROUP_TYPE_NUM,		/* numBoardMppGroupType */
+	db88f6180AInfoBoardMppTypeInfo,
+	DB_88F6180A_BOARD_MPP_CONFIG_NUM,		/* numBoardMppConfig */
+	db88f6180AInfoBoardMppConfigValue,
+	0,						/* intsGppMaskLow */
+	0,					/* intsGppMaskHigh */
+	DB_88F6180A_BOARD_DEVICE_CONFIG_NUM,		/* numBoardDevIf */
+	db88f6180AInfoBoardDeCsInfo,
+	DB_88F6180A_BOARD_TWSI_DEF_NUM,			/* numBoardTwsiDev */
+	db88f6180AInfoBoardTwsiDev,
+	DB_88F6180A_BOARD_MAC_INFO_NUM,			/* numBoardMacInfo */
+	db88f6180AInfoBoardMacInfo,
+	DB_88F6180A_BOARD_GPP_INFO_NUM,			/* numBoardGppInfo */
+	NULL,
+	DB_88F6180A_BOARD_DEBUG_LED_NUM,			/* activeLedsNumber */
+	NULL,
+	0,										/* ledsPolarity */
+	DB_88F6180A_OE_LOW,				/* gppOutEnLow */
+	DB_88F6180A_OE_HIGH,				/* gppOutEnHigh */
+	DB_88F6180A_OE_VAL_LOW,				/* gppOutValLow */
+	DB_88F6180A_OE_VAL_HIGH,				/* gppOutValHigh */
+	0,						/* gppPolarityValLow */
+	0, 						/* gppPolarityValHigh */
+	NULL						/* pSwitchInfo */
+};
+
+
+#define RD_88F6281A_PCAC_BOARD_PCI_IF_NUM		0x0
+#define RD_88F6281A_PCAC_BOARD_TWSI_DEF_NUM		0x1
+#define RD_88F6281A_PCAC_BOARD_MAC_INFO_NUM		0x1
+#define RD_88F6281A_PCAC_BOARD_GPP_INFO_NUM		0x0
+#define RD_88F6281A_PCAC_BOARD_MPP_GROUP_TYPE_NUM	0x1
+#define RD_88F6281A_PCAC_BOARD_MPP_CONFIG_NUM		0x1
+#if defined(MV_NAND) && defined(MV_NAND_BOOT)
+    #define RD_88F6281A_PCAC_BOARD_DEVICE_CONFIG_NUM	    0x1
+#elif defined(MV_NAND) && defined(MV_SPI_BOOT)
+    #define RD_88F6281A_PCAC_BOARD_DEVICE_CONFIG_NUM	    0x2
+#else
+    #define RD_88F6281A_PCAC_BOARD_DEVICE_CONFIG_NUM	    0x1
+#endif
+#define RD_88F6281A_PCAC_BOARD_DEBUG_LED_NUM		0x4
+
+MV_U8	rd88f6281APcacInfoBoardDebugLedIf[] =
+	{38, 39, 40, 41};
+
+MV_BOARD_MAC_INFO rd88f6281APcacInfoBoardMacInfo[] =
+	/* {{MV_BOARD_MAC_SPEED	boardMacSpeed, MV_U8 boardEthSmiAddr}} */
+	{{BOARD_MAC_SPEED_AUTO, 0x8}
+	};
+
+MV_BOARD_TWSI_INFO	rd88f6281APcacInfoBoardTwsiDev[] =
+	/* {{MV_BOARD_DEV_CLASS	devClass, MV_U8	twsiDevAddr, MV_U8 twsiDevAddrType}} */
+	{
+	{BOARD_TWSI_OTHER, 0xa7, ADDR7_BIT}
+	};
+
+MV_BOARD_MPP_TYPE_INFO rd88f6281APcacInfoBoardMppTypeInfo[] =
+	{{MV_BOARD_OTHER, MV_BOARD_OTHER}
+	};
+
+MV_DEV_CS_INFO rd88f6281APcacInfoBoardDeCsInfo[] =
+		/*{deviceCS, params, devType, devWidth}*/
+#if defined(MV_NAND) && defined(MV_NAND_BOOT)
+		 {{0, N_A, BOARD_DEV_NAND_FLASH, 8}};	   /* NAND DEV */
+#elif defined(MV_NAND) && defined(MV_SPI_BOOT)
+		 {
+         {0, N_A, BOARD_DEV_NAND_FLASH, 8},	   /* NAND DEV */
+         {1, N_A, BOARD_DEV_SPI_FLASH, 8},	   /* SPI DEV */
+         };
+#else
+	 {{1, N_A, BOARD_DEV_SPI_FLASH, 8}};	   /* SPI DEV */
+#endif
+
+MV_BOARD_MPP_INFO	rd88f6281APcacInfoBoardMppConfigValue[] =
+	{{{
+	RD_88F6281A_PCAC_MPP0_7,
+	RD_88F6281A_PCAC_MPP8_15,
+	RD_88F6281A_PCAC_MPP16_23,
+	RD_88F6281A_PCAC_MPP24_31,
+	RD_88F6281A_PCAC_MPP32_39,
+	RD_88F6281A_PCAC_MPP40_47,
+	RD_88F6281A_PCAC_MPP48_55
+	}}};
+
+MV_BOARD_INFO rd88f6281APcacInfo = {
+	"RD-88F6281A-PCAC",				/* boardName[MAX_BOARD_NAME_LEN] */
+	RD_88F6281A_PCAC_BOARD_MPP_GROUP_TYPE_NUM,	/* numBoardMppGroupType */
+	rd88f6281APcacInfoBoardMppTypeInfo,
+	RD_88F6281A_PCAC_BOARD_MPP_CONFIG_NUM,		/* numBoardMppConfig */
+	rd88f6281APcacInfoBoardMppConfigValue,
+	0,						/* intsGppMaskLow */
+	(1 << 3),					/* intsGppMaskHigh */
+	RD_88F6281A_PCAC_BOARD_DEVICE_CONFIG_NUM,	/* numBoardDevIf */
+	rd88f6281APcacInfoBoardDeCsInfo,
+	RD_88F6281A_PCAC_BOARD_TWSI_DEF_NUM,		/* numBoardTwsiDev */
+	rd88f6281APcacInfoBoardTwsiDev,
+	RD_88F6281A_PCAC_BOARD_MAC_INFO_NUM,		/* numBoardMacInfo */
+	rd88f6281APcacInfoBoardMacInfo,
+	RD_88F6281A_PCAC_BOARD_GPP_INFO_NUM,		/* numBoardGppInfo */
+	0,
+	RD_88F6281A_PCAC_BOARD_DEBUG_LED_NUM,		/* activeLedsNumber */
+	NULL,
+	0,										/* ledsPolarity */
+	RD_88F6281A_PCAC_OE_LOW,			/* gppOutEnLow */
+	RD_88F6281A_PCAC_OE_HIGH,			/* gppOutEnHigh */
+	RD_88F6281A_PCAC_OE_VAL_LOW,			/* gppOutValLow */
+	RD_88F6281A_PCAC_OE_VAL_HIGH,			/* gppOutValHigh */
+	0,						/* gppPolarityValLow */
+	0, 	 					/* gppPolarityValHigh */
+	NULL						/* pSwitchInfo */
+};
+
+
+/* 6281 Sheeva Plug*/
+
+#define SHEEVA_PLUG_BOARD_PCI_IF_NUM		        0x0
+#define SHEEVA_PLUG_BOARD_TWSI_DEF_NUM		        0x0
+#define SHEEVA_PLUG_BOARD_MAC_INFO_NUM		        0x1
+#define SHEEVA_PLUG_BOARD_GPP_INFO_NUM		        0x0
+#define SHEEVA_PLUG_BOARD_MPP_GROUP_TYPE_NUN        0x1
+#define SHEEVA_PLUG_BOARD_MPP_CONFIG_NUM		    0x1
+#define SHEEVA_PLUG_BOARD_DEVICE_CONFIG_NUM	        0x1
+#define SHEEVA_PLUG_BOARD_DEBUG_LED_NUM		        0x1
+
+MV_U8	sheevaPlugInfoBoardDebugLedIf[] =
+	{49};
+
+MV_BOARD_MAC_INFO sheevaPlugInfoBoardMacInfo[] =
+    /* {{MV_BOARD_MAC_SPEED	boardMacSpeed,	MV_U8	boardEthSmiAddr}} */
+	{{BOARD_MAC_SPEED_AUTO, 0x0}};
+
+MV_BOARD_TWSI_INFO	sheevaPlugInfoBoardTwsiDev[] =
+	/* {{MV_BOARD_DEV_CLASS	devClass, MV_U8	twsiDevAddr, MV_U8 twsiDevAddrType}} */
+	{{BOARD_TWSI_OTHER, 0x0, ADDR7_BIT}};
+
+MV_BOARD_MPP_TYPE_INFO sheevaPlugInfoBoardMppTypeInfo[] =
+	{{MV_BOARD_OTHER, MV_BOARD_OTHER}
+	};
+
+MV_DEV_CS_INFO sheevaPlugInfoBoardDeCsInfo[] =
+		/*{deviceCS, params, devType, devWidth}*/
+		 {{0, N_A, BOARD_DEV_NAND_FLASH, 8}};	   /* NAND DEV */
+
+MV_BOARD_MPP_INFO	sheevaPlugInfoBoardMppConfigValue[] =
+	{{{
+	RD_SHEEVA_PLUG_MPP0_7,
+	RD_SHEEVA_PLUG_MPP8_15,
+	RD_SHEEVA_PLUG_MPP16_23,
+	RD_SHEEVA_PLUG_MPP24_31,
+	RD_SHEEVA_PLUG_MPP32_39,
+	RD_SHEEVA_PLUG_MPP40_47,
+	RD_SHEEVA_PLUG_MPP48_55
+	}}};
+
+MV_BOARD_INFO sheevaPlugInfo = {
+	"SHEEVA PLUG",				                /* boardName[MAX_BOARD_NAME_LEN] */
+	SHEEVA_PLUG_BOARD_MPP_GROUP_TYPE_NUN,		/* numBoardMppGroupType */
+	sheevaPlugInfoBoardMppTypeInfo,
+	SHEEVA_PLUG_BOARD_MPP_CONFIG_NUM,		    /* numBoardMppConfig */
+	sheevaPlugInfoBoardMppConfigValue,
+	0,						                    /* intsGppMaskLow */
+	0,					                        /* intsGppMaskHigh */
+	SHEEVA_PLUG_BOARD_DEVICE_CONFIG_NUM,		/* numBoardDevIf */
+	sheevaPlugInfoBoardDeCsInfo,
+	SHEEVA_PLUG_BOARD_TWSI_DEF_NUM,			    /* numBoardTwsiDev */
+	sheevaPlugInfoBoardTwsiDev,
+	SHEEVA_PLUG_BOARD_MAC_INFO_NUM,			    /* numBoardMacInfo */
+	sheevaPlugInfoBoardMacInfo,
+	SHEEVA_PLUG_BOARD_GPP_INFO_NUM,			    /* numBoardGppInfo */
+	0,
+	SHEEVA_PLUG_BOARD_DEBUG_LED_NUM,			/* activeLedsNumber */
+	sheevaPlugInfoBoardDebugLedIf,
+	0,										/* ledsPolarity */
+	RD_SHEEVA_PLUG_OE_LOW,				            /* gppOutEnLow */
+	RD_SHEEVA_PLUG_OE_HIGH,				        /* gppOutEnHigh */
+	RD_SHEEVA_PLUG_OE_VAL_LOW,				        /* gppOutValLow */
+	RD_SHEEVA_PLUG_OE_VAL_HIGH,				    /* gppOutValHigh */
+	0,						                    /* gppPolarityValLow */
+	0, 						                    /* gppPolarityValHigh */
+    NULL										/* pSwitchInfo */
+};
+
+/* Customer specific board place holder*/
+
+#define DB_CUSTOMER_BOARD_PCI_IF_NUM		        0x0
+#define DB_CUSTOMER_BOARD_TWSI_DEF_NUM		        0x0
+#define DB_CUSTOMER_BOARD_MAC_INFO_NUM		        0x0
+#define DB_CUSTOMER_BOARD_GPP_INFO_NUM		        0x0
+#define DB_CUSTOMER_BOARD_MPP_GROUP_TYPE_NUN        0x0
+#define DB_CUSTOMER_BOARD_MPP_CONFIG_NUM		    0x0
+#if defined(MV_NAND) && defined(MV_NAND_BOOT)
+    #define DB_CUSTOMER_BOARD_DEVICE_CONFIG_NUM	    0x0
+#elif defined(MV_NAND) && defined(MV_SPI_BOOT)
+    #define DB_CUSTOMER_BOARD_DEVICE_CONFIG_NUM	    0x0
+#else
+    #define DB_CUSTOMER_BOARD_DEVICE_CONFIG_NUM	    0x0
+#endif
+#define DB_CUSTOMER_BOARD_DEBUG_LED_NUM		0x0
+
+MV_U8	dbCustomerInfoBoardDebugLedIf[] =
+	{0};
+
+MV_BOARD_MAC_INFO dbCustomerInfoBoardMacInfo[] =
+    /* {{MV_BOARD_MAC_SPEED	boardMacSpeed,	MV_U8	boardEthSmiAddr}} */
+	{{BOARD_MAC_SPEED_AUTO, 0x0}};
+
+MV_BOARD_TWSI_INFO	dbCustomerInfoBoardTwsiDev[] =
+	/* {{MV_BOARD_DEV_CLASS	devClass, MV_U8	twsiDevAddr, MV_U8 twsiDevAddrType}} */
+	{{BOARD_TWSI_OTHER, 0x0, ADDR7_BIT}};
+
+MV_BOARD_MPP_TYPE_INFO dbCustomerInfoBoardMppTypeInfo[] =
+	{{MV_BOARD_OTHER, MV_BOARD_OTHER}
+	};
+
+MV_DEV_CS_INFO dbCustomerInfoBoardDeCsInfo[] =
+		/*{deviceCS, params, devType, devWidth}*/
+#if defined(MV_NAND) && defined(MV_NAND_BOOT)
+		 {{0, N_A, BOARD_DEV_NAND_FLASH, 8}};	   /* NAND DEV */
+#elif defined(MV_NAND) && defined(MV_SPI_BOOT)
+		 {
+         {0, N_A, BOARD_DEV_NAND_FLASH, 8},	   /* NAND DEV */
+         {2, N_A, BOARD_DEV_SPI_FLASH, 8},	   /* SPI DEV */
+         };
+#else
+		 {{2, N_A, BOARD_DEV_SPI_FLASH, 8}};	   /* SPI DEV */
+#endif
+
+MV_BOARD_MPP_INFO	dbCustomerInfoBoardMppConfigValue[] =
+	{{{
+	DB_CUSTOMER_MPP0_7,
+	DB_CUSTOMER_MPP8_15,
+	DB_CUSTOMER_MPP16_23,
+	DB_CUSTOMER_MPP24_31,
+	DB_CUSTOMER_MPP32_39,
+	DB_CUSTOMER_MPP40_47,
+	DB_CUSTOMER_MPP48_55
+	}}};
+
+MV_BOARD_INFO dbCustomerInfo = {
+	"DB-CUSTOMER",				                /* boardName[MAX_BOARD_NAME_LEN] */
+	DB_CUSTOMER_BOARD_MPP_GROUP_TYPE_NUN,		/* numBoardMppGroupType */
+	dbCustomerInfoBoardMppTypeInfo,
+	DB_CUSTOMER_BOARD_MPP_CONFIG_NUM,		    /* numBoardMppConfig */
+	dbCustomerInfoBoardMppConfigValue,
+	0,						                    /* intsGppMaskLow */
+	0,					                        /* intsGppMaskHigh */
+	DB_CUSTOMER_BOARD_DEVICE_CONFIG_NUM,		/* numBoardDevIf */
+	dbCustomerInfoBoardDeCsInfo,
+	DB_CUSTOMER_BOARD_TWSI_DEF_NUM,			    /* numBoardTwsiDev */
+	dbCustomerInfoBoardTwsiDev,
+	DB_CUSTOMER_BOARD_MAC_INFO_NUM,			    /* numBoardMacInfo */
+	dbCustomerInfoBoardMacInfo,
+	DB_CUSTOMER_BOARD_GPP_INFO_NUM,			    /* numBoardGppInfo */
+	0,
+	DB_CUSTOMER_BOARD_DEBUG_LED_NUM,			/* activeLedsNumber */
+	NULL,
+	0,										/* ledsPolarity */
+	DB_CUSTOMER_OE_LOW,				            /* gppOutEnLow */
+	DB_CUSTOMER_OE_HIGH,				        /* gppOutEnHigh */
+	DB_CUSTOMER_OE_VAL_LOW,				        /* gppOutValLow */
+	DB_CUSTOMER_OE_VAL_HIGH,				    /* gppOutValHigh */
+	0,						                    /* gppPolarityValLow */
+	0, 						                    /* gppPolarityValHigh */
+    NULL										/* pSwitchInfo */
+};
+
+MV_BOARD_INFO*	boardInfoTbl[] = 	{
+                    &db88f6281AInfo,
+                    &rd88f6281AInfo,
+                    &db88f6192AInfo,
+                    &rd88f6192AInfo,
+                    &db88f6180AInfo,
+                    &db88f6190AInfo,
+                    &rd88f6190AInfo,
+                    &rd88f6281APcacInfo,
+                    &dbCustomerInfo,
+                    &sheevaPlugInfo
+					};
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvSpec.h b/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvSpec.h
new file mode 100644
index 000000000000..1f5c197493c2
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/boardEnv/mvBoardEnvSpec.h
@@ -0,0 +1,262 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvBoardEnvSpech
+#define __INCmvBoardEnvSpech
+
+#include "mvSysHwConfig.h"
+
+
+/* For future use */
+#define BD_ID_DATA_START_OFFS		0x0
+#define BD_DETECT_SEQ_OFFS		0x0
+#define BD_SYS_NUM_OFFS			0x4
+#define BD_NAME_OFFS			0x8
+
+/* I2C bus addresses */
+#define MV_BOARD_CTRL_I2C_ADDR			0x0     /* Controller slave addr */
+#define MV_BOARD_CTRL_I2C_ADDR_TYPE 		ADDR7_BIT
+#define MV_BOARD_DIMM0_I2C_ADDR			0x56
+#define MV_BOARD_DIMM0_I2C_ADDR_TYPE 		ADDR7_BIT
+#define MV_BOARD_DIMM1_I2C_ADDR			0x54
+#define MV_BOARD_DIMM1_I2C_ADDR_TYPE 		ADDR7_BIT
+#define MV_BOARD_EEPROM_I2C_ADDR	    	0x51
+#define MV_BOARD_EEPROM_I2C_ADDR_TYPE 		ADDR7_BIT
+#define MV_BOARD_MAIN_EEPROM_I2C_ADDR	   	0x50
+#define MV_BOARD_MAIN_EEPROM_I2C_ADDR_TYPE 	ADDR7_BIT
+#define MV_BOARD_MUX_I2C_ADDR_ENTRY		0x2
+#define MV_BOARD_DIMM_I2C_CHANNEL		0x0
+
+#define BOOT_FLASH_INDEX			0
+#define MAIN_FLASH_INDEX			1
+
+#define BOARD_ETH_START_PORT_NUM	0
+
+/* Supported clocks */
+#define MV_BOARD_TCLK_100MHZ	100000000
+#define MV_BOARD_TCLK_125MHZ	125000000
+#define MV_BOARD_TCLK_133MHZ	133333333
+#define MV_BOARD_TCLK_150MHZ	150000000
+#define MV_BOARD_TCLK_166MHZ	166666667
+#define MV_BOARD_TCLK_200MHZ	200000000
+
+#define MV_BOARD_SYSCLK_100MHZ	100000000
+#define MV_BOARD_SYSCLK_125MHZ	125000000
+#define MV_BOARD_SYSCLK_133MHZ	133333333
+#define MV_BOARD_SYSCLK_150MHZ	150000000
+#define MV_BOARD_SYSCLK_166MHZ	166666667
+#define MV_BOARD_SYSCLK_200MHZ	200000000
+#define MV_BOARD_SYSCLK_233MHZ	233333333
+#define MV_BOARD_SYSCLK_250MHZ	250000000
+#define MV_BOARD_SYSCLK_267MHZ	266666667
+#define MV_BOARD_SYSCLK_300MHZ	300000000
+#define MV_BOARD_SYSCLK_333MHZ	333333334
+#define MV_BOARD_SYSCLK_400MHZ	400000000
+
+#define MV_BOARD_REFCLK_25MHZ	 25000000
+
+/* Board specific */
+/* =============================== */
+
+/* boards ID numbers */
+
+#define BOARD_ID_BASE           		0x0
+
+/* New board ID numbers */
+#define DB_88F6281A_BP_ID			(BOARD_ID_BASE)
+#define DB_88F6281_BP_MLL_ID        1680
+#define RD_88F6281A_ID				(BOARD_ID_BASE+0x1)
+#define RD_88F6281_MLL_ID			1682
+#define DB_88F6192A_BP_ID			(BOARD_ID_BASE+0x2)
+#define RD_88F6192A_ID				(BOARD_ID_BASE+0x3)
+#define RD_88F6192_MLL_ID			1681
+#define DB_88F6180A_BP_ID			(BOARD_ID_BASE+0x4)
+#define DB_88F6190A_BP_ID			(BOARD_ID_BASE+0x5)
+#define RD_88F6190A_ID				(BOARD_ID_BASE+0x6)
+#define RD_88F6281A_PCAC_ID			(BOARD_ID_BASE+0x7)
+#define DB_CUSTOMER_ID			    (BOARD_ID_BASE+0x8)
+#define SHEEVA_PLUG_ID			    (BOARD_ID_BASE+0x9)
+#define MV_MAX_BOARD_ID 			(SHEEVA_PLUG_ID + 1)
+
+/* DB-88F6281A-BP */
+#if defined(MV_NAND)
+    #define DB_88F6281A_MPP0_7                   	0x21111111
+#else
+    #define DB_88F6281A_MPP0_7                   	0x21112220
+#endif
+#define DB_88F6281A_MPP8_15                   	0x11113311
+#define DB_88F6281A_MPP16_23                   	0x00551111
+#define DB_88F6281A_MPP24_31                   	0x00000000
+#define DB_88F6281A_MPP32_39                   	0x00000000
+#define DB_88F6281A_MPP40_47                   	0x00000000
+#define DB_88F6281A_MPP48_55                   	0x00000000
+#define DB_88F6281A_OE_LOW                       0x0
+#if defined(MV_TDM_5CHANNELS)
+	#define DB_88F6281A_OE_HIGH		(BIT6)
+#else
+#define DB_88F6281A_OE_HIGH                      0x0
+#endif
+#define DB_88F6281A_OE_VAL_LOW                   0x0
+#define DB_88F6281A_OE_VAL_HIGH                  0x0
+
+/* RD-88F6281A */
+#if defined(MV_NAND)
+    #define RD_88F6281A_MPP0_7                   	0x21111111
+#else
+    #define RD_88F6281A_MPP0_7                   	0x21112220
+#endif
+#define RD_88F6281A_MPP8_15                   	0x11113311
+#define RD_88F6281A_MPP16_23                   	0x33331111
+#define RD_88F6281A_MPP24_31                   	0x33003333
+#define RD_88F6281A_MPP32_39                   	0x20440533
+#define RD_88F6281A_MPP40_47                   	0x22202222
+#define RD_88F6281A_MPP48_55                   	0x00000002
+#define RD_88F6281A_OE_LOW                      (BIT28 | BIT29)
+#define RD_88F6281A_OE_HIGH                     (BIT3 | BIT6 | BIT17)
+#define RD_88F6281A_OE_VAL_LOW                   0x0
+#define RD_88F6281A_OE_VAL_HIGH                  0x0
+
+/* DB-88F6192A-BP */
+#if defined(MV_NAND)
+    #define DB_88F6192A_MPP0_7                   	0x21111111
+#else
+    #define DB_88F6192A_MPP0_7                   	0x21112220
+#endif
+#define DB_88F6192A_MPP8_15                   	0x11113311
+#define DB_88F6192A_MPP16_23                   	0x00501111
+#define DB_88F6192A_MPP24_31                   	0x00000000
+#define DB_88F6192A_MPP32_35                   	0x00000000
+#define DB_88F6192A_OE_LOW                       (BIT22 | BIT23)
+#define DB_88F6192A_OE_HIGH                      0x0
+#define DB_88F6192A_OE_VAL_LOW                   0x0
+#define DB_88F6192A_OE_VAL_HIGH                  0x0
+
+/* RD-88F6192A */
+#define RD_88F6192A_MPP0_7                   	0x01222222
+#define RD_88F6192A_MPP8_15                   	0x00000011
+#define RD_88F6192A_MPP16_23                   	0x05550000
+#define RD_88F6192A_MPP24_31                   	0x0
+#define RD_88F6192A_MPP32_35                   	0x0
+#define RD_88F6192A_OE_LOW                      (BIT11 | BIT14 | BIT24 | BIT25 | BIT26 | BIT27 | BIT30 | BIT31)
+#define RD_88F6192A_OE_HIGH                     (BIT0 | BIT2)
+#define RD_88F6192A_OE_VAL_LOW                  0x18400
+#define RD_88F6192A_OE_VAL_HIGH                 0x8
+
+/* DB-88F6180A-BP */
+#if defined(MV_NAND)
+    #define DB_88F6180A_MPP0_7                   	0x21111111
+#else
+    #define DB_88F6180A_MPP0_7                   	0x01112222
+#endif
+#define DB_88F6180A_MPP8_15                   	0x11113311
+#define DB_88F6180A_MPP16_23                   	0x00001111
+#define DB_88F6180A_MPP24_31                   	0x0
+#define DB_88F6180A_MPP32_39                   	0x4444c000
+#define DB_88F6180A_MPP40_44                   	0x00044444
+#define DB_88F6180A_OE_LOW                       0x0
+#define DB_88F6180A_OE_HIGH                      0x0
+#define DB_88F6180A_OE_VAL_LOW                   0x0
+#define DB_88F6180A_OE_VAL_HIGH                  0x0
+
+/* RD-88F6281A_PCAC */
+#define RD_88F6281A_PCAC_MPP0_7                	0x21111111
+#define RD_88F6281A_PCAC_MPP8_15               	0x00003311
+#define RD_88F6281A_PCAC_MPP16_23              	0x00001100
+#define RD_88F6281A_PCAC_MPP24_31              	0x00000000
+#define RD_88F6281A_PCAC_MPP32_39              	0x00000000
+#define RD_88F6281A_PCAC_MPP40_47              	0x00000000
+#define RD_88F6281A_PCAC_MPP48_55              	0x00000000
+#define RD_88F6281A_PCAC_OE_LOW                 0x0
+#define RD_88F6281A_PCAC_OE_HIGH                0x0
+#define RD_88F6281A_PCAC_OE_VAL_LOW             0x0
+#define RD_88F6281A_PCAC_OE_VAL_HIGH            0x0
+
+/* SHEEVA PLUG */
+#define RD_SHEEVA_PLUG_MPP0_7                   0x01111111
+#define RD_SHEEVA_PLUG_MPP8_15                  0x11113322
+#define RD_SHEEVA_PLUG_MPP16_23                 0x00001111
+#define RD_SHEEVA_PLUG_MPP24_31                 0x00100000
+#define RD_SHEEVA_PLUG_MPP32_39                 0x00000000
+#define RD_SHEEVA_PLUG_MPP40_47                 0x00000000
+#define RD_SHEEVA_PLUG_MPP48_55                 0x00000000
+#define RD_SHEEVA_PLUG_OE_LOW                   0x0
+#define RD_SHEEVA_PLUG_OE_HIGH                  0x0
+#define RD_SHEEVA_PLUG_OE_VAL_LOW               (BIT29)
+#define RD_SHEEVA_PLUG_OE_VAL_HIGH              ((~(BIT17 | BIT16 | BIT15)) | BIT14)
+
+/* DB-CUSTOMER */
+#define DB_CUSTOMER_MPP0_7                	    0x21111111
+#define DB_CUSTOMER_MPP8_15               	    0x00003311
+#define DB_CUSTOMER_MPP16_23              	    0x00001100
+#define DB_CUSTOMER_MPP24_31              	    0x00000000
+#define DB_CUSTOMER_MPP32_39              	    0x00000000
+#define DB_CUSTOMER_MPP40_47              	    0x00000000
+#define DB_CUSTOMER_MPP48_55              	    0x00000000
+#define DB_CUSTOMER_OE_LOW                      0x0
+#define DB_CUSTOMER_OE_HIGH                     (~((BIT6) | (BIT7) | (BIT8) | (BIT9)))
+#define DB_CUSTOMER_OE_VAL_LOW                  0x0
+#define DB_CUSTOMER_OE_VAL_HIGH                 0x0
+
+#endif /* __INCmvBoardEnvSpech */
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/cpu/mvCpu.c b/crypto/ocf/kirkwood/mvHal/kw_family/cpu/mvCpu.c
new file mode 100644
index 000000000000..9bb860743605
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/cpu/mvCpu.c
@@ -0,0 +1,224 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#include "cpu/mvCpu.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvRegs.h"
+#include "ctrlEnv/sys/mvCpuIfRegs.h"
+
+/* defines  */
+#ifdef MV_DEBUG
+	#define DB(x)	x
+#else
+	#define DB(x)
+#endif
+
+/* locals */
+
+/*******************************************************************************
+* mvCpuPclkGet - Get the CPU pClk (pipe clock)
+*
+* DESCRIPTION:
+*       This routine extract the CPU core clock.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit clock cycles in MHertz.
+*
+*******************************************************************************/
+/* 6180 have different clk reset sampling */
+
+static MV_U32 mvCpu6180PclkGet(MV_VOID)
+{
+	MV_U32 	tmpPClkRate=0;
+	MV_CPU_ARM_CLK cpu6180_ddr_l2_CLK[] = MV_CPU6180_DDR_L2_CLCK_TBL;
+
+	tmpPClkRate = MV_REG_READ(MPP_SAMPLE_AT_RESET);
+	tmpPClkRate = tmpPClkRate & MSAR_CPUCLCK_MASK_6180;
+	tmpPClkRate = tmpPClkRate >> MSAR_CPUCLCK_OFFS_6180;
+
+	tmpPClkRate = cpu6180_ddr_l2_CLK[tmpPClkRate].cpuClk;
+
+	return tmpPClkRate;
+}
+
+
+MV_U32 mvCpuPclkGet(MV_VOID)
+{
+#if defined(PCLCK_AUTO_DETECT)
+	MV_U32 	tmpPClkRate=0;
+	MV_U32 cpuCLK[] = MV_CPU_CLCK_TBL;
+
+	if(mvCtrlModelGet() == MV_6180_DEV_ID)
+		return mvCpu6180PclkGet();
+
+	tmpPClkRate = MV_REG_READ(MPP_SAMPLE_AT_RESET);
+	tmpPClkRate = MSAR_CPUCLCK_EXTRACT(tmpPClkRate);
+	tmpPClkRate = cpuCLK[tmpPClkRate];
+
+	return tmpPClkRate;
+#else
+	return MV_DEFAULT_PCLK
+#endif
+}
+
+#define MV_PROC_STR_SIZE 50
+
+static void mvCpuIfGetL2EccMode(MV_8 *buf)
+{
+    MV_U32 regVal = MV_REG_READ(CPU_L2_CONFIG_REG);
+    if (regVal & BIT2)
+	mvOsSPrintf(buf, "L2 ECC Enabled");
+    else
+	mvOsSPrintf(buf, "L2 ECC Disabled");
+}
+
+static void mvCpuIfGetL2Mode(MV_8 *buf)
+{
+    MV_U32 regVal = 0;
+    __asm volatile ("mrc	p15, 1, %0, c15, c1, 0" : "=r" (regVal)); /* Read Marvell extra features register */
+    if (regVal & BIT22)
+	mvOsSPrintf(buf, "L2 Enabled");
+    else
+	mvOsSPrintf(buf, "L2 Disabled");
+}
+
+static void mvCpuIfGetL2PrefetchMode(MV_8 *buf)
+{
+    MV_U32 regVal = 0;
+    __asm volatile ("mrc	p15, 1, %0, c15, c1, 0" : "=r" (regVal)); /* Read Marvell extra features register */
+    if (regVal & BIT24)
+	mvOsSPrintf(buf, "L2 Prefetch Disabled");
+    else
+	mvOsSPrintf(buf, "L2 Prefetch Enabled");
+}
+
+static void mvCpuIfGetWriteAllocMode(MV_8 *buf)
+{
+    MV_U32 regVal = 0;
+    __asm volatile ("mrc	p15, 1, %0, c15, c1, 0" : "=r" (regVal)); /* Read Marvell extra features register */
+    if (regVal & BIT28)
+	mvOsSPrintf(buf, "Write Allocate Enabled");
+    else
+	mvOsSPrintf(buf, "Write Allocate Disabled");
+}
+
+static void mvCpuIfGetCpuStreamMode(MV_8 *buf)
+{
+    MV_U32 regVal = 0;
+    __asm volatile ("mrc	p15, 1, %0, c15, c1, 0" : "=r" (regVal)); /* Read Marvell extra features register */
+    if (regVal & BIT29)
+	mvOsSPrintf(buf, "CPU Streaming Enabled");
+    else
+	mvOsSPrintf(buf, "CPU Streaming Disabled");
+}
+
+static void mvCpuIfPrintCpuRegs(void)
+{
+    MV_U32 regVal = 0;
+
+    __asm volatile ("mrc p15, 1, %0, c15, c1, 0" : "=r" (regVal)); /* Read Marvell extra features register */
+    mvOsPrintf("Extra Feature Reg = 0x%x\n",regVal);
+
+   __asm volatile ("mrc	p15, 0, %0, c1, c0, 0" : "=r" (regVal)); /* Read Control register */
+   mvOsPrintf("Control Reg = 0x%x\n",regVal);
+
+   __asm volatile ("mrc	p15, 0, %0, c0, c0, 0" : "=r" (regVal)); /* Read ID Code register */
+    mvOsPrintf("ID Code Reg = 0x%x\n",regVal);
+
+   __asm volatile ("mrc	p15, 0, %0, c0, c0, 1" : "=r" (regVal)); /* Read Cache Type register */
+   mvOsPrintf("Cache Type Reg = 0x%x\n",regVal);
+
+}
+
+MV_U32 mvCpuIfPrintSystemConfig(MV_8 *buffer, MV_U32 index)
+{
+  MV_U32 count = 0;
+
+  MV_8 L2_ECC_str[MV_PROC_STR_SIZE];
+  MV_8 L2_En_str[MV_PROC_STR_SIZE];
+  MV_8 L2_Prefetch_str[MV_PROC_STR_SIZE];
+  MV_8 Write_Alloc_str[MV_PROC_STR_SIZE];
+  MV_8 Cpu_Stream_str[MV_PROC_STR_SIZE];
+
+  mvCpuIfGetL2Mode(L2_En_str);
+  mvCpuIfGetL2EccMode(L2_ECC_str);
+  mvCpuIfGetL2PrefetchMode(L2_Prefetch_str);
+  mvCpuIfGetWriteAllocMode(Write_Alloc_str);
+  mvCpuIfGetCpuStreamMode(Cpu_Stream_str);
+  mvCpuIfPrintCpuRegs();
+
+  count += mvOsSPrintf(buffer + count + index, "%s\n", L2_En_str);
+  count += mvOsSPrintf(buffer + count + index, "%s\n", L2_ECC_str);
+  count += mvOsSPrintf(buffer + count + index, "%s\n", L2_Prefetch_str);
+  count += mvOsSPrintf(buffer + count + index, "%s\n", Write_Alloc_str);
+  count += mvOsSPrintf(buffer + count + index, "%s\n", Cpu_Stream_str);
+  return count;
+}
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/cpu/mvCpu.h b/crypto/ocf/kirkwood/mvHal/kw_family/cpu/mvCpu.h
new file mode 100644
index 000000000000..dd3a70ef0d56
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/cpu/mvCpu.h
@@ -0,0 +1,99 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvCpuh
+#define __INCmvCpuh
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+
+/* defines */
+#define CPU_PART_MRVL131      0x131
+#define CPU_PART_ARM926       0x926
+#define CPU_PART_ARM946       0x946
+#define MV_CPU_ARM_CLK_ELM_SIZE	    12
+#define MV_CPU_ARM_CLK_RATIO_OFF    8
+#define MV_CPU_ARM_CLK_DDR_OFF	    4
+
+#ifndef MV_ASMLANGUAGE
+typedef struct _mvCpuArmClk
+{
+	MV_U32	      cpuClk;	  /* CPU clock in MHz */
+	MV_U32	      ddrClk;	  /* DDR clock in MHz */
+	MV_U32	      l2Clk;	  /* CPU DDR clock ratio */
+
+}MV_CPU_ARM_CLK;
+
+MV_U32    mvCpuPclkGet(MV_VOID);
+MV_VOID   mvCpuNameGet(char *pNameBuff);
+MV_U32  mvCpuL2ClkGet(MV_VOID);
+MV_U32 mvCpuIfPrintSystemConfig(MV_8 *buffer, MV_U32 index);
+MV_U32 whoAmI(MV_VOID);
+
+#endif /* MV_ASMLANGUAGE */
+
+
+#endif /* __INCmvCpuh */
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAddrDec.c b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAddrDec.c
new file mode 100644
index 000000000000..20bd3a35b63d
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAddrDec.c
@@ -0,0 +1,293 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/*******************************************************************************
+* mvCtrlEnvAddrDec.h - Marvell controller address decode library
+*
+* DESCRIPTION:
+*
+* DEPENDENCIES:
+*       None.
+*
+*******************************************************************************/
+
+/* includes */
+#include "ctrlEnv/mvCtrlEnvAddrDec.h"
+#include "ctrlEnv/sys/mvAhbToMbusRegs.h"
+#include "ddr2/mvDramIfRegs.h"
+#include "pex/mvPexRegs.h"
+
+#define MV_DEBUG
+
+/* defines  */
+#ifdef MV_DEBUG
+	#define DB(x)	x
+#else
+	#define DB(x)
+#endif
+
+/* Default Attributes array */
+MV_TARGET_ATTRIB	mvTargetDefaultsArray[] = TARGETS_DEF_ARRAY;
+extern MV_TARGET 	*sampleAtResetTargetArray;
+/* Dram\AHBToMbus\PEX share regsiter */
+
+#define CTRL_DEC_BASE_OFFS		16
+#define CTRL_DEC_BASE_MASK		(0xffff << CTRL_DEC_BASE_OFFS)
+#define CTRL_DEC_BASE_ALIGNMENT	0x10000
+
+#define CTRL_DEC_SIZE_OFFS		16
+#define CTRL_DEC_SIZE_MASK		(0xffff << CTRL_DEC_SIZE_OFFS)
+#define CTRL_DEC_SIZE_ALIGNMENT	0x10000
+
+#define CTRL_DEC_WIN_EN			BIT0
+
+
+
+/*******************************************************************************
+* mvCtrlAddrDecToReg - Get address decode register format values
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_STATUS mvCtrlAddrDecToReg(MV_ADDR_WIN *pAddrDecWin, MV_DEC_REGS *pAddrDecRegs)
+{
+
+	MV_U32 baseToReg=0 , sizeToReg=0;
+
+	/* BaseLow[31:16] => base register [31:16]		*/
+	baseToReg = pAddrDecWin->baseLow & CTRL_DEC_BASE_MASK;
+
+	/* Write to address decode Base Address Register                  */
+	pAddrDecRegs->baseReg &= ~CTRL_DEC_BASE_MASK;
+	pAddrDecRegs->baseReg |= baseToReg;
+
+	/* Get size register value according to window size						*/
+	sizeToReg = ctrlSizeToReg(pAddrDecWin->size, CTRL_DEC_SIZE_ALIGNMENT);
+
+	/* Size parameter validity check.                                   */
+	if (-1 == sizeToReg)
+	{
+		return MV_BAD_PARAM;
+	}
+
+	/* set size */
+	pAddrDecRegs->sizeReg &= ~CTRL_DEC_SIZE_MASK;
+	pAddrDecRegs->sizeReg |= (sizeToReg << CTRL_DEC_SIZE_OFFS);
+
+
+	return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvCtrlRegToAddrDec - Extract address decode struct from registers.
+*
+* DESCRIPTION:
+*       This function extract address decode struct from address decode
+*       registers given as parameters.
+*
+* INPUT:
+*       pAddrDecRegs - Address decode register struct.
+*
+* OUTPUT:
+*       pAddrDecWin - Target window data structure.
+*
+* RETURN:
+*		MV_BAD_PARAM if address decode registers data is invalid.
+*
+*******************************************************************************/
+MV_STATUS mvCtrlRegToAddrDec(MV_DEC_REGS *pAddrDecRegs, MV_ADDR_WIN *pAddrDecWin)
+{
+	MV_U32 sizeRegVal;
+
+	sizeRegVal = (pAddrDecRegs->sizeReg & CTRL_DEC_SIZE_MASK) >>
+					CTRL_DEC_SIZE_OFFS;
+
+	pAddrDecWin->size = ctrlRegToSize(sizeRegVal, CTRL_DEC_SIZE_ALIGNMENT);
+
+
+	/* Extract base address						*/
+	/* Base register [31:16] ==> baseLow[31:16] 		*/
+	pAddrDecWin->baseLow = pAddrDecRegs->baseReg & CTRL_DEC_BASE_MASK;
+
+	pAddrDecWin->baseHigh =  0;
+
+	return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvCtrlAttribGet -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+*
+*******************************************************************************/
+
+MV_STATUS mvCtrlAttribGet(MV_TARGET target,
+						  MV_TARGET_ATTRIB *targetAttrib)
+{
+
+	targetAttrib->attrib = mvTargetDefaultsArray[MV_CHANGE_BOOT_CS(target)].attrib;
+	targetAttrib->targetId = mvTargetDefaultsArray[MV_CHANGE_BOOT_CS(target)].targetId;
+
+	return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvCtrlGetAttrib -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_TARGET mvCtrlTargetGet(MV_TARGET_ATTRIB *targetAttrib)
+{
+	MV_TARGET target;
+	MV_TARGET x;
+	for (target = SDRAM_CS0; target < MAX_TARGETS ; target ++)
+	{
+		x = MV_CHANGE_BOOT_CS(target);
+		if ((mvTargetDefaultsArray[x].attrib == targetAttrib->attrib) &&
+			(mvTargetDefaultsArray[MV_CHANGE_BOOT_CS(target)].targetId == targetAttrib->targetId))
+		{
+			/* found it */
+			break;
+		}
+	}
+
+	return target;
+}
+
+MV_STATUS mvCtrlAddrDecToParams(MV_DEC_WIN *pAddrDecWin,
+                                MV_DEC_WIN_PARAMS *pWinParam)
+{
+	MV_U32 baseToReg=0, sizeToReg=0;
+
+	/* BaseLow[31:16] => base register [31:16]		*/
+	baseToReg = pAddrDecWin->addrWin.baseLow & CTRL_DEC_BASE_MASK;
+
+	/* Write to address decode Base Address Register                  */
+	pWinParam->baseAddr &= ~CTRL_DEC_BASE_MASK;
+	pWinParam->baseAddr |= baseToReg;
+
+	/* Get size register value according to window size						*/
+	sizeToReg = ctrlSizeToReg(pAddrDecWin->addrWin.size, CTRL_DEC_SIZE_ALIGNMENT);
+
+	/* Size parameter validity check.                                   */
+	if (-1 == sizeToReg)
+	{
+        mvOsPrintf("mvCtrlAddrDecToParams: ERR. ctrlSizeToReg failed.\n");
+		return MV_BAD_PARAM;
+	}
+    pWinParam->size = sizeToReg;
+
+    pWinParam->attrib   = mvTargetDefaultsArray[MV_CHANGE_BOOT_CS(pAddrDecWin->target)].attrib;
+    pWinParam->targetId = mvTargetDefaultsArray[MV_CHANGE_BOOT_CS(pAddrDecWin->target)].targetId;
+
+    return MV_OK;
+}
+
+MV_STATUS mvCtrlParamsToAddrDec(MV_DEC_WIN_PARAMS *pWinParam,
+                                MV_DEC_WIN *pAddrDecWin)
+{
+    MV_TARGET_ATTRIB    targetAttrib;
+
+    pAddrDecWin->addrWin.baseLow = pWinParam->baseAddr;
+
+	/* Upper 32bit address base is supported under PCI High Address remap */
+	pAddrDecWin->addrWin.baseHigh = 0;
+
+	/* Prepare sizeReg to ctrlRegToSize function */
+    pAddrDecWin->addrWin.size = ctrlRegToSize(pWinParam->size, CTRL_DEC_SIZE_ALIGNMENT);
+
+	if (-1 == pAddrDecWin->addrWin.size)
+	{
+		DB(mvOsPrintf("mvCtrlParamsToAddrDec: ERR. ctrlRegToSize failed.\n"));
+		return MV_BAD_PARAM;
+	}
+    targetAttrib.targetId = pWinParam->targetId;
+    targetAttrib.attrib = pWinParam->attrib;
+
+    pAddrDecWin->target = mvCtrlTargetGet(&targetAttrib);
+
+    return MV_OK;
+}
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAddrDec.h b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAddrDec.h
new file mode 100644
index 000000000000..3e2a1a321861
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAddrDec.h
@@ -0,0 +1,203 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvCtrlEnvAddrDech
+#define __INCmvCtrlEnvAddrDech
+
+/* includes */
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvRegs.h"
+
+
+/* defines  */
+/* DUnit attributes */
+#define ATMWCR_WIN_DUNIT_CS0_OFFS			0
+#define ATMWCR_WIN_DUNIT_CS0_MASK			BIT0
+#define ATMWCR_WIN_DUNIT_CS0_REQ			(0 << ATMWCR_WIN_DUNIT_CS0_OFFS)
+
+#define ATMWCR_WIN_DUNIT_CS1_OFFS			1
+#define ATMWCR_WIN_DUNIT_CS1_MASK			BIT1
+#define ATMWCR_WIN_DUNIT_CS1_REQ 			(0 << ATMWCR_WIN_DUNIT_CS1_OFFS)
+
+#define ATMWCR_WIN_DUNIT_CS2_OFFS			2
+#define ATMWCR_WIN_DUNIT_CS2_MASK			BIT2
+#define ATMWCR_WIN_DUNIT_CS2_REQ 			(0 << ATMWCR_WIN_DUNIT_CS2_OFFS)
+
+#define ATMWCR_WIN_DUNIT_CS3_OFFS			3
+#define ATMWCR_WIN_DUNIT_CS3_MASK			BIT3
+#define ATMWCR_WIN_DUNIT_CS3_REQ 			(0 << ATMWCR_WIN_DUNIT_CS3_OFFS)
+
+/* RUnit (Device)  attributes */
+#define ATMWCR_WIN_RUNIT_DEVCS0_OFFS		0
+#define ATMWCR_WIN_RUNIT_DEVCS0_MASK		BIT0
+#define ATMWCR_WIN_RUNIT_DEVCS0_REQ			(0 << ATMWCR_WIN_RUNIT_DEVCS0_OFFS)
+
+#define ATMWCR_WIN_RUNIT_DEVCS1_OFFS		1
+#define ATMWCR_WIN_RUNIT_DEVCS1_MASK		BIT1
+#define ATMWCR_WIN_RUNIT_DEVCS1_REQ 		(0 << ATMWCR_WIN_RUNIT_DEVCS1_OFFS)
+
+#define ATMWCR_WIN_RUNIT_DEVCS2_OFFS		2
+#define ATMWCR_WIN_RUNIT_DEVCS2_MASK		BIT2
+#define ATMWCR_WIN_RUNIT_DEVCS2_REQ 		(0 << ATMWCR_WIN_RUNIT_DEVCS2_OFFS)
+
+#define ATMWCR_WIN_RUNIT_BOOTCS_OFFS		4
+#define ATMWCR_WIN_RUNIT_BOOTCS_MASK		BIT4
+#define ATMWCR_WIN_RUNIT_BOOTCS_REQ 		(0 << ATMWCR_WIN_RUNIT_BOOTCS_OFFS)
+
+/* LMaster (PCI)  attributes */
+#define ATMWCR_WIN_LUNIT_BYTE_SWP_OFFS		0
+#define ATMWCR_WIN_LUNIT_BYTE_SWP_MASK		BIT0
+#define ATMWCR_WIN_LUNIT_BYTE_SWP			(0 << ATMWCR_WIN_LUNIT_BYTE_SWP_OFFS)
+#define ATMWCR_WIN_LUNIT_BYTE_NO_SWP		(1 << ATMWCR_WIN_LUNIT_BYTE_SWP_OFFS)
+
+
+#define ATMWCR_WIN_LUNIT_WORD_SWP_OFFS		1
+#define ATMWCR_WIN_LUNIT_WORD_SWP_MASK		BIT1
+#define ATMWCR_WIN_LUNIT_WORD_SWP			(0 << ATMWCR_WIN_LUNIT_WORD_SWP_OFFS)
+#define ATMWCR_WIN_LUNIT_WORD_NO_SWP		(1 << ATMWCR_WIN_LUNIT_WORD_SWP_OFFS)
+
+#define ATMWCR_WIN_LUNIT_NO_SNOOP			BIT2
+
+#define ATMWCR_WIN_LUNIT_TYPE_OFFS			3
+#define ATMWCR_WIN_LUNIT_TYPE_MASK			BIT3
+#define ATMWCR_WIN_LUNIT_TYPE_IO			(0 << ATMWCR_WIN_LUNIT_TYPE_OFFS)
+#define ATMWCR_WIN_LUNIT_TYPE_MEM			(1 << ATMWCR_WIN_LUNIT_TYPE_OFFS)
+
+#define ATMWCR_WIN_LUNIT_FORCE64_OFFS		4
+#define ATMWCR_WIN_LUNIT_FORCE64_MASK		BIT4
+#define ATMWCR_WIN_LUNIT_FORCE64			(0 << ATMWCR_WIN_LUNIT_FORCE64_OFFS)
+
+#define ATMWCR_WIN_LUNIT_ORDERING_OFFS		6
+#define ATMWCR_WIN_LUNIT_ORDERING_MASK		BIT6
+#define ATMWCR_WIN_LUNIT_ORDERING			(1 << ATMWCR_WIN_LUNIT_FORCE64_OFFS)
+
+/* PEX Attributes */
+#define ATMWCR_WIN_PEX_TYPE_OFFS			3
+#define ATMWCR_WIN_PEX_TYPE_MASK			BIT3
+#define ATMWCR_WIN_PEX_TYPE_IO				(0 << ATMWCR_WIN_PEX_TYPE_OFFS)
+#define ATMWCR_WIN_PEX_TYPE_MEM				(1 << ATMWCR_WIN_PEX_TYPE_OFFS)
+
+/* typedefs */
+
+/* Unsupported attributes for address decode:                               */
+/* 2) PCI0/1_REQ64n control                                                 */
+
+typedef struct _mvDecRegs
+{
+	MV_U32 baseReg;
+    MV_U32 baseRegHigh;
+    MV_U32 sizeReg;
+
+}MV_DEC_REGS;
+
+typedef struct _mvTargetAttrib
+{
+	MV_U8			attrib;			/* chip select attributes */
+	MV_TARGET_ID 		targetId; 		/* Target Id of this MV_TARGET */
+
+}MV_TARGET_ATTRIB;
+
+
+/* This structure describes address decode window                           */
+typedef struct _mvDecWin
+{
+    MV_TARGET       target;         /* Target for addr decode window        */
+    MV_ADDR_WIN     addrWin;        /* Address window of target             */
+    MV_BOOL     	enable;         /* Window enable/disable                */
+}MV_DEC_WIN;
+
+typedef struct _mvDecWinParams
+{
+    MV_TARGET_ID    targetId;   /* Target ID field */
+    MV_U8           attrib;     /* Attribute field */
+    MV_U32          baseAddr;   /* Base address in register format */
+    MV_U32          size;       /* Size in register format */
+}MV_DEC_WIN_PARAMS;
+
+
+/* mvCtrlEnvAddrDec API list */
+
+MV_STATUS mvCtrlAddrDecToReg(MV_ADDR_WIN *pAddrDecWin,
+							 MV_DEC_REGS *pAddrDecRegs);
+
+MV_STATUS mvCtrlRegToAddrDec(MV_DEC_REGS *pAddrDecRegs,
+							 MV_ADDR_WIN *pAddrDecWin);
+
+MV_STATUS mvCtrlAttribGet(MV_TARGET target,
+						  MV_TARGET_ATTRIB *targetAttrib);
+
+MV_TARGET mvCtrlTargetGet(MV_TARGET_ATTRIB *targetAttrib);
+
+
+MV_STATUS mvCtrlAddrDecToParams(MV_DEC_WIN *pAddrDecWin,
+                                MV_DEC_WIN_PARAMS *pWinParam);
+
+MV_STATUS mvCtrlParamsToAddrDec(MV_DEC_WIN_PARAMS *pWinParam,
+                                MV_DEC_WIN *pAddrDecWin);
+
+
+
+
+#endif /* __INCmvCtrlEnvAddrDech */
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAsm.h b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAsm.h
new file mode 100644
index 000000000000..3576f2529b3f
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvAsm.h
@@ -0,0 +1,98 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvCtrlEnvAsmh
+#define __INCmvCtrlEnvAsmh
+#include "pex/mvPexRegs.h"
+
+#define CHIP_BOND_REG					0x10034
+#define PCKG_OPT_MASK_AS 		#3
+#define PXCCARI_REVID_MASK_AS           #PXCCARI_REVID_MASK
+
+/* Read device ID into toReg bits 15:0 from 0xd0000000 */
+/* defines  */
+#define MV_DV_CTRL_MODEL_GET_ASM(toReg, tmpReg) \
+        MV_DV_REG_READ_ASM(toReg, tmpReg, CHIP_BOND_REG);\
+        and     toReg, toReg, PCKG_OPT_MASK_AS                 /* Mask for package ID */
+
+/* Read device ID into toReg bits 15:0 from 0xf1000000*/
+#define MV_CTRL_MODEL_GET_ASM(toReg, tmpReg) \
+        MV_REG_READ_ASM(toReg, tmpReg, CHIP_BOND_REG);\
+        and     toReg, toReg, PCKG_OPT_MASK_AS                  /* Mask for package ID */
+
+/* Read Revision into toReg bits 7:0 0xd0000000*/
+#define MV_DV_CTRL_REV_GET_ASM(toReg, tmpReg)	\
+        /* Read device revision */			\
+        MV_DV_REG_READ_ASM(toReg, tmpReg, PEX_CFG_DIRECT_ACCESS(0,PEX_CLASS_CODE_AND_REVISION_ID));\
+        and     toReg, toReg, PXCCARI_REVID_MASK_AS                  /* Mask for calss ID */
+
+/* Read Revision into toReg bits 7:0 0xf1000000*/
+#define MV_CTRL_REV_GET_ASM(toReg, tmpReg)	\
+        /* Read device revision */			\
+        MV_REG_READ_ASM(toReg, tmpReg, PEX_CFG_DIRECT_ACCESS(0,PEX_CLASS_CODE_AND_REVISION_ID));\
+        and     toReg, toReg, PXCCARI_REVID_MASK_AS                  /* Mask for calss ID */
+
+
+#endif /* __INCmvCtrlEnvAsmh */
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvLib.c b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvLib.c
new file mode 100644
index 000000000000..6c9da6c76549
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvLib.c
@@ -0,0 +1,1823 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+/* includes */
+#include "mvCommon.h"
+#include "mvCtrlEnvLib.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+
+#if defined(MV_INCLUDE_PEX)
+#include "pex/mvPex.h"
+#include "ctrlEnv/sys/mvSysPex.h"
+#endif
+
+#if defined(MV_INCLUDE_GIG_ETH)
+#include "ctrlEnv/sys/mvSysGbe.h"
+#endif
+
+#if defined(MV_INCLUDE_XOR)
+#include "ctrlEnv/sys/mvSysXor.h"
+#endif
+
+#if defined(MV_INCLUDE_SATA)
+#include "ctrlEnv/sys/mvSysSata.h"
+#endif
+
+#if defined(MV_INCLUDE_USB)
+#include "ctrlEnv/sys/mvSysUsb.h"
+#endif
+
+#if defined(MV_INCLUDE_AUDIO)
+#include "ctrlEnv/sys/mvSysAudio.h"
+#endif
+
+#if defined(MV_INCLUDE_CESA)
+#include "ctrlEnv/sys/mvSysCesa.h"
+#endif
+
+#if defined(MV_INCLUDE_TS)
+#include "ctrlEnv/sys/mvSysTs.h"
+#endif
+
+/* defines  */
+#ifdef MV_DEBUG
+	#define DB(x)	x
+#else
+	#define DB(x)
+#endif
+
+/*******************************************************************************
+* mvCtrlEnvInit - Initialize Marvell controller environment.
+*
+* DESCRIPTION:
+*       This function get environment information and initialize controller
+*       internal/external environment. For example
+*       1) MPP settings according to board MPP macros.
+*		NOTE: It is the user responsibility to shut down all DMA channels
+*		in device and disable controller sub units interrupts during
+*		boot process.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_STATUS mvCtrlEnvInit(MV_VOID)
+{
+	MV_U32 mppGroup;
+	MV_U32 devId;
+	MV_U32 boardId;
+	MV_U32 i;
+	MV_U32 maxMppGrp = 1;
+	MV_U32 mppVal = 0;
+	MV_U32 bootVal = 0;
+	MV_U32 mppGroupType = 0;
+	MV_U32 mppGroup1[][3] = MPP_GROUP_1_TYPE;
+	MV_U32 mppGroup2[][3] = MPP_GROUP_2_TYPE;
+
+	devId = mvCtrlModelGet();
+	boardId= mvBoardIdGet();
+
+	switch(devId){
+		case MV_6281_DEV_ID:
+			maxMppGrp = MV_6281_MPP_MAX_GROUP;
+			break;
+		case MV_6192_DEV_ID:
+			maxMppGrp = MV_6192_MPP_MAX_GROUP;
+			break;
+        case MV_6190_DEV_ID:
+            maxMppGrp = MV_6190_MPP_MAX_GROUP;
+            break;
+		case MV_6180_DEV_ID:
+			maxMppGrp = MV_6180_MPP_MAX_GROUP;
+			break;
+	}
+
+	/* MPP Init */
+	/* We split mpp init to 3 phases:
+	 * 1. We init mpp[19:0] from the board info. mpp[23:20] will be over write
+	 * in phase 2.
+	 * 2. We detect the mpp group type and according the mpp values [35:20].
+	 * 3. We detect the mpp group type and according the mpp values [49:36].
+	 */
+	/* Mpp phase 1 mpp[19:0] */
+	/* Read MPP group from board level and assign to MPP register */
+	for (mppGroup = 0; mppGroup < 3; mppGroup++)
+	{
+		mppVal = mvBoardMppGet(mppGroup);
+		if (mppGroup == 0)
+		{
+		    bootVal = MV_REG_READ(mvCtrlMppRegGet(mppGroup));
+		    if (mvCtrlIsBootFromSPI())
+		    {
+			mppVal &= ~0xffff;
+			bootVal &= 0xffff;
+			mppVal |= bootVal;
+		    }
+		    else if (mvCtrlIsBootFromSPIUseNAND())
+		    {
+			mppVal &= ~0xf0000000;
+			bootVal &= 0xf0000000;
+			mppVal |= bootVal;
+		    }
+		    else if (mvCtrlIsBootFromNAND())
+		    {
+			mppVal &= ~0xffffff;
+			bootVal &= 0xffffff;
+			mppVal |= bootVal;
+		    }
+		}
+
+		if (mppGroup == 2)
+		{
+		    bootVal = MV_REG_READ(mvCtrlMppRegGet(mppGroup));
+		    if (mvCtrlIsBootFromNAND())
+		    {
+			mppVal &= ~0xff00;
+			bootVal &= 0xff00;
+			mppVal |= bootVal;
+		    }
+		}
+
+		MV_REG_WRITE(mvCtrlMppRegGet(mppGroup), mppVal);
+	}
+
+	/* Identify MPPs group */
+	mvBoardMppGroupIdUpdate();
+
+	/* Update MPPs mux relevent only on Marvell DB */
+	if ((boardId == DB_88F6281A_BP_ID) ||
+		(boardId == DB_88F6180A_BP_ID))
+		mvBoardMppMuxSet();
+
+	mppGroupType = mvBoardMppGroupTypeGet(MV_BOARD_MPP_GROUP_1);
+
+	/* Mpp phase 2 */
+	/* Read MPP group from board level and assign to MPP register */
+    if (devId != MV_6180_DEV_ID)
+    {
+        i = 0;
+	for (mppGroup = 2; mppGroup < 5; mppGroup++)
+	{
+		if ((mppGroupType == MV_BOARD_OTHER) ||
+			(boardId == RD_88F6281A_ID) ||
+			(boardId == RD_88F6192A_ID) ||
+                (boardId == RD_88F6190A_ID) ||
+                (boardId == RD_88F6281A_PCAC_ID) ||
+                (boardId == SHEEVA_PLUG_ID))
+			mppVal = mvBoardMppGet(mppGroup);
+		else
+		{
+			mppVal = mppGroup1[mppGroupType][i];
+			i++;
+		}
+
+		/* Group 2 is shared mpp[23:16] */
+		if (mppGroup == 2)
+		{
+                bootVal = MV_REG_READ(mvCtrlMppRegGet(mppGroup));
+			mppVal &= ~0xffff;
+			bootVal &= 0xffff;
+			mppVal |= bootVal;
+		}
+
+		MV_REG_WRITE(mvCtrlMppRegGet(mppGroup), mppVal);
+	}
+    }
+
+	if ((devId == MV_6192_DEV_ID) || (devId == MV_6190_DEV_ID))
+		return MV_OK;
+
+	/* Mpp phase 3 */
+	mppGroupType = mvBoardMppGroupTypeGet(MV_BOARD_MPP_GROUP_2);
+	/* Read MPP group from board level and assign to MPP register */
+	i = 0;
+	for (mppGroup = 4; mppGroup < 7; mppGroup++)
+	{
+		if ((mppGroupType == MV_BOARD_OTHER) ||
+			(boardId == RD_88F6281A_ID) ||
+            (boardId == RD_88F6281A_PCAC_ID) ||
+            (boardId == SHEEVA_PLUG_ID))
+			mppVal = mvBoardMppGet(mppGroup);
+		else
+		{
+			mppVal = mppGroup2[mppGroupType][i];
+			i++;
+		}
+
+		/* Group 4 is shared mpp[35:32] */
+		if (mppGroup == 4)
+		{
+            bootVal = MV_REG_READ(mvCtrlMppRegGet(mppGroup));
+			mppVal &= ~0xffff;
+			bootVal &= 0xffff;
+			mppVal |= bootVal;
+		}
+
+		MV_REG_WRITE(mvCtrlMppRegGet(mppGroup), mppVal);
+	}
+    /* Update SSCG configuration register*/
+    if(mvBoardIdGet() == DB_88F6281A_BP_ID || mvBoardIdGet() == DB_88F6192A_BP_ID ||
+       mvBoardIdGet() == DB_88F6190A_BP_ID || mvBoardIdGet() == DB_88F6180A_BP_ID)
+        MV_REG_WRITE(0x100d8, 0x53);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvCtrlMppRegGet - return reg address of mpp group
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       mppGroup - MPP group.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_U32 - Register address.
+*
+*******************************************************************************/
+MV_U32 mvCtrlMppRegGet(MV_U32 mppGroup)
+{
+        MV_U32 ret;
+
+        switch(mppGroup){
+                case (0):       ret = MPP_CONTROL_REG0;
+                                break;
+                case (1):       ret = MPP_CONTROL_REG1;
+                                break;
+                case (2):       ret = MPP_CONTROL_REG2;
+                                break;
+                case (3):       ret = MPP_CONTROL_REG3;
+                                break;
+                case (4):       ret = MPP_CONTROL_REG4;
+                                break;
+                case (5):       ret = MPP_CONTROL_REG5;
+                                break;
+                case (6):       ret = MPP_CONTROL_REG6;
+                                break;
+                default:        ret = MPP_CONTROL_REG0;
+                                break;
+        }
+        return ret;
+}
+#if defined(MV_INCLUDE_PEX)
+/*******************************************************************************
+* mvCtrlPexMaxIfGet - Get Marvell controller number of PEX interfaces.
+*
+* DESCRIPTION:
+*       This function returns Marvell controller number of PEX interfaces.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Marvell controller number of PEX interfaces. If controller
+*		ID is undefined the function returns '0'.
+*
+*******************************************************************************/
+MV_U32 mvCtrlPexMaxIfGet(MV_VOID)
+{
+
+	return MV_PEX_MAX_IF;
+}
+#endif
+
+#if defined(MV_INCLUDE_GIG_ETH)
+/*******************************************************************************
+* mvCtrlEthMaxPortGet - Get Marvell controller number of etherent ports.
+*
+* DESCRIPTION:
+*       This function returns Marvell controller number of etherent port.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Marvell controller number of etherent port.
+*
+*******************************************************************************/
+MV_U32 mvCtrlEthMaxPortGet(MV_VOID)
+{
+	MV_U32 devId;
+
+	devId = mvCtrlModelGet();
+
+	switch(devId){
+		case MV_6281_DEV_ID:
+			return MV_6281_ETH_MAX_PORTS;
+			break;
+		case MV_6192_DEV_ID:
+			return MV_6192_ETH_MAX_PORTS;
+			break;
+        case MV_6190_DEV_ID:
+            return MV_6190_ETH_MAX_PORTS;
+            break;
+		case MV_6180_DEV_ID:
+			return MV_6180_ETH_MAX_PORTS;
+			break;
+	}
+	return 0;
+
+}
+#endif
+
+#if defined(MV_INCLUDE_XOR)
+/*******************************************************************************
+* mvCtrlXorMaxChanGet - Get Marvell controller number of XOR channels.
+*
+* DESCRIPTION:
+*       This function returns Marvell controller number of XOR channels.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Marvell controller number of XOR channels.
+*
+*******************************************************************************/
+MV_U32 mvCtrlXorMaxChanGet(MV_VOID)
+{
+	return MV_XOR_MAX_CHAN;
+}
+#endif
+
+#if defined(MV_INCLUDE_USB)
+/*******************************************************************************
+* mvCtrlUsbHostMaxGet - Get number of Marvell Usb  controllers
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       returns number of Marvell USB  controllers.
+*
+*******************************************************************************/
+MV_U32 mvCtrlUsbMaxGet(void)
+{
+	return MV_USB_MAX_PORTS;
+}
+#endif
+
+
+#if defined(MV_INCLUDE_NAND)
+/*******************************************************************************
+* mvCtrlNandSupport - Return if this controller has integrated NAND flash support
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if NAND is supported and MV_FALSE otherwise
+*
+*******************************************************************************/
+MV_U32	  mvCtrlNandSupport(MV_VOID)
+{
+	MV_U32 devId;
+
+	devId = mvCtrlModelGet();
+
+	switch(devId){
+		case MV_6281_DEV_ID:
+			return MV_6281_NAND;
+			break;
+		case MV_6192_DEV_ID:
+			return MV_6192_NAND;
+			break;
+        case MV_6190_DEV_ID:
+            return MV_6190_NAND;
+            break;
+		case MV_6180_DEV_ID:
+			return MV_6180_NAND;
+			break;
+	}
+	return 0;
+
+}
+#endif
+
+#if defined(MV_INCLUDE_SDIO)
+/*******************************************************************************
+* mvCtrlSdioSupport - Return if this controller has integrated SDIO flash support
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if SDIO is supported and MV_FALSE otherwise
+*
+*******************************************************************************/
+MV_U32	  mvCtrlSdioSupport(MV_VOID)
+{
+	MV_U32 devId;
+
+	devId = mvCtrlModelGet();
+
+	switch(devId){
+		case MV_6281_DEV_ID:
+			return MV_6281_SDIO;
+			break;
+		case MV_6192_DEV_ID:
+			return MV_6192_SDIO;
+			break;
+        case MV_6190_DEV_ID:
+            return MV_6190_SDIO;
+            break;
+		case MV_6180_DEV_ID:
+			return MV_6180_SDIO;
+			break;
+	}
+	return 0;
+
+}
+#endif
+
+#if defined(MV_INCLUDE_TS)
+/*******************************************************************************
+* mvCtrlTsSupport - Return if this controller has integrated TS flash support
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if TS is supported and MV_FALSE otherwise
+*
+*******************************************************************************/
+MV_U32	  mvCtrlTsSupport(MV_VOID)
+{
+	MV_U32 devId;
+
+	devId = mvCtrlModelGet();
+
+	switch(devId){
+		case MV_6281_DEV_ID:
+			return MV_6281_TS;
+			break;
+		case MV_6192_DEV_ID:
+			return MV_6192_TS;
+			break;
+        case MV_6190_DEV_ID:
+            return MV_6190_TS;
+            break;
+		case MV_6180_DEV_ID:
+			return MV_6180_TS;
+			break;
+	}
+	return 0;
+}
+#endif
+
+#if defined(MV_INCLUDE_AUDIO)
+/*******************************************************************************
+* mvCtrlAudioSupport - Return if this controller has integrated AUDIO flash support
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if AUDIO is supported and MV_FALSE otherwise
+*
+*******************************************************************************/
+MV_U32	  mvCtrlAudioSupport(MV_VOID)
+{
+	MV_U32 devId;
+
+	devId = mvCtrlModelGet();
+
+	switch(devId){
+		case MV_6281_DEV_ID:
+			return MV_6281_AUDIO;
+			break;
+		case MV_6192_DEV_ID:
+			return MV_6192_AUDIO;
+			break;
+        case MV_6190_DEV_ID:
+            return MV_6190_AUDIO;
+            break;
+		case MV_6180_DEV_ID:
+			return MV_6180_AUDIO;
+			break;
+	}
+	return 0;
+
+}
+#endif
+
+#if defined(MV_INCLUDE_TDM)
+/*******************************************************************************
+* mvCtrlTdmSupport - Return if this controller has integrated TDM flash support
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if TDM is supported and MV_FALSE otherwise
+*
+*******************************************************************************/
+MV_U32	  mvCtrlTdmSupport(MV_VOID)
+{
+	MV_U32 devId;
+
+	devId = mvCtrlModelGet();
+
+	switch(devId){
+		case MV_6281_DEV_ID:
+			return MV_6281_TDM;
+			break;
+		case MV_6192_DEV_ID:
+			return MV_6192_TDM;
+			break;
+        case MV_6190_DEV_ID:
+            return MV_6190_TDM;
+            break;
+		case MV_6180_DEV_ID:
+			return MV_6180_TDM;
+			break;
+	}
+	return 0;
+
+}
+#endif
+
+/*******************************************************************************
+* mvCtrlModelGet - Get Marvell controller device model (Id)
+*
+* DESCRIPTION:
+*       This function returns 16bit describing the device model (ID) as defined
+*       in PCI Device and Vendor ID configuration register offset 0x0.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       16bit desscribing Marvell controller ID
+*
+*******************************************************************************/
+MV_U16 mvCtrlModelGet(MV_VOID)
+{
+	MV_U32 devId;
+
+	devId = MV_REG_READ(CHIP_BOND_REG);
+	devId &= PCKG_OPT_MASK;
+
+	switch(devId){
+		case 2:
+			return	MV_6281_DEV_ID;
+			break;
+    case 1:
+            if (((MV_REG_READ(PEX_CFG_DIRECT_ACCESS(0,PEX_DEVICE_AND_VENDOR_ID))& 0xffff0000) >> 16)
+                 == MV_6190_DEV_ID)
+                return	MV_6190_DEV_ID;
+            else
+                return	MV_6192_DEV_ID;
+			break;
+		case 0:
+			return	MV_6180_DEV_ID;
+			break;
+	}
+
+	return 0;
+}
+/*******************************************************************************
+* mvCtrlRevGet - Get Marvell controller device revision number
+*
+* DESCRIPTION:
+*       This function returns 8bit describing the device revision as defined
+*       in PCI Express Class Code and Revision ID Register.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       8bit desscribing Marvell controller revision number
+*
+*******************************************************************************/
+MV_U8 mvCtrlRevGet(MV_VOID)
+{
+	MV_U8 revNum;
+#if defined(MV_INCLUDE_CLK_PWR_CNTRL)
+	/* Check pex power state */
+	MV_U32 pexPower;
+	pexPower = mvCtrlPwrClckGet(PEX_UNIT_ID,0);
+	if (pexPower == MV_FALSE)
+		mvCtrlPwrClckSet(PEX_UNIT_ID, 0, MV_TRUE);
+#endif
+	revNum = (MV_U8)MV_REG_READ(PEX_CFG_DIRECT_ACCESS(0,PCI_CLASS_CODE_AND_REVISION_ID));
+#if defined(MV_INCLUDE_CLK_PWR_CNTRL)
+	/* Return to power off state */
+	if (pexPower == MV_FALSE)
+		mvCtrlPwrClckSet(PEX_UNIT_ID, 0, MV_FALSE);
+#endif
+	return ((revNum & PCCRIR_REVID_MASK) >> PCCRIR_REVID_OFFS);
+}
+
+/*******************************************************************************
+* mvCtrlNameGet - Get Marvell controller name
+*
+* DESCRIPTION:
+*       This function returns a string describing the device model and revision.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       pNameBuff - Buffer to contain device name string. Minimum size 30 chars.
+*
+* RETURN:
+*
+*       MV_ERROR if informantion can not be read.
+*******************************************************************************/
+MV_STATUS mvCtrlNameGet(char *pNameBuff)
+{
+	mvOsSPrintf (pNameBuff, "%s%x Rev %d", SOC_NAME_PREFIX,
+				mvCtrlModelGet(), mvCtrlRevGet());
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvCtrlModelRevGet - Get Controller Model (Device ID) and Revision
+*
+* DESCRIPTION:
+*       This function returns 32bit value describing both Device ID and Revision
+*       as defined in PCI Express Device and Vendor ID Register and device revision
+*	    as defined in PCI Express Class Code and Revision ID Register.
+
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit describing both controller device ID and revision number
+*
+*******************************************************************************/
+MV_U32	mvCtrlModelRevGet(MV_VOID)
+{
+	return ((mvCtrlModelGet() << 16) | mvCtrlRevGet());
+}
+
+/*******************************************************************************
+* mvCtrlModelRevNameGet - Get Marvell controller name
+*
+* DESCRIPTION:
+*       This function returns a string describing the device model and revision.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       pNameBuff - Buffer to contain device name string. Minimum size 30 chars.
+*
+* RETURN:
+*
+*       MV_ERROR if informantion can not be read.
+*******************************************************************************/
+
+MV_STATUS mvCtrlModelRevNameGet(char *pNameBuff)
+{
+
+        switch (mvCtrlModelRevGet())
+        {
+        case MV_6281_A0_ID:
+                mvOsSPrintf (pNameBuff, "%s",MV_6281_A0_NAME);
+                break;
+        case MV_6192_A0_ID:
+                mvOsSPrintf (pNameBuff, "%s",MV_6192_A0_NAME);
+                break;
+        case MV_6180_A0_ID:
+                mvOsSPrintf (pNameBuff, "%s",MV_6180_A0_NAME);
+                break;
+        case MV_6190_A0_ID:
+                mvOsSPrintf (pNameBuff, "%s",MV_6190_A0_NAME);
+                break;
+        case MV_6281_A1_ID:
+                mvOsSPrintf (pNameBuff, "%s",MV_6281_A1_NAME);
+                break;
+        case MV_6192_A1_ID:
+                mvOsSPrintf (pNameBuff, "%s",MV_6192_A1_NAME);
+                break;
+        case MV_6180_A1_ID:
+                mvOsSPrintf (pNameBuff, "%s",MV_6180_A1_NAME);
+                break;
+        case MV_6190_A1_ID:
+                mvOsSPrintf (pNameBuff, "%s",MV_6190_A1_NAME);
+                break;
+        default:
+                mvCtrlNameGet(pNameBuff);
+                break;
+        }
+
+        return MV_OK;
+}
+
+
+/*******************************************************************************
+* ctrlWinOverlapTest - Test address windows for overlaping.
+*
+* DESCRIPTION:
+*       This function checks the given two address windows for overlaping.
+*
+* INPUT:
+*       pAddrWin1 - Address window 1.
+*       pAddrWin2 - Address window 2.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*
+*       MV_TRUE if address window overlaps, MV_FALSE otherwise.
+*******************************************************************************/
+MV_STATUS ctrlWinOverlapTest(MV_ADDR_WIN *pAddrWin1, MV_ADDR_WIN *pAddrWin2)
+{
+    MV_U32 winBase1, winBase2;
+    MV_U32 winTop1, winTop2;
+
+	/* check if we have overflow than 4G*/
+	if (((0xffffffff - pAddrWin1->baseLow) < pAddrWin1->size-1)||
+	   ((0xffffffff - pAddrWin2->baseLow) < pAddrWin2->size-1))
+	{
+		return MV_TRUE;
+	}
+
+    winBase1 = pAddrWin1->baseLow;
+    winBase2 = pAddrWin2->baseLow;
+    winTop1  = winBase1 + pAddrWin1->size-1;
+    winTop2  = winBase2 + pAddrWin2->size-1;
+
+
+    if (((winBase1 <= winTop2 ) && ( winTop2 <= winTop1)) ||
+        ((winBase1 <= winBase2) && (winBase2 <= winTop1)))
+    {
+        return MV_TRUE;
+    }
+    else
+    {
+        return MV_FALSE;
+    }
+}
+
+/*******************************************************************************
+* ctrlWinWithinWinTest - Test address windows for overlaping.
+*
+* DESCRIPTION:
+*       This function checks the given win1 boundries is within
+*		win2 boundries.
+*
+* INPUT:
+*       pAddrWin1 - Address window 1.
+*       pAddrWin2 - Address window 2.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*
+*       MV_TRUE if found win1 inside win2, MV_FALSE otherwise.
+*******************************************************************************/
+MV_STATUS ctrlWinWithinWinTest(MV_ADDR_WIN *pAddrWin1, MV_ADDR_WIN *pAddrWin2)
+{
+    MV_U32 winBase1, winBase2;
+    MV_U32 winTop1, winTop2;
+
+    winBase1 = pAddrWin1->baseLow;
+    winBase2 = pAddrWin2->baseLow;
+    winTop1  = winBase1 + pAddrWin1->size -1;
+    winTop2  = winBase2 + pAddrWin2->size -1;
+
+    if (((winBase1 >= winBase2 ) && ( winBase1 <= winTop2)) ||
+        ((winTop1  >= winBase2) && (winTop1 <= winTop2)))
+    {
+        return MV_TRUE;
+    }
+    else
+    {
+        return MV_FALSE;
+    }
+}
+
+static const char* cntrlName[] = TARGETS_NAME_ARRAY;
+
+/*******************************************************************************
+* mvCtrlTargetNameGet - Get Marvell controller target name
+*
+* DESCRIPTION:
+*       This function convert the trget enumeration to string.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Target name (const MV_8 *)
+*******************************************************************************/
+const MV_8* mvCtrlTargetNameGet( MV_TARGET target )
+{
+
+	if (target >= MAX_TARGETS)
+	{
+		return "target unknown";
+	}
+
+	return cntrlName[target];
+}
+
+/*******************************************************************************
+* mvCtrlAddrDecShow - Print the Controller units address decode map.
+*
+* DESCRIPTION:
+*		This function the Controller units address decode map.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_VOID mvCtrlAddrDecShow(MV_VOID)
+{
+    mvCpuIfAddDecShow();
+    mvAhbToMbusAddDecShow();
+#if defined(MV_INCLUDE_PEX)
+	mvPexAddrDecShow();
+#endif
+#if defined(MV_INCLUDE_USB)
+	mvUsbAddrDecShow();
+#endif
+#if defined(MV_INCLUDE_GIG_ETH)
+	mvEthAddrDecShow();
+#endif
+#if defined(MV_INCLUDE_XOR)
+	mvXorAddrDecShow();
+#endif
+#if defined(MV_INCLUDE_SATA)
+    mvSataAddrDecShow();
+#endif
+#if defined(MV_INCLUDE_AUDIO)
+    mvAudioAddrDecShow();
+#endif
+#if defined(MV_INCLUDE_TS)
+    mvTsuAddrDecShow();
+#endif
+}
+
+/*******************************************************************************
+* ctrlSizeToReg - Extract size value for register assignment.
+*
+* DESCRIPTION:
+*       Address decode size parameter must be programed from LSB to MSB as
+*       sequence of 1's followed by sequence of 0's. The number of 1's
+*       specifies the size of the window in 64 KB granularity (e.g. a
+*       value of 0x00ff specifies 256x64k = 16 MB).
+*       This function extract the size value from the size parameter according
+*		to given aligment paramter. For example for size 0x1000000 (16MB) and
+*		aligment 0x10000 (64KB) the function will return 0x00FF.
+*
+* INPUT:
+*       size - Size.
+*		alignment - Size alignment.	Note that alignment must be power of 2!
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit describing size register value correspond to size parameter.
+*		If value is '-1' size parameter or aligment are invalid.
+*******************************************************************************/
+MV_U32	ctrlSizeToReg(MV_U32 size, MV_U32 alignment)
+{
+	MV_U32 retVal;
+
+	/* Check size parameter alignment		*/
+	if ((0 == size) || (MV_IS_NOT_ALIGN(size, alignment)))
+	{
+		DB(mvOsPrintf("ctrlSizeToReg: ERR. Size is zero or not aligned.\n"));
+		return -1;
+	}
+
+	/* Take out the "alignment" portion out of the size parameter */
+	alignment--;	/* Now the alignmet is a sequance of '1' (e.g. 0xffff) 		*/
+					/* and size is 0x1000000 (16MB) for example	*/
+	while(alignment & 1)	/* Check that alignmet LSB is set	*/
+	{
+		size = (size >> 1); /* If LSB is set, move 'size' one bit to right	*/
+		alignment = (alignment >> 1);
+	}
+
+	/* If after the alignment first '0' was met we still have '1' in 		*/
+	/* it then aligment is invalid (not power of 2) 				*/
+	if (alignment)
+	{
+		DB(mvOsPrintf("ctrlSizeToReg: ERR. Alignment parameter 0x%x invalid.\n",
+			(MV_U32)alignment));
+		return -1;
+	}
+
+	/* Now the size is shifted right according to aligment: 0x0100			*/
+	size--;         /* Now the size is a sequance of '1': 0x00ff 			*/
+
+	retVal = size ;
+
+	/* Check that LSB to MSB is sequence of 1's followed by sequence of 0's		*/
+	while(size & 1)	/* Check that LSB is set	*/
+	{
+		size = (size >> 1); /* If LSB is set, move one bit to the right		*/
+	}
+
+    if (size) /* Sequance of 1's is over. Check that we have no other 1's		*/
+	{
+		DB(mvOsPrintf("ctrlSizeToReg: ERR. Size parameter 0x%x invalid.\n",
+                                                                        size));
+		return -1;
+	}
+
+    return retVal;
+
+}
+
+/*******************************************************************************
+* ctrlRegToSize - Extract size value from register value.
+*
+* DESCRIPTION:
+*       This function extract a size value from the register size parameter
+*		according to given aligment paramter. For example for register size
+*		value 0xff and aligment 0x10000 the function will return 0x01000000.
+*
+* INPUT:
+*       regSize   - Size as in register format.	See ctrlSizeToReg.
+*		alignment - Size alignment.	Note that alignment must be power of 2!
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit describing size.
+*		If value is '-1' size parameter or aligment are invalid.
+*******************************************************************************/
+MV_U32	ctrlRegToSize(MV_U32 regSize, MV_U32 alignment)
+{
+	MV_U32 temp;
+
+	/* Check that LSB to MSB is sequence of 1's followed by sequence of 0's		*/
+	temp = regSize;		/* Now the size is a sequance of '1': 0x00ff		*/
+
+	while(temp & 1)	/* Check that LSB is set					*/
+	{
+		temp = (temp >> 1); /* If LSB is set, move one bit to the right		*/
+	}
+
+    if (temp) /* Sequance of 1's is over. Check that we have no other 1's		*/
+	{
+		DB(mvOsPrintf("ctrlRegToSize: ERR. Size parameter 0x%x invalid.\n",
+					regSize));
+		return -1;
+	}
+
+
+	/* Check that aligment is a power of two					*/
+	temp = alignment - 1;/* Now the alignmet is a sequance of '1' (0xffff) 		*/
+
+	while(temp & 1)	/* Check that alignmet LSB is set				*/
+	{
+		temp = (temp >> 1); /* If LSB is set, move 'size' one bit to right	*/
+	}
+
+	/* If after the 'temp' first '0' was met we still have '1' in 'temp'		*/
+	/* then 'temp' is invalid (not power of 2) 					*/
+	if (temp)
+	{
+		DB(mvOsPrintf("ctrlSizeToReg: ERR. Alignment parameter 0x%x invalid.\n",
+					alignment));
+		return -1;
+	}
+
+	regSize++;      /* Now the size is 0x0100					*/
+
+	/* Add in the "alignment" portion to the register size parameter 		*/
+	alignment--;	/* Now the alignmet is a sequance of '1' (e.g. 0xffff) 		*/
+
+	while(alignment & 1)	/* Check that alignmet LSB is set			*/
+	{
+		regSize   = (regSize << 1); /* LSB is set, move 'size' one bit left	*/
+		alignment = (alignment >> 1);
+	}
+
+    return regSize;
+}
+
+
+/*******************************************************************************
+* ctrlSizeRegRoundUp - Round up given size
+*
+* DESCRIPTION:
+*       This function round up a given size to a size that fits the
+*       restrictions of size format given an aligment parameter.
+*		to given aligment paramter. For example for size parameter 0xa1000 and
+*		aligment 0x1000 the function will return 0xFF000.
+*
+* INPUT:
+*       size - Size.
+*		alignment - Size alignment.	Note that alignment must be power of 2!
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit describing size value correspond to size in register.
+*******************************************************************************/
+MV_U32	ctrlSizeRegRoundUp(MV_U32 size, MV_U32 alignment)
+{
+	MV_U32 msbBit = 0;
+    MV_U32 retSize;
+
+    /* Check if size parameter is already comply with restriction		*/
+	if (!(-1 == ctrlSizeToReg(size, alignment)))
+	{
+		return size;
+	}
+
+    while(size)
+	{
+		size = (size >> 1);
+        msbBit++;
+	}
+
+    retSize = (1 << msbBit);
+
+    if (retSize < alignment)
+    {
+        return alignment;
+    }
+    else
+    {
+        return retSize;
+    }
+}
+/*******************************************************************************
+* mvCtrlSysRstLengthCounterGet - Return number of milliseconds the reset button
+* 				 was pressed and clear counter
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN: number of milliseconds the reset button was pressed
+*******************************************************************************/
+MV_U32	mvCtrlSysRstLengthCounterGet(MV_VOID)
+{
+	static volatile MV_U32 Count = 0;
+
+	if(!Count) {
+		Count = (MV_REG_READ(SYSRST_LENGTH_COUNTER_REG) & SLCR_COUNT_MASK);
+		Count = (Count / (MV_BOARD_REFCLK_25MHZ / 1000));
+		/* clear counter for next boot */
+		MV_REG_BIT_SET(SYSRST_LENGTH_COUNTER_REG, SLCR_CLR_MASK);
+	}
+
+	DB(mvOsPrintf("mvCtrlSysRstLengthCounterGet: Reset button was pressed for %u milliseconds\n", Count));
+
+	return Count;
+}
+
+MV_BOOL	  mvCtrlIsBootFromSPI(MV_VOID)
+{
+    MV_U32 satr = 0;
+    satr = MV_REG_READ(MPP_SAMPLE_AT_RESET);
+    if(mvCtrlModelGet() == MV_6180_DEV_ID)
+    {
+        if (MSAR_BOOT_MODE_6180(satr) == MSAR_BOOT_SPI_WITH_BOOTROM_6180)
+            return MV_TRUE;
+        else
+            return MV_FALSE;
+    }
+    satr = satr & MSAR_BOOT_MODE_MASK;
+    if (satr == MSAR_BOOT_SPI_WITH_BOOTROM)
+        return MV_TRUE;
+    else
+        return MV_FALSE;
+}
+
+MV_BOOL	  mvCtrlIsBootFromSPIUseNAND(MV_VOID)
+{
+    MV_U32 satr = 0;
+    if(mvCtrlModelGet() == MV_6180_DEV_ID)
+        return MV_FALSE;
+    satr = MV_REG_READ(MPP_SAMPLE_AT_RESET);
+    satr = satr & MSAR_BOOT_MODE_MASK;
+
+    if (satr == MSAR_BOOT_SPI_USE_NAND_WITH_BOOTROM)
+        return MV_TRUE;
+    else
+        return MV_FALSE;
+}
+
+MV_BOOL	  mvCtrlIsBootFromNAND(MV_VOID)
+{
+    MV_U32 satr = 0;
+    satr = MV_REG_READ(MPP_SAMPLE_AT_RESET);
+    if(mvCtrlModelGet() == MV_6180_DEV_ID)
+    {
+        if (MSAR_BOOT_MODE_6180(satr) == MSAR_BOOT_NAND_WITH_BOOTROM_6180)
+            return MV_TRUE;
+        else
+            return MV_FALSE;
+    }
+    satr = satr & MSAR_BOOT_MODE_MASK;
+    if ((satr == MSAR_BOOT_NAND_WITH_BOOTROM))
+        return MV_TRUE;
+    else
+        return MV_FALSE;
+}
+
+#if defined(MV_INCLUDE_CLK_PWR_CNTRL)
+/*******************************************************************************
+* mvCtrlPwrSaveOn - Set Power save mode
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+*******************************************************************************/
+MV_VOID   mvCtrlPwrSaveOn(MV_VOID)
+{
+	unsigned long old,temp;
+	/* Disable int */
+	__asm__ __volatile__("mrs %0, cpsr\n"
+			     "orr %1, %0, #0xc0\n"
+			     "msr cpsr_c, %1"
+			     : "=r" (old), "=r" (temp)
+			     :
+			     : "memory");
+
+	/* Set SoC in power save */
+	MV_REG_BIT_SET(POWER_MNG_CTRL_REG, BIT11);
+	/* Wait for int */
+	__asm__ __volatile__("mcr    p15, 0, r0, c7, c0, 4");
+
+	/* Enabled int */
+	__asm__ __volatile__("msr cpsr_c, %0"
+			     :
+			     : "r" (old)
+			     : "memory");
+}
+
+
+
+/*******************************************************************************
+* mvCtrlPwrSaveOff - Go out of power save mode
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+*******************************************************************************/
+MV_VOID   mvCtrlPwrSaveOff(MV_VOID)
+{
+	unsigned long old,temp;
+	/* Disable int */
+	__asm__ __volatile__("mrs %0, cpsr\n"
+			     "orr %1, %0, #0xc0\n"
+			     "msr cpsr_c, %1"
+			     : "=r" (old), "=r" (temp)
+			     :
+			     : "memory");
+
+	/* Set SoC in power save */
+	MV_REG_BIT_RESET(POWER_MNG_CTRL_REG, BIT11);
+	/* Wait for int */
+	__asm__ __volatile__("mcr    p15, 0, r0, c7, c0, 4");
+
+	/* Enabled int */
+	__asm__ __volatile__("msr cpsr_c, %0"
+			     :
+			     : "r" (old)
+			     : "memory");
+}
+
+/*******************************************************************************
+* mvCtrlPwrClckSet - Set Power State for specific Unit
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+*******************************************************************************/
+MV_VOID   mvCtrlPwrClckSet(MV_UNIT_ID unitId, MV_U32 index, MV_BOOL enable)
+{
+	switch (unitId)
+    {
+#if defined(MV_INCLUDE_PEX)
+	case PEX_UNIT_ID:
+		if (enable == MV_FALSE)
+		{
+			MV_REG_BIT_RESET(POWER_MNG_CTRL_REG, PMC_PEXSTOPCLOCK_MASK);
+		}
+		else
+		{
+			MV_REG_BIT_SET(POWER_MNG_CTRL_REG, PMC_PEXSTOPCLOCK_MASK);
+		}
+		break;
+#endif
+#if defined(MV_INCLUDE_GIG_ETH)
+	case ETH_GIG_UNIT_ID:
+		if (enable == MV_FALSE)
+		{
+			MV_REG_BIT_RESET(POWER_MNG_CTRL_REG, PMC_GESTOPCLOCK_MASK(index));
+		}
+		else
+		{
+			MV_REG_BIT_SET(POWER_MNG_CTRL_REG, PMC_GESTOPCLOCK_MASK(index));
+		}
+		break;
+#endif
+#if defined(MV_INCLUDE_INTEG_SATA)
+	case SATA_UNIT_ID:
+		if (enable == MV_FALSE)
+		{
+			MV_REG_BIT_RESET(POWER_MNG_CTRL_REG, PMC_SATASTOPCLOCK_MASK(index));
+		}
+		else
+		{
+			MV_REG_BIT_SET(POWER_MNG_CTRL_REG, PMC_SATASTOPCLOCK_MASK(index));
+		}
+		break;
+#endif
+#if defined(MV_INCLUDE_CESA)
+	case CESA_UNIT_ID:
+		if (enable == MV_FALSE)
+		{
+			MV_REG_BIT_RESET(POWER_MNG_CTRL_REG, PMC_SESTOPCLOCK_MASK);
+		}
+		else
+		{
+			MV_REG_BIT_SET(POWER_MNG_CTRL_REG, PMC_SESTOPCLOCK_MASK);
+		}
+		break;
+#endif
+#if defined(MV_INCLUDE_USB)
+	case USB_UNIT_ID:
+		if (enable == MV_FALSE)
+		{
+			MV_REG_BIT_RESET(POWER_MNG_CTRL_REG, PMC_USBSTOPCLOCK_MASK);
+		}
+		else
+		{
+			MV_REG_BIT_SET(POWER_MNG_CTRL_REG, PMC_USBSTOPCLOCK_MASK);
+		}
+		break;
+#endif
+#if defined(MV_INCLUDE_AUDIO)
+	case AUDIO_UNIT_ID:
+		if (enable == MV_FALSE)
+		{
+			MV_REG_BIT_RESET(POWER_MNG_CTRL_REG, PMC_AUDIOSTOPCLOCK_MASK);
+		}
+		else
+		{
+			MV_REG_BIT_SET(POWER_MNG_CTRL_REG, PMC_AUDIOSTOPCLOCK_MASK);
+		}
+		break;
+#endif
+#if defined(MV_INCLUDE_TS)
+	case TS_UNIT_ID:
+		if (enable == MV_FALSE)
+		{
+			MV_REG_BIT_RESET(POWER_MNG_CTRL_REG, PMC_TSSTOPCLOCK_MASK);
+		}
+		else
+		{
+			MV_REG_BIT_SET(POWER_MNG_CTRL_REG, PMC_TSSTOPCLOCK_MASK);
+		}
+		break;
+#endif
+#if defined(MV_INCLUDE_SDIO)
+	case SDIO_UNIT_ID:
+		if (enable == MV_FALSE)
+		{
+			MV_REG_BIT_RESET(POWER_MNG_CTRL_REG, PMC_SDIOSTOPCLOCK_MASK);
+		}
+		else
+		{
+			MV_REG_BIT_SET(POWER_MNG_CTRL_REG, PMC_SDIOSTOPCLOCK_MASK);
+		}
+		break;
+#endif
+#if defined(MV_INCLUDE_TDM)
+	case TDM_UNIT_ID:
+		if (enable == MV_FALSE)
+		{
+			MV_REG_BIT_RESET(POWER_MNG_CTRL_REG, PMC_TDMSTOPCLOCK_MASK);
+		}
+		else
+		{
+			MV_REG_BIT_SET(POWER_MNG_CTRL_REG, PMC_TDMSTOPCLOCK_MASK);
+		}
+		break;
+#endif
+
+	default:
+
+		break;
+
+	}
+}
+
+/*******************************************************************************
+* mvCtrlPwrClckGet - Get Power State of specific Unit
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+******************************************************************************/
+MV_BOOL		mvCtrlPwrClckGet(MV_UNIT_ID unitId, MV_U32 index)
+{
+	MV_U32 reg = MV_REG_READ(POWER_MNG_CTRL_REG);
+	MV_BOOL state = MV_TRUE;
+
+	switch (unitId)
+    {
+#if defined(MV_INCLUDE_PEX)
+	case PEX_UNIT_ID:
+		if ((reg & PMC_PEXSTOPCLOCK_MASK) == PMC_PEXSTOPCLOCK_STOP)
+		{
+			state = MV_FALSE;
+		}
+		else state = MV_TRUE;
+
+		break;
+#endif
+#if defined(MV_INCLUDE_GIG_ETH)
+	case ETH_GIG_UNIT_ID:
+		if ((reg & PMC_GESTOPCLOCK_MASK(index)) == PMC_GESTOPCLOCK_STOP(index))
+		{
+			state = MV_FALSE;
+		}
+		else state = MV_TRUE;
+		break;
+#endif
+#if defined(MV_INCLUDE_SATA)
+	case SATA_UNIT_ID:
+		if ((reg & PMC_SATASTOPCLOCK_MASK(index)) == PMC_SATASTOPCLOCK_STOP(index))
+		{
+			state = MV_FALSE;
+		}
+		else state = MV_TRUE;
+		break;
+#endif
+#if defined(MV_INCLUDE_CESA)
+	case CESA_UNIT_ID:
+		if ((reg & PMC_SESTOPCLOCK_MASK) == PMC_SESTOPCLOCK_STOP)
+		{
+			state = MV_FALSE;
+		}
+		else state = MV_TRUE;
+		break;
+#endif
+#if defined(MV_INCLUDE_USB)
+	case USB_UNIT_ID:
+		if ((reg & PMC_USBSTOPCLOCK_MASK) == PMC_USBSTOPCLOCK_STOP)
+		{
+			state = MV_FALSE;
+		}
+		else state = MV_TRUE;
+		break;
+#endif
+#if defined(MV_INCLUDE_AUDIO)
+	case AUDIO_UNIT_ID:
+		if ((reg & PMC_AUDIOSTOPCLOCK_MASK) == PMC_AUDIOSTOPCLOCK_STOP)
+		{
+			state = MV_FALSE;
+		}
+		else state = MV_TRUE;
+		break;
+#endif
+#if defined(MV_INCLUDE_TS)
+	case TS_UNIT_ID:
+		if ((reg & PMC_TSSTOPCLOCK_MASK) == PMC_TSSTOPCLOCK_STOP)
+		{
+			state = MV_FALSE;
+		}
+		else state = MV_TRUE;
+		break;
+#endif
+#if defined(MV_INCLUDE_SDIO)
+	case SDIO_UNIT_ID:
+		if ((reg & PMC_SDIOSTOPCLOCK_MASK)== PMC_SDIOSTOPCLOCK_STOP)
+		{
+			state = MV_FALSE;
+		}
+		else state = MV_TRUE;
+		break;
+#endif
+#if defined(MV_INCLUDE_TDM)
+	case TDM_UNIT_ID:
+		if ((reg & PMC_TDMSTOPCLOCK_MASK) == PMC_TDMSTOPCLOCK_STOP)
+		{
+			state = MV_FALSE;
+		}
+		else state = MV_TRUE;
+		break;
+#endif
+
+	default:
+		state = MV_TRUE;
+		break;
+	}
+
+
+	return state;
+}
+/*******************************************************************************
+* mvCtrlPwrMemSet - Set Power State for memory on specific Unit
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+*******************************************************************************/
+MV_VOID   mvCtrlPwrMemSet(MV_UNIT_ID unitId, MV_U32 index, MV_BOOL enable)
+{
+	switch (unitId)
+    {
+#if defined(MV_INCLUDE_PEX)
+	case PEX_UNIT_ID:
+		if (enable == MV_FALSE)
+		{
+			MV_REG_BIT_SET(POWER_MNG_MEM_CTRL_REG, PMC_PEXSTOPMEM_MASK);
+		}
+		else
+		{
+			MV_REG_BIT_RESET(POWER_MNG_MEM_CTRL_REG, PMC_PEXSTOPMEM_MASK);
+		}
+		break;
+#endif
+#if defined(MV_INCLUDE_GIG_ETH)
+	case ETH_GIG_UNIT_ID:
+		if (enable == MV_FALSE)
+		{
+			MV_REG_BIT_SET(POWER_MNG_MEM_CTRL_REG, PMC_GESTOPMEM_MASK(index));
+		}
+		else
+		{
+			MV_REG_BIT_RESET(POWER_MNG_MEM_CTRL_REG, PMC_GESTOPMEM_MASK(index));
+		}
+		break;
+#endif
+#if defined(MV_INCLUDE_INTEG_SATA)
+	case SATA_UNIT_ID:
+		if (enable == MV_FALSE)
+		{
+			MV_REG_BIT_SET(POWER_MNG_MEM_CTRL_REG, PMC_SATASTOPMEM_MASK(index));
+		}
+		else
+		{
+			MV_REG_BIT_RESET(POWER_MNG_MEM_CTRL_REG, PMC_SATASTOPMEM_MASK(index));
+		}
+		break;
+#endif
+#if defined(MV_INCLUDE_CESA)
+	case CESA_UNIT_ID:
+		if (enable == MV_FALSE)
+		{
+			MV_REG_BIT_SET(POWER_MNG_MEM_CTRL_REG, PMC_SESTOPMEM_MASK);
+		}
+		else
+		{
+			MV_REG_BIT_RESET(POWER_MNG_MEM_CTRL_REG, PMC_SESTOPMEM_MASK);
+		}
+		break;
+#endif
+#if defined(MV_INCLUDE_USB)
+	case USB_UNIT_ID:
+		if (enable == MV_FALSE)
+		{
+			MV_REG_BIT_SET(POWER_MNG_MEM_CTRL_REG, PMC_USBSTOPMEM_MASK);
+		}
+		else
+		{
+			MV_REG_BIT_RESET(POWER_MNG_MEM_CTRL_REG, PMC_USBSTOPMEM_MASK);
+		}
+		break;
+#endif
+#if defined(MV_INCLUDE_AUDIO)
+	case AUDIO_UNIT_ID:
+		if (enable == MV_FALSE)
+		{
+			MV_REG_BIT_SET(POWER_MNG_MEM_CTRL_REG, PMC_AUDIOSTOPMEM_MASK);
+		}
+		else
+		{
+			MV_REG_BIT_RESET(POWER_MNG_MEM_CTRL_REG, PMC_AUDIOSTOPMEM_MASK);
+		}
+		break;
+#endif
+#if defined(MV_INCLUDE_XOR)
+	case XOR_UNIT_ID:
+		if (enable == MV_FALSE)
+		{
+			MV_REG_BIT_SET(POWER_MNG_MEM_CTRL_REG, PMC_XORSTOPMEM_MASK(index));
+		}
+		else
+		{
+			MV_REG_BIT_RESET(POWER_MNG_MEM_CTRL_REG, PMC_XORSTOPMEM_MASK(index));
+		}
+		break;
+#endif
+	default:
+
+		break;
+
+	}
+}
+
+/*******************************************************************************
+* mvCtrlPwrMemGet - Get Power State of memory on specific Unit
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+******************************************************************************/
+MV_BOOL		mvCtrlPwrMemGet(MV_UNIT_ID unitId, MV_U32 index)
+{
+	MV_U32 reg = MV_REG_READ(POWER_MNG_MEM_CTRL_REG);
+	MV_BOOL state = MV_TRUE;
+
+	switch (unitId)
+    {
+#if defined(MV_INCLUDE_PEX)
+	case PEX_UNIT_ID:
+		if ((reg & PMC_PEXSTOPMEM_MASK) == PMC_PEXSTOPMEM_STOP)
+		{
+			state = MV_FALSE;
+		}
+		else state = MV_TRUE;
+
+		break;
+#endif
+#if defined(MV_INCLUDE_GIG_ETH)
+	case ETH_GIG_UNIT_ID:
+		if ((reg & PMC_GESTOPMEM_MASK(index)) == PMC_GESTOPMEM_STOP(index))
+		{
+			state = MV_FALSE;
+		}
+		else state = MV_TRUE;
+		break;
+#endif
+#if defined(MV_INCLUDE_SATA)
+	case SATA_UNIT_ID:
+		if ((reg & PMC_SATASTOPMEM_MASK(index)) == PMC_SATASTOPMEM_STOP(index))
+		{
+			state = MV_FALSE;
+		}
+		else state = MV_TRUE;
+		break;
+#endif
+#if defined(MV_INCLUDE_CESA)
+	case CESA_UNIT_ID:
+		if ((reg & PMC_SESTOPMEM_MASK) == PMC_SESTOPMEM_STOP)
+		{
+			state = MV_FALSE;
+		}
+		else state = MV_TRUE;
+		break;
+#endif
+#if defined(MV_INCLUDE_USB)
+	case USB_UNIT_ID:
+		if ((reg & PMC_USBSTOPMEM_MASK) == PMC_USBSTOPMEM_STOP)
+		{
+			state = MV_FALSE;
+		}
+		else state = MV_TRUE;
+		break;
+#endif
+#if defined(MV_INCLUDE_AUDIO)
+	case AUDIO_UNIT_ID:
+		if ((reg & PMC_AUDIOSTOPMEM_MASK) == PMC_AUDIOSTOPMEM_STOP)
+		{
+			state = MV_FALSE;
+		}
+		else state = MV_TRUE;
+		break;
+#endif
+#if defined(MV_INCLUDE_XOR)
+	case XOR_UNIT_ID:
+		if ((reg & PMC_XORSTOPMEM_MASK(index)) == PMC_XORSTOPMEM_STOP(index))
+		{
+			state = MV_FALSE;
+		}
+		else state = MV_TRUE;
+		break;
+#endif
+
+	default:
+		state = MV_TRUE;
+		break;
+	}
+
+
+	return state;
+}
+#else
+MV_VOID   mvCtrlPwrClckSet(MV_UNIT_ID unitId, MV_U32 index, MV_BOOL enable) {return;}
+MV_BOOL	  mvCtrlPwrClckGet(MV_UNIT_ID unitId, MV_U32 index) {return MV_TRUE;}
+#endif /* #if defined(MV_INCLUDE_CLK_PWR_CNTRL) */
+
+
+/*******************************************************************************
+* mvMPPConfigToSPI - Change MPP[3:0] configuration to SPI mode
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+******************************************************************************/
+MV_VOID   mvMPPConfigToSPI(MV_VOID)
+{
+	MV_U32 mppVal = 0;
+	MV_U32 bootVal = 0;
+
+    if(!mvCtrlIsBootFromSPIUseNAND())
+        return;
+    mppVal = 0x00002220; /* Set MPP [3:1] to SPI mode */
+    bootVal = MV_REG_READ(mvCtrlMppRegGet(0));
+    bootVal &= 0xffff000f;
+        mppVal |= bootVal;
+
+    MV_REG_WRITE(mvCtrlMppRegGet(0), mppVal);
+}
+
+
+/*******************************************************************************
+* mvMPPConfigToDefault - Change MPP[7:0] configuration to default configuration
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+******************************************************************************/
+MV_VOID   mvMPPConfigToDefault(MV_VOID)
+{
+	MV_U32 mppVal = 0;
+	MV_U32 bootVal = 0;
+
+    if(!mvCtrlIsBootFromSPIUseNAND())
+        return;
+    mppVal = mvBoardMppGet(0);
+    bootVal = MV_REG_READ(mvCtrlMppRegGet(0));
+    mppVal &= ~0xffff000f;
+    bootVal &= 0xffff000f;
+        mppVal |= bootVal;
+
+    MV_REG_WRITE(mvCtrlMppRegGet(0), mppVal);
+}
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvLib.h b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvLib.h
new file mode 100644
index 000000000000..2c7e8fa88428
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvLib.h
@@ -0,0 +1,185 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvCtrlEnvLibh
+#define __INCmvCtrlEnvLibh
+
+/* includes */
+#include "mvSysHwConfig.h"
+#include "mvCommon.h"
+#include "mvTypes.h"
+#include "mvOs.h"
+#include "boardEnv/mvBoardEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#include "ctrlEnv/mvCtrlEnvRegs.h"
+#include "ctrlEnv/mvCtrlEnvAddrDec.h"
+
+
+/* typedefs */
+
+/* This enumerator describes the possible HW cache coherency policies the   */
+/* controllers supports.                                                    */
+typedef enum _mvCachePolicy
+{
+    NO_COHERENCY,   /* No HW cache coherency support                        */
+    WT_COHERENCY,   /* HW cache coherency supported in Write Through policy */
+    WB_COHERENCY    /* HW cache coherency supported in Write Back policy    */
+}MV_CACHE_POLICY;
+
+
+/* The swapping is referred to a 64-bit words (as this is the controller    */
+/* internal data path width). This enumerator describes the possible        */
+/* data swap types. Below is an example of the data 0x0011223344556677      */
+typedef enum _mvSwapType
+{
+    MV_BYTE_SWAP,       /* Byte Swap                77 66 55 44 33 22 11 00 */
+    MV_NO_SWAP,         /* No swapping              00 11 22 33 44 55 66 77 */
+    MV_BYTE_WORD_SWAP,  /* Both byte and word swap  33 22 11 00 77 66 55 44 */
+    MV_WORD_SWAP,       /* Word swap                44 55 66 77 00 11 22 33 */
+    SWAP_TYPE_MAX	/* Delimiter for this enumerator					*/
+}MV_SWAP_TYPE;
+
+/* This structure describes access rights for Access protection windows     */
+/* that can be found in IDMA, XOR, Ethernet and MPSC units.                 */
+/* Note that the permission enumerator coresponds to its register format.   */
+/* For example, Read only premission is presented as "1" in register field. */
+typedef enum _mvAccessRights
+{
+	NO_ACCESS_ALLOWED = 0,  /* No access allowed            */
+	READ_ONLY         = 1,  /* Read only permission         */
+	ACC_RESERVED	  = 2,	/* Reserved access right		*/
+	FULL_ACCESS       = 3,  /* Read and Write permission    */
+	MAX_ACC_RIGHTS
+}MV_ACCESS_RIGHTS;
+
+
+/* mcspLib.h API list */
+
+MV_STATUS mvCtrlEnvInit(MV_VOID);
+MV_U32    mvCtrlMppRegGet(MV_U32 mppGroup);
+
+#if defined(MV_INCLUDE_PEX)
+MV_U32	  mvCtrlPexMaxIfGet(MV_VOID);
+#else
+#define   mvCtrlPexMaxIfGet()	(0)
+#endif
+
+#define   mvCtrlPciIfMaxIfGet()	(0)
+
+#if defined(MV_INCLUDE_GIG_ETH)
+MV_U32	  mvCtrlEthMaxPortGet(MV_VOID);
+#endif
+#if defined(MV_INCLUDE_XOR)
+MV_U32 mvCtrlXorMaxChanGet(MV_VOID);
+#endif
+#if defined(MV_INCLUDE_USB)
+MV_U32 	  mvCtrlUsbMaxGet(MV_VOID);
+#endif
+#if defined(MV_INCLUDE_NAND)
+MV_U32	  mvCtrlNandSupport(MV_VOID);
+#endif
+#if defined(MV_INCLUDE_SDIO)
+MV_U32	  mvCtrlSdioSupport(MV_VOID);
+#endif
+#if defined(MV_INCLUDE_TS)
+MV_U32	  mvCtrlTsSupport(MV_VOID);
+#endif
+#if defined(MV_INCLUDE_AUDIO)
+MV_U32	  mvCtrlAudioSupport(MV_VOID);
+#endif
+#if defined(MV_INCLUDE_TDM)
+MV_U32	  mvCtrlTdmSupport(MV_VOID);
+#endif
+
+MV_U16    mvCtrlModelGet(MV_VOID);
+MV_U8     mvCtrlRevGet(MV_VOID);
+MV_STATUS mvCtrlNameGet(char *pNameBuff);
+MV_U32    mvCtrlModelRevGet(MV_VOID);
+MV_STATUS mvCtrlModelRevNameGet(char *pNameBuff);
+MV_VOID   mvCtrlAddrDecShow(MV_VOID);
+const MV_8* mvCtrlTargetNameGet(MV_TARGET target);
+MV_U32	  ctrlSizeToReg(MV_U32 size, MV_U32 alignment);
+MV_U32	  ctrlRegToSize(MV_U32 regSize, MV_U32 alignment);
+MV_U32	  ctrlSizeRegRoundUp(MV_U32 size, MV_U32 alignment);
+MV_U32	  mvCtrlSysRstLengthCounterGet(MV_VOID);
+MV_STATUS ctrlWinOverlapTest(MV_ADDR_WIN *pAddrWin1, MV_ADDR_WIN *pAddrWin2);
+MV_STATUS ctrlWinWithinWinTest(MV_ADDR_WIN *pAddrWin1, MV_ADDR_WIN *pAddrWin2);
+
+MV_VOID   mvCtrlPwrClckSet(MV_UNIT_ID unitId, MV_U32 index, MV_BOOL enable);
+MV_BOOL	  mvCtrlPwrClckGet(MV_UNIT_ID unitId, MV_U32 index);
+MV_VOID   mvCtrlPwrMemSet(MV_UNIT_ID unitId, MV_U32 index, MV_BOOL enable);
+MV_BOOL	  mvCtrlIsBootFromSPI(MV_VOID);
+MV_BOOL	  mvCtrlIsBootFromSPIUseNAND(MV_VOID);
+MV_BOOL	  mvCtrlIsBootFromNAND(MV_VOID);
+#if defined(MV_INCLUDE_CLK_PWR_CNTRL)
+MV_VOID   mvCtrlPwrSaveOn(MV_VOID);
+MV_VOID   mvCtrlPwrSaveOff(MV_VOID);
+#endif
+MV_BOOL	  mvCtrlPwrMemGet(MV_UNIT_ID unitId, MV_U32 index);
+MV_VOID   mvMPPConfigToSPI(MV_VOID);
+MV_VOID   mvMPPConfigToDefault(MV_VOID);
+
+
+#endif /* __INCmvCtrlEnvLibh */
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvRegs.h b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvRegs.h
new file mode 100644
index 000000000000..34b805f24b78
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvRegs.h
@@ -0,0 +1,419 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+        this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvCtrlEnvRegsh
+#define __INCmvCtrlEnvRegsh
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* CV Support */
+#define PEX0_MEM0 	PEX0_MEM
+#define PCI0_MEM0	PEX0_MEM
+
+/* Controller revision info */
+#define PCI_CLASS_CODE_AND_REVISION_ID			    0x008
+#define PCCRIR_REVID_OFFS				    0		/* Revision ID */
+#define PCCRIR_REVID_MASK				    (0xff << PCCRIR_REVID_OFFS)
+
+/* Controler environment registers offsets */
+
+/* Power Managment Control */
+#define POWER_MNG_MEM_CTRL_REG			0x20118
+
+#define PMC_GESTOPMEM_OFFS(port)		((port)? 13 : 0)
+#define PMC_GESTOPMEM_MASK(port)		(1 << PMC_GESTOPMEM_OFFS(port))
+#define PMC_GESTOPMEM_EN(port)			(0 << PMC_GESTOPMEM_OFFS(port))
+#define PMC_GESTOPMEM_STOP(port)		(1 << PMC_GESTOPMEM_OFFS(port))
+
+#define PMC_PEXSTOPMEM_OFFS			1
+#define PMC_PEXSTOPMEM_MASK			(1 << PMC_PEXSTOPMEM_OFFS)
+#define PMC_PEXSTOPMEM_EN			(0 << PMC_PEXSTOPMEM_OFFS)
+#define PMC_PEXSTOPMEM_STOP			(1 << PMC_PEXSTOPMEM_OFFS)
+
+#define PMC_USBSTOPMEM_OFFS			2
+#define PMC_USBSTOPMEM_MASK			(1 << PMC_USBSTOPMEM_OFFS)
+#define PMC_USBSTOPMEM_EN			(0 << PMC_USBSTOPMEM_OFFS)
+#define PMC_USBSTOPMEM_STOP			(1 << PMC_USBSTOPMEM_OFFS)
+
+#define PMC_DUNITSTOPMEM_OFFS			3
+#define PMC_DUNITSTOPMEM_MASK			(1 << PMC_DUNITSTOPMEM_OFFS)
+#define PMC_DUNITSTOPMEM_EN			(0 << PMC_DUNITSTOPMEM_OFFS)
+#define PMC_DUNITSTOPMEM_STOP			(1 << PMC_DUNITSTOPMEM_OFFS)
+
+#define PMC_RUNITSTOPMEM_OFFS			4
+#define PMC_RUNITSTOPMEM_MASK			(1 << PMC_RUNITSTOPMEM_OFFS)
+#define PMC_RUNITSTOPMEM_EN			(0 << PMC_RUNITSTOPMEM_OFFS)
+#define PMC_RUNITSTOPMEM_STOP			(1 << PMC_RUNITSTOPMEM_OFFS)
+
+#define PMC_XORSTOPMEM_OFFS(port)		(5+(port*2))
+#define PMC_XORSTOPMEM_MASK(port)		(1 << PMC_XORSTOPMEM_OFFS(port))
+#define PMC_XORSTOPMEM_EN(port)			(0 << PMC_XORSTOPMEM_OFFS(port))
+#define PMC_XORSTOPMEM_STOP(port)		(1 << PMC_XORSTOPMEM_OFFS(port))
+
+#define PMC_SATASTOPMEM_OFFS(port)		(6+(port*5))
+#define PMC_SATASTOPMEM_MASK(port)		(1 << PMC_SATASTOPMEM_OFFS(port))
+#define PMC_SATASTOPMEM_EN(port)		(0 << PMC_SATASTOPMEM_OFFS(port))
+#define PMC_SATASTOPMEM_STOP(port)		(1 << PMC_SATASTOPMEM_OFFS(port))
+
+#define PMC_SESTOPMEM_OFFS			8
+#define PMC_SESTOPMEM_MASK			(1 << PMC_SESTOPMEM_OFFS)
+#define PMC_SESTOPMEM_EN			(0 << PMC_SESTOPMEM_OFFS)
+#define PMC_SESTOPMEM_STOP			(1 << PMC_SESTOPMEM_OFFS)
+
+#define PMC_AUDIOSTOPMEM_OFFS			9
+#define PMC_AUDIOSTOPMEM_MASK			(1 << PMC_AUDIOSTOPMEM_OFFS)
+#define PMC_AUDIOSTOPMEM_EN			(0 << PMC_AUDIOSTOPMEM_OFFS)
+#define PMC_AUDIOSTOPMEM_STOP			(1 << PMC_AUDIOSTOPMEM_OFFS)
+
+#define POWER_MNG_CTRL_REG			0x2011C
+
+#define PMC_GESTOPCLOCK_OFFS(port)		((port)? 19 : 0)
+#define PMC_GESTOPCLOCK_MASK(port)		(1 << PMC_GESTOPCLOCK_OFFS(port))
+#define PMC_GESTOPCLOCK_EN(port)		(1 << PMC_GESTOPCLOCK_OFFS(port))
+#define PMC_GESTOPCLOCK_STOP(port)		(0 << PMC_GESTOPCLOCK_OFFS(port))
+
+#define PMC_PEXPHYSTOPCLOCK_OFFS		1
+#define PMC_PEXPHYSTOPCLOCK_MASK		(1 << PMC_PEXPHYSTOPCLOCK_OFFS)
+#define PMC_PEXPHYSTOPCLOCK_EN			(1 << PMC_PEXPHYSTOPCLOCK_OFFS)
+#define PMC_PEXPHYSTOPCLOCK_STOP		(0 << PMC_PEXPHYSTOPCLOCK_OFFS)
+
+#define PMC_PEXSTOPCLOCK_OFFS			2
+#define PMC_PEXSTOPCLOCK_MASK			(1 << PMC_PEXSTOPCLOCK_OFFS)
+#define PMC_PEXSTOPCLOCK_EN			(1 << PMC_PEXSTOPCLOCK_OFFS)
+#define PMC_PEXSTOPCLOCK_STOP			(0 << PMC_PEXSTOPCLOCK_OFFS)
+
+#define PMC_USBSTOPCLOCK_OFFS			3
+#define PMC_USBSTOPCLOCK_MASK			(1 << PMC_USBSTOPCLOCK_OFFS)
+#define PMC_USBSTOPCLOCK_EN			(1 << PMC_USBSTOPCLOCK_OFFS)
+#define PMC_USBSTOPCLOCK_STOP			(0 << PMC_USBSTOPCLOCK_OFFS)
+
+#define PMC_SDIOSTOPCLOCK_OFFS			4
+#define PMC_SDIOSTOPCLOCK_MASK			(1 << PMC_SDIOSTOPCLOCK_OFFS)
+#define PMC_SDIOSTOPCLOCK_EN			(1 << PMC_SDIOSTOPCLOCK_OFFS)
+#define PMC_SDIOSTOPCLOCK_STOP			(0 << PMC_SDIOSTOPCLOCK_OFFS)
+
+#define PMC_TSSTOPCLOCK_OFFS			5
+#define PMC_TSSTOPCLOCK_MASK			(1 << PMC_TSSTOPCLOCK_OFFS)
+#define PMC_TSSTOPCLOCK_EN			(1 << PMC_TSSTOPCLOCK_OFFS)
+#define PMC_TSSTOPCLOCK_STOP			(0 << PMC_TSSTOPCLOCK_OFFS)
+
+#define PMC_AUDIOSTOPCLOCK_OFFS			9
+#define PMC_AUDIOSTOPCLOCK_MASK			(1 << PMC_AUDIOSTOPCLOCK_OFFS)
+#define PMC_AUDIOSTOPCLOCK_EN			(1 << PMC_AUDIOSTOPCLOCK_OFFS)
+#define PMC_AUDIOSTOPCLOCK_STOP			(0 << PMC_AUDIOSTOPCLOCK_OFFS)
+
+#define PMC_POWERSAVE_OFFS			11
+#define PMC_POWERSAVE_MASK			(1 << PMC_POWERSAVE_OFFS)
+#define PMC_POWERSAVE_EN			(1 << PMC_POWERSAVE_OFFS)
+#define PMC_POWERSAVE_STOP			(0 << PMC_POWERSAVE_OFFS)
+
+
+
+
+#define PMC_SATASTOPCLOCK_OFFS(port)		(14+(port))
+#define PMC_SATASTOPCLOCK_MASK(port)		(1 << PMC_SATASTOPCLOCK_OFFS(port))
+#define PMC_SATASTOPCLOCK_EN(port)		(1 << PMC_SATASTOPCLOCK_OFFS(port))
+#define PMC_SATASTOPCLOCK_STOP(port)		(0 << PMC_SATASTOPCLOCK_OFFS(port))
+
+#define PMC_SESTOPCLOCK_OFFS			17
+#define PMC_SESTOPCLOCK_MASK			(1 << PMC_SESTOPCLOCK_OFFS)
+#define PMC_SESTOPCLOCK_EN			(1 << PMC_SESTOPCLOCK_OFFS)
+#define PMC_SESTOPCLOCK_STOP			(0 << PMC_SESTOPCLOCK_OFFS)
+
+#define PMC_TDMSTOPCLOCK_OFFS			20
+#define PMC_TDMSTOPCLOCK_MASK			(1 << PMC_TDMSTOPCLOCK_OFFS)
+#define PMC_TDMSTOPCLOCK_EN			(1 << PMC_TDMSTOPCLOCK_OFFS)
+#define PMC_TDMSTOPCLOCK_STOP			(0 << PMC_TDMSTOPCLOCK_OFFS)
+
+
+/* Controler environment registers offsets */
+#define MPP_CONTROL_REG0			0x10000
+#define MPP_CONTROL_REG1			0x10004
+#define MPP_CONTROL_REG2			0x10008
+#define MPP_CONTROL_REG3			0x1000C
+#define MPP_CONTROL_REG4			0x10010
+#define MPP_CONTROL_REG5			0x10014
+#define MPP_CONTROL_REG6			0x10018
+#define MPP_SAMPLE_AT_RESET			0x10030
+#define CHIP_BOND_REG				0x10034
+#define SYSRST_LENGTH_COUNTER_REG		0x10050
+#define SLCR_COUNT_OFFS				0
+#define SLCR_COUNT_MASK				(0x1FFFFFFF << SLCR_COUNT_OFFS)
+#define SLCR_CLR_OFFS				31
+#define SLCR_CLR_MASK				(1 << SLCR_CLR_OFFS)
+#define PCKG_OPT_MASK				0x3
+#define MPP_OUTPUT_DRIVE_REG			0x100E0
+#define MPP_RGMII0_OUTPUT_DRIVE_OFFS            7
+#define MPP_3_3_RGMII0_OUTPUT_DRIVE		(0x0 << MPP_RGMII0_OUTPUT_DRIVE_OFFS)
+#define MPP_1_8_RGMII0_OUTPUT_DRIVE		(0x1 << MPP_RGMII0_OUTPUT_DRIVE_OFFS)
+#define MPP_RGMII1_OUTPUT_DRIVE_OFFS            15
+#define MPP_3_3_RGMII1_OUTPUT_DRIVE		(0x0 << MPP_RGMII1_OUTPUT_DRIVE_OFFS)
+#define MPP_1_8_RGMII1_OUTPUT_DRIVE		(0x1 << MPP_RGMII1_OUTPUT_DRIVE_OFFS)
+
+#define MSAR_BOOT_MODE_OFFS                     12
+#define MSAR_BOOT_MODE_MASK                     (0x7 << MSAR_BOOT_MODE_OFFS)
+#define MSAR_BOOT_NAND_WITH_BOOTROM		        (0x5 << MSAR_BOOT_MODE_OFFS)
+#define MSAR_BOOT_SPI_WITH_BOOTROM              (0x4 << MSAR_BOOT_MODE_OFFS)
+#define MSAR_BOOT_SPI_USE_NAND_WITH_BOOTROM		(0x2 << MSAR_BOOT_MODE_OFFS)
+
+#define MSAR_BOOT_MODE_6180(X)                  (((X & 0x3000) >> 12) | \
+                                                ((X & 0x2) << 1))
+#define MSAR_BOOT_SPI_WITH_BOOTROM_6180         0x1
+#define MSAR_BOOT_NAND_WITH_BOOTROM_6180        0x5
+
+#define MSAR_TCLCK_OFFS				21
+#define MSAR_TCLCK_MASK				(0x1 << MSAR_TCLCK_OFFS)
+#define MSAR_TCLCK_166				(0x1 << MSAR_TCLCK_OFFS)
+#define MSAR_TCLCK_200				(0x0 << MSAR_TCLCK_OFFS)
+
+
+#define MSAR_CPUCLCK_EXTRACT(X)     (((X & 0x2) >> 1) | ((X & 0x400000) >> 21) | \
+                                    ((X & 0x18) >> 1))
+
+#define MSAR_CPUCLCK_OFFS_6180		2
+#define MSAR_CPUCLCK_MASK_6180		(0x7 << MSAR_CPUCLCK_OFFS_6180)
+
+#define MSAR_DDRCLCK_RTIO_OFFS		5
+#define MSAR_DDRCLCK_RTIO_MASK		(0xF << MSAR_DDRCLCK_RTIO_OFFS)
+
+#define MSAR_L2CLCK_EXTRACT(X)      (((X & 0x600) >> 9) | ((X & 0x80000) >> 17))
+
+#ifndef MV_ASMLANGUAGE
+/* CPU clock for 6281,6192  0->Resereved */
+#define MV_CPU_CLCK_TBL { 	0,		0, 		0, 		0,	\
+				600000000, 	0,		800000000,	1000000000,	\
+				0,	 	1200000000,	0,		0,		\
+				1500000000,	0,		0,		0}
+
+/* DDR clock RATIO for 6281,6192 {0,0}->Reserved */
+#define MV_DDR_CLCK_RTIO_TBL	{\
+	{0, 0}, {0, 0}, {2, 1}, {0, 0}, \
+	{3, 1}, {0, 0}, {4, 1}, {9, 2}, \
+	{5, 1}, {6, 1}, {0, 0}, {0, 0}, \
+	{0, 0}, {0, 0}, {0, 0}, {0, 0} \
+}
+
+/* L2 clock RATIO for 6281,6192 {1,1}->Reserved */
+#define MV_L2_CLCK_RTIO_TBL	{\
+	{0, 0}, {2, 1}, {0, 0}, {3, 1}, \
+	{0, 0}, {0, 0}, {0, 0}, {0, 0} \
+}
+
+/* 6180 have different clk reset sampling 		*/
+/* ARM CPU, DDR, L2 clock for 6180 {0,0,0}->Reserved 	*/
+#define MV_CPU6180_DDR_L2_CLCK_TBL    { \
+	{0,   		0,   		0		},\
+	{0,   		0,   		0		},\
+	{0,   		0,   		0		},\
+	{0,   		0,   		0		},\
+	{0,   		0,   		0		},\
+	{600000000, 	200000000, 	300000000	},\
+	{800000000, 	200000000, 	400000000	},\
+	{0,   		0,   		0		}\
+}
+
+
+
+/* These macros help units to identify a target Mbus Arbiter group */
+#define MV_TARGET_IS_DRAM(target)   \
+                            ((target >= SDRAM_CS0) && (target <= SDRAM_CS3))
+
+#define MV_TARGET_IS_PEX0(target)   \
+                            ((target >= PEX0_MEM) && (target <= PEX0_IO))
+
+#define MV_TARGET_IS_PEX1(target)   0
+
+#define MV_TARGET_IS_PEX(target) (MV_TARGET_IS_PEX0(target) || MV_TARGET_IS_PEX1(target))
+
+#define MV_TARGET_IS_DEVICE(target) \
+                            ((target >= DEVICE_CS0) && (target <= DEVICE_CS3))
+
+#define MV_PCI_DRAM_BAR_TO_DRAM_TARGET(bar)   0
+
+#define	MV_TARGET_IS_AS_BOOT(target) ((target) == (sampleAtResetTargetArray[ \
+                     (mvCtrlModelGet() == MV_6180_DEV_ID)? MSAR_BOOT_MODE_6180 \
+                     (MV_REG_READ(MPP_SAMPLE_AT_RESET)):((MV_REG_READ(MPP_SAMPLE_AT_RESET)\
+						 & MSAR_BOOT_MODE_MASK) >> MSAR_BOOT_MODE_OFFS)]))
+
+
+#define MV_CHANGE_BOOT_CS(target)	(((target) == DEV_BOOCS)?\
+					sampleAtResetTargetArray[(mvCtrlModelGet() == MV_6180_DEV_ID)? \
+                    MSAR_BOOT_MODE_6180(MV_REG_READ(MPP_SAMPLE_AT_RESET)): \
+                    ((MV_REG_READ(MPP_SAMPLE_AT_RESET) & MSAR_BOOT_MODE_MASK)\
+                     >> MSAR_BOOT_MODE_OFFS)]:(target))
+
+#define TCLK_TO_COUNTER_RATIO   1   /* counters running in Tclk */
+
+#define BOOT_TARGETS_NAME_ARRAY {       \
+    TBL_TERM,         	\
+    TBL_TERM,         	\
+    BOOT_ROM_CS,          	\
+    TBL_TERM,         	\
+    BOOT_ROM_CS,          	\
+    BOOT_ROM_CS,          \
+    TBL_TERM,         	\
+    TBL_TERM           \
+}
+
+#define BOOT_TARGETS_NAME_ARRAY_6180 {       \
+    TBL_TERM,         	\
+    BOOT_ROM_CS,          	\
+    TBL_TERM,           \
+    TBL_TERM,           \
+    TBL_TERM,           \
+    BOOT_ROM_CS,          \
+    TBL_TERM,           \
+    TBL_TERM           \
+}
+
+
+/* For old competability */
+#define DEVICE_CS0		NFLASH_CS
+#define DEVICE_CS1  		SPI_CS
+#define DEVICE_CS2  		BOOT_ROM_CS
+#define DEVICE_CS3  		DEV_BOOCS
+#define MV_BOOTDEVICE_INDEX   	0
+
+#define START_DEV_CS   		DEV_CS0
+#define DEV_TO_TARGET(dev)	((dev) + DEVICE_CS0)
+
+#define PCI_IF0_MEM0		PEX0_MEM
+#define PCI_IF0_IO		PEX0_IO
+
+
+/* This enumerator defines the Marvell controller target ID      */
+typedef enum _mvTargetId
+{
+    DRAM_TARGET_ID  = 0 ,    /* Port 0 -> DRAM interface         */
+    DEV_TARGET_ID   = 1,     /* Port 1 -> Nand/SPI 		*/
+    PEX0_TARGET_ID  = 4 ,    /* Port 4 -> PCI Express0 		*/
+    CRYPT_TARGET_ID = 3 ,    /* Port 3 --> Crypto Engine 	*/
+    SAGE_TARGET_ID = 12 ,    /* Port 12 -> SAGE Unit 	*/
+    MAX_TARGETS_ID
+}MV_TARGET_ID;
+
+
+/* This enumerator described the possible Controller paripheral targets.    */
+/* Controller peripherals are designated memory/IO address spaces that the  */
+/* controller can access. They are also refered as "targets"                */
+typedef enum _mvTarget
+{
+    TBL_TERM = -1, 	/* none valid target, used as targets list terminator*/
+    SDRAM_CS0,      	/* SDRAM chip select 0                          */
+    SDRAM_CS1,      	/* SDRAM chip select 1                          */
+    SDRAM_CS2,      	/* SDRAM chip select 2                          */
+    SDRAM_CS3,      	/* SDRAM chip select 3                          */
+    PEX0_MEM,		/* PCI Express 0 Memory				*/
+    PEX0_IO,		/* PCI Express 0 IO				*/
+    INTER_REGS,     	/* Internal registers                           */
+    NFLASH_CS,     	/* NFLASH_CS					*/
+    SPI_CS,     	/* SPI_CS					*/
+    BOOT_ROM_CS,        /* BOOT_ROM_CS                                  */
+    DEV_BOOCS,     	/* DEV_BOOCS					*/
+    CRYPT_ENG,      	/* Crypto Engine				*/
+#ifdef MV_INCLUDE_SAGE
+    SAGE_UNIT,      	/* SAGE Unit					*/
+#endif
+    MAX_TARGETS
+
+}MV_TARGET;
+
+#define TARGETS_DEF_ARRAY	{			\
+    {0x0E, DRAM_TARGET_ID }, /* SDRAM_CS0 */		\
+    {0x0D, DRAM_TARGET_ID }, /* SDRAM_CS1 */		\
+    {0x0B, DRAM_TARGET_ID }, /* SDRAM_CS0 */		\
+    {0x07, DRAM_TARGET_ID }, /* SDRAM_CS1 */		\
+    {0xE8, PEX0_TARGET_ID }, /* PEX0_MEM */			\
+    {0xE0, PEX0_TARGET_ID }, /* PEX0_IO */			\
+    {0xFF, 0xFF           }, /* INTER_REGS */		\
+    {0x2F, DEV_TARGET_ID  },  /* NFLASH_CS */		\
+    {0x1E, DEV_TARGET_ID  },  /* SPI_CS */		 	\
+    {0x1D, DEV_TARGET_ID  },  /* BOOT_ROM_CS */     \
+    {0x1E, DEV_TARGET_ID  },  /* DEV_BOOCS */		\
+    {0x01, CRYPT_TARGET_ID}, /* CRYPT_ENG */        \
+    {0x00, SAGE_TARGET_ID }  						\
+}
+
+
+#define TARGETS_NAME_ARRAY	{	\
+    "SDRAM_CS0",    /* SDRAM_CS0 */	\
+    "SDRAM_CS1",    /* SDRAM_CS1 */	\
+    "SDRAM_CS2",    /* SDRAM_CS2 */	\
+    "SDRAM_CS3",    /* SDRAM_CS3 */	\
+    "PEX0_MEM",	    /* PEX0_MEM */	\
+    "PEX0_IO",	    /* PEX0_IO */	\
+    "INTER_REGS",   /* INTER_REGS */	\
+    "NFLASH_CS",    /* NFLASH_CS */	\
+    "SPI_CS",	    /* SPI_CS */	\
+    "BOOT_ROM_CS",  /* BOOT_ROM_CS */ \
+    "DEV_BOOTCS",   /* DEV_BOOCS */	\
+    "CRYPT_ENG",    /* CRYPT_ENG */  \
+    "SAGE_UNIT"	   /* SAGE_UNIT */	\
+}
+#endif /* MV_ASMLANGUAGE */
+
+
+#endif
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvSpec.h b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvSpec.h
new file mode 100644
index 000000000000..a474f76de292
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/mvCtrlEnvSpec.h
@@ -0,0 +1,257 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+        this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvCtrlEnvSpech
+#define __INCmvCtrlEnvSpech
+
+#include "mvDeviceId.h"
+#include "mvSysHwConfig.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#define MV_ARM_SOC
+#define SOC_NAME_PREFIX			"MV88F"
+
+
+/* units base and port numbers */
+#ifdef MV_ASMLANGUAGE
+#define XOR_UNIT_BASE(unit)           	0x60800
+#else
+#define MV_XOR_REG_BASE			0x60000
+#define XOR_UNIT_BASE(unit)           	((unit)? 0x60900:0x60800)
+#endif
+
+#define TDM_REG_BASE			0xD0000
+#define USB_REG_BASE(dev)       	0x50000
+#define AUDIO_REG_BASE  		0xA0000
+#define SATA_REG_BASE           	0x80000
+#define MV_CESA_REG_BASE                0x3D000
+#define MV_CESA_TDMA_REG_BASE		0x30000
+#define MV_SDIO_REG_BASE		0x90000
+#define MV_ETH_REG_BASE(port)      	(((port) == 0) ? 0x72000 : 0x76000)
+#define MV_UART_CHAN_BASE(chanNum)	(0x12000 + (chanNum * 0x100))
+#define DRAM_BASE			0x0
+#define CNTMR_BASE                  	0x20300
+#define TWSI_SLAVE_BASE(chanNum)    	0x11000
+#define PEX_IF_BASE(pexIf)      	0x40000
+#define MPP_REG_BASE			0x10000
+#define TSU_GLOBAL_REG_BASE             0xB4000
+#define MAX_AHB_TO_MBUS_REG_BASE	0x20000
+
+#define INTER_REGS_SIZE	 	 	_1M
+/* This define describes the TWSI interrupt bit and location */
+#define TWSI_CPU_MAIN_INT_CAUSE_REG		0x20200
+#define TWSI0_CPU_MAIN_INT_BIT			(1<<29)
+#define TWSI_SPEED				100000
+
+#define MV_GPP_MAX_GROUP    		2
+#define MV_CNTMR_MAX_COUNTER 		2
+#define MV_UART_MAX_CHAN		2
+#define MV_XOR_MAX_UNIT         	2
+#define MV_XOR_MAX_CHAN         	4 /* total channels for all units together*/
+#define MV_XOR_MAX_CHAN_PER_UNIT       	2 /* channels for units */
+#define MV_SATA_MAX_CHAN	 	2
+
+#define MV_6281_MPP_MAX_MODULE    	2
+#define MV_6192_MPP_MAX_MODULE    	1
+#define MV_6190_MPP_MAX_MODULE    	1
+#define MV_6180_MPP_MAX_MODULE    	2
+#define MV_6281_MPP_MAX_GROUP    	7
+#define MV_6192_MPP_MAX_GROUP    	4
+#define MV_6190_MPP_MAX_GROUP    	4
+#define MV_6180_MPP_MAX_GROUP    	3
+
+#define MV_DRAM_MAX_CS      		4
+
+/* This define describes the maximum number of supported PCI\PCIX Interfaces*/
+#define MV_PCI_MAX_IF		0
+#define MV_PCI_START_IF		0
+
+/* This define describes the maximum number of supported PEX Interfaces 	*/
+#define MV_INCLUDE_PEX0
+#define MV_DISABLE_PEX_DEVICE_BAR
+#define MV_PEX_MAX_IF		1
+#define MV_PEX_START_IF		MV_PCI_MAX_IF
+
+/* This define describes the maximum number of supported PCI Interfaces 	*/
+#define MV_PCI_IF_MAX_IF   	(MV_PEX_MAX_IF+MV_PCI_MAX_IF)
+
+#define MV_ETH_MAX_PORTS		2
+#define MV_6281_ETH_MAX_PORTS	   	2
+#define MV_6192_ETH_MAX_PORTS	   	2
+#define MV_6190_ETH_MAX_PORTS	   	1
+#define MV_6180_ETH_MAX_PORTS	   	1
+
+#define MV_IDMA_MAX_CHAN    		0
+
+#define MV_USB_MAX_PORTS		1
+
+#define MV_USB_VERSION              1
+
+
+#define MV_6281_NAND			1
+#define MV_6192_NAND			1
+#define MV_6190_NAND			1
+#define MV_6180_NAND			0
+
+#define MV_6281_SDIO			1
+#define MV_6192_SDIO			1
+#define MV_6190_SDIO			1
+#define MV_6180_SDIO			1
+
+#define MV_6281_TS			1
+#define MV_6192_TS			1
+#define MV_6190_TS			0
+#define MV_6180_TS			0
+
+#define MV_6281_AUDIO			1
+#define MV_6192_AUDIO			1
+#define MV_6190_AUDIO			0
+#define MV_6180_AUDIO			1
+
+#define MV_6281_TDM			1
+#define MV_6192_TDM			1
+#define MV_6190_TDM			0
+#define MV_6180_TDM			0
+
+#define MV_DEVICE_MAX_CS      		4
+
+/* Others */
+#define PEX_HOST_BUS_NUM(pciIf)		(pciIf)
+#define PEX_HOST_DEV_NUM(pciIf)		0
+
+#define PCI_IO(pciIf)		(PEX0_IO)
+#define PCI_MEM(pciIf, memNum)  (PEX0_MEM0)
+/* CESA version #2: One channel, 2KB SRAM, TDMA */
+#if defined(MV_CESA_CHAIN_MODE_SUPPORT)
+	#define MV_CESA_VERSION		 	3
+#else
+#define MV_CESA_VERSION		 	2
+#endif
+#define MV_CESA_SRAM_SIZE               2*1024
+/* This define describes the maximum number of supported Ethernet ports 	*/
+#define MV_ETH_VERSION 			4
+#define MV_ETH_MAX_RXQ              	8
+#define MV_ETH_MAX_TXQ              	8
+#define MV_ETH_PORT_SGMII          	{ MV_FALSE, MV_FALSE }
+/* This define describes the the support of USB 	*/
+#define MV_USB_VERSION  		1
+
+#define MV_INCLUDE_SDRAM_CS0
+#define MV_INCLUDE_SDRAM_CS1
+#define MV_INCLUDE_SDRAM_CS2
+#define MV_INCLUDE_SDRAM_CS3
+
+#define MV_INCLUDE_DEVICE_CS0
+#define MV_INCLUDE_DEVICE_CS1
+#define MV_INCLUDE_DEVICE_CS2
+#define MV_INCLUDE_DEVICE_CS3
+
+#define MPP_GROUP_1_TYPE {\
+	{0, 0, 0}, /* Reserved for AUTO */ \
+	{0x22220000, 0x22222222, 0x2222}, /* TDM */ \
+	{0x44440000, 0x00044444, 0x0000}, /* AUDIO */ \
+	{0x33330000, 0x33003333, 0x0033}, /* RGMII */ \
+	{0x33330000, 0x03333333, 0x0033}, /* GMII */ \
+	{0x11110000, 0x11111111, 0x0001}, /* TS */ \
+	{0x33330000, 0x33333333, 0x3333}  /* MII */ \
+}
+
+#define MPP_GROUP_2_TYPE {\
+	{0, 0, 0}, /* Reserved for AUTO */ \
+	{0x22220000, 0x22222222, 0x22}, /* TDM */ \
+	{0x44440000, 0x00044444, 0x0}, /* AUDIO */ \
+	{0, 0, 0}, /* N_A */ \
+	{0, 0, 0}, /* N_A */ \
+	{0x11110000, 0x11111111, 0x01}  /* TS */ \
+}
+
+#ifndef MV_ASMLANGUAGE
+
+/* This enumerator defines the Marvell Units ID      */
+typedef enum _mvUnitId
+{
+    DRAM_UNIT_ID,
+    PEX_UNIT_ID,
+    ETH_GIG_UNIT_ID,
+    USB_UNIT_ID,
+    IDMA_UNIT_ID,
+    XOR_UNIT_ID,
+    SATA_UNIT_ID,
+    TDM_UNIT_ID,
+    UART_UNIT_ID,
+    CESA_UNIT_ID,
+    SPI_UNIT_ID,
+    AUDIO_UNIT_ID,
+    SDIO_UNIT_ID,
+    TS_UNIT_ID,
+    MAX_UNITS_ID
+
+}MV_UNIT_ID;
+
+#endif
+
+#endif /* __INCmvCtrlEnvSpech */
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbus.c b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbus.c
new file mode 100644
index 000000000000..00f8f71ae8b7
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbus.c
@@ -0,0 +1,1047 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+/* includes */
+#include "ctrlEnv/sys/mvAhbToMbus.h"
+#include "ctrlEnv/mvCtrlEnvAddrDec.h"
+
+#undef MV_DEBUG
+/* defines  */
+#ifdef MV_DEBUG
+	#define DB(x)	x
+#else
+	#define DB(x)
+#endif
+
+/* typedefs */
+
+
+/* CPU address remap registers offsets are inconsecutive. This struct 		*/
+/* describes address remap register offsets									*/
+typedef struct _ahbToMbusRemapRegOffs
+{
+    MV_U32 lowRegOffs;		/* Low 32-bit remap register offset			*/
+    MV_U32 highRegOffs;		/* High 32 bit remap register offset		*/
+}AHB_TO_MBUS_REMAP_REG_OFFS;
+
+/* locals   */
+static MV_STATUS ahbToMbusRemapRegOffsGet	(MV_U32 winNum,
+										AHB_TO_MBUS_REMAP_REG_OFFS *pRemapRegs);
+
+/*******************************************************************************
+* mvAhbToMbusInit - Initialize Ahb To Mbus Address Map !
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_OK laways.
+*
+*******************************************************************************/
+MV_STATUS mvAhbToMbusInit(void)
+{
+	return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvAhbToMbusWinSet - Set CPU-to-peripheral winNum address window
+*
+* DESCRIPTION:
+*       This function sets
+*       address window, also known as address decode window.
+*       A new address decode window is set for specified winNum address window.
+*       If address decode window parameter structure enables the window,
+*       the routine will also enable the winNum window, allowing CPU to access
+*       the winNum window.
+*
+* INPUT:
+*       winNum      - Windows number.
+*       pAddrDecWin - CPU winNum window data structure.
+*
+* OUTPUT:
+*       N/A
+*
+* RETURN:
+*       MV_OK if CPU winNum window was set correctly, MV_ERROR in case of
+*       address window overlapps with other active CPU winNum window or
+*		trying to assign 36bit base address while CPU does not support that.
+*       The function returns MV_NOT_SUPPORTED, if the winNum is unsupported.
+*
+*******************************************************************************/
+MV_STATUS mvAhbToMbusWinSet(MV_U32 winNum, MV_AHB_TO_MBUS_DEC_WIN *pAddrDecWin)
+{
+	MV_TARGET_ATTRIB targetAttribs;
+	MV_DEC_REGS decRegs;
+
+	/* Parameter checking   */
+	if (winNum >= MAX_AHB_TO_MBUS_WINS)
+	{
+		mvOsPrintf("mvAhbToMbusWinSet: ERR. Invalid winNum %d\n", winNum);
+		return MV_NOT_SUPPORTED;
+	}
+
+
+	/* read base register*/
+	if (winNum != MV_AHB_TO_MBUS_INTREG_WIN)
+	{
+		decRegs.baseReg = MV_REG_READ(AHB_TO_MBUS_WIN_BASE_REG(winNum));
+	}
+	else
+	{
+		decRegs.baseReg = MV_REG_READ(AHB_TO_MBUS_WIN_INTEREG_REG);
+	}
+
+	/* check if address is aligned to the size */
+	if(MV_IS_NOT_ALIGN(pAddrDecWin->addrWin.baseLow, pAddrDecWin->addrWin.size))
+	{
+		mvOsPrintf("mvAhbToMbusWinSet:Error setting AHB to MBUS window %d to "\
+				   "target %s.\nAddress 0x%08x is unaligned to size 0x%x.\n",
+				   winNum,
+				   mvCtrlTargetNameGet(pAddrDecWin->target),
+				   pAddrDecWin->addrWin.baseLow,
+				   pAddrDecWin->addrWin.size);
+		return MV_ERROR;
+	}
+
+	/* read control register*/
+	if (winNum != MV_AHB_TO_MBUS_INTREG_WIN)
+	{
+		decRegs.sizeReg = MV_REG_READ(AHB_TO_MBUS_WIN_CTRL_REG(winNum));
+	}
+
+	if (MV_OK != mvCtrlAddrDecToReg(&(pAddrDecWin->addrWin),&decRegs))
+	{
+		mvOsPrintf("mvAhbToMbusWinSet:mvCtrlAddrDecToReg Failed\n");
+		return MV_ERROR;
+	}
+
+	/* enable\Disable */
+	if (MV_TRUE == pAddrDecWin->enable)
+	{
+		decRegs.sizeReg |= ATMWCR_WIN_ENABLE;
+	}
+	else
+	{
+		decRegs.sizeReg &= ~ATMWCR_WIN_ENABLE;
+	}
+
+	mvCtrlAttribGet(pAddrDecWin->target,&targetAttribs);
+
+	/* set attributes */
+	decRegs.sizeReg &= ~ATMWCR_WIN_ATTR_MASK;
+	decRegs.sizeReg |= targetAttribs.attrib << ATMWCR_WIN_ATTR_OFFS;
+	/* set target ID */
+	decRegs.sizeReg &= ~ATMWCR_WIN_TARGET_MASK;
+	decRegs.sizeReg |= targetAttribs.targetId << ATMWCR_WIN_TARGET_OFFS;
+
+#if !defined(MV_RUN_FROM_FLASH)
+    /* To be on the safe side we disable the window before writing the  */
+    /* new values.                                                      */
+	if (winNum != MV_AHB_TO_MBUS_INTREG_WIN)
+	{
+		mvAhbToMbusWinEnable(winNum,MV_FALSE);
+	}
+#endif
+
+	/* 3) Write to address decode Base Address Register                   */
+	if (winNum != MV_AHB_TO_MBUS_INTREG_WIN)
+	{
+		MV_REG_WRITE(AHB_TO_MBUS_WIN_BASE_REG(winNum), decRegs.baseReg);
+	}
+	else
+	{
+		MV_REG_WRITE(AHB_TO_MBUS_WIN_INTEREG_REG, decRegs.baseReg);
+	}
+
+
+	/* Internal register space have no size	*/
+	/* register. Do not perform size register assigment for those targets 	*/
+	if (winNum != MV_AHB_TO_MBUS_INTREG_WIN)
+	{
+		/* Write to address decode Size Register                        	*/
+		MV_REG_WRITE(AHB_TO_MBUS_WIN_CTRL_REG(winNum), decRegs.sizeReg);
+	}
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvAhbToMbusWinGet - Get CPU-to-peripheral winNum address window
+*
+* DESCRIPTION:
+*		Get the CPU peripheral winNum address window.
+*
+* INPUT:
+*       winNum - Peripheral winNum enumerator
+*
+* OUTPUT:
+*       pAddrDecWin - CPU winNum window information data structure.
+*
+* RETURN:
+*       MV_OK if winNum exist, MV_ERROR otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvAhbToMbusWinGet(MV_U32 winNum, MV_AHB_TO_MBUS_DEC_WIN *pAddrDecWin)
+{
+	MV_DEC_REGS decRegs;
+	MV_TARGET_ATTRIB targetAttrib;
+
+
+	/* Parameter checking   */
+	if (winNum >= MAX_AHB_TO_MBUS_WINS)
+	{
+		mvOsPrintf("mvAhbToMbusWinGet: ERR. Invalid winNum %d\n", winNum);
+		return MV_NOT_SUPPORTED;
+	}
+
+
+	/* Internal register space size have no size register*/
+	if (winNum != MV_AHB_TO_MBUS_INTREG_WIN)
+	{
+		decRegs.sizeReg =  MV_REG_READ(AHB_TO_MBUS_WIN_CTRL_REG(winNum));
+	}
+	else
+	{
+		decRegs.sizeReg = 0;
+	}
+
+
+	/* Read base and size	*/
+	if (winNum != MV_AHB_TO_MBUS_INTREG_WIN)
+	{
+		decRegs.baseReg = MV_REG_READ(AHB_TO_MBUS_WIN_BASE_REG(winNum));
+	}
+	else
+	{
+		decRegs.baseReg = MV_REG_READ(AHB_TO_MBUS_WIN_INTEREG_REG);
+	}
+
+
+
+	if (MV_OK != mvCtrlRegToAddrDec(&decRegs,&(pAddrDecWin->addrWin)))
+	{
+		mvOsPrintf("mvAhbToMbusWinGet: mvCtrlRegToAddrDec Failed \n");
+		return MV_ERROR;
+	}
+
+	if (winNum == MV_AHB_TO_MBUS_INTREG_WIN)
+	{
+        pAddrDecWin->addrWin.size = INTER_REGS_SIZE;
+		pAddrDecWin->target = INTER_REGS;
+		pAddrDecWin->enable = MV_TRUE;
+
+		return MV_OK;
+	}
+
+
+	if (decRegs.sizeReg & ATMWCR_WIN_ENABLE)
+	{
+		pAddrDecWin->enable = MV_TRUE;
+	}
+	else
+	{
+		pAddrDecWin->enable = MV_FALSE;
+
+	}
+
+
+
+	if (-1 == pAddrDecWin->addrWin.size)
+	{
+		return MV_ERROR;
+	}
+
+	/* attrib and targetId */
+	targetAttrib.attrib = (decRegs.sizeReg & ATMWCR_WIN_ATTR_MASK) >>
+													ATMWCR_WIN_ATTR_OFFS;
+	targetAttrib.targetId = (decRegs.sizeReg & ATMWCR_WIN_TARGET_MASK) >>
+													ATMWCR_WIN_TARGET_OFFS;
+
+	pAddrDecWin->target = mvCtrlTargetGet(&targetAttrib);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvAhbToMbusWinTargetGet - Get Window number associated with target
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_U32	  mvAhbToMbusWinTargetGet(MV_TARGET target)
+{
+	MV_AHB_TO_MBUS_DEC_WIN decWin;
+	MV_U32 winNum;
+
+	/* Check parameters */
+	if (target >= MAX_TARGETS)
+	{
+		mvOsPrintf("mvAhbToMbusWinTargetGet: target %d is Illigal\n", target);
+		return 0xffffffff;
+	}
+
+	if (INTER_REGS == target)
+	{
+		return MV_AHB_TO_MBUS_INTREG_WIN;
+	}
+
+	for (winNum = 0; winNum < MAX_AHB_TO_MBUS_WINS ; winNum++)
+	{
+		if (winNum == MV_AHB_TO_MBUS_INTREG_WIN)
+			continue;
+
+		if (mvAhbToMbusWinGet(winNum,&decWin) != MV_OK)
+		{
+			mvOsPrintf("mvAhbToMbusWinTargetGet: mvAhbToMbusWinGet fail\n");
+			return 0xffffffff;
+
+		}
+
+		if (decWin.enable == MV_TRUE)
+		{
+			if (decWin.target == target)
+			{
+				return winNum;
+			}
+
+		}
+
+	}
+
+	return 0xFFFFFFFF;
+
+
+}
+
+/*******************************************************************************
+* mvAhbToMbusWinAvailGet - Get First Available window number.
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_U32    mvAhbToMbusWinAvailGet(MV_VOID)
+{
+        MV_AHB_TO_MBUS_DEC_WIN decWin;
+        MV_U32 winNum;
+
+        for (winNum = 0; winNum < MAX_AHB_TO_MBUS_WINS ; winNum++)
+        {
+                if (winNum == MV_AHB_TO_MBUS_INTREG_WIN)
+                        continue;
+
+                if (mvAhbToMbusWinGet(winNum,&decWin) != MV_OK)
+                {
+                        mvOsPrintf("mvAhbToMbusWinTargetGet: mvAhbToMbusWinGet fail\n");
+                        return 0xffffffff;
+
+                }
+
+                if (decWin.enable == MV_FALSE)
+                {
+			return winNum;
+                }
+
+        }
+
+        return 0xFFFFFFFF;
+}
+
+
+/*******************************************************************************
+* mvAhbToMbusWinEnable - Enable/disable a CPU address decode window
+*
+* DESCRIPTION:
+*       This function enable/disable a CPU address decode window.
+*       if parameter 'enable' == MV_TRUE the routine will enable the
+*       window, thus enabling CPU accesses (before enabling the window it is
+*       tested for overlapping). Otherwise, the window will be disabled.
+*
+* INPUT:
+*       winNum - Peripheral winNum enumerator.
+*       enable - Enable/disable parameter.
+*
+* OUTPUT:
+*       N/A
+*
+* RETURN:
+*       MV_ERROR if protection window number was wrong, or the window
+*       overlapps other winNum window.
+*
+*******************************************************************************/
+MV_STATUS mvAhbToMbusWinEnable(MV_U32 winNum, MV_BOOL enable)
+{
+
+	/* Parameter checking   */
+	if (winNum >= MAX_AHB_TO_MBUS_WINS)
+	{
+		mvOsPrintf("mvAhbToMbusWinEnable: ERR. Invalid winNum %d\n", winNum);
+		return MV_NOT_SUPPORTED;
+	}
+
+	/* Internal registers bar can't be disable or enabled */
+	if (winNum == MV_AHB_TO_MBUS_INTREG_WIN)
+	{
+		return (enable ? MV_OK : MV_ERROR);
+	}
+
+    if (enable == MV_TRUE)
+    {
+		/* enable the window */
+		MV_REG_BIT_SET(AHB_TO_MBUS_WIN_CTRL_REG(winNum), ATMWCR_WIN_ENABLE);
+    }
+    else
+    {   /* Disable address decode winNum window                             */
+		MV_REG_BIT_RESET(AHB_TO_MBUS_WIN_CTRL_REG(winNum), ATMWCR_WIN_ENABLE);
+    }
+
+	return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvAhbToMbusWinRemap - Set CPU remap register for address windows.
+*
+* DESCRIPTION:
+*       After a CPU address hits one of PCI address decode windows there is an
+*       option to remap the address to a different one. For example, CPU
+*       executes a read from PCI winNum window address 0x1200.0000. This
+*       can be modified so the address on the PCI bus would be 0x1400.0000
+*       Using the PCI address remap mechanism.
+*
+* INPUT:
+*       winNum      - Peripheral winNum enumerator. Must be a PCI winNum.
+*       pAddrDecWin - CPU winNum window information data structure.
+*                     Note that caller has to fill in the base field only. The
+*                     size field is ignored.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_ERROR if winNum is not a PCI one, MV_OK otherwise.
+*
+*******************************************************************************/
+MV_U32 mvAhbToMbusWinRemap(MV_U32 winNum, MV_ADDR_WIN *pAddrWin)
+{
+    MV_U32 baseAddr;
+	AHB_TO_MBUS_REMAP_REG_OFFS remapRegOffs;
+
+    MV_U32 effectiveBaseAddress=0,
+		   baseAddrValue=0,windowSizeValue=0;
+
+
+	/* Get registers offsets of given winNum 		*/
+	if (MV_NO_SUCH == ahbToMbusRemapRegOffsGet(winNum, &remapRegOffs))
+	{
+		return 0xffffffff;
+	}
+
+	/* 1) Set address remap low */
+    baseAddr = pAddrWin->baseLow;
+
+    /* Check base address aligment 					*/
+	/*
+	if (MV_IS_NOT_ALIGN(baseAddr, ATMWRLR_REMAP_LOW_ALIGNMENT))
+	{
+        mvOsPrintf("mvAhbToMbusPciRemap: Warning. Target base 0x%x unaligned\n",
+																baseAddr);
+        return MV_ERROR;
+	}
+	*/
+
+	/* BaseLow[31:16] => base register [31:16] 		*/
+	baseAddr = baseAddr & ATMWRLR_REMAP_LOW_MASK;
+
+    MV_REG_WRITE(remapRegOffs.lowRegOffs, baseAddr);
+
+	MV_REG_WRITE(remapRegOffs.highRegOffs, pAddrWin->baseHigh);
+
+
+	baseAddrValue = MV_REG_READ(AHB_TO_MBUS_WIN_BASE_REG(winNum));
+	windowSizeValue = MV_REG_READ(AHB_TO_MBUS_WIN_CTRL_REG(winNum));
+
+	baseAddrValue &= ATMWBR_BASE_MASK;
+	windowSizeValue &=ATMWCR_WIN_SIZE_MASK;
+
+   /* Start calculating the effective Base Address */
+   effectiveBaseAddress = baseAddrValue ;
+
+   /* The effective base address will be combined from the chopped (if any)
+	  remap value (according to the size value and remap mechanism) and the
+	  window's base address */
+   effectiveBaseAddress |= (((windowSizeValue) | 0xffff) & pAddrWin->baseLow);
+   /* If the effectiveBaseAddress exceed the window boundaries return an
+	  invalid value. */
+
+   if (effectiveBaseAddress > (baseAddrValue + (windowSizeValue | 0xffff)))
+   {
+		mvOsPrintf("mvAhbToMbusPciRemap: Error\n");
+		return 0xffffffff;
+   }
+
+	return effectiveBaseAddress;
+
+
+}
+/*******************************************************************************
+* mvAhbToMbusWinTargetSwap - Swap AhbToMbus windows between targets
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       target1      - CPU Interface target 1
+*       target2      - CPU Interface target 2
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_ERROR if targets are illigal, or if one of the targets is not
+*	    associated to a valid window .
+*       MV_OK otherwise.
+*
+*******************************************************************************/
+
+
+MV_STATUS mvAhbToMbusWinTargetSwap(MV_TARGET target1,MV_TARGET target2)
+{
+	MV_U32 winNum1,winNum2;
+	MV_AHB_TO_MBUS_DEC_WIN winDec1,winDec2,winDecTemp;
+	AHB_TO_MBUS_REMAP_REG_OFFS remapRegs1,remapRegs2;
+	MV_U32 remapBaseLow1=0,remapBaseLow2=0;
+	MV_U32 remapBaseHigh1=0,remapBaseHigh2=0;
+
+
+	/* Check parameters */
+	if (target1 >= MAX_TARGETS)
+	{
+		mvOsPrintf("mvAhbToMbusWinTargetSwap: target %d is Illigal\n", target1);
+		return MV_ERROR;
+	}
+
+	if (target2 >= MAX_TARGETS)
+	{
+		mvOsPrintf("mvAhbToMbusWinTargetSwap: target %d is Illigal\n", target1);
+		return MV_ERROR;
+	}
+
+
+    /* get window associated with this target */
+	winNum1 = mvAhbToMbusWinTargetGet(target1);
+
+	if (winNum1 == 0xffffffff)
+	{
+		mvOsPrintf("mvAhbToMbusWinTargetSwap: target %d has illigal win %d\n",
+					target1,winNum1);
+		return MV_ERROR;
+
+	}
+
+    /* get window associated with this target */
+	winNum2 = mvAhbToMbusWinTargetGet(target2);
+
+	if (winNum2 == 0xffffffff)
+	{
+		mvOsPrintf("mvAhbToMbusWinTargetSwap: target %d has illigal win %d\n",
+					target2,winNum2);
+		return MV_ERROR;
+
+	}
+
+	/* now Get original values of both Windows */
+	if (MV_OK != mvAhbToMbusWinGet(winNum1,&winDec1))
+	{
+		mvOsPrintf("mvAhbToMbusWinTargetSwap: mvAhbToMbusWinGet failed win %d\n",
+					winNum1);
+		return MV_ERROR;
+
+	}
+	if (MV_OK != mvAhbToMbusWinGet(winNum2,&winDec2))
+	{
+		mvOsPrintf("mvAhbToMbusWinTargetSwap: mvAhbToMbusWinGet failed win %d\n",
+					winNum2);
+		return MV_ERROR;
+
+	}
+
+
+	/* disable both windows */
+	if (MV_OK != mvAhbToMbusWinEnable(winNum1,MV_FALSE))
+	{
+		mvOsPrintf("mvAhbToMbusWinTargetSwap: failed to enable window %d\n",
+					winNum1);
+		return MV_ERROR;
+
+	}
+	if (MV_OK != mvAhbToMbusWinEnable(winNum2,MV_FALSE))
+	{
+		mvOsPrintf("mvAhbToMbusWinTargetSwap: failed to enable windo %d\n",
+					winNum2);
+		return MV_ERROR;
+
+	}
+
+
+	/* now swap targets */
+
+	/* first save winDec2 values */
+	winDecTemp.addrWin.baseHigh = winDec2.addrWin.baseHigh;
+	winDecTemp.addrWin.baseLow = winDec2.addrWin.baseLow;
+	winDecTemp.addrWin.size = winDec2.addrWin.size;
+	winDecTemp.enable = winDec2.enable;
+	winDecTemp.target = winDec2.target;
+
+	/* winDec2 = winDec1 */
+	winDec2.addrWin.baseHigh = winDec1.addrWin.baseHigh;
+	winDec2.addrWin.baseLow = winDec1.addrWin.baseLow;
+	winDec2.addrWin.size = winDec1.addrWin.size;
+	winDec2.enable = winDec1.enable;
+	winDec2.target = winDec1.target;
+
+
+	/* winDec1 = winDecTemp */
+	winDec1.addrWin.baseHigh = winDecTemp.addrWin.baseHigh;
+	winDec1.addrWin.baseLow = winDecTemp.addrWin.baseLow;
+	winDec1.addrWin.size = winDecTemp.addrWin.size;
+	winDec1.enable = winDecTemp.enable;
+	winDec1.target = winDecTemp.target;
+
+
+	/* now set the new values */
+
+
+    mvAhbToMbusWinSet(winNum1,&winDec1);
+	mvAhbToMbusWinSet(winNum2,&winDec2);
+
+
+
+
+
+	/* now we will treat the remap windows if exist */
+
+
+	/* now check if one or both windows has a remap window
+	as well after the swap ! */
+
+	/* if a window had a remap value differnt than the base value
+	before the swap , then after the swap the remap value will be
+	equal to the base value unless both windows has a remap windows*/
+
+	/* first get old values */
+	if (MV_NO_SUCH != ahbToMbusRemapRegOffsGet(winNum1,&remapRegs1))
+	{
+		remapBaseLow1 = MV_REG_READ(remapRegs1.lowRegOffs);
+	    remapBaseHigh1 = MV_REG_READ(remapRegs1.highRegOffs);
+
+	}
+	if (MV_NO_SUCH != ahbToMbusRemapRegOffsGet(winNum2,&remapRegs2))
+	{
+		remapBaseLow2 = MV_REG_READ(remapRegs2.lowRegOffs);
+	    remapBaseHigh2 = MV_REG_READ(remapRegs2.highRegOffs);
+
+
+	}
+
+	/* now do the swap */
+	if (MV_NO_SUCH != ahbToMbusRemapRegOffsGet(winNum1,&remapRegs1))
+	{
+		if (MV_NO_SUCH != ahbToMbusRemapRegOffsGet(winNum2,&remapRegs2))
+		{
+			/* Two windows has a remap !!! so swap */
+
+			MV_REG_WRITE(remapRegs2.highRegOffs,remapBaseHigh1);
+			MV_REG_WRITE(remapRegs2.lowRegOffs,remapBaseLow1);
+
+			MV_REG_WRITE(remapRegs1.highRegOffs,remapBaseHigh2);
+			MV_REG_WRITE(remapRegs1.lowRegOffs,remapBaseLow2);
+
+
+
+		}
+		else
+		{
+			/* remap == base */
+			MV_REG_WRITE(remapRegs1.highRegOffs,winDec1.addrWin.baseHigh);
+			MV_REG_WRITE(remapRegs1.lowRegOffs,winDec1.addrWin.baseLow);
+
+		}
+
+	}
+	else if (MV_NO_SUCH != ahbToMbusRemapRegOffsGet(winNum2,&remapRegs2))
+	{
+		/* remap == base */
+		MV_REG_WRITE(remapRegs2.highRegOffs,winDec2.addrWin.baseHigh);
+		MV_REG_WRITE(remapRegs2.lowRegOffs,winDec2.addrWin.baseLow);
+
+	}
+
+
+
+	return MV_OK;
+
+
+}
+
+
+
+#if defined(MV_88F1181)
+
+/*******************************************************************************
+* mvAhbToMbusXbarCtrlSet - Set The CPU master Xbar arbitration.
+*
+* DESCRIPTION:
+*       This function sets CPU Mbus Arbiter
+*
+* INPUT:
+*       pPizzaArbArray - A priority Structure describing 16 "pizza slices". At
+*                    each clock cycle, the crossbar arbiter samples all
+*                    requests and gives the bus to the next agent according
+*                    to the "pizza".
+*
+* OUTPUT:
+*       N/A
+*
+* RETURN:
+*       MV_ERROR if paramers to function invalid.
+*
+*******************************************************************************/
+MV_STATUS  mvMbusArbSet(MV_MBUS_ARB_TARGET *pPizzaArbArray)
+{
+	MV_U32 sliceNum;
+	MV_U32 xbarCtrl = 0;
+	MV_MBUS_ARB_TARGET xbarTarget;
+
+	/* 1) Set crossbar control low register */
+	for (sliceNum = 0; sliceNum < MRLR_SLICE_NUM; sliceNum++)
+	{
+		xbarTarget = pPizzaArbArray[sliceNum];
+
+		/* sliceNum parameter check */
+		if (xbarTarget > MAX_MBUS_ARB_TARGETS)
+		{
+			mvOsPrintf("mvAhbToMbusXbarCtrlSet: ERR. Can't set Target %d\n",
+																  xbarTarget);
+			return MV_ERROR;
+		}
+		xbarCtrl |= (xbarTarget << MRLR_LOW_ARB_OFFS(sliceNum));
+	}
+	/* Write to crossbar control low register */
+    MV_REG_WRITE(MBUS_ARBITER_LOW_REG, xbarCtrl);
+
+	xbarCtrl = 0;
+
+	/* 2) Set crossbar control high register */
+	for (sliceNum = MRLR_SLICE_NUM;
+		 sliceNum < MRLR_SLICE_NUM+MRHR_SLICE_NUM;
+		 sliceNum++)
+	{
+
+		xbarTarget = pPizzaArbArray[sliceNum];
+
+		/* sliceNum parameter check */
+		if (xbarTarget > MAX_MBUS_ARB_TARGETS)
+		{
+			mvOsPrintf("mvAhbToMbusXbarCtrlSet: ERR. Can't set Target %d\n",
+																  xbarTarget);
+			return MV_ERROR;
+		}
+		xbarCtrl |= (xbarTarget << MRHR_HIGH_ARB_OFFS(sliceNum));
+	}
+	/* Write to crossbar control high register */
+    MV_REG_WRITE(MBUS_ARBITER_HIGH_REG, xbarCtrl);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvMbusArbCtrlSet - Set MBus Arbiter control register
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       ctrl  - pointer to MV_MBUS_ARB_CTRL register
+*
+* OUTPUT:
+*       N/A
+*
+* RETURN:
+*       MV_ERROR if paramers to function invalid.
+*
+*******************************************************************************/
+MV_STATUS mvMbusArbCtrlSet(MV_MBUS_ARB_CTRL *ctrl)
+{
+
+	if (ctrl->highPrio == MV_FALSE)
+	{
+		MV_REG_BIT_RESET(MBUS_ARBITER_CTRL_REG, MACR_ARB_ARM_TOP);
+	}
+	else
+	{
+		MV_REG_BIT_SET(MBUS_ARBITER_CTRL_REG, MACR_ARB_ARM_TOP);
+	}
+
+	if (ctrl->fixedRoundRobin == MV_FALSE)
+	{
+		MV_REG_BIT_RESET(MBUS_ARBITER_CTRL_REG, MACR_ARB_TARGET_FIXED);
+	}
+	else
+	{
+		MV_REG_BIT_SET(MBUS_ARBITER_CTRL_REG, MACR_ARB_TARGET_FIXED);
+	}
+
+	if (ctrl->starvEn == MV_FALSE)
+	{
+		MV_REG_BIT_RESET(MBUS_ARBITER_CTRL_REG, MACR_ARB_REQ_CTRL_EN);
+	}
+	else
+	{
+		MV_REG_BIT_SET(MBUS_ARBITER_CTRL_REG, MACR_ARB_REQ_CTRL_EN);
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvMbusArbCtrlGet - Get MBus Arbiter control register
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       ctrl  - pointer to MV_MBUS_ARB_CTRL register
+*
+* OUTPUT:
+*       ctrl  - pointer to MV_MBUS_ARB_CTRL register
+*
+* RETURN:
+*       MV_ERROR if paramers to function invalid.
+*
+*******************************************************************************/
+MV_STATUS mvMbusArbCtrlGet(MV_MBUS_ARB_CTRL *ctrl)
+{
+
+	MV_U32 ctrlReg = MV_REG_READ(MBUS_ARBITER_CTRL_REG);
+
+	if (ctrlReg & MACR_ARB_ARM_TOP)
+	{
+		ctrl->highPrio = MV_TRUE;
+	}
+	else
+	{
+		ctrl->highPrio = MV_FALSE;
+	}
+
+	if (ctrlReg & MACR_ARB_TARGET_FIXED)
+	{
+		ctrl->fixedRoundRobin = MV_TRUE;
+	}
+	else
+	{
+		ctrl->fixedRoundRobin = MV_FALSE;
+	}
+
+	if (ctrlReg & MACR_ARB_REQ_CTRL_EN)
+	{
+		ctrl->starvEn = MV_TRUE;
+	}
+	else
+	{
+		ctrl->starvEn = MV_FALSE;
+	}
+
+
+	return MV_OK;
+}
+
+#endif  /* #if defined(MV_88F1181) */
+
+
+
+/*******************************************************************************
+* ahbToMbusRemapRegOffsGet - Get CPU address remap register offsets
+*
+* DESCRIPTION:
+* 		CPU to PCI address remap registers offsets are inconsecutive.
+*		This function returns PCI address remap registers offsets.
+*
+* INPUT:
+*       winNum - Address decode window number. See MV_U32 enumerator.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*		MV_ERROR if winNum is not a PCI one.
+*
+*******************************************************************************/
+static MV_STATUS ahbToMbusRemapRegOffsGet(MV_U32 winNum,
+									AHB_TO_MBUS_REMAP_REG_OFFS *pRemapRegs)
+{
+	switch (winNum)
+	{
+		case 0:
+        case 1:
+			pRemapRegs->lowRegOffs  = AHB_TO_MBUS_WIN_REMAP_LOW_REG(winNum);
+			pRemapRegs->highRegOffs = AHB_TO_MBUS_WIN_REMAP_HIGH_REG(winNum);
+			break;
+		case 2:
+		case 3:
+			if((mvCtrlModelGet() == MV_5281_DEV_ID) ||
+				(mvCtrlModelGet() == MV_1281_DEV_ID) ||
+				(mvCtrlModelGet() == MV_6183_DEV_ID) ||
+               (mvCtrlModelGet() == MV_6183L_DEV_ID))
+			{
+				pRemapRegs->lowRegOffs  = AHB_TO_MBUS_WIN_REMAP_LOW_REG(winNum);
+				pRemapRegs->highRegOffs = AHB_TO_MBUS_WIN_REMAP_HIGH_REG(winNum);
+				break;
+			}
+			else
+			{
+				pRemapRegs->lowRegOffs  = 0;
+				pRemapRegs->highRegOffs = 0;
+
+				DB(mvOsPrintf("ahbToMbusRemapRegOffsGet: ERR. Invalid winNum %d\n",
+							winNum));
+				return MV_NO_SUCH;
+			}
+		default:
+		{
+			pRemapRegs->lowRegOffs  = 0;
+			pRemapRegs->highRegOffs = 0;
+
+			DB(mvOsPrintf("ahbToMbusRemapRegOffsGet: ERR. Invalid winNum %d\n",
+						winNum));
+			return MV_NO_SUCH;
+		}
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvAhbToMbusAddDecShow - Print the AHB to MBus bridge address decode map.
+*
+* DESCRIPTION:
+*		This function print the CPU address decode map.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_VOID mvAhbToMbusAddDecShow(MV_VOID)
+{
+	MV_AHB_TO_MBUS_DEC_WIN win;
+	MV_U32 winNum;
+	mvOsOutput( "\n" );
+	mvOsOutput( "AHB To MBUS Bridge:\n" );
+	mvOsOutput( "-------------------\n" );
+
+	for( winNum = 0; winNum < MAX_AHB_TO_MBUS_WINS; winNum++ )
+	{
+		memset( &win, 0, sizeof(MV_AHB_TO_MBUS_DEC_WIN) );
+
+		mvOsOutput( "win%d - ", winNum );
+
+		if( mvAhbToMbusWinGet( winNum, &win ) == MV_OK )
+		{
+			if( win.enable )
+			{
+				mvOsOutput( "%s base %08x, ",
+				mvCtrlTargetNameGet(win.target), win.addrWin.baseLow );
+				mvOsOutput( "...." );
+				mvSizePrint( win.addrWin.size );
+
+				mvOsOutput( "\n" );
+
+            }
+			else
+				mvOsOutput( "disable\n" );
+		}
+	}
+
+}
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbus.h b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbus.h
new file mode 100644
index 000000000000..647e06177456
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbus.h
@@ -0,0 +1,130 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvAhbToMbush
+#define __INCmvAhbToMbush
+
+/* includes */
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/sys/mvAhbToMbusRegs.h"
+#include "ctrlEnv/mvCtrlEnvAddrDec.h"
+
+/* defines  */
+
+#if defined(MV_88F1181)
+/* This enumerator defines the Marvell controller possible MBUS arbiter     */
+/* target ports. It is used to define crossbar priority scheame (pizza)     */
+typedef enum _mvMBusArbTargetId
+{
+    DRAM_MBUS_ARB_TARGET = 0,    /* Port 0 -> DRAM interface         */
+    TWSI_MBUS_ARB_TARGET  = 1,     /* Port 1 -> TWSI 		    */
+    ARM_MBUS_ARB_TARGET   = 2,     /* Port 2 -> ARM		    */
+	PEX1_MBUS_ARB_TARGET  = 3,    /* Port 3 -> PCI Express 1		    */
+    PEX0_MBUS_ARB_TARGET  = 4,    /* Port 4 -> PCI Express0 		    */
+	MAX_MBUS_ARB_TARGETS
+}MV_MBUS_ARB_TARGET;
+
+typedef struct _mvMBusArbCtrl
+{
+	MV_BOOL starvEn;
+	MV_BOOL highPrio;
+	MV_BOOL fixedRoundRobin;
+
+}MV_MBUS_ARB_CTRL;
+
+#endif /* #if defined(MV_88F1181) */
+
+typedef struct _mvAhbtoMbusDecWin
+{
+	MV_TARGET	  target;
+	MV_ADDR_WIN   addrWin;    /* An address window*/
+	MV_BOOL       enable;     /* Address decode window is enabled/disabled    */
+
+}MV_AHB_TO_MBUS_DEC_WIN;
+
+/* mvAhbToMbus.h API list */
+
+MV_STATUS mvAhbToMbusInit(MV_VOID);
+MV_STATUS mvAhbToMbusWinSet(MV_U32 winNum, MV_AHB_TO_MBUS_DEC_WIN *pAddrDecWin);
+MV_STATUS mvAhbToMbusWinGet(MV_U32 winNum, MV_AHB_TO_MBUS_DEC_WIN *pAddrDecWin);
+MV_STATUS mvAhbToMbusWinEnable(MV_U32 winNum,MV_BOOL enable);
+MV_U32    mvAhbToMbusWinRemap(MV_U32 winNum, MV_ADDR_WIN *pAddrDecWin);
+MV_U32	  mvAhbToMbusWinTargetGet(MV_TARGET target);
+MV_U32    mvAhbToMbusWinAvailGet(MV_VOID);
+MV_STATUS mvAhbToMbusWinTargetSwap(MV_TARGET target1,MV_TARGET target2);
+
+#if defined(MV_88F1181)
+
+MV_STATUS mvMbusArbSet(MV_MBUS_ARB_TARGET *pPizzaArbArray);
+MV_STATUS mvMbusArbCtrlSet(MV_MBUS_ARB_CTRL *ctrl);
+MV_STATUS mvMbusArbCtrlGet(MV_MBUS_ARB_CTRL *ctrl);
+
+#endif /* #if defined(MV_88F1181) */
+
+
+MV_VOID   mvAhbToMbusAddDecShow(MV_VOID);
+
+
+#endif /* __INCmvAhbToMbush */
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbusRegs.h b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbusRegs.h
new file mode 100644
index 000000000000..fd3c95f19fe1
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvAhbToMbusRegs.h
@@ -0,0 +1,142 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvAhbToMbusRegsh
+#define __INCmvAhbToMbusRegsh
+
+/******************************/
+/* ARM Address Map Registers  */
+/******************************/
+
+#define MAX_AHB_TO_MBUS_WINS	9
+#define MV_AHB_TO_MBUS_INTREG_WIN	8
+
+
+#define AHB_TO_MBUS_WIN_CTRL_REG(winNum)		(0x20000 + (winNum)*0x10)
+#define AHB_TO_MBUS_WIN_BASE_REG(winNum)		(0x20004 + (winNum)*0x10)
+#define AHB_TO_MBUS_WIN_REMAP_LOW_REG(winNum)	(0x20008 + (winNum)*0x10)
+#define AHB_TO_MBUS_WIN_REMAP_HIGH_REG(winNum)	(0x2000C + (winNum)*0x10)
+#define AHB_TO_MBUS_WIN_INTEREG_REG				 0x20080
+
+/* Window Control Register      */
+/* AHB_TO_MBUS_WIN_CTRL_REG (ATMWCR)*/
+#define ATMWCR_WIN_ENABLE					BIT0	/* Window Enable */
+
+#define ATMWCR_WIN_TARGET_OFFS			4 /* The target interface associated
+											 with this window*/
+#define ATMWCR_WIN_TARGET_MASK			(0xf << ATMWCR_WIN_TARGET_OFFS)
+
+#define ATMWCR_WIN_ATTR_OFFS				8 /* The target interface attributes
+											 Associated with this window */
+#define ATMWCR_WIN_ATTR_MASK				(0xff << ATMWCR_WIN_ATTR_OFFS)
+
+
+/*
+Used with the Base register to set the address window size and location
+Must be programed from LSB to MSB as sequence of 1’s followed
+by sequence of 0’s. The number of 1’s specifies the size of the window
+in 64 KB granularity (e.g. a value of 0x00FF specifies 256 = 16 MB).
+
+NOTE: A value of 0x0 specifies 64KB size.
+*/
+#define ATMWCR_WIN_SIZE_OFFS				16 /* Window Size */
+#define ATMWCR_WIN_SIZE_MASK				(0xffff << ATMWCR_WIN_SIZE_OFFS)
+#define ATMWCR_WIN_SIZE_ALIGNMENT			0x10000
+
+/*  Window Base Register     */
+/* AHB_TO_MBUS_WIN_BASE_REG (ATMWBR) */
+
+/*
+Used with the size field to set the address window size and location.
+Corresponds to transaction address[31:16]
+*/
+#define ATMWBR_BASE_OFFS					16 /* Base Address */
+#define ATMWBR_BASE_MASK					(0xffff << 	ATMWBR_BASE_OFFS)
+#define ATMWBR_BASE_ALIGNMENT				0x10000
+
+/*  Window Remap Low Register   */
+/* AHB_TO_MBUS_WIN_REMAP_LOW_REG (ATMWRLR) */
+
+/*
+Used with the size field to specifies address bits[31:0] to be driven to
+the target interface.:
+target_addr[31:16] = (addr[31:16] & size[15:0]) | (remap[31:16] & ~size[15:0])
+*/
+#define ATMWRLR_REMAP_LOW_OFFS			16 /* Remap Address */
+#define ATMWRLR_REMAP_LOW_MASK			(0xffff << ATMWRLR_REMAP_LOW_OFFS)
+#define ATMWRLR_REMAP_LOW_ALIGNMENT		0x10000
+
+/* Window Remap High Register   */
+/* AHB_TO_MBUS_WIN_REMAP_HIGH_REG (ATMWRHR) */
+
+/*
+Specifies address bits[63:32] to be driven to the target interface.
+target_addr[63:32] = (RemapHigh[31:0]
+*/
+#define ATMWRHR_REMAP_HIGH_OFFS			0 /* Remap Address */
+#define ATMWRHR_REMAP_HIGH_MASK			(0xffffffff << ATMWRHR_REMAP_HIGH_OFFS)
+
+
+#endif /* __INCmvAhbToMbusRegsh */
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIf.c b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIf.c
new file mode 100644
index 000000000000..fff4529f3832
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIf.c
@@ -0,0 +1,1034 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+/* includes */
+#include "ctrlEnv/sys/mvCpuIf.h"
+#include "ctrlEnv/sys/mvAhbToMbusRegs.h"
+#include "cpu/mvCpu.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "mvSysHwConfig.h"
+#include "mvSysDram.h"
+
+/*#define MV_DEBUG*/
+/* defines  */
+
+#ifdef MV_DEBUG
+	#define DB(x)	x
+#else
+	#define DB(x)
+#endif
+
+/* locals   */
+/* static functions */
+static MV_BOOL cpuTargetWinOverlap(MV_TARGET target, MV_ADDR_WIN *pAddrWin);
+
+MV_TARGET * sampleAtResetTargetArray;
+MV_TARGET sampleAtResetTargetArrayP[] = BOOT_TARGETS_NAME_ARRAY;
+MV_TARGET sampleAtResetTargetArray6180P[] = BOOT_TARGETS_NAME_ARRAY_6180;
+/*******************************************************************************
+* mvCpuIfInit - Initialize Controller CPU interface
+*
+* DESCRIPTION:
+*       This function initialize Controller CPU interface:
+*       1. Set CPU interface configuration registers.
+*       2. Set CPU master Pizza arbiter control according to static
+*          configuration described in configuration file.
+*       3. Opens CPU address decode windows. DRAM windows are assumed to be
+*		   already set (auto detection).
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_STATUS mvCpuIfInit(MV_CPU_DEC_WIN *cpuAddrWinMap)
+{
+	MV_U32 regVal;
+	MV_TARGET target;
+	MV_ADDR_WIN addrWin;
+
+	if (cpuAddrWinMap == NULL)
+	{
+		DB(mvOsPrintf("mvCpuIfInit:ERR. cpuAddrWinMap == NULL\n"));
+		return MV_ERROR;
+	}
+
+    /*Initialize the boot target array according to device type*/
+    if(mvCtrlModelGet() == MV_6180_DEV_ID)
+        sampleAtResetTargetArray = sampleAtResetTargetArray6180P;
+    else
+        sampleAtResetTargetArray = sampleAtResetTargetArrayP;
+
+	/* Set ARM Configuration register */
+	regVal  = MV_REG_READ(CPU_CONFIG_REG);
+	regVal &= ~CPU_CONFIG_DEFAULT_MASK;
+	regVal |= CPU_CONFIG_DEFAULT;
+	MV_REG_WRITE(CPU_CONFIG_REG,regVal);
+
+	/* First disable all CPU target windows  */
+	for (target = 0; cpuAddrWinMap[target].enable != TBL_TERM; target++)
+	{
+		if ((MV_TARGET_IS_DRAM(target))||(target == INTER_REGS))
+		{
+			continue;
+		}
+
+#if defined(MV_MEM_OVER_PCI_WA) || defined(MV_UART_OVER_PCI_WA)
+		/* If the target PEX or PCI and memory is over PEX or PCI we don't touch this CPU windows */
+		if (MV_TARGET_IS_PCI(target))
+		{
+			continue;
+		}
+#endif
+
+#if defined(MV_MEM_OVER_PEX_WA) || defined(MV_UART_OVER_PEX_WA)
+		/* If the target PEX or PCI and memory is over PEX or PCI we don't touch this CPU windows */
+		if (MV_TARGET_IS_PEX(target))
+		{
+			continue;
+		}
+#endif
+#if defined(MV_RUN_FROM_FLASH)
+		/* Don't disable the boot device.                               */
+		if (target == DEV_BOOCS)
+		{
+			continue;
+		}
+#endif /* MV_RUN_FROM_FLASH */
+		mvCpuIfTargetWinEnable(MV_CHANGE_BOOT_CS(target),MV_FALSE);
+	}
+
+#if defined(MV_RUN_FROM_FLASH)
+	/* Resize the bootcs windows before other windows, because this     */
+	/* window is enabled and will cause an overlap if not resized.      */
+	target = DEV_BOOCS;
+
+	if (MV_OK != mvCpuIfTargetWinSet(target, &cpuAddrWinMap[target]))
+	{
+		DB(mvOsPrintf("mvCpuIfInit:ERR. mvCpuIfTargetWinSet fail\n"));
+		return MV_ERROR;
+	}
+
+	addrWin.baseLow = cpuAddrWinMap[target].addrWin.baseLow;
+	addrWin.baseHigh = cpuAddrWinMap[target].addrWin.baseHigh;
+	if (0xffffffff == mvAhbToMbusWinRemap(cpuAddrWinMap[target].winNum ,&addrWin))
+	{
+		DB(mvOsPrintf("mvCpuIfInit:WARN. mvAhbToMbusWinRemap can't remap winNum=%d\n",
+					  cpuAddrWinMap[target].winNum));
+	}
+
+#endif /* MV_RUN_FROM_FLASH */
+
+	/* Go through all targets in user table until table terminator			*/
+	for (target = 0; cpuAddrWinMap[target].enable != TBL_TERM; target++)
+	{
+
+#if defined(MV_RUN_FROM_FLASH)
+	if (target == DEV_BOOCS)
+	{
+		continue;
+	}
+#endif /* MV_RUN_FROM_FLASH */
+
+	/* if DRAM auto sizing is used do not initialized DRAM target windows, 	*/
+	/* assuming this already has been done earlier.							*/
+#ifdef	MV_DRAM_AUTO_SIZE
+		if (MV_TARGET_IS_DRAM(target))
+		{
+			continue;
+		}
+#endif
+
+#if defined(MV_MEM_OVER_PCI_WA) || defined(MV_UART_OVER_PCI_WA)
+		/* If the target PEX or PCI and memory is over PEX or PCI we don't touch this CPU windows */
+		if (MV_TARGET_IS_PCI(target))
+		{
+			continue;
+		}
+#endif
+
+#if defined(MV_MEM_OVER_PEX_WA) || defined(MV_UART_OVER_PEX_WA)
+		/* If the target PEX or PCI and memory is over PEX or PCI we don't touch this CPU windows */
+		if (MV_TARGET_IS_PEX(target))
+		{
+			continue;
+		}
+#endif
+	/* If the target attribute is the same as the boot device attribute */
+	/* then it's stays disable */
+		if (MV_TARGET_IS_AS_BOOT(target))
+		{
+			continue;
+		}
+
+		if((0 == cpuAddrWinMap[target].addrWin.size) ||
+		   (DIS == cpuAddrWinMap[target].enable))
+
+		{
+			if (MV_OK != mvCpuIfTargetWinEnable(target, MV_FALSE))
+			{
+				DB(mvOsPrintf("mvCpuIfInit:ERR. mvCpuIfTargetWinEnable fail\n"));
+				return MV_ERROR;
+			}
+
+		}
+		else
+		{
+			if (MV_OK != mvCpuIfTargetWinSet(target, &cpuAddrWinMap[target]))
+			{
+				DB(mvOsPrintf("mvCpuIfInit:ERR. mvCpuIfTargetWinSet fail\n"));
+				return MV_ERROR;
+			}
+
+			addrWin.baseLow = cpuAddrWinMap[target].addrWin.baseLow;
+			addrWin.baseHigh = cpuAddrWinMap[target].addrWin.baseHigh;
+			if (0xffffffff == mvAhbToMbusWinRemap(cpuAddrWinMap[target].winNum ,&addrWin))
+			{
+				DB(mvOsPrintf("mvCpuIfInit:WARN. mvAhbToMbusWinRemap can't remap winNum=%d\n",
+							  cpuAddrWinMap[target].winNum));
+			}
+
+
+		}
+    }
+
+	return MV_OK;
+
+
+}
+
+
+/*******************************************************************************
+* mvCpuIfTargetWinSet - Set CPU-to-peripheral target address window
+*
+* DESCRIPTION:
+*       This function sets a peripheral target (e.g. SDRAM bank0, PCI0_MEM0)
+*       address window, also known as address decode window.
+*       A new address decode window is set for specified target address window.
+*       If address decode window parameter structure enables the window,
+*       the routine will also enable the target window, allowing CPU to access
+*       the target window.
+*
+* INPUT:
+*       target      - Peripheral target enumerator.
+*       pAddrDecWin - CPU target window data structure.
+*
+* OUTPUT:
+*       N/A
+*
+* RETURN:
+*       MV_OK if CPU target window was set correctly, MV_ERROR in case of
+*       address window overlapps with other active CPU target window or
+*		trying to assign 36bit base address while CPU does not support that.
+*       The function returns MV_NOT_SUPPORTED, if the target is unsupported.
+*
+*******************************************************************************/
+MV_STATUS mvCpuIfTargetWinSet(MV_TARGET target, MV_CPU_DEC_WIN *pAddrDecWin)
+{
+	MV_AHB_TO_MBUS_DEC_WIN decWin;
+	MV_U32 existingWinNum;
+	MV_DRAM_DEC_WIN addrDecWin;
+
+	target = MV_CHANGE_BOOT_CS(target);
+
+	/* Check parameters */
+	if (target >= MAX_TARGETS)
+	{
+		mvOsPrintf("mvCpuIfTargetWinSet: target %d is Illigal\n", target);
+		return MV_ERROR;
+	}
+
+	/* 2) Check if the requested window overlaps with current windows		*/
+	if (MV_TRUE == cpuTargetWinOverlap(target, &pAddrDecWin->addrWin))
+	{
+		mvOsPrintf("mvCpuIfTargetWinSet: ERR. Target %d overlap\n", target);
+		return MV_BAD_PARAM;
+	}
+
+	if (MV_TARGET_IS_DRAM(target))
+	{
+		/* copy relevant data to MV_DRAM_DEC_WIN structure */
+		addrDecWin.addrWin.baseHigh = pAddrDecWin->addrWin.baseHigh;
+		addrDecWin.addrWin.baseLow = pAddrDecWin->addrWin.baseLow;
+		addrDecWin.addrWin.size = pAddrDecWin->addrWin.size;
+		addrDecWin.enable = pAddrDecWin->enable;
+
+
+		if (mvDramIfWinSet(target,&addrDecWin) != MV_OK);
+		{
+			mvOsPrintf("mvCpuIfTargetWinSet: mvDramIfWinSet Failed\n");
+			return MV_ERROR;
+		}
+
+	}
+	else
+	{
+		/* copy relevant data to MV_AHB_TO_MBUS_DEC_WIN structure */
+		decWin.addrWin.baseLow = pAddrDecWin->addrWin.baseLow;
+		decWin.addrWin.baseHigh = pAddrDecWin->addrWin.baseHigh;
+		decWin.addrWin.size = pAddrDecWin->addrWin.size;
+		decWin.enable = pAddrDecWin->enable;
+		decWin.target = target;
+
+		existingWinNum = mvAhbToMbusWinTargetGet(target);
+
+		/* check if there is already another Window configured
+		for this target */
+		if ((existingWinNum < MAX_AHB_TO_MBUS_WINS )&&
+			(existingWinNum != pAddrDecWin->winNum))
+		{
+			/* if we want to enable the new winow number
+			passed by the user , then the old one should
+			be disabled */
+			if (MV_TRUE == pAddrDecWin->enable)
+			{
+				/* be sure it is disabled */
+				mvAhbToMbusWinEnable(existingWinNum , MV_FALSE);
+			}
+		}
+
+        if (mvAhbToMbusWinSet(pAddrDecWin->winNum,&decWin) != MV_OK)
+		{
+			mvOsPrintf("mvCpuIfTargetWinSet: mvAhbToMbusWinSet Failed\n");
+			return MV_ERROR;
+		}
+
+	}
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvCpuIfTargetWinGet - Get CPU-to-peripheral target address window
+*
+* DESCRIPTION:
+*		Get the CPU peripheral target address window.
+*
+* INPUT:
+*       target - Peripheral target enumerator
+*
+* OUTPUT:
+*       pAddrDecWin - CPU target window information data structure.
+*
+* RETURN:
+*       MV_OK if target exist, MV_ERROR otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvCpuIfTargetWinGet(MV_TARGET target, MV_CPU_DEC_WIN *pAddrDecWin)
+{
+
+	MV_U32 winNum=0xffffffff;
+	MV_AHB_TO_MBUS_DEC_WIN decWin;
+	MV_DRAM_DEC_WIN addrDecWin;
+
+	target = MV_CHANGE_BOOT_CS(target);
+
+	/* Check parameters */
+	if (target >= MAX_TARGETS)
+	{
+		mvOsPrintf("mvCpuIfTargetWinGet: target %d is Illigal\n", target);
+		return MV_ERROR;
+	}
+
+	if (MV_TARGET_IS_DRAM(target))
+	{
+		if (mvDramIfWinGet(target,&addrDecWin) != MV_OK)
+		{
+			mvOsPrintf("mvCpuIfTargetWinGet: Failed to get window target %d\n",
+					   target);
+			return MV_ERROR;
+		}
+
+		/* copy relevant data to MV_CPU_DEC_WIN structure */
+		pAddrDecWin->addrWin.baseLow = addrDecWin.addrWin.baseLow;
+		pAddrDecWin->addrWin.baseHigh = addrDecWin.addrWin.baseHigh;
+		pAddrDecWin->addrWin.size = addrDecWin.addrWin.size;
+		pAddrDecWin->enable = addrDecWin.enable;
+		pAddrDecWin->winNum = 0xffffffff;
+
+	}
+	else
+	{
+		/* get the Window number associated with this target */
+
+		winNum = mvAhbToMbusWinTargetGet(target);
+		if (winNum >= MAX_AHB_TO_MBUS_WINS)
+		{
+			return MV_NO_SUCH;
+
+		}
+
+		if (mvAhbToMbusWinGet(winNum , &decWin) != MV_OK)
+		{
+			mvOsPrintf("%s: mvAhbToMbusWinGet Failed at winNum = %d\n",
+					   __FUNCTION__, winNum);
+			return MV_ERROR;
+
+		}
+
+		/* copy relevant data to MV_CPU_DEC_WIN structure */
+		pAddrDecWin->addrWin.baseLow = decWin.addrWin.baseLow;
+		pAddrDecWin->addrWin.baseHigh = decWin.addrWin.baseHigh;
+		pAddrDecWin->addrWin.size = decWin.addrWin.size;
+		pAddrDecWin->enable = decWin.enable;
+		pAddrDecWin->winNum = winNum;
+
+	}
+
+
+
+
+	return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvCpuIfTargetWinEnable - Enable/disable a CPU address decode window
+*
+* DESCRIPTION:
+*       This function enable/disable a CPU address decode window.
+*       if parameter 'enable' == MV_TRUE the routine will enable the
+*       window, thus enabling CPU accesses (before enabling the window it is
+*       tested for overlapping). Otherwise, the window will be disabled.
+*
+* INPUT:
+*       target - Peripheral target enumerator.
+*       enable - Enable/disable parameter.
+*
+* OUTPUT:
+*       N/A
+*
+* RETURN:
+*       MV_ERROR if protection window number was wrong, or the window
+*       overlapps other target window.
+*
+*******************************************************************************/
+MV_STATUS mvCpuIfTargetWinEnable(MV_TARGET target,MV_BOOL enable)
+{
+	MV_U32 winNum, temp;
+	MV_CPU_DEC_WIN addrDecWin;
+
+	target = MV_CHANGE_BOOT_CS(target);
+
+	/* Check parameters */
+	if (target >= MAX_TARGETS)
+	{
+		mvOsPrintf("mvCpuIfTargetWinEnable: target %d is Illigal\n", target);
+		return MV_ERROR;
+	}
+
+	/* get the window and check if it exist */
+	temp = mvCpuIfTargetWinGet(target, &addrDecWin);
+	if (MV_NO_SUCH == temp)
+	{
+		return (enable? MV_ERROR: MV_OK);
+	}
+	else if( MV_OK != temp)
+	{
+		mvOsPrintf("%s: ERR. Getting target %d failed.\n",__FUNCTION__, target);
+		return MV_ERROR;
+	}
+
+
+	/* check overlap */
+
+	if (MV_TRUE == enable)
+	{
+		if (MV_TRUE == cpuTargetWinOverlap(target, &addrDecWin.addrWin))
+		{
+			DB(mvOsPrintf("%s: ERR. Target %d overlap\n",__FUNCTION__, target));
+			return MV_ERROR;
+		}
+
+	}
+
+
+	if (MV_TARGET_IS_DRAM(target))
+	{
+		if (mvDramIfWinEnable(target , enable) != MV_OK)
+		{
+			mvOsPrintf("mvCpuIfTargetWinGet: mvDramIfWinEnable Failed at \n");
+			return MV_ERROR;
+
+		}
+
+	}
+	else
+	{
+		/* get the Window number associated with this target */
+
+		winNum = mvAhbToMbusWinTargetGet(target);
+
+		if (winNum >= MAX_AHB_TO_MBUS_WINS)
+		{
+			return (enable? MV_ERROR: MV_OK);
+		}
+
+		if (mvAhbToMbusWinEnable(winNum , enable) != MV_OK)
+		{
+			mvOsPrintf("mvCpuIfTargetWinGet: Failed to enable window = %d\n",
+					   winNum);
+			return MV_ERROR;
+
+		}
+
+	}
+
+	return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvCpuIfTargetWinSizeGet - Get CPU target address window size
+*
+* DESCRIPTION:
+*		Get the size of CPU-to-peripheral target window.
+*
+* INPUT:
+*       target - Peripheral target enumerator
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit size. Function also returns '0' if window is closed.
+*		Function returns 0xFFFFFFFF in case of an error.
+*
+*******************************************************************************/
+MV_U32    mvCpuIfTargetWinSizeGet(MV_TARGET target)
+{
+	MV_CPU_DEC_WIN addrDecWin;
+
+	target = MV_CHANGE_BOOT_CS(target);
+
+	/* Check parameters */
+	if (target >= MAX_TARGETS)
+	{
+		mvOsPrintf("mvCpuIfTargetWinSizeGet: target %d is Illigal\n", target);
+		return 0;
+	}
+
+    /* Get the winNum window */
+	if (MV_OK != mvCpuIfTargetWinGet(target, &addrDecWin))
+	{
+		mvOsPrintf("mvCpuIfTargetWinSizeGet:ERR. Getting target %d failed.\n",
+                                                                        target);
+		return 0;
+	}
+
+	/* Check if window is enabled   */
+	if (addrDecWin.enable == MV_TRUE)
+    {
+		return (addrDecWin.addrWin.size);
+    }
+    else
+    {
+        return 0;		/* Window disabled. return 0 */
+    }
+}
+
+/*******************************************************************************
+* mvCpuIfTargetWinBaseLowGet - Get CPU target address window base low
+*
+* DESCRIPTION:
+*       CPU-to-peripheral target address window base is constructed of
+*       two parts: Low and high.
+*		This function gets the CPU peripheral target low base address.
+*
+* INPUT:
+*       target - Peripheral target enumerator
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit low base address.
+*
+*******************************************************************************/
+MV_U32   mvCpuIfTargetWinBaseLowGet(MV_TARGET target)
+{
+	MV_CPU_DEC_WIN addrDecWin;
+
+	target = MV_CHANGE_BOOT_CS(target);
+
+	/* Check parameters */
+	if (target >= MAX_TARGETS)
+	{
+		mvOsPrintf("mvCpuIfTargetWinBaseLowGet: target %d is Illigal\n", target);
+		return 0xffffffff;
+	}
+
+    /* Get the target window */
+	if (MV_OK != mvCpuIfTargetWinGet(target, &addrDecWin))
+	{
+		mvOsPrintf("mvCpuIfTargetWinBaseLowGet:ERR. Getting target %d failed.\n",
+                                                                        target);
+		return 0xffffffff;
+	}
+
+	if (MV_FALSE == addrDecWin.enable)
+	{
+		return 0xffffffff;
+	}
+	return (addrDecWin.addrWin.baseLow);
+}
+
+/*******************************************************************************
+* mvCpuIfTargetWinBaseHighGet - Get CPU target address window base high
+*
+* DESCRIPTION:
+*       CPU-to-peripheral target address window base is constructed of
+*       two parts: Low and high.
+*		This function gets the CPU peripheral target high base address.
+*
+* INPUT:
+*       target - Peripheral target enumerator
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit high base address.
+*
+*******************************************************************************/
+MV_U32    mvCpuIfTargetWinBaseHighGet(MV_TARGET target)
+{
+	MV_CPU_DEC_WIN addrDecWin;
+
+	target = MV_CHANGE_BOOT_CS(target);
+
+	/* Check parameters */
+	if (target >= MAX_TARGETS)
+	{
+		mvOsPrintf("mvCpuIfTargetWinBaseLowGet: target %d is Illigal\n", target);
+		return 0xffffffff;
+	}
+
+    /* Get the target window */
+	if (MV_OK != mvCpuIfTargetWinGet(target, &addrDecWin))
+	{
+		mvOsPrintf("mvCpuIfTargetWinBaseHighGet:ERR. Getting target %d failed.\n",
+                                                                        target);
+		return 0xffffffff;
+	}
+
+	if (MV_FALSE == addrDecWin.enable)
+	{
+		return 0;
+	}
+
+	return (addrDecWin.addrWin.baseHigh);
+}
+
+#if defined(MV_INCLUDE_PEX)
+/*******************************************************************************
+* mvCpuIfPexRemap - Set CPU remap register for address windows.
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       pexTarget   - Peripheral target enumerator. Must be a PEX target.
+*       pAddrDecWin - CPU target window information data structure.
+*                     Note that caller has to fill in the base field only. The
+*                     size field is ignored.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_ERROR if target is not a PEX one, MV_OK otherwise.
+*
+*******************************************************************************/
+MV_U32 mvCpuIfPexRemap(MV_TARGET pexTarget, MV_ADDR_WIN *pAddrDecWin)
+{
+	MV_U32 winNum;
+
+	/* Check parameters */
+
+	if (mvCtrlPexMaxIfGet() > 1)
+	{
+		if ((!MV_TARGET_IS_PEX1(pexTarget))&&(!MV_TARGET_IS_PEX0(pexTarget)))
+		{
+			mvOsPrintf("mvCpuIfPexRemap: target %d is Illigal\n",pexTarget);
+			return 0xffffffff;
+		}
+
+	}
+	else
+	{
+		if (!MV_TARGET_IS_PEX0(pexTarget))
+		{
+			mvOsPrintf("mvCpuIfPexRemap: target %d is Illigal\n",pexTarget);
+			return 0xffffffff;
+		}
+
+	}
+
+	/* get the Window number associated with this target */
+	winNum = mvAhbToMbusWinTargetGet(pexTarget);
+
+	if (winNum >= MAX_AHB_TO_MBUS_WINS)
+	{
+		mvOsPrintf("mvCpuIfPexRemap: mvAhbToMbusWinTargetGet Failed\n");
+		return 0xffffffff;
+
+	}
+
+	return mvAhbToMbusWinRemap(winNum , pAddrDecWin);
+}
+
+#endif
+
+#if defined(MV_INCLUDE_PCI)
+/*******************************************************************************
+* mvCpuIfPciRemap - Set CPU remap register for address windows.
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       pciTarget   - Peripheral target enumerator. Must be a PCI target.
+*       pAddrDecWin - CPU target window information data structure.
+*                     Note that caller has to fill in the base field only. The
+*                     size field is ignored.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_ERROR if target is not a PCI one, MV_OK otherwise.
+*
+*******************************************************************************/
+MV_U32 mvCpuIfPciRemap(MV_TARGET pciTarget, MV_ADDR_WIN *pAddrDecWin)
+{
+	MV_U32 winNum;
+
+	/* Check parameters */
+	if (!MV_TARGET_IS_PCI(pciTarget))
+	{
+		mvOsPrintf("mvCpuIfPciRemap: target %d is Illigal\n",pciTarget);
+		return 0xffffffff;
+	}
+
+	/* get the Window number associated with this target */
+	winNum = mvAhbToMbusWinTargetGet(pciTarget);
+
+	if (winNum >= MAX_AHB_TO_MBUS_WINS)
+	{
+		mvOsPrintf("mvCpuIfPciRemap: mvAhbToMbusWinTargetGet Failed\n");
+		return 0xffffffff;
+
+	}
+
+	return mvAhbToMbusWinRemap(winNum , pAddrDecWin);
+}
+#endif /* MV_INCLUDE_PCI */
+
+
+/*******************************************************************************
+* mvCpuIfPciIfRemap - Set CPU remap register for address windows.
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       pciTarget   - Peripheral target enumerator. Must be a PCI target.
+*       pAddrDecWin - CPU target window information data structure.
+*                     Note that caller has to fill in the base field only. The
+*                     size field is ignored.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_ERROR if target is not a PCI one, MV_OK otherwise.
+*
+*******************************************************************************/
+MV_U32 mvCpuIfPciIfRemap(MV_TARGET pciIfTarget, MV_ADDR_WIN *pAddrDecWin)
+{
+#if defined(MV_INCLUDE_PEX)
+	if (MV_TARGET_IS_PEX(pciIfTarget))
+	{
+		return mvCpuIfPexRemap(pciIfTarget,pAddrDecWin);
+	}
+#endif
+#if defined(MV_INCLUDE_PCI)
+
+	if (MV_TARGET_IS_PCI(pciIfTarget))
+	{
+		return mvCpuIfPciRemap(pciIfTarget,pAddrDecWin);
+	}
+#endif
+	return 0;
+}
+
+
+
+/*******************************************************************************
+* mvCpuIfTargetOfBaseAddressGet - Get the target according to base address
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       baseAddress -  base address to be checked
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       the target number that baseAddress belongs to or MAX_TARGETS is not
+*       found
+*
+*******************************************************************************/
+
+MV_TARGET mvCpuIfTargetOfBaseAddressGet(MV_U32 baseAddress)
+{
+	MV_CPU_DEC_WIN win;
+	MV_U32 target;
+
+	for( target = 0; target < MAX_TARGETS; target++ )
+	{
+		if( mvCpuIfTargetWinGet( target, &win ) == MV_OK )
+		{
+			if( win.enable )
+			{
+				if ((baseAddress >= win.addrWin.baseLow) &&
+					(baseAddress < win.addrWin.baseLow + win.addrWin.size)) break;
+            }
+		}
+		else return MAX_TARGETS;
+
+	}
+
+	return target;
+}
+/*******************************************************************************
+* cpuTargetWinOverlap - Detect CPU address decode windows overlapping
+*
+* DESCRIPTION:
+*       An unpredicted behaviur is expected in case CPU address decode
+*       windows overlapps.
+*       This function detects CPU address decode windows overlapping of a
+*       specified target. The function does not check the target itself for
+*       overlapping. The function also skipps disabled address decode windows.
+*
+* INPUT:
+*       target      - Peripheral target enumerator.
+*       pAddrDecWin - An address decode window struct.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if the given address window overlaps current address
+*       decode map, MV_FALSE otherwise.
+*
+*******************************************************************************/
+static MV_BOOL cpuTargetWinOverlap(MV_TARGET target, MV_ADDR_WIN *pAddrWin)
+{
+    MV_U32 			targetNum;
+    MV_CPU_DEC_WIN 	addrDecWin;
+	MV_STATUS		status;
+
+
+	for(targetNum = 0; targetNum < MAX_TARGETS; targetNum++)
+    {
+#if defined(MV_RUN_FROM_FLASH)
+		if(MV_TARGET_IS_AS_BOOT(target))
+		{
+			if (MV_CHANGE_BOOT_CS(targetNum) == target)
+				continue;
+		}
+#endif /* MV_RUN_FROM_FLASH */
+
+		/* don't check our target or illegal targets */
+        if (targetNum == target)
+        {
+            continue;
+        }
+
+		/* Get window parameters 	*/
+		status = mvCpuIfTargetWinGet(targetNum, &addrDecWin);
+        if(MV_NO_SUCH == status)
+        {
+            continue;
+        }
+		if(MV_OK != status)
+		{
+			DB(mvOsPrintf("cpuTargetWinOverlap: ERR. TargetWinGet failed\n"));
+            return MV_TRUE;
+		}
+
+		/* Do not check disabled windows	*/
+		if (MV_FALSE == addrDecWin.enable)
+		{
+			continue;
+		}
+
+        if(MV_TRUE == ctrlWinOverlapTest(pAddrWin, &addrDecWin.addrWin))
+		{
+			DB(mvOsPrintf(
+			"cpuTargetWinOverlap: Required target %d overlap current %d\n",
+								target, targetNum));
+			return MV_TRUE;
+		}
+    }
+
+	return MV_FALSE;
+
+}
+
+/*******************************************************************************
+* mvCpuIfAddDecShow - Print the CPU address decode map.
+*
+* DESCRIPTION:
+*		This function print the CPU address decode map.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_VOID mvCpuIfAddDecShow(MV_VOID)
+{
+	MV_CPU_DEC_WIN win;
+	MV_U32 target;
+	mvOsOutput( "\n" );
+	mvOsOutput( "CPU Interface\n" );
+	mvOsOutput( "-------------\n" );
+
+	for( target = 0; target < MAX_TARGETS; target++ )
+	{
+
+		memset( &win, 0, sizeof(MV_CPU_DEC_WIN) );
+
+		mvOsOutput( "%s ",mvCtrlTargetNameGet(target));
+		mvOsOutput( "...." );
+
+		if( mvCpuIfTargetWinGet( target, &win ) == MV_OK )
+		{
+			if( win.enable )
+			{
+				mvOsOutput( "base %08x, ", win.addrWin.baseLow );
+				mvSizePrint( win.addrWin.size );
+				mvOsOutput( "\n" );
+
+            }
+			else
+				mvOsOutput( "disable\n" );
+		}
+		else if( mvCpuIfTargetWinGet( target, &win ) == MV_NO_SUCH )
+		{
+				mvOsOutput( "no such\n" );
+		}
+	}
+}
+
+/*******************************************************************************
+* mvCpuIfEnablePex - Enable PCI Express.
+*
+* DESCRIPTION:
+*		This function Enable PCI Express.
+*
+* INPUT:
+*       pexIf   -  PEX interface number.
+*       pexType -  MV_PEX_ROOT_COMPLEX - root complex device
+*		   MV_PEX_END_POINT - end point device
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+#if defined(MV_INCLUDE_PEX)
+MV_VOID mvCpuIfEnablePex(MV_U32 pexIf, MV_PEX_TYPE pexType)
+{
+	/* Set pex mode incase S@R not exist */
+	if( pexType == MV_PEX_END_POINT)
+	{
+		MV_REG_BIT_RESET(PEX_CTRL_REG(pexIf),PXCR_DEV_TYPE_CTRL_MASK);
+		/* Change pex mode in capability reg */
+		MV_REG_BIT_RESET(PEX_CFG_DIRECT_ACCESS(pexIf,PEX_CAPABILITY_REG), BIT22);
+		MV_REG_BIT_SET(PEX_CFG_DIRECT_ACCESS(pexIf,PEX_CAPABILITY_REG), BIT20);
+
+	}
+	else
+	{
+		MV_REG_BIT_SET(PEX_CTRL_REG(pexIf),PXCR_DEV_TYPE_CTRL_MASK);
+	}
+
+	/* CPU config register Pex enable */
+	MV_REG_BIT_SET(CPU_CTRL_STAT_REG,CCSR_PCI_ACCESS_MASK);
+}
+#endif
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIf.h b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIf.h
new file mode 100644
index 000000000000..1c5a8e4a4458
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIf.h
@@ -0,0 +1,120 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvCpuIfh
+#define __INCmvCpuIfh
+
+/* includes */
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/sys/mvCpuIfRegs.h"
+#include "ctrlEnv/sys/mvAhbToMbus.h"
+#include "ddr2/mvDramIf.h"
+#include "ctrlEnv/sys/mvSysDram.h"
+#if defined(MV_INCLUDE_PEX)
+#include "pex/mvPex.h"
+#endif
+
+/* defines  */
+
+/* typedefs */
+/* This structure describes CPU interface address decode window               */
+typedef struct _mvCpuIfDecWin
+{
+	MV_ADDR_WIN   addrWin;    /* An address window*/
+	MV_U32		  winNum;	  /* Window Number in the AHB To Mbus bridge */
+	MV_BOOL       enable;     /* Address decode window is enabled/disabled    */
+
+}MV_CPU_DEC_WIN;
+
+
+
+/* mvCpuIfLib.h API list */
+
+/* mvCpuIfLib.h API list */
+
+MV_STATUS mvCpuIfInit(MV_CPU_DEC_WIN *cpuAddrWinMap);
+MV_STATUS mvCpuIfTargetWinSet(MV_TARGET target, MV_CPU_DEC_WIN *pAddrDecWin);
+MV_STATUS mvCpuIfTargetWinGet(MV_TARGET target, MV_CPU_DEC_WIN *pAddrDecWin);
+MV_STATUS mvCpuIfTargetWinEnable(MV_TARGET target,MV_BOOL enable);
+MV_U32    mvCpuIfTargetWinSizeGet(MV_TARGET target);
+MV_U32    mvCpuIfTargetWinBaseLowGet(MV_TARGET target);
+MV_U32    mvCpuIfTargetWinBaseHighGet(MV_TARGET target);
+MV_TARGET mvCpuIfTargetOfBaseAddressGet(MV_U32 baseAddress);
+#if defined(MV_INCLUDE_PEX)
+MV_U32    mvCpuIfPexRemap(MV_TARGET pexTarget, MV_ADDR_WIN *pAddrDecWin);
+MV_VOID   mvCpuIfEnablePex(MV_U32 pexIf, MV_PEX_TYPE pexType);
+#endif
+#if defined(MV_INCLUDE_PCI)
+MV_U32    mvCpuIfPciRemap(MV_TARGET pciTarget, MV_ADDR_WIN *pAddrDecWin);
+#endif
+MV_U32 	  mvCpuIfPciIfRemap(MV_TARGET pciTarget, MV_ADDR_WIN *pAddrDecWin);
+
+MV_VOID   mvCpuIfAddDecShow(MV_VOID);
+
+#if defined(MV88F6281)
+MV_STATUS mvCpuIfBridgeReorderWAInit(void);
+#endif
+
+#endif /* __INCmvCpuIfh */
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIfInit.S b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIfInit.S
new file mode 100644
index 000000000000..b7efda02c34a
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIfInit.S
@@ -0,0 +1,163 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#define MV_ASMLANGUAGE
+#include "mvOsAsm.h"
+#include "mvDeviceId.h"
+#include "mvCtrlEnvRegs.h"
+#include "mvCpuIfRegs.h"
+#include "mvCtrlEnvAsm.h"
+
+
+/*******************************************************************************
+* mvCpuIfPreInit - Make early initialization of CPU interface.
+*
+* DESCRIPTION:
+*       The function will initialize the CPU interface parameters that must
+*       be initialize before any BUS activity towards the DDR interface,
+*       which means it must be executed from ROM. Because of that, the function
+*       is implemented in assembly code.
+*       The function configure the following CPU config register parameters:
+*       1) CPU2MbusLTickDrv
+*       2) CPU2MbusLTickSample.
+*       NOTE: This function must be called AFTER the internal register
+*       base is modified to INTER_REGS_BASE.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*       r11 holds return function address.
+*******************************************************************************/
+#define MV88F6281_PCKG_OPT	2
+#define MV88F6192_PCKG_OPT	1
+#define MV88F6180_PCKG_OPT	0
+
+	.globl _mvCpuIfPreInit
+_mvCpuIfPreInit:
+
+        mov     r11, LR     		/* Save link register */
+
+	/* Read device ID  */
+	MV_CTRL_MODEL_GET_ASM(r4, r5);
+
+        /* goto calcConfigReg if device is 6281 */
+        ldr     r5, =MV88F6281_PCKG_OPT
+        cmp     r4, r5
+        beq     calcConfigReg
+
+        /* goto calcConfigReg if device is 6192/6190 */
+        ldr     r5, =MV88F6192_PCKG_OPT
+        cmp     r4, r5
+        beq     calcConfigReg
+
+        /* Else 6180 */
+        /* Get the "sample on reset" register */
+	MV_REG_READ_ASM (r4, r5, MPP_SAMPLE_AT_RESET)
+        ldr    r5, =MSAR_CPUCLCK_MASK_6180
+        and    r5, r4, r5
+	    mov    r5, r5, lsr #MSAR_CPUCLCK_OFFS_6180
+
+        ldr    r4, =CPU_2_MBUSL_DDR_CLK_1x3
+        cmp    r5, #CPU_2_DDR_CLK_1x3_1
+        beq    setConfigReg
+
+        ldr    r4, =CPU_2_MBUSL_DDR_CLK_1x4
+        cmp    r5, #CPU_2_DDR_CLK_1x4_1
+        beq    setConfigReg
+        b    setConfigReg
+
+calcConfigReg:
+        /* Get the "sample on reset" register */
+	    MV_REG_READ_ASM (r4, r5, MPP_SAMPLE_AT_RESET)
+        ldr    r5, =MSAR_DDRCLCK_RTIO_MASK
+        and    r5, r4, r5
+	    mov    r5, r5, lsr #MSAR_DDRCLCK_RTIO_OFFS
+
+        ldr    r4, =CPU_2_MBUSL_DDR_CLK_1x3
+        cmp    r5, #CPU_2_DDR_CLK_1x3
+        beq    setConfigReg
+
+        ldr    r4, =CPU_2_MBUSL_DDR_CLK_1x4
+        cmp    r5, #CPU_2_DDR_CLK_1x4
+        beq    setConfigReg
+
+        /* Else */
+        ldr    r4, =0
+
+setConfigReg:
+        /* Read CPU Config register */
+        MV_REG_READ_ASM (r7, r5, CPU_CONFIG_REG)
+        ldr    r5, =~(CCR_CPU_2_MBUSL_TICK_DRV_MASK | CCR_CPU_2_MBUSL_TICK_SMPL_MASK)
+        and    r7, r7, r5       /* Clear register fields */
+        orr    r7, r7, r4       /* Set the values according to the findings */
+        MV_REG_WRITE_ASM (r7, r5, CPU_CONFIG_REG)
+
+done:
+        mov     PC, r11         /* r11 is saved link register */
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIfRegs.h b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIfRegs.h
new file mode 100644
index 000000000000..6830bb0407a1
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvCpuIfRegs.h
@@ -0,0 +1,303 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvCpuIfRegsh
+#define __INCmvCpuIfRegsh
+
+/****************************************/
+/* ARM Control and Status Registers Map */
+/****************************************/
+
+#define CPU_CONFIG_REG				0x20100
+#define CPU_CTRL_STAT_REG			0x20104
+#define CPU_RSTOUTN_MASK_REG			0x20108
+#define CPU_SYS_SOFT_RST_REG			0x2010C
+#define CPU_AHB_MBUS_CAUSE_INT_REG		0x20110
+#define CPU_AHB_MBUS_MASK_INT_REG		0x20114
+#define CPU_FTDLL_CONFIG_REG			0x20120
+#define CPU_L2_CONFIG_REG			0x20128
+
+
+
+/* ARM Configuration register */
+/* CPU_CONFIG_REG (CCR) */
+
+
+/* Reset vector location */
+#define CCR_VEC_INIT_LOC_OFFS			1
+#define CCR_VEC_INIT_LOC_MASK			BIT1
+/* reset at 0x00000000 */
+#define CCR_VEC_INIT_LOC_0000			(0 << CCR_VEC_INIT_LOC_OFFS)
+/* reset at 0xFFFF0000 */
+#define CCR_VEC_INIT_LOC_FF00			(1 << CCR_VEC_INIT_LOC_OFFS)
+
+
+#define CCR_AHB_ERROR_PROP_OFFS			2
+#define CCR_AHB_ERROR_PROP_MASK			BIT2
+/* Erros are not propogated to AHB */
+#define CCR_AHB_ERROR_PROP_NO_INDICATE		(0 << CCR_AHB_ERROR_PROP_OFFS)
+/* Erros are propogated to AHB */
+#define CCR_AHB_ERROR_PROP_INDICATE		(1 << CCR_AHB_ERROR_PROP_OFFS)
+
+
+#define CCR_ENDIAN_INIT_OFFS			3
+#define CCR_ENDIAN_INIT_MASK			BIT3
+#define CCR_ENDIAN_INIT_LITTLE			(0 << CCR_ENDIAN_INIT_OFFS)
+#define CCR_ENDIAN_INIT_BIG			(1 << CCR_ENDIAN_INIT_OFFS)
+
+
+#define CCR_INCR_EN_OFFS			4
+#define CCR_INCR_EN_MASK			BIT4
+#define CCR_INCR_EN				BIT4
+
+
+#define CCR_NCB_BLOCKING_OFFS			5
+#define CCR_NCB_BLOCKING_MASK			(1 << CCR_NCB_BLOCKING_OFFS)
+#define CCR_NCB_BLOCKING_NON			(0 << CCR_NCB_BLOCKING_OFFS)
+#define CCR_NCB_BLOCKING_EN			(1 << CCR_NCB_BLOCKING_OFFS)
+
+#define CCR_CPU_2_MBUSL_TICK_DRV_OFFS		8
+#define CCR_CPU_2_MBUSL_TICK_DRV_MASK		(0xF << CCR_CPU_2_MBUSL_TICK_DRV_OFFS)
+#define CCR_CPU_2_MBUSL_TICK_SMPL_OFFS		12
+#define CCR_CPU_2_MBUSL_TICK_SMPL_MASK		(0xF << CCR_CPU_2_MBUSL_TICK_SMPL_OFFS)
+#define CCR_ICACH_PREF_BUF_ENABLE		BIT16
+#define CCR_DCACH_PREF_BUF_ENABLE		BIT17
+
+/* Ratio options for CPU to DDR for 6281/6192/6190 */
+#define CPU_2_DDR_CLK_1x3	    4
+#define CPU_2_DDR_CLK_1x4	    6
+
+/* Ratio options for CPU to DDR for 6281 only */
+#define CPU_2_DDR_CLK_2x9	    7
+#define CPU_2_DDR_CLK_1x5	    8
+#define CPU_2_DDR_CLK_1x6	    9
+
+/* Ratio options for CPU to DDR for 6180 only */
+#define CPU_2_DDR_CLK_1x3_1	    0x5
+#define CPU_2_DDR_CLK_1x4_1	    0x6
+
+/* Default values for CPU to Mbus-L DDR Interface Tick Driver and 	*/
+/* CPU to Mbus-L Tick Sample fields in CPU config register		*/
+
+#define TICK_DRV_1x1	0
+#define TICK_DRV_1x2	0
+#define TICK_DRV_1x3	1
+#define TICK_DRV_1x4	2
+#define TICK_SMPL_1x1	0
+#define TICK_SMPL_1x2	1
+#define TICK_SMPL_1x3	0
+#define TICK_SMPL_1x4	0
+
+#define CPU_2_MBUSL_DDR_CLK_1x2						\
+		 ((TICK_DRV_1x2  << CCR_CPU_2_MBUSL_TICK_DRV_OFFS) | 	\
+		  (TICK_SMPL_1x2 << CCR_CPU_2_MBUSL_TICK_SMPL_OFFS))
+#define CPU_2_MBUSL_DDR_CLK_1x3						\
+		 ((TICK_DRV_1x3  << CCR_CPU_2_MBUSL_TICK_DRV_OFFS) | 	\
+		  (TICK_SMPL_1x3 << CCR_CPU_2_MBUSL_TICK_SMPL_OFFS))
+#define CPU_2_MBUSL_DDR_CLK_1x4						\
+		 ((TICK_DRV_1x4  << CCR_CPU_2_MBUSL_TICK_DRV_OFFS) | 	\
+		  (TICK_SMPL_1x4 << CCR_CPU_2_MBUSL_TICK_SMPL_OFFS))
+
+/* ARM Control and Status register */
+/* CPU_CTRL_STAT_REG (CCSR) */
+
+
+/*
+This is used to block PCI express\PCI from access Socrates/Feroceon GP
+while ARM boot is still in progress
+*/
+
+#define CCSR_PCI_ACCESS_OFFS			0
+#define CCSR_PCI_ACCESS_MASK			BIT0
+#define CCSR_PCI_ACCESS_ENABLE			(0 << CCSR_PCI_ACCESS_OFFS)
+#define CCSR_PCI_ACCESS_DISBALE			(1 << CCSR_PCI_ACCESS_OFFS)
+
+#define CCSR_ARM_RESET				BIT1
+#define CCSR_SELF_INT				BIT2
+#define CCSR_BIG_ENDIAN				BIT15
+
+
+/* RSTOUTn Mask Register */
+/* CPU_RSTOUTN_MASK_REG (CRMR) */
+
+#define CRMR_PEX_RST_OUT_OFFS			0
+#define CRMR_PEX_RST_OUT_MASK			BIT0
+#define CRMR_PEX_RST_OUT_ENABLE			(1 << CRMR_PEX_RST_OUT_OFFS)
+#define CRMR_PEX_RST_OUT_DISABLE		(0 << CRMR_PEX_RST_OUT_OFFS)
+
+#define CRMR_WD_RST_OUT_OFFS			1
+#define CRMR_WD_RST_OUT_MASK			BIT1
+#define CRMR_WD_RST_OUT_ENABLE			(1 << CRMR_WD_RST_OUT_OFFS)
+#define CRMR_WD_RST_OUT_DISBALE			(0 << CRMR_WD_RST_OUT_OFFS)
+
+#define CRMR_SOFT_RST_OUT_OFFS			2
+#define CRMR_SOFT_RST_OUT_MASK			BIT2
+#define CRMR_SOFT_RST_OUT_ENABLE		(1 << CRMR_SOFT_RST_OUT_OFFS)
+#define CRMR_SOFT_RST_OUT_DISBALE		(0 << CRMR_SOFT_RST_OUT_OFFS)
+
+/* System Software Reset Register */
+/* CPU_SYS_SOFT_RST_REG (CSSRR) */
+
+#define CSSRR_SYSTEM_SOFT_RST			BIT0
+
+/* AHB to Mbus Bridge Interrupt Cause Register*/
+/* CPU_AHB_MBUS_CAUSE_INT_REG (CAMCIR) */
+
+#define CAMCIR_ARM_SELF_INT			BIT0
+#define CAMCIR_ARM_TIMER0_INT_REQ		BIT1
+#define CAMCIR_ARM_TIMER1_INT_REQ		BIT2
+#define CAMCIR_ARM_WD_TIMER_INT_REQ		BIT3
+
+
+/* AHB to Mbus Bridge Interrupt Mask Register*/
+/* CPU_AHB_MBUS_MASK_INT_REG (CAMMIR) */
+
+#define CAMCIR_ARM_SELF_INT_OFFS		0
+#define CAMCIR_ARM_SELF_INT_MASK		BIT0
+#define CAMCIR_ARM_SELF_INT_EN			(1 << CAMCIR_ARM_SELF_INT_OFFS)
+#define CAMCIR_ARM_SELF_INT_DIS			(0 << CAMCIR_ARM_SELF_INT_OFFS)
+
+
+#define CAMCIR_ARM_TIMER0_INT_REQ_OFFS		1
+#define CAMCIR_ARM_TIMER0_INT_REQ_MASK		BIT1
+#define CAMCIR_ARM_TIMER0_INT_REQ_EN		(1 << CAMCIR_ARM_TIMER0_INT_REQ_OFFS)
+#define CAMCIR_ARM_TIMER0_INT_REQ_DIS		(0 << CAMCIR_ARM_TIMER0_INT_REQ_OFFS)
+
+#define CAMCIR_ARM_TIMER1_INT_REQ_OFFS		2
+#define CAMCIR_ARM_TIMER1_INT_REQ_MASK		BIT2
+#define CAMCIR_ARM_TIMER1_INT_REQ_EN		(1 << CAMCIR_ARM_TIMER1_INT_REQ_OFFS)
+#define CAMCIR_ARM_TIMER1_INT_REQ_DIS		(0 << CAMCIR_ARM_TIMER1_INT_REQ_OFFS)
+
+#define CAMCIR_ARM_WD_TIMER_INT_REQ_OFFS 	3
+#define CAMCIR_ARM_WD_TIMER_INT_REQ_MASK 	BIT3
+#define CAMCIR_ARM_WD_TIMER_INT_REQ_EN	 	(1 << CAMCIR_ARM_WD_TIMER_INT_REQ_OFFS)
+#define CAMCIR_ARM_WD_TIMER_INT_REQ_DIS	 	(0 << CAMCIR_ARM_WD_TIMER_INT_REQ_OFFS)
+
+/* CPU FTDLL Config register (CFCR) fields */
+#define CFCR_FTDLL_ICACHE_TAG_OFFS		0
+#define CFCR_FTDLL_ICACHE_TAG_MASK		(0x7F << CFCR_FTDLL_ICACHE_TAG_OFFS)
+#define CFCR_FTDLL_DCACHE_TAG_OFFS		8
+#define CFCR_FTDLL_DCACHE_TAG_MASK		(0x7F << CFCR_FTDLL_DCACHE_TAG_OFFS)
+#define CFCR_FTDLL_OVERWRITE_ENABLE		(1 << 15)
+/* For Orion 2 D2 only */
+#define CFCR_MRVL_CPU_ID_OFFS			16
+#define CFCR_MRVL_CPU_ID_MASK			(0x1 << CFCR_MRVL_CPU_ID_OFFS)
+#define CFCR_ARM_CPU_ID				(0x0 << CFCR_MRVL_CPU_ID_OFFS)
+#define CFCR_MRVL_CPU_ID			(0x1 << CFCR_MRVL_CPU_ID_OFFS)
+#define CFCR_VFP_SUB_ARC_NUM_OFFS		7
+#define CFCR_VFP_SUB_ARC_NUM_MASK		(0x1 << CFCR_VFP_SUB_ARC_NUM_OFFS)
+#define CFCR_VFP_SUB_ARC_NUM_1			(0x0 << CFCR_VFP_SUB_ARC_NUM_OFFS)
+#define CFCR_VFP_SUB_ARC_NUM_2			(0x1 << CFCR_VFP_SUB_ARC_NUM_OFFS)
+
+/* CPU_L2_CONFIG_REG fields */
+#ifdef MV_CPU_LE
+#define CL2CR_L2_ECC_EN_OFFS			2
+#define CL2CR_L2_WT_MODE_OFFS			4
+#else
+#define CL2CR_L2_ECC_EN_OFFS			26
+#define CL2CR_L2_WT_MODE_OFFS			28
+#endif
+
+#define CL2CR_L2_ECC_EN_MASK			(1 << CL2CR_L2_ECC_EN_OFFS)
+#define CL2CR_L2_WT_MODE_MASK			(1 << CL2CR_L2_WT_MODE_OFFS)
+
+/*******************************************/
+/* Main Interrupt Controller Registers Map */
+/*******************************************/
+
+#define CPU_MAIN_INT_CAUSE_REG			0x20200
+#define CPU_MAIN_IRQ_MASK_REG			0x20204
+#define CPU_MAIN_FIQ_MASK_REG			0x20208
+#define CPU_ENPOINT_MASK_REG			0x2020C
+#define CPU_MAIN_INT_CAUSE_HIGH_REG		0x20210
+#define CPU_MAIN_IRQ_MASK_HIGH_REG		0x20214
+#define CPU_MAIN_FIQ_MASK_HIGH_REG		0x20218
+#define CPU_ENPOINT_MASK_HIGH_REG		0x2021C
+
+
+/*******************************************/
+/* ARM Doorbell Registers Map		   */
+/*******************************************/
+
+#define CPU_HOST_TO_ARM_DRBL_REG		0x20400
+#define CPU_HOST_TO_ARM_MASK_REG		0x20404
+#define CPU_ARM_TO_HOST_DRBL_REG		0x20408
+#define CPU_ARM_TO_HOST_MASK_REG		0x2040C
+
+
+
+/* CPU control register map */
+/* Set bits means value is about to change according to new value */
+#define CPU_CONFIG_DEFAULT_MASK         	(CCR_VEC_INIT_LOC_MASK  | CCR_AHB_ERROR_PROP_MASK)
+
+#define CPU_CONFIG_DEFAULT                      (CCR_VEC_INIT_LOC_FF00)
+
+/* CPU Control and status defaults */
+#define CPU_CTRL_STAT_DEFAULT_MASK              (CCSR_PCI_ACCESS_MASK)
+
+
+#define CPU_CTRL_STAT_DEFAULT                   (CCSR_PCI_ACCESS_ENABLE)
+
+#endif /* __INCmvCpuIfRegsh */
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysAudio.c b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysAudio.c
new file mode 100644
index 000000000000..96d5956760eb
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysAudio.c
@@ -0,0 +1,323 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#include "mvSysAudio.h"
+
+/*******************************************************************************
+* mvAudioWinSet - Set AUDIO target address window
+*
+* DESCRIPTION:
+*       This function sets a peripheral target (e.g. SDRAM bank0, PCI_MEM0)
+*       address window, also known as address decode window.
+*       After setting this target window, the AUDIO will be able to access the
+*       target within the address window.
+*
+* INPUT:
+*       winNum      - AUDIO target address decode window number.
+*       pAddrDecWin - AUDIO target window data structure.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_ERROR if address window overlapps with other address decode windows.
+*       MV_BAD_PARAM if base address is invalid parameter or target is
+*       unknown.
+*
+*******************************************************************************/
+MV_STATUS mvAudioWinSet(MV_U32 winNum, MV_AUDIO_DEC_WIN *pAddrDecWin)
+{
+    MV_TARGET_ATTRIB    targetAttribs;
+    MV_DEC_REGS         decRegs;
+
+    /* Parameter checking   */
+    if (winNum >= MV_AUDIO_MAX_ADDR_DECODE_WIN)
+    {
+        mvOsPrintf("%s: ERR. Invalid win num %d\n",__FUNCTION__, winNum);
+        return MV_BAD_PARAM;
+    }
+
+    /* check if address is aligned to the size */
+    if(MV_IS_NOT_ALIGN(pAddrDecWin->addrWin.baseLow, pAddrDecWin->addrWin.size))
+    {
+		mvOsPrintf("mvAudioWinSet:Error setting AUDIO window %d to "\
+			   "target %s.\nAddress 0x%08x is unaligned to size 0x%x.\n",
+			   winNum,
+			   mvCtrlTargetNameGet(pAddrDecWin->target),
+			   pAddrDecWin->addrWin.baseLow,
+			   pAddrDecWin->addrWin.size);
+		return MV_ERROR;
+    }
+
+    decRegs.baseReg = 0;
+    decRegs.sizeReg = 0;
+
+    if (MV_OK != mvCtrlAddrDecToReg(&(pAddrDecWin->addrWin),&decRegs))
+    {
+        mvOsPrintf("%s: mvCtrlAddrDecToReg Failed\n", __FUNCTION__);
+        return MV_ERROR;
+    }
+
+    mvCtrlAttribGet(pAddrDecWin->target, &targetAttribs);
+
+    /* set attributes */
+    decRegs.sizeReg &= ~MV_AUDIO_WIN_ATTR_MASK;
+    decRegs.sizeReg |= (targetAttribs.attrib << MV_AUDIO_WIN_ATTR_OFFSET);
+
+    /* set target ID */
+    decRegs.sizeReg &= ~MV_AUDIO_WIN_TARGET_MASK;
+    decRegs.sizeReg |= (targetAttribs.targetId << MV_AUDIO_WIN_TARGET_OFFSET);
+
+    if (pAddrDecWin->enable == MV_TRUE)
+    {
+        decRegs.sizeReg |= MV_AUDIO_WIN_ENABLE_MASK;
+    }
+    else
+    {
+        decRegs.sizeReg &= ~MV_AUDIO_WIN_ENABLE_MASK;
+    }
+
+    MV_REG_WRITE( MV_AUDIO_WIN_CTRL_REG(winNum), decRegs.sizeReg);
+    MV_REG_WRITE( MV_AUDIO_WIN_BASE_REG(winNum), decRegs.baseReg);
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvAudioWinGet - Get AUDIO peripheral target address window.
+*
+* DESCRIPTION:
+*       Get AUDIO peripheral target address window.
+*
+* INPUT:
+*       winNum - AUDIO target address decode window number.
+*
+* OUTPUT:
+*       pAddrDecWin - AUDIO target window data structure.
+*
+* RETURN:
+*       MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+MV_STATUS mvAudioWinGet(MV_U32 winNum, MV_AUDIO_DEC_WIN *pAddrDecWin)
+{
+    MV_DEC_REGS         decRegs;
+    MV_TARGET_ATTRIB    targetAttrib;
+
+    /* Parameter checking   */
+    if (winNum >= MV_AUDIO_MAX_ADDR_DECODE_WIN)
+    {
+        mvOsPrintf("%s : ERR. Invalid winNum %d\n",
+                    __FUNCTION__,  winNum);
+        return MV_NOT_SUPPORTED;
+    }
+
+    decRegs.baseReg = MV_REG_READ( MV_AUDIO_WIN_BASE_REG(winNum) );
+    decRegs.sizeReg = MV_REG_READ( MV_AUDIO_WIN_CTRL_REG(winNum) );
+
+    if (MV_OK != mvCtrlRegToAddrDec(&decRegs, &pAddrDecWin->addrWin) )
+    {
+        mvOsPrintf("%s: mvCtrlRegToAddrDec Failed\n", __FUNCTION__);
+        return MV_ERROR;
+    }
+
+    /* attrib and targetId */
+    targetAttrib.attrib = (decRegs.sizeReg & MV_AUDIO_WIN_ATTR_MASK) >>
+		MV_AUDIO_WIN_ATTR_OFFSET;
+    targetAttrib.targetId = (decRegs.sizeReg & MV_AUDIO_WIN_TARGET_MASK) >>
+		MV_AUDIO_WIN_TARGET_OFFSET;
+
+    pAddrDecWin->target = mvCtrlTargetGet(&targetAttrib);
+
+    /* Check if window is enabled   */
+    if(decRegs.sizeReg & MV_AUDIO_WIN_ENABLE_MASK)
+    {
+        pAddrDecWin->enable = MV_TRUE;
+    }
+    else
+    {
+        pAddrDecWin->enable = MV_FALSE;
+    }
+    return MV_OK;
+}
+/*******************************************************************************
+* mvAudioAddrDecShow - Print the AUDIO address decode map.
+*
+* DESCRIPTION:
+*		This function print the AUDIO address decode map.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_VOID mvAudioAddrDecShow(MV_VOID)
+{
+
+	MV_AUDIO_DEC_WIN win;
+	int i;
+
+	if (MV_FALSE == mvCtrlPwrClckGet(AUDIO_UNIT_ID, 0))
+		return;
+
+
+	mvOsOutput( "\n" );
+	mvOsOutput( "AUDIO:\n" );
+	mvOsOutput( "----\n" );
+
+	for( i = 0; i < MV_AUDIO_MAX_ADDR_DECODE_WIN; i++ )
+	{
+            memset( &win, 0, sizeof(MV_AUDIO_DEC_WIN) );
+
+	    mvOsOutput( "win%d - ", i );
+
+	    if( mvAudioWinGet( i, &win ) == MV_OK )
+	    {
+	        if( win.enable )
+	        {
+                    mvOsOutput( "%s base %08x, ",
+                    mvCtrlTargetNameGet(win.target), win.addrWin.baseLow );
+                    mvOsOutput( "...." );
+
+                    mvSizePrint( win.addrWin.size );
+
+		    mvOsOutput( "\n" );
+                }
+		else
+		mvOsOutput( "disable\n" );
+	    }
+	}
+}
+
+
+/*******************************************************************************
+* mvAudioWinInit - Initialize the integrated AUDIO target address window.
+*
+* DESCRIPTION:
+*       Initialize the AUDIO peripheral target address window.
+*
+* INPUT:
+*
+*
+* OUTPUT:
+*
+*
+* RETURN:
+*       MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+MV_STATUS mvAudioInit(MV_VOID)
+{
+    int             winNum;
+    MV_AUDIO_DEC_WIN  audioWin;
+    MV_CPU_DEC_WIN  cpuAddrDecWin;
+    MV_U32          status;
+
+    mvAudioHalInit();
+
+    /* Initiate Audio address decode */
+
+    /* First disable all address decode windows */
+    for(winNum = 0; winNum < MV_AUDIO_MAX_ADDR_DECODE_WIN; winNum++)
+    {
+        MV_U32  regVal = MV_REG_READ(MV_AUDIO_WIN_CTRL_REG(winNum));
+        regVal &= ~MV_AUDIO_WIN_ENABLE_MASK;
+        MV_REG_WRITE(MV_AUDIO_WIN_CTRL_REG(winNum), regVal);
+    }
+
+    for(winNum = 0; winNum < MV_AUDIO_MAX_ADDR_DECODE_WIN; winNum++)
+    {
+
+		/* We will set the Window to DRAM_CS0 in default */
+		/* first get attributes from CPU If */
+		status = mvCpuIfTargetWinGet(SDRAM_CS0,
+									 &cpuAddrDecWin);
+
+		if (MV_OK != status)
+		{
+				mvOsPrintf("%s: ERR. mvCpuIfTargetWinGet failed\n", __FUNCTION__);
+			return MV_ERROR;
+		}
+
+		if (cpuAddrDecWin.enable == MV_TRUE)
+		{
+			audioWin.addrWin.baseHigh = cpuAddrDecWin.addrWin.baseHigh;
+			audioWin.addrWin.baseLow  = cpuAddrDecWin.addrWin.baseLow;
+			audioWin.addrWin.size     = cpuAddrDecWin.addrWin.size;
+			audioWin.enable           = MV_TRUE;
+			audioWin.target           = SDRAM_CS0;
+
+			if(MV_OK != mvAudioWinSet(winNum, &audioWin))
+			{
+				return MV_ERROR;
+			}
+		}
+	}
+
+    return MV_OK;
+}
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysAudio.h b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysAudio.h
new file mode 100644
index 000000000000..f30611e6790a
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysAudio.h
@@ -0,0 +1,122 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#ifndef __INCMVSysAudioH
+#define __INCMVSysAudioH
+
+#include "mvCommon.h"
+#include "audio/mvAudio.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+
+/***********************************/
+/* Audio Address Decoding registers*/
+/***********************************/
+
+#define MV_AUDIO_MAX_ADDR_DECODE_WIN 		2
+#define MV_AUDIO_RECORD_WIN_NUM			0
+#define MV_AUDIO_PLAYBACK_WIN_NUM		1
+
+#define MV_AUDIO_WIN_CTRL_REG(win)        (AUDIO_REG_BASE + 0xA04 + ((win)<<3))
+#define MV_AUDIO_WIN_BASE_REG(win)        (AUDIO_REG_BASE + 0xA00 + ((win)<<3))
+
+#define MV_AUDIO_RECORD_WIN_CTRL_REG		MV_AUDIO_WIN_CTRL_REG(MV_AUDIO_RECORD_WIN_NUM)
+#define MV_AUDIO_RECORD_WIN_BASE_REG		MV_AUDIO_WIN_BASE_REG(MV_AUDIO_RECORD_WIN_NUM)
+#define MV_AUDIO_PLAYBACK_WIN_CTRL_REG		MV_AUDIO_WIN_CTRL_REG(MV_AUDIO_PLAYBACK_WIN_NUM)
+#define MV_AUDIO_PLAYBACK_WIN_BASE_REG		MV_AUDIO_WIN_BASE_REG(MV_AUDIO_PLAYBACK_WIN_NUM)
+
+
+/* BITs in Windows 0-3 Control and Base Registers */
+#define MV_AUDIO_WIN_ENABLE_BIT               0
+#define MV_AUDIO_WIN_ENABLE_MASK              (1<<MV_AUDIO_WIN_ENABLE_BIT)
+
+#define MV_AUDIO_WIN_TARGET_OFFSET            4
+#define MV_AUDIO_WIN_TARGET_MASK              (0xF<<MV_AUDIO_WIN_TARGET_OFFSET)
+
+#define MV_AUDIO_WIN_ATTR_OFFSET              8
+#define MV_AUDIO_WIN_ATTR_MASK                (0xFF<<MV_AUDIO_WIN_ATTR_OFFSET)
+
+#define MV_AUDIO_WIN_SIZE_OFFSET              16
+#define MV_AUDIO_WIN_SIZE_MASK                (0xFFFF<<MV_AUDIO_WIN_SIZE_OFFSET)
+
+#define MV_AUDIO_WIN_BASE_OFFSET              16
+#define MV_AUDIO_WIN_BASE_MASK                (0xFFFF<<MV_AUDIO_WIN_BASE_OFFSET)
+
+
+typedef struct _mvAudioDecWin
+{
+    MV_TARGET     target;
+    MV_ADDR_WIN   addrWin;    /* An address window*/
+    MV_BOOL       enable;     /* Address decode window is enabled/disabled    */
+
+} MV_AUDIO_DEC_WIN;
+
+
+MV_STATUS mvAudioInit(MV_VOID);
+MV_STATUS mvAudioWinGet(MV_U32 winNum, MV_AUDIO_DEC_WIN *pAddrDecWin);
+MV_STATUS mvAudioWinSet(MV_U32 winNum, MV_AUDIO_DEC_WIN *pAddrDecWin);
+MV_STATUS mvAudioWinInit(MV_VOID);
+MV_VOID   mvAudioAddrDecShow(MV_VOID);
+
+
+#endif
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysCesa.c b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysCesa.c
new file mode 100644
index 000000000000..9b50bae9e163
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysCesa.c
@@ -0,0 +1,382 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+        this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvSysCesa.h"
+
+#if (MV_CESA_VERSION >= 2)
+MV_TARGET tdmaAddrDecPrioTable[] =
+{
+#if defined(MV_INCLUDE_SDRAM_CS0)
+    SDRAM_CS0,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS1)
+    SDRAM_CS1,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS2)
+    SDRAM_CS2,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS3)
+    SDRAM_CS3,
+#endif
+#if defined(MV_INCLUDE_PEX)
+    PEX0_MEM,
+#endif
+
+    TBL_TERM
+};
+
+/*******************************************************************************
+* mvCesaWinGet - Get TDMA target address window.
+*
+* DESCRIPTION:
+*       Get TDMA target address window.
+*
+* INPUT:
+*       winNum - TDMA target address decode window number.
+*
+* OUTPUT:
+*       pDecWin - TDMA target window data structure.
+*
+* RETURN:
+*       MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+static MV_STATUS mvCesaWinGet(MV_U32 winNum, MV_DEC_WIN *pDecWin)
+{
+    MV_DEC_WIN_PARAMS   winParam;
+    MV_U32              sizeReg, baseReg;
+
+    /* Parameter checking   */
+    if (winNum >= MV_CESA_TDMA_ADDR_DEC_WIN)
+    {
+        mvOsPrintf("%s : ERR. Invalid winNum %d\n",
+                    __FUNCTION__, winNum);
+        return MV_NOT_SUPPORTED;
+    }
+
+    baseReg = MV_REG_READ( MV_CESA_TDMA_BASE_ADDR_REG(winNum) );
+    sizeReg = MV_REG_READ( MV_CESA_TDMA_WIN_CTRL_REG(winNum) );
+
+   /* Check if window is enabled   */
+    if(sizeReg & MV_CESA_TDMA_WIN_ENABLE_MASK)
+    {
+        pDecWin->enable = MV_TRUE;
+
+        /* Extract window parameters from registers */
+        winParam.targetId = (sizeReg & MV_CESA_TDMA_WIN_TARGET_MASK) >> MV_CESA_TDMA_WIN_TARGET_OFFSET;
+        winParam.attrib   = (sizeReg & MV_CESA_TDMA_WIN_ATTR_MASK) >> MV_CESA_TDMA_WIN_ATTR_OFFSET;
+        winParam.size     = (sizeReg & MV_CESA_TDMA_WIN_SIZE_MASK) >> MV_CESA_TDMA_WIN_SIZE_OFFSET;
+        winParam.baseAddr = (baseReg & MV_CESA_TDMA_WIN_BASE_MASK);
+
+        /* Translate the decode window parameters to address decode struct */
+        if (MV_OK != mvCtrlParamsToAddrDec(&winParam, pDecWin))
+        {
+            mvOsPrintf("Failed to translate register parameters to CESA address" \
+                       " decode window structure\n");
+            return MV_ERROR;
+        }
+    }
+    else
+    {
+        pDecWin->enable = MV_FALSE;
+    }
+    return MV_OK;
+}
+
+/*******************************************************************************
+* cesaWinOverlapDetect - Detect CESA TDMA address windows overlapping
+*
+* DESCRIPTION:
+*       An unpredicted behaviur is expected in case TDMA address decode
+*       windows overlapps.
+*       This function detects TDMA address decode windows overlapping of a
+*       specified window. The function does not check the window itself for
+*       overlapping. The function also skipps disabled address decode windows.
+*
+* INPUT:
+*       winNum      - address decode window number.
+*       pAddrDecWin - An address decode window struct.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE     - if the given address window overlap current address
+*                   decode map,
+*       MV_FALSE    - otherwise, MV_ERROR if reading invalid data
+*                   from registers.
+*
+*******************************************************************************/
+static MV_STATUS cesaWinOverlapDetect(MV_U32 winNum, MV_ADDR_WIN *pAddrWin)
+{
+    MV_U32          winNumIndex;
+    MV_DEC_WIN      addrDecWin;
+
+    for(winNumIndex=0; winNumIndex<MV_CESA_TDMA_ADDR_DEC_WIN; winNumIndex++)
+    {
+        /* Do not check window itself       */
+        if (winNumIndex == winNum)
+        {
+            continue;
+        }
+
+        /* Get window parameters    */
+        if (MV_OK != mvCesaWinGet(winNumIndex, &addrDecWin))
+        {
+            mvOsPrintf("%s: ERR. TargetWinGet failed\n", __FUNCTION__);
+            return MV_ERROR;
+        }
+
+        /* Do not check disabled windows    */
+        if(addrDecWin.enable == MV_FALSE)
+        {
+            continue;
+        }
+
+        if (MV_TRUE == ctrlWinOverlapTest(pAddrWin, &(addrDecWin.addrWin)))
+        {
+            return MV_TRUE;
+        }
+    }
+    return MV_FALSE;
+}
+
+/*******************************************************************************
+* mvCesaTdmaWinSet - Set CESA TDMA target address window
+*
+* DESCRIPTION:
+*       This function sets a peripheral target (e.g. SDRAM bank0, PCI_MEM0)
+*       address window, also known as address decode window.
+*       After setting this target window, the CESA TDMA will be able to access the
+*       target within the address window.
+*
+* INPUT:
+*       winNum      - CESA TDMA target address decode window number.
+*       pAddrDecWin - CESA TDMA target window data structure.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_ERROR        - if address window overlapps with other address decode windows.
+*       MV_BAD_PARAM    - if base address is invalid parameter or target is
+*                       unknown.
+*
+*******************************************************************************/
+static MV_STATUS mvCesaTdmaWinSet(MV_U32 winNum, MV_DEC_WIN *pDecWin)
+{
+    MV_DEC_WIN_PARAMS   winParams;
+    MV_U32              sizeReg, baseReg;
+
+    /* Parameter checking   */
+    if (winNum >= MV_CESA_TDMA_ADDR_DEC_WIN)
+    {
+        mvOsPrintf("mvCesaTdmaWinSet: ERR. Invalid win num %d\n",winNum);
+        return MV_BAD_PARAM;
+    }
+
+    /* Check if the requested window overlapps with current windows     */
+    if (MV_TRUE == cesaWinOverlapDetect(winNum, &pDecWin->addrWin))
+    {
+        mvOsPrintf("%s: ERR. Window %d overlap\n", __FUNCTION__, winNum);
+        return MV_ERROR;
+    }
+
+    /* check if address is aligned to the size */
+    if(MV_IS_NOT_ALIGN(pDecWin->addrWin.baseLow, pDecWin->addrWin.size))
+    {
+        mvOsPrintf("mvCesaTdmaWinSet: Error setting CESA TDMA window %d to "\
+                   "target %s.\nAddress 0x%08x is unaligned to size 0x%x.\n",
+                   winNum,
+                   mvCtrlTargetNameGet(pDecWin->target),
+                   pDecWin->addrWin.baseLow,
+                   pDecWin->addrWin.size);
+        return MV_ERROR;
+    }
+
+    if(MV_OK != mvCtrlAddrDecToParams(pDecWin, &winParams))
+    {
+        mvOsPrintf("%s: mvCtrlAddrDecToParams Failed\n", __FUNCTION__);
+        return MV_ERROR;
+    }
+
+    /* set Size, Attributes and TargetID */
+    sizeReg = (((winParams.targetId << MV_CESA_TDMA_WIN_TARGET_OFFSET) & MV_CESA_TDMA_WIN_TARGET_MASK) |
+               ((winParams.attrib   << MV_CESA_TDMA_WIN_ATTR_OFFSET)   & MV_CESA_TDMA_WIN_ATTR_MASK)   |
+               ((winParams.size << MV_CESA_TDMA_WIN_SIZE_OFFSET) & MV_CESA_TDMA_WIN_SIZE_MASK));
+
+    if (pDecWin->enable == MV_TRUE)
+    {
+        sizeReg |= MV_CESA_TDMA_WIN_ENABLE_MASK;
+    }
+    else
+    {
+        sizeReg &= ~MV_CESA_TDMA_WIN_ENABLE_MASK;
+    }
+
+    /* Update Base value  */
+    baseReg = (winParams.baseAddr & MV_CESA_TDMA_WIN_BASE_MASK);
+
+    MV_REG_WRITE( MV_CESA_TDMA_WIN_CTRL_REG(winNum), sizeReg);
+    MV_REG_WRITE( MV_CESA_TDMA_BASE_ADDR_REG(winNum), baseReg);
+
+    return MV_OK;
+}
+
+
+static MV_STATUS   mvCesaTdmaAddrDecInit (void)
+{
+    MV_U32          winNum;
+    MV_STATUS       status;
+    MV_CPU_DEC_WIN  cpuAddrDecWin;
+    MV_DEC_WIN      cesaWin;
+    MV_U32          winPrioIndex = 0;
+
+    /* First disable all address decode windows */
+    for(winNum=0; winNum<MV_CESA_TDMA_ADDR_DEC_WIN; winNum++)
+    {
+        MV_REG_BIT_RESET(MV_CESA_TDMA_WIN_CTRL_REG(winNum), MV_CESA_TDMA_WIN_ENABLE_MASK);
+    }
+
+    /* Go through all windows in user table until table terminator      */
+    winNum = 0;
+    while( (tdmaAddrDecPrioTable[winPrioIndex] != TBL_TERM) &&
+           (winNum < MV_CESA_TDMA_ADDR_DEC_WIN) )    {
+
+        /* first get attributes from CPU If */
+        status = mvCpuIfTargetWinGet(tdmaAddrDecPrioTable[winPrioIndex],
+                                     &cpuAddrDecWin);
+        if(MV_NO_SUCH == status){
+	    winPrioIndex++;
+            continue;
+	}
+
+        if (MV_OK != status)
+        {
+            mvOsPrintf("cesaInit: TargetWinGet failed. winNum=%d, winIdx=%d, target=%d, status=0x%x\n",
+                        winNum, winPrioIndex, tdmaAddrDecPrioTable[winPrioIndex], status);
+            return MV_ERROR;
+        }
+        if (cpuAddrDecWin.enable == MV_TRUE)
+        {
+            cesaWin.addrWin.baseHigh = cpuAddrDecWin.addrWin.baseHigh;
+            cesaWin.addrWin.baseLow  = cpuAddrDecWin.addrWin.baseLow;
+            cesaWin.addrWin.size     = cpuAddrDecWin.addrWin.size;
+            cesaWin.enable           = MV_TRUE;
+            cesaWin.target           = tdmaAddrDecPrioTable[winPrioIndex];
+
+#if defined(MV646xx)
+            /* Get the default attributes for that target window */
+            mvCtrlDefAttribGet(cesaWin.target, &cesaWin.addrWinAttr);
+#endif /* MV646xx */
+
+            if(MV_OK != mvCesaTdmaWinSet(winNum, &cesaWin))
+            {
+                mvOsPrintf("mvCesaTdmaWinSet FAILED: winNum=%d\n",
+                           winNum);
+                return MV_ERROR;
+            }
+            winNum++;
+        }
+        winPrioIndex++;
+    }
+    return MV_OK;
+}
+#endif /* MV_CESA_VERSION >= 2 */
+
+
+
+
+MV_STATUS mvCesaInit (int numOfSession, int queueDepth, char* pSramBase, void *osHandle)
+{
+    MV_U32 cesaCryptEngBase;
+    MV_CPU_DEC_WIN addrDecWin;
+
+    if(sizeof(MV_CESA_SRAM_MAP) > MV_CESA_SRAM_SIZE)
+    {
+        mvOsPrintf("mvCesaInit: Wrong SRAM map - %ld > %d\n",
+                sizeof(MV_CESA_SRAM_MAP), MV_CESA_SRAM_SIZE);
+        return MV_FAIL;
+    }
+#if 0
+    if (mvCpuIfTargetWinGet(CRYPT_ENG, &addrDecWin) == MV_OK)
+        cesaCryptEngBase = addrDecWin.addrWin.baseLow;
+    else
+    {
+        mvOsPrintf("mvCesaInit: ERR. mvCpuIfTargetWinGet failed\n");
+        return MV_ERROR;
+    }
+#else
+        cesaCryptEngBase = (MV_U32)pSramBase;
+#endif
+
+#if 0 /* Already done in the platform init */
+#if (MV_CESA_VERSION >= 2)
+    mvCesaTdmaAddrDecInit();
+#endif /* MV_CESA_VERSION >= 2 */
+#endif
+	return mvCesaHalInit(numOfSession, queueDepth, pSramBase, cesaCryptEngBase,
+			     osHandle);
+
+}
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysCesa.h b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysCesa.h
new file mode 100644
index 000000000000..9bc3fee77e78
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysCesa.h
@@ -0,0 +1,100 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __mvSysCesa_h__
+#define __mvSysCesa_h__
+
+
+#include "mvCommon.h"
+#include "cesa/mvCesa.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+
+/***************************** TDMA Registers *************************************/
+
+#define MV_CESA_TDMA_ADDR_DEC_WIN           4
+
+#define MV_CESA_TDMA_BASE_ADDR_REG(win)     (MV_CESA_TDMA_REG_BASE + 0xa00 + (win<<3))
+
+#define MV_CESA_TDMA_WIN_CTRL_REG(win)      (MV_CESA_TDMA_REG_BASE + 0xa04 + (win<<3))
+
+#define MV_CESA_TDMA_WIN_ENABLE_BIT         0
+#define MV_CESA_TDMA_WIN_ENABLE_MASK        (1 << MV_CESA_TDMA_WIN_ENABLE_BIT)
+
+#define MV_CESA_TDMA_WIN_TARGET_OFFSET      4
+#define MV_CESA_TDMA_WIN_TARGET_MASK        (0xf << MV_CESA_TDMA_WIN_TARGET_OFFSET)
+
+#define MV_CESA_TDMA_WIN_ATTR_OFFSET        8
+#define MV_CESA_TDMA_WIN_ATTR_MASK          (0xff << MV_CESA_TDMA_WIN_ATTR_OFFSET)
+
+#define MV_CESA_TDMA_WIN_SIZE_OFFSET        16
+#define MV_CESA_TDMA_WIN_SIZE_MASK          (0xFFFF << MV_CESA_TDMA_WIN_SIZE_OFFSET)
+
+#define MV_CESA_TDMA_WIN_BASE_OFFSET        16
+#define MV_CESA_TDMA_WIN_BASE_MASK          (0xFFFF << MV_CESA_TDMA_WIN_BASE_OFFSET)
+
+
+MV_STATUS   mvCesaInit (int numOfSession, int queueDepth, char* pSramBase, void *osHandle);
+
+#endif
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysDram.c b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysDram.c
new file mode 100644
index 000000000000..8283dcf3aa2c
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysDram.c
@@ -0,0 +1,347 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+/* includes */
+
+#include "ddr2/mvDramIf.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+#include "ctrlEnv/sys/mvSysDram.h"
+
+/* #define MV_DEBUG */
+#ifdef MV_DEBUG
+#define DB(x) x
+#else
+#define DB(x)
+#endif
+
+static MV_BOOL sdramIfWinOverlap(MV_TARGET target, MV_ADDR_WIN *pAddrWin);
+
+/*******************************************************************************
+* mvDramIfWinSet - Set DRAM interface address decode window
+*
+* DESCRIPTION:
+*       This function sets DRAM interface address decode window.
+*
+* INPUT:
+*	    target      - System target. Use only SDRAM targets.
+*       pAddrDecWin - SDRAM address window structure.
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       MV_BAD_PARAM if parameters are invalid or window is invalid, MV_OK
+*       otherwise.
+*******************************************************************************/
+MV_STATUS mvDramIfWinSet(MV_TARGET target, MV_DRAM_DEC_WIN *pAddrDecWin)
+{
+	MV_U32 baseReg=0,sizeReg=0;
+	MV_U32 baseToReg=0 , sizeToReg=0;
+
+    /* Check parameters */
+	if (!MV_TARGET_IS_DRAM(target))
+	{
+		mvOsPrintf("mvDramIfWinSet: target %d is not SDRAM\n", target);
+		return MV_BAD_PARAM;
+	}
+
+    /* Check if the requested window overlaps with current enabled windows	*/
+    if (MV_TRUE == sdramIfWinOverlap(target, &pAddrDecWin->addrWin))
+	{
+        mvOsPrintf("mvDramIfWinSet: ERR. Target %d overlaps\n", target);
+		return MV_BAD_PARAM;
+	}
+
+	/* check if address is aligned to the size */
+	if(MV_IS_NOT_ALIGN(pAddrDecWin->addrWin.baseLow, pAddrDecWin->addrWin.size))
+	{
+		mvOsPrintf("mvDramIfWinSet:Error setting DRAM interface window %d."\
+				   "\nAddress 0x%08x is unaligned to size 0x%x.\n",
+                   target,
+				   pAddrDecWin->addrWin.baseLow,
+				   pAddrDecWin->addrWin.size);
+		return MV_ERROR;
+	}
+
+	/* read base register*/
+	baseReg = MV_REG_READ(SDRAM_BASE_ADDR_REG(0,target));
+
+	/* read size register */
+	sizeReg = MV_REG_READ(SDRAM_SIZE_REG(0,target));
+
+	/* BaseLow[31:16] => base register [31:16]		*/
+	baseToReg = pAddrDecWin->addrWin.baseLow & SCBAR_BASE_MASK;
+
+	/* Write to address decode Base Address Register                  */
+	baseReg &= ~SCBAR_BASE_MASK;
+	baseReg |= baseToReg;
+
+	/* Translate the given window size to register format			*/
+	sizeToReg = ctrlSizeToReg(pAddrDecWin->addrWin.size, SCSR_SIZE_ALIGNMENT);
+
+	/* Size parameter validity check.                                   */
+	if (-1 == sizeToReg)
+	{
+		mvOsPrintf("mvCtrlAddrDecToReg: ERR. Win %d size invalid.\n",target);
+		return MV_BAD_PARAM;
+	}
+
+	/* set size */
+	sizeReg &= ~SCSR_SIZE_MASK;
+	/* Size is located at upper 16 bits */
+	sizeReg |= (sizeToReg << SCSR_SIZE_OFFS);
+
+	/* enable/Disable */
+	if (MV_TRUE == pAddrDecWin->enable)
+	{
+		sizeReg |= SCSR_WIN_EN;
+	}
+	else
+	{
+		sizeReg &= ~SCSR_WIN_EN;
+	}
+
+	/* 3) Write to address decode Base Address Register                   */
+	MV_REG_WRITE(SDRAM_BASE_ADDR_REG(0,target), baseReg);
+
+	/* Write to address decode Size Register                        	*/
+	MV_REG_WRITE(SDRAM_SIZE_REG(0,target), sizeReg);
+
+	return MV_OK;
+}
+/*******************************************************************************
+* mvDramIfWinGet - Get DRAM interface address decode window
+*
+* DESCRIPTION:
+*       This function gets DRAM interface address decode window.
+*
+* INPUT:
+*	    target - System target. Use only SDRAM targets.
+*
+* OUTPUT:
+*       pAddrDecWin - SDRAM address window structure.
+*
+* RETURN:
+*       MV_BAD_PARAM if parameters are invalid or window is invalid, MV_OK
+*       otherwise.
+*******************************************************************************/
+MV_STATUS mvDramIfWinGet(MV_TARGET target, MV_DRAM_DEC_WIN *pAddrDecWin)
+{
+	MV_U32 baseReg,sizeReg;
+	MV_U32 sizeRegVal;
+	/* Check parameters */
+	if (!MV_TARGET_IS_DRAM(target))
+	{
+		mvOsPrintf("mvDramIfWinGet: target %d is Illigal\n", target);
+		return MV_ERROR;
+	}
+
+	/* Read base and size registers */
+	sizeReg = MV_REG_READ(SDRAM_SIZE_REG(0,target));
+	baseReg = MV_REG_READ(SDRAM_BASE_ADDR_REG(0,target));
+
+	sizeRegVal = (sizeReg & SCSR_SIZE_MASK) >> SCSR_SIZE_OFFS;
+
+	pAddrDecWin->addrWin.size = ctrlRegToSize(sizeRegVal,
+							SCSR_SIZE_ALIGNMENT);
+
+	/* Check if ctrlRegToSize returned OK */
+	if (-1 == pAddrDecWin->addrWin.size)
+	{
+		mvOsPrintf("mvDramIfWinGet: size of target %d is Illigal\n", target);
+		return MV_ERROR;
+	}
+
+	/* Extract base address						*/
+	/* Base register [31:16] ==> baseLow[31:16] 		*/
+	pAddrDecWin->addrWin.baseLow = baseReg & SCBAR_BASE_MASK;
+
+	pAddrDecWin->addrWin.baseHigh =  0;
+
+
+	if (sizeReg & SCSR_WIN_EN)
+	{
+		pAddrDecWin->enable = MV_TRUE;
+	}
+	else
+	{
+		pAddrDecWin->enable = MV_FALSE;
+	}
+
+	return MV_OK;
+}
+/*******************************************************************************
+* mvDramIfWinEnable - Enable/Disable SDRAM address decode window
+*
+* DESCRIPTION:
+*		This function enable/Disable SDRAM address decode window.
+*
+* INPUT:
+*	    target - System target. Use only SDRAM targets.
+*
+* OUTPUT:
+*		None.
+*
+* RETURN:
+*		MV_ERROR in case function parameter are invalid, MV_OK otherewise.
+*
+*******************************************************************************/
+MV_STATUS mvDramIfWinEnable(MV_TARGET target, MV_BOOL enable)
+{
+	MV_DRAM_DEC_WIN 	addrDecWin;
+
+	/* Check parameters */
+	if (!MV_TARGET_IS_DRAM(target))
+	{
+		mvOsPrintf("mvDramIfWinEnable: target %d is Illigal\n", target);
+		return MV_ERROR;
+	}
+
+	if (enable == MV_TRUE)
+	{   /* First check for overlap with other enabled windows				*/
+		if (MV_OK != mvDramIfWinGet(target, &addrDecWin))
+		{
+			mvOsPrintf("mvDramIfWinEnable:ERR. Getting target %d failed.\n",
+                                                                        target);
+			return MV_ERROR;
+		}
+		/* Check for overlapping */
+		if (MV_FALSE == sdramIfWinOverlap(target, &(addrDecWin.addrWin)))
+		{
+			/* No Overlap. Enable address decode winNum window              */
+			MV_REG_BIT_SET(SDRAM_SIZE_REG(0,target), SCSR_WIN_EN);
+		}
+		else
+		{   /* Overlap detected	*/
+			mvOsPrintf("mvDramIfWinEnable: ERR. Target %d overlap detect\n",
+                                                                        target);
+			return MV_ERROR;
+		}
+	}
+	else
+	{   /* Disable address decode winNum window                             */
+		MV_REG_BIT_RESET(SDRAM_SIZE_REG(0, target), SCSR_WIN_EN);
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* sdramIfWinOverlap - Check if an address window overlap an SDRAM address window
+*
+* DESCRIPTION:
+*		This function scan each SDRAM address decode window to test if it
+*		overlapps the given address windoow
+*
+* INPUT:
+*       target      - SDRAM target where the function skips checking.
+*       pAddrDecWin - The tested address window for overlapping with
+*					  SDRAM windows.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if the given address window overlaps any enabled address
+*       decode map, MV_FALSE otherwise.
+*
+*******************************************************************************/
+static MV_BOOL sdramIfWinOverlap(MV_TARGET target, MV_ADDR_WIN *pAddrWin)
+{
+	MV_TARGET	targetNum;
+	MV_DRAM_DEC_WIN 	addrDecWin;
+
+	for(targetNum = SDRAM_CS0; targetNum < MV_DRAM_MAX_CS ; targetNum++)
+	{
+		/* don't check our winNum or illegal targets */
+		if (targetNum == target)
+		{
+			continue;
+		}
+
+		/* Get window parameters 	*/
+		if (MV_OK != mvDramIfWinGet(targetNum, &addrDecWin))
+		{
+			mvOsPrintf("sdramIfWinOverlap: ERR. TargetWinGet failed\n");
+			return MV_ERROR;
+		}
+
+		/* Do not check disabled windows	*/
+		if (MV_FALSE == addrDecWin.enable)
+		{
+			continue;
+		}
+
+		if(MV_TRUE == ctrlWinOverlapTest(pAddrWin, &addrDecWin.addrWin))
+		{
+			mvOsPrintf(
+			"sdramIfWinOverlap: Required target %d overlap winNum %d\n",
+			target, targetNum);
+			return MV_TRUE;
+		}
+	}
+
+	return MV_FALSE;
+}
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysDram.h b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysDram.h
new file mode 100644
index 000000000000..f16b9477b3b4
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysDram.h
@@ -0,0 +1,80 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __sysDram
+#define __sysDram
+
+/* This structure describes CPU interface address decode window               */
+typedef struct _mvDramIfDecWin
+{
+	MV_ADDR_WIN   addrWin;    /* An address window*/
+	MV_BOOL       enable;     /* Address decode window is enabled/disabled    */
+}MV_DRAM_DEC_WIN;
+
+MV_STATUS mvDramIfWinSet(MV_TARGET target, MV_DRAM_DEC_WIN *pAddrDecWin);
+MV_STATUS mvDramIfWinGet(MV_TARGET target, MV_DRAM_DEC_WIN *pAddrDecWin);
+MV_STATUS mvDramIfWinEnable(MV_TARGET target, MV_BOOL enable);
+
+#endif
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysGbe.c b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysGbe.c
new file mode 100644
index 000000000000..d29d281b3277
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysGbe.c
@@ -0,0 +1,658 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#include "ctrlEnv/sys/mvSysGbe.h"
+
+
+
+typedef struct _mvEthDecWin
+{
+    MV_TARGET     target;
+    MV_ADDR_WIN   addrWin;  /* An address window*/
+    MV_BOOL       enable;   /* Address decode window is enabled/disabled */
+
+}MV_ETH_DEC_WIN;
+
+MV_TARGET ethAddrDecPrioTap[] =
+{
+#if defined(MV_INCLUDE_SDRAM_CS0)
+        SDRAM_CS0,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS1)
+        SDRAM_CS1,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS2)
+        SDRAM_CS2,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS3)
+        SDRAM_CS3,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS0)
+        DEVICE_CS0,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS1)
+        DEVICE_CS1,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS2)
+        DEVICE_CS2,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS3)
+        DEVICE_CS3,
+#endif
+#if defined(MV_INCLUDE_PEX)
+        PEX0_IO,
+#endif
+        TBL_TERM
+};
+
+static MV_STATUS   ethWinOverlapDetect(int port, MV_U32 winNum, MV_ADDR_WIN *pAddrWin);
+static MV_STATUS   mvEthWinSet(int port, MV_U32 winNum, MV_ETH_DEC_WIN *pAddrDecWin);
+static MV_STATUS   mvEthWinGet(int port, MV_U32 winNum, MV_ETH_DEC_WIN *pAddrDecWin);
+
+
+/*******************************************************************************
+* mvEthWinInit - Initialize ETH address decode windows
+*
+* DESCRIPTION:
+*               This function initialize ETH window decode unit. It set the
+*               default address decode windows of the unit.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_ERROR if setting fail.
+*******************************************************************************/
+/* Configure EthDrv memory map registes. */
+MV_STATUS 	mvEthWinInit (int port)
+{
+    MV_U32          winNum, status, winPrioIndex=0, i, regVal=0;
+    MV_ETH_DEC_WIN  ethWin;
+    MV_CPU_DEC_WIN  cpuAddrDecWin;
+    static MV_U32   accessProtReg = 0;
+
+#if (MV_ETH_VERSION <= 1)
+    static MV_BOOL  isFirst = MV_TRUE;
+
+    if(isFirst == MV_FALSE)
+    {
+        MV_REG_WRITE(ETH_ACCESS_PROTECT_REG(port), accessProtReg);
+        return MV_OK;
+    }
+    isFirst = MV_FALSE;
+#endif /* MV_GIGA_ETH_VERSION */
+
+    /* Initiate Ethernet address decode */
+
+    /* First disable all address decode windows */
+    for(winNum=0; winNum<ETH_MAX_DECODE_WIN; winNum++)
+    {
+        regVal |= MV_BIT_MASK(winNum);
+    }
+    MV_REG_WRITE(ETH_BASE_ADDR_ENABLE_REG(port), regVal);
+
+   /* Go through all windows in user table until table terminator      */
+    for (winNum=0; ((ethAddrDecPrioTap[winPrioIndex] != TBL_TERM) &&
+                    (winNum < ETH_MAX_DECODE_WIN)); )
+    {
+        /* first get attributes from CPU If */
+        status = mvCpuIfTargetWinGet(ethAddrDecPrioTap[winPrioIndex],
+                                     &cpuAddrDecWin);
+
+        if(MV_NO_SUCH == status)
+        {
+            winPrioIndex++;
+            continue;
+        }
+		if (MV_OK != status)
+		{
+			mvOsPrintf("mvEthWinInit: ERR. mvCpuIfTargetWinGet failed\n");
+			return MV_ERROR;
+		}
+
+        if (cpuAddrDecWin.enable == MV_TRUE)
+        {
+            ethWin.addrWin.baseHigh = cpuAddrDecWin.addrWin.baseHigh;
+            ethWin.addrWin.baseLow = cpuAddrDecWin.addrWin.baseLow;
+            ethWin.addrWin.size = cpuAddrDecWin.addrWin.size;
+            ethWin.enable = MV_TRUE;
+            ethWin.target = ethAddrDecPrioTap[winPrioIndex];
+
+            if(MV_OK != mvEthWinSet(port, winNum, &ethWin))
+            {
+                mvOsPrintf("mvEthWinInit: ERR. mvEthWinSet failed winNum=%d\n",
+                           winNum);
+                return MV_ERROR;
+            }
+            winNum++;
+        }
+        winPrioIndex ++;
+    }
+
+    /* set full access to all windows. */
+    for(i=0; i<winNum; i++)
+    {
+        accessProtReg |= (FULL_ACCESS << (i*2));
+    }
+    MV_REG_WRITE(ETH_ACCESS_PROTECT_REG(port), accessProtReg);
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvEthWinSet - Set ETH target address window
+*
+* DESCRIPTION:
+*       This function sets a peripheral target (e.g. SDRAM bank0, PCI_MEM0)
+*       address window, also known as address decode window.
+*       After setting this target window, the ETH will be able to access the
+*       target within the address window.
+*
+* INPUT:
+*       winNum      - ETH to target address decode window number.
+*       pAddrDecWin - ETH target window data structure.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_ERROR if address window overlapps with other address decode windows.
+*       MV_BAD_PARAM if base address is invalid parameter or target is
+*       unknown.
+*
+*******************************************************************************/
+MV_STATUS mvEthWinSet(int port, MV_U32 winNum, MV_ETH_DEC_WIN *pAddrDecWin)
+{
+    MV_TARGET_ATTRIB    targetAttribs;
+    MV_DEC_REGS         decRegs;
+
+    /* Parameter checking   */
+    if (winNum >= ETH_MAX_DECODE_WIN)
+    {
+        mvOsPrintf("mvEthWinSet: ERR. Invalid win num %d\n",winNum);
+        return MV_BAD_PARAM;
+    }
+
+    /* Check if the requested window overlapps with current windows     */
+    if (MV_TRUE == ethWinOverlapDetect(port, winNum, &pAddrDecWin->addrWin))
+    {
+        mvOsPrintf("mvEthWinSet: ERR. Window %d overlap\n", winNum);
+        return MV_ERROR;
+    }
+
+	/* check if address is aligned to the size */
+	if(MV_IS_NOT_ALIGN(pAddrDecWin->addrWin.baseLow, pAddrDecWin->addrWin.size))
+	{
+		mvOsPrintf("mvEthWinSet: Error setting Ethernet window %d to "\
+				   "target %s.\nAddress 0x%08x is unaligned to size 0x%x.\n",
+				   winNum,
+				   mvCtrlTargetNameGet(pAddrDecWin->target),
+				   pAddrDecWin->addrWin.baseLow,
+				   pAddrDecWin->addrWin.size);
+		return MV_ERROR;
+	}
+
+
+    decRegs.baseReg = MV_REG_READ(ETH_WIN_BASE_REG(port, winNum));
+    decRegs.sizeReg = MV_REG_READ(ETH_WIN_SIZE_REG(port, winNum));
+
+    if (MV_OK != mvCtrlAddrDecToReg(&(pAddrDecWin->addrWin),&decRegs))
+    {
+        mvOsPrintf("mvEthWinSet:mvCtrlAddrDecToReg Failed\n");
+        return MV_ERROR;
+    }
+
+    mvCtrlAttribGet(pAddrDecWin->target,&targetAttribs);
+
+    /* set attributes */
+    decRegs.baseReg &= ~ETH_WIN_ATTR_MASK;
+    decRegs.baseReg |= targetAttribs.attrib << ETH_WIN_ATTR_OFFS;
+    /* set target ID */
+    decRegs.baseReg &= ~ETH_WIN_TARGET_MASK;
+    decRegs.baseReg |= targetAttribs.targetId << ETH_WIN_TARGET_OFFS;
+
+    /* for the safe side we disable the window before writing the new
+    values */
+    mvEthWinEnable(port, winNum, MV_FALSE);
+    MV_REG_WRITE(ETH_WIN_BASE_REG(port, winNum), decRegs.baseReg);
+
+    /* Write to address decode Size Register                            */
+    MV_REG_WRITE(ETH_WIN_SIZE_REG(port, winNum), decRegs.sizeReg);
+
+    /* Enable address decode target window                              */
+    if (pAddrDecWin->enable == MV_TRUE)
+    {
+            mvEthWinEnable(port, winNum, MV_TRUE);
+    }
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvETHWinGet - Get dma peripheral target address window.
+*
+* DESCRIPTION:
+*               Get ETH peripheral target address window.
+*
+* INPUT:
+*       winNum - ETH to target address decode window number.
+*
+* OUTPUT:
+*       pAddrDecWin - ETH target window data structure.
+*
+* RETURN:
+*       MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+MV_STATUS mvEthWinGet(int port, MV_U32 winNum, MV_ETH_DEC_WIN *pAddrDecWin)
+{
+    MV_DEC_REGS decRegs;
+    MV_TARGET_ATTRIB targetAttrib;
+
+    /* Parameter checking   */
+    if (winNum >= ETH_MAX_DECODE_WIN)
+    {
+        mvOsPrintf("mvEthWinGet: ERR. Invalid winNum %d\n", winNum);
+        return MV_NOT_SUPPORTED;
+    }
+
+    decRegs.baseReg =  MV_REG_READ(ETH_WIN_BASE_REG(port, winNum));
+    decRegs.sizeReg = MV_REG_READ(ETH_WIN_SIZE_REG(port, winNum));
+
+    if (MV_OK != mvCtrlRegToAddrDec(&decRegs,&(pAddrDecWin->addrWin)))
+    {
+        mvOsPrintf("mvAhbToMbusWinGet: mvCtrlRegToAddrDec Failed \n");
+        return MV_ERROR;
+    }
+
+    /* attrib and targetId */
+    targetAttrib.attrib =
+     (decRegs.baseReg & ETH_WIN_ATTR_MASK) >> ETH_WIN_ATTR_OFFS;
+    targetAttrib.targetId =
+     (decRegs.baseReg & ETH_WIN_TARGET_MASK) >> ETH_WIN_TARGET_OFFS;
+
+    pAddrDecWin->target = mvCtrlTargetGet(&targetAttrib);
+
+    /* Check if window is enabled   */
+    if (~(MV_REG_READ(ETH_BASE_ADDR_ENABLE_REG(port))) & (1 << winNum) )
+    {
+        pAddrDecWin->enable = MV_TRUE;
+    }
+    else
+    {
+        pAddrDecWin->enable = MV_FALSE;
+    }
+
+        return MV_OK;
+}
+
+/*******************************************************************************
+* mvEthWinEnable - Enable/disable a ETH to target address window
+*
+* DESCRIPTION:
+*       This function enable/disable a ETH to target address window.
+*       According to parameter 'enable' the routine will enable the
+*       window, thus enabling ETH accesses (before enabling the window it is
+*       tested for overlapping). Otherwise, the window will be disabled.
+*
+* INPUT:
+*       winNum - ETH to target address decode window number.
+*       enable - Enable/disable parameter.
+*
+* OUTPUT:
+*       N/A
+*
+* RETURN:
+*       MV_ERROR if decode window number was wrong or enabled window overlapps.
+*
+*******************************************************************************/
+MV_STATUS mvEthWinEnable(int port, MV_U32 winNum,MV_BOOL enable)
+{
+    MV_ETH_DEC_WIN addrDecWin;
+
+    /* Parameter checking   */
+    if (winNum >= ETH_MAX_DECODE_WIN)
+    {
+        mvOsPrintf("mvEthTargetWinEnable:ERR. Invalid winNum%d\n",winNum);
+        return MV_ERROR;
+    }
+
+    if (enable == MV_TRUE)
+    {   /* First check for overlap with other enabled windows               */
+        /* Get current window */
+        if (MV_OK != mvEthWinGet(port, winNum, &addrDecWin))
+        {
+            mvOsPrintf("mvEthTargetWinEnable:ERR. targetWinGet fail\n");
+            return MV_ERROR;
+        }
+        /* Check for overlapping */
+        if (MV_FALSE == ethWinOverlapDetect(port, winNum, &(addrDecWin.addrWin)))
+        {
+            /* No Overlap. Enable address decode target window              */
+            MV_REG_BIT_RESET(ETH_BASE_ADDR_ENABLE_REG(port), (1 << winNum));
+        }
+        else
+        {   /* Overlap detected */
+            mvOsPrintf("mvEthTargetWinEnable:ERR. Overlap detected\n");
+            return MV_ERROR;
+        }
+    }
+    else
+    {   /* Disable address decode target window                             */
+        MV_REG_BIT_SET(ETH_BASE_ADDR_ENABLE_REG(port), (1 << winNum));
+    }
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvEthWinTargetGet - Get Window number associated with target
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+*       window number
+*
+*******************************************************************************/
+MV_U32  mvEthWinTargetGet(int port, MV_TARGET target)
+{
+    MV_ETH_DEC_WIN decWin;
+    MV_U32 winNum;
+
+    /* Check parameters */
+    if (target >= MAX_TARGETS)
+    {
+        mvOsPrintf("mvAhbToMbusWinTargetGet: target %d is Illigal\n", target);
+        return 0xffffffff;
+    }
+
+    for (winNum=0; winNum<ETH_MAX_DECODE_WIN; winNum++)
+    {
+        if (mvEthWinGet(port, winNum,&decWin) != MV_OK)
+        {
+            mvOsPrintf("mvAhbToMbusWinTargetGet: window returned error\n");
+            return 0xffffffff;
+        }
+
+        if (decWin.enable == MV_TRUE)
+        {
+            if (decWin.target == target)
+            {
+                return winNum;
+            }
+        }
+    }
+    return 0xFFFFFFFF;
+}
+
+/*******************************************************************************
+* mvEthProtWinSet - Set access protection of Ethernet to target window.
+*
+* DESCRIPTION:
+*       Each Ethernet port can be configured with access attributes for each
+*       of the Ethenret to target windows (address decode windows). This
+*       function sets access attributes to a given window for the given channel.
+*
+* INPUTS:
+*       ethPort   - ETH channel number. See MV_ETH_CHANNEL enumerator.
+*       winNum - IETH to target address decode window number.
+*       access - IETH access rights. See MV_ACCESS_RIGHTS enumerator.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_ERROR in case window number is invalid or access right reserved.
+*
+*******************************************************************************/
+MV_STATUS mvEthProtWinSet(MV_U32 portNo, MV_U32 winNum, MV_ACCESS_RIGHTS access)
+{
+    MV_U32 protReg;
+
+    /* Parameter checking   */
+    if(portNo >= mvCtrlEthMaxPortGet())
+    {
+        mvOsPrintf("mvEthProtWinSet:ERR. Invalid port number %d\n", portNo);
+        return MV_ERROR;
+    }
+
+    if (winNum >= ETH_MAX_DECODE_WIN)
+    {
+            mvOsPrintf("mvEthProtWinSet:ERR. Invalid winNum%d\n",winNum);
+            return MV_ERROR;
+    }
+
+    if((access == ACC_RESERVED) || (access >= MAX_ACC_RIGHTS))
+    {
+        mvOsPrintf("mvEthProtWinSet:ERR. Inv access param %d\n", access);
+        return MV_ERROR;
+    }
+    /* Read current protection register */
+    protReg = MV_REG_READ(ETH_ACCESS_PROTECT_REG(portNo));
+
+    /* Clear protection window field */
+    protReg &= ~(ETH_PROT_WIN_MASK(winNum));
+
+    /* Set new protection field value */
+    protReg |= (access << (ETH_PROT_WIN_OFFS(winNum)));
+
+    /* Write protection register back   */
+    MV_REG_WRITE(ETH_ACCESS_PROTECT_REG(portNo), protReg);
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* ethWinOverlapDetect - Detect ETH address windows overlapping
+*
+* DESCRIPTION:
+*       An unpredicted behaviur is expected in case ETH address decode
+*       windows overlapps.
+*       This function detects ETH address decode windows overlapping of a
+*       specified window. The function does not check the window itself for
+*       overlapping. The function also skipps disabled address decode windows.
+*
+* INPUT:
+*       winNum      - address decode window number.
+*       pAddrDecWin - An address decode window struct.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if the given address window overlap current address
+*       decode map, MV_FALSE otherwise, MV_ERROR if reading invalid data
+*       from registers.
+*
+*******************************************************************************/
+static MV_STATUS ethWinOverlapDetect(int port, MV_U32 winNum, MV_ADDR_WIN *pAddrWin)
+{
+    MV_U32              baseAddrEnableReg;
+    MV_U32              winNumIndex;
+    MV_ETH_DEC_WIN      addrDecWin;
+
+    /* Read base address enable register. Do not check disabled windows     */
+    baseAddrEnableReg = MV_REG_READ(ETH_BASE_ADDR_ENABLE_REG(port));
+
+    for (winNumIndex=0; winNumIndex<ETH_MAX_DECODE_WIN; winNumIndex++)
+    {
+        /* Do not check window itself           */
+        if (winNumIndex == winNum)
+        {
+            continue;
+        }
+
+        /* Do not check disabled windows        */
+        if (baseAddrEnableReg & (1 << winNumIndex))
+        {
+            continue;
+        }
+
+        /* Get window parameters        */
+        if (MV_OK != mvEthWinGet(port, winNumIndex, &addrDecWin))
+        {
+            mvOsPrintf("ethWinOverlapDetect: ERR. TargetWinGet failed\n");
+            return MV_ERROR;
+        }
+/*
+        mvOsPrintf("ethWinOverlapDetect:\n
+            winNumIndex =%d baseHigh =0x%x baseLow=0x%x size=0x%x enable=0x%x\n",
+            winNumIndex,
+            addrDecWin.addrWin.baseHigh,
+            addrDecWin.addrWin.baseLow,
+            addrDecWin.addrWin.size,
+            addrDecWin.enable);
+*/
+        if (MV_TRUE == ctrlWinOverlapTest(pAddrWin, &(addrDecWin.addrWin)))
+        {
+            return MV_TRUE;
+        }
+    }
+    return MV_FALSE;
+}
+
+/*******************************************************************************
+* mvEthAddrDecShow - Print the Etherent address decode map.
+*
+* DESCRIPTION:
+*       This function print the Etherent address decode map.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+void    mvEthPortAddrDecShow(int port)
+{
+    MV_ETH_DEC_WIN  win;
+    int             i;
+
+    mvOsOutput( "\n" );
+    mvOsOutput( "ETH %d:\n", port );
+    mvOsOutput( "----\n" );
+
+    for( i = 0; i < ETH_MAX_DECODE_WIN; i++ )
+    {
+        memset( &win, 0, sizeof(ETH_MAX_DECODE_WIN) );
+
+        mvOsOutput( "win%d - ", i );
+
+        if( mvEthWinGet(port, i, &win ) == MV_OK )
+        {
+            if( win.enable )
+            {
+                mvOsOutput( "%s base %08x, ",
+                mvCtrlTargetNameGet(win.target), win.addrWin.baseLow );
+                mvOsOutput( "...." );
+                mvSizePrint( win.addrWin.size );
+
+                mvOsOutput( "\n" );
+            }
+            else
+                mvOsOutput( "disable\n" );
+        }
+    }
+    return;
+}
+
+void    mvEthAddrDecShow(void)
+{
+    int port;
+
+    for(port=0; port<mvCtrlEthMaxPortGet(); port++)
+    {
+	if (MV_FALSE == mvCtrlPwrClckGet(ETH_GIG_UNIT_ID, port)) continue;
+
+        mvEthPortAddrDecShow(port);
+    }
+}
+
+
+void    mvEthInit(void)
+{
+    MV_U32 port;
+
+    /* Power down all existing ports */
+    for(port=0; port<mvCtrlEthMaxPortGet(); port++)
+    {
+	    if (MV_FALSE == mvCtrlPwrClckGet(ETH_GIG_UNIT_ID, port))
+            continue;
+
+        mvEthPortPowerUp(port);
+	    mvEthWinInit(port);
+    }
+    mvEthHalInit();
+}
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysGbe.h b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysGbe.h
new file mode 100644
index 000000000000..aac5517a689d
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysGbe.h
@@ -0,0 +1,113 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+        this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvSysGbeh
+#define __INCmvSysGbeh
+
+#include "mvCommon.h"
+#include "eth/mvEth.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+
+#define ETH_WIN_BASE_REG(port, win)         (MV_ETH_REG_BASE(port) + 0x200 + ((win)<<3))
+#define ETH_WIN_SIZE_REG(port, win)         (MV_ETH_REG_BASE(port) + 0x204 + ((win)<<3))
+#define ETH_WIN_REMAP_REG(port, win)        (MV_ETH_REG_BASE(port) + 0x280 + ((win)<<2))
+#define ETH_BASE_ADDR_ENABLE_REG(port)      (MV_ETH_REG_BASE(port) + 0x290)
+#define ETH_ACCESS_PROTECT_REG(port)        (MV_ETH_REG_BASE(port) + 0x294)
+
+/**** Address decode parameters ****/
+
+/* Ethernet Base Address Register bits */
+#define ETH_MAX_DECODE_WIN              6
+#define ETH_MAX_HIGH_ADDR_REMAP_WIN     4
+
+/* Ethernet Port Access Protect (EPAP) register */
+
+/* The target associated with this window*/
+#define ETH_WIN_TARGET_OFFS             0
+#define ETH_WIN_TARGET_MASK             (0xf << ETH_WIN_TARGET_OFFS)
+/* The target attributes Associated with window */
+#define ETH_WIN_ATTR_OFFS               8
+#define ETH_WIN_ATTR_MASK               (0xff << ETH_WIN_ATTR_OFFS)
+
+/* Ethernet Port Access Protect Register (EPAPR) */
+#define ETH_PROT_NO_ACCESS              NO_ACCESS_ALLOWED
+#define ETH_PROT_READ_ONLY              READ_ONLY
+#define ETH_PROT_FULL_ACCESS            FULL_ACCESS
+#define ETH_PROT_WIN_OFFS(winNum)       (2 * (winNum))
+#define ETH_PROT_WIN_MASK(winNum)       (0x3 << ETH_PROT_WIN_OFFS(winNum))
+
+MV_STATUS   mvEthWinInit (int port);
+MV_STATUS   mvEthWinEnable(int port, MV_U32 winNum, MV_BOOL enable);
+MV_U32      mvEthWinTargetGet(int port, MV_TARGET target);
+MV_STATUS mvEthProtWinSet(MV_U32 portNo, MV_U32 winNum, MV_ACCESS_RIGHTS
+		access);
+
+void        mvEthPortAddrDecShow(int port);
+
+MV_VOID     mvEthAddrDecShow(MV_VOID);
+
+void        mvEthInit(void);
+
+#endif
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysPex.c b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysPex.c
new file mode 100644
index 000000000000..62423bc8d1d4
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysPex.c
@@ -0,0 +1,1695 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "ctrlEnv/sys/mvSysPex.h"
+
+/* this structure describes the mapping between a Pex Window and a CPU target*/
+typedef struct _pexWinToTarget
+{
+	MV_TARGET target;
+	MV_BOOL	  enable;
+
+}PEX_WIN_TO_TARGET;
+
+/* this array is a priority array that define How Pex windows should be
+configured , We have only 6 Pex Windows that can be configured , but we
+have maximum of 9 CPU target windows ! the following array is a priority
+array where the lowest index has the highest priotiy and the highest
+index has the lowest priority of being cnfigured */
+
+MV_U32	pexDevBarPrioTable[] =
+{
+#if defined(MV_INCLUDE_DEVICE_CS0)
+    DEVICE_CS0,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS1)
+    DEVICE_CS1,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS2)
+    DEVICE_CS2,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS3)
+    DEVICE_CS3,
+#endif
+/*
+#if defined(MV_INCLUDE_DEVICE_CS4)
+    DEVICE_CS4,
+#endif
+*/
+    TBL_TERM
+};
+
+
+/* PEX Wins registers offsets are inconsecutive. This struct describes WIN	*/
+/* register offsets	and its function where its is located.					*/
+/* Also, PEX address remap registers offsets are inconsecutive. This struct	*/
+/* describes address remap register offsets									*/
+typedef struct _pexWinRegInfo
+{
+    MV_U32 baseLowRegOffs;
+	MV_U32 baseHighRegOffs;
+	MV_U32 sizeRegOffs;
+	MV_U32 remapLowRegOffs;
+	MV_U32 remapHighRegOffs;
+
+}PEX_WIN_REG_INFO;
+
+static MV_STATUS pexWinOverlapDetect(MV_U32 pexIf, MV_U32 winNum,
+									 MV_ADDR_WIN *pAddrWin);
+static MV_STATUS pexWinRegInfoGet(MV_U32 pexIf, MV_U32 winNum,
+								  PEX_WIN_REG_INFO *pWinRegInfo);
+
+static MV_STATUS pexBarIsValid(MV_U32 baseLow, MV_U32 size);
+
+static MV_BOOL pexIsWinWithinBar(MV_U32 pexIf,MV_ADDR_WIN *pAddrWin);
+static MV_BOOL pexBarOverlapDetect(MV_U32 pexIf,MV_U32 barNum,
+								   MV_ADDR_WIN *pAddrWin);
+const MV_8* pexBarNameGet( MV_U32 bar );
+
+
+/*******************************************************************************
+* mvPexInit - Initialize PEX interfaces
+*
+* DESCRIPTION:
+*
+* This function is responsible of intialization of the Pex Interface , It
+* configure the Pex Bars and Windows in the following manner:
+*
+*  Assumptions :
+*				Bar0 is always internal registers bar
+*			    Bar1 is always the DRAM bar
+*				Bar2 is always the Device bar
+*
+*  1) Sets the Internal registers bar base by obtaining the base from
+*	  the CPU Interface
+*  2) Sets the DRAM bar base and size by getting the base and size from
+*     the CPU Interface when the size is the sum of all enabled DRAM
+*	  chip selects and the base is the base of CS0 .
+*  3) Sets the Device bar base and size by getting these values from the
+*     CPU Interface when the base is the base of the lowest base of the
+*     Device chip selects, and the
+*
+*
+* INPUT:
+*
+*       pexIf   -  PEX interface number.
+*
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_OK if function success otherwise MV_ERROR or MV_BAD_PARAM
+*
+*******************************************************************************/
+MV_STATUS mvPexInit(MV_U32 pexIf, MV_PEX_TYPE pexType)
+{
+	MV_U32 	       		bar;
+	MV_U32		   	winNum;
+	MV_PEX_BAR	  	pexBar;
+	MV_PEX_DEC_WIN 		pexWin;
+	MV_CPU_DEC_WIN 		addrDecWin;
+	MV_TARGET 		target;
+	MV_U32			pexCurrWin=0;
+	MV_U32			status;
+	/* default and exapntion rom
+	are always configured */
+
+#ifndef MV_DISABLE_PEX_DEVICE_BAR
+	MV_U32		   	winIndex;
+	MV_U32 		    	maxBase=0, sizeOfMaxBase=0;
+	MV_U32			pexStartWindow;
+#endif
+
+	/* Parameter checking   */
+	if(pexIf >= mvCtrlPexMaxIfGet())
+	{
+		mvOsPrintf("mvPexInit: ERR. Invalid PEX interface %d\n", pexIf);
+		return MV_BAD_PARAM;
+	}
+
+	/* Enabled CPU access to PCI-Express */
+	mvCpuIfEnablePex(pexIf, pexType);
+
+    /* Start with bars */
+	/* First disable all PEX bars*/
+	for (bar = 0; bar < PEX_MAX_BARS; bar++)
+    {
+		if (PEX_INTER_REGS_BAR != bar)
+		{
+			if (MV_OK != mvPexBarEnable(pexIf, bar, MV_FALSE))
+			{
+				mvOsPrintf("mvPexInit:mvPexBarEnable bar =%d failed \n",bar);
+				return MV_ERROR;
+			}
+
+		}
+
+	}
+
+	/* and disable all PEX target windows  */
+	for (winNum = 0; winNum < PEX_MAX_TARGET_WIN - 2; winNum++)
+    {
+		if (MV_OK != mvPexTargetWinEnable(pexIf, winNum, MV_FALSE))
+		{
+			mvOsPrintf("mvPexInit:mvPexTargetWinEnable winNum =%d failed \n",
+					   winNum);
+			return MV_ERROR;
+
+		}
+	}
+
+	/* Now, go through all bars*/
+
+
+
+/******************************************************************************/
+/*                       Internal registers bar                               */
+/******************************************************************************/
+	bar = PEX_INTER_REGS_BAR;
+
+	/* we only open the bar , no need to open windows for this bar */
+
+	/* first get the CS attribute from the CPU Interface */
+	if (MV_OK !=mvCpuIfTargetWinGet(INTER_REGS,&addrDecWin))
+	{
+		mvOsPrintf("mvPexInit: ERR. mvCpuIfTargetWinGet failed target =%d\n",INTER_REGS);
+		return MV_ERROR;
+	}
+
+	pexBar.addrWin.baseHigh = addrDecWin.addrWin.baseHigh;
+	pexBar.addrWin.baseLow = addrDecWin.addrWin.baseLow;
+	pexBar.addrWin.size = addrDecWin.addrWin.size;
+	pexBar.enable = MV_TRUE;
+
+	if (MV_OK != mvPexBarSet(pexIf, bar, &pexBar))
+	{
+		mvOsPrintf("mvPexInit: ERR. mvPexBarSet %d failed\n", bar);
+		return MV_ERROR;
+	}
+
+/******************************************************************************/
+/*                                DRAM bar                                    */
+/******************************************************************************/
+
+	bar = PEX_DRAM_BAR;
+
+	pexBar.addrWin.size = 0;
+
+	for (target = SDRAM_CS0;target < MV_DRAM_MAX_CS; target++ )
+	{
+
+		status = mvCpuIfTargetWinGet(target,&addrDecWin);
+
+		if((MV_NO_SUCH == status)&&(target != SDRAM_CS0))
+		{
+			continue;
+		}
+
+		/* first get attributes from CPU If */
+		if (MV_OK != status)
+		{
+			mvOsPrintf("mvPexInit: ERR. mvCpuIfTargetWinGet failed target =%d\n",target);
+			return MV_ERROR;
+		}
+		if (addrDecWin.enable == MV_TRUE)
+		{
+			/* the base is the base of DRAM CS0 always */
+			if (SDRAM_CS0 == target )
+			{
+				pexBar.addrWin.baseHigh = addrDecWin.addrWin.baseHigh;
+				pexBar.addrWin.baseLow = addrDecWin.addrWin.baseLow;
+
+			}
+
+			/* increment the bar size to be the sum of the size of all
+			DRAM chips selecs */
+			pexBar.addrWin.size += addrDecWin.addrWin.size;
+
+			/* set a Pex window for this target !
+			DRAM CS always will have a Pex Window , and is not a
+			part of the priority table */
+			pexWin.addrWin.baseHigh = addrDecWin.addrWin.baseHigh;
+			pexWin.addrWin.baseLow = addrDecWin.addrWin.baseLow;
+			pexWin.addrWin.size = addrDecWin.addrWin.size;
+
+			/* we disable the windows at first because we are not
+			sure that it is witihin bar boundries */
+			pexWin.enable =MV_FALSE;
+			pexWin.target = target;
+			pexWin.targetBar = bar;
+
+			if (MV_OK != mvPexTargetWinSet(pexIf,pexCurrWin++,&pexWin))
+			{
+				mvOsPrintf("mvPexInit: ERR. mvPexTargetWinSet failed\n");
+				return MV_ERROR;
+			}
+		}
+	}
+
+	/* check if the size of the bar is illeggal */
+	if (-1 == ctrlSizeToReg(pexBar.addrWin.size, PXBCR_BAR_SIZE_ALIGNMENT))
+	{
+		/* try to get a good size */
+		pexBar.addrWin.size = ctrlSizeRegRoundUp(pexBar.addrWin.size,
+												 PXBCR_BAR_SIZE_ALIGNMENT);
+	}
+
+	/* check if the size and base are valid */
+	if (MV_TRUE == pexBarOverlapDetect(pexIf,bar,&pexBar.addrWin))
+	{
+		mvOsPrintf("mvPexInit:Warning :Bar %d size is illigal\n",bar);
+		mvOsPrintf("it will be disabled\n");
+		mvOsPrintf("please check Pex and CPU windows configuration\n");
+	}
+	else
+	{
+		pexBar.enable = MV_TRUE;
+
+		/* configure the bar */
+		if (MV_OK != mvPexBarSet(pexIf, bar, &pexBar))
+		{
+			mvOsPrintf("mvPexInit: ERR. mvPexBarSet %d failed\n", bar);
+			return MV_ERROR;
+		}
+
+		/* after the bar was configured then we enable the Pex windows*/
+		for (winNum = 0;winNum < pexCurrWin ;winNum++)
+		{
+			if (MV_OK != mvPexTargetWinEnable(pexIf, winNum, MV_TRUE))
+			{
+				mvOsPrintf("mvPexInit: Can't enable window =%d\n",winNum);
+				return MV_ERROR;
+			}
+
+		}
+	}
+
+/******************************************************************************/
+/*                              DEVICE bar                                    */
+/******************************************************************************/
+
+/* Open the Device BAR for non linux only */
+#ifndef MV_DISABLE_PEX_DEVICE_BAR
+
+	/* then device  bar*/
+	bar = PEX_DEVICE_BAR;
+
+	/* save the starting window */
+	pexStartWindow = pexCurrWin;
+	pexBar.addrWin.size = 0;
+	pexBar.addrWin.baseLow = 0xffffffff;
+	pexBar.addrWin.baseHigh = 0;
+	maxBase = 0;
+
+	for (target = DEV_TO_TARGET(START_DEV_CS);target < DEV_TO_TARGET(MV_DEV_MAX_CS); target++ )
+	{
+		status = mvCpuIfTargetWinGet(target,&addrDecWin);
+
+		if (MV_NO_SUCH == status)
+		{
+			continue;
+		}
+
+		if (MV_OK != status)
+		{
+			mvOsPrintf("mvPexInit: ERR. mvCpuIfTargetWinGet failed target =%d\n",target);
+			return MV_ERROR;
+		}
+
+		if (addrDecWin.enable == MV_TRUE)
+		{
+			/* get the minimum base */
+			if (addrDecWin.addrWin.baseLow < pexBar.addrWin.baseLow)
+			{
+				pexBar.addrWin.baseLow = addrDecWin.addrWin.baseLow;
+			}
+
+			/* get the maximum base */
+			if (addrDecWin.addrWin.baseLow > maxBase)
+			{
+				maxBase = addrDecWin.addrWin.baseLow;
+				sizeOfMaxBase = addrDecWin.addrWin.size;
+			}
+
+			/* search in the priority table for this target */
+			for (winIndex = 0; pexDevBarPrioTable[winIndex] != TBL_TERM;
+				 winIndex++)
+			{
+				if (pexDevBarPrioTable[winIndex] != target)
+				{
+					continue;
+				}
+				else if (pexDevBarPrioTable[winIndex] == target)
+				{
+					/*found it */
+
+					/* if the index of this target in the prio table is valid
+					then we set the Pex window for this target, a valid index is
+					an index that is lower than the number of the windows that
+					was not configured yet */
+
+					/* we subtract 2 always because the default and expantion
+					rom windows are always configured */
+					if ( pexCurrWin  < PEX_MAX_TARGET_WIN - 2)
+					{
+						/* set a Pex window for this target !  */
+						pexWin.addrWin.baseHigh = addrDecWin.addrWin.baseHigh;
+						pexWin.addrWin.baseLow = addrDecWin.addrWin.baseLow;
+						pexWin.addrWin.size = addrDecWin.addrWin.size;
+
+						/* we disable the windows at first because we are not
+						sure that it is witihin bar boundries */
+						pexWin.enable = MV_FALSE;
+						pexWin.target = target;
+						pexWin.targetBar = bar;
+
+						if (MV_OK != mvPexTargetWinSet(pexIf,pexCurrWin++,
+													   &pexWin))
+						{
+							mvOsPrintf("mvPexInit: ERR. Window Set failed\n");
+							return MV_ERROR;
+						}
+					}
+				}
+			}
+		}
+	}
+
+	pexBar.addrWin.size = maxBase - pexBar.addrWin.baseLow + sizeOfMaxBase;
+	pexBar.enable = MV_TRUE;
+
+	/* check if the size of the bar is illegal */
+	if (-1 == ctrlSizeToReg(pexBar.addrWin.size, PXBCR_BAR_SIZE_ALIGNMENT))
+	{
+		/* try to get a good size */
+		pexBar.addrWin.size = ctrlSizeRegRoundUp(pexBar.addrWin.size,
+												 PXBCR_BAR_SIZE_ALIGNMENT);
+	}
+
+	/* check if the size and base are valid */
+	if (MV_TRUE == pexBarOverlapDetect(pexIf,bar,&pexBar.addrWin))
+	{
+		mvOsPrintf("mvPexInit:Warning :Bar %d size is illigal\n",bar);
+		mvOsPrintf("it will be disabled\n");
+		mvOsPrintf("please check Pex and CPU windows configuration\n");
+	}
+	else
+	{
+		if (MV_OK != mvPexBarSet(pexIf, bar, &pexBar))
+		{
+			mvOsPrintf("mvPexInit: ERR. mvPexBarSet %d failed\n", bar);
+			return MV_ERROR;
+		}
+
+		/* now enable the windows */
+		for (winNum = pexStartWindow; winNum < pexCurrWin ; winNum++)
+		{
+			if (MV_OK != mvPexTargetWinEnable(pexIf, winNum, MV_TRUE))
+			{
+				mvOsPrintf("mvPexInit:mvPexTargetWinEnable winNum =%d failed \n",
+						   winNum);
+				return MV_ERROR;
+			}
+		}
+	}
+
+#endif
+
+	return mvPexHalInit(pexIf, pexType);
+
+}
+
+/*******************************************************************************
+* mvPexTargetWinSet - Set PEX to peripheral target address window BAR
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*       N/A
+*
+* RETURN:
+*       MV_OK if PEX BAR target window was set correctly,
+*		MV_BAD_PARAM on bad params
+*       MV_ERROR otherwise
+*       (e.g. address window overlapps with other active PEX target window).
+*
+*******************************************************************************/
+MV_STATUS mvPexTargetWinSet(MV_U32 pexIf, MV_U32 winNum,
+                            MV_PEX_DEC_WIN *pAddrDecWin)
+{
+
+	MV_DEC_REGS decRegs;
+	PEX_WIN_REG_INFO winRegInfo;
+	MV_TARGET_ATTRIB targetAttribs;
+
+	/* Parameter checking   */
+	if(pexIf >= mvCtrlPexMaxIfGet())
+	{
+		mvOsPrintf("mvPexTargetWinSet: ERR. Invalid PEX interface %d\n", pexIf);
+		return MV_BAD_PARAM;
+	}
+
+	if (winNum >= PEX_MAX_TARGET_WIN)
+	{
+		mvOsPrintf("mvPexTargetWinSet: ERR. Invalid PEX winNum %d\n", winNum);
+		return MV_BAD_PARAM;
+
+	}
+
+	/* get the pex Window registers offsets */
+	pexWinRegInfoGet(pexIf,winNum,&winRegInfo);
+
+
+	if (MV_TRUE == pAddrDecWin->enable)
+	{
+
+		/* 2) Check if the requested window overlaps with current windows  */
+		if (MV_TRUE == pexWinOverlapDetect(pexIf,winNum, &pAddrDecWin->addrWin))
+		{
+			mvOsPrintf("mvPexTargetWinSet: ERR. Target %d overlap\n", winNum);
+			return MV_BAD_PARAM;
+		}
+
+		/* 2) Check if the requested window overlaps with current windows  */
+		if (MV_FALSE == pexIsWinWithinBar(pexIf,&pAddrDecWin->addrWin))
+		{
+			mvOsPrintf("mvPexTargetWinSet: Win %d should be in bar boundries\n",
+					   winNum);
+			return MV_BAD_PARAM;
+		}
+
+	}
+
+
+
+	/* read base register*/
+
+	if (winRegInfo.baseLowRegOffs)
+	{
+		decRegs.baseReg = MV_REG_READ(winRegInfo.baseLowRegOffs);
+	}
+	else
+	{
+		decRegs.baseReg = 0;
+	}
+
+	if (winRegInfo.sizeRegOffs)
+	{
+		decRegs.sizeReg = MV_REG_READ(winRegInfo.sizeRegOffs);
+	}
+	else
+	{
+		decRegs.sizeReg =0;
+	}
+
+	if (MV_OK != mvCtrlAddrDecToReg(&(pAddrDecWin->addrWin),&decRegs))
+	{
+		mvOsPrintf("mvPexTargetWinSet:mvCtrlAddrDecToReg Failed\n");
+		return MV_ERROR;
+	}
+
+	/* enable\Disable */
+	if (MV_TRUE == pAddrDecWin->enable)
+	{
+		decRegs.sizeReg |= PXWCR_WIN_EN;
+	}
+	else
+	{
+		decRegs.sizeReg &= ~PXWCR_WIN_EN;
+	}
+
+
+	/* clear bit location */
+	decRegs.sizeReg &= ~PXWCR_WIN_BAR_MAP_MASK;
+
+	/* set bar Mapping */
+	if (pAddrDecWin->targetBar == 1)
+	{
+		decRegs.sizeReg |= PXWCR_WIN_BAR_MAP_BAR1;
+	}
+	else if (pAddrDecWin->targetBar == 2)
+	{
+		decRegs.sizeReg |= PXWCR_WIN_BAR_MAP_BAR2;
+	}
+
+	mvCtrlAttribGet(pAddrDecWin->target,&targetAttribs);
+
+	/* set attributes */
+	decRegs.sizeReg &= ~PXWCR_ATTRIB_MASK;
+	decRegs.sizeReg |= targetAttribs.attrib << PXWCR_ATTRIB_OFFS;
+	/* set target ID */
+	decRegs.sizeReg &= ~PXWCR_TARGET_MASK;
+	decRegs.sizeReg |= targetAttribs.targetId << PXWCR_TARGET_OFFS;
+
+
+	/* 3) Write to address decode Base Address Register                   */
+
+	if (winRegInfo.baseLowRegOffs)
+	{
+		MV_REG_WRITE(winRegInfo.baseLowRegOffs, decRegs.baseReg);
+	}
+
+	/* write size reg */
+	if (winRegInfo.sizeRegOffs)
+	{
+		if ((MV_PEX_WIN_DEFAULT == winNum)||
+			(MV_PEX_WIN_EXP_ROM == winNum))
+		{
+			/* clear size because there is no size field*/
+			decRegs.sizeReg &= ~PXWCR_SIZE_MASK;
+
+			/* clear enable because there is no enable field*/
+			decRegs.sizeReg &= ~PXWCR_WIN_EN;
+
+		}
+
+		MV_REG_WRITE(winRegInfo.sizeRegOffs, decRegs.sizeReg);
+	}
+
+
+    return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvPexTargetWinGet - Get PEX to peripheral target address window
+*
+* DESCRIPTION:
+*		Get the PEX to peripheral target address window BAR.
+*
+* INPUT:
+*       pexIf - PEX interface number.
+*       bar   - BAR to be accessed by slave.
+*
+* OUTPUT:
+*       pAddrBarWin - PEX target window information data structure.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPexTargetWinGet(MV_U32 pexIf, MV_U32 winNum,
+                            MV_PEX_DEC_WIN *pAddrDecWin)
+{
+	MV_TARGET_ATTRIB targetAttrib;
+	MV_DEC_REGS decRegs;
+
+	PEX_WIN_REG_INFO winRegInfo;
+
+	/* Parameter checking   */
+	if(pexIf >= mvCtrlPexMaxIfGet())
+	{
+		mvOsPrintf("mvPexTargetWinGet: ERR. Invalid PEX interface %d\n", pexIf);
+		return MV_BAD_PARAM;
+	}
+
+	if (winNum >= PEX_MAX_TARGET_WIN)
+	{
+		mvOsPrintf("mvPexTargetWinGet: ERR. Invalid PEX winNum %d\n", winNum);
+		return MV_BAD_PARAM;
+
+	}
+
+	/* get the pex Window registers offsets */
+	pexWinRegInfoGet(pexIf,winNum,&winRegInfo);
+
+	/* read base register*/
+	if (winRegInfo.baseLowRegOffs)
+	{
+		decRegs.baseReg = MV_REG_READ(winRegInfo.baseLowRegOffs);
+	}
+	else
+	{
+		decRegs.baseReg = 0;
+	}
+
+	/* read size reg */
+	if (winRegInfo.sizeRegOffs)
+	{
+		decRegs.sizeReg = MV_REG_READ(winRegInfo.sizeRegOffs);
+	}
+	else
+	{
+		decRegs.sizeReg =0;
+	}
+
+	if (MV_OK != mvCtrlRegToAddrDec(&decRegs,&(pAddrDecWin->addrWin)))
+	{
+		mvOsPrintf("mvPexTargetWinGet: mvCtrlRegToAddrDec Failed \n");
+		return MV_ERROR;
+
+	}
+
+	if (decRegs.sizeReg & PXWCR_WIN_EN)
+	{
+		pAddrDecWin->enable = MV_TRUE;
+	}
+	else
+	{
+		pAddrDecWin->enable = MV_FALSE;
+
+	}
+
+
+	#if 0
+    if (-1 == pAddrDecWin->addrWin.size)
+	{
+		return MV_ERROR;
+	}
+	#endif
+
+
+	/* get target bar */
+	if ((decRegs.sizeReg & PXWCR_WIN_BAR_MAP_MASK) == PXWCR_WIN_BAR_MAP_BAR1 )
+	{
+		pAddrDecWin->targetBar = 1;
+	}
+	else if ((decRegs.sizeReg & PXWCR_WIN_BAR_MAP_MASK) ==
+			 PXWCR_WIN_BAR_MAP_BAR2 )
+	{
+		pAddrDecWin->targetBar = 2;
+	}
+
+	/* attrib and targetId */
+	pAddrDecWin->attrib = (decRegs.sizeReg & PXWCR_ATTRIB_MASK) >>
+													PXWCR_ATTRIB_OFFS;
+	pAddrDecWin->targetId = (decRegs.sizeReg & PXWCR_TARGET_MASK) >>
+													PXWCR_TARGET_OFFS;
+
+	targetAttrib.attrib = pAddrDecWin->attrib;
+	targetAttrib.targetId = pAddrDecWin->targetId;
+
+	pAddrDecWin->target = mvCtrlTargetGet(&targetAttrib);
+
+	return MV_OK;
+
+}
+
+
+/*******************************************************************************
+* mvPexTargetWinEnable - Enable/disable a PEX BAR window
+*
+* DESCRIPTION:
+*       This function enable/disable a PEX BAR window.
+*       if parameter 'enable' == MV_TRUE the routine will enable the
+*       window, thus enabling PEX accesses for that BAR (before enabling the
+*       window it is tested for overlapping). Otherwise, the window will
+*       be disabled.
+*
+* INPUT:
+*       pexIf  - PEX interface number.
+*       bar    - BAR to be accessed by slave.
+*       enable - Enable/disable parameter.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPexTargetWinEnable(MV_U32 pexIf,MV_U32 winNum, MV_BOOL enable)
+{
+	PEX_WIN_REG_INFO winRegInfo;
+	MV_PEX_DEC_WIN addrDecWin;
+
+	/* Parameter checking   */
+	if(pexIf >= mvCtrlPexMaxIfGet())
+	{
+		mvOsPrintf("mvPexTargetWinEnable: ERR. Invalid PEX If %d\n", pexIf);
+		return MV_BAD_PARAM;
+	}
+
+	if (winNum >= PEX_MAX_TARGET_WIN)
+	{
+		mvOsPrintf("mvPexTargetWinEnable ERR. Invalid PEX winNum %d\n", winNum);
+		return MV_BAD_PARAM;
+
+	}
+
+
+	/* get the pex Window registers offsets */
+	pexWinRegInfoGet(pexIf,winNum,&winRegInfo);
+
+
+	/* if the address windows is disabled , we only disable the appropriare
+	pex window and ignore other settings */
+
+	if (MV_FALSE == enable)
+	{
+
+		/* this is not relevant to default and expantion rom
+		windows */
+		if (winRegInfo.sizeRegOffs)
+		{
+			if ((MV_PEX_WIN_DEFAULT != winNum)&&
+				(MV_PEX_WIN_EXP_ROM != winNum))
+			{
+				MV_REG_BIT_RESET(winRegInfo.sizeRegOffs, PXWCR_WIN_EN);
+			}
+		}
+
+	}
+	else
+	{
+		if (MV_OK != mvPexTargetWinGet(pexIf,winNum, &addrDecWin))
+		{
+			mvOsPrintf("mvPexTargetWinEnable: mvPexTargetWinGet Failed\n");
+			return MV_ERROR;
+		}
+
+		/* Check if the requested window overlaps with current windows	*/
+		if (MV_TRUE == pexWinOverlapDetect(pexIf,winNum, &addrDecWin.addrWin))
+		{
+			mvOsPrintf("mvPexTargetWinEnable: ERR. Target %d overlap\n", winNum);
+			return MV_BAD_PARAM;
+		}
+
+		if (MV_FALSE == pexIsWinWithinBar(pexIf,&addrDecWin.addrWin))
+		{
+			mvOsPrintf("mvPexTargetWinEnable: Win %d should be in bar boundries\n",
+					   winNum);
+			return MV_BAD_PARAM;
+		}
+
+
+		/* this is not relevant to default and expantion rom
+		windows */
+		if (winRegInfo.sizeRegOffs)
+		{
+			if ((MV_PEX_WIN_DEFAULT != winNum)&&
+				(MV_PEX_WIN_EXP_ROM != winNum))
+			{
+				MV_REG_BIT_SET(winRegInfo.sizeRegOffs, PXWCR_WIN_EN);
+			}
+		}
+
+
+	}
+
+	return MV_OK;
+
+}
+
+
+
+/*******************************************************************************
+* mvPexTargetWinRemap - Set PEX to target address window remap.
+*
+* DESCRIPTION:
+*       The PEX interface supports remap of the BAR original address window.
+*       For each BAR it is possible to define a remap address. For example
+*       an address 0x12345678 that hits BAR 0x10 (SDRAM CS[0]) will be modified
+*       according to remap register but will also be targeted to the
+*       SDRAM CS[0].
+*
+* INPUT:
+*       pexIf    - PEX interface number.
+*       bar      - Peripheral target enumerator accessed by slave.
+*       pAddrWin - Address window to be checked.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPexTargetWinRemap(MV_U32 pexIf, MV_U32 winNum,
+                           MV_PEX_REMAP_WIN *pAddrWin)
+{
+
+	PEX_WIN_REG_INFO winRegInfo;
+
+	/* Parameter checking   */
+	if (pexIf >= mvCtrlPexMaxIfGet())
+	{
+		mvOsPrintf("mvPexTargetWinRemap: ERR. Invalid PEX interface num %d\n",
+																		pexIf);
+		return MV_BAD_PARAM;
+	}
+	if (MV_PEX_WIN_DEFAULT == winNum)
+	{
+		mvOsPrintf("mvPexTargetWinRemap: ERR. Invalid PEX win num %d\n",
+																		winNum);
+		return MV_BAD_PARAM;
+
+	}
+
+	if (MV_IS_NOT_ALIGN(pAddrWin->addrWin.baseLow, PXWRR_REMAP_ALIGNMENT))
+	{
+		mvOsPrintf("mvPexTargetWinRemap: Error remap PEX interface %d win %d."\
+				   "\nAddress 0x%08x is unaligned to size 0x%x.\n",
+				   pexIf,
+				   winNum,
+                   pAddrWin->addrWin.baseLow,
+				   pAddrWin->addrWin.size);
+
+		return MV_ERROR;
+	}
+
+	pexWinRegInfoGet(pexIf, winNum, &winRegInfo);
+
+	/* Set remap low register value */
+	MV_REG_WRITE(winRegInfo.remapLowRegOffs, pAddrWin->addrWin.baseLow);
+
+	/* Skip base high settings if the BAR has only base low (32-bit)		*/
+	if (0 != winRegInfo.remapHighRegOffs)
+	{
+		MV_REG_WRITE(winRegInfo.remapHighRegOffs, pAddrWin->addrWin.baseHigh);
+	}
+
+
+	if (pAddrWin->enable == MV_TRUE)
+	{
+		MV_REG_BIT_SET(winRegInfo.remapLowRegOffs,PXWRR_REMAP_EN);
+	}
+	else
+	{
+		MV_REG_BIT_RESET(winRegInfo.remapLowRegOffs,PXWRR_REMAP_EN);
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvPexTargetWinRemapEnable -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+
+MV_STATUS mvPexTargetWinRemapEnable(MV_U32 pexIf, MV_U32 winNum,
+                           MV_BOOL enable)
+{
+	PEX_WIN_REG_INFO winRegInfo;
+
+	/* Parameter checking   */
+	if (pexIf >= mvCtrlPexMaxIfGet())
+	{
+		mvOsPrintf("mvPexTargetWinRemap: ERR. Invalid PEX interface num %d\n",
+																		pexIf);
+		return MV_BAD_PARAM;
+	}
+	if (MV_PEX_WIN_DEFAULT == winNum)
+	{
+		mvOsPrintf("mvPexTargetWinRemap: ERR. Invalid PEX win num %d\n",
+																		winNum);
+		return MV_BAD_PARAM;
+
+	}
+
+
+	pexWinRegInfoGet(pexIf, winNum, &winRegInfo);
+
+	if (enable == MV_TRUE)
+	{
+		MV_REG_BIT_SET(winRegInfo.remapLowRegOffs,PXWRR_REMAP_EN);
+	}
+	else
+	{
+		MV_REG_BIT_RESET(winRegInfo.remapLowRegOffs,PXWRR_REMAP_EN);
+	}
+
+	return MV_OK;
+
+}
+
+/*******************************************************************************
+*  mvPexBarSet - Set PEX bar address and size
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPexBarSet(MV_U32 pexIf,
+						MV_U32 barNum,
+						MV_PEX_BAR *pAddrWin)
+{
+	MV_U32 regBaseLow;
+	MV_U32 regSize,sizeToReg;
+
+
+	/* check parameters */
+	if(pexIf >= mvCtrlPexMaxIfGet())
+	{
+		mvOsPrintf("mvPexBarSet: ERR. Invalid PEX interface %d\n", pexIf);
+		return MV_BAD_PARAM;
+	}
+
+	if(barNum >= PEX_MAX_BARS)
+	{
+		mvOsPrintf("mvPexBarSet: ERR. Invalid bar number %d\n", barNum);
+		return MV_BAD_PARAM;
+	}
+
+
+	if (pAddrWin->addrWin.size == 0)
+	{
+		mvOsPrintf("mvPexBarSet: Size zero is Illigal\n" );
+		return MV_BAD_PARAM;
+	}
+
+
+	/* Check if the window complies with PEX spec							*/
+	if (MV_TRUE != pexBarIsValid(pAddrWin->addrWin.baseLow,
+								 pAddrWin->addrWin.size))
+	{
+        mvOsPrintf("mvPexBarSet: ERR. Target %d window invalid\n", barNum);
+		return MV_BAD_PARAM;
+	}
+
+    /* 2) Check if the requested bar overlaps with current bars		*/
+    if (MV_TRUE == pexBarOverlapDetect(pexIf,barNum, &pAddrWin->addrWin))
+	{
+        mvOsPrintf("mvPexBarSet: ERR. Target %d overlap\n", barNum);
+		return MV_BAD_PARAM;
+	}
+
+	/* Get size register value according to window size						*/
+	sizeToReg = ctrlSizeToReg(pAddrWin->addrWin.size, PXBCR_BAR_SIZE_ALIGNMENT);
+
+	/* Read bar size */
+	if (PEX_INTER_REGS_BAR != barNum) /* internal registers have no size */
+	{
+		regSize = MV_REG_READ(PEX_BAR_CTRL_REG(pexIf,barNum));
+
+		/* Size parameter validity check.                                   */
+		if (-1 == sizeToReg)
+		{
+			mvOsPrintf("mvPexBarSet: ERR. Target BAR %d size invalid.\n",barNum);
+			return MV_BAD_PARAM;
+		}
+
+		regSize &= ~PXBCR_BAR_SIZE_MASK;
+		regSize |= (sizeToReg << PXBCR_BAR_SIZE_OFFS) ;
+
+		MV_REG_WRITE(PEX_BAR_CTRL_REG(pexIf,barNum),regSize);
+
+	}
+
+	/* set size */
+
+
+
+	/* Read base address low */
+	regBaseLow = MV_REG_READ(PEX_CFG_DIRECT_ACCESS(pexIf,
+												   PEX_MV_BAR_BASE(barNum)));
+
+	/* clear current base */
+	if (PEX_INTER_REGS_BAR == barNum)
+	{
+		regBaseLow &= ~PXBIR_BASE_MASK;
+        regBaseLow |= (pAddrWin->addrWin.baseLow & PXBIR_BASE_MASK);
+	}
+	else
+	{
+		regBaseLow &= ~PXBR_BASE_MASK;
+		regBaseLow |= (pAddrWin->addrWin.baseLow & PXBR_BASE_MASK);
+	}
+
+	/* if we had a previous value that contain the bar type (MeM\IO), we want to
+	restore it */
+	regBaseLow |= PEX_BAR_DEFAULT_ATTRIB;
+
+
+
+	/* write base low */
+    MV_REG_WRITE(PEX_CFG_DIRECT_ACCESS(pexIf,PEX_MV_BAR_BASE(barNum)),
+				regBaseLow);
+
+	if (pAddrWin->addrWin.baseHigh != 0)
+	{
+		/* Read base address high */
+		MV_REG_WRITE(PEX_CFG_DIRECT_ACCESS(pexIf,PEX_MV_BAR_BASE_HIGH(barNum)),
+								 pAddrWin->addrWin.baseHigh);
+
+	}
+
+	/* lastly enable the Bar */
+	if (pAddrWin->enable == MV_TRUE)
+	{
+		if (PEX_INTER_REGS_BAR != barNum) /* internal registers
+												are enabled always */
+		{
+			MV_REG_BIT_SET(PEX_BAR_CTRL_REG(pexIf,barNum),PXBCR_BAR_EN);
+		}
+	}
+	else if (MV_FALSE == pAddrWin->enable)
+	{
+		if (PEX_INTER_REGS_BAR != barNum) /* internal registers
+												are enabled always */
+		{
+			MV_REG_BIT_RESET(PEX_BAR_CTRL_REG(pexIf,barNum),PXBCR_BAR_EN);
+		}
+
+	}
+
+
+
+	return MV_OK;
+}
+
+
+/*******************************************************************************
+*  mvPexBarGet - Get PEX bar address and size
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+
+MV_STATUS mvPexBarGet(MV_U32 pexIf,
+								MV_U32 barNum,
+								MV_PEX_BAR *pAddrWin)
+{
+	/* check parameters */
+	if(pexIf >= mvCtrlPexMaxIfGet())
+	{
+		mvOsPrintf("mvPexBarGet: ERR. Invalid PEX interface %d\n", pexIf);
+		return MV_BAD_PARAM;
+	}
+
+	if(barNum >= PEX_MAX_BARS)
+	{
+		mvOsPrintf("mvPexBarGet: ERR. Invalid bar number %d\n", barNum);
+		return MV_BAD_PARAM;
+	}
+
+	/* read base low */
+	pAddrWin->addrWin.baseLow =
+		MV_REG_READ(PEX_CFG_DIRECT_ACCESS(pexIf,PEX_MV_BAR_BASE(barNum)));
+
+
+	if (PEX_INTER_REGS_BAR == barNum)
+	{
+		pAddrWin->addrWin.baseLow &= PXBIR_BASE_MASK;
+	}
+	else
+	{
+		pAddrWin->addrWin.baseLow &= PXBR_BASE_MASK;
+	}
+
+
+	/* read base high */
+	pAddrWin->addrWin.baseHigh =
+		MV_REG_READ(PEX_CFG_DIRECT_ACCESS(pexIf,PEX_MV_BAR_BASE_HIGH(barNum)));
+
+
+	/* Read bar size */
+	if (PEX_INTER_REGS_BAR != barNum) /* internal registers have no size */
+	{
+		pAddrWin->addrWin.size = MV_REG_READ(PEX_BAR_CTRL_REG(pexIf,barNum));
+
+		/* check if enable or not */
+		if (pAddrWin->addrWin.size & PXBCR_BAR_EN)
+		{
+			pAddrWin->enable = MV_TRUE;
+		}
+		else
+		{
+			pAddrWin->enable = MV_FALSE;
+		}
+
+		/* now get the size */
+		pAddrWin->addrWin.size &= PXBCR_BAR_SIZE_MASK;
+		pAddrWin->addrWin.size >>= PXBCR_BAR_SIZE_OFFS;
+
+		pAddrWin->addrWin.size = ctrlRegToSize(pAddrWin->addrWin.size,
+											   PXBCR_BAR_SIZE_ALIGNMENT);
+
+	}
+	else /* PEX_INTER_REGS_BAR */
+	{
+		pAddrWin->addrWin.size = INTER_REGS_SIZE;
+		pAddrWin->enable = MV_TRUE;
+	}
+
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+*  mvPexBarEnable -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+
+
+MV_STATUS mvPexBarEnable(MV_U32 pexIf, MV_U32 barNum, MV_BOOL enable)
+{
+
+	MV_PEX_BAR pexBar;
+
+	/* check parameters */
+	if(pexIf >= mvCtrlPexMaxIfGet())
+	{
+		mvOsPrintf("mvPexBarEnable: ERR. Invalid PEX interface %d\n", pexIf);
+		return MV_BAD_PARAM;
+	}
+
+
+	if(barNum >= PEX_MAX_BARS)
+	{
+		mvOsPrintf("mvPexBarEnable: ERR. Invalid bar number %d\n", barNum);
+		return MV_BAD_PARAM;
+	}
+
+    if (PEX_INTER_REGS_BAR == barNum)
+	{
+		if (MV_TRUE == enable)
+		{
+			return MV_OK;
+		}
+		else
+		{
+			return MV_ERROR;
+		}
+	}
+
+
+	if (MV_FALSE == enable)
+	{
+			/* disable bar and quit */
+			MV_REG_BIT_RESET(PEX_BAR_CTRL_REG(pexIf,barNum),PXBCR_BAR_EN);
+			return MV_OK;
+	}
+
+	/* else */
+
+	if (mvPexBarGet(pexIf,barNum,&pexBar) != MV_OK)
+	{
+		mvOsPrintf("mvPexBarEnable: mvPexBarGet Failed\n");
+		return MV_ERROR;
+
+	}
+
+	if (MV_TRUE == pexBar.enable)
+	{
+		/* it is already enabled !!! */
+		return MV_OK;
+	}
+
+	/* else enable the bar*/
+
+	pexBar.enable = MV_TRUE;
+
+	if (mvPexBarSet(pexIf,barNum,&pexBar) != MV_OK)
+	{
+		mvOsPrintf("mvPexBarEnable: mvPexBarSet Failed\n");
+		return MV_ERROR;
+
+	}
+
+	return MV_OK;
+}
+
+
+/*******************************************************************************
+* pexWinOverlapDetect - Detect address windows overlapping
+*
+* DESCRIPTION:
+*       This function detects address window overlapping of a given address
+*       window in PEX BARs.
+*
+* INPUT:
+*       pAddrWin - Address window to be checked.
+*       bar      - BAR to be accessed by slave.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if the given address window overlap current address
+*       decode map, MV_FALSE otherwise.
+*
+*******************************************************************************/
+static MV_BOOL pexWinOverlapDetect(MV_U32 pexIf,
+									 MV_U32 winNum,
+									 MV_ADDR_WIN *pAddrWin)
+{
+    MV_U32 		   win;
+	MV_PEX_DEC_WIN addrDecWin;
+
+
+	for(win = 0; win < PEX_MAX_TARGET_WIN -2 ; win++)
+    {
+        /* don't check our target or illegal targets */
+        if (winNum == win)
+        {
+            continue;
+        }
+
+		/* Get window parameters 	*/
+		if (MV_OK != mvPexTargetWinGet(pexIf, win, &addrDecWin))
+		{
+			mvOsPrintf("pexWinOverlapDetect: ERR. TargetWinGet failed win=%x\n",
+					   win);
+            return MV_ERROR;
+		}
+
+		/* Do not check disabled windows	*/
+		if (MV_FALSE == addrDecWin.enable)
+		{
+			continue;
+		}
+
+
+        if(MV_TRUE == ctrlWinOverlapTest(pAddrWin, &addrDecWin.addrWin))
+		{
+			mvOsPrintf("pexWinOverlapDetect: winNum %d overlap current %d\n",
+															winNum, win);
+			return MV_TRUE;
+		}
+    }
+
+	return MV_FALSE;
+}
+
+/*******************************************************************************
+* pexIsWinWithinBar - Detect if address is within PEX bar boundries
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if the given address window overlap current address
+*       decode map, MV_FALSE otherwise.
+*
+*******************************************************************************/
+static MV_BOOL pexIsWinWithinBar(MV_U32 pexIf,
+								   MV_ADDR_WIN *pAddrWin)
+{
+    MV_U32 		   bar;
+	MV_PEX_BAR addrDecWin;
+
+	for(bar = 0; bar < PEX_MAX_BARS; bar++)
+    {
+
+		/* Get window parameters 	*/
+		if (MV_OK != mvPexBarGet(pexIf, bar, &addrDecWin))
+		{
+			mvOsPrintf("pexIsWinWithinBar: ERR. mvPexBarGet failed\n");
+            return MV_ERROR;
+		}
+
+		/* Do not check disabled bars	*/
+		if (MV_FALSE == addrDecWin.enable)
+		{
+			continue;
+		}
+
+
+        if(MV_TRUE == ctrlWinWithinWinTest(pAddrWin, &addrDecWin.addrWin))
+		{
+			return MV_TRUE;
+		}
+    }
+
+	return MV_FALSE;
+
+}
+
+/*******************************************************************************
+* pexBarOverlapDetect - Detect address windows overlapping
+*
+* DESCRIPTION:
+*       This function detects address window overlapping of a given address
+*       window in PEX BARs.
+*
+* INPUT:
+*       pAddrWin - Address window to be checked.
+*       bar      - BAR to be accessed by slave.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if the given address window overlap current address
+*       decode map, MV_FALSE otherwise.
+*
+*******************************************************************************/
+static MV_BOOL pexBarOverlapDetect(MV_U32 pexIf,
+									 MV_U32 barNum,
+									 MV_ADDR_WIN *pAddrWin)
+{
+    MV_U32 		   bar;
+	MV_PEX_BAR barDecWin;
+
+
+	for(bar = 0; bar < PEX_MAX_BARS; bar++)
+    {
+        /* don't check our target or illegal targets */
+        if (barNum == bar)
+        {
+            continue;
+        }
+
+		/* Get window parameters 	*/
+		if (MV_OK != mvPexBarGet(pexIf, bar, &barDecWin))
+		{
+			mvOsPrintf("pexBarOverlapDetect: ERR. TargetWinGet failed\n");
+            return MV_ERROR;
+		}
+
+		/* don'nt check disabled bars */
+        if (barDecWin.enable == MV_FALSE)
+		{
+			continue;
+		}
+
+
+        if(MV_TRUE == ctrlWinOverlapTest(pAddrWin, &barDecWin.addrWin))
+		{
+			mvOsPrintf("pexBarOverlapDetect: winNum %d overlap current %d\n",
+															barNum, bar);
+			return MV_TRUE;
+		}
+    }
+
+	return MV_FALSE;
+}
+
+/*******************************************************************************
+* pexBarIsValid - Check if the given address window is valid
+*
+* DESCRIPTION:
+*		PEX spec restrict BAR base to be aligned to BAR size.
+*		This function checks if the given address window is valid.
+*
+* INPUT:
+*       baseLow - 32bit low base address.
+*       size    - Window size.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if the address window is valid, MV_FALSE otherwise.
+*
+*******************************************************************************/
+static MV_STATUS pexBarIsValid(MV_U32 baseLow, MV_U32 size)
+{
+
+	/* PCI spec restrict BAR base to be aligned to BAR size					*/
+	if(MV_IS_NOT_ALIGN(baseLow, size))
+	{
+		return MV_ERROR;
+	}
+	else
+	{
+		return MV_TRUE;
+	}
+
+	return MV_TRUE;
+}
+
+/*******************************************************************************
+* pexBarRegInfoGet - Get BAR register information
+*
+* DESCRIPTION:
+* 		PEX BARs registers offsets are inconsecutive.
+*		This function gets a PEX BAR register information like register offsets
+*		and function location of the BAR.
+*
+* INPUT:
+*       pexIf - PEX interface number.
+*		bar	  - The PEX BAR in question.
+*
+* OUTPUT:
+*       pBarRegInfo - BAR register info struct.
+*
+* RETURN:
+*		MV_BAD_PARAM when bad parameters ,MV_ERROR on error ,othewise MV_OK
+*
+*******************************************************************************/
+static MV_STATUS pexWinRegInfoGet(MV_U32 pexIf,
+								  MV_U32 winNum,
+								  PEX_WIN_REG_INFO *pWinRegInfo)
+{
+
+	if ((winNum >= 0)&&(winNum <=3))
+	{
+		pWinRegInfo->baseLowRegOffs   = PEX_WIN0_3_BASE_REG(pexIf,winNum);
+		pWinRegInfo->baseHighRegOffs  = 0;
+		pWinRegInfo->sizeRegOffs      = PEX_WIN0_3_CTRL_REG(pexIf,winNum);
+		pWinRegInfo->remapLowRegOffs  = PEX_WIN0_3_REMAP_REG(pexIf,winNum);
+		pWinRegInfo->remapHighRegOffs = 0;
+	}
+	else if ((winNum >= 4)&&(winNum <=5))
+	{
+		pWinRegInfo->baseLowRegOffs   = PEX_WIN4_5_BASE_REG(pexIf,winNum);
+		pWinRegInfo->baseHighRegOffs  = 0;
+		pWinRegInfo->sizeRegOffs      = PEX_WIN4_5_CTRL_REG(pexIf,winNum);
+		pWinRegInfo->remapLowRegOffs  = PEX_WIN4_5_REMAP_REG(pexIf,winNum);
+		pWinRegInfo->remapHighRegOffs = PEX_WIN4_5_REMAP_HIGH_REG(pexIf,winNum);
+
+	}
+	else if (MV_PEX_WIN_DEFAULT == winNum)
+	{
+		pWinRegInfo->baseLowRegOffs   = 0;
+		pWinRegInfo->baseHighRegOffs  = 0;
+		pWinRegInfo->sizeRegOffs      = PEX_WIN_DEFAULT_CTRL_REG(pexIf);
+		pWinRegInfo->remapLowRegOffs  = 0;
+		pWinRegInfo->remapHighRegOffs = 0;
+	}
+	else if (MV_PEX_WIN_EXP_ROM == winNum)
+	{
+		pWinRegInfo->baseLowRegOffs   = 0;
+		pWinRegInfo->baseHighRegOffs  = 0;
+		pWinRegInfo->sizeRegOffs      = PEX_WIN_EXP_ROM_CTRL_REG(pexIf);
+		pWinRegInfo->remapLowRegOffs  = PEX_WIN_EXP_ROM_REMAP_REG(pexIf);
+		pWinRegInfo->remapHighRegOffs = 0;
+
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* pexBarNameGet - Get the string name of PEX BAR.
+*
+* DESCRIPTION:
+*		This function get the string name of PEX BAR.
+*
+* INPUT:
+*       bar - PEX bar number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       pointer to the string name of PEX BAR.
+*
+*******************************************************************************/
+const MV_8* pexBarNameGet( MV_U32 bar )
+{
+	switch( bar )
+	{
+		case PEX_INTER_REGS_BAR:
+			return "Internal Regs Bar0....";
+		case PEX_DRAM_BAR:
+			return "DRAM Bar1.............";
+		case PEX_DEVICE_BAR:
+			return "Devices Bar2..........";
+		default:
+			 return "Bar unknown";
+	}
+}
+/*******************************************************************************
+* mvPexAddrDecShow - Print the PEX address decode map (BARs and windows).
+*
+* DESCRIPTION:
+*		This function print the PEX address decode map (BARs and windows).
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_VOID mvPexAddrDecShow(MV_VOID)
+{
+	MV_PEX_BAR pexBar;
+	MV_PEX_DEC_WIN win;
+	MV_U32 pexIf;
+	MV_U32 bar,winNum;
+
+	for( pexIf = 0; pexIf < mvCtrlPexMaxIfGet(); pexIf++ )
+	{
+		if (MV_FALSE == mvCtrlPwrClckGet(PEX_UNIT_ID, pexIf)) continue;
+		mvOsOutput( "\n" );
+		mvOsOutput( "PEX%d:\n", pexIf );
+		mvOsOutput( "-----\n" );
+
+		mvOsOutput( "\nPex Bars \n\n");
+
+		for( bar = 0; bar < PEX_MAX_BARS; bar++ )
+		{
+			memset( &pexBar, 0, sizeof(MV_PEX_BAR) );
+
+			mvOsOutput( "%s ", pexBarNameGet(bar) );
+
+			if( mvPexBarGet( pexIf, bar, &pexBar ) == MV_OK )
+			{
+				if( pexBar.enable )
+				{
+                    mvOsOutput( "base %08x, ", pexBar.addrWin.baseLow );
+                    mvSizePrint( pexBar.addrWin.size );
+                    mvOsOutput( "\n" );
+				}
+				else
+					mvOsOutput( "disable\n" );
+			}
+		}
+		mvOsOutput( "\nPex Decode Windows\n\n");
+
+		for( winNum = 0; winNum < PEX_MAX_TARGET_WIN - 2; winNum++)
+		{
+			memset( &win, 0,sizeof(MV_PEX_DEC_WIN) );
+
+			mvOsOutput( "win%d - ", winNum );
+
+			if ( mvPexTargetWinGet(pexIf,winNum,&win) == MV_OK)
+			{
+				if (win.enable)
+				{
+					mvOsOutput( "%s base %08x, ",
+					mvCtrlTargetNameGet(win.target), win.addrWin.baseLow );
+					mvOsOutput( "...." );
+					mvSizePrint( win.addrWin.size );
+
+					mvOsOutput( "\n" );
+				}
+				else
+					mvOsOutput( "disable\n" );
+
+
+			}
+		}
+
+		memset( &win, 0,sizeof(MV_PEX_DEC_WIN) );
+
+		mvOsOutput( "default win - " );
+
+		if ( mvPexTargetWinGet(pexIf, MV_PEX_WIN_DEFAULT, &win) == MV_OK)
+		{
+			mvOsOutput( "%s ",
+			mvCtrlTargetNameGet(win.target) );
+			mvOsOutput( "\n" );
+		}
+		memset( &win, 0,sizeof(MV_PEX_DEC_WIN) );
+
+		mvOsOutput( "Expansion ROM - " );
+
+		if ( mvPexTargetWinGet(pexIf, MV_PEX_WIN_EXP_ROM, &win) == MV_OK)
+		{
+			mvOsOutput( "%s ",
+			mvCtrlTargetNameGet(win.target) );
+			mvOsOutput( "\n" );
+		}
+
+	}
+}
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysPex.h b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysPex.h
new file mode 100644
index 000000000000..dbe0ca5b9cf3
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysPex.h
@@ -0,0 +1,348 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCSysPEXH
+#define __INCSysPEXH
+
+#include "mvCommon.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvAddrDec.h"
+
+/* 4KB granularity */
+#define MINIMUM_WINDOW_SIZE     0x1000
+#define MINIMUM_BAR_SIZE        0x1000
+#define MINIMUM_BAR_SIZE_MASK	0xFFFFF000
+#define BAR_SIZE_OFFS			12
+#define BAR_SIZE_MASK			(0xFFFFF << BAR_SIZE_OFFS)
+
+
+
+#define MV_PEX_WIN_DEFAULT		6
+#define MV_PEX_WIN_EXP_ROM		7
+#define PEX_MAX_TARGET_WIN		8
+
+
+#define PEX_MAX_BARS			3
+#define PEX_INTER_REGS_BAR		0
+#define PEX_DRAM_BAR			1
+#define PEX_DEVICE_BAR			2
+
+/*************************************/
+/* PCI Express BAR Control Registers */
+/*************************************/
+#define PEX_BAR_CTRL_REG(pexIf,bar)		(0x41804 + (bar-1)*4- (pexIf)*0x10000)
+#define PEX_EXP_ROM_BAR_CTRL_REG(pexIf)	(0x4180C - (pexIf)*0x10000)
+
+
+/* PCI Express BAR Control Register */
+/* PEX_BAR_CTRL_REG (PXBCR) */
+
+#define PXBCR_BAR_EN				BIT0
+#define PXBCR_BAR_SIZE_OFFS			16
+#define PXBCR_BAR_SIZE_MASK			(0xffff << PXBCR_BAR_SIZE_OFFS)
+#define PXBCR_BAR_SIZE_ALIGNMENT	0x10000
+
+
+
+/* PCI Express Expansion ROM BAR Control Register */
+/* PEX_EXP_ROM_BAR_CTRL_REG (PXERBCR) */
+
+#define PXERBCR_EXPROM_EN			BIT0
+#define PXERBCR_EXPROMSZ_OFFS		19
+#define PXERBCR_EXPROMSZ_MASK		(0xf << PXERBCR_EXPROMSZ_OFFS)
+#define PXERBCR_EXPROMSZ_512KB		(0x0 << PXERBCR_EXPROMSZ_OFFS)
+#define PXERBCR_EXPROMSZ_1024KB		(0x1 << PXERBCR_EXPROMSZ_OFFS)
+#define PXERBCR_EXPROMSZ_2048KB		(0x3 << PXERBCR_EXPROMSZ_OFFS)
+#define PXERBCR_EXPROMSZ_4096KB		(0x7 << PXERBCR_EXPROMSZ_OFFS)
+
+/************************************************/
+/* PCI Express Address Window Control Registers */
+/************************************************/
+#define PEX_WIN0_3_CTRL_REG(pexIf,winNum)       \
+                                (0x41820 + (winNum) * 0x10 - (pexIf) * 0x10000)
+#define PEX_WIN0_3_BASE_REG(pexIf,winNum)       \
+                                (0x41824 + (winNum) * 0x10 - (pexIf) * 0x10000)
+#define PEX_WIN0_3_REMAP_REG(pexIf,winNum)      \
+                                (0x4182C + (winNum) * 0x10 - (pexIf) * 0x10000)
+#define PEX_WIN4_5_CTRL_REG(pexIf,winNum)       \
+                            (0x41860 + (winNum - 4) * 0x20 - (pexIf) * 0x10000)
+#define PEX_WIN4_5_BASE_REG(pexIf,winNum)       \
+                            (0x41864 + (winNum - 4) * 0x20 - (pexIf) * 0x10000)
+#define PEX_WIN4_5_REMAP_REG(pexIf,winNum)      \
+                            (0x4186C + (winNum - 4) * 0x20 - (pexIf) * 0x10000)
+#define PEX_WIN4_5_REMAP_HIGH_REG(pexIf,winNum) \
+                            (0x41870 + (winNum - 4) * 0x20 - (pexIf) * 0x10000)
+
+#define PEX_WIN_DEFAULT_CTRL_REG(pexIf)         (0x418B0 - (pexIf) * 0x10000)
+#define PEX_WIN_EXP_ROM_CTRL_REG(pexIf)         (0x418C0 - (pexIf) * 0x10000)
+#define PEX_WIN_EXP_ROM_REMAP_REG(pexIf)        (0x418C4 - (pexIf) * 0x10000)
+
+/* PCI Express Window Control Register */
+/* PEX_WIN_CTRL_REG (PXWCR) */
+
+#define	PXWCR_WIN_EN					BIT0 /* Window Enable.*/
+
+#define	PXWCR_WIN_BAR_MAP_OFFS			1    /* Mapping to BAR.*/
+#define	PXWCR_WIN_BAR_MAP_MASK			BIT1
+#define	PXWCR_WIN_BAR_MAP_BAR1			(0 << PXWCR_WIN_BAR_MAP_OFFS)
+#define	PXWCR_WIN_BAR_MAP_BAR2			(1 << PXWCR_WIN_BAR_MAP_OFFS)
+
+#define	PXWCR_TARGET_OFFS				4  /*Unit ID */
+#define	PXWCR_TARGET_MASK				(0xf << PXWCR_TARGET_OFFS)
+
+#define	PXWCR_ATTRIB_OFFS				8  /* target attributes */
+#define	PXWCR_ATTRIB_MASK				(0xff << PXWCR_ATTRIB_OFFS)
+
+#define	PXWCR_SIZE_OFFS					16 /* size */
+#define	PXWCR_SIZE_MASK					(0xffff << PXWCR_SIZE_OFFS)
+#define	PXWCR_SIZE_ALIGNMENT			0x10000
+
+/* PCI Express Window Base Register */
+/* PEX_WIN_BASE_REG (PXWBR)*/
+
+#define PXWBR_BASE_OFFS					16 /* address[31:16] */
+#define PXWBR_BASE_MASK					(0xffff << PXWBR_BASE_OFFS)
+#define PXWBR_BASE_ALIGNMENT			0x10000
+
+/* PCI Express Window Remap Register */
+/* PEX_WIN_REMAP_REG (PXWRR)*/
+
+#define PXWRR_REMAP_EN					BIT0
+#define PXWRR_REMAP_OFFS				16
+#define PXWRR_REMAP_MASK				(0xffff << PXWRR_REMAP_OFFS)
+#define PXWRR_REMAP_ALIGNMENT			0x10000
+
+/* PCI Express Window Remap (High) Register */
+/* PEX_WIN_REMAP_HIGH_REG (PXWRHR)*/
+
+#define PXWRHR_REMAP_HIGH_OFFS			0
+#define PXWRHR_REMAP_HIGH_MASK			(0xffffffff << PXWRHR_REMAP_HIGH_OFFS)
+
+/* PCI Express Default Window Control Register */
+/* PEX_WIN_DEFAULT_CTRL_REG (PXWDCR) */
+
+#define	PXWDCR_TARGET_OFFS				4  /*Unit ID */
+#define	PXWDCR_TARGET_MASK				(0xf << PXWDCR_TARGET_OFFS)
+#define	PXWDCR_ATTRIB_OFFS				8  /* target attributes */
+#define	PXWDCR_ATTRIB_MASK				(0xff << PXWDCR_ATTRIB_OFFS)
+
+/* PCI Express Expansion ROM Window Control Register */
+/* PEX_WIN_EXP_ROM_CTRL_REG (PXWERCR)*/
+
+#define	PXWERCR_TARGET_OFFS				4  /*Unit ID */
+#define	PXWERCR_TARGET_MASK				(0xf << PXWERCR_TARGET_OFFS)
+#define	PXWERCR_ATTRIB_OFFS				8  /* target attributes */
+#define	PXWERCR_ATTRIB_MASK				(0xff << PXWERCR_ATTRIB_OFFS)
+
+/* PCI Express Expansion ROM Window Remap Register */
+/* PEX_WIN_EXP_ROM_REMAP_REG (PXWERRR)*/
+
+#define PXWERRR_REMAP_EN				BIT0
+#define PXWERRR_REMAP_OFFS				16
+#define PXWERRR_REMAP_MASK				(0xffff << PXWERRR_REMAP_OFFS)
+#define PXWERRR_REMAP_ALIGNMENT			0x10000
+
+
+
+/*PEX_MEMORY_BAR_BASE_ADDR(barNum) (PXMBBA)*/
+/* PCI Express BAR0 Internal Register*/
+/*PEX BAR0_INTER_REG (PXBIR)*/
+
+#define PXBIR_IOSPACE			BIT0	/* Memory Space Indicator */
+
+#define PXBIR_TYPE_OFFS			1	   /* BAR Type/Init Val. */
+#define PXBIR_TYPE_MASK			(0x3 << PXBIR_TYPE_OFFS)
+#define PXBIR_TYPE_32BIT_ADDR	(0x0 << PXBIR_TYPE_OFFS)
+#define PXBIR_TYPE_64BIT_ADDR	(0x2 << PXBIR_TYPE_OFFS)
+
+#define PXBIR_PREFETCH_EN		BIT3 	/* Prefetch Enable */
+
+#define PXBIR_BASE_OFFS		20		/* Base address. Address bits [31:20] */
+#define PXBIR_BASE_MASK		(0xfff << PXBIR_BASE_OFFS)
+#define PXBIR_BASE_ALIGNMET	(1 << PXBIR_BASE_OFFS)
+
+
+/* PCI Express BAR0 Internal (High) Register*/
+/*PEX BAR0_INTER_REG_HIGH (PXBIRH)*/
+
+#define PXBIRH_BASE_OFFS			0		/* Base address. Bits [63:32] */
+#define PXBIRH_BASE_MASK			(0xffffffff << PBBHR_BASE_OFFS)
+
+
+#define PEX_BAR_DEFAULT_ATTRIB		0xc /* Memory - Prefetch - 64 bit address */
+#define PEX_BAR0_DEFAULT_ATTRIB	    PEX_BAR_DEFAULT_ATTRIB
+#define PEX_BAR1_DEFAULT_ATTRIB		PEX_BAR_DEFAULT_ATTRIB
+#define PEX_BAR2_DEFAULT_ATTRIB		PEX_BAR_DEFAULT_ATTRIB
+
+
+/* PCI Express BAR1 Register */
+/*  PCI Express BAR2 Register*/
+/*PEX BAR1_REG (PXBR)*/
+/*PEX BAR2_REG (PXBR)*/
+
+#define PXBR_IOSPACE			BIT0	/* Memory Space Indicator */
+
+#define PXBR_TYPE_OFFS			1	   /* BAR Type/Init Val. */
+#define PXBR_TYPE_MASK			(0x3 << PXBR_TYPE_OFFS)
+#define PXBR_TYPE_32BIT_ADDR	(0x0 << PXBR_TYPE_OFFS)
+#define PXBR_TYPE_64BIT_ADDR	(0x2 << PXBR_TYPE_OFFS)
+
+#define PXBR_PREFETCH_EN		BIT3 	/* Prefetch Enable */
+
+#define PXBR_BASE_OFFS		16		/* Base address. Address bits [31:16] */
+#define PXBR_BASE_MASK		(0xffff << PXBR_BASE_OFFS)
+#define PXBR_BASE_ALIGNMET	(1 << PXBR_BASE_OFFS)
+
+
+/* PCI Express BAR1 (High) Register*/
+/* PCI Express BAR2 (High) Register*/
+/*PEX BAR1_REG_HIGH (PXBRH)*/
+/*PEX BAR2_REG_HIGH (PXBRH)*/
+
+#define PXBRH_BASE_OFFS			0		/* Base address. Address bits [63:32] */
+#define PXBRH_BASE_MASK			(0xffffffff << PXBRH_BASE_OFFS)
+
+/* PCI Express Expansion ROM BAR Register*/
+/*PEX_EXPANSION_ROM_BASE_ADDR_REG (PXERBAR)*/
+
+#define PXERBAR_EXPROMEN		BIT0	/* Expansion ROM Enable */
+
+#define PXERBAR_BASE_512K_OFFS		19		/* Expansion ROM Base Address */
+#define PXERBAR_BASE_512K_MASK		(0x1fff << PXERBAR_BASE_512K_OFFS)
+
+#define PXERBAR_BASE_1MB_OFFS		20		/* Expansion ROM Base Address */
+#define PXERBAR_BASE_1MB_MASK		(0xfff << PXERBAR_BASE_1MB_OFFS)
+
+#define PXERBAR_BASE_2MB_OFFS		21		/* Expansion ROM Base Address */
+#define PXERBAR_BASE_2MB_MASK		(0x7ff << PXERBAR_BASE_2MB_OFFS)
+
+#define PXERBAR_BASE_4MB_OFFS		22		/* Expansion ROM Base Address */
+#define PXERBAR_BASE_4MB_MASK		(0x3ff << PXERBAR_BASE_4MB_OFFS)
+
+/* PEX Bar attributes */
+typedef struct _mvPexBar
+{
+	MV_ADDR_WIN   addrWin;    /* An address window*/
+	MV_BOOL       enable;     /* Address decode window is enabled/disabled    */
+
+}MV_PEX_BAR;
+
+/* PEX Remap Window attributes */
+typedef struct _mvPexRemapWin
+{
+	MV_ADDR_WIN   addrWin;    /* An address window*/
+	MV_BOOL       enable;     /* Address decode window is enabled/disabled    */
+
+}MV_PEX_REMAP_WIN;
+
+/* PEX Remap Window attributes */
+typedef struct _mvPexDecWin
+{
+	MV_TARGET	  target;
+	MV_ADDR_WIN   addrWin;    /* An address window*/
+	MV_U32		  targetBar;
+	MV_U8			attrib;			/* chip select attributes */
+	MV_TARGET_ID 	targetId; 		/* Target Id of this MV_TARGET */
+	MV_BOOL       enable;     /* Address decode window is enabled/disabled    */
+
+}MV_PEX_DEC_WIN;
+
+/* Global Functions prototypes */
+/* mvPexHalInit - Initialize PEX interfaces*/
+MV_STATUS mvPexInit(MV_U32 pexIf, MV_PEX_TYPE pexType);
+
+
+/* mvPexTargetWinSet - Set PEX to peripheral target address window BAR*/
+MV_STATUS mvPexTargetWinSet(MV_U32 pexIf, MV_U32 winNum,
+                            MV_PEX_DEC_WIN *pAddrDecWin);
+
+/* mvPexTargetWinGet - Get PEX to peripheral target address window*/
+MV_STATUS mvPexTargetWinGet(MV_U32 pexIf, MV_U32 winNum,
+                            MV_PEX_DEC_WIN *pAddrDecWin);
+
+/* mvPexTargetWinEnable - Enable/disable a PEX BAR window*/
+MV_STATUS mvPexTargetWinEnable(MV_U32 pexIf,MV_U32 winNum, MV_BOOL enable);
+
+/* mvPexTargetWinRemap - Set PEX to target address window remap.*/
+MV_STATUS mvPexTargetWinRemap(MV_U32 pexIf, MV_U32 winNum,
+                           MV_PEX_REMAP_WIN *pAddrWin);
+
+/* mvPexTargetWinRemapEnable -enable\disable a PEX Window remap.*/
+MV_STATUS mvPexTargetWinRemapEnable(MV_U32 pexIf, MV_U32 winNum,
+                           MV_BOOL enable);
+
+/* mvPexBarSet - Set PEX bar address and size */
+MV_STATUS mvPexBarSet(MV_U32 pexIf, MV_U32 barNum, MV_PEX_BAR *addrWin);
+
+/* mvPexBarGet - Get PEX bar address and size */
+MV_STATUS mvPexBarGet(MV_U32 pexIf, MV_U32 barNum, MV_PEX_BAR *addrWin);
+
+/* mvPexBarEnable - enable\disable a PEX bar*/
+MV_STATUS mvPexBarEnable(MV_U32 pexIf, MV_U32 barNum, MV_BOOL enable);
+
+/* mvPexAddrDecShow - Display address decode windows attributes */
+MV_VOID mvPexAddrDecShow(MV_VOID);
+
+#endif
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSata.c b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSata.c
new file mode 100644
index 000000000000..eed8cbaff058
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSata.c
@@ -0,0 +1,427 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#include "mvTypes.h"
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "cpu/mvCpu.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+#include "sata/CoreDriver/mvRegs.h"
+#include "ctrlEnv/sys/mvSysSata.h"
+
+MV_TARGET sataAddrDecPrioTab[] =
+{
+#if defined(MV_INCLUDE_SDRAM_CS0)
+    SDRAM_CS0,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS1)
+    SDRAM_CS1,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS2)
+    SDRAM_CS2,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS3)
+    SDRAM_CS3,
+#endif
+#if defined(MV_INCLUDE_PEX)
+	PEX0_MEM,
+#endif
+	TBL_TERM
+};
+
+
+/*******************************************************************************
+* sataWinOverlapDetect - Detect SATA address windows overlapping
+*
+* DESCRIPTION:
+*       An unpredicted behaviur is expected in case SATA address decode
+*       windows overlapps.
+*       This function detects SATA address decode windows overlapping of a
+*       specified window. The function does not check the window itself for
+*       overlapping. The function also skipps disabled address decode windows.
+*
+* INPUT:
+*       winNum      - address decode window number.
+*       pAddrDecWin - An address decode window struct.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if the given address window overlap current address
+*       decode map, MV_FALSE otherwise, MV_ERROR if reading invalid data
+*       from registers.
+*
+*******************************************************************************/
+static MV_STATUS sataWinOverlapDetect(int dev, MV_U32 winNum,
+				      MV_ADDR_WIN *pAddrWin)
+{
+    MV_U32          winNumIndex;
+    MV_SATA_DEC_WIN  addrDecWin;
+
+    for(winNumIndex=0; winNumIndex<MV_SATA_MAX_ADDR_DECODE_WIN; winNumIndex++)
+    {
+        /* Do not check window itself       */
+        if (winNumIndex == winNum)
+        {
+            continue;
+        }
+
+        /* Get window parameters    */
+        if (MV_OK != mvSataWinGet(dev, winNumIndex, &addrDecWin))
+        {
+            mvOsPrintf("%s: ERR. TargetWinGet failed\n", __FUNCTION__);
+            return MV_ERROR;
+        }
+
+        /* Do not check disabled windows    */
+        if(addrDecWin.enable == MV_FALSE)
+        {
+            continue;
+        }
+
+        if (MV_TRUE == ctrlWinOverlapTest(pAddrWin, &(addrDecWin.addrWin)))
+        {
+            return MV_TRUE;
+        }
+    }
+    return MV_FALSE;
+}
+
+
+/*******************************************************************************
+* mvSataWinSet - Set SATA target address window
+*
+* DESCRIPTION:
+*       This function sets a peripheral target (e.g. SDRAM bank0, PCI_MEM0)
+*       address window, also known as address decode window.
+*       After setting this target window, the SATA will be able to access the
+*       target within the address window.
+*
+* INPUT:
+*       winNum      - SATA target address decode window number.
+*       pAddrDecWin - SATA target window data structure.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_ERROR if address window overlapps with other address decode windows.
+*       MV_BAD_PARAM if base address is invalid parameter or target is
+*       unknown.
+*
+*******************************************************************************/
+MV_STATUS mvSataWinSet(int dev, MV_U32 winNum, MV_SATA_DEC_WIN *pAddrDecWin)
+{
+    MV_TARGET_ATTRIB    targetAttribs;
+    MV_DEC_REGS         decRegs;
+
+    /* Parameter checking   */
+    if (winNum >= MV_SATA_MAX_ADDR_DECODE_WIN)
+    {
+        mvOsPrintf("%s: ERR. Invalid win num %d\n",__FUNCTION__, winNum);
+        return MV_BAD_PARAM;
+    }
+
+    /* Check if the requested window overlapps with current windows         */
+    if (MV_TRUE == sataWinOverlapDetect(dev, winNum, &pAddrDecWin->addrWin))
+    {
+        mvOsPrintf("%s: ERR. Window %d overlap\n", __FUNCTION__, winNum);
+        return MV_ERROR;
+    }
+
+    /* check if address is aligned to the size */
+    if(MV_IS_NOT_ALIGN(pAddrDecWin->addrWin.baseLow, pAddrDecWin->addrWin.size))
+    {
+	mvOsPrintf("mvSataWinSet:Error setting SATA window %d to "\
+		   "target %s.\nAddress 0x%08x is unaligned to size 0x%x.\n",
+		   winNum,
+		   mvCtrlTargetNameGet(pAddrDecWin->target),
+		   pAddrDecWin->addrWin.baseLow,
+		   pAddrDecWin->addrWin.size);
+	return MV_ERROR;
+    }
+
+    decRegs.baseReg = 0;
+    decRegs.sizeReg = 0;
+
+    if (MV_OK != mvCtrlAddrDecToReg(&(pAddrDecWin->addrWin),&decRegs))
+    {
+        mvOsPrintf("%s: mvCtrlAddrDecToReg Failed\n", __FUNCTION__);
+        return MV_ERROR;
+    }
+
+    mvCtrlAttribGet(pAddrDecWin->target, &targetAttribs);
+
+    /* set attributes */
+    decRegs.sizeReg &= ~MV_SATA_WIN_ATTR_MASK;
+    decRegs.sizeReg |= (targetAttribs.attrib << MV_SATA_WIN_ATTR_OFFSET);
+
+    /* set target ID */
+    decRegs.sizeReg &= ~MV_SATA_WIN_TARGET_MASK;
+    decRegs.sizeReg |= (targetAttribs.targetId << MV_SATA_WIN_TARGET_OFFSET);
+
+    if (pAddrDecWin->enable == MV_TRUE)
+    {
+        decRegs.sizeReg |= MV_SATA_WIN_ENABLE_MASK;
+    }
+    else
+    {
+        decRegs.sizeReg &= ~MV_SATA_WIN_ENABLE_MASK;
+    }
+
+    MV_REG_WRITE( MV_SATA_WIN_CTRL_REG(dev, winNum), decRegs.sizeReg);
+    MV_REG_WRITE( MV_SATA_WIN_BASE_REG(dev, winNum), decRegs.baseReg);
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvSataWinGet - Get SATA peripheral target address window.
+*
+* DESCRIPTION:
+*       Get SATA peripheral target address window.
+*
+* INPUT:
+*       winNum - SATA target address decode window number.
+*
+* OUTPUT:
+*       pAddrDecWin - SATA target window data structure.
+*
+* RETURN:
+*       MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+MV_STATUS mvSataWinGet(int dev, MV_U32 winNum, MV_SATA_DEC_WIN *pAddrDecWin)
+{
+    MV_DEC_REGS         decRegs;
+    MV_TARGET_ATTRIB    targetAttrib;
+
+    /* Parameter checking   */
+    if (winNum >= MV_SATA_MAX_ADDR_DECODE_WIN)
+    {
+        mvOsPrintf("%s (dev=%d): ERR. Invalid winNum %d\n",
+                    __FUNCTION__, dev, winNum);
+        return MV_NOT_SUPPORTED;
+    }
+
+    decRegs.baseReg = MV_REG_READ( MV_SATA_WIN_BASE_REG(dev, winNum) );
+    decRegs.sizeReg = MV_REG_READ( MV_SATA_WIN_CTRL_REG(dev, winNum) );
+
+    if (MV_OK != mvCtrlRegToAddrDec(&decRegs, &pAddrDecWin->addrWin) )
+    {
+        mvOsPrintf("%s: mvCtrlRegToAddrDec Failed\n", __FUNCTION__);
+        return MV_ERROR;
+    }
+
+    /* attrib and targetId */
+    targetAttrib.attrib = (decRegs.sizeReg & MV_SATA_WIN_ATTR_MASK) >>
+		MV_SATA_WIN_ATTR_OFFSET;
+    targetAttrib.targetId = (decRegs.sizeReg & MV_SATA_WIN_TARGET_MASK) >>
+		MV_SATA_WIN_TARGET_OFFSET;
+
+    pAddrDecWin->target = mvCtrlTargetGet(&targetAttrib);
+
+    /* Check if window is enabled   */
+    if(decRegs.sizeReg & MV_SATA_WIN_ENABLE_MASK)
+    {
+        pAddrDecWin->enable = MV_TRUE;
+    }
+    else
+    {
+        pAddrDecWin->enable = MV_FALSE;
+    }
+    return MV_OK;
+}
+/*******************************************************************************
+* mvSataAddrDecShow - Print the SATA address decode map.
+*
+* DESCRIPTION:
+*		This function print the SATA address decode map.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_VOID mvSataAddrDecShow(MV_VOID)
+{
+
+	MV_SATA_DEC_WIN win;
+	int i,j;
+
+
+
+    for( j = 0; j < MV_SATA_MAX_CHAN; j++ )
+    {
+	if (MV_FALSE == mvCtrlPwrClckGet(SATA_UNIT_ID, j))
+		return;
+
+	mvOsOutput( "\n" );
+	mvOsOutput( "SATA %d:\n", j );
+	mvOsOutput( "----\n" );
+
+	for( i = 0; i < MV_SATA_MAX_ADDR_DECODE_WIN; i++ )
+	{
+            memset( &win, 0, sizeof(MV_SATA_DEC_WIN) );
+
+	    mvOsOutput( "win%d - ", i );
+
+	    if( mvSataWinGet(j, i, &win ) == MV_OK )
+	    {
+	        if( win.enable )
+	        {
+                    mvOsOutput( "%s base %08x, ",
+                    mvCtrlTargetNameGet(win.target), win.addrWin.baseLow );
+                    mvOsOutput( "...." );
+
+                    mvSizePrint( win.addrWin.size );
+
+		    mvOsOutput( "\n" );
+                }
+		else
+		mvOsOutput( "disable\n" );
+	    }
+	}
+    }
+}
+
+
+/*******************************************************************************
+* mvSataWinInit - Initialize the integrated SATA target address window.
+*
+* DESCRIPTION:
+*       Initialize the SATA peripheral target address window.
+*
+* INPUT:
+*
+*
+* OUTPUT:
+*
+*
+* RETURN:
+*       MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+MV_STATUS mvSataWinInit(MV_VOID)
+{
+    int             winNum;
+    MV_SATA_DEC_WIN  sataWin;
+    MV_CPU_DEC_WIN  cpuAddrDecWin;
+    MV_U32          status, winPrioIndex = 0;
+
+    /* Initiate Sata address decode */
+
+    /* First disable all address decode windows */
+    for(winNum = 0; winNum < MV_SATA_MAX_ADDR_DECODE_WIN; winNum++)
+    {
+        MV_U32  regVal = MV_REG_READ(MV_SATA_WIN_CTRL_REG(0, winNum));
+        regVal &= ~MV_SATA_WIN_ENABLE_MASK;
+        MV_REG_WRITE(MV_SATA_WIN_CTRL_REG(0, winNum), regVal);
+    }
+
+    winNum = 0;
+    while( (sataAddrDecPrioTab[winPrioIndex] != TBL_TERM) &&
+           (winNum < MV_SATA_MAX_ADDR_DECODE_WIN) )
+    {
+        /* first get attributes from CPU If */
+        status = mvCpuIfTargetWinGet(sataAddrDecPrioTab[winPrioIndex],
+                                     &cpuAddrDecWin);
+
+        if(MV_NO_SUCH == status)
+        {
+            winPrioIndex++;
+            continue;
+        }
+	if (MV_OK != status)
+	{
+            mvOsPrintf("%s: ERR. mvCpuIfTargetWinGet failed\n", __FUNCTION__);
+	    return MV_ERROR;
+	}
+
+        if (cpuAddrDecWin.enable == MV_TRUE)
+        {
+            sataWin.addrWin.baseHigh = cpuAddrDecWin.addrWin.baseHigh;
+            sataWin.addrWin.baseLow  = cpuAddrDecWin.addrWin.baseLow;
+            sataWin.addrWin.size     = cpuAddrDecWin.addrWin.size;
+            sataWin.enable           = MV_TRUE;
+            sataWin.target           = sataAddrDecPrioTab[winPrioIndex];
+
+            if(MV_OK != mvSataWinSet(0/*dev*/, winNum, &sataWin))
+            {
+                return MV_ERROR;
+            }
+            winNum++;
+        }
+        winPrioIndex++;
+    }
+    return MV_OK;
+}
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSata.h b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSata.h
new file mode 100644
index 000000000000..0ee55de19a72
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSata.h
@@ -0,0 +1,123 @@
+
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#ifndef __INCMVSysSataAddrDech
+#define __INCMVSysSataAddrDech
+
+#include "mvCommon.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct _mvSataDecWin
+{
+    MV_TARGET     target;
+    MV_ADDR_WIN   addrWin;    /* An address window*/
+    MV_BOOL       enable;     /* Address decode window is enabled/disabled    */
+
+} MV_SATA_DEC_WIN;
+
+
+#define MV_SATA_MAX_ADDR_DECODE_WIN 4
+
+#define MV_SATA_WIN_CTRL_REG(dev, win)        (SATA_REG_BASE + 0x30 + ((win)<<4))
+#define MV_SATA_WIN_BASE_REG(dev, win)        (SATA_REG_BASE + 0x34 + ((win)<<4))
+
+/* BITs in Bridge Interrupt Cause and Mask registers */
+#define MV_SATA_ADDR_DECODE_ERROR_BIT        0
+#define MV_SATA_ADDR_DECODE_ERROR_MASK       (1<<MV_SATA_ADDR_DECODE_ERROR_BIT)
+
+/* BITs in Windows 0-3 Control and Base Registers */
+#define MV_SATA_WIN_ENABLE_BIT               0
+#define MV_SATA_WIN_ENABLE_MASK              (1<<MV_SATA_WIN_ENABLE_BIT)
+
+#define MV_SATA_WIN_TARGET_OFFSET            4
+#define MV_SATA_WIN_TARGET_MASK              (0xF<<MV_SATA_WIN_TARGET_OFFSET)
+
+#define MV_SATA_WIN_ATTR_OFFSET              8
+#define MV_SATA_WIN_ATTR_MASK                (0xFF<<MV_SATA_WIN_ATTR_OFFSET)
+
+#define MV_SATA_WIN_SIZE_OFFSET              16
+#define MV_SATA_WIN_SIZE_MASK                (0xFFFF<<MV_SATA_WIN_SIZE_OFFSET)
+
+#define MV_SATA_WIN_BASE_OFFSET              16
+#define MV_SATA_WIN_BASE_MASK                (0xFFFF<<MV_SATA_WIN_BASE_OFFSET)
+
+MV_STATUS mvSataWinGet(int dev, MV_U32 winNum, MV_SATA_DEC_WIN *pAddrDecWin);
+MV_STATUS mvSataWinSet(int dev, MV_U32 winNum, MV_SATA_DEC_WIN *pAddrDecWin);
+MV_STATUS mvSataWinByTargetGet(MV_TARGET target, MV_SATA_DEC_WIN *pAddrDecWin);
+MV_STATUS mvSataWinInit(MV_VOID);
+MV_VOID   mvSataAddrDecShow(MV_VOID);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSdmmc.c b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSdmmc.c
new file mode 100644
index 000000000000..e61d5e050e08
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSdmmc.c
@@ -0,0 +1,424 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#include "mvTypes.h"
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "cpu/mvCpu.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+#include "mvRegs.h"
+#include "ctrlEnv/sys/mvSysSdmmc.h"
+
+MV_TARGET sdmmcAddrDecPrioTab[] =
+{
+#if defined(MV_INCLUDE_SDRAM_CS0)
+    SDRAM_CS0,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS1)
+    SDRAM_CS1,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS2)
+    SDRAM_CS2,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS3)
+    SDRAM_CS3,
+#endif
+#if defined(MV_INCLUDE_PEX)
+	PEX0_MEM,
+#endif
+	TBL_TERM
+};
+
+
+/*******************************************************************************
+* sdmmcWinOverlapDetect - Detect SDMMC address windows overlapping
+*
+* DESCRIPTION:
+*       An unpredicted behaviur is expected in case SDMMC address decode
+*       windows overlapps.
+*       This function detects SDMMC address decode windows overlapping of a
+*       specified window. The function does not check the window itself for
+*       overlapping. The function also skipps disabled address decode windows.
+*
+* INPUT:
+*       winNum      - address decode window number.
+*       pAddrDecWin - An address decode window struct.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if the given address window overlap current address
+*       decode map, MV_FALSE otherwise, MV_ERROR if reading invalid data
+*       from registers.
+*
+*******************************************************************************/
+static MV_STATUS sdmmcWinOverlapDetect(int dev, MV_U32 winNum,
+				      MV_ADDR_WIN *pAddrWin)
+{
+    MV_U32          winNumIndex;
+    MV_SDMMC_DEC_WIN  addrDecWin;
+
+    for(winNumIndex=0; winNumIndex<MV_SDMMC_MAX_ADDR_DECODE_WIN; winNumIndex++)
+    {
+        /* Do not check window itself       */
+        if (winNumIndex == winNum)
+        {
+            continue;
+        }
+
+        /* Get window parameters    */
+        if (MV_OK != mvSdmmcWinGet(dev, winNumIndex, &addrDecWin))
+        {
+            mvOsPrintf("%s: ERR. TargetWinGet failed\n", __FUNCTION__);
+            return MV_ERROR;
+        }
+
+        /* Do not check disabled windows    */
+        if(addrDecWin.enable == MV_FALSE)
+        {
+            continue;
+        }
+
+        if (MV_TRUE == ctrlWinOverlapTest(pAddrWin, &(addrDecWin.addrWin)))
+        {
+            return MV_TRUE;
+        }
+    }
+    return MV_FALSE;
+}
+
+
+/*******************************************************************************
+* mvSdmmcWinSet - Set SDMMC target address window
+*
+* DESCRIPTION:
+*       This function sets a peripheral target (e.g. SDRAM bank0, PCI_MEM0)
+*       address window, also known as address decode window.
+*       After setting this target window, the SDMMC will be able to access the
+*       target within the address window.
+*
+* INPUT:
+*       winNum      - SDMMC target address decode window number.
+*       pAddrDecWin - SDMMC target window data structure.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_ERROR if address window overlapps with other address decode windows.
+*       MV_BAD_PARAM if base address is invalid parameter or target is
+*       unknown.
+*
+*******************************************************************************/
+MV_STATUS mvSdmmcWinSet(int dev, MV_U32 winNum, MV_SDMMC_DEC_WIN *pAddrDecWin)
+{
+    MV_TARGET_ATTRIB    targetAttribs;
+    MV_DEC_REGS         decRegs;
+
+    /* Parameter checking   */
+    if (winNum >= MV_SDMMC_MAX_ADDR_DECODE_WIN)
+    {
+        mvOsPrintf("%s: ERR. Invalid win num %d\n",__FUNCTION__, winNum);
+        return MV_BAD_PARAM;
+    }
+
+    /* Check if the requested window overlapps with current windows         */
+    if (MV_TRUE == sdmmcWinOverlapDetect(dev, winNum, &pAddrDecWin->addrWin))
+    {
+        mvOsPrintf("%s: ERR. Window %d overlap\n", __FUNCTION__, winNum);
+        return MV_ERROR;
+    }
+
+    /* check if address is aligned to the size */
+    if(MV_IS_NOT_ALIGN(pAddrDecWin->addrWin.baseLow, pAddrDecWin->addrWin.size))
+    {
+	mvOsPrintf("mvSdmmcWinSet:Error setting SDMMC window %d to "\
+		   "target %s.\nAddress 0x%08x is unaligned to size 0x%x.\n",
+		   winNum,
+		   mvCtrlTargetNameGet(pAddrDecWin->target),
+		   pAddrDecWin->addrWin.baseLow,
+		   pAddrDecWin->addrWin.size);
+	return MV_ERROR;
+    }
+
+    decRegs.baseReg = 0;
+    decRegs.sizeReg = 0;
+
+    if (MV_OK != mvCtrlAddrDecToReg(&(pAddrDecWin->addrWin),&decRegs))
+    {
+        mvOsPrintf("%s: mvCtrlAddrDecToReg Failed\n", __FUNCTION__);
+        return MV_ERROR;
+    }
+
+    mvCtrlAttribGet(pAddrDecWin->target, &targetAttribs);
+
+    /* set attributes */
+    decRegs.sizeReg &= ~MV_SDMMC_WIN_ATTR_MASK;
+    decRegs.sizeReg |= (targetAttribs.attrib << MV_SDMMC_WIN_ATTR_OFFSET);
+
+    /* set target ID */
+    decRegs.sizeReg &= ~MV_SDMMC_WIN_TARGET_MASK;
+    decRegs.sizeReg |= (targetAttribs.targetId << MV_SDMMC_WIN_TARGET_OFFSET);
+
+    if (pAddrDecWin->enable == MV_TRUE)
+    {
+        decRegs.sizeReg |= MV_SDMMC_WIN_ENABLE_MASK;
+    }
+    else
+    {
+        decRegs.sizeReg &= ~MV_SDMMC_WIN_ENABLE_MASK;
+    }
+
+    MV_REG_WRITE( MV_SDMMC_WIN_CTRL_REG(dev, winNum), decRegs.sizeReg);
+    MV_REG_WRITE( MV_SDMMC_WIN_BASE_REG(dev, winNum), decRegs.baseReg);
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvSdmmcWinGet - Get SDMMC peripheral target address window.
+*
+* DESCRIPTION:
+*       Get SDMMC peripheral target address window.
+*
+* INPUT:
+*       winNum - SDMMC target address decode window number.
+*d
+* OUTPUT:
+*       pAddrDecWin - SDMMC target window data structure.
+*
+* RETURN:
+*       MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+MV_STATUS mvSdmmcWinGet(int dev, MV_U32 winNum, MV_SDMMC_DEC_WIN *pAddrDecWin)
+{
+    MV_DEC_REGS         decRegs;
+    MV_TARGET_ATTRIB    targetAttrib;
+
+    /* Parameter checking   */
+    if (winNum >= MV_SDMMC_MAX_ADDR_DECODE_WIN)
+    {
+        mvOsPrintf("%s (dev=%d): ERR. Invalid winNum %d\n",
+                    __FUNCTION__, dev, winNum);
+        return MV_NOT_SUPPORTED;
+    }
+
+    decRegs.baseReg = MV_REG_READ( MV_SDMMC_WIN_BASE_REG(dev, winNum) );
+    decRegs.sizeReg = MV_REG_READ( MV_SDMMC_WIN_CTRL_REG(dev, winNum) );
+
+    if (MV_OK != mvCtrlRegToAddrDec(&decRegs, &pAddrDecWin->addrWin) )
+    {
+        mvOsPrintf("%s: mvCtrlRegToAddrDec Failed\n", __FUNCTION__);
+        return MV_ERROR;
+    }
+
+    /* attrib and targetId */
+    targetAttrib.attrib = (decRegs.sizeReg & MV_SDMMC_WIN_ATTR_MASK) >>
+		MV_SDMMC_WIN_ATTR_OFFSET;
+    targetAttrib.targetId = (decRegs.sizeReg & MV_SDMMC_WIN_TARGET_MASK) >>
+		MV_SDMMC_WIN_TARGET_OFFSET;
+
+    pAddrDecWin->target = mvCtrlTargetGet(&targetAttrib);
+
+    /* Check if window is enabled   */
+    if(decRegs.sizeReg & MV_SDMMC_WIN_ENABLE_MASK)
+    {
+        pAddrDecWin->enable = MV_TRUE;
+    }
+    else
+    {
+        pAddrDecWin->enable = MV_FALSE;
+    }
+    return MV_OK;
+}
+/*******************************************************************************
+* mvSdmmcAddrDecShow - Print the SDMMC address decode map.
+*
+* DESCRIPTION:
+*		This function print the SDMMC address decode map.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_VOID mvSdmmcAddrDecShow(MV_VOID)
+{
+
+	MV_SDMMC_DEC_WIN win;
+	int i,j=0;
+
+
+
+	if (MV_FALSE == mvCtrlPwrClckGet(SDIO_UNIT_ID, 0))
+		return;
+
+	mvOsOutput( "\n" );
+	mvOsOutput( "SDMMC %d:\n", j );
+	mvOsOutput( "----\n" );
+
+	for( i = 0; i < MV_SDMMC_MAX_ADDR_DECODE_WIN; i++ )
+	{
+            memset( &win, 0, sizeof(MV_SDMMC_DEC_WIN) );
+
+	    mvOsOutput( "win%d - ", i );
+
+	    if( mvSdmmcWinGet(j, i, &win ) == MV_OK )
+	    {
+	        if( win.enable )
+	        {
+                    mvOsOutput( "%s base %08x, ",
+                    mvCtrlTargetNameGet(win.target), win.addrWin.baseLow );
+                    mvOsOutput( "...." );
+
+                    mvSizePrint( win.addrWin.size );
+
+		    mvOsOutput( "\n" );
+                }
+		else
+		mvOsOutput( "disable\n" );
+	    }
+	}
+}
+
+
+/*******************************************************************************
+* mvSdmmcWinInit - Initialize the integrated SDMMC target address window.
+*
+* DESCRIPTION:
+*       Initialize the SDMMC peripheral target address window.
+*
+* INPUT:
+*
+*
+* OUTPUT:
+*
+*
+* RETURN:
+*       MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+MV_STATUS mvSdmmcWinInit(MV_VOID)
+{
+    int             winNum;
+    MV_SDMMC_DEC_WIN  sdmmcWin;
+    MV_CPU_DEC_WIN  cpuAddrDecWin;
+    MV_U32          status, winPrioIndex = 0;
+
+    /* Initiate Sdmmc address decode */
+
+    /* First disable all address decode windows */
+    for(winNum = 0; winNum < MV_SDMMC_MAX_ADDR_DECODE_WIN; winNum++)
+    {
+        MV_U32  regVal = MV_REG_READ(MV_SDMMC_WIN_CTRL_REG(0, winNum));
+        regVal &= ~MV_SDMMC_WIN_ENABLE_MASK;
+        MV_REG_WRITE(MV_SDMMC_WIN_CTRL_REG(0, winNum), regVal);
+    }
+
+    winNum = 0;
+    while( (sdmmcAddrDecPrioTab[winPrioIndex] != TBL_TERM) &&
+           (winNum < MV_SDMMC_MAX_ADDR_DECODE_WIN) )
+    {
+        /* first get attributes from CPU If */
+        status = mvCpuIfTargetWinGet(sdmmcAddrDecPrioTab[winPrioIndex],
+                                     &cpuAddrDecWin);
+
+        if(MV_NO_SUCH == status)
+        {
+            winPrioIndex++;
+            continue;
+        }
+	if (MV_OK != status)
+	{
+            mvOsPrintf("%s: ERR. mvCpuIfTargetWinGet failed\n", __FUNCTION__);
+	    return MV_ERROR;
+	}
+
+        if (cpuAddrDecWin.enable == MV_TRUE)
+        {
+            sdmmcWin.addrWin.baseHigh = cpuAddrDecWin.addrWin.baseHigh;
+            sdmmcWin.addrWin.baseLow  = cpuAddrDecWin.addrWin.baseLow;
+            sdmmcWin.addrWin.size     = cpuAddrDecWin.addrWin.size;
+            sdmmcWin.enable           = MV_TRUE;
+            sdmmcWin.target           = sdmmcAddrDecPrioTab[winPrioIndex];
+
+            if(MV_OK != mvSdmmcWinSet(0/*dev*/, winNum, &sdmmcWin))
+            {
+                return MV_ERROR;
+            }
+            winNum++;
+        }
+        winPrioIndex++;
+    }
+    return MV_OK;
+}
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSdmmc.h b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSdmmc.h
new file mode 100644
index 000000000000..d52d19e543d0
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysSdmmc.h
@@ -0,0 +1,120 @@
+
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#ifndef __INCMVSysSdmmcAddrDech
+#define __INCMVSysSdmmcAddrDech
+
+#include "mvCommon.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct _mvSdmmcDecWin
+{
+    MV_TARGET     target;
+    MV_ADDR_WIN   addrWin;    /* An address window*/
+    MV_BOOL       enable;     /* Address decode window is enabled/disabled    */
+
+} MV_SDMMC_DEC_WIN;
+
+
+#define MV_SDMMC_MAX_ADDR_DECODE_WIN 4
+
+#define MV_SDMMC_WIN_CTRL_REG(dev, win)        (MV_SDIO_REG_BASE + 0x108 + ((win)<<3))
+#define MV_SDMMC_WIN_BASE_REG(dev, win)        (MV_SDIO_REG_BASE + 0x10c + ((win)<<3))
+
+
+/* BITs in Windows 0-3 Control and Base Registers */
+#define MV_SDMMC_WIN_ENABLE_BIT               0
+#define MV_SDMMC_WIN_ENABLE_MASK              (1<<MV_SDMMC_WIN_ENABLE_BIT)
+
+#define MV_SDMMC_WIN_TARGET_OFFSET            4
+#define MV_SDMMC_WIN_TARGET_MASK              (0xF<<MV_SDMMC_WIN_TARGET_OFFSET)
+
+#define MV_SDMMC_WIN_ATTR_OFFSET              8
+#define MV_SDMMC_WIN_ATTR_MASK                (0xFF<<MV_SDMMC_WIN_ATTR_OFFSET)
+
+#define MV_SDMMC_WIN_SIZE_OFFSET              16
+#define MV_SDMMC_WIN_SIZE_MASK                (0xFFFF<<MV_SDMMC_WIN_SIZE_OFFSET)
+
+#define MV_SDMMC_WIN_BASE_OFFSET              16
+#define MV_SDMMC_WIN_BASE_MASK                (0xFFFF<<MV_SDMMC_WIN_BASE_OFFSET)
+
+MV_STATUS mvSdmmcWinGet(int dev, MV_U32 winNum, MV_SDMMC_DEC_WIN *pAddrDecWin);
+MV_STATUS mvSdmmcWinSet(int dev, MV_U32 winNum, MV_SDMMC_DEC_WIN *pAddrDecWin);
+MV_STATUS mvSdmmcWinByTargetGet(MV_TARGET target, MV_SDMMC_DEC_WIN *pAddrDecWin);
+MV_STATUS mvSdmmcWinInit(MV_VOID);
+MV_VOID   mvSdmmcAddrDecShow(MV_VOID);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTdm.c b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTdm.c
new file mode 100644
index 000000000000..2f56e60bd184
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTdm.c
@@ -0,0 +1,461 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvSysTdm.h"
+
+
+/* defines  */
+#ifdef MV_DEBUG
+	#define DB(x)	x
+#else
+	#define DB(x)
+#endif
+
+static MV_TARGET tdmAddrDecPrioTap[] =
+{
+        PEX0_MEM,
+        SDRAM_CS0,
+        SDRAM_CS1,
+        SDRAM_CS2,
+        SDRAM_CS3,
+        DEVICE_CS0,
+        DEVICE_CS1,
+        DEVICE_CS2,
+        DEV_BOOCS,
+        PEX0_IO,
+        TBL_TERM
+};
+
+static MV_STATUS tdmWinOverlapDetect(MV_U32 winNum, MV_ADDR_WIN *pAddrWin);
+
+/*******************************************************************************
+* mvTdmWinInit - Initialize TDM address decode windows
+*
+* DESCRIPTION:
+*               This function initialize TDM window decode unit. It set the
+*               default address decode
+*               windows of the unit.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_ERROR if setting fail.
+*******************************************************************************/
+
+MV_STATUS mvTdmWinInit(void)
+{
+	MV_U32 		winNum;
+	MV_U32		winPrioIndex = 0;
+	MV_CPU_DEC_WIN cpuAddrDecWin;
+	MV_TDM_DEC_WIN tdmWin;
+	MV_STATUS status;
+
+	/*Disable all windows*/
+	for (winNum = 0; winNum < TDM_MBUS_MAX_WIN; winNum++)
+	{
+		mvTdmWinEnable(winNum, MV_FALSE);
+	}
+
+	for (winNum = 0; ((tdmAddrDecPrioTap[winPrioIndex] != TBL_TERM) &&
+					  (winNum < TDM_MBUS_MAX_WIN)); )
+	{
+		status = mvCpuIfTargetWinGet(tdmAddrDecPrioTap[winPrioIndex],
+									 &cpuAddrDecWin);
+        if (MV_NO_SUCH == status)
+        {
+            winPrioIndex++;
+            continue;
+        }
+		if (MV_OK != status)
+		{
+			mvOsPrintf("mvTdmInit: ERR. mvCpuIfTargetWinGet failed\n");
+			return MV_ERROR;
+		}
+
+        if (cpuAddrDecWin.enable == MV_TRUE)
+		{
+			tdmWin.addrWin.baseHigh = cpuAddrDecWin.addrWin.baseHigh;
+			tdmWin.addrWin.baseLow = cpuAddrDecWin.addrWin.baseLow;
+			tdmWin.addrWin.size = cpuAddrDecWin.addrWin.size;
+			tdmWin.enable = MV_TRUE;
+		    tdmWin.target = tdmAddrDecPrioTap[winPrioIndex];
+		    if (MV_OK != mvTdmWinSet(winNum, &tdmWin))
+		    {
+			    return MV_ERROR;
+		    }
+		    winNum++;
+		}
+		winPrioIndex++;
+    }
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvTdmWinSet - Set TDM target address window
+*
+* DESCRIPTION:
+*       This function sets a peripheral target (e.g. SDRAM bank0, PCI_MEM0)
+*       address window, also known as address decode window.
+*       After setting this target window, the TDM will be able to access the
+*       target within the address window.
+*
+* INPUT:
+*       winNum      - TDM to target address decode window number.
+*       pAddrDecWin - TDM target window data structure.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_ERROR if address window overlapps with other address decode windows.
+*       MV_BAD_PARAM if base address is invalid parameter or target is
+*       unknown.
+*
+*******************************************************************************/
+
+MV_STATUS mvTdmWinSet(MV_U32 winNum, MV_TDM_DEC_WIN *pAddrDecWin)
+{
+	MV_TARGET_ATTRIB targetAttribs;
+	MV_DEC_REGS decRegs;
+	MV_U32 ctrlReg = 0;
+
+    /* Parameter checking   */
+    if (winNum >= TDM_MBUS_MAX_WIN)
+    {
+		mvOsPrintf("mvTdmWinSet: ERR. Invalid win num %d\n",winNum);
+        return MV_BAD_PARAM;
+    }
+
+    /* Check if the requested window overlapps with current windows         */
+    if (MV_TRUE == tdmWinOverlapDetect(winNum, &pAddrDecWin->addrWin))
+	{
+	mvOsPrintf("mvTdmWinSet: ERR. Window %d overlap\n", winNum);
+		return MV_ERROR;
+	}
+
+	/* check if address is aligned to the size */
+	if (MV_IS_NOT_ALIGN(pAddrDecWin->addrWin.baseLow, pAddrDecWin->addrWin.size))
+	{
+		mvOsPrintf("mvTdmWinSet: Error setting TDM window %d to "\
+				   "target %s.\nAddress 0x%08x is unaligned to size 0x%x.\n",
+				   winNum,
+				   mvCtrlTargetNameGet(pAddrDecWin->target),
+				   pAddrDecWin->addrWin.baseLow,
+				   pAddrDecWin->addrWin.size);
+		return MV_ERROR;
+	}
+
+	decRegs.baseReg = MV_REG_READ(TDM_WIN_BASE_REG(winNum));
+	decRegs.sizeReg = (MV_REG_READ(TDM_WIN_CTRL_REG(winNum)) & TDM_WIN_SIZE_MASK) >>  TDM_WIN_SIZE_OFFS;
+
+	if (MV_OK != mvCtrlAddrDecToReg(&(pAddrDecWin->addrWin),&decRegs))
+	{
+			mvOsPrintf("mvTdmWinSet: mvCtrlAddrDecToReg Failed\n");
+			return MV_ERROR;
+	}
+
+	mvCtrlAttribGet(pAddrDecWin->target, &targetAttribs);
+
+	/* for the safe side we disable the window before writing the new
+	values */
+	mvTdmWinEnable(winNum, MV_FALSE);
+
+	ctrlReg |= (targetAttribs.attrib << TDM_WIN_ATTRIB_OFFS);
+	ctrlReg |= (targetAttribs.targetId << TDM_WIN_TARGET_OFFS);
+	ctrlReg |= (decRegs.sizeReg & TDM_WIN_SIZE_MASK);
+
+	/* Write to address base and control registers  */
+	MV_REG_WRITE(TDM_WIN_BASE_REG(winNum), decRegs.baseReg);
+	MV_REG_WRITE(TDM_WIN_CTRL_REG(winNum), ctrlReg);
+	/* Enable address decode target window  */
+	if (pAddrDecWin->enable == MV_TRUE)
+	{
+		mvTdmWinEnable(winNum, MV_TRUE);
+	}
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvTdmWinGet - Get peripheral target address window.
+*
+* DESCRIPTION:
+*               Get TDM peripheral target address window.
+*
+* INPUT:
+*       winNum - TDM to target address decode window number.
+*
+* OUTPUT:
+*       pAddrDecWin - TDM target window data structure.
+*
+* RETURN:
+*       MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+
+MV_STATUS mvTdmWinGet(MV_U32 winNum, MV_TDM_DEC_WIN *pAddrDecWin)
+{
+
+	MV_DEC_REGS decRegs;
+	MV_TARGET_ATTRIB targetAttrib;
+
+	/* Parameter checking   */
+	if (winNum >= TDM_MBUS_MAX_WIN)
+	{
+		mvOsPrintf("mvTdmWinGet: ERR. Invalid winNum %d\n", winNum);
+		return MV_NOT_SUPPORTED;
+	}
+
+	decRegs.baseReg =  MV_REG_READ(TDM_WIN_BASE_REG(winNum));
+	decRegs.sizeReg = (MV_REG_READ(TDM_WIN_CTRL_REG(winNum)) & TDM_WIN_SIZE_MASK) >>  TDM_WIN_SIZE_OFFS;
+
+	if (MV_OK != mvCtrlRegToAddrDec(&decRegs,&(pAddrDecWin->addrWin)))
+	{
+		mvOsPrintf("mvTdmWinGet: mvCtrlRegToAddrDec Failed \n");
+		return MV_ERROR;
+	}
+
+	/* attrib and targetId */
+	targetAttrib.attrib =
+		(MV_REG_READ(TDM_WIN_CTRL_REG(winNum)) & TDM_WIN_ATTRIB_MASK) >>  TDM_WIN_ATTRIB_OFFS;
+	targetAttrib.targetId =
+		(MV_REG_READ(TDM_WIN_CTRL_REG(winNum)) & TDM_WIN_TARGET_MASK) >>  TDM_WIN_TARGET_OFFS;
+
+	pAddrDecWin->target = mvCtrlTargetGet(&targetAttrib);
+
+	/* Check if window is enabled   */
+	if (MV_REG_READ(TDM_WIN_CTRL_REG(winNum)) & TDM_WIN_ENABLE_MASK)
+	{
+		pAddrDecWin->enable = MV_TRUE;
+	}
+	else
+	{
+		pAddrDecWin->enable = MV_FALSE;
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvTdmWinEnable - Enable/disable a TDM to target address window
+*
+* DESCRIPTION:
+*       This function enable/disable a TDM to target address window.
+*       According to parameter 'enable' the routine will enable the
+*       window, thus enabling TDM accesses (before enabling the window it is
+*       tested for overlapping). Otherwise, the window will be disabled.
+*
+* INPUT:
+*       winNum - TDM to target address decode window number.
+*       enable - Enable/disable parameter.
+*
+* OUTPUT:
+*       N/A
+*
+* RETURN:
+*       MV_ERROR if decode window number was wrong or enabled window overlapps.
+*
+*******************************************************************************/
+MV_STATUS mvTdmWinEnable(int winNum, MV_BOOL enable)
+{
+	MV_TDM_DEC_WIN addrDecWin;
+
+	if (MV_TRUE == enable)
+	{
+		if (winNum >= TDM_MBUS_MAX_WIN)
+		{
+			mvOsPrintf("mvTdmWinEnable:ERR. Invalid winNum%d\n",winNum);
+			return MV_ERROR;
+		}
+
+		/* First check for overlap with other enabled windows				*/
+		/* Get current window */
+		if (MV_OK != mvTdmWinGet(winNum, &addrDecWin))
+		{
+			mvOsPrintf("mvTdmWinEnable:ERR. targetWinGet fail\n");
+			return MV_ERROR;
+		}
+		/* Check for overlapping */
+		if (MV_FALSE == tdmWinOverlapDetect(winNum, &(addrDecWin.addrWin)))
+		{
+			/* No Overlap. Enable address decode target window */
+			MV_REG_BIT_SET(TDM_WIN_CTRL_REG(winNum), TDM_WIN_ENABLE_MASK);
+		}
+		else
+		{   /* Overlap detected	*/
+			mvOsPrintf("mvTdmWinEnable:ERR. Overlap detected\n");
+			return MV_ERROR;
+		}
+	}
+	else
+	{
+		MV_REG_BIT_RESET(TDM_WIN_CTRL_REG(winNum), TDM_WIN_ENABLE_MASK);
+	}
+	return MV_OK;
+}
+
+
+/*******************************************************************************
+* tdmWinOverlapDetect - Detect TDM address windows overlapping
+*
+* DESCRIPTION:
+*       An unpredicted behaviour is expected in case TDM address decode
+*       windows overlapps.
+*       This function detects TDM address decode windows overlapping of a
+*       specified window. The function does not check the window itself for
+*       overlapping. The function also skipps disabled address decode windows.
+*
+* INPUT:
+*       winNum      - address decode window number.
+*       pAddrDecWin - An address decode window struct.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if the given address window overlap current address
+*       decode map, MV_FALSE otherwise, MV_ERROR if reading invalid data
+*       from registers.
+*
+*******************************************************************************/
+static MV_STATUS tdmWinOverlapDetect(MV_U32 winNum, MV_ADDR_WIN *pAddrWin)
+{
+    MV_U32      	winNumIndex;
+    MV_TDM_DEC_WIN  	addrDecWin;
+
+    for (winNumIndex = 0; winNumIndex < TDM_MBUS_MAX_WIN; winNumIndex++)
+    {
+		/* Do not check window itself		*/
+        if (winNumIndex == winNum)
+		{
+			continue;
+		}
+		/* Do not check disabled windows	*/
+		if (MV_REG_READ(TDM_WIN_CTRL_REG(winNum)) & TDM_WIN_ENABLE_MASK)
+		{
+			/* Get window parameters 	*/
+			if (MV_OK != mvTdmWinGet(winNumIndex, &addrDecWin))
+			{
+				DB(mvOsPrintf("dmaWinOverlapDetect: ERR. TargetWinGet failed\n"));
+			return MV_ERROR;
+			}
+
+			if (MV_TRUE == ctrlWinOverlapTest(pAddrWin, &(addrDecWin.addrWin)))
+			{
+				return MV_TRUE;
+			}
+		}
+    }
+	return MV_FALSE;
+}
+
+/*******************************************************************************
+* mvTdmAddrDecShow - Print the TDM address decode map.
+*
+* DESCRIPTION:
+*       This function print the TDM address decode map.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_VOID mvTdmAddrDecShow(MV_VOID)
+{
+	MV_TDM_DEC_WIN win;
+	int i;
+
+	mvOsOutput( "\n" );
+	mvOsOutput( "TDM:\n" );
+	mvOsOutput( "----\n" );
+
+	for( i = 0; i < TDM_MBUS_MAX_WIN; i++ )
+	{
+		memset( &win, 0, sizeof(MV_TDM_DEC_WIN) );
+
+		mvOsOutput( "win%d - ", i );
+
+		if (mvTdmWinGet(i, &win ) == MV_OK )
+		{
+			if( win.enable )
+			{
+                mvOsOutput( "%s base %08x, ",
+                mvCtrlTargetNameGet(win.target), win.addrWin.baseLow);
+                mvOsOutput( "...." );
+                mvSizePrint( win.addrWin.size );
+				mvOsOutput( "\n" );
+			}
+			else
+				mvOsOutput( "disable\n" );
+		}
+	}
+}
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTdm.h b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTdm.h
new file mode 100644
index 000000000000..88810ff188f9
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTdm.h
@@ -0,0 +1,105 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvSysTdmh
+#define __INCmvSysTdmh
+
+#include "ctrlEnv/sys/mvCpuIf.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvAddrDec.h"
+
+typedef struct _mvTdmDecWin
+{
+        MV_TARGET     target;
+        MV_ADDR_WIN   addrWin; /* An address window*/
+        MV_BOOL       enable;  /* Address decode window is enabled/disabled */
+} MV_TDM_DEC_WIN;
+
+MV_STATUS mvTdmWinInit(MV_VOID);
+MV_STATUS mvTdmWinSet(MV_U32 winNum, MV_TDM_DEC_WIN *pAddrDecWin);
+MV_STATUS mvTdmWinGet(MV_U32 winNum, MV_TDM_DEC_WIN *pAddrDecWin);
+MV_STATUS mvTdmWinEnable(int winNum, MV_BOOL enable);
+MV_VOID mvTdmAddrDecShow(MV_VOID);
+
+
+#define TDM_MBUS_MAX_WIN	4
+#define TDM_WIN_CTRL_REG(win)	((TDM_REG_BASE + 0x4030) + (win<<4))
+#define TDM_WIN_BASE_REG(win)	((TDM_REG_BASE +0x4034) + (win<<4))
+
+/* TDM_WIN_CTRL_REG bits */
+#define TDM_WIN_ENABLE_OFFS	0
+#define TDM_WIN_ENABLE_MASK	(1<<TDM_WIN_ENABLE_OFFS)
+#define TDM_WIN_ENABLE		1
+#define TDM_WIN_TARGET_OFFS	4
+#define TDM_WIN_TARGET_MASK	(0xf<<TDM_WIN_TARGET_OFFS)
+#define TDM_WIN_ATTRIB_OFFS	8
+#define TDM_WIN_ATTRIB_MASK	(0xff<<TDM_WIN_ATTRIB_OFFS)
+#define TDM_WIN_SIZE_OFFS	16
+#define TDM_WIN_SIZE_MASK	(0xffff<<TDM_WIN_SIZE_OFFS)
+
+/* TDM_WIN_BASE_REG bits */
+#define TDM_BASE_OFFS		16
+#define TDM_BASE_MASK		(0xffff<<TDM_BASE_OFFS)
+
+#endif /*__INCmvSysTdmh*/
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTs.c b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTs.c
new file mode 100644
index 000000000000..1e58a60b39bf
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTs.c
@@ -0,0 +1,591 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#include "ctrlEnv/sys/mvSysTs.h"
+
+
+typedef struct _mvTsuDecWin
+{
+        MV_TARGET     target;
+        MV_ADDR_WIN   addrWin; /* An address window*/
+        MV_BOOL       enable;  /* Address decode window is enabled/disabled */
+
+}MV_TSU_DEC_WIN;
+
+
+MV_TARGET tsuAddrDecPrioTap[] =
+{
+#if defined(MV_INCLUDE_PEX)
+        PEX0_MEM,
+#endif
+#if defined(MV_INCLUDE_PCI)
+        PCI0_MEM,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS0)
+        SDRAM_CS0,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS1)
+        SDRAM_CS1,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS2)
+        SDRAM_CS2,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS3)
+        SDRAM_CS3,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS0)
+        DEVICE_CS0,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS1)
+        DEVICE_CS1,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS2)
+        DEVICE_CS2,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS3)
+        DEVICE_CS3,
+#endif
+#if defined(MV_INCLUDE_PEX)
+        PEX0_IO,
+#endif
+#if defined(MV_INCLUDE_PCI)
+        PCI0_IO,
+#endif
+        TBL_TERM
+};
+
+static MV_STATUS tsuWinOverlapDetect(MV_U32 winNum, MV_ADDR_WIN *pAddrWin);
+static MV_STATUS mvTsuWinSet(MV_U32 winNum, MV_TSU_DEC_WIN *pAddrDecWin);
+static MV_STATUS mvTsuWinGet(MV_U32 winNum, MV_TSU_DEC_WIN *pAddrDecWin);
+MV_STATUS mvTsuWinEnable(MV_U32 winNum,MV_BOOL enable);
+
+/*******************************************************************************
+* mvTsuWinInit
+*
+* DESCRIPTION:
+* 	Initialize the TSU unit address decode windows.
+*
+* INPUT:
+*       None.
+* OUTPUT:
+*	None.
+* RETURN:
+*       MV_OK	- on success,
+*
+*******************************************************************************/
+MV_STATUS mvTsuWinInit(void)
+{
+	MV_U32          winNum, status, winPrioIndex=0;
+	MV_TSU_DEC_WIN  tsuWin;
+	MV_CPU_DEC_WIN  cpuAddrDecWin;
+
+	/* First disable all address decode windows */
+	for(winNum = 0; winNum < TSU_MAX_DECODE_WIN; winNum++)
+	{
+		MV_REG_BIT_RESET(MV_TSU_WIN_CTRL_REG(winNum),
+				 TSU_WIN_CTRL_EN_MASK);
+	}
+
+	/* Go through all windows in user table until table terminator      */
+	for(winNum = 0; ((tsuAddrDecPrioTap[winPrioIndex] != TBL_TERM) &&
+			 (winNum < TSU_MAX_DECODE_WIN));)
+	{
+		/* first get attributes from CPU If */
+		status = mvCpuIfTargetWinGet(tsuAddrDecPrioTap[winPrioIndex],
+					     &cpuAddrDecWin);
+
+		if(MV_NO_SUCH == status)
+		{
+			winPrioIndex++;
+			continue;
+		}
+		if(MV_OK != status)
+		{
+			mvOsPrintf("mvTsuWinInit: ERR. mvCpuIfTargetWinGet failed\n");
+			return MV_ERROR;
+		}
+
+		if (cpuAddrDecWin.enable == MV_TRUE)
+		{
+			tsuWin.addrWin.baseHigh = cpuAddrDecWin.addrWin.baseHigh;
+			tsuWin.addrWin.baseLow = cpuAddrDecWin.addrWin.baseLow;
+			tsuWin.addrWin.size = cpuAddrDecWin.addrWin.size;
+			tsuWin.enable = MV_TRUE;
+			tsuWin.target = tsuAddrDecPrioTap[winPrioIndex];
+
+			if(MV_OK != mvTsuWinSet(winNum, &tsuWin))
+			{
+				mvOsPrintf("mvTsuWinInit: ERR. mvTsuWinSet failed winNum=%d\n",
+					   winNum);
+				return MV_ERROR;
+			}
+			winNum++;
+		}
+		winPrioIndex ++;
+	}
+
+	return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvTsuWinSet
+*
+* DESCRIPTION:
+*       This function sets a peripheral target (e.g. SDRAM bank0, PCI_MEM0)
+*       address window, also known as address decode window.
+*       After setting this target window, the TSU will be able to access the
+*       target within the address window.
+*
+* INPUT:
+*       winNum      - TSU to target address decode window number.
+*       pAddrDecWin - TSU target window data structure.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_ERROR	- if address window overlapps with other address decode
+*			windows.
+*       MV_BAD_PARAM	- if base address is invalid parameter or target is
+*       		unknown.
+*
+*******************************************************************************/
+MV_STATUS mvTsuWinSet(MV_U32 winNum, MV_TSU_DEC_WIN *pAddrDecWin)
+{
+	MV_TARGET_ATTRIB    targetAttribs;
+	MV_DEC_REGS         decRegs;
+
+	/* Parameter checking   */
+	if(winNum >= TSU_MAX_DECODE_WIN)
+	{
+		mvOsPrintf("mvTsuWinSet: ERR. Invalid win num %d\n",winNum);
+		return MV_BAD_PARAM;
+	}
+
+	/* Check if the requested window overlapps with current windows     */
+	if(MV_TRUE == tsuWinOverlapDetect(winNum, &pAddrDecWin->addrWin))
+	{
+		mvOsPrintf("mvTsuWinSet: ERR. Window %d overlap\n", winNum);
+		return MV_ERROR;
+	}
+
+	/* check if address is aligned to the size */
+	if(MV_IS_NOT_ALIGN(pAddrDecWin->addrWin.baseLow,pAddrDecWin->addrWin.size))
+	{
+		mvOsPrintf("mvTsuWinSet: Error setting TSU window %d to target "
+			   "%s.\nAddress 0x%08x is unaligned to size 0x%x.\n",
+			   winNum, mvCtrlTargetNameGet(pAddrDecWin->target),
+			   pAddrDecWin->addrWin.baseLow,
+			   pAddrDecWin->addrWin.size);
+		return MV_ERROR;
+	}
+
+	decRegs.baseReg = MV_REG_READ(MV_TSU_WIN_BASE_REG(winNum));
+	decRegs.sizeReg = MV_REG_READ(MV_TSU_WIN_CTRL_REG(winNum));
+
+	if(MV_OK != mvCtrlAddrDecToReg(&(pAddrDecWin->addrWin),&decRegs))
+	{
+		mvOsPrintf("mvTsuWinSet: mvCtrlAddrDecToReg Failed\n");
+		return MV_ERROR;
+	}
+
+	mvCtrlAttribGet(pAddrDecWin->target,&targetAttribs);
+
+	/* set attributes */
+	decRegs.sizeReg &= ~TSU_WIN_CTRL_ATTR_MASK;
+	decRegs.sizeReg |= targetAttribs.attrib << TSU_WIN_CTRL_ATTR_OFFS;
+	/* set target ID */
+	decRegs.sizeReg &= ~TSU_WIN_CTRL_TARGET_MASK;
+	decRegs.sizeReg |= targetAttribs.targetId << TSU_WIN_CTRL_TARGET_OFFS;
+
+	/* for the safe side we disable the window before writing the new */
+	/* values */
+	mvTsuWinEnable(winNum, MV_FALSE);
+	MV_REG_WRITE(MV_TSU_WIN_CTRL_REG(winNum),decRegs.sizeReg);
+
+	/* Write to address decode Size Register                            */
+	MV_REG_WRITE(MV_TSU_WIN_BASE_REG(winNum), decRegs.baseReg);
+
+	/* Enable address decode target window                              */
+	if(pAddrDecWin->enable == MV_TRUE)
+	{
+		mvTsuWinEnable(winNum,MV_TRUE);
+	}
+
+	return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvTsuWinGet
+*
+* DESCRIPTION:
+*	Get TSU peripheral target address window.
+*
+* INPUT:
+*	winNum - TSU to target address decode window number.
+*
+* OUTPUT:
+*       pAddrDecWin - TSU target window data structure.
+*
+* RETURN:
+*       MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+MV_STATUS mvTsuWinGet(MV_U32 winNum, MV_TSU_DEC_WIN *pAddrDecWin)
+{
+	MV_DEC_REGS decRegs;
+	MV_TARGET_ATTRIB targetAttrib;
+
+	/* Parameter checking   */
+	if(winNum >= TSU_MAX_DECODE_WIN)
+	{
+		mvOsPrintf("mvTsuWinGet: ERR. Invalid winNum %d\n", winNum);
+		return MV_NOT_SUPPORTED;
+	}
+
+	decRegs.baseReg = MV_REG_READ(MV_TSU_WIN_BASE_REG(winNum));
+	decRegs.sizeReg = MV_REG_READ(MV_TSU_WIN_CTRL_REG(winNum));
+
+	if(MV_OK != mvCtrlRegToAddrDec(&decRegs,&(pAddrDecWin->addrWin)))
+	{
+		mvOsPrintf("mvTsuWinGet: mvCtrlRegToAddrDec Failed \n");
+		return MV_ERROR;
+	}
+
+	/* attrib and targetId */
+	targetAttrib.attrib =
+		(decRegs.sizeReg & TSU_WIN_CTRL_ATTR_MASK) >> TSU_WIN_CTRL_ATTR_OFFS;
+	targetAttrib.targetId =
+		(decRegs.sizeReg & TSU_WIN_CTRL_TARGET_MASK) >> TSU_WIN_CTRL_TARGET_OFFS;
+
+	pAddrDecWin->target = mvCtrlTargetGet(&targetAttrib);
+
+	/* Check if window is enabled   */
+	if((MV_REG_READ(MV_TSU_WIN_CTRL_REG(winNum)) & TSU_WIN_CTRL_EN_MASK))
+	{
+		pAddrDecWin->enable = MV_TRUE;
+	}
+	else
+	{
+		pAddrDecWin->enable = MV_FALSE;
+	}
+
+	return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvTsuWinEnable
+*
+* DESCRIPTION:
+*       This function enable/disable a TSU to target address window.
+*       According to parameter 'enable' the routine will enable the
+*       window, thus enabling TSU accesses (before enabling the window it is
+*       tested for overlapping). Otherwise, the window will be disabled.
+*
+* INPUT:
+*       winNum - TSU to target address decode window number.
+*       enable - Enable / disable parameter.
+*
+* OUTPUT:
+*       N/A
+*
+* RETURN:
+*       MV_ERROR if decode window number was wrong or enabled window overlapps.
+*
+*******************************************************************************/
+MV_STATUS mvTsuWinEnable(MV_U32 winNum,MV_BOOL enable)
+{
+	MV_TSU_DEC_WIN addrDecWin;
+
+	/* Parameter checking   */
+	if(winNum >= TSU_MAX_DECODE_WIN)
+	{
+		mvOsPrintf("mvTsuWinEnable: ERR. Invalid winNum%d\n",winNum);
+		return MV_ERROR;
+	}
+
+	if(enable == MV_TRUE)
+	{
+		/* First check for overlap with other enabled windows   */
+		/* Get current window.					*/
+		if(MV_OK != mvTsuWinGet(winNum,&addrDecWin))
+		{
+			mvOsPrintf("mvTsuWinEnable: ERR. targetWinGet fail\n");
+			return MV_ERROR;
+		}
+		/* Check for overlapping.	*/
+		if(MV_FALSE == tsuWinOverlapDetect(winNum,&(addrDecWin.addrWin)))
+		{
+			/* No Overlap. Enable address decode target window   */
+			MV_REG_BIT_SET(MV_TSU_WIN_CTRL_REG(winNum),
+				       TSU_WIN_CTRL_EN_MASK);
+		}
+		else
+		{
+			/* Overlap detected */
+			mvOsPrintf("mvTsuWinEnable: ERR. Overlap detected\n");
+			return MV_ERROR;
+		}
+	}
+	else
+	{
+		/* Disable address decode target window */
+		MV_REG_BIT_RESET(MV_TSU_WIN_CTRL_REG(winNum),
+				 TSU_WIN_CTRL_EN_MASK);
+	}
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvTsuWinTargetGet
+*
+* DESCRIPTION:
+*	Get Window number associated with target
+*
+* INPUT:
+*	target	- Target ID to get the window number for.
+* OUTPUT:
+*
+* RETURN:
+*       window number or 0xFFFFFFFF on error.
+*
+*******************************************************************************/
+MV_U32  mvTsuWinTargetGet(MV_TARGET target)
+{
+	MV_TSU_DEC_WIN decWin;
+	MV_U32 winNum;
+
+	/* Check parameters */
+	if(target >= MAX_TARGETS)
+	{
+		mvOsPrintf("mvTsuWinTargetGet: target %d is Illigal\n", target);
+		return 0xffffffff;
+	}
+
+	for(winNum = 0; winNum < TSU_MAX_DECODE_WIN; winNum++)
+	{
+		if(mvTsuWinGet(winNum,&decWin) != MV_OK)
+		{
+			mvOsPrintf("mvTsuWinGet: window returned error\n");
+			return 0xffffffff;
+		}
+
+		if (decWin.enable == MV_TRUE)
+		{
+			if(decWin.target == target)
+			{
+				return winNum;
+			}
+		}
+	}
+	return 0xFFFFFFFF;
+}
+
+
+/*******************************************************************************
+* tsuWinOverlapDetect
+*
+* DESCRIPTION:
+*	Detect TSU address windows overlapping
+*	An unpredicted behaviur is expected in case TSU address decode
+*	windows overlapps.
+*	This function detects TSU address decode windows overlapping of a
+*	specified window. The function does not check the window itself for
+*	overlapping. The function also skipps disabled address decode windows.
+*
+* INPUT:
+*       winNum      - address decode window number.
+*       pAddrDecWin - An address decode window struct.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if the given address window overlap current address
+*       decode map, MV_FALSE otherwise, MV_ERROR if reading invalid data
+*       from registers.
+*
+*******************************************************************************/
+static MV_STATUS tsuWinOverlapDetect(MV_U32 winNum, MV_ADDR_WIN *pAddrWin)
+{
+	MV_U32              ctrlReg;
+	MV_U32              winNumIndex;
+	MV_TSU_DEC_WIN      addrDecWin;
+
+	for(winNumIndex = 0; winNumIndex < TSU_MAX_DECODE_WIN; winNumIndex++)
+	{
+		/* Do not check window itself           */
+		if(winNumIndex == winNum)
+		{
+			continue;
+		}
+
+		/* Do not check disabled windows        */
+		ctrlReg = MV_REG_READ(MV_TSU_WIN_CTRL_REG(winNumIndex));
+		if((ctrlReg & TSU_WIN_CTRL_EN_MASK) == 0)
+		{
+			continue;
+		}
+
+		/* Get window parameters        */
+		if (MV_OK != mvTsuWinGet(winNumIndex, &addrDecWin))
+		{
+			mvOsPrintf("tsuWinOverlapDetect: ERR. mvTsuWinGet failed\n");
+			return MV_ERROR;
+		}
+
+		if (MV_TRUE == ctrlWinOverlapTest(pAddrWin, &(addrDecWin.addrWin)))
+		{
+			return MV_TRUE;
+		}
+	}
+	return MV_FALSE;
+}
+
+
+/*******************************************************************************
+* mvTsuAddrDecShow
+*
+* DESCRIPTION:
+*	Print the TSU address decode map.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+void mvTsuAddrDecShow(void)
+{
+	MV_TSU_DEC_WIN  win;
+	int             i;
+
+	if (MV_FALSE == mvCtrlPwrClckGet(TS_UNIT_ID, 0))
+		return;
+
+	mvOsOutput( "\n" );
+	mvOsOutput( "TSU:\n");
+	mvOsOutput( "----\n" );
+
+	for(i = 0; i < TSU_MAX_DECODE_WIN; i++)
+	{
+		memset(&win, 0, sizeof(TSU_MAX_DECODE_WIN));
+		mvOsOutput( "win%d - ", i );
+
+		if(mvTsuWinGet(i, &win ) == MV_OK )
+		{
+			if(win.enable == MV_TRUE)
+			{
+				mvOsOutput("%s base %08x, ",
+					   mvCtrlTargetNameGet(win.target),
+					   win.addrWin.baseLow);
+				mvOsOutput( "...." );
+				mvSizePrint(win.addrWin.size );
+				mvOsOutput( "\n" );
+			}
+			else
+			{
+				mvOsOutput( "disable\n" );
+			}
+		}
+	}
+	return;
+}
+
+
+/*******************************************************************************
+* mvTsuInit
+*
+* DESCRIPTION:
+* 	Initialize the TSU unit, and get unit out of reset.
+*
+* INPUT:
+*       coreClock	- The core clock at which the TSU should operate.
+*       mode		- The mode on configure the unit into (serial/parallel).
+* 	memHandle	- Memory handle used for memory allocations.
+* OUTPUT:
+*	None.
+* RETURN:
+*       MV_OK	- on success,
+*
+*******************************************************************************/
+MV_STATUS mvTsuInit(MV_TSU_CORE_CLOCK coreClock, MV_TSU_PORTS_MODE mode,
+	            void *osHandle)
+{
+	MV_STATUS status;
+
+	status = mvTsuWinInit();
+	if(status == MV_OK)
+		status = mvTsuHalInit(coreClock,mode,osHandle);
+
+	return status;
+}
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTs.h b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTs.h
new file mode 100644
index 000000000000..1478b09cec02
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysTs.h
@@ -0,0 +1,110 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+        this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvSysTsh
+#define __INCmvSysTsh
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* includes */
+#include "ts/mvTsu.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvAddrDec.h"
+
+#define TSU_MAX_DECODE_WIN	4
+
+
+/*******************************************/
+/* TSU Windows Registers                   */
+/*******************************************/
+#define MV_TSU_WIN_CTRL_REG(win)	(TSU_GLOBAL_REG_BASE +0x30 + 0x10 * win)
+#define MV_TSU_WIN_BASE_REG(win)	(TSU_GLOBAL_REG_BASE +0x34 + 0x10 * win)
+
+/* TSU windows control register.		*/
+#define TSU_WIN_CTRL_EN_MASK		(0x1 << 0)
+#define TSU_WIN_CTRL_TARGET_OFFS	4
+#define TSU_WIN_CTRL_TARGET_MASK	(0xF << TSU_WIN_CTRL_TARGET_OFFS)
+#define TSU_WIN_CTRL_ATTR_OFFS		8
+#define TSU_WIN_CTRL_ATTR_MASK		(0xFF << TSU_WIN_CTRL_ATTR_OFFS)
+#define TSU_WIN_CTRL_SIZE_OFFS		16
+#define TSU_WIN_CTRL_SIZE_MASK		(0xFFFF << TSU_WIN_CTRL_SIZE_OFFS)
+
+/* TSU windows base register.			*/
+#define TSU_WIN_BASE_OFFS		16
+#define TSU_WIN_BASE_MASK		(0xFFFF << TSU_WIN_BASE_OFFS)
+
+MV_STATUS mvTsuWinInit(void);
+
+void mvTsuAddrDecShow(void);
+MV_STATUS mvTsuInit(MV_TSU_CORE_CLOCK coreClock, MV_TSU_PORTS_MODE mode,
+	            void *osHandle);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INCmvTsh */
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysUsb.c b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysUsb.c
new file mode 100644
index 000000000000..0e552e672155
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysUsb.c
@@ -0,0 +1,495 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+        this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "ctrlEnv/sys/mvSysUsb.h"
+
+MV_TARGET usbAddrDecPrioTab[] =
+{
+#if defined(MV_INCLUDE_SDRAM_CS0)
+    SDRAM_CS0,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS1)
+    SDRAM_CS1,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS2)
+    SDRAM_CS2,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS3)
+    SDRAM_CS3,
+#endif
+#if defined(MV_INCLUDE_CESA) && defined(USB_UNDERRUN_WA)
+    CRYPT_ENG,
+#endif
+#if defined(MV_INCLUDE_PEX)
+    PEX0_MEM,
+#endif
+    TBL_TERM
+};
+
+
+
+MV_STATUS   mvUsbInit(int dev, MV_BOOL isHost)
+{
+    MV_STATUS       status;
+
+    status = mvUsbWinInit(dev);
+    if(status != MV_OK)
+        return status;
+
+    return mvUsbHalInit(dev, isHost);
+}
+
+
+/*******************************************************************************
+* usbWinOverlapDetect - Detect USB address windows overlapping
+*
+* DESCRIPTION:
+*       An unpredicted behaviur is expected in case USB address decode
+*       windows overlapps.
+*       This function detects USB address decode windows overlapping of a
+*       specified window. The function does not check the window itself for
+*       overlapping. The function also skipps disabled address decode windows.
+*
+* INPUT:
+*       winNum      - address decode window number.
+*       pAddrDecWin - An address decode window struct.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if the given address window overlap current address
+*       decode map, MV_FALSE otherwise, MV_ERROR if reading invalid data
+*       from registers.
+*
+*******************************************************************************/
+static MV_STATUS usbWinOverlapDetect(int dev, MV_U32 winNum,
+                                     MV_ADDR_WIN *pAddrWin)
+{
+    MV_U32          winNumIndex;
+    MV_DEC_WIN      addrDecWin;
+
+    for(winNumIndex=0; winNumIndex<MV_USB_MAX_ADDR_DECODE_WIN; winNumIndex++)
+    {
+        /* Do not check window itself       */
+        if (winNumIndex == winNum)
+        {
+            continue;
+        }
+
+        /* Get window parameters    */
+        if (MV_OK != mvUsbWinGet(dev, winNumIndex, &addrDecWin))
+        {
+            mvOsPrintf("%s: ERR. TargetWinGet failed\n", __FUNCTION__);
+            return MV_ERROR;
+        }
+
+        /* Do not check disabled windows    */
+        if(addrDecWin.enable == MV_FALSE)
+        {
+            continue;
+        }
+
+        if (MV_TRUE == ctrlWinOverlapTest(pAddrWin, &(addrDecWin.addrWin)))
+        {
+            return MV_TRUE;
+        }
+    }
+    return MV_FALSE;
+}
+
+/*******************************************************************************
+* mvUsbWinSet - Set USB target address window
+*
+* DESCRIPTION:
+*       This function sets a peripheral target (e.g. SDRAM bank0, PCI_MEM0)
+*       address window, also known as address decode window.
+*       After setting this target window, the USB will be able to access the
+*       target within the address window.
+*
+* INPUT:
+*       winNum      - USB target address decode window number.
+*       pAddrDecWin - USB target window data structure.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_ERROR if address window overlapps with other address decode windows.
+*       MV_BAD_PARAM if base address is invalid parameter or target is
+*       unknown.
+*
+*******************************************************************************/
+MV_STATUS mvUsbWinSet(int dev, MV_U32 winNum, MV_DEC_WIN *pDecWin)
+{
+    MV_DEC_WIN_PARAMS   winParams;
+    MV_U32              sizeReg, baseReg;
+
+    /* Parameter checking   */
+    if (winNum >= MV_USB_MAX_ADDR_DECODE_WIN)
+    {
+        mvOsPrintf("%s: ERR. Invalid win num %d\n",__FUNCTION__, winNum);
+        return MV_BAD_PARAM;
+    }
+
+    /* Check if the requested window overlapps with current windows         */
+    if (MV_TRUE == usbWinOverlapDetect(dev, winNum, &pDecWin->addrWin))
+    {
+        mvOsPrintf("%s: ERR. Window %d overlap\n", __FUNCTION__, winNum);
+        return MV_ERROR;
+    }
+
+    /* check if address is aligned to the size */
+    if(MV_IS_NOT_ALIGN(pDecWin->addrWin.baseLow, pDecWin->addrWin.size))
+    {
+        mvOsPrintf("mvUsbWinSet:Error setting USB window %d to "\
+                   "target %s.\nAddress 0x%08x is unaligned to size 0x%x.\n",
+                   winNum,
+                   mvCtrlTargetNameGet(pDecWin->target),
+                   pDecWin->addrWin.baseLow,
+                   pDecWin->addrWin.size);
+        return MV_ERROR;
+    }
+
+    if(MV_OK != mvCtrlAddrDecToParams(pDecWin, &winParams))
+    {
+        mvOsPrintf("%s: mvCtrlAddrDecToParams Failed\n", __FUNCTION__);
+        return MV_ERROR;
+    }
+
+    /* set Size, Attributes and TargetID */
+    sizeReg = (((winParams.targetId << MV_USB_WIN_TARGET_OFFSET) & MV_USB_WIN_TARGET_MASK) |
+               ((winParams.attrib   << MV_USB_WIN_ATTR_OFFSET)   & MV_USB_WIN_ATTR_MASK)   |
+               ((winParams.size << MV_USB_WIN_SIZE_OFFSET) & MV_USB_WIN_SIZE_MASK));
+
+#if defined(MV645xx) || defined(MV646xx)
+    /* If window is DRAM with HW cache coherency, make sure bit2 is set */
+    sizeReg &= ~MV_USB_WIN_BURST_WR_LIMIT_MASK;
+
+    if((MV_TARGET_IS_DRAM(pDecWin->target)) &&
+       (pDecWin->addrWinAttr.cachePolicy != NO_COHERENCY))
+    {
+        sizeReg |= MV_USB_WIN_BURST_WR_32BIT_LIMIT;
+    }
+    else
+    {
+        sizeReg |= MV_USB_WIN_BURST_WR_NO_LIMIT;
+    }
+#endif /* MV645xx || MV646xx */
+
+    if (pDecWin->enable == MV_TRUE)
+    {
+        sizeReg |= MV_USB_WIN_ENABLE_MASK;
+    }
+    else
+    {
+        sizeReg &= ~MV_USB_WIN_ENABLE_MASK;
+    }
+
+    /* Update Base value  */
+    baseReg = (winParams.baseAddr & MV_USB_WIN_BASE_MASK);
+
+    MV_REG_WRITE( MV_USB_WIN_CTRL_REG(dev, winNum), sizeReg);
+    MV_REG_WRITE( MV_USB_WIN_BASE_REG(dev, winNum), baseReg);
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvUsbWinGet - Get USB peripheral target address window.
+*
+* DESCRIPTION:
+*       Get USB peripheral target address window.
+*
+* INPUT:
+*       winNum - USB target address decode window number.
+*
+* OUTPUT:
+*       pDecWin - USB target window data structure.
+*
+* RETURN:
+*       MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+MV_STATUS mvUsbWinGet(int dev, MV_U32 winNum, MV_DEC_WIN *pDecWin)
+{
+    MV_DEC_WIN_PARAMS   winParam;
+    MV_U32              sizeReg, baseReg;
+
+    /* Parameter checking   */
+    if (winNum >= MV_USB_MAX_ADDR_DECODE_WIN)
+    {
+        mvOsPrintf("%s (dev=%d): ERR. Invalid winNum %d\n",
+                    __FUNCTION__, dev, winNum);
+        return MV_NOT_SUPPORTED;
+    }
+
+    baseReg = MV_REG_READ( MV_USB_WIN_BASE_REG(dev, winNum) );
+    sizeReg = MV_REG_READ( MV_USB_WIN_CTRL_REG(dev, winNum) );
+
+   /* Check if window is enabled   */
+    if(sizeReg & MV_USB_WIN_ENABLE_MASK)
+    {
+        pDecWin->enable = MV_TRUE;
+
+        /* Extract window parameters from registers */
+        winParam.targetId = (sizeReg & MV_USB_WIN_TARGET_MASK) >> MV_USB_WIN_TARGET_OFFSET;
+        winParam.attrib   = (sizeReg & MV_USB_WIN_ATTR_MASK) >> MV_USB_WIN_ATTR_OFFSET;
+        winParam.size     = (sizeReg & MV_USB_WIN_SIZE_MASK) >> MV_USB_WIN_SIZE_OFFSET;
+        winParam.baseAddr = (baseReg & MV_USB_WIN_BASE_MASK);
+
+        /* Translate the decode window parameters to address decode struct */
+        if (MV_OK != mvCtrlParamsToAddrDec(&winParam, pDecWin))
+        {
+            mvOsPrintf("Failed to translate register parameters to USB address" \
+                       " decode window structure\n");
+            return MV_ERROR;
+        }
+    }
+    else
+    {
+        pDecWin->enable = MV_FALSE;
+    }
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvUsbWinInit -
+*
+* INPUT:
+*
+* OUTPUT:
+*
+* RETURN:
+*       MV_ERROR if register parameters are invalid.
+*
+*******************************************************************************/
+MV_STATUS   mvUsbWinInit(int dev)
+{
+    MV_STATUS       status;
+    MV_DEC_WIN      usbWin;
+    MV_CPU_DEC_WIN  cpuAddrDecWin;
+    int             winNum;
+    MV_U32          winPrioIndex = 0;
+
+    /* First disable all address decode windows */
+    for(winNum = 0; winNum < MV_USB_MAX_ADDR_DECODE_WIN; winNum++)
+    {
+        MV_REG_BIT_RESET(MV_USB_WIN_CTRL_REG(dev, winNum), MV_USB_WIN_ENABLE_MASK);
+    }
+
+    /* Go through all windows in user table until table terminator          */
+    winNum = 0;
+    while( (usbAddrDecPrioTab[winPrioIndex] != TBL_TERM) &&
+           (winNum < MV_USB_MAX_ADDR_DECODE_WIN) )
+    {
+        /* first get attributes from CPU If */
+        status = mvCpuIfTargetWinGet(usbAddrDecPrioTab[winPrioIndex],
+                                     &cpuAddrDecWin);
+
+        if(MV_NO_SUCH == status)
+        {
+            winPrioIndex++;
+            continue;
+        }
+        if (MV_OK != status)
+        {
+            mvOsPrintf("%s: ERR. mvCpuIfTargetWinGet failed\n", __FUNCTION__);
+            return MV_ERROR;
+        }
+
+        if (cpuAddrDecWin.enable == MV_TRUE)
+        {
+            usbWin.addrWin.baseHigh = cpuAddrDecWin.addrWin.baseHigh;
+            usbWin.addrWin.baseLow  = cpuAddrDecWin.addrWin.baseLow;
+            usbWin.addrWin.size     = cpuAddrDecWin.addrWin.size;
+            usbWin.enable           = MV_TRUE;
+            usbWin.target           = usbAddrDecPrioTab[winPrioIndex];
+
+#if defined(MV645xx) || defined(MV646xx)
+            /* Get the default attributes for that target window */
+            mvCtrlDefAttribGet(usbWin.target, &usbWin.addrWinAttr);
+#endif /* MV645xx || MV646xx */
+
+            if(MV_OK != mvUsbWinSet(dev, winNum, &usbWin))
+            {
+                return MV_ERROR;
+            }
+            winNum++;
+        }
+        winPrioIndex++;
+    }
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvUsbAddrDecShow - Print the USB address decode map.
+*
+* DESCRIPTION:
+*       This function print the USB address decode map.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_VOID mvUsbAddrDecShow(MV_VOID)
+{
+    MV_DEC_WIN  addrDecWin;
+    int         i, winNum;
+
+    mvOsOutput( "\n" );
+    mvOsOutput( "USB:\n" );
+    mvOsOutput( "----\n" );
+
+    for(i=0; i<mvCtrlUsbMaxGet(); i++)
+    {
+        mvOsOutput( "Device %d:\n", i);
+
+        for(winNum = 0; winNum < MV_USB_MAX_ADDR_DECODE_WIN; winNum++)
+        {
+            memset(&addrDecWin, 0, sizeof(MV_DEC_WIN) );
+
+            mvOsOutput( "win%d - ", winNum );
+
+            if( mvUsbWinGet(i, winNum, &addrDecWin ) == MV_OK )
+            {
+                if( addrDecWin.enable )
+                {
+                    mvOsOutput( "%s base %08x, ",
+                        mvCtrlTargetNameGet(addrDecWin.target), addrDecWin.addrWin.baseLow );
+
+                    mvSizePrint( addrDecWin.addrWin.size );
+
+#if defined(MV645xx) || defined(MV646xx)
+                    switch( addrDecWin.addrWinAttr.swapType)
+                    {
+                        case MV_BYTE_SWAP:
+                            mvOsOutput( "BYTE_SWAP, " );
+                            break;
+                        case MV_NO_SWAP:
+                            mvOsOutput( "NO_SWAP  , " );
+                            break;
+                        case MV_BYTE_WORD_SWAP:
+                            mvOsOutput( "BYTE_WORD_SWAP, " );
+                            break;
+                        case MV_WORD_SWAP:
+                            mvOsOutput( "WORD_SWAP, " );
+                            break;
+                        default:
+                            mvOsOutput( "SWAP N/A , " );
+                    }
+
+                    switch( addrDecWin.addrWinAttr.cachePolicy )
+                    {
+                        case NO_COHERENCY:
+                            mvOsOutput( "NO_COHERENCY , " );
+                            break;
+                        case WT_COHERENCY:
+                            mvOsOutput( "WT_COHERENCY , " );
+                            break;
+                        case WB_COHERENCY:
+                            mvOsOutput( "WB_COHERENCY , " );
+                            break;
+                        default:
+                            mvOsOutput( "COHERENCY N/A, " );
+                    }
+
+                    switch( addrDecWin.addrWinAttr.pcixNoSnoop )
+                    {
+                        case 0:
+                            mvOsOutput( "PCI-X NS inactive, " );
+                            break;
+                        case 1:
+                            mvOsOutput( "PCI-X NS active  , " );
+                            break;
+                        default:
+                            mvOsOutput( "PCI-X NS N/A     , " );
+                    }
+
+                    switch( addrDecWin.addrWinAttr.p2pReq64 )
+                    {
+                        case 0:
+                            mvOsOutput( "REQ64 force" );
+                            break;
+                        case 1:
+                            mvOsOutput( "REQ64 detect" );
+                            break;
+                        default:
+                            mvOsOutput( "REQ64 N/A" );
+                    }
+#endif /* MV645xx || MV646xx */
+                    mvOsOutput( "\n" );
+                }
+                else
+                    mvOsOutput( "disable\n" );
+            }
+        }
+    }
+}
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysUsb.h b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysUsb.h
new file mode 100644
index 000000000000..b7129006f7f2
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysUsb.h
@@ -0,0 +1,125 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+        this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvSysUsbh
+#define __INCmvSysUsbh
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* includes */
+#include "usb/mvUsb.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvAddrDec.h"
+
+#define MV_USB_MAX_ADDR_DECODE_WIN  4
+
+/*******************************************/
+/* USB Bridge Registers                    */
+/*******************************************/
+#define MV_USB_BRIDGE_CTRL_REG(dev)              (USB_REG_BASE(dev) + 0x300)
+
+#define MV_USB_WIN_CTRL_REG(dev, win)        (USB_REG_BASE(dev) + 0x320 + ((win)<<4))
+#define MV_USB_WIN_BASE_REG(dev, win)        (USB_REG_BASE(dev) + 0x324 + ((win)<<4))
+
+/* BITs in Windows 0-3 Control and Base Registers */
+#define MV_USB_WIN_ENABLE_BIT               0
+#define MV_USB_WIN_ENABLE_MASK              (1 << MV_USB_WIN_ENABLE_BIT)
+
+#define MV_USB_WIN_BURST_WR_LIMIT_BIT       1
+#define MV_USB_WIN_BURST_WR_LIMIT_MASK      (1 << MV_USB_WIN_BURST_WR_LIMIT_BIT)
+#define MV_USB_WIN_BURST_WR_NO_LIMIT        (0 << MV_USB_WIN_BURST_WR_LIMIT_BIT)
+#define MV_USB_WIN_BURST_WR_32BIT_LIMIT     (1 << MV_USB_WIN_BURST_WR_LIMIT_BIT)
+
+#define MV_USB_WIN_TARGET_OFFSET            4
+#define MV_USB_WIN_TARGET_MASK              (0xF << MV_USB_WIN_TARGET_OFFSET)
+
+#define MV_USB_WIN_ATTR_OFFSET              8
+#define MV_USB_WIN_ATTR_MASK                (0xFF << MV_USB_WIN_ATTR_OFFSET)
+
+#define MV_USB_WIN_SIZE_OFFSET              16
+#define MV_USB_WIN_SIZE_MASK                (0xFFFF << MV_USB_WIN_SIZE_OFFSET)
+
+#define MV_USB_WIN_BASE_OFFSET              16
+#define MV_USB_WIN_BASE_MASK                (0xFFFF << MV_USB_WIN_BASE_OFFSET)
+
+
+#define MV_USB_BRIDGE_IPG_REG(dev)          (USB_REG_BASE(dev) + 0x360)
+
+
+MV_STATUS   mvUsbInit(int dev, MV_BOOL isHost);
+
+MV_STATUS   mvUsbWinInit(int dev);
+MV_STATUS   mvUsbWinSet(int dev, MV_U32 winNum, MV_DEC_WIN *pAddrWin);
+MV_STATUS   mvUsbWinGet(int dev, MV_U32 winNum, MV_DEC_WIN *pAddrWin);
+
+void        mvUsbAddrDecShow(void);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INCmvUsbh */
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysXor.c b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysXor.c
new file mode 100644
index 000000000000..01bc362c1554
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysXor.c
@@ -0,0 +1,662 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "xor/mvXor.h"
+#include "mvSysXor.h"
+
+/* defines  */
+#ifdef MV_DEBUG
+	#define DB(x)	x
+#else
+	#define DB(x)
+#endif
+
+
+static MV_STATUS xorWinOverlapDetect(MV_U32 unit,MV_U32 winNum, MV_ADDR_WIN *pAddrWin);
+
+MV_TARGET xorAddrDecPrioTap[] =
+{
+#if defined(MV_INCLUDE_DEVICE_CS0)
+    DEVICE_CS0,
+#endif
+#if defined(MV_INCLUDE_PEX)
+	PEX0_MEM,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS0)
+    SDRAM_CS0,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS1)
+    SDRAM_CS1,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS2)
+    SDRAM_CS2,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS3)
+    SDRAM_CS3,
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS1)
+    DEVICE_CS1,
+#endif
+#if defined(MV_INCLUDE_CESA)
+   CRYPT_ENG,
+#endif
+	TBL_TERM
+};
+static MV_STATUS mvXorInitWinsUnit (MV_U32 unit)
+{
+	MV_U32         winNum;
+	MV_XOR_DEC_WIN addrDecWin;
+	MV_CPU_DEC_WIN cpuAddrDecWin;
+	MV_U32          status;
+	MV_U32			winPrioIndex=0;
+
+	/* Initiate XOR address decode */
+
+	/* First disable all address decode windows */
+	for(winNum = 0; winNum < XOR_MAX_ADDR_DEC_WIN; winNum++)
+	{
+	    mvXorTargetWinEnable(unit,winNum, MV_FALSE);
+	}
+
+	/* Go through all windows in user table until table terminator			*/
+	for (winNum = 0; ((xorAddrDecPrioTap[winPrioIndex] != TBL_TERM) &&
+					(winNum < XOR_MAX_ADDR_DEC_WIN));)
+	{
+		/* first get attributes from CPU If */
+		status = mvCpuIfTargetWinGet(xorAddrDecPrioTap[winPrioIndex],
+									 &cpuAddrDecWin);
+
+        if(MV_NO_SUCH == status)
+        {
+            winPrioIndex++;
+            continue;
+        }
+		if (MV_OK != status)
+		{
+            mvOsPrintf("%s: ERR. mvCpuIfTargetWinGet failed\n", __FUNCTION__);
+			return MV_ERROR;
+		}
+
+
+        if (cpuAddrDecWin.enable == MV_TRUE)
+		{
+
+			addrDecWin.target           = xorAddrDecPrioTap[winPrioIndex];
+			addrDecWin.addrWin.baseLow  = cpuAddrDecWin.addrWin.baseLow;
+			addrDecWin.addrWin.baseHigh = cpuAddrDecWin.addrWin.baseHigh;
+			addrDecWin.addrWin.size     = cpuAddrDecWin.addrWin.size;
+			addrDecWin.enable           = MV_TRUE;
+
+			if (MV_OK != mvXorTargetWinSet(unit,winNum, &addrDecWin))
+			{
+				DB(mvOsPrintf("mvXorInit: ERR. mvDmaTargetWinSet failed\n"));
+				return MV_ERROR;
+			}
+			winNum++;
+		}
+		winPrioIndex++;
+
+	}
+
+	return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvXorInit - Initialize XOR engine
+*
+* DESCRIPTION:
+*		This function initialize XOR unit. It set the default address decode
+*		windows of the unit.
+*		Note that if the address window is disabled in xorAddrDecMap, the
+*		window parameters will be set but the window will remain disabled.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM if parameters to function invalid, MV_OK otherwise.
+*******************************************************************************/
+MV_STATUS mvXorInit (MV_VOID)
+{
+	MV_U32         i;
+
+	/* Initiate XOR address decode */
+	for(i = 0; i < MV_XOR_MAX_UNIT; i++)
+	    mvXorInitWinsUnit(i);
+
+	mvXorHalInit(MV_XOR_MAX_CHAN);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvXorTargetWinSet - Set XOR target address window
+*
+* DESCRIPTION:
+*       This function sets a peripheral target (e.g. SDRAM bank0, PCI_MEM0)
+*       address window. After setting this target window, the XOR will be
+*       able to access the target within the address window.
+*
+* INPUT:
+*	    winNum - One of the possible XOR memory decode windows.
+*       target - Peripheral target enumerator.
+*       base   - Window base address.
+*       size   - Window size.
+*       enable - Window enable/disable.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM if parameters to function invalid, MV_OK otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvXorTargetWinSet(MV_U32 unit, MV_U32 winNum, MV_XOR_DEC_WIN *pAddrDecWin)
+{
+    MV_DEC_REGS xorDecRegs;
+	MV_TARGET_ATTRIB targetAttribs;
+    MV_U32      chan;
+
+    /* Parameter checking */
+    if (winNum >= XOR_MAX_ADDR_DEC_WIN)
+    {
+		DB(mvOsPrintf("%s: ERR. Invalid win num %d\n",__FUNCTION__, winNum));
+        return MV_BAD_PARAM;
+    }
+    if (pAddrDecWin == NULL)
+    {
+        DB(mvOsPrintf("%s: ERR. pAddrDecWin is NULL pointer\n", __FUNCTION__ ));
+        return MV_BAD_PTR;
+    }
+    /* Check if the requested window overlaps with current windows */
+    if (MV_TRUE == xorWinOverlapDetect(unit, winNum, &pAddrDecWin->addrWin))
+    {
+	DB(mvOsPrintf("%s: ERR. Window %d overlap\n",__FUNCTION__,winNum));
+	return MV_ERROR;
+    }
+
+    xorDecRegs.baseReg = MV_REG_READ(XOR_BASE_ADDR_REG(unit,winNum));
+    xorDecRegs.sizeReg = MV_REG_READ(XOR_SIZE_MASK_REG(unit,winNum));
+
+    /* Get Base Address and size registers values */
+    if(MV_OK != mvCtrlAddrDecToReg(&pAddrDecWin->addrWin, &xorDecRegs))
+    {
+		DB(mvOsPrintf("%s: ERR. Invalid addr dec window\n",__FUNCTION__));
+        return MV_BAD_PARAM;
+	}
+
+
+	mvCtrlAttribGet(pAddrDecWin->target,&targetAttribs);
+
+	/* set attributes */
+	xorDecRegs.baseReg &= ~XEBARX_ATTR_MASK;
+	xorDecRegs.baseReg |= targetAttribs.attrib << XEBARX_ATTR_OFFS;
+	/* set target ID */
+	xorDecRegs.baseReg &= ~XEBARX_TARGET_MASK;
+	xorDecRegs.baseReg |= targetAttribs.targetId << XEBARX_TARGET_OFFS;
+
+
+    /* Write to address decode Base Address Register */
+	MV_REG_WRITE(XOR_BASE_ADDR_REG(unit,winNum), xorDecRegs.baseReg);
+
+    /* Write to Size Register */
+	MV_REG_WRITE(XOR_SIZE_MASK_REG(unit,winNum), xorDecRegs.sizeReg);
+
+    for (chan = 0; chan < MV_XOR_MAX_CHAN_PER_UNIT; chan++)
+    {
+        if (pAddrDecWin->enable)
+        {
+            MV_REG_BIT_SET(XOR_WINDOW_CTRL_REG(unit,chan),
+                           XEXWCR_WIN_EN_MASK(winNum));
+        }
+        else
+        {
+            MV_REG_BIT_RESET(XOR_WINDOW_CTRL_REG(unit,chan),
+                             XEXWCR_WIN_EN_MASK(winNum));
+        }
+    }
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvXorTargetWinGet - Get xor peripheral target address window.
+*
+* DESCRIPTION:
+*		Get xor peripheral target address window.
+*
+* INPUT:
+*	  winNum - One of the possible XOR memory decode windows.
+*
+* OUTPUT:
+*       base   - Window base address.
+*       size   - Window size.
+*       enable - window enable/disable.
+*
+* RETURN:
+*       MV_BAD_PARAM if parameters to function invalid, MV_OK otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvXorTargetWinGet(MV_U32 unit,MV_U32 winNum, MV_XOR_DEC_WIN *pAddrDecWin)
+{
+    MV_DEC_REGS xorDecRegs;
+	MV_TARGET_ATTRIB targetAttrib;
+    MV_U32      chan=0,chanWinEn;
+
+    /* Parameter checking */
+    if (winNum >= XOR_MAX_ADDR_DEC_WIN)
+    {
+		DB(mvOsPrintf("%s: ERR. Invalid win num %d\n",__FUNCTION__ , winNum));
+        return MV_ERROR;
+    }
+
+    if (NULL == pAddrDecWin)
+    {
+        DB(mvOsPrintf("%s: ERR. pAddrDecWin is NULL pointer\n", __FUNCTION__ ));
+        return MV_BAD_PTR;
+    }
+
+    chanWinEn = MV_REG_READ(XOR_WINDOW_CTRL_REG(unit,0)) & XEXWCR_WIN_EN_MASK(winNum);
+
+    for (chan = 0; chan < MV_XOR_MAX_CHAN_PER_UNIT; chan++) /* we should scan here all channels per unit */
+    {
+	/* Check if enable bit is equal for all channels */
+        if ((MV_REG_READ(XOR_WINDOW_CTRL_REG(unit,chan)) &
+             XEXWCR_WIN_EN_MASK(winNum)) != chanWinEn)
+        {
+            mvOsPrintf("%s: ERR. Window enable field must be equal in "
+                              "all channels(chan=%d)\n",__FUNCTION__, chan);
+            return MV_ERROR;
+        }
+    }
+
+
+
+	xorDecRegs.baseReg  = MV_REG_READ(XOR_BASE_ADDR_REG(unit,winNum));
+	xorDecRegs.sizeReg  = MV_REG_READ(XOR_SIZE_MASK_REG(unit,winNum));
+
+	if (MV_OK != mvCtrlRegToAddrDec(&xorDecRegs, &pAddrDecWin->addrWin))
+	{
+		mvOsPrintf("%s: ERR. mvCtrlRegToAddrDec failed\n", __FUNCTION__);
+		return MV_ERROR;
+	}
+
+	/* attrib and targetId */
+	targetAttrib.attrib =
+		(xorDecRegs.baseReg & XEBARX_ATTR_MASK) >> XEBARX_ATTR_OFFS;
+	targetAttrib.targetId =
+		(xorDecRegs.baseReg & XEBARX_TARGET_MASK) >> XEBARX_TARGET_OFFS;
+
+
+	pAddrDecWin->target = mvCtrlTargetGet(&targetAttrib);
+
+	if(chanWinEn)
+	{
+		pAddrDecWin->enable = MV_TRUE;
+	}
+	else pAddrDecWin->enable = MV_FALSE;
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvXorTargetWinEnable - Enable/disable a Xor address decode window
+*
+* DESCRIPTION:
+*       This function enable/disable a XOR address decode window.
+*       if parameter 'enable' == MV_TRUE the routine will enable the
+*       window, thus enabling XOR accesses (before enabling the window it is
+*       tested for overlapping). Otherwise, the window will be disabled.
+*
+* INPUT:
+*       winNum - Decode window number.
+*       enable - Enable/disable parameter.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM if parameters to function invalid, MV_OK otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvXorTargetWinEnable(MV_U32 unit,MV_U32 winNum, MV_BOOL enable)
+{
+	MV_XOR_DEC_WIN  addrDecWin;
+    MV_U32          chan;
+
+	/* Parameter checking   */
+    if (winNum >= XOR_MAX_ADDR_DEC_WIN)
+    {
+        DB(mvOsPrintf("%s: ERR. Invalid winNum%d\n", __FUNCTION__, winNum));
+        return MV_ERROR;
+    }
+
+	if (enable == MV_TRUE)
+	{
+		/* Get current window */
+	    if (MV_OK != mvXorTargetWinGet(unit,winNum, &addrDecWin))
+		{
+			DB(mvOsPrintf("%s: ERR. targetWinGet fail\n", __FUNCTION__));
+			return MV_ERROR;
+		}
+
+		/* Check for overlapping */
+	    if (MV_TRUE == xorWinOverlapDetect(unit,winNum, &(addrDecWin.addrWin)))
+		{
+			/* Overlap detected	*/
+			DB(mvOsPrintf("%s: ERR. Overlap detected\n", __FUNCTION__));
+			return MV_ERROR;
+		}
+
+		/* No Overlap. Enable address decode target window */
+		for (chan = 0; chan < MV_XOR_MAX_CHAN_PER_UNIT; chan++)
+		{
+		    MV_REG_BIT_SET(XOR_WINDOW_CTRL_REG(unit,chan),
+						   XEXWCR_WIN_EN_MASK(winNum));
+		}
+
+	}
+	else
+	{
+		/* Disable address decode target window */
+
+		for (chan = 0; chan < MV_XOR_MAX_CHAN_PER_UNIT; chan++)
+		{
+		    MV_REG_BIT_RESET(XOR_WINDOW_CTRL_REG(unit,chan),
+							 XEXWCR_WIN_EN_MASK(winNum));
+		}
+
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvXorSetProtWinSet - Configure access attributes of a XOR engine
+*                               to one of the XOR memory windows.
+*
+* DESCRIPTION:
+*       Each engine can be configured with access attributes for each of the
+*       memory spaces. This function sets access attributes
+*       to a given window for the given engine
+*
+* INPUTS:
+*       chan    - One of the possible engines.
+*       winNum  - One of the possible XOR memory spaces.
+*       access  - Protection access rights.
+*       write   - Write rights.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM if parameters to function invalid, MV_OK otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvXorProtWinSet (MV_U32 unit,MV_U32 chan, MV_U32 winNum, MV_BOOL access,
+                           MV_BOOL write)
+{
+    MV_U32 temp;
+
+    /* Parameter checking   */
+    if (chan >= MV_XOR_MAX_CHAN_PER_UNIT)
+    {
+		DB(mvOsPrintf("%s: ERR. Invalid chan num %d\n", __FUNCTION__ , chan));
+        return MV_BAD_PARAM;
+    }
+    if (winNum >= XOR_MAX_ADDR_DEC_WIN)
+    {
+		DB(mvOsPrintf("%s: ERR. Invalid win num %d\n", __FUNCTION__, winNum));
+        return MV_BAD_PARAM;
+    }
+
+    temp = MV_REG_READ(XOR_WINDOW_CTRL_REG(unit,chan)) &
+        (~XEXWCR_WIN_ACC_MASK(winNum));
+
+    /* if access is disable */
+    if (!access)
+    {
+        /* disable access */
+        temp |= XEXWCR_WIN_ACC_NO_ACC(winNum);
+    }
+    /* if access is enable */
+    else
+    {
+        /* if write is enable */
+        if (write)
+        {
+            /* enable write */
+            temp |= XEXWCR_WIN_ACC_RW(winNum);
+        }
+        /* if write is disable */
+        else
+        {
+            /* disable write */
+            temp |= XEXWCR_WIN_ACC_RO(winNum);
+        }
+    }
+    MV_REG_WRITE(XOR_WINDOW_CTRL_REG(unit,chan),temp);
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvXorPciRemap - Set XOR remap register for PCI address windows.
+*
+* DESCRIPTION:
+*       only Windows 0-3 can be remapped.
+*
+* INPUT:
+*       winNum      - window number
+*       pAddrDecWin  - pointer to address space window structure
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM if parameters to function invalid, MV_OK otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvXorPciRemap(MV_U32 unit,MV_U32 winNum, MV_U32 addrHigh)
+{
+    /* Parameter checking   */
+    if (winNum >= XOR_MAX_REMAP_WIN)
+    {
+		DB(mvOsPrintf("%s: ERR. Invalid win num %d\n", __FUNCTION__, winNum));
+        return MV_BAD_PARAM;
+    }
+
+    MV_REG_WRITE(XOR_HIGH_ADDR_REMAP_REG(unit,winNum), addrHigh);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* xorWinOverlapDetect - Detect XOR address windows overlaping
+*
+* DESCRIPTION:
+*       An unpredicted behaviour is expected in case XOR address decode
+*       windows overlaps.
+*       This function detects XOR address decode windows overlaping of a
+*       specified window. The function does not check the window itself for
+*       overlaping. The function also skipps disabled address decode windows.
+*
+* INPUT:
+*       winNum      - address decode window number.
+*       pAddrDecWin - An address decode window struct.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if the given address window overlap current address
+*       decode map, MV_FALSE otherwise, MV_ERROR if reading invalid data
+*       from registers.
+*
+*******************************************************************************/
+static MV_STATUS xorWinOverlapDetect(MV_U32 unit,MV_U32 winNum, MV_ADDR_WIN *pAddrWin)
+{
+	MV_U32 	        baseAddrEnableReg;
+	MV_U32          winNumIndex,chan;
+	MV_XOR_DEC_WIN  addrDecWin;
+
+	if (pAddrWin == NULL)
+	{
+		DB(mvOsPrintf("%s: ERR. pAddrWin is NULL pointer\n", __FUNCTION__ ));
+		return MV_BAD_PTR;
+	}
+
+	for (chan = 0; chan < MV_XOR_MAX_CHAN_PER_UNIT; chan++)
+	{
+		/* Read base address enable register. Do not check disabled windows	*/
+	    baseAddrEnableReg = MV_REG_READ(XOR_WINDOW_CTRL_REG(unit,chan));
+
+		for (winNumIndex = 0; winNumIndex < XOR_MAX_ADDR_DEC_WIN; winNumIndex++)
+		{
+			/* Do not check window itself */
+			if (winNumIndex == winNum)
+			{
+				continue;
+			}
+
+			/* Do not check disabled windows */
+			if ((baseAddrEnableReg & XEXWCR_WIN_EN_MASK(winNumIndex)) == 0)
+			{
+				continue;
+			}
+
+			/* Get window parameters */
+			if (MV_OK != mvXorTargetWinGet(unit,winNumIndex, &addrDecWin))
+			{
+				DB(mvOsPrintf("%s: ERR. TargetWinGet failed\n", __FUNCTION__ ));
+				return MV_ERROR;
+			}
+
+			if (MV_TRUE == ctrlWinOverlapTest(pAddrWin, &(addrDecWin.addrWin)))
+			{
+				return MV_TRUE;
+			}
+		}
+	}
+
+	return MV_FALSE;
+}
+
+static MV_VOID mvXorAddrDecShowUnit(MV_U32 unit)
+{
+	MV_XOR_DEC_WIN win;
+	int            i;
+
+	mvOsOutput( "\n" );
+	mvOsOutput( "XOR %d:\n", unit );
+	mvOsOutput( "----\n" );
+
+	for( i = 0; i < XOR_MAX_ADDR_DEC_WIN; i++ )
+	{
+		memset( &win, 0, sizeof(MV_XOR_DEC_WIN) );
+
+		mvOsOutput( "win%d - ", i );
+
+		if( mvXorTargetWinGet(unit, i, &win ) == MV_OK )
+		{
+			if( win.enable )
+			{
+				mvOsOutput( "%s base %x, ",
+				mvCtrlTargetNameGet(win.target), win.addrWin.baseLow );
+
+				mvSizePrint( win.addrWin.size );
+
+                mvOsOutput( "\n" );
+			}
+			else
+				mvOsOutput( "disable\n" );
+		}
+	}
+}
+
+/*******************************************************************************
+* mvXorAddrDecShow - Print the XOR address decode map.
+*
+* DESCRIPTION:
+*		This function print the XOR address decode map.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_VOID mvXorAddrDecShow(MV_VOID)
+{
+	int            i;
+
+	for( i = 0; i < MV_XOR_MAX_UNIT; i++ )
+	    mvXorAddrDecShowUnit(i);
+
+}
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysXor.h b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysXor.h
new file mode 100644
index 000000000000..0a7be8f25834
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/ctrlEnv/sys/mvSysXor.h
@@ -0,0 +1,140 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCMVSysXorh
+#define __INCMVSysXorh
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "ctrlEnv/sys/mvCpuIf.h"
+
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvAddrDec.h"
+
+#define XOR_MAX_ADDR_DEC_WIN	8	/* Maximum address decode windows		*/
+#define XOR_MAX_REMAP_WIN       4	/* Maximum address arbiter windows		*/
+
+/* XOR Engine Address Decoding Register Map */
+#define XOR_WINDOW_CTRL_REG(unit,chan)     (XOR_UNIT_BASE(unit)+(0x240 + ((chan) * 4)))
+#define XOR_BASE_ADDR_REG(unit,winNum)     (XOR_UNIT_BASE(unit)+(0x250 + ((winNum) * 4)))
+#define XOR_SIZE_MASK_REG(unit,winNum)     (XOR_UNIT_BASE(unit)+(0x270 + ((winNum) * 4)))
+#define XOR_HIGH_ADDR_REMAP_REG(unit,winNum) (XOR_UNIT_BASE(unit)+(0x290 + ((winNum) * 4)))
+
+/* XOR Engine [0..1] Window Control Registers (XExWCR) */
+#define XEXWCR_WIN_EN_OFFS(winNum)          (winNum)
+#define XEXWCR_WIN_EN_MASK(winNum)          (1 << (XEXWCR_WIN_EN_OFFS(winNum)))
+#define XEXWCR_WIN_EN_ENABLE(winNum)        (1 << (XEXWCR_WIN_EN_OFFS(winNum)))
+#define XEXWCR_WIN_EN_DISABLE(winNum)       (0 << (XEXWCR_WIN_EN_OFFS(winNum)))
+
+#define XEXWCR_WIN_ACC_OFFS(winNum)         ((2 * winNum) + 16)
+#define XEXWCR_WIN_ACC_MASK(winNum)         (3 << (XEXWCR_WIN_ACC_OFFS(winNum)))
+#define XEXWCR_WIN_ACC_NO_ACC(winNum)       (0 << (XEXWCR_WIN_ACC_OFFS(winNum)))
+#define XEXWCR_WIN_ACC_RO(winNum)           (1 << (XEXWCR_WIN_ACC_OFFS(winNum)))
+#define XEXWCR_WIN_ACC_RW(winNum)           (3 << (XEXWCR_WIN_ACC_OFFS(winNum)))
+
+/* XOR Engine Base Address Registers (XEBARx) */
+#define XEBARX_TARGET_OFFS                  (0)
+#define XEBARX_TARGET_MASK                  (0xF << XEBARX_TARGET_OFFS)
+#define XEBARX_ATTR_OFFS                    (8)
+#define XEBARX_ATTR_MASK                    (0xFF << XEBARX_ATTR_OFFS)
+#define XEBARX_BASE_OFFS                    (16)
+#define XEBARX_BASE_MASK                    (0xFFFF << XEBARX_BASE_OFFS)
+
+/* XOR Engine Size Mask Registers (XESMRx) */
+#define XESMRX_SIZE_MASK_OFFS               (16)
+#define XESMRX_SIZE_MASK_MASK               (0xFFFF << XESMRX_SIZE_MASK_OFFS)
+
+/* XOR Engine High Address Remap Register (XEHARRx1) */
+#define XEHARRX_REMAP_OFFS                  (0)
+#define XEHARRX_REMAP_MASK                  (0xFFFFFFFF << XEHARRX_REMAP_OFFS)
+
+typedef struct _mvXorDecWin
+{
+    MV_TARGET     target;
+    MV_ADDR_WIN   addrWin; /* An address window*/
+    MV_BOOL       enable;  /* Address decode window is enabled/disabled */
+
+}MV_XOR_DEC_WIN;
+
+MV_STATUS   mvXorInit (MV_VOID);
+MV_STATUS   mvXorTargetWinSet(MV_U32 unit, MV_U32 winNum,
+			      MV_XOR_DEC_WIN *pAddrDecWin);
+MV_STATUS   mvXorTargetWinGet(MV_U32 unit, MV_U32 winNum,
+			      MV_XOR_DEC_WIN *pAddrDecWin);
+MV_STATUS   mvXorTargetWinEnable(MV_U32 unit,
+			      MV_U32 winNum, MV_BOOL enable);
+MV_STATUS   mvXorProtWinSet (MV_U32 unit,MV_U32 chan, MV_U32 winNum, MV_BOOL access,
+                             MV_BOOL write);
+MV_STATUS   mvXorPciRemap(MV_U32 unit, MV_U32 winNum, MV_U32 addrHigh);
+
+MV_VOID     mvXorAddrDecShow(MV_VOID);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/device/mvDevice.c b/crypto/ocf/kirkwood/mvHal/kw_family/device/mvDevice.c
new file mode 100644
index 000000000000..722971fd611e
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/device/mvDevice.c
@@ -0,0 +1,72 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "device/mvDevice.h"
+
+/* defines  */
+#ifdef MV_DEBUG
+	#define DB(x)	x
+#else
+	#define DB(x)
+#endif
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/device/mvDevice.h b/crypto/ocf/kirkwood/mvHal/kw_family/device/mvDevice.h
new file mode 100644
index 000000000000..a8a382b17d03
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/device/mvDevice.h
@@ -0,0 +1,74 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvDeviceH
+#define __INCmvDeviceH
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#include "device/mvDeviceRegs.h"
+
+
+#endif /* #ifndef __INCmvDeviceH */
diff --git a/crypto/ocf/kirkwood/mvHal/kw_family/device/mvDeviceRegs.h b/crypto/ocf/kirkwood/mvHal/kw_family/device/mvDeviceRegs.h
new file mode 100644
index 000000000000..599dfe3ba2ea
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/kw_family/device/mvDeviceRegs.h
@@ -0,0 +1,101 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvDeviceRegsH
+#define __INCmvDeviceRegsH
+
+#ifndef MV_ASMLANGUAGE
+#include "ctrlEnv/mvCtrlEnvLib.h"
+/* This enumerator describes the Marvell controller possible devices that   */
+/* can be connected to its device interface.                                */
+typedef enum _mvDevice
+{
+#if defined(MV_INCLUDE_DEVICE_CS0)
+	DEV_CS0 = 0,    /* Device connected to dev CS[0]    */
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS1)
+	DEV_CS1 = 1,        /* Device connected to dev CS[1]    */
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS2)
+	DEV_CS2 = 2,        /* Device connected to dev CS[2]    */
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS3)
+	DEV_CS3 = 3,        /* Device connected to dev CS[2]    */
+#endif
+#if defined(MV_INCLUDE_DEVICE_CS4)
+	DEV_CS4 = 4,        /* Device connected to BOOT dev    */
+#endif
+	MV_DEV_MAX_CS = MV_DEVICE_MAX_CS
+}MV_DEVICE;
+
+
+#endif /* MV_ASMLANGUAGE */
+
+
+#define NAND_CTRL_REG		0x10470
+
+#define NAND_ACTCEBOOT_BIT	BIT1
+
+
+#endif /* #ifndef __INCmvDeviceRegsH */
diff --git a/crypto/ocf/kirkwood/mvHal/linux_oss/mvOs.c b/crypto/ocf/kirkwood/mvHal/linux_oss/mvOs.c
new file mode 100644
index 000000000000..ecd9d60b9fa4
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/linux_oss/mvOs.c
@@ -0,0 +1,210 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+/*******************************************************************************
+* mvOsCpuArchLib.c - Marvell CPU architecture library
+*
+* DESCRIPTION:
+*       This library introduce Marvell API for OS dependent CPU architecture
+*       APIs. This library introduce single CPU architecture services APKI
+*       cross OS.
+*
+* DEPENDENCIES:
+*       None.
+*
+*******************************************************************************/
+
+/* includes */
+#include <asm/processor.h>
+#include "mvOs.h"
+
+static MV_U32 read_p15_c0 (void);
+
+/* defines  */
+#define ARM_ID_REVISION_OFFS	0
+#define ARM_ID_REVISION_MASK	(0xf << ARM_ID_REVISION_OFFS)
+
+#define ARM_ID_PART_NUM_OFFS	4
+#define ARM_ID_PART_NUM_MASK	(0xfff << ARM_ID_PART_NUM_OFFS)
+
+#define ARM_ID_ARCH_OFFS	16
+#define ARM_ID_ARCH_MASK	(0xf << ARM_ID_ARCH_OFFS)
+
+#define ARM_ID_VAR_OFFS		20
+#define ARM_ID_VAR_MASK		(0xf << ARM_ID_VAR_OFFS)
+
+#define ARM_ID_ASCII_OFFS	24
+#define ARM_ID_ASCII_MASK	(0xff << ARM_ID_ASCII_OFFS)
+
+
+
+void* mvOsIoCachedMalloc( void* osHandle, MV_U32 size, MV_ULONG* pPhyAddr,
+			  MV_U32 *memHandle)
+{
+    void *p = kmalloc( size, GFP_KERNEL );
+    *pPhyAddr = pci_map_single( osHandle, p, 0, PCI_DMA_BIDIRECTIONAL );
+    return p;
+}
+void* mvOsIoUncachedMalloc( void* osHandle, MV_U32 size, MV_ULONG* pPhyAddr,
+			    MV_U32 *memHandle)
+{
+    return pci_alloc_consistent( osHandle, size, (dma_addr_t *)pPhyAddr );
+}
+
+void mvOsIoUncachedFree( void* osHandle, MV_U32 size, MV_ULONG phyAddr, void* pVirtAddr,
+			 MV_U32 memHandle)
+{
+    return pci_free_consistent( osHandle, size, pVirtAddr, (dma_addr_t)phyAddr );
+}
+
+void mvOsIoCachedFree( void* osHandle, MV_U32 size, MV_ULONG phyAddr, void* pVirtAddr,
+		       MV_U32 memHandle )
+{
+    return kfree( pVirtAddr );
+}
+
+int mvOsRand(void)
+{
+    int rand;
+    get_random_bytes(&rand, sizeof(rand) );
+    return rand;
+}
+
+/*******************************************************************************
+* mvOsCpuVerGet() -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit CPU Revision
+*
+*******************************************************************************/
+MV_U32 mvOsCpuRevGet( MV_VOID )
+{
+	return ((read_p15_c0() & ARM_ID_REVISION_MASK ) >> ARM_ID_REVISION_OFFS);
+}
+/*******************************************************************************
+* mvOsCpuPartGet() -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit CPU Part number
+*
+*******************************************************************************/
+MV_U32 mvOsCpuPartGet( MV_VOID )
+{
+	return ((read_p15_c0() & ARM_ID_PART_NUM_MASK ) >> ARM_ID_PART_NUM_OFFS);
+}
+/*******************************************************************************
+* mvOsCpuArchGet() -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit CPU Architicture number
+*
+*******************************************************************************/
+MV_U32 mvOsCpuArchGet( MV_VOID )
+{
+    return ((read_p15_c0() & ARM_ID_ARCH_MASK ) >> ARM_ID_ARCH_OFFS);
+}
+/*******************************************************************************
+* mvOsCpuVarGet() -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit CPU Variant number
+*
+*******************************************************************************/
+MV_U32 mvOsCpuVarGet( MV_VOID )
+{
+    return ((read_p15_c0() & ARM_ID_VAR_MASK ) >> ARM_ID_VAR_OFFS);
+}
+/*******************************************************************************
+* mvOsCpuAsciiGet() -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit CPU Variant number
+*
+*******************************************************************************/
+MV_U32 mvOsCpuAsciiGet( MV_VOID )
+{
+    return ((read_p15_c0() & ARM_ID_ASCII_MASK ) >> ARM_ID_ASCII_OFFS);
+}
+
+
+
+/*
+static unsigned long read_p15_c0 (void)
+*/
+/* read co-processor 15, register #0 (ID register) */
+static MV_U32 read_p15_c0 (void)
+{
+	MV_U32 value;
+
+	__asm__ __volatile__(
+		"mrc	p15, 0, %0, c0, c0, 0   @ read control reg\n"
+		: "=r" (value)
+		:
+		: "memory");
+
+	return value;
+}
diff --git a/crypto/ocf/kirkwood/mvHal/linux_oss/mvOs.h b/crypto/ocf/kirkwood/mvHal/linux_oss/mvOs.h
new file mode 100644
index 000000000000..c9d4e1a58331
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/linux_oss/mvOs.h
@@ -0,0 +1,421 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#ifndef _MV_OS_LNX_H_
+#define _MV_OS_LNX_H_
+
+
+#ifdef __KERNEL__
+/* for kernel space */
+#include <linux/autoconf.h>
+#include <linux/interrupt.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/hardirq.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+
+#include <linux/random.h>
+
+#include "dbg-trace.h"
+
+extern void mv_early_printk(char *fmt,...);
+
+#define MV_ASM              __asm__ __volatile__
+#define INLINE              inline
+#define MV_TRC_REC	        TRC_REC
+#define mvOsPrintf          printk
+#define mvOsEarlyPrintf	    mv_early_printk
+#define mvOsOutput          printk
+#define mvOsSPrintf         sprintf
+#define mvOsMalloc(_size_)  kmalloc(_size_,GFP_ATOMIC)
+#define mvOsFree            kfree
+#define mvOsMemcpy          memcpy
+#define mvOsSleep(_mils_)   mdelay(_mils_)
+#define mvOsTaskLock()
+#define mvOsTaskUnlock()
+#define strtol              simple_strtoul
+#define mvOsDelay(x)        mdelay(x)
+#define mvOsUDelay(x)       udelay(x)
+#define mvCopyFromOs        copy_from_user
+#define mvCopyToOs          copy_to_user
+
+
+#include "mvTypes.h"
+#include "mvCommon.h"
+
+#ifdef MV_NDEBUG
+#define mvOsAssert(cond)
+#else
+#define mvOsAssert(cond) { do { if(!(cond)) { BUG(); } }while(0); }
+#endif /* MV_NDEBUG */
+
+#else /* __KERNEL__ */
+
+/* for user space applications */
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+
+#define INLINE inline
+#define mvOsPrintf printf
+#define mvOsOutput printf
+#define mvOsMalloc(_size_) malloc(_size_)
+#define mvOsFree free
+#define mvOsAssert(cond) assert(cond)
+
+#endif /* __KERNEL__ */
+#define mvOsIoVirtToPhy(pDev, pVirtAddr)                            \
+    pci_map_single( (pDev), (pVirtAddr), 0, PCI_DMA_BIDIRECTIONAL )
+
+#define mvOsCacheClear(pDev, p, size )                              \
+    pci_map_single( (pDev), (p), (size), PCI_DMA_BIDIRECTIONAL)
+
+#define mvOsCacheFlush(pDev, p, size )                              \
+    pci_map_single( (pDev), (p), (size), PCI_DMA_TODEVICE)
+
+#define mvOsCacheInvalidate(pDev, p, size)                          \
+    pci_map_single( (pDev), (p), (size), PCI_DMA_FROMDEVICE )
+
+#define mvOsCacheUnmap(pDev, phys, size)                          \
+    pci_unmap_single( (pDev), (dma_addr_t)(phys), (size), PCI_DMA_FROMDEVICE )
+
+
+#define CPU_PHY_MEM(x)              (MV_U32)x
+#define CPU_MEMIO_CACHED_ADDR(x)    (void*)x
+#define CPU_MEMIO_UNCACHED_ADDR(x)  (void*)x
+
+
+/* CPU architecture dependent 32, 16, 8 bit read/write IO addresses */
+#define MV_MEMIO32_WRITE(addr, data)    \
+    ((*((volatile unsigned int*)(addr))) = ((unsigned int)(data)))
+
+#define MV_MEMIO32_READ(addr)           \
+    ((*((volatile unsigned int*)(addr))))
+
+#define MV_MEMIO16_WRITE(addr, data)    \
+    ((*((volatile unsigned short*)(addr))) = ((unsigned short)(data)))
+
+#define MV_MEMIO16_READ(addr)           \
+    ((*((volatile unsigned short*)(addr))))
+
+#define MV_MEMIO8_WRITE(addr, data)     \
+    ((*((volatile unsigned char*)(addr))) = ((unsigned char)(data)))
+
+#define MV_MEMIO8_READ(addr)            \
+    ((*((volatile unsigned char*)(addr))))
+
+
+/* No Fast Swap implementation (in assembler) for ARM */
+#define MV_32BIT_LE_FAST(val)            MV_32BIT_LE(val)
+#define MV_16BIT_LE_FAST(val)            MV_16BIT_LE(val)
+#define MV_32BIT_BE_FAST(val)            MV_32BIT_BE(val)
+#define MV_16BIT_BE_FAST(val)            MV_16BIT_BE(val)
+
+/* 32 and 16 bit read/write in big/little endian mode */
+
+/* 16bit write in little endian mode */
+#define MV_MEMIO_LE16_WRITE(addr, data) \
+        MV_MEMIO16_WRITE(addr, MV_16BIT_LE_FAST(data))
+
+/* 16bit read in little endian mode */
+static __inline MV_U16 MV_MEMIO_LE16_READ(MV_U32 addr)
+{
+    MV_U16 data;
+
+    data= (MV_U16)MV_MEMIO16_READ(addr);
+
+    return (MV_U16)MV_16BIT_LE_FAST(data);
+}
+
+/* 32bit write in little endian mode */
+#define MV_MEMIO_LE32_WRITE(addr, data) \
+        MV_MEMIO32_WRITE(addr, MV_32BIT_LE_FAST(data))
+
+/* 32bit read in little endian mode */
+static __inline MV_U32 MV_MEMIO_LE32_READ(MV_U32 addr)
+{
+    MV_U32 data;
+
+    data= (MV_U32)MV_MEMIO32_READ(addr);
+
+    return (MV_U32)MV_32BIT_LE_FAST(data);
+}
+
+static __inline void mvOsBCopy(char* srcAddr, char* dstAddr, int byteCount)
+{
+    while(byteCount != 0)
+    {
+        *dstAddr = *srcAddr;
+        dstAddr++;
+        srcAddr++;
+        byteCount--;
+    }
+}
+
+static INLINE MV_U64 mvOsDivMod64(MV_U64 divided, MV_U64 divisor, MV_U64* modulu)
+{
+    MV_U64  division = 0;
+
+    if(divisor == 1)
+	return divided;
+
+    while(divided >= divisor)
+    {
+	    division++;
+	    divided -= divisor;
+    }
+    if (modulu != NULL)
+        *modulu = divided;
+
+    return division;
+}
+
+#if defined(MV_BRIDGE_SYNC_REORDER)
+extern MV_U32 *mvUncachedParam;
+
+static __inline void mvOsBridgeReorderWA(void)
+{
+	volatile MV_U32 val = 0;
+
+	val = mvUncachedParam[0];
+}
+#endif
+
+
+/* Flash APIs */
+#define MV_FL_8_READ            MV_MEMIO8_READ
+#define MV_FL_16_READ           MV_MEMIO_LE16_READ
+#define MV_FL_32_READ           MV_MEMIO_LE32_READ
+#define MV_FL_8_DATA_READ       MV_MEMIO8_READ
+#define MV_FL_16_DATA_READ      MV_MEMIO16_READ
+#define MV_FL_32_DATA_READ      MV_MEMIO32_READ
+#define MV_FL_8_WRITE           MV_MEMIO8_WRITE
+#define MV_FL_16_WRITE          MV_MEMIO_LE16_WRITE
+#define MV_FL_32_WRITE          MV_MEMIO_LE32_WRITE
+#define MV_FL_8_DATA_WRITE      MV_MEMIO8_WRITE
+#define MV_FL_16_DATA_WRITE     MV_MEMIO16_WRITE
+#define MV_FL_32_DATA_WRITE     MV_MEMIO32_WRITE
+
+
+/* CPU cache information */
+#define CPU_I_CACHE_LINE_SIZE   32    /* 2do: replace 32 with linux core macro */
+#define CPU_D_CACHE_LINE_SIZE   32    /* 2do: replace 32 with linux core macro */
+
+#ifdef CONFIG_L2_CACHE_ENABLE
+/* Data cache flush one line */
+#define mvOsCacheLineFlushInv(handle, addr)                     \
+{                                                               \
+  __asm__ __volatile__ ("mcr p15, 0, %0, c7, c14, 1" : : "r" (addr));\
+  __asm__ __volatile__ ("mcr p15, 1, %0, c15, c10, 1" : : "r" (addr));\
+  __asm__ __volatile__ ("mcr p15, 0, r0, c7, c10, 4");		\
+}
+
+#else
+
+/* Data cache flush one line */
+#define mvOsCacheLineFlushInv(handle, addr)                     \
+{                                                               \
+  __asm__ __volatile__ ("mcr p15, 0, %0, c7, c14, 1" : : "r" (addr));\
+  __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" : : "r" (addr)); \
+}
+#endif
+
+#ifdef CONFIG_L2_CACHE_ENABLE
+#define mvOsCacheLineInv(handle,addr)                           \
+{                                                               \
+  __asm__ __volatile__ ("mcr p15, 0, %0, c7, c6, 1" : : "r" (addr)); \
+ __asm__ __volatile__ ("mcr p15, 1, %0, c15, c11, 1" : : "r" (addr)); \
+}
+#else
+#define mvOsCacheLineInv(handle,addr)                           \
+{                                                               \
+  __asm__ __volatile__ ("mcr p15, 0, %0, c7, c6, 1" : : "r" (addr)); \
+}
+#endif
+
+#ifdef CONFIG_L2_CACHE_ENABLE
+/* Data cache flush one line */
+#define mvOsCacheLineFlush(handle, addr)                     \
+{                                                               \
+  __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 1" : : "r" (addr));\
+  __asm__ __volatile__ ("mcr p15, 1, %0, c15, c9, 1" : : "r" (addr));\
+  __asm__ __volatile__ ("mcr p15, 0, r0, c7, c10, 4");          \
+}
+
+#else
+/* Data cache flush one line */
+#define mvOsCacheLineFlush(handle, addr)                     \
+{                                                               \
+  __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 1" : : "r" (addr));\
+  __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" : : "r" (addr)); \
+}
+#endif
+
+static __inline void mvOsPrefetch(const void *ptr)
+{
+#ifdef CONFIG_USE_DSP
+        __asm__ __volatile__(
+                "pld\t%0"
+                :
+                : "o" (*(char *)ptr)
+                : "cc");
+#else
+	return;
+#endif
+}
+
+
+/* Flush CPU pipe */
+#define CPU_PIPE_FLUSH
+
+
+
+
+
+/* register manipulations  */
+
+/******************************************************************************
+* This debug function enable the write of each register that u-boot access to
+* to an array in the DRAM, the function record only MV_REG_WRITE access.
+* The function could not be operate when booting from flash.
+* In order to print the array we use the printreg command.
+******************************************************************************/
+/* #define REG_DEBUG */
+#if defined(REG_DEBUG)
+extern int reg_arry[2048][2];
+extern int reg_arry_index;
+#endif
+
+/* Marvell controller register read/write macros */
+#define MV_REG_VALUE(offset)          \
+                (MV_MEMIO32_READ((INTER_REGS_BASE | (offset))))
+
+#define MV_REG_READ(offset)             \
+        (MV_MEMIO_LE32_READ(INTER_REGS_BASE | (offset)))
+
+#if defined(REG_DEBUG)
+#define MV_REG_WRITE(offset, val)    \
+        MV_MEMIO_LE32_WRITE((INTER_REGS_BASE | (offset)), (val)); \
+        { \
+                reg_arry[reg_arry_index][0] = (INTER_REGS_BASE | (offset));\
+                reg_arry[reg_arry_index][1] = (val);\
+                reg_arry_index++;\
+        }
+#else
+#define MV_REG_WRITE(offset, val)    \
+        MV_MEMIO_LE32_WRITE((INTER_REGS_BASE | (offset)), (val));
+#endif
+
+#define MV_REG_BYTE_READ(offset)        \
+        (MV_MEMIO8_READ((INTER_REGS_BASE | (offset))))
+
+#if defined(REG_DEBUG)
+#define MV_REG_BYTE_WRITE(offset, val)  \
+        MV_MEMIO8_WRITE((INTER_REGS_BASE | (offset)), (val)); \
+        { \
+                reg_arry[reg_arry_index][0] = (INTER_REGS_BASE | (offset));\
+                reg_arry[reg_arry_index][1] = (val);\
+                reg_arry_index++;\
+        }
+#else
+#define MV_REG_BYTE_WRITE(offset, val)  \
+        MV_MEMIO8_WRITE((INTER_REGS_BASE | (offset)), (val))
+#endif
+
+#if defined(REG_DEBUG)
+#define MV_REG_BIT_SET(offset, bitMask)                 \
+        (MV_MEMIO32_WRITE((INTER_REGS_BASE | (offset)), \
+         (MV_MEMIO32_READ(INTER_REGS_BASE | (offset)) | \
+          MV_32BIT_LE_FAST(bitMask)))); \
+        { \
+                reg_arry[reg_arry_index][0] = (INTER_REGS_BASE | (offset));\
+                reg_arry[reg_arry_index][1] = (MV_MEMIO32_READ(INTER_REGS_BASE | (offset)));\
+                reg_arry_index++;\
+        }
+#else
+#define MV_REG_BIT_SET(offset, bitMask)                 \
+        (MV_MEMIO32_WRITE((INTER_REGS_BASE | (offset)), \
+         (MV_MEMIO32_READ(INTER_REGS_BASE | (offset)) | \
+          MV_32BIT_LE_FAST(bitMask))))
+#endif
+
+#if defined(REG_DEBUG)
+#define MV_REG_BIT_RESET(offset,bitMask)                \
+        (MV_MEMIO32_WRITE((INTER_REGS_BASE | (offset)), \
+         (MV_MEMIO32_READ(INTER_REGS_BASE | (offset)) & \
+          MV_32BIT_LE_FAST(~bitMask)))); \
+        { \
+                reg_arry[reg_arry_index][0] = (INTER_REGS_BASE | (offset));\
+                reg_arry[reg_arry_index][1] = (MV_MEMIO32_READ(INTER_REGS_BASE | (offset)));\
+                reg_arry_index++;\
+        }
+#else
+#define MV_REG_BIT_RESET(offset,bitMask)                \
+        (MV_MEMIO32_WRITE((INTER_REGS_BASE | (offset)), \
+         (MV_MEMIO32_READ(INTER_REGS_BASE | (offset)) & \
+          MV_32BIT_LE_FAST(~bitMask))))
+#endif
+
+
+
+/* ARM architecture APIs */
+MV_U32  mvOsCpuRevGet (MV_VOID);
+MV_U32  mvOsCpuPartGet (MV_VOID);
+MV_U32  mvOsCpuArchGet (MV_VOID);
+MV_U32  mvOsCpuVarGet (MV_VOID);
+MV_U32  mvOsCpuAsciiGet (MV_VOID);
+
+/*  Other APIs  */
+void* mvOsIoCachedMalloc( void* osHandle, MV_U32 size, MV_ULONG* pPhyAddr, MV_U32 *memHandle);
+void* mvOsIoUncachedMalloc( void* osHandle, MV_U32 size, MV_ULONG* pPhyAddr, MV_U32 *memHandle );
+void mvOsIoUncachedFree( void* osHandle, MV_U32 size, MV_ULONG phyAddr, void* pVirtAddr, MV_U32 memHandle );
+void mvOsIoCachedFree( void* osHandle, MV_U32 size, MV_ULONG phyAddr, void* pVirtAddr, MV_U32 memHandle );
+int mvOsRand(void);
+
+#endif /* _MV_OS_LNX_H_ */
diff --git a/crypto/ocf/kirkwood/mvHal/linux_oss/mvOsSata.h b/crypto/ocf/kirkwood/mvHal/linux_oss/mvOsSata.h
new file mode 100644
index 000000000000..170481aad0a5
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/linux_oss/mvOsSata.h
@@ -0,0 +1,158 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+/*******************************************************************************
+* mvOsLinux.h - O.S. interface header file for Linux
+*
+* DESCRIPTION:
+*       This header file contains OS dependent definition under Linux
+*
+* DEPENDENCIES:
+*       Linux kernel header files.
+*
+* FILE REVISION NUMBER:
+*       $Revision: 1.1 $
+*******************************************************************************/
+
+#ifndef __INCmvOsLinuxh
+#define __INCmvOsLinuxh
+
+/* Includes */
+#include <linux/autoconf.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/genhd.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/ide.h>
+#include <linux/pci.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include "mvOs.h"
+
+
+/* Definitions */
+#define MV_DEFAULT_QUEUE_DEPTH 2
+#define MV_SATA_SUPPORT_EDMA_SINGLE_DATA_REGION
+#define MV_SATA_SUPPORT_GEN2E_128_QUEUE_LEN
+
+#ifdef CONFIG_MV88F6082
+ #define MV_SATA_OVERRIDE_SW_QUEUE_SIZE
+ #define MV_SATA_REQUESTED_SW_QUEUE_SIZE 2
+ #undef MV_SATA_SUPPORT_GEN2E_128_QUEUE_LEN
+#endif
+
+/* System dependent macro for flushing CPU write cache */
+#if defined (MV_BRIDGE_SYNC_REORDER)
+#define MV_CPU_WRITE_BUFFER_FLUSH()	do {	\
+						wmb();	\
+						mvOsBridgeReorderWA();	\
+					} while (0)
+#else
+#define MV_CPU_WRITE_BUFFER_FLUSH()     wmb()
+#endif /* CONFIG_MV78XX0 */
+
+/* System dependent little endian from / to CPU conversions */
+#define MV_CPU_TO_LE16(x)   cpu_to_le16(x)
+#define MV_CPU_TO_LE32(x)   cpu_to_le32(x)
+
+#define MV_LE16_TO_CPU(x)   le16_to_cpu(x)
+#define MV_LE32_TO_CPU(x)   le32_to_cpu(x)
+
+#ifdef __BIG_ENDIAN_BITFIELD
+#define MV_BIG_ENDIAN_BITFIELD
+#endif
+
+/* System dependent register read / write in byte/word/dword variants */
+#define MV_REG_WRITE_BYTE(base, offset, val)    writeb(val, base + offset)
+#define MV_REG_WRITE_WORD(base, offset, val)    writew(val, base + offset)
+#define MV_REG_WRITE_DWORD(base, offset, val)   writel(val, base + offset)
+#define MV_REG_READ_BYTE(base, offset)          readb(base + offset)
+#define MV_REG_READ_WORD(base, offset)          readw(base + offset)
+#define MV_REG_READ_DWORD(base, offset)         readl(base + offset)
+
+
+/* Typedefs    */
+
+/* System dependant typedefs */
+typedef void            *MV_VOID_PTR;
+typedef u32             *MV_U32_PTR;
+typedef u16             *MV_U16_PTR;
+typedef u8              *MV_U8_PTR;
+typedef char            *MV_CHAR_PTR;
+typedef void            *MV_BUS_ADDR_T;
+typedef unsigned long   MV_CPU_FLAGS;
+
+
+/* Structures  */
+/* System dependent structure */
+typedef struct mvOsSemaphore
+{
+  int notUsed;
+} MV_OS_SEMAPHORE;
+
+
+/* Functions (User implemented)*/
+
+/* Semaphore init, take and release */
+#define mvOsSemInit(x) MV_TRUE
+#define mvOsSemTake(x)
+#define mvOsSemRelease(x)
+
+/* Interrupt masking and unmasking functions */
+MV_CPU_FLAGS mvOsSaveFlagsAndMaskCPUInterrupts(MV_VOID);
+MV_VOID      mvOsRestoreFlags(MV_CPU_FLAGS);
+
+/* Delay function in micro seconds resolution */
+void mvMicroSecondsDelay(MV_VOID_PTR, MV_U32);
+
+/* Typedefs    */
+typedef enum mvBoolean
+{
+    MV_SFALSE, MV_STRUE
+} MV_BOOLEAN;
+
+/* System logging function */
+#include "mvLog.h"
+/* Enable READ/WRITE Long SCSI command only when driver is compiled for debugging */
+#ifdef MV_LOGGER
+#define MV_SATA_SUPPORT_READ_WRITE_LONG
+#endif
+
+#define MV_IAL_LOG_ID       3
+
+#endif /* __INCmvOsLinuxh */
diff --git a/crypto/ocf/kirkwood/mvHal/mvSysHwConfig.h b/crypto/ocf/kirkwood/mvHal/mvSysHwConfig.h
new file mode 100644
index 000000000000..b02bbcbb30e3
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mvSysHwConfig.h
@@ -0,0 +1,374 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+
+*******************************************************************************/
+/*******************************************************************************
+* mvSysHwCfg.h - Marvell system HW configuration file
+*
+* DESCRIPTION:
+*       None.
+*
+* DEPENDENCIES:
+*       None.
+*
+*******************************************************************************/
+
+#ifndef __INCmvSysHwConfigh
+#define __INCmvSysHwConfigh
+
+#include "../../../../include/linux/autoconf.h"
+
+#define CONFIG_MARVELL	1
+
+/* includes */
+#define _1K         0x00000400
+#define _4K         0x00001000
+#define _8K         0x00002000
+#define _16K        0x00004000
+#define _32K        0x00008000
+#define _64K        0x00010000
+#define _128K       0x00020000
+#define _256K       0x00040000
+#define _512K       0x00080000
+
+#define _1M         0x00100000
+#define _2M         0x00200000
+#define _4M         0x00400000
+#define _8M         0x00800000
+#define _16M        0x01000000
+#define _32M        0x02000000
+#define _64M        0x04000000
+#define _128M       0x08000000
+#define _256M       0x10000000
+#define _512M       0x20000000
+
+#define _1G         0x40000000
+#define _2G         0x80000000
+
+/****************************************/
+/* Soc supporeted Units definitions	*/
+/****************************************/
+
+#ifdef CONFIG_MV_INCLUDE_PEX
+#define MV_INCLUDE_PEX
+#endif
+#ifdef CONFIG_MV_INCLUDE_TWSI
+#define MV_INCLUDE_TWSI
+#endif
+#ifdef CONFIG_MV_INCLUDE_CESA
+#define MV_INCLUDE_CESA
+#endif
+#ifdef CONFIG_MV_INCLUDE_GIG_ETH
+#define MV_INCLUDE_GIG_ETH
+#endif
+#ifdef CONFIG_MV_INCLUDE_INTEG_SATA
+#define MV_INCLUDE_INTEG_SATA
+#define MV_INCLUDE_SATA
+#endif
+#ifdef CONFIG_MV_INCLUDE_USB
+#define MV_INCLUDE_USB
+#define MV_USB_VOLTAGE_FIX
+#endif
+#ifdef CONFIG_MV_INCLUDE_NAND
+#define MV_INCLUDE_NAND
+#endif
+#ifdef CONFIG_MV_INCLUDE_TDM
+#define MV_INCLUDE_TDM
+#endif
+#ifdef CONFIG_MV_INCLUDE_XOR
+#define MV_INCLUDE_XOR
+#endif
+#ifdef CONFIG_MV_INCLUDE_TWSI
+#define MV_INCLUDE_TWSI
+#endif
+#ifdef CONFIG_MV_INCLUDE_UART
+#define MV_INCLUDE_UART
+#endif
+#ifdef CONFIG_MV_INCLUDE_SPI
+#define MV_INCLUDE_SPI
+#endif
+#ifdef CONFIG_MV_INCLUDE_SFLASH_MTD
+#define MV_INCLUDE_SFLASH_MTD
+#endif
+#ifdef CONFIG_MV_INCLUDE_AUDIO
+#define MV_INCLUDE_AUDIO
+#endif
+#ifdef CONFIG_MV_INCLUDE_TS
+#define MV_INCLUDE_TS
+#endif
+#ifdef CONFIG_MV_INCLUDE_SDIO
+#define MV_INCLUDE_SDIO
+#endif
+
+
+/* NAND flash stuff */
+#ifdef CONFIG_MV_NAND_BOOT
+#define MV_NAND_BOOT
+#endif
+#ifdef CONFIG_MV_NAND
+#define MV_NAND
+#endif
+
+/* SPI flash stuff */
+#ifdef CONFIG_MV_SPI_BOOT
+#define MV_SPI_BOOT
+#endif
+
+
+/****************************************************************/
+/************* General    configuration ********************/
+/****************************************************************/
+
+/* Enable Clock Power Control */
+#define MV_INCLUDE_CLK_PWR_CNTRL
+
+/* Disable the DEVICE BAR in the PEX */
+#define MV_DISABLE_PEX_DEVICE_BAR
+
+/* Allow the usage of early printings during initialization */
+#define MV_INCLUDE_EARLY_PRINTK
+
+/****************************************************************/
+/************* NFP configuration ********************************/
+/****************************************************************/
+#define MV_NFP_SEC_Q_SIZE		64
+#define MV_NFP_SEC_REQ_Q_SIZE		1000
+
+
+
+/****************************************************************/
+/************* CESA configuration ********************/
+/****************************************************************/
+
+#ifdef MV_INCLUDE_CESA
+
+#define MV_CESA_MAX_CHAN               4
+
+/* Use 2K of SRAM */
+#define MV_CESA_MAX_BUF_SIZE           1600
+
+#endif /* MV_INCLUDE_CESA */
+
+#if defined(CONFIG_MV_INCLUDE_GIG_ETH)
+
+#ifdef CONFIG_MV_NFP_STATS
+#define MV_FP_STATISTICS
+#else
+#undef MV_FP_STATISTICS
+#endif
+/* Default configuration for SKB_REUSE: 0 - Disabled, 1 - Enabled */
+#define MV_ETH_SKB_REUSE_DEFAULT    1
+/* Default configuration for TX_EN workaround: 0 - Disabled, 1 - Enabled */
+#define MV_ETH_TX_EN_DEFAULT        0
+
+/* un-comment if you want to perform tx_done from within the poll function */
+/* #define ETH_TX_DONE_ISR */
+
+/* put descriptors in uncached memory */
+/* #define ETH_DESCR_UNCACHED */
+
+/* Descriptors location: DRAM/internal-SRAM */
+#define ETH_DESCR_IN_SDRAM
+#undef  ETH_DESCR_IN_SRAM    /* No integrated SRAM in 88Fxx81 devices */
+
+#if defined(ETH_DESCR_IN_SRAM)
+#if defined(ETH_DESCR_UNCACHED)
+ #define ETH_DESCR_CONFIG_STR    "Uncached descriptors in integrated SRAM"
+#else
+ #define ETH_DESCR_CONFIG_STR    "Cached descriptors in integrated SRAM"
+#endif
+#elif defined(ETH_DESCR_IN_SDRAM)
+#if defined(ETH_DESCR_UNCACHED)
+ #define ETH_DESCR_CONFIG_STR    "Uncached descriptors in DRAM"
+#else
+ #define ETH_DESCR_CONFIG_STR    "Cached descriptors in DRAM"
+#endif
+#else
+ #error "Ethernet descriptors location undefined"
+#endif /* ETH_DESCR_IN_SRAM or ETH_DESCR_IN_SDRAM*/
+
+/* SW Sync-Barrier: not relevant for 88fxx81*/
+/* Reasnable to define this macro when descriptors in SRAM and buffers in DRAM */
+/* In RX the CPU theoretically might see himself as the descriptor owner,      */
+/* although the buffer hadn't been written to DRAM yet. Performance cost.      */
+/* #define INCLUDE_SYNC_BARR */
+
+/* Buffers cache coherency method (buffers in DRAM) */
+#ifndef MV_CACHE_COHER_SW
+/* Taken from mvCommon.h */
+/* Memory uncached, HW or SW cache coherency is not needed */
+#define MV_UNCACHED             0
+/* Memory cached, HW cache coherency supported in WriteThrough mode */
+#define MV_CACHE_COHER_HW_WT    1
+/* Memory cached, HW cache coherency supported in WriteBack mode */
+#define MV_CACHE_COHER_HW_WB    2
+/* Memory cached, No HW cache coherency, Cache coherency must be in SW */
+#define MV_CACHE_COHER_SW       3
+
+#endif
+
+/* DRAM cache coherency configuration */
+#define MV_CACHE_COHERENCY  MV_CACHE_COHER_SW
+
+
+#define ETHER_DRAM_COHER    MV_CACHE_COHER_SW   /* No HW coherency in 88Fxx81 devices */
+
+#if (ETHER_DRAM_COHER == MV_CACHE_COHER_HW_WB)
+ #define ETH_SDRAM_CONFIG_STR    "DRAM HW cache coherency (write-back)"
+#elif (ETHER_DRAM_COHER == MV_CACHE_COHER_HW_WT)
+ #define ETH_SDRAM_CONFIG_STR    "DRAM HW cache coherency (write-through)"
+#elif (ETHER_DRAM_COHER == MV_CACHE_COHER_SW)
+ #define ETH_SDRAM_CONFIG_STR    "DRAM SW cache-coherency"
+#elif (ETHER_DRAM_COHER == MV_UNCACHED)
+#   define ETH_SDRAM_CONFIG_STR  "DRAM uncached"
+#else
+ #error "Ethernet-DRAM undefined"
+#endif /* ETHER_DRAM_COHER */
+
+
+/****************************************************************/
+/************* Ethernet driver configuration ********************/
+/****************************************************************/
+
+/* port's default queueus */
+#define ETH_DEF_TXQ         0
+#define ETH_DEF_RXQ         0
+
+#define MV_ETH_RX_Q_NUM     CONFIG_MV_ETH_RX_Q_NUM
+#define MV_ETH_TX_Q_NUM     CONFIG_MV_ETH_TX_Q_NUM
+
+/* interrupt coalescing setting */
+#define ETH_TX_COAL    		    200
+#define ETH_RX_COAL    		    200
+
+/* Checksum offloading */
+#define TX_CSUM_OFFLOAD
+#define RX_CSUM_OFFLOAD
+
+#endif /* CONFIG_MV_INCLUDE_GIG_ETH */
+
+/****************************************************************/
+/*************** Telephony configuration ************************/
+/****************************************************************/
+#if defined(CONFIG_MV_TDM_LINEAR_MODE)
+ #define MV_TDM_LINEAR_MODE
+#elif defined(CONFIG_MV_TDM_ULAW_MODE)
+ #define MV_TDM_ULAW_MODE
+#endif
+
+#if defined(CONFIG_MV_TDM_5CHANNELS)
+ #define MV_TDM_5CHANNELS
+#endif
+
+#if defined(CONFIG_MV_TDM_USE_EXTERNAL_PCLK_SOURCE)
+ #define MV_TDM_USE_EXTERNAL_PCLK_SOURCE
+#endif
+
+/* We use the following registers to store DRAM interface pre configuration   */
+/* auto-detection results													  */
+/* IMPORTANT: We are using mask register for that purpose. Before writing     */
+/* to units mask register, make sure main maks register is set to disable     */
+/* all interrupts.                                                            */
+#define DRAM_BUF_REG0   0x30810 /* sdram bank 0 size            */
+#define DRAM_BUF_REG1   0x30820 /* sdram config                 */
+#define DRAM_BUF_REG2   0x30830 /* sdram mode                   */
+#define DRAM_BUF_REG3   0x308c4 /* dunit control low            */
+#define DRAM_BUF_REG4   0x60a90 /* sdram address control        */
+#define DRAM_BUF_REG5   0x60a94 /* sdram timing control low     */
+#define DRAM_BUF_REG6   0x60a98 /* sdram timing control high    */
+#define DRAM_BUF_REG7   0x60a9c /* sdram ODT control low        */
+#define DRAM_BUF_REG8   0x60b90 /* sdram ODT control high       */
+#define DRAM_BUF_REG9   0x60b94 /* sdram Dunit ODT control      */
+#define DRAM_BUF_REG10  0x60b98 /* sdram Extended Mode          */
+#define DRAM_BUF_REG11  0x60b9c /* sdram Ddr2 Time Low Reg      */
+#define DRAM_BUF_REG12  0x60a00 /* sdram Ddr2 Time High Reg     */
+#define DRAM_BUF_REG13  0x60a04 /* dunit Ctrl High              */
+#define DRAM_BUF_REG14  0x60b00 /* sdram second DIMM exist      */
+
+/* Following the pre-configuration registers default values restored after    */
+/* auto-detection is done                                                     */
+#define DRAM_BUF_REG_DV 0
+
+/* System Mapping */
+#define SDRAM_CS0_BASE  0x00000000
+#define SDRAM_CS0_SIZE  _256M
+
+#define SDRAM_CS1_BASE  0x10000000
+#define SDRAM_CS1_SIZE  _256M
+
+#define SDRAM_CS2_BASE  0x20000000
+#define SDRAM_CS2_SIZE  _256M
+
+#define SDRAM_CS3_BASE  0x30000000
+#define SDRAM_CS3_SIZE  _256M
+
+/* PEX */
+#define PEX0_MEM_BASE 0xe8000000
+#define PEX0_MEM_SIZE _128M
+
+#define PEX0_IO_BASE 0xf2000000
+#define PEX0_IO_SIZE _1M
+
+/* Device Chip Selects */
+#define NFLASH_CS_BASE 0xfa000000
+#define NFLASH_CS_SIZE _2M
+
+#define SPI_CS_BASE 0xf4000000
+#define SPI_CS_SIZE _16M
+
+#define CRYPT_ENG_BASE	0xf0000000
+#define CRYPT_ENG_SIZE	_2M
+
+#define BOOTDEV_CS_BASE	0xff800000
+#define BOOTDEV_CS_SIZE _8M
+
+/* CS2 - BOOTROM */
+#define DEVICE_CS2_BASE 0xff900000
+#define DEVICE_CS2_SIZE _1M
+
+/* PEX Work arround */
+/* the target we will use for the workarround */
+#define PEX_CONFIG_RW_WA_TARGET PEX0_MEM
+/*a flag that indicates if we are going to use the
+size and base of the target we using for the workarround
+window */
+#define PEX_CONFIG_RW_WA_USE_ORIGINAL_WIN_VALUES 1
+/* if the above flag is 0 then the following values
+will be used for the workarround window base and size,
+otherwise the following defines will be ignored */
+#define PEX_CONFIG_RW_WA_BASE 0xF3000000
+#define PEX_CONFIG_RW_WA_SIZE _16M
+
+/* Internal registers: size is defined in Controllerenvironment */
+#define INTER_REGS_BASE	0xFEE00000
+
+/* DRAM detection stuff */
+#define MV_DRAM_AUTO_SIZE
+
+/* Board clock detection */
+#define TCLK_AUTO_DETECT    	/* Use Tclk auto detection   */
+#define SYSCLK_AUTO_DETECT	/* Use SysClk auto detection */
+#define PCLCK_AUTO_DETECT  	/* Use PClk auto detection   */
+#define L2CLK_AUTO_DETECT 	/* Use L2Clk auto detection   */
+
+/* PEX-PCI\PCI-PCI Bridge*/
+#define PCI0_IF_PTP		0		/* Bridge exist on pciIf0*/
+
+
+
+#endif /* __INCmvSysHwConfigh */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmr.c b/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmr.c
new file mode 100644
index 000000000000..69d8d6be2f11
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmr.c
@@ -0,0 +1,375 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCntmr.h"
+#include "cpu/mvCpu.h"
+
+/* defines  */
+#ifdef MV_DEBUG
+	#define DB(x)	x
+#else
+	#define DB(x)
+#endif
+
+extern unsigned int whoAmI(void);
+
+/*******************************************************************************
+* mvCntmrLoad -
+*
+* DESCRIPTION:
+*       Load an init Value to a given counter/timer
+*
+* INPUT:
+*       countNum - counter number
+*       value - value to be loaded
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM on bad parameters , MV_ERROR on error ,MV_OK on sucess
+*******************************************************************************/
+MV_STATUS mvCntmrLoad(MV_U32 countNum, MV_U32 value)
+{
+	if (countNum >= MV_CNTMR_MAX_COUNTER )
+	{
+
+		mvOsPrintf(("mvCntmrLoad: Err. Illigal counter number \n"));
+		return MV_BAD_PARAM;;
+
+	}
+
+	MV_REG_WRITE(CNTMR_RELOAD_REG(countNum),value);
+	MV_REG_WRITE(CNTMR_VAL_REG(countNum),value);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvCntmrRead -
+*
+* DESCRIPTION:
+*  	Returns the value of the given Counter/Timer
+*
+* INPUT:
+*       countNum - counter number
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_U32 counter value
+*******************************************************************************/
+MV_U32 mvCntmrRead(MV_U32 countNum)
+{
+	return MV_REG_READ(CNTMR_VAL_REG(countNum));
+}
+
+/*******************************************************************************
+* mvCntmrWrite -
+*
+* DESCRIPTION:
+*  	Returns the value of the given Counter/Timer
+*
+* INPUT:
+*       countNum - counter number
+*		countVal - value to write
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None
+*******************************************************************************/
+void mvCntmrWrite(MV_U32 countNum,MV_U32 countVal)
+{
+	MV_REG_WRITE(CNTMR_VAL_REG(countNum),countVal);
+}
+
+/*******************************************************************************
+* mvCntmrCtrlSet -
+*
+* DESCRIPTION:
+*  	Set the Control to a given counter/timer
+*
+* INPUT:
+*       countNum - counter number
+*		pCtrl - pointer to MV_CNTMR_CTRL structure
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM on bad parameters , MV_ERROR on error ,MV_OK on sucess
+*******************************************************************************/
+MV_STATUS mvCntmrCtrlSet(MV_U32 countNum, MV_CNTMR_CTRL *pCtrl)
+{
+	MV_U32 cntmrCtrl;
+
+	if (countNum >= MV_CNTMR_MAX_COUNTER )
+	{
+
+		DB(mvOsPrintf(("mvCntmrCtrlSet: Err. Illigal counter number \n")));
+		return MV_BAD_PARAM;;
+
+	}
+
+	/* read control register */
+	cntmrCtrl = MV_REG_READ(CNTMR_CTRL_REG);
+
+
+	if (pCtrl->enable)	/* enable counter\timer */
+	{
+		cntmrCtrl |= CTCR_ARM_TIMER_EN(countNum);
+	}
+	else	/* disable counter\timer */
+	{
+		cntmrCtrl &= ~CTCR_ARM_TIMER_EN(countNum);
+	}
+
+	if ( pCtrl->autoEnable ) /* Auto mode */
+	{
+		cntmrCtrl |= CTCR_ARM_TIMER_AUTO_EN(countNum);
+
+	}
+	else 	/* no auto mode */
+	{
+		cntmrCtrl &= ~CTCR_ARM_TIMER_AUTO_EN(countNum);
+	}
+
+	MV_REG_WRITE(CNTMR_CTRL_REG,cntmrCtrl);
+
+	return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvCntmrCtrlGet -
+*
+* DESCRIPTION:
+*  	Get the Control value of a given counter/timer
+*
+* INPUT:
+*       countNum - counter number
+*		pCtrl - pointer to MV_CNTMR_CTRL structure
+*
+* OUTPUT:
+*       Counter\Timer control value
+*
+* RETURN:
+*       MV_BAD_PARAM on bad parameters , MV_ERROR on error ,MV_OK on sucess
+*******************************************************************************/
+MV_STATUS mvCntmrCtrlGet(MV_U32 countNum, MV_CNTMR_CTRL *pCtrl)
+{
+	MV_U32 cntmrCtrl;
+
+	if (countNum >= MV_CNTMR_MAX_COUNTER )
+	{
+		DB(mvOsPrintf(("mvCntmrCtrlGet: Err. Illigal counter number \n")));
+		return MV_BAD_PARAM;;
+	}
+
+	/* read control register */
+	cntmrCtrl = MV_REG_READ(CNTMR_CTRL_REG);
+
+	/* enable counter\timer */
+	if (cntmrCtrl & CTCR_ARM_TIMER_EN(countNum))
+	{
+		pCtrl->enable = MV_TRUE;
+	}
+	else
+	{
+		pCtrl->enable = MV_FALSE;
+	}
+
+	/* counter mode */
+	if (cntmrCtrl & CTCR_ARM_TIMER_AUTO_EN(countNum))
+	{
+		pCtrl->autoEnable = MV_TRUE;
+	}
+	else
+	{
+		pCtrl->autoEnable = MV_FALSE;
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvCntmrEnable -
+*
+* DESCRIPTION:
+*  	Set the Enable-Bit to logic '1' ==> starting the counter
+*
+* INPUT:
+*       countNum - counter number
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM on bad parameters , MV_ERROR on error ,MV_OK on sucess
+*******************************************************************************/
+MV_STATUS mvCntmrEnable(MV_U32 countNum)
+{
+	MV_U32 cntmrCtrl;
+
+	if (countNum >= MV_CNTMR_MAX_COUNTER )
+	{
+
+		DB(mvOsPrintf(("mvCntmrEnable: Err. Illigal counter number \n")));
+		return MV_BAD_PARAM;;
+
+	}
+
+	/* read control register */
+	cntmrCtrl = MV_REG_READ(CNTMR_CTRL_REG);
+
+	/* enable counter\timer */
+	cntmrCtrl |= CTCR_ARM_TIMER_EN(countNum);
+
+
+	MV_REG_WRITE(CNTMR_CTRL_REG,cntmrCtrl);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvCntmrDisable -
+*
+* DESCRIPTION:
+*  	Stop the counter/timer running, and returns its Value
+*
+* INPUT:
+*       countNum - counter number
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_U32 counter\timer value
+*******************************************************************************/
+MV_STATUS mvCntmrDisable(MV_U32 countNum)
+{
+	MV_U32 cntmrCtrl;
+
+	if (countNum >= MV_CNTMR_MAX_COUNTER )
+	{
+
+		DB(mvOsPrintf(("mvCntmrDisable: Err. Illigal counter number \n")));
+		return MV_BAD_PARAM;;
+
+	}
+
+	/* read control register */
+	cntmrCtrl = MV_REG_READ(CNTMR_CTRL_REG);
+
+	/* disable counter\timer */
+	cntmrCtrl &= ~CTCR_ARM_TIMER_EN(countNum);
+
+	MV_REG_WRITE(CNTMR_CTRL_REG,cntmrCtrl);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvCntmrStart -
+*
+* DESCRIPTION:
+*  	Combined all the sub-operations above to one function: Load,setMode,Enable
+*
+* INPUT:
+*       countNum - counter number
+*		value - value of the counter\timer to be set
+*		pCtrl - pointer to MV_CNTMR_CTRL structure
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM on bad parameters , MV_ERROR on error ,MV_OK on sucess
+*******************************************************************************/
+MV_STATUS mvCntmrStart(MV_U32 countNum, MV_U32 value,
+                       MV_CNTMR_CTRL *pCtrl)
+{
+
+	if (countNum >= MV_CNTMR_MAX_COUNTER )
+	{
+
+		mvOsPrintf(("mvCntmrDisable: Err. Illigal counter number \n"));
+		return MV_BAD_PARAM;;
+
+	}
+
+	/* load value onto counter\timer */
+	mvCntmrLoad(countNum,value);
+
+	/* set the counter to load in the first time */
+	mvCntmrWrite(countNum,value);
+
+	/* set control for timer \ cunter and enable */
+	mvCntmrCtrlSet(countNum,pCtrl);
+
+	return MV_OK;
+}
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmr.h b/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmr.h
new file mode 100644
index 000000000000..7a2b9251eee4
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmr.h
@@ -0,0 +1,121 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvTmrWtdgh
+#define __INCmvTmrWtdgh
+
+/* includes */
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "cntmr/mvCntmrRegs.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+
+
+/* This enumerator describe counters\watchdog numbers       */
+typedef enum _mvCntmrID
+{
+	TIMER0 = 0,
+	TIMER1,
+	WATCHDOG,
+	TIMER2,
+	TIMER3,
+}MV_CNTMR_ID;
+
+
+/* Counter / Timer control structure */
+typedef struct _mvCntmrCtrl
+{
+	MV_BOOL 				enable;		/* enable */
+	MV_BOOL					autoEnable;	/* counter/Timer                    */
+}MV_CNTMR_CTRL;
+
+
+/* Functions */
+
+/* Load an init Value to a given counter/timer */
+MV_STATUS mvCntmrLoad(MV_U32 countNum, MV_U32 value);
+
+/* Returns the value of the given Counter/Timer */
+MV_U32 mvCntmrRead(MV_U32 countNum);
+
+/* Write a value of the given Counter/Timer */
+void mvCntmrWrite(MV_U32 countNum,MV_U32 countVal);
+
+/* Set the Control to a given counter/timer */
+MV_STATUS mvCntmrCtrlSet(MV_U32 countNum, MV_CNTMR_CTRL *pCtrl);
+
+/* Get the value of a given counter/timer */
+MV_STATUS mvCntmrCtrlGet(MV_U32 countNum, MV_CNTMR_CTRL *pCtrl);
+
+/* Set the Enable-Bit to logic '1' ==> starting the counter. */
+MV_STATUS mvCntmrEnable(MV_U32 countNum);
+
+/* Stop the counter/timer running, and returns its Value. */
+MV_STATUS mvCntmrDisable(MV_U32 countNum);
+
+/* Combined all the sub-operations above to one function: Load,setMode,Enable */
+MV_STATUS mvCntmrStart(MV_U32 countNum, MV_U32 value,
+                       MV_CNTMR_CTRL *pCtrl);
+
+#endif /* __INCmvTmrWtdgh */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmrRegs.h b/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmrRegs.h
new file mode 100644
index 000000000000..1cd904168674
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/cntmr/mvCntmrRegs.h
@@ -0,0 +1,121 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvTmrwtdgRegsh
+#define __INCmvTmrwtdgRegsh
+
+/*******************************************/
+/* ARM Timers Registers Map                */
+/*******************************************/
+
+#define CNTMR_RELOAD_REG(tmrNum)	(CNTMR_BASE + 0x10 + (tmrNum)*8 + \
+						(((tmrNum) <= 3)?0:8))
+#define CNTMR_VAL_REG(tmrNum)		(CNTMR_BASE + 0x14 + (tmrNum)*8 + \
+						(((tmrNum) <= 3)?0:8))
+#define CNTMR_CTRL_REG			(CNTMR_BASE)
+
+/*For MV78XX0*/
+#define CNTMR_CAUSE_REG		(CPU_AHB_MBUS_CAUSE_INT_REG(whoAmI()))
+#define CNTMR_MASK_REG		(CPU_AHB_MBUS_MASK_INT_REG(whoAmI()))
+
+/* ARM Timers Registers Map                */
+/*******************************************/
+
+
+/* ARM Timers Control Register */
+/* CPU_TIMERS_CTRL_REG (CTCR) */
+
+#define TIMER0_NUM				0
+#define TIMER1_NUM				1
+#define WATCHDOG_NUM			2
+#define TIMER2_NUM				3
+#define TIMER3_NUM				4
+
+#define CTCR_ARM_TIMER_EN_OFFS(cntr)	(cntr * 2)
+#define CTCR_ARM_TIMER_EN_MASK(cntr)	(1 << CTCR_ARM_TIMER_EN_OFFS)
+#define CTCR_ARM_TIMER_EN(cntr)			(1 << CTCR_ARM_TIMER_EN_OFFS(cntr))
+#define CTCR_ARM_TIMER_DIS(cntr)		(0 << CTCR_ARM_TIMER_EN_OFFS(cntr))
+
+#define CTCR_ARM_TIMER_AUTO_OFFS(cntr)	((cntr * 2) + 1)
+#define CTCR_ARM_TIMER_AUTO_MASK(cntr)	BIT1
+#define CTCR_ARM_TIMER_AUTO_EN(cntr)	(1 << CTCR_ARM_TIMER_AUTO_OFFS(cntr))
+#define CTCR_ARM_TIMER_AUTO_DIS(cntr)	(0 << CTCR_ARM_TIMER_AUTO_OFFS(cntr))
+
+
+/* ARM Timer\Watchdog Reload Register */
+/* CNTMR_RELOAD_REG (TRR) */
+
+#define TRG_ARM_TIMER_REL_OFFS			0
+#define TRG_ARM_TIMER_REL_MASK			0xffffffff
+
+/* ARM Timer\Watchdog Register */
+/* CNTMR_VAL_REG (TVRG) */
+
+#define TVR_ARM_TIMER_OFFS			0
+#define TVR_ARM_TIMER_MASK			0xffffffff
+#define TVR_ARM_TIMER_MAX			0xffffffff
+
+
+
+#endif /* __INCmvTmrwtdgRegsh */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuCntrs.c b/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuCntrs.c
new file mode 100644
index 000000000000..03d6d0936fca
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuCntrs.c
@@ -0,0 +1,207 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include "mvOs.h"
+#include "mvCpuCntrs.h"
+
+
+const static MV_CPU_CNTRS_OPS  mvCpuCntrsOpsTbl[MV_CPU_CNTRS_NUM][MV_CPU_CNTRS_OPS_NUM] =
+{
+    /*0*/
+    {
+        MV_CPU_CNTRS_CYCLES,            MV_CPU_CNTRS_DCACHE_READ_HIT,       MV_CPU_CNTRS_DCACHE_READ_MISS,
+        MV_CPU_CNTRS_DCACHE_WRITE_HIT,  MV_CPU_CNTRS_DCACHE_WRITE_MISS,     MV_CPU_CNTRS_INSTRUCTIONS,
+        MV_CPU_CNTRS_INVALID,           MV_CPU_CNTRS_INVALID,               MV_CPU_CNTRS_INVALID,
+        MV_CPU_CNTRS_MMU_READ_LATENCY,  MV_CPU_CNTRS_ICACHE_READ_LATENCY,   MV_CPU_CNTRS_WB_WRITE_LATENCY,
+        MV_CPU_CNTRS_LDM_STM_HOLD,      MV_CPU_CNTRS_INVALID,               MV_CPU_CNTRS_INVALID,
+        MV_CPU_CNTRS_DATA_WRITE_ACCESS, MV_CPU_CNTRS_DATA_READ_ACCESS,      MV_CPU_CNTRS_INVALID,
+        MV_CPU_CNTRS_BRANCH_PREDICT_COUNT,
+    },
+    /*1*/
+    {
+        MV_CPU_CNTRS_CYCLES,            MV_CPU_CNTRS_ICACHE_READ_MISS,      MV_CPU_CNTRS_DCACHE_READ_MISS,
+        MV_CPU_CNTRS_DCACHE_WRITE_MISS, MV_CPU_CNTRS_ITLB_MISS,             MV_CPU_CNTRS_SINGLE_ISSUE,
+        MV_CPU_CNTRS_INVALID,           MV_CPU_CNTRS_BRANCH_RETIRED,        MV_CPU_CNTRS_INVALID,
+        MV_CPU_CNTRS_MMU_READ_BEAT,     MV_CPU_CNTRS_ICACHE_READ_LATENCY,   MV_CPU_CNTRS_WB_WRITE_BEAT,
+        MV_CPU_CNTRS_INVALID,           MV_CPU_CNTRS_IS_HOLD,               MV_CPU_CNTRS_DATA_READ_ACCESS,
+        MV_CPU_CNTRS_INVALID,           MV_CPU_CNTRS_INVALID,               MV_CPU_CNTRS_INVALID,
+        MV_CPU_CNTRS_INVALID,
+    },
+    /*2*/
+    {
+        MV_CPU_CNTRS_CYCLES,            MV_CPU_CNTRS_INVALID,               MV_CPU_CNTRS_DCACHE_ACCESS,
+        MV_CPU_CNTRS_DTLB_MISS,         MV_CPU_CNTRS_INVALID,               MV_CPU_CNTRS_INVALID,
+        MV_CPU_CNTRS_INVALID,           MV_CPU_CNTRS_BRANCH_PREDICT_MISS,   MV_CPU_CNTRS_WB_WRITE_BEAT,
+        MV_CPU_CNTRS_INVALID,           MV_CPU_CNTRS_DCACHE_READ_LATENCY,   MV_CPU_CNTRS_DCACHE_WRITE_LATENCY,
+        MV_CPU_CNTRS_INVALID,           MV_CPU_CNTRS_INVALID,               MV_CPU_CNTRS_BIU_SIMULT_ACCESS,
+        MV_CPU_CNTRS_INVALID,           MV_CPU_CNTRS_INVALID,               MV_CPU_CNTRS_INVALID,
+        MV_CPU_CNTRS_INVALID,
+    },
+    /*3*/
+    {
+        MV_CPU_CNTRS_CYCLES,            MV_CPU_CNTRS_DCACHE_READ_MISS,      MV_CPU_CNTRS_DCACHE_WRITE_MISS,
+        MV_CPU_CNTRS_TLB_MISS,          MV_CPU_CNTRS_INVALID,               MV_CPU_CNTRS_INVALID,
+        MV_CPU_CNTRS_INVALID,           MV_CPU_CNTRS_BRANCH_TAKEN,          MV_CPU_CNTRS_WB_FULL_CYCLES,
+        MV_CPU_CNTRS_INVALID,           MV_CPU_CNTRS_DCACHE_READ_BEAT,      MV_CPU_CNTRS_DCACHE_WRITE_BEAT,
+        MV_CPU_CNTRS_INVALID,           MV_CPU_CNTRS_INVALID,               MV_CPU_CNTRS_BIU_ANY_ACCESS,
+        MV_CPU_CNTRS_INVALID,           MV_CPU_CNTRS_INVALID,               MV_CPU_CNTRS_DATA_WRITE_ACCESS,
+        MV_CPU_CNTRS_INVALID,
+    }
+};
+
+MV_CPU_CNTRS_ENTRY  mvCpuCntrsTbl[MV_CPU_CNTRS_NUM];
+
+MV_CPU_CNTRS_EVENT*   mvCpuCntrsEventTbl[128];
+
+void mvCpuCntrsReset(void)
+{
+    MV_U32 reg = 0;
+
+    MV_ASM ("mcr p15, 0, %0, c15, c13, 0" : : "r" (reg));
+    MV_ASM ("mcr p15, 0, %0, c15, c13, 1" : : "r" (reg));
+    MV_ASM ("mcr p15, 0, %0, c15, c13, 2" : : "r" (reg));
+    MV_ASM ("mcr p15, 0, %0, c15, c13, 3" : : "r" (reg));
+    MV_ASM ("mcr p15, 0, %0, c15, c13, 4" : : "r" (reg));
+    MV_ASM ("mcr p15, 0, %0, c15, c13, 5" : : "r" (reg));
+    MV_ASM ("mcr p15, 0, %0, c15, c13, 6" : : "r" (reg));
+    MV_ASM ("mcr p15, 0, %0, c15, c13, 7" : : "r" (reg));
+}
+
+void program_counter(int counter, int op)
+{
+    MV_U32 reg =  (1 << op) | 0x1; /*enable*/
+
+    switch(counter)
+    {
+        case 0:
+         __asm__ __volatile__ ("mcr p15, 0, %0, c15, c12, 0" : : "r" (reg));
+         return;
+
+        case 1:
+         __asm__ __volatile__ ("mcr p15, 0, %0, c15, c12, 1" : : "r" (reg));
+         return;
+
+        case 2:
+         __asm__ __volatile__ ("mcr p15, 0, %0, c15, c12, 2" : : "r" (reg));
+         return;
+
+        case 3:
+         __asm__ __volatile__ ("mcr p15, 0, %0, c15, c12, 3" : : "r" (reg));
+         return;
+
+        default:
+            mvOsPrintf("error in program_counter: bad counter number (%d)\n", counter);
+    }
+    return;
+}
+
+void mvCpuCntrsEventClear(MV_CPU_CNTRS_EVENT* pEvent)
+{
+    int i;
+
+    for(i=0; i<MV_CPU_CNTRS_NUM; i++)
+    {
+        pEvent->counters_sum[i] = 0;
+    }
+    pEvent->num_of_measurements = 0;
+}
+
+
+MV_CPU_CNTRS_EVENT* mvCpuCntrsEventCreate(char* name, MV_U32 print_threshold)
+{
+    int                     i;
+    MV_CPU_CNTRS_EVENT*     event = mvOsMalloc(sizeof(MV_CPU_CNTRS_EVENT));
+
+    if(event)
+    {
+        strncpy(event->name, name, sizeof(event->name));
+        event->num_of_measurements = 0;
+        event->avg_sample_count = print_threshold;
+        for(i=0; i<MV_CPU_CNTRS_NUM; i++)
+        {
+            event->counters_before[i] = 0;
+            event->counters_after[i] = 0;
+            event->counters_sum[i] = 0;
+        }
+    }
+    return event;
+}
+
+void    mvCpuCntrsEventDelete(MV_CPU_CNTRS_EVENT* event)
+{
+    if(event != NULL)
+        mvOsFree(event);
+}
+
+
+MV_STATUS   mvCpuCntrsProgram(int counter, MV_CPU_CNTRS_OPS op,
+                                 char* name, MV_U32 overhead)
+{
+    int     i;
+
+    /* Find required operations */
+    for(i=0; i<MV_CPU_CNTRS_OPS_NUM; i++)
+    {
+        if( mvCpuCntrsOpsTbl[counter][i] == op)
+        {
+            strncpy(mvCpuCntrsTbl[counter].name, name, sizeof(mvCpuCntrsTbl[counter].name));
+            mvCpuCntrsTbl[counter].operation = op;
+            mvCpuCntrsTbl[counter].opIdx = i+1;
+            mvCpuCntrsTbl[counter].overhead = overhead;
+            program_counter(counter, mvCpuCntrsTbl[counter].opIdx);
+            mvOsPrintf("Counter=%d, opIdx=%d, overhead=%d\n",
+                        counter, mvCpuCntrsTbl[counter].opIdx, mvCpuCntrsTbl[counter].overhead);
+            return MV_OK;
+        }
+    }
+    return MV_NOT_FOUND;
+}
+
+void    mvCpuCntrsShow(MV_CPU_CNTRS_EVENT* pEvent)
+{
+    int     i;
+    MV_U64  counters_avg;
+
+    if(pEvent->num_of_measurements < pEvent->avg_sample_count)
+        return;
+
+    mvOsPrintf("%16s: ", pEvent->name);
+    for(i=0; i<MV_CPU_CNTRS_NUM; i++)
+    {
+        counters_avg = mvOsDivMod64(pEvent->counters_sum[i],
+                                  pEvent->num_of_measurements, NULL);
+        if(counters_avg >= mvCpuCntrsTbl[i].overhead)
+            counters_avg -= mvCpuCntrsTbl[i].overhead;
+        else
+            counters_avg = 0;
+
+        mvOsPrintf("%s=%5llu, ", mvCpuCntrsTbl[i].name, counters_avg);
+    }
+    mvOsPrintf("\n");
+    mvCpuCntrsEventClear(pEvent);
+    mvCpuCntrsReset();
+}
+
+void    mvCpuCntrsStatus(void)
+{
+    int i;
+
+    for(i=0; i<MV_CPU_CNTRS_NUM; i++)
+    {
+        mvOsPrintf("#%d: %s, overhead=%d\n",
+            i, mvCpuCntrsTbl[i].name, mvCpuCntrsTbl[i].overhead);
+    }
+}
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuCntrs.h b/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuCntrs.h
new file mode 100644
index 000000000000..27e83c0e1d2a
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuCntrs.h
@@ -0,0 +1,212 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#ifndef __mvCpuCntrs_h__
+#define __mvCpuCntrs_h__
+
+#include "mvTypes.h"
+#include "mvOs.h"
+
+
+#define MV_CPU_CNTRS_NUM            4
+#define MV_CPU_CNTRS_OPS_NUM        32
+
+typedef enum
+{
+    MV_CPU_CNTRS_INVALID = 0,
+    MV_CPU_CNTRS_CYCLES,
+    MV_CPU_CNTRS_ICACHE_READ_MISS,
+    MV_CPU_CNTRS_DCACHE_ACCESS,
+    MV_CPU_CNTRS_DCACHE_READ_MISS,
+    MV_CPU_CNTRS_DCACHE_READ_HIT,
+    MV_CPU_CNTRS_DCACHE_WRITE_MISS,
+    MV_CPU_CNTRS_DCACHE_WRITE_HIT,
+    MV_CPU_CNTRS_DTLB_MISS,
+    MV_CPU_CNTRS_TLB_MISS,
+    MV_CPU_CNTRS_ITLB_MISS,
+    MV_CPU_CNTRS_INSTRUCTIONS,
+    MV_CPU_CNTRS_SINGLE_ISSUE,
+    MV_CPU_CNTRS_MMU_READ_LATENCY,
+    MV_CPU_CNTRS_MMU_READ_BEAT,
+    MV_CPU_CNTRS_BRANCH_RETIRED,
+    MV_CPU_CNTRS_BRANCH_TAKEN,
+    MV_CPU_CNTRS_BRANCH_PREDICT_MISS,
+    MV_CPU_CNTRS_BRANCH_PREDICT_COUNT,
+    MV_CPU_CNTRS_WB_FULL_CYCLES,
+    MV_CPU_CNTRS_WB_WRITE_LATENCY,
+    MV_CPU_CNTRS_WB_WRITE_BEAT,
+    MV_CPU_CNTRS_ICACHE_READ_LATENCY,
+    MV_CPU_CNTRS_ICACHE_READ_BEAT,
+    MV_CPU_CNTRS_DCACHE_READ_LATENCY,
+    MV_CPU_CNTRS_DCACHE_READ_BEAT,
+    MV_CPU_CNTRS_DCACHE_WRITE_LATENCY,
+    MV_CPU_CNTRS_DCACHE_WRITE_BEAT,
+    MV_CPU_CNTRS_LDM_STM_HOLD,
+    MV_CPU_CNTRS_IS_HOLD,
+    MV_CPU_CNTRS_DATA_WRITE_ACCESS,
+    MV_CPU_CNTRS_DATA_READ_ACCESS,
+    MV_CPU_CNTRS_BIU_SIMULT_ACCESS,
+    MV_CPU_CNTRS_BIU_ANY_ACCESS,
+
+} MV_CPU_CNTRS_OPS;
+
+typedef struct
+{
+    char                name[16];
+    MV_CPU_CNTRS_OPS    operation;
+    int                 opIdx;
+    MV_U32              overhead;
+
+} MV_CPU_CNTRS_ENTRY;
+
+
+typedef struct
+{
+    char   name[16];
+    MV_U32 num_of_measurements;
+    MV_U32 avg_sample_count;
+    MV_U64 counters_before[MV_CPU_CNTRS_NUM];
+    MV_U64 counters_after[MV_CPU_CNTRS_NUM];
+    MV_U64 counters_sum[MV_CPU_CNTRS_NUM];
+
+} MV_CPU_CNTRS_EVENT;
+
+extern MV_CPU_CNTRS_ENTRY  mvCpuCntrsTbl[MV_CPU_CNTRS_NUM];
+
+
+MV_STATUS           mvCpuCntrsProgram(int counter, MV_CPU_CNTRS_OPS op,
+                                      char* name, MV_U32 overhead);
+void                mvCpuCntrsInit(void);
+MV_CPU_CNTRS_EVENT* mvCpuCntrsEventCreate(char* name, MV_U32 print_threshold);
+void                mvCpuCntrsEventDelete(MV_CPU_CNTRS_EVENT* event);
+void                mvCpuCntrsReset(void);
+void                mvCpuCntrsShow(MV_CPU_CNTRS_EVENT* pEvent);
+void 		    mvCpuCntrsEventClear(MV_CPU_CNTRS_EVENT* pEvent);
+
+/* internal */
+void 		    program_counter(int counter, int op);
+
+static INLINE MV_U64 mvCpuCntrsRead(const int counter)
+{
+    MV_U32 low = 0, high = 0;
+    MV_U32 ll = 0;
+
+    switch(counter)
+    {
+        case 0:
+            MV_ASM  ("mcr p15, 0, %0, c15, c12, 0" : : "r" (ll));
+            MV_ASM  ("mrc p15, 0, %0, c15, c13, 0" : "=r" (low));
+            MV_ASM  ("mrc p15, 0, %0, c15, c13, 1" : "=r" (high));
+         break;
+
+        case 1:
+            MV_ASM  ("mcr p15, 0, %0, c15, c12, 1" : : "r" (ll));
+            MV_ASM  ("mrc p15, 0, %0, c15, c13, 2" : "=r" (low));
+            MV_ASM  ("mrc p15, 0, %0, c15, c13, 3" : "=r" (high));
+         break;
+
+        case 2:
+            MV_ASM  ("mcr p15, 0, %0, c15, c12, 2" : : "r" (ll));
+            MV_ASM  ("mrc p15, 0, %0, c15, c13, 4" : "=r" (low));
+            MV_ASM  ("mrc p15, 0, %0, c15, c13, 5" : "=r" (high));
+         break;
+
+        case 3:
+            MV_ASM  ("mcr p15, 0, %0, c15, c12, 3" : : "r" (ll));
+            MV_ASM  ("mrc p15, 0, %0, c15, c13, 6" : "=r" (low));
+            MV_ASM  ("mrc p15, 0, %0, c15, c13, 7" : "=r" (high));
+         break;
+
+        default:
+            mvOsPrintf("mv_cpu_cntrs_read: bad counter number (%d)\n", counter);
+    }
+    program_counter(counter, mvCpuCntrsTbl[counter].opIdx);
+    return (((MV_U64)high << 32 ) | low);
+
+}
+
+
+static INLINE void mvCpuCntrsReadBefore(MV_CPU_CNTRS_EVENT* pEvent)
+{
+#if 0
+    int i;
+
+    /* order is important - we want to measure the cycle count last here! */
+    for(i=0; i<MV_CPU_CNTRS_NUM; i++)
+        pEvent->counters_before[i] = mvCpuCntrsRead(i);
+#else
+    pEvent->counters_before[1] = mvCpuCntrsRead(1);
+    pEvent->counters_before[3] = mvCpuCntrsRead(3);
+    pEvent->counters_before[0] = mvCpuCntrsRead(0);
+    pEvent->counters_before[2] = mvCpuCntrsRead(2);
+#endif
+}
+
+static INLINE void mvCpuCntrsReadAfter(MV_CPU_CNTRS_EVENT* pEvent)
+{
+    int i;
+
+#if 0
+    /* order is important - we want to measure the cycle count first here! */
+    for(i=0; i<MV_CPU_CNTRS_NUM; i++)
+        pEvent->counters_after[i] = mvCpuCntrsRead(i);
+#else
+    pEvent->counters_after[2] = mvCpuCntrsRead(2);
+    pEvent->counters_after[0] = mvCpuCntrsRead(0);
+    pEvent->counters_after[3] = mvCpuCntrsRead(3);
+    pEvent->counters_after[1] = mvCpuCntrsRead(1);
+#endif
+
+    for(i=0; i<MV_CPU_CNTRS_NUM; i++)
+    {
+        pEvent->counters_sum[i] += (pEvent->counters_after[i] - pEvent->counters_before[i]);
+    }
+    pEvent->num_of_measurements++;
+}
+
+
+#ifdef CONFIG_MV_CPU_PERF_CNTRS
+
+#define MV_CPU_CNTRS_READ(counter)  mvCpuCntrsRead(counter)
+
+#define MV_CPU_CNTRS_START(event)	mvCpuCntrsReadBefore(event)
+
+#define MV_CPU_CNTRS_STOP(event)	mvCpuCntrsReadAfter(event)
+
+#define MV_CPU_CNTRS_SHOW(event)	mvCpuCntrsShow(event)
+
+#else
+
+#define MV_CPU_CNTRS_READ(counter)
+#define MV_CPU_CNTRS_START(event)
+#define MV_CPU_CNTRS_STOP(event)
+#define MV_CPU_CNTRS_SHOW(event)
+
+#endif /* CONFIG_MV_CPU_PERF_CNTRS */
+
+
+#endif /* __mvCpuCntrs_h__ */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuL2Cntrs.c b/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuL2Cntrs.c
new file mode 100644
index 000000000000..24010029518b
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuL2Cntrs.c
@@ -0,0 +1,143 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include "mvOs.h"
+#include "mvCpuL2Cntrs.h"
+
+
+
+MV_CPU_L2_CNTRS_ENTRY   mvCpuL2CntrsTbl[MV_CPU_L2_CNTRS_NUM];
+
+MV_CPU_L2_CNTRS_EVENT*  mvCpuL2CntrsEventTbl[128];
+
+void mvCpuL2CntrsReset(void)
+{
+    MV_U32 reg = 0;
+
+    MV_ASM ("mcr p15, 6, %0, c15, c13, 0" : : "r" (reg));
+    MV_ASM ("mcr p15, 6, %0, c15, c13, 1" : : "r" (reg));
+    MV_ASM ("mcr p15, 6, %0, c15, c13, 2" : : "r" (reg));
+    MV_ASM ("mcr p15, 6, %0, c15, c13, 3" : : "r" (reg));
+}
+
+static void mvCpuL2CntrConfig(int counter, int op)
+{
+    MV_U32 reg =  (1 << op) | 0x1; /*enable*/
+
+    switch(counter)
+    {
+        case 0:
+         MV_ASM ("mcr p15, 6, %0, c15, c12, 0" : : "r" (reg));
+         return;
+
+        case 1:
+         MV_ASM ("mcr p15, 6, %0, c15, c12, 1" : : "r" (reg));
+         return;
+
+        default:
+            mvOsPrintf("mvCpuL2CntrConfig: bad counter number (%d)\n", counter);
+    }
+    return;
+}
+
+void mvCpuL2CntrsEventClear(MV_CPU_L2_CNTRS_EVENT* pEvent)
+{
+    int i;
+
+    for(i=0; i<MV_CPU_L2_CNTRS_NUM; i++)
+    {
+        pEvent->counters_sum[i] = 0;
+    }
+    pEvent->num_of_measurements = 0;
+}
+
+
+MV_CPU_L2_CNTRS_EVENT* mvCpuL2CntrsEventCreate(char* name, MV_U32 print_threshold)
+{
+    int                     i;
+    MV_CPU_L2_CNTRS_EVENT*  event = mvOsMalloc(sizeof(MV_CPU_L2_CNTRS_EVENT));
+
+    if(event)
+    {
+        strncpy(event->name, name, sizeof(event->name));
+        event->num_of_measurements = 0;
+        event->avg_sample_count = print_threshold;
+        for(i=0; i<MV_CPU_L2_CNTRS_NUM; i++)
+        {
+            event->counters_before[i] = 0;
+            event->counters_after[i] = 0;
+            event->counters_sum[i] = 0;
+        }
+    }
+    return event;
+}
+
+void    mvCpuL2CntrsEventDelete(MV_CPU_L2_CNTRS_EVENT* event)
+{
+    if(event != NULL)
+        mvOsFree(event);
+}
+
+
+MV_STATUS   mvCpuL2CntrsProgram(int counter, MV_CPU_L2_CNTRS_OPS op,
+                                 char* name, MV_U32 overhead)
+{
+    strncpy(mvCpuL2CntrsTbl[counter].name, name, sizeof(mvCpuL2CntrsTbl[counter].name));
+    mvCpuL2CntrsTbl[counter].operation = op;
+    mvCpuL2CntrsTbl[counter].opIdx = op;
+    mvCpuL2CntrsTbl[counter].overhead = overhead;
+    mvCpuL2CntrConfig(counter, op);
+    mvOsPrintf("CPU L2 Counter %d: operation=%d, overhead=%d\n",
+                        counter, op, overhead);
+    return MV_OK;
+}
+
+void    mvCpuL2CntrsShow(MV_CPU_L2_CNTRS_EVENT* pEvent)
+{
+    int     i;
+    MV_U64  counters_avg;
+
+    if(pEvent->num_of_measurements < pEvent->avg_sample_count)
+        return;
+
+    mvOsPrintf("%16s: ", pEvent->name);
+    for(i=0; i<MV_CPU_L2_CNTRS_NUM; i++)
+    {
+        counters_avg = mvOsDivMod64(pEvent->counters_sum[i],
+                                    pEvent->num_of_measurements, NULL);
+
+        if(counters_avg >= mvCpuL2CntrsTbl[i].overhead)
+            counters_avg -= mvCpuL2CntrsTbl[i].overhead;
+        else
+            counters_avg = 0;
+
+        mvOsPrintf("%s=%5llu, ", mvCpuL2CntrsTbl[i].name, counters_avg);
+    }
+    mvOsPrintf("\n");
+    mvCpuL2CntrsEventClear(pEvent);
+    mvCpuL2CntrsReset();
+}
+
+void    mvCpuL2CntrsStatus(void)
+{
+    int i;
+
+    for(i=0; i<MV_CPU_L2_CNTRS_NUM; i++)
+    {
+        mvOsPrintf("#%d: %s, overhead=%d\n",
+            i, mvCpuL2CntrsTbl[i].name, mvCpuL2CntrsTbl[i].overhead);
+    }
+}
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuL2Cntrs.h b/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuL2Cntrs.h
new file mode 100644
index 000000000000..8b96ef86144a
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/cpu/mvCpuL2Cntrs.h
@@ -0,0 +1,150 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#ifndef __mvCpuL2Cntrs_h__
+#define __mvCpuL2Cntrs_h__
+
+#include "mvTypes.h"
+#include "mvOs.h"
+
+
+#define MV_CPU_L2_CNTRS_NUM         2
+
+typedef enum
+{
+    MV_CPU_L2_CNTRS_ENABLE = 0,
+    MV_CPU_L2_CNTRS_DATA_REQ,
+    MV_CPU_L2_CNTRS_DATA_MISS_REQ,
+    MV_CPU_L2_CNTRS_INST_REQ,
+    MV_CPU_L2_CNTRS_INST_MISS_REQ,
+    MV_CPU_L2_CNTRS_DATA_READ_REQ,
+    MV_CPU_L2_CNTRS_DATA_READ_MISS_REQ,
+    MV_CPU_L2_CNTRS_DATA_WRITE_REQ,
+    MV_CPU_L2_CNTRS_DATA_WRITE_MISS_REQ,
+    MV_CPU_L2_CNTRS_RESERVED,
+    MV_CPU_L2_CNTRS_DIRTY_EVICT_REQ,
+    MV_CPU_L2_CNTRS_EVICT_BUFF_STALL,
+    MV_CPU_L2_CNTRS_ACTIVE_CYCLES,
+
+} MV_CPU_L2_CNTRS_OPS;
+
+typedef struct
+{
+    char                name[16];
+    MV_CPU_L2_CNTRS_OPS operation;
+    int                 opIdx;
+    MV_U32              overhead;
+
+} MV_CPU_L2_CNTRS_ENTRY;
+
+
+typedef struct
+{
+    char   name[16];
+    MV_U32 num_of_measurements;
+    MV_U32 avg_sample_count;
+    MV_U64 counters_before[MV_CPU_L2_CNTRS_NUM];
+    MV_U64 counters_after[MV_CPU_L2_CNTRS_NUM];
+    MV_U64 counters_sum[MV_CPU_L2_CNTRS_NUM];
+
+} MV_CPU_L2_CNTRS_EVENT;
+
+
+MV_STATUS               mvCpuL2CntrsProgram(int counter, MV_CPU_L2_CNTRS_OPS op,
+                                        char* name, MV_U32 overhead);
+void                    mvCpuL2CntrsInit(void);
+MV_CPU_L2_CNTRS_EVENT*  mvCpuL2CntrsEventCreate(char* name, MV_U32 print_threshold);
+void                    mvCpuL2CntrsEventDelete(MV_CPU_L2_CNTRS_EVENT* event);
+void                    mvCpuL2CntrsReset(void);
+void                    mvCpuL2CntrsShow(MV_CPU_L2_CNTRS_EVENT* pEvent);
+void 			mvCpuL2CntrsEventClear(MV_CPU_L2_CNTRS_EVENT* pEvent);
+
+static INLINE MV_U64 mvCpuL2CntrsRead(const int counter)
+{
+    MV_U32 low = 0, high = 0;
+
+    switch(counter)
+    {
+        case 0:
+            MV_ASM  ("mrc p15, 6, %0, c15, c13, 0" : "=r" (low));
+            MV_ASM  ("mrc p15, 6, %0, c15, c13, 1" : "=r" (high));
+         break;
+
+        case 1:
+            MV_ASM  ("mrc p15, 6, %0, c15, c13, 2" : "=r" (low));
+            MV_ASM  ("mrc p15, 6, %0, c15, c13, 3" : "=r" (high));
+         break;
+
+        default:
+            mvOsPrintf("mvCpuL2CntrsRead: bad counter number (%d)\n", counter);
+    }
+    return (((MV_U64)high << 32 ) | low);
+
+}
+
+static INLINE void mvCpuL2CntrsReadBefore(MV_CPU_L2_CNTRS_EVENT* pEvent)
+{
+    int i;
+
+    for(i=0; i<MV_CPU_L2_CNTRS_NUM; i++)
+        pEvent->counters_before[i] = mvCpuL2CntrsRead(i);
+}
+
+static INLINE void mvCpuL2CntrsReadAfter(MV_CPU_L2_CNTRS_EVENT* pEvent)
+{
+    int i;
+
+    for(i=0; i<MV_CPU_L2_CNTRS_NUM; i++)
+    {
+        pEvent->counters_after[i] = mvCpuL2CntrsRead(i);
+        pEvent->counters_sum[i] += (pEvent->counters_after[i] - pEvent->counters_before[i]);
+    }
+    pEvent->num_of_measurements++;
+}
+
+
+#ifdef CONFIG_MV_CPU_L2_PERF_CNTRS
+
+#define MV_CPU_L2_CNTRS_READ(counter)   mvCpuL2CntrsRead(counter)
+
+#define MV_CPU_L2_CNTRS_START(event)	mvCpuL2CntrsReadBefore(event)
+
+#define MV_CPU_L2_CNTRS_STOP(event)	    mvCpuL2CntrsReadAfter(event)
+
+#define MV_CPU_L2_CNTRS_SHOW(event)	    mvCpuL2CntrsShow(event)
+
+#else
+
+#define MV_CPU_L2_CNTRS_READ(counter)
+#define MV_CPU_L2_CNTRS_START(event)
+#define MV_CPU_L2_CNTRS_STOP(event)
+#define MV_CPU_L2_CNTRS_SHOW(event)
+
+#endif /* CONFIG_MV_CPU_L2_PERF_CNTRS */
+
+
+#endif /* __mvCpuL2Cntrs_h__ */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDram.c b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDram.c
new file mode 100644
index 000000000000..2fcaf59266eb
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDram.c
@@ -0,0 +1,1478 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "ddr1_2/mvDram.h"
+#include "boardEnv/mvBoardEnvLib.h"
+
+#undef MV_DEBUG
+#ifdef MV_DEBUG
+#define DB(x) x
+#else
+#define DB(x)
+#endif
+
+static MV_VOID cpyDimm2BankInfo(MV_DIMM_INFO *pDimmInfo,
+                                            MV_DRAM_BANK_INFO *pBankInfo);
+static MV_U32  cas2ps(MV_U8 spd_byte);
+/*******************************************************************************
+* mvDramBankGet - Get the DRAM bank paramters.
+*
+* DESCRIPTION:
+*       This function retrieves DRAM bank parameters as described in
+*       DRAM_BANK_INFO struct to the controller DRAM unit. In case the board
+*       has its DRAM on DIMMs it will use its EEPROM to extract SPD data
+*       from it. Otherwise, if the DRAM is soldered on board, the function
+*       should insert its bank information into MV_DRAM_BANK_INFO struct.
+*
+* INPUT:
+*       bankNum  - Board DRAM bank number.
+*
+* OUTPUT:
+*       pBankInfo  - DRAM bank information struct.
+*
+* RETURN:
+*       MV_FAIL - Bank parameters could not be read.
+*
+*******************************************************************************/
+MV_STATUS mvDramBankInfoGet(MV_U32 bankNum, MV_DRAM_BANK_INFO *pBankInfo)
+{
+    MV_DIMM_INFO dimmInfo;
+
+    DB(mvOsPrintf("Dram: mvDramBankInfoGet bank %d\n", bankNum));
+    /* zero pBankInfo structure */
+    memset(pBankInfo, 0, sizeof(*pBankInfo));
+
+    if((NULL == pBankInfo) || (bankNum >= MV_DRAM_MAX_CS ))
+    {
+        DB(mvOsPrintf("Dram: mvDramBankInfoGet bad params \n"));
+        return MV_BAD_PARAM;
+    }
+    if( MV_OK != dimmSpdGet((MV_U32)(bankNum/2), &dimmInfo))
+    {
+    DB(mvOsPrintf("Dram: ERR dimmSpdGet failed to get dimm info \n"));
+    return MV_FAIL;
+    }
+    if((dimmInfo.numOfModuleBanks == 1) && ((bankNum % 2) == 1))
+    {
+    DB(mvOsPrintf("Dram: ERR dimmSpdGet. Can't find DIMM bank 2 \n"));
+    return MV_FAIL;
+    }
+
+    /* convert Dimm info to Bank info */
+    cpyDimm2BankInfo(&dimmInfo, pBankInfo);
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* cpyDimm2BankInfo - Convert a Dimm info struct into a bank info struct.
+*
+* DESCRIPTION:
+*       Convert a Dimm info struct into a bank info struct.
+*
+* INPUT:
+*       pDimmInfo - DIMM information structure.
+*
+* OUTPUT:
+*       pBankInfo  - DRAM bank information struct.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+static MV_VOID cpyDimm2BankInfo(MV_DIMM_INFO *pDimmInfo,
+                                                MV_DRAM_BANK_INFO *pBankInfo)
+{
+    pBankInfo->memoryType = pDimmInfo->memoryType;
+
+    /* DIMM dimensions */
+    pBankInfo->numOfRowAddr = pDimmInfo->numOfRowAddr;
+    pBankInfo->numOfColAddr = pDimmInfo->numOfColAddr;
+    pBankInfo->dataWidth = pDimmInfo->dataWidth;
+    pBankInfo->errorCheckType = pDimmInfo->errorCheckType;
+    pBankInfo->sdramWidth = pDimmInfo->sdramWidth;
+    pBankInfo->errorCheckDataWidth = pDimmInfo->errorCheckDataWidth;
+    pBankInfo->numOfBanksOnEachDevice = pDimmInfo->numOfBanksOnEachDevice;
+    pBankInfo->suportedCasLatencies = pDimmInfo->suportedCasLatencies;
+    pBankInfo->refreshInterval = pDimmInfo->refreshInterval;
+
+    /* DIMM timing parameters */
+    pBankInfo->minCycleTimeAtMaxCasLatPs = pDimmInfo->minCycleTimeAtMaxCasLatPs;
+    pBankInfo->minCycleTimeAtMaxCasLatMinus1Ps =
+                                    pDimmInfo->minCycleTimeAtMaxCasLatMinus1Ps;
+    pBankInfo->minCycleTimeAtMaxCasLatMinus2Ps =
+                                    pDimmInfo->minCycleTimeAtMaxCasLatMinus2Ps;
+
+    pBankInfo->minRowPrechargeTime     = pDimmInfo->minRowPrechargeTime;
+    pBankInfo->minRowActiveToRowActive = pDimmInfo->minRowActiveToRowActive;
+    pBankInfo->minRasToCasDelay        = pDimmInfo->minRasToCasDelay;
+    pBankInfo->minRasPulseWidth        = pDimmInfo->minRasPulseWidth;
+    pBankInfo->minWriteRecoveryTime    = pDimmInfo->minWriteRecoveryTime;
+    pBankInfo->minWriteToReadCmdDelay  = pDimmInfo->minWriteToReadCmdDelay;
+    pBankInfo->minReadToPrechCmdDelay  = pDimmInfo->minReadToPrechCmdDelay;
+    pBankInfo->minRefreshToActiveCmd   = pDimmInfo->minRefreshToActiveCmd;
+
+    /* Parameters calculated from the extracted DIMM information */
+    pBankInfo->size = pDimmInfo->size/pDimmInfo->numOfModuleBanks;
+    pBankInfo->deviceDensity = pDimmInfo->deviceDensity;
+    pBankInfo->numberOfDevices = pDimmInfo->numberOfDevices /
+                                 pDimmInfo->numOfModuleBanks;
+
+    /* DIMM attributes (MV_TRUE for yes) */
+
+    if ((pDimmInfo->memoryType == MEM_TYPE_SDRAM) ||
+        (pDimmInfo->memoryType == MEM_TYPE_DDR1)   )
+    {
+        if (pDimmInfo->dimmAttributes & BIT1)
+            pBankInfo->registeredAddrAndControlInputs = MV_TRUE;
+        else
+            pBankInfo->registeredAddrAndControlInputs = MV_FALSE;
+    }
+    else /* pDimmInfo->memoryType == MEM_TYPE_DDR2 */
+    {
+        if (pDimmInfo->dimmTypeInfo & (BIT0 | BIT4))
+            pBankInfo->registeredAddrAndControlInputs = MV_TRUE;
+        else
+            pBankInfo->registeredAddrAndControlInputs = MV_FALSE;
+    }
+
+    return;
+}
+
+/*******************************************************************************
+* dimmSpdCpy - Cpy SPD parameters from dimm 0 to dimm 1.
+*
+* DESCRIPTION:
+*       Read the DIMM SPD parameters from dimm 0 into dimm 1 SPD.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if function could read DIMM parameters, MV_FALSE otherwise.
+*
+*******************************************************************************/
+MV_STATUS dimmSpdCpy(MV_VOID)
+{
+    MV_U32 i;
+    MV_U32 spdChecksum;
+
+    MV_TWSI_SLAVE twsiSlave;
+    MV_U8 data[SPD_SIZE];
+
+    /* zero dimmInfo structure */
+    memset(data, 0, SPD_SIZE);
+
+    /* read the dimm eeprom */
+    DB(mvOsPrintf("DRAM: Read Dimm eeprom\n"));
+    twsiSlave.slaveAddr.address = MV_BOARD_DIMM0_I2C_ADDR;
+    twsiSlave.slaveAddr.type = ADDR7_BIT;
+    twsiSlave.validOffset = MV_TRUE;
+    twsiSlave.offset = 0;
+    twsiSlave.moreThen256 = MV_FALSE;
+
+    if( MV_OK != mvTwsiRead (MV_BOARD_DIMM_I2C_CHANNEL,
+			&twsiSlave, data, SPD_SIZE) )
+    {
+        DB(mvOsPrintf("DRAM: ERR. no DIMM in dimmNum 0\n"));
+        return MV_FAIL;
+    }
+    DB(puts("DRAM: Reading dimm info succeded.\n"));
+
+    /* calculate SPD checksum */
+    spdChecksum = 0;
+
+    for(i = 0 ; i <= 62 ; i++)
+    {
+        spdChecksum += data[i];
+    }
+
+    if ((spdChecksum & 0xff) != data[63])
+    {
+        DB(mvOsPrintf("DRAM: Warning. Wrong SPD Checksum %2x, expValue=%2x\n",
+                            (MV_U32)(spdChecksum & 0xff), data[63]));
+    }
+    else
+    {
+        DB(mvOsPrintf("DRAM: SPD Checksum ok!\n"));
+    }
+
+    /* copy the SPD content 1:1 into the DIMM 1 SPD */
+    twsiSlave.slaveAddr.address = MV_BOARD_DIMM1_I2C_ADDR;
+    twsiSlave.slaveAddr.type = ADDR7_BIT;
+    twsiSlave.validOffset = MV_TRUE;
+    twsiSlave.offset = 0;
+    twsiSlave.moreThen256 = MV_FALSE;
+
+    for(i = 0 ; i < SPD_SIZE ; i++)
+    {
+	twsiSlave.offset = i;
+	if( MV_OK != mvTwsiWrite (MV_BOARD_DIMM_I2C_CHANNEL,
+				&twsiSlave, &data[i], 1) )
+	{
+	    mvOsPrintf("DRAM: ERR. no DIMM in dimmNum 1 byte %d \n",i);
+	    return MV_FAIL;
+	}
+	mvOsDelay(5);
+    }
+
+    DB(puts("DRAM: Reading dimm info succeded.\n"));
+    return MV_OK;
+}
+
+/*******************************************************************************
+* dimmSpdGet - Get the SPD parameters.
+*
+* DESCRIPTION:
+*       Read the DIMM SPD parameters into given struct parameter.
+*
+* INPUT:
+*       dimmNum - DIMM number. See MV_BOARD_DIMM_NUM enumerator.
+*
+* OUTPUT:
+*       pDimmInfo - DIMM information structure.
+*
+* RETURN:
+*       MV_TRUE if function could read DIMM parameters, MV_FALSE otherwise.
+*
+*******************************************************************************/
+MV_STATUS dimmSpdGet(MV_U32 dimmNum, MV_DIMM_INFO *pDimmInfo)
+{
+    MV_U32 i;
+    MV_U32 density = 1;
+    MV_U32 spdChecksum;
+
+    MV_TWSI_SLAVE twsiSlave;
+    MV_U8 data[SPD_SIZE];
+
+    if((NULL == pDimmInfo)|| (dimmNum >= MAX_DIMM_NUM))
+    {
+        DB(mvOsPrintf("Dram: mvDramBankInfoGet bad params \n"));
+        return MV_BAD_PARAM;
+    }
+
+    /* zero dimmInfo structure */
+    memset(data, 0, SPD_SIZE);
+
+    /* read the dimm eeprom */
+    DB(mvOsPrintf("DRAM: Read Dimm eeprom\n"));
+    twsiSlave.slaveAddr.address = (dimmNum == 0) ?
+                            MV_BOARD_DIMM0_I2C_ADDR : MV_BOARD_DIMM1_I2C_ADDR;
+    twsiSlave.slaveAddr.type = ADDR7_BIT;
+    twsiSlave.validOffset = MV_TRUE;
+    twsiSlave.offset = 0;
+    twsiSlave.moreThen256 = MV_FALSE;
+
+    if( MV_OK != mvTwsiRead (MV_BOARD_DIMM_I2C_CHANNEL,
+			&twsiSlave, data, SPD_SIZE) )
+    {
+        DB(mvOsPrintf("DRAM: ERR. no DIMM in dimmNum %d \n", dimmNum));
+        return MV_FAIL;
+    }
+    DB(puts("DRAM: Reading dimm info succeded.\n"));
+
+    /* calculate SPD checksum */
+    spdChecksum = 0;
+
+        for(i = 0 ; i <= 62 ; i++)
+        {
+        spdChecksum += data[i];
+    }
+
+    if ((spdChecksum & 0xff) != data[63])
+    {
+        DB(mvOsPrintf("DRAM: Warning. Wrong SPD Checksum %2x, expValue=%2x\n",
+                            (MV_U32)(spdChecksum & 0xff), data[63]));
+    }
+    else
+    {
+        DB(mvOsPrintf("DRAM: SPD Checksum ok!\n"));
+    }
+
+    /* copy the SPD content 1:1 into the dimmInfo structure*/
+    for(i = 0 ; i < SPD_SIZE ; i++)
+    {
+        pDimmInfo->spdRawData[i] = data[i];
+        DB(mvOsPrintf("SPD-EEPROM Byte %3d = %3x (%3d)\n",i, data[i], data[i]));
+    }
+
+    DB(mvOsPrintf("DRAM SPD Information:\n"));
+
+    /* Memory type (DDR / SDRAM) */
+    switch (data[DIMM_MEM_TYPE])
+    {
+        case (DIMM_MEM_TYPE_SDRAM):
+            pDimmInfo->memoryType = MEM_TYPE_SDRAM;
+            DB(mvOsPrintf("DRAM Memeory type SDRAM\n"));
+            break;
+        case (DIMM_MEM_TYPE_DDR1):
+            pDimmInfo->memoryType = MEM_TYPE_DDR1;
+            DB(mvOsPrintf("DRAM Memeory type DDR1\n"));
+            break;
+        case (DIMM_MEM_TYPE_DDR2):
+            pDimmInfo->memoryType = MEM_TYPE_DDR2;
+            DB(mvOsPrintf("DRAM Memeory type DDR2\n"));
+            break;
+        default:
+            mvOsPrintf("ERROR: Undefined memory type!\n");
+            return MV_ERROR;
+    }
+
+
+    /* Number Of Row Addresses */
+    pDimmInfo->numOfRowAddr = data[DIMM_ROW_NUM];
+    DB(mvOsPrintf("DRAM numOfRowAddr[3]         %d\n",pDimmInfo->numOfRowAddr));
+
+    /* Number Of Column Addresses */
+    pDimmInfo->numOfColAddr = data[DIMM_COL_NUM];
+    DB(mvOsPrintf("DRAM numOfColAddr[4]         %d\n",pDimmInfo->numOfColAddr));
+
+    /* Number Of Module Banks */
+    pDimmInfo->numOfModuleBanks = data[DIMM_MODULE_BANK_NUM];
+    DB(mvOsPrintf("DRAM numOfModuleBanks[5]     0x%x\n",
+                                                  pDimmInfo->numOfModuleBanks));
+
+    /* Number of module banks encoded differently for DDR2 */
+    if (pDimmInfo->memoryType == MEM_TYPE_DDR2)
+        pDimmInfo->numOfModuleBanks = (pDimmInfo->numOfModuleBanks & 0x7)+1;
+
+    /* Data Width */
+    pDimmInfo->dataWidth = data[DIMM_DATA_WIDTH];
+    DB(mvOsPrintf("DRAM dataWidth[6]            0x%x\n", pDimmInfo->dataWidth));
+
+    /* Minimum Cycle Time At Max CasLatancy */
+    pDimmInfo->minCycleTimeAtMaxCasLatPs = cas2ps(data[DIMM_MIN_CC_AT_MAX_CAS]);
+
+    /* Error Check Type */
+    pDimmInfo->errorCheckType = data[DIMM_ERR_CHECK_TYPE];
+    DB(mvOsPrintf("DRAM errorCheckType[11]      0x%x\n",
+                                                    pDimmInfo->errorCheckType));
+
+    /* Refresh Interval */
+    pDimmInfo->refreshInterval = data[DIMM_REFRESH_INTERVAL];
+    DB(mvOsPrintf("DRAM refreshInterval[12]     0x%x\n",
+                                                   pDimmInfo->refreshInterval));
+
+    /* Sdram Width */
+    pDimmInfo->sdramWidth = data[DIMM_SDRAM_WIDTH];
+    DB(mvOsPrintf("DRAM sdramWidth[13]          0x%x\n",pDimmInfo->sdramWidth));
+
+    /* Error Check Data Width */
+    pDimmInfo->errorCheckDataWidth = data[DIMM_ERR_CHECK_DATA_WIDTH];
+    DB(mvOsPrintf("DRAM errorCheckDataWidth[14] 0x%x\n",
+                                               pDimmInfo->errorCheckDataWidth));
+
+    /* Burst Length Supported */
+    /*     SDRAM/DDR1:
+                    *******-******-******-******-******-******-******-*******
+                    * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+                    *******-******-******-******-******-******-******-*******
+    burst length =  * Page | TBD  | TBD  | TBD  |  8   |  4   |  2   |   1  *
+                    *********************************************************/
+    /*     DDR2:
+                    *******-******-******-******-******-******-******-*******
+                    * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+                    *******-******-******-******-******-******-******-*******
+    burst length =  * Page | TBD  | TBD  | TBD  |  8   |  4   | TBD  | TBD  *
+                    *********************************************************/
+
+    pDimmInfo->burstLengthSupported = data[DIMM_BURST_LEN_SUP];
+    DB(mvOsPrintf("DRAM burstLengthSupported[16] 0x%x\n",
+                                              pDimmInfo->burstLengthSupported));
+
+    /* Number Of Banks On Each Device */
+    pDimmInfo->numOfBanksOnEachDevice = data[DIMM_DEV_BANK_NUM];
+    DB(mvOsPrintf("DRAM numOfBanksOnEachDevice[17] 0x%x\n",
+                                            pDimmInfo->numOfBanksOnEachDevice));
+
+    /* Suported Cas Latencies */
+
+    /*      SDRAM:
+            *******-******-******-******-******-******-******-*******
+            * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+            *******-******-******-******-******-******-******-*******
+    CAS =   * TBD  |  7   |  6   |  5   |  4   |  3   |   2  |   1  *
+            ********************************************************/
+
+    /*     DDR 1:
+            *******-******-******-******-******-******-******-*******
+            * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+            *******-******-******-******-******-******-******-*******
+    CAS =   * TBD  |  4   | 3.5  |   3  | 2.5  |  2   | 1.5  |   1  *
+            *********************************************************/
+
+    /*     DDR 2:
+            *******-******-******-******-******-******-******-*******
+            * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+            *******-******-******-******-******-******-******-*******
+    CAS =   * TBD  | TBD  |  5   |  4   |  3   |  2   | TBD  | TBD  *
+            *********************************************************/
+
+    pDimmInfo->suportedCasLatencies = data[DIMM_SUP_CAL];
+    DB(mvOsPrintf("DRAM suportedCasLatencies[18]    0x%x\n",
+                                              pDimmInfo->suportedCasLatencies));
+
+    /* For DDR2 only, get the DIMM type information */
+    if (pDimmInfo->memoryType == MEM_TYPE_DDR2)
+    {
+        pDimmInfo->dimmTypeInfo = data[DIMM_DDR2_TYPE_INFORMATION];
+        DB(mvOsPrintf("DRAM dimmTypeInfo[20] (DDR2) 0x%x\n",
+                                                      pDimmInfo->dimmTypeInfo));
+    }
+
+    /* SDRAM Modules Attributes */
+    pDimmInfo->dimmAttributes = data[DIMM_BUF_ADDR_CONT_IN];
+    DB(mvOsPrintf("DRAM dimmAttributes[21]          0x%x\n",
+                                                    pDimmInfo->dimmAttributes));
+
+    /* Minimum Cycle Time At Max CasLatancy Minus 1*/
+    pDimmInfo->minCycleTimeAtMaxCasLatMinus1Ps =
+                                    cas2ps(data[DIMM_MIN_CC_AT_MAX_CAS_MINUS1]);
+
+    /* Minimum Cycle Time At Max CasLatancy Minus 2*/
+    pDimmInfo->minCycleTimeAtMaxCasLatMinus2Ps =
+                                    cas2ps(data[DIMM_MIN_CC_AT_MAX_CAS_MINUS2]);
+
+    pDimmInfo->minRowPrechargeTime = data[DIMM_MIN_ROW_PRECHARGE_TIME];
+    DB(mvOsPrintf("DRAM minRowPrechargeTime[27]     0x%x\n",
+                                               pDimmInfo->minRowPrechargeTime));
+    pDimmInfo->minRowActiveToRowActive = data[DIMM_MIN_ROW_ACTIVE_TO_ROW_ACTIVE];
+    DB(mvOsPrintf("DRAM minRowActiveToRowActive[28] 0x%x\n",
+                                           pDimmInfo->minRowActiveToRowActive));
+    pDimmInfo->minRasToCasDelay = data[DIMM_MIN_RAS_TO_CAS_DELAY];
+    DB(mvOsPrintf("DRAM minRasToCasDelay[29]        0x%x\n",
+                                                  pDimmInfo->minRasToCasDelay));
+    pDimmInfo->minRasPulseWidth = data[DIMM_MIN_RAS_PULSE_WIDTH];
+    DB(mvOsPrintf("DRAM minRasPulseWidth[30]        0x%x\n",
+                                                  pDimmInfo->minRasPulseWidth));
+
+    /* DIMM Bank Density */
+    pDimmInfo->dimmBankDensity = data[DIMM_BANK_DENSITY];
+    DB(mvOsPrintf("DRAM dimmBankDensity[31]         0x%x\n",
+                                                   pDimmInfo->dimmBankDensity));
+
+    /* Only DDR2 includes Write Recovery Time field. Other SDRAM ignore     */
+    pDimmInfo->minWriteRecoveryTime = data[DIMM_MIN_WRITE_RECOVERY_TIME];
+    DB(mvOsPrintf("DRAM minWriteRecoveryTime[36]    0x%x\n",
+                                              pDimmInfo->minWriteRecoveryTime));
+
+    /* Only DDR2 includes Internal Write To Read Command Delay field.       */
+    pDimmInfo->minWriteToReadCmdDelay = data[DIMM_MIN_WRITE_TO_READ_CMD_DELAY];
+    DB(mvOsPrintf("DRAM minWriteToReadCmdDelay[37]  0x%x\n",
+                                            pDimmInfo->minWriteToReadCmdDelay));
+
+    /* Only DDR2 includes Internal Read To Precharge Command Delay field.   */
+    pDimmInfo->minReadToPrechCmdDelay = data[DIMM_MIN_READ_TO_PRECH_CMD_DELAY];
+    DB(mvOsPrintf("DRAM minReadToPrechCmdDelay[38]  0x%x\n",
+                                            pDimmInfo->minReadToPrechCmdDelay));
+
+    /* Only DDR2 includes Minimum Refresh to Activate/Refresh Command field */
+    pDimmInfo->minRefreshToActiveCmd = data[DIMM_MIN_REFRESH_TO_ACTIVATE_CMD];
+    DB(mvOsPrintf("DRAM minRefreshToActiveCmd[42]   0x%x\n",
+                                             pDimmInfo->minRefreshToActiveCmd));
+
+    /* calculating the sdram density. Representing device density from      */
+    /* bit 20 to allow representation of 4GB and above.                     */
+    /* For example, if density is 512Mbit 0x20000000, will be represent in  */
+    /* deviceDensity by 0x20000000 >> 16 --> 0x00000200. Another example    */
+    /* is density 8GB 0x200000000 >> 16 --> 0x00002000.                     */
+    density = (1 << ((pDimmInfo->numOfRowAddr + pDimmInfo->numOfColAddr) - 20));
+    pDimmInfo->deviceDensity = density *
+                                pDimmInfo->numOfBanksOnEachDevice *
+                                pDimmInfo->sdramWidth;
+    DB(mvOsPrintf("DRAM deviceDensity           %d\n",pDimmInfo->deviceDensity));
+
+    /* Number of devices includeing Error correction */
+    pDimmInfo->numberOfDevices = (pDimmInfo->dataWidth/pDimmInfo->sdramWidth) *
+                                  pDimmInfo->numOfModuleBanks;
+    DB(mvOsPrintf("DRAM numberOfDevices         %d\n",
+                                                   pDimmInfo->numberOfDevices));
+
+    pDimmInfo->size = 0;
+
+    /* Note that pDimmInfo->size is in MB units */
+    if (pDimmInfo->memoryType == MEM_TYPE_SDRAM)
+    {
+        if (pDimmInfo->dimmBankDensity & BIT0)
+            pDimmInfo->size += 1024;                /* Equal to 1GB     */
+        else if (pDimmInfo->dimmBankDensity & BIT1)
+            pDimmInfo->size += 8;                   /* Equal to 8MB     */
+        else if (pDimmInfo->dimmBankDensity & BIT2)
+            pDimmInfo->size += 16;                  /* Equal to 16MB    */
+        else if (pDimmInfo->dimmBankDensity & BIT3)
+            pDimmInfo->size += 32;                  /* Equal to 32MB    */
+        else if (pDimmInfo->dimmBankDensity & BIT4)
+            pDimmInfo->size += 64;                  /* Equal to 64MB    */
+        else if (pDimmInfo->dimmBankDensity & BIT5)
+            pDimmInfo->size += 128;                 /* Equal to 128MB   */
+        else if (pDimmInfo->dimmBankDensity & BIT6)
+            pDimmInfo->size += 256;                 /* Equal to 256MB   */
+        else if (pDimmInfo->dimmBankDensity & BIT7)
+            pDimmInfo->size += 512;                 /* Equal to 512MB   */
+    }
+    else if (pDimmInfo->memoryType == MEM_TYPE_DDR1)
+    {
+        if (pDimmInfo->dimmBankDensity & BIT0)
+            pDimmInfo->size += 1024;                /* Equal to 1GB     */
+        else if (pDimmInfo->dimmBankDensity & BIT1)
+            pDimmInfo->size += 2048;                /* Equal to 2GB     */
+        else if (pDimmInfo->dimmBankDensity & BIT2)
+            pDimmInfo->size += 16;                  /* Equal to 16MB    */
+        else if (pDimmInfo->dimmBankDensity & BIT3)
+            pDimmInfo->size += 32;                  /* Equal to 32MB    */
+        else if (pDimmInfo->dimmBankDensity & BIT4)
+            pDimmInfo->size += 64;                  /* Equal to 64MB    */
+        else if (pDimmInfo->dimmBankDensity & BIT5)
+            pDimmInfo->size += 128;                 /* Equal to 128MB   */
+        else if (pDimmInfo->dimmBankDensity & BIT6)
+            pDimmInfo->size += 256;                 /* Equal to 256MB   */
+        else if (pDimmInfo->dimmBankDensity & BIT7)
+            pDimmInfo->size += 512;                 /* Equal to 512MB   */
+    }
+    else /* if (dimmInfo.memoryType == MEM_TYPE_DDR2) */
+    {
+        if (pDimmInfo->dimmBankDensity & BIT0)
+            pDimmInfo->size += 1024;                /* Equal to 1GB     */
+        else if (pDimmInfo->dimmBankDensity & BIT1)
+            pDimmInfo->size += 2048;                /* Equal to 2GB     */
+        else if (pDimmInfo->dimmBankDensity & BIT2)
+            pDimmInfo->size += 4096;                /* Equal to 4GB     */
+        else if (pDimmInfo->dimmBankDensity & BIT3)
+            pDimmInfo->size += 8192;                /* Equal to 8GB     */
+        else if (pDimmInfo->dimmBankDensity & BIT4)
+            pDimmInfo->size += 16384;               /* Equal to 16GB    */
+        else if (pDimmInfo->dimmBankDensity & BIT5)
+            pDimmInfo->size += 128;                 /* Equal to 128MB   */
+        else if (pDimmInfo->dimmBankDensity & BIT6)
+            pDimmInfo->size += 256;                 /* Equal to 256MB   */
+        else if (pDimmInfo->dimmBankDensity & BIT7)
+            pDimmInfo->size += 512;                 /* Equal to 512MB   */
+    }
+
+    pDimmInfo->size *= pDimmInfo->numOfModuleBanks;
+
+    DB(mvOsPrintf("Dram: dimm size    %dMB \n",pDimmInfo->size));
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* dimmSpdPrint - Print the SPD parameters.
+*
+* DESCRIPTION:
+*       Print the Dimm SPD parameters.
+*
+* INPUT:
+*       pDimmInfo - DIMM information structure.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_VOID dimmSpdPrint(MV_U32 dimmNum)
+{
+    MV_DIMM_INFO dimmInfo;
+    MV_U32  i, temp = 0;
+    MV_U32  k, maskLeftOfPoint = 0, maskRightOfPoint = 0;
+    MV_U32  rightOfPoint = 0,leftOfPoint = 0, div, time_tmp, shift;
+    MV_U32  busClkPs;
+    MV_U8   trp_clocks=0, trcd_clocks, tras_clocks, trrd_clocks,
+            temp_buf[40], *spdRawData;
+
+    busClkPs = 1000000000 / (mvBoardSysClkGet() / 100);  /* in 10 ps units */
+
+    spdRawData = dimmInfo.spdRawData;
+
+    if(MV_OK != dimmSpdGet(dimmNum, &dimmInfo))
+    {
+        mvOsOutput("ERROR: Could not read SPD information!\n");
+        return;
+    }
+
+    /* find Manufactura of Dimm Module */
+    mvOsOutput("\nManufacturer's JEDEC ID Code:   ");
+    for(i = 0 ; i < DIMM_MODULE_MANU_SIZE ; i++)
+    {
+        mvOsOutput("%x",spdRawData[DIMM_MODULE_MANU_OFFS + i]);
+    }
+    mvOsOutput("\n");
+
+    /* Manufacturer's Specific Data */
+    for(i = 0 ; i < DIMM_MODULE_ID_SIZE ; i++)
+    {
+        temp_buf[i] = spdRawData[DIMM_MODULE_ID_OFFS + i];
+    }
+    mvOsOutput("Manufacturer's Specific Data:   %s\n", temp_buf);
+
+    /* Module Part Number */
+    for(i = 0 ; i < DIMM_MODULE_VEN_SIZE ; i++)
+    {
+        temp_buf[i] = spdRawData[DIMM_MODULE_VEN_OFFS + i];
+    }
+    mvOsOutput("Module Part Number:             %s\n", temp_buf);
+
+    /* Module Serial Number */
+    for(i = 0; i < sizeof(MV_U32); i++)
+    {
+	temp |= spdRawData[95+i] << 8*i;
+    }
+    mvOsOutput("DIMM Serial No.                 %ld (%lx)\n", (long)temp,
+                                    (long)temp);
+
+    /* find Manufac-Data of Dimm Module */
+    mvOsOutput("Manufactoring Date:             Year 20%d%d/ ww %d%d\n",
+                        ((spdRawData[93] & 0xf0) >> 4), (spdRawData[93] & 0xf),
+                        ((spdRawData[94] & 0xf0) >> 4), (spdRawData[94] & 0xf));
+    /* find modul_revision of Dimm Module */
+    mvOsOutput("Module Revision:                %d.%d\n",
+                                                spdRawData[91], spdRawData[92]);
+
+    /* find manufac_place of Dimm Module */
+    mvOsOutput("manufac_place:                  %d\n", spdRawData[72]);
+
+    /* go over the first 35 I2C data bytes */
+    for(i = 2 ; i <= 35 ; i++)
+       switch(i)
+        {
+            case 2:  /* Memory type (DDR1/2 / SDRAM) */
+                if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+                    mvOsOutput("Dram Type is:                   SDRAM\n");
+                else if (dimmInfo.memoryType == MEM_TYPE_DDR1)
+                    mvOsOutput("Dram Type is:                   SDRAM DDR1\n");
+                else if (dimmInfo.memoryType == MEM_TYPE_DDR2)
+                    mvOsOutput("Dram Type is:                   SDRAM DDR2\n");
+                else
+                    mvOsOutput("Dram Type unknown\n");
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 3:  /* Number Of Row Addresses */
+                mvOsOutput("Module Number of row addresses: %d\n",
+                                                        dimmInfo.numOfRowAddr);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 4:  /* Number Of Column Addresses */
+                mvOsOutput("Module Number of col addresses: %d\n",
+                                                        dimmInfo.numOfColAddr);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 5:  /* Number Of Module Banks */
+                mvOsOutput("Number of Banks on Mod.:        %d\n",
+                                                    dimmInfo.numOfModuleBanks);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 6:  /* Data Width */
+                mvOsOutput("Module Data Width:              %d bit\n",
+                                                           dimmInfo.dataWidth);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 8:  /* Voltage Interface */
+                switch(spdRawData[i])
+                {
+                    case 0x0:
+                        mvOsOutput("Module is               TTL_5V_TOLERANT\n");
+                        break;
+                    case 0x1:
+                        mvOsOutput("Module is               LVTTL\n");
+                        break;
+                    case 0x2:
+                        mvOsOutput("Module is               HSTL_1_5V\n");
+                        break;
+                    case 0x3:
+                        mvOsOutput("Module is               SSTL_3_3V\n");
+                        break;
+                    case 0x4:
+                        mvOsOutput("Module is               SSTL_2_5V\n");
+                        break;
+                    case 0x5:
+                        if (dimmInfo.memoryType != MEM_TYPE_SDRAM)
+                        {
+                            mvOsOutput("Module is                 SSTL_1_8V\n");
+                            break;
+                        }
+                    default:
+                        mvOsOutput("Module is               VOLTAGE_UNKNOWN\n");
+                        break;
+                }
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 9:  /* Minimum Cycle Time At Max CasLatancy */
+                leftOfPoint = (spdRawData[i] & 0xf0) >> 4;
+                rightOfPoint = (spdRawData[i] & 0x0f) * 10;
+
+                /* DDR2 addition of right of point */
+                if ((spdRawData[i] & 0x0f) == 0xA)
+                {
+                    rightOfPoint = 25;
+                }
+                if ((spdRawData[i] & 0x0f) == 0xB)
+                {
+                    rightOfPoint = 33;
+                }
+                if ((spdRawData[i] & 0x0f) == 0xC)
+                {
+                    rightOfPoint = 66;
+                }
+                if ((spdRawData[i] & 0x0f) == 0xD)
+                {
+                    rightOfPoint = 75;
+                }
+                mvOsOutput("Minimum Cycle Time At Max CL:   %d.%d [ns]\n",
+                                                    leftOfPoint, rightOfPoint);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 10: /* Clock To Data Out */
+                div = (dimmInfo.memoryType == MEM_TYPE_SDRAM)? 10:100;
+                time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+                                                      ((spdRawData[i] & 0x0f));
+                leftOfPoint     = time_tmp / div;
+                rightOfPoint    = time_tmp % div;
+                mvOsOutput("Clock To Data Out:              %d.%d [ns]\n",
+                                                    leftOfPoint, rightOfPoint);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 11: /* Error Check Type */
+                mvOsOutput("Error Check Type (0=NONE):      %d\n",
+                                                      dimmInfo.errorCheckType);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 12: /* Refresh Interval */
+                mvOsOutput("Refresh Rate:                   %x\n",
+                                                     dimmInfo.refreshInterval);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 13: /* Sdram Width */
+                mvOsOutput("Sdram Width:                    %d bits\n",
+                                                          dimmInfo.sdramWidth);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 14: /* Error Check Data Width */
+                mvOsOutput("Error Check Data Width:         %d bits\n",
+                                                 dimmInfo.errorCheckDataWidth);
+                break;
+/*----------------------------------------------------------------------------*/
+
+           case 15: /* Minimum Clock Delay is unsupported */
+                if ((dimmInfo.memoryType == MEM_TYPE_SDRAM) ||
+                    (dimmInfo.memoryType == MEM_TYPE_DDR1))
+                {
+                    mvOsOutput("Minimum Clk Delay back to back: %d\n",
+                                                                spdRawData[i]);
+                }
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 16: /* Burst Length Supported */
+    /*     SDRAM/DDR1:
+                    *******-******-******-******-******-******-******-*******
+                    * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+                    *******-******-******-******-******-******-******-*******
+    burst length =  * Page | TBD  | TBD  | TBD  |  8   |  4   |  2   |   1  *
+                    *********************************************************/
+    /*     DDR2:
+                    *******-******-******-******-******-******-******-*******
+                    * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+                    *******-******-******-******-******-******-******-*******
+    burst length =  * Page | TBD  | TBD  | TBD  |  8   |  4   | TBD  | TBD  *
+                    *********************************************************/
+                mvOsOutput("Burst Length Supported: ");
+                if ((dimmInfo.memoryType == MEM_TYPE_SDRAM) ||
+                    (dimmInfo.memoryType == MEM_TYPE_DDR1))
+                {
+                    if (dimmInfo.burstLengthSupported & BIT0)
+                        mvOsOutput("1, ");
+                    if (dimmInfo.burstLengthSupported & BIT1)
+                        mvOsOutput("2, ");
+                }
+                if (dimmInfo.burstLengthSupported & BIT2)
+                    mvOsOutput("4, ");
+                if (dimmInfo.burstLengthSupported & BIT3)
+                    mvOsOutput("8, ");
+
+                mvOsOutput(" Bit \n");
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 17: /* Number Of Banks On Each Device */
+                mvOsOutput("Number Of Banks On Each Chip:   %d\n",
+                                              dimmInfo.numOfBanksOnEachDevice);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 18: /* Suported Cas Latencies */
+
+            /*      SDRAM:
+                    *******-******-******-******-******-******-******-*******
+                    * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+                    *******-******-******-******-******-******-******-*******
+            CAS =   * TBD  |  7   |  6   |  5   |  4   |  3   |   2  |   1  *
+                    ********************************************************/
+
+            /*     DDR 1:
+                    *******-******-******-******-******-******-******-*******
+                    * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+                    *******-******-******-******-******-******-******-*******
+            CAS =   * TBD  |  4   | 3.5  |   3  | 2.5  |  2   | 1.5  |   1  *
+                    *********************************************************/
+
+            /*     DDR 2:
+                    *******-******-******-******-******-******-******-*******
+                    * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+                    *******-******-******-******-******-******-******-*******
+            CAS =   * TBD  | TBD  |  5   |  4   |  3   |  2   | TBD  | TBD  *
+                    *********************************************************/
+
+                mvOsOutput("Suported Cas Latencies: (CL) 			");
+                if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+                {
+                    for (k = 0; k <=7; k++)
+                    {
+                        if (dimmInfo.suportedCasLatencies & (1 << k))
+                            mvOsOutput("%d,             ", k+1);
+                    }
+                }
+                else if (dimmInfo.memoryType == MEM_TYPE_DDR1)
+                {
+                    if (dimmInfo.suportedCasLatencies & BIT0)
+                        mvOsOutput("1, ");
+                    if (dimmInfo.suportedCasLatencies & BIT1)
+                        mvOsOutput("1.5, ");
+                    if (dimmInfo.suportedCasLatencies & BIT2)
+                        mvOsOutput("2, ");
+                    if (dimmInfo.suportedCasLatencies & BIT3)
+                        mvOsOutput("2.5, ");
+                    if (dimmInfo.suportedCasLatencies & BIT4)
+                        mvOsOutput("3, ");
+                    if (dimmInfo.suportedCasLatencies & BIT5)
+                        mvOsOutput("3.5, ");
+                }
+                else if (dimmInfo.memoryType == MEM_TYPE_DDR2)
+                {
+                    if (dimmInfo.suportedCasLatencies & BIT2)
+                        mvOsOutput("2, ");
+                    if (dimmInfo.suportedCasLatencies & BIT3)
+                        mvOsOutput("3, ");
+                    if (dimmInfo.suportedCasLatencies & BIT4)
+                        mvOsOutput("4, ");
+                    if (dimmInfo.suportedCasLatencies & BIT5)
+                        mvOsOutput("5, ");
+                }
+                else
+                    mvOsOutput("?.?, ");
+                mvOsOutput("\n");
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 20:   /* DDR2 DIMM type info */
+                if (dimmInfo.memoryType == MEM_TYPE_DDR2)
+                {
+                    if (dimmInfo.dimmTypeInfo & (BIT0 | BIT4))
+                        mvOsOutput("Registered DIMM (RDIMM)\n");
+                    else if (dimmInfo.dimmTypeInfo & (BIT1 | BIT5))
+                        mvOsOutput("Unbuffered DIMM (UDIMM)\n");
+                    else
+                        mvOsOutput("Unknown DIMM type.\n");
+                }
+
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 21: /* SDRAM Modules Attributes */
+                mvOsOutput("\nModule Attributes (SPD Byte 21): \n");
+
+                if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+                {
+                    if (dimmInfo.dimmAttributes & BIT0)
+                        mvOsOutput(" Buffered Addr/Control Input:   Yes\n");
+                    else
+                        mvOsOutput(" Buffered Addr/Control Input:   No\n");
+
+                    if (dimmInfo.dimmAttributes & BIT1)
+                        mvOsOutput(" Registered Addr/Control Input: Yes\n");
+                    else
+                        mvOsOutput(" Registered Addr/Control Input: No\n");
+
+                    if (dimmInfo.dimmAttributes & BIT2)
+                        mvOsOutput(" On-Card PLL (clock):           Yes \n");
+                    else
+                        mvOsOutput(" On-Card PLL (clock):           No \n");
+
+                    if (dimmInfo.dimmAttributes & BIT3)
+                        mvOsOutput(" Bufferd DQMB Input:            Yes \n");
+                    else
+                        mvOsOutput(" Bufferd DQMB Inputs:           No \n");
+
+                    if (dimmInfo.dimmAttributes & BIT4)
+                        mvOsOutput(" Registered DQMB Inputs:        Yes \n");
+                    else
+                        mvOsOutput(" Registered DQMB Inputs:        No \n");
+
+                    if (dimmInfo.dimmAttributes & BIT5)
+                        mvOsOutput(" Differential Clock Input:      Yes \n");
+                    else
+                        mvOsOutput(" Differential Clock Input:      No \n");
+
+                    if (dimmInfo.dimmAttributes & BIT6)
+                        mvOsOutput(" redundant Row Addressing:      Yes \n");
+                    else
+                        mvOsOutput(" redundant Row Addressing:      No \n");
+                }
+                else if (dimmInfo.memoryType == MEM_TYPE_DDR1)
+                {
+                    if (dimmInfo.dimmAttributes & BIT0)
+                        mvOsOutput(" Buffered Addr/Control Input:   Yes\n");
+                    else
+                        mvOsOutput(" Buffered Addr/Control Input:   No\n");
+
+                    if (dimmInfo.dimmAttributes & BIT1)
+                        mvOsOutput(" Registered Addr/Control Input: Yes\n");
+                    else
+                        mvOsOutput(" Registered Addr/Control Input: No\n");
+
+                    if (dimmInfo.dimmAttributes & BIT2)
+                        mvOsOutput(" On-Card PLL (clock):           Yes \n");
+                    else
+                        mvOsOutput(" On-Card PLL (clock):           No \n");
+
+                    if (dimmInfo.dimmAttributes & BIT3)
+                        mvOsOutput(" FET Switch On-Card Enabled:    Yes \n");
+                    else
+                        mvOsOutput(" FET Switch On-Card Enabled:    No \n");
+
+                    if (dimmInfo.dimmAttributes & BIT4)
+                        mvOsOutput(" FET Switch External Enabled:   Yes \n");
+                    else
+                        mvOsOutput(" FET Switch External Enabled:   No \n");
+
+                    if (dimmInfo.dimmAttributes & BIT5)
+                        mvOsOutput(" Differential Clock Input:      Yes \n");
+                    else
+                        mvOsOutput(" Differential Clock Input:      No \n");
+                }
+                else /* if (dimmInfo.memoryType == MEM_TYPE_DDR2) */
+                {
+                    mvOsOutput(" Number of Active Registers on the DIMM: %d\n",
+                                        (dimmInfo.dimmAttributes & 0x3) + 1);
+
+                    mvOsOutput(" Number of PLLs on the DIMM: %d\n",
+                                      ((dimmInfo.dimmAttributes) >> 2) & 0x3);
+
+                    if (dimmInfo.dimmAttributes & BIT4)
+                        mvOsOutput(" FET Switch External Enabled:   Yes \n");
+                    else
+                        mvOsOutput(" FET Switch External Enabled:   No \n");
+
+                    if (dimmInfo.dimmAttributes & BIT6)
+                        mvOsOutput(" Analysis probe installed:      Yes \n");
+                    else
+                        mvOsOutput(" Analysis probe installed:      No \n");
+                }
+
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 22: /* Suported AutoPreCharge */
+                mvOsOutput("\nModul Attributes (SPD Byte 22): \n");
+                if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+                {
+                    if ( spdRawData[i] & BIT0 )
+                        mvOsOutput(" Early Ras Precharge:           Yes \n");
+                    else
+                        mvOsOutput(" Early Ras Precharge:           No \n");
+
+                    if ( spdRawData[i] & BIT1 )
+                        mvOsOutput(" AutoPreCharge:                 Yes \n");
+                    else
+                        mvOsOutput(" AutoPreCharge:                 No \n");
+
+                    if ( spdRawData[i] & BIT2 )
+                        mvOsOutput(" Precharge All:                 Yes \n");
+                    else
+                        mvOsOutput(" Precharge All:                 No \n");
+
+                    if ( spdRawData[i] & BIT3 )
+                        mvOsOutput(" Write 1/ReadBurst:             Yes \n");
+                    else
+                        mvOsOutput(" Write 1/ReadBurst:             No \n");
+
+                    if ( spdRawData[i] & BIT4 )
+                        mvOsOutput(" lower VCC tolerance:           5%%\n");
+                    else
+                        mvOsOutput(" lower VCC tolerance:           10%%\n");
+
+                    if ( spdRawData[i] & BIT5 )
+                        mvOsOutput(" upper VCC tolerance:           5%%\n");
+                    else
+                        mvOsOutput(" upper VCC tolerance:           10%%\n");
+                }
+                else if (dimmInfo.memoryType == MEM_TYPE_DDR1)
+                {
+                    if ( spdRawData[i] & BIT0 )
+                        mvOsOutput(" Supports Weak Driver:          Yes \n");
+                    else
+                        mvOsOutput(" Supports Weak Driver:          No \n");
+
+                    if ( !(spdRawData[i] & BIT4) )
+                        mvOsOutput(" lower VCC tolerance:           0.2V\n");
+
+                    if ( !(spdRawData[i] & BIT5) )
+                        mvOsOutput(" upper VCC tolerance:           0.2V\n");
+
+                    if ( spdRawData[i] & BIT6 )
+                        mvOsOutput(" Concurrent Auto Preharge:      Yes \n");
+                    else
+                        mvOsOutput(" Concurrent Auto Preharge:      No \n");
+
+                    if ( spdRawData[i] & BIT7 )
+                        mvOsOutput(" Supports Fast AP:              Yes \n");
+                    else
+                        mvOsOutput(" Supports Fast AP:              No \n");
+                }
+                else if (dimmInfo.memoryType == MEM_TYPE_DDR2)
+                {
+                    if ( spdRawData[i] & BIT0 )
+                        mvOsOutput(" Supports Weak Driver:          Yes \n");
+                    else
+                        mvOsOutput(" Supports Weak Driver:          No \n");
+                }
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 23:
+            /* Minimum Cycle Time At Maximum Cas Latancy Minus 1 (2nd highest CL) */
+                leftOfPoint = (spdRawData[i] & 0xf0) >> 4;
+                rightOfPoint = (spdRawData[i] & 0x0f) * 10;
+
+                /* DDR2 addition of right of point */
+                if ((spdRawData[i] & 0x0f) == 0xA)
+                {
+                    rightOfPoint = 25;
+                }
+                if ((spdRawData[i] & 0x0f) == 0xB)
+                {
+                    rightOfPoint = 33;
+                }
+                if ((spdRawData[i] & 0x0f) == 0xC)
+                {
+                    rightOfPoint = 66;
+                }
+                if ((spdRawData[i] & 0x0f) == 0xD)
+                {
+                    rightOfPoint = 75;
+                }
+
+                mvOsOutput("Minimum Cycle Time At 2nd highest CasLatancy"
+                           "(0 = Not supported): %d.%d [ns]\n",
+                           leftOfPoint, rightOfPoint );
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 24: /* Clock To Data Out 2nd highest Cas Latency Value*/
+                div = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ? 10:100;
+                time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+                                                    ((spdRawData[i] & 0x0f));
+                leftOfPoint     = time_tmp / div;
+                rightOfPoint    = time_tmp % div;
+                mvOsOutput("Clock To Data Out (2nd CL value): 		%d.%d [ns]\n",
+                                                    leftOfPoint, rightOfPoint);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 25:
+            /* Minimum Cycle Time At Maximum Cas Latancy Minus 2 (3rd highest CL) */
+                if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+                {
+                    leftOfPoint = (spdRawData[i] & 0xfc) >> 2;
+                    rightOfPoint = (spdRawData[i] & 0x3) * 25;
+                }
+                else    /* DDR1 or DDR2 */
+                {
+                    leftOfPoint = (spdRawData[i] & 0xf0) >> 4;
+                    rightOfPoint = (spdRawData[i] & 0x0f) * 10;
+
+                    /* DDR2 addition of right of point */
+                    if ((spdRawData[i] & 0x0f) == 0xA)
+                    {
+                        rightOfPoint = 25;
+                    }
+                    if ((spdRawData[i] & 0x0f) == 0xB)
+                    {
+                        rightOfPoint = 33;
+                    }
+                    if ((spdRawData[i] & 0x0f) == 0xC)
+                    {
+                        rightOfPoint = 66;
+                    }
+                    if ((spdRawData[i] & 0x0f) == 0xD)
+                    {
+                        rightOfPoint = 75;
+                    }
+                }
+                mvOsOutput("Minimum Cycle Time At 3rd highest CasLatancy"
+                           "(0 = Not supported): %d.%d [ns]\n",
+                           leftOfPoint, rightOfPoint );
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 26: /* Clock To Data Out 3rd highest Cas Latency Value*/
+                if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+                {
+                    leftOfPoint = (spdRawData[i] & 0xfc) >> 2;
+                    rightOfPoint = (spdRawData[i] & 0x3) * 25;
+                }
+                else    /* DDR1 or DDR2 */
+                {
+                    time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+                                                      ((spdRawData[i] & 0x0f));
+                    leftOfPoint     = 0;
+                    rightOfPoint    = time_tmp;
+                }
+                mvOsOutput("Clock To Data Out (3rd CL value): 		%d.%2d[ns]\n",
+                                                  leftOfPoint, rightOfPoint );
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 27: /* Minimum Row Precharge Time */
+                shift = (dimmInfo.memoryType == MEM_TYPE_SDRAM)? 0:2;
+                maskLeftOfPoint  = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+                                                                    0xff : 0xfc;
+                maskRightOfPoint = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+                                                                    0x00 : 0x03;
+                leftOfPoint = ((spdRawData[i] & maskLeftOfPoint) >> shift);
+                rightOfPoint = (spdRawData[i] & maskRightOfPoint)*25;
+                temp = ((leftOfPoint*100) + rightOfPoint);/* in 10ps Intervals*/
+                trp_clocks = (temp + (busClkPs-1)) /  busClkPs;
+                mvOsOutput("Minimum Row Precharge Time [ns]: 		%d.%d = "
+                           "in Clk cycles %d\n",
+                           leftOfPoint, rightOfPoint, trp_clocks);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 28: /* Minimum Row Active to Row Active Time */
+                shift = (dimmInfo.memoryType == MEM_TYPE_SDRAM)? 0:2;
+                maskLeftOfPoint  = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+                                                                    0xff : 0xfc;
+                maskRightOfPoint = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+                                                                    0x00 : 0x03;
+                leftOfPoint = ((spdRawData[i] & maskLeftOfPoint) >> shift);
+                rightOfPoint = (spdRawData[i] & maskRightOfPoint)*25;
+                temp = ((leftOfPoint*100) + rightOfPoint);/* in 100ns Interval*/
+                trrd_clocks = (temp + (busClkPs-1)) / busClkPs;
+                mvOsOutput("Minimum Row Active -To- Row Active Delay [ns]: "
+                           "%d.%d = in Clk cycles %d\n",
+                            leftOfPoint, rightOfPoint, trp_clocks);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 29: /* Minimum Ras-To-Cas Delay */
+                shift = (dimmInfo.memoryType == MEM_TYPE_SDRAM)? 0:2;
+                maskLeftOfPoint  = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+                                                                    0xff : 0xfc;
+                maskRightOfPoint = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+                                                                    0x00 : 0x03;
+                leftOfPoint = ((spdRawData[i] & maskLeftOfPoint) >> shift);
+                rightOfPoint = (spdRawData[i] & maskRightOfPoint)*25;
+                temp = ((leftOfPoint*100) + rightOfPoint);/* in 100ns Interval*/
+                trcd_clocks = (temp + (busClkPs-1) )/ busClkPs;
+                mvOsOutput("Minimum Ras-To-Cas Delay [ns]: 			%d.%d = "
+                           "in Clk cycles %d\n",
+                           leftOfPoint, rightOfPoint, trp_clocks);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 30: /* Minimum Ras Pulse Width */
+                tras_clocks = (cas2ps(spdRawData[i])+(busClkPs-1)) / busClkPs;
+                mvOsOutput("Minimum Ras Pulse Width [ns]: 			%d = "
+                           "in Clk cycles %d\n", spdRawData[i], tras_clocks);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 31: /* Module Bank Density */
+                mvOsOutput("Module Bank Density (more than 1= Multisize-Module):");
+
+                if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+                {
+                    if (dimmInfo.dimmBankDensity & BIT0)
+                        mvOsOutput("1GB, ");
+                    if (dimmInfo.dimmBankDensity & BIT1)
+                        mvOsOutput("8MB, ");
+                    if (dimmInfo.dimmBankDensity & BIT2)
+                        mvOsOutput("16MB, ");
+                    if (dimmInfo.dimmBankDensity & BIT3)
+                        mvOsOutput("32MB, ");
+                    if (dimmInfo.dimmBankDensity & BIT4)
+                        mvOsOutput("64MB, ");
+                    if (dimmInfo.dimmBankDensity & BIT5)
+                        mvOsOutput("128MB, ");
+                    if (dimmInfo.dimmBankDensity & BIT6)
+                        mvOsOutput("256MB, ");
+                    if (dimmInfo.dimmBankDensity & BIT7)
+                        mvOsOutput("512MB, ");
+                }
+                else if (dimmInfo.memoryType == MEM_TYPE_DDR1)
+                {
+                    if (dimmInfo.dimmBankDensity & BIT0)
+                        mvOsOutput("1GB, ");
+                    if (dimmInfo.dimmBankDensity & BIT1)
+                        mvOsOutput("2GB, ");
+                    if (dimmInfo.dimmBankDensity & BIT2)
+                        mvOsOutput("16MB, ");
+                    if (dimmInfo.dimmBankDensity & BIT3)
+                        mvOsOutput("32MB, ");
+                    if (dimmInfo.dimmBankDensity & BIT4)
+                        mvOsOutput("64MB, ");
+                    if (dimmInfo.dimmBankDensity & BIT5)
+                        mvOsOutput("128MB, ");
+                    if (dimmInfo.dimmBankDensity & BIT6)
+                        mvOsOutput("256MB, ");
+                    if (dimmInfo.dimmBankDensity & BIT7)
+                        mvOsOutput("512MB, ");
+                }
+                else /* if (dimmInfo.memoryType == MEM_TYPE_DDR2) */
+                {
+                    if (dimmInfo.dimmBankDensity & BIT0)
+                        mvOsOutput("1GB, ");
+                    if (dimmInfo.dimmBankDensity & BIT1)
+                        mvOsOutput("2GB, ");
+                    if (dimmInfo.dimmBankDensity & BIT2)
+                        mvOsOutput("4GB, ");
+                    if (dimmInfo.dimmBankDensity & BIT3)
+                        mvOsOutput("8GB, ");
+                    if (dimmInfo.dimmBankDensity & BIT4)
+                        mvOsOutput("16GB, ");
+                    if (dimmInfo.dimmBankDensity & BIT5)
+                    mvOsOutput("128MB, ");
+                        if (dimmInfo.dimmBankDensity & BIT6)
+                    mvOsOutput("256MB, ");
+                        if (dimmInfo.dimmBankDensity & BIT7)
+                    mvOsOutput("512MB, ");
+                }
+                mvOsOutput("\n");
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 32: /* Address And Command Setup Time (measured in ns/1000) */
+                if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+                {
+                    rightOfPoint = (spdRawData[i] & 0x0f);
+                    leftOfPoint  = (spdRawData[i] & 0xf0) >> 4;
+                    if(leftOfPoint > 7)
+                    {
+                    leftOfPoint *= -1;
+                    }
+                }
+                else /* DDR1 or DDR2 */
+                {
+                    time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+                                                      ((spdRawData[i] & 0x0f));
+                    leftOfPoint = time_tmp / 100;
+                    rightOfPoint = time_tmp % 100;
+                }
+                mvOsOutput("Address And Command Setup Time [ns]: 		%d.%d\n",
+                                                     leftOfPoint, rightOfPoint);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 33: /* Address And Command Hold Time */
+                if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+                {
+                    rightOfPoint = (spdRawData[i] & 0x0f);
+                    leftOfPoint  = (spdRawData[i] & 0xf0) >> 4;
+                    if(leftOfPoint > 7)
+                    {
+                    leftOfPoint *= -1;
+                    }
+                }
+                else /* DDR1 or DDR2 */
+                {
+                    time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+                                                      ((spdRawData[i] & 0x0f));
+                    leftOfPoint = time_tmp / 100;
+                    rightOfPoint = time_tmp % 100;
+                }
+                mvOsOutput("Address And Command Hold Time [ns]: 		%d.%d\n",
+                                                   leftOfPoint, rightOfPoint);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 34: /* Data Input Setup Time */
+                if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+                {
+                    rightOfPoint = (spdRawData[i] & 0x0f);
+                    leftOfPoint  = (spdRawData[i] & 0xf0) >> 4;
+                    if(leftOfPoint > 7)
+                    {
+                        leftOfPoint *= -1;
+                    }
+                }
+                else /* DDR1 or DDR2 */
+                {
+                    time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+                                                      ((spdRawData[i] & 0x0f));
+                    leftOfPoint = time_tmp / 100;
+                    rightOfPoint = time_tmp % 100;
+                }
+                mvOsOutput("Data Input Setup Time [ns]: 			%d.%d\n",
+                                                    leftOfPoint, rightOfPoint);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 35: /* Data Input Hold Time */
+                if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+                {
+                    rightOfPoint = (spdRawData[i] & 0x0f);
+                    leftOfPoint  = (spdRawData[i] & 0xf0) >> 4;
+                    if(leftOfPoint > 7)
+                    {
+                        leftOfPoint *= -1;
+                    }
+                }
+                else /* DDR1 or DDR2 */
+                {
+                    time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+                                                      ((spdRawData[i] & 0x0f));
+                    leftOfPoint = time_tmp / 100;
+                    rightOfPoint = time_tmp % 100;
+                }
+                mvOsOutput("Data Input Hold Time [ns]: 			%d.%d\n\n",
+                                                    leftOfPoint, rightOfPoint);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 36: /* Relevant for DDR2 only: Write Recovery Time */
+                leftOfPoint = ((spdRawData[i] & maskLeftOfPoint) >> 2);
+                rightOfPoint = (spdRawData[i] & maskRightOfPoint) * 25;
+                mvOsOutput("Write Recovery Time [ns]: 			%d.%d\n",
+                                                    leftOfPoint, rightOfPoint);
+                break;
+/*----------------------------------------------------------------------------*/
+        }
+
+}
+
+
+/*
+ * translate ns.ns/10 coding of SPD timing values
+ * into ps unit values
+ */
+/*******************************************************************************
+*  cas2ps - Translate x.y ns parameter to pico-seconds values
+*
+* DESCRIPTION:
+*       This function translates x.y nano seconds to its value in pico seconds.
+*       For example 3.75ns will return 3750.
+*
+* INPUT:
+*       spd_byte - DIMM SPD byte.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       value in pico seconds.
+*
+*******************************************************************************/
+static MV_U32  cas2ps(MV_U8 spd_byte)
+{
+    MV_U32 ns, ns10;
+
+    /* isolate upper nibble */
+    ns = (spd_byte >> 4) & 0x0F;
+    /* isolate lower nibble */
+    ns10 = (spd_byte & 0x0F);
+
+    if( ns10 < 10 ) {
+        ns10 *= 10;
+    }
+    else if( ns10 == 10 )
+        ns10 = 25;
+    else if( ns10 == 11 )
+        ns10 = 33;
+    else if( ns10 == 12 )
+        ns10 = 66;
+    else if( ns10 == 13 )
+        ns10 = 75;
+    else
+    {
+        mvOsOutput("cas2ps Err. unsupported cycle time.\n");
+    }
+
+    return (ns*1000 + ns10*10);
+}
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDram.h b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDram.h
new file mode 100644
index 000000000000..584cdd5e06a7
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDram.h
@@ -0,0 +1,191 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvDram
+#define __INCmvDram
+
+#include "ddr1_2/mvDramIf.h"
+#include "twsi/mvTwsi.h"
+
+#define MAX_DIMM_NUM 			2
+#define SPD_SIZE			128
+
+/* Dimm spd offsets */
+#define DIMM_MEM_TYPE 						2
+#define DIMM_ROW_NUM 						3
+#define DIMM_COL_NUM 						4
+#define DIMM_MODULE_BANK_NUM 				5
+#define DIMM_DATA_WIDTH 					6
+#define DIMM_VOLT_IF 						8
+#define DIMM_MIN_CC_AT_MAX_CAS 				9
+#define DIMM_ERR_CHECK_TYPE 				11
+#define DIMM_REFRESH_INTERVAL 				12
+#define DIMM_SDRAM_WIDTH 					13
+#define DIMM_ERR_CHECK_DATA_WIDTH 			14
+#define DIMM_MIN_CLK_DEL 					15
+#define DIMM_BURST_LEN_SUP 					16
+#define DIMM_DEV_BANK_NUM 					17
+#define DIMM_SUP_CAL 						18
+#define DIMM_DDR2_TYPE_INFORMATION          20      /* DDR2 only */
+#define DIMM_BUF_ADDR_CONT_IN 				21
+#define DIMM_MIN_CC_AT_MAX_CAS_MINUS1		23
+#define DIMM_MIN_CC_AT_MAX_CAS_MINUS2		25
+#define DIMM_MIN_ROW_PRECHARGE_TIME			27
+#define DIMM_MIN_ROW_ACTIVE_TO_ROW_ACTIVE	28
+#define DIMM_MIN_RAS_TO_CAS_DELAY			29
+#define DIMM_MIN_RAS_PULSE_WIDTH			30
+#define DIMM_BANK_DENSITY					31
+#define DIMM_MIN_WRITE_RECOVERY_TIME        36
+#define DIMM_MIN_WRITE_TO_READ_CMD_DELAY    37
+#define DIMM_MIN_READ_TO_PRECH_CMD_DELAY    38
+#define DIMM_MIN_REFRESH_TO_ACTIVATE_CMD    42
+
+/* Dimm Memory Type values */
+#define DIMM_MEM_TYPE_SDRAM					0x4
+#define DIMM_MEM_TYPE_DDR1 					0x7
+#define DIMM_MEM_TYPE_DDR2 					0x8
+
+#define DIMM_MODULE_MANU_OFFS 		64
+#define DIMM_MODULE_MANU_SIZE 		8
+#define DIMM_MODULE_VEN_OFFS 		73
+#define DIMM_MODULE_VEN_SIZE 		25
+#define DIMM_MODULE_ID_OFFS 		99
+#define DIMM_MODULE_ID_SIZE 		18
+
+/* enumeration for voltage levels. */
+typedef enum _mvDimmVoltageIf
+{
+    TTL_5V_TOLERANT,
+    LVTTL,
+    HSTL_1_5V,
+    SSTL_3_3V,
+    SSTL_2_5V,
+    VOLTAGE_UNKNOWN,
+} MV_DIMM_VOLTAGE_IF;
+
+
+/* enumaration for SDRAM CAS Latencies. */
+typedef enum _mvDimmSdramCas
+{
+    SD_CL_1 =1,
+    SD_CL_2,
+    SD_CL_3,
+    SD_CL_4,
+    SD_CL_5,
+    SD_CL_6,
+    SD_CL_7,
+    SD_FAULT
+}MV_DIMM_SDRAM_CAS;
+
+
+/* DIMM information structure */
+typedef struct _mvDimmInfo
+{
+    MV_MEMORY_TYPE  memoryType; 	/* DDR or SDRAM */
+
+    MV_U8       spdRawData[SPD_SIZE];  	/* Content of SPD-EEPROM copied 1:1  */
+
+    /* DIMM dimensions */
+    MV_U32  numOfRowAddr;
+    MV_U32  numOfColAddr;
+    MV_U32  numOfModuleBanks;
+    MV_U32  dataWidth;
+    MV_U32  errorCheckType;             /* ECC , PARITY..*/
+    MV_U32  sdramWidth;                 /* 4,8,16 or 32 */
+    MV_U32  errorCheckDataWidth;        /* 0 - no, 1 - Yes */
+    MV_U32  burstLengthSupported;
+    MV_U32  numOfBanksOnEachDevice;
+    MV_U32  suportedCasLatencies;
+    MV_U32  refreshInterval;
+    MV_U32  dimmBankDensity;
+    MV_U32  dimmTypeInfo;           /* DDR2 only */
+    MV_U32  dimmAttributes;
+
+    /* DIMM timing parameters */
+    MV_U32  minCycleTimeAtMaxCasLatPs;
+    MV_U32  minCycleTimeAtMaxCasLatMinus1Ps;
+    MV_U32  minCycleTimeAtMaxCasLatMinus2Ps;
+	MV_U32  minRowPrechargeTime;
+	MV_U32  minRowActiveToRowActive;
+	MV_U32  minRasToCasDelay;
+	MV_U32  minRasPulseWidth;
+    MV_U32  minWriteRecoveryTime;   /* DDR2 only */
+    MV_U32  minWriteToReadCmdDelay; /* DDR2 only */
+    MV_U32  minReadToPrechCmdDelay; /* DDR2 only */
+    MV_U32  minRefreshToActiveCmd;  /* DDR2 only */
+
+    /* Parameters calculated from the extracted DIMM information */
+    MV_U32  size;               /* 16,64,128,256 or 512 MByte in MB units */
+    MV_U32  deviceDensity;      /* 16,64,128,256 or 512 Mbit in MB units  */
+    MV_U32  numberOfDevices;
+
+} MV_DIMM_INFO;
+
+
+MV_STATUS mvDramBankInfoGet(MV_U32 bankNum, MV_DRAM_BANK_INFO *pBankInfo);
+MV_STATUS dimmSpdGet(MV_U32 dimmNum, MV_DIMM_INFO *pDimmInfo);
+MV_VOID dimmSpdPrint(MV_U32 dimmNum);
+MV_STATUS dimmSpdCpy(MV_VOID);
+
+#endif /* __INCmvDram */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIf.c b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIf.c
new file mode 100644
index 000000000000..b9ac463b0dd6
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIf.c
@@ -0,0 +1,1597 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+/* includes */
+#include "ddr1_2/mvDramIf.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+
+
+
+#ifdef MV_DEBUG
+#define DB(x) x
+#else
+#define DB(x)
+#endif
+
+/* DRAM bank presence encoding */
+#define BANK_PRESENT_CS0				0x1
+#define BANK_PRESENT_CS0_CS1			0x3
+#define BANK_PRESENT_CS0_CS2			0x5
+#define BANK_PRESENT_CS0_CS1_CS2		0x7
+#define BANK_PRESENT_CS0_CS2_CS3		0xd
+#define BANK_PRESENT_CS0_CS2_CS3_CS4	0xf
+
+/* locals   */
+static MV_BOOL sdramIfWinOverlap(MV_TARGET target, MV_ADDR_WIN *pAddrWin);
+#if defined(MV_INC_BOARD_DDIM)
+static void sdramDDr2OdtConfig(MV_DRAM_BANK_INFO *pBankInfo);
+static MV_U32 dunitCtrlLowRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 minCas);
+static MV_U32 sdramModeRegCalc(MV_U32 minCas);
+static MV_U32 sdramExtModeRegCalc(MV_DRAM_BANK_INFO *pBankInfo);
+static MV_U32 sdramAddrCtrlRegCalc(MV_DRAM_BANK_INFO *pBankInfo);
+static MV_U32 sdramConfigRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 busClk);
+static MV_U32 minCasCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 busClk,
+						 MV_U32 forcedCl);
+static MV_U32 sdramTimeCtrlLowRegCalc(MV_DRAM_BANK_INFO *pBankInfo,
+									  MV_U32 minCas, MV_U32 busClk);
+static MV_U32 sdramTimeCtrlHighRegCalc(MV_DRAM_BANK_INFO *pBankInfo,
+									   MV_U32 busClk);
+
+/*******************************************************************************
+* mvDramIfDetect - Prepare DRAM interface configuration values.
+*
+* DESCRIPTION:
+*       This function implements the full DRAM detection and timing
+*       configuration for best system performance.
+*       Since this routine runs from a ROM device (Boot Flash), its stack
+*       resides on RAM, that might be the system DRAM. Changing DRAM
+*       configuration values while keeping vital data in DRAM is risky. That
+*       is why the function does not preform the configuration setting but
+*       prepare those in predefined 32bit registers (in this case IDMA
+*       registers are used) for other routine to perform the settings.
+*       The function will call for board DRAM SPD information for each DRAM
+*       chip select. The function will then analyze those SPD parameters of
+*       all DRAM banks in order to decide on DRAM configuration compatible
+*       for all DRAM banks.
+*       The function will set the CPU DRAM address decode registers.
+*       Note: This routine prepares values that will overide configuration of
+*       mvDramBasicAsmInit().
+*
+* INPUT:
+*       forcedCl - Forced CAL Latency. If equal to zero, do not force.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_STATUS mvDramIfDetect(MV_U32 forcedCl)
+{
+	MV_U32 retVal = MV_OK;	/* return value */
+	MV_DRAM_BANK_INFO bankInfo[MV_DRAM_MAX_CS];
+	MV_U32  busClk, size, base = 0, i, temp, deviceW, dimmW;
+	MV_U8	minCas;
+	MV_DRAM_DEC_WIN dramDecWin;
+
+	dramDecWin.addrWin.baseHigh = 0;
+
+	busClk = mvBoardSysClkGet();
+
+	if (0 == busClk)
+	{
+		mvOsPrintf("Dram: ERR. Can't detect system clock! \n");
+		return MV_ERROR;
+	}
+
+	/* Close DRAM banks except bank 0 (in case code is excecuting from it...) */
+#if defined(MV_INCLUDE_SDRAM_CS1)
+	for(i= SDRAM_CS1; i < MV_DRAM_MAX_CS; i++)
+		mvCpuIfTargetWinEnable(i, MV_FALSE);
+#endif
+
+	/* we will use bank 0 as the representative of the all the DRAM banks,  */
+	/* since bank 0 must exist.                                             */
+	for(i = 0; i < MV_DRAM_MAX_CS; i++)
+	{
+		/* if Bank exist */
+		if(MV_OK == mvDramBankInfoGet(i, &bankInfo[i]))
+		{
+			/* check it isn't SDRAM */
+			if(bankInfo[i].memoryType == MEM_TYPE_SDRAM)
+			{
+				mvOsPrintf("Dram: ERR. SDRAM type not supported !!!\n");
+				return MV_ERROR;
+			}
+			/* All banks must support registry in order to activate it */
+			if(bankInfo[i].registeredAddrAndControlInputs !=
+			   bankInfo[0].registeredAddrAndControlInputs)
+			{
+				mvOsPrintf("Dram: ERR. different Registered settings !!!\n");
+				return MV_ERROR;
+			}
+
+			/* Init the CPU window decode */
+			/* Note that the size in Bank info is in MB units 			*/
+			/* Note that the Dimm width might be different then the device DRAM width */
+			temp = MV_REG_READ(SDRAM_CONFIG_REG);
+
+			deviceW = ((temp & SDRAM_DWIDTH_MASK) == SDRAM_DWIDTH_16BIT )? 16 : 32;
+			dimmW = bankInfo[0].dataWidth - (bankInfo[0].dataWidth % 16);
+			size = ((bankInfo[i].size << 20) / (dimmW/deviceW));
+
+			/* We can not change DRAM window settings while excecuting  	*/
+			/* code from it. That is why we skip the DRAM CS[0], saving     */
+			/* it to the ROM configuration routine	*/
+			if(i == SDRAM_CS0)
+			{
+				MV_U32 sizeToReg;
+
+				/* Translate the given window size to register format */
+				sizeToReg = ctrlSizeToReg(size, SCSR_SIZE_ALIGNMENT);
+
+				/* Size parameter validity check. */
+				if (-1 == sizeToReg)
+				{
+					mvOsPrintf("mvCtrlAddrDecToReg: ERR. Win %d size invalid.\n"
+							   ,i);
+					return MV_BAD_PARAM;
+				}
+
+				/* Size is located at upper 16 bits */
+				sizeToReg <<= SCSR_SIZE_OFFS;
+
+				/* enable it */
+				sizeToReg |= SCSR_WIN_EN;
+
+				MV_REG_WRITE(DRAM_BUF_REG0, sizeToReg);
+			}
+			else
+			{
+				dramDecWin.addrWin.baseLow = base;
+				dramDecWin.addrWin.size = size;
+				dramDecWin.enable = MV_TRUE;
+
+				if (MV_OK != mvDramIfWinSet(SDRAM_CS0 + i, &dramDecWin))
+				{
+					mvOsPrintf("Dram: ERR. Fail to set bank %d!!!\n",
+							   SDRAM_CS0 + i);
+					return MV_ERROR;
+				}
+			}
+
+			base += size;
+
+			/* update the suportedCasLatencies mask */
+			bankInfo[0].suportedCasLatencies &= bankInfo[i].suportedCasLatencies;
+
+		}
+		else
+		{
+			if( i == 0 ) /* bank 0 doesn't exist */
+			{
+				mvOsPrintf("Dram: ERR. Fail to detect bank 0 !!!\n");
+				return MV_ERROR;
+			}
+			else
+			{
+				DB(mvOsPrintf("Dram: Could not find bank %d\n", i));
+				bankInfo[i].size = 0;     /* Mark this bank as non exist */
+			}
+		}
+	}
+
+	/* calculate minimum CAS */
+	minCas = minCasCalc(&bankInfo[0], busClk, forcedCl);
+	if (0 == minCas)
+	{
+		mvOsOutput("Dram: Warn: Could not find CAS compatible to SysClk %dMhz\n",
+				   (busClk / 1000000));
+
+		if (MV_REG_READ(SDRAM_CONFIG_REG) & SDRAM_DTYPE_DDR2)
+		{
+			minCas = DDR2_CL_4; /* Continue with this CAS */
+			mvOsPrintf("Set default CAS latency 4\n");
+		}
+		else
+		{
+			minCas = DDR1_CL_3; /* Continue with this CAS */
+			mvOsPrintf("Set default CAS latency 3\n");
+		}
+	}
+
+	/* calc SDRAM_CONFIG_REG  and save it to temp register */
+	temp = sdramConfigRegCalc(&bankInfo[0], busClk);
+	if(-1 == temp)
+	{
+		mvOsPrintf("Dram: ERR. sdramConfigRegCalc failed !!!\n");
+		return MV_ERROR;
+	}
+	MV_REG_WRITE(DRAM_BUF_REG1, temp);
+
+	/* calc SDRAM_MODE_REG  and save it to temp register */
+	temp = sdramModeRegCalc(minCas);
+	if(-1 == temp)
+	{
+		mvOsPrintf("Dram: ERR. sdramModeRegCalc failed !!!\n");
+		return MV_ERROR;
+	}
+	MV_REG_WRITE(DRAM_BUF_REG2, temp);
+
+	/* calc SDRAM_EXTENDED_MODE_REG  and save it to temp register */
+	temp = sdramExtModeRegCalc(&bankInfo[0]);
+	if(-1 == temp)
+	{
+		mvOsPrintf("Dram: ERR. sdramModeRegCalc failed !!!\n");
+		return MV_ERROR;
+	}
+	MV_REG_WRITE(DRAM_BUF_REG10, temp);
+
+	/* calc D_UNIT_CONTROL_LOW  and save it to temp register */
+	temp = dunitCtrlLowRegCalc(&bankInfo[0], minCas);
+	if(-1 == temp)
+	{
+		mvOsPrintf("Dram: ERR. dunitCtrlLowRegCalc failed !!!\n");
+		return MV_ERROR;
+	}
+	MV_REG_WRITE(DRAM_BUF_REG3, temp);
+
+	/* calc SDRAM_ADDR_CTRL_REG  and save it to temp register */
+	temp = sdramAddrCtrlRegCalc(&bankInfo[0]);
+	if(-1 == temp)
+	{
+		mvOsPrintf("Dram: ERR. sdramAddrCtrlRegCalc failed !!!\n");
+		return MV_ERROR;
+	}
+	MV_REG_WRITE(DRAM_BUF_REG4, temp);
+
+	/* calc SDRAM_TIMING_CTRL_LOW_REG  and save it to temp register */
+	temp = sdramTimeCtrlLowRegCalc(&bankInfo[0], minCas, busClk);
+	if(-1 == temp)
+	{
+		mvOsPrintf("Dram: ERR. sdramTimeCtrlLowRegCalc failed !!!\n");
+		return MV_ERROR;
+	}
+	MV_REG_WRITE(DRAM_BUF_REG5, temp);
+
+	/* calc SDRAM_TIMING_CTRL_HIGH_REG  and save it to temp register */
+	temp = sdramTimeCtrlHighRegCalc(&bankInfo[0], busClk);
+	if(-1 == temp)
+	{
+		mvOsPrintf("Dram: ERR. sdramTimeCtrlHighRegCalc failed !!!\n");
+		return MV_ERROR;
+	}
+	MV_REG_WRITE(DRAM_BUF_REG6, temp);
+
+	/* Config DDR2 On Die Termination (ODT) registers */
+	if (MV_REG_READ(SDRAM_CONFIG_REG) & SDRAM_DTYPE_DDR2)
+	{
+		sdramDDr2OdtConfig(bankInfo);
+	}
+
+	/* Note that DDR SDRAM Address/Control and Data pad calibration     */
+	/* settings is done in mvSdramIfConfig.s                            */
+
+	return retVal;
+}
+
+/*******************************************************************************
+* minCasCalc - Calculate the Minimum CAS latency which can be used.
+*
+* DESCRIPTION:
+*	Calculate the minimum CAS latency that can be used, base on the DRAM
+*	parameters and the SDRAM bus Clock freq.
+*
+* INPUT:
+*	busClk    - the DRAM bus Clock.
+*	pBankInfo - bank info parameters.
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       The minimum CAS Latency. The function returns 0 if max CAS latency
+*		supported by banks is incompatible with system bus clock frequancy.
+*
+*******************************************************************************/
+static MV_U32 minCasCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 busClk,
+						 MV_U32 forcedCl)
+{
+	MV_U32 count = 1, j;
+	MV_U32 busClkPs = 1000000000 / (busClk / 1000);  /* in ps units */
+	MV_U32 startBit, stopBit;
+
+	/*     DDR 1:
+			*******-******-******-******-******-******-******-*******
+			* bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+			*******-******-******-******-******-******-******-*******
+	CAS	=	* TBD  |  4   | 3.5  |   3  | 2.5  |  2   | 1.5  |   1  *
+			*********************************************************/
+
+	/*     DDR 2:
+			*******-******-******-******-******-******-******-*******
+			* bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+			*******-******-******-******-******-******-******-*******
+	CAS	=	* TBD  | TBD  |  5   |  4   |  3   |  2   | TBD  | TBD  *
+			*********************************************************/
+
+
+	/* If we are asked to use the forced CAL */
+	if (forcedCl)
+	{
+		mvOsPrintf("DRAM: Using forced CL %d.%d\n", (forcedCl / 10),
+													(forcedCl % 10));
+
+		if (MV_REG_READ(SDRAM_CONFIG_REG) & SDRAM_DTYPE_DDR2)
+		{
+			if (forcedCl == 30)
+				pBankInfo->suportedCasLatencies = 0x08;
+			else if (forcedCl == 40)
+				pBankInfo->suportedCasLatencies = 0x10;
+			else
+			{
+				mvOsPrintf("Forced CL %d.%d not supported. Set default CL 4\n",
+						   (forcedCl / 10), (forcedCl % 10));
+				pBankInfo->suportedCasLatencies = 0x10;
+			}
+		}
+		else
+		{
+			if (forcedCl == 15)
+				pBankInfo->suportedCasLatencies = 0x02;
+			else if (forcedCl == 20)
+				pBankInfo->suportedCasLatencies = 0x04;
+			else if (forcedCl == 25)
+				pBankInfo->suportedCasLatencies = 0x08;
+			else if (forcedCl == 30)
+				pBankInfo->suportedCasLatencies = 0x10;
+			else if (forcedCl == 40)
+				pBankInfo->suportedCasLatencies = 0x40;
+			else
+			{
+				mvOsPrintf("Forced CL %d.%d not supported. Set default CL 3\n",
+						   (forcedCl / 10), (forcedCl % 10));
+				pBankInfo->suportedCasLatencies = 0x10;
+			}
+		}
+
+		return pBankInfo->suportedCasLatencies;
+	}
+
+	/* go over the supported cas mask from Max Cas down and check if the 	*/
+	/* SysClk stands in its time requirments.								*/
+
+
+	DB(mvOsPrintf("Dram: minCasCalc supported mask = %x busClkPs = %x \n",
+								pBankInfo->suportedCasLatencies,busClkPs ));
+	for(j = 7; j > 0; j--)
+	{
+		if((pBankInfo->suportedCasLatencies >> j) & BIT0 )
+		{
+			/* Reset the bits for CL incompatible for the sysClk            */
+			switch (count)
+			{
+				case 1:
+					if (pBankInfo->minCycleTimeAtMaxCasLatPs > busClkPs)
+						pBankInfo->suportedCasLatencies &= ~(BIT0 << j);
+					count++;
+					break;
+				case 2:
+					if (pBankInfo->minCycleTimeAtMaxCasLatMinus1Ps > busClkPs)
+						pBankInfo->suportedCasLatencies &= ~(BIT0 << j);
+					count++;
+					break;
+				case 3:
+					if (pBankInfo->minCycleTimeAtMaxCasLatMinus2Ps > busClkPs)
+						pBankInfo->suportedCasLatencies &= ~(BIT0 << j);
+					count++;
+					break;
+				default:
+					pBankInfo->suportedCasLatencies &= ~(BIT0 << j);
+					break;
+			}
+		}
+	}
+
+	DB(mvOsPrintf("Dram: minCasCalc support = %x (after SysCC calc)\n",
+				  pBankInfo->suportedCasLatencies ));
+
+	/* SDRAM DDR1 controller supports CL 1.5 to 3.5 */
+	/* SDRAM DDR2 controller supports CL 3 to 5     */
+	if (MV_REG_READ(SDRAM_CONFIG_REG) & SDRAM_DTYPE_DDR2)
+	{
+		startBit = 3;   /* DDR2 support CL start with CL3 (bit 3) */
+		stopBit  = 5;   /* DDR2 support CL stops with CL5 (bit 5) */
+	}
+	else
+	{
+		startBit = 1;   /* DDR1 support CL start with CL1.5 (bit 3) */
+		stopBit  = 4;   /* DDR1 support CL stops with CL3 (bit 4)   */
+	}
+
+	for(j = startBit; j <= stopBit ; j++)
+	{
+		if((pBankInfo->suportedCasLatencies >> j) & BIT0 )
+		{
+			DB(mvOsPrintf("Dram: minCasCalc choose CAS %x \n",(BIT0 << j)));
+			return (BIT0 << j);
+		}
+	}
+
+	return 0;
+}
+
+/*******************************************************************************
+* sdramConfigRegCalc - Calculate sdram config register
+*
+* DESCRIPTION: Calculate sdram config register optimized value based
+*			on the bank info parameters.
+*
+* INPUT:
+*	pBankInfo - sdram bank parameters
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       sdram config reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramConfigRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 busClk)
+{
+	MV_U32 sdramConfig = 0;
+	MV_U32 refreshPeriod;
+
+	busClk /= 1000000; /* we work with busClk in MHz */
+
+	sdramConfig = MV_REG_READ(SDRAM_CONFIG_REG);
+
+	/* figure out the memory refresh internal */
+	switch (pBankInfo->refreshInterval & 0xf)
+	{
+		case 0x0: /* refresh period is 15.625 usec */
+			refreshPeriod = 15625;
+			break;
+		case 0x1: /* refresh period is 3.9 usec  	*/
+			refreshPeriod = 3900;
+			break;
+		case 0x2: /* refresh period is 7.8 usec 	*/
+			refreshPeriod = 7800;
+			break;
+		case 0x3: /* refresh period is 31.3 usec	*/
+			refreshPeriod = 31300;
+			break;
+		case 0x4: /* refresh period is 62.5 usec	*/
+			refreshPeriod = 62500;
+			break;
+		case 0x5: /* refresh period is 125 usec 	*/
+			refreshPeriod = 125000;
+			break;
+		default:  /* refresh period undefined 					*/
+			mvOsPrintf("Dram: ERR. DRAM refresh period is unknown!\n");
+			return -1;
+	}
+
+	/* Now the refreshPeriod is in register format value */
+	refreshPeriod = (busClk * refreshPeriod) / 1000;
+
+	DB(mvOsPrintf("Dram: sdramConfigRegCalc calculated refresh interval %0x\n",
+				  refreshPeriod));
+
+	/* make sure the refresh value is only 14 bits */
+	if(refreshPeriod > SDRAM_REFRESH_MAX)
+	{
+		refreshPeriod = SDRAM_REFRESH_MAX;
+		DB(mvOsPrintf("Dram: sdramConfigRegCalc adjusted refresh interval %0x\n",
+					  refreshPeriod));
+	}
+
+	/* Clear the refresh field */
+	sdramConfig &= ~SDRAM_REFRESH_MASK;
+
+	/* Set new value to refresh field */
+	sdramConfig |= (refreshPeriod & SDRAM_REFRESH_MASK);
+
+	/*  registered DRAM ? */
+	if ( pBankInfo->registeredAddrAndControlInputs )
+	{
+		/* it's registered DRAM, so set the reg. DRAM bit */
+		sdramConfig |= SDRAM_REGISTERED;
+		mvOsPrintf("DRAM Attribute: Registered address and control inputs.\n");
+	}
+
+	/* set DDR SDRAM devices configuration */
+	sdramConfig &= ~SDRAM_DCFG_MASK;    /* Clear Dcfg field */
+
+	switch (pBankInfo->sdramWidth)
+	{
+		case 8:  /* memory is x8 */
+			sdramConfig |= SDRAM_DCFG_X8_DEV;
+			DB(mvOsPrintf("Dram: sdramConfigRegCalc SDRAM device width x8\n"));
+			break;
+		case 16:
+			sdramConfig |= SDRAM_DCFG_X16_DEV;
+			DB(mvOsPrintf("Dram: sdramConfigRegCalc SDRAM device width x16\n"));
+			break;
+		default: /* memory width unsupported */
+			mvOsPrintf("Dram: ERR. DRAM chip width is unknown!\n");
+			return -1;
+	}
+
+	/* Set static default settings */
+	sdramConfig |= SDRAM_CONFIG_DV;
+
+	DB(mvOsPrintf("Dram: sdramConfigRegCalc set sdramConfig to 0x%x\n",
+				  sdramConfig));
+
+	return sdramConfig;
+}
+
+/*******************************************************************************
+* sdramModeRegCalc - Calculate sdram mode register
+*
+* DESCRIPTION: Calculate sdram mode register optimized value based
+*			on the bank info parameters and the minCas.
+*
+* INPUT:
+*	minCas	  - minimum CAS supported.
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       sdram mode reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramModeRegCalc(MV_U32 minCas)
+{
+	MV_U32 sdramMode;
+
+	sdramMode = MV_REG_READ(SDRAM_MODE_REG);
+
+	/* Clear CAS Latency field */
+	sdramMode &= ~SDRAM_CL_MASK;
+
+	mvOsPrintf("DRAM CAS Latency ");
+
+	if (MV_REG_READ(SDRAM_CONFIG_REG) & SDRAM_DTYPE_DDR2)
+	{
+		switch (minCas)
+		{
+			case DDR2_CL_3:
+				sdramMode |= SDRAM_DDR2_CL_3;
+				mvOsPrintf("3.\n");
+				break;
+			case DDR2_CL_4:
+				sdramMode |= SDRAM_DDR2_CL_4;
+				mvOsPrintf("4.\n");
+				break;
+			case DDR2_CL_5:
+				sdramMode |= SDRAM_DDR2_CL_5;
+				mvOsPrintf("5.\n");
+				break;
+			default:
+				mvOsPrintf("\nsdramModeRegCalc ERROR: Max. CL out of range\n");
+				return -1;
+		}
+	sdramMode |= DDR2_MODE_REG_DV;
+	}
+	else	/* DDR1 */
+	{
+		switch (minCas)
+		{
+			case DDR1_CL_1_5:
+				sdramMode |= SDRAM_DDR1_CL_1_5;
+				mvOsPrintf("1.5\n");
+				break;
+			case DDR1_CL_2:
+				sdramMode |= SDRAM_DDR1_CL_2;
+				mvOsPrintf("2\n");
+				break;
+			case DDR1_CL_2_5:
+				sdramMode |= SDRAM_DDR1_CL_2_5;
+				mvOsPrintf("2.5\n");
+				break;
+			case DDR1_CL_3:
+				sdramMode |= SDRAM_DDR1_CL_3;
+				mvOsPrintf("3\n");
+				break;
+			case DDR1_CL_4:
+				sdramMode |= SDRAM_DDR1_CL_4;
+				mvOsPrintf("4\n");
+				break;
+			default:
+				mvOsPrintf("\nsdramModeRegCalc ERROR: Max. CL out of range\n");
+				return -1;
+		}
+		sdramMode |= DDR1_MODE_REG_DV;
+	}
+
+	DB(mvOsPrintf("nsdramModeRegCalc register 0x%x\n", sdramMode ));
+
+	return sdramMode;
+}
+
+/*******************************************************************************
+* sdramExtModeRegCalc - Calculate sdram Extended mode register
+*
+* DESCRIPTION:
+*		Return sdram Extended mode register value based
+*		on the bank info parameters and bank presence.
+*
+* INPUT:
+*	pBankInfo - sdram bank parameters
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       sdram Extended mode reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramExtModeRegCalc(MV_DRAM_BANK_INFO *pBankInfo)
+{
+	MV_U32 populateBanks = 0;
+	int bankNum;
+	if (MV_REG_READ(SDRAM_CONFIG_REG) & SDRAM_DTYPE_DDR2)
+	{
+	/* Represent the populate banks in binary form */
+	for(bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+	{
+		if (0 != pBankInfo[bankNum].size)
+		{
+				populateBanks |= (1 << bankNum);
+			}
+		}
+
+		switch(populateBanks)
+		{
+			case(BANK_PRESENT_CS0):
+				return DDR_SDRAM_EXT_MODE_CS0_DV;
+
+			case(BANK_PRESENT_CS0_CS1):
+				return DDR_SDRAM_EXT_MODE_CS0_DV;
+
+			case(BANK_PRESENT_CS0_CS2):
+				return DDR_SDRAM_EXT_MODE_CS0_CS2_DV;
+
+			case(BANK_PRESENT_CS0_CS1_CS2):
+				return DDR_SDRAM_EXT_MODE_CS0_CS2_DV;
+
+			case(BANK_PRESENT_CS0_CS2_CS3):
+				return DDR_SDRAM_EXT_MODE_CS0_CS2_DV;
+
+			case(BANK_PRESENT_CS0_CS2_CS3_CS4):
+				return DDR_SDRAM_EXT_MODE_CS0_CS2_DV;
+
+			default:
+				mvOsPrintf("sdramExtModeRegCalc: Invalid DRAM bank presence\n");
+				return -1;
+		}
+	}
+	return 0;
+}
+
+/*******************************************************************************
+* dunitCtrlLowRegCalc - Calculate sdram dunit control low register
+*
+* DESCRIPTION: Calculate sdram dunit control low register optimized value based
+*			on the bank info parameters and the minCas.
+*
+* INPUT:
+*	pBankInfo - sdram bank parameters
+*	minCas	  - minimum CAS supported.
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       sdram dunit control low reg value.
+*
+*******************************************************************************/
+static MV_U32 dunitCtrlLowRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 minCas)
+{
+	MV_U32 dunitCtrlLow;
+
+	dunitCtrlLow = MV_REG_READ(SDRAM_DUNIT_CTRL_REG);
+
+	/* Clear StBurstDel field */
+	dunitCtrlLow &= ~SDRAM_ST_BURST_DEL_MASK;
+
+#ifdef MV_88W8660
+	/* Clear address/control output timing field */
+	dunitCtrlLow &= ~SDRAM_CTRL_POS_RISE;
+#endif /* MV_88W8660 */
+
+	DB(mvOsPrintf("Dram: dunitCtrlLowRegCalc\n"));
+
+	/* For proper sample of read data set the Dunit Control register's      */
+	/* stBurstDel bits [27:24]                                              */
+			/********-********-********-********-********-*********
+			* CL=1.5 |  CL=2  | CL=2.5 |  CL=3  |  CL=4  |  CL=5  *
+			*********-********-********-********-********-*********
+Not Reg.	*  0011  |  0011  |  0100  |  0100  |  0101  |  TBD   *
+			*********-********-********-********-********-*********
+Registered	*  0100  |  0100  |  0101  |  0101  |  0110  |  TBD   *
+			*********-********-********-********-********-*********/
+
+	if (MV_REG_READ(SDRAM_CONFIG_REG) & SDRAM_DTYPE_DDR2)
+	{
+		switch (minCas)
+		{
+			case DDR2_CL_3:
+					/* registerd DDR SDRAM? */
+				if (pBankInfo->registeredAddrAndControlInputs == MV_TRUE)
+					dunitCtrlLow |= 0x5 << SDRAM_ST_BURST_DEL_OFFS;
+				else
+					dunitCtrlLow |= 0x4 << SDRAM_ST_BURST_DEL_OFFS;
+				break;
+			case DDR2_CL_4:
+				/* registerd DDR SDRAM? */
+				if (pBankInfo->registeredAddrAndControlInputs == MV_TRUE)
+					dunitCtrlLow |= 0x6 << SDRAM_ST_BURST_DEL_OFFS;
+				else
+					dunitCtrlLow |= 0x5 << SDRAM_ST_BURST_DEL_OFFS;
+				break;
+			default:
+				mvOsPrintf("Dram: dunitCtrlLowRegCalc Max. CL out of range %d\n",
+						   minCas);
+				return -1;
+		}
+	}
+	else    /* DDR1 */
+	{
+		switch (minCas)
+		{
+			case DDR1_CL_1_5:
+				/* registerd DDR SDRAM? */
+				if (pBankInfo->registeredAddrAndControlInputs == MV_TRUE)
+					dunitCtrlLow |= 0x4 << SDRAM_ST_BURST_DEL_OFFS;
+				else
+					dunitCtrlLow |= 0x3 << SDRAM_ST_BURST_DEL_OFFS;
+				break;
+			case DDR1_CL_2:
+				/* registerd DDR SDRAM? */
+				if (pBankInfo->registeredAddrAndControlInputs == MV_TRUE)
+					dunitCtrlLow |= 0x4 << SDRAM_ST_BURST_DEL_OFFS;
+				else
+					dunitCtrlLow |= 0x3 << SDRAM_ST_BURST_DEL_OFFS;
+				break;
+			case DDR1_CL_2_5:
+				/* registerd DDR SDRAM? */
+				if (pBankInfo->registeredAddrAndControlInputs == MV_TRUE)
+					dunitCtrlLow |= 0x5 << SDRAM_ST_BURST_DEL_OFFS;
+				else
+					dunitCtrlLow |= 0x4 << SDRAM_ST_BURST_DEL_OFFS;
+				break;
+			case DDR1_CL_3:
+				/* registerd DDR SDRAM? */
+				if (pBankInfo->registeredAddrAndControlInputs == MV_TRUE)
+					dunitCtrlLow |= 0x5 << SDRAM_ST_BURST_DEL_OFFS;
+				else
+					dunitCtrlLow |= 0x4 << SDRAM_ST_BURST_DEL_OFFS;
+				break;
+			case DDR1_CL_4:
+				/* registerd DDR SDRAM? */
+				if (pBankInfo->registeredAddrAndControlInputs == MV_TRUE)
+					dunitCtrlLow |= 0x6 << SDRAM_ST_BURST_DEL_OFFS;
+				else
+					dunitCtrlLow |= 0x5 << SDRAM_ST_BURST_DEL_OFFS;
+				break;
+			default:
+				mvOsPrintf("Dram: dunitCtrlLowRegCalc Max. CL out of range %d\n",
+						   minCas);
+				return -1;
+	}
+
+	}
+	DB(mvOsPrintf("Dram: Reg dunit control low = %x\n", dunitCtrlLow ));
+
+	return dunitCtrlLow;
+}
+
+/*******************************************************************************
+* sdramAddrCtrlRegCalc - Calculate sdram address control register
+*
+* DESCRIPTION: Calculate sdram address control register optimized value based
+*			on the bank info parameters and the minCas.
+*
+* INPUT:
+*	pBankInfo - sdram bank parameters
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       sdram address control reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramAddrCtrlRegCalc(MV_DRAM_BANK_INFO *pBankInfo)
+{
+	MV_U32 addrCtrl = 0;
+
+	/* Set Address Control register static configuration bits */
+	addrCtrl = MV_REG_READ(SDRAM_ADDR_CTRL_REG);
+
+	/* Set address control default value */
+	addrCtrl |= SDRAM_ADDR_CTRL_DV;
+
+	/* Clear DSize field */
+	addrCtrl &= ~SDRAM_DSIZE_MASK;
+
+	/* Note that density is in MB units */
+	switch (pBankInfo->deviceDensity)
+	{
+		case 128:                 /* 128 Mbit */
+			DB(mvOsPrintf("DRAM Device Density 128Mbit\n"));
+			addrCtrl |= SDRAM_DSIZE_128Mb;
+			break;
+		case 256:                 /* 256 Mbit */
+			DB(mvOsPrintf("DRAM Device Density 256Mbit\n"));
+			addrCtrl |= SDRAM_DSIZE_256Mb;
+			break;
+		case 512:                /* 512 Mbit */
+			DB(mvOsPrintf("DRAM Device Density 512Mbit\n"));
+			addrCtrl |= SDRAM_DSIZE_512Mb;
+			break;
+		default:
+			mvOsPrintf("Dram: sdramAddrCtrl unsupported RAM-Device size %d\n",
+                       pBankInfo->deviceDensity);
+			return -1;
+	}
+
+	/* SDRAM address control */
+	DB(mvOsPrintf("Dram: setting sdram address control with: %x \n", addrCtrl));
+
+	return addrCtrl;
+}
+
+/*******************************************************************************
+* sdramTimeCtrlLowRegCalc - Calculate sdram timing control low register
+*
+* DESCRIPTION:
+*       This function calculates sdram timing control low register
+*       optimized value based on the bank info parameters and the minCas.
+*
+* INPUT:
+*	    pBankInfo - sdram bank parameters
+*       busClk    - Bus clock
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       sdram timinf control low reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramTimeCtrlLowRegCalc(MV_DRAM_BANK_INFO *pBankInfo,
+                                                MV_U32 minCas, MV_U32 busClk)
+{
+	MV_U32 tRp  = 0;
+	MV_U32 tRrd = 0;
+	MV_U32 tRcd = 0;
+	MV_U32 tRas = 0;
+	MV_U32 tWr  = 0;
+	MV_U32 tWtr = 0;
+	MV_U32 tRtp = 0;
+
+	MV_U32 bankNum;
+
+	busClk = busClk / 1000000;    /* In MHz */
+
+	/* Scan all DRAM banks to find maximum timing values */
+	for (bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+	{
+		tRp  = MV_MAX(tRp,  pBankInfo[bankNum].minRowPrechargeTime);
+		tRrd = MV_MAX(tRrd, pBankInfo[bankNum].minRowActiveToRowActive);
+		tRcd = MV_MAX(tRcd, pBankInfo[bankNum].minRasToCasDelay);
+		tRas = MV_MAX(tRas, pBankInfo[bankNum].minRasPulseWidth);
+	}
+
+	/* Extract timing (in ns) from SPD value. We ignore the tenth ns part.  */
+	/* by shifting the data two bits right.                                 */
+	tRp  = tRp  >> 2;    /* For example 0x50 -> 20ns                        */
+	tRrd = tRrd >> 2;
+	tRcd = tRcd >> 2;
+
+	/* Extract clock cycles from time parameter. We need to round up        */
+	tRp  = ((busClk * tRp)  / 1000) + (((busClk * tRp)  % 1000) ? 1 : 0);
+	/* Micron work around for 133MHz */
+	if (busClk == 133)
+		tRp += 1;
+	DB(mvOsPrintf("Dram  Timing Low: tRp = %d ", tRp));
+	tRrd = ((busClk * tRrd) / 1000) + (((busClk * tRrd) % 1000) ? 1 : 0);
+	/* JEDEC min reqeirments tRrd = 2 */
+	if (tRrd < 2)
+		tRrd = 2;
+	DB(mvOsPrintf("tRrd = %d ", tRrd));
+	tRcd = ((busClk * tRcd) / 1000) + (((busClk * tRcd) % 1000) ? 1 : 0);
+	DB(mvOsPrintf("tRcd = %d ", tRcd));
+	tRas = ((busClk * tRas) / 1000) + (((busClk * tRas) % 1000) ? 1 : 0);
+	DB(mvOsPrintf("tRas = %d ", tRas));
+
+	/* tWr and tWtr is different for DDR1 and DDR2. tRtp is only for DDR2   */
+	if (MV_REG_READ(SDRAM_CONFIG_REG) & SDRAM_DTYPE_DDR2)
+	{
+		/* Scan all DRAM banks to find maximum timing values */
+		for (bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+		{
+			tWr  = MV_MAX(tWr,  pBankInfo[bankNum].minWriteRecoveryTime);
+			tWtr = MV_MAX(tWtr, pBankInfo[bankNum].minWriteToReadCmdDelay);
+			tRtp = MV_MAX(tRtp, pBankInfo[bankNum].minReadToPrechCmdDelay);
+		}
+
+		/* Extract timing (in ns) from SPD value. We ignore the tenth ns    */
+		/* part by shifting the data two bits right.                        */
+		tWr  = tWr  >> 2;    /* For example 0x50 -> 20ns                    */
+		tWtr = tWtr >> 2;
+		tRtp = tRtp >> 2;
+
+		/* Extract clock cycles from time parameter. We need to round up    */
+		tWr  = ((busClk * tWr)  / 1000) + (((busClk * tWr)  % 1000) ? 1 : 0);
+		DB(mvOsPrintf("tWr = %d ", tWr));
+		tWtr = ((busClk * tWtr) / 1000) + (((busClk * tWtr) % 1000) ? 1 : 0);
+		/* JEDEC min reqeirments tWtr = 2 */
+		if (tWtr < 2)
+			tWtr = 2;
+		DB(mvOsPrintf("tWtr = %d ", tWtr));
+		tRtp = ((busClk * tRtp) / 1000) + (((busClk * tRtp) % 1000) ? 1 : 0);
+		/* JEDEC min reqeirments tRtp = 2 */
+		if (tRtp < 2)
+			tRtp = 2;
+		DB(mvOsPrintf("tRtp = %d ", tRtp));
+	}
+	else
+	{
+		tWr  = ((busClk*SDRAM_TWR) / 1000) + (((busClk*SDRAM_TWR) % 1000)?1:0);
+
+		if ((200 == busClk) || ((100 == busClk) && (DDR1_CL_1_5 == minCas)))
+		{
+			tWtr = 2;
+		}
+		else
+		{
+			tWtr = 1;
+		}
+
+		tRtp = 2; /* Must be set to 0x1 (two cycles) when using DDR1 */
+	}
+
+	DB(mvOsPrintf("tWtr = %d\n", tWtr));
+
+	/* Note: value of 0 in register means one cycle, 1 means two and so on  */
+	return (((tRp  - 1) << SDRAM_TRP_OFFS)	|
+			((tRrd - 1) << SDRAM_TRRD_OFFS)	|
+			((tRcd - 1) << SDRAM_TRCD_OFFS)	|
+			((tRas - 1) << SDRAM_TRAS_OFFS)	|
+			((tWr  - 1) << SDRAM_TWR_OFFS)	|
+			((tWtr - 1) << SDRAM_TWTR_OFFS)	|
+			((tRtp - 1) << SDRAM_TRTP_OFFS));
+}
+
+/*******************************************************************************
+* sdramTimeCtrlHighRegCalc - Calculate sdram timing control high register
+*
+* DESCRIPTION:
+*       This function calculates sdram timing control high register
+*       optimized value based on the bank info parameters and the bus clock.
+*
+* INPUT:
+*	    pBankInfo - sdram bank parameters
+*       busClk    - Bus clock
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       sdram timinf control high reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramTimeCtrlHighRegCalc(MV_DRAM_BANK_INFO *pBankInfo,
+                                                                MV_U32 busClk)
+{
+	MV_U32 tRfc;
+	MV_U32 timeNs = 0;
+	int bankNum;
+	MV_U32 sdramTw2wCyc = 0;
+
+	busClk = busClk / 1000000;    /* In MHz */
+
+	/* tRfc is different for DDR1 and DDR2. */
+	if (MV_REG_READ(SDRAM_CONFIG_REG) & SDRAM_DTYPE_DDR2)
+	{
+		MV_U32 bankNum;
+
+		/* Scan all DRAM banks to find maximum timing values */
+		for (bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+			timeNs = MV_MAX(timeNs,  pBankInfo[bankNum].minRefreshToActiveCmd);
+	}
+	else
+	{
+		if (pBankInfo[0].deviceDensity == _1G)
+		{
+			timeNs = SDRAM_TRFC_1G;
+		}
+		else
+		{
+			if (200 == busClk)
+			{
+				timeNs = SDRAM_TRFC_64_512M_AT_200MHZ;
+			}
+			else
+			{
+				timeNs = SDRAM_TRFC_64_512M;
+			}
+		}
+	}
+
+	tRfc = ((busClk * timeNs)  / 1000) + (((busClk * timeNs)  % 1000) ? 1 : 0);
+
+	DB(mvOsPrintf("Dram  Timing High: tRfc = %d\n", tRfc));
+
+
+	/* Represent the populate banks in binary form */
+	for(bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+	{
+		if (0 != pBankInfo[bankNum].size)
+			sdramTw2wCyc++;
+	}
+
+	/* If we have more the 1 bank then we need the TW2W in 1 for ODT switch */
+	if (sdramTw2wCyc > 1)
+		sdramTw2wCyc = 1;
+	else
+		sdramTw2wCyc = 0;
+
+	/* Note: value of 0 in register means one cycle, 1 means two and so on  */
+	return ((((tRfc - 1) & SDRAM_TRFC_MASK)	<< SDRAM_TRFC_OFFS)		|
+			((SDRAM_TR2R_CYC - 1)			<< SDRAM_TR2R_OFFS)		|
+			((SDRAM_TR2WW2R_CYC - 1)		<< SDRAM_TR2W_W2R_OFFS)	|
+			(((tRfc - 1) >> 4)				<< SDRAM_TRFC_EXT_OFFS)	|
+			(sdramTw2wCyc					<< SDRAM_TW2W_OFFS));
+
+}
+
+/*******************************************************************************
+* sdramDDr2OdtConfig - Set DRAM DDR2 On Die Termination registers.
+*
+* DESCRIPTION:
+*       This function config DDR2 On Die Termination (ODT) registers.
+*	ODT configuration is done according to DIMM presence:
+*
+*       Presence	  Ctrl Low    Ctrl High  Dunit Ctrl   Ext Mode
+*	CS0	         0x84210000  0x00000000  0x0000780F  0x00000440
+*	CS0+CS1          0x84210000  0x00000000  0x0000780F  0x00000440
+*	CS0+CS2	    	 0x030C030C  0x00000000  0x0000740F  0x00000404
+*	CS0+CS1+CS2	 0x030C030C  0x00000000  0x0000740F  0x00000404
+*	CS0+CS2+CS3	 0x030C030C  0x00000000  0x0000740F  0x00000404
+*	CS0+CS1+CS2+CS3  0x030C030C  0x00000000  0x0000740F  0x00000404
+*
+* INPUT:
+*		pBankInfo - bank info parameters.
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       None
+*******************************************************************************/
+static void sdramDDr2OdtConfig(MV_DRAM_BANK_INFO *pBankInfo)
+{
+	MV_U32 populateBanks = 0;
+	MV_U32 odtCtrlLow, odtCtrlHigh, dunitOdtCtrl;
+	int bankNum;
+
+	/* Represent the populate banks in binary form */
+	for(bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+	{
+		if (0 != pBankInfo[bankNum].size)
+		{
+				populateBanks |= (1 << bankNum);
+			}
+		}
+
+	switch(populateBanks)
+	{
+		case(BANK_PRESENT_CS0):
+			odtCtrlLow   = DDR2_ODT_CTRL_LOW_CS0_DV;
+			odtCtrlHigh  = DDR2_ODT_CTRL_HIGH_CS0_DV;
+			dunitOdtCtrl = DDR2_DUNIT_ODT_CTRL_CS0_DV;
+			break;
+		case(BANK_PRESENT_CS0_CS1):
+			odtCtrlLow   = DDR2_ODT_CTRL_LOW_CS0_DV;
+			odtCtrlHigh  = DDR2_ODT_CTRL_HIGH_CS0_DV;
+			dunitOdtCtrl = DDR2_DUNIT_ODT_CTRL_CS0_DV;
+			break;
+		case(BANK_PRESENT_CS0_CS2):
+			odtCtrlLow   = DDR2_ODT_CTRL_LOW_CS0_CS2_DV;
+			odtCtrlHigh  = DDR2_ODT_CTRL_HIGH_CS0_CS2_DV;
+			dunitOdtCtrl = DDR2_DUNIT_ODT_CTRL_CS0_CS2_DV;
+			break;
+		case(BANK_PRESENT_CS0_CS1_CS2):
+			odtCtrlLow   = DDR2_ODT_CTRL_LOW_CS0_CS2_DV;
+			odtCtrlHigh  = DDR2_ODT_CTRL_HIGH_CS0_CS2_DV;
+			dunitOdtCtrl = DDR2_DUNIT_ODT_CTRL_CS0_CS2_DV;
+			break;
+		case(BANK_PRESENT_CS0_CS2_CS3):
+			odtCtrlLow   = DDR2_ODT_CTRL_LOW_CS0_CS2_DV;
+			odtCtrlHigh  = DDR2_ODT_CTRL_HIGH_CS0_CS2_DV;
+			dunitOdtCtrl = DDR2_DUNIT_ODT_CTRL_CS0_CS2_DV;
+			break;
+		case(BANK_PRESENT_CS0_CS2_CS3_CS4):
+			odtCtrlLow   = DDR2_ODT_CTRL_LOW_CS0_CS2_DV;
+			odtCtrlHigh  = DDR2_ODT_CTRL_HIGH_CS0_CS2_DV;
+			dunitOdtCtrl = DDR2_DUNIT_ODT_CTRL_CS0_CS2_DV;
+			break;
+		default:
+			mvOsPrintf("sdramDDr2OdtConfig: Invalid DRAM bank presence\n");
+			return;
+	}
+	MV_REG_WRITE(DRAM_BUF_REG7, odtCtrlLow);
+	MV_REG_WRITE(DRAM_BUF_REG8, odtCtrlHigh);
+	MV_REG_WRITE(DRAM_BUF_REG9, dunitOdtCtrl);
+	return;
+}
+#endif /* defined(MV_INC_BOARD_DDIM) */
+
+/*******************************************************************************
+* mvDramIfWinSet - Set DRAM interface address decode window
+*
+* DESCRIPTION:
+*       This function sets DRAM interface address decode window.
+*
+* INPUT:
+*	    target      - System target. Use only SDRAM targets.
+*       pAddrDecWin - SDRAM address window structure.
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       MV_BAD_PARAM if parameters are invalid or window is invalid, MV_OK
+*       otherwise.
+*******************************************************************************/
+MV_STATUS mvDramIfWinSet(MV_TARGET target, MV_DRAM_DEC_WIN *pAddrDecWin)
+{
+	MV_U32 baseReg=0,sizeReg=0;
+	MV_U32 baseToReg=0 , sizeToReg=0;
+
+    /* Check parameters */
+	if (!MV_TARGET_IS_DRAM(target))
+	{
+		mvOsPrintf("mvDramIfWinSet: target %d is not SDRAM\n", target);
+		return MV_BAD_PARAM;
+	}
+
+    /* Check if the requested window overlaps with current enabled windows	*/
+    if (MV_TRUE == sdramIfWinOverlap(target, &pAddrDecWin->addrWin))
+	{
+        mvOsPrintf("mvDramIfWinSet: ERR. Target %d overlaps\n", target);
+		return MV_BAD_PARAM;
+	}
+
+	/* check if address is aligned to the size */
+	if(MV_IS_NOT_ALIGN(pAddrDecWin->addrWin.baseLow, pAddrDecWin->addrWin.size))
+	{
+		mvOsPrintf("mvDramIfWinSet:Error setting DRAM interface window %d."\
+				   "\nAddress 0x%08x is unaligned to size 0x%x.\n",
+                   target,
+				   pAddrDecWin->addrWin.baseLow,
+				   pAddrDecWin->addrWin.size);
+		return MV_ERROR;
+	}
+
+	/* read base register*/
+	baseReg = MV_REG_READ(SDRAM_BASE_ADDR_REG(target));
+
+	/* read size register */
+	sizeReg = MV_REG_READ(SDRAM_SIZE_REG(target));
+
+	/* BaseLow[31:16] => base register [31:16]		*/
+	baseToReg = pAddrDecWin->addrWin.baseLow & SCBAR_BASE_MASK;
+
+	/* Write to address decode Base Address Register                  */
+	baseReg &= ~SCBAR_BASE_MASK;
+	baseReg |= baseToReg;
+
+	/* Translate the given window size to register format			*/
+	sizeToReg = ctrlSizeToReg(pAddrDecWin->addrWin.size, SCSR_SIZE_ALIGNMENT);
+
+	/* Size parameter validity check.                                   */
+	if (-1 == sizeToReg)
+	{
+		mvOsPrintf("mvCtrlAddrDecToReg: ERR. Win %d size invalid.\n",target);
+		return MV_BAD_PARAM;
+	}
+
+	/* set size */
+	sizeReg &= ~SCSR_SIZE_MASK;
+	/* Size is located at upper 16 bits */
+	sizeReg |= (sizeToReg << SCSR_SIZE_OFFS);
+
+	/* enable/Disable */
+	if (MV_TRUE == pAddrDecWin->enable)
+	{
+		sizeReg |= SCSR_WIN_EN;
+	}
+	else
+	{
+		sizeReg &= ~SCSR_WIN_EN;
+	}
+
+	/* 3) Write to address decode Base Address Register                   */
+	MV_REG_WRITE(SDRAM_BASE_ADDR_REG(target), baseReg);
+
+	/* Write to address decode Size Register                        	*/
+	MV_REG_WRITE(SDRAM_SIZE_REG(target), sizeReg);
+
+	return MV_OK;
+}
+/*******************************************************************************
+* mvDramIfWinGet - Get DRAM interface address decode window
+*
+* DESCRIPTION:
+*       This function gets DRAM interface address decode window.
+*
+* INPUT:
+*	    target - System target. Use only SDRAM targets.
+*
+* OUTPUT:
+*       pAddrDecWin - SDRAM address window structure.
+*
+* RETURN:
+*       MV_BAD_PARAM if parameters are invalid or window is invalid, MV_OK
+*       otherwise.
+*******************************************************************************/
+MV_STATUS mvDramIfWinGet(MV_TARGET target, MV_DRAM_DEC_WIN *pAddrDecWin)
+{
+	MV_U32 baseReg,sizeReg;
+	MV_U32 sizeRegVal;
+
+	/* Check parameters */
+	if (!MV_TARGET_IS_DRAM(target))
+	{
+		mvOsPrintf("mvDramIfWinGet: target %d is Illigal\n", target);
+		return MV_ERROR;
+	}
+
+	/* Read base and size registers */
+	sizeReg = MV_REG_READ(SDRAM_SIZE_REG(target));
+	baseReg = MV_REG_READ(SDRAM_BASE_ADDR_REG(target));
+
+	sizeRegVal = (sizeReg & SCSR_SIZE_MASK) >> SCSR_SIZE_OFFS;
+
+	pAddrDecWin->addrWin.size = ctrlRegToSize(sizeRegVal,
+											 SCSR_SIZE_ALIGNMENT);
+
+    /* Check if ctrlRegToSize returned OK */
+	if (-1 == pAddrDecWin->addrWin.size)
+	{
+		mvOsPrintf("mvDramIfWinGet: size of target %d is Illigal\n", target);
+		return MV_ERROR;
+	}
+
+	/* Extract base address						*/
+	/* Base register [31:16] ==> baseLow[31:16] 		*/
+	pAddrDecWin->addrWin.baseLow = baseReg & SCBAR_BASE_MASK;
+
+	pAddrDecWin->addrWin.baseHigh =  0;
+
+
+	if (sizeReg & SCSR_WIN_EN)
+	{
+		pAddrDecWin->enable = MV_TRUE;
+	}
+	else
+	{
+		pAddrDecWin->enable = MV_FALSE;
+	}
+
+	return MV_OK;
+}
+/*******************************************************************************
+* mvDramIfWinEnable - Enable/Disable SDRAM address decode window
+*
+* DESCRIPTION:
+*		This function enable/Disable SDRAM address decode window.
+*
+* INPUT:
+*	    target - System target. Use only SDRAM targets.
+*
+* OUTPUT:
+*		None.
+*
+* RETURN:
+*		MV_ERROR in case function parameter are invalid, MV_OK otherewise.
+*
+*******************************************************************************/
+MV_STATUS mvDramIfWinEnable(MV_TARGET target,MV_BOOL enable)
+{
+	MV_DRAM_DEC_WIN 	addrDecWin;
+
+	/* Check parameters */
+	if (!MV_TARGET_IS_DRAM(target))
+	{
+		mvOsPrintf("mvDramIfWinEnable: target %d is Illigal\n", target);
+		return MV_ERROR;
+	}
+
+	if (enable == MV_TRUE)
+	{   /* First check for overlap with other enabled windows				*/
+		if (MV_OK != mvDramIfWinGet(target, &addrDecWin))
+		{
+			mvOsPrintf("mvDramIfWinEnable:ERR. Getting target %d failed.\n",
+                                                                        target);
+			return MV_ERROR;
+		}
+		/* Check for overlapping */
+		if (MV_FALSE == sdramIfWinOverlap(target, &(addrDecWin.addrWin)))
+		{
+			/* No Overlap. Enable address decode winNum window              */
+			MV_REG_BIT_SET(SDRAM_SIZE_REG(target), SCSR_WIN_EN);
+		}
+		else
+		{   /* Overlap detected	*/
+			mvOsPrintf("mvDramIfWinEnable: ERR. Target %d overlap detect\n",
+                                                                        target);
+			return MV_ERROR;
+		}
+	}
+	else
+	{   /* Disable address decode winNum window                             */
+		MV_REG_BIT_RESET(SDRAM_SIZE_REG(target), SCSR_WIN_EN);
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* sdramIfWinOverlap - Check if an address window overlap an SDRAM address window
+*
+* DESCRIPTION:
+*		This function scan each SDRAM address decode window to test if it
+*		overlapps the given address windoow
+*
+* INPUT:
+*       target      - SDRAM target where the function skips checking.
+*       pAddrDecWin - The tested address window for overlapping with
+*					  SDRAM windows.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if the given address window overlaps any enabled address
+*       decode map, MV_FALSE otherwise.
+*
+*******************************************************************************/
+static MV_BOOL sdramIfWinOverlap(MV_TARGET target, MV_ADDR_WIN *pAddrWin)
+{
+	MV_TARGET	targetNum;
+	MV_DRAM_DEC_WIN 	addrDecWin;
+
+	for(targetNum = SDRAM_CS0; targetNum < MV_DRAM_MAX_CS ; targetNum++)
+	{
+		/* don't check our winNum or illegal targets */
+		if (targetNum == target)
+		{
+			continue;
+		}
+
+		/* Get window parameters 	*/
+		if (MV_OK != mvDramIfWinGet(targetNum, &addrDecWin))
+		{
+			mvOsPrintf("sdramIfWinOverlap: ERR. TargetWinGet failed\n");
+			return MV_ERROR;
+		}
+
+		/* Do not check disabled windows	*/
+		if (MV_FALSE == addrDecWin.enable)
+		{
+			continue;
+		}
+
+		if(MV_TRUE == ctrlWinOverlapTest(pAddrWin, &addrDecWin.addrWin))
+		{
+			mvOsPrintf(
+			"sdramIfWinOverlap: Required target %d overlap winNum %d\n",
+			target, targetNum);
+			return MV_TRUE;
+		}
+	}
+
+	return MV_FALSE;
+}
+
+/*******************************************************************************
+* mvDramIfBankSizeGet - Get DRAM interface bank size.
+*
+* DESCRIPTION:
+*       This function returns the size of a given DRAM bank.
+*
+* INPUT:
+*       bankNum - Bank number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       DRAM bank size. If bank is disabled the function return '0'. In case
+*		or paramter is invalid, the function returns -1.
+*
+*******************************************************************************/
+MV_32 mvDramIfBankSizeGet(MV_U32 bankNum)
+{
+    MV_DRAM_DEC_WIN 	addrDecWin;
+
+	/* Check parameters */
+	if (!MV_TARGET_IS_DRAM(bankNum))
+	{
+		mvOsPrintf("mvDramIfBankBaseGet: bankNum %d is invalid\n", bankNum);
+		return -1;
+	}
+	/* Get window parameters 	*/
+	if (MV_OK != mvDramIfWinGet(bankNum, &addrDecWin))
+	{
+		mvOsPrintf("sdramIfWinOverlap: ERR. TargetWinGet failed\n");
+		return -1;
+	}
+
+	if (MV_TRUE == addrDecWin.enable)
+	{
+		return addrDecWin.addrWin.size;
+	}
+	else
+	{
+		return 0;
+	}
+}
+
+
+/*******************************************************************************
+* mvDramIfSizeGet - Get DRAM interface total size.
+*
+* DESCRIPTION:
+*       This function get the DRAM total size.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       DRAM total size. In case or paramter is invalid, the function
+*		returns -1.
+*
+*******************************************************************************/
+MV_32 mvDramIfSizeGet(MV_VOID)
+{
+	MV_U32 totalSize = 0, bankSize = 0, bankNum;
+
+	for(bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+	{
+		bankSize = mvDramIfBankSizeGet(bankNum);
+
+		if (-1 == bankSize)
+		{
+			mvOsPrintf("Dram: mvDramIfSizeGet error with bank %d \n",bankNum);
+			return -1;
+		}
+		else
+		{
+			totalSize += bankSize;
+		}
+	}
+
+	DB(mvOsPrintf("Dram: Total DRAM size is 0x%x \n",totalSize));
+
+	return totalSize;
+}
+
+/*******************************************************************************
+* mvDramIfBankBaseGet - Get DRAM interface bank base.
+*
+* DESCRIPTION:
+*       This function returns the 32 bit base address of a given DRAM bank.
+*
+* INPUT:
+*       bankNum - Bank number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       DRAM bank size. If bank is disabled or paramter is invalid, the
+*		function returns -1.
+*
+*******************************************************************************/
+MV_32 mvDramIfBankBaseGet(MV_U32 bankNum)
+{
+    MV_DRAM_DEC_WIN 	addrDecWin;
+
+	/* Check parameters */
+	if (!MV_TARGET_IS_DRAM(bankNum))
+	{
+		mvOsPrintf("mvDramIfBankBaseGet: bankNum %d is invalid\n", bankNum);
+		return -1;
+	}
+	/* Get window parameters 	*/
+	if (MV_OK != mvDramIfWinGet(bankNum, &addrDecWin))
+	{
+		mvOsPrintf("sdramIfWinOverlap: ERR. TargetWinGet failed\n");
+		return -1;
+	}
+
+	if (MV_TRUE == addrDecWin.enable)
+	{
+		return addrDecWin.addrWin.baseLow;
+	}
+	else
+	{
+		return -1;
+	}
+}
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIf.h b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIf.h
new file mode 100644
index 000000000000..c50de76eda3a
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIf.h
@@ -0,0 +1,179 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvDramIfh
+#define __INCmvDramIfh
+
+/* includes */
+#include "ddr1_2/mvDramIfRegs.h"
+#include "ddr1_2/mvDramIfConfig.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+
+/* defines  */
+/* DRAM Timing parameters */
+#define SDRAM_TWR                    15  /* ns tWr */
+#define SDRAM_TRFC_64_512M_AT_200MHZ 70  /* ns tRfc for dens 64-512 @ 200MHz */
+#define SDRAM_TRFC_64_512M           75  /* ns tRfc for dens 64-512          */
+#define SDRAM_TRFC_1G                120 /* ns tRfc for dens 1GB             */
+#define SDRAM_TR2R_CYC               1   /* cycle for tR2r                   */
+#define SDRAM_TR2WW2R_CYC            1   /* cycle for tR2wW2r                */
+
+/* typedefs */
+
+/* enumeration for memory types */
+typedef enum _mvMemoryType
+{
+    MEM_TYPE_SDRAM,
+    MEM_TYPE_DDR1,
+    MEM_TYPE_DDR2
+}MV_MEMORY_TYPE;
+
+/* enumeration for DDR1 supported CAS Latencies */
+typedef enum _mvDimmDdr1Cas
+{
+    DDR1_CL_1_5  = 0x02,
+    DDR1_CL_2    = 0x04,
+    DDR1_CL_2_5  = 0x08,
+    DDR1_CL_3    = 0x10,
+    DDR1_CL_4    = 0x40,
+    DDR1_CL_FAULT
+} MV_DIMM_DDR1_CAS;
+
+/* enumeration for DDR2 supported CAS Latencies */
+typedef enum _mvDimmDdr2Cas
+{
+    DDR2_CL_3    = 0x08,
+    DDR2_CL_4    = 0x10,
+    DDR2_CL_5    = 0x20,
+    DDR2_CL_FAULT
+} MV_DIMM_DDR2_CAS;
+
+
+typedef struct _mvDramBankInfo
+{
+    MV_MEMORY_TYPE  memoryType; 	/* DDR1, DDR2 or SDRAM */
+
+    /* DIMM dimensions */
+    MV_U32  numOfRowAddr;
+    MV_U32  numOfColAddr;
+    MV_U32  dataWidth;
+    MV_U32  errorCheckType;             /* ECC , PARITY..*/
+    MV_U32  sdramWidth;                 /* 4,8,16 or 32 */
+    MV_U32  errorCheckDataWidth;        /* 0 - no, 1 - Yes */
+    MV_U32  burstLengthSupported;
+    MV_U32  numOfBanksOnEachDevice;
+    MV_U32  suportedCasLatencies;
+    MV_U32  refreshInterval;
+
+    /* DIMM timing parameters */
+    MV_U32  minCycleTimeAtMaxCasLatPs;
+    MV_U32  minCycleTimeAtMaxCasLatMinus1Ps;
+    MV_U32  minCycleTimeAtMaxCasLatMinus2Ps;
+	MV_U32  minRowPrechargeTime;
+	MV_U32  minRowActiveToRowActive;
+	MV_U32  minRasToCasDelay;
+	MV_U32  minRasPulseWidth;
+    MV_U32  minWriteRecoveryTime;   /* DDR2 only */
+    MV_U32  minWriteToReadCmdDelay; /* DDR2 only */
+    MV_U32  minReadToPrechCmdDelay; /* DDR2 only */
+    MV_U32  minRefreshToActiveCmd;  /* DDR2 only */
+
+    /* Parameters calculated from the extracted DIMM information */
+    MV_U32  size;
+    MV_U32  deviceDensity;           	/* 16,64,128,256 or 512 Mbit */
+    MV_U32  numberOfDevices;
+
+    /* DIMM attributes (MV_TRUE for yes) */
+    MV_BOOL registeredAddrAndControlInputs;
+
+}MV_DRAM_BANK_INFO;
+
+/* This structure describes CPU interface address decode window               */
+typedef struct _mvDramIfDecWin
+{
+	MV_ADDR_WIN   addrWin;    /* An address window*/
+	MV_BOOL       enable;     /* Address decode window is enabled/disabled    */
+}MV_DRAM_DEC_WIN;
+
+#include "ddr1_2/mvDram.h"
+
+/* mvDramIf.h API list */
+MV_VOID   mvDramIfBasicAsmInit(MV_VOID);
+MV_STATUS mvDramIfDetect(MV_U32 forcedCl);
+MV_VOID   _mvDramIfConfig(MV_VOID);
+
+MV_STATUS mvDramIfWinSet(MV_TARGET target, MV_DRAM_DEC_WIN *pAddrDecWin);
+MV_STATUS mvDramIfWinGet(MV_TARGET target, MV_DRAM_DEC_WIN *pAddrDecWin);
+MV_STATUS mvDramIfWinEnable(MV_TARGET target,MV_BOOL enable);
+MV_32 mvDramIfBankSizeGet(MV_U32 bankNum);
+MV_32 mvDramIfBankBaseGet(MV_U32 bankNum);
+MV_32 mvDramIfSizeGet(MV_VOID);
+
+#if 0
+MV_STATUS mvDramIfMbusCtrlSet(MV_XBAR_TARGET *pPizzaArbArray);
+MV_STATUS mvDramIfMbusToutSet(MV_U32 timeout, MV_BOOL enable);
+#endif
+
+#endif /* __INCmvDramIfh */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfBasicInit.S b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfBasicInit.S
new file mode 100644
index 000000000000..f2a9365c0bcd
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfBasicInit.S
@@ -0,0 +1,988 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#define MV_ASMLANGUAGE
+#include "mvSysHwConfig.h"
+#include "mvOsAsm.h"
+#include "mvBoardEnvSpec.h"
+#include "mvCpuIfRegs.h"
+#include "mvDramIfConfig.h"
+#include "mvDramIfRegs.h"
+#include "pex/mvPexRegs.h"
+#include "pci/mvPciRegs.h"
+#include "mvCtrlEnvSpec.h"
+#include "mvCtrlEnvAsm.h"
+#include "cpu/mvCpuArm.h"
+#include "mvCommon.h"
+
+/* defines */
+
+#if !defined(MV_INC_BOARD_DDIM)
+.globl dramBoot1
+dramBoot1:
+        .word   0
+
+/******************************************************************************
+*
+*
+*
+*
+*******************************************************************************/
+#if defined(DB_PRPMC) || defined(DB_PEX_PCI) || defined(DB_MNG)
+
+/* PEX_PCI and PRPMC boards 256 MB*/
+#define STATIC_SDRAM0_BANK0_SIZE	     	0x0fff0001
+#define STATIC_SDRAM_CONFIG	     		0x03248400
+#define STATIC_SDRAM_MODE	     		0x62
+#define STATIC_DUNIT_CTRL_LOW	     		0x4041000
+#define STATIC_SDRAM_ADDR_CTRL	     		0x00000020
+#define STATIC_SDRAM_TIME_CTRL_LOW     		0x11602220
+#define STATIC_SDRAM_TIME_CTRL_HI	     	0x0000030F
+#define STATIC_SDRAM_ODT_CTRL_LOW	     	0x0
+#define STATIC_SDRAM_ODT_CTRL_HI	     	0x0
+#define STATIC_SDRAM_DUNIT_ODT_CTRL    		0x0
+#define STATIC_SDRAM_EXT_MODE          		0x0
+
+#elif defined(DB_FPGA)
+
+/* FPGA DC boards 256 MB*/
+#define STATIC_SDRAM0_BANK0_SIZE	     	0x0fff0001
+#define STATIC_SDRAM_CONFIG	     		0x03208400	/* 32bit */
+#define STATIC_SDRAM_MODE	     		0x22
+#define STATIC_DUNIT_CTRL_LOW	     		0x03041000
+#define STATIC_SDRAM_ADDR_CTRL	     		0x00000020
+#define STATIC_SDRAM_TIME_CTRL_LOW     		0x11112220
+#define STATIC_SDRAM_TIME_CTRL_HI	     	0x0000000D
+#define STATIC_SDRAM_ODT_CTRL_LOW	     	0x0
+#define STATIC_SDRAM_ODT_CTRL_HI	     	0x0
+#define STATIC_SDRAM_DUNIT_ODT_CTRL    		0x0
+#define STATIC_SDRAM_EXT_MODE          		0x1
+
+#elif  defined(RD_88F6183GP) || defined(DB_CUSTOMER)
+
+/* Customer 1 DDR2 2 devices 512Mbit by 16 bit */
+#define STATIC_SDRAM0_BANK0_SIZE	     	0x07ff0001
+#define STATIC_SDRAM_CONFIG	     		0x03158400
+#define STATIC_SDRAM_MODE	     		0x452
+#define STATIC_DUNIT_CTRL_LOW	     		0x06041000
+#define STATIC_SDRAM_ADDR_CTRL	     		0x00000020
+#define STATIC_SDRAM_TIME_CTRL_LOW     		0x11912220
+#define STATIC_SDRAM_TIME_CTRL_HI	     	0x00000502
+#define STATIC_SDRAM_ODT_CTRL_LOW	     	0x00010000
+#define STATIC_SDRAM_ODT_CTRL_HI	     	0x00000002
+#define STATIC_SDRAM_DUNIT_ODT_CTRL    		0x00000601
+#define STATIC_SDRAM_EXT_MODE          		0x00000440
+
+
+#elif  defined(RD_88F6183AP)
+
+/* DDR2 1 devices 512Mbit by 16 bit */
+#define STATIC_SDRAM0_BANK0_SIZE	     	0x03ff0001
+#define STATIC_SDRAM_CONFIG	     		0x1f154400
+#define STATIC_SDRAM_MODE	     		0x432
+#define STATIC_DUNIT_CTRL_LOW	     		0x04041000
+#define STATIC_SDRAM_ADDR_CTRL	     		0x00000020
+#define STATIC_SDRAM_TIME_CTRL_LOW     		0x11912220
+#define STATIC_SDRAM_TIME_CTRL_HI	     	0x00000502
+#define STATIC_SDRAM_ODT_CTRL_LOW	     	0x00010000
+#define STATIC_SDRAM_ODT_CTRL_HI	     	0x00000002
+#define STATIC_SDRAM_DUNIT_ODT_CTRL    		0x00000601
+#define STATIC_SDRAM_EXT_MODE          		0x00000440
+
+/* 6082L MARVELL DIMM */
+#elif  defined(DB_88F6082LBP)
+#define STATIC_SDRAM0_BANK0_SIZE	     	0x07ff0001
+#define STATIC_SDRAM_CONFIG	     		0x7f158400
+#define STATIC_SDRAM_MODE	     		0x432
+#define STATIC_DUNIT_CTRL_LOW	     		0x04041040
+#define STATIC_SDRAM_ADDR_CTRL	     		0x00000020
+#define STATIC_SDRAM_TIME_CTRL_LOW     		0x11612220
+#define STATIC_SDRAM_TIME_CTRL_HI	     	0x00000501
+#define STATIC_SDRAM_ODT_CTRL_LOW	     	0x00010000
+#define STATIC_SDRAM_ODT_CTRL_HI	     	0x00000002
+#define STATIC_SDRAM_DUNIT_ODT_CTRL    		0x00000a01
+#define STATIC_SDRAM_EXT_MODE          		0x00000440
+
+#elif  defined(RD_88W8660_AP82S)
+
+/* Shark RD */
+
+#if defined(MV_DRAM_32M)
+#define STATIC_SDRAM0_BANK0_SIZE	     	0x01ff0001
+#define STATIC_SDRAM_ADDR_CTRL	     		0x00000010
+#elif defined(MV_DRAM_16M)
+
+#define STATIC_SDRAM0_BANK0_SIZE	     	0x00ff0001
+#define STATIC_SDRAM_ADDR_CTRL	     		0x00000000
+
+#else
+#error "NO DDR size selected"
+#endif
+
+#define STATIC_SDRAM_CONFIG	     		0x03144400
+#define STATIC_SDRAM_MODE	     		0x62
+#define STATIC_DUNIT_CTRL_LOW	     		0x4041000
+
+#define STATIC_SDRAM_TIME_CTRL_LOW     		0x11602220
+#define STATIC_SDRAM_TIME_CTRL_HI	     	0x0000040b
+#define STATIC_SDRAM_ODT_CTRL_LOW	     	0x0
+#define STATIC_SDRAM_ODT_CTRL_HI	     	0x0
+#define STATIC_SDRAM_DUNIT_ODT_CTRL    		0x0
+#define STATIC_SDRAM_EXT_MODE          		0x0
+
+#elif defined(RD_88W8660)
+
+/* Shark RD */
+#define STATIC_SDRAM0_BANK0_SIZE	     	0x03ff0001
+#define STATIC_SDRAM_CONFIG	     		0x03144400
+#define STATIC_SDRAM_MODE	     		0x62
+#define STATIC_DUNIT_CTRL_LOW	     		0x4041000
+#define STATIC_SDRAM_ADDR_CTRL	     		0x00000010
+#define STATIC_SDRAM_TIME_CTRL_LOW     		0x11602220
+#define STATIC_SDRAM_TIME_CTRL_HI	     	0x0000040b
+#define STATIC_SDRAM_ODT_CTRL_LOW	     	0x0
+#define STATIC_SDRAM_ODT_CTRL_HI	     	0x0
+#define STATIC_SDRAM_DUNIT_ODT_CTRL    		0x0
+#define STATIC_SDRAM_EXT_MODE          		0x0
+
+#else /* NAS */
+
+
+#if defined(RD_88F5182)
+
+#if defined(MV_88F5082)
+#define STATIC_SDRAM0_BANK0_SIZE	     	0x3ff0001
+#define STATIC_SDRAM_ADDR_CTRL	    	 	0x20
+#else
+#define STATIC_SDRAM0_BANK0_SIZE	     	0x7ff0001
+#define STATIC_SDRAM_ADDR_CTRL	    	 	0x20
+#endif
+
+#elif defined(RD_88F5182_3)
+
+#if defined(MV_88F5082)
+#define STATIC_SDRAM0_BANK0_SIZE	     	0x3ff0001
+#define STATIC_SDRAM_ADDR_CTRL	    	 	0x20
+#else
+#define STATIC_SDRAM0_BANK0_SIZE	     	0x7ff0001
+#define STATIC_SDRAM_ADDR_CTRL	    	 	0x20
+#endif
+
+#else
+
+#define STATIC_SDRAM0_BANK0_SIZE	     	0x1ff0001
+#define STATIC_SDRAM_ADDR_CTRL	    	 	0x0
+
+#endif
+
+#if defined(MV_88F5082)
+#define STATIC_SDRAM_CONFIG	     		0x3144400
+#else
+#define STATIC_SDRAM_CONFIG	     		0x3148400
+#endif
+#define STATIC_SDRAM_MODE	     		0x62
+#define STATIC_DUNIT_CTRL_LOW	     		0x4041000
+#define STATIC_SDRAM_TIME_CTRL_LOW     		0x11602220
+#define STATIC_SDRAM_TIME_CTRL_HI	     	0x40c
+#define STATIC_SDRAM_ODT_CTRL_LOW	     	0x0
+#define STATIC_SDRAM_ODT_CTRL_HI	     	0x0
+#define STATIC_SDRAM_DUNIT_ODT_CTRL    		0x0
+#define STATIC_SDRAM_EXT_MODE          		0x0
+
+#endif
+
+	.globl _mvDramIfStaticInit
+_mvDramIfStaticInit:
+
+	mov     r11, LR     		/* Save link register */
+	mov	r10, r2
+
+        /* If we boot from NAND jump to DRAM sddress */
+
+        mov     r5, #1
+        ldr     r6, =dramBoot1
+        str     r5, [r6]                /* We started executing from DRAM */
+
+        ldr     r6, dramBoot1
+        cmp     r6, #0
+        bne     1f
+
+
+	/* set all dram windows to 0 */
+	mov	r6, #0
+	MV_REG_WRITE_ASM(r6, r5, 0x1504)
+	MV_REG_WRITE_ASM(r6, r5, 0x150c)
+	MV_REG_WRITE_ASM(r6, r5, 0x1514)
+	MV_REG_WRITE_ASM(r6, r5, 0x151c)
+
+	/* set all dram configuration in temp registers */
+	ldr	r6, = STATIC_SDRAM0_BANK0_SIZE
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG0)
+	ldr	r6, = STATIC_SDRAM_CONFIG
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG1)
+	ldr	r6, = STATIC_SDRAM_MODE
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG2)
+	ldr	r6, = STATIC_DUNIT_CTRL_LOW
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG3)
+	ldr	r6, = STATIC_SDRAM_ADDR_CTRL
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG4)
+	ldr	r6, = STATIC_SDRAM_TIME_CTRL_LOW
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG5)
+	ldr	r6, = STATIC_SDRAM_TIME_CTRL_HI
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG6)
+	ldr	r6, = STATIC_SDRAM_ODT_CTRL_LOW
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG7)
+	ldr	r6, = STATIC_SDRAM_ODT_CTRL_HI
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG8)
+	ldr	r6, = STATIC_SDRAM_DUNIT_ODT_CTRL
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG9)
+	ldr	r6, = STATIC_SDRAM_EXT_MODE
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG10)
+
+	mov 	sp, #0
+	bl	_mvDramIfConfig
+1:
+	mov 	r2, r10
+	mov     PC, r11         	/* r11 is saved link register */
+
+#else  /* #if !defined(MV_INC_BOARD_DDIM) */
+
+.globl dramBoot1
+dramBoot1:
+        .word   0
+
+/*******************************************************************************
+* mvDramIfBasicInit - Basic initialization of DRAM interface
+*
+* DESCRIPTION:
+*       The function will initialize the DRAM for basic usage. The function
+*       will use the TWSI assembly API to extract DIMM parameters according
+*       to which DRAM interface will be initialized.
+*       The function referes to the following DRAM parameters:
+*       1) DIMM is registered or not.
+*       2) DIMM width detection.
+*       3) DIMM density.
+*
+* INPUT:
+*       r3 - required size for initial DRAM.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*       Note:
+*       r4 holds I2C EEPROM address
+*       r5 holds SDRAM register base address
+*	r7 holds returned values
+*       r8 holds SDRAM various configuration registers value.
+*       r11 holds return function address.
+*******************************************************************************/
+/* Setting the offsets of the I2C registers */
+#define NUM_OF_ROWS_OFFSET            3
+#define NUM_OF_COLS_OFFSET            4
+#define NUM_OF_RANKS		      5
+#define SDRAM_WIDTH_OFFSET           13
+#define NUM_OF_BANKS_OFFSET          17
+#define SUPPORTED_CL_OFFSET          18
+#define DIMM_TYPE_INFO_OFFSET        20         /* DDR2 only    */
+#define SDRAM_MODULES_ATTR_OFFSET    21
+
+#define DRAM_DEV_DENSITY_128M        0x080
+#define DRAM_DEV_DENSITY_256M        0x100
+#define DRAM_DEV_DENSITY_512M        0x200
+       .globl _mvDramIfBasicInit
+       .extern _i2cInit
+
+_mvDramIfBasicInit:
+
+        mov     r11, LR     		/* Save link register */
+
+        mov     r5, #1
+        ldr     r8, =dramBoot1
+        str     r5, [r8]                /* We started executing from DRAM */
+
+        /* If we boot from NAND jump to DRAM sddress */
+        ldr     r8, dramBoot1
+        cmp     r8, #0
+        movne   pc, r11
+
+
+
+        bl      _i2cInit                /* Initialize TWSI master             */
+
+        /* Get default SDRAM Config values */
+        MV_REG_READ_ASM (r8, r5, SDRAM_CONFIG_REG)
+        bic     r8, r8, #SDRAM_DCFG_MASK
+
+
+        /* Read device ID  */
+	MV_CTRL_MODEL_GET_ASM(r4, r5);
+
+        /* Return if OrionN */
+        ldr     r5, =MV_5180_DEV_ID
+        cmp     r4, r5
+        beq     cat_through_end
+
+        /* Return if Orion1 */
+        ldr     r5, =MV_5181_DEV_ID
+        cmp     r4, r5
+        beq     cat_through_end
+
+        /* Return if Nas */
+        ldr     r5, =MV_5182_DEV_ID
+        cmp     r4, r5
+        beq     cat_through_end
+
+        /* Return if Shark */
+        ldr     r5, =MV_8660_DEV_ID
+        cmp     r4, r5
+        beq     cat_through_end
+
+        /* goto calcConfigReg if bigger than Orion2*/
+        ldr     r5, =MV_5281_DEV_ID
+        cmp     r4, r5
+        bne     cat_through
+
+cat_through:
+        /* set cat through - for better performance - in orion2 b0 and higher*/
+        orr     r8, r8, #SDRAM_CATTHR_EN
+
+cat_through_end:
+
+
+        /* Get registered/non registered info from DIMM */
+	bl  	_is_Registered
+        beq     nonRegistered
+
+setRegistered:
+        orr     r8, r8, #SDRAM_REGISTERED   /* Set registered bit(17)         */
+
+nonRegistered:
+	/* Get SDRAM width */
+	bl 	_get_width
+
+        orr     r6, r8, #SDRAM_DCFG_X16_DEV /* x16 devices  */
+        cmp     r7, #16
+        beq     setConfigReg
+
+        orr     r6, r8, #SDRAM_DCFG_X8_DEV  /* x8 devices   */
+        cmp     r7, #8
+        beq     setConfigReg
+
+        /* This is an error. return */
+        b       exit_ddrAutoConfig
+
+setConfigReg:
+        mov     r8, r6
+        ldr     r6, =SDRAM_CONFIG_DV
+        orr     r8, r8, r6              /* Add default settings */
+        mov     r6, r8                  /* Do not swap r8 content */
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_CONFIG_REG)
+
+        /* Set maximum CL supported by DIMM */
+	bl	_get_CAL
+
+        /* r7 is DIMM supported CAS (e.g: 3 --> 0x1C)                         */
+        clz     r6, r7
+        rsb     r6, r6, #31     /* r6 = the bit number of MAX CAS supported   */
+
+        /* Check the DDR version */
+        tst     r8, #SDRAM_DTYPE_DDR2
+        bne     casDdr2
+
+casDdr1:
+        ldr	r7, =3		/* stBurstDel field value	*/
+	ldr     r8, =0x52       /* Assuming MAX CL = 1.5        */
+        cmp     r6, #1          /* If CL = 1.5 break            */
+        beq     setModeReg
+
+        ldr	r7, =3		/* stBurstDel field value	*/
+	ldr     r8, =0x22       /* Assuming MAX CL = 2          */
+        cmp     r6, #2          /* If CL = 2 break              */
+        beq     setModeReg
+
+        ldr	r7, =4		/* stBurstDel field value	*/
+	ldr     r8, =0x62       /* Assuming MAX CL = 2.5        */
+        cmp     r6, #3          /* If CL = 2.5 break            */
+        beq     setModeReg
+
+        ldr	r7, =4		/* stBurstDel field value	*/
+	ldr     r8, =0x32       /* Assuming MAX CL = 3          */
+        cmp     r6, #4          /* If CL = 3 break              */
+        beq     setModeReg
+
+        ldr	r7, =5		/* stBurstDel field value	*/
+	ldr     r8, =0x42       /* Assuming MAX CL = 4          */
+        cmp     r6, #6          /* If CL = 4 break              */
+        b       setModeReg
+
+        b       exit_ddrAutoConfig      /* This is an error !!  */
+
+casDdr2:
+        ldr	r7, =4		/* stBurstDel field value	*/
+	ldr     r8, =0x32      /* Assuming MAX CL = 3           */
+        cmp     r6, #3          /* If CL = 3 break              */
+        beq     casDdr2Cont
+
+        ldr	r7, =5		/* stBurstDel field value	*/
+	ldr     r8, =0x42      /* Assuming MAX CL = 4           */
+        cmp     r6, #4          /* If CL = 4 break              */
+        beq     casDdr2Cont
+
+        /* CL 5 currently unsupported. We use CL 4 instead      */
+        ldr	r7, =5		/* stBurstDel field value	*/
+	ldr     r8, =0x42      /* Assuming MAX CL = 5           */
+        cmp     r6, #5          /* If CL = 5 break              */
+        beq     casDdr2Cont
+
+        b       exit_ddrAutoConfig      /* This is an error !!  */
+casDdr2Cont:
+        /* Write recovery for auto-precharge relevant only in DDR2 */
+        orr     r8, r8, #0x400   /* Default value */
+
+setModeReg:
+        /* The CPU must not attempt to change the SDRAM Mode register setting */
+        /* prior to DRAM controller completion of the DRAM initialization     */
+        /* sequence. To guarantee this restriction, it is recommended that    */
+        /* the CPU sets the SDRAM Operation register to NOP command, performs */
+        /* read polling until the register is back in Normal operation value, */
+        /* and then sets SDRAM Mode register to it's new value.               */
+
+	/* write 'nop' to SDRAM operation */
+        mov     r6, #0x5                 /* 'NOP' command              */
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_OPERATION_REG)
+
+        /* poll SDRAM operation. Make sure its back to normal operation       */
+_sdramOpPoll1:
+        ldr     r6, [r5]
+        cmp     r6, #0                          /* '0' = Normal SDRAM Mode    */
+        bne     _sdramOpPoll1
+
+        /* Now its safe to write new value to SDRAM Mode register             */
+        MV_REG_WRITE_ASM (r8, r5, SDRAM_MODE_REG)
+
+        /* Make the Dunit write the DRAM its new mode                         */
+        mov     r6, #0x3                 /* Mode Register Set command  */
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_OPERATION_REG)
+
+        /* poll SDRAM operation. Make sure its back to normal operation       */
+_sdramOpPoll2:
+        ldr     r6, [r5]
+        cmp     r6, #0                          /* '0' = Normal SDRAM Mode    */
+        bne     _sdramOpPoll2
+
+	/* Set Dunit control register according to max CL detected	      */
+	/* If we use registered DIMM, add 1 to stBurstDel		      */
+        MV_REG_READ_ASM (r6, r5, SDRAM_CONFIG_REG)
+	tst	r6, #SDRAM_REGISTERED
+	beq	setDunitReg
+	add	r7, r7, #1
+
+setDunitReg:
+        ldr     r6, =SDRAM_DUNIT_CTRL_LOW_DV
+        orr	r6, r6, r7, LSL #SDRAM_ST_BURST_DEL_OFFS
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_DUNIT_CTRL_REG)
+
+
+        /* DIMM density configuration*/
+        /* Density = (1 << (rowNum + colNum)) * dramWidth * dramBankNum       */
+Density:
+	bl 	_getDensity
+	mov 	r8, r7
+        mov     r8, r8, LSR #20 /* Move density 20 bits to the right  */
+                                /* For example 0x10000000 --> 0x1000 */
+
+        mov     r6, #0x00
+        cmp     r8, #DRAM_DEV_DENSITY_128M
+        beq     densCont
+
+        mov     r6, #0x10
+        cmp     r8, #DRAM_DEV_DENSITY_256M
+        beq     densCont
+
+        mov     r6, #0x20
+        cmp     r8, #DRAM_DEV_DENSITY_512M
+        beq     densCont
+
+        /* This is an error. return */
+        b       exit_ddrAutoConfig
+
+densCont:
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_ADDR_CTRL_REG)
+
+        /* Config DDR2 registers (Extended mode, ODTs and pad calibration)    */
+        MV_REG_READ_ASM (r8, r5, SDRAM_CONFIG_REG)
+        tst     r8, #SDRAM_DTYPE_DDR2
+        beq     _extModeODTEnd
+
+
+	/* Set DDR Extended Mode register for working with CS[0]	      */
+        /* write 'nop' to SDRAM operation */
+        mov     r6, #0x5                 /* 'NOP' command              */
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_OPERATION_REG)
+
+        /* poll SDRAM operation. Make sure its back to normal operation       */
+_sdramOpPoll3:
+        ldr     r6, [r5]
+        cmp     r6, #0                          /* '0' = Normal SDRAM Mode    */
+        bne     _sdramOpPoll3
+
+        /* Now its safe to write new value to SDRAM Extended Mode register    */
+        ldr	r6, =DDR_SDRAM_EXT_MODE_CS0_DV
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_EXTENDED_MODE_REG)
+
+        /* Make the Dunit write the DRAM its new extended mode                */
+        mov     r6, #0x4                /* Extended Mode Register Set command */
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_OPERATION_REG)
+
+        /* poll SDRAM operation. Make sure its back to normal operation       */
+_sdramOpPoll4:
+        ldr     r6, [r5]
+        cmp     r6, #0                          /* '0' = Normal SDRAM Mode    */
+        bne     _sdramOpPoll4
+
+	/* ODT configuration is done for single bank CS[0] only		      */
+        /* Config DDR2 On Die Termination (ODT) registers */
+        ldr     r6, =DDR2_ODT_CTRL_LOW_CS0_DV
+        MV_REG_WRITE_ASM (r6, r5, DDR2_SDRAM_ODT_CTRL_LOW_REG)
+
+        ldr     r6, =DDR2_ODT_CTRL_HIGH_CS0_DV
+        MV_REG_WRITE_ASM (r6, r5, DDR2_SDRAM_ODT_CTRL_HIGH_REG)
+
+        ldr     r6, =DDR2_DUNIT_ODT_CTRL_CS0_DV
+        MV_REG_WRITE_ASM (r6, r5, DDR2_DUNIT_ODT_CONTROL_REG)
+
+
+        /* we will check what device we are running and perform
+        Initialization according to device value */
+
+_extModeODTEnd:
+
+        /* Implement Guideline (GL# MEM-2) P_CAL Automatic Calibration  */
+        /* Does Not Work for Address/Control and Data Pads.             */
+        /* Relevant for: 88F5181-A1/B0 and 88F5281-A0                   */
+
+	/* Read device ID  */
+	MV_CTRL_MODEL_GET_ASM(r6, r5);
+        /* Read device revision */
+	MV_CTRL_REV_GET_ASM(r8, r5);
+
+	/* Continue if OrionN */
+        ldr     r5, =MV_5180_DEV_ID
+        cmp     r6, r5
+        bne     1f
+        b     glMem2End
+1:
+
+	/* Continue if Orion1 and device revision B1 */
+        ldr     r5, =MV_5181_DEV_ID
+        cmp     r6, r5
+        bne     1f
+
+        cmp     r8, #MV_5181_B1_REV
+        bge     glMem2End
+        b       glMem2Start
+1:
+
+        /* Orion NAS */
+        ldr     r5, =MV_5182_DEV_ID
+        cmp     r6, r5
+        beq     glMem2Start
+
+        /* Orion Shark */
+        ldr     r5, =MV_8660_DEV_ID
+        cmp     r6, r5
+        beq     glMem2Start
+
+	b	glMem2End
+
+glMem2Start:
+
+        /* DDR SDRAM Address/Control Pads Calibration                   */
+        MV_REG_READ_ASM (r6, r5, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+
+        /* Set Bit [31] to make the register writable                   */
+        orr   r8, r6, #SDRAM_WR_EN
+
+        MV_REG_WRITE_ASM (r8, r5, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+
+        bic   r6, r6, #SDRAM_WR_EN      /* Make register read-only      */
+        bic   r6, r6, #SDRAM_TUNE_EN    /* Disable auto calibration     */
+        bic   r6, r6, #SDRAM_DRVN_MASK  /* Clear r5[5:0]<DrvN>          */
+        bic   r6, r6, #SDRAM_DRVP_MASK  /* Clear r5[11:6]<DrvP>         */
+
+        /* Get the final N locked value of driving strength [22:17]     */
+        mov   r5, r6
+        mov   r5, r5, LSL #9
+        mov   r5, r5, LSR #26    /* r5[5:0]<DrvN>  = r6[22:17]<LockN>   */
+        orr   r5, r5, r5, LSL #6 /* r5[11:6]<DrvP> = r5[5:0]<DrvN>      */
+
+        /* Write to both <DrvN> bits [5:0] and <DrvP> bits [11:6]       */
+        orr   r6, r6, r5
+
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+
+
+        /* DDR SDRAM Data Pads Calibration                              */
+        MV_REG_READ_ASM (r6, r5, SDRAM_DATA_PADS_CAL_REG)
+
+        /* Set Bit [31] to make the register writable                   */
+        orr   r8, r6, #SDRAM_WR_EN
+
+        MV_REG_WRITE_ASM (r8, r5, SDRAM_DATA_PADS_CAL_REG)
+
+        bic   r6, r6, #SDRAM_WR_EN      /* Make register read-only      */
+        bic   r6, r6, #SDRAM_TUNE_EN    /* Disable auto calibration     */
+        bic   r6, r6, #SDRAM_DRVN_MASK  /* Clear r5[5:0]<DrvN>          */
+        bic   r6, r6, #SDRAM_DRVP_MASK  /* Clear r5[11:6]<DrvP>         */
+
+        /* Get the final N locked value of driving strength [22:17]     */
+        mov   r5, r6
+        mov   r5, r5, LSL #9
+        mov   r5, r5, LSR #26
+        orr   r5, r5, r5, LSL #6 /* r5[5:0] = r6[22:17]<LockN>  */
+
+        /* Write to both <DrvN> bits [5:0] and <DrvP> bits [11:6]       */
+        orr   r6, r6, r5
+
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_DATA_PADS_CAL_REG)
+
+glMem2End:
+        /* Implement Guideline (GL# MEM-3) Drive Strength Value         */
+        /* Relevant for: 88F5181-A1/B0/B1 and 88F5281-A0/B0             */
+
+        /* Get SDRAM Config value */
+        MV_REG_READ_ASM (r8, r5, SDRAM_CONFIG_REG)
+
+        /* Get DIMM type */
+        tst     r8, #SDRAM_DTYPE_DDR2
+        beq     ddr1StrengthVal
+
+ddr2StrengthVal:
+        ldr     r4, =DDR2_ADDR_CTRL_PAD_STRENGTH_TYPICAL_DV
+        ldr     r8, =DDR2_DATA_PAD_STRENGTH_TYPICAL_DV
+        b       setDrvStrength
+ddr1StrengthVal:
+        ldr     r4, =DDR1_ADDR_CTRL_PAD_STRENGTH_TYPICAL_DV
+        ldr     r8, =DDR1_DATA_PAD_STRENGTH_TYPICAL_DV
+
+setDrvStrength:
+        /* DDR SDRAM Address/Control Pads Calibration                   */
+        MV_REG_READ_ASM (r6, r5, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+
+        orr   r6, r6, #SDRAM_WR_EN      /* Make register writeable      */
+
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+        HTOLL(r6,r5)
+
+        bic   r6, r6, #SDRAM_WR_EN      /* Make register read-only      */
+        bic   r6, r6, #SDRAM_PRE_DRIVER_STRENGTH_MASK
+        orr   r6, r4, r6                /* Set default value for DDR    */
+
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+
+
+        /* DDR SDRAM Data Pads Calibration                              */
+        MV_REG_READ_ASM (r6, r5, SDRAM_DATA_PADS_CAL_REG)
+
+        orr   r6, r6, #SDRAM_WR_EN      /* Make register writeable      */
+
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_DATA_PADS_CAL_REG)
+        HTOLL(r6,r5)
+
+        bic   r6, r6, #SDRAM_WR_EN      /* Make register read-only      */
+        bic   r6, r6, #SDRAM_PRE_DRIVER_STRENGTH_MASK
+        orr   r6, r8, r6                /* Set default value for DDR    */
+
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_DATA_PADS_CAL_REG)
+
+
+        /* Implement Guideline (GL# MEM-4) DQS Reference Delay Tuning   */
+        /* Relevant for: 88F5181-A1/B0/B1 and 88F5281-A0/B0             */
+        /* Get the "sample on reset" register for the DDR frequancy     */
+
+#if defined(MV_RUN_FROM_FLASH)
+	/* Calc the absolute address of the _cpuARMDDRCLK[] in the boot flash */
+	ldr   r7, = _cpuARMDDRCLK
+	ldr	r4, =_start
+	ldr	r4, [r4]
+	sub   r7, r7, r4
+         ldr   r4, = Lrom_start_of_data
+         ldr	r4, [r4]
+         add   r7, r4, r7
+#else
+	/* Calc the absolute address of the _cpuARMDDRCLK[] in the boot flash */
+	ldr     r7, = _cpuARMDDRCLK
+	ldr	r4, =_start
+	sub     r7, r7, r4
+	add	r7, r7, #CFG_MONITOR_BASE
+#endif
+        /* Get the "sample on reset" register for the DDR frequancy     */
+        MV_REG_READ_ASM (r4, r5, MPP_SAMPLE_AT_RESET)
+        ldr     r5, =MSAR_ARMDDRCLCK_MASK
+        and     r5, r4, r5
+#if 0 /* YOTAM TO BE FIX */
+	mov    r5, r5, LSR #MSAR_ARMDDRCLCK_OFFS
+#endif
+
+	/* Read device ID  */
+	MV_CTRL_MODEL_GET_ASM(r6, r8);
+
+        /* Continue if TC90 */
+        ldr     r8, =MV_1281_DEV_ID
+        cmp     r6, r6
+        beq     armClkMsb
+
+        /* Continue if Orion2 */
+        ldr     r8, =MV_5281_DEV_ID
+        cmp     r6, r8
+#if 0 /* YOTAM TO BE FIX */
+        bne     1f
+#endif
+
+armClkMsb:
+#if 0 /* YOTAM TO BE FIX */
+        tst    r4, #MSAR_ARMDDRCLCK_H_MASK
+        beq    1f
+        orr    r5, r5, #BIT4
+1:
+	ldr    r4, =MV_CPU_ARM_CLK_ELM_SIZE
+	mul    r5, r4, r5
+	add    r7, r7, r5
+	add    r7, r7, #MV_CPU_ARM_CLK_DDR_OFF
+	ldr    r5, [r7]
+#endif
+
+        /* Get SDRAM Config value */
+        MV_REG_READ_ASM (r8, r4, SDRAM_CONFIG_REG)
+
+        /* Get DIMM type */
+        tst     r8, #SDRAM_DTYPE_DDR2
+        beq     ddr1FtdllVal
+
+ddr2FtdllVal:
+        ldr    r4, =FTDLL_DDR2_250MHZ
+	ldr    r7, =_250MHz
+        cmp    r5, r7
+        beq    setFtdllReg
+        ldr     r4, =FTDLL_DDR2_200MHZ
+	ldr    r7, =_200MHz
+        cmp    r5, r7
+        beq    setFtdllReg
+        ldr     r4, =FTDLL_DDR2_166MHZ
+	ldr    r7, =_166MHz
+        cmp    r5, r7
+        beq    setFtdllReg
+        ldr     r4, =FTDLL_DDR2_133MHZ
+        b       setFtdllReg
+
+ddr1FtdllVal:
+        ldr    r4, =FTDLL_DDR1_200MHZ
+	ldr    r7, =_200MHz
+        cmp    r5, r7
+        beq    setFtdllReg
+        ldr    r4, =FTDLL_DDR1_166MHZ
+	ldr    r7, =_166MHz
+        cmp    r5, r7
+        beq    setFtdllReg
+        ldr    r4, =FTDLL_DDR1_133MHZ
+	ldr    r7, =_133MHz
+        cmp    r5, r7
+        beq    setFtdllReg
+        ldr    r4, =0
+
+setFtdllReg:
+
+#if !defined(MV_88W8660) && !defined(MV_88F6183) && !defined(MV_88F6183L)
+        MV_REG_READ_ASM (r8, r5, SDRAM_FTDLL_CONFIG_REG)
+        orr    r8, r8, r4
+        MV_REG_WRITE_ASM (r8, r5, SDRAM_FTDLL_CONFIG_REG)
+        bic   r8, r8, #1
+        MV_REG_WRITE_ASM (r8, r5, SDRAM_FTDLL_CONFIG_REG)
+#endif /* !defined(MV_88W8660) && !defined(MV_88F6183) && !defined(MV_88F6183L)*/
+
+
+setTimingReg:
+        /* Set default Timing parameters */
+        MV_REG_READ_ASM (r8, r5, SDRAM_CONFIG_REG)
+        tst     r8, #SDRAM_DTYPE_DDR2
+        bne     ddr2TimeParam
+
+ddr1TimeParam:
+        ldr     r6, =DDR1_TIMING_LOW_DV
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_TIMING_CTRL_LOW_REG)
+        ldr     r6, =DDR1_TIMING_HIGH_DV
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_TIMING_CTRL_HIGH_REG)
+        b       timeParamDone
+
+ddr2TimeParam:
+        ldr     r6, =DDR2_TIMING_LOW_DV
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_TIMING_CTRL_LOW_REG)
+        ldr     r6, =DDR2_TIMING_HIGH_DV
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_TIMING_CTRL_HIGH_REG)
+
+timeParamDone:
+        /* Open CS[0] window to requested size and enable it. Disable other   */
+	/* windows 							      */
+        ldr	r6, =SCBAR_BASE_MASK
+        sub     r3, r3, #1
+        and	r3, r3, r6
+	orr	r3, r3, #1	/* Enable bank */
+        MV_REG_WRITE_ASM (r3, r5, SDRAM_SIZE_REG(0))
+        ldr	r6, =0
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(1))
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(2))
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(3))
+
+exit_ddrAutoConfig:
+        mov     PC, r11         /* r11 is saved link register */
+
+
+/***************************************************************************************/
+/*       r4 holds I2C EEPROM address
+ *       r7 holds I2C EEPROM offset parameter for i2cRead and its --> returned value
+ *       r8 holds SDRAM various configuration registers value.
+ *	r13 holds Link register
+ */
+/**************************/
+_getDensity:
+	mov     r13, LR                            /* Save link register */
+
+        mov     r4, #MV_BOARD_DIMM0_I2C_ADDR       /* reading from DIMM0      */
+        mov     r7, #NUM_OF_ROWS_OFFSET            /* offset  3               */
+        bl      _i2cRead
+        mov     r8, r7                             /* r8 save number of rows  */
+
+        mov     r4, #MV_BOARD_DIMM0_I2C_ADDR       /* reading from DIMM0      */
+        mov     r7, #NUM_OF_COLS_OFFSET            /* offset  4               */
+        bl      _i2cRead
+        add     r8, r8, r7                         /* r8 = number of rows + number of col */
+
+        mov     r7, #0x1
+        mov     r8, r7, LSL r8                     /* r8 = (1 << r8)          */
+
+        mov     r4, #MV_BOARD_DIMM0_I2C_ADDR       /* reading from DIMM0      */
+        mov     r7, #SDRAM_WIDTH_OFFSET            /* offset 13 */
+        bl      _i2cRead
+        mul     r8, r7, r8
+
+        mov     r4, #MV_BOARD_DIMM0_I2C_ADDR       /* reading from DIMM0      */
+        mov     r7, #NUM_OF_BANKS_OFFSET           /* offset 17               */
+        bl      _i2cRead
+        mul     r7, r8, r7
+
+	mov     PC, r13
+
+/**************************/
+_get_width:
+	mov     r13, LR                 /* Save link register */
+
+        /* Get SDRAM width (SPD offset 13) */
+        mov     r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0            */
+        mov     r7, #SDRAM_WIDTH_OFFSET
+        bl      _i2cRead                /* result in r7                       */
+
+	mov     PC, r13
+
+/**************************/
+_get_CAL:
+	mov     r13, LR                 /* Save link register */
+
+        /* Set maximum CL supported by DIMM */
+        mov     r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0            */
+        mov     r7, #SUPPORTED_CL_OFFSET     /* offset  18 */
+        bl      _i2cRead
+
+	mov     PC, r13
+
+/**************************/
+/* R8 - sdram configuration register.
+ * Return value in flag if no-registered then Z-flag is set
+ */
+_is_Registered:
+	mov     r13, LR                 /* Save link register */
+
+        /* Get registered/non registered info from DIMM */
+        tst     r8, #SDRAM_DTYPE_DDR2
+        bne     regDdr2
+
+regDdr1:
+        mov     r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0            */
+        mov     r7, #SDRAM_MODULES_ATTR_OFFSET
+        bl      _i2cRead                /* result in r7                       */
+        tst     r7, #0x2
+	b	exit
+regDdr2:
+        mov     r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0            */
+        mov     r7, #DIMM_TYPE_INFO_OFFSET
+        bl      _i2cRead                /* result in r7                       */
+        tst     r7, #0x11               /* DIMM type = regular RDIMM (0x01)   */
+                                        /* or Mini-RDIMM (0x10)               */
+exit:
+        mov     PC, r13
+
+
+#endif
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfConfig.S b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfConfig.S
new file mode 100644
index 000000000000..963c6c9148e6
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfConfig.S
@@ -0,0 +1,667 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/*******************************************************************************
+* mvDramIfBasicAsm.s
+*
+* DESCRIPTION:
+*       Memory full detection and best timing configuration is done in
+*       C code. C runtime environment requires a stack. This module API
+*       initialize DRAM interface chip select 0 for basic functionality for
+*       the use of stack.
+*       The module API assumes DRAM information is stored in I2C EEPROM reside
+*       in a given I2C address MV_BOARD_DIMM0_I2C_ADDR. The I2C EEPROM
+*       internal data structure is assumed to be orgenzied in common DRAM
+*       vendor SPD structure.
+*       NOTE: DFCDL values are assumed to be already initialized prior to
+*       this module API activity.
+*
+*
+* DEPENDENCIES:
+*       None.
+*
+*******************************************************************************/
+
+/* includes */
+#define MV_ASMLANGUAGE
+#include "mvOsAsm.h"
+#include "mvSysHwConfig.h"
+#include "mvDramIfRegs.h"
+#include "mvDramIfConfig.h"
+#include "mvCpuIfRegs.h"
+#include "pex/mvPexRegs.h"
+#include "pci/mvPciRegs.h"
+#include "mvCtrlEnvSpec.h"
+#include "mvCtrlEnvAsm.h"
+#include "cpu/mvCpuArm.h"
+#include "mvCommon.h"
+
+/* defines  */
+
+/* locals   */
+.data
+.globl _mvDramIfConfig
+
+.text
+
+/*******************************************************************************
+* _mvDramIfConfig - Basic DRAM interface initialization.
+*
+* DESCRIPTION:
+*       The function will initialize the following DRAM parameters using the
+*       values prepared by mvDramIfDetect routine. Values are located
+*       in predefined registers.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+
+_mvDramIfConfig:
+
+        /* Save register on stack */
+	cmp	sp, #0
+	beq	no_stack_s
+save_on_stack:
+        stmdb	sp!, {r1, r2, r3, r4, r7, r11}
+no_stack_s:
+
+	/* 1) Write to SDRAM coniguration register */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG1)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_CONFIG_REG)
+        str     r4, [r1]
+
+	/* 2) Write Dunit control low register */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG3)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_DUNIT_CTRL_REG)
+        str     r4, [r1]
+
+        /* 3) Write SDRAM address control register */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG4)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_ADDR_CTRL_REG)
+        str     r4, [r1]
+
+        /* 4) Write SDRAM bank 0 size register */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG0)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_SIZE_REG(0))
+        str     r4, [r1]
+
+        /* 5) Write SDRAM open pages control register */
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_OPEN_PAGE_CTRL_REG)
+        ldr     r4, =SDRAM_OPEN_PAGES_CTRL_REG_DV
+        str     r4, [r1]
+
+        /* 6) Write SDRAM timing Low register */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG5)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_TIMING_CTRL_LOW_REG)
+        str     r4, [r1]
+
+        /* 7) Write SDRAM timing High register */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG6)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_TIMING_CTRL_HIGH_REG)
+        str     r4, [r1]
+
+        /* 8) Write SDRAM mode register */
+        /* The CPU must not attempt to change the SDRAM Mode register setting */
+        /* prior to DRAM controller completion of the DRAM initialization     */
+        /* sequence. To guarantee this restriction, it is recommended that    */
+        /* the CPU sets the SDRAM Operation register to NOP command, performs */
+        /* read polling until the register is back in Normal operation value, */
+        /* and then sets SDRAM Mode register to it’s new value.               */
+
+	/* 8.1 write 'nop' to SDRAM operation */
+        mov     r4, #0x5                 /* 'NOP' command              */
+        MV_REG_WRITE_ASM(r4, r1, SDRAM_OPERATION_REG)
+
+        /* 8.2 poll SDRAM operation. Make sure its back to normal operation   */
+_sdramOpPoll1:
+        ldr     r4, [r1]
+        cmp     r4, #0                          /* '0' = Normal SDRAM Mode    */
+        bne     _sdramOpPoll1
+
+        /* 8.3 Now its safe to write new value to SDRAM Mode register         */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG2)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_MODE_REG)
+        str     r4, [r1]
+
+        /* 8.4 Make the Dunit write the DRAM its new mode                     */
+        mov     r4, #0x3                 /* Mode Register Set command  */
+        MV_REG_WRITE_ASM (r4, r1, SDRAM_OPERATION_REG)
+
+        /* 8.5 poll SDRAM operation. Make sure its back to normal operation   */
+_sdramOpPoll2:
+        ldr     r4, [r1]
+        cmp     r4, #0                          /* '0' = Normal SDRAM Mode    */
+        bne     _sdramOpPoll2
+
+#ifndef DB_FPGA
+        /* Config DDR2 registers (Extended mode, ODTs and pad calibration)    */
+        MV_REG_READ_ASM (r4, r1, SDRAM_CONFIG_REG)
+        tst     r4, #SDRAM_DTYPE_DDR2
+        beq     _extModeODTEnd
+#endif /* DB_FPGA */
+
+        /* 9) Write SDRAM Extended mode register This operation should be     */
+        /*    done for each memory bank                                       */
+        /* write 'nop' to SDRAM operation */
+        mov     r4, #0x5                 /* 'NOP' command              */
+        MV_REG_WRITE_ASM (r4, r1, SDRAM_OPERATION_REG)
+
+        /* poll SDRAM operation. Make sure its back to normal operation   */
+_sdramOpPoll3:
+        ldr     r4, [r1]
+        cmp     r4, #0                          /* '0' = Normal SDRAM Mode    */
+        bne     _sdramOpPoll3
+
+        /* Now its safe to write new value to SDRAM Extended Mode register    */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG10)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_EXTENDED_MODE_REG)
+        str     r4, [r1]
+
+        /* Go over each of the Banks */
+        ldr     r3, =0          /* r3 = DRAM bank Num */
+
+extModeLoop:
+        /* Set the SDRAM Operation Control to each of the DRAM banks          */
+        mov     r2, r3   /* Do not swap the bank counter value */
+        MV_REG_WRITE_ASM (r2, r1, SDRAM_OPERATION_CTRL_REG)
+
+        /* Make the Dunit write the DRAM its new mode                     */
+        mov     r4, #0x4        /* Extended Mode Register Set command  */
+        MV_REG_WRITE_ASM (r4, r1, SDRAM_OPERATION_REG)
+
+        /* poll SDRAM operation. Make sure its back to normal operation   */
+_sdramOpPoll4:
+        ldr     r4, [r1]
+        cmp     r4, #0                          /* '0' = Normal SDRAM Mode    */
+        bne     _sdramOpPoll4
+#ifndef DB_FPGA
+        add     r3, r3, #1
+        cmp     r3, #4         /* 4 = Number of banks */
+        bne     extModeLoop
+
+extModeEnd:
+        /* Config DDR2 On Die Termination (ODT) registers */
+        /* Write SDRAM DDR2 ODT control low register */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG7)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + DDR2_SDRAM_ODT_CTRL_LOW_REG)
+        str     r4, [r1]
+
+        /* Write SDRAM DDR2 ODT control high register */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG8)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + DDR2_SDRAM_ODT_CTRL_HIGH_REG)
+        str     r4, [r1]
+
+        /* Write SDRAM DDR2 Dunit ODT control register */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG9)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + DDR2_DUNIT_ODT_CONTROL_REG)
+        str     r4, [r1]
+
+#endif /* DB_FPGA */
+_extModeODTEnd:
+#ifndef DB_FPGA
+        /* Implement Guideline (GL# MEM-2) P_CAL Automatic Calibration  */
+        /* Does Not Work for Address/Control and Data Pads.             */
+        /* Relevant for: 88F5181-A1/B0 and 88F5281-A0                   */
+
+	/* Read device ID  */
+	MV_CTRL_MODEL_GET_ASM(r3, r1);
+        /* Read device revision */
+	MV_CTRL_REV_GET_ASM(r2, r1);
+
+	/* Continue if OrionN */
+        ldr     r1, =MV_5180_DEV_ID
+        cmp     r3, r1
+        bne     1f
+        b     glMem2End
+1:
+        /* Continue if Orion1 and device revision B1 */
+        ldr     r1, =MV_5181_DEV_ID
+        cmp     r3, r1
+        bne     1f
+
+        cmp     r2, #MV_5181_B1_REV
+        bge     glMem2End
+        b       glMem2Start
+1:
+
+        /* Orion NAS */
+        ldr     r1, =MV_5182_DEV_ID
+        cmp     r3, r1
+        beq     glMem2Start
+
+        /* Orion NAS */
+        ldr     r1, =MV_5082_DEV_ID
+        cmp     r3, r1
+        beq     glMem2Start
+
+        /* Orion Shark */
+        ldr     r1, =MV_8660_DEV_ID
+        cmp     r3, r1
+        beq     glMem2Start
+
+	b	glMem2End
+
+glMem2Start:
+
+        /* DDR SDRAM Address/Control Pads Calibration                         */
+        MV_REG_READ_ASM (r3, r1, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+
+        /* Set Bit [31] to make the register writable                   */
+        orr   r2, r3, #SDRAM_WR_EN
+
+        MV_REG_WRITE_ASM (r2, r1, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+
+        bic   r3, r3, #SDRAM_WR_EN      /* Make register read-only      */
+        bic   r3, r3, #SDRAM_TUNE_EN    /* Disable auto calibration     */
+        bic   r3, r3, #SDRAM_DRVN_MASK  /* Clear r1[5:0]<DrvN>          */
+        bic   r3, r3, #SDRAM_DRVP_MASK  /* Clear r1[11:6]<DrvP>         */
+
+        /* Get the final N locked value of driving strength [22:17]     */
+        mov   r1, r3
+        mov   r1, r1, LSL #9
+        mov   r1, r1, LSR #26    /* r1[5:0]<DrvN>  = r3[22:17]<LockN>   */
+        orr   r1, r1, r1, LSL #6 /* r1[11:6]<DrvP> = r1[5:0]<DrvN>      */
+
+        /* Write to both <DrvN> bits [5:0] and <DrvP> bits [11:6]       */
+        orr   r3, r3, r1
+
+        MV_REG_WRITE_ASM (r3, r1, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+
+
+        /* DDR SDRAM Data Pads Calibration                         	*/
+        MV_REG_READ_ASM (r3, r1, SDRAM_DATA_PADS_CAL_REG)
+
+        /* Set Bit [31] to make the register writable                   */
+        orr   r2, r3, #SDRAM_WR_EN
+
+        MV_REG_WRITE_ASM (r2, r1, SDRAM_DATA_PADS_CAL_REG)
+
+        bic   r3, r3, #SDRAM_WR_EN      /* Make register read-only      */
+        bic   r3, r3, #SDRAM_TUNE_EN    /* Disable auto calibration     */
+        bic   r3, r3, #SDRAM_DRVN_MASK  /* Clear r1[5:0]<DrvN>          */
+        bic   r3, r3, #SDRAM_DRVP_MASK  /* Clear r1[11:6]<DrvP>         */
+
+        /* Get the final N locked value of driving strength [22:17]     */
+        mov   r1, r3
+        mov   r1, r1, LSL #9
+        mov   r1, r1, LSR #26
+        orr   r1, r1, r1, LSL #6 /* r1[5:0] = r3[22:17]<LockN>  */
+
+        /* Write to both <DrvN> bits [5:0] and <DrvP> bits [11:6]       */
+        orr   r3, r3, r1
+
+        MV_REG_WRITE_ASM (r3, r1, SDRAM_DATA_PADS_CAL_REG)
+
+glMem2End:
+
+
+        /* Implement Guideline (GL# MEM-3) Drive Strength Value         */
+        /* Relevant for: 88F5181-A1/B0/B1, 88F5281-A0/B0/C/D, 88F5182,  */
+	/* 88F5082, 88F5181L, 88F6082/L, 88F6183, 88F6183L */
+
+        /* Get SDRAM Config value */
+        MV_REG_READ_ASM (r2, r1, SDRAM_CONFIG_REG)
+
+        /* Get DIMM type */
+        tst     r2, #SDRAM_DTYPE_DDR2
+        beq     ddr1StrengthVal
+
+ddr2StrengthVal:
+        ldr     r4, =DDR2_ADDR_CTRL_PAD_STRENGTH_TYPICAL_DV
+        ldr     r2, =DDR2_DATA_PAD_STRENGTH_TYPICAL_DV
+        b       setDrvStrength
+ddr1StrengthVal:
+        ldr     r4, =DDR1_ADDR_CTRL_PAD_STRENGTH_TYPICAL_DV
+        ldr     r2, =DDR1_DATA_PAD_STRENGTH_TYPICAL_DV
+
+setDrvStrength:
+        /* DDR SDRAM Address/Control Pads Calibration                   */
+        MV_REG_READ_ASM (r3, r1, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+
+        orr   r3, r3, #SDRAM_WR_EN      /* Make register writeable      */
+
+        MV_REG_WRITE_ASM (r3, r1, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+        HTOLL(r3,r1)
+
+        bic   r3, r3, #SDRAM_WR_EN      /* Make register read-only      */
+        bic   r3, r3, #SDRAM_PRE_DRIVER_STRENGTH_MASK
+        orr   r3, r4, r3                /* Set default value for DDR    */
+
+        MV_REG_WRITE_ASM (r3, r1, SDRAM_ADDR_CTRL_PADS_CAL_REG)
+
+
+        /* DDR SDRAM Data Pads Calibration                         	      */
+        MV_REG_READ_ASM (r3, r1, SDRAM_DATA_PADS_CAL_REG)
+
+        orr   r3, r3, #SDRAM_WR_EN      /* Make register writeable      */
+
+        MV_REG_WRITE_ASM (r3, r1, SDRAM_DATA_PADS_CAL_REG)
+        HTOLL(r3,r1)
+
+        bic   r3, r3, #SDRAM_WR_EN      /* Make register read-only      */
+        bic   r3, r3, #SDRAM_PRE_DRIVER_STRENGTH_MASK
+        orr   r3, r2, r3                /* Set default value for DDR    */
+
+        MV_REG_WRITE_ASM (r3, r1, SDRAM_DATA_PADS_CAL_REG)
+
+#if !defined(MV_88W8660) && !defined(MV_88F6183) && !defined(MV_88F6183L)
+        /* Implement Guideline (GL# MEM-4) DQS Reference Delay Tuning   */
+        /* Relevant for: 88F5181-A1/B0/B1 and 88F5281-A0/B0/C/D, 88F5182 */
+	/* 88F5082, 88F5181L, 88F6082/L */
+
+	/* Calc the absolute address of the _cpuARMDDRCLK[] in the boot flash */
+	ldr     r7, = _cpuARMDDRCLK
+	ldr	r4, =_start
+	sub     r7, r7, r4
+	add	r7, r7, #CFG_MONITOR_BASE
+
+        /* Get the "sample on reset" register for the DDR frequancy     */
+        MV_REG_READ_ASM (r4, r1, MPP_SAMPLE_AT_RESET)
+        ldr     r1, =MSAR_ARMDDRCLCK_MASK
+        and     r1, r4, r1
+#if 0 /* YOTAM TO BE FIX */
+	mov    r1, r1, LSR #MSAR_ARMDDRCLCK_OFFS
+#endif
+
+	/* Read device ID  */
+	MV_CTRL_MODEL_GET_ASM(r3, r2);
+
+        /* Continue if TC90 */
+        ldr     r2, =MV_1281_DEV_ID
+        cmp     r3, r2
+        beq     armClkMsb
+
+        /* Continue if Orion2 */
+        ldr     r2, =MV_5281_DEV_ID
+        cmp     r3, r2
+#if 0 /* YOTAM TO BE FIX */
+        bne     1f
+#endif
+
+armClkMsb:
+#if 0 /* YOTAM TO BE FIX */
+	tst    r4, #MSAR_ARMDDRCLCK_H_MASK
+        beq    1f
+        orr    r1, r1, #BIT4
+1:
+	ldr    r4, =MV_CPU_ARM_CLK_ELM_SIZE
+	mul    r1, r4, r1
+	add    r7, r7, r1
+	add    r7, r7, #MV_CPU_ARM_CLK_DDR_OFF
+	ldr    r1, [r7]
+#endif
+
+        /* Get SDRAM Config value */
+        MV_REG_READ_ASM (r2, r4, SDRAM_CONFIG_REG)
+
+        /* Get DIMM type */
+        tst     r2, #SDRAM_DTYPE_DDR2
+        beq     ddr1FtdllVal
+
+ddr2FtdllVal:
+        ldr     r2, =MV_5281_DEV_ID
+        cmp     r3, r2
+	bne	2f
+	MV_CTRL_REV_GET_ASM(r3, r2)
+        cmp     r3, #MV_5281_D0_REV
+        beq     orin2_d0_ddr2_ftdll_val
+        cmp     r3, #MV_5281_D1_REV
+        beq     orin2_d1_ddr2_ftdll_val
+        cmp     r3, #MV_5281_D2_REV
+        beq     orin2_d1_ddr2_ftdll_val
+	b	ddr2_default_val
+
+/* Set Orion 2 D1 ftdll values for DDR2 */
+orin2_d1_ddr2_ftdll_val:
+	ldr    r4, =FTDLL_DDR2_250MHZ_5281_D1
+	ldr    r7, =_250MHz
+        cmp    r1, r7
+        beq    setFtdllReg
+        ldr     r4, =FTDLL_DDR2_200MHZ_5281_D1
+	ldr    r7, =_200MHz
+        cmp    r1, r7
+        beq    setFtdllReg
+        ldr     r4, =FTDLL_DDR2_166MHZ_5281_D0
+	ldr    r7, =_166MHz
+        cmp    r1, r7
+        beq    setFtdllReg
+	b	ddr2_default_val
+
+/* Set Orion 2 D0 ftdll values for DDR2 */
+orin2_d0_ddr2_ftdll_val:
+	ldr    r4, =FTDLL_DDR2_250MHZ_5281_D0
+	ldr    r7, =_250MHz
+        cmp    r1, r7
+        beq    setFtdllReg
+        ldr     r4, =FTDLL_DDR2_200MHZ_5281_D0
+	ldr    r7, =_200MHz
+        cmp    r1, r7
+        beq    setFtdllReg
+        ldr     r4, =FTDLL_DDR2_166MHZ_5281_D0
+	ldr    r7, =_166MHz
+        cmp    r1, r7
+        beq    setFtdllReg
+	b       ddr2_default_val
+2:
+        ldr     r2, =MV_5182_DEV_ID
+        cmp     r3, r2
+	bne	3f
+
+/* Set Orion nas ftdll values for DDR2 */
+orin_nas_ftdll_val:
+        ldr     r4, =FTDLL_DDR2_166MHZ_5182
+	ldr    r7, =_166MHz
+        cmp    r1, r7
+        beq    setFtdllReg
+
+/* default values for all other devices */
+3:
+ddr2_default_val:
+        ldr    r4, =FTDLL_DDR2_250MHZ
+	ldr    r7, =_250MHz
+        cmp    r1, r7
+        beq    setFtdllReg
+        ldr     r4, =FTDLL_DDR2_200MHZ
+	ldr    r7, =_200MHz
+        cmp    r1, r7
+        beq    setFtdllReg
+        ldr     r4, =FTDLL_DDR2_166MHZ
+	ldr    r7, =_166MHz
+        cmp    r1, r7
+        beq    setFtdllReg
+        ldr     r4, =FTDLL_DDR2_133MHZ
+	ldr    r7, =_133MHz
+        cmp    r1, r7
+        beq    setFtdllReg
+        ldr    r4, =0
+        b       setFtdllReg
+
+ddr1FtdllVal:
+        ldr     r2, =MV_5281_DEV_ID
+        cmp     r3, r2
+	bne	2f
+	MV_CTRL_REV_GET_ASM(r3, r2)
+        cmp     r3, #MV_5281_D0_REV
+        bge     orin2_ddr1_ftdll_val
+	b	ddr1_default_val
+
+/* Set Orion 2 D0 and above ftdll values for DDR1 */
+orin2_ddr1_ftdll_val:
+        ldr     r4, =FTDLL_DDR1_200MHZ_5281_D0
+	ldr    r7, =_200MHz
+        cmp    r1, r7
+        beq    setFtdllReg
+        ldr     r4, =FTDLL_DDR1_166MHZ_5281_D0
+	ldr    r7, =_166MHz
+        cmp    r1, r7
+        beq    setFtdllReg
+	b       ddr1_default_val
+2:
+        ldr     r2, =MV_5181_DEV_ID
+        cmp     r3, r2
+	bne	3f
+	MV_CTRL_REV_GET_ASM(r3, r2)
+        cmp     r3, #MV_5181_B1_REV
+        bge     orin1_ddr1_ftdll_val
+	b	ddr1_default_val
+
+/* Set Orion 1 ftdll values for DDR1 */
+orin1_ddr1_ftdll_val:
+        ldr     r4, =FTDLL_DDR1_166MHZ_5181_B1
+	ldr    r7, =_166MHz
+        cmp    r1, r7
+        beq    setFtdllReg
+3:
+ddr1_default_val:
+        ldr    r4, =FTDLL_DDR1_133MHZ
+	ldr    r7, =_133MHz
+        cmp    r1, r7
+        beq    setFtdllReg
+
+        ldr    r4, =FTDLL_DDR1_166MHZ
+	ldr    r7, =_166MHz
+        cmp    r1, r7
+        beq    setFtdllReg
+
+        ldr    r4, =FTDLL_DDR1_200MHZ
+	ldr    r7, =_200MHz
+        cmp    r1, r7
+        beq    setFtdllReg
+
+        ldr    r4, =0
+
+setFtdllReg:
+
+        MV_REG_WRITE_ASM (r4, r1, SDRAM_FTDLL_CONFIG_REG)
+        HTOLL(r4,r1)
+        bic   r4, r4, #1
+        MV_REG_WRITE_ASM (r4, r1, SDRAM_FTDLL_CONFIG_REG)
+
+#endif /* !defined(MV_88W8660) && !defined(MV_88F6183) && !defined(MV_88F6183L) */
+#endif /* DB_FPGA */
+
+restoreTmpRegs:
+        /* Restore the registers we used to save the DDR detect values */
+
+        ldr     r4, =DRAM_BUF_REG0_DV
+        MV_REG_WRITE_ASM (r4, r1, DRAM_BUF_REG0)
+
+        ldr     r4, =DRAM_BUF_REG1_DV
+        MV_REG_WRITE_ASM (r4, r1, DRAM_BUF_REG1)
+
+        ldr     r4, =DRAM_BUF_REG2_DV
+        MV_REG_WRITE_ASM (r4, r1, DRAM_BUF_REG2)
+
+        ldr     r4, =DRAM_BUF_REG3_DV
+        MV_REG_WRITE_ASM (r4, r1, DRAM_BUF_REG3)
+
+        ldr     r4, =DRAM_BUF_REG4_DV
+        MV_REG_WRITE_ASM (r4, r1, DRAM_BUF_REG4)
+
+        ldr     r4, =DRAM_BUF_REG5_DV
+        MV_REG_WRITE_ASM (r4, r1, DRAM_BUF_REG5)
+
+        ldr     r4, =DRAM_BUF_REG6_DV
+        MV_REG_WRITE_ASM (r4, r1, DRAM_BUF_REG6)
+
+        ldr     r4, =DRAM_BUF_REG7_DV
+        MV_REG_WRITE_ASM (r4, r1, DRAM_BUF_REG7)
+
+        ldr     r4, =DRAM_BUF_REG8_DV
+        MV_REG_WRITE_ASM (r4, r1, DRAM_BUF_REG8)
+
+        ldr     r4, =DRAM_BUF_REG9_DV
+        MV_REG_WRITE_ASM (r4, r1, DRAM_BUF_REG9)
+
+        ldr     r4, =DRAM_BUF_REG10_DV
+        MV_REG_WRITE_ASM (r4, r1, DRAM_BUF_REG10)
+
+
+        /* Restore registers */
+        /* Save register on stack */
+	cmp	sp, #0
+	beq	no_stack_l
+load_from_stack:
+        ldmia	sp!, {r1, r2, r3, r4, r7, r11}
+no_stack_l:
+
+        mov     pc, lr
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfConfig.h b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfConfig.h
new file mode 100644
index 000000000000..8ad869e440cb
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfConfig.h
@@ -0,0 +1,192 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvDramIfConfigh
+#define __INCmvDramIfConfigh
+
+/* includes */
+
+/* defines  */
+
+/* registers defaults values */
+
+#define SDRAM_CONFIG_DV 				\
+		(SDRAM_PERR_WRITE			|	\
+		 SDRAM_SRMODE				|	\
+		 SDRAM_SRCLK_GATED)
+
+#define SDRAM_DUNIT_CTRL_LOW_DV			\
+		(SDRAM_CTRL_POS_RISE		|	\
+		 SDRAM_CLK1DRV_NORMAL		|	\
+		 SDRAM_LOCKEN_ENABLE)
+
+#define SDRAM_ADDR_CTRL_DV	    0
+
+#define SDRAM_TIMING_CTRL_LOW_REG_DV 	\
+		((0x2 << SDRAM_TRCD_OFFS)	|	\
+		 (0x2 << SDRAM_TRP_OFFS)	|	\
+		 (0x1 << SDRAM_TWR_OFFS)	|	\
+		 (0x0 << SDRAM_TWTR_OFFS)	|	\
+		 (0x5 << SDRAM_TRAS_OFFS)	|	\
+		 (0x1 << SDRAM_TRRD_OFFS))
+/* TRFC 0x27, TW2W 0x1 */
+#define SDRAM_TIMING_CTRL_HIGH_REG_DV	(( 0x7 << SDRAM_TRFC_OFFS )	|\
+					( 0x2 << SDRAM_TRFC_EXT_OFFS)	|\
+					( 0x1 << SDRAM_TW2W_OFFS))
+
+#define SDRAM_OPEN_PAGES_CTRL_REG_DV	SDRAM_OPEN_PAGE_EN
+
+/* DDR2 ODT default register values */
+
+/* Presence	     Ctrl Low    Ctrl High  Dunit Ctrl   Ext Mode     */
+/*	CS0	         0x84210000  0x00000000  0x0000780F  0x00000440 */
+/*	CS0+CS1          0x84210000  0x00000000  0x0000780F  0x00000440 */
+/*	CS0+CS2	    	 0x030C030C  0x00000000  0x0000740F  0x00000404 */
+/*	CS0+CS1+CS2	 0x030C030C  0x00000000  0x0000740F  0x00000404 */
+/*	CS0+CS2+CS3	 0x030C030C  0x00000000  0x0000740F  0x00000404 */
+/*	CS0+CS1+CS2+CS3  0x030C030C  0x00000000  0x0000740F  0x00000404 */
+
+#define DDR2_ODT_CTRL_LOW_CS0_DV	0x84210000
+#define DDR2_ODT_CTRL_HIGH_CS0_DV	0x00000000
+#define DDR2_DUNIT_ODT_CTRL_CS0_DV	0x0000780F
+#define DDR_SDRAM_EXT_MODE_CS0_DV	0x00000440
+
+#define DDR2_ODT_CTRL_LOW_CS0_CS2_DV	0x030C030C
+#define DDR2_ODT_CTRL_HIGH_CS0_CS2_DV	0x00000000
+#define DDR2_DUNIT_ODT_CTRL_CS0_CS2_DV	0x0000740F
+#define DDR_SDRAM_EXT_MODE_CS0_CS2_DV	0x00000404
+
+
+/* DDR SDRAM Adderss/Control and Data Pads Calibration default values */
+#define DDR1_ADDR_CTRL_PAD_STRENGTH_TYPICAL_DV	\
+		(1 << SDRAM_PRE_DRIVER_STRENGTH_OFFS)
+#define DDR2_ADDR_CTRL_PAD_STRENGTH_TYPICAL_DV	\
+		(3 << SDRAM_PRE_DRIVER_STRENGTH_OFFS)
+
+
+#define DDR1_DATA_PAD_STRENGTH_TYPICAL_DV		\
+		(1 << SDRAM_PRE_DRIVER_STRENGTH_OFFS)
+#define DDR2_DATA_PAD_STRENGTH_TYPICAL_DV		\
+		(3 << SDRAM_PRE_DRIVER_STRENGTH_OFFS)
+
+/* DDR SDRAM Mode Register default value */
+#define DDR1_MODE_REG_DV			0x00000000
+#define DDR2_MODE_REG_DV			0x00000400
+
+/* DDR SDRAM Timing parameter default values */
+#define DDR1_TIMING_LOW_DV           0x11602220
+#define DDR1_TIMING_HIGH_DV          0x0000000d
+
+#define DDR2_TIMING_LOW_DV           0x11812220
+#define DDR2_TIMING_HIGH_DV          0x0000030f
+
+/* For Guideline (GL# MEM-4) DQS Reference Delay Tuning */
+#define FTDLL_DDR1_166MHZ           ((0x1 << 0)    | \
+                                     (0x7F<< 12)   | \
+                                     (0x1 << 22))
+
+#define FTDLL_DDR1_133MHZ           FTDLL_DDR1_166MHZ
+
+#define FTDLL_DDR1_200MHZ           ((0x1 << 0)    | \
+                                     (0x1 << 12)   | \
+                                     (0x3 << 14)   | \
+                                     (0x1 << 18)   | \
+                                     (0x1 << 22))
+
+
+#define FTDLL_DDR2_166MHZ           ((0x1 << 0)    | \
+                                     (0x1 << 12)   | \
+                                     (0x1 << 14)   | \
+                                     (0x1 << 16)   | \
+                                     (0x1 << 19)   | \
+                                     (0xF << 20))
+
+#define FTDLL_DDR2_133MHZ           FTDLL_DDR2_166MHZ
+
+#define FTDLL_DDR2_200MHZ           ((0x1 << 0)    | \
+                                     (0x1 << 12)   | \
+                                     (0x1 << 14)   | \
+                                     (0x1 << 16)   | \
+                                     (0x1 << 19)   | \
+                                     (0xF << 20))
+
+#define FTDLL_DDR2_250MHZ            0x445001
+
+/* Orion 1 B1 and above */
+#define FTDLL_DDR1_166MHZ_5181_B1    0x45D001
+
+/* Orion nas */
+#define FTDLL_DDR2_166MHZ_5182       0x597001
+
+/* Orion 2 D0 and above */
+#define FTDLL_DDR1_166MHZ_5281_D0    0x8D0001
+#define FTDLL_DDR1_200MHZ_5281_D0    0x8D0001
+#define FTDLL_DDR2_166MHZ_5281_D0    0x485001
+#define FTDLL_DDR2_200MHZ_5281_D0    0x485001
+#define FTDLL_DDR2_250MHZ_5281_D0    0x445001
+#define FTDLL_DDR2_200MHZ_5281_D1    0x995001
+#define FTDLL_DDR2_250MHZ_5281_D1    0x984801
+
+#endif /* __INCmvDramIfh */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfRegs.h b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfRegs.h
new file mode 100644
index 000000000000..2ff0db4dda8d
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr1_2/mvDramIfRegs.h
@@ -0,0 +1,306 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvDramIfRegsh
+#define __INCmvDramIfRegsh
+
+
+/* DDR SDRAM Controller Address Decode Registers */
+/* SDRAM CSn Base Address Register (SCBAR) */
+#define SDRAM_BASE_ADDR_REG(csNum)	(0x1500 + (csNum * 8))
+#define SCBAR_BASE_OFFS				16
+#define SCBAR_BASE_MASK				(0xffff << SCBAR_BASE_OFFS)
+#define SCBAR_BASE_ALIGNMENT		0x10000
+
+/* SDRAM CSn Size Register (SCSR) */
+#define SDRAM_SIZE_REG(csNum)	(0x1504 + (csNum * 8))
+#define SCSR_WIN_EN					BIT0
+#define SCSR_SIZE_OFFS				16
+#define SCSR_SIZE_MASK				(0xffff << SCSR_SIZE_OFFS)
+#define SCSR_SIZE_ALIGNMENT			0x10000
+
+/* configuration register */
+#define SDRAM_CONFIG_REG   		0x1400
+#define SDRAM_REFRESH_OFFS 		0
+#define SDRAM_REFRESH_MAX  		0x3000
+#define SDRAM_REFRESH_MASK 		(SDRAM_REFRESH_MAX << SDRAM_REFRESH_OFFS)
+#define SDRAM_DWIDTH_OFFS       14
+#define SDRAM_DWIDTH_MASK       (3 << SDRAM_DWIDTH_OFFS)
+#define SDRAM_DWIDTH_16BIT      (1 << SDRAM_DWIDTH_OFFS)
+#define SDRAM_DWIDTH_32BIT      (2 << SDRAM_DWIDTH_OFFS)
+#define SDRAM_DTYPE_OFFS        16
+#define SDRAM_DTYPE_MASK        (1 << SDRAM_DTYPE_OFFS)
+#define SDRAM_DTYPE_DDR1        (0 << SDRAM_DTYPE_OFFS)
+#define SDRAM_DTYPE_DDR2        (1 << SDRAM_DTYPE_OFFS)
+#define SDRAM_REGISTERED   		(1 << 17)
+#define SDRAM_PERR_OFFS    		18
+#define SDRAM_PERR_MASK    		(1 << SDRAM_PERR_OFFS)
+#define SDRAM_PERR_NO_WRITE     (0 << SDRAM_PERR_OFFS)
+#define SDRAM_PERR_WRITE        (1 << SDRAM_PERR_OFFS)
+#define SDRAM_DCFG_OFFS     	20
+#define SDRAM_DCFG_MASK     	(0x3 << SDRAM_DCFG_OFFS)
+#define SDRAM_DCFG_X16_DEV   	(1 << SDRAM_DCFG_OFFS)
+#define SDRAM_DCFG_X8_DEV   	(2 << SDRAM_DCFG_OFFS)
+#define SDRAM_SRMODE			(1 << 24)
+#define SDRAM_SRCLK_OFFS		25
+#define SDRAM_SRCLK_MASK		(1 << SDRAM_SRCLK_OFFS)
+#define SDRAM_SRCLK_KEPT		(0 << SDRAM_SRCLK_OFFS)
+#define SDRAM_SRCLK_GATED		(1 << SDRAM_SRCLK_OFFS)
+#define SDRAM_CATTH_OFFS		26
+#define SDRAM_CATTHR_EN			(1 << SDRAM_CATTH_OFFS)
+
+
+/* dunit control register */
+#define SDRAM_DUNIT_CTRL_REG  	0x1404
+#define SDRAM_CTRL_POS_OFFS	   	6
+#define SDRAM_CTRL_POS_FALL	   	(0 << SDRAM_CTRL_POS_OFFS)
+#define SDRAM_CTRL_POS_RISE	   	(1 << SDRAM_CTRL_POS_OFFS)
+#define SDRAM_CLK1DRV_OFFS      12
+#define SDRAM_CLK1DRV_MASK      (1 << SDRAM_CLK1DRV_OFFS)
+#define SDRAM_CLK1DRV_HIGH_Z    (0 << SDRAM_CLK1DRV_OFFS)
+#define SDRAM_CLK1DRV_NORMAL    (1 << SDRAM_CLK1DRV_OFFS)
+#define SDRAM_LOCKEN_OFFS       18
+#define SDRAM_LOCKEN_MASK       (1 << SDRAM_LOCKEN_OFFS)
+#define SDRAM_LOCKEN_DISABLE    (0 << SDRAM_LOCKEN_OFFS)
+#define SDRAM_LOCKEN_ENABLE     (1 << SDRAM_LOCKEN_OFFS)
+#define SDRAM_ST_BURST_DEL_OFFS 	24
+#define SDRAM_ST_BURST_DEL_MAX 	0xf
+#define SDRAM_ST_BURST_DEL_MASK (SDRAM_ST_BURST_DEL_MAX<<SDRAM_ST_BURST_DEL_OFFS)
+
+/* sdram timing control low register */
+#define SDRAM_TIMING_CTRL_LOW_REG	0x1408
+#define SDRAM_TRCD_OFFS 		4
+#define SDRAM_TRCD_MASK 		(0xF << SDRAM_TRCD_OFFS)
+#define SDRAM_TRP_OFFS 			8
+#define SDRAM_TRP_MASK 			(0xF << SDRAM_TRP_OFFS)
+#define SDRAM_TWR_OFFS 			12
+#define SDRAM_TWR_MASK 			(0xF << SDRAM_TWR_OFFS)
+#define SDRAM_TWTR_OFFS 		16
+#define SDRAM_TWTR_MASK 		(0xF << SDRAM_TWTR_OFFS)
+#define SDRAM_TRAS_OFFS 		20
+#define SDRAM_TRAS_MASK 		(0xF << SDRAM_TRAS_OFFS)
+#define SDRAM_TRRD_OFFS 		24
+#define SDRAM_TRRD_MASK 		(0xF << SDRAM_TRRD_OFFS)
+#define SDRAM_TRTP_OFFS 		28
+#define SDRAM_TRTP_MASK 		(0xF << SDRAM_TRTP_OFFS)
+
+/* sdram timing control high register */
+#define SDRAM_TIMING_CTRL_HIGH_REG	0x140c
+#define SDRAM_TRFC_OFFS 		0
+#define SDRAM_TRFC_MASK 		(0xF << SDRAM_TRFC_OFFS)
+#define SDRAM_TR2R_OFFS 		4
+#define SDRAM_TR2R_MASK 		(0x3 << SDRAM_TR2R_OFFS)
+#define SDRAM_TR2W_W2R_OFFS 		6
+#define SDRAM_TR2W_W2R_MASK 		(0x3 << SDRAM_TR2W_W2R_OFFS)
+#define SDRAM_TRFC_EXT_OFFS		8
+#define SDRAM_TRFC_EXT_MASK		(0x1 << SDRAM_TRFC_EXT_OFFS)
+#define SDRAM_TW2W_OFFS		    10
+#define SDRAM_TW2W_MASK		    (0x1 << SDRAM_TW2W_OFFS)
+
+/* address control register */
+#define SDRAM_ADDR_CTRL_REG		0x1410
+#define SDRAM_DSIZE_OFFS   	    4
+#define SDRAM_DSIZE_MASK   	    (0x3 << SDRAM_DSIZE_OFFS)
+#define SDRAM_DSIZE_128Mb 	    (0x0 << SDRAM_DSIZE_OFFS)
+#define SDRAM_DSIZE_256Mb 	    (0x1 << SDRAM_DSIZE_OFFS)
+#define SDRAM_DSIZE_512Mb  	    (0x2 << SDRAM_DSIZE_OFFS)
+
+/* SDRAM Open Pages Control registers */
+#define SDRAM_OPEN_PAGE_CTRL_REG	0x1414
+#define SDRAM_OPEN_PAGE_EN		    (0 << 0)
+#define SDRAM_OPEN_PAGE_DIS		    (1 << 0)
+
+/* sdram opertion register */
+#define SDRAM_OPERATION_REG 	0x1418
+#define SDRAM_CMD_OFFS  		0
+#define SDRAM_CMD_MASK   		(0x7 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_NORMAL 		(0x0 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_PRECHARGE_ALL (0x1 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_REFRESH_ALL 	(0x2 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_REG_SET_CMD 	(0x3 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_EXT_MODE_SET 	(0x4 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_NOP 			(0x5 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_SLF_RFRSH 	(0x7 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_EMRS2_CMD  	(0x8 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_EMRS3_CMD  	(0x9 << SDRAM_CMD_OFFS)
+
+/* sdram mode register */
+#define SDRAM_MODE_REG 			0x141c
+#define SDRAM_BURST_LEN_OFFS 	0
+#define SDRAM_BURST_LEN_MASK 	(0x7 << SDRAM_BURST_LEN_OFFS)
+#define SDRAM_BURST_LEN_4    	(0x2 << SDRAM_BURST_LEN_OFFS)
+#define SDRAM_CL_OFFS   		4
+#define SDRAM_CL_MASK   		(0x7 << SDRAM_CL_OFFS)
+#define SDRAM_DDR1_CL_2	     	(0x2 << SDRAM_CL_OFFS)
+#define SDRAM_DDR1_CL_3      	(0x3 << SDRAM_CL_OFFS)
+#define SDRAM_DDR1_CL_4      	(0x4 << SDRAM_CL_OFFS)
+#define SDRAM_DDR1_CL_1_5    	(0x5 << SDRAM_CL_OFFS)
+#define SDRAM_DDR1_CL_2_5    	(0x6 << SDRAM_CL_OFFS)
+#define SDRAM_DDR2_CL_3      	(0x3 << SDRAM_CL_OFFS)
+#define SDRAM_DDR2_CL_4      	(0x4 << SDRAM_CL_OFFS)
+#define SDRAM_DDR2_CL_5    		(0x5 << SDRAM_CL_OFFS)
+#define SDRAM_TM_OFFS           7
+#define SDRAM_TM_MASK           (1 << SDRAM_TM_OFFS)
+#define SDRAM_TM_NORMAL         (0 << SDRAM_TM_OFFS)
+#define SDRAM_TM_TEST_MODE      (1 << SDRAM_TM_OFFS)
+#define SDRAM_DLL_OFFS          8
+#define SDRAM_DLL_MASK          (1 << SDRAM_DLL_OFFS)
+#define SDRAM_DLL_NORMAL        (0 << SDRAM_DLL_OFFS)
+#define SDRAM_DLL_RESET 	(1 << SDRAM_DLL_OFFS)
+#define SDRAM_WR_OFFS		11
+#define SDRAM_WR_MAX		7
+#define SDRAM_WR_MASK		(SDRAM_WR_MAX << SDRAM_WR_OFFS)
+#define SDRAM_PD_OFFS		12
+#define SDRAM_PD_MASK		(1 << SDRAM_PD_OFFS)
+#define SDRAM_PD_FAST_EXIT	(0 << SDRAM_PD_OFFS)
+#define SDRAM_PD_SLOW_EXIT	(1 << SDRAM_PD_OFFS)
+
+/* DDR SDRAM Extended Mode register (DSEMR) */
+#define SDRAM_EXTENDED_MODE_REG	0x1420
+#define DSEMR_DLL_ENABLE		(1 << 0)
+#define DSEMR_DS_OFFS			1
+#define DSEMR_DS_MASK			(1 << DSEMR_DS_OFFS)
+#define DSEMR_DS_NORMAL			(0 << DSEMR_DS_OFFS)
+#define DSEMR_DS_REDUCED		(1 << DSEMR_DS_OFFS)
+#define DSEMR_RTT0_OFFS			2
+#define DSEMR_RTT1_OFFS			6
+#define DSEMR_RTT_ODT_DISABLE	((0 << DSEMR_RTT0_OFFS)||(0 << DSEMR_RTT1_OFFS))
+#define DSEMR_RTT_ODT_75_OHM	((1 << DSEMR_RTT0_OFFS)||(0 << DSEMR_RTT1_OFFS))
+#define DSEMR_RTT_ODT_150_OHM	((0 << DSEMR_RTT0_OFFS)||(1 << DSEMR_RTT1_OFFS))
+#define DSEMR_OCD_OFFS			7
+#define DSEMR_OCD_MASK			(0x7 << DSEMR_OCD_OFFS)
+#define DSEMR_OCD_EXIT_CALIB	(0 << DSEMR_OCD_OFFS)
+#define DSEMR_OCD_DRIVE1		(1 << DSEMR_OCD_OFFS)
+#define DSEMR_OCD_DRIVE0		(2 << DSEMR_OCD_OFFS)
+#define DSEMR_OCD_ADJUST_MODE	(4 << DSEMR_OCD_OFFS)
+#define DSEMR_OCD_CALIB_DEFAULT	(7 << DSEMR_OCD_OFFS)
+#define DSEMR_DQS_OFFS			10
+#define DSEMR_DQS_MASK			(1 << DSEMR_DQS_OFFS)
+#define DSEMR_DQS_DIFFERENTIAL	(0 << DSEMR_DQS_OFFS)
+#define DSEMR_DQS_SINGLE_ENDED	(0 << DSEMR_DQS_OFFS)
+#define DSEMR_RDQS_ENABLE		(1 << 11)
+#define DSEMR_QOFF_OUTPUT_BUFF_EN	(1 << 12)
+
+/* DDR SDRAM Operation Control Register */
+#define SDRAM_OPERATION_CTRL_REG	0x142c
+
+/* Dunit FTDLL Configuration Register */
+#define SDRAM_FTDLL_CONFIG_REG			0x1484
+
+/* Pads Calibration register */
+#define SDRAM_ADDR_CTRL_PADS_CAL_REG	0x14c0
+#define SDRAM_DATA_PADS_CAL_REG		0x14c4
+#define SDRAM_DRVN_OFFS 				0
+#define SDRAM_DRVN_MASK 				(0x3F << SDRAM_DRVN_OFFS)
+#define SDRAM_DRVP_OFFS 				6
+#define SDRAM_DRVP_MASK 				(0x3F << SDRAM_DRVP_OFFS)
+#define SDRAM_PRE_DRIVER_STRENGTH_OFFS	12
+#define SDRAM_PRE_DRIVER_STRENGTH_MASK	(3 << SDRAM_PRE_DRIVER_STRENGTH_OFFS)
+#define SDRAM_TUNE_EN   		BIT16
+#define SDRAM_LOCK_OFFS 		17
+#define SDRAM_LOCK_MAKS 		(0x1F << SDRAM_LOCK_OFFS)
+#define SDRAM_LOCKN_OFFS 		17
+#define SDRAM_LOCKN_MAKS 		(0x3F << SDRAM_LOCKN_OFFS)
+#define SDRAM_LOCKP_OFFS 		23
+#define SDRAM_LOCKP_MAKS 		(0x3F << SDRAM_LOCKP_OFFS)
+#define SDRAM_WR_EN     		(1 << 31)
+
+/* DDR2 SDRAM ODT Control (Low) Register (DSOCLR) */
+#define DDR2_SDRAM_ODT_CTRL_LOW_REG  0x1494
+#define DSOCLR_ODT_RD_OFFS(odtNum)   (odtNum * 4)
+#define DSOCLR_ODT_RD_MASK(odtNum)   (0xf << DSOCLR_ODT_RD_OFFS(odtNum))
+#define DSOCLR_ODT_RD(odtNum, bank)  ((1 << bank) << DSOCLR_ODT_RD_OFFS(odtNum))
+#define DSOCLR_ODT_WR_OFFS(odtNum)   (16 + (odtNum * 4))
+#define DSOCLR_ODT_WR_MASK(odtNum)   (0xf << DSOCLR_ODT_WR_OFFS(odtNum))
+#define DSOCLR_ODT_WD(odtNum, bank)  ((1 << bank) << DSOCLR_ODT_WR_OFFS(odtNum))
+
+/* DDR2 SDRAM ODT Control (High) Register (DSOCHR) */
+#define DDR2_SDRAM_ODT_CTRL_HIGH_REG    0x1498
+/* Optional control values to DSOCHR_ODT_EN macro */
+#define DDR2_ODT_CTRL_DUNIT         0
+#define DDR2_ODT_CTRL_NEVER         1
+#define DDR2_ODT_CTRL_ALWAYS        3
+#define DSOCHR_ODT_EN_OFFS(odtNum)  (odtNum * 2)
+#define DSOCHR_ODT_EN_MASK(odtNum)  (0x3 << DSOCHR_ODT_EN_OFFS(odtNum))
+#define DSOCHR_ODT_EN(odtNum, ctrl) ((1 << ctrl) << DSOCHR_ODT_RD_OFFS(odtNum))
+
+/* DDR2 Dunit ODT Control Register (DDOCR)*/
+#define DDR2_DUNIT_ODT_CONTROL_REG  0x149c
+#define DDOCR_ODT_RD_OFFS           0
+#define DDOCR_ODT_RD_MASK           (0xf << DDOCR_ODT_RD_OFFS)
+#define DDOCR_ODT_RD(bank)          ((1 << bank) << DDOCR_ODT_RD_OFFS)
+#define DDOCR_ODT_WR_OFFS           4
+#define DDOCR_ODT_WR_MASK           (0xf << DDOCR_ODT_WR_OFFS)
+#define DDOCR_ODT_WR(bank)          ((1 << bank) << DDOCR_ODT_WR_OFFS)
+#define DSOCR_ODT_EN_OFFS           8
+#define DSOCR_ODT_EN_MASK           (0x3 << DSOCR_ODT_EN_OFFS)
+#define DSOCR_ODT_EN(ctrl)          ((1 << ctrl) << DSOCR_ODT_EN_OFFS)
+#define DSOCR_ODT_SEL_OFFS          10
+#define DSOCR_ODT_SEL_MASK          (0x3 << DSOCR_ODT_SEL_OFFS)
+
+/* DDR SDRAM Initialization Control Register (DSICR) */
+#define DDR_SDRAM_INIT_CTRL_REG		0x1480
+#define DSICR_INIT_EN				(1 << 0)
+
+#endif /* __INCmvDramIfRegsh */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIf.c b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIf.c
new file mode 100644
index 000000000000..dc10ce1962b2
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIf.c
@@ -0,0 +1,1854 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+/* includes */
+#include "ddr2/mvDramIf.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+
+#include "ddr2/mvDramIfStaticInit.h"
+
+/* #define MV_DEBUG */
+#ifdef MV_DEBUG
+#define DB(x) x
+#else
+#define DB(x)
+#endif
+
+/* DRAM bank presence encoding */
+#define BANK_PRESENT_CS0			    0x1
+#define BANK_PRESENT_CS0_CS1			0x3
+#define BANK_PRESENT_CS0_CS2			0x5
+#define BANK_PRESENT_CS0_CS1_CS2		0x7
+#define BANK_PRESENT_CS0_CS2_CS3		0xd
+#define BANK_PRESENT_CS0_CS2_CS3_CS4	0xf
+
+/* locals   */
+#ifndef MV_STATIC_DRAM_ON_BOARD
+static void sdramDDr2OdtConfig(MV_DRAM_BANK_INFO *pBankInfo);
+static MV_U32 dunitCtrlLowRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 minCas, MV_U32  busClk, MV_STATUS TTmode );
+static MV_U32 dunitCtrlHighRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32  busClk);
+static MV_U32 sdramModeRegCalc(MV_U32 minCas);
+static MV_U32 sdramExtModeRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 busClk);
+static MV_U32 sdramAddrCtrlRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_DRAM_BANK_INFO *pBankInfoDIMM1);
+static MV_U32 sdramConfigRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_DRAM_BANK_INFO *pBankInfo2, MV_U32 busClk);
+static MV_U32 minCasCalc(MV_DRAM_BANK_INFO *pBankInfo,MV_DRAM_BANK_INFO *pBankInfo2, MV_U32 busClk, MV_U32 forcedCl);
+static MV_U32 sdramTimeCtrlLowRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 minCas, MV_U32 busClk);
+static MV_U32 sdramTimeCtrlHighRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 busClk);
+static MV_U32 sdramDdr2TimeLoRegCalc(MV_U32 minCas);
+static MV_U32 sdramDdr2TimeHiRegCalc(MV_U32 minCas);
+#endif
+MV_32 DRAM_CS_Order[MV_DRAM_MAX_CS] = {N_A
+
+#ifdef MV_INCLUDE_SDRAM_CS1
+		,N_A
+#endif
+#ifdef MV_INCLUDE_SDRAM_CS2
+		,N_A
+#endif
+#ifdef MV_INCLUDE_SDRAM_CS3
+    ,N_A
+#endif
+	};
+/* Get DRAM size of CS num */
+MV_U32 mvDramCsSizeGet(MV_U32 csNum)
+{
+	MV_DRAM_BANK_INFO bankInfo;
+	MV_U32  size, deviceW, dimmW;
+#ifdef MV78XX0
+	MV_U32  temp;
+#endif
+
+	if(MV_OK == mvDramBankInfoGet(csNum, &bankInfo))
+	{
+		if (0 == bankInfo.size)
+			return 0;
+
+		/* Note that the Dimm width might be different then the device DRAM width */
+#ifdef MV78XX0
+		temp = MV_REG_READ(SDRAM_CONFIG_REG);
+		deviceW = ((temp & SDRAM_DWIDTH_MASK) == SDRAM_DWIDTH_32BIT )? 32 : 64;
+#else
+		deviceW = 16 /* KW family */;
+#endif
+		dimmW = bankInfo.dataWidth - (bankInfo.dataWidth % 16);
+		size = ((bankInfo.size << 20) / (dimmW/deviceW));
+		return size;
+	}
+	else
+		return 0;
+}
+/*******************************************************************************
+* mvDramIfDetect - Prepare DRAM interface configuration values.
+*
+* DESCRIPTION:
+*       This function implements the full DRAM detection and timing
+*       configuration for best system performance.
+*       Since this routine runs from a ROM device (Boot Flash), its stack
+*       resides on RAM, that might be the system DRAM. Changing DRAM
+*       configuration values while keeping vital data in DRAM is risky. That
+*       is why the function does not preform the configuration setting but
+*       prepare those in predefined 32bit registers (in this case IDMA
+*       registers are used) for other routine to perform the settings.
+*       The function will call for board DRAM SPD information for each DRAM
+*       chip select. The function will then analyze those SPD parameters of
+*       all DRAM banks in order to decide on DRAM configuration compatible
+*       for all DRAM banks.
+*       The function will set the CPU DRAM address decode registers.
+*       Note: This routine prepares values that will overide configuration of
+*       mvDramBasicAsmInit().
+*
+* INPUT:
+*       forcedCl - Forced CAL Latency. If equal to zero, do not force.
+*       eccDisable - Force down the ECC.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_STATUS mvDramIfDetect(MV_U32 forcedCl, MV_BOOL eccDisable)
+{
+	MV_32 	MV_DRAM_CS_order[MV_DRAM_MAX_CS] = {
+		SDRAM_CS0
+#ifdef MV_INCLUDE_SDRAM_CS1
+		,SDRAM_CS1
+#endif
+#ifdef MV_INCLUDE_SDRAM_CS2
+		,SDRAM_CS2
+#endif
+#ifdef MV_INCLUDE_SDRAM_CS3
+		,SDRAM_CS3
+#endif
+		};
+	MV_U32  busClk, deviceW, dimmW;
+	MV_U32 numOfAllDevices = 0;
+	MV_STATUS TTMode;
+#ifndef MV_STATIC_DRAM_ON_BOARD
+	MV_DRAM_BANK_INFO bankInfo[MV_DRAM_MAX_CS];
+	MV_U32  size, base = 0, i, j, temp, busClkPs;
+	MV_U8	minCas;
+	MV_CPU_DEC_WIN dramDecWin;
+	dramDecWin.addrWin.baseHigh = 0;
+#endif
+
+	busClk = mvBoardSysClkGet();
+
+	if (0 == busClk)
+	{
+		mvOsPrintf("Dram: ERR. Can't detect system clock! \n");
+		return MV_ERROR;
+	}
+
+#ifndef MV_STATIC_DRAM_ON_BOARD
+
+	busClkPs = 1000000000 / (busClk / 1000);  /* in ps units */
+	/* we will use bank 0 as the representative of the all the DRAM banks,  */
+	/* since bank 0 must exist.                                             */
+	for(i = 0; i < MV_DRAM_MAX_CS; i++)
+	{
+		/* if Bank exist */
+		if(MV_OK == mvDramBankInfoGet(i, &bankInfo[i]))
+		{
+			DB(mvOsPrintf("Dram: Find bank %d\n", i));
+			/* check it isn't SDRAM */
+			if(bankInfo[i].memoryType != MEM_TYPE_DDR2)
+			{
+				mvOsOutput("Dram: ERR. SDRAM type not supported !!!\n");
+				return MV_ERROR;
+			}
+
+			/* All banks must support the Mclk freqency */
+			if(bankInfo[i].minCycleTimeAtMaxCasLatPs > busClkPs)
+			{
+				mvOsOutput("Dram: ERR. Bank %d doesn't support memory clock!!!\n", i);
+				return MV_ERROR;
+			}
+
+			/* All banks must support registry in order to activate it */
+			if(bankInfo[i].registeredAddrAndControlInputs !=
+			   bankInfo[0].registeredAddrAndControlInputs)
+			{
+				mvOsOutput("Dram: ERR. different Registered settings !!!\n");
+				return MV_ERROR;
+			}
+
+			/* All banks must support same ECC mode */
+			if(bankInfo[i].errorCheckType !=
+			   bankInfo[0].errorCheckType)
+			{
+				mvOsOutput("Dram: ERR. different ECC settings !!!\n");
+				return MV_ERROR;
+			}
+
+		}
+		else
+		{
+			if( i == 0 ) /* bank 0 doesn't exist */
+			{
+				mvOsOutput("Dram: ERR. Fail to detect bank 0 !!!\n");
+				return MV_ERROR;
+			}
+			else
+			{
+				DB(mvOsPrintf("Dram: Could not find bank %d\n", i));
+				bankInfo[i].size = 0;     /* Mark this bank as non exist */
+			}
+		}
+	}
+
+#ifdef MV_INCLUDE_SDRAM_CS2
+	if (bankInfo[SDRAM_CS0].size <  bankInfo[SDRAM_CS2].size)
+	{
+		MV_DRAM_CS_order[0] = SDRAM_CS2;
+		MV_DRAM_CS_order[1] = SDRAM_CS3;
+		MV_DRAM_CS_order[2] = SDRAM_CS0;
+		MV_DRAM_CS_order[3] = SDRAM_CS1;
+		DRAM_CS_Order[0] = SDRAM_CS2;
+		DRAM_CS_Order[1] = SDRAM_CS3;
+		DRAM_CS_Order[2] = SDRAM_CS0;
+		DRAM_CS_Order[3] = SDRAM_CS1;
+
+	}
+	else
+#endif
+	{
+		MV_DRAM_CS_order[0] = SDRAM_CS0;
+		MV_DRAM_CS_order[1] = SDRAM_CS1;
+		DRAM_CS_Order[0] = SDRAM_CS0;
+		DRAM_CS_Order[1] = SDRAM_CS1;
+#ifdef MV_INCLUDE_SDRAM_CS2
+		MV_DRAM_CS_order[2] = SDRAM_CS2;
+		MV_DRAM_CS_order[3] = SDRAM_CS3;
+		DRAM_CS_Order[2] = SDRAM_CS2;
+		DRAM_CS_Order[3] = SDRAM_CS3;
+#endif
+	}
+
+	for(j = 0; j < MV_DRAM_MAX_CS; j++)
+	{
+		i = MV_DRAM_CS_order[j];
+
+		if (0 == bankInfo[i].size)
+			continue;
+
+			/* Init the CPU window decode */
+			/* Note that the Dimm width might be different then the device DRAM width */
+#ifdef MV78XX0
+			temp = MV_REG_READ(SDRAM_CONFIG_REG);
+			deviceW = ((temp & SDRAM_DWIDTH_MASK) == SDRAM_DWIDTH_32BIT )? 32 : 64;
+#else
+			deviceW = 16 /* KW family */;
+#endif
+			dimmW = bankInfo[0].dataWidth - (bankInfo[0].dataWidth % 16);
+			size = ((bankInfo[i].size << 20) / (dimmW/deviceW));
+
+			/* We can not change DRAM window settings while excecuting  	*/
+			/* code from it. That is why we skip the DRAM CS[0], saving     */
+			/* it to the ROM configuration routine				*/
+
+			numOfAllDevices += bankInfo[i].numberOfDevices;
+			if (i == MV_DRAM_CS_order[0])
+			{
+				MV_U32 sizeToReg;
+				/* Translate the given window size to register format		*/
+				sizeToReg = ctrlSizeToReg(size, SCSR_SIZE_ALIGNMENT);
+				/* Size parameter validity check.                           */
+				if (-1 == sizeToReg)
+				{
+					mvOsOutput("DRAM: mvCtrlAddrDecToReg: ERR. Win %d size invalid.\n"
+							   ,i);
+					return MV_BAD_PARAM;
+				}
+
+				DB(mvOsPrintf("Dram: Bank 0 Size - %x\n",sizeToReg);)
+				sizeToReg = (sizeToReg << SCSR_SIZE_OFFS);
+				sizeToReg |= SCSR_WIN_EN;
+				MV_REG_WRITE(DRAM_BUF_REG0, sizeToReg);
+			}
+			else
+			{
+				dramDecWin.addrWin.baseLow = base;
+				dramDecWin.addrWin.size = size;
+				dramDecWin.enable = MV_TRUE;
+				DB(mvOsPrintf("Dram: Enable window %d base 0x%x, size=0x%x\n",i, base, size));
+
+				/* Check if the DRAM size is more then 3GByte */
+				if (base < 0xC0000000)
+				{
+					DB(mvOsPrintf("Dram: Enable window %d base 0x%x, size=0x%x\n",i, base, size));
+				if (MV_OK != mvCpuIfTargetWinSet(i, &dramDecWin))
+					{
+						mvOsPrintf("Dram: ERR. Fail to set bank %d!!!\n", SDRAM_CS0 + i);
+						return 	MV_ERROR;
+					}
+				}
+			}
+
+			base += size;
+
+			/* update the suportedCasLatencies mask */
+			bankInfo[0].suportedCasLatencies &= bankInfo[i].suportedCasLatencies;
+	}
+
+	/* calculate minimum CAS */
+	minCas = minCasCalc(&bankInfo[0], &bankInfo[2], busClk, forcedCl);
+	if (0 == minCas)
+	{
+		mvOsOutput("Dram: Warn: Could not find CAS compatible to SysClk %dMhz\n",
+				   (busClk / 1000000));
+
+		minCas = DDR2_CL_4; /* Continue with this CAS */
+		mvOsOutput("Set default CAS latency 4\n");
+	}
+
+	/* calc SDRAM_CONFIG_REG  and save it to temp register */
+	temp = sdramConfigRegCalc(&bankInfo[0],&bankInfo[2], busClk);
+	if(-1 == temp)
+	{
+		mvOsOutput("Dram: ERR. sdramConfigRegCalc failed !!!\n");
+		return MV_ERROR;
+	}
+
+	/* check if ECC is enabled by the user */
+	if(eccDisable)
+	{
+		/* turn off ECC*/
+		temp &= ~BIT18;
+	}
+	DB(mvOsPrintf("Dram: sdramConfigRegCalc - %x\n",temp);)
+	MV_REG_WRITE(DRAM_BUF_REG1, temp);
+
+	/* calc SDRAM_MODE_REG  and save it to temp register */
+	temp = sdramModeRegCalc(minCas);
+	if(-1 == temp)
+	{
+		mvOsOutput("Dram: ERR. sdramModeRegCalc failed !!!\n");
+		return MV_ERROR;
+	}
+	DB(mvOsPrintf("Dram: sdramModeRegCalc - %x\n",temp);)
+	MV_REG_WRITE(DRAM_BUF_REG2, temp);
+
+	/* calc SDRAM_EXTENDED_MODE_REG  and save it to temp register */
+	temp = sdramExtModeRegCalc(&bankInfo[0], busClk);
+	if(-1 == temp)
+	{
+		mvOsOutput("Dram: ERR. sdramExtModeRegCalc failed !!!\n");
+		return MV_ERROR;
+	}
+	DB(mvOsPrintf("Dram: sdramExtModeRegCalc - %x\n",temp);)
+	MV_REG_WRITE(DRAM_BUF_REG10, temp);
+
+	/* calc D_UNIT_CONTROL_LOW  and save it to temp register */
+	TTMode = MV_FALSE;
+	DB(mvOsPrintf("Dram: numOfAllDevices = %x\n",numOfAllDevices);)
+	if( (numOfAllDevices > 9) && (bankInfo[0].registeredAddrAndControlInputs == MV_FALSE) )
+	{
+		if ( ( (numOfAllDevices > 9) && (busClk > MV_BOARD_SYSCLK_200MHZ) ) ||
+			(numOfAllDevices > 18) )
+		{
+			mvOsOutput("Enable 2T ");
+			TTMode = MV_TRUE;
+		}
+	}
+
+	temp = dunitCtrlLowRegCalc(&bankInfo[0], minCas, busClk, TTMode );
+	if(-1 == temp)
+	{
+		mvOsOutput("Dram: ERR. dunitCtrlLowRegCalc failed !!!\n");
+		return MV_ERROR;
+	}
+	DB(mvOsPrintf("Dram: dunitCtrlLowRegCalc - %x\n",temp);)
+	MV_REG_WRITE(DRAM_BUF_REG3, temp);
+
+	/* calc D_UNIT_CONTROL_HIGH  and save it to temp register */
+	temp = dunitCtrlHighRegCalc(&bankInfo[0], busClk);
+	if(-1 == temp)
+	{
+		mvOsOutput("Dram: ERR. dunitCtrlHighRegCalc failed !!!\n");
+		return MV_ERROR;
+	}
+	DB(mvOsPrintf("Dram: dunitCtrlHighRegCalc - %x\n",temp);)
+	/* check if ECC is enabled by the user */
+	if(eccDisable)
+	{
+		/* turn off sample stage if no ecc */
+		temp &= ~SDRAM__D2P_EN;;
+	}
+	MV_REG_WRITE(DRAM_BUF_REG13, temp);
+
+	/* calc SDRAM_ADDR_CTRL_REG  and save it to temp register */
+	temp = sdramAddrCtrlRegCalc(&bankInfo[0],&bankInfo[2]);
+	if(-1 == temp)
+	{
+		mvOsOutput("Dram: ERR. sdramAddrCtrlRegCalc failed !!!\n");
+		return MV_ERROR;
+	}
+	DB(mvOsPrintf("Dram: sdramAddrCtrlRegCalc - %x\n",temp);)
+	MV_REG_WRITE(DRAM_BUF_REG4, temp);
+
+	/* calc SDRAM_TIMING_CTRL_LOW_REG  and save it to temp register */
+	temp = sdramTimeCtrlLowRegCalc(&bankInfo[0], minCas, busClk);
+	if(-1 == temp)
+	{
+		mvOsOutput("Dram: ERR. sdramTimeCtrlLowRegCalc failed !!!\n");
+		return MV_ERROR;
+	}
+	DB(mvOsPrintf("Dram: sdramTimeCtrlLowRegCalc - %x\n",temp);)
+	MV_REG_WRITE(DRAM_BUF_REG5, temp);
+
+	/* calc SDRAM_TIMING_CTRL_HIGH_REG  and save it to temp register */
+	temp = sdramTimeCtrlHighRegCalc(&bankInfo[0], busClk);
+	if(-1 == temp)
+	{
+		mvOsOutput("Dram: ERR. sdramTimeCtrlHighRegCalc failed !!!\n");
+		return MV_ERROR;
+	}
+	DB(mvOsPrintf("Dram: sdramTimeCtrlHighRegCalc - %x\n",temp);)
+	MV_REG_WRITE(DRAM_BUF_REG6, temp);
+
+	sdramDDr2OdtConfig(bankInfo);
+
+	/* calc DDR2_SDRAM_TIMING_LOW_REG  and save it to temp register */
+	temp = sdramDdr2TimeLoRegCalc(minCas);
+	if(-1 == temp)
+	{
+		mvOsOutput("Dram: ERR. sdramDdr2TimeLoRegCalc failed !!!\n");
+		return MV_ERROR;
+	}
+	DB(mvOsPrintf("Dram: sdramDdr2TimeLoRegCalc - %x\n",temp);)
+	MV_REG_WRITE(DRAM_BUF_REG11, temp);
+
+	/* calc DDR2_SDRAM_TIMING_HIGH_REG  and save it to temp register */
+	temp = sdramDdr2TimeHiRegCalc(minCas);
+	if(-1 == temp)
+	{
+		mvOsOutput("Dram: ERR. sdramDdr2TimeHiRegCalc failed !!!\n");
+		return MV_ERROR;
+	}
+	DB(mvOsPrintf("Dram: sdramDdr2TimeHiRegCalc - %x\n",temp);)
+	MV_REG_WRITE(DRAM_BUF_REG12, temp);
+#endif
+
+	/* Note that DDR SDRAM Address/Control and Data pad calibration     */
+	/* settings is done in mvSdramIfConfig.s                            */
+
+	return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvDramIfBankBaseGet - Get DRAM interface bank base.
+*
+* DESCRIPTION:
+*       This function returns the 32 bit base address of a given DRAM bank.
+*
+* INPUT:
+*       bankNum - Bank number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       DRAM bank size. If bank is disabled or paramter is invalid, the
+*		function returns -1.
+*
+*******************************************************************************/
+MV_U32 mvDramIfBankBaseGet(MV_U32 bankNum)
+{
+	DB(mvOsPrintf("Dram: mvDramIfBankBaseGet Bank %d base addr is %x \n",
+				  bankNum, mvCpuIfTargetWinBaseLowGet(SDRAM_CS0 + bankNum)));
+	return mvCpuIfTargetWinBaseLowGet(SDRAM_CS0 + bankNum);
+}
+
+/*******************************************************************************
+* mvDramIfBankSizeGet - Get DRAM interface bank size.
+*
+* DESCRIPTION:
+*       This function returns the size of a given DRAM bank.
+*
+* INPUT:
+*       bankNum - Bank number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       DRAM bank size. If bank is disabled the function return '0'. In case
+*		or paramter is invalid, the function returns -1.
+*
+*******************************************************************************/
+MV_U32 mvDramIfBankSizeGet(MV_U32 bankNum)
+{
+	DB(mvOsPrintf("Dram: mvDramIfBankSizeGet Bank %d size is %x \n",
+				  bankNum, mvCpuIfTargetWinSizeGet(SDRAM_CS0 + bankNum)));
+	return mvCpuIfTargetWinSizeGet(SDRAM_CS0 + bankNum);
+}
+
+
+/*******************************************************************************
+* mvDramIfSizeGet - Get DRAM interface total size.
+*
+* DESCRIPTION:
+*       This function get the DRAM total size.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       DRAM total size. In case or paramter is invalid, the function
+*		returns -1.
+*
+*******************************************************************************/
+MV_U32 mvDramIfSizeGet(MV_VOID)
+{
+	MV_U32 size = 0, i;
+
+	for(i = 0; i < MV_DRAM_MAX_CS; i++)
+		size += mvDramIfBankSizeGet(i);
+
+	DB(mvOsPrintf("Dram: mvDramIfSizeGet size is %x \n",size));
+	return size;
+}
+
+/*******************************************************************************
+* mvDramIfSingleBitErrThresholdSet - Set single bit ECC threshold.
+*
+* DESCRIPTION:
+*       The ECC single bit error threshold is the number of single bit
+*       errors to happen before the Dunit generates an interrupt.
+*       This function set single bit ECC threshold.
+*
+* INPUT:
+*       threshold - threshold.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM if threshold is to big, MV_OK otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvDramIfSingleBitErrThresholdSet(MV_U32 threshold)
+{
+    MV_U32 regVal;
+
+    if (threshold > SECR_THRECC_MAX)
+    {
+        return MV_BAD_PARAM;
+    }
+
+    regVal = MV_REG_READ(SDRAM_ECC_CONTROL_REG);
+    regVal &= ~SECR_THRECC_MASK;
+    regVal |= ((SECR_THRECC(threshold) & SECR_THRECC_MASK));
+    MV_REG_WRITE(SDRAM_ECC_CONTROL_REG, regVal);
+
+    return MV_OK;
+}
+
+#ifndef MV_STATIC_DRAM_ON_BOARD
+/*******************************************************************************
+* minCasCalc - Calculate the Minimum CAS latency which can be used.
+*
+* DESCRIPTION:
+*	Calculate the minimum CAS latency that can be used, base on the DRAM
+*	parameters and the SDRAM bus Clock freq.
+*
+* INPUT:
+*	busClk    - the DRAM bus Clock.
+*	pBankInfo - bank info parameters.
+*	forcedCl - Forced CAS Latency multiplied by 10. If equal to zero, do not force.
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       The minimum CAS Latency. The function returns 0 if max CAS latency
+*		supported by banks is incompatible with system bus clock frequancy.
+*
+*******************************************************************************/
+
+static MV_U32 minCasCalc(MV_DRAM_BANK_INFO *pBankInfo,MV_DRAM_BANK_INFO *pBankInfo2, MV_U32 busClk, MV_U32 forcedCl)
+{
+	MV_U32 count = 1, j;
+	MV_U32 busClkPs = 1000000000 / (busClk / 1000);  /* in ps units */
+	MV_U32 startBit, stopBit;
+	MV_U32 minCas0 = 0, minCas2 = 0;
+
+
+	/*     DDR 2:
+			*******-******-******-******-******-******-******-*******
+			* bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+			*******-******-******-******-******-******-******-*******
+	CAS	=	* TBD  | TBD  |  5   |  4   |  3   |  2   | TBD  | TBD  *
+	Disco VI=	* TBD  | TBD  |  5   |  4   |  3   |  TBD   | TBD | TBD *
+	Disco Duo=	* TBD  |   6  |  5   |  4   |  3   |  TBD   | TBD | TBD *
+			*********************************************************/
+
+
+	/* If we are asked to use the forced CAL  we change the suported CAL to be forcedCl only */
+	if (forcedCl)
+	{
+		mvOsOutput("DRAM: Using forced CL %d.%d\n", (forcedCl / 10), (forcedCl % 10));
+
+			if (forcedCl == 30)
+				pBankInfo->suportedCasLatencies = 0x08;
+			else if (forcedCl == 40)
+				pBankInfo->suportedCasLatencies = 0x10;
+			else if (forcedCl == 50)
+				pBankInfo->suportedCasLatencies = 0x20;
+			else if (forcedCl == 60)
+				pBankInfo->suportedCasLatencies = 0x40;
+			else
+			{
+				mvOsPrintf("Forced CL %d.%d not supported. Set default CL 4\n",
+						   (forcedCl / 10), (forcedCl % 10));
+				pBankInfo->suportedCasLatencies = 0x10;
+			}
+
+		return pBankInfo->suportedCasLatencies;
+	}
+
+	/* go over the supported cas mask from Max Cas down and check if the 	*/
+	/* SysClk stands in its time requirments.				*/
+
+	DB(mvOsPrintf("Dram: minCasCalc supported mask = %x busClkPs = %x \n",
+								pBankInfo->suportedCasLatencies,busClkPs ));
+	count = 1;
+	for(j = 7; j > 0; j--)
+	{
+		if((pBankInfo->suportedCasLatencies >> j) & BIT0 )
+		{
+			/* Reset the bits for CL incompatible for the sysClk */
+			switch (count)
+			{
+				case 1:
+					if (pBankInfo->minCycleTimeAtMaxCasLatPs > busClkPs)
+						pBankInfo->suportedCasLatencies &= ~(BIT0 << j);
+					count++;
+					break;
+				case 2:
+					if (pBankInfo->minCycleTimeAtMaxCasLatMinus1Ps > busClkPs)
+						pBankInfo->suportedCasLatencies &= ~(BIT0 << j);
+					count++;
+					break;
+				case 3:
+					if (pBankInfo->minCycleTimeAtMaxCasLatMinus2Ps > busClkPs)
+						pBankInfo->suportedCasLatencies &= ~(BIT0 << j);
+					count++;
+					break;
+				default:
+					pBankInfo->suportedCasLatencies &= ~(BIT0 << j);
+					break;
+			}
+		}
+	}
+
+	DB(mvOsPrintf("Dram: minCasCalc support = %x (after SysCC calc)\n",
+											pBankInfo->suportedCasLatencies ));
+
+	count = 1;
+	DB(mvOsPrintf("Dram2: minCasCalc supported mask = %x busClkPs = %x \n",
+								pBankInfo2->suportedCasLatencies,busClkPs ));
+	for(j = 7; j > 0; j--)
+	{
+		if((pBankInfo2->suportedCasLatencies >> j) & BIT0 )
+		{
+			/* Reset the bits for CL incompatible for the sysClk */
+			switch (count)
+			{
+				case 1:
+					if (pBankInfo2->minCycleTimeAtMaxCasLatPs > busClkPs)
+						pBankInfo2->suportedCasLatencies &= ~(BIT0 << j);
+					count++;
+					break;
+				case 2:
+					if (pBankInfo2->minCycleTimeAtMaxCasLatMinus1Ps > busClkPs)
+						pBankInfo2->suportedCasLatencies &= ~(BIT0 << j);
+					count++;
+					break;
+				case 3:
+					if (pBankInfo2->minCycleTimeAtMaxCasLatMinus2Ps > busClkPs)
+						pBankInfo2->suportedCasLatencies &= ~(BIT0 << j);
+					count++;
+					break;
+				default:
+					pBankInfo2->suportedCasLatencies &= ~(BIT0 << j);
+					break;
+			}
+		}
+	}
+
+	DB(mvOsPrintf("Dram2: minCasCalc support = %x (after SysCC calc)\n",
+									pBankInfo2->suportedCasLatencies ));
+
+	startBit = 3;   /* DDR2 support CL start with CL3 (bit 3) */
+	stopBit  = 6;   /* DDR2 support CL stops with CL6 (bit 6) */
+
+	for(j = startBit; j <= stopBit ; j++)
+	{
+		if((pBankInfo->suportedCasLatencies >> j) & BIT0 )
+		{
+			DB(mvOsPrintf("Dram: minCasCalc choose CAS %x \n",(BIT0 << j)));
+			minCas0 = (BIT0 << j);
+			break;
+		}
+	}
+
+	for(j = startBit; j <= stopBit ; j++)
+	{
+		if((pBankInfo2->suportedCasLatencies >> j) & BIT0 )
+		{
+			DB(mvOsPrintf("Dram: minCasCalc choose CAS %x \n",(BIT0 << j)));
+			minCas2 = (BIT0 << j);
+			break;
+		}
+	}
+
+	if (minCas2 > minCas0)
+		return minCas2;
+	else
+		return minCas0;
+
+	return 0;
+}
+
+/*******************************************************************************
+* sdramConfigRegCalc - Calculate sdram config register
+*
+* DESCRIPTION: Calculate sdram config register optimized value based
+*			on the bank info parameters.
+*
+* INPUT:
+*	busClk    - the DRAM bus Clock.
+*	pBankInfo - sdram bank parameters
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       sdram config reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramConfigRegCalc(MV_DRAM_BANK_INFO *pBankInfo,MV_DRAM_BANK_INFO *pBankInfo2, MV_U32 busClk)
+{
+	MV_U32 sdramConfig = 0;
+	MV_U32 refreshPeriod;
+
+	busClk /= 1000000; /* we work with busClk in MHz */
+
+	sdramConfig = MV_REG_READ(SDRAM_CONFIG_REG);
+
+	/* figure out the memory refresh internal */
+	switch (pBankInfo->refreshInterval & 0xf)
+	{
+		case 0x0: /* refresh period is 15.625 usec */
+				refreshPeriod = 15625;
+				break;
+		case 0x1: /* refresh period is 3.9 usec  	*/
+				refreshPeriod = 3900;
+				break;
+		case 0x2: /* refresh period is 7.8 usec 	*/
+				refreshPeriod = 7800;
+				break;
+		case 0x3: /* refresh period is 31.3 usec	*/
+				refreshPeriod = 31300;
+				break;
+		case 0x4: /* refresh period is 62.5 usec	*/
+				refreshPeriod = 62500;
+				break;
+		case 0x5: /* refresh period is 125 usec 	*/
+				refreshPeriod = 125000;
+				break;
+		default:  /* refresh period undefined 					*/
+				mvOsPrintf("Dram: ERR. DRAM refresh period is unknown!\n");
+				return -1;
+    }
+
+	/* Now the refreshPeriod is in register format value */
+	refreshPeriod = (busClk * refreshPeriod) / 1000;
+
+	DB(mvOsPrintf("Dram: sdramConfigRegCalc calculated refresh interval %0x\n",
+				  refreshPeriod));
+
+	/* make sure the refresh value is only 14 bits */
+	if(refreshPeriod > SDRAM_REFRESH_MAX)
+	{
+		refreshPeriod = SDRAM_REFRESH_MAX;
+		DB(mvOsPrintf("Dram: sdramConfigRegCalc adjusted refresh interval %0x\n",
+					  refreshPeriod));
+	}
+
+	/* Clear the refresh field */
+	sdramConfig &= ~SDRAM_REFRESH_MASK;
+
+	/* Set new value to refresh field */
+	sdramConfig |= (refreshPeriod & SDRAM_REFRESH_MASK);
+
+	/*  registered DRAM ? */
+	if ( pBankInfo->registeredAddrAndControlInputs )
+	{
+		/* it's registered DRAM, so set the reg. DRAM bit */
+		sdramConfig |= SDRAM_REGISTERED;
+		DB(mvOsPrintf("DRAM Attribute: Registered address and control inputs.\n");)
+	}
+
+	/* ECC and IERR support */
+	sdramConfig &= ~SDRAM_ECC_MASK;    /* Clear ECC field */
+	sdramConfig &= ~SDRAM_IERR_MASK;    /* Clear IErr field */
+
+	if ( pBankInfo->errorCheckType )
+	{
+		sdramConfig |= SDRAM_ECC_EN;
+		sdramConfig |= SDRAM_IERR_REPORTE;
+                DB(mvOsPrintf("Dram: mvDramIfDetect Enabling ECC\n"));
+	}
+	else
+	{
+                sdramConfig |= SDRAM_ECC_DIS;
+		sdramConfig |= SDRAM_IERR_IGNORE;
+                DB(mvOsPrintf("Dram: mvDramIfDetect Disabling ECC!\n"));
+	}
+	/* Set static default settings */
+	sdramConfig |= SDRAM_CONFIG_DV;
+
+	DB(mvOsPrintf("Dram: sdramConfigRegCalc set sdramConfig to 0x%x\n",
+				  sdramConfig));
+
+	return sdramConfig;
+}
+
+/*******************************************************************************
+* sdramModeRegCalc - Calculate sdram mode register
+*
+* DESCRIPTION: Calculate sdram mode register optimized value based
+*			on the bank info parameters and the minCas.
+*
+* INPUT:
+*	minCas	  - minimum CAS supported.
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       sdram mode reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramModeRegCalc(MV_U32 minCas)
+{
+	MV_U32 sdramMode;
+
+	sdramMode = MV_REG_READ(SDRAM_MODE_REG);
+
+	/* Clear CAS Latency field */
+	sdramMode &= ~SDRAM_CL_MASK;
+
+	DB(mvOsPrintf("DRAM CAS Latency ");)
+
+		switch (minCas)
+		{
+			case DDR2_CL_3:
+				sdramMode |= SDRAM_DDR2_CL_3;
+				DB(mvOsPrintf("3.\n");)
+				break;
+			case DDR2_CL_4:
+				sdramMode |= SDRAM_DDR2_CL_4;
+				DB(mvOsPrintf("4.\n");)
+				break;
+			case DDR2_CL_5:
+				sdramMode |= SDRAM_DDR2_CL_5;
+				DB(mvOsPrintf("5.\n");)
+				break;
+			case DDR2_CL_6:
+				sdramMode |= SDRAM_DDR2_CL_6;
+				DB(mvOsPrintf("6.\n");)
+				break;
+			default:
+				mvOsOutput("\nsdramModeRegCalc ERROR: Max. CL out of range\n");
+				return -1;
+        }
+
+	DB(mvOsPrintf("\nsdramModeRegCalc register 0x%x\n", sdramMode ));
+
+	return sdramMode;
+}
+/*******************************************************************************
+* sdramExtModeRegCalc - Calculate sdram Extended mode register
+*
+* DESCRIPTION:
+*		Return sdram Extended mode register value based
+*		on the bank info parameters and bank presence.
+*
+* INPUT:
+*	pBankInfo - sdram bank parameters
+*	busClk - DRAM frequency
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       sdram Extended mode reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramExtModeRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 busClk)
+{
+	MV_U32 populateBanks = 0;
+	int bankNum;
+
+		/* Represent the populate banks in binary form */
+		for(bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+		{
+			if (0 != pBankInfo[bankNum].size)
+			{
+				populateBanks |= (1 << bankNum);
+			}
+		}
+
+		switch(populateBanks)
+		{
+			case(BANK_PRESENT_CS0):
+			case(BANK_PRESENT_CS0_CS1):
+				return DDR_SDRAM_EXT_MODE_CS0_CS1_DV;
+
+			case(BANK_PRESENT_CS0_CS2):
+			case(BANK_PRESENT_CS0_CS1_CS2):
+			case(BANK_PRESENT_CS0_CS2_CS3):
+			case(BANK_PRESENT_CS0_CS2_CS3_CS4):
+				if (busClk >= MV_BOARD_SYSCLK_267MHZ)
+				    return DDR_SDRAM_EXT_MODE_FAST_CS0_CS1_CS2_CS3_DV;
+				else
+				    return DDR_SDRAM_EXT_MODE_CS0_CS1_CS2_CS3_DV;
+
+			default:
+				mvOsOutput("sdramExtModeRegCalc: Invalid DRAM bank presence\n");
+				return -1;
+		}
+	return 0;
+}
+
+/*******************************************************************************
+* dunitCtrlLowRegCalc - Calculate sdram dunit control low register
+*
+* DESCRIPTION: Calculate sdram dunit control low register optimized value based
+*			on the bank info parameters and the minCas.
+*
+* INPUT:
+*	pBankInfo - sdram bank parameters
+*	minCas	  - minimum CAS supported.
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       sdram dunit control low reg value.
+*
+*******************************************************************************/
+static MV_U32 dunitCtrlLowRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 minCas, MV_U32  busClk, MV_STATUS TTMode)
+{
+	MV_U32 dunitCtrlLow, cl;
+	MV_U32 sbOutR[4]={3,5,7,9} ;
+	MV_U32 sbOutU[4]={1,3,5,7} ;
+
+	dunitCtrlLow = MV_REG_READ(SDRAM_DUNIT_CTRL_REG);
+
+        DB(mvOsPrintf("Dram: dunitCtrlLowRegCalc\n"));
+
+	/* Clear StBurstOutDel field */
+	dunitCtrlLow &= ~SDRAM_SB_OUT_MASK;
+
+	/* Clear StBurstInDel field */
+	dunitCtrlLow &= ~SDRAM_SB_IN_MASK;
+
+	/* Clear CtrlPos field */
+	dunitCtrlLow &= ~SDRAM_CTRL_POS_MASK;
+
+	/* Clear 2T field */
+	dunitCtrlLow &= ~SDRAM_2T_MASK;
+	if (TTMode == MV_TRUE)
+	{
+		dunitCtrlLow |= SDRAM_2T_MODE;
+	}
+
+	/* For proper sample of read data set the Dunit Control register's      */
+	/* stBurstInDel bits [27:24]                                            */
+	/*		200MHz - 267MHz None reg  = CL + 1			*/
+	/*		200MHz - 267MHz reg	  = CL + 2			*/
+	/*		> 267MHz None reg  = CL + 2			*/
+	/*		> 267MHz reg	  = CL + 3			*/
+
+	/* For proper sample of read data set the Dunit Control register's      */
+	/* stBurstOutDel bits [23:20]                                           */
+			/********-********-********-********-
+			*  CL=3  |  CL=4  |  CL=5  |  CL=6  |
+			*********-********-********-********-
+	Not Reg.	*  0001  |  0011  |  0101  |  0111  |
+			*********-********-********-********-
+	Registered	*  0011  |  0101  |  0111  |  1001  |
+			*********-********-********-********/
+
+		/* Set Dunit Control low default value */
+		dunitCtrlLow |= SDRAM_DUNIT_CTRL_LOW_DDR2_DV;
+
+		switch (minCas)
+		{
+			case DDR2_CL_3: cl = 3; break;
+			case DDR2_CL_4: cl = 4; break;
+			case DDR2_CL_5: cl = 5; break;
+			case DDR2_CL_6: cl = 6; break;
+			default:
+				mvOsOutput("Dram: dunitCtrlLowRegCalc Max. CL out of range %d\n", minCas);
+				return -1;
+		}
+
+		/* registerd DDR SDRAM? */
+		if (pBankInfo->registeredAddrAndControlInputs == MV_TRUE)
+		{
+			dunitCtrlLow |= (sbOutR[cl-3]) << SDRAM_SB_OUT_DEL_OFFS;
+		}
+		else
+		{
+			dunitCtrlLow |= (sbOutU[cl-3]) << SDRAM_SB_OUT_DEL_OFFS;
+		}
+
+		DB(mvOsPrintf("\n\ndunitCtrlLowRegCalc: CL = %d, frequencies=%d\n", cl, busClk));
+
+		if (busClk <= MV_BOARD_SYSCLK_267MHZ)
+		{
+			if (pBankInfo->registeredAddrAndControlInputs == MV_TRUE)
+				cl = cl + 2;
+			else
+				cl = cl + 1;
+		}
+		else
+		{
+			if (pBankInfo->registeredAddrAndControlInputs == MV_TRUE)
+				cl = cl + 3;
+			else
+				cl = cl + 2;
+		}
+
+        DB(mvOsPrintf("dunitCtrlLowRegCalc: SDRAM_SB_IN_DEL_OFFS = %d \n", cl));
+		dunitCtrlLow |= cl << SDRAM_SB_IN_DEL_OFFS;
+
+	DB(mvOsPrintf("Dram: Reg dunit control low = %x\n", dunitCtrlLow ));
+
+	return dunitCtrlLow;
+}
+
+/*******************************************************************************
+* dunitCtrlHighRegCalc - Calculate sdram dunit control high register
+*
+* DESCRIPTION: Calculate sdram dunit control high register optimized value based
+*			on the bus clock.
+*
+* INPUT:
+*	busClk	  - DRAM frequency.
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       sdram dunit control high reg value.
+*
+*******************************************************************************/
+static MV_U32 dunitCtrlHighRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32  busClk)
+{
+	MV_U32 dunitCtrlHigh;
+	dunitCtrlHigh = MV_REG_READ(SDRAM_DUNIT_CTRL_HI_REG);
+	if(busClk > MV_BOARD_SYSCLK_300MHZ)
+		dunitCtrlHigh |= SDRAM__P2D_EN;
+	else
+		dunitCtrlHigh &= ~SDRAM__P2D_EN;
+
+	if(busClk > MV_BOARD_SYSCLK_267MHZ)
+	    dunitCtrlHigh |= (SDRAM__WR_MESH_DELAY_EN | SDRAM__PUP_ZERO_SKEW_EN | SDRAM__ADD_HALF_FCC_EN);
+
+	/* If ECC support we turn on D2P sample */
+	dunitCtrlHigh &= ~SDRAM__D2P_EN;    /* Clear D2P bit */
+	if (( pBankInfo->errorCheckType ) && (busClk > MV_BOARD_SYSCLK_267MHZ))
+		dunitCtrlHigh |= SDRAM__D2P_EN;
+
+	return dunitCtrlHigh;
+}
+
+/*******************************************************************************
+* sdramAddrCtrlRegCalc - Calculate sdram address control register
+*
+* DESCRIPTION: Calculate sdram address control register optimized value based
+*			on the bank info parameters and the minCas.
+*
+* INPUT:
+*	pBankInfo - sdram bank parameters
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       sdram address control reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramAddrCtrlRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_DRAM_BANK_INFO *pBankInfoDIMM1)
+{
+	MV_U32 addrCtrl = 0;
+
+	if (pBankInfoDIMM1->size)
+	{
+		switch (pBankInfoDIMM1->sdramWidth)
+		{
+			case 4:  /* memory is x4 */
+				mvOsOutput("sdramAddrCtrlRegCalc: Error - x4 not supported!\n");
+				return -1;
+				break;
+			case 8:  /* memory is x8 */
+				addrCtrl |= SDRAM_ADDRSEL_X8(2) | SDRAM_ADDRSEL_X8(3);
+				DB(mvOsPrintf("sdramAddrCtrlRegCalc: sdramAddrCtrlRegCalc SDRAM device DIMM2 width x8\n"));
+				break;
+			case 16:
+				addrCtrl |= SDRAM_ADDRSEL_X16(2) | SDRAM_ADDRSEL_X16(3);
+				DB(mvOsPrintf("sdramAddrCtrlRegCalc: sdramAddrCtrlRegCalc SDRAM device DIMM2 width x16\n"));
+				break;
+			default: /* memory width unsupported */
+				mvOsOutput("sdramAddrCtrlRegCalc: ERR. DRAM chip width is unknown!\n");
+				return -1;
+		}
+	}
+
+	switch (pBankInfo->sdramWidth)
+	{
+		case 4:  /* memory is x4 */
+			mvOsOutput("sdramAddrCtrlRegCalc: Error - x4 not supported!\n");
+			return -1;
+			break;
+		case 8:  /* memory is x8 */
+			addrCtrl |= SDRAM_ADDRSEL_X8(0) | SDRAM_ADDRSEL_X8(1);
+			DB(mvOsPrintf("sdramAddrCtrlRegCalc: sdramAddrCtrlRegCalc SDRAM device width x8\n"));
+			break;
+		case 16:
+			addrCtrl |= SDRAM_ADDRSEL_X16(0) | SDRAM_ADDRSEL_X16(1);
+			DB(mvOsPrintf("sdramAddrCtrlRegCalc: sdramAddrCtrlRegCalc SDRAM device width x16\n"));
+			break;
+		default: /* memory width unsupported */
+			mvOsOutput("sdramAddrCtrlRegCalc: ERR. DRAM chip width is unknown!\n");
+			return -1;
+	}
+
+	/* Note that density is in MB units */
+	switch (pBankInfo->deviceDensity)
+	{
+		case 256:                 /* 256 Mbit */
+			DB(mvOsPrintf("DRAM Device Density 256Mbit\n"));
+			addrCtrl |= SDRAM_DSIZE_256Mb(0) | SDRAM_DSIZE_256Mb(1);
+			break;
+		case 512:                /* 512 Mbit */
+			DB(mvOsPrintf("DRAM Device Density 512Mbit\n"));
+			addrCtrl |= SDRAM_DSIZE_512Mb(0) | SDRAM_DSIZE_512Mb(1);
+			break;
+		case 1024:                /* 1 Gbit */
+			DB(mvOsPrintf("DRAM Device Density 1Gbit\n"));
+			addrCtrl |= SDRAM_DSIZE_1Gb(0) | SDRAM_DSIZE_1Gb(1);
+			break;
+		case 2048:                /* 2 Gbit */
+			DB(mvOsPrintf("DRAM Device Density 2Gbit\n"));
+			addrCtrl |= SDRAM_DSIZE_2Gb(0) | SDRAM_DSIZE_2Gb(1);
+			break;
+		default:
+			mvOsOutput("Dram: sdramAddrCtrl unsupported RAM-Device size %d\n",
+                       pBankInfo->deviceDensity);
+			return -1;
+        }
+
+	if (pBankInfoDIMM1->size)
+	{
+		switch (pBankInfoDIMM1->deviceDensity)
+		{
+			case 256:                 /* 256 Mbit */
+				DB(mvOsPrintf("DIMM2: DRAM Device Density 256Mbit\n"));
+				addrCtrl |= SDRAM_DSIZE_256Mb(2) | SDRAM_DSIZE_256Mb(3);
+				break;
+			case 512:                /* 512 Mbit */
+				DB(mvOsPrintf("DIMM2: DRAM Device Density 512Mbit\n"));
+				addrCtrl |= SDRAM_DSIZE_512Mb(2) | SDRAM_DSIZE_512Mb(3);
+				break;
+			case 1024:                /* 1 Gbit */
+				DB(mvOsPrintf("DIMM2: DRAM Device Density 1Gbit\n"));
+				addrCtrl |= SDRAM_DSIZE_1Gb(2) | SDRAM_DSIZE_1Gb(3);
+				break;
+			case 2048:                /* 2 Gbit */
+				DB(mvOsPrintf("DIMM2: DRAM Device Density 2Gbit\n"));
+				addrCtrl |= SDRAM_DSIZE_2Gb(2) | SDRAM_DSIZE_2Gb(3);
+				break;
+			default:
+				mvOsOutput("DIMM2: Dram: sdramAddrCtrl unsupported RAM-Device size %d\n",
+						   pBankInfoDIMM1->deviceDensity);
+				return -1;
+		}
+	}
+	/* SDRAM address control */
+	DB(mvOsPrintf("Dram: setting sdram address control with: %x \n", addrCtrl));
+
+	return addrCtrl;
+}
+
+/*******************************************************************************
+* sdramTimeCtrlLowRegCalc - Calculate sdram timing control low register
+*
+* DESCRIPTION:
+*       This function calculates sdram timing control low register
+*       optimized value based on the bank info parameters and the minCas.
+*
+* INPUT:
+*	    pBankInfo - sdram bank parameters
+*	minCas	  - minimum CAS supported.
+*       busClk    - Bus clock
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       sdram timing control low reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramTimeCtrlLowRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 minCas, MV_U32 busClk)
+{
+    MV_U32 tRp  = 0;
+    MV_U32 tRrd = 0;
+    MV_U32 tRcd = 0;
+    MV_U32 tRas = 0;
+    MV_U32 tWr  = 0;
+    MV_U32 tWtr = 0;
+    MV_U32 tRtp = 0;
+    MV_U32 timeCtrlLow = 0;
+
+    MV_U32 bankNum;
+
+    busClk = busClk / 1000000;    /* In MHz */
+
+    /* Scan all DRAM banks to find maximum timing values */
+    for (bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+    {
+        tRp  = MV_MAX(tRp,  pBankInfo[bankNum].minRowPrechargeTime);
+        tRrd = MV_MAX(tRrd, pBankInfo[bankNum].minRowActiveToRowActive);
+        tRcd = MV_MAX(tRcd, pBankInfo[bankNum].minRasToCasDelay);
+        tRas = MV_MAX(tRas, pBankInfo[bankNum].minRasPulseWidth);
+    }
+
+    /* Extract timing (in ns) from SPD value. We ignore the tenth ns part.  */
+    /* by shifting the data two bits right.                                 */
+    tRp  = tRp  >> 2;    /* For example 0x50 -> 20ns                        */
+    tRrd = tRrd >> 2;
+    tRcd = tRcd >> 2;
+
+    /* Extract clock cycles from time parameter. We need to round up        */
+    tRp  = ((busClk * tRp)  / 1000) + (((busClk * tRp)  % 1000) ? 1 : 0);
+    DB(mvOsPrintf("Dram  Timing Low: tRp = %d ", tRp));
+    tRrd = ((busClk * tRrd) / 1000) + (((busClk * tRrd) % 1000) ? 1 : 0);
+	/* JEDEC min reqeirments tRrd = 2 */
+	if (tRrd < 2)
+		tRrd = 2;
+    DB(mvOsPrintf("tRrd = %d ", tRrd));
+    tRcd = ((busClk * tRcd) / 1000) + (((busClk * tRcd) % 1000) ? 1 : 0);
+    DB(mvOsPrintf("tRcd = %d ", tRcd));
+    tRas = ((busClk * tRas) / 1000) + (((busClk * tRas) % 1000) ? 1 : 0);
+    DB(mvOsPrintf("tRas = %d ", tRas));
+
+    /* tWr and tWtr is different for DDR1 and DDR2. tRtp is only for DDR2   */
+	/* Scan all DRAM banks to find maximum timing values */
+	for (bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+	{
+	    tWr  = MV_MAX(tWr,  pBankInfo[bankNum].minWriteRecoveryTime);
+	    tWtr = MV_MAX(tWtr, pBankInfo[bankNum].minWriteToReadCmdDelay);
+	    tRtp = MV_MAX(tRtp, pBankInfo[bankNum].minReadToPrechCmdDelay);
+	}
+
+	/* Extract timing (in ns) from SPD value. We ignore the tenth ns    */
+	/* part by shifting the data two bits right.                        */
+	tWr  = tWr  >> 2;    /* For example 0x50 -> 20ns                    */
+	tWtr = tWtr >> 2;
+	tRtp = tRtp >> 2;
+	/* Extract clock cycles from time parameter. We need to round up    */
+	tWr  = ((busClk * tWr)  / 1000) + (((busClk * tWr)  % 1000) ? 1 : 0);
+	DB(mvOsPrintf("tWr = %d ", tWr));
+	tWtr = ((busClk * tWtr) / 1000) + (((busClk * tWtr) % 1000) ? 1 : 0);
+	/* JEDEC min reqeirments tWtr = 2 */
+	if (tWtr < 2)
+		tWtr = 2;
+	DB(mvOsPrintf("tWtr = %d ", tWtr));
+	tRtp = ((busClk * tRtp) / 1000) + (((busClk * tRtp) % 1000) ? 1 : 0);
+	/* JEDEC min reqeirments tRtp = 2 */
+	if (tRtp < 2)
+	tRtp = 2;
+	DB(mvOsPrintf("tRtp = %d ", tRtp));
+
+	/* Note: value of 0 in register means one cycle, 1 means two and so on  */
+	timeCtrlLow = (((tRp  - 1) << SDRAM_TRP_OFFS) |
+		    ((tRrd - 1) << SDRAM_TRRD_OFFS) |
+		    ((tRcd - 1) << SDRAM_TRCD_OFFS) |
+		    (((tRas - 1) << SDRAM_TRAS_OFFS) & SDRAM_TRAS_MASK)|
+		    ((tWr  - 1) << SDRAM_TWR_OFFS)  |
+		    ((tWtr - 1) << SDRAM_TWTR_OFFS)	|
+		    ((tRtp - 1) << SDRAM_TRTP_OFFS));
+
+	/* Check extended tRas bit */
+	if ((tRas - 1) & BIT4)
+	    timeCtrlLow |= (1 << SDRAM_EXT_TRAS_OFFS);
+
+	return timeCtrlLow;
+}
+
+/*******************************************************************************
+* sdramTimeCtrlHighRegCalc - Calculate sdram timing control high register
+*
+* DESCRIPTION:
+*       This function calculates sdram timing control high register
+*       optimized value based on the bank info parameters and the bus clock.
+*
+* INPUT:
+*	    pBankInfo - sdram bank parameters
+*       busClk    - Bus clock
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       sdram timing control high reg value.
+*
+*******************************************************************************/
+static MV_U32 sdramTimeCtrlHighRegCalc(MV_DRAM_BANK_INFO *pBankInfo, MV_U32 busClk)
+{
+	MV_U32 tRfc;
+	MV_U32 timingHigh;
+	MV_U32 timeNs = 0;
+	MV_U32 bankNum;
+
+	busClk = busClk / 1000000;    /* In MHz */
+
+	/* Set DDR timing high register static configuration bits */
+	timingHigh = MV_REG_READ(SDRAM_TIMING_CTRL_HIGH_REG);
+
+	/* Set DDR timing high register default value */
+	timingHigh |= SDRAM_TIMING_CTRL_HIGH_REG_DV;
+
+	/* Clear tRfc field */
+	timingHigh &= ~SDRAM_TRFC_MASK;
+
+	/* Scan all DRAM banks to find maximum timing values */
+	for (bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+	{
+		timeNs = MV_MAX(timeNs,  pBankInfo[bankNum].minRefreshToActiveCmd);
+		DB(mvOsPrintf("Dram:  Timing High: minRefreshToActiveCmd = %d\n",
+				pBankInfo[bankNum].minRefreshToActiveCmd));
+	}
+	if(busClk >= 333 && mvCtrlModelGet() == MV_78XX0_A1_REV)
+    {
+        timingHigh |= 0x1 << SDRAM_TR2W_W2R_OFFS;
+    }
+
+	tRfc = ((busClk * timeNs)  / 1000) + (((busClk * timeNs)  % 1000) ? 1 : 0);
+	/* Note: value of 0 in register means one cycle, 1 means two and so on  */
+	DB(mvOsPrintf("Dram:  Timing High: tRfc = %d\n", tRfc));
+	timingHigh |= (((tRfc - 1) & SDRAM_TRFC_MASK) << SDRAM_TRFC_OFFS);
+	DB(mvOsPrintf("Dram:  Timing High: tRfc = %d\n", tRfc));
+
+	/* SDRAM timing high */
+	DB(mvOsPrintf("Dram: setting timing high with: %x \n", timingHigh));
+
+	return timingHigh;
+}
+/*******************************************************************************
+* sdramDDr2OdtConfig - Set DRAM DDR2 On Die Termination registers.
+*
+* DESCRIPTION:
+*       This function config DDR2 On Die Termination (ODT) registers.
+*
+* INPUT:
+*		pBankInfo - bank info parameters.
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       None
+*******************************************************************************/
+static void sdramDDr2OdtConfig(MV_DRAM_BANK_INFO *pBankInfo)
+{
+	MV_U32 populateBanks = 0;
+	MV_U32 odtCtrlLow, odtCtrlHigh, dunitOdtCtrl;
+	int bankNum;
+
+	/* Represent the populate banks in binary form */
+	for(bankNum = 0; bankNum < MV_DRAM_MAX_CS; bankNum++)
+	{
+		if (0 != pBankInfo[bankNum].size)
+		{
+				populateBanks |= (1 << bankNum);
+			}
+		}
+
+	switch(populateBanks)
+	{
+		case(BANK_PRESENT_CS0):
+		case(BANK_PRESENT_CS0_CS1):
+			odtCtrlLow   = DDR2_ODT_CTRL_LOW_CS0_CS1_DV;
+			odtCtrlHigh  = DDR2_ODT_CTRL_HIGH_CS0_CS1_DV;
+			dunitOdtCtrl = DDR2_DUNIT_ODT_CTRL_CS0_CS1_DV;
+			break;
+		case(BANK_PRESENT_CS0_CS2):
+		case(BANK_PRESENT_CS0_CS1_CS2):
+		case(BANK_PRESENT_CS0_CS2_CS3):
+		case(BANK_PRESENT_CS0_CS2_CS3_CS4):
+			odtCtrlLow   = DDR2_ODT_CTRL_LOW_CS0_CS1_CS2_CS3_DV;
+			odtCtrlHigh  = DDR2_ODT_CTRL_HIGH_CS0_CS1_CS2_CS3_DV;
+			dunitOdtCtrl = DDR2_DUNIT_ODT_CTRL_CS0_CS1_CS2_CS3_DV;
+			break;
+		default:
+			DB(mvOsPrintf("sdramDDr2OdtConfig: Invalid DRAM bank presence\n"));
+			return;
+	}
+	/* DDR2 SDRAM ODT ctrl low  */
+	DB(mvOsPrintf("Dram: DDR2 setting ODT ctrl low with: %x \n", odtCtrlLow));
+	MV_REG_WRITE(DRAM_BUF_REG7, odtCtrlLow);
+
+	/* DDR2 SDRAM ODT ctrl high  */
+	DB(mvOsPrintf("Dram: DDR2 setting ODT ctrl high with: %x \n", odtCtrlHigh));
+	MV_REG_WRITE(DRAM_BUF_REG8, odtCtrlHigh);
+
+	/* DDR2 DUNIT ODT ctrl  */
+	if ( ((mvCtrlModelGet() == MV_78XX0_DEV_ID) && (mvCtrlRevGet() == MV_78XX0_Y0_REV)) ||
+		(mvCtrlModelGet() == MV_76100_DEV_ID) ||
+		(mvCtrlModelGet() == MV_78100_DEV_ID) ||
+		(mvCtrlModelGet() == MV_78200_DEV_ID) )
+		dunitOdtCtrl &= ~(BIT9|BIT8); /* Clear ODT always on */
+
+	DB(mvOsPrintf("DUNIT: DDR2 setting ODT ctrl with: %x \n", dunitOdtCtrl));
+	MV_REG_WRITE(DRAM_BUF_REG9, dunitOdtCtrl);
+	return;
+}
+/*******************************************************************************
+* sdramDdr2TimeLoRegCalc - Set DDR2 DRAM Timing Low registers.
+*
+* DESCRIPTION:
+*       This function config DDR2 DRAM Timing low registers.
+*
+* INPUT:
+*	minCas	  - minimum CAS supported.
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       DDR2 sdram timing low reg value.
+*******************************************************************************/
+static MV_U32 sdramDdr2TimeLoRegCalc(MV_U32 minCas)
+{
+	MV_U8 cl = -1;
+	MV_U32 ddr2TimeLoReg;
+
+	/* read and clear the feilds we are going to set */
+	ddr2TimeLoReg = MV_REG_READ(SDRAM_DDR2_TIMING_LO_REG);
+	ddr2TimeLoReg &= ~(SD2TLR_TODT_ON_RD_MASK	|
+			   SD2TLR_TODT_OFF_RD_MASK	|
+			   SD2TLR_TODT_ON_CTRL_RD_MASK	|
+			   SD2TLR_TODT_OFF_CTRL_RD_MASK);
+
+	if( minCas == DDR2_CL_3 )
+	{
+		cl = 3;
+	}
+	else if( minCas == DDR2_CL_4 )
+	{
+		cl = 4;
+	}
+	else if( minCas == DDR2_CL_5 )
+	{
+		cl = 5;
+	}
+	else if( minCas == DDR2_CL_6 )
+	{
+		cl = 6;
+	}
+	else
+	{
+		DB(mvOsPrintf("sdramDdr2TimeLoRegCalc: CAS latency %d unsupported. using CAS latency 4\n",
+				minCas));
+		cl = 4;
+	}
+
+	ddr2TimeLoReg |= ((cl-3) << SD2TLR_TODT_ON_RD_OFFS);
+	ddr2TimeLoReg |= ( cl << SD2TLR_TODT_OFF_RD_OFFS);
+	ddr2TimeLoReg |= ( cl << SD2TLR_TODT_ON_CTRL_RD_OFFS);
+	ddr2TimeLoReg |= ((cl+3) << SD2TLR_TODT_OFF_CTRL_RD_OFFS);
+
+	/* DDR2 SDRAM timing low */
+	DB(mvOsPrintf("Dram: DDR2 setting timing low with: %x \n", ddr2TimeLoReg));
+
+	return ddr2TimeLoReg;
+}
+
+/*******************************************************************************
+* sdramDdr2TimeHiRegCalc - Set DDR2 DRAM Timing High registers.
+*
+* DESCRIPTION:
+*       This function config DDR2 DRAM Timing high registers.
+*
+* INPUT:
+*	minCas	  - minimum CAS supported.
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       DDR2 sdram timing high reg value.
+*******************************************************************************/
+static MV_U32 sdramDdr2TimeHiRegCalc(MV_U32 minCas)
+{
+	MV_U8 cl = -1;
+	MV_U32 ddr2TimeHiReg;
+
+	/* read and clear the feilds we are going to set */
+	ddr2TimeHiReg = MV_REG_READ(SDRAM_DDR2_TIMING_HI_REG);
+	ddr2TimeHiReg &= ~(SD2THR_TODT_ON_WR_MASK	|
+			   SD2THR_TODT_OFF_WR_MASK	|
+			   SD2THR_TODT_ON_CTRL_WR_MASK	|
+			   SD2THR_TODT_OFF_CTRL_WR_MASK);
+
+	if( minCas == DDR2_CL_3 )
+	{
+		cl = 3;
+	}
+	else if( minCas == DDR2_CL_4 )
+	{
+		cl = 4;
+	}
+	else if( minCas == DDR2_CL_5 )
+	{
+		cl = 5;
+	}
+	else if( minCas == DDR2_CL_6 )
+	{
+		cl = 6;
+	}
+	else
+	{
+		mvOsOutput("sdramDdr2TimeHiRegCalc: CAS latency %d unsupported. using CAS latency 4\n",
+				minCas);
+		cl = 4;
+	}
+
+	ddr2TimeHiReg |= ((cl-3) << SD2THR_TODT_ON_WR_OFFS);
+	ddr2TimeHiReg |= ( cl << SD2THR_TODT_OFF_WR_OFFS);
+	ddr2TimeHiReg |= ( cl << SD2THR_TODT_ON_CTRL_WR_OFFS);
+	ddr2TimeHiReg |= ((cl+3) << SD2THR_TODT_OFF_CTRL_WR_OFFS);
+
+	/* DDR2 SDRAM timin high  */
+	DB(mvOsPrintf("Dram: DDR2 setting timing high with: %x \n", ddr2TimeHiReg));
+
+	return ddr2TimeHiReg;
+}
+#endif
+
+/*******************************************************************************
+* mvDramIfCalGet - Get CAS Latency
+*
+* DESCRIPTION:
+*       This function get the CAS Latency.
+*
+* INPUT:
+*       None
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       CAS latency times 10 (to avoid using floating point).
+*
+*******************************************************************************/
+MV_U32 mvDramIfCalGet(void)
+{
+	MV_U32 sdramCasLat, casLatMask;
+
+    casLatMask = (MV_REG_READ(SDRAM_MODE_REG) & SDRAM_CL_MASK);
+
+    switch (casLatMask)
+    {
+        case SDRAM_DDR2_CL_3:
+            sdramCasLat = 30;
+            break;
+        case SDRAM_DDR2_CL_4:
+            sdramCasLat = 40;
+            break;
+        case SDRAM_DDR2_CL_5:
+            sdramCasLat = 50;
+            break;
+        case SDRAM_DDR2_CL_6:
+            sdramCasLat = 60;
+            break;
+        default:
+            mvOsOutput("mvDramIfCalGet: Err, unknown DDR2 CAL\n");
+            return -1;
+    }
+
+    return sdramCasLat;
+}
+
+
+/*******************************************************************************
+* mvDramIfSelfRefreshSet - Put the dram in self refresh mode -
+*
+* DESCRIPTION:
+*               add support in power management.
+*
+*
+* INPUT:
+*       None
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       None
+*
+*******************************************************************************/
+
+MV_VOID mvDramIfSelfRefreshSet()
+{
+    MV_U32 operReg;
+
+      operReg =  MV_REG_READ(SDRAM_OPERATION_REG);
+      MV_REG_WRITE(SDRAM_OPERATION_REG ,operReg |SDRAM_CMD_SLF_RFRSH);
+      /* Read until register is reset to 0 */
+      while(MV_REG_READ(SDRAM_OPERATION_REG));
+}
+/*******************************************************************************
+* mvDramIfDimGetSPDversion - return DIMM SPD version.
+*
+* DESCRIPTION:
+*		This function prints the DRAM controller information.
+*
+* INPUT:
+*		None.
+*
+* OUTPUT:
+*		None.
+*
+* RETURN:
+*		None.
+*
+*******************************************************************************/
+static void mvDramIfDimGetSPDversion(MV_U32 *pMajor, MV_U32 *pMinor, MV_U32 bankNum)
+{
+	MV_DIMM_INFO dimmInfo;
+	if (bankNum >= MV_DRAM_MAX_CS )
+	{
+		DB(mvOsPrintf("Dram: mvDramIfDimGetSPDversion bad params \n"));
+		return ;
+	}
+	memset(&dimmInfo,0,sizeof(dimmInfo));
+	if ( MV_OK != dimmSpdGet((MV_U32)(bankNum/2), &dimmInfo))
+	{
+		DB(mvOsPrintf("Dram: ERR dimmSpdGet failed to get dimm info \n"));
+		return ;
+	}
+	*pMajor = dimmInfo.spdRawData[DIMM_SPD_VERSION]/10;
+	*pMinor = dimmInfo.spdRawData[DIMM_SPD_VERSION]%10;
+}
+/*******************************************************************************
+* mvDramIfShow - Show DRAM controller information.
+*
+* DESCRIPTION:
+*		This function prints the DRAM controller information.
+*
+* INPUT:
+*		None.
+*
+* OUTPUT:
+*		None.
+*
+* RETURN:
+*		None.
+*
+*******************************************************************************/
+void mvDramIfShow(void)
+{
+    int i, sdramCasLat, sdramCsSize;
+	MV_U32 Major=0, Minor=0;
+
+    mvOsOutput("DRAM Controller info:\n");
+
+    mvOsOutput("Total DRAM ");
+    mvSizePrint(mvDramIfSizeGet());
+    mvOsOutput("\n");
+
+	for(i = 0; i < MV_DRAM_MAX_CS; i++)
+	{
+        sdramCsSize = mvDramIfBankSizeGet(i);
+        if (sdramCsSize)
+        {
+			if (0 == (i & 1))
+			{
+				mvDramIfDimGetSPDversion(&Major, &Minor,i);
+				mvOsOutput("DIMM %d version %d.%d\n", i/2, Major, Minor);
+			}
+            mvOsOutput("\tDRAM CS[%d] ", i);
+            mvSizePrint(sdramCsSize);
+            mvOsOutput("\n");
+        }
+    }
+    sdramCasLat = mvDramIfCalGet();
+
+    if (MV_REG_READ(SDRAM_CONFIG_REG) & SDRAM_ECC_EN)
+    {
+        mvOsOutput("ECC enabled, ");
+    }
+    else
+    {
+        mvOsOutput("ECC Disabled, ");
+    }
+
+    if (MV_REG_READ(SDRAM_CONFIG_REG) & SDRAM_REGISTERED)
+    {
+        mvOsOutput("Registered DIMM\n");
+    }
+    else
+    {
+        mvOsOutput("Non registered DIMM\n");
+    }
+
+    mvOsOutput("Configured CAS Latency %d.%d\n", sdramCasLat/10, sdramCasLat%10);
+}
+/*******************************************************************************
+* mvDramIfGetFirstCS - find the  DRAM bank on the lower address
+*
+*
+* DESCRIPTION:
+*       This function return the fisrt CS on address 0
+*
+* INPUT:
+*		None.
+*
+* OUTPUT:
+*		None.
+*
+* RETURN:
+*       SDRAM_CS0 or SDRAM_CS2
+*
+*******************************************************************************/
+MV_U32 mvDramIfGetFirstCS(void)
+{
+	MV_DRAM_BANK_INFO bankInfo[MV_DRAM_MAX_CS];
+
+	if (DRAM_CS_Order[0] == N_A)
+	{
+		mvDramBankInfoGet(SDRAM_CS0, &bankInfo[SDRAM_CS0]);
+#ifdef MV_INCLUDE_SDRAM_CS2
+		mvDramBankInfoGet(SDRAM_CS2, &bankInfo[SDRAM_CS2]);
+#endif
+
+#ifdef MV_INCLUDE_SDRAM_CS2
+		if (bankInfo[SDRAM_CS0].size <  bankInfo[SDRAM_CS2].size)
+		{
+			DRAM_CS_Order[0] = SDRAM_CS2;
+			DRAM_CS_Order[1] = SDRAM_CS3;
+			DRAM_CS_Order[2] = SDRAM_CS0;
+			DRAM_CS_Order[3] = SDRAM_CS1;
+
+			return SDRAM_CS2;
+		}
+#endif
+		DRAM_CS_Order[0] = SDRAM_CS0;
+		DRAM_CS_Order[1] = SDRAM_CS1;
+#ifdef MV_INCLUDE_SDRAM_CS2
+		DRAM_CS_Order[2] = SDRAM_CS2;
+		DRAM_CS_Order[3] = SDRAM_CS3;
+#endif
+		return SDRAM_CS0;
+	}
+	return DRAM_CS_Order[0];
+}
+/*******************************************************************************
+* mvDramIfGetCSorder -
+*
+*
+* DESCRIPTION:
+*       This function return the fisrt CS on address 0
+*
+* INPUT:
+*		CS number.
+*
+* OUTPUT:
+*		CS order.
+*
+* RETURN:
+*       SDRAM_CS0 or SDRAM_CS2
+*
+* NOTE: mvDramIfGetFirstCS must be caled before this subroutine
+*******************************************************************************/
+MV_U32 mvDramIfGetCSorder(MV_U32 csOrder )
+{
+	return DRAM_CS_Order[csOrder];
+}
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIf.h b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIf.h
new file mode 100644
index 000000000000..3ceeae6f3b61
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIf.h
@@ -0,0 +1,172 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvDramIfh
+#define __INCmvDramIfh
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* includes */
+#include "ddr2/mvDramIfRegs.h"
+#include "ddr2/mvDramIfConfig.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+
+/* defines  */
+/* DRAM Timing parameters */
+#define SDRAM_TWR                    15  /* ns tWr */
+#define SDRAM_TRFC_64_512M_AT_200MHZ 70  /* ns tRfc for dens 64-512 @ 200MHz */
+#define SDRAM_TRFC_64_512M           75  /* ns tRfc for dens 64-512          */
+#define SDRAM_TRFC_1G                120 /* ns tRfc for dens 1GB             */
+#define SDRAM_TR2R_CYC               1   /* cycle for tR2r                   */
+
+#define CAL_AUTO_DETECT     0   /* Do not force CAS latancy (mvDramIfDetect) */
+#define ECC_DISABLE         1   /* Force ECC to Disable                      */
+#define ECC_ENABLE          0   /* Force ECC to ENABLE                       */
+/* typedefs */
+
+/* enumeration for memory types */
+typedef enum _mvMemoryType
+{
+    MEM_TYPE_SDRAM,
+    MEM_TYPE_DDR1,
+    MEM_TYPE_DDR2
+}MV_MEMORY_TYPE;
+
+/* enumeration for DDR2 supported CAS Latencies */
+typedef enum _mvDimmDdr2Cas
+{
+    DDR2_CL_3    = 0x08,
+    DDR2_CL_4    = 0x10,
+    DDR2_CL_5    = 0x20,
+    DDR2_CL_6    = 0x40,
+    DDR2_CL_FAULT
+} MV_DIMM_DDR2_CAS;
+
+
+typedef struct _mvDramBankInfo
+{
+    MV_MEMORY_TYPE  memoryType; 	/* DDR1, DDR2 or SDRAM */
+
+    /* DIMM dimensions */
+    MV_U32  numOfRowAddr;
+    MV_U32  numOfColAddr;
+    MV_U32  dataWidth;
+    MV_U32  errorCheckType;             /* ECC , PARITY..*/
+    MV_U32  sdramWidth;                 /* 4,8,16 or 32 */
+    MV_U32  errorCheckDataWidth;        /* 0 - no, 1 - Yes */
+    MV_U32  burstLengthSupported;
+    MV_U32  numOfBanksOnEachDevice;
+    MV_U32  suportedCasLatencies;
+    MV_U32  refreshInterval;
+
+    /* DIMM timing parameters */
+    MV_U32  minCycleTimeAtMaxCasLatPs;
+    MV_U32  minCycleTimeAtMaxCasLatMinus1Ps;
+    MV_U32  minCycleTimeAtMaxCasLatMinus2Ps;
+    MV_U32  minRowPrechargeTime;
+    MV_U32  minRowActiveToRowActive;
+    MV_U32  minRasToCasDelay;
+    MV_U32  minRasPulseWidth;
+    MV_U32  minWriteRecoveryTime;   /* DDR2 only */
+    MV_U32  minWriteToReadCmdDelay; /* DDR2 only */
+    MV_U32  minReadToPrechCmdDelay; /* DDR2 only */
+    MV_U32  minRefreshToActiveCmd;  /* DDR2 only */
+
+    /* Parameters calculated from the extracted DIMM information */
+    MV_U32  size;
+    MV_U32  deviceDensity;           	/* 16,64,128,256 or 512 Mbit */
+    MV_U32  numberOfDevices;
+
+    /* DIMM attributes (MV_TRUE for yes) */
+    MV_BOOL registeredAddrAndControlInputs;
+    MV_BOOL registeredDQMBinputs;
+
+}MV_DRAM_BANK_INFO;
+
+#include "ddr2/spd/mvSpd.h"
+
+/* mvDramIf.h API list */
+MV_VOID   mvDramIfBasicAsmInit(MV_VOID);
+MV_STATUS mvDramIfDetect(MV_U32 forcedCl, MV_BOOL eccDisable);
+MV_VOID   _mvDramIfConfig(int entryNum);
+
+MV_U32 mvDramIfBankSizeGet(MV_U32 bankNum);
+MV_U32 mvDramIfBankBaseGet(MV_U32 bankNum);
+MV_U32 mvDramIfSizeGet(MV_VOID);
+MV_U32 mvDramIfCalGet(void);
+MV_STATUS mvDramIfSingleBitErrThresholdSet(MV_U32 threshold);
+MV_VOID mvDramIfSelfRefreshSet(void);
+void mvDramIfShow(void);
+MV_U32 mvDramIfGetFirstCS(void);
+MV_U32 mvDramIfGetCSorder(MV_U32 csOrder );
+MV_U32 mvDramCsSizeGet(MV_U32 csNum);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INCmvDramIfh */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfBasicInit.S b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfBasicInit.S
new file mode 100644
index 000000000000..43fb4eb19a6e
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfBasicInit.S
@@ -0,0 +1,986 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#define	_ASMLANGUAGE
+#define MV_ASMLANGUAGE
+#include "mvSysHwConfig.h"
+#include "mvOsAsm.h"
+#include "boardEnv/mvBoardEnvSpec.h"
+#include "ctrlEnv/sys/mvCpuIfRegs.h"
+#include "mvDramIfConfig.h"
+#include "mvDramIfRegs.h"
+#include "pex/mvPexRegs.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#include "ctrlEnv/mvCtrlEnvAsm.h"
+#include "mvCommon.h"
+
+/* defines */
+
+#if defined(MV_STATIC_DRAM_ON_BOARD)
+.globl dramBoot1
+dramBoot1:
+        .word   0
+
+/******************************************************************************
+*
+*
+*
+*
+*******************************************************************************/
+#if defined(DB_MV78XX0) || defined(DB_MV88F632X)
+/* DDR2 boards 512MB 333MHz */
+#define STATIC_SDRAM0_BANK0_SIZE		0x1ffffff1 /*	0x1504	*/
+#define STATIC_SDRAM_CONFIG	     		0x43048C30 /*	0x1400  */
+#define STATIC_SDRAM_MODE	     		0x00000652 /*	0x141c  */
+#define STATIC_DUNIT_CTRL_LOW			0x38543000 /*   0x1404  */
+#define STATIC_DUNIT_CTRL_HI			0x0000FFFF /*   0x1424  */
+#define STATIC_SDRAM_ADDR_CTRL			0x00000088 /*   0x1410  */
+#define STATIC_SDRAM_TIME_CTRL_LOW		0x22125441 /*   0x1408  */
+#define STATIC_SDRAM_TIME_CTRL_HI		0x00000A29 /*   0x140c  */
+#define STATIC_SDRAM_ODT_CTRL_LOW	     	0x84210000 /*   0x1494  */
+#define STATIC_SDRAM_ODT_CTRL_HI	     	0x00000000 /*   0x1498  */
+#define STATIC_SDRAM_DUNIT_ODT_CTRL    		0x0000E80F /*   0x149c  */
+#define STATIC_SDRAM_EXT_MODE          		0x00000040 /*   0x1420  */
+#define STATIC_SDRAM_DDR2_TIMING_LO		0x00085520 /*   0x1428  */
+#define STATIC_SDRAM_DDR2_TIMING_HI		0x00008552 /*   0x147C  */
+
+#elif defined(RD_MV78XX0_AMC)
+/* On board DDR2 512MB 400MHz CL5 */
+#define STATIC_SDRAM0_BANK0_SIZE		0x1ffffff1 /*	0x1504	*/
+#define STATIC_SDRAM_CONFIG	     		0x43008C30 /*	0x1400  */
+#define STATIC_SDRAM_MODE	     		0x00000652 /*	0x141c  */
+#define STATIC_DUNIT_CTRL_LOW			0x38543000 /*   0x1404  */
+#define STATIC_DUNIT_CTRL_HI			0x0000F07F /*   0x1424  */
+#define STATIC_SDRAM_ADDR_CTRL			0x000000DD /*   0x1410  */
+#define STATIC_SDRAM_TIME_CTRL_LOW		0x23135441 /*   0x1408  */
+#define STATIC_SDRAM_TIME_CTRL_HI		0x00000A32 /*   0x140c  */
+#define STATIC_SDRAM_ODT_CTRL_LOW	     	0x84210000 /*   0x1494  */
+#define STATIC_SDRAM_ODT_CTRL_HI	     	0x00000000 /*   0x1498  */
+#define STATIC_SDRAM_DUNIT_ODT_CTRL    		0x0000EB0F /*   0x149c  */
+#define STATIC_SDRAM_EXT_MODE          		0x00000040 /*   0x1420  */
+#define STATIC_SDRAM_DDR2_TIMING_LO		0x00085520 /*   0x1428  */
+#define STATIC_SDRAM_DDR2_TIMING_HI		0x00008552 /*   0x147C  */
+
+#elif defined(RD_MV78XX0_H3C)
+/* DDR2 boards 512MB 333MHz */
+#define STATIC_SDRAM0_BANK0_SIZE		0x1ffffff1 /*	0x1504	*/
+#define STATIC_SDRAM_CONFIG	     		0x43048a25 /*	0x1400  */
+#define STATIC_SDRAM_MODE	     		0x00000652 /*	0x141c  */
+#define STATIC_DUNIT_CTRL_LOW			0x38543000 /*   0x1404  */
+#define STATIC_DUNIT_CTRL_HI			0x0000F07F /*   0x1424  */
+#define STATIC_SDRAM_ADDR_CTRL			0x00000088 /*   0x1410  */
+#define STATIC_SDRAM_TIME_CTRL_LOW		0x2202444e /*   0x1408  */
+#define STATIC_SDRAM_TIME_CTRL_HI		0x00000A22 /*   0x140c  */
+#define STATIC_SDRAM_ODT_CTRL_LOW	     	0x84210000 /*   0x1494  */
+#define STATIC_SDRAM_ODT_CTRL_HI	     	0x00000000 /*   0x1498  */
+#define STATIC_SDRAM_DUNIT_ODT_CTRL    		0x0000EB0F /*   0x149c  */
+#define STATIC_SDRAM_EXT_MODE          		0x00000040 /*   0x1420  */
+#define STATIC_SDRAM_DDR2_TIMING_LO		0x00085520 /*   0x1428  */
+#define STATIC_SDRAM_DDR2_TIMING_HI		0x00008552 /*   0x147C  */
+
+#elif defined(RD_MV78XX0_PCAC)
+/* DDR2 boards 256MB 200MHz */
+#define STATIC_SDRAM0_BANK0_SIZE		0x0ffffff1 /*	0x1504	*/
+#define STATIC_SDRAM_CONFIG	     		0x43000a25 /*	0x1400  */
+#define STATIC_SDRAM_MODE	     		0x00000652 /*	0x141c  */
+#define STATIC_DUNIT_CTRL_LOW			0x38543000 /*   0x1404  */
+#define STATIC_DUNIT_CTRL_HI			0x0000F07F /*   0x1424  */
+#define STATIC_SDRAM_ADDR_CTRL			0x000000DD /*   0x1410  */
+#define STATIC_SDRAM_TIME_CTRL_LOW		0x2202444e /*   0x1408  */
+#define STATIC_SDRAM_TIME_CTRL_HI		0x00000822 /*   0x140c  */
+#define STATIC_SDRAM_ODT_CTRL_LOW	     	0x84210000 /*   0x1494  */
+#define STATIC_SDRAM_ODT_CTRL_HI	     	0x00000000 /*   0x1498  */
+#define STATIC_SDRAM_DUNIT_ODT_CTRL    		0x0000EB0F /*   0x149c  */
+#define STATIC_SDRAM_EXT_MODE          		0x00000040 /*   0x1420  */
+#define STATIC_SDRAM_DDR2_TIMING_LO		0x00085520 /*   0x1428  */
+#define STATIC_SDRAM_DDR2_TIMING_HI		0x00008552 /*   0x147C  */
+
+#else
+/* DDR2 MV88F6281 boards 256MB 400MHz */
+#define STATIC_SDRAM0_BANK0_SIZE		0x0FFFFFF1 /*	0x1504	*/
+#define STATIC_SDRAM_CONFIG	     		0x43000c30 /*	0x1400  */
+#define STATIC_SDRAM_MODE	     		0x00000C52 /*	0x141c  */
+#define STATIC_DUNIT_CTRL_LOW			0x39543000 /*   0x1404  */
+#define STATIC_DUNIT_CTRL_HI			0x0000F1FF /*   0x1424  */
+#define STATIC_SDRAM_ADDR_CTRL			0x000000cc /*   0x1410  */
+#define STATIC_SDRAM_TIME_CTRL_LOW		0x22125451 /*   0x1408  */
+#define STATIC_SDRAM_TIME_CTRL_HI		0x00000A33 /*   0x140c  */
+#define STATIC_SDRAM_ODT_CTRL_LOW	     	0x003C0000 /*   0x1494  */
+#define STATIC_SDRAM_ODT_CTRL_HI	     	0x00000000 /*   0x1498  */
+#define STATIC_SDRAM_DUNIT_ODT_CTRL    		0x0000F80F /*   0x149c  */
+#define STATIC_SDRAM_EXT_MODE          		0x00000042 /*   0x1420  */
+#define STATIC_SDRAM_DDR2_TIMING_LO		0x00085520 /*   0x1428  */
+#define STATIC_SDRAM_DDR2_TIMING_HI		0x00008552 /*   0x147C  */
+#endif /* MV78XX0 */
+
+	.globl _mvDramIfStaticInit
+_mvDramIfStaticInit:
+
+	mov     r11, LR     		/* Save link register */
+	mov	r10, r2
+
+#ifdef MV78XX0
+        MV_REG_READ_ASM (r6, r5, SDRAM_DUNIT_CTRL_REG)
+	orr	r6, r6, #BIT4	/* Enable 2T mode */
+	bic	r6, r6, #BIT6	/* clear ctrlPos */
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_DUNIT_CTRL_REG)
+#endif
+
+	/*DDR SDRAM Initialization Control */
+	ldr	r6, =DSICR_INIT_EN
+	MV_REG_WRITE_ASM (r6, r1, DDR_SDRAM_INIT_CTRL_REG)
+2:	MV_REG_READ_ASM (r6, r1, DDR_SDRAM_INIT_CTRL_REG)
+         and    r6, r6, #DSICR_INIT_EN
+         cmp    r6, #0
+         bne 2b
+
+        /* If we boot from NAND jump to DRAM address */
+        mov     r5, #1
+        ldr     r6, =dramBoot1
+        str     r5, [r6]                /* We started executing from DRAM */
+
+        ldr     r6, dramBoot1
+        cmp     r6, #0
+        bne     1f
+
+	/* set all dram windows to 0 */
+	mov	r6, #0
+	MV_REG_WRITE_ASM(r6, r5, SDRAM_SIZE_REG(0,0))
+	MV_REG_WRITE_ASM(r6, r5, SDRAM_SIZE_REG(0,1))
+	MV_REG_WRITE_ASM(r6, r5, SDRAM_SIZE_REG(0,2))
+	MV_REG_WRITE_ASM(r6, r5, SDRAM_SIZE_REG(0,3))
+	ldr	r6, = STATIC_SDRAM0_BANK0_SIZE
+	MV_REG_WRITE_ASM(r6, r5, SDRAM_SIZE_REG(0,0))
+
+
+	/* set all dram configuration in temp registers */
+	ldr	r6, = STATIC_SDRAM0_BANK0_SIZE
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG0)
+	ldr	r6, = STATIC_SDRAM_CONFIG
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG1)
+	ldr	r6, = STATIC_SDRAM_MODE
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG2)
+	ldr	r6, = STATIC_DUNIT_CTRL_LOW
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG3)
+	ldr	r6, = STATIC_SDRAM_ADDR_CTRL
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG4)
+	ldr	r6, = STATIC_SDRAM_TIME_CTRL_LOW
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG5)
+	ldr	r6, = STATIC_SDRAM_TIME_CTRL_HI
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG6)
+	ldr	r6, = STATIC_SDRAM_ODT_CTRL_LOW
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG7)
+	ldr	r6, = STATIC_SDRAM_ODT_CTRL_HI
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG8)
+	ldr	r6, = STATIC_SDRAM_DUNIT_ODT_CTRL
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG9)
+	ldr	r6, = STATIC_SDRAM_EXT_MODE
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG10)
+	ldr	r6, = STATIC_SDRAM_DDR2_TIMING_LO
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG11)
+	ldr	r6, = STATIC_SDRAM_DDR2_TIMING_HI
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG12)
+#ifndef MV_NAND_BOOT
+	ldr	r6, = STATIC_DUNIT_CTRL_HI
+	MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG13)
+#endif
+
+	ldr	sp,=0
+	bl	_mvDramIfConfig
+	ldr	r0, =0
+#ifdef MV78XX0
+	bl	_mvDramIfEccMemInit
+#endif
+1:
+	mov 	r2, r10
+	mov     PC, r11         	/* r11 is saved link register */
+
+#else  /* #if defined(MV_STATIC_DRAM_ON_BOARD) */
+
+.globl dramBoot1
+dramBoot1:
+        .word   0
+
+/*******************************************************************************
+* mvDramIfBasicInit - Basic initialization of DRAM interface
+*
+* DESCRIPTION:
+*       The function will initialize the DRAM for basic usage. The function
+*       will use the TWSI assembly API to extract DIMM parameters according
+*       to which DRAM interface will be initialized.
+*       The function referes to the following DRAM parameters:
+*       1) DIMM is registered or not.
+*       2) DIMM width detection.
+*       3) DIMM density.
+*
+* INPUT:
+*       r3 - required size for initial DRAM.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*       Note:
+*       r4 holds I2C EEPROM address
+*       r5 holds SDRAM register base address
+*	r7 holds returned values
+*       r8 holds SDRAM various configuration registers value.
+*       r11 holds return function address.
+*******************************************************************************/
+/* Setting the offsets of the I2C registers */
+#define DIMM_TYPE_OFFSET	      2
+#define NUM_OF_ROWS_OFFSET            3
+#define NUM_OF_COLS_OFFSET            4
+#define NUM_OF_RANKS		      5
+#define DIMM_CONFIG_TYPE             11
+#define SDRAM_WIDTH_OFFSET           13
+#define NUM_OF_BANKS_OFFSET          17
+#define SUPPORTED_CL_OFFSET          18
+#define DIMM_TYPE_INFO_OFFSET        20         /* DDR2 only    */
+#define SDRAM_MODULES_ATTR_OFFSET    21
+#define RANK_SIZE_OFFSET             31
+
+#define DRAM_DEV_DENSITY_128M         128
+#define DRAM_DEV_DENSITY_256M         256
+#define DRAM_DEV_DENSITY_512M         512
+#define DRAM_DEV_DENSITY_1G          1024
+#define DRAM_DEV_DENSITY_2G          2048
+
+#define DRAM_RANK_DENSITY_128M       0x20
+#define DRAM_RANK_DENSITY_256M       0x40
+#define DRAM_RANK_DENSITY_512M       0x80
+#define DRAM_RANK_DENSITY_1G	     0x1
+#define DRAM_RANK_DENSITY_2G	     0x2
+
+       .globl _mvDramIfBasicInit
+       .extern _i2cInit
+_mvDramIfBasicInit:
+
+        mov     r11, LR     		/* Save link register */
+
+	/* Set Dunit high control register	      */
+        MV_REG_READ_ASM (r6, r5, SDRAM_DUNIT_CTRL_HI_REG)
+	orr	r6, r6, #BIT7 /* SDRAM__D2P_EN */
+	orr	r6, r6, #BIT8 /* SDRAM__P2D_EN */
+#ifdef MV78XX0
+	orr	r6, r6, #BIT9 /* SDRAM__ADD_HALF_FCC_EN */
+	orr	r6, r6, #BIT10 /* SDRAM__PUP_ZERO_SKEW_EN */
+	orr	r6, r6, #BIT11 /* SDRAM__WR_MASH_DELAY_EN */
+#endif
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_DUNIT_CTRL_HI_REG)
+
+#ifdef MV78XX0
+        MV_REG_READ_ASM (r6, r5, SDRAM_DUNIT_CTRL_REG)
+	orr	r6, r6, #BIT4	/* Enable 2T mode */
+	bic	r6, r6, #BIT6	/* clear ctrlPos */
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_DUNIT_CTRL_REG)
+#endif
+
+	/*DDR SDRAM Initialization Control */
+	ldr	r6, =DSICR_INIT_EN
+	MV_REG_WRITE_ASM (r6, r1, DDR_SDRAM_INIT_CTRL_REG)
+2:	MV_REG_READ_ASM (r6, r1, DDR_SDRAM_INIT_CTRL_REG)
+         and    r6, r6, #DSICR_INIT_EN
+         cmp    r6, #0
+         bne 2b
+
+        mov     r5, #1
+        ldr     r8, =dramBoot1
+        str     r5, [r8]                /* We started executing from DRAM */
+
+        /* If we boot from NAND jump to DRAM address */
+        ldr     r8, dramBoot1
+        cmp     r8, #0
+        movne   pc, r11
+
+        bl      _i2cInit                /* Initialize TWSI master             */
+
+        /* Check if we have more then 1 dimm */
+	ldr	r6, =0
+	MV_REG_WRITE_ASM (r6, r1, DRAM_BUF_REG14)
+#ifdef MV78XX0
+	bl _is_Second_Dimm_Exist
+	beq single_dimm
+	ldr	r6, =1
+	MV_REG_WRITE_ASM (r6, r1, DRAM_BUF_REG14)
+single_dimm:
+        bl      _i2cInit                /* Initialize TWSI master             */
+#endif
+
+        /* Get default SDRAM Config values */
+        MV_REG_READ_ASM (r8, r5, SDRAM_CONFIG_REG)
+
+        /* Get registered/non registered info from DIMM */
+	bl  	_is_Registered
+        beq     nonRegistered
+
+setRegistered:
+        orr     r8, r8, #SDRAM_REGISTERED   /* Set registered bit(17)         */
+nonRegistered:
+#ifdef MV78XX0
+        /* Get ECC/non ECC info from DIMM */
+	bl  	_is_Ecc
+        beq     setConfigReg
+
+setEcc:
+        orr     r8, r8, #SDRAM_ECC_EN   /* Set ecc bit(18)         */
+#endif
+setConfigReg:
+        MV_REG_WRITE_ASM (r8, r5, DRAM_BUF_REG1)
+
+        /* Set maximum CL supported by DIMM */
+	bl	_get_CAL
+
+        /* r7 is DIMM supported CAS (e.g: 3 --> 0x1C)                         */
+        clz     r6, r7
+        rsb     r6, r6, #31     /* r6 = the bit number of MAX CAS supported   */
+
+casDdr2:
+	ldr     r7, =0x41        /* stBurstInDel|stBurstOutDel field value     */
+	ldr     r3, =0x53       /* stBurstInDel|stBurstOutDel registered value*/
+	ldr     r8, =0x32      /* Assuming MAX CL = 3           */
+        cmp     r6, #3          /* If CL = 3 break              */
+        beq     casDdr2Cont
+
+	ldr     r7, =0x53        /* stBurstInDel|stBurstOutDel field value     */
+	ldr     r3, =0x65       /* stBurstInDel|stBurstOutDel registered value*/
+	ldr     r8, =0x42      /* Assuming MAX CL = 4           */
+        cmp     r6, #4          /* If CL = 4 break              */
+        beq     casDdr2Cont
+
+	ldr     r7, =0x65        /* stBurstInDel|stBurstOutDel field value     */
+	ldr     r3, =0x77       /* stBurstInDel|stBurstOutDel registered value*/
+	ldr     r8, =0x52      /* Assuming MAX CL = 5           */
+        cmp     r6, #5          /* If CL = 5 break              */
+        beq     casDdr2Cont
+
+	ldr     r7, =0x77        /* stBurstInDel|stBurstOutDel field value     */
+	ldr     r3, =0x89       /* stBurstInDel|stBurstOutDel registered value*/
+	ldr     r8, =0x62      /* Assuming MAX CL = 6           */
+        cmp     r6, #6          /* If CL = 5 break              */
+        beq     casDdr2Cont
+
+        /* This is an error. return */
+        b       exit_ddrAutoConfig      /* This is an error !!  */
+casDdr2Cont:
+
+        /* Get default SDRAM Mode values */
+        MV_REG_READ_ASM (r6, r5, SDRAM_MODE_REG)
+        bic     r6, r6, #(BIT6 | BIT5 | BIT4) /* Clear CL filed */
+	orr	r6, r6, r8
+        MV_REG_WRITE_ASM (r6, r5, DRAM_BUF_REG2)
+
+	/* Set Dunit control register according to max CL detected	      */
+        MV_REG_READ_ASM (r6, r5, DRAM_BUF_REG1)
+	tst	r6, #SDRAM_REGISTERED
+	beq	setDunitReg
+	mov	r7, r3
+
+setDunitReg:
+#ifdef MV78XX0
+        /* Set SDRAM Extended Mode register for double DIMM */
+	/* Check DRAM frequency for more then 267MHz set ODT Rtt to 50ohm */
+
+        MV_REG_READ_ASM (r4, r5, CPU_RESET_SAMPLE_L_REG)
+	ldr	r5, =MSAR_SYSCLCK_MASK
+	and	r4, r4, r5
+	ldr	r5, =MSAR_SYSCLCK_333
+	cmp	r4, r5
+	ble	Clock333
+	add r7, r7, #0x10
+Clock333:
+#endif
+
+        MV_REG_READ_ASM (r6, r5, SDRAM_DUNIT_CTRL_REG)
+	bic	r6, r6, #(0xff << 20) /* Clear SBout and SBin */
+	orr	r6, r6, #BIT4	/* Enable 2T mode */
+	bic	r6, r6, #BIT6	/* clear ctrlPos */
+	orr	r6, r6, r7, LSL #20
+        MV_REG_WRITE_ASM (r6, r5, DRAM_BUF_REG3)
+
+	/* Set Dunit high control register	      */
+        MV_REG_READ_ASM (r6, r5, SDRAM_DUNIT_CTRL_HI_REG)
+	orr	r6, r6, #BIT7 /* SDRAM__D2P_EN */
+	orr	r6, r6, #BIT8 /* SDRAM__P2D_EN */
+#ifdef MV78XX0
+	orr	r6, r6, #BIT9 /* SDRAM__ADD_HALF_FCC_EN */
+	orr	r6, r6, #BIT10 /* SDRAM__PUP_ZERO_SKEW_EN */
+	orr	r6, r6, #BIT11 /* SDRAM__WR_MASH_DELAY_EN */
+#endif
+        MV_REG_WRITE_ASM (r6, r5, DRAM_BUF_REG13)
+
+        /* DIMM density configuration*/
+        /* Density = (1 << (rowNum + colNum)) * dramWidth * dramBankNum       */
+Density:
+	/* Get bank 0 and 1 density */
+	ldr	r6, =0
+	bl 	_getDensity
+
+	mov 	r8, r7
+        mov     r8, r8, LSR #20 /* Move density 20 bits to the right  */
+                                /* For example 0x10000000 --> 0x1000 */
+
+        mov     r3, #(SDRAM_DSIZE_256Mb(0) | SDRAM_DSIZE_256Mb(1))
+        cmp     r8, #DRAM_DEV_DENSITY_256M
+        beq     get_bank_2_density
+
+        mov     r3, #(SDRAM_DSIZE_512Mb(0) | SDRAM_DSIZE_512Mb(1))
+        cmp     r8, #DRAM_DEV_DENSITY_512M
+        beq     get_bank_2_density
+
+        mov     r3, #(SDRAM_DSIZE_1Gb(0) | SDRAM_DSIZE_1Gb(1))
+        cmp     r8, #DRAM_DEV_DENSITY_1G
+        beq     get_bank_2_density
+
+        mov     r3, #(SDRAM_DSIZE_2Gb(0) | SDRAM_DSIZE_2Gb(1))
+        cmp     r8, #DRAM_DEV_DENSITY_2G
+        beq     get_bank_2_density
+
+        /* This is an error. return */
+        b       exit_ddrAutoConfig
+
+get_bank_2_density:
+	/* Check for second dimm */
+	MV_REG_READ_ASM (r6, r1, DRAM_BUF_REG14)
+	cmp	r6, #1
+	bne 	get_width
+
+	/* Get bank 2 and 3 density */
+	ldr	r6, =2
+	bl 	_getDensity
+
+	mov 	r8, r7
+        mov     r8, r8, LSR #20 /* Move density 20 bits to the right  */
+                                /* For example 0x10000000 --> 0x1000 */
+
+        orr     r3, r3, #(SDRAM_DSIZE_256Mb(2) | SDRAM_DSIZE_256Mb(3))
+        cmp     r8, #DRAM_DEV_DENSITY_256M
+        beq     get_width
+
+        and     r3, r3, #~(SDRAM_DSIZE_MASK(2) | SDRAM_DSIZE_MASK(3))
+        orr     r3, r3, #(SDRAM_DSIZE_512Mb(2) | SDRAM_DSIZE_512Mb(3))
+        cmp     r8, #DRAM_DEV_DENSITY_512M
+        beq     get_width
+
+        and     r3, r3, #~(SDRAM_DSIZE_MASK(2) | SDRAM_DSIZE_MASK(3))
+        orr     r3, r3, #(SDRAM_DSIZE_1Gb(2) | SDRAM_DSIZE_1Gb(3))
+        cmp     r8, #DRAM_DEV_DENSITY_1G
+        beq     get_width
+
+        and     r3, r3, #~(SDRAM_DSIZE_MASK(2) | SDRAM_DSIZE_MASK(3))
+        orr     r3, r3, #(SDRAM_DSIZE_2Gb(2) | SDRAM_DSIZE_2Gb(3))
+        cmp     r8, #DRAM_DEV_DENSITY_2G
+        beq     get_width
+
+        /* This is an error. return */
+        b       exit_ddrAutoConfig
+
+	/* Get SDRAM width */
+get_width:
+	/* Get bank 0 and 1 width */
+	ldr	r6, =0
+	bl 	_get_width
+
+        cmp     r7, #8           /* x8 devices   */
+        beq     get_bank_2_width
+
+        orr     r3, r3, #(SDRAM_ADDRSEL_X16(0) | SDRAM_ADDRSEL_X16(1)) /* x16 devices  */
+        cmp     r7, #16
+        beq     get_bank_2_width
+
+        /* This is an error. return */
+        b       exit_ddrAutoConfig
+
+get_bank_2_width:
+	/* Check for second dimm */
+	MV_REG_READ_ASM (r6, r1, DRAM_BUF_REG14)
+	cmp	r6, #1
+	bne 	densCont
+
+	/* Get bank 2 and 3 width */
+	ldr	r6, =2
+	bl 	_get_width
+
+        cmp     r7, #8           /* x8 devices   */
+        beq     densCont
+
+        orr     r3, r3, #(SDRAM_ADDRSEL_X16(2) | SDRAM_ADDRSEL_X16(3)) /* x16 devices  */
+        cmp     r7, #16
+        beq     densCont
+
+        /* This is an error. return */
+        b       exit_ddrAutoConfig
+
+densCont:
+        MV_REG_WRITE_ASM (r3, r5, DRAM_BUF_REG4)
+
+        /* Set SDRAM timing control low register */
+	ldr	r4, =SDRAM_TIMING_CTRL_LOW_REG_DEFAULT
+        /* MV_REG_READ_ASM (r4, r5, SDRAM_TIMING_CTRL_LOW_REG) */
+        MV_REG_WRITE_ASM(r4, r5, DRAM_BUF_REG5)
+
+        /* Set SDRAM timing control high register */
+	ldr	r6, =SDRAM_TIMING_CTRL_HIGH_REG_DEFAULT
+
+    MV_REG_READ_ASM (r4, r5, CPU_RESET_SAMPLE_L_REG)
+	ldr	r5, =MSAR_SYSCLCK_MASK
+	and	r4, r4, r5
+	ldr	r5, =MSAR_SYSCLCK_333
+	cmp	r4, r5
+	blt	timingHighClock333
+    orr r6, r6, #BIT9
+
+timingHighClock333:
+    /* MV_REG_READ_ASM (r6, r5, SDRAM_TIMING_CTRL_HIGH_REG) */
+    MV_REG_WRITE_ASM(r6, r5, DRAM_BUF_REG6)
+
+	/* Check for second dimm */
+	MV_REG_READ_ASM (r6, r1, DRAM_BUF_REG14)
+	cmp	r6, #1
+	bne 	single_dimm_odt
+
+        /* Set SDRAM ODT control low register for double DIMM*/
+        ldr	r4, =DDR2_ODT_CTRL_LOW_CS0_CS1_CS2_CS3_DV
+        MV_REG_WRITE_ASM(r4, r5, DRAM_BUF_REG7)
+
+        /* Set DUNIT ODT control register for double DIMM */
+        ldr	r4, =DDR2_DUNIT_ODT_CTRL_CS0_CS1_CS2_CS3_DV
+        MV_REG_WRITE_ASM(r4, r5, DRAM_BUF_REG9)
+
+#ifdef MV78XX0
+        /* Set SDRAM Extended Mode register for double DIMM */
+	/* Check DRAM frequency for more then 267MHz set ODT Rtt to 50ohm */
+
+        MV_REG_READ_ASM (r4, r5, CPU_RESET_SAMPLE_L_REG)
+	ldr	r5, =MSAR_SYSCLCK_MASK
+	and	r4, r4, r5
+	ldr	r5, =MSAR_SYSCLCK_267
+	cmp	r4, r5
+	beq	slow_dram_clock_rtt
+	ldr	r5, =MSAR_SYSCLCK_300
+	cmp	r4, r5
+	beq	slow_dram_clock_rtt
+	ldr	r5, =MSAR_SYSCLCK_333
+	cmp	r4, r5
+	beq	fast_dram_clock_rtt
+	ldr	r5, =MSAR_SYSCLCK_400
+	cmp	r4, r5
+	beq	fast_dram_clock_rtt
+
+	b	slow_dram_clock_rtt
+
+fast_dram_clock_rtt:
+        ldr	r4, =DDR_SDRAM_EXT_MODE_FAST_CS0_CS1_CS2_CS3_DV
+        MV_REG_WRITE_ASM(r4, r5, DRAM_BUF_REG10)
+	b odt_config_end
+#endif
+slow_dram_clock_rtt:
+        ldr	r4, =DDR_SDRAM_EXT_MODE_CS0_CS1_CS2_CS3_DV
+        MV_REG_WRITE_ASM(r4, r5, DRAM_BUF_REG10)
+	b odt_config_end
+
+single_dimm_odt:
+        /* Set SDRAM ODT control low register */
+        ldr	r4, =DDR2_ODT_CTRL_LOW_CS0_CS1_DV
+        MV_REG_WRITE_ASM(r4, r5, DRAM_BUF_REG7)
+
+        /* Set DUNIT ODT control register */
+        ldr	r4, =DDR2_DUNIT_ODT_CTRL_CS0_CS1_DV
+        MV_REG_WRITE_ASM(r4, r5, DRAM_BUF_REG9)
+
+        /* Set SDRAM Extended Mode register */
+        ldr	r4, =DDR_SDRAM_EXT_MODE_CS0_CS1_DV
+        MV_REG_WRITE_ASM(r4, r5, DRAM_BUF_REG10)
+
+odt_config_end:
+        /* SDRAM ODT control high register is left as default */
+        MV_REG_READ_ASM (r4, r5, DDR2_SDRAM_ODT_CTRL_HIGH_REG)
+        MV_REG_WRITE_ASM(r4, r5, DRAM_BUF_REG8)
+
+        /*Read CL and set the DDR2 registers accordingly */
+        MV_REG_READ_ASM (r6, r5, DRAM_BUF_REG2)
+        and r6, r6, #SDRAM_CL_MASK
+        mov r4, r6
+        orr r4, r4, r6, LSL #4
+        orr r4, r4, r6, LSL #8
+        orr r4, r4, r6, LSL #12
+        mov r5, #0x30000
+        add r4, r4, r5
+        sub r4, r4, #0x30
+        /* Set SDRAM Ddr2 Timing Low register */
+        MV_REG_WRITE_ASM(r4, r5, DRAM_BUF_REG11)
+
+        /* Set SDRAM Ddr2 Timing High register */
+        mov r4, r4, LSR #4
+        MV_REG_WRITE_ASM(r4, r5, DRAM_BUF_REG12)
+
+timeParamDone:
+	/* Close all windows */
+        MV_REG_READ_ASM (r6, r5, SDRAM_SIZE_REG(0,0))
+        and	r6, r6,#~SCSR_SIZE_MASK
+        and	r6, r6,#~1
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(0,0))
+        MV_REG_READ_ASM (r6, r5, SDRAM_SIZE_REG(0,1))
+        and	r6, r6,#~SCSR_SIZE_MASK
+        and	r6, r6,#~1
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(0,1))
+        MV_REG_READ_ASM (r6, r5, SDRAM_SIZE_REG(0,2))
+        and	r6, r6,#~SCSR_SIZE_MASK
+        and	r6, r6,#~1
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(0,2))
+        MV_REG_READ_ASM (r6, r5, SDRAM_SIZE_REG(0,3))
+        and	r6, r6,#~SCSR_SIZE_MASK
+        and	r6, r6,#~1
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(0,3))
+
+        /* Set sdram bank 0 size and enable it */
+	ldr	r6, =0
+	bl _mvDramIfGetDimmSizeFromSpd
+#ifdef MV78XX0
+	/* Check DRAM width */
+        MV_REG_READ_ASM (r4, r5, SDRAM_CONFIG_REG)
+	ldr	r5, =SDRAM_DWIDTH_MASK
+	and	r4, r4, r5
+	ldr	r5, =SDRAM_DWIDTH_64BIT
+	cmp	r4, r5
+	beq	dram_64bit_width
+	/* Utilize only 32bit width */
+	mov	r8, r8, LSR #1
+#else
+	/* Utilize only 16bit width */
+	mov	r8, r8, LSR #2
+#endif
+dram_64bit_width:
+	/* Update first dimm size return value R8 */
+        MV_REG_READ_ASM (r5, r6, SDRAM_SIZE_REG(0,0))
+        ldr	r6, =~SCSR_SIZE_MASK
+	and	r5, r5, r6
+	orr	r5, r5, r8
+        MV_REG_WRITE_ASM(r5, r8, SDRAM_SIZE_REG(0,0))
+
+	/* Clear bank 2 size */
+        MV_REG_READ_ASM (r6, r5, SDRAM_SIZE_REG(0,2))
+        and	r6, r6,#~SCSR_SIZE_MASK
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(0,2))
+
+	/* Check for second dimm */
+	MV_REG_READ_ASM (r6, r1, DRAM_BUF_REG14)
+	cmp	r6, #1
+	bne 	defualt_order
+
+        /* Set sdram bank 2 size */
+	ldr	r6, =2
+	bl _mvDramIfGetDimmSizeFromSpd
+#ifdef MV78XX0
+	/* Check DRAM width */
+        MV_REG_READ_ASM (r4, r5, SDRAM_CONFIG_REG)
+	ldr	r5, =SDRAM_DWIDTH_MASK
+	and	r4, r4, r5
+	ldr	r5, =SDRAM_DWIDTH_64BIT
+	cmp	r4, r5
+	beq	dram_64bit_width2
+	/* Utilize only 32bit width */
+	mov	r8, r8, LSR #1
+#else
+	/* Utilize only 16bit width */
+	mov	r8, r8, LSR #2
+#endif
+dram_64bit_width2:
+	/* Update first dimm size return value R8 */
+        MV_REG_READ_ASM (r5, r6, SDRAM_SIZE_REG(0,2))
+        ldr	r6, =~SCSR_SIZE_MASK
+	and	r5, r5, r6
+	orr	r5, r5, r8
+        MV_REG_WRITE_ASM(r5, r8, SDRAM_SIZE_REG(0,2))
+
+	/* Close windows 1 and 3 */
+        MV_REG_READ_ASM (r6, r5, SDRAM_SIZE_REG(0,1))
+        and	r6, r6,#~1
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(0,1))
+        MV_REG_READ_ASM (r6, r5, SDRAM_SIZE_REG(0,3))
+        and	r6, r6,#~1
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(0,3))
+
+	/* Check dimm size for setting dram bank order */
+        MV_REG_READ_ASM (r6, r5, SDRAM_SIZE_REG(0,0))
+        MV_REG_READ_ASM (r4, r5, SDRAM_SIZE_REG(0,2))
+        and	r6, r6,#SCSR_SIZE_MASK
+        and	r4, r4,#SCSR_SIZE_MASK
+	cmp	r6, r4
+	bge	defualt_order
+
+	/* Bank 2 is biger then bank 0 */
+	ldr	r6,=0
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_BASE_ADDR_REG(0,2))
+
+	/* Open win 2 */
+        MV_REG_READ_ASM (r6, r5, SDRAM_SIZE_REG(0,2))
+        orr	r6, r6,#1
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(0,2))
+
+	ldr	sp,=0
+	bl	_mvDramIfConfig
+#ifdef MV78XX0
+	/* Init ECC on CS 2 */
+	ldr	r0, =2
+	bl	_mvDramIfEccMemInit
+#endif
+        mov     PC, r11         /* r11 is saved link register */
+
+defualt_order:
+
+	/* Open win 0 */
+        MV_REG_READ_ASM (r6, r5, SDRAM_SIZE_REG(0,0))
+        orr	r6, r6,#1
+        MV_REG_WRITE_ASM (r6, r5, SDRAM_SIZE_REG(0,0))
+
+	ldr	sp,=0
+	bl	_mvDramIfConfig
+#ifdef MV78XX0
+	/* Init ECC on CS 0 */
+	ldr	r0, =0
+	bl	_mvDramIfEccMemInit
+#endif
+exit_ddrAutoConfig:
+        mov     PC, r11         /* r11 is saved link register */
+
+
+/***************************************************************************************/
+/*       r4 holds I2C EEPROM address
+ *       r7 holds I2C EEPROM offset parameter for i2cRead and its --> returned value
+ *       r8 holds SDRAM various configuration registers value.
+ *	r13 holds Link register
+ */
+/**************************/
+_getDensity:
+	mov     r13, LR                            /* Save link register */
+
+	/* Read SPD rank size from DIMM0 */
+        mov     r4, #MV_BOARD_DIMM0_I2C_ADDR       /* reading from DIMM0      */
+
+	cmp	r6, #0
+	beq	1f
+
+	/* Read SPD rank size from DIMM1 */
+        mov     r4, #MV_BOARD_DIMM1_I2C_ADDR /* reading from DIMM1            */
+
+1:
+        mov     r7, #NUM_OF_ROWS_OFFSET            /* offset  3               */
+        bl      _i2cRead
+        mov     r8, r7                             /* r8 save number of rows  */
+
+        mov     r7, #NUM_OF_COLS_OFFSET            /* offset  4               */
+        bl      _i2cRead
+        add     r8, r8, r7                         /* r8 = number of rows + number of col */
+
+        mov     r7, #0x1
+        mov     r8, r7, LSL r8                     /* r8 = (1 << r8)          */
+
+        mov     r7, #SDRAM_WIDTH_OFFSET            /* offset 13 */
+        bl      _i2cRead
+        mul     r8, r7, r8
+
+        mov     r7, #NUM_OF_BANKS_OFFSET           /* offset 17               */
+        bl      _i2cRead
+        mul     r7, r8, r7
+
+	mov     PC, r13
+
+/**************************/
+_get_width:
+	mov     r13, LR                 /* Save link register */
+
+	/* Read SPD rank size from DIMM0 */
+        mov     r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0            */
+
+	cmp	r6, #0
+	beq	1f
+
+	/* Read SPD rank size from DIMM1 */
+        mov     r4, #MV_BOARD_DIMM1_I2C_ADDR /* reading from DIMM1            */
+
+1:
+        /* Get SDRAM width (SPD offset 13) */
+        mov     r7, #SDRAM_WIDTH_OFFSET
+        bl      _i2cRead                /* result in r7                       */
+
+	mov     PC, r13
+
+/**************************/
+_get_CAL:
+	mov     r13, LR                 /* Save link register */
+
+        /* Set maximum CL supported by DIMM */
+        mov     r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0            */
+        mov     r7, #SUPPORTED_CL_OFFSET     /* offset  18 */
+        bl      _i2cRead
+
+	mov     PC, r13
+
+/**************************/
+/* R8 - sdram configuration register.
+ * Return value in flag if no-registered then Z-flag is set
+ */
+_is_Registered:
+	mov     r13, LR                 /* Save link register */
+#if defined(MV645xx)
+        /* Get registered/non registered info from DIMM */
+        tst     r8, #SDRAM_DTYPE_DDR2
+        bne     regDdr2
+
+regDdr1:
+        mov     r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0            */
+        mov     r7, #SDRAM_MODULES_ATTR_OFFSET
+        bl      _i2cRead                /* result in r7                       */
+
+        tst     r7, #0x2
+	b	exit
+#endif
+regDdr2:
+        mov     r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0            */
+        mov     r7, #DIMM_TYPE_INFO_OFFSET
+        bl      _i2cRead                /* result in r7                       */
+
+        tst     r7, #0x11               /* DIMM type = regular RDIMM (0x01)   */
+                                        /* or Mini-RDIMM (0x10)               */
+exit:
+        mov     PC, r13
+
+
+/**************************/
+/* Return value in flag if no-Ecc then Z-flag is set */
+_is_Ecc:
+	mov     r13, LR                 /* Save link register */
+
+        mov     r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0            */
+        mov     r7, #DIMM_CONFIG_TYPE
+        bl      _i2cRead                /* result in r7                       */
+
+        tst     r7, #0x2               /* bit 1 -> Data ECC */
+        mov     PC, r13
+
+/**************************/
+/* Return value in flag if no second DIMM then Z-flag is set */
+_is_Second_Dimm_Exist:
+	mov     r13, LR                 /* Save link register */
+
+        mov     r4, #MV_BOARD_DIMM1_I2C_ADDR /* reading from DIMM0            */
+        mov     r7, #DIMM_TYPE_OFFSET
+        bl      _i2cRead                /* result in r7                       */
+
+	tst     r7, #0x8               /* bit3 is '1' -> DDR 2 */
+        mov     PC, r13
+
+/*******************************************************************************
+* _mvDramIfGetDimmSizeFromSpd  - read bank 0 dram's size
+*
+* DESCRIPTION:
+*       The function will read the bank 0 dram size(SPD version 1.0 and above )
+*
+* INPUT:
+*       r6 - dram bank number.
+*
+* OUTPUT:
+*	none
+*/
+_mvDramIfGetDimmSizeFromSpd:
+
+	mov     r13, LR                 /* Save link register */
+
+	/* Read SPD rank size from DIMM0 */
+        mov     r4, #MV_BOARD_DIMM0_I2C_ADDR /* reading from DIMM0            */
+
+	cmp	r6, #0
+	beq	1f
+
+	/* Read SPD rank size from DIMM1 */
+        mov     r4, #MV_BOARD_DIMM1_I2C_ADDR /* reading from DIMM1            */
+
+1:
+        mov     r7, #RANK_SIZE_OFFSET	/* offset  31 */
+        bl      _i2cRead
+
+pass_read:
+	ldr     r8, =(0x7 << SCSR_SIZE_OFFS)
+        cmp	r7, #DRAM_RANK_DENSITY_128M
+        beq     endDimmSize
+
+	ldr     r8, =(0xf << SCSR_SIZE_OFFS)
+        cmp	r7, #DRAM_RANK_DENSITY_256M
+        beq     endDimmSize
+
+        ldr     r8, =(0x1f << SCSR_SIZE_OFFS)
+        cmp	r7, #DRAM_RANK_DENSITY_512M
+        beq     endDimmSize
+
+        ldr     r8, =(0x3f << SCSR_SIZE_OFFS)
+        cmp	r7, #DRAM_RANK_DENSITY_1G
+        beq     endDimmSize
+
+        ldr     r8, =(0x7f  << SCSR_SIZE_OFFS)     /* DRAM_RANK_DENSITY_2G */
+endDimmSize:
+        mov     PC, r13
+#endif
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfConfig.S b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfConfig.S
new file mode 100644
index 000000000000..751916d71508
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfConfig.S
@@ -0,0 +1,527 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/*******************************************************************************
+* mvDramIfBasicAsm.s
+*
+* DESCRIPTION:
+*       Memory full detection and best timing configuration is done in
+*       C code. C runtime environment requires a stack. This module API
+*       initialize DRAM interface chip select 0 for basic functionality for
+*       the use of stack.
+*       The module API assumes DRAM information is stored in I2C EEPROM reside
+*       in a given I2C address MV_BOARD_DIMM0_I2C_ADDR. The I2C EEPROM
+*       internal data structure is assumed to be orgenzied in common DRAM
+*       vendor SPD structure.
+*       NOTE: DFCDL values are assumed to be already initialized prior to
+*       this module API activity.
+*
+*
+* DEPENDENCIES:
+*       None.
+*
+*******************************************************************************/
+
+/* includes */
+#define	_ASMLANGUAGE
+#define MV_ASMLANGUAGE
+#include "mvOsAsm.h"
+#include "mvSysHwConfig.h"
+#include "mvDramIfRegs.h"
+#include "mvDramIfConfig.h"
+#include "ctrlEnv/sys/mvCpuIfRegs.h"
+#include "pex/mvPexRegs.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#include "mvCommon.h"
+
+/* defines  */
+
+/* locals   */
+.data
+.globl _mvDramIfConfig
+.text
+.globl _mvDramIfMemInit
+
+/*******************************************************************************
+* _mvDramIfConfig - Basic DRAM interface initialization.
+*
+* DESCRIPTION:
+*       The function will initialize the following DRAM parameters using the
+*       values prepared by mvDramIfDetect routine. Values are located
+*       in predefined registers.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+
+_mvDramIfConfig:
+
+        /* Save register on stack */
+	cmp	sp, #0
+	beq	no_stack_s
+save_on_stack:
+        stmdb	sp!, {r1, r2, r3, r4}
+no_stack_s:
+
+	/* Dunit FTDLL Configuration Register */
+	/* 0) Write to SDRAM FTDLL coniguration register */
+        ldr     r4, = SDRAM_FTDLL_REG_DEFAULT_LEFT;
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_FTDLL_CONFIG_LEFT_REG)
+        str     r4, [r1]
+        ldr     r4, = SDRAM_FTDLL_REG_DEFAULT_RIGHT;
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_FTDLL_CONFIG_RIGHT_REG)
+        str     r4, [r1]
+        ldr     r4, = SDRAM_FTDLL_REG_DEFAULT_UP;
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_FTDLL_CONFIG_UP_REG)
+        str     r4, [r1]
+
+	/* 1) Write to SDRAM coniguration register */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG1)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_CONFIG_REG)
+        str     r4, [r1]
+
+	/* 2) Write Dunit control low register */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG3)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_DUNIT_CTRL_REG)
+        str     r4, [r1]
+
+	/* 2) Write Dunit control high register */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG13)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_DUNIT_CTRL_HI_REG)
+        str     r4, [r1]
+
+        /* 3) Write SDRAM address control register */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG4)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_ADDR_CTRL_REG)
+        str     r4, [r1]
+#if defined(MV_STATIC_DRAM_ON_BOARD)
+        /* 4) Write SDRAM bank 0 size register */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG0)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_SIZE_REG(0,0))
+        str     r4, [r1]
+#endif
+
+        /* 5) Write SDRAM open pages control register */
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_OPEN_PAGE_CTRL_REG)
+        ldr     r4, =SDRAM_OPEN_PAGES_CTRL_REG_DV
+        str     r4, [r1]
+
+        /* 6) Write SDRAM timing Low register */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG5)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_TIMING_CTRL_LOW_REG)
+        str     r4, [r1]
+
+        /* 7) Write SDRAM timing High register */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG6)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_TIMING_CTRL_HIGH_REG)
+        str     r4, [r1]
+
+        /* Config DDR2 On Die Termination (ODT) registers */
+        /* Write SDRAM DDR2 ODT control low register */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG7)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + DDR2_SDRAM_ODT_CTRL_LOW_REG)
+        str     r4, [r1]
+
+        /* Write SDRAM DDR2 ODT control high register */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG8)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + DDR2_SDRAM_ODT_CTRL_HIGH_REG)
+        str     r4, [r1]
+
+        /* Write SDRAM DDR2 Dunit ODT control register */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG9)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + DDR2_DUNIT_ODT_CONTROL_REG)
+        str     r4, [r1]
+
+        /* Write DDR2 SDRAM timing Low register */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG11)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_DDR2_TIMING_LO_REG)
+        str     r4, [r1]
+
+        /* Write DDR2 SDRAM timing High register */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG12)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_DDR2_TIMING_HI_REG)
+        str     r4, [r1]
+
+        /* 8) Write SDRAM mode register */
+        /* The CPU must not attempt to change the SDRAM Mode register setting */
+        /* prior to DRAM controller completion of the DRAM initialization     */
+        /* sequence. To guarantee this restriction, it is recommended that    */
+        /* the CPU sets the SDRAM Operation register to NOP command, performs */
+        /* read polling until the register is back in Normal operation value, */
+        /* and then sets SDRAM Mode register to its new value.               */
+
+	/* 8.1 write 'nop' to SDRAM operation */
+        mov     r4, #0x5                 /* 'NOP' command              */
+        MV_REG_WRITE_ASM(r4, r1, SDRAM_OPERATION_REG)
+
+        /* 8.2 poll SDRAM operation. Make sure its back to normal operation   */
+_sdramOpPoll1:
+        ldr     r4, [r1]
+        cmp     r4, #0                          /* '0' = Normal SDRAM Mode    */
+        bne     _sdramOpPoll1
+
+        /* 8.3 Now its safe to write new value to SDRAM Mode register         */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG2)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_MODE_REG)
+        str     r4, [r1]
+
+        /* 8.4 Make the Dunit write the DRAM its new mode                     */
+        mov     r4, #0x3                 /* Mode Register Set command  */
+        MV_REG_WRITE_ASM (r4, r1, SDRAM_OPERATION_REG)
+
+        /* 8.5 poll SDRAM operation. Make sure its back to normal operation   */
+_sdramOpPoll2:
+        ldr     r4, [r1]
+        cmp     r4, #0                          /* '0' = Normal SDRAM Mode    */
+        bne     _sdramOpPoll2
+
+        /* Now its safe to write new value to SDRAM Extended Mode regist */
+        ldr     r1, =(INTER_REGS_BASE + DRAM_BUF_REG10)
+        ldr     r4, [r1]
+        ldr     r1, =(INTER_REGS_BASE + SDRAM_EXTENDED_MODE_REG)
+        str     r4, [r1]
+
+        /* 9) Write SDRAM Extended mode register This operation should be     */
+        /*    done for each memory bank                                       */
+        /* write 'nop' to SDRAM operation */
+        mov     r4, #0x5                 /* 'NOP' command              */
+        MV_REG_WRITE_ASM (r4, r1, SDRAM_OPERATION_REG)
+
+        /* poll SDRAM operation. Make sure its back to normal operation   */
+_sdramOpPoll3:
+        ldr     r4, [r1]
+        cmp     r4, #0                          /* '0' = Normal SDRAM Mode    */
+        bne     _sdramOpPoll3
+        /* Go over each of the Banks */
+        ldr     r3, =0          /* r3 = DRAM bank Num */
+
+extModeLoop:
+        /* Set the SDRAM Operation Control to each of the DRAM banks          */
+        mov     r4, r3   /* Do not swap the bank counter value */
+        MV_REG_WRITE_ASM (r4, r1, SDRAM_OPERATION_CTRL_REG)
+
+        /* Make the Dunit write the DRAM its new mode                     */
+        mov     r4, #0x4        /* Extended Mode Register Set command  */
+        MV_REG_WRITE_ASM (r4, r1, SDRAM_OPERATION_REG)
+
+        /* poll SDRAM operation. Make sure its back to normal operation   */
+_sdramOpPoll4:
+        ldr     r4, [r1]
+        cmp     r4, #0                          /* '0' = Normal SDRAM Mode    */
+        bne     _sdramOpPoll4
+
+        add     r3, r3, #1
+        cmp     r3, #4         /* 4 = Number of banks */
+        bne     extModeLoop
+
+extModeEnd:
+cmp	sp, #0
+	beq	no_stack_l
+	mov     r1, LR                        	/* Save link register */
+#if defined(MV78XX0)
+	bl   	_mvDramIfMemInit
+#endif
+	mov	LR,r1				/* restore link register */
+load_from_stack:
+	/* Restore registers */
+        ldmia	sp!, {r1, r2, r3, r4}
+no_stack_l:
+
+        mov     pc, lr
+
+
+/*******************************************************************************
+* _mvDramIfEccMemInit - Basic DRAM ECC initialization.
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+#define XOR_CHAN0         0   /* XOR channel 0 used for memory initialization */
+#define XOR_UNIT0         0   /* XOR unit 0 used for memory initialization */
+#define XOR_ADDR_DEC_WIN0 0   /* Enable DRAM access using XOR decode window 0 */
+/* XOR engine register offsets macros */
+#define XOR_CONFIG_REG(chan)                (XOR_UNIT_BASE(0) + 0x10 + ((chan)    * 4))
+#define XOR_ACTIVATION_REG(chan)            (XOR_UNIT_BASE(0) + 0x20 + ((chan)    * 4))
+#define XOR_CAUSE_REG			            (XOR_UNIT_BASE(0) + 0x30)
+#define XOR_ERROR_CAUSE_REG                 (XOR_UNIT_BASE(0) + 0x50)
+#define XOR_ERROR_ADDR_REG                  (XOR_UNIT_BASE(0) + 0x60)
+#define XOR_INIT_VAL_LOW_REG                (XOR_UNIT_BASE(0) + 0x2E0)
+#define XOR_INIT_VAL_HIGH_REG               (XOR_UNIT_BASE(0) + 0x2E4)
+#define XOR_DST_PTR_REG(chan)               (XOR_UNIT_BASE(0) + 0x2B0 + ((chan)    * 4))
+#define XOR_BLOCK_SIZE_REG(chan)            (XOR_UNIT_BASE(0) + 0x2C0 + ((chan)    * 4))
+
+/* XOR Engine Address Decoding Register Map */
+#define XOR_WINDOW_CTRL_REG(unit,chan)     (XOR_UNIT_BASE(unit)+(0x240 + ((chan) * 4)))
+#define XOR_BASE_ADDR_REG(unit,winNum)     (XOR_UNIT_BASE(unit)+(0x250 + ((winNum) * 4)))
+#define XOR_SIZE_MASK_REG(unit,winNum)     (XOR_UNIT_BASE(unit)+(0x270 + ((winNum) * 4)))
+
+.globl _mvDramIfEccMemInit
+/*******************************************************************************
+* _mvDramIfEccMemInit  - mem init for dram cs
+*
+* DESCRIPTION:
+*       This function will clean the cs by ussing the XOR mem init.
+*
+* INPUT:
+*       r0 - dram bank number.
+*
+* OUTPUT:
+*	none
+*/
+_mvDramIfEccMemInit:
+
+        /* Save register on stack */
+	cmp	sp, #0
+	beq	no_stack_s1
+save_on_stack1:
+        stmdb	sp!, {r0,r1, r2, r3, r4, r5, r6}
+no_stack_s1:
+
+	ldr	r1, = 0
+
+        /* Disable all XOR address decode windows to avoid possible overlap */
+        MV_REG_WRITE_ASM (r1, r5, (XOR_WINDOW_CTRL_REG(XOR_UNIT0,XOR_CHAN0)))
+
+        /* Init r5 to first XOR_SIZE_MASK_REG */
+		mov		r5, r0, LSL #3
+        add     r5, r5,#0x1500
+        add     r5, r5,#0x04
+        add     r5, r5,#(INTER_REGS_BASE)
+        ldr     r6, [r5]
+        HTOLL(r6,r5)
+        MV_REG_WRITE_ASM (r6, r5, XOR_SIZE_MASK_REG(XOR_UNIT0,XOR_ADDR_DEC_WIN0))
+
+	mov		r5, r0, LSL #3
+        add     r5, r5,#0x1500
+        add     r5, r5,#(INTER_REGS_BASE)
+        ldr     r6, [r5]
+        /* Update destination & size */
+        MV_REG_WRITE_ASM(r6, r5, XOR_DST_PTR_REG(XOR_CHAN0))
+        HTOLL(r6,r5)
+        /* Init r6 to first XOR_BASE_ADDR_REG */
+	ldr	r4, = 0xf
+	ldr	r5, = 0x1
+	mov	r5, r5, LSL r0
+	bic	r4, r4, r5
+	mov 	r4, r4, LSL #8
+
+        orr	r6, r6, r4
+        MV_REG_WRITE_ASM (r6, r5, XOR_BASE_ADDR_REG(XOR_UNIT0,XOR_ADDR_DEC_WIN0))
+
+	ldr	r6, = 0xff0001
+        MV_REG_WRITE_ASM (r6, r5, XOR_WINDOW_CTRL_REG(XOR_UNIT0,XOR_CHAN0))
+
+        /* Configure XOR engine for memory init function.           */
+        MV_REG_READ_ASM (r6, r5, XOR_CONFIG_REG(XOR_CHAN0))
+        and	r6, r6, #~0x7        	/* Clear operation mode field      */
+        orr     r6, r6, #0x4             /* Set operation to memory init    */
+        MV_REG_WRITE_ASM(r6, r5, XOR_CONFIG_REG(XOR_CHAN0))
+
+        /* Set initVal in the XOR Engine Initial Value Registers       */
+	ldr	r6, = 0xfeedfeed
+        MV_REG_WRITE_ASM(r6, r5, XOR_INIT_VAL_LOW_REG)
+	ldr	r6, = 0xfeedfeed
+        MV_REG_WRITE_ASM(r6, r5, XOR_INIT_VAL_HIGH_REG)
+
+        /* Set block size using DRAM bank size  */
+
+	mov	r5, r0, LSL #3
+        add     r5, r5,#0x1500
+        add     r5, r5,#0x04
+        add     r5, r5,#(INTER_REGS_BASE)
+
+        ldr     r6, [r5]
+        HTOLL(r6,r5)
+	and	r6, r6, #SCSR_SIZE_MASK
+	mov	r5, r6, LSR #SCSR_SIZE_OFFS
+        add	r5, r5, #1
+	mov	r6, r5, LSL #SCSR_SIZE_OFFS
+        MV_REG_WRITE_ASM(r6, r5, XOR_BLOCK_SIZE_REG(XOR_CHAN0))
+
+        /* Clean interrupt cause*/
+        MV_REG_WRITE_ASM(r1, r5, XOR_CAUSE_REG)
+
+        /* Clean error interrupt cause*/
+        MV_REG_READ_ASM(r6, r5, XOR_ERROR_CAUSE_REG)
+        MV_REG_READ_ASM(r6, r5, XOR_ERROR_ADDR_REG)
+
+        /* Start transfer */
+        MV_REG_READ_ASM (r6, r5, XOR_ACTIVATION_REG(XOR_CHAN0))
+        orr     r6, r6, #0x1 /* Preform start command      */
+        MV_REG_WRITE_ASM(r6, r5, XOR_ACTIVATION_REG(XOR_CHAN0))
+
+        /* Wait for engine to finish */
+waitForComplete:
+        MV_REG_READ_ASM(r6, r5, XOR_CAUSE_REG)
+        and   	r6, r6, #2
+	cmp	r6, #0
+        beq     waitForComplete
+
+        /* Clear all error report registers */
+        MV_REG_WRITE_ASM(r1, r5, SDRAM_SINGLE_BIT_ERR_CNTR_REG)
+        MV_REG_WRITE_ASM(r1, r5, SDRAM_DOUBLE_BIT_ERR_CNTR_REG)
+
+        MV_REG_WRITE_ASM(r1, r5, SDRAM_ERROR_CAUSE_REG)
+
+	cmp	sp, #0
+	beq	no_stack_l1
+load_from_stack1:
+        ldmia	sp!, {r0, r1, r2, r3, r4, r5, r6}
+no_stack_l1:
+        mov     pc, lr
+
+
+/*******************************************************************************
+* mvDramIfMemInit - Use XOR to clear all memory.
+*
+* DESCRIPTION:
+*       Use assembler function _mvDramIfEccMemInit to fill all memory with FEADFEAD pattern.
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+#if defined(MV78XX0)
+
+_mvDramIfMemInit:
+        stmdb	sp!, {r0,r1, r2, r3, r4, r5, r6}
+	mov     r6, LR                 /* Save link register */
+	/* Check if dram bank 0 has to be init for ECC */
+	MV_REG_READ_ASM (r0, r5, SDRAM_SIZE_REG(0,0))
+	and 	r3, r0, #SCSR_WIN_EN
+        cmp     r3, #0
+	beq   	no_bank_0
+	MV_REG_READ_ASM(r0, r5,  SDRAM_BASE_ADDR_REG(0,0))
+        cmp     r0, #0
+	beq   	no_bank_0
+	mov	r0,#0
+	bl	_mvDramIfEccMemInit
+
+no_bank_0:
+	/* Check if dram bank 1 has to be init for ECC */
+        MV_REG_READ_ASM (r0, r5, SDRAM_SIZE_REG(0,1))
+	and 	r0, r0, #SCSR_WIN_EN
+        cmp     r0, #0
+	beq   	no_bank_1
+	mov	r0,#1
+	bl	_mvDramIfEccMemInit
+no_bank_1:
+	/* Check if dram bank 2 has to be init for ECC */
+	MV_REG_READ_ASM (r0, r5, SDRAM_SIZE_REG(0,2))
+	and 	r0, r0, #SCSR_WIN_EN
+        cmp     r0, #0
+	beq   	no_bank_2
+	MV_REG_READ_ASM(r0, r5,  SDRAM_BASE_ADDR_REG(0,2))
+        cmp     r0, #0
+	beq   	no_bank_2
+	mov	r0,#2
+	bl	_mvDramIfEccMemInit
+
+no_bank_2:
+	/* Check if dram bank 3 has to be init for ECC */
+	MV_REG_READ_ASM (r0, r5, SDRAM_SIZE_REG(0,3))
+	and 	r0, r0, #SCSR_WIN_EN
+	cmp     r0, #0
+	beq   	no_bank_3
+	mov	r0,#3
+	bl	_mvDramIfEccMemInit
+no_bank_3:
+	mov     LR ,r6                /* restore link register */
+	ldmia	sp!, {r0, r1, r2, r3, r4, r5, r6}
+	mov     pc, lr
+#endif
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfConfig.h b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfConfig.h
new file mode 100644
index 000000000000..ccfd49197773
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfConfig.h
@@ -0,0 +1,157 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvDramIfConfigh
+#define __INCmvDramIfConfigh
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* includes */
+
+/* defines  */
+
+/* registers defaults values */
+
+#define SDRAM_CONFIG_DV 	(SDRAM_SRMODE_DRAM | BIT25 | BIT30)
+
+#define SDRAM_DUNIT_CTRL_LOW_DDR2_DV			\
+		(SDRAM_SRCLK_KEPT		|	\
+		 SDRAM_CLK1DRV_NORMAL		|	\
+		 (BIT28 | BIT29))
+
+#define SDRAM_ADDR_CTRL_DV	    2
+
+#define SDRAM_TIMING_CTRL_LOW_REG_DV 	\
+		((0x2 << SDRAM_TRCD_OFFS) | 	\
+		 (0x2 << SDRAM_TRP_OFFS)  | 	\
+		 (0x1 << SDRAM_TWR_OFFS)  | 	\
+		 (0x0 << SDRAM_TWTR_OFFS) | 	\
+		 (0x5 << SDRAM_TRAS_OFFS) | 	\
+		 (0x1 << SDRAM_TRRD_OFFS))
+
+/* Note: value of 0 in register means one cycle, 1 means two and so on  */
+#define SDRAM_TIMING_CTRL_HIGH_REG_DV 	\
+		((0x0 << SDRAM_TR2R_OFFS)	|	\
+		 (0x0 << SDRAM_TR2W_W2R_OFFS)	|	\
+		 (0x1 << SDRAM_TW2W_OFFS))
+
+#define SDRAM_OPEN_PAGES_CTRL_REG_DV 	SDRAM_OPEN_PAGE_EN
+
+/* Presence	     Ctrl Low    Ctrl High  Dunit Ctrl   Ext Mode     */
+/* CS0              0x84210000  0x00000000  0x0000780F  0x00000440    */
+/* CS0+CS1          0x84210000  0x00000000  0x0000780F  0x00000440    */
+/* CS0+CS2          0x030C030C  0x00000000  0x0000740F  0x00000404    */
+/* CS0+CS1+CS2      0x030C030C  0x00000000  0x0000740F  0x00000404    */
+/* CS0+CS2+CS3      0x030C030C  0x00000000  0x0000740F  0x00000404    */
+/* CS0+CS1+CS2+CS3  0x030C030C  0x00000000  0x0000740F  0x00000404    */
+
+#define DDR2_ODT_CTRL_LOW_CS0_CS1_DV		0x84210000
+#define DDR2_ODT_CTRL_HIGH_CS0_CS1_DV		0x00000000
+#define DDR2_DUNIT_ODT_CTRL_CS0_CS1_DV		0x0000E80F
+#ifdef MV78XX0
+#define DDR_SDRAM_EXT_MODE_CS0_CS1_DV		0x00000040
+#else
+#define DDR_SDRAM_EXT_MODE_CS0_CS1_DV		0x00000440
+#endif
+
+#define DDR2_ODT_CTRL_LOW_CS0_CS1_CS2_CS3_DV	0x030C030C
+#define DDR2_ODT_CTRL_HIGH_CS0_CS1_CS2_CS3_DV	0x00000000
+#define DDR2_DUNIT_ODT_CTRL_CS0_CS1_CS2_CS3_DV	0x0000F40F
+#ifdef MV78XX0
+#define DDR_SDRAM_EXT_MODE_CS0_CS1_CS2_CS3_DV	0x00000004
+#define DDR_SDRAM_EXT_MODE_FAST_CS0_CS1_CS2_CS3_DV	0x00000044
+#else
+#define DDR_SDRAM_EXT_MODE_CS0_CS1_CS2_CS3_DV	0x00000404
+#define DDR_SDRAM_EXT_MODE_FAST_CS0_CS1_CS2_CS3_DV	0x00000444
+#endif
+
+/* DDR SDRAM Adderss/Control and Data Pads Calibration default values */
+#define DDR2_ADDR_CTRL_PAD_STRENGTH_TYPICAL_DV	\
+		(3 << SDRAM_PRE_DRIVER_STRENGTH_OFFS)
+
+#define DDR2_DATA_PAD_STRENGTH_TYPICAL_DV		\
+		(3 << SDRAM_PRE_DRIVER_STRENGTH_OFFS)
+
+/* DDR SDRAM Mode Register default value */
+#define DDR2_MODE_REG_DV		(SDRAM_BURST_LEN_4 | SDRAM_WR_3_CYC)
+/* DDR SDRAM Timing parameter default values */
+#define SDRAM_TIMING_CTRL_LOW_REG_DEFAULT  	0x33136552
+#define SDRAM_TRFC_DEFAULT_VALUE		0x34
+#define SDRAM_TRFC_DEFAULT		SDRAM_TRFC_DEFAULT_VALUE
+#define SDRAM_TW2W_DEFALT		(0x1 << SDRAM_TW2W_OFFS)
+
+#define SDRAM_TIMING_CTRL_HIGH_REG_DEFAULT  (SDRAM_TRFC_DEFAULT | SDRAM_TW2W_DEFALT)
+
+#define SDRAM_FTDLL_REG_DEFAULT_LEFT  		0x88C800
+#define SDRAM_FTDLL_REG_DEFAULT_RIGHT  		0x88C800
+#define SDRAM_FTDLL_REG_DEFAULT_UP  		0x88C800
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INCmvDramIfh */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfRegs.h b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfRegs.h
new file mode 100644
index 000000000000..58175f6d0a27
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfRegs.h
@@ -0,0 +1,423 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvDramIfRegsh
+#define __INCmvDramIfRegsh
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* DDR SDRAM Controller Address Decode Registers */
+ /* SDRAM CSn Base Address Register (SCBAR) */
+#define SDRAM_BASE_ADDR_REG(cpu,csNum)	(0x1500 + ((csNum) * 8) + ((cpu) * 0x70))
+#define SCBAR_BASE_OFFS			16
+#define SCBAR_BASE_MASK			(0xffff << SCBAR_BASE_OFFS)
+#define SCBAR_BASE_ALIGNMENT		0x10000
+
+/* SDRAM CSn Size Register (SCSR) */
+#define SDRAM_SIZE_REG(cpu,csNum)	(0x1504 + ((csNum) * 8) + ((cpu) * 0x70))
+#define SCSR_SIZE_OFFS			24
+#define SCSR_SIZE_MASK			(0xff << SCSR_SIZE_OFFS)
+#define SCSR_SIZE_ALIGNMENT		0x1000000
+#define SCSR_WIN_EN			BIT0
+
+/* configuration register */
+#define SDRAM_CONFIG_REG   		(DRAM_BASE + 0x1400)
+#define SDRAM_REFRESH_OFFS 		0
+#define SDRAM_REFRESH_MAX  		0x3FFF
+#define SDRAM_REFRESH_MASK 		(SDRAM_REFRESH_MAX << SDRAM_REFRESH_OFFS)
+#define SDRAM_DWIDTH_OFFS       	15
+#define SDRAM_DWIDTH_MASK       	(1 << SDRAM_DWIDTH_OFFS)
+#define SDRAM_DWIDTH_32BIT      	(0 << SDRAM_DWIDTH_OFFS)
+#define SDRAM_DWIDTH_64BIT      	(1 << SDRAM_DWIDTH_OFFS)
+#define SDRAM_REGISTERED   		(1 << 17)
+#define SDRAM_ECC_OFFS    		18
+#define SDRAM_ECC_MASK    		(1 << SDRAM_ECC_OFFS)
+#define SDRAM_ECC_DIS     		(0 << SDRAM_ECC_OFFS)
+#define SDRAM_ECC_EN        		(1 << SDRAM_ECC_OFFS)
+#define SDRAM_IERR_OFFS    		19
+#define SDRAM_IERR_MASK    		(1 << SDRAM_IERR_OFFS)
+#define SDRAM_IERR_REPORTE     		(0 << SDRAM_IERR_OFFS)
+#define SDRAM_IERR_IGNORE      		(1 << SDRAM_IERR_OFFS)
+#define SDRAM_SRMODE_OFFS       	24
+#define SDRAM_SRMODE_MASK       	(1 << SDRAM_SRMODE_OFFS)
+#define SDRAM_SRMODE_POWER      	(0 << SDRAM_SRMODE_OFFS)
+#define SDRAM_SRMODE_DRAM       	(1 << SDRAM_SRMODE_OFFS)
+
+/* dunit control low register */
+#define SDRAM_DUNIT_CTRL_REG  		(DRAM_BASE + 0x1404)
+#define SDRAM_2T_OFFS			4
+#define SDRAM_2T_MASK			(1 << SDRAM_2T_OFFS)
+#define SDRAM_2T_MODE			(1 << SDRAM_2T_OFFS)
+
+#define SDRAM_SRCLK_OFFS		5
+#define SDRAM_SRCLK_MASK		(1 << SDRAM_SRCLK_OFFS)
+#define SDRAM_SRCLK_KEPT		(0 << SDRAM_SRCLK_OFFS)
+#define SDRAM_SRCLK_GATED		(1 << SDRAM_SRCLK_OFFS)
+#define SDRAM_CTRL_POS_OFFS	   	6
+#define SDRAM_CTRL_POS_MASK		(1 << SDRAM_CTRL_POS_OFFS)
+#define SDRAM_CTRL_POS_FALL	   	(0 << SDRAM_CTRL_POS_OFFS)
+#define SDRAM_CTRL_POS_RISE	   	(1 << SDRAM_CTRL_POS_OFFS)
+#define SDRAM_CLK1DRV_OFFS      	12
+#define SDRAM_CLK1DRV_MASK      	(1 << SDRAM_CLK1DRV_OFFS)
+#define SDRAM_CLK1DRV_HIGH_Z    	(0 << SDRAM_CLK1DRV_OFFS)
+#define SDRAM_CLK1DRV_NORMAL    	(1 << SDRAM_CLK1DRV_OFFS)
+#define SDRAM_CLK2DRV_OFFS      	13
+#define SDRAM_CLK2DRV_MASK      	(1 << SDRAM_CLK2DRV_OFFS)
+#define SDRAM_CLK2DRV_HIGH_Z    	(0 << SDRAM_CLK2DRV_OFFS)
+#define SDRAM_CLK2DRV_NORMAL    	(1 << SDRAM_CLK2DRV_OFFS)
+#define SDRAM_SB_OUT_DEL_OFFS 		20
+#define SDRAM_SB_OUT_DEL_MAX 		0xf
+#define SDRAM_SB_OUT_MASK 		(SDRAM_SB_OUT_DEL_MAX<<SDRAM_SB_OUT_DEL_OFFS)
+#define SDRAM_SB_IN_DEL_OFFS 		24
+#define SDRAM_SB_IN_DEL_MAX 		0xf
+#define SDRAM_SB_IN_MASK 		(SDRAM_SB_IN_DEL_MAX<<SDRAM_SB_IN_DEL_OFFS)
+
+/* dunit control hight register */
+#define SDRAM_DUNIT_CTRL_HI_REG  	(DRAM_BASE + 0x1424)
+#define SDRAM__D2P_OFFS			7
+#define SDRAM__D2P_EN			(1 << SDRAM__D2P_OFFS)
+#define SDRAM__P2D_OFFS			8
+#define SDRAM__P2D_EN			(1 << SDRAM__P2D_OFFS)
+#define SDRAM__ADD_HALF_FCC_OFFS	9
+#define SDRAM__ADD_HALF_FCC_EN		(1 << SDRAM__ADD_HALF_FCC_OFFS)
+#define SDRAM__PUP_ZERO_SKEW_OFFS	10
+#define SDRAM__PUP_ZERO_SKEW_EN		(1 << SDRAM__PUP_ZERO_SKEW_OFFS)
+#define SDRAM__WR_MESH_DELAY_OFFS	11
+#define SDRAM__WR_MESH_DELAY_EN		(1 << SDRAM__WR_MESH_DELAY_OFFS)
+
+/* sdram timing control low register */
+#define SDRAM_TIMING_CTRL_LOW_REG	(DRAM_BASE + 0x1408)
+#define SDRAM_TRCD_OFFS 		4
+#define SDRAM_TRCD_MASK 		(0xF << SDRAM_TRCD_OFFS)
+#define SDRAM_TRP_OFFS 			8
+#define SDRAM_TRP_MASK 			(0xF << SDRAM_TRP_OFFS)
+#define SDRAM_TWR_OFFS 			12
+#define SDRAM_TWR_MASK 			(0xF << SDRAM_TWR_OFFS)
+#define SDRAM_TWTR_OFFS 		16
+#define SDRAM_TWTR_MASK 		(0xF << SDRAM_TWTR_OFFS)
+#define SDRAM_TRAS_OFFS 		0
+#define SDRAM_TRAS_MASK 		(0xF << SDRAM_TRAS_OFFS)
+#define SDRAM_EXT_TRAS_OFFS 		20
+#define SDRAM_EXT_TRAS_MASK 		(0x1 << SDRAM_EXT_TRAS_OFFS)
+#define SDRAM_TRRD_OFFS 		24
+#define SDRAM_TRRD_MASK 		(0xF << SDRAM_TRRD_OFFS)
+#define SDRAM_TRTP_OFFS			28
+#define SDRAM_TRTP_MASK			(0xF << SDRAM_TRTP_OFFS)
+#define SDRAM_TRTP_DDR1 		(0x1 << SDRAM_TRTP_OFFS)
+
+/* sdram timing control high register */
+#define SDRAM_TIMING_CTRL_HIGH_REG	(DRAM_BASE + 0x140c)
+#define SDRAM_TRFC_OFFS 		0
+#define SDRAM_TRFC_MASK 		(0x3F << SDRAM_TRFC_OFFS)
+#define SDRAM_TR2R_OFFS 		7
+#define SDRAM_TR2R_MASK 		(0x3 << SDRAM_TR2R_OFFS)
+#define SDRAM_TR2W_W2R_OFFS		9
+#define SDRAM_TR2W_W2R_MASK		(0x3 << SDRAM_TR2W_W2R_OFFS)
+#define SDRAM_TW2W_OFFS			11
+#define SDRAM_TW2W_MASK			(0x3 << SDRAM_TW2W_OFFS)
+
+/* sdram DDR2 timing low register (SD2TLR) */
+#define SDRAM_DDR2_TIMING_LO_REG	(DRAM_BASE + 0x1428)
+#define SD2TLR_TODT_ON_RD_OFFS		4
+#define SD2TLR_TODT_ON_RD_MASK		(0xF << SD2TLR_TODT_ON_RD_OFFS)
+#define SD2TLR_TODT_OFF_RD_OFFS		8
+#define SD2TLR_TODT_OFF_RD_MASK		(0xF << SD2TLR_TODT_OFF_RD_OFFS)
+#define SD2TLR_TODT_ON_CTRL_RD_OFFS	12
+#define SD2TLR_TODT_ON_CTRL_RD_MASK	(0xF << SD2TLR_TODT_ON_CTRL_RD_OFFS)
+#define SD2TLR_TODT_OFF_CTRL_RD_OFFS	16
+#define SD2TLR_TODT_OFF_CTRL_RD_MASK	(0xF << SD2TLR_TODT_OFF_CTRL_RD_OFFS)
+
+/* sdram DDR2 timing high register (SD2TLR) */
+#define SDRAM_DDR2_TIMING_HI_REG	(DRAM_BASE + 0x147C)
+#define SD2THR_TODT_ON_WR_OFFS		0
+#define SD2THR_TODT_ON_WR_MASK		(0xF << SD2THR_TODT_ON_WR_OFFS)
+#define SD2THR_TODT_OFF_WR_OFFS		4
+#define SD2THR_TODT_OFF_WR_MASK		(0xF << SD2THR_TODT_OFF_WR_OFFS)
+#define SD2THR_TODT_ON_CTRL_WR_OFFS	8
+#define SD2THR_TODT_ON_CTRL_WR_MASK	(0xF << SD2THR_TODT_ON_CTRL_WR_OFFS)
+#define SD2THR_TODT_OFF_CTRL_WR_OFFS	12
+#define SD2THR_TODT_OFF_CTRL_WR_MASK	(0xF << SD2THR_TODT_OFF_CTRL_WR_OFFS)
+
+/* address control register */
+#define SDRAM_ADDR_CTRL_REG		(DRAM_BASE + 0x1410)
+#define SDRAM_ADDRSEL_OFFS(cs)		(4 * (cs))
+#define SDRAM_ADDRSEL_MASK(cs)		(0x3 << SDRAM_ADDRSEL_OFFS(cs))
+#define SDRAM_ADDRSEL_X8(cs)		(0x0 << SDRAM_ADDRSEL_OFFS(cs))
+#define SDRAM_ADDRSEL_X16(cs)		(0x1 << SDRAM_ADDRSEL_OFFS(cs))
+#define SDRAM_DSIZE_OFFS(cs)   	    	(2 + 4 * (cs))
+#define SDRAM_DSIZE_MASK(cs)   	    	(0x3 << SDRAM_DSIZE_OFFS(cs))
+#define SDRAM_DSIZE_256Mb(cs) 	    	(0x1 << SDRAM_DSIZE_OFFS(cs))
+#define SDRAM_DSIZE_512Mb(cs)  	    	(0x2 << SDRAM_DSIZE_OFFS(cs))
+#define SDRAM_DSIZE_1Gb(cs)  	    	(0x3 << SDRAM_DSIZE_OFFS(cs))
+#define SDRAM_DSIZE_2Gb(cs)  	    	(0x0 << SDRAM_DSIZE_OFFS(cs))
+
+/* SDRAM Open Pages Control registers */
+#define SDRAM_OPEN_PAGE_CTRL_REG	(DRAM_BASE + 0x1414)
+#define SDRAM_OPEN_PAGE_EN			(0 << 0)
+#define SDRAM_OPEN_PAGE_DIS			(1 << 0)
+
+/* sdram opertion register */
+#define SDRAM_OPERATION_REG 		(DRAM_BASE + 0x1418)
+#define SDRAM_CMD_OFFS  			0
+#define SDRAM_CMD_MASK   			(0xF << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_NORMAL 			(0x0 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_PRECHARGE_ALL 	(0x1 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_REFRESH_ALL 		(0x2 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_REG_SET_CMD 		(0x3 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_EXT_MODE_SET 		(0x4 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_NOP 				(0x5 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_SLF_RFRSH 		(0x7 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_EMRS2_CMD  		(0x8 << SDRAM_CMD_OFFS)
+#define SDRAM_CMD_EMRS3_CMD  		(0x9 << SDRAM_CMD_OFFS)
+
+/* sdram mode register */
+#define SDRAM_MODE_REG 				(DRAM_BASE + 0x141c)
+#define SDRAM_BURST_LEN_OFFS 		0
+#define SDRAM_BURST_LEN_MASK 		(0x7 << SDRAM_BURST_LEN_OFFS)
+#define SDRAM_BURST_LEN_4    		(0x2 << SDRAM_BURST_LEN_OFFS)
+#define SDRAM_CL_OFFS   			4
+#define SDRAM_CL_MASK   			(0x7 << SDRAM_CL_OFFS)
+#define SDRAM_DDR2_CL_3      		(0x3 << SDRAM_CL_OFFS)
+#define SDRAM_DDR2_CL_4      		(0x4 << SDRAM_CL_OFFS)
+#define SDRAM_DDR2_CL_5    		    (0x5 << SDRAM_CL_OFFS)
+#define SDRAM_DDR2_CL_6    		    (0x6 << SDRAM_CL_OFFS)
+
+#define SDRAM_TM_OFFS           	7
+#define SDRAM_TM_MASK           	(1 << SDRAM_TM_OFFS)
+#define SDRAM_TM_NORMAL         	(0 << SDRAM_TM_OFFS)
+#define SDRAM_TM_TEST_MODE      	(1 << SDRAM_TM_OFFS)
+#define SDRAM_DLL_OFFS         		8
+#define SDRAM_DLL_MASK          	(1 << SDRAM_DLL_OFFS)
+#define SDRAM_DLL_NORMAL        	(0 << SDRAM_DLL_OFFS)
+#define SDRAM_DLL_RESET 			(1 << SDRAM_DLL_OFFS)
+#define SDRAM_WR_OFFS				9
+#define SDRAM_WR_MAX				7
+#define SDRAM_WR_MASK				(SDRAM_WR_MAX << SDRAM_WR_OFFS)
+#define SDRAM_WR_2_CYC				(1 << SDRAM_WR_OFFS)
+#define SDRAM_WR_3_CYC				(2 << SDRAM_WR_OFFS)
+#define SDRAM_WR_4_CYC				(3 << SDRAM_WR_OFFS)
+#define SDRAM_WR_5_CYC				(4 << SDRAM_WR_OFFS)
+#define SDRAM_WR_6_CYC				(5 << SDRAM_WR_OFFS)
+#define SDRAM_PD_OFFS				12
+#define SDRAM_PD_MASK				(1 << SDRAM_PD_OFFS)
+#define SDRAM_PD_FAST_EXIT			(0 << SDRAM_PD_OFFS)
+#define SDRAM_PD_SLOW_EXIT			(1 << SDRAM_PD_OFFS)
+
+/* DDR SDRAM Extended Mode register (DSEMR) */
+#define SDRAM_EXTENDED_MODE_REG		(DRAM_BASE + 0x1420)
+#define DSEMR_DLL_ENABLE			0
+#define DSEMR_DLL_DISABLE			1
+#define DSEMR_DS_OFFS				1
+#define DSEMR_DS_MASK				(1 << DSEMR_DS_OFFS)
+#define DSEMR_DS_NORMAL				(0 << DSEMR_DS_OFFS)
+#define DSEMR_DS_REDUCED			(1 << DSEMR_DS_OFFS)
+#define DSEMR_QOFF_OUTPUT_BUFF_EN	(0 << 12)
+#define DSEMR_RTT0_OFFS				2
+#define DSEMR_RTT1_OFFS				6
+#define DSEMR_RTT_ODT_DISABLE		((0 << DSEMR_RTT0_OFFS)||(0 << DSEMR_RTT1_OFFS))
+#define DSEMR_RTT_ODT_75_OHM		((1 << DSEMR_RTT0_OFFS)||(0 << DSEMR_RTT1_OFFS))
+#define DSEMR_RTT_ODT_150_OHM		((0 << DSEMR_RTT0_OFFS)||(1 << DSEMR_RTT1_OFFS))
+#define DSEMR_RTT_ODT_50_OHM		((1 << DSEMR_RTT0_OFFS)||(1 << DSEMR_RTT1_OFFS))
+#define DSEMR_DQS_OFFS				10
+#define DSEMR_DQS_MASK				(1 << DSEMR_DQS_OFFS)
+#define DSEMR_DQS_DIFFERENTIAL		(0 << DSEMR_DQS_OFFS)
+#define DSEMR_DQS_SINGLE_ENDED		(1 << DSEMR_DQS_OFFS)
+#define DSEMR_RDQS_ENABLE			(1 << 11)
+#define DSEMR_QOFF_OUTPUT_BUFF_EN	(0 << 12)
+#define DSEMR_QOFF_OUTPUT_BUFF_DIS	(1 << 12)
+
+/* DDR SDRAM Operation Control Register */
+#define SDRAM_OPERATION_CTRL_REG	(DRAM_BASE + 0x142c)
+
+/* Dunit FTDLL Configuration Register */
+#define SDRAM_FTDLL_CONFIG_LEFT_REG		(DRAM_BASE + 0x1484)
+#define SDRAM_FTDLL_CONFIG_RIGHT_REG		(DRAM_BASE + 0x161C)
+#define SDRAM_FTDLL_CONFIG_UP_REG		(DRAM_BASE + 0x1620)
+
+/* Pads Calibration register */
+#define SDRAM_ADDR_CTRL_PADS_CAL_REG	(DRAM_BASE + 0x14c0)
+#define SDRAM_DATA_PADS_CAL_REG		    (DRAM_BASE + 0x14c4)
+#define SDRAM_DRVN_OFFS 			0
+#define SDRAM_DRVN_MASK 			(0x3F << SDRAM_DRVN_OFFS)
+#define SDRAM_DRVP_OFFS 			6
+#define SDRAM_DRVP_MASK 			(0x3F << SDRAM_DRVP_OFFS)
+#define SDRAM_PRE_DRIVER_STRENGTH_OFFS		12
+#define SDRAM_PRE_DRIVER_STRENGTH_MASK		(3 << SDRAM_PRE_DRIVER_STRENGTH_OFFS)
+#define SDRAM_TUNE_EN   		BIT16
+#define SDRAM_LOCKN_OFFS 			17
+#define SDRAM_LOCKN_MAKS 			(0x3F << SDRAM_LOCKN_OFFS)
+#define SDRAM_LOCKP_OFFS 			23
+#define SDRAM_LOCKP_MAKS 			(0x3F << SDRAM_LOCKP_OFFS)
+#define SDRAM_WR_EN     			(1 << 31)
+
+/* DDR2 SDRAM ODT Control (Low) Register (DSOCLR) */
+#define DDR2_SDRAM_ODT_CTRL_LOW_REG (DRAM_BASE + 0x1494)
+#define DSOCLR_ODT_RD_OFFS(odtNum)  (odtNum * 4)
+#define DSOCLR_ODT_RD_MASK(odtNum)  (0xf << DSOCLR_ODT_RD_OFFS(odtNum))
+#define DSOCLR_ODT_RD(odtNum, bank) ((1 << bank) << DSOCLR_ODT_RD_OFFS(odtNum))
+#define DSOCLR_ODT_WR_OFFS(odtNum)  (16 + (odtNum * 4))
+#define DSOCLR_ODT_WR_MASK(odtNum)  (0xf << DSOCLR_ODT_WR_OFFS(odtNum))
+#define DSOCLR_ODT_WR(odtNum, bank) ((1 << bank) << DSOCLR_ODT_WR_OFFS(odtNum))
+
+/* DDR2 SDRAM ODT Control (High) Register (DSOCHR) */
+#define DDR2_SDRAM_ODT_CTRL_HIGH_REG    	(DRAM_BASE + 0x1498)
+/* Optional control values to DSOCHR_ODT_EN macro */
+#define DDR2_ODT_CTRL_DUNIT         0
+#define DDR2_ODT_CTRL_NEVER         1
+#define DDR2_ODT_CTRL_ALWAYS        3
+#define DSOCHR_ODT_EN_OFFS(odtNum)  (odtNum * 2)
+#define DSOCHR_ODT_EN_MASK(odtNum)  (0x3 << DSOCHR_ODT_EN_OFFS(odtNum))
+#define DSOCHR_ODT_EN(odtNum, ctrl) (ctrl << DSOCHR_ODT_EN_OFFS(odtNum))
+
+/* DDR2 Dunit ODT Control Register (DDOCR)*/
+#define DDR2_DUNIT_ODT_CONTROL_REG  (DRAM_BASE + 0x149c)
+#define DDOCR_ODT_RD_OFFS          	0
+#define DDOCR_ODT_RD_MASK           (0xf << DDOCR_ODT_RD_OFFS)
+#define DDOCR_ODT_RD(bank)          ((1 << bank) << DDOCR_ODT_RD_OFFS)
+#define DDOCR_ODT_WR_OFFS           4
+#define DDOCR_ODT_WR_MASK           (0xf << DDOCR_ODT_WR_OFFS)
+#define DDOCR_ODT_WR(bank)          ((1 << bank) << DDOCR_ODT_WR_OFFS)
+#define DSOCR_ODT_EN_OFFS           8
+#define DSOCR_ODT_EN_MASK           (0x3 << DSOCR_ODT_EN_OFFS)
+/* For ctrl parameters see DDR2 SDRAM ODT Control (High) Register (0x1498) above. */
+#define DSOCR_ODT_EN(ctrl)         	(ctrl << DSOCR_ODT_EN_OFFS)
+#define DSOCR_ODT_SEL_DISABLE	    0
+#define DSOCR_ODT_SEL_75_OHM	    2
+#define DSOCR_ODT_SEL_150_OHM	    1
+#define DSOCR_ODT_SEL_50_OHM        3
+#define DSOCR_DQ_ODT_SEL_OFFS       10
+#define DSOCR_DQ_ODT_SEL_MASK       (0x3 << DSOCR_DQ_ODT_SEL_OFFS)
+#define DSOCR_DQ_ODT_SEL(odtSel)    (odtSel << DSOCR_DQ_ODT_SEL_OFFS)
+#define DSOCR_ST_ODT_SEL_OFFS       12
+#define DSOCR_ST_ODT_SEL_MASK       (0x3 << DSOCR_ST_ODT_SEL_OFFS)
+#define DSOCR_ST_ODT_SEL(odtSel)    (odtSel << DSOCR_ST_ODT_SEL_OFFS)
+#define DSOCR_ST_ODT_EN             (1 << 14)
+
+/* DDR SDRAM Initialization Control Register (DSICR) */
+#define DDR_SDRAM_INIT_CTRL_REG	    (DRAM_BASE + 0x1480)
+#define DSICR_INIT_EN		    	(1 << 0)
+#define DSICR_T200_SET		    	(1 << 8)
+
+/* sdram extended mode2 register (SEM2R) */
+#define SDRAM_EXTENDED_MODE2_REG	(DRAM_BASE + 0x148C)
+#define SEM2R_EMRS2_DDR2_OFFS		0
+#define SEM2R_EMRS2_DDR2_MASK		(0x7FFF << SEM2R_EMRS2_DDR2_OFFS)
+
+/* sdram extended mode3 register (SEM3R) */
+#define SDRAM_EXTENDED_MODE3_REG	(DRAM_BASE + 0x1490)
+#define SEM3R_EMRS3_DDR2_OFFS		0
+#define SEM3R_EMRS3_DDR2_MASK		(0x7FFF << SEM3R_EMRS3_DDR2_OFFS)
+
+/* sdram error registers */
+#define SDRAM_ERROR_CAUSE_REG               	(DRAM_BASE + 0x14d0)
+#define SDRAM_ERROR_MASK_REG                	(DRAM_BASE + 0x14d4)
+#define SDRAM_ERROR_DATA_LOW_REG            	(DRAM_BASE + 0x1444)
+#define SDRAM_ERROR_DATA_HIGH_REG           	(DRAM_BASE + 0x1440)
+#define SDRAM_ERROR_ADDR_REG                	(DRAM_BASE + 0x1450)
+#define SDRAM_ERROR_ECC_REG                 	(DRAM_BASE + 0x1448)
+#define SDRAM_CALC_ECC_REG                  	(DRAM_BASE + 0x144c)
+#define SDRAM_ECC_CONTROL_REG               	(DRAM_BASE + 0x1454)
+#define SDRAM_SINGLE_BIT_ERR_CNTR_REG 		(DRAM_BASE + 0x1458)
+#define SDRAM_DOUBLE_BIT_ERR_CNTR_REG 		(DRAM_BASE + 0x145c)
+
+/* SDRAM Error Cause Register (SECR) */
+#define SECR_SINGLE_BIT_ERR			BIT0
+#define SECR_DOUBLE_BIT_ERR			BIT1
+#define SECR_DATA_PATH_PARITY_ERR	BIT2
+/* SDRAM Error Address Register (SEAR) */
+#define SEAR_ERR_TYPE_OFFS			0
+#define SEAR_ERR_TYPE_MASK      	(1 << SEAR_ERR_TYPE_OFFS)
+#define SEAR_ERR_TYPE_SINGLE    	0
+#define SEAR_ERR_TYPE_DOUBLE    	(1 << SEAR_ERR_TYPE_OFFS)
+#define SEAR_ERR_CS_OFFS			1
+#define SEAR_ERR_CS_MASK			(3 << SEAR_ERR_CS_OFFS)
+#define SEAR_ERR_CS(csNum)			(csNum << SEAR_ERR_CS_OFFS)
+#define SEAR_ERR_ADDR_OFFS      	3
+#define SEAR_ERR_ADDR_MASK      	(0x1FFFFFFF << SEAR_ERR_ADDR_OFFS)
+
+/* SDRAM ECC Control Register (SECR) */
+#define SECR_FORCEECC_OFFS          0
+#define SECR_FORCEECC_MASK          (0xFF << SECR_FORCEECC_OFFS)
+#define SECR_FORCEEN_OFFS           8
+#define SECR_FORCEEN_MASK           (1 << SECR_FORCEEN_OFFS)
+#define SECR_ECC_CALC_MASK          (0 << SECR_FORCEEN_OFFS)
+#define SECR_ECC_USER_MASK          (1 << SECR_FORCEEN_OFFS)
+#define SECR_PERRPROP_EN            BIT9
+#define SECR_CNTMODE_OFFS           10
+#define SECR_CNTMODE_MASK           (1 << SECR_CNTMODE_OFFS)
+#define SECR_ALL_IN_CS0             (0 << SECR_CNTMODE_OFFS)
+#define SECR_NORMAL_COUNTER         (1 << SECR_CNTMODE_OFFS)
+#define SECR_THRECC_OFFS            16
+#define SECR_THRECC_MAX             0xFF
+#define SECR_THRECC_MASK            (SECR_THRECC_MAX << SECR_THRECC_OFFS)
+#define SECR_THRECC(threshold)      (threshold << SECR_THRECC_OFFS)
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INCmvDramIfRegsh */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfStaticInit.h b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfStaticInit.h
new file mode 100644
index 000000000000..ac2a57a3f4c7
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/mvDramIfStaticInit.h
@@ -0,0 +1,178 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvDramIfStaticInith
+#define __INCmvDramIfStaticInith
+
+#ifdef MV_STATIC_DRAM_ON_BOARD
+#define STATIC_DRAM_BANK_1
+#undef	STATIC_DRAM_BANK_2
+#undef	STATIC_DRAM_BANK_3
+#undef 	STATIC_DRAM_BANK_4
+
+
+#ifdef MV_DIMM_TS256MLQ72V5U
+#define	STATIC_DRAM_BANK_2
+#define	STATIC_DRAM_BANK_3
+#undef 	STATIC_DRAM_BANK_4
+
+#define STATIC_SDRAM_CONFIG_REG		    0x4724481A  /* offset 0x1400 - DMA reg-0xf1000814 */
+#define STATIC_SDRAM_DUNIT_CTRL_REG         0x37707450  /* offset 0x1404 - DMA reg-0xf100081c */
+#define STATIC_SDRAM_TIMING_CTRL_LOW_REG    0x11A13330  /* offset 0x1408 - DMA reg-0xf1000824 */
+#define STATIC_SDRAM_TIMING_CTRL_HIGH_REG   0x00000601  /* offset 0x140c - DMA reg-0xf1000828 */
+#define STATIC_SDRAM_ADDR_CTRL_REG          0x00001CB2  /* offset 0x1410 - DMA reg-0xf1000820 */
+#define STATIC_SDRAM_MODE_REG               0x00000642  /* offset 0x141c - DMA reg-0xf1000818 */
+#define STATIC_SDRAM_ODT_CTRL_LOW	    0x030C030C /*   0x1494  */
+#define STATIC_SDRAM_ODT_CTRL_HI	    0x00000000 /*   0x1498  */
+#define STATIC_SDRAM_DUNIT_ODT_CTRL    	    0x0000740F /*   0x149c  */
+#define STATIC_SDRAM_EXT_MODE          	    0x00000404 /*   0x1420  */
+#define STATIC_SDRAM_DDR2_TIMING_LO         0x00074410 /*   0x1428  */
+#define STATIC_SDRAM_DDR2_TIMING_HI         0x00007441 /*   0x147C  */
+
+#define STATIC_SDRAM_RANK0_SIZE_DIMM0       0x3FFF /* size bank0 dimm0   - DMA reg-0xf1000810 */
+#define STATIC_SDRAM_RANK1_SIZE_DIMM0       0x3FFF /* size bank1 dimm0   */
+#define STATIC_SDRAM_RANK0_SIZE_DIMM1       0x3FFF /* size bank0 dimm1   */
+#define STATIC_SDRAM_RANK1_SIZE_DIMM1       0x0	   /* size bank1 dimm1   */
+
+#endif /* TS256MLQ72V5U */
+
+
+#ifdef MV_MT9VDDT3272AG
+/* one DIMM 256M  */
+#define STATIC_SDRAM_CONFIG_REG		    0x5820040d  /* offset 0x1400 - DMA reg-0xf1000814 */
+#define STATIC_SDRAM_DUNIT_CTRL_REG         0xC4000540  /* offset 0x1404 - DMA reg-0xf100081c */
+#define STATIC_SDRAM_TIMING_CTRL_LOW_REG    0x01602220  /* offset 0x1408 - DMA reg-0xf1000824 */
+#define STATIC_SDRAM_TIMING_CTRL_HIGH_REG   0x0000000b  /* offset 0x140c - DMA reg-0xf1000828 */
+#define STATIC_SDRAM_ADDR_CTRL_REG          0x00000012  /* offset 0x1410 - DMA reg-0xf1000820 */
+#define STATIC_SDRAM_MODE_REG               0x00000062  /* offset 0x141c - DMA reg-0xf1000818 */
+#define STATIC_SDRAM_RANK0_SIZE_DIMM0       0x0fff /* size bank0 dimm0   - DMA reg-0xf1000810 */
+#define STATIC_SDRAM_RANK0_SIZE_DIMM1       0x0    /* size bank0 dimm1   */
+
+#endif /* MV_MT9VDDT3272AG */
+
+
+
+#ifdef MV_D27RB12P
+/*
+Two DIMM 512M + ECC enabled, Registered DIMM  CAS Latency 2.5
+*/
+
+#define STATIC_SDRAM_CONFIG_REG		    0x6826081E  /* offset 0x1400 - DMA reg-0xf1000814 */
+#define STATIC_SDRAM_DUNIT_CTRL_REG         0xC5000540  /* offset 0x1404 - DMA reg-0xf100081c */
+#define STATIC_SDRAM_TIMING_CTRL_LOW_REG    0x01501220  /* offset 0x1408 - DMA reg-0xf1000824 */
+#define STATIC_SDRAM_TIMING_CTRL_HIGH_REG   0x00000009  /* offset 0x140c - DMA reg-0xf1000828 */
+#define STATIC_SDRAM_ADDR_CTRL_REG          0x00000012  /* offset 0x1410 - DMA reg-0xf1000820 */
+#define STATIC_SDRAM_MODE_REG               0x00000062  /* offset 0x141c - DMA reg-0xf1000818 */
+#define STATIC_SDRAM_RANK0_SIZE_DIMM0       0x0FFF /* size bank0 dimm0   - DMA reg-0xf1000810 */
+#define STATIC_SDRAM_RANK0_SIZE_DIMM1       0x0FFF    /* size bank0 dimm1   */
+
+#define STATIC_DRAM_BANK_2
+
+#define STATIC_DRAM_BANK_3
+#define STATIC_DRAM_BANK_4
+
+#endif /*  mv_D27RB12P  */
+
+#ifdef RD_MV645XX
+
+#define STATIC_MEM_TYPE				MEM_TYPE_DDR2
+#define STATIC_DIMM_INFO_BANK0_SIZE		256
+/* DDR2 boards 256 MB*/
+
+#define STATIC_SDRAM_RANK0_SIZE_DIMM0       	0x00000fff /* size bank0 dimm0   - DMA reg-0xf1000810 */
+#define STATIC_SDRAM_CONFIG_REG	     		0x07190618
+#define STATIC_SDRAM_MODE_REG	     		0x00000432
+#define STATIC_SDRAM_DUNIT_CTRL_REG     	0xf4a03440
+#define STATIC_SDRAM_ADDR_CTRL_REG	     	0x00000022
+#define STATIC_SDRAM_TIMING_CTRL_LOW_REG    	0x11712220
+#define STATIC_SDRAM_TIMING_CTRL_HIGH_REG	0x00000504
+#define STATIC_SDRAM_ODT_CTRL_LOW	     	0x84210000
+#define STATIC_SDRAM_ODT_CTRL_HI	     	0x00000000
+#define STATIC_SDRAM_DUNIT_ODT_CTRL    		0x0000780f
+#define STATIC_SDRAM_EXT_MODE          		0x00000440
+#define STATIC_SDRAM_DDR2_TIMING_LO         	0x00063300
+#define STATIC_SDRAM_DDR2_TIMING_HI         	0x00006330
+#endif /* RD_MV645XX */
+
+#if MV_DIMM_M3783354CZ3_CE6
+
+#define STATIC_SDRAM_RANK0_SIZE_DIMM0		0x00000FFF /* 0x2010 size bank0 dimm0   - DMA reg-0xf1000810 */
+#define STATIC_SDRAM_CONFIG_REG	     		0x07190618 /*   0x1400  */
+#define STATIC_SDRAM_MODE_REG	     		0x00000432 /*   0x141c  */
+#define STATIC_SDRAM_DUNIT_CTRL_REG     	0xf4a03440 /*   0x1404  */
+#define STATIC_SDRAM_ADDR_CTRL_REG	     	0x00000022 /*   0x1410  */
+#define STATIC_SDRAM_TIMING_CTRL_LOW_REG	0x11712220 /*   0x1408  */
+#define STATIC_SDRAM_TIMING_CTRL_HIGH_REG	0x00000504 /*   0x140c  */
+#define STATIC_SDRAM_ODT_CTRL_LOW	     	0x84210000 /*   0x1494  */
+#define STATIC_SDRAM_ODT_CTRL_HI	     	0x00000000 /*   0x1498  */
+#define STATIC_SDRAM_DUNIT_ODT_CTRL    		0x0000780f /*   0x149c  */
+#define STATIC_SDRAM_EXT_MODE          		0x00000440 /*   0x1420  */
+#define STATIC_SDRAM_DDR2_TIMING_LO		0x00063300 /*   0x1428  */
+#define STATIC_SDRAM_DDR2_TIMING_HI		0x00006330 /*   0x147C  */
+
+#endif /* MV_DIMM_M3783354CZ3_CE6 */
+
+#endif /* MV_STATIC_DRAM_ON_BOARD */
+#endif /* __INCmvDramIfStaticInith */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/spd/mvSpd.c b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/spd/mvSpd.c
new file mode 100644
index 000000000000..94c89edc1838
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/spd/mvSpd.c
@@ -0,0 +1,1473 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "ddr2/spd/mvSpd.h"
+#include "boardEnv/mvBoardEnvLib.h"
+
+/* #define MV_DEBUG */
+#ifdef MV_DEBUG
+#define DB(x) x
+#else
+#define DB(x)
+#endif
+
+static MV_VOID cpyDimm2BankInfo(MV_DIMM_INFO *pDimmInfo,
+                                            MV_DRAM_BANK_INFO *pBankInfo);
+static MV_U32  cas2ps(MV_U8 spd_byte);
+/*******************************************************************************
+* mvDramBankGet - Get the DRAM bank paramters.
+*
+* DESCRIPTION:
+*       This function retrieves DRAM bank parameters as described in
+*       DRAM_BANK_INFO struct to the controller DRAM unit. In case the board
+*       has its DRAM on DIMMs it will use its EEPROM to extract SPD data
+*       from it. Otherwise, if the DRAM is soldered on board, the function
+*       should insert its bank information into MV_DRAM_BANK_INFO struct.
+*
+* INPUT:
+*       bankNum  - Board DRAM bank number.
+*
+* OUTPUT:
+*       pBankInfo  - DRAM bank information struct.
+*
+* RETURN:
+*       MV_FAIL - Bank parameters could not be read.
+*
+*******************************************************************************/
+MV_STATUS mvDramBankInfoGet(MV_U32 bankNum, MV_DRAM_BANK_INFO *pBankInfo)
+{
+    MV_DIMM_INFO dimmInfo;
+
+    DB(mvOsPrintf("Dram: mvDramBankInfoGet bank %d\n", bankNum));
+    /* zero pBankInfo structure */
+
+    if((NULL == pBankInfo) || (bankNum >= MV_DRAM_MAX_CS ))
+    {
+        DB(mvOsPrintf("Dram: mvDramBankInfoGet bad params \n"));
+        return MV_BAD_PARAM;
+    }
+    memset(pBankInfo, 0, sizeof(*pBankInfo));
+
+	if ( MV_OK != dimmSpdGet((MV_U32)(bankNum/2), &dimmInfo))
+	{
+		DB(mvOsPrintf("Dram: ERR dimmSpdGet failed to get dimm info \n"));
+		return MV_FAIL;
+	}
+	if ((dimmInfo.numOfModuleBanks == 1) && ((bankNum % 2) == 1))
+	{
+		DB(mvOsPrintf("Dram: ERR dimmSpdGet. Can't find DIMM bank 2 \n"));
+		return MV_FAIL;
+	}
+	/* convert Dimm info to Bank info */
+    cpyDimm2BankInfo(&dimmInfo, pBankInfo);
+    return MV_OK;
+}
+
+/*******************************************************************************
+* cpyDimm2BankInfo - Convert a Dimm info struct into a bank info struct.
+*
+* DESCRIPTION:
+*       Convert a Dimm info struct into a bank info struct.
+*
+* INPUT:
+*       pDimmInfo - DIMM information structure.
+*
+* OUTPUT:
+*       pBankInfo  - DRAM bank information struct.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+static MV_VOID cpyDimm2BankInfo(MV_DIMM_INFO *pDimmInfo,
+                                                MV_DRAM_BANK_INFO *pBankInfo)
+{
+    pBankInfo->memoryType = pDimmInfo->memoryType;
+
+    /* DIMM dimensions */
+    pBankInfo->numOfRowAddr = pDimmInfo->numOfRowAddr;
+    pBankInfo->numOfColAddr = pDimmInfo->numOfColAddr;
+    pBankInfo->dataWidth = pDimmInfo->dataWidth;
+    pBankInfo->errorCheckType = pDimmInfo->errorCheckType;
+    pBankInfo->sdramWidth = pDimmInfo->sdramWidth;
+    pBankInfo->errorCheckDataWidth = pDimmInfo->errorCheckDataWidth;
+    pBankInfo->numOfBanksOnEachDevice = pDimmInfo->numOfBanksOnEachDevice;
+    pBankInfo->suportedCasLatencies = pDimmInfo->suportedCasLatencies;
+    pBankInfo->refreshInterval = pDimmInfo->refreshInterval;
+
+    /* DIMM timing parameters */
+    pBankInfo->minCycleTimeAtMaxCasLatPs = pDimmInfo->minCycleTimeAtMaxCasLatPs;
+    pBankInfo->minCycleTimeAtMaxCasLatMinus1Ps =
+                                    pDimmInfo->minCycleTimeAtMaxCasLatMinus1Ps;
+    pBankInfo->minCycleTimeAtMaxCasLatMinus2Ps =
+                                    pDimmInfo->minCycleTimeAtMaxCasLatMinus2Ps;
+
+    pBankInfo->minRowPrechargeTime     = pDimmInfo->minRowPrechargeTime;
+    pBankInfo->minRowActiveToRowActive = pDimmInfo->minRowActiveToRowActive;
+    pBankInfo->minRasToCasDelay        = pDimmInfo->minRasToCasDelay;
+    pBankInfo->minRasPulseWidth        = pDimmInfo->minRasPulseWidth;
+    pBankInfo->minWriteRecoveryTime    = pDimmInfo->minWriteRecoveryTime;
+    pBankInfo->minWriteToReadCmdDelay  = pDimmInfo->minWriteToReadCmdDelay;
+    pBankInfo->minReadToPrechCmdDelay  = pDimmInfo->minReadToPrechCmdDelay;
+    pBankInfo->minRefreshToActiveCmd   = pDimmInfo->minRefreshToActiveCmd;
+
+    /* Parameters calculated from the extracted DIMM information */
+    pBankInfo->size = pDimmInfo->size/pDimmInfo->numOfModuleBanks;
+    pBankInfo->deviceDensity = pDimmInfo->deviceDensity;
+    pBankInfo->numberOfDevices = pDimmInfo->numberOfDevices /
+                                 pDimmInfo->numOfModuleBanks;
+
+    /* DIMM attributes (MV_TRUE for yes) */
+
+    if ((pDimmInfo->memoryType == MEM_TYPE_SDRAM) ||
+        (pDimmInfo->memoryType == MEM_TYPE_DDR1)   )
+    {
+        if (pDimmInfo->dimmAttributes & BIT1)
+            pBankInfo->registeredAddrAndControlInputs = MV_TRUE;
+        else
+            pBankInfo->registeredAddrAndControlInputs = MV_FALSE;
+    }
+    else /* pDimmInfo->memoryType == MEM_TYPE_DDR2 */
+    {
+        if (pDimmInfo->dimmTypeInfo & (BIT0 | BIT4))
+            pBankInfo->registeredAddrAndControlInputs = MV_TRUE;
+        else
+            pBankInfo->registeredAddrAndControlInputs = MV_FALSE;
+    }
+
+    return;
+}
+/*******************************************************************************
+* dimmSpdCpy - Cpy SPD parameters from dimm 0 to dimm 1.
+*
+* DESCRIPTION:
+*       Read the DIMM SPD parameters from dimm 0 into dimm 1 SPD.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if function could read DIMM parameters, MV_FALSE otherwise.
+*
+*******************************************************************************/
+MV_STATUS dimmSpdCpy(MV_VOID)
+{
+    MV_U32 i;
+    MV_U32 spdChecksum;
+
+    MV_TWSI_SLAVE twsiSlave;
+    MV_U8 data[SPD_SIZE];
+
+    /* zero dimmInfo structure */
+    memset(data, 0, SPD_SIZE);
+
+    /* read the dimm eeprom */
+    DB(mvOsPrintf("DRAM: Read Dimm eeprom\n"));
+    twsiSlave.slaveAddr.address = MV_BOARD_DIMM0_I2C_ADDR;
+    twsiSlave.slaveAddr.type = ADDR7_BIT;
+    twsiSlave.validOffset = MV_TRUE;
+    twsiSlave.offset = 0;
+    twsiSlave.moreThen256 = MV_FALSE;
+
+    if( MV_OK != mvTwsiRead (MV_BOARD_DIMM_I2C_CHANNEL, &twsiSlave, data, SPD_SIZE) )
+    {
+        DB(mvOsPrintf("DRAM: ERR. no DIMM in dimmNum 0\n"));
+        return MV_FAIL;
+    }
+    DB(puts("DRAM: Reading dimm info succeded.\n"));
+
+    /* calculate SPD checksum */
+    spdChecksum = 0;
+
+    for(i = 0 ; i <= 62 ; i++)
+    {
+        spdChecksum += data[i];
+    }
+
+    if ((spdChecksum & 0xff) != data[63])
+    {
+        DB(mvOsPrintf("DRAM: Warning. Wrong SPD Checksum %2x, expValue=%2x\n",
+                            (MV_U32)(spdChecksum & 0xff), data[63]));
+    }
+    else
+    {
+        DB(mvOsPrintf("DRAM: SPD Checksum ok!\n"));
+    }
+
+    /* copy the SPD content 1:1 into the DIMM 1 SPD */
+    twsiSlave.slaveAddr.address = MV_BOARD_DIMM1_I2C_ADDR;
+    twsiSlave.slaveAddr.type = ADDR7_BIT;
+    twsiSlave.validOffset = MV_TRUE;
+    twsiSlave.offset = 0;
+    twsiSlave.moreThen256 = MV_FALSE;
+
+    for(i = 0 ; i < SPD_SIZE ; i++)
+    {
+	twsiSlave.offset = i;
+	if( MV_OK != mvTwsiWrite (MV_BOARD_DIMM_I2C_CHANNEL, &twsiSlave, &data[i], 1) )
+	{
+	    mvOsPrintf("DRAM: ERR. no DIMM in dimmNum 1 byte %d \n",i);
+	    return MV_FAIL;
+	}
+	mvOsDelay(5);
+    }
+
+    DB(puts("DRAM: Reading dimm info succeded.\n"));
+    return MV_OK;
+}
+
+/*******************************************************************************
+* dimmSpdGet - Get the SPD parameters.
+*
+* DESCRIPTION:
+*       Read the DIMM SPD parameters into given struct parameter.
+*
+* INPUT:
+*       dimmNum - DIMM number. See MV_BOARD_DIMM_NUM enumerator.
+*
+* OUTPUT:
+*       pDimmInfo - DIMM information structure.
+*
+* RETURN:
+*       MV_TRUE if function could read DIMM parameters, MV_FALSE otherwise.
+*
+*******************************************************************************/
+MV_STATUS dimmSpdGet(MV_U32 dimmNum, MV_DIMM_INFO *pDimmInfo)
+{
+    MV_U32 i;
+    MV_U32 density = 1;
+    MV_U32 spdChecksum;
+
+    MV_TWSI_SLAVE twsiSlave;
+    MV_U8 data[SPD_SIZE];
+
+    if((NULL == pDimmInfo)|| (dimmNum >= MAX_DIMM_NUM))
+    {
+        DB(mvOsPrintf("Dram: mvDramBankInfoGet bad params \n"));
+        return MV_BAD_PARAM;
+    }
+
+    /* zero dimmInfo structure */
+    memset(data, 0, SPD_SIZE);
+
+    /* read the dimm eeprom */
+    DB(mvOsPrintf("DRAM: Read Dimm eeprom\n"));
+    twsiSlave.slaveAddr.address = (dimmNum == 0) ?
+                            MV_BOARD_DIMM0_I2C_ADDR : MV_BOARD_DIMM1_I2C_ADDR;
+    twsiSlave.slaveAddr.type = ADDR7_BIT;
+    twsiSlave.validOffset = MV_TRUE;
+    twsiSlave.offset = 0;
+    twsiSlave.moreThen256 = MV_FALSE;
+
+    if( MV_OK != mvTwsiRead (MV_BOARD_DIMM_I2C_CHANNEL, &twsiSlave, data, SPD_SIZE) )
+    {
+        DB(mvOsPrintf("DRAM: ERR. no DIMM in dimmNum %d \n", dimmNum));
+        return MV_FAIL;
+    }
+    DB(puts("DRAM: Reading dimm info succeded.\n"));
+
+    /* calculate SPD checksum */
+    spdChecksum = 0;
+
+        for(i = 0 ; i <= 62 ; i++)
+        {
+        spdChecksum += data[i];
+    }
+
+    if ((spdChecksum & 0xff) != data[63])
+    {
+        DB(mvOsPrintf("DRAM: Warning. Wrong SPD Checksum %2x, expValue=%2x\n",
+                            (MV_U32)(spdChecksum & 0xff), data[63]));
+    }
+    else
+    {
+        DB(mvOsPrintf("DRAM: SPD Checksum ok!\n"));
+    }
+
+    /* copy the SPD content 1:1 into the dimmInfo structure*/
+    for(i = 0 ; i < SPD_SIZE ; i++)
+    {
+        pDimmInfo->spdRawData[i] = data[i];
+        DB(mvOsPrintf("SPD-EEPROM Byte %3d = %3x (%3d)\n",i, data[i], data[i]));
+    }
+
+    DB(mvOsPrintf("DRAM SPD Information:\n"));
+
+    /* Memory type (DDR / SDRAM) */
+    switch (data[DIMM_MEM_TYPE])
+    {
+        case (DIMM_MEM_TYPE_SDRAM):
+            pDimmInfo->memoryType = MEM_TYPE_SDRAM;
+            DB(mvOsPrintf("DRAM Memeory type SDRAM\n"));
+            break;
+        case (DIMM_MEM_TYPE_DDR1):
+            pDimmInfo->memoryType = MEM_TYPE_DDR1;
+            DB(mvOsPrintf("DRAM Memeory type DDR1\n"));
+            break;
+        case (DIMM_MEM_TYPE_DDR2):
+            pDimmInfo->memoryType = MEM_TYPE_DDR2;
+            DB(mvOsPrintf("DRAM Memeory type DDR2\n"));
+            break;
+        default:
+            mvOsPrintf("ERROR: Undefined memory type!\n");
+            return MV_ERROR;
+    }
+
+
+    /* Number Of Row Addresses */
+    pDimmInfo->numOfRowAddr = data[DIMM_ROW_NUM];
+    DB(mvOsPrintf("DRAM numOfRowAddr[3]         %d\n",pDimmInfo->numOfRowAddr));
+
+    /* Number Of Column Addresses */
+    pDimmInfo->numOfColAddr = data[DIMM_COL_NUM];
+    DB(mvOsPrintf("DRAM numOfColAddr[4]         %d\n",pDimmInfo->numOfColAddr));
+
+    /* Number Of Module Banks */
+    pDimmInfo->numOfModuleBanks = data[DIMM_MODULE_BANK_NUM];
+    DB(mvOsPrintf("DRAM numOfModuleBanks[5]     0x%x\n",
+                                                  pDimmInfo->numOfModuleBanks));
+
+    /* Number of module banks encoded differently for DDR2 */
+    if (pDimmInfo->memoryType == MEM_TYPE_DDR2)
+        pDimmInfo->numOfModuleBanks = (pDimmInfo->numOfModuleBanks & 0x7)+1;
+
+    /* Data Width */
+    pDimmInfo->dataWidth = data[DIMM_DATA_WIDTH];
+    DB(mvOsPrintf("DRAM dataWidth[6]            0x%x\n", pDimmInfo->dataWidth));
+
+    /* Minimum Cycle Time At Max CasLatancy */
+    pDimmInfo->minCycleTimeAtMaxCasLatPs = cas2ps(data[DIMM_MIN_CC_AT_MAX_CAS]);
+
+    /* Error Check Type */
+    pDimmInfo->errorCheckType = data[DIMM_ERR_CHECK_TYPE];
+    DB(mvOsPrintf("DRAM errorCheckType[11]      0x%x\n",
+                                                    pDimmInfo->errorCheckType));
+
+    /* Refresh Interval */
+    pDimmInfo->refreshInterval = data[DIMM_REFRESH_INTERVAL];
+    DB(mvOsPrintf("DRAM refreshInterval[12]     0x%x\n",
+                                                   pDimmInfo->refreshInterval));
+
+    /* Sdram Width */
+    pDimmInfo->sdramWidth = data[DIMM_SDRAM_WIDTH];
+    DB(mvOsPrintf("DRAM sdramWidth[13]          0x%x\n",pDimmInfo->sdramWidth));
+
+    /* Error Check Data Width */
+    pDimmInfo->errorCheckDataWidth = data[DIMM_ERR_CHECK_DATA_WIDTH];
+    DB(mvOsPrintf("DRAM errorCheckDataWidth[14] 0x%x\n",
+                                               pDimmInfo->errorCheckDataWidth));
+
+    /* Burst Length Supported */
+    /*     SDRAM/DDR1:
+                    *******-******-******-******-******-******-******-*******
+                    * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+                    *******-******-******-******-******-******-******-*******
+    burst length =  * Page | TBD  | TBD  | TBD  |  8   |  4   |  2   |   1  *
+                    *********************************************************/
+    /*     DDR2:
+                    *******-******-******-******-******-******-******-*******
+                    * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+                    *******-******-******-******-******-******-******-*******
+    burst length =  * Page | TBD  | TBD  | TBD  |  8   |  4   | TBD  | TBD  *
+                    *********************************************************/
+
+    pDimmInfo->burstLengthSupported = data[DIMM_BURST_LEN_SUP];
+    DB(mvOsPrintf("DRAM burstLengthSupported[16] 0x%x\n",
+                                              pDimmInfo->burstLengthSupported));
+
+    /* Number Of Banks On Each Device */
+    pDimmInfo->numOfBanksOnEachDevice = data[DIMM_DEV_BANK_NUM];
+    DB(mvOsPrintf("DRAM numOfBanksOnEachDevice[17] 0x%x\n",
+                                            pDimmInfo->numOfBanksOnEachDevice));
+
+    /* Suported Cas Latencies */
+
+    /*      SDRAM:
+            *******-******-******-******-******-******-******-*******
+            * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+            *******-******-******-******-******-******-******-*******
+    CAS =   * TBD  |  7   |  6   |  5   |  4   |  3   |   2  |   1  *
+            ********************************************************/
+
+    /*     DDR 1:
+            *******-******-******-******-******-******-******-*******
+            * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+            *******-******-******-******-******-******-******-*******
+    CAS =   * TBD  |  4   | 3.5  |   3  | 2.5  |  2   | 1.5  |   1  *
+            *********************************************************/
+
+    /*     DDR 2:
+            *******-******-******-******-******-******-******-*******
+            * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+            *******-******-******-******-******-******-******-*******
+    CAS =   * TBD  | TBD  |  5   |  4   |  3   |  2   | TBD  | TBD  *
+            *********************************************************/
+
+    pDimmInfo->suportedCasLatencies = data[DIMM_SUP_CAL];
+    DB(mvOsPrintf("DRAM suportedCasLatencies[18]    0x%x\n",
+                                              pDimmInfo->suportedCasLatencies));
+
+    /* For DDR2 only, get the DIMM type information */
+    if (pDimmInfo->memoryType == MEM_TYPE_DDR2)
+    {
+        pDimmInfo->dimmTypeInfo = data[DIMM_DDR2_TYPE_INFORMATION];
+        DB(mvOsPrintf("DRAM dimmTypeInfo[20] (DDR2) 0x%x\n",
+                                                      pDimmInfo->dimmTypeInfo));
+    }
+
+    /* SDRAM Modules Attributes */
+    pDimmInfo->dimmAttributes = data[DIMM_BUF_ADDR_CONT_IN];
+    DB(mvOsPrintf("DRAM dimmAttributes[21]          0x%x\n",
+                                                    pDimmInfo->dimmAttributes));
+
+    /* Minimum Cycle Time At Max CasLatancy Minus 1*/
+    pDimmInfo->minCycleTimeAtMaxCasLatMinus1Ps =
+                                    cas2ps(data[DIMM_MIN_CC_AT_MAX_CAS_MINUS1]);
+
+    /* Minimum Cycle Time At Max CasLatancy Minus 2*/
+    pDimmInfo->minCycleTimeAtMaxCasLatMinus2Ps =
+                                    cas2ps(data[DIMM_MIN_CC_AT_MAX_CAS_MINUS2]);
+
+    pDimmInfo->minRowPrechargeTime = data[DIMM_MIN_ROW_PRECHARGE_TIME];
+    DB(mvOsPrintf("DRAM minRowPrechargeTime[27]     0x%x\n",
+                                               pDimmInfo->minRowPrechargeTime));
+    pDimmInfo->minRowActiveToRowActive = data[DIMM_MIN_ROW_ACTIVE_TO_ROW_ACTIVE];
+    DB(mvOsPrintf("DRAM minRowActiveToRowActive[28] 0x%x\n",
+                                           pDimmInfo->minRowActiveToRowActive));
+    pDimmInfo->minRasToCasDelay = data[DIMM_MIN_RAS_TO_CAS_DELAY];
+    DB(mvOsPrintf("DRAM minRasToCasDelay[29]        0x%x\n",
+                                                  pDimmInfo->minRasToCasDelay));
+    pDimmInfo->minRasPulseWidth = data[DIMM_MIN_RAS_PULSE_WIDTH];
+    DB(mvOsPrintf("DRAM minRasPulseWidth[30]        0x%x\n",
+                                                  pDimmInfo->minRasPulseWidth));
+
+    /* DIMM Bank Density */
+    pDimmInfo->dimmBankDensity = data[DIMM_BANK_DENSITY];
+    DB(mvOsPrintf("DRAM dimmBankDensity[31]         0x%x\n",
+                                                   pDimmInfo->dimmBankDensity));
+
+    /* Only DDR2 includes Write Recovery Time field. Other SDRAM ignore     */
+    pDimmInfo->minWriteRecoveryTime = data[DIMM_MIN_WRITE_RECOVERY_TIME];
+    DB(mvOsPrintf("DRAM minWriteRecoveryTime[36]    0x%x\n",
+                                              pDimmInfo->minWriteRecoveryTime));
+
+    /* Only DDR2 includes Internal Write To Read Command Delay field.       */
+    pDimmInfo->minWriteToReadCmdDelay = data[DIMM_MIN_WRITE_TO_READ_CMD_DELAY];
+    DB(mvOsPrintf("DRAM minWriteToReadCmdDelay[37]  0x%x\n",
+                                            pDimmInfo->minWriteToReadCmdDelay));
+
+    /* Only DDR2 includes Internal Read To Precharge Command Delay field.   */
+    pDimmInfo->minReadToPrechCmdDelay = data[DIMM_MIN_READ_TO_PRECH_CMD_DELAY];
+    DB(mvOsPrintf("DRAM minReadToPrechCmdDelay[38]  0x%x\n",
+                                            pDimmInfo->minReadToPrechCmdDelay));
+
+    /* Only DDR2 includes Minimum Refresh to Activate/Refresh Command field */
+    pDimmInfo->minRefreshToActiveCmd = data[DIMM_MIN_REFRESH_TO_ACTIVATE_CMD];
+    DB(mvOsPrintf("DRAM minRefreshToActiveCmd[42]   0x%x\n",
+                                             pDimmInfo->minRefreshToActiveCmd));
+
+    /* calculating the sdram density. Representing device density from      */
+    /* bit 20 to allow representation of 4GB and above.                     */
+    /* For example, if density is 512Mbit 0x20000000, will be represent in  */
+    /* deviceDensity by 0x20000000 >> 16 --> 0x00000200. Another example    */
+    /* is density 8GB 0x200000000 >> 16 --> 0x00002000.                     */
+    density = (1 << ((pDimmInfo->numOfRowAddr + pDimmInfo->numOfColAddr) - 20));
+    pDimmInfo->deviceDensity = density *
+                                pDimmInfo->numOfBanksOnEachDevice *
+                                pDimmInfo->sdramWidth;
+    DB(mvOsPrintf("DRAM deviceDensity           %d\n",pDimmInfo->deviceDensity));
+
+    /* Number of devices includeing Error correction */
+    pDimmInfo->numberOfDevices = (pDimmInfo->dataWidth/pDimmInfo->sdramWidth) *
+                                  pDimmInfo->numOfModuleBanks;
+    DB(mvOsPrintf("DRAM numberOfDevices         %d\n",
+                                                   pDimmInfo->numberOfDevices));
+
+    pDimmInfo->size = 0;
+
+    /* Note that pDimmInfo->size is in MB units */
+    if (pDimmInfo->memoryType == MEM_TYPE_SDRAM)
+    {
+        if (pDimmInfo->dimmBankDensity & BIT0)
+            pDimmInfo->size += 1024;                /* Equal to 1GB     */
+        else if (pDimmInfo->dimmBankDensity & BIT1)
+            pDimmInfo->size += 8;                   /* Equal to 8MB     */
+        else if (pDimmInfo->dimmBankDensity & BIT2)
+            pDimmInfo->size += 16;                  /* Equal to 16MB    */
+        else if (pDimmInfo->dimmBankDensity & BIT3)
+            pDimmInfo->size += 32;                  /* Equal to 32MB    */
+        else if (pDimmInfo->dimmBankDensity & BIT4)
+            pDimmInfo->size += 64;                  /* Equal to 64MB    */
+        else if (pDimmInfo->dimmBankDensity & BIT5)
+            pDimmInfo->size += 128;                 /* Equal to 128MB   */
+        else if (pDimmInfo->dimmBankDensity & BIT6)
+            pDimmInfo->size += 256;                 /* Equal to 256MB   */
+        else if (pDimmInfo->dimmBankDensity & BIT7)
+            pDimmInfo->size += 512;                 /* Equal to 512MB   */
+    }
+    else if (pDimmInfo->memoryType == MEM_TYPE_DDR1)
+    {
+        if (pDimmInfo->dimmBankDensity & BIT0)
+            pDimmInfo->size += 1024;                /* Equal to 1GB     */
+        else if (pDimmInfo->dimmBankDensity & BIT1)
+            pDimmInfo->size += 2048;                /* Equal to 2GB     */
+        else if (pDimmInfo->dimmBankDensity & BIT2)
+            pDimmInfo->size += 16;                  /* Equal to 16MB    */
+        else if (pDimmInfo->dimmBankDensity & BIT3)
+            pDimmInfo->size += 32;                  /* Equal to 32MB    */
+        else if (pDimmInfo->dimmBankDensity & BIT4)
+            pDimmInfo->size += 64;                  /* Equal to 64MB    */
+        else if (pDimmInfo->dimmBankDensity & BIT5)
+            pDimmInfo->size += 128;                 /* Equal to 128MB   */
+        else if (pDimmInfo->dimmBankDensity & BIT6)
+            pDimmInfo->size += 256;                 /* Equal to 256MB   */
+        else if (pDimmInfo->dimmBankDensity & BIT7)
+            pDimmInfo->size += 512;                 /* Equal to 512MB   */
+    }
+    else /* if (dimmInfo.memoryType == MEM_TYPE_DDR2) */
+    {
+        if (pDimmInfo->dimmBankDensity & BIT0)
+            pDimmInfo->size += 1024;                /* Equal to 1GB     */
+        else if (pDimmInfo->dimmBankDensity & BIT1)
+            pDimmInfo->size += 2048;                /* Equal to 2GB     */
+        else if (pDimmInfo->dimmBankDensity & BIT2)
+            pDimmInfo->size += 4096;                /* Equal to 4GB     */
+        else if (pDimmInfo->dimmBankDensity & BIT3)
+            pDimmInfo->size += 8192;                /* Equal to 8GB     */
+        else if (pDimmInfo->dimmBankDensity & BIT4)
+            pDimmInfo->size += 16384;               /* Equal to 16GB    */
+        else if (pDimmInfo->dimmBankDensity & BIT5)
+            pDimmInfo->size += 128;                 /* Equal to 128MB   */
+        else if (pDimmInfo->dimmBankDensity & BIT6)
+            pDimmInfo->size += 256;                 /* Equal to 256MB   */
+        else if (pDimmInfo->dimmBankDensity & BIT7)
+            pDimmInfo->size += 512;                 /* Equal to 512MB   */
+    }
+
+    pDimmInfo->size *= pDimmInfo->numOfModuleBanks;
+
+    DB(mvOsPrintf("Dram: dimm size    %dMB \n",pDimmInfo->size));
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* dimmSpdPrint - Print the SPD parameters.
+*
+* DESCRIPTION:
+*       Print the Dimm SPD parameters.
+*
+* INPUT:
+*       pDimmInfo - DIMM information structure.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_VOID dimmSpdPrint(MV_U32 dimmNum)
+{
+    MV_DIMM_INFO dimmInfo;
+    MV_U32  i, temp = 0;
+    MV_U32  k, maskLeftOfPoint = 0, maskRightOfPoint = 0;
+    MV_U32  rightOfPoint = 0,leftOfPoint = 0, div, time_tmp, shift;
+    MV_U32  busClkPs;
+    MV_U8   trp_clocks=0, trcd_clocks, tras_clocks, trrd_clocks,
+            temp_buf[40], *spdRawData;
+
+    busClkPs = 1000000000 / (mvBoardSysClkGet() / 100);  /* in 10 ps units */
+
+    spdRawData = dimmInfo.spdRawData;
+
+    if(MV_OK != dimmSpdGet(dimmNum, &dimmInfo))
+    {
+        mvOsOutput("ERROR: Could not read SPD information!\n");
+        return;
+    }
+
+    /* find Manufactura of Dimm Module */
+    mvOsOutput("\nManufacturer's JEDEC ID Code:   ");
+    for(i = 0 ; i < DIMM_MODULE_MANU_SIZE ; i++)
+    {
+        mvOsOutput("%x",spdRawData[DIMM_MODULE_MANU_OFFS + i]);
+    }
+    mvOsOutput("\n");
+
+    /* Manufacturer's Specific Data */
+    for(i = 0 ; i < DIMM_MODULE_ID_SIZE ; i++)
+    {
+        temp_buf[i] = spdRawData[DIMM_MODULE_ID_OFFS + i];
+    }
+    mvOsOutput("Manufacturer's Specific Data:   %s\n", temp_buf);
+
+    /* Module Part Number */
+    for(i = 0 ; i < DIMM_MODULE_VEN_SIZE ; i++)
+    {
+        temp_buf[i] = spdRawData[DIMM_MODULE_VEN_OFFS + i];
+    }
+    mvOsOutput("Module Part Number:             %s\n", temp_buf);
+
+    /* Module Serial Number */
+    for(i = 0; i < sizeof(MV_U32); i++)
+    {
+	temp |= spdRawData[95+i] << 8*i;
+    }
+    mvOsOutput("DIMM Serial No.                 %ld (%lx)\n", (long)temp,
+                                    (long)temp);
+
+    /* find Manufac-Data of Dimm Module */
+    mvOsOutput("Manufactoring Date:             Year 20%d%d/ ww %d%d\n",
+                        ((spdRawData[93] & 0xf0) >> 4), (spdRawData[93] & 0xf),
+                        ((spdRawData[94] & 0xf0) >> 4), (spdRawData[94] & 0xf));
+    /* find modul_revision of Dimm Module */
+    mvOsOutput("Module Revision:                %d.%d\n",
+               spdRawData[62]/10, spdRawData[62]%10);
+
+    /* find manufac_place of Dimm Module */
+    mvOsOutput("manufac_place:                  %d\n", spdRawData[72]);
+
+    /* go over the first 35 I2C data bytes */
+    for(i = 2 ; i <= 35 ; i++)
+       switch(i)
+        {
+            case 2:  /* Memory type (DDR1/2 / SDRAM) */
+                if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+                    mvOsOutput("Dram Type is:                   SDRAM\n");
+                else if (dimmInfo.memoryType == MEM_TYPE_DDR1)
+                    mvOsOutput("Dram Type is:                   SDRAM DDR1\n");
+                else if (dimmInfo.memoryType == MEM_TYPE_DDR2)
+                    mvOsOutput("Dram Type is:                   SDRAM DDR2\n");
+                else
+                    mvOsOutput("Dram Type unknown\n");
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 3:  /* Number Of Row Addresses */
+                mvOsOutput("Module Number of row addresses: %d\n",
+                                                        dimmInfo.numOfRowAddr);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 4:  /* Number Of Column Addresses */
+                mvOsOutput("Module Number of col addresses: %d\n",
+                                                        dimmInfo.numOfColAddr);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 5:  /* Number Of Module Banks */
+                mvOsOutput("Number of Banks on Mod.:        %d\n",
+                                                    dimmInfo.numOfModuleBanks);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 6:  /* Data Width */
+                mvOsOutput("Module Data Width:              %d bit\n",
+                                                           dimmInfo.dataWidth);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 8:  /* Voltage Interface */
+                switch(spdRawData[i])
+                {
+                    case 0x0:
+                        mvOsOutput("Module is               TTL_5V_TOLERANT\n");
+                        break;
+                    case 0x1:
+                        mvOsOutput("Module is               LVTTL\n");
+                        break;
+                    case 0x2:
+                        mvOsOutput("Module is               HSTL_1_5V\n");
+                        break;
+                    case 0x3:
+                        mvOsOutput("Module is               SSTL_3_3V\n");
+                        break;
+                    case 0x4:
+                        mvOsOutput("Module is               SSTL_2_5V\n");
+                        break;
+                    case 0x5:
+                        if (dimmInfo.memoryType != MEM_TYPE_SDRAM)
+                        {
+                            mvOsOutput("Module is                 SSTL_1_8V\n");
+                            break;
+                        }
+                    default:
+                        mvOsOutput("Module is               VOLTAGE_UNKNOWN\n");
+                        break;
+                }
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 9:  /* Minimum Cycle Time At Max CasLatancy */
+                leftOfPoint = (spdRawData[i] & 0xf0) >> 4;
+                rightOfPoint = (spdRawData[i] & 0x0f) * 10;
+
+                /* DDR2 addition of right of point */
+                if ((spdRawData[i] & 0x0f) == 0xA)
+                {
+                    rightOfPoint = 25;
+                }
+                if ((spdRawData[i] & 0x0f) == 0xB)
+                {
+                    rightOfPoint = 33;
+                }
+                if ((spdRawData[i] & 0x0f) == 0xC)
+                {
+                    rightOfPoint = 66;
+                }
+                if ((spdRawData[i] & 0x0f) == 0xD)
+                {
+                    rightOfPoint = 75;
+                }
+                mvOsOutput("Minimum Cycle Time At Max CL:   %d.%d [ns]\n",
+                                                    leftOfPoint, rightOfPoint);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 10: /* Clock To Data Out */
+                div = (dimmInfo.memoryType == MEM_TYPE_SDRAM)? 10:100;
+                time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+                                                      ((spdRawData[i] & 0x0f));
+                leftOfPoint     = time_tmp / div;
+                rightOfPoint    = time_tmp % div;
+                mvOsOutput("Clock To Data Out:              %d.%d [ns]\n",
+                                                    leftOfPoint, rightOfPoint);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 11: /* Error Check Type */
+                mvOsOutput("Error Check Type (0=NONE):      %d\n",
+                                                      dimmInfo.errorCheckType);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 12: /* Refresh Interval */
+                mvOsOutput("Refresh Rate:                   %x\n",
+                                                     dimmInfo.refreshInterval);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 13: /* Sdram Width */
+                mvOsOutput("Sdram Width:                    %d bits\n",
+                                                          dimmInfo.sdramWidth);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 14: /* Error Check Data Width */
+                mvOsOutput("Error Check Data Width:         %d bits\n",
+                                                 dimmInfo.errorCheckDataWidth);
+                break;
+/*----------------------------------------------------------------------------*/
+
+           case 15: /* Minimum Clock Delay is unsupported */
+                if ((dimmInfo.memoryType == MEM_TYPE_SDRAM) ||
+                    (dimmInfo.memoryType == MEM_TYPE_DDR1))
+                {
+                    mvOsOutput("Minimum Clk Delay back to back: %d\n",
+                                                                spdRawData[i]);
+                }
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 16: /* Burst Length Supported */
+    /*     SDRAM/DDR1:
+                    *******-******-******-******-******-******-******-*******
+                    * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+                    *******-******-******-******-******-******-******-*******
+    burst length =  * Page | TBD  | TBD  | TBD  |  8   |  4   |  2   |   1  *
+                    *********************************************************/
+    /*     DDR2:
+                    *******-******-******-******-******-******-******-*******
+                    * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+                    *******-******-******-******-******-******-******-*******
+    burst length =  * Page | TBD  | TBD  | TBD  |  8   |  4   | TBD  | TBD  *
+                    *********************************************************/
+                mvOsOutput("Burst Length Supported: ");
+                if ((dimmInfo.memoryType == MEM_TYPE_SDRAM) ||
+                    (dimmInfo.memoryType == MEM_TYPE_DDR1))
+                {
+                    if (dimmInfo.burstLengthSupported & BIT0)
+                        mvOsOutput("1, ");
+                    if (dimmInfo.burstLengthSupported & BIT1)
+                        mvOsOutput("2, ");
+                }
+                if (dimmInfo.burstLengthSupported & BIT2)
+                    mvOsOutput("4, ");
+                if (dimmInfo.burstLengthSupported & BIT3)
+                    mvOsOutput("8, ");
+
+                mvOsOutput(" Bit \n");
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 17: /* Number Of Banks On Each Device */
+                mvOsOutput("Number Of Banks On Each Chip:   %d\n",
+                                              dimmInfo.numOfBanksOnEachDevice);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 18: /* Suported Cas Latencies */
+
+            /*      SDRAM:
+                    *******-******-******-******-******-******-******-*******
+                    * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+                    *******-******-******-******-******-******-******-*******
+            CAS =   * TBD  |  7   |  6   |  5   |  4   |  3   |   2  |   1  *
+                    ********************************************************/
+
+            /*     DDR 1:
+                    *******-******-******-******-******-******-******-*******
+                    * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+                    *******-******-******-******-******-******-******-*******
+            CAS =   * TBD  |  4   | 3.5  |   3  | 2.5  |  2   | 1.5  |   1  *
+                    *********************************************************/
+
+            /*     DDR 2:
+                    *******-******-******-******-******-******-******-*******
+                    * bit7 | bit6 | bit5 | bit4 | bit3 | bit2 | bit1 | bit0 *
+                    *******-******-******-******-******-******-******-*******
+            CAS =   * TBD  | TBD  |  5   |  4   |  3   |  2   | TBD  | TBD  *
+                    *********************************************************/
+
+                mvOsOutput("Suported Cas Latencies: (CL) 			");
+                if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+                {
+                    for (k = 0; k <=7; k++)
+                    {
+                        if (dimmInfo.suportedCasLatencies & (1 << k))
+                            mvOsOutput("%d,             ", k+1);
+                    }
+                }
+                else if (dimmInfo.memoryType == MEM_TYPE_DDR1)
+                {
+                    if (dimmInfo.suportedCasLatencies & BIT0)
+                        mvOsOutput("1, ");
+                    if (dimmInfo.suportedCasLatencies & BIT1)
+                        mvOsOutput("1.5, ");
+                    if (dimmInfo.suportedCasLatencies & BIT2)
+                        mvOsOutput("2, ");
+                    if (dimmInfo.suportedCasLatencies & BIT3)
+                        mvOsOutput("2.5, ");
+                    if (dimmInfo.suportedCasLatencies & BIT4)
+                        mvOsOutput("3, ");
+                    if (dimmInfo.suportedCasLatencies & BIT5)
+                        mvOsOutput("3.5, ");
+                }
+                else if (dimmInfo.memoryType == MEM_TYPE_DDR2)
+                {
+                    if (dimmInfo.suportedCasLatencies & BIT2)
+                        mvOsOutput("2, ");
+                    if (dimmInfo.suportedCasLatencies & BIT3)
+                        mvOsOutput("3, ");
+                    if (dimmInfo.suportedCasLatencies & BIT4)
+                        mvOsOutput("4, ");
+                    if (dimmInfo.suportedCasLatencies & BIT5)
+                        mvOsOutput("5, ");
+                }
+                else
+                    mvOsOutput("?.?, ");
+                mvOsOutput("\n");
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 20:   /* DDR2 DIMM type info */
+                if (dimmInfo.memoryType == MEM_TYPE_DDR2)
+                {
+                    if (dimmInfo.dimmTypeInfo & (BIT0 | BIT4))
+                        mvOsOutput("Registered DIMM (RDIMM)\n");
+                    else if (dimmInfo.dimmTypeInfo & (BIT1 | BIT5))
+                        mvOsOutput("Unbuffered DIMM (UDIMM)\n");
+                    else
+                        mvOsOutput("Unknown DIMM type.\n");
+                }
+
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 21: /* SDRAM Modules Attributes */
+                mvOsOutput("\nModule Attributes (SPD Byte 21): \n");
+
+                if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+                {
+                    if (dimmInfo.dimmAttributes & BIT0)
+                        mvOsOutput(" Buffered Addr/Control Input:   Yes\n");
+                    else
+                        mvOsOutput(" Buffered Addr/Control Input:   No\n");
+
+                    if (dimmInfo.dimmAttributes & BIT1)
+                        mvOsOutput(" Registered Addr/Control Input: Yes\n");
+                    else
+                        mvOsOutput(" Registered Addr/Control Input: No\n");
+
+                    if (dimmInfo.dimmAttributes & BIT2)
+                        mvOsOutput(" On-Card PLL (clock):           Yes \n");
+                    else
+                        mvOsOutput(" On-Card PLL (clock):           No \n");
+
+                    if (dimmInfo.dimmAttributes & BIT3)
+                        mvOsOutput(" Bufferd DQMB Input:            Yes \n");
+                    else
+                        mvOsOutput(" Bufferd DQMB Inputs:           No \n");
+
+                    if (dimmInfo.dimmAttributes & BIT4)
+                        mvOsOutput(" Registered DQMB Inputs:        Yes \n");
+                    else
+                        mvOsOutput(" Registered DQMB Inputs:        No \n");
+
+                    if (dimmInfo.dimmAttributes & BIT5)
+                        mvOsOutput(" Differential Clock Input:      Yes \n");
+                    else
+                        mvOsOutput(" Differential Clock Input:      No \n");
+
+                    if (dimmInfo.dimmAttributes & BIT6)
+                        mvOsOutput(" redundant Row Addressing:      Yes \n");
+                    else
+                        mvOsOutput(" redundant Row Addressing:      No \n");
+                }
+                else if (dimmInfo.memoryType == MEM_TYPE_DDR1)
+                {
+                    if (dimmInfo.dimmAttributes & BIT0)
+                        mvOsOutput(" Buffered Addr/Control Input:   Yes\n");
+                    else
+                        mvOsOutput(" Buffered Addr/Control Input:   No\n");
+
+                    if (dimmInfo.dimmAttributes & BIT1)
+                        mvOsOutput(" Registered Addr/Control Input: Yes\n");
+                    else
+                        mvOsOutput(" Registered Addr/Control Input: No\n");
+
+                    if (dimmInfo.dimmAttributes & BIT2)
+                        mvOsOutput(" On-Card PLL (clock):           Yes \n");
+                    else
+                        mvOsOutput(" On-Card PLL (clock):           No \n");
+
+                    if (dimmInfo.dimmAttributes & BIT3)
+                        mvOsOutput(" FET Switch On-Card Enabled:    Yes \n");
+                    else
+                        mvOsOutput(" FET Switch On-Card Enabled:    No \n");
+
+                    if (dimmInfo.dimmAttributes & BIT4)
+                        mvOsOutput(" FET Switch External Enabled:   Yes \n");
+                    else
+                        mvOsOutput(" FET Switch External Enabled:   No \n");
+
+                    if (dimmInfo.dimmAttributes & BIT5)
+                        mvOsOutput(" Differential Clock Input:      Yes \n");
+                    else
+                        mvOsOutput(" Differential Clock Input:      No \n");
+                }
+                else /* if (dimmInfo.memoryType == MEM_TYPE_DDR2) */
+                {
+                    mvOsOutput(" Number of Active Registers on the DIMM: %d\n",
+                                        (dimmInfo.dimmAttributes & 0x3) + 1);
+
+                    mvOsOutput(" Number of PLLs on the DIMM: %d\n",
+                                      ((dimmInfo.dimmAttributes) >> 2) & 0x3);
+
+                    if (dimmInfo.dimmAttributes & BIT4)
+                        mvOsOutput(" FET Switch External Enabled:   Yes \n");
+                    else
+                        mvOsOutput(" FET Switch External Enabled:   No \n");
+
+                    if (dimmInfo.dimmAttributes & BIT6)
+                        mvOsOutput(" Analysis probe installed:      Yes \n");
+                    else
+                        mvOsOutput(" Analysis probe installed:      No \n");
+                }
+
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 22: /* Suported AutoPreCharge */
+                mvOsOutput("\nModul Attributes (SPD Byte 22): \n");
+                if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+                {
+                    if ( spdRawData[i] & BIT0 )
+                        mvOsOutput(" Early Ras Precharge:           Yes \n");
+                    else
+                        mvOsOutput(" Early Ras Precharge:           No \n");
+
+                    if ( spdRawData[i] & BIT1 )
+                        mvOsOutput(" AutoPreCharge:                 Yes \n");
+                    else
+                        mvOsOutput(" AutoPreCharge:                 No \n");
+
+                    if ( spdRawData[i] & BIT2 )
+                        mvOsOutput(" Precharge All:                 Yes \n");
+                    else
+                        mvOsOutput(" Precharge All:                 No \n");
+
+                    if ( spdRawData[i] & BIT3 )
+                        mvOsOutput(" Write 1/ReadBurst:             Yes \n");
+                    else
+                        mvOsOutput(" Write 1/ReadBurst:             No \n");
+
+                    if ( spdRawData[i] & BIT4 )
+                        mvOsOutput(" lower VCC tolerance:           5%%\n");
+                    else
+                        mvOsOutput(" lower VCC tolerance:           10%%\n");
+
+                    if ( spdRawData[i] & BIT5 )
+                        mvOsOutput(" upper VCC tolerance:           5%%\n");
+                    else
+                        mvOsOutput(" upper VCC tolerance:           10%%\n");
+                }
+                else if (dimmInfo.memoryType == MEM_TYPE_DDR1)
+                {
+                    if ( spdRawData[i] & BIT0 )
+                        mvOsOutput(" Supports Weak Driver:          Yes \n");
+                    else
+                        mvOsOutput(" Supports Weak Driver:          No \n");
+
+                    if ( !(spdRawData[i] & BIT4) )
+                        mvOsOutput(" lower VCC tolerance:           0.2V\n");
+
+                    if ( !(spdRawData[i] & BIT5) )
+                        mvOsOutput(" upper VCC tolerance:           0.2V\n");
+
+                    if ( spdRawData[i] & BIT6 )
+                        mvOsOutput(" Concurrent Auto Preharge:      Yes \n");
+                    else
+                        mvOsOutput(" Concurrent Auto Preharge:      No \n");
+
+                    if ( spdRawData[i] & BIT7 )
+                        mvOsOutput(" Supports Fast AP:              Yes \n");
+                    else
+                        mvOsOutput(" Supports Fast AP:              No \n");
+                }
+                else if (dimmInfo.memoryType == MEM_TYPE_DDR2)
+                {
+                    if ( spdRawData[i] & BIT0 )
+                        mvOsOutput(" Supports Weak Driver:          Yes \n");
+                    else
+                        mvOsOutput(" Supports Weak Driver:          No \n");
+                }
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 23:
+            /* Minimum Cycle Time At Maximum Cas Latancy Minus 1 (2nd highest CL) */
+                leftOfPoint = (spdRawData[i] & 0xf0) >> 4;
+                rightOfPoint = (spdRawData[i] & 0x0f) * 10;
+
+                /* DDR2 addition of right of point */
+                if ((spdRawData[i] & 0x0f) == 0xA)
+                {
+                    rightOfPoint = 25;
+                }
+                if ((spdRawData[i] & 0x0f) == 0xB)
+                {
+                    rightOfPoint = 33;
+                }
+                if ((spdRawData[i] & 0x0f) == 0xC)
+                {
+                    rightOfPoint = 66;
+                }
+                if ((spdRawData[i] & 0x0f) == 0xD)
+                {
+                    rightOfPoint = 75;
+                }
+
+                mvOsOutput("Minimum Cycle Time At 2nd highest CasLatancy"
+                           "(0 = Not supported): %d.%d [ns]\n",
+                           leftOfPoint, rightOfPoint );
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 24: /* Clock To Data Out 2nd highest Cas Latency Value*/
+                div = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ? 10:100;
+                time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+                                                    ((spdRawData[i] & 0x0f));
+                leftOfPoint     = time_tmp / div;
+                rightOfPoint    = time_tmp % div;
+                mvOsOutput("Clock To Data Out (2nd CL value): 		%d.%d [ns]\n",
+                                                    leftOfPoint, rightOfPoint);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 25:
+            /* Minimum Cycle Time At Maximum Cas Latancy Minus 2 (3rd highest CL) */
+                if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+                {
+                    leftOfPoint = (spdRawData[i] & 0xfc) >> 2;
+                    rightOfPoint = (spdRawData[i] & 0x3) * 25;
+                }
+                else    /* DDR1 or DDR2 */
+                {
+                    leftOfPoint = (spdRawData[i] & 0xf0) >> 4;
+                    rightOfPoint = (spdRawData[i] & 0x0f) * 10;
+
+                    /* DDR2 addition of right of point */
+                    if ((spdRawData[i] & 0x0f) == 0xA)
+                    {
+                        rightOfPoint = 25;
+                    }
+                    if ((spdRawData[i] & 0x0f) == 0xB)
+                    {
+                        rightOfPoint = 33;
+                    }
+                    if ((spdRawData[i] & 0x0f) == 0xC)
+                    {
+                        rightOfPoint = 66;
+                    }
+                    if ((spdRawData[i] & 0x0f) == 0xD)
+                    {
+                        rightOfPoint = 75;
+                    }
+                }
+                mvOsOutput("Minimum Cycle Time At 3rd highest CasLatancy"
+                           "(0 = Not supported): %d.%d [ns]\n",
+                           leftOfPoint, rightOfPoint );
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 26: /* Clock To Data Out 3rd highest Cas Latency Value*/
+                if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+                {
+                    leftOfPoint = (spdRawData[i] & 0xfc) >> 2;
+                    rightOfPoint = (spdRawData[i] & 0x3) * 25;
+                }
+                else    /* DDR1 or DDR2 */
+                {
+                    time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+                                                      ((spdRawData[i] & 0x0f));
+                    leftOfPoint     = 0;
+                    rightOfPoint    = time_tmp;
+                }
+                mvOsOutput("Clock To Data Out (3rd CL value): 		%d.%2d[ns]\n",
+                                                  leftOfPoint, rightOfPoint );
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 27: /* Minimum Row Precharge Time */
+                shift = (dimmInfo.memoryType == MEM_TYPE_SDRAM)? 0:2;
+                maskLeftOfPoint  = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+                                                                    0xff : 0xfc;
+                maskRightOfPoint = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+                                                                    0x00 : 0x03;
+                leftOfPoint = ((spdRawData[i] & maskLeftOfPoint) >> shift);
+                rightOfPoint = (spdRawData[i] & maskRightOfPoint)*25;
+                temp = ((leftOfPoint*100) + rightOfPoint);/* in 10ps Intervals*/
+                trp_clocks = (temp + (busClkPs-1)) /  busClkPs;
+                mvOsOutput("Minimum Row Precharge Time [ns]: 		%d.%d = "
+                           "in Clk cycles %d\n",
+                           leftOfPoint, rightOfPoint, trp_clocks);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 28: /* Minimum Row Active to Row Active Time */
+                shift = (dimmInfo.memoryType == MEM_TYPE_SDRAM)? 0:2;
+                maskLeftOfPoint  = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+                                                                    0xff : 0xfc;
+                maskRightOfPoint = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+                                                                    0x00 : 0x03;
+                leftOfPoint = ((spdRawData[i] & maskLeftOfPoint) >> shift);
+                rightOfPoint = (spdRawData[i] & maskRightOfPoint)*25;
+                temp = ((leftOfPoint*100) + rightOfPoint);/* in 100ns Interval*/
+                trrd_clocks = (temp + (busClkPs-1)) / busClkPs;
+                mvOsOutput("Minimum Row Active -To- Row Active Delay [ns]: "
+                           "%d.%d = in Clk cycles %d\n",
+                            leftOfPoint, rightOfPoint, trp_clocks);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 29: /* Minimum Ras-To-Cas Delay */
+                shift = (dimmInfo.memoryType == MEM_TYPE_SDRAM)? 0:2;
+                maskLeftOfPoint  = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+                                                                    0xff : 0xfc;
+                maskRightOfPoint = (dimmInfo.memoryType == MEM_TYPE_SDRAM) ?
+                                                                    0x00 : 0x03;
+                leftOfPoint = ((spdRawData[i] & maskLeftOfPoint) >> shift);
+                rightOfPoint = (spdRawData[i] & maskRightOfPoint)*25;
+                temp = ((leftOfPoint*100) + rightOfPoint);/* in 100ns Interval*/
+                trcd_clocks = (temp + (busClkPs-1) )/ busClkPs;
+                mvOsOutput("Minimum Ras-To-Cas Delay [ns]: 			%d.%d = "
+                           "in Clk cycles %d\n",
+                           leftOfPoint, rightOfPoint, trp_clocks);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 30: /* Minimum Ras Pulse Width */
+                tras_clocks = (cas2ps(spdRawData[i])+(busClkPs-1)) / busClkPs;
+                mvOsOutput("Minimum Ras Pulse Width [ns]: 			%d = "
+                           "in Clk cycles %d\n", spdRawData[i], tras_clocks);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 31: /* Module Bank Density */
+                mvOsOutput("Module Bank Density (more than 1= Multisize-Module):");
+
+                if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+                {
+                    if (dimmInfo.dimmBankDensity & BIT0)
+                        mvOsOutput("1GB, ");
+                    if (dimmInfo.dimmBankDensity & BIT1)
+                        mvOsOutput("8MB, ");
+                    if (dimmInfo.dimmBankDensity & BIT2)
+                        mvOsOutput("16MB, ");
+                    if (dimmInfo.dimmBankDensity & BIT3)
+                        mvOsOutput("32MB, ");
+                    if (dimmInfo.dimmBankDensity & BIT4)
+                        mvOsOutput("64MB, ");
+                    if (dimmInfo.dimmBankDensity & BIT5)
+                        mvOsOutput("128MB, ");
+                    if (dimmInfo.dimmBankDensity & BIT6)
+                        mvOsOutput("256MB, ");
+                    if (dimmInfo.dimmBankDensity & BIT7)
+                        mvOsOutput("512MB, ");
+                }
+                else if (dimmInfo.memoryType == MEM_TYPE_DDR1)
+                {
+                    if (dimmInfo.dimmBankDensity & BIT0)
+                        mvOsOutput("1GB, ");
+                    if (dimmInfo.dimmBankDensity & BIT1)
+                        mvOsOutput("2GB, ");
+                    if (dimmInfo.dimmBankDensity & BIT2)
+                        mvOsOutput("16MB, ");
+                    if (dimmInfo.dimmBankDensity & BIT3)
+                        mvOsOutput("32MB, ");
+                    if (dimmInfo.dimmBankDensity & BIT4)
+                        mvOsOutput("64MB, ");
+                    if (dimmInfo.dimmBankDensity & BIT5)
+                        mvOsOutput("128MB, ");
+                    if (dimmInfo.dimmBankDensity & BIT6)
+                        mvOsOutput("256MB, ");
+                    if (dimmInfo.dimmBankDensity & BIT7)
+                        mvOsOutput("512MB, ");
+                }
+                else /* if (dimmInfo.memoryType == MEM_TYPE_DDR2) */
+                {
+                    if (dimmInfo.dimmBankDensity & BIT0)
+                        mvOsOutput("1GB, ");
+                    if (dimmInfo.dimmBankDensity & BIT1)
+                        mvOsOutput("2GB, ");
+                    if (dimmInfo.dimmBankDensity & BIT2)
+                        mvOsOutput("4GB, ");
+                    if (dimmInfo.dimmBankDensity & BIT3)
+                        mvOsOutput("8GB, ");
+                    if (dimmInfo.dimmBankDensity & BIT4)
+                        mvOsOutput("16GB, ");
+                    if (dimmInfo.dimmBankDensity & BIT5)
+                    mvOsOutput("128MB, ");
+                        if (dimmInfo.dimmBankDensity & BIT6)
+                    mvOsOutput("256MB, ");
+                        if (dimmInfo.dimmBankDensity & BIT7)
+                    mvOsOutput("512MB, ");
+                }
+                mvOsOutput("\n");
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 32: /* Address And Command Setup Time (measured in ns/1000) */
+                if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+                {
+                    rightOfPoint = (spdRawData[i] & 0x0f);
+                    leftOfPoint  = (spdRawData[i] & 0xf0) >> 4;
+                    if(leftOfPoint > 7)
+                    {
+                    leftOfPoint *= -1;
+                    }
+                }
+                else /* DDR1 or DDR2 */
+                {
+                    time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+                                                      ((spdRawData[i] & 0x0f));
+                    leftOfPoint = time_tmp / 100;
+                    rightOfPoint = time_tmp % 100;
+                }
+                mvOsOutput("Address And Command Setup Time [ns]: 		%d.%d\n",
+                                                     leftOfPoint, rightOfPoint);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 33: /* Address And Command Hold Time */
+                if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+                {
+                    rightOfPoint = (spdRawData[i] & 0x0f);
+                    leftOfPoint  = (spdRawData[i] & 0xf0) >> 4;
+                    if(leftOfPoint > 7)
+                    {
+                    leftOfPoint *= -1;
+                    }
+                }
+                else /* DDR1 or DDR2 */
+                {
+                    time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+                                                      ((spdRawData[i] & 0x0f));
+                    leftOfPoint = time_tmp / 100;
+                    rightOfPoint = time_tmp % 100;
+                }
+                mvOsOutput("Address And Command Hold Time [ns]: 		%d.%d\n",
+                                                   leftOfPoint, rightOfPoint);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 34: /* Data Input Setup Time */
+                if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+                {
+                    rightOfPoint = (spdRawData[i] & 0x0f);
+                    leftOfPoint  = (spdRawData[i] & 0xf0) >> 4;
+                    if(leftOfPoint > 7)
+                    {
+                        leftOfPoint *= -1;
+                    }
+                }
+                else /* DDR1 or DDR2 */
+                {
+                    time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+                                                      ((spdRawData[i] & 0x0f));
+                    leftOfPoint = time_tmp / 100;
+                    rightOfPoint = time_tmp % 100;
+                }
+                mvOsOutput("Data Input Setup Time [ns]: 			%d.%d\n",
+                                                    leftOfPoint, rightOfPoint);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 35: /* Data Input Hold Time */
+                if (dimmInfo.memoryType == MEM_TYPE_SDRAM)
+                {
+                    rightOfPoint = (spdRawData[i] & 0x0f);
+                    leftOfPoint  = (spdRawData[i] & 0xf0) >> 4;
+                    if(leftOfPoint > 7)
+                    {
+                        leftOfPoint *= -1;
+                    }
+                }
+                else /* DDR1 or DDR2 */
+                {
+                    time_tmp = (((spdRawData[i] & 0xf0) >> 4)*10) +
+                                                      ((spdRawData[i] & 0x0f));
+                    leftOfPoint = time_tmp / 100;
+                    rightOfPoint = time_tmp % 100;
+                }
+                mvOsOutput("Data Input Hold Time [ns]: 			%d.%d\n\n",
+                                                    leftOfPoint, rightOfPoint);
+                break;
+/*----------------------------------------------------------------------------*/
+
+            case 36: /* Relevant for DDR2 only: Write Recovery Time */
+                leftOfPoint = ((spdRawData[i] & maskLeftOfPoint) >> 2);
+                rightOfPoint = (spdRawData[i] & maskRightOfPoint) * 25;
+                mvOsOutput("Write Recovery Time [ns]: 			%d.%d\n",
+                                                    leftOfPoint, rightOfPoint);
+                break;
+/*----------------------------------------------------------------------------*/
+        }
+
+}
+
+
+/*
+ * translate ns.ns/10 coding of SPD timing values
+ * into ps unit values
+ */
+/*******************************************************************************
+*  cas2ps - Translate x.y ns parameter to pico-seconds values
+*
+* DESCRIPTION:
+*       This function translates x.y nano seconds to its value in pico seconds.
+*       For example 3.75ns will return 3750.
+*
+* INPUT:
+*       spd_byte - DIMM SPD byte.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       value in pico seconds.
+*
+*******************************************************************************/
+static MV_U32  cas2ps(MV_U8 spd_byte)
+{
+    MV_U32 ns, ns10;
+
+    /* isolate upper nibble */
+    ns = (spd_byte >> 4) & 0x0F;
+    /* isolate lower nibble */
+    ns10 = (spd_byte & 0x0F);
+
+    if( ns10 < 10 ) {
+        ns10 *= 10;
+    }
+    else if( ns10 == 10 )
+        ns10 = 25;
+    else if( ns10 == 11 )
+        ns10 = 33;
+    else if( ns10 == 12 )
+        ns10 = 66;
+    else if( ns10 == 13 )
+        ns10 = 75;
+    else
+    {
+        mvOsOutput("cas2ps Err. unsupported cycle time.\n");
+    }
+
+    return (ns*1000 + ns10*10);
+}
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/spd/mvSpd.h b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/spd/mvSpd.h
new file mode 100644
index 000000000000..9a4d7f04ccfb
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/ddr2/spd/mvSpd.h
@@ -0,0 +1,192 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvDram
+#define __INCmvDram
+
+#include "ddr2/mvDramIf.h"
+#include "twsi/mvTwsi.h"
+
+#define MAX_DIMM_NUM 			2
+#define SPD_SIZE			128
+
+/* Dimm spd offsets */
+#define DIMM_MEM_TYPE 					2
+#define DIMM_ROW_NUM 					3
+#define DIMM_COL_NUM 					4
+#define DIMM_MODULE_BANK_NUM 				5
+#define DIMM_DATA_WIDTH 				6
+#define DIMM_VOLT_IF 					8
+#define DIMM_MIN_CC_AT_MAX_CAS 				9
+#define DIMM_ERR_CHECK_TYPE 				11
+#define DIMM_REFRESH_INTERVAL 				12
+#define DIMM_SDRAM_WIDTH 				13
+#define DIMM_ERR_CHECK_DATA_WIDTH 			14
+#define DIMM_MIN_CLK_DEL 				15
+#define DIMM_BURST_LEN_SUP 				16
+#define DIMM_DEV_BANK_NUM 				17
+#define DIMM_SUP_CAL 					18
+#define DIMM_DDR2_TYPE_INFORMATION          		20      /* DDR2 only */
+#define DIMM_BUF_ADDR_CONT_IN 				21
+#define DIMM_MIN_CC_AT_MAX_CAS_MINUS1			23
+#define DIMM_MIN_CC_AT_MAX_CAS_MINUS2			25
+#define DIMM_MIN_ROW_PRECHARGE_TIME			27
+#define DIMM_MIN_ROW_ACTIVE_TO_ROW_ACTIVE		28
+#define DIMM_MIN_RAS_TO_CAS_DELAY			29
+#define DIMM_MIN_RAS_PULSE_WIDTH			30
+#define DIMM_BANK_DENSITY				31
+#define DIMM_MIN_WRITE_RECOVERY_TIME        		36
+#define DIMM_MIN_WRITE_TO_READ_CMD_DELAY    		37
+#define DIMM_MIN_READ_TO_PRECH_CMD_DELAY    		38
+#define DIMM_MIN_REFRESH_TO_ACTIVATE_CMD    		42
+#define DIMM_SPD_VERSION    				62
+
+/* Dimm Memory Type values */
+#define DIMM_MEM_TYPE_SDRAM					0x4
+#define DIMM_MEM_TYPE_DDR1 					0x7
+#define DIMM_MEM_TYPE_DDR2 					0x8
+
+#define DIMM_MODULE_MANU_OFFS 		64
+#define DIMM_MODULE_MANU_SIZE 		8
+#define DIMM_MODULE_VEN_OFFS 		73
+#define DIMM_MODULE_VEN_SIZE 		25
+#define DIMM_MODULE_ID_OFFS 		99
+#define DIMM_MODULE_ID_SIZE 		18
+
+/* enumeration for voltage levels. */
+typedef enum _mvDimmVoltageIf
+{
+    TTL_5V_TOLERANT,
+    LVTTL,
+    HSTL_1_5V,
+    SSTL_3_3V,
+    SSTL_2_5V,
+    VOLTAGE_UNKNOWN,
+} MV_DIMM_VOLTAGE_IF;
+
+
+/* enumaration for SDRAM CAS Latencies. */
+typedef enum _mvDimmSdramCas
+{
+    SD_CL_1 =1,
+    SD_CL_2,
+    SD_CL_3,
+    SD_CL_4,
+    SD_CL_5,
+    SD_CL_6,
+    SD_CL_7,
+    SD_FAULT
+}MV_DIMM_SDRAM_CAS;
+
+
+/* DIMM information structure */
+typedef struct _mvDimmInfo
+{
+    MV_MEMORY_TYPE  memoryType; 	/* DDR or SDRAM */
+
+    MV_U8       spdRawData[SPD_SIZE];  	/* Content of SPD-EEPROM copied 1:1  */
+
+    /* DIMM dimensions */
+    MV_U32  numOfRowAddr;
+    MV_U32  numOfColAddr;
+    MV_U32  numOfModuleBanks;
+    MV_U32  dataWidth;
+    MV_U32  errorCheckType;             /* ECC , PARITY..*/
+    MV_U32  sdramWidth;                 /* 4,8,16 or 32 */
+    MV_U32  errorCheckDataWidth;        /* 0 - no, 1 - Yes */
+    MV_U32  burstLengthSupported;
+    MV_U32  numOfBanksOnEachDevice;
+    MV_U32  suportedCasLatencies;
+    MV_U32  refreshInterval;
+    MV_U32  dimmBankDensity;
+    MV_U32  dimmTypeInfo;           /* DDR2 only */
+    MV_U32  dimmAttributes;
+
+    /* DIMM timing parameters */
+    MV_U32  minCycleTimeAtMaxCasLatPs;
+    MV_U32  minCycleTimeAtMaxCasLatMinus1Ps;
+    MV_U32  minCycleTimeAtMaxCasLatMinus2Ps;
+	MV_U32  minRowPrechargeTime;
+	MV_U32  minRowActiveToRowActive;
+	MV_U32  minRasToCasDelay;
+	MV_U32  minRasPulseWidth;
+    MV_U32  minWriteRecoveryTime;   /* DDR2 only */
+    MV_U32  minWriteToReadCmdDelay; /* DDR2 only */
+    MV_U32  minReadToPrechCmdDelay; /* DDR2 only */
+    MV_U32  minRefreshToActiveCmd;  /* DDR2 only */
+
+    /* Parameters calculated from the extracted DIMM information */
+    MV_U32  size;               /* 16,64,128,256 or 512 MByte in MB units */
+    MV_U32  deviceDensity;      /* 16,64,128,256 or 512 Mbit in MB units  */
+    MV_U32  numberOfDevices;
+
+} MV_DIMM_INFO;
+
+
+MV_STATUS mvDramBankInfoGet(MV_U32 bankNum, MV_DRAM_BANK_INFO *pBankInfo);
+MV_STATUS dimmSpdGet(MV_U32 dimmNum, MV_DIMM_INFO *pDimmInfo);
+MV_VOID dimmSpdPrint(MV_U32 dimmNum);
+MV_STATUS dimmSpdCpy(MV_VOID);
+
+#endif /* __INCmvDram */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEth.c b/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEth.c
new file mode 100644
index 000000000000..769631d24eb0
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEth.c
@@ -0,0 +1,2943 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+        this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/*******************************************************************************
+* mvEth.c - Marvell's Gigabit Ethernet controller low level driver
+*
+* DESCRIPTION:
+*       This file introduce OS independent APIs to Marvell's Gigabit Ethernet
+*       controller. This Gigabit Ethernet Controller driver API controls
+*       1) Operations (i.e. port Init, Finish, Up, Down, PhyReset etc').
+*       2) Data flow (i.e. port Send, Receive etc').
+*       3) MAC Filtering functions (ethSetMcastAddr, ethSetRxFilterMode, etc.)
+*       4) MIB counters support (ethReadMibCounter)
+*       5) Debug functions (ethPortRegs, ethPortCounters, ethPortQueues, etc.)
+*       Each Gigabit Ethernet port is controlled via ETH_PORT_CTRL struct.
+*       This struct includes configuration information as well as driver
+*       internal data needed for its operations.
+*
+*       Supported Features:
+*       - OS independent. All required OS services are implemented via external
+*       OS dependent components (like osLayer or ethOsg)
+*       - The user is free from Rx/Tx queue managing.
+*       - Simple Gigabit Ethernet port operation API.
+*       - Simple Gigabit Ethernet port data flow API.
+*       - Data flow and operation API support per queue functionality.
+*       - Support cached descriptors for better performance.
+*       - PHY access and control API.
+*       - Port Configuration API.
+*       - Full control over Special and Other Multicast MAC tables.
+*
+*******************************************************************************/
+/* includes */
+#include "mvTypes.h"
+#include "mv802_3.h"
+#include "mvDebug.h"
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "eth-phy/mvEthPhy.h"
+#include "eth/mvEth.h"
+#include "eth/gbe/mvEthGbe.h"
+#include "cpu/mvCpu.h"
+
+#ifdef INCLUDE_SYNC_BARR
+#include "sys/mvCpuIf.h"
+#endif
+
+#ifdef MV_RT_DEBUG
+#   define ETH_DEBUG
+#endif
+
+
+/* locals */
+MV_BOOL         ethDescInSram;
+MV_BOOL         ethDescSwCoher;
+
+/* This array holds the control structure of each port */
+ETH_PORT_CTRL* ethPortCtrl[MV_ETH_MAX_PORTS];
+
+/* Ethernet Port Local routines */
+
+static void    ethInitRxDescRing(ETH_PORT_CTRL* pPortCtrl, int queue);
+
+static void    ethInitTxDescRing(ETH_PORT_CTRL* pPortCtrl, int queue);
+
+static void    ethSetUcastTable(int portNo, int queue);
+
+static MV_BOOL ethSetUcastAddr (int ethPortNum, MV_U8 lastNibble, int queue);
+static MV_BOOL ethSetSpecialMcastAddr(int ethPortNum, MV_U8 lastByte, int queue);
+static MV_BOOL ethSetOtherMcastAddr(int ethPortNum, MV_U8 crc8, int queue);
+
+static void    ethFreeDescrMemory(ETH_PORT_CTRL* pEthPortCtrl, MV_BUF_INFO* pDescBuf);
+static MV_U8*  ethAllocDescrMemory(ETH_PORT_CTRL* pEthPortCtrl, int size,
+                                   MV_ULONG* pPhysAddr, MV_U32 *memHandle);
+
+static MV_U32 mvEthMruGet(MV_U32 maxRxPktSize);
+
+static void mvEthPortSgmiiConfig(int port);
+
+
+
+/******************************************************************************/
+/*                      EthDrv Initialization functions                       */
+/******************************************************************************/
+
+/*******************************************************************************
+* mvEthHalInit - Initialize the Giga Ethernet unit
+*
+* DESCRIPTION:
+*       This function initialize the Giga Ethernet unit.
+*       1) Configure Address decode windows of the unit
+*       2) Set registers to HW default values.
+*       3) Clear and Disable interrupts
+*
+* INPUT:  NONE
+*
+* RETURN: NONE
+*
+* NOTE: this function is called once in the boot process.
+*******************************************************************************/
+void    mvEthHalInit(void)
+{
+    int port;
+
+    /* Init static data structures */
+    for (port=0; port<MV_ETH_MAX_PORTS; port++)
+    {
+        ethPortCtrl[port] = NULL;
+    }
+    /* Power down all existing ports */
+    for(port=0; port<mvCtrlEthMaxPortGet(); port++)
+    {
+
+#if defined (MV78200)
+	    /* Skip ports mapped to another CPU*/
+	if (MV_FALSE == mvSocUnitIsMappedToThisCpu(GIGA0+port))
+	{
+		    continue;
+	}
+#endif
+
+	/* Skip power down ports */
+	if (MV_FALSE == mvCtrlPwrClckGet(ETH_GIG_UNIT_ID, port)) continue;
+
+        /* Disable Giga Ethernet Unit interrupts */
+        MV_REG_WRITE(ETH_UNIT_INTR_MASK_REG(port), 0);
+
+        /* Clear ETH_UNIT_INTR_CAUSE_REG register */
+        MV_REG_WRITE(ETH_UNIT_INTR_CAUSE_REG(port), 0);
+
+    }
+
+    mvEthMemAttrGet(&ethDescInSram, &ethDescSwCoher);
+
+#if defined(ETH_DESCR_IN_SRAM)
+    if(ethDescInSram == MV_FALSE)
+    {
+        mvOsPrintf("ethDrv: WARNING! Descriptors will be allocated in DRAM instead of SRAM.\n");
+    }
+#endif /* ETH_DESCR_IN_SRAM */
+}
+
+/*******************************************************************************
+* mvEthMemAttrGet - Define properties (SRAM/DRAM, SW_COHER / HW_COHER / UNCACHED)
+*                       of of memory location for RX and TX descriptors.
+*
+* DESCRIPTION:
+*       This function allocates memory for RX and TX descriptors.
+*       - If ETH_DESCR_IN_SRAM defined, allocate from SRAM memory.
+*       - If ETH_DESCR_IN_SDRAM defined, allocate from SDRAM memory.
+*
+* INPUT:
+*   MV_BOOL* pIsSram - place of descriptors:
+*                      MV_TRUE  - in SRAM
+*                      MV_FALSE - in DRAM
+*   MV_BOOL* pIsSwCoher - cache coherency of descriptors:
+*                      MV_TRUE  - driver is responsible for cache coherency
+*                      MV_FALSE - driver is not responsible for cache coherency
+*
+* RETURN:
+*
+*******************************************************************************/
+void   mvEthMemAttrGet(MV_BOOL* pIsSram, MV_BOOL* pIsSwCoher)
+{
+    MV_BOOL isSram, isSwCoher;
+
+    isSram = MV_FALSE;
+#if (ETHER_DRAM_COHER == MV_CACHE_COHER_SW)
+    isSwCoher = MV_TRUE;
+#else
+    isSwCoher = MV_FALSE;
+#endif
+
+#if defined(ETH_DESCR_IN_SRAM)
+    if( mvCtrlSramSizeGet() > 0)
+    {
+        isSram = MV_TRUE;
+        #if (INTEG_SRAM_COHER == MV_CACHE_COHER_SW)
+            isSwCoher = MV_TRUE;
+        #else
+            isSwCoher = MV_FALSE;
+        #endif
+    }
+#endif /* ETH_DESCR_IN_SRAM */
+
+    if(pIsSram != NULL)
+        *pIsSram = isSram;
+
+    if(pIsSwCoher != NULL)
+        *pIsSwCoher = isSwCoher;
+}
+
+
+
+/******************************************************************************/
+/*                      Port Initialization functions                         */
+/******************************************************************************/
+
+/*******************************************************************************
+* mvEthPortInit - Initialize the Ethernet port driver
+*
+* DESCRIPTION:
+*       This function initialize the ethernet port.
+*       1) Allocate and initialize internal port Control structure.
+*       2) Create RX and TX descriptor rings for default RX and TX queues
+*       3) Disable RX and TX operations, clear cause registers and
+*          mask all interrupts.
+*       4) Set all registers to default values and clean all MAC tables.
+*
+* INPUT:
+*       int             portNo          - Ethernet port number
+*       ETH_PORT_INIT   *pEthPortInit   - Ethernet port init structure
+*
+* RETURN:
+*       void* - ethernet port handler, that should be passed to the most other
+*               functions dealing with this port.
+*
+* NOTE: This function is called once per port when loading the eth module.
+*******************************************************************************/
+void*   mvEthPortInit(int portNo, MV_ETH_PORT_INIT *pEthPortInit)
+{
+    int             queue, descSize;
+    ETH_PORT_CTRL*  pPortCtrl;
+
+    /* Check validity of parameters */
+    if( (portNo >= (int)mvCtrlEthMaxPortGet()) ||
+        (pEthPortInit->rxDefQ   >= MV_ETH_RX_Q_NUM)  ||
+        (pEthPortInit->maxRxPktSize < 1518) )
+    {
+        mvOsPrintf("EthPort #%d: Bad initialization parameters\n", portNo);
+        return NULL;
+    }
+    if( (pEthPortInit->rxDescrNum[pEthPortInit->rxDefQ]) == 0)
+    {
+        mvOsPrintf("EthPort #%d: rxDefQ (%d) must be created\n",
+                    portNo, pEthPortInit->rxDefQ);
+        return NULL;
+    }
+
+    pPortCtrl = (ETH_PORT_CTRL*)mvOsMalloc( sizeof(ETH_PORT_CTRL) );
+    if(pPortCtrl == NULL)
+    {
+       mvOsPrintf("EthDrv: Can't allocate %dB for port #%d control structure!\n",
+                   (int)sizeof(ETH_PORT_CTRL), portNo);
+       return NULL;
+    }
+
+    memset(pPortCtrl, 0, sizeof(ETH_PORT_CTRL) );
+    ethPortCtrl[portNo] = pPortCtrl;
+
+    pPortCtrl->portState = MV_UNDEFINED_STATE;
+
+    pPortCtrl->portNo = portNo;
+
+    pPortCtrl->osHandle = pEthPortInit->osHandle;
+
+    /* Copy Configuration parameters */
+    pPortCtrl->portConfig.maxRxPktSize = pEthPortInit->maxRxPktSize;
+    pPortCtrl->portConfig.rxDefQ = pEthPortInit->rxDefQ;
+    pPortCtrl->portConfig.ejpMode = 0;
+
+    for( queue=0; queue<MV_ETH_RX_Q_NUM; queue++ )
+    {
+        pPortCtrl->rxQueueConfig[queue].descrNum = pEthPortInit->rxDescrNum[queue];
+    }
+    for( queue=0; queue<MV_ETH_TX_Q_NUM; queue++ )
+    {
+        pPortCtrl->txQueueConfig[queue].descrNum = pEthPortInit->txDescrNum[queue];
+    }
+
+    mvEthPortDisable(pPortCtrl);
+
+    /* Set the board information regarding PHY address */
+    mvEthPhyAddrSet(pPortCtrl, mvBoardPhyAddrGet(portNo) );
+
+    /* Create all requested RX queues */
+    for(queue=0; queue<MV_ETH_RX_Q_NUM; queue++)
+    {
+        if(pPortCtrl->rxQueueConfig[queue].descrNum == 0)
+            continue;
+
+        /* Allocate memory for RX descriptors */
+        descSize = ((pPortCtrl->rxQueueConfig[queue].descrNum * ETH_RX_DESC_ALIGNED_SIZE) +
+                                                        CPU_D_CACHE_LINE_SIZE);
+
+        pPortCtrl->rxQueue[queue].descBuf.bufVirtPtr =
+                        ethAllocDescrMemory(pPortCtrl, descSize,
+					    &pPortCtrl->rxQueue[queue].descBuf.bufPhysAddr,
+					    &pPortCtrl->rxQueue[queue].descBuf.memHandle);
+        pPortCtrl->rxQueue[queue].descBuf.bufSize = descSize;
+        if(pPortCtrl->rxQueue[queue].descBuf.bufVirtPtr == NULL)
+        {
+            mvOsPrintf("EthPort #%d, rxQ=%d: Can't allocate %d bytes in %s for %d RX descr\n",
+                        pPortCtrl->portNo, queue, descSize,
+                        ethDescInSram ? "SRAM" : "DRAM",
+                        pPortCtrl->rxQueueConfig[queue].descrNum);
+            return NULL;
+        }
+
+        ethInitRxDescRing(pPortCtrl, queue);
+    }
+    /* Create TX queues */
+    for(queue=0; queue<MV_ETH_TX_Q_NUM; queue++)
+    {
+        if(pPortCtrl->txQueueConfig[queue].descrNum == 0)
+            continue;
+
+        /* Allocate memory for TX descriptors */
+        descSize = ((pPortCtrl->txQueueConfig[queue].descrNum * ETH_TX_DESC_ALIGNED_SIZE) +
+                                                        CPU_D_CACHE_LINE_SIZE);
+
+        pPortCtrl->txQueue[queue].descBuf.bufVirtPtr =
+		ethAllocDescrMemory(pPortCtrl, descSize,
+				    &pPortCtrl->txQueue[queue].descBuf.bufPhysAddr,
+				    &pPortCtrl->txQueue[queue].descBuf.memHandle);
+        pPortCtrl->txQueue[queue].descBuf.bufSize = descSize;
+        if(pPortCtrl->txQueue[queue].descBuf.bufVirtPtr == NULL)
+        {
+            mvOsPrintf("EthPort #%d, txQ=%d: Can't allocate %d bytes in %s for %d TX descr\n",
+                        pPortCtrl->portNo, queue, descSize, ethDescInSram ? "SRAM" : "DRAM",
+                        pPortCtrl->txQueueConfig[queue].descrNum);
+            return NULL;
+        }
+
+        ethInitTxDescRing(pPortCtrl, queue);
+    }
+    mvEthDefaultsSet(pPortCtrl);
+
+    pPortCtrl->portState = MV_IDLE;
+    return pPortCtrl;
+}
+
+/*******************************************************************************
+* ethPortFinish - Finish the Ethernet port driver
+*
+* DESCRIPTION:
+*       This function finish the ethernet port.
+*       1) Down ethernet port if needed.
+*       2) Delete RX and TX descriptor rings for all created RX and TX queues
+*       3) Free internal port Control structure.
+*
+* INPUT:
+*       void*   pEthPortHndl  - Ethernet port handler
+*
+* RETURN:   NONE.
+*
+*******************************************************************************/
+void    mvEthPortFinish(void* pPortHndl)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+    int             queue, portNo  = pPortCtrl->portNo;
+
+    if(pPortCtrl->portState == MV_ACTIVE)
+    {
+        mvOsPrintf("ethPort #%d: Warning !!! Finish port in Active state\n",
+                 portNo);
+        mvEthPortDisable(pPortHndl);
+    }
+
+    /* Free all allocated RX queues */
+    for(queue=0; queue<MV_ETH_RX_Q_NUM; queue++)
+    {
+        ethFreeDescrMemory(pPortCtrl, &pPortCtrl->rxQueue[queue].descBuf);
+    }
+
+    /* Free all allocated TX queues */
+    for(queue=0; queue<MV_ETH_TX_Q_NUM; queue++)
+    {
+        ethFreeDescrMemory(pPortCtrl, &pPortCtrl->txQueue[queue].descBuf);
+    }
+
+    /* Free port control structure */
+    mvOsFree(pPortCtrl);
+
+    ethPortCtrl[portNo] = NULL;
+}
+
+/*******************************************************************************
+* mvEthDefaultsSet - Set defaults to the ethernet port
+*
+* DESCRIPTION:
+*       This function set default values to the ethernet port.
+*       1) Clear Cause registers and Mask all interrupts
+*       2) Clear all MAC tables
+*       3) Set defaults to all registers
+*       4) Reset all created RX and TX descriptors ring
+*       5) Reset PHY
+*
+* INPUT:
+*       void*   pEthPortHndl  - Ethernet port handler
+*
+* RETURN:   MV_STATUS
+*               MV_OK - Success, Others - Failure
+* NOTE:
+*   This function update all the port configuration except those set
+*   Initialy by the OsGlue by MV_ETH_PORT_INIT.
+*   This function can be called after portDown to return the port setting
+*   to defaults.
+*******************************************************************************/
+MV_STATUS   mvEthDefaultsSet(void* pPortHndl)
+{
+    int                 ethPortNo, queue;
+    ETH_PORT_CTRL*      pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+    ETH_QUEUE_CTRL*     pQueueCtrl;
+    MV_U32              txPrio;
+    MV_U32              portCfgReg, portCfgExtReg, portSerialCtrlReg, portSerialCtrl1Reg, portSdmaCfgReg;
+    MV_BOARD_MAC_SPEED  boardMacCfg;
+
+    ethPortNo = pPortCtrl->portNo;
+
+    /* Clear Cause registers */
+    MV_REG_WRITE(ETH_INTR_CAUSE_REG(ethPortNo),0);
+    MV_REG_WRITE(ETH_INTR_CAUSE_EXT_REG(ethPortNo),0);
+
+    /* Mask all interrupts */
+    MV_REG_WRITE(ETH_INTR_MASK_REG(ethPortNo),0);
+    MV_REG_WRITE(ETH_INTR_MASK_EXT_REG(ethPortNo),0);
+
+    portCfgReg  =   PORT_CONFIG_VALUE;
+    portCfgExtReg  =  PORT_CONFIG_EXTEND_VALUE;
+
+    boardMacCfg = mvBoardMacSpeedGet(ethPortNo);
+
+    if(boardMacCfg == BOARD_MAC_SPEED_100M)
+    {
+        portSerialCtrlReg = PORT_SERIAL_CONTROL_100MB_FORCE_VALUE;
+    }
+    else if(boardMacCfg == BOARD_MAC_SPEED_1000M)
+    {
+        portSerialCtrlReg = PORT_SERIAL_CONTROL_1000MB_FORCE_VALUE;
+    }
+    else
+    {
+        portSerialCtrlReg =  PORT_SERIAL_CONTROL_VALUE;
+    }
+
+    /* build PORT_SDMA_CONFIG_REG */
+    portSdmaCfgReg = ETH_TX_INTR_COAL_MASK(0);
+    portSdmaCfgReg |= ETH_TX_BURST_SIZE_MASK(ETH_BURST_SIZE_16_64BIT_VALUE);
+
+#if ( (ETHER_DRAM_COHER == MV_CACHE_COHER_HW_WB) ||  \
+      (ETHER_DRAM_COHER == MV_CACHE_COHER_HW_WT) )
+    /* some devices have restricted RX burst size when using HW coherency */
+    portSdmaCfgReg |= ETH_RX_BURST_SIZE_MASK(ETH_BURST_SIZE_4_64BIT_VALUE);
+#else
+    portSdmaCfgReg |= ETH_RX_BURST_SIZE_MASK(ETH_BURST_SIZE_16_64BIT_VALUE);
+#endif
+
+#if defined(MV_CPU_BE)
+    /* big endian */
+# if defined(MV_ARM)
+    portSdmaCfgReg |= (ETH_RX_NO_DATA_SWAP_MASK |
+                       ETH_TX_NO_DATA_SWAP_MASK |
+                       ETH_DESC_SWAP_MASK);
+# elif defined(MV_PPC)
+    portSdmaCfgReg |= (ETH_RX_DATA_SWAP_MASK |
+                       ETH_TX_DATA_SWAP_MASK |
+                       ETH_NO_DESC_SWAP_MASK);
+# else
+# error "Giga Ethernet Swap policy is not defined for the CPU_ARCH"
+# endif /* MV_ARM / MV_PPC */
+
+#else /* MV_CPU_LE */
+    /* little endian */
+    portSdmaCfgReg |= (ETH_RX_NO_DATA_SWAP_MASK |
+                       ETH_TX_NO_DATA_SWAP_MASK |
+                       ETH_NO_DESC_SWAP_MASK);
+#endif /* MV_CPU_BE / MV_CPU_LE */
+
+    pPortCtrl->portRxQueueCmdReg = 0;
+    pPortCtrl->portTxQueueCmdReg = 0;
+
+#if (MV_ETH_VERSION >= 4)
+    if(pPortCtrl->portConfig.ejpMode == MV_TRUE)
+    {
+        MV_REG_WRITE(ETH_TXQ_CMD_1_REG(ethPortNo), ETH_TX_EJP_ENABLE_MASK);
+    }
+    else
+    {
+        MV_REG_WRITE(ETH_TXQ_CMD_1_REG(ethPortNo), 0)
+    }
+#endif /* (MV_ETH_VERSION >= 4) */
+
+    ethSetUcastTable(ethPortNo, -1);
+    mvEthSetSpecialMcastTable(ethPortNo, -1);
+    mvEthSetOtherMcastTable(ethPortNo, -1);
+
+    portSerialCtrlReg &= ~ETH_MAX_RX_PACKET_SIZE_MASK;
+
+    portSerialCtrlReg |= mvEthMruGet(pPortCtrl->portConfig.maxRxPktSize);
+
+    MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(ethPortNo), portSerialCtrlReg);
+
+    /* Update value of PortConfig register accordingly with all RxQueue types */
+    pPortCtrl->portConfig.rxArpQ = pPortCtrl->portConfig.rxDefQ;
+    pPortCtrl->portConfig.rxBpduQ = pPortCtrl->portConfig.rxDefQ;
+    pPortCtrl->portConfig.rxTcpQ = pPortCtrl->portConfig.rxDefQ;
+    pPortCtrl->portConfig.rxUdpQ = pPortCtrl->portConfig.rxDefQ;
+
+    portCfgReg &= ~ETH_DEF_RX_QUEUE_ALL_MASK;
+    portCfgReg |= ETH_DEF_RX_QUEUE_MASK(pPortCtrl->portConfig.rxDefQ);
+
+    portCfgReg &= ~ETH_DEF_RX_ARP_QUEUE_ALL_MASK;
+    portCfgReg |= ETH_DEF_RX_ARP_QUEUE_MASK(pPortCtrl->portConfig.rxArpQ);
+
+    portCfgReg &= ~ETH_DEF_RX_BPDU_QUEUE_ALL_MASK;
+    portCfgReg |= ETH_DEF_RX_BPDU_QUEUE_MASK(pPortCtrl->portConfig.rxBpduQ);
+
+    portCfgReg &= ~ETH_DEF_RX_TCP_QUEUE_ALL_MASK;
+    portCfgReg |= ETH_DEF_RX_TCP_QUEUE_MASK(pPortCtrl->portConfig.rxTcpQ);
+
+    portCfgReg &= ~ETH_DEF_RX_UDP_QUEUE_ALL_MASK;
+    portCfgReg |= ETH_DEF_RX_UDP_QUEUE_MASK(pPortCtrl->portConfig.rxUdpQ);
+
+    /* Assignment of Tx CTRP of given queue */
+    txPrio = 0;
+
+    for(queue=0; queue<MV_ETH_TX_Q_NUM; queue++)
+    {
+        pQueueCtrl = &pPortCtrl->txQueue[queue];
+
+        if(pQueueCtrl->pFirstDescr != NULL)
+        {
+            ethResetTxDescRing(pPortCtrl, queue);
+
+            MV_REG_WRITE(ETH_TXQ_TOKEN_COUNT_REG(ethPortNo, queue),
+                         0x3fffffff);
+            MV_REG_WRITE(ETH_TXQ_TOKEN_CFG_REG(ethPortNo, queue),
+                         0x03ffffff);
+        }
+        else
+        {
+            MV_REG_WRITE(ETH_TXQ_TOKEN_COUNT_REG(ethPortNo, queue),  0x0);
+            MV_REG_WRITE(ETH_TXQ_TOKEN_CFG_REG(ethPortNo, queue), 0x0);
+        }
+    }
+
+    /* Assignment of Rx CRDP of given queue */
+    for(queue=0; queue<MV_ETH_RX_Q_NUM; queue++)
+    {
+        ethResetRxDescRing(pPortCtrl, queue);
+    }
+
+    /* Allow receiving packes with odd number of preamble nibbles */
+    portSerialCtrl1Reg = MV_REG_READ(ETH_PORT_SERIAL_CTRL_1_REG(ethPortNo));
+    portSerialCtrl1Reg |= ETH_EN_MII_ODD_PRE_MASK;
+    MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_1_REG(ethPortNo), portSerialCtrl1Reg);
+
+    /* Assign port configuration and command. */
+    MV_REG_WRITE(ETH_PORT_CONFIG_REG(ethPortNo), portCfgReg);
+
+    MV_REG_WRITE(ETH_PORT_CONFIG_EXTEND_REG(ethPortNo), portCfgExtReg);
+
+    /* Assign port SDMA configuration */
+    MV_REG_WRITE(ETH_SDMA_CONFIG_REG(ethPortNo), portSdmaCfgReg);
+
+    /* Turn off the port/queue bandwidth limitation */
+    MV_REG_WRITE(ETH_MAX_TRANSMIT_UNIT_REG(ethPortNo), 0x0);
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* ethPortUp - Start the Ethernet port RX and TX activity.
+*
+* DESCRIPTION:
+*       This routine start Rx and Tx activity:
+*
+*       Note: Each Rx and Tx queue descriptor's list must be initialized prior
+*       to calling this function (use etherInitTxDescRing for Tx queues and
+*       etherInitRxDescRing for Rx queues).
+*
+* INPUT:
+*       void*   pEthPortHndl  - Ethernet port handler
+*
+* RETURN:   MV_STATUS
+*           MV_OK - Success, Others - Failure.
+*
+* NOTE : used for port link up.
+*******************************************************************************/
+MV_STATUS   mvEthPortUp(void* pEthPortHndl)
+{
+    int             ethPortNo;
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+
+    ethPortNo = pPortCtrl->portNo;
+
+    if( (pPortCtrl->portState != MV_ACTIVE) &&
+        (pPortCtrl->portState != MV_PAUSED) )
+    {
+        mvOsPrintf("ethDrv port%d: Unexpected port state %d\n",
+                        ethPortNo, pPortCtrl->portState);
+        return MV_BAD_STATE;
+    }
+
+    ethPortNo = pPortCtrl->portNo;
+
+    /* Enable port RX. */
+    MV_REG_WRITE(ETH_RX_QUEUE_COMMAND_REG(ethPortNo), pPortCtrl->portRxQueueCmdReg);
+
+    /* Enable port TX. */
+    MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(ethPortNo)) = pPortCtrl->portTxQueueCmdReg;
+
+    pPortCtrl->portState = MV_ACTIVE;
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* ethPortDown - Stop the Ethernet port activity.
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       void*   pEthPortHndl  - Ethernet port handler
+*
+* RETURN:   MV_STATUS
+*               MV_OK - Success, Others - Failure.
+*
+* NOTE : used for port link down.
+*******************************************************************************/
+MV_STATUS   mvEthPortDown(void* pEthPortHndl)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+    int             ethPortNum = pPortCtrl->portNo;
+    unsigned int    regData;
+    volatile int    uDelay, mDelay;
+
+    /* Stop Rx port activity. Check port Rx activity. */
+    regData = (MV_REG_READ(ETH_RX_QUEUE_COMMAND_REG(ethPortNum))) & ETH_RXQ_ENABLE_MASK;
+    if(regData != 0)
+    {
+        /* Issue stop command for active channels only */
+        MV_REG_WRITE(ETH_RX_QUEUE_COMMAND_REG(ethPortNum), (regData << ETH_RXQ_DISABLE_OFFSET));
+    }
+
+    /* Stop Tx port activity. Check port Tx activity. */
+    regData = (MV_REG_READ(ETH_TX_QUEUE_COMMAND_REG(ethPortNum))) & ETH_TXQ_ENABLE_MASK;
+    if(regData != 0)
+    {
+        /* Issue stop command for active channels only */
+        MV_REG_WRITE(ETH_TX_QUEUE_COMMAND_REG(ethPortNum),
+                            (regData << ETH_TXQ_DISABLE_OFFSET) );
+    }
+
+    /* Force link down */
+/*
+    regData = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(ethPortNum));
+    regData &= ~(ETH_DO_NOT_FORCE_LINK_FAIL_MASK);
+    MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(ethPortNum), regData);
+*/
+    /* Wait for all Rx activity to terminate. */
+    mDelay = 0;
+    do
+    {
+        if(mDelay >= RX_DISABLE_TIMEOUT_MSEC)
+        {
+            mvOsPrintf("ethPort_%d: TIMEOUT for RX stopped !!! rxQueueCmd - 0x08%x\n",
+                        ethPortNum, regData);
+            break;
+        }
+        mvOsDelay(1);
+        mDelay++;
+
+        /* Check port RX Command register that all Rx queues are stopped */
+        regData = MV_REG_READ(ETH_RX_QUEUE_COMMAND_REG(ethPortNum));
+    }
+    while(regData & 0xFF);
+
+    /* Wait for all Tx activity to terminate. */
+    mDelay = 0;
+    do
+    {
+        if(mDelay >= TX_DISABLE_TIMEOUT_MSEC)
+        {
+            mvOsPrintf("ethPort_%d: TIMEOUT for TX stoped !!! txQueueCmd - 0x08%x\n",
+                        ethPortNum, regData);
+            break;
+        }
+        mvOsDelay(1);
+        mDelay++;
+
+        /* Check port TX Command register that all Tx queues are stopped */
+        regData = MV_REG_READ(ETH_TX_QUEUE_COMMAND_REG(ethPortNum));
+    }
+    while(regData & 0xFF);
+
+    /* Double check to Verify that TX FIFO is Empty */
+    mDelay = 0;
+    while(MV_TRUE)
+    {
+        do
+        {
+            if(mDelay >= TX_FIFO_EMPTY_TIMEOUT_MSEC)
+            {
+                mvOsPrintf("\n ethPort_%d: TIMEOUT for TX FIFO empty !!! portStatus - 0x08%x\n",
+                            ethPortNum, regData);
+                break;
+            }
+            mvOsDelay(1);
+            mDelay++;
+
+            regData = MV_REG_READ(ETH_PORT_STATUS_REG(ethPortNum));
+        }
+        while( ((regData & ETH_TX_FIFO_EMPTY_MASK) == 0) ||
+               ((regData & ETH_TX_IN_PROGRESS_MASK) != 0) );
+
+        if(mDelay >= TX_FIFO_EMPTY_TIMEOUT_MSEC)
+            break;
+
+        /* Double check */
+        regData = MV_REG_READ(ETH_PORT_STATUS_REG(ethPortNum));
+        if( ((regData & ETH_TX_FIFO_EMPTY_MASK) != 0) &&
+            ((regData & ETH_TX_IN_PROGRESS_MASK) == 0) )
+        {
+            break;
+        }
+        else
+            mvOsPrintf("ethPort_%d: TX FIFO Empty double check failed. %d msec, portStatus=0x%x\n",
+                                ethPortNum, mDelay, regData);
+    }
+
+    /* Do NOT force link down */
+/*
+    regData = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(ethPortNum));
+    regData |= (ETH_DO_NOT_FORCE_LINK_FAIL_MASK);
+    MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(ethPortNum), regData);
+*/
+    /* Wait about 2500 tclk cycles */
+    uDelay = (PORT_DISABLE_WAIT_TCLOCKS/(mvBoardTclkGet()/1000000));
+    mvOsUDelay(uDelay);
+
+    pPortCtrl->portState = MV_PAUSED;
+
+    return MV_OK;
+}
+
+
+/*******************************************************************************
+* ethPortEnable - Enable the Ethernet port and Start RX and TX.
+*
+* DESCRIPTION:
+*       This routine enable the Ethernet port and Rx and Tx activity:
+*
+*       Note: Each Rx and Tx queue descriptor's list must be initialized prior
+*       to calling this function (use etherInitTxDescRing for Tx queues and
+*       etherInitRxDescRing for Rx queues).
+*
+* INPUT:
+*       void*   pEthPortHndl  - Ethernet port handler
+*
+* RETURN:   MV_STATUS
+*               MV_OK - Success, Others - Failure.
+*
+* NOTE: main usage is to enable the port after ifconfig up.
+*******************************************************************************/
+MV_STATUS   mvEthPortEnable(void* pEthPortHndl)
+{
+    int             ethPortNo;
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+    MV_U32      portSerialCtrlReg;
+
+    ethPortNo = pPortCtrl->portNo;
+
+    /* Enable port */
+    portSerialCtrlReg = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(ethPortNo));
+    portSerialCtrlReg |= (ETH_DO_NOT_FORCE_LINK_FAIL_MASK | ETH_PORT_ENABLE_MASK);
+
+    MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(ethPortNo), portSerialCtrlReg);
+
+    mvEthMibCountersClear(pEthPortHndl);
+
+    pPortCtrl->portState = MV_PAUSED;
+
+    /* If Link is UP, Start RX and TX traffic */
+    if( MV_REG_READ( ETH_PORT_STATUS_REG(ethPortNo) ) & ETH_LINK_UP_MASK)
+        return( mvEthPortUp(pEthPortHndl) );
+
+    return MV_NOT_READY;
+}
+
+
+/*******************************************************************************
+* mvEthPortDisable - Stop RX and TX activities and Disable the Ethernet port.
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       void*   pEthPortHndl  - Ethernet port handler
+*
+* RETURN:   MV_STATUS
+*               MV_OK - Success, Others - Failure.
+*
+* NOTE: main usage is to disable the port after ifconfig down.
+*******************************************************************************/
+MV_STATUS   mvEthPortDisable(void* pEthPortHndl)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+    int             ethPortNum = pPortCtrl->portNo;
+    unsigned int    regData;
+    volatile int    mvDelay;
+
+    if(pPortCtrl->portState == MV_ACTIVE)
+    {
+        /* Stop RX and TX activities */
+        mvEthPortDown(pEthPortHndl);
+    }
+
+    /* Reset the Enable bit in the Serial Control Register */
+    regData = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(ethPortNum));
+    regData &= ~(ETH_PORT_ENABLE_MASK);
+    MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(ethPortNum), regData);
+
+    /* Wait about 2500 tclk cycles */
+    mvDelay = (PORT_DISABLE_WAIT_TCLOCKS*(mvCpuPclkGet()/mvBoardTclkGet()));
+    for(mvDelay; mvDelay>0; mvDelay--);
+
+    pPortCtrl->portState = MV_IDLE;
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvEthPortForceTxDone - Get next buffer from TX queue in spite of buffer ownership.
+*
+* DESCRIPTION:
+*       This routine used to free buffers attached to the Tx ring and should
+*       be called only when Giga Ethernet port is Down
+*
+* INPUT:
+*       void*       pEthPortHndl    - Ethernet Port handler.
+*       int         txQueue         - Number of TX queue.
+*
+* OUTPUT:
+*       MV_PKT_INFO *pPktInfo       - Pointer to packet was sent.
+*
+* RETURN:
+*       MV_EMPTY    - There is no more buffers in this queue.
+*       MV_OK       - Buffer detached from the queue and pPktInfo structure
+*                   filled with relevant information.
+*
+*******************************************************************************/
+MV_PKT_INFO*    mvEthPortForceTxDone(void* pEthPortHndl, int txQueue)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+    ETH_QUEUE_CTRL* pQueueCtrl;
+    MV_PKT_INFO*    pPktInfo;
+    ETH_TX_DESC*    pTxDesc;
+    int             port = pPortCtrl->portNo;
+
+    pQueueCtrl = &pPortCtrl->txQueue[txQueue];
+
+    while( (pQueueCtrl->pUsedDescr != pQueueCtrl->pCurrentDescr) ||
+           (pQueueCtrl->resource == 0) )
+    {
+        /* Free next descriptor */
+        pQueueCtrl->resource++;
+        pTxDesc = (ETH_TX_DESC*)pQueueCtrl->pUsedDescr;
+
+	/* pPktInfo is available only in descriptors which are last descriptors */
+        pPktInfo = (MV_PKT_INFO*)pTxDesc->returnInfo;
+	if (pPktInfo)
+		pPktInfo->status = pTxDesc->cmdSts;
+
+        pTxDesc->cmdSts = 0x0;
+        pTxDesc->returnInfo = 0x0;
+        ETH_DESCR_FLUSH_INV(pPortCtrl, pTxDesc);
+
+        pQueueCtrl->pUsedDescr = TX_NEXT_DESC_PTR(pTxDesc, pQueueCtrl);
+
+        if (pPktInfo)
+		if (pPktInfo->status  & ETH_TX_LAST_DESC_MASK)
+			return pPktInfo;
+    }
+    MV_REG_WRITE( ETH_TX_CUR_DESC_PTR_REG(port, txQueue),
+                    (MV_U32)ethDescVirtToPhy(pQueueCtrl, pQueueCtrl->pCurrentDescr) );
+    return NULL;
+}
+
+
+
+/*******************************************************************************
+* mvEthPortForceRx - Get next buffer from RX queue in spite of buffer ownership.
+*
+* DESCRIPTION:
+*       This routine used to free buffers attached to the Rx ring and should
+*       be called only when Giga Ethernet port is Down
+*
+* INPUT:
+*       void*       pEthPortHndl    - Ethernet Port handler.
+*       int         rxQueue         - Number of Rx queue.
+*
+* OUTPUT:
+*       MV_PKT_INFO *pPktInfo       - Pointer to received packet.
+*
+* RETURN:
+*       MV_EMPTY    - There is no more buffers in this queue.
+*       MV_OK       - Buffer detached from the queue and pBufInfo structure
+*                   filled with relevant information.
+*
+*******************************************************************************/
+MV_PKT_INFO*    mvEthPortForceRx(void* pEthPortHndl, int rxQueue)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+    ETH_QUEUE_CTRL* pQueueCtrl;
+    ETH_RX_DESC*    pRxDesc;
+    MV_PKT_INFO*    pPktInfo;
+    int             port = pPortCtrl->portNo;
+
+    pQueueCtrl = &pPortCtrl->rxQueue[rxQueue];
+
+    if(pQueueCtrl->resource == 0)
+    {
+        MV_REG_WRITE( ETH_RX_CUR_DESC_PTR_REG(port, rxQueue),
+                    (MV_U32)ethDescVirtToPhy(pQueueCtrl, pQueueCtrl->pCurrentDescr) );
+
+        return NULL;
+    }
+    /* Free next descriptor */
+    pQueueCtrl->resource--;
+    pRxDesc = (ETH_RX_DESC*)pQueueCtrl->pCurrentDescr;
+    pPktInfo = (MV_PKT_INFO*)pRxDesc->returnInfo;
+
+    pPktInfo->status  = pRxDesc->cmdSts;
+    pRxDesc->cmdSts = 0x0;
+    pRxDesc->returnInfo = 0x0;
+    ETH_DESCR_FLUSH_INV(pPortCtrl, pRxDesc);
+
+    pQueueCtrl->pCurrentDescr = RX_NEXT_DESC_PTR(pRxDesc, pQueueCtrl);
+    return pPktInfo;
+}
+
+
+/******************************************************************************/
+/*                          Port Configuration functions                      */
+/******************************************************************************/
+/*******************************************************************************
+* mvEthMruGet - Get MRU configuration for Max Rx packet size.
+*
+* INPUT:
+*           MV_U32 maxRxPktSize - max  packet size.
+*
+* RETURN:   MV_U32 - MRU configuration.
+*
+*******************************************************************************/
+static MV_U32 mvEthMruGet(MV_U32 maxRxPktSize)
+{
+    MV_U32 portSerialCtrlReg = 0;
+
+    if(maxRxPktSize > 9192)
+        portSerialCtrlReg |= ETH_MAX_RX_PACKET_9700BYTE;
+    else if(maxRxPktSize > 9022)
+        portSerialCtrlReg |= ETH_MAX_RX_PACKET_9192BYTE;
+    else if(maxRxPktSize > 1552)
+        portSerialCtrlReg |= ETH_MAX_RX_PACKET_9022BYTE;
+    else if(maxRxPktSize > 1522)
+        portSerialCtrlReg |= ETH_MAX_RX_PACKET_1552BYTE;
+    else if(maxRxPktSize > 1518)
+        portSerialCtrlReg |= ETH_MAX_RX_PACKET_1522BYTE;
+    else
+        portSerialCtrlReg |= ETH_MAX_RX_PACKET_1518BYTE;
+
+    return portSerialCtrlReg;
+}
+
+/*******************************************************************************
+* mvEthRxCoalSet  - Sets coalescing interrupt mechanism on RX path
+*
+* DESCRIPTION:
+*       This routine sets the RX coalescing interrupt mechanism parameter.
+*       This parameter is a timeout counter, that counts in 64 tClk
+*       chunks, that when timeout event occurs a maskable interrupt occurs.
+*       The parameter is calculated using the tCLK frequency of the
+*       MV-64xxx chip, and the required number is in micro seconds.
+*
+* INPUT:
+*       void*           pPortHndl   - Ethernet Port handler.
+*       MV_U32          uSec        - Number of micro seconds between
+*                                   RX interrupts
+*
+* RETURN:
+*       None.
+*
+* COMMENT:
+*   1 sec           - TCLK_RATE clocks
+*   1 uSec          - TCLK_RATE / 1,000,000 clocks
+*
+*   Register Value for N micro seconds -  ((N * ( (TCLK_RATE / 1,000,000)) / 64)
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_U32    mvEthRxCoalSet (void* pPortHndl, MV_U32 uSec)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+    MV_U32          coal = ((uSec * (mvBoardTclkGet() / 1000000)) / 64);
+    MV_U32          portSdmaCfgReg;
+
+    portSdmaCfgReg =  MV_REG_READ(ETH_SDMA_CONFIG_REG(pPortCtrl->portNo));
+    portSdmaCfgReg &= ~ETH_RX_INTR_COAL_ALL_MASK;
+
+    portSdmaCfgReg |= ETH_RX_INTR_COAL_MASK(coal);
+
+#if (MV_ETH_VERSION >= 2)
+    /* Set additional bit if needed ETH_RX_INTR_COAL_MSB_BIT (25) */
+    if(ETH_RX_INTR_COAL_MASK(coal) > ETH_RX_INTR_COAL_ALL_MASK)
+        portSdmaCfgReg |= ETH_RX_INTR_COAL_MSB_MASK;
+#endif /* MV_ETH_VERSION >= 2 */
+
+    MV_REG_WRITE (ETH_SDMA_CONFIG_REG(pPortCtrl->portNo), portSdmaCfgReg);
+    return coal;
+}
+
+/*******************************************************************************
+* mvEthTxCoalSet - Sets coalescing interrupt mechanism on TX path
+*
+* DESCRIPTION:
+*       This routine sets the TX coalescing interrupt mechanism parameter.
+*       This parameter is a timeout counter, that counts in 64 tClk
+*       chunks, that when timeout event occurs a maskable interrupt
+*       occurs.
+*       The parameter is calculated using the tCLK frequency of the
+*       MV-64xxx chip, and the required number is in micro seconds.
+*
+* INPUT:
+*       void*           pPortHndl    - Ethernet Port handler.
+*       MV_U32          uSec        - Number of micro seconds between
+*                                   RX interrupts
+*
+* RETURN:
+*       None.
+*
+* COMMENT:
+*   1 sec           - TCLK_RATE clocks
+*   1 uSec          - TCLK_RATE / 1,000,000 clocks
+*
+*   Register Value for N micro seconds -  ((N * ( (TCLK_RATE / 1,000,000)) / 64)
+*
+*******************************************************************************/
+MV_U32    mvEthTxCoalSet(void* pPortHndl, MV_U32 uSec)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+    MV_U32          coal = ((uSec * (mvBoardTclkGet() / 1000000)) / 64);
+    MV_U32          regVal;
+
+    regVal = MV_REG_READ(ETH_TX_FIFO_URGENT_THRESH_REG(pPortCtrl->portNo));
+    regVal &= ~ETH_TX_INTR_COAL_ALL_MASK;
+    regVal |= ETH_TX_INTR_COAL_MASK(coal);
+
+    /* Set TX Coalescing mechanism */
+    MV_REG_WRITE (ETH_TX_FIFO_URGENT_THRESH_REG(pPortCtrl->portNo), regVal);
+    return coal;
+}
+
+/*******************************************************************************
+* mvEthCoalGet - Gets RX and TX coalescing values in micro seconds
+*
+* DESCRIPTION:
+*       This routine gets the RX and TX coalescing interrupt values.
+*       The parameter is calculated using the tCLK frequency of the
+*       MV-64xxx chip, and the returned numbers are in micro seconds.
+*
+* INPUTs:
+*       void*   pPortHndl   - Ethernet Port handler.
+*
+* OUTPUTs:
+*       MV_U32* pRxCoal     - Number of micro seconds between RX interrupts
+*       MV_U32* pTxCoal     - Number of micro seconds between TX interrupts
+*
+* RETURN:
+*       MV_STATUS   MV_OK  - success
+*                   Others - failure.
+*
+* COMMENT:
+*   1 sec           - TCLK_RATE clocks
+*   1 uSec          - TCLK_RATE / 1,000,000 clocks
+*
+*   Register Value for N micro seconds -  ((N * ( (TCLK_RATE / 1,000,000)) / 64)
+*
+*******************************************************************************/
+MV_STATUS   mvEthCoalGet(void* pPortHndl, MV_U32* pRxCoal, MV_U32* pTxCoal)
+{
+    MV_U32  regVal, coal, usec;
+
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+
+    /* get TX Coalescing */
+    regVal = MV_REG_READ (ETH_TX_FIFO_URGENT_THRESH_REG(pPortCtrl->portNo));
+    coal = ((regVal & ETH_TX_INTR_COAL_ALL_MASK) >> ETH_TX_INTR_COAL_OFFSET);
+
+    usec = (coal * 64) / (mvBoardTclkGet() / 1000000);
+    if(pTxCoal != NULL)
+        *pTxCoal = usec;
+
+    /* Get RX Coalescing */
+    regVal =  MV_REG_READ(ETH_SDMA_CONFIG_REG(pPortCtrl->portNo));
+    coal = ((regVal & ETH_RX_INTR_COAL_ALL_MASK) >> ETH_RX_INTR_COAL_OFFSET);
+
+#if (MV_ETH_VERSION >= 2)
+    if(regVal & ETH_RX_INTR_COAL_MSB_MASK)
+    {
+        /* Add MSB */
+        coal |= (ETH_RX_INTR_COAL_ALL_MASK + 1);
+    }
+#endif /* MV_ETH_VERSION >= 2 */
+
+    usec = (coal * 64) / (mvBoardTclkGet() / 1000000);
+    if(pRxCoal != NULL)
+        *pRxCoal = usec;
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvEthMaxRxSizeSet -
+*
+* DESCRIPTION:
+*       Change maximum receive size of the port. This configuration will take place
+*       after next call of ethPortSetDefaults() function.
+*
+* INPUT:
+*
+* RETURN:
+*******************************************************************************/
+MV_STATUS   mvEthMaxRxSizeSet(void* pPortHndl, int maxRxSize)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+    MV_U32      portSerialCtrlReg;
+
+    if((maxRxSize < 1518) || (maxRxSize & ~ETH_RX_BUFFER_MASK))
+       return MV_BAD_PARAM;
+
+    pPortCtrl->portConfig.maxRxPktSize = maxRxSize;
+
+    portSerialCtrlReg =  MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(pPortCtrl->portNo));
+    portSerialCtrlReg &= ~ETH_MAX_RX_PACKET_SIZE_MASK;
+    portSerialCtrlReg |= mvEthMruGet(pPortCtrl->portConfig.maxRxPktSize);
+    MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(pPortCtrl->portNo), portSerialCtrlReg);
+
+    return MV_OK;
+}
+
+
+/******************************************************************************/
+/*                      MAC Filtering functions                               */
+/******************************************************************************/
+
+/*******************************************************************************
+* mvEthRxFilterModeSet - Configure Fitering mode of Ethernet port
+*
+* DESCRIPTION:
+*       This routine used to free buffers attached to the Rx ring and should
+*       be called only when Giga Ethernet port is Down
+*
+* INPUT:
+*       void*       pEthPortHndl    - Ethernet Port handler.
+*       MV_BOOL     isPromisc       - Promiscous mode
+*                                   MV_TRUE  - accept all Broadcast, Multicast
+*                                              and Unicast packets
+*                                   MV_FALSE - accept all Broadcast,
+*                                              specially added Multicast and
+*                                              single Unicast packets
+*
+* RETURN:   MV_STATUS   MV_OK - Success, Other - Failure
+*
+*******************************************************************************/
+MV_STATUS   mvEthRxFilterModeSet(void* pEthPortHndl, MV_BOOL isPromisc)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+    int             queue;
+    MV_U32      portCfgReg;
+
+    portCfgReg = MV_REG_READ(ETH_PORT_CONFIG_REG(pPortCtrl->portNo));
+    /* Set / Clear UPM bit in port configuration register */
+    if(isPromisc)
+    {
+        /* Accept all multicast packets to RX default queue */
+        queue = pPortCtrl->portConfig.rxDefQ;
+        portCfgReg |= ETH_UNICAST_PROMISCUOUS_MODE_MASK;
+        memset(pPortCtrl->mcastCount, 1, sizeof(pPortCtrl->mcastCount));
+        MV_REG_WRITE(ETH_MAC_ADDR_LOW_REG(pPortCtrl->portNo),0xFFFF);
+        MV_REG_WRITE(ETH_MAC_ADDR_HIGH_REG(pPortCtrl->portNo),0xFFFFFFFF);
+    }
+    else
+    {
+        /* Reject all Multicast addresses */
+        queue = -1;
+        portCfgReg &= ~ETH_UNICAST_PROMISCUOUS_MODE_MASK;
+        /* Clear all mcastCount */
+        memset(pPortCtrl->mcastCount, 0, sizeof(pPortCtrl->mcastCount));
+    }
+    MV_REG_WRITE(ETH_PORT_CONFIG_REG(pPortCtrl->portNo), portCfgReg);
+
+    /* Set Special Multicast and Other Multicast tables */
+    mvEthSetSpecialMcastTable(pPortCtrl->portNo, queue);
+    mvEthSetOtherMcastTable(pPortCtrl->portNo, queue);
+    ethSetUcastTable(pPortCtrl->portNo, queue);
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvEthMacAddrSet - This function Set the port Unicast address.
+*
+* DESCRIPTION:
+*       This function Set the port Ethernet MAC address. This address
+*       will be used to send Pause frames if enabled. Packets with this
+*       address will be accepted and dispatched to default RX queue
+*
+* INPUT:
+*       void*   pEthPortHndl    - Ethernet port handler.
+*       char*   pAddr           - Address to be set
+*
+* RETURN:   MV_STATUS
+*               MV_OK - Success,  Other - Faulure
+*
+*******************************************************************************/
+MV_STATUS   mvEthMacAddrSet(void* pPortHndl, unsigned char *pAddr, int queue)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+    unsigned int    macH;
+    unsigned int    macL;
+
+    if(queue >= MV_ETH_RX_Q_NUM)
+    {
+        mvOsPrintf("ethDrv: RX queue #%d is out of range\n", queue);
+        return MV_BAD_PARAM;
+    }
+
+    if(queue != -1)
+    {
+        macL =  (pAddr[4] << 8) | (pAddr[5]);
+        macH =  (pAddr[0] << 24)| (pAddr[1] << 16) |
+                (pAddr[2] << 8) | (pAddr[3] << 0);
+
+        MV_REG_WRITE(ETH_MAC_ADDR_LOW_REG(pPortCtrl->portNo),  macL);
+        MV_REG_WRITE(ETH_MAC_ADDR_HIGH_REG(pPortCtrl->portNo), macH);
+    }
+
+    /* Accept frames of this address */
+    ethSetUcastAddr(pPortCtrl->portNo, pAddr[5], queue);
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvEthMacAddrGet - This function returns the port Unicast address.
+*
+* DESCRIPTION:
+*       This function returns the port Ethernet MAC address.
+*
+* INPUT:
+*       int     portNo          - Ethernet port number.
+*       char*   pAddr           - Pointer where address will be written to
+*
+* RETURN:   MV_STATUS
+*               MV_OK - Success,  Other - Faulure
+*
+*******************************************************************************/
+MV_STATUS   mvEthMacAddrGet(int portNo, unsigned char *pAddr)
+{
+    unsigned int    macH;
+    unsigned int    macL;
+
+    if(pAddr == NULL)
+    {
+        mvOsPrintf("mvEthMacAddrGet: NULL pointer.\n");
+        return MV_BAD_PARAM;
+    }
+
+    macH = MV_REG_READ(ETH_MAC_ADDR_HIGH_REG(portNo));
+    macL = MV_REG_READ(ETH_MAC_ADDR_LOW_REG(portNo));
+    pAddr[0] = (macH >> 24) & 0xff;
+    pAddr[1] = (macH >> 16) & 0xff;
+    pAddr[2] = (macH >> 8) & 0xff;
+    pAddr[3] = macH  & 0xff;
+    pAddr[4] = (macL >> 8) & 0xff;
+    pAddr[5] = macL  & 0xff;
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvEthMcastCrc8Get - Calculate CRC8 of MAC address.
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       MV_U8*  pAddr           - Address to calculate CRC-8
+*
+* RETURN: MV_U8 - CRC-8 of this MAC address
+*
+*******************************************************************************/
+MV_U8   mvEthMcastCrc8Get(MV_U8* pAddr)
+{
+    unsigned int    macH;
+    unsigned int    macL;
+    int             macArray[48];
+    int             crc[8];
+    int             i;
+    unsigned char   crcResult = 0;
+
+        /* Calculate CRC-8 out of the given address */
+    macH =  (pAddr[0] << 8) | (pAddr[1]);
+    macL =  (pAddr[2] << 24)| (pAddr[3] << 16) |
+            (pAddr[4] << 8) | (pAddr[5] << 0);
+
+    for(i=0; i<32; i++)
+        macArray[i] = (macL >> i) & 0x1;
+
+    for(i=32; i<48; i++)
+        macArray[i] = (macH >> (i - 32)) & 0x1;
+
+    crc[0] = macArray[45] ^ macArray[43] ^ macArray[40] ^ macArray[39] ^
+             macArray[35] ^ macArray[34] ^ macArray[31] ^ macArray[30] ^
+             macArray[28] ^ macArray[23] ^ macArray[21] ^ macArray[19] ^
+             macArray[18] ^ macArray[16] ^ macArray[14] ^ macArray[12] ^
+             macArray[8]  ^ macArray[7]  ^ macArray[6]  ^ macArray[0];
+
+    crc[1] = macArray[46] ^ macArray[45] ^ macArray[44] ^ macArray[43] ^
+             macArray[41] ^ macArray[39] ^ macArray[36] ^ macArray[34] ^
+             macArray[32] ^ macArray[30] ^ macArray[29] ^ macArray[28] ^
+             macArray[24] ^ macArray[23] ^ macArray[22] ^ macArray[21] ^
+             macArray[20] ^ macArray[18] ^ macArray[17] ^ macArray[16] ^
+             macArray[15] ^ macArray[14] ^ macArray[13] ^ macArray[12] ^
+             macArray[9]  ^ macArray[6]  ^ macArray[1]  ^ macArray[0];
+
+    crc[2] = macArray[47] ^ macArray[46] ^ macArray[44] ^ macArray[43] ^
+             macArray[42] ^ macArray[39] ^ macArray[37] ^ macArray[34] ^
+             macArray[33] ^ macArray[29] ^ macArray[28] ^ macArray[25] ^
+             macArray[24] ^ macArray[22] ^ macArray[17] ^ macArray[15] ^
+             macArray[13] ^ macArray[12] ^ macArray[10] ^ macArray[8]  ^
+             macArray[6]  ^ macArray[2]  ^ macArray[1]  ^ macArray[0];
+
+    crc[3] = macArray[47] ^ macArray[45] ^ macArray[44] ^ macArray[43] ^
+             macArray[40] ^ macArray[38] ^ macArray[35] ^ macArray[34] ^
+             macArray[30] ^ macArray[29] ^ macArray[26] ^ macArray[25] ^
+             macArray[23] ^ macArray[18] ^ macArray[16] ^ macArray[14] ^
+             macArray[13] ^ macArray[11] ^ macArray[9]  ^ macArray[7]  ^
+             macArray[3]  ^ macArray[2]  ^ macArray[1];
+
+    crc[4] = macArray[46] ^ macArray[45] ^ macArray[44] ^ macArray[41] ^
+             macArray[39] ^ macArray[36] ^ macArray[35] ^ macArray[31] ^
+             macArray[30] ^ macArray[27] ^ macArray[26] ^ macArray[24] ^
+             macArray[19] ^ macArray[17] ^ macArray[15] ^ macArray[14] ^
+             macArray[12] ^ macArray[10] ^ macArray[8]  ^ macArray[4]  ^
+             macArray[3]  ^ macArray[2];
+
+    crc[5] = macArray[47] ^ macArray[46] ^ macArray[45] ^ macArray[42] ^
+             macArray[40] ^ macArray[37] ^ macArray[36] ^ macArray[32] ^
+             macArray[31] ^ macArray[28] ^ macArray[27] ^ macArray[25] ^
+             macArray[20] ^ macArray[18] ^ macArray[16] ^ macArray[15] ^
+             macArray[13] ^ macArray[11] ^ macArray[9]  ^ macArray[5]  ^
+             macArray[4]  ^ macArray[3];
+
+    crc[6] = macArray[47] ^ macArray[46] ^ macArray[43] ^ macArray[41] ^
+             macArray[38] ^ macArray[37] ^ macArray[33] ^ macArray[32] ^
+             macArray[29] ^ macArray[28] ^ macArray[26] ^ macArray[21] ^
+             macArray[19] ^ macArray[17] ^ macArray[16] ^ macArray[14] ^
+             macArray[12] ^ macArray[10] ^ macArray[6]  ^ macArray[5]  ^
+             macArray[4];
+
+    crc[7] = macArray[47] ^ macArray[44] ^ macArray[42] ^ macArray[39] ^
+             macArray[38] ^ macArray[34] ^ macArray[33] ^ macArray[30] ^
+             macArray[29] ^ macArray[27] ^ macArray[22] ^ macArray[20] ^
+             macArray[18] ^ macArray[17] ^ macArray[15] ^ macArray[13] ^
+             macArray[11] ^ macArray[7]  ^ macArray[6]  ^ macArray[5];
+
+    for(i=0; i<8; i++)
+        crcResult = crcResult | (crc[i] << i);
+
+    return crcResult;
+}
+/*******************************************************************************
+* mvEthMcastAddrSet - Multicast address settings.
+*
+* DESCRIPTION:
+*       This API controls the MV device MAC multicast support.
+*       The MV device supports multicast using two tables:
+*       1) Special Multicast Table for MAC addresses of the form
+*          0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
+*          The MAC DA[7:0] bits are used as a pointer to the Special Multicast
+*          Table entries in the DA-Filter table.
+*          In this case, the function calls ethPortSmcAddr() routine to set the
+*          Special Multicast Table.
+*       2) Other Multicast Table for multicast of another type. A CRC-8bit
+*          is used as an index to the Other Multicast Table entries in the
+*          DA-Filter table.
+*          In this case, the function calculates the CRC-8bit value and calls
+*          ethPortOmcAddr() routine to set the Other Multicast Table.
+*
+* INPUT:
+*       void*   pEthPortHndl    - Ethernet port handler.
+*       MV_U8*  pAddr           - Address to be set
+*       int     queue           - RX queue to capture all packets with this
+*                               Multicast MAC address.
+*                               -1 means delete this Multicast address.
+*
+* RETURN: MV_STATUS
+*       MV_TRUE - Success, Other - Failure
+*
+*******************************************************************************/
+MV_STATUS   mvEthMcastAddrSet(void* pPortHndl, MV_U8 *pAddr, int queue)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+    unsigned char   crcResult = 0;
+
+    if(queue >= MV_ETH_RX_Q_NUM)
+    {
+        mvOsPrintf("ethPort %d: RX queue #%d is out of range\n",
+                    pPortCtrl->portNo, queue);
+        return MV_BAD_PARAM;
+    }
+
+    if((pAddr[0] == 0x01) &&
+       (pAddr[1] == 0x00) &&
+       (pAddr[2] == 0x5E) &&
+       (pAddr[3] == 0x00) &&
+       (pAddr[4] == 0x00))
+    {
+        ethSetSpecialMcastAddr(pPortCtrl->portNo, pAddr[5], queue);
+    }
+    else
+    {
+        crcResult = mvEthMcastCrc8Get(pAddr);
+
+        /* Check Add counter for this CRC value */
+        if(queue == -1)
+        {
+            if(pPortCtrl->mcastCount[crcResult] == 0)
+            {
+                mvOsPrintf("ethPort #%d: No valid Mcast for crc8=0x%02x\n",
+                            pPortCtrl->portNo, (unsigned)crcResult);
+                return MV_NO_SUCH;
+            }
+
+            pPortCtrl->mcastCount[crcResult]--;
+            if(pPortCtrl->mcastCount[crcResult] != 0)
+            {
+                mvOsPrintf("ethPort #%d: After delete there are %d valid Mcast for crc8=0x%02x\n",
+                            pPortCtrl->portNo, pPortCtrl->mcastCount[crcResult],
+                            (unsigned)crcResult);
+                return MV_NO_CHANGE;
+            }
+        }
+        else
+        {
+            pPortCtrl->mcastCount[crcResult]++;
+            if(pPortCtrl->mcastCount[crcResult] > 1)
+            {
+                mvOsPrintf("ethPort #%d: Valid Mcast for crc8=0x%02x already exists\n",
+                                pPortCtrl->portNo, (unsigned)crcResult);
+                return MV_NO_CHANGE;
+            }
+        }
+        ethSetOtherMcastAddr(pPortCtrl->portNo, crcResult, queue);
+    }
+    return MV_OK;
+}
+
+/*******************************************************************************
+* ethSetUcastTable - Unicast address settings.
+*
+* DESCRIPTION:
+*      Set all entries in the Unicast MAC Table queue==-1 means reject all
+* INPUT:
+*
+* RETURN:
+*
+*******************************************************************************/
+static void    ethSetUcastTable(int portNo, int queue)
+{
+    int     offset;
+    MV_U32  regValue;
+
+    if(queue == -1)
+    {
+        regValue = 0;
+    }
+    else
+    {
+        regValue = (((0x01 | (queue<<1)) << 0)  |
+                    ((0x01 | (queue<<1)) << 8)  |
+                    ((0x01 | (queue<<1)) << 16) |
+                    ((0x01 | (queue<<1)) << 24));
+    }
+
+    for (offset=0; offset<=0xC; offset+=4)
+        MV_REG_WRITE((ETH_DA_FILTER_UCAST_BASE(portNo) + offset), regValue);
+}
+
+/*******************************************************************************
+* mvEthSetSpecialMcastTable - Special Multicast address settings.
+*
+* DESCRIPTION:
+*   Set all entries to the Special Multicast MAC Table. queue==-1 means reject all
+* INPUT:
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_VOID    mvEthSetSpecialMcastTable(int portNo, int queue)
+{
+    int     offset;
+    MV_U32  regValue;
+
+    if(queue == -1)
+    {
+        regValue = 0;
+    }
+    else
+    {
+        regValue = (((0x01 | (queue<<1)) << 0)  |
+                    ((0x01 | (queue<<1)) << 8)  |
+                    ((0x01 | (queue<<1)) << 16) |
+                    ((0x01 | (queue<<1)) << 24));
+    }
+
+    for (offset=0; offset<=0xFC; offset+=4)
+    {
+        MV_REG_WRITE((ETH_DA_FILTER_SPEC_MCAST_BASE(portNo) +
+                      offset), regValue);
+    }
+}
+
+/*******************************************************************************
+* mvEthSetOtherMcastTable - Other Multicast address settings.
+*
+* DESCRIPTION:
+*   Set all entries to the Other Multicast MAC Table. queue==-1 means reject all
+* INPUT:
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_VOID    mvEthSetOtherMcastTable(int portNo, int queue)
+{
+    int     offset;
+    MV_U32  regValue;
+
+    if(queue == -1)
+    {
+        regValue = 0;
+    }
+    else
+    {
+        regValue = (((0x01 | (queue<<1)) << 0)  |
+                    ((0x01 | (queue<<1)) << 8)  |
+                    ((0x01 | (queue<<1)) << 16) |
+                    ((0x01 | (queue<<1)) << 24));
+    }
+
+    for (offset=0; offset<=0xFC; offset+=4)
+    {
+        MV_REG_WRITE((ETH_DA_FILTER_OTH_MCAST_BASE(portNo) +
+                      offset), regValue);
+    }
+}
+
+/*******************************************************************************
+* ethSetUcastAddr - This function Set the port unicast address table
+*
+* DESCRIPTION:
+*       This function locates the proper entry in the Unicast table for the
+*       specified MAC nibble and sets its properties according to function
+*       parameters.
+*
+* INPUT:
+*       int     ethPortNum  - Port number.
+*       MV_U8   lastNibble  - Unicast MAC Address last nibble.
+*       int     queue       - Rx queue number for this MAC address.
+*                           value "-1" means remove address
+*
+* OUTPUT:
+*       This function add/removes MAC addresses from the port unicast address
+*       table.
+*
+* RETURN:
+*       MV_TRUE is output succeeded.
+*       MV_FALSE if option parameter is invalid.
+*
+*******************************************************************************/
+static MV_BOOL ethSetUcastAddr(int portNo, MV_U8 lastNibble, int queue)
+{
+    unsigned int unicastReg;
+    unsigned int tblOffset;
+    unsigned int regOffset;
+
+    /* Locate the Unicast table entry */
+    lastNibble  = (0xf & lastNibble);
+    tblOffset = (lastNibble / 4) * 4; /* Register offset from unicast table base*/
+    regOffset = lastNibble % 4;     /* Entry offset within the above register */
+
+
+    unicastReg = MV_REG_READ( (ETH_DA_FILTER_UCAST_BASE(portNo) +
+                               tblOffset));
+
+
+    if(queue == -1)
+    {
+        /* Clear accepts frame bit at specified unicast DA table entry */
+        unicastReg &= ~(0xFF << (8*regOffset));
+    }
+    else
+    {
+        unicastReg &= ~(0xFF << (8*regOffset));
+        unicastReg |= ((0x01 | (queue<<1)) << (8*regOffset));
+    }
+    MV_REG_WRITE( (ETH_DA_FILTER_UCAST_BASE(portNo) + tblOffset),
+                  unicastReg);
+
+    return MV_TRUE;
+}
+
+/*******************************************************************************
+* ethSetSpecialMcastAddr - Special Multicast address settings.
+*
+* DESCRIPTION:
+*       This routine controls the MV device special MAC multicast support.
+*       The Special Multicast Table for MAC addresses supports MAC of the form
+*       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
+*       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
+*       Table entries in the DA-Filter table.
+*       This function set the Special Multicast Table appropriate entry
+*       according to the argument given.
+*
+* INPUT:
+*       int     ethPortNum      Port number.
+*       unsigned char   mcByte      Multicast addr last byte (MAC DA[7:0] bits).
+*       int          queue      Rx queue number for this MAC address.
+*       int             option      0 = Add, 1 = remove address.
+*
+* OUTPUT:
+*       See description.
+*
+* RETURN:
+*       MV_TRUE is output succeeded.
+*       MV_FALSE if option parameter is invalid.
+*
+*******************************************************************************/
+static MV_BOOL ethSetSpecialMcastAddr(int ethPortNum, MV_U8 lastByte, int queue)
+{
+    unsigned int smcTableReg;
+    unsigned int tblOffset;
+    unsigned int regOffset;
+
+    /* Locate the SMC table entry */
+    tblOffset = (lastByte / 4);     /* Register offset from SMC table base    */
+    regOffset = lastByte % 4;       /* Entry offset within the above register */
+
+    smcTableReg = MV_REG_READ((ETH_DA_FILTER_SPEC_MCAST_BASE(ethPortNum) + tblOffset*4));
+
+    if(queue == -1)
+    {
+        /* Clear accepts frame bit at specified Special DA table entry */
+        smcTableReg &= ~(0xFF << (8 * regOffset));
+    }
+    else
+    {
+        smcTableReg &= ~(0xFF << (8 * regOffset));
+        smcTableReg |= ((0x01 | (queue<<1)) << (8 * regOffset));
+    }
+    MV_REG_WRITE((ETH_DA_FILTER_SPEC_MCAST_BASE(ethPortNum) +
+                  tblOffset*4), smcTableReg);
+
+    return MV_TRUE;
+}
+
+/*******************************************************************************
+* ethSetOtherMcastAddr - Multicast address settings.
+*
+* DESCRIPTION:
+*       This routine controls the MV device Other MAC multicast support.
+*       The Other Multicast Table is used for multicast of another type.
+*       A CRC-8bit is used as an index to the Other Multicast Table entries
+*       in the DA-Filter table.
+*       The function gets the CRC-8bit value from the calling routine and
+*       set the Other Multicast Table appropriate entry according to the
+*       CRC-8 argument given.
+*
+* INPUT:
+*       int     ethPortNum  Port number.
+*       MV_U8   crc8        A CRC-8bit (Polynomial: x^8+x^2+x^1+1).
+*       int     queue       Rx queue number for this MAC address.
+*
+* OUTPUT:
+*       See description.
+*
+* RETURN:
+*       MV_TRUE is output succeeded.
+*       MV_FALSE if option parameter is invalid.
+*
+*******************************************************************************/
+static MV_BOOL ethSetOtherMcastAddr(int ethPortNum, MV_U8 crc8, int queue)
+{
+    unsigned int omcTableReg;
+    unsigned int tblOffset;
+    unsigned int regOffset;
+
+    /* Locate the OMC table entry */
+    tblOffset = (crc8 / 4) * 4;     /* Register offset from OMC table base    */
+    regOffset = crc8 % 4;           /* Entry offset within the above register */
+
+    omcTableReg = MV_REG_READ(
+        (ETH_DA_FILTER_OTH_MCAST_BASE(ethPortNum) + tblOffset));
+
+    if(queue == -1)
+    {
+        /* Clear accepts frame bit at specified Other DA table entry */
+        omcTableReg &= ~(0xFF << (8 * regOffset));
+    }
+    else
+    {
+        omcTableReg &= ~(0xFF << (8 * regOffset));
+        omcTableReg |= ((0x01 | (queue<<1)) << (8 * regOffset));
+    }
+
+    MV_REG_WRITE((ETH_DA_FILTER_OTH_MCAST_BASE(ethPortNum) + tblOffset),
+                    omcTableReg);
+
+    return MV_TRUE;
+}
+
+
+/******************************************************************************/
+/*                      MIB Counters functions                                */
+/******************************************************************************/
+
+
+/*******************************************************************************
+* mvEthMibCounterRead - Read a MIB counter
+*
+* DESCRIPTION:
+*       This function reads a MIB counter of a specific ethernet port.
+*       NOTE - Read from ETH_MIB_GOOD_OCTETS_RECEIVED_LOW or
+*              ETH_MIB_GOOD_OCTETS_SENT_LOW counters will return 64 bits value,
+*              so pHigh32 pointer should not be NULL in this case.
+*
+* INPUT:
+*       int           ethPortNum  - Ethernet Port number.
+*       unsigned int  mibOffset   - MIB counter offset.
+*
+* OUTPUT:
+*       MV_U32*       pHigh32 - pointer to place where 32 most significant bits
+*                             of the counter will be stored.
+*
+* RETURN:
+*       32 low sgnificant bits of MIB counter value.
+*
+*******************************************************************************/
+MV_U32  mvEthMibCounterRead(void* pPortHandle, unsigned int mibOffset,
+                            MV_U32* pHigh32)
+{
+    int             portNo;
+    MV_U32          valLow32, valHigh32;
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+
+    portNo = pPortCtrl->portNo;
+
+    valLow32 = MV_REG_READ(ETH_MIB_COUNTERS_BASE(portNo) + mibOffset);
+
+    /* Implement FEr ETH. Erroneous Value when Reading the Upper 32-bits    */
+    /* of a 64-bit MIB Counter.                                             */
+    if( (mibOffset == ETH_MIB_GOOD_OCTETS_RECEIVED_LOW) ||
+        (mibOffset == ETH_MIB_GOOD_OCTETS_SENT_LOW) )
+    {
+        valHigh32 = MV_REG_READ(ETH_MIB_COUNTERS_BASE(portNo) + mibOffset + 4);
+        if(pHigh32 != NULL)
+            *pHigh32 = valHigh32;
+    }
+    return valLow32;
+}
+
+/*******************************************************************************
+* mvEthMibCountersClear - Clear all MIB counters
+*
+* DESCRIPTION:
+*       This function clears all MIB counters
+*
+* INPUT:
+*       int           ethPortNum  - Ethernet Port number.
+*
+*
+* RETURN:   void
+*
+*******************************************************************************/
+void  mvEthMibCountersClear(void* pPortHandle)
+{
+    int             i, portNo;
+    unsigned int    dummy;
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+
+    portNo = pPortCtrl->portNo;
+
+    /* Perform dummy reads from MIB counters */
+    for(i=ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i<ETH_MIB_LATE_COLLISION; i+=4)
+        dummy = MV_REG_READ((ETH_MIB_COUNTERS_BASE(portNo) + i));
+}
+
+
+/******************************************************************************/
+/*                        RX Dispatching configuration routines               */
+/******************************************************************************/
+
+int     mvEthTosToRxqGet(void* pPortHandle, int tos)
+{
+    MV_U32          regValue;
+    int             regIdx, regOffs, rxq;
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+
+    if(tos > 0xFF)
+    {
+        mvOsPrintf("eth_%d: tos=0x%x is out of range\n", pPortCtrl->portNo, tos);
+        return -1;
+    }
+    regIdx  = mvOsDivide(tos>>2, 10);
+    regOffs = mvOsReminder(tos>>2, 10);
+
+    regValue = MV_REG_READ(ETH_DIFF_SERV_PRIO_REG(pPortCtrl->portNo, regIdx) );
+    rxq = (regValue >> (regOffs*3));
+    rxq &= 0x7;
+
+    return rxq;
+}
+
+/*******************************************************************************
+* mvEthTosToRxqSet - Map packets with special TOS value to special RX queue
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       void*   pPortHandle - Pointer to port specific handler;
+*       int     tos         - TOS value in the IP header of the packet
+*       int     rxq         - RX Queue for packets with the configured TOS value
+*                           Negative value (-1) means no special processing for these packets,
+*                           so they will be processed as regular packets.
+*
+* RETURN:   MV_STATUS
+*******************************************************************************/
+MV_STATUS   mvEthTosToRxqSet(void* pPortHandle, int tos, int rxq)
+{
+    MV_U32          regValue;
+    int             regIdx, regOffs;
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+
+    if( (rxq < 0) || (rxq >= MV_ETH_RX_Q_NUM) )
+    {
+        mvOsPrintf("eth_%d: RX queue #%d is out of range\n", pPortCtrl->portNo, rxq);
+        return MV_BAD_PARAM;
+    }
+    if(tos > 0xFF)
+    {
+        mvOsPrintf("eth_%d: tos=0x%x is out of range\n", pPortCtrl->portNo, tos);
+        return MV_BAD_PARAM;
+    }
+    regIdx  = mvOsDivide(tos>>2, 10);
+    regOffs = mvOsReminder(tos>>2, 10);
+
+    regValue = MV_REG_READ(ETH_DIFF_SERV_PRIO_REG(pPortCtrl->portNo, regIdx) );
+    regValue &= ~(0x7 << (regOffs*3));
+    regValue |= (rxq << (regOffs*3));
+
+    MV_REG_WRITE(ETH_DIFF_SERV_PRIO_REG(pPortCtrl->portNo, regIdx), regValue);
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvEthVlanPrioRxQueue - Configure RX queue to capture VLAN tagged packets with
+*                        special priority bits [0-2]
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       void*   pPortHandle - Pointer to port specific handler;
+*       int     bpduQueue   - Special queue to capture VLAN tagged packets with special
+*                           priority.
+*                           Negative value (-1) means no special processing for these packets,
+*                           so they will be processed as regular packets.
+*
+* RETURN:   MV_STATUS
+*       MV_OK       - Success
+*       MV_FAIL     - Failed.
+*
+*******************************************************************************/
+MV_STATUS   mvEthVlanPrioRxQueue(void* pPortHandle, int vlanPrio, int vlanPrioQueue)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+    MV_U32          vlanPrioReg;
+
+    if(vlanPrioQueue >= MV_ETH_RX_Q_NUM)
+    {
+        mvOsPrintf("ethDrv: RX queue #%d is out of range\n", vlanPrioQueue);
+        return MV_BAD_PARAM;
+    }
+    if(vlanPrio >= 8)
+    {
+        mvOsPrintf("ethDrv: vlanPrio=%d is out of range\n", vlanPrio);
+        return MV_BAD_PARAM;
+    }
+
+    vlanPrioReg = MV_REG_READ(ETH_VLAN_TAG_TO_PRIO_REG(pPortCtrl->portNo));
+    vlanPrioReg &= ~(0x7 << (vlanPrio*3));
+    vlanPrioReg |= (vlanPrioQueue << (vlanPrio*3));
+    MV_REG_WRITE(ETH_VLAN_TAG_TO_PRIO_REG(pPortCtrl->portNo), vlanPrioReg);
+
+    return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvEthBpduRxQueue - Configure RX queue to capture BPDU packets.
+*
+* DESCRIPTION:
+*       This function defines processing of BPDU packets.
+*   BPDU packets can be accepted and captured to one of RX queues
+*   or can be processing as regular Multicast packets.
+*
+* INPUT:
+*       void*   pPortHandle - Pointer to port specific handler;
+*       int     bpduQueue   - Special queue to capture BPDU packets (DA is equal to
+*                           01-80-C2-00-00-00 through 01-80-C2-00-00-FF,
+*                           except for the Flow-Control Pause packets).
+*                           Negative value (-1) means no special processing for BPDU,
+*                           packets so they will be processed as regular Multicast packets.
+*
+* RETURN:   MV_STATUS
+*       MV_OK       - Success
+*       MV_FAIL     - Failed.
+*
+*******************************************************************************/
+MV_STATUS   mvEthBpduRxQueue(void* pPortHandle, int bpduQueue)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+    MV_U32      portCfgReg;
+    MV_U32      portCfgExtReg;
+
+    if(bpduQueue >= MV_ETH_RX_Q_NUM)
+    {
+        mvOsPrintf("ethDrv: RX queue #%d is out of range\n", bpduQueue);
+        return MV_BAD_PARAM;
+    }
+
+    portCfgExtReg = MV_REG_READ(ETH_PORT_CONFIG_EXTEND_REG(pPortCtrl->portNo));
+
+    portCfgReg = MV_REG_READ(ETH_PORT_CONFIG_REG(pPortCtrl->portNo));
+    if(bpduQueue >= 0)
+    {
+        pPortCtrl->portConfig.rxBpduQ = bpduQueue;
+
+        portCfgReg &= ~ETH_DEF_RX_BPDU_QUEUE_ALL_MASK;
+        portCfgReg |= ETH_DEF_RX_BPDU_QUEUE_MASK(pPortCtrl->portConfig.rxBpduQ);
+
+        MV_REG_WRITE(ETH_PORT_CONFIG_REG(pPortCtrl->portNo), portCfgReg);
+
+        portCfgExtReg |= ETH_CAPTURE_SPAN_BPDU_ENABLE_MASK;
+    }
+    else
+    {
+        pPortCtrl->portConfig.rxBpduQ = -1;
+        /* no special processing for BPDU packets */
+        portCfgExtReg &= (~ETH_CAPTURE_SPAN_BPDU_ENABLE_MASK);
+    }
+
+    MV_REG_WRITE(ETH_PORT_CONFIG_EXTEND_REG(pPortCtrl->portNo),  portCfgExtReg);
+
+    return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvEthArpRxQueue - Configure RX queue to capture ARP packets.
+*
+* DESCRIPTION:
+*       This function defines processing of ARP (type=0x0806) packets.
+*   ARP packets can be accepted and captured to one of RX queues
+*   or can be processed as other Broadcast packets.
+*
+* INPUT:
+*       void*   pPortHandle - Pointer to port specific handler;
+*       int     arpQueue    - Special queue to capture ARP packets (type=0x806).
+*                           Negative value (-1) means discard ARP packets
+*
+* RETURN:   MV_STATUS
+*       MV_OK       - Success
+*       MV_FAIL     - Failed.
+*
+*******************************************************************************/
+MV_STATUS   mvEthArpRxQueue(void* pPortHandle, int arpQueue)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+    MV_U32      portCfgReg;
+
+    if(arpQueue >= MV_ETH_RX_Q_NUM)
+    {
+        mvOsPrintf("ethDrv: RX queue #%d is out of range\n", arpQueue);
+        return MV_BAD_PARAM;
+    }
+
+    portCfgReg = MV_REG_READ(ETH_PORT_CONFIG_REG(pPortCtrl->portNo));
+
+    if(arpQueue >= 0)
+    {
+        pPortCtrl->portConfig.rxArpQ = arpQueue;
+        portCfgReg &= ~ETH_DEF_RX_ARP_QUEUE_ALL_MASK;
+        portCfgReg |= ETH_DEF_RX_ARP_QUEUE_MASK(pPortCtrl->portConfig.rxArpQ);
+
+        portCfgReg &= (~ETH_REJECT_ARP_BCAST_MASK);
+    }
+    else
+    {
+        pPortCtrl->portConfig.rxArpQ = -1;
+        portCfgReg |= ETH_REJECT_ARP_BCAST_MASK;
+    }
+
+    MV_REG_WRITE(ETH_PORT_CONFIG_REG(pPortCtrl->portNo), portCfgReg);
+
+    return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvEthTcpRxQueue - Configure RX queue to capture TCP packets.
+*
+* DESCRIPTION:
+*       This function defines processing of TCP packets.
+*   TCP packets can be accepted and captured to one of RX queues
+*   or can be processed as regular Unicast packets.
+*
+* INPUT:
+*       void*   pPortHandle - Pointer to port specific handler;
+*       int     tcpQueue    - Special queue to capture TCP packets. Value "-1"
+*                           means no special processing for TCP packets,
+*                           so they will be processed as regular
+*
+* RETURN:   MV_STATUS
+*       MV_OK       - Success
+*       MV_FAIL     - Failed.
+*
+*******************************************************************************/
+MV_STATUS   mvEthTcpRxQueue(void* pPortHandle, int tcpQueue)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+    MV_U32      portCfgReg;
+
+    if(tcpQueue >= MV_ETH_RX_Q_NUM)
+    {
+        mvOsPrintf("ethDrv: RX queue #%d is out of range\n", tcpQueue);
+        return MV_BAD_PARAM;
+    }
+    portCfgReg = MV_REG_READ(ETH_PORT_CONFIG_REG(pPortCtrl->portNo));
+
+    if(tcpQueue >= 0)
+    {
+        pPortCtrl->portConfig.rxTcpQ = tcpQueue;
+        portCfgReg &= ~ETH_DEF_RX_TCP_QUEUE_ALL_MASK;
+        portCfgReg |= ETH_DEF_RX_TCP_QUEUE_MASK(pPortCtrl->portConfig.rxTcpQ);
+
+        portCfgReg |= ETH_CAPTURE_TCP_FRAMES_ENABLE_MASK;
+    }
+    else
+    {
+        pPortCtrl->portConfig.rxTcpQ = -1;
+        portCfgReg &= (~ETH_CAPTURE_TCP_FRAMES_ENABLE_MASK);
+    }
+
+    MV_REG_WRITE(ETH_PORT_CONFIG_REG(pPortCtrl->portNo), portCfgReg);
+
+    return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvEthUdpRxQueue - Configure RX queue to capture UDP packets.
+*
+* DESCRIPTION:
+*       This function defines processing of UDP packets.
+*   TCP packets can be accepted and captured to one of RX queues
+*   or can be processed as regular Unicast packets.
+*
+* INPUT:
+*       void*   pPortHandle - Pointer to port specific handler;
+*       int     udpQueue    - Special queue to capture UDP packets. Value "-1"
+*                           means no special processing for UDP packets,
+*                           so they will be processed as regular
+*
+* RETURN:   MV_STATUS
+*       MV_OK       - Success
+*       MV_FAIL     - Failed.
+*
+*******************************************************************************/
+MV_STATUS   mvEthUdpRxQueue(void* pPortHandle, int udpQueue)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+    MV_U32          portCfgReg;
+
+    if(udpQueue >= MV_ETH_RX_Q_NUM)
+    {
+        mvOsPrintf("ethDrv: RX queue #%d is out of range\n", udpQueue);
+        return MV_BAD_PARAM;
+    }
+
+    portCfgReg = MV_REG_READ(ETH_PORT_CONFIG_REG(pPortCtrl->portNo));
+
+    if(udpQueue >= 0)
+    {
+        pPortCtrl->portConfig.rxUdpQ = udpQueue;
+        portCfgReg &= ~ETH_DEF_RX_UDP_QUEUE_ALL_MASK;
+        portCfgReg |= ETH_DEF_RX_UDP_QUEUE_MASK(pPortCtrl->portConfig.rxUdpQ);
+
+        portCfgReg |= ETH_CAPTURE_UDP_FRAMES_ENABLE_MASK;
+    }
+    else
+    {
+        pPortCtrl->portConfig.rxUdpQ = -1;
+        portCfgReg &= ~ETH_CAPTURE_UDP_FRAMES_ENABLE_MASK;
+    }
+
+    MV_REG_WRITE(ETH_PORT_CONFIG_REG(pPortCtrl->portNo), portCfgReg);
+
+    return MV_OK;
+}
+
+
+/******************************************************************************/
+/*                          Speed, Duplex, FlowControl routines               */
+/******************************************************************************/
+
+/*******************************************************************************
+* mvEthSpeedDuplexSet - Set Speed and Duplex of the port.
+*
+* DESCRIPTION:
+*       This function configure the port to work with desirable Duplex and Speed.
+*       Changing of these parameters are allowed only when port is disabled.
+*       This function disable the port if was enabled, change duplex and speed
+*       and, enable the port back if needed.
+*
+* INPUT:
+*       void*           pPortHandle - Pointer to port specific handler;
+*       ETH_PORT_SPEED  speed       - Speed of the port.
+*       ETH_PORT_SPEED  duplex      - Duplex of the port.
+*
+* RETURN:   MV_STATUS
+*       MV_OK           - Success
+*       MV_OUT_OF_RANGE - Failed. Port is out of valid range
+*       MV_NOT_FOUND    - Failed. Port is not initialized.
+*       MV_BAD_PARAM    - Input parameters (speed/duplex) in conflict.
+*       MV_BAD_VALUE    - Value of one of input parameters (speed, duplex)
+*                       is not valid
+*
+*******************************************************************************/
+MV_STATUS   mvEthSpeedDuplexSet(void* pPortHandle, MV_ETH_PORT_SPEED speed,
+                                MV_ETH_PORT_DUPLEX duplex)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+    int             port = pPortCtrl->portNo;
+    MV_U32      portSerialCtrlReg;
+
+    if( (port < 0) || (port >= (int)mvCtrlEthMaxPortGet()) )
+        return MV_OUT_OF_RANGE;
+
+    pPortCtrl = ethPortCtrl[port];
+    if(pPortCtrl == NULL)
+        return MV_NOT_FOUND;
+
+    /* Check validity */
+    if( (speed == MV_ETH_SPEED_1000) && (duplex == MV_ETH_DUPLEX_HALF) )
+        return MV_BAD_PARAM;
+
+    portSerialCtrlReg = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(port));
+    /* Set Speed */
+    switch(speed)
+    {
+        case MV_ETH_SPEED_AN:
+            portSerialCtrlReg &= ~ETH_DISABLE_SPEED_AUTO_NEG_MASK;
+            break;
+
+        case MV_ETH_SPEED_10:
+            portSerialCtrlReg |= ETH_DISABLE_SPEED_AUTO_NEG_MASK;
+            portSerialCtrlReg &= ~ETH_SET_GMII_SPEED_1000_MASK;
+            portSerialCtrlReg &= ~ETH_SET_MII_SPEED_100_MASK;
+            break;
+
+        case MV_ETH_SPEED_100:
+            portSerialCtrlReg |= ETH_DISABLE_SPEED_AUTO_NEG_MASK;
+            portSerialCtrlReg &= ~ETH_SET_GMII_SPEED_1000_MASK;
+            portSerialCtrlReg |= ETH_SET_MII_SPEED_100_MASK;
+            break;
+
+        case MV_ETH_SPEED_1000:
+            portSerialCtrlReg |= ETH_DISABLE_SPEED_AUTO_NEG_MASK;
+            portSerialCtrlReg |= ETH_SET_GMII_SPEED_1000_MASK;
+            break;
+
+        default:
+            mvOsPrintf("ethDrv: Unexpected Speed value %d\n", speed);
+            return MV_BAD_VALUE;
+    }
+    /* Set duplex */
+    switch(duplex)
+    {
+        case MV_ETH_DUPLEX_AN:
+            portSerialCtrlReg &= ~ETH_DISABLE_DUPLEX_AUTO_NEG_MASK;
+            break;
+
+        case MV_ETH_DUPLEX_HALF:
+            portSerialCtrlReg |= ETH_DISABLE_DUPLEX_AUTO_NEG_MASK;
+            portSerialCtrlReg &= ~ETH_SET_FULL_DUPLEX_MASK;
+            break;
+
+        case MV_ETH_DUPLEX_FULL:
+            portSerialCtrlReg |= ETH_DISABLE_DUPLEX_AUTO_NEG_MASK;
+            portSerialCtrlReg |= ETH_SET_FULL_DUPLEX_MASK;
+            break;
+
+        default:
+            mvOsPrintf("ethDrv: Unexpected Duplex value %d\n", duplex);
+            return MV_BAD_VALUE;
+    }
+    MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(port), portSerialCtrlReg);
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvEthFlowCtrlSet - Set Flow Control of the port.
+*
+* DESCRIPTION:
+*       This function configure the port to work with desirable Duplex and
+*       Speed. Changing of these parameters are allowed only when port is
+*       disabled. This function disable the port if was enabled, change
+*       duplex and speed and, enable the port back if needed.
+*
+* INPUT:
+*       void*           pPortHandle - Pointer to port specific handler;
+*       MV_ETH_PORT_FC  flowControl - Flow control of the port.
+*
+* RETURN:   MV_STATUS
+*       MV_OK           - Success
+*       MV_OUT_OF_RANGE - Failed. Port is out of valid range
+*       MV_NOT_FOUND    - Failed. Port is not initialized.
+*       MV_BAD_VALUE    - Value flowControl parameters is not valid
+*
+*******************************************************************************/
+MV_STATUS   mvEthFlowCtrlSet(void* pPortHandle, MV_ETH_PORT_FC flowControl)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+    int             port = pPortCtrl->portNo;
+    MV_U32      portSerialCtrlReg;
+
+    if( (port < 0) || (port >= (int)mvCtrlEthMaxPortGet() ) )
+        return MV_OUT_OF_RANGE;
+
+    pPortCtrl = ethPortCtrl[port];
+    if(pPortCtrl == NULL)
+        return MV_NOT_FOUND;
+
+    portSerialCtrlReg = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(port));
+    switch(flowControl)
+    {
+        case MV_ETH_FC_AN_ADV_DIS:
+            portSerialCtrlReg &= ~ETH_DISABLE_FC_AUTO_NEG_MASK;
+            portSerialCtrlReg &= ~ETH_ADVERTISE_SYM_FC_MASK;
+            break;
+
+        case MV_ETH_FC_AN_ADV_SYM:
+            portSerialCtrlReg &= ~ETH_DISABLE_FC_AUTO_NEG_MASK;
+            portSerialCtrlReg |= ETH_ADVERTISE_SYM_FC_MASK;
+            break;
+
+        case MV_ETH_FC_DISABLE:
+            portSerialCtrlReg |= ETH_DISABLE_FC_AUTO_NEG_MASK;
+            portSerialCtrlReg &= ~ETH_SET_FLOW_CTRL_MASK;
+            break;
+
+        case MV_ETH_FC_ENABLE:
+            portSerialCtrlReg |= ETH_DISABLE_FC_AUTO_NEG_MASK;
+            portSerialCtrlReg |= ETH_SET_FLOW_CTRL_MASK;
+            break;
+
+        default:
+            mvOsPrintf("ethDrv: Unexpected FlowControl value %d\n", flowControl);
+            return MV_BAD_VALUE;
+    }
+    MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(port), portSerialCtrlReg);
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvEthHeaderModeSet - Set port header mode.
+*
+* DESCRIPTION:
+*       This function configures the port to work in Marvell-Header mode.
+*
+* INPUT:
+*       void*           pPortHandle - Pointer to port specific handler;
+*       MV_ETH_HEADER_MODE headerMode - The header mode to set the port in.
+*
+* RETURN:   MV_STATUS
+*       MV_OK           - Success
+*       MV_NOT_SUPPORTED- Feature not supported.
+*       MV_OUT_OF_RANGE - Failed. Port is out of valid range
+*       MV_NOT_FOUND    - Failed. Port is not initialized.
+*       MV_BAD_VALUE    - Value of headerMode or numRxQueue parameter is not valid.
+*
+*******************************************************************************/
+MV_STATUS mvEthHeaderModeSet(void* pPortHandle, MV_ETH_HEADER_MODE headerMode)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+    int             port = pPortCtrl->portNo;
+    MV_U32			mvHeaderReg;
+    MV_U32          numRxQ = MV_ETH_RX_Q_NUM;
+
+    if((port < 0) || (port >= mvCtrlEthMaxPortGet()))
+        return MV_OUT_OF_RANGE;
+
+    pPortCtrl = ethPortCtrl[port];
+    if(pPortCtrl == NULL)
+        return MV_NOT_FOUND;
+
+    mvHeaderReg = MV_REG_READ(ETH_PORT_MARVELL_HEADER_REG(port));
+    /* Disable header mode.             */
+    mvHeaderReg &= ~ETH_MVHDR_EN_MASK;
+
+    if(headerMode != MV_ETH_DISABLE_HEADER_MODE)
+    {
+        /* Enable Header mode.              */
+        mvHeaderReg |= ETH_MVHDR_EN_MASK;
+
+        /* Clear DA-Prefix  & MHMask fields.*/
+        mvHeaderReg &= ~(ETH_MVHDR_DAPREFIX_MASK | ETH_MVHDR_MHMASK_MASK);
+
+        if(numRxQ > 1)
+        {
+            switch (headerMode)
+            {
+                case(MV_ETH_ENABLE_HEADER_MODE_PRI_2_1):
+                    mvHeaderReg |= ETH_MVHDR_DAPREFIX_PRI_1_2;
+                    break;
+                case(MV_ETH_ENABLE_HEADER_MODE_PRI_DBNUM):
+                    mvHeaderReg |= ETH_MVHDR_DAPREFIX_DBNUM_PRI;
+                    break;
+                case(MV_ETH_ENABLE_HEADER_MODE_PRI_SPID):
+                    mvHeaderReg |= ETH_MVHDR_DAPREFIX_SPID_PRI;
+                    break;
+                default:
+                    break;
+            }
+
+            switch (numRxQ)
+            {
+                case (4):
+                    mvHeaderReg |= ETH_MVHDR_MHMASK_4_QUEUE;
+                    break;
+                case (8):
+                    mvHeaderReg |= ETH_MVHDR_MHMASK_8_QUEUE;
+                    break;
+                default:
+                    break;
+            }
+        }
+    }
+
+    MV_REG_WRITE(ETH_PORT_MARVELL_HEADER_REG(port), mvHeaderReg);
+
+    return MV_OK;
+}
+
+#if (MV_ETH_VERSION >= 4)
+/*******************************************************************************
+* mvEthEjpModeSet - Enable / Disable EJP policy for TX.
+*
+* DESCRIPTION:
+*       This function
+*
+* INPUT:
+*       void*           pPortHandle - Pointer to port specific handler;
+*       MV_BOOL         TRUE - enable EJP mode
+*                       FALSE - disable EJP mode
+*
+* OUTPUT:   MV_STATUS
+*       MV_OK           - Success
+*       Other           - Failure
+*
+* RETURN:   None.
+*
+*******************************************************************************/
+MV_STATUS    mvEthEjpModeSet(void* pPortHandle, int mode)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+    int             port = pPortCtrl->portNo;
+
+    if((port < 0) || (port >= mvCtrlEthMaxPortGet()))
+        return MV_OUT_OF_RANGE;
+
+    pPortCtrl = ethPortCtrl[port];
+    if(pPortCtrl == NULL)
+        return MV_NOT_FOUND;
+
+    pPortCtrl->portConfig.ejpMode = mode;
+    if(mode)
+    {
+        /* EJP enabled */
+        MV_REG_WRITE(ETH_TXQ_CMD_1_REG(port), ETH_TX_EJP_ENABLE_MASK);
+    }
+    else
+    {
+        /* EJP disabled */
+        MV_REG_WRITE(ETH_TXQ_CMD_1_REG(port), 0);
+    }
+    mvOsPrintf("eth_%d: EJP %s - ETH_TXQ_CMD_1_REG: 0x%x = 0x%08x\n",
+        port, mode ? "Enabled" : "Disabled", ETH_TXQ_CMD_1_REG(port),
+                    MV_REG_READ(ETH_TXQ_CMD_1_REG(port)));
+
+    return MV_OK;
+}
+#endif /* MV_ETH_VERSION >= 4 */
+
+/*******************************************************************************
+* mvEthStatusGet - Get major properties of the port .
+*
+* DESCRIPTION:
+*       This function get major properties of the port (link, speed, duplex,
+*       flowControl, etc) and return them using the single structure.
+*
+* INPUT:
+*       void*           pPortHandle - Pointer to port specific handler;
+*
+* OUTPUT:
+*       MV_ETH_PORT_STATUS* pStatus - Pointer to structure, were port status
+*                                   will be placed.
+*
+* RETURN:   None.
+*
+*******************************************************************************/
+void    mvEthStatusGet(void* pPortHandle, MV_ETH_PORT_STATUS* pStatus)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+    int             port = pPortCtrl->portNo;
+
+    MV_U32  regValue;
+
+    regValue = MV_REG_READ( ETH_PORT_STATUS_REG(port) );
+
+    if(regValue & ETH_GMII_SPEED_1000_MASK)
+        pStatus->speed = MV_ETH_SPEED_1000;
+    else if(regValue & ETH_MII_SPEED_100_MASK)
+        pStatus->speed = MV_ETH_SPEED_100;
+    else
+        pStatus->speed = MV_ETH_SPEED_10;
+
+    if(regValue & ETH_LINK_UP_MASK)
+        pStatus->isLinkUp = MV_TRUE;
+    else
+        pStatus->isLinkUp = MV_FALSE;
+
+    if(regValue & ETH_FULL_DUPLEX_MASK)
+        pStatus->duplex = MV_ETH_DUPLEX_FULL;
+    else
+        pStatus->duplex = MV_ETH_DUPLEX_HALF;
+
+
+    if(regValue & ETH_ENABLE_RCV_FLOW_CTRL_MASK)
+        pStatus->flowControl = MV_ETH_FC_ENABLE;
+    else
+        pStatus->flowControl = MV_ETH_FC_DISABLE;
+}
+
+
+/******************************************************************************/
+/*                         PHY Control Functions                              */
+/******************************************************************************/
+
+
+/*******************************************************************************
+* mvEthPhyAddrSet - Set the ethernet port PHY address.
+*
+* DESCRIPTION:
+*       This routine set the ethernet port PHY address according to given
+*       parameter.
+*
+* INPUT:
+*       void*   pPortHandle     - Pointer to port specific handler;
+*       int     phyAddr         - PHY address
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+void    mvEthPhyAddrSet(void* pPortHandle, int phyAddr)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+    int             port = pPortCtrl->portNo;
+    unsigned int    regData;
+
+    regData = MV_REG_READ(ETH_PHY_ADDR_REG(port));
+
+    regData &= ~ETH_PHY_ADDR_MASK;
+    regData |=  phyAddr;
+
+    MV_REG_WRITE(ETH_PHY_ADDR_REG(port), regData);
+
+    return;
+}
+
+/*******************************************************************************
+* mvEthPhyAddrGet - Get the ethernet port PHY address.
+*
+* DESCRIPTION:
+*       This routine returns the given ethernet port PHY address.
+*
+* INPUT:
+*       void*   pPortHandle - Pointer to port specific handler;
+*
+*
+* RETURN: int - PHY address.
+*
+*******************************************************************************/
+int     mvEthPhyAddrGet(void* pPortHandle)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHandle;
+    int             port = pPortCtrl->portNo;
+    unsigned int    regData;
+
+    regData = MV_REG_READ(ETH_PHY_ADDR_REG(port));
+
+    return ((regData >> (5 * port)) & 0x1f);
+}
+
+/******************************************************************************/
+/*                Descriptor handling Functions                               */
+/******************************************************************************/
+
+/*******************************************************************************
+* etherInitRxDescRing - Curve a Rx chain desc list and buffer in memory.
+*
+* DESCRIPTION:
+*       This function prepares a Rx chained list of descriptors and packet
+*       buffers in a form of a ring. The routine must be called after port
+*       initialization routine and before port start routine.
+*       The Ethernet SDMA engine uses CPU bus addresses to access the various
+*       devices in the system (i.e. DRAM). This function uses the ethernet
+*       struct 'virtual to physical' routine (set by the user) to set the ring
+*       with physical addresses.
+*
+* INPUT:
+*       ETH_QUEUE_CTRL  *pEthPortCtrl   Ethernet Port Control srtuct.
+*       int             rxQueue         Number of Rx queue.
+*       int             rxDescNum       Number of Rx descriptors
+*       MV_U8*          rxDescBaseAddr  Rx descriptors memory area base addr.
+*
+* OUTPUT:
+*       The routine updates the Ethernet port control struct with information
+*       regarding the Rx descriptors and buffers.
+*
+* RETURN: None
+*
+*******************************************************************************/
+static void ethInitRxDescRing(ETH_PORT_CTRL* pPortCtrl, int queue)
+{
+    ETH_RX_DESC     *pRxDescBase, *pRxDesc, *pRxPrevDesc;
+    int             ix, rxDescNum = pPortCtrl->rxQueueConfig[queue].descrNum;
+    ETH_QUEUE_CTRL  *pQueueCtrl = &pPortCtrl->rxQueue[queue];
+
+    /* Make sure descriptor address is cache line size aligned  */
+    pRxDescBase = (ETH_RX_DESC*)MV_ALIGN_UP((MV_ULONG)pQueueCtrl->descBuf.bufVirtPtr,
+                                     CPU_D_CACHE_LINE_SIZE);
+
+    pRxDesc      = (ETH_RX_DESC*)pRxDescBase;
+    pRxPrevDesc  = pRxDesc;
+
+    /* initialize the Rx descriptors ring */
+    for (ix=0; ix<rxDescNum; ix++)
+    {
+        pRxDesc->bufSize     = 0x0;
+        pRxDesc->byteCnt     = 0x0;
+        pRxDesc->cmdSts      = ETH_BUFFER_OWNED_BY_HOST;
+        pRxDesc->bufPtr      = 0x0;
+        pRxDesc->returnInfo  = 0x0;
+        pRxPrevDesc = pRxDesc;
+        if(ix == (rxDescNum-1))
+        {
+            /* Closing Rx descriptors ring */
+            pRxPrevDesc->nextDescPtr = (MV_U32)ethDescVirtToPhy(pQueueCtrl, (void*)pRxDescBase);
+        }
+        else
+        {
+            pRxDesc = (ETH_RX_DESC*)((MV_ULONG)pRxDesc + ETH_RX_DESC_ALIGNED_SIZE);
+            pRxPrevDesc->nextDescPtr = (MV_U32)ethDescVirtToPhy(pQueueCtrl, (void*)pRxDesc);
+        }
+        ETH_DESCR_FLUSH_INV(pPortCtrl, pRxPrevDesc);
+    }
+
+    pQueueCtrl->pCurrentDescr = pRxDescBase;
+    pQueueCtrl->pUsedDescr = pRxDescBase;
+
+    pQueueCtrl->pFirstDescr = pRxDescBase;
+    pQueueCtrl->pLastDescr = pRxDesc;
+    pQueueCtrl->resource = 0;
+}
+
+void ethResetRxDescRing(void* pPortHndl, int queue)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+    ETH_QUEUE_CTRL* pQueueCtrl = &pPortCtrl->rxQueue[queue];
+    ETH_RX_DESC*    pRxDesc = (ETH_RX_DESC*)pQueueCtrl->pFirstDescr;
+
+    pQueueCtrl->resource = 0;
+    if(pQueueCtrl->pFirstDescr != NULL)
+    {
+        while(MV_TRUE)
+        {
+            pRxDesc->bufSize     = 0x0;
+            pRxDesc->byteCnt     = 0x0;
+            pRxDesc->cmdSts      = ETH_BUFFER_OWNED_BY_HOST;
+            pRxDesc->bufPtr      = 0x0;
+            pRxDesc->returnInfo  = 0x0;
+            ETH_DESCR_FLUSH_INV(pPortCtrl, pRxDesc);
+            if( (void*)pRxDesc == pQueueCtrl->pLastDescr)
+                    break;
+            pRxDesc = RX_NEXT_DESC_PTR(pRxDesc, pQueueCtrl);
+        }
+        pQueueCtrl->pCurrentDescr = pQueueCtrl->pFirstDescr;
+        pQueueCtrl->pUsedDescr = pQueueCtrl->pFirstDescr;
+
+        /* Update RX Command register */
+        pPortCtrl->portRxQueueCmdReg |= (1 << queue);
+
+        /* update HW */
+        MV_REG_WRITE( ETH_RX_CUR_DESC_PTR_REG(pPortCtrl->portNo, queue),
+                 (MV_U32)ethDescVirtToPhy(pQueueCtrl, pQueueCtrl->pCurrentDescr) );
+    }
+    else
+    {
+        /* Update RX Command register */
+        pPortCtrl->portRxQueueCmdReg &= ~(1 << queue);
+
+        /* update HW */
+        MV_REG_WRITE( ETH_RX_CUR_DESC_PTR_REG(pPortCtrl->portNo, queue), 0);
+    }
+}
+
+/*******************************************************************************
+* etherInitTxDescRing - Curve a Tx chain desc list and buffer in memory.
+*
+* DESCRIPTION:
+*       This function prepares a Tx chained list of descriptors and packet
+*       buffers in a form of a ring. The routine must be called after port
+*       initialization routine and before port start routine.
+*       The Ethernet SDMA engine uses CPU bus addresses to access the various
+*       devices in the system (i.e. DRAM). This function uses the ethernet
+*       struct 'virtual to physical' routine (set by the user) to set the ring
+*       with physical addresses.
+*
+* INPUT:
+*       ETH_PORT_CTRL   *pEthPortCtrl   Ethernet Port Control srtuct.
+*       int             txQueue         Number of Tx queue.
+*       int             txDescNum       Number of Tx descriptors
+*       int             txBuffSize      Size of Tx buffer
+*       MV_U8*          pTxDescBase     Tx descriptors memory area base addr.
+*
+* OUTPUT:
+*       The routine updates the Ethernet port control struct with information
+*       regarding the Tx descriptors and buffers.
+*
+* RETURN:   None.
+*
+*******************************************************************************/
+static void ethInitTxDescRing(ETH_PORT_CTRL* pPortCtrl, int queue)
+{
+    ETH_TX_DESC     *pTxDescBase, *pTxDesc, *pTxPrevDesc;
+    int             ix, txDescNum = pPortCtrl->txQueueConfig[queue].descrNum;
+    ETH_QUEUE_CTRL  *pQueueCtrl = &pPortCtrl->txQueue[queue];
+
+    /* Make sure descriptor address is cache line size aligned  */
+    pTxDescBase = (ETH_TX_DESC*)MV_ALIGN_UP((MV_ULONG)pQueueCtrl->descBuf.bufVirtPtr,
+                                     CPU_D_CACHE_LINE_SIZE);
+
+    pTxDesc      = (ETH_TX_DESC*)pTxDescBase;
+    pTxPrevDesc  = pTxDesc;
+
+    /* initialize the Tx descriptors ring */
+    for (ix=0; ix<txDescNum; ix++)
+    {
+        pTxDesc->byteCnt     = 0x0000;
+        pTxDesc->L4iChk      = 0x0000;
+        pTxDesc->cmdSts      = ETH_BUFFER_OWNED_BY_HOST;
+        pTxDesc->bufPtr      = 0x0;
+        pTxDesc->returnInfo  = 0x0;
+
+        pTxPrevDesc = pTxDesc;
+
+        if(ix == (txDescNum-1))
+        {
+            /* Closing Tx descriptors ring */
+            pTxPrevDesc->nextDescPtr = (MV_U32)ethDescVirtToPhy(pQueueCtrl, (void*)pTxDescBase);
+        }
+        else
+        {
+            pTxDesc = (ETH_TX_DESC*)((MV_ULONG)pTxDesc + ETH_TX_DESC_ALIGNED_SIZE);
+            pTxPrevDesc->nextDescPtr = (MV_U32)ethDescVirtToPhy(pQueueCtrl, (void*)pTxDesc);
+        }
+        ETH_DESCR_FLUSH_INV(pPortCtrl, pTxPrevDesc);
+    }
+
+    pQueueCtrl->pCurrentDescr = pTxDescBase;
+    pQueueCtrl->pUsedDescr = pTxDescBase;
+
+    pQueueCtrl->pFirstDescr = pTxDescBase;
+    pQueueCtrl->pLastDescr = pTxDesc;
+    /* Leave one TX descriptor out of use */
+    pQueueCtrl->resource = txDescNum - 1;
+}
+
+void ethResetTxDescRing(void* pPortHndl, int queue)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+    ETH_QUEUE_CTRL* pQueueCtrl = &pPortCtrl->txQueue[queue];
+    ETH_TX_DESC*    pTxDesc = (ETH_TX_DESC*)pQueueCtrl->pFirstDescr;
+
+    pQueueCtrl->resource = 0;
+    if(pQueueCtrl->pFirstDescr != NULL)
+    {
+        while(MV_TRUE)
+        {
+            pTxDesc->byteCnt     = 0x0000;
+            pTxDesc->L4iChk      = 0x0000;
+            pTxDesc->cmdSts      = ETH_BUFFER_OWNED_BY_HOST;
+            pTxDesc->bufPtr      = 0x0;
+            pTxDesc->returnInfo  = 0x0;
+            ETH_DESCR_FLUSH_INV(pPortCtrl, pTxDesc);
+            pQueueCtrl->resource++;
+            if( (void*)pTxDesc == pQueueCtrl->pLastDescr)
+                    break;
+            pTxDesc = TX_NEXT_DESC_PTR(pTxDesc, pQueueCtrl);
+        }
+        /* Leave one TX descriptor out of use */
+        pQueueCtrl->resource--;
+        pQueueCtrl->pCurrentDescr = pQueueCtrl->pFirstDescr;
+        pQueueCtrl->pUsedDescr = pQueueCtrl->pFirstDescr;
+
+        /* Update TX Command register */
+        pPortCtrl->portTxQueueCmdReg |= MV_32BIT_LE_FAST(1 << queue);
+        /* update HW */
+        MV_REG_WRITE( ETH_TX_CUR_DESC_PTR_REG(pPortCtrl->portNo, queue),
+        (MV_U32)ethDescVirtToPhy(pQueueCtrl, pQueueCtrl->pCurrentDescr) );
+    }
+    else
+    {
+        /* Update TX Command register */
+        pPortCtrl->portTxQueueCmdReg &=  MV_32BIT_LE_FAST(~(1 << queue));
+        /* update HW */
+        MV_REG_WRITE( ETH_TX_CUR_DESC_PTR_REG(pPortCtrl->portNo, queue), 0 );
+    }
+}
+
+/*******************************************************************************
+* ethAllocDescrMemory - Free memory allocated for RX and TX descriptors.
+*
+* DESCRIPTION:
+*       This function allocates memory for RX and TX descriptors.
+*       - If ETH_DESCR_IN_SRAM defined, allocate memory from SRAM.
+*       - If ETH_DESCR_IN_SDRAM defined, allocate memory in SDRAM.
+*
+* INPUT:
+*       int size - size of memory should be allocated.
+*
+* RETURN: None
+*
+*******************************************************************************/
+static MV_U8*  ethAllocDescrMemory(ETH_PORT_CTRL* pPortCtrl, int descSize,
+                            MV_ULONG* pPhysAddr, MV_U32 *memHandle)
+{
+    MV_U8*  pVirt;
+
+#if defined(ETH_DESCR_IN_SRAM)
+    if(ethDescInSram == MV_TRUE)
+        pVirt = (char*)mvSramMalloc(descSize, pPhysAddr);
+    else
+#endif /* ETH_DESCR_IN_SRAM */
+    {
+#ifdef ETH_DESCR_UNCACHED
+        pVirt = (MV_U8*)mvOsIoUncachedMalloc(pPortCtrl->osHandle, descSize,
+					    pPhysAddr,memHandle);
+#else
+        pVirt = (MV_U8*)mvOsIoCachedMalloc(pPortCtrl->osHandle, descSize,
+					  pPhysAddr, memHandle);
+#endif /* ETH_DESCR_UNCACHED */
+    }
+    memset(pVirt, 0, descSize);
+
+    return pVirt;
+}
+
+/*******************************************************************************
+* ethFreeDescrMemory - Free memory allocated for RX and TX descriptors.
+*
+* DESCRIPTION:
+*       This function frees memory allocated for RX and TX descriptors.
+*       - If ETH_DESCR_IN_SRAM defined, free memory using gtSramFree() function.
+*       - If ETH_DESCR_IN_SDRAM defined, free memory using mvOsFree() function.
+*
+* INPUT:
+*       void* pVirtAddr - virtual pointer to memory allocated for RX and TX
+*                       desriptors.
+*
+* RETURN: None
+*
+*******************************************************************************/
+void    ethFreeDescrMemory(ETH_PORT_CTRL* pPortCtrl, MV_BUF_INFO* pDescBuf)
+{
+    if( (pDescBuf == NULL) || (pDescBuf->bufVirtPtr == NULL) )
+        return;
+
+#if defined(ETH_DESCR_IN_SRAM)
+    if( ethDescInSram )
+    {
+        mvSramFree(pDescBuf->bufSize, pDescBuf->bufPhysAddr, pDescBuf->bufVirtPtr);
+        return;
+    }
+#endif /* ETH_DESCR_IN_SRAM */
+
+#ifdef ETH_DESCR_UNCACHED
+    mvOsIoUncachedFree(pPortCtrl->osHandle, pDescBuf->bufSize, pDescBuf->bufPhysAddr,
+                     pDescBuf->bufVirtPtr,pDescBuf->memHandle);
+#else
+    mvOsIoCachedFree(pPortCtrl->osHandle, pDescBuf->bufSize, pDescBuf->bufPhysAddr,
+                     pDescBuf->bufVirtPtr,pDescBuf->memHandle);
+#endif /* ETH_DESCR_UNCACHED */
+}
+
+/******************************************************************************/
+/*                Other Functions                                         */
+/******************************************************************************/
+
+void mvEthPortPowerUp(int port)
+{
+    MV_U32  regVal;
+
+    /* MAC Cause register should be cleared */
+    MV_REG_WRITE(ETH_UNIT_INTR_CAUSE_REG(port), 0);
+
+	if (mvBoardIsPortInSgmii(port))
+    mvEthPortSgmiiConfig(port);
+
+    /* Cancel Port Reset */
+    regVal = MV_REG_READ(ETH_PORT_SERIAL_CTRL_1_REG(port));
+    regVal &= (~ETH_PORT_RESET_MASK);
+    MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_1_REG(port), regVal);
+    while( (MV_REG_READ(ETH_PORT_SERIAL_CTRL_1_REG(port)) & ETH_PORT_RESET_MASK) != 0);
+}
+
+void mvEthPortPowerDown(int port)
+{
+    MV_U32  regVal;
+
+    /* Port must be DISABLED */
+    regVal = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(port));
+    if( (regVal & ETH_PORT_ENABLE_MASK) != 0)
+    {
+        mvOsPrintf("ethPort #%d: PowerDown - port must be Disabled (PSC=0x%x)\n",
+                    port, regVal);
+        return;
+    }
+
+    /* Port Reset (Read after write the register as a precaution) */
+    regVal = MV_REG_READ(ETH_PORT_SERIAL_CTRL_1_REG(port));
+    MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_1_REG(port), regVal | ETH_PORT_RESET_MASK);
+    while((MV_REG_READ(ETH_PORT_SERIAL_CTRL_1_REG(port)) & ETH_PORT_RESET_MASK) == 0);
+}
+
+static void mvEthPortSgmiiConfig(int port)
+{
+    MV_U32  regVal;
+
+    regVal = MV_REG_READ(ETH_PORT_SERIAL_CTRL_1_REG(port));
+
+    regVal |= (ETH_SGMII_MODE_MASK /*| ETH_INBAND_AUTO_NEG_ENABLE_MASK */);
+    regVal &= (~ETH_INBAND_AUTO_NEG_BYPASS_MASK);
+
+    MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_1_REG(port), regVal);
+}
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthDebug.c b/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthDebug.c
new file mode 100644
index 000000000000..62edcb57d10d
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthDebug.c
@@ -0,0 +1,748 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+        this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/*******************************************************************************
+* mvEthDebug.c - Source file for user friendly debug functions
+*
+* DESCRIPTION:
+*
+* DEPENDENCIES:
+*       None.
+*
+*******************************************************************************/
+
+#include "mvOs.h"
+#include "mvCommon.h"
+#include "mvTypes.h"
+#include "mv802_3.h"
+#include "mvDebug.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "eth-phy/mvEthPhy.h"
+#include "eth/mvEth.h"
+#include "eth/gbe/mvEthDebug.h"
+
+/* #define mvOsPrintf printf */
+
+void    mvEthPortShow(void* pHndl);
+void    mvEthQueuesShow(void* pHndl, int rxQueue, int txQueue, int mode);
+
+/******************************************************************************/
+/*                          Debug functions                                   */
+/******************************************************************************/
+void    ethRxCoal(int port, int usec)
+{
+    void*   pHndl;
+
+    pHndl = mvEthPortHndlGet(port);
+    if(pHndl != NULL)
+    {
+        mvEthRxCoalSet(pHndl, usec);
+    }
+}
+
+void    ethTxCoal(int port, int usec)
+{
+    void*   pHndl;
+
+    pHndl = mvEthPortHndlGet(port);
+    if(pHndl != NULL)
+    {
+        mvEthTxCoalSet(pHndl, usec);
+    }
+}
+
+#if (MV_ETH_VERSION >= 4)
+void     ethEjpModeSet(int port, int mode)
+{
+    void*   pHndl;
+
+    pHndl = mvEthPortHndlGet(port);
+    if(pHndl != NULL)
+    {
+        mvEthEjpModeSet(pHndl, mode);
+    }
+}
+#endif /* (MV_ETH_VERSION >= 4) */
+
+void    ethBpduRxQ(int port, int bpduQueue)
+{
+    void*   pHndl;
+
+    pHndl = mvEthPortHndlGet(port);
+    if(pHndl != NULL)
+    {
+        mvEthBpduRxQueue(pHndl, bpduQueue);
+    }
+}
+
+void    ethArpRxQ(int port, int arpQueue)
+{
+    void*   pHndl;
+
+    pHndl = mvEthPortHndlGet(port);
+    if(pHndl != NULL)
+    {
+        mvEthArpRxQueue(pHndl, arpQueue);
+    }
+}
+
+void    ethTcpRxQ(int port, int tcpQueue)
+{
+    void*   pHndl;
+
+    pHndl = mvEthPortHndlGet(port);
+    if(pHndl != NULL)
+    {
+        mvEthTcpRxQueue(pHndl, tcpQueue);
+    }
+}
+
+void    ethUdpRxQ(int port, int udpQueue)
+{
+    void*   pHndl;
+
+    pHndl = mvEthPortHndlGet(port);
+    if(pHndl != NULL)
+    {
+        mvEthUdpRxQueue(pHndl, udpQueue);
+    }
+}
+
+void    ethTxPolicyRegs(int port)
+{
+    int             queue;
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)mvEthPortHndlGet(port);
+
+    if(pPortCtrl == NULL)
+    {
+        return;
+    }
+    mvOsPrintf("Port #%d TX Policy: EJP=%d, TXQs: ",
+                port, pPortCtrl->portConfig.ejpMode);
+    for(queue=0; queue<MV_ETH_TX_Q_NUM; queue++)
+    {
+        if(pPortCtrl->txQueueConfig[queue].descrNum > 0)
+            mvOsPrintf("%d, ", queue);
+    }
+    mvOsPrintf("\n");
+
+    mvOsPrintf("\n\t TX policy Port #%d configuration registers\n", port);
+
+    mvOsPrintf("ETH_TX_QUEUE_COMMAND_REG            : 0x%X = 0x%08x\n",
+                ETH_TX_QUEUE_COMMAND_REG(port),
+                MV_REG_READ( ETH_TX_QUEUE_COMMAND_REG(port) ) );
+
+    mvOsPrintf("ETH_TX_FIXED_PRIO_CFG_REG           : 0x%X = 0x%08x\n",
+                ETH_TX_FIXED_PRIO_CFG_REG(port),
+                MV_REG_READ( ETH_TX_FIXED_PRIO_CFG_REG(port) ) );
+
+    mvOsPrintf("ETH_TX_TOKEN_RATE_CFG_REG           : 0x%X = 0x%08x\n",
+                ETH_TX_TOKEN_RATE_CFG_REG(port),
+                MV_REG_READ( ETH_TX_TOKEN_RATE_CFG_REG(port) ) );
+
+    mvOsPrintf("ETH_MAX_TRANSMIT_UNIT_REG           : 0x%X = 0x%08x\n",
+                ETH_MAX_TRANSMIT_UNIT_REG(port),
+                MV_REG_READ( ETH_MAX_TRANSMIT_UNIT_REG(port) ) );
+
+    mvOsPrintf("ETH_TX_TOKEN_BUCKET_SIZE_REG        : 0x%X = 0x%08x\n",
+                ETH_TX_TOKEN_BUCKET_SIZE_REG(port),
+                MV_REG_READ( ETH_TX_TOKEN_BUCKET_SIZE_REG(port) ) );
+
+    mvOsPrintf("ETH_TX_TOKEN_BUCKET_COUNT_REG       : 0x%X = 0x%08x\n",
+                ETH_TX_TOKEN_BUCKET_COUNT_REG(port),
+                MV_REG_READ( ETH_TX_TOKEN_BUCKET_COUNT_REG(port) ) );
+
+    for(queue=0; queue<MV_ETH_MAX_TXQ; queue++)
+    {
+        mvOsPrintf("\n\t TX policy Port #%d, Queue #%d configuration registers\n", port, queue);
+
+        mvOsPrintf("ETH_TXQ_TOKEN_COUNT_REG             : 0x%X = 0x%08x\n",
+                ETH_TXQ_TOKEN_COUNT_REG(port, queue),
+                MV_REG_READ( ETH_TXQ_TOKEN_COUNT_REG(port, queue) ) );
+
+        mvOsPrintf("ETH_TXQ_TOKEN_CFG_REG               : 0x%X = 0x%08x\n",
+                ETH_TXQ_TOKEN_CFG_REG(port, queue),
+                MV_REG_READ( ETH_TXQ_TOKEN_CFG_REG(port, queue) ) );
+
+        mvOsPrintf("ETH_TXQ_ARBITER_CFG_REG             : 0x%X = 0x%08x\n",
+                ETH_TXQ_ARBITER_CFG_REG(port, queue),
+                MV_REG_READ( ETH_TXQ_ARBITER_CFG_REG(port, queue) ) );
+    }
+    mvOsPrintf("\n");
+}
+
+/* Print important registers of Ethernet port */
+void    ethPortRegs(int port)
+{
+    mvOsPrintf("\n\t ethGiga #%d port Registers:\n", port);
+
+    mvOsPrintf("ETH_PORT_STATUS_REG                 : 0x%X = 0x%08x\n",
+                ETH_PORT_STATUS_REG(port),
+                MV_REG_READ( ETH_PORT_STATUS_REG(port) ) );
+
+    mvOsPrintf("ETH_PORT_SERIAL_CTRL_REG            : 0x%X = 0x%08x\n",
+                ETH_PORT_SERIAL_CTRL_REG(port),
+                MV_REG_READ( ETH_PORT_SERIAL_CTRL_REG(port) ) );
+
+    mvOsPrintf("ETH_PORT_CONFIG_REG                 : 0x%X = 0x%08x\n",
+                ETH_PORT_CONFIG_REG(port),
+                MV_REG_READ( ETH_PORT_CONFIG_REG(port) ) );
+
+    mvOsPrintf("ETH_PORT_CONFIG_EXTEND_REG          : 0x%X = 0x%08x\n",
+                ETH_PORT_CONFIG_EXTEND_REG(port),
+                MV_REG_READ( ETH_PORT_CONFIG_EXTEND_REG(port) ) );
+
+    mvOsPrintf("ETH_SDMA_CONFIG_REG                 : 0x%X = 0x%08x\n",
+                ETH_SDMA_CONFIG_REG(port),
+                MV_REG_READ( ETH_SDMA_CONFIG_REG(port) ) );
+
+    mvOsPrintf("ETH_TX_FIFO_URGENT_THRESH_REG       : 0x%X = 0x%08x\n",
+                ETH_TX_FIFO_URGENT_THRESH_REG(port),
+                MV_REG_READ( ETH_TX_FIFO_URGENT_THRESH_REG(port) ) );
+
+    mvOsPrintf("ETH_RX_QUEUE_COMMAND_REG            : 0x%X = 0x%08x\n",
+                ETH_RX_QUEUE_COMMAND_REG(port),
+                MV_REG_READ( ETH_RX_QUEUE_COMMAND_REG(port) ) );
+
+    mvOsPrintf("ETH_TX_QUEUE_COMMAND_REG            : 0x%X = 0x%08x\n",
+                ETH_TX_QUEUE_COMMAND_REG(port),
+                MV_REG_READ( ETH_TX_QUEUE_COMMAND_REG(port) ) );
+
+    mvOsPrintf("ETH_INTR_CAUSE_REG                  : 0x%X = 0x%08x\n",
+                ETH_INTR_CAUSE_REG(port),
+                MV_REG_READ( ETH_INTR_CAUSE_REG(port) ) );
+
+    mvOsPrintf("ETH_INTR_EXTEND_CAUSE_REG           : 0x%X = 0x%08x\n",
+                ETH_INTR_CAUSE_EXT_REG(port),
+                MV_REG_READ( ETH_INTR_CAUSE_EXT_REG(port) ) );
+
+    mvOsPrintf("ETH_INTR_MASK_REG                   : 0x%X = 0x%08x\n",
+                ETH_INTR_MASK_REG(port),
+                MV_REG_READ( ETH_INTR_MASK_REG(port) ) );
+
+    mvOsPrintf("ETH_INTR_EXTEND_MASK_REG            : 0x%X = 0x%08x\n",
+                ETH_INTR_MASK_EXT_REG(port),
+                MV_REG_READ( ETH_INTR_MASK_EXT_REG(port) ) );
+
+    mvOsPrintf("ETH_RX_DESCR_STAT_CMD_REG           : 0x%X = 0x%08x\n",
+                ETH_RX_DESCR_STAT_CMD_REG(port, 0),
+                MV_REG_READ( ETH_RX_DESCR_STAT_CMD_REG(port, 0) ) );
+
+    mvOsPrintf("ETH_RX_BYTE_COUNT_REG               : 0x%X = 0x%08x\n",
+                ETH_RX_BYTE_COUNT_REG(port, 0),
+                MV_REG_READ( ETH_RX_BYTE_COUNT_REG(port, 0) ) );
+
+    mvOsPrintf("ETH_RX_BUF_PTR_REG                  : 0x%X = 0x%08x\n",
+                ETH_RX_BUF_PTR_REG(port, 0),
+                MV_REG_READ( ETH_RX_BUF_PTR_REG(port, 0) ) );
+
+    mvOsPrintf("ETH_RX_CUR_DESC_PTR_REG             : 0x%X = 0x%08x\n",
+                ETH_RX_CUR_DESC_PTR_REG(port, 0),
+                MV_REG_READ( ETH_RX_CUR_DESC_PTR_REG(port, 0) ) );
+}
+
+
+/* Print Giga Ethernet UNIT registers */
+void    ethRegs(int port)
+{
+    mvOsPrintf("ETH_PHY_ADDR_REG               : 0x%X = 0x%08x\n",
+                ETH_PHY_ADDR_REG(port),
+                MV_REG_READ(ETH_PHY_ADDR_REG(port)) );
+
+    mvOsPrintf("ETH_UNIT_INTR_CAUSE_REG        : 0x%X = 0x%08x\n",
+                ETH_UNIT_INTR_CAUSE_REG(port),
+                MV_REG_READ( ETH_UNIT_INTR_CAUSE_REG(port)) );
+
+    mvOsPrintf("ETH_UNIT_INTR_MASK_REG         : 0x%X = 0x%08x\n",
+                ETH_UNIT_INTR_MASK_REG(port),
+                MV_REG_READ( ETH_UNIT_INTR_MASK_REG(port)) );
+
+    mvOsPrintf("ETH_UNIT_ERROR_ADDR_REG        : 0x%X = 0x%08x\n",
+                ETH_UNIT_ERROR_ADDR_REG(port),
+                MV_REG_READ(ETH_UNIT_ERROR_ADDR_REG(port)) );
+
+    mvOsPrintf("ETH_UNIT_INT_ADDR_ERROR_REG    : 0x%X = 0x%08x\n",
+                ETH_UNIT_INT_ADDR_ERROR_REG(port),
+                MV_REG_READ(ETH_UNIT_INT_ADDR_ERROR_REG(port)) );
+
+}
+
+/******************************************************************************/
+/*                      MIB Counters functions                                */
+/******************************************************************************/
+
+/*******************************************************************************
+* ethClearMibCounters - Clear all MIB counters
+*
+* DESCRIPTION:
+*       This function clears all MIB counters of a specific ethernet port.
+*       A read from the MIB counter will reset the counter.
+*
+* INPUT:
+*       int    port -  Ethernet Port number.
+*
+* RETURN: None
+*
+*******************************************************************************/
+void ethClearCounters(int port)
+{
+    void*   pHndl;
+
+    pHndl = mvEthPortHndlGet(port);
+    if(pHndl != NULL)
+        mvEthMibCountersClear(pHndl);
+
+    return;
+}
+
+
+/* Print counters of the Ethernet port */
+void    ethPortCounters(int port)
+{
+    MV_U32  regValue, regValHigh;
+    void*   pHndl;
+
+    pHndl = mvEthPortHndlGet(port);
+    if(pHndl == NULL)
+        return;
+
+    mvOsPrintf("\n\t Port #%d MIB Counters\n\n", port);
+
+    mvOsPrintf("GoodFramesReceived          = %u\n",
+              mvEthMibCounterRead(pHndl, ETH_MIB_GOOD_FRAMES_RECEIVED, NULL));
+    mvOsPrintf("BadFramesReceived           = %u\n",
+              mvEthMibCounterRead(pHndl, ETH_MIB_BAD_FRAMES_RECEIVED, NULL));
+    mvOsPrintf("BroadcastFramesReceived     = %u\n",
+              mvEthMibCounterRead(pHndl, ETH_MIB_BROADCAST_FRAMES_RECEIVED, NULL));
+    mvOsPrintf("MulticastFramesReceived     = %u\n",
+              mvEthMibCounterRead(pHndl, ETH_MIB_MULTICAST_FRAMES_RECEIVED, NULL));
+
+    regValue = mvEthMibCounterRead(pHndl, ETH_MIB_GOOD_OCTETS_RECEIVED_LOW,
+                                 &regValHigh);
+    mvOsPrintf("GoodOctetsReceived          = 0x%08x%08x\n",
+               regValHigh, regValue);
+
+    mvOsPrintf("\n");
+    mvOsPrintf("GoodFramesSent              = %u\n",
+              mvEthMibCounterRead(pHndl, ETH_MIB_GOOD_FRAMES_SENT, NULL));
+    mvOsPrintf("BroadcastFramesSent         = %u\n",
+              mvEthMibCounterRead(pHndl, ETH_MIB_BROADCAST_FRAMES_SENT, NULL));
+    mvOsPrintf("MulticastFramesSent         = %u\n",
+              mvEthMibCounterRead(pHndl, ETH_MIB_MULTICAST_FRAMES_SENT, NULL));
+
+    regValue = mvEthMibCounterRead(pHndl, ETH_MIB_GOOD_OCTETS_SENT_LOW,
+                                 &regValHigh);
+    mvOsPrintf("GoodOctetsSent              = 0x%08x%08x\n", regValHigh, regValue);
+
+
+    mvOsPrintf("\n\t FC Control Counters\n");
+
+    regValue = mvEthMibCounterRead(pHndl, ETH_MIB_UNREC_MAC_CONTROL_RECEIVED, NULL);
+    mvOsPrintf("UnrecogMacControlReceived   = %u\n", regValue);
+
+    regValue = mvEthMibCounterRead(pHndl, ETH_MIB_GOOD_FC_RECEIVED, NULL);
+    mvOsPrintf("GoodFCFramesReceived        = %u\n", regValue);
+
+    regValue = mvEthMibCounterRead(pHndl, ETH_MIB_BAD_FC_RECEIVED, NULL);
+    mvOsPrintf("BadFCFramesReceived         = %u\n", regValue);
+
+    regValue = mvEthMibCounterRead(pHndl, ETH_MIB_FC_SENT, NULL);
+    mvOsPrintf("FCFramesSent                = %u\n", regValue);
+
+
+    mvOsPrintf("\n\t RX Errors\n");
+
+    regValue = mvEthMibCounterRead(pHndl, ETH_MIB_BAD_OCTETS_RECEIVED, NULL);
+    mvOsPrintf("BadOctetsReceived           = %u\n", regValue);
+
+    regValue = mvEthMibCounterRead(pHndl, ETH_MIB_UNDERSIZE_RECEIVED, NULL);
+    mvOsPrintf("UndersizeFramesReceived     = %u\n", regValue);
+
+    regValue = mvEthMibCounterRead(pHndl, ETH_MIB_FRAGMENTS_RECEIVED, NULL);
+    mvOsPrintf("FragmentsReceived           = %u\n", regValue);
+
+    regValue = mvEthMibCounterRead(pHndl, ETH_MIB_OVERSIZE_RECEIVED, NULL);
+    mvOsPrintf("OversizeFramesReceived      = %u\n", regValue);
+
+    regValue = mvEthMibCounterRead(pHndl, ETH_MIB_JABBER_RECEIVED, NULL);
+    mvOsPrintf("JabbersReceived             = %u\n", regValue);
+
+    regValue = mvEthMibCounterRead(pHndl, ETH_MIB_MAC_RECEIVE_ERROR, NULL);
+    mvOsPrintf("MacReceiveErrors            = %u\n", regValue);
+
+    regValue = mvEthMibCounterRead(pHndl, ETH_MIB_BAD_CRC_EVENT, NULL);
+    mvOsPrintf("BadCrcReceived              = %u\n", regValue);
+
+    mvOsPrintf("\n\t TX Errors\n");
+
+    regValue = mvEthMibCounterRead(pHndl, ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR, NULL);
+    mvOsPrintf("TxMacErrors                 = %u\n", regValue);
+
+    regValue = mvEthMibCounterRead(pHndl, ETH_MIB_EXCESSIVE_COLLISION, NULL);
+    mvOsPrintf("TxExcessiveCollisions       = %u\n", regValue);
+
+    regValue = mvEthMibCounterRead(pHndl, ETH_MIB_COLLISION, NULL);
+    mvOsPrintf("TxCollisions                = %u\n", regValue);
+
+    regValue = mvEthMibCounterRead(pHndl, ETH_MIB_LATE_COLLISION, NULL);
+    mvOsPrintf("TxLateCollisions            = %u\n", regValue);
+
+
+    mvOsPrintf("\n");
+    regValue = MV_REG_READ( ETH_RX_DISCARD_PKTS_CNTR_REG(port));
+    mvOsPrintf("Rx Discard packets counter  = %u\n", regValue);
+
+    regValue = MV_REG_READ(ETH_RX_OVERRUN_PKTS_CNTR_REG(port));
+    mvOsPrintf("Rx Overrun packets counter  = %u\n", regValue);
+}
+
+/* Print RMON counters of the Ethernet port */
+void    ethPortRmonCounters(int port)
+{
+    void*   pHndl;
+
+    pHndl = mvEthPortHndlGet(port);
+    if(pHndl == NULL)
+        return;
+
+    mvOsPrintf("\n\t Port #%d RMON MIB Counters\n\n", port);
+
+    mvOsPrintf("64 ByteFramesReceived           = %u\n",
+              mvEthMibCounterRead(pHndl, ETH_MIB_FRAMES_64_OCTETS, NULL));
+    mvOsPrintf("65...127 ByteFramesReceived     = %u\n",
+              mvEthMibCounterRead(pHndl, ETH_MIB_FRAMES_65_TO_127_OCTETS, NULL));
+    mvOsPrintf("128...255 ByteFramesReceived    = %u\n",
+              mvEthMibCounterRead(pHndl, ETH_MIB_FRAMES_128_TO_255_OCTETS, NULL));
+    mvOsPrintf("256...511 ByteFramesReceived    = %u\n",
+              mvEthMibCounterRead(pHndl, ETH_MIB_FRAMES_256_TO_511_OCTETS, NULL));
+    mvOsPrintf("512...1023 ByteFramesReceived   = %u\n",
+              mvEthMibCounterRead(pHndl, ETH_MIB_FRAMES_512_TO_1023_OCTETS, NULL));
+    mvOsPrintf("1024...Max ByteFramesReceived   = %u\n",
+              mvEthMibCounterRead(pHndl, ETH_MIB_FRAMES_1024_TO_MAX_OCTETS, NULL));
+}
+
+/* Print port information */
+void    ethPortStatus(int port)
+{
+    void*   pHndl;
+
+    pHndl = mvEthPortHndlGet(port);
+    if(pHndl != NULL)
+    {
+        mvEthPortShow(pHndl);
+    }
+}
+
+/* Print port queues information */
+void    ethPortQueues(int port, int rxQueue, int txQueue, int mode)
+{
+    void*   pHndl;
+
+    pHndl = mvEthPortHndlGet(port);
+    if(pHndl != NULL)
+    {
+        mvEthQueuesShow(pHndl, rxQueue, txQueue, mode);
+    }
+}
+
+void    ethUcastSet(int port, char* macStr, int queue)
+{
+    void*   pHndl;
+    MV_U8   macAddr[MV_MAC_ADDR_SIZE];
+
+    pHndl = mvEthPortHndlGet(port);
+    if(pHndl != NULL)
+    {
+        mvMacStrToHex(macStr, macAddr);
+        mvEthMacAddrSet(pHndl, macAddr, queue);
+    }
+}
+
+
+void    ethPortUcastShow(int port)
+{
+    MV_U32  unicastReg, macL, macH;
+    int     i, j;
+
+    macL = MV_REG_READ(ETH_MAC_ADDR_LOW_REG(port));
+    macH = MV_REG_READ(ETH_MAC_ADDR_HIGH_REG(port));
+
+    mvOsPrintf("\n\t Port #%d Unicast MAC table: %02x:%02x:%02x:%02x:%02x:%02x\n\n",
+                port, ((macH >> 24) & 0xff), ((macH >> 16) & 0xff),
+                      ((macH >> 8) & 0xff), (macH  & 0xff),
+                      ((macL >> 8) & 0xff), (macL  & 0xff) );
+
+    for (i=0; i<4; i++)
+    {
+        unicastReg = MV_REG_READ( (ETH_DA_FILTER_UCAST_BASE(port) + i*4));
+        for(j=0; j<4; j++)
+        {
+            MV_U8   macEntry = (unicastReg >> (8*j)) & 0xFF;
+
+            mvOsPrintf("%X: %8s, Q = %d\n", i*4+j,
+                (macEntry & BIT0) ? "Accept" : "Reject", (macEntry >> 1) & 0x7);
+        }
+    }
+}
+
+void    ethMcastAdd(int port, char* macStr, int queue)
+{
+    void*   pHndl;
+    MV_U8   macAddr[MV_MAC_ADDR_SIZE];
+
+    pHndl = mvEthPortHndlGet(port);
+    if(pHndl != NULL)
+    {
+        mvMacStrToHex(macStr, macAddr);
+        mvEthMcastAddrSet(pHndl, macAddr, queue);
+    }
+}
+
+void    ethPortMcast(int port)
+{
+    int     tblIdx, regIdx;
+    MV_U32  regVal;
+
+    mvOsPrintf("\n\t Port #%d Special (IP) Multicast table: 01:00:5E:00:00:XX\n\n",
+                port);
+
+    for(tblIdx=0; tblIdx<(256/4); tblIdx++)
+    {
+        regVal = MV_REG_READ((ETH_DA_FILTER_SPEC_MCAST_BASE(port) + tblIdx*4));
+        for(regIdx=0; regIdx<4; regIdx++)
+        {
+            if((regVal & (0x01 << (regIdx*8))) != 0)
+            {
+                mvOsPrintf("0x%02X: Accepted, rxQ = %d\n",
+                    tblIdx*4+regIdx, ((regVal >> (regIdx*8+1)) & 0x07));
+            }
+        }
+    }
+    mvOsPrintf("\n\t Port #%d Other Multicast table\n\n", port);
+    for(tblIdx=0; tblIdx<(256/4); tblIdx++)
+    {
+        regVal = MV_REG_READ((ETH_DA_FILTER_OTH_MCAST_BASE(port) + tblIdx*4));
+        for(regIdx=0; regIdx<4; regIdx++)
+        {
+            if((regVal & (0x01 << (regIdx*8))) != 0)
+            {
+                mvOsPrintf("Crc8=0x%02X: Accepted, rxQ = %d\n",
+                    tblIdx*4+regIdx, ((regVal >> (regIdx*8+1)) & 0x07));
+            }
+        }
+    }
+}
+
+
+/* Print status of Ethernet port */
+void    mvEthPortShow(void* pHndl)
+{
+    MV_U32              regValue, rxCoal, txCoal;
+    int                 speed, queue, port;
+    ETH_PORT_CTRL*      pPortCtrl = (ETH_PORT_CTRL*)pHndl;
+
+    port = pPortCtrl->portNo;
+
+    regValue = MV_REG_READ( ETH_PORT_STATUS_REG(port) );
+
+    mvOsPrintf("\n\t ethGiga #%d port Status: 0x%04x = 0x%08x\n\n",
+                port, ETH_PORT_STATUS_REG(port), regValue);
+
+    mvOsPrintf("descInSram=%d, descSwCoher=%d\n",
+                ethDescInSram, ethDescSwCoher);
+
+    if(regValue & ETH_GMII_SPEED_1000_MASK)
+        speed = 1000;
+    else if(regValue & ETH_MII_SPEED_100_MASK)
+        speed = 100;
+    else
+        speed = 10;
+
+    mvEthCoalGet(pPortCtrl, &rxCoal, &txCoal);
+
+    /* Link, Speed, Duplex, FlowControl */
+    mvOsPrintf("Link=%s, Speed=%d, Duplex=%s, RxFlowControl=%s",
+                (regValue & ETH_LINK_UP_MASK) ? "UP" : "DOWN",
+                speed,
+                (regValue & ETH_FULL_DUPLEX_MASK) ? "FULL" : "HALF",
+                (regValue & ETH_ENABLE_RCV_FLOW_CTRL_MASK) ? "ENABLE" : "DISABLE");
+
+    mvOsPrintf("\n");
+
+    mvOsPrintf("RxCoal = %d usec, TxCoal = %d usec\n",
+                rxCoal, txCoal);
+
+    mvOsPrintf("rxDefQ=%d, arpQ=%d, bpduQ=%d, tcpQ=%d, udpQ=%d\n\n",
+                pPortCtrl->portConfig.rxDefQ, pPortCtrl->portConfig.rxArpQ,
+                pPortCtrl->portConfig.rxBpduQ,
+                pPortCtrl->portConfig.rxTcpQ, pPortCtrl->portConfig.rxUdpQ);
+
+    /* Print all RX and TX queues */
+    for(queue=0; queue<MV_ETH_RX_Q_NUM; queue++)
+    {
+        mvOsPrintf("RX Queue #%d: base=0x%lx, free=%d\n",
+                    queue, (MV_ULONG)pPortCtrl->rxQueue[queue].pFirstDescr,
+                    mvEthRxResourceGet(pPortCtrl, queue) );
+    }
+    mvOsPrintf("\n");
+    for(queue=0; queue<MV_ETH_TX_Q_NUM; queue++)
+    {
+        mvOsPrintf("TX Queue #%d: base=0x%lx, free=%d\n",
+                queue, (MV_ULONG)pPortCtrl->txQueue[queue].pFirstDescr,
+                mvEthTxResourceGet(pPortCtrl, queue) );
+    }
+}
+
+/* Print RX and TX queue of the Ethernet port */
+void    mvEthQueuesShow(void* pHndl, int rxQueue, int txQueue, int mode)
+{
+    ETH_PORT_CTRL   *pPortCtrl = (ETH_PORT_CTRL*)pHndl;
+    ETH_QUEUE_CTRL  *pQueueCtrl;
+    MV_U32          regValue;
+    ETH_RX_DESC     *pRxDescr;
+    ETH_TX_DESC     *pTxDescr;
+    int             i, port = pPortCtrl->portNo;
+
+    if( (rxQueue >=0) && (rxQueue < MV_ETH_RX_Q_NUM) )
+    {
+        pQueueCtrl = &(pPortCtrl->rxQueue[rxQueue]);
+        mvOsPrintf("Port #%d, RX Queue #%d\n\n", port, rxQueue);
+
+        mvOsPrintf("CURR_RX_DESC_PTR        : 0x%X = 0x%08x\n",
+            ETH_RX_CUR_DESC_PTR_REG(port, rxQueue),
+            MV_REG_READ( ETH_RX_CUR_DESC_PTR_REG(port, rxQueue)));
+
+
+        if(pQueueCtrl->pFirstDescr != NULL)
+        {
+            mvOsPrintf("pFirstDescr=0x%lx, pLastDescr=0x%lx, numOfResources=%d\n",
+                (MV_ULONG)pQueueCtrl->pFirstDescr, (MV_ULONG)pQueueCtrl->pLastDescr,
+                pQueueCtrl->resource);
+            mvOsPrintf("pCurrDescr: 0x%lx, pUsedDescr: 0x%lx\n",
+                (MV_ULONG)pQueueCtrl->pCurrentDescr,
+                (MV_ULONG)pQueueCtrl->pUsedDescr);
+
+            if(mode == 1)
+            {
+                pRxDescr = (ETH_RX_DESC*)pQueueCtrl->pFirstDescr;
+                i = 0;
+                do
+                {
+                    mvOsPrintf("%3d. desc=%08x (%08x), cmd=%08x, data=%4d, buf=%4d, buf=%08x, pkt=%lx, os=%lx\n",
+                                i, (MV_U32)pRxDescr, (MV_U32)ethDescVirtToPhy(pQueueCtrl, (MV_U8*)pRxDescr),
+                                pRxDescr->cmdSts, pRxDescr->byteCnt, (MV_U32)pRxDescr->bufSize,
+                                (unsigned int)pRxDescr->bufPtr, (MV_ULONG)pRxDescr->returnInfo,
+                                ((MV_PKT_INFO*)pRxDescr->returnInfo)->osInfo);
+
+                    ETH_DESCR_INV(pPortCtrl, pRxDescr);
+                    pRxDescr = RX_NEXT_DESC_PTR(pRxDescr, pQueueCtrl);
+                    i++;
+                } while (pRxDescr != pQueueCtrl->pFirstDescr);
+            }
+        }
+        else
+            mvOsPrintf("RX Queue #%d is NOT CREATED\n", rxQueue);
+    }
+
+    if( (txQueue >=0) && (txQueue < MV_ETH_TX_Q_NUM) )
+    {
+        pQueueCtrl = &(pPortCtrl->txQueue[txQueue]);
+        mvOsPrintf("Port #%d, TX Queue #%d\n\n", port, txQueue);
+
+        regValue = MV_REG_READ( ETH_TX_CUR_DESC_PTR_REG(port, txQueue));
+        mvOsPrintf("CURR_TX_DESC_PTR        : 0x%X = 0x%08x\n",
+                    ETH_TX_CUR_DESC_PTR_REG(port, txQueue), regValue);
+
+        if(pQueueCtrl->pFirstDescr != NULL)
+        {
+            mvOsPrintf("pFirstDescr=0x%lx, pLastDescr=0x%lx, numOfResources=%d\n",
+                       (MV_ULONG)pQueueCtrl->pFirstDescr,
+                       (MV_ULONG)pQueueCtrl->pLastDescr,
+                        pQueueCtrl->resource);
+            mvOsPrintf("pCurrDescr: 0x%lx, pUsedDescr: 0x%lx\n",
+                       (MV_ULONG)pQueueCtrl->pCurrentDescr,
+                       (MV_ULONG)pQueueCtrl->pUsedDescr);
+
+            if(mode == 1)
+            {
+                pTxDescr = (ETH_TX_DESC*)pQueueCtrl->pFirstDescr;
+                i = 0;
+                do
+                {
+                    mvOsPrintf("%3d. desc=%08x (%08x), cmd=%08x, data=%4d, buf=%08x, pkt=%lx, os=%lx\n",
+                                i, (MV_U32)pTxDescr, (MV_U32)ethDescVirtToPhy(pQueueCtrl, (MV_U8*)pTxDescr),
+                                pTxDescr->cmdSts, pTxDescr->byteCnt,
+                                (MV_U32)pTxDescr->bufPtr, (MV_ULONG)pTxDescr->returnInfo,
+                                pTxDescr->returnInfo ? (((MV_PKT_INFO*)pTxDescr->returnInfo)->osInfo) : 0x0);
+
+                    ETH_DESCR_INV(pPortCtrl, pTxDescr);
+                    pTxDescr = TX_NEXT_DESC_PTR(pTxDescr, pQueueCtrl);
+                    i++;
+                } while (pTxDescr != pQueueCtrl->pFirstDescr);
+            }
+        }
+        else
+            mvOsPrintf("TX Queue #%d is NOT CREATED\n", txQueue);
+    }
+}
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthDebug.h b/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthDebug.h
new file mode 100644
index 000000000000..4a90043b1307
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthDebug.h
@@ -0,0 +1,146 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#ifndef __MV_ETH_DEBUG_H__
+#define __MV_ETH_DEBUG_H__
+
+#if 0
+/*
+ ** Externs
+ */
+void     ethBpduRxQ(int port, int bpduQueue);
+void     ethArpRxQ(int port, int bpduQueue);
+void     ethTcpRxQ(int port, int bpduQueue);
+void     ethUdpRxQ(int port, int bpduQueue);
+void     ethMcastAdd(int port, char* macStr, int queue);
+
+#ifdef INCLUDE_MULTI_QUEUE
+void   	ethRxPolicy( int port);
+void   	ethTxPolicy( int port);
+void   	ethTxPolDA(int port, char* macStr, int txQ, char* headerHexStr);
+void   	ethRxPolMode(int port, MV_ETH_PRIO_MODE prioMode);
+void    ethRxPolQ(int port, int rxQueue, int rxQuota);
+#endif /* INCLUDE_MULTI_QUEUE */
+
+void    print_egiga_stat(void *sc, unsigned int port);
+void    ethPortStatus (int port);
+void    ethPortQueues( int port, int rxQueue, int txQueue, int mode);
+void    ethPortMcast(int port);
+void    ethPortRegs(int port);
+void    ethPortCounters(int port);
+void 	ethPortRmonCounters(int port);
+void    ethRxCoal(int port, int usec);
+void    ethTxCoal(int port, int usec);
+
+void    ethRegs(int port);
+void	ethClearCounters(int port);
+void    ethUcastSet(int port, char* macStr, int queue);
+void    ethPortUcastShow(int port);
+
+#ifdef CONFIG_MV_ETH_HEADER
+void	run_com_header(const char *buffer);
+#endif
+
+#ifdef INCLUDE_MULTI_QUEUE
+void    ethRxPolMode(int port, MV_ETH_PRIO_MODE prioMode);
+void    ethRxPolQ(int port, int queue, int quota);
+void    ethRxPolicy(int port);
+void    ethTxPolDef(int port, int txQ, char* headerHexStr);
+void    ethTxPolDA(int port, char* macStr, int txQ, char* headerHexStr);
+void    ethTxPolicy(int port);
+#endif /* INCLUDE_MULTI_QUEUE */
+
+#if (MV_ETH_VERSION >= 4)
+void     ethEjpModeSet(int port, int mode)
+#endif
+#endif /* 0 */
+
+
+
+
+void    ethRxCoal(int port, int usec);
+void    ethTxCoal(int port, int usec);
+#if (MV_ETH_VERSION >= 4)
+void     ethEjpModeSet(int port, int mode);
+#endif /* (MV_ETH_VERSION >= 4) */
+
+void    ethBpduRxQ(int port, int bpduQueue);
+void    ethArpRxQ(int port, int arpQueue);
+void    ethTcpRxQ(int port, int tcpQueue);
+void    ethUdpRxQ(int port, int udpQueue);
+void    ethTxPolicyRegs(int port);
+void    ethPortRegs(int port);
+void    ethRegs(int port);
+void ethClearCounters(int port);
+void    ethPortCounters(int port);
+void    ethPortRmonCounters(int port);
+void    ethPortStatus(int port);
+void    ethPortQueues(int port, int rxQueue, int txQueue, int mode);
+void    ethUcastSet(int port, char* macStr, int queue);
+void    ethPortUcastShow(int port);
+void    ethMcastAdd(int port, char* macStr, int queue);
+void    ethPortMcast(int port);
+void    mvEthPortShow(void* pHndl);
+void    mvEthQueuesShow(void* pHndl, int rxQueue, int txQueue, int mode);
+
+#endif
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthGbe.h b/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthGbe.h
new file mode 100644
index 000000000000..fa116991997e
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthGbe.h
@@ -0,0 +1,749 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/*******************************************************************************
+* mvEth.h - Header File for : Marvell Gigabit Ethernet Controller
+*
+* DESCRIPTION:
+*       This header file contains macros typedefs and function declaration specific to
+*       the Marvell Gigabit Ethernet Controller.
+*
+* DEPENDENCIES:
+*       None.
+*
+*******************************************************************************/
+
+#ifndef __mvEthGbe_h__
+#define __mvEthGbe_h__
+
+extern MV_BOOL         ethDescInSram;
+extern MV_BOOL         ethDescSwCoher;
+extern ETH_PORT_CTRL*  ethPortCtrl[];
+
+static INLINE MV_ULONG  ethDescVirtToPhy(ETH_QUEUE_CTRL* pQueueCtrl, MV_U8* pDesc)
+{
+#if defined (ETH_DESCR_IN_SRAM)
+    if( ethDescInSram )
+        return mvSramVirtToPhy(pDesc);
+    else
+#endif /* ETH_DESCR_IN_SRAM */
+        return (pQueueCtrl->descBuf.bufPhysAddr + (pDesc - pQueueCtrl->descBuf.bufVirtPtr));
+}
+/* Return port handler */
+#define mvEthPortHndlGet(port)  ethPortCtrl[port]
+
+/* Used as WA for HW/SW race on TX */
+static INLINE int      mvEthPortTxEnable(void* pPortHndl, int queue, int max_deep)
+{
+    int                 deep = 0;
+    MV_U32              txCurrReg, txEnReg;
+    ETH_TX_DESC*        pTxLastDesc;
+    ETH_QUEUE_CTRL*     pQueueCtrl;
+    ETH_PORT_CTRL*      pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+
+    txEnReg = MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl->portNo));
+    if( (txEnReg & MV_32BIT_LE_FAST(ETH_TXQ_ENABLE_MASK)) == 0)
+    {
+        MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl->portNo)) = pPortCtrl->portTxQueueCmdReg;
+        return 0;
+    }
+
+    pQueueCtrl = &pPortCtrl->txQueue[queue];
+    pTxLastDesc = pQueueCtrl->pCurrentDescr;
+    txCurrReg = MV_REG_READ(ETH_TX_CUR_DESC_PTR_REG(pPortCtrl->portNo, queue));
+    if(ethDescVirtToPhy(pQueueCtrl, (MV_U8*)pTxLastDesc) == txCurrReg)
+    {
+        /* All descriptors are processed, no chance for race */
+        return 0;
+    }
+
+    /* Check distance betwee HW and SW location: */
+    /* If distance between HW and SW pointers is less than max_deep descriptors */
+    /* Race condition is possible, so wait end of TX and restart TXQ */
+    while(deep < max_deep)
+    {
+        pTxLastDesc = TX_PREV_DESC_PTR(pTxLastDesc, pQueueCtrl);
+        if(ethDescVirtToPhy(pQueueCtrl, (MV_U8*)pTxLastDesc) == txCurrReg)
+        {
+            int count = 0;
+
+            while( (txEnReg & MV_32BIT_LE_FAST(ETH_TXQ_ENABLE_MASK)) != 0)
+            {
+                count++;
+                if(count > 10000)
+                {
+                    mvOsPrintf("mvEthPortTxEnable: timeout - TXQ_CMD=0x%08x\n",
+                                MV_REG_READ(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl->portNo)) );
+                    break;
+                }
+                txEnReg = MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl->portNo));
+            }
+
+            MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl->portNo)) = pPortCtrl->portTxQueueCmdReg;
+            return count;
+        }
+        deep++;
+    }
+    /* Distance between HW and SW pointers is more than max_deep descriptors, */
+    /* So NO race condition - do nothing */
+    return -1;
+}
+
+
+/* defines  */
+#define ETH_CSUM_MIN_BYTE_COUNT     72
+
+/* Tailgate and Kirwood have only 2K TX FIFO */
+#if (MV_ETH_VERSION == 2) || (MV_ETH_VERSION == 4)
+#define ETH_CSUM_MAX_BYTE_COUNT     1600
+#else
+#define ETH_CSUM_MAX_BYTE_COUNT     9*1024
+#endif /* MV_ETH_VERSION */
+
+#define ETH_MV_HEADER_SIZE	    2
+#define ETH_MV_TX_EN
+
+/* An offest in Tx descriptors to store data for buffers less than 8 Bytes */
+#define MIN_TX_BUFF_LOAD            8
+#define TX_BUF_OFFSET_IN_DESC       (ETH_TX_DESC_ALIGNED_SIZE - MIN_TX_BUFF_LOAD)
+
+/* Default port configuration value */
+#define PORT_CONFIG_VALUE                       \
+             ETH_DEF_RX_QUEUE_MASK(0)       |   \
+             ETH_DEF_RX_ARP_QUEUE_MASK(0)   |   \
+             ETH_DEF_RX_TCP_QUEUE_MASK(0)   |   \
+             ETH_DEF_RX_UDP_QUEUE_MASK(0)   |   \
+             ETH_DEF_RX_BPDU_QUEUE_MASK(0)  |   \
+             ETH_RX_CHECKSUM_WITH_PSEUDO_HDR
+
+/* Default port extend configuration value */
+#define PORT_CONFIG_EXTEND_VALUE            0
+
+#define PORT_SERIAL_CONTROL_VALUE                           \
+            ETH_DISABLE_FC_AUTO_NEG_MASK                |   \
+            BIT9                                        |   \
+            ETH_DO_NOT_FORCE_LINK_FAIL_MASK             |   \
+            ETH_MAX_RX_PACKET_1552BYTE                  |   \
+            ETH_SET_FULL_DUPLEX_MASK
+
+#define PORT_SERIAL_CONTROL_100MB_FORCE_VALUE               \
+            ETH_FORCE_LINK_PASS_MASK                    |   \
+            ETH_DISABLE_DUPLEX_AUTO_NEG_MASK            |   \
+            ETH_DISABLE_FC_AUTO_NEG_MASK                |   \
+            BIT9                                        |   \
+            ETH_DO_NOT_FORCE_LINK_FAIL_MASK             |   \
+            ETH_DISABLE_SPEED_AUTO_NEG_MASK             |   \
+            ETH_SET_FULL_DUPLEX_MASK                    |   \
+            ETH_SET_MII_SPEED_100_MASK                  |   \
+            ETH_MAX_RX_PACKET_1552BYTE
+
+
+#define PORT_SERIAL_CONTROL_1000MB_FORCE_VALUE              \
+            ETH_FORCE_LINK_PASS_MASK                    |   \
+            ETH_DISABLE_DUPLEX_AUTO_NEG_MASK            |   \
+            ETH_DISABLE_FC_AUTO_NEG_MASK                |   \
+            BIT9                                        |   \
+            ETH_DO_NOT_FORCE_LINK_FAIL_MASK             |   \
+            ETH_DISABLE_SPEED_AUTO_NEG_MASK             |   \
+            ETH_SET_FULL_DUPLEX_MASK                    |   \
+            ETH_SET_GMII_SPEED_1000_MASK                |   \
+            ETH_MAX_RX_PACKET_1552BYTE
+
+#define PORT_SERIAL_CONTROL_SGMII_IBAN_VALUE                \
+            ETH_DISABLE_FC_AUTO_NEG_MASK                |   \
+            BIT9                                        |   \
+            ETH_IN_BAND_AN_EN_MASK                      |   \
+            ETH_DO_NOT_FORCE_LINK_FAIL_MASK             |   \
+            ETH_MAX_RX_PACKET_1552BYTE
+
+/* Function headers: */
+MV_VOID     mvEthSetSpecialMcastTable(int portNo, int queue);
+MV_STATUS   mvEthArpRxQueue(void* pPortHandle, int arpQueue);
+MV_STATUS   mvEthUdpRxQueue(void* pPortHandle, int udpQueue);
+MV_STATUS   mvEthTcpRxQueue(void* pPortHandle, int tcpQueue);
+MV_STATUS   mvEthMacAddrGet(int portNo, unsigned char *pAddr);
+MV_VOID     mvEthSetOtherMcastTable(int portNo, int queue);
+MV_STATUS   mvEthHeaderModeSet(void* pPortHandle, MV_ETH_HEADER_MODE headerMode);
+/* Interrupt Coalesting functions */
+MV_U32      mvEthRxCoalSet(void* pPortHndl, MV_U32 uSec);
+MV_U32      mvEthTxCoalSet(void* pPortHndl, MV_U32 uSec);
+MV_STATUS   mvEthCoalGet(void* pPortHndl, MV_U32* pRxCoal, MV_U32* pTxCoal);
+
+/******************************************************************************/
+/*                          Data Flow functions                               */
+/******************************************************************************/
+static INLINE void      mvEthPortTxRestart(void* pPortHndl)
+{
+    ETH_PORT_CTRL*      pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+
+    MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl->portNo)) = pPortCtrl->portTxQueueCmdReg;
+}
+
+/* Get number of Free resources in specific TX queue */
+static INLINE int     mvEthTxResourceGet(void* pPortHndl, int txQueue)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+
+    return (pPortCtrl->txQueue[txQueue].resource);
+}
+
+/* Get number of Free resources in specific RX queue */
+static INLINE int     mvEthRxResourceGet(void* pPortHndl, int rxQueue)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+
+    return (pPortCtrl->rxQueue[rxQueue].resource);
+}
+
+static INLINE int     mvEthTxQueueIsFull(void* pPortHndl, int txQueue)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+
+    if(pPortCtrl->txQueue[txQueue].resource == 0)
+        return MV_TRUE;
+
+    return MV_FALSE;
+}
+
+/* Get number of Free resources in specific RX queue */
+static INLINE int     mvEthRxQueueIsFull(void* pPortHndl, int rxQueue)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+    ETH_QUEUE_CTRL* pQueueCtrl = &pPortCtrl->rxQueue[rxQueue];
+
+    if( (pQueueCtrl->pUsedDescr == pQueueCtrl->pCurrentDescr) &&
+        (pQueueCtrl->resource != 0) )
+        return MV_TRUE;
+
+    return MV_FALSE;
+}
+
+static INLINE int     mvEthTxQueueIsEmpty(void* pPortHndl, int txQueue)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+    ETH_QUEUE_CTRL* pQueueCtrl = &pPortCtrl->txQueue[txQueue];
+
+    if( (pQueueCtrl->pUsedDescr == pQueueCtrl->pCurrentDescr) &&
+        (pQueueCtrl->resource != 0) )
+    {
+        return MV_TRUE;
+    }
+    return MV_FALSE;
+}
+
+/* Get number of Free resources in specific RX queue */
+static INLINE int     mvEthRxQueueIsEmpty(void* pPortHndl, int rxQueue)
+{
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
+
+    if(pPortCtrl->rxQueue[rxQueue].resource == 0)
+        return MV_TRUE;
+
+    return MV_FALSE;
+}
+
+/*******************************************************************************
+* mvEthPortTx - Send an Ethernet packet
+*
+* DESCRIPTION:
+*       This routine send a given packet described by pPktInfo parameter.
+*       Single buffer only.
+*
+* INPUT:
+*       void*       pEthPortHndl  - Ethernet Port handler.
+*       int         txQueue       - Number of Tx queue.
+*       MV_PKT_INFO *pPktInfo     - User packet to send.
+*
+* RETURN:
+*       MV_NO_RESOURCE  - No enough resources to send this packet.
+*       MV_ERROR        - Unexpected Fatal error.
+*       MV_OK           - Packet send successfully.
+*
+*******************************************************************************/
+static INLINE MV_STATUS   mvEthPortTx(void* pEthPortHndl, int txQueue, MV_PKT_INFO* pPktInfo)
+{
+    ETH_TX_DESC*    pTxCurrDesc;
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+    ETH_QUEUE_CTRL* pQueueCtrl;
+    int             portNo;
+    MV_BUF_INFO*    pBufInfo = pPktInfo->pFrags;
+
+#ifdef ETH_DEBUG
+    if(pPortCtrl->portState != MV_ACTIVE)
+        return MV_BAD_STATE;
+#endif /* ETH_DEBUG */
+
+    portNo = pPortCtrl->portNo;
+    pQueueCtrl = &pPortCtrl->txQueue[txQueue];
+
+    /* Get the Tx Desc ring indexes */
+    pTxCurrDesc = pQueueCtrl->pCurrentDescr;
+
+    /* Check if there is enough resources to send the packet */
+    if(pQueueCtrl->resource == 0)
+        return MV_NO_RESOURCE;
+
+    pTxCurrDesc->byteCnt = pBufInfo->dataSize;
+
+    /* Flash Buffer */
+    if(pPktInfo->pktSize != 0)
+    {
+#ifdef MV_NETBSD
+        pTxCurrDesc->bufPtr = pBufInfo->bufPhysAddr;
+        ETH_PACKET_CACHE_FLUSH(pBufInfo->bufVirtPtr, pPktInfo->pktSize);
+#else
+        pTxCurrDesc->bufPtr = ETH_PACKET_CACHE_FLUSH(pBufInfo->bufVirtPtr, pPktInfo->pktSize);
+#endif
+        pPktInfo->pktSize = 0;
+    }
+    else
+        pTxCurrDesc->bufPtr = pBufInfo->bufPhysAddr;
+
+    pTxCurrDesc->returnInfo = (MV_ULONG)pPktInfo;
+
+    /* There is only one buffer in the packet */
+    /* The OSG might set some bits for checksum offload, so add them to first descriptor */
+    pTxCurrDesc->cmdSts = pPktInfo->status              |
+                          ETH_BUFFER_OWNED_BY_DMA       |
+                          ETH_TX_GENERATE_CRC_MASK      |
+                          ETH_TX_ENABLE_INTERRUPT_MASK  |
+                          ETH_TX_ZERO_PADDING_MASK      |
+                          ETH_TX_FIRST_DESC_MASK        |
+                          ETH_TX_LAST_DESC_MASK;
+
+    ETH_DESCR_FLUSH_INV(pPortCtrl, pTxCurrDesc);
+
+    pQueueCtrl->resource--;
+    pQueueCtrl->pCurrentDescr = TX_NEXT_DESC_PTR(pTxCurrDesc, pQueueCtrl);
+
+    /* Apply send command */
+    MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(portNo)) = pPortCtrl->portTxQueueCmdReg;
+
+    return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvEthPortSgTx - Send an Ethernet packet
+*
+* DESCRIPTION:
+*       This routine send a given packet described by pBufInfo parameter. It
+*       supports transmitting of a packet spaned over multiple buffers.
+*
+* INPUT:
+*       void*       pEthPortHndl  - Ethernet Port handler.
+*       int         txQueue       - Number of Tx queue.
+*       MV_PKT_INFO *pPktInfo     - User packet to send.
+*
+* RETURN:
+*       MV_NO_RESOURCE  - No enough resources to send this packet.
+*       MV_ERROR        - Unexpected Fatal error.
+*       MV_OK           - Packet send successfully.
+*
+*******************************************************************************/
+static INLINE MV_STATUS   mvEthPortSgTx(void* pEthPortHndl, int txQueue, MV_PKT_INFO* pPktInfo)
+{
+    ETH_TX_DESC*    pTxFirstDesc;
+    ETH_TX_DESC*    pTxCurrDesc;
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+    ETH_QUEUE_CTRL* pQueueCtrl;
+    int             portNo, bufCount;
+    MV_BUF_INFO*    pBufInfo = pPktInfo->pFrags;
+    MV_U8*          pTxBuf;
+
+#ifdef ETH_DEBUG
+    if(pPortCtrl->portState != MV_ACTIVE)
+        return MV_BAD_STATE;
+#endif /* ETH_DEBUG */
+
+    portNo = pPortCtrl->portNo;
+    pQueueCtrl = &pPortCtrl->txQueue[txQueue];
+
+    /* Get the Tx Desc ring indexes */
+    pTxCurrDesc = pQueueCtrl->pCurrentDescr;
+
+    /* Check if there is enough resources to send the packet */
+    if(pQueueCtrl->resource < pPktInfo->numFrags)
+        return MV_NO_RESOURCE;
+
+    /* Remember first desc */
+    pTxFirstDesc  = pTxCurrDesc;
+
+    bufCount = 0;
+    while(MV_TRUE)
+    {
+        if(pBufInfo[bufCount].dataSize <= MIN_TX_BUFF_LOAD)
+        {
+            /* Buffers with a payload smaller than MIN_TX_BUFF_LOAD (8 bytes) must be aligned    */
+            /* to 64-bit boundary. Two options here:                                             */
+            /* 1) Usually, copy the payload to the reserved 8 bytes inside descriptor.           */
+            /* 2) In the Half duplex workaround, the reserved 8 bytes inside descriptor are used */
+            /*    as a pointer to the aligned buffer, copy the small payload to this buffer.     */
+            pTxBuf = ((MV_U8*)pTxCurrDesc)+TX_BUF_OFFSET_IN_DESC;
+            mvOsBCopy(pBufInfo[bufCount].bufVirtPtr, pTxBuf, pBufInfo[bufCount].dataSize);
+            pTxCurrDesc->bufPtr = ethDescVirtToPhy(pQueueCtrl, pTxBuf);
+        }
+        else
+        {
+            /* Flash Buffer */
+#ifdef MV_NETBSD
+            pTxCurrDesc->bufPtr = pBufInfo[bufCount].bufPhysAddr;
+	    ETH_PACKET_CACHE_FLUSH(pBufInfo[bufCount].bufVirtPtr, pBufInfo[bufCount].dataSize);
+#else
+            pTxCurrDesc->bufPtr = ETH_PACKET_CACHE_FLUSH(pBufInfo[bufCount].bufVirtPtr, pBufInfo[bufCount].dataSize);
+#endif
+        }
+
+        pTxCurrDesc->byteCnt = pBufInfo[bufCount].dataSize;
+        bufCount++;
+
+        if(bufCount >= pPktInfo->numFrags)
+            break;
+
+        if(bufCount > 1)
+        {
+            /* There is middle buffer of the packet Not First and Not Last */
+            pTxCurrDesc->cmdSts = ETH_BUFFER_OWNED_BY_DMA;
+            ETH_DESCR_FLUSH_INV(pPortCtrl, pTxCurrDesc);
+        }
+        /* Go to next descriptor and next buffer */
+        pTxCurrDesc = TX_NEXT_DESC_PTR(pTxCurrDesc, pQueueCtrl);
+    }
+    /* Set last desc with DMA ownership and interrupt enable. */
+    pTxCurrDesc->returnInfo = (MV_ULONG)pPktInfo;
+    if(bufCount == 1)
+    {
+        /* There is only one buffer in the packet */
+        /* The OSG might set some bits for checksum offload, so add them to first descriptor */
+        pTxCurrDesc->cmdSts = pPktInfo->status              |
+                              ETH_BUFFER_OWNED_BY_DMA       |
+                              ETH_TX_GENERATE_CRC_MASK      |
+                              ETH_TX_ENABLE_INTERRUPT_MASK  |
+                              ETH_TX_ZERO_PADDING_MASK      |
+                              ETH_TX_FIRST_DESC_MASK        |
+                              ETH_TX_LAST_DESC_MASK;
+
+        ETH_DESCR_FLUSH_INV(pPortCtrl, pTxCurrDesc);
+    }
+    else
+    {
+        /* Last but not First */
+        pTxCurrDesc->cmdSts = ETH_BUFFER_OWNED_BY_DMA       |
+                              ETH_TX_ENABLE_INTERRUPT_MASK  |
+                              ETH_TX_ZERO_PADDING_MASK      |
+                              ETH_TX_LAST_DESC_MASK;
+
+        ETH_DESCR_FLUSH_INV(pPortCtrl, pTxCurrDesc);
+
+        /* Update First when more than one buffer in the packet */
+        /* The OSG might set some bits for checksum offload, so add them to first descriptor */
+        pTxFirstDesc->cmdSts = pPktInfo->status             |
+                               ETH_BUFFER_OWNED_BY_DMA      |
+                               ETH_TX_GENERATE_CRC_MASK     |
+                               ETH_TX_FIRST_DESC_MASK;
+
+        ETH_DESCR_FLUSH_INV(pPortCtrl, pTxFirstDesc);
+    }
+    /* Update txQueue state */
+    pQueueCtrl->resource -= bufCount;
+    pQueueCtrl->pCurrentDescr = TX_NEXT_DESC_PTR(pTxCurrDesc, pQueueCtrl);
+
+    /* Apply send command */
+    MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(portNo)) = pPortCtrl->portTxQueueCmdReg;
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvEthPortTxDone - Free all used Tx descriptors and mBlks.
+*
+* DESCRIPTION:
+*       This routine returns the transmitted packet information to the caller.
+*
+* INPUT:
+*       void*       pEthPortHndl    - Ethernet Port handler.
+*       int         txQueue         - Number of Tx queue.
+*
+* OUTPUT:
+*       MV_PKT_INFO *pPktInfo       - Pointer to packet was sent.
+*
+* RETURN:
+*       MV_NOT_FOUND    - No transmitted packets to return. Transmit in progress.
+*       MV_EMPTY        - No transmitted packets to return. TX Queue is empty.
+*       MV_ERROR        - Unexpected Fatal error.
+*       MV_OK           - There is transmitted packet in the queue,
+*                       'pPktInfo' filled with relevant information.
+*
+*******************************************************************************/
+static INLINE MV_PKT_INFO*    mvEthPortTxDone(void* pEthPortHndl, int txQueue)
+{
+    ETH_TX_DESC*    pTxCurrDesc;
+    ETH_TX_DESC*    pTxUsedDesc;
+    ETH_QUEUE_CTRL* pQueueCtrl;
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+    MV_PKT_INFO*    pPktInfo;
+    MV_U32          commandStatus;
+
+    pQueueCtrl = &pPortCtrl->txQueue[txQueue];
+
+    pTxUsedDesc = pQueueCtrl->pUsedDescr;
+    pTxCurrDesc = pQueueCtrl->pCurrentDescr;
+
+    while(MV_TRUE)
+    {
+        /* No more used descriptors */
+        commandStatus = pTxUsedDesc->cmdSts;
+        if (commandStatus  & (ETH_BUFFER_OWNED_BY_DMA))
+        {
+            ETH_DESCR_INV(pPortCtrl, pTxUsedDesc);
+            return NULL;
+        }
+        if( (pTxUsedDesc == pTxCurrDesc) &&
+            (pQueueCtrl->resource != 0) )
+        {
+            return NULL;
+        }
+        pQueueCtrl->resource++;
+        pQueueCtrl->pUsedDescr = TX_NEXT_DESC_PTR(pTxUsedDesc, pQueueCtrl);
+        if(commandStatus & (ETH_TX_LAST_DESC_MASK))
+        {
+            pPktInfo = (MV_PKT_INFO*)pTxUsedDesc->returnInfo;
+            pPktInfo->status  = commandStatus;
+            return pPktInfo;
+        }
+        pTxUsedDesc = pQueueCtrl->pUsedDescr;
+    }
+}
+
+/*******************************************************************************
+* mvEthPortRx - Get new received packets from Rx queue.
+*
+* DESCRIPTION:
+*       This routine returns the received data to the caller. There is no
+*       data copying during routine operation. All information is returned
+*       using pointer to packet information struct passed from the caller.
+*
+* INPUT:
+*       void*       pEthPortHndl    - Ethernet Port handler.
+*       int         rxQueue         - Number of Rx queue.
+*
+* OUTPUT:
+*       MV_PKT_INFO *pPktInfo       - Pointer to received packet.
+*
+* RETURN:
+*       MV_NO_RESOURCE  - No free resources in RX queue.
+*       MV_ERROR        - Unexpected Fatal error.
+*       MV_OK           - New packet received and 'pBufInfo' structure filled
+*                       with relevant information.
+*
+*******************************************************************************/
+static INLINE MV_PKT_INFO*    mvEthPortRx(void* pEthPortHndl, int rxQueue)
+{
+    ETH_RX_DESC     *pRxCurrDesc;
+    MV_U32          commandStatus;
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+    ETH_QUEUE_CTRL* pQueueCtrl;
+    MV_PKT_INFO*    pPktInfo;
+
+    pQueueCtrl = &(pPortCtrl->rxQueue[rxQueue]);
+
+    /* Check resources */
+    if(pQueueCtrl->resource == 0)
+    {
+        mvOsPrintf("ethPortRx: no more resources\n");
+        return NULL;
+    }
+    while(MV_TRUE)
+    {
+        /* Get the Rx Desc ring 'curr and 'used' indexes */
+        pRxCurrDesc = pQueueCtrl->pCurrentDescr;
+
+	commandStatus   = pRxCurrDesc->cmdSts;
+        if (commandStatus & (ETH_BUFFER_OWNED_BY_DMA))
+        {
+            /* Nothing to receive... */
+            ETH_DESCR_INV(pPortCtrl, pRxCurrDesc);
+            return NULL;
+        }
+
+        /* Valid RX only if FIRST and LAST bits are set */
+        if( (commandStatus & (ETH_RX_LAST_DESC_MASK | ETH_RX_FIRST_DESC_MASK)) ==
+                             (ETH_RX_LAST_DESC_MASK | ETH_RX_FIRST_DESC_MASK) )
+        {
+            pPktInfo = (MV_PKT_INFO*)pRxCurrDesc->returnInfo;
+            pPktInfo->pFrags->dataSize  = pRxCurrDesc->byteCnt - 4;
+            pPktInfo->status            = commandStatus;
+            pPktInfo->fragIP            = pRxCurrDesc->bufSize & ETH_RX_IP_FRAGMENTED_FRAME_MASK;
+
+            pQueueCtrl->resource--;
+            /* Update 'curr' in data structure */
+            pQueueCtrl->pCurrentDescr = RX_NEXT_DESC_PTR(pRxCurrDesc, pQueueCtrl);
+
+#ifdef INCLUDE_SYNC_BARR
+            mvCpuIfSyncBarr(DRAM_TARGET);
+#endif
+            return pPktInfo;
+        }
+        else
+        {
+            ETH_RX_DESC*    pRxUsedDesc = pQueueCtrl->pUsedDescr;
+
+#ifdef ETH_DEBUG
+            mvOsPrintf("ethDrv: Unexpected Jumbo frame: "
+                       "status=0x%08x, byteCnt=%d, pData=0x%x\n",
+                        commandStatus, pRxCurrDesc->byteCnt, pRxCurrDesc->bufPtr);
+#endif /* ETH_DEBUG */
+
+            /* move buffer from pCurrentDescr position to pUsedDescr position */
+            pRxUsedDesc->bufPtr     = pRxCurrDesc->bufPtr;
+            pRxUsedDesc->returnInfo = pRxCurrDesc->returnInfo;
+            pRxUsedDesc->bufSize    = pRxCurrDesc->bufSize & ETH_RX_BUFFER_MASK;
+
+            /* Return the descriptor to DMA ownership */
+            pRxUsedDesc->cmdSts = ETH_BUFFER_OWNED_BY_DMA |
+                                  ETH_RX_ENABLE_INTERRUPT_MASK;
+
+            /* Flush descriptor and CPU pipe */
+            ETH_DESCR_FLUSH_INV(pPortCtrl, pRxUsedDesc);
+
+            /* Move the used descriptor pointer to the next descriptor */
+            pQueueCtrl->pUsedDescr = RX_NEXT_DESC_PTR(pRxUsedDesc, pQueueCtrl);
+            pQueueCtrl->pCurrentDescr = RX_NEXT_DESC_PTR(pRxCurrDesc, pQueueCtrl);
+        }
+    }
+}
+
+/*******************************************************************************
+* mvEthPortRxDone - Returns a Rx buffer back to the Rx ring.
+*
+* DESCRIPTION:
+*       This routine returns a Rx buffer back to the Rx ring.
+*
+* INPUT:
+*       void*       pEthPortHndl    - Ethernet Port handler.
+*       int         rxQueue         - Number of Rx queue.
+*       MV_PKT_INFO *pPktInfo       - Pointer to received packet.
+*
+* RETURN:
+*       MV_ERROR        - Unexpected Fatal error.
+*       MV_OUT_OF_RANGE - RX queue is already FULL, so this buffer can't be
+*                       returned to this queue.
+*       MV_FULL         - Buffer returned successfully and RX queue became full.
+*                       More buffers should not be returned at the time.
+*       MV_OK           - Buffer returned successfully and there are more free
+*                       places in the queue.
+*
+*******************************************************************************/
+static INLINE MV_STATUS   mvEthPortRxDone(void* pEthPortHndl, int rxQueue, MV_PKT_INFO *pPktInfo)
+{
+    ETH_RX_DESC*    pRxUsedDesc;
+    ETH_QUEUE_CTRL* pQueueCtrl;
+    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
+
+    pQueueCtrl = &pPortCtrl->rxQueue[rxQueue];
+
+    /* Get 'used' Rx descriptor */
+    pRxUsedDesc = pQueueCtrl->pUsedDescr;
+
+    /* Check that ring is not FULL */
+    if( (pQueueCtrl->pUsedDescr == pQueueCtrl->pCurrentDescr) &&
+        (pQueueCtrl->resource != 0) )
+    {
+        mvOsPrintf("%s %d: out of range Error resource=%d, curr=%p, used=%p\n",
+                    __FUNCTION__, pPortCtrl->portNo, pQueueCtrl->resource,
+                    pQueueCtrl->pCurrentDescr, pQueueCtrl->pUsedDescr);
+        return MV_OUT_OF_RANGE;
+    }
+
+    pRxUsedDesc->bufPtr     = pPktInfo->pFrags->bufPhysAddr;
+    pRxUsedDesc->returnInfo = (MV_ULONG)pPktInfo;
+    pRxUsedDesc->bufSize    = pPktInfo->pFrags->bufSize & ETH_RX_BUFFER_MASK;
+
+    /* Invalidate data buffer accordingly with pktSize */
+    if(pPktInfo->pktSize != 0)
+    {
+        ETH_PACKET_CACHE_INVALIDATE(pPktInfo->pFrags->bufVirtPtr, pPktInfo->pktSize);
+        pPktInfo->pktSize = 0;
+    }
+
+    /* Return the descriptor to DMA ownership */
+    pRxUsedDesc->cmdSts = ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT_MASK;
+
+    /* Flush descriptor and CPU pipe */
+    ETH_DESCR_FLUSH_INV(pPortCtrl, pRxUsedDesc);
+
+    pQueueCtrl->resource++;
+
+    /* Move the used descriptor pointer to the next descriptor */
+    pQueueCtrl->pUsedDescr = RX_NEXT_DESC_PTR(pRxUsedDesc, pQueueCtrl);
+
+    /* If ring became Full return MV_FULL */
+    if(pQueueCtrl->pUsedDescr == pQueueCtrl->pCurrentDescr)
+        return MV_FULL;
+
+    return MV_OK;
+}
+
+
+#endif /* __mvEthGbe_h__ */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthRegs.h b/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthRegs.h
new file mode 100644
index 000000000000..b610dd279747
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/eth/gbe/mvEthRegs.h
@@ -0,0 +1,700 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+        this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvEthRegsh
+#define __INCmvEthRegsh
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+
+/****************************************/
+/*        Ethernet Unit Registers       */
+/****************************************/
+#define ETH_REG_BASE 					MV_ETH_REG_BASE
+
+#define ETH_PHY_ADDR_REG(port)              (ETH_REG_BASE(port) + 0x000)
+#define ETH_SMI_REG(port)                   (ETH_REG_BASE(port) + 0x004)
+#define ETH_UNIT_DEF_ADDR_REG(port)         (ETH_REG_BASE(port) + 0x008)
+#define ETH_UNIT_DEF_ID_REG(port)           (ETH_REG_BASE(port) + 0x00c)
+#define ETH_UNIT_RESERVED(port)             (ETH_REG_BASE(port) + 0x014)
+#define ETH_UNIT_INTR_CAUSE_REG(port)       (ETH_REG_BASE(port) + 0x080)
+#define ETH_UNIT_INTR_MASK_REG(port)        (ETH_REG_BASE(port) + 0x084)
+
+
+#define ETH_UNIT_ERROR_ADDR_REG(port)       (ETH_REG_BASE(port) + 0x094)
+#define ETH_UNIT_INT_ADDR_ERROR_REG(port)   (ETH_REG_BASE(port) + 0x098)
+#define ETH_UNIT_CONTROL_REG(port)          (ETH_REG_BASE(port) + 0x0B0)
+
+#define ETH_PORT_CONFIG_REG(port)           (ETH_REG_BASE(port) + 0x400)
+#define ETH_PORT_CONFIG_EXTEND_REG(port)    (ETH_REG_BASE(port) + 0x404)
+#define ETH_MII_SERIAL_PARAM_REG(port)      (ETH_REG_BASE(port) + 0x408)
+#define ETH_GMII_SERIAL_PARAM_REG(port)     (ETH_REG_BASE(port) + 0x40c)
+#define ETH_VLAN_ETHER_TYPE_REG(port)       (ETH_REG_BASE(port) + 0x410)
+#define ETH_MAC_ADDR_LOW_REG(port)          (ETH_REG_BASE(port) + 0x414)
+#define ETH_MAC_ADDR_HIGH_REG(port)         (ETH_REG_BASE(port) + 0x418)
+#define ETH_SDMA_CONFIG_REG(port)           (ETH_REG_BASE(port) + 0x41c)
+#define ETH_DIFF_SERV_PRIO_REG(port, code)  (ETH_REG_BASE(port) + 0x420  + ((code)<<2))
+#define ETH_PORT_SERIAL_CTRL_REG(port)      (ETH_REG_BASE(port) + 0x43c)
+#define ETH_VLAN_TAG_TO_PRIO_REG(port)      (ETH_REG_BASE(port) + 0x440)
+#define ETH_PORT_STATUS_REG(port)           (ETH_REG_BASE(port) + 0x444)
+
+#define ETH_RX_QUEUE_COMMAND_REG(port)      (ETH_REG_BASE(port) + 0x680)
+#define ETH_TX_QUEUE_COMMAND_REG(port)      (ETH_REG_BASE(port) + 0x448)
+
+#define ETH_PORT_SERIAL_CTRL_1_REG(port)    (ETH_REG_BASE(port) + 0x44c)
+#define ETH_PORT_STATUS_1_REG(port)         (ETH_REG_BASE(port) + 0x450)
+#define ETH_PORT_MARVELL_HEADER_REG(port)   (ETH_REG_BASE(port) + 0x454)
+#define ETH_PORT_FIFO_PARAMS_REG(port)      (ETH_REG_BASE(port) + 0x458)
+#define ETH_MAX_TOKEN_BUCKET_SIZE_REG(port) (ETH_REG_BASE(port) + 0x45c)
+#define ETH_INTR_CAUSE_REG(port)            (ETH_REG_BASE(port) + 0x460)
+#define ETH_INTR_CAUSE_EXT_REG(port)        (ETH_REG_BASE(port) + 0x464)
+#define ETH_INTR_MASK_REG(port)             (ETH_REG_BASE(port) + 0x468)
+#define ETH_INTR_MASK_EXT_REG(port)         (ETH_REG_BASE(port) + 0x46c)
+#define ETH_TX_FIFO_URGENT_THRESH_REG(port) (ETH_REG_BASE(port) + 0x474)
+#define ETH_RX_MINIMAL_FRAME_SIZE_REG(port) (ETH_REG_BASE(port) + 0x47c)
+#define ETH_RX_DISCARD_PKTS_CNTR_REG(port)  (ETH_REG_BASE(port) + 0x484)
+#define ETH_RX_OVERRUN_PKTS_CNTR_REG(port)  (ETH_REG_BASE(port) + 0x488)
+#define ETH_INTERNAL_ADDR_ERROR_REG(port)   (ETH_REG_BASE(port) + 0x494)
+#define ETH_TX_FIXED_PRIO_CFG_REG(port)     (ETH_REG_BASE(port) + 0x4dc)
+#define ETH_TX_TOKEN_RATE_CFG_REG(port)     (ETH_REG_BASE(port) + 0x4e0)
+#define ETH_TX_QUEUE_COMMAND1_REG(port)     (ETH_REG_BASE(port) + 0x4e4)
+#define ETH_MAX_TRANSMIT_UNIT_REG(port)     (ETH_REG_BASE(port) + 0x4e8)
+#define ETH_TX_TOKEN_BUCKET_SIZE_REG(port)  (ETH_REG_BASE(port) + 0x4ec)
+#define ETH_TX_TOKEN_BUCKET_COUNT_REG(port) (ETH_REG_BASE(port) + 0x780)
+#define ETH_RX_DESCR_STAT_CMD_REG(port, q)  (ETH_REG_BASE(port) + 0x600 + ((q)<<4))
+#define ETH_RX_BYTE_COUNT_REG(port, q)      (ETH_REG_BASE(port) + 0x604 + ((q)<<4))
+#define ETH_RX_BUF_PTR_REG(port, q)         (ETH_REG_BASE(port) + 0x608 + ((q)<<4))
+#define ETH_RX_CUR_DESC_PTR_REG(port, q)    (ETH_REG_BASE(port) + 0x60c + ((q)<<4))
+#define ETH_TX_CUR_DESC_PTR_REG(port, q)    (ETH_REG_BASE(port) + 0x6c0 + ((q)<<2))
+
+#define ETH_TXQ_TOKEN_COUNT_REG(port, q)    (ETH_REG_BASE(port) + 0x700 + ((q)<<4))
+#define ETH_TXQ_TOKEN_CFG_REG(port, q)      (ETH_REG_BASE(port) + 0x704 + ((q)<<4))
+#define ETH_TXQ_ARBITER_CFG_REG(port, q)    (ETH_REG_BASE(port) + 0x708 + ((q)<<4))
+
+#if (MV_ETH_VERSION >= 4)
+#define ETH_TXQ_CMD_1_REG(port)             (ETH_REG_BASE(port) + 0x4E4)
+#define ETH_EJP_TX_HI_IPG_REG(port)         (ETH_REG_BASE(port) + 0x7A8)
+#define ETH_EJP_TX_LO_IPG_REG(port)         (ETH_REG_BASE(port) + 0x7B8)
+#define ETH_EJP_HI_TKN_LO_PKT_REG(port)     (ETH_REG_BASE(port) + 0x7C0)
+#define ETH_EJP_HI_TKN_ASYNC_PKT_REG(port)  (ETH_REG_BASE(port) + 0x7C4)
+#define ETH_EJP_LO_TKN_ASYNC_PKT_REG(port)  (ETH_REG_BASE(port) + 0x7C8)
+#define ETH_EJP_TX_SPEED_REG(port)          (ETH_REG_BASE(port) + 0x7D0)
+#endif /* MV_ETH_VERSION >= 4 */
+
+#define ETH_MIB_COUNTERS_BASE(port)         (ETH_REG_BASE(port) + 0x1000)
+#define ETH_DA_FILTER_SPEC_MCAST_BASE(port) (ETH_REG_BASE(port) + 0x1400)
+#define ETH_DA_FILTER_OTH_MCAST_BASE(port)  (ETH_REG_BASE(port) + 0x1500)
+#define ETH_DA_FILTER_UCAST_BASE(port)      (ETH_REG_BASE(port) + 0x1600)
+
+/* Phy address register definitions */
+#define ETH_PHY_ADDR_OFFS          0
+#define ETH_PHY_ADDR_MASK          (0x1f <<ETH_PHY_ADDR_OFFS)
+
+/* MIB Counters register definitions */
+#define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW    0x0
+#define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH   0x4
+#define ETH_MIB_BAD_OCTETS_RECEIVED         0x8
+#define ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR   0xc
+#define ETH_MIB_GOOD_FRAMES_RECEIVED        0x10
+#define ETH_MIB_BAD_FRAMES_RECEIVED         0x14
+#define ETH_MIB_BROADCAST_FRAMES_RECEIVED   0x18
+#define ETH_MIB_MULTICAST_FRAMES_RECEIVED   0x1c
+#define ETH_MIB_FRAMES_64_OCTETS            0x20
+#define ETH_MIB_FRAMES_65_TO_127_OCTETS     0x24
+#define ETH_MIB_FRAMES_128_TO_255_OCTETS    0x28
+#define ETH_MIB_FRAMES_256_TO_511_OCTETS    0x2c
+#define ETH_MIB_FRAMES_512_TO_1023_OCTETS   0x30
+#define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS   0x34
+#define ETH_MIB_GOOD_OCTETS_SENT_LOW        0x38
+#define ETH_MIB_GOOD_OCTETS_SENT_HIGH       0x3c
+#define ETH_MIB_GOOD_FRAMES_SENT            0x40
+#define ETH_MIB_EXCESSIVE_COLLISION         0x44
+#define ETH_MIB_MULTICAST_FRAMES_SENT       0x48
+#define ETH_MIB_BROADCAST_FRAMES_SENT       0x4c
+#define ETH_MIB_UNREC_MAC_CONTROL_RECEIVED  0x50
+#define ETH_MIB_FC_SENT                     0x54
+#define ETH_MIB_GOOD_FC_RECEIVED            0x58
+#define ETH_MIB_BAD_FC_RECEIVED             0x5c
+#define ETH_MIB_UNDERSIZE_RECEIVED          0x60
+#define ETH_MIB_FRAGMENTS_RECEIVED          0x64
+#define ETH_MIB_OVERSIZE_RECEIVED           0x68
+#define ETH_MIB_JABBER_RECEIVED             0x6c
+#define ETH_MIB_MAC_RECEIVE_ERROR           0x70
+#define ETH_MIB_BAD_CRC_EVENT               0x74
+#define ETH_MIB_COLLISION                   0x78
+#define ETH_MIB_LATE_COLLISION              0x7c
+
+
+/****************************************/
+/*        Ethernet Unit Register BITs   */
+/****************************************/
+
+#define ETH_RXQ_ENABLE_OFFSET               0
+#define ETH_RXQ_ENABLE_MASK                 (0x000000FF << ETH_RXQ_ENABLE_OFFSET)
+
+#define ETH_RXQ_DISABLE_OFFSET              8
+#define ETH_RXQ_DISABLE_MASK                (0x000000FF << ETH_RXQ_DISABLE_OFFSET)
+
+/***** BITs of Transmit Queue Command (TQC) register *****/
+#define ETH_TXQ_ENABLE_OFFSET               0
+#define ETH_TXQ_ENABLE_MASK                 (0x000000FF << ETH_TXQ_ENABLE_OFFSET)
+
+#define ETH_TXQ_DISABLE_OFFSET              8
+#define ETH_TXQ_DISABLE_MASK                (0x000000FF << ETH_TXQ_DISABLE_OFFSET)
+
+#if (MV_ETH_VERSION >= 4)
+#define ETH_TX_EJP_RESET_BIT                0
+#define ETH_TX_EJP_RESET_MASK               (1 << ETH_TX_EJP_RESET_BIT)
+
+#define ETH_TX_EJP_ENABLE_BIT               2
+#define ETH_TX_EJP_ENABLE_MASK              (1 << ETH_TX_EJP_ENABLE_BIT)
+
+#define ETH_TX_LEGACY_WRR_BIT               3
+#define ETH_TX_LEGACY_WRR_MASK              (1 << ETH_TX_LEGACY_WRR_BIT)
+#endif /* (MV_ETH_VERSION >= 4) */
+
+/***** BITs of Ethernet Port Status reg (PSR) *****/
+#define ETH_LINK_UP_BIT                     1
+#define ETH_LINK_UP_MASK                    (1<<ETH_LINK_UP_BIT)
+
+#define ETH_FULL_DUPLEX_BIT                 2
+#define ETH_FULL_DUPLEX_MASK                (1<<ETH_FULL_DUPLEX_BIT)
+
+#define ETH_ENABLE_RCV_FLOW_CTRL_BIT        3
+#define ETH_ENABLE_RCV_FLOW_CTRL_MASK       (1<<ETH_ENABLE_RCV_FLOW_CTRL_BIT)
+
+#define ETH_GMII_SPEED_1000_BIT             4
+#define ETH_GMII_SPEED_1000_MASK            (1<<ETH_GMII_SPEED_1000_BIT)
+
+#define ETH_MII_SPEED_100_BIT               5
+#define ETH_MII_SPEED_100_MASK              (1<<ETH_MII_SPEED_100_BIT)
+
+#define ETH_TX_IN_PROGRESS_BIT              7
+#define ETH_TX_IN_PROGRESS_MASK             (1<<ETH_TX_IN_PROGRESS_BIT)
+
+#define ETH_TX_FIFO_EMPTY_BIT               10
+#define ETH_TX_FIFO_EMPTY_MASK              (1<<ETH_TX_FIFO_EMPTY_BIT)
+
+/***** BITs of Ethernet Port Status 1 reg (PS1R) *****/
+#define ETH_AUTO_NEG_DONE_BIT               4
+#define ETH_AUTO_NEG_DONE_MASK              (1<<ETH_AUTO_NEG_DONE_BIT)
+
+#define ETH_SERDES_PLL_LOCKED_BIT           6
+#define ETH_SERDES_PLL_LOCKED_MASK          (1<<ETH_SERDES_PLL_LOCKED_BIT)
+
+/***** BITs of Port Configuration reg (PxCR) *****/
+#define ETH_UNICAST_PROMISCUOUS_MODE_BIT    0
+#define ETH_UNICAST_PROMISCUOUS_MODE_MASK   (1<<ETH_UNICAST_PROMISCUOUS_MODE_BIT)
+
+#define ETH_DEF_RX_QUEUE_OFFSET             1
+#define ETH_DEF_RX_QUEUE_ALL_MASK           (0x7<<ETH_DEF_RX_QUEUE_OFFSET)
+#define ETH_DEF_RX_QUEUE_MASK(queue)        ((queue)<<ETH_DEF_RX_QUEUE_OFFSET)
+
+#define ETH_DEF_RX_ARP_QUEUE_OFFSET         4
+#define ETH_DEF_RX_ARP_QUEUE_ALL_MASK       (0x7<<ETH_DEF_RX_ARP_QUEUE_OFFSET)
+#define ETH_DEF_RX_ARP_QUEUE_MASK(queue)    ((queue)<<ETH_DEF_RX_ARP_QUEUE_OFFSET)
+
+#define ETH_REJECT_NOT_IP_ARP_BCAST_BIT     7
+#define ETH_REJECT_NOT_IP_ARP_BCAST_MASK    (1<<ETH_REJECT_NOT_IP_ARP_BCAST_BIT)
+
+#define ETH_REJECT_IP_BCAST_BIT             8
+#define ETH_REJECT_IP_BCAST_MASK            (1<<ETH_REJECT_IP_BCAST_BIT)
+
+#define ETH_REJECT_ARP_BCAST_BIT            9
+#define ETH_REJECT_ARP_BCAST_MASK           (1<<ETH_REJECT_ARP_BCAST_BIT)
+
+#define ETH_TX_NO_SET_ERROR_SUMMARY_BIT     12
+#define ETH_TX_NO_SET_ERROR_SUMMARY_MASK    (1<<ETH_TX_NO_SET_ERROR_SUMMARY_BIT)
+
+#define ETH_CAPTURE_TCP_FRAMES_ENABLE_BIT   14
+#define ETH_CAPTURE_TCP_FRAMES_ENABLE_MASK  (1<<ETH_CAPTURE_TCP_FRAMES_ENABLE_BIT)
+
+#define ETH_CAPTURE_UDP_FRAMES_ENABLE_BIT   15
+#define ETH_CAPTURE_UDP_FRAMES_ENABLE_MASK  (1<<ETH_CAPTURE_UDP_FRAMES_ENABLE_BIT)
+
+#define ETH_DEF_RX_TCP_QUEUE_OFFSET         16
+#define ETH_DEF_RX_TCP_QUEUE_ALL_MASK       (0x7<<ETH_DEF_RX_TCP_QUEUE_OFFSET)
+#define ETH_DEF_RX_TCP_QUEUE_MASK(queue)    ((queue)<<ETH_DEF_RX_TCP_QUEUE_OFFSET)
+
+#define ETH_DEF_RX_UDP_QUEUE_OFFSET         19
+#define ETH_DEF_RX_UDP_QUEUE_ALL_MASK       (0x7<<ETH_DEF_RX_UDP_QUEUE_OFFSET)
+#define ETH_DEF_RX_UDP_QUEUE_MASK(queue)    ((queue)<<ETH_DEF_RX_UDP_QUEUE_OFFSET)
+
+#define ETH_DEF_RX_BPDU_QUEUE_OFFSET        22
+#define ETH_DEF_RX_BPDU_QUEUE_ALL_MASK      (0x7<<ETH_DEF_RX_BPDU_QUEUE_OFFSET)
+#define ETH_DEF_RX_BPDU_QUEUE_MASK(queue)   ((queue)<<ETH_DEF_RX_BPDU_QUEUE_OFFSET)
+
+#define ETH_RX_CHECKSUM_MODE_OFFSET         25
+#define ETH_RX_CHECKSUM_NO_PSEUDO_HDR       (0<<ETH_RX_CHECKSUM_MODE_OFFSET)
+#define ETH_RX_CHECKSUM_WITH_PSEUDO_HDR     (1<<ETH_RX_CHECKSUM_MODE_OFFSET)
+
+/***** BITs of Port Configuration Extend reg (PxCXR) *****/
+#define ETH_CAPTURE_SPAN_BPDU_ENABLE_BIT    1
+#define ETH_CAPTURE_SPAN_BPDU_ENABLE_MASK   (1<<ETH_CAPTURE_SPAN_BPDU_ENABLE_BIT)
+
+#define ETH_TX_DISABLE_GEN_CRC_BIT          3
+#define ETH_TX_DISABLE_GEN_CRC_MASK         (1<<ETH_TX_DISABLE_GEN_CRC_BIT)
+
+/***** BITs of Tx/Rx queue command reg (RQCR/TQCR) *****/
+#define ETH_QUEUE_ENABLE_OFFSET             0
+#define ETH_QUEUE_ENABLE_ALL_MASK           (0xFF<<ETH_QUEUE_ENABLE_OFFSET)
+#define ETH_QUEUE_ENABLE_MASK(queue)        (1<<((queue)+ETH_QUEUE_ENABLE_OFFSET))
+
+#define ETH_QUEUE_DISABLE_OFFSET            8
+#define ETH_QUEUE_DISABLE_ALL_MASK          (0xFF<<ETH_QUEUE_DISABLE_OFFSET)
+#define ETH_QUEUE_DISABLE_MASK(queue)       (1<<((queue)+ETH_QUEUE_DISABLE_OFFSET))
+
+
+/***** BITs of Port Sdma Configuration reg (SDCR) *****/
+#define ETH_RX_FRAME_INTERRUPT_BIT          0
+#define ETH_RX_FRAME_INTERRUPT_MASK         (1<<ETH_RX_FRAME_INTERRUPT_BIT)
+
+#define ETH_BURST_SIZE_1_64BIT_VALUE        0
+#define ETH_BURST_SIZE_2_64BIT_VALUE        1
+#define ETH_BURST_SIZE_4_64BIT_VALUE        2
+#define ETH_BURST_SIZE_8_64BIT_VALUE        3
+#define ETH_BURST_SIZE_16_64BIT_VALUE       4
+
+#define ETH_RX_BURST_SIZE_OFFSET            1
+#define ETH_RX_BURST_SIZE_ALL_MASK          (0x7<<ETH_RX_BURST_SIZE_OFFSET)
+#define ETH_RX_BURST_SIZE_MASK(burst)       ((burst)<<ETH_RX_BURST_SIZE_OFFSET)
+
+#define ETH_RX_NO_DATA_SWAP_BIT             4
+#define ETH_RX_NO_DATA_SWAP_MASK            (1<<ETH_RX_NO_DATA_SWAP_BIT)
+#define ETH_RX_DATA_SWAP_MASK               (0<<ETH_RX_NO_DATA_SWAP_BIT)
+
+#define ETH_TX_NO_DATA_SWAP_BIT             5
+#define ETH_TX_NO_DATA_SWAP_MASK            (1<<ETH_TX_NO_DATA_SWAP_BIT)
+#define ETH_TX_DATA_SWAP_MASK               (0<<ETH_TX_NO_DATA_SWAP_BIT)
+
+#define ETH_DESC_SWAP_BIT                   6
+#define ETH_DESC_SWAP_MASK                  (1<<ETH_DESC_SWAP_BIT)
+#define ETH_NO_DESC_SWAP_MASK               (0<<ETH_DESC_SWAP_BIT)
+
+#define ETH_RX_INTR_COAL_OFFSET             7
+#define ETH_RX_INTR_COAL_ALL_MASK           (0x3fff<<ETH_RX_INTR_COAL_OFFSET)
+#define ETH_RX_INTR_COAL_MASK(value)        (((value)<<ETH_RX_INTR_COAL_OFFSET)  \
+                                             & ETH_RX_INTR_COAL_ALL_MASK)
+
+#define ETH_TX_BURST_SIZE_OFFSET            22
+#define ETH_TX_BURST_SIZE_ALL_MASK          (0x7<<ETH_TX_BURST_SIZE_OFFSET)
+#define ETH_TX_BURST_SIZE_MASK(burst)       ((burst)<<ETH_TX_BURST_SIZE_OFFSET)
+
+#define ETH_RX_INTR_COAL_MSB_BIT            25
+#define ETH_RX_INTR_COAL_MSB_MASK           (1<<ETH_RX_INTR_COAL_MSB_BIT)
+
+/* BITs Port #x Tx FIFO Urgent Threshold (PxTFUT) */
+#define ETH_TX_INTR_COAL_OFFSET             4
+#define ETH_TX_INTR_COAL_ALL_MASK           (0x3fff << ETH_TX_INTR_COAL_OFFSET)
+#define ETH_TX_INTR_COAL_MASK(value)        (((value) << ETH_TX_INTR_COAL_OFFSET)  \
+                                             & ETH_TX_INTR_COAL_ALL_MASK)
+
+/* BITs of Port Serial Control reg (PSCR) */
+#define ETH_PORT_ENABLE_BIT                 0
+#define ETH_PORT_ENABLE_MASK                (1<<ETH_PORT_ENABLE_BIT)
+
+#define ETH_FORCE_LINK_PASS_BIT             1
+#define ETH_FORCE_LINK_PASS_MASK            (1<<ETH_FORCE_LINK_PASS_BIT)
+
+#define ETH_DISABLE_DUPLEX_AUTO_NEG_BIT     2
+#define ETH_DISABLE_DUPLEX_AUTO_NEG_MASK    (1<<ETH_DISABLE_DUPLEX_AUTO_NEG_BIT)
+
+#define ETH_DISABLE_FC_AUTO_NEG_BIT         3
+#define ETH_DISABLE_FC_AUTO_NEG_MASK        (1<<ETH_DISABLE_FC_AUTO_NEG_BIT)
+
+#define ETH_ADVERTISE_SYM_FC_BIT            4
+#define ETH_ADVERTISE_SYM_FC_MASK           (1<<ETH_ADVERTISE_SYM_FC_BIT)
+
+#define ETH_TX_FC_MODE_OFFSET               5
+#define ETH_TX_FC_MODE_MASK                 (3<<ETH_TX_FC_MODE_OFFSET)
+#define ETH_TX_FC_NO_PAUSE                  (0<<ETH_TX_FC_MODE_OFFSET)
+#define ETH_TX_FC_SEND_PAUSE                (1<<ETH_TX_FC_MODE_OFFSET)
+
+#define ETH_TX_BP_MODE_OFFSET               7
+#define ETH_TX_BP_MODE_MASK                 (3<<ETH_TX_BP_MODE_OFFSET)
+#define ETH_TX_BP_NO_JAM                    (0<<ETH_TX_BP_MODE_OFFSET)
+#define ETH_TX_BP_SEND_JAM                  (1<<ETH_TX_BP_MODE_OFFSET)
+
+#define ETH_DO_NOT_FORCE_LINK_FAIL_BIT      10
+#define ETH_DO_NOT_FORCE_LINK_FAIL_MASK     (1<<ETH_DO_NOT_FORCE_LINK_FAIL_BIT)
+
+#define ETH_RETRANSMIT_FOREVER_BIT          11
+#define ETH_RETRANSMIT_FOREVER_MASK         (1<<ETH_RETRANSMIT_FOREVER_BIT)
+
+#define ETH_DISABLE_SPEED_AUTO_NEG_BIT      13
+#define ETH_DISABLE_SPEED_AUTO_NEG_MASK     (1<<ETH_DISABLE_SPEED_AUTO_NEG_BIT)
+
+#define ETH_DTE_ADVERT_BIT                  14
+#define ETH_DTE_ADVERT_MASK                 (1<<ETH_DTE_ADVERT_BIT)
+
+#define ETH_MII_PHY_MODE_BIT                15
+#define ETH_MII_PHY_MODE_MAC                (0<<ETH_MII_PHY_MODE_BIT)
+#define ETH_MII_PHY_MODE_PHY                (1<<ETH_MII_PHY_MODE_BIT)
+
+#define ETH_MII_SOURCE_SYNCH_BIT            16
+#define ETH_MII_STANDARD_SYNCH              (0<<ETH_MII_SOURCE_SYNCH_BIT)
+#define ETH_MII_400Mbps_SYNCH               (1<<ETH_MII_SOURCE_CLK_BIT)
+
+#define ETH_MAX_RX_PACKET_SIZE_OFFSET       17
+#define ETH_MAX_RX_PACKET_SIZE_MASK         (7<<ETH_MAX_RX_PACKET_SIZE_OFFSET)
+#define ETH_MAX_RX_PACKET_1518BYTE          (0<<ETH_MAX_RX_PACKET_SIZE_OFFSET)
+#define ETH_MAX_RX_PACKET_1522BYTE          (1<<ETH_MAX_RX_PACKET_SIZE_OFFSET)
+#define ETH_MAX_RX_PACKET_1552BYTE          (2<<ETH_MAX_RX_PACKET_SIZE_OFFSET)
+#define ETH_MAX_RX_PACKET_9022BYTE          (3<<ETH_MAX_RX_PACKET_SIZE_OFFSET)
+#define ETH_MAX_RX_PACKET_9192BYTE          (4<<ETH_MAX_RX_PACKET_SIZE_OFFSET)
+#define ETH_MAX_RX_PACKET_9700BYTE          (5<<ETH_MAX_RX_PACKET_SIZE_OFFSET)
+
+#define ETH_SET_FULL_DUPLEX_BIT             21
+#define ETH_SET_FULL_DUPLEX_MASK            (1<<ETH_SET_FULL_DUPLEX_BIT)
+
+#define ETH_SET_FLOW_CTRL_BIT               22
+#define ETH_SET_FLOW_CTRL_MASK              (1<<ETH_SET_FLOW_CTRL_BIT)
+
+#define ETH_SET_GMII_SPEED_1000_BIT         23
+#define ETH_SET_GMII_SPEED_1000_MASK        (1<<ETH_SET_GMII_SPEED_1000_BIT)
+
+#define ETH_SET_MII_SPEED_100_BIT           24
+#define ETH_SET_MII_SPEED_100_MASK          (1<<ETH_SET_MII_SPEED_100_BIT)
+
+/* BITs of Port Serial Control 1 reg (PSC1R) */
+#define ETH_PSC_ENABLE_BIT                  2
+#define ETH_PSC_ENABLE_MASK                 (1<<ETH_PSC_ENABLE_BIT)
+
+#define ETH_RGMII_ENABLE_BIT                3
+#define ETH_RGMII_ENABLE_MASK               (1<<ETH_RGMII_ENABLE_BIT)
+
+#define ETH_PORT_RESET_BIT                  4
+#define ETH_PORT_RESET_MASK                 (1<<ETH_PORT_RESET_BIT)
+
+#define ETH_INBAND_AUTO_NEG_ENABLE_BIT      6
+#define ETH_INBAND_AUTO_NEG_ENABLE_MASK     (1<<ETH_INBAND_AUTO_NEG_ENABLE_BIT)
+
+#define ETH_INBAND_AUTO_NEG_BYPASS_BIT      7
+#define ETH_INBAND_AUTO_NEG_BYPASS_MASK     (1<<ETH_INBAND_AUTO_NEG_BYPASS_BIT)
+
+#define ETH_INBAND_AUTO_NEG_START_BIT       8
+#define ETH_INBAND_AUTO_NEG_START_MASK      (1<<ETH_INBAND_AUTO_NEG_START_BIT)
+
+#define ETH_PORT_TYPE_BIT                   11
+#define ETH_PORT_TYPE_1000BasedX_MASK       (1<<ETH_PORT_TYPE_BIT)
+
+#define ETH_SGMII_MODE_BIT                  12
+#define ETH_1000BaseX_MODE_MASK             (0<<ETH_SGMII_MODE_BIT)
+#define ETH_SGMII_MODE_MASK                 (1<<ETH_SGMII_MODE_BIT)
+
+#define ETH_MGMII_MODE_BIT                  13
+
+#define ETH_EN_MII_ODD_PRE_BIT		    22
+#define ETH_EN_MII_ODD_PRE_MASK		    (1<<ETH_EN_MII_ODD_PRE_BIT)
+
+/* BITs of SDMA Descriptor Command/Status field */
+#if defined(MV_CPU_BE)
+typedef struct _ethRxDesc
+{
+    MV_U16      byteCnt    ;    /* Descriptor buffer byte count     */
+    MV_U16      bufSize    ;    /* Buffer size                      */
+    MV_U32      cmdSts     ;    /* Descriptor command status        */
+    MV_U32      nextDescPtr;    /* Next descriptor pointer          */
+    MV_U32      bufPtr     ;    /* Descriptor buffer pointer        */
+    MV_ULONG    returnInfo ;    /* User resource return information */
+} ETH_RX_DESC;
+
+typedef struct _ethTxDesc
+{
+    MV_U16      byteCnt    ;    /* Descriptor buffer byte count     */
+    MV_U16      L4iChk     ;    /* CPU provided TCP Checksum        */
+    MV_U32      cmdSts     ;    /* Descriptor command status        */
+    MV_U32      nextDescPtr;    /* Next descriptor pointer          */
+    MV_U32      bufPtr     ;    /* Descriptor buffer pointer        */
+    MV_ULONG    returnInfo ;    /* User resource return information */
+    MV_U8*      alignBufPtr;    /* Pointer to 8 byte aligned buffer */
+} ETH_TX_DESC;
+
+#elif defined(MV_CPU_LE)
+
+typedef struct _ethRxDesc
+{
+    MV_U32      cmdSts     ;    /* Descriptor command status        */
+    MV_U16      bufSize    ;    /* Buffer size                      */
+    MV_U16      byteCnt    ;    /* Descriptor buffer byte count     */
+    MV_U32      bufPtr     ;    /* Descriptor buffer pointer        */
+    MV_U32      nextDescPtr;    /* Next descriptor pointer          */
+    MV_ULONG    returnInfo ;    /* User resource return information */
+} ETH_RX_DESC;
+
+typedef struct _ethTxDesc
+{
+    MV_U32      cmdSts     ;    /* Descriptor command status        */
+    MV_U16      L4iChk     ;    /* CPU provided TCP Checksum        */
+    MV_U16      byteCnt    ;    /* Descriptor buffer byte count     */
+    MV_U32      bufPtr     ;    /* Descriptor buffer pointer        */
+    MV_U32      nextDescPtr;    /* Next descriptor pointer          */
+    MV_ULONG    returnInfo ;    /* User resource return information */
+    MV_U8*      alignBufPtr;    /* Pointer to 32 byte aligned buffer */
+} ETH_TX_DESC;
+
+#else
+#error "MV_CPU_BE or MV_CPU_LE must be defined"
+#endif /* MV_CPU_BE || MV_CPU_LE */
+
+/* Buffer offset from buffer pointer */
+#define ETH_RX_BUF_OFFSET               0x2
+
+
+/* Tx & Rx descriptor bits */
+#define ETH_ERROR_SUMMARY_BIT               0
+#define ETH_ERROR_SUMMARY_MASK              (1<<ETH_ERROR_SUMMARY_BIT)
+
+#define ETH_BUFFER_OWNER_BIT                31
+#define ETH_BUFFER_OWNED_BY_DMA             (1<<ETH_BUFFER_OWNER_BIT)
+#define ETH_BUFFER_OWNED_BY_HOST            (0<<ETH_BUFFER_OWNER_BIT)
+
+/* Tx descriptor bits */
+#define ETH_TX_ERROR_CODE_OFFSET            1
+#define ETH_TX_ERROR_CODE_MASK              (3<<ETH_TX_ERROR_CODE_OFFSET)
+#define ETH_TX_LATE_COLLISION_ERROR         (0<<ETH_TX_ERROR_CODE_OFFSET)
+#define ETH_TX_UNDERRUN_ERROR               (1<<ETH_TX_ERROR_CODE_OFFSET)
+#define ETH_TX_EXCESSIVE_COLLISION_ERROR    (2<<ETH_TX_ERROR_CODE_OFFSET)
+
+#define ETH_TX_LLC_SNAP_FORMAT_BIT          9
+#define ETH_TX_LLC_SNAP_FORMAT_MASK         (1<<ETH_TX_LLC_SNAP_FORMAT_BIT)
+
+#define ETH_TX_IP_FRAG_BIT                  10
+#define ETH_TX_IP_FRAG_MASK                 (1<<ETH_TX_IP_FRAG_BIT)
+#define ETH_TX_IP_FRAG                      (0<<ETH_TX_IP_FRAG_BIT)
+#define ETH_TX_IP_NO_FRAG                   (1<<ETH_TX_IP_FRAG_BIT)
+
+#define ETH_TX_IP_HEADER_LEN_OFFSET         11
+#define ETH_TX_IP_HEADER_LEN_ALL_MASK       (0xF<<ETH_TX_IP_HEADER_LEN_OFFSET)
+#define ETH_TX_IP_HEADER_LEN_MASK(len)      ((len)<<ETH_TX_IP_HEADER_LEN_OFFSET)
+
+#define ETH_TX_VLAN_TAGGED_FRAME_BIT        15
+#define ETH_TX_VLAN_TAGGED_FRAME_MASK       (1<<ETH_TX_VLAN_TAGGED_FRAME_BIT)
+
+#define ETH_TX_L4_TYPE_BIT                  16
+#define ETH_TX_L4_TCP_TYPE                  (0<<ETH_TX_L4_TYPE_BIT)
+#define ETH_TX_L4_UDP_TYPE                  (1<<ETH_TX_L4_TYPE_BIT)
+
+#define ETH_TX_GENERATE_L4_CHKSUM_BIT       17
+#define ETH_TX_GENERATE_L4_CHKSUM_MASK      (1<<ETH_TX_GENERATE_L4_CHKSUM_BIT)
+
+#define ETH_TX_GENERATE_IP_CHKSUM_BIT       18
+#define ETH_TX_GENERATE_IP_CHKSUM_MASK      (1<<ETH_TX_GENERATE_IP_CHKSUM_BIT)
+
+#define ETH_TX_ZERO_PADDING_BIT             19
+#define ETH_TX_ZERO_PADDING_MASK            (1<<ETH_TX_ZERO_PADDING_BIT)
+
+#define ETH_TX_LAST_DESC_BIT                20
+#define ETH_TX_LAST_DESC_MASK               (1<<ETH_TX_LAST_DESC_BIT)
+
+#define ETH_TX_FIRST_DESC_BIT               21
+#define ETH_TX_FIRST_DESC_MASK              (1<<ETH_TX_FIRST_DESC_BIT)
+
+#define ETH_TX_GENERATE_CRC_BIT             22
+#define ETH_TX_GENERATE_CRC_MASK            (1<<ETH_TX_GENERATE_CRC_BIT)
+
+#define ETH_TX_ENABLE_INTERRUPT_BIT         23
+#define ETH_TX_ENABLE_INTERRUPT_MASK        (1<<ETH_TX_ENABLE_INTERRUPT_BIT)
+
+#define ETH_TX_AUTO_MODE_BIT                30
+#define ETH_TX_AUTO_MODE_MASK               (1<<ETH_TX_AUTO_MODE_BIT)
+
+
+/* Rx descriptor bits */
+#define ETH_RX_ERROR_CODE_OFFSET            1
+#define ETH_RX_ERROR_CODE_MASK              (3<<ETH_RX_ERROR_CODE_OFFSET)
+#define ETH_RX_CRC_ERROR                    (0<<ETH_RX_ERROR_CODE_OFFSET)
+#define ETH_RX_OVERRUN_ERROR                (1<<ETH_RX_ERROR_CODE_OFFSET)
+#define ETH_RX_MAX_FRAME_LEN_ERROR          (2<<ETH_RX_ERROR_CODE_OFFSET)
+#define ETH_RX_RESOURCE_ERROR               (3<<ETH_RX_ERROR_CODE_OFFSET)
+
+#define ETH_RX_L4_CHECKSUM_OFFSET           3
+#define ETH_RX_L4_CHECKSUM_MASK             (0xffff<<ETH_RX_L4_CHECKSUM_OFFSET)
+
+#define ETH_RX_VLAN_TAGGED_FRAME_BIT        19
+#define ETH_RX_VLAN_TAGGED_FRAME_MASK       (1<<ETH_RX_VLAN_TAGGED_FRAME_BIT)
+
+#define ETH_RX_BPDU_FRAME_BIT               20
+#define ETH_RX_BPDU_FRAME_MASK              (1<<ETH_RX_BPDU_FRAME_BIT)
+
+#define ETH_RX_L4_TYPE_OFFSET               21
+#define ETH_RX_L4_TYPE_MASK                 (3<<ETH_RX_L4_TYPE_OFFSET)
+#define ETH_RX_L4_TCP_TYPE                  (0<<ETH_RX_L4_TYPE_OFFSET)
+#define ETH_RX_L4_UDP_TYPE                  (1<<ETH_RX_L4_TYPE_OFFSET)
+#define ETH_RX_L4_OTHER_TYPE                (2<<ETH_RX_L4_TYPE_OFFSET)
+
+#define ETH_RX_NOT_LLC_SNAP_FORMAT_BIT      23
+#define ETH_RX_NOT_LLC_SNAP_FORMAT_MASK     (1<<ETH_RX_NOT_LLC_SNAP_FORMAT_BIT)
+
+#define ETH_RX_IP_FRAME_TYPE_BIT            24
+#define ETH_RX_IP_FRAME_TYPE_MASK           (1<<ETH_RX_IP_FRAME_TYPE_BIT)
+
+#define ETH_RX_IP_HEADER_OK_BIT             25
+#define ETH_RX_IP_HEADER_OK_MASK            (1<<ETH_RX_IP_HEADER_OK_BIT)
+
+#define ETH_RX_LAST_DESC_BIT                26
+#define ETH_RX_LAST_DESC_MASK               (1<<ETH_RX_LAST_DESC_BIT)
+
+#define ETH_RX_FIRST_DESC_BIT               27
+#define ETH_RX_FIRST_DESC_MASK              (1<<ETH_RX_FIRST_DESC_BIT)
+
+#define ETH_RX_UNKNOWN_DA_BIT               28
+#define ETH_RX_UNKNOWN_DA_MASK              (1<<ETH_RX_UNKNOWN_DA_BIT)
+
+#define ETH_RX_ENABLE_INTERRUPT_BIT         29
+#define ETH_RX_ENABLE_INTERRUPT_MASK        (1<<ETH_RX_ENABLE_INTERRUPT_BIT)
+
+#define ETH_RX_L4_CHECKSUM_OK_BIT           30
+#define ETH_RX_L4_CHECKSUM_OK_MASK          (1<<ETH_RX_L4_CHECKSUM_OK_BIT)
+
+/* Rx descriptor bufSize field */
+#define ETH_RX_IP_FRAGMENTED_FRAME_BIT      2
+#define ETH_RX_IP_FRAGMENTED_FRAME_MASK     (1<<ETH_RX_IP_FRAGMENTED_FRAME_BIT)
+
+#define ETH_RX_BUFFER_MASK                  0xFFF8
+
+
+/* Ethernet Cause Register BITs */
+#define ETH_CAUSE_RX_READY_SUM_BIT          0
+#define ETH_CAUSE_EXTEND_BIT                1
+
+#define ETH_CAUSE_RX_READY_OFFSET           2
+#define ETH_CAUSE_RX_READY_BIT(queue)       (ETH_CAUSE_RX_READY_OFFSET + (queue))
+#define ETH_CAUSE_RX_READY_MASK(queue)      (1 << (ETH_CAUSE_RX_READY_BIT(queue)))
+
+#define ETH_CAUSE_RX_ERROR_SUM_BIT          10
+#define ETH_CAUSE_RX_ERROR_OFFSET           11
+#define ETH_CAUSE_RX_ERROR_BIT(queue)       (ETH_CAUSE_RX_ERROR_OFFSET + (queue))
+#define ETH_CAUSE_RX_ERROR_MASK(queue)      (1 << (ETH_CAUSE_RX_ERROR_BIT(queue)))
+
+#define ETH_CAUSE_TX_END_BIT                19
+#define ETH_CAUSE_SUM_BIT                   31
+
+/* Ethernet Cause Extended Register BITs */
+#define ETH_CAUSE_TX_BUF_OFFSET             0
+#define ETH_CAUSE_TX_BUF_BIT(queue)         (ETH_CAUSE_TX_BUF_OFFSET + (queue))
+#define ETH_CAUSE_TX_BUF_MASK(queue)        (1 << (ETH_CAUSE_TX_BUF_BIT(queue)))
+
+#define ETH_CAUSE_TX_ERROR_OFFSET           8
+#define ETH_CAUSE_TX_ERROR_BIT(queue)       (ETH_CAUSE_TX_ERROR_OFFSET + (queue))
+#define ETH_CAUSE_TX_ERROR_MASK(queue)      (1 << (ETH_CAUSE_TX_ERROR_BIT(queue)))
+
+#define ETH_CAUSE_PHY_STATUS_CHANGE_BIT     16
+#define ETH_CAUSE_RX_OVERRUN_BIT            18
+#define ETH_CAUSE_TX_UNDERRUN_BIT           19
+#define ETH_CAUSE_LINK_STATE_CHANGE_BIT     20
+#define ETH_CAUSE_INTERNAL_ADDR_ERR_BIT     23
+#define ETH_CAUSE_EXTEND_SUM_BIT            31
+
+/* Marvell Header Register */
+/* Marvell Header register bits */
+#define ETH_MVHDR_EN_BIT                    0
+#define ETH_MVHDR_EN_MASK                   (1 << ETH_MVHDR_EN_BIT)
+
+#define ETH_MVHDR_DAPREFIX_BIT              1
+#define ETH_MVHDR_DAPREFIX_MASK             (0x3 << ETH_MVHDR_DAPREFIX_BIT)
+#define ETH_MVHDR_DAPREFIX_PRI_1_2          (0x1 << ETH_MVHDR_DAPREFIX_BIT)
+#define ETH_MVHDR_DAPREFIX_DBNUM_PRI        (0x2 << ETH_MVHDR_DAPREFIX_BIT)
+#define ETH_MVHDR_DAPREFIX_SPID_PRI         (0x3 << ETH_MVHDR_DAPREFIX_BIT)
+
+#define ETH_MVHDR_MHMASK_BIT                8
+#define ETH_MVHDR_MHMASK_MASK               (0x3 << ETH_MVHDR_MHMASK_BIT)
+#define ETH_MVHDR_MHMASK_8_QUEUE            (0x0 << ETH_MVHDR_MHMASK_BIT)
+#define ETH_MVHDR_MHMASK_4_QUEUE            (0x1 << ETH_MVHDR_MHMASK_BIT)
+#define ETH_MVHDR_MHMASK_2_QUEUE            (0x3 << ETH_MVHDR_MHMASK_BIT)
+
+
+/* Relevant for 6183 ONLY */
+#define ETH_UNIT_PORTS_PADS_CALIB_0_REG     (MV_ETH_REG_BASE(0) + 0x0A0)
+#define ETH_UNIT_PORTS_PADS_CALIB_1_REG     (MV_ETH_REG_BASE(0) + 0x0A4)
+#define ETH_UNIT_PORTS_PADS_CALIB_2_REG     (MV_ETH_REG_BASE(0) + 0x0A8)
+/* Ethernet Unit Ports Pads Calibration_REG (ETH_UNIT_PORTS_PADS_CALIB_x_REG)  */
+#define ETH_ETHERNET_PAD_CLIB_DRVN_OFFS		0
+#define ETH_ETHERNET_PAD_CLIB_DRVN_MASK		(0x1F << ETH_ETHERNET_PAD_CLIB_DRVN_OFFS)
+
+#define ETH_ETHERNET_PAD_CLIB_DRVP_OFFS         5
+#define ETH_ETHERNET_PAD_CLIB_DRVP_MASK         (0x1F << ETH_ETHERNET_PAD_CLIB_DRVP_OFFS)
+
+#define ETH_ETHERNET_PAD_CLIB_TUNEEN_OFFS       16
+#define ETH_ETHERNET_PAD_CLIB_TUNEEN_MASK       (0x1 << ETH_ETHERNET_PAD_CLIB_TUNEEN_OFFS)
+
+#define ETH_ETHERNET_PAD_CLIB_LOCKN_OFFS        17
+#define ETH_ETHERNET_PAD_CLIB_LOCKN_MASK        (0x1F << ETH_ETHERNET_PAD_CLIB_LOCKN_OFFS)
+
+#define ETH_ETHERNET_PAD_CLIB_OFFST_OFFS        24
+#define ETH_ETHERNET_PAD_CLIB_OFFST_MASK        (0x1F << ETH_ETHERNET_PAD_CLIB_OFFST_OFFS)
+
+#define ETH_ETHERNET_PAD_CLIB_WR_EN_OFFS        31
+#define ETH_ETHERNET_PAD_CLIB_WR_EN_MASK        (0x1  << ETH_ETHERNET_PAD_CLIB_WR_EN_OFFS)
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INCmvEthRegsh */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/eth/mvEth.h b/crypto/ocf/kirkwood/mvHal/mv_hal/eth/mvEth.h
new file mode 100644
index 000000000000..74746759c64a
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/eth/mvEth.h
@@ -0,0 +1,354 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/*******************************************************************************
+* mvEth.h - Header File for : Ethernet Controller
+*
+* DESCRIPTION:
+*       This header file contains macros typedefs and function declaration for
+*       Marvell Gigabit Ethernet Controllers.
+*
+* DEPENDENCIES:
+*       None.
+*
+*******************************************************************************/
+
+#ifndef __mvEth_h__
+#define __mvEth_h__
+
+/* includes */
+#include "mvTypes.h"
+#include "mv802_3.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvAddrDec.h"
+#include "eth/gbe/mvEthRegs.h"
+#include "mvSysHwConfig.h"
+
+/* defines  */
+
+#define MV_ETH_EXTRA_FRAGS_NUM      2
+
+
+typedef enum
+{
+    MV_ETH_SPEED_AN,
+    MV_ETH_SPEED_10,
+    MV_ETH_SPEED_100,
+    MV_ETH_SPEED_1000
+
+} MV_ETH_PORT_SPEED;
+
+typedef enum
+{
+    MV_ETH_DUPLEX_AN,
+    MV_ETH_DUPLEX_HALF,
+    MV_ETH_DUPLEX_FULL
+
+} MV_ETH_PORT_DUPLEX;
+
+typedef enum
+{
+    MV_ETH_FC_AN_ADV_DIS,
+    MV_ETH_FC_AN_ADV_SYM,
+    MV_ETH_FC_DISABLE,
+    MV_ETH_FC_ENABLE
+
+} MV_ETH_PORT_FC;
+
+typedef enum
+{
+    MV_ETH_PRIO_FIXED = 0,  /* Fixed priority mode */
+    MV_ETH_PRIO_WRR   = 1   /* Weighted round robin priority mode */
+} MV_ETH_PRIO_MODE;
+
+/* Ethernet port specific infomation */
+typedef struct
+{
+    int     maxRxPktSize;
+    int     rxDefQ;
+    int     rxBpduQ;
+    int     rxArpQ;
+    int     rxTcpQ;
+    int     rxUdpQ;
+    int     ejpMode;
+} MV_ETH_PORT_CFG;
+
+typedef struct
+{
+    int     descrNum;
+} MV_ETH_RX_Q_CFG;
+
+typedef struct
+{
+    int         descrNum;
+    MV_ETH_PRIO_MODE    prioMode;
+    int         quota;
+} MV_ETH_TX_Q_CFG;
+
+typedef struct
+{
+    int     maxRxPktSize;
+    int     rxDefQ;
+    int     txDescrNum[MV_ETH_TX_Q_NUM];
+    int     rxDescrNum[MV_ETH_RX_Q_NUM];
+    void    *osHandle;
+} MV_ETH_PORT_INIT;
+
+typedef struct
+{
+    MV_BOOL             isLinkUp;
+    MV_ETH_PORT_SPEED   speed;
+    MV_ETH_PORT_DUPLEX  duplex;
+    MV_ETH_PORT_FC      flowControl;
+
+} MV_ETH_PORT_STATUS;
+
+typedef enum
+{
+	MV_ETH_DISABLE_HEADER_MODE = 0,
+	MV_ETH_ENABLE_HEADER_MODE_PRI_2_1 = 1,
+	MV_ETH_ENABLE_HEADER_MODE_PRI_DBNUM = 2,
+	MV_ETH_ENABLE_HEADER_MODE_PRI_SPID = 3
+} MV_ETH_HEADER_MODE;
+
+
+/* ethernet.h API list */
+void        mvEthHalInit(void);
+void        mvEthMemAttrGet(MV_BOOL* pIsSram, MV_BOOL* pIsSwCoher);
+
+/* Port Initalization routines */
+void*       mvEthPortInit (int port, MV_ETH_PORT_INIT *pPortInit);
+void        ethResetTxDescRing(void* pPortHndl, int queue);
+void        ethResetRxDescRing(void* pPortHndl, int queue);
+
+void*       mvEthPortHndlGet(int port);
+
+void        mvEthPortFinish(void* pEthPortHndl);
+MV_STATUS   mvEthPortDown(void* pEthPortHndl);
+MV_STATUS   mvEthPortDisable(void* pEthPortHndl);
+MV_STATUS   mvEthPortUp(void* pEthPortHndl);
+MV_STATUS   mvEthPortEnable(void* pEthPortHndl);
+
+/* Port data flow routines */
+MV_PKT_INFO *mvEthPortForceTxDone(void* pEthPortHndl, int txQueue);
+MV_PKT_INFO *mvEthPortForceRx(void* pEthPortHndl, int rxQueue);
+
+/* Port Configuration routines */
+MV_STATUS   mvEthDefaultsSet(void* pEthPortHndl);
+MV_STATUS   mvEthMaxRxSizeSet(void* pPortHndl, int maxRxSize);
+
+/* Port RX MAC Filtering control routines */
+MV_U8       mvEthMcastCrc8Get(MV_U8* pAddr);
+MV_STATUS   mvEthRxFilterModeSet(void* pPortHndl, MV_BOOL isPromisc);
+MV_STATUS   mvEthMacAddrSet(void* pPortHandle, MV_U8* pMacAddr, int queue);
+MV_STATUS   mvEthMcastAddrSet(void* pPortHandle, MV_U8 *pAddr, int queue);
+
+/* MIB Counters APIs */
+MV_U32      mvEthMibCounterRead(void* pPortHndl, unsigned int mibOffset,
+                               MV_U32* pHigh32);
+void        mvEthMibCountersClear(void* pPortHandle);
+
+/* TX Scheduling configuration routines */
+MV_STATUS   mvEthTxQueueConfig(void* pPortHandle, int txQueue,
+                               MV_ETH_PRIO_MODE txPrioMode, int txQuota);
+
+/* RX Dispatching configuration routines */
+MV_STATUS   mvEthBpduRxQueue(void* pPortHandle, int bpduQueue);
+MV_STATUS   mvEthVlanPrioRxQueue(void* pPortHandle, int vlanPrio, int vlanPrioQueue);
+MV_STATUS   mvEthTosToRxqSet(void* pPortHandle, int tos, int rxq);
+int         mvEthTosToRxqGet(void* pPortHandle, int tos);
+
+/* Speed, Duplex, FlowControl routines */
+MV_STATUS   mvEthSpeedDuplexSet(void* pPortHandle, MV_ETH_PORT_SPEED speed,
+                                                   MV_ETH_PORT_DUPLEX duplex);
+
+MV_STATUS   mvEthFlowCtrlSet(void* pPortHandle, MV_ETH_PORT_FC flowControl);
+
+#if (MV_ETH_VERSION >= 4)
+MV_STATUS   mvEthEjpModeSet(void* pPortHandle, int mode);
+#endif /* (MV_ETH_VERSION >= 4) */
+
+void        mvEthStatusGet(void* pPortHandle, MV_ETH_PORT_STATUS* pStatus);
+
+/* Marvell Header control               */
+MV_STATUS   mvEthHeaderModeSet(void* pPortHandle, MV_ETH_HEADER_MODE headerMode);
+
+/* PHY routines */
+void       mvEthPhyAddrSet(void* pPortHandle, int phyAddr);
+int        mvEthPhyAddrGet(void* pPortHandle);
+
+/* Power management routines */
+void        mvEthPortPowerDown(int port);
+void        mvEthPortPowerUp(int port);
+
+/******************** ETH PRIVATE ************************/
+
+/*#define UNCACHED_TX_BUFFERS*/
+/*#define UNCACHED_RX_BUFFERS*/
+
+
+/* Port attributes */
+/* Size of a Tx/Rx descriptor used in chain list data structure */
+#define ETH_RX_DESC_ALIGNED_SIZE        32
+#define ETH_TX_DESC_ALIGNED_SIZE        32
+
+#define TX_DISABLE_TIMEOUT_MSEC     1000
+#define RX_DISABLE_TIMEOUT_MSEC     1000
+#define TX_FIFO_EMPTY_TIMEOUT_MSEC  10000
+#define PORT_DISABLE_WAIT_TCLOCKS   5000
+
+/* Macros that save access to desc in order to find next desc pointer  */
+#define RX_NEXT_DESC_PTR(pRxDescr, pQueueCtrl)                              \
+        ((pRxDescr) == (pQueueCtrl)->pLastDescr) ?                          \
+               (ETH_RX_DESC*)((pQueueCtrl)->pFirstDescr) :                  \
+               (ETH_RX_DESC*)(((MV_ULONG)(pRxDescr)) + ETH_RX_DESC_ALIGNED_SIZE)
+
+#define TX_NEXT_DESC_PTR(pTxDescr, pQueueCtrl)                              \
+        ((pTxDescr) == (pQueueCtrl)->pLastDescr) ?                          \
+               (ETH_TX_DESC*)((pQueueCtrl)->pFirstDescr) :                  \
+               (ETH_TX_DESC*)(((MV_ULONG)(pTxDescr)) + ETH_TX_DESC_ALIGNED_SIZE)
+
+#define RX_PREV_DESC_PTR(pRxDescr, pQueueCtrl)                              \
+        ((pRxDescr) == (pQueueCtrl)->pFirstDescr) ?                          \
+               (ETH_RX_DESC*)((pQueueCtrl)->pLastDescr) :                  \
+               (ETH_RX_DESC*)(((MV_ULONG)(pRxDescr)) - ETH_RX_DESC_ALIGNED_SIZE)
+
+#define TX_PREV_DESC_PTR(pTxDescr, pQueueCtrl)                              \
+        ((pTxDescr) == (pQueueCtrl)->pFirstDescr) ?                          \
+               (ETH_TX_DESC*)((pQueueCtrl)->pLastDescr) :                  \
+               (ETH_TX_DESC*)(((MV_ULONG)(pTxDescr)) - ETH_TX_DESC_ALIGNED_SIZE)
+
+
+/* Queue specific information */
+typedef struct
+{
+    void*       pFirstDescr;
+    void*       pLastDescr;
+    void*       pCurrentDescr;
+    void*       pUsedDescr;
+    int         resource;
+    MV_BUF_INFO descBuf;
+} ETH_QUEUE_CTRL;
+
+
+/* Ethernet port specific infomation */
+typedef struct _ethPortCtrl
+{
+    int             portNo;
+    ETH_QUEUE_CTRL  rxQueue[MV_ETH_RX_Q_NUM]; /* Rx ring resource  */
+    ETH_QUEUE_CTRL  txQueue[MV_ETH_TX_Q_NUM]; /* Tx ring resource  */
+
+    MV_ETH_PORT_CFG portConfig;
+    MV_ETH_RX_Q_CFG rxQueueConfig[MV_ETH_RX_Q_NUM];
+    MV_ETH_TX_Q_CFG txQueueConfig[MV_ETH_TX_Q_NUM];
+
+    /* Register images - For DP */
+    MV_U32          portTxQueueCmdReg;   /* Port active Tx queues summary    */
+    MV_U32          portRxQueueCmdReg;   /* Port active Rx queues summary    */
+
+    MV_STATE        portState;
+
+    MV_U8           mcastCount[256];
+    MV_U32*         hashPtr;
+    void 	    *osHandle;
+} ETH_PORT_CTRL;
+
+/************** MACROs ****************/
+
+/* MACROs to Flush / Invalidate TX / RX Buffers */
+#if (ETHER_DRAM_COHER == MV_CACHE_COHER_SW) && !defined(UNCACHED_TX_BUFFERS)
+#   define ETH_PACKET_CACHE_FLUSH(pAddr, size)                                  \
+        mvOsCacheClear(NULL, (pAddr), (size));                                  \
+        /*CPU_PIPE_FLUSH;*/
+#else
+#   define ETH_PACKET_CACHE_FLUSH(pAddr, size)                                  \
+        mvOsIoVirtToPhy(NULL, (pAddr));
+#endif /* ETHER_DRAM_COHER == MV_CACHE_COHER_SW */
+
+#if ( (ETHER_DRAM_COHER == MV_CACHE_COHER_SW) && !defined(UNCACHED_RX_BUFFERS) )
+#   define ETH_PACKET_CACHE_INVALIDATE(pAddr, size)                             \
+        mvOsCacheInvalidate (NULL, (pAddr), (size));                            \
+        /*CPU_PIPE_FLUSH;*/
+#else
+#   define ETH_PACKET_CACHE_INVALIDATE(pAddr, size)
+#endif /* ETHER_DRAM_COHER == MV_CACHE_COHER_SW && !UNCACHED_RX_BUFFERS */
+
+#ifdef ETH_DESCR_UNCACHED
+
+#define ETH_DESCR_FLUSH_INV(pPortCtrl, pDescr)
+#define ETH_DESCR_INV(pPortCtrl, pDescr)
+
+#else
+
+#define ETH_DESCR_FLUSH_INV(pPortCtrl, pDescr)      \
+        mvOsCacheLineFlushInv(pPortCtrl->osHandle, (MV_ULONG)(pDescr))
+
+#define ETH_DESCR_INV(pPortCtrl, pDescr)            \
+        mvOsCacheLineInv(pPortCtrl->osHandle, (MV_ULONG)(pDescr))
+
+#endif /* ETH_DESCR_UNCACHED */
+
+#include "eth/gbe/mvEthGbe.h"
+
+#endif /* __mvEth_h__ */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGpp.c b/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGpp.c
new file mode 100644
index 000000000000..e81981c47e52
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGpp.c
@@ -0,0 +1,360 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "gpp/mvGpp.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+/* defines  */
+#ifdef MV_DEBUG
+	#define DB(x)	x
+#else
+	#define DB(x)
+#endif
+
+static MV_VOID gppRegSet(MV_U32 group, MV_U32 regOffs,MV_U32 mask,MV_U32 value);
+
+/*******************************************************************************
+* mvGppTypeSet - Enable a GPP (OUT) pin
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       group - GPP group number
+*       mask  - 32bit mask value. Each set bit in the mask means that the type
+*               of corresponding GPP will be set. Other GPPs are ignored.
+*       value - 32bit value that describes GPP type per pin.
+*
+* OUTPUT:
+*       None.
+*
+* EXAMPLE:
+*       Set GPP8 to input and GPP15 to output.
+*       mvGppTypeSet(0, (GPP8 | GPP15),
+*                    ((MV_GPP_IN & GPP8) | (MV_GPP_OUT & GPP15)) );
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_STATUS mvGppTypeSet(MV_U32 group, MV_U32 mask, MV_U32 value)
+{
+	if (group >= MV_GPP_MAX_GROUP)
+	{
+		DB(mvOsPrintf("mvGppTypeSet: ERR. invalid group number \n"));
+		return MV_BAD_PARAM;
+	}
+
+	gppRegSet(group, GPP_DATA_OUT_EN_REG(group), mask, value);
+
+    /* Workaround for Erratum FE-MISC-70*/
+    if(mvCtrlRevGet()==MV_88F6XXX_A0_REV && (group == 1))
+    {
+        mask &= 0x2;
+        gppRegSet(0, GPP_DATA_OUT_EN_REG(0), mask, value);
+    } /*End of WA*/
+
+	return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvGppBlinkEn - Set a GPP (IN) Pin list to blink every ~100ms
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       group - GPP group number
+*       mask  - 32bit mask value. Each set bit in the mask means that the type
+*               of corresponding GPP will be set. Other GPPs are ignored.
+*       value - 32bit value that describes GPP blink per pin.
+*
+* OUTPUT:
+*       None.
+*
+* EXAMPLE:
+*       Set GPP8 to be static and GPP15 to be blinking.
+*       mvGppBlinkEn(0, (GPP8 | GPP15),
+*                    ((MV_GPP_OUT_STATIC & GPP8) | (MV_GPP_OUT_BLINK & GPP15)) );
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_STATUS mvGppBlinkEn(MV_U32 group, MV_U32 mask, MV_U32 value)
+{
+	if (group >= MV_GPP_MAX_GROUP)
+	{
+		DB(mvOsPrintf("mvGppBlinkEn: ERR. invalid group number \n"));
+		return MV_BAD_PARAM;
+	}
+
+	gppRegSet(group, GPP_BLINK_EN_REG(group), mask, value);
+
+	return MV_OK;
+
+}
+/*******************************************************************************
+* mvGppPolaritySet - Set a GPP (IN) Pin list Polarity mode
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       group - GPP group number
+*       mask  - 32bit mask value. Each set bit in the mask means that the type
+*               of corresponding GPP will be set. Other GPPs are ignored.
+*       value - 32bit value that describes GPP polarity per pin.
+*
+* OUTPUT:
+*       None.
+*
+* EXAMPLE:
+*       Set GPP8 to the actual pin value and GPP15 to be inverted.
+*       mvGppPolaritySet(0, (GPP8 | GPP15),
+*                    ((MV_GPP_IN_ORIGIN & GPP8) | (MV_GPP_IN_INVERT & GPP15)) );
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_STATUS mvGppPolaritySet(MV_U32 group, MV_U32 mask, MV_U32 value)
+{
+	if (group >= MV_GPP_MAX_GROUP)
+	{
+		DB(mvOsPrintf("mvGppPolaritySet: ERR. invalid group number \n"));
+		return MV_BAD_PARAM;
+	}
+
+	gppRegSet(group, GPP_DATA_IN_POL_REG(group), mask, value);
+
+	return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvGppPolarityGet - Get a value of relevant bits from GPP Polarity register.
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       group - GPP group number
+*       mask  - 32bit mask value. Each set bit in the mask means that the
+*               returned value is valid for it.
+*
+* OUTPUT:
+*       None.
+*
+* EXAMPLE:
+*       Get GPP8 and GPP15 value.
+*       mvGppPolarityGet(0, (GPP8 | GPP15));
+*
+* RETURN:
+*       32bit value that describes GPP polatity mode per pin.
+*
+*******************************************************************************/
+MV_U32  mvGppPolarityGet(MV_U32 group, MV_U32 mask)
+{
+    MV_U32  regVal;
+
+	if (group >= MV_GPP_MAX_GROUP)
+	{
+		DB(mvOsPrintf("mvGppActiveSet: Error invalid group number \n"));
+		return MV_ERROR;
+	}
+    regVal = MV_REG_READ(GPP_DATA_IN_POL_REG(group));
+
+    return (regVal & mask);
+}
+
+/*******************************************************************************
+* mvGppValueGet - Get a GPP Pin list value.
+*
+* DESCRIPTION:
+*       This function get GPP value.
+*
+* INPUT:
+*       group - GPP group number
+*       mask  - 32bit mask value. Each set bit in the mask means that the
+*               returned value is valid for it.
+*
+* OUTPUT:
+*       None.
+*
+* EXAMPLE:
+*       Get GPP8 and GPP15 value.
+*       mvGppValueGet(0, (GPP8 | GPP15));
+*
+* RETURN:
+*       32bit value that describes GPP activity mode per pin.
+*
+*******************************************************************************/
+MV_U32 mvGppValueGet(MV_U32 group, MV_U32 mask)
+{
+	MV_U32 gppData;
+
+	gppData = MV_REG_READ(GPP_DATA_IN_REG(group));
+
+	gppData &= mask;
+
+	return gppData;
+
+}
+
+/*******************************************************************************
+* mvGppValueSet - Set a GPP Pin list value.
+*
+* DESCRIPTION:
+*       This function set value for given GPP pin list.
+*
+* INPUT:
+*       group - GPP group number
+*       mask  - 32bit mask value. Each set bit in the mask means that the
+*               value of corresponding GPP will be set accordingly. Other GPP
+*               are not affected.
+*       value - 32bit value that describes GPP value per pin.
+*
+* OUTPUT:
+*       None.
+*
+* EXAMPLE:
+*       Set GPP8 value of '0' and GPP15 value of '1'.
+*       mvGppActiveSet(0, (GPP8 | GPP15), ((0 & GPP8) | (GPP15)) );
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_STATUS mvGppValueSet (MV_U32 group, MV_U32 mask, MV_U32 value)
+{
+	MV_U32 outEnable, tmp;
+	MV_U32 i;
+
+	if (group >= MV_GPP_MAX_GROUP)
+	{
+		DB(mvOsPrintf("mvGppValueSet: Error invalid group number \n"));
+		return MV_BAD_PARAM;
+	}
+
+	/* verify that the gpp pin is configured as output 		*/
+	/* Note that in the register out enabled -> bit = '0'. 	*/
+	outEnable = ~MV_REG_READ(GPP_DATA_OUT_EN_REG(group));
+
+    /* Workaround for Erratum FE-MISC-70*/
+    if(mvCtrlRevGet()==MV_88F6XXX_A0_REV && (group == 1))
+    {
+        tmp = ~MV_REG_READ(GPP_DATA_OUT_EN_REG(0));
+        outEnable &= 0xfffffffd;
+        outEnable |= (tmp & 0x2);
+    } /*End of WA*/
+
+	for (i = 0 ; i < 32 ;i++)
+	{
+		if (((mask & (1 << i)) & (outEnable & (1 << i))) != (mask & (1 << i)))
+		{
+			mvOsPrintf("mvGppValueSet: Err. An attempt to set output "\
+					   "value to GPP %d in input mode.\n", i);
+			return MV_ERROR;
+		}
+	}
+
+	gppRegSet(group, GPP_DATA_OUT_REG(group), mask, value);
+
+	return MV_OK;
+
+}
+/*******************************************************************************
+* gppRegSet - Set a specific GPP pin on a specific GPP register
+*
+* DESCRIPTION:
+*       This function set a specific GPP pin on a specific GPP register
+*
+* INPUT:
+*		regOffs - GPP Register offset
+*       group - GPP group number
+*       mask  - 32bit mask value. Each set bit in the mask means that the
+*               value of corresponding GPP will be set accordingly. Other GPP
+*               are not affected.
+*       value - 32bit value that describes GPP value per pin.
+*
+* OUTPUT:
+*       None.
+*
+* EXAMPLE:
+*       Set GPP8 value of '0' and GPP15 value of '1'.
+*       mvGppActiveSet(0, (GPP8 | GPP15), ((0 & GPP8) | (1 & GPP15)) );
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+static MV_VOID gppRegSet (MV_U32 group, MV_U32 regOffs,MV_U32 mask,MV_U32 value)
+{
+	MV_U32 gppData;
+
+	gppData = MV_REG_READ(regOffs);
+
+	gppData &= ~mask;
+
+	gppData |= (value & mask);
+
+	MV_REG_WRITE(regOffs, gppData);
+}
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGpp.h b/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGpp.h
new file mode 100644
index 000000000000..82603d7e027f
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGpp.h
@@ -0,0 +1,117 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvGppH
+#define __INCmvGppH
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#include "gpp/mvGppRegs.h"
+
+/* These macros describes the GPP type. Each of the GPPs pins can        	*/
+/* be assigned to act as a general purpose input or output pin.             */
+#define	MV_GPP_IN	0xFFFFFFFF      			/* GPP input   */
+#define MV_GPP_OUT  0    						/* GPP output  */
+
+
+/* These macros describes the GPP Out Enable. */
+#define	MV_GPP_OUT_DIS	0xFFFFFFFF         	/* Out pin disabled*/
+#define MV_GPP_OUT_EN   0    			 	/* Out pin enabled*/
+
+/* These macros describes the GPP Out Blinking. */
+/* When set and the corresponding bit in GPIO Data Out Enable Control 		*/
+/* Register is enabled, the GPIO pin blinks every ~100 ms (a period of		*/
+/* 2^24 TCLK clocks).														*/
+#define	MV_GPP_OUT_BLINK	0xFFFFFFFF         	/* Out pin blinking*/
+#define MV_GPP_OUT_STATIC   0    			 	/* Out pin static*/
+
+
+/* These macros describes the GPP Polarity. */
+/* When set to 1 GPIO Data In Register reflects the inverted value of the	*/
+/* corresponding pin.														*/
+
+#define	MV_GPP_IN_INVERT	0xFFFFFFFF         	/* Inverted value is got*/
+#define MV_GPP_IN_ORIGIN    0    			 	/* original value is got*/
+
+/* mvGppTypeSet - Set PP pin mode (IN or OUT) */
+MV_STATUS mvGppTypeSet(MV_U32 group, MV_U32 mask, MV_U32 value);
+
+/* mvGppBlinkEn - Set a GPP (IN) Pin list to blink every ~100ms */
+MV_STATUS mvGppBlinkEn(MV_U32 group, MV_U32 mask, MV_U32 value);
+
+/* mvGppPolaritySet - Set a GPP (IN) Pin list Polarity mode. */
+MV_STATUS mvGppPolaritySet(MV_U32 group, MV_U32 mask, MV_U32 value);
+
+/* mvGppPolarityGet - Get the Polarity of a GPP Pin */
+MV_U32  mvGppPolarityGet(MV_U32 group, MV_U32 mask);
+
+/* mvGppValueGet - Get a GPP Pin list value.*/
+MV_U32 mvGppValueGet(MV_U32 group, MV_U32 mask);
+
+
+/* mvGppValueSet - Set a GPP Pin list value. */
+MV_STATUS mvGppValueSet (MV_U32 group, MV_U32 mask, MV_U32 value);
+
+#endif /* #ifndef __INCmvGppH */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGppRegs.h b/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGppRegs.h
new file mode 100644
index 000000000000..14b199fa28e3
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/gpp/mvGppRegs.h
@@ -0,0 +1,116 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvGppRegsH
+#define __INCmvGppRegsH
+
+#define    MV_GPP0  BIT0
+#define    MV_GPP1  BIT1
+#define    MV_GPP2  BIT2
+#define    MV_GPP3  BIT3
+#define    MV_GPP4  BIT4
+#define    MV_GPP5  BIT5
+#define    MV_GPP6  BIT6
+#define    MV_GPP7  BIT7
+#define    MV_GPP8  BIT8
+#define    MV_GPP9  BIT9
+#define    MV_GPP10 BIT10
+#define    MV_GPP11 BIT11
+#define    MV_GPP12 BIT12
+#define    MV_GPP13 BIT13
+#define    MV_GPP14 BIT14
+#define    MV_GPP15 BIT15
+#define    MV_GPP16 BIT16
+#define    MV_GPP17 BIT17
+#define    MV_GPP18 BIT18
+#define    MV_GPP19 BIT19
+#define    MV_GPP20 BIT20
+#define    MV_GPP21 BIT21
+#define    MV_GPP22 BIT22
+#define    MV_GPP23 BIT23
+#define    MV_GPP24 BIT24
+#define    MV_GPP25 BIT25
+#define    MV_GPP26 BIT26
+#define    MV_GPP27 BIT27
+#define    MV_GPP28 BIT28
+#define    MV_GPP29 BIT29
+#define    MV_GPP30 BIT30
+#define    MV_GPP31 BIT31
+
+
+/* registers offsets */
+
+#define GPP_DATA_OUT_REG(grp)			((grp == 0) ? 0x10100 : 0x10140)
+#define GPP_DATA_OUT_EN_REG(grp)		((grp == 0) ? 0x10104 : 0x10144)
+#define GPP_BLINK_EN_REG(grp)			((grp == 0) ? 0x10108 : 0x10148)
+#define GPP_DATA_IN_POL_REG(grp)		((grp == 0) ? 0x1010C : 0x1014c)
+#define GPP_DATA_IN_REG(grp)			((grp == 0) ? 0x10110 : 0x10150)
+#define GPP_INT_CAUSE_REG(grp)			((grp == 0) ? 0x10114 : 0x10154)
+#define GPP_INT_MASK_REG(grp)			((grp == 0) ? 0x10118 : 0x10158)
+#define GPP_INT_LVL_REG(grp)			((grp == 0) ? 0x1011c : 0x1015c)
+
+#define GPP_DATA_OUT_SET_REG			0x10120
+#define GPP_DATA_OUT_CLEAR_REG			0x10124
+
+#endif /* #ifndef __INCmvGppRegsH */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIf.c b/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIf.c
new file mode 100644
index 000000000000..ca2710c13218
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIf.c
@@ -0,0 +1,666 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvPciIf.h"
+#include "ctrlEnv/sys/mvSysPex.h"
+
+#if defined(MV_INCLUDE_PCI)
+#include "ctrlEnv/sys/mvSysPci.h"
+#endif
+
+
+/* defines  */
+#ifdef MV_DEBUG
+	#define DB(x)	x
+#else
+	#define DB(x)
+#endif
+
+
+/*******************************************************************************
+* mvPciInit - Initialize PCI interfaces
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_OK if function success otherwise MV_ERROR or MV_BAD_PARAM
+*
+*******************************************************************************/
+
+
+MV_STATUS mvPciIfInit(MV_U32 pciIf, PCI_IF_MODE pciIfmode)
+{
+	PCI_IF_TYPE pciIfType = mvPciIfTypeGet(pciIf);
+
+	if (PCI_IF_TYPE_CONVEN_PCIX == pciIfType)
+	{
+		#if defined(MV_INCLUDE_PCI)
+
+        MV_PCI_MOD pciMod;
+
+        if (PCI_IF_MODE_HOST == pciIfmode)
+        {
+            pciMod = MV_PCI_MOD_HOST;
+        }
+        else if (PCI_IF_MODE_DEVICE == pciIfmode)
+        {
+            pciMod = MV_PCI_MOD_DEVICE;
+        }
+        else
+        {
+            mvOsPrintf("%s: ERROR!!! Bus %d mode %d neither host nor device!\n",
+                        __FUNCTION__, pciIf, pciIfmode);
+            return MV_FAIL;
+        }
+
+        return mvPciInit(pciIf - MV_PCI_START_IF, pciMod);
+		#else
+		return MV_OK;
+		#endif
+	}
+	else if (PCI_IF_TYPE_PEX == pciIfType)
+	{
+		#if defined(MV_INCLUDE_PEX)
+
+        MV_PEX_TYPE pexType;
+
+        if (PCI_IF_MODE_HOST == pciIfmode)
+        {
+            pexType = MV_PEX_ROOT_COMPLEX;
+        }
+        else if (PCI_IF_MODE_DEVICE == pciIfmode)
+        {
+            pexType = MV_PEX_END_POINT;
+        }
+        else
+        {
+            mvOsPrintf("%s: ERROR!!! Bus %d type %d neither root complex nor" \
+                       " end point\n", __FUNCTION__, pciIf, pciIfmode);
+            return MV_FAIL;
+        }
+		return mvPexInit(pciIf - MV_PEX_START_IF, pexType);
+
+		#else
+		return MV_OK;
+		#endif
+
+	}
+	else
+	{
+		mvOsPrintf("%s: ERROR!!! Invalid pciIf %d\n", __FUNCTION__, pciIf);
+	}
+
+	return MV_FAIL;
+
+}
+
+/* PCI configuration space read write */
+
+/*******************************************************************************
+* mvPciConfigRead - Read from configuration space
+*
+* DESCRIPTION:
+*       This function performs a 32 bit read from PCI configuration space.
+*       It supports both type 0 and type 1 of Configuration Transactions
+*       (local and over bridge). In order to read from local bus segment, use
+*       bus number retrieved from mvPciLocalBusNumGet(). Other bus numbers
+*       will result configuration transaction of type 1 (over bridge).
+*
+* INPUT:
+*       pciIf   - PCI interface number.
+*       bus     - PCI segment bus number.
+*       dev     - PCI device number.
+*       func    - Function number.
+*       regOffs - Register offset.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit register data, 0xffffffff on error
+*
+*******************************************************************************/
+MV_U32 mvPciIfConfigRead (MV_U32 pciIf, MV_U32 bus, MV_U32 dev, MV_U32 func,
+                        MV_U32 regOff)
+{
+	PCI_IF_TYPE pciIfType = mvPciIfTypeGet(pciIf);
+
+	if (PCI_IF_TYPE_CONVEN_PCIX == pciIfType)
+	{
+		#if defined(MV_INCLUDE_PCI)
+		return mvPciConfigRead(pciIf - MV_PCI_START_IF,
+								bus,
+								dev,
+                                func,
+								regOff);
+		#else
+		return 0xffffffff;
+		#endif
+	}
+	else if (PCI_IF_TYPE_PEX == pciIfType)
+	{
+		#if defined(MV_INCLUDE_PEX)
+		return mvPexConfigRead(pciIf - MV_PEX_START_IF,
+								bus,
+								dev,
+                                func,
+								regOff);
+		#else
+		return 0xffffffff;
+		#endif
+
+	}
+	else
+	{
+		mvOsPrintf("%s: ERROR!!! Invalid pciIf %d\n", __FUNCTION__, pciIf);
+	}
+
+	return 0;
+
+}
+
+/*******************************************************************************
+* mvPciConfigWrite - Write to configuration space
+*
+* DESCRIPTION:
+*       This function performs a 32 bit write to PCI configuration space.
+*       It supports both type 0 and type 1 of Configuration Transactions
+*       (local and over bridge). In order to write to local bus segment, use
+*       bus number retrieved from mvPciLocalBusNumGet(). Other bus numbers
+*       will result configuration transaction of type 1 (over bridge).
+*
+* INPUT:
+*       pciIf   - PCI interface number.
+*       bus     - PCI segment bus number.
+*       dev     - PCI device number.
+*       func    - Function number.
+*       regOffs - Register offset.
+*       data    - 32bit data.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPciIfConfigWrite(MV_U32 pciIf, MV_U32 bus, MV_U32 dev,
+                           MV_U32 func, MV_U32 regOff, MV_U32 data)
+{
+	PCI_IF_TYPE pciIfType = mvPciIfTypeGet(pciIf);
+
+	if (PCI_IF_TYPE_CONVEN_PCIX == pciIfType)
+	{
+		#if defined(MV_INCLUDE_PCI)
+		return mvPciConfigWrite(pciIf - MV_PCI_START_IF,
+								bus,
+								dev,
+                                func,
+								regOff,
+								data);
+		#else
+		return MV_OK;
+		#endif
+	}
+	else if (PCI_IF_TYPE_PEX == pciIfType)
+	{
+		#if defined(MV_INCLUDE_PEX)
+		return mvPexConfigWrite(pciIf - MV_PEX_START_IF,
+								bus,
+								dev,
+                                func,
+								regOff,
+								data);
+		#else
+		return MV_OK;
+		#endif
+
+	}
+	else
+	{
+		mvOsPrintf("%s: ERROR!!! Invalid pciIf %d\n", __FUNCTION__, pciIf);
+	}
+
+	return MV_FAIL;
+
+}
+
+/*******************************************************************************
+* mvPciMasterEnable - Enable/disale PCI interface master transactions.
+*
+* DESCRIPTION:
+*       This function performs read modified write to PCI command status
+*       (offset 0x4) to set/reset bit 2. After this bit is set, the PCI
+*       master is allowed to gain ownership on the bus, otherwise it is
+*       incapable to do so.
+*
+* INPUT:
+*       pciIf  - PCI interface number.
+*       enable - Enable/disable parameter.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPciIfMasterEnable(MV_U32 pciIf, MV_BOOL enable)
+{
+
+	PCI_IF_TYPE pciIfType = mvPciIfTypeGet(pciIf);
+
+	if (PCI_IF_TYPE_CONVEN_PCIX == pciIfType)
+	{
+		#if defined(MV_INCLUDE_PCI)
+		return mvPciMasterEnable(pciIf - MV_PCI_START_IF,
+								enable);
+		#else
+		return MV_OK;
+		#endif
+	}
+	else if (PCI_IF_TYPE_PEX == pciIfType)
+	{
+		#if defined(MV_INCLUDE_PEX)
+		return mvPexMasterEnable(pciIf - MV_PEX_START_IF,
+								enable);
+		#else
+		return MV_OK;
+		#endif
+	}
+	else
+	{
+		mvOsPrintf("%s: ERROR!!! Invalid pciIf %d\n", __FUNCTION__, pciIf);
+	}
+
+	return MV_FAIL;
+
+}
+
+
+/*******************************************************************************
+* mvPciSlaveEnable - Enable/disale PCI interface slave transactions.
+*
+* DESCRIPTION:
+*       This function performs read modified write to PCI command status
+*       (offset 0x4) to set/reset bit 0 and 1. After those bits are set,
+*       the PCI slave is allowed to respond to PCI IO space access (bit 0)
+*       and PCI memory space access (bit 1).
+*
+* INPUT:
+*       pciIf  - PCI interface number.
+*       dev     - PCI device number.
+*       enable - Enable/disable parameter.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPciIfSlaveEnable(MV_U32 pciIf,MV_U32 bus, MV_U32 dev, MV_BOOL enable)
+{
+
+	PCI_IF_TYPE pciIfType = mvPciIfTypeGet(pciIf);
+
+	if (PCI_IF_TYPE_CONVEN_PCIX == pciIfType)
+	{
+		#if defined(MV_INCLUDE_PCI)
+		return mvPciSlaveEnable(pciIf - MV_PCI_START_IF,bus,dev,
+								enable);
+		#else
+		return MV_OK;
+		#endif
+	}
+	else if (PCI_IF_TYPE_PEX == pciIfType)
+	{
+		#if defined(MV_INCLUDE_PEX)
+		return mvPexSlaveEnable(pciIf - MV_PEX_START_IF,bus,dev,
+								enable);
+		#else
+		return MV_OK;
+		#endif
+	}
+	else
+	{
+		mvOsPrintf("%s: ERROR!!! Invalid pciIf %d\n", __FUNCTION__, pciIf);
+	}
+
+	return MV_FAIL;
+
+}
+
+/*******************************************************************************
+* mvPciLocalBusNumSet - Set PCI interface local bus number.
+*
+* DESCRIPTION:
+*       This function sets given PCI interface its local bus number.
+*       Note: In case the PCI interface is PCI-X, the information is read-only.
+*
+* INPUT:
+*       pciIf  - PCI interface number.
+*       busNum - Bus number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_NOT_ALLOWED in case PCI interface is PCI-X.
+*		MV_BAD_PARAM on bad parameters ,
+*       otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPciIfLocalBusNumSet(MV_U32 pciIf, MV_U32 busNum)
+{
+	PCI_IF_TYPE pciIfType = mvPciIfTypeGet(pciIf);
+
+	if (PCI_IF_TYPE_CONVEN_PCIX == pciIfType)
+	{
+		#if defined(MV_INCLUDE_PCI)
+		return mvPciLocalBusNumSet(pciIf - MV_PCI_START_IF,
+								busNum);
+		#else
+		return MV_OK;
+		#endif
+    }
+    else if (PCI_IF_TYPE_PEX == pciIfType)
+	{
+		#if defined(MV_INCLUDE_PEX)
+		return mvPexLocalBusNumSet(pciIf - MV_PEX_START_IF,
+								busNum);
+		#else
+		return MV_OK;
+		#endif
+	}
+	else
+	{
+		mvOsPrintf("%s: ERROR!!! Invalid pciIf %d\n", __FUNCTION__, pciIf);
+	}
+
+	return MV_FAIL;
+
+}
+
+/*******************************************************************************
+* mvPciLocalBusNumGet - Get PCI interface local bus number.
+*
+* DESCRIPTION:
+*       This function gets the local bus number of a given PCI interface.
+*
+* INPUT:
+*       pciIf  - PCI interface number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Local bus number.0xffffffff on Error
+*
+*******************************************************************************/
+MV_U32 mvPciIfLocalBusNumGet(MV_U32 pciIf)
+{
+	PCI_IF_TYPE pciIfType = mvPciIfTypeGet(pciIf);
+
+	if (PCI_IF_TYPE_CONVEN_PCIX == pciIfType)
+	{
+		#if defined(MV_INCLUDE_PCI)
+		return mvPciLocalBusNumGet(pciIf - MV_PCI_START_IF);
+		#else
+		return 0xFFFFFFFF;
+		#endif
+	}
+	else if (PCI_IF_TYPE_PEX == pciIfType)
+	{
+		#if defined(MV_INCLUDE_PEX)
+		return mvPexLocalBusNumGet(pciIf - MV_PEX_START_IF);
+		#else
+		return 0xFFFFFFFF;
+		#endif
+
+	}
+	else
+	{
+		mvOsPrintf("%s: ERROR!!! Invalid pciIf %d\n",__FUNCTION__, pciIf);
+	}
+
+	return 0;
+
+}
+
+
+/*******************************************************************************
+* mvPciLocalDevNumSet - Set PCI interface local device number.
+*
+* DESCRIPTION:
+*       This function sets given PCI interface its local device number.
+*       Note: In case the PCI interface is PCI-X, the information is read-only.
+*
+* INPUT:
+*       pciIf  - PCI interface number.
+*       devNum - Device number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_NOT_ALLOWED in case PCI interface is PCI-X. MV_BAD_PARAM on bad parameters ,
+*       otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPciIfLocalDevNumSet(MV_U32 pciIf, MV_U32 devNum)
+{
+	PCI_IF_TYPE pciIfType = mvPciIfTypeGet(pciIf);
+
+	if (PCI_IF_TYPE_CONVEN_PCIX == pciIfType)
+	{
+		#if defined(MV_INCLUDE_PCI)
+		return mvPciLocalDevNumSet(pciIf - MV_PCI_START_IF,
+								devNum);
+		#else
+		return MV_OK;
+		#endif
+	}
+	else if (PCI_IF_TYPE_PEX == pciIfType)
+	{
+		#if defined(MV_INCLUDE_PEX)
+		return mvPexLocalDevNumSet(pciIf - MV_PEX_START_IF,
+								devNum);
+		#else
+		return MV_OK;
+		#endif
+	}
+	else
+	{
+		mvOsPrintf("%s: ERROR!!! Invalid pciIf %d\n", __FUNCTION__, pciIf);
+	}
+
+	return MV_FAIL;
+
+}
+
+/*******************************************************************************
+* mvPciLocalDevNumGet - Get PCI interface local device number.
+*
+* DESCRIPTION:
+*       This function gets the local device number of a given PCI interface.
+*
+* INPUT:
+*       pciIf  - PCI interface number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Local device number. 0xffffffff on Error
+*
+*******************************************************************************/
+MV_U32 mvPciIfLocalDevNumGet(MV_U32 pciIf)
+{
+	PCI_IF_TYPE pciIfType = mvPciIfTypeGet(pciIf);
+
+	if (PCI_IF_TYPE_CONVEN_PCIX == pciIfType)
+	{
+		#if defined(MV_INCLUDE_PCI)
+		return mvPciLocalDevNumGet(pciIf - MV_PCI_START_IF);
+		#else
+		return 0xFFFFFFFF;
+		#endif
+	}
+	else if (PCI_IF_TYPE_PEX == pciIfType)
+	{
+		#if defined(MV_INCLUDE_PEX)
+		return mvPexLocalDevNumGet(pciIf - MV_PEX_START_IF);
+		#else
+		return 0xFFFFFFFF;
+		#endif
+
+	}
+	else
+	{
+		mvOsPrintf("%s: ERROR!!! Invalid pciIf %d\n", __FUNCTION__, pciIf);
+	}
+
+	return 0;
+
+}
+
+/*******************************************************************************
+* mvPciIfTypeGet -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*
+*******************************************************************************/
+
+PCI_IF_TYPE mvPciIfTypeGet(MV_U32 pciIf)
+{
+
+	if ((pciIf >= MV_PCI_START_IF)&&(pciIf < MV_PCI_MAX_IF + MV_PCI_START_IF))
+	{
+		return PCI_IF_TYPE_CONVEN_PCIX;
+	}
+	else if ((pciIf >= MV_PEX_START_IF) &&
+			 (pciIf < MV_PEX_MAX_IF + MV_PEX_START_IF))
+	{
+		return PCI_IF_TYPE_PEX;
+
+	}
+	else
+	{
+		mvOsPrintf("%s: ERROR!!! Invalid pciIf %d\n", __FUNCTION__, pciIf);
+	}
+
+	return 0xffffffff;
+
+}
+
+/*******************************************************************************
+* mvPciIfTypeGet -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*
+*******************************************************************************/
+
+MV_U32  mvPciRealIfNumGet(MV_U32 pciIf)
+{
+
+	PCI_IF_TYPE pciIfType = mvPciIfTypeGet(pciIf);
+
+	if (PCI_IF_TYPE_CONVEN_PCIX == pciIfType)
+	{
+		return (pciIf - MV_PCI_START_IF);
+	}
+	else if (PCI_IF_TYPE_PEX == pciIfType)
+	{
+		return (pciIf - MV_PEX_START_IF);
+
+	}
+	else
+	{
+		mvOsPrintf("%s: ERROR!!! Invalid pciIf %d\n", __FUNCTION__, pciIf);
+	}
+
+	return 0xffffffff;
+
+}
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIf.h b/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIf.h
new file mode 100644
index 000000000000..bfce3b7fb91c
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIf.h
@@ -0,0 +1,133 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCPCIIFH
+#define __INCPCIIFH
+
+#include "mvSysHwConfig.h"
+#include "pci-if/mvPciIfRegs.h"
+#if defined(MV_INCLUDE_PEX)
+#include "pex/mvPex.h"
+#endif
+#if defined(MV_INCLUDE_PCI)
+#include "pci/mvPci.h"
+#endif
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvAddrDec.h"
+
+typedef enum _mvPCIIfType
+{
+	PCI_IF_TYPE_CONVEN_PCIX,
+	PCI_IF_TYPE_PEX
+
+}PCI_IF_TYPE;
+
+typedef enum _mvPCIIfMode
+{
+	PCI_IF_MODE_HOST,
+	PCI_IF_MODE_DEVICE
+}PCI_IF_MODE;
+
+
+/* Global Functions prototypes */
+
+/* mvPciIfInit - Initialize PCI interfaces*/
+MV_STATUS mvPciIfInit(MV_U32 pciIf, PCI_IF_MODE pciIfmode);
+
+/* mvPciIfConfigRead - Read from configuration space */
+MV_U32 mvPciIfConfigRead (MV_U32 pciIf, MV_U32 bus, MV_U32 dev,
+						MV_U32 func,MV_U32 regOff);
+
+/* mvPciIfConfigWrite - Write to configuration space */
+MV_STATUS mvPciIfConfigWrite(MV_U32 pciIf, MV_U32 bus, MV_U32 dev,
+                           MV_U32 func, MV_U32 regOff, MV_U32 data);
+
+/* mvPciIfMasterEnable - Enable/disale PCI interface master transactions.*/
+MV_STATUS mvPciIfMasterEnable(MV_U32 pciIf, MV_BOOL enable);
+
+/* mvPciIfSlaveEnable - Enable/disale PCI interface slave transactions.*/
+MV_STATUS mvPciIfSlaveEnable(MV_U32 pciIf,MV_U32 bus, MV_U32 dev,
+							 MV_BOOL enable);
+
+/* mvPciIfLocalBusNumSet - Set PCI interface local bus number.*/
+MV_STATUS mvPciIfLocalBusNumSet(MV_U32 pciIf, MV_U32 busNum);
+
+/* mvPciIfLocalBusNumGet - Get PCI interface local bus number.*/
+MV_U32 mvPciIfLocalBusNumGet(MV_U32 pciIf);
+
+/* mvPciIfLocalDevNumSet - Set PCI interface local device number.*/
+MV_STATUS mvPciIfLocalDevNumSet(MV_U32 pciIf, MV_U32 devNum);
+
+/* mvPciIfLocalDevNumGet - Get PCI interface local device number.*/
+MV_U32 mvPciIfLocalDevNumGet(MV_U32 pciIf);
+
+/* mvPciIfTypeGet - Get PCI If type*/
+PCI_IF_TYPE mvPciIfTypeGet(MV_U32 pciIf);
+
+MV_U32  mvPciRealIfNumGet(MV_U32 pciIf);
+
+/* mvPciIfAddrDecShow - Display address decode windows attributes */
+MV_VOID mvPciIfAddrDecShow(MV_VOID);
+
+#endif /* #ifndef __INCPCIIFH */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIfRegs.h b/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIfRegs.h
new file mode 100644
index 000000000000..3df6198ef845
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/mvPciIfRegs.h
@@ -0,0 +1,244 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCPCIIFREGSH
+#define __INCPCIIFREGSH
+
+
+/* defines */
+#define MAX_PCI_DEVICES         32
+#define MAX_PCI_FUNCS           8
+#define MAX_PCI_BUSSES          128
+
+/***************************************/
+/* PCI Configuration registers */
+/***************************************/
+
+/*********************************************/
+/* PCI Configuration, Function 0, Registers  */
+/*********************************************/
+
+
+/* Standard registers */
+#define PCI_DEVICE_AND_VENDOR_ID					0x000
+#define PCI_STATUS_AND_COMMAND						0x004
+#define PCI_CLASS_CODE_AND_REVISION_ID			    0x008
+#define PCI_BIST_HDR_TYPE_LAT_TMR_CACHE_LINE		0x00C
+#define PCI_MEMORY_BAR_BASE_ADDR(barNum)		 	(0x010 + ((barNum) << 2))
+#define PCI_SUBSYS_ID_AND_SUBSYS_VENDOR_ID		 	0x02C
+#define PCI_EXPANSION_ROM_BASE_ADDR_REG			    0x030
+#define PCI_CAPABILTY_LIST_POINTER			        0x034
+#define PCI_INTERRUPT_PIN_AND_LINE					0x03C
+
+
+/* PCI Device and Vendor ID Register (PDVIR) */
+#define PDVIR_VEN_ID_OFFS			0 	/* Vendor ID */
+#define PDVIR_VEN_ID_MASK			(0xffff << PDVIR_VEN_ID_OFFS)
+
+#define PDVIR_DEV_ID_OFFS			16	/* Device ID */
+#define PDVIR_DEV_ID_MASK  			(0xffff << PDVIR_DEV_ID_OFFS)
+
+/* PCI Status and Command Register (PSCR) */
+#define PSCR_IO_EN			BIT0 	/* IO Enable 							  */
+#define PSCR_MEM_EN			BIT1	/* Memory Enable 						  */
+#define PSCR_MASTER_EN		BIT2	/* Master Enable 						  */
+#define PSCR_SPECIAL_EN		BIT3	/* Special Cycle Enable 				  */
+#define PSCR_MEM_WRI_INV	BIT4	/* Memory Write and Invalidate Enable	  */
+#define PSCR_VGA			BIT5	/* VGA Palette Snoops 					  */
+#define PSCR_PERR_EN		BIT6	/* Parity Errors Respond Enable 		  */
+#define PSCR_ADDR_STEP   	BIT7    /* Address Stepping Enable (Wait Cycle En)*/
+#define PSCR_SERR_EN		BIT8	/* Ability to assert SERR# line			  */
+#define PSCR_FAST_BTB_EN	BIT9	/* generate fast back-to-back transactions*/
+#define PSCR_CAP_LIST		BIT20	/* Capability List Support 				  */
+#define PSCR_66MHZ_EN		BIT21   /* 66 MHz Capable 						  */
+#define PSCR_UDF_EN			BIT22   /* User definable features 				  */
+#define PSCR_TAR_FAST_BB 	BIT23   /* fast back-to-back transactions capable */
+#define PSCR_DATA_PERR		BIT24   /* Data Parity reported 				  */
+
+#define PSCR_DEVSEL_TIM_OFFS 	25  /* DEVSEL timing */
+#define PSCR_DEVSEL_TIM_MASK 	(0x3 << PSCR_DEVSEL_TIM_OFFS)
+#define PSCR_DEVSEL_TIM_FAST	(0x0 << PSCR_DEVSEL_TIM_OFFS)
+#define PSCR_DEVSEL_TIM_MED 	(0x1 << PSCR_DEVSEL_TIM_OFFS)
+#define PSCR_DEVSEL_TIM_SLOW 	(0x2 << PSCR_DEVSEL_TIM_OFFS)
+
+#define PSCR_SLAVE_TABORT	BIT27	/* Signalled Target Abort 	*/
+#define PSCR_MASTER_TABORT	BIT28	/* Recieved Target Abort 	*/
+#define PSCR_MABORT			BIT29	/* Recieved Master Abort 	*/
+#define PSCR_SYSERR			BIT30	/* Signalled system error 	*/
+#define PSCR_DET_PARERR		BIT31	/* Detect Parity Error 		*/
+
+/* 	PCI configuration register offset=0x08 fields
+	(PCI_CLASS_CODE_AND_REVISION_ID)(PCCRI) 				*/
+
+#define PCCRIR_REVID_OFFS		0		/* Revision ID */
+#define PCCRIR_REVID_MASK		(0xff << PCCRIR_REVID_OFFS)
+
+#define PCCRIR_FULL_CLASS_OFFS	8		/* Full Class Code */
+#define PCCRIR_FULL_CLASS_MASK	(0xffffff << PCCRIR_FULL_CLASS_OFFS)
+
+#define PCCRIR_PROGIF_OFFS		8		/* Prog .I/F*/
+#define PCCRIR_PROGIF_MASK		(0xff << PCCRIR_PROGIF_OFFS)
+
+#define PCCRIR_SUB_CLASS_OFFS	16		/* Sub Class*/
+#define PCCRIR_SUB_CLASS_MASK	(0xff << PCCRIR_SUB_CLASS_OFFS)
+
+#define PCCRIR_BASE_CLASS_OFFS	24		/* Base Class*/
+#define PCCRIR_BASE_CLASS_MASK	(0xff << PCCRIR_BASE_CLASS_OFFS)
+
+/* 	PCI configuration register offset=0x0C fields
+	(PCI_BIST_HEADER_TYPE_LATENCY_TIMER_CACHE_LINE)(PBHTLTCL) 				*/
+
+#define PBHTLTCLR_CACHELINE_OFFS		0	/* Specifies the cache line size */
+#define PBHTLTCLR_CACHELINE_MASK		(0xff << PBHTLTCLR_CACHELINE_OFFS)
+
+#define PBHTLTCLR_LATTIMER_OFFS			8	/* latency timer */
+#define PBHTLTCLR_LATTIMER_MASK			(0xff << PBHTLTCLR_LATTIMER_OFFS)
+
+#define PBHTLTCLR_HEADTYPE_FULL_OFFS	16	/* Full Header Type */
+#define PBHTLTCLR_HEADTYPE_FULL_MASK	(0xff << PBHTLTCLR_HEADTYPE_FULL_OFFS)
+
+#define PBHTLTCLR_MULTI_FUNC			BIT23	/* Multi/Single function */
+
+#define PBHTLTCLR_HEADER_OFFS			16		/* Header type */
+#define PBHTLTCLR_HEADER_MASK			(0x7f << PBHTLTCLR_HEADER_OFFS)
+#define PBHTLTCLR_HEADER_STANDARD		(0x0 << PBHTLTCLR_HEADER_OFFS)
+#define PBHTLTCLR_HEADER_PCI2PCI_BRIDGE	(0x1 << PBHTLTCLR_HEADER_OFFS)
+
+
+#define PBHTLTCLR_BISTCOMP_OFFS		24	/* BIST Completion Code */
+#define PBHTLTCLR_BISTCOMP_MASK		(0xf << PBHTLTCLR_BISTCOMP_OFFS)
+
+#define PBHTLTCLR_BISTACT			BIT30	/* BIST Activate bit */
+#define PBHTLTCLR_BISTCAP			BIT31	/* BIST Capable Bit */
+
+
+/* PCI Bar Base Low Register (PBBLR) */
+#define PBBLR_IOSPACE			BIT0	/* Memory Space Indicator */
+
+#define PBBLR_TYPE_OFFS			1	   /* BAR Type/Init Val. */
+#define PBBLR_TYPE_MASK			(0x3 << PBBLR_TYPE_OFFS)
+#define PBBLR_TYPE_32BIT_ADDR	(0x0 << PBBLR_TYPE_OFFS)
+#define PBBLR_TYPE_64BIT_ADDR	(0x2 << PBBLR_TYPE_OFFS)
+
+#define PBBLR_PREFETCH_EN		BIT3 	/* Prefetch Enable */
+
+
+#define PBBLR_MEM_BASE_OFFS		4	/* Memory Bar Base address. Corresponds to
+									address bits [31:4] */
+#define PBBLR_MEM_BASE_MASK		(0xfffffff << PBBLR_MEM_BASE_OFFS)
+
+#define PBBLR_IO_BASE_OFFS		2	/* IO Bar Base address. Corresponds to
+										address bits [31:2] */
+#define PBBLR_IO_BASE_MASK		(0x3fffffff << PBBLR_IO_BASE_OFFS)
+
+
+#define PBBLR_BASE_OFFS			12		/* Base address. Address bits [31:12] */
+#define PBBLR_BASE_MASK			(0xfffff << PBBLR_BASE_OFFS)
+#define PBBLR_BASE_ALIGNMET		(1 << PBBLR_BASE_OFFS)
+
+
+/* PCI Bar Base High Fegister (PBBHR) */
+#define PBBHR_BASE_OFFS			0		/* Base address. Address bits [31:12] */
+#define PBBHR_BASE_MASK			(0xffffffff << PBBHR_BASE_OFFS)
+
+
+/* 	PCI configuration register offset=0x2C fields
+	(PCI_SUBSYSTEM_ID_AND_SUBSYSTEM_VENDOR_ID)(PSISVI) 				*/
+
+#define PSISVIR_VENID_OFFS	0	/* Subsystem Manufacturer Vendor ID Number */
+#define PSISVIR_VENID_MASK	(0xffff << PSISVIR_VENID_OFFS)
+
+#define PSISVIR_DEVID_OFFS	16	/* Subsystem Device ID Number */
+#define PSISVIR_DEVID_MASK	(0xffff << PSISVIR_DEVID_OFFS)
+
+/* 	PCI configuration register offset=0x30 fields
+	(PCI_EXPANSION_ROM_BASE_ADDR_REG)(PERBA) 				*/
+
+#define PERBAR_EXPROMEN		BIT0	/* Expansion ROM Enable */
+
+#define PERBAR_BASE_OFFS		12		/* Expansion ROM Base Address */
+#define PERBAR_BASE_MASK		(0xfffff << PERBAR_BASE_OFFS)
+
+/* 	PCI configuration register offset=0x34 fields
+	(PCI_CAPABILTY_LIST_POINTER)(PCLP) 				*/
+
+#define PCLPR_CAPPTR_OFFS	0		/* Capability List Pointer */
+#define PCLPR_CAPPTR_MASK	(0xff << PCLPR_CAPPTR_OFFS)
+
+/* 	PCI configuration register offset=0x3C fields
+	(PCI_INTERRUPT_PIN_AND_LINE)(PIPL) 				*/
+
+#define PIPLR_INTLINE_OFFS	0	/* Interrupt line (IRQ) */
+#define PIPLR_INTLINE_MASK	(0xff << PIPLR_INTLINE_OFFS)
+
+#define PIPLR_INTPIN_OFFS	8	/* interrupt pin (A,B,C,D) */
+#define PIPLR_INTPIN_MASK	(0xff << PIPLR_INTPIN_OFFS)
+
+#define PIPLR_MINGRANT_OFFS	16	/* Minimum Grant on 250 nano seconds units */
+#define PIPLR_MINGRANT_MASK	(0xff << PIPLR_MINGRANT_OFFS)
+
+#define PIPLR_MAXLATEN_OFFS	24	/* Maximum latency on 250 nano seconds units */
+#define PIPLR_MAXLATEN_MASK	(0xff << PIPLR_MAXLATEN_OFFS)
+
+#endif /* #ifndef __INCPCIIFREGSH */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/pci_util/mvPciUtils.c b/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/pci_util/mvPciUtils.c
new file mode 100644
index 000000000000..1bc2b222daff
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/pci_util/mvPciUtils.c
@@ -0,0 +1,1003 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/* includes */
+#include "mvPciUtils.h"
+
+#include "ctrlEnv/mvCtrlEnvLib.h"
+
+/* #define MV_DEBUG */
+/* defines  */
+#ifdef MV_DEBUG
+	#define DB(x)	x
+	#define mvOsPrintf printf
+#else
+	#define DB(x)
+#endif
+
+/*
+This module only support scanning of Header type 00h of pci devices
+There is no suppotr for Header type 01h of pci devices  ( PCI bridges )
+*/
+
+
+static MV_STATUS pciDetectDevice(MV_U32 pciIf,
+								 MV_U32 bus,
+								 MV_U32 dev,
+								 MV_U32 func,
+								 MV_PCI_DEVICE *pPciAgent);
+
+static MV_U32 pciDetectDeviceBars(MV_U32 pciIf,
+									MV_U32 bus,
+									MV_U32 dev,
+									MV_U32 func,
+									MV_PCI_DEVICE *pPciAgent);
+
+
+
+
+
+
+/*******************************************************************************
+* mvPciScan - Scan a PCI interface bus
+*
+* DESCRIPTION:
+* Performs a full scan on a PCI interface and returns all possible details
+* on the agents found on the bus.
+*
+* INPUT:
+*       pciIf       - PCI Interface
+*       pPciAgents 	- Pointer to an Array of the pci agents to be detected
+*		pPciAgentsNum - pPciAgents array maximum number of elements
+*
+* OUTPUT:
+*       pPciAgents - Array of the pci agents detected on the bus
+*		pPciAgentsNum - Number of pci agents detected on the bus
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+
+MV_STATUS mvPciScan(MV_U32 pciIf,
+					MV_PCI_DEVICE *pPciAgents,
+					MV_U32 *pPciAgentsNum)
+{
+
+	MV_U32 devIndex,funcIndex=0,busIndex=0,detectedDevNum=0;
+    MV_U32 localBus=mvPciIfLocalBusNumGet(pciIf);
+    MV_PCI_DEVICE *pPciDevice;
+	MV_PCI_DEVICE *pMainDevice;
+
+	DB(mvOsPrintf("mvPciScan: PCI interface num %d\n", pciIf));
+	/* Parameter checking   */
+	if (pciIf >= mvCtrlPexMaxIfGet())
+	{
+		DB(mvOsPrintf("mvPciScan: ERR. Invalid PCI interface num %d\n", pciIf));
+		return MV_BAD_PARAM;
+	}
+	if (NULL == pPciAgents)
+	{
+		DB(mvOsPrintf("mvPciScan: ERR. pPciAgents=NULL \n"));
+		return MV_BAD_PARAM;
+	}
+	if (NULL == pPciAgentsNum)
+	{
+		DB(mvOsPrintf("mvPciScan: ERR. pPciAgentsNum=NULL \n"));
+		return MV_BAD_PARAM;
+	}
+
+
+	DB(mvOsPrintf("mvPciScan: PCI interface num %d mvPciMasterEnable\n", pciIf));
+	/* Master enable the MV PCI master */
+	if (MV_OK != mvPciIfMasterEnable(pciIf,MV_TRUE))
+	{
+		DB(mvOsPrintf("mvPciScan: ERR. mvPciMasterEnable failed  \n"));
+		return MV_ERROR;
+
+	}
+
+	DB(mvOsPrintf("mvPciScan: PCI interface num scan%d\n", pciIf));
+
+	/* go through all busses */
+	for (busIndex=localBus ; busIndex < MAX_PCI_BUSSES ; busIndex++)
+	{
+		/* go through all possible devices on the local bus */
+		for (devIndex=0 ; devIndex < MAX_PCI_DEVICES ; devIndex++)
+		{
+			/* always start with function equal to zero */
+			funcIndex=0;
+
+			pPciDevice=&pPciAgents[detectedDevNum];
+			DB(mvOsPrintf("mvPciScan: PCI interface num scan%d:%d\n", busIndex, devIndex));
+
+			if (MV_ERROR == pciDetectDevice(pciIf,
+										   busIndex,
+										   devIndex,
+										   funcIndex,
+										   pPciDevice))
+			{
+				/* no device detected , try the next address */
+				continue;
+			}
+
+			/* We are here ! means we have detected a device*/
+			/* always we start with only one function per device */
+			pMainDevice = pPciDevice;
+			pPciDevice->funtionsNum = 1;
+
+
+			/* move on */
+			detectedDevNum++;
+
+
+			/* check if we have no more room for a new device */
+			if (detectedDevNum == *pPciAgentsNum)
+			{
+				DB(mvOsPrintf("mvPciScan: ERR. array passed too small \n"));
+				return MV_ERROR;
+			}
+
+			/* check the detected device if it is a multi functional device then
+			scan all device functions*/
+			if (pPciDevice->isMultiFunction == MV_TRUE)
+			{
+				/* start with function number 1 because we have already detected
+				function 0 */
+				for (funcIndex=1; funcIndex<MAX_PCI_FUNCS ; funcIndex++)
+				{
+					pPciDevice=&pPciAgents[detectedDevNum];
+
+					if (MV_ERROR == pciDetectDevice(pciIf,
+												   busIndex,
+												   devIndex,
+												   funcIndex,
+												   pPciDevice))
+					{
+						/* no device detected means no more functions !*/
+						continue;
+					}
+					/* We are here ! means we have detected a device */
+
+					/* move on */
+					pMainDevice->funtionsNum++;
+					detectedDevNum++;
+
+					/* check if we have no more room for a new device */
+					if (detectedDevNum == *pPciAgentsNum)
+					{
+						DB(mvOsPrintf("mvPciScan: ERR. Array too small\n"));
+						return MV_ERROR;
+					}
+
+
+				}
+			}
+
+		}
+
+	}
+
+	/* return the number of devices actually detected on the bus ! */
+	*pPciAgentsNum = detectedDevNum;
+
+	return MV_OK;
+
+}
+
+
+/*******************************************************************************
+* pciDetectDevice - Detect a pci device parameters
+*
+* DESCRIPTION:
+*	This function detect if a pci agent exist on certain address !
+*   and if exists then it fills all possible information on the
+*   agent
+*
+* INPUT:
+*       pciIf       - PCI Interface
+*		bus		-	Bus number
+*		dev		- 	Device number
+*		func	-	Function number
+*
+*
+*
+* OUTPUT:
+*       pPciAgent - pointer to the pci agent filled with its information
+*
+* RETURN:
+*       MV_ERROR if no device , MV_OK otherwise
+*
+*******************************************************************************/
+
+static MV_STATUS pciDetectDevice(MV_U32 pciIf,
+								 MV_U32 bus,
+								 MV_U32 dev,
+								 MV_U32 func,
+								 MV_PCI_DEVICE *pPciAgent)
+{
+	MV_U32 pciData;
+
+	/* no Parameters checking ! because it is static function and it is assumed
+	that all parameters were checked in the calling function */
+
+
+	/* Try read the PCI Vendor ID and Device ID */
+
+	/*  We will scan only ourselves and the PCI slots that exist on the
+		board, because we may have a case that we have one slot that has
+		a Cardbus connector, and because CardBus answers all IDsels we want
+		to scan only this slot and ourseleves.
+
+	*/
+	#if defined(MV_INCLUDE_PCI)
+	if ((PCI_IF_TYPE_CONVEN_PCIX == mvPciIfTypeGet(pciIf)) &&
+					(DB_88F5181_DDR1_PRPMC != mvBoardIdGet()) &&
+					(DB_88F5181_DDR1_PEXPCI != mvBoardIdGet()) &&
+					(DB_88F5181_DDR1_MNG != mvBoardIdGet()))
+	{
+
+			if (mvBoardIsOurPciSlot(bus, dev) == MV_FALSE)
+			{
+				return MV_ERROR;
+			}
+	}
+	#endif /* defined(MV_INCLUDE_PCI) */
+
+	pciData = mvPciIfConfigRead(pciIf, bus,dev,func, PCI_DEVICE_AND_VENDOR_ID);
+
+	if (PCI_ERROR_CODE == pciData)
+	{
+		/* no device exist */
+		return MV_ERROR;
+	}
+
+	/* we are here ! means a device is detected */
+
+	/* fill basic information */
+	pPciAgent->busNumber=bus;
+	pPciAgent->deviceNum=dev;
+	pPciAgent->function=func;
+
+	/* Fill the PCI Vendor ID and Device ID */
+
+	pPciAgent->venID = (pciData & PDVIR_VEN_ID_MASK) >> PDVIR_VEN_ID_OFFS;
+	pPciAgent->deviceID = (pciData & PDVIR_DEV_ID_MASK) >> PDVIR_DEV_ID_OFFS;
+
+	/* Read Status and command */
+	pciData = mvPciIfConfigRead(pciIf,
+							  bus,dev,func,
+							  PCI_STATUS_AND_COMMAND);
+
+
+	/* Fill related Status and Command information*/
+
+	if (pciData & PSCR_TAR_FAST_BB)
+	{
+		pPciAgent->isFastB2BCapable = MV_TRUE;
+	}
+	else
+	{
+		pPciAgent->isFastB2BCapable = MV_FALSE;
+	}
+
+	if (pciData & PSCR_CAP_LIST)
+	{
+		pPciAgent->isCapListSupport=MV_TRUE;
+	}
+	else
+	{
+		pPciAgent->isCapListSupport=MV_FALSE;
+	}
+
+	if (pciData & PSCR_66MHZ_EN)
+	{
+		pPciAgent->is66MHZCapable=MV_TRUE;
+	}
+	else
+	{
+		pPciAgent->is66MHZCapable=MV_FALSE;
+	}
+
+	/* Read Class Code and Revision */
+	pciData = mvPciIfConfigRead(pciIf,
+							  bus,dev,func,
+							  PCI_CLASS_CODE_AND_REVISION_ID);
+
+
+	pPciAgent->baseClassCode =
+		(pciData & PCCRIR_BASE_CLASS_MASK) >> PCCRIR_BASE_CLASS_OFFS;
+
+	pPciAgent->subClassCode =
+		(pciData & PCCRIR_SUB_CLASS_MASK) >> PCCRIR_SUB_CLASS_OFFS;
+
+	pPciAgent->progIf =
+		(pciData & PCCRIR_PROGIF_MASK) >> PCCRIR_PROGIF_OFFS;
+
+	pPciAgent->revisionID =
+		(pciData & PCCRIR_REVID_MASK) >> PCCRIR_REVID_OFFS;
+
+	/* Read  PCI_BIST_HDR_TYPE_LAT_TMR_CACHE_LINE */
+	pciData = mvPciIfConfigRead(pciIf,
+							  bus,dev,func,
+							  PCI_BIST_HDR_TYPE_LAT_TMR_CACHE_LINE);
+
+
+
+	pPciAgent->pciCacheLine=
+		(pciData & PBHTLTCLR_CACHELINE_MASK ) >> PBHTLTCLR_CACHELINE_OFFS;
+	pPciAgent->pciLatencyTimer=
+		(pciData & PBHTLTCLR_LATTIMER_MASK) >> PBHTLTCLR_LATTIMER_OFFS;
+
+	switch (pciData & PBHTLTCLR_HEADER_MASK)
+	{
+	case PBHTLTCLR_HEADER_STANDARD:
+
+		pPciAgent->pciHeader=MV_PCI_STANDARD;
+		break;
+	case PBHTLTCLR_HEADER_PCI2PCI_BRIDGE:
+
+		pPciAgent->pciHeader=MV_PCI_PCI2PCI_BRIDGE;
+		break;
+
+	}
+
+	if (pciData & PBHTLTCLR_MULTI_FUNC)
+	{
+		pPciAgent->isMultiFunction=MV_TRUE;
+	}
+	else
+	{
+		pPciAgent->isMultiFunction=MV_FALSE;
+	}
+
+	if (pciData & PBHTLTCLR_BISTCAP)
+	{
+		pPciAgent->isBISTCapable=MV_TRUE;
+	}
+	else
+	{
+		pPciAgent->isBISTCapable=MV_FALSE;
+	}
+
+
+	/* read this device pci bars */
+
+	pciDetectDeviceBars(pciIf,
+					  bus,dev,func,
+					 pPciAgent);
+
+
+	/* check if we are bridge*/
+	if ((pPciAgent->baseClassCode == PCI_BRIDGE_CLASS)&&
+		(pPciAgent->subClassCode == P2P_BRIDGE_SUB_CLASS_CODE))
+	{
+
+		/* Read  P2P_BUSSES_NUM */
+		pciData = mvPciIfConfigRead(pciIf,
+								  bus,dev,func,
+								  P2P_BUSSES_NUM);
+
+		pPciAgent->p2pPrimBusNum =
+			(pciData & PBM_PRIME_BUS_NUM_MASK) >> PBM_PRIME_BUS_NUM_OFFS;
+
+		pPciAgent->p2pSecBusNum =
+			(pciData & PBM_SEC_BUS_NUM_MASK) >> PBM_SEC_BUS_NUM_OFFS;
+
+		pPciAgent->p2pSubBusNum =
+			(pciData & PBM_SUB_BUS_NUM_MASK) >> PBM_SUB_BUS_NUM_OFFS;
+
+		pPciAgent->p2pSecLatencyTimer =
+			(pciData & PBM_SEC_LAT_TMR_MASK) >> PBM_SEC_LAT_TMR_OFFS;
+
+		/* Read  P2P_IO_BASE_LIMIT_SEC_STATUS */
+		pciData = mvPciIfConfigRead(pciIf,
+								  bus,dev,func,
+								  P2P_IO_BASE_LIMIT_SEC_STATUS);
+
+		pPciAgent->p2pSecStatus =
+			(pciData & PIBLSS_SEC_STATUS_MASK) >> PIBLSS_SEC_STATUS_OFFS;
+
+
+		pPciAgent->p2pIObase =
+			(pciData & PIBLSS_IO_BASE_MASK) << PIBLSS_IO_LIMIT_OFFS;
+
+		/* clear low address (should be zero)*/
+		pPciAgent->p2pIObase &= PIBLSS_HIGH_ADDR_MASK;
+
+		pPciAgent->p2pIOLimit =
+			(pciData & PIBLSS_IO_LIMIT_MASK);
+
+		/* fill low address with 0xfff */
+		pPciAgent->p2pIOLimit |= PIBLSS_LOW_ADDR_MASK;
+
+
+		switch ((pciData & PIBLSS_ADD_CAP_MASK) >> PIBLSS_ADD_CAP_OFFS)
+		{
+		case PIBLSS_ADD_CAP_16BIT:
+
+			pPciAgent->bIO32 = MV_FALSE;
+
+			break;
+		case PIBLSS_ADD_CAP_32BIT:
+
+			pPciAgent->bIO32 = MV_TRUE;
+
+			/* Read  P2P_IO_BASE_LIMIT_UPPER_16 */
+			pciData = mvPciIfConfigRead(pciIf,
+									  bus,dev,func,
+									  P2P_IO_BASE_LIMIT_UPPER_16);
+
+			pPciAgent->p2pIObase |=
+				(pciData & PRBU_IO_UPP_BASE_MASK) << PRBU_IO_UPP_LIMIT_OFFS;
+
+
+			pPciAgent->p2pIOLimit |=
+				(pciData & PRBU_IO_UPP_LIMIT_MASK);
+
+			break;
+
+		}
+
+
+		/* Read  P2P_MEM_BASE_LIMIT */
+		pciData = mvPciIfConfigRead(pciIf,
+								  bus,dev,func,
+								  P2P_MEM_BASE_LIMIT);
+
+		pPciAgent->p2pMemBase =
+			(pciData & PMBL_MEM_BASE_MASK) << PMBL_MEM_LIMIT_OFFS;
+
+		/* clear low address */
+		pPciAgent->p2pMemBase &= PMBL_HIGH_ADDR_MASK;
+
+		pPciAgent->p2pMemLimit =
+			(pciData & PMBL_MEM_LIMIT_MASK);
+
+		/* add 0xfffff */
+		pPciAgent->p2pMemLimit |= PMBL_LOW_ADDR_MASK;
+
+
+		/* Read  P2P_PREF_MEM_BASE_LIMIT */
+		pciData = mvPciIfConfigRead(pciIf,
+								  bus,dev,func,
+								  P2P_PREF_MEM_BASE_LIMIT);
+
+
+		pPciAgent->p2pPrefMemBase =
+			(pciData & PRMBL_PREF_MEM_BASE_MASK) << PRMBL_PREF_MEM_LIMIT_OFFS;
+
+		/* get high address only */
+		pPciAgent->p2pPrefMemBase &= PRMBL_HIGH_ADDR_MASK;
+
+
+
+		pPciAgent->p2pPrefMemLimit =
+			(pciData & PRMBL_PREF_MEM_LIMIT_MASK);
+
+		/* add 0xfffff */
+		pPciAgent->p2pPrefMemLimit |= PRMBL_LOW_ADDR_MASK;
+
+		switch (pciData & PRMBL_ADD_CAP_MASK)
+		{
+		case PRMBL_ADD_CAP_32BIT:
+
+			pPciAgent->bPrefMem64 = MV_FALSE;
+
+			/* Read  P2P_PREF_BASE_UPPER_32 */
+			pPciAgent->p2pPrefBaseUpper32Bits = 0;
+
+			/* Read  P2P_PREF_LIMIT_UPPER_32 */
+			pPciAgent->p2pPrefLimitUpper32Bits = 0;
+
+			break;
+		case PRMBL_ADD_CAP_64BIT:
+
+			pPciAgent->bPrefMem64 = MV_TRUE;
+
+			/* Read  P2P_PREF_BASE_UPPER_32 */
+			pPciAgent->p2pPrefBaseUpper32Bits = mvPciIfConfigRead(pciIf,
+									  bus,dev,func,
+									  P2P_PREF_BASE_UPPER_32);
+
+			/* Read  P2P_PREF_LIMIT_UPPER_32 */
+			pPciAgent->p2pPrefLimitUpper32Bits = mvPciIfConfigRead(pciIf,
+									  bus,dev,func,
+									  P2P_PREF_LIMIT_UPPER_32);
+
+			break;
+
+		}
+
+	}
+	else /* no bridge */
+	{
+		/* Read  PCI_SUBSYS_ID_AND_SUBSYS_VENDOR_ID */
+		pciData = mvPciIfConfigRead(pciIf,
+								  bus,dev,func,
+								  PCI_SUBSYS_ID_AND_SUBSYS_VENDOR_ID);
+
+
+		pPciAgent->subSysVenID =
+			(pciData & PSISVIR_VENID_MASK) >> PSISVIR_VENID_OFFS;
+		pPciAgent->subSysID =
+			(pciData & PSISVIR_DEVID_MASK) >> PSISVIR_DEVID_OFFS;
+
+
+		/* Read  PCI_EXPANSION_ROM_BASE_ADDR_REG */
+		pciData = mvPciIfConfigRead(pciIf,
+								  bus,dev,func,
+								  PCI_EXPANSION_ROM_BASE_ADDR_REG);
+
+
+		if (pciData & PERBAR_EXPROMEN)
+		{
+			pPciAgent->isExpRom = MV_TRUE;
+		}
+		else
+		{
+			pPciAgent->isExpRom = MV_FALSE;
+		}
+
+		pPciAgent->expRomAddr =
+			(pciData & PERBAR_BASE_MASK) >> PERBAR_BASE_OFFS;
+
+	}
+
+
+	if (MV_TRUE == pPciAgent->isCapListSupport)
+	{
+		/* Read  PCI_CAPABILTY_LIST_POINTER */
+		pciData = mvPciIfConfigRead(pciIf,
+								  bus,dev,func,
+								  PCI_CAPABILTY_LIST_POINTER);
+
+		pPciAgent->capListPointer =
+			(pciData & PCLPR_CAPPTR_MASK) >> PCLPR_CAPPTR_OFFS;
+
+	}
+
+	/* Read  PCI_INTERRUPT_PIN_AND_LINE */
+	pciData = mvPciIfConfigRead(pciIf,
+							  bus,dev,func,
+							  PCI_INTERRUPT_PIN_AND_LINE);
+
+
+	pPciAgent->irqLine=
+		(pciData & PIPLR_INTLINE_MASK) >> PIPLR_INTLINE_OFFS;
+
+	pPciAgent->intPin=
+		(MV_PCI_INT_PIN)(pciData & PIPLR_INTPIN_MASK) >> PIPLR_INTPIN_OFFS;
+
+	pPciAgent->minGrant=
+		(pciData & PIPLR_MINGRANT_MASK) >> PIPLR_MINGRANT_OFFS;
+	pPciAgent->maxLatency=
+		(pciData & PIPLR_MAXLATEN_MASK) >> PIPLR_MAXLATEN_OFFS;
+
+	mvPciClassNameGet(pPciAgent->baseClassCode,
+					  (MV_8 *)pPciAgent->type);
+
+	return MV_OK;
+
+
+}
+
+/*******************************************************************************
+* pciDetectDeviceBars - Detect a pci device bars
+*
+* DESCRIPTION:
+*	This function detects all pci agent bars
+*
+* INPUT:
+*       pciIf       - PCI Interface
+*		bus		-	Bus number
+*		dev		- 	Device number
+*		func	-	Function number
+*
+*
+*
+* OUTPUT:
+*       pPciAgent - pointer to the pci agent filled with its information
+*
+* RETURN:
+*       detected bars number
+*
+*******************************************************************************/
+static MV_U32 pciDetectDeviceBars(MV_U32 pciIf,
+									MV_U32 bus,
+									MV_U32 dev,
+									MV_U32 func,
+									MV_PCI_DEVICE *pPciAgent)
+{
+	MV_U32 pciData,barIndex,detectedBar=0;
+	MV_U32 tmpBaseHigh=0,tmpBaseLow=0;
+	MV_U32 pciMaxBars=0;
+
+	pPciAgent->barsNum=0;
+
+	/* check if we are bridge*/
+	if ((pPciAgent->baseClassCode == PCI_BRIDGE_CLASS)&&
+		(pPciAgent->subClassCode == P2P_BRIDGE_SUB_CLASS_CODE))
+	{
+		pciMaxBars = 2;
+	}
+	else /* no bridge */
+	{
+		pciMaxBars = 6;
+	}
+
+	/* read this device pci bars */
+	for (barIndex = 0 ; barIndex < pciMaxBars ; barIndex++ )
+	{
+		/* Read  PCI_MEMORY_BAR_BASE_ADDR */
+		tmpBaseLow = pciData = mvPciIfConfigRead(pciIf,
+									   bus,dev,func,
+						               PCI_MEMORY_BAR_BASE_ADDR(barIndex));
+
+		pPciAgent->pciBar[detectedBar].barOffset =
+			PCI_MEMORY_BAR_BASE_ADDR(barIndex);
+
+		/* check if the bar is 32bit or 64bit bar */
+		switch (pciData & PBBLR_TYPE_MASK)
+		{
+		case PBBLR_TYPE_32BIT_ADDR:
+			pPciAgent->pciBar[detectedBar].barType = PCI_32BIT_BAR;
+			break;
+		case PBBLR_TYPE_64BIT_ADDR:
+			pPciAgent->pciBar[detectedBar].barType = PCI_64BIT_BAR;
+			break;
+
+		}
+
+		/* check if it is memory or IO bar */
+		if (pciData & PBBLR_IOSPACE)
+		{
+			pPciAgent->pciBar[detectedBar].barMapping=PCI_IO_BAR;
+		}
+		else
+		{
+			pPciAgent->pciBar[detectedBar].barMapping=PCI_MEMORY_BAR;
+		}
+
+		/* if it is memory bar then check if it is prefetchable */
+		if (PCI_MEMORY_BAR == pPciAgent->pciBar[detectedBar].barMapping)
+		{
+			if (pciData & PBBLR_PREFETCH_EN)
+			{
+				pPciAgent->pciBar[detectedBar].isPrefetchable = MV_TRUE;
+			}
+			else
+			{
+				pPciAgent->pciBar[detectedBar].isPrefetchable = MV_FALSE;
+			}
+
+            pPciAgent->pciBar[detectedBar].barBaseLow =
+				pciData & PBBLR_MEM_BASE_MASK;
+
+
+		}
+		else /* IO Bar */
+		{
+			pPciAgent->pciBar[detectedBar].barBaseLow =
+				pciData & PBBLR_IO_BASE_MASK;
+
+		}
+
+		pPciAgent->pciBar[detectedBar].barBaseHigh=0;
+
+		if (PCI_64BIT_BAR == pPciAgent->pciBar[detectedBar].barType)
+		{
+			barIndex++;
+
+			tmpBaseHigh = pPciAgent->pciBar[detectedBar].barBaseHigh =
+				mvPciIfConfigRead(pciIf,
+								bus,dev,func,
+								PCI_MEMORY_BAR_BASE_ADDR(barIndex));
+
+
+		}
+
+		/* calculating full base address (64bit) */
+		pPciAgent->pciBar[detectedBar].barBaseAddr =
+			(MV_U64)pPciAgent->pciBar[detectedBar].barBaseHigh;
+
+		pPciAgent->pciBar[detectedBar].barBaseAddr <<= 32;
+
+		pPciAgent->pciBar[detectedBar].barBaseAddr |=
+			(MV_U64)pPciAgent->pciBar[detectedBar].barBaseLow;
+
+
+
+		/* get the sizes of the the bar */
+
+		pPciAgent->pciBar[detectedBar].barSizeHigh=0;
+
+		if ((PCI_64BIT_BAR == pPciAgent->pciBar[detectedBar].barType) &&
+			(PCI_MEMORY_BAR == pPciAgent->pciBar[detectedBar].barMapping))
+
+		{
+			/* write oxffffffff to the bar to get the size */
+			/* start with sizelow ( original value was saved in tmpBaseLow ) */
+			mvPciIfConfigWrite(pciIf,
+							bus,dev,func,
+							PCI_MEMORY_BAR_BASE_ADDR(barIndex-1),
+							0xffffffff);
+
+			/* read size */
+			pPciAgent->pciBar[detectedBar].barSizeLow =
+				mvPciIfConfigRead(pciIf,
+								bus,dev,func,
+								PCI_MEMORY_BAR_BASE_ADDR(barIndex-1));
+
+
+
+			/* restore original value */
+			mvPciIfConfigWrite(pciIf,
+							bus,dev,func,
+							PCI_MEMORY_BAR_BASE_ADDR(barIndex-1),
+							tmpBaseLow);
+
+
+			/* now do the same for BaseHigh */
+
+			/* write oxffffffff to the bar to get the size */
+			mvPciIfConfigWrite(pciIf,
+							bus,dev,func,
+							PCI_MEMORY_BAR_BASE_ADDR(barIndex),
+							0xffffffff);
+
+			/* read size */
+			pPciAgent->pciBar[detectedBar].barSizeHigh =
+				mvPciIfConfigRead(pciIf,
+								bus,dev,func,
+								PCI_MEMORY_BAR_BASE_ADDR(barIndex));
+
+			/* restore original value */
+			mvPciIfConfigWrite(pciIf,
+							bus,dev,func,
+							PCI_MEMORY_BAR_BASE_ADDR(barIndex),
+							tmpBaseHigh);
+
+			if ((0 == pPciAgent->pciBar[detectedBar].barSizeLow)&&
+				(0 == pPciAgent->pciBar[detectedBar].barSizeHigh))
+			{
+				/* this bar is not applicable for this device,
+				   ignore all previous settings and check the next bar*/
+
+				/* we though this was a 64bit bar , and it seems this
+				   was wrong ! so decrement barIndex */
+				barIndex--;
+				continue;
+			}
+
+			/* calculate the full 64 bit size  */
+
+			if (0 != pPciAgent->pciBar[detectedBar].barSizeHigh)
+			{
+				pPciAgent->pciBar[detectedBar].barSizeLow &= PBBLR_MEM_BASE_MASK;
+
+				pPciAgent->pciBar[detectedBar].barSizeLow =
+					~pPciAgent->pciBar[detectedBar].barSizeLow + 1;
+
+				pPciAgent->pciBar[detectedBar].barSizeHigh = 0;
+
+			}
+			else
+			{
+
+				pPciAgent->pciBar[detectedBar].barSizeLow &= PBBLR_MEM_BASE_MASK;
+
+				pPciAgent->pciBar[detectedBar].barSizeLow =
+					~pPciAgent->pciBar[detectedBar].barSizeLow + 1;
+
+				pPciAgent->pciBar[detectedBar].barSizeHigh = 0;
+
+			}
+
+
+
+		}
+		else /* 32bit bar */
+		{
+			/* write oxffffffff to the bar to get the size */
+			mvPciIfConfigWrite(pciIf,
+							bus,dev,func,
+							PCI_MEMORY_BAR_BASE_ADDR(barIndex),
+							0xffffffff);
+
+			/* read size */
+			pPciAgent->pciBar[detectedBar].barSizeLow =
+				mvPciIfConfigRead(pciIf,
+								bus,dev,func,
+								PCI_MEMORY_BAR_BASE_ADDR(barIndex));
+
+			if (0 == pPciAgent->pciBar[detectedBar].barSizeLow)
+			{
+				/* this bar is not applicable for this device,
+				   ignore all previous settings and check the next bar*/
+				continue;
+			}
+
+
+			/* restore original value */
+			mvPciIfConfigWrite(pciIf,
+							bus,dev,func,
+							PCI_MEMORY_BAR_BASE_ADDR(barIndex),
+							tmpBaseLow);
+
+		/* calculate size low */
+
+			if (PCI_MEMORY_BAR == pPciAgent->pciBar[detectedBar].barMapping)
+			{
+				pPciAgent->pciBar[detectedBar].barSizeLow &= PBBLR_MEM_BASE_MASK;
+			}
+			else
+			{
+				pPciAgent->pciBar[detectedBar].barSizeLow &= PBBLR_IO_BASE_MASK;
+			}
+
+			pPciAgent->pciBar[detectedBar].barSizeLow =
+				~pPciAgent->pciBar[detectedBar].barSizeLow + 1;
+
+			pPciAgent->pciBar[detectedBar].barSizeHigh = 0;
+			pPciAgent->pciBar[detectedBar].barSize =
+				(MV_U64)pPciAgent->pciBar[detectedBar].barSizeLow;
+
+
+		}
+
+		/* we are here ! this means we have already detected a bar for
+		this device , now move on */
+
+		detectedBar++;
+		pPciAgent->barsNum++;
+	}
+
+	return detectedBar;
+}
+
+
+/*******************************************************************************
+* mvPciClassNameGet - get PCI  class name
+*
+* DESCRIPTION:
+*		This function returns the PCI class name
+*
+* INPUT:
+*       baseClassCode       - Base Class Code.
+*
+* OUTPUT:
+*       pType - the class name
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPciClassNameGet(MV_U32 baseClassCode, MV_8 *pType)
+{
+
+    switch(baseClassCode)
+    {
+        case 0x0:
+            strcpy(pType,"Old generation device");
+            break;
+        case 0x1:
+            strcpy(pType,"Mass storage controller");
+            break;
+        case 0x2:
+            strcpy(pType,"Network controller");
+            break;
+        case 0x3:
+            strcpy(pType,"Display controller");
+            break;
+        case 0x4:
+            strcpy(pType,"Multimedia device");
+            break;
+        case 0x5:
+            strcpy(pType,"Memory controller");
+            break;
+        case 0x6:
+            strcpy(pType,"Bridge Device");
+            break;
+        case 0x7:
+            strcpy(pType,"Simple Communication controllers");
+            break;
+        case 0x8:
+            strcpy(pType,"Base system peripherals");
+            break;
+        case 0x9:
+            strcpy(pType,"Input Devices");
+            break;
+        case 0xa:
+            strcpy(pType,"Docking stations");
+            break;
+        case 0xb:
+            strcpy(pType,"Processors");
+            break;
+        case 0xc:
+            strcpy(pType,"Serial bus controllers");
+            break;
+        case 0xd:
+            strcpy(pType,"Wireless controllers");
+            break;
+        case 0xe:
+            strcpy(pType,"Intelligent I/O controllers");
+            break;
+        case 0xf:
+            strcpy(pType,"Satellite communication controllers");
+            break;
+        case 0x10:
+            strcpy(pType,"Encryption/Decryption controllers");
+            break;
+        case 0x11:
+            strcpy(pType,"Data acquisition and signal processing controllers");
+            break;
+        default:
+            strcpy(pType,"Unknown device");
+            break;
+    }
+
+	return MV_OK;
+
+}
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/pci_util/mvPciUtils.h b/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/pci_util/mvPciUtils.h
new file mode 100644
index 000000000000..e25520348403
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/pci-if/pci_util/mvPciUtils.h
@@ -0,0 +1,323 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvPciUtilsh
+#define __INCmvPciUtilsh
+
+/*
+This module only support scanning of Header type 00h of pci devices
+There is no suppotr for Header type 01h of pci devices  ( PCI bridges )
+*/
+
+/* includes */
+#include "mvSysHwConfig.h"
+#include "pci-if/mvPciIf.h"
+#include "pci/mvPciRegs.h"
+
+
+
+/* PCI base address low bar mask */
+#define PCI_ERROR_CODE                      0xffffffff
+
+#define PCI_BRIDGE_CLASS					0x6
+#define P2P_BRIDGE_SUB_CLASS_CODE			0x4
+
+
+#define P2P_BUSSES_NUM						0x18
+#define P2P_IO_BASE_LIMIT_SEC_STATUS		0x1C
+#define P2P_MEM_BASE_LIMIT					0x20
+#define P2P_PREF_MEM_BASE_LIMIT				0x24
+#define P2P_PREF_BASE_UPPER_32				0x28
+#define P2P_PREF_LIMIT_UPPER_32				0x2C
+#define P2P_IO_BASE_LIMIT_UPPER_16			0x30
+#define P2P_EXP_ROM							0x38
+
+/* P2P_BUSSES_NUM  (PBM) */
+
+#define PBM_PRIME_BUS_NUM_OFFS				0
+#define PBM_PRIME_BUS_NUM_MASK				(0xff << PBM_PRIME_BUS_NUM_OFFS)
+
+#define PBM_SEC_BUS_NUM_OFFS				8
+#define PBM_SEC_BUS_NUM_MASK				(0xff << PBM_SEC_BUS_NUM_OFFS)
+
+#define PBM_SUB_BUS_NUM_OFFS				16
+#define PBM_SUB_BUS_NUM_MASK				(0xff << PBM_SUB_BUS_NUM_OFFS)
+
+#define PBM_SEC_LAT_TMR_OFFS				24
+#define PBM_SEC_LAT_TMR_MASK				(0xff << PBM_SEC_LAT_TMR_OFFS)
+
+/* P2P_IO_BASE_LIMIT_SEC_STATUS (PIBLSS) */
+
+#define PIBLSS_IO_BASE_OFFS					0
+#define PIBLSS_IO_BASE_MASK					(0xff << PIBLSS_IO_BASE_OFFS)
+
+#define PIBLSS_ADD_CAP_OFFS					0
+#define PIBLSS_ADD_CAP_MASK 				(0x3 << PIBLSS_ADD_CAP_OFFS)
+#define PIBLSS_ADD_CAP_16BIT 				(0x0 << PIBLSS_ADD_CAP_OFFS)
+#define PIBLSS_ADD_CAP_32BIT 				(0x1 << PIBLSS_ADD_CAP_OFFS)
+
+#define PIBLSS_LOW_ADDR_OFFS				0
+#define PIBLSS_LOW_ADDR_MASK				(0xFFF << PIBLSS_LOW_ADDR_OFFS)
+
+#define PIBLSS_HIGH_ADDR_OFFS				12
+#define PIBLSS_HIGH_ADDR_MASK				(0xF << PIBLSS_HIGH_ADDR_OFFS)
+
+#define PIBLSS_IO_LIMIT_OFFS				8
+#define PIBLSS_IO_LIMIT_MASK				(0xff << PIBLSS_IO_LIMIT_OFFS)
+
+#define PIBLSS_SEC_STATUS_OFFS				16
+#define PIBLSS_SEC_STATUS_MASK				(0xffff << PIBLSS_SEC_STATUS_OFFS)
+
+
+/* P2P_MEM_BASE_LIMIT (PMBL)*/
+
+#define PMBL_MEM_BASE_OFFS					0
+#define PMBL_MEM_BASE_MASK					(0xffff << PMBL_MEM_BASE_OFFS)
+
+#define PMBL_MEM_LIMIT_OFFS					16
+#define PMBL_MEM_LIMIT_MASK					(0xffff << PMBL_MEM_LIMIT_OFFS)
+
+
+#define PMBL_LOW_ADDR_OFFS					0
+#define PMBL_LOW_ADDR_MASK					(0xFFFFF << PMBL_LOW_ADDR_OFFS)
+
+#define PMBL_HIGH_ADDR_OFFS					20
+#define PMBL_HIGH_ADDR_MASK					(0xFFF << PMBL_HIGH_ADDR_OFFS)
+
+
+/* P2P_PREF_MEM_BASE_LIMIT (PRMBL) */
+
+#define PRMBL_PREF_MEM_BASE_OFFS			0
+#define PRMBL_PREF_MEM_BASE_MASK			(0xffff << PRMBL_PREF_MEM_BASE_OFFS)
+
+#define PRMBL_PREF_MEM_LIMIT_OFFS			16
+#define PRMBL_PREF_MEM_LIMIT_MASK			(0xffff<<PRMBL_PREF_MEM_LIMIT_OFFS)
+
+#define PRMBL_LOW_ADDR_OFFS					0
+#define PRMBL_LOW_ADDR_MASK					(0xFFFFF << PRMBL_LOW_ADDR_OFFS)
+
+#define PRMBL_HIGH_ADDR_OFFS				20
+#define PRMBL_HIGH_ADDR_MASK				(0xFFF << PRMBL_HIGH_ADDR_OFFS)
+
+#define PRMBL_ADD_CAP_OFFS					0
+#define PRMBL_ADD_CAP_MASK					(0xf << PRMBL_ADD_CAP_OFFS)
+#define PRMBL_ADD_CAP_32BIT					(0x0 << PRMBL_ADD_CAP_OFFS)
+#define PRMBL_ADD_CAP_64BIT					(0x1 << PRMBL_ADD_CAP_OFFS)
+
+/* P2P_IO_BASE_LIMIT_UPPER_16 (PIBLU) */
+
+#define PRBU_IO_UPP_BASE_OFFS				0
+#define PRBU_IO_UPP_BASE_MASK				(0xffff << PRBU_IO_UPP_BASE_OFFS)
+
+#define PRBU_IO_UPP_LIMIT_OFFS				16
+#define PRBU_IO_UPP_LIMIT_MASK				(0xffff << PRBU_IO_UPP_LIMIT_OFFS)
+
+
+/* typedefs */
+
+typedef enum _mvPciBarMapping
+{
+    PCI_MEMORY_BAR,
+    PCI_IO_BAR,
+    PCI_NO_MAPPING
+}MV_PCI_BAR_MAPPING;
+
+typedef enum _mvPciBarType
+{
+    PCI_32BIT_BAR,
+    PCI_64BIT_BAR
+}MV_PCI_BAR_TYPE;
+
+typedef enum _mvPciIntPin
+{
+    MV_PCI_INTA = 1,
+    MV_PCI_INTB = 2,
+    MV_PCI_INTC = 3,
+    MV_PCI_INTD = 4
+}MV_PCI_INT_PIN;
+
+typedef enum _mvPciHeader
+{
+    MV_PCI_STANDARD,
+    MV_PCI_PCI2PCI_BRIDGE
+
+}MV_PCI_HEADER;
+
+
+/* BAR structure */
+typedef struct _pciBar
+{
+    MV_U32 barOffset;
+    MV_U32 barBaseLow;
+    MV_U32 barBaseHigh;
+    MV_U32 barSizeLow;
+    MV_U32 barSizeHigh;
+    /* The 'barBaseAddr' is a 64-bit variable
+       that will contain the TOTAL base address
+       value achived by combining both the 'barBaseLow'
+       and the 'barBaseHigh' parameters as follows:
+
+       BIT: 63          31         0
+            |           |         |
+            barBaseHigh barBaseLow */
+    MV_U64 barBaseAddr;
+    /* The 'barSize' is a 64-bit variable
+       that will contain the TOTAL size achived
+       by combining both the 'barSizeLow' and
+       the 'barSizeHigh' parameters as follows:
+
+       BIT: 63          31         0
+            |           |         |
+            barSizeHigh barSizeLow
+
+       NOTE: The total size described above
+             is AFTER the size calculation as
+             described in PCI spec rev2.2 */
+    MV_U64 barSize;
+    MV_BOOL            isPrefetchable;
+    MV_PCI_BAR_TYPE       barType;
+    MV_PCI_BAR_MAPPING    barMapping;
+
+
+} PCI_BAR;
+
+/* Device information structure */
+typedef struct _mvPciDevice
+{
+    /* Device specific information */
+	MV_U32			busNumber; 	/* Pci agent bus number */
+    MV_U32			deviceNum;	/* Pci agent device number */
+    MV_U32			function;	/* Pci agent function number */
+
+	MV_U32			venID;		/* Pci agent Vendor ID */
+    MV_U32			deviceID;	/* Pci agent Device ID */
+
+    MV_BOOL			isFastB2BCapable;	/* Capability of Fast Back to Back
+										   transactions */
+	MV_BOOL			isCapListSupport;	/* Support of Capability list */
+	MV_BOOL			is66MHZCapable;		/* 66MHZ support */
+
+    MV_U32			baseClassCode;		/* Pci agent base Class Code */
+    MV_U32			subClassCode;		/* Pci agent sub Class Code */
+    MV_U32			progIf;				/* Pci agent Programing interface */
+	MV_U32			revisionID;
+
+    PCI_BAR			pciBar[6]; 			/* Pci agent bar list */
+
+	MV_U32			p2pPrimBusNum;		/* P2P Primary Bus number*/
+	MV_U32			p2pSecBusNum;		/* P2P Secondary Bus Number*/
+	MV_U32			p2pSubBusNum;		/* P2P Subordinate bus Number */
+	MV_U32			p2pSecLatencyTimer;	/* P2P Econdary Latency Timer*/
+	MV_U32			p2pIObase;			/* P2P IO Base */
+	MV_U32			p2pIOLimit;			/* P2P IO Linit */
+	MV_BOOL			bIO32;
+	MV_U32			p2pSecStatus;		/* P2P Secondary Status */
+	MV_U32			p2pMemBase;			/* P2P Memory Space */
+	MV_U32			p2pMemLimit;		/* P2P Memory Limit*/
+	MV_U32			p2pPrefMemBase;		/* P2P Prefetchable Mem Base*/
+	MV_U32			p2pPrefMemLimit;	/* P2P Prefetchable Memory Limit*/
+	MV_BOOL			bPrefMem64;
+	MV_U32			p2pPrefBaseUpper32Bits;/* P2P Prefetchable upper 32 bits*/
+	MV_U32			p2pPrefLimitUpper32Bits;/* P2P prefetchable limit upper 32*/
+
+
+	MV_U32			pciCacheLine;		/* Pci agent cache line */
+	MV_U32			pciLatencyTimer;	/* Pci agent Latency timer  */
+    MV_PCI_HEADER	pciHeader;			/* Pci agent header type*/
+    MV_BOOL			isMultiFunction;	/* Multi function support */
+	MV_BOOL			isBISTCapable;		/* Self test capable */
+
+	MV_U32			subSysID;			/* Sub System ID */
+	MV_U32			subSysVenID;		/* Sub System Vendor ID */
+
+	MV_BOOL			isExpRom;			/* Expantion Rom support */
+	MV_U32			expRomAddr;			/* Expantion Rom pointer */
+
+	MV_U32			capListPointer;		/* Capability list pointer */
+
+	MV_U32			irqLine;		/* IRQ line  */
+	MV_PCI_INT_PIN	intPin;			/* Interrupt pin */
+	MV_U32			minGrant;		/* Minimum grant*/
+	MV_U32			maxLatency;		/* Maximum latency*/
+
+	MV_U32 			funtionsNum;	/* pci agent total functions number */
+
+	MV_U32 			barsNum;
+    MV_U8           type[60];		/* class name of the pci agent */
+
+
+} MV_PCI_DEVICE;
+
+/* PCI gloabl functions */
+MV_STATUS mvPciClassNameGet(MV_U32 classCode, MV_8 *pType);
+
+
+/* Performs a full scan on both PCIs and returns all possible details on the
+   agents found on the bus. */
+MV_STATUS mvPciScan(MV_U32 pciIf,
+					MV_PCI_DEVICE *pPciAgents,
+					MV_U32 *pPciAgentsNum);
+
+
+#endif /* #ifndef __INCmvPciUtilsh */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPci.c b/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPci.c
new file mode 100644
index 000000000000..2e61d214d089
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPci.c
@@ -0,0 +1,1043 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#include "pci/mvPci.h"
+
+#include "ctrlEnv/mvCtrlEnvLib.h"
+
+/* defines  */
+#ifdef MV_DEBUG
+	#define DB(x)	x
+#else
+	#define DB(x)
+#endif
+
+
+
+MV_VOID mvPciHalInit(MV_U32 pciIf, MV_PCI_MOD pciIfmod)
+{
+        if (MV_PCI_MOD_HOST == pciIfmod)
+    {
+
+                mvPciLocalBusNumSet(pciIf, PCI_HOST_BUS_NUM(pciIf));
+                mvPciLocalDevNumSet(pciIf, PCI_HOST_DEV_NUM(pciIf));
+
+                /* Local device master Enable */
+                mvPciMasterEnable(pciIf, MV_TRUE);
+
+                /* Local device slave Enable */
+                mvPciSlaveEnable(pciIf, mvPciLocalBusNumGet(pciIf),
+                                                 mvPciLocalDevNumGet(pciIf), MV_TRUE);
+        }
+        /* enable CPU-2-PCI ordering */
+        MV_REG_BIT_SET(PCI_CMD_REG(0), PCR_CPU_TO_PCI_ORDER_EN);
+}
+
+/*******************************************************************************
+* mvPciCommandSet - Set PCI comman register value.
+*
+* DESCRIPTION:
+*       This function sets a given PCI interface with its command register
+*       value.
+*
+* INPUT:
+*       pciIf   - PCI interface number.
+*       command - 32bit value to be written to comamnd register.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM if pciIf is not in range otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPciCommandSet(MV_U32 pciIf, MV_U32 command)
+{
+    MV_U32 locBusNum, locDevNum, regVal;
+
+    locBusNum =  mvPciLocalBusNumGet(pciIf);
+    locDevNum =  mvPciLocalDevNumGet(pciIf);
+
+	/* Parameter checking   */
+	if (pciIf >= mvCtrlPciMaxIfGet())
+	{
+		mvOsPrintf("mvPciCommandSet: ERR. Invalid PCI IF num %d\n", pciIf);
+		return MV_BAD_PARAM;
+	}
+
+	/* Set command register */
+	MV_REG_WRITE(PCI_CMD_REG(pciIf), command);
+
+    /* Upodate device max outstanding split tarnsaction */
+    if ((command & PCR_CPU_TO_PCI_ORDER_EN) &&
+        (command & PCR_PCI_TO_CPU_ORDER_EN))
+    {
+        /* Read PCI-X command register */
+        regVal = mvPciConfigRead (pciIf, locBusNum, locDevNum, 0, PCIX_COMMAND);
+
+        /* clear bits 22:20 */
+        regVal &= 0xff8fffff;
+
+        /* set reset value */
+        regVal |= (0x3 << 20);
+
+        /* Write back the value */
+        mvPciConfigWrite (pciIf, locBusNum, locDevNum, 0, PCIX_COMMAND, regVal);
+    }
+
+	return MV_OK;
+
+
+}
+
+
+/*******************************************************************************
+* mvPciModeGet - Get PCI interface mode.
+*
+* DESCRIPTION:
+*       This function returns the given PCI interface mode.
+*
+* INPUT:
+*       pciIf   - PCI interface number.
+*
+* OUTPUT:
+*       pPciMode - Pointer to PCI mode structure.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPciModeGet(MV_U32 pciIf, MV_PCI_MODE *pPciMode)
+{
+	MV_U32 pciMode;
+
+	/* Parameter checking   */
+	if (pciIf >= mvCtrlPciMaxIfGet())
+	{
+		mvOsPrintf("mvPciModeGet: ERR. Invalid PCI interface %d\n", pciIf);
+		return MV_BAD_PARAM;
+	}
+	if (NULL == pPciMode)
+	{
+		mvOsPrintf("mvPciModeGet: ERR. pPciMode = NULL  \n");
+		return MV_BAD_PARAM;
+	}
+
+	/* Read pci mode register */
+	pciMode = MV_REG_READ(PCI_MODE_REG(pciIf));
+
+	switch (pciMode & PMR_PCI_MODE_MASK)
+	{
+		case PMR_PCI_MODE_CONV:
+            pPciMode->pciType  = MV_PCI_CONV;
+
+			if (MV_REG_READ(PCI_DLL_CTRL_REG(pciIf)) & PDC_DLL_EN)
+			{
+				pPciMode->pciSpeed = 66000000; /* 66MHZ */
+			}
+			else
+			{
+				pPciMode->pciSpeed = 33000000; /* 33MHZ */
+			}
+
+			break;
+
+		case PMR_PCI_MODE_PCIX_66MHZ:
+			pPciMode->pciType  = MV_PCIX;
+			pPciMode->pciSpeed = 66000000; /* 66MHZ */
+			break;
+
+		case PMR_PCI_MODE_PCIX_100MHZ:
+			pPciMode->pciType  = MV_PCIX;
+			pPciMode->pciSpeed = 100000000; /* 100MHZ */
+			break;
+
+		case PMR_PCI_MODE_PCIX_133MHZ:
+			pPciMode->pciType  = MV_PCIX;
+			pPciMode->pciSpeed = 133000000; /* 133MHZ */
+			break;
+
+		default:
+			{
+				mvOsPrintf("mvPciModeGet: ERR. Non existing mode !!\n");
+				return MV_ERROR;
+			}
+	}
+
+	switch (pciMode & PMR_PCI_64_MASK)
+	{
+		case PMR_PCI_64_64BIT:
+			pPciMode->pciWidth = MV_PCI_64;
+			break;
+
+		case PMR_PCI_64_32BIT:
+            pPciMode->pciWidth = MV_PCI_32;
+            break;
+
+		default:
+			{
+				mvOsPrintf("mvPciModeGet: ERR. Non existing mode !!\n");
+				return MV_ERROR;
+			}
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvPciRetrySet - Set PCI retry counters
+*
+* DESCRIPTION:
+*       This function specifies the number of times the PCI controller
+*       retries a transaction before it quits.
+*       Applies to the PCI Master when acting as a requester.
+*       Applies to the PCI slave when acting as a completer (PCI-X mode).
+*       A 0x00 value means a "retry forever".
+*
+* INPUT:
+*       pciIf   - PCI interface number.
+*       counter - Number of times PCI controller retry. Use counter value
+*                 up to PRR_RETRY_CNTR_MAX.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPciRetrySet(MV_U32 pciIf, MV_U32 counter)
+{
+	MV_U32 pciRetry;
+
+	/* Parameter checking   */
+	if (pciIf >= mvCtrlPciMaxIfGet())
+	{
+		mvOsPrintf("mvPciRetrySet: ERR. Invalid PCI interface %d\n", pciIf);
+		return MV_BAD_PARAM;
+	}
+
+	if (counter >= PRR_RETRY_CNTR_MAX)
+	{
+		mvOsPrintf("mvPciRetrySet: ERR. Invalid counter: %d\n", counter);
+		return MV_BAD_PARAM;
+
+	}
+
+	/* Reading PCI retry register */
+    pciRetry  = MV_REG_READ(PCI_RETRY_REG(pciIf));
+
+	pciRetry &= ~PRR_RETRY_CNTR_MASK;
+
+	pciRetry |= (counter << PRR_RETRY_CNTR_OFFS);
+
+	/* write new value */
+	MV_REG_WRITE(PCI_RETRY_REG(pciIf), pciRetry);
+
+	return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvPciDiscardTimerSet - Set PCI discard timer
+*
+* DESCRIPTION:
+*       This function set PCI discard timer.
+*       In conventional PCI mode:
+*       Specifies the number of PCLK cycles the PCI slave keeps a non-accessed
+*       read buffers (non-completed delayed read) before invalidate the buffer.
+*       Set to '0' to disable the timer. The PCI slave waits for delayed
+*       read completion forever.
+*       In PCI-X mode:
+*       Specifies the number of PCLK cycles the PCI master waits for split
+*       completion transaction, before it invalidates the pre-allocated read
+*       buffer.
+*       Set to '0' to disable the timer. The PCI master waits for split
+*       completion forever.
+*       NOTE: Must be set to a number greater than MV_PCI_MAX_DISCARD_CLK,
+*       unless using the "wait for ever" setting 0x0.
+*       NOTE: Must not be updated while there are pending read requests.
+*
+* INPUT:
+*       pciIf      - PCI interface number.
+*       pClkCycles - Number of PCI clock cycles.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPciDiscardTimerSet(MV_U32 pciIf, MV_U32 pClkCycles)
+{
+	MV_U32 pciDiscardTimer;
+
+	/* Parameter checking   */
+	if (pciIf >= mvCtrlPciMaxIfGet())
+	{
+		mvOsPrintf("mvPciDiscardTimerSet: ERR. Invalid PCI interface %d\n",
+																		pciIf);
+		return MV_BAD_PARAM;
+	}
+
+	if (pClkCycles >= PDTR_TIMER_MIN)
+	{
+		mvOsPrintf("mvPciDiscardTimerSet: ERR. Invalid Clk value: %d\n",
+																   pClkCycles);
+		return MV_BAD_PARAM;
+
+	}
+
+	/* Read  PCI Discard Timer */
+	pciDiscardTimer  = MV_REG_READ(PCI_DISCARD_TIMER_REG(pciIf));
+
+	pciDiscardTimer &= ~PDTR_TIMER_MASK;
+
+    pciDiscardTimer |= (pClkCycles << PDTR_TIMER_OFFS);
+
+	/* Write new value */
+	MV_REG_WRITE(PCI_DISCARD_TIMER_REG(pciIf), pciDiscardTimer);
+
+	return MV_OK;
+
+}
+
+/* PCI Arbiter routines */
+
+/*******************************************************************************
+* mvPciArbEnable - PCI arbiter enable/disable
+*
+* DESCRIPTION:
+*       This fuction enable/disables a given PCI interface arbiter.
+*       NOTE: Arbiter setting can not be changed while in work. It should only
+*             be set once.
+* INPUT:
+*       pciIf  - PCI interface number.
+*       enable - Enable/disable parameter. If enable = MV_TRUE then enable.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_STATUS mvPciArbEnable(MV_U32 pciIf, MV_BOOL enable)
+{
+	MV_U32 regVal;
+
+	/* Parameter checking   */
+	if (pciIf >= mvCtrlPciMaxIfGet())
+	{
+		mvOsPrintf("mvPciArbEnable: ERR. Invalid PCI interface %d\n", pciIf);
+		return MV_ERROR;
+	}
+
+    /* Set PCI Arbiter Control register according to default configuration 	*/
+	regVal = MV_REG_READ(PCI_ARBITER_CTRL_REG(pciIf));
+
+	/* Make sure arbiter disabled before changing its values */
+	MV_REG_BIT_RESET(PCI_ARBITER_CTRL_REG(pciIf), PACR_ARB_ENABLE);
+
+	regVal &= ~PCI_ARBITER_CTRL_DEFAULT_MASK;
+
+	regVal |= PCI_ARBITER_CTRL_DEFAULT;		/* Set default configuration	*/
+
+	if (MV_TRUE == enable)
+	{
+		regVal |= PACR_ARB_ENABLE;
+	}
+	else
+	{
+		regVal &= ~PACR_ARB_ENABLE;
+	}
+
+	/* Write to register 										            */
+	MV_REG_WRITE(PCI_ARBITER_CTRL_REG(pciIf), regVal);
+
+	return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvPciArbParkDis - Disable arbiter parking on agent
+*
+* DESCRIPTION:
+*       This function disables the PCI arbiter from parking on the given agent
+*       list.
+*
+* INPUT:
+*       pciIf        - PCI interface number.
+*       pciAgentMask - When a bit in the mask is set to '1', parking on
+*                      the associated PCI master is disabled. Mask bit
+*                      refers to bit 0 - 6. For example disable parking on PCI
+*                      agent 3 set pciAgentMask 0x4 (bit 3 is set).
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_STATUS mvPciArbParkDis(MV_U32 pciIf, MV_U32 pciAgentMask)
+{
+	MV_U32 pciArbiterCtrl;
+
+	/* Parameter checking   */
+	if (pciIf >= mvCtrlPciMaxIfGet())
+	{
+		mvOsPrintf("mvPciArbParkDis: ERR. Invalid PCI interface %d\n", pciIf);
+		return MV_ERROR;
+	}
+
+	/* Reading Arbiter Control register */
+	pciArbiterCtrl = MV_REG_READ(PCI_ARBITER_CTRL_REG(pciIf));
+
+	/* Arbiter must be disabled before changing parking */
+	MV_REG_BIT_RESET(PCI_ARBITER_CTRL_REG(pciIf), PACR_ARB_ENABLE);
+
+	/* do the change */
+    pciArbiterCtrl &= ~PACR_PARK_DIS_MASK;
+	pciArbiterCtrl |= (pciAgentMask << PACR_PARK_DIS_OFFS);
+
+	/* writing new value ( if th earbiter was enabled before the change		*/
+	/* here it will be reenabled 											*/
+	MV_REG_WRITE(PCI_ARBITER_CTRL_REG(pciIf), pciArbiterCtrl);
+
+	return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvPciArbBrokDetectSet - Set PCI arbiter broken detection
+*
+* DESCRIPTION:
+*       This function sets the maximum number of cycles that the arbiter
+*       waits for a PCI master to respond to its grant assertion. If a
+*       PCI agent fails to respond within this time, the PCI arbiter aborts
+*       the transaction and performs a new arbitration cycle.
+*       NOTE: Value must be greater than '1' for conventional PCI and
+*       greater than '5' for PCI-X.
+*
+* INPUT:
+*       pciIf      - PCI interface number.
+*       pClkCycles - Number of PCI clock cycles. If equal to '0' the broken
+*                    master detection is disabled.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPciArbBrokDetectSet(MV_U32 pciIf, MV_U32 pClkCycles)
+{
+	MV_U32 pciArbiterCtrl;
+	MV_U32 pciMode;
+
+	/* Parameter checking   */
+	if (pciIf >= mvCtrlPciMaxIfGet())
+	{
+		mvOsPrintf("mvPciArbBrokDetectSet: ERR. Invalid PCI interface %d\n",
+																		pciIf);
+		return MV_BAD_PARAM;
+	}
+
+	/* Checking PCI mode and if pClkCycles is legal value */
+	pciMode = MV_REG_READ(PCI_MODE_REG(pciIf));
+	pciMode &= PMR_PCI_MODE_MASK;
+
+	if (PMR_PCI_MODE_CONV == pciMode)
+	{
+		if (pClkCycles < PACR_BROKEN_VAL_CONV_MIN)
+			return MV_ERROR;
+	}
+	else
+	{
+		if (pClkCycles < PACR_BROKEN_VAL_PCIX_MIN)
+			return MV_ERROR;
+	}
+
+	pClkCycles <<= PACR_BROKEN_VAL_OFFS;
+
+	/* Reading Arbiter Control register */
+	pciArbiterCtrl  = MV_REG_READ(PCI_ARBITER_CTRL_REG(pciIf));
+	pciArbiterCtrl &= ~PACR_BROKEN_VAL_MASK;
+	pciArbiterCtrl |= pClkCycles;
+
+	/* Arbiter must be disabled before changing broken detection */
+	MV_REG_BIT_RESET(PCI_ARBITER_CTRL_REG(pciIf), PACR_ARB_ENABLE);
+
+	/* writing new value ( if th earbiter was enabled before the change 	*/
+	/* here it will be reenabled 											*/
+
+	MV_REG_WRITE(PCI_ARBITER_CTRL_REG(pciIf), pciArbiterCtrl);
+
+	return MV_OK;
+}
+
+/* PCI configuration space read write */
+
+/*******************************************************************************
+* mvPciConfigRead - Read from configuration space
+*
+* DESCRIPTION:
+*       This function performs a 32 bit read from PCI configuration space.
+*       It supports both type 0 and type 1 of Configuration Transactions
+*       (local and over bridge). In order to read from local bus segment, use
+*       bus number retrieved from mvPciLocalBusNumGet(). Other bus numbers
+*       will result configuration transaction of type 1 (over bridge).
+*
+* INPUT:
+*       pciIf   - PCI interface number.
+*       bus     - PCI segment bus number.
+*       dev     - PCI device number.
+*       func    - Function number.
+*       regOffs - Register offset.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit register data, 0xffffffff on error
+*
+*******************************************************************************/
+MV_U32 mvPciConfigRead (MV_U32 pciIf, MV_U32 bus, MV_U32 dev, MV_U32 func,
+                        MV_U32 regOff)
+{
+	MV_U32 pciData = 0;
+
+	/* Parameter checking   */
+	if (PCI_DEFAULT_IF != pciIf)
+	{
+		if (pciIf >= mvCtrlPciMaxIfGet())
+		{
+			mvOsPrintf("mvPciConfigRead: ERR. Invalid PCI interface %d\n",pciIf);
+			return 0xFFFFFFFF;
+		}
+	}
+
+	if (dev >= MAX_PCI_DEVICES)
+	{
+		DB(mvOsPrintf("mvPciConfigRead: ERR. device number illigal %d\n", dev));
+		return 0xFFFFFFFF;
+	}
+
+	if (func >= MAX_PCI_FUNCS)
+	{
+		DB(mvOsPrintf("mvPciConfigRead: ERR. function number illigal %d\n", func));
+		return 0xFFFFFFFF;
+	}
+
+	if (bus >= MAX_PCI_BUSSES)
+	{
+		DB(mvOsPrintf("mvPciConfigRead: ERR. bus number illigal %d\n", bus));
+		return MV_ERROR;
+	}
+
+
+	/* Creating PCI address to be passed */
+	pciData |= (bus << PCAR_BUS_NUM_OFFS);
+	pciData |= (dev << PCAR_DEVICE_NUM_OFFS);
+	pciData |= (func << PCAR_FUNC_NUM_OFFS);
+	pciData |= (regOff & PCAR_REG_NUM_MASK);
+
+	pciData |= PCAR_CONFIG_EN;
+
+	/* Write the address to the PCI configuration address register */
+	MV_REG_WRITE(PCI_CONFIG_ADDR_REG(pciIf), pciData);
+
+	/* In order to let the PCI controller absorbed the address of the read 	*/
+	/* transaction we perform a validity check that the address was written */
+	if(pciData != MV_REG_READ(PCI_CONFIG_ADDR_REG(pciIf)))
+	{
+		return MV_ERROR;
+	}
+	/* Read the Data returned in the PCI Data register */
+	pciData = MV_REG_READ(PCI_CONFIG_DATA_REG(pciIf));
+
+	return pciData;
+}
+
+/*******************************************************************************
+* mvPciConfigWrite - Write to configuration space
+*
+* DESCRIPTION:
+*       This function performs a 32 bit write to PCI configuration space.
+*       It supports both type 0 and type 1 of Configuration Transactions
+*       (local and over bridge). In order to write to local bus segment, use
+*       bus number retrieved from mvPciLocalBusNumGet(). Other bus numbers
+*       will result configuration transaction of type 1 (over bridge).
+*
+* INPUT:
+*       pciIf   - PCI interface number.
+*       bus     - PCI segment bus number.
+*       dev     - PCI device number.
+*       func    - Function number.
+*       regOffs - Register offset.
+*       data    - 32bit data.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPciConfigWrite(MV_U32 pciIf, MV_U32 bus, MV_U32 dev,
+                           MV_U32 func, MV_U32 regOff, MV_U32 data)
+{
+	MV_U32 pciData = 0;
+
+	/* Parameter checking   */
+	if (PCI_DEFAULT_IF != pciIf)
+	{
+		if (pciIf >= mvCtrlPciMaxIfGet())
+		{
+			mvOsPrintf("mvPciConfigWrite: ERR. Invalid PCI interface %d\n",
+																		pciIf);
+			return 0xFFFFFFFF;
+		}
+	}
+
+	if (dev >= MAX_PCI_DEVICES)
+	{
+		mvOsPrintf("mvPciConfigWrite: ERR. device number illigal %d\n",dev);
+		return MV_BAD_PARAM;
+	}
+
+	if (func >= MAX_PCI_FUNCS)
+	{
+		mvOsPrintf("mvPciConfigWrite: ERR. function number illigal %d\n", func);
+		return MV_ERROR;
+	}
+
+	if (bus >= MAX_PCI_BUSSES)
+	{
+		mvOsPrintf("mvPciConfigWrite: ERR. bus number illigal %d\n", bus);
+		return MV_ERROR;
+	}
+
+	/* Creating PCI address to be passed */
+	pciData |= (bus << PCAR_BUS_NUM_OFFS);
+	pciData |= (dev << PCAR_DEVICE_NUM_OFFS);
+	pciData |= (func << PCAR_FUNC_NUM_OFFS);
+	pciData |= (regOff & PCAR_REG_NUM_MASK);
+
+	pciData |= PCAR_CONFIG_EN;
+
+	/* Write the address to the PCI configuration address register */
+	MV_REG_WRITE(PCI_CONFIG_ADDR_REG(pciIf), pciData);
+
+	/* In order to let the PCI controller absorbed the address of the read 	*/
+	/* transaction we perform a validity check that the address was written */
+	if(pciData != MV_REG_READ(PCI_CONFIG_ADDR_REG(pciIf)))
+	{
+		return MV_ERROR;
+	}
+
+	/* Write the Data passed to the PCI Data register */
+	MV_REG_WRITE(PCI_CONFIG_DATA_REG(pciIf), data);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvPciMasterEnable - Enable/disale PCI interface master transactions.
+*
+* DESCRIPTION:
+*       This function performs read modified write to PCI command status
+*       (offset 0x4) to set/reset bit 2. After this bit is set, the PCI
+*       master is allowed to gain ownership on the bus, otherwise it is
+*       incapable to do so.
+*
+* INPUT:
+*       pciIf  - PCI interface number.
+*       enable - Enable/disable parameter.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPciMasterEnable(MV_U32 pciIf, MV_BOOL enable)
+{
+	MV_U32 pciCommandStatus;
+	MV_U32 RegOffs;
+	MV_U32 localBus;
+	MV_U32 localDev;
+
+	/* Parameter checking   */
+	if (pciIf >= mvCtrlPciMaxIfGet())
+	{
+		mvOsPrintf("mvPciMasterEnable: ERR. Invalid PCI interface %d\n", pciIf);
+		return MV_ERROR;
+	}
+
+	localBus = mvPciLocalBusNumGet(pciIf);
+	localDev = mvPciLocalDevNumGet(pciIf);
+
+	RegOffs = PCI_STATUS_AND_COMMAND;
+
+	pciCommandStatus = mvPciConfigRead(pciIf, localBus, localDev, 0, RegOffs);
+
+	if (MV_TRUE == enable)
+	{
+		pciCommandStatus |= PSCR_MASTER_EN;
+	}
+	else
+	{
+		pciCommandStatus &= ~PSCR_MASTER_EN;
+	}
+
+	mvPciConfigWrite(pciIf, localBus, localDev, 0, RegOffs, pciCommandStatus);
+
+	return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvPciSlaveEnable - Enable/disale PCI interface slave transactions.
+*
+* DESCRIPTION:
+*       This function performs read modified write to PCI command status
+*       (offset 0x4) to set/reset bit 0 and 1. After those bits are set,
+*       the PCI slave is allowed to respond to PCI IO space access (bit 0)
+*       and PCI memory space access (bit 1).
+*
+* INPUT:
+*       pciIf  - PCI interface number.
+*       dev     - PCI device number.
+*       enable - Enable/disable parameter.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPciSlaveEnable(MV_U32 pciIf, MV_U32 bus, MV_U32 dev, MV_BOOL enable)
+{
+	MV_U32 pciCommandStatus;
+	MV_U32 RegOffs;
+
+	/* Parameter checking   */
+	if (pciIf >= mvCtrlPciMaxIfGet())
+	{
+		mvOsPrintf("mvPciSlaveEnable: ERR. Invalid PCI interface %d\n", pciIf);
+		return MV_BAD_PARAM;
+	}
+	if (dev >= MAX_PCI_DEVICES)
+	{
+		mvOsPrintf("mvPciLocalDevNumSet: ERR. device number illigal %d\n", dev);
+		return MV_BAD_PARAM;
+
+	}
+
+	RegOffs = PCI_STATUS_AND_COMMAND;
+
+	pciCommandStatus=mvPciConfigRead(pciIf, bus, dev, 0, RegOffs);
+
+    if (MV_TRUE == enable)
+	{
+		pciCommandStatus |= (PSCR_IO_EN | PSCR_MEM_EN);
+	}
+	else
+	{
+		pciCommandStatus &= ~(PSCR_IO_EN | PSCR_MEM_EN);
+	}
+
+	mvPciConfigWrite(pciIf, bus, dev, 0, RegOffs, pciCommandStatus);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvPciLocalBusNumSet - Set PCI interface local bus number.
+*
+* DESCRIPTION:
+*       This function sets given PCI interface its local bus number.
+*       Note: In case the PCI interface is PCI-X, the information is read-only.
+*
+* INPUT:
+*       pciIf  - PCI interface number.
+*       busNum - Bus number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_NOT_ALLOWED in case PCI interface is PCI-X.
+*       MV_BAD_PARAM on bad parameters ,
+*       otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPciLocalBusNumSet(MV_U32 pciIf, MV_U32 busNum)
+{
+	MV_U32 pciP2PConfig;
+	MV_PCI_MODE pciMode;
+	MV_U32 localBus;
+	MV_U32 localDev;
+
+
+	/* Parameter checking   */
+	if (pciIf >= mvCtrlPciMaxIfGet())
+	{
+		mvOsPrintf("mvPciLocalBusNumSet: ERR. Invalid PCI interface %d\n",pciIf);
+		return MV_BAD_PARAM;
+	}
+	if (busNum >= MAX_PCI_BUSSES)
+	{
+		mvOsPrintf("mvPciLocalBusNumSet: ERR. bus number illigal %d\n", busNum);
+		return MV_ERROR;
+
+	}
+
+	localBus = mvPciLocalBusNumGet(pciIf);
+	localDev = mvPciLocalDevNumGet(pciIf);
+
+
+	/* PCI interface mode */
+	mvPciModeGet(pciIf, &pciMode);
+
+	/* if PCI type is PCI-X then it is not allowed to change the dev number */
+	if (MV_PCIX == pciMode.pciType)
+	{
+		pciP2PConfig = mvPciConfigRead(pciIf, localBus, localDev, 0, PCIX_STATUS );
+
+		pciP2PConfig &= ~PXS_BN_MASK;
+
+		pciP2PConfig |= (busNum << PXS_BN_OFFS) & PXS_BN_MASK;
+
+		mvPciConfigWrite(pciIf, localBus, localDev, 0, PCIX_STATUS,pciP2PConfig );
+
+	}
+	else
+	{
+		pciP2PConfig  = MV_REG_READ(PCI_P2P_CONFIG_REG(pciIf));
+
+		pciP2PConfig &= ~PPCR_BUS_NUM_MASK;
+
+		pciP2PConfig |= (busNum << PPCR_BUS_NUM_OFFS) & PPCR_BUS_NUM_MASK;
+
+		MV_REG_WRITE(PCI_P2P_CONFIG_REG(pciIf), pciP2PConfig);
+
+	}
+
+
+	return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvPciLocalBusNumGet - Get PCI interface local bus number.
+*
+* DESCRIPTION:
+*       This function gets the local bus number of a given PCI interface.
+*
+* INPUT:
+*       pciIf  - PCI interface number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Local bus number.0xffffffff on Error
+*
+*******************************************************************************/
+MV_U32 mvPciLocalBusNumGet(MV_U32 pciIf)
+{
+	MV_U32 pciP2PConfig;
+
+	/* Parameter checking   */
+	if (PCI_DEFAULT_IF != pciIf)
+	{
+		if (pciIf >= mvCtrlPciMaxIfGet())
+		{
+			mvOsPrintf("mvPciLocalBusNumGet: ERR. Invalid PCI interface %d\n",
+																		pciIf);
+			return 0xFFFFFFFF;
+		}
+	}
+
+	pciP2PConfig  = MV_REG_READ(PCI_P2P_CONFIG_REG(pciIf));
+	pciP2PConfig &= PPCR_BUS_NUM_MASK;
+	return (pciP2PConfig >> PPCR_BUS_NUM_OFFS);
+}
+
+
+/*******************************************************************************
+* mvPciLocalDevNumSet - Set PCI interface local device number.
+*
+* DESCRIPTION:
+*       This function sets given PCI interface its local device number.
+*       Note: In case the PCI interface is PCI-X, the information is read-only.
+*
+* INPUT:
+*       pciIf  - PCI interface number.
+*       devNum - Device number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_NOT_ALLOWED in case PCI interface is PCI-X. MV_BAD_PARAM on bad parameters ,
+*       otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPciLocalDevNumSet(MV_U32 pciIf, MV_U32 devNum)
+{
+	MV_U32 pciP2PConfig;
+	MV_PCI_MODE pciMode;
+	MV_U32 localBus;
+	MV_U32 localDev;
+
+	/* Parameter checking   */
+	if (pciIf >= mvCtrlPciMaxIfGet())
+	{
+		mvOsPrintf("mvPciLocalDevNumSet: ERR. Invalid PCI interface %d\n",pciIf);
+		return MV_BAD_PARAM;
+	}
+	if (devNum >= MAX_PCI_DEVICES)
+	{
+		mvOsPrintf("mvPciLocalDevNumSet: ERR. device number illigal %d\n",
+																	   devNum);
+		return MV_BAD_PARAM;
+
+	}
+
+	localBus = mvPciLocalBusNumGet(pciIf);
+	localDev = mvPciLocalDevNumGet(pciIf);
+
+	/* PCI interface mode */
+	mvPciModeGet(pciIf, &pciMode);
+
+	/* if PCI type is PCIX then it is not allowed to change the dev number */
+	if (MV_PCIX == pciMode.pciType)
+	{
+		pciP2PConfig = mvPciConfigRead(pciIf, localBus, localDev, 0, PCIX_STATUS );
+
+		pciP2PConfig &= ~PXS_DN_MASK;
+
+		pciP2PConfig |= (devNum << PXS_DN_OFFS) & PXS_DN_MASK;
+
+		mvPciConfigWrite(pciIf,localBus, localDev, 0, PCIX_STATUS,pciP2PConfig );
+	}
+	else
+	{
+		pciP2PConfig  = MV_REG_READ(PCI_P2P_CONFIG_REG(pciIf));
+
+		pciP2PConfig &= ~PPCR_DEV_NUM_MASK;
+
+		pciP2PConfig |= (devNum << PPCR_DEV_NUM_OFFS) & PPCR_DEV_NUM_MASK;
+
+		MV_REG_WRITE(PCI_P2P_CONFIG_REG(pciIf), pciP2PConfig);
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvPciLocalDevNumGet - Get PCI interface local device number.
+*
+* DESCRIPTION:
+*       This function gets the local device number of a given PCI interface.
+*
+* INPUT:
+*       pciIf  - PCI interface number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Local device number. 0xffffffff on Error
+*
+*******************************************************************************/
+MV_U32 mvPciLocalDevNumGet(MV_U32 pciIf)
+{
+	MV_U32 pciP2PConfig;
+
+	/* Parameter checking   */
+
+	if (PCI_DEFAULT_IF != pciIf)
+	{
+		if (pciIf >= mvCtrlPciMaxIfGet())
+		{
+			mvOsPrintf("mvPciLocalDevNumGet: ERR. Invalid PCI interface %d\n",
+																		pciIf);
+			return 0xFFFFFFFF;
+		}
+	}
+
+	pciP2PConfig  = MV_REG_READ(PCI_P2P_CONFIG_REG(pciIf));
+
+	pciP2PConfig &= PPCR_DEV_NUM_MASK;
+
+	return (pciP2PConfig >> PPCR_DEV_NUM_OFFS);
+}
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPci.h b/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPci.h
new file mode 100644
index 000000000000..55244fa0026f
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPci.h
@@ -0,0 +1,182 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCPCIH
+#define __INCPCIH
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#include "pci/mvPciRegs.h"
+
+
+/* NOTE not supported in this driver:
+
+ Built In Self Test (BIST)
+ Vital Product Data (VPD)
+ Message Signaled Interrupt (MSI)
+ Power Management
+ Compact PCI Hot Swap
+ Header retarget
+
+Registers not supported:
+1) PCI DLL Status and Control (PCI0 0x1D20, PCI1 0x1DA0)
+2) PCI/MPP Pads Calibration (CI0/MPP[31:16] 0x1D1C, PCI1/MPP[15:0] 0X1D9C)
+*/
+
+/* defines */
+/* The number of supported PCI interfaces depend on Marvell controller 		*/
+/* device number. This device number ID is located on the PCI unit 			*/
+/* configuration header. This creates a loop where calling PCI 				*/
+/* configuration read/write	routine results a call to get PCI configuration */
+/* information etc. This macro defines a default PCI interface. This PCI	*/
+/* interface is sure to exist.												*/
+#define PCI_DEFAULT_IF	0
+
+
+/* typedefs */
+/* The Marvell controller supports both conventional PCI and PCI-X.         */
+/* This enumeration describes the PCI type.                                 */
+typedef enum _mvPciType
+{
+    MV_PCI_CONV,    /* Conventional PCI */
+    MV_PCIX         /* PCI-X            */
+}MV_PCI_TYPE;
+
+typedef enum _mvPciMod
+{
+	MV_PCI_MOD_HOST,
+	MV_PCI_MOD_DEVICE
+}MV_PCI_MOD;
+
+
+/* The Marvell controller supports both PCI width of 32 and 64 bit.         */
+/* This enumerator describes PCI width                                      */
+typedef enum _mvPciWidth
+{
+    MV_PCI_32,  /* PCI width 32bit  */
+    MV_PCI_64   /* PCI width 64bit  */
+}MV_PCI_WIDTH;
+
+/* This structure describes the PCI unit configured type, speed and width.  */
+typedef struct _mvPciMode
+{
+    MV_PCI_TYPE  pciType;    /* PCI type                                    */
+    MV_U32       pciSpeed;   /* Assuming PCI base clock on board is 33MHz   */
+    MV_PCI_WIDTH pciWidth;   /* PCI bus width                               */
+}MV_PCI_MODE;
+
+/* mvPciInit - Initialize PCI interfaces*/
+MV_VOID mvPciHalInit(MV_U32 pciIf, MV_PCI_MOD pciIfmod);
+
+/* mvPciCommandSet - Set PCI comman register value.*/
+MV_STATUS mvPciCommandSet(MV_U32 pciIf, MV_U32 command);
+
+/* mvPciModeGet - Get PCI interface mode.*/
+MV_STATUS mvPciModeGet(MV_U32 pciIf, MV_PCI_MODE *pPciMode);
+
+/* mvPciRetrySet - Set PCI retry counters*/
+MV_STATUS mvPciRetrySet(MV_U32 pciIf, MV_U32 counter);
+
+/* mvPciDiscardTimerSet - Set PCI discard timer*/
+MV_STATUS mvPciDiscardTimerSet(MV_U32 pciIf, MV_U32 pClkCycles);
+
+/* mvPciArbEnable - PCI arbiter enable/disable*/
+MV_STATUS mvPciArbEnable(MV_U32 pciIf, MV_BOOL enable);
+
+/* mvPciArbParkDis - Disable arbiter parking on agent */
+MV_STATUS mvPciArbParkDis(MV_U32 pciIf, MV_U32 pciAgentMask);
+
+/* mvPciArbBrokDetectSet - Set PCI arbiter broken detection */
+MV_STATUS mvPciArbBrokDetectSet(MV_U32 pciIf, MV_U32 pClkCycles);
+
+/* mvPciConfigRead - Read from configuration space */
+MV_U32 mvPciConfigRead (MV_U32 pciIf, MV_U32 bus, MV_U32 dev,
+						MV_U32 func,MV_U32 regOff);
+
+/* mvPciConfigWrite - Write to configuration space */
+MV_STATUS mvPciConfigWrite(MV_U32 pciIf, MV_U32 bus, MV_U32 dev,
+                           MV_U32 func, MV_U32 regOff, MV_U32 data);
+
+/* mvPciMasterEnable - Enable/disale PCI interface master transactions.*/
+MV_STATUS mvPciMasterEnable(MV_U32 pciIf, MV_BOOL enable);
+
+/* mvPciSlaveEnable - Enable/disale PCI interface slave transactions.*/
+MV_STATUS mvPciSlaveEnable(MV_U32 pciIf, MV_U32 bus, MV_U32 dev,MV_BOOL enable);
+
+/* mvPciLocalBusNumSet - Set PCI interface local bus number.*/
+MV_STATUS mvPciLocalBusNumSet(MV_U32 pciIf, MV_U32 busNum);
+
+/* mvPciLocalBusNumGet - Get PCI interface local bus number.*/
+MV_U32 mvPciLocalBusNumGet(MV_U32 pciIf);
+
+/* mvPciLocalDevNumSet - Set PCI interface local device number.*/
+MV_STATUS mvPciLocalDevNumSet(MV_U32 pciIf, MV_U32 devNum);
+
+/* mvPciLocalDevNumGet - Get PCI interface local device number.*/
+MV_U32 mvPciLocalDevNumGet(MV_U32 pciIf);
+
+
+#endif /* #ifndef __INCPCIH */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPciRegs.h b/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPciRegs.h
new file mode 100644
index 000000000000..9b7c7a58908b
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/pci/mvPciRegs.h
@@ -0,0 +1,410 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCPCIREGSH
+#define __INCPCIREGSH
+
+
+#include "pci-if/mvPciIfRegs.h"
+/* defines */
+#define MAX_PCI_DEVICES         32
+#define MAX_PCI_FUNCS           8
+#define MAX_PCI_BUSSES          128
+
+/* enumerators */
+
+/* This enumerator described the possible PCI slave targets.    	   */
+/* PCI slave targets are designated memory/IO address spaces that the  */
+/* PCI slave targets can access. They are also refered as "targets"    */
+/* this enumeratoe order is determined by the content of :
+		PCI_BASE_ADDR_ENABLE_REG 				 					*/
+
+
+/* registers offsetes defines */
+
+
+
+/*************************/
+/* PCI control registers */
+/*************************/
+/* maen : should add new registers */
+#define PCI_CMD_REG(pciIf)				 		(0x30c00  + ((pciIf) * 0x80))
+#define PCI_MODE_REG(pciIf)				 		(0x30d00  + ((pciIf) * 0x80))
+#define PCI_RETRY_REG(pciIf)					(0x30c04  + ((pciIf) * 0x80))
+#define PCI_DISCARD_TIMER_REG(pciIf)			(0x30d04  + ((pciIf) * 0x80))
+#define PCI_ARBITER_CTRL_REG(pciIf)				(0x31d00 + ((pciIf) * 0x80))
+#define PCI_P2P_CONFIG_REG(pciIf)				(0x31d14 + ((pciIf) * 0x80))
+#define PCI_ACCESS_CTRL_BASEL_REG(pciIf, targetWin) \
+							(0x31e00 + ((pciIf) * 0x80) + ((targetWin) * 0x10))
+#define PCI_ACCESS_CTRL_BASEH_REG(pciIf, targetWin) \
+							(0x31e04 + ((pciIf) * 0x80) + ((targetWin) * 0x10))
+#define PCI_ACCESS_CTRL_SIZE_REG(pciIf, targetWin)	\
+							(0x31e08 + ((pciIf) * 0x80) + ((targetWin) * 0x10))
+
+#define PCI_DLL_CTRL_REG(pciIf)	   		 		(0x31d20  + ((pciIf) * 0x80))
+
+/* PCI Dll Control (PDC)*/
+#define PDC_DLL_EN					BIT0
+
+
+/* PCI Command Register (PCR) */
+#define PCR_MASTER_BYTE_SWAP_EN     BIT0
+#define PCR_MASTER_WR_COMBINE_EN    BIT4
+#define PCR_MASTER_RD_COMBINE_EN    BIT5
+#define PCR_MASTER_WR_TRIG_WHOLE    BIT6
+#define PCR_MASTER_RD_TRIG_WHOLE    BIT7
+#define PCR_MASTER_MEM_RD_LINE_EN   BIT8
+#define PCR_MASTER_MEM_RD_MULT_EN   BIT9
+#define PCR_MASTER_WORD_SWAP_EN     BIT10
+#define PCR_SLAVE_WORD_SWAP_EN      BIT11
+#define PCR_NS_ACCORDING_RCV_TRANS  BIT14
+#define PCR_MASTER_PCIX_REQ64N_EN   BIT15
+#define PCR_SLAVE_BYTE_SWAP_EN      BIT16
+#define PCR_MASTER_DAC_EN           BIT17
+#define PCR_MASTER_M64_ALLIGN       BIT18
+#define PCR_ERRORS_PROPAGATION_EN   BIT19
+#define PCR_SLAVE_SWAP_ENABLE       BIT20
+#define PCR_MASTER_SWAP_ENABLE      BIT21
+#define PCR_MASTER_INT_SWAP_EN      BIT22
+#define PCR_LOOP_BACK_ENABLE        BIT23
+#define PCR_SLAVE_INTREG_SWAP_OFFS  24
+#define PCR_SLAVE_INTREG_SWAP_MASK  0x3
+#define PCR_SLAVE_INTREG_BYTE_SWAP  \
+                             (MV_BYTE_SWAP << PCR_SLAVE_INT_REG_SWAP_MASK)
+#define PCR_SLAVE_INTREG_NO_SWAP    \
+                             (MV_NO_SWAP   << PCR_SLAVE_INT_REG_SWAP_MASK)
+#define PCR_SLAVE_INTREG_BYTE_WORD  \
+                             (MV_BYTE_WORD_SWAP << PCR_SLAVE_INT_REG_SWAP_MASK)
+#define PCR_SLAVE_INTREG_WORD_SWAP  \
+                             (MV_WORD_SWAP << PCR_SLAVE_INT_REG_SWAP_MASK)
+#define PCR_RESET_REASSERTION_EN    BIT26
+#define PCR_PCI_TO_CPU_REG_ORDER_EN BIT28
+#define PCR_CPU_TO_PCI_ORDER_EN     BIT29
+#define PCR_PCI_TO_CPU_ORDER_EN     BIT30
+
+/* PCI Mode Register (PMR) */
+#define PMR_PCI_ID_OFFS 			0  /* PCI Interface ID */
+#define PMR_PCI_ID_MASK 			(0x1 << PMR_PCI_ID_OFFS)
+#define PMR_PCI_ID_PCI(pciNum) 		((pciNum) << PCI_MODE_PCIID_OFFS)
+
+#define PMR_PCI_64_OFFS				2 	/* 64-bit PCI Interface */
+#define PMR_PCI_64_MASK				(0x1 << PMR_PCI_64_OFFS)
+#define PMR_PCI_64_64BIT			(0x1 << PMR_PCI_64_OFFS)
+#define PMR_PCI_64_32BIT			(0x0 << PMR_PCI_64_OFFS)
+
+#define PMR_PCI_MODE_OFFS			4 	/* PCI interface mode of operation */
+#define PMR_PCI_MODE_MASK			(0x3 << PMR_PCI_MODE_OFFS)
+#define PMR_PCI_MODE_CONV			(0x0 << PMR_PCI_MODE_OFFS)
+#define PMR_PCI_MODE_PCIX_66MHZ		(0x1 << PMR_PCI_MODE_OFFS)
+#define PMR_PCI_MODE_PCIX_100MHZ	(0x2 << PMR_PCI_MODE_OFFS)
+#define PMR_PCI_MODE_PCIX_133MHZ	(0x3 << PMR_PCI_MODE_OFFS)
+
+#define PMR_EXP_ROM_SUPPORT			BIT8	/* Expansion ROM Active */
+
+#define PMR_PCI_RESET_OFFS			31 /* PCI Interface Reset Indication */
+#define PMR_PCI_RESET_MASK			(0x1 << PMR_PCI_RESET_OFFS)
+#define PMR_PCI_RESET_PCIXRST		(0x0 << PMR_PCI_RESET_OFFS)
+
+
+/* PCI Retry Register (PRR) */
+#define PRR_RETRY_CNTR_OFFS			16 /* Retry Counter */
+#define PRR_RETRY_CNTR_MAX			0xff
+#define PRR_RETRY_CNTR_MASK			(PRR_RETRY_CNTR_MAX << PRR_RETRY_CNTR_OFFS)
+
+
+/* PCI Discard Timer Register (PDTR) */
+#define PDTR_TIMER_OFFS				0	/* Timer */
+#define PDTR_TIMER_MAX				0xffff
+#define PDTR_TIMER_MIN				0x7F
+#define PDTR_TIMER_MASK				(PDTR_TIMER_MAX << PDTR_TIMER_OFFS)
+
+
+/* PCI Arbiter Control Register (PACR) */
+#define PACR_BROKEN_DETECT_EN		BIT1	/* Broken Detection Enable */
+
+#define PACR_BROKEN_VAL_OFFS		3	/* Broken Value */
+#define PACR_BROKEN_VAL_MASK		(0xf << PACR_BROKEN_VAL_OFFS)
+#define PACR_BROKEN_VAL_CONV_MIN	0x2
+#define PACR_BROKEN_VAL_PCIX_MIN	0x6
+
+#define PACR_PARK_DIS_OFFS		14	/* Parking Disable */
+#define PACR_PARK_DIS_MAX_AGENT	0x3f
+#define PACR_PARK_DIS_MASK		(PACR_PARK_DIS_MAX_AGENT<<PACR_PARK_DIS_OFFS)
+#define PACR_PARK_DIS(agent)	((1 << (agent)) << PACR_PARK_DIS_OFFS)
+
+#define PACR_ARB_ENABLE				BIT31	/* Enable Internal Arbiter */
+
+
+/* PCI P2P Configuration Register (PPCR) */
+#define PPCR_2ND_BUS_L_OFFS			0	/* 2nd PCI Interface Bus Range Lower */
+#define PPCR_2ND_BUS_L_MASK			(0xff << PPCR_2ND_BUS_L_OFFS)
+
+#define PPCR_2ND_BUS_H_OFFS			8	/* 2nd PCI Interface Bus Range Upper */
+#define PPCR_2ND_BUS_H_MASK			(0xff << PPCR_2ND_BUS_H_OFFS)
+
+#define PPCR_BUS_NUM_OFFS			16  /* The PCI interface's Bus number */
+#define PPCR_BUS_NUM_MASK			(0xff << PPCR_BUS_NUM_OFFS)
+
+#define PPCR_DEV_NUM_OFFS			24  /* The PCI interface’s Device number */
+#define PPCR_DEV_NUM_MASK			(0xff << PPCR_DEV_NUM_OFFS)
+
+
+/* PCI Access Control Base Low Register (PACBLR) */
+#define PACBLR_EN					BIT0 /* Access control window enable */
+
+#define PACBLR_ACCPROT				BIT4 /* Access Protect */
+#define PACBLR_WRPROT				BIT5 /* Write Protect */
+
+#define PACBLR_PCISWAP_OFFS			6 	 /* PCI slave Data Swap Control */
+#define PACBLR_PCISWAP_MASK			(0x3 << PACBLR_PCISWAP_OFFS)
+#define PACBLR_PCISWAP_BYTE			(0x0 << PACBLR_PCISWAP_OFFS)
+#define PACBLR_PCISWAP_NO_SWAP		(0x1 << PACBLR_PCISWAP_OFFS)
+#define PACBLR_PCISWAP_BYTE_WORD	(0x2 << PACBLR_PCISWAP_OFFS)
+#define PACBLR_PCISWAP_WORD			(0x3 << PACBLR_PCISWAP_OFFS)
+
+#define PACBLR_RDMBURST_OFFS		8 /* Read Max Burst */
+#define PACBLR_RDMBURST_MASK		(0x3 << PACBLR_RDMBURST_OFFS)
+#define PACBLR_RDMBURST_32BYTE		(0x0 << PACBLR_RDMBURST_OFFS)
+#define PACBLR_RDMBURST_64BYTE		(0x1 << PACBLR_RDMBURST_OFFS)
+#define PACBLR_RDMBURST_128BYTE		(0x2 << PACBLR_RDMBURST_OFFS)
+
+#define PACBLR_RDSIZE_OFFS			10 /* Typical PCI read transaction Size. */
+#define PACBLR_RDSIZE_MASK			(0x3 << PACBLR_RDSIZE_OFFS)
+#define PACBLR_RDSIZE_32BYTE		(0x0 << PACBLR_RDSIZE_OFFS)
+#define PACBLR_RDSIZE_64BYTE		(0x1 << PACBLR_RDSIZE_OFFS)
+#define PACBLR_RDSIZE_128BYTE		(0x2 << PACBLR_RDSIZE_OFFS)
+#define PACBLR_RDSIZE_256BYTE		(0x3 << PACBLR_RDSIZE_OFFS)
+
+#define PACBLR_BASE_L_OFFS			12	/* Corresponds to address bits [31:12] */
+#define PACBLR_BASE_L_MASK			(0xfffff << PACBLR_BASE_L_OFFS)
+#define PACBLR_BASE_L_ALIGNMENT		(1 << PACBLR_BASE_L_OFFS)
+#define PACBLR_BASE_ALIGN_UP(base)  \
+                             ((base+PACBLR_BASE_L_ALIGNMENT)&PACBLR_BASE_L_MASK)
+#define PACBLR_BASE_ALIGN_DOWN(base)  (base & PACBLR_BASE_L_MASK)
+
+
+/* PCI Access Control Base High Register (PACBHR) 	*/
+#define PACBHR_BASE_H_OFFS			0	/* Corresponds to address bits [63:32] */
+#define PACBHR_CTRL_BASE_H_MASK		(0xffffffff << PACBHR_BASE_H_OFFS)
+
+/* PCI Access Control Size Register (PACSR) 		*/
+#define PACSR_WRMBURST_OFFS			8 /* Write Max Burst */
+#define PACSR_WRMBURST_MASK			(0x3 << PACSR_WRMBURST_OFFS)
+#define PACSR_WRMBURST_32BYTE		(0x0 << PACSR_WRMBURST_OFFS)
+#define PACSR_WRMBURST_64BYTE		(0x1 << PACSR_WRMBURST_OFFS)
+#define PACSR_WRMBURST_128BYTE		(0x2 << PACSR_WRMBURST_OFFS)
+
+#define PACSR_PCI_ORDERING			BIT11 /* PCI Ordering required */
+
+#define PACSR_SIZE_OFFS				12	/* PCI access window size */
+#define PACSR_SIZE_MASK				(0xfffff << PACSR_SIZE_OFFS)
+#define PACSR_SIZE_ALIGNMENT		(1 << PACSR_SIZE_OFFS)
+#define PACSR_SIZE_ALIGN_UP(size)   \
+                                   ((size+PACSR_SIZE_ALIGNMENT)&PACSR_SIZE_MASK)
+#define PACSR_SIZE_ALIGN_DOWN(size) (size & PACSR_SIZE_MASK)
+
+
+/***************************************/
+/* PCI Configuration Access Registers  */
+/***************************************/
+
+#define PCI_CONFIG_ADDR_REG(pciIf)	(0x30C78 - ((pciIf) * 0x80) )
+#define PCI_CONFIG_DATA_REG(pciIf)	(0x30C7C - ((pciIf) * 0x80) )
+#define PCI_INT_ACK_REG(pciIf)		(0x30C34 + ((pciIf) * 0x80) )
+
+/* PCI Configuration Address Register (PCAR) */
+#define PCAR_REG_NUM_OFFS			2
+#define PCAR_REG_NUM_MASK			(0x3F << PCAR_REG_NUM_OFFS)
+
+#define PCAR_FUNC_NUM_OFFS			8
+#define PCAR_FUNC_NUM_MASK			(0x7 << PCAR_FUNC_NUM_OFFS)
+
+#define PCAR_DEVICE_NUM_OFFS		11
+#define PCAR_DEVICE_NUM_MASK		(0x1F << PCAR_DEVICE_NUM_OFFS)
+
+#define PCAR_BUS_NUM_OFFS			16
+#define PCAR_BUS_NUM_MASK			(0xFF << PCAR_BUS_NUM_OFFS)
+
+#define PCAR_CONFIG_EN				BIT31
+
+
+/***************************************/
+/* PCI Configuration registers */
+/***************************************/
+
+/*********************************************/
+/* PCI Configuration, Function 0, Registers  */
+/*********************************************/
+
+/* Marvell Specific */
+#define PCI_SCS0_BASE_ADDR_LOW			   			0x010
+#define PCI_SCS0_BASE_ADDR_HIGH			   			0x014
+#define PCI_SCS1_BASE_ADDR_LOW		  				0x018
+#define PCI_SCS1_BASE_ADDR_HIGH			  			0x01C
+#define PCI_INTER_REG_MEM_MAPPED_BASE_ADDR_L 		0x020
+#define PCI_INTER_REG_MEM_MAPPED_BASE_ADDR_H		0x024
+
+/* capability list */
+#define PCI_POWER_MNG_CAPABILITY		            0x040
+#define PCI_POWER_MNG_STATUS_CONTROL		        0x044
+#define PCI_VPD_ADDRESS_REG	                        0x048
+#define PCI_VPD_DATA_REG	                        0x04c
+#define PCI_MSI_MESSAGE_CONTROL						0x050
+#define PCI_MSI_MESSAGE_ADDR		                0x054
+#define PCI_MSI_MESSAGE_UPPER_ADDR		            0x058
+#define PCI_MSI_MESSAGE_DATA		                0x05c
+#define PCIX_COMMAND		                        0x060
+#define PCIX_STATUS		                            0x064
+#define PCI_COMPACT_PCI_HOT_SWAP		            0x068
+
+
+/*********************************************/
+/* PCI Configuration, Function 1, Registers  */
+/*********************************************/
+
+#define PCI_SCS2_BASE_ADDR_LOW						0x10
+#define PCI_SCS2_BASE_ADDR_HIGH						0x14
+#define PCI_SCS3_BASE_ADDR_LOW		 				0x18
+#define PCI_SCS3_BASE_ADDR_HIGH						0x1c
+
+
+/***********************************************/
+/*  PCI Configuration, Function 2, Registers   */
+/***********************************************/
+
+#define PCI_DEVCS0_BASE_ADDR_LOW					0x10
+#define PCI_DEVCS0_BASE_ADDR_HIGH		 			0x14
+#define PCI_DEVCS1_BASE_ADDR_LOW		 			0x18
+#define PCI_DEVCS1_BASE_ADDR_HIGH		      		0x1c
+#define PCI_DEVCS2_BASE_ADDR_LOW		 			0x20
+#define PCI_DEVCS2_BASE_ADDR_HIGH		      		0x24
+
+/***********************************************/
+/*  PCI Configuration, Function 3, Registers   */
+/***********************************************/
+
+#define PCI_BOOTCS_BASE_ADDR_LOW					0x18
+#define PCI_BOOTCS_BASE_ADDR_HIGH		      		0x1c
+
+/***********************************************/
+/*  PCI Configuration, Function 4, Registers   */
+/***********************************************/
+
+#define PCI_P2P_MEM0_BASE_ADDR_LOW				   	0x10
+#define PCI_P2P_MEM0_BASE_ADDR_HIGH		 			0x14
+#define PCI_P2P_IO_BASE_ADDR		               	0x20
+#define PCI_INTER_REGS_IO_MAPPED_BASE_ADDR		   0x24
+
+/* PCIX_STATUS  register fields (PXS) */
+
+#define PXS_FN_OFFS		0	/* Description Number */
+#define PXS_FN_MASK		(0x7 << PXS_FN_OFFS)
+
+#define PXS_DN_OFFS		3	/* Device Number */
+#define PXS_DN_MASK		(0x1f << PXS_DN_OFFS)
+
+#define PXS_BN_OFFS		8	/* Bus Number */
+#define PXS_BN_MASK		(0xff << PXS_BN_OFFS)
+
+
+/* PCI Error Report Register Map */
+#define PCI_SERRN_MASK_REG(pciIf)		(0x30c28  + (pciIf * 0x80))
+#define PCI_CAUSE_REG(pciIf)			(0x31d58 + (pciIf * 0x80))
+#define PCI_MASK_REG(pciIf)				(0x31d5C + (pciIf * 0x80))
+#define PCI_ERROR_ADDR_LOW_REG(pciIf)	(0x31d40 + (pciIf * 0x80))
+#define PCI_ERROR_ADDR_HIGH_REG(pciIf)	(0x31d44 + (pciIf * 0x80))
+#define PCI_ERROR_ATTRIBUTE_REG(pciIf)	(0x31d48 + (pciIf * 0x80))
+#define PCI_ERROR_COMMAND_REG(pciIf)	(0x31d50 + (pciIf * 0x80))
+
+/* PCI Interrupt Cause Register (PICR) */
+#define PICR_ERR_SEL_OFFS           27
+#define PICR_ERR_SEL_MASK           (0x1f << PICR_ERR_SEL_OFFS)
+
+/* PCI Error Command Register (PECR) */
+#define PECR_ERR_CMD_OFFS			0
+#define PECR_ERR_CMD_MASK			(0xf << PECR_ERR_CMD_OFFS)
+#define PECR_DAC					BIT4
+
+
+/* defaults */
+/* Set bits means value is about to change according to new value */
+#define PCI_COMMAND_DEFAULT_MASK                0xffffdff1
+#define PCI_COMMAND_DEFAULT                             \
+                (PCR_MASTER_WR_TRIG_WHOLE   |   \
+         PCR_MASTER_RD_TRIG_WHOLE       |       \
+                 PCR_MASTER_MEM_RD_LINE_EN      |       \
+         PCR_MASTER_MEM_RD_MULT_EN  |   \
+                 PCR_NS_ACCORDING_RCV_TRANS     |       \
+                 PCR_MASTER_PCIX_REQ64N_EN      |       \
+                 PCR_MASTER_DAC_EN                      |       \
+                 PCR_MASTER_M64_ALLIGN          |       \
+                 PCR_ERRORS_PROPAGATION_EN)
+
+
+#define PCI_ARBITER_CTRL_DEFAULT_MASK   0x801fc07a
+#define PCI_ARBITER_CTRL_DEFAULT        \
+        (PACR_BROKEN_VAL_PCIX_MIN << PACR_BROKEN_VAL_OFFS)
+
+
+#endif /* #ifndef __INCPCIREGSH */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPex.c b/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPex.c
new file mode 100644
index 000000000000..84cf85ab1dbe
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPex.c
@@ -0,0 +1,1140 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "pex/mvPex.h"
+
+#include "ctrlEnv/mvCtrlEnvLib.h"
+
+/* defines  */
+#ifdef MV_DEBUG
+#define DB(x)	x
+#else
+#define DB(x)
+#endif
+
+MV_STATUS mvPexHalInit(MV_U32 pexIf, MV_PEX_TYPE pexType)
+{
+	MV_PEX_MODE pexMode;
+	MV_U32 regVal;
+	MV_U32	status;
+
+    /* First implement Guideline (GL# PCI Express-2) Wrong Default Value    */
+    /* to Transmitter Output Current (TXAMP) Relevant for: 88F5181-A1/B0/B1 */
+    /* and 88F5281-B0 and above, 88F5182, 88F5082, 88F5181L, 88F6082/L      */
+
+    if ((mvCtrlModelGet() != MV_1281_DEV_ID) &&
+	(mvCtrlModelGet() != MV_6281_DEV_ID) &&
+	(mvCtrlModelGet() != MV_6192_DEV_ID) &&
+    (mvCtrlModelGet() != MV_6190_DEV_ID) &&
+	(mvCtrlModelGet() != MV_6180_DEV_ID) &&
+		(mvCtrlModelGet() != MV_6183_DEV_ID) &&
+	(mvCtrlModelGet() != MV_6183L_DEV_ID) &&
+		(mvCtrlModelGet() != MV_78100_DEV_ID) &&
+		(mvCtrlModelGet() != MV_78200_DEV_ID) &&
+	(mvCtrlModelGet() != MV_76100_DEV_ID) &&
+	(mvCtrlModelGet() != MV_78XX0_DEV_ID))
+    {
+
+        /* Read current value of TXAMP */
+        MV_REG_WRITE(0x41b00, 0x80820000);   /* Write the read command   */
+
+        regVal = MV_REG_READ(0x41b00);      /* Extract the data         */
+
+        /* Prepare new data for write */
+        regVal &= ~0x7;                     /* Clear bits [2:0]         */
+        regVal |=  0x4;                     /* Set the new value        */
+        regVal &= ~0x80000000;              /* Set "write" command      */
+        MV_REG_WRITE(0x41b00, regVal);      /* Write the write command  */
+
+    }
+    else
+    {
+        /* Implement 1.0V termination GL for 88F1281 device only */
+        /* BIT0 - Common mode feedback */
+        /* BIT3 - TxBuf, extra drive for 1.0V termination */
+        if (mvCtrlModelGet() == MV_1281_DEV_ID)
+        {
+                MV_REG_WRITE(0x41b00, 0x80860000);   /* Write the read command   */
+                regVal = MV_REG_READ(0x41b00);      /* Extract the data         */
+                regVal |= (BIT0 | BIT3);
+                regVal &= ~0x80000000;              /* Set "write" command      */
+                MV_REG_WRITE(0x41b00, regVal);      /* Write the write command  */
+
+                MV_REG_WRITE(0x31b00, 0x80860000);   /* Write the read command   */
+                regVal = MV_REG_READ(0x31b00);      /* Extract the data         */
+                regVal |= (BIT0 | BIT3);
+                regVal &= ~0x80000000;              /* Set "write" command      */
+                MV_REG_WRITE(0x31b00, regVal);      /* Write the write command  */
+        }
+    }
+
+        if( mvPexModeGet(pexIf, &pexMode) != MV_OK)
+        {
+                mvOsPrintf("PEX init ERR. mvPexModeGet failed (pexType=%d)\n",pexMode.pexType);
+                return MV_ERROR;
+        }
+
+        /* Check that required PEX type is the one set in reset time */
+        if (pexType != pexMode.pexType)
+        {
+                /* No Link. Shut down the Phy */
+		mvPexPowerDown(pexIf);
+                mvOsPrintf("PEX init ERR. PEX type sampled mismatch (%d,%d)\n",pexType,pexMode.pexType);
+                return MV_ERROR;
+        }
+
+        if (MV_PEX_ROOT_COMPLEX == pexType)
+        {
+                mvPexLocalBusNumSet(pexIf, PEX_HOST_BUS_NUM(pexIf));
+                mvPexLocalDevNumSet(pexIf, PEX_HOST_DEV_NUM(pexIf));
+
+                /* Local device master Enable */
+                mvPexMasterEnable(pexIf, MV_TRUE);
+
+                /* Local device slave Enable */
+                mvPexSlaveEnable(pexIf, mvPexLocalBusNumGet(pexIf),
+                                                 mvPexLocalDevNumGet(pexIf), MV_TRUE);
+		/* Interrupt disable */
+		status = MV_REG_READ(PEX_CFG_DIRECT_ACCESS(pexIf, PEX_STATUS_AND_COMMAND));
+		status |= PXSAC_INT_DIS;
+		MV_REG_WRITE(PEX_CFG_DIRECT_ACCESS(pexIf, PEX_STATUS_AND_COMMAND), status);
+        }
+
+        /* now wait 500 ms to be sure the link is valid (spec compliant) */
+        mvOsDelay(500);
+	/* Check if we have link */
+	if (MV_REG_READ(PEX_STATUS_REG(pexIf)) & PXSR_DL_DOWN)
+	{
+		mvOsPrintf("PEX%d interface detected no Link.\n",pexIf);
+		return MV_NO_SUCH;
+	}
+
+	if (MV_PEX_WITDH_X1 ==  pexMode.pexWidth)
+	{
+		mvOsPrintf("PEX%d interface detected Link X1\n",pexIf);
+	}
+	else
+	{
+		mvOsPrintf("PEX%d interface detected Link X4\n",pexIf);
+	}
+
+#ifdef PCIE_VIRTUAL_BRIDGE_SUPPORT
+	mvPexVrtBrgInit(pexIf);
+#endif
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvPexModeGet - Get Pex Mode
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       pexIf   - PEX interface number.
+*
+* OUTPUT:
+*       pexMode - Pex mode structure
+*
+* RETURN:
+*       MV_OK on success , MV_ERROR otherwise
+*
+*******************************************************************************/
+MV_U32 mvPexModeGet(MV_U32 pexIf,MV_PEX_MODE *pexMode)
+{
+	MV_U32 pexData;
+
+	/* Parameter checking   */
+	if (PEX_DEFAULT_IF != pexIf)
+	{
+		if (pexIf >= mvCtrlPexMaxIfGet())
+		{
+			mvOsPrintf("mvPexModeGet: ERR. Invalid PEX interface %d\n",pexIf);
+			return MV_ERROR;
+		}
+	}
+
+	pexData = MV_REG_READ(PEX_CTRL_REG(pexIf));
+
+	switch (pexData & PXCR_DEV_TYPE_CTRL_MASK)
+	{
+	case PXCR_DEV_TYPE_CTRL_CMPLX:
+		pexMode->pexType = MV_PEX_ROOT_COMPLEX;
+		break;
+	case PXCR_DEV_TYPE_CTRL_POINT:
+		pexMode->pexType = MV_PEX_END_POINT;
+		break;
+
+	}
+
+    /* Check if we have link */
+    if (MV_REG_READ(PEX_STATUS_REG(pexIf)) & PXSR_DL_DOWN)
+    {
+        pexMode->pexLinkUp = MV_FALSE;
+
+        /* If there is no link, the auto negotiation data is worthless */
+        pexMode->pexWidth  = MV_PEX_WITDH_INVALID;
+    }
+    else
+    {
+        pexMode->pexLinkUp = MV_TRUE;
+
+        /* We have link. The link width is now valid */
+        pexData = MV_REG_READ(PEX_CFG_DIRECT_ACCESS(pexIf, PEX_LINK_CTRL_STAT_REG));
+        pexMode->pexWidth = ((pexData & PXLCSR_NEG_LNK_WDTH_MASK) >>
+                             PXLCSR_NEG_LNK_WDTH_OFFS);
+    }
+
+    return MV_OK;
+}
+
+
+/* PEX configuration space read write */
+
+/*******************************************************************************
+* mvPexConfigRead - Read from configuration space
+*
+* DESCRIPTION:
+*       This function performs a 32 bit read from PEX configuration space.
+*       It supports both type 0 and type 1 of Configuration Transactions
+*       (local and over bridge). In order to read from local bus segment, use
+*       bus number retrieved from mvPexLocalBusNumGet(). Other bus numbers
+*       will result configuration transaction of type 1 (over bridge).
+*
+* INPUT:
+*       pexIf   - PEX interface number.
+*       bus     - PEX segment bus number.
+*       dev     - PEX device number.
+*       func    - Function number.
+*       regOffs - Register offset.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       32bit register data, 0xffffffff on error
+*
+*******************************************************************************/
+MV_U32 mvPexConfigRead (MV_U32 pexIf, MV_U32 bus, MV_U32 dev, MV_U32 func,
+                        MV_U32 regOff)
+{
+#if defined(PCIE_VIRTUAL_BRIDGE_SUPPORT)
+        return mvPexVrtBrgConfigRead (pexIf, bus, dev, func, regOff);
+}
+
+MV_U32 mvPexHwConfigRead (MV_U32 pexIf, MV_U32 bus, MV_U32 dev, MV_U32 func,
+                        MV_U32 regOff)
+{
+#endif
+	MV_U32 pexData = 0;
+	MV_U32	localDev,localBus;
+
+	/* Parameter checking   */
+	if (PEX_DEFAULT_IF != pexIf)
+	{
+		if (pexIf >= mvCtrlPexMaxIfGet())
+		{
+			mvOsPrintf("mvPexConfigRead: ERR. Invalid PEX interface %d\n",pexIf);
+			return 0xFFFFFFFF;
+		}
+	}
+
+	if (dev >= MAX_PEX_DEVICES)
+	{
+		DB(mvOsPrintf("mvPexConfigRead: ERR. device number illigal %d\n", dev));
+		return 0xFFFFFFFF;
+	}
+
+	if (func >= MAX_PEX_FUNCS)
+	{
+		DB(mvOsPrintf("mvPexConfigRead: ERR. function num illigal %d\n", func));
+		return 0xFFFFFFFF;
+	}
+
+	if (bus >= MAX_PEX_BUSSES)
+	{
+		DB(mvOsPrintf("mvPexConfigRead: ERR. bus number illigal %d\n", bus));
+		return MV_ERROR;
+	}
+
+    DB(mvOsPrintf("mvPexConfigRead: pexIf %d, bus %d, dev %d, func %d, regOff 0x%x\n",
+                   pexIf, bus, dev, func, regOff));
+
+	localDev = mvPexLocalDevNumGet(pexIf);
+	localBus = mvPexLocalBusNumGet(pexIf);
+
+    /* Speed up the process. In case on no link, return MV_ERROR */
+    if ((dev != localDev) || (bus != localBus))
+    {
+        pexData = MV_REG_READ(PEX_STATUS_REG(pexIf));
+
+        if ((pexData & PXSR_DL_DOWN))
+        {
+            return MV_ERROR;
+        }
+    }
+
+    /* in PCI Express we have only one device number */
+	/* and this number is the first number we encounter
+	else that the localDev*/
+	/* spec pex define return on config read/write on any device */
+	if (bus == localBus)
+	{
+		if (localDev == 0)
+		{
+			/* if local dev is 0 then the first number we encounter
+			after 0 is 1 */
+			if ((dev != 1)&&(dev != localDev))
+			{
+				return MV_ERROR;
+			}
+		}
+		else
+		{
+			/* if local dev is not 0 then the first number we encounter
+			is 0 */
+
+			if ((dev != 0)&&(dev != localDev))
+			{
+				return MV_ERROR;
+			}
+		}
+		if(func != 0 ) /* i.e bridge */
+		{
+			return MV_ERROR;
+		}
+	}
+
+
+	/* Creating PEX address to be passed */
+	pexData = (bus << PXCAR_BUS_NUM_OFFS);
+	pexData |= (dev << PXCAR_DEVICE_NUM_OFFS);
+	pexData |= (func << PXCAR_FUNC_NUM_OFFS);
+	pexData |= (regOff & PXCAR_REG_NUM_MASK); /* lgacy register space */
+	/* extended register space */
+	pexData |=(((regOff & PXCAR_REAL_EXT_REG_NUM_MASK) >>
+				PXCAR_REAL_EXT_REG_NUM_OFFS) << PXCAR_EXT_REG_NUM_OFFS);
+
+	pexData |= PXCAR_CONFIG_EN;
+
+	/* Write the address to the PEX configuration address register */
+	MV_REG_WRITE(PEX_CFG_ADDR_REG(pexIf), pexData);
+
+	DB(mvOsPrintf("mvPexConfigRead:address pexData=%x ",pexData));
+
+
+	/* In order to let the PEX controller absorbed the address of the read 	*/
+	/* transaction we perform a validity check that the address was written */
+	if(pexData != MV_REG_READ(PEX_CFG_ADDR_REG(pexIf)))
+	{
+		return MV_ERROR;
+	}
+
+	/* cleaning Master Abort */
+	MV_REG_BIT_SET(PEX_CFG_DIRECT_ACCESS(pexIf,PEX_STATUS_AND_COMMAND),
+				   PXSAC_MABORT);
+#if 0
+	/* Guideline (GL# PCI Express-1) Erroneous Read Data on Configuration   */
+	/* This guideline is relevant for all devices except of the following devices:
+	   88F5281-BO and above, 88F5181L-A0 and above, 88F1281 A0 and above
+	   88F6183 A0 and above, 88F6183L  */
+	if ( ( (dev != localDev) || (bus != localBus) ) &&
+		(
+		!(MV_5281_DEV_ID == mvCtrlModelGet())&&
+		!((MV_5181_DEV_ID == mvCtrlModelGet())&& (mvCtrlRevGet() >= MV_5181L_A0_REV))&&
+		!(MV_1281_DEV_ID == mvCtrlModelGet())&&
+		!(MV_6183_DEV_ID == mvCtrlModelGet())&&
+		!(MV_6183L_DEV_ID == mvCtrlModelGet())&&
+		!(MV_6281_DEV_ID == mvCtrlModelGet())&&
+		!(MV_6192_DEV_ID == mvCtrlModelGet())&&
+		!(MV_6190_DEV_ID == mvCtrlModelGet())&&
+        !(MV_6180_DEV_ID == mvCtrlModelGet())&&
+		!(MV_78XX0_DEV_ID == mvCtrlModelGet())
+		))
+	{
+
+		/* PCI-Express configuration read work-around */
+
+		/* we will use one of the Punit (AHBToMbus) windows to access the xbar
+		and read the data from there */
+		/*
+		Need to configure the 2 free Punit (AHB to MBus bridge)
+		address decoding windows:
+		Configure the flash Window to handle Configuration space requests
+		for PEX0/1:
+		1.    write 0x7931/0x7941 to the flash window and the size,
+		      79-xbar attr (pci cfg), 3/4-xbar target (pex0/1), 1-WinEn
+		2.    write base to flash window
+
+		Configuration transactions from the CPU should write/read the data
+		to/from address of the form:
+		addr[31:28] = 0x5 (for PEX0) or 0x6 (for PEX1)
+		addr[27:24] = extended register number
+		addr[23:16] = bus number
+		addr[15:11] = device number
+		addr[10:8]   = function number
+		addr[7:0]     = register number
+		*/
+
+		#include "ctrlEnv/sys/mvAhbToMbus.h"
+		{
+			MV_U32 winNum;
+			MV_AHB_TO_MBUS_DEC_WIN originWin;
+			MV_U32 pciAddr=0;
+			MV_U32 remapLow=0,remapHigh=0;
+
+			/*
+			We will use DEV_CS2\Flash window for this workarround
+			*/
+
+			winNum = mvAhbToMbusWinTargetGet(PEX_CONFIG_RW_WA_TARGET);
+
+			/* save remap values if exist */
+			if ((1 == winNum)||(0 == winNum))
+			{
+				remapLow = MV_REG_READ(AHB_TO_MBUS_WIN_REMAP_LOW_REG(winNum));
+				remapHigh = MV_REG_READ(AHB_TO_MBUS_WIN_REMAP_HIGH_REG(winNum));
+
+			}
+
+
+			/* save the original window values */
+			mvAhbToMbusWinGet(winNum,&originWin);
+
+			if (PEX_CONFIG_RW_WA_USE_ORIGINAL_WIN_VALUES)
+			{
+				/* set the window as xbar window */
+				if (pexIf)
+				{
+					MV_REG_WRITE(AHB_TO_MBUS_WIN_CTRL_REG(winNum),
+					(0x7931 | (((originWin.addrWin.size >> 16)-1) ) << 16));
+				}
+				else
+				{
+					MV_REG_WRITE(AHB_TO_MBUS_WIN_CTRL_REG(winNum),
+					(0x7941 | (((originWin.addrWin.size >> 16)-1) ) << 16));
+				}
+
+				MV_REG_WRITE(AHB_TO_MBUS_WIN_BASE_REG(winNum),
+							 originWin.addrWin.baseLow);
+
+				/*pciAddr = originWin.addrWin.baseLow;*/
+				pciAddr = (MV_U32)CPU_MEMIO_UNCACHED_ADDR(
+					(MV_U32)originWin.addrWin.baseLow);
+
+			}
+			else
+			{
+				/* set the window as xbar window */
+				if (pexIf)
+				{
+					MV_REG_WRITE(AHB_TO_MBUS_WIN_CTRL_REG(winNum),
+					(0x7931 | (((PEX_CONFIG_RW_WA_SIZE >> 16)-1) ) << 16));
+				}
+				else
+				{
+					MV_REG_WRITE(AHB_TO_MBUS_WIN_CTRL_REG(winNum),
+					(0x7941 | (((PEX_CONFIG_RW_WA_SIZE >> 16)-1) ) << 16));
+				}
+
+				MV_REG_WRITE(AHB_TO_MBUS_WIN_BASE_REG(winNum),
+							 PEX_CONFIG_RW_WA_BASE);
+
+				pciAddr = (MV_U32)CPU_MEMIO_UNCACHED_ADDR(PEX_CONFIG_RW_WA_BASE);
+			}
+
+
+			/* remap should be as base */
+			if ((1 == winNum)||(0 == winNum))
+			{
+			   MV_REG_WRITE(AHB_TO_MBUS_WIN_REMAP_LOW_REG(winNum),pciAddr);
+			   MV_REG_WRITE(AHB_TO_MBUS_WIN_REMAP_HIGH_REG(winNum),0);
+
+			}
+
+			/* extended register space */
+			pciAddr |= (bus << 16);
+			pciAddr |= (dev << 11);
+			pciAddr |= (func << 8);
+			pciAddr |= (regOff & PXCAR_REG_NUM_MASK); /* lgacy register space */
+
+			pexData = *(MV_U32*)pciAddr;
+			pexData = MV_32BIT_LE(pexData); /* Data always in LE */
+
+			/* restore the original window values */
+			mvAhbToMbusWinSet(winNum,&originWin);
+
+			/* restore original remap values*/
+			if ((1 == winNum)||(0 == winNum))
+			{
+			   MV_REG_WRITE(AHB_TO_MBUS_WIN_REMAP_LOW_REG(winNum),remapLow);
+			   MV_REG_WRITE(AHB_TO_MBUS_WIN_REMAP_HIGH_REG(winNum),remapHigh);
+
+			}
+		}
+	}
+	else
+#endif
+	{
+		/* Read the Data returned in the PEX Data register */
+		pexData = MV_REG_READ(PEX_CFG_DATA_REG(pexIf));
+
+	}
+
+	DB(mvOsPrintf("mvPexConfigRead: got : %x \n",pexData));
+
+	return pexData;
+
+}
+
+/*******************************************************************************
+* mvPexConfigWrite - Write to configuration space
+*
+* DESCRIPTION:
+*       This function performs a 32 bit write to PEX configuration space.
+*       It supports both type 0 and type 1 of Configuration Transactions
+*       (local and over bridge). In order to write to local bus segment, use
+*       bus number retrieved from mvPexLocalBusNumGet(). Other bus numbers
+*       will result configuration transaction of type 1 (over bridge).
+*
+* INPUT:
+*       pexIf   - PEX interface number.
+*       bus     - PEX segment bus number.
+*       dev     - PEX device number.
+*       func    - Function number.
+*       regOffs - Register offset.
+*       data    - 32bit data.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPexConfigWrite(MV_U32 pexIf, MV_U32 bus, MV_U32 dev,
+                           MV_U32 func, MV_U32 regOff, MV_U32 data)
+{
+#if defined(PCIE_VIRTUAL_BRIDGE_SUPPORT)
+        return mvPexVrtBrgConfigWrite (pexIf, bus, dev, func, regOff, data);
+}
+
+MV_STATUS mvPexHwConfigWrite(MV_U32 pexIf, MV_U32 bus, MV_U32 dev,
+                           MV_U32 func, MV_U32 regOff, MV_U32 data)
+{
+#endif
+	MV_U32 pexData = 0;
+	MV_U32	localDev,localBus;
+
+	/* Parameter checking   */
+	if (PEX_DEFAULT_IF != pexIf)
+	{
+		if (pexIf >= mvCtrlPexMaxIfGet())
+		{
+			mvOsPrintf("mvPexConfigWrite: ERR. Invalid PEX interface %d\n",
+																		pexIf);
+			return MV_ERROR;
+		}
+	}
+
+	if (dev >= MAX_PEX_DEVICES)
+	{
+		mvOsPrintf("mvPexConfigWrite: ERR. device number illigal %d\n",dev);
+		return MV_BAD_PARAM;
+	}
+
+	if (func >= MAX_PEX_FUNCS)
+	{
+		mvOsPrintf("mvPexConfigWrite: ERR. function number illigal %d\n", func);
+		return MV_ERROR;
+	}
+
+	if (bus >= MAX_PEX_BUSSES)
+	{
+		mvOsPrintf("mvPexConfigWrite: ERR. bus number illigal %d\n", bus);
+		return MV_ERROR;
+	}
+
+
+
+	localDev = mvPexLocalDevNumGet(pexIf);
+	localBus = mvPexLocalBusNumGet(pexIf);
+
+
+	/* in PCI Express we have only one device number other than ourselves*/
+	/* and this number is the first number we encounter
+		else than the localDev that can be any valid dev number*/
+	/* pex spec define return on config read/write on any device */
+	if (bus == localBus)
+	{
+
+		if (localDev == 0)
+		{
+			/* if local dev is 0 then the first number we encounter
+			after 0 is 1 */
+			if ((dev != 1)&&(dev != localDev))
+			{
+				return MV_ERROR;
+			}
+
+		}
+		else
+		{
+			/* if local dev is not 0 then the first number we encounter
+			is 0 */
+
+			if ((dev != 0)&&(dev != localDev))
+			{
+				return MV_ERROR;
+			}
+		}
+
+
+	}
+
+	/* if we are not accessing ourselves , then check the link */
+	if ((dev != localDev) || (bus != localBus) )
+	{
+		/* workarround */
+		/* when no link return MV_ERROR */
+
+		pexData = MV_REG_READ(PEX_STATUS_REG(pexIf));
+
+		if ((pexData & PXSR_DL_DOWN))
+		{
+			return MV_ERROR;
+		}
+
+	}
+
+	pexData =0;
+
+	/* Creating PEX address to be passed */
+	pexData |= (bus << PXCAR_BUS_NUM_OFFS);
+	pexData |= (dev << PXCAR_DEVICE_NUM_OFFS);
+	pexData |= (func << PXCAR_FUNC_NUM_OFFS);
+	pexData |= (regOff & PXCAR_REG_NUM_MASK); /* lgacy register space */
+	/* extended register space */
+	pexData |=(((regOff & PXCAR_REAL_EXT_REG_NUM_MASK) >>
+				PXCAR_REAL_EXT_REG_NUM_OFFS) << PXCAR_EXT_REG_NUM_OFFS);
+	pexData |= PXCAR_CONFIG_EN;
+
+	DB(mvOsPrintf("mvPexConfigWrite: If=%x bus=%x func=%x dev=%x regOff=%x data=%x \n",
+		   pexIf,bus,func,dev,regOff,data,pexData) );
+
+	/* Write the address to the PEX configuration address register */
+	MV_REG_WRITE(PEX_CFG_ADDR_REG(pexIf), pexData);
+
+	/* Clear CPU pipe. Important where CPU can perform OOO execution */
+	CPU_PIPE_FLUSH;
+
+	/* In order to let the PEX controller absorbed the address of the read 	*/
+	/* transaction we perform a validity check that the address was written */
+	if(pexData != MV_REG_READ(PEX_CFG_ADDR_REG(pexIf)))
+	{
+		return MV_ERROR;
+	}
+
+	/* Write the Data passed to the PEX Data register */
+	MV_REG_WRITE(PEX_CFG_DATA_REG(pexIf), data);
+
+	return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvPexMasterEnable - Enable/disale PEX interface master transactions.
+*
+* DESCRIPTION:
+*       This function performs read modified write to PEX command status
+*       (offset 0x4) to set/reset bit 2. After this bit is set, the PEX
+*       master is allowed to gain ownership on the bus, otherwise it is
+*       incapable to do so.
+*
+* INPUT:
+*       pexIf  - PEX interface number.
+*       enable - Enable/disable parameter.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPexMasterEnable(MV_U32 pexIf, MV_BOOL enable)
+{
+	MV_U32 pexCommandStatus;
+	MV_U32 localBus;
+	MV_U32 localDev;
+
+	/* Parameter checking   */
+	if (pexIf >= mvCtrlPexMaxIfGet())
+	{
+		mvOsPrintf("mvPexMasterEnable: ERR. Invalid PEX interface %d\n", pexIf);
+		return MV_ERROR;
+	}
+
+	localBus = mvPexLocalBusNumGet(pexIf);
+	localDev = mvPexLocalDevNumGet(pexIf);
+
+	pexCommandStatus = MV_REG_READ(PEX_CFG_DIRECT_ACCESS(pexIf,
+							    PEX_STATUS_AND_COMMAND));
+
+
+	if (MV_TRUE == enable)
+	{
+		pexCommandStatus |= PXSAC_MASTER_EN;
+	}
+	else
+	{
+		pexCommandStatus &= ~PXSAC_MASTER_EN;
+	}
+
+
+	MV_REG_WRITE(PEX_CFG_DIRECT_ACCESS(pexIf,PEX_STATUS_AND_COMMAND),
+				 pexCommandStatus);
+
+	return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvPexSlaveEnable - Enable/disale PEX interface slave transactions.
+*
+* DESCRIPTION:
+*       This function performs read modified write to PEX command status
+*       (offset 0x4) to set/reset bit 0 and 1. After those bits are set,
+*       the PEX slave is allowed to respond to PEX IO space access (bit 0)
+*       and PEX memory space access (bit 1).
+*
+* INPUT:
+*       pexIf  - PEX interface number.
+*       dev     - PEX device number.
+*       enable - Enable/disable parameter.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_BAD_PARAM for bad parameters ,MV_ERROR on error ! otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPexSlaveEnable(MV_U32 pexIf, MV_U32 bus,MV_U32 dev, MV_BOOL enable)
+{
+	MV_U32 pexCommandStatus;
+	MV_U32 RegOffs;
+
+	/* Parameter checking   */
+	if (pexIf >= mvCtrlPexMaxIfGet())
+	{
+		mvOsPrintf("mvPexSlaveEnable: ERR. Invalid PEX interface %d\n", pexIf);
+		return MV_BAD_PARAM;
+	}
+	if (dev >= MAX_PEX_DEVICES)
+	{
+		mvOsPrintf("mvPexLocalDevNumSet: ERR. device number illigal %d\n", dev);
+		return MV_BAD_PARAM;
+
+	}
+
+
+	RegOffs = PEX_STATUS_AND_COMMAND;
+
+	pexCommandStatus = mvPexConfigRead(pexIf, bus, dev, 0, RegOffs);
+
+    if (MV_TRUE == enable)
+	{
+		pexCommandStatus |= (PXSAC_IO_EN | PXSAC_MEM_EN);
+	}
+	else
+	{
+		pexCommandStatus &= ~(PXSAC_IO_EN | PXSAC_MEM_EN);
+	}
+
+	mvPexConfigWrite(pexIf, bus, dev, 0, RegOffs, pexCommandStatus);
+
+	return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvPexLocalBusNumSet - Set PEX interface local bus number.
+*
+* DESCRIPTION:
+*       This function sets given PEX interface its local bus number.
+*       Note: In case the PEX interface is PEX-X, the information is read-only.
+*
+* INPUT:
+*       pexIf  - PEX interface number.
+*       busNum - Bus number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_NOT_ALLOWED in case PEX interface is PEX-X.
+*		MV_BAD_PARAM on bad parameters ,
+*       otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPexLocalBusNumSet(MV_U32 pexIf, MV_U32 busNum)
+{
+	MV_U32 pexStatus;
+	MV_U32 localBus;
+	MV_U32 localDev;
+
+
+	/* Parameter checking   */
+	if (pexIf >= mvCtrlPexMaxIfGet())
+	{
+		mvOsPrintf("mvPexLocalBusNumSet: ERR. Invalid PEX interface %d\n",pexIf);
+		return MV_BAD_PARAM;
+	}
+	if (busNum >= MAX_PEX_BUSSES)
+	{
+		mvOsPrintf("mvPexLocalBusNumSet: ERR. bus number illigal %d\n", busNum);
+		return MV_ERROR;
+
+	}
+
+	localBus = mvPexLocalBusNumGet(pexIf);
+	localDev = mvPexLocalDevNumGet(pexIf);
+
+
+
+	pexStatus  = MV_REG_READ(PEX_STATUS_REG(pexIf));
+
+	pexStatus &= ~PXSR_PEX_BUS_NUM_MASK;
+
+	pexStatus |= (busNum << PXSR_PEX_BUS_NUM_OFFS) & PXSR_PEX_BUS_NUM_MASK;
+
+	MV_REG_WRITE(PEX_STATUS_REG(pexIf), pexStatus);
+
+
+	return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvPexLocalBusNumGet - Get PEX interface local bus number.
+*
+* DESCRIPTION:
+*       This function gets the local bus number of a given PEX interface.
+*
+* INPUT:
+*       pexIf  - PEX interface number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Local bus number.0xffffffff on Error
+*
+*******************************************************************************/
+MV_U32 mvPexLocalBusNumGet(MV_U32 pexIf)
+{
+	MV_U32 pexStatus;
+
+	/* Parameter checking   */
+	if (PEX_DEFAULT_IF != pexIf)
+	{
+		if (pexIf >= mvCtrlPexMaxIfGet())
+		{
+			mvOsPrintf("mvPexLocalBusNumGet: ERR. Invalid PEX interface %d\n",pexIf);
+			return 0xFFFFFFFF;
+		}
+	}
+
+
+	pexStatus  = MV_REG_READ(PEX_STATUS_REG(pexIf));
+
+	pexStatus &= PXSR_PEX_BUS_NUM_MASK;
+
+	return (pexStatus >> PXSR_PEX_BUS_NUM_OFFS);
+
+}
+
+
+/*******************************************************************************
+* mvPexLocalDevNumSet - Set PEX interface local device number.
+*
+* DESCRIPTION:
+*       This function sets given PEX interface its local device number.
+*       Note: In case the PEX interface is PEX-X, the information is read-only.
+*
+* INPUT:
+*       pexIf  - PEX interface number.
+*       devNum - Device number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_NOT_ALLOWED in case PEX interface is PEX-X.
+*		MV_BAD_PARAM on bad parameters ,
+*       otherwise MV_OK
+*
+*******************************************************************************/
+MV_STATUS mvPexLocalDevNumSet(MV_U32 pexIf, MV_U32 devNum)
+{
+	MV_U32 pexStatus;
+	MV_U32 localBus;
+	MV_U32 localDev;
+
+	/* Parameter checking   */
+	if (pexIf >= mvCtrlPexMaxIfGet())
+	{
+		mvOsPrintf("mvPexLocalDevNumSet: ERR. Invalid PEX interface %d\n",pexIf);
+		return MV_BAD_PARAM;
+	}
+	if (devNum >= MAX_PEX_DEVICES)
+	{
+		mvOsPrintf("mvPexLocalDevNumSet: ERR. device number illigal %d\n",
+																	   devNum);
+		return MV_BAD_PARAM;
+
+	}
+
+	localBus = mvPexLocalBusNumGet(pexIf);
+	localDev = mvPexLocalDevNumGet(pexIf);
+
+
+	pexStatus  = MV_REG_READ(PEX_STATUS_REG(pexIf));
+
+	pexStatus &= ~PXSR_PEX_DEV_NUM_MASK;
+
+	pexStatus |= (devNum << PXSR_PEX_DEV_NUM_OFFS) & PXSR_PEX_DEV_NUM_MASK;
+
+	MV_REG_WRITE(PEX_STATUS_REG(pexIf), pexStatus);
+
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvPexLocalDevNumGet - Get PEX interface local device number.
+*
+* DESCRIPTION:
+*       This function gets the local device number of a given PEX interface.
+*
+* INPUT:
+*       pexIf  - PEX interface number.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Local device number. 0xffffffff on Error
+*
+*******************************************************************************/
+MV_U32 mvPexLocalDevNumGet(MV_U32 pexIf)
+{
+	MV_U32 pexStatus;
+
+	/* Parameter checking   */
+
+	if (PEX_DEFAULT_IF != pexIf)
+	{
+		if (pexIf >= mvCtrlPexMaxIfGet())
+		{
+			mvOsPrintf("mvPexLocalDevNumGet: ERR. Invalid PEX interface %d\n",
+																		pexIf);
+			return 0xFFFFFFFF;
+		}
+	}
+
+	pexStatus  = MV_REG_READ(PEX_STATUS_REG(pexIf));
+
+	pexStatus &= PXSR_PEX_DEV_NUM_MASK;
+
+	return (pexStatus >> PXSR_PEX_DEV_NUM_OFFS);
+}
+
+MV_VOID mvPexPhyRegRead(MV_U32 pexIf, MV_U32 regOffset, MV_U16 *value)
+{
+
+	MV_U32 regAddr;
+	if (pexIf >= mvCtrlPexMaxIfGet())
+	{
+		mvOsPrintf("mvPexPhyRegRead: ERR. Invalid PEX interface %d\n", pexIf);
+		return;
+	}
+	regAddr = (BIT31 | ((regOffset & 0x3fff) << 16));
+	MV_REG_WRITE(PEX_PHY_ACCESS_REG(pexIf), regAddr);
+	*value = MV_REG_READ(PEX_PHY_ACCESS_REG(pexIf));
+}
+
+
+MV_VOID mvPexPhyRegWrite(MV_U32 pexIf, MV_U32 regOffset, MV_U16 value)
+{
+
+	MV_U32 regAddr;
+	if(pexIf >= mvCtrlPexMaxIfGet())
+	{
+		mvOsPrintf("mvPexPhyRegWrite: ERR. Invalid PEX interface %d\n", pexIf);
+		return;
+	}
+	regAddr = (((regOffset & 0x3fff) << 16) | value);
+	MV_REG_WRITE(PEX_PHY_ACCESS_REG(pexIf), regAddr);
+}
+
+/*******************************************************************************
+* mvPexActiveStateLinkPMEnable
+*
+* DESCRIPTION:
+*       Enable Active Link State Power Management
+*
+* INPUT:
+*       pexIf   - PEX interface number.
+*	enable	- MV_TRUE to enable ASPM, MV_FALSE to disable.
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       MV_OK on success , MV_ERROR otherwise
+*
+*******************************************************************************/
+MV_STATUS mvPexActiveStateLinkPMEnable(MV_U32 pexIf, MV_BOOL enable)
+{
+	MV_U32 reg;
+
+	if(pexIf >= mvCtrlPexMaxIfGet())
+	{
+		mvOsPrintf("mvPexActiveStateLinkPMEnable: ERR. Invalid PEX interface %d\n", pexIf);
+		return MV_ERROR;
+	}
+
+	reg = MV_REG_READ(PEX_PWR_MNG_EXT_REG(pexIf)) & ~PXPMER_L1_ASPM_EN_MASK;
+	if(enable == MV_TRUE)
+		reg |= PXPMER_L1_ASPM_EN_MASK;
+	MV_REG_WRITE(PEX_PWR_MNG_EXT_REG(pexIf), reg);
+
+	/* Enable / Disable L0/1 entry */
+	reg = MV_REG_READ(PEX_CFG_DIRECT_ACCESS(pexIf, PEX_LINK_CTRL_STAT_REG))
+			& ~PXLCSR_ASPM_CNT_MASK;
+	if(enable == MV_TRUE)
+		reg |= PXLCSR_ASPM_CNT_L0S_L1S_ENT_SUPP;
+	MV_REG_WRITE(PEX_CFG_DIRECT_ACCESS(pexIf, PEX_LINK_CTRL_STAT_REG), reg);
+
+	return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvPexForceX1
+*
+* DESCRIPTION:
+*       shut down lanes 1-3 if recognize that attached to an x1 end-point
+* INPUT:
+*       pexIf   - PEX interface number.
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       MV_OK on success , MV_ERROR otherwise
+*
+*******************************************************************************/
+MV_U32 mvPexForceX1(MV_U32 pexIf)
+{
+	MV_U32 regData = 0;
+	if(pexIf >= mvCtrlPexMaxIfGet())
+	{
+		mvOsPrintf("mvPexForceX1: ERR. Invalid PEX interface %d\n", pexIf);
+		return MV_BAD_PARAM;
+	}
+
+	regData  = MV_REG_READ(PEX_CTRL_REG(pexIf)) & ~(PXCR_CONF_LINK_MASK) ;
+	regData |= PXCR_CONF_LINK_X1;
+
+	MV_REG_WRITE(PEX_CTRL_REG(pexIf), regData);
+	return MV_OK;
+}
+
+MV_BOOL mvPexIsPowerUp(MV_U32 pexIf)
+{
+	if(pexIf >= mvCtrlPexMaxIfGet())
+	{
+		mvOsPrintf("mvPexIsPowerUp: ERR. Invalid PEX interface %d\n", pexIf);
+		return MV_FALSE;
+	}
+	return mvCtrlPwrClckGet(PEX_UNIT_ID, pexIf);
+}
+
+
+MV_VOID mvPexPowerDown(MV_U32 pexIf)
+{
+	if ( (mvCtrlModelGet() == MV_78XX0_DEV_ID) ||
+		(mvCtrlModelGet() == MV_76100_DEV_ID) ||
+		(mvCtrlModelGet() == MV_78100_DEV_ID) ||
+		(mvCtrlModelGet() == MV_78200_DEV_ID) )
+	{
+		mvCtrlPwrClckSet(PEX_UNIT_ID, pexIf, MV_FALSE);
+	}
+	else
+	{
+		MV_REG_WRITE((0x41B00 -(pexIf)*0x10000), 0x20800087);
+	}
+}
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPex.h b/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPex.h
new file mode 100644
index 000000000000..38459626b077
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPex.h
@@ -0,0 +1,168 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCPEXH
+#define __INCPEXH
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "pex/mvPexRegs.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+
+
+
+/* NOTE not supported in this driver:*/
+
+
+/* defines */
+/* The number of supported PEX interfaces depend on Marvell controller 		*/
+/* device number. This device number ID is located on the PEX unit 			*/
+/* configuration header. This creates a loop where calling PEX 				*/
+/* configuration read/write	routine results a call to get PEX configuration */
+/* information etc. This macro defines a default PEX interface. This PEX	*/
+/* interface is sure to exist.												*/
+#define PEX_DEFAULT_IF	0
+
+
+/* typedefs */
+/* The Marvell controller supports both root complex and end point devices */
+/* This enumeration describes the PEX type.                                 */
+typedef enum _mvPexType
+{
+    MV_PEX_ROOT_COMPLEX,   	/* root complex device */
+    MV_PEX_END_POINT        /* end point device */
+}MV_PEX_TYPE;
+
+typedef enum _mvPexWidth
+{
+    MV_PEX_WITDH_X1 = 1,
+    MV_PEX_WITDH_X2,
+    MV_PEX_WITDH_X3,
+    MV_PEX_WITDH_X4,
+    MV_PEX_WITDH_INVALID
+}MV_PEX_WIDTH;
+
+/* PEX Bar attributes */
+typedef struct _mvPexMode
+{
+	MV_PEX_TYPE 	pexType;
+	MV_PEX_WIDTH    pexWidth;
+	MV_BOOL         pexLinkUp;
+}MV_PEX_MODE;
+
+
+
+/* Global Functions prototypes */
+/* mvPexInit - Initialize PEX interfaces*/
+MV_STATUS mvPexHalInit(MV_U32 pexIf, MV_PEX_TYPE pexType);
+
+/* mvPexModeGet - Get Pex If mode */
+MV_U32 mvPexModeGet(MV_U32 pexIf,MV_PEX_MODE *pexMode);
+
+/* mvPexConfigRead - Read from configuration space */
+MV_U32 mvPexConfigRead (MV_U32 pexIf, MV_U32 bus, MV_U32 dev,
+						MV_U32 func,MV_U32 regOff);
+
+/* mvPexConfigWrite - Write to configuration space */
+MV_STATUS mvPexConfigWrite(MV_U32 pexIf, MV_U32 bus, MV_U32 dev,
+                           MV_U32 func, MV_U32 regOff, MV_U32 data);
+
+/* mvPexMasterEnable - Enable/disale PEX interface master transactions.*/
+MV_STATUS mvPexMasterEnable(MV_U32 pexIf, MV_BOOL enable);
+
+/* mvPexSlaveEnable - Enable/disale PEX interface slave transactions.*/
+MV_STATUS mvPexSlaveEnable(MV_U32 pexIf, MV_U32 bus,MV_U32 dev, MV_BOOL enable);
+
+/* mvPexLocalBusNumSet - Set PEX interface local bus number.*/
+MV_STATUS mvPexLocalBusNumSet(MV_U32 pexIf, MV_U32 busNum);
+
+/* mvPexLocalBusNumGet - Get PEX interface local bus number.*/
+MV_U32 mvPexLocalBusNumGet(MV_U32 pexIf);
+
+/* mvPexLocalDevNumSet - Set PEX interface local device number.*/
+MV_STATUS mvPexLocalDevNumSet(MV_U32 pexIf, MV_U32 devNum);
+
+/* mvPexLocalDevNumGet - Get PEX interface local device number.*/
+MV_U32 mvPexLocalDevNumGet(MV_U32 pexIf);
+/* mvPexForceX1 - Force PEX interface to X1 mode. */
+MV_U32 mvPexForceX1(MV_U32 pexIf);
+
+/* mvPexIsPowerUp - Is PEX interface Power up? */
+MV_BOOL mvPexIsPowerUp(MV_U32 pexIf);
+
+/* mvPexPowerDown - Power Down */
+MV_VOID mvPexPowerDown(MV_U32 pexIf);
+
+/* mvPexPowerUp - Power Up */
+MV_VOID mvPexPowerUp(MV_U32 pexIf);
+
+/* mvPexPhyRegRead - Pex phy read */
+MV_VOID mvPexPhyRegRead(MV_U32 pexIf, MV_U32 regOffset, MV_U16 *value);
+
+/* mvPexPhyRegWrite - Pex phy write */
+MV_VOID mvPexPhyRegWrite(MV_U32 pexIf, MV_U32 regOffset, MV_U16 value);
+
+MV_STATUS mvPexActiveStateLinkPMEnable(MV_U32 pexIf, MV_BOOL enable);
+
+#endif /* #ifndef __INCPEXH */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPexRegs.h b/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPexRegs.h
new file mode 100644
index 000000000000..330291736023
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvPexRegs.h
@@ -0,0 +1,749 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCPEXREGSH
+#define __INCPEXREGSH
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* defines */
+#define MAX_PEX_DEVICES         32
+#define MAX_PEX_FUNCS           8
+#define MAX_PEX_BUSSES          256
+
+
+
+/*********************************************************/
+/* PCI Express Configuration Cycles Generation Registers */
+/*********************************************************/
+
+#define PEX_CFG_ADDR_REG(pexIf)		((PEX_IF_BASE(pexIf)) + 0x18F8)
+#define PEX_CFG_DATA_REG(pexIf)		((PEX_IF_BASE(pexIf)) + 0x18FC)
+#define PEX_PHY_ACCESS_REG(pexIf)	((PEX_IF_BASE(pexIf)) + 0x1B00)
+/* PCI Express Configuration Address Register */
+/* PEX_CFG_ADDR_REG (PXCAR)*/
+
+#define PXCAR_REG_NUM_OFFS			2
+#define PXCAR_REG_NUM_MAX			0x3F
+#define PXCAR_REG_NUM_MASK			(PXCAR_REG_NUM_MAX << PXCAR_REG_NUM_OFFS)
+#define PXCAR_FUNC_NUM_OFFS			8
+#define PXCAR_FUNC_NUM_MAX			0x7
+#define PXCAR_FUNC_NUM_MASK			(PXCAR_FUNC_NUM_MAX << PXCAR_FUNC_NUM_OFFS)
+#define PXCAR_DEVICE_NUM_OFFS		11
+#define PXCAR_DEVICE_NUM_MAX		0x1F
+#define PXCAR_DEVICE_NUM_MASK		(PXCAR_DEVICE_NUM_MAX << PXCAR_DEVICE_NUM_OFFS)
+#define PXCAR_BUS_NUM_OFFS			16
+#define PXCAR_BUS_NUM_MAX			0xFF
+#define PXCAR_BUS_NUM_MASK			(PXCAR_BUS_NUM_MAX << PXCAR_BUS_NUM_OFFS)
+#define PXCAR_EXT_REG_NUM_OFFS		24
+#define PXCAR_EXT_REG_NUM_MAX		0xF
+
+/* in pci express register address is now the legacy register address (8 bits)
+with the new extended register address (more 4 bits) , below is the mask of
+the upper 4 bits of the full register address */
+
+#define PXCAR_REAL_EXT_REG_NUM_OFFS	8
+#define PXCAR_EXT_REG_NUM_MASK		(PXCAR_EXT_REG_NUM_MAX << PXCAR_EXT_REG_NUM_OFFS)
+#define PXCAR_CONFIG_EN				BIT31
+
+#define PXCAR_REAL_EXT_REG_NUM_OFFS     8
+#define PXCAR_REAL_EXT_REG_NUM_MASK     (0xF << PXCAR_REAL_EXT_REG_NUM_OFFS)
+
+/* The traditional PCI spec defined 6-bit field to describe register offset.*/
+/* The new PCI Express extend the register offset by an extra 4-bits.       */
+/* The below macro assign 10-bit register offset into the apprpreate        */
+/* fields in the CFG_ADDR_REG                                               */
+#define PXCAR_REG_OFFS_SET(regOffs)                         \
+ ( (regOff & PXCAR_REG_NUM_MASK) | \
+   ( ((regOff & PXCAR_REAL_EXT_REG_NUM_MASK) >> PXCAR_REAL_EXT_REG_NUM_OFFS) << PXCAR_EXT_REG_NUM_OFFS) )
+
+/***********************************/
+/* PCI Express Interrupt registers */
+/***********************************/
+#define PEX_CAUSE_REG(pexIf)		((PEX_IF_BASE(pexIf)) + 0x1900)
+#define PEX_MASK_REG(pexIf)		((PEX_IF_BASE(pexIf)) + 0x1910)
+
+#define PXICR_TX_REQ_IN_DLDOWN_ERR  BIT0  /* Transmit request while field   */
+                                          /* <DLDown> of the PCI Express    */
+/* PCI Express Interrupt Cause */
+/* PEX_INT_CAUSE_REG  (PXICR)*/
+/* PEX_INT_MASK_REG*/
+/*
+NOTE:All bits except bits[27:24] are Read/Write Clear only. A cause bit sets
+upon an error event occurrence. A write of 0 clears the bit. A write of 1 has
+no affect. Bits[24:27} are set and cleared upon reception of interrupt
+emulation messages.
+
+Mask bit per cause bit. If a bit is set to 1, the corresponding event is
+enabled. Mask does not affect setting of the Interrupt Cause register bits;
+it only affects the assertion of the interrupt .*/
+
+
+#define PXICR_MDIS_CAUSE			BIT1  /* Attempt to generate PCI transaction
+                                             while master is disabled */
+#define PXICR_ERR_WRTO_REG_CAUSE	BIT3  /* Erroneous write attempt to
+                                             PCI Express internal register*/
+#define PXICR_HIT_DFLT_WIN_ERR		BIT4  /* Hit Default Window Error */
+#define PXICR_RX_RAM_PAR_ERR        BIT6  /* Rx RAM Parity Error */
+#define PXICR_TX_RAM_PAR_ERR        BIT7  /* Tx RAM Parity Error */
+#define PXICR_COR_ERR_DET			BIT8  /* Correctable Error Detected*/
+#define PXICR_NF_ERR_DET			BIT9  /* Non-Fatal Error Detected*/
+#define PXICR_FERR_DET				BIT10 /* Fatal Error Detected*/
+#define PXICR_DSTATE_CHANGE			BIT11 /* Dstate Change Indication*/
+#define PXICR_BIST					BIT12 /* PCI-Express BIST activated*/
+#define PXICR_FLW_CTRL_PROT     BIT14 /* Flow Control Protocol Error */
+
+#define PXICR_RCV_UR_CA_ERR         BIT15 /* Received UR or CA status. */
+#define PXICR_RCV_ERR_FATAL			BIT16 /* Received ERR_FATAL message.*/
+#define PXICR_RCV_ERR_NON_FATAL		BIT17 /* Received ERR_NONFATAL message*/
+#define PXICR_RCV_ERR_COR			BIT18 /* Received ERR_COR message.*/
+#define PXICR_RCV_CRS				BIT19 /* Received CRS completion status*/
+#define PXICR_SLV_HOT_RESET			BIT20 /* Received Hot Reset Indication*/
+#define PXICR_SLV_DIS_LINK			BIT21 /* Slave Disable Link Indication*/
+#define PXICR_SLV_LB				BIT22 /* Slave Loopback Indication*/
+#define PXICR_LINK_FAIL				BIT23 /* Link Failure indication.*/
+#define PXICR_RCV_INTA				BIT24 /* IntA status.*/
+#define PXICR_RCV_INTB				BIT25 /* IntB status.*/
+#define PXICR_RCV_INTC				BIT26 /* IntC status.*/
+#define PXICR_RCV_INTD				BIT27 /* IntD status.*/
+#define PXICR_RCV_PM_PME            BIT28 /* Received PM_PME message. */
+
+
+/********************************************/
+/* PCI Express Control and Status Registers */
+/********************************************/
+#define PEX_CTRL_REG(pexIf)				((PEX_IF_BASE(pexIf)) + 0x1A00)
+#define PEX_STATUS_REG(pexIf)				((PEX_IF_BASE(pexIf)) + 0x1A04)
+#define PEX_COMPLT_TMEOUT_REG(pexIf)			((PEX_IF_BASE(pexIf)) + 0x1A10)
+#define PEX_PWR_MNG_EXT_REG(pexIf)			((PEX_IF_BASE(pexIf)) + 0x1A18)
+#define PEX_FLOW_CTRL_REG(pexIf)			((PEX_IF_BASE(pexIf)) + 0x1A20)
+#define PEX_ACK_TMR_4X_REG(pexIf)			((PEX_IF_BASE(pexIf)) + 0x1A30)
+#define PEX_ACK_TMR_1X_REG(pexIf)			((PEX_IF_BASE(pexIf)) + 0x1A40)
+#define PEX_TL_CTRL_REG(pexIf)				((PEX_IF_BASE(pexIf)) + 0x1AB0)
+
+
+#define PEX_RAM_PARITY_CTRL_REG(pexIf)  		((PEX_IF_BASE(pexIf)) + 0x1A50)
+/* PCI Express Control Register */
+/* PEX_CTRL_REG (PXCR) */
+
+#define PXCR_CONF_LINK_OFFS             0
+#define PXCR_CONF_LINK_MASK             (1 << PXCR_CONF_LINK_OFFS)
+#define PXCR_CONF_LINK_X4               (0 << PXCR_CONF_LINK_OFFS)
+#define PXCR_CONF_LINK_X1               (1 << PXCR_CONF_LINK_OFFS)
+#define PXCR_DEV_TYPE_CTRL_OFFS			1     /*PCI ExpressDevice Type Control*/
+#define PXCR_DEV_TYPE_CTRL_MASK			BIT1
+#define PXCR_DEV_TYPE_CTRL_CMPLX		(1 << PXCR_DEV_TYPE_CTRL_OFFS)
+#define PXCR_DEV_TYPE_CTRL_POINT		(0 << PXCR_DEV_TYPE_CTRL_OFFS)
+#define PXCR_CFG_MAP_TO_MEM_EN			BIT2  /* Configuration Header Mapping
+											   to Memory Space Enable         */
+
+#define PXCR_CFG_MAP_TO_MEM_EN			BIT2 /* Configuration Header Mapping
+											   to Memory Space Enable*/
+
+#define PXCR_RSRV1_OFFS					5
+#define PXCR_RSRV1_MASK					(0x7 << PXCR_RSRV1_OFFS)
+#define PXCR_RSRV1_VAL					(0x0 << PXCR_RSRV1_OFFS)
+
+#define PXCR_CONF_MAX_OUTSTND_OFFS		8 /*Maximum outstanding NP requests as a master*/
+#define PXCR_CONF_MAX_OUTSTND_MASK		(0x3 << PXCR_CONF_MAX_OUTSTND_OFFS)
+
+
+#define PXCR_CONF_NFTS_OFFS				16 /*number of FTS Ordered-Sets*/
+#define PXCR_CONF_NFTS_MASK				(0xff << PXCR_CONF_NFTS_OFFS)
+
+#define PXCR_CONF_MSTR_HOT_RESET		BIT24 /*Master Hot-Reset.*/
+#define PXCR_CONF_MSTR_LB				BIT26 /* Master Loopback */
+#define PXCR_CONF_MSTR_DIS_SCRMB		BIT27 /* Master Disable Scrambling*/
+#define PXCR_CONF_DIRECT_DIS_SCRMB		BIT28 /* Direct Disable Scrambling*/
+
+/* PCI Express Status Register */
+/* PEX_STATUS_REG (PXSR) */
+
+#define PXSR_DL_DOWN					BIT0 /* DL_Down indication.*/
+
+#define PXSR_PEX_BUS_NUM_OFFS			8 /* Bus Number Indication */
+#define PXSR_PEX_BUS_NUM_MASK			(0xff << PXSR_PEX_BUS_NUM_OFFS)
+
+#define PXSR_PEX_DEV_NUM_OFFS			16 /* Device Number Indication */
+#define PXSR_PEX_DEV_NUM_MASK			(0x1f << PXSR_PEX_DEV_NUM_OFFS)
+
+#define PXSR_PEX_SLV_HOT_RESET			BIT24 /* Slave Hot Reset Indication*/
+#define PXSR_PEX_SLV_DIS_LINK			BIT25 /* Slave Disable Link Indication*/
+#define PXSR_PEX_SLV_LB					BIT26 /* Slave Loopback Indication*/
+#define PXSR_PEX_SLV_DIS_SCRMB			BIT27 /* Slave Disable Scrambling Indication*/
+
+
+/* PCI Express Completion Timeout Register */
+/* PEX_COMPLT_TMEOUT_REG (PXCTR)*/
+
+#define PXCTR_CMP_TO_THRSHLD_OFFS		0 /* Completion Timeout Threshold */
+#define PXCTR_CMP_TO_THRSHLD_MASK		(0xffff << PXCTR_CMP_TO_THRSHLD_OFFS)
+
+/* PCI Express Power Management Extended Register */
+/* PEX_PWR_MNG_EXT_REG (PXPMER) */
+
+#define PXPMER_L1_ASPM_EN_OFFS			1
+#define PXPMER_L1_ASPM_EN_MASK			(0x1 << PXPMER_L1_ASPM_EN_OFFS)
+
+/* PCI Express Flow Control Register */
+/* PEX_FLOW_CTRL_REG (PXFCR)*/
+
+#define PXFCR_PH_INIT_FC_OFFS			0 /*Posted Headers Flow Control Credit
+										    Initial Value.*/
+#define PXFCR_PH_INIT_FC_MASK			(0xff << PXFCR_PH_INIT_FC_OFFS)
+
+
+#define PXFCR_NPH_INIT_FC_OFFS			8 /* Classified Non-Posted Headers
+											 Flow Control Credit Initial Value*/
+#define PXFCR_NPH_INIT_FC_MASK			(0xff << PXFCR_NPH_INIT_FC_OFFS)
+
+#define PXFCR_CH_INIT_FC_OFFS			16 /* Completion Headers Flow Control
+											  Credit Initial Value Infinite*/
+
+#define PXFCR_CH_INIT_FC_MASK			(0xff << PXFCR_CH_INIT_FC_OFFS)
+
+#define PXFCR_FC_UPDATE_TO_OFFS			24 /* Flow Control Update Timeout */
+#define PXFCR_FC_UPDATE_TO_MASK			(0xff << PXFCR_FC_UPDATE_TO_OFFS)
+
+/* PCI Express Acknowledge Timers (4X) Register */
+/* PEX_ACK_TMR_4X_REG (PXAT4R) */
+#define PXAT1R_ACK_LAT_TOX4_OFFS		0  /* Ack Latency Timer Timeout Value */
+#define PXAT1R_ACK_LAT_TOX4_MASK		(0xffff << PXAT4R_ACK_LAT_TOX1_OFFS)
+#define PXAT1R_ACK_RPLY_TOX4_OFFS		16 /* Ack Replay Timer Timeout Value  */
+#define PXAT1R_ACK_RPLY_TOX4_MASK		(0xffff << PXAT1R_ACK_RPLY_TOX1_OFFS)
+
+/* PCI Express Acknowledge Timers (1X) Register */
+/* PEX_ACK_TMR_1X_REG (PXAT1R) */
+
+#define PXAT1R_ACK_LAT_TOX1_OFFS		0 /* Acknowledge Latency Timer Timeout
+										     Value for 1X Link*/
+#define PXAT1R_ACK_LAT_TOX1_MASK		(0xffff << PXAT1R_ACK_LAT_TOX1_OFFS)
+
+#define PXAT1R_ACK_RPLY_TOX1_OFFS		16 /* Acknowledge Replay Timer Timeout
+											  Value for 1X*/
+#define PXAT1R_ACK_RPLY_TOX1_MASK		(0xffff << PXAT1R_ACK_RPLY_TOX1_OFFS)
+
+
+/* PCI Express TL Control Register */
+/* PEX_TL_CTRL_REG (PXTCR) */
+
+#define PXTCR_TX_CMP_BUFF_NO_OFFS		8 /*Number of completion buffers in Tx*/
+#define PXTCR_TX_CMP_BUFF_NO_MASK		(0xf << PXTCR_TX_CMP_BUFF_NO_OFFS)
+
+/* PCI Express Debug MAC Control Register */
+/* PEX_DEBUG_MAC_CTRL_REG (PXDMCR) */
+
+#define PXDMCR_LINKUP					BIT4
+
+
+
+/**********************************************/
+/* PCI Express Configuration Header Registers */
+/**********************************************/
+#define PEX_CFG_DIRECT_ACCESS(pexIf,cfgReg)	((PEX_IF_BASE(pexIf)) + (cfgReg))
+
+#define PEX_DEVICE_AND_VENDOR_ID					0x000
+#define PEX_STATUS_AND_COMMAND						0x004
+#define PEX_CLASS_CODE_AND_REVISION_ID			    0x008
+#define PEX_BIST_HDR_TYPE_LAT_TMR_CACHE_LINE		0x00C
+#define PEX_MEMORY_BAR_BASE_ADDR(barNum)		 	(0x010 + ((barNum) << 2))
+#define PEX_MV_BAR_BASE(barNum)						(0x010 + (barNum) * 8)
+#define PEX_MV_BAR_BASE_HIGH(barNum)				(0x014 + (barNum) * 8)
+#define PEX_BAR0_INTER_REG							0x010
+#define PEX_BAR0_INTER_REG_HIGH						0x014
+#define PEX_BAR1_REG								0x018
+#define PEX_BAR1_REG_HIGH							0x01C
+#define PEX_BAR2_REG								0x020
+#define PEX_BAR2_REG_HIGH							0x024
+
+#define PEX_SUBSYS_ID_AND_SUBSYS_VENDOR_ID			0x02C
+#define PEX_EXPANSION_ROM_BASE_ADDR_REG				0x030
+#define PEX_CAPABILTY_LIST_POINTER					0x034
+#define PEX_INTERRUPT_PIN_AND_LINE					0x03C
+
+/* capability list */
+#define PEX_POWER_MNG_CAPABILITY		            0x040
+#define PEX_POWER_MNG_STATUS_CONTROL		        0x044
+
+#define PEX_MSI_MESSAGE_CONTROL						0x050
+#define PEX_MSI_MESSAGE_ADDR		                0x054
+#define PEX_MSI_MESSAGE_HIGH_ADDR		            0x058
+#define PEX_MSI_MESSAGE_DATA		                0x05C
+
+#define PEX_CAPABILITY_REG							0x60
+#define PEX_DEV_CAPABILITY_REG						0x64
+#define PEX_DEV_CTRL_STAT_REG						0x68
+#define PEX_LINK_CAPABILITY_REG						0x6C
+#define PEX_LINK_CTRL_STAT_REG						0x70
+
+#define PEX_ADV_ERR_RPRT_HDR_TRGT_REG				0x100
+#define PEX_UNCORRECT_ERR_STAT_REG					0x104
+#define PEX_UNCORRECT_ERR_MASK_REG					0x108
+#define PEX_UNCORRECT_ERR_SERVITY_REG				0x10C
+#define PEX_CORRECT_ERR_STAT_REG					0x110
+#define PEX_CORRECT_ERR_MASK_REG					0x114
+#define PEX_ADV_ERR_CAPABILITY_CTRL_REG				0x118
+#define PEX_HDR_LOG_FIRST_DWORD_REG					0x11C
+#define PEX_HDR_LOG_SECOND_DWORD_REG				0x120
+#define PEX_HDR_LOG_THIRD_DWORD_REG					0x124
+#define PEX_HDR_LOG_FOURTH_DWORD_REG				0x128
+
+
+
+/* PCI Express Device and Vendor ID Register*/
+/*PEX_DEVICE_AND_VENDOR_ID (PXDAVI)*/
+
+#define PXDAVI_VEN_ID_OFFS			0 	/* Vendor ID */
+#define PXDAVI_VEN_ID_MASK			(0xffff << PXDAVI_VEN_ID_OFFS)
+
+#define PXDAVI_DEV_ID_OFFS			16	/* Device ID */
+#define PXDAVI_DEV_ID_MASK  		(0xffff << PXDAVI_DEV_ID_OFFS)
+
+
+/* PCI Express Command and Status Register*/
+/*PEX_STATUS_AND_COMMAND (PXSAC)*/
+
+#define PXSAC_IO_EN			BIT0 	/* IO Enable 							  */
+#define PXSAC_MEM_EN		BIT1	/* Memory Enable 						  */
+#define PXSAC_MASTER_EN		BIT2	/* Master Enable 						  */
+#define PXSAC_PERR_EN		BIT6	/* Parity Errors Respond Enable 		  */
+#define PXSAC_SERR_EN		BIT8	/* Ability to assert SERR# line			  */
+#define PXSAC_INT_DIS		BIT10   /* Interrupt Disable 					  */
+#define PXSAC_INT_STAT		BIT19   /* Interrupt Status 			*/
+#define PXSAC_CAP_LIST		BIT20	/* Capability List Support 				  */
+#define PXSAC_MAS_DATA_PERR	BIT24   /* Master Data Parity Error				  */
+#define PXSAC_SLAVE_TABORT	BIT27	/* Signalled Target Abort 	*/
+#define PXSAC_RT_ABORT		BIT28	/* Recieved Target Abort 	*/
+#define PXSAC_MABORT			BIT29	/* Recieved Master Abort 	*/
+#define PXSAC_SYSERR			BIT30	/* Signalled system error 	*/
+#define PXSAC_DET_PARERR		BIT31	/* Detect Parity Error 		*/
+
+
+/* PCI Express Class Code and Revision ID Register*/
+/*PEX_CLASS_CODE_AND_REVISION_ID (PXCCARI)*/
+
+#define PXCCARI_REVID_OFFS		0		/* Revision ID */
+#define PXCCARI_REVID_MASK		(0xff << PXCCARI_REVID_OFFS)
+
+#define PXCCARI_FULL_CLASS_OFFS	8		/* Full Class Code */
+#define PXCCARI_FULL_CLASS_MASK	(0xffffff << PXCCARI_FULL_CLASS_OFFS)
+
+#define PXCCARI_PROGIF_OFFS		8		/* Prog .I/F*/
+#define PXCCARI_PROGIF_MASK		(0xff << PXCCARI_PROGIF_OFFS)
+
+#define PXCCARI_SUB_CLASS_OFFS	16		/* Sub Class*/
+#define PXCCARI_SUB_CLASS_MASK	(0xff << PXCCARI_SUB_CLASS_OFFS)
+
+#define PXCCARI_BASE_CLASS_OFFS	24		/* Base Class*/
+#define PXCCARI_BASE_CLASS_MASK	(0xff << PXCCARI_BASE_CLASS_OFFS)
+
+
+/* PCI Express BIST, Header Type and Cache Line Size Register*/
+/*PEX_BIST_HDR_TYPE_LAT_TMR_CACHE_LINE (PXBHTLTCL)*/
+
+#define PXBHTLTCL_CACHELINE_OFFS		0	/* Specifies the cache line size */
+#define PXBHTLTCL_CACHELINE_MASK		(0xff << PXBHTLTCL_CACHELINE_OFFS)
+
+#define PXBHTLTCL_HEADTYPE_FULL_OFFS	16	/* Full Header Type */
+#define PXBHTLTCL_HEADTYPE_FULL_MASK	(0xff << PXBHTLTCL_HEADTYPE_FULL_OFFS)
+
+#define PXBHTLTCL_MULTI_FUNC			BIT23	/* Multi/Single function */
+
+#define PXBHTLTCL_HEADER_OFFS			16		/* Header type */
+#define PXBHTLTCL_HEADER_MASK			(0x7f << PXBHTLTCL_HEADER_OFFS)
+#define PXBHTLTCL_HEADER_STANDARD		(0x0 << PXBHTLTCL_HEADER_OFFS)
+#define PXBHTLTCL_HEADER_PCI2PCI_BRIDGE	(0x1 << PXBHTLTCL_HEADER_OFFS)
+
+
+#define PXBHTLTCL_BISTCOMP_OFFS		24	/* BIST Completion Code */
+#define PXBHTLTCL_BISTCOMP_MASK		(0xf << PXBHTLTCL_BISTCOMP_OFFS)
+
+#define PXBHTLTCL_BISTACT			BIT30	/* BIST Activate bit */
+#define PXBHTLTCL_BISTCAP			BIT31	/* BIST Capable Bit */
+#define PXBHTLTCL_BISTCAP_OFFS		31
+#define PXBHTLTCL_BISTCAP_MASK		BIT31
+#define PXBHTLTCL_BISTCAP_VAL		0
+
+
+/* PCI Express Subsystem Device and Vendor ID */
+/*PEX_SUBSYS_ID_AND_SUBSYS_VENDOR_ID (PXSIASVI)*/
+
+#define PXSIASVI_VENID_OFFS	0	/* Subsystem Manufacturer Vendor ID Number */
+#define PXSIASVI_VENID_MASK	(0xffff << PXSIASVI_VENID_OFFS)
+
+#define PXSIASVI_DEVID_OFFS	16	/* Subsystem Device ID Number */
+#define PXSIASVI_DEVID_MASK	(0xffff << PXSIASVI_DEVID_OFFS)
+
+
+/* PCI Express Capability List Pointer Register*/
+/*PEX_CAPABILTY_LIST_POINTER (PXCLP)*/
+
+#define PXCLP_CAPPTR_OFFS	0		/* Capability List Pointer */
+#define PXCLP_CAPPTR_MASK	(0xff << PXCLP_CAPPTR_OFFS)
+
+/* PCI Express Interrupt Pin and Line Register */
+/*PEX_INTERRUPT_PIN_AND_LINE (PXIPAL)*/
+
+#define PXIPAL_INTLINE_OFFS	0	/* Interrupt line (IRQ) */
+#define PXIPAL_INTLINE_MASK	(0xff << PXIPAL_INTLINE_OFFS)
+
+#define PXIPAL_INTPIN_OFFS	8	/* interrupt pin (A,B,C,D) */
+#define PXIPAL_INTPIN_MASK	(0xff << PXIPAL_INTPIN_OFFS)
+
+
+/* PCI Express Power Management Capability Header Register*/
+/*PEX_POWER_MNG_CAPABILITY (PXPMC)*/
+
+#define PXPMC_CAP_ID_OFFS		0 /* Capability ID */
+#define PXPMC_CAP_ID_MASK		(0xff << PXPMC_CAP_ID_OFFS)
+
+#define PXPMC_NEXT_PTR_OFFS		8 /* Next Item Pointer */
+#define PXPMC_NEXT_PTR_MASK		(0xff << PXPMC_NEXT_PTR_OFFS)
+
+#define PXPMC_PMC_VER_OFFS		16 /* PCI Power Management Capability Version*/
+#define PXPMC_PMC_VER_MASK		(0x7 << PXPMC_PMC_VER_OFFS)
+
+#define PXPMC_DSI 				BIT21/* Device Specific Initialization */
+
+#define PXPMC_AUX_CUR_OFFS		22 /* Auxiliary Current Requirements */
+#define PXPMC_AUX_CUR_MASK		(0x7 << PXPMC_AUX_CUR_OFFS)
+
+#define PXPMC_D1_SUP 			BIT25 /* D1 Power Management support*/
+
+#define PXPMC_D2_SUP 			BIT26 /* D2 Power Management support*/
+
+#define PXPMC_PME_SUP_OFFS		27 /* PM Event generation support*/
+#define PXPMC_PME_SUP_MASK		(0x1f << PXPMC_PME_SUP_OFFS)
+
+/* PCI Express Power Management Control and Status Register*/
+/*PEX_POWER_MNG_STATUS_CONTROL (PXPMSC)*/
+
+#define PXPMSC_PM_STATE_OFFS	0	/* Power State */
+#define PXPMSC_PM_STATE_MASK	(0x3 << PXPMSC_PM_STATE_OFFS)
+#define PXPMSC_PM_STATE_D0		(0x0 << PXPMSC_PM_STATE_OFFS)
+#define PXPMSC_PM_STATE_D1		(0x1 << PXPMSC_PM_STATE_OFFS)
+#define PXPMSC_PM_STATE_D2		(0x2 << PXPMSC_PM_STATE_OFFS)
+#define PXPMSC_PM_STATE_D3		(0x3 << PXPMSC_PM_STATE_OFFS)
+
+#define PXPMSC_PME_EN			BIT8/* PM_PME Message Generation Enable */
+
+#define PXPMSC_PM_DATA_SEL_OFFS	9	/* Data Select*/
+#define PXPMSC_PM_DATA_SEL_MASK	(0xf << PXPMSC_PM_DATA_SEL_OFFS)
+
+#define PXPMSC_PM_DATA_SCALE_OFFS	13	/* Data Scale */
+#define PXPMSC_PM_DATA_SCALE_MASK	(0x3 << PXPMSC_PM_DATA_SCALE_OFFS)
+
+#define PXPMSC_PME_STAT				BIT15/* PME Status */
+
+#define PXPMSC_PM_DATA_OFFS			24		/* State Data */
+#define PXPMSC_PM_DATA_MASK			(0xff << PXPMSC_PM_DATA_OFFS)
+
+
+/* PCI Express MSI Message Control Register*/
+/*PEX_MSI_MESSAGE_CONTROL (PXMMC)*/
+
+#define PXMMC_CAP_ID_OFFS			0 /* Capability ID */
+#define PXMMC_CAP_ID_MASK			(0xff << PXMMC_CAP_ID_OFFS)
+
+#define PXMMC_NEXT_PTR_OFFS			8 /* Next Item Pointer */
+#define PXMMC_NEXT_PTR_MASK			(0xff << PXMMC_NEXT_PTR_OFFS)
+
+#define PXMMC_MSI_EN				BIT18 /* MSI Enable */
+
+#define PXMMC_MULTI_CAP_OFFS		17 /* Multiple Message Capable */
+#define PXMMC_MULTI_CAP_MASK		(0x7 << PXMMC_MULTI_CAP_OFFS)
+
+#define PXMMC_MULTI_EN_OFFS			20  /* Multiple Messages Enable */
+#define PXMMC_MULTI_EN_MASK			(0x7 << PXMMC_MULTI_EN_OFFS)
+
+#define PXMMC_ADDR64				BIT23	/* 64-bit Addressing Capable */
+
+
+/* PCI Express MSI Message Address Register*/
+/*PEX_MSI_MESSAGE_ADDR (PXMMA)*/
+
+#define PXMMA_MSI_ADDR_OFFS			2 /* Message Address  corresponds to
+										Address[31:2] of the MSI MWr TLP*/
+#define PXMMA_MSI_ADDR_MASK			(0x3fffffff << PXMMA_MSI_ADDR_OFFS)
+
+
+/* PCI Express MSI Message Address (High) Register */
+/*PEX_MSI_MESSAGE_HIGH_ADDR (PXMMHA)*/
+
+#define PXMMA_MSI_ADDR_H_OFFS		0 /* Message Upper Address corresponds to
+											Address[63:32] of the MSI MWr TLP*/
+#define PXMMA_MSI_ADDR_H_MASK		(0xffffffff << PXMMA_MSI_ADDR_H_OFFS )
+
+
+/* PCI Express MSI Message Data Register*/
+/*PEX_MSI_MESSAGE_DATA (PXMMD)*/
+
+#define PXMMD_MSI_DATA_OFFS 		0 /* Message Data */
+#define PXMMD_MSI_DATA_MASK 		(0xffff << PXMMD_MSI_DATA_OFFS )
+
+
+/* PCI Express Capability Register*/
+/*PEX_CAPABILITY_REG (PXCR)*/
+
+#define PXCR_CAP_ID_OFFS			0	/* Capability ID*/
+#define PXCR_CAP_ID_MASK			(0xff << PXCR_CAP_ID_OFFS)
+
+#define PXCR_NEXT_PTR_OFFS			8 /* Next Item Pointer*/
+#define PXCR_NEXT_PTR_MASK			(0xff << PXCR_NEXT_PTR_OFFS)
+
+#define PXCR_CAP_VER_OFFS			16 /* Capability Version*/
+#define PXCR_CAP_VER_MASK			(0xf << PXCR_CAP_VER_OFFS)
+
+#define PXCR_DEV_TYPE_OFFS			20 /*  Device/Port Type*/
+#define PXCR_DEV_TYPE_MASK			(0xf << PXCR_DEV_TYPE_OFFS)
+
+#define PXCR_SLOT_IMP 				BIT24 /* Slot Implemented*/
+
+#define PXCR_INT_MSG_NUM_OFFS		25 /* Interrupt Message Number*/
+#define PXCR_INT_MSG_NUM_MASK		(0x1f << PXCR_INT_MSG_NUM_OFFS)
+
+
+/* PCI Express Device Capabilities Register */
+/*PEX_DEV_CAPABILITY_REG (PXDCR)*/
+
+#define PXDCR_MAX_PLD_SIZE_SUP_OFFS			0 /* Maximum Payload Size Supported*/
+#define PXDCR_MAX_PLD_SIZE_SUP_MASK			(0x7 << PXDCR_MAX_PLD_SIZE_SUP_OFFS)
+
+#define PXDCR_EP_L0S_ACC_LAT_OFFS			6/* Endpoint L0s Acceptable Latency*/
+#define PXDCR_EP_L0S_ACC_LAT_MASK			(0x7 << PXDCR_EP_L0S_ACC_LAT_OFFS)
+#define PXDCR_EP_L0S_ACC_LAT_64NS_LESS		(0x0 << PXDCR_EP_L0S_ACC_LAT_OFFS)
+#define PXDCR_EP_L0S_ACC_LAT_64NS_128NS		(0x1 << PXDCR_EP_L0S_ACC_LAT_OFFS)
+#define PXDCR_EP_L0S_ACC_LAT_128NS_256NS	(0x2 << PXDCR_EP_L0S_ACC_LAT_OFFS)
+#define PXDCR_EP_L0S_ACC_LAT_256NS_512NS	(0x3 << PXDCR_EP_L0S_ACC_LAT_OFFS)
+#define PXDCR_EP_L0S_ACC_LAT_512NS_1US		(0x4 << PXDCR_EP_L0S_ACC_LAT_OFFS)
+#define PXDCR_EP_L0S_ACC_LAT_1US_2US		(0x5 << PXDCR_EP_L0S_ACC_LAT_OFFS)
+#define PXDCR_EP_L0S_ACC_LAT_2US_4US		(0x6 << PXDCR_EP_L0S_ACC_LAT_OFFS)
+#define PXDCR_EP_L0S_ACC_LAT_4US_MORE		(0x7 << PXDCR_EP_L0S_ACC_LAT_OFFS)
+
+#define PXDCR_EP_L1_ACC_LAT_OFFS 			9 /* Endpoint L1 Acceptable Latency*/
+#define PXDCR_EP_L1_ACC_LAT_MASK			(0x7 << PXDCR_EP_L1_ACC_LAT_OFFS)
+#define PXDCR_EP_L1_ACC_LAT_64NS_LESS       (0x0 << PXDCR_EP_L1_ACC_LAT_OFFS)
+#define PXDCR_EP_L1_ACC_LAT_64NS_128NS      (0x1 << PXDCR_EP_L1_ACC_LAT_OFFS)
+#define PXDCR_EP_L1_ACC_LAT_128NS_256NS     (0x2 << PXDCR_EP_L1_ACC_LAT_OFFS)
+#define PXDCR_EP_L1_ACC_LAT_256NS_512NS     (0x3 << PXDCR_EP_L1_ACC_LAT_OFFS)
+#define PXDCR_EP_L1_ACC_LAT_512NS_1US       (0x4 << PXDCR_EP_L1_ACC_LAT_OFFS)
+#define PXDCR_EP_L1_ACC_LAT_1US_2US         (0x5 << PXDCR_EP_L1_ACC_LAT_OFFS)
+#define PXDCR_EP_L1_ACC_LAT_2US_4US         (0x6 << PXDCR_EP_L1_ACC_LAT_OFFS)
+#define PXDCR_EP_L1_ACC_LAT_4US_MORE        (0x7 << PXDCR_EP_L1_ACC_LAT_OFFS)
+
+
+#define PXDCR_ATT_BUT_PRS_OFFS				12 /* Attention Button Present*/
+#define PXDCR_ATT_BUT_PRS_MASK				BIT12
+#define PXDCR_ATT_BUT_PRS_IMPLEMENTED		BIT12
+
+#define PXDCR_ATT_IND_PRS_OFFS				13 /* Attention Indicator Present*/
+#define PXDCR_ATT_IND_PRS_MASK				BIT13
+#define PXDCR_ATT_IND_PRS_IMPLEMENTED		BIT13
+
+#define PXDCR_PWR_IND_PRS_OFFS        		14/* Power Indicator Present*/
+#define PXDCR_PWR_IND_PRS_MASK       		BIT14
+#define PXDCR_PWR_IND_PRS_IMPLEMENTED		BIT14
+
+#define PXDCR_CAP_SPL_VAL_OFFS				18 /*Captured Slot Power Limit
+												 Value*/
+#define PXDCR_CAP_SPL_VAL_MASK				(0xff << PXDCR_CAP_SPL_VAL_OFFS)
+
+#define PXDCR_CAP_SP_LSCL_OFFS				26 /* Captured Slot Power Limit
+												  Scale */
+#define PXDCR_CAP_SP_LSCL_MASK				(0x3 << PXDCR_CAP_SP_LSCL_OFFS)
+
+/* PCI Express Device Control Status Register */
+/*PEX_DEV_CTRL_STAT_REG (PXDCSR)*/
+
+#define PXDCSR_COR_ERR_REP_EN		BIT0 /* Correctable Error Reporting Enable*/
+#define PXDCSR_NF_ERR_REP_EN		BIT1 /* Non-Fatal Error Reporting Enable*/
+#define PXDCSR_F_ERR_REP_EN			BIT2 /* Fatal Error Reporting Enable*/
+#define PXDCSR_UR_REP_EN			BIT3 /* Unsupported Request (UR)
+													Reporting Enable*/
+#define PXDCSR_EN_RO 				BIT4 /* Enable Relaxed Ordering*/
+
+#define PXDCSR_MAX_PLD_SZ_OFFS		5	 /* Maximum Payload Size*/
+#define PXDCSR_MAX_PLD_SZ_MASK		(0x7 << PXDCSR_MAX_PLD_SZ_OFFS)
+#define PXDCSR_MAX_PLD_SZ_128B		(0x0 << PXDCSR_MAX_PLD_SZ_OFFS)
+#define PXDCSR_EN_NS				BIT11  /* Enable No Snoop*/
+
+#define PXDCSR_MAX_RD_RQ_SZ_OFFS	12 /* Maximum Read Request Size*/
+#define PXDCSR_MAX_RD_RQ_SZ_MASK	(0x7 << PXDCSR_MAX_RD_RQ_SZ_OFFS)
+#define PXDCSR_MAX_RD_RQ_SZ_128B	(0x0 << PXDCSR_MAX_RD_RQ_SZ_OFFS)
+#define PXDCSR_MAX_RD_RQ_SZ_256B	(0x1 << PXDCSR_MAX_RD_RQ_SZ_OFFS)
+#define PXDCSR_MAX_RD_RQ_SZ_512B	(0x2 << PXDCSR_MAX_RD_RQ_SZ_OFFS)
+#define PXDCSR_MAX_RD_RQ_SZ_1KB		(0x3 << PXDCSR_MAX_RD_RQ_SZ_OFFS)
+#define PXDCSR_MAX_RD_RQ_SZ_2KB		(0x4 << PXDCSR_MAX_RD_RQ_SZ_OFFS)
+#define PXDCSR_MAX_RD_RQ_SZ_4KB		(0x5 << PXDCSR_MAX_RD_RQ_SZ_OFFS)
+
+#define PXDCSR_COR_ERR_DET 			BIT16 /* Correctable Error Detected*/
+#define PXDCSR_NF_ERR_DET 			BIT17 /* Non-Fatal Error Detected.*/
+#define PXDCSR_F_ERR_DET 			BIT18 /* Fatal Error Detected.*/
+#define PXDCSR_UR_DET				BIT19 /* Unsupported Request Detected */
+#define PXDCSR_AUX_PWR_DET 			BIT20 /* Reserved*/
+
+#define PXDCSR_TRANS_PEND_OFFS 			21 /* Transactions Pending*/
+#define PXDCSR_TRANS_PEND_MASK 			BIT21
+#define PXDCSR_TRANS_PEND_NOT_COMPLETED (0x1 << PXDCSR_TRANS_PEND_OFFS)
+
+
+/* PCI Express Link Capabilities Register*/
+/*PEX_LINK_CAPABILITY_REG (PXLCR)*/
+
+#define PXLCR_MAX_LINK_SPD_OFFS		0 /* Maximum Link Speed*/
+#define PXLCR_MAX_LINK_SPD_MASK		(0xf << PXLCR_MAX_LINK_SPD_OFFS)
+
+#define PXLCR_MAX_LNK_WDTH_OFFS 	3 /* Maximum Link Width*/
+#define PXLCR_MAX_LNK_WDTH_MASK		(0x3f << PXLCR_MAX_LNK_WDTH_OFFS)
+
+#define PXLCR_ASPM_SUP_OFFS 		10 /* Active State Link PM Support*/
+#define PXLCR_ASPM_SUP_MASK			(0x3 << PXLCR_ASPM_SUP_OFFS)
+
+#define PXLCR_L0S_EXT_LAT_OFFS 			12 /* L0s Exit Latency*/
+#define PXLCR_L0S_EXT_LAT_MASK			(0x7 << PXLCR_L0S_EXT_LAT_OFFS)
+#define PXLCR_L0S_EXT_LAT_64NS_LESS     (0x0 << PXDCR_EP_L1_ACC_LAT_OFFS)
+#define PXLCR_L0S_EXT_LAT_64NS_128NS   	(0x1 << PXDCR_EP_L1_ACC_LAT_OFFS)
+#define PXLCR_L0S_EXT_LAT_128NS_256NS   (0x2 << PXDCR_EP_L1_ACC_LAT_OFFS)
+#define PXLCR_L0S_EXT_LAT_256NS_512NS   (0x3 << PXDCR_EP_L1_ACC_LAT_OFFS)
+#define PXLCR_L0S_EXT_LAT_512NS_1US     (0x4 << PXDCR_EP_L1_ACC_LAT_OFFS)
+#define PXLCR_L0S_EXT_LAT_1US_2US       (0x5 << PXDCR_EP_L1_ACC_LAT_OFFS)
+#define PXLCR_L0S_EXT_LAT_2US_4US       (0x6 << PXDCR_EP_L1_ACC_LAT_OFFS)
+
+#define PXLCR_POR_TNUM_OFFS 			24 /* Port Number */
+#define PXLCR_POR_TNUM_MASK				(0xff << PXLCR_POR_TNUM_OFFS)
+
+/* PCI Express Link Control Status Register */
+/*PEX_LINK_CTRL_STAT_REG (PXLCSR)*/
+
+#define PXLCSR_ASPM_CNT_OFFS			0 /* Active State Link PM Control */
+#define PXLCSR_ASPM_CNT_MASK			(0x3 << PXLCSR_ASPM_CNT_OFFS)
+#define PXLCSR_ASPM_CNT_DISABLED		(0x0 << PXLCSR_ASPM_CNT_OFFS)
+#define PXLCSR_ASPM_CNT_L0S_ENT_SUPP		(0x1 << PXLCSR_ASPM_CNT_OFFS)
+#define PXLCSR_ASPM_CNT_L1S_ENT_SUPP		(0x2 << PXLCSR_ASPM_CNT_OFFS)
+#define PXLCSR_ASPM_CNT_L0S_L1S_ENT_SUPP	(0x3 << PXLCSR_ASPM_CNT_OFFS)
+
+#define PXLCSR_RCB_OFFS				3 /* Read Completion Boundary */
+#define PXLCSR_RCB_MASK				BIT3
+#define PXLCSR_RCB_64B				(0 << PXLCSR_RCB_OFFS)
+#define PXLCSR_RCB_128B				(1 << PXLCSR_RCB_OFFS)
+
+#define PXLCSR_LNK_DIS 				BIT4 /* Link Disable */
+#define PXLCSR_RETRN_LNK 			BIT5 /* Retrain Link */
+#define PXLCSR_CMN_CLK_CFG			BIT6 /* Common Clock Configuration */
+#define PXLCSR_EXTD_SNC 			BIT7 /* Extended Sync */
+
+#define PXLCSR_LNK_SPD_OFFS 		16 /* Link Speed */
+#define PXLCSR_LNK_SPD_MASK			(0xf << PXLCSR_LNK_SPD_OFFS)
+
+#define PXLCSR_NEG_LNK_WDTH_OFFS	20  /* Negotiated Link Width */
+#define PXLCSR_NEG_LNK_WDTH_MASK 	(0x3f << PXLCSR_NEG_LNK_WDTH_OFFS)
+#define PXLCSR_NEG_LNK_WDTH_X1		(0x1 << PXLCSR_NEG_LNK_WDTH_OFFS)
+
+#define PXLCSR_LNK_TRN 				BIT27 /* Link Training */
+
+#define PXLCSR_SLT_CLK_CFG_OFFS		28 /* Slot Clock Configuration */
+#define PXLCSR_SLT_CLK_CFG_MASK		BIT28
+#define PXLCSR_SLT_CLK_CFG_INDPNT	(0x0 << PXLCSR_SLT_CLK_CFG_OFFS)
+#define PXLCSR_SLT_CLK_CFG_REF		(0x1 << PXLCSR_SLT_CLK_CFG_OFFS)
+
+/* PCI Express Advanced Error Report Header Register */
+/*PEX_ADV_ERR_RPRT_HDR_TRGT_REG (PXAERHTR)*/
+
+/* PCI Express Uncorrectable Error Status Register*/
+/*PEX_UNCORRECT_ERR_STAT_REG (PXUESR)*/
+
+/* PCI Express Uncorrectable Error Mask Register */
+/*PEX_UNCORRECT_ERR_MASK_REG (PXUEMR)*/
+
+/* PCI Express Uncorrectable Error Severity Register */
+/*PEX_UNCORRECT_ERR_SERVITY_REG (PXUESR)*/
+
+/* PCI Express Correctable Error Status Register */
+/*PEX_CORRECT_ERR_STAT_REG (PXCESR)*/
+
+/* PCI Express Correctable Error Mask Register */
+/*PEX_CORRECT_ERR_MASK_REG (PXCEMR)*/
+
+/* PCI Express Advanced Error Capability and Control Register*/
+/*PEX_ADV_ERR_CAPABILITY_CTRL_REG (PXAECCR)*/
+
+/* PCI Express Header Log First DWORD Register*/
+/*PEX_HDR_LOG_FIRST_DWORD_REG (PXHLFDR)*/
+
+/* PCI Express Header Log Second DWORD Register*/
+/*PEX_HDR_LOG_SECOND_DWORD_REG (PXHLSDR)*/
+
+/* PCI Express Header Log Third DWORD Register*/
+/*PEX_HDR_LOG_THIRD_DWORD_REG (PXHLTDR)*/
+
+/* PCI Express Header Log Fourth DWORD Register*/
+/*PEX_HDR_LOG_FOURTH_DWORD_REG (PXHLFDR)*/
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* #ifndef __INCPEXREGSH */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvVrtBrgPex.c b/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvVrtBrgPex.c
new file mode 100644
index 000000000000..1306482b65cf
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvVrtBrgPex.c
@@ -0,0 +1,311 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvPex.h"
+
+//#define MV_DEBUG
+/* defines  */
+#ifdef MV_DEBUG
+	#define DB(x)	x
+#else
+	#define DB(x)
+#endif
+
+/* locals */
+typedef struct
+{
+	MV_U32 data;
+	MV_U32 mask;
+}PEX_HEADER_DATA;
+
+/* local function forwad decleration */
+MV_U32 mvPexHwConfigRead (MV_U32 pexIf, MV_U32 bus, MV_U32 dev, MV_U32 func,
+                        MV_U32 regOff);
+MV_STATUS mvPexHwConfigWrite(MV_U32 pexIf, MV_U32 bus, MV_U32 dev,
+                           MV_U32 func, MV_U32 regOff, MV_U32 data);
+void resetPexConfig(MV_U32 pexIf, MV_U32 bus, MV_U32 dev);
+
+
+PEX_HEADER_DATA configHdr[16] =
+{
+{0x888811ab, 0x00000000}, /*[device ID, vendor ID] */
+{0x00100007, 0x0000ffff}, /*[status register, command register] */
+{0x0604000e, 0x00000000}, /*[programming interface, sub class code, class code, revision ID] */
+{0x00010008, 0x00000000},  /*[BIST, header type, latency time, cache line] */
+{0x00000000, 0x00000000},  /*[base address 0] */
+{0x00000000, 0x00000000},  /*[base address 1] */
+{0x00000000, 0x00ffffff},  /*[secondary latency timersubordinate bus number, secondary bus number, primary bus number] */
+{0x0000f101, 0x00000000},  /*[secondary status ,IO limit, IO base] */
+{0x9ff0a000, 0x00000000},  /*[memory limit, memory base] */
+{0x0001fff1, 0x00000000},  /*[prefetch memory limit, prefetch memory base] */
+{0xffffffff, 0x00000000},  /*[prefetch memory base upper] */
+{0x00000000, 0x00000000},  /*[prefetch memory limit upper] */
+{0xeffff000, 0x00000000},  /*[IO limit upper 16 bits, IO base upper 16 bits] */
+{0x00000000, 0x00000000},  /*[reserved, capability pointer] */
+{0x00000000, 0x00000000},  /*[expansion ROM base address] */
+{0x00000000, 0x000000FF},  /*[bridge control, interrupt pin, interrupt line] */
+};
+
+
+#define HEADER_WRITE(data, offset) configHdr[offset/4].data = ((configHdr[offset/4].data & ~configHdr[offset/4].mask) | \
+																(data & configHdr[offset/4].mask))
+#define HEADER_READ(offset) configHdr[offset/4].data
+
+/*******************************************************************************
+* mvVrtBrgPexInit - Initialize PEX interfaces
+*
+* DESCRIPTION:
+*
+* This function is responsible of intialization of the Pex Interface , It
+* configure the Pex Bars and Windows in the following manner:
+*
+*  Assumptions :
+*				Bar0 is always internal registers bar
+*			    Bar1 is always the DRAM bar
+*				Bar2 is always the Device bar
+*
+*  1) Sets the Internal registers bar base by obtaining the base from
+*	  the CPU Interface
+*  2) Sets the DRAM bar base and size by getting the base and size from
+*     the CPU Interface when the size is the sum of all enabled DRAM
+*	  chip selects and the base is the base of CS0 .
+*  3) Sets the Device bar base and size by getting these values from the
+*     CPU Interface when the base is the base of the lowest base of the
+*     Device chip selects, and the
+*
+*
+* INPUT:
+*
+*       pexIf   -  PEX interface number.
+*
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_OK if function success otherwise MV_ERROR or MV_BAD_PARAM
+*
+*******************************************************************************/
+MV_STATUS mvPexVrtBrgInit(MV_U32 pexIf)
+{
+	/* reset PEX tree to recover previous U-boot/Boot configurations */
+	MV_U32 localBus = mvPexLocalBusNumGet(pexIf);
+
+
+	resetPexConfig(pexIf, localBus, 1);
+	return MV_OK;
+}
+
+
+MV_U32 mvPexVrtBrgConfigRead (MV_U32 pexIf, MV_U32 bus, MV_U32 dev, MV_U32 func,
+                        MV_U32 regOff)
+{
+
+	MV_U32 localBus = mvPexLocalBusNumGet(pexIf);
+	MV_U32 localDev = mvPexLocalDevNumGet(pexIf);
+	MV_U32 val;
+	if(bus == localBus)
+	{
+		if(dev > 1)
+		{
+/* on the local device allow only device #0 & #1 */
+			return 0xffffffff;
+		}
+		else
+		if (dev == localDev)
+		{
+			/* read the memory controller registers */
+			return mvPexHwConfigRead (pexIf, bus, dev, func, regOff);
+		}
+		else
+		{
+			/* access the virtual brg header */
+			return HEADER_READ(regOff);
+		}
+	}
+	else
+	if(bus == (localBus + 1))
+	{
+		/* access the device behind the virtual bridge */
+		if((dev == localDev) || (dev > 1))
+		{
+			return 0xffffffff;
+		}
+		else
+		{
+			/* access the device behind the virtual bridge, in this case
+			*  change the bus number to the local bus number in order to
+			*  generate type 0 config cycle
+			*/
+			mvPexLocalBusNumSet(pexIf, bus);
+			mvPexLocalDevNumSet(pexIf, 1);
+			val = mvPexHwConfigRead (pexIf, bus, 0, func, regOff);
+			mvPexLocalBusNumSet(pexIf, localBus);
+			mvPexLocalDevNumSet(pexIf, localDev);
+			return val;
+		}
+	}
+	/* for all other devices use the HW function to get the
+	*  requested registers
+	*/
+	mvPexLocalDevNumSet(pexIf, 1);
+	val = mvPexHwConfigRead (pexIf, bus, dev, func, regOff);
+	mvPexLocalDevNumSet(pexIf, localDev);
+	return val;
+}
+
+
+MV_STATUS mvPexVrtBrgConfigWrite(MV_U32 pexIf, MV_U32 bus, MV_U32 dev,
+                           MV_U32 func, MV_U32 regOff, MV_U32 data)
+{
+	MV_U32 localBus = mvPexLocalBusNumGet(pexIf);
+	MV_U32 localDev = mvPexLocalDevNumGet(pexIf);
+	MV_STATUS	status;
+
+	if(bus == localBus)
+	{
+		if(dev > 1)
+		{
+			/* on the local device allow only device #0 & #1 */
+			return MV_ERROR;
+		}
+		else
+		if (dev == localDev)
+		{
+			/* read the memory controller registers */
+			return mvPexHwConfigWrite (pexIf, bus, dev, func, regOff, data);
+		}
+		else
+		{
+			/* access the virtual brg header */
+			HEADER_WRITE(data, regOff);
+			return MV_OK;
+		}
+	}
+	else
+	if(bus == (localBus + 1))
+	{
+		/* access the device behind the virtual bridge */
+		if((dev == localDev) || (dev > 1))
+		{
+			return MV_ERROR;
+		}
+		else
+		{
+			/* access the device behind the virtual bridge, in this case
+			*  change the bus number to the local bus number in order to
+			*  generate type 0 config cycle
+			*/
+			//return mvPexHwConfigWrite (pexIf, localBus, dev, func, regOff, data);
+			mvPexLocalBusNumSet(pexIf, bus);
+			mvPexLocalDevNumSet(pexIf, 1);
+			status = mvPexHwConfigWrite (pexIf, bus, 0, func, regOff, data);
+			mvPexLocalBusNumSet(pexIf, localBus);
+			mvPexLocalDevNumSet(pexIf, localDev);
+			return status;
+
+		}
+	}
+	/* for all other devices use the HW function to get the
+	*  requested registers
+	*/
+	mvPexLocalDevNumSet(pexIf, 1);
+	status = mvPexHwConfigWrite (pexIf, bus, dev, func, regOff, data);
+	mvPexLocalDevNumSet(pexIf, localDev);
+	return status;
+}
+
+
+
+
+void resetPexConfig(MV_U32 pexIf, MV_U32 bus, MV_U32 dev)
+{
+	MV_U32 tData;
+	MV_U32 i;
+
+	/* restore the PEX configuration to initialization state */
+	/* in case PEX P2P call recursive and reset config */
+	tData = mvPexHwConfigRead (pexIf, bus, dev, 0x0, 0x0);
+	if(tData != 0xffffffff)
+	{
+		/* agent had been found - check whether P2P */
+		tData = mvPexHwConfigRead (pexIf, bus, dev, 0x0, 0x8);
+		if((tData & 0xffff0000) == 0x06040000)
+		{/* P2P */
+			/* get the sec bus and the subordinate */
+			MV_U32 secBus;
+			tData = mvPexHwConfigRead (pexIf, bus, dev, 0x0, 0x18);
+			secBus = ((tData >> 8) & 0xff);
+			/* now scan on sec bus */
+			for(i = 0;i < 0xff;i++)
+			{
+				resetPexConfig(pexIf, secBus, i);
+			}
+			/* now reset this device */
+			DB(mvOsPrintf("Reset bus %d dev %d\n", bus, dev));
+			mvPexHwConfigWrite(pexIf, bus, dev, 0x0, 0x18, 0x0);
+			DB(mvOsPrintf("Reset bus %d dev %d\n", bus, dev));
+		}
+	}
+}
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvVrtBrgPex.h b/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvVrtBrgPex.h
new file mode 100644
index 000000000000..0741713be9a9
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/pex/mvVrtBrgPex.h
@@ -0,0 +1,82 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCVRTBRGPEXH
+#define __INCVRTBRGPEXH
+
+
+/* Global Functions prototypes */
+/* mvPexInit - Initialize PEX interfaces*/
+MV_STATUS mvPexVrtBrgInit(MV_U32 pexIf);
+
+/* mvPexConfigRead - Read from configuration space */
+MV_U32 mvPexVrtBrgConfigRead (MV_U32 pexIf, MV_U32 bus, MV_U32 dev,
+						MV_U32 func,MV_U32 regOff);
+
+/* mvPexConfigWrite - Write to configuration space */
+MV_STATUS mvPexVrtBrgConfigWrite(MV_U32 pexIf, MV_U32 bus, MV_U32 dev,
+                           MV_U32 func, MV_U32 regOff, MV_U32 data);
+
+
+#endif /* #ifndef __INCPEXH */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlash.c b/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlash.c
new file mode 100644
index 000000000000..2d4443a50f47
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlash.c
@@ -0,0 +1,1521 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#include "mvOs.h"
+#include "sflash/mvSFlash.h"
+#include "sflash/mvSFlashSpec.h"
+#include "spi/mvSpi.h"
+#include "spi/mvSpiCmnd.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+
+/*#define MV_DEBUG*/
+#ifdef MV_DEBUG
+#define DB(x) x
+#else
+#define DB(x)
+#endif
+
+/* Globals */
+static MV_SFLASH_DEVICE_PARAMS sflash[] = {
+    /* ST M25P32 SPI flash, 4MB, 64 sectors of 64K each */
+    {
+     MV_M25P_WREN_CMND_OPCD,
+     MV_M25P_WRDI_CMND_OPCD,
+     MV_M25P_RDID_CMND_OPCD,
+     MV_M25P_RDSR_CMND_OPCD,
+     MV_M25P_WRSR_CMND_OPCD,
+     MV_M25P_READ_CMND_OPCD,
+     MV_M25P_FAST_RD_CMND_OPCD,
+     MV_M25P_PP_CMND_OPCD,
+     MV_M25P_SE_CMND_OPCD,
+     MV_M25P_BE_CMND_OPCD,
+     MV_M25P_RES_CMND_OPCD,
+     MV_SFLASH_NO_SPECIFIC_OPCD,    /* power save not supported */
+     MV_M25P32_SECTOR_SIZE,
+     MV_M25P32_SECTOR_NUMBER,
+     MV_M25P_PAGE_SIZE,
+     "ST M25P32",
+     MV_M25PXXX_ST_MANF_ID,
+     MV_M25P32_DEVICE_ID,
+     MV_M25P32_MAX_SPI_FREQ,
+     MV_M25P32_MAX_FAST_SPI_FREQ,
+     MV_M25P32_FAST_READ_DUMMY_BYTES
+    },
+    /* ST M25P64 SPI flash, 8MB, 128 sectors of 64K each */
+    {
+     MV_M25P_WREN_CMND_OPCD,
+     MV_M25P_WRDI_CMND_OPCD,
+     MV_M25P_RDID_CMND_OPCD,
+     MV_M25P_RDSR_CMND_OPCD,
+     MV_M25P_WRSR_CMND_OPCD,
+     MV_M25P_READ_CMND_OPCD,
+     MV_M25P_FAST_RD_CMND_OPCD,
+     MV_M25P_PP_CMND_OPCD,
+     MV_M25P_SE_CMND_OPCD,
+     MV_M25P_BE_CMND_OPCD,
+     MV_M25P_RES_CMND_OPCD,
+     MV_SFLASH_NO_SPECIFIC_OPCD,    /* power save not supported */
+     MV_M25P64_SECTOR_SIZE,
+     MV_M25P64_SECTOR_NUMBER,
+     MV_M25P_PAGE_SIZE,
+     "ST M25P64",
+     MV_M25PXXX_ST_MANF_ID,
+     MV_M25P64_DEVICE_ID,
+     MV_M25P64_MAX_SPI_FREQ,
+     MV_M25P64_MAX_FAST_SPI_FREQ,
+     MV_M25P64_FAST_READ_DUMMY_BYTES
+    },
+    /* ST M25P128 SPI flash, 16MB, 64 sectors of 256K each */
+    {
+     MV_M25P_WREN_CMND_OPCD,
+     MV_M25P_WRDI_CMND_OPCD,
+     MV_M25P_RDID_CMND_OPCD,
+     MV_M25P_RDSR_CMND_OPCD,
+     MV_M25P_WRSR_CMND_OPCD,
+     MV_M25P_READ_CMND_OPCD,
+     MV_M25P_FAST_RD_CMND_OPCD,
+     MV_M25P_PP_CMND_OPCD,
+     MV_M25P_SE_CMND_OPCD,
+     MV_M25P_BE_CMND_OPCD,
+     MV_M25P_RES_CMND_OPCD,
+     MV_SFLASH_NO_SPECIFIC_OPCD,    /* power save not supported */
+     MV_M25P128_SECTOR_SIZE,
+     MV_M25P128_SECTOR_NUMBER,
+     MV_M25P_PAGE_SIZE,
+     "ST M25P128",
+     MV_M25PXXX_ST_MANF_ID,
+     MV_M25P128_DEVICE_ID,
+     MV_M25P128_MAX_SPI_FREQ,
+     MV_M25P128_MAX_FAST_SPI_FREQ,
+     MV_M25P128_FAST_READ_DUMMY_BYTES
+    },
+    /* Macronix MXIC MX25L6405 SPI flash, 8MB, 128 sectors of 64K each */
+    {
+     MV_MX25L_WREN_CMND_OPCD,
+     MV_MX25L_WRDI_CMND_OPCD,
+     MV_MX25L_RDID_CMND_OPCD,
+     MV_MX25L_RDSR_CMND_OPCD,
+     MV_MX25L_WRSR_CMND_OPCD,
+     MV_MX25L_READ_CMND_OPCD,
+     MV_MX25L_FAST_RD_CMND_OPCD,
+     MV_MX25L_PP_CMND_OPCD,
+     MV_MX25L_SE_CMND_OPCD,
+     MV_MX25L_BE_CMND_OPCD,
+     MV_MX25L_RES_CMND_OPCD,
+     MV_MX25L_DP_CMND_OPCD,
+     MV_MX25L6405_SECTOR_SIZE,
+     MV_MX25L6405_SECTOR_NUMBER,
+     MV_MXIC_PAGE_SIZE,
+     "MXIC MX25L6405",
+     MV_MXIC_MANF_ID,
+     MV_MX25L6405_DEVICE_ID,
+     MV_MX25L6405_MAX_SPI_FREQ,
+     MV_MX25L6405_MAX_FAST_SPI_FREQ,
+     MV_MX25L6405_FAST_READ_DUMMY_BYTES
+    },
+    /* SPANSION S25FL128P SPI flash, 16MB, 64 sectors of 256K each */
+    {
+     MV_S25FL_WREN_CMND_OPCD,
+     MV_S25FL_WRDI_CMND_OPCD,
+     MV_S25FL_RDID_CMND_OPCD,
+     MV_S25FL_RDSR_CMND_OPCD,
+     MV_S25FL_WRSR_CMND_OPCD,
+     MV_S25FL_READ_CMND_OPCD,
+     MV_S25FL_FAST_RD_CMND_OPCD,
+     MV_S25FL_PP_CMND_OPCD,
+     MV_S25FL_SE_CMND_OPCD,
+     MV_S25FL_BE_CMND_OPCD,
+     MV_S25FL_RES_CMND_OPCD,
+     MV_S25FL_DP_CMND_OPCD,
+     MV_S25FL128_SECTOR_SIZE,
+     MV_S25FL128_SECTOR_NUMBER,
+     MV_S25FL_PAGE_SIZE,
+     "SPANSION S25FL128",
+     MV_SPANSION_MANF_ID,
+     MV_S25FL128_DEVICE_ID,
+     MV_S25FL128_MAX_SPI_FREQ,
+     MV_M25P128_MAX_FAST_SPI_FREQ,
+     MV_M25P128_FAST_READ_DUMMY_BYTES
+    }
+};
+
+/* Static Functions */
+static MV_STATUS    mvWriteEnable   (MV_SFLASH_INFO * pFlinfo);
+static MV_STATUS    mvStatusRegGet  (MV_SFLASH_INFO * pFlinfo, MV_U8 * pStatReg);
+static MV_STATUS    mvStatusRegSet  (MV_SFLASH_INFO * pFlinfo, MV_U8 sr);
+static MV_STATUS    mvWaitOnWipClear(MV_SFLASH_INFO * pFlinfo);
+static MV_STATUS    mvSFlashPageWr  (MV_SFLASH_INFO * pFlinfo, MV_U32 offset, \
+							         MV_U8* pPageBuff, MV_U32 buffSize);
+static MV_STATUS    mvSFlashWithDefaultsIdGet (MV_SFLASH_INFO * pFlinfo, \
+                                            MV_U8* manId, MV_U16* devId);
+
+/*******************************************************************************
+* mvWriteEnable - serialize the write enable sequence
+*
+* DESCRIPTION:
+*       transmit the sequence for write enable
+*
+********************************************************************************/
+static MV_STATUS mvWriteEnable(MV_SFLASH_INFO * pFlinfo)
+{
+	MV_U8 cmd[MV_SFLASH_WREN_CMND_LENGTH];
+
+
+    cmd[0] = sflash[pFlinfo->index].opcdWREN;
+
+	return mvSpiWriteThenRead(cmd, MV_SFLASH_WREN_CMND_LENGTH, NULL, 0, 0);
+}
+
+/*******************************************************************************
+* mvStatusRegGet - Retrieve the value of the status register
+*
+* DESCRIPTION:
+*       perform the RDSR sequence to get the 8bit status register
+*
+********************************************************************************/
+static MV_STATUS mvStatusRegGet(MV_SFLASH_INFO * pFlinfo, MV_U8 * pStatReg)
+{
+    MV_STATUS ret;
+	MV_U8 cmd[MV_SFLASH_RDSR_CMND_LENGTH];
+	MV_U8 sr[MV_SFLASH_RDSR_REPLY_LENGTH];
+
+
+
+
+	cmd[0] = sflash[pFlinfo->index].opcdRDSR;
+
+	if ((ret = mvSpiWriteThenRead(cmd, MV_SFLASH_RDSR_CMND_LENGTH, sr,
+                                         MV_SFLASH_RDSR_REPLY_LENGTH,0)) != MV_OK)
+        return ret;
+
+    *pStatReg = sr[0];
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvWaitOnWipClear - Block waiting for the WIP (write in progress) to be cleared
+*
+* DESCRIPTION:
+*       Block waiting for the WIP (write in progress) to be cleared
+*
+********************************************************************************/
+static MV_STATUS mvWaitOnWipClear(MV_SFLASH_INFO * pFlinfo)
+{
+    MV_STATUS ret;
+	MV_U32 i;
+    MV_U8 stat;
+
+	for (i=0; i<MV_SFLASH_MAX_WAIT_LOOP; i++)
+	{
+        if ((ret = mvStatusRegGet(pFlinfo, &stat)) != MV_OK)
+            return ret;
+
+		if ((stat & MV_SFLASH_STATUS_REG_WIP_MASK) == 0)
+			return MV_OK;
+	}
+
+    DB(mvOsPrintf("%s WARNING: Write Timeout!\n", __FUNCTION__);)
+	return MV_TIMEOUT;
+}
+
+/*******************************************************************************
+* mvWaitOnChipEraseDone - Block waiting for the WIP (write in progress) to be
+*                         cleared after a chip erase command which is supposed
+*                         to take about 2:30 minutes
+*
+* DESCRIPTION:
+*       Block waiting for the WIP (write in progress) to be cleared
+*
+********************************************************************************/
+static MV_STATUS mvWaitOnChipEraseDone(MV_SFLASH_INFO * pFlinfo)
+{
+    MV_STATUS ret;
+	MV_U32 i;
+    MV_U8 stat;
+
+	for (i=0; i<MV_SFLASH_CHIP_ERASE_MAX_WAIT_LOOP; i++)
+	{
+        if ((ret = mvStatusRegGet(pFlinfo, &stat)) != MV_OK)
+            return ret;
+
+		if ((stat & MV_SFLASH_STATUS_REG_WIP_MASK) == 0)
+			return MV_OK;
+	}
+
+    DB(mvOsPrintf("%s WARNING: Write Timeout!\n", __FUNCTION__);)
+	return MV_TIMEOUT;
+}
+
+/*******************************************************************************
+*  mvStatusRegSet - Set the value of the 8bit status register
+*
+* DESCRIPTION:
+*       Set the value of the 8bit status register
+*
+********************************************************************************/
+static MV_STATUS mvStatusRegSet(MV_SFLASH_INFO * pFlinfo, MV_U8 sr)
+{
+    MV_STATUS ret;
+	MV_U8 cmd[MV_SFLASH_WRSR_CMND_LENGTH];
+
+
+    /* Issue the Write enable command prior the WRSR command */
+	if ((ret = mvWriteEnable(pFlinfo)) != MV_OK)
+		return ret;
+
+    /* Write the SR with the new values */
+    cmd[0] = sflash[pFlinfo->index].opcdWRSR;
+	cmd[1] = sr;
+
+	if ((ret = mvSpiWriteThenRead(cmd, MV_SFLASH_WRSR_CMND_LENGTH, NULL, 0, 0)) != MV_OK)
+		return ret;
+
+    if ((ret = mvWaitOnWipClear(pFlinfo)) != MV_OK)
+		return ret;
+
+    mvOsDelay(1);
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvSFlashPageWr - Write up to 256 Bytes in the same page
+*
+* DESCRIPTION:
+*       Write a buffer up to the page size in length provided that the whole address
+*		range is within the same page (alligned to page bounderies)
+*
+*******************************************************************************/
+static MV_STATUS mvSFlashPageWr (MV_SFLASH_INFO * pFlinfo, MV_U32 offset,
+							     MV_U8* pPageBuff, MV_U32 buffSize)
+{
+    MV_STATUS ret;
+	MV_U8 cmd[MV_SFLASH_PP_CMND_LENGTH];
+
+
+    /* Protection - check if the model was detected */
+    if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+    {
+        DB(mvOsPrintf("%s WARNING: Invalid parameter device index!\n", __FUNCTION__);)
+        return MV_BAD_PARAM;
+    }
+
+	/* check that we do not cross the page bounderies */
+    if (((offset & (sflash[pFlinfo->index].pageSize - 1)) + buffSize) >
+        sflash[pFlinfo->index].pageSize)
+    {
+        DB(mvOsPrintf("%s WARNING: Page allignment problem!\n", __FUNCTION__);)
+		return MV_OUT_OF_RANGE;
+    }
+
+	/* Issue the Write enable command prior the page program command */
+	if ((ret = mvWriteEnable(pFlinfo)) != MV_OK)
+		return ret;
+
+    cmd[0] = sflash[pFlinfo->index].opcdPP;
+	cmd[1] = ((offset >> 16) & 0xFF);
+	cmd[2] = ((offset >> 8) & 0xFF);
+	cmd[3] = (offset & 0xFF);
+
+	if ((ret = mvSpiWriteThenWrite(cmd, MV_SFLASH_PP_CMND_LENGTH, pPageBuff, buffSize)) != MV_OK)
+		return ret;
+
+	if ((ret = mvWaitOnWipClear(pFlinfo)) != MV_OK)
+		return ret;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvSFlashWithDefaultsIdGet - Try to read the manufacturer and Device IDs from
+*       the device using the default RDID opcode and the default WREN opcode.
+*
+* DESCRIPTION:
+*       This is used to detect a generic device that uses the default opcodes
+*       for the WREN and RDID.
+*
+********************************************************************************/
+static MV_STATUS mvSFlashWithDefaultsIdGet (MV_SFLASH_INFO * pFlinfo, MV_U8* manId, MV_U16* devId)
+{
+    MV_STATUS ret;
+    MV_U8 cmdRDID[MV_SFLASH_RDID_CMND_LENGTH];
+	MV_U8 id[MV_SFLASH_RDID_REPLY_LENGTH];
+
+
+
+    /* Use the default RDID opcode to read the IDs */
+    cmdRDID[0] = MV_SFLASH_DEFAULT_RDID_OPCD;   /* unknown model try default */
+	if ((ret = mvSpiWriteThenRead(cmdRDID, MV_SFLASH_RDID_CMND_LENGTH, id, MV_SFLASH_RDID_REPLY_LENGTH, 0)) != MV_OK)
+		return ret;
+
+	*manId = id[0];
+	*devId = 0;
+	*devId |= (id[1] << 8);
+	*devId |= id[2];
+
+	return MV_OK;
+}
+
+/*
+#####################################################################################
+#####################################################################################
+*/
+
+/*******************************************************************************
+* mvSFlashInit - Initialize the serial flash device
+*
+* DESCRIPTION:
+*       Perform the neccessary initialization and configuration
+*
+* INPUT:
+*       pFlinfo: pointer to the Flash information structure
+*           pFlinfo->baseAddr: base address in fast mode.
+*           pFlinfo->index: Index of the flash in the sflash tabel. If the SPI
+*                           flash device does not support read Id command with
+*                           the standard opcode, then the user should supply this
+*                           as an input to skip the autodetection process!!!!
+*
+* OUTPUT:
+*       pFlinfo: pointer to the Flash information structure after detection
+*           pFlinfo->manufacturerId: Manufacturer ID
+*           pFlinfo->deviceId: Device ID
+*           pFlinfo->sectorSize: size of the sector (all sectors are the same).
+*           pFlinfo->sectorNumber: number of sectors.
+*           pFlinfo->pageSize: size of the page.
+*           pFlinfo->index: Index of the detected flash in the sflash tabel
+*
+* RETURN:
+*       Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashInit (MV_SFLASH_INFO * pFlinfo)
+{
+    MV_STATUS ret;
+    MV_U8 manf;
+    MV_U16 dev;
+    MV_U32 indx;
+    MV_BOOL detectFlag = MV_FALSE;
+
+    /* check for NULL pointer */
+    if (pFlinfo == NULL)
+    {
+        mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return MV_BAD_PARAM;
+    }
+
+    /* Initialize the SPI interface with low frequency to make sure that the read ID succeeds */
+    if ((ret = mvSpiInit(MV_SFLASH_BASIC_SPI_FREQ)) != MV_OK)
+    {
+        mvOsPrintf("%s ERROR: Failed to initialize the SPI interface!\n", __FUNCTION__);
+        return ret;
+    }
+
+    /* First try to read the Manufacturer and Device IDs */
+    if ((ret = mvSFlashIdGet(pFlinfo, &manf, &dev)) != MV_OK)
+    {
+        mvOsPrintf("%s ERROR: Failed to get the SFlash ID!\n", __FUNCTION__);
+        return ret;
+    }
+
+    /* loop over the whole table and look for the appropriate SFLASH */
+    for (indx=0; indx<MV_ARRAY_SIZE(sflash); indx++)
+    {
+        if ((manf == sflash[indx].manufacturerId) && (dev == sflash[indx].deviceId))
+        {
+            pFlinfo->manufacturerId = manf;
+            pFlinfo->deviceId = dev;
+            pFlinfo->index = indx;
+            detectFlag = MV_TRUE;
+        }
+    }
+
+    if(!detectFlag)
+    {
+        mvOsPrintf("%s ERROR: Unknown SPI flash device!\n", __FUNCTION__);
+        return MV_FAIL;
+    }
+
+    /* fill the info based on the model detected */
+    pFlinfo->sectorSize = sflash[pFlinfo->index].sectorSize;
+    pFlinfo->sectorNumber = sflash[pFlinfo->index].sectorNumber;
+    pFlinfo->pageSize = sflash[pFlinfo->index].pageSize;
+
+    /* Set the SPI frequency to the MAX allowed for the device for best performance */
+    if ((ret = mvSpiBaudRateSet(sflash[pFlinfo->index].spiMaxFreq)) != MV_OK)
+    {
+        mvOsPrintf("%s ERROR: Failed to set the SPI frequency!\n", __FUNCTION__);
+        return ret;
+    }
+
+    /* As default lock the SR */
+    if ((ret = mvSFlashStatRegLock(pFlinfo, MV_TRUE)) != MV_OK)
+        return ret;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvSFlashSectorErase - Erasse a single sector of the serial flash
+*
+* DESCRIPTION:
+*       Issue the erase sector command and address
+*
+* INPUT:
+*       pFlinfo: pointer to the Flash information structure
+*		secNumber: sector Number to erase (0 -> (sectorNumber-1))
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashSectorErase (MV_SFLASH_INFO * pFlinfo, MV_U32 secNumber)
+{
+    MV_STATUS ret;
+	MV_U8 cmd[MV_SFLASH_SE_CMND_LENGTH];
+
+    MV_U32 secAddr = (secNumber * pFlinfo->sectorSize);
+#if 0
+    MV_U32 i;
+    MV_U32 * pW = (MV_U32*) (secAddr + pFlinfo->baseAddr);
+    MV_U32 erasedWord = 0xFFFFFFFF;
+    MV_U32 wordsPerSector = (pFlinfo->sectorSize / sizeof(MV_U32));
+    MV_BOOL eraseNeeded = MV_FALSE;
+#endif
+    /* check for NULL pointer */
+    if (pFlinfo == NULL)
+    {
+        mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return MV_BAD_PARAM;
+    }
+
+    /* Protection - check if the model was detected */
+    if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+    {
+        DB(mvOsPrintf("%s WARNING: Invaild parameter index!\n", __FUNCTION__);)
+        return MV_BAD_PARAM;
+    }
+
+    /* check that the sector number is valid */
+    if (secNumber >= pFlinfo->sectorNumber)
+    {
+        DB(mvOsPrintf("%s WARNING: Invaild parameter sector number!\n", __FUNCTION__);)
+        return MV_BAD_PARAM;
+    }
+
+    /* we don't want to access SPI in direct mode from in-direct API,
+	becasue of timing issue between CS asserts. */
+#if 0
+    /* First compare to FF and check if erase is needed */
+    for (i=0; i<wordsPerSector; i++)
+    {
+        if (memcmp(pW, &erasedWord, sizeof(MV_U32)) != 0)
+        {
+            eraseNeeded = MV_TRUE;
+            break;
+        }
+
+        ++pW;
+    }
+    if (!eraseNeeded)
+        return MV_OK;
+#endif
+
+    cmd[0] = sflash[pFlinfo->index].opcdSE;
+	cmd[1] = ((secAddr >> 16) & 0xFF);
+	cmd[2] = ((secAddr >> 8) & 0xFF);
+	cmd[3] = (secAddr & 0xFF);
+
+	/* Issue the Write enable command prior the sector erase command */
+	if ((ret = mvWriteEnable(pFlinfo)) != MV_OK)
+		return ret;
+
+	if ((ret = mvSpiWriteThenWrite(cmd, MV_SFLASH_SE_CMND_LENGTH, NULL, 0)) != MV_OK)
+		return ret;
+
+	if ((ret = mvWaitOnWipClear(pFlinfo)) != MV_OK)
+		return ret;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvSFlashChipErase - Erasse the whole serial flash
+*
+* DESCRIPTION:
+*       Issue the bulk (chip) erase command
+*
+* INPUT:
+*       pFlinfo: pointer to the Flash information structure
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashChipErase (MV_SFLASH_INFO * pFlinfo)
+{
+    MV_STATUS ret;
+	MV_U8 cmd[MV_SFLASH_BE_CMND_LENGTH];
+
+
+    /* check for NULL pointer */
+    if (pFlinfo == NULL)
+    {
+        mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return MV_BAD_PARAM;
+    }
+
+    /* Protection - check if the model was detected */
+    if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+    {
+        DB(mvOsPrintf("%s WARNING: Invaild parameter index!\n", __FUNCTION__);)
+        return MV_BAD_PARAM;
+    }
+
+    cmd[0] = sflash[pFlinfo->index].opcdBE;
+
+	/* Issue the Write enable command prior the Bulk erase command */
+	if ((ret = mvWriteEnable(pFlinfo)) != MV_OK)
+		return ret;
+
+    if ((ret = mvSpiWriteThenWrite(cmd, MV_SFLASH_BE_CMND_LENGTH, NULL, 0)) != MV_OK)
+		return ret;
+
+	if ((ret = mvWaitOnChipEraseDone(pFlinfo)) != MV_OK)
+		return ret;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvSFlashBlockRd - Read from the serial flash
+*
+* DESCRIPTION:
+*       Issue the read command and address then perfom the needed read
+*
+* INPUT:
+*       pFlinfo: pointer to the Flash information structure
+*		offset: byte offset with the flash to start reading from
+*		pReadBuff: pointer to the buffer to read the data in
+*		buffSize: size of the buffer to read.
+*
+* OUTPUT:
+*       pReadBuff: pointer to the buffer containing the read data
+*
+* RETURN:
+*       Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashBlockRd (MV_SFLASH_INFO * pFlinfo, MV_U32 offset,
+						   MV_U8* pReadBuff, MV_U32 buffSize)
+{
+	MV_U8 cmd[MV_SFLASH_READ_CMND_LENGTH];
+
+
+    /* check for NULL pointer */
+    if ((pFlinfo == NULL) || (pReadBuff == NULL))
+    {
+        mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return MV_BAD_PARAM;
+    }
+
+    /* Protection - check if the model was detected */
+    if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+    {
+        DB(mvOsPrintf("%s WARNING: Invaild parameter index!\n", __FUNCTION__);)
+        return MV_BAD_PARAM;
+    }
+
+    cmd[0] = sflash[pFlinfo->index].opcdREAD;
+	cmd[1] = ((offset >> 16) & 0xFF);
+	cmd[2] = ((offset >> 8) & 0xFF);
+	cmd[3] = (offset & 0xFF);
+
+	return mvSpiWriteThenRead(cmd, MV_SFLASH_READ_CMND_LENGTH, pReadBuff, buffSize, 0);
+}
+
+/*******************************************************************************
+* mvSFlashFastBlockRd - Fast read from the serial flash
+*
+* DESCRIPTION:
+*       Issue the fast read command and address then perfom the needed read
+*
+* INPUT:
+*       pFlinfo: pointer to the Flash information structure
+*		offset: byte offset with the flash to start reading from
+*		pReadBuff: pointer to the buffer to read the data in
+*		buffSize: size of the buffer to read.
+*
+* OUTPUT:
+*       pReadBuff: pointer to the buffer containing the read data
+*
+* RETURN:
+*       Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashFastBlockRd (MV_SFLASH_INFO * pFlinfo, MV_U32 offset,
+						       MV_U8* pReadBuff, MV_U32 buffSize)
+{
+    MV_U8 cmd[MV_SFLASH_READ_CMND_LENGTH];
+    MV_STATUS ret;
+
+    /* check for NULL pointer */
+    if ((pFlinfo == NULL) || (pReadBuff == NULL))
+    {
+        mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return MV_BAD_PARAM;
+    }
+
+    /* Protection - check if the model was detected */
+    if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+    {
+        DB(mvOsPrintf("%s WARNING: Invaild parameter index!\n", __FUNCTION__);)
+        return MV_BAD_PARAM;
+    }
+
+    /* Set the SPI frequency to the MAX allowed for fast-read operations */
+    mvOsPrintf("Setting freq to %d.\n",sflash[pFlinfo->index].spiMaxFastFreq);
+    if ((ret = mvSpiBaudRateSet(sflash[pFlinfo->index].spiMaxFastFreq)) != MV_OK)
+    {
+        mvOsPrintf("%s ERROR: Failed to set the SPI fast frequency!\n", __FUNCTION__);
+        return ret;
+    }
+
+    cmd[0] = sflash[pFlinfo->index].opcdFSTRD;
+    cmd[1] = ((offset >> 16) & 0xFF);
+    cmd[2] = ((offset >> 8) & 0xFF);
+    cmd[3] = (offset & 0xFF);
+
+
+    ret = mvSpiWriteThenRead(cmd, MV_SFLASH_READ_CMND_LENGTH, pReadBuff, buffSize,
+                             sflash[pFlinfo->index].spiFastRdDummyBytes);
+
+    /* Reset the SPI frequency to the MAX allowed for the device for best performance */
+    if ((ret = mvSpiBaudRateSet(sflash[pFlinfo->index].spiMaxFreq)) != MV_OK)
+    {
+        mvOsPrintf("%s ERROR: Failed to set the SPI frequency!\n", __FUNCTION__);
+        return ret;
+    }
+
+    return ret;
+}
+
+
+/*******************************************************************************
+* mvSFlashBlockWr - Write a buffer with any size
+*
+* DESCRIPTION:
+*       write regardless of the page boundaries and size limit per Page
+*		program command
+*
+* INPUT:
+*       pFlinfo: pointer to the Flash information structure
+*		offset: byte offset within the flash region
+*		pWriteBuff: pointer to the buffer holding the data to program
+*		buffSize: size of the buffer to write
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashBlockWr (MV_SFLASH_INFO * pFlinfo, MV_U32 offset,
+						   MV_U8* pWriteBuff, MV_U32 buffSize)
+{
+    MV_STATUS ret;
+	MV_U32 data2write	= buffSize;
+    MV_U32 preAllOffset = (offset & MV_SFLASH_PAGE_ALLIGN_MASK(MV_M25P_PAGE_SIZE));
+    MV_U32 preAllSz		= (preAllOffset ? (MV_M25P_PAGE_SIZE - preAllOffset) : 0);
+	MV_U32 writeOffset	= offset;
+
+    /* check for NULL pointer */
+#ifndef CONFIG_MARVELL
+    if(NULL == pWriteBuff)
+    {
+        mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return MV_BAD_PARAM;
+    }
+#endif
+
+    if (pFlinfo == NULL)
+    {
+        mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return MV_BAD_PARAM;
+    }
+
+    /* Protection - check if the model was detected */
+    if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+    {
+        DB(mvOsPrintf("%s WARNING: Invaild parameter index!\n", __FUNCTION__);)
+        return MV_BAD_PARAM;
+    }
+
+	/* check that the buffer size does not exceed the flash size */
+    if ((offset + buffSize) > mvSFlashSizeGet(pFlinfo))
+    {
+        DB(mvOsPrintf("%s WARNING: Write exceeds flash size!\n", __FUNCTION__);)
+	    return MV_OUT_OF_RANGE;
+    }
+
+	/* check if the total block size is less than the first chunk remainder */
+	if (data2write < preAllSz)
+		preAllSz = data2write;
+
+	/* check if programing does not start at a 64byte alligned offset */
+	if (preAllSz)
+	{
+		if ((ret = mvSFlashPageWr(pFlinfo, writeOffset, pWriteBuff, preAllSz)) != MV_OK)
+			return ret;
+
+		/* increment pointers and counters */
+		writeOffset += preAllSz;
+		data2write -= preAllSz;
+		pWriteBuff += preAllSz;
+	}
+
+	/* program the data that fits in complete page chunks */
+	while (data2write >= sflash[pFlinfo->index].pageSize)
+	{
+		if ((ret = mvSFlashPageWr(pFlinfo, writeOffset, pWriteBuff, sflash[pFlinfo->index].pageSize)) != MV_OK)
+			return ret;
+
+		/* increment pointers and counters */
+		writeOffset += sflash[pFlinfo->index].pageSize;
+		data2write -= sflash[pFlinfo->index].pageSize;
+		pWriteBuff += sflash[pFlinfo->index].pageSize;
+	}
+
+	/* program the last partial chunk */
+	if (data2write)
+	{
+		if ((ret = mvSFlashPageWr(pFlinfo, writeOffset, pWriteBuff, data2write)) != MV_OK)
+			return ret;
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvSFlashIdGet - Get the manufacturer and device IDs.
+*
+* DESCRIPTION:
+*       Get the Manufacturer and device IDs from the serial flash through
+*		writing the RDID command then reading 3 bytes of data. In case that
+*       this command was called for the first time in order to detect the
+*       manufacturer and device IDs, then the default RDID opcode will be used
+*       unless the device index is indicated by the user (in case the SPI flash
+*       does not use the default RDID opcode).
+*
+* INPUT:
+*       pFlinfo: pointer to the Flash information structure
+*		pManId: pointer to the 8bit variable to hold the manufacturing ID
+*		pDevId: pointer to the 16bit variable to hold the device ID
+*
+* OUTPUT:
+*		pManId: pointer to the 8bit variable holding the manufacturing ID
+*		pDevId: pointer to the 16bit variable holding the device ID
+*
+* RETURN:
+*       Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashIdGet (MV_SFLASH_INFO * pFlinfo, MV_U8* pManId, MV_U16* pDevId)
+{
+    MV_STATUS ret;
+	MV_U8 cmd[MV_SFLASH_RDID_CMND_LENGTH];
+	MV_U8 id[MV_SFLASH_RDID_REPLY_LENGTH];
+
+
+
+    /* check for NULL pointer */
+    if ((pFlinfo == NULL) || (pManId == NULL) || (pDevId == NULL))
+    {
+        mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return MV_BAD_PARAM;
+    }
+
+    if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+        return mvSFlashWithDefaultsIdGet(pFlinfo, pManId, pDevId);
+    else
+        cmd[0] = sflash[pFlinfo->index].opcdRDID;
+
+	if ((ret = mvSpiWriteThenRead(cmd, MV_SFLASH_RDID_CMND_LENGTH, id, MV_SFLASH_RDID_REPLY_LENGTH, 0)) != MV_OK)
+		return ret;
+
+	*pManId = id[0];
+	*pDevId = 0;
+	*pDevId |= (id[1] << 8);
+	*pDevId |= id[2];
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvSFlashWpRegionSet - Set the Write-Protected region
+*
+* DESCRIPTION:
+*       Set the Write-Protected region
+*
+* INPUT:
+*       pFlinfo: pointer to the Flash information structure
+*		wpRegion: which region will be protected
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashWpRegionSet (MV_SFLASH_INFO * pFlinfo, MV_SFLASH_WP_REGION wpRegion)
+{
+    MV_U8 wpMask;
+
+    /* check for NULL pointer */
+    if (pFlinfo == NULL)
+    {
+        mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return MV_BAD_PARAM;
+    }
+
+    /* Protection - check if the model was detected */
+    if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+    {
+        DB(mvOsPrintf("%s WARNING: Invaild parameter index!\n", __FUNCTION__);)
+        return MV_BAD_PARAM;
+    }
+
+    /* Check if the chip is an ST flash; then WP supports only 3 bits */
+    if (pFlinfo->manufacturerId == MV_M25PXXX_ST_MANF_ID)
+    {
+        switch (wpRegion)
+        {
+            case MV_WP_NONE:
+                wpMask = MV_M25P_STATUS_BP_NONE;
+                break;
+
+            case MV_WP_UPR_1OF128:
+                DB(mvOsPrintf("%s WARNING: Invaild option for this flash chip!\n", __FUNCTION__);)
+                return MV_NOT_SUPPORTED;
+
+            case MV_WP_UPR_1OF64:
+                wpMask = MV_M25P_STATUS_BP_1_OF_64;
+                break;
+
+            case MV_WP_UPR_1OF32:
+                wpMask = MV_M25P_STATUS_BP_1_OF_32;
+                break;
+
+            case MV_WP_UPR_1OF16:
+                wpMask = MV_M25P_STATUS_BP_1_OF_16;
+                break;
+
+            case MV_WP_UPR_1OF8:
+                wpMask = MV_M25P_STATUS_BP_1_OF_8;
+                break;
+
+            case MV_WP_UPR_1OF4:
+                wpMask = MV_M25P_STATUS_BP_1_OF_4;
+                break;
+
+            case MV_WP_UPR_1OF2:
+                wpMask = MV_M25P_STATUS_BP_1_OF_2;
+                break;
+
+            case MV_WP_ALL:
+                wpMask = MV_M25P_STATUS_BP_ALL;
+                break;
+
+            default:
+                DB(mvOsPrintf("%s WARNING: Invaild parameter WP region!\n", __FUNCTION__);)
+                return MV_BAD_PARAM;
+        }
+    }
+    /* check if the manufacturer is MXIC then the WP is 4bits */
+    else if (pFlinfo->manufacturerId == MV_MXIC_MANF_ID)
+    {
+        switch (wpRegion)
+        {
+            case MV_WP_NONE:
+                wpMask = MV_MX25L_STATUS_BP_NONE;
+                break;
+
+            case MV_WP_UPR_1OF128:
+                wpMask = MV_MX25L_STATUS_BP_1_OF_128;
+                break;
+
+            case MV_WP_UPR_1OF64:
+                wpMask = MV_MX25L_STATUS_BP_1_OF_64;
+                break;
+
+            case MV_WP_UPR_1OF32:
+                wpMask = MV_MX25L_STATUS_BP_1_OF_32;
+                break;
+
+            case MV_WP_UPR_1OF16:
+                wpMask = MV_MX25L_STATUS_BP_1_OF_16;
+                break;
+
+            case MV_WP_UPR_1OF8:
+                wpMask = MV_MX25L_STATUS_BP_1_OF_8;
+                break;
+
+            case MV_WP_UPR_1OF4:
+                wpMask = MV_MX25L_STATUS_BP_1_OF_4;
+                break;
+
+            case MV_WP_UPR_1OF2:
+                wpMask = MV_MX25L_STATUS_BP_1_OF_2;
+                break;
+
+            case MV_WP_ALL:
+                wpMask = MV_MX25L_STATUS_BP_ALL;
+                break;
+
+            default:
+                DB(mvOsPrintf("%s WARNING: Invaild parameter WP region!\n", __FUNCTION__);)
+                return MV_BAD_PARAM;
+        }
+    }
+    /* check if the manufacturer is SPANSION then the WP is 4bits */
+    else if (pFlinfo->manufacturerId == MV_SPANSION_MANF_ID)
+    {
+        switch (wpRegion)
+        {
+            case MV_WP_NONE:
+                wpMask = MV_S25FL_STATUS_BP_NONE;
+                break;
+
+            case MV_WP_UPR_1OF128:
+                DB(mvOsPrintf("%s WARNING: Invaild option for this flash chip!\n", __FUNCTION__);)
+                return MV_NOT_SUPPORTED;
+
+            case MV_WP_UPR_1OF64:
+                wpMask = MV_S25FL_STATUS_BP_1_OF_64;
+                break;
+
+            case MV_WP_UPR_1OF32:
+                wpMask = MV_S25FL_STATUS_BP_1_OF_32;
+                break;
+
+            case MV_WP_UPR_1OF16:
+                wpMask = MV_S25FL_STATUS_BP_1_OF_16;
+                break;
+
+            case MV_WP_UPR_1OF8:
+                wpMask = MV_S25FL_STATUS_BP_1_OF_8;
+                break;
+
+            case MV_WP_UPR_1OF4:
+                wpMask = MV_S25FL_STATUS_BP_1_OF_4;
+                break;
+
+            case MV_WP_UPR_1OF2:
+                wpMask = MV_S25FL_STATUS_BP_1_OF_2;
+                break;
+
+            case MV_WP_ALL:
+                wpMask = MV_S25FL_STATUS_BP_ALL;
+                break;
+
+
+            default:
+                DB(mvOsPrintf("%s WARNING: Invaild parameter WP region!\n", __FUNCTION__);)
+                return MV_BAD_PARAM;
+        }
+    }
+    else
+    {
+        DB(mvOsPrintf("%s WARNING: Invaild parameter Manufacturer ID!\n", __FUNCTION__);)
+        return MV_BAD_PARAM;
+    }
+
+    /* Verify that the SRWD bit is always set - register is s/w locked */
+    wpMask |= MV_SFLASH_STATUS_REG_SRWD_MASK;
+
+	return mvStatusRegSet(pFlinfo, wpMask);
+}
+
+/*******************************************************************************
+* mvSFlashWpRegionGet - Get the Write-Protected region configured
+*
+* DESCRIPTION:
+*       Get from the chip the Write-Protected region configured
+*
+* INPUT:
+*       pFlinfo: pointer to the Flash information structure
+*		pWpRegion: pointer to the variable to return the WP region in
+*
+* OUTPUT:
+*		wpRegion: pointer to the variable holding the WP region configured
+*
+* RETURN:
+*       Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashWpRegionGet (MV_SFLASH_INFO * pFlinfo, MV_SFLASH_WP_REGION * pWpRegion)
+{
+    MV_STATUS ret;
+	MV_U8 reg;
+
+    /* check for NULL pointer */
+    if ((pFlinfo == NULL) || (pWpRegion == NULL))
+    {
+        mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return MV_BAD_PARAM;
+    }
+
+    /* Protection - check if the model was detected */
+    if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+    {
+        DB(mvOsPrintf("%s WARNING: Invaild parameter index!\n", __FUNCTION__);)
+        return MV_BAD_PARAM;
+    }
+
+    if ((ret = mvStatusRegGet(pFlinfo, &reg)) != MV_OK)
+        return ret;
+
+    /* Check if the chip is an ST flash; then WP supports only 3 bits */
+    if (pFlinfo->manufacturerId == MV_M25PXXX_ST_MANF_ID)
+    {
+        switch ((reg & MV_M25P_STATUS_REG_WP_MASK))
+        {
+            case MV_M25P_STATUS_BP_NONE:
+                *pWpRegion = MV_WP_NONE;
+                break;
+
+            case MV_M25P_STATUS_BP_1_OF_64:
+                *pWpRegion = MV_WP_UPR_1OF64;
+                break;
+
+            case MV_M25P_STATUS_BP_1_OF_32:
+                *pWpRegion = MV_WP_UPR_1OF32;
+                break;
+
+            case MV_M25P_STATUS_BP_1_OF_16:
+                *pWpRegion = MV_WP_UPR_1OF16;
+                break;
+
+            case MV_M25P_STATUS_BP_1_OF_8:
+                *pWpRegion = MV_WP_UPR_1OF8;
+                break;
+
+            case MV_M25P_STATUS_BP_1_OF_4:
+                *pWpRegion = MV_WP_UPR_1OF4;
+                break;
+
+            case MV_M25P_STATUS_BP_1_OF_2:
+                *pWpRegion = MV_WP_UPR_1OF2;
+                break;
+
+            case MV_M25P_STATUS_BP_ALL:
+                *pWpRegion = MV_WP_ALL;
+                break;
+
+            default:
+                DB(mvOsPrintf("%s WARNING: Unidentified WP region in h/w!\n", __FUNCTION__);)
+                return MV_BAD_VALUE;
+        }
+    }
+    /* check if the manufacturer is MXIC then the WP is 4bits */
+    else if (pFlinfo->manufacturerId == MV_MXIC_MANF_ID)
+    {
+        switch ((reg & MV_MX25L_STATUS_REG_WP_MASK))
+        {
+            case MV_MX25L_STATUS_BP_NONE:
+                *pWpRegion = MV_WP_NONE;
+                break;
+
+            case MV_MX25L_STATUS_BP_1_OF_128:
+                *pWpRegion = MV_WP_UPR_1OF128;
+                break;
+
+            case MV_MX25L_STATUS_BP_1_OF_64:
+                *pWpRegion = MV_WP_UPR_1OF64;
+                break;
+
+            case MV_MX25L_STATUS_BP_1_OF_32:
+                *pWpRegion = MV_WP_UPR_1OF32;
+                break;
+
+            case MV_MX25L_STATUS_BP_1_OF_16:
+                *pWpRegion = MV_WP_UPR_1OF16;
+                break;
+
+            case MV_MX25L_STATUS_BP_1_OF_8:
+                *pWpRegion = MV_WP_UPR_1OF8;
+                break;
+
+            case MV_MX25L_STATUS_BP_1_OF_4:
+                *pWpRegion = MV_WP_UPR_1OF4;
+                break;
+
+            case MV_MX25L_STATUS_BP_1_OF_2:
+                *pWpRegion = MV_WP_UPR_1OF2;
+                break;
+
+            case MV_MX25L_STATUS_BP_ALL:
+                *pWpRegion = MV_WP_ALL;
+                break;
+
+            default:
+                DB(mvOsPrintf("%s WARNING: Unidentified WP region in h/w!\n", __FUNCTION__);)
+                return MV_BAD_VALUE;
+        }
+    }
+    /* Check if the chip is an SPANSION flash; then WP supports only 3 bits */
+    else if (pFlinfo->manufacturerId == MV_SPANSION_MANF_ID)
+    {
+        switch ((reg & MV_S25FL_STATUS_REG_WP_MASK))
+        {
+            case MV_S25FL_STATUS_BP_NONE:
+                *pWpRegion = MV_WP_NONE;
+                break;
+
+            case MV_S25FL_STATUS_BP_1_OF_64:
+                *pWpRegion = MV_WP_UPR_1OF64;
+                break;
+
+            case MV_S25FL_STATUS_BP_1_OF_32:
+                *pWpRegion = MV_WP_UPR_1OF32;
+                break;
+
+            case MV_S25FL_STATUS_BP_1_OF_16:
+                *pWpRegion = MV_WP_UPR_1OF16;
+                break;
+
+            case MV_S25FL_STATUS_BP_1_OF_8:
+                *pWpRegion = MV_WP_UPR_1OF8;
+                break;
+
+            case MV_S25FL_STATUS_BP_1_OF_4:
+                *pWpRegion = MV_WP_UPR_1OF4;
+                break;
+
+            case MV_S25FL_STATUS_BP_1_OF_2:
+                *pWpRegion = MV_WP_UPR_1OF2;
+                break;
+
+            case MV_S25FL_STATUS_BP_ALL:
+                *pWpRegion = MV_WP_ALL;
+                break;
+
+            default:
+                DB(mvOsPrintf("%s WARNING: Unidentified WP region in h/w!\n", __FUNCTION__);)
+                return MV_BAD_VALUE;
+        }
+    }
+    else
+    {
+        DB(mvOsPrintf("%s WARNING: Invaild parameter Manufacturer ID!\n", __FUNCTION__);)
+        return MV_BAD_PARAM;
+    }
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvSFlashStatRegLock - Lock the status register for writing - W/Vpp
+*		pin should be low to take effect
+*
+* DESCRIPTION:
+*       Lock the access to the Status Register for writing. This will
+*		cause the flash to enter the hardware protection mode if the W/Vpp
+*		is low. If the W/Vpp is hi, the chip will be in soft protection mode, but
+*		the register will continue to be writable if WREN sequence was used.
+*
+* INPUT:
+*       pFlinfo: pointer to the Flash information structure
+*		srLock: enable/disable (MV_TRUE/MV_FALSE) status registor lock mechanism
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*       Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashStatRegLock (MV_SFLASH_INFO * pFlinfo, MV_BOOL srLock)
+{
+    MV_STATUS ret;
+	MV_U8 reg;
+
+    /* check for NULL pointer */
+    if (pFlinfo == NULL)
+    {
+        mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return MV_BAD_PARAM;
+    }
+
+    /* Protection - check if the model was detected */
+    if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+    {
+        DB(mvOsPrintf("%s WARNING: Invaild parameter index!\n", __FUNCTION__);)
+        return MV_BAD_PARAM;
+    }
+
+    if ((ret = mvStatusRegGet(pFlinfo, &reg)) != MV_OK)
+        return ret;
+
+	if (srLock)
+		reg |= MV_SFLASH_STATUS_REG_SRWD_MASK;
+	else
+		reg &= ~MV_SFLASH_STATUS_REG_SRWD_MASK;
+
+	return mvStatusRegSet(pFlinfo, reg);
+}
+
+/*******************************************************************************
+* mvSFlashSizeGet - Get the size of the SPI flash
+*
+* DESCRIPTION:
+*       based on the sector number and size of each sector calculate the total
+*       size of the flash memory.
+*
+* INPUT:
+*       pFlinfo: pointer to the Flash information structure
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Size of the flash in bytes.
+*
+*
+*******************************************************************************/
+MV_U32 mvSFlashSizeGet (MV_SFLASH_INFO * pFlinfo)
+{
+    /* check for NULL pointer */
+    if (pFlinfo == NULL)
+    {
+        mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return 0;
+    }
+
+    return (pFlinfo->sectorSize * pFlinfo->sectorNumber);
+}
+
+/*******************************************************************************
+* mvSFlashPowerSaveEnter - Cause the falsh device to go into power save mode
+*
+* DESCRIPTION:
+*       Enter a special power save mode.
+*
+* INPUT:
+*       pFlinfo: pointer to the Flash information structure
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Size of the flash in bytes.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashPowerSaveEnter(MV_SFLASH_INFO * pFlinfo)
+{
+    MV_STATUS ret;
+	MV_U8 cmd[MV_SFLASH_DP_CMND_LENGTH];
+
+
+    /* check for NULL pointer */
+    if (pFlinfo == NULL)
+    {
+        mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return 0;
+    }
+
+    /* Protection - check if the model was detected */
+    if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+    {
+        DB(mvOsPrintf("%s WARNING: Invaild parameter index!\n", __FUNCTION__);)
+        return MV_BAD_PARAM;
+    }
+
+    /* check that power save mode is supported in the specific device */
+    if (sflash[pFlinfo->index].opcdPwrSave == MV_SFLASH_NO_SPECIFIC_OPCD)
+    {
+        DB(mvOsPrintf("%s WARNING: Power save not supported for this device!\n", __FUNCTION__);)
+        return MV_NOT_SUPPORTED;
+    }
+
+    cmd[0] = sflash[pFlinfo->index].opcdPwrSave;
+
+    if ((ret = mvSpiWriteThenWrite(cmd, MV_SFLASH_DP_CMND_LENGTH, NULL, 0)) != MV_OK)
+		return ret;
+
+	return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvSFlashPowerSaveExit - Cause the falsh device to exit the power save mode
+*
+* DESCRIPTION:
+*       Exit the deep power save mode.
+*
+* INPUT:
+*       pFlinfo: pointer to the Flash information structure
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Size of the flash in bytes.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSFlashPowerSaveExit (MV_SFLASH_INFO * pFlinfo)
+{
+    MV_STATUS ret;
+	MV_U8 cmd[MV_SFLASH_RES_CMND_LENGTH];
+
+
+    /* check for NULL pointer */
+    if (pFlinfo == NULL)
+    {
+        mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return 0;
+    }
+
+    /* Protection - check if the model was detected */
+    if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+    {
+        DB(mvOsPrintf("%s WARNING: Invaild parameter index!\n", __FUNCTION__);)
+        return MV_BAD_PARAM;
+    }
+
+    /* check that power save mode is supported in the specific device */
+    if (sflash[pFlinfo->index].opcdRES == MV_SFLASH_NO_SPECIFIC_OPCD)
+    {
+        DB(mvOsPrintf("%s WARNING: Read Electronic Signature not supported for this device!\n", __FUNCTION__);)
+        return MV_NOT_SUPPORTED;
+    }
+
+    cmd[0] = sflash[pFlinfo->index].opcdRES;
+
+    if ((ret = mvSpiWriteThenWrite(cmd, MV_SFLASH_RES_CMND_LENGTH, NULL, 0)) != MV_OK)
+		return ret;
+
+    /* add the delay needed for the device to wake up */
+    mvOsDelay(MV_MXIC_DP_EXIT_DELAY);   /* 30 ms */
+
+	return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvSFlashModelGet - Retreive the string with the device manufacturer and model
+*
+* DESCRIPTION:
+*       Retreive the string with the device manufacturer and model
+*
+* INPUT:
+*       pFlinfo: pointer to the Flash information structure
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       pointer to the string indicating the device manufacturer and model
+*
+*
+*******************************************************************************/
+const MV_8 * mvSFlashModelGet (MV_SFLASH_INFO * pFlinfo)
+{
+    static const MV_8 * unknModel = (const MV_8 *)"Unknown";
+
+    /* check for NULL pointer */
+    if (pFlinfo == NULL)
+    {
+        mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return 0;
+    }
+
+    /* Protection - check if the model was detected */
+    if (pFlinfo->index >= MV_ARRAY_SIZE(sflash))
+    {
+        DB(mvOsPrintf("%s WARNING: Invaild parameter index!\n", __FUNCTION__);)
+        return unknModel;
+    }
+
+    return sflash[pFlinfo->index].deviceModel;
+}
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlash.h b/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlash.h
new file mode 100644
index 000000000000..f441a5cf52b1
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlash.h
@@ -0,0 +1,166 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvSFlashH
+#define __INCmvSFlashH
+
+#include "mvTypes.h"
+
+/* MCAROS */
+#define MV_SFLASH_PAGE_ALLIGN_MASK(pgSz)    (pgSz-1)
+#define MV_ARRAY_SIZE(a)                    ((sizeof(a)) / (sizeof(a[0])))
+
+/* Constants */
+#define MV_INVALID_DEVICE_NUMBER            0xFFFFFFFF
+/* 10 MHz is the minimum possible SPI frequency when tclk is set 200MHz*/
+#define MV_SFLASH_BASIC_SPI_FREQ            10000000
+/* enumerations */
+typedef enum
+{
+	MV_WP_NONE,             /* Unprotect the whole chip */
+    MV_WP_UPR_1OF128,       /* Write protect the upper 1/128 part */
+    MV_WP_UPR_1OF64,        /* Write protect the upper 1/64 part */
+	MV_WP_UPR_1OF32,        /* Write protect the upper 1/32 part */
+	MV_WP_UPR_1OF16,        /* Write protect the upper 1/16 part */
+	MV_WP_UPR_1OF8,         /* Write protect the upper 1/8 part */
+	MV_WP_UPR_1OF4,         /* Write protect the upper 1/4 part */
+	MV_WP_UPR_1OF2,         /* Write protect the upper 1/2 part */
+	MV_WP_ALL               /* Write protect the whole chip */
+} MV_SFLASH_WP_REGION;
+
+/* Type Definitions */
+typedef struct
+{
+    MV_U8   opcdWREN;       /* Write enable opcode */
+    MV_U8   opcdWRDI;       /* Write disable opcode */
+    MV_U8   opcdRDID;       /* Read ID opcode */
+    MV_U8   opcdRDSR;       /* Read Status Register opcode */
+    MV_U8   opcdWRSR;       /* Write Status register opcode */
+    MV_U8   opcdREAD;       /* Read opcode */
+    MV_U8   opcdFSTRD;      /* Fast Read opcode */
+    MV_U8   opcdPP;         /* Page program opcode */
+    MV_U8   opcdSE;         /* Sector erase opcode */
+    MV_U8   opcdBE;         /* Bulk erase opcode */
+    MV_U8   opcdRES;        /* Read electronic signature */
+    MV_U8   opcdPwrSave;    /* Go into power save mode */
+    MV_U32  sectorSize;     /* Size of each sector */
+    MV_U32  sectorNumber;   /* Number of sectors */
+    MV_U32  pageSize;       /* size of each page */
+    const char * deviceModel;    /* string with the device model */
+    MV_U32  manufacturerId; /* The manufacturer ID */
+    MV_U32  deviceId;       /* Device ID */
+    MV_U32  spiMaxFreq;     /* The MAX frequency that can be used with the device */
+    MV_U32  spiMaxFastFreq; /* The MAX frequency that can be used with the device for fast reads */
+    MV_U32  spiFastRdDummyBytes; /* Number of dumy bytes to read before real data when working in fast read mode. */
+} MV_SFLASH_DEVICE_PARAMS;
+
+typedef struct
+{
+    MV_U32					baseAddr;       /* Flash Base Address used in fast mode */
+	MV_U8	                manufacturerId;	/* Manufacturer ID */
+    MV_U16	                deviceId;	    /* Device ID */
+    MV_U32                  sectorSize;     /* Size of each sector - all the same */
+    MV_U32                  sectorNumber;   /* Number of sectors */
+    MV_U32                  pageSize;       /* Page size - affect allignment */
+    MV_U32                  index;          /* index of the device in the sflash table (internal parameter) */
+} MV_SFLASH_INFO;
+
+/* Function Prototypes */
+/* Init */
+MV_STATUS	mvSFlashInit		(MV_SFLASH_INFO * pFlinfo);
+
+/* erase */
+MV_STATUS 	mvSFlashSectorErase (MV_SFLASH_INFO * pFlinfo, MV_U32 secNumber);
+MV_STATUS 	mvSFlashChipErase   (MV_SFLASH_INFO * pFlinfo);
+
+/* Read */
+MV_STATUS	mvSFlashBlockRd  	(MV_SFLASH_INFO * pFlinfo, MV_U32 offset,
+							     MV_U8* pReadBuff, MV_U32 buffSize);
+MV_STATUS mvSFlashFastBlockRd (MV_SFLASH_INFO * pFlinfo, MV_U32 offset,
+							     MV_U8* pReadBuff, MV_U32 buffSize);
+
+/* write regardless of the page boundaries and size limit per Page program command */
+MV_STATUS	mvSFlashBlockWr		(MV_SFLASH_INFO * pFlinfo, MV_U32 offset,
+							     MV_U8* pWriteBuff, MV_U32 buffSize);
+/* Get IDs */
+MV_STATUS 	mvSFlashIdGet      	(MV_SFLASH_INFO * pFlinfo, MV_U8* pManId, MV_U16* pDevId);
+
+/* Set and Get the Write Protection region - if the Status register is not locked */
+MV_STATUS   mvSFlashWpRegionSet (MV_SFLASH_INFO * pFlinfo, MV_SFLASH_WP_REGION wpRegion);
+MV_STATUS   mvSFlashWpRegionGet (MV_SFLASH_INFO * pFlinfo, MV_SFLASH_WP_REGION * pWpRegion);
+
+/* Lock the status register for writing - W/Vpp pin should be low to take effect */
+MV_STATUS   mvSFlashStatRegLock (MV_SFLASH_INFO * pFlinfo, MV_BOOL srLock);
+
+/* Get the regions sizes */
+MV_U32      mvSFlashSizeGet     (MV_SFLASH_INFO * pFlinfo);
+
+/* Cause the falsh device to go into power save mode */
+MV_STATUS   mvSFlashPowerSaveEnter(MV_SFLASH_INFO * pFlinfo);
+MV_STATUS   mvSFlashPowerSaveExit (MV_SFLASH_INFO * pFlinfo);
+
+/* Retreive the string with the device manufacturer and model */
+const MV_8 *     mvSFlashModelGet    (MV_SFLASH_INFO * pFlinfo);
+
+#endif /* __INCmvSFlashH */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlashSpec.h b/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlashSpec.h
new file mode 100644
index 000000000000..6a7fd1376f88
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/sflash/mvSFlashSpec.h
@@ -0,0 +1,232 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvSFlashSpecH
+#define __INCmvSFlashSpecH
+
+/* Constants */
+#define		MV_SFLASH_READ_CMND_LENGTH		    4		/* 1B opcode + 3B address */
+#define		MV_SFLASH_SE_CMND_LENGTH		    4		/* 1B opcode + 3B address */
+#define		MV_SFLASH_BE_CMND_LENGTH		    1		/* 1B opcode */
+#define		MV_SFLASH_PP_CMND_LENGTH		    4		/* 1B opcode + 3B address */
+#define		MV_SFLASH_WREN_CMND_LENGTH		    1		/* 1B opcode */
+#define		MV_SFLASH_WRDI_CMND_LENGTH		    1		/* 1B opcode */
+#define		MV_SFLASH_RDID_CMND_LENGTH		    1		/* 1B opcode */
+#define		MV_SFLASH_RDID_REPLY_LENGTH		    3		/* 1B manf ID and 2B device ID */
+#define		MV_SFLASH_RDSR_CMND_LENGTH		    1		/* 1B opcode */
+#define		MV_SFLASH_RDSR_REPLY_LENGTH		    1		/* 1B status */
+#define		MV_SFLASH_WRSR_CMND_LENGTH		    2		/* 1B opcode + 1B status value */
+#define		MV_SFLASH_DP_CMND_LENGTH		    1		/* 1B opcode */
+#define		MV_SFLASH_RES_CMND_LENGTH		    1		/* 1B opcode */
+
+/* Status Register Bit Masks */
+#define		MV_SFLASH_STATUS_REG_WIP_OFFSET	    0	    /* bit 0; write in progress */
+#define		MV_SFLASH_STATUS_REG_WP_OFFSET	    2       /* bit 2-4; write protect option */
+#define		MV_SFLASH_STATUS_REG_SRWD_OFFSET	7	    /* bit 7; lock status register write */
+#define		MV_SFLASH_STATUS_REG_WIP_MASK	    (0x1 << MV_SFLASH_STATUS_REG_WIP_OFFSET)
+#define		MV_SFLASH_STATUS_REG_SRWD_MASK	    (0x1 << MV_SFLASH_STATUS_REG_SRWD_OFFSET)
+
+#define		MV_SFLASH_MAX_WAIT_LOOP			    1000000
+#define     MV_SFLASH_CHIP_ERASE_MAX_WAIT_LOOP  0x50000000
+
+#define		MV_SFLASH_DEFAULT_RDID_OPCD		    0x9F	/* Default Read ID */
+#define     MV_SFLASH_DEFAULT_WREN_OPCD         0x06	/* Default Write Enable */
+#define     MV_SFLASH_NO_SPECIFIC_OPCD          0x00
+
+/********************************/
+/*  ST M25Pxxx Device Specific  */
+/********************************/
+
+/* Manufacturer IDs and Device IDs for SFLASHs supported by the driver */
+#define     MV_M25PXXX_ST_MANF_ID               0x20
+#define     MV_M25P32_DEVICE_ID                 0x2016
+#define     MV_M25P32_MAX_SPI_FREQ              20000000    /* 20MHz */
+#define     MV_M25P32_MAX_FAST_SPI_FREQ         50000000    /* 50MHz */
+#define     MV_M25P32_FAST_READ_DUMMY_BYTES     1
+#define     MV_M25P64_DEVICE_ID                 0x2017
+#define     MV_M25P64_MAX_SPI_FREQ              20000000    /* 20MHz */
+#define     MV_M25P64_MAX_FAST_SPI_FREQ         50000000    /* 50MHz */
+#define     MV_M25P64_FAST_READ_DUMMY_BYTES     1
+#define     MV_M25P128_DEVICE_ID                0x2018
+#define     MV_M25P128_MAX_SPI_FREQ             20000000    /* 20MHz */
+#define     MV_M25P128_MAX_FAST_SPI_FREQ        50000000    /* 50MHz */
+#define     MV_M25P128_FAST_READ_DUMMY_BYTES    1
+
+
+/* Sector Sizes and population per device model*/
+#define     MV_M25P32_SECTOR_SIZE               0x10000 /* 64K */
+#define     MV_M25P64_SECTOR_SIZE               0x10000 /* 64K */
+#define     MV_M25P128_SECTOR_SIZE              0x40000 /* 256K */
+#define     MV_M25P32_SECTOR_NUMBER             64
+#define     MV_M25P64_SECTOR_NUMBER             128
+#define     MV_M25P128_SECTOR_NUMBER            64
+#define		MV_M25P_PAGE_SIZE				    0x100   /* 256 byte */
+
+#define		MV_M25P_WREN_CMND_OPCD			    0x06	/* Write Enable */
+#define		MV_M25P_WRDI_CMND_OPCD			    0x04	/* Write Disable */
+#define		MV_M25P_RDID_CMND_OPCD			    0x9F	/* Read ID */
+#define		MV_M25P_RDSR_CMND_OPCD			    0x05	/* Read Status Register */
+#define		MV_M25P_WRSR_CMND_OPCD			    0x01	/* Write Status Register */
+#define		MV_M25P_READ_CMND_OPCD			    0x03	/* Sequential Read */
+#define		MV_M25P_FAST_RD_CMND_OPCD		    0x0B	/* Fast Read */
+#define		MV_M25P_PP_CMND_OPCD			    0x02	/* Page Program */
+#define		MV_M25P_SE_CMND_OPCD			    0xD8	/* Sector Erase */
+#define		MV_M25P_BE_CMND_OPCD			    0xC7	/* Bulk Erase */
+#define		MV_M25P_RES_CMND_OPCD			    0xAB	/* Read Electronic Signature */
+
+/* Status Register Write Protect Bit Masks - 3bits */
+#define		MV_M25P_STATUS_REG_WP_MASK	        (0x07 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     MV_M25P_STATUS_BP_NONE              (0x00 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     MV_M25P_STATUS_BP_1_OF_64           (0x01 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     MV_M25P_STATUS_BP_1_OF_32           (0x02 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     MV_M25P_STATUS_BP_1_OF_16           (0x03 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     MV_M25P_STATUS_BP_1_OF_8            (0x04 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     MV_M25P_STATUS_BP_1_OF_4            (0x05 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     MV_M25P_STATUS_BP_1_OF_2            (0x06 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     MV_M25P_STATUS_BP_ALL               (0x07 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+
+/************************************/
+/*  MXIC MX25L6405 Device Specific  */
+/************************************/
+
+/* Manufacturer IDs and Device IDs for SFLASHs supported by the driver */
+#define     MV_MXIC_MANF_ID                     0xC2
+#define     MV_MX25L6405_DEVICE_ID              0x2017
+#define     MV_MX25L6405_MAX_SPI_FREQ           20000000    /* 20MHz */
+#define     MV_MX25L6405_MAX_FAST_SPI_FREQ      50000000    /* 50MHz */
+#define     MV_MX25L6405_FAST_READ_DUMMY_BYTES  1
+#define     MV_MXIC_DP_EXIT_DELAY               30          /* 30 ms */
+
+/* Sector Sizes and population per device model*/
+#define     MV_MX25L6405_SECTOR_SIZE            0x10000 /* 64K */
+#define     MV_MX25L6405_SECTOR_NUMBER          128
+#define		MV_MXIC_PAGE_SIZE			        0x100   /* 256 byte */
+
+#define		MV_MX25L_WREN_CMND_OPCD			    0x06	/* Write Enable */
+#define		MV_MX25L_WRDI_CMND_OPCD			    0x04	/* Write Disable */
+#define		MV_MX25L_RDID_CMND_OPCD			    0x9F	/* Read ID */
+#define		MV_MX25L_RDSR_CMND_OPCD			    0x05	/* Read Status Register */
+#define		MV_MX25L_WRSR_CMND_OPCD			    0x01	/* Write Status Register */
+#define		MV_MX25L_READ_CMND_OPCD			    0x03	/* Sequential Read */
+#define		MV_MX25L_FAST_RD_CMND_OPCD		    0x0B	/* Fast Read */
+#define		MV_MX25L_PP_CMND_OPCD			    0x02	/* Page Program */
+#define		MV_MX25L_SE_CMND_OPCD			    0xD8	/* Sector Erase */
+#define		MV_MX25L_BE_CMND_OPCD			    0xC7	/* Bulk Erase */
+#define     MV_MX25L_DP_CMND_OPCD               0xB9    /* Deep Power Down */
+#define		MV_MX25L_RES_CMND_OPCD			    0xAB	/* Read Electronic Signature */
+
+/* Status Register Write Protect Bit Masks - 4bits */
+#define		MV_MX25L_STATUS_REG_WP_MASK	        (0x0F << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     MV_MX25L_STATUS_BP_NONE             (0x00 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     MV_MX25L_STATUS_BP_1_OF_128         (0x01 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     MV_MX25L_STATUS_BP_1_OF_64          (0x02 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     MV_MX25L_STATUS_BP_1_OF_32          (0x03 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     MV_MX25L_STATUS_BP_1_OF_16          (0x04 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     MV_MX25L_STATUS_BP_1_OF_8           (0x05 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     MV_MX25L_STATUS_BP_1_OF_4           (0x06 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     MV_MX25L_STATUS_BP_1_OF_2           (0x07 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     MV_MX25L_STATUS_BP_ALL              (0x0F << MV_SFLASH_STATUS_REG_WP_OFFSET)
+
+/************************************/
+/*  SPANSION S25FL128P Device Specific  */
+/************************************/
+
+/* Manufacturer IDs and Device IDs for SFLASHs supported by the driver */
+#define     MV_SPANSION_MANF_ID                     	0x01
+#define     MV_S25FL128_DEVICE_ID              		0x2018
+#define     MV_S25FL128_MAX_SPI_FREQ           		33000000    /* 33MHz */
+#define     MV_S25FL128_MAX_FAST_SPI_FREQ        	104000000    /* 104MHz */
+#define     MV_S25FL128_FAST_READ_DUMMY_BYTES    	1
+
+/* Sector Sizes and population per device model*/
+#define     MV_S25FL128_SECTOR_SIZE            			0x40000 /* 256K */
+#define     MV_S25FL128_SECTOR_NUMBER          			64
+#define	    MV_S25FL_PAGE_SIZE			        	0x100   /* 256 byte */
+
+#define		MV_S25FL_WREN_CMND_OPCD			    0x06	/* Write Enable */
+#define		MV_S25FL_WRDI_CMND_OPCD			    0x04	/* Write Disable */
+#define		MV_S25FL_RDID_CMND_OPCD			    0x9F	/* Read ID */
+#define		MV_S25FL_RDSR_CMND_OPCD			    0x05	/* Read Status Register */
+#define		MV_S25FL_WRSR_CMND_OPCD			    0x01	/* Write Status Register */
+#define		MV_S25FL_READ_CMND_OPCD			    0x03	/* Sequential Read */
+#define		MV_S25FL_FAST_RD_CMND_OPCD		    0x0B	/* Fast Read */
+#define		MV_S25FL_PP_CMND_OPCD			    0x02	/* Page Program */
+#define		MV_S25FL_SE_CMND_OPCD			    0xD8	/* Sector Erase */
+#define		MV_S25FL_BE_CMND_OPCD			    0xC7	/* Bulk Erase */
+#define     	MV_S25FL_DP_CMND_OPCD               	    0xB9    	/* Deep Power Down */
+#define		MV_S25FL_RES_CMND_OPCD			    0xAB	/* Read Electronic Signature */
+
+/* Status Register Write Protect Bit Masks - 4bits */
+#define		MV_S25FL_STATUS_REG_WP_MASK	        (0x0F << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     	MV_S25FL_STATUS_BP_NONE             	(0x00 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     	MV_S25FL_STATUS_BP_1_OF_128         	(0x01 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     	MV_S25FL_STATUS_BP_1_OF_64          	(0x02 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     	MV_S25FL_STATUS_BP_1_OF_32          	(0x03 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     	MV_S25FL_STATUS_BP_1_OF_16          	(0x04 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     	MV_S25FL_STATUS_BP_1_OF_8           	(0x05 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     	MV_S25FL_STATUS_BP_1_OF_4           	(0x06 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     	MV_S25FL_STATUS_BP_1_OF_2           	(0x07 << MV_SFLASH_STATUS_REG_WP_OFFSET)
+#define     	MV_S25FL_STATUS_BP_ALL              	(0x0F << MV_SFLASH_STATUS_REG_WP_OFFSET)
+
+#endif /* __INCmvSFlashSpecH */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpi.c b/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpi.c
new file mode 100644
index 000000000000..55fecc136bb0
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpi.c
@@ -0,0 +1,574 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "spi/mvSpi.h"
+#include "spi/mvSpiSpec.h"
+
+#include "ctrlEnv/mvCtrlEnvLib.h"
+
+/* #define MV_DEBUG */
+#ifdef MV_DEBUG
+#define DB(x) x
+#define mvOsPrintf printf
+#else
+#define DB(x)
+#endif
+
+
+/*******************************************************************************
+* mvSpi16bitDataTxRx - Transmt and receive data
+*
+* DESCRIPTION:
+*       Tx data and block waiting for data to be transmitted
+*
+********************************************************************************/
+static MV_STATUS mvSpi16bitDataTxRx (MV_U16 txData, MV_U16 * pRxData)
+{
+    MV_U32 i;
+    MV_BOOL ready = MV_FALSE;
+
+    /* First clear the bit in the interrupt cause register */
+    MV_REG_WRITE(MV_SPI_INT_CAUSE_REG, 0x0);
+
+    /* Transmit data */
+    MV_REG_WRITE(MV_SPI_DATA_OUT_REG, MV_16BIT_LE(txData));
+
+    /* wait with timeout for memory ready */
+    for (i=0; i<MV_SPI_WAIT_RDY_MAX_LOOP; i++)
+    {
+        if (MV_REG_READ(MV_SPI_INT_CAUSE_REG))
+        {
+            ready = MV_TRUE;
+            break;
+        }
+#ifdef MV_SPI_SLEEP_ON_WAIT
+        mvOsSleep(1);
+#endif /* MV_SPI_SLEEP_ON_WAIT */
+    }
+
+    if (!ready)
+        return MV_TIMEOUT;
+
+    /* check that the RX data is needed */
+    if (pRxData)
+    {
+	if ((MV_U32)pRxData &  0x1) /* check if address is not alligned to 16bit */
+	{
+#if defined(MV_CPU_LE)
+		/* perform the data write to the buffer in two stages with 8bit each */
+		MV_U8 * bptr = (MV_U8*)pRxData;
+		MV_U16 data = MV_16BIT_LE(MV_REG_READ(MV_SPI_DATA_IN_REG));
+		*bptr = (data & 0xFF);
+		++bptr;
+		*bptr = ((data >> 8) & 0xFF);
+
+#elif defined(MV_CPU_BE)
+
+		/* perform the data write to the buffer in two stages with 8bit each */
+		MV_U8 * bptr = (MV_U8 *)pRxData;
+		MV_U16 data = MV_16BIT_LE(MV_REG_READ(MV_SPI_DATA_IN_REG));
+		*bptr = ((data >> 8) & 0xFF);
+		++bptr;
+		*bptr = (data & 0xFF);
+
+#else
+    #error "CPU endianess isn't defined!\n"
+#endif
+
+	}
+	else
+		*pRxData = MV_16BIT_LE(MV_REG_READ(MV_SPI_DATA_IN_REG));
+    }
+
+    return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvSpi8bitDataTxRx - Transmt and receive data (8bits)
+*
+* DESCRIPTION:
+*       Tx data and block waiting for data to be transmitted
+*
+********************************************************************************/
+static MV_STATUS mvSpi8bitDataTxRx (MV_U8 txData, MV_U8 * pRxData)
+{
+    MV_U32 i;
+    MV_BOOL ready = MV_FALSE;
+
+    /* First clear the bit in the interrupt cause register */
+    MV_REG_WRITE(MV_SPI_INT_CAUSE_REG, 0x0);
+
+    /* Transmit data */
+    MV_REG_WRITE(MV_SPI_DATA_OUT_REG, txData);
+
+    /* wait with timeout for memory ready */
+    for (i=0; i<MV_SPI_WAIT_RDY_MAX_LOOP; i++)
+    {
+        if (MV_REG_READ(MV_SPI_INT_CAUSE_REG))
+        {
+            ready = MV_TRUE;
+            break;
+        }
+#ifdef MV_SPI_SLEEP_ON_WAIT
+        mvOsSleep(1);
+#endif /* MV_SPI_SLEEP_ON_WAIT */
+    }
+
+    if (!ready)
+        return MV_TIMEOUT;
+
+    /* check that the RX data is needed */
+    if (pRxData)
+	*pRxData = MV_REG_READ(MV_SPI_DATA_IN_REG);
+
+    return MV_OK;
+}
+
+/*
+#####################################################################################
+#####################################################################################
+*/
+
+/*******************************************************************************
+* mvSpiInit - Initialize the SPI controller
+*
+* DESCRIPTION:
+*       Perform the neccessary initialization in order to be able to send an
+*		receive over the SPI interface.
+*
+* INPUT:
+*       serialBaudRate: Baud rate (SPI clock frequency)
+*		use16BitMode: Whether to use 2bytes (MV_TRUE) or 1bytes (MV_FALSE)
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSpiInit	(MV_U32 serialBaudRate)
+{
+    MV_STATUS ret;
+
+    /* Set the serial clock */
+    if ((ret = mvSpiBaudRateSet(serialBaudRate)) != MV_OK)
+        return ret;
+
+    /* For devices in which the SPI is muxed on the MPP with other interfaces*/
+    mvMPPConfigToSPI();
+
+	/* Configure the default SPI mode to be 16bit */
+	MV_REG_BIT_SET(MV_SPI_IF_CONFIG_REG, MV_SPI_BYTE_LENGTH_MASK);
+
+	/* Fix ac timing on SPI in 6183, 6183L and 78x00 only */
+	if ( (mvCtrlModelGet() == MV_6183_DEV_ID) ||
+		 (mvCtrlModelGet() == MV_6183L_DEV_ID) ||
+		(mvCtrlModelGet() == MV_78100_DEV_ID) ||
+		(mvCtrlModelGet() == MV_78200_DEV_ID) ||
+		(mvCtrlModelGet() == MV_76100_DEV_ID))
+	    MV_REG_BIT_SET(MV_SPI_IF_CONFIG_REG, BIT14);
+
+    /* Verify that the CS is deasserted */
+    mvSpiCsDeassert();
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvSpiBaudRateSet - Set the Frequency of the SPI clock
+*
+* DESCRIPTION:
+*       Set the Prescale bits to adapt to the requested baud rate (the clock
+*       used for thr SPI).
+*
+* INPUT:
+*       serialBaudRate: Baud rate (SPI clock frequency)
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSpiBaudRateSet (MV_U32 serialBaudRate)
+{
+    MV_U8 i;
+	/* MV_U8 preScale[32] = {1, 1, 2, 3, 4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15,
+						  2, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30};
+	*/
+	MV_U8 preScale[14] = { 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30};
+	MV_U8 bestPrescaleIndx = 100;
+	MV_U32 minBaudOffset = 0xFFFFFFFF;
+	MV_U32 cpuClk = mvBoardTclkGet(); /*mvCpuPclkGet();*/
+	MV_U32 tempReg;
+
+	/* Find the best prescale configuration - less or equal */
+	for (i=0; i<14; i++)
+	{
+		/* check for higher - irrelevent */
+		if ((cpuClk / preScale[i]) > serialBaudRate)
+			continue;
+
+		/* check for exact fit */
+		if ((cpuClk / preScale[i]) == serialBaudRate)
+		{
+			bestPrescaleIndx = i;
+			break;
+		}
+
+		/* check if this is better than the previous one */
+		if ((serialBaudRate - (cpuClk / preScale[i])) < minBaudOffset)
+		{
+			minBaudOffset = (serialBaudRate - (cpuClk / preScale[i]));
+			bestPrescaleIndx = i;
+		}
+	}
+
+	if (bestPrescaleIndx > 14)
+    {
+        mvOsPrintf("%s ERROR: SPI baud rate prescale error!\n", __FUNCTION__);
+		return MV_OUT_OF_RANGE;
+    }
+
+	/* configure the Prescale */
+	tempReg = MV_REG_READ(MV_SPI_IF_CONFIG_REG);
+	tempReg = ((tempReg & ~MV_SPI_CLK_PRESCALE_MASK) | (bestPrescaleIndx + 0x12));
+	MV_REG_WRITE(MV_SPI_IF_CONFIG_REG, tempReg);
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvSpiCsAssert - Assert the Chip Select pin indicating a new transfer
+*
+* DESCRIPTION:
+*       Assert The chip select - used to select an external SPI device
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Success or Error code.
+*
+********************************************************************************/
+MV_VOID mvSpiCsAssert(MV_VOID)
+{
+    /* For devices in which the SPI is muxed on the MPP with other interfaces*/
+    mvMPPConfigToSPI();
+    mvOsUDelay(1);
+    MV_REG_BIT_SET(MV_SPI_IF_CTRL_REG, MV_SPI_CS_ENABLE_MASK);
+}
+
+/*******************************************************************************
+* mvSpiCsDeassert - DeAssert the Chip Select pin indicating the end of a
+*				  SPI transfer sequence
+*
+* DESCRIPTION:
+*       DeAssert the chip select pin
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Success or Error code.
+*
+********************************************************************************/
+MV_VOID mvSpiCsDeassert(MV_VOID)
+{
+	MV_REG_BIT_RESET(MV_SPI_IF_CTRL_REG, MV_SPI_CS_ENABLE_MASK);
+
+    /* For devices in which the SPI is muxed on the MPP with other interfaces*/
+    mvMPPConfigToDefault();
+}
+
+/*******************************************************************************
+* mvSpiRead - Read a buffer over the SPI interface
+*
+* DESCRIPTION:
+*       Receive (read) a buffer over the SPI interface in 16bit chunks. If the
+*		buffer size is odd, then the last chunk will be 8bits. Chip select is not
+*       handled at this level.
+*
+* INPUT:
+*		pRxBuff: Pointer to the buffer to hold the received data
+*		buffSize: length of the pRxBuff
+*
+* OUTPUT:
+*		pRxBuff: Pointer to the buffer with the received data
+*
+* RETURN:
+*       Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSpiRead	(MV_U8* pRxBuff, MV_U32 buffSize)
+{
+    MV_STATUS ret;
+	MV_U32 bytesLeft = buffSize;
+	MV_U16* rxPtr = (MV_U16*)pRxBuff;
+
+    /* check for null parameters */
+    if (pRxBuff == NULL)
+    {
+        mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return MV_BAD_PARAM;
+    }
+
+    /* Check that the buffer pointer and the buffer size are 16bit aligned */
+    if ((((MV_U32)buffSize & 1) == 0) && (((MV_U32)pRxBuff & 1) == 0))
+    {
+	/* Verify that the SPI mode is in 16bit mode */
+	MV_REG_BIT_SET(MV_SPI_IF_CONFIG_REG, MV_SPI_BYTE_LENGTH_MASK);
+
+	/* TX/RX as long we have complete 16bit chunks */
+	while (bytesLeft >= MV_SPI_16_BIT_CHUNK_SIZE)
+	{
+		/* Transmitted and wait for the transfer to be completed */
+		if ((ret = mvSpi16bitDataTxRx(MV_SPI_DUMMY_WRITE_16BITS, rxPtr)) != MV_OK)
+			return ret;
+
+		/* increment the pointers */
+		rxPtr++;
+		bytesLeft -= MV_SPI_16_BIT_CHUNK_SIZE;
+	}
+
+    }
+    else
+    {
+	/* Verify that the SPI mode is in 8bit mode */
+	MV_REG_BIT_RESET(MV_SPI_IF_CONFIG_REG, MV_SPI_BYTE_LENGTH_MASK);
+
+	/* TX/RX in 8bit chanks */
+	while (bytesLeft > 0)
+	{
+		/* Transmitted and wait for the transfer to be completed */
+		if ((ret = mvSpi8bitDataTxRx(MV_SPI_DUMMY_WRITE_8BITS, pRxBuff)) != MV_OK)
+			return ret;
+		/* increment the pointers */
+		pRxBuff++;
+		bytesLeft--;
+	}
+    }
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvSpiWrite - Transmit a buffer over the SPI interface
+*
+* DESCRIPTION:
+*       Transmit a buffer over the SPI interface in 16bit chunks. If the
+*		buffer size is odd, then the last chunk will be 8bits. No chip select
+*       action is taken.
+*
+* INPUT:
+*		pTxBuff: Pointer to the buffer holding the TX data
+*		buffSize: length of the pTxBuff
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSpiWrite(MV_U8* pTxBuff, MV_U32 buffSize)
+{
+    MV_STATUS ret;
+	MV_U32 bytesLeft = buffSize;
+	MV_U16* txPtr = (MV_U16*)pTxBuff;
+
+    /* check for null parameters */
+    if (pTxBuff == NULL)
+    {
+        mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return MV_BAD_PARAM;
+    }
+
+    /* Check that the buffer pointer and the buffer size are 16bit aligned */
+    if ((((MV_U32)buffSize & 1) == 0) && (((MV_U32)pTxBuff & 1) == 0))
+    {
+	/* Verify that the SPI mode is in 16bit mode */
+	MV_REG_BIT_SET(MV_SPI_IF_CONFIG_REG, MV_SPI_BYTE_LENGTH_MASK);
+
+	/* TX/RX as long we have complete 16bit chunks */
+	while (bytesLeft >= MV_SPI_16_BIT_CHUNK_SIZE)
+	{
+        /* Transmitted and wait for the transfer to be completed */
+		if ((ret = mvSpi16bitDataTxRx(*txPtr, NULL)) != MV_OK)
+			return ret;
+
+		/* increment the pointers */
+		txPtr++;
+		bytesLeft -= MV_SPI_16_BIT_CHUNK_SIZE;
+	}
+    }
+    else
+    {
+
+	/* Verify that the SPI mode is in 8bit mode */
+	MV_REG_BIT_RESET(MV_SPI_IF_CONFIG_REG, MV_SPI_BYTE_LENGTH_MASK);
+
+	/* TX/RX in 8bit chanks */
+	while (bytesLeft > 0)
+	{
+		/* Transmitted and wait for the transfer to be completed */
+		if ((ret = mvSpi8bitDataTxRx(*pTxBuff, NULL)) != MV_OK)
+			return ret;
+
+		/* increment the pointers */
+		pTxBuff++;
+		bytesLeft--;
+	}
+    }
+
+	return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvSpiReadWrite - Read and Write a buffer simultanuosely
+*
+* DESCRIPTION:
+*       Transmit and receive a buffer over the SPI in 16bit chunks. If the
+*		buffer size is odd, then the last chunk will be 8bits. The SPI chip
+*       select is not handled implicitely.
+*
+* INPUT:
+*       pRxBuff: Pointer to the buffer to write the RX info in
+*		pTxBuff: Pointer to the buffer holding the TX info
+*		buffSize: length of both the pTxBuff and pRxBuff
+*
+* OUTPUT:
+*       pRxBuff: Pointer of the buffer holding the RX data
+*
+* RETURN:
+*       Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSpiReadWrite(MV_U8* pRxBuff, MV_U8* pTxBuff, MV_U32 buffSize)
+{
+    MV_STATUS ret;
+    MV_U32 bytesLeft = buffSize;
+    MV_U16* txPtr = (MV_U16*)pTxBuff;
+    MV_U16* rxPtr = (MV_U16*)pRxBuff;
+
+    /* check for null parameters */
+    if ((pRxBuff == NULL) || (pTxBuff == NULL))
+    {
+        mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return MV_BAD_PARAM;
+    }
+
+    /* Check that the buffer pointer and the buffer size are 16bit aligned */
+    if ((((MV_U32)buffSize & 1) == 0) && (((MV_U32)pTxBuff & 1) == 0) && (((MV_U32)pRxBuff & 1) == 0))
+    {
+	/* Verify that the SPI mode is in 16bit mode */
+	MV_REG_BIT_SET(MV_SPI_IF_CONFIG_REG, MV_SPI_BYTE_LENGTH_MASK);
+
+	/* TX/RX as long we have complete 16bit chunks */
+	while (bytesLeft >= MV_SPI_16_BIT_CHUNK_SIZE)
+	{
+        /* Transmitted and wait for the transfer to be completed */
+		if ((ret = mvSpi16bitDataTxRx(*txPtr, rxPtr)) != MV_OK)
+			return ret;
+
+		/* increment the pointers */
+		txPtr++;
+		rxPtr++;
+		bytesLeft -= MV_SPI_16_BIT_CHUNK_SIZE;
+	}
+    }
+    else
+    {
+	/* Verify that the SPI mode is in 8bit mode */
+	MV_REG_BIT_RESET(MV_SPI_IF_CONFIG_REG, MV_SPI_BYTE_LENGTH_MASK);
+
+	/* TX/RX in 8bit chanks */
+	while (bytesLeft > 0)
+	{
+		/* Transmitted and wait for the transfer to be completed */
+		if ( (ret = mvSpi8bitDataTxRx(*pTxBuff, pRxBuff) ) != MV_OK)
+			return ret;
+		pRxBuff++;
+		pTxBuff++;
+		bytesLeft--;
+	}
+    }
+
+	return MV_OK;
+}
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpi.h b/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpi.h
new file mode 100644
index 000000000000..5991e38002c5
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpi.h
@@ -0,0 +1,94 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvSpihH
+#define __INCmvSpihH
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+
+/* Function Prototypes */
+/* Init */
+MV_STATUS   mvSpiInit		(MV_U32 serialBaudRate);
+
+/* Set the Frequency of the Spi clock */
+MV_STATUS   mvSpiBaudRateSet(MV_U32 serialBaudRate);
+
+/* Assert the SPI chip select */
+MV_VOID     mvSpiCsAssert   (MV_VOID);
+
+/* De-assert the SPI chip select */
+MV_VOID     mvSpiCsDeassert (MV_VOID);
+
+/* Simultanuous Read and write */
+MV_STATUS	mvSpiReadWrite	(MV_U8* pRxBuff, MV_U8* pTxBuff, MV_U32 buffSize);
+
+/* serialize a buffer on the TX line - Rx is ignored */
+MV_STATUS	mvSpiWrite  	(MV_U8* pTxBuff, MV_U32 buffSize);
+
+/* read from the RX line by writing dummy values to the TX line */
+MV_STATUS	mvSpiRead   	(MV_U8* pRxBuff, MV_U32 buffSize);
+
+#endif /* __INCmvSpihH */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiCmnd.c b/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiCmnd.c
new file mode 100644
index 000000000000..87eea12f0042
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiCmnd.c
@@ -0,0 +1,248 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "spi/mvSpi.h"
+#include "spi/mvSpiSpec.h"
+
+/*#define MV_DEBUG*/
+#ifdef MV_DEBUG
+#define DB(x) x
+#else
+#define DB(x)
+#endif
+
+
+/*******************************************************************************
+* mvSpiReadAndWrite - Read and Write a buffer simultanuousely
+*
+* DESCRIPTION:
+*       Transmit and receive a buffer over the SPI in 16bit chunks. If the
+*		buffer size is odd, then the last chunk will be 8bits.
+*
+* INPUT:
+*       pRxBuff: Pointer to the buffer to write the RX info in
+*		pTxBuff: Pointer to the buffer holding the TX info
+*		buffSize: length of both the pTxBuff and pRxBuff
+*
+* OUTPUT:
+*       pRxBuff: Pointer of the buffer holding the RX data
+*
+* RETURN:
+*       Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSpiReadAndWrite(MV_U8* pRxBuff, MV_U8* pTxBuff, MV_U32 buffSize)
+{
+    MV_STATUS ret;
+
+    /* check for null parameters */
+    if ((pRxBuff == NULL) || (pTxBuff == NULL) || (buffSize == 0))
+    {
+        mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return MV_BAD_PARAM;
+    }
+
+	/* First assert the chip select */
+	mvSpiCsAssert();
+
+    ret = mvSpiReadWrite(pRxBuff, pTxBuff, buffSize);
+
+	/* Finally deassert the chip select */
+	mvSpiCsDeassert();
+
+	return ret;
+}
+
+/*******************************************************************************
+* mvSpiWriteThenWrite - Serialize a command followed by the data over the TX line
+*
+* DESCRIPTION:
+*       Assert the chip select line. Transmit the command buffer followed by
+*       the data buffer. Then deassert the CS line.
+*
+* INPUT:
+*       pCmndBuff: Pointer to the command buffer to transmit
+*       cmndSize: length of the command size
+*		pTxDataBuff: Pointer to the data buffer to transmit
+*		txDataSize: length of the data buffer
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS	mvSpiWriteThenWrite (MV_U8* pCmndBuff, MV_U32 cmndSize, MV_U8* pTxDataBuff,
+                                 MV_U32 txDataSize)
+{
+    MV_STATUS ret = MV_OK, tempRet;
+
+    /* check for null parameters */
+#ifndef CONFIG_MARVELL
+    if(NULL == pTxDataBuff)
+    {
+        mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return MV_BAD_PARAM;
+    }
+#endif
+
+    if (pCmndBuff == NULL)
+    {
+        mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return MV_BAD_PARAM;
+    }
+
+	/* First assert the chip select */
+	mvSpiCsAssert();
+
+    /* first write the command */
+    if ((cmndSize) && (pCmndBuff != NULL))
+    {
+        if ((tempRet = mvSpiWrite(pCmndBuff, cmndSize)) != MV_OK)
+            ret = tempRet;
+    }
+
+    /* Then write the data buffer */
+#ifndef CONFIG_MARVELL
+    if (txDataSize)
+#else
+    if ((txDataSize) && (pTxDataBuff != NULL))
+#endif
+    {
+        if ((tempRet = mvSpiWrite(pTxDataBuff, txDataSize)) != MV_OK)
+            ret = tempRet;
+    }
+
+	/* Finally deassert the chip select */
+	mvSpiCsDeassert();
+
+	return ret;
+}
+
+/*******************************************************************************
+* mvSpiWriteThenRead - Serialize a command then read a data buffer
+*
+* DESCRIPTION:
+*       Assert the chip select line. Transmit the command buffer then read
+*       the data buffer. Then deassert the CS line.
+*
+* INPUT:
+*       pCmndBuff: Pointer to the command buffer to transmit
+*       cmndSize: length of the command size
+*		pRxDataBuff: Pointer to the buffer to read the data in
+*		txDataSize: length of the data buffer
+*
+* OUTPUT:
+*		pRxDataBuff: Pointer to the buffer holding the data
+*
+* RETURN:
+*       Success or Error code.
+*
+*
+*******************************************************************************/
+MV_STATUS mvSpiWriteThenRead (MV_U8* pCmndBuff, MV_U32 cmndSize, MV_U8* pRxDataBuff,
+                              MV_U32 rxDataSize,MV_U32 dummyBytesToRead)
+{
+    MV_STATUS ret = MV_OK, tempRet;
+    MV_U8   dummyByte;
+
+    /* check for null parameters */
+    if ((pCmndBuff == NULL) && (pRxDataBuff == NULL))
+    {
+        mvOsPrintf("%s ERROR: Null pointer parameter!\n", __FUNCTION__);
+        return MV_BAD_PARAM;
+    }
+
+	/* First assert the chip select */
+	mvSpiCsAssert();
+
+    /* first write the command */
+    if ((cmndSize) && (pCmndBuff != NULL))
+    {
+        if ((tempRet = mvSpiWrite(pCmndBuff, cmndSize)) != MV_OK)
+            ret = tempRet;
+    }
+
+    /* Read dummy bytes before real data.   */
+    while(dummyBytesToRead)
+    {
+        mvSpiRead(&dummyByte,1);
+        dummyBytesToRead--;
+    }
+
+    /* Then write the data buffer */
+    if ((rxDataSize) && (pRxDataBuff != NULL))
+    {
+        if ((tempRet = mvSpiRead(pRxDataBuff, rxDataSize)) != MV_OK)
+            ret = tempRet;
+    }
+
+	/* Finally deassert the chip select */
+	mvSpiCsDeassert();
+
+	return ret;
+}
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiCmnd.h b/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiCmnd.h
new file mode 100644
index 000000000000..329e26b7c032
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiCmnd.h
@@ -0,0 +1,82 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvSpiCmndhH
+#define __INCmvSpiCmndhH
+
+#include "mvTypes.h"
+
+/* Function Prototypes */
+
+/* Simultanuous Read and write */
+MV_STATUS	mvSpiReadAndWrite	(MV_U8* pRxBuff, MV_U8* pTxBuff, MV_U32 buffSize);
+
+/* write command - write a command and then write data */
+MV_STATUS	mvSpiWriteThenWrite (MV_U8* pCmndBuff, MV_U32 cmndSize, MV_U8* pTxDataBuff, MV_U32 txDataSize);
+
+/* read command - write a command and then read data by writing dummy data */
+MV_STATUS mvSpiWriteThenRead (MV_U8* pCmndBuff, MV_U32 cmndSize, MV_U8* pRxDataBuff,
+                              MV_U32 rxDataSize,MV_U32 dummyBytesToRead);
+
+#endif /* __INCmvSpiCmndhH */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiSpec.h b/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiSpec.h
new file mode 100644
index 000000000000..2879c672eb6f
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/spi/mvSpiSpec.h
@@ -0,0 +1,97 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvSpiSpecH
+#define __INCmvSpiSpecH
+
+/* Constants */
+#define		MV_SPI_WAIT_RDY_MAX_LOOP			100000
+#define		MV_SPI_16_BIT_CHUNK_SIZE			2
+#define		MV_SPI_DUMMY_WRITE_16BITS			0xFFFF
+#define		MV_SPI_DUMMY_WRITE_8BITS			0xFF
+
+/* Marvell Flash Device Controller Registers */
+#define		MV_SPI_CTRLR_OFST					0x10600
+#define		MV_SPI_IF_CTRL_REG					(MV_SPI_CTRLR_OFST + 0x00)
+#define		MV_SPI_IF_CONFIG_REG				(MV_SPI_CTRLR_OFST + 0x04)
+#define		MV_SPI_DATA_OUT_REG					(MV_SPI_CTRLR_OFST + 0x08)
+#define		MV_SPI_DATA_IN_REG					(MV_SPI_CTRLR_OFST + 0x0c)
+#define		MV_SPI_INT_CAUSE_REG				(MV_SPI_CTRLR_OFST + 0x10)
+#define		MV_SPI_INT_CAUSE_MASK_REG			(MV_SPI_CTRLR_OFST + 0x14)
+
+/* Serial Memory Interface Control Register Masks */
+#define		MV_SPI_CS_ENABLE_OFFSET				0		/* bit 0 */
+#define		MV_SPI_MEMORY_READY_OFFSET			1		/* bit 1 */
+#define		MV_SPI_CS_ENABLE_MASK				(0x1  << MV_SPI_CS_ENABLE_OFFSET)
+#define		MV_SPI_MEMORY_READY_MASK			(0x1  << MV_SPI_MEMORY_READY_OFFSET)
+
+/* Serial Memory Interface Configuration Register Masks */
+#define		MV_SPI_CLK_PRESCALE_OFFSET			0		/* bit 0-4 */
+#define		MV_SPI_BYTE_LENGTH_OFFSET			5		/* bit 5 */
+#define		MV_SPI_ADDRESS_BURST_LENGTH_OFFSET  8	    /* bit 8-9 */
+#define		MV_SPI_CLK_PRESCALE_MASK			(0x1F << MV_SPI_CLK_PRESCALE_OFFSET)
+#define		MV_SPI_BYTE_LENGTH_MASK				(0x1  << MV_SPI_BYTE_LENGTH_OFFSET)
+#define		MV_SPI_ADDRESS_BURST_LENGTH_MASK	(0x3  << MV_SPI_ADDRESS_BURST_LENGTH_OFFSET)
+
+#endif /* __INCmvSpiSpecH */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsi.c b/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsi.c
new file mode 100644
index 000000000000..a603f734f6f6
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsi.c
@@ -0,0 +1,1023 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#include "mvTwsi.h"
+#include "mvTwsiSpec.h"
+#include "cpu/mvCpu.h"
+
+
+/*#define MV_DEBUG*/
+#ifdef MV_DEBUG
+#define DB(x) x
+#else
+#define DB(x)
+#endif
+
+static MV_VOID twsiIntFlgClr(MV_U8 chanNum);
+static MV_BOOL twsiMainIntGet(MV_U8 chanNum);
+static MV_VOID twsiAckBitSet(MV_U8 chanNum);
+static MV_U32 twsiStsGet(MV_U8 chanNum);
+static MV_VOID twsiReset(MV_U8 chanNum);
+static MV_STATUS twsiAddr7BitSet(MV_U8 chanNum, MV_U32 deviceAddress,MV_TWSI_CMD command);
+static MV_STATUS twsiAddr10BitSet(MV_U8 chanNum, MV_U32 deviceAddress,MV_TWSI_CMD command);
+static MV_STATUS twsiDataTransmit(MV_U8 chanNum, MV_U8 *pBlock, MV_U32 blockSize);
+static MV_STATUS twsiDataReceive(MV_U8 chanNum, MV_U8 *pBlock, MV_U32 blockSize);
+static MV_STATUS twsiTargetOffsSet(MV_U8 chanNum, MV_U32 offset,MV_BOOL moreThen256);
+
+
+static MV_BOOL twsiTimeoutChk(MV_U32 timeout, const MV_8 *pString)
+{
+	if(timeout >= TWSI_TIMEOUT_VALUE)
+	{
+		DB(mvOsPrintf("%s",pString));
+		return MV_TRUE;
+	}
+	return MV_FALSE;
+
+}
+/*******************************************************************************
+* mvTwsiStartBitSet - Set start bit on the bus
+*
+* DESCRIPTION:
+*       This routine sets the start bit on the TWSI bus.
+*       The routine first checks for interrupt flag condition, then it sets
+*       the start bit  in the TWSI Control register.
+*       If the interrupt flag condition check previously was set, the function
+*       will clear it.
+*       The function then wait for the start bit to be cleared by the HW.
+*       Then it waits for the interrupt flag to be set and eventually, the
+*       TWSI status is checked to be 0x8 or 0x10(repeated start bit).
+*
+* INPUT:
+*       chanNum - TWSI channel.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_OK is start bit was set successfuly on the bus.
+*       MV_FAIL if interrupt flag was set before setting start bit.
+*
+*******************************************************************************/
+MV_STATUS mvTwsiStartBitSet(MV_U8 chanNum)
+{
+	MV_BOOL isIntFlag = MV_FALSE;
+	MV_U32 timeout, temp;
+
+	DB(mvOsPrintf("TWSI: mvTwsiStartBitSet \n"));
+	/* check Int flag */
+	if(twsiMainIntGet(chanNum))
+		isIntFlag = MV_TRUE;
+	/* set start Bit */
+	temp = MV_REG_READ(TWSI_CONTROL_REG(chanNum));
+	MV_REG_WRITE(TWSI_CONTROL_REG(chanNum), temp | TWSI_CONTROL_START_BIT);
+
+	/* in case that the int flag was set before i.e. repeated start bit */
+	if(isIntFlag){
+		DB(mvOsPrintf("TWSI: mvTwsiStartBitSet repeated start Bit\n"));
+		twsiIntFlgClr(chanNum);
+	}
+
+	/* wait for interrupt */
+	timeout = 0;
+	while(!twsiMainIntGet(chanNum) && (timeout++ < TWSI_TIMEOUT_VALUE));
+
+	/* check for timeout */
+	if(MV_TRUE == twsiTimeoutChk(timeout,"TWSI: mvTwsiStartBitSet ERROR - Start Clear bit TimeOut .\n"))
+		return MV_TIMEOUT;
+
+
+	/* check that start bit went down */
+	if((MV_REG_READ(TWSI_CONTROL_REG(chanNum)) & TWSI_CONTROL_START_BIT) != 0)
+	{
+		mvOsPrintf("TWSI: mvTwsiStartBitSet ERROR - start bit didn't went down\n");
+		return MV_FAIL;
+	}
+
+	/* check the status */
+	temp = twsiStsGet(chanNum);
+	if(( temp != TWSI_START_CON_TRA ) && ( temp != TWSI_REPEATED_START_CON_TRA ))
+	  {
+		mvOsPrintf("TWSI: mvTwsiStartBitSet ERROR - status %x after Set Start Bit. \n",temp);
+		return MV_FAIL;
+	}
+
+	return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvTwsiStopBitSet - Set stop bit on the bus
+*
+* DESCRIPTION:
+*       This routine set the stop bit on the TWSI bus.
+*       The function then wait for the stop bit to be cleared by the HW.
+*       Finally the function checks for status of 0xF8.
+*
+* INPUT:
+*	chanNum - TWSI channel
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE is stop bit was set successfuly on the bus.
+*
+*******************************************************************************/
+MV_STATUS mvTwsiStopBitSet(MV_U8 chanNum)
+{
+	MV_U32	timeout, temp;
+
+	/* Generate stop bit */
+	temp = MV_REG_READ(TWSI_CONTROL_REG(chanNum));
+	MV_REG_WRITE(TWSI_CONTROL_REG(chanNum), temp | TWSI_CONTROL_STOP_BIT);
+
+	twsiIntFlgClr(chanNum);
+
+	/* wait for stop bit to come down */
+	timeout = 0;
+	while( ((MV_REG_READ(TWSI_CONTROL_REG(chanNum)) & TWSI_CONTROL_STOP_BIT) != 0) && (timeout++ < TWSI_TIMEOUT_VALUE));
+
+	/* check for timeout */
+	if(MV_TRUE == twsiTimeoutChk(timeout,"TWSI: mvTwsiStopBitSet ERROR - Stop bit TimeOut .\n"))
+		return MV_TIMEOUT;
+
+	/* check that the stop bit went down */
+	if((MV_REG_READ(TWSI_CONTROL_REG(chanNum)) & TWSI_CONTROL_STOP_BIT) != 0)
+	{
+		mvOsPrintf("TWSI: mvTwsiStopBitSet ERROR - stop bit didn't went down. \n");
+		return MV_FAIL;
+	}
+
+	/* check the status */
+	temp = twsiStsGet(chanNum);
+	if( temp != TWSI_NO_REL_STS_INT_FLAG_IS_KEPT_0){
+		mvOsPrintf("TWSI: mvTwsiStopBitSet ERROR - status %x after Stop Bit. \n", temp);
+		return MV_FAIL;
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* twsiMainIntGet - Get twsi bit from main Interrupt cause.
+*
+* DESCRIPTION:
+*       This routine returns the twsi interrupt flag value.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE is interrupt flag is set, MV_FALSE otherwise.
+*
+*******************************************************************************/
+static MV_BOOL twsiMainIntGet(MV_U8 chanNum)
+{
+	MV_U32 temp;
+
+	/* get the int flag bit */
+
+	temp = MV_REG_READ(TWSI_CPU_MAIN_INT_CAUSE_REG);
+	if (temp & (TWSI0_CPU_MAIN_INT_BIT << chanNum))
+	    return MV_TRUE;
+
+	return MV_FALSE;
+}
+/*******************************************************************************
+* twsiIntFlgClr - Clear Interrupt flag.
+*
+* DESCRIPTION:
+*       This routine clears the interrupt flag. It does NOT poll the interrupt
+*       to make sure the clear. After clearing the interrupt, it waits for at
+*       least 1 miliseconds.
+*
+* INPUT:
+*	chanNum - TWSI channel
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+static MV_VOID twsiIntFlgClr(MV_U8 chanNum)
+{
+	MV_U32 temp;
+
+	/* wait for 1 mili to prevent TWSI register write after write problems */
+	mvOsDelay(1);
+	/* clear the int flag bit */
+	temp = MV_REG_READ(TWSI_CONTROL_REG(chanNum));
+	MV_REG_WRITE(TWSI_CONTROL_REG(chanNum),temp & ~(TWSI_CONTROL_INT_FLAG_SET));
+
+	/* wait for 1 mili sec for the clear to take effect */
+	mvOsDelay(1);
+
+	return;
+}
+
+
+/*******************************************************************************
+* twsiAckBitSet - Set acknowledge bit on the bus
+*
+* DESCRIPTION:
+*       This routine set the acknowledge bit on the TWSI bus.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+static MV_VOID twsiAckBitSet(MV_U8 chanNum)
+{
+	MV_U32 temp;
+
+	/*Set the Ack bit */
+	temp = MV_REG_READ(TWSI_CONTROL_REG(chanNum));
+	MV_REG_WRITE(TWSI_CONTROL_REG(chanNum), temp | TWSI_CONTROL_ACK);
+
+	/* Add delay of 1ms */
+	mvOsDelay(1);
+	return;
+}
+
+
+/*******************************************************************************
+* twsiInit - Initialize TWSI interface
+*
+* DESCRIPTION:
+*       This routine:
+*	-Reset the TWSI.
+*	-Initialize the TWSI clock baud rate according to given frequancy
+*	 parameter based on Tclk frequancy and enables TWSI slave.
+*       -Set the ack bit.
+*	-Assign the TWSI slave address according to the TWSI address Type.
+*
+*
+* INPUT:
+*	chanNum - TWSI channel
+*       frequancy - TWSI frequancy in KHz. (up to 100KHZ)
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Actual frequancy.
+*
+*******************************************************************************/
+MV_U32 mvTwsiInit(MV_U8 chanNum, MV_HZ frequancy, MV_U32 Tclk, MV_TWSI_ADDR *pTwsiAddr, MV_BOOL generalCallEnable)
+{
+	MV_U32	n,m,freq,margin,minMargin = 0xffffffff;
+	MV_U32	power;
+	MV_U32	actualFreq = 0,actualN = 0,actualM = 0,val;
+
+	if(frequancy > 100000)
+	{
+		mvOsPrintf("Warning TWSI frequancy is too high, please use up tp 100Khz. \n");
+	}
+
+	DB(mvOsPrintf("TWSI: mvTwsiInit - Tclk = %d freq = %d\n",Tclk,frequancy));
+	/* Calucalte N and M for the TWSI clock baud rate */
+	for(n = 0 ; n < 8 ; n++)
+	{
+		for(m = 0 ; m < 16 ; m++)
+		{
+			power = 2 << n; /* power = 2^(n+1) */
+			freq = Tclk/(10*(m+1)*power);
+			margin = MV_ABS(frequancy - freq);
+			if(margin < minMargin)
+			{
+				minMargin   = margin;
+				actualFreq  = freq;
+				actualN     = n;
+				actualM     = m;
+			}
+		}
+		}
+	DB(mvOsPrintf("TWSI: mvTwsiInit - actN %d actM %d actFreq %d\n",actualN , actualM, actualFreq));
+	/* Reset the TWSI logic */
+	twsiReset(chanNum);
+
+	/* Set the baud rate */
+	val = ((actualM<< TWSI_BAUD_RATE_M_OFFS) | actualN << TWSI_BAUD_RATE_N_OFFS);
+	MV_REG_WRITE(TWSI_STATUS_BAUDE_RATE_REG(chanNum),val);
+
+	/* Enable the TWSI and slave */
+	MV_REG_WRITE(TWSI_CONTROL_REG(chanNum), TWSI_CONTROL_ENA | TWSI_CONTROL_ACK);
+
+	/* set the TWSI slave address */
+	if( pTwsiAddr->type == ADDR10_BIT )/* 10 Bit deviceAddress */
+	{
+		/* writing the 2 most significant bits of the 10 bit address*/
+		val = ((pTwsiAddr->address & TWSI_SLAVE_ADDR_10BIT_MASK) >> TWSI_SLAVE_ADDR_10BIT_OFFS );
+		/* bits 7:3 must be 0x11110 */
+		val |= TWSI_SLAVE_ADDR_10BIT_CONST;
+		/* set GCE bit */
+		if(generalCallEnable)
+			val |= TWSI_SLAVE_ADDR_GCE_ENA;
+		/* write slave address */
+		MV_REG_WRITE(TWSI_SLAVE_ADDR_REG(chanNum),val);
+
+		/* writing the 8 least significant bits of the 10 bit address*/
+		val = (pTwsiAddr->address << TWSI_EXTENDED_SLAVE_OFFS) & TWSI_EXTENDED_SLAVE_MASK;
+		MV_REG_WRITE(TWSI_EXTENDED_SLAVE_ADDR_REG(chanNum), val);
+	}
+	else /*7 bit address*/
+	{
+		/* set the 7 Bits address */
+		MV_REG_WRITE(TWSI_EXTENDED_SLAVE_ADDR_REG(chanNum),0x0);
+		val = (pTwsiAddr->address << TWSI_SLAVE_ADDR_7BIT_OFFS) & TWSI_SLAVE_ADDR_7BIT_MASK;
+		MV_REG_WRITE(TWSI_SLAVE_ADDR_REG(chanNum), val);
+	}
+
+	/* unmask twsi int */
+    val = MV_REG_READ(TWSI_CONTROL_REG(chanNum));
+	MV_REG_WRITE(TWSI_CONTROL_REG(chanNum), val | TWSI_CONTROL_INT_ENA);
+	/* Add delay of 1ms */
+	mvOsDelay(1);
+
+   return actualFreq;
+}
+
+
+/*******************************************************************************
+* twsiStsGet - Get the TWSI status value.
+*
+* DESCRIPTION:
+*       This routine returns the TWSI status value.
+*
+* INPUT:
+*	chanNum - TWSI channel
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_U32 - the TWSI status.
+*
+*******************************************************************************/
+static MV_U32 twsiStsGet(MV_U8 chanNum)
+{
+    return MV_REG_READ(TWSI_STATUS_BAUDE_RATE_REG(chanNum));
+
+}
+
+/*******************************************************************************
+* twsiReset - Reset the TWSI.
+*
+* DESCRIPTION:
+*       Resets the TWSI logic and sets all TWSI registers to their reset values.
+*
+* INPUT:
+*      chanNum - TWSI channel
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None
+*
+*******************************************************************************/
+static MV_VOID twsiReset(MV_U8 chanNum)
+{
+	/* Reset the TWSI logic */
+	MV_REG_WRITE(TWSI_SOFT_RESET_REG(chanNum),0);
+
+	/* wait for 2 mili sec */
+	mvOsDelay(2);
+
+	return;
+}
+
+
+
+
+/******************************* POLICY ****************************************/
+
+
+
+/*******************************************************************************
+* mvTwsiAddrSet - Set address on TWSI bus.
+*
+* DESCRIPTION:
+*       This function Set address (7 or 10 Bit address) on the Twsi Bus.
+*
+* INPUT:
+*	chanNum - TWSI channel
+*       pTwsiAddr - twsi address.
+*	command	 - read / write .
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_OK - if setting the address completed succesfully.
+*	MV_FAIL otherwmise.
+*
+*******************************************************************************/
+MV_STATUS mvTwsiAddrSet(MV_U8 chanNum, MV_TWSI_ADDR *pTwsiAddr, MV_TWSI_CMD command)
+{
+	DB(mvOsPrintf("TWSI: mvTwsiAddr7BitSet addr %x , type %d, cmd is %s\n",pTwsiAddr->address,\
+					pTwsiAddr->type, ((command==MV_TWSI_WRITE)?"Write":"Read") ));
+	/* 10 Bit address */
+	if(pTwsiAddr->type == ADDR10_BIT)
+	{
+		return twsiAddr10BitSet(chanNum, pTwsiAddr->address,command);
+	}
+	/* 7 Bit address */
+	else
+	{
+		return twsiAddr7BitSet(chanNum, pTwsiAddr->address,command);
+	}
+
+}
+
+/*******************************************************************************
+* twsiAddr10BitSet - Set 10 Bit address on TWSI bus.
+*
+* DESCRIPTION:
+*       There are two address phases:
+*       1) Write '11110' to data register bits [7:3] and 10-bit address MSB
+*          (bits [9:8]) to data register bits [2:1] plus a write(0) or read(1) bit
+*          to the Data register. Then it clears interrupt flag which drive
+*          the address on the TWSI bus. The function then waits for interrupt
+*          flag to be active and status 0x18 (write) or 0x40 (read) to be set.
+*       2) write the rest of 10-bit address to data register and clears
+*          interrupt flag which drive the address on the TWSI bus. The
+*          function then waits for interrupt flag to be active and status
+*          0xD0 (write) or 0xE0 (read) to be set.
+*
+* INPUT:
+*	chanNum - TWSI channel
+*       deviceAddress - twsi address.
+*	command	 - read / write .
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_OK - if setting the address completed succesfully.
+*	MV_FAIL otherwmise.
+*
+*******************************************************************************/
+static MV_STATUS twsiAddr10BitSet(MV_U8 chanNum, MV_U32 deviceAddress,MV_TWSI_CMD command)
+{
+	MV_U32 val,timeout;
+
+	/* writing the 2 most significant bits of the 10 bit address*/
+	val = ((deviceAddress & TWSI_DATA_ADDR_10BIT_MASK) >> TWSI_DATA_ADDR_10BIT_OFFS );
+	/* bits 7:3 must be 0x11110 */
+	val |= TWSI_DATA_ADDR_10BIT_CONST;
+	/* set command */
+	val |= command;
+	MV_REG_WRITE(TWSI_DATA_REG(chanNum), val);
+	/* WA add a delay */
+	mvOsDelay(1);
+
+	/* clear Int flag */
+	twsiIntFlgClr(chanNum);
+
+	/* wait for Int to be Set */
+	timeout = 0;
+	while( !twsiMainIntGet(chanNum) && (timeout++ < TWSI_TIMEOUT_VALUE));
+
+	/* check for timeout */
+	if(MV_TRUE == twsiTimeoutChk(timeout,"TWSI: twsiAddr10BitSet ERROR - 1st addr (10Bit) Int TimeOut.\n"))
+		return MV_TIMEOUT;
+
+	/* check the status */
+	val = twsiStsGet(chanNum);
+	if(( (val != TWSI_AD_PLS_RD_BIT_TRA_ACK_REC) && (command == MV_TWSI_READ ) ) ||
+	   ( (val != TWSI_AD_PLS_WR_BIT_TRA_ACK_REC) && (command == MV_TWSI_WRITE) ))
+	{
+		mvOsPrintf("TWSI: twsiAddr10BitSet ERROR - status %x 1st addr (10 Bit) in %s mode.\n"\
+						,val, ((command==MV_TWSI_WRITE)?"Write":"Read") );
+		return MV_FAIL;
+	}
+
+	/* set 	8 LSB of the address */
+	val = (deviceAddress << TWSI_DATA_ADDR_7BIT_OFFS) & TWSI_DATA_ADDR_7BIT_MASK;
+	MV_REG_WRITE(TWSI_DATA_REG(chanNum), val);
+
+	/* clear Int flag */
+	twsiIntFlgClr(chanNum);
+
+	/* wait for Int to be Set */
+	timeout = 0;
+	while( !twsiMainIntGet(chanNum) && (timeout++ < TWSI_TIMEOUT_VALUE));
+
+	/* check for timeout */
+	if(MV_TRUE == twsiTimeoutChk(timeout,"TWSI: twsiAddr10BitSet ERROR - 2nd (10 Bit) Int TimOut.\n"))
+		return MV_TIMEOUT;
+
+	/* check the status */
+	val = twsiStsGet(chanNum);
+	if(( (val != TWSI_SEC_AD_PLS_RD_BIT_TRA_ACK_REC) && (command == MV_TWSI_READ ) ) ||
+	   ( (val != TWSI_SEC_AD_PLS_WR_BIT_TRA_ACK_REC) && (command == MV_TWSI_WRITE) ))
+	{
+		mvOsPrintf("TWSI: twsiAddr10BitSet ERROR - status %x 2nd addr(10 Bit) in %s mode.\n"\
+						,val, ((command==MV_TWSI_WRITE)?"Write":"Read") );
+		return MV_FAIL;
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* twsiAddr7BitSet - Set 7 Bit address on TWSI bus.
+*
+* DESCRIPTION:
+*       This function writes 7 bit address plus a write or read bit to the
+*       Data register. Then it clears interrupt flag which drive the address on
+*       the TWSI bus. The function then waits for interrupt flag to be active
+*       and status 0x18 (write) or 0x40 (read) to be set.
+*
+* INPUT:
+*	chanNum - TWSI channel
+*       deviceAddress - twsi address.
+*	command	 - read / write .
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_OK - if setting the address completed succesfully.
+*	MV_FAIL otherwmise.
+*
+*******************************************************************************/
+static MV_STATUS twsiAddr7BitSet(MV_U8 chanNum, MV_U32 deviceAddress,MV_TWSI_CMD command)
+{
+	MV_U32 val,timeout;
+
+	/* set the address */
+	val = (deviceAddress << TWSI_DATA_ADDR_7BIT_OFFS) & TWSI_DATA_ADDR_7BIT_MASK;
+	/* set command */
+	val |= command;
+	MV_REG_WRITE(TWSI_DATA_REG(chanNum), val);
+	/* WA add a delay */
+	mvOsDelay(1);
+
+	/* clear Int flag */
+	twsiIntFlgClr(chanNum);
+
+	/* wait for Int to be Set */
+	timeout = 0;
+	while( !twsiMainIntGet(chanNum) && (timeout++ < TWSI_TIMEOUT_VALUE));
+
+	/* check for timeout */
+	if(MV_TRUE == twsiTimeoutChk(timeout,"TWSI: twsiAddr7BitSet ERROR - Addr (7 Bit) int TimeOut.\n"))
+		return MV_TIMEOUT;
+
+	/* check the status */
+	val = twsiStsGet(chanNum);
+	if(( (val != TWSI_AD_PLS_RD_BIT_TRA_ACK_REC) && (command == MV_TWSI_READ ) ) ||
+	   ( (val != TWSI_AD_PLS_WR_BIT_TRA_ACK_REC) && (command == MV_TWSI_WRITE) ))
+	{
+		/* only in debug, since in boot we try to read the SPD of both DRAM, and we don't
+			want error messeges in case DIMM doesn't exist. */
+		DB(mvOsPrintf("TWSI: twsiAddr7BitSet ERROR - status %x addr (7 Bit) in %s mode.\n"\
+						,val,((command==MV_TWSI_WRITE)?"Write":"Read") ));
+		return MV_FAIL;
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* twsiDataWrite - Trnasmit a data block over TWSI bus.
+*
+* DESCRIPTION:
+*       This function writes a given data block to TWSI bus in 8 bit granularity.
+*	first The function waits for interrupt flag to be active then
+*       For each 8-bit data:
+*        The function writes data to data register. It then clears
+*        interrupt flag which drives the data on the TWSI bus.
+*        The function then waits for interrupt flag to be active and status
+*        0x28 to be set.
+*
+*
+* INPUT:
+*	chanNum - TWSI channel
+*       pBlock - Data block.
+*	blockSize - number of chars in pBlock.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_OK - if transmiting the block completed succesfully,
+*	MV_BAD_PARAM - if pBlock is NULL,
+*	MV_FAIL otherwmise.
+*
+*******************************************************************************/
+static MV_STATUS twsiDataTransmit(MV_U8 chanNum, MV_U8 *pBlock, MV_U32 blockSize)
+{
+	MV_U32 timeout, temp, blockSizeWr = blockSize;
+
+	if(NULL == pBlock)
+		return MV_BAD_PARAM;
+
+	/* wait for Int to be Set */
+	timeout = 0;
+	while( !twsiMainIntGet(chanNum) && (timeout++ < TWSI_TIMEOUT_VALUE));
+
+	/* check for timeout */
+	if(MV_TRUE == twsiTimeoutChk(timeout,"TWSI: twsiDataTransmit ERROR - Read Data Int TimeOut.\n"))
+		return MV_TIMEOUT;
+
+	while(blockSizeWr)
+	{
+		/* write the data*/
+		MV_REG_WRITE(TWSI_DATA_REG(chanNum),(MV_U32)*pBlock);
+		DB(mvOsPrintf("TWSI: twsiDataTransmit place = %d write %x \n",\
+						blockSize - blockSizeWr, *pBlock));
+		pBlock++;
+		blockSizeWr--;
+
+		twsiIntFlgClr(chanNum);
+
+		/* wait for Int to be Set */
+		timeout = 0;
+		while( !twsiMainIntGet(chanNum) && (timeout++ < TWSI_TIMEOUT_VALUE));
+
+		/* check for timeout */
+		if(MV_TRUE == twsiTimeoutChk(timeout,"TWSI: twsiDataTransmit ERROR - Read Data Int TimeOut.\n"))
+			return MV_TIMEOUT;
+
+		/* check the status */
+		temp = twsiStsGet(chanNum);
+		if(temp != TWSI_M_TRAN_DATA_BYTE_ACK_REC)
+		{
+			mvOsPrintf("TWSI: twsiDataTransmit ERROR - status %x in write trans\n",temp);
+			return MV_FAIL;
+		}
+
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* twsiDataReceive - Receive data block from TWSI bus.
+*
+* DESCRIPTION:
+*       This function receive data block from TWSI bus in 8bit granularity
+*       into pBlock buffer.
+*	first The function waits for interrupt flag to be active then
+*       For each 8-bit data:
+*        It clears the interrupt flag which allows the next data to be
+*        received from TWSI bus.
+*	 The function waits for interrupt flag to be active,
+*	 and status reg is 0x50.
+*	 Then the function reads data from data register, and copies it to
+*	 the given buffer.
+*
+* INPUT:
+*	chanNum - TWSI channel
+*       blockSize - number of bytes to read.
+*
+* OUTPUT:
+*       pBlock - Data block.
+*
+* RETURN:
+*       MV_OK - if receive transaction completed succesfully,
+*	MV_BAD_PARAM - if pBlock is NULL,
+*	MV_FAIL otherwmise.
+*
+*******************************************************************************/
+static MV_STATUS twsiDataReceive(MV_U8 chanNum, MV_U8 *pBlock, MV_U32 blockSize)
+{
+	MV_U32 timeout, temp, blockSizeRd = blockSize;
+	if(NULL == pBlock)
+		return MV_BAD_PARAM;
+
+	/* wait for Int to be Set */
+	timeout = 0;
+	while( !twsiMainIntGet(chanNum) && (timeout++ < TWSI_TIMEOUT_VALUE));
+
+	/* check for timeout */
+	if(MV_TRUE == twsiTimeoutChk(timeout,"TWSI: twsiDataReceive ERROR - Read Data int Time out .\n"))
+		return MV_TIMEOUT;
+
+	while(blockSizeRd)
+	{
+		if(blockSizeRd == 1)
+		{
+			/* clear ack and Int flag */
+			temp = MV_REG_READ(TWSI_CONTROL_REG(chanNum));
+			temp &=  ~(TWSI_CONTROL_ACK);
+			MV_REG_WRITE(TWSI_CONTROL_REG(chanNum), temp);
+		}
+		twsiIntFlgClr(chanNum);
+		/* wait for Int to be Set */
+		timeout = 0;
+		while( (!twsiMainIntGet(chanNum)) && (timeout++ < TWSI_TIMEOUT_VALUE));
+
+		/* check for timeout */
+		if(MV_TRUE == twsiTimeoutChk(timeout,"TWSI: twsiDataReceive ERROR - Read Data Int Time out .\n"))
+			return MV_TIMEOUT;
+
+		/* check the status */
+		temp = twsiStsGet(chanNum);
+		if((temp != TWSI_M_REC_RD_DATA_ACK_TRA) && (blockSizeRd !=1))
+		{
+			mvOsPrintf("TWSI: twsiDataReceive ERROR - status %x in read trans \n",temp);
+			return MV_FAIL;
+		}
+		else if((temp != TWSI_M_REC_RD_DATA_ACK_NOT_TRA) && (blockSizeRd ==1))
+		{
+			mvOsPrintf("TWSI: twsiDataReceive ERROR - status %x in Rd Terminate\n",temp);
+			return MV_FAIL;
+		}
+
+		/* read the data*/
+		*pBlock = (MV_U8)MV_REG_READ(TWSI_DATA_REG(chanNum));
+		DB(mvOsPrintf("TWSI: twsiDataReceive  place %d read %x \n",\
+						blockSize - blockSizeRd,*pBlock));
+		pBlock++;
+		blockSizeRd--;
+	}
+
+	return MV_OK;
+}
+
+
+
+/*******************************************************************************
+* twsiTargetOffsSet - Set TWST target offset on TWSI bus.
+*
+* DESCRIPTION:
+*       The function support TWSI targets that have inside address space (for
+*       example EEPROMs). The function:
+*       1) Convert the given offset into pBlock and size.
+*		in case the offset should be set to a TWSI slave which support
+*		more then 256 bytes offset, the offset setting will be done
+*		in 2 transactions.
+*       2) Use twsiDataTransmit to place those on the bus.
+*
+* INPUT:
+*	chanNum - TWSI channel
+*       offset - offset to be set on the EEPROM device.
+*	moreThen256 - whether the EEPROM device support more then 256 byte offset.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_OK - if setting the offset completed succesfully.
+*	MV_FAIL otherwmise.
+*
+*******************************************************************************/
+static MV_STATUS twsiTargetOffsSet(MV_U8 chanNum, MV_U32 offset, MV_BOOL moreThen256)
+{
+	MV_U8 offBlock[2];
+	MV_U32 offSize;
+
+	if(moreThen256 == MV_TRUE)
+	{
+		offBlock[0] = (offset >> 8) & 0xff;
+		offBlock[1] = offset & 0xff;
+		offSize = 2;
+	}
+	else
+	{
+		offBlock[0] = offset & 0xff;
+		offSize = 1;
+	}
+	DB(mvOsPrintf("TWSI: twsiTargetOffsSet offSize = %x addr1 = %x addr2 = %x\n",\
+							offSize,offBlock[0],offBlock[1]));
+	return twsiDataTransmit(chanNum, offBlock, offSize);
+
+}
+
+/*******************************************************************************
+* mvTwsiRead - Read data block from a TWSI Slave.
+*
+* DESCRIPTION:
+*       The function calls the following functions:
+*       -) mvTwsiStartBitSet();
+*	if(EEPROM device)
+*       	-) mvTwsiAddrSet(w);
+*       	-) twsiTargetOffsSet();
+*       	-) mvTwsiStartBitSet();
+*       -) mvTwsiAddrSet(r);
+*       -) twsiDataReceive();
+*       -) mvTwsiStopBitSet();
+*
+* INPUT:
+*	chanNum - TWSI channel
+*      	pTwsiSlave - Twsi Slave structure.
+*       blockSize - number of bytes to read.
+*
+* OUTPUT:
+*      	pBlock - Data block.
+*
+* RETURN:
+*       MV_OK - if EEPROM read transaction completed succesfully,
+* 	MV_BAD_PARAM - if pBlock is NULL,
+*	MV_FAIL otherwmise.
+*
+*******************************************************************************/
+MV_STATUS mvTwsiRead(MV_U8 chanNum, MV_TWSI_SLAVE *pTwsiSlave, MV_U8 *pBlock, MV_U32 blockSize)
+{
+	if((NULL == pBlock) || (NULL == pTwsiSlave))
+		return MV_BAD_PARAM;
+	if(MV_OK != mvTwsiStartBitSet(chanNum))
+	{
+		mvTwsiStopBitSet(chanNum);
+		 return MV_FAIL;
+	}
+
+	DB(mvOsPrintf("TWSI: mvTwsiEepromRead after mvTwsiStartBitSet\n"));
+
+	/* in case offset exsist (i.e. eeprom ) */
+	if(MV_TRUE == pTwsiSlave->validOffset)
+	{
+		if(MV_OK != mvTwsiAddrSet(chanNum, &(pTwsiSlave->slaveAddr), MV_TWSI_WRITE))
+		{
+			mvTwsiStopBitSet(chanNum);
+			return MV_FAIL;
+		}
+		DB(mvOsPrintf("TWSI: mvTwsiEepromRead after mvTwsiAddrSet\n"));
+		if(MV_OK != twsiTargetOffsSet(chanNum, pTwsiSlave->offset, pTwsiSlave->moreThen256))
+		{
+			mvTwsiStopBitSet(chanNum);
+			return MV_FAIL;
+		}
+		DB(mvOsPrintf("TWSI: mvTwsiEepromRead after twsiTargetOffsSet\n"));
+		if(MV_OK != mvTwsiStartBitSet(chanNum))
+		{
+			mvTwsiStopBitSet(chanNum);
+			return MV_FAIL;
+		}
+		DB(mvOsPrintf("TWSI: mvTwsiEepromRead after mvTwsiStartBitSet\n"));
+	}
+	if(MV_OK != mvTwsiAddrSet(chanNum, &(pTwsiSlave->slaveAddr), MV_TWSI_READ))
+	{
+		mvTwsiStopBitSet(chanNum);
+		return MV_FAIL;
+	}
+	DB(mvOsPrintf("TWSI: mvTwsiEepromRead after mvTwsiAddrSet\n"));
+	if(MV_OK != twsiDataReceive(chanNum, pBlock, blockSize))
+	{
+		mvTwsiStopBitSet(chanNum);
+		return MV_FAIL;
+	}
+	DB(mvOsPrintf("TWSI: mvTwsiEepromRead after twsiDataReceive\n"));
+
+	if(MV_OK != mvTwsiStopBitSet(chanNum))
+	{
+		return MV_FAIL;
+	}
+
+	twsiAckBitSet(chanNum);
+
+	DB(mvOsPrintf("TWSI: mvTwsiEepromRead after mvTwsiStopBitSet\n"));
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvTwsiWrite - Write data block to a TWSI Slave.
+*
+* DESCRIPTION:
+*       The function calls the following functions:
+*       -) mvTwsiStartBitSet();
+*       -) mvTwsiAddrSet();
+*	-)if(EEPROM device)
+*       	-) twsiTargetOffsSet();
+*       -) twsiDataTransmit();
+*       -) mvTwsiStopBitSet();
+*
+* INPUT:
+*	chanNum - TWSI channel
+*      	eepromAddress - eeprom address.
+*       blockSize - number of bytes to write.
+*      	pBlock - Data block.
+*
+* OUTPUT:
+*	None
+*
+* RETURN:
+*       MV_OK - if EEPROM read transaction completed succesfully.
+*	MV_BAD_PARAM - if pBlock is NULL,
+*	MV_FAIL otherwmise.
+*
+* NOTE: Part of the EEPROM, required that the offset will be aligned to the
+*	max write burst supported.
+*******************************************************************************/
+MV_STATUS mvTwsiWrite(MV_U8 chanNum, MV_TWSI_SLAVE *pTwsiSlave, MV_U8 *pBlock, MV_U32 blockSize)
+{
+	if((NULL == pBlock) || (NULL == pTwsiSlave))
+		return MV_BAD_PARAM;
+
+	if(MV_OK != mvTwsiStartBitSet(chanNum))
+	{
+		mvTwsiStopBitSet(chanNum);
+		return MV_FAIL;
+	}
+
+	DB(mvOsPrintf("TWSI: mvTwsiEepromWrite after mvTwsiStartBitSet\n"));
+	if(MV_OK != mvTwsiAddrSet(chanNum, &(pTwsiSlave->slaveAddr), MV_TWSI_WRITE))
+	{
+		mvTwsiStopBitSet(chanNum);
+		return MV_FAIL;
+	}
+	DB(mvOsPrintf("TWSI :mvTwsiEepromWrite after mvTwsiAddrSet\n"));
+
+	/* in case offset exsist (i.e. eeprom ) */
+	if(MV_TRUE == pTwsiSlave->validOffset)
+	{
+		if(MV_OK != twsiTargetOffsSet(chanNum, pTwsiSlave->offset, pTwsiSlave->moreThen256))
+		{
+			mvTwsiStopBitSet(chanNum);
+			return MV_FAIL;
+		}
+		DB(mvOsPrintf("TWSI: mvTwsiEepromWrite after twsiTargetOffsSet\n"));
+	}
+	if(MV_OK != twsiDataTransmit(chanNum, pBlock, blockSize))
+	{
+		mvTwsiStopBitSet(chanNum);
+		return MV_FAIL;
+	}
+	DB(mvOsPrintf("TWSI: mvTwsiEepromWrite after twsiDataTransmit\n"));
+	if(MV_OK != mvTwsiStopBitSet(chanNum))
+	{
+		return MV_FAIL;
+	}
+	DB(mvOsPrintf("TWSI: mvTwsiEepromWrite after mvTwsiStopBitSet\n"));
+
+	return MV_OK;
+}
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsi.h b/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsi.h
new file mode 100644
index 000000000000..463570173756
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsi.h
@@ -0,0 +1,120 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#ifndef __INCmvTwsiH
+#define __INCmvTwsiH
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* need to update this includes */
+#include "twsi/mvTwsiSpec.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+
+
+/* The TWSI interface supports both 7-bit and 10-bit addressing.            */
+/* This enumerator describes addressing type.                               */
+typedef enum _mvTwsiAddrType
+{
+    ADDR7_BIT,                      /* 7 bit address    */
+    ADDR10_BIT                      /* 10 bit address   */
+}MV_TWSI_ADDR_TYPE;
+
+/* This structure describes TWSI address.                                   */
+typedef struct _mvTwsiAddr
+{
+    MV_U32              address;    /* address          */
+    MV_TWSI_ADDR_TYPE   type;       /* Address type     */
+}MV_TWSI_ADDR;
+
+/* This structure describes a TWSI slave.                                   */
+typedef struct _mvTwsiSlave
+{
+    MV_TWSI_ADDR	slaveAddr;
+    MV_BOOL 		validOffset;		/* whether the slave has offset (i.e. Eeprom  etc.) 	*/
+    MV_U32		offset;		/* offset in the slave.					*/
+    MV_BOOL 		moreThen256;	/* whether the ofset is bigger then 256 		*/
+}MV_TWSI_SLAVE;
+
+/* This enumerator describes TWSI protocol commands.                        */
+typedef enum _mvTwsiCmd
+{
+    MV_TWSI_WRITE,   /* TWSI write command - 0 according to spec   */
+    MV_TWSI_READ   /* TWSI read command  - 1 according to spec */
+}MV_TWSI_CMD;
+
+MV_STATUS mvTwsiStartBitSet(MV_U8 chanNum);
+MV_STATUS mvTwsiStopBitSet(MV_U8 chanNum);
+MV_STATUS mvTwsiAddrSet(MV_U8 chanNum, MV_TWSI_ADDR *twsiAddr, MV_TWSI_CMD command);
+
+MV_U32 mvTwsiInit(MV_U8 chanNum, MV_KHZ frequancy, MV_U32 Tclk, MV_TWSI_ADDR *twsiAddr, MV_BOOL generalCallEnable);
+MV_STATUS mvTwsiRead (MV_U8 chanNum, MV_TWSI_SLAVE *twsiSlave, MV_U8 *pBlock, MV_U32 blockSize);
+MV_STATUS mvTwsiWrite(MV_U8 chanNum, MV_TWSI_SLAVE *twsiSlave, MV_U8 *pBlock, MV_U32 blockSize);
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INCmvTwsiH */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsiEeprom.S b/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsiEeprom.S
new file mode 100644
index 000000000000..1a677981452f
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsiEeprom.S
@@ -0,0 +1,457 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+/* includes */
+#define MV_ASMLANGUAGE
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#include "boardEnv/mvBoardEnvSpec.h"
+#include "mvOsAsm.h"
+#include "mvTwsiSpec.h"
+#include "mvSysHwConfig.h"
+#include "ctrlEnv/sys/mvCpuIfRegs.h"
+#include "mvCommon.h"
+
+#define I2C_CH MV_BOARD_DIMM_I2C_CHANNEL
+
+/* defines */
+/* defines  */
+
+
+        .data
+        .global _i2cInit
+        .global _i2cRead
+
+        .text
+
+/*******************************************************************************
+* _i2cInit - Initialize TWSI interface
+*
+* DESCRIPTION:
+*       The function performs TWSI interface initialization. It resets the
+*       TWSI state machine and initialize its clock to 100KHz assuming Tclock
+*       of 133MHz.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+_i2cInit:
+        mov     r9, LR     /* Save link register */
+        mov     r0, #0     /* Make sure r0 is zero */
+
+        /* Reset the i2c Mechanism first */
+        MV_REG_WRITE_ASM (r0, r1, TWSI_SOFT_RESET_REG(I2C_CH))
+
+        bl      _twsiDelay
+        bl      _twsiDelay
+
+        /* Initializing the I2C mechanism. Assuming Tclock frequency          */
+        /* of 166MHz. The I2C frequency in that case will be 100KHz.          */
+        /* For this settings, M = 9 and N = 3. Set the baud-rate with the     */
+        /* value of 0x2b (freq of ==> 100KHz                                  */
+        /* see spec for more details about the calculation of this value)     */
+        mov     r6, #(9 << 3 | 3)
+        MV_REG_WRITE_ASM (r6, r1, TWSI_STATUS_BAUDE_RATE_REG(I2C_CH))
+
+        /* Enable the I2C master */
+	/* Enable TWSI interrupt in main mask reg */
+        mov     r6, #0xC4
+        MV_REG_WRITE_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+
+        /* Let the slow TWSI machine get used to the idea that it is enabled  */
+        bl      _twsiDelay
+
+
+        mov     PC, r9         /* r9 is saved link register */
+
+/*******************************************************************************
+* _twsiDelay - Perform delay.
+*
+* DESCRIPTION:
+*       The function performs a delay to enable TWSI logic to stable.
+*
+* INPUT:
+*       None.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+_twsiDelay:
+        mov     r10, #0x100000 /*was 0x400*/
+
+_twsiDelayLoop:
+        subs    r10, r10, #1
+        bne     _twsiDelayLoop
+
+        mov     PC, LR
+
+/*******************************************************************************
+* _i2cRead - Read byte from I2C EEPROM device.
+*
+* DESCRIPTION:
+*       The function returns a byte from I2C EEPROM device.
+*       The EEPROM device is 7-bit address type.
+*
+* INPUT:
+*       r4 has the DIMM0 base address with shift 1 bit to the left
+*       r7 has the EEPROM offset
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       r4 returns '0' if address can not be read.
+*       r7 has byte value in case read is successful.
+*
+*******************************************************************************/
+_i2cRead:
+        mov     r9, LR     /* Save link register */
+
+        /* Transmit the device address and desired offset within the EEPROM. */
+
+        /* Generate Start Bit */
+        MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+        orr     r6, r6, #TWSI_CONTROL_START_BIT
+        MV_REG_WRITE_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+
+        /* Wait for the interrupt flag (bit3) to be set  */
+        mov     r10, #0x50000
+loop_1:
+        subs    r10, r10, #1
+        beq     loop_1_timeout
+#ifdef MV78XX0
+        MV_REG_READ_ASM (r6, r1, CPU_INT_LOW_REG(I2C_CH))
+        tst     r6, #BIT2
+#else
+        MV_REG_READ_ASM (r6, r1, CPU_MAIN_INT_CAUSE_REG)
+        tst     r6, #BIT5
+#endif
+        beq     loop_1
+
+loop_1_timeout:
+
+        /* Wait for the start bit to be reset by HW */
+        mov     r10, #0x50000
+loop_2:
+        subs    r10, r10, #1
+        beq     loop_2_timeout
+        MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+        tst     r6, #TWSI_CONTROL_START_BIT
+        bne     loop_2
+
+loop_2_timeout:
+
+        /* Wait for the status TWSI_START_CONDITION_TRA = 0x8 */
+        mov     r10, #0x50000
+loop_3:
+        subs    r10, r10, #1
+        beq     loop_3_timeout
+        MV_REG_READ_ASM (r6, r1, TWSI_STATUS_BAUDE_RATE_REG(I2C_CH))
+        cmp     r6, #0x08
+        bne     loop_3
+
+loop_3_timeout:
+
+        /* writing the address of (DIMM0/1 << 1) with write indication */
+        mov     r6, r4, LSL #1 /* Write operation address bit 0 must be 0 */
+        MV_REG_WRITE_ASM (r6, r1, TWSI_DATA_REG(I2C_CH))
+
+        bl      _twsiDelay
+        /* Clear the interrupt flag */
+        MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+        bic     r6, r6, #TWSI_CONTROL_INT_FLAG_SET
+        MV_REG_WRITE_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+        bl      _twsiDelay
+
+        /* Waiting for the interrupt flag to be set which means that the
+           address has been transmitted                                  */
+loop_4:
+#ifdef MV78XX0
+        MV_REG_READ_ASM (r6, r1, CPU_INT_LOW_REG(I2C_CH))
+        tst     r6, #BIT2
+#else
+        MV_REG_READ_ASM (r6, r1, CPU_MAIN_INT_CAUSE_REG)
+        tst     r6, #BIT5
+#endif
+        beq     loop_4       /* if tst = 0, then the bit is not set yet */
+
+        /* Wait for status TWSI_ADDR_PLUS_WRITE_BIT_TRA_ACK_REC = 0x18 */
+        mov     r10, #0x50000         /* Set r10 to 0x50000 =~ 328,000 */
+
+loop_5:
+        subs    r10, r10, #1          /* timeout count down         */
+        bne     testStatus
+        mov     r4, #0                /* r4 = 0 -> operation failed */
+        b       exit_i2cRead          /* Exit if timeout (No DIMM)  */
+
+testStatus:
+        MV_REG_READ_ASM (r6, r1, TWSI_STATUS_BAUDE_RATE_REG(I2C_CH))
+        cmp     r6, #0x18
+        bne     loop_5
+
+
+        /* check if the offset is bigger than 256 byte*/
+        tst     r7, #0x80000000
+        bne     great_than_256
+
+        /* Write the offset to be read from the DIMM EEPROM */
+        MV_REG_WRITE_ASM (r7, r1, TWSI_DATA_REG(I2C_CH))
+
+        b after_offset
+
+great_than_256:
+        mov     r10, r7, LSR #8
+        and     r10, r10, #0xff
+        /* Write the offset0 to be read from the  EEPROM */
+        MV_REG_WRITE_ASM (r10, r1, TWSI_DATA_REG(I2C_CH))
+
+        /* Clear the interrupt flag ==> signaling that the address can now
+           be transmited                                                    */
+
+        bl      _twsiDelay
+        MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+        bic     r6, r6, #TWSI_CONTROL_INT_FLAG_SET
+        MV_REG_WRITE_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+        bl      _twsiDelay
+
+        /* Wait for the interrupt to be set again ==> address has transmited */
+loop_6_1:
+#ifdef MV78XX0
+        MV_REG_READ_ASM (r6, r1, CPU_INT_LOW_REG(I2C_CH))
+        tst     r6, #BIT2
+#else
+        MV_REG_READ_ASM (r6, r1, CPU_MAIN_INT_CAUSE_REG)
+        tst     r6, #BIT5
+#endif
+        beq     loop_6_1
+
+        /* Wait for status TWSI_MAS_TRAN_DATA_BYTE_ACK_REC = 0x28 */
+loop_7_1:
+        MV_REG_READ_ASM (r6, r1, TWSI_STATUS_BAUDE_RATE_REG(I2C_CH))
+        cmp     r6, #0x28
+        bne     loop_7_1
+
+
+        mov     r10, r7
+        and     r10, r10, #0xff
+        /* Write the offset1 to be read from the  EEPROM */
+        MV_REG_WRITE_ASM (r10, r1, TWSI_DATA_REG(I2C_CH))
+
+
+
+after_offset:
+
+        /* Clear the interrupt flag ==> signaling that the address can now
+           be transmited                                                    */
+
+        bl      _twsiDelay
+        MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+        bic     r6, r6, #TWSI_CONTROL_INT_FLAG_SET
+        MV_REG_WRITE_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+        bl      _twsiDelay
+
+        /* Wait for the interrupt to be set again ==> address has transmited */
+loop_6:
+#ifdef MV78XX0
+        MV_REG_READ_ASM (r6, r1, CPU_INT_LOW_REG(I2C_CH))
+        tst     r6, #BIT2
+#else
+        MV_REG_READ_ASM (r6, r1, CPU_MAIN_INT_CAUSE_REG)
+        tst     r6, #BIT5
+#endif
+        beq     loop_6
+
+        /* Wait for status TWSI_MAS_TRAN_DATA_BYTE_ACK_REC = 0x28 */
+loop_7:
+        MV_REG_READ_ASM (r6, r1, TWSI_STATUS_BAUDE_RATE_REG(I2C_CH))
+        cmp     r6, #0x28
+        bne     loop_7
+
+        /* Retransmit the device address with read indication to get the data */
+
+        /* generate a repeated start bit */
+        MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+        orr     r6, r6, #TWSI_CONTROL_START_BIT
+        MV_REG_WRITE_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+
+
+        /* Clear the interrupt flag ==> the start bit will be transmitted. */
+        bl      _twsiDelay
+        MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+        bic     r6, r6, #TWSI_CONTROL_INT_FLAG_SET
+        MV_REG_WRITE_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+        bl      _twsiDelay
+
+       /* Wait for the interrupt flag (bit3) to be set */
+loop_9:
+#ifdef MV78XX0
+        MV_REG_READ_ASM (r6, r1, CPU_INT_LOW_REG(I2C_CH))
+        tst     r6, #BIT2
+#else
+        MV_REG_READ_ASM (r6, r1, CPU_MAIN_INT_CAUSE_REG)
+        tst     r6, #BIT5
+#endif
+        beq     loop_9
+
+        /* Wait for the start bit to be reset by HW */
+loop_8:
+        MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+        tst     r6, #TWSI_CONTROL_START_BIT
+        bne     loop_8
+
+        /* Wait for status TWSI_REPEATED_START_CONDITION_TRA = 0x10 */
+loop_10:
+        MV_REG_READ_ASM (r6, r1, TWSI_STATUS_BAUDE_RATE_REG(I2C_CH))
+        cmp     r6, #0x10
+        bne     loop_10
+
+        /* Writing the address of (DIMM0<<1) with read indication (bit0 is 1) */
+        mov     r6, r4, LSL #1
+        orr     r6, r6, #1     /* Read operation address bit 0 must be 1 */
+        MV_REG_WRITE_ASM (r6, r1, TWSI_DATA_REG(I2C_CH))
+
+        /* Clear the interrupt flag ==> the address will be transmitted */
+        bl      _twsiDelay
+        MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+        bic     r6, r6, #TWSI_CONTROL_INT_FLAG_SET
+        MV_REG_WRITE_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+        bl      _twsiDelay
+
+        /* Wait for the interrupt flag (bit3) to be set as a result of
+           transmitting the address.                                     */
+loop_11:
+#ifdef MV78XX0
+        MV_REG_READ_ASM (r6, r1, CPU_INT_LOW_REG(I2C_CH))
+        tst     r6, #BIT2
+#else
+        MV_REG_READ_ASM (r6, r1, CPU_MAIN_INT_CAUSE_REG)
+        tst     r6, #BIT5
+#endif
+        beq     loop_11
+
+         /* Wait for status TWSI_ADDR_PLUS_READ_BIT_TRA_ACK_REC = 0x40 */
+loop_12:
+        MV_REG_READ_ASM (r6, r1, TWSI_STATUS_BAUDE_RATE_REG(I2C_CH))
+        cmp     r6, #0x40
+        bne     loop_12
+
+        /* Clear the interrupt flag and the Acknoledge bit */
+        bl      _twsiDelay
+        MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+        bic     r6, r6, #(TWSI_CONTROL_INT_FLAG_SET | TWSI_CONTROL_ACK)
+        MV_REG_WRITE_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+        bl      _twsiDelay
+
+        /* Wait for the interrupt flag (bit3) to be set */
+loop_14:
+#ifdef MV78XX0
+        MV_REG_READ_ASM (r6, r1, CPU_INT_LOW_REG(I2C_CH))
+        tst     r6, #BIT2
+#else
+        MV_REG_READ_ASM (r6, r1, CPU_MAIN_INT_CAUSE_REG)
+        tst     r6, #BIT5
+#endif
+        beq     loop_14
+
+        /* Wait for status TWSI_MAS_REC_READ_DATA_ACK_NOT_TRA = 0x58 */
+loop_15:
+        MV_REG_READ_ASM (r6, r1, TWSI_STATUS_BAUDE_RATE_REG(I2C_CH))
+        cmp     r6, #0x58
+        bne     loop_15
+
+        /* Store the data in r7. */
+        MV_REG_READ_ASM (r7, r1, TWSI_DATA_REG(I2C_CH))
+
+        /* Generate stop bit */
+        MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+        orr     r6, r6, #TWSI_CONTROL_STOP_BIT
+        MV_REG_WRITE_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+
+
+        /* Clear the interrupt flag  */
+        bl      _twsiDelay
+        MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+        bic     r6, r6, #TWSI_CONTROL_INT_FLAG_SET
+        MV_REG_WRITE_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+        bl      _twsiDelay
+
+        /* Wait for the stop bit to be reset by HW */
+loop_16:
+        MV_REG_READ_ASM (r6, r1, TWSI_CONTROL_REG(I2C_CH))
+        tst     r6, #TWSI_CONTROL_INT_FLAG_SET
+        bne     loop_16
+
+exit_i2cRead:
+        mov     PC, r9         /* r9 is saved link register */
diff --git a/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsiSpec.h b/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsiSpec.h
new file mode 100644
index 000000000000..0df960a7a5c7
--- /dev/null
+++ b/crypto/ocf/kirkwood/mvHal/mv_hal/twsi/mvTwsiSpec.h
@@ -0,0 +1,160 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+/****************************************/
+/* TWSI Registers                        */
+/****************************************/
+#ifndef __INCmvTwsiSpech
+#define __INCmvTwsiSpech
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* defines */
+#define TWSI_SLAVE_ADDR_REG(chanNum)	(TWSI_SLAVE_BASE(chanNum)+ 0x00)
+
+#define TWSI_SLAVE_ADDR_GCE_ENA		BIT0
+#define TWSI_SLAVE_ADDR_7BIT_OFFS	0x1
+#define TWSI_SLAVE_ADDR_7BIT_MASK 	(0xFF << TWSI_SLAVE_ADDR_7BIT_OFFS)
+#define TWSI_SLAVE_ADDR_10BIT_OFFS	0x7
+#define TWSI_SLAVE_ADDR_10BIT_MASK 	0x300
+#define	TWSI_SLAVE_ADDR_10BIT_CONST 	0xF0
+
+
+#define TWSI_EXTENDED_SLAVE_ADDR_REG(chanNum)	(TWSI_SLAVE_BASE(chanNum) + 0x10)
+#define TWSI_EXTENDED_SLAVE_OFFS 	0
+#define TWSI_EXTENDED_SLAVE_MASK	(0xFF << TWSI_EXTENDED_SLAVE_OFFS)
+
+
+#define TWSI_DATA_REG(chanNum)		(TWSI_SLAVE_BASE(chanNum) + 0x04)
+#define TWSI_DATA_COMMAND_OFFS		0x0
+#define TWSI_DATA_COMMAND_MASK 		(0x1 << TWSI_DATA_COMMAND_OFFS)
+#define TWSI_DATA_COMMAND_WR		(0x1 << TWSI_DATA_COMMAND_OFFS)
+#define TWSI_DATA_COMMAND_RD		(0x0 << TWSI_DATA_COMMAND_OFFS)
+#define TWSI_DATA_ADDR_7BIT_OFFS	0x1
+#define TWSI_DATA_ADDR_7BIT_MASK 	(0xFF << TWSI_DATA_ADDR_7BIT_OFFS)
+#define TWSI_DATA_ADDR_10BIT_OFFS	0x7
+#define TWSI_DATA_ADDR_10BIT_MASK	0x300
+#define TWSI_DATA_ADDR_10BIT_CONST	0xF0
+
+
+#define TWSI_CONTROL_REG(chanNum)	(TWSI_SLAVE_BASE(chanNum) + 0x08)
+#define TWSI_CONTROL_ACK            	BIT2
+#define TWSI_CONTROL_INT_FLAG_SET   	BIT3
+#define TWSI_CONTROL_STOP_BIT    	BIT4
+#define TWSI_CONTROL_START_BIT 		BIT5
+#define TWSI_CONTROL_ENA     		BIT6
+#define TWSI_CONTROL_INT_ENA    	BIT7
+
+
+#define TWSI_STATUS_BAUDE_RATE_REG(chanNum)	(TWSI_SLAVE_BASE(chanNum) + 0x0c)
+#define TWSI_BAUD_RATE_N_OFFS		0
+#define TWSI_BAUD_RATE_N_MASK		(0x7 << TWSI_BAUD_RATE_N_OFFS)
+#define TWSI_BAUD_RATE_M_OFFS   	3
+#define TWSI_BAUD_RATE_M_MASK  		(0xF << TWSI_BAUD_RATE_M_OFFS)
+
+#define TWSI_SOFT_RESET_REG(chanNum)	(TWSI_SLAVE_BASE(chanNum) + 0x1c)
+
+/* defines */
+#define TWSI_TIMEOUT_VALUE 		0x500
+
+/* TWSI status codes */
+#define TWSI_BUS_ERROR                                            0x00
+#define TWSI_START_CON_TRA                                        0x08
+#define TWSI_REPEATED_START_CON_TRA                               0x10
+#define TWSI_AD_PLS_WR_BIT_TRA_ACK_REC                            0x18
+#define TWSI_AD_PLS_WR_BIT_TRA_ACK_NOT_REC                        0x20
+#define TWSI_M_TRAN_DATA_BYTE_ACK_REC                             0x28
+#define TWSI_M_TRAN_DATA_BYTE_ACK_NOT_REC                         0x30
+#define TWSI_M_LOST_ARB_DUR_AD_OR_DATA_TRA                        0x38
+#define TWSI_AD_PLS_RD_BIT_TRA_ACK_REC                            0x40
+#define TWSI_AD_PLS_RD_BIT_TRA_ACK_NOT_REC                        0x48
+#define TWSI_M_REC_RD_DATA_ACK_TRA                                0x50
+#define TWSI_M_REC_RD_DATA_ACK_NOT_TRA                            0x58
+#define TWSI_SLA_REC_AD_PLS_WR_BIT_ACK_TRA                        0x60
+#define TWSI_M_LOST_ARB_DUR_AD_TRA_AD_IS_TRGT_TO_SLA_ACK_TRA_W    0x68
+#define TWSI_GNL_CALL_REC_ACK_TRA                                 0x70
+#define TWSI_M_LOST_ARB_DUR_AD_TRA_GNL_CALL_AD_REC_ACK_TRA        0x78
+#define TWSI_SLA_REC_WR_DATA_AF_REC_SLA_AD_ACK_TRAN               0x80
+#define TWSI_SLA_REC_WR_DATA_AF_REC_SLA_AD_ACK_NOT_TRAN           0x88
+#define TWSI_SLA_REC_WR_DATA_AF_REC_GNL_CALL_ACK_TRAN             0x90
+#define TWSI_SLA_REC_WR_DATA_AF_REC_GNL_CALL_ACK_NOT_TRAN         0x98
+#define TWSI_SLA_REC_STOP_OR_REPEATED_STRT_CON                    0xA0
+#define TWSI_SLA_REC_AD_PLS_RD_BIT_ACK_TRA                        0xA8
+#define TWSI_M_LOST_ARB_DUR_AD_TRA_AD_IS_TRGT_TO_SLA_ACK_TRA_R    0xB0
+#define TWSI_SLA_TRA_RD_DATA_ACK_REC                              0xB8
+#define TWSI_SLA_TRA_RD_DATA_ACK_NOT_REC                          0xC0
+#define TWSI_SLA_TRA_LAST_RD_DATA_ACK_REC                         0xC8
+#define TWSI_SEC_AD_PLS_WR_BIT_TRA_ACK_REC                        0xD0
+#define TWSI_SEC_AD_PLS_WR_BIT_TRA_ACK_NOT_REC                    0xD8
+#define TWSI_SEC_AD_PLS_RD_BIT_TRA_ACK_REC                        0xE0
+#define TWSI_SEC_AD_PLS_RD_BIT_TRA_ACK_NOT_REC                    0xE8
+#define TWSI_NO_REL_STS_INT_FLAG_IS_KEPT_0                        0xF8
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INCmvTwsiSpech */
diff --git a/crypto/ocf/ocf-bench.c b/crypto/ocf/ocf-bench.c
new file mode 100644
index 000000000000..f3fe9d0e9af4
--- /dev/null
+++ b/crypto/ocf/ocf-bench.c
@@ -0,0 +1,514 @@
+/*
+ * A loadable module that benchmarks the OCF crypto speed from kernel space.
+ *
+ * Copyright (C) 2004-2010 David McCullough <david_mccullough@mcafee.com>
+ *
+ * LICENSE TERMS
+ *
+ * The free distribution and use of this software in both source and binary
+ * form is allowed (with or without changes) provided that:
+ *
+ *   1. distributions of this source code include the above copyright
+ *      notice, this list of conditions and the following disclaimer;
+ *
+ *   2. distributions in binary form include the above copyright
+ *      notice, this list of conditions and the following disclaimer
+ *      in the documentation and/or other associated materials;
+ *
+ *   3. the copyright holder's name is not used to endorse products
+ *      built using this software without specific written permission.
+ *
+ * ALTERNATIVELY, provided that this notice is retained in full, this product
+ * may be distributed under the terms of the GNU General Public License (GPL),
+ * in which case the provisions of the GPL apply INSTEAD OF those given above.
+ *
+ * DISCLAIMER
+ *
+ * This software is provided 'as is' with no explicit or implied warranties
+ * in respect of its properties, including, but not limited to, correctness
+ * and/or fitness for purpose.
+ */
+
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <cryptodev.h>
+
+#ifdef I_HAVE_AN_XSCALE_WITH_INTEL_SDK
+#define BENCH_IXP_ACCESS_LIB 1
+#endif
+#ifdef BENCH_IXP_ACCESS_LIB
+#include <IxTypes.h>
+#include <IxOsBuffMgt.h>
+#include <IxNpeDl.h>
+#include <IxCryptoAcc.h>
+#include <IxQMgr.h>
+#include <IxOsServices.h>
+#include <IxOsCacheMMU.h>
+#endif
+
+/*
+ * support for access lib version 1.4
+ */
+#ifndef IX_MBUF_PRIV
+#define IX_MBUF_PRIV(x) ((x)->priv)
+#endif
+
+/*
+ * the number of simultaneously active requests
+ */
+static int request_q_len = 40;
+module_param(request_q_len, int, 0);
+MODULE_PARM_DESC(request_q_len, "Number of outstanding requests");
+
+/*
+ * how many requests we want to have processed
+ */
+static int request_num = 1024;
+module_param(request_num, int, 0);
+MODULE_PARM_DESC(request_num, "run for at least this many requests");
+
+/*
+ * the size of each request
+ */
+static int request_size = 1488;
+module_param(request_size, int, 0);
+MODULE_PARM_DESC(request_size, "size of each request");
+
+/*
+ * OCF batching of requests
+ */
+static int request_batch = 1;
+module_param(request_batch, int, 0);
+MODULE_PARM_DESC(request_batch, "enable OCF request batching");
+
+/*
+ * OCF immediate callback on completion
+ */
+static int request_cbimm = 1;
+module_param(request_cbimm, int, 0);
+MODULE_PARM_DESC(request_cbimm, "enable OCF immediate callback on completion");
+
+/*
+ * a structure for each request
+ */
+typedef struct  {
+	struct work_struct work;
+#ifdef BENCH_IXP_ACCESS_LIB
+	IX_MBUF mbuf;
+#endif
+	unsigned char *buffer;
+} request_t;
+
+static request_t *requests;
+
+static spinlock_t ocfbench_counter_lock;
+static int outstanding;
+static int total;
+
+/*************************************************************************/
+/*
+ * OCF benchmark routines
+ */
+
+static uint64_t ocf_cryptoid;
+static unsigned long jstart, jstop;
+
+static int ocf_init(void);
+static int ocf_cb(struct cryptop *crp);
+static void ocf_request(void *arg);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+static void ocf_request_wq(struct work_struct *work);
+#endif
+
+static int
+ocf_init(void)
+{
+	int error;
+	struct cryptoini crie, cria;
+	struct cryptodesc crda, crde;
+
+	memset(&crie, 0, sizeof(crie));
+	memset(&cria, 0, sizeof(cria));
+	memset(&crde, 0, sizeof(crde));
+	memset(&crda, 0, sizeof(crda));
+
+	cria.cri_alg  = CRYPTO_SHA1_HMAC;
+	cria.cri_klen = 20 * 8;
+	cria.cri_key  = "0123456789abcdefghij";
+
+	//crie.cri_alg  = CRYPTO_3DES_CBC;
+	crie.cri_alg  = CRYPTO_AES_CBC;
+	crie.cri_klen = 24 * 8;
+	crie.cri_key  = "0123456789abcdefghijklmn";
+
+	crie.cri_next = &cria;
+
+	error = crypto_newsession(&ocf_cryptoid, &crie,
+				CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
+	if (error) {
+		printk("crypto_newsession failed %d\n", error);
+		return -1;
+	}
+	return 0;
+}
+
+static int
+ocf_cb(struct cryptop *crp)
+{
+	request_t *r = (request_t *) crp->crp_opaque;
+	unsigned long flags;
+
+	if (crp->crp_etype)
+		printk("Error in OCF processing: %d\n", crp->crp_etype);
+	crypto_freereq(crp);
+	crp = NULL;
+
+	/* do all requests  but take at least 1 second */
+	spin_lock_irqsave(&ocfbench_counter_lock, flags);
+	total++;
+	if (total > request_num && jstart + HZ < jiffies) {
+		outstanding--;
+		spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
+
+	schedule_work(&r->work);
+	return 0;
+}
+
+
+static void
+ocf_request(void *arg)
+{
+	request_t *r = arg;
+	struct cryptop *crp = crypto_getreq(2);
+	struct cryptodesc *crde, *crda;
+	unsigned long flags;
+
+	if (!crp) {
+		spin_lock_irqsave(&ocfbench_counter_lock, flags);
+		outstanding--;
+		spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
+		return;
+	}
+
+	crde = crp->crp_desc;
+	crda = crde->crd_next;
+
+	crda->crd_skip = 0;
+	crda->crd_flags = 0;
+	crda->crd_len = request_size;
+	crda->crd_inject = request_size;
+	crda->crd_alg = CRYPTO_SHA1_HMAC;
+	crda->crd_key = "0123456789abcdefghij";
+	crda->crd_klen = 20 * 8;
+
+	crde->crd_skip = 0;
+	crde->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_ENCRYPT;
+	crde->crd_len = request_size;
+	crde->crd_inject = request_size;
+	//crde->crd_alg = CRYPTO_3DES_CBC;
+	crde->crd_alg = CRYPTO_AES_CBC;
+	crde->crd_key = "0123456789abcdefghijklmn";
+	crde->crd_klen = 24 * 8;
+
+	crp->crp_ilen = request_size + 64;
+	crp->crp_flags = 0;
+	if (request_batch)
+		crp->crp_flags |= CRYPTO_F_BATCH;
+	if (request_cbimm)
+		crp->crp_flags |= CRYPTO_F_CBIMM;
+	crp->crp_buf = (caddr_t) r->buffer;
+	crp->crp_callback = ocf_cb;
+	crp->crp_sid = ocf_cryptoid;
+	crp->crp_opaque = (caddr_t) r;
+	crypto_dispatch(crp);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+static void
+ocf_request_wq(struct work_struct *work)
+{
+	request_t *r = container_of(work, request_t, work);
+	ocf_request(r);
+}
+#endif
+
+static void
+ocf_done(void)
+{
+	crypto_freesession(ocf_cryptoid);
+}
+
+/*************************************************************************/
+#ifdef BENCH_IXP_ACCESS_LIB
+/*************************************************************************/
+/*
+ * CryptoAcc benchmark routines
+ */
+
+static IxCryptoAccCtx ixp_ctx;
+static UINT32 ixp_ctx_id;
+static IX_MBUF ixp_pri;
+static IX_MBUF ixp_sec;
+static int ixp_registered = 0;
+
+static void ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp,
+					IxCryptoAccStatus status);
+static void ixp_perform_cb(UINT32 ctx_id, IX_MBUF *sbufp, IX_MBUF *dbufp,
+					IxCryptoAccStatus status);
+static void ixp_request(void *arg);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+static void ixp_request_wq(struct work_struct *work);
+#endif
+
+static int
+ixp_init(void)
+{
+	IxCryptoAccStatus status;
+
+	ixp_ctx.cipherCtx.cipherAlgo = IX_CRYPTO_ACC_CIPHER_3DES;
+	ixp_ctx.cipherCtx.cipherMode = IX_CRYPTO_ACC_MODE_CBC;
+	ixp_ctx.cipherCtx.cipherKeyLen = 24;
+	ixp_ctx.cipherCtx.cipherBlockLen = IX_CRYPTO_ACC_DES_BLOCK_64;
+	ixp_ctx.cipherCtx.cipherInitialVectorLen = IX_CRYPTO_ACC_DES_IV_64;
+	memcpy(ixp_ctx.cipherCtx.key.cipherKey, "0123456789abcdefghijklmn", 24);
+
+	ixp_ctx.authCtx.authAlgo = IX_CRYPTO_ACC_AUTH_SHA1;
+	ixp_ctx.authCtx.authDigestLen = 12;
+	ixp_ctx.authCtx.aadLen = 0;
+	ixp_ctx.authCtx.authKeyLen = 20;
+	memcpy(ixp_ctx.authCtx.key.authKey, "0123456789abcdefghij", 20);
+
+	ixp_ctx.useDifferentSrcAndDestMbufs = 0;
+	ixp_ctx.operation = IX_CRYPTO_ACC_OP_ENCRYPT_AUTH ;
+
+	IX_MBUF_MLEN(&ixp_pri)  = IX_MBUF_PKT_LEN(&ixp_pri) = 128;
+	IX_MBUF_MDATA(&ixp_pri) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
+	IX_MBUF_MLEN(&ixp_sec)  = IX_MBUF_PKT_LEN(&ixp_sec) = 128;
+	IX_MBUF_MDATA(&ixp_sec) = (unsigned char *) kmalloc(128, SLAB_ATOMIC);
+
+	status = ixCryptoAccCtxRegister(&ixp_ctx, &ixp_pri, &ixp_sec,
+			ixp_register_cb, ixp_perform_cb, &ixp_ctx_id);
+
+	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status) {
+		while (!ixp_registered)
+			schedule();
+		return ixp_registered < 0 ? -1 : 0;
+	}
+
+	printk("ixp: ixCryptoAccCtxRegister failed %d\n", status);
+	return -1;
+}
+
+static void
+ixp_register_cb(UINT32 ctx_id, IX_MBUF *bufp, IxCryptoAccStatus status)
+{
+	if (bufp) {
+		IX_MBUF_MLEN(bufp) = IX_MBUF_PKT_LEN(bufp) = 0;
+		kfree(IX_MBUF_MDATA(bufp));
+		IX_MBUF_MDATA(bufp) = NULL;
+	}
+
+	if (IX_CRYPTO_ACC_STATUS_WAIT == status)
+		return;
+	if (IX_CRYPTO_ACC_STATUS_SUCCESS == status)
+		ixp_registered = 1;
+	else
+		ixp_registered = -1;
+}
+
+static void
+ixp_perform_cb(
+	UINT32 ctx_id,
+	IX_MBUF *sbufp,
+	IX_MBUF *dbufp,
+	IxCryptoAccStatus status)
+{
+	request_t *r = NULL;
+	unsigned long flags;
+
+	/* do all requests  but take at least 1 second */
+	spin_lock_irqsave(&ocfbench_counter_lock, flags);
+	total++;
+	if (total > request_num && jstart + HZ < jiffies) {
+		outstanding--;
+		spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
+		return;
+	}
+
+	if (!sbufp || !(r = IX_MBUF_PRIV(sbufp))) {
+		printk("crappo %p %p\n", sbufp, r);
+		outstanding--;
+		spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
+
+	schedule_work(&r->work);
+}
+
+static void
+ixp_request(void *arg)
+{
+	request_t *r = arg;
+	IxCryptoAccStatus status;
+	unsigned long flags;
+
+	memset(&r->mbuf, 0, sizeof(r->mbuf));
+	IX_MBUF_MLEN(&r->mbuf) = IX_MBUF_PKT_LEN(&r->mbuf) = request_size + 64;
+	IX_MBUF_MDATA(&r->mbuf) = r->buffer;
+	IX_MBUF_PRIV(&r->mbuf) = r;
+	status = ixCryptoAccAuthCryptPerform(ixp_ctx_id, &r->mbuf, NULL,
+			0, request_size, 0, request_size, request_size, r->buffer);
+	if (IX_CRYPTO_ACC_STATUS_SUCCESS != status) {
+		printk("status1 = %d\n", status);
+		spin_lock_irqsave(&ocfbench_counter_lock, flags);
+		outstanding--;
+		spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
+		return;
+	}
+	return;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+static void
+ixp_request_wq(struct work_struct *work)
+{
+	request_t *r = container_of(work, request_t, work);
+	ixp_request(r);
+}
+#endif
+
+static void
+ixp_done(void)
+{
+	/* we should free the session here but I am lazy :-) */
+}
+
+/*************************************************************************/
+#endif /* BENCH_IXP_ACCESS_LIB */
+/*************************************************************************/
+
+int
+ocfbench_init(void)
+{
+	int i;
+	unsigned long mbps;
+	unsigned long flags;
+
+	printk("Crypto Speed tests\n");
+
+	requests = kmalloc(sizeof(request_t) * request_q_len, GFP_KERNEL);
+	if (!requests) {
+		printk("malloc failed\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < request_q_len; i++) {
+		/* +64 for return data */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+		INIT_WORK(&requests[i].work, ocf_request_wq);
+#else
+		INIT_WORK(&requests[i].work, ocf_request, &requests[i]);
+#endif
+		requests[i].buffer = kmalloc(request_size + 128, GFP_DMA);
+		if (!requests[i].buffer) {
+			printk("malloc failed\n");
+			return -EINVAL;
+		}
+		memset(requests[i].buffer, '0' + i, request_size + 128);
+	}
+
+	/*
+	 * OCF benchmark
+	 */
+	printk("OCF: testing ...\n");
+	if (ocf_init() == -1)
+		return -EINVAL;
+
+	spin_lock_init(&ocfbench_counter_lock);
+	total = outstanding = 0;
+	jstart = jiffies;
+	for (i = 0; i < request_q_len; i++) {
+		spin_lock_irqsave(&ocfbench_counter_lock, flags);
+		outstanding++;
+		spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
+		ocf_request(&requests[i]);
+	}
+	while (outstanding > 0)
+		schedule();
+	jstop = jiffies;
+
+	mbps = 0;
+	if (jstop > jstart) {
+		mbps = (unsigned long) total * (unsigned long) request_size * 8;
+		mbps /= ((jstop - jstart) * 1000) / HZ;
+	}
+	printk("OCF: %d requests of %d bytes in %d jiffies (%d.%03d Mbps)\n",
+			total, request_size, (int)(jstop - jstart),
+			((int)mbps) / 1000, ((int)mbps) % 1000);
+	ocf_done();
+
+#ifdef BENCH_IXP_ACCESS_LIB
+	/*
+	 * IXP benchmark
+	 */
+	printk("IXP: testing ...\n");
+	ixp_init();
+	total = outstanding = 0;
+	jstart = jiffies;
+	for (i = 0; i < request_q_len; i++) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
+		INIT_WORK(&requests[i].work, ixp_request_wq);
+#else
+		INIT_WORK(&requests[i].work, ixp_request, &requests[i]);
+#endif
+		spin_lock_irqsave(&ocfbench_counter_lock, flags);
+		outstanding++;
+		spin_unlock_irqrestore(&ocfbench_counter_lock, flags);
+		ixp_request(&requests[i]);
+	}
+	while (outstanding > 0)
+		schedule();
+	jstop = jiffies;
+
+	mbps = 0;
+	if (jstop > jstart) {
+		mbps = (unsigned long) total * (unsigned long) request_size * 8;
+		mbps /= ((jstop - jstart) * 1000) / HZ;
+	}
+	printk("IXP: %d requests of %d bytes in %d jiffies (%d.%03d Mbps)\n",
+			total, request_size, jstop - jstart,
+			((int)mbps) / 1000, ((int)mbps) % 1000);
+	ixp_done();
+#endif /* BENCH_IXP_ACCESS_LIB */
+
+	for (i = 0; i < request_q_len; i++)
+		kfree(requests[i].buffer);
+	kfree(requests);
+	return -EINVAL; /* always fail to load so it can be re-run quickly ;-) */
+}
+
+static void __exit ocfbench_exit(void)
+{
+}
+
+module_init(ocfbench_init);
+module_exit(ocfbench_exit);
+
+MODULE_LICENSE("BSD");
+MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
+MODULE_DESCRIPTION("Benchmark various in-kernel crypto speeds");
diff --git a/crypto/ocf/ocf-compat.h b/crypto/ocf/ocf-compat.h
new file mode 100644
index 000000000000..2bb78b2e266e
--- /dev/null
+++ b/crypto/ocf/ocf-compat.h
@@ -0,0 +1,373 @@
+#ifndef _BSD_COMPAT_H_
+#define _BSD_COMPAT_H_ 1
+/****************************************************************************/
+/*
+ * Provide compat routines for older linux kernels and BSD kernels
+ *
+ * Written by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2010 David McCullough <david_mccullough@mcafee.com>
+ *
+ * LICENSE TERMS
+ *
+ * The free distribution and use of this software in both source and binary
+ * form is allowed (with or without changes) provided that:
+ *
+ *   1. distributions of this source code include the above copyright
+ *      notice, this list of conditions and the following disclaimer;
+ *
+ *   2. distributions in binary form include the above copyright
+ *      notice, this list of conditions and the following disclaimer
+ *      in the documentation and/or other associated materials;
+ *
+ *   3. the copyright holder's name is not used to endorse products
+ *      built using this software without specific written permission.
+ *
+ * ALTERNATIVELY, provided that this notice is retained in full, this file
+ * may be distributed under the terms of the GNU General Public License (GPL),
+ * in which case the provisions of the GPL apply INSTEAD OF those given above.
+ *
+ * DISCLAIMER
+ *
+ * This software is provided 'as is' with no explicit or implied warranties
+ * in respect of its properties, including, but not limited to, correctness
+ * and/or fitness for purpose.
+ */
+/****************************************************************************/
+#ifdef __KERNEL__
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+
+/*
+ * fake some BSD driver interface stuff specifically for OCF use
+ */
+
+typedef struct ocf_device *device_t;
+
+typedef struct {
+	int (*cryptodev_newsession)(device_t dev, u_int32_t *sidp, struct cryptoini *cri);
+	int (*cryptodev_freesession)(device_t dev, u_int64_t tid);
+	int (*cryptodev_process)(device_t dev, struct cryptop *crp, int hint);
+	int (*cryptodev_kprocess)(device_t dev, struct cryptkop *krp, int hint);
+} device_method_t;
+#define DEVMETHOD(id, func)	id: func
+
+struct ocf_device {
+	char name[32];		/* the driver name */
+	char nameunit[32];	/* the driver name + HW instance */
+	int  unit;
+	device_method_t	methods;
+	void *softc;
+};
+
+#define CRYPTODEV_NEWSESSION(dev, sid, cri) \
+	((*(dev)->methods.cryptodev_newsession)(dev,sid,cri))
+#define CRYPTODEV_FREESESSION(dev, sid) \
+	((*(dev)->methods.cryptodev_freesession)(dev, sid))
+#define CRYPTODEV_PROCESS(dev, crp, hint) \
+	((*(dev)->methods.cryptodev_process)(dev, crp, hint))
+#define CRYPTODEV_KPROCESS(dev, krp, hint) \
+	((*(dev)->methods.cryptodev_kprocess)(dev, krp, hint))
+
+#define device_get_name(dev)	((dev)->name)
+#define device_get_nameunit(dev)	((dev)->nameunit)
+#define device_get_unit(dev)	((dev)->unit)
+#define device_get_softc(dev)	((dev)->softc)
+
+#define	softc_device_decl \
+		struct ocf_device _device; \
+		device_t
+
+#define	softc_device_init(_sc, _name, _unit, _methods) \
+	if (1) {\
+	strncpy((_sc)->_device.name, _name, sizeof((_sc)->_device.name) - 1); \
+	snprintf((_sc)->_device.nameunit, sizeof((_sc)->_device.name), "%s%d", _name, _unit); \
+	(_sc)->_device.unit = _unit; \
+	(_sc)->_device.methods = _methods; \
+	(_sc)->_device.softc = (void *) _sc; \
+	*(device_t *)((softc_get_device(_sc))+1) = &(_sc)->_device; \
+	} else
+
+#define	softc_get_device(_sc)	(&(_sc)->_device)
+
+/*
+ * iomem support for 2.4 and 2.6 kernels
+ */
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define ocf_iomem_t	unsigned long
+
+/*
+ * implement simple workqueue like support for older kernels
+ */
+
+#include <linux/tqueue.h>
+
+#define work_struct tq_struct
+
+#define INIT_WORK(wp, fp, ap) \
+	do { \
+		(wp)->sync = 0; \
+		(wp)->routine = (fp); \
+		(wp)->data = (ap); \
+	} while (0)
+
+#define schedule_work(wp) \
+	do { \
+		queue_task((wp), &tq_immediate); \
+		mark_bh(IMMEDIATE_BH); \
+	} while (0)
+
+#define flush_scheduled_work()	run_task_queue(&tq_immediate)
+
+#else
+#define ocf_iomem_t	void __iomem *
+
+#include <linux/workqueue.h>
+
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
+#include <linux/fdtable.h>
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
+#define files_fdtable(files)	(files)
+#endif
+
+#ifdef MODULE_PARM
+#undef module_param	/* just in case */
+#define	module_param(a,b,c)		MODULE_PARM(a,"i")
+#endif
+
+#define bzero(s,l)		memset(s,0,l)
+#define bcopy(s,d,l)	memcpy(d,s,l)
+#define bcmp(x, y, l)	memcmp(x,y,l)
+
+#define MIN(x,y)	((x) < (y) ? (x) : (y))
+
+#define device_printf(dev, a...) ({ \
+				printk("%s: ", device_get_nameunit(dev)); printk(a); \
+			})
+
+#undef printf
+#define printf(fmt...)	printk(fmt)
+
+#define KASSERT(c,p)	if (!(c)) { printk p ; } else
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define ocf_daemonize(str) \
+	daemonize(); \
+	spin_lock_irq(&current->sigmask_lock); \
+	sigemptyset(&current->blocked); \
+	recalc_sigpending(current); \
+	spin_unlock_irq(&current->sigmask_lock); \
+	sprintf(current->comm, str);
+#else
+#define ocf_daemonize(str) daemonize(str);
+#endif
+
+#define	TAILQ_INSERT_TAIL(q,d,m) list_add_tail(&(d)->m, (q))
+#define	TAILQ_EMPTY(q)	list_empty(q)
+#define	TAILQ_FOREACH(v, q, m) list_for_each_entry(v, q, m)
+
+#define read_random(p,l) get_random_bytes(p,l)
+
+#define DELAY(x)	((x) > 2000 ? mdelay((x)/1000) : udelay(x))
+#define strtoul simple_strtoul
+
+#define pci_get_vendor(dev)	((dev)->vendor)
+#define pci_get_device(dev)	((dev)->device)
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+#define pci_set_consistent_dma_mask(dev, mask) (0)
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
+#define pci_dma_sync_single_for_cpu pci_dma_sync_single
+#endif
+
+#ifndef DMA_32BIT_MASK
+#define DMA_32BIT_MASK  0x00000000ffffffffULL
+#endif
+
+#ifndef htole32
+#define htole32(x)	cpu_to_le32(x)
+#endif
+#ifndef htobe32
+#define htobe32(x)	cpu_to_be32(x)
+#endif
+#ifndef htole16
+#define htole16(x)	cpu_to_le16(x)
+#endif
+#ifndef htobe16
+#define htobe16(x)	cpu_to_be16(x)
+#endif
+
+/* older kernels don't have these */
+
+#include <asm/irq.h>
+#if !defined(IRQ_NONE) && !defined(IRQ_RETVAL)
+#define IRQ_NONE
+#define IRQ_HANDLED
+#define IRQ_WAKE_THREAD
+#define IRQ_RETVAL
+#define irqreturn_t void
+typedef irqreturn_t (*irq_handler_t)(int irq, void *arg, struct pt_regs *regs);
+#endif
+#ifndef IRQF_SHARED
+#define IRQF_SHARED	SA_SHIRQ
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
+# define strlcpy(dest,src,len) \
+		({strncpy(dest,src,(len)-1); ((char *)dest)[(len)-1] = '\0'; })
+#endif
+
+#ifndef MAX_ERRNO
+#define MAX_ERRNO	4095
+#endif
+#ifndef IS_ERR_VALUE
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,5)
+#include <linux/err.h>
+#endif
+#ifndef IS_ERR_VALUE
+#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)
+#endif
+#endif
+
+/*
+ * common debug for all
+ */
+#undef dprintk
+#if 1
+#define dprintk(a...)	do { if (debug) printk(a); } while(0)
+#else
+#define dprintk(a...)
+#endif
+
+#ifndef SLAB_ATOMIC
+/* Changed in 2.6.20, must use GFP_ATOMIC now */
+#define	SLAB_ATOMIC	GFP_ATOMIC
+#endif
+
+/*
+ * need some additional support for older kernels */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,2)
+#define pci_register_driver_compat(driver, rc) \
+	do { \
+		if ((rc) > 0) { \
+			(rc) = 0; \
+		} else if (rc == 0) { \
+			(rc) = -ENODEV; \
+		} else { \
+			pci_unregister_driver(driver); \
+		} \
+	} while (0)
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
+#define pci_register_driver_compat(driver,rc) ((rc) = (rc) < 0 ? (rc) : 0)
+#else
+#define pci_register_driver_compat(driver,rc)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+
+#include <linux/mm.h>
+#include <asm/scatterlist.h>
+
+static inline void sg_set_page(struct scatterlist *sg,  struct page *page,
+			       unsigned int len, unsigned int offset)
+{
+	sg->page = page;
+	sg->offset = offset;
+	sg->length = len;
+}
+
+static inline void *sg_virt(struct scatterlist *sg)
+{
+	return page_address(sg->page) + sg->offset;
+}
+
+#define sg_init_table(sg, n)
+
+#define sg_mark_end(sg)
+
+#endif
+
+#ifndef late_initcall
+#define late_initcall(init) module_init(init)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) || !defined(CONFIG_SMP)
+#define ocf_for_each_cpu(cpu) for ((cpu) = 0; (cpu) == 0; (cpu)++)
+#else
+#define ocf_for_each_cpu(cpu) for_each_present_cpu(cpu)
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
+#include <linux/sched.h>
+#define	kill_proc(p,s,v)	send_sig(s,find_task_by_vpid(p),0)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4)
+
+struct ocf_thread {
+	struct task_struct	*task;
+	int					(*func)(void *arg);
+	void				*arg;
+};
+
+/* thread startup helper func */
+static inline int ocf_run_thread(void *arg)
+{
+	struct ocf_thread *t = (struct ocf_thread *) arg;
+	if (!t)
+		return -1; /* very bad */
+	t->task = current;
+	daemonize();
+	spin_lock_irq(&current->sigmask_lock);
+	sigemptyset(&current->blocked);
+	recalc_sigpending(current);
+	spin_unlock_irq(&current->sigmask_lock);
+	return (*t->func)(t->arg);
+}
+
+#define kthread_create(f,a,fmt...) \
+	({ \
+		struct ocf_thread t; \
+		pid_t p; \
+		t.task = NULL; \
+		t.func = (f); \
+		t.arg = (a); \
+		p = kernel_thread(ocf_run_thread, &t, CLONE_FS|CLONE_FILES); \
+		while (p != (pid_t) -1 && t.task == NULL) \
+			schedule(); \
+		if (t.task) \
+			snprintf(t.task->comm, sizeof(t.task->comm), fmt); \
+		(t.task); \
+	})
+
+#define kthread_bind(t,cpu)	/**/
+
+#define kthread_should_stop()	(strcmp(current->comm, "stopping") == 0)
+
+#define kthread_stop(t) \
+	({ \
+		strcpy((t)->comm, "stopping"); \
+		kill_proc((t)->pid, SIGTERM, 1); \
+		do { \
+			schedule(); \
+		} while (kill_proc((t)->pid, SIGTERM, 1) == 0); \
+	})
+
+#else
+#include <linux/kthread.h>
+#endif
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)
+#define	skb_frag_page(x)	((x)->page)
+#endif
+
+#endif /* __KERNEL__ */
+
+/****************************************************************************/
+#endif /* _BSD_COMPAT_H_ */
diff --git a/crypto/ocf/ocfnull/Makefile b/crypto/ocf/ocfnull/Makefile
new file mode 100644
index 000000000000..7eca8a7396b2
--- /dev/null
+++ b/crypto/ocf/ocfnull/Makefile
@@ -0,0 +1,11 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+obj-$(CONFIG_OCF_OCFNULL) += ocfnull.o
+
+obj ?= .
+EXTRA_CFLAGS += -I$(obj)/..
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
diff --git a/crypto/ocf/ocfnull/ocfnull.c b/crypto/ocf/ocfnull/ocfnull.c
new file mode 100644
index 000000000000..5fe68933ccc3
--- /dev/null
+++ b/crypto/ocf/ocfnull/ocfnull.c
@@ -0,0 +1,204 @@
+/*
+ * An OCF module for determining the cost of crypto versus the cost of
+ * IPSec processing outside of OCF.  This modules gives us the effect of
+ * zero cost encryption,  of course you will need to run it at both ends
+ * since it does no crypto at all.
+ *
+ * Written by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2006-2010 David McCullough
+ *
+ * LICENSE TERMS
+ *
+ * The free distribution and use of this software in both source and binary
+ * form is allowed (with or without changes) provided that:
+ *
+ *   1. distributions of this source code include the above copyright
+ *      notice, this list of conditions and the following disclaimer;
+ *
+ *   2. distributions in binary form include the above copyright
+ *      notice, this list of conditions and the following disclaimer
+ *      in the documentation and/or other associated materials;
+ *
+ *   3. the copyright holder's name is not used to endorse products
+ *      built using this software without specific written permission.
+ *
+ * ALTERNATIVELY, provided that this notice is retained in full, this product
+ * may be distributed under the terms of the GNU General Public License (GPL),
+ * in which case the provisions of the GPL apply INSTEAD OF those given above.
+ *
+ * DISCLAIMER
+ *
+ * This software is provided 'as is' with no explicit or implied warranties
+ * in respect of its properties, including, but not limited to, correctness
+ * and/or fitness for purpose.
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/crypto.h>
+#include <linux/interrupt.h>
+
+#include <cryptodev.h>
+#include <uio.h>
+
+static int32_t			 null_id = -1;
+static u_int32_t		 null_sesnum = 0;
+
+static int null_process(device_t, struct cryptop *, int);
+static int null_newsession(device_t, u_int32_t *, struct cryptoini *);
+static int null_freesession(device_t, u_int64_t);
+
+#define debug ocfnull_debug
+int ocfnull_debug = 0;
+module_param(ocfnull_debug, int, 0644);
+MODULE_PARM_DESC(ocfnull_debug, "Enable debug");
+
+/*
+ * dummy device structure
+ */
+
+static struct {
+	softc_device_decl	sc_dev;
+} nulldev;
+
+static device_method_t null_methods = {
+	/* crypto device methods */
+	DEVMETHOD(cryptodev_newsession,	null_newsession),
+	DEVMETHOD(cryptodev_freesession,null_freesession),
+	DEVMETHOD(cryptodev_process,	null_process),
+};
+
+/*
+ * Generate a new software session.
+ */
+static int
+null_newsession(device_t arg, u_int32_t *sid, struct cryptoini *cri)
+{
+	dprintk("%s()\n", __FUNCTION__);
+	if (sid == NULL || cri == NULL) {
+		dprintk("%s,%d - EINVAL\n", __FILE__, __LINE__);
+		return EINVAL;
+	}
+
+	if (null_sesnum == 0)
+		null_sesnum++;
+	*sid = null_sesnum++;
+	return 0;
+}
+
+
+/*
+ * Free a session.
+ */
+static int
+null_freesession(device_t arg, u_int64_t tid)
+{
+	u_int32_t sid = CRYPTO_SESID2LID(tid);
+
+	dprintk("%s()\n", __FUNCTION__);
+	if (sid > null_sesnum) {
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		return EINVAL;
+	}
+
+	/* Silently accept and return */
+	if (sid == 0)
+		return 0;
+	return 0;
+}
+
+
+/*
+ * Process a request.
+ */
+static int
+null_process(device_t arg, struct cryptop *crp, int hint)
+{
+	unsigned int lid;
+
+	dprintk("%s()\n", __FUNCTION__);
+
+	/* Sanity check */
+	if (crp == NULL) {
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		return EINVAL;
+	}
+
+	crp->crp_etype = 0;
+
+	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		crp->crp_etype = EINVAL;
+		goto done;
+	}
+
+	/*
+	 * find the session we are using
+	 */
+
+	lid = crp->crp_sid & 0xffffffff;
+	if (lid >= null_sesnum || lid == 0) {
+		crp->crp_etype = ENOENT;
+		dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
+		goto done;
+	}
+
+done:
+	crypto_done(crp);
+	return 0;
+}
+
+
+/*
+ * our driver startup and shutdown routines
+ */
+
+static int
+null_init(void)
+{
+	dprintk("%s(%p)\n", __FUNCTION__, null_init);
+
+	memset(&nulldev, 0, sizeof(nulldev));
+	softc_device_init(&nulldev, "ocfnull", 0, null_methods);
+
+	null_id = crypto_get_driverid(softc_get_device(&nulldev),
+				CRYPTOCAP_F_HARDWARE);
+	if (null_id < 0)
+		panic("ocfnull: crypto device cannot initialize!");
+
+#define	REGISTER(alg) \
+	crypto_register(null_id,alg,0,0)
+	REGISTER(CRYPTO_DES_CBC);
+	REGISTER(CRYPTO_3DES_CBC);
+	REGISTER(CRYPTO_RIJNDAEL128_CBC);
+	REGISTER(CRYPTO_MD5);
+	REGISTER(CRYPTO_SHA1);
+	REGISTER(CRYPTO_MD5_HMAC);
+	REGISTER(CRYPTO_SHA1_HMAC);
+#undef REGISTER
+
+	return 0;
+}
+
+static void
+null_exit(void)
+{
+	dprintk("%s()\n", __FUNCTION__);
+	crypto_unregister_all(null_id);
+	null_id = -1;
+}
+
+module_init(null_init);
+module_exit(null_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
+MODULE_DESCRIPTION("ocfnull - claims a lot but does nothing");
diff --git a/crypto/ocf/pasemi/Makefile b/crypto/ocf/pasemi/Makefile
new file mode 100644
index 000000000000..7807d2f77127
--- /dev/null
+++ b/crypto/ocf/pasemi/Makefile
@@ -0,0 +1,11 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+obj-$(CONFIG_OCF_PASEMI) += pasemi.o
+
+obj ?= .
+EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
diff --git a/crypto/ocf/pasemi/pasemi.c b/crypto/ocf/pasemi/pasemi.c
new file mode 100644
index 000000000000..1b4333cddeee
--- /dev/null
+++ b/crypto/ocf/pasemi/pasemi.c
@@ -0,0 +1,1007 @@
+/*
+ * Copyright (C) 2007 PA Semi, Inc
+ *
+ * Driver for the PA Semi PWRficient DMA Crypto Engine
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <asm/scatterlist.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <cryptodev.h>
+#include <uio.h>
+#include "pasemi_fnu.h"
+
+#define DRV_NAME "pasemi"
+
+#define TIMER_INTERVAL 1000
+
+static void __devexit pasemi_dma_remove(struct pci_dev *pdev);
+static struct pasdma_status volatile * dma_status;
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Enable debug");
+
+static void pasemi_desc_start(struct pasemi_desc *desc, u64 hdr)
+{
+	desc->postop = 0;
+	desc->quad[0] = hdr;
+	desc->quad_cnt = 1;
+	desc->size = 1;
+}
+
+static void pasemi_desc_build(struct pasemi_desc *desc, u64 val)
+{
+	desc->quad[desc->quad_cnt++] = val;
+	desc->size = (desc->quad_cnt + 1) / 2;
+}
+
+static void pasemi_desc_hdr(struct pasemi_desc *desc, u64 hdr)
+{
+	desc->quad[0] |= hdr;
+}
+
+static int pasemi_desc_size(struct pasemi_desc *desc)
+{
+	return desc->size;
+}
+
+static void pasemi_ring_add_desc(
+				 struct pasemi_fnu_txring *ring,
+				 struct pasemi_desc *desc,
+				 struct cryptop *crp) {
+	int i;
+	int ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));
+
+	TX_DESC_INFO(ring, ring->next_to_fill).desc_size = desc->size;
+	TX_DESC_INFO(ring, ring->next_to_fill).desc_postop = desc->postop;
+	TX_DESC_INFO(ring, ring->next_to_fill).cf_crp = crp;
+
+	for (i = 0; i < desc->quad_cnt; i += 2) {
+		ring_index = 2 * (ring->next_to_fill & (TX_RING_SIZE-1));
+		ring->desc[ring_index] = desc->quad[i];
+		ring->desc[ring_index + 1] = desc->quad[i + 1];
+		ring->next_to_fill++;
+	}
+
+	if (desc->quad_cnt & 1)
+		ring->desc[ring_index + 1] = 0;
+}
+
+static void pasemi_ring_incr(struct pasemi_softc *sc, int chan_index, int incr)
+{
+	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_INCR(sc->base_chan + chan_index),
+		 incr);
+}
+
+/*
+ * Generate a new software session.
+ */
+static int
+pasemi_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
+{
+	struct cryptoini *c, *encini = NULL, *macini = NULL;
+	struct pasemi_softc *sc = device_get_softc(dev);
+	struct pasemi_session *ses = NULL, **sespp;
+	int sesn, blksz = 0;
+	u64 ccmd = 0;
+	unsigned long flags;
+	struct pasemi_desc init_desc;
+	struct pasemi_fnu_txring *txring;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+	if (sidp == NULL || cri == NULL || sc == NULL) {
+		DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
+		return -EINVAL;
+	}
+	for (c = cri; c != NULL; c = c->cri_next) {
+		if (ALG_IS_SIG(c->cri_alg)) {
+			if (macini)
+				return -EINVAL;
+			macini = c;
+		} else if (ALG_IS_CIPHER(c->cri_alg)) {
+			if (encini)
+				return -EINVAL;
+			encini = c;
+		} else {
+			DPRINTF("UNKNOWN c->cri_alg %d\n", c->cri_alg);
+			return -EINVAL;
+		}
+	}
+	if (encini == NULL && macini == NULL)
+		return -EINVAL;
+	if (encini) {
+		/* validate key length */
+		switch (encini->cri_alg) {
+		case CRYPTO_DES_CBC:
+			if (encini->cri_klen != 64)
+				return -EINVAL;
+			ccmd = DMA_CALGO_DES;
+			break;
+		case CRYPTO_3DES_CBC:
+			if (encini->cri_klen != 192)
+				return -EINVAL;
+			ccmd = DMA_CALGO_3DES;
+			break;
+		case CRYPTO_AES_CBC:
+			if (encini->cri_klen != 128 &&
+			    encini->cri_klen != 192 &&
+			    encini->cri_klen != 256)
+				return -EINVAL;
+			ccmd = DMA_CALGO_AES;
+			break;
+		case CRYPTO_ARC4:
+			if (encini->cri_klen != 128)
+				return -EINVAL;
+			ccmd = DMA_CALGO_ARC;
+			break;
+		default:
+			DPRINTF("UNKNOWN encini->cri_alg %d\n",
+				encini->cri_alg);
+			return -EINVAL;
+		}
+	}
+
+	if (macini) {
+		switch (macini->cri_alg) {
+		case CRYPTO_MD5:
+		case CRYPTO_MD5_HMAC:
+			blksz = 16;
+			break;
+		case CRYPTO_SHA1:
+		case CRYPTO_SHA1_HMAC:
+			blksz = 20;
+			break;
+		default:
+			DPRINTF("UNKNOWN macini->cri_alg %d\n",
+				macini->cri_alg);
+			return -EINVAL;
+		}
+		if (((macini->cri_klen + 7) / 8) > blksz) {
+			DPRINTF("key length %d bigger than blksize %d not supported\n",
+				((macini->cri_klen + 7) / 8), blksz);
+			return -EINVAL;
+		}
+	}
+
+	for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
+		if (sc->sc_sessions[sesn] == NULL) {
+			sc->sc_sessions[sesn] = (struct pasemi_session *)
+				kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
+			ses = sc->sc_sessions[sesn];
+			break;
+		} else if (sc->sc_sessions[sesn]->used == 0) {
+			ses = sc->sc_sessions[sesn];
+			break;
+		}
+	}
+
+	if (ses == NULL) {
+		sespp = (struct pasemi_session **)
+			kzalloc(sc->sc_nsessions * 2 *
+				sizeof(struct pasemi_session *), GFP_ATOMIC);
+		if (sespp == NULL)
+			return -ENOMEM;
+		memcpy(sespp, sc->sc_sessions,
+		       sc->sc_nsessions * sizeof(struct pasemi_session *));
+		kfree(sc->sc_sessions);
+		sc->sc_sessions = sespp;
+		sesn = sc->sc_nsessions;
+		ses = sc->sc_sessions[sesn] = (struct pasemi_session *)
+			kzalloc(sizeof(struct pasemi_session), GFP_ATOMIC);
+		if (ses == NULL)
+			return -ENOMEM;
+		sc->sc_nsessions *= 2;
+	}
+
+	ses->used = 1;
+
+	ses->dma_addr = pci_map_single(sc->dma_pdev, (void *) ses->civ,
+				       sizeof(struct pasemi_session), DMA_TO_DEVICE);
+
+	/* enter the channel scheduler */
+	spin_lock_irqsave(&sc->sc_chnlock, flags);
+
+	/* ARC4 has to be processed by the even channel */
+	if (encini && (encini->cri_alg == CRYPTO_ARC4))
+		ses->chan = sc->sc_lastchn & ~1;
+	else
+		ses->chan = sc->sc_lastchn;
+	sc->sc_lastchn = (sc->sc_lastchn + 1) % sc->sc_num_channels;
+
+	spin_unlock_irqrestore(&sc->sc_chnlock, flags);
+
+	txring = &sc->tx[ses->chan];
+
+	if (encini) {
+		ses->ccmd = ccmd;
+		ses->keysz = (encini->cri_klen - 63) / 64;
+		memcpy(ses->key, encini->cri_key, (ses->keysz + 1) * 8);
+
+		pasemi_desc_start(&init_desc,
+				  XCT_CTRL_HDR(ses->chan, (encini && macini) ? 0x68 : 0x40, DMA_FN_CIV0));
+		pasemi_desc_build(&init_desc,
+				  XCT_FUN_SRC_PTR((encini && macini) ? 0x68 : 0x40, ses->dma_addr));
+	}
+	if (macini) {
+		if (macini->cri_alg == CRYPTO_MD5_HMAC ||
+		    macini->cri_alg == CRYPTO_SHA1_HMAC)
+			memcpy(ses->hkey, macini->cri_key, blksz);
+		else {
+			/* Load initialization constants(RFC 1321, 3174) */
+			ses->hiv[0] = 0x67452301efcdab89ULL;
+			ses->hiv[1] = 0x98badcfe10325476ULL;
+			ses->hiv[2] = 0xc3d2e1f000000000ULL;
+		}
+		ses->hseq = 0ULL;
+	}
+
+	spin_lock_irqsave(&txring->fill_lock, flags);
+
+	if (((txring->next_to_fill + pasemi_desc_size(&init_desc)) -
+	     txring->next_to_clean) > TX_RING_SIZE) {
+		spin_unlock_irqrestore(&txring->fill_lock, flags);
+		return ERESTART;
+	}
+
+	if (encini) {
+		pasemi_ring_add_desc(txring, &init_desc, NULL);
+		pasemi_ring_incr(sc, ses->chan,
+				 pasemi_desc_size(&init_desc));
+	}
+
+	txring->sesn = sesn;
+	spin_unlock_irqrestore(&txring->fill_lock, flags);
+
+	*sidp = PASEMI_SID(sesn);
+	return 0;
+}
+
+/*
+ * Deallocate a session.
+ */
+static int
+pasemi_freesession(device_t dev, u_int64_t tid)
+{
+	struct pasemi_softc *sc = device_get_softc(dev);
+	int session;
+	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	if (sc == NULL)
+		return -EINVAL;
+	session = PASEMI_SESSION(sid);
+	if (session >= sc->sc_nsessions || !sc->sc_sessions[session])
+		return -EINVAL;
+
+	pci_unmap_single(sc->dma_pdev,
+			 sc->sc_sessions[session]->dma_addr,
+			 sizeof(struct pasemi_session), DMA_TO_DEVICE);
+	memset(sc->sc_sessions[session], 0,
+	       sizeof(struct pasemi_session));
+
+	return 0;
+}
+
+static int
+pasemi_process(device_t dev, struct cryptop *crp, int hint)
+{
+
+	int err = 0, ivsize, srclen = 0, reinit = 0, reinit_size = 0, chsel;
+	struct pasemi_softc *sc = device_get_softc(dev);
+	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
+	caddr_t ivp;
+	struct pasemi_desc init_desc, work_desc;
+	struct pasemi_session *ses;
+	struct sk_buff *skb;
+	struct uio *uiop;
+	unsigned long flags;
+	struct pasemi_fnu_txring *txring;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	if (crp == NULL || crp->crp_callback == NULL || sc == NULL)
+		return -EINVAL;
+
+	crp->crp_etype = 0;
+	if (PASEMI_SESSION(crp->crp_sid) >= sc->sc_nsessions)
+		return -EINVAL;
+
+	ses = sc->sc_sessions[PASEMI_SESSION(crp->crp_sid)];
+
+	crd1 = crp->crp_desc;
+	if (crd1 == NULL) {
+		err = -EINVAL;
+		goto errout;
+	}
+	crd2 = crd1->crd_next;
+
+	if (ALG_IS_SIG(crd1->crd_alg)) {
+		maccrd = crd1;
+		if (crd2 == NULL)
+			enccrd = NULL;
+		else if (ALG_IS_CIPHER(crd2->crd_alg) &&
+			 (crd2->crd_flags & CRD_F_ENCRYPT) == 0)
+			enccrd = crd2;
+		else
+			goto erralg;
+	} else if (ALG_IS_CIPHER(crd1->crd_alg)) {
+		enccrd = crd1;
+		if (crd2 == NULL)
+			maccrd = NULL;
+		else if (ALG_IS_SIG(crd2->crd_alg) &&
+			 (crd1->crd_flags & CRD_F_ENCRYPT))
+			maccrd = crd2;
+		else
+			goto erralg;
+	} else
+		goto erralg;
+
+	chsel = ses->chan;
+
+	txring = &sc->tx[chsel];
+
+	if (enccrd && !maccrd) {
+		if (enccrd->crd_alg == CRYPTO_ARC4)
+			reinit = 1;
+		reinit_size = 0x40;
+		srclen = crp->crp_ilen;
+
+		pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I
+				  | XCT_FUN_FUN(chsel));
+		if (enccrd->crd_flags & CRD_F_ENCRYPT)
+			pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_ENC);
+		else
+			pasemi_desc_hdr(&work_desc, XCT_FUN_CRM_DEC);
+	} else if (enccrd && maccrd) {
+		if (enccrd->crd_alg == CRYPTO_ARC4)
+			reinit = 1;
+		reinit_size = 0x68;
+
+		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
+			/* Encrypt -> Authenticate */
+			pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_ENC_SIG
+					  | XCT_FUN_A | XCT_FUN_FUN(chsel));
+			srclen = maccrd->crd_skip + maccrd->crd_len;
+		} else {
+			/* Authenticate -> Decrypt */
+			pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG_DEC
+					  | XCT_FUN_24BRES | XCT_FUN_FUN(chsel));
+			pasemi_desc_build(&work_desc, 0);
+			pasemi_desc_build(&work_desc, 0);
+			pasemi_desc_build(&work_desc, 0);
+			work_desc.postop = PASEMI_CHECK_SIG;
+			srclen = crp->crp_ilen;
+		}
+
+		pasemi_desc_hdr(&work_desc, XCT_FUN_SHL(maccrd->crd_skip / 4));
+		pasemi_desc_hdr(&work_desc, XCT_FUN_CHL(enccrd->crd_skip - maccrd->crd_skip));
+	} else if (!enccrd && maccrd) {
+		srclen = maccrd->crd_len;
+
+		pasemi_desc_start(&init_desc,
+				  XCT_CTRL_HDR(chsel, 0x58, DMA_FN_HKEY0));
+		pasemi_desc_build(&init_desc,
+				  XCT_FUN_SRC_PTR(0x58, ((struct pasemi_session *)ses->dma_addr)->hkey));
+
+		pasemi_desc_start(&work_desc, XCT_FUN_O | XCT_FUN_I | XCT_FUN_CRM_SIG
+				  | XCT_FUN_A | XCT_FUN_FUN(chsel));
+	}
+
+	if (enccrd) {
+		switch (enccrd->crd_alg) {
+		case CRYPTO_3DES_CBC:
+			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_3DES |
+					XCT_FUN_BCM_CBC);
+			ivsize = sizeof(u64);
+			break;
+		case CRYPTO_DES_CBC:
+			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_DES |
+					XCT_FUN_BCM_CBC);
+			ivsize = sizeof(u64);
+			break;
+		case CRYPTO_AES_CBC:
+			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_AES |
+					XCT_FUN_BCM_CBC);
+			ivsize = 2 * sizeof(u64);
+			break;
+		case CRYPTO_ARC4:
+			pasemi_desc_hdr(&work_desc, XCT_FUN_ALG_ARC);
+			ivsize = 0;
+			break;
+		default:
+			printk(DRV_NAME ": unimplemented enccrd->crd_alg %d\n",
+			       enccrd->crd_alg);
+			err = -EINVAL;
+			goto errout;
+		}
+
+		ivp = (ivsize == sizeof(u64)) ? (caddr_t) &ses->civ[1] : (caddr_t) &ses->civ[0];
+		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
+			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
+				memcpy(ivp, enccrd->crd_iv, ivsize);
+			else
+				read_random(ivp, ivsize);
+			/* If IV is not present in the buffer already, it has to be copied there */
+			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
+				crypto_copyback(crp->crp_flags, crp->crp_buf,
+						enccrd->crd_inject, ivsize, ivp);
+		} else {
+			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
+				/* IV is provided expicitly in descriptor */
+				memcpy(ivp, enccrd->crd_iv, ivsize);
+			else
+				/* IV is provided in the packet */
+				crypto_copydata(crp->crp_flags, crp->crp_buf,
+						enccrd->crd_inject, ivsize,
+						ivp);
+		}
+	}
+
+	if (maccrd) {
+		switch (maccrd->crd_alg) {
+		case CRYPTO_MD5:
+			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_MD5 |
+					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
+			break;
+		case CRYPTO_SHA1:
+			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_SHA1 |
+					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
+			break;
+		case CRYPTO_MD5_HMAC:
+			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_MD5 |
+					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
+			break;
+		case CRYPTO_SHA1_HMAC:
+			pasemi_desc_hdr(&work_desc, XCT_FUN_SIG_HMAC_SHA1 |
+					XCT_FUN_HSZ((crp->crp_ilen - maccrd->crd_inject) / 4));
+			break;
+		default:
+			printk(DRV_NAME ": unimplemented maccrd->crd_alg %d\n",
+			       maccrd->crd_alg);
+			err = -EINVAL;
+			goto errout;
+		}
+	}
+
+	if (crp->crp_flags & CRYPTO_F_SKBUF) {
+		/* using SKB buffers */
+		skb = (struct sk_buff *)crp->crp_buf;
+		if (skb_shinfo(skb)->nr_frags) {
+			printk(DRV_NAME ": skb frags unimplemented\n");
+			err = -EINVAL;
+			goto errout;
+		}
+		pasemi_desc_build(
+			&work_desc,
+			XCT_FUN_DST_PTR(skb->len, pci_map_single(
+						sc->dma_pdev, skb->data,
+						skb->len, DMA_TO_DEVICE)));
+		pasemi_desc_build(
+			&work_desc,
+			XCT_FUN_SRC_PTR(
+				srclen, pci_map_single(
+					sc->dma_pdev, skb->data,
+					srclen, DMA_TO_DEVICE)));
+		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
+	} else if (crp->crp_flags & CRYPTO_F_IOV) {
+		/* using IOV buffers */
+		uiop = (struct uio *)crp->crp_buf;
+		if (uiop->uio_iovcnt > 1) {
+			printk(DRV_NAME ": iov frags unimplemented\n");
+			err = -EINVAL;
+			goto errout;
+		}
+
+		/* crp_olen is never set; always use crp_ilen */
+		pasemi_desc_build(
+			&work_desc,
+			XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
+						sc->dma_pdev,
+						uiop->uio_iov->iov_base,
+						crp->crp_ilen, DMA_TO_DEVICE)));
+		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
+
+		pasemi_desc_build(
+			&work_desc,
+			XCT_FUN_SRC_PTR(srclen, pci_map_single(
+						sc->dma_pdev,
+						uiop->uio_iov->iov_base,
+						srclen, DMA_TO_DEVICE)));
+	} else {
+		/* using contig buffers */
+		pasemi_desc_build(
+			&work_desc,
+			XCT_FUN_DST_PTR(crp->crp_ilen, pci_map_single(
+						sc->dma_pdev,
+						crp->crp_buf,
+						crp->crp_ilen, DMA_TO_DEVICE)));
+		pasemi_desc_build(
+			&work_desc,
+			XCT_FUN_SRC_PTR(srclen, pci_map_single(
+						sc->dma_pdev,
+						crp->crp_buf, srclen,
+						DMA_TO_DEVICE)));
+		pasemi_desc_hdr(&work_desc, XCT_FUN_LLEN(srclen));
+	}
+
+	spin_lock_irqsave(&txring->fill_lock, flags);
+
+	if (txring->sesn != PASEMI_SESSION(crp->crp_sid)) {
+		txring->sesn = PASEMI_SESSION(crp->crp_sid);
+		reinit = 1;
+	}
+
+	if (enccrd) {
+		pasemi_desc_start(&init_desc,
+				  XCT_CTRL_HDR(chsel, reinit ? reinit_size : 0x10, DMA_FN_CIV0));
+		pasemi_desc_build(&init_desc,
+				  XCT_FUN_SRC_PTR(reinit ? reinit_size : 0x10, ses->dma_addr));
+	}
+
+	if (((txring->next_to_fill + pasemi_desc_size(&init_desc) +
+	      pasemi_desc_size(&work_desc)) -
+	     txring->next_to_clean) > TX_RING_SIZE) {
+		spin_unlock_irqrestore(&txring->fill_lock, flags);
+		err = ERESTART;
+		goto errout;
+	}
+
+	pasemi_ring_add_desc(txring, &init_desc, NULL);
+	pasemi_ring_add_desc(txring, &work_desc, crp);
+
+	pasemi_ring_incr(sc, chsel,
+			 pasemi_desc_size(&init_desc) +
+			 pasemi_desc_size(&work_desc));
+
+	spin_unlock_irqrestore(&txring->fill_lock, flags);
+
+	mod_timer(&txring->crypto_timer, jiffies + TIMER_INTERVAL);
+
+	return 0;
+
+erralg:
+	printk(DRV_NAME ": unsupported algorithm or algorithm order alg1 %d alg2 %d\n",
+	       crd1->crd_alg, crd2->crd_alg);
+	err = -EINVAL;
+
+errout:
+	if (err != ERESTART) {
+		crp->crp_etype = err;
+		crypto_done(crp);
+	}
+	return err;
+}
+
+static int pasemi_clean_tx(struct pasemi_softc *sc, int chan)
+{
+	int i, j, ring_idx;
+	struct pasemi_fnu_txring *ring = &sc->tx[chan];
+	u16 delta_cnt;
+	int flags, loops = 10;
+	int desc_size;
+	struct cryptop *crp;
+
+	spin_lock_irqsave(&ring->clean_lock, flags);
+
+	while ((delta_cnt = (dma_status->tx_sta[sc->base_chan + chan]
+			     & PAS_STATUS_PCNT_M) - ring->total_pktcnt)
+	       && loops--) {
+
+		for (i = 0; i < delta_cnt; i++) {
+			desc_size = TX_DESC_INFO(ring, ring->next_to_clean).desc_size;
+			crp = TX_DESC_INFO(ring, ring->next_to_clean).cf_crp;
+			if (crp) {
+				ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
+				if (TX_DESC_INFO(ring, ring->next_to_clean).desc_postop & PASEMI_CHECK_SIG) {
+					/* Need to make sure signature matched,
+					 * if not - return error */
+					if (!(ring->desc[ring_idx + 1] & (1ULL << 63)))
+						crp->crp_etype = -EINVAL;
+				}
+				crypto_done(TX_DESC_INFO(ring,
+							 ring->next_to_clean).cf_crp);
+				TX_DESC_INFO(ring, ring->next_to_clean).cf_crp = NULL;
+				pci_unmap_single(
+					sc->dma_pdev,
+					XCT_PTR_ADDR_LEN(ring->desc[ring_idx + 1]),
+					PCI_DMA_TODEVICE);
+
+				ring->desc[ring_idx] = ring->desc[ring_idx + 1] = 0;
+
+				ring->next_to_clean++;
+				for (j = 1; j < desc_size; j++) {
+					ring_idx = 2 *
+						(ring->next_to_clean &
+						 (TX_RING_SIZE-1));
+					pci_unmap_single(
+						sc->dma_pdev,
+						XCT_PTR_ADDR_LEN(ring->desc[ring_idx]),
+						PCI_DMA_TODEVICE);
+					if (ring->desc[ring_idx + 1])
+						pci_unmap_single(
+							sc->dma_pdev,
+							XCT_PTR_ADDR_LEN(
+								ring->desc[
+									ring_idx + 1]),
+							PCI_DMA_TODEVICE);
+					ring->desc[ring_idx] =
+						ring->desc[ring_idx + 1] = 0;
+					ring->next_to_clean++;
+				}
+			} else {
+				for (j = 0; j < desc_size; j++) {
+					ring_idx = 2 * (ring->next_to_clean & (TX_RING_SIZE-1));
+					ring->desc[ring_idx] =
+						ring->desc[ring_idx + 1] = 0;
+					ring->next_to_clean++;
+				}
+			}
+		}
+
+		ring->total_pktcnt += delta_cnt;
+	}
+	spin_unlock_irqrestore(&ring->clean_lock, flags);
+
+	return 0;
+}
+
+static void sweepup_tx(struct pasemi_softc *sc)
+{
+	int i;
+
+	for (i = 0; i < sc->sc_num_channels; i++)
+		pasemi_clean_tx(sc, i);
+}
+
+static irqreturn_t pasemi_intr(int irq, void *arg, struct pt_regs *regs)
+{
+	struct pasemi_softc *sc = arg;
+	unsigned int reg;
+	int chan = irq - sc->base_irq;
+	int chan_index = sc->base_chan + chan;
+	u64 stat = dma_status->tx_sta[chan_index];
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	if (!(stat & PAS_STATUS_CAUSE_M))
+		return IRQ_NONE;
+
+	pasemi_clean_tx(sc, chan);
+
+	stat = dma_status->tx_sta[chan_index];
+
+	reg = PAS_IOB_DMA_TXCH_RESET_PINTC |
+		PAS_IOB_DMA_TXCH_RESET_PCNT(sc->tx[chan].total_pktcnt);
+
+	if (stat & PAS_STATUS_SOFT)
+		reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
+
+	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), reg);
+
+
+	return IRQ_HANDLED;
+}
+
+static int pasemi_dma_setup_tx_resources(struct pasemi_softc *sc, int chan)
+{
+	u32 val;
+	int chan_index = chan + sc->base_chan;
+	int ret;
+	struct pasemi_fnu_txring *ring;
+
+	ring = &sc->tx[chan];
+
+	spin_lock_init(&ring->fill_lock);
+	spin_lock_init(&ring->clean_lock);
+
+	ring->desc_info = kzalloc(sizeof(struct pasemi_desc_info) *
+				  TX_RING_SIZE, GFP_KERNEL);
+	if (!ring->desc_info)
+		return -ENOMEM;
+
+	/* Allocate descriptors */
+	ring->desc = dma_alloc_coherent(&sc->dma_pdev->dev,
+					TX_RING_SIZE *
+					2 * sizeof(u64),
+					&ring->dma, GFP_KERNEL);
+	if (!ring->desc)
+		return -ENOMEM;
+
+	memset((void *) ring->desc, 0, TX_RING_SIZE * 2 * sizeof(u64));
+
+	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_RESET(chan_index), 0x30);
+
+	ring->total_pktcnt = 0;
+
+	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEL(chan_index),
+		 PAS_DMA_TXCHAN_BASEL_BRBL(ring->dma));
+
+	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->dma >> 32);
+	val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 2);
+
+	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_BASEU(chan_index), val);
+
+	out_le32(sc->dma_regs + PAS_DMA_TXCHAN_CFG(chan_index),
+		 PAS_DMA_TXCHAN_CFG_TY_FUNC |
+		 PAS_DMA_TXCHAN_CFG_TATTR(chan) |
+		 PAS_DMA_TXCHAN_CFG_WT(2));
+
+	/* enable tx channel */
+	out_le32(sc->dma_regs +
+		 PAS_DMA_TXCHAN_TCMDSTA(chan_index),
+		 PAS_DMA_TXCHAN_TCMDSTA_EN);
+
+	out_le32(sc->iob_regs + PAS_IOB_DMA_TXCH_CFG(chan_index),
+		 PAS_IOB_DMA_TXCH_CFG_CNTTH(1000));
+
+	ring->next_to_fill = 0;
+	ring->next_to_clean = 0;
+
+	snprintf(ring->irq_name, sizeof(ring->irq_name),
+		 "%s%d", "crypto", chan);
+
+	ring->irq = irq_create_mapping(NULL, sc->base_irq + chan);
+	ret = request_irq(ring->irq, (irq_handler_t)
+			  pasemi_intr, IRQF_DISABLED, ring->irq_name, sc);
+	if (ret) {
+		printk(KERN_ERR DRV_NAME ": failed to hook irq %d ret %d\n",
+		       ring->irq, ret);
+		ring->irq = -1;
+		return ret;
+	}
+
+	setup_timer(&ring->crypto_timer, (void *) sweepup_tx, (unsigned long) sc);
+
+	return 0;
+}
+
+static device_method_t pasemi_methods = {
+	/* crypto device methods */
+	DEVMETHOD(cryptodev_newsession,		pasemi_newsession),
+	DEVMETHOD(cryptodev_freesession,	pasemi_freesession),
+	DEVMETHOD(cryptodev_process,		pasemi_process),
+};
+
+/* Set up the crypto device structure, private data,
+ * and anything else we need before we start */
+
+static int __devinit
+pasemi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct pasemi_softc *sc;
+	int ret, i;
+
+	DPRINTF(KERN_ERR "%s()\n", __FUNCTION__);
+
+	sc = kzalloc(sizeof(*sc), GFP_KERNEL);
+	if (!sc)
+		return -ENOMEM;
+
+	softc_device_init(sc, DRV_NAME, 1, pasemi_methods);
+
+	pci_set_drvdata(pdev, sc);
+
+	spin_lock_init(&sc->sc_chnlock);
+
+	sc->sc_sessions = (struct pasemi_session **)
+		kzalloc(PASEMI_INITIAL_SESSIONS *
+			sizeof(struct pasemi_session *), GFP_ATOMIC);
+	if (sc->sc_sessions == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	sc->sc_nsessions = PASEMI_INITIAL_SESSIONS;
+	sc->sc_lastchn = 0;
+	sc->base_irq = pdev->irq + 6;
+	sc->base_chan = 6;
+	sc->sc_cid = -1;
+	sc->dma_pdev = pdev;
+
+	sc->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
+	if (!sc->iob_pdev) {
+		dev_err(&pdev->dev, "Can't find I/O Bridge\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	/* This is hardcoded and ugly, but we have some firmware versions
+	 * who don't provide the register space in the device tree. Luckily
+	 * they are at well-known locations so we can just do the math here.
+	 */
+	sc->dma_regs =
+		ioremap(0xe0000000 + (sc->dma_pdev->devfn << 12), 0x2000);
+	sc->iob_regs =
+		ioremap(0xe0000000 + (sc->iob_pdev->devfn << 12), 0x2000);
+	if (!sc->dma_regs || !sc->iob_regs) {
+		dev_err(&pdev->dev, "Can't map registers\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	dma_status = __ioremap(0xfd800000, 0x1000, 0);
+	if (!dma_status) {
+		ret = -ENODEV;
+		dev_err(&pdev->dev, "Can't map dmastatus space\n");
+		goto out;
+	}
+
+	sc->tx = (struct pasemi_fnu_txring *)
+		kzalloc(sizeof(struct pasemi_fnu_txring)
+			* 8, GFP_KERNEL);
+	if (!sc->tx) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* Initialize the h/w */
+	out_le32(sc->dma_regs + PAS_DMA_COM_CFG,
+		 (in_le32(sc->dma_regs + PAS_DMA_COM_CFG) |
+		  PAS_DMA_COM_CFG_FWF));
+	out_le32(sc->dma_regs + PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);
+
+	for (i = 0; i < PASEMI_FNU_CHANNELS; i++) {
+		sc->sc_num_channels++;
+		ret = pasemi_dma_setup_tx_resources(sc, i);
+		if (ret)
+			goto out;
+	}
+
+	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),
+					 CRYPTOCAP_F_HARDWARE);
+	if (sc->sc_cid < 0) {
+		printk(KERN_ERR DRV_NAME ": could not get crypto driver id\n");
+		ret = -ENXIO;
+		goto out;
+	}
+
+	/* register algorithms with the framework */
+	printk(DRV_NAME ":");
+
+	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
+	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
+	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
+	crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
+	crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
+	crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
+	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
+	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
+
+	return 0;
+
+out:
+	pasemi_dma_remove(pdev);
+	return ret;
+}
+
+#define MAX_RETRIES 5000
+
+static void pasemi_free_tx_resources(struct pasemi_softc *sc, int chan)
+{
+	struct pasemi_fnu_txring *ring = &sc->tx[chan];
+	int chan_index = chan + sc->base_chan;
+	int retries;
+	u32 stat;
+
+	/* Stop the channel */
+	out_le32(sc->dma_regs +
+		 PAS_DMA_TXCHAN_TCMDSTA(chan_index),
+		 PAS_DMA_TXCHAN_TCMDSTA_ST);
+
+	for (retries = 0; retries < MAX_RETRIES; retries++) {
+		stat = in_le32(sc->dma_regs +
+			       PAS_DMA_TXCHAN_TCMDSTA(chan_index));
+		if (!(stat & PAS_DMA_TXCHAN_TCMDSTA_ACT))
+			break;
+		cond_resched();
+	}
+
+	if (stat & PAS_DMA_TXCHAN_TCMDSTA_ACT)
+		dev_err(&sc->dma_pdev->dev, "Failed to stop tx channel %d\n",
+			chan_index);
+
+	/* Disable the channel */
+	out_le32(sc->dma_regs +
+		 PAS_DMA_TXCHAN_TCMDSTA(chan_index),
+		 0);
+
+	if (ring->desc_info)
+		kfree((void *) ring->desc_info);
+	if (ring->desc)
+		dma_free_coherent(&sc->dma_pdev->dev,
+				  TX_RING_SIZE *
+				  2 * sizeof(u64),
+				  (void *) ring->desc, ring->dma);
+	if (ring->irq != -1)
+		free_irq(ring->irq, sc);
+
+	del_timer(&ring->crypto_timer);
+}
+
+static void __devexit pasemi_dma_remove(struct pci_dev *pdev)
+{
+	struct pasemi_softc *sc = pci_get_drvdata(pdev);
+	int i;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	if (sc->sc_cid >= 0) {
+		crypto_unregister_all(sc->sc_cid);
+	}
+
+	if (sc->tx) {
+		for (i = 0; i < sc->sc_num_channels; i++)
+			pasemi_free_tx_resources(sc, i);
+
+		kfree(sc->tx);
+	}
+	if (sc->sc_sessions) {
+		for (i = 0; i < sc->sc_nsessions; i++)
+			kfree(sc->sc_sessions[i]);
+		kfree(sc->sc_sessions);
+	}
+	if (sc->iob_pdev)
+		pci_dev_put(sc->iob_pdev);
+	if (sc->dma_regs)
+		iounmap(sc->dma_regs);
+	if (sc->iob_regs)
+		iounmap(sc->iob_regs);
+	kfree(sc);
+}
+
+static struct pci_device_id pasemi_dma_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa007) },
+};
+
+MODULE_DEVICE_TABLE(pci, pasemi_dma_pci_tbl);
+
+static struct pci_driver pasemi_dma_driver = {
+	.name		= "pasemi_dma",
+	.id_table	= pasemi_dma_pci_tbl,
+	.probe		= pasemi_dma_probe,
+	.remove		= __devexit_p(pasemi_dma_remove),
+};
+
+static void __exit pasemi_dma_cleanup_module(void)
+{
+	pci_unregister_driver(&pasemi_dma_driver);
+	__iounmap(dma_status);
+	dma_status = NULL;
+}
+
+int pasemi_dma_init_module(void)
+{
+	return pci_register_driver(&pasemi_dma_driver);
+}
+
+module_init(pasemi_dma_init_module);
+module_exit(pasemi_dma_cleanup_module);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Egor Martovetsky egor@pasemi.com");
+MODULE_DESCRIPTION("OCF driver for PA Semi PWRficient DMA Crypto Engine");
diff --git a/crypto/ocf/pasemi/pasemi_fnu.h b/crypto/ocf/pasemi/pasemi_fnu.h
new file mode 100644
index 000000000000..1a0dcc8bbd37
--- /dev/null
+++ b/crypto/ocf/pasemi/pasemi_fnu.h
@@ -0,0 +1,410 @@
+/*
+ * Copyright (C) 2007 PA Semi, Inc
+ *
+ * Driver for the PA Semi PWRficient DMA Crypto Engine, soft state and
+ * hardware register layouts.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#ifndef PASEMI_FNU_H
+#define PASEMI_FNU_H
+
+#include <linux/spinlock.h>
+
+#define	PASEMI_SESSION(sid)	((sid) & 0xffffffff)
+#define	PASEMI_SID(sesn)	((sesn) & 0xffffffff)
+#define	DPRINTF(a...)	if (debug) { printk(DRV_NAME ": " a); }
+
+/* Must be a power of two */
+#define RX_RING_SIZE 512
+#define TX_RING_SIZE 512
+#define TX_DESC(ring, num)	((ring)->desc[2 * (num & (TX_RING_SIZE-1))])
+#define TX_DESC_INFO(ring, num)	((ring)->desc_info[(num) & (TX_RING_SIZE-1)])
+#define MAX_DESC_SIZE 8
+#define PASEMI_INITIAL_SESSIONS 10
+#define PASEMI_FNU_CHANNELS 8
+
+/* DMA descriptor */
+struct pasemi_desc {
+	u64 quad[2*MAX_DESC_SIZE];
+	int quad_cnt;
+	int size;
+	int postop;
+};
+
+/*
+ * Holds per descriptor data
+ */
+struct pasemi_desc_info {
+	int			desc_size;
+	int			desc_postop;
+#define PASEMI_CHECK_SIG 0x1
+
+	struct cryptop          *cf_crp;
+};
+
+/*
+ * Holds per channel data
+ */
+struct pasemi_fnu_txring {
+	volatile u64		*desc;
+	volatile struct
+	pasemi_desc_info	*desc_info;
+	dma_addr_t		dma;
+	struct timer_list       crypto_timer;
+	spinlock_t		fill_lock;
+	spinlock_t		clean_lock;
+	unsigned int		next_to_fill;
+	unsigned int		next_to_clean;
+	u16			total_pktcnt;
+	int			irq;
+	int			sesn;
+	char			irq_name[10];
+};
+
+/*
+ * Holds data specific to a single pasemi device.
+ */
+struct pasemi_softc {
+	softc_device_decl	sc_cdev;
+	struct pci_dev		*dma_pdev;	/* device backpointer */
+	struct pci_dev		*iob_pdev;	/* device backpointer */
+	void __iomem		*dma_regs;
+	void __iomem		*iob_regs;
+	int			base_irq;
+	int			base_chan;
+	int32_t			sc_cid;		/* crypto tag */
+	int			sc_nsessions;
+	struct pasemi_session	**sc_sessions;
+	int			sc_num_channels;/* number of crypto channels */
+
+	/* pointer to the array of txring datastructures, one txring per channel */
+	struct pasemi_fnu_txring *tx;
+
+	/*
+	 * mutual exclusion for the channel scheduler
+	 */
+	spinlock_t		sc_chnlock;
+	/* last channel used, for now use round-robin to allocate channels */
+	int			sc_lastchn;
+};
+
+struct pasemi_session {
+	u64 civ[2];
+	u64 keysz;
+	u64 key[4];
+	u64 ccmd;
+	u64 hkey[4];
+	u64 hseq;
+	u64 giv[2];
+	u64 hiv[4];
+
+	int used;
+	dma_addr_t	dma_addr;
+	int chan;
+};
+
+/* status register layout in IOB region, at 0xfd800000 */
+struct pasdma_status {
+	u64 rx_sta[64];
+	u64 tx_sta[20];
+};
+
+#define ALG_IS_CIPHER(alg) ((alg == CRYPTO_DES_CBC)		|| \
+				(alg == CRYPTO_3DES_CBC)	|| \
+				(alg == CRYPTO_AES_CBC)		|| \
+				(alg == CRYPTO_ARC4)		|| \
+				(alg == CRYPTO_NULL_CBC))
+
+#define ALG_IS_SIG(alg) ((alg == CRYPTO_MD5)			|| \
+				(alg == CRYPTO_MD5_HMAC)	|| \
+				(alg == CRYPTO_SHA1)		|| \
+				(alg == CRYPTO_SHA1_HMAC)	|| \
+				(alg == CRYPTO_NULL_HMAC))
+
+enum {
+	PAS_DMA_COM_TXCMD = 0x100,	/* Transmit Command Register  */
+	PAS_DMA_COM_TXSTA = 0x104,	/* Transmit Status Register   */
+	PAS_DMA_COM_RXCMD = 0x108,	/* Receive Command Register   */
+	PAS_DMA_COM_RXSTA = 0x10c,	/* Receive Status Register    */
+	PAS_DMA_COM_CFG   = 0x114,	/* DMA Configuration Register */
+};
+
+/* All these registers live in the PCI configuration space for the DMA PCI
+ * device. Use the normal PCI config access functions for them.
+ */
+
+#define PAS_DMA_COM_CFG_FWF	0x18000000
+
+#define PAS_DMA_COM_TXCMD_EN	0x00000001 /* enable */
+#define PAS_DMA_COM_TXSTA_ACT	0x00000001 /* active */
+#define PAS_DMA_COM_RXCMD_EN	0x00000001 /* enable */
+#define PAS_DMA_COM_RXSTA_ACT	0x00000001 /* active */
+
+#define _PAS_DMA_TXCHAN_STRIDE	0x20    /* Size per channel		*/
+#define _PAS_DMA_TXCHAN_TCMDSTA	0x300	/* Command / Status		*/
+#define _PAS_DMA_TXCHAN_CFG	0x304	/* Configuration		*/
+#define _PAS_DMA_TXCHAN_DSCRBU	0x308	/* Descriptor BU Allocation	*/
+#define _PAS_DMA_TXCHAN_INCR	0x310	/* Descriptor increment		*/
+#define _PAS_DMA_TXCHAN_CNT	0x314	/* Descriptor count/offset	*/
+#define _PAS_DMA_TXCHAN_BASEL	0x318	/* Descriptor ring base (low)	*/
+#define _PAS_DMA_TXCHAN_BASEU	0x31c	/*			(high)	*/
+#define PAS_DMA_TXCHAN_TCMDSTA(c) (0x300+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_TCMDSTA_EN	0x00000001	/* Enabled */
+#define    PAS_DMA_TXCHAN_TCMDSTA_ST	0x00000002	/* Stop interface */
+#define    PAS_DMA_TXCHAN_TCMDSTA_ACT	0x00010000	/* Active */
+#define PAS_DMA_TXCHAN_CFG(c)     (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_CFG_TY_FUNC	0x00000002	/* Type = interface */
+#define    PAS_DMA_TXCHAN_CFG_TY_IFACE	0x00000000	/* Type = interface */
+#define    PAS_DMA_TXCHAN_CFG_TATTR_M	0x0000003c
+#define    PAS_DMA_TXCHAN_CFG_TATTR_S	2
+#define    PAS_DMA_TXCHAN_CFG_TATTR(x)	(((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \
+					 PAS_DMA_TXCHAN_CFG_TATTR_M)
+#define    PAS_DMA_TXCHAN_CFG_WT_M	0x000001c0
+#define    PAS_DMA_TXCHAN_CFG_WT_S	6
+#define    PAS_DMA_TXCHAN_CFG_WT(x)	(((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \
+					 PAS_DMA_TXCHAN_CFG_WT_M)
+#define    PAS_DMA_TXCHAN_CFG_LPSQ_FAST	0x00000400
+#define    PAS_DMA_TXCHAN_CFG_LPDQ_FAST	0x00000800
+#define    PAS_DMA_TXCHAN_CFG_CF	0x00001000	/* Clean first line */
+#define    PAS_DMA_TXCHAN_CFG_CL	0x00002000	/* Clean last line */
+#define    PAS_DMA_TXCHAN_CFG_UP	0x00004000	/* update tx descr when sent */
+#define PAS_DMA_TXCHAN_INCR(c)    (0x310+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define PAS_DMA_TXCHAN_BASEL(c)   (0x318+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_BASEL_BRBL_M	0xffffffc0
+#define    PAS_DMA_TXCHAN_BASEL_BRBL_S	0
+#define    PAS_DMA_TXCHAN_BASEL_BRBL(x)	(((x) << PAS_DMA_TXCHAN_BASEL_BRBL_S) & \
+					 PAS_DMA_TXCHAN_BASEL_BRBL_M)
+#define PAS_DMA_TXCHAN_BASEU(c)   (0x31c+(c)*_PAS_DMA_TXCHAN_STRIDE)
+#define    PAS_DMA_TXCHAN_BASEU_BRBH_M	0x00000fff
+#define    PAS_DMA_TXCHAN_BASEU_BRBH_S	0
+#define    PAS_DMA_TXCHAN_BASEU_BRBH(x)	(((x) << PAS_DMA_TXCHAN_BASEU_BRBH_S) & \
+					 PAS_DMA_TXCHAN_BASEU_BRBH_M)
+/* # of cache lines worth of buffer ring */
+#define    PAS_DMA_TXCHAN_BASEU_SIZ_M	0x3fff0000
+#define    PAS_DMA_TXCHAN_BASEU_SIZ_S	16		/* 0 = 16K */
+#define    PAS_DMA_TXCHAN_BASEU_SIZ(x)	(((x) << PAS_DMA_TXCHAN_BASEU_SIZ_S) & \
+					 PAS_DMA_TXCHAN_BASEU_SIZ_M)
+
+#define    PAS_STATUS_PCNT_M		0x000000000000ffffull
+#define    PAS_STATUS_PCNT_S		0
+#define    PAS_STATUS_DCNT_M		0x00000000ffff0000ull
+#define    PAS_STATUS_DCNT_S		16
+#define    PAS_STATUS_BPCNT_M		0x0000ffff00000000ull
+#define    PAS_STATUS_BPCNT_S		32
+#define    PAS_STATUS_CAUSE_M		0xf000000000000000ull
+#define    PAS_STATUS_TIMER		0x1000000000000000ull
+#define    PAS_STATUS_ERROR		0x2000000000000000ull
+#define    PAS_STATUS_SOFT		0x4000000000000000ull
+#define    PAS_STATUS_INT		0x8000000000000000ull
+
+#define PAS_IOB_DMA_RXCH_CFG(i)		(0x1100 + (i)*4)
+#define    PAS_IOB_DMA_RXCH_CFG_CNTTH_M		0x00000fff
+#define    PAS_IOB_DMA_RXCH_CFG_CNTTH_S		0
+#define    PAS_IOB_DMA_RXCH_CFG_CNTTH(x)	(((x) << PAS_IOB_DMA_RXCH_CFG_CNTTH_S) & \
+						 PAS_IOB_DMA_RXCH_CFG_CNTTH_M)
+#define PAS_IOB_DMA_TXCH_CFG(i)		(0x1200 + (i)*4)
+#define    PAS_IOB_DMA_TXCH_CFG_CNTTH_M		0x00000fff
+#define    PAS_IOB_DMA_TXCH_CFG_CNTTH_S		0
+#define    PAS_IOB_DMA_TXCH_CFG_CNTTH(x)	(((x) << PAS_IOB_DMA_TXCH_CFG_CNTTH_S) & \
+						 PAS_IOB_DMA_TXCH_CFG_CNTTH_M)
+#define PAS_IOB_DMA_RXCH_STAT(i)	(0x1300 + (i)*4)
+#define    PAS_IOB_DMA_RXCH_STAT_INTGEN	0x00001000
+#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL_M	0x00000fff
+#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL_S	0
+#define    PAS_IOB_DMA_RXCH_STAT_CNTDEL(x)	(((x) << PAS_IOB_DMA_RXCH_STAT_CNTDEL_S) &\
+						 PAS_IOB_DMA_RXCH_STAT_CNTDEL_M)
+#define PAS_IOB_DMA_TXCH_STAT(i)	(0x1400 + (i)*4)
+#define    PAS_IOB_DMA_TXCH_STAT_INTGEN	0x00001000
+#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL_M	0x00000fff
+#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL_S	0
+#define    PAS_IOB_DMA_TXCH_STAT_CNTDEL(x)	(((x) << PAS_IOB_DMA_TXCH_STAT_CNTDEL_S) &\
+						 PAS_IOB_DMA_TXCH_STAT_CNTDEL_M)
+#define PAS_IOB_DMA_RXCH_RESET(i)	(0x1500 + (i)*4)
+#define    PAS_IOB_DMA_RXCH_RESET_PCNT_M	0xffff0000
+#define    PAS_IOB_DMA_RXCH_RESET_PCNT_S	16
+#define    PAS_IOB_DMA_RXCH_RESET_PCNT(x)	(((x) << PAS_IOB_DMA_RXCH_RESET_PCNT_S) & \
+						 PAS_IOB_DMA_RXCH_RESET_PCNT_M)
+#define    PAS_IOB_DMA_RXCH_RESET_PCNTRST	0x00000020
+#define    PAS_IOB_DMA_RXCH_RESET_DCNTRST	0x00000010
+#define    PAS_IOB_DMA_RXCH_RESET_TINTC		0x00000008
+#define    PAS_IOB_DMA_RXCH_RESET_DINTC		0x00000004
+#define    PAS_IOB_DMA_RXCH_RESET_SINTC		0x00000002
+#define    PAS_IOB_DMA_RXCH_RESET_PINTC		0x00000001
+#define PAS_IOB_DMA_TXCH_RESET(i)	(0x1600 + (i)*4)
+#define    PAS_IOB_DMA_TXCH_RESET_PCNT_M	0xffff0000
+#define    PAS_IOB_DMA_TXCH_RESET_PCNT_S	16
+#define    PAS_IOB_DMA_TXCH_RESET_PCNT(x)	(((x) << PAS_IOB_DMA_TXCH_RESET_PCNT_S) & \
+						 PAS_IOB_DMA_TXCH_RESET_PCNT_M)
+#define    PAS_IOB_DMA_TXCH_RESET_PCNTRST	0x00000020
+#define    PAS_IOB_DMA_TXCH_RESET_DCNTRST	0x00000010
+#define    PAS_IOB_DMA_TXCH_RESET_TINTC		0x00000008
+#define    PAS_IOB_DMA_TXCH_RESET_DINTC		0x00000004
+#define    PAS_IOB_DMA_TXCH_RESET_SINTC		0x00000002
+#define    PAS_IOB_DMA_TXCH_RESET_PINTC		0x00000001
+
+#define PAS_IOB_DMA_COM_TIMEOUTCFG		0x1700
+#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M	0x00ffffff
+#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S	0
+#define    PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(x)	(((x) << PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_S) & \
+						 PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT_M)
+
+/* Transmit descriptor fields */
+#define	XCT_MACTX_T		0x8000000000000000ull
+#define	XCT_MACTX_ST		0x4000000000000000ull
+#define XCT_MACTX_NORES		0x0000000000000000ull
+#define XCT_MACTX_8BRES		0x1000000000000000ull
+#define XCT_MACTX_24BRES	0x2000000000000000ull
+#define XCT_MACTX_40BRES	0x3000000000000000ull
+#define XCT_MACTX_I		0x0800000000000000ull
+#define XCT_MACTX_O		0x0400000000000000ull
+#define XCT_MACTX_E		0x0200000000000000ull
+#define XCT_MACTX_VLAN_M	0x0180000000000000ull
+#define XCT_MACTX_VLAN_NOP	0x0000000000000000ull
+#define XCT_MACTX_VLAN_REMOVE	0x0080000000000000ull
+#define XCT_MACTX_VLAN_INSERT   0x0100000000000000ull
+#define XCT_MACTX_VLAN_REPLACE  0x0180000000000000ull
+#define XCT_MACTX_CRC_M		0x0060000000000000ull
+#define XCT_MACTX_CRC_NOP	0x0000000000000000ull
+#define XCT_MACTX_CRC_INSERT	0x0020000000000000ull
+#define XCT_MACTX_CRC_PAD	0x0040000000000000ull
+#define XCT_MACTX_CRC_REPLACE	0x0060000000000000ull
+#define XCT_MACTX_SS		0x0010000000000000ull
+#define XCT_MACTX_LLEN_M	0x00007fff00000000ull
+#define XCT_MACTX_LLEN_S	32ull
+#define XCT_MACTX_LLEN(x)	((((long)(x)) << XCT_MACTX_LLEN_S) & \
+				 XCT_MACTX_LLEN_M)
+#define XCT_MACTX_IPH_M		0x00000000f8000000ull
+#define XCT_MACTX_IPH_S		27ull
+#define XCT_MACTX_IPH(x)	((((long)(x)) << XCT_MACTX_IPH_S) & \
+				 XCT_MACTX_IPH_M)
+#define XCT_MACTX_IPO_M		0x0000000007c00000ull
+#define XCT_MACTX_IPO_S		22ull
+#define XCT_MACTX_IPO(x)	((((long)(x)) << XCT_MACTX_IPO_S) & \
+				 XCT_MACTX_IPO_M)
+#define XCT_MACTX_CSUM_M	0x0000000000000060ull
+#define XCT_MACTX_CSUM_NOP	0x0000000000000000ull
+#define XCT_MACTX_CSUM_TCP	0x0000000000000040ull
+#define XCT_MACTX_CSUM_UDP	0x0000000000000060ull
+#define XCT_MACTX_V6		0x0000000000000010ull
+#define XCT_MACTX_C		0x0000000000000004ull
+#define XCT_MACTX_AL2		0x0000000000000002ull
+
+#define XCT_PTR_T		0x8000000000000000ull
+#define XCT_PTR_LEN_M		0x7ffff00000000000ull
+#define XCT_PTR_LEN_S		44
+#define XCT_PTR_LEN(x)		((((long)(x)) << XCT_PTR_LEN_S) & \
+				 XCT_PTR_LEN_M)
+#define XCT_PTR_ADDR_M		0x00000fffffffffffull
+#define XCT_PTR_ADDR_S		0
+#define XCT_PTR_ADDR(x)		((((long)(x)) << XCT_PTR_ADDR_S) & \
+				 XCT_PTR_ADDR_M)
+
+/* Function descriptor fields */
+#define	XCT_FUN_T		0x8000000000000000ull
+#define	XCT_FUN_ST		0x4000000000000000ull
+#define XCT_FUN_NORES		0x0000000000000000ull
+#define XCT_FUN_8BRES		0x1000000000000000ull
+#define XCT_FUN_24BRES		0x2000000000000000ull
+#define XCT_FUN_40BRES		0x3000000000000000ull
+#define XCT_FUN_I		0x0800000000000000ull
+#define XCT_FUN_O		0x0400000000000000ull
+#define XCT_FUN_E		0x0200000000000000ull
+#define XCT_FUN_FUN_S		54
+#define XCT_FUN_FUN_M		0x01c0000000000000ull
+#define XCT_FUN_FUN(num)	((((long)(num)) << XCT_FUN_FUN_S) & \
+				XCT_FUN_FUN_M)
+#define XCT_FUN_CRM_NOP		0x0000000000000000ull
+#define XCT_FUN_CRM_SIG		0x0008000000000000ull
+#define XCT_FUN_CRM_ENC		0x0010000000000000ull
+#define XCT_FUN_CRM_DEC		0x0018000000000000ull
+#define XCT_FUN_CRM_SIG_ENC	0x0020000000000000ull
+#define XCT_FUN_CRM_ENC_SIG	0x0028000000000000ull
+#define XCT_FUN_CRM_SIG_DEC	0x0030000000000000ull
+#define XCT_FUN_CRM_DEC_SIG	0x0038000000000000ull
+#define XCT_FUN_LLEN_M		0x0007ffff00000000ull
+#define XCT_FUN_LLEN_S		32ULL
+#define XCT_FUN_LLEN(x)		((((long)(x)) << XCT_FUN_LLEN_S) & \
+				 XCT_FUN_LLEN_M)
+#define XCT_FUN_SHL_M		0x00000000f8000000ull
+#define XCT_FUN_SHL_S		27ull
+#define XCT_FUN_SHL(x)		((((long)(x)) << XCT_FUN_SHL_S) & \
+				 XCT_FUN_SHL_M)
+#define XCT_FUN_CHL_M		0x0000000007c00000ull
+#define XCT_FUN_CHL_S		22ull
+#define XCT_FUN_CHL(x)		((((long)(x)) << XCT_FUN_CHL_S) & \
+				 XCT_FUN_CHL_M)
+#define XCT_FUN_HSZ_M		0x00000000003c0000ull
+#define XCT_FUN_HSZ_S		18ull
+#define XCT_FUN_HSZ(x)		((((long)(x)) << XCT_FUN_HSZ_S) & \
+				 XCT_FUN_HSZ_M)
+#define XCT_FUN_ALG_DES		0x0000000000000000ull
+#define XCT_FUN_ALG_3DES	0x0000000000008000ull
+#define XCT_FUN_ALG_AES		0x0000000000010000ull
+#define XCT_FUN_ALG_ARC		0x0000000000018000ull
+#define XCT_FUN_ALG_KASUMI	0x0000000000020000ull
+#define XCT_FUN_BCM_ECB		0x0000000000000000ull
+#define XCT_FUN_BCM_CBC		0x0000000000001000ull
+#define XCT_FUN_BCM_CFB		0x0000000000002000ull
+#define XCT_FUN_BCM_OFB		0x0000000000003000ull
+#define XCT_FUN_BCM_CNT		0x0000000000003800ull
+#define XCT_FUN_BCM_KAS_F8	0x0000000000002800ull
+#define XCT_FUN_BCM_KAS_F9	0x0000000000001800ull
+#define XCT_FUN_BCP_NO_PAD	0x0000000000000000ull
+#define XCT_FUN_BCP_ZRO		0x0000000000000200ull
+#define XCT_FUN_BCP_PL		0x0000000000000400ull
+#define XCT_FUN_BCP_INCR	0x0000000000000600ull
+#define XCT_FUN_SIG_MD5		(0ull << 4)
+#define XCT_FUN_SIG_SHA1	(2ull << 4)
+#define XCT_FUN_SIG_HMAC_MD5	(8ull << 4)
+#define XCT_FUN_SIG_HMAC_SHA1	(10ull << 4)
+#define XCT_FUN_A		0x0000000000000008ull
+#define XCT_FUN_C		0x0000000000000004ull
+#define XCT_FUN_AL2		0x0000000000000002ull
+#define XCT_FUN_SE		0x0000000000000001ull
+
+#define XCT_FUN_SRC_PTR(len, addr)	(XCT_PTR_LEN(len) | XCT_PTR_ADDR(addr))
+#define XCT_FUN_DST_PTR(len, addr)	(XCT_FUN_SRC_PTR(len, addr) | \
+					0x8000000000000000ull)
+
+#define XCT_CTRL_HDR_FUN_NUM_M		0x01c0000000000000ull
+#define XCT_CTRL_HDR_FUN_NUM_S		54
+#define XCT_CTRL_HDR_LEN_M		0x0007ffff00000000ull
+#define XCT_CTRL_HDR_LEN_S		32
+#define XCT_CTRL_HDR_REG_M		0x00000000000000ffull
+#define XCT_CTRL_HDR_REG_S		0
+
+#define XCT_CTRL_HDR(funcN,len,reg)	(0x9400000000000000ull | \
+			((((long)(funcN)) << XCT_CTRL_HDR_FUN_NUM_S) \
+			& XCT_CTRL_HDR_FUN_NUM_M) | \
+			((((long)(len)) << \
+			XCT_CTRL_HDR_LEN_S) & XCT_CTRL_HDR_LEN_M) | \
+			((((long)(reg)) << \
+			XCT_CTRL_HDR_REG_S) & XCT_CTRL_HDR_REG_M))
+
+/* Function config command options */
+#define	DMA_CALGO_DES			0x00
+#define	DMA_CALGO_3DES			0x01
+#define	DMA_CALGO_AES			0x02
+#define	DMA_CALGO_ARC			0x03
+
+#define DMA_FN_CIV0			0x02
+#define DMA_FN_CIV1			0x03
+#define DMA_FN_HKEY0			0x0a
+
+#define XCT_PTR_ADDR_LEN(ptr)		((ptr) & XCT_PTR_ADDR_M), \
+			(((ptr) & XCT_PTR_LEN_M) >> XCT_PTR_LEN_S)
+
+#endif /* PASEMI_FNU_H */
diff --git a/crypto/ocf/random.c b/crypto/ocf/random.c
new file mode 100644
index 000000000000..0a2f7b91387d
--- /dev/null
+++ b/crypto/ocf/random.c
@@ -0,0 +1,316 @@
+/*
+ * A system independant way of adding entropy to the kernels pool
+ * this way the drivers can focus on the real work and we can take
+ * care of pushing it to the appropriate place in the kernel.
+ *
+ * This should be fast and callable from timers/interrupts
+ *
+ * Written by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2006-2010 David McCullough
+ * Copyright (C) 2004-2005 Intel Corporation.
+ *
+ * LICENSE TERMS
+ *
+ * The free distribution and use of this software in both source and binary
+ * form is allowed (with or without changes) provided that:
+ *
+ *   1. distributions of this source code include the above copyright
+ *      notice, this list of conditions and the following disclaimer;
+ *
+ *   2. distributions in binary form include the above copyright
+ *      notice, this list of conditions and the following disclaimer
+ *      in the documentation and/or other associated materials;
+ *
+ *   3. the copyright holder's name is not used to endorse products
+ *      built using this software without specific written permission.
+ *
+ * ALTERNATIVELY, provided that this notice is retained in full, this product
+ * may be distributed under the terms of the GNU General Public License (GPL),
+ * in which case the provisions of the GPL apply INSTEAD OF those given above.
+ *
+ * DISCLAIMER
+ *
+ * This software is provided 'as is' with no explicit or implied warranties
+ * in respect of its properties, including, but not limited to, correctness
+ * and/or fitness for purpose.
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/unistd.h>
+#include <linux/poll.h>
+#include <linux/random.h>
+#include <cryptodev.h>
+
+#ifdef CONFIG_OCF_FIPS
+#include "rndtest.h"
+#endif
+
+#ifndef HAS_RANDOM_INPUT_WAIT
+#error "Please do not enable OCF_RANDOMHARVEST unless you have applied patches"
+#endif
+
+/*
+ * a hack to access the debug levels from the crypto driver
+ */
+extern int crypto_debug;
+#define debug crypto_debug
+
+/*
+ * a list of all registered random providers
+ */
+static LIST_HEAD(random_ops);
+static int started = 0;
+static int initted = 0;
+
+struct random_op {
+	struct list_head random_list;
+	u_int32_t driverid;
+	int (*read_random)(void *arg, u_int32_t *buf, int len);
+	void *arg;
+};
+
+static int random_proc(void *arg);
+
+static pid_t		randomproc = (pid_t) -1;
+static spinlock_t	random_lock;
+
+/*
+ * just init the spin locks
+ */
+static int
+crypto_random_init(void)
+{
+	spin_lock_init(&random_lock);
+	initted = 1;
+	return(0);
+}
+
+/*
+ * Add the given random reader to our list (if not present)
+ * and start the thread (if not already started)
+ *
+ * we have to assume that driver id is ok for now
+ */
+int
+crypto_rregister(
+	u_int32_t driverid,
+	int (*read_random)(void *arg, u_int32_t *buf, int len),
+	void *arg)
+{
+	unsigned long flags;
+	int ret = 0;
+	struct random_op	*rops, *tmp;
+
+	dprintk("%s,%d: %s(0x%x, %p, %p)\n", __FILE__, __LINE__,
+			__FUNCTION__, driverid, read_random, arg);
+
+	if (!initted)
+		crypto_random_init();
+
+#if 0
+	struct cryptocap	*cap;
+
+	cap = crypto_checkdriver(driverid);
+	if (!cap)
+		return EINVAL;
+#endif
+
+	list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
+		if (rops->driverid == driverid && rops->read_random == read_random)
+			return EEXIST;
+	}
+
+	rops = (struct random_op *) kmalloc(sizeof(*rops), GFP_KERNEL);
+	if (!rops)
+		return ENOMEM;
+
+	rops->driverid    = driverid;
+	rops->read_random = read_random;
+	rops->arg = arg;
+
+	spin_lock_irqsave(&random_lock, flags);
+	list_add_tail(&rops->random_list, &random_ops);
+	if (!started) {
+		randomproc = kernel_thread(random_proc, NULL, CLONE_FS|CLONE_FILES);
+		if (randomproc < 0) {
+			ret = randomproc;
+			printk("crypto: crypto_rregister cannot start random thread; "
+					"error %d", ret);
+		} else
+			started = 1;
+	}
+	spin_unlock_irqrestore(&random_lock, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(crypto_rregister);
+
+int
+crypto_runregister_all(u_int32_t driverid)
+{
+	struct random_op *rops, *tmp;
+	unsigned long flags;
+
+	dprintk("%s,%d: %s(0x%x)\n", __FILE__, __LINE__, __FUNCTION__, driverid);
+
+	list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
+		if (rops->driverid == driverid) {
+			list_del(&rops->random_list);
+			kfree(rops);
+		}
+	}
+
+	spin_lock_irqsave(&random_lock, flags);
+	if (list_empty(&random_ops) && started)
+		kill_proc(randomproc, SIGKILL, 1);
+	spin_unlock_irqrestore(&random_lock, flags);
+	return(0);
+}
+EXPORT_SYMBOL(crypto_runregister_all);
+
+/*
+ * while we can add entropy to random.c continue to read random data from
+ * the drivers and push it to random.
+ */
+static int
+random_proc(void *arg)
+{
+	int n;
+	int wantcnt;
+	int bufcnt = 0;
+	int retval = 0;
+	int *buf = NULL;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+	daemonize();
+	spin_lock_irq(&current->sigmask_lock);
+	sigemptyset(&current->blocked);
+	recalc_sigpending(current);
+	spin_unlock_irq(&current->sigmask_lock);
+	sprintf(current->comm, "ocf-random");
+#else
+	daemonize("ocf-random");
+	allow_signal(SIGKILL);
+#endif
+
+	(void) get_fs();
+	set_fs(get_ds());
+
+#ifdef CONFIG_OCF_FIPS
+#define NUM_INT (RNDTEST_NBYTES/sizeof(int))
+#else
+#define NUM_INT 32
+#endif
+
+	/*
+	 * some devices can transferr their RNG data direct into memory,
+	 * so make sure it is device friendly
+	 */
+	buf = kmalloc(NUM_INT * sizeof(int), GFP_DMA);
+	if (NULL == buf) {
+		printk("crypto: RNG could not allocate memory\n");
+		retval = -ENOMEM;
+		goto bad_alloc;
+	}
+
+	wantcnt = NUM_INT;   /* start by adding some entropy */
+
+	/*
+	 * its possible due to errors or driver removal that we no longer
+	 * have anything to do,  if so exit or we will consume all the CPU
+	 * doing nothing
+	 */
+	while (!list_empty(&random_ops)) {
+		struct random_op	*rops, *tmp;
+
+#ifdef CONFIG_OCF_FIPS
+		if (wantcnt)
+			wantcnt = NUM_INT; /* FIPs mode can do 20000 bits or none */
+#endif
+
+		/* see if we can get enough entropy to make the world
+		 * a better place.
+		 */
+		while (bufcnt < wantcnt && bufcnt < NUM_INT) {
+			list_for_each_entry_safe(rops, tmp, &random_ops, random_list) {
+
+				n = (*rops->read_random)(rops->arg, &buf[bufcnt],
+							 NUM_INT - bufcnt);
+
+				/* on failure remove the random number generator */
+				if (n == -1) {
+					list_del(&rops->random_list);
+					printk("crypto: RNG (driverid=0x%x) failed, disabling\n",
+							rops->driverid);
+					kfree(rops);
+				} else if (n > 0)
+					bufcnt += n;
+			}
+			/* give up CPU for a bit, just in case as this is a loop */
+			schedule();
+		}
+
+
+#ifdef CONFIG_OCF_FIPS
+		if (bufcnt > 0 && rndtest_buf((unsigned char *) &buf[0])) {
+			dprintk("crypto: buffer had fips errors, discarding\n");
+			bufcnt = 0;
+		}
+#endif
+
+		/*
+		 * if we have a certified buffer,  we can send some data
+		 * to /dev/random and move along
+		 */
+		if (bufcnt > 0) {
+			/* add what we have */
+			random_input_words(buf, bufcnt, bufcnt*sizeof(int)*8);
+			bufcnt = 0;
+		}
+
+		/* give up CPU for a bit so we don't hog while filling */
+		schedule();
+
+		/* wait for needing more */
+		wantcnt = random_input_wait();
+
+		if (wantcnt <= 0)
+			wantcnt = 0; /* try to get some info again */
+		else
+			/* round up to one word or we can loop forever */
+			wantcnt = (wantcnt + (sizeof(int)*8)) / (sizeof(int)*8);
+		if (wantcnt > NUM_INT) {
+			wantcnt = NUM_INT;
+		}
+
+		if (signal_pending(current)) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+			spin_lock_irq(&current->sigmask_lock);
+#endif
+			flush_signals(current);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+			spin_unlock_irq(&current->sigmask_lock);
+#endif
+		}
+	}
+
+	kfree(buf);
+
+bad_alloc:
+	spin_lock_irq(&random_lock);
+	randomproc = (pid_t) -1;
+	started = 0;
+	spin_unlock_irq(&random_lock);
+
+	return retval;
+}
diff --git a/crypto/ocf/rndtest.c b/crypto/ocf/rndtest.c
new file mode 100644
index 000000000000..886414485592
--- /dev/null
+++ b/crypto/ocf/rndtest.c
@@ -0,0 +1,299 @@
+/*	$OpenBSD$	*/
+
+/*
+ * OCF/Linux port done by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2006-2010 David McCullough
+ * Copyright (C) 2004-2005 Intel Corporation.
+ * The license and original author are listed below.
+ *
+ * Copyright (c) 2002 Jason L. Wright (jason@thought.net)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by Jason L. Wright
+ * 4. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/unistd.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <cryptodev.h>
+#include "rndtest.h"
+
+static struct rndtest_stats rndstats;
+
+static	void rndtest_test(struct rndtest_state *);
+
+/* The tests themselves */
+static	int rndtest_monobit(struct rndtest_state *);
+static	int rndtest_runs(struct rndtest_state *);
+static	int rndtest_longruns(struct rndtest_state *);
+static	int rndtest_chi_4(struct rndtest_state *);
+
+static	int rndtest_runs_check(struct rndtest_state *, int, int *);
+static	void rndtest_runs_record(struct rndtest_state *, int, int *);
+
+static const struct rndtest_testfunc {
+	int (*test)(struct rndtest_state *);
+} rndtest_funcs[] = {
+	{ rndtest_monobit },
+	{ rndtest_runs },
+	{ rndtest_chi_4 },
+	{ rndtest_longruns },
+};
+
+#define	RNDTEST_NTESTS	(sizeof(rndtest_funcs)/sizeof(rndtest_funcs[0]))
+
+static void
+rndtest_test(struct rndtest_state *rsp)
+{
+	int i, rv = 0;
+
+	rndstats.rst_tests++;
+	for (i = 0; i < RNDTEST_NTESTS; i++)
+		rv |= (*rndtest_funcs[i].test)(rsp);
+	rsp->rs_discard = (rv != 0);
+}
+
+
+extern int crypto_debug;
+#define rndtest_verbose 2
+#define rndtest_report(rsp, failure, fmt, a...) \
+	{ if (failure || crypto_debug) { printk("rng_test: " fmt "\n", a); } else; }
+
+#define	RNDTEST_MONOBIT_MINONES	9725
+#define	RNDTEST_MONOBIT_MAXONES	10275
+
+static int
+rndtest_monobit(struct rndtest_state *rsp)
+{
+	int i, ones = 0, j;
+	u_int8_t r;
+
+	for (i = 0; i < RNDTEST_NBYTES; i++) {
+		r = rsp->rs_buf[i];
+		for (j = 0; j < 8; j++, r <<= 1)
+			if (r & 0x80)
+				ones++;
+	}
+	if (ones > RNDTEST_MONOBIT_MINONES &&
+	    ones < RNDTEST_MONOBIT_MAXONES) {
+		if (rndtest_verbose > 1)
+			rndtest_report(rsp, 0, "monobit pass (%d < %d < %d)",
+			    RNDTEST_MONOBIT_MINONES, ones,
+			    RNDTEST_MONOBIT_MAXONES);
+		return (0);
+	} else {
+		if (rndtest_verbose)
+			rndtest_report(rsp, 1,
+			    "monobit failed (%d ones)", ones);
+		rndstats.rst_monobit++;
+		return (-1);
+	}
+}
+
+#define	RNDTEST_RUNS_NINTERVAL	6
+
+static const struct rndtest_runs_tabs {
+	u_int16_t min, max;
+} rndtest_runs_tab[] = {
+	{ 2343, 2657 },
+	{ 1135, 1365 },
+	{ 542, 708 },
+	{ 251, 373 },
+	{ 111, 201 },
+	{ 111, 201 },
+};
+
+static int
+rndtest_runs(struct rndtest_state *rsp)
+{
+	int i, j, ones, zeros, rv = 0;
+	int onei[RNDTEST_RUNS_NINTERVAL], zeroi[RNDTEST_RUNS_NINTERVAL];
+	u_int8_t c;
+
+	bzero(onei, sizeof(onei));
+	bzero(zeroi, sizeof(zeroi));
+	ones = zeros = 0;
+	for (i = 0; i < RNDTEST_NBYTES; i++) {
+		c = rsp->rs_buf[i];
+		for (j = 0; j < 8; j++, c <<= 1) {
+			if (c & 0x80) {
+				ones++;
+				rndtest_runs_record(rsp, zeros, zeroi);
+				zeros = 0;
+			} else {
+				zeros++;
+				rndtest_runs_record(rsp, ones, onei);
+				ones = 0;
+			}
+		}
+	}
+	rndtest_runs_record(rsp, ones, onei);
+	rndtest_runs_record(rsp, zeros, zeroi);
+
+	rv |= rndtest_runs_check(rsp, 0, zeroi);
+	rv |= rndtest_runs_check(rsp, 1, onei);
+
+	if (rv)
+		rndstats.rst_runs++;
+
+	return (rv);
+}
+
+static void
+rndtest_runs_record(struct rndtest_state *rsp, int len, int *intrv)
+{
+	if (len == 0)
+		return;
+	if (len > RNDTEST_RUNS_NINTERVAL)
+		len = RNDTEST_RUNS_NINTERVAL;
+	len -= 1;
+	intrv[len]++;
+}
+
+static int
+rndtest_runs_check(struct rndtest_state *rsp, int val, int *src)
+{
+	int i, rv = 0;
+
+	for (i = 0; i < RNDTEST_RUNS_NINTERVAL; i++) {
+		if (src[i] < rndtest_runs_tab[i].min ||
+		    src[i] > rndtest_runs_tab[i].max) {
+			rndtest_report(rsp, 1,
+			    "%s interval %d failed (%d, %d-%d)",
+			    val ? "ones" : "zeros",
+			    i + 1, src[i], rndtest_runs_tab[i].min,
+			    rndtest_runs_tab[i].max);
+			rv = -1;
+		} else {
+			rndtest_report(rsp, 0,
+			    "runs pass %s interval %d (%d < %d < %d)",
+			    val ? "ones" : "zeros",
+			    i + 1, rndtest_runs_tab[i].min, src[i],
+			    rndtest_runs_tab[i].max);
+		}
+	}
+	return (rv);
+}
+
+static int
+rndtest_longruns(struct rndtest_state *rsp)
+{
+	int i, j, ones = 0, zeros = 0, maxones = 0, maxzeros = 0;
+	u_int8_t c;
+
+	for (i = 0; i < RNDTEST_NBYTES; i++) {
+		c = rsp->rs_buf[i];
+		for (j = 0; j < 8; j++, c <<= 1) {
+			if (c & 0x80) {
+				zeros = 0;
+				ones++;
+				if (ones > maxones)
+					maxones = ones;
+			} else {
+				ones = 0;
+				zeros++;
+				if (zeros > maxzeros)
+					maxzeros = zeros;
+			}
+		}
+	}
+
+	if (maxones < 26 && maxzeros < 26) {
+		rndtest_report(rsp, 0, "longruns pass (%d ones, %d zeros)",
+			maxones, maxzeros);
+		return (0);
+	} else {
+		rndtest_report(rsp, 1, "longruns fail (%d ones, %d zeros)",
+			maxones, maxzeros);
+		rndstats.rst_longruns++;
+		return (-1);
+	}
+}
+
+/*
+ * chi^2 test over 4 bits: (this is called the poker test in FIPS 140-2,
+ * but it is really the chi^2 test over 4 bits (the poker test as described
+ * by Knuth vol 2 is something different, and I take him as authoritative
+ * on nomenclature over NIST).
+ */
+#define	RNDTEST_CHI4_K	16
+#define	RNDTEST_CHI4_K_MASK	(RNDTEST_CHI4_K - 1)
+
+/*
+ * The unnormalized values are used so that we don't have to worry about
+ * fractional precision.  The "real" value is found by:
+ *	(V - 1562500) * (16 / 5000) = Vn   (where V is the unnormalized value)
+ */
+#define	RNDTEST_CHI4_VMIN	1563181		/* 2.1792 */
+#define	RNDTEST_CHI4_VMAX	1576929		/* 46.1728 */
+
+static int
+rndtest_chi_4(struct rndtest_state *rsp)
+{
+	unsigned int freq[RNDTEST_CHI4_K], i, sum;
+
+	for (i = 0; i < RNDTEST_CHI4_K; i++)
+		freq[i] = 0;
+
+	/* Get number of occurances of each 4 bit pattern */
+	for (i = 0; i < RNDTEST_NBYTES; i++) {
+		freq[(rsp->rs_buf[i] >> 4) & RNDTEST_CHI4_K_MASK]++;
+		freq[(rsp->rs_buf[i] >> 0) & RNDTEST_CHI4_K_MASK]++;
+	}
+
+	for (i = 0, sum = 0; i < RNDTEST_CHI4_K; i++)
+		sum += freq[i] * freq[i];
+
+	if (sum >= 1563181 && sum <= 1576929) {
+		rndtest_report(rsp, 0, "chi^2(4): pass (sum %u)", sum);
+		return (0);
+	} else {
+		rndtest_report(rsp, 1, "chi^2(4): failed (sum %u)", sum);
+		rndstats.rst_chi++;
+		return (-1);
+	}
+}
+
+int
+rndtest_buf(unsigned char *buf)
+{
+	struct rndtest_state rsp;
+
+	memset(&rsp, 0, sizeof(rsp));
+	rsp.rs_buf = buf;
+	rndtest_test(&rsp);
+	return(rsp.rs_discard);
+}
diff --git a/crypto/ocf/rndtest.h b/crypto/ocf/rndtest.h
new file mode 100644
index 000000000000..e9d8ec8d3949
--- /dev/null
+++ b/crypto/ocf/rndtest.h
@@ -0,0 +1,54 @@
+/*	$FreeBSD: src/sys/dev/rndtest/rndtest.h,v 1.1 2003/03/11 22:54:44 sam Exp $	*/
+/*	$OpenBSD$	*/
+
+/*
+ * Copyright (c) 2002 Jason L. Wright (jason@thought.net)
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *	This product includes software developed by Jason L. Wright
+ * 4. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/* Some of the tests depend on these values */
+#define	RNDTEST_NBYTES	2500
+#define	RNDTEST_NBITS	(8 * RNDTEST_NBYTES)
+
+struct rndtest_state {
+	int		rs_discard;	/* discard/accept random data */
+	u_int8_t	*rs_buf;
+};
+
+struct rndtest_stats {
+	u_int32_t	rst_discard;	/* number of bytes discarded */
+	u_int32_t	rst_tests;	/* number of test runs */
+	u_int32_t	rst_monobit;	/* monobit test failures */
+	u_int32_t	rst_runs;	/* 0/1 runs failures */
+	u_int32_t	rst_longruns;	/* longruns failures */
+	u_int32_t	rst_chi;	/* chi^2 failures */
+};
+
+extern int rndtest_buf(unsigned char *buf);
diff --git a/crypto/ocf/safe/Makefile b/crypto/ocf/safe/Makefile
new file mode 100644
index 000000000000..5a9d91f2fff5
--- /dev/null
+++ b/crypto/ocf/safe/Makefile
@@ -0,0 +1,11 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+obj-$(CONFIG_OCF_SAFE) += safe.o
+
+obj ?= .
+EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
diff --git a/crypto/ocf/safe/hmachack.h b/crypto/ocf/safe/hmachack.h
new file mode 100644
index 000000000000..cc1348b4702c
--- /dev/null
+++ b/crypto/ocf/safe/hmachack.h
@@ -0,0 +1,36 @@
+/*
+ * until we find a cleaner way, include the BSD md5/sha1 code
+ * here
+ */
+#ifdef HMAC_HACK
+#define LITTLE_ENDIAN 1234
+#define BIG_ENDIAN 4321
+#ifdef __LITTLE_ENDIAN
+#define BYTE_ORDER LITTLE_ENDIAN
+#endif
+#ifdef __BIG_ENDIAN
+#define BYTE_ORDER BIG_ENDIAN
+#endif
+
+u_int8_t hmac_ipad_buffer[64] = {
+    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
+    0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
+};
+
+u_int8_t hmac_opad_buffer[64] = {
+    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
+    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
+    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
+    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
+    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
+    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
+    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
+    0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
+};
+#endif /* HMAC_HACK */
diff --git a/crypto/ocf/safe/md5.c b/crypto/ocf/safe/md5.c
new file mode 100644
index 000000000000..8c6ded4022c7
--- /dev/null
+++ b/crypto/ocf/safe/md5.c
@@ -0,0 +1,308 @@
+/*	$KAME: md5.c,v 1.5 2000/11/08 06:13:08 itojun Exp $	*/
+/*
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#if 0
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/sys/crypto/md5.c,v 1.9 2004/01/27 19:49:19 des Exp $");
+
+#include <sys/types.h>
+#include <sys/cdefs.h>
+#include <sys/time.h>
+#include <sys/systm.h>
+#include <crypto/md5.h>
+#endif
+
+#define SHIFT(X, s) (((X) << (s)) | ((X) >> (32 - (s))))
+
+#define F(X, Y, Z) (((X) & (Y)) | ((~X) & (Z)))
+#define G(X, Y, Z) (((X) & (Z)) | ((Y) & (~Z)))
+#define H(X, Y, Z) ((X) ^ (Y) ^ (Z))
+#define I(X, Y, Z) ((Y) ^ ((X) | (~Z)))
+
+#define ROUND1(a, b, c, d, k, s, i) { \
+	(a) = (a) + F((b), (c), (d)) + X[(k)] + T[(i)]; \
+	(a) = SHIFT((a), (s)); \
+	(a) = (b) + (a); \
+}
+
+#define ROUND2(a, b, c, d, k, s, i) { \
+	(a) = (a) + G((b), (c), (d)) + X[(k)] + T[(i)]; \
+	(a) = SHIFT((a), (s)); \
+	(a) = (b) + (a); \
+}
+
+#define ROUND3(a, b, c, d, k, s, i) { \
+	(a) = (a) + H((b), (c), (d)) + X[(k)] + T[(i)]; \
+	(a) = SHIFT((a), (s)); \
+	(a) = (b) + (a); \
+}
+
+#define ROUND4(a, b, c, d, k, s, i) { \
+	(a) = (a) + I((b), (c), (d)) + X[(k)] + T[(i)]; \
+	(a) = SHIFT((a), (s)); \
+	(a) = (b) + (a); \
+}
+
+#define Sa	 7
+#define Sb	12
+#define Sc	17
+#define Sd	22
+
+#define Se	 5
+#define Sf	 9
+#define Sg	14
+#define Sh	20
+
+#define Si	 4
+#define Sj	11
+#define Sk	16
+#define Sl	23
+
+#define Sm	 6
+#define Sn	10
+#define So	15
+#define Sp	21
+
+#define MD5_A0	0x67452301
+#define MD5_B0	0xefcdab89
+#define MD5_C0	0x98badcfe
+#define MD5_D0	0x10325476
+
+/* Integer part of 4294967296 times abs(sin(i)), where i is in radians. */
+static const u_int32_t T[65] = {
+	0,
+	0xd76aa478, 	0xe8c7b756,	0x242070db,	0xc1bdceee,
+	0xf57c0faf,	0x4787c62a, 	0xa8304613,	0xfd469501,
+	0x698098d8,	0x8b44f7af,	0xffff5bb1,	0x895cd7be,
+	0x6b901122, 	0xfd987193, 	0xa679438e,	0x49b40821,
+
+	0xf61e2562,	0xc040b340, 	0x265e5a51, 	0xe9b6c7aa,
+	0xd62f105d,	0x2441453,	0xd8a1e681,	0xe7d3fbc8,
+	0x21e1cde6,	0xc33707d6, 	0xf4d50d87, 	0x455a14ed,
+	0xa9e3e905,	0xfcefa3f8, 	0x676f02d9, 	0x8d2a4c8a,
+
+	0xfffa3942,	0x8771f681, 	0x6d9d6122, 	0xfde5380c,
+	0xa4beea44, 	0x4bdecfa9, 	0xf6bb4b60, 	0xbebfbc70,
+	0x289b7ec6, 	0xeaa127fa, 	0xd4ef3085,	0x4881d05,
+	0xd9d4d039, 	0xe6db99e5, 	0x1fa27cf8, 	0xc4ac5665,
+
+	0xf4292244, 	0x432aff97, 	0xab9423a7, 	0xfc93a039,
+	0x655b59c3, 	0x8f0ccc92, 	0xffeff47d, 	0x85845dd1,
+	0x6fa87e4f, 	0xfe2ce6e0, 	0xa3014314, 	0x4e0811a1,
+	0xf7537e82, 	0xbd3af235, 	0x2ad7d2bb, 	0xeb86d391,
+};
+
+static const u_int8_t md5_paddat[MD5_BUFLEN] = {
+	0x80,	0,	0,	0,	0,	0,	0,	0,
+	0,	0,	0,	0,	0,	0,	0,	0,
+	0,	0,	0,	0,	0,	0,	0,	0,
+	0,	0,	0,	0,	0,	0,	0,	0,
+	0,	0,	0,	0,	0,	0,	0,	0,
+	0,	0,	0,	0,	0,	0,	0,	0,
+	0,	0,	0,	0,	0,	0,	0,	0,
+	0,	0,	0,	0,	0,	0,	0,	0,
+};
+
+static void md5_calc(u_int8_t *, md5_ctxt *);
+
+void md5_init(ctxt)
+	md5_ctxt *ctxt;
+{
+	ctxt->md5_n = 0;
+	ctxt->md5_i = 0;
+	ctxt->md5_sta = MD5_A0;
+	ctxt->md5_stb = MD5_B0;
+	ctxt->md5_stc = MD5_C0;
+	ctxt->md5_std = MD5_D0;
+	bzero(ctxt->md5_buf, sizeof(ctxt->md5_buf));
+}
+
+void md5_loop(ctxt, input, len)
+	md5_ctxt *ctxt;
+	u_int8_t *input;
+	u_int len; /* number of bytes */
+{
+	u_int gap, i;
+
+	ctxt->md5_n += len * 8; /* byte to bit */
+	gap = MD5_BUFLEN - ctxt->md5_i;
+
+	if (len >= gap) {
+		bcopy((void *)input, (void *)(ctxt->md5_buf + ctxt->md5_i),
+			gap);
+		md5_calc(ctxt->md5_buf, ctxt);
+
+		for (i = gap; i + MD5_BUFLEN <= len; i += MD5_BUFLEN) {
+			md5_calc((u_int8_t *)(input + i), ctxt);
+		}
+
+		ctxt->md5_i = len - i;
+		bcopy((void *)(input + i), (void *)ctxt->md5_buf, ctxt->md5_i);
+	} else {
+		bcopy((void *)input, (void *)(ctxt->md5_buf + ctxt->md5_i),
+			len);
+		ctxt->md5_i += len;
+	}
+}
+
+void md5_pad(ctxt)
+	md5_ctxt *ctxt;
+{
+	u_int gap;
+
+	/* Don't count up padding. Keep md5_n. */
+	gap = MD5_BUFLEN - ctxt->md5_i;
+	if (gap > 8) {
+		bcopy(md5_paddat,
+		      (void *)(ctxt->md5_buf + ctxt->md5_i),
+		      gap - sizeof(ctxt->md5_n));
+	} else {
+		/* including gap == 8 */
+		bcopy(md5_paddat, (void *)(ctxt->md5_buf + ctxt->md5_i),
+			gap);
+		md5_calc(ctxt->md5_buf, ctxt);
+		bcopy((md5_paddat + gap),
+		      (void *)ctxt->md5_buf,
+		      MD5_BUFLEN - sizeof(ctxt->md5_n));
+	}
+
+	/* 8 byte word */
+#if BYTE_ORDER == LITTLE_ENDIAN
+	bcopy(&ctxt->md5_n8[0], &ctxt->md5_buf[56], 8);
+#endif
+#if BYTE_ORDER == BIG_ENDIAN
+	ctxt->md5_buf[56] = ctxt->md5_n8[7];
+	ctxt->md5_buf[57] = ctxt->md5_n8[6];
+	ctxt->md5_buf[58] = ctxt->md5_n8[5];
+	ctxt->md5_buf[59] = ctxt->md5_n8[4];
+	ctxt->md5_buf[60] = ctxt->md5_n8[3];
+	ctxt->md5_buf[61] = ctxt->md5_n8[2];
+	ctxt->md5_buf[62] = ctxt->md5_n8[1];
+	ctxt->md5_buf[63] = ctxt->md5_n8[0];
+#endif
+
+	md5_calc(ctxt->md5_buf, ctxt);
+}
+
+void md5_result(digest, ctxt)
+	u_int8_t *digest;
+	md5_ctxt *ctxt;
+{
+	/* 4 byte words */
+#if BYTE_ORDER == LITTLE_ENDIAN
+	bcopy(&ctxt->md5_st8[0], digest, 16);
+#endif
+#if BYTE_ORDER == BIG_ENDIAN
+	digest[ 0] = ctxt->md5_st8[ 3]; digest[ 1] = ctxt->md5_st8[ 2];
+	digest[ 2] = ctxt->md5_st8[ 1]; digest[ 3] = ctxt->md5_st8[ 0];
+	digest[ 4] = ctxt->md5_st8[ 7]; digest[ 5] = ctxt->md5_st8[ 6];
+	digest[ 6] = ctxt->md5_st8[ 5]; digest[ 7] = ctxt->md5_st8[ 4];
+	digest[ 8] = ctxt->md5_st8[11]; digest[ 9] = ctxt->md5_st8[10];
+	digest[10] = ctxt->md5_st8[ 9]; digest[11] = ctxt->md5_st8[ 8];
+	digest[12] = ctxt->md5_st8[15]; digest[13] = ctxt->md5_st8[14];
+	digest[14] = ctxt->md5_st8[13]; digest[15] = ctxt->md5_st8[12];
+#endif
+}
+
+static void md5_calc(b64, ctxt)
+	u_int8_t *b64;
+	md5_ctxt *ctxt;
+{
+	u_int32_t A = ctxt->md5_sta;
+	u_int32_t B = ctxt->md5_stb;
+	u_int32_t C = ctxt->md5_stc;
+	u_int32_t D = ctxt->md5_std;
+#if BYTE_ORDER == LITTLE_ENDIAN
+	u_int32_t *X = (u_int32_t *)b64;
+#endif
+#if BYTE_ORDER == BIG_ENDIAN
+	/* 4 byte words */
+	/* what a brute force but fast! */
+	u_int32_t X[16];
+	u_int8_t *y = (u_int8_t *)X;
+	y[ 0] = b64[ 3]; y[ 1] = b64[ 2]; y[ 2] = b64[ 1]; y[ 3] = b64[ 0];
+	y[ 4] = b64[ 7]; y[ 5] = b64[ 6]; y[ 6] = b64[ 5]; y[ 7] = b64[ 4];
+	y[ 8] = b64[11]; y[ 9] = b64[10]; y[10] = b64[ 9]; y[11] = b64[ 8];
+	y[12] = b64[15]; y[13] = b64[14]; y[14] = b64[13]; y[15] = b64[12];
+	y[16] = b64[19]; y[17] = b64[18]; y[18] = b64[17]; y[19] = b64[16];
+	y[20] = b64[23]; y[21] = b64[22]; y[22] = b64[21]; y[23] = b64[20];
+	y[24] = b64[27]; y[25] = b64[26]; y[26] = b64[25]; y[27] = b64[24];
+	y[28] = b64[31]; y[29] = b64[30]; y[30] = b64[29]; y[31] = b64[28];
+	y[32] = b64[35]; y[33] = b64[34]; y[34] = b64[33]; y[35] = b64[32];
+	y[36] = b64[39]; y[37] = b64[38]; y[38] = b64[37]; y[39] = b64[36];
+	y[40] = b64[43]; y[41] = b64[42]; y[42] = b64[41]; y[43] = b64[40];
+	y[44] = b64[47]; y[45] = b64[46]; y[46] = b64[45]; y[47] = b64[44];
+	y[48] = b64[51]; y[49] = b64[50]; y[50] = b64[49]; y[51] = b64[48];
+	y[52] = b64[55]; y[53] = b64[54]; y[54] = b64[53]; y[55] = b64[52];
+	y[56] = b64[59]; y[57] = b64[58]; y[58] = b64[57]; y[59] = b64[56];
+	y[60] = b64[63]; y[61] = b64[62]; y[62] = b64[61]; y[63] = b64[60];
+#endif
+
+	ROUND1(A, B, C, D,  0, Sa,  1); ROUND1(D, A, B, C,  1, Sb,  2);
+	ROUND1(C, D, A, B,  2, Sc,  3); ROUND1(B, C, D, A,  3, Sd,  4);
+	ROUND1(A, B, C, D,  4, Sa,  5); ROUND1(D, A, B, C,  5, Sb,  6);
+	ROUND1(C, D, A, B,  6, Sc,  7); ROUND1(B, C, D, A,  7, Sd,  8);
+	ROUND1(A, B, C, D,  8, Sa,  9); ROUND1(D, A, B, C,  9, Sb, 10);
+	ROUND1(C, D, A, B, 10, Sc, 11); ROUND1(B, C, D, A, 11, Sd, 12);
+	ROUND1(A, B, C, D, 12, Sa, 13); ROUND1(D, A, B, C, 13, Sb, 14);
+	ROUND1(C, D, A, B, 14, Sc, 15); ROUND1(B, C, D, A, 15, Sd, 16);
+
+	ROUND2(A, B, C, D,  1, Se, 17); ROUND2(D, A, B, C,  6, Sf, 18);
+	ROUND2(C, D, A, B, 11, Sg, 19); ROUND2(B, C, D, A,  0, Sh, 20);
+	ROUND2(A, B, C, D,  5, Se, 21); ROUND2(D, A, B, C, 10, Sf, 22);
+	ROUND2(C, D, A, B, 15, Sg, 23); ROUND2(B, C, D, A,  4, Sh, 24);
+	ROUND2(A, B, C, D,  9, Se, 25); ROUND2(D, A, B, C, 14, Sf, 26);
+	ROUND2(C, D, A, B,  3, Sg, 27); ROUND2(B, C, D, A,  8, Sh, 28);
+	ROUND2(A, B, C, D, 13, Se, 29); ROUND2(D, A, B, C,  2, Sf, 30);
+	ROUND2(C, D, A, B,  7, Sg, 31); ROUND2(B, C, D, A, 12, Sh, 32);
+
+	ROUND3(A, B, C, D,  5, Si, 33); ROUND3(D, A, B, C,  8, Sj, 34);
+	ROUND3(C, D, A, B, 11, Sk, 35); ROUND3(B, C, D, A, 14, Sl, 36);
+	ROUND3(A, B, C, D,  1, Si, 37); ROUND3(D, A, B, C,  4, Sj, 38);
+	ROUND3(C, D, A, B,  7, Sk, 39); ROUND3(B, C, D, A, 10, Sl, 40);
+	ROUND3(A, B, C, D, 13, Si, 41); ROUND3(D, A, B, C,  0, Sj, 42);
+	ROUND3(C, D, A, B,  3, Sk, 43); ROUND3(B, C, D, A,  6, Sl, 44);
+	ROUND3(A, B, C, D,  9, Si, 45); ROUND3(D, A, B, C, 12, Sj, 46);
+	ROUND3(C, D, A, B, 15, Sk, 47); ROUND3(B, C, D, A,  2, Sl, 48);
+
+	ROUND4(A, B, C, D,  0, Sm, 49); ROUND4(D, A, B, C,  7, Sn, 50);
+	ROUND4(C, D, A, B, 14, So, 51); ROUND4(B, C, D, A,  5, Sp, 52);
+	ROUND4(A, B, C, D, 12, Sm, 53); ROUND4(D, A, B, C,  3, Sn, 54);
+	ROUND4(C, D, A, B, 10, So, 55); ROUND4(B, C, D, A,  1, Sp, 56);
+	ROUND4(A, B, C, D,  8, Sm, 57); ROUND4(D, A, B, C, 15, Sn, 58);
+	ROUND4(C, D, A, B,  6, So, 59); ROUND4(B, C, D, A, 13, Sp, 60);
+	ROUND4(A, B, C, D,  4, Sm, 61); ROUND4(D, A, B, C, 11, Sn, 62);
+	ROUND4(C, D, A, B,  2, So, 63); ROUND4(B, C, D, A,  9, Sp, 64);
+
+	ctxt->md5_sta += A;
+	ctxt->md5_stb += B;
+	ctxt->md5_stc += C;
+	ctxt->md5_std += D;
+}
diff --git a/crypto/ocf/safe/md5.h b/crypto/ocf/safe/md5.h
new file mode 100644
index 000000000000..690f5bfc11f3
--- /dev/null
+++ b/crypto/ocf/safe/md5.h
@@ -0,0 +1,76 @@
+/*	$FreeBSD: src/sys/crypto/md5.h,v 1.4 2002/03/20 05:13:50 alfred Exp $	*/
+/*	$KAME: md5.h,v 1.4 2000/03/27 04:36:22 sumikawa Exp $	*/
+
+/*
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _NETINET6_MD5_H_
+#define _NETINET6_MD5_H_
+
+#define MD5_BUFLEN	64
+
+typedef struct {
+	union {
+		u_int32_t	md5_state32[4];
+		u_int8_t	md5_state8[16];
+	} md5_st;
+
+#define md5_sta		md5_st.md5_state32[0]
+#define md5_stb		md5_st.md5_state32[1]
+#define md5_stc		md5_st.md5_state32[2]
+#define md5_std		md5_st.md5_state32[3]
+#define md5_st8		md5_st.md5_state8
+
+	union {
+		u_int64_t	md5_count64;
+		u_int8_t	md5_count8[8];
+	} md5_count;
+#define md5_n	md5_count.md5_count64
+#define md5_n8	md5_count.md5_count8
+
+	u_int	md5_i;
+	u_int8_t	md5_buf[MD5_BUFLEN];
+} md5_ctxt;
+
+extern void md5_init(md5_ctxt *);
+extern void md5_loop(md5_ctxt *, u_int8_t *, u_int);
+extern void md5_pad(md5_ctxt *);
+extern void md5_result(u_int8_t *, md5_ctxt *);
+
+/* compatibility */
+#define MD5_CTX		md5_ctxt
+#define MD5Init(x)	md5_init((x))
+#define MD5Update(x, y, z)	md5_loop((x), (y), (z))
+#define MD5Final(x, y) \
+do {				\
+	md5_pad((y));		\
+	md5_result((x), (y));	\
+} while (0)
+
+#endif /* ! _NETINET6_MD5_H_*/
diff --git a/crypto/ocf/safe/safe.c b/crypto/ocf/safe/safe.c
new file mode 100644
index 000000000000..a392b454f286
--- /dev/null
+++ b/crypto/ocf/safe/safe.c
@@ -0,0 +1,2230 @@
+/*-
+ * Linux port done by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2004-2010 David McCullough
+ * The license and original author are listed below.
+ *
+ * Copyright (c) 2003 Sam Leffler, Errno Consulting
+ * Copyright (c) 2003 Global Technology Associates, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+__FBSDID("$FreeBSD: src/sys/dev/safe/safe.c,v 1.18 2007/03/21 03:42:50 sam Exp $");
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <asm/io.h>
+
+/*
+ * SafeNet SafeXcel-1141 hardware crypto accelerator
+ */
+
+#include <cryptodev.h>
+#include <uio.h>
+#include <safe/safereg.h>
+#include <safe/safevar.h>
+
+#if 1
+#define	DPRINTF(a)	do { \
+						if (debug) { \
+							printk("%s: ", sc ? \
+								device_get_nameunit(sc->sc_dev) : "safe"); \
+							printk a; \
+						} \
+					} while (0)
+#else
+#define	DPRINTF(a)
+#endif
+
+/*
+ * until we find a cleaner way, include the BSD md5/sha1 code
+ * here
+ */
+#define HMAC_HACK 1
+#ifdef HMAC_HACK
+#include <safe/hmachack.h>
+#include <safe/md5.h>
+#include <safe/md5.c>
+#include <safe/sha1.h>
+#include <safe/sha1.c>
+#endif /* HMAC_HACK */
+
+/* add proc entry for this */
+struct safe_stats safestats;
+
+#define debug safe_debug
+int safe_debug = 0;
+module_param(safe_debug, int, 0644);
+MODULE_PARM_DESC(safe_debug, "Enable debug");
+
+static	void safe_callback(struct safe_softc *, struct safe_ringentry *);
+static	void safe_feed(struct safe_softc *, struct safe_ringentry *);
+#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
+static	void safe_rng_init(struct safe_softc *);
+int safe_rngbufsize = 8;		/* 32 bytes each read  */
+module_param(safe_rngbufsize, int, 0644);
+MODULE_PARM_DESC(safe_rngbufsize, "RNG polling buffer size (32-bit words)");
+int safe_rngmaxalarm = 8;		/* max alarms before reset */
+module_param(safe_rngmaxalarm, int, 0644);
+MODULE_PARM_DESC(safe_rngmaxalarm, "RNG max alarms before reset");
+#endif /* SAFE_NO_RNG */
+
+static void safe_totalreset(struct safe_softc *sc);
+static int safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op);
+static int safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op);
+static int safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re);
+static int safe_kprocess(device_t dev, struct cryptkop *krp, int hint);
+static int safe_kstart(struct safe_softc *sc);
+static int safe_ksigbits(struct safe_softc *sc, struct crparam *cr);
+static void safe_kfeed(struct safe_softc *sc);
+static void safe_kpoll(unsigned long arg);
+static void safe_kload_reg(struct safe_softc *sc, u_int32_t off,
+								u_int32_t len, struct crparam *n);
+
+static	int safe_newsession(device_t, u_int32_t *, struct cryptoini *);
+static	int safe_freesession(device_t, u_int64_t);
+static	int safe_process(device_t, struct cryptop *, int);
+
+static device_method_t safe_methods = {
+	/* crypto device methods */
+	DEVMETHOD(cryptodev_newsession,	safe_newsession),
+	DEVMETHOD(cryptodev_freesession,safe_freesession),
+	DEVMETHOD(cryptodev_process,	safe_process),
+	DEVMETHOD(cryptodev_kprocess,	safe_kprocess),
+};
+
+#define	READ_REG(sc,r)			readl((sc)->sc_base_addr + (r))
+#define WRITE_REG(sc,r,val)		writel((val), (sc)->sc_base_addr + (r))
+
+#define SAFE_MAX_CHIPS 8
+static struct safe_softc *safe_chip_idx[SAFE_MAX_CHIPS];
+
+/*
+ * split our buffers up into safe DMAable byte fragments to avoid lockup
+ * bug in 1141 HW on rev 1.0.
+ */
+
+static int
+pci_map_linear(
+	struct safe_softc *sc,
+	struct safe_operand *buf,
+	void *addr,
+	int len)
+{
+	dma_addr_t tmp;
+	int chunk, tlen = len;
+
+	tmp = pci_map_single(sc->sc_pcidev, addr, len, PCI_DMA_BIDIRECTIONAL);
+
+	buf->mapsize += len;
+	while (len > 0) {
+		chunk = (len > sc->sc_max_dsize) ? sc->sc_max_dsize : len;
+		buf->segs[buf->nsegs].ds_addr = tmp;
+		buf->segs[buf->nsegs].ds_len  = chunk;
+		buf->segs[buf->nsegs].ds_tlen = tlen;
+		buf->nsegs++;
+		tmp  += chunk;
+		len  -= chunk;
+		tlen = 0;
+	}
+	return 0;
+}
+
+/*
+ * map in a given uio buffer (great on some arches :-)
+ */
+
+static int
+pci_map_uio(struct safe_softc *sc, struct safe_operand *buf, struct uio *uio)
+{
+	struct iovec *iov = uio->uio_iov;
+	int n;
+
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	buf->mapsize = 0;
+	buf->nsegs = 0;
+
+	for (n = 0; n < uio->uio_iovcnt; n++) {
+		pci_map_linear(sc, buf, iov->iov_base, iov->iov_len);
+		iov++;
+	}
+
+	/* identify this buffer by the first segment */
+	buf->map = (void *) buf->segs[0].ds_addr;
+	return(0);
+}
+
+/*
+ * map in a given sk_buff
+ */
+
+static int
+pci_map_skb(struct safe_softc *sc,struct safe_operand *buf,struct sk_buff *skb)
+{
+	int i;
+
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	buf->mapsize = 0;
+	buf->nsegs = 0;
+
+	pci_map_linear(sc, buf, skb->data, skb_headlen(skb));
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		pci_map_linear(sc, buf,
+				page_address(skb_frag_page(&skb_shinfo(skb)->frags[i])) +
+				                        skb_shinfo(skb)->frags[i].page_offset,
+				skb_shinfo(skb)->frags[i].size);
+	}
+
+	/* identify this buffer by the first segment */
+	buf->map = (void *) buf->segs[0].ds_addr;
+	return(0);
+}
+
+
+#if 0 /* not needed at this time */
+static void
+pci_sync_operand(struct safe_softc *sc, struct safe_operand *buf)
+{
+	int i;
+
+	DPRINTF(("%s()\n", __FUNCTION__));
+	for (i = 0; i < buf->nsegs; i++)
+		pci_dma_sync_single_for_cpu(sc->sc_pcidev, buf->segs[i].ds_addr,
+				buf->segs[i].ds_len, PCI_DMA_BIDIRECTIONAL);
+}
+#endif
+
+static void
+pci_unmap_operand(struct safe_softc *sc, struct safe_operand *buf)
+{
+	int i;
+	DPRINTF(("%s()\n", __FUNCTION__));
+	for (i = 0; i < buf->nsegs; i++) {
+		if (buf->segs[i].ds_tlen) {
+			DPRINTF(("%s - unmap %d 0x%x %d\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
+			pci_unmap_single(sc->sc_pcidev, buf->segs[i].ds_addr,
+					buf->segs[i].ds_tlen, PCI_DMA_BIDIRECTIONAL);
+			DPRINTF(("%s - unmap %d 0x%x %d done\n", __FUNCTION__, i, buf->segs[i].ds_addr, buf->segs[i].ds_tlen));
+		}
+		buf->segs[i].ds_addr = 0;
+		buf->segs[i].ds_len = 0;
+		buf->segs[i].ds_tlen = 0;
+	}
+	buf->nsegs = 0;
+	buf->mapsize = 0;
+	buf->map = 0;
+}
+
+
+/*
+ * SafeXcel Interrupt routine
+ */
+static irqreturn_t
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
+safe_intr(int irq, void *arg)
+#else
+safe_intr(int irq, void *arg, struct pt_regs *regs)
+#endif
+{
+	struct safe_softc *sc = arg;
+	int stat;
+	unsigned long flags;
+
+	stat = READ_REG(sc, SAFE_HM_STAT);
+
+	DPRINTF(("%s(stat=0x%x)\n", __FUNCTION__, stat));
+
+	if (stat == 0)		/* shared irq, not for us */
+		return IRQ_NONE;
+
+	WRITE_REG(sc, SAFE_HI_CLR, stat);	/* IACK */
+
+	if ((stat & SAFE_INT_PE_DDONE)) {
+		/*
+		 * Descriptor(s) done; scan the ring and
+		 * process completed operations.
+		 */
+		spin_lock_irqsave(&sc->sc_ringmtx, flags);
+		while (sc->sc_back != sc->sc_front) {
+			struct safe_ringentry *re = sc->sc_back;
+
+#ifdef SAFE_DEBUG
+			if (debug) {
+				safe_dump_ringstate(sc, __func__);
+				safe_dump_request(sc, __func__, re);
+			}
+#endif
+			/*
+			 * safe_process marks ring entries that were allocated
+			 * but not used with a csr of zero.  This insures the
+			 * ring front pointer never needs to be set backwards
+			 * in the event that an entry is allocated but not used
+			 * because of a setup error.
+			 */
+			DPRINTF(("%s re->re_desc.d_csr=0x%x\n", __FUNCTION__, re->re_desc.d_csr));
+			if (re->re_desc.d_csr != 0) {
+				if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr)) {
+					DPRINTF(("%s !CSR_IS_DONE\n", __FUNCTION__));
+					break;
+				}
+				if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len)) {
+					DPRINTF(("%s !LEN_IS_DONE\n", __FUNCTION__));
+					break;
+				}
+				sc->sc_nqchip--;
+				safe_callback(sc, re);
+			}
+			if (++(sc->sc_back) == sc->sc_ringtop)
+				sc->sc_back = sc->sc_ring;
+		}
+		spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
+	}
+
+	/*
+	 * Check to see if we got any DMA Error
+	 */
+	if (stat & SAFE_INT_PE_ERROR) {
+		printk("%s: dmaerr dmastat %08x\n", device_get_nameunit(sc->sc_dev),
+				(int)READ_REG(sc, SAFE_PE_DMASTAT));
+		safestats.st_dmaerr++;
+		safe_totalreset(sc);
+#if 0
+		safe_feed(sc);
+#endif
+	}
+
+	if (sc->sc_needwakeup) {		/* XXX check high watermark */
+		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
+		DPRINTF(("%s: wakeup crypto %x\n", __func__,
+			sc->sc_needwakeup));
+		sc->sc_needwakeup &= ~wakeup;
+		crypto_unblock(sc->sc_cid, wakeup);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * safe_feed() - post a request to chip
+ */
+static void
+safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
+{
+	DPRINTF(("%s()\n", __FUNCTION__));
+#ifdef SAFE_DEBUG
+	if (debug) {
+		safe_dump_ringstate(sc, __func__);
+		safe_dump_request(sc, __func__, re);
+	}
+#endif
+	sc->sc_nqchip++;
+	if (sc->sc_nqchip > safestats.st_maxqchip)
+		safestats.st_maxqchip = sc->sc_nqchip;
+	/* poke h/w to check descriptor ring, any value can be written */
+	WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
+}
+
+#define	N(a)	(sizeof(a) / sizeof (a[0]))
+static void
+safe_setup_enckey(struct safe_session *ses, caddr_t key)
+{
+	int i;
+
+	bcopy(key, ses->ses_key, ses->ses_klen / 8);
+
+	/* PE is little-endian, insure proper byte order */
+	for (i = 0; i < N(ses->ses_key); i++)
+		ses->ses_key[i] = htole32(ses->ses_key[i]);
+}
+
+static void
+safe_setup_mackey(struct safe_session *ses, int algo, caddr_t key, int klen)
+{
+#ifdef HMAC_HACK
+	MD5_CTX md5ctx;
+	SHA1_CTX sha1ctx;
+	int i;
+
+
+	for (i = 0; i < klen; i++)
+		key[i] ^= HMAC_IPAD_VAL;
+
+	if (algo == CRYPTO_MD5_HMAC) {
+		MD5Init(&md5ctx);
+		MD5Update(&md5ctx, key, klen);
+		MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
+		bcopy(md5ctx.md5_st8, ses->ses_hminner, sizeof(md5ctx.md5_st8));
+	} else {
+		SHA1Init(&sha1ctx);
+		SHA1Update(&sha1ctx, key, klen);
+		SHA1Update(&sha1ctx, hmac_ipad_buffer,
+		    SHA1_HMAC_BLOCK_LEN - klen);
+		bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
+	}
+
+	for (i = 0; i < klen; i++)
+		key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
+
+	if (algo == CRYPTO_MD5_HMAC) {
+		MD5Init(&md5ctx);
+		MD5Update(&md5ctx, key, klen);
+		MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
+		bcopy(md5ctx.md5_st8, ses->ses_hmouter, sizeof(md5ctx.md5_st8));
+	} else {
+		SHA1Init(&sha1ctx);
+		SHA1Update(&sha1ctx, key, klen);
+		SHA1Update(&sha1ctx, hmac_opad_buffer,
+		    SHA1_HMAC_BLOCK_LEN - klen);
+		bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
+	}
+
+	for (i = 0; i < klen; i++)
+		key[i] ^= HMAC_OPAD_VAL;
+
+#if 0
+	/*
+	 * this code prevents SHA working on a BE host,
+	 * so it is obviously wrong.  I think the byte
+	 * swap setup we do with the chip fixes this for us
+	 */
+
+	/* PE is little-endian, insure proper byte order */
+	for (i = 0; i < N(ses->ses_hminner); i++) {
+		ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
+		ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
+	}
+#endif
+#else /* HMAC_HACK */
+	printk("safe: md5/sha not implemented\n");
+#endif /* HMAC_HACK */
+}
+#undef N
+
+/*
+ * Allocate a new 'session' and return an encoded session id.  'sidp'
+ * contains our registration id, and should contain an encoded session
+ * id on successful allocation.
+ */
+static int
+safe_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
+{
+	struct safe_softc *sc = device_get_softc(dev);
+	struct cryptoini *c, *encini = NULL, *macini = NULL;
+	struct safe_session *ses = NULL;
+	int sesn;
+
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	if (sidp == NULL || cri == NULL || sc == NULL)
+		return (EINVAL);
+
+	for (c = cri; c != NULL; c = c->cri_next) {
+		if (c->cri_alg == CRYPTO_MD5_HMAC ||
+		    c->cri_alg == CRYPTO_SHA1_HMAC ||
+		    c->cri_alg == CRYPTO_NULL_HMAC) {
+			if (macini)
+				return (EINVAL);
+			macini = c;
+		} else if (c->cri_alg == CRYPTO_DES_CBC ||
+		    c->cri_alg == CRYPTO_3DES_CBC ||
+		    c->cri_alg == CRYPTO_AES_CBC ||
+		    c->cri_alg == CRYPTO_NULL_CBC) {
+			if (encini)
+				return (EINVAL);
+			encini = c;
+		} else
+			return (EINVAL);
+	}
+	if (encini == NULL && macini == NULL)
+		return (EINVAL);
+	if (encini) {			/* validate key length */
+		switch (encini->cri_alg) {
+		case CRYPTO_DES_CBC:
+			if (encini->cri_klen != 64)
+				return (EINVAL);
+			break;
+		case CRYPTO_3DES_CBC:
+			if (encini->cri_klen != 192)
+				return (EINVAL);
+			break;
+		case CRYPTO_AES_CBC:
+			if (encini->cri_klen != 128 &&
+			    encini->cri_klen != 192 &&
+			    encini->cri_klen != 256)
+				return (EINVAL);
+			break;
+		}
+	}
+
+	if (sc->sc_sessions == NULL) {
+		ses = sc->sc_sessions = (struct safe_session *)
+			kmalloc(sizeof(struct safe_session), SLAB_ATOMIC);
+		if (ses == NULL)
+			return (ENOMEM);
+		memset(ses, 0, sizeof(struct safe_session));
+		sesn = 0;
+		sc->sc_nsessions = 1;
+	} else {
+		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
+			if (sc->sc_sessions[sesn].ses_used == 0) {
+				ses = &sc->sc_sessions[sesn];
+				break;
+			}
+		}
+
+		if (ses == NULL) {
+			sesn = sc->sc_nsessions;
+			ses = (struct safe_session *)
+				kmalloc((sesn + 1) * sizeof(struct safe_session), SLAB_ATOMIC);
+			if (ses == NULL)
+				return (ENOMEM);
+			memset(ses, 0, (sesn + 1) * sizeof(struct safe_session));
+			bcopy(sc->sc_sessions, ses, sesn *
+			    sizeof(struct safe_session));
+			bzero(sc->sc_sessions, sesn *
+			    sizeof(struct safe_session));
+			kfree(sc->sc_sessions);
+			sc->sc_sessions = ses;
+			ses = &sc->sc_sessions[sesn];
+			sc->sc_nsessions++;
+		}
+	}
+
+	bzero(ses, sizeof(struct safe_session));
+	ses->ses_used = 1;
+
+	if (encini) {
+		ses->ses_klen = encini->cri_klen;
+		if (encini->cri_key != NULL)
+			safe_setup_enckey(ses, encini->cri_key);
+	}
+
+	if (macini) {
+		ses->ses_mlen = macini->cri_mlen;
+		if (ses->ses_mlen == 0) {
+			if (macini->cri_alg == CRYPTO_MD5_HMAC)
+				ses->ses_mlen = MD5_HASH_LEN;
+			else
+				ses->ses_mlen = SHA1_HASH_LEN;
+		}
+
+		if (macini->cri_key != NULL) {
+			safe_setup_mackey(ses, macini->cri_alg, macini->cri_key,
+			    macini->cri_klen / 8);
+		}
+	}
+
+	*sidp = SAFE_SID(device_get_unit(sc->sc_dev), sesn);
+	return (0);
+}
+
+/*
+ * Deallocate a session.
+ */
+static int
+safe_freesession(device_t dev, u_int64_t tid)
+{
+	struct safe_softc *sc = device_get_softc(dev);
+	int session, ret;
+	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
+
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	if (sc == NULL)
+		return (EINVAL);
+
+	session = SAFE_SESSION(sid);
+	if (session < sc->sc_nsessions) {
+		bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
+		ret = 0;
+	} else
+		ret = EINVAL;
+	return (ret);
+}
+
+
+static int
+safe_process(device_t dev, struct cryptop *crp, int hint)
+{
+	struct safe_softc *sc = device_get_softc(dev);
+	int err = 0, i, nicealign, uniform;
+	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
+	int bypass, oplen, ivsize;
+	caddr_t iv;
+	int16_t coffset;
+	struct safe_session *ses;
+	struct safe_ringentry *re;
+	struct safe_sarec *sa;
+	struct safe_pdesc *pd;
+	u_int32_t cmd0, cmd1, staterec, rand_iv[4];
+	unsigned long flags;
+
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
+		safestats.st_invalid++;
+		return (EINVAL);
+	}
+	if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
+		safestats.st_badsession++;
+		return (EINVAL);
+	}
+
+	spin_lock_irqsave(&sc->sc_ringmtx, flags);
+	if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
+		safestats.st_ringfull++;
+		sc->sc_needwakeup |= CRYPTO_SYMQ;
+		spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
+		return (ERESTART);
+	}
+	re = sc->sc_front;
+
+	staterec = re->re_sa.sa_staterec;	/* save */
+	/* NB: zero everything but the PE descriptor */
+	bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
+	re->re_sa.sa_staterec = staterec;	/* restore */
+
+	re->re_crp = crp;
+	re->re_sesn = SAFE_SESSION(crp->crp_sid);
+
+	re->re_src.nsegs = 0;
+	re->re_dst.nsegs = 0;
+
+	if (crp->crp_flags & CRYPTO_F_SKBUF) {
+		re->re_src_skb = (struct sk_buff *)crp->crp_buf;
+		re->re_dst_skb = (struct sk_buff *)crp->crp_buf;
+	} else if (crp->crp_flags & CRYPTO_F_IOV) {
+		re->re_src_io = (struct uio *)crp->crp_buf;
+		re->re_dst_io = (struct uio *)crp->crp_buf;
+	} else {
+		safestats.st_badflags++;
+		err = EINVAL;
+		goto errout;	/* XXX we don't handle contiguous blocks! */
+	}
+
+	sa = &re->re_sa;
+	ses = &sc->sc_sessions[re->re_sesn];
+
+	crd1 = crp->crp_desc;
+	if (crd1 == NULL) {
+		safestats.st_nodesc++;
+		err = EINVAL;
+		goto errout;
+	}
+	crd2 = crd1->crd_next;
+
+	cmd0 = SAFE_SA_CMD0_BASIC;		/* basic group operation */
+	cmd1 = 0;
+	if (crd2 == NULL) {
+		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
+		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
+		    crd1->crd_alg == CRYPTO_NULL_HMAC) {
+			maccrd = crd1;
+			enccrd = NULL;
+			cmd0 |= SAFE_SA_CMD0_OP_HASH;
+		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
+		    crd1->crd_alg == CRYPTO_3DES_CBC ||
+		    crd1->crd_alg == CRYPTO_AES_CBC ||
+		    crd1->crd_alg == CRYPTO_NULL_CBC) {
+			maccrd = NULL;
+			enccrd = crd1;
+			cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
+		} else {
+			safestats.st_badalg++;
+			err = EINVAL;
+			goto errout;
+		}
+	} else {
+		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
+		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
+		    crd1->crd_alg == CRYPTO_NULL_HMAC) &&
+		    (crd2->crd_alg == CRYPTO_DES_CBC ||
+			crd2->crd_alg == CRYPTO_3DES_CBC ||
+		        crd2->crd_alg == CRYPTO_AES_CBC ||
+		        crd2->crd_alg == CRYPTO_NULL_CBC) &&
+		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
+			maccrd = crd1;
+			enccrd = crd2;
+		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
+		    crd1->crd_alg == CRYPTO_3DES_CBC ||
+		    crd1->crd_alg == CRYPTO_AES_CBC ||
+		    crd1->crd_alg == CRYPTO_NULL_CBC) &&
+		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
+			crd2->crd_alg == CRYPTO_SHA1_HMAC ||
+			crd2->crd_alg == CRYPTO_NULL_HMAC) &&
+		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
+			enccrd = crd1;
+			maccrd = crd2;
+		} else {
+			safestats.st_badalg++;
+			err = EINVAL;
+			goto errout;
+		}
+		cmd0 |= SAFE_SA_CMD0_OP_BOTH;
+	}
+
+	if (enccrd) {
+		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
+			safe_setup_enckey(ses, enccrd->crd_key);
+
+		if (enccrd->crd_alg == CRYPTO_DES_CBC) {
+			cmd0 |= SAFE_SA_CMD0_DES;
+			cmd1 |= SAFE_SA_CMD1_CBC;
+			ivsize = 2*sizeof(u_int32_t);
+		} else if (enccrd->crd_alg == CRYPTO_3DES_CBC) {
+			cmd0 |= SAFE_SA_CMD0_3DES;
+			cmd1 |= SAFE_SA_CMD1_CBC;
+			ivsize = 2*sizeof(u_int32_t);
+		} else if (enccrd->crd_alg == CRYPTO_AES_CBC) {
+			cmd0 |= SAFE_SA_CMD0_AES;
+			cmd1 |= SAFE_SA_CMD1_CBC;
+			if (ses->ses_klen == 128)
+			     cmd1 |=  SAFE_SA_CMD1_AES128;
+			else if (ses->ses_klen == 192)
+			     cmd1 |=  SAFE_SA_CMD1_AES192;
+			else
+			     cmd1 |=  SAFE_SA_CMD1_AES256;
+			ivsize = 4*sizeof(u_int32_t);
+		} else {
+			cmd0 |= SAFE_SA_CMD0_CRYPT_NULL;
+			ivsize = 0;
+		}
+
+		/*
+		 * Setup encrypt/decrypt state.  When using basic ops
+		 * we can't use an inline IV because hash/crypt offset
+		 * must be from the end of the IV to the start of the
+		 * crypt data and this leaves out the preceding header
+		 * from the hash calculation.  Instead we place the IV
+		 * in the state record and set the hash/crypt offset to
+		 * copy both the header+IV.
+		 */
+		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
+			cmd0 |= SAFE_SA_CMD0_OUTBOUND;
+
+			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
+				iv = enccrd->crd_iv;
+			else
+				read_random((iv = (caddr_t) &rand_iv[0]), sizeof(rand_iv));
+			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
+				crypto_copyback(crp->crp_flags, crp->crp_buf,
+				    enccrd->crd_inject, ivsize, iv);
+			}
+			bcopy(iv, re->re_sastate.sa_saved_iv, ivsize);
+			/* make iv LE */
+			for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
+				re->re_sastate.sa_saved_iv[i] =
+					cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
+			cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV;
+			re->re_flags |= SAFE_QFLAGS_COPYOUTIV;
+		} else {
+			cmd0 |= SAFE_SA_CMD0_INBOUND;
+
+			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
+				bcopy(enccrd->crd_iv,
+					re->re_sastate.sa_saved_iv, ivsize);
+			} else {
+				crypto_copydata(crp->crp_flags, crp->crp_buf,
+				    enccrd->crd_inject, ivsize,
+				    (caddr_t)re->re_sastate.sa_saved_iv);
+			}
+			/* make iv LE */
+			for (i = 0; i < ivsize/sizeof(re->re_sastate.sa_saved_iv[0]); i++)
+				re->re_sastate.sa_saved_iv[i] =
+					cpu_to_le32(re->re_sastate.sa_saved_iv[i]);
+			cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
+		}
+		/*
+		 * For basic encryption use the zero pad algorithm.
+		 * This pads results to an 8-byte boundary and
+		 * suppresses padding verification for inbound (i.e.
+		 * decrypt) operations.
+		 *
+		 * NB: Not sure if the 8-byte pad boundary is a problem.
+		 */
+		cmd0 |= SAFE_SA_CMD0_PAD_ZERO;
+
+		/* XXX assert key bufs have the same size */
+		bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
+	}
+
+	if (maccrd) {
+		if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
+			safe_setup_mackey(ses, maccrd->crd_alg,
+			    maccrd->crd_key, maccrd->crd_klen / 8);
+		}
+
+		if (maccrd->crd_alg == CRYPTO_MD5_HMAC) {
+			cmd0 |= SAFE_SA_CMD0_MD5;
+			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
+		} else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) {
+			cmd0 |= SAFE_SA_CMD0_SHA1;
+			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
+		} else {
+			cmd0 |= SAFE_SA_CMD0_HASH_NULL;
+		}
+		/*
+		 * Digest data is loaded from the SA and the hash
+		 * result is saved to the state block where we
+		 * retrieve it for return to the caller.
+		 */
+		/* XXX assert digest bufs have the same size */
+		bcopy(ses->ses_hminner, sa->sa_indigest,
+			sizeof(sa->sa_indigest));
+		bcopy(ses->ses_hmouter, sa->sa_outdigest,
+			sizeof(sa->sa_outdigest));
+
+		cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
+		re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
+	}
+
+	if (enccrd && maccrd) {
+		/*
+		 * The offset from hash data to the start of
+		 * crypt data is the difference in the skips.
+		 */
+		bypass = maccrd->crd_skip;
+		coffset = enccrd->crd_skip - maccrd->crd_skip;
+		if (coffset < 0) {
+			DPRINTF(("%s: hash does not precede crypt; "
+				"mac skip %u enc skip %u\n",
+				__func__, maccrd->crd_skip, enccrd->crd_skip));
+			safestats.st_skipmismatch++;
+			err = EINVAL;
+			goto errout;
+		}
+		oplen = enccrd->crd_skip + enccrd->crd_len;
+		if (maccrd->crd_skip + maccrd->crd_len != oplen) {
+			DPRINTF(("%s: hash amount %u != crypt amount %u\n",
+				__func__, maccrd->crd_skip + maccrd->crd_len,
+				oplen));
+			safestats.st_lenmismatch++;
+			err = EINVAL;
+			goto errout;
+		}
+#ifdef SAFE_DEBUG
+		if (debug) {
+			printf("mac: skip %d, len %d, inject %d\n",
+			    maccrd->crd_skip, maccrd->crd_len,
+			    maccrd->crd_inject);
+			printf("enc: skip %d, len %d, inject %d\n",
+			    enccrd->crd_skip, enccrd->crd_len,
+			    enccrd->crd_inject);
+			printf("bypass %d coffset %d oplen %d\n",
+				bypass, coffset, oplen);
+		}
+#endif
+		if (coffset & 3) {	/* offset must be 32-bit aligned */
+			DPRINTF(("%s: coffset %u misaligned\n",
+				__func__, coffset));
+			safestats.st_coffmisaligned++;
+			err = EINVAL;
+			goto errout;
+		}
+		coffset >>= 2;
+		if (coffset > 255) {	/* offset must be <256 dwords */
+			DPRINTF(("%s: coffset %u too big\n",
+				__func__, coffset));
+			safestats.st_cofftoobig++;
+			err = EINVAL;
+			goto errout;
+		}
+		/*
+		 * Tell the hardware to copy the header to the output.
+		 * The header is defined as the data from the end of
+		 * the bypass to the start of data to be encrypted.
+		 * Typically this is the inline IV.  Note that you need
+		 * to do this even if src+dst are the same; it appears
+		 * that w/o this bit the crypted data is written
+		 * immediately after the bypass data.
+		 */
+		cmd1 |= SAFE_SA_CMD1_HDRCOPY;
+		/*
+		 * Disable IP header mutable bit handling.  This is
+		 * needed to get correct HMAC calculations.
+		 */
+		cmd1 |= SAFE_SA_CMD1_MUTABLE;
+	} else {
+		if (enccrd) {
+			bypass = enccrd->crd_skip;
+			oplen = bypass + enccrd->crd_len;
+		} else {
+			bypass = maccrd->crd_skip;
+			oplen = bypass + maccrd->crd_len;
+		}
+		coffset = 0;
+	}
+	/* XXX verify multiple of 4 when using s/g */
+	if (bypass > 96) {		/* bypass offset must be <= 96 bytes */
+		DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
+		safestats.st_bypasstoobig++;
+		err = EINVAL;
+		goto errout;
+	}
+
+	if (crp->crp_flags & CRYPTO_F_SKBUF) {
+		if (pci_map_skb(sc, &re->re_src, re->re_src_skb)) {
+			safestats.st_noload++;
+			err = ENOMEM;
+			goto errout;
+		}
+	} else if (crp->crp_flags & CRYPTO_F_IOV) {
+		if (pci_map_uio(sc, &re->re_src, re->re_src_io)) {
+			safestats.st_noload++;
+			err = ENOMEM;
+			goto errout;
+		}
+	}
+	nicealign = safe_dmamap_aligned(sc, &re->re_src);
+	uniform = safe_dmamap_uniform(sc, &re->re_src);
+
+	DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
+		nicealign, uniform, re->re_src.nsegs));
+	if (re->re_src.nsegs > 1) {
+		re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
+			((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
+		for (i = 0; i < re->re_src_nsegs; i++) {
+			/* NB: no need to check if there's space */
+			pd = sc->sc_spfree;
+			if (++(sc->sc_spfree) == sc->sc_springtop)
+				sc->sc_spfree = sc->sc_spring;
+
+			KASSERT((pd->pd_flags&3) == 0 ||
+				(pd->pd_flags&3) == SAFE_PD_DONE,
+				("bogus source particle descriptor; flags %x",
+				pd->pd_flags));
+			pd->pd_addr = re->re_src_segs[i].ds_addr;
+			pd->pd_size = re->re_src_segs[i].ds_len;
+			pd->pd_flags = SAFE_PD_READY;
+		}
+		cmd0 |= SAFE_SA_CMD0_IGATHER;
+	} else {
+		/*
+		 * No need for gather, reference the operand directly.
+		 */
+		re->re_desc.d_src = re->re_src_segs[0].ds_addr;
+	}
+
+	if (enccrd == NULL && maccrd != NULL) {
+		/*
+		 * Hash op; no destination needed.
+		 */
+	} else {
+		if (crp->crp_flags & (CRYPTO_F_IOV|CRYPTO_F_SKBUF)) {
+			if (!nicealign) {
+				safestats.st_iovmisaligned++;
+				err = EINVAL;
+				goto errout;
+			}
+			if (uniform != 1) {
+				device_printf(sc->sc_dev, "!uniform source\n");
+				if (!uniform) {
+					/*
+					 * There's no way to handle the DMA
+					 * requirements with this uio.  We
+					 * could create a separate DMA area for
+					 * the result and then copy it back,
+					 * but for now we just bail and return
+					 * an error.  Note that uio requests
+					 * > SAFE_MAX_DSIZE are handled because
+					 * the DMA map and segment list for the
+					 * destination wil result in a
+					 * destination particle list that does
+					 * the necessary scatter DMA.
+					 */
+					safestats.st_iovnotuniform++;
+					err = EINVAL;
+					goto errout;
+				}
+			} else
+				re->re_dst = re->re_src;
+		} else {
+			safestats.st_badflags++;
+			err = EINVAL;
+			goto errout;
+		}
+
+		if (re->re_dst.nsegs > 1) {
+			re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
+			    ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
+			for (i = 0; i < re->re_dst_nsegs; i++) {
+				pd = sc->sc_dpfree;
+				KASSERT((pd->pd_flags&3) == 0 ||
+					(pd->pd_flags&3) == SAFE_PD_DONE,
+					("bogus dest particle descriptor; flags %x",
+						pd->pd_flags));
+				if (++(sc->sc_dpfree) == sc->sc_dpringtop)
+					sc->sc_dpfree = sc->sc_dpring;
+				pd->pd_addr = re->re_dst_segs[i].ds_addr;
+				pd->pd_flags = SAFE_PD_READY;
+			}
+			cmd0 |= SAFE_SA_CMD0_OSCATTER;
+		} else {
+			/*
+			 * No need for scatter, reference the operand directly.
+			 */
+			re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
+		}
+	}
+
+	/*
+	 * All done with setup; fillin the SA command words
+	 * and the packet engine descriptor.  The operation
+	 * is now ready for submission to the hardware.
+	 */
+	sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
+	sa->sa_cmd1 = cmd1
+		    | (coffset << SAFE_SA_CMD1_OFFSET_S)
+		    | SAFE_SA_CMD1_SAREV1	/* Rev 1 SA data structure */
+		    | SAFE_SA_CMD1_SRPCI
+		    ;
+	/*
+	 * NB: the order of writes is important here.  In case the
+	 * chip is scanning the ring because of an outstanding request
+	 * it might nab this one too.  In that case we need to make
+	 * sure the setup is complete before we write the length
+	 * field of the descriptor as it signals the descriptor is
+	 * ready for processing.
+	 */
+	re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
+	if (maccrd)
+		re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
+	wmb();
+	re->re_desc.d_len = oplen
+			  | SAFE_PE_LEN_READY
+			  | (bypass << SAFE_PE_LEN_BYPASS_S)
+			  ;
+
+	safestats.st_ipackets++;
+	safestats.st_ibytes += oplen;
+
+	if (++(sc->sc_front) == sc->sc_ringtop)
+		sc->sc_front = sc->sc_ring;
+
+	/* XXX honor batching */
+	safe_feed(sc, re);
+	spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
+	return (0);
+
+errout:
+	if (re->re_src.map != re->re_dst.map)
+		pci_unmap_operand(sc, &re->re_dst);
+	if (re->re_src.map)
+		pci_unmap_operand(sc, &re->re_src);
+	spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
+	if (err != ERESTART) {
+		crp->crp_etype = err;
+		crypto_done(crp);
+	} else {
+		sc->sc_needwakeup |= CRYPTO_SYMQ;
+	}
+	return (err);
+}
+
+static void
+safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
+{
+	struct cryptop *crp = (struct cryptop *)re->re_crp;
+	struct cryptodesc *crd;
+
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	safestats.st_opackets++;
+	safestats.st_obytes += re->re_dst.mapsize;
+
+	if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
+		device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
+			re->re_desc.d_csr,
+			re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
+		safestats.st_peoperr++;
+		crp->crp_etype = EIO;		/* something more meaningful? */
+	}
+
+	if (re->re_dst.map != NULL && re->re_dst.map != re->re_src.map)
+		pci_unmap_operand(sc, &re->re_dst);
+	pci_unmap_operand(sc, &re->re_src);
+
+	/*
+	 * If result was written to a differet mbuf chain, swap
+	 * it in as the return value and reclaim the original.
+	 */
+	if ((crp->crp_flags & CRYPTO_F_SKBUF) && re->re_src_skb != re->re_dst_skb) {
+		device_printf(sc->sc_dev, "no CRYPTO_F_SKBUF swapping support\n");
+		/* kfree_skb(skb) */
+		/* crp->crp_buf = (caddr_t)re->re_dst_skb */
+		return;
+	}
+
+	if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
+		/* copy out ICV result */
+		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
+			if (!(crd->crd_alg == CRYPTO_MD5_HMAC ||
+			    crd->crd_alg == CRYPTO_SHA1_HMAC ||
+			    crd->crd_alg == CRYPTO_NULL_HMAC))
+				continue;
+			if (crd->crd_alg == CRYPTO_SHA1_HMAC) {
+				/*
+				 * SHA-1 ICV's are byte-swapped; fix 'em up
+				 * before copy them to their destination.
+				 */
+				re->re_sastate.sa_saved_indigest[0] =
+					cpu_to_be32(re->re_sastate.sa_saved_indigest[0]);
+				re->re_sastate.sa_saved_indigest[1] =
+					cpu_to_be32(re->re_sastate.sa_saved_indigest[1]);
+				re->re_sastate.sa_saved_indigest[2] =
+					cpu_to_be32(re->re_sastate.sa_saved_indigest[2]);
+			} else {
+				re->re_sastate.sa_saved_indigest[0] =
+					cpu_to_le32(re->re_sastate.sa_saved_indigest[0]);
+				re->re_sastate.sa_saved_indigest[1] =
+					cpu_to_le32(re->re_sastate.sa_saved_indigest[1]);
+				re->re_sastate.sa_saved_indigest[2] =
+					cpu_to_le32(re->re_sastate.sa_saved_indigest[2]);
+			}
+			crypto_copyback(crp->crp_flags, crp->crp_buf,
+			    crd->crd_inject,
+			    sc->sc_sessions[re->re_sesn].ses_mlen,
+			    (caddr_t)re->re_sastate.sa_saved_indigest);
+			break;
+		}
+	}
+	crypto_done(crp);
+}
+
+
+#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
+#define	SAFE_RNG_MAXWAIT	1000
+
+static void
+safe_rng_init(struct safe_softc *sc)
+{
+	u_int32_t w, v;
+	int i;
+
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	WRITE_REG(sc, SAFE_RNG_CTRL, 0);
+	/* use default value according to the manual */
+	WRITE_REG(sc, SAFE_RNG_CNFG, 0x834);	/* magic from SafeNet */
+	WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
+
+	/*
+	 * There is a bug in rev 1.0 of the 1140 that when the RNG
+	 * is brought out of reset the ready status flag does not
+	 * work until the RNG has finished its internal initialization.
+	 *
+	 * So in order to determine the device is through its
+	 * initialization we must read the data register, using the
+	 * status reg in the read in case it is initialized.  Then read
+	 * the data register until it changes from the first read.
+	 * Once it changes read the data register until it changes
+	 * again.  At this time the RNG is considered initialized.
+	 * This could take between 750ms - 1000ms in time.
+	 */
+	i = 0;
+	w = READ_REG(sc, SAFE_RNG_OUT);
+	do {
+		v = READ_REG(sc, SAFE_RNG_OUT);
+		if (v != w) {
+			w = v;
+			break;
+		}
+		DELAY(10);
+	} while (++i < SAFE_RNG_MAXWAIT);
+
+	/* Wait Until data changes again */
+	i = 0;
+	do {
+		v = READ_REG(sc, SAFE_RNG_OUT);
+		if (v != w)
+			break;
+		DELAY(10);
+	} while (++i < SAFE_RNG_MAXWAIT);
+}
+
+static __inline void
+safe_rng_disable_short_cycle(struct safe_softc *sc)
+{
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	WRITE_REG(sc, SAFE_RNG_CTRL,
+		READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
+}
+
+static __inline void
+safe_rng_enable_short_cycle(struct safe_softc *sc)
+{
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	WRITE_REG(sc, SAFE_RNG_CTRL,
+		READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
+}
+
+static __inline u_int32_t
+safe_rng_read(struct safe_softc *sc)
+{
+	int i;
+
+	i = 0;
+	while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
+		;
+	return READ_REG(sc, SAFE_RNG_OUT);
+}
+
+static int
+safe_read_random(void *arg, u_int32_t *buf, int maxwords)
+{
+	struct safe_softc *sc = (struct safe_softc *) arg;
+	int i, rc;
+
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	safestats.st_rng++;
+	/*
+	 * Fetch the next block of data.
+	 */
+	if (maxwords > safe_rngbufsize)
+		maxwords = safe_rngbufsize;
+	if (maxwords > SAFE_RNG_MAXBUFSIZ)
+		maxwords = SAFE_RNG_MAXBUFSIZ;
+retry:
+	/* read as much as we can */
+	for (rc = 0; rc < maxwords; rc++) {
+		if (READ_REG(sc, SAFE_RNG_STAT) != 0)
+			break;
+		buf[rc] = READ_REG(sc, SAFE_RNG_OUT);
+	}
+	if (rc == 0)
+		return 0;
+	/*
+	 * Check the comparator alarm count and reset the h/w if
+	 * it exceeds our threshold.  This guards against the
+	 * hardware oscillators resonating with external signals.
+	 */
+	if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
+		u_int32_t freq_inc, w;
+
+		DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
+			(unsigned)READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
+		safestats.st_rngalarm++;
+		safe_rng_enable_short_cycle(sc);
+		freq_inc = 18;
+		for (i = 0; i < 64; i++) {
+			w = READ_REG(sc, SAFE_RNG_CNFG);
+			freq_inc = ((w + freq_inc) & 0x3fL);
+			w = ((w & ~0x3fL) | freq_inc);
+			WRITE_REG(sc, SAFE_RNG_CNFG, w);
+
+			WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
+
+			(void) safe_rng_read(sc);
+			DELAY(25);
+
+			if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
+				safe_rng_disable_short_cycle(sc);
+				goto retry;
+			}
+			freq_inc = 1;
+		}
+		safe_rng_disable_short_cycle(sc);
+	} else
+		WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
+
+	return(rc);
+}
+#endif /* defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG) */
+
+
+/*
+ * Resets the board.  Values in the regesters are left as is
+ * from the reset (i.e. initial values are assigned elsewhere).
+ */
+static void
+safe_reset_board(struct safe_softc *sc)
+{
+	u_int32_t v;
+	/*
+	 * Reset the device.  The manual says no delay
+	 * is needed between marking and clearing reset.
+	 */
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	v = READ_REG(sc, SAFE_PE_DMACFG) &~
+		(SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
+		 SAFE_PE_DMACFG_SGRESET);
+	WRITE_REG(sc, SAFE_PE_DMACFG, v
+				    | SAFE_PE_DMACFG_PERESET
+				    | SAFE_PE_DMACFG_PDRRESET
+				    | SAFE_PE_DMACFG_SGRESET);
+	WRITE_REG(sc, SAFE_PE_DMACFG, v);
+}
+
+/*
+ * Initialize registers we need to touch only once.
+ */
+static void
+safe_init_board(struct safe_softc *sc)
+{
+	u_int32_t v, dwords;
+
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	v = READ_REG(sc, SAFE_PE_DMACFG);
+	v &=~ (   SAFE_PE_DMACFG_PEMODE
+			| SAFE_PE_DMACFG_FSENA		/* failsafe enable */
+			| SAFE_PE_DMACFG_GPRPCI		/* gather ring on PCI */
+			| SAFE_PE_DMACFG_SPRPCI		/* scatter ring on PCI */
+			| SAFE_PE_DMACFG_ESDESC		/* endian-swap descriptors */
+			| SAFE_PE_DMACFG_ESPDESC	/* endian-swap part. desc's */
+			| SAFE_PE_DMACFG_ESSA		/* endian-swap SA's */
+			| SAFE_PE_DMACFG_ESPACKET	/* swap the packet data */
+		  );
+	v |= SAFE_PE_DMACFG_FSENA		/* failsafe enable */
+	  |  SAFE_PE_DMACFG_GPRPCI		/* gather ring on PCI */
+	  |  SAFE_PE_DMACFG_SPRPCI		/* scatter ring on PCI */
+	  |  SAFE_PE_DMACFG_ESDESC		/* endian-swap descriptors */
+	  |  SAFE_PE_DMACFG_ESPDESC		/* endian-swap part. desc's */
+	  |  SAFE_PE_DMACFG_ESSA		/* endian-swap SA's */
+#if 0
+	  |  SAFE_PE_DMACFG_ESPACKET    /* swap the packet data */
+#endif
+	  ;
+	WRITE_REG(sc, SAFE_PE_DMACFG, v);
+
+#ifdef __BIG_ENDIAN
+	/* tell the safenet that we are 4321 and not 1234 */
+	WRITE_REG(sc, SAFE_ENDIAN, 0xe4e41b1b);
+#endif
+
+	if (sc->sc_chiprev == SAFE_REV(1,0)) {
+		/*
+		 * Avoid large PCI DMA transfers.  Rev 1.0 has a bug where
+		 * "target mode transfers" done while the chip is DMA'ing
+		 * >1020 bytes cause the hardware to lockup.  To avoid this
+		 * we reduce the max PCI transfer size and use small source
+		 * particle descriptors (<= 256 bytes).
+		 */
+		WRITE_REG(sc, SAFE_DMA_CFG, 256);
+		device_printf(sc->sc_dev,
+			"Reduce max DMA size to %u words for rev %u.%u WAR\n",
+			(unsigned) ((READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff),
+			(unsigned) SAFE_REV_MAJ(sc->sc_chiprev),
+			(unsigned) SAFE_REV_MIN(sc->sc_chiprev));
+		sc->sc_max_dsize = 256;
+	} else {
+		sc->sc_max_dsize = SAFE_MAX_DSIZE;
+	}
+
+	/* NB: operands+results are overlaid */
+	WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
+	WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
+	/*
+	 * Configure ring entry size and number of items in the ring.
+	 */
+	KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
+		("PE ring entry not 32-bit aligned!"));
+	dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
+	WRITE_REG(sc, SAFE_PE_RINGCFG,
+		(dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
+	WRITE_REG(sc, SAFE_PE_RINGPOLL, 0);	/* disable polling */
+
+	WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
+	WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
+	WRITE_REG(sc, SAFE_PE_PARTSIZE,
+		(SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
+	/*
+	 * NB: destination particles are fixed size.  We use
+	 *     an mbuf cluster and require all results go to
+	 *     clusters or smaller.
+	 */
+	WRITE_REG(sc, SAFE_PE_PARTCFG, sc->sc_max_dsize);
+
+	/* it's now safe to enable PE mode, do it */
+	WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);
+
+	/*
+	 * Configure hardware to use level-triggered interrupts and
+	 * to interrupt after each descriptor is processed.
+	 */
+	WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
+	WRITE_REG(sc, SAFE_HI_CLR, 0xffffffff);
+	WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
+	WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
+}
+
+
+/*
+ * Clean up after a chip crash.
+ * It is assumed that the caller in splimp()
+ */
+static void
+safe_cleanchip(struct safe_softc *sc)
+{
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	if (sc->sc_nqchip != 0) {
+		struct safe_ringentry *re = sc->sc_back;
+
+		while (re != sc->sc_front) {
+			if (re->re_desc.d_csr != 0)
+				safe_free_entry(sc, re);
+			if (++re == sc->sc_ringtop)
+				re = sc->sc_ring;
+		}
+		sc->sc_back = re;
+		sc->sc_nqchip = 0;
+	}
+}
+
+/*
+ * free a safe_q
+ * It is assumed that the caller is within splimp().
+ */
+static int
+safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
+{
+	struct cryptop *crp;
+
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	/*
+	 * Free header MCR
+	 */
+	if ((re->re_dst_skb != NULL) && (re->re_src_skb != re->re_dst_skb))
+#ifdef NOTYET
+		m_freem(re->re_dst_m);
+#else
+		printk("%s,%d: SKB not supported\n", __FILE__, __LINE__);
+#endif
+
+	crp = (struct cryptop *)re->re_crp;
+
+	re->re_desc.d_csr = 0;
+
+	crp->crp_etype = EFAULT;
+	crypto_done(crp);
+	return(0);
+}
+
+/*
+ * Routine to reset the chip and clean up.
+ * It is assumed that the caller is in splimp()
+ */
+static void
+safe_totalreset(struct safe_softc *sc)
+{
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	safe_reset_board(sc);
+	safe_init_board(sc);
+	safe_cleanchip(sc);
+}
+
+/*
+ * Is the operand suitable aligned for direct DMA.  Each
+ * segment must be aligned on a 32-bit boundary and all
+ * but the last segment must be a multiple of 4 bytes.
+ */
+static int
+safe_dmamap_aligned(struct safe_softc *sc, const struct safe_operand *op)
+{
+	int i;
+
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	for (i = 0; i < op->nsegs; i++) {
+		if (op->segs[i].ds_addr & 3)
+			return (0);
+		if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
+			return (0);
+	}
+	return (1);
+}
+
+/*
+ * Is the operand suitable for direct DMA as the destination
+ * of an operation.  The hardware requires that each ``particle''
+ * but the last in an operation result have the same size.  We
+ * fix that size at SAFE_MAX_DSIZE bytes.  This routine returns
+ * 0 if some segment is not a multiple of of this size, 1 if all
+ * segments are exactly this size, or 2 if segments are at worst
+ * a multple of this size.
+ */
+static int
+safe_dmamap_uniform(struct safe_softc *sc, const struct safe_operand *op)
+{
+	int result = 1;
+
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	if (op->nsegs > 0) {
+		int i;
+
+		for (i = 0; i < op->nsegs-1; i++) {
+			if (op->segs[i].ds_len % sc->sc_max_dsize)
+				return (0);
+			if (op->segs[i].ds_len != sc->sc_max_dsize)
+				result = 2;
+		}
+	}
+	return (result);
+}
+
+static int
+safe_kprocess(device_t dev, struct cryptkop *krp, int hint)
+{
+	struct safe_softc *sc = device_get_softc(dev);
+	struct safe_pkq *q;
+	unsigned long flags;
+
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	if (sc == NULL) {
+		krp->krp_status = EINVAL;
+		goto err;
+	}
+
+	if (krp->krp_op != CRK_MOD_EXP) {
+		krp->krp_status = EOPNOTSUPP;
+		goto err;
+	}
+
+	q = (struct safe_pkq *) kmalloc(sizeof(*q), GFP_KERNEL);
+	if (q == NULL) {
+		krp->krp_status = ENOMEM;
+		goto err;
+	}
+	memset(q, 0, sizeof(*q));
+	q->pkq_krp = krp;
+	INIT_LIST_HEAD(&q->pkq_list);
+
+	spin_lock_irqsave(&sc->sc_pkmtx, flags);
+	list_add_tail(&q->pkq_list, &sc->sc_pkq);
+	safe_kfeed(sc);
+	spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
+	return (0);
+
+err:
+	crypto_kdone(krp);
+	return (0);
+}
+
+#define	SAFE_CRK_PARAM_BASE	0
+#define	SAFE_CRK_PARAM_EXP	1
+#define	SAFE_CRK_PARAM_MOD	2
+
+static int
+safe_kstart(struct safe_softc *sc)
+{
+	struct cryptkop *krp = sc->sc_pkq_cur->pkq_krp;
+	int exp_bits, mod_bits, base_bits;
+	u_int32_t op, a_off, b_off, c_off, d_off;
+
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	if (krp->krp_iparams < 3 || krp->krp_oparams != 1) {
+		krp->krp_status = EINVAL;
+		return (1);
+	}
+
+	base_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_BASE]);
+	if (base_bits > 2048)
+		goto too_big;
+	if (base_bits <= 0)		/* 5. base not zero */
+		goto too_small;
+
+	exp_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_EXP]);
+	if (exp_bits > 2048)
+		goto too_big;
+	if (exp_bits <= 0)		/* 1. exponent word length > 0 */
+		goto too_small;		/* 4. exponent not zero */
+
+	mod_bits = safe_ksigbits(sc, &krp->krp_param[SAFE_CRK_PARAM_MOD]);
+	if (mod_bits > 2048)
+		goto too_big;
+	if (mod_bits <= 32)		/* 2. modulus word length > 1 */
+		goto too_small;		/* 8. MSW of modulus != zero */
+	if (mod_bits < exp_bits)	/* 3 modulus len >= exponent len */
+		goto too_small;
+	if ((krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p[0] & 1) == 0)
+		goto bad_domain;	/* 6. modulus is odd */
+	if (mod_bits > krp->krp_param[krp->krp_iparams].crp_nbits)
+		goto too_small;		/* make sure result will fit */
+
+	/* 7. modulus > base */
+	if (mod_bits < base_bits)
+		goto too_small;
+	if (mod_bits == base_bits) {
+		u_int8_t *basep, *modp;
+		int i;
+
+		basep = krp->krp_param[SAFE_CRK_PARAM_BASE].crp_p +
+		    ((base_bits + 7) / 8) - 1;
+		modp = krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p +
+		    ((mod_bits + 7) / 8) - 1;
+
+		for (i = 0; i < (mod_bits + 7) / 8; i++, basep--, modp--) {
+			if (*modp < *basep)
+				goto too_small;
+			if (*modp > *basep)
+				break;
+		}
+	}
+
+	/* And on the 9th step, he rested. */
+
+	WRITE_REG(sc, SAFE_PK_A_LEN, (exp_bits + 31) / 32);
+	WRITE_REG(sc, SAFE_PK_B_LEN, (mod_bits + 31) / 32);
+	if (mod_bits > 1024) {
+		op = SAFE_PK_FUNC_EXP4;
+		a_off = 0x000;
+		b_off = 0x100;
+		c_off = 0x200;
+		d_off = 0x300;
+	} else {
+		op = SAFE_PK_FUNC_EXP16;
+		a_off = 0x000;
+		b_off = 0x080;
+		c_off = 0x100;
+		d_off = 0x180;
+	}
+	sc->sc_pk_reslen = b_off - a_off;
+	sc->sc_pk_resoff = d_off;
+
+	/* A is exponent, B is modulus, C is base, D is result */
+	safe_kload_reg(sc, a_off, b_off - a_off,
+	    &krp->krp_param[SAFE_CRK_PARAM_EXP]);
+	WRITE_REG(sc, SAFE_PK_A_ADDR, a_off >> 2);
+	safe_kload_reg(sc, b_off, b_off - a_off,
+	    &krp->krp_param[SAFE_CRK_PARAM_MOD]);
+	WRITE_REG(sc, SAFE_PK_B_ADDR, b_off >> 2);
+	safe_kload_reg(sc, c_off, b_off - a_off,
+	    &krp->krp_param[SAFE_CRK_PARAM_BASE]);
+	WRITE_REG(sc, SAFE_PK_C_ADDR, c_off >> 2);
+	WRITE_REG(sc, SAFE_PK_D_ADDR, d_off >> 2);
+
+	WRITE_REG(sc, SAFE_PK_FUNC, op | SAFE_PK_FUNC_RUN);
+
+	return (0);
+
+too_big:
+	krp->krp_status = E2BIG;
+	return (1);
+too_small:
+	krp->krp_status = ERANGE;
+	return (1);
+bad_domain:
+	krp->krp_status = EDOM;
+	return (1);
+}
+
+static int
+safe_ksigbits(struct safe_softc *sc, struct crparam *cr)
+{
+	u_int plen = (cr->crp_nbits + 7) / 8;
+	int i, sig = plen * 8;
+	u_int8_t c, *p = cr->crp_p;
+
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	for (i = plen - 1; i >= 0; i--) {
+		c = p[i];
+		if (c != 0) {
+			while ((c & 0x80) == 0) {
+				sig--;
+				c <<= 1;
+			}
+			break;
+		}
+		sig -= 8;
+	}
+	return (sig);
+}
+
+static void
+safe_kfeed(struct safe_softc *sc)
+{
+	struct safe_pkq *q, *tmp;
+
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	if (list_empty(&sc->sc_pkq) && sc->sc_pkq_cur == NULL)
+		return;
+	if (sc->sc_pkq_cur != NULL)
+		return;
+	list_for_each_entry_safe(q, tmp, &sc->sc_pkq, pkq_list) {
+		sc->sc_pkq_cur = q;
+		list_del(&q->pkq_list);
+		if (safe_kstart(sc) != 0) {
+			crypto_kdone(q->pkq_krp);
+			kfree(q);
+			sc->sc_pkq_cur = NULL;
+		} else {
+			/* op started, start polling */
+			mod_timer(&sc->sc_pkto, jiffies + 1);
+			break;
+		}
+	}
+}
+
+static void
+safe_kpoll(unsigned long arg)
+{
+	struct safe_softc *sc = NULL;
+	struct safe_pkq *q;
+	struct crparam *res;
+	int i;
+	u_int32_t buf[64];
+	unsigned long flags;
+
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	if (arg >= SAFE_MAX_CHIPS)
+		return;
+	sc = safe_chip_idx[arg];
+	if (!sc) {
+		DPRINTF(("%s() - bad callback\n", __FUNCTION__));
+		return;
+	}
+
+	spin_lock_irqsave(&sc->sc_pkmtx, flags);
+	if (sc->sc_pkq_cur == NULL)
+		goto out;
+	if (READ_REG(sc, SAFE_PK_FUNC) & SAFE_PK_FUNC_RUN) {
+		/* still running, check back later */
+		mod_timer(&sc->sc_pkto, jiffies + 1);
+		goto out;
+	}
+
+	q = sc->sc_pkq_cur;
+	res = &q->pkq_krp->krp_param[q->pkq_krp->krp_iparams];
+	bzero(buf, sizeof(buf));
+	bzero(res->crp_p, (res->crp_nbits + 7) / 8);
+	for (i = 0; i < sc->sc_pk_reslen >> 2; i++)
+		buf[i] = le32_to_cpu(READ_REG(sc, SAFE_PK_RAM_START +
+		    sc->sc_pk_resoff + (i << 2)));
+	bcopy(buf, res->crp_p, (res->crp_nbits + 7) / 8);
+	/*
+	 * reduce the bits that need copying if possible
+	 */
+	res->crp_nbits = min(res->crp_nbits,sc->sc_pk_reslen * 8);
+	res->crp_nbits = safe_ksigbits(sc, res);
+
+	for (i = SAFE_PK_RAM_START; i < SAFE_PK_RAM_END; i += 4)
+		WRITE_REG(sc, i, 0);
+
+	crypto_kdone(q->pkq_krp);
+	kfree(q);
+	sc->sc_pkq_cur = NULL;
+
+	safe_kfeed(sc);
+out:
+	spin_unlock_irqrestore(&sc->sc_pkmtx, flags);
+}
+
+static void
+safe_kload_reg(struct safe_softc *sc, u_int32_t off, u_int32_t len,
+    struct crparam *n)
+{
+	u_int32_t buf[64], i;
+
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	bzero(buf, sizeof(buf));
+	bcopy(n->crp_p, buf, (n->crp_nbits + 7) / 8);
+
+	for (i = 0; i < len >> 2; i++)
+		WRITE_REG(sc, SAFE_PK_RAM_START + off + (i << 2),
+		    cpu_to_le32(buf[i]));
+}
+
+#ifdef SAFE_DEBUG
+static void
+safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
+{
+	printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
+		, tag
+		, READ_REG(sc, SAFE_DMA_ENDIAN)
+		, READ_REG(sc, SAFE_DMA_SRCADDR)
+		, READ_REG(sc, SAFE_DMA_DSTADDR)
+		, READ_REG(sc, SAFE_DMA_STAT)
+	);
+}
+
+static void
+safe_dump_intrstate(struct safe_softc *sc, const char *tag)
+{
+	printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
+		, tag
+		, READ_REG(sc, SAFE_HI_CFG)
+		, READ_REG(sc, SAFE_HI_MASK)
+		, READ_REG(sc, SAFE_HI_DESC_CNT)
+		, READ_REG(sc, SAFE_HU_STAT)
+		, READ_REG(sc, SAFE_HM_STAT)
+	);
+}
+
+static void
+safe_dump_ringstate(struct safe_softc *sc, const char *tag)
+{
+	u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);
+
+	/* NB: assume caller has lock on ring */
+	printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
+		tag,
+		estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
+		(unsigned long)(sc->sc_back - sc->sc_ring),
+		(unsigned long)(sc->sc_front - sc->sc_ring));
+}
+
+static void
+safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
+{
+	int ix, nsegs;
+
+	ix = re - sc->sc_ring;
+	printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
+		, tag
+		, re, ix
+		, re->re_desc.d_csr
+		, re->re_desc.d_src
+		, re->re_desc.d_dst
+		, re->re_desc.d_sa
+		, re->re_desc.d_len
+	);
+	if (re->re_src.nsegs > 1) {
+		ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
+			sizeof(struct safe_pdesc);
+		for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
+			printf(" spd[%u] %p: %p size %u flags %x"
+				, ix, &sc->sc_spring[ix]
+				, (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
+				, sc->sc_spring[ix].pd_size
+				, sc->sc_spring[ix].pd_flags
+			);
+			if (sc->sc_spring[ix].pd_size == 0)
+				printf(" (zero!)");
+			printf("\n");
+			if (++ix == SAFE_TOTAL_SPART)
+				ix = 0;
+		}
+	}
+	if (re->re_dst.nsegs > 1) {
+		ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
+			sizeof(struct safe_pdesc);
+		for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
+			printf(" dpd[%u] %p: %p flags %x\n"
+				, ix, &sc->sc_dpring[ix]
+				, (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
+				, sc->sc_dpring[ix].pd_flags
+			);
+			if (++ix == SAFE_TOTAL_DPART)
+				ix = 0;
+		}
+	}
+	printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
+		re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
+	printf("sa: key %x %x %x %x %x %x %x %x\n"
+		, re->re_sa.sa_key[0]
+		, re->re_sa.sa_key[1]
+		, re->re_sa.sa_key[2]
+		, re->re_sa.sa_key[3]
+		, re->re_sa.sa_key[4]
+		, re->re_sa.sa_key[5]
+		, re->re_sa.sa_key[6]
+		, re->re_sa.sa_key[7]
+	);
+	printf("sa: indigest %x %x %x %x %x\n"
+		, re->re_sa.sa_indigest[0]
+		, re->re_sa.sa_indigest[1]
+		, re->re_sa.sa_indigest[2]
+		, re->re_sa.sa_indigest[3]
+		, re->re_sa.sa_indigest[4]
+	);
+	printf("sa: outdigest %x %x %x %x %x\n"
+		, re->re_sa.sa_outdigest[0]
+		, re->re_sa.sa_outdigest[1]
+		, re->re_sa.sa_outdigest[2]
+		, re->re_sa.sa_outdigest[3]
+		, re->re_sa.sa_outdigest[4]
+	);
+	printf("sr: iv %x %x %x %x\n"
+		, re->re_sastate.sa_saved_iv[0]
+		, re->re_sastate.sa_saved_iv[1]
+		, re->re_sastate.sa_saved_iv[2]
+		, re->re_sastate.sa_saved_iv[3]
+	);
+	printf("sr: hashbc %u indigest %x %x %x %x %x\n"
+		, re->re_sastate.sa_saved_hashbc
+		, re->re_sastate.sa_saved_indigest[0]
+		, re->re_sastate.sa_saved_indigest[1]
+		, re->re_sastate.sa_saved_indigest[2]
+		, re->re_sastate.sa_saved_indigest[3]
+		, re->re_sastate.sa_saved_indigest[4]
+	);
+}
+
+static void
+safe_dump_ring(struct safe_softc *sc, const char *tag)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sc->sc_ringmtx, flags);
+	printf("\nSafeNet Ring State:\n");
+	safe_dump_intrstate(sc, tag);
+	safe_dump_dmastatus(sc, tag);
+	safe_dump_ringstate(sc, tag);
+	if (sc->sc_nqchip) {
+		struct safe_ringentry *re = sc->sc_back;
+		do {
+			safe_dump_request(sc, tag, re);
+			if (++re == sc->sc_ringtop)
+				re = sc->sc_ring;
+		} while (re != sc->sc_front);
+	}
+	spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
+}
+#endif /* SAFE_DEBUG */
+
+
+static int safe_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+{
+	struct safe_softc *sc = NULL;
+	u32 mem_start, mem_len, cmd;
+	int i, rc, devinfo;
+	dma_addr_t raddr;
+	static int num_chips = 0;
+
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	if (pci_enable_device(dev) < 0)
+		return(-ENODEV);
+
+	if (!dev->irq) {
+		printk("safe: found device with no IRQ assigned. check BIOS settings!");
+		pci_disable_device(dev);
+		return(-ENODEV);
+	}
+
+	if (pci_set_mwi(dev)) {
+		printk("safe: pci_set_mwi failed!");
+		return(-ENODEV);
+	}
+
+	sc = (struct safe_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
+	if (!sc)
+		return(-ENOMEM);
+	memset(sc, 0, sizeof(*sc));
+
+	softc_device_init(sc, "safe", num_chips, safe_methods);
+
+	sc->sc_irq = -1;
+	sc->sc_cid = -1;
+	sc->sc_pcidev = dev;
+	if (num_chips < SAFE_MAX_CHIPS) {
+		safe_chip_idx[device_get_unit(sc->sc_dev)] = sc;
+		num_chips++;
+	}
+
+	INIT_LIST_HEAD(&sc->sc_pkq);
+	spin_lock_init(&sc->sc_pkmtx);
+
+	pci_set_drvdata(sc->sc_pcidev, sc);
+
+	/* we read its hardware registers as memory */
+	mem_start = pci_resource_start(sc->sc_pcidev, 0);
+	mem_len   = pci_resource_len(sc->sc_pcidev, 0);
+
+	sc->sc_base_addr = (ocf_iomem_t) ioremap(mem_start, mem_len);
+	if (!sc->sc_base_addr) {
+		device_printf(sc->sc_dev, "failed to ioremap 0x%x-0x%x\n",
+				mem_start, mem_start + mem_len - 1);
+		goto out;
+	}
+
+	/* fix up the bus size */
+	if (pci_set_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
+		device_printf(sc->sc_dev, "No usable DMA configuration, aborting.\n");
+		goto out;
+	}
+	if (pci_set_consistent_dma_mask(sc->sc_pcidev, DMA_32BIT_MASK)) {
+		device_printf(sc->sc_dev, "No usable consistent DMA configuration, aborting.\n");
+		goto out;
+	}
+
+	pci_set_master(sc->sc_pcidev);
+
+	pci_read_config_dword(sc->sc_pcidev, PCI_COMMAND, &cmd);
+
+	if (!(cmd & PCI_COMMAND_MEMORY)) {
+		device_printf(sc->sc_dev, "failed to enable memory mapping\n");
+		goto out;
+	}
+
+	if (!(cmd & PCI_COMMAND_MASTER)) {
+		device_printf(sc->sc_dev, "failed to enable bus mastering\n");
+		goto out;
+	}
+
+	rc = request_irq(dev->irq, safe_intr, IRQF_SHARED, "safe", sc);
+	if (rc) {
+		device_printf(sc->sc_dev, "failed to hook irq %d\n", sc->sc_irq);
+		goto out;
+	}
+	sc->sc_irq = dev->irq;
+
+	sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
+			(SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);
+
+	/*
+	 * Allocate packet engine descriptors.
+	 */
+	sc->sc_ringalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
+			SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
+			&sc->sc_ringalloc.dma_paddr);
+	if (!sc->sc_ringalloc.dma_vaddr) {
+		device_printf(sc->sc_dev, "cannot allocate PE descriptor ring\n");
+		goto out;
+	}
+
+	/*
+	 * Hookup the static portion of all our data structures.
+	 */
+	sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
+	sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
+	sc->sc_front = sc->sc_ring;
+	sc->sc_back = sc->sc_ring;
+	raddr = sc->sc_ringalloc.dma_paddr;
+	bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
+	for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
+		struct safe_ringentry *re = &sc->sc_ring[i];
+
+		re->re_desc.d_sa = raddr +
+			offsetof(struct safe_ringentry, re_sa);
+		re->re_sa.sa_staterec = raddr +
+			offsetof(struct safe_ringentry, re_sastate);
+
+		raddr += sizeof (struct safe_ringentry);
+	}
+	spin_lock_init(&sc->sc_ringmtx);
+
+	/*
+	 * Allocate scatter and gather particle descriptors.
+	 */
+	sc->sc_spalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
+			SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
+			&sc->sc_spalloc.dma_paddr);
+	if (!sc->sc_spalloc.dma_vaddr) {
+		device_printf(sc->sc_dev, "cannot allocate source particle descriptor ring\n");
+		goto out;
+	}
+	sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
+	sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
+	sc->sc_spfree = sc->sc_spring;
+	bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));
+
+	sc->sc_dpalloc.dma_vaddr = pci_alloc_consistent(sc->sc_pcidev,
+			SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
+			&sc->sc_dpalloc.dma_paddr);
+	if (!sc->sc_dpalloc.dma_vaddr) {
+		device_printf(sc->sc_dev, "cannot allocate destination particle descriptor ring\n");
+		goto out;
+	}
+	sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
+	sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
+	sc->sc_dpfree = sc->sc_dpring;
+	bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));
+
+	sc->sc_cid = crypto_get_driverid(softc_get_device(sc), CRYPTOCAP_F_HARDWARE);
+	if (sc->sc_cid < 0) {
+		device_printf(sc->sc_dev, "could not get crypto driver id\n");
+		goto out;
+	}
+
+	printf("%s:", device_get_nameunit(sc->sc_dev));
+
+	devinfo = READ_REG(sc, SAFE_DEVINFO);
+	if (devinfo & SAFE_DEVINFO_RNG) {
+		sc->sc_flags |= SAFE_FLAGS_RNG;
+		printf(" rng");
+	}
+	if (devinfo & SAFE_DEVINFO_PKEY) {
+		printf(" key");
+		sc->sc_flags |= SAFE_FLAGS_KEY;
+		crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
+#if 0
+		crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
+#endif
+		init_timer(&sc->sc_pkto);
+		sc->sc_pkto.function = safe_kpoll;
+		sc->sc_pkto.data = (unsigned long) device_get_unit(sc->sc_dev);
+	}
+	if (devinfo & SAFE_DEVINFO_DES) {
+		printf(" des/3des");
+		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
+		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
+	}
+	if (devinfo & SAFE_DEVINFO_AES) {
+		printf(" aes");
+		crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
+	}
+	if (devinfo & SAFE_DEVINFO_MD5) {
+		printf(" md5");
+		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
+	}
+	if (devinfo & SAFE_DEVINFO_SHA1) {
+		printf(" sha1");
+		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
+	}
+	printf(" null");
+	crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0);
+	crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0);
+	/* XXX other supported algorithms */
+	printf("\n");
+
+	safe_reset_board(sc);		/* reset h/w */
+	safe_init_board(sc);		/* init h/w */
+
+#if defined(CONFIG_OCF_RANDOMHARVEST) && !defined(SAFE_NO_RNG)
+	if (sc->sc_flags & SAFE_FLAGS_RNG) {
+		safe_rng_init(sc);
+		crypto_rregister(sc->sc_cid, safe_read_random, sc);
+	}
+#endif /* SAFE_NO_RNG */
+
+	return (0);
+
+out:
+	if (sc->sc_cid >= 0)
+		crypto_unregister_all(sc->sc_cid);
+	if (sc->sc_irq != -1)
+		free_irq(sc->sc_irq, sc);
+	if (sc->sc_ringalloc.dma_vaddr)
+		pci_free_consistent(sc->sc_pcidev,
+				SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
+				sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
+	if (sc->sc_spalloc.dma_vaddr)
+		pci_free_consistent(sc->sc_pcidev,
+				SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
+				sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
+	if (sc->sc_dpalloc.dma_vaddr)
+		pci_free_consistent(sc->sc_pcidev,
+				SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
+				sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
+	kfree(sc);
+	return(-ENODEV);
+}
+
+static void safe_remove(struct pci_dev *dev)
+{
+	struct safe_softc *sc = pci_get_drvdata(dev);
+
+	DPRINTF(("%s()\n", __FUNCTION__));
+
+	/* XXX wait/abort active ops */
+
+	WRITE_REG(sc, SAFE_HI_MASK, 0);		/* disable interrupts */
+
+	del_timer_sync(&sc->sc_pkto);
+
+	crypto_unregister_all(sc->sc_cid);
+
+	safe_cleanchip(sc);
+
+	if (sc->sc_irq != -1)
+		free_irq(sc->sc_irq, sc);
+	if (sc->sc_ringalloc.dma_vaddr)
+		pci_free_consistent(sc->sc_pcidev,
+				SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
+				sc->sc_ringalloc.dma_vaddr, sc->sc_ringalloc.dma_paddr);
+	if (sc->sc_spalloc.dma_vaddr)
+		pci_free_consistent(sc->sc_pcidev,
+				SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
+				sc->sc_spalloc.dma_vaddr, sc->sc_spalloc.dma_paddr);
+	if (sc->sc_dpalloc.dma_vaddr)
+		pci_free_consistent(sc->sc_pcidev,
+				SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
+				sc->sc_dpalloc.dma_vaddr, sc->sc_dpalloc.dma_paddr);
+	sc->sc_irq = -1;
+	sc->sc_ringalloc.dma_vaddr = NULL;
+	sc->sc_spalloc.dma_vaddr = NULL;
+	sc->sc_dpalloc.dma_vaddr = NULL;
+}
+
+static struct pci_device_id safe_pci_tbl[] = {
+	{ PCI_VENDOR_SAFENET, PCI_PRODUCT_SAFEXCEL,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+	{ },
+};
+MODULE_DEVICE_TABLE(pci, safe_pci_tbl);
+
+static struct pci_driver safe_driver = {
+	.name         = "safe",
+	.id_table     = safe_pci_tbl,
+	.probe        =	safe_probe,
+	.remove       = safe_remove,
+	/* add PM stuff here one day */
+};
+
+static int __init safe_init (void)
+{
+	struct safe_softc *sc = NULL;
+	int rc;
+
+	DPRINTF(("%s(%p)\n", __FUNCTION__, safe_init));
+
+	rc = pci_register_driver(&safe_driver);
+	pci_register_driver_compat(&safe_driver, rc);
+
+	return rc;
+}
+
+static void __exit safe_exit (void)
+{
+	pci_unregister_driver(&safe_driver);
+}
+
+module_init(safe_init);
+module_exit(safe_exit);
+
+MODULE_LICENSE("BSD");
+MODULE_AUTHOR("David McCullough <david_mccullough@mcafee.com>");
+MODULE_DESCRIPTION("OCF driver for safenet PCI crypto devices");
diff --git a/crypto/ocf/safe/safereg.h b/crypto/ocf/safe/safereg.h
new file mode 100644
index 000000000000..d3461f9b0a5e
--- /dev/null
+++ b/crypto/ocf/safe/safereg.h
@@ -0,0 +1,421 @@
+/*-
+ * Copyright (c) 2003 Sam Leffler, Errno Consulting
+ * Copyright (c) 2003 Global Technology Associates, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/dev/safe/safereg.h,v 1.1 2003/07/21 21:46:07 sam Exp $
+ */
+#ifndef _SAFE_SAFEREG_H_
+#define	_SAFE_SAFEREG_H_
+
+/*
+ * Register definitions for SafeNet SafeXcel-1141 crypto device.
+ * Definitions from revision 1.3 (Nov 6 2002) of the User's Manual.
+ */
+
+#define BS_BAR			0x10	/* DMA base address register */
+#define	BS_TRDY_TIMEOUT		0x40	/* TRDY timeout */
+#define	BS_RETRY_TIMEOUT	0x41	/* DMA retry timeout */
+
+#define	PCI_VENDOR_SAFENET	0x16ae		/* SafeNet, Inc. */
+
+/* SafeNet */
+#define	PCI_PRODUCT_SAFEXCEL	0x1141		/* 1141 */
+
+#define	SAFE_PE_CSR		0x0000	/* Packet Enginge Ctrl/Status */
+#define	SAFE_PE_SRC		0x0004	/* Packet Engine Source */
+#define	SAFE_PE_DST		0x0008	/* Packet Engine Destination */
+#define	SAFE_PE_SA		0x000c	/* Packet Engine SA */
+#define	SAFE_PE_LEN		0x0010	/* Packet Engine Length */
+#define	SAFE_PE_DMACFG		0x0040	/* Packet Engine DMA Configuration */
+#define	SAFE_PE_DMASTAT		0x0044	/* Packet Engine DMA Status */
+#define	SAFE_PE_PDRBASE		0x0048	/* Packet Engine Descriptor Ring Base */
+#define	SAFE_PE_RDRBASE		0x004c	/* Packet Engine Result Ring Base */
+#define	SAFE_PE_RINGCFG		0x0050	/* Packet Engine Ring Configuration */
+#define	SAFE_PE_RINGPOLL	0x0054	/* Packet Engine Ring Poll */
+#define	SAFE_PE_IRNGSTAT	0x0058	/* Packet Engine Internal Ring Status */
+#define	SAFE_PE_ERNGSTAT	0x005c	/* Packet Engine External Ring Status */
+#define	SAFE_PE_IOTHRESH	0x0060	/* Packet Engine I/O Threshold */
+#define	SAFE_PE_GRNGBASE	0x0064	/* Packet Engine Gather Ring Base */
+#define	SAFE_PE_SRNGBASE	0x0068	/* Packet Engine Scatter Ring Base */
+#define	SAFE_PE_PARTSIZE	0x006c	/* Packet Engine Particlar Ring Size */
+#define	SAFE_PE_PARTCFG		0x0070	/* Packet Engine Particle Ring Config */
+#define	SAFE_CRYPTO_CTRL	0x0080	/* Crypto Control */
+#define	SAFE_DEVID		0x0084	/* Device ID */
+#define	SAFE_DEVINFO		0x0088	/* Device Info */
+#define	SAFE_HU_STAT		0x00a0	/* Host Unmasked Status */
+#define	SAFE_HM_STAT		0x00a4	/* Host Masked Status (read-only) */
+#define	SAFE_HI_CLR		0x00a4	/* Host Clear Interrupt (write-only) */
+#define	SAFE_HI_MASK		0x00a8	/* Host Mask Control */
+#define	SAFE_HI_CFG		0x00ac	/* Interrupt Configuration */
+#define	SAFE_HI_RD_DESCR	0x00b4	/* Force Descriptor Read */
+#define	SAFE_HI_DESC_CNT	0x00b8	/* Host Descriptor Done Count */
+#define	SAFE_DMA_ENDIAN		0x00c0	/* Master Endian Status */
+#define	SAFE_DMA_SRCADDR	0x00c4	/* DMA Source Address Status */
+#define	SAFE_DMA_DSTADDR	0x00c8	/* DMA Destination Address Status */
+#define	SAFE_DMA_STAT		0x00cc	/* DMA Current Status */
+#define	SAFE_DMA_CFG		0x00d4	/* DMA Configuration/Status */
+#define	SAFE_ENDIAN		0x00e0	/* Endian Configuration */
+#define	SAFE_PK_A_ADDR		0x0800	/* Public Key A Address */
+#define	SAFE_PK_B_ADDR		0x0804	/* Public Key B Address */
+#define	SAFE_PK_C_ADDR		0x0808	/* Public Key C Address */
+#define	SAFE_PK_D_ADDR		0x080c	/* Public Key D Address */
+#define	SAFE_PK_A_LEN		0x0810	/* Public Key A Length */
+#define	SAFE_PK_B_LEN		0x0814	/* Public Key B Length */
+#define	SAFE_PK_SHIFT		0x0818	/* Public Key Shift */
+#define	SAFE_PK_FUNC		0x081c	/* Public Key Function */
+#define SAFE_PK_RAM_START	0x1000	/* Public Key RAM start address */
+#define SAFE_PK_RAM_END		0x1fff	/* Public Key RAM end address */
+
+#define	SAFE_RNG_OUT		0x0100	/* RNG Output */
+#define	SAFE_RNG_STAT		0x0104	/* RNG Status */
+#define	SAFE_RNG_CTRL		0x0108	/* RNG Control */
+#define	SAFE_RNG_A		0x010c	/* RNG A */
+#define	SAFE_RNG_B		0x0110	/* RNG B */
+#define	SAFE_RNG_X_LO		0x0114	/* RNG X [31:0] */
+#define	SAFE_RNG_X_MID		0x0118	/* RNG X [63:32] */
+#define	SAFE_RNG_X_HI		0x011c	/* RNG X [80:64] */
+#define	SAFE_RNG_X_CNTR		0x0120	/* RNG Counter */
+#define	SAFE_RNG_ALM_CNT	0x0124	/* RNG Alarm Count */
+#define	SAFE_RNG_CNFG		0x0128	/* RNG Configuration */
+#define	SAFE_RNG_LFSR1_LO	0x012c	/* RNG LFSR1 [31:0] */
+#define	SAFE_RNG_LFSR1_HI	0x0130	/* RNG LFSR1 [47:32] */
+#define	SAFE_RNG_LFSR2_LO	0x0134	/* RNG LFSR1 [31:0] */
+#define	SAFE_RNG_LFSR2_HI	0x0138	/* RNG LFSR1 [47:32] */
+
+#define	SAFE_PE_CSR_READY	0x00000001	/* ready for processing */
+#define	SAFE_PE_CSR_DONE	0x00000002	/* h/w completed processing */
+#define	SAFE_PE_CSR_LOADSA	0x00000004	/* load SA digests */
+#define	SAFE_PE_CSR_HASHFINAL	0x00000010	/* do hash pad & write result */
+#define	SAFE_PE_CSR_SABUSID	0x000000c0	/* bus id for SA */
+#define	SAFE_PE_CSR_SAPCI	0x00000040	/* PCI bus id for SA */
+#define	SAFE_PE_CSR_NXTHDR	0x0000ff00	/* next hdr value for IPsec */
+#define	SAFE_PE_CSR_FPAD	0x0000ff00	/* fixed pad for basic ops */
+#define	SAFE_PE_CSR_STATUS	0x00ff0000	/* operation result status */
+#define	SAFE_PE_CSR_AUTH_FAIL	0x00010000	/* ICV mismatch (inbound) */
+#define	SAFE_PE_CSR_PAD_FAIL	0x00020000	/* pad verify fail (inbound) */
+#define	SAFE_PE_CSR_SEQ_FAIL	0x00040000	/* sequence number (inbound) */
+#define	SAFE_PE_CSR_XERROR	0x00080000	/* extended error follows */
+#define	SAFE_PE_CSR_XECODE	0x00f00000	/* extended error code */
+#define	SAFE_PE_CSR_XECODE_S	20
+#define	SAFE_PE_CSR_XECODE_BADCMD	0	/* invalid command */
+#define	SAFE_PE_CSR_XECODE_BADALG	1	/* invalid algorithm */
+#define	SAFE_PE_CSR_XECODE_ALGDIS	2	/* algorithm disabled */
+#define	SAFE_PE_CSR_XECODE_ZEROLEN	3	/* zero packet length */
+#define	SAFE_PE_CSR_XECODE_DMAERR	4	/* bus DMA error */
+#define	SAFE_PE_CSR_XECODE_PIPEABORT	5	/* secondary bus DMA error */
+#define	SAFE_PE_CSR_XECODE_BADSPI	6	/* IPsec SPI mismatch */
+#define	SAFE_PE_CSR_XECODE_TIMEOUT	10	/* failsafe timeout */
+#define	SAFE_PE_CSR_PAD		0xff000000	/* ESP padding control/status */
+#define	SAFE_PE_CSR_PAD_MIN	0x00000000	/* minimum IPsec padding */
+#define	SAFE_PE_CSR_PAD_16	0x08000000	/* pad to 16-byte boundary */
+#define	SAFE_PE_CSR_PAD_32	0x10000000	/* pad to 32-byte boundary */
+#define	SAFE_PE_CSR_PAD_64	0x20000000	/* pad to 64-byte boundary */
+#define	SAFE_PE_CSR_PAD_128	0x40000000	/* pad to 128-byte boundary */
+#define	SAFE_PE_CSR_PAD_256	0x80000000	/* pad to 256-byte boundary */
+
+/*
+ * Check the CSR to see if the PE has returned ownership to
+ * the host.  Note that before processing a descriptor this
+ * must be done followed by a check of the SAFE_PE_LEN register
+ * status bits to avoid premature processing of a descriptor
+ * on its way back to the host.
+ */
+#define	SAFE_PE_CSR_IS_DONE(_csr) \
+    (((_csr) & (SAFE_PE_CSR_READY | SAFE_PE_CSR_DONE)) == SAFE_PE_CSR_DONE)
+
+#define	SAFE_PE_LEN_LENGTH	0x000fffff	/* total length (bytes) */
+#define	SAFE_PE_LEN_READY	0x00400000	/* ready for processing */
+#define	SAFE_PE_LEN_DONE	0x00800000	/* h/w completed processing */
+#define	SAFE_PE_LEN_BYPASS	0xff000000	/* bypass offset (bytes) */
+#define	SAFE_PE_LEN_BYPASS_S	24
+
+#define	SAFE_PE_LEN_IS_DONE(_len) \
+    (((_len) & (SAFE_PE_LEN_READY | SAFE_PE_LEN_DONE)) == SAFE_PE_LEN_DONE)
+
+/* NB: these apply to HU_STAT, HM_STAT, HI_CLR, and HI_MASK */
+#define	SAFE_INT_PE_CDONE	0x00000002	/* PE context done */
+#define	SAFE_INT_PE_DDONE	0x00000008	/* PE descriptor done */
+#define	SAFE_INT_PE_ERROR	0x00000010	/* PE error */
+#define	SAFE_INT_PE_ODONE	0x00000020	/* PE operation done */
+
+#define	SAFE_HI_CFG_PULSE	0x00000001	/* use pulse interrupt */
+#define	SAFE_HI_CFG_LEVEL	0x00000000	/* use level interrupt */
+#define	SAFE_HI_CFG_AUTOCLR	0x00000002	/* auto-clear pulse interrupt */
+
+#define	SAFE_ENDIAN_PASS	0x000000e4	/* straight pass-thru */
+#define	SAFE_ENDIAN_SWAB	0x0000001b	/* swap bytes in 32-bit word */
+
+#define	SAFE_PE_DMACFG_PERESET	0x00000001	/* reset packet engine */
+#define	SAFE_PE_DMACFG_PDRRESET	0x00000002	/* reset PDR counters/ptrs */
+#define	SAFE_PE_DMACFG_SGRESET	0x00000004	/* reset scatter/gather cache */
+#define	SAFE_PE_DMACFG_FSENA	0x00000008	/* enable failsafe reset */
+#define	SAFE_PE_DMACFG_PEMODE	0x00000100	/* packet engine mode */
+#define	SAFE_PE_DMACFG_SAPREC	0x00000200	/* SA precedes packet */
+#define	SAFE_PE_DMACFG_PKFOLL	0x00000400	/* packet follows descriptor */
+#define	SAFE_PE_DMACFG_GPRBID	0x00003000	/* gather particle ring busid */
+#define	SAFE_PE_DMACFG_GPRPCI	0x00001000	/* PCI gather particle ring */
+#define	SAFE_PE_DMACFG_SPRBID	0x0000c000	/* scatter part. ring busid */
+#define	SAFE_PE_DMACFG_SPRPCI	0x00004000	/* PCI scatter part. ring */
+#define	SAFE_PE_DMACFG_ESDESC	0x00010000	/* endian swap descriptors */
+#define	SAFE_PE_DMACFG_ESSA	0x00020000	/* endian swap SA data */
+#define	SAFE_PE_DMACFG_ESPACKET	0x00040000	/* endian swap packet data */
+#define	SAFE_PE_DMACFG_ESPDESC	0x00080000	/* endian swap particle desc. */
+#define	SAFE_PE_DMACFG_NOPDRUP	0x00100000	/* supp. PDR ownership update */
+#define	SAFE_PD_EDMACFG_PCIMODE	0x01000000	/* PCI target mode */
+
+#define	SAFE_PE_DMASTAT_PEIDONE	0x00000001	/* PE core input done */
+#define	SAFE_PE_DMASTAT_PEODONE	0x00000002	/* PE core output done */
+#define	SAFE_PE_DMASTAT_ENCDONE	0x00000004	/* encryption done */
+#define	SAFE_PE_DMASTAT_IHDONE	0x00000008	/* inner hash done */
+#define	SAFE_PE_DMASTAT_OHDONE	0x00000010	/* outer hash (HMAC) done */
+#define	SAFE_PE_DMASTAT_PADFLT	0x00000020	/* crypto pad fault */
+#define	SAFE_PE_DMASTAT_ICVFLT	0x00000040	/* ICV fault */
+#define	SAFE_PE_DMASTAT_SPIMIS	0x00000080	/* SPI mismatch */
+#define	SAFE_PE_DMASTAT_CRYPTO	0x00000100	/* crypto engine timeout */
+#define	SAFE_PE_DMASTAT_CQACT	0x00000200	/* command queue active */
+#define	SAFE_PE_DMASTAT_IRACT	0x00000400	/* input request active */
+#define	SAFE_PE_DMASTAT_ORACT	0x00000800	/* output request active */
+#define	SAFE_PE_DMASTAT_PEISIZE	0x003ff000	/* PE input size:32-bit words */
+#define	SAFE_PE_DMASTAT_PEOSIZE	0xffc00000	/* PE out. size:32-bit words */
+
+#define	SAFE_PE_RINGCFG_SIZE	0x000003ff	/* ring size (descriptors) */
+#define	SAFE_PE_RINGCFG_OFFSET	0xffff0000	/* offset btw desc's (dwords) */
+#define	SAFE_PE_RINGCFG_OFFSET_S	16
+
+#define	SAFE_PE_RINGPOLL_POLL	0x00000fff	/* polling frequency/divisor */
+#define	SAFE_PE_RINGPOLL_RETRY	0x03ff0000	/* polling frequency/divisor */
+#define	SAFE_PE_RINGPOLL_CONT	0x80000000	/* continuously poll */
+
+#define	SAFE_PE_IRNGSTAT_CQAVAIL 0x00000001	/* command queue available */
+
+#define	SAFE_PE_ERNGSTAT_NEXT	0x03ff0000	/* index of next packet desc. */
+#define	SAFE_PE_ERNGSTAT_NEXT_S	16
+
+#define	SAFE_PE_IOTHRESH_INPUT	0x000003ff	/* input threshold (dwords) */
+#define	SAFE_PE_IOTHRESH_OUTPUT	0x03ff0000	/* output threshold (dwords) */
+
+#define	SAFE_PE_PARTCFG_SIZE	0x0000ffff	/* scatter particle size */
+#define	SAFE_PE_PARTCFG_GBURST	0x00030000	/* gather particle burst */
+#define	SAFE_PE_PARTCFG_GBURST_2	0x00000000
+#define	SAFE_PE_PARTCFG_GBURST_4	0x00010000
+#define	SAFE_PE_PARTCFG_GBURST_8	0x00020000
+#define	SAFE_PE_PARTCFG_GBURST_16	0x00030000
+#define	SAFE_PE_PARTCFG_SBURST	0x000c0000	/* scatter particle burst */
+#define	SAFE_PE_PARTCFG_SBURST_2	0x00000000
+#define	SAFE_PE_PARTCFG_SBURST_4	0x00040000
+#define	SAFE_PE_PARTCFG_SBURST_8	0x00080000
+#define	SAFE_PE_PARTCFG_SBURST_16	0x000c0000
+
+#define	SAFE_PE_PARTSIZE_SCAT	0xffff0000	/* scatter particle ring size */
+#define	SAFE_PE_PARTSIZE_GATH	0x0000ffff	/* gather particle ring size */
+
+#define	SAFE_CRYPTO_CTRL_3DES	0x00000001	/* enable 3DES support */
+#define	SAFE_CRYPTO_CTRL_PKEY	0x00010000	/* enable public key support */
+#define	SAFE_CRYPTO_CTRL_RNG	0x00020000	/* enable RNG support */
+
+#define	SAFE_DEVINFO_REV_MIN	0x0000000f	/* minor rev for chip */
+#define	SAFE_DEVINFO_REV_MAJ	0x000000f0	/* major rev for chip */
+#define	SAFE_DEVINFO_REV_MAJ_S	4
+#define	SAFE_DEVINFO_DES	0x00000100	/* DES/3DES support present */
+#define	SAFE_DEVINFO_ARC4	0x00000200	/* ARC4 support present */
+#define	SAFE_DEVINFO_AES	0x00000400	/* AES support present */
+#define	SAFE_DEVINFO_MD5	0x00001000	/* MD5 support present */
+#define	SAFE_DEVINFO_SHA1	0x00002000	/* SHA-1 support present */
+#define	SAFE_DEVINFO_RIPEMD	0x00004000	/* RIPEMD support present */
+#define	SAFE_DEVINFO_DEFLATE	0x00010000	/* Deflate support present */
+#define	SAFE_DEVINFO_SARAM	0x00100000	/* on-chip SA RAM present */
+#define	SAFE_DEVINFO_EMIBUS	0x00200000	/* EMI bus present */
+#define	SAFE_DEVINFO_PKEY	0x00400000	/* public key support present */
+#define	SAFE_DEVINFO_RNG	0x00800000	/* RNG present */
+
+#define	SAFE_REV(_maj, _min)	(((_maj) << SAFE_DEVINFO_REV_MAJ_S) | (_min))
+#define	SAFE_REV_MAJ(_chiprev) \
+	(((_chiprev) & SAFE_DEVINFO_REV_MAJ) >> SAFE_DEVINFO_REV_MAJ_S)
+#define	SAFE_REV_MIN(_chiprev)	((_chiprev) & SAFE_DEVINFO_REV_MIN)
+
+#define	SAFE_PK_FUNC_MULT	0x00000001	/* Multiply function */
+#define	SAFE_PK_FUNC_SQUARE	0x00000004	/* Square function */
+#define	SAFE_PK_FUNC_ADD	0x00000010	/* Add function */
+#define	SAFE_PK_FUNC_SUB	0x00000020	/* Subtract function */
+#define	SAFE_PK_FUNC_LSHIFT	0x00000040	/* Left-shift function */
+#define	SAFE_PK_FUNC_RSHIFT	0x00000080	/* Right-shift function */
+#define	SAFE_PK_FUNC_DIV	0x00000100	/* Divide function */
+#define	SAFE_PK_FUNC_CMP	0x00000400	/* Compare function */
+#define	SAFE_PK_FUNC_COPY	0x00000800	/* Copy function */
+#define	SAFE_PK_FUNC_EXP16	0x00002000	/* Exponentiate (4-bit ACT) */
+#define	SAFE_PK_FUNC_EXP4	0x00004000	/* Exponentiate (2-bit ACT) */
+#define	SAFE_PK_FUNC_RUN	0x00008000	/* start/status */
+
+#define	SAFE_RNG_STAT_BUSY	0x00000001	/* busy, data not valid */
+
+#define	SAFE_RNG_CTRL_PRE_LFSR	0x00000001	/* enable output pre-LFSR */
+#define	SAFE_RNG_CTRL_TST_MODE	0x00000002	/* enable test mode */
+#define	SAFE_RNG_CTRL_TST_RUN	0x00000004	/* start test state machine */
+#define	SAFE_RNG_CTRL_ENA_RING1	0x00000008	/* test entropy oscillator #1 */
+#define	SAFE_RNG_CTRL_ENA_RING2	0x00000010	/* test entropy oscillator #2 */
+#define	SAFE_RNG_CTRL_DIS_ALARM	0x00000020	/* disable RNG alarm reports */
+#define	SAFE_RNG_CTRL_TST_CLOCK	0x00000040	/* enable test clock */
+#define	SAFE_RNG_CTRL_SHORTEN	0x00000080	/* shorten state timers */
+#define	SAFE_RNG_CTRL_TST_ALARM	0x00000100	/* simulate alarm state */
+#define	SAFE_RNG_CTRL_RST_LFSR	0x00000200	/* reset LFSR */
+
+/*
+ * Packet engine descriptor.  Note that d_csr is a copy of the
+ * SAFE_PE_CSR register and all definitions apply, and d_len
+ * is a copy of the SAFE_PE_LEN register and all definitions apply.
+ * d_src and d_len may point directly to contiguous data or to a
+ * list of ``particle descriptors'' when using scatter/gather i/o.
+ */
+struct safe_desc {
+	u_int32_t	d_csr;			/* per-packet control/status */
+	u_int32_t	d_src;			/* source address */
+	u_int32_t	d_dst;			/* destination address */
+	u_int32_t	d_sa;			/* SA address */
+	u_int32_t	d_len;			/* length, bypass, status */
+};
+
+/*
+ * Scatter/Gather particle descriptor.
+ *
+ * NB: scatter descriptors do not specify a size; this is fixed
+ *     by the setting of the SAFE_PE_PARTCFG register.
+ */
+struct safe_pdesc {
+	u_int32_t	pd_addr;		/* particle address */
+#ifdef __BIG_ENDIAN
+	u_int16_t	pd_flags;		/* control word */
+	u_int16_t	pd_size;		/* particle size (bytes) */
+#else
+	u_int16_t	pd_flags;		/* control word */
+	u_int16_t	pd_size;		/* particle size (bytes) */
+#endif
+};
+
+#define	SAFE_PD_READY	0x0001			/* ready for processing */
+#define	SAFE_PD_DONE	0x0002			/* h/w completed processing */
+
+/*
+ * Security Association (SA) Record (Rev 1).  One of these is
+ * required for each operation processed by the packet engine.
+ */
+struct safe_sarec {
+	u_int32_t	sa_cmd0;
+	u_int32_t	sa_cmd1;
+	u_int32_t	sa_resv0;
+	u_int32_t	sa_resv1;
+	u_int32_t	sa_key[8];		/* DES/3DES/AES key */
+	u_int32_t	sa_indigest[5];		/* inner digest */
+	u_int32_t	sa_outdigest[5];	/* outer digest */
+	u_int32_t	sa_spi;			/* SPI */
+	u_int32_t	sa_seqnum;		/* sequence number */
+	u_int32_t	sa_seqmask[2];		/* sequence number mask */
+	u_int32_t	sa_resv2;
+	u_int32_t	sa_staterec;		/* address of state record */
+	u_int32_t	sa_resv3[2];
+	u_int32_t	sa_samgmt0;		/* SA management field 0 */
+	u_int32_t	sa_samgmt1;		/* SA management field 0 */
+};
+
+#define	SAFE_SA_CMD0_OP		0x00000007	/* operation code */
+#define	SAFE_SA_CMD0_OP_CRYPT	0x00000000	/* encrypt/decrypt (basic) */
+#define	SAFE_SA_CMD0_OP_BOTH	0x00000001	/* encrypt-hash/hash-decrypto */
+#define	SAFE_SA_CMD0_OP_HASH	0x00000003	/* hash (outbound-only) */
+#define	SAFE_SA_CMD0_OP_ESP	0x00000000	/* ESP in/out (proto) */
+#define	SAFE_SA_CMD0_OP_AH	0x00000001	/* AH in/out (proto) */
+#define	SAFE_SA_CMD0_INBOUND	0x00000008	/* inbound operation */
+#define	SAFE_SA_CMD0_OUTBOUND	0x00000000	/* outbound operation */
+#define	SAFE_SA_CMD0_GROUP	0x00000030	/* operation group */
+#define	SAFE_SA_CMD0_BASIC	0x00000000	/* basic operation */
+#define	SAFE_SA_CMD0_PROTO	0x00000010	/* protocol/packet operation */
+#define	SAFE_SA_CMD0_BUNDLE	0x00000020	/* bundled operation (resvd) */
+#define	SAFE_SA_CMD0_PAD	0x000000c0	/* crypto pad method */
+#define	SAFE_SA_CMD0_PAD_IPSEC	0x00000000	/* IPsec padding */
+#define	SAFE_SA_CMD0_PAD_PKCS7	0x00000040	/* PKCS#7 padding */
+#define	SAFE_SA_CMD0_PAD_CONS	0x00000080	/* constant padding */
+#define	SAFE_SA_CMD0_PAD_ZERO	0x000000c0	/* zero padding */
+#define	SAFE_SA_CMD0_CRYPT_ALG	0x00000f00	/* symmetric crypto algorithm */
+#define	SAFE_SA_CMD0_DES	0x00000000	/* DES crypto algorithm */
+#define	SAFE_SA_CMD0_3DES	0x00000100	/* 3DES crypto algorithm */
+#define	SAFE_SA_CMD0_AES	0x00000300	/* AES crypto algorithm */
+#define	SAFE_SA_CMD0_CRYPT_NULL	0x00000f00	/* null crypto algorithm */
+#define	SAFE_SA_CMD0_HASH_ALG	0x0000f000	/* hash algorithm */
+#define	SAFE_SA_CMD0_MD5	0x00000000	/* MD5 hash algorithm */
+#define	SAFE_SA_CMD0_SHA1	0x00001000	/* SHA-1 hash algorithm */
+#define	SAFE_SA_CMD0_HASH_NULL	0x0000f000	/* null hash algorithm */
+#define	SAFE_SA_CMD0_HDR_PROC	0x00080000	/* header processing */
+#define	SAFE_SA_CMD0_IBUSID	0x00300000	/* input bus id */
+#define	SAFE_SA_CMD0_IPCI	0x00100000	/* PCI input bus id */
+#define	SAFE_SA_CMD0_OBUSID	0x00c00000	/* output bus id */
+#define	SAFE_SA_CMD0_OPCI	0x00400000	/* PCI output bus id */
+#define	SAFE_SA_CMD0_IVLD	0x03000000	/* IV loading */
+#define	SAFE_SA_CMD0_IVLD_NONE	0x00000000	/* IV no load (reuse) */
+#define	SAFE_SA_CMD0_IVLD_IBUF	0x01000000	/* IV load from input buffer */
+#define	SAFE_SA_CMD0_IVLD_STATE	0x02000000	/* IV load from state */
+#define	SAFE_SA_CMD0_HSLD	0x0c000000	/* hash state loading */
+#define	SAFE_SA_CMD0_HSLD_SA	0x00000000	/* hash state load from SA */
+#define	SAFE_SA_CMD0_HSLD_STATE	0x08000000	/* hash state load from state */
+#define	SAFE_SA_CMD0_HSLD_NONE	0x0c000000	/* hash state no load */
+#define	SAFE_SA_CMD0_SAVEIV	0x10000000	/* save IV */
+#define	SAFE_SA_CMD0_SAVEHASH	0x20000000	/* save hash state */
+#define	SAFE_SA_CMD0_IGATHER	0x40000000	/* input gather */
+#define	SAFE_SA_CMD0_OSCATTER	0x80000000	/* output scatter */
+
+#define	SAFE_SA_CMD1_HDRCOPY	0x00000002	/* copy header to output */
+#define	SAFE_SA_CMD1_PAYCOPY	0x00000004	/* copy payload to output */
+#define	SAFE_SA_CMD1_PADCOPY	0x00000008	/* copy pad to output */
+#define	SAFE_SA_CMD1_IPV4	0x00000000	/* IPv4 protocol */
+#define	SAFE_SA_CMD1_IPV6	0x00000010	/* IPv6 protocol */
+#define	SAFE_SA_CMD1_MUTABLE	0x00000020	/* mutable bit processing */
+#define	SAFE_SA_CMD1_SRBUSID	0x000000c0	/* state record bus id */
+#define	SAFE_SA_CMD1_SRPCI	0x00000040	/* state record from PCI */
+#define	SAFE_SA_CMD1_CRMODE	0x00000300	/* crypto mode */
+#define	SAFE_SA_CMD1_ECB	0x00000000	/* ECB crypto mode */
+#define	SAFE_SA_CMD1_CBC	0x00000100	/* CBC crypto mode */
+#define	SAFE_SA_CMD1_OFB	0x00000200	/* OFB crypto mode */
+#define	SAFE_SA_CMD1_CFB	0x00000300	/* CFB crypto mode */
+#define	SAFE_SA_CMD1_CRFEEDBACK	0x00000c00	/* crypto feedback mode */
+#define	SAFE_SA_CMD1_64BIT	0x00000000	/* 64-bit crypto feedback */
+#define	SAFE_SA_CMD1_8BIT	0x00000400	/* 8-bit crypto feedback */
+#define	SAFE_SA_CMD1_1BIT	0x00000800	/* 1-bit crypto feedback */
+#define	SAFE_SA_CMD1_128BIT	0x00000c00	/* 128-bit crypto feedback */
+#define	SAFE_SA_CMD1_OPTIONS	0x00001000	/* HMAC/options mutable bit */
+#define	SAFE_SA_CMD1_HMAC	SAFE_SA_CMD1_OPTIONS
+#define	SAFE_SA_CMD1_SAREV1	0x00008000	/* SA Revision 1 */
+#define	SAFE_SA_CMD1_OFFSET	0x00ff0000	/* hash/crypto offset(dwords) */
+#define	SAFE_SA_CMD1_OFFSET_S	16
+#define	SAFE_SA_CMD1_AESKEYLEN	0x0f000000	/* AES key length */
+#define	SAFE_SA_CMD1_AES128	0x02000000	/* 128-bit AES key */
+#define	SAFE_SA_CMD1_AES192	0x03000000	/* 192-bit AES key */
+#define	SAFE_SA_CMD1_AES256	0x04000000	/* 256-bit AES key */
+
+/*
+ * Security Associate State Record (Rev 1).
+ */
+struct safe_sastate {
+	u_int32_t	sa_saved_iv[4];		/* saved IV (DES/3DES/AES) */
+	u_int32_t	sa_saved_hashbc;	/* saved hash byte count */
+	u_int32_t	sa_saved_indigest[5];	/* saved inner digest */
+};
+#endif /* _SAFE_SAFEREG_H_ */
diff --git a/crypto/ocf/safe/safevar.h b/crypto/ocf/safe/safevar.h
new file mode 100644
index 000000000000..11d8304aacef
--- /dev/null
+++ b/crypto/ocf/safe/safevar.h
@@ -0,0 +1,229 @@
+/*-
+ * The linux port of this code done by David McCullough
+ * Copyright (C) 2004-2010 David McCullough <david_mccullough@mcafee.com>
+ * The license and original author are listed below.
+ *
+ * Copyright (c) 2003 Sam Leffler, Errno Consulting
+ * Copyright (c) 2003 Global Technology Associates, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD: src/sys/dev/safe/safevar.h,v 1.2 2006/05/17 18:34:26 pjd Exp $
+ */
+#ifndef _SAFE_SAFEVAR_H_
+#define	_SAFE_SAFEVAR_H_
+
+/* Maximum queue length */
+#ifndef SAFE_MAX_NQUEUE
+#define SAFE_MAX_NQUEUE	60
+#endif
+
+#define	SAFE_MAX_PART		64	/* Maximum scatter/gather depth */
+#define	SAFE_DMA_BOUNDARY	0	/* No boundary for source DMA ops */
+#define	SAFE_MAX_DSIZE		2048 /* MCLBYTES Fixed scatter particle size */
+#define	SAFE_MAX_SSIZE		0x0ffff	/* Maximum gather particle size */
+#define	SAFE_MAX_DMA		0xfffff	/* Maximum PE operand size (20 bits) */
+/* total src+dst particle descriptors */
+#define	SAFE_TOTAL_DPART	(SAFE_MAX_NQUEUE * SAFE_MAX_PART)
+#define	SAFE_TOTAL_SPART	(SAFE_MAX_NQUEUE * SAFE_MAX_PART)
+
+#define	SAFE_RNG_MAXBUFSIZ	128	/* 32-bit words */
+
+#define	SAFE_CARD(sid)		(((sid) & 0xf0000000) >> 28)
+#define	SAFE_SESSION(sid)	( (sid) & 0x0fffffff)
+#define	SAFE_SID(crd, sesn)	(((crd) << 28) | ((sesn) & 0x0fffffff))
+
+#define SAFE_DEF_RTY		0xff	/* PCI Retry Timeout */
+#define SAFE_DEF_TOUT		0xff	/* PCI TRDY Timeout */
+#define SAFE_DEF_CACHELINE	0x01	/* Cache Line setting */
+
+#ifdef __KERNEL__
+/*
+ * State associated with the allocation of each chunk
+ * of memory setup for DMA.
+ */
+struct safe_dma_alloc {
+	dma_addr_t		dma_paddr;
+	void			*dma_vaddr;
+};
+
+/*
+ * Cryptographic operand state.  One of these exists for each
+ * source and destination operand passed in from the crypto
+ * subsystem.  When possible source and destination operands
+ * refer to the same memory.  More often they are distinct.
+ * We track the virtual address of each operand as well as
+ * where each is mapped for DMA.
+ */
+struct safe_operand {
+	union {
+		struct sk_buff *skb;
+		struct uio *io;
+	} u;
+	void			*map;
+	int				mapsize;	/* total number of bytes in segs */
+	struct {
+		dma_addr_t	ds_addr;
+		int			ds_len;
+		int			ds_tlen;
+	} segs[SAFE_MAX_PART];
+	int				nsegs;
+};
+
+/*
+ * Packet engine ring entry and cryptographic operation state.
+ * The packet engine requires a ring of descriptors that contain
+ * pointers to various cryptographic state.  However the ring
+ * configuration register allows you to specify an arbitrary size
+ * for ring entries.  We use this feature to collect most of the
+ * state for each cryptographic request into one spot.  Other than
+ * ring entries only the ``particle descriptors'' (scatter/gather
+ * lists) and the actual operand data are kept separate.  The
+ * particle descriptors must also be organized in rings.  The
+ * operand data can be located aribtrarily (modulo alignment constraints).
+ *
+ * Note that the descriptor ring is mapped onto the PCI bus so
+ * the hardware can DMA data.  This means the entire ring must be
+ * contiguous.
+ */
+struct safe_ringentry {
+	struct safe_desc	re_desc;	/* command descriptor */
+	struct safe_sarec	re_sa;		/* SA record */
+	struct safe_sastate	re_sastate;	/* SA state record */
+
+	struct cryptop		*re_crp;	/* crypto operation */
+
+	struct safe_operand	re_src;		/* source operand */
+	struct safe_operand	re_dst;		/* destination operand */
+
+	int			re_sesn;	/* crypto session ID */
+	int			re_flags;
+#define	SAFE_QFLAGS_COPYOUTIV	0x1		/* copy back on completion */
+#define	SAFE_QFLAGS_COPYOUTICV	0x2		/* copy back on completion */
+};
+
+#define	re_src_skb	re_src.u.skb
+#define	re_src_io	re_src.u.io
+#define	re_src_map	re_src.map
+#define	re_src_nsegs	re_src.nsegs
+#define	re_src_segs	re_src.segs
+#define	re_src_mapsize	re_src.mapsize
+
+#define	re_dst_skb	re_dst.u.skb
+#define	re_dst_io	re_dst.u.io
+#define	re_dst_map	re_dst.map
+#define	re_dst_nsegs	re_dst.nsegs
+#define	re_dst_segs	re_dst.segs
+#define	re_dst_mapsize	re_dst.mapsize
+
+struct rndstate_test;
+
+struct safe_session {
+	u_int32_t	ses_used;
+	u_int32_t	ses_klen;		/* key length in bits */
+	u_int32_t	ses_key[8];		/* DES/3DES/AES key */
+	u_int32_t	ses_mlen;		/* hmac length in bytes */
+	u_int32_t	ses_hminner[5];		/* hmac inner state */
+	u_int32_t	ses_hmouter[5];		/* hmac outer state */
+};
+
+struct safe_pkq {
+	struct list_head	pkq_list;
+	struct cryptkop		*pkq_krp;
+};
+
+struct safe_softc {
+	softc_device_decl	sc_dev;
+	u32			sc_irq;
+
+	struct pci_dev		*sc_pcidev;
+	ocf_iomem_t		sc_base_addr;
+
+	u_int			sc_chiprev;	/* major/minor chip revision */
+	int			sc_flags;	/* device specific flags */
+#define	SAFE_FLAGS_KEY		0x01		/* has key accelerator */
+#define	SAFE_FLAGS_RNG		0x02		/* hardware rng */
+	int			sc_suspended;
+	int			sc_needwakeup;	/* notify crypto layer */
+	int32_t			sc_cid;		/* crypto tag */
+
+	struct safe_dma_alloc	sc_ringalloc;	/* PE ring allocation state */
+	struct safe_ringentry	*sc_ring;	/* PE ring */
+	struct safe_ringentry	*sc_ringtop;	/* PE ring top */
+	struct safe_ringentry	*sc_front;	/* next free entry */
+	struct safe_ringentry	*sc_back;	/* next pending entry */
+	int			sc_nqchip;	/* # passed to chip */
+	spinlock_t		sc_ringmtx;	/* PE ring lock */
+	struct safe_pdesc	*sc_spring;	/* src particle ring */
+	struct safe_pdesc	*sc_springtop;	/* src particle ring top */
+	struct safe_pdesc	*sc_spfree;	/* next free src particle */
+	struct safe_dma_alloc	sc_spalloc;	/* src particle ring state */
+	struct safe_pdesc	*sc_dpring;	/* dest particle ring */
+	struct safe_pdesc	*sc_dpringtop;	/* dest particle ring top */
+	struct safe_pdesc	*sc_dpfree;	/* next free dest particle */
+	struct safe_dma_alloc	sc_dpalloc;	/* dst particle ring state */
+	int			sc_nsessions;	/* # of sessions */
+	struct safe_session	*sc_sessions;	/* sessions */
+
+	struct timer_list	sc_pkto;	/* PK polling */
+	spinlock_t		sc_pkmtx;	/* PK lock */
+	struct list_head	sc_pkq;		/* queue of PK requests */
+	struct safe_pkq		*sc_pkq_cur;	/* current processing request */
+	u_int32_t		sc_pk_reslen, sc_pk_resoff;
+
+	int			sc_max_dsize;	/* maximum safe DMA size */
+};
+#endif /* __KERNEL__ */
+
+struct safe_stats {
+	u_int64_t st_ibytes;
+	u_int64_t st_obytes;
+	u_int32_t st_ipackets;
+	u_int32_t st_opackets;
+	u_int32_t st_invalid;		/* invalid argument */
+	u_int32_t st_badsession;	/* invalid session id */
+	u_int32_t st_badflags;		/* flags indicate !(mbuf | uio) */
+	u_int32_t st_nodesc;		/* op submitted w/o descriptors */
+	u_int32_t st_badalg;		/* unsupported algorithm */
+	u_int32_t st_ringfull;		/* PE descriptor ring full */
+	u_int32_t st_peoperr;		/* PE marked error */
+	u_int32_t st_dmaerr;		/* PE DMA error */
+	u_int32_t st_bypasstoobig;	/* bypass > 96 bytes */
+	u_int32_t st_skipmismatch;	/* enc part begins before auth part */
+	u_int32_t st_lenmismatch;	/* enc length different auth length */
+	u_int32_t st_coffmisaligned;	/* crypto offset not 32-bit aligned */
+	u_int32_t st_cofftoobig;	/* crypto offset > 255 words */
+	u_int32_t st_iovmisaligned;	/* iov op not aligned */
+	u_int32_t st_iovnotuniform;	/* iov op not suitable */
+	u_int32_t st_unaligned;		/* unaligned src caused copy */
+	u_int32_t st_notuniform;	/* non-uniform src caused copy */
+	u_int32_t st_nomap;		/* bus_dmamap_create failed */
+	u_int32_t st_noload;		/* bus_dmamap_load_* failed */
+	u_int32_t st_nombuf;		/* MGET* failed */
+	u_int32_t st_nomcl;		/* MCLGET* failed */
+	u_int32_t st_maxqchip;		/* max mcr1 ops out for processing */
+	u_int32_t st_rng;		/* RNG requests */
+	u_int32_t st_rngalarm;		/* RNG alarm requests */
+	u_int32_t st_noicvcopy;		/* ICV data copies suppressed */
+};
+#endif /* _SAFE_SAFEVAR_H_ */
diff --git a/crypto/ocf/safe/sha1.c b/crypto/ocf/safe/sha1.c
new file mode 100644
index 000000000000..4e360e20db30
--- /dev/null
+++ b/crypto/ocf/safe/sha1.c
@@ -0,0 +1,279 @@
+/*	$KAME: sha1.c,v 1.5 2000/11/08 06:13:08 itojun Exp $	*/
+/*
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * FIPS pub 180-1: Secure Hash Algorithm (SHA-1)
+ * based on: http://csrc.nist.gov/fips/fip180-1.txt
+ * implemented by Jun-ichiro itojun Itoh <itojun@itojun.org>
+ */
+
+#if 0
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD: src/sys/crypto/sha1.c,v 1.9 2003/06/10 21:36:57 obrien Exp $");
+
+#include <sys/types.h>
+#include <sys/cdefs.h>
+#include <sys/time.h>
+#include <sys/systm.h>
+
+#include <crypto/sha1.h>
+#endif
+
+/* sanity check */
+#if BYTE_ORDER != BIG_ENDIAN
+# if BYTE_ORDER != LITTLE_ENDIAN
+#  define unsupported 1
+# endif
+#endif
+
+#ifndef unsupported
+
+/* constant table */
+static u_int32_t _K[] = { 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6 };
+#define	K(t)	_K[(t) / 20]
+
+#define	F0(b, c, d)	(((b) & (c)) | ((~(b)) & (d)))
+#define	F1(b, c, d)	(((b) ^ (c)) ^ (d))
+#define	F2(b, c, d)	(((b) & (c)) | ((b) & (d)) | ((c) & (d)))
+#define	F3(b, c, d)	(((b) ^ (c)) ^ (d))
+
+#define	S(n, x)		(((x) << (n)) | ((x) >> (32 - n)))
+
+#undef H
+#define	H(n)	(ctxt->h.b32[(n)])
+#define	COUNT	(ctxt->count)
+#define	BCOUNT	(ctxt->c.b64[0] / 8)
+#define	W(n)	(ctxt->m.b32[(n)])
+
+#define	PUTBYTE(x)	{ \
+	ctxt->m.b8[(COUNT % 64)] = (x);		\
+	COUNT++;				\
+	COUNT %= 64;				\
+	ctxt->c.b64[0] += 8;			\
+	if (COUNT % 64 == 0)			\
+		sha1_step(ctxt);		\
+     }
+
+#define	PUTPAD(x)	{ \
+	ctxt->m.b8[(COUNT % 64)] = (x);		\
+	COUNT++;				\
+	COUNT %= 64;				\
+	if (COUNT % 64 == 0)			\
+		sha1_step(ctxt);		\
+     }
+
+static void sha1_step(struct sha1_ctxt *);
+
+static void
+sha1_step(ctxt)
+	struct sha1_ctxt *ctxt;
+{
+	u_int32_t	a, b, c, d, e;
+	size_t t, s;
+	u_int32_t	tmp;
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+	struct sha1_ctxt tctxt;
+	bcopy(&ctxt->m.b8[0], &tctxt.m.b8[0], 64);
+	ctxt->m.b8[0] = tctxt.m.b8[3]; ctxt->m.b8[1] = tctxt.m.b8[2];
+	ctxt->m.b8[2] = tctxt.m.b8[1]; ctxt->m.b8[3] = tctxt.m.b8[0];
+	ctxt->m.b8[4] = tctxt.m.b8[7]; ctxt->m.b8[5] = tctxt.m.b8[6];
+	ctxt->m.b8[6] = tctxt.m.b8[5]; ctxt->m.b8[7] = tctxt.m.b8[4];
+	ctxt->m.b8[8] = tctxt.m.b8[11]; ctxt->m.b8[9] = tctxt.m.b8[10];
+	ctxt->m.b8[10] = tctxt.m.b8[9]; ctxt->m.b8[11] = tctxt.m.b8[8];
+	ctxt->m.b8[12] = tctxt.m.b8[15]; ctxt->m.b8[13] = tctxt.m.b8[14];
+	ctxt->m.b8[14] = tctxt.m.b8[13]; ctxt->m.b8[15] = tctxt.m.b8[12];
+	ctxt->m.b8[16] = tctxt.m.b8[19]; ctxt->m.b8[17] = tctxt.m.b8[18];
+	ctxt->m.b8[18] = tctxt.m.b8[17]; ctxt->m.b8[19] = tctxt.m.b8[16];
+	ctxt->m.b8[20] = tctxt.m.b8[23]; ctxt->m.b8[21] = tctxt.m.b8[22];
+	ctxt->m.b8[22] = tctxt.m.b8[21]; ctxt->m.b8[23] = tctxt.m.b8[20];
+	ctxt->m.b8[24] = tctxt.m.b8[27]; ctxt->m.b8[25] = tctxt.m.b8[26];
+	ctxt->m.b8[26] = tctxt.m.b8[25]; ctxt->m.b8[27] = tctxt.m.b8[24];
+	ctxt->m.b8[28] = tctxt.m.b8[31]; ctxt->m.b8[29] = tctxt.m.b8[30];
+	ctxt->m.b8[30] = tctxt.m.b8[29]; ctxt->m.b8[31] = tctxt.m.b8[28];
+	ctxt->m.b8[32] = tctxt.m.b8[35]; ctxt->m.b8[33] = tctxt.m.b8[34];
+	ctxt->m.b8[34] = tctxt.m.b8[33]; ctxt->m.b8[35] = tctxt.m.b8[32];
+	ctxt->m.b8[36] = tctxt.m.b8[39]; ctxt->m.b8[37] = tctxt.m.b8[38];
+	ctxt->m.b8[38] = tctxt.m.b8[37]; ctxt->m.b8[39] = tctxt.m.b8[36];
+	ctxt->m.b8[40] = tctxt.m.b8[43]; ctxt->m.b8[41] = tctxt.m.b8[42];
+	ctxt->m.b8[42] = tctxt.m.b8[41]; ctxt->m.b8[43] = tctxt.m.b8[40];
+	ctxt->m.b8[44] = tctxt.m.b8[47]; ctxt->m.b8[45] = tctxt.m.b8[46];
+	ctxt->m.b8[46] = tctxt.m.b8[45]; ctxt->m.b8[47] = tctxt.m.b8[44];
+	ctxt->m.b8[48] = tctxt.m.b8[51]; ctxt->m.b8[49] = tctxt.m.b8[50];
+	ctxt->m.b8[50] = tctxt.m.b8[49]; ctxt->m.b8[51] = tctxt.m.b8[48];
+	ctxt->m.b8[52] = tctxt.m.b8[55]; ctxt->m.b8[53] = tctxt.m.b8[54];
+	ctxt->m.b8[54] = tctxt.m.b8[53]; ctxt->m.b8[55] = tctxt.m.b8[52];
+	ctxt->m.b8[56] = tctxt.m.b8[59]; ctxt->m.b8[57] = tctxt.m.b8[58];
+	ctxt->m.b8[58] = tctxt.m.b8[57]; ctxt->m.b8[59] = tctxt.m.b8[56];
+	ctxt->m.b8[60] = tctxt.m.b8[63]; ctxt->m.b8[61] = tctxt.m.b8[62];
+	ctxt->m.b8[62] = tctxt.m.b8[61]; ctxt->m.b8[63] = tctxt.m.b8[60];
+#endif
+
+	a = H(0); b = H(1); c = H(2); d = H(3); e = H(4);
+
+	for (t = 0; t < 20; t++) {
+		s = t & 0x0f;
+		if (t >= 16) {
+			W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
+		}
+		tmp = S(5, a) + F0(b, c, d) + e + W(s) + K(t);
+		e = d; d = c; c = S(30, b); b = a; a = tmp;
+	}
+	for (t = 20; t < 40; t++) {
+		s = t & 0x0f;
+		W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
+		tmp = S(5, a) + F1(b, c, d) + e + W(s) + K(t);
+		e = d; d = c; c = S(30, b); b = a; a = tmp;
+	}
+	for (t = 40; t < 60; t++) {
+		s = t & 0x0f;
+		W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
+		tmp = S(5, a) + F2(b, c, d) + e + W(s) + K(t);
+		e = d; d = c; c = S(30, b); b = a; a = tmp;
+	}
+	for (t = 60; t < 80; t++) {
+		s = t & 0x0f;
+		W(s) = S(1, W((s+13) & 0x0f) ^ W((s+8) & 0x0f) ^ W((s+2) & 0x0f) ^ W(s));
+		tmp = S(5, a) + F3(b, c, d) + e + W(s) + K(t);
+		e = d; d = c; c = S(30, b); b = a; a = tmp;
+	}
+
+	H(0) = H(0) + a;
+	H(1) = H(1) + b;
+	H(2) = H(2) + c;
+	H(3) = H(3) + d;
+	H(4) = H(4) + e;
+
+	bzero(&ctxt->m.b8[0], 64);
+}
+
+/*------------------------------------------------------------*/
+
+void
+sha1_init(ctxt)
+	struct sha1_ctxt *ctxt;
+{
+	bzero(ctxt, sizeof(struct sha1_ctxt));
+	H(0) = 0x67452301;
+	H(1) = 0xefcdab89;
+	H(2) = 0x98badcfe;
+	H(3) = 0x10325476;
+	H(4) = 0xc3d2e1f0;
+}
+
+void
+sha1_pad(ctxt)
+	struct sha1_ctxt *ctxt;
+{
+	size_t padlen;		/*pad length in bytes*/
+	size_t padstart;
+
+	PUTPAD(0x80);
+
+	padstart = COUNT % 64;
+	padlen = 64 - padstart;
+	if (padlen < 8) {
+		bzero(&ctxt->m.b8[padstart], padlen);
+		COUNT += padlen;
+		COUNT %= 64;
+		sha1_step(ctxt);
+		padstart = COUNT % 64;	/* should be 0 */
+		padlen = 64 - padstart;	/* should be 64 */
+	}
+	bzero(&ctxt->m.b8[padstart], padlen - 8);
+	COUNT += (padlen - 8);
+	COUNT %= 64;
+#if BYTE_ORDER == BIG_ENDIAN
+	PUTPAD(ctxt->c.b8[0]); PUTPAD(ctxt->c.b8[1]);
+	PUTPAD(ctxt->c.b8[2]); PUTPAD(ctxt->c.b8[3]);
+	PUTPAD(ctxt->c.b8[4]); PUTPAD(ctxt->c.b8[5]);
+	PUTPAD(ctxt->c.b8[6]); PUTPAD(ctxt->c.b8[7]);
+#else
+	PUTPAD(ctxt->c.b8[7]); PUTPAD(ctxt->c.b8[6]);
+	PUTPAD(ctxt->c.b8[5]); PUTPAD(ctxt->c.b8[4]);
+	PUTPAD(ctxt->c.b8[3]); PUTPAD(ctxt->c.b8[2]);
+	PUTPAD(ctxt->c.b8[1]); PUTPAD(ctxt->c.b8[0]);
+#endif
+}
+
+void
+sha1_loop(ctxt, input, len)
+	struct sha1_ctxt *ctxt;
+	const u_int8_t *input;
+	size_t len;
+{
+	size_t gaplen;
+	size_t gapstart;
+	size_t off;
+	size_t copysiz;
+
+	off = 0;
+
+	while (off < len) {
+		gapstart = COUNT % 64;
+		gaplen = 64 - gapstart;
+
+		copysiz = (gaplen < len - off) ? gaplen : len - off;
+		bcopy(&input[off], &ctxt->m.b8[gapstart], copysiz);
+		COUNT += copysiz;
+		COUNT %= 64;
+		ctxt->c.b64[0] += copysiz * 8;
+		if (COUNT % 64 == 0)
+			sha1_step(ctxt);
+		off += copysiz;
+	}
+}
+
+void
+sha1_result(ctxt, digest0)
+	struct sha1_ctxt *ctxt;
+	caddr_t digest0;
+{
+	u_int8_t *digest;
+
+	digest = (u_int8_t *)digest0;
+	sha1_pad(ctxt);
+#if BYTE_ORDER == BIG_ENDIAN
+	bcopy(&ctxt->h.b8[0], digest, 20);
+#else
+	digest[0] = ctxt->h.b8[3]; digest[1] = ctxt->h.b8[2];
+	digest[2] = ctxt->h.b8[1]; digest[3] = ctxt->h.b8[0];
+	digest[4] = ctxt->h.b8[7]; digest[5] = ctxt->h.b8[6];
+	digest[6] = ctxt->h.b8[5]; digest[7] = ctxt->h.b8[4];
+	digest[8] = ctxt->h.b8[11]; digest[9] = ctxt->h.b8[10];
+	digest[10] = ctxt->h.b8[9]; digest[11] = ctxt->h.b8[8];
+	digest[12] = ctxt->h.b8[15]; digest[13] = ctxt->h.b8[14];
+	digest[14] = ctxt->h.b8[13]; digest[15] = ctxt->h.b8[12];
+	digest[16] = ctxt->h.b8[19]; digest[17] = ctxt->h.b8[18];
+	digest[18] = ctxt->h.b8[17]; digest[19] = ctxt->h.b8[16];
+#endif
+}
+
+#endif /*unsupported*/
diff --git a/crypto/ocf/safe/sha1.h b/crypto/ocf/safe/sha1.h
new file mode 100644
index 000000000000..0e19d9071f93
--- /dev/null
+++ b/crypto/ocf/safe/sha1.h
@@ -0,0 +1,72 @@
+/*	$FreeBSD: src/sys/crypto/sha1.h,v 1.8 2002/03/20 05:13:50 alfred Exp $	*/
+/*	$KAME: sha1.h,v 1.5 2000/03/27 04:36:23 sumikawa Exp $	*/
+
+/*
+ * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the project nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+/*
+ * FIPS pub 180-1: Secure Hash Algorithm (SHA-1)
+ * based on: http://csrc.nist.gov/fips/fip180-1.txt
+ * implemented by Jun-ichiro itojun Itoh <itojun@itojun.org>
+ */
+
+#ifndef _NETINET6_SHA1_H_
+#define _NETINET6_SHA1_H_
+
+struct sha1_ctxt {
+	union {
+		u_int8_t	b8[20];
+		u_int32_t	b32[5];
+	} h;
+	union {
+		u_int8_t	b8[8];
+		u_int64_t	b64[1];
+	} c;
+	union {
+		u_int8_t	b8[64];
+		u_int32_t	b32[16];
+	} m;
+	u_int8_t	count;
+};
+
+#ifdef __KERNEL__
+extern void sha1_init(struct sha1_ctxt *);
+extern void sha1_pad(struct sha1_ctxt *);
+extern void sha1_loop(struct sha1_ctxt *, const u_int8_t *, size_t);
+extern void sha1_result(struct sha1_ctxt *, caddr_t);
+
+/* compatibilty with other SHA1 source codes */
+typedef struct sha1_ctxt SHA1_CTX;
+#define SHA1Init(x)		sha1_init((x))
+#define SHA1Update(x, y, z)	sha1_loop((x), (y), (z))
+#define SHA1Final(x, y)		sha1_result((y), (x))
+#endif /* __KERNEL__ */
+
+#define	SHA1_RESULTLEN	(160/8)
+
+#endif /*_NETINET6_SHA1_H_*/
diff --git a/crypto/ocf/talitos/Makefile b/crypto/ocf/talitos/Makefile
new file mode 100644
index 000000000000..b682f0d012df
--- /dev/null
+++ b/crypto/ocf/talitos/Makefile
@@ -0,0 +1,11 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+obj-$(CONFIG_OCF_TALITOS) += talitos.o
+
+obj ?= .
+EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
diff --git a/crypto/ocf/talitos/talitos.c b/crypto/ocf/talitos/talitos.c
new file mode 100644
index 000000000000..185d2b6b0853
--- /dev/null
+++ b/crypto/ocf/talitos/talitos.c
@@ -0,0 +1,1355 @@
+/*
+ * crypto/ocf/talitos/talitos.c
+ *
+ * An OCF-Linux module that uses Freescale's SEC to do the crypto.
+ * Based on crypto/ocf/hifn and crypto/ocf/safe OCF drivers
+ *
+ * Copyright (c) 2006 Freescale Semiconductor, Inc.
+ *
+ * This code written by Kim A. B. Phillips <kim.phillips@freescale.com>
+ * some code copied from files with the following:
+ * Copyright (C) 2004-2007 David McCullough <david_mccullough@mcafee.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * ---------------------------------------------------------------------------
+ *
+ * NOTES:
+ *
+ * The Freescale SEC (also known as 'talitos') resides on the
+ * internal bus, and runs asynchronous to the processor core.  It has
+ * a wide gamut of cryptographic acceleration features, including single-
+ * pass IPsec (also known as algorithm chaining).  To properly utilize
+ * all of the SEC's performance enhancing features, further reworking
+ * of higher level code (framework, applications) will be necessary.
+ *
+ * The following table shows which SEC version is present in which devices:
+ *
+ * Devices       SEC version
+ *
+ * 8272, 8248    SEC 1.0
+ * 885, 875      SEC 1.2
+ * 8555E, 8541E  SEC 2.0
+ * 8349E         SEC 2.01
+ * 8548E         SEC 2.1
+ *
+ * The following table shows the features offered by each SEC version:
+ *
+ * 	                       Max.   chan-
+ * version  Bus I/F       Clock  nels  DEU AESU AFEU MDEU PKEU RNG KEU
+ *
+ * SEC 1.0  internal 64b  100MHz   4     1    1    1    1    1   1   0
+ * SEC 1.2  internal 32b   66MHz   1     1    1    0    1    0   0   0
+ * SEC 2.0  internal 64b  166MHz   4     1    1    1    1    1   1   0
+ * SEC 2.01 internal 64b  166MHz   4     1    1    1    1    1   1   0
+ * SEC 2.1  internal 64b  333MHz   4     1    1    1    1    1   1   1
+ *
+ * Each execution unit in the SEC has two modes of execution; channel and
+ * slave/debug.  This driver employs the channel infrastructure in the
+ * device for convenience.  Only the RNG is directly accessed due to the
+ * convenience of its random fifo pool.  The relationship between the
+ * channels and execution units is depicted in the following diagram:
+ *
+ *    -------   ------------
+ * ---| ch0 |---|          |
+ *    -------   |          |
+ *              |          |------+-------+-------+-------+------------
+ *    -------   |          |      |       |       |       |           |
+ * ---| ch1 |---|          |      |       |       |       |           |
+ *    -------   |          |   ------  ------  ------  ------      ------
+ *              |controller|   |DEU |  |AESU|  |MDEU|  |PKEU| ...  |RNG |
+ *    -------   |          |   ------  ------  ------  ------      ------
+ * ---| ch2 |---|          |      |       |       |       |           |
+ *    -------   |          |      |       |       |       |           |
+ *              |          |------+-------+-------+-------+------------
+ *    -------   |          |
+ * ---| ch3 |---|          |
+ *    -------   ------------
+ *
+ * Channel ch0 may drive an aes operation to the aes unit (AESU),
+ * and, at the same time, ch1 may drive a message digest operation
+ * to the mdeu. Each channel has an input descriptor FIFO, and the
+ * FIFO can contain, e.g. on the 8541E, up to 24 entries, before a
+ * a buffer overrun error is triggered. The controller is responsible
+ * for fetching the data from descriptor pointers, and passing the
+ * data to the appropriate EUs. The controller also writes the
+ * cryptographic operation's result to memory. The SEC notifies
+ * completion by triggering an interrupt and/or setting the 1st byte
+ * of the hdr field to 0xff.
+ *
+ * TODO:
+ * o support more algorithms
+ * o support more versions of the SEC
+ * o add support for linux 2.4
+ * o scatter-gather (sg) support
+ * o add support for public key ops (PKEU)
+ * o add statistics
+ */
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) && !defined(AUTOCONF_INCLUDED)
+#include <linux/config.h>
+#endif
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <asm/scatterlist.h>
+#include <linux/dma-mapping.h>  /* dma_map_single() */
+#include <linux/moduleparam.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
+#include <linux/platform_device.h>
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
+#include <linux/of_platform.h>
+#endif
+
+#include <cryptodev.h>
+#include <uio.h>
+
+#define DRV_NAME "talitos"
+
+#include "talitos_dev.h"
+#include "talitos_soft.h"
+
+#define read_random(p,l) get_random_bytes(p,l)
+
+const char talitos_driver_name[] = "Talitos OCF";
+const char talitos_driver_version[] = "0.2";
+
+static int talitos_newsession(device_t dev, u_int32_t *sidp,
+								struct cryptoini *cri);
+static int talitos_freesession(device_t dev, u_int64_t tid);
+static int talitos_process(device_t dev, struct cryptop *crp, int hint);
+static void dump_talitos_status(struct talitos_softc *sc);
+static int talitos_submit(struct talitos_softc *sc, struct talitos_desc *td,
+								int chsel);
+static void talitos_doneprocessing(struct talitos_softc *sc);
+static void talitos_init_device(struct talitos_softc *sc);
+static void talitos_reset_device_master(struct talitos_softc *sc);
+static void talitos_reset_device(struct talitos_softc *sc);
+static void talitos_errorprocessing(struct talitos_softc *sc);
+#ifdef CONFIG_PPC_MERGE
+static int talitos_probe(struct of_device *ofdev, const struct of_device_id *match);
+static int talitos_remove(struct of_device *ofdev);
+#else
+static int talitos_probe(struct platform_device *pdev);
+static int talitos_remove(struct platform_device *pdev);
+#endif
+#ifdef CONFIG_OCF_RANDOMHARVEST
+static int talitos_read_random(void *arg, u_int32_t *buf, int maxwords);
+static void talitos_rng_init(struct talitos_softc *sc);
+#endif
+
+static device_method_t talitos_methods = {
+	/* crypto device methods */
+	DEVMETHOD(cryptodev_newsession,	talitos_newsession),
+	DEVMETHOD(cryptodev_freesession,talitos_freesession),
+	DEVMETHOD(cryptodev_process,	talitos_process),
+};
+
+#define debug talitos_debug
+int talitos_debug = 0;
+module_param(talitos_debug, int, 0644);
+MODULE_PARM_DESC(talitos_debug, "Enable debug");
+
+static inline void talitos_write(volatile unsigned *addr, u32 val)
+{
+        out_be32(addr, val);
+}
+
+static inline u32 talitos_read(volatile unsigned *addr)
+{
+        u32 val;
+        val = in_be32(addr);
+        return val;
+}
+
+static void dump_talitos_status(struct talitos_softc *sc)
+{
+	unsigned int v, v_hi, i, *ptr;
+	v = talitos_read(sc->sc_base_addr + TALITOS_MCR);
+	v_hi = talitos_read(sc->sc_base_addr + TALITOS_MCR_HI);
+	printk(KERN_INFO "%s: MCR          0x%08x_%08x\n",
+			device_get_nameunit(sc->sc_cdev), v, v_hi);
+	v = talitos_read(sc->sc_base_addr + TALITOS_IMR);
+	v_hi = talitos_read(sc->sc_base_addr + TALITOS_IMR_HI);
+	printk(KERN_INFO "%s: IMR          0x%08x_%08x\n",
+			device_get_nameunit(sc->sc_cdev), v, v_hi);
+	v = talitos_read(sc->sc_base_addr + TALITOS_ISR);
+	v_hi = talitos_read(sc->sc_base_addr + TALITOS_ISR_HI);
+	printk(KERN_INFO "%s: ISR          0x%08x_%08x\n",
+			device_get_nameunit(sc->sc_cdev), v, v_hi);
+	for (i = 0; i < sc->sc_num_channels; i++) {
+		v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
+			TALITOS_CH_CDPR);
+		v_hi = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
+			TALITOS_CH_CDPR_HI);
+		printk(KERN_INFO "%s: CDPR     ch%d 0x%08x_%08x\n",
+				device_get_nameunit(sc->sc_cdev), i, v, v_hi);
+	}
+	for (i = 0; i < sc->sc_num_channels; i++) {
+		v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
+			TALITOS_CH_CCPSR);
+		v_hi = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
+			TALITOS_CH_CCPSR_HI);
+		printk(KERN_INFO "%s: CCPSR    ch%d 0x%08x_%08x\n",
+				device_get_nameunit(sc->sc_cdev), i, v, v_hi);
+	}
+	ptr = sc->sc_base_addr + TALITOS_CH_DESCBUF;
+	for (i = 0; i < 16; i++) {
+		v = talitos_read(ptr++); v_hi = talitos_read(ptr++);
+		printk(KERN_INFO "%s: DESCBUF  ch0 0x%08x_%08x (tdp%02d)\n",
+				device_get_nameunit(sc->sc_cdev), v, v_hi, i);
+	}
+	return;
+}
+
+
+#ifdef CONFIG_OCF_RANDOMHARVEST
+/*
+ * pull random numbers off the RNG FIFO, not exceeding amount available
+ */
+static int
+talitos_read_random(void *arg, u_int32_t *buf, int maxwords)
+{
+	struct talitos_softc *sc = (struct talitos_softc *) arg;
+	int rc;
+	u_int32_t v;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	/* check for things like FIFO underflow */
+	v = talitos_read(sc->sc_base_addr + TALITOS_RNGISR_HI);
+	if (unlikely(v)) {
+		printk(KERN_ERR "%s: RNGISR_HI error %08x\n",
+				device_get_nameunit(sc->sc_cdev), v);
+		return 0;
+	}
+	/*
+	 * OFL is number of available 64-bit words,
+	 * shift and convert to a 32-bit word count
+	 */
+	v = talitos_read(sc->sc_base_addr + TALITOS_RNGSR_HI);
+	v = (v & TALITOS_RNGSR_HI_OFL) >> (16 - 1);
+	if (maxwords > v)
+		maxwords = v;
+	for (rc = 0; rc < maxwords; rc++) {
+		buf[rc] = talitos_read(sc->sc_base_addr +
+			TALITOS_RNG_FIFO + rc*sizeof(u_int32_t));
+	}
+	if (maxwords & 1) {
+		/*
+		 * RNG will complain with an AE in the RNGISR
+		 * if we don't complete the pairs of 32-bit reads
+		 * to its 64-bit register based FIFO
+		 */
+		v = talitos_read(sc->sc_base_addr +
+			TALITOS_RNG_FIFO + rc*sizeof(u_int32_t));
+	}
+
+	return rc;
+}
+
+static void
+talitos_rng_init(struct talitos_softc *sc)
+{
+	u_int32_t v;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+	/* reset RNG EU */
+	v = talitos_read(sc->sc_base_addr + TALITOS_RNGRCR_HI);
+	v |= TALITOS_RNGRCR_HI_SR;
+	talitos_write(sc->sc_base_addr + TALITOS_RNGRCR_HI, v);
+	while ((talitos_read(sc->sc_base_addr + TALITOS_RNGSR_HI)
+		& TALITOS_RNGSR_HI_RD) == 0)
+			cpu_relax();
+	/*
+	 * we tell the RNG to start filling the RNG FIFO
+	 * by writing the RNGDSR
+	 */
+	v = talitos_read(sc->sc_base_addr + TALITOS_RNGDSR_HI);
+	talitos_write(sc->sc_base_addr + TALITOS_RNGDSR_HI, v);
+	/*
+	 * 64 bits of data will be pushed onto the FIFO every
+	 * 256 SEC cycles until the FIFO is full.  The RNG then
+	 * attempts to keep the FIFO full.
+	 */
+	v = talitos_read(sc->sc_base_addr + TALITOS_RNGISR_HI);
+	if (v) {
+		printk(KERN_ERR "%s: RNGISR_HI error %08x\n",
+			device_get_nameunit(sc->sc_cdev), v);
+		return;
+	}
+	/*
+	 * n.b. we need to add a FIPS test here - if the RNG is going
+	 * to fail, it's going to fail at reset time
+	 */
+	return;
+}
+#endif /* CONFIG_OCF_RANDOMHARVEST */
+
+/*
+ * Generate a new software session.
+ */
+static int
+talitos_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
+{
+	struct cryptoini *c, *encini = NULL, *macini = NULL;
+	struct talitos_softc *sc = device_get_softc(dev);
+	struct talitos_session *ses = NULL;
+	int sesn;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+	if (sidp == NULL || cri == NULL || sc == NULL) {
+		DPRINTF("%s,%d - EINVAL\n", __FILE__, __LINE__);
+		return EINVAL;
+	}
+	for (c = cri; c != NULL; c = c->cri_next) {
+		if (c->cri_alg == CRYPTO_MD5 ||
+		    c->cri_alg == CRYPTO_MD5_HMAC ||
+		    c->cri_alg == CRYPTO_SHA1 ||
+		    c->cri_alg == CRYPTO_SHA1_HMAC ||
+		    c->cri_alg == CRYPTO_NULL_HMAC) {
+			if (macini)
+				return EINVAL;
+			macini = c;
+		} else if (c->cri_alg == CRYPTO_DES_CBC ||
+		    c->cri_alg == CRYPTO_3DES_CBC ||
+		    c->cri_alg == CRYPTO_AES_CBC ||
+		    c->cri_alg == CRYPTO_NULL_CBC) {
+			if (encini)
+				return EINVAL;
+			encini = c;
+		} else {
+			DPRINTF("UNKNOWN c->cri_alg %d\n", encini->cri_alg);
+			return EINVAL;
+		}
+	}
+	if (encini == NULL && macini == NULL)
+		return EINVAL;
+	if (encini) {
+		/* validate key length */
+		switch (encini->cri_alg) {
+		case CRYPTO_DES_CBC:
+			if (encini->cri_klen != 64)
+				return EINVAL;
+			break;
+		case CRYPTO_3DES_CBC:
+			if (encini->cri_klen != 192) {
+				return EINVAL;
+			}
+			break;
+		case CRYPTO_AES_CBC:
+			if (encini->cri_klen != 128 &&
+			    encini->cri_klen != 192 &&
+			    encini->cri_klen != 256)
+				return EINVAL;
+			break;
+		default:
+			DPRINTF("UNKNOWN encini->cri_alg %d\n",
+				encini->cri_alg);
+			return EINVAL;
+		}
+	}
+
+	if (sc->sc_sessions == NULL) {
+		ses = sc->sc_sessions = (struct talitos_session *)
+			kmalloc(sizeof(struct talitos_session), SLAB_ATOMIC);
+		if (ses == NULL)
+			return ENOMEM;
+		memset(ses, 0, sizeof(struct talitos_session));
+		sesn = 0;
+		sc->sc_nsessions = 1;
+	} else {
+		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
+			if (sc->sc_sessions[sesn].ses_used == 0) {
+				ses = &sc->sc_sessions[sesn];
+				break;
+			}
+		}
+
+		if (ses == NULL) {
+			/* allocating session */
+			sesn = sc->sc_nsessions;
+			ses = (struct talitos_session *) kmalloc(
+				(sesn + 1) * sizeof(struct talitos_session),
+				SLAB_ATOMIC);
+			if (ses == NULL)
+				return ENOMEM;
+			memset(ses, 0,
+				(sesn + 1) * sizeof(struct talitos_session));
+			memcpy(ses, sc->sc_sessions,
+				sesn * sizeof(struct talitos_session));
+			memset(sc->sc_sessions, 0,
+				sesn * sizeof(struct talitos_session));
+			kfree(sc->sc_sessions);
+			sc->sc_sessions = ses;
+			ses = &sc->sc_sessions[sesn];
+			sc->sc_nsessions++;
+		}
+	}
+
+	ses->ses_used = 1;
+
+	if (encini) {
+		ses->ses_klen = (encini->cri_klen + 7) / 8;
+		memcpy(ses->ses_key, encini->cri_key, ses->ses_klen);
+		if (macini) {
+			/* doing hash on top of cipher */
+			ses->ses_hmac_len = (macini->cri_klen + 7) / 8;
+			memcpy(ses->ses_hmac, macini->cri_key,
+				ses->ses_hmac_len);
+		}
+	} else if (macini) {
+		/* doing hash */
+		ses->ses_klen = (macini->cri_klen + 7) / 8;
+		memcpy(ses->ses_key, macini->cri_key, ses->ses_klen);
+	}
+
+	/* back compat way of determining MSC result len */
+	if (macini) {
+		ses->ses_mlen = macini->cri_mlen;
+		if (ses->ses_mlen == 0) {
+			if (macini->cri_alg == CRYPTO_MD5_HMAC)
+				ses->ses_mlen = MD5_HASH_LEN;
+			else
+				ses->ses_mlen = SHA1_HASH_LEN;
+		}
+	}
+
+	/* really should make up a template td here,
+	 * and only fill things like i/o and direction in process() */
+
+	/* assign session ID */
+	*sidp = TALITOS_SID(sc->sc_num, sesn);
+	return 0;
+}
+
+/*
+ * Deallocate a session.
+ */
+static int
+talitos_freesession(device_t dev, u_int64_t tid)
+{
+	struct talitos_softc *sc = device_get_softc(dev);
+	int session, ret;
+	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
+
+	if (sc == NULL)
+		return EINVAL;
+	session = TALITOS_SESSION(sid);
+	if (session < sc->sc_nsessions) {
+		memset(&sc->sc_sessions[session], 0,
+			sizeof(sc->sc_sessions[session]));
+		ret = 0;
+	} else
+		ret = EINVAL;
+	return ret;
+}
+
+/*
+ * launch device processing - it will come back with done notification
+ * in the form of an interrupt and/or HDR_DONE_BITS in header
+ */
+static int
+talitos_submit(
+	struct talitos_softc *sc,
+	struct talitos_desc *td,
+	int chsel)
+{
+	u_int32_t v;
+
+	v = dma_map_single(NULL, td, sizeof(*td), DMA_TO_DEVICE);
+	talitos_write(sc->sc_base_addr +
+		chsel*TALITOS_CH_OFFSET + TALITOS_CH_FF, 0);
+	talitos_write(sc->sc_base_addr +
+		chsel*TALITOS_CH_OFFSET + TALITOS_CH_FF_HI, v);
+	return 0;
+}
+
+static int
+talitos_process(device_t dev, struct cryptop *crp, int hint)
+{
+	int i, err = 0, ivsize;
+	struct talitos_softc *sc = device_get_softc(dev);
+	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
+	caddr_t iv;
+	struct talitos_session *ses;
+	struct talitos_desc *td;
+	unsigned long flags;
+	/* descriptor mappings */
+	int hmac_key, hmac_data, cipher_iv, cipher_key,
+		in_fifo, out_fifo, cipher_iv_out;
+	static int chsel = -1;
+	u_int32_t rand_iv[4];
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
+		return EINVAL;
+	}
+	crp->crp_etype = 0;
+	if (TALITOS_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
+		return EINVAL;
+	}
+
+	ses = &sc->sc_sessions[TALITOS_SESSION(crp->crp_sid)];
+
+        /* enter the channel scheduler */
+	spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
+
+	/* reuse channel that already had/has requests for the required EU */
+	for (i = 0; i < sc->sc_num_channels; i++) {
+		if (sc->sc_chnlastalg[i] == crp->crp_desc->crd_alg)
+			break;
+	}
+	if (i == sc->sc_num_channels) {
+		/*
+		 * haven't seen this algo the last sc_num_channels or more
+		 * use round robin in this case
+		 * nb: sc->sc_num_channels must be power of 2
+		 */
+		chsel = (chsel + 1) & (sc->sc_num_channels - 1);
+	} else {
+		/*
+		 * matches channel with same target execution unit;
+		 * use same channel in this case
+		 */
+		chsel = i;
+	}
+	sc->sc_chnlastalg[chsel] = crp->crp_desc->crd_alg;
+
+        /* release the channel scheduler lock */
+	spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
+
+	/* acquire the selected channel fifo lock */
+	spin_lock_irqsave(&sc->sc_chnfifolock[chsel], flags);
+
+	/* find and reserve next available descriptor-cryptop pair */
+	for (i = 0; i < sc->sc_chfifo_len; i++) {
+		if (sc->sc_chnfifo[chsel][i].cf_desc.hdr == 0) {
+			/*
+			 * ensure correct descriptor formation by
+			 * avoiding inadvertently setting "optional" entries
+			 * e.g. not using "optional" dptr2 for MD/HMAC descs
+			 */
+			memset(&sc->sc_chnfifo[chsel][i].cf_desc,
+				0, sizeof(*td));
+			/* reserve it with done notification request bit */
+			sc->sc_chnfifo[chsel][i].cf_desc.hdr |=
+				TALITOS_DONE_NOTIFY;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&sc->sc_chnfifolock[chsel], flags);
+
+	if (i == sc->sc_chfifo_len) {
+		/* fifo full */
+		err = ERESTART;
+		goto errout;
+	}
+
+	td = &sc->sc_chnfifo[chsel][i].cf_desc;
+	sc->sc_chnfifo[chsel][i].cf_crp = crp;
+
+	crd1 = crp->crp_desc;
+	if (crd1 == NULL) {
+		err = EINVAL;
+		goto errout;
+	}
+	crd2 = crd1->crd_next;
+	/* prevent compiler warning */
+	hmac_key = 0;
+	hmac_data = 0;
+	if (crd2 == NULL) {
+		td->hdr |= TD_TYPE_COMMON_NONSNOOP_NO_AFEU;
+		/* assign descriptor dword ptr mappings for this desc. type */
+		cipher_iv = 1;
+		cipher_key = 2;
+		in_fifo = 3;
+		cipher_iv_out = 5;
+		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
+		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
+		    crd1->crd_alg == CRYPTO_SHA1 ||
+		    crd1->crd_alg == CRYPTO_MD5) {
+			out_fifo = 5;
+			maccrd = crd1;
+			enccrd = NULL;
+		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
+		    crd1->crd_alg == CRYPTO_3DES_CBC ||
+		    crd1->crd_alg == CRYPTO_AES_CBC ||
+		    crd1->crd_alg == CRYPTO_ARC4) {
+			out_fifo = 4;
+			maccrd = NULL;
+			enccrd = crd1;
+		} else {
+			DPRINTF("UNKNOWN crd1->crd_alg %d\n", crd1->crd_alg);
+			err = EINVAL;
+			goto errout;
+		}
+	} else {
+		if (sc->sc_desc_types & TALITOS_HAS_DT_IPSEC_ESP) {
+			td->hdr |= TD_TYPE_IPSEC_ESP;
+		} else {
+			DPRINTF("unimplemented: multiple descriptor ipsec\n");
+			err = EINVAL;
+			goto errout;
+		}
+		/* assign descriptor dword ptr mappings for this desc. type */
+		hmac_key = 0;
+		hmac_data = 1;
+		cipher_iv = 2;
+		cipher_key = 3;
+		in_fifo = 4;
+		out_fifo = 5;
+		cipher_iv_out = 6;
+		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
+                     crd1->crd_alg == CRYPTO_SHA1_HMAC ||
+                     crd1->crd_alg == CRYPTO_MD5 ||
+                     crd1->crd_alg == CRYPTO_SHA1) &&
+		    (crd2->crd_alg == CRYPTO_DES_CBC ||
+		     crd2->crd_alg == CRYPTO_3DES_CBC ||
+		     crd2->crd_alg == CRYPTO_AES_CBC ||
+		     crd2->crd_alg == CRYPTO_ARC4) &&
+		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
+			maccrd = crd1;
+			enccrd = crd2;
+		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
+		     crd1->crd_alg == CRYPTO_ARC4 ||
+		     crd1->crd_alg == CRYPTO_3DES_CBC ||
+		     crd1->crd_alg == CRYPTO_AES_CBC) &&
+		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
+                     crd2->crd_alg == CRYPTO_SHA1_HMAC ||
+                     crd2->crd_alg == CRYPTO_MD5 ||
+                     crd2->crd_alg == CRYPTO_SHA1) &&
+		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
+			enccrd = crd1;
+			maccrd = crd2;
+		} else {
+			/* We cannot order the SEC as requested */
+			printk("%s: cannot do the order\n",
+					device_get_nameunit(sc->sc_cdev));
+			err = EINVAL;
+			goto errout;
+		}
+	}
+	/* assign in_fifo and out_fifo based on input/output struct type */
+	if (crp->crp_flags & CRYPTO_F_SKBUF) {
+		/* using SKB buffers */
+		struct sk_buff *skb = (struct sk_buff *)crp->crp_buf;
+		if (skb_shinfo(skb)->nr_frags) {
+			printk("%s: skb frags unimplemented\n",
+					device_get_nameunit(sc->sc_cdev));
+			err = EINVAL;
+			goto errout;
+		}
+		td->ptr[in_fifo].ptr = dma_map_single(NULL, skb->data,
+			skb->len, DMA_TO_DEVICE);
+		td->ptr[in_fifo].len = skb->len;
+		td->ptr[out_fifo].ptr = dma_map_single(NULL, skb->data,
+			skb->len, DMA_TO_DEVICE);
+		td->ptr[out_fifo].len = skb->len;
+		td->ptr[hmac_data].ptr = dma_map_single(NULL, skb->data,
+			skb->len, DMA_TO_DEVICE);
+	} else if (crp->crp_flags & CRYPTO_F_IOV) {
+		/* using IOV buffers */
+		struct uio *uiop = (struct uio *)crp->crp_buf;
+		if (uiop->uio_iovcnt > 1) {
+			printk("%s: iov frags unimplemented\n",
+					device_get_nameunit(sc->sc_cdev));
+			err = EINVAL;
+			goto errout;
+		}
+		td->ptr[in_fifo].ptr = dma_map_single(NULL,
+			uiop->uio_iov->iov_base, crp->crp_ilen, DMA_TO_DEVICE);
+		td->ptr[in_fifo].len = crp->crp_ilen;
+		/* crp_olen is never set; always use crp_ilen */
+		td->ptr[out_fifo].ptr = dma_map_single(NULL,
+			uiop->uio_iov->iov_base,
+			crp->crp_ilen, DMA_TO_DEVICE);
+		td->ptr[out_fifo].len = crp->crp_ilen;
+	} else {
+		/* using contig buffers */
+		td->ptr[in_fifo].ptr = dma_map_single(NULL,
+			crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
+		td->ptr[in_fifo].len = crp->crp_ilen;
+		td->ptr[out_fifo].ptr = dma_map_single(NULL,
+			crp->crp_buf, crp->crp_ilen, DMA_TO_DEVICE);
+		td->ptr[out_fifo].len = crp->crp_ilen;
+	}
+	if (enccrd) {
+		switch (enccrd->crd_alg) {
+		case CRYPTO_3DES_CBC:
+			td->hdr |= TALITOS_MODE0_DEU_3DES;
+			/* FALLTHROUGH */
+		case CRYPTO_DES_CBC:
+			td->hdr |= TALITOS_SEL0_DEU
+				|  TALITOS_MODE0_DEU_CBC;
+			if (enccrd->crd_flags & CRD_F_ENCRYPT)
+				td->hdr |= TALITOS_MODE0_DEU_ENC;
+			ivsize = 2*sizeof(u_int32_t);
+			DPRINTF("%cDES ses %d ch %d len %d\n",
+				(td->hdr & TALITOS_MODE0_DEU_3DES)?'3':'1',
+				(u32)TALITOS_SESSION(crp->crp_sid),
+				chsel, td->ptr[in_fifo].len);
+			break;
+		case CRYPTO_AES_CBC:
+			td->hdr |= TALITOS_SEL0_AESU
+				|  TALITOS_MODE0_AESU_CBC;
+			if (enccrd->crd_flags & CRD_F_ENCRYPT)
+				td->hdr |= TALITOS_MODE0_AESU_ENC;
+			ivsize = 4*sizeof(u_int32_t);
+			DPRINTF("AES  ses %d ch %d len %d\n",
+				(u32)TALITOS_SESSION(crp->crp_sid),
+				chsel, td->ptr[in_fifo].len);
+			break;
+		default:
+			printk("%s: unimplemented enccrd->crd_alg %d\n",
+					device_get_nameunit(sc->sc_cdev), enccrd->crd_alg);
+			err = EINVAL;
+			goto errout;
+		}
+		/*
+		 * Setup encrypt/decrypt state.  When using basic ops
+		 * we can't use an inline IV because hash/crypt offset
+		 * must be from the end of the IV to the start of the
+		 * crypt data and this leaves out the preceding header
+		 * from the hash calculation.  Instead we place the IV
+		 * in the state record and set the hash/crypt offset to
+		 * copy both the header+IV.
+		 */
+		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
+			td->hdr |= TALITOS_DIR_OUTBOUND;
+			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
+				iv = enccrd->crd_iv;
+			else
+				read_random((iv = (caddr_t) rand_iv), sizeof(rand_iv));
+			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
+				crypto_copyback(crp->crp_flags, crp->crp_buf,
+				    enccrd->crd_inject, ivsize, iv);
+			}
+		} else {
+			td->hdr |= TALITOS_DIR_INBOUND;
+			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
+				iv = enccrd->crd_iv;
+			} else {
+				iv = (caddr_t) rand_iv;
+				crypto_copydata(crp->crp_flags, crp->crp_buf,
+				    enccrd->crd_inject, ivsize, iv);
+			}
+		}
+		td->ptr[cipher_iv].ptr = dma_map_single(NULL, iv, ivsize,
+			DMA_TO_DEVICE);
+		td->ptr[cipher_iv].len = ivsize;
+		/*
+		 * we don't need the cipher iv out length/pointer
+		 * field to do ESP IPsec. Therefore we set the len field as 0,
+		 * which tells the SEC not to do anything with this len/ptr
+		 * field. Previously, when length/pointer as pointing to iv,
+		 * it gave us corruption of packets.
+		 */
+		td->ptr[cipher_iv_out].len = 0;
+	}
+	if (enccrd && maccrd) {
+		/* this is ipsec only for now */
+		td->hdr |= TALITOS_SEL1_MDEU
+			|  TALITOS_MODE1_MDEU_INIT
+			|  TALITOS_MODE1_MDEU_PAD;
+		switch (maccrd->crd_alg) {
+			case	CRYPTO_MD5:
+				td->hdr |= TALITOS_MODE1_MDEU_MD5;
+				break;
+			case	CRYPTO_MD5_HMAC:
+				td->hdr |= TALITOS_MODE1_MDEU_MD5_HMAC;
+				break;
+			case	CRYPTO_SHA1:
+				td->hdr |= TALITOS_MODE1_MDEU_SHA1;
+				break;
+			case	CRYPTO_SHA1_HMAC:
+				td->hdr |= TALITOS_MODE1_MDEU_SHA1_HMAC;
+				break;
+			default:
+				/* We cannot order the SEC as requested */
+				printk("%s: cannot do the order\n",
+						device_get_nameunit(sc->sc_cdev));
+				err = EINVAL;
+				goto errout;
+		}
+		if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
+		   (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
+			/*
+			 * The offset from hash data to the start of
+			 * crypt data is the difference in the skips.
+			 */
+			/* ipsec only for now */
+			td->ptr[hmac_key].ptr = dma_map_single(NULL,
+				ses->ses_hmac, ses->ses_hmac_len, DMA_TO_DEVICE);
+			td->ptr[hmac_key].len = ses->ses_hmac_len;
+			td->ptr[in_fifo].ptr  += enccrd->crd_skip;
+			td->ptr[in_fifo].len  =  enccrd->crd_len;
+			td->ptr[out_fifo].ptr += enccrd->crd_skip;
+			td->ptr[out_fifo].len =  enccrd->crd_len;
+			/* bytes of HMAC to postpend to ciphertext */
+			td->ptr[out_fifo].extent =  ses->ses_mlen;
+			td->ptr[hmac_data].ptr += maccrd->crd_skip;
+			td->ptr[hmac_data].len = enccrd->crd_skip - maccrd->crd_skip;
+		}
+		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
+			printk("%s: CRD_F_KEY_EXPLICIT unimplemented\n",
+					device_get_nameunit(sc->sc_cdev));
+		}
+	}
+	if (!enccrd && maccrd) {
+		/* single MD5 or SHA */
+		td->hdr |= TALITOS_SEL0_MDEU
+				|  TALITOS_MODE0_MDEU_INIT
+				|  TALITOS_MODE0_MDEU_PAD;
+		switch (maccrd->crd_alg) {
+			case	CRYPTO_MD5:
+				td->hdr |= TALITOS_MODE0_MDEU_MD5;
+				DPRINTF("MD5  ses %d ch %d len %d\n",
+					(u32)TALITOS_SESSION(crp->crp_sid),
+					chsel, td->ptr[in_fifo].len);
+				break;
+			case	CRYPTO_MD5_HMAC:
+				td->hdr |= TALITOS_MODE0_MDEU_MD5_HMAC;
+				break;
+			case	CRYPTO_SHA1:
+				td->hdr |= TALITOS_MODE0_MDEU_SHA1;
+				DPRINTF("SHA1 ses %d ch %d len %d\n",
+					(u32)TALITOS_SESSION(crp->crp_sid),
+					chsel, td->ptr[in_fifo].len);
+				break;
+			case	CRYPTO_SHA1_HMAC:
+				td->hdr |= TALITOS_MODE0_MDEU_SHA1_HMAC;
+				break;
+			default:
+				/* We cannot order the SEC as requested */
+				DPRINTF("cannot do the order\n");
+				err = EINVAL;
+				goto errout;
+		}
+
+		if (crp->crp_flags & CRYPTO_F_IOV)
+			td->ptr[out_fifo].ptr += maccrd->crd_inject;
+
+		if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
+		   (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
+			td->ptr[hmac_key].ptr = dma_map_single(NULL,
+				ses->ses_hmac, ses->ses_hmac_len,
+				DMA_TO_DEVICE);
+			td->ptr[hmac_key].len = ses->ses_hmac_len;
+		}
+	}
+	else {
+		/* using process key (session data has duplicate) */
+		td->ptr[cipher_key].ptr = dma_map_single(NULL,
+			enccrd->crd_key, (enccrd->crd_klen + 7) / 8,
+			DMA_TO_DEVICE);
+		td->ptr[cipher_key].len = (enccrd->crd_klen + 7) / 8;
+	}
+	/* descriptor complete - GO! */
+	return talitos_submit(sc, td, chsel);
+
+errout:
+	if (err != ERESTART) {
+		crp->crp_etype = err;
+		crypto_done(crp);
+	}
+	return err;
+}
+
+/* go through all channels descriptors, notifying OCF what has
+ * _and_hasn't_ successfully completed and reset the device
+ * (otherwise it's up to decoding desc hdrs!)
+ */
+static void talitos_errorprocessing(struct talitos_softc *sc)
+{
+	unsigned long flags;
+	int i, j;
+
+	/* disable further scheduling until under control */
+	spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
+
+	if (debug) dump_talitos_status(sc);
+	/* go through descriptors, try and salvage those successfully done,
+	 * and EIO those that weren't
+	 */
+	for (i = 0; i < sc->sc_num_channels; i++) {
+		spin_lock_irqsave(&sc->sc_chnfifolock[i], flags);
+		for (j = 0; j < sc->sc_chfifo_len; j++) {
+			if (sc->sc_chnfifo[i][j].cf_desc.hdr) {
+				if ((sc->sc_chnfifo[i][j].cf_desc.hdr
+					& TALITOS_HDR_DONE_BITS)
+					!= TALITOS_HDR_DONE_BITS) {
+					/* this one didn't finish */
+					/* signify in crp->etype */
+					sc->sc_chnfifo[i][j].cf_crp->crp_etype
+						= EIO;
+				}
+			} else
+				continue; /* free entry */
+			/* either way, notify ocf */
+			crypto_done(sc->sc_chnfifo[i][j].cf_crp);
+			/* and tag it available again
+			 *
+			 * memset to ensure correct descriptor formation by
+			 * avoiding inadvertently setting "optional" entries
+			 * e.g. not using "optional" dptr2 MD/HMAC processing
+			 */
+			memset(&sc->sc_chnfifo[i][j].cf_desc,
+				0, sizeof(struct talitos_desc));
+		}
+		spin_unlock_irqrestore(&sc->sc_chnfifolock[i], flags);
+	}
+	/* reset and initialize the SEC h/w device */
+	talitos_reset_device(sc);
+	talitos_init_device(sc);
+#ifdef CONFIG_OCF_RANDOMHARVEST
+	if (sc->sc_exec_units & TALITOS_HAS_EU_RNG)
+		talitos_rng_init(sc);
+#endif
+
+	/* Okay. Stand by. */
+	spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
+
+	return;
+}
+
+/* go through all channels descriptors, notifying OCF what's been done */
+static void talitos_doneprocessing(struct talitos_softc *sc)
+{
+	unsigned long flags;
+	int i, j;
+
+	/* go through descriptors looking for done bits */
+	for (i = 0; i < sc->sc_num_channels; i++) {
+		spin_lock_irqsave(&sc->sc_chnfifolock[i], flags);
+		for (j = 0; j < sc->sc_chfifo_len; j++) {
+			/* descriptor has done bits set? */
+			if ((sc->sc_chnfifo[i][j].cf_desc.hdr
+				& TALITOS_HDR_DONE_BITS)
+				== TALITOS_HDR_DONE_BITS) {
+				/* notify ocf */
+				crypto_done(sc->sc_chnfifo[i][j].cf_crp);
+				/* and tag it available again
+				 *
+				 * memset to ensure correct descriptor formation by
+				 * avoiding inadvertently setting "optional" entries
+				 * e.g. not using "optional" dptr2 MD/HMAC processing
+				 */
+				memset(&sc->sc_chnfifo[i][j].cf_desc,
+					0, sizeof(struct talitos_desc));
+			}
+		}
+		spin_unlock_irqrestore(&sc->sc_chnfifolock[i], flags);
+	}
+	return;
+}
+
+static irqreturn_t
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
+talitos_intr(int irq, void *arg)
+#else
+talitos_intr(int irq, void *arg, struct pt_regs *regs)
+#endif
+{
+	struct talitos_softc *sc = arg;
+	u_int32_t v, v_hi;
+
+	/* ack */
+	v = talitos_read(sc->sc_base_addr + TALITOS_ISR);
+	v_hi = talitos_read(sc->sc_base_addr + TALITOS_ISR_HI);
+	talitos_write(sc->sc_base_addr + TALITOS_ICR, v);
+	talitos_write(sc->sc_base_addr + TALITOS_ICR_HI, v_hi);
+
+	if (unlikely(v & TALITOS_ISR_ERROR)) {
+		/* Okay, Houston, we've had a problem here. */
+		printk(KERN_DEBUG "%s: got error interrupt - ISR 0x%08x_%08x\n",
+				device_get_nameunit(sc->sc_cdev), v, v_hi);
+		talitos_errorprocessing(sc);
+	} else
+	if (likely(v & TALITOS_ISR_DONE)) {
+		talitos_doneprocessing(sc);
+	}
+	return IRQ_HANDLED;
+}
+
+/*
+ * Initialize registers we need to touch only once.
+ */
+static void
+talitos_init_device(struct talitos_softc *sc)
+{
+	u_int32_t v;
+	int i;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	/* init all channels */
+	for (i = 0; i < sc->sc_num_channels; i++) {
+		v = talitos_read(sc->sc_base_addr +
+			i*TALITOS_CH_OFFSET + TALITOS_CH_CCCR_HI);
+		v |= TALITOS_CH_CCCR_HI_CDWE
+		  |  TALITOS_CH_CCCR_HI_CDIE;  /* invoke interrupt if done */
+		talitos_write(sc->sc_base_addr +
+			i*TALITOS_CH_OFFSET + TALITOS_CH_CCCR_HI, v);
+	}
+	/* enable all interrupts */
+	v = talitos_read(sc->sc_base_addr + TALITOS_IMR);
+	v |= TALITOS_IMR_ALL;
+	talitos_write(sc->sc_base_addr + TALITOS_IMR, v);
+	v = talitos_read(sc->sc_base_addr + TALITOS_IMR_HI);
+	v |= TALITOS_IMR_HI_ERRONLY;
+	talitos_write(sc->sc_base_addr + TALITOS_IMR_HI, v);
+	return;
+}
+
+/*
+ * set the master reset bit on the device.
+ */
+static void
+talitos_reset_device_master(struct talitos_softc *sc)
+{
+	u_int32_t v;
+
+	/* Reset the device by writing 1 to MCR:SWR and waiting 'til cleared */
+	v = talitos_read(sc->sc_base_addr + TALITOS_MCR);
+	talitos_write(sc->sc_base_addr + TALITOS_MCR, v | TALITOS_MCR_SWR);
+
+	while (talitos_read(sc->sc_base_addr + TALITOS_MCR) & TALITOS_MCR_SWR)
+		cpu_relax();
+
+	return;
+}
+
+/*
+ * Resets the device.  Values in the registers are left as is
+ * from the reset (i.e. initial values are assigned elsewhere).
+ */
+static void
+talitos_reset_device(struct talitos_softc *sc)
+{
+	u_int32_t v;
+	int i;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	/*
+	 * Master reset
+	 * errata documentation: warning: certain SEC interrupts
+	 * are not fully cleared by writing the MCR:SWR bit,
+	 * set bit twice to completely reset
+	 */
+	talitos_reset_device_master(sc);	/* once */
+	talitos_reset_device_master(sc);	/* and once again */
+
+	/* reset all channels */
+	for (i = 0; i < sc->sc_num_channels; i++) {
+		v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
+			TALITOS_CH_CCCR);
+		talitos_write(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
+			TALITOS_CH_CCCR, v | TALITOS_CH_CCCR_RESET);
+	}
+}
+
+/* Set up the crypto device structure, private data,
+ * and anything else we need before we start */
+#ifdef CONFIG_PPC_MERGE
+static int talitos_probe(struct of_device *ofdev, const struct of_device_id *match)
+#else
+static int talitos_probe(struct platform_device *pdev)
+#endif
+{
+	struct talitos_softc *sc = NULL;
+	struct resource *r;
+#ifdef CONFIG_PPC_MERGE
+	struct device *device = &ofdev->dev;
+	struct device_node *np = ofdev->node;
+	const unsigned int *prop;
+	int err;
+	struct resource res;
+#endif
+	static int num_chips = 0;
+	int rc;
+	int i;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+
+	sc = (struct talitos_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
+	if (!sc)
+		return -ENOMEM;
+	memset(sc, 0, sizeof(*sc));
+
+	softc_device_init(sc, DRV_NAME, num_chips, talitos_methods);
+
+	sc->sc_irq = -1;
+	sc->sc_cid = -1;
+#ifndef CONFIG_PPC_MERGE
+	sc->sc_dev = pdev;
+#endif
+	sc->sc_num = num_chips++;
+
+#ifdef CONFIG_PPC_MERGE
+	dev_set_drvdata(device, sc);
+#else
+	platform_set_drvdata(sc->sc_dev, sc);
+#endif
+
+	/* get the irq line */
+#ifdef CONFIG_PPC_MERGE
+	err = of_address_to_resource(np, 0, &res);
+	if (err)
+		return -EINVAL;
+	r = &res;
+
+	sc->sc_irq = irq_of_parse_and_map(np, 0);
+#else
+	/* get a pointer to the register memory */
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	sc->sc_irq = platform_get_irq(pdev, 0);
+#endif
+	rc = request_irq(sc->sc_irq, talitos_intr, 0,
+			device_get_nameunit(sc->sc_cdev), sc);
+	if (rc) {
+		printk(KERN_ERR "%s: failed to hook irq %d\n",
+				device_get_nameunit(sc->sc_cdev), sc->sc_irq);
+		sc->sc_irq = -1;
+		goto out;
+	}
+
+	sc->sc_base_addr = (ocf_iomem_t) ioremap(r->start, (r->end - r->start));
+	if (!sc->sc_base_addr) {
+		printk(KERN_ERR "%s: failed to ioremap\n",
+				device_get_nameunit(sc->sc_cdev));
+		goto out;
+	}
+
+	/* figure out our SEC's properties and capabilities */
+	sc->sc_chiprev = (u64)talitos_read(sc->sc_base_addr + TALITOS_ID) << 32
+		 | talitos_read(sc->sc_base_addr + TALITOS_ID_HI);
+	DPRINTF("sec id 0x%llx\n", sc->sc_chiprev);
+
+#ifdef CONFIG_PPC_MERGE
+	/* get SEC properties from device tree, defaulting to SEC 2.0 */
+
+	prop = of_get_property(np, "num-channels", NULL);
+	sc->sc_num_channels = prop ? *prop : TALITOS_NCHANNELS_SEC_2_0;
+
+	prop = of_get_property(np, "channel-fifo-len", NULL);
+	sc->sc_chfifo_len = prop ? *prop : TALITOS_CHFIFOLEN_SEC_2_0;
+
+	prop = of_get_property(np, "exec-units-mask", NULL);
+	sc->sc_exec_units = prop ? *prop : TALITOS_HAS_EUS_SEC_2_0;
+
+	prop = of_get_property(np, "descriptor-types-mask", NULL);
+	sc->sc_desc_types = prop ? *prop : TALITOS_HAS_DESCTYPES_SEC_2_0;
+#else
+	/* bulk should go away with openfirmware flat device tree support */
+	if (sc->sc_chiprev & TALITOS_ID_SEC_2_0) {
+		sc->sc_num_channels = TALITOS_NCHANNELS_SEC_2_0;
+		sc->sc_chfifo_len = TALITOS_CHFIFOLEN_SEC_2_0;
+		sc->sc_exec_units = TALITOS_HAS_EUS_SEC_2_0;
+		sc->sc_desc_types = TALITOS_HAS_DESCTYPES_SEC_2_0;
+	} else {
+		printk(KERN_ERR "%s: failed to id device\n",
+				device_get_nameunit(sc->sc_cdev));
+		goto out;
+	}
+#endif
+
+	/* + 1 is for the meta-channel lock used by the channel scheduler */
+	sc->sc_chnfifolock = (spinlock_t *) kmalloc(
+		(sc->sc_num_channels + 1) * sizeof(spinlock_t), GFP_KERNEL);
+	if (!sc->sc_chnfifolock)
+		goto out;
+	for (i = 0; i < sc->sc_num_channels + 1; i++) {
+		spin_lock_init(&sc->sc_chnfifolock[i]);
+	}
+
+	sc->sc_chnlastalg = (int *) kmalloc(
+		sc->sc_num_channels * sizeof(int), GFP_KERNEL);
+	if (!sc->sc_chnlastalg)
+		goto out;
+	memset(sc->sc_chnlastalg, 0, sc->sc_num_channels * sizeof(int));
+
+	sc->sc_chnfifo = (struct desc_cryptop_pair **) kmalloc(
+		sc->sc_num_channels * sizeof(struct desc_cryptop_pair *),
+		GFP_KERNEL);
+	if (!sc->sc_chnfifo)
+		goto out;
+	for (i = 0; i < sc->sc_num_channels; i++) {
+		sc->sc_chnfifo[i] = (struct desc_cryptop_pair *) kmalloc(
+			sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair),
+			GFP_KERNEL);
+		if (!sc->sc_chnfifo[i])
+			goto out;
+		memset(sc->sc_chnfifo[i], 0,
+			sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair));
+	}
+
+	/* reset and initialize the SEC h/w device */
+	talitos_reset_device(sc);
+	talitos_init_device(sc);
+
+	sc->sc_cid = crypto_get_driverid(softc_get_device(sc),CRYPTOCAP_F_HARDWARE);
+	if (sc->sc_cid < 0) {
+		printk(KERN_ERR "%s: could not get crypto driver id\n",
+				device_get_nameunit(sc->sc_cdev));
+		goto out;
+	}
+
+	/* register algorithms with the framework */
+	printk("%s:", device_get_nameunit(sc->sc_cdev));
+
+	if (sc->sc_exec_units & TALITOS_HAS_EU_RNG)  {
+		printk(" rng");
+#ifdef CONFIG_OCF_RANDOMHARVEST
+		talitos_rng_init(sc);
+		crypto_rregister(sc->sc_cid, talitos_read_random, sc);
+#endif
+	}
+	if (sc->sc_exec_units & TALITOS_HAS_EU_DEU) {
+		printk(" des/3des");
+		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
+		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
+	}
+	if (sc->sc_exec_units & TALITOS_HAS_EU_AESU) {
+		printk(" aes");
+		crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
+	}
+	if (sc->sc_exec_units & TALITOS_HAS_EU_MDEU) {
+		printk(" md5");
+		crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
+		/* HMAC support only with IPsec for now */
+		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
+		printk(" sha1");
+		crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
+		/* HMAC support only with IPsec for now */
+		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
+	}
+	printk("\n");
+	return 0;
+
+out:
+#ifndef CONFIG_PPC_MERGE
+	talitos_remove(pdev);
+#endif
+	return -ENOMEM;
+}
+
+#ifdef CONFIG_PPC_MERGE
+static int talitos_remove(struct of_device *ofdev)
+#else
+static int talitos_remove(struct platform_device *pdev)
+#endif
+{
+#ifdef CONFIG_PPC_MERGE
+	struct talitos_softc *sc = dev_get_drvdata(&ofdev->dev);
+#else
+	struct talitos_softc *sc = platform_get_drvdata(pdev);
+#endif
+	int i;
+
+	DPRINTF("%s()\n", __FUNCTION__);
+	if (sc->sc_cid >= 0)
+		crypto_unregister_all(sc->sc_cid);
+	if (sc->sc_chnfifo) {
+		for (i = 0; i < sc->sc_num_channels; i++)
+			if (sc->sc_chnfifo[i])
+				kfree(sc->sc_chnfifo[i]);
+		kfree(sc->sc_chnfifo);
+	}
+	if (sc->sc_chnlastalg)
+		kfree(sc->sc_chnlastalg);
+	if (sc->sc_chnfifolock)
+		kfree(sc->sc_chnfifolock);
+	if (sc->sc_irq != -1)
+		free_irq(sc->sc_irq, sc);
+	if (sc->sc_base_addr)
+		iounmap((void *) sc->sc_base_addr);
+	kfree(sc);
+	return 0;
+}
+
+#ifdef CONFIG_PPC_MERGE
+static struct of_device_id talitos_match[] = {
+	{
+		.type = "crypto",
+		.compatible = "talitos",
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, talitos_match);
+
+static struct of_platform_driver talitos_driver = {
+	.name		= DRV_NAME,
+	.match_table	= talitos_match,
+	.probe		= talitos_probe,
+	.remove		= talitos_remove,
+};
+
+static int __init talitos_init(void)
+{
+	return of_register_platform_driver(&talitos_driver);
+}
+
+static void __exit talitos_exit(void)
+{
+	of_unregister_platform_driver(&talitos_driver);
+}
+#else
+/* Structure for a platform device driver */
+static struct platform_driver talitos_driver = {
+	.probe = talitos_probe,
+	.remove = talitos_remove,
+	.driver = {
+		.name = "fsl-sec2",
+	}
+};
+
+static int __init talitos_init(void)
+{
+	return platform_driver_register(&talitos_driver);
+}
+
+static void __exit talitos_exit(void)
+{
+	platform_driver_unregister(&talitos_driver);
+}
+#endif
+
+module_init(talitos_init);
+module_exit(talitos_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("kim.phillips@freescale.com");
+MODULE_DESCRIPTION("OCF driver for Freescale SEC (talitos)");
diff --git a/crypto/ocf/talitos/talitos_dev.h b/crypto/ocf/talitos/talitos_dev.h
new file mode 100644
index 000000000000..86bb57c6fee6
--- /dev/null
+++ b/crypto/ocf/talitos/talitos_dev.h
@@ -0,0 +1,277 @@
+/*
+ * Freescale SEC (talitos) device dependent data structures
+ *
+ * Copyright (c) 2006 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/* device ID register values */
+#define TALITOS_ID_SEC_2_0	0x40
+#define TALITOS_ID_SEC_2_1	0x40 /* cross ref with IP block revision reg */
+
+/*
+ * following num_channels, channel-fifo-depth, exec-unit-mask, and
+ * descriptor-types-mask are for forward-compatibility with openfirmware
+ * flat device trees
+ */
+
+/*
+ *  num_channels : the number of channels available in each SEC version.
+ */
+
+/* n.b. this driver requires these values be a power of 2 */
+#define TALITOS_NCHANNELS_SEC_1_0	4
+#define TALITOS_NCHANNELS_SEC_1_2	1
+#define TALITOS_NCHANNELS_SEC_2_0	4
+#define TALITOS_NCHANNELS_SEC_2_01	4
+#define TALITOS_NCHANNELS_SEC_2_1	4
+#define TALITOS_NCHANNELS_SEC_2_4	4
+
+/*
+ *  channel-fifo-depth : The number of descriptor
+ *  pointers a channel fetch fifo can hold.
+ */
+#define TALITOS_CHFIFOLEN_SEC_1_0	1
+#define TALITOS_CHFIFOLEN_SEC_1_2	1
+#define TALITOS_CHFIFOLEN_SEC_2_0	24
+#define TALITOS_CHFIFOLEN_SEC_2_01	24
+#define TALITOS_CHFIFOLEN_SEC_2_1	24
+#define TALITOS_CHFIFOLEN_SEC_2_4	24
+
+/*
+ *  exec-unit-mask : The bitmask representing what Execution Units (EUs)
+ *  are available. EU information should be encoded following the SEC's
+ *  EU_SEL0 bitfield documentation, i.e. as follows:
+ *
+ *    bit 31 = set if SEC permits no-EU selection (should be always set)
+ *    bit 30 = set if SEC has the ARC4 EU (AFEU)
+ *    bit 29 = set if SEC has the des/3des EU (DEU)
+ *    bit 28 = set if SEC has the message digest EU (MDEU)
+ *    bit 27 = set if SEC has the random number generator EU (RNG)
+ *    bit 26 = set if SEC has the public key EU (PKEU)
+ *    bit 25 = set if SEC has the aes EU (AESU)
+ *    bit 24 = set if SEC has the Kasumi EU (KEU)
+ *
+ */
+#define TALITOS_HAS_EU_NONE		(1<<0)
+#define TALITOS_HAS_EU_AFEU		(1<<1)
+#define TALITOS_HAS_EU_DEU		(1<<2)
+#define TALITOS_HAS_EU_MDEU		(1<<3)
+#define TALITOS_HAS_EU_RNG		(1<<4)
+#define TALITOS_HAS_EU_PKEU		(1<<5)
+#define TALITOS_HAS_EU_AESU		(1<<6)
+#define TALITOS_HAS_EU_KEU		(1<<7)
+
+/* the corresponding masks for each SEC version */
+#define TALITOS_HAS_EUS_SEC_1_0		0x7f
+#define TALITOS_HAS_EUS_SEC_1_2		0x4d
+#define TALITOS_HAS_EUS_SEC_2_0		0x7f
+#define TALITOS_HAS_EUS_SEC_2_01	0x7f
+#define TALITOS_HAS_EUS_SEC_2_1		0xff
+#define TALITOS_HAS_EUS_SEC_2_4		0x7f
+
+/*
+ *  descriptor-types-mask : The bitmask representing what descriptors
+ *  are available. Descriptor type information should be encoded
+ *  following the SEC's Descriptor Header Dword DESC_TYPE field
+ *  documentation, i.e. as follows:
+ *
+ *    bit 0  = set if SEC supports the aesu_ctr_nonsnoop desc. type
+ *    bit 1  = set if SEC supports the ipsec_esp descriptor type
+ *    bit 2  = set if SEC supports the common_nonsnoop desc. type
+ *    bit 3  = set if SEC supports the 802.11i AES ccmp desc. type
+ *    bit 4  = set if SEC supports the hmac_snoop_no_afeu desc. type
+ *    bit 5  = set if SEC supports the srtp descriptor type
+ *    bit 6  = set if SEC supports the non_hmac_snoop_no_afeu desc.type
+ *    bit 7  = set if SEC supports the pkeu_assemble descriptor type
+ *    bit 8  = set if SEC supports the aesu_key_expand_output desc.type
+ *    bit 9  = set if SEC supports the pkeu_ptmul descriptor type
+ *    bit 10 = set if SEC supports the common_nonsnoop_afeu desc. type
+ *    bit 11 = set if SEC supports the pkeu_ptadd_dbl descriptor type
+ *
+ *  ..and so on and so forth.
+ */
+#define TALITOS_HAS_DT_AESU_CTR_NONSNOOP	(1<<0)
+#define TALITOS_HAS_DT_IPSEC_ESP		(1<<1)
+#define TALITOS_HAS_DT_COMMON_NONSNOOP		(1<<2)
+
+/* the corresponding masks for each SEC version */
+#define TALITOS_HAS_DESCTYPES_SEC_2_0	0x01010ebf
+#define TALITOS_HAS_DESCTYPES_SEC_2_1	0x012b0ebf
+
+/*
+ * a TALITOS_xxx_HI address points to the low data bits (32-63) of the register
+ */
+
+/* global register offset addresses */
+#define TALITOS_ID		0x1020
+#define TALITOS_ID_HI		0x1024
+#define TALITOS_MCR		0x1030		/* master control register */
+#define TALITOS_MCR_HI		0x1038		/* master control register */
+#define TALITOS_MCR_SWR		0x1
+#define TALITOS_IMR		0x1008		/* interrupt mask register */
+#define TALITOS_IMR_ALL		0x00010fff	/* enable all interrupts mask */
+#define TALITOS_IMR_ERRONLY	0x00010aaa	/* enable error interrupts */
+#define TALITOS_IMR_HI		0x100C		/* interrupt mask register */
+#define TALITOS_IMR_HI_ALL	0x00323333	/* enable all interrupts mask */
+#define TALITOS_IMR_HI_ERRONLY	0x00222222	/* enable error interrupts */
+#define TALITOS_ISR		0x1010		/* interrupt status register */
+#define TALITOS_ISR_ERROR	0x00010faa	/* errors mask */
+#define TALITOS_ISR_DONE	0x00000055	/* channel(s) done mask */
+#define TALITOS_ISR_HI		0x1014		/* interrupt status register */
+#define TALITOS_ICR		0x1018		/* interrupt clear register */
+#define TALITOS_ICR_HI		0x101C		/* interrupt clear register */
+
+/* channel register address stride */
+#define TALITOS_CH_OFFSET	0x100
+
+/* channel register offset addresses and bits */
+#define TALITOS_CH_CCCR		0x1108	/* Crypto-Channel Config Register */
+#define TALITOS_CH_CCCR_RESET	0x1	/* Channel Reset bit */
+#define TALITOS_CH_CCCR_HI	0x110c	/* Crypto-Channel Config Register */
+#define TALITOS_CH_CCCR_HI_CDWE	0x10	/* Channel done writeback enable bit */
+#define TALITOS_CH_CCCR_HI_NT	0x4	/* Notification type bit */
+#define TALITOS_CH_CCCR_HI_CDIE	0x2	/* Channel Done Interrupt Enable bit */
+#define TALITOS_CH_CCPSR	0x1110	/* Crypto-Channel Pointer Status Reg */
+#define TALITOS_CH_CCPSR_HI	0x1114	/* Crypto-Channel Pointer Status Reg */
+#define TALITOS_CH_FF		0x1148	/* Fetch FIFO */
+#define TALITOS_CH_FF_HI	0x114c	/* Fetch FIFO's FETCH_ADRS */
+#define TALITOS_CH_CDPR		0x1140	/* Crypto-Channel Pointer Status Reg */
+#define TALITOS_CH_CDPR_HI	0x1144	/* Crypto-Channel Pointer Status Reg */
+#define TALITOS_CH_DESCBUF	0x1180	/* (thru 11bf) Crypto-Channel
+					 * Descriptor Buffer (debug) */
+
+/* execution unit register offset addresses and bits */
+#define TALITOS_DEUSR		0x2028	/* DEU status register */
+#define TALITOS_DEUSR_HI	0x202c	/* DEU status register */
+#define TALITOS_DEUISR		0x2030	/* DEU interrupt status register */
+#define TALITOS_DEUISR_HI	0x2034	/* DEU interrupt status register */
+#define TALITOS_DEUICR		0x2038	/* DEU interrupt control register */
+#define TALITOS_DEUICR_HI	0x203c	/* DEU interrupt control register */
+#define TALITOS_AESUISR		0x4030	/* AESU interrupt status register */
+#define TALITOS_AESUISR_HI	0x4034	/* AESU interrupt status register */
+#define TALITOS_AESUICR		0x4038	/* AESU interrupt control register */
+#define TALITOS_AESUICR_HI	0x403c	/* AESU interrupt control register */
+#define TALITOS_MDEUISR		0x6030	/* MDEU interrupt status register */
+#define TALITOS_MDEUISR_HI	0x6034	/* MDEU interrupt status register */
+#define TALITOS_RNGSR		0xa028	/* RNG status register */
+#define TALITOS_RNGSR_HI	0xa02c	/* RNG status register */
+#define TALITOS_RNGSR_HI_RD	0x1	/* RNG Reset done */
+#define TALITOS_RNGSR_HI_OFL	0xff0000/* number of dwords in RNG output FIFO*/
+#define TALITOS_RNGDSR		0xa010	/* RNG data size register */
+#define TALITOS_RNGDSR_HI	0xa014	/* RNG data size register */
+#define TALITOS_RNG_FIFO	0xa800	/* RNG FIFO - pool of random numbers */
+#define TALITOS_RNGISR		0xa030	/* RNG Interrupt status register */
+#define TALITOS_RNGISR_HI	0xa034	/* RNG Interrupt status register */
+#define TALITOS_RNGRCR		0xa018	/* RNG Reset control register */
+#define TALITOS_RNGRCR_HI	0xa01c	/* RNG Reset control register */
+#define TALITOS_RNGRCR_HI_SR	0x1	/* RNG RNGRCR:Software Reset */
+
+/* descriptor pointer entry */
+struct talitos_desc_ptr {
+	u16	len;		/* length */
+	u8	extent;		/* jump (to s/g link table) and extent */
+	u8	res;		/* reserved */
+	u32	ptr;		/* pointer */
+};
+
+/* descriptor */
+struct talitos_desc {
+	u32	hdr;				/* header */
+	u32	res;				/* reserved */
+	struct talitos_desc_ptr		ptr[7];	/* ptr/len pair array */
+};
+
+/* talitos descriptor header (hdr) bits */
+
+/* primary execution unit select */
+#define	TALITOS_SEL0_AFEU	0x10000000
+#define	TALITOS_SEL0_DEU	0x20000000
+#define	TALITOS_SEL0_MDEU	0x30000000
+#define	TALITOS_SEL0_RNG	0x40000000
+#define	TALITOS_SEL0_PKEU	0x50000000
+#define	TALITOS_SEL0_AESU	0x60000000
+
+/* primary execution unit mode (MODE0) and derivatives */
+#define	TALITOS_MODE0_AESU_CBC		0x00200000
+#define	TALITOS_MODE0_AESU_ENC		0x00100000
+#define	TALITOS_MODE0_DEU_CBC		0x00400000
+#define	TALITOS_MODE0_DEU_3DES		0x00200000
+#define	TALITOS_MODE0_DEU_ENC		0x00100000
+#define	TALITOS_MODE0_MDEU_INIT		0x01000000	/* init starting regs */
+#define	TALITOS_MODE0_MDEU_HMAC		0x00800000
+#define	TALITOS_MODE0_MDEU_PAD		0x00400000	/* PD */
+#define	TALITOS_MODE0_MDEU_MD5		0x00200000
+#define	TALITOS_MODE0_MDEU_SHA256	0x00100000
+#define	TALITOS_MODE0_MDEU_SHA1		0x00000000	/* SHA-160 */
+#define	TALITOS_MODE0_MDEU_MD5_HMAC	\
+		(TALITOS_MODE0_MDEU_MD5 | TALITOS_MODE0_MDEU_HMAC)
+#define	TALITOS_MODE0_MDEU_SHA256_HMAC	\
+		(TALITOS_MODE0_MDEU_SHA256 | TALITOS_MODE0_MDEU_HMAC)
+#define	TALITOS_MODE0_MDEU_SHA1_HMAC	\
+		(TALITOS_MODE0_MDEU_SHA1 | TALITOS_MODE0_MDEU_HMAC)
+
+/* secondary execution unit select (SEL1) */
+/* it's MDEU or nothing */
+#define	TALITOS_SEL1_MDEU	0x00030000
+
+/* secondary execution unit mode (MODE1) and derivatives */
+#define	TALITOS_MODE1_MDEU_INIT		0x00001000	/* init starting regs */
+#define	TALITOS_MODE1_MDEU_HMAC		0x00000800
+#define	TALITOS_MODE1_MDEU_PAD		0x00000400	/* PD */
+#define	TALITOS_MODE1_MDEU_MD5		0x00000200
+#define	TALITOS_MODE1_MDEU_SHA256	0x00000100
+#define	TALITOS_MODE1_MDEU_SHA1		0x00000000	/* SHA-160 */
+#define	TALITOS_MODE1_MDEU_MD5_HMAC	\
+	(TALITOS_MODE1_MDEU_MD5 | TALITOS_MODE1_MDEU_HMAC)
+#define	TALITOS_MODE1_MDEU_SHA256_HMAC	\
+	(TALITOS_MODE1_MDEU_SHA256 | TALITOS_MODE1_MDEU_HMAC)
+#define	TALITOS_MODE1_MDEU_SHA1_HMAC	\
+	(TALITOS_MODE1_MDEU_SHA1 | TALITOS_MODE1_MDEU_HMAC)
+
+/* direction of overall data flow (DIR) */
+#define	TALITOS_DIR_OUTBOUND	0x00000000
+#define	TALITOS_DIR_INBOUND	0x00000002
+
+/* done notification (DN) */
+#define	TALITOS_DONE_NOTIFY	0x00000001
+
+/* descriptor types */
+/* odd numbers here are valid on SEC2 and greater only (e.g. ipsec_esp) */
+#define TD_TYPE_AESU_CTR_NONSNOOP	(0 << 3)
+#define TD_TYPE_IPSEC_ESP		(1 << 3)
+#define TD_TYPE_COMMON_NONSNOOP_NO_AFEU	(2 << 3)
+#define TD_TYPE_HMAC_SNOOP_NO_AFEU	(4 << 3)
+
+#define TALITOS_HDR_DONE_BITS	0xff000000
+
+#define	DPRINTF(a...)	do { \
+						if (debug) { \
+							printk("%s: ", sc ? \
+								device_get_nameunit(sc->sc_cdev) : "talitos"); \
+							printk(a); \
+						} \
+					} while (0)
diff --git a/crypto/ocf/talitos/talitos_soft.h b/crypto/ocf/talitos/talitos_soft.h
new file mode 100644
index 000000000000..eda9c2efe107
--- /dev/null
+++ b/crypto/ocf/talitos/talitos_soft.h
@@ -0,0 +1,76 @@
+/*
+ * Freescale SEC data structures for integration with ocf-linux
+ *
+ * Copyright (c) 2006 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ *    derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * paired descriptor and associated crypto operation
+ */
+struct desc_cryptop_pair {
+	struct talitos_desc	cf_desc;	/* descriptor ptr */
+	struct cryptop		*cf_crp;	/* cryptop ptr */
+};
+
+/*
+ * Holds data specific to a single talitos device.
+ */
+struct talitos_softc {
+	softc_device_decl	sc_cdev;
+	struct platform_device	*sc_dev;	/* device backpointer */
+	ocf_iomem_t		sc_base_addr;
+	int			sc_irq;
+	int			sc_num;		/* if we have multiple chips */
+	int32_t			sc_cid;		/* crypto tag */
+	u64			sc_chiprev;	/* major/minor chip revision */
+	int			sc_nsessions;
+	struct talitos_session	*sc_sessions;
+	int			sc_num_channels;/* number of crypto channels */
+	int			sc_chfifo_len;	/* channel fetch fifo len */
+	int			sc_exec_units;	/* execution units mask */
+	int			sc_desc_types;	/* descriptor types mask */
+	/*
+	 * mutual exclusion for intra-channel resources, e.g. fetch fifos
+	 * the last entry is a meta-channel lock used by the channel scheduler
+	 */
+	spinlock_t		*sc_chnfifolock;
+	/* sc_chnlastalgo contains last algorithm for that channel */
+	int			*sc_chnlastalg;
+	/* sc_chnfifo holds pending descriptor--crypto operation pairs */
+	struct desc_cryptop_pair	**sc_chnfifo;
+};
+
+struct talitos_session {
+	u_int32_t	ses_used;
+	u_int32_t	ses_klen;		/* key length in bits */
+	u_int32_t	ses_key[8];		/* DES/3DES/AES key */
+	u_int32_t	ses_hmac[5];		/* hmac inner state */
+	u_int32_t	ses_hmac_len;		/* hmac length */
+	u_int32_t	ses_mlen;		/* desired hash result len (12=ipsec or 16) */
+};
+
+#define	TALITOS_SESSION(sid)	((sid) & 0x0fffffff)
+#define	TALITOS_SID(crd, sesn)	(((crd) << 28) | ((sesn) & 0x0fffffff))
diff --git a/crypto/ocf/ubsec_ssb/Makefile b/crypto/ocf/ubsec_ssb/Makefile
new file mode 100644
index 000000000000..ee8a84af2906
--- /dev/null
+++ b/crypto/ocf/ubsec_ssb/Makefile
@@ -0,0 +1,11 @@
+# for SGlinux builds
+-include $(ROOTDIR)/modules/.config
+
+obj-$(CONFIG_OCF_UBSEC_SSB) += ubsec_ssb.o
+
+obj ?= .
+EXTRA_CFLAGS += -I$(obj)/.. -I$(obj)/
+
+ifdef TOPDIR
+-include $(TOPDIR)/Rules.make
+endif
diff --git a/crypto/ocf/ubsec_ssb/bsdqueue.h b/crypto/ocf/ubsec_ssb/bsdqueue.h
new file mode 100644
index 000000000000..98f923439232
--- /dev/null
+++ b/crypto/ocf/ubsec_ssb/bsdqueue.h
@@ -0,0 +1,527 @@
+/*  $OpenBSD: queue.h,v 1.32 2007/04/30 18:42:34 pedro Exp $    */
+/*  $NetBSD: queue.h,v 1.11 1996/05/16 05:17:14 mycroft Exp $   */
+
+/*
+ * Copyright (c) 1991, 1993
+ *  The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *  @(#)queue.h 8.5 (Berkeley) 8/20/94
+ */
+
+#ifndef _BSD_SYS_QUEUE_H_
+#define _BSD_SYS_QUEUE_H_
+
+/*
+ * This file defines five types of data structures: singly-linked lists,
+ * lists, simple queues, tail queues, and circular queues.
+ *
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction.  Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A simple queue is headed by a pair of pointers, one the head of the
+ * list and the other to the tail of the list. The elements are singly
+ * linked to save space, so elements can only be removed from the
+ * head of the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the
+ * list. A simple queue may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * A circle queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the list.
+ * A circle queue may be traversed in either direction, but has a more
+ * complex end of list detection.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
+
+#if defined(QUEUE_MACRO_DEBUG) || (defined(_KERNEL) && defined(DIAGNOSTIC))
+#define _Q_INVALIDATE(a) (a) = ((void *)-1)
+#else
+#define _Q_INVALIDATE(a)
+#endif
+
+/*
+ * Singly-linked List definitions.
+ */
+#define BSD_SLIST_HEAD(name, type)                      \
+struct name {                               \
+    struct type *slh_first; /* first element */         \
+}
+
+#define BSD_SLIST_HEAD_INITIALIZER(head)                    \
+    { NULL }
+
+#define BSD_SLIST_ENTRY(type)                       \
+struct {                                \
+    struct type *sle_next;  /* next element */          \
+}
+
+/*
+ * Singly-linked List access methods.
+ */
+#define BSD_SLIST_FIRST(head)   ((head)->slh_first)
+#define BSD_SLIST_END(head)     NULL
+#define BSD_SLIST_EMPTY(head)   (BSD_SLIST_FIRST(head) == BSD_SLIST_END(head))
+#define BSD_SLIST_NEXT(elm, field)  ((elm)->field.sle_next)
+
+#define BSD_SLIST_FOREACH(var, head, field)                 \
+    for((var) = BSD_SLIST_FIRST(head);                  \
+        (var) != BSD_SLIST_END(head);                   \
+        (var) = BSD_SLIST_NEXT(var, field))
+
+#define BSD_SLIST_FOREACH_PREVPTR(var, varp, head, field)           \
+    for ((varp) = &BSD_SLIST_FIRST((head));             \
+        ((var) = *(varp)) != BSD_SLIST_END(head);           \
+        (varp) = &BSD_SLIST_NEXT((var), field))
+
+/*
+ * Singly-linked List functions.
+ */
+#define BSD_SLIST_INIT(head) {                      \
+    BSD_SLIST_FIRST(head) = BSD_SLIST_END(head);                \
+}
+
+#define BSD_SLIST_INSERT_AFTER(slistelm, elm, field) do {           \
+    (elm)->field.sle_next = (slistelm)->field.sle_next;     \
+    (slistelm)->field.sle_next = (elm);             \
+} while (0)
+
+#define BSD_SLIST_INSERT_HEAD(head, elm, field) do {            \
+    (elm)->field.sle_next = (head)->slh_first;          \
+    (head)->slh_first = (elm);                  \
+} while (0)
+
+#define BSD_SLIST_REMOVE_NEXT(head, elm, field) do {            \
+    (elm)->field.sle_next = (elm)->field.sle_next->field.sle_next;  \
+} while (0)
+
+#define BSD_SLIST_REMOVE_HEAD(head, field) do {             \
+    (head)->slh_first = (head)->slh_first->field.sle_next;      \
+} while (0)
+
+#define BSD_SLIST_REMOVE(head, elm, type, field) do {           \
+    if ((head)->slh_first == (elm)) {               \
+        BSD_SLIST_REMOVE_HEAD((head), field);           \
+    } else {                            \
+        struct type *curelm = (head)->slh_first;        \
+                                    \
+        while (curelm->field.sle_next != (elm))         \
+            curelm = curelm->field.sle_next;        \
+        curelm->field.sle_next =                \
+            curelm->field.sle_next->field.sle_next;     \
+        _Q_INVALIDATE((elm)->field.sle_next);           \
+    }                               \
+} while (0)
+
+/*
+ * List definitions.
+ */
+#define BSD_LIST_HEAD(name, type)                       \
+struct name {                               \
+    struct type *lh_first;  /* first element */         \
+}
+
+#define BSD_LIST_HEAD_INITIALIZER(head)                 \
+    { NULL }
+
+#define BSD_LIST_ENTRY(type)                        \
+struct {                                \
+    struct type *le_next;   /* next element */          \
+    struct type **le_prev;  /* address of previous next element */  \
+}
+
+/*
+ * List access methods
+ */
+#define BSD_LIST_FIRST(head)        ((head)->lh_first)
+#define BSD_LIST_END(head)          NULL
+#define BSD_LIST_EMPTY(head)        (BSD_LIST_FIRST(head) == BSD_LIST_END(head))
+#define BSD_LIST_NEXT(elm, field)       ((elm)->field.le_next)
+
+#define BSD_LIST_FOREACH(var, head, field)                  \
+    for((var) = BSD_LIST_FIRST(head);                   \
+        (var)!= BSD_LIST_END(head);                 \
+        (var) = BSD_LIST_NEXT(var, field))
+
+/*
+ * List functions.
+ */
+#define BSD_LIST_INIT(head) do {                        \
+    BSD_LIST_FIRST(head) = BSD_LIST_END(head);              \
+} while (0)
+
+#define BSD_LIST_INSERT_AFTER(listelm, elm, field) do {         \
+    if (((elm)->field.le_next = (listelm)->field.le_next) != NULL)  \
+        (listelm)->field.le_next->field.le_prev =       \
+            &(elm)->field.le_next;              \
+    (listelm)->field.le_next = (elm);               \
+    (elm)->field.le_prev = &(listelm)->field.le_next;       \
+} while (0)
+
+#define BSD_LIST_INSERT_BEFORE(listelm, elm, field) do {            \
+    (elm)->field.le_prev = (listelm)->field.le_prev;        \
+    (elm)->field.le_next = (listelm);               \
+    *(listelm)->field.le_prev = (elm);              \
+    (listelm)->field.le_prev = &(elm)->field.le_next;       \
+} while (0)
+
+#define BSD_LIST_INSERT_HEAD(head, elm, field) do {             \
+    if (((elm)->field.le_next = (head)->lh_first) != NULL)      \
+        (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
+    (head)->lh_first = (elm);                   \
+    (elm)->field.le_prev = &(head)->lh_first;           \
+} while (0)
+
+#define BSD_LIST_REMOVE(elm, field) do {                    \
+    if ((elm)->field.le_next != NULL)               \
+        (elm)->field.le_next->field.le_prev =           \
+            (elm)->field.le_prev;               \
+    *(elm)->field.le_prev = (elm)->field.le_next;           \
+    _Q_INVALIDATE((elm)->field.le_prev);                \
+    _Q_INVALIDATE((elm)->field.le_next);                \
+} while (0)
+
+#define BSD_LIST_REPLACE(elm, elm2, field) do {             \
+    if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \
+        (elm2)->field.le_next->field.le_prev =          \
+            &(elm2)->field.le_next;             \
+    (elm2)->field.le_prev = (elm)->field.le_prev;           \
+    *(elm2)->field.le_prev = (elm2);                \
+    _Q_INVALIDATE((elm)->field.le_prev);                \
+    _Q_INVALIDATE((elm)->field.le_next);                \
+} while (0)
+
+/*
+ * Simple queue definitions.
+ */
+#define BSD_SIMPLEQ_HEAD(name, type)                    \
+struct name {                               \
+    struct type *sqh_first; /* first element */         \
+    struct type **sqh_last; /* addr of last next element */     \
+}
+
+#define BSD_SIMPLEQ_HEAD_INITIALIZER(head)                  \
+    { NULL, &(head).sqh_first }
+
+#define BSD_SIMPLEQ_ENTRY(type)                     \
+struct {                                \
+    struct type *sqe_next;  /* next element */          \
+}
+
+/*
+ * Simple queue access methods.
+ */
+#define BSD_SIMPLEQ_FIRST(head)     ((head)->sqh_first)
+#define BSD_SIMPLEQ_END(head)       NULL
+#define BSD_SIMPLEQ_EMPTY(head)     (BSD_SIMPLEQ_FIRST(head) == BSD_SIMPLEQ_END(head))
+#define BSD_SIMPLEQ_NEXT(elm, field)    ((elm)->field.sqe_next)
+
+#define BSD_SIMPLEQ_FOREACH(var, head, field)               \
+    for((var) = BSD_SIMPLEQ_FIRST(head);                \
+        (var) != BSD_SIMPLEQ_END(head);                 \
+        (var) = BSD_SIMPLEQ_NEXT(var, field))
+
+/*
+ * Simple queue functions.
+ */
+#define BSD_SIMPLEQ_INIT(head) do {                     \
+    (head)->sqh_first = NULL;                   \
+    (head)->sqh_last = &(head)->sqh_first;              \
+} while (0)
+
+#define BSD_SIMPLEQ_INSERT_HEAD(head, elm, field) do {          \
+    if (((elm)->field.sqe_next = (head)->sqh_first) == NULL)    \
+        (head)->sqh_last = &(elm)->field.sqe_next;      \
+    (head)->sqh_first = (elm);                  \
+} while (0)
+
+#define BSD_SIMPLEQ_INSERT_TAIL(head, elm, field) do {          \
+    (elm)->field.sqe_next = NULL;                   \
+    *(head)->sqh_last = (elm);                  \
+    (head)->sqh_last = &(elm)->field.sqe_next;          \
+} while (0)
+
+#define BSD_SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do {        \
+    if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
+        (head)->sqh_last = &(elm)->field.sqe_next;      \
+    (listelm)->field.sqe_next = (elm);              \
+} while (0)
+
+#define BSD_SIMPLEQ_REMOVE_HEAD(head, field) do {           \
+    if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
+        (head)->sqh_last = &(head)->sqh_first;          \
+} while (0)
+
+/*
+ * Tail queue definitions.
+ */
+#define BSD_TAILQ_HEAD(name, type)                      \
+struct name {                               \
+    struct type *tqh_first; /* first element */         \
+    struct type **tqh_last; /* addr of last next element */     \
+}
+
+#define BSD_TAILQ_HEAD_INITIALIZER(head)                    \
+    { NULL, &(head).tqh_first }
+
+#define BSD_TAILQ_ENTRY(type)                       \
+struct {                                \
+    struct type *tqe_next;  /* next element */          \
+    struct type **tqe_prev; /* address of previous next element */  \
+}
+
+/*
+ * tail queue access methods
+ */
+#define BSD_TAILQ_FIRST(head)       ((head)->tqh_first)
+#define BSD_TAILQ_END(head)         NULL
+#define BSD_TAILQ_NEXT(elm, field)      ((elm)->field.tqe_next)
+#define BSD_TAILQ_LAST(head, headname)                  \
+    (*(((struct headname *)((head)->tqh_last))->tqh_last))
+/* XXX */
+#define BSD_TAILQ_PREV(elm, headname, field)                \
+    (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+#define BSD_TAILQ_EMPTY(head)                       \
+    (BSD_TAILQ_FIRST(head) == BSD_TAILQ_END(head))
+
+#define BSD_TAILQ_FOREACH(var, head, field)                 \
+    for((var) = BSD_TAILQ_FIRST(head);                  \
+        (var) != BSD_TAILQ_END(head);                   \
+        (var) = BSD_TAILQ_NEXT(var, field))
+
+#define BSD_TAILQ_FOREACH_REVERSE(var, head, headname, field)       \
+    for((var) = BSD_TAILQ_LAST(head, headname);             \
+        (var) != BSD_TAILQ_END(head);                   \
+        (var) = BSD_TAILQ_PREV(var, headname, field))
+
+/*
+ * Tail queue functions.
+ */
+#define BSD_TAILQ_INIT(head) do {                       \
+    (head)->tqh_first = NULL;                   \
+    (head)->tqh_last = &(head)->tqh_first;              \
+} while (0)
+
+#define BSD_TAILQ_INSERT_HEAD(head, elm, field) do {            \
+    if (((elm)->field.tqe_next = (head)->tqh_first) != NULL)    \
+        (head)->tqh_first->field.tqe_prev =         \
+            &(elm)->field.tqe_next;             \
+    else                                \
+        (head)->tqh_last = &(elm)->field.tqe_next;      \
+    (head)->tqh_first = (elm);                  \
+    (elm)->field.tqe_prev = &(head)->tqh_first;         \
+} while (0)
+
+#define BSD_TAILQ_INSERT_TAIL(head, elm, field) do {            \
+    (elm)->field.tqe_next = NULL;                   \
+    (elm)->field.tqe_prev = (head)->tqh_last;           \
+    *(head)->tqh_last = (elm);                  \
+    (head)->tqh_last = &(elm)->field.tqe_next;          \
+} while (0)
+
+#define BSD_TAILQ_INSERT_AFTER(head, listelm, elm, field) do {      \
+    if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
+        (elm)->field.tqe_next->field.tqe_prev =         \
+            &(elm)->field.tqe_next;             \
+    else                                \
+        (head)->tqh_last = &(elm)->field.tqe_next;      \
+    (listelm)->field.tqe_next = (elm);              \
+    (elm)->field.tqe_prev = &(listelm)->field.tqe_next;     \
+} while (0)
+
+#define BSD_TAILQ_INSERT_BEFORE(listelm, elm, field) do {           \
+    (elm)->field.tqe_prev = (listelm)->field.tqe_prev;      \
+    (elm)->field.tqe_next = (listelm);              \
+    *(listelm)->field.tqe_prev = (elm);             \
+    (listelm)->field.tqe_prev = &(elm)->field.tqe_next;     \
+} while (0)
+
+#define BSD_TAILQ_REMOVE(head, elm, field) do {             \
+    if (((elm)->field.tqe_next) != NULL)                \
+        (elm)->field.tqe_next->field.tqe_prev =         \
+            (elm)->field.tqe_prev;              \
+    else                                \
+        (head)->tqh_last = (elm)->field.tqe_prev;       \
+    *(elm)->field.tqe_prev = (elm)->field.tqe_next;         \
+    _Q_INVALIDATE((elm)->field.tqe_prev);               \
+    _Q_INVALIDATE((elm)->field.tqe_next);               \
+} while (0)
+
+#define BSD_TAILQ_REPLACE(head, elm, elm2, field) do {          \
+    if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != NULL)   \
+        (elm2)->field.tqe_next->field.tqe_prev =        \
+            &(elm2)->field.tqe_next;                \
+    else                                \
+        (head)->tqh_last = &(elm2)->field.tqe_next;     \
+    (elm2)->field.tqe_prev = (elm)->field.tqe_prev;         \
+    *(elm2)->field.tqe_prev = (elm2);               \
+    _Q_INVALIDATE((elm)->field.tqe_prev);               \
+    _Q_INVALIDATE((elm)->field.tqe_next);               \
+} while (0)
+
+/*
+ * Circular queue definitions.
+ */
+#define BSD_CIRCLEQ_HEAD(name, type)                    \
+struct name {                               \
+    struct type *cqh_first;     /* first element */     \
+    struct type *cqh_last;      /* last element */      \
+}
+
+#define BSD_CIRCLEQ_HEAD_INITIALIZER(head)                  \
+    { BSD_CIRCLEQ_END(&head), BSD_CIRCLEQ_END(&head) }
+
+#define BSD_CIRCLEQ_ENTRY(type)                     \
+struct {                                \
+    struct type *cqe_next;      /* next element */      \
+    struct type *cqe_prev;      /* previous element */      \
+}
+
+/*
+ * Circular queue access methods
+ */
+#define BSD_CIRCLEQ_FIRST(head)     ((head)->cqh_first)
+#define BSD_CIRCLEQ_LAST(head)      ((head)->cqh_last)
+#define BSD_CIRCLEQ_END(head)       ((void *)(head))
+#define BSD_CIRCLEQ_NEXT(elm, field)    ((elm)->field.cqe_next)
+#define BSD_CIRCLEQ_PREV(elm, field)    ((elm)->field.cqe_prev)
+#define BSD_CIRCLEQ_EMPTY(head)                     \
+    (BSD_CIRCLEQ_FIRST(head) == BSD_CIRCLEQ_END(head))
+
+#define BSD_CIRCLEQ_FOREACH(var, head, field)               \
+    for((var) = BSD_CIRCLEQ_FIRST(head);                \
+        (var) != BSD_CIRCLEQ_END(head);                 \
+        (var) = BSD_CIRCLEQ_NEXT(var, field))
+
+#define BSD_CIRCLEQ_FOREACH_REVERSE(var, head, field)           \
+    for((var) = BSD_CIRCLEQ_LAST(head);                 \
+        (var) != BSD_CIRCLEQ_END(head);                 \
+        (var) = BSD_CIRCLEQ_PREV(var, field))
+
+/*
+ * Circular queue functions.
+ */
+#define BSD_CIRCLEQ_INIT(head) do {                     \
+    (head)->cqh_first = BSD_CIRCLEQ_END(head);              \
+    (head)->cqh_last = BSD_CIRCLEQ_END(head);               \
+} while (0)
+
+#define BSD_CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do {        \
+    (elm)->field.cqe_next = (listelm)->field.cqe_next;      \
+    (elm)->field.cqe_prev = (listelm);              \
+    if ((listelm)->field.cqe_next == BSD_CIRCLEQ_END(head))     \
+        (head)->cqh_last = (elm);               \
+    else                                \
+        (listelm)->field.cqe_next->field.cqe_prev = (elm);  \
+    (listelm)->field.cqe_next = (elm);              \
+} while (0)
+
+#define BSD_CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do {       \
+    (elm)->field.cqe_next = (listelm);              \
+    (elm)->field.cqe_prev = (listelm)->field.cqe_prev;      \
+    if ((listelm)->field.cqe_prev == BSD_CIRCLEQ_END(head))     \
+        (head)->cqh_first = (elm);              \
+    else                                \
+        (listelm)->field.cqe_prev->field.cqe_next = (elm);  \
+    (listelm)->field.cqe_prev = (elm);              \
+} while (0)
+
+#define BSD_CIRCLEQ_INSERT_HEAD(head, elm, field) do {          \
+    (elm)->field.cqe_next = (head)->cqh_first;          \
+    (elm)->field.cqe_prev = BSD_CIRCLEQ_END(head);          \
+    if ((head)->cqh_last == BSD_CIRCLEQ_END(head))          \
+        (head)->cqh_last = (elm);               \
+    else                                \
+        (head)->cqh_first->field.cqe_prev = (elm);      \
+    (head)->cqh_first = (elm);                  \
+} while (0)
+
+#define BSD_CIRCLEQ_INSERT_TAIL(head, elm, field) do {          \
+    (elm)->field.cqe_next = BSD_CIRCLEQ_END(head);          \
+    (elm)->field.cqe_prev = (head)->cqh_last;           \
+    if ((head)->cqh_first == BSD_CIRCLEQ_END(head))         \
+        (head)->cqh_first = (elm);              \
+    else                                \
+        (head)->cqh_last->field.cqe_next = (elm);       \
+    (head)->cqh_last = (elm);                   \
+} while (0)
+
+#define BSD_CIRCLEQ_REMOVE(head, elm, field) do {               \
+    if ((elm)->field.cqe_next == BSD_CIRCLEQ_END(head))         \
+        (head)->cqh_last = (elm)->field.cqe_prev;       \
+    else                                \
+        (elm)->field.cqe_next->field.cqe_prev =         \
+            (elm)->field.cqe_prev;              \
+    if ((elm)->field.cqe_prev == BSD_CIRCLEQ_END(head))         \
+        (head)->cqh_first = (elm)->field.cqe_next;      \
+    else                                \
+        (elm)->field.cqe_prev->field.cqe_next =         \
+            (elm)->field.cqe_next;              \
+    _Q_INVALIDATE((elm)->field.cqe_prev);               \
+    _Q_INVALIDATE((elm)->field.cqe_next);               \
+} while (0)
+
+#define BSD_CIRCLEQ_REPLACE(head, elm, elm2, field) do {            \
+    if (((elm2)->field.cqe_next = (elm)->field.cqe_next) ==     \
+        BSD_CIRCLEQ_END(head))                      \
+        (head).cqh_last = (elm2);               \
+    else                                \
+        (elm2)->field.cqe_next->field.cqe_prev = (elm2);    \
+    if (((elm2)->field.cqe_prev = (elm)->field.cqe_prev) ==     \
+        BSD_CIRCLEQ_END(head))                      \
+        (head).cqh_first = (elm2);              \
+    else                                \
+        (elm2)->field.cqe_prev->field.cqe_next = (elm2);    \
+    _Q_INVALIDATE((elm)->field.cqe_prev);               \
+    _Q_INVALIDATE((elm)->field.cqe_next);               \
+} while (0)
+
+#endif  /* !_BSD_SYS_QUEUE_H_ */
diff --git a/crypto/ocf/ubsec_ssb/ubsec_ssb.c b/crypto/ocf/ubsec_ssb/ubsec_ssb.c
new file mode 100644
index 000000000000..c02ec25e1fdf
--- /dev/null
+++ b/crypto/ocf/ubsec_ssb/ubsec_ssb.c
@@ -0,0 +1,2219 @@
+
+/*
+ * Copyright (c) 2008 Daniel Mueller (daniel@danm.de)
+ * Copyright (c) 2007 David McCullough (david_mccullough@securecomputing.com)
+ * Copyright (c) 2000 Jason L. Wright (jason@thought.net)
+ * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org)
+ * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+#undef UBSEC_DEBUG
+#undef UBSEC_VERBOSE_DEBUG
+
+#ifdef UBSEC_VERBOSE_DEBUG
+#define UBSEC_DEBUG
+#endif
+
+/*
+ * uBsec BCM5365 hardware crypto accelerator
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/proc_fs.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <linux/stat.h>
+#include <asm/io.h>
+
+#include <linux/ssb/ssb.h>
+
+/*
+ * BSD queue
+ */
+//#include "bsdqueue.h"
+
+/*
+ * OCF
+ */
+#include <cryptodev.h>
+#include <uio.h>
+
+#define HMAC_HACK 1
+
+#define HMAC_HACK 1
+#ifdef HMAC_HACK
+#include <safe/hmachack.h>
+#include <safe/md5.h>
+#include <safe/md5.c>
+#include <safe/sha1.h>
+#include <safe/sha1.c>
+#endif
+
+#include "bsdqueue.h"
+#include "ubsecreg.h"
+#include "ubsecvar.h"
+
+#define DRV_MODULE_NAME     "ubsec_ssb"
+#define PFX DRV_MODULE_NAME ": "
+#define DRV_MODULE_VERSION  "0.02"
+#define DRV_MODULE_RELDATE  "Feb 21, 2009"
+
+#if 1
+#define DPRINTF(a...) \
+    if (debug) \
+    { \
+        printk(DRV_MODULE_NAME ": " a); \
+    }
+#else
+#define DPRINTF(a...)
+#endif
+
+/*
+ * Prototypes
+ */
+static irqreturn_t ubsec_ssb_isr(int, void *, struct pt_regs *);
+static int __devinit ubsec_ssb_probe(struct ssb_device *sdev,
+    const struct ssb_device_id *ent);
+static void __devexit ubsec_ssb_remove(struct ssb_device *sdev);
+int ubsec_attach(struct ssb_device *sdev, const struct ssb_device_id *ent,
+    struct device *self);
+static void ubsec_setup_mackey(struct ubsec_session *ses, int algo,
+    caddr_t key, int klen);
+static int dma_map_skb(struct ubsec_softc *sc,
+    struct ubsec_dma_alloc* q_map, struct sk_buff *skb, int *mlen);
+static int dma_map_uio(struct ubsec_softc *sc,
+    struct ubsec_dma_alloc *q_map, struct uio *uio, int *mlen);
+static void dma_unmap(struct ubsec_softc *sc,
+    struct ubsec_dma_alloc *q_map, int mlen);
+static int ubsec_dmamap_aligned(struct ubsec_softc *sc,
+    const struct ubsec_dma_alloc *q_map, int mlen);
+
+#ifdef UBSEC_DEBUG
+static int proc_read(char *buf, char **start, off_t offset,
+    int size, int *peof, void *data);
+#endif
+
+void ubsec_reset_board(struct ubsec_softc *);
+void ubsec_init_board(struct ubsec_softc *);
+void ubsec_cleanchip(struct ubsec_softc *);
+void ubsec_totalreset(struct ubsec_softc *);
+int  ubsec_free_q(struct ubsec_softc*, struct ubsec_q *);
+
+static int ubsec_newsession(device_t, u_int32_t *, struct cryptoini *);
+static int ubsec_freesession(device_t, u_int64_t);
+static int ubsec_process(device_t, struct cryptop *, int);
+
+void    ubsec_callback(struct ubsec_softc *, struct ubsec_q *);
+void    ubsec_feed(struct ubsec_softc *);
+void    ubsec_mcopy(struct sk_buff *, struct sk_buff *, int, int);
+void    ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *);
+int     ubsec_dma_malloc(struct ubsec_softc *, struct ubsec_dma_alloc *,
+        size_t, int);
+
+/* DEBUG crap... */
+void ubsec_dump_pb(struct ubsec_pktbuf *);
+void ubsec_dump_mcr(struct ubsec_mcr *);
+
+#define READ_REG(sc,r) \
+    ssb_read32((sc)->sdev, (r));
+#define WRITE_REG(sc,r,val) \
+    ssb_write32((sc)->sdev, (r), (val));
+#define READ_REG_SDEV(sdev,r) \
+    ssb_read32((sdev), (r));
+#define WRITE_REG_SDEV(sdev,r,val) \
+    ssb_write32((sdev), (r), (val));
+
+#define SWAP32(x) (x) = htole32(ntohl((x)))
+#define HTOLE32(x) (x) = htole32(x)
+
+#ifdef __LITTLE_ENDIAN
+#define letoh16(x) (x)
+#define letoh32(x) (x)
+#endif
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Enable debug output");
+
+#define UBSEC_SSB_MAX_CHIPS 1
+static struct ubsec_softc *ubsec_chip_idx[UBSEC_SSB_MAX_CHIPS];
+static struct ubsec_stats ubsecstats;
+
+#ifdef UBSEC_DEBUG
+static struct proc_dir_entry *procdebug;
+#endif
+
+static struct ssb_device_id ubsec_ssb_tbl[] = {
+    /* Broadcom BCM5365P IPSec Core */
+    SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_IPSEC, SSB_ANY_REV),
+    SSB_DEVTABLE_END
+};
+
+static struct ssb_driver ubsec_ssb_driver = {
+    .name       = DRV_MODULE_NAME,
+    .id_table   = ubsec_ssb_tbl,
+    .probe      = ubsec_ssb_probe,
+    .remove     = __devexit_p(ubsec_ssb_remove),
+     /*
+    .suspend    = ubsec_ssb_suspend,
+    .resume     = ubsec_ssb_resume
+    */
+};
+
+static device_method_t ubsec_ssb_methods = {
+    /* crypto device methods */
+    DEVMETHOD(cryptodev_newsession, ubsec_newsession),
+    DEVMETHOD(cryptodev_freesession,ubsec_freesession),
+    DEVMETHOD(cryptodev_process,    ubsec_process),
+};
+
+#ifdef UBSEC_DEBUG
+static int
+proc_read(char *buf, char **start, off_t offset,
+    int size, int *peof, void *data)
+{
+    int i = 0, byteswritten = 0, ret;
+    unsigned int stat, ctrl;
+#ifdef UBSEC_VERBOSE_DEBUG
+    struct ubsec_q *q;
+    struct ubsec_dma *dmap;
+#endif
+
+    while ((i < UBSEC_SSB_MAX_CHIPS) && (ubsec_chip_idx[i] != NULL))
+    {
+        struct ubsec_softc *sc = ubsec_chip_idx[i];
+
+        stat = READ_REG(sc, BS_STAT);
+        ctrl = READ_REG(sc, BS_CTRL);
+        ret = snprintf((buf + byteswritten),
+            (size - byteswritten) ,
+            "DEV %d, DMASTAT %08x, DMACTRL %08x\n", i, stat, ctrl);
+
+        byteswritten += ret;
+
+#ifdef UBSEC_VERBOSE_DEBUG
+        printf("DEV %d, DMASTAT %08x, DMACTRL %08x\n", i, stat, ctrl);
+
+        /* Dump all queues MCRs */
+        if (!BSD_SIMPLEQ_EMPTY(&sc->sc_qchip)) {
+            BSD_SIMPLEQ_FOREACH(q, &sc->sc_qchip, q_next)
+            {
+                dmap = q->q_dma;
+                ubsec_dump_mcr(&dmap->d_dma->d_mcr);
+            }
+        }
+#endif
+
+        i++;
+    }
+
+    *peof = 1;
+
+    return byteswritten;
+}
+#endif
+
+/*
+ * map in a given sk_buff
+ */
+static int
+dma_map_skb(struct ubsec_softc *sc, struct ubsec_dma_alloc* q_map, struct sk_buff *skb, int *mlen)
+{
+    int i = 0;
+    dma_addr_t tmp;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    /*
+     * We support only a limited number of fragments.
+     */
+    if (unlikely((skb_shinfo(skb)->nr_frags + 1) >= UBS_MAX_SCATTER))
+    {
+        printk(KERN_ERR "Only %d scatter fragments are supported.\n", UBS_MAX_SCATTER);
+        return (-ENOMEM);
+    }
+
+#ifdef UBSEC_VERBOSE_DEBUG
+    DPRINTF("%s - map %d 0x%x %d\n", __FUNCTION__, 0, (unsigned int)skb->data, skb_headlen(skb));
+#endif
+
+    /* first data package */
+    tmp = dma_map_single(sc->sc_dv,
+                         skb->data,
+                         skb_headlen(skb),
+                         DMA_BIDIRECTIONAL);
+
+    q_map[i].dma_paddr = tmp;
+    q_map[i].dma_vaddr = skb->data;
+    q_map[i].dma_size = skb_headlen(skb);
+
+    if (unlikely(tmp == 0))
+    {
+        printk(KERN_ERR "Could not map memory region for dma.\n");
+        return (-EINVAL);
+    }
+
+#ifdef UBSEC_VERBOSE_DEBUG
+    DPRINTF("%s - map %d done physical addr 0x%x\n", __FUNCTION__, 0, (unsigned int)tmp);
+#endif
+
+
+    /* all other data packages */
+    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+
+#ifdef UBSEC_VERBOSE_DEBUG
+        DPRINTF("%s - map %d 0x%x %d\n", __FUNCTION__, i + 1,
+            (unsigned int)page_address(skb_frag_page(&skb_shinfo(skb)->frags[i])) +
+            skb_shinfo(skb)->frags[i].page_offset, skb_shinfo(skb)->frags[i].size);
+#endif
+
+        tmp = dma_map_single(sc->sc_dv,
+                             page_address(skb_frag_page(&skb_shinfo(skb)->frags[i])) +
+                                 skb_shinfo(skb)->frags[i].page_offset,
+                             skb_shinfo(skb)->frags[i].size,
+                             DMA_BIDIRECTIONAL);
+
+        q_map[i + 1].dma_paddr = tmp;
+        q_map[i + 1].dma_vaddr = (void*)(page_address(skb_frag_page(&skb_shinfo(skb)->frags[i])) +
+                                  skb_shinfo(skb)->frags[i].page_offset);
+        q_map[i + 1].dma_size = skb_shinfo(skb)->frags[i].size;
+
+        if (unlikely(tmp == 0))
+        {
+            printk(KERN_ERR "Could not map memory region for dma.\n");
+            return (-EINVAL);
+        }
+
+#ifdef UBSEC_VERBOSE_DEBUG
+        DPRINTF("%s - map %d done physical addr 0x%x\n", __FUNCTION__, i + 1, (unsigned int)tmp);
+#endif
+
+    }
+    *mlen = i + 1;
+
+    return(0);
+}
+
+/*
+ * map in a given uio buffer
+ */
+
+static int
+dma_map_uio(struct ubsec_softc *sc, struct ubsec_dma_alloc *q_map, struct uio *uio, int *mlen)
+{
+    struct iovec *iov = uio->uio_iov;
+    int n;
+    dma_addr_t tmp;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    /*
+     * We support only a limited number of fragments.
+     */
+    if (unlikely(uio->uio_iovcnt >= UBS_MAX_SCATTER))
+    {
+        printk(KERN_ERR "Only %d scatter fragments are supported.\n", UBS_MAX_SCATTER);
+        return (-ENOMEM);
+    }
+
+    for (n = 0; n < uio->uio_iovcnt; n++) {
+#ifdef UBSEC_VERBOSE_DEBUG
+        DPRINTF("%s - map %d 0x%x %d\n", __FUNCTION__, n, (unsigned int)iov->iov_base, iov->iov_len);
+#endif
+        tmp = dma_map_single(sc->sc_dv,
+                             iov->iov_base,
+                             iov->iov_len,
+                             DMA_BIDIRECTIONAL);
+
+        q_map[n].dma_paddr = tmp;
+        q_map[n].dma_vaddr = iov->iov_base;
+        q_map[n].dma_size = iov->iov_len;
+
+        if (unlikely(tmp == 0))
+                       {
+            printk(KERN_ERR "Could not map memory region for dma.\n");
+            return (-EINVAL);
+        }
+
+#ifdef UBSEC_VERBOSE_DEBUG
+        DPRINTF("%s - map %d done physical addr 0x%x\n", __FUNCTION__, n, (unsigned int)tmp);
+#endif
+
+        iov++;
+    }
+    *mlen = n;
+
+    return(0);
+}
+
+static void
+dma_unmap(struct ubsec_softc *sc, struct ubsec_dma_alloc *q_map, int mlen)
+{
+    int i;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    for(i = 0; i < mlen; i++)
+    {
+#ifdef UBSEC_VERBOSE_DEBUG
+        DPRINTF("%s - unmap %d 0x%x %d\n", __FUNCTION__, i, (unsigned int)q_map[i].dma_paddr, q_map[i].dma_size);
+#endif
+        dma_unmap_single(sc->sc_dv,
+                         q_map[i].dma_paddr,
+                         q_map[i].dma_size,
+                         DMA_BIDIRECTIONAL);
+    }
+    return;
+}
+
+/*
+ * Is the operand suitable aligned for direct DMA.  Each
+ * segment must be aligned on a 32-bit boundary and all
+ * but the last segment must be a multiple of 4 bytes.
+ */
+static int
+ubsec_dmamap_aligned(struct ubsec_softc *sc, const struct ubsec_dma_alloc *q_map, int mlen)
+{
+    int i;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    for (i = 0; i < mlen; i++) {
+        if (q_map[i].dma_paddr & 3)
+            return (0);
+        if (i != (mlen - 1) && (q_map[i].dma_size & 3))
+            return (0);
+    }
+    return (1);
+}
+
+
+#define N(a)    (sizeof(a) / sizeof (a[0]))
+static void
+ubsec_setup_mackey(struct ubsec_session *ses, int algo, caddr_t key, int klen)
+{
+#ifdef HMAC_HACK
+    MD5_CTX md5ctx;
+    SHA1_CTX sha1ctx;
+    int i;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    for (i = 0; i < klen; i++)
+        key[i] ^= HMAC_IPAD_VAL;
+
+    if (algo == CRYPTO_MD5_HMAC) {
+        MD5Init(&md5ctx);
+        MD5Update(&md5ctx, key, klen);
+        MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
+        bcopy(md5ctx.md5_st8, ses->ses_hminner, sizeof(md5ctx.md5_st8));
+    } else {
+        SHA1Init(&sha1ctx);
+        SHA1Update(&sha1ctx, key, klen);
+        SHA1Update(&sha1ctx, hmac_ipad_buffer,
+            SHA1_HMAC_BLOCK_LEN - klen);
+        bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
+    }
+
+    for (i = 0; i < klen; i++)
+        key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
+
+    if (algo == CRYPTO_MD5_HMAC) {
+        MD5Init(&md5ctx);
+        MD5Update(&md5ctx, key, klen);
+        MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
+        bcopy(md5ctx.md5_st8, ses->ses_hmouter, sizeof(md5ctx.md5_st8));
+    } else {
+        SHA1Init(&sha1ctx);
+        SHA1Update(&sha1ctx, key, klen);
+        SHA1Update(&sha1ctx, hmac_opad_buffer,
+            SHA1_HMAC_BLOCK_LEN - klen);
+        bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
+    }
+
+    for (i = 0; i < klen; i++)
+        key[i] ^= HMAC_OPAD_VAL;
+
+#else /* HMAC_HACK */
+    DPRINTF("md5/sha not implemented\n");
+#endif /* HMAC_HACK */
+}
+#undef N
+
+static int
+__devinit ubsec_ssb_probe(struct ssb_device *sdev,
+    const struct ssb_device_id *ent)
+{
+    int err;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    err = ssb_bus_powerup(sdev->bus, 0);
+    if (err) {
+        dev_err(sdev->dev, "Failed to powerup the bus\n");
+	goto err_out;
+    }
+
+    err = request_irq(sdev->irq, (irq_handler_t)ubsec_ssb_isr,
+        IRQF_DISABLED | IRQF_SHARED, DRV_MODULE_NAME, sdev);
+    if (err) {
+        dev_err(sdev->dev, "Could not request irq\n");
+        goto err_out_powerdown;
+    }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
+    err = dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(32)) ||
+	  dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(32));
+#else
+    err = ssb_dma_set_mask(sdev, DMA_32BIT_MASK);
+#endif
+    if (err) {
+        dev_err(sdev->dev,
+        "Required 32BIT DMA mask unsupported by the system.\n");
+        goto err_out_free_irq;
+    }
+
+    printk(KERN_INFO "Sentry5(tm) ROBOGateway(tm) IPSec Core at IRQ %u\n",
+        sdev->irq);
+
+    DPRINTF("Vendor: %x, core id: %x, revision: %x\n",
+        sdev->id.vendor, sdev->id.coreid, sdev->id.revision);
+
+    ssb_device_enable(sdev, 0);
+
+    if (ubsec_attach(sdev, ent, sdev->dev) != 0)
+        goto err_out_disable;
+
+#ifdef UBSEC_DEBUG
+    procdebug = create_proc_entry(DRV_MODULE_NAME, S_IRUSR, NULL);
+    if (procdebug)
+    {
+        procdebug->read_proc = proc_read;
+        procdebug->data = NULL;
+    } else
+        DPRINTF("Unable to create proc file.\n");
+#endif
+
+    return 0;
+
+err_out_disable:
+    ssb_device_disable(sdev, 0);
+
+err_out_free_irq:
+    free_irq(sdev->irq, sdev);
+
+err_out_powerdown:
+    ssb_bus_may_powerdown(sdev->bus);
+
+err_out:
+    return err;
+}
+
+static void __devexit ubsec_ssb_remove(struct ssb_device *sdev) {
+
+    struct ubsec_softc *sc;
+    unsigned int ctrlflgs;
+    struct ubsec_dma *dmap;
+    u_int32_t i;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    ctrlflgs = READ_REG_SDEV(sdev, BS_CTRL);
+    /* disable all IPSec Core interrupts globally */
+    ctrlflgs ^= (BS_CTRL_MCR1INT | BS_CTRL_MCR2INT |
+        BS_CTRL_DMAERR);
+    WRITE_REG_SDEV(sdev, BS_CTRL, ctrlflgs);
+
+    free_irq(sdev->irq, sdev);
+
+    sc = (struct ubsec_softc *)ssb_get_drvdata(sdev);
+
+    /* unregister all crypto algorithms */
+    crypto_unregister_all(sc->sc_cid);
+
+    /* Free queue / dma memory */
+    for (i = 0; i < UBS_MAX_NQUEUE; i++) {
+        struct ubsec_q *q;
+
+        q = sc->sc_queuea[i];
+        if (q != NULL)
+        {
+            dmap = q->q_dma;
+            if (dmap != NULL)
+            {
+                ubsec_dma_free(sc, &dmap->d_alloc);
+                q->q_dma = NULL;
+            }
+            kfree(q);
+        }
+        sc->sc_queuea[i] = NULL;
+    }
+
+    ssb_device_disable(sdev, 0);
+    ssb_bus_may_powerdown(sdev->bus);
+    ssb_set_drvdata(sdev, NULL);
+
+#ifdef UBSEC_DEBUG
+    if (procdebug)
+        remove_proc_entry(DRV_MODULE_NAME, NULL);
+#endif
+
+}
+
+
+int
+ubsec_attach(struct ssb_device *sdev, const struct ssb_device_id *ent,
+    struct device *self)
+{
+    struct ubsec_softc *sc = NULL;
+    struct ubsec_dma *dmap;
+    u_int32_t i;
+    static int num_chips = 0;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    sc = (struct ubsec_softc *) kmalloc(sizeof(*sc), GFP_KERNEL);
+    if (!sc)
+        return(-ENOMEM);
+    memset(sc, 0, sizeof(*sc));
+
+    sc->sc_dv = sdev->dev;
+    sc->sdev = sdev;
+
+    spin_lock_init(&sc->sc_ringmtx);
+
+    softc_device_init(sc, "ubsec_ssb", num_chips, ubsec_ssb_methods);
+
+    /* Maybe someday there are boards with more than one chip available */
+    if (num_chips < UBSEC_SSB_MAX_CHIPS) {
+        ubsec_chip_idx[device_get_unit(sc->sc_dev)] = sc;
+        num_chips++;
+    }
+
+    ssb_set_drvdata(sdev, sc);
+
+    BSD_SIMPLEQ_INIT(&sc->sc_queue);
+    BSD_SIMPLEQ_INIT(&sc->sc_qchip);
+    BSD_SIMPLEQ_INIT(&sc->sc_queue2);
+    BSD_SIMPLEQ_INIT(&sc->sc_qchip2);
+    BSD_SIMPLEQ_INIT(&sc->sc_q2free);
+
+    sc->sc_statmask = BS_STAT_MCR1_DONE | BS_STAT_DMAERR;
+
+    sc->sc_cid = crypto_get_driverid(softc_get_device(sc), CRYPTOCAP_F_HARDWARE);
+    if (sc->sc_cid < 0) {
+        device_printf(sc->sc_dev, "could not get crypto driver id\n");
+        return -1;
+    }
+
+    BSD_SIMPLEQ_INIT(&sc->sc_freequeue);
+    dmap = sc->sc_dmaa;
+    for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) {
+        struct ubsec_q *q;
+
+        q = (struct ubsec_q *)kmalloc(sizeof(struct ubsec_q), GFP_KERNEL);
+        if (q == NULL) {
+            printf(": can't allocate queue buffers\n");
+            break;
+        }
+
+        if (ubsec_dma_malloc(sc, &dmap->d_alloc, sizeof(struct ubsec_dmachunk),0)) {
+            printf(": can't allocate dma buffers\n");
+            kfree(q);
+            break;
+        }
+        dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr;
+
+        q->q_dma = dmap;
+        sc->sc_queuea[i] = q;
+
+        BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
+    }
+
+    /*
+     * Reset Broadcom chip
+     */
+    ubsec_reset_board(sc);
+
+    /*
+     * Init Broadcom chip
+     */
+    ubsec_init_board(sc);
+
+    /* supported crypto algorithms */
+    crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
+    crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
+
+    if (sc->sc_flags & UBS_FLAGS_AES) {
+        crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
+        printf(KERN_INFO DRV_MODULE_NAME ": DES 3DES AES128 AES192 AES256 MD5_HMAC SHA1_HMAC\n");
+    }
+    else
+        printf(KERN_INFO DRV_MODULE_NAME ": DES 3DES MD5_HMAC SHA1_HMAC\n");
+
+    crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
+    crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
+
+    return 0;
+}
+
+/*
+ * UBSEC Interrupt routine
+ */
+static irqreturn_t
+ubsec_ssb_isr(int irq, void *arg, struct pt_regs *regs)
+{
+    struct ubsec_softc *sc = NULL;
+    volatile u_int32_t stat;
+    struct ubsec_q *q;
+    struct ubsec_dma *dmap;
+    int npkts = 0, i;
+
+#ifdef UBSEC_VERBOSE_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    sc = (struct ubsec_softc *)ssb_get_drvdata(arg);
+
+    stat = READ_REG(sc, BS_STAT);
+
+    stat &= sc->sc_statmask;
+    if (stat == 0)
+        return IRQ_NONE;
+
+    WRITE_REG(sc, BS_STAT, stat);       /* IACK */
+
+    /*
+     * Check to see if we have any packets waiting for us
+     */
+    if ((stat & BS_STAT_MCR1_DONE)) {
+        while (!BSD_SIMPLEQ_EMPTY(&sc->sc_qchip)) {
+            q = BSD_SIMPLEQ_FIRST(&sc->sc_qchip);
+            dmap = q->q_dma;
+
+            if ((dmap->d_dma->d_mcr.mcr_flags & htole16(UBS_MCR_DONE)) == 0)
+            {
+                DPRINTF("error while processing MCR. Flags = %x\n", dmap->d_dma->d_mcr.mcr_flags);
+                break;
+            }
+
+            BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next);
+
+            npkts = q->q_nstacked_mcrs;
+            /*
+             * search for further sc_qchip ubsec_q's that share
+             * the same MCR, and complete them too, they must be
+             * at the top.
+             */
+            for (i = 0; i < npkts; i++) {
+                if(q->q_stacked_mcr[i])
+                    ubsec_callback(sc, q->q_stacked_mcr[i]);
+                else
+                    break;
+            }
+            ubsec_callback(sc, q);
+        }
+
+        /*
+         * Don't send any more packet to chip if there has been
+         * a DMAERR.
+         */
+        if (likely(!(stat & BS_STAT_DMAERR)))
+            ubsec_feed(sc);
+        else
+            DPRINTF("DMA error occurred. Stop feeding crypto chip.\n");
+    }
+
+    /*
+     * Check to see if we got any DMA Error
+     */
+    if (stat & BS_STAT_DMAERR) {
+        volatile u_int32_t a = READ_REG(sc, BS_ERR);
+
+        printf(KERN_ERR "%s: dmaerr %s@%08x\n", DRV_MODULE_NAME,
+            (a & BS_ERR_READ) ? "read" : "write", a & BS_ERR_ADDR);
+
+        ubsecstats.hst_dmaerr++;
+        ubsec_totalreset(sc);
+        ubsec_feed(sc);
+    }
+
+    return IRQ_HANDLED;
+}
+
+/*
+ * ubsec_feed() - aggregate and post requests to chip
+ *        It is assumed that the caller set splnet()
+ */
+void
+ubsec_feed(struct ubsec_softc *sc)
+{
+#ifdef UBSEC_VERBOSE_DEBUG
+    static int max;
+#endif
+    struct ubsec_q *q, *q2;
+    int npkts, i;
+    void *v;
+    u_int32_t stat;
+
+    npkts = sc->sc_nqueue;
+    if (npkts > UBS_MAX_AGGR)
+        npkts = UBS_MAX_AGGR;
+    if (npkts < 2)
+        goto feed1;
+
+    stat = READ_REG(sc, BS_STAT);
+
+    if (stat & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
+        if(stat & BS_STAT_DMAERR) {
+            ubsec_totalreset(sc);
+            ubsecstats.hst_dmaerr++;
+        }
+        return;
+    }
+
+#ifdef UBSEC_VERBOSE_DEBUG
+    DPRINTF("merging %d records\n", npkts);
+
+    /* XXX temporary aggregation statistics reporting code */
+    if (max < npkts) {
+        max = npkts;
+        DPRINTF("%s: new max aggregate %d\n", DRV_MODULE_NAME, max);
+    }
+#endif /* UBSEC_VERBOSE_DEBUG */
+
+    q = BSD_SIMPLEQ_FIRST(&sc->sc_queue);
+    BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
+    --sc->sc_nqueue;
+
+#if 0
+    /*
+     * XXX
+     * We use dma_map_single() - no sync required!
+     */
+
+    bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
+        0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
+    if (q->q_dst_map != NULL)
+        bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
+            0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
+#endif
+
+    q->q_nstacked_mcrs = npkts - 1;     /* Number of packets stacked */
+
+    for (i = 0; i < q->q_nstacked_mcrs; i++) {
+        q2 = BSD_SIMPLEQ_FIRST(&sc->sc_queue);
+
+#if 0
+        bus_dmamap_sync(sc->sc_dmat, q2->q_src_map,
+            0, q2->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
+        if (q2->q_dst_map != NULL)
+            bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map,
+                0, q2->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
+#endif
+        BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
+        --sc->sc_nqueue;
+
+        v = ((char *)&q2->q_dma->d_dma->d_mcr) + sizeof(struct ubsec_mcr) -
+            sizeof(struct ubsec_mcr_add);
+        bcopy(v, &q->q_dma->d_dma->d_mcradd[i], sizeof(struct ubsec_mcr_add));
+        q->q_stacked_mcr[i] = q2;
+    }
+    q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts);
+    BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
+#if 0
+    bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
+        0, q->q_dma->d_alloc.dma_map->dm_mapsize,
+        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+#endif
+    WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
+        offsetof(struct ubsec_dmachunk, d_mcr));
+#ifdef UBSEC_VERBOSE_DEBUG
+    DPRINTF("feed (1): q->chip %p %08x %08x\n", q,
+        (u_int32_t)q->q_dma->d_alloc.dma_paddr,
+        (u_int32_t)(q->q_dma->d_alloc.dma_paddr +
+        offsetof(struct ubsec_dmachunk, d_mcr)));
+#endif /* UBSEC_DEBUG */
+    return;
+
+feed1:
+    while (!BSD_SIMPLEQ_EMPTY(&sc->sc_queue)) {
+        stat = READ_REG(sc, BS_STAT);
+
+        if (stat & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
+            if(stat & BS_STAT_DMAERR) {
+                ubsec_totalreset(sc);
+                ubsecstats.hst_dmaerr++;
+            }
+            break;
+        }
+
+        q = BSD_SIMPLEQ_FIRST(&sc->sc_queue);
+
+#if 0
+        bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
+            0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
+        if (q->q_dst_map != NULL)
+            bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
+                0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
+        bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
+            0, q->q_dma->d_alloc.dma_map->dm_mapsize,
+            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+#endif
+
+        WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
+            offsetof(struct ubsec_dmachunk, d_mcr));
+#ifdef UBSEC_VERBOSE_DEBUG
+        DPRINTF("feed (2): q->chip %p %08x %08x\n", q,
+            (u_int32_t)q->q_dma->d_alloc.dma_paddr,
+            (u_int32_t)(q->q_dma->d_alloc.dma_paddr +
+            offsetof(struct ubsec_dmachunk, d_mcr)));
+#endif /* UBSEC_DEBUG */
+        BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
+        --sc->sc_nqueue;
+        BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
+    }
+}
+
+/*
+ * Allocate a new 'session' and return an encoded session id.  'sidp'
+ * contains our registration id, and should contain an encoded session
+ * id on successful allocation.
+ */
+static int
+ubsec_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
+{
+    struct cryptoini *c, *encini = NULL, *macini = NULL;
+    struct ubsec_softc *sc = NULL;
+    struct ubsec_session *ses = NULL;
+    int sesn, i;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    if (sidp == NULL || cri == NULL)
+        return (EINVAL);
+
+    sc = device_get_softc(dev);
+
+    if (sc == NULL)
+        return (EINVAL);
+
+    for (c = cri; c != NULL; c = c->cri_next) {
+        if (c->cri_alg == CRYPTO_MD5_HMAC ||
+            c->cri_alg == CRYPTO_SHA1_HMAC) {
+            if (macini)
+                return (EINVAL);
+            macini = c;
+        } else if (c->cri_alg == CRYPTO_DES_CBC ||
+            c->cri_alg == CRYPTO_3DES_CBC ||
+            c->cri_alg == CRYPTO_AES_CBC) {
+            if (encini)
+                return (EINVAL);
+            encini = c;
+        } else
+            return (EINVAL);
+    }
+    if (encini == NULL && macini == NULL)
+        return (EINVAL);
+
+    if (sc->sc_sessions == NULL) {
+        ses = sc->sc_sessions = (struct ubsec_session *)kmalloc(
+            sizeof(struct ubsec_session), SLAB_ATOMIC);
+        if (ses == NULL)
+            return (ENOMEM);
+        memset(ses, 0, sizeof(struct ubsec_session));
+        sesn = 0;
+        sc->sc_nsessions = 1;
+    } else {
+        for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
+            if (sc->sc_sessions[sesn].ses_used == 0) {
+                ses = &sc->sc_sessions[sesn];
+                break;
+            }
+        }
+
+        if (ses == NULL) {
+            sesn = sc->sc_nsessions;
+            ses = (struct ubsec_session *)kmalloc((sesn + 1) *
+                sizeof(struct ubsec_session), SLAB_ATOMIC);
+            if (ses == NULL)
+                return (ENOMEM);
+            memset(ses, 0, (sesn + 1) * sizeof(struct ubsec_session));
+            bcopy(sc->sc_sessions, ses, sesn *
+                sizeof(struct ubsec_session));
+            bzero(sc->sc_sessions, sesn *
+                sizeof(struct ubsec_session));
+            kfree(sc->sc_sessions);
+            sc->sc_sessions = ses;
+            ses = &sc->sc_sessions[sesn];
+            sc->sc_nsessions++;
+        }
+    }
+
+    bzero(ses, sizeof(struct ubsec_session));
+    ses->ses_used = 1;
+    if (encini) {
+        /* get an IV */
+        /* XXX may read fewer than requested */
+        read_random(ses->ses_iv, sizeof(ses->ses_iv));
+
+        /* Go ahead and compute key in ubsec's byte order */
+        if (encini->cri_alg == CRYPTO_DES_CBC) {
+            /* DES uses the same key three times:
+             * 1st encrypt -> 2nd decrypt -> 3nd encrypt */
+            bcopy(encini->cri_key, &ses->ses_key[0], 8);
+            bcopy(encini->cri_key, &ses->ses_key[2], 8);
+            bcopy(encini->cri_key, &ses->ses_key[4], 8);
+            ses->ses_keysize = 192; /* Fake! Actually its only 64bits ..
+                                       oh no it is even less: 54bits. */
+        } else if(encini->cri_alg == CRYPTO_3DES_CBC) {
+            bcopy(encini->cri_key, ses->ses_key, 24);
+            ses->ses_keysize = 192;
+        } else if(encini->cri_alg == CRYPTO_AES_CBC) {
+            ses->ses_keysize = encini->cri_klen;
+
+            if (ses->ses_keysize != 128 &&
+                ses->ses_keysize != 192 &&
+                ses->ses_keysize != 256)
+            {
+                DPRINTF("unsupported AES key size: %d\n", ses->ses_keysize);
+                return (EINVAL);
+            }
+            bcopy(encini->cri_key, ses->ses_key, (ses->ses_keysize / 8));
+        }
+
+        /* Hardware requires the keys in little endian byte order */
+        for (i=0; i < (ses->ses_keysize / 32); i++)
+            SWAP32(ses->ses_key[i]);
+    }
+
+    if (macini) {
+        ses->ses_mlen = macini->cri_mlen;
+
+        if (ses->ses_mlen == 0 ||
+            ses->ses_mlen > SHA1_HASH_LEN) {
+
+            if (macini->cri_alg == CRYPTO_MD5_HMAC ||
+                macini->cri_alg == CRYPTO_SHA1_HMAC)
+            {
+                ses->ses_mlen = DEFAULT_HMAC_LEN;
+            } else
+            {
+                /*
+                 * Reserved for future usage. MD5/SHA1 calculations have
+                 * different hash sizes.
+                 */
+                printk(KERN_ERR DRV_MODULE_NAME ": unsupported hash operation with mac/hash len: %d\n", ses->ses_mlen);
+                return (EINVAL);
+            }
+
+        }
+
+        if (macini->cri_key != NULL) {
+            ubsec_setup_mackey(ses, macini->cri_alg, macini->cri_key,
+                macini->cri_klen / 8);
+        }
+    }
+
+    *sidp = UBSEC_SID(device_get_unit(sc->sc_dev), sesn);
+    return (0);
+}
+
+/*
+ * Deallocate a session.
+ */
+static int
+ubsec_freesession(device_t dev, u_int64_t tid)
+{
+    struct ubsec_softc *sc = device_get_softc(dev);
+    int session;
+    u_int32_t sid = ((u_int32_t)tid) & 0xffffffff;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    if (sc == NULL)
+        return (EINVAL);
+
+    session = UBSEC_SESSION(sid);
+    if (session < sc->sc_nsessions) {
+        bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
+        return (0);
+    } else
+        return (EINVAL);
+}
+
+static int
+ubsec_process(device_t dev, struct cryptop *crp, int hint)
+{
+    struct ubsec_q *q = NULL;
+    int err = 0, i, j, nicealign;
+    struct ubsec_softc *sc = device_get_softc(dev);
+    struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
+    int encoffset = 0, macoffset = 0, cpskip, cpoffset;
+    int sskip, dskip, stheend, dtheend, ivsize = 8;
+    int16_t coffset;
+    struct ubsec_session *ses;
+    struct ubsec_generic_ctx ctx;
+    struct ubsec_dma *dmap = NULL;
+    unsigned long flags;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    if (unlikely(crp == NULL || crp->crp_callback == NULL)) {
+        ubsecstats.hst_invalid++;
+        return (EINVAL);
+    }
+
+    if (unlikely(sc == NULL))
+        return (EINVAL);
+
+#ifdef UBSEC_VERBOSE_DEBUG
+    DPRINTF("spin_lock_irqsave\n");
+#endif
+    spin_lock_irqsave(&sc->sc_ringmtx, flags);
+    //spin_lock_irq(&sc->sc_ringmtx);
+
+    if (BSD_SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
+        ubsecstats.hst_queuefull++;
+#ifdef UBSEC_VERBOSE_DEBUG
+        DPRINTF("spin_unlock_irqrestore\n");
+#endif
+        spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
+        //spin_unlock_irq(&sc->sc_ringmtx);
+        err = ENOMEM;
+        goto errout2;
+    }
+
+    q = BSD_SIMPLEQ_FIRST(&sc->sc_freequeue);
+    BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next);
+#ifdef UBSEC_VERBOSE_DEBUG
+    DPRINTF("spin_unlock_irqrestore\n");
+#endif
+    spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
+    //spin_unlock_irq(&sc->sc_ringmtx);
+
+    dmap = q->q_dma; /* Save dma pointer */
+    bzero(q, sizeof(struct ubsec_q));
+    bzero(&ctx, sizeof(ctx));
+
+    q->q_sesn = UBSEC_SESSION(crp->crp_sid);
+    q->q_dma = dmap;
+    ses = &sc->sc_sessions[q->q_sesn];
+
+    if (crp->crp_flags & CRYPTO_F_SKBUF) {
+        q->q_src_m = (struct sk_buff *)crp->crp_buf;
+        q->q_dst_m = (struct sk_buff *)crp->crp_buf;
+    } else if (crp->crp_flags & CRYPTO_F_IOV) {
+        q->q_src_io = (struct uio *)crp->crp_buf;
+        q->q_dst_io = (struct uio *)crp->crp_buf;
+    } else {
+        err = EINVAL;
+        goto errout;    /* XXX we don't handle contiguous blocks! */
+    }
+
+    bzero(&dmap->d_dma->d_mcr, sizeof(struct ubsec_mcr));
+
+    dmap->d_dma->d_mcr.mcr_pkts = htole16(1);
+    dmap->d_dma->d_mcr.mcr_flags = 0;
+    q->q_crp = crp;
+
+    crd1 = crp->crp_desc;
+    if (crd1 == NULL) {
+        err = EINVAL;
+        goto errout;
+    }
+    crd2 = crd1->crd_next;
+
+    if (crd2 == NULL) {
+        if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
+            crd1->crd_alg == CRYPTO_SHA1_HMAC) {
+            maccrd = crd1;
+            enccrd = NULL;
+        } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
+            crd1->crd_alg == CRYPTO_3DES_CBC ||
+            crd1->crd_alg == CRYPTO_AES_CBC) {
+            maccrd = NULL;
+            enccrd = crd1;
+        } else {
+            err = EINVAL;
+            goto errout;
+        }
+    } else {
+        if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
+            crd1->crd_alg == CRYPTO_SHA1_HMAC) &&
+            (crd2->crd_alg == CRYPTO_DES_CBC ||
+            crd2->crd_alg == CRYPTO_3DES_CBC ||
+            crd2->crd_alg == CRYPTO_AES_CBC) &&
+            ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
+            maccrd = crd1;
+            enccrd = crd2;
+        } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
+            crd1->crd_alg == CRYPTO_3DES_CBC ||
+            crd1->crd_alg == CRYPTO_AES_CBC) &&
+            (crd2->crd_alg == CRYPTO_MD5_HMAC ||
+            crd2->crd_alg == CRYPTO_SHA1_HMAC) &&
+            (crd1->crd_flags & CRD_F_ENCRYPT)) {
+            enccrd = crd1;
+            maccrd = crd2;
+        } else {
+            /*
+             * We cannot order the ubsec as requested
+             */
+            printk(KERN_ERR DRV_MODULE_NAME ": got wrong algorithm/signature order.\n");
+            err = EINVAL;
+            goto errout;
+        }
+    }
+
+    /* Encryption/Decryption requested */
+    if (enccrd) {
+        encoffset = enccrd->crd_skip;
+
+        if (enccrd->crd_alg == CRYPTO_DES_CBC ||
+            enccrd->crd_alg == CRYPTO_3DES_CBC)
+        {
+            ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_3DES);
+            ctx.pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC_DES);
+            ivsize = 8;     /* [3]DES uses 64bit IVs */
+        } else {
+            ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_AES);
+            ctx.pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC_AES);
+            ivsize = 16;    /* AES uses 128bit IVs / [3]DES 64bit IVs */
+
+            switch(ses->ses_keysize)
+            {
+                case 128:
+                    ctx.pc_flags |= htole16(UBS_PKTCTX_AES128);
+                    break;
+                case 192:
+                    ctx.pc_flags |= htole16(UBS_PKTCTX_AES192);
+                    break;
+                case 256:
+                    ctx.pc_flags |= htole16(UBS_PKTCTX_AES256);
+                    break;
+                default:
+                    DPRINTF("invalid AES key size: %d\n", ses->ses_keysize);
+                    err = EINVAL;
+                    goto errout;
+            }
+        }
+
+        if (enccrd->crd_flags & CRD_F_ENCRYPT) {
+            /* Direction: Outbound */
+
+            q->q_flags |= UBSEC_QFLAGS_COPYOUTIV;
+
+            if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
+                bcopy(enccrd->crd_iv, ctx.pc_iv, ivsize);
+            } else {
+                for(i=0; i < (ivsize / 4); i++)
+                    ctx.pc_iv[i] = ses->ses_iv[i];
+            }
+
+            /* If there is no IV in the buffer -> copy it here */
+            if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
+                if (crp->crp_flags & CRYPTO_F_SKBUF)
+                    /*
+                    m_copyback(q->q_src_m,
+                        enccrd->crd_inject,
+                        8, ctx.pc_iv);
+                    */
+                    crypto_copyback(crp->crp_flags, (caddr_t)q->q_src_m,
+                        enccrd->crd_inject, ivsize, (caddr_t)ctx.pc_iv);
+                else if (crp->crp_flags & CRYPTO_F_IOV)
+                    /*
+                    cuio_copyback(q->q_src_io,
+                        enccrd->crd_inject,
+                        8, ctx.pc_iv);
+                    */
+                    crypto_copyback(crp->crp_flags, (caddr_t)q->q_src_io,
+                        enccrd->crd_inject, ivsize, (caddr_t)ctx.pc_iv);
+            }
+        } else {
+            /* Direction: Inbound */
+
+            ctx.pc_flags |= htole16(UBS_PKTCTX_INBOUND);
+
+            if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
+                bcopy(enccrd->crd_iv, ctx.pc_iv, ivsize);
+            else if (crp->crp_flags & CRYPTO_F_SKBUF)
+                /*
+                m_copydata(q->q_src_m, enccrd->crd_inject,
+                    8, (caddr_t)ctx.pc_iv);
+                */
+                crypto_copydata(crp->crp_flags, (caddr_t)q->q_src_m,
+                    enccrd->crd_inject, ivsize,
+                    (caddr_t)ctx.pc_iv);
+            else if (crp->crp_flags & CRYPTO_F_IOV)
+                /*
+                cuio_copydata(q->q_src_io,
+                    enccrd->crd_inject, 8,
+                    (caddr_t)ctx.pc_iv);
+                */
+                crypto_copydata(crp->crp_flags, (caddr_t)q->q_src_io,
+                    enccrd->crd_inject, ivsize,
+                    (caddr_t)ctx.pc_iv);
+
+        }
+
+        /* Even though key & IV sizes differ from cipher to cipher
+         * copy / swap the full array lengths. Let the compiler unroll
+         * the loop to increase the cpu pipeline performance... */
+        for(i=0; i < 8; i++)
+            ctx.pc_key[i] = ses->ses_key[i];
+        for(i=0; i < 4; i++)
+            SWAP32(ctx.pc_iv[i]);
+    }
+
+    /* Authentication requested */
+    if (maccrd) {
+        macoffset = maccrd->crd_skip;
+
+        if (maccrd->crd_alg == CRYPTO_MD5_HMAC)
+            ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_MD5);
+        else
+            ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_SHA1);
+
+        for (i = 0; i < 5; i++) {
+            ctx.pc_hminner[i] = ses->ses_hminner[i];
+            ctx.pc_hmouter[i] = ses->ses_hmouter[i];
+
+            HTOLE32(ctx.pc_hminner[i]);
+            HTOLE32(ctx.pc_hmouter[i]);
+        }
+    }
+
+    if (enccrd && maccrd) {
+        /*
+         * ubsec cannot handle packets where the end of encryption
+         * and authentication are not the same, or where the
+         * encrypted part begins before the authenticated part.
+         */
+        if (((encoffset + enccrd->crd_len) !=
+            (macoffset + maccrd->crd_len)) ||
+            (enccrd->crd_skip < maccrd->crd_skip)) {
+            err = EINVAL;
+            goto errout;
+        }
+        sskip = maccrd->crd_skip;
+        cpskip = dskip = enccrd->crd_skip;
+        stheend = maccrd->crd_len;
+        dtheend = enccrd->crd_len;
+        coffset = enccrd->crd_skip - maccrd->crd_skip;
+        cpoffset = cpskip + dtheend;
+#ifdef UBSEC_DEBUG
+        DPRINTF("mac: skip %d, len %d, inject %d\n",
+            maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject);
+        DPRINTF("enc: skip %d, len %d, inject %d\n",
+            enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject);
+        DPRINTF("src: skip %d, len %d\n", sskip, stheend);
+        DPRINTF("dst: skip %d, len %d\n", dskip, dtheend);
+        DPRINTF("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n",
+            coffset, stheend, cpskip, cpoffset);
+#endif
+    } else {
+        cpskip = dskip = sskip = macoffset + encoffset;
+        dtheend = stheend = (enccrd)?enccrd->crd_len:maccrd->crd_len;
+        cpoffset = cpskip + dtheend;
+        coffset = 0;
+    }
+    ctx.pc_offset = htole16(coffset >> 2);
+
+#if 0
+    if (bus_dmamap_create(sc->sc_dmat, 0xfff0, UBS_MAX_SCATTER,
+        0xfff0, 0, BUS_DMA_NOWAIT, &q->q_src_map) != 0) {
+        err = ENOMEM;
+        goto errout;
+    }
+#endif
+
+    if (crp->crp_flags & CRYPTO_F_SKBUF) {
+#if 0
+        if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map,
+            q->q_src_m, BUS_DMA_NOWAIT) != 0) {
+            bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
+            q->q_src_map = NULL;
+            err = ENOMEM;
+            goto errout;
+        }
+#endif
+        err = dma_map_skb(sc, q->q_src_map, q->q_src_m, &q->q_src_len);
+        if (unlikely(err != 0))
+            goto errout;
+
+    } else if (crp->crp_flags & CRYPTO_F_IOV) {
+#if 0
+        if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map,
+            q->q_src_io, BUS_DMA_NOWAIT) != 0) {
+            bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
+            q->q_src_map = NULL;
+            err = ENOMEM;
+            goto errout;
+        }
+#endif
+        err = dma_map_uio(sc, q->q_src_map, q->q_src_io, &q->q_src_len);
+        if (unlikely(err != 0))
+           goto errout;
+    }
+
+    /*
+     * Check alignment
+     */
+    nicealign = ubsec_dmamap_aligned(sc, q->q_src_map, q->q_src_len);
+
+    dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend);
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("src skip: %d\n", sskip);
+#endif
+    for (i = j = 0; i < q->q_src_len; i++) {
+        struct ubsec_pktbuf *pb;
+        size_t packl = q->q_src_map[i].dma_size;
+        dma_addr_t packp = q->q_src_map[i].dma_paddr;
+
+        if (sskip >= packl) {
+            sskip -= packl;
+            continue;
+        }
+
+        packl -= sskip;
+        packp += sskip;
+        sskip = 0;
+
+        /* maximum fragment size is 0xfffc */
+        if (packl > 0xfffc) {
+            DPRINTF("Error: fragment size is bigger than 0xfffc.\n");
+            err = EIO;
+            goto errout;
+        }
+
+        if (j == 0)
+            pb = &dmap->d_dma->d_mcr.mcr_ipktbuf;
+        else
+            pb = &dmap->d_dma->d_sbuf[j - 1];
+
+        pb->pb_addr = htole32(packp);
+
+        if (stheend) {
+            if (packl > stheend) {
+                pb->pb_len = htole32(stheend);
+                stheend = 0;
+            } else {
+                pb->pb_len = htole32(packl);
+                stheend -= packl;
+            }
+        } else
+            pb->pb_len = htole32(packl);
+
+        if ((i + 1) == q->q_src_len)
+            pb->pb_next = 0;
+        else
+            pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
+                offsetof(struct ubsec_dmachunk, d_sbuf[j]));
+        j++;
+    }
+
+    if (enccrd == NULL && maccrd != NULL) {
+        /* Authentication only */
+        dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0;
+        dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0;
+        dmap->d_dma->d_mcr.mcr_opktbuf.pb_next =
+            htole32(dmap->d_alloc.dma_paddr +
+            offsetof(struct ubsec_dmachunk, d_macbuf[0]));
+#ifdef UBSEC_DEBUG
+        DPRINTF("opkt: %x %x %x\n",
+            dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr,
+            dmap->d_dma->d_mcr.mcr_opktbuf.pb_len,
+            dmap->d_dma->d_mcr.mcr_opktbuf.pb_next);
+#endif
+    } else {
+        if (crp->crp_flags & CRYPTO_F_IOV) {
+            if (!nicealign) {
+                err = EINVAL;
+                goto errout;
+            }
+#if 0
+            if (bus_dmamap_create(sc->sc_dmat, 0xfff0,
+                UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT,
+                &q->q_dst_map) != 0) {
+                err = ENOMEM;
+                goto errout;
+            }
+            if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map,
+                q->q_dst_io, BUS_DMA_NOWAIT) != 0) {
+                bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
+                q->q_dst_map = NULL;
+                goto errout;
+            }
+#endif
+
+            /* HW shall copy the result into the source memory */
+            for(i = 0; i < q->q_src_len; i++)
+                q->q_dst_map[i] = q->q_src_map[i];
+
+            q->q_dst_len = q->q_src_len;
+            q->q_has_dst = 0;
+
+        } else if (crp->crp_flags & CRYPTO_F_SKBUF) {
+            if (nicealign) {
+
+                /* HW shall copy the result into the source memory */
+                q->q_dst_m = q->q_src_m;
+                for(i = 0; i < q->q_src_len; i++)
+                    q->q_dst_map[i] = q->q_src_map[i];
+
+                q->q_dst_len = q->q_src_len;
+                q->q_has_dst = 0;
+
+            } else {
+#ifdef NOTYET
+                int totlen, len;
+                struct sk_buff *m, *top, **mp;
+
+                totlen = q->q_src_map->dm_mapsize;
+                if (q->q_src_m->m_flags & M_PKTHDR) {
+                    len = MHLEN;
+                    MGETHDR(m, M_DONTWAIT, MT_DATA);
+                } else {
+                    len = MLEN;
+                    MGET(m, M_DONTWAIT, MT_DATA);
+                }
+                if (m == NULL) {
+                    err = ENOMEM;
+                    goto errout;
+                }
+                if (len == MHLEN)
+                    M_DUP_PKTHDR(m, q->q_src_m);
+                if (totlen >= MINCLSIZE) {
+                    MCLGET(m, M_DONTWAIT);
+                    if (m->m_flags & M_EXT)
+                        len = MCLBYTES;
+                }
+                m->m_len = len;
+                top = NULL;
+                mp = &top;
+
+                while (totlen > 0) {
+                    if (top) {
+                        MGET(m, M_DONTWAIT, MT_DATA);
+                        if (m == NULL) {
+                            m_freem(top);
+                            err = ENOMEM;
+                            goto errout;
+                        }
+                        len = MLEN;
+                    }
+                    if (top && totlen >= MINCLSIZE) {
+                        MCLGET(m, M_DONTWAIT);
+                        if (m->m_flags & M_EXT)
+                            len = MCLBYTES;
+                    }
+                    m->m_len = len = min(totlen, len);
+                    totlen -= len;
+                    *mp = m;
+                    mp = &m->m_next;
+                }
+                q->q_dst_m = top;
+                ubsec_mcopy(q->q_src_m, q->q_dst_m,
+                    cpskip, cpoffset);
+                if (bus_dmamap_create(sc->sc_dmat, 0xfff0,
+                    UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT,
+                    &q->q_dst_map) != 0) {
+                    err = ENOMEM;
+                    goto errout;
+                }
+                if (bus_dmamap_load_mbuf(sc->sc_dmat,
+                    q->q_dst_map, q->q_dst_m,
+                    BUS_DMA_NOWAIT) != 0) {
+                    bus_dmamap_destroy(sc->sc_dmat,
+                    q->q_dst_map);
+                    q->q_dst_map = NULL;
+                    err = ENOMEM;
+                    goto errout;
+                }
+#else
+                device_printf(sc->sc_dev,
+                    "%s,%d: CRYPTO_F_SKBUF unaligned not implemented\n",
+                    __FILE__, __LINE__);
+                err = EINVAL;
+                goto errout;
+#endif
+            }
+        } else {
+            err = EINVAL;
+            goto errout;
+        }
+
+#ifdef UBSEC_DEBUG
+        DPRINTF("dst skip: %d\n", dskip);
+#endif
+        for (i = j = 0; i < q->q_dst_len; i++) {
+            struct ubsec_pktbuf *pb;
+            size_t packl = q->q_dst_map[i].dma_size;
+            dma_addr_t packp = q->q_dst_map[i].dma_paddr;
+
+            if (dskip >= packl) {
+                dskip -= packl;
+                continue;
+            }
+
+            packl -= dskip;
+            packp += dskip;
+            dskip = 0;
+
+            if (packl > 0xfffc) {
+                DPRINTF("Error: fragment size is bigger than 0xfffc.\n");
+                err = EIO;
+                goto errout;
+            }
+
+            if (j == 0)
+                pb = &dmap->d_dma->d_mcr.mcr_opktbuf;
+            else
+                pb = &dmap->d_dma->d_dbuf[j - 1];
+
+            pb->pb_addr = htole32(packp);
+
+            if (dtheend) {
+                if (packl > dtheend) {
+                    pb->pb_len = htole32(dtheend);
+                    dtheend = 0;
+                } else {
+                    pb->pb_len = htole32(packl);
+                    dtheend -= packl;
+                }
+            } else
+                pb->pb_len = htole32(packl);
+
+            if ((i + 1) == q->q_dst_len) {
+                if (maccrd)
+                    /* Authentication:
+                     * The last fragment of the output buffer
+                     * contains the HMAC. */
+                    pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
+                        offsetof(struct ubsec_dmachunk, d_macbuf[0]));
+                else
+                    pb->pb_next = 0;
+            } else
+                pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
+                    offsetof(struct ubsec_dmachunk, d_dbuf[j]));
+            j++;
+        }
+    }
+
+    dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr +
+        offsetof(struct ubsec_dmachunk, d_ctx));
+
+    if (sc->sc_flags & UBS_FLAGS_LONGCTX) {
+        /* new Broadcom cards with dynamic long command context structure */
+
+        if (enccrd != NULL &&
+            enccrd->crd_alg == CRYPTO_AES_CBC)
+        {
+            struct ubsec_pktctx_aes128 *ctxaes128;
+            struct ubsec_pktctx_aes192 *ctxaes192;
+            struct ubsec_pktctx_aes256 *ctxaes256;
+
+            switch(ses->ses_keysize)
+            {
+                /* AES 128bit */
+                case 128:
+                ctxaes128 = (struct ubsec_pktctx_aes128 *)
+                    (dmap->d_alloc.dma_vaddr +
+                    offsetof(struct ubsec_dmachunk, d_ctx));
+
+                ctxaes128->pc_len = htole16(sizeof(struct ubsec_pktctx_aes128));
+                ctxaes128->pc_type = ctx.pc_type;
+                ctxaes128->pc_flags = ctx.pc_flags;
+                ctxaes128->pc_offset = ctx.pc_offset;
+                for (i = 0; i < 4; i++)
+                    ctxaes128->pc_aeskey[i] = ctx.pc_key[i];
+                for (i = 0; i < 5; i++)
+                    ctxaes128->pc_hminner[i] = ctx.pc_hminner[i];
+                for (i = 0; i < 5; i++)
+                    ctxaes128->pc_hmouter[i] = ctx.pc_hmouter[i];
+                for (i = 0; i < 4; i++)
+                    ctxaes128->pc_iv[i] = ctx.pc_iv[i];
+                break;
+
+                /* AES 192bit */
+                case 192:
+                ctxaes192 = (struct ubsec_pktctx_aes192 *)
+                    (dmap->d_alloc.dma_vaddr +
+                    offsetof(struct ubsec_dmachunk, d_ctx));
+
+                ctxaes192->pc_len = htole16(sizeof(struct ubsec_pktctx_aes192));
+                ctxaes192->pc_type = ctx.pc_type;
+                ctxaes192->pc_flags = ctx.pc_flags;
+                ctxaes192->pc_offset = ctx.pc_offset;
+                for (i = 0; i < 6; i++)
+                    ctxaes192->pc_aeskey[i] = ctx.pc_key[i];
+                for (i = 0; i < 5; i++)
+                    ctxaes192->pc_hminner[i] = ctx.pc_hminner[i];
+                for (i = 0; i < 5; i++)
+                    ctxaes192->pc_hmouter[i] = ctx.pc_hmouter[i];
+                for (i = 0; i < 4; i++)
+                    ctxaes192->pc_iv[i] = ctx.pc_iv[i];
+                break;
+
+                /* AES 256bit */
+                case 256:
+                ctxaes256 = (struct ubsec_pktctx_aes256 *)
+                    (dmap->d_alloc.dma_vaddr +
+                    offsetof(struct ubsec_dmachunk, d_ctx));
+
+                ctxaes256->pc_len = htole16(sizeof(struct ubsec_pktctx_aes256));
+                ctxaes256->pc_type = ctx.pc_type;
+                ctxaes256->pc_flags = ctx.pc_flags;
+                ctxaes256->pc_offset = ctx.pc_offset;
+                for (i = 0; i < 8; i++)
+                    ctxaes256->pc_aeskey[i] = ctx.pc_key[i];
+                for (i = 0; i < 5; i++)
+                    ctxaes256->pc_hminner[i] = ctx.pc_hminner[i];
+                for (i = 0; i < 5; i++)
+                    ctxaes256->pc_hmouter[i] = ctx.pc_hmouter[i];
+                for (i = 0; i < 4; i++)
+                    ctxaes256->pc_iv[i] = ctx.pc_iv[i];
+                break;
+
+            }
+        } else {
+            /*
+             * [3]DES / MD5_HMAC / SHA1_HMAC
+             *
+             * MD5_HMAC / SHA1_HMAC can use the IPSEC 3DES operation without
+             * encryption.
+             */
+            struct ubsec_pktctx_des *ctxdes;
+
+            ctxdes = (struct ubsec_pktctx_des *)(dmap->d_alloc.dma_vaddr +
+                offsetof(struct ubsec_dmachunk, d_ctx));
+
+            ctxdes->pc_len = htole16(sizeof(struct ubsec_pktctx_des));
+            ctxdes->pc_type = ctx.pc_type;
+            ctxdes->pc_flags = ctx.pc_flags;
+            ctxdes->pc_offset = ctx.pc_offset;
+            for (i = 0; i < 6; i++)
+                ctxdes->pc_deskey[i] = ctx.pc_key[i];
+            for (i = 0; i < 5; i++)
+                ctxdes->pc_hminner[i] = ctx.pc_hminner[i];
+            for (i = 0; i < 5; i++)
+                ctxdes->pc_hmouter[i] = ctx.pc_hmouter[i];
+            ctxdes->pc_iv[0] = ctx.pc_iv[0];
+            ctxdes->pc_iv[1] = ctx.pc_iv[1];
+        }
+    } else
+    {
+        /* old Broadcom card with fixed small command context structure */
+
+        /*
+         * [3]DES / MD5_HMAC / SHA1_HMAC
+         */
+        struct ubsec_pktctx *ctxs;
+
+        ctxs = (struct ubsec_pktctx *)(dmap->d_alloc.dma_vaddr +
+                    offsetof(struct ubsec_dmachunk, d_ctx));
+
+        /* transform generic context into small context */
+        for (i = 0; i < 6; i++)
+            ctxs->pc_deskey[i] = ctx.pc_key[i];
+        for (i = 0; i < 5; i++)
+            ctxs->pc_hminner[i] = ctx.pc_hminner[i];
+        for (i = 0; i < 5; i++)
+            ctxs->pc_hmouter[i] = ctx.pc_hmouter[i];
+        ctxs->pc_iv[0] = ctx.pc_iv[0];
+        ctxs->pc_iv[1] = ctx.pc_iv[1];
+        ctxs->pc_flags = ctx.pc_flags;
+        ctxs->pc_offset = ctx.pc_offset;
+    }
+
+#ifdef UBSEC_VERBOSE_DEBUG
+    DPRINTF("spin_lock_irqsave\n");
+#endif
+    spin_lock_irqsave(&sc->sc_ringmtx, flags);
+    //spin_lock_irq(&sc->sc_ringmtx);
+
+    BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next);
+    sc->sc_nqueue++;
+    ubsecstats.hst_ipackets++;
+    ubsecstats.hst_ibytes += stheend;
+    ubsec_feed(sc);
+
+#ifdef UBSEC_VERBOSE_DEBUG
+    DPRINTF("spin_unlock_irqrestore\n");
+#endif
+    spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
+    //spin_unlock_irq(&sc->sc_ringmtx);
+
+    return (0);
+
+errout:
+    if (q != NULL) {
+#ifdef NOTYET
+        if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
+            m_freem(q->q_dst_m);
+#endif
+
+        if ((q->q_has_dst == 1) && q->q_dst_len > 0) {
+#if 0
+            bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
+            bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
+#endif
+            dma_unmap(sc, q->q_dst_map, q->q_dst_len);
+        }
+        if (q->q_src_len > 0) {
+#if 0
+            bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
+            bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
+#endif
+            dma_unmap(sc, q->q_src_map, q->q_src_len);
+        }
+
+#ifdef UBSEC_VERBOSE_DEBUG
+        DPRINTF("spin_lock_irqsave\n");
+#endif
+        spin_lock_irqsave(&sc->sc_ringmtx, flags);
+        //spin_lock_irq(&sc->sc_ringmtx);
+
+        BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
+
+#ifdef UBSEC_VERBOSE_DEBUG
+       DPRINTF("spin_unlock_irqrestore\n");
+#endif
+        spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
+        //spin_unlock_irq(&sc->sc_ringmtx);
+
+    }
+    if (err == EINVAL)
+        ubsecstats.hst_invalid++;
+    else
+        ubsecstats.hst_nomem++;
+errout2:
+    crp->crp_etype = err;
+    crypto_done(crp);
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s() err = %x\n", __FUNCTION__, err);
+#endif
+
+    return (0);
+}
+
+void
+ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q)
+{
+    struct cryptop *crp = (struct cryptop *)q->q_crp;
+    struct cryptodesc *crd;
+    struct ubsec_dma *dmap = q->q_dma;
+    int ivsize = 8;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    ubsecstats.hst_opackets++;
+    ubsecstats.hst_obytes += dmap->d_alloc.dma_size;
+
+#if 0
+    bus_dmamap_sync(sc->sc_dmat, dmap->d_alloc.dma_map, 0,
+        dmap->d_alloc.dma_map->dm_mapsize,
+        BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+    if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
+        bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
+            0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
+        bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
+        bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
+    }
+    bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
+        0, q->q_src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
+    bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
+    bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
+#endif
+
+    if ((q->q_has_dst == 1) && q->q_dst_len > 0)
+        dma_unmap(sc, q->q_dst_map, q->q_dst_len);
+
+    dma_unmap(sc, q->q_src_map, q->q_src_len);
+
+#ifdef NOTYET
+    if ((crp->crp_flags & CRYPTO_F_SKBUF) && (q->q_src_m != q->q_dst_m)) {
+        m_freem(q->q_src_m);
+        crp->crp_buf = (caddr_t)q->q_dst_m;
+    }
+#endif
+
+    /* copy out IV for future use */
+    if (q->q_flags & UBSEC_QFLAGS_COPYOUTIV) {
+        for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
+            if (crd->crd_alg != CRYPTO_DES_CBC &&
+                crd->crd_alg != CRYPTO_3DES_CBC &&
+                crd->crd_alg != CRYPTO_AES_CBC)
+                continue;
+
+            if (crd->crd_alg == CRYPTO_AES_CBC)
+                ivsize = 16;
+            else
+                ivsize = 8;
+
+            if (crp->crp_flags & CRYPTO_F_SKBUF)
+#if 0
+                m_copydata((struct sk_buff *)crp->crp_buf,
+                    crd->crd_skip + crd->crd_len - 8, 8,
+                    (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
+#endif
+                crypto_copydata(crp->crp_flags, (caddr_t)crp->crp_buf,
+                    crd->crd_skip + crd->crd_len - ivsize, ivsize,
+                    (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
+
+            else if (crp->crp_flags & CRYPTO_F_IOV) {
+#if 0
+                cuio_copydata((struct uio *)crp->crp_buf,
+                    crd->crd_skip + crd->crd_len - 8, 8,
+                    (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
+#endif
+                crypto_copydata(crp->crp_flags, (caddr_t)crp->crp_buf,
+                    crd->crd_skip + crd->crd_len - ivsize, ivsize,
+                    (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
+
+            }
+            break;
+        }
+    }
+
+    for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
+        if (crd->crd_alg != CRYPTO_MD5_HMAC &&
+            crd->crd_alg != CRYPTO_SHA1_HMAC)
+            continue;
+#if 0
+        if (crp->crp_flags & CRYPTO_F_SKBUF)
+            m_copyback((struct sk_buff *)crp->crp_buf,
+                crd->crd_inject, 12,
+                dmap->d_dma->d_macbuf);
+#endif
+#if 0
+            /* BUG? it does not honor the mac len.. */
+            crypto_copyback(crp->crp_flags, crp->crp_buf,
+                crd->crd_inject, 12,
+                (caddr_t)dmap->d_dma->d_macbuf);
+#endif
+            crypto_copyback(crp->crp_flags, crp->crp_buf,
+                crd->crd_inject,
+                sc->sc_sessions[q->q_sesn].ses_mlen,
+                (caddr_t)dmap->d_dma->d_macbuf);
+#if 0
+        else if (crp->crp_flags & CRYPTO_F_IOV && crp->crp_mac)
+            bcopy((caddr_t)dmap->d_dma->d_macbuf,
+                crp->crp_mac, 12);
+#endif
+        break;
+    }
+    BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
+    crypto_done(crp);
+}
+
+void
+ubsec_mcopy(struct sk_buff *srcm, struct sk_buff *dstm, int hoffset, int toffset)
+{
+    int i, j, dlen, slen;
+    caddr_t dptr, sptr;
+
+    j = 0;
+    sptr = srcm->data;
+    slen = srcm->len;
+    dptr = dstm->data;
+    dlen = dstm->len;
+
+    while (1) {
+        for (i = 0; i < min(slen, dlen); i++) {
+            if (j < hoffset || j >= toffset)
+                *dptr++ = *sptr++;
+            slen--;
+            dlen--;
+            j++;
+        }
+        if (slen == 0) {
+            srcm = srcm->next;
+            if (srcm == NULL)
+                return;
+            sptr = srcm->data;
+            slen = srcm->len;
+        }
+        if (dlen == 0) {
+            dstm = dstm->next;
+            if (dstm == NULL)
+                return;
+            dptr = dstm->data;
+            dlen = dstm->len;
+        }
+    }
+}
+
+int
+ubsec_dma_malloc(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma,
+    size_t size, int mapflags)
+{
+    dma->dma_vaddr = dma_alloc_coherent(sc->sc_dv,
+        size, &dma->dma_paddr, GFP_KERNEL);
+
+    if (likely(dma->dma_vaddr))
+    {
+        dma->dma_size = size;
+        return (0);
+    }
+
+    DPRINTF("could not allocate %d bytes of coherent memory.\n", size);
+
+    return (1);
+}
+
+void
+ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma)
+{
+    dma_free_coherent(sc->sc_dv, dma->dma_size, dma->dma_vaddr,
+        dma->dma_paddr);
+}
+
+/*
+ * Resets the board.  Values in the regesters are left as is
+ * from the reset (i.e. initial values are assigned elsewhere).
+ */
+void
+ubsec_reset_board(struct ubsec_softc *sc)
+{
+    volatile u_int32_t ctrl;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+    DPRINTF("Send reset signal to chip.\n");
+
+    ctrl = READ_REG(sc, BS_CTRL);
+    ctrl |= BS_CTRL_RESET;
+    WRITE_REG(sc, BS_CTRL, ctrl);
+
+    /*
+     * Wait aprox. 30 PCI clocks = 900 ns = 0.9 us
+     */
+    DELAY(10);
+}
+
+/*
+ * Init Broadcom registers
+ */
+void
+ubsec_init_board(struct ubsec_softc *sc)
+{
+    u_int32_t ctrl;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+    DPRINTF("Initialize chip.\n");
+
+    ctrl = READ_REG(sc, BS_CTRL);
+    ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64);
+    ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT | BS_CTRL_DMAERR;
+
+    WRITE_REG(sc, BS_CTRL, ctrl);
+
+    /* Set chip capabilities (BCM5365P) */
+    sc->sc_flags |= UBS_FLAGS_LONGCTX | UBS_FLAGS_AES;
+}
+
+/*
+ * Clean up after a chip crash.
+ * It is assumed that the caller has spin_lock_irq(sc_ringmtx).
+ */
+void
+ubsec_cleanchip(struct ubsec_softc *sc)
+{
+    struct ubsec_q *q;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+    DPRINTF("Clean up queues after chip crash.\n");
+
+    while (!BSD_SIMPLEQ_EMPTY(&sc->sc_qchip)) {
+        q = BSD_SIMPLEQ_FIRST(&sc->sc_qchip);
+        BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next);
+        ubsec_free_q(sc, q);
+    }
+}
+
+/*
+ * free a ubsec_q
+ * It is assumed that the caller has spin_lock_irq(sc_ringmtx).
+ */
+int
+ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q)
+{
+    struct ubsec_q *q2;
+    struct cryptop *crp;
+    int npkts;
+    int i;
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+
+    npkts = q->q_nstacked_mcrs;
+
+    for (i = 0; i < npkts; i++) {
+        if(q->q_stacked_mcr[i]) {
+            q2 = q->q_stacked_mcr[i];
+
+            if ((q2->q_dst_m != NULL) && (q2->q_src_m != q2->q_dst_m))
+#ifdef NOTYET
+                m_freem(q2->q_dst_m);
+#else
+                printk(KERN_ERR "%s,%d: SKB not supported\n", __FILE__, __LINE__);
+#endif
+
+            crp = (struct cryptop *)q2->q_crp;
+
+            BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next);
+
+            crp->crp_etype = EFAULT;
+            crypto_done(crp);
+        } else {
+            break;
+        }
+    }
+
+    /*
+     * Free header MCR
+     */
+    if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
+#ifdef NOTYET
+        m_freem(q->q_dst_m);
+#else
+        printk(KERN_ERR "%s,%d: SKB not supported\n", __FILE__, __LINE__);
+#endif
+
+    crp = (struct cryptop *)q->q_crp;
+
+    BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
+
+    crp->crp_etype = EFAULT;
+    crypto_done(crp);
+    return(0);
+}
+
+/*
+ * Routine to reset the chip and clean up.
+ * It is assumed that the caller has spin_lock_irq(sc_ringmtx).
+ */
+void
+ubsec_totalreset(struct ubsec_softc *sc)
+{
+
+#ifdef UBSEC_DEBUG
+    DPRINTF("%s()\n", __FUNCTION__);
+#endif
+    DPRINTF("initiate total chip reset.. \n");
+    ubsec_reset_board(sc);
+    ubsec_init_board(sc);
+    ubsec_cleanchip(sc);
+}
+
+void
+ubsec_dump_pb(struct ubsec_pktbuf *pb)
+{
+    printf("addr 0x%x (0x%x) next 0x%x\n",
+        pb->pb_addr, pb->pb_len, pb->pb_next);
+}
+
+void
+ubsec_dump_mcr(struct ubsec_mcr *mcr)
+{
+    struct ubsec_mcr_add *ma;
+    int i;
+
+    printf("MCR:\n");
+    printf(" pkts: %u, flags 0x%x\n",
+        letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags));
+    ma = (struct ubsec_mcr_add *)&mcr->mcr_cmdctxp;
+    for (i = 0; i < letoh16(mcr->mcr_pkts); i++) {
+        printf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n", i,
+            letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen),
+            letoh16(ma->mcr_reserved));
+        printf(" %d: ipkt ", i);
+        ubsec_dump_pb(&ma->mcr_ipktbuf);
+        printf(" %d: opkt ", i);
+        ubsec_dump_pb(&ma->mcr_opktbuf);
+        ma++;
+    }
+    printf("END MCR\n");
+}
+
+static int __init mod_init(void) {
+        return ssb_driver_register(&ubsec_ssb_driver);
+}
+
+static void __exit mod_exit(void) {
+        ssb_driver_unregister(&ubsec_ssb_driver);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+// Meta information
+MODULE_AUTHOR("Daniel Mueller <daniel@danm.de>");
+MODULE_LICENSE("BSD");
+MODULE_DESCRIPTION("OCF driver for BCM5365P IPSec Core");
+MODULE_VERSION(DRV_MODULE_VERSION);
diff --git a/crypto/ocf/ubsec_ssb/ubsecreg.h b/crypto/ocf/ubsec_ssb/ubsecreg.h
new file mode 100644
index 000000000000..dafac5b4138c
--- /dev/null
+++ b/crypto/ocf/ubsec_ssb/ubsecreg.h
@@ -0,0 +1,233 @@
+
+/*
+ * Copyright (c) 2008 Daniel Mueller (daniel@danm.de)
+ * Copyright (c) 2000 Theo de Raadt
+ * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+
+/*
+ * Register definitions for 5601 BlueSteel Networks Ubiquitous Broadband
+ * Security "uBSec" chip.  Definitions from revision 2.8 of the product
+ * datasheet.
+ */
+
+#define BS_BAR          0x10    /* DMA base address register */
+#define BS_TRDY_TIMEOUT     0x40    /* TRDY timeout */
+#define BS_RETRY_TIMEOUT    0x41    /* DMA retry timeout */
+
+#define UBS_PCI_RTY_SHIFT           8
+#define UBS_PCI_RTY_MASK            0xff
+#define UBS_PCI_RTY(misc) \
+    (((misc) >> UBS_PCI_RTY_SHIFT) & UBS_PCI_RTY_MASK)
+
+#define UBS_PCI_TOUT_SHIFT          0
+#define UBS_PCI_TOUT_MASK           0xff
+#define UBS_PCI_TOUT(misc) \
+    (((misc) >> PCI_TOUT_SHIFT) & PCI_TOUT_MASK)
+
+/*
+ * DMA Control & Status Registers (offset from BS_BAR)
+ */
+#define BS_MCR1     0x20    /* DMA Master Command Record 1 */
+#define BS_CTRL     0x24    /* DMA Control */
+#define BS_STAT     0x28    /* DMA Status */
+#define BS_ERR      0x2c    /* DMA Error Address */
+#define BS_DEV_ID   0x34    /* IPSec Device ID */
+
+/* BS_CTRL - DMA Control */
+#define BS_CTRL_RESET       0x80000000  /* hardware reset, 5805/5820 */
+#define BS_CTRL_MCR2INT     0x40000000  /* enable intr MCR for MCR2 */
+#define BS_CTRL_MCR1INT     0x20000000  /* enable intr MCR for MCR1 */
+#define BS_CTRL_OFM     0x10000000  /* Output fragment mode */
+#define BS_CTRL_BE32        0x08000000  /* big-endian, 32bit bytes */
+#define BS_CTRL_BE64        0x04000000  /* big-endian, 64bit bytes */
+#define BS_CTRL_DMAERR      0x02000000  /* enable intr DMA error */
+#define BS_CTRL_RNG_M       0x01800000  /* RNG mode */
+#define BS_CTRL_RNG_1       0x00000000  /* 1bit rn/one slow clock */
+#define BS_CTRL_RNG_4       0x00800000  /* 1bit rn/four slow clocks */
+#define BS_CTRL_RNG_8       0x01000000  /* 1bit rn/eight slow clocks */
+#define BS_CTRL_RNG_16      0x01800000  /* 1bit rn/16 slow clocks */
+#define BS_CTRL_SWNORM      0x00400000  /* 582[01], sw normalization */
+#define BS_CTRL_FRAG_M      0x0000ffff  /* output fragment size mask */
+#define BS_CTRL_LITTLE_ENDIAN   (BS_CTRL_BE32 | BS_CTRL_BE64)
+
+/* BS_STAT - DMA Status */
+#define BS_STAT_MCR1_BUSY   0x80000000  /* MCR1 is busy */
+#define BS_STAT_MCR1_FULL   0x40000000  /* MCR1 is full */
+#define BS_STAT_MCR1_DONE   0x20000000  /* MCR1 is done */
+#define BS_STAT_DMAERR      0x10000000  /* DMA error */
+#define BS_STAT_MCR2_FULL   0x08000000  /* MCR2 is full */
+#define BS_STAT_MCR2_DONE   0x04000000  /* MCR2 is done */
+#define BS_STAT_MCR1_ALLEMPTY   0x02000000  /* 5821, MCR1 is empty */
+#define BS_STAT_MCR2_ALLEMPTY   0x01000000  /* 5821, MCR2 is empty */
+
+/* BS_ERR - DMA Error Address */
+#define BS_ERR_ADDR     0xfffffffc  /* error address mask */
+#define BS_ERR_READ     0x00000002  /* fault was on read */
+
+struct ubsec_pktctx {
+    u_int32_t   pc_deskey[6];       /* 3DES key */
+    u_int32_t   pc_hminner[5];      /* hmac inner state */
+    u_int32_t   pc_hmouter[5];      /* hmac outer state */
+    u_int32_t   pc_iv[2];       /* [3]DES iv */
+    u_int16_t   pc_flags;       /* flags, below */
+    u_int16_t   pc_offset;      /* crypto offset */
+} __attribute__ ((packed));
+
+#define UBS_PKTCTX_ENC_3DES 0x8000      /* use 3des */
+#define UBS_PKTCTX_ENC_AES  0x8000      /* use aes */
+#define UBS_PKTCTX_ENC_NONE 0x0000      /* no encryption */
+#define UBS_PKTCTX_INBOUND  0x4000      /* inbound packet */
+#define UBS_PKTCTX_AUTH     0x3000      /* authentication mask */
+#define UBS_PKTCTX_AUTH_NONE    0x0000      /* no authentication */
+#define UBS_PKTCTX_AUTH_MD5 0x1000      /* use hmac-md5 */
+#define UBS_PKTCTX_AUTH_SHA1    0x2000      /* use hmac-sha1 */
+#define UBS_PKTCTX_AES128   0x0         /* AES 128bit keys */
+#define UBS_PKTCTX_AES192   0x100       /* AES 192bit keys */
+#define UBS_PKTCTX_AES256   0x200       /* AES 256bit keys */
+
+struct ubsec_pktctx_des {
+    volatile u_int16_t  pc_len;     /* length of ctx struct */
+    volatile u_int16_t  pc_type;    /* context type */
+    volatile u_int16_t  pc_flags;   /* flags, same as above */
+    volatile u_int16_t  pc_offset;  /* crypto/auth offset */
+    volatile u_int32_t  pc_deskey[6];   /* 3DES key */
+    volatile u_int32_t  pc_iv[2];   /* [3]DES iv */
+    volatile u_int32_t  pc_hminner[5];  /* hmac inner state */
+    volatile u_int32_t  pc_hmouter[5];  /* hmac outer state */
+} __attribute__ ((packed));
+
+struct ubsec_pktctx_aes128 {
+    volatile u_int16_t  pc_len;         /* length of ctx struct */
+    volatile u_int16_t  pc_type;        /* context type */
+    volatile u_int16_t  pc_flags;       /* flags, same as above */
+    volatile u_int16_t  pc_offset;      /* crypto/auth offset */
+    volatile u_int32_t  pc_aeskey[4];   /* AES 128bit key */
+    volatile u_int32_t  pc_iv[4];       /* AES iv */
+    volatile u_int32_t  pc_hminner[5];  /* hmac inner state */
+    volatile u_int32_t  pc_hmouter[5];  /* hmac outer state */
+} __attribute__ ((packed));
+
+struct ubsec_pktctx_aes192 {
+    volatile u_int16_t  pc_len;         /* length of ctx struct */
+    volatile u_int16_t  pc_type;        /* context type */
+    volatile u_int16_t  pc_flags;       /* flags, same as above */
+    volatile u_int16_t  pc_offset;      /* crypto/auth offset */
+    volatile u_int32_t  pc_aeskey[6];   /* AES 192bit key */
+    volatile u_int32_t  pc_iv[4];       /* AES iv */
+    volatile u_int32_t  pc_hminner[5];  /* hmac inner state */
+    volatile u_int32_t  pc_hmouter[5];  /* hmac outer state */
+} __attribute__ ((packed));
+
+struct ubsec_pktctx_aes256 {
+    volatile u_int16_t  pc_len;         /* length of ctx struct */
+    volatile u_int16_t  pc_type;        /* context type */
+    volatile u_int16_t  pc_flags;       /* flags, same as above */
+    volatile u_int16_t  pc_offset;      /* crypto/auth offset */
+    volatile u_int32_t  pc_aeskey[8];   /* AES 256bit key */
+    volatile u_int32_t  pc_iv[4];       /* AES iv */
+    volatile u_int32_t  pc_hminner[5];  /* hmac inner state */
+    volatile u_int32_t  pc_hmouter[5];  /* hmac outer state */
+} __attribute__ ((packed));
+
+#define UBS_PKTCTX_TYPE_IPSEC_DES   0x0000
+#define UBS_PKTCTX_TYPE_IPSEC_AES   0x0040
+
+struct ubsec_pktbuf {
+    volatile u_int32_t  pb_addr;    /* address of buffer start */
+    volatile u_int32_t  pb_next;    /* pointer to next pktbuf */
+    volatile u_int32_t  pb_len;     /* packet length */
+} __attribute__ ((packed));
+#define UBS_PKTBUF_LEN      0x0000ffff  /* length mask */
+
+struct ubsec_mcr {
+    volatile u_int16_t  mcr_pkts;   /* #pkts in this mcr */
+    volatile u_int16_t  mcr_flags;  /* mcr flags (below) */
+    volatile u_int32_t  mcr_cmdctxp;    /* command ctx pointer */
+    struct ubsec_pktbuf mcr_ipktbuf;    /* input chain header */
+    volatile u_int16_t  mcr_reserved;
+    volatile u_int16_t  mcr_pktlen;
+    struct ubsec_pktbuf mcr_opktbuf;    /* output chain header */
+} __attribute__ ((packed));
+
+struct ubsec_mcr_add {
+    volatile u_int32_t  mcr_cmdctxp;    /* command ctx pointer */
+    struct ubsec_pktbuf mcr_ipktbuf;    /* input chain header */
+    volatile u_int16_t  mcr_reserved;
+    volatile u_int16_t  mcr_pktlen;
+    struct ubsec_pktbuf mcr_opktbuf;    /* output chain header */
+} __attribute__ ((packed));
+
+#define UBS_MCR_DONE        0x0001      /* mcr has been processed */
+#define UBS_MCR_ERROR       0x0002      /* error in processing */
+#define UBS_MCR_ERRORCODE   0xff00      /* error type */
+
+struct ubsec_ctx_keyop {
+    volatile u_int16_t  ctx_len;    /* command length */
+    volatile u_int16_t  ctx_op;     /* operation code */
+    volatile u_int8_t   ctx_pad[60];    /* padding */
+} __attribute__ ((packed));
+#define UBS_CTXOP_DHPKGEN   0x01        /* dh public key generation */
+#define UBS_CTXOP_DHSSGEN   0x02        /* dh shared secret gen. */
+#define UBS_CTXOP_RSAPUB    0x03        /* rsa public key op */
+#define UBS_CTXOP_RSAPRIV   0x04        /* rsa private key op */
+#define UBS_CTXOP_DSASIGN   0x05        /* dsa signing op */
+#define UBS_CTXOP_DSAVRFY   0x06        /* dsa verification */
+#define UBS_CTXOP_RNGBYPASS 0x41        /* rng direct test mode */
+#define UBS_CTXOP_RNGSHA1   0x42        /* rng sha1 test mode */
+#define UBS_CTXOP_MODADD    0x43        /* modular addition */
+#define UBS_CTXOP_MODSUB    0x44        /* modular subtraction */
+#define UBS_CTXOP_MODMUL    0x45        /* modular multiplication */
+#define UBS_CTXOP_MODRED    0x46        /* modular reduction */
+#define UBS_CTXOP_MODEXP    0x47        /* modular exponentiation */
+#define UBS_CTXOP_MODINV    0x48        /* modular inverse */
+
+struct ubsec_ctx_rngbypass {
+    volatile u_int16_t  rbp_len;    /* command length, 64 */
+    volatile u_int16_t  rbp_op;     /* rng bypass, 0x41 */
+    volatile u_int8_t   rbp_pad[60];    /* padding */
+} __attribute__ ((packed));
+
+/* modexp: C = (M ^ E) mod N */
+struct ubsec_ctx_modexp {
+    volatile u_int16_t  me_len;     /* command length */
+    volatile u_int16_t  me_op;      /* modexp, 0x47 */
+    volatile u_int16_t  me_E_len;   /* E (bits) */
+    volatile u_int16_t  me_N_len;   /* N (bits) */
+    u_int8_t        me_N[2048/8];   /* N */
+} __attribute__ ((packed));
+
+struct ubsec_ctx_rsapriv {
+    volatile u_int16_t  rpr_len;    /* command length */
+    volatile u_int16_t  rpr_op;     /* rsaprivate, 0x04 */
+    volatile u_int16_t  rpr_q_len;  /* q (bits) */
+    volatile u_int16_t  rpr_p_len;  /* p (bits) */
+    u_int8_t        rpr_buf[5 * 1024 / 8];  /* parameters: */
+                        /* p, q, dp, dq, pinv */
+} __attribute__ ((packed));
diff --git a/crypto/ocf/ubsec_ssb/ubsecvar.h b/crypto/ocf/ubsec_ssb/ubsecvar.h
new file mode 100644
index 000000000000..301a57024672
--- /dev/null
+++ b/crypto/ocf/ubsec_ssb/ubsecvar.h
@@ -0,0 +1,227 @@
+
+/*
+ * Copyright (c) 2008 Daniel Mueller (daniel@danm.de)
+ * Copyright (c) 2000 Theo de Raadt
+ * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Effort sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F30602-01-2-0537.
+ *
+ */
+
+/* Maximum queue length */
+#ifndef UBS_MAX_NQUEUE
+#define UBS_MAX_NQUEUE      60
+#endif
+
+#define UBS_MAX_SCATTER     64  /* Maximum scatter/gather depth */
+
+#ifndef UBS_MAX_AGGR
+#define UBS_MAX_AGGR        5   /* Maximum aggregation count */
+#endif
+
+#define UBSEC_CARD(sid)     (((sid) & 0xf0000000) >> 28)
+#define UBSEC_SESSION(sid)  ( (sid) & 0x0fffffff)
+#define UBSEC_SID(crd, sesn)    (((crd) << 28) | ((sesn) & 0x0fffffff))
+
+#define UBS_DEF_RTY     0xff    /* PCI Retry Timeout */
+#define UBS_DEF_TOUT        0xff    /* PCI TRDY Timeout */
+#define UBS_DEF_CACHELINE   0x01    /* Cache Line setting */
+
+#define DEFAULT_HMAC_LEN     12
+
+struct ubsec_dma_alloc {
+    dma_addr_t      dma_paddr;
+    void            *dma_vaddr;
+    /*
+    bus_dmamap_t            dma_map;
+    bus_dma_segment_t       dma_seg;
+    */
+    size_t          dma_size;
+    /*
+    int             dma_nseg;
+    */
+};
+
+struct ubsec_q2 {
+    BSD_SIMPLEQ_ENTRY(ubsec_q2)     q_next;
+    struct ubsec_dma_alloc      q_mcr;
+    struct ubsec_dma_alloc      q_ctx;
+    u_int               q_type;
+};
+
+struct ubsec_q2_rng {
+    struct ubsec_q2         rng_q;
+    struct ubsec_dma_alloc      rng_buf;
+    int             rng_used;
+};
+
+/* C = (M ^ E) mod N */
+#define UBS_MODEXP_PAR_M    0
+#define UBS_MODEXP_PAR_E    1
+#define UBS_MODEXP_PAR_N    2
+struct ubsec_q2_modexp {
+    struct ubsec_q2         me_q;
+    struct cryptkop *       me_krp;
+    struct ubsec_dma_alloc      me_M;
+    struct ubsec_dma_alloc      me_E;
+    struct ubsec_dma_alloc      me_C;
+    struct ubsec_dma_alloc      me_epb;
+    int             me_modbits;
+    int             me_shiftbits;
+    int             me_normbits;
+};
+
+#define UBS_RSAPRIV_PAR_P   0
+#define UBS_RSAPRIV_PAR_Q   1
+#define UBS_RSAPRIV_PAR_DP  2
+#define UBS_RSAPRIV_PAR_DQ  3
+#define UBS_RSAPRIV_PAR_PINV    4
+#define UBS_RSAPRIV_PAR_MSGIN   5
+#define UBS_RSAPRIV_PAR_MSGOUT  6
+struct ubsec_q2_rsapriv {
+    struct ubsec_q2         rpr_q;
+    struct cryptkop *       rpr_krp;
+    struct ubsec_dma_alloc      rpr_msgin;
+    struct ubsec_dma_alloc      rpr_msgout;
+};
+
+#define UBSEC_RNG_BUFSIZ    16      /* measured in 32bit words */
+
+struct ubsec_dmachunk {
+    struct ubsec_mcr    d_mcr;
+    struct ubsec_mcr_add    d_mcradd[UBS_MAX_AGGR-1];
+    struct ubsec_pktbuf d_sbuf[UBS_MAX_SCATTER-1];
+    struct ubsec_pktbuf d_dbuf[UBS_MAX_SCATTER-1];
+    u_int32_t       d_macbuf[5];
+    union {
+        struct ubsec_pktctx_aes256 ctxaes256;
+        struct ubsec_pktctx_aes192 ctxaes192;
+        struct ubsec_pktctx_des ctxdes;
+        struct ubsec_pktctx_aes128 ctxaes128;
+        struct ubsec_pktctx     ctx;
+    } d_ctx;
+};
+
+struct ubsec_dma {
+    BSD_SIMPLEQ_ENTRY(ubsec_dma)    d_next;
+    struct ubsec_dmachunk       *d_dma;
+    struct ubsec_dma_alloc      d_alloc;
+};
+
+#define UBS_FLAGS_KEY       0x01        /* has key accelerator */
+#define UBS_FLAGS_LONGCTX   0x02        /* uses long ipsec ctx */
+#define UBS_FLAGS_BIGKEY    0x04        /* 2048bit keys */
+#define UBS_FLAGS_HWNORM    0x08        /* hardware normalization */
+#define UBS_FLAGS_RNG       0x10        /* hardware rng */
+#define UBS_FLAGS_AES       0x20        /* hardware AES support */
+
+struct ubsec_q {
+    BSD_SIMPLEQ_ENTRY(ubsec_q)      q_next;
+    int             q_nstacked_mcrs;
+    struct ubsec_q          *q_stacked_mcr[UBS_MAX_AGGR-1];
+    struct cryptop          *q_crp;
+    struct ubsec_dma        *q_dma;
+
+    //struct mbuf           *q_src_m, *q_dst_m;
+    struct sk_buff      *q_src_m, *q_dst_m;
+    struct uio          *q_src_io, *q_dst_io;
+
+    /*
+    bus_dmamap_t            q_src_map;
+    bus_dmamap_t            q_dst_map;
+    */
+
+    /* DMA addresses for In-/Out packages */
+    int q_src_len;
+    int q_dst_len;
+    struct ubsec_dma_alloc  q_src_map[UBS_MAX_SCATTER];
+    struct ubsec_dma_alloc  q_dst_map[UBS_MAX_SCATTER];
+    int q_has_dst;
+
+    int             q_sesn;
+    int             q_flags;
+};
+
+struct ubsec_softc {
+    softc_device_decl   sc_dev;
+    struct ssb_device   *sdev;      /* device backpointer */
+
+    struct device       *sc_dv;     /* generic device */
+    void                *sc_ih;     /* interrupt handler cookie */
+    int                 sc_flags;   /* device specific flags */
+    u_int32_t           sc_statmask;    /* interrupt status mask */
+    int32_t             sc_cid;     /* crypto tag */
+    BSD_SIMPLEQ_HEAD(,ubsec_q)  sc_queue;   /* packet queue, mcr1 */
+    int                 sc_nqueue;  /* count enqueued, mcr1 */
+    BSD_SIMPLEQ_HEAD(,ubsec_q)  sc_qchip;   /* on chip, mcr1 */
+    BSD_SIMPLEQ_HEAD(,ubsec_q)  sc_freequeue;   /* list of free queue elements */
+    BSD_SIMPLEQ_HEAD(,ubsec_q2) sc_queue2;  /* packet queue, mcr2 */
+    int                 sc_nqueue2; /* count enqueued, mcr2 */
+    BSD_SIMPLEQ_HEAD(,ubsec_q2) sc_qchip2;  /* on chip, mcr2 */
+    int                 sc_nsessions;   /* # of sessions */
+    struct ubsec_session        *sc_sessions;   /* sessions */
+    int                 sc_rnghz;   /* rng poll time */
+    struct ubsec_q2_rng sc_rng;
+    struct ubsec_dma    sc_dmaa[UBS_MAX_NQUEUE];
+    struct ubsec_q      *sc_queuea[UBS_MAX_NQUEUE];
+    BSD_SIMPLEQ_HEAD(,ubsec_q2) sc_q2free;  /* free list */
+    spinlock_t          sc_ringmtx; /* PE ring lock */
+};
+
+#define UBSEC_QFLAGS_COPYOUTIV      0x1
+
+struct ubsec_session {
+    u_int32_t   ses_used;
+    u_int32_t   ses_key[8];         /* 3DES/AES key */
+    u_int32_t   ses_hminner[5];     /* hmac inner state */
+    u_int32_t   ses_hmouter[5];     /* hmac outer state */
+    u_int32_t   ses_iv[4];          /* [3]DES/AES iv */
+    u_int32_t   ses_keysize;        /* AES key size */
+    u_int32_t   ses_mlen;           /* hmac/hash length */
+};
+
+struct ubsec_stats {
+    u_int64_t hst_ibytes;
+    u_int64_t hst_obytes;
+    u_int32_t hst_ipackets;
+    u_int32_t hst_opackets;
+    u_int32_t hst_invalid;
+    u_int32_t hst_nomem;
+    u_int32_t hst_queuefull;
+    u_int32_t hst_dmaerr;
+    u_int32_t hst_mcrerr;
+    u_int32_t hst_nodmafree;
+};
+
+struct ubsec_generic_ctx {
+    u_int32_t   pc_key[8];      /* [3]DES/AES key */
+    u_int32_t   pc_hminner[5];  /* hmac inner state */
+    u_int32_t   pc_hmouter[5];  /* hmac outer state */
+    u_int32_t   pc_iv[4];       /* [3]DES/AES iv */
+    u_int16_t   pc_flags;       /* flags, below */
+    u_int16_t   pc_offset;      /* crypto offset */
+    u_int16_t   pc_type;        /* Cryptographic operation */
+};
diff --git a/crypto/ocf/uio.h b/crypto/ocf/uio.h
new file mode 100644
index 000000000000..03a62491fa6e
--- /dev/null
+++ b/crypto/ocf/uio.h
@@ -0,0 +1,54 @@
+#ifndef _OCF_UIO_H_
+#define _OCF_UIO_H_
+
+#include <linux/uio.h>
+
+/*
+ * The linux uio.h doesn't have all we need.  To be fully api compatible
+ * with the BSD cryptodev,  we need to keep this around.  Perhaps this can
+ * be moved back into the linux/uio.h
+ *
+ * Linux port done by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2006-2010 David McCullough
+ * Copyright (C) 2004-2005 Intel Corporation.
+ *
+ * LICENSE TERMS
+ *
+ * The free distribution and use of this software in both source and binary
+ * form is allowed (with or without changes) provided that:
+ *
+ *   1. distributions of this source code include the above copyright
+ *      notice, this list of conditions and the following disclaimer;
+ *
+ *   2. distributions in binary form include the above copyright
+ *      notice, this list of conditions and the following disclaimer
+ *      in the documentation and/or other associated materials;
+ *
+ *   3. the copyright holder's name is not used to endorse products
+ *      built using this software without specific written permission.
+ *
+ * ALTERNATIVELY, provided that this notice is retained in full, this product
+ * may be distributed under the terms of the GNU General Public License (GPL),
+ * in which case the provisions of the GPL apply INSTEAD OF those given above.
+ *
+ * DISCLAIMER
+ *
+ * This software is provided 'as is' with no explicit or implied warranties
+ * in respect of its properties, including, but not limited to, correctness
+ * and/or fitness for purpose.
+ * ---------------------------------------------------------------------------
+ */
+
+struct uio {
+	struct	iovec *uio_iov;
+	int		uio_iovcnt;
+	off_t	uio_offset;
+	int		uio_resid;
+#if 0
+	enum	uio_seg uio_segflg;
+	enum	uio_rw uio_rw;
+	struct  thread *uio_td;
+#endif
+};
+
+#endif
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 78eabff2fe46..5d396924dd98 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -89,6 +89,15 @@ config SATA_AHCI
 
 	  If unsure, say N.
 
+config SATA_AHCI_MV
+	tristate "Marvell AHCI SATA support"
+	help
+	  This option enables support for the Marvell Serial ATA
+	  platform AHCI controller.  Currently supports SOC devices
+	  compliant to AHCI rev. 1.0
+
+	  If unsure, say N.
+
 config SATA_AHCI_PLATFORM
 	tristate "Platform AHCI SATA support"
 	help
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index c04d0fd038a3..45f4f8abb0c7 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_ATA)		+= libata.o
 obj-$(CONFIG_SATA_AHCI)		+= ahci.o libahci.o
 obj-$(CONFIG_SATA_ACARD_AHCI)	+= acard-ahci.o libahci.o
 obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o
+obj-$(CONFIG_SATA_AHCI_MV)      += ahci_mv.o libahci.o
 obj-$(CONFIG_SATA_FSL)		+= sata_fsl.o
 obj-$(CONFIG_SATA_INIC162X)	+= sata_inic162x.o
 obj-$(CONFIG_SATA_SIL24)	+= sata_sil24.o
diff --git a/drivers/ata/ahci_mv.c b/drivers/ata/ahci_mv.c
new file mode 100644
index 000000000000..940b26f61d4e
--- /dev/null
+++ b/drivers/ata/ahci_mv.c
@@ -0,0 +1,306 @@
+/*
+ * ahci_mv.c - Marvell AHCI SATA platform support
+ *
+ * Copyright 2013: Marvell Corporation, all rights reserved.
+ *
+ * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include <linux/ata_platform.h>
+#include <linux/mbus.h>
+#include "ahci.h"
+
+#define AHCI_WINDOW_CTRL(win)	(0x60 + ((win) << 4))
+#define AHCI_WINDOW_BASE(win)	(0x64 + ((win) << 4))
+#define AHCI_WINDOW_SIZE(win)	(0x68 + ((win) << 4))
+
+#define VENDOR_SPECIFIC_0_ADDR  0xa0
+#define VENDOR_SPECIFIC_0_DATA  0xa4
+
+static void ahci_mv_windows_config(struct ahci_host_priv *hpriv,
+				   const struct mbus_dram_target_info *dram)
+{
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		writel(0, hpriv->mmio + AHCI_WINDOW_CTRL(i));
+		writel(0, hpriv->mmio + AHCI_WINDOW_BASE(i));
+		writel(0, hpriv->mmio + AHCI_WINDOW_SIZE(i));
+	}
+
+	for (i = 0; i < dram->num_cs; i++) {
+		const struct mbus_dram_window *cs = dram->cs + i;
+
+		writel((cs->mbus_attr << 8) |
+		       (dram->mbus_dram_target_id << 4) | 1,
+		       hpriv->mmio + AHCI_WINDOW_CTRL(i));
+		writel(cs->base, hpriv->mmio + AHCI_WINDOW_BASE(i));
+		writel(((cs->size - 1) & 0xffff0000),
+		       hpriv->mmio + AHCI_WINDOW_SIZE(i));
+	}
+}
+
+static void ahci_mv_host_stop(struct ata_host *host);
+
+static struct ata_port_operations ahci_mv_ops = {
+	.inherits = &ahci_ops,
+	.host_stop = ahci_mv_host_stop,
+};
+
+static const struct ata_port_info ahci_mv_port_info = {
+	.flags	   = AHCI_FLAG_COMMON,
+	.pio_mask  = ATA_PIO4,
+	.udma_mask = ATA_UDMA6,
+	.port_ops  = &ahci_mv_ops,
+};
+
+static struct scsi_host_template ahci_mv_platform_sht = {
+	AHCI_SHT("ahci_mv_platform"),
+};
+
+static const struct of_device_id ahci_mv_of_match[] = {
+	{ .compatible = "marvell,ahci-sata", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, ahci_of_match);
+
+static int ahci_mv_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	const struct mbus_dram_target_info *dram;
+	struct ata_port_info pi = ahci_mv_port_info;
+	const struct ata_port_info *ppi[] = { &pi, NULL };
+	struct ahci_host_priv *hpriv;
+	struct ata_host *host;
+	struct resource *mem;
+	int irq;
+	int n_ports;
+	int i;
+	int rc;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem) {
+		dev_err(dev, "no mmio space\n");
+		return -EINVAL;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0) {
+		dev_err(dev, "no irq\n");
+		return -EINVAL;
+	}
+
+	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
+	if (!hpriv) {
+		dev_err(dev, "can't alloc ahci_host_priv\n");
+		return -ENOMEM;
+	}
+
+	hpriv->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(hpriv->clk)) {
+		dev_err(dev, "can't get clock\n");
+		return PTR_ERR(hpriv->clk);
+	}
+
+	rc = clk_prepare_enable(hpriv->clk);
+	if (rc < 0) {
+		dev_err(dev, "can't enable clock\n");
+		return rc;
+	}
+
+	hpriv->flags |= (unsigned long)pi.private_data;
+
+	hpriv->mmio = devm_request_and_ioremap(dev, mem);
+	if (!hpriv->mmio) {
+		dev_err(dev, "can't map %pR\n", mem);
+		clk_disable_unprepare(hpriv->clk);
+		return -ENOMEM;
+	}
+
+	/*
+	 * (Re-)program MBUS remapping windows if we are asked to.
+	 */
+	dram = mv_mbus_dram_info();
+	if (dram)
+		ahci_mv_windows_config(hpriv, dram);
+
+	ahci_save_initial_config(dev, hpriv, 0, 0);
+
+	/* prepare host */
+	if (hpriv->cap & HOST_CAP_NCQ)
+		pi.flags |= ATA_FLAG_NCQ;
+
+	if (hpriv->cap & HOST_CAP_PMP)
+		pi.flags |= ATA_FLAG_PMP;
+
+	ahci_set_em_messages(hpriv, &pi);
+
+	/*
+	 * CAP.NP sometimes indicate the index of the last enabled
+	 * port, at other times, that of the last possible port, so
+	 * determining the maximum port number requires looking at
+	 * both CAP.NP and port_map.
+	 */
+	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
+
+	host = ata_host_alloc_pinfo(dev, ppi, n_ports);
+	if (!host) {
+		clk_disable_unprepare(hpriv->clk);
+		return -ENOMEM;
+	}
+
+	host->private_data = hpriv;
+
+	if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
+		host->flags |= ATA_HOST_PARALLEL_SCAN;
+	else
+		printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
+
+	if (pi.flags & ATA_FLAG_EM)
+		ahci_reset_em(host);
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+
+		ata_port_desc(ap, "mmio %pR", mem);
+		ata_port_desc(ap, "port 0x%x", 0x100 + ap->port_no * 0x80);
+
+		/* set enclosure management message type */
+		if (ap->flags & ATA_FLAG_EM)
+			ap->em_message_type = hpriv->em_msg_type;
+
+		/* disabled/not-implemented port */
+		if (!(hpriv->port_map & (1 << i)))
+			ap->ops = &ata_dummy_port_ops;
+	}
+
+	/* Enabling regret bit to enables the SATA unit to regret
+	a request that didn't receive an acknowlegde and avoid a deadlock */
+
+	writel(0x4, hpriv->mmio + VENDOR_SPECIFIC_0_ADDR);
+	writel(0x80, hpriv->mmio + VENDOR_SPECIFIC_0_DATA);
+
+	rc = ahci_reset_controller(host);
+	if (rc) {
+		clk_disable_unprepare(hpriv->clk);
+		return rc;
+	}
+
+	ahci_init_controller(host);
+	ahci_print_info(host, "platform");
+
+	rc = ata_host_activate(host, irq, ahci_interrupt, IRQF_SHARED,
+			       &ahci_mv_platform_sht);
+	if (rc) {
+		clk_disable_unprepare(hpriv->clk);
+		return rc;
+	}
+
+	return 0;
+}
+
+static void ahci_mv_host_stop(struct ata_host *host)
+{
+	struct ahci_host_priv *hpriv = host->private_data;
+	clk_disable_unprepare(hpriv->clk);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ahci_mv_suspend(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	void __iomem *mmio = hpriv->mmio;
+	u32 ctl;
+	int rc;
+
+	if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+		dev_err(dev, "firmware update required for suspend/resume\n");
+		return -EIO;
+	}
+
+	/*
+	 * AHCI spec rev1.1 section 8.3.3:
+	 * Software must disable interrupts prior to requesting a
+	 * transition of the HBA to D3 state.
+	 */
+	ctl = readl(mmio + HOST_CTL);
+	ctl &= ~HOST_IRQ_EN;
+	writel(ctl, mmio + HOST_CTL);
+	readl(mmio + HOST_CTL); /* flush */
+
+	rc = ata_host_suspend(host, PMSG_SUSPEND);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static int ahci_mv_resume(struct device *dev)
+{
+	struct ata_host *host = dev_get_drvdata(dev);
+	struct ahci_host_priv *hpriv = host->private_data;
+	const struct mbus_dram_target_info *dram;
+	int rc;
+
+	dram = mv_mbus_dram_info();
+	if (dram)
+		ahci_mv_windows_config(hpriv, dram);
+
+	if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
+		rc = ahci_reset_controller(host);
+		if (rc)
+			return rc;
+
+		ahci_init_controller(host);
+	}
+
+	ata_host_resume(host);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(ahci_mv_pm_ops, ahci_mv_suspend, ahci_mv_resume);
+
+static struct platform_driver ahci_mv_driver = {
+	.probe = ahci_mv_probe,
+	.remove = ata_platform_remove_one,
+	.driver = {
+		.name = "ahci_mv",
+		.owner = THIS_MODULE,
+		.of_match_table = ahci_mv_of_match,
+		.pm = &ahci_mv_pm_ops,
+	},
+};
+
+module_platform_driver(ahci_mv_driver);
+
+MODULE_DESCRIPTION("Marvell AHCI SATA platform driver");
+MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ahci_mv");
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 7ccc084bf1df..85aa76116a30 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
 				       ATA_LFLAG_NO_SRST |
 				       ATA_LFLAG_ASSUME_ATA;
 		}
+	} else if (vendor == 0x11ab && devid == 0x4140) {
+		/* Marvell 4140 quirks */
+		ata_for_each_link(link, ap, EDGE) {
+			/* port 4 is for SEMB device and it doesn't like SRST */
+			if (link->pmp == 4)
+				link->flags |= ATA_LFLAG_DISABLED;
+		}
 	}
 }
 
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index b256ff5b6579..98daea5e399d 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -67,10 +67,12 @@
 #include <linux/gfp.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
+#include <linux/of_gpio.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
 #include <linux/libata.h>
+#include <linux/dmaengine.h>
 
 #define DRV_NAME	"sata_mv"
 #define DRV_VERSION	"1.28"
@@ -2411,10 +2413,19 @@ static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
 {
 	struct mv_port_priv *pp = ap->private_data;
 	struct ata_queued_cmd *qc;
+	struct ata_link *link = NULL;
 
 	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
 		return NULL;
-	qc = ata_qc_from_tag(ap, ap->link.active_tag);
+
+	ata_for_each_link(link, ap, EDGE)
+		if (ata_link_active(link))
+			break;
+
+	if (!link)
+		link = &ap->link;
+
+	qc = ata_qc_from_tag(ap, link->active_tag);
 	if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
 		return qc;
 	return NULL;
@@ -2800,6 +2811,9 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
 	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
 			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
 
+	dma_sync_single_for_cpu(ap->dev , (dma_addr_t) NULL,
+			(size_t) NULL, DMA_FROM_DEVICE);
+
 	/* Process new responses from since the last time we looked */
 	while (in_index != pp->resp_idx) {
 		unsigned int tag;
@@ -4028,6 +4042,26 @@ static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
 	}
 }
 
+/**     mv_gpio_power_ctrl - Shut down SATA power supply via GPIO pins.
+ *	@enable: selection of enable/disable power supply
+ */
+static void mv_gpio_power_ctrl(struct platform_device *pdev, bool enable)
+{
+	struct device_node *np = pdev->dev.of_node;
+	int gpio_count, i, gpio;
+
+	if (np) {
+		gpio_count = of_gpio_named_count(np, "sd-gpios");
+		for (i = 0; i < gpio_count; i++) {
+			gpio = of_get_named_gpio(np, "sd-gpios", i);
+			if (enable == true)
+				gpio_set_value(gpio, GPIOF_OUT_INIT_HIGH);
+			else
+				gpio_set_value(gpio, GPIOF_OUT_INIT_LOW);
+		}
+	}
+}
+
 /**
  *      mv_platform_probe - handle a positive probe of an soc Marvell
  *      host
@@ -4051,6 +4085,9 @@ static int mv_platform_probe(struct platform_device *pdev)
 	int port;
 #endif
 
+	/* Enable GPIO power output */
+	mv_gpio_power_ctrl(pdev, true);
+
 	ata_print_version_once(&pdev->dev, DRV_VERSION);
 
 	/*
@@ -4233,11 +4270,18 @@ static int mv_platform_resume(struct platform_device *pdev)
 
 	return 0;
 }
+
 #else
 #define mv_platform_suspend NULL
 #define mv_platform_resume NULL
 #endif
 
+void mv_platform_shutdown(struct platform_device *pdev)
+{
+	mv_platform_remove(pdev);
+	mv_gpio_power_ctrl(pdev, false);
+}
+
 #ifdef CONFIG_OF
 static struct of_device_id mv_sata_dt_ids[] = {
 	{ .compatible = "marvell,armada-370-sata", },
@@ -4252,6 +4296,7 @@ static struct platform_driver mv_platform_driver = {
 	.remove		= mv_platform_remove,
 	.suspend	= mv_platform_suspend,
 	.resume		= mv_platform_resume,
+	.shutdown	= mv_platform_shutdown,
 	.driver		= {
 		.name = DRV_NAME,
 		.owner = THIS_MODULE,
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 4a8116547873..f8d69debc1a8 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -344,7 +344,7 @@ lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 		size = p->bsize;
 
 	if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) {
-		printk(KERN_ERR "loop: transfer error block %ld\n",
+		printk(KERN_ERR "loop: transfer error block %lld\n",
 		       page->index);
 		size = -EINVAL;
 	}
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 8740f46b4d0d..962020f30da0 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -35,13 +35,9 @@
  *
  * - Provides an API for platform code or device drivers to
  *   dynamically add or remove address decoding windows for the CPU ->
- *   device accesses. This API is mvebu_mbus_add_window(),
- *   mvebu_mbus_add_window_remap_flags() and
- *   mvebu_mbus_del_window(). Since the (target, attribute) values
- *   differ from one SoC family to another, the API uses a 'const char
- *   *' string to identify devices, and this driver is responsible for
- *   knowing the mapping between the name of a device and its
- *   corresponding (target, attribute) in the current SoC family.
+ *   device accesses. This API is mvebu_mbus_add_window_by_id(),
+ *   mvebu_mbus_add_window_remap_by_id() and
+ *   mvebu_mbus_del_window().
  *
  * - Provides a debugfs interface in /sys/kernel/debug/mvebu-mbus/ to
  *   see the list of CPU -> SDRAM windows and their configuration
@@ -49,6 +45,8 @@
  *   configuration (file 'devices').
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -58,6 +56,15 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/debugfs.h>
+#include <linux/syscore_ops.h>
+
+/* #define MBUS_DEBUG */
+
+#ifdef MBUS_DEBUG
+#define dprintk(a...) printk(a)
+#else
+#define dprintk(a...)
+#endif
 
 /*
  * DDR target is the same on all platforms.
@@ -95,53 +102,56 @@
 
 #define DOVE_DDR_BASE_CS_OFF(n) ((n) << 4)
 
-struct mvebu_mbus_mapping {
-	const char *name;
-	u8 target;
-	u8 attr;
-	u8 attrmask;
-};
-
-/*
- * Masks used for the 'attrmask' field of mvebu_mbus_mapping. They
- * allow to get the real attribute value, discarding the special bits
- * used to select a PCI MEM region or a PCI WA region. This allows the
- * debugfs code to reverse-match the name of a device from its
- * target/attr values.
- *
- * For all devices except PCI, all bits of 'attr' must be
- * considered. For most SoCs, only bit 3 should be ignored (it allows
- * to select between PCI MEM and PCI I/O). On Orion5x however, there
- * is the special bit 5 to select a PCI WA region.
- */
-#define MAPDEF_NOMASK       0xff
-#define MAPDEF_PCIMASK      0xf7
-#define MAPDEF_ORIONPCIMASK 0xd7
+/* Relative to mbusbridge_base */
+#define MBUS_BRIDGE_CTRL_OFF	0x0
+#define  MBUS_BRIDGE_SIZE_MASK  0xffff0000
+#define MBUS_BRIDGE_BASE_OFF	0x4
+#define  MBUS_BRIDGE_BASE_MASK  0xffff0000
 
-/* Macro used to define one mvebu_mbus_mapping entry */
-#define MAPDEF(__n, __t, __a, __m) \
-	{ .name = __n, .target = __t, .attr = __a, .attrmask = __m }
+/* Maximum number of windows, for all known platforms */
+#define MBUS_WINS_MAX		20
 
 struct mvebu_mbus_state;
 
 struct mvebu_mbus_soc_data {
 	unsigned int num_wins;
-	unsigned int num_remappable_wins;
 	unsigned int (*win_cfg_offset)(const int win);
+	unsigned int (*win_remap_offset)(const int win);
+	bool has_mbus_bridge;
 	void (*setup_cpu_target)(struct mvebu_mbus_state *s);
+	int (*save_cpu_target)(struct mvebu_mbus_state *s,
+			       u32 *store_addr);
 	int (*show_cpu_target)(struct mvebu_mbus_state *s,
 			       struct seq_file *seq, void *v);
-	const struct mvebu_mbus_mapping *map;
+};
+
+/*
+ * Used to store the state of one MBus window accross suspend/resume.
+ */
+struct mvebu_mbus_win_data {
+	u32 ctrl;
+	u32 base;
+	u32 remap_lo;
+	u32 remap_hi;
 };
 
 struct mvebu_mbus_state {
 	void __iomem *mbuswins_base;
 	void __iomem *sdramwins_base;
+	void __iomem *mbusbridge_base;
+	phys_addr_t sdramwins_phys_base;
 	struct dentry *debugfs_root;
 	struct dentry *debugfs_sdram;
 	struct dentry *debugfs_devs;
+	struct resource pcie_mem_aperture;
+	struct resource pcie_io_aperture;
 	const struct mvebu_mbus_soc_data *soc;
 	int hw_io_coherency;
+
+	/* Used during suspend/resume */
+	u32 mbus_bridge_ctrl;
+	u32 mbus_bridge_base;
+	struct mvebu_mbus_win_data wins[MBUS_WINS_MAX];
 };
 
 static struct mvebu_mbus_state mbus_state;
@@ -153,6 +163,15 @@ const struct mbus_dram_target_info *mv_mbus_dram_info(void)
 }
 EXPORT_SYMBOL_GPL(mv_mbus_dram_info);
 
+/* Checks whether the given window has remap capability */
+static bool mvebu_mbus_window_is_remappable(struct mvebu_mbus_state *mbus,
+					    const int win)
+{
+	unsigned int offset = mbus->soc->win_remap_offset(win);
+
+	return offset != MVEBU_MBUS_NO_REMAP;
+}
+
 /*
  * Functions to manipulate the address decoding windows
  */
@@ -184,9 +203,12 @@ static void mvebu_mbus_read_window(struct mvebu_mbus_state *mbus,
 		*attr = (ctrlreg & WIN_CTRL_ATTR_MASK) >> WIN_CTRL_ATTR_SHIFT;
 
 	if (remap) {
-		if (win < mbus->soc->num_remappable_wins) {
-			u32 remap_low = readl(addr + WIN_REMAP_LO_OFF);
-			u32 remap_hi  = readl(addr + WIN_REMAP_HI_OFF);
+		if (mvebu_mbus_window_is_remappable(mbus, win)) {
+			u32 remap_low, remap_hi;
+			addr = mbus->mbuswins_base +
+					mbus->soc->win_remap_offset(win);
+			remap_low = readl(addr + WIN_REMAP_LO_OFF);
+			remap_hi  = readl(addr + WIN_REMAP_HI_OFF);
 			*remap = ((u64)remap_hi << 32) | remap_low;
 		} else
 			*remap = 0;
@@ -199,10 +221,11 @@ static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus,
 	void __iomem *addr;
 
 	addr = mbus->mbuswins_base + mbus->soc->win_cfg_offset(win);
-
 	writel(0, addr + WIN_BASE_OFF);
 	writel(0, addr + WIN_CTRL_OFF);
-	if (win < mbus->soc->num_remappable_wins) {
+
+	if (mvebu_mbus_window_is_remappable(mbus, win)) {
+		addr = mbus->mbuswins_base + mbus->soc->win_remap_offset(win);
 		writel(0, addr + WIN_REMAP_LO_OFF);
 		writel(0, addr + WIN_REMAP_HI_OFF);
 	}
@@ -292,6 +315,8 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
 {
 	void __iomem *addr = mbus->mbuswins_base +
 		mbus->soc->win_cfg_offset(win);
+	void __iomem *addr_rmp = mbus->mbuswins_base +
+		mbus->soc->win_remap_offset(win);
 	u32 ctrl, remap_addr;
 
 	ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) |
@@ -301,15 +326,23 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
 
 	writel(base & WIN_BASE_LOW, addr + WIN_BASE_OFF);
 	writel(ctrl, addr + WIN_CTRL_OFF);
-	if (win < mbus->soc->num_remappable_wins) {
+
+	if (mvebu_mbus_window_is_remappable(mbus, win)) {
 		if (remap == MVEBU_MBUS_NO_REMAP)
 			remap_addr = base;
 		else
 			remap_addr = remap;
-		writel(remap_addr & WIN_REMAP_LOW, addr + WIN_REMAP_LO_OFF);
-		writel(0, addr + WIN_REMAP_HI_OFF);
+		writel(remap_addr & WIN_REMAP_LOW, addr_rmp + WIN_REMAP_LO_OFF);
+		writel(0, addr_rmp + WIN_REMAP_HI_OFF);
 	}
 
+	dprintk("== %s: decoding window ==\n", __func__);
+	dprintk("base_phys: 0x%x, base_low 0x%x, ctrl 0x%x, remap_addr 0x%x\n",
+	    base, base & WIN_BASE_LOW, ctrl, remap_addr);
+
+	dprintk("base_addr: %p, ctrl_addr: %p\n",
+	    addr + WIN_BASE_OFF, addr + WIN_CTRL_OFF);
+
 	return 0;
 }
 
@@ -321,19 +354,27 @@ static int mvebu_mbus_alloc_window(struct mvebu_mbus_state *mbus,
 	int win;
 
 	if (remap == MVEBU_MBUS_NO_REMAP) {
-		for (win = mbus->soc->num_remappable_wins;
-		     win < mbus->soc->num_wins; win++)
+		for (win = 0; win < mbus->soc->num_wins; win++) {
+			if (mvebu_mbus_window_is_remappable(mbus, win))
+				continue;
+
 			if (mvebu_mbus_window_is_free(mbus, win))
 				return mvebu_mbus_setup_window(mbus, win, base,
 							       size, remap,
 							       target, attr);
+		}
 	}
 
+	for (win = 0; win < mbus->soc->num_wins; win++) {
+		/* Skip window if need remap but is not supported */
+		if ((remap != MVEBU_MBUS_NO_REMAP) &&
+		    (!mvebu_mbus_window_is_remappable(mbus, win)))
+			continue;
 
-	for (win = 0; win < mbus->soc->num_wins; win++)
 		if (mvebu_mbus_window_is_free(mbus, win))
 			return mvebu_mbus_setup_window(mbus, win, base, size,
 						       remap, target, attr);
+	}
 
 	return -ENOMEM;
 }
@@ -426,8 +467,7 @@ static int mvebu_devs_debug_show(struct seq_file *seq, void *v)
 		u64 wbase, wremap;
 		u32 wsize;
 		u8 wtarget, wattr;
-		int enabled, i;
-		const char *name;
+		int enabled;
 
 		mvebu_mbus_read_window(mbus, win,
 				       &enabled, &wbase, &wsize,
@@ -438,20 +478,11 @@ static int mvebu_devs_debug_show(struct seq_file *seq, void *v)
 			continue;
 		}
 
-
-		for (i = 0; mbus->soc->map[i].name; i++)
-			if (mbus->soc->map[i].target == wtarget &&
-			    mbus->soc->map[i].attr ==
-			    (wattr & mbus->soc->map[i].attrmask))
-				break;
-
-		name = mbus->soc->map[i].name ?: "unknown";
-
-		seq_printf(seq, "[%02d] %016llx - %016llx : %s",
+		seq_printf(seq, "[%02d] %016llx - %016llx : %04x:%04x",
 			   win, (unsigned long long)wbase,
-			   (unsigned long long)(wbase + wsize), name);
+			   (unsigned long long)(wbase + wsize), wtarget, wattr);
 
-		if (win < mbus->soc->num_remappable_wins) {
+		if (mvebu_mbus_window_is_remappable(mbus, win)) {
 			seq_printf(seq, " (remap %016llx)\n",
 				   (unsigned long long)wremap);
 		} else
@@ -477,12 +508,46 @@ static const struct file_operations mvebu_devs_debug_fops = {
  * SoC-specific functions and definitions
  */
 
-static unsigned int orion_mbus_win_offset(int win)
+static unsigned int generic_mbus_win_cfg_offset(int win)
 {
 	return win << 4;
 }
 
-static unsigned int armada_370_xp_mbus_win_offset(int win)
+static unsigned int generic_mbus_win_remap_2_offset(int win)
+{
+	if (win < 2)
+		return generic_mbus_win_cfg_offset(win);
+	else
+		return MVEBU_MBUS_NO_REMAP;
+}
+
+static unsigned int generic_mbus_win_remap_4_offset(int win)
+{
+	if (win < 4)
+		return generic_mbus_win_cfg_offset(win);
+	else
+		return MVEBU_MBUS_NO_REMAP;
+}
+
+static unsigned int generic_mbus_win_remap_8_offset(int win)
+{
+	if (win < 8)
+		return generic_mbus_win_cfg_offset(win);
+	else
+		return MVEBU_MBUS_NO_REMAP;
+}
+
+static unsigned int armada_xp_mbus_win_remap_offset(int win)
+{
+	if (win < 8)
+		return win << 4;
+	else if (win == 13)
+		return 0xF0 - WIN_REMAP_LO_OFF;
+	else
+		return MVEBU_MBUS_NO_REMAP;
+}
+
+static unsigned int armada_370_xp_mbus_win_cfg_offset(int win)
 {
 	/* The register layout is a bit annoying and the below code
 	 * tries to cope with it.
@@ -502,7 +567,7 @@ static unsigned int armada_370_xp_mbus_win_offset(int win)
 		return 0x90 + ((win - 8) << 3);
 }
 
-static unsigned int mv78xx0_mbus_win_offset(int win)
+static unsigned int mv78xx0_mbus_win_cfg_offset(int win)
 {
 	if (win < 8)
 		return win << 4;
@@ -515,35 +580,110 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
 {
 	int i;
 	int cs;
+	struct mvebu_mbus_state *s = &mbus_state;
+	u32 mbus_bridge_base = 0, mbus_bridge_size = 0;
+	u64 mbus_bridge_end = 0;
+
+	if (s->mbusbridge_base) {
+		mbus_bridge_base =
+			(readl(s->mbusbridge_base + MBUS_BRIDGE_BASE_OFF) &
+			 MBUS_BRIDGE_BASE_MASK);
+		mbus_bridge_size =
+			(readl(s->mbusbridge_base + MBUS_BRIDGE_CTRL_OFF) |
+			 ~MBUS_BRIDGE_SIZE_MASK) + 1;
+		mbus_bridge_end = (u64)mbus_bridge_base + mbus_bridge_size;
+	}
 
 	mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
 
 	for (i = 0, cs = 0; i < 4; i++) {
-		u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
-		u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
+		u64 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+		u64 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
+		u64 end;
+		struct mbus_dram_window *w;
+
+		/* Ignore entries that are not enabled */
+		if (!(size & DDR_SIZE_ENABLED))
+			continue;
 
 		/*
-		 * We only take care of entries for which the chip
-		 * select is enabled, and that don't have high base
-		 * address bits set (devices can only access the first
-		 * 32 bits of the memory).
+		 * Ignore entries whose base address is above 2^32,
+		 * since devices cannot DMA to such high addresses
 		 */
-		if ((size & DDR_SIZE_ENABLED) &&
-		    !(base & DDR_BASE_CS_HIGH_MASK)) {
-			struct mbus_dram_window *w;
+		if (base & DDR_BASE_CS_HIGH_MASK)
+			continue;
 
-			w = &mvebu_mbus_dram_info.cs[cs++];
-			w->cs_index = i;
-			w->mbus_attr = 0xf & ~(1 << i);
-			if (mbus->hw_io_coherency)
-				w->mbus_attr |= ATTR_HW_COHERENCY;
-			w->base = base & DDR_BASE_CS_LOW_MASK;
-			w->size = (size | ~DDR_SIZE_MASK) + 1;
+		base = base & DDR_BASE_CS_LOW_MASK;
+		size = (size | ~DDR_SIZE_MASK) + 1;
+		end = base + size;
+
+		/*
+		 * Adjust base/size of the current CS to make sure it
+		 * doesn't overlap with the MBus bridge window. This
+		 * is particularly important for devices that do DMA
+		 * from DRAM to a SRAM mapped in a MBus window, such
+		 * as the CESA cryptographic engine.
+		 */
+
+		if (s->mbusbridge_base) {
+			/*
+			 * The CS is fully enclosed inside the MBus bridge
+			 * area, so ignore it.
+			 */
+			if (base >= mbus_bridge_base && end <= mbus_bridge_end)
+				continue;
+
+			/*
+			 * Beginning of CS overlaps with end of MBus, raise CS
+			 * base address, and shrink its size.
+			 */
+			if (base >= mbus_bridge_base && end > mbus_bridge_end) {
+				pr_info(" ==> 1\n");
+				size -= mbus_bridge_end - base;
+				base = mbus_bridge_end;
+			}
+
+			/*
+			 * End of CS overlaps with beginning of MBus, shrink
+			 * CS size.
+			 */
+			if (base < mbus_bridge_base && end > mbus_bridge_base)
+				size -= end - mbus_bridge_base;
 		}
+
+		w = &mvebu_mbus_dram_info.cs[cs++];
+		w->cs_index = i;
+		w->mbus_attr = 0xf & ~(1 << i);
+		if (mbus->hw_io_coherency)
+			w->mbus_attr |= ATTR_HW_COHERENCY;
+		w->base = base;
+		w->size = size;
 	}
 	mvebu_mbus_dram_info.num_cs = cs;
 }
 
+static int
+mvebu_mbus_default_save_cpu_target(struct mvebu_mbus_state *mbus,
+				   u32 *store_addr)
+{
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+		u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
+
+		writel(mbus->sdramwins_phys_base + DDR_BASE_CS_OFF(i),
+		       store_addr++);
+		writel(base, store_addr++);
+		writel(mbus->sdramwins_phys_base + DDR_SIZE_CS_OFF(i),
+		       store_addr++);
+		writel(size, store_addr++);
+	}
+
+	/* We've written 16 words to the store address */
+	return 16;
+}
+
 static void __init
 mvebu_mbus_dove_setup_cpu_target(struct mvebu_mbus_state *mbus)
 {
@@ -574,100 +714,45 @@ mvebu_mbus_dove_setup_cpu_target(struct mvebu_mbus_state *mbus)
 	mvebu_mbus_dram_info.num_cs = cs;
 }
 
-static const struct mvebu_mbus_mapping armada_370_map[] = {
-	MAPDEF("bootrom",     1, 0xe0, MAPDEF_NOMASK),
-	MAPDEF("devbus-boot", 1, 0x2f, MAPDEF_NOMASK),
-	MAPDEF("devbus-cs0",  1, 0x3e, MAPDEF_NOMASK),
-	MAPDEF("devbus-cs1",  1, 0x3d, MAPDEF_NOMASK),
-	MAPDEF("devbus-cs2",  1, 0x3b, MAPDEF_NOMASK),
-	MAPDEF("devbus-cs3",  1, 0x37, MAPDEF_NOMASK),
-	MAPDEF("pcie0.0",     4, 0xe0, MAPDEF_PCIMASK),
-	MAPDEF("pcie1.0",     8, 0xe0, MAPDEF_PCIMASK),
-	{},
-};
+int mvebu_mbus_save_cpu_target(u32 *store_addr)
+{
+	return mbus_state.soc->save_cpu_target(&mbus_state, store_addr);
+}
+
 
 static const struct mvebu_mbus_soc_data armada_370_mbus_data = {
 	.num_wins            = 20,
-	.num_remappable_wins = 8,
-	.win_cfg_offset      = armada_370_xp_mbus_win_offset,
+	.win_cfg_offset      = armada_370_xp_mbus_win_cfg_offset,
+	.win_remap_offset    = generic_mbus_win_remap_8_offset,
+	.save_cpu_target     = mvebu_mbus_default_save_cpu_target,
 	.setup_cpu_target    = mvebu_mbus_default_setup_cpu_target,
 	.show_cpu_target     = mvebu_sdram_debug_show_orion,
-	.map                 = armada_370_map,
-};
-
-static const struct mvebu_mbus_mapping armada_xp_map[] = {
-	MAPDEF("bootrom",     1, 0x1d, MAPDEF_NOMASK),
-	MAPDEF("devbus-boot", 1, 0x2f, MAPDEF_NOMASK),
-	MAPDEF("devbus-cs0",  1, 0x3e, MAPDEF_NOMASK),
-	MAPDEF("devbus-cs1",  1, 0x3d, MAPDEF_NOMASK),
-	MAPDEF("devbus-cs2",  1, 0x3b, MAPDEF_NOMASK),
-	MAPDEF("devbus-cs3",  1, 0x37, MAPDEF_NOMASK),
-	MAPDEF("pcie0.0",     4, 0xe0, MAPDEF_PCIMASK),
-	MAPDEF("pcie0.1",     4, 0xd0, MAPDEF_PCIMASK),
-	MAPDEF("pcie0.2",     4, 0xb0, MAPDEF_PCIMASK),
-	MAPDEF("pcie0.3",     4, 0x70, MAPDEF_PCIMASK),
-	MAPDEF("pcie1.0",     8, 0xe0, MAPDEF_PCIMASK),
-	MAPDEF("pcie1.1",     8, 0xd0, MAPDEF_PCIMASK),
-	MAPDEF("pcie1.2",     8, 0xb0, MAPDEF_PCIMASK),
-	MAPDEF("pcie1.3",     8, 0x70, MAPDEF_PCIMASK),
-	MAPDEF("pcie2.0",     4, 0xf0, MAPDEF_PCIMASK),
-	MAPDEF("pcie3.0",     8, 0xf0, MAPDEF_PCIMASK),
-	{},
 };
 
 static const struct mvebu_mbus_soc_data armada_xp_mbus_data = {
 	.num_wins            = 20,
-	.num_remappable_wins = 8,
-	.win_cfg_offset      = armada_370_xp_mbus_win_offset,
+	.has_mbus_bridge     = true,
+	.win_cfg_offset      = armada_370_xp_mbus_win_cfg_offset,
+	.win_remap_offset    = armada_xp_mbus_win_remap_offset,
+	.save_cpu_target     = mvebu_mbus_default_save_cpu_target,
 	.setup_cpu_target    = mvebu_mbus_default_setup_cpu_target,
 	.show_cpu_target     = mvebu_sdram_debug_show_orion,
-	.map                 = armada_xp_map,
-};
-
-static const struct mvebu_mbus_mapping kirkwood_map[] = {
-	MAPDEF("pcie0.0", 4, 0xe0, MAPDEF_PCIMASK),
-	MAPDEF("pcie1.0", 4, 0xd0, MAPDEF_PCIMASK),
-	MAPDEF("sram",    3, 0x01, MAPDEF_NOMASK),
-	MAPDEF("nand",    1, 0x2f, MAPDEF_NOMASK),
-	{},
 };
 
 static const struct mvebu_mbus_soc_data kirkwood_mbus_data = {
 	.num_wins            = 8,
-	.num_remappable_wins = 4,
-	.win_cfg_offset      = orion_mbus_win_offset,
+	.win_cfg_offset      = generic_mbus_win_cfg_offset,
+	.win_remap_offset    = generic_mbus_win_remap_4_offset,
 	.setup_cpu_target    = mvebu_mbus_default_setup_cpu_target,
 	.show_cpu_target     = mvebu_sdram_debug_show_orion,
-	.map                 = kirkwood_map,
-};
-
-static const struct mvebu_mbus_mapping dove_map[] = {
-	MAPDEF("pcie0.0",    0x4, 0xe0, MAPDEF_PCIMASK),
-	MAPDEF("pcie1.0",    0x8, 0xe0, MAPDEF_PCIMASK),
-	MAPDEF("cesa",       0x3, 0x01, MAPDEF_NOMASK),
-	MAPDEF("bootrom",    0x1, 0xfd, MAPDEF_NOMASK),
-	MAPDEF("scratchpad", 0xd, 0x0, MAPDEF_NOMASK),
-	{},
 };
 
 static const struct mvebu_mbus_soc_data dove_mbus_data = {
 	.num_wins            = 8,
-	.num_remappable_wins = 4,
-	.win_cfg_offset      = orion_mbus_win_offset,
+	.win_cfg_offset      = generic_mbus_win_cfg_offset,
+	.win_remap_offset    = generic_mbus_win_remap_4_offset,
 	.setup_cpu_target    = mvebu_mbus_dove_setup_cpu_target,
 	.show_cpu_target     = mvebu_sdram_debug_show_dove,
-	.map                 = dove_map,
-};
-
-static const struct mvebu_mbus_mapping orion5x_map[] = {
-	MAPDEF("pcie0.0",     4, 0x51, MAPDEF_ORIONPCIMASK),
-	MAPDEF("pci0.0",      3, 0x51, MAPDEF_ORIONPCIMASK),
-	MAPDEF("devbus-boot", 1, 0x0f, MAPDEF_NOMASK),
-	MAPDEF("devbus-cs0",  1, 0x1e, MAPDEF_NOMASK),
-	MAPDEF("devbus-cs1",  1, 0x1d, MAPDEF_NOMASK),
-	MAPDEF("devbus-cs2",  1, 0x1b, MAPDEF_NOMASK),
-	MAPDEF("sram",        0, 0x00, MAPDEF_NOMASK),
-	{},
 };
 
 /*
@@ -676,43 +761,26 @@ static const struct mvebu_mbus_mapping orion5x_map[] = {
  */
 static const struct mvebu_mbus_soc_data orion5x_4win_mbus_data = {
 	.num_wins            = 8,
-	.num_remappable_wins = 4,
-	.win_cfg_offset      = orion_mbus_win_offset,
+	.win_cfg_offset      = generic_mbus_win_cfg_offset,
+	.win_remap_offset    = generic_mbus_win_remap_4_offset,
 	.setup_cpu_target    = mvebu_mbus_default_setup_cpu_target,
 	.show_cpu_target     = mvebu_sdram_debug_show_orion,
-	.map                 = orion5x_map,
 };
 
 static const struct mvebu_mbus_soc_data orion5x_2win_mbus_data = {
 	.num_wins            = 8,
-	.num_remappable_wins = 2,
-	.win_cfg_offset      = orion_mbus_win_offset,
+	.win_cfg_offset      = generic_mbus_win_cfg_offset,
+	.win_remap_offset    = generic_mbus_win_remap_2_offset,
 	.setup_cpu_target    = mvebu_mbus_default_setup_cpu_target,
 	.show_cpu_target     = mvebu_sdram_debug_show_orion,
-	.map                 = orion5x_map,
-};
-
-static const struct mvebu_mbus_mapping mv78xx0_map[] = {
-	MAPDEF("pcie0.0", 4, 0xe0, MAPDEF_PCIMASK),
-	MAPDEF("pcie0.1", 4, 0xd0, MAPDEF_PCIMASK),
-	MAPDEF("pcie0.2", 4, 0xb0, MAPDEF_PCIMASK),
-	MAPDEF("pcie0.3", 4, 0x70, MAPDEF_PCIMASK),
-	MAPDEF("pcie1.0", 8, 0xe0, MAPDEF_PCIMASK),
-	MAPDEF("pcie1.1", 8, 0xd0, MAPDEF_PCIMASK),
-	MAPDEF("pcie1.2", 8, 0xb0, MAPDEF_PCIMASK),
-	MAPDEF("pcie1.3", 8, 0x70, MAPDEF_PCIMASK),
-	MAPDEF("pcie2.0", 4, 0xf0, MAPDEF_PCIMASK),
-	MAPDEF("pcie3.0", 8, 0xf0, MAPDEF_PCIMASK),
-	{},
 };
 
 static const struct mvebu_mbus_soc_data mv78xx0_mbus_data = {
 	.num_wins            = 14,
-	.num_remappable_wins = 8,
-	.win_cfg_offset      = mv78xx0_mbus_win_offset,
+	.win_cfg_offset      = mv78xx0_mbus_win_cfg_offset,
+	.win_remap_offset    = generic_mbus_win_remap_8_offset,
 	.setup_cpu_target    = mvebu_mbus_default_setup_cpu_target,
 	.show_cpu_target     = mvebu_sdram_debug_show_orion,
-	.map                 = mv78xx0_map,
 };
 
 /*
@@ -724,6 +792,10 @@ static const struct mvebu_mbus_soc_data mv78xx0_mbus_data = {
 static const struct of_device_id of_mvebu_mbus_ids[] = {
 	{ .compatible = "marvell,armada370-mbus",
 	  .data = &armada_370_mbus_data, },
+	{ .compatible = "marvell,armada375-mbus",
+	  .data = &armada_xp_mbus_data, },
+	{ .compatible = "marvell,armada380-mbus",
+	  .data = &armada_xp_mbus_data, },
 	{ .compatible = "marvell,armadaxp-mbus",
 	  .data = &armada_xp_mbus_data, },
 	{ .compatible = "marvell,kirkwood-mbus",
@@ -746,48 +818,27 @@ static const struct of_device_id of_mvebu_mbus_ids[] = {
 /*
  * Public API of the driver
  */
-int mvebu_mbus_add_window_remap_flags(const char *devname, phys_addr_t base,
-				      size_t size, phys_addr_t remap,
-				      unsigned int flags)
+int mvebu_mbus_add_window_remap_by_id(unsigned int target,
+				      unsigned int attribute,
+				      phys_addr_t base, size_t size,
+				      phys_addr_t remap)
 {
 	struct mvebu_mbus_state *s = &mbus_state;
-	u8 target, attr;
-	int i;
-
-	if (!s->soc->map)
-		return -ENODEV;
-
-	for (i = 0; s->soc->map[i].name; i++)
-		if (!strcmp(s->soc->map[i].name, devname))
-			break;
-
-	if (!s->soc->map[i].name) {
-		pr_err("mvebu-mbus: unknown device '%s'\n", devname);
-		return -ENODEV;
-	}
-
-	target = s->soc->map[i].target;
-	attr   = s->soc->map[i].attr;
 
-	if (flags == MVEBU_MBUS_PCI_MEM)
-		attr |= 0x8;
-	else if (flags == MVEBU_MBUS_PCI_WA)
-		attr |= 0x28;
-
-	if (!mvebu_mbus_window_conflicts(s, base, size, target, attr)) {
-		pr_err("mvebu-mbus: cannot add window '%s', conflicts with another window\n",
-		       devname);
+	if (!mvebu_mbus_window_conflicts(s, base, size, target, attribute)) {
+		pr_err("cannot add window '%x:%x', conflicts with another window\n",
+		       target, attribute);
 		return -EINVAL;
 	}
 
-	return mvebu_mbus_alloc_window(s, base, size, remap, target, attr);
-
+	return mvebu_mbus_alloc_window(s, base, size, remap, target, attribute);
 }
 
-int mvebu_mbus_add_window(const char *devname, phys_addr_t base, size_t size)
+int mvebu_mbus_add_window_by_id(unsigned int target, unsigned int attribute,
+				phys_addr_t base, size_t size)
 {
-	return mvebu_mbus_add_window_remap_flags(devname, base, size,
-						 MVEBU_MBUS_NO_REMAP, 0);
+	return mvebu_mbus_add_window_remap_by_id(target, attribute, base,
+						 size, MVEBU_MBUS_NO_REMAP);
 }
 
 int mvebu_mbus_del_window(phys_addr_t base, size_t size)
@@ -802,6 +853,52 @@ int mvebu_mbus_del_window(phys_addr_t base, size_t size)
 	return 0;
 }
 
+void mvebu_mbus_get_pcie_mem_aperture(struct resource *res)
+{
+	if (!res)
+		return;
+	*res = mbus_state.pcie_mem_aperture;
+}
+
+void mvebu_mbus_get_pcie_io_aperture(struct resource *res)
+{
+	if (!res)
+		return;
+	*res = mbus_state.pcie_io_aperture;
+}
+
+int mvebu_mbus_get_addr_win_info(phys_addr_t phyaddr, u8 *trg_id, u8 *attr)
+{
+	const struct mbus_dram_target_info *dram;
+	int i;
+
+	if (NULL == trg_id || NULL == attr) {
+		pr_err("%s: Invalid parameter\n", __func__);
+		return -EINVAL;
+	}
+	/* Get dram info */
+	dram = mv_mbus_dram_info();
+	if (!dram) {
+		pr_err("%s: No DRAM information\n", __func__);
+		return -ENODEV;
+	}
+	/* Check addr in the range or not */
+	for (i = 0; i < dram->num_cs; i++) {
+		const struct mbus_dram_window *cs = dram->cs + i;
+		if (cs->base <= phyaddr && phyaddr <= (cs->base + cs->size)) {
+			*trg_id = dram->mbus_dram_target_id;
+			*attr = cs->mbus_attr;
+			break;
+		}
+	}
+	if (i == dram->num_cs) {
+		pr_err("%s: Invalid dram address 0x%x\n", __func__, phyaddr);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static __init int mvebu_mbus_debugfs_init(void)
 {
 	struct mvebu_mbus_state *s = &mbus_state;
@@ -828,25 +925,73 @@ static __init int mvebu_mbus_debugfs_init(void)
 }
 fs_initcall(mvebu_mbus_debugfs_init);
 
-int __init mvebu_mbus_init(const char *soc, phys_addr_t mbuswins_phys_base,
-			   size_t mbuswins_size,
-			   phys_addr_t sdramwins_phys_base,
-			   size_t sdramwins_size)
+static int mvebu_mbus_suspend(void)
 {
-	struct mvebu_mbus_state *mbus = &mbus_state;
-	const struct of_device_id *of_id;
+	struct mvebu_mbus_state *s = &mbus_state;
 	int win;
 
-	for (of_id = of_mvebu_mbus_ids; of_id->compatible; of_id++)
-		if (!strcmp(of_id->compatible, soc))
-			break;
-
-	if (!of_id->compatible) {
-		pr_err("mvebu-mbus: could not find a matching SoC family\n");
+	if (!s->mbusbridge_base)
 		return -ENODEV;
+
+	for (win = 0; win < s->soc->num_wins; win++) {
+		void __iomem *addr = s->mbuswins_base +
+			s->soc->win_cfg_offset(win);
+
+		s->wins[win].base = readl(addr + WIN_BASE_OFF);
+		s->wins[win].ctrl = readl(addr + WIN_CTRL_OFF);
+
+		if (mvebu_mbus_window_is_remappable(s, win)) {
+			s->wins[win].remap_lo = readl(addr + WIN_REMAP_LO_OFF);
+			s->wins[win].remap_hi = readl(addr + WIN_REMAP_HI_OFF);
+		}
 	}
 
-	mbus->soc = of_id->data;
+	s->mbus_bridge_ctrl = readl(s->mbusbridge_base +
+				    MBUS_BRIDGE_CTRL_OFF);
+	s->mbus_bridge_base = readl(s->mbusbridge_base +
+				    MBUS_BRIDGE_BASE_OFF);
+
+	return 0;
+}
+
+static void mvebu_mbus_resume(void)
+{
+	struct mvebu_mbus_state *s = &mbus_state;
+	int win;
+
+	writel(s->mbus_bridge_ctrl,
+	       s->mbusbridge_base + MBUS_BRIDGE_CTRL_OFF);
+	writel(s->mbus_bridge_base,
+	       s->mbusbridge_base + MBUS_BRIDGE_BASE_OFF);
+
+	for (win = 0; win < s->soc->num_wins; win++) {
+		void __iomem *addr = s->mbuswins_base +
+			s->soc->win_cfg_offset(win);
+
+		writel(s->wins[win].base, addr + WIN_BASE_OFF);
+		writel(s->wins[win].ctrl, addr + WIN_CTRL_OFF);
+
+		if (mvebu_mbus_window_is_remappable(s, win)) {
+			writel(s->wins[win].remap_lo, addr + WIN_REMAP_LO_OFF);
+			writel(s->wins[win].remap_hi, addr + WIN_REMAP_HI_OFF);
+		}
+	}
+}
+
+struct syscore_ops mvebu_mbus_syscore_ops = {
+	.suspend	= mvebu_mbus_suspend,
+	.resume		= mvebu_mbus_resume,
+};
+
+static int __init mvebu_mbus_common_init(struct mvebu_mbus_state *mbus,
+					 phys_addr_t mbuswins_phys_base,
+					 size_t mbuswins_size,
+					 phys_addr_t sdramwins_phys_base,
+					 size_t sdramwins_size,
+					 phys_addr_t mbusbridge_phys_base,
+					 size_t mbusbridge_size)
+{
+	int win;
 
 	mbus->mbuswins_base = ioremap(mbuswins_phys_base, mbuswins_size);
 	if (!mbus->mbuswins_base)
@@ -858,13 +1003,354 @@ int __init mvebu_mbus_init(const char *soc, phys_addr_t mbuswins_phys_base,
 		return -ENOMEM;
 	}
 
-	if (of_find_compatible_node(NULL, NULL, "marvell,coherency-fabric"))
-		mbus->hw_io_coherency = 1;
+	mbus->sdramwins_phys_base = sdramwins_phys_base;
+
+	if (mbusbridge_phys_base) {
+		mbus->mbusbridge_base = ioremap(mbusbridge_phys_base,
+						mbusbridge_size);
+		if (!mbus->mbusbridge_base) {
+			iounmap(mbus->sdramwins_base);
+			iounmap(mbus->mbuswins_base);
+			return -ENOMEM;
+		}
+	} else
+		mbus->mbusbridge_base = NULL;
 
 	for (win = 0; win < mbus->soc->num_wins; win++)
 		mvebu_mbus_disable_window(mbus, win);
 
 	mbus->soc->setup_cpu_target(mbus);
 
+	register_syscore_ops(&mvebu_mbus_syscore_ops);
+
+	return 0;
+}
+
+int __init mvebu_mbus_init(const char *soc, phys_addr_t mbuswins_phys_base,
+			   size_t mbuswins_size,
+			   phys_addr_t sdramwins_phys_base,
+			   size_t sdramwins_size)
+{
+	const struct of_device_id *of_id;
+
+	for (of_id = of_mvebu_mbus_ids; of_id->compatible; of_id++)
+		if (!strcmp(of_id->compatible, soc))
+			break;
+
+	if (!of_id->compatible) {
+		pr_err("could not find a matching SoC family\n");
+		return -ENODEV;
+	}
+
+	mbus_state.soc = of_id->data;
+
+	return mvebu_mbus_common_init(&mbus_state,
+			mbuswins_phys_base,
+			mbuswins_size,
+			sdramwins_phys_base,
+			sdramwins_size, 0, 0);
+}
+
+#ifdef CONFIG_OF
+/*
+ * The window IDs in the ranges DT property have the following format:
+ *  - bits 28 to 31: MBus custom field
+ *  - bits 24 to 27: window target ID
+ *  - bits 16 to 23: window attribute ID
+ *  - bits  0 to 15: unused
+ */
+#define CUSTOM(id) (((id) & 0xF0000000) >> 24)
+#define TARGET(id) (((id) & 0x0F000000) >> 24)
+#define ATTR(id)   (((id) & 0x00FF0000) >> 16)
+
+static int __init mbus_dt_setup_win(struct mvebu_mbus_state *mbus,
+				    u32 base, u32 size,
+				    u8 target, u8 attr)
+{
+	if (!mvebu_mbus_window_conflicts(mbus, base, size, target, attr)) {
+		pr_err("cannot add window '%04x:%04x', conflicts with another window\n",
+		       target, attr);
+		return -EBUSY;
+	}
+
+	if (mvebu_mbus_alloc_window(mbus, base, size, MVEBU_MBUS_NO_REMAP,
+				    target, attr)) {
+		pr_err("cannot add window '%04x:%04x', too many windows\n",
+		       target, attr);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int
+mbus_parse_ranges(struct device_node *node,
+		  int *addr_cells, int *c_addr_cells, int *c_size_cells,
+		  int *cell_count, const __be32 **ranges_start,
+		  const __be32 **ranges_end)
+{
+	const __be32 *prop;
+	int ranges_len, tuple_len;
+
+	/* Allow a node with no 'ranges' property */
+	*ranges_start = of_get_property(node, "ranges", &ranges_len);
+	if (*ranges_start == NULL) {
+		*addr_cells = *c_addr_cells = *c_size_cells = *cell_count = 0;
+		*ranges_start = *ranges_end = NULL;
+		return 0;
+	}
+	*ranges_end = *ranges_start + ranges_len / sizeof(__be32);
+
+	*addr_cells = of_n_addr_cells(node);
+
+	prop = of_get_property(node, "#address-cells", NULL);
+	*c_addr_cells = be32_to_cpup(prop);
+
+	prop = of_get_property(node, "#size-cells", NULL);
+	*c_size_cells = be32_to_cpup(prop);
+
+	*cell_count = *addr_cells + *c_addr_cells + *c_size_cells;
+	tuple_len = (*cell_count) * sizeof(__be32);
+
+	if (ranges_len % tuple_len) {
+		pr_warn("malformed ranges entry '%s'\n", node->name);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int __init mbus_dt_setup(struct mvebu_mbus_state *mbus,
+				struct device_node *np)
+{
+	int addr_cells, c_addr_cells, c_size_cells;
+	int i, ret, cell_count;
+	const __be32 *r, *ranges_start, *ranges_end;
+
+	ret = mbus_parse_ranges(np, &addr_cells, &c_addr_cells,
+				&c_size_cells, &cell_count,
+				&ranges_start, &ranges_end);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0, r = ranges_start; r < ranges_end; r += cell_count, i++) {
+		u32 windowid, base, size;
+		u8 target, attr;
+
+		/*
+		 * An entry with a non-zero custom field do not
+		 * correspond to a static window, so skip it.
+		 */
+		windowid = of_read_number(r, 1);
+		if (CUSTOM(windowid))
+			continue;
+
+		target = TARGET(windowid);
+		attr = ATTR(windowid);
+
+		base = of_read_number(r + c_addr_cells, addr_cells);
+		size = of_read_number(r + c_addr_cells + addr_cells,
+				      c_size_cells);
+		ret = mbus_dt_setup_win(mbus, base, size, target, attr);
+		if (ret < 0)
+			return ret;
+	}
+	return 0;
+}
+
+static void __init mvebu_mbus_get_pcie_resources(struct device_node *np,
+						 struct resource *mem,
+						 struct resource *io)
+{
+	u32 reg[2];
+	int ret;
+
+	/*
+	 * These are optional, so we clear them and they'll
+	 * be zero if they are missing from the DT.
+	 */
+	memset(mem, 0, sizeof(struct resource));
+	memset(io, 0, sizeof(struct resource));
+
+	ret = of_property_read_u32_array(np, "pcie-mem-aperture", reg, ARRAY_SIZE(reg));
+	if (!ret) {
+		mem->start = reg[0];
+		mem->end = mem->start + reg[1];
+		mem->flags = IORESOURCE_MEM;
+	}
+
+	ret = of_property_read_u32_array(np, "pcie-io-aperture", reg, ARRAY_SIZE(reg));
+	if (!ret) {
+		io->start = reg[0];
+		io->end = io->start + reg[1];
+		io->flags = IORESOURCE_IO;
+	}
+}
+
+int __init mvebu_mbus_dt_init(bool is_coherent)
+{
+	struct resource mbuswins_res, sdramwins_res, mbusbridge_res;
+	struct device_node *np, *controller;
+	const struct of_device_id *of_id;
+	const __be32 *prop;
+	int ret;
+
+	np = of_find_matching_node(NULL, of_mvebu_mbus_ids);
+	if (!np) {
+		pr_err("could not find a matching SoC family\n");
+		return -ENODEV;
+	}
+
+	of_id = of_match_node(of_mvebu_mbus_ids, np);
+	mbus_state.soc = of_id->data;
+
+	prop = of_get_property(np, "controller", NULL);
+	if (!prop) {
+		pr_err("required 'controller' property missing\n");
+		return -EINVAL;
+	}
+
+	controller = of_find_node_by_phandle(be32_to_cpup(prop));
+	if (!controller) {
+		pr_err("could not find an 'mbus-controller' node\n");
+		return -ENODEV;
+	}
+
+	if (of_address_to_resource(controller, 0, &mbuswins_res)) {
+		pr_err("cannot get MBUS register address\n");
+		return -EINVAL;
+	}
+
+	if (of_address_to_resource(controller, 1, &sdramwins_res)) {
+		pr_err("cannot get SDRAM register address\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Set the resource to 0 so that it can be left unmapped by
+	 * mvebu_mbus_common_init() if the DT doesn't carry the
+	 * necessary information. This is needed to preserve backward
+	 * compatibility.
+	 */
+	memset(&mbusbridge_res, 0, sizeof(mbusbridge_res));
+
+	if (mbus_state.soc->has_mbus_bridge) {
+		if (of_address_to_resource(controller, 2, &mbusbridge_res))
+			pr_warn(FW_WARN "deprecated mbus-mvebu Device Tree, suspend/resume will not work\n");
+	}
+
+	mbus_state.hw_io_coherency = is_coherent;
+
+	/* Get optional pcie-{mem,io}-aperture properties */
+	mvebu_mbus_get_pcie_resources(np, &mbus_state.pcie_mem_aperture,
+					  &mbus_state.pcie_io_aperture);
+
+	ret = mvebu_mbus_common_init(&mbus_state,
+				     mbuswins_res.start,
+				     resource_size(&mbuswins_res),
+				     sdramwins_res.start,
+				     resource_size(&sdramwins_res),
+				     mbusbridge_res.start,
+				     resource_size(&mbusbridge_res));
+	if (ret)
+		return ret;
+
+	/* Setup statically declared windows in the DT */
+	return mbus_dt_setup(&mbus_state, np);
+}
+
+int mvebu_mbus_win_addr_get(u8 target_id, u8 attribute, u32 *phy_base, u32 *size)
+{
+	int addr_cells, c_addr_cells, c_size_cells;
+	int i, ret, cell_count;
+	const __be32 *r, *ranges_start, *ranges_end;
+	struct device_node *np;
+
+	np = of_find_matching_node(NULL, of_mvebu_mbus_ids);
+	if (!np) {
+		pr_err("could not find a matching SoC family\n");
+		return -ENODEV;
+	}
+
+	ret = mbus_parse_ranges(np, &addr_cells, &c_addr_cells,
+				&c_size_cells, &cell_count,
+				&ranges_start, &ranges_end);
+	if (ret < 0)
+		return ret;
+
+	*phy_base = 0;
+	*size = 0;
+	for (i = 0, r = ranges_start; r < ranges_end; r += cell_count, i++) {
+		u32 windowid;
+		u8 target, attr;
+
+		/*
+		 * An entry with a non-zero custom field do not
+		 * correspond to a static window, so skip it.
+		 */
+		windowid = of_read_number(r, 1);
+		if (CUSTOM(windowid))
+			continue;
+
+		target = TARGET(windowid);
+		attr = ATTR(windowid);
+		if (target_id != target || attr != attribute)
+			continue;
+
+		*phy_base = of_read_number(r + c_addr_cells, addr_cells);
+		*size = of_read_number(r + c_addr_cells + addr_cells,
+				      c_size_cells);
+		break;
+	}
 	return 0;
 }
+
+#ifdef MBUS_DEBUG
+void mbus_debug_window()
+{
+	void __iomem *win_addr = mbus_state.mbuswins_base;
+	int i, j;
+
+	pr_info("----------- mbus window -----------\n");
+
+	/* win 0-7 has 4reg: ctrl, base, remap_l,remap_h */
+	for (i = 0; i <= 7; i++) {
+
+		pr_info("WIN%d\n", i);
+		for (j = 0; j < 4; j++) {
+
+			pr_info("\twin(%d,%d): %p\t 0x%x\n", i, j,
+			    win_addr, readl(win_addr));
+
+			win_addr += 4;
+		}
+	}
+
+	pr_info("\nINTERREGS_WIN(%d): %p\t 0x%x\n", i++, win_addr,
+							      readl(win_addr));
+	win_addr += 4;
+	pr_info("\nSYNC_BARIER_WIN(%d): %p\t 0x%x\n\n", i++, win_addr,
+							      readl(win_addr));
+	win_addr += 4;
+
+	/* hole */
+	win_addr += 8;
+
+	/* win 8-19 has 2reg: ctrl, base */
+	for (i = 8; i <= 19; i++) {
+
+		pr_info("WIN%d\n", i);
+		for (j = 0; j < 2; j++) {
+
+			pr_info("\twin(%d,%d): %p\t 0x%x\n", i, j,
+			    win_addr, readl(win_addr));
+
+			win_addr += 4;
+		}
+	}
+
+	pr_info("\n");
+
+	/* TODO: sdramwins_base */
+
+}
+#endif /* MBUS_DEBUG */
+#endif
diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig
index 57323fd15ec9..71f0fdee14df 100644
--- a/drivers/clk/mvebu/Kconfig
+++ b/drivers/clk/mvebu/Kconfig
@@ -6,3 +6,6 @@ config MVEBU_CLK_CPU
 
 config MVEBU_CLK_GATING
        bool
+
+config MVEBU_CLK_COREDIV
+	bool
diff --git a/drivers/clk/mvebu/Makefile b/drivers/clk/mvebu/Makefile
index 58df3dc49363..2eed72b60711 100644
--- a/drivers/clk/mvebu/Makefile
+++ b/drivers/clk/mvebu/Makefile
@@ -1,3 +1,4 @@
 obj-$(CONFIG_MVEBU_CLK_CORE) 	+= clk.o clk-core.o
 obj-$(CONFIG_MVEBU_CLK_CPU) 	+= clk-cpu.o
 obj-$(CONFIG_MVEBU_CLK_GATING) 	+= clk-gating-ctrl.o
+obj-$(CONFIG_MVEBU_CLK_COREDIV)	+= clk-corediv.o
diff --git a/drivers/clk/mvebu/clk-core.c b/drivers/clk/mvebu/clk-core.c
index e5c477b49934..72b730ee7358 100644
--- a/drivers/clk/mvebu/clk-core.c
+++ b/drivers/clk/mvebu/clk-core.c
@@ -314,6 +314,235 @@ static const struct core_clocks armada_xp_core_clocks = {
 
 #endif /* CONFIG_MACH_ARMADA_370_XP */
 
+#ifdef CONFIG_MACH_ARMADA_375
+/*
+ * For Armada 375 Sample At Reset the CPU, DDR and L2 clock are all
+ * defined in the same time
+ *
+ * SAR0[21:17]   : CPU frequency    DDR frequency   L2 frequency
+ *		 6   =  400 MHz	    400 MHz	    200 MHz
+ *		 15  =  600 MHz	    600 MHz	    300 MHz
+ *		 21  =  800 MHz	    534 MHz	    400 MHz
+ *		 25  = 1000 MHz	    500 MHz	    500 MHz
+ *		 others reserved.
+ *
+ * SAR0[22]   : TCLK frequency
+ *		 0 = 166 MHz
+ *		 1 = 200 MHz
+ */
+
+#define	    SAR1_A375_TCLK_FREQ_OPT		22
+#define	    SAR1_A375_TCLK_FREQ_OPT_MASK	0x1
+#define	    SAR1_A375_CPU_DDR_L2_FREQ_OPT	17
+#define	    SAR1_A375_CPU_DDR_L2_FREQ_OPT_MASK	0x1F
+
+static const u32 __initconst armada_375_tclk_frequencies[] = {
+	166000000,
+	200000000,
+};
+
+static u32 __init armada_375_get_tclk_freq(void __iomem *sar)
+{
+	u8 tclk_freq_select = 0;
+
+	tclk_freq_select = ((readl(sar) >> SAR1_A375_TCLK_FREQ_OPT) &
+			    SAR1_A375_TCLK_FREQ_OPT_MASK);
+	return armada_375_tclk_frequencies[tclk_freq_select];
+}
+
+static const u32 __initconst armada_375_cpu_frequencies[] = {
+	0, 0, 0, 0, 0, 0,
+	400000000,
+	0, 0, 0, 0, 0, 0, 0, 0,
+	600000000,
+	0, 0, 0, 0, 0,
+	800000000,
+	0, 0, 0,
+	1000000000,
+};
+
+static u32 __init armada_375_get_cpu_freq(void __iomem *sar)
+{
+	u32 cpu_freq;
+	u8 cpu_freq_select = 0;
+
+	cpu_freq_select = ((readl(sar) >> SAR1_A375_CPU_DDR_L2_FREQ_OPT) &
+			   SAR1_A375_CPU_DDR_L2_FREQ_OPT_MASK);
+	if (cpu_freq_select >= ARRAY_SIZE(armada_375_cpu_frequencies)) {
+		pr_err("CPU freq select unsupported %d\n", cpu_freq_select);
+		cpu_freq = 0;
+	} else
+		cpu_freq = armada_375_cpu_frequencies[cpu_freq_select];
+
+	return cpu_freq;
+}
+
+enum { A375_CPU_TO_DDR, A375_CPU_TO_L2};
+
+static const struct core_ratio __initconst armada_375_core_ratios[] = {
+	{ .id = A375_CPU_TO_L2,	 .name = "l2clk" },
+	{ .id = A375_CPU_TO_DDR, .name = "ddrclk" },
+};
+
+static const int __initconst armada_375_cpu_l2_ratios[32][2] = {
+	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+	{0, 1}, {0, 1}, {1, 2}, {0, 1},
+	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+	{0, 1}, {0, 1}, {0, 1}, {1, 2},
+	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+	{0, 1}, {1, 2}, {0, 1}, {0, 1},
+	{0, 1}, {1, 2}, {0, 1}, {0, 1},
+	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+};
+
+static const int __initconst armada_375_cpu_ddr_ratios[32][2] = {
+	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+	{0, 1}, {0, 1}, {1, 1}, {0, 1},
+	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+	{0, 1}, {0, 1}, {0, 1}, {2, 3},
+	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+	{0, 1}, {2, 3}, {0, 1}, {0, 1},
+	{0, 1}, {1, 2}, {0, 1}, {0, 1},
+	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+};
+
+static void __init armada_375_get_clk_ratio(
+	void __iomem *sar, int id, int *mult, int *div)
+{
+	u32 opt = ((readl(sar) >> SAR1_A375_CPU_DDR_L2_FREQ_OPT) &
+		SAR1_A375_CPU_DDR_L2_FREQ_OPT_MASK);
+
+	switch (id) {
+	case A375_CPU_TO_L2:
+		*mult = armada_375_cpu_l2_ratios[opt][0];
+		*div = armada_375_cpu_l2_ratios[opt][1];
+		break;
+	case A375_CPU_TO_DDR:
+		*mult = armada_375_cpu_ddr_ratios[opt][0];
+		*div = armada_375_cpu_ddr_ratios[opt][1];
+		break;
+	}
+}
+
+static const struct core_clocks armada_375_core_clocks = {
+	.get_tclk_freq = armada_375_get_tclk_freq,
+	.get_cpu_freq = armada_375_get_cpu_freq,
+	.get_clk_ratio = armada_375_get_clk_ratio,
+	.ratios = armada_375_core_ratios,
+	.num_ratios = ARRAY_SIZE(armada_375_core_ratios),
+};
+
+#endif /* CONFIG_MACH_ARMADA_375 */
+
+#ifdef CONFIG_MACH_ARMADA_380
+/*
+ * SAR[14:10] : Ratios between PCLK0, NBCLK, HCLK and DRAM clocks
+ *
+ * SAR[15]    : TCLK frequency
+ *		 0 = 250 MHz
+ *		 1 = 200 MHz
+ */
+
+#define	    SAR_A380_TCLK_FREQ_OPT              15
+#define	    SAR_A380_TCLK_FREQ_OPT_MASK	        0x1
+#define	    SAR_A380_CPU_DDR_L2_FREQ_OPT        10
+#define	    SAR_A380_CPU_DDR_L2_FREQ_OPT_MASK	0x1F
+
+static const u32 __initconst armada_380_tclk_frequencies[] = {
+	250000000,
+	200000000,
+};
+
+static u32 __init armada_380_get_tclk_freq(void __iomem *sar)
+{
+	u8 tclk_freq_select = 0;
+
+	tclk_freq_select = ((readl(sar) >> SAR_A380_TCLK_FREQ_OPT) &
+			    SAR_A380_TCLK_FREQ_OPT_MASK);
+	return armada_380_tclk_frequencies[tclk_freq_select];
+}
+
+static const u32 __initconst armada_380_cpu_frequencies[] = {
+	0, 0, 0, 0,
+	1066 * 1000 * 1000, 0, 0, 0,
+	1332 * 1000 * 1000, 0, 0, 0,
+	1600 * 1000 * 1000,
+};
+
+static u32 __init armada_380_get_cpu_freq(void __iomem *sar)
+{
+	u32 cpu_freq;
+	u8 cpu_freq_select = 0;
+
+	cpu_freq_select = ((readl(sar) >> SAR_A380_CPU_DDR_L2_FREQ_OPT) &
+			   SAR_A380_CPU_DDR_L2_FREQ_OPT_MASK);
+	if (cpu_freq_select >= ARRAY_SIZE(armada_380_cpu_frequencies)) {
+		pr_err("CPU freq select unsupported %d\n", cpu_freq_select);
+		cpu_freq = 0;
+	} else
+		cpu_freq = armada_380_cpu_frequencies[cpu_freq_select];
+
+	return cpu_freq;
+}
+
+enum { A380_CPU_TO_DDR, A380_CPU_TO_L2 };
+
+static const struct core_ratio __initconst armada_380_core_ratios[] = {
+	{ .id = A380_CPU_TO_L2,	 .name = "l2clk" },
+	{ .id = A380_CPU_TO_DDR, .name = "ddrclk" },
+};
+
+static const int __initconst armada_380_cpu_l2_ratios[32][2] = {
+	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+	{1, 2}, {0, 1}, {0, 1}, {0, 1},
+	{1, 2}, {0, 1}, {0, 1}, {0, 1},
+	{1, 2}, {0, 1}, {0, 1}, {0, 1},
+	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+};
+
+static const int __initconst armada_380_cpu_ddr_ratios[32][2] = {
+	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+	{1, 2}, {0, 1}, {0, 1}, {0, 1},
+	{1, 2}, {0, 1}, {0, 1}, {0, 1},
+	{1, 2}, {0, 1}, {0, 1}, {0, 1},
+	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+	{0, 1}, {0, 1}, {0, 1}, {0, 1},
+};
+
+static void __init armada_380_get_clk_ratio(
+	void __iomem *sar, int id, int *mult, int *div)
+{
+	u32 opt = ((readl(sar) >> SAR_A380_CPU_DDR_L2_FREQ_OPT) &
+		SAR_A380_CPU_DDR_L2_FREQ_OPT_MASK);
+
+	switch (id) {
+	case A380_CPU_TO_L2:
+		*mult = armada_380_cpu_l2_ratios[opt][0];
+		*div = armada_380_cpu_l2_ratios[opt][1];
+		break;
+	case A380_CPU_TO_DDR:
+		*mult = armada_380_cpu_ddr_ratios[opt][0];
+		*div = armada_380_cpu_ddr_ratios[opt][1];
+		break;
+	}
+}
+
+static const struct core_clocks armada_380_core_clocks = {
+	.get_tclk_freq = armada_380_get_tclk_freq,
+	.get_cpu_freq = armada_380_get_cpu_freq,
+	.get_clk_ratio = armada_380_get_clk_ratio,
+	.ratios = armada_380_core_ratios,
+	.num_ratios = ARRAY_SIZE(armada_380_core_ratios),
+};
+
+#endif /* CONFIG_MACH_ARMADA_380 */
+
+
 /*
  * Dove PLL sample-at-reset configuration
  *
@@ -642,6 +871,18 @@ static const __initdata struct of_device_id clk_core_match[] = {
 		.data = &armada_xp_core_clocks,
 	},
 #endif
+#ifdef CONFIG_MACH_ARMADA_375
+	{
+		.compatible = "marvell,armada-375-core-clock",
+		.data = &armada_375_core_clocks,
+	},
+#endif
+#ifdef CONFIG_MACH_ARMADA_380
+	{
+		.compatible = "marvell,armada-380-core-clock",
+		.data = &armada_380_core_clocks,
+	},
+#endif
 #ifdef CONFIG_ARCH_DOVE
 	{
 		.compatible = "marvell,dove-core-clock",
diff --git a/drivers/clk/mvebu/clk-corediv.c b/drivers/clk/mvebu/clk-corediv.c
new file mode 100644
index 000000000000..5cfa3a98f8ab
--- /dev/null
+++ b/drivers/clk/mvebu/clk-corediv.c
@@ -0,0 +1,261 @@
+/*
+ * MVEBU Core divider clock
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Ezequiel Garcia <ezequiel.garcia@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+
+#define CORE_CLK_DIV_RATIO_MASK		0xff
+
+struct clk_corediv_desc {
+	unsigned int mask;
+	unsigned int offset;
+	unsigned int fieldbit;
+};
+
+struct clk_corediv {
+	struct clk_hw hw;
+	void __iomem *reg;
+	struct clk_corediv_desc desc;
+	spinlock_t lock;
+};
+
+static struct clk_onecell_data clk_data;
+
+static u32 ratio_reload;
+static u32 enable_bit_offset;
+static u32 ratio_offset;
+
+static const struct clk_corediv_desc mvebu_corediv_desc[] __initconst = {
+	{ .mask = 0x3f, .offset = 8, .fieldbit = 1 }, /* NAND clock */
+};
+
+#define to_corediv_clk(p) container_of(p, struct clk_corediv, hw)
+
+static int clk_corediv_is_enabled(struct clk_hw *hwclk)
+{
+	struct clk_corediv *corediv = to_corediv_clk(hwclk);
+	struct clk_corediv_desc *desc = &corediv->desc;
+	u32 enable_mask = BIT(desc->fieldbit) << enable_bit_offset;
+
+	return !!(readl(corediv->reg) & enable_mask);
+}
+
+static int clk_corediv_enable(struct clk_hw *hwclk)
+{
+	struct clk_corediv *corediv = to_corediv_clk(hwclk);
+	struct clk_corediv_desc *desc = &corediv->desc;
+	unsigned long flags = 0;
+	u32 reg;
+
+	spin_lock_irqsave(&corediv->lock, flags);
+
+	reg = readl(corediv->reg);
+	reg |= (BIT(desc->fieldbit) << enable_bit_offset);
+	writel(reg, corediv->reg);
+
+	spin_unlock_irqrestore(&corediv->lock, flags);
+
+	return 0;
+}
+
+static void clk_corediv_disable(struct clk_hw *hwclk)
+{
+	struct clk_corediv *corediv = to_corediv_clk(hwclk);
+	struct clk_corediv_desc *desc = &corediv->desc;
+	unsigned long flags = 0;
+	u32 reg;
+
+	spin_lock_irqsave(&corediv->lock, flags);
+
+	reg = readl(corediv->reg);
+	reg &= ~(BIT(desc->fieldbit) << enable_bit_offset);
+	writel(reg, corediv->reg);
+
+	spin_unlock_irqrestore(&corediv->lock, flags);
+}
+
+static unsigned long clk_corediv_recalc_rate(struct clk_hw *hwclk,
+					 unsigned long parent_rate)
+{
+	struct clk_corediv *corediv = to_corediv_clk(hwclk);
+	struct clk_corediv_desc *desc = &corediv->desc;
+	u32 reg, div;
+
+	reg = readl(corediv->reg + ratio_offset);
+	div = (reg >> desc->offset) & desc->mask;
+	return parent_rate / div;
+}
+
+static long clk_corediv_round_rate(struct clk_hw *hwclk, unsigned long rate,
+			       unsigned long *parent_rate)
+{
+	u32 div;
+
+	div = DIV_ROUND_UP(*parent_rate, rate);
+
+	return *parent_rate / div;
+}
+
+static int clk_corediv_set_rate(struct clk_hw *hwclk, unsigned long rate,
+			    unsigned long parent_rate)
+{
+	struct clk_corediv *corediv = to_corediv_clk(hwclk);
+	struct clk_corediv_desc *desc = &corediv->desc;
+	unsigned long flags = 0;
+	u32 reg, div;
+
+	div = parent_rate / rate;
+
+	spin_lock_irqsave(&corediv->lock, flags);
+
+	/* Write new divider to the divider ratio register */
+	reg = readl(corediv->reg + ratio_offset);
+	reg &= ~(desc->mask << desc->offset);
+	reg |= (div & desc->mask) << desc->offset;
+	writel(reg, corediv->reg + ratio_offset);
+
+	/* Set reload-force for this clock */
+	reg = readl(corediv->reg) | BIT(desc->fieldbit);
+	writel(reg, corediv->reg);
+
+	/* Now trigger the clock update */
+	reg = readl(corediv->reg) | ratio_reload;
+	writel(reg, corediv->reg);
+
+	/*
+	 * Wait for clocks to settle down, and then clear all the
+	 * ratios request and the reload request.
+	 */
+	udelay(1000);
+	reg &= ~(CORE_CLK_DIV_RATIO_MASK | ratio_reload);
+	writel(reg, corediv->reg);
+	udelay(1000);
+
+	spin_unlock_irqrestore(&corediv->lock, flags);
+
+	return 0;
+}
+
+static void __init
+mvebu_corediv_clk_init(struct device_node *node, const struct clk_ops *ops)
+{
+	struct clk_init_data init;
+	struct clk_corediv *corediv;
+	struct clk **clks;
+	void __iomem *base;
+	const char *parent_name;
+	const char *clk_name;
+	int i;
+
+	base = of_iomap(node, 0);
+	if (WARN_ON(!base))
+		return;
+
+	parent_name = of_clk_get_parent_name(node, 0);
+
+	clk_data.clk_num = ARRAY_SIZE(mvebu_corediv_desc);
+
+	/* clks holds the clock array */
+	clks = kcalloc(clk_data.clk_num, sizeof(struct clk *),
+				GFP_KERNEL);
+	if (WARN_ON(!clks))
+		goto err_unmap;
+	/* corediv holds the clock specific array */
+	corediv = kcalloc(clk_data.clk_num, sizeof(struct clk_corediv),
+				GFP_KERNEL);
+	if (WARN_ON(!corediv))
+		goto err_free_clks;
+
+	spin_lock_init(&corediv->lock);
+
+	for (i = 0; i < clk_data.clk_num; i++) {
+		of_property_read_string_index(node, "clock-output-names",
+					      i, &clk_name);
+		init.num_parents = 1;
+		init.parent_names = &parent_name;
+		init.name = clk_name;
+		init.ops = ops;
+		init.flags = 0;
+
+		corediv[i].desc = mvebu_corediv_desc[i];
+		corediv[i].reg = base;
+		corediv[i].hw.init = &init;
+
+		clks[i] = clk_register(NULL, &corediv[i].hw);
+		WARN_ON(IS_ERR(clks[i]));
+	}
+
+	clk_data.clks = clks;
+	of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
+	return;
+
+err_free_clks:
+	kfree(clks);
+err_unmap:
+	iounmap(base);
+}
+
+static void __init mvebu_corediv_clk_a370_init(struct device_node *node)
+{
+	static const struct clk_ops corediv_ops = {
+		.enable = clk_corediv_enable,
+		.disable = clk_corediv_disable,
+		.is_enabled = clk_corediv_is_enabled,
+		.recalc_rate = clk_corediv_recalc_rate,
+		.round_rate = clk_corediv_round_rate,
+		.set_rate = clk_corediv_set_rate,
+	};
+
+	enable_bit_offset = 24;
+	ratio_offset = 8;
+	ratio_reload = BIT(8);
+
+	mvebu_corediv_clk_init(node, &corediv_ops);
+}
+CLK_OF_DECLARE(mvebu_corediv_a370_clk, "marvell,armada-370-corediv-clock",
+	       mvebu_corediv_clk_a370_init);
+
+static void __init mvebu_corediv_clk_a375_init(struct device_node *node)
+{
+	static const struct clk_ops corediv_ops = {
+		.recalc_rate = clk_corediv_recalc_rate,
+		.round_rate = clk_corediv_round_rate,
+		.set_rate = clk_corediv_set_rate,
+	};
+
+	ratio_offset = 4;
+	ratio_reload = BIT(8);
+
+	mvebu_corediv_clk_init(node, &corediv_ops);
+}
+CLK_OF_DECLARE(mvebu_corediv_a375_clk, "marvell,armada-375-corediv-clock",
+	       mvebu_corediv_clk_a375_init);
+
+static void __init mvebu_corediv_clk_a38x_init(struct device_node *node)
+{
+	static const struct clk_ops corediv_ops = {
+		.recalc_rate = clk_corediv_recalc_rate,
+		.round_rate = clk_corediv_round_rate,
+		.set_rate = clk_corediv_set_rate,
+	};
+
+	ratio_offset = 4;
+	ratio_reload = BIT(8);
+
+	mvebu_corediv_clk_init(node, &corediv_ops);
+}
+CLK_OF_DECLARE(mvebu_corediv_a38x_clk, "marvell,armada-38x-corediv-clock",
+	       mvebu_corediv_clk_a38x_init);
diff --git a/drivers/clk/mvebu/clk-gating-ctrl.c b/drivers/clk/mvebu/clk-gating-ctrl.c
index ebf141d4374b..7cf5aff23be2 100644
--- a/drivers/clk/mvebu/clk-gating-ctrl.c
+++ b/drivers/clk/mvebu/clk-gating-ctrl.c
@@ -17,11 +17,14 @@
 #include <linux/clk/mvebu.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/syscore_ops.h>
 
 struct mvebu_gating_ctrl {
 	spinlock_t lock;
 	struct clk **gates;
 	int num_gates;
+	void __iomem *base;
+	u32 saved_reg;
 };
 
 struct mvebu_soc_descr {
@@ -32,10 +35,11 @@ struct mvebu_soc_descr {
 
 #define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
 
+static struct mvebu_gating_ctrl *ctrl;
+
 static struct clk *mvebu_clk_gating_get_src(
 	struct of_phandle_args *clkspec, void *data)
 {
-	struct mvebu_gating_ctrl *ctrl = (struct mvebu_gating_ctrl *)data;
 	int n;
 
 	if (clkspec->args_count < 1)
@@ -50,15 +54,35 @@ static struct clk *mvebu_clk_gating_get_src(
 	return ERR_PTR(-ENODEV);
 }
 
+static int mvebu_clk_gating_suspend(void)
+{
+	ctrl->saved_reg = readl(ctrl->base);
+	return 0;
+}
+
+static void mvebu_clk_gating_resume(void)
+{
+	writel(ctrl->saved_reg, ctrl->base);
+}
+
+static struct syscore_ops clk_gate_syscore_ops = {
+	.suspend = mvebu_clk_gating_suspend,
+	.resume = mvebu_clk_gating_resume,
+};
+
 static void __init mvebu_clk_gating_setup(
 	struct device_node *np, const struct mvebu_soc_descr *descr)
 {
-	struct mvebu_gating_ctrl *ctrl;
 	struct clk *clk;
 	void __iomem *base;
 	const char *default_parent = NULL;
 	int n;
 
+	if (ctrl) {
+		pr_err("mvebu-clk-gating: cannot instantiate more than one gatable clock device\n");
+		return;
+	}
+
 	base = of_iomap(np, 0);
 
 	clk = of_clk_get(np, 0);
@@ -73,6 +97,8 @@ static void __init mvebu_clk_gating_setup(
 
 	spin_lock_init(&ctrl->lock);
 
+	ctrl->base = base;
+
 	/*
 	 * Count, allocate, and register clock gates
 	 */
@@ -106,6 +132,8 @@ static void __init mvebu_clk_gating_setup(
 		WARN_ON(IS_ERR(ctrl->gates[n]));
 	}
 	of_clk_add_provider(np, mvebu_clk_gating_get_src, ctrl);
+
+	register_syscore_ops(&clk_gate_syscore_ops);
 }
 
 /*
@@ -119,8 +147,8 @@ static const struct mvebu_soc_descr __initconst armada_370_gating_descr[] = {
 	{ "pex1_en", NULL,  2 },
 	{ "ge1", NULL, 3 },
 	{ "ge0", NULL, 4 },
-	{ "pex0", NULL, 5 },
-	{ "pex1", NULL, 9 },
+	{ "pex0", "pex0_en", 5 },
+	{ "pex1", "pex1_en", 9 },
 	{ "sata0", NULL, 15 },
 	{ "sdio", NULL, 17 },
 	{ "tdm", NULL, 25 },
@@ -130,6 +158,73 @@ static const struct mvebu_soc_descr __initconst armada_370_gating_descr[] = {
 };
 #endif
 
+
+#ifdef CONFIG_MACH_ARMADA_375
+static const struct mvebu_soc_descr __initconst armada_375_gating_descr[] = {
+	{ "tdmmc", NULL, 0 },
+	{ "xpon", NULL, 1 },
+	{ "mu", NULL, 2 },
+	{ "pp", NULL, 3 },
+	{ "ptp", NULL, 4 },
+	{ "pex0", NULL, 5 },
+	{ "pex1", NULL, 6 },
+	{ "audio", NULL, 8 },
+	{ "isi_slic", NULL, 9 },
+	{ "zsi_slic", NULL, 10 },
+	{ "nd_clk", "nand", 11 },
+	{ "switch", NULL, 12 },
+	{ "ssi_slic", NULL, 13 },
+	{ "sata0_link", "sata0_core", 14 },
+	{ "sata0_core", NULL, 15 },
+	{ "usb3", NULL, 16 },
+	{ "sdio", NULL, 17 },
+	{ "usb", NULL, 18 },
+	{ "gop", NULL, 19 },
+	{ "sata1_link", "sata1_core", 20 },
+	{ "sata1_core", NULL, 21 },
+	{ "xor0", NULL, 22 },
+	{ "xor1", NULL, 23 },
+	{ "copro", NULL, 24 },
+	{ "tdm", NULL, 25 },
+	{ "usb_p1", NULL, 26 },
+	{ "crypto0_enc", NULL, 28 },
+	{ "crypto0_core", NULL, 29 },
+	{ "crypto1_enc", NULL, 30 },
+	{ "crypto1_core", NULL, 31 },
+	{ }
+};
+#endif
+
+#ifdef CONFIG_MACH_ARMADA_380
+static const struct mvebu_soc_descr __initconst armada_380_gating_descr[] = {
+	{ "audio", NULL, 0 },
+	{ "ge2", NULL, 2 },
+	{ "ge1", NULL, 3 },
+	{ "ge0", NULL, 4 },
+	{ "pex1", NULL, 5 },
+	{ "pex2", NULL, 6 },
+	{ "pex3", NULL, 7 },
+	{ "pex0", NULL, 8 },
+	{ "usb3h0", NULL, 9 },
+	{ "usb3h1", NULL, 10 },
+	{ "usb3d", NULL, 11 },
+	{ "bm", NULL, 13 },
+	{ "crypto0z", NULL, 14 },
+	{ "sata0", NULL, 15 },
+	{ "crypto1z", NULL, 16 },
+	{ "sdio", NULL, 17 },
+	{ "usb2", NULL, 18 },
+	{ "crypto1", NULL, 21 },
+	{ "xor0", NULL, 22 },
+	{ "crypto0", NULL, 23 },
+	{ "tdm", NULL, 25 },
+	{ "xor1", NULL, 28 },
+	{ "pnc", NULL, 29 },
+	{ "sata1", NULL, 30 },
+	{ }
+};
+#endif
+
 #ifdef CONFIG_MACH_ARMADA_XP
 static const struct mvebu_soc_descr __initconst armada_xp_gating_descr[] = {
 	{ "audio", NULL, 0 },
@@ -137,10 +232,14 @@ static const struct mvebu_soc_descr __initconst armada_xp_gating_descr[] = {
 	{ "ge2", NULL,  2 },
 	{ "ge1", NULL, 3 },
 	{ "ge0", NULL, 4 },
-	{ "pex0", NULL, 5 },
-	{ "pex1", NULL, 6 },
-	{ "pex2", NULL, 7 },
-	{ "pex3", NULL, 8 },
+	{ "pex00", NULL, 5 },
+	{ "pex01", NULL, 6 },
+	{ "pex02", NULL, 7 },
+	{ "pex03", NULL, 8 },
+	{ "pex10", NULL, 9 },
+	{ "pex11", NULL, 10 },
+	{ "pex12", NULL, 11 },
+	{ "pex13", NULL, 12 },
 	{ "bp", NULL, 13 },
 	{ "sata0lnk", NULL, 14 },
 	{ "sata0", "sata0lnk", 15 },
@@ -152,6 +251,8 @@ static const struct mvebu_soc_descr __initconst armada_xp_gating_descr[] = {
 	{ "xor0", NULL, 22 },
 	{ "crypto", NULL, 23 },
 	{ "tdm", NULL, 25 },
+	{ "pex20", NULL, 26 },
+	{ "pex30", NULL, 27 },
 	{ "xor1", NULL, 28 },
 	{ "sata1lnk", NULL, 29 },
 	{ "sata1", "sata1lnk", 30 },
@@ -213,6 +314,20 @@ static const __initdata struct of_device_id clk_gating_match[] = {
 	},
 #endif
 
+#ifdef CONFIG_MACH_ARMADA_375
+	{
+		.compatible = "marvell,armada-375-gating-clock",
+		.data = armada_375_gating_descr,
+	},
+#endif
+
+#ifdef CONFIG_MACH_ARMADA_380
+	{
+		.compatible = "marvell,armada-380-gating-clock",
+		.data = armada_380_gating_descr,
+	},
+#endif
+
 #ifdef CONFIG_MACH_ARMADA_XP
 	{
 		.compatible = "marvell,armada-xp-gating-clock",
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index f151c6cf27c3..525a2fad0868 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -24,6 +24,7 @@ config DW_APB_TIMER_OF
 
 config ARMADA_370_XP_TIMER
 	bool
+	select CLKSRC_OF
 
 config SUN4I_TIMER
 	bool
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 47a673070d70..82eaaebbf100 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -13,6 +13,24 @@
  *
  * Timer 0 is used as free-running clocksource, while timer 1 is
  * used as clock_event_device.
+ *
+ * ---
+ * Clocksource driver for Armada 370, Armada 375 and Armada XP SoC.
+ * This driver implements one compatible string for each SoC, given
+ * each has its own characteristics:
+ *
+ *   * Armada 370 has no 25 MHz fixed timer.
+ *
+ *   * Armada 375 has a non-usable 25 Mhz fixed timer, due to hardware
+ *     issues.
+ *
+ *   * Armada 380 has a 25 Mhz fixed timer.
+ *
+ *   * Armada XP cannot work properly without such 25 MHz fixed timer as
+ *     doing otherwise leads to using a clocksource whose frequency varies
+ *     when doing cpufreq frequency changes.
+ *
+ * See Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt
  */
 
 #include <linux/init.h>
@@ -31,17 +49,19 @@
 #include <asm/sched_clock.h>
 #include <asm/localtimer.h>
 #include <linux/percpu.h>
+#include <linux/syscore_ops.h>
+
 /*
  * Timer block registers.
  */
 #define TIMER_CTRL_OFF		0x0000
-#define  TIMER0_EN		 0x0001
-#define  TIMER0_RELOAD_EN	 0x0002
-#define  TIMER0_25MHZ            0x0800
+#define  TIMER0_EN		 BIT(0)
+#define  TIMER0_RELOAD_EN	 BIT(1)
+#define  TIMER0_25MHZ            BIT(11)
 #define  TIMER0_DIV(div)         ((div) << 19)
-#define  TIMER1_EN		 0x0004
-#define  TIMER1_RELOAD_EN	 0x0008
-#define  TIMER1_25MHZ            0x1000
+#define  TIMER1_EN		 BIT(2)
+#define  TIMER1_RELOAD_EN	 BIT(3)
+#define  TIMER1_25MHZ            BIT(12)
 #define  TIMER1_DIV(div)         ((div) << 22)
 #define TIMER_EVENTS_STATUS	0x0004
 #define  TIMER0_CLR_MASK         (~0x1)
@@ -71,6 +91,18 @@ static u32 ticks_per_jiffy;
 
 static struct clock_event_device __percpu **percpu_armada_370_xp_evt;
 
+static void timer_ctrl_clrset(u32 clr, u32 set)
+{
+	writel((readl(timer_base + TIMER_CTRL_OFF) & ~clr) | set,
+		timer_base + TIMER_CTRL_OFF);
+}
+
+static void local_timer_ctrl_clrset(u32 clr, u32 set)
+{
+	writel((readl(local_base + TIMER_CTRL_OFF) & ~clr) | set,
+		local_base + TIMER_CTRL_OFF);
+}
+
 static u32 notrace armada_370_xp_read_sched_clock(void)
 {
 	return ~readl(timer_base + TIMER0_VAL_OFF);
@@ -83,7 +115,6 @@ static int
 armada_370_xp_clkevt_next_event(unsigned long delta,
 				struct clock_event_device *dev)
 {
-	u32 u;
 	/*
 	 * Clear clockevent timer interrupt.
 	 */
@@ -97,11 +128,8 @@ armada_370_xp_clkevt_next_event(unsigned long delta,
 	/*
 	 * Enable the timer.
 	 */
-	u = readl(local_base + TIMER_CTRL_OFF);
-	u = ((u & ~TIMER0_RELOAD_EN) | TIMER0_EN |
-	     TIMER0_DIV(TIMER_DIVIDER_SHIFT));
-	writel(u, local_base + TIMER_CTRL_OFF);
-
+	local_timer_ctrl_clrset(TIMER0_RELOAD_EN,
+				TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT));
 	return 0;
 }
 
@@ -109,8 +137,6 @@ static void
 armada_370_xp_clkevt_mode(enum clock_event_mode mode,
 			  struct clock_event_device *dev)
 {
-	u32 u;
-
 	if (mode == CLOCK_EVT_MODE_PERIODIC) {
 
 		/*
@@ -122,18 +148,14 @@ armada_370_xp_clkevt_mode(enum clock_event_mode mode,
 		/*
 		 * Enable timer.
 		 */
-
-		u = readl(local_base + TIMER_CTRL_OFF);
-
-		writel((u | TIMER0_EN | TIMER0_RELOAD_EN |
-			TIMER0_DIV(TIMER_DIVIDER_SHIFT)),
-			local_base + TIMER_CTRL_OFF);
+		local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN |
+					   TIMER0_EN |
+					   TIMER0_DIV(TIMER_DIVIDER_SHIFT));
 	} else {
 		/*
 		 * Disable timer.
 		 */
-		u = readl(local_base + TIMER_CTRL_OFF);
-		writel(u & ~TIMER0_EN, local_base + TIMER_CTRL_OFF);
+		local_timer_ctrl_clrset(TIMER0_EN, 0);
 
 		/*
 		 * ACK pending timer interrupt.
@@ -169,18 +191,18 @@ static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
  */
 static int __cpuinit armada_370_xp_timer_setup(struct clock_event_device *evt)
 {
-	u32 u;
+	u32 clr = 0, set = 0;
 	int cpu = smp_processor_id();
 
 	/* Use existing clock_event for cpu 0 */
 	if (!smp_processor_id())
 		return 0;
 
-	u = readl(local_base + TIMER_CTRL_OFF);
 	if (timer25Mhz)
-		writel(u | TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);
+		set = TIMER0_25MHZ;
 	else
-		writel(u & ~TIMER0_25MHZ, local_base + TIMER_CTRL_OFF);
+		clr = TIMER0_25MHZ;
+	local_timer_ctrl_clrset(clr, set);
 
 	evt->name		= armada_370_xp_clkevt.name;
 	evt->irq		= armada_370_xp_clkevt.irq;
@@ -210,42 +232,43 @@ static struct local_timer_ops armada_370_xp_local_timer_ops __cpuinitdata = {
 	.stop	=  armada_370_xp_timer_stop,
 };
 
-void __init armada_370_xp_timer_init(void)
+static u32 timer0_ctrl_reg, timer0_local_ctrl_reg;
+
+static int armada_370_xp_timer_suspend(void)
+{
+	timer0_ctrl_reg = readl(timer_base + TIMER_CTRL_OFF);
+	timer0_local_ctrl_reg = readl(local_base + TIMER_CTRL_OFF);
+	return 0;
+}
+
+static void armada_370_xp_timer_resume(void)
+{
+	writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
+	writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
+	writel(timer0_ctrl_reg, timer_base + TIMER_CTRL_OFF);
+	writel(timer0_local_ctrl_reg, local_base + TIMER_CTRL_OFF);
+}
+
+struct syscore_ops armada_370_xp_timer_syscore_ops = {
+	.suspend	= armada_370_xp_timer_suspend,
+	.resume		= armada_370_xp_timer_resume,
+};
+
+static void __init armada_370_xp_timer_common_init(struct device_node *np)
 {
-	u32 u;
-	struct device_node *np;
+	u32 clr = 0, set = 0;
 	int res;
 
-	np = of_find_compatible_node(NULL, NULL, "marvell,armada-370-xp-timer");
 	timer_base = of_iomap(np, 0);
 	WARN_ON(!timer_base);
 	local_base = of_iomap(np, 1);
 
-	if (of_find_property(np, "marvell,timer-25Mhz", NULL)) {
-		/* The fixed 25MHz timer is available so let's use it */
-		u = readl(local_base + TIMER_CTRL_OFF);
-		writel(u | TIMER0_25MHZ,
-		       local_base + TIMER_CTRL_OFF);
-		u = readl(timer_base + TIMER_CTRL_OFF);
-		writel(u | TIMER0_25MHZ,
-		       timer_base + TIMER_CTRL_OFF);
-		timer_clk = 25000000;
-	} else {
-		unsigned long rate = 0;
-		struct clk *clk = of_clk_get(np, 0);
-		WARN_ON(IS_ERR(clk));
-		rate =  clk_get_rate(clk);
-		u = readl(local_base + TIMER_CTRL_OFF);
-		writel(u & ~(TIMER0_25MHZ),
-		       local_base + TIMER_CTRL_OFF);
-
-		u = readl(timer_base + TIMER_CTRL_OFF);
-		writel(u & ~(TIMER0_25MHZ),
-		       timer_base + TIMER_CTRL_OFF);
-
-		timer_clk = rate / TIMER_DIVIDER;
-		timer25Mhz = false;
-	}
+	if (timer25Mhz)
+		set = TIMER0_25MHZ;
+	else
+		clr = TIMER0_25MHZ;
+	timer_ctrl_clrset(clr, set);
+	local_timer_ctrl_clrset(clr, set);
 
 	/*
 	 * We use timer 0 as clocksource, and private(local) timer 0
@@ -255,11 +278,6 @@ void __init armada_370_xp_timer_init(void)
 
 	ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;
 
-	/*
-	 * Set scale and timer for sched_clock.
-	 */
-	setup_sched_clock(armada_370_xp_read_sched_clock, 32, timer_clk);
-
 	/*
 	 * Setup free-running clocksource timer (interrupts
 	 * disabled).
@@ -267,10 +285,13 @@ void __init armada_370_xp_timer_init(void)
 	writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
 	writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
 
-	u = readl(timer_base + TIMER_CTRL_OFF);
+	timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN |
+			     TIMER0_DIV(TIMER_DIVIDER_SHIFT));
 
-	writel((u | TIMER0_EN | TIMER0_RELOAD_EN |
-		TIMER0_DIV(TIMER_DIVIDER_SHIFT)), timer_base + TIMER_CTRL_OFF);
+	/*
+	 * Set scale and timer for sched_clock.
+	 */
+	setup_sched_clock(armada_370_xp_read_sched_clock, 32, timer_clk);
 
 	clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
 			      "armada_370_xp_clocksource",
@@ -298,4 +319,58 @@ void __init armada_370_xp_timer_init(void)
 		local_timer_register(&armada_370_xp_local_timer_ops);
 #endif
 	}
+
+	register_syscore_ops(&armada_370_xp_timer_syscore_ops);
+}
+
+static void __init armada_xp_timer_init(struct device_node *np)
+{
+	struct clk *clk = of_clk_get_by_name(np, "fixed");
+
+	/* The 25Mhz fixed clock is mandatory, and must always be available */
+	BUG_ON(IS_ERR(clk));
+	timer_clk = clk_get_rate(clk);
+
+	armada_370_xp_timer_common_init(np);
+}
+CLOCKSOURCE_OF_DECLARE(armada_xp, "marvell,armada-xp-timer",
+		       armada_xp_timer_init);
+
+static void __init armada_370_timer_init(struct device_node *np)
+{
+	struct clk *clk = of_clk_get(np, 0);
+
+	BUG_ON(IS_ERR(clk));
+	timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
+	timer25Mhz = false;
+
+	armada_370_xp_timer_common_init(np);
+}
+CLOCKSOURCE_OF_DECLARE(armada_370, "marvell,armada-370-timer",
+		       armada_370_timer_init);
+
+static void __init armada_375_timer_init(struct device_node *np)
+{
+	struct clk *clk = of_clk_get_by_name(np, "fixed");
+
+	/* The 25Mhz fixed clock is mandatory, and must always be available */
+	BUG_ON(IS_ERR(clk));
+	timer_clk = clk_get_rate(clk);
+
+	armada_370_xp_timer_common_init(np);
+}
+CLOCKSOURCE_OF_DECLARE(armada_375, "marvell,armada-375-timer",
+		       armada_375_timer_init);
+
+static void __init armada_380_timer_init(struct device_node *np)
+{
+	struct clk *clk = of_clk_get_by_name(np, "fixed");
+
+	/* The 25Mhz fixed clock is mandatory, and must always be available */
+	BUG_ON(IS_ERR(clk));
+	timer_clk = clk_get_rate(clk);
+
+	armada_370_xp_timer_common_init(np);
 }
+CLOCKSOURCE_OF_DECLARE(armada_380, "marvell,armada-380-timer",
+		       armada_380_timer_init);
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index c4cc27e5c8a5..a17189bf8dee 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -39,4 +39,10 @@ config CPU_IDLE_CALXEDA
 	help
 	  Select this to enable cpuidle on Calxeda processors.
 
+config ARM_MVEBU_V7_CPUIDLE
+	bool "CPU Idle Driver for mvebu v7 family processors"
+	depends on ARCH_MVEBU
+	help
+	  Select this to enable cpuidle on Armada 370, 38x and XP processors.
+
 endif
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 0d8bd55e776f..d96e53b640b8 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -4,6 +4,6 @@
 
 obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
 obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
-
+obj-$(CONFIG_ARM_MVEBU_V7_CPUIDLE) += cpuidle-mvebu-v7.o
 obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o
 obj-$(CONFIG_ARCH_KIRKWOOD) += cpuidle-kirkwood.o
diff --git a/drivers/cpuidle/cpuidle-mvebu-v7.c b/drivers/cpuidle/cpuidle-mvebu-v7.c
new file mode 100644
index 000000000000..8bfac4d71b06
--- /dev/null
+++ b/drivers/cpuidle/cpuidle-mvebu-v7.c
@@ -0,0 +1,136 @@
+/*
+ * Marvell Armada 370, 38x and XP SoC cpuidle driver
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Nadav Haklai <nadavh@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Maintainer: Gregory CLEMENT <gregory.clement@free-electrons.com>
+ */
+
+#include <linux/cpu_pm.h>
+#include <linux/cpuidle.h>
+#include <linux/module.h>
+#include <linux/mvebu-v7-cpuidle.h>
+#include <linux/of.h>
+#include <linux/suspend.h>
+#include <linux/platform_device.h>
+#include <asm/cpuidle.h>
+
+#define MVEBU_V7_FLAG_DEEP_IDLE	0x10000
+
+static struct mvebu_v7_cpuidle *pcpuidle;
+
+static int mvebu_v7_enter_idle(struct cpuidle_device *dev,
+				struct cpuidle_driver *drv,
+				int index)
+{
+	int ret;
+	bool deepidle = false;
+
+	cpu_pm_enter();
+
+	if (drv->states[index].flags & MVEBU_V7_FLAG_DEEP_IDLE)
+		deepidle = true;
+
+	ret = pcpuidle->cpu_suspend(deepidle);
+	if (ret)
+		return ret;
+
+	cpu_pm_exit();
+
+	return index;
+}
+
+static struct cpuidle_driver armadaxp_cpuidle_driver = {
+	.name			= "armada_xp_idle",
+	.states[0]		= ARM_CPUIDLE_WFI_STATE,
+	.states[1]		= {
+		.enter			= mvebu_v7_enter_idle,
+		.exit_latency		= 10,
+		.power_usage		= 50,
+		.target_residency	= 100,
+		.flags			= CPUIDLE_FLAG_TIME_VALID,
+		.name			= "Idle",
+		.desc			= "CPU power down",
+	},
+	.states[2]		= {
+		.enter			= mvebu_v7_enter_idle,
+		.exit_latency		= 100,
+		.power_usage		= 5,
+		.target_residency	= 1000,
+		.flags			= (CPUIDLE_FLAG_TIME_VALID |
+					   MVEBU_V7_FLAG_DEEP_IDLE),
+		.name			= "Deep idle",
+		.desc			= "CPU and L2 Fabric power down",
+	},
+	.state_count = 3,
+};
+
+static struct cpuidle_driver armada370_cpuidle_driver = {
+	.name			= "armada_370_idle",
+	.states[0]		= ARM_CPUIDLE_WFI_STATE,
+	.states[1]		= {
+		.enter			= mvebu_v7_enter_idle,
+		.exit_latency		= 100,
+		.power_usage		= 5,
+		.target_residency	= 1000,
+		.flags			= (CPUIDLE_FLAG_TIME_VALID |
+					   MVEBU_V7_FLAG_DEEP_IDLE),
+		.name			= "Deep Idle",
+		.desc			= "CPU and L2 Fabric power down",
+	},
+	.state_count = 2,
+};
+
+static struct cpuidle_driver armada38x_cpuidle_driver = {
+	.name			= "armada_38x_idle",
+	.states[0]		= ARM_CPUIDLE_WFI_STATE,
+	.states[1]		= {
+		.enter			= mvebu_v7_enter_idle,
+		.exit_latency		= 10,
+		.power_usage		= 5,
+		.target_residency	= 100,
+		.flags			= CPUIDLE_FLAG_TIME_VALID,
+		.name			= "Idle",
+		.desc			= "CPU and SCU power down",
+	},
+	.state_count = 2,
+};
+
+static int mvebu_v7_cpuidle_probe(struct platform_device *pdev)
+{
+	struct cpuidle_driver *drv;
+
+	pcpuidle = pdev->dev.platform_data;
+
+	if (pcpuidle->type == CPUIDLE_ARMADA_XP)
+		drv = &armadaxp_cpuidle_driver;
+	else if (pcpuidle->type == CPUIDLE_ARMADA_370)
+		drv = &armada370_cpuidle_driver;
+	else if (pcpuidle->type == CPUIDLE_ARMADA_38X)
+		drv = &armada38x_cpuidle_driver;
+	else
+		return -EINVAL;
+
+	return cpuidle_register(drv, NULL);
+}
+
+static struct platform_driver mvebu_v7_cpuidle_plat_driver = {
+	.driver = {
+		.name = "cpuidle-mvebu-v7",
+		.owner = THIS_MODULE,
+	},
+	.probe = mvebu_v7_cpuidle_probe,
+};
+
+module_platform_driver(mvebu_v7_cpuidle_plat_driver);
+
+MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>");
+MODULE_DESCRIPTION("Marvel EBU v7 cpuidle driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index dffb85525368..0f0021fbfd64 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -173,6 +173,18 @@ config CRYPTO_DEV_MV_CESA
 
 	  Currently the driver supports AES in ECB and CBC mode without DMA.
 
+config MV_INCLUDE_CESA
+	bool "CESA Support"
+	depends on ARCH_MVEBU
+	default n
+        help
+	  This enables the driver for the CESA - Cryptographic Engines and
+	  Security Accelerators found in the Marvell Armada SoC devices.
+
+if MV_INCLUDE_CESA = "y"
+	source "drivers/crypto/mvebu_cesa/Kconfig"
+endif
+
 config CRYPTO_DEV_NIAGARA2
        tristate "Niagara2 Stream Processing Unit driver"
        select CRYPTO_DES
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 38ce13d3b79b..dc09cd53ccb6 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -21,3 +21,4 @@ obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
 obj-$(CONFIG_CRYPTO_DEV_ATMEL_AES) += atmel-aes.o
 obj-$(CONFIG_CRYPTO_DEV_ATMEL_TDES) += atmel-tdes.o
 obj-$(CONFIG_CRYPTO_DEV_ATMEL_SHA) += atmel-sha.o
+obj-$(CONFIG_MV_INCLUDE_CESA) += mvebu_cesa/
diff --git a/drivers/crypto/mvebu_cesa/Kconfig b/drivers/crypto/mvebu_cesa/Kconfig
new file mode 100644
index 000000000000..a0393f6cf298
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/Kconfig
@@ -0,0 +1,19 @@
+menu "Cesa options"
+	depends on MV_INCLUDE_CESA
+
+config  MV_CESA
+	bool "Support for Marvell Cryptographic Engine and Security Acceleration(CESA)"
+	default y
+
+config  MV_CESA_TOOL_ARMADA
+	bool "Support for Marvell CESA Tool"
+	default y
+	depends on MV_CESA
+	select MV_CESA_TOOL
+	---help---
+
+config  MV_CESA_TOOL
+	depends on MV_CESA_TOOL_ARMADA
+	tristate
+
+endmenu
diff --git a/drivers/crypto/mvebu_cesa/Makefile b/drivers/crypto/mvebu_cesa/Makefile
new file mode 100755
index 000000000000..ef3f36cd8b60
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/Makefile
@@ -0,0 +1,30 @@
+#
+# Makefile for the Marvell CESA driver
+#
+
+CPU_ARCH	= ARM
+ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
+ENDIAN      = BE
+else
+ENDIAN      = LE
+endif
+
+MV_DEFINE = -DMV_LINUX -DMV_CPU_$(ENDIAN) -DMV_$(CPU_ARCH)
+
+CESA_DIR	:= drivers/crypto/mvebu_cesa
+
+obj-y += cesa_if.o cesa_ocf_drv.o cesa_test.o
+
+obj-$(CONFIG_MV_CESA_TOOL) += cesa_dev.o
+
+obj-y += hal/mvCesa.o hal/mvCesaDebug.o hal/mvSHA256.o	\
+	 hal/mvMD5.o hal/mvSHA1.o hal/AES/mvAesAlg.o	\
+	 hal/AES/mvAesApi.o
+
+EXTRA_INCLUDE	=  -I$(CESA_DIR)
+EXTRA_INCLUDE	+= -I$(srctree)/arch/arm/mach-mvebu/include/mach
+EXTRA_INCLUDE	+= -I$(srctree)/arch/arm/mach-mvebu/linux_oss
+EXTRA_INCLUDE	+= -I$(CESA_DIR)/hal
+EXTRA_INCLUDE	+= -I$(srctree)/crypto/ocf
+
+ccflags-y	+= $(EXTRA_INCLUDE) $(MV_DEFINE)
diff --git a/drivers/crypto/mvebu_cesa/cesa_apps/libreswan/README b/drivers/crypto/mvebu_cesa/cesa_apps/libreswan/README
new file mode 100644
index 000000000000..bf4a1aa7c89b
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/cesa_apps/libreswan/README
@@ -0,0 +1,66 @@
+LibreSWAN
+=========
+In order to explore the CESA through the LibreSWAN, please follow the next steps:
+
+1. Download libreswan-3.8 (http://www.libreswan.org/), then untar+unzip it.
+
+2. Apply the patch:
+	- mv_libreswan_3_8.patch:
+		> Disable default DEBUG support.
+		> Enable OCF support.
+		> Set OCF flag CRYPTO_F_CBIMM to mark immediate callback handling.
+
+3. Compile:
+
+ + ipsec module (cross compilation) :
+  'make KERNELSRC=<path to this release, after config> module ARCH=arm CC=<path_to_cross_compile> LD=<path_to_cross_loader>'
+  then copy the module to the host FS: /lib/modules/<kernel_name>/kernel/net/ipsec/
+
+ + ipsec user (native) :
+  make sure that you have the kernel source on the FS.
+  'make KERNELSRC=<path to this release, after config> programs'
+  'make install'
+
+4. Before tunnel is enabled on target, reverse path filtering(rp_filter) must be disabled under sysfs, using
+      the following commands:
+      - echo 0  > /proc/sys/net/ipv4/conf/eth<x>/rp_filter
+      - echo 0  > /proc/sys/net/ipv4/conf/all/rp_filter
+      - echo 1  > /proc/sys/net/ipv4/ip_no_pmtu_disc
+Reverse Path Filtering(rp_filter): it is a technology that is used on IP routers to try and prevent source address spoofing,
+which is often used for DenialOfService attacks. RPF works by checking the source IP of each packet received on an interface
+against the routing table. If the best route for the source IP address does not use the same interface that the packet was received on the packet is dropped.
+
+5. Before tunnel is enabled on target, PMTU discovery must be disabled under procfs, using the following command:
+      - echo 1 > /proc/sys/net/ipv4/ip_no_pmtu_disc
+
+IPSec routing using encryption/authentication only:
+
+  basic vpn connection:
+  +++++++++++++++++++++
+ - platform: conncted with egiga.
+ - make sure you have 'ip' (part of the iproute package) installed.
+ - edit /etc/ipsec.conf (on both sides) ,check the "man ipsec.conf" :
+
+config setup
+        interfaces="ipsec0=eth0"    # Virtual/physical interfaces
+        klipsdebug="none"             # Debug KLIPS
+        plutodebug="none"             # Debug PLUTO
+
+conn dsmp_psk_vpn
+    type=tunnel                    # type of the connection: tunnel(default),passthrough,transport,reject,drop
+    authby=secret
+    left=192.168.1.1
+    leftsubnet=192.168.1.0/16
+    right=192.168.0.1              # Remote information
+    rightsubnet=192.168.0.0/16
+    auto=start                     # start this connection at startup
+
+
+ - edit /etc/ipsec.secrets (on both sides) to have shared secret.
+192.168.1.1 192.168.0.1 : PSK "123456"
+
+ - side1: 'ifconfig eth0 192.168.0.1 netmask 255.255.0.0'
+ - side2: 'ifconfig eth0 192.168.1.1 netmask 255.255.0.0'
+ - check connectivity: ping from side1 to 192.168.1.1
+ - '/etc/init.d/ipsec start' (on both sides), create new interface ipsec0.
+ - check connectivity: ping from side1 to 192.168.1.1 --> VPN is working (make sure by sniffing)
diff --git a/drivers/crypto/mvebu_cesa/cesa_apps/libreswan/ipsec_build.sh b/drivers/crypto/mvebu_cesa/cesa_apps/libreswan/ipsec_build.sh
new file mode 100755
index 000000000000..985fb6ec5e64
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/cesa_apps/libreswan/ipsec_build.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+# IPSEC Module build script
+set -e
+
+PKG=libreswan-3.8.tar.gz
+
+# Optionaly download the sources from web
+if [[ ! -f ${PKG} ]]; then
+	wget https://download.libreswan.org/${PKG}
+fi
+
+# Extract sources localy
+tar -zxvf ${PKG}
+
+PDIR=${PKG%.*}
+if [ ! -d "${PDIR}" ]; then
+	PDIR=${PDIR%.*}
+fi
+cd ${PDIR}
+
+# Apply marvell patch over libreswan sources
+patch -p1 < ../mv_libreswan_3_8.patch
+
+# Prepare path to source and kernel revision
+KSRC=../../../../../../
+KREV=`cat ${KSRC}/include/config/kernel.release`
+
+# Build ipsec module
+make clean
+make KERNELSRC=${KSRC} module ARCH=arm
+
+# Copy module to the main libreswan directory and rename
+cp modobj/ipsec.ko ../ipsec_${KREV}.ko
+cd ..
+
+# Copy to modules output directory
+if [ "${1}" != "" ]; then
+	mkdir -p ${1}/kernel/ipsec
+	cp ${PDIR}/modobj/ipsec.ko ${1}/kernel/ipsec/
+fi
diff --git a/drivers/crypto/mvebu_cesa/cesa_apps/libreswan/ipsec_routing_setup.sh b/drivers/crypto/mvebu_cesa/cesa_apps/libreswan/ipsec_routing_setup.sh
new file mode 100644
index 000000000000..28fe3a276de8
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/cesa_apps/libreswan/ipsec_routing_setup.sh
@@ -0,0 +1,36 @@
+# This is an example how to setup the network interfaces in order to run IPSec routing
+
+ifconfig eth0 192.168.0.1 hw ether 00:00:00:00:61:92 netmask 255.255.0.0
+ifconfig eth1 192.167.0.1 hw ether 00:00:00:00:62:81 netmask 255.255.0.0
+echo 1 > /proc/sys/net/ipv4/ip_forward
+arp -s 192.167.0.250 00:00:00:00:00:12
+arp -s 192.168.1.1 00:00:00:00:00:11
+#IPSec Configuration
+insmod /ipsec.ko
+
+#SmartBit Configuration
+#eth0 - 192.168.1.1 --> 192.167.0.250
+#         00:00:00:00:00:12 --> 00:00:00:00:61:92
+#eth1   - 192.167.0.250 --> 192.168.1.1
+#         00:00:00:00:00:11 -> 00:00:00:00:62:81
+
+
+# unmark this to disble flow control for Yukon/E1000 NICs
+#ethtool -A eth0 tx off
+#ethtool -A eth0 rx off
+#ethtool -A eth2 tx off
+#ethtool -A eth2 rx off
+
+# Here we build static SA database since it is not supported anymore from OpenSWAN 2.6.18 and on
+# This is example setup for ESP 3DES/SHA1
+ipsec spi --clear
+ipsec eroute --clear
+enckey=0x0123456789abcdef02468ace13579bdf123456789abcdef0
+authkey=0x0123456789abcdef02468ace13579bdf12345678
+ipsec spi --af inet --edst 192.168.1.1 --spi 0x12345678 --proto esp --src 192.168.0.1 --esp 3des-sha1 --enckey $enckey --authkey $authkey
+ipsec spi --af inet --edst 192.168.1.1 --spi 0x12345678 --proto tun --src 192.168.0.1 --dst 192.168.1.1 --ip4
+ipsec spigrp inet 192.168.1.1 0x12345678 tun inet 192.168.1.1 0x12345678 esp
+ipsec eroute --add --eraf inet --src 192.167.0.0/16 --dst 192.168.1.0/24 --said tun0x12345678@192.168.1.1
+ipsec tncfg --attach --virtual ipsec0 --physical eth0
+ifconfig ipsec0 inet 192.168.0.1 netmask 255.255.0.0 broadcast 192.168.255.255 up
+route add -host 192.168.1.1 gw 192.168.0.1 dev ipsec0
diff --git a/drivers/crypto/mvebu_cesa/cesa_apps/openssl/README b/drivers/crypto/mvebu_cesa/cesa_apps/openssl/README
new file mode 100644
index 000000000000..210caeb88247
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/cesa_apps/openssl/README
@@ -0,0 +1,20 @@
+OpenSSL
+===========
+ In order to explore the CESA unit through the OpenSSL, you have to use the
+ OpenSSL OCF cryptodev engine.
+ By default the cryptodev engine will use the CESA
+ accelerator.
+
+ o In case missing, create a crypto device: mknod /dev/crypto c 10 70
+ o Download latest openssl source package from http://www.openssl.org, then untar+unzip it.
+ o In case missing, copy from kernel crypto/ocf/cryptodev.h to file-system path: /usr/include/crypto .
+ o Run: ./config -DHAVE_CRYPTODEV no-shared
+ o and compile ...('make' and 'make install')
+
+ you can run a speed test to make sure everything is working:
+	openssl speed -evp des3 -elapsed
+ Modify /dev/crypto to /dev/crypto_tmp so that the speed test will use
+ now software encryption.
+ Run again:
+        openssl speed -evp des3 -elapsed
+ Compare results.
diff --git a/drivers/crypto/mvebu_cesa/cesa_apps/openswan/README b/drivers/crypto/mvebu_cesa/cesa_apps/openswan/README
new file mode 100644
index 000000000000..dc576c175127
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/cesa_apps/openswan/README
@@ -0,0 +1,69 @@
+OpenSWAN
+=========
+In order to explore the CESA through the OpenSWAN, please follow the next steps:
+
+1. Download openswan-2.6.41 (http://www.openswan.org/), then untar+unzip it.
+
+2. Apply the patch:
+	- mv_openswan_2_6_41.patch:
+		> Disable default DEBUG support.
+		> Enable OCF support.
+		> Set OCF flag CRYPTO_F_CBIMM to mark immediate callback handling.
+
+	- lk_3_10_support_openswan_2_6_41.patch:
+		> Add support for LKv3.10 (imported from libreswan 3.8)
+
+3. Compile:
+
+ + ipsec module (cross compilation) :
+  'make KERNELSRC=<path to this release, after config> module ARCH=arm CC=<path_to_cross_compile> LD=<path_to_cross_loader>'
+  then copy the module to the host FS: /lib/modules/<kernel_name>/kernel/net/ipsec/
+
+ + ipsec user (native) :
+  make sure that you have the kernel source on the FS.
+  'make KERNELSRC=<path to this release, after config> programs'
+  'make install'
+
+4. Before tunnel is enabled on target, reverse path filtering(rp_filter) must be disabled under sysfs, using
+      the following commands:
+      - echo 0  > /proc/sys/net/ipv4/conf/eth<x>/rp_filter
+      - echo 0  > /proc/sys/net/ipv4/conf/all/rp_filter
+      - echo 1  > /proc/sys/net/ipv4/ip_no_pmtu_disc
+Reverse Path Filtering(rp_filter): it is a technology that is used on IP routers to try and prevent source address spoofing,
+which is often used for DenialOfService attacks. RPF works by checking the source IP of each packet received on an interface
+against the routing table. If the best route for the source IP address does not use the same interface that the packet was received on the packet is dropped.
+
+5. Before tunnel is enabled on target, PMTU discovery must be disabled under procfs, using the following command:
+      - echo 1 > /proc/sys/net/ipv4/ip_no_pmtu_disc
+
+IPSec routing using encryption/authentication only:
+
+  basic vpn connection:
+  +++++++++++++++++++++
+ - platform: conncted with egiga.
+ - make sure you have 'ip' (part of the iproute package) installed.
+ - edit /etc/ipsec.conf (on both sides) ,check the "man ipsec.conf" :
+
+config setup
+        interfaces="ipsec0=eth0"    # Virtual/physical interfaces
+        klipsdebug="none"             # Debug KLIPS
+        plutodebug="none"             # Debug PLUTO
+
+conn dsmp_psk_vpn
+    type=tunnel                    # type of the connection: tunnel(default),passthrough,transport,reject,drop
+    authby=secret
+    left=192.168.1.1
+    leftsubnet=192.168.1.0/16
+    right=192.168.0.1              # Remote information
+    rightsubnet=192.168.0.0/16
+    auto=start                     # start this connection at startup
+
+
+ - edit /etc/ipsec.secrets (on both sides) to have shared secret.
+192.168.1.1 192.168.0.1 : PSK "123456"
+
+ - side1: 'ifconfig eth0 192.168.0.1 netmask 255.255.0.0'
+ - side2: 'ifconfig eth0 192.168.1.1 netmask 255.255.0.0'
+ - check connectivity: ping from side1 to 192.168.1.1
+ - '/etc/init.d/ipsec start' (on both sides), create new interface ipsec0.
+ - check connectivity: ping from side1 to 192.168.1.1 --> VPN is working (make sure by sniffing)
diff --git a/drivers/crypto/mvebu_cesa/cesa_apps/openswan/ipsec_build.sh b/drivers/crypto/mvebu_cesa/cesa_apps/openswan/ipsec_build.sh
new file mode 100755
index 000000000000..372e1f94c77c
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/cesa_apps/openswan/ipsec_build.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# IPSEC Module build script
+set -e
+
+# Optionaly download the sources from web
+wget --no-check-certificate http://download.openswan.org/openswan/openswan-2.6.41.tar.gz
+
+# Extract sources localy
+tar -zxvf openswan-2.6.41.tar.gz
+cd openswan-2.6.41
+
+# Apply marvell patch over openswan sources
+patch -p1 < ../mv_openswan_2_6_41.patch
+# Add support for LKv3.10 (imported from libreswan 3.8)
+patch -p1 < ../lk_3_10_support_openswan_2_6_41.patch
+
+# Build ipsec module
+make KERNELSRC=../../../../../../ module ARCH=arm
+
+# Copy to modules output directory
+if [ "$1" != "" ]; then
+	mkdir -p ${1}/kernel/ipsec
+	cp modobj26/ipsec.ko ${1}/kernel/ipsec
+fi
diff --git a/drivers/crypto/mvebu_cesa/cesa_apps/openswan/ipsec_routing_setup.sh b/drivers/crypto/mvebu_cesa/cesa_apps/openswan/ipsec_routing_setup.sh
new file mode 100644
index 000000000000..28fe3a276de8
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/cesa_apps/openswan/ipsec_routing_setup.sh
@@ -0,0 +1,36 @@
+# This is an example how to setup the network interfaces in order to run IPSec routing
+
+ifconfig eth0 192.168.0.1 hw ether 00:00:00:00:61:92 netmask 255.255.0.0
+ifconfig eth1 192.167.0.1 hw ether 00:00:00:00:62:81 netmask 255.255.0.0
+echo 1 > /proc/sys/net/ipv4/ip_forward
+arp -s 192.167.0.250 00:00:00:00:00:12
+arp -s 192.168.1.1 00:00:00:00:00:11
+#IPSec Configuration
+insmod /ipsec.ko
+
+#SmartBit Configuration
+#eth0 - 192.168.1.1 --> 192.167.0.250
+#         00:00:00:00:00:12 --> 00:00:00:00:61:92
+#eth1   - 192.167.0.250 --> 192.168.1.1
+#         00:00:00:00:00:11 -> 00:00:00:00:62:81
+
+
+# unmark this to disble flow control for Yukon/E1000 NICs
+#ethtool -A eth0 tx off
+#ethtool -A eth0 rx off
+#ethtool -A eth2 tx off
+#ethtool -A eth2 rx off
+
+# Here we build static SA database since it is not supported anymore from OpenSWAN 2.6.18 and on
+# This is example setup for ESP 3DES/SHA1
+ipsec spi --clear
+ipsec eroute --clear
+enckey=0x0123456789abcdef02468ace13579bdf123456789abcdef0
+authkey=0x0123456789abcdef02468ace13579bdf12345678
+ipsec spi --af inet --edst 192.168.1.1 --spi 0x12345678 --proto esp --src 192.168.0.1 --esp 3des-sha1 --enckey $enckey --authkey $authkey
+ipsec spi --af inet --edst 192.168.1.1 --spi 0x12345678 --proto tun --src 192.168.0.1 --dst 192.168.1.1 --ip4
+ipsec spigrp inet 192.168.1.1 0x12345678 tun inet 192.168.1.1 0x12345678 esp
+ipsec eroute --add --eraf inet --src 192.167.0.0/16 --dst 192.168.1.0/24 --said tun0x12345678@192.168.1.1
+ipsec tncfg --attach --virtual ipsec0 --physical eth0
+ifconfig ipsec0 inet 192.168.0.1 netmask 255.255.0.0 broadcast 192.168.255.255 up
+route add -host 192.168.1.1 gw 192.168.0.1 dev ipsec0
diff --git a/drivers/crypto/mvebu_cesa/cesa_dev.c b/drivers/crypto/mvebu_cesa/cesa_dev.c
new file mode 100755
index 000000000000..72b4722c6974
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/cesa_dev.c
@@ -0,0 +1,314 @@
+#ifdef CONFIG_ARCH_MVEBU
+#include <generated/autoconf.h>
+#else
+#include <linux/autoconf.h>
+#endif
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/unistd.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/miscdevice.h>
+#include <linux/version.h>
+#include <asm/uaccess.h>
+
+#include "cesa_dev.h"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
+#include <linux/syscalls.h>
+#endif
+#include "mvOs.h"
+#include "mvCommon.h"
+#include "mvCesa.h"
+
+
+static int debug = 1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug,
+	   "Enable debug");
+
+extern void cesaTest(int iter, int reqSize, int checkMode);
+extern void combiTest(int iter, int reqSize, int checkMode);
+extern void cesaOneTest(int testIdx, int caseIdx, int iter, int reqSize, int checkMode);
+extern void multiSizeTest(int idx, int checkMode, int iter, char* inputData);
+extern void aesTest(int iter, int reqSize, int checkMode);
+extern void desTest(int iter, int reqSize, int checkMode);
+extern void tripleDesTest(int iter, int reqSize, int checkMode);
+extern void mdTest(int iter, int reqSize, int checkMode);
+extern void sha1Test(int iter, int reqSize, int checkMode);
+extern void sha2Test(int iter, int reqSize, int checkMode);
+
+int run_cesa_test(CESA_TEST *cesa_test)
+{
+	switch(cesa_test->test){
+		case(MULTI):
+			combiTest(cesa_test->iter, cesa_test->req_size, cesa_test->checkmode);
+			break;
+		case(SIZE):
+                        multiSizeTest(cesa_test->req_size, cesa_test->iter, cesa_test->checkmode, NULL);
+			break;
+		case(SINGLE):
+			cesaOneTest(cesa_test->session_id, cesa_test->data_id, cesa_test->iter,
+					cesa_test->req_size, cesa_test->checkmode);
+			break;
+		case(AES):
+			aesTest(cesa_test->iter, cesa_test->req_size, cesa_test->checkmode);
+			break;
+		case(DES):
+			desTest(cesa_test->iter, cesa_test->req_size, cesa_test->checkmode);
+			break;
+		case(TRI_DES):
+			tripleDesTest(cesa_test->iter, cesa_test->req_size, cesa_test->checkmode);
+			break;
+		case(MD5):
+                        mdTest(cesa_test->iter, cesa_test->req_size, cesa_test->checkmode);
+                        break;
+		case(SHA1):
+                        sha1Test(cesa_test->iter, cesa_test->req_size, cesa_test->checkmode);
+                        break;
+		case(SHA2):
+                        sha2Test(cesa_test->iter, cesa_test->req_size, cesa_test->checkmode);
+                        break;
+
+		default:
+			dprintk("%s(unknown test 0x%x)\n", __FUNCTION__, cesa_test->test);
+			return -EINVAL;
+	}
+	return 0;
+}
+
+extern void    		mvCesaDebugSAD(int mode);
+extern void    		mvCesaDebugSA(short sid, int mode);
+extern void    		mvCesaDebugQueue(int mode);
+extern void    		mvCesaDebugStatus(void);
+extern void    		mvCesaDebugSram(int mode);
+extern void    		cesaTestPrintReq(int req, int offset, int size);
+extern void	   	    cesaTestPrintSession(int idx);
+extern void	   	    cesaTestPrintStatus(void);
+
+
+int run_cesa_debug(CESA_DEBUG *cesa_debug)
+{
+	int error = 0;
+
+	if (mv_cesa_mode == CESA_TEST_M) {
+		dprintk("%s:cesa mode %d\n", __func__, mv_cesa_mode);
+
+		switch (cesa_debug->debug) {
+		case(STATUS):
+			mvCesaDebugStatus();
+			break;
+		case(QUEUE):
+			mvCesaDebugQueue(cesa_debug->mode);
+			break;
+		case(SA):
+			mvCesaDebugSA(cesa_debug->index,
+			    cesa_debug->mode);
+			break;
+		case(SRAM):
+			mvCesaDebugSram(cesa_debug->mode);
+			break;
+		case(SAD):
+			mvCesaDebugSAD(cesa_debug->mode);
+			break;
+		case(TST_REQ):
+			cesaTestPrintReq(cesa_debug->index, 0,
+			    cesa_debug->size);
+			break;
+		case(TST_SES):
+			cesaTestPrintSession(cesa_debug->index);
+			break;
+		case(TST_STATS):
+			cesaTestPrintStatus();
+			break;
+		default:
+			dprintk("%s(unknown debug 0x%x)\n",
+			    __func__, cesa_debug->debug);
+			error = EINVAL;
+			break;
+		}
+	}
+
+	else if (mv_cesa_mode == CESA_OCF_M) {
+		dprintk("%s:cesa mode %d\n", __func__, mv_cesa_mode);
+
+		switch (cesa_debug->debug) {
+		case(STATUS):
+			mvCesaDebugStatus();
+			break;
+		case(QUEUE):
+			mvCesaDebugQueue(cesa_debug->mode);
+			break;
+		case(SA):
+			mvCesaDebugSA(cesa_debug->index,
+			    cesa_debug->mode);
+			break;
+		case(SRAM):
+			mvCesaDebugSram(cesa_debug->mode);
+			break;
+		case(SAD):
+			mvCesaDebugSAD(cesa_debug->mode);
+			break;
+		default:
+			dprintk("%s(unknown debug 0x%x)\n",
+			    __func__, cesa_debug->debug);
+			error = EINVAL;
+			break;
+		}
+	}
+
+	return(-error);
+}
+
+
+static long
+cesadev_ioctl(
+	struct file *filp,
+	unsigned int cmd,
+	unsigned long arg)
+{
+	CESA_DEBUG cesa_debug;
+	u32 error = 0;
+
+	dprintk("%s: cmd=0x%x, CIOCDEBUG=0x%x, CIOCTEST=0x%x\n",
+                __FUNCTION__, cmd, CIOCDEBUG, CIOCTEST);
+
+	if (mv_cesa_mode == CESA_TEST_M) {
+		dprintk("%s:cesa mode %d\n", __func__, mv_cesa_mode);
+
+		switch (cmd) {
+		case CIOCDEBUG:
+			if (copy_from_user(&cesa_debug, (void *)arg,
+							   sizeof(CESA_DEBUG)))
+				error = -EFAULT;
+
+			dprintk("%s(CIOCDBG): dbg %d idx %d mode %d size %d\n",
+				__func__, cesa_debug.debug,
+				cesa_debug.index, cesa_debug.mode,
+				cesa_debug.size);
+			error = run_cesa_debug(&cesa_debug);
+			break;
+		case CIOCTEST:
+			{
+			CESA_TEST cesa_test;
+
+			if (copy_from_user(&cesa_test, (void *)arg,
+							    sizeof(CESA_TEST)))
+				error = -EFAULT;
+
+			dprintk("%s(CIOCTST): test %d iter %d req_size %d",
+			    __func__, cesa_test.test, cesa_test.iter,
+			    cesa_test.req_size);
+			dprintk(" checkmode %d sess_id %d data_id %d\n",
+				cesa_test.checkmode, cesa_test.session_id,
+				cesa_test.data_id);
+			error = run_cesa_test(&cesa_test);
+			}
+			break;
+		default:
+			dprintk("%s(unknown ioctl 0x%x)\n", __func__, cmd);
+			error = EINVAL;
+			break;
+		}
+	}
+
+	else if (mv_cesa_mode == CESA_OCF_M) {
+		dprintk("%s:cesa mode %d\n", __func__, mv_cesa_mode);
+
+		switch (cmd) {
+		case CIOCDEBUG:
+			if (copy_from_user(&cesa_debug, (void *)arg,
+							   sizeof(CESA_DEBUG)))
+				error = -EFAULT;
+
+			dprintk("%s(CIOCDBG): dbg %d idx %d mode %d size %d\n",
+				__func__, cesa_debug.debug,
+				cesa_debug.index, cesa_debug.mode,
+				cesa_debug.size);
+			error = run_cesa_debug(&cesa_debug);
+			break;
+		default:
+			dprintk("%s(unknown ioctl 0x%x)\n", __func__, cmd);
+			error = EINVAL;
+			break;
+		}
+	}
+
+	return(-error);
+}
+
+static int
+cesadev_open(struct inode *inode, struct file *filp)
+{
+	dprintk("%s()\n", __FUNCTION__);
+	return(0);
+}
+
+static int
+cesadev_release(struct inode *inode, struct file *filp)
+{
+	dprintk("%s()\n", __FUNCTION__);
+	return(0);
+}
+
+
+static struct file_operations cesadev_fops = {
+	.owner = THIS_MODULE,
+	.open = cesadev_open,
+	.release = cesadev_release,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
+	.ioctl = cesadev_ioctl,
+#endif
+#ifdef HAVE_UNLOCKED_IOCTL
+	.unlocked_ioctl = cesadev_ioctl,
+#endif
+};
+
+static struct miscdevice cesadev = {
+	.minor = CESADEV_MINOR,
+	.name = "cesa",
+	.fops = &cesadev_fops,
+};
+
+static int __init
+cesadev_init(void)
+{
+	int rc;
+
+#if defined(CONFIG_MV78200) || defined(CONFIG_MV632X)
+	if (MV_FALSE == mvSocUnitIsMappedToThisCpu(CESA))
+	{
+		dprintk("CESA is not mapped to this CPU\n");
+		return -ENODEV;
+	}
+#endif
+
+	dprintk("%s(%p)\n", __FUNCTION__, cesadev_init);
+	rc = misc_register(&cesadev);
+	if (rc) {
+		printk(KERN_ERR "cesadev: registration of /dev/cesadev failed\n");
+		return(rc);
+	}
+	return(0);
+}
+
+static void __exit
+cesadev_exit(void)
+{
+	dprintk("%s()\n", __FUNCTION__);
+	misc_deregister(&cesadev);
+}
+
+module_init(cesadev_init);
+module_exit(cesadev_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ronen Shitrit");
+MODULE_DESCRIPTION("Cesadev (user interface to CESA)");
diff --git a/drivers/crypto/mvebu_cesa/cesa_dev.h b/drivers/crypto/mvebu_cesa/cesa_dev.h
new file mode 100644
index 000000000000..05301d41a948
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/cesa_dev.h
@@ -0,0 +1,63 @@
+#ifndef _CESA_DEV_H_
+#define _CESA_DEV_H_
+
+/*
+ * common debug for all
+ */
+#if 1
+#define dprintk(a...)	if (debug) { printk(a); } else
+#else
+#define dprintk(a...)
+#endif
+
+typedef enum {
+	MULTI = 0,
+	SIZE,
+	SINGLE,
+	AES,
+	DES,
+	TRI_DES,
+	MD5,
+	SHA1,
+	SHA2,
+	MAX_CESA_TEST_TYPE
+} CESA_TEST_TYPE;
+
+typedef struct {
+	CESA_TEST_TYPE 		test;
+	unsigned int	  	iter;		/* How many interation to run */
+	unsigned int	  	req_size;	/* request buffer size */
+	unsigned int		checkmode;	/* check mode: verify or not */
+	unsigned int		session_id; 	/* relevant only for single test */
+	unsigned int		data_id;   	/* relevant only for single test */
+} CESA_TEST;
+
+typedef enum {
+	STATUS = 0,
+	CHAN,
+	QUEUE,
+	SA,
+	CACHE_IDX,
+	SRAM,
+	SAD,
+	TST_REQ,
+	TST_SES,
+    TST_STATS,
+	MAX_CESA_DEBUG_TYPE
+} CESA_DEBUG_TYPE;
+
+typedef struct {
+	CESA_DEBUG_TYPE	debug;
+	unsigned int	index; /* general index */
+	unsigned int	mode;  /* verbos mode */
+	unsigned int 	size;  /* size of buffer */
+} CESA_DEBUG;
+
+
+/*
+ * done against open of /dev/cesa, to get a cloned descriptor.
+ */
+#define	CIOCDEBUG	_IOWR('c', 150, CESA_DEBUG)
+#define	CIOCTEST	_IOWR('c', 151, CESA_TEST)
+
+#endif /* _CESA_DEV_H_ */
diff --git a/drivers/crypto/mvebu_cesa/cesa_if.c b/drivers/crypto/mvebu_cesa/cesa_if.c
new file mode 100644
index 000000000000..d05a9750d94b
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/cesa_if.c
@@ -0,0 +1,825 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "cesa_if.h"
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/spinlock_types.h>
+
+#define WINDOW_CTRL(i)		(0xA04 + ((i) << 3))
+#define WINDOW_BASE(i)		(0xA00 + ((i) << 3))
+
+#define MV_CESA_IF_MAX_WEIGHT	0xFFFFFFFF
+
+/* Globals */
+static MV_CESA_RESULT **pResQ;
+static MV_CESA_RESULT *resQ;
+static MV_CESA_RESULT *pEmptyResult;
+static MV_CESA_FLOW_TYPE flowType[MV_CESA_CHANNELS];
+static MV_U32 chanWeight[MV_CESA_CHANNELS];
+static MV_STATUS isReady[MV_CESA_CHANNELS];
+static MV_CESA_POLICY cesaPolicy;
+static MV_U8 splitChanId;
+static MV_U32 resQueueDepth;
+static MV_U32 reqId;
+static MV_U32 resId;
+static spinlock_t chanLock[MV_CESA_CHANNELS];
+static DEFINE_SPINLOCK(cesaIfLock);
+static DEFINE_SPINLOCK(cesaIsrLock);
+
+
+/*
+ * Initialized in cesa_<mode>_probe, where <mode>: ocf or test
+ */
+MV_U32 mv_cesa_base[MV_CESA_CHANNELS], mv_cesa_tdma_base[MV_CESA_CHANNELS];
+enum cesa_mode mv_cesa_mode = CESA_UNKNOWN_M;
+u32 mv_cesa_time_threshold, mv_cesa_threshold, mv_cesa_channels;
+enum cesa_feature mv_cesa_feature = CESA_UNKNOWN;
+
+struct cesa_s2r_reg {
+	uint32_t desc_offset;
+	uint32_t config_reg;
+	uint32_t int_coal_th;
+	uint32_t int_time_th;
+	uint32_t tdma_ctrl;
+};
+
+MV_STATUS mvCesaIfInit(int numOfSession, int queueDepth, void *osHandle, MV_CESA_HAL_DATA *halData)
+{
+	MV_U8 chan = 0;
+
+	/* Init parameters */
+	reqId = 0;
+	resId = 0;
+	resQueueDepth = (mv_cesa_channels * queueDepth);
+
+	if (mv_cesa_channels == 1)
+		cesaPolicy = CESA_SINGLE_CHAN_POLICY;
+	else
+		cesaPolicy = CESA_DUAL_CHAN_BALANCED_POLICY;
+
+	/* Allocate reordered queue for completed results */
+	resQ = (MV_CESA_RESULT *)mvOsMalloc(resQueueDepth * sizeof(MV_CESA_RESULT));
+	if (resQ == NULL) {
+		mvOsPrintf("%s: Error, resQ malloc failed\n", __func__);
+		return MV_ERROR;
+	}
+	pEmptyResult = &resQ[0];
+
+	/* Allocate result pointers queue */
+	pResQ = (MV_CESA_RESULT **)mvOsMalloc(resQueueDepth * sizeof(MV_CESA_RESULT*));
+	if (pResQ == NULL) {
+		mvOsPrintf("%s: Error, pResQ malloc failed\n", __func__);
+		return MV_ERROR;
+	}
+
+	/* Init shared spinlocks */
+	spin_lock_init(&cesaIfLock);
+	spin_lock_init(&cesaIsrLock);
+
+	/* Per channel init */
+	for (chan = 0; chan < mv_cesa_channels; chan++) {
+		spin_lock_init(&chanLock[chan]);
+		chanWeight[chan] = 0;
+		flowType[chan] = 0;
+		isReady[chan] = MV_TRUE;
+	}
+
+	/* Clear global resources */
+	memset(pResQ, 0, (resQueueDepth * sizeof(MV_CESA_RESULT *)));
+	memset(resQ, 0, (resQueueDepth * sizeof(MV_CESA_RESULT)));
+
+	return mvCesaHalInit(numOfSession, queueDepth, osHandle, halData);
+}
+
+MV_STATUS mvCesaIfAction(MV_CESA_COMMAND *pCmd)
+{
+	MV_U8 chan = 0, chanId = 0xff;
+	MV_U32 min = MV_CESA_IF_MAX_WEIGHT; /* max possible value */
+	MV_STATUS status;
+	MV_ULONG flags = 0;
+
+	/* Handle request according to selected policy */
+	switch (cesaPolicy) {
+	case CESA_WEIGHTED_CHAN_POLICY:
+	case CESA_NULL_POLICY:
+		for (chan = 0; chan < mv_cesa_channels; chan++) {
+			if ((cesaReqResources[chan] > 0) && (chanWeight[chan] < min)) {
+				min = chanWeight[chan];
+				chanId = chan;
+			}
+		}
+
+		/* Any room for the request ? */
+		if (cesaReqResources[chanId] <= 1)
+			return MV_NO_RESOURCE;
+
+		spin_lock_irqsave(&chanLock[chanId], flags);
+		chanWeight[chanId] += pCmd->pSrc->mbufSize;
+		spin_unlock_irqrestore(&chanLock[chanId], flags);
+		break;
+
+	case CESA_DUAL_CHAN_BALANCED_POLICY:
+		spin_lock(&cesaIfLock);
+		chanId = (reqId % 2);
+		spin_unlock(&cesaIfLock);
+
+		/* Any room for the request ? */
+		if (cesaReqResources[chanId] <= 1)
+			return MV_NO_RESOURCE;
+
+		break;
+
+	case CESA_FLOW_ASSOC_CHAN_POLICY:
+		for (chan = 0; chan < mv_cesa_channels; chan++) {
+			if (flowType[chan] == pCmd->flowType) {
+				chanId = chan;
+				break;
+			}
+		}
+
+		if(chanId == 0xff) {
+			mvOsPrintf("%s: Error, policy was not set correctly\n", __func__);
+			return MV_ERROR;
+		}
+
+		break;
+
+	case CESA_SINGLE_CHAN_POLICY:
+		spin_lock(&cesaIfLock);
+		chanId = 0;
+		spin_unlock(&cesaIfLock);
+
+		/* Any room for the request ? */
+		if (cesaReqResources[chanId] <= 1)
+			return MV_NO_RESOURCE;
+
+		break;
+
+	default:
+		mvOsPrintf("%s: Error, policy not supported\n", __func__);
+		return MV_ERROR;
+	}
+
+	/* Check if we need to handle split packet */
+	if (pCmd->split != MV_CESA_SPLIT_NONE) {
+		if (pCmd->split == MV_CESA_SPLIT_FIRST) {
+			spin_lock(&cesaIfLock);
+			splitChanId = chanId;
+			spin_unlock(&cesaIfLock);
+		} else	/* MV_CESA_SPLIT_SECOND */
+			chanId = splitChanId;
+	}
+
+	/* Update current request id then increment */
+	spin_lock(&cesaIfLock);
+	pCmd->reqId = reqId;
+	spin_unlock(&cesaIfLock);
+
+	/* Inject request to CESA driver */
+	spin_lock_irqsave(&chanLock[chanId], flags);
+	status = mvCesaAction(chanId, pCmd);
+
+	/* Check status */
+	if ((status == MV_OK) || (status == MV_NO_MORE))
+		reqId = ((reqId + 1) % resQueueDepth);
+
+	spin_unlock_irqrestore(&chanLock[chanId], flags);
+
+	return status;
+}
+
+MV_STATUS mvCesaIfReadyGet(MV_U8 chan, MV_CESA_RESULT *pResult)
+{
+	MV_STATUS status;
+	MV_CESA_RESULT *pCurrResult;
+	MV_ULONG flags;
+
+	/* Validate channel index */
+	if (chan >= mv_cesa_channels) {
+		printk("%s: Error, bad channel index(%d)\n", __func__, chan);
+		return MV_ERROR;
+	}
+
+	/* Prevent pushing requests till finish to extract pending requests */
+	spin_lock_irqsave(&chanLock[chan], flags);
+
+	/* Are there any pending requests in CESA driver ? */
+	if (isReady[chan] == MV_FALSE)
+		goto out;
+
+	while (1) {
+
+		spin_lock(&cesaIsrLock);
+		pCurrResult = pEmptyResult;
+		spin_unlock(&cesaIsrLock);
+
+		/* Get next result */
+		status = mvCesaReadyGet(chan, pCurrResult);
+
+		if (status != MV_OK)
+			break;
+
+		spin_lock(&cesaIsrLock);
+		pEmptyResult = ((pEmptyResult != &resQ[resQueueDepth-1]) ? (pEmptyResult + 1) : &resQ[0]);
+		spin_unlock(&cesaIsrLock);
+
+		/* Handle request according to selected policy */
+		switch (cesaPolicy) {
+		case CESA_WEIGHTED_CHAN_POLICY:
+		case CESA_NULL_POLICY:
+			chanWeight[chan] -= pCurrResult->mbufSize;
+			break;
+
+		case CESA_FLOW_ASSOC_CHAN_POLICY:
+			/* TBD - handle policy */
+			break;
+
+		case CESA_DUAL_CHAN_BALANCED_POLICY:
+		case CESA_SINGLE_CHAN_POLICY:
+			break;
+
+		default:
+			mvOsPrintf("%s: Error, policy not supported\n", __func__);
+			return MV_ERROR;
+		}
+
+
+		if (pResQ[pCurrResult->reqId] != NULL)
+			mvOsPrintf("%s: Warning, result entry not empty(reqId=%d, chan=%d, resId=%d)\n", __func__, pCurrResult->reqId, chan, resId);
+
+		/* Save current result */
+		spin_lock(&cesaIsrLock);
+		pResQ[pCurrResult->reqId] = pCurrResult;
+		spin_unlock(&cesaIsrLock);
+
+		if (mv_cesa_feature == INT_PER_PACKET)
+			break;
+	}
+
+out:
+	spin_lock(&cesaIsrLock);
+
+	if (pResQ[resId] == NULL) {
+		isReady[chan] = MV_TRUE;
+		status = MV_NOT_READY;
+	} else {
+		/* Send results in order */
+		isReady[chan] = MV_FALSE;
+		/* Fill result data */
+		pResult->retCode = pResQ[resId]->retCode;
+		pResult->pReqPrv = pResQ[resId]->pReqPrv;
+		pResult->sessionId = pResQ[resId]->sessionId;
+		pResult->mbufSize = pResQ[resId]->mbufSize;
+		pResult->reqId = pResQ[resId]->reqId;
+		pResQ[resId] = NULL;
+		resId = ((resId + 1) % resQueueDepth);
+		status = MV_OK;
+	}
+
+	spin_unlock(&cesaIsrLock);
+
+	/* Release per channel lock */
+	spin_unlock_irqrestore(&chanLock[chan], flags);
+
+	return status;
+}
+
+MV_STATUS mvCesaIfPolicySet(MV_CESA_POLICY policy, MV_CESA_FLOW_TYPE flow)
+{
+	MV_U8 chan = 0;
+
+	spin_lock(&cesaIfLock);
+
+	if (cesaPolicy == CESA_NULL_POLICY) {
+		cesaPolicy = policy;
+	} else {
+		/* Check if more than 1 policy was assigned */
+		if (cesaPolicy != policy) {
+			spin_unlock(&cesaIfLock);
+			mvOsPrintf("%s: Error, can not support multiple policies\n", __func__);
+			return MV_ERROR;
+		}
+	}
+
+	if (policy == CESA_FLOW_ASSOC_CHAN_POLICY) {
+
+		if (flow == CESA_NULL_FLOW_TYPE) {
+			spin_unlock(&cesaIfLock);
+			mvOsPrintf("%s: Error, bad policy configuration\n", __func__);
+			return MV_ERROR;
+		}
+
+		/* Find next empty entry */
+		for (chan = 0; chan < mv_cesa_channels; chan++) {
+			if (flowType[chan] == CESA_NULL_FLOW_TYPE)
+				flowType[chan] = flow;
+		}
+
+		if (chan == mv_cesa_channels) {
+			spin_unlock(&cesaIfLock);
+			mvOsPrintf("%s: Error, no empty entry is available\n", __func__);
+			return MV_ERROR;
+		}
+
+	}
+
+	spin_unlock(&cesaIfLock);
+
+	return MV_OK;
+}
+
+MV_STATUS mvCesaIfPolicyGet(MV_CESA_POLICY *pCesaPolicy)
+{
+	*pCesaPolicy = cesaPolicy;
+
+	return MV_OK;
+}
+
+MV_STATUS mvCesaIfFinish(void)
+{
+	/* Free global resources */
+	mvOsFree(pResQ);
+	mvOsFree(resQ);
+
+	return mvCesaFinish();
+}
+
+MV_STATUS mvCesaIfSessionOpen(MV_CESA_OPEN_SESSION *pSession, short *pSid)
+{
+	return mvCesaSessionOpen(pSession, pSid);
+}
+
+MV_STATUS mvCesaIfSessionClose(short sid)
+{
+	return mvCesaSessionClose(sid);
+}
+
+MV_VOID mvCesaIfDebugMbuf(const char *str, MV_CESA_MBUF *pMbuf, int offset, int size)
+{
+	return mvCesaDebugMbuf(str, pMbuf, offset, size);
+}
+
+void mv_bin_to_hex(const MV_U8 *bin, char *hexStr, int size)
+{
+	int i;
+
+	for (i = 0; i < size; i++)
+		mvOsSPrintf(&hexStr[i * 2], "%02x", bin[i]);
+
+	hexStr[i * 2] = '\0';
+}
+
+/*******************************************************************************
+* mv_hex_to_bin - Convert hex to binary
+*
+* DESCRIPTION:
+*		This function Convert hex to binary.
+*
+* INPUT:
+*       pHexStr - hex buffer pointer.
+*       size    - Size to convert.
+*
+* OUTPUT:
+*       pBin - Binary buffer pointer.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_VOID mv_hex_to_bin(const char *pHexStr, MV_U8 *pBin, int size)
+{
+	int j, i;
+	char tmp[3];
+	MV_U8 byte;
+
+	for (j = 0, i = 0; j < size; j++, i += 2) {
+		tmp[0] = pHexStr[i];
+		tmp[1] = pHexStr[i + 1];
+		tmp[2] = '\0';
+		byte = (MV_U8) (strtol(tmp, NULL, 16) & 0xFF);
+		pBin[j] = byte;
+	}
+}
+
+/* Dump memory in specific format:
+ * address: X1X1X1X1 X2X2X2X2 ... X8X8X8X8
+ */
+void mv_debug_mem_dump(void *addr, int size, int access)
+{
+	int i, j;
+	MV_U32 memAddr = (MV_U32) addr;
+
+	if (access == 0)
+		access = 1;
+
+	if ((access != 4) && (access != 2) && (access != 1)) {
+		mvOsPrintf("%d wrong access size. Access must be 1 or 2 or 4\n", access);
+		return;
+	}
+	memAddr = MV_ALIGN_DOWN((unsigned int)addr, 4);
+	size = MV_ALIGN_UP(size, 4);
+	addr = (void *)MV_ALIGN_DOWN((unsigned int)addr, access);
+	while (size > 0) {
+		mvOsPrintf("%08x: ", memAddr);
+		i = 0;
+		/* 32 bytes in the line */
+		while (i < 32) {
+			if (memAddr >= (MV_U32) addr) {
+				switch (access) {
+				case 1:
+					mvOsPrintf("%02x ", MV_MEMIO8_READ(memAddr));
+					break;
+
+				case 2:
+					mvOsPrintf("%04x ", MV_MEMIO16_READ(memAddr));
+					break;
+
+				case 4:
+					mvOsPrintf("%08x ", MV_MEMIO32_READ(memAddr));
+					break;
+				}
+			} else {
+				for (j = 0; j < (access * 2 + 1); j++)
+					mvOsPrintf(" ");
+			}
+			i += access;
+			memAddr += access;
+			size -= access;
+			if (size <= 0)
+				break;
+		}
+		mvOsPrintf("\n");
+	}
+}
+
+static void
+mv_cesa_conf_mbus_windows(const struct mbus_dram_target_info *dram, MV_U8 chan)
+{
+	int i;
+	void __iomem *base = (void __iomem *)mv_cesa_tdma_base[chan];
+	dprintk("%s base: %p, dram_n_cs: %x\n", __func__, base, dram->num_cs);
+
+	for (i = 0; i < 4; i++) {
+		writel(0, base + WINDOW_CTRL(i));
+		writel(0, base + WINDOW_BASE(i));
+	}
+
+	for (i = 0; i < dram->num_cs; i++) {
+		const struct mbus_dram_window *cs = dram->cs + i;
+
+		writel(((cs->size - 1) & 0xffff0000) |
+		    (cs->mbus_attr << 8) |
+		    (dram->mbus_dram_target_id << 4) | 1,
+		    base + WINDOW_CTRL(i));
+		writel(cs->base, base + WINDOW_BASE(i));
+
+		dprintk("%s %d: ctrlv 0x%x ctrl_addr: %p basev 0x%x\n",
+		    __func__, i, ((cs->size - 1) & 0xffff0000) |
+		    (cs->mbus_attr << 8) |
+		    (dram->mbus_dram_target_id << 4) | 1,
+		    base + WINDOW_CTRL(i), cs->base);
+	}
+}
+
+int
+mv_get_cesa_resources(struct platform_device *pdev)
+{
+	struct device_node *np;
+	struct resource *r;
+	MV_U8 chan = 0;
+	const char *cesa_f;
+	int ret;
+
+	/*
+	 * Preparation resources for all CESA chan
+	 */
+	for_each_child_of_node(pdev->dev.of_node, np) {
+
+		/*
+		 * CESA base
+		 */
+		r = platform_get_resource(pdev, IORESOURCE_MEM, 2 * chan);
+		if (r == NULL) {
+			dev_err(&pdev->dev, "no IO memory resource defined\n");
+			return -ENODEV;
+		}
+
+		r = devm_request_mem_region(&pdev->dev, r->start,
+		    resource_size(r), pdev->name);
+		if (r == NULL) {
+			dev_err(&pdev->dev, "failed to request mem res\n");
+			return -EBUSY;
+		}
+
+		mv_cesa_base[chan] = (MV_U32)devm_ioremap(&pdev->dev,
+		    r->start, resource_size(r));
+
+
+		/*
+		 * TDMA base
+		 */
+		r = platform_get_resource(pdev, IORESOURCE_MEM, 2 * chan + 1);
+		if (r == NULL) {
+			dev_err(&pdev->dev, "no IO memory resource defined\n");
+			return -ENODEV;
+		}
+
+		r = devm_request_mem_region(&pdev->dev, r->start,
+		    resource_size(r), pdev->name);
+		if (r == NULL) {
+			dev_err(&pdev->dev, "failed to request mem res\n");
+			return -EBUSY;
+		}
+
+		mv_cesa_tdma_base[chan] = (MV_U32)devm_ioremap(&pdev->dev,
+		    r->start, resource_size(r));
+
+		/*
+		 * Debugs
+		 */
+		dprintk("%s: r->end 0x%x, r->end 0x%x\n", __func__,
+		    r->start, r->end);
+		dprintk("%s: c_base[%d] 0x%x, t_base[%d] 0x%x\n", __func__,
+		    chan, mv_cesa_base[chan], chan, mv_cesa_tdma_base[chan]);
+
+		chan++;
+	}
+
+	/* Get mv_cesa_channels */
+	ret = 0;
+	ret |= of_property_read_u32(pdev->dev.of_node, "cesa,channels",
+							    &mv_cesa_channels);
+
+	if (ret != 0) {
+		dev_err(&pdev->dev,
+		    "missing or bad \'cesa,channels\' parameter in dts\n");
+		dprintk("%s: mv_cesa_channels = %d", __func__,
+		    mv_cesa_channels);
+		return -ENOENT;
+	} else if (mv_cesa_channels != chan) {
+		dev_err(&pdev->dev,
+		    "cesa,channels declared in dts is not equal %s\n",
+		    "to nr of cesa childs defined in dts");
+		return -ENOENT;
+	} else
+		dev_info(&pdev->dev,
+		    "%s: Total CESA HW channels supported %d\n", __func__,
+		    mv_cesa_channels);
+
+	/*
+	 * Get interrupt mode and parameters
+	 */
+	cesa_f = of_get_property(pdev->dev.of_node, "cesa,feature",
+									 NULL);
+	if (strncmp(cesa_f, "chain", 5) &&
+	    strncmp(cesa_f, "int_coalescing", 14) &&
+	    strncmp(cesa_f, "int_per_packet", 14)) {
+		dev_err(&pdev->dev,
+		    "%s: unknown cesa feature %s from dts cesa,feature\n"
+		    , __func__, cesa_f);
+		return -ENODEV;
+	}
+
+	if (strncmp(cesa_f, "chain", 5) == 0)
+		mv_cesa_feature = CHAIN;
+	else if (strncmp(cesa_f, "int_coalescing", 14) == 0)
+		mv_cesa_feature = INT_COALESCING;
+	else if (strncmp(cesa_f, "int_per_packet", 14) == 0)
+		mv_cesa_feature = INT_PER_PACKET;
+
+	dev_info(&pdev->dev, "%s: CESA feature: %s(%d)\n", __func__,
+						cesa_f, mv_cesa_feature);
+
+	/* Parse device tree and acquire threshold configuration */
+	ret = 0;
+	ret |= of_property_read_u32(pdev->dev.of_node, "cesa,time_threshold",
+						      &mv_cesa_time_threshold);
+	ret |= of_property_read_u32(pdev->dev.of_node, "cesa,threshold",
+							   &mv_cesa_threshold);
+
+	if ((ret != 0) &&
+		       (mv_cesa_feature == INT_COALESCING)) {
+		dev_err(&pdev->dev,
+		    "missing or bad threshold configuration in dts\n");
+		dprintk("%s threshold 0x%x, threshold_time 0x%x\n",
+		    __func__, mv_cesa_threshold, mv_cesa_time_threshold);
+		return -ENOENT;
+	} else if (mv_cesa_feature == INT_COALESCING)
+		dev_info(&pdev->dev,
+		    "%s threshold 0x%x, threshold_time 0x%x\n", __func__,
+		    mv_cesa_threshold, mv_cesa_time_threshold);
+
+	return 0;
+}
+
+/*
+ * Initialize CESA subsystem
+ * Based on mach-spec version of mvSysCesaInit (mvSysCesa.c)
+ */
+MV_STATUS mvSysCesaInit(int numOfSession, int queueDepth, void *osHandle,
+						  struct platform_device *pdev)
+{
+	MV_CESA_HAL_DATA halData;
+	MV_STATUS status;
+	MV_U8 chan = 0;
+	const struct mbus_dram_target_info *dram;
+	struct device_node *np, *np_sram;
+	struct resource res;
+	int err, ret;
+#ifdef CONFIG_PM
+	struct cesa_s2r_reg (*s2r_reg)[MV_CESA_CHANNELS];
+
+	s2r_reg = devm_kzalloc(&pdev->dev, MV_CESA_CHANNELS * sizeof(struct cesa_s2r_reg), GFP_KERNEL);
+	if (!s2r_reg)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, s2r_reg);
+#endif
+
+	np_sram = of_find_compatible_node(NULL, NULL, "marvell,cesa-sram");
+	if (!np_sram) {
+		dev_err(&pdev->dev, "Cannot find 'marvell,cesa-sram' node");
+		return -ENOENT;
+	}
+
+	dram = mv_mbus_dram_info();
+
+	/*
+	 * Preparation for each CESA chan
+	 */
+	for_each_child_of_node(pdev->dev.of_node, np) {
+
+		/*
+		 * (Re-)program MBUS remapping windows if we are asked to.
+		 */
+		if (dram)
+			mv_cesa_conf_mbus_windows(dram, chan);
+
+		/*
+		 * Read base addr from Security Accelerator SRAM (CESA)
+		 * needed for hal configuration (based on bootrom)
+		 */
+		err = of_address_to_resource(np_sram, chan, &res);
+		if (err < 0) {
+			dev_err(&pdev->dev, "Cannot get 'cesa-sram' addr");
+			return -ENOENT;
+		}
+
+		dprintk("%s r_start 0x%x, r_end 0x%x\n",
+		    __func__, res.start, res.end);
+
+		if (!request_mem_region(res.start, resource_size(&res),
+				    pdev->name)) {
+			dev_err(&pdev->dev, "failed to request mem res\n");
+			return -EBUSY;
+		}
+
+		halData.sramPhysBase[chan] = res.start;
+		halData.sramVirtBase[chan] = ioremap(res.start,
+				       resource_size(&res));
+
+		ret = of_property_read_u16(pdev->dev.of_node,
+		    "cesa,sramOffset", &halData.sramOffset[chan]);
+		if (ret != 0) {
+			dev_err(&pdev->dev,
+			    "missing or bad CESA sramOffset in dts\n");
+			return -ENOENT;
+		}
+
+		dprintk("%s: sram phys: 0x%lx, sram virt: %p, sram_off 0x%x\n",
+		    __func__, halData.sramPhysBase[chan],
+		    halData.sramVirtBase[chan], halData.sramOffset[chan]);
+
+		chan++;
+	}
+
+	/*
+	 * XXX: Instead of use mvCtrlModelGet() and mvCtrlRevGet() which uses
+	 * mach-spec functions (including PEX reg read), ctrlModel and ctrlRev
+	 * are taken from the dts
+	 */
+
+	np = pdev->dev.of_node;
+	ret = 0;
+	ret |= of_property_read_u16(np, "cesa,ctrlModel", &halData.ctrlModel);
+	ret |= of_property_read_u8(np, "cesa,ctrlRev", &halData.ctrlRev);
+	if (ret != 0) {
+		dev_err(&pdev->dev,
+		    "missing or bad CESA configuration from FDT\n");
+		return -ENOENT;
+	}
+
+	dprintk("%s: ctrlModel: %x, ctrlRev: %x\n",
+	    __func__, halData.ctrlModel, halData.ctrlRev);
+
+	status = mvCesaIfInit(numOfSession, queueDepth, osHandle, &halData);
+
+	return status;
+}
+
+#ifdef CONFIG_PM
+int cesa_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct cesa_s2r_reg (*s2r_reg)[MV_CESA_CHANNELS] = platform_get_drvdata(pdev);
+	uint8_t chan;
+
+	for (chan = 0; chan < MV_CESA_CHANNELS; chan++) {
+		s2r_reg[chan]->desc_offset = MV_REG_READ(MV_CESA_CHAN_DESC_OFFSET_REG(chan));
+		s2r_reg[chan]->config_reg = MV_REG_READ(MV_CESA_CFG_REG(chan));
+		s2r_reg[chan]->int_coal_th = MV_REG_READ(MV_CESA_INT_COAL_TH_REG(chan));
+		s2r_reg[chan]->int_time_th = MV_REG_READ(MV_CESA_INT_TIME_TH_REG(chan));
+		s2r_reg[chan]->tdma_ctrl = MV_REG_READ(MV_CESA_TDMA_CTRL_REG(chan));
+	}
+
+	return 0;
+}
+
+int cesa_resume(struct platform_device *pdev)
+{
+	struct cesa_s2r_reg (*s2r_reg)[MV_CESA_CHANNELS] = platform_get_drvdata(pdev);
+	const struct mbus_dram_target_info *dram;
+	uint8_t chan;
+
+	dram = mv_mbus_dram_info();
+
+	for (chan = 0; chan < MV_CESA_CHANNELS; chan++) {
+		mv_cesa_conf_mbus_windows(dram, chan);
+
+		MV_REG_WRITE(MV_CESA_CHAN_DESC_OFFSET_REG(chan), s2r_reg[chan]->desc_offset);
+		MV_REG_WRITE(MV_CESA_CFG_REG(chan), s2r_reg[chan]->config_reg);
+		MV_REG_WRITE(MV_CESA_INT_COAL_TH_REG(chan), s2r_reg[chan]->int_coal_th);
+		MV_REG_WRITE(MV_CESA_INT_TIME_TH_REG(chan), s2r_reg[chan]->int_time_th);
+		MV_REG_WRITE(MV_CESA_TDMA_CTRL_REG(chan), s2r_reg[chan]->tdma_ctrl);
+
+		/* clear and unmask Int */
+		MV_REG_WRITE(MV_CESA_ISR_CAUSE_REG(chan), 0);
+		MV_REG_WRITE(MV_CESA_ISR_MASK_REG(chan), MV_CESA_CAUSE_EOP_COAL_MASK);
+	}
+
+	return 0;
+}
+#endif
diff --git a/drivers/crypto/mvebu_cesa/cesa_if.h b/drivers/crypto/mvebu_cesa/cesa_if.h
new file mode 100644
index 000000000000..7dbf589be4ca
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/cesa_if.h
@@ -0,0 +1,124 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+		notice, this list of conditions and the following disclaimer in the
+		documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+		used to endorse or promote products derived from this software without
+		specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/*******************************************************************************
+* mvCesa.h - Header File for Cryptographic Engines and Security Accelerator
+*
+* DESCRIPTION:
+*       This header file contains macros typedefs and function declaration for
+*       the Marvell Cryptographic Engines and Security Accelerator.
+*
+*******************************************************************************/
+
+#ifndef __mvCesaIf_h__
+#define __mvCesaIf_h__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mvCesa.h"
+#include "mvCesaRegs.h"
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/mbus.h>
+#include <linux/clk.h>
+
+#ifdef CONFIG_PM
+int cesa_suspend(struct platform_device *pdev, pm_message_t state);
+int cesa_resume(struct platform_device *pdev);
+#endif
+
+/* #define CESA_DEBUGS */
+#ifdef CESA_DEBUGS
+#define dprintk(a...) printk(a)
+#else
+#define dprintk(a...)
+#endif
+
+
+	MV_STATUS mvCesaIfInit(int numOfSession, int queueDepth, void *osHandle, MV_CESA_HAL_DATA *halData);
+	MV_STATUS mvCesaIfFinish(void);
+	MV_STATUS mvCesaIfSessionOpen(MV_CESA_OPEN_SESSION *pSession, short *pSid);
+	MV_STATUS mvCesaIfSessionClose(short sid);
+	MV_STATUS mvCesaIfAction(MV_CESA_COMMAND *pCmd);
+	MV_STATUS mvCesaIfReadyGet(MV_U8 chan, MV_CESA_RESULT *pResult);
+	MV_STATUS mvCesaIfPolicySet(MV_CESA_POLICY policy, MV_CESA_FLOW_TYPE flow);
+	MV_STATUS mvCesaIfPolicyGet(MV_CESA_POLICY *pCesaPolicy);
+	MV_VOID mvCesaIfDebugMbuf(const char *str, MV_CESA_MBUF *pMbuf, int offset, int size);
+
+	void mv_bin_to_hex(const MV_U8 *bin, char *hexStr, int size);
+	MV_VOID mv_hex_to_bin(const char *pHexStr, MV_U8 *pBin, int size);
+	void mv_debug_mem_dump(void *addr, int size, int access);
+	MV_STATUS mvSysCesaInit(int numOfSession, int queueDepth,
+				 void *osHandle, struct platform_device *pdev);
+	int mv_get_cesa_resources(struct platform_device *pdev);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __mvCesaIf_h__ */
diff --git a/drivers/crypto/mvebu_cesa/cesa_ocf_drv.c b/drivers/crypto/mvebu_cesa/cesa_ocf_drv.c
new file mode 100755
index 000000000000..032d0b4c2644
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/cesa_ocf_drv.c
@@ -0,0 +1,1379 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/crypto.h>
+#include <linux/mm.h>
+#include <linux/skbuff.h>
+#include <linux/random.h>
+#include <asm/scatterlist.h>
+#include <linux/spinlock.h>
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "cesa_if.h" /* moved here before cryptodev.h due to include dependencies */
+#include <cryptodev.h>
+#include <uio.h>
+
+#include "mvDebug.h"
+
+#include "mvMD5.h"
+#include "mvSHA1.h"
+
+#include "mvCesaRegs.h"
+#include "AES/mvAes.h"
+#include "mvLru.h"
+
+#ifndef CONFIG_OF
+#error cesa_ocf driver supports only DT configuration
+#endif
+
+
+#undef RT_DEBUG
+#ifdef RT_DEBUG
+static int debug = 1;
+module_param(debug, int, 1);
+MODULE_PARM_DESC(debug, "Enable debug");
+#undef dprintk
+#define dprintk(a...)	if (debug) { printk(a); } else
+#else
+static int debug = 0;
+#undef dprintk
+#define dprintk(a...)
+#endif
+
+#define	DRIVER_NAME	"armada-cesa-ocf"
+
+/* interrupt handling */
+#undef CESA_OCF_TASKLET
+
+extern int cesaReqResources[MV_CESA_CHANNELS];
+/* support for spliting action into 2 actions */
+#define CESA_OCF_SPLIT
+
+/* general defines */
+#define CESA_OCF_MAX_SES 	128
+#define CESA_Q_SIZE		256
+#define CESA_RESULT_Q_SIZE	(CESA_Q_SIZE * MV_CESA_CHANNELS * 2)
+#define CESA_OCF_POOL_SIZE	(CESA_Q_SIZE * MV_CESA_CHANNELS * 2)
+
+/* data structures */
+struct cesa_ocf_data {
+        int                                      cipher_alg;
+        int                                      auth_alg;
+	int					 encrypt_tn_auth;
+#define  auth_tn_decrypt  encrypt_tn_auth
+	int					 ivlen;
+	int 					 digestlen;
+	short					 sid_encrypt;
+	short					 sid_decrypt;
+	/* fragment workaround sessions */
+	short					 frag_wa_encrypt;
+	short					 frag_wa_decrypt;
+	short					 frag_wa_auth;
+};
+
+#define DIGEST_BUF_SIZE	32
+struct cesa_ocf_process {
+	MV_CESA_COMMAND 			cesa_cmd;
+	MV_CESA_MBUF 				cesa_mbuf;
+	MV_BUF_INFO  				cesa_bufs[MV_CESA_MAX_MBUF_FRAGS];
+	char					digest[DIGEST_BUF_SIZE];
+	int					digest_len;
+	struct cryptop 				*crp;
+	int 					need_cb;
+};
+
+/* global variables */
+static int32_t			cesa_ocf_id 		= -1;
+static struct cesa_ocf_data 	**cesa_ocf_sessions = NULL;
+static u_int32_t		cesa_ocf_sesnum = 0;
+static DEFINE_SPINLOCK(cesa_lock);
+static atomic_t result_count;
+static struct cesa_ocf_process *result_Q[CESA_RESULT_Q_SIZE];
+static unsigned int next_result;
+static unsigned int result_done;
+static unsigned char chan_id[MV_CESA_CHANNELS];
+
+/* static APIs */
+static int 		cesa_ocf_process	(device_t, struct cryptop *, int);
+static int 		cesa_ocf_newsession	(device_t, u_int32_t *, struct cryptoini *);
+static int 		cesa_ocf_freesession	(device_t, u_int64_t);
+static inline void 	cesa_callback		(unsigned long);
+static irqreturn_t	cesa_interrupt_handler	(int, void *);
+#ifdef CESA_OCF_TASKLET
+static struct tasklet_struct cesa_ocf_tasklet;
+#endif
+
+static struct timeval          tt_start;
+static struct timeval          tt_end;
+static struct cesa_ocf_process *cesa_ocf_pool = NULL;
+static struct cesa_ocf_process *cesa_ocf_stack[CESA_OCF_POOL_SIZE];
+static unsigned int cesa_ocf_stack_idx;
+
+/*
+ * dummy device structure
+ */
+static struct {
+	softc_device_decl	sc_dev;
+} mv_cesa_dev;
+
+static device_method_t mv_cesa_methods = {
+	/* crypto device methods */
+	DEVMETHOD(cryptodev_newsession,	cesa_ocf_newsession),
+	DEVMETHOD(cryptodev_freesession,cesa_ocf_freesession),
+	DEVMETHOD(cryptodev_process,	cesa_ocf_process),
+	DEVMETHOD(cryptodev_kprocess,	NULL),
+};
+
+unsigned int
+get_usec(unsigned int start)
+{
+	if(start) {
+		do_gettimeofday (&tt_start);
+		return 0;
+	}
+	else {
+		do_gettimeofday (&tt_end);
+		tt_end.tv_sec -= tt_start.tv_sec;
+		tt_end.tv_usec -= tt_start.tv_usec;
+		if (tt_end.tv_usec < 0) {
+			tt_end.tv_usec += 1000 * 1000;
+			tt_end.tv_sec -= 1;
+		}
+	}
+	printk("time taken is  %d\n", (unsigned int)(tt_end.tv_usec + tt_end.tv_sec * 1000000));
+	return (tt_end.tv_usec + tt_end.tv_sec * 1000000);
+}
+
+static void
+skb_copy_bits_back(struct sk_buff *skb, int offset, caddr_t cp, int len)
+{
+        int i;
+        if (offset < skb_headlen(skb)) {
+                memcpy(skb->data + offset, cp, min_t(int, skb_headlen(skb), len));
+                len -= skb_headlen(skb);
+                cp += skb_headlen(skb);
+        }
+        offset -= skb_headlen(skb);
+        for (i = 0; len > 0 && i < skb_shinfo(skb)->nr_frags; i++) {
+                if (offset < skb_shinfo(skb)->frags[i].size) {
+                        memcpy(page_address(skb_shinfo(skb)->frags[i].page.p) +
+                                        skb_shinfo(skb)->frags[i].page_offset,
+                                        cp, min_t(int, skb_shinfo(skb)->frags[i].size, len));
+                        len -= skb_shinfo(skb)->frags[i].size;
+                        cp += skb_shinfo(skb)->frags[i].size;
+                }
+                offset -= skb_shinfo(skb)->frags[i].size;
+        }
+}
+
+
+#ifdef RT_DEBUG
+/*
+ * check that the crp action match the current session
+ */
+static int
+ocf_check_action(struct cryptop *crp, struct cesa_ocf_data *cesa_ocf_cur_ses) {
+	int count = 0;
+	int encrypt = 0, decrypt = 0, auth = 0;
+	struct cryptodesc *crd;
+
+        /* Go through crypto descriptors, processing as we go */
+        for (crd = crp->crp_desc; crd; crd = crd->crd_next, count++) {
+		if(count > 2) {
+			printk("%s,%d: session mode is not supported.\n", __FILE__, __LINE__);
+			return 1;
+		}
+
+		/* Encryption /Decryption */
+		if(crd->crd_alg == cesa_ocf_cur_ses->cipher_alg) {
+			/* check that the action is compatible with session */
+			if(encrypt || decrypt) {
+				printk("%s,%d: session mode is not supported.\n", __FILE__, __LINE__);
+				return 1;
+			}
+
+			if(crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
+				if( (count == 2) && (cesa_ocf_cur_ses->encrypt_tn_auth) ) {
+					printk("%s,%d: sequence isn't supported by this session.\n", __FILE__, __LINE__);
+					return 1;
+				}
+				encrypt++;
+			}
+			else { 					/* decrypt */
+				if( (count == 2) && !(cesa_ocf_cur_ses->auth_tn_decrypt) ) {
+					printk("%s,%d: sequence isn't supported by this session.\n", __FILE__, __LINE__);
+					return 1;
+				}
+				decrypt++;
+			}
+
+		}
+		/* Authentication */
+		else if(crd->crd_alg == cesa_ocf_cur_ses->auth_alg) {
+			/* check that the action is compatible with session */
+			if(auth) {
+				printk("%s,%d: session mode is not supported.\n", __FILE__, __LINE__);
+				return 1;
+			}
+			if( (count == 2) && (decrypt) && (cesa_ocf_cur_ses->auth_tn_decrypt)) {
+				printk("%s,%d: sequence isn't supported by this session.\n", __FILE__, __LINE__);
+				return 1;
+			}
+			if( (count == 2) && (encrypt) && !(cesa_ocf_cur_ses->encrypt_tn_auth)) {
+				printk("%s,%d: sequence isn't supported by this session.\n", __FILE__, __LINE__);
+				return 1;
+			}
+			auth++;
+		}
+		else {
+			printk("%s,%d: Alg isn't supported by this session.\n", __FILE__, __LINE__);
+			return 1;
+		}
+	}
+	return 0;
+
+}
+#endif
+
+static inline struct cesa_ocf_process* cesa_ocf_alloc(void)
+{
+	struct cesa_ocf_process *retval;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cesa_lock, flags);
+	if (cesa_ocf_stack_idx == 0) {
+		spin_unlock_irqrestore(&cesa_lock, flags);
+		return NULL;
+	}
+
+	retval = cesa_ocf_stack[--cesa_ocf_stack_idx];
+	spin_unlock_irqrestore(&cesa_lock, flags);
+
+	return retval;
+}
+
+static inline void cesa_ocf_free(struct cesa_ocf_process *ocf_process_p)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cesa_lock, flags);
+	cesa_ocf_stack[cesa_ocf_stack_idx++] = ocf_process_p;
+	spin_unlock_irqrestore(&cesa_lock, flags);
+}
+
+
+/*
+ * Process a request.
+ */
+static int
+cesa_ocf_process(device_t dev, struct cryptop *crp, int hint)
+{
+	struct cesa_ocf_process *cesa_ocf_cmd = NULL;
+	struct cesa_ocf_process *cesa_ocf_cmd_wa = NULL;
+	MV_CESA_COMMAND	*cesa_cmd;
+	struct cryptodesc *crd;
+	struct cesa_ocf_data *cesa_ocf_cur_ses;
+	int sid = 0, temp_len = 0, i;
+	int encrypt = 0, decrypt = 0, auth = 0;
+	int  status, free_resrc = 0;
+	struct sk_buff *skb = NULL;
+	struct uio *uiop = NULL;
+	unsigned char *ivp;
+	MV_BUF_INFO *p_buf_info;
+	MV_CESA_MBUF *p_mbuf_info;
+	unsigned long flags;
+	unsigned char chan = 0;
+
+
+        dprintk("%s()\n", __func__);
+
+	for (chan = 0; chan < mv_cesa_channels; chan++)
+		free_resrc += cesaReqResources[chan];
+
+		/* In case request should be split, at least 2 slots
+			should be available in CESA fifo */
+		if (free_resrc < 2) {
+			dprintk("%s,%d: ERESTART\n", __FILE__, __LINE__);
+			return -ERESTART;
+		}
+
+#ifdef RT_DEBUG
+        /* Sanity check */
+        if (crp == NULL) {
+                printk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		return -EINVAL;
+        }
+
+        if (crp->crp_desc == NULL || crp->crp_buf == NULL ) {
+                printk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+                crp->crp_etype = EINVAL;
+		return -EINVAL;
+        }
+
+        sid = crp->crp_sid & 0xffffffff;
+        if ((sid >= cesa_ocf_sesnum) || (cesa_ocf_sessions[sid] == NULL)) {
+		crp->crp_etype = -ENOENT;
+		printk(KERN_ERR "%s,%d: ENOENT session %d\n", __FILE__,
+				__LINE__, sid);
+		return -EINVAL;
+        }
+#endif
+
+	sid = crp->crp_sid & 0xffffffff;
+	crp->crp_etype = 0;
+	cesa_ocf_cur_ses = cesa_ocf_sessions[sid];
+
+#ifdef RT_DEBUG
+	if(ocf_check_action(crp, cesa_ocf_cur_ses)){
+		goto p_error;
+	}
+#endif
+
+	/* Allocate new cesa process from local pool */
+	cesa_ocf_cmd = cesa_ocf_alloc();
+	if (cesa_ocf_cmd == NULL) {
+		printk("%s,%d: ENOBUFS \n", __FILE__, __LINE__);
+		goto p_error;
+	}
+
+	/* init cesa_process */
+	cesa_ocf_cmd->crp = crp;
+	/* always call callback */
+	cesa_ocf_cmd->need_cb = 1;
+
+	/* init cesa_cmd for usage of the HALs */
+	cesa_cmd = &cesa_ocf_cmd->cesa_cmd;
+	cesa_cmd->pReqPrv = (void *)cesa_ocf_cmd;
+	cesa_cmd->sessionId = cesa_ocf_cur_ses->sid_encrypt; /* defualt use encrypt */
+
+	/* prepare src buffer 	*/
+	/* we send the entire buffer to the HAL, even if only part of it should be encrypt/auth.  */
+	/* if not using seesions for both encrypt and auth, then it will be wiser to to copy only */
+	/* from skip to crd_len. 								  */
+	p_buf_info = cesa_ocf_cmd->cesa_bufs;
+	p_mbuf_info = &cesa_ocf_cmd->cesa_mbuf;
+
+	p_buf_info += 2; /* save 2 first buffers for IV and digest -
+			    we won't append them to the end since, they
+			    might be places in an unaligned addresses. */
+
+	p_mbuf_info->pFrags = p_buf_info;
+	temp_len = 0;
+
+	/* handle SKB */
+	if (crp->crp_flags & CRYPTO_F_SKBUF) {
+
+		dprintk("%s,%d: handle SKB.\n", __FILE__, __LINE__);
+		skb = (struct sk_buff *) crp->crp_buf;
+
+                if (skb_shinfo(skb)->nr_frags >= (MV_CESA_MAX_MBUF_FRAGS - 1)) {
+                        printk("%s,%d: %d nr_frags > MV_CESA_MAX_MBUF_FRAGS", __FILE__, __LINE__, skb_shinfo(skb)->nr_frags);
+                        goto p_error;
+                }
+
+		p_mbuf_info->mbufSize = skb->len;
+		temp_len = skb->len;
+		/* first skb fragment */
+		p_buf_info->bufSize = skb_headlen(skb);
+		p_buf_info->bufVirtPtr = skb->data;
+		p_buf_info++;
+
+		/* now handle all other skb fragments */
+		for ( i = 0; i < skb_shinfo(skb)->nr_frags; i++ ) {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+			p_buf_info->bufSize = frag->size;
+			p_buf_info->bufVirtPtr = page_address(frag->page.p) + frag->page_offset;
+			p_buf_info++;
+		}
+		p_mbuf_info->numFrags = skb_shinfo(skb)->nr_frags + 1;
+	}
+	/* handle UIO */
+	else if(crp->crp_flags & CRYPTO_F_IOV) {
+
+		dprintk("%s,%d: handle UIO.\n", __FILE__, __LINE__);
+		uiop = (struct uio *) crp->crp_buf;
+
+                if (uiop->uio_iovcnt > (MV_CESA_MAX_MBUF_FRAGS - 1)) {
+                        printk("%s,%d: %d uio_iovcnt > MV_CESA_MAX_MBUF_FRAGS \n", __FILE__, __LINE__, uiop->uio_iovcnt);
+                        goto p_error;
+                }
+
+		p_mbuf_info->mbufSize = crp->crp_ilen;
+		p_mbuf_info->numFrags = uiop->uio_iovcnt;
+		for(i = 0; i < uiop->uio_iovcnt; i++) {
+			p_buf_info->bufVirtPtr = uiop->uio_iov[i].iov_base;
+			p_buf_info->bufSize = uiop->uio_iov[i].iov_len;
+			temp_len += p_buf_info->bufSize;
+			dprintk("%s,%d: buf %x-> addr %x, size %x \n"
+				, __FILE__, __LINE__, i, (unsigned int)p_buf_info->bufVirtPtr, p_buf_info->bufSize);
+			p_buf_info++;
+		}
+
+	}
+	/* handle CONTIG */
+	else {
+		dprintk("%s,%d: handle CONTIG.\n", __FILE__, __LINE__);
+		p_mbuf_info->numFrags = 1;
+		p_mbuf_info->mbufSize = crp->crp_ilen;
+		p_buf_info->bufVirtPtr = crp->crp_buf;
+		p_buf_info->bufSize = crp->crp_ilen;
+		temp_len = crp->crp_ilen;
+		p_buf_info++;
+	}
+
+	/* Support up to 64K why? cause! */
+	if(crp->crp_ilen > 64*1024) {
+		printk("%s,%d: buf too big %x \n", __FILE__, __LINE__, crp->crp_ilen);
+		goto p_error;
+	}
+
+	if( temp_len != crp->crp_ilen ) {
+		printk("%s,%d: warning size don't match.(%x %x) \n", __FILE__, __LINE__, temp_len, crp->crp_ilen);
+	}
+
+	cesa_cmd->pSrc = p_mbuf_info;
+	cesa_cmd->pDst = p_mbuf_info;
+
+	/* restore p_buf_info to point to first available buf */
+	p_buf_info = cesa_ocf_cmd->cesa_bufs;
+	p_buf_info += 1;
+
+
+        /* Go through crypto descriptors, processing as we go */
+        for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
+
+		/* Encryption /Decryption */
+		if(crd->crd_alg == cesa_ocf_cur_ses->cipher_alg) {
+
+			dprintk("%s,%d: cipher", __FILE__, __LINE__);
+
+			cesa_cmd->cryptoOffset = crd->crd_skip;
+			cesa_cmd->cryptoLength = crd->crd_len;
+
+			if(crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
+				dprintk(" encrypt \n");
+				encrypt++;
+
+				/* handle IV */
+				if (crd->crd_flags & CRD_F_IV_EXPLICIT) {  /* IV from USER */
+					dprintk("%s,%d: IV from USER (offset %x) \n", __FILE__, __LINE__, crd->crd_inject);
+					cesa_cmd->ivFromUser = 1;
+					ivp = crd->crd_iv;
+
+					/*
+					 * do we have to copy the IV back to the buffer ?
+					 */
+					if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
+						dprintk("%s,%d: copy the IV back to the buffer\n", __FILE__, __LINE__);
+						cesa_cmd->ivOffset = crd->crd_inject;
+						if (crp->crp_flags & CRYPTO_F_SKBUF)
+							skb_copy_bits_back(skb, crd->crd_inject, ivp, cesa_ocf_cur_ses->ivlen);
+						else if (crp->crp_flags & CRYPTO_F_IOV)
+							cuio_copyback(uiop,crd->crd_inject, cesa_ocf_cur_ses->ivlen,(caddr_t)ivp);
+						else
+							memcpy(crp->crp_buf + crd->crd_inject, ivp, cesa_ocf_cur_ses->ivlen);
+					}
+					else {
+						dprintk("%s,%d: don't copy the IV back to the buffer \n", __FILE__, __LINE__);
+						p_mbuf_info->numFrags++;
+						p_mbuf_info->mbufSize += cesa_ocf_cur_ses->ivlen;
+						p_mbuf_info->pFrags = p_buf_info;
+
+						p_buf_info->bufVirtPtr = ivp;
+						p_buf_info->bufSize = cesa_ocf_cur_ses->ivlen;
+						p_buf_info--;
+
+						/* offsets */
+						cesa_cmd->ivOffset = 0;
+						cesa_cmd->cryptoOffset += cesa_ocf_cur_ses->ivlen;
+						if(auth) {
+							cesa_cmd->macOffset += cesa_ocf_cur_ses->ivlen;
+							cesa_cmd->digestOffset += cesa_ocf_cur_ses->ivlen;
+						}
+					}
+                                }
+				else {					/* random IV */
+					dprintk("%s,%d: random IV \n", __FILE__, __LINE__);
+					cesa_cmd->ivFromUser = 0;
+
+					/*
+					 * do we have to copy the IV back to the buffer ?
+					 */
+					/* in this mode the HAL will always copy the IV */
+					/* given by the session to the ivOffset  	*/
+					if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
+						cesa_cmd->ivOffset = crd->crd_inject;
+					}
+					else {
+						/* if IV isn't copy, then how will the user know which IV did we use??? */
+						printk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+						goto p_error;
+					}
+				}
+			}
+			else { 					/* decrypt */
+				dprintk(" decrypt \n");
+				decrypt++;
+				cesa_cmd->sessionId = cesa_ocf_cur_ses->sid_decrypt;
+
+				/* handle IV */
+				if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
+					dprintk("%s,%d: IV from USER \n", __FILE__, __LINE__);
+					/* append the IV buf to the mbuf */
+					cesa_cmd->ivFromUser = 1;
+					p_mbuf_info->numFrags++;
+					p_mbuf_info->mbufSize += cesa_ocf_cur_ses->ivlen;
+					p_mbuf_info->pFrags = p_buf_info;
+
+					p_buf_info->bufVirtPtr = crd->crd_iv;
+					p_buf_info->bufSize = cesa_ocf_cur_ses->ivlen;
+					p_buf_info--;
+
+					/* offsets */
+					cesa_cmd->ivOffset = 0;
+					cesa_cmd->cryptoOffset += cesa_ocf_cur_ses->ivlen;
+					if(auth) {
+						cesa_cmd->macOffset += cesa_ocf_cur_ses->ivlen;
+						cesa_cmd->digestOffset += cesa_ocf_cur_ses->ivlen;
+					}
+                                }
+				else {
+					dprintk("%s,%d: IV inside the buffer \n", __FILE__, __LINE__);
+					cesa_cmd->ivFromUser = 0;
+					cesa_cmd->ivOffset = crd->crd_inject;
+				}
+			}
+
+		}
+		/* Authentication */
+		else if(crd->crd_alg == cesa_ocf_cur_ses->auth_alg) {
+			dprintk("%s,%d:  Authentication \n", __FILE__, __LINE__);
+			auth++;
+			cesa_cmd->macOffset = crd->crd_skip;
+			cesa_cmd->macLength = crd->crd_len;
+
+			/* digest + mac */
+			cesa_cmd->digestOffset = crd->crd_inject;
+		}
+		else {
+			printk("%s,%d: Alg isn't supported by this session.\n", __FILE__, __LINE__);
+			goto p_error;
+		}
+	}
+
+	dprintk("\n");
+	dprintk("%s,%d: Sending Action: \n", __FILE__, __LINE__);
+	dprintk("%s,%d: IV from user: %d. IV offset %x \n",  __FILE__, __LINE__, cesa_cmd->ivFromUser, cesa_cmd->ivOffset);
+	dprintk("%s,%d: crypt offset %x len %x \n", __FILE__, __LINE__, cesa_cmd->cryptoOffset, cesa_cmd->cryptoLength);
+	dprintk("%s,%d: Auth offset %x len %x \n", __FILE__, __LINE__, cesa_cmd->macOffset, cesa_cmd->macLength);
+	dprintk("%s,%d: set digest in offset %x . \n", __FILE__, __LINE__, cesa_cmd->digestOffset);
+	if(debug) {
+		mvCesaIfDebugMbuf("SRC BUFFER", cesa_cmd->pSrc, 0, cesa_cmd->pSrc->mbufSize);
+	}
+
+	cesa_cmd->split = MV_CESA_SPLIT_NONE;
+
+	/* send action to HAL */
+	spin_lock_irqsave(&cesa_lock, flags);
+	status = mvCesaIfAction(cesa_cmd);
+	spin_unlock_irqrestore(&cesa_lock, flags);
+
+	/* action not allowed */
+	if(status == MV_NOT_ALLOWED) {
+#ifdef CESA_OCF_SPLIT
+		/* if both encrypt and auth try to split */
+		if(auth && (encrypt || decrypt)) {
+			MV_CESA_COMMAND	*cesa_cmd_wa;
+
+			/* Allocate new cesa process from local pool and initialize it */
+			cesa_ocf_cmd_wa = cesa_ocf_alloc();
+
+			if (cesa_ocf_cmd_wa == NULL) {
+				printk("%s,%d: ENOBUFS \n", __FILE__, __LINE__);
+				goto p_error;
+			}
+			memcpy(cesa_ocf_cmd_wa, cesa_ocf_cmd, sizeof(struct cesa_ocf_process));
+			cesa_cmd_wa = &cesa_ocf_cmd_wa->cesa_cmd;
+			cesa_cmd_wa->pReqPrv = (void *)cesa_ocf_cmd_wa;
+			cesa_ocf_cmd_wa->need_cb = 0;
+			cesa_cmd_wa->split = MV_CESA_SPLIT_FIRST;
+			cesa_cmd->split = MV_CESA_SPLIT_SECOND;
+
+			/* break requests to two operation, first operation completion won't call callback */
+			if((decrypt) && (cesa_ocf_cur_ses->auth_tn_decrypt)) {
+				cesa_cmd_wa->sessionId = cesa_ocf_cur_ses->frag_wa_auth;
+				cesa_cmd->sessionId = cesa_ocf_cur_ses->frag_wa_decrypt;
+			}
+			else if((decrypt) && !(cesa_ocf_cur_ses->auth_tn_decrypt)) {
+				cesa_cmd_wa->sessionId = cesa_ocf_cur_ses->frag_wa_decrypt;
+				cesa_cmd->sessionId = cesa_ocf_cur_ses->frag_wa_auth;
+			}
+			else if((encrypt) && (cesa_ocf_cur_ses->encrypt_tn_auth)) {
+				cesa_cmd_wa->sessionId = cesa_ocf_cur_ses->frag_wa_encrypt;
+				cesa_cmd->sessionId = cesa_ocf_cur_ses->frag_wa_auth;
+			}
+			else if((encrypt) && !(cesa_ocf_cur_ses->encrypt_tn_auth)){
+				cesa_cmd_wa->sessionId = cesa_ocf_cur_ses->frag_wa_auth;
+				cesa_cmd->sessionId = cesa_ocf_cur_ses->frag_wa_encrypt;
+			}
+			else {
+				printk("%s,%d: Unsupporterd fragment wa mode \n", __FILE__, __LINE__);
+				goto p_error;
+			}
+
+			/* send the 2 actions to the HAL */
+			spin_lock_irqsave(&cesa_lock, flags);
+			status = mvCesaIfAction(cesa_cmd_wa);
+			spin_unlock_irqrestore(&cesa_lock, flags);
+
+			if((status != MV_NO_MORE) && (status != MV_OK)) {
+				printk("%s,%d: cesa action failed, status = 0x%x\n", __FILE__, __LINE__, status);
+				goto p_error;
+			}
+			spin_lock_irqsave(&cesa_lock, flags);
+			status = mvCesaIfAction(cesa_cmd);
+			spin_unlock_irqrestore(&cesa_lock, flags);
+
+		}
+		/* action not allowed and can't split */
+		else
+#endif
+		{
+			goto p_error;
+		}
+	}
+
+	/* Hal Q is full, send again. This should never happen */
+	if(status == MV_NO_RESOURCE) {
+		dprintk("%s,%d: cesa no more resources \n", __FILE__, __LINE__);
+		if(cesa_ocf_cmd)
+			cesa_ocf_free(cesa_ocf_cmd);
+		if(cesa_ocf_cmd_wa)
+			cesa_ocf_free(cesa_ocf_cmd_wa);
+
+		return -ERESTART;
+	} else if ((status != MV_NO_MORE) && (status != MV_OK)) {
+                printk("%s,%d: cesa action failed, status = 0x%x\n", __FILE__, __LINE__, status);
+		goto p_error;
+        }
+
+	dprintk("%s, status %d\n", __func__, status);
+
+	return 0;
+
+p_error:
+	crp->crp_etype = -EINVAL;
+	if(cesa_ocf_cmd)
+		cesa_ocf_free(cesa_ocf_cmd);
+	if(cesa_ocf_cmd_wa)
+		cesa_ocf_free(cesa_ocf_cmd_wa);
+
+	return -EINVAL;
+}
+
+/*
+ * cesa callback.
+ */
+static inline void
+cesa_callback(unsigned long dummy)
+{
+	struct cesa_ocf_process *cesa_ocf_cmd = NULL;
+	struct cryptop 		*crp = NULL;
+	int need_cb;
+
+	dprintk("%s()\n", __func__);
+
+	spin_lock(&cesa_lock);
+
+	while (atomic_read(&result_count) != 0) {
+		cesa_ocf_cmd = result_Q[result_done];
+		need_cb = cesa_ocf_cmd->need_cb;
+		crp = cesa_ocf_cmd->crp;
+
+		if (debug && need_cb)
+			mvCesaIfDebugMbuf("DST BUFFER", cesa_ocf_cmd->cesa_cmd.pDst, 0,
+							cesa_ocf_cmd->cesa_cmd.pDst->mbufSize);
+
+		result_done = ((result_done + 1) % CESA_RESULT_Q_SIZE);
+		atomic_dec(&result_count);
+		cesa_ocf_stack[cesa_ocf_stack_idx++] = cesa_ocf_cmd;
+
+		if (need_cb) {
+			spin_unlock(&cesa_lock);
+			crypto_done(crp);
+			spin_lock(&cesa_lock);
+		}
+	}
+
+	spin_unlock(&cesa_lock);
+
+	return;
+}
+
+/*
+ * cesa Interrupt Service Routine.
+ */
+static irqreturn_t
+cesa_interrupt_handler(int irq, void *arg)
+{
+	MV_CESA_RESULT  	result;
+	MV_STATUS               status;
+	unsigned int cause, mask;
+	unsigned char chan = *((u8 *)arg);
+
+	dprintk("%s()\n", __func__);
+
+	if (mv_cesa_feature == INT_COALESCING)
+		mask = MV_CESA_CAUSE_EOP_COAL_MASK;
+	else
+		mask = MV_CESA_CAUSE_ACC_DMA_MASK;
+
+	/* Read cause register */
+	cause = MV_REG_READ(MV_CESA_ISR_CAUSE_REG(chan));
+
+	if (likely(cause & mask)) {
+
+		spin_lock(&cesa_lock);
+
+		/* Clear pending irq */
+		MV_REG_WRITE(MV_CESA_ISR_CAUSE_REG(chan), 0);
+
+		/* Get completed results */
+		while (atomic_read(&result_count) < CESA_RESULT_Q_SIZE) {
+
+			status = mvCesaIfReadyGet(chan, &result);
+			if (status != MV_OK)
+				break;
+
+			result_Q[next_result] = (struct cesa_ocf_process *)result.pReqPrv;
+			next_result = ((next_result + 1) % CESA_RESULT_Q_SIZE);
+			atomic_inc(&result_count);
+		}
+
+		spin_unlock(&cesa_lock);
+
+		if (atomic_read(&result_count) >= CESA_RESULT_Q_SIZE) {
+			/* In case reaching this point -result_Q should be tuned   */
+				printk("%s: Error: Q request is full(chan=%d)\n", __func__, chan);
+				return IRQ_HANDLED;
+		}
+	}
+
+	if(likely(atomic_read(&result_count) > 0))
+#ifdef CESA_OCF_TASKLET
+		tasklet_hi_schedule(&cesa_ocf_tasklet);
+#else
+		cesa_callback(0);
+#endif
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Open a session.
+ */
+static int
+/*cesa_ocf_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)*/
+cesa_ocf_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
+{
+	u32 status = 0, i = 0;
+	unsigned long flags = 0;
+	u32 count = 0, auth = 0, encrypt =0;
+	struct cesa_ocf_data *cesa_ocf_cur_ses;
+	MV_CESA_OPEN_SESSION cesa_session;
+	MV_CESA_OPEN_SESSION *cesa_ses = &cesa_session;
+
+
+        dprintk("%s()\n", __func__);
+        if (sid == NULL || cri == NULL) {
+                printk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+                return EINVAL;
+        }
+
+	if (cesa_ocf_sessions) {
+		for (i = 1; i < cesa_ocf_sesnum; i++)
+			if (cesa_ocf_sessions[i] == NULL)
+				break;
+	} else
+		i = 1;
+
+	if (cesa_ocf_sessions == NULL || i == cesa_ocf_sesnum) {
+		struct cesa_ocf_data **cesa_ocf_new_sessions;
+
+		if (cesa_ocf_sessions == NULL) {
+			i = 1; /* We leave cesa_ocf_sessions[0] empty */
+			cesa_ocf_sesnum = CESA_OCF_MAX_SES;
+		}
+		else
+			cesa_ocf_sesnum *= 2;
+
+		cesa_ocf_new_sessions = kmalloc(cesa_ocf_sesnum * sizeof(struct cesa_ocf_data *), SLAB_ATOMIC);
+		if (cesa_ocf_new_sessions == NULL) {
+			/* Reset session number */
+			if (cesa_ocf_sesnum == CESA_OCF_MAX_SES)
+				cesa_ocf_sesnum = 0;
+			else
+				cesa_ocf_sesnum /= 2;
+			printk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
+			return ENOBUFS;
+		}
+		memset(cesa_ocf_new_sessions, 0, cesa_ocf_sesnum * sizeof(struct cesa_ocf_data *));
+
+		/* Copy existing sessions */
+		if (cesa_ocf_sessions) {
+			memcpy(cesa_ocf_new_sessions, cesa_ocf_sessions,
+			    (cesa_ocf_sesnum / 2) * sizeof(struct cesa_ocf_data *));
+			kfree(cesa_ocf_sessions);
+		}
+
+		cesa_ocf_sessions = cesa_ocf_new_sessions;
+	}
+
+	cesa_ocf_sessions[i] = (struct cesa_ocf_data *) kmalloc(sizeof(struct cesa_ocf_data),
+			SLAB_ATOMIC);
+	if (cesa_ocf_sessions[i] == NULL) {
+		cesa_ocf_freesession(NULL, i);
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		return ENOBUFS;
+	}
+
+	dprintk("%s,%d: new session %d \n", __FILE__, __LINE__, i);
+
+        *sid = i;
+        cesa_ocf_cur_ses = cesa_ocf_sessions[i];
+        memset(cesa_ocf_cur_ses, 0, sizeof(struct cesa_ocf_data));
+	cesa_ocf_cur_ses->sid_encrypt = -1;
+	cesa_ocf_cur_ses->sid_decrypt = -1;
+	cesa_ocf_cur_ses->frag_wa_encrypt = -1;
+	cesa_ocf_cur_ses->frag_wa_decrypt = -1;
+	cesa_ocf_cur_ses->frag_wa_auth = -1;
+
+	/* init the session */
+	memset(cesa_ses, 0, sizeof(MV_CESA_OPEN_SESSION));
+	count = 1;
+        while (cri) {
+		if(count > 2) {
+			printk("%s,%d: don't support more then 2 operations\n", __FILE__, __LINE__);
+			goto error;
+		}
+                switch (cri->cri_alg) {
+		case CRYPTO_AES_CBC:
+			dprintk("%s,%d: (%d) AES CBC \n", __FILE__, __LINE__, count);
+			cesa_ocf_cur_ses->cipher_alg = cri->cri_alg;
+			cesa_ocf_cur_ses->ivlen = MV_CESA_AES_BLOCK_SIZE;
+			cesa_ses->cryptoAlgorithm = MV_CESA_CRYPTO_AES;
+			cesa_ses->cryptoMode = MV_CESA_CRYPTO_CBC;
+			if(cri->cri_klen/8 > MV_CESA_MAX_CRYPTO_KEY_LENGTH) {
+				printk("%s,%d: CRYPTO key too long.\n", __FILE__, __LINE__);
+				goto error;
+			}
+			memcpy(cesa_ses->cryptoKey, cri->cri_key, cri->cri_klen/8);
+			dprintk("%s,%d: key length %d \n", __FILE__, __LINE__, cri->cri_klen/8);
+			cesa_ses->cryptoKeyLength = cri->cri_klen/8;
+			encrypt += count;
+			break;
+                case CRYPTO_3DES_CBC:
+			dprintk("%s,%d: (%d) 3DES CBC \n", __FILE__, __LINE__, count);
+			cesa_ocf_cur_ses->cipher_alg = cri->cri_alg;
+			cesa_ocf_cur_ses->ivlen = MV_CESA_3DES_BLOCK_SIZE;
+			cesa_ses->cryptoAlgorithm = MV_CESA_CRYPTO_3DES;
+			cesa_ses->cryptoMode = MV_CESA_CRYPTO_CBC;
+			if(cri->cri_klen/8 > MV_CESA_MAX_CRYPTO_KEY_LENGTH) {
+				printk("%s,%d: CRYPTO key too long.\n", __FILE__, __LINE__);
+				goto error;
+			}
+			memcpy(cesa_ses->cryptoKey, cri->cri_key, cri->cri_klen/8);
+			cesa_ses->cryptoKeyLength = cri->cri_klen/8;
+			encrypt += count;
+			break;
+                case CRYPTO_DES_CBC:
+			dprintk("%s,%d: (%d) DES CBC \n", __FILE__, __LINE__, count);
+			cesa_ocf_cur_ses->cipher_alg = cri->cri_alg;
+			cesa_ocf_cur_ses->ivlen = MV_CESA_DES_BLOCK_SIZE;
+			cesa_ses->cryptoAlgorithm = MV_CESA_CRYPTO_DES;
+			cesa_ses->cryptoMode = MV_CESA_CRYPTO_CBC;
+			if(cri->cri_klen/8 > MV_CESA_MAX_CRYPTO_KEY_LENGTH) {
+				printk("%s,%d: CRYPTO key too long.\n", __FILE__, __LINE__);
+				goto error;
+			}
+			memcpy(cesa_ses->cryptoKey, cri->cri_key, cri->cri_klen/8);
+			cesa_ses->cryptoKeyLength = cri->cri_klen/8;
+			encrypt += count;
+			break;
+                case CRYPTO_MD5:
+                case CRYPTO_MD5_HMAC:
+			dprintk("%s,%d: (%d) %sMD5 CBC \n", __FILE__, __LINE__, count, (cri->cri_alg != CRYPTO_MD5)? "H-":" ");
+                        cesa_ocf_cur_ses->auth_alg = cri->cri_alg;
+			cesa_ocf_cur_ses->digestlen = (cri->cri_alg == CRYPTO_MD5)? MV_CESA_MD5_DIGEST_SIZE : 12;
+			cesa_ses->macMode = (cri->cri_alg == CRYPTO_MD5)? MV_CESA_MAC_MD5 : MV_CESA_MAC_HMAC_MD5;
+			if(cri->cri_klen/8 > MV_CESA_MAX_CRYPTO_KEY_LENGTH) {
+				printk("%s,%d: MAC key too long. \n", __FILE__, __LINE__);
+				goto error;
+			}
+			cesa_ses->macKeyLength = cri->cri_klen/8;
+			memcpy(cesa_ses->macKey, cri->cri_key, cri->cri_klen/8);
+			cesa_ses->digestSize = cesa_ocf_cur_ses->digestlen;
+			auth += count;
+			break;
+                case CRYPTO_SHA1:
+                case CRYPTO_SHA1_HMAC:
+			dprintk("%s,%d: (%d) %sSHA1 CBC \n", __FILE__, __LINE__, count, (cri->cri_alg != CRYPTO_SHA1)? "H-":" ");
+                        cesa_ocf_cur_ses->auth_alg = cri->cri_alg;
+			cesa_ocf_cur_ses->digestlen = (cri->cri_alg == CRYPTO_SHA1)? MV_CESA_SHA1_DIGEST_SIZE : 12;
+			cesa_ses->macMode = (cri->cri_alg == CRYPTO_SHA1)? MV_CESA_MAC_SHA1 : MV_CESA_MAC_HMAC_SHA1;
+			if(cri->cri_klen/8 > MV_CESA_MAX_CRYPTO_KEY_LENGTH) {
+				printk("%s,%d: MAC key too long. \n", __FILE__, __LINE__);
+				goto error;
+			}
+			cesa_ses->macKeyLength = cri->cri_klen/8;
+			memcpy(cesa_ses->macKey, cri->cri_key, cri->cri_klen/8);
+			cesa_ses->digestSize = cesa_ocf_cur_ses->digestlen;
+			auth += count;
+			break;
+                default:
+                        printk("%s,%d: unknown algo 0x%x\n", __FILE__, __LINE__, cri->cri_alg);
+                        goto error;
+                }
+                cri = cri->cri_next;
+		count++;
+        }
+
+	if((encrypt > 2) || (auth > 2)) {
+		printk("%s,%d: session mode is not supported.\n", __FILE__, __LINE__);
+                goto error;
+	}
+
+	/* create new sessions in HAL */
+	if(encrypt) {
+		cesa_ses->operation = MV_CESA_CRYPTO_ONLY;
+		/* encrypt session */
+		if(auth == 1) {
+			cesa_ses->operation = MV_CESA_MAC_THEN_CRYPTO;
+		}
+		else if(auth == 2) {
+			cesa_ses->operation = MV_CESA_CRYPTO_THEN_MAC;
+			cesa_ocf_cur_ses->encrypt_tn_auth = 1;
+		}
+		else {
+			cesa_ses->operation = MV_CESA_CRYPTO_ONLY;
+		}
+		cesa_ses->direction = MV_CESA_DIR_ENCODE;
+		spin_lock_irqsave(&cesa_lock, flags);
+		status = mvCesaIfSessionOpen(cesa_ses, &cesa_ocf_cur_ses->sid_encrypt);
+		spin_unlock_irqrestore(&cesa_lock, flags);
+		if(status != MV_OK) {
+			printk("%s,%d: Can't open new session - status = 0x%x\n", __FILE__, __LINE__, status);
+			goto error;
+		}
+		/* decrypt session */
+		if( cesa_ses->operation == MV_CESA_MAC_THEN_CRYPTO ) {
+			cesa_ses->operation = MV_CESA_CRYPTO_THEN_MAC;
+		}
+		else if( cesa_ses->operation == MV_CESA_CRYPTO_THEN_MAC ) {
+			cesa_ses->operation = MV_CESA_MAC_THEN_CRYPTO;
+		}
+		cesa_ses->direction = MV_CESA_DIR_DECODE;
+		spin_lock_irqsave(&cesa_lock, flags);
+		status = mvCesaIfSessionOpen(cesa_ses, &cesa_ocf_cur_ses->sid_decrypt);
+		spin_unlock_irqrestore(&cesa_lock, flags);
+		if(status != MV_OK) {
+			printk("%s,%d: Can't open new session - status = 0x%x\n", __FILE__, __LINE__, status);
+			goto error;
+		}
+
+		/* preapre one action sessions for case we will need to split an action */
+#ifdef CESA_OCF_SPLIT
+		if(( cesa_ses->operation == MV_CESA_MAC_THEN_CRYPTO ) ||
+			( cesa_ses->operation == MV_CESA_CRYPTO_THEN_MAC )) {
+			/* open one session for encode and one for decode */
+			cesa_ses->operation = MV_CESA_CRYPTO_ONLY;
+			cesa_ses->direction = MV_CESA_DIR_ENCODE;
+			spin_lock_irqsave(&cesa_lock, flags);
+			status = mvCesaIfSessionOpen(cesa_ses, &cesa_ocf_cur_ses->frag_wa_encrypt);
+			spin_unlock_irqrestore(&cesa_lock, flags);
+			if(status != MV_OK) {
+				printk("%s,%d: Can't open new session - status = 0x%x\n", __FILE__, __LINE__, status);
+				goto error;
+			}
+
+			cesa_ses->direction = MV_CESA_DIR_DECODE;
+			spin_lock_irqsave(&cesa_lock, flags);
+			status = mvCesaIfSessionOpen(cesa_ses, &cesa_ocf_cur_ses->frag_wa_decrypt);
+			spin_unlock_irqrestore(&cesa_lock, flags);
+			if(status != MV_OK) {
+				printk("%s,%d: Can't open new session - status = 0x%x\n", __FILE__, __LINE__, status);
+				goto error;
+			}
+			/* open one session for auth */
+			cesa_ses->operation = MV_CESA_MAC_ONLY;
+			cesa_ses->direction = MV_CESA_DIR_ENCODE;
+			spin_lock_irqsave(&cesa_lock, flags);
+			status = mvCesaIfSessionOpen(cesa_ses, &cesa_ocf_cur_ses->frag_wa_auth);
+			spin_unlock_irqrestore(&cesa_lock, flags);
+			if(status != MV_OK) {
+				printk("%s,%d: Can't open new session - status = 0x%x\n", __FILE__, __LINE__, status);
+				goto error;
+			}
+		}
+#endif
+	}
+	else { /* only auth */
+		cesa_ses->operation = MV_CESA_MAC_ONLY;
+		cesa_ses->direction = MV_CESA_DIR_ENCODE;
+		spin_lock_irqsave(&cesa_lock, flags);
+		status = mvCesaIfSessionOpen(cesa_ses, &cesa_ocf_cur_ses->sid_encrypt);
+		spin_unlock_irqrestore(&cesa_lock, flags);
+		if(status != MV_OK) {
+			printk("%s,%d: Can't open new session - status = 0x%x\n", __FILE__, __LINE__, status);
+			goto error;
+		}
+	}
+
+	return 0;
+error:
+	cesa_ocf_freesession(NULL, *sid);
+	return EINVAL;
+
+}
+
+
+/*
+ * Free a session.
+ */
+static int
+cesa_ocf_freesession(device_t dev, u_int64_t tid)
+{
+        struct cesa_ocf_data *cesa_ocf_cur_ses;
+        u_int32_t sid = CRYPTO_SESID2LID(tid);
+	unsigned long flags = 0;
+
+        dprintk("%s() %d \n", __func__, sid);
+	if (sid > cesa_ocf_sesnum || cesa_ocf_sessions == NULL ||
+			cesa_ocf_sessions[sid] == NULL) {
+		dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+		return EINVAL;
+	}
+
+        /* Silently accept and return */
+        if (sid == 0)
+                return(0);
+
+	/* release session from HAL */
+	cesa_ocf_cur_ses = cesa_ocf_sessions[sid];
+	if (cesa_ocf_cur_ses->sid_encrypt != -1) {
+		spin_lock_irqsave(&cesa_lock, flags);
+		mvCesaIfSessionClose(cesa_ocf_cur_ses->sid_encrypt);
+		spin_unlock_irqrestore(&cesa_lock, flags);
+	}
+	if (cesa_ocf_cur_ses->sid_decrypt != -1) {
+		spin_lock_irqsave(&cesa_lock, flags);
+		mvCesaIfSessionClose(cesa_ocf_cur_ses->sid_decrypt);
+		spin_unlock_irqrestore(&cesa_lock, flags);
+	}
+	if (cesa_ocf_cur_ses->frag_wa_encrypt != -1) {
+		spin_lock_irqsave(&cesa_lock, flags);
+		mvCesaIfSessionClose(cesa_ocf_cur_ses->frag_wa_encrypt);
+		spin_unlock_irqrestore(&cesa_lock, flags);
+	}
+	if (cesa_ocf_cur_ses->frag_wa_decrypt != -1) {
+		spin_lock_irqsave(&cesa_lock, flags);
+		mvCesaIfSessionClose(cesa_ocf_cur_ses->frag_wa_decrypt);
+		spin_unlock_irqrestore(&cesa_lock, flags);
+	}
+	if (cesa_ocf_cur_ses->frag_wa_auth != -1) {
+		spin_lock_irqsave(&cesa_lock, flags);
+		mvCesaIfSessionClose(cesa_ocf_cur_ses->frag_wa_auth);
+		spin_unlock_irqrestore(&cesa_lock, flags);
+	}
+
+	kfree(cesa_ocf_cur_ses);
+	cesa_ocf_sessions[sid] = NULL;
+
+        return 0;
+}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30))
+extern int crypto_init(void);
+#endif
+
+/*
+ * our driver startup and shutdown routines
+ */
+static int
+cesa_ocf_probe(struct platform_device *pdev)
+{
+	u8 chan = 0;
+	const char *irq_str[] = {"cesa0","cesa1"};
+	const char *cesa_m;
+	unsigned int mask;
+	struct device_node *np;
+	struct clk *clk;
+	int err, i, j;
+
+	if (!pdev->dev.of_node) {
+		dev_err(&pdev->dev, "CESA device node not available\n");
+		return -ENOENT;
+	}
+
+	/*
+	 * Check driver mode from dts
+	 */
+	cesa_m = of_get_property(pdev->dev.of_node, "cesa,mode", NULL);
+	if (strncmp(cesa_m, "ocf", 3) != 0) {
+		dprintk("%s: device operate in %s mode\n", __func__, cesa_m);
+		return -ENODEV;
+	}
+	mv_cesa_mode = CESA_OCF_M;
+
+	err = mv_get_cesa_resources(pdev);
+	if (err != 0)
+		return err;
+
+	j = of_property_count_strings(pdev->dev.of_node, "clock-names");
+	dprintk("%s: Gate %d clocks\n", __func__, (j > 0 ? j : 1));
+	/*
+	 * If property "clock-names" does not exist (j < 0), assume that there
+	 * is only one clock which needs gating (j > 0 ? j : 1)
+	 */
+	for (i = 0; i < (j > 0 ? j : 1); i++) {
+
+		/* Not all platforms can gate the clock, so it is not
+		 * an error if the clock does not exists.
+		 */
+		clk = of_clk_get(pdev->dev.of_node, i);
+		if (!IS_ERR(clk))
+			clk_prepare_enable(clk);
+	}
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30))
+	crypto_init();
+#endif
+
+	/* Init globals */
+	next_result = 0;
+	result_done = 0;
+
+	/* The pool size here is twice bigger than requests queue size due to possible reordering */
+	cesa_ocf_pool = (struct cesa_ocf_process*)kmalloc((sizeof(struct cesa_ocf_process) *
+					CESA_OCF_POOL_SIZE), GFP_KERNEL);
+	if (cesa_ocf_pool == NULL) {
+		dev_err(&pdev->dev, "%s,%d: ENOBUFS\n", __FILE__, __LINE__);
+		return -EINVAL;
+	}
+
+	for (cesa_ocf_stack_idx = 0; cesa_ocf_stack_idx < CESA_OCF_POOL_SIZE; cesa_ocf_stack_idx++)
+		cesa_ocf_stack[cesa_ocf_stack_idx] = &cesa_ocf_pool[cesa_ocf_stack_idx];
+
+	memset(cesa_ocf_pool, 0, (sizeof(struct cesa_ocf_process) * CESA_OCF_POOL_SIZE));
+	memset(&mv_cesa_dev, 0, sizeof(mv_cesa_dev));
+	softc_device_init(&mv_cesa_dev, "MV CESA", 0, mv_cesa_methods);
+	cesa_ocf_id = crypto_get_driverid(softc_get_device(&mv_cesa_dev),CRYPTOCAP_F_HARDWARE);
+
+	if (cesa_ocf_id < 0)
+		panic("MV CESA crypto device cannot initialize!");
+
+	dprintk("%s,%d: cesa ocf device id is %d\n",
+					      __FILE__, __LINE__, cesa_ocf_id);
+
+	if (MV_OK !=
+	    mvSysCesaInit(CESA_OCF_MAX_SES*5, CESA_Q_SIZE, &pdev->dev, pdev)) {
+		dev_err(&pdev->dev, "%s,%d: mvCesaInit Failed.\n",
+							   __FILE__, __LINE__);
+		return -EINVAL;
+	}
+
+	if (mv_cesa_feature == INT_COALESCING)
+		mask = MV_CESA_CAUSE_EOP_COAL_MASK;
+	else
+		mask = MV_CESA_CAUSE_ACC_DMA_MASK;
+
+	/*
+	 * Preparation for each CESA chan
+	 */
+	for_each_child_of_node(pdev->dev.of_node, np) {
+		int irq;
+
+		/*
+		 * Get IRQ from FDT and map it to the Linux IRQ nr
+		 */
+		irq = irq_of_parse_and_map(np, 0);
+		if (!irq) {
+			dev_err(&pdev->dev, "IRQ nr missing in device tree\n");
+			return -ENOENT;
+		}
+
+		dprintk("%s: cesa irq %d, chan %d\n", __func__,
+					      irq, chan);
+
+		/* clear and unmask Int */
+		MV_REG_WRITE( MV_CESA_ISR_CAUSE_REG(chan), 0);
+		MV_REG_WRITE( MV_CESA_ISR_MASK_REG(chan), mask);
+
+		chan_id[chan] = chan;
+
+		/* register interrupt */
+		if (request_irq(irq, cesa_interrupt_handler,
+				(IRQF_DISABLED) , irq_str[chan], &chan_id[chan]) < 0) {
+			dev_err(&pdev->dev, "%s,%d: cannot assign irq %x\n",
+			    __FILE__, __LINE__, irq);
+			return -EINVAL;
+		}
+
+		chan++;
+	}
+
+#ifdef CESA_OCF_TASKLET
+	tasklet_init(&cesa_ocf_tasklet, cesa_callback, (unsigned int) 0);
+#endif
+
+#define	REGISTER(alg) \
+	crypto_register(cesa_ocf_id, alg, 0,0)
+	REGISTER(CRYPTO_AES_CBC);
+	REGISTER(CRYPTO_DES_CBC);
+	REGISTER(CRYPTO_3DES_CBC);
+	REGISTER(CRYPTO_MD5);
+	REGISTER(CRYPTO_MD5_HMAC);
+	REGISTER(CRYPTO_SHA1);
+	REGISTER(CRYPTO_SHA1_HMAC);
+#undef REGISTER
+
+#ifdef CESA_DEBUGS
+	mvCesaDebugRegs();
+#endif
+	dev_info(&pdev->dev, "%s: CESA driver operate in %s(%d) mode\n",
+					       __func__, cesa_m, mv_cesa_mode);
+	return 0;
+}
+
+static int
+cesa_ocf_remove(struct platform_device *pdev)
+{
+	struct device_node *np;
+	u8 chan = 0;
+	int irq;
+
+	dprintk("%s()\n", __func__);
+
+	crypto_unregister_all(cesa_ocf_id);
+	cesa_ocf_id = -1;
+	kfree(cesa_ocf_pool);
+
+	for_each_child_of_node(pdev->dev.of_node, np) {
+
+		irq = irq_of_parse_and_map(np, 0);
+		if (!irq) {
+			dev_err(&pdev->dev, "IRQ nr missing in device tree\n");
+			return -ENOENT;
+		}
+
+		free_irq(irq, NULL);
+
+		/* mask and clear Int */
+		MV_REG_WRITE( MV_CESA_ISR_MASK_REG(chan), 0);
+		MV_REG_WRITE( MV_CESA_ISR_CAUSE_REG(chan), 0);
+
+		chan++;
+	}
+
+	if( MV_OK != mvCesaIfFinish() ) {
+		dev_err(&pdev->dev, "%s,%d: mvCesaFinish Failed.\n",
+							   __FILE__, __LINE__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+void cesa_ocf_shutdown(struct platform_device *pdev)
+{
+	struct clk *clk;
+	int  i, j;
+
+	if (!pdev->dev.of_node) {
+		dev_err(&pdev->dev, "CESA device node not available\n");
+		return;
+	}
+
+	j = of_property_count_strings(pdev->dev.of_node, "clock-names");
+	dprintk("%s: Gate %d clocks\n", __func__, (j > 0 ? j : 1));
+	/*
+	 * If property "clock-names" does not exist (j < 0), assume that there
+	 * is only one clock which needs gating (j > 0 ? j : 1)
+	 */
+	for (i = 0; i < (j > 0 ? j : 1); i++) {
+		/* Not all platforms can gate the clock, so it is not
+		 * an error if the clock does not exists.
+		 */
+		clk = of_clk_get(pdev->dev.of_node, i);
+		if (!IS_ERR(clk))
+			clk_disable_unprepare(clk);
+	}
+}
+
+static struct of_device_id mv_cesa_dt_ids[] = {
+	{ .compatible = "marvell,armada-cesa", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mv_cesa_dt_ids);
+
+static struct platform_driver mv_cesa_driver = {
+	.driver = {
+		.name	= DRIVER_NAME,
+		.owner	= THIS_MODULE,
+		.of_match_table = of_match_ptr(mv_cesa_dt_ids),
+	},
+	.probe		= cesa_ocf_probe,
+	.remove		= cesa_ocf_remove,
+	.shutdown	= cesa_ocf_shutdown,
+#ifdef CONFIG_PM
+	.resume		= cesa_resume,
+	.suspend	= cesa_suspend,
+#endif
+};
+
+static int __init cesa_ocf_init(void)
+{
+	return platform_driver_register(&mv_cesa_driver);
+}
+module_init(cesa_ocf_init);
+
+static void __exit cesa_ocf_exit(void)
+{
+	platform_driver_unregister(&mv_cesa_driver);
+}
+module_exit(cesa_ocf_exit);
+
+MODULE_LICENSE("Marvell/GPL");
+MODULE_AUTHOR("Ronen Shitrit");
+MODULE_DESCRIPTION("OCF module for Marvell CESA based SoC");
diff --git a/drivers/crypto/mvebu_cesa/cesa_test.c b/drivers/crypto/mvebu_cesa/cesa_test.c
new file mode 100644
index 000000000000..93603747e071
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/cesa_test.c
@@ -0,0 +1,3165 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+		this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+		notice, this list of conditions and the following disclaimer in the
+		documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+		used to endorse or promote products derived from this software without
+		specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvOs.h"
+
+#if defined(MV_VXWORKS)
+
+#include "sysLib.h"
+#include "logLib.h"
+#include "tickLib.h"
+#include "intLib.h"
+#include "config.h"
+
+SEM_ID cesaSemId = NULL;
+SEM_ID cesaWaitSemId = NULL;
+
+#define CESA_TEST_LOCK(flags)       flags = intLock()
+#define CESA_TEST_UNLOCK(flags)     intUnlock(flags)
+
+#define CESA_TEST_WAIT_INIT()       cesaWaitSemId = semBCreate(SEM_Q_PRIORITY, SEM_EMPTY)
+#define CESA_TEST_WAKE_UP()         semGive(cesaWaitSemId)
+#define CESA_TEST_WAIT(cond, ms)    semTake(cesaWaitSemId, (sysClkRateGet()*ms)/1000)
+
+#define CESA_TEST_TICK_GET()        tickGet()
+#define CESA_TEST_TICK_TO_MS(tick)  (((tick)*1000)/sysClkRateGet())
+
+#elif defined(MV_LINUX)
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+
+static int buf_size = 20000;
+/*MODULE_PARM(buf_size, "i");*/
+module_param(buf_size, int, 0644);
+MODULE_PARM_DESC(buf_size, "Size of each data buffer");
+
+static int buf_num = 1;
+/*MODULE_PARM(buf_num, "i");*/
+module_param(buf_num, int, 0644);
+
+MODULE_PARM_DESC(buf_num, "Number of data buffers for each request");
+
+static wait_queue_head_t cesaTest_waitq;
+static spinlock_t cesaLock;
+static struct timeval tv;
+
+#define	DRIVER_NAME	"armada-cesa-test"
+
+#define CESA_TEST_LOCK(flags)       spin_lock_irqsave(&cesaLock, flags)
+#define CESA_TEST_UNLOCK(flags)     spin_unlock_irqrestore(&cesaLock, flags);
+
+#define CESA_TEST_WAIT_INIT()       init_waitqueue_head(&cesaTest_waitq)
+#define CESA_TEST_WAKE_UP()         wake_up(&cesaTest_waitq)
+#define CESA_TEST_WAIT(cond, ms)    wait_event_timeout(cesaTest_waitq, (cond), msecs_to_jiffies(ms))
+
+#elif defined(MV_NETBSD)
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+static int cesaLock;
+
+#define	CESA_TEST_LOCK(flags)		flags = splnet()
+#define	CESA_TEST_UNLOCK(flags)		splx(flags)
+
+#define	CESA_TEST_WAIT_INIT()	/* nothing */
+#define	CESA_TEST_WAKE_UP()		wakeup(&cesaLock)
+#define	CESA_TEST_WAIT(cond, ms)	\
+do {					\
+	while (!(cond))			\
+		tsleep(&cesaLock, PWAIT, "cesatest", mstohz(ms)); \
+} while (/*CONSTCOND*/0)
+
+#define	CESA_TEST_TICK_GET()		hardclock_ticks
+#define	CESA_TEST_TICK_TO_MS(tick)	((1000/hz)*(tick))
+
+#define	request_irq(i, h, t, n, a)	\
+	(!mv_intr_establish((i), IPL_NET, (int(*)(void *))(h), (a)))
+
+#else
+#error "Only Linux, VxWorks, or NetBSD OS are supported"
+#endif
+
+#include "mvDebug.h"
+
+#include "mvSysCesaConfig.h"
+#include "cesa_if.h"
+#include "mvMD5.h"
+#include "mvSHA1.h"
+#include "mvSHA256.h"
+
+#if defined(CONFIG_MV646xx)
+#include "marvell_pic.h"
+#endif
+
+#define MV_CESA_USE_TIMER_ID    0
+#define CESA_DEF_BUF_SIZE       1500
+#define CESA_DEF_BUF_NUM        1
+#define CESA_DEF_SESSION_NUM    32
+
+#define CESA_DEF_ITER_NUM       100
+
+#define CESA_DEF_REQ_SIZE       256
+#define CESA_TEST_REQ_SIZE      (CESA_DEF_REQ_SIZE * MV_CESA_CHANNELS)
+
+/* CESA Tests Debug */
+#undef CESA_TEST_DEBUG
+
+#ifdef CESA_TEST_DEBUG
+
+#   define CESA_TEST_DEBUG_PRINT(msg)   mvOsPrintf msg
+#   define CESA_TEST_DEBUG_CODE(code)   code
+
+typedef struct {
+	int type;		/* 0 - isrEmpty, 1 - cesaReadyGet, 2 - cesaAction */
+	MV_U32 timeStamp;
+	MV_U32 cause;
+	MV_U32 realCause;
+	MV_U32 dmaErrCause;
+	int resources;
+	MV_CESA_REQ *pReqReady;
+	MV_CESA_REQ *pReqEmpty;
+	MV_CESA_REQ *pReqProcess;
+} MV_CESA_TEST_TRACE;
+
+#define MV_CESA_TEST_TRACE_SIZE      25
+
+static int cesaTestTraceIdx;
+static MV_CESA_TEST_TRACE cesaTestTrace[MV_CESA_TEST_TRACE_SIZE];
+
+#if 0
+static void cesaTestTraceAdd(int type, MV_U32 cause)
+{
+	cesaTestTrace[cesaTestTraceIdx].type = type;
+	cesaTestTrace[cesaTestTraceIdx].cause = cause;
+	cesaTestTrace[cesaTestTraceIdx].realCause = MV_REG_READ(MV_CESA_ISR_CAUSE_REG);
+	cesaTestTrace[cesaTestTraceIdx].dmaErrCause = MV_REG_READ(MV_CESA_TDMA_ERROR_CAUSE_REG);
+	cesaTestTrace[cesaTestTraceIdx].resources = cesaReqResources;
+	cesaTestTrace[cesaTestTraceIdx].pReqReady = pCesaReqReady;
+	cesaTestTrace[cesaTestTraceIdx].pReqEmpty = pCesaReqEmpty;
+	cesaTestTrace[cesaTestTraceIdx].pReqProcess = pCesaReqProcess;
+	cesaTestTrace[cesaTestTraceIdx].timeStamp = mvCntmrRead(MV_CESA_USE_TIMER_ID);
+	cesaTestTraceIdx++;
+	if (cesaTestTraceIdx == MV_CESA_TEST_TRACE_SIZE)
+		cesaTestTraceIdx = 0;
+}
+#endif
+#else
+
+#   define CESA_TEST_DEBUG_PRINT(msg)
+#   define CESA_TEST_DEBUG_CODE(code)
+
+#endif /* CESA_TEST_DEBUG */
+
+int cesaExpReqId = 0;
+int cesaCbIter = 0;
+unsigned int cmdReqId = 0;
+
+int cesaIdx;
+int cesaIteration;
+int cesaRateSize;
+int cesaReqSize;
+unsigned long cesaTaskId;
+int cesaBufNum;
+int cesaBufSize;
+int cesaCheckOffset;
+int cesaCheckSize;
+int cesaCheckMode;
+int cesaTestIdx;
+int cesaCaseIdx;
+
+MV_U32 cesaTestIsrCount[2] = {0, 0};
+MV_U32 cesaTestIsrMissCount = 0;
+
+MV_U32 cesaCryptoError = 0;
+MV_U32 cesaReqIdError = 0;
+MV_U32 cesaError = 0;
+
+char *cesaHexBuffer = NULL;
+
+char *cesaBinBuffer = NULL;
+char *cesaExpBinBuffer = NULL;
+
+char *cesaInputHexStr = NULL;
+char *cesaOutputHexStr = NULL;
+
+MV_BUF_INFO cesaReqBufs[CESA_TEST_REQ_SIZE];
+static u8 chanId[MV_CESA_CHANNELS];
+static spinlock_t cesaChanLock[MV_CESA_CHANNELS];
+
+MV_CESA_COMMAND *cesaCmdRing;
+MV_CESA_RESULT cesaResult;
+
+int cesaTestFull = 0;
+
+MV_BOOL cesaIsReady = MV_FALSE;
+MV_U32 cesaCycles = 0;
+MV_U32 cesaBeginTicks = 0;
+MV_U32 cesaEndTicks = 0;
+MV_U32 cesaRate = 0;
+MV_U32 cesaRateAfterDot = 0;
+
+void *cesaTestOSHandle = NULL;
+
+enum {
+	CESA_FAST_CHECK_MODE = 0,
+	CESA_FULL_CHECK_MODE,
+	CESA_NULL_CHECK_MODE,
+	CESA_SHOW_CHECK_MODE,
+	CESA_SW_SHOW_CHECK_MODE,
+	CESA_SW_NULL_CHECK_MODE,
+
+	CESA_MAX_CHECK_MODE
+};
+
+enum {
+	DES_TEST_TYPE = 0,
+	TRIPLE_DES_TEST_TYPE = 1,
+	AES_TEST_TYPE = 2,
+	MD5_TEST_TYPE = 3,
+	SHA1_TEST_TYPE = 4,
+	SHA2_TEST_TYPE = 5,
+	COMBINED_TEST_TYPE = 6,
+	MAX_TEST_TYPE
+};
+
+/* Tests data base */
+typedef struct {
+	short sid;
+	char cryptoAlgorithm;	/* DES/3DES/AES */
+	char cryptoMode;	/* ECB or CBC */
+	char macAlgorithm;	/* MD5 / SHA1 / SHA2 */
+	char operation;		/* CRYPTO/HMAC/CRYPTO+HMAC/HMAC+CRYPTO */
+	char direction;		/* ENCODE(SIGN)/DECODE(VERIFY) */
+	unsigned char *pCryptoKey;
+	int cryptoKeySize;
+	unsigned char *pMacKey;
+	int macKeySize;
+	const char *name;
+} MV_CESA_TEST_SESSION;
+
+typedef struct {
+	MV_CESA_TEST_SESSION *pSessions;
+	int numSessions;
+} MV_CESA_TEST_DB_ENTRY;
+
+typedef struct {
+	char *plainHexStr;
+	char *cipherHexStr;
+	unsigned char *pCryptoIV;
+	int cryptoLength;
+	int macLength;
+	int digestOffset;
+} MV_CESA_TEST_CASE;
+
+typedef struct {
+	int size;
+	const char *outputHexStr;
+} MV_CESA_SIZE_TEST;
+
+static unsigned char cryptoKey1[] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
+	0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
+	0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef
+};
+
+static unsigned char cryptoKey7[] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef };
+static unsigned char iv1[] = { 0x12, 0x34, 0x56, 0x78, 0x90, 0xab, 0xcd, 0xef };
+
+static unsigned char cryptoKey2[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+	0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F
+};
+
+static unsigned char cryptoKey3[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+	0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17
+};
+
+static unsigned char cryptoKey4[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+	0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
+};
+
+static unsigned char cryptoKey5[] = { 0x56, 0xe4, 0x7a, 0x38, 0xc5, 0x59, 0x89, 0x74,
+	0xbc, 0x46, 0x90, 0x3d, 0xba, 0x29, 0x03, 0x49
+};
+
+static unsigned char key3des1[] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF,
+	0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0x01,
+	0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0x01, 0x23
+};
+
+/*  Input ASCII string: The quick brown fox jump  */
+static char plain3des1[] = "54686520717566636B2062726F776E20666F78206A756D70";
+static char cipher3des1[] = "A826FD8CE53B855FCCE21C8112256FE668D5C05DD9B6B900";
+
+static unsigned char key3des2[] = { 0x62, 0x7f, 0x46, 0x0e, 0x08, 0x10, 0x4a, 0x10,
+	0x43, 0xcd, 0x26, 0x5d, 0x58, 0x40, 0xea, 0xf1,
+	0x31, 0x3e, 0xdf, 0x97, 0xdf, 0x2a, 0x8a, 0x8c
+};
+
+static unsigned char iv3des2[] = { 0x8e, 0x29, 0xf7, 0x5e, 0xa7, 0x7e, 0x54, 0x75 };
+
+static char plain3des2[] = "326a494cd33fe756";
+
+static char cipher3desCbc2[] = "8e29f75ea77e5475" "b22b8d66de970692";
+
+static unsigned char key3des3[] = { 0x37, 0xae, 0x5e, 0xbf, 0x46, 0xdf, 0xf2, 0xdc,
+	0x07, 0x54, 0xb9, 0x4f, 0x31, 0xcb, 0xb3, 0x85,
+	0x5e, 0x7f, 0xd3, 0x6d, 0xc8, 0x70, 0xbf, 0xae
+};
+
+static unsigned char iv3des3[] = { 0x3d, 0x1d, 0xe3, 0xcc, 0x13, 0x2e, 0x3b, 0x65 };
+
+static char plain3des3[] = "84401f78fe6c10876d8ea23094ea5309";
+
+static char cipher3desCbc3[] = "3d1de3cc132e3b65" "7b1f7c7e3b1c948ebd04a75ffba7d2f5";
+
+static unsigned char iv5[] = { 0x8c, 0xe8, 0x2e, 0xef, 0xbe, 0xa0, 0xda, 0x3c,
+	0x44, 0x69, 0x9e, 0xd7, 0xdb, 0x51, 0xb7, 0xd9
+};
+
+static unsigned char aesCtrKey[] = { 0x76, 0x91, 0xBE, 0x03, 0x5E, 0x50, 0x20, 0xA8,
+	0xAC, 0x6E, 0x61, 0x85, 0x29, 0xF9, 0xA0, 0xDC
+};
+
+static unsigned char mdKey1[] = { 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+	0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b
+};
+
+static unsigned char mdKey2[] = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+	0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa
+};
+
+static unsigned char shaKey1[] = { 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+	0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+	0x0b, 0x0b, 0x0b, 0x0b
+};
+
+static unsigned char shaKey2[] = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+	0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+	0xaa, 0xaa, 0xaa, 0xaa
+};
+
+static unsigned char mdKey4[] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+	0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+};
+
+static unsigned char shaKey4[] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+	0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10,
+	0x11, 0x12, 0x13, 0x14
+};
+
+static unsigned char sha2Key1[] = { 0x0, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14,
+	0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f
+};
+
+
+static MV_CESA_TEST_SESSION desTestSessions[] = {
+/*000*/ {-1, MV_CESA_CRYPTO_DES, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 cryptoKey7, sizeof(cryptoKey7) / sizeof(cryptoKey7[0]),
+	 NULL, 0,
+	 "DES ECB encode",
+	 }
+	,
+/*001*/ {-1, MV_CESA_CRYPTO_DES, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+	 MV_CESA_DIR_DECODE,
+	 cryptoKey7, sizeof(cryptoKey7) / sizeof(cryptoKey7[0]),
+	 NULL, 0,
+	 "DES ECB decode",
+	 }
+	,
+/*002*/ {-1, MV_CESA_CRYPTO_DES, MV_CESA_CRYPTO_CBC,
+	 MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 cryptoKey7, sizeof(cryptoKey7) / sizeof(cryptoKey7[0]),
+	 NULL, 0,
+	 "DES CBC encode"}
+	,
+/*003*/ {-1, MV_CESA_CRYPTO_DES, MV_CESA_CRYPTO_CBC,
+	 MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+	 MV_CESA_DIR_DECODE,
+	 cryptoKey7, sizeof(cryptoKey7) / sizeof(cryptoKey7[0]),
+	 NULL, 0,
+	 "DES CBC decode"}
+	,
+/*004*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 NULL, 0, NULL, 0,
+	 "NULL Crypto Algorithm encode"}
+	,
+};
+
+static MV_CESA_TEST_SESSION tripleDesTestSessions[] = {
+/*100*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 cryptoKey1, sizeof(cryptoKey1) / sizeof(cryptoKey1[0]),
+	 NULL, 0,
+	 "3DES ECB encode",
+	 }
+	,
+/*101*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+	 MV_CESA_DIR_DECODE,
+	 cryptoKey1, sizeof(cryptoKey1) / sizeof(cryptoKey1[0]),
+	 NULL, 0,
+	 "3DES ECB decode",
+	 }
+	,
+/*102*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_CBC,
+	 MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 cryptoKey1, sizeof(cryptoKey1) / sizeof(cryptoKey1[0]),
+	 NULL, 0,
+	 "3DES CBC encode"}
+	,
+/*103*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_CBC,
+	 MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+	 MV_CESA_DIR_DECODE,
+	 cryptoKey1, sizeof(cryptoKey1) / sizeof(cryptoKey1[0]),
+	 NULL, 0,
+	 "3DES CBC decode"}
+	,
+/*104*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 key3des1, sizeof(key3des1),
+	 NULL, 0,
+	 "3DES ECB encode"}
+	,
+/*105*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_CBC,
+	 MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 key3des2, sizeof(key3des2),
+	 NULL, 0,
+	 "3DES ECB encode"}
+	,
+/*106*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_CBC,
+	 MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 key3des3, sizeof(key3des3),
+	 NULL, 0,
+	 "3DES ECB encode"}
+	,
+};
+
+static MV_CESA_TEST_SESSION aesTestSessions[] = {
+/*200*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 cryptoKey2, sizeof(cryptoKey2) / sizeof(cryptoKey2[0]),
+	 NULL, 0,
+	 "AES-128 ECB encode"}
+	,
+/*201*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+	 MV_CESA_DIR_DECODE,
+	 cryptoKey2, sizeof(cryptoKey2) / sizeof(cryptoKey2[0]),
+	 NULL, 0,
+	 "AES-128 ECB decode"}
+	,
+/*202*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_CBC,
+	 MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 cryptoKey5, sizeof(cryptoKey5) / sizeof(cryptoKey5[0]),
+	 NULL, 0,
+	 "AES-128 CBC encode"}
+	,
+/*203*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_CBC,
+	 MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+	 MV_CESA_DIR_DECODE,
+	 cryptoKey5, sizeof(cryptoKey5) / sizeof(cryptoKey5[0]),
+	 NULL, 0,
+	 "AES-128 CBC decode"}
+	,
+/*204*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 cryptoKey3, sizeof(cryptoKey3) / sizeof(cryptoKey3[0]),
+	 NULL, 0,
+	 "AES-192 ECB encode"}
+	,
+/*205*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+	 MV_CESA_DIR_DECODE,
+	 cryptoKey3, sizeof(cryptoKey3) / sizeof(cryptoKey3[0]),
+	 NULL, 0,
+	 "AES-192 ECB decode"}
+	,
+/*206*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 cryptoKey4, sizeof(cryptoKey4) / sizeof(cryptoKey4[0]),
+	 NULL, 0,
+	 "AES-256 ECB encode"}
+	,
+/*207*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+	 MV_CESA_DIR_DECODE,
+	 cryptoKey4, sizeof(cryptoKey4) / sizeof(cryptoKey4[0]),
+	 NULL, 0,
+	 "AES-256 ECB decode"}
+	,
+/*208*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_CTR,
+	 MV_CESA_MAC_NULL, MV_CESA_CRYPTO_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 aesCtrKey, sizeof(aesCtrKey) / sizeof(aesCtrKey[0]),
+	 NULL, 0,
+	 "AES-128 CTR encode"}
+	,
+};
+
+static MV_CESA_TEST_SESSION md5TestSessions[] = {
+/*300*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_HMAC_MD5, MV_CESA_MAC_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 NULL, 0,
+	 mdKey1, sizeof(mdKey1),
+	 "HMAC-MD5 Generate Signature"}
+	,
+/*301*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_HMAC_MD5, MV_CESA_MAC_ONLY,
+	 MV_CESA_DIR_DECODE,
+	 NULL, 0,
+	 mdKey1, sizeof(mdKey1),
+	 "HMAC-MD5 Verify Signature"}
+	,
+/*302*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_HMAC_MD5, MV_CESA_MAC_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 NULL, 0,
+	 mdKey2, sizeof(mdKey2),
+	 "HMAC-MD5 Generate Signature"}
+	,
+/*303*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_HMAC_MD5, MV_CESA_MAC_ONLY,
+	 MV_CESA_DIR_DECODE,
+	 NULL, 0,
+	 mdKey2, sizeof(mdKey2),
+	 "HMAC-MD5 Verify Signature"}
+	,
+/*304*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_HMAC_MD5, MV_CESA_MAC_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 NULL, 0,
+	 mdKey4, sizeof(mdKey4),
+	 "HMAC-MD5 Generate Signature"}
+	,
+/*305*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_MD5, MV_CESA_MAC_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 NULL, 0,
+	 NULL, 0,
+	 "HASH-MD5 Generate Signature"}
+	,
+};
+
+static MV_CESA_TEST_SESSION sha1TestSessions[] = {
+/*400*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_HMAC_SHA1, MV_CESA_MAC_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 NULL, 0,
+	 shaKey1, sizeof(shaKey1),
+	 "HMAC-SHA1 Generate Signature"}
+	,
+/*401*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_HMAC_SHA1, MV_CESA_MAC_ONLY,
+	 MV_CESA_DIR_DECODE,
+	 NULL, 0,
+	 shaKey1, sizeof(shaKey1),
+	 "HMAC-SHA1 Verify Signature"}
+	,
+/*402*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_HMAC_SHA1, MV_CESA_MAC_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 NULL, 0,
+	 shaKey2, sizeof(shaKey2),
+	 "HMAC-SHA1 Generate Signature"}
+	,
+/*403*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_HMAC_SHA1, MV_CESA_MAC_ONLY,
+	 MV_CESA_DIR_DECODE,
+	 NULL, 0,
+	 shaKey2, sizeof(shaKey2),
+	 "HMAC-SHA1 Verify Signature"}
+	,
+/*404*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_HMAC_SHA1, MV_CESA_MAC_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 NULL, 0,
+	 shaKey4, sizeof(shaKey4),
+	 "HMAC-SHA1 Generate Signature"}
+	,
+/*405*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_SHA1, MV_CESA_MAC_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 NULL, 0,
+	 NULL, 0,
+	 "HASH-SHA1 Generate Signature"}
+	,
+};
+
+static MV_CESA_TEST_SESSION sha2TestSessions[] = {
+/*500*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_HMAC_SHA2, MV_CESA_MAC_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 NULL, 0,
+	 sha2Key1, sizeof(sha2Key1),
+	 "HMAC-SHA2 Generate Signature"}
+	,
+/*501*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_HMAC_SHA2, MV_CESA_MAC_ONLY,
+	 MV_CESA_DIR_DECODE,
+	 NULL, 0,
+	 sha2Key1, sizeof(sha2Key1),
+	 "HMAC-SHA2 Verify Signature"}
+	,
+/*502*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_SHA2, MV_CESA_MAC_ONLY,
+	 MV_CESA_DIR_ENCODE,
+	 NULL, 0,
+	 NULL, 0,
+	 "HASH-SHA2 Generate Signature"}
+	,
+/*503*/ {-1, MV_CESA_CRYPTO_NULL, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_SHA2, MV_CESA_MAC_ONLY,
+	 MV_CESA_DIR_DECODE,
+	 NULL, 0,
+	 NULL, 0,
+	 "HASH-SHA2 Verify Signature"}
+	,
+};
+
+static MV_CESA_TEST_SESSION combinedTestSessions[] = {
+/*600*/ {-1, MV_CESA_CRYPTO_DES, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_HMAC_MD5, MV_CESA_CRYPTO_THEN_MAC,
+	 MV_CESA_DIR_ENCODE,
+	 cryptoKey1, MV_CESA_DES_KEY_LENGTH,
+	 mdKey4, sizeof(mdKey4),
+	 "DES + MD5 encode"}
+	,
+/*601*/ {-1, MV_CESA_CRYPTO_DES, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_HMAC_SHA1, MV_CESA_CRYPTO_THEN_MAC,
+	 MV_CESA_DIR_ENCODE,
+	 cryptoKey1, MV_CESA_DES_KEY_LENGTH,
+	 shaKey4, sizeof(shaKey4),
+	 "DES + SHA1 encode"}
+	,
+/*602*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_HMAC_MD5, MV_CESA_CRYPTO_THEN_MAC,
+	 MV_CESA_DIR_ENCODE,
+	 cryptoKey1, sizeof(cryptoKey1) / sizeof(cryptoKey1[0]),
+	 mdKey4, sizeof(mdKey4),
+	 "3DES + MD5 encode"}
+	,
+/*603*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_HMAC_SHA1, MV_CESA_CRYPTO_THEN_MAC,
+	 MV_CESA_DIR_ENCODE,
+	 cryptoKey1, sizeof(cryptoKey1) / sizeof(cryptoKey1[0]),
+	 shaKey4, sizeof(shaKey4),
+	 "3DES + SHA1 encode"}
+	,
+/*604*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_CBC,
+	 MV_CESA_MAC_HMAC_MD5, MV_CESA_CRYPTO_THEN_MAC,
+	 MV_CESA_DIR_ENCODE,
+	 cryptoKey1, sizeof(cryptoKey1) / sizeof(cryptoKey1[0]),
+	 mdKey4, sizeof(mdKey4),
+	 "3DES CBC + MD5 encode"}
+	,
+/*605*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_CBC,
+	 MV_CESA_MAC_HMAC_SHA1, MV_CESA_CRYPTO_THEN_MAC,
+	 MV_CESA_DIR_ENCODE,
+	 cryptoKey1, sizeof(cryptoKey1) / sizeof(cryptoKey1[0]),
+	 shaKey4, sizeof(shaKey4),
+	 "3DES CBC + SHA1 encode"}
+	,
+/*606*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_CBC,
+	 MV_CESA_MAC_HMAC_MD5, MV_CESA_CRYPTO_THEN_MAC,
+	 MV_CESA_DIR_ENCODE,
+	 cryptoKey5, sizeof(cryptoKey5) / sizeof(cryptoKey5[0]),
+	 mdKey4, sizeof(mdKey4),
+	 "AES-128 CBC + MD5 encode"}
+	,
+/*607*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_CBC,
+	 MV_CESA_MAC_HMAC_SHA1, MV_CESA_CRYPTO_THEN_MAC,
+	 MV_CESA_DIR_ENCODE,
+	 cryptoKey5, sizeof(cryptoKey5) / sizeof(cryptoKey5[0]),
+	 shaKey4, sizeof(shaKey4),
+	 "AES-128 CBC + SHA1 encode"}
+	,
+/*608*/ {-1, MV_CESA_CRYPTO_3DES, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_HMAC_MD5, MV_CESA_MAC_THEN_CRYPTO,
+	 MV_CESA_DIR_DECODE,
+	 cryptoKey1, sizeof(cryptoKey1) / sizeof(cryptoKey1[0]),
+	 mdKey4, sizeof(mdKey4),
+	 "HMAC-MD5 + 3DES decode"}
+	,
+/*609*/ {-1, MV_CESA_CRYPTO_AES, MV_CESA_CRYPTO_ECB,
+	 MV_CESA_MAC_HMAC_SHA2, MV_CESA_CRYPTO_THEN_MAC,
+	 MV_CESA_DIR_ENCODE,
+	 cryptoKey2, sizeof(cryptoKey2) / sizeof(cryptoKey2[0]),
+	 sha2Key1, sizeof(sha2Key1),
+	 "AES-128 CBC + SHA2 encode"}
+	,
+};
+
+static MV_CESA_TEST_DB_ENTRY cesaTestsDB[MAX_TEST_TYPE + 1] = {
+	{desTestSessions, sizeof(desTestSessions) / sizeof(desTestSessions[0])}
+	,
+	{tripleDesTestSessions, sizeof(tripleDesTestSessions) / sizeof(tripleDesTestSessions[0])}
+	,
+	{aesTestSessions, sizeof(aesTestSessions) / sizeof(aesTestSessions[0])}
+	,
+	{md5TestSessions, sizeof(md5TestSessions) / sizeof(md5TestSessions[0])}
+	,
+	{sha1TestSessions, sizeof(sha1TestSessions) / sizeof(sha1TestSessions[0])}
+	,
+	{sha2TestSessions, sizeof(sha2TestSessions) / sizeof(sha2TestSessions[0])}
+	,
+	{combinedTestSessions, sizeof(combinedTestSessions) / sizeof(combinedTestSessions[0])}
+	,
+	{NULL, 0}
+};
+
+char cesaNullPlainHexText[] = "000000000000000000000000000000000000000000000000";
+
+char cesaPlainAsciiText[] = "Now is the time for all ";
+char cesaPlainHexEbc[] = "4e6f77206973207468652074696d6520666f7220616c6c20";
+char cesaCipherHexEcb[] = "3fa40e8a984d48156a271787ab8883f9893d51ec4b563b53";
+char cesaPlainHexCbc[] = "1234567890abcdef4e6f77206973207468652074696d6520666f7220616c6c20";
+char cesaCipherHexCbc[] = "1234567890abcdefe5c7cdde872bf27c43e934008c389c0f683788499a7c05f6";
+
+char cesaAesPlainHexEcb[] = "000102030405060708090a0b0c0d0e0f";
+char cesaAes128cipherHexEcb[] = "0a940bb5416ef045f1c39458c653ea5a";
+char cesaAes192cipherHexEcb[] = "0060bffe46834bb8da5cf9a61ff220ae";
+char cesaAes256cipherHexEcb[] = "5a6e045708fb7196f02e553d02c3a692";
+
+char cesaAsciiStr1[] = "Hi There";
+char cesaDataHexStr1[] = "4869205468657265";
+char cesaHmacMd5digestHex1[] = "9294727a3638bb1c13f48ef8158bfc9d";
+char cesaHmacSha1digestHex1[] = "b617318655057264e28bc0b6fb378c8ef146be00";
+char cesaHmacSha2digestHex1[] = "a28cf43130ee696a98f14a37678b56bcfcbdd9e5cf69717fecf5480f0ebdf790";
+char cesaDataAndMd5digest1[] = "48692054686572659294727a3638bb1c13f48ef8158bfc9d";
+char cesaDataAndSha1digest1[] = "4869205468657265b617318655057264e28bc0b6fb378c8ef146be00";
+char cesaDataAndSha2digest1[] = "53616D706C65206D65737361676520666F72206B65796C656E3C626C6F636B6C656E"
+				"000000000000" "a28cf43130ee696a98f14a37678b56bcfcbdd9e5cf69717fecf5480f0ebdf790";
+
+char cesaAesPlainText[] = "a0a1a2a3a4a5a6a7a8a9aaabacadaeaf"
+    "b0b1b2b3b4b5b6b7b8b9babbbcbdbebf" "c0c1c2c3c4c5c6c7c8c9cacbcccdcecf" "d0d1d2d3d4d5d6d7d8d9dadbdcdddedf";
+
+char cesaAes128CipherCbc[] = "c30e32ffedc0774e6aff6af0869f71aa"
+    "0f3af07a9a31a9c684db207eb0ef8e4e" "35907aa632c3ffdf868bb7b29d3d46ad" "83ce9f9a102ee99d49a53e87f4c3da55";
+
+char cesaAesIvPlainText[] = "8ce82eefbea0da3c44699ed7db51b7d9"
+    "a0a1a2a3a4a5a6a7a8a9aaabacadaeaf"
+    "b0b1b2b3b4b5b6b7b8b9babbbcbdbebf" "c0c1c2c3c4c5c6c7c8c9cacbcccdcecf" "d0d1d2d3d4d5d6d7d8d9dadbdcdddedf";
+
+char cesaAes128IvCipherCbc[] = "8ce82eefbea0da3c44699ed7db51b7d9"
+    "c30e32ffedc0774e6aff6af0869f71aa"
+    "0f3af07a9a31a9c684db207eb0ef8e4e" "35907aa632c3ffdf868bb7b29d3d46ad" "83ce9f9a102ee99d49a53e87f4c3da55";
+
+char cesaAesCtrPlain[] = "00E0017B27777F3F4A1786F000000001"
+    "000102030405060708090A0B0C0D0E0F" "101112131415161718191A1B1C1D1E1F" "20212223";
+
+char cesaAesCtrCipher[] = "00E0017B27777F3F4A1786F000000001"
+    "C1CF48A89F2FFDD9CF4652E9EFDB72D7" "4540A42BDE6D7836D59A5CEAAEF31053" "25B2072F";
+
+/* Input cesaHmacHex3 is '0xdd' repeated 50 times */
+char cesaHmacMd5digestHex3[] = "56be34521d144c88dbb8c733f0e8b3f6";
+char cesaHmacSha1digestHex3[] = "125d7342b9ac11cd91a39af48aa17b4f63f175d3";
+char cesaDataHexStr3[50 * 2 + 1] = "";
+char cesaDataAndMd5digest3[sizeof(cesaDataHexStr3) + sizeof(cesaHmacMd5digestHex3) + 8 * 2 + 1] = "";
+char cesaDataAndSha1digest3[sizeof(cesaDataHexStr3) + sizeof(cesaHmacSha1digestHex3) + 8 * 2 + 1] = "";
+char cesaDataAndSha2digest3[] = "6162630000000000"
+				"ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad";
+
+/* Ascii string is "abc" */
+char hashHexStr3[] = "616263";
+char hashMd5digest3[] = "900150983cd24fb0d6963f7d28e17f72";
+char hashSha1digest3[] = "a9993e364706816aba3e25717850c26c9cd0d89d";
+char hashSha2digest3[] = "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad";
+
+char hashHexStr80[] = "31323334353637383930"
+    "31323334353637383930"
+    "31323334353637383930"
+    "31323334353637383930" "31323334353637383930" "31323334353637383930" "31323334353637383930" "31323334353637383930";
+
+char hashMd5digest80[] = "57edf4a22be3c955ac49da2e2107b67a";
+
+char tripleDesThenMd5digest80[] = "b7726a03aad490bd6c5a452a89a1b271";
+char tripleDesThenSha1digest80[] = "b2ddeaca91030eab5b95a234ef2c0f6e738ff883";
+
+char cbc3desThenMd5digest80[] = "6f463057e1a90e0e91ae505b527bcec0";
+char cbc3desThenSha1digest80[] = "1b002ed050be743aa98860cf35659646bb8efcc0";
+
+char cbcAes128ThenMd5digest80[] = "6b6e863ac5a71d15e3e9b1c86c9ba05f";
+char cbcAes128ThenSha1digest80[] = "13558472d1fc1c90dffec6e5136c7203452d509b";
+
+char cesaDataHexStr4[] = "53616D706C65206D65737361676520666F72206B65796C656E3C626C6F636B6C656E";
+char aes128EcbThenHmacSha2[] = "631cb14ab7f43a46c7517a32f793d64c8a7814ebfb32294b9d20f2f79eb6baba";
+
+static MV_CESA_TEST_CASE cesaTestCases[] = {
+/*     plainHexStr          cipherHexStr               IV    crypto  mac     digest */
+/*                                                           Length  Length  Offset */
+/*0 */ {NULL, NULL, NULL, 0, 0, -1},
+/*1 */ {cesaPlainHexEbc, cesaCipherHexEcb, NULL, 24, 0, -1},
+/*2 */ {cesaPlainHexCbc, cesaCipherHexCbc, NULL, 24, 0, -1},
+/*3 */ {cesaAesPlainHexEcb, cesaAes128cipherHexEcb, NULL, 16, 0, -1},
+/*4 */ {cesaAesPlainHexEcb, cesaAes192cipherHexEcb, NULL, 16, 0, -1},
+/*5 */ {cesaAesPlainHexEcb, cesaAes256cipherHexEcb, NULL, 16, 0, -1},
+/*6 */ {cesaDataHexStr1, cesaHmacMd5digestHex1, NULL, 0, 8, -1},
+/*7 */ {NULL, cesaDataAndMd5digest1, NULL, 0, 8, -1},
+/*8 */ {cesaDataHexStr3, cesaHmacMd5digestHex3, NULL, 0, 50, -1},
+/*9 */ {NULL, cesaDataAndMd5digest3, NULL, 0, 50, -1},
+/*10*/ {cesaAesPlainText, cesaAes128IvCipherCbc, iv5, 64, 0, -1},
+/*11*/ {cesaDataHexStr1, cesaHmacSha1digestHex1, NULL, 0, 8, -1},
+/*12*/ {NULL, cesaDataAndSha1digest1, NULL, 0, 8, -1},
+/*13*/ {cesaDataHexStr3, cesaHmacSha1digestHex3, NULL, 0, 50, -1},
+/*14*/ {NULL, cesaDataAndSha1digest3, NULL, 0, 50, -1},
+/*15*/ {hashHexStr3, hashMd5digest3, NULL, 0, 3, -1},
+/*16*/ {hashHexStr3, hashSha1digest3, NULL, 0, 3, -1},
+/*17*/ {hashHexStr80, tripleDesThenMd5digest80, NULL, 80, 80, -1},
+/*18*/ {hashHexStr80, tripleDesThenSha1digest80, NULL, 80, 80, -1},
+/*19*/ {hashHexStr80, cbc3desThenMd5digest80, iv1, 80, 80, -1},
+/*20*/ {hashHexStr80, cbc3desThenSha1digest80, iv1, 80, 80, -1},
+/*21*/ {hashHexStr80, cbcAes128ThenMd5digest80, iv5, 80, 80, -1},
+/*22*/ {hashHexStr80, cbcAes128ThenSha1digest80, iv5, 80, 80, -1},
+/*23*/ {cesaAesCtrPlain, cesaAesCtrCipher, NULL, 36, 0, -1},
+/*24*/ {cesaAesIvPlainText, cesaAes128IvCipherCbc, NULL, 64, 0, -1},
+/*25*/ {plain3des1, cipher3des1, NULL, 0, 0, -1},
+/*26*/ {plain3des2, cipher3desCbc2, iv3des2, 0, 0, -1},
+/*27*/ {plain3des3, cipher3desCbc3, iv3des3, 0, 0, -1},
+/*28*/ {cesaDataHexStr4, cesaHmacSha2digestHex1, NULL, 0, 34, -1},
+/*29*/ {NULL, cesaDataAndSha2digest1, NULL, 0, 34, -1},
+/*30*/ {hashHexStr3, hashSha2digest3, NULL, 0, 3, -1},
+/*31*/ {NULL, cesaDataAndSha2digest3, NULL, 0, 3, -1},
+/*32*/ {cesaAesPlainHexEcb, aes128EcbThenHmacSha2, NULL, 16, 16, -1},
+};
+
+/* Key         = 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ *               0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa
+ * Input 0xdd repeated "size" times
+ */
+static MV_CESA_SIZE_TEST mdMultiSizeTest302[] = {
+	{80, "7a031a640c14a4872814930b1ef3a5b2"},
+	{512, "5488e6c5a14dc72a79f28312ca5b939b"},
+	{1000, "d00814f586a8b78a05724239d2531821"},
+	{1001, "bf07df7b7f49d3f5b5ecacd4e9e63281"},
+	{1002, "1ed4a1a802e87817a819d4e37bb4d0f7"},
+	{1003, "5972ab64a4f265ee371dac2f2f137f90"},
+	{1004, "71f95e7ec3aa7df2548e90898abdb28e"},
+	{1005, "e082790b4857fcfc266e92e59e608814"},
+	{1006, "9500f02fd8ac7fde8b10e4fece9a920d"},
+	{1336, "e42edcce57d0b75b01aa09d71427948b"},
+	{1344, "bb5454ada0deb49ba0a97ffd60f57071"},
+	{1399, "0f44d793e744b24d53f44f295082ee8c"},
+	{1400, "359de8a03a9b707928c6c60e0e8d79f1"},
+	{1401, "e913858b484cbe2b384099ea88d8855b"},
+	{1402, "d9848a164af53620e0540c1d7d87629e"},
+	{1403, "0c9ee1c2c9ef45e9b625c26cbaf3e822"},
+	{1404, "12edd4f609416e3c936170360561b064"},
+	{1405, "7fc912718a05446395345009132bf562"},
+	{1406, "882f17425e579ff0d85a91a59f308aa0"},
+	{1407, "005cae408630a2fb5db82ad9db7e59da"},
+	{1408, "64655f8b404b3fea7a3e3e609bc5088f"},
+	{1409, "4a145284a7f74e01b6bb1a0ec6a0dd80"},
+	{2048, "67caf64475650732def374ebb8bde3fd"},
+	{2049, "6c84f11f472825f7e6cd125c2981884b"},
+	{2050, "8999586754a73a99efbe4dbad2816d41"},
+	{2051, "ba6946b610e098d286bc81091659dfff"},
+	{2052, "d0afa01c92d4d13def2b024f36faed83"},
+	{3072, "61d8beac61806afa2585d74a9a0e6974"},
+	{3074, "f6501a28dcc24d1e4770505c51a87ed3"},
+	{3075, "ea4a6929be67e33e61ff475369248b73"},
+	{4048, "aa8c4d68f282a07e7385acdfa69f4bed"},
+	{4052, "afb5ed2c0e1d430ea59e59ed5ed6b18a"},
+	{4058, "9e8553f9bdd43aebe0bd729f0e600c99"},
+	{6144, "f628f3e5d183fe5cdd3a5abee39cf872"},
+	{6150, "89a3efcea9a2f25f919168ad4a1fd292"},
+	{6400, "cdd176b7fb747873efa4da5e32bdf88f"},
+	{6528, "b1d707b027354aca152c45ee559ccd3f"},
+	{8192, "c600ea4429ac47f9941f09182166e51a"},
+	{16384, "16e8754bfbeb4c649218422792267a37"},
+	{18432, "0fd0607521b0aa8b52219cfbe215f63e"},
+	{0, NULL},
+};
+
+/* Key         = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ *               0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ */
+static MV_CESA_SIZE_TEST mdMultiSizeTest304[] = {
+	{80, "a456c4723fee6068530af5a2afa71627"},
+	{512, "f85c2a2344f5de68b432208ad13e5794"},
+	{1000, "35464d6821fd4a293a41eb84e274c8c5"},
+	{1001, "c08eedbdce60cceb54bc2d732bb32c8b"},
+	{1002, "5664f71800c011cc311cb6943339c1b8"},
+	{1003, "779c723b044c585dc7802b13e8501bdc"},
+	{1004, "55e500766a2c307bc5c5fdd15e4cacd4"},
+	{1005, "d5f978954f5c38529d1679d2b714f068"},
+	{1006, "cd3efc827ce628b7281b72172693abf9"},
+	{1336, "6f04479910785878ae6335b8d1e87edf"},
+	{1344, "b6d27b50c2bce1ba2a8e1b5cc4324368"},
+	{1399, "65f70a1d4c86e5eaeb0704c8a7816795"},
+	{1400, "3394b5adc4cb3ff98843ca260a44a88a"},
+	{1401, "3a06f3582033a66a4e57e0603ce94e74"},
+	{1402, "e4d97f5ed51edc48abfa46eeb5c31752"},
+	{1403, "3d05e40b080ee3bedf293cb87b7140e7"},
+	{1404, "8cf294fc3cd153ab18dccb2a52cbf244"},
+	{1405, "d1487bd42f6edd9b4dab316631159221"},
+	{1406, "0527123b6bf6936cf5d369dc18c6c70f"},
+	{1407, "3224a06639db70212a0cd1ae1fcc570a"},
+	{1408, "a9e13335612c0356f5e2c27086e86c43"},
+	{1409, "a86d1f37d1ed8a3552e9a4f04dceea98"},
+	{2048, "396905c9b961cd0f6152abfb69c4449c"},
+	{2049, "49f39bff85d9dcf059fadb89efc4a70f"},
+	{2050, "3a2b4823bc4d0415656550226a63e34a"},
+	{2051, "dec60580d406c782540f398ad0bcc7e0"},
+	{2052, "32f76610a14310309eb748fe025081bf"},
+	{3072, "45edc1a42bf9d708a621076b63b774da"},
+	{3074, "9be1b333fe7c0c9f835fb369dc45f778"},
+	{3075, "8c06fcac7bd0e7b7a17fd6508c09a549"},
+	{4048, "0ddaef848184bf0ad98507a10f1e90e4"},
+	{4052, "81976bcaeb274223983996c137875cb8"},
+	{4058, "0b0a7a1c82bc7cbc64d8b7cd2dc2bb22"},
+	{6144, "1c24056f52725ede2dff0d7f9fc9855f"},
+	{6150, "b7f4b65681c4e43ee68ca466ca9ca4ec"},
+	{6400, "443bbaab9f7331ddd4bf11b659cd43c8"},
+	{6528, "216f44f23047cfee03a7a64f88f9a995"},
+	{8192, "ac7a993b2cad54879dba1bde63e39097"},
+	{8320, "55ed7be9682d6c0025b3221a62088d08"},
+	{16384, "c6c722087653b62007aea668277175e5"},
+	{18432, "f1faca8e907872c809e14ffbd85792d6"},
+	{0, NULL},
+};
+
+/* HASH-MD5
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ *               repeated "size" times
+ */
+static MV_CESA_SIZE_TEST mdMultiSizeTest305[] = {
+	{80, "57edf4a22be3c955ac49da2e2107b67a"},
+	{512, "c729ae8f0736cc377a9767a660eaa04e"},
+	{1000, "f1257a8659eb92d36fe14c6bf3852a6a"},
+	{1001, "f8a46fe8ea04fdc8c7de0e84042d3878"},
+	{1002, "da188dd67bff87d58aa3c02af2d0cc0f"},
+	{1003, "961753017feee04c9b93a8e51658a829"},
+	{1004, "dd68c4338608dcc87807a711636bf2af"},
+	{1005, "e338d567d3ce66bf69ada29658a8759b"},
+	{1006, "443c9811e8b92599b0b149e8d7ec700a"},
+	{1336, "89a98511706008ba4cbd0b4a24fa5646"},
+	{1344, "335a919805f370b9e402a62c6fe01739"},
+	{1399, "5d18d0eddcd84212fe28d812b5e80e3b"},
+	{1400, "6b695c240d2dffd0dffc99459ca76db6"},
+	{1401, "49590f61298a76719bc93a57a30136f5"},
+	{1402, "94c2999fa3ef1910a683d69b2b8476f2"},
+	{1403, "37073a02ab00ecba2645c57c228860db"},
+	{1404, "1bcd06994fce28b624f0c5fdc2dcdd2b"},
+	{1405, "11b93671a64c95079e8cf9e7cddc8b3d"},
+	{1406, "4b6695772a4c66313fa4871017d05f36"},
+	{1407, "d1539b97fbfda1c075624e958de19c5b"},
+	{1408, "b801b9b69920907cd018e8063092ede9"},
+	{1409, "b765f1406cfe78e238273ed01bbcaf7e"},
+	{2048, "1d7e2c64ac29e2b3fb4c272844ed31f5"},
+	{2049, "71d38fac49c6b1f4478d8d88447bcdd0"},
+	{2050, "141c34a5592b1bebfa731e0b23d0cdba"},
+	{2051, "c5e1853f21c59f5d6039bd13d4b380d8"},
+	{2052, "dd44a0d128b63d4b5cccd967906472d7"},
+	{3072, "37d158e33b21390822739d13db7b87fe"},
+	{3074, "aef3b209d01d39d0597fe03634bbf441"},
+	{3075, "335ffb428eabf210bada96d74d5a4012"},
+	{4048, "2434c2b43d798d2819487a886261fc64"},
+	{4052, "ac2fa84a8a33065b2e92e36432e861f8"},
+	{4058, "856781f85616c341c3533d090c1e1e84"},
+	{6144, "e5d134c652c18bf19833e115f7a82e9b"},
+	{6150, "a09a353be7795fac2401dac5601872e6"},
+	{6400, "08b9033ac6a1821398f50af75a2dbc83"},
+	{6528, "3d47aa193a8540c091e7e02f779e6751"},
+	{8192, "d3164e710c0626f6f395b38f20141cb7"},
+	{8320, "b727589d9183ff4e8491dd24466974a3"},
+	{16384, "3f54d970793d2274d5b20d10a69938ac"},
+	{18432, "f558511dcf81985b7a1bb57fad970531"},
+	{0, NULL},
+};
+
+/* Key         = 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ *               0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa
+ *               0xaa, 0xaa, 0xaa, 0xaa
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ */
+static MV_CESA_SIZE_TEST sha1MultiSizeTest402[] = {
+	{80, "e812f370e659705a1649940d1f78cd7af18affd3"},
+	{512, "e547f886b2c15d995ed76a8a924cb408c8080f66"},
+	{1000, "239443194409f1a5342ecde1a092c8f3a3ed790a"},
+	{1001, "f278ab9a102850a9f48dc4e9e6822afe2d0c52b5"},
+	{1002, "8bcc667df5ab6ece988b3af361d09747c77f4e72"},
+	{1003, "0fae6046c7dc1d3e356b25af836f6077a363f338"},
+	{1004, "0ea48401cc92ae6bc92ae76685269cb0167fbe1a"},
+	{1005, "ecbcd7c879b295bafcd8766cbeac58cc371e31d1"},
+	{1006, "eb4a4a3d07d1e9a15e6f1ab8a9c47f243e27324c"},
+	{1336, "f5950ee1d77c10e9011d2149699c9366fe52529c"},
+	{1344, "b04263604a63c351b0b3b9cf1785b4bdba6c8838"},
+	{1399, "8cb1cff61d5b784045974a2fc69386e3b8d24218"},
+	{1400, "9bb2f3fcbeddb2b90f0be797cd647334a2816d51"},
+	{1401, "23ae462a7a0cb440f7445791079a5d75a535dd33"},
+	{1402, "832974b524a4d3f9cc2f45a3cabf5ccef65cd2aa"},
+	{1403, "d1c683742fe404c3c20d5704a5430e7832a7ec95"},
+	{1404, "867c79042e64f310628e219d8b85594cd0c7adc3"},
+	{1405, "c9d81d49d13d94358f56ccfd61af02b36c69f7c3"},
+	{1406, "0df43daab2786172f9b8d07d61f14a070cf1287a"},
+	{1407, "0fd8f3ad7f169534b274d4c66bbddd89f759e391"},
+	{1408, "3987511182b18473a564436003139b808fa46343"},
+	{1409, "ef667e063c9e9f539a8987a8d0bd3066ee85d901"},
+	{2048, "921109c99f3fedaca21727156d5f2b4460175327"},
+	{2049, "47188600dd165eb45f27c27196d3c46f4f042c1b"},
+	{2050, "8831939904009338de10e7fa670847041387807d"},
+	{2051, "2f8ebb5db2997d614e767be1050366f3641e7520"},
+	{2052, "669e51cd730dae158d3bef8adba075bd95a0d011"},
+	{3072, "cfee66cfd83abc8451af3c96c6b35a41cc6c55f5"},
+	{3074, "216ea26f02976a261b7d21a4dd3085157bedfabd"},
+	{3075, "bd612ebba021fd8e012b14c3bd60c8c5161fabc0"},
+	{4048, "c2564c1fdf2d5e9d7dde7aace2643428e90662e8"},
+	{4052, "91ce61fe924b445dfe7b5a1dcd10a27caec16df6"},
+	{4058, "db2a9be5ee8124f091c7ebd699266c5de223c164"},
+	{6144, "855109903feae2ba3a7a05a326b8a171116eb368"},
+	{6150, "37520bb3a668294d9c7b073e7e3daf8fee248a78"},
+	{6400, "60a353c841b6d2b1a05890349dad2fa33c7536b7"},
+	{6528, "9e53a43a69bb42d7c8522ca8bd632e421d5edb36"},
+	{8192, "a918cb0da862eaea0a33ee0efea50243e6b4927c"},
+	{8320, "29a5dcf55d1db29cd113fcf0572ae414f1c71329"},
+	{16384, "6fb27966138e0c8d5a0d65ace817ebd53633cee1"},
+	{18432, "ca09900d891c7c9ae2a559b10f63a217003341c1"},
+	{0, NULL},
+};
+
+/* Key         = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ *               0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ *               0x11, 0x12, 0x13, 0x14
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ */
+static MV_CESA_SIZE_TEST sha1MultiSizeTest404[] = {
+	{80, "beaf20a34b06a87558d156c0949bc3957d40222e"},
+	{512, "3353955358d886bc2940a3c7f337ff7dafb59c7b"},
+	{1000, "8737a542c5e9b2b6244b757ebb69d5bd602a829f"},
+	{1001, "fd9e7582d8a5d3c9fe3b923e4e6a41b07a1eb4d4"},
+	{1002, "a146d14a6fc3c274ff600568f4d75b977989e00d"},
+	{1003, "be22601bbc027ddef2dec97d30b3dc424fd803c5"},
+	{1004, "3e71fe99b2fe2b7bfdf4dbf0c7f3da25d7ea35e7"},
+	{1005, "2c422735d7295408fddd76f5e8a83a2a8da13df3"},
+	{1006, "6d875319049314b61855101a647b9ba3313428e6"},
+	{1336, "c1631ea80bad9dc43a180712461b65a0598c711c"},
+	{1344, "816069bf91d34581005746e2e0283d0f9c7b7605"},
+	{1399, "4e139866dc61cfcb8b67ca2ebd637b3a538593af"},
+	{1400, "ff2a0f8dd2b02c5417910f6f55d33a78e081a723"},
+	{1401, "ab00c12be62336964cbce31ae97fe2a0002984d5"},
+	{1402, "61349e7f999f3a1acc56c3e9a5060a9c4a7b05b6"},
+	{1403, "3edbc0f61e435bc1317fa27d840076093fb79353"},
+	{1404, "d052c6dfdbe63d45dab23ef9893e2aa4636aca1e"},
+	{1405, "0cc16b7388d67bf0add15a31e6e6c753cfae4987"},
+	{1406, "c96ba7eaad74253c38c22101b558d2850b1d1b90"},
+	{1407, "3445428a40d2c6556e7c55797ad8d323b61a48d9"},
+	{1408, "8d6444f937a09317c89834187b8ea9b8d3a8c56b"},
+	{1409, "c700acd3ecd19014ea2bdb4d42510c467e088475"},
+	{2048, "ee27d2a0cb77470c2f496212dfd68b5bb7b04e4b"},
+	{2049, "683762d7a02983b26a6d046e6451d9cd82c25932"},
+	{2050, "0fd20f1d55a9ee18363c2a6fd54aa13aee69992f"},
+	{2051, "86c267d8cc4bc8d59090e4f8b303da960fd228b7"},
+	{2052, "452395ae05b3ec503eea34f86fc0832485ad97c1"},
+	{3072, "75198e3cfd0b9bcff2dabdf8e38e6fdaa33ca49a"},
+	{3074, "4e24785ef080141ce4aab4675986d9acea624d7c"},
+	{3075, "3a20c5978dd637ec0e809bf84f0d9ccf30bc65bf"},
+	{4048, "3c32da256be7a7554922bf5fed51b0d2d09e59ad"},
+	{4052, "fff898426ea16e54325ae391a32c6c9bce4c23c0"},
+	{4058, "c800b9e562e1c91e1310116341a3c91d37f848ec"},
+	{6144, "d91d509d0cc4376c2d05bf9a5097717a373530e6"},
+	{6150, "d957030e0f13c5df07d9eec298542d8f94a07f12"},
+	{6400, "bb745313c3d7dc17b3f955e5534ad500a1082613"},
+	{6528, "77905f80d9ca82080bbb3e5654896dabfcfd1bdb"},
+	{8192, "5237fd9a81830c974396f99f32047586612ff3c0"},
+	{8320, "57668e28d5f2dba0839518a11db0f6af3d7e08bf"},
+	{16384, "62e093fde467f0748087beea32e9af97d5c61241"},
+	{18432, "845fb33130c7d6ea554fd5aacb9c50cf7ccb5929"},
+	{0, NULL},
+};
+
+/* HASH-SHA1
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ *               repeated "size" times
+ */
+static MV_CESA_SIZE_TEST sha1MultiSizeTest405[] = {
+	{80, "50abf5706a150990a08b2c5ea40fa0e585554732"},
+	{512, "f14516a08948fa27917a974d219741a697ba0087"},
+	{1000, "0bd18c378d5788817eb4f1e5dc07d867efa5cbf4"},
+	{1001, "ca29b85c35db1b8aef83c977893a11159d1b7aa2"},
+	{1002, "d83bc973eaaedb8a31437994dabbb3304b0be086"},
+	{1003, "2cf7bbef0acd6c00536b5c58ca470df9a3a90b6c"},
+	{1004, "e4375d09b1223385a8a393066f8209acfd936a80"},
+	{1005, "1029b38043e027745d019ce1d2d68e3d8b9d8f99"},
+	{1006, "deea16dcebbd8ac137e2b984deb639b9fb5e9680"},
+	{1336, "ea031b065fff63dcfb6a41956e4777520cdbc55d"},
+	{1344, "b52096c6445e6c0a8355995c70dc36ae186c863c"},
+	{1399, "cde2f6f8379870db4b32cf17471dc828a8dbff2b"},
+	{1400, "e53ff664064bc09fe5054c650806bd42d8179518"},
+	{1401, "d1156db5ddafcace64cdb510ff0d4af9b9a8ad64"},
+	{1402, "34ede0e9a909dd84a2ae291539105c0507b958e1"},
+	{1403, "a772ca3536da77e6ad3251e4f9e1234a4d7b87c0"},
+	{1404, "29740fd2b04e7a8bfd32242db6233156ad699948"},
+	{1405, "65b17397495b70ce4865dad93bf991b74c97cce1"},
+	{1406, "a7ee89cd0754061fdb91af7ea6abad2c69d542e3"},
+	{1407, "3eebf82f7420188e23d328b7ce93580b279a5715"},
+	{1408, "e08d3363a8b9a490dfb3a4c453452b8f114deeec"},
+	{1409, "95d74df739181a4ff30b8c39e28793a36598e924"},
+	{2048, "aa40262509c2abf84aab0197f83187fc90056d91"},
+	{2049, "7dec28ef105bc313bade8d9a7cdeac58b99de5ea"},
+	{2050, "d2e30f77ec81197de20f56588a156094ecb88450"},
+	{2051, "6b22ccc874833e96551a39da0c0edcaa0d969d92"},
+	{2052, "f843141e57875cd669af58744bc60aa9ea59549c"},
+	{3072, "09c5fedeaa62c132e673cc3c608a00142273d086"},
+	{3074, "b09e95eea9c7b1b007a58accec488301901a7f3d"},
+	{3075, "e6226b77b4ada287a8c9bbcf4ed71eec5ce632dc"},
+	{4048, "e99394894f855821951ddddf5bfc628547435f5c"},
+	{4052, "32d2f1af38be9cfba6cd03d55a254d0b3e1eb382"},
+	{4058, "d906552a4f2aca3a22e1fecccbcd183d7289d0ef"},
+	{6144, "2e7f62d35a860988e1224dc0543204af19316041"},
+	{6150, "d6b89698ee133df46fec9d552fadc328aa5a1b51"},
+	{6400, "dff50e90c46853988fa3a4b4ce5dda6945aae976"},
+	{6528, "9e63ec0430b96db02d38bc78357a2f63de2ab7f8"},
+	{8192, "971eb71ed60394d5ab5abb12e88420bdd41b5992"},
+	{8320, "91606a31b46afeaac965cecf87297e791b211013"},
+	{16384, "547f830a5ec1f5f170ce818f156b1002cabc7569"},
+	{18432, "f16f272787f3b8d539652e4dc315af6ab4fda0ef"},
+	{0, NULL},
+};
+
+/* CryptoKey   = 0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef;
+ * MacKey      = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ *               0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ * Note: only sizes aligned to 3DES block size (8 bytes) allowed
+ */
+static MV_CESA_SIZE_TEST tripleDesMdMultiSizeTest602[] = {
+	{64, "9586962a2aaaef28803dec2e17807a7f"},
+	{80, "b7726a03aad490bd6c5a452a89a1b271"},
+	{352, "f1ed9563aecc3c0d2766eb2bed3b4e4c"},
+	{512, "0f9decb11ab40fe86f4d4d9397bc020e"},
+	{1000, "3ba69deac12cab8ff9dff7dbd9669927"},
+	{1336, "6cf47bf1e80e03e2c1d0945bc50d37d2"},
+	{1344, "4be388dab21ceb3fa1b8d302e9b821f7"},
+	{1400, "a58b79fb21dd9bfc6ec93e3b99fb0ef1"},
+	{1408, "8bc97379fc2ac3237effcdd4f7a86528"},
+	{2048, "1339f03ab3076f25a20bc4cba16eb5bf"},
+	{3072, "731204d2d90c4b36ae41f5e1fb874288"},
+	{4048, "c028d998cfda5642547b7e1ed5ea16e4"},
+	{6144, "b1b19cd910cc51bd22992f1e59f1e068"},
+	{6400, "44e4613496ba622deb0e7cb768135a2f"},
+	{6528, "3b06b0a86f8db9cd67f9448dfcf10549"},
+	{8192, "d581780b7163138a0f412be681457d82"},
+	{16384, "03b8ac05527faaf1bed03df149c65ccf"},
+	{18432, "677c8a86a41dab6c5d81b85b8fb10ff6"},
+	{0, NULL},
+};
+
+/* CryptoKey   = 0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef;
+ * MacKey      = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ *               0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ *               0x11, 0x12, 0x13, 0x14
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ * Note: only sizes aligned to 3DES block size (8 bytes) allowed
+ */
+static MV_CESA_SIZE_TEST tripleDesShaMultiSizeTest603[] = {
+	{64, "44a1e9bcbfc1429630d9ea68b7a48b0427a684f2"},
+	{80, "b2ddeaca91030eab5b95a234ef2c0f6e738ff883"},
+	{352, "4b91864c7ff629bdff75d9726421f76705452aaf"},
+	{512, "6dd37faceeb2aa98ba74f4242ed6734a4d546af5"},
+	{1000, "463661c30300be512a9df40904f0757cde5f1141"},
+	{1336, "b931f831d9034fe59c65176400b039fe9c1f44a5"},
+	{1344, "af8866b1cd4a4887d6185bfe72470ffdfb3648e1"},
+	{1400, "49c6caf07296d5e31d2504d088bc5b20c3ee7cdb"},
+	{1408, "fcae8deedbc6ebf0763575dc7e9de075b448a0f4"},
+	{2048, "edece5012146c1faa0dd10f50b183ba5d2af58ac"},
+	{3072, "5b83625adb43a488b8d64fecf39bb766818547b7"},
+	{4048, "d2c533678d26c970293af60f14c8279dc708bfc9"},
+	{6144, "b8f67af4f991b08b725f969b049ebf813bfacc5c"},
+	{6400, "d9a6c7f746ac7a60ef2edbed2841cf851c25cfb0"},
+	{6528, "376792b8c8d18161d15579fb7829e6e3a27e9946"},
+	{8192, "d890eabdca195b34ef8724b28360cffa92ae5655"},
+	{16384, "a167ee52639ec7bf19aee9c6e8f76667c14134b9"},
+	{18432, "e4396ab56f67296b220985a12078f4a0e365d2cc"},
+	{0, NULL},
+};
+
+/* CryptoKey   = 0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef
+ * IV          = 0x12345678, 0x90abcdef
+ * MacKey      = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ *               0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ * Note: only sizes aligned to 3DES block size (8 bytes) allowed
+ */
+static MV_CESA_SIZE_TEST cbc3desMdMultiSizeTest604[] = {
+	{64, "8d10e00802460ede0058c139ba48bd2d"},
+	{80, "6f463057e1a90e0e91ae505b527bcec0"},
+	{352, "4938d48bdf86aece2c6851e7c6079788"},
+	{512, "516705d59f3cf810ebf2a13a23a7d42e"},
+	{1000, "a5a000ee5c830e67ddc6a2d2e5644b31"},
+	{1336, "44af60087b74ed07950088efbe3b126a"},
+	{1344, "1f5b39e0577920af731dabbfcf6dfc2a"},
+	{1400, "6804ea640e29b9cd39e08bc37dbce734"},
+	{1408, "4fb436624b02516fc9d1535466574bf9"},
+	{2048, "c909b0985c423d8d86719f701e9e83db"},
+	{3072, "cfe0bc34ef97213ee3d3f8b10122db21"},
+	{4048, "03ea10b5ae4ddeb20aed6af373082ed1"},
+	{6144, "b9a0ff4f87fc14b3c2dc6f0ed0998fdf"},
+	{6400, "6995f85d9d4985dd99e974ec7dda9dd6"},
+	{6528, "bbbb548ce2fa3d58467f6a6a5168a0e6"},
+	{8192, "afe101fbe745bb449ae4f50d10801456"},
+	{16384, "9741706d0b1c923340c4660ff97cacdf"},
+	{18432, "b0217becb73cb8f61fd79c7ce9d023fb"},
+	{0, NULL},
+};
+
+/* CryptoKey   = 0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef;
+ * IV          = 0x12345678, 0x90abcdef
+ * MacKey      = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ *               0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ *               0x11, 0x12, 0x13, 0x14
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ * Note: only sizes aligned to 3DES block size (8 bytes) allowed
+ */
+static MV_CESA_SIZE_TEST cbc3desShaMultiSizeTest605[] = {
+	{64, "409187e5bdb0be4a7754ca3747f7433dc4f01b98"},
+	{80, "1b002ed050be743aa98860cf35659646bb8efcc0"},
+	{352, "6cbf7ebe50fa4fa6eecc19eca23f9eae553ccfff"},
+	{512, "cfb5253fb4bf72b743320c30c7e48c54965853b0"},
+	{1000, "95e04e1ca2937e7c5a9aba9e42d2bcdb8a7af21f"},
+	{1336, "3b5c1f5eee5837ebf67b83ae01405542d77a6627"},
+	{1344, "2b3d42ab25615437f98a1ee310b81d07a02badc2"},
+	{1400, "7f8687df7c1af44e4baf3c934b6cca5ab6bc993e"},
+	{1408, "473a581c5f04f7527d50793c845471ac87e86430"},
+	{2048, "e41d20cae7ebe34e6e828ed62b1e5734019037bb"},
+	{3072, "275664afd7a561d804e6b0d204e53939cde653ae"},
+	{4048, "0d220cc5b34aeeb46bbbd637dde6290b5a8285a3"},
+	{6144, "cb393ddcc8b1c206060625b7d822ef9839e67bc5"},
+	{6400, "dd3317e2a627fc04800f74a4b05bfda00fab0347"},
+	{6528, "8a74c3b2441ab3f5a7e08895cc432566219a7c41"},
+	{8192, "b8e6ef3a549ed0e005bd5b8b1a5fe6689e9711a7"},
+	{16384, "55f59404008276cdac0e2ba0d193af2d40eac5ce"},
+	{18432, "86ae6c4fc72369a54cce39938e2d0296cd9c6ec5"},
+	{0, NULL},
+};
+
+/* CryptoKey   = 0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef
+ * IV          = 0x12345678, 0x90abcdef
+ * MacKey      = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ *               0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ * Note: only sizes aligned to AES block size (16 bytes) allowed
+ */
+static MV_CESA_SIZE_TEST cbcAes128md5multiSizeTest606[] = {
+	{16, "7ca4c2ba866751598720c5c4aa0d6786"},
+	{64, "7dba7fb988e80da609b1fea7254bced8"},
+	{80, "6b6e863ac5a71d15e3e9b1c86c9ba05f"},
+	{352, "a1ceb9c2e3021002400d525187a9f38c"},
+	{512, "596c055c1c55db748379223164075641"},
+	{1008, "f920989c02f3b3603f53c99d89492377"},
+	{1344, "2e496b73759d77ed32ea222dbd2e7b41"},
+	{1408, "7178c046b3a8d772efdb6a71c4991ea4"},
+	{2048, "a917f0099c69eb94079a8421714b6aad"},
+	{3072, "693cd5033d7f5391d3c958519fa9e934"},
+	{4048, "139dca91bcff65b3c40771749052906b"},
+	{6144, "428d9cef6df4fb70a6e9b6bbe4819e55"},
+	{6400, "9c0b909e76daa811e12b1fc17000a0c4"},
+	{6528, "ad876f6297186a7be1f1b907ed860eda"},
+	{8192, "479cbbaca37dd3191ea1f3e8134a0ef4"},
+	{16384, "60fda559c74f91df538100c9842f2f15"},
+	{18432, "4a3eb1cba1fa45f3981270953f720c42"},
+	{0, NULL},
+};
+
+/* CryptoKey   = 0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef,
+ *               0x01234567, 0x89abcdef;
+ * IV          = 0x12345678, 0x90abcdef
+ * MacKey      = 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ *               0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10
+ *               0x11, 0x12, 0x13, 0x14
+ * InputHexStr = "31323334353637383930" (ASCII = "1234567890")
+ * Note: only sizes aligned to AES block size (16 bytes) allowed
+ */
+static MV_CESA_SIZE_TEST cbcAes128sha1multiSizeTest607[] = {
+	{16, "9aa8dc1c45f0946daf78057fa978759c625c1fee"},
+	{64, "9f588fc1ede851e5f8b20256abc9979465ae2189"},
+	{80, "13558472d1fc1c90dffec6e5136c7203452d509b"},
+	{352, "6b93518e006cfaa1f7adb24615e7291fb0a27e06"},
+	{512, "096874951a77fbbf333e49d80c096ee2016e09bd"},
+	{1008, "696fc203c2e4b5ae0ec5d1db3f623c490bc6dbac"},
+	{1344, "79bf77509935ccd3528caaac6a5eb6481f74029b"},
+	{1408, "627f9462b95fc188e8cfa7eec15119bdc5d4fcf1"},
+	{2048, "3d50d0c005feba92fe41502d609fced9c882b4d1"},
+	{3072, "758807e5b983e3a91c06fb218fe0f73f77111e94"},
+	{4048, "ca90e85242e33f005da3504416a52098d0d31fb2"},
+	{6144, "8044c1d4fd06642dfc46990b4f18b61ef1e972cf"},
+	{6400, "166f1f4ea57409f04feba9fb1e39af0e00bd6f43"},
+	{6528, "0389016a39485d6e330f8b4215ddf718b404f7e9"},
+	{8192, "6df7ee2a8b61d6f7f860ce8dbf778f0c2a5b508b"},
+	{16384, "a70a6d8dfa1f91ded621c3dbaed34162bc48783f"},
+	{18432, "8dfad627922ce15df1eed10bdbed49244efa57db"},
+	{0, NULL},
+};
+
+void cesaTestPrintStatus(void);
+
+/*------------------------- LOCAL FUNCTIONs ---------------------------------*/
+MV_STATUS testCmd(int sid, int iter, MV_CESA_COMMAND *pCmd,
+		  MV_CESA_TEST_SESSION *pTestSession, MV_U8 *pIV, int ivSize);
+MV_STATUS testClose(int idx);
+MV_STATUS testOpen(int idx);
+void close_session(int sid);
+void cesaTestCheckReady(const MV_CESA_RESULT *r);
+void cesaCheckReady(MV_CESA_RESULT *r);
+void printTestResults(int idx, MV_STATUS status, int checkMode);
+void cesaLastResult(void);
+void cesaTestPrintReq(int req, int offset, int size);
+
+void cesaTestPrintStatus(void);
+void cesaTestPrintSession(int idx);
+void sizeTest(int testIdx, int iter, int checkMode);
+void multiTest(int iter, int reqSize, int checkMode);
+void oneTest(int testIdx, int caseIdx, int iter, int reqSize, int checkMode);
+void multiSizeTest(int idx, int iter, int checkMode, char *inputData);
+void cesaTest(int iter, int reqSize, int checkMode);
+void cesaOneTest(int testIdx, int caseIdx, int iter, int reqSize, int checkMode);
+void combiTest(int iter, int reqSize, int checkMode);
+void sha1Test(int iter, int reqSize, int checkMode);
+void sha2Test(int iter, int reqSize, int checkMode);
+void mdTest(int iter, int reqSize, int checkMode);
+void aesTest(int iter, int reqSize, int checkMode);
+void tripleDesTest(int iter, int reqSize, int checkMode);
+void desTest(int iter, int reqSize, int checkMode);
+void cesaTestStop(void);
+MV_STATUS testRun(int idx, int caseIdx, int iter, int reqSize, int checkMode);
+void cesaTestStart(int bufNum, int bufSize);
+
+static MV_U32 getRate(MV_U32 *remainder)
+{
+	MV_U32 rate;
+#ifdef MV_LINUX
+	rate = ((cesaIteration * cesaRateSize * 8) / (cesaEndTicks - cesaBeginTicks));
+	*remainder = 0;
+#else
+	MV_U32 kBits, milliSec;
+
+	milliSec = 0;
+	if ((cesaEndTicks - cesaBeginTicks) > 0)
+		milliSec = CESA_TEST_TICK_TO_MS(cesaEndTicks - cesaBeginTicks);
+
+	if (milliSec == 0) {
+		if (remainder != NULL)
+			*remainder = 0;
+		return 0;
+	}
+
+	kBits = (cesaIteration * cesaRateSize * 8) / 1000;
+	rate = kBits / milliSec;
+	if (remainder != NULL)
+		*remainder = ((kBits % milliSec) * 10) / milliSec;
+#endif
+	return rate;
+}
+
+static char *extractMbuf(MV_CESA_MBUF *pMbuf, int offset, int size, char *hexStr)
+{
+	mvCesaCopyFromMbuf((MV_U8 *)cesaBinBuffer, pMbuf, offset, size);
+	mv_bin_to_hex((const MV_U8 *)cesaBinBuffer, hexStr, size);
+
+	return hexStr;
+}
+
+static MV_BOOL cesaCheckMbuf(MV_CESA_MBUF *pMbuf, const char *hexString, int offset, int checkSize)
+{
+	MV_BOOL isFailed = MV_FALSE;
+	MV_STATUS status;
+	int size = strlen(hexString) / 2;
+	int checkedSize = 0;
+
+/*
+	mvOsPrintf("cesaCheckMbuf: pMbuf=%p, offset=%d, checkSize=%d, mBufSize=%d\n",
+			pMbuf, offset, checkSize, pMbuf->mbufSize);
+*/
+	if (pMbuf->mbufSize < (checkSize + offset)) {
+		mvOsPrintf("checkSize (%d) is too large: offset=%d, mbufSize=%d\n", checkSize, offset, pMbuf->mbufSize);
+		return MV_TRUE;
+	}
+	status = mvCesaCopyFromMbuf((MV_U8 *)cesaBinBuffer, pMbuf, offset, checkSize);
+	if (status != MV_OK) {
+		mvOsPrintf("CesaTest: Can't copy %d bytes from Mbuf=%p to checkBuf=%p\n",
+			   checkSize, pMbuf, cesaBinBuffer);
+		return MV_TRUE;
+	}
+/*
+    mv_debug_mem_dump(cesaBinBuffer, size, 1);
+*/
+	mv_hex_to_bin(hexString, (MV_U8 *)cesaExpBinBuffer, size);
+
+	/* Compare buffers */
+	while (checkSize > checkedSize) {
+		size = MV_MIN(size, (checkSize - checkedSize));
+		if (memcmp(cesaExpBinBuffer, &cesaBinBuffer[checkedSize], size) != 0) {
+			mvOsPrintf("CheckMbuf failed: checkSize=%d, size=%d, checkedSize=%d\n",
+				   checkSize, size, checkedSize);
+			mv_debug_mem_dump(&cesaBinBuffer[checkedSize], size, 1);
+			mv_debug_mem_dump(cesaExpBinBuffer, size, 1);
+
+			isFailed = MV_TRUE;
+			break;
+		}
+		checkedSize += size;
+	}
+
+	return isFailed;
+}
+
+static MV_STATUS cesaSetMbuf(MV_CESA_MBUF *pMbuf, const char *hexString, int offset, int reqSize)
+{
+	MV_STATUS status = MV_OK;
+	int copySize, size = strlen(hexString) / 2;
+
+	mv_hex_to_bin(hexString, (MV_U8 *)cesaBinBuffer, size);
+
+	copySize = 0;
+	while (reqSize > copySize) {
+		size = MV_MIN(size, (reqSize - copySize));
+
+		status = mvCesaCopyToMbuf((MV_U8 *)cesaBinBuffer, pMbuf, offset + copySize, size);
+		if (status != MV_OK) {
+			mvOsPrintf("cesaSetMbuf Error: Copy %d of %d bytes to MBuf\n", copySize, reqSize);
+			break;
+		}
+		copySize += size;
+	}
+	pMbuf->mbufSize = offset + copySize;
+	return status;
+}
+
+static MV_CESA_TEST_SESSION *getTestSessionDb(int idx, int *pTestIdx)
+{
+	int testIdx, dbIdx = idx / 100;
+
+	if (dbIdx > MAX_TEST_TYPE) {
+		mvOsPrintf("Wrong index %d - No such test type\n", idx);
+		return NULL;
+	}
+	testIdx = idx % 100;
+
+	if (testIdx >= cesaTestsDB[dbIdx].numSessions) {
+		mvOsPrintf("Wrong index %d - No such test\n", idx);
+		return NULL;
+	}
+	if (pTestIdx != NULL)
+		*pTestIdx = testIdx;
+
+	return cesaTestsDB[dbIdx].pSessions;
+}
+
+/* Debug */
+void cesaTestPrintReq(int req, int offset, int size)
+{
+	MV_CESA_MBUF *pMbuf;
+
+	mvOsPrintf("cesaTestPrintReq: req=%d, offset=%d, size=%d\n", req, offset, size);
+	mv_debug_mem_dump(cesaCmdRing, 128, 4);
+
+	pMbuf = cesaCmdRing[req].pSrc;
+	mvCesaIfDebugMbuf("src", pMbuf, offset, size);
+	pMbuf = cesaCmdRing[req].pDst;
+	mvCesaIfDebugMbuf("dst", pMbuf, offset, size);
+
+	cesaTestPrintStatus();
+}
+
+void cesaLastResult(void)
+{
+	mvOsPrintf("Last Result: ReqId = %d, SessionId = %d, rc = (%d)\n",
+		   (MV_U32) cesaResult.pReqPrv, cesaResult.sessionId, cesaResult.retCode);
+}
+
+void printTestResults(int idx, MV_STATUS status, int checkMode)
+{
+	int testIdx;
+	MV_CESA_TEST_SESSION *pTestSessions = getTestSessionDb(idx, &testIdx);
+
+	if (pTestSessions == NULL)
+		return;
+
+	mvOsPrintf("%-35s %4dx%-4d : ", pTestSessions[testIdx].name, cesaIteration, cesaReqSize);
+	if ((status == MV_OK) && (cesaCryptoError == 0) && (cesaError == 0) && (cesaReqIdError == 0)) {
+		mvOsPrintf("Passed, Rate=%3u.%u Mbps (%5u cpp)\n",
+			   cesaRate, cesaRateAfterDot, cesaEndTicks - cesaBeginTicks);
+	} else {
+		mvOsPrintf("Failed, Status = 0x%x\n", status);
+		if (cesaCryptoError > 0)
+			mvOsPrintf("cryptoError : %d\n", cesaCryptoError);
+		if (cesaReqIdError > 0)
+			mvOsPrintf("reqIdError  : %d\n", cesaReqIdError);
+		if (cesaError > 0)
+			mvOsPrintf("cesaError  : %d\n", cesaError);
+	}
+	if (cesaTestIsrMissCount > 0)
+		mvOsPrintf("cesaIsrMissed  : %d\n", cesaTestIsrMissCount);
+}
+
+void cesaCheckReady(MV_CESA_RESULT *r)
+{
+	int reqId;
+	MV_CESA_MBUF *pMbuf;
+	MV_BOOL isFailed;
+
+	reqId = (int)r->pReqPrv;
+	cesaResult = *r;
+	pMbuf = cesaCmdRing[reqId].pDst;
+
+/*
+	mvOsPrintf("cesaCheckReady: reqId=%d, checkOffset=%d, checkSize=%d\n",
+			reqId, cesaCheckOffset, cesaCheckSize);
+*/
+
+	/* Check expected reqId */
+	if (reqId != cesaExpReqId) {
+		cesaReqIdError++;
+	mvOsPrintf("CESA reqId Error: cbIter=%d (%d), reqId=%d, expReqId=%d\n",
+				cesaCbIter, cesaIteration, reqId, cesaExpReqId);
+
+	} else {
+
+		if ((cesaCheckMode == CESA_FULL_CHECK_MODE) || (cesaCheckMode == CESA_FAST_CHECK_MODE)) {
+			if (r->retCode != MV_OK) {
+				cesaError++;
+
+				mvOsPrintf("CESA Error: cbIter=%d (%d), reqId=%d, rc=%d\n",
+					   cesaCbIter, cesaIteration, reqId, r->retCode);
+			} else {
+
+				if ((cesaCheckSize > 0) && (cesaOutputHexStr != NULL)) {
+					/* Check expected output */
+
+					isFailed =
+					    cesaCheckMbuf(pMbuf, cesaOutputHexStr, cesaCheckOffset, cesaCheckSize);
+
+					if (isFailed) {
+						mvOsPrintf("CESA Crypto Error: cbIter=%d (%d), reqId=%d\n",
+							   cesaCbIter, cesaIteration, reqId);
+
+						CESA_TEST_DEBUG_PRINT(("Error: reqId=%d, reqSize=%d, checkOffset=%d, checkSize=%d\n",
+											   reqId, cesaReqSize, cesaCheckOffset, cesaCheckSize));
+
+						CESA_TEST_DEBUG_PRINT(("Output str: %s\n", cesaOutputHexStr));
+
+						CESA_TEST_DEBUG_CODE(mvCesaIfDebugMbuf
+								     ("error", pMbuf, 0,
+								      cesaCheckOffset + cesaCheckSize));
+
+						cesaCryptoError++;
+					}
+				}
+			}
+		}
+	}
+	if (cesaCheckMode == CESA_SHOW_CHECK_MODE) {
+		extractMbuf(pMbuf, cesaCheckOffset, cesaCheckSize, cesaHexBuffer);
+		mvOsPrintf("%4d, %s\n", cesaCheckOffset, cesaHexBuffer);
+	}
+
+	cesaCbIter++;
+
+	if (cesaCbIter >= cesaIteration) {
+		cesaCbIter = 0;
+		cesaExpReqId = 0;
+		cesaIsReady = MV_TRUE;
+#ifdef MV_LINUX
+		do_gettimeofday(&tv);
+		cesaEndTicks = ((tv.tv_sec * 1000000) + tv.tv_usec);
+#else
+		cesaEndTicks = CESA_TEST_TICK_GET();
+#endif
+		cesaRate = getRate(&cesaRateAfterDot);
+	} else {
+		cesaExpReqId = reqId + 1;
+		if (cesaExpReqId == CESA_TEST_REQ_SIZE)
+			cesaExpReqId = 0;
+	}
+}
+
+#ifdef MV_NETBSD
+static int cesaTestReadyIsr(void *arg)
+#else
+#ifdef __KERNEL__
+static irqreturn_t cesaTestReadyIsr(int irq, void *dev_id)
+#endif
+#ifdef MV_VXWORKS
+void cesaTestReadyIsr(void)
+#endif
+#endif
+{
+	MV_U32 cause, mask, ready = 0;
+	MV_STATUS status;
+	MV_CESA_RESULT result;
+	MV_U8 chan = *((MV_U8 *)dev_id);
+
+	if (mv_cesa_feature == INT_COALESCING)
+		mask = MV_CESA_CAUSE_EOP_COAL_MASK;
+	else
+		mask = MV_CESA_CAUSE_ACC_DMA_MASK;
+
+	spin_lock(&cesaLock);
+
+	/* Read cause register */
+	cause = MV_REG_READ(MV_CESA_ISR_CAUSE_REG(chan));
+
+	if ((cause & mask) != 0) {
+
+		/* Clear pending irq */
+		MV_REG_WRITE(MV_CESA_ISR_CAUSE_REG(chan), ~cause);
+
+		cesaTestIsrCount[chan]++;
+
+		/* Get Ready requests */
+		while (1) {
+			status = mvCesaIfReadyGet(chan, &result);
+			if (status != MV_OK)
+				break;
+
+			ready++;
+			cesaCheckReady(&result);
+		}
+
+		if ((cesaTestFull == 1) && (ready >= 1)) {
+			cesaTestFull = 0;
+			CESA_TEST_WAKE_UP();
+		}
+	}
+
+	spin_unlock(&cesaLock);
+
+
+#ifdef MV_NETBSD
+		return 0;
+#else
+#ifdef __KERNEL__
+		return IRQ_HANDLED;
+#else
+		return;
+#endif
+#endif
+
+}
+
+void cesaTestCheckReady(const MV_CESA_RESULT *r)
+{
+	MV_CESA_RESULT result = *r;
+
+	cesaCheckReady(&result);
+
+	if (cesaTestFull == 1) {
+		cesaTestFull = 0;
+		CESA_TEST_WAKE_UP();
+	}
+}
+
+static INLINE int open_session(MV_CESA_OPEN_SESSION *pOs)
+{
+	MV_U16 sid;
+	MV_STATUS status;
+
+	status = mvCesaIfSessionOpen(pOs, (short *)&sid);
+	if (status != MV_OK) {
+		mvOsPrintf("CesaTest: Can't open new session - status = 0x%x\n", status);
+		return -1;
+	}
+
+	return (int)sid;
+}
+
+void close_session(int sid)
+{
+	MV_STATUS status;
+
+	status = mvCesaIfSessionClose(sid);
+	if (status != MV_OK)
+		mvOsPrintf("CesaTest: Can't close session %d - status = 0x%x\n", sid, status);
+
+}
+
+MV_STATUS testOpen(int idx)
+{
+	MV_CESA_OPEN_SESSION os;
+	int sid, i, testIdx;
+	MV_CESA_TEST_SESSION *pTestSession;
+	MV_U16 digestSize = 0;
+
+	pTestSession = getTestSessionDb(idx, &testIdx);
+	if (pTestSession == NULL) {
+		mvOsPrintf("Test %d is not exist\n", idx);
+		return MV_BAD_PARAM;
+	}
+	pTestSession = &pTestSession[testIdx];
+
+	if (pTestSession->sid != -1) {
+		mvOsPrintf("Session for test %d already created: sid=%d\n", idx, pTestSession->sid);
+		return MV_OK;
+	}
+
+	os.cryptoAlgorithm = pTestSession->cryptoAlgorithm;
+	os.macMode = pTestSession->macAlgorithm;
+	switch (os.macMode) {
+	case MV_CESA_MAC_MD5:
+	case MV_CESA_MAC_HMAC_MD5:
+		digestSize = MV_CESA_MD5_DIGEST_SIZE;
+		break;
+
+	case MV_CESA_MAC_SHA1:
+	case MV_CESA_MAC_HMAC_SHA1:
+		digestSize = MV_CESA_SHA1_DIGEST_SIZE;
+		break;
+
+	case MV_CESA_MAC_SHA2:
+	case MV_CESA_MAC_HMAC_SHA2:
+		digestSize = MV_CESA_SHA2_DIGEST_SIZE;
+		break;
+
+	case MV_CESA_MAC_NULL:
+		digestSize = 0;
+	}
+	os.cryptoMode = pTestSession->cryptoMode;
+	os.direction = pTestSession->direction;
+	os.operation = pTestSession->operation;
+
+	for (i = 0; i < pTestSession->cryptoKeySize; i++)
+		os.cryptoKey[i] = pTestSession->pCryptoKey[i];
+
+	os.cryptoKeyLength = pTestSession->cryptoKeySize;
+
+	for (i = 0; i < pTestSession->macKeySize; i++)
+		os.macKey[i] = pTestSession->pMacKey[i];
+
+	os.macKeyLength = pTestSession->macKeySize;
+	os.digestSize = digestSize;
+
+	sid = open_session(&os);
+	if (sid == -1) {
+		mvOsPrintf("Can't open session for test %d: rc=0x%x\n", idx, cesaResult.retCode);
+		return cesaResult.retCode;
+	}
+	CESA_TEST_DEBUG_PRINT(("Opened session: sid = %d\n", sid));
+	pTestSession->sid = sid;
+	return MV_OK;
+}
+
+MV_STATUS testClose(int idx)
+{
+	int testIdx;
+	MV_CESA_TEST_SESSION *pTestSession;
+
+	pTestSession = getTestSessionDb(idx, &testIdx);
+	if (pTestSession == NULL) {
+		mvOsPrintf("Test %d is not exist\n", idx);
+		return MV_BAD_PARAM;
+	}
+	pTestSession = &pTestSession[testIdx];
+
+	if (pTestSession->sid == -1) {
+		mvOsPrintf("Test session %d is not opened\n", idx);
+		return MV_NO_SUCH;
+	}
+
+	close_session(pTestSession->sid);
+	pTestSession->sid = -1;
+
+	return MV_OK;
+}
+
+MV_STATUS testCmd(int sid, int iter, MV_CESA_COMMAND *pCmd,
+		  MV_CESA_TEST_SESSION *pTestSession, MV_U8 *pIV, int ivSize)
+{
+	int i;
+	MV_STATUS rc = MV_OK;
+	char ivZeroHex[] = "0000";
+	unsigned char chan = 0;
+	static unsigned long flags;
+
+	if (iter == 0)
+		iter = CESA_DEF_ITER_NUM;
+
+	if (pCmd == NULL) {
+		mvOsPrintf("testCmd failed: pCmd=NULL\n");
+		return MV_BAD_PARAM;
+	}
+	pCmd->sessionId = sid;
+
+	cesaCryptoError = 0;
+	cesaReqIdError = 0;
+	cesaError = 0;
+	cesaTestIsrMissCount = 0;
+	cesaIsReady = MV_FALSE;
+	cesaIteration = iter;
+	cmdReqId = 0;
+
+	if (cesaInputHexStr == NULL)
+		cesaInputHexStr = cesaPlainHexEbc;
+
+	for (i = 0; i < CESA_TEST_REQ_SIZE; i++) {
+		pCmd->pSrc = (MV_CESA_MBUF *) (cesaCmdRing[i].pSrc);
+		if (pIV != NULL) {
+			/* If IV from SA - set IV in Source buffer to zeros */
+			cesaSetMbuf(pCmd->pSrc, ivZeroHex, 0, pCmd->cryptoOffset);
+			cesaSetMbuf(pCmd->pSrc, cesaInputHexStr, pCmd->cryptoOffset,
+				    (cesaReqSize - pCmd->cryptoOffset));
+		} else {
+			cesaSetMbuf(pCmd->pSrc, cesaInputHexStr, 0, cesaReqSize);
+		}
+		pCmd->pDst = (MV_CESA_MBUF *) (cesaCmdRing[i].pDst);
+		cesaSetMbuf(pCmd->pDst, cesaNullPlainHexText, 0, cesaReqSize);
+
+		memcpy(&cesaCmdRing[i], pCmd, sizeof(*pCmd));
+	}
+
+	if (cesaCheckMode == CESA_SW_SHOW_CHECK_MODE) {
+		MV_U8 pDigest[MV_CESA_MAX_DIGEST_SIZE];
+
+		if (pTestSession->macAlgorithm == MV_CESA_MAC_MD5) {
+			mvMD5(pCmd->pSrc->pFrags[0].bufVirtPtr, pCmd->macLength, pDigest);
+			mvOsPrintf("SW HASH_MD5: reqSize=%d, macLength=%d\n", cesaReqSize, pCmd->macLength);
+			mv_debug_mem_dump(pDigest, MV_CESA_MD5_DIGEST_SIZE, 1);
+			return MV_OK;
+		}
+		if (pTestSession->macAlgorithm == MV_CESA_MAC_SHA1) {
+			mvSHA1(pCmd->pSrc->pFrags[0].bufVirtPtr, pCmd->macLength, pDigest);
+			mvOsPrintf("SW HASH_SHA1: reqSize=%d, macLength=%d\n", cesaReqSize, pCmd->macLength);
+			mv_debug_mem_dump(pDigest, MV_CESA_SHA1_DIGEST_SIZE, 1);
+			return MV_OK;
+		}
+		if (pTestSession->macAlgorithm == MV_CESA_MAC_SHA2) {
+			mvSHA256(pCmd->pSrc->pFrags[0].bufVirtPtr, pCmd->macLength, pDigest);
+			mvOsPrintf("SW HASH_SHA2: reqSize=%d, macLength=%d\n", cesaReqSize, pCmd->macLength);
+			mv_debug_mem_dump(pDigest, MV_CESA_SHA2_DIGEST_SIZE, 1);
+			return MV_OK;
+		}
+	}
+#ifdef MV_LINUX
+	do_gettimeofday(&tv);
+	cesaBeginTicks = ((tv.tv_sec * 1000000) + tv.tv_usec);
+#else
+	cesaBeginTicks = CESA_TEST_TICK_GET();
+#endif
+	CESA_TEST_DEBUG_CODE(memset(cesaTestTrace, 0, sizeof(cesaTestTrace)); cesaTestTraceIdx = 0;);
+
+	if (cesaCheckMode == CESA_SW_NULL_CHECK_MODE) {
+		MV_U8 pDigest[MV_CESA_MAX_DIGEST_SIZE];
+
+		for (i = 0; i < iter; i++) {
+
+			if (pTestSession->macAlgorithm == MV_CESA_MAC_MD5)
+				mvMD5(pCmd->pSrc->pFrags[0].bufVirtPtr, pCmd->macLength, (unsigned char *)pDigest);
+
+			if (pTestSession->macAlgorithm == MV_CESA_MAC_SHA1)
+				mvSHA1(pCmd->pSrc->pFrags[0].bufVirtPtr, pCmd->macLength, (MV_U8 *) pDigest);
+
+			if (pTestSession->macAlgorithm == MV_CESA_MAC_SHA2)
+				mvSHA256(pCmd->pSrc->pFrags[0].bufVirtPtr, pCmd->macLength, (MV_U8 *) pDigest);
+		}
+#ifdef MV_LINUX
+		do_gettimeofday(&tv);
+		cesaEndTicks = ((tv.tv_sec * 1000000) + tv.tv_usec);
+#else
+		cesaEndTicks = CESA_TEST_TICK_GET();
+#endif
+		cesaRate = getRate(&cesaRateAfterDot);
+		cesaIsReady = MV_TRUE;
+
+		return MV_OK;
+	}
+
+	/*cesaTestIsrCount = 0; */
+	/*mvCesaDebugStatsClear(); */
+
+#ifndef MV_NETBSD
+	for (chan = 0; chan < mv_cesa_channels; chan++)
+		MV_REG_WRITE(MV_CESA_ISR_CAUSE_REG(chan), 0);
+#endif
+
+	for (i = 0; i < iter; i++) {
+
+		CESA_TEST_LOCK(flags);
+
+		pCmd = &cesaCmdRing[cmdReqId];
+		pCmd->pReqPrv = (void *)cmdReqId;
+
+		cmdReqId++;
+		if (cmdReqId >= CESA_TEST_REQ_SIZE)
+			cmdReqId = 0;
+
+		CESA_TEST_UNLOCK(flags);
+
+		pCmd->split = MV_CESA_SPLIT_NONE;
+
+		rc = mvCesaIfAction(pCmd);
+
+		if (rc == MV_NO_RESOURCE) {
+			CESA_TEST_LOCK(flags);
+			cesaTestFull = 1;
+			CESA_TEST_UNLOCK(flags);
+
+			CESA_TEST_WAIT((cesaTestFull == 0), 1000);
+
+			if (cesaTestFull == 1) {
+				mvOsPrintf("CESA Test timeout: i=%d, iter=%d, cesaTestFull=%d\n",
+					   i, iter, cesaTestFull);
+				CESA_TEST_LOCK(flags);
+				cesaTestFull = 0;
+				CESA_TEST_UNLOCK(flags);
+				return MV_TIMEOUT;
+			}
+			rc = mvCesaIfAction(pCmd);
+		}
+
+		if ((rc != MV_OK) && (rc != MV_NO_MORE)) {
+			mvOsPrintf("mvCesaIfAction failed: rc=%d\n", rc);
+			return rc;
+		}
+
+#ifdef MV_LINUX
+		/* Reschedule each 16 requests */
+		if ((i & 0xF) == 0)
+			schedule();
+#endif
+	}
+	return MV_OK;
+}
+
+static int
+cesa_test_probe(struct platform_device *pdev)
+{
+	struct device_node *np;
+	struct clk *clk;
+	int i, j, idx, err, bufNum = buf_num, bufSize = buf_size, irq;
+	MV_CESA_MBUF *pMbufSrc, *pMbufDst;
+	MV_BUF_INFO *pFragsSrc, *pFragsDst;
+	char *pBuf;
+#ifndef MV_NETBSD
+	int numOfSessions;
+	MV_STATUS status;
+#endif
+	MV_U8 chan = 0;
+	MV_U32 mask;
+	const char* irqName[] = {"cesa_test:0", "cesa_test:1"};
+	const char *cesa_m;
+
+	if (!pdev->dev.of_node) {
+		dev_err(&pdev->dev, "CESA device node not available\n");
+		return -ENOENT;
+	}
+
+	/*
+	 * Check driver mode from dts
+	 */
+	cesa_m = of_get_property(pdev->dev.of_node, "cesa,mode", NULL);
+	if (strncmp(cesa_m, "test", 4) != 0) {
+		dprintk("%s: device operate in %s mode\n", __func__, cesa_m);
+		return -ENODEV;
+	}
+	mv_cesa_mode = CESA_TEST_M;
+
+	j = of_property_count_strings(pdev->dev.of_node, "clock-names");
+	dprintk("%s: Gate %d clocks\n", __func__, (j > 0 ? j : 1));
+	/*
+	 * If property "clock-names" does not exist (j < 0), assume that there
+	 * is only one clock which needs gating (j > 0 ? j : 1)
+	 */
+	for (i = 0; i < (j > 0 ? j : 1); i++) {
+
+		/* Not all platforms can gate the clock, so it is not
+		 * an error if the clock does not exists.
+		 */
+		clk = of_clk_get(pdev->dev.of_node, i);
+		if (!IS_ERR(clk))
+			clk_prepare_enable(clk);
+	}
+
+	err = mv_get_cesa_resources(pdev);
+	if (err != 0)
+		return err;
+
+	cesaCmdRing = mvOsMalloc(sizeof(MV_CESA_COMMAND) * CESA_TEST_REQ_SIZE);
+	if (cesaCmdRing == NULL) {
+		mvOsPrintf("testStart: Can't allocate %d bytes of memory\n",
+			   (int)(sizeof(MV_CESA_COMMAND) * CESA_TEST_REQ_SIZE));
+		return -EINVAL;
+	}
+	memset(cesaCmdRing, 0, sizeof(MV_CESA_COMMAND) * CESA_TEST_REQ_SIZE);
+
+	if (bufNum == 0)
+		bufNum = CESA_DEF_BUF_NUM;
+
+	if (bufSize == 0)
+		bufSize = CESA_DEF_BUF_SIZE;
+
+	cesaBufNum = bufNum;
+	cesaBufSize = bufSize;
+	mvOsPrintf("CESA test started: bufNum = %d, bufSize = %d\n", bufNum, bufSize);
+
+	cesaHexBuffer = mvOsMalloc(2 * bufNum * bufSize);
+	if (cesaHexBuffer == NULL) {
+		mvOsPrintf("testStart: Can't malloc %d bytes for cesaHexBuffer.\n", 2 * bufNum * bufSize);
+		return -EINVAL;
+	}
+	memset(cesaHexBuffer, 0, (2 * bufNum * bufSize));
+
+	cesaBinBuffer = mvOsMalloc(bufNum * bufSize);
+	if (cesaBinBuffer == NULL) {
+		mvOsPrintf("testStart: Can't malloc %d bytes for cesaBinBuffer\n", bufNum * bufSize);
+		return -EINVAL;
+	}
+	memset(cesaBinBuffer, 0, (bufNum * bufSize));
+
+	cesaExpBinBuffer = mvOsMalloc(bufNum * bufSize);
+	if (cesaExpBinBuffer == NULL) {
+		mvOsPrintf("testStart: Can't malloc %d bytes for cesaExpBinBuffer\n", bufNum * bufSize);
+		return -EINVAL;
+	}
+	memset(cesaExpBinBuffer, 0, (bufNum * bufSize));
+
+	CESA_TEST_WAIT_INIT();
+
+	pMbufSrc = mvOsMalloc(sizeof(MV_CESA_MBUF) * CESA_TEST_REQ_SIZE);
+	pFragsSrc = mvOsMalloc(sizeof(MV_BUF_INFO) * bufNum * CESA_TEST_REQ_SIZE);
+
+	pMbufDst = mvOsMalloc(sizeof(MV_CESA_MBUF) * CESA_TEST_REQ_SIZE);
+	pFragsDst = mvOsMalloc(sizeof(MV_BUF_INFO) * bufNum * CESA_TEST_REQ_SIZE);
+
+	if ((pMbufSrc == NULL) || (pFragsSrc == NULL) || (pMbufDst == NULL) || (pFragsDst == NULL)) {
+		mvOsPrintf("testStart: Can't malloc Src and Dst pMbuf and pFrags structures.\n");
+		/* !!!! Dima cesaTestCleanup(); */
+		return -EINVAL;
+	}
+
+	memset(pMbufSrc, 0, sizeof(MV_CESA_MBUF) * CESA_TEST_REQ_SIZE);
+	memset(pFragsSrc, 0, sizeof(MV_BUF_INFO) * bufNum * CESA_TEST_REQ_SIZE);
+
+	memset(pMbufDst, 0, sizeof(MV_CESA_MBUF) * CESA_TEST_REQ_SIZE);
+	memset(pFragsDst, 0, sizeof(MV_BUF_INFO) * bufNum * CESA_TEST_REQ_SIZE);
+
+	mvOsPrintf("Cesa Test Start: pMbufSrc=%p, pFragsSrc=%p, pMbufDst=%p, pFragsDst=%p\n",
+		   pMbufSrc, pFragsSrc, pMbufDst, pFragsDst);
+
+	idx = 0;
+	for (i = 0; i < CESA_TEST_REQ_SIZE; i++) {
+		pBuf = mvOsIoCachedMalloc(cesaTestOSHandle, bufSize * bufNum * 2,
+					  &cesaReqBufs[i].bufPhysAddr, &cesaReqBufs[i].memHandle);
+		if (pBuf == NULL) {
+			mvOsPrintf("testStart: Can't malloc %d bytes for pBuf\n", bufSize * bufNum * 2);
+			return -EINVAL;
+		}
+
+		memset(pBuf, 0, bufSize * bufNum * 2);
+		mvOsCacheFlush(cesaTestOSHandle, pBuf, bufSize * bufNum * 2);
+		if (pBuf == NULL) {
+			mvOsPrintf("cesaTestStart: Can't allocate %d bytes for req_%d buffers\n",
+				   bufSize * bufNum * 2, i);
+			return -EINVAL;
+		}
+
+		cesaReqBufs[i].bufVirtPtr = (MV_U8 *) pBuf;
+		cesaReqBufs[i].bufSize = bufSize * bufNum * 2;
+
+		cesaCmdRing[i].pSrc = &pMbufSrc[i];
+		cesaCmdRing[i].pSrc->pFrags = &pFragsSrc[idx];
+		cesaCmdRing[i].pSrc->numFrags = bufNum;
+		cesaCmdRing[i].pSrc->mbufSize = 0;
+
+		cesaCmdRing[i].pDst = &pMbufDst[i];
+		cesaCmdRing[i].pDst->pFrags = &pFragsDst[idx];
+		cesaCmdRing[i].pDst->numFrags = bufNum;
+		cesaCmdRing[i].pDst->mbufSize = 0;
+
+		for (j = 0; j < bufNum; j++) {
+			cesaCmdRing[i].pSrc->pFrags[j].bufVirtPtr = (MV_U8 *) pBuf;
+			cesaCmdRing[i].pSrc->pFrags[j].bufSize = bufSize;
+			pBuf += bufSize;
+			cesaCmdRing[i].pDst->pFrags[j].bufVirtPtr = (MV_U8 *) pBuf;
+			cesaCmdRing[i].pDst->pFrags[j].bufSize = bufSize;
+			pBuf += bufSize;
+		}
+		idx += bufNum;
+	}
+
+#ifndef MV_NETBSD
+	numOfSessions = CESA_DEF_SESSION_NUM;
+
+	status = mvSysCesaInit(numOfSessions, CESA_DEF_REQ_SIZE, &pdev->dev, pdev);
+	if (status != MV_OK) {
+		mvOsPrintf("mvCesaInit is Failed: status = 0x%x\n", status);
+		/* !!!! Dima cesaTestCleanup(); */
+		return -EINVAL;
+	}
+#endif /* !MV_NETBSD */
+
+	/* Prepare data for tests */
+	for (i = 0; i < 50; i++)
+		strcat((char *)cesaDataHexStr3, "dd");
+
+	strcpy((char *)cesaDataAndMd5digest3, cesaDataHexStr3);
+	strcpy((char *)cesaDataAndSha1digest3, cesaDataHexStr3);
+
+	/* Digest must be 8 byte aligned */
+	for (; i < 56; i++) {
+		strcat((char *)cesaDataAndMd5digest3, "00");
+		strcat((char *)cesaDataAndSha1digest3, "00");
+	}
+	strcat((char *)cesaDataAndMd5digest3, cesaHmacMd5digestHex3);
+	strcat((char *)cesaDataAndSha1digest3, cesaHmacSha1digestHex3);
+
+	if (mv_cesa_feature == INT_COALESCING)
+		mask = MV_CESA_CAUSE_EOP_COAL_MASK;
+	else
+		mask = MV_CESA_CAUSE_ACC_DMA_MASK;
+
+#ifndef __KERNEL__
+	for (chan = 0; chan < MV_CESA_CHANNELS; chan++) {
+#else
+	/*
+	 * Preparation for each CESA chan
+	 */
+	for_each_child_of_node(pdev->dev.of_node, np) {
+#endif
+#ifndef MV_NETBSD
+		MV_REG_WRITE(MV_CESA_ISR_CAUSE_REG(chan), 0);
+		MV_REG_WRITE(MV_CESA_ISR_MASK_REG(chan), mask);
+#endif
+
+#ifdef MV_VXWORKS
+	{
+		MV_STATUS status;
+
+		status = intConnect((VOIDFUNCPTR *) INT_LVL_CESA, cesaTestReadyIsr, (int)NULL);
+		if (status != OK) {
+			mvOsPrintf("CESA: Can't connect CESA (%d) interrupt, status=0x%x \n", INT_LVL_CESA, status);
+			/* !!!! Dima cesaTestCleanup(); */
+			return;
+		}
+		cesaSemId = semMCreate(SEM_Q_PRIORITY | SEM_INVERSION_SAFE | SEM_DELETE_SAFE);
+		if (cesaSemId == NULL) {
+			mvOsPrintf("cesaTestStart: Can't create semaphore\n");
+			return;
+		}
+		intEnable(INT_LVL_CESA);
+	}
+#endif /* MV_VXWORKS */
+
+#if !defined(MV_NETBSD) && defined(__KERNEL__)
+		/*
+		 * Get IRQ from FDT and map it to the Linux IRQ nr
+		 */
+		irq = irq_of_parse_and_map(np, 0);
+		if (!irq) {
+			dev_err(&pdev->dev, "IRQ nr missing in device tree\n");
+			return -ENOENT;
+		}
+
+		dprintk("%s: cesa irq %d, chan %d\n", __func__,
+					      irq, chan);
+		chanId[chan] = chan;
+		spin_lock_init(&cesaChanLock[chan]);
+		if (request_irq(irq, cesaTestReadyIsr, (IRQF_DISABLED),
+					       irqName[chan], &chanId[chan])) {
+			mvOsPrintf("cannot assign irq\n");
+			/* !!!! Dima cesaTestCleanup(); */
+			return -EINVAL;
+		}
+		chan++;
+	}
+	spin_lock_init(&cesaLock);
+
+#ifdef CESA_DEBUGS
+	mvCesaDebugRegs();
+#endif
+	dev_info(&pdev->dev, "%s: CESA driver operate in %s(%d) mode\n",
+					       __func__, cesa_m, mv_cesa_mode);
+	return 0;
+
+#endif
+}
+
+MV_STATUS testRun(int idx, int caseIdx, int iter, int reqSize, int checkMode)
+{
+	int testIdx, count, sid, digestSize;
+	int blockSize;
+	MV_CESA_TEST_SESSION *pTestSession;
+	MV_CESA_COMMAND cmd;
+	MV_STATUS status;
+	MV_U8 chan;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	pTestSession = getTestSessionDb(idx, &testIdx);
+	if (pTestSession == NULL) {
+		mvOsPrintf("Test %d is not exist\n", idx);
+		return MV_BAD_PARAM;
+	}
+	pTestSession = &pTestSession[testIdx];
+
+	sid = pTestSession->sid;
+	if (sid == -1) {
+		mvOsPrintf("Test %d is not opened\n", idx);
+		return MV_BAD_STATE;
+	}
+	switch (pTestSession->cryptoAlgorithm) {
+	case MV_CESA_CRYPTO_DES:
+	case MV_CESA_CRYPTO_3DES:
+		blockSize = MV_CESA_DES_BLOCK_SIZE;
+		break;
+
+	case MV_CESA_CRYPTO_AES:
+		blockSize = MV_CESA_AES_BLOCK_SIZE;
+		break;
+
+	case MV_CESA_CRYPTO_NULL:
+		blockSize = 0;
+		break;
+
+	default:
+		mvOsPrintf("cesaTestRun: Bad CryptoAlgorithm=%d\n", pTestSession->cryptoAlgorithm);
+		return MV_BAD_PARAM;
+	}
+	switch (pTestSession->macAlgorithm) {
+	case MV_CESA_MAC_MD5:
+	case MV_CESA_MAC_HMAC_MD5:
+		digestSize = MV_CESA_MD5_DIGEST_SIZE;
+		break;
+
+	case MV_CESA_MAC_SHA1:
+	case MV_CESA_MAC_HMAC_SHA1:
+		digestSize = MV_CESA_SHA1_DIGEST_SIZE;
+		break;
+
+	case MV_CESA_MAC_SHA2:
+	case MV_CESA_MAC_HMAC_SHA2:
+		digestSize = MV_CESA_SHA2_DIGEST_SIZE;
+		break;
+
+	default:
+		digestSize = 0;
+	}
+
+	if (iter == 0)
+		iter = CESA_DEF_ITER_NUM;
+
+	if (pTestSession->direction == MV_CESA_DIR_ENCODE) {
+		cesaOutputHexStr = cesaTestCases[caseIdx].cipherHexStr;
+		cesaInputHexStr = cesaTestCases[caseIdx].plainHexStr;
+	} else {
+		cesaOutputHexStr = cesaTestCases[caseIdx].plainHexStr;
+		cesaInputHexStr = cesaTestCases[caseIdx].cipherHexStr;
+	}
+
+	cmd.sessionId = sid;
+	if (checkMode == CESA_FAST_CHECK_MODE) {
+		cmd.cryptoLength = cesaTestCases[caseIdx].cryptoLength;
+		cmd.macLength = cesaTestCases[caseIdx].macLength;
+	} else {
+		cmd.cryptoLength = reqSize;
+		cmd.macLength = reqSize;
+	}
+	cesaRateSize = cmd.cryptoLength;
+	cesaReqSize = cmd.cryptoLength;
+	cmd.cryptoOffset = 0;
+	if (pTestSession->operation != MV_CESA_MAC_ONLY) {
+		if ((pTestSession->cryptoMode == MV_CESA_CRYPTO_CBC) ||
+		    (pTestSession->cryptoMode == MV_CESA_CRYPTO_CTR)) {
+			cmd.ivOffset = 0;
+			cmd.cryptoOffset = blockSize;
+			if (cesaTestCases[caseIdx].pCryptoIV == NULL) {
+				cmd.ivFromUser = 1;
+			} else {
+				cmd.ivFromUser = 0;
+				for (chan = 0; chan < mv_cesa_channels; chan++)
+					mvCesaCryptoIvSet(chan, cesaTestCases[caseIdx].pCryptoIV, blockSize);
+			}
+			cesaReqSize = cmd.cryptoOffset + cmd.cryptoLength;
+		}
+	}
+
+/*
+	mvOsPrintf("ivFromUser=%d, cryptoLength=%d, cesaReqSize=%d, cryptoOffset=%d\n",
+				cmd.ivFromUser, cmd.cryptoLength, cesaReqSize, cmd.cryptoOffset);
+*/
+	if (pTestSession->operation != MV_CESA_CRYPTO_ONLY) {
+		cmd.macOffset = cmd.cryptoOffset;
+
+		if (cesaTestCases[caseIdx].digestOffset == -1) {
+			cmd.digestOffset = cmd.macOffset + cmd.macLength;
+			cmd.digestOffset = MV_ALIGN_UP(cmd.digestOffset, 8);
+		} else {
+			cmd.digestOffset = cesaTestCases[caseIdx].digestOffset;
+		}
+		if ((cmd.digestOffset + digestSize) > cesaReqSize)
+			cesaReqSize = cmd.digestOffset + digestSize;
+	}
+
+	cesaCheckMode = checkMode;
+
+	if (checkMode == CESA_NULL_CHECK_MODE) {
+		cesaCheckSize = 0;
+		cesaCheckOffset = 0;
+	} else {
+		if (pTestSession->operation == MV_CESA_CRYPTO_ONLY) {
+			cesaCheckOffset = 0;
+			cesaCheckSize = cmd.cryptoLength;
+		} else {
+			cesaCheckSize = digestSize;
+			cesaCheckOffset = cmd.digestOffset;
+		}
+	}
+/*
+	mvOsPrintf("reqSize=%d, checkSize=%d, checkOffset=%d, checkMode=%d\n",
+			cesaReqSize, cesaCheckSize, cesaCheckOffset, cesaCheckMode);
+
+	mvOsPrintf("blockSize=%d, ivOffset=%d, ivFromUser=%d, crOffset=%d, crLength=%d\n",
+			blockSize, cmd.ivOffset, cmd.ivFromUser,
+			cmd.cryptoOffset, cmd.cryptoLength);
+
+	mvOsPrintf("macOffset=%d, digestOffset=%d, macLength=%d\n",
+			cmd.macOffset, cmd.digestOffset, cmd.macLength);
+*/
+	status = testCmd(sid, iter, &cmd, pTestSession, cesaTestCases[caseIdx].pCryptoIV, blockSize);
+
+	if (status != MV_OK)
+		return status;
+
+	/* Wait when all callbacks is received */
+	count = 0;
+	while (cesaIsReady == MV_FALSE) {
+		mvOsSleep(10);
+		count++;
+		if (count > 100) {
+			mvOsPrintf("testRun: Timeout occured\n");
+			return MV_TIMEOUT;
+		}
+	}
+
+	return MV_OK;
+}
+
+static int
+cesa_test_remove(struct platform_device *pdev)
+{
+	MV_CESA_MBUF *pMbufSrc, *pMbufDst;
+	MV_BUF_INFO *pFragsSrc, *pFragsDst;
+	int i;
+
+	/* Release all allocated memories */
+	pMbufSrc = (MV_CESA_MBUF *) (cesaCmdRing[0].pSrc);
+	pFragsSrc = cesaCmdRing[0].pSrc->pFrags;
+
+	pMbufDst = (MV_CESA_MBUF *) (cesaCmdRing[0].pDst);
+	pFragsDst = cesaCmdRing[0].pDst->pFrags;
+
+	mvOsFree(pMbufSrc);
+	mvOsFree(pMbufDst);
+	mvOsFree(pFragsSrc);
+	mvOsFree(pFragsDst);
+
+	for (i = 0; i < CESA_TEST_REQ_SIZE; i++) {
+		mvOsIoCachedFree(cesaTestOSHandle, cesaReqBufs[i].bufSize,
+				 cesaReqBufs[i].bufPhysAddr, cesaReqBufs[i].bufVirtPtr, cesaReqBufs[i].memHandle);
+	}
+	cesaDataHexStr3[0] = '\0';
+
+	/* Free CESA HAL resources */
+	return mvCesaIfFinish();
+}
+
+void desTest(int iter, int reqSize, int checkMode)
+{
+	int mode, i;
+	MV_STATUS status;
+
+	mode = checkMode;
+	if (checkMode == CESA_FULL_CHECK_MODE)
+		mode = CESA_FAST_CHECK_MODE;
+	i = iter;
+	if (mode != CESA_NULL_CHECK_MODE)
+		i = 1;
+
+	testOpen(0);
+	testOpen(1);
+	testOpen(2);
+	testOpen(3);
+
+/* DES / ECB mode / Encrypt only */
+	status = testRun(0, 1, iter, reqSize, checkMode);
+	printTestResults(0, status, checkMode);
+
+/* DES / ECB mode / Decrypt only */
+	status = testRun(1, 1, iter, reqSize, checkMode);
+	printTestResults(1, status, checkMode);
+
+/* DES / CBC mode / Encrypt only */
+	status = testRun(2, 2, i, reqSize, mode);
+	printTestResults(2, status, mode);
+
+/* DES / CBC mode / Decrypt only */
+	status = testRun(3, 2, iter, reqSize, mode);
+	printTestResults(3, status, mode);
+
+	testClose(0);
+	testClose(1);
+	testClose(2);
+	testClose(3);
+}
+
+void tripleDesTest(int iter, int reqSize, int checkMode)
+{
+	int mode, i;
+	MV_STATUS status;
+
+	mode = checkMode;
+	if (checkMode == CESA_FULL_CHECK_MODE)
+		mode = CESA_FAST_CHECK_MODE;
+	i = iter;
+	if (mode != CESA_NULL_CHECK_MODE)
+		i = 1;
+
+	testOpen(100);
+	testOpen(101);
+	testOpen(102);
+	testOpen(103);
+
+/* 3DES / ECB mode / Encrypt only */
+	status = testRun(100, 1, iter, reqSize, checkMode);
+	printTestResults(100, status, checkMode);
+
+/* 3DES / ECB mode / Decrypt only */
+	status = testRun(101, 1, iter, reqSize, checkMode);
+	printTestResults(101, status, checkMode);
+
+/* 3DES / CBC mode / Encrypt only */
+	status = testRun(102, 2, i, reqSize, mode);
+	printTestResults(102, status, mode);
+
+/* 3DES / CBC mode / Decrypt only */
+	status = testRun(103, 2, iter, reqSize, mode);
+	printTestResults(103, status, mode);
+
+	testClose(100);
+	testClose(101);
+	testClose(102);
+	testClose(103);
+}
+
+void aesTest(int iter, int reqSize, int checkMode)
+{
+	MV_STATUS status;
+	int mode, i;
+
+	mode = checkMode;
+	if (checkMode == CESA_FULL_CHECK_MODE)
+		mode = CESA_FAST_CHECK_MODE;
+
+	i = iter;
+	if (mode != CESA_NULL_CHECK_MODE)
+		i = 1;
+
+	testOpen(200);
+	testOpen(201);
+	testOpen(202);
+	testOpen(203);
+	testOpen(204);
+	testOpen(205);
+	testOpen(206);
+	testOpen(207);
+	testOpen(208);
+
+/* AES-128 Encode ECB mode */
+	status = testRun(200, 3, iter, reqSize, checkMode);
+	printTestResults(200, status, checkMode);
+
+/* AES-128 Decode ECB mode */
+	status = testRun(201, 3, iter, reqSize, checkMode);
+	printTestResults(201, status, checkMode);
+
+/* AES-128 Encode CBC mode (IV from SA) */
+	status = testRun(202, 10, i, reqSize, mode);
+	printTestResults(202, status, mode);
+
+/* AES-128 Encode CBC mode (IV from User) */
+	status = testRun(202, 24, i, reqSize, mode);
+	printTestResults(202, status, mode);
+
+/* AES-128 Decode CBC mode */
+	status = testRun(203, 24, iter, reqSize, mode);
+	printTestResults(203, status, checkMode);
+
+/* AES-192 Encode ECB mode */
+	status = testRun(204, 4, iter, reqSize, checkMode);
+	printTestResults(204, status, checkMode);
+
+/* AES-192 Decode ECB mode */
+	status = testRun(205, 4, iter, reqSize, checkMode);
+	printTestResults(205, status, checkMode);
+
+/* AES-256 Encode ECB mode */
+	status = testRun(206, 5, iter, reqSize, checkMode);
+	printTestResults(206, status, checkMode);
+
+/* AES-256 Decode ECB mode */
+	status = testRun(207, 5, iter, reqSize, checkMode);
+	printTestResults(207, status, checkMode);
+
+#if 0
+/* AES-128 Encode CTR mode */
+	status = testRun(208, 23, iter, reqSize, mode);
+	printTestResults(208, status, checkMode);
+#endif
+
+	testClose(200);
+	testClose(201);
+	testClose(202);
+	testClose(203);
+	testClose(204);
+	testClose(205);
+	testClose(206);
+	testClose(207);
+	testClose(208);
+}
+
+void mdTest(int iter, int reqSize, int checkMode)
+{
+	int mode;
+	MV_STATUS status;
+
+	if (iter == 0)
+		iter = CESA_DEF_ITER_NUM;
+
+	mode = checkMode;
+	if (checkMode == CESA_FULL_CHECK_MODE)
+		mode = CESA_FAST_CHECK_MODE;
+
+	testOpen(300);
+	testOpen(301);
+	testOpen(302);
+	testOpen(303);
+	testOpen(305);
+
+/* HMAC-MD5 Generate signature test */
+	status = testRun(300, 6, iter, reqSize, mode);
+	printTestResults(300, status, checkMode);
+
+/* HMAC-MD5 Verify Signature test */
+	status = testRun(301, 7, iter, reqSize, mode);
+	printTestResults(301, status, checkMode);
+
+/* HMAC-MD5 Generate signature test */
+	status = testRun(302, 8, iter, reqSize, mode);
+	printTestResults(302, status, checkMode);
+
+/* HMAC-MD5 Verify Signature test */
+	status = testRun(303, 9, iter, reqSize, mode);
+	printTestResults(303, status, checkMode);
+
+/* HASH-MD5 Generate signature test */
+	status = testRun(305, 15, iter, reqSize, mode);
+	printTestResults(305, status, checkMode);
+
+	testClose(300);
+	testClose(301);
+	testClose(302);
+	testClose(303);
+	testClose(305);
+}
+
+void sha1Test(int iter, int reqSize, int checkMode)
+{
+	int mode;
+	MV_STATUS status;
+
+	if (iter == 0)
+		iter = CESA_DEF_ITER_NUM;
+
+	mode = checkMode;
+	if (checkMode == CESA_FULL_CHECK_MODE)
+		mode = CESA_FAST_CHECK_MODE;
+
+	testOpen(400);
+	testOpen(401);
+	testOpen(402);
+	testOpen(403);
+	testOpen(405);
+
+/* HMAC-SHA1 Generate signature test */
+	status = testRun(400, 11, iter, reqSize, mode);
+	printTestResults(400, status, checkMode);
+
+/* HMAC-SHA1 Verify Signature test */
+	status = testRun(401, 12, iter, reqSize, mode);
+	printTestResults(401, status, checkMode);
+
+/* HMAC-SHA1 Generate signature test */
+	status = testRun(402, 13, iter, reqSize, mode);
+	printTestResults(402, status, checkMode);
+
+/* HMAC-SHA1 Verify Signature test */
+	status = testRun(403, 14, iter, reqSize, mode);
+	printTestResults(403, status, checkMode);
+
+/* HMAC-SHA1 Generate signature test */
+	status = testRun(405, 16, iter, reqSize, mode);
+	printTestResults(405, status, checkMode);
+
+	testClose(400);
+	testClose(401);
+	testClose(402);
+	testClose(403);
+	testClose(405);
+}
+
+void sha2Test(int iter, int reqSize, int checkMode)
+{
+	int mode;
+	MV_STATUS status;
+
+	if (iter == 0)
+		iter = CESA_DEF_ITER_NUM;
+
+	mode = checkMode;
+	if (checkMode == CESA_FULL_CHECK_MODE)
+		mode = CESA_FAST_CHECK_MODE;
+
+	testOpen(500);
+	testOpen(501);
+	testOpen(502);
+	testOpen(503);
+
+/* HMAC-SHA2 Generate signature test */
+	status = testRun(500, 28, iter, reqSize, mode);
+	printTestResults(500, status, checkMode);
+/* HMAC-SHA2 Verify signature test */
+	status = testRun(501, 29, iter, reqSize, mode);
+	printTestResults(501, status, checkMode);
+/* HMAC-SHA2 Generate signature test */
+	status = testRun(502, 30, iter, reqSize, mode);
+	printTestResults(502, status, checkMode);
+/* HMAC-SHA2 Verify signature test */
+	status = testRun(503, 31, iter, reqSize, mode);
+	printTestResults(503, status, checkMode);
+
+	testClose(500);
+	testClose(501);
+	testClose(502);
+	testClose(503);
+}
+
+void combiTest(int iter, int reqSize, int checkMode)
+{
+	MV_STATUS status;
+	int mode, i;
+
+	mode = checkMode;
+	if (checkMode == CESA_FULL_CHECK_MODE)
+		mode = CESA_FAST_CHECK_MODE;
+
+	if (iter == 0)
+		iter = CESA_DEF_ITER_NUM;
+
+	i = iter;
+	if (mode != CESA_NULL_CHECK_MODE)
+		i = 1;
+
+	testOpen(600);
+	testOpen(601);
+	testOpen(602);
+	testOpen(603);
+	testOpen(604);
+	testOpen(605);
+	testOpen(606);
+	testOpen(607);
+	testOpen(609);
+
+/* DES ECB + MD5 encode test */
+	status = testRun(600, 17, iter, reqSize, mode);
+	printTestResults(600, status, mode);
+
+/* DES ECB + SHA1 encode test */
+	status = testRun(601, 18, iter, reqSize, mode);
+	printTestResults(601, status, mode);
+
+/* 3DES ECB + MD5 encode test */
+	status = testRun(602, 17, iter, reqSize, mode);
+	printTestResults(602, status, mode);
+
+/* 3DES ECB + SHA1 encode test */
+	status = testRun(603, 18, iter, reqSize, mode);
+	printTestResults(603, status, mode);
+
+/* 3DES CBC + MD5 encode test */
+	status = testRun(604, 19, i, reqSize, mode);
+	printTestResults(604, status, mode);
+
+/* 3DES CBC + SHA1 encode test */
+	status = testRun(605, 20, i, reqSize, mode);
+	printTestResults(605, status, mode);
+
+/* AES-128 CBC + MD5 encode test */
+	status = testRun(606, 21, i, reqSize, mode);
+	printTestResults(606, status, mode);
+
+/* AES-128 CBC + SHA1 encode test */
+	status = testRun(607, 22, i, reqSize, mode);
+	printTestResults(607, status, mode);
+
+/* AES-128 CBC + SHA2 encode test */
+	status = testRun(609, 32, i, reqSize, mode);
+	printTestResults(609, status, mode);
+
+	testClose(600);
+	testClose(601);
+	testClose(602);
+	testClose(603);
+	testClose(604);
+	testClose(605);
+	testClose(606);
+	testClose(607);
+	testClose(609);
+}
+
+void cesaOneTest(int testIdx, int caseIdx, int iter, int reqSize, int checkMode)
+{
+	MV_STATUS status;
+
+	if (iter == 0)
+		iter = CESA_DEF_ITER_NUM;
+
+	mvOsPrintf("test=%d, case=%d, size=%d, iter=%d\n", testIdx, caseIdx, reqSize, iter);
+
+	status = testOpen(testIdx);
+
+	status = testRun(testIdx, caseIdx, iter, reqSize, checkMode);
+	printTestResults(testIdx, status, checkMode);
+	status = testClose(testIdx);
+
+}
+
+void cesaTest(int iter, int reqSize, int checkMode)
+{
+	if (iter == 0)
+		iter = CESA_DEF_ITER_NUM;
+
+	mvOsPrintf("%d iteration\n", iter);
+	mvOsPrintf("%d size\n\n", reqSize);
+
+/* DES tests */
+	desTest(iter, reqSize, checkMode);
+
+/* 3DES tests */
+	tripleDesTest(iter, reqSize, checkMode);
+
+/* AES tests */
+	aesTest(iter, reqSize, checkMode);
+
+/* MD5 tests */
+	mdTest(iter, reqSize, checkMode);
+
+/* SHA-1 tests */
+	sha1Test(iter, reqSize, checkMode);
+
+/* SHA-2 tests */
+	sha2Test(iter, reqSize, checkMode);
+}
+
+void multiSizeTest(int idx, int iter, int checkMode, char *inputData)
+{
+	MV_STATUS status;
+	int i;
+	MV_CESA_SIZE_TEST *pMultiTest;
+
+	if (testOpen(idx) != MV_OK)
+		return;
+
+	if (iter == 0)
+		iter = CESA_DEF_ITER_NUM;
+
+	if (checkMode == CESA_SHOW_CHECK_MODE)
+		iter = 1;
+	else
+		checkMode = CESA_FULL_CHECK_MODE;
+
+	cesaTestCases[0].plainHexStr = inputData;
+	cesaTestCases[0].pCryptoIV = NULL;
+
+	switch (idx) {
+	case 302:
+		pMultiTest = mdMultiSizeTest302;
+		if (inputData == NULL)
+			cesaTestCases[0].plainHexStr = cesaDataHexStr3;
+		break;
+
+	case 304:
+		pMultiTest = mdMultiSizeTest304;
+		if (inputData == NULL)
+			cesaTestCases[0].plainHexStr = hashHexStr80;
+		break;
+
+	case 305:
+		pMultiTest = mdMultiSizeTest305;
+		if (inputData == NULL)
+			cesaTestCases[0].plainHexStr = hashHexStr80;
+		break;
+
+	case 402:
+		pMultiTest = sha1MultiSizeTest402;
+		if (inputData == NULL)
+			cesaTestCases[0].plainHexStr = hashHexStr80;
+		break;
+
+	case 404:
+		pMultiTest = sha1MultiSizeTest404;
+		if (inputData == NULL)
+			cesaTestCases[0].plainHexStr = hashHexStr80;
+		break;
+
+	case 405:
+		pMultiTest = sha1MultiSizeTest405;
+		if (inputData == NULL)
+			cesaTestCases[0].plainHexStr = hashHexStr80;
+		break;
+
+	case 602:
+		pMultiTest = tripleDesMdMultiSizeTest602;
+		if (inputData == NULL)
+			cesaTestCases[0].plainHexStr = hashHexStr80;
+		break;
+
+	case 603:
+		pMultiTest = tripleDesShaMultiSizeTest603;
+		if (inputData == NULL)
+			cesaTestCases[0].plainHexStr = hashHexStr80;
+		break;
+
+	case 604:
+		iter = 1;
+		pMultiTest = cbc3desMdMultiSizeTest604;
+		cesaTestCases[0].pCryptoIV = iv1;
+		if (inputData == NULL)
+			cesaTestCases[0].plainHexStr = hashHexStr80;
+		break;
+
+	case 605:
+		iter = 1;
+		pMultiTest = cbc3desShaMultiSizeTest605;
+		cesaTestCases[0].pCryptoIV = iv1;
+		if (inputData == NULL)
+			cesaTestCases[0].plainHexStr = hashHexStr80;
+		break;
+
+	case 606:
+		iter = 1;
+		pMultiTest = cbcAes128md5multiSizeTest606;
+		cesaTestCases[0].pCryptoIV = iv5;
+		if (inputData == NULL)
+			cesaTestCases[0].plainHexStr = hashHexStr80;
+		break;
+
+	case 607:
+		iter = 1;
+		pMultiTest = cbcAes128sha1multiSizeTest607;
+		cesaTestCases[0].pCryptoIV = iv5;
+		if (inputData == NULL)
+			cesaTestCases[0].plainHexStr = hashHexStr80;
+		break;
+
+	default:
+		iter = 1;
+		checkMode = CESA_SHOW_CHECK_MODE;
+		pMultiTest = mdMultiSizeTest302;
+		if (inputData == NULL)
+			cesaTestCases[0].plainHexStr = hashHexStr80;
+	}
+	i = 0;
+	while (pMultiTest[i].outputHexStr != NULL) {
+		cesaTestCases[0].cipherHexStr = (char *)pMultiTest[i].outputHexStr;
+		status = testRun(idx, 0, iter, pMultiTest[i].size, checkMode);
+		if (checkMode != CESA_SHOW_CHECK_MODE) {
+			cesaReqSize = pMultiTest[i].size;
+			printTestResults(idx, status, checkMode);
+		}
+		if (status != MV_OK)
+			break;
+		i++;
+	}
+	testClose(idx);
+/*
+    mvCesaDebugStatus();
+    cesaTestPrintStatus();
+*/
+}
+
+void open_session_test(int idx, int caseIdx, int iter)
+{
+	int reqIdError, cryptoError, openErrors, i;
+	int openErrDisp[100];
+	MV_STATUS status;
+
+	memset(openErrDisp, 0, sizeof(openErrDisp));
+	openErrors = 0;
+	reqIdError = 0;
+	cryptoError = 0;
+	for (i = 0; i < iter; i++) {
+		status = testOpen(idx);
+		if (status != MV_OK) {
+			openErrors++;
+			openErrDisp[status]++;
+		} else {
+			testRun(idx, caseIdx, 1, 0, CESA_FAST_CHECK_MODE);
+			if (cesaCryptoError > 0)
+				cryptoError++;
+			if (cesaReqIdError > 0)
+				reqIdError++;
+
+			testClose(idx);
+		}
+	}
+	if (cryptoError > 0)
+		mvOsPrintf("cryptoError : %d\n", cryptoError);
+	if (reqIdError > 0)
+		mvOsPrintf("reqIdError  : %d\n", reqIdError);
+
+	if (openErrors > 0) {
+		mvOsPrintf("Open Errors = %d\n", openErrors);
+		for (i = 0; i < 100; i++) {
+			if (openErrDisp[i] != 0)
+				mvOsPrintf("Error %d - occurs %d times\n", i, openErrDisp[i]);
+		}
+	}
+}
+
+void loopback_test(int idx, int iter, int size, char *pPlainData)
+{
+}
+
+#if defined(MV_VXWORKS)
+int testMode = 0;
+unsigned __TASKCONV cesaTask(void *args)
+{
+	int reqSize = cesaReqSize;
+
+	if (testMode == 0) {
+		cesaOneTest(cesaTestIdx, cesaCaseIdx, cesaIteration, reqSize, cesaCheckMode);
+	} else {
+		if (testMode == 1) {
+			cesaTest(cesaIteration, reqSize, cesaCheckMode);
+			combiTest(cesaIteration, reqSize, cesaCheckMode);
+		} else {
+			multiSizeTest(cesaIdx, cesaIteration, cesaCheckMode, NULL);
+		}
+	}
+	return 0;
+}
+
+void oneTest(int testIdx, int caseIdx, int iter, int reqSize, int checkMode)
+{
+	long rc;
+
+	cesaIteration = iter;
+	cesaReqSize = cesaRateSize = reqSize;
+	cesaCheckMode = checkMode;
+	testMode = 0;
+	cesaTestIdx = testIdx;
+	cesaCaseIdx = caseIdx;
+	rc = mvOsTaskCreate("CESA_T", 100, 4 * 1024, cesaTask, NULL, &cesaTaskId);
+	if (rc != MV_OK)
+		mvOsPrintf("hMW: Can't create CESA multiCmd test task, rc = %d\n", (int)rc);
+}
+
+void multiTest(int iter, int reqSize, int checkMode)
+{
+	long rc;
+
+	cesaIteration = iter;
+	cesaCheckMode = checkMode;
+	cesaReqSize = reqSize;
+	testMode = 1;
+	rc = mvOsTaskCreate("CESA_T", 100, 4 * 1024, cesaTask, NULL, &cesaTaskId);
+	if (rc != MV_OK)
+		mvOsPrintf("hMW: Can't create CESA multiCmd test task, rc = %d\n", (int)rc);
+}
+
+void sizeTest(int testIdx, int iter, int checkMode)
+{
+	long rc;
+
+	cesaIteration = iter;
+	cesaCheckMode = checkMode;
+	testMode = 2;
+	cesaIdx = testIdx;
+	rc = mvOsTaskCreate("CESA_T", 100, 4 * 1024, cesaTask, NULL, &cesaTaskId);
+	if (rc != MV_OK)
+		mvOsPrintf("hMW: Can't create CESA test task, rc = %d\n", (int)rc);
+}
+
+#endif /* MV_VXWORKS */
+
+extern void mvCesaDebugSA(short sid, int mode);
+void cesaTestPrintSession(int idx)
+{
+	int testIdx;
+	MV_CESA_TEST_SESSION *pTestSession;
+
+	pTestSession = getTestSessionDb(idx, &testIdx);
+	if (pTestSession == NULL) {
+		mvOsPrintf("Test %d is not exist\n", idx);
+		return;
+	}
+	pTestSession = &pTestSession[testIdx];
+
+	if (pTestSession->sid == -1) {
+		mvOsPrintf("Test session %d is not opened\n", idx);
+		return;
+	}
+
+	mvCesaDebugSA(pTestSession->sid, 1);
+}
+
+void cesaTestPrintStatus(void)
+{
+	mvOsPrintf("\n\t Cesa Test Status\n\n");
+
+	mvOsPrintf("isrCount=%d\n", cesaTestIsrCount[0]);
+
+#ifdef CESA_TEST_DEBUG
+	{
+		int i, j;
+		j = cesaTestTraceIdx;
+		mvOsPrintf("No  Type  Cause   rCause   errCause   Res     Time     pReady    pProc    pEmpty\n");
+		for (i = 0; i < MV_CESA_TEST_TRACE_SIZE; i++) {
+			mvOsPrintf
+			    ("%02d.  %d   0x%04x  0x%04x   0x%04x   0x%02x   0x%02x   %02d   0x%06x  %p  %p  %p\n", j,
+			     cesaTestTrace[j].type, cesaTestTrace[j].cause, cesaTestTrace[j].realCause,
+			     cesaTestTrace[j].dmaErrCause, cesaTestTrace[j].resources, cesaTestTrace[j].timeStamp,
+			     cesaTestTrace[j].pReqReady, cesaTestTrace[j].pReqProcess, cesaTestTrace[j].pReqEmpty);
+			j++;
+			if (j == MV_CESA_TEST_TRACE_SIZE)
+				j = 0;
+		}
+	}
+#endif /* CESA_TEST_DEBUG */
+}
+
+static struct of_device_id mv_cesa_dt_ids[] = {
+	{ .compatible = "marvell,armada-cesa", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mv_cesa_dt_ids);
+
+static struct platform_driver mv_cesa_driver = {
+	.driver = {
+		.name	= DRIVER_NAME,
+		.owner	= THIS_MODULE,
+		.of_match_table = of_match_ptr(mv_cesa_dt_ids),
+	},
+	.probe		= cesa_test_probe,
+	.remove		= cesa_test_remove,
+#ifdef CONFIG_PM
+	.resume		= cesa_resume,
+	.suspend	= cesa_suspend,
+#endif
+};
+
+static int __init cesa_test_init(void)
+{
+	return platform_driver_register(&mv_cesa_driver);
+}
+module_init(cesa_test_init);
+
+static void __exit cesa_test_exit(void)
+{
+	platform_driver_unregister(&mv_cesa_driver);
+}
+module_exit(cesa_test_exit);
+
+MODULE_LICENSE("Marvell/GPL");
+MODULE_AUTHOR("Ronen Shitrit");
+MODULE_DESCRIPTION("TEST module for Marvell CESA based SoC");
diff --git a/drivers/crypto/mvebu_cesa/hal/AES/mvAes.h b/drivers/crypto/mvebu_cesa/hal/AES/mvAes.h
new file mode 100644
index 000000000000..f41acb0fee0d
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/hal/AES/mvAes.h
@@ -0,0 +1,70 @@
+/* mvAes.h   v2.0   August '99
+* Reference ANSI C code
+*/
+
+/*  AES Cipher header file for ANSI C Submissions
+      Lawrence E. Bassham III
+      Computer Security Division
+      National Institute of Standards and Technology
+
+    April 15, 1998
+
+    This sample is to assist implementers developing to the Cryptographic
+	API Profile for AES Candidate Algorithm Submissions.  Please consult this
+	document as a cross-reference.
+
+    ANY CHANGES, WHERE APPROPRIATE, TO INFORMATION PROVIDED IN THIS FILE
+	MUST BE DOCUMENTED.  CHANGES ARE ONLY APPROPRIATE WHERE SPECIFIED WITH
+	THE STRING "CHANGE POSSIBLE".  FUNCTION CALLS AND THEIR PARAMETERS CANNOT
+	BE CHANGED.  STRUCTURES CAN BE ALTERED TO ALLOW IMPLEMENTERS TO INCLUDE
+	IMPLEMENTATION SPECIFIC INFORMATION.
+*/
+
+/*  Includes:
+	Standard include files
+*/
+
+#ifndef __mvAes_h__
+#define __mvAes_h__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mvOs.h"
+
+/*  Error Codes - CHANGE POSSIBLE: inclusion of additional error codes  */
+
+/*  Key direction is invalid, e.g., unknown value */
+#define     AES_BAD_KEY_DIR        -1
+
+/*  Key material not of correct length */
+#define     AES_BAD_KEY_MAT        -2
+
+/*  Key passed is not valid  */
+#define     AES_BAD_KEY_INSTANCE   -3
+
+/*  Params struct passed to cipherInit invalid */
+#define     AES_BAD_CIPHER_MODE    -4
+
+/*  Cipher in wrong state (e.g., not initialized) */
+#define     AES_BAD_CIPHER_STATE   -5
+
+#define     AES_BAD_CIPHER_INSTANCE   -7
+
+/*  Function protoypes  */
+/*  CHANGED: makeKey(): parameter blockLen added
+	this parameter is absolutely necessary if you want to
+	setup the round keys in a variable block length setting
+	cipherInit(): parameter blockLen added (for obvious reasons)
+ */
+	int aesMakeKey(MV_U8 *expandedKey, MV_U8 *keyMaterial, int keyLen, int blockLen);
+	int aesBlockEncrypt128(MV_U8 mode, MV_U8 *IV, MV_U8 *expandedKey, int keyLen,
+			       MV_U32 *plain, int numBlocks, MV_U32 *cipher);
+	int aesBlockDecrypt128(MV_U8 mode, MV_U8 *IV, MV_U8 *expandedKey, int keyLen,
+			       MV_U32 *plain, int numBlocks, MV_U32 *cipher);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/drivers/crypto/mvebu_cesa/hal/AES/mvAesAlg.c b/drivers/crypto/mvebu_cesa/hal/AES/mvAesAlg.c
new file mode 100644
index 000000000000..de62cc351601
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/hal/AES/mvAesAlg.c
@@ -0,0 +1,341 @@
+/* rijndael-alg-ref.c   v2.0   August '99
+ * Reference ANSI C code
+ * authors: Paulo Barreto
+ *          Vincent Rijmen, K.U.Leuven
+ *
+ * This code is placed in the public domain.
+ */
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "mvAesAlg.h"
+#include "mvAesBoxes.dat"
+
+MV_U8 mul1(MV_U8 aa, MV_U8 bb);
+void KeyAddition(MV_U8 a[4][MAXBC], MV_U8 rk[4][MAXBC], MV_U8 BC);
+void ShiftRow128Enc(MV_U8 a[4][MAXBC]);
+void ShiftRow128Dec(MV_U8 a[4][MAXBC]);
+void Substitution(MV_U8 a[4][MAXBC], MV_U8 box[256]);
+void MixColumn(MV_U8 a[4][MAXBC], MV_U8 rk[4][MAXBC]);
+void InvMixColumn(MV_U8 a[4][MAXBC]);
+
+#define mul(aa, bb) (mask[bb] & Alogtable[aa + Logtable[bb]])
+
+MV_U8 mul1(MV_U8 aa, MV_U8 bb)
+{
+	return mask[bb] & Alogtable[aa + Logtable[bb]];
+}
+
+void KeyAddition(MV_U8 a[4][MAXBC], MV_U8 rk[4][MAXBC], MV_U8 BC)
+{
+	/* Exor corresponding text input and round key input bytes
+	 */
+	((MV_U32 *) (&(a[0][0])))[0] ^= ((MV_U32 *) (&(rk[0][0])))[0];
+	((MV_U32 *) (&(a[1][0])))[0] ^= ((MV_U32 *) (&(rk[1][0])))[0];
+	((MV_U32 *) (&(a[2][0])))[0] ^= ((MV_U32 *) (&(rk[2][0])))[0];
+	((MV_U32 *) (&(a[3][0])))[0] ^= ((MV_U32 *) (&(rk[3][0])))[0];
+
+}
+
+void ShiftRow128Enc(MV_U8 a[4][MAXBC])
+{
+	/* Row 0 remains unchanged
+	 * The other three rows are shifted a variable amount
+	 */
+	MV_U8 tmp[MAXBC];
+
+	tmp[0] = a[1][1];
+	tmp[1] = a[1][2];
+	tmp[2] = a[1][3];
+	tmp[3] = a[1][0];
+
+	((MV_U32 *) (&(a[1][0])))[0] = ((MV_U32 *) (&(tmp[0])))[0];
+	/*
+	   a[1][0] = tmp[0];
+	   a[1][1] = tmp[1];
+	   a[1][2] = tmp[2];
+	   a[1][3] = tmp[3];
+	 */
+	tmp[0] = a[2][2];
+	tmp[1] = a[2][3];
+	tmp[2] = a[2][0];
+	tmp[3] = a[2][1];
+
+	((MV_U32 *) (&(a[2][0])))[0] = ((MV_U32 *) (&(tmp[0])))[0];
+	/*
+	   a[2][0] = tmp[0];
+	   a[2][1] = tmp[1];
+	   a[2][2] = tmp[2];
+	   a[2][3] = tmp[3];
+	 */
+	tmp[0] = a[3][3];
+	tmp[1] = a[3][0];
+	tmp[2] = a[3][1];
+	tmp[3] = a[3][2];
+
+	((MV_U32 *) (&(a[3][0])))[0] = ((MV_U32 *) (&(tmp[0])))[0];
+	/*
+	   a[3][0] = tmp[0];
+	   a[3][1] = tmp[1];
+	   a[3][2] = tmp[2];
+	   a[3][3] = tmp[3];
+	 */
+}
+
+void ShiftRow128Dec(MV_U8 a[4][MAXBC])
+{
+	/* Row 0 remains unchanged
+	 * The other three rows are shifted a variable amount
+	 */
+	MV_U8 tmp[MAXBC];
+
+	tmp[0] = a[1][3];
+	tmp[1] = a[1][0];
+	tmp[2] = a[1][1];
+	tmp[3] = a[1][2];
+
+	((MV_U32 *) (&(a[1][0])))[0] = ((MV_U32 *) (&(tmp[0])))[0];
+	/*
+	   a[1][0] = tmp[0];
+	   a[1][1] = tmp[1];
+	   a[1][2] = tmp[2];
+	   a[1][3] = tmp[3];
+	 */
+
+	tmp[0] = a[2][2];
+	tmp[1] = a[2][3];
+	tmp[2] = a[2][0];
+	tmp[3] = a[2][1];
+
+	((MV_U32 *) (&(a[2][0])))[0] = ((MV_U32 *) (&(tmp[0])))[0];
+	/*
+	   a[2][0] = tmp[0];
+	   a[2][1] = tmp[1];
+	   a[2][2] = tmp[2];
+	   a[2][3] = tmp[3];
+	 */
+
+	tmp[0] = a[3][1];
+	tmp[1] = a[3][2];
+	tmp[2] = a[3][3];
+	tmp[3] = a[3][0];
+
+	((MV_U32 *) (&(a[3][0])))[0] = ((MV_U32 *) (&(tmp[0])))[0];
+	/*
+	   a[3][0] = tmp[0];
+	   a[3][1] = tmp[1];
+	   a[3][2] = tmp[2];
+	   a[3][3] = tmp[3];
+	 */
+}
+
+void Substitution(MV_U8 a[4][MAXBC], MV_U8 box[256])
+{
+	/* Replace every byte of the input by the byte at that place
+	 * in the nonlinear S-box
+	 */
+	int i, j;
+
+	for (i = 0; i < 4; i++)
+		for (j = 0; j < 4; j++)
+			a[i][j] = box[a[i][j]];
+}
+
+void MixColumn(MV_U8 a[4][MAXBC], MV_U8 rk[4][MAXBC])
+{
+	/* Mix the four bytes of every column in a linear way
+	 */
+	MV_U8 b[4][MAXBC];
+	int i, j;
+
+	for (j = 0; j < 4; j++) {
+		b[0][j] = mul(25, a[0][j]) ^ mul(1, a[1][j]) ^ a[2][j] ^ a[3][j];
+		b[1][j] = mul(25, a[1][j]) ^ mul(1, a[2][j]) ^ a[3][j] ^ a[0][j];
+		b[2][j] = mul(25, a[2][j]) ^ mul(1, a[3][j]) ^ a[0][j] ^ a[1][j];
+		b[3][j] = mul(25, a[3][j]) ^ mul(1, a[0][j]) ^ a[1][j] ^ a[2][j];
+	}
+	for (i = 0; i < 4; i++)
+		/*for(j = 0; j < BC; j++) a[i][j] = b[i][j]; */
+		((MV_U32 *)(&(a[i][0])))[0] = ((MV_U32*)(&(b[i][0])))[0] ^ ((MV_U32*)(&(rk[i][0])))[0];;
+}
+
+void InvMixColumn(MV_U8 a[4][MAXBC])
+{
+	/* Mix the four bytes of every column in a linear way
+	 * This is the opposite operation of Mixcolumn
+	 */
+	MV_U8 b[4][MAXBC];
+	int i, j;
+
+	for (j = 0; j < 4; j++) {
+		b[0][j] = mul(223, a[0][j]) ^ mul(104, a[1][j]) ^ mul(238, a[2][j]) ^ mul(199, a[3][j]);
+		b[1][j] = mul(223, a[1][j]) ^ mul(104, a[2][j]) ^ mul(238, a[3][j]) ^ mul(199, a[0][j]);
+		b[2][j] = mul(223, a[2][j]) ^ mul(104, a[3][j]) ^ mul(238, a[0][j]) ^ mul(199, a[1][j]);
+		b[3][j] = mul(223, a[3][j]) ^ mul(104, a[0][j]) ^ mul(238, a[1][j]) ^ mul(199, a[2][j]);
+	}
+	for (i = 0; i < 4; i++)
+		/*for(j = 0; j < BC; j++) a[i][j] = b[i][j]; */
+		((MV_U32 *) (&(a[i][0])))[0] = ((MV_U32 *) (&(b[i][0])))[0];
+}
+
+int rijndaelKeySched(MV_U8 k[4][MAXKC], int keyBits, int blockBits, MV_U8 W[MAXROUNDS + 1][4][MAXBC])
+{
+	/* Calculate the necessary round keys
+	 * The number of calculations depends on keyBits and blockBits
+	 */
+	int KC, BC, ROUNDS;
+	int i, j, t, rconpointer = 0;
+	MV_U8 tk[4][MAXKC];
+
+	switch (keyBits) {
+	case 128:
+		KC = 4;
+		break;
+	case 192:
+		KC = 6;
+		break;
+	case 256:
+		KC = 8;
+		break;
+	default:
+		return (-1);
+	}
+
+	switch (blockBits) {
+	case 128:
+		BC = 4;
+		break;
+	case 192:
+		BC = 6;
+		break;
+	case 256:
+		BC = 8;
+		break;
+	default:
+		return (-2);
+	}
+
+	switch (keyBits >= blockBits ? keyBits : blockBits) {
+	case 128:
+		ROUNDS = 10;
+		break;
+	case 192:
+		ROUNDS = 12;
+		break;
+	case 256:
+		ROUNDS = 14;
+		break;
+	default:
+		return (-3);	/* this cannot happen */
+	}
+
+	for (j = 0; j < KC; j++)
+		for (i = 0; i < 4; i++)
+			tk[i][j] = k[i][j];
+	t = 0;
+	/* copy values into round key array */
+	for (j = 0; (j < KC) && (t < (ROUNDS + 1) * BC); j++, t++)
+		for (i = 0; i < 4; i++)
+			W[t / BC][i][t % BC] = tk[i][j];
+
+	while (t < (ROUNDS + 1) * BC) {	/* while not enough round key material calculated */
+		/* calculate new values */
+		for (i = 0; i < 4; i++)
+			tk[i][0] ^= S[tk[(i + 1) % 4][KC - 1]];
+		tk[0][0] ^= rcon[rconpointer++];
+
+		if (KC != 8)
+			for (j = 1; j < KC; j++)
+				for (i = 0; i < 4; i++)
+					tk[i][j] ^= tk[i][j - 1];
+		else {
+			for (j = 1; j < KC / 2; j++)
+				for (i = 0; i < 4; i++)
+					tk[i][j] ^= tk[i][j - 1];
+			for (i = 0; i < 4; i++)
+				tk[i][KC / 2] ^= S[tk[i][KC / 2 - 1]];
+			for (j = KC / 2 + 1; j < KC; j++)
+				for (i = 0; i < 4; i++)
+					tk[i][j] ^= tk[i][j - 1];
+		}
+		/* copy values into round key array */
+		for (j = 0; (j < KC) && (t < (ROUNDS + 1) * BC); j++, t++)
+			for (i = 0; i < 4; i++)
+				W[t / BC][i][t % BC] = tk[i][j];
+	}
+
+	return 0;
+}
+
+int rijndaelEncrypt128(MV_U8 a[4][MAXBC], MV_U8 rk[MAXROUNDS + 1][4][MAXBC], int rounds)
+{
+	/* Encryption of one block.
+	 */
+	int r, BC, ROUNDS;
+
+	BC = 4;
+	ROUNDS = rounds;
+
+	/* begin with a key addition
+	 */
+
+	KeyAddition(a, rk[0], BC);
+
+	/* ROUNDS-1 ordinary rounds
+	 */
+	for (r = 1; r < ROUNDS; r++) {
+		Substitution(a, S);
+		ShiftRow128Enc(a);
+		MixColumn(a, rk[r]);
+		/*KeyAddition(a,rk[r],BC); */
+	}
+
+	/* Last round is special: there is no MixColumn
+	 */
+	Substitution(a, S);
+	ShiftRow128Enc(a);
+	KeyAddition(a, rk[ROUNDS], BC);
+
+	return 0;
+}
+
+int rijndaelDecrypt128(MV_U8 a[4][MAXBC], MV_U8 rk[MAXROUNDS + 1][4][MAXBC], int rounds)
+{
+	int r, BC, ROUNDS;
+
+	BC = 4;
+	ROUNDS = rounds;
+
+	/* To decrypt: apply the inverse operations of the encrypt routine,
+	 *             in opposite order
+	 *
+	 * (KeyAddition is an involution: it 's equal to its inverse)
+	 * (the inverse of Substitution with table S is Substitution with the inverse table of S)
+	 * (the inverse of Shiftrow is Shiftrow over a suitable distance)
+	 */
+
+	/* First the special round:
+	 *   without InvMixColumn
+	 *   with extra KeyAddition
+	 */
+	KeyAddition(a, rk[ROUNDS], BC);
+	ShiftRow128Dec(a);
+	Substitution(a, Si);
+
+	/* ROUNDS-1 ordinary rounds
+	 */
+	for (r = ROUNDS - 1; r > 0; r--) {
+		KeyAddition(a, rk[r], BC);
+		InvMixColumn(a);
+		ShiftRow128Dec(a);
+		Substitution(a, Si);
+
+	}
+
+	/* End with the extra key addition
+	 */
+
+	KeyAddition(a, rk[0], BC);
+
+	return 0;
+}
diff --git a/drivers/crypto/mvebu_cesa/hal/AES/mvAesAlg.h b/drivers/crypto/mvebu_cesa/hal/AES/mvAesAlg.h
new file mode 100644
index 000000000000..864cfc437fcc
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/hal/AES/mvAesAlg.h
@@ -0,0 +1,24 @@
+/* rijndael-alg-ref.h   v2.0   August '99
+ * Reference ANSI C code
+ * authors: Paulo Barreto
+ *          Vincent Rijmen, K.U.Leuven
+ */
+#ifndef __RIJNDAEL_ALG_H
+#define __RIJNDAEL_ALG_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAXBC				(128/32)
+#define MAXKC				(256/32)
+#define MAXROUNDS			14
+
+	int rijndaelKeySched(MV_U8 k[4][MAXKC], int keyBits, int blockBits, MV_U8 rk[MAXROUNDS + 1][4][MAXBC]);
+	int rijndaelEncrypt128(MV_U8 a[4][MAXBC], MV_U8 rk[MAXROUNDS + 1][4][MAXBC], int rounds);
+	int rijndaelDecrypt128(MV_U8 a[4][MAXBC], MV_U8 rk[MAXROUNDS + 1][4][MAXBC], int rounds);
+
+#ifdef __cplusplus
+}
+#endif
+#endif				/* __RIJNDAEL_ALG_H */
diff --git a/drivers/crypto/mvebu_cesa/hal/AES/mvAesApi.c b/drivers/crypto/mvebu_cesa/hal/AES/mvAesApi.c
new file mode 100644
index 000000000000..c5915d91b5d2
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/hal/AES/mvAesApi.c
@@ -0,0 +1,270 @@
+/* rijndael-api-ref.c   v2.1   April 2000
+ * Reference ANSI C code
+ * authors: v2.0 Paulo Barreto
+ *               Vincent Rijmen, K.U.Leuven
+ *          v2.1 Vincent Rijmen, K.U.Leuven
+ *
+ * This code is placed in the public domain.
+ */
+
+#include "mvCommon.h"
+#include "mvOs.h"
+
+#include "mvAes.h"
+#include "mvAesAlg.h"
+
+/*  Defines:
+	Add any additional defines you need
+*/
+
+#define     MODE_ECB        1	/*  Are we ciphering in ECB mode?   */
+#define     MODE_CBC        2	/*  Are we ciphering in CBC mode?   */
+#define     MODE_CFB1       3	/*  Are we ciphering in 1-bit CFB mode? */
+
+int aesMakeKey(MV_U8 *expandedKey, MV_U8 *keyMaterial, int keyLen, int blockLen)
+{
+	MV_U8 W[MAXROUNDS + 1][4][MAXBC];
+	MV_U8 k[4][MAXKC];
+	MV_U8 j;
+	int i, rounds, KC;
+
+	if (expandedKey == NULL)
+		return AES_BAD_KEY_INSTANCE;
+
+	if (!((keyLen == 128) || (keyLen == 192) || (keyLen == 256)))
+		return AES_BAD_KEY_MAT;
+
+	if (keyMaterial == NULL)
+		return AES_BAD_KEY_MAT;
+
+	/* initialize key schedule: */
+	for (i = 0; i < keyLen / 8; i++) {
+		j = keyMaterial[i];
+		k[i % 4][i / 4] = j;
+	}
+
+	rijndaelKeySched(k, keyLen, blockLen, W);
+#ifdef MV_AES_DEBUG
+	{
+		MV_U8 *pW = &W[0][0][0];
+		int x;
+
+		mvOsPrintf("Expended Key: size = %d\n", sizeof(W));
+		for (i = 0; i < sizeof(W); i++)
+			mvOsPrintf("%02x ", pW[i]);
+
+		for (i = 0; i < MAXROUNDS + 1; i++) {
+			mvOsPrintf("\n Round #%02d: ", i);
+			for (x = 0; x < MAXBC; x++)
+				mvOsPrintf("%02x%02x%02x%02x ", W[i][0][x], W[i][1][x], W[i][2][x], W[i][3][x]);
+			mvOsPrintf("\n");
+		}
+	}
+#endif /* MV_AES_DEBUG */
+	switch (keyLen) {
+	case 128:
+		rounds = 10;
+		KC = 4;
+		break;
+	case 192:
+		rounds = 12;
+		KC = 6;
+		break;
+	case 256:
+		rounds = 14;
+		KC = 8;
+		break;
+	default:
+		return (-1);
+	}
+
+	for (i = 0; i < MAXBC; i++)
+		for (j = 0; j < 4; j++)
+			expandedKey[i * 4 + j] = W[rounds][j][i];
+
+	for (; i < KC; i++)
+		for (j = 0; j < 4; j++)
+			expandedKey[i * 4 + j] = W[rounds - 1][j][i + MAXBC - KC];
+
+	return 0;
+}
+
+int aesBlockEncrypt128(MV_U8 mode, MV_U8 *IV, MV_U8 *expandedKey, int keyLen,
+		       MV_U32 *plain, int numBlocks, MV_U32 *cipher)
+{
+	int i, j, t;
+	MV_U8 block[4][MAXBC];
+	int rounds;
+	char *input, *outBuffer;
+
+	input = (char *)plain;
+	outBuffer = (char *)cipher;
+
+	/* check parameter consistency: */
+	if ((expandedKey == NULL) || ((keyLen != 128) && (keyLen != 192) && (keyLen != 256)))
+		return AES_BAD_KEY_MAT;
+
+	if ((mode != MODE_ECB && mode != MODE_CBC))
+		return AES_BAD_CIPHER_STATE;
+
+	switch (keyLen) {
+	case 128:
+		rounds = 10;
+		break;
+	case 192:
+		rounds = 12;
+		break;
+	case 256:
+		rounds = 14;
+		break;
+	default:
+		return (-3);	/* this cannot happen */
+	}
+
+	switch (mode) {
+	case MODE_ECB:
+		for (i = 0; i < numBlocks; i++) {
+			for (j = 0; j < 4; j++) {
+				for (t = 0; t < 4; t++)
+					/* parse input stream into rectangular array */
+					block[t][j] = input[16 * i + 4 * j + t] & 0xFF;
+			}
+			rijndaelEncrypt128(block, (MV_U8(*)[4][MAXBC]) expandedKey, rounds);
+			for (j = 0; j < 4; j++) {
+				/* parse rectangular array into output ciphertext bytes */
+				for (t = 0; t < 4; t++)
+					outBuffer[16 * i + 4 * j + t] = (MV_U8) block[t][j];
+			}
+		}
+		break;
+
+	case MODE_CBC:
+		for (j = 0; j < 4; j++) {
+			for (t = 0; t < 4; t++)
+				/* parse initial value into rectangular array */
+				block[t][j] = IV[t + 4 * j] & 0xFF;
+		}
+		for (i = 0; i < numBlocks; i++) {
+			for (j = 0; j < 4; j++) {
+				for (t = 0; t < 4; t++)
+					/* parse input stream into rectangular array and exor with
+					   IV or the previous ciphertext */
+					block[t][j] ^= input[16 * i + 4 * j + t] & 0xFF;
+			}
+			rijndaelEncrypt128(block, (MV_U8(*)[4][MAXBC]) expandedKey, rounds);
+			for (j = 0; j < 4; j++) {
+				/* parse rectangular array into output ciphertext bytes */
+				for (t = 0; t < 4; t++)
+					outBuffer[16 * i + 4 * j + t] = (MV_U8) block[t][j];
+			}
+		}
+		break;
+
+	default:
+		return AES_BAD_CIPHER_STATE;
+	}
+
+	return 0;
+}
+
+int aesBlockDecrypt128(MV_U8 mode, MV_U8 *IV, MV_U8 *expandedKey, int keyLen,
+		       MV_U32 *srcData, int numBlocks, MV_U32 *dstData)
+{
+	int i, j, t;
+	MV_U8 block[4][MAXBC];
+	MV_U8 iv[4][MAXBC];
+	int rounds;
+	char *input, *outBuffer;
+
+	input = (char *)srcData;
+	outBuffer = (char *)dstData;
+
+	if (expandedKey == NULL)
+		return AES_BAD_KEY_MAT;
+
+	/* check parameter consistency: */
+	if (keyLen != 128 && keyLen != 192 && keyLen != 256)
+		return AES_BAD_KEY_MAT;
+
+	if ((mode != MODE_ECB && mode != MODE_CBC))
+		return AES_BAD_CIPHER_STATE;
+
+	switch (keyLen) {
+	case 128:
+		rounds = 10;
+		break;
+	case 192:
+		rounds = 12;
+		break;
+	case 256:
+		rounds = 14;
+		break;
+	default:
+		return (-3);	/* this cannot happen */
+	}
+
+	switch (mode) {
+	case MODE_ECB:
+		for (i = 0; i < numBlocks; i++) {
+			for (j = 0; j < 4; j++) {
+				for (t = 0; t < 4; t++) {
+					/* parse input stream into rectangular array */
+					block[t][j] = input[16 * i + 4 * j + t] & 0xFF;
+				}
+			}
+			rijndaelDecrypt128(block, (MV_U8(*)[4][MAXBC]) expandedKey, rounds);
+			for (j = 0; j < 4; j++) {
+				/* parse rectangular array into output ciphertext bytes */
+				for (t = 0; t < 4; t++)
+					outBuffer[16 * i + 4 * j + t] = (MV_U8) block[t][j];
+			}
+		}
+		break;
+
+	case MODE_CBC:
+		/* first block */
+		for (j = 0; j < 4; j++) {
+			for (t = 0; t < 4; t++) {
+				/* parse input stream into rectangular array */
+				block[t][j] = input[4 * j + t] & 0xFF;
+				iv[t][j] = block[t][j];
+			}
+		}
+		rijndaelDecrypt128(block, (MV_U8(*)[4][MAXBC]) expandedKey, rounds);
+
+		for (j = 0; j < 4; j++) {
+			/* exor the IV and parse rectangular array into output ciphertext bytes */
+			for (t = 0; t < 4; t++) {
+				outBuffer[4 * j + t] = (MV_U8) (block[t][j] ^ IV[t + 4 * j]);
+				IV[t + 4 * j] = iv[t][j];
+			}
+		}
+
+		/* next blocks */
+		for (i = 1; i < numBlocks; i++) {
+			for (j = 0; j < 4; j++) {
+				for (t = 0; t < 4; t++) {
+					/* parse input stream into rectangular array */
+					iv[t][j] = input[16 * i + 4 * j + t] & 0xFF;
+					block[t][j] = iv[t][j];
+				}
+			}
+			rijndaelDecrypt128(block, (MV_U8(*)[4][MAXBC]) expandedKey, rounds);
+
+			for (j = 0; j < 4; j++) {
+				/* exor previous ciphertext block and parse rectangular array
+				   into output ciphertext bytes */
+				for (t = 0; t < 4; t++) {
+					outBuffer[16 * i + 4 * j + t] = (MV_U8) (block[t][j] ^ IV[t + 4 * j]);
+					IV[t + 4 * j] = iv[t][j];
+				}
+			}
+		}
+		break;
+
+	default:
+		return AES_BAD_CIPHER_STATE;
+	}
+
+	return 0;
+}
diff --git a/drivers/crypto/mvebu_cesa/hal/AES/mvAesBoxes.dat b/drivers/crypto/mvebu_cesa/hal/AES/mvAesBoxes.dat
new file mode 100644
index 000000000000..f1cbab7dc813
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/hal/AES/mvAesBoxes.dat
@@ -0,0 +1,125 @@
+static MV_U8 mask[256] = {
+	0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+};
+
+static MV_U8 Logtable[256] = {
+	0, 0, 25, 1, 50, 2, 26, 198, 75, 199, 27, 104, 51, 238, 223, 3,
+	100, 4, 224, 14, 52, 141, 129, 239, 76, 113, 8, 200, 248, 105, 28, 193,
+	125, 194, 29, 181, 249, 185, 39, 106, 77, 228, 166, 114, 154, 201, 9, 120,
+	101, 47, 138, 5, 33, 15, 225, 36, 18, 240, 130, 69, 53, 147, 218, 142,
+	150, 143, 219, 189, 54, 208, 206, 148, 19, 92, 210, 241, 64, 70, 131, 56,
+	102, 221, 253, 48, 191, 6, 139, 98, 179, 37, 226, 152, 34, 136, 145, 16,
+	126, 110, 72, 195, 163, 182, 30, 66, 58, 107, 40, 84, 250, 133, 61, 186,
+	43, 121, 10, 21, 155, 159, 94, 202, 78, 212, 172, 229, 243, 115, 167, 87,
+	175, 88, 168, 80, 244, 234, 214, 116, 79, 174, 233, 213, 231, 230, 173, 232,
+	44, 215, 117, 122, 235, 22, 11, 245, 89, 203, 95, 176, 156, 169, 81, 160,
+	127, 12, 246, 111, 23, 196, 73, 236, 216, 67, 31, 45, 164, 118, 123, 183,
+	204, 187, 62, 90, 251, 96, 177, 134, 59, 82, 161, 108, 170, 85, 41, 157,
+	151, 178, 135, 144, 97, 190, 220, 252, 188, 149, 207, 205, 55, 63, 91, 209,
+	83, 57, 132, 60, 65, 162, 109, 71, 20, 42, 158, 93, 86, 242, 211, 171,
+	68, 17, 146, 217, 35, 32, 46, 137, 180, 124, 184, 38, 119, 153, 227, 165,
+	103, 74, 237, 222, 197, 49, 254, 24, 13, 99, 140, 128, 192, 247, 112, 7,
+};
+
+static MV_U8 Alogtable[512] = {
+	1, 3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19, 53,
+	95, 225, 56, 72, 216, 115, 149, 164, 247, 2, 6, 10, 30, 34, 102, 170,
+	229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217, 112, 144, 171, 230, 49,
+	83, 245, 4, 12, 20, 60, 68, 204, 79, 209, 104, 184, 211, 110, 178, 205,
+	76, 212, 103, 169, 224, 59, 77, 215, 98, 166, 241, 8, 24, 40, 120, 136,
+	131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206, 73, 219, 118, 154,
+	181, 196, 87, 249, 16, 48, 80, 240, 11, 29, 39, 105, 187, 214, 97, 163,
+	254, 25, 43, 125, 135, 146, 173, 236, 47, 113, 147, 174, 233, 32, 96, 160,
+	251, 22, 58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65,
+	195, 94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218, 117,
+	159, 186, 213, 100, 172, 239, 42, 126, 130, 157, 188, 223, 122, 142, 137, 128,
+	155, 182, 193, 88, 232, 35, 101, 175, 234, 37, 111, 177, 200, 67, 197, 84,
+	252, 31, 33, 99, 165, 244, 7, 9, 27, 45, 119, 153, 176, 203, 70, 202,
+	69, 207, 74, 222, 121, 139, 134, 145, 168, 227, 62, 66, 198, 81, 243, 14,
+	18, 54, 90, 238, 41, 123, 141, 140, 143, 138, 133, 148, 167, 242, 13, 23,
+	57, 75, 221, 124, 132, 151, 162, 253, 28, 36, 108, 180, 199, 82, 246, 1,
+
+	3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19, 53,
+	95, 225, 56, 72, 216, 115, 149, 164, 247, 2, 6, 10, 30, 34, 102, 170,
+	229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217, 112, 144, 171, 230, 49,
+	83, 245, 4, 12, 20, 60, 68, 204, 79, 209, 104, 184, 211, 110, 178, 205,
+	76, 212, 103, 169, 224, 59, 77, 215, 98, 166, 241, 8, 24, 40, 120, 136,
+	131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206, 73, 219, 118, 154,
+	181, 196, 87, 249, 16, 48, 80, 240, 11, 29, 39, 105, 187, 214, 97, 163,
+	254, 25, 43, 125, 135, 146, 173, 236, 47, 113, 147, 174, 233, 32, 96, 160,
+	251, 22, 58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65,
+	195, 94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218, 117,
+	159, 186, 213, 100, 172, 239, 42, 126, 130, 157, 188, 223, 122, 142, 137, 128,
+	155, 182, 193, 88, 232, 35, 101, 175, 234, 37, 111, 177, 200, 67, 197, 84,
+	252, 31, 33, 99, 165, 244, 7, 9, 27, 45, 119, 153, 176, 203, 70, 202,
+	69, 207, 74, 222, 121, 139, 134, 145, 168, 227, 62, 66, 198, 81, 243, 14,
+	18, 54, 90, 238, 41, 123, 141, 140, 143, 138, 133, 148, 167, 242, 13, 23,
+	57, 75, 221, 124, 132, 151, 162, 253, 28, 36, 108, 180, 199, 82, 246, 1,
+
+};
+
+static MV_U8 S[256] = {
+	99, 124, 119, 123, 242, 107, 111, 197, 48, 1, 103, 43, 254, 215, 171, 118,
+	202, 130, 201, 125, 250, 89, 71, 240, 173, 212, 162, 175, 156, 164, 114, 192,
+	183, 253, 147, 38, 54, 63, 247, 204, 52, 165, 229, 241, 113, 216, 49, 21,
+	4, 199, 35, 195, 24, 150, 5, 154, 7, 18, 128, 226, 235, 39, 178, 117,
+	9, 131, 44, 26, 27, 110, 90, 160, 82, 59, 214, 179, 41, 227, 47, 132,
+	83, 209, 0, 237, 32, 252, 177, 91, 106, 203, 190, 57, 74, 76, 88, 207,
+	208, 239, 170, 251, 67, 77, 51, 133, 69, 249, 2, 127, 80, 60, 159, 168,
+	81, 163, 64, 143, 146, 157, 56, 245, 188, 182, 218, 33, 16, 255, 243, 210,
+	205, 12, 19, 236, 95, 151, 68, 23, 196, 167, 126, 61, 100, 93, 25, 115,
+	96, 129, 79, 220, 34, 42, 144, 136, 70, 238, 184, 20, 222, 94, 11, 219,
+	224, 50, 58, 10, 73, 6, 36, 92, 194, 211, 172, 98, 145, 149, 228, 121,
+	231, 200, 55, 109, 141, 213, 78, 169, 108, 86, 244, 234, 101, 122, 174, 8,
+	186, 120, 37, 46, 28, 166, 180, 198, 232, 221, 116, 31, 75, 189, 139, 138,
+	112, 62, 181, 102, 72, 3, 246, 14, 97, 53, 87, 185, 134, 193, 29, 158,
+	225, 248, 152, 17, 105, 217, 142, 148, 155, 30, 135, 233, 206, 85, 40, 223,
+	140, 161, 137, 13, 191, 230, 66, 104, 65, 153, 45, 15, 176, 84, 187, 22,
+};
+
+static MV_U8 Si[256] = {
+	82, 9, 106, 213, 48, 54, 165, 56, 191, 64, 163, 158, 129, 243, 215, 251,
+	124, 227, 57, 130, 155, 47, 255, 135, 52, 142, 67, 68, 196, 222, 233, 203,
+	84, 123, 148, 50, 166, 194, 35, 61, 238, 76, 149, 11, 66, 250, 195, 78,
+	8, 46, 161, 102, 40, 217, 36, 178, 118, 91, 162, 73, 109, 139, 209, 37,
+	114, 248, 246, 100, 134, 104, 152, 22, 212, 164, 92, 204, 93, 101, 182, 146,
+	108, 112, 72, 80, 253, 237, 185, 218, 94, 21, 70, 87, 167, 141, 157, 132,
+	144, 216, 171, 0, 140, 188, 211, 10, 247, 228, 88, 5, 184, 179, 69, 6,
+	208, 44, 30, 143, 202, 63, 15, 2, 193, 175, 189, 3, 1, 19, 138, 107,
+	58, 145, 17, 65, 79, 103, 220, 234, 151, 242, 207, 206, 240, 180, 230, 115,
+	150, 172, 116, 34, 231, 173, 53, 133, 226, 249, 55, 232, 28, 117, 223, 110,
+	71, 241, 26, 113, 29, 41, 197, 137, 111, 183, 98, 14, 170, 24, 190, 27,
+	252, 86, 62, 75, 198, 210, 121, 32, 154, 219, 192, 254, 120, 205, 90, 244,
+	31, 221, 168, 51, 136, 7, 199, 49, 177, 18, 16, 89, 39, 128, 236, 95,
+	96, 81, 127, 169, 25, 181, 74, 13, 45, 229, 122, 159, 147, 201, 156, 239,
+	160, 224, 59, 77, 174, 42, 245, 176, 200, 235, 187, 60, 131, 83, 153, 97,
+	23, 43, 4, 126, 186, 119, 214, 38, 225, 105, 20, 99, 85, 33, 12, 125,
+};
+
+/*
+static MV_U8 iG[4][4] = {
+{0x0e, 0x09, 0x0d, 0x0b},
+{0x0b, 0x0e, 0x09, 0x0d},
+{0x0d, 0x0b, 0x0e, 0x09},
+{0x09, 0x0d, 0x0b, 0x0e},
+};
+*/
+static MV_U32 rcon[30] = {
+	0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc,
+	    0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91,
+};
diff --git a/drivers/crypto/mvebu_cesa/hal/mvCesa.c b/drivers/crypto/mvebu_cesa/hal/mvCesa.c
new file mode 100644
index 000000000000..b39a8c573375
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/hal/mvCesa.c
@@ -0,0 +1,3229 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#ifndef CONFIG_OF
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#endif
+#include "mvSysCesaConfig.h"
+#include "mvCesaRegs.h"
+#include "mvCesa.h"
+#include "AES/mvAes.h"
+#include "mvMD5.h"
+#include "mvSHA1.h"
+#include "mvSHA256.h"
+
+#undef CESA_DEBUG
+
+/********** Global variables **********/
+
+/*  If request size is more than MV_CESA_MAX_BUF_SIZE the
+ *  request is processed as fragmented request.
+ */
+
+MV_CESA_STATS cesaStats;
+MV_16 cesaLastSid[MV_CESA_CHANNELS];
+MV_CESA_SA **pCesaSAD = NULL;
+MV_U32 cesaMaxSA = 0;
+MV_CESA_REQ *pCesaReqFirst[MV_CESA_CHANNELS];
+MV_CESA_REQ *pCesaReqLast[MV_CESA_CHANNELS];
+MV_CESA_REQ *pCesaReqEmpty[MV_CESA_CHANNELS];
+MV_CESA_REQ *pCesaReqProcess[MV_CESA_CHANNELS];
+#if defined(MV_CESA_INT_COALESCING_SUPPORT) || defined(CONFIG_OF)
+MV_CESA_REQ *pCesaReqStartNext[MV_CESA_CHANNELS];
+MV_CESA_REQ *pCesaReqProcessCurr[MV_CESA_CHANNELS];
+#endif
+
+int cesaQueueDepth[MV_CESA_CHANNELS];
+int cesaReqResources[MV_CESA_CHANNELS];
+
+MV_CESA_SRAM_MAP *cesaSramVirtPtr[MV_CESA_CHANNELS];
+void *cesaOsHandle = NULL;
+MV_U16 ctrlModel;
+MV_U8 ctrlRev;
+MV_U32 sha2CmdVal;
+
+#if defined(MV_CESA_CHAIN_MODE) || defined(CONFIG_OF)
+
+MV_U32 cesaChainLength[MV_CESA_CHANNELS];
+int chainReqNum[MV_CESA_CHANNELS];
+MV_U32 chainIndex[MV_CESA_CHANNELS];
+MV_CESA_REQ *pNextActiveChain[MV_CESA_CHANNELS];
+MV_CESA_REQ *pEndCurrChain[MV_CESA_CHANNELS];
+MV_BOOL isFirstReq[MV_CESA_CHANNELS];
+
+#endif /* MV_CESA_CHAIN_MODE || CONFIG_OF */
+
+static MV_CESA_HAL_DATA cesaHalData;
+
+static INLINE MV_U8 *mvCesaSramAddrGet(MV_U8 chan)
+{
+	return (MV_U8 *) cesaHalData.sramPhysBase[chan];
+}
+
+static INLINE MV_ULONG mvCesaSramVirtToPhys(MV_U8 chan, void *pDev, MV_U8 *pSramVirt)
+{
+	return (MV_ULONG) (pSramVirt - cesaHalData.sramVirtBase[chan]) + cesaHalData.sramPhysBase[chan];
+}
+
+/* Internal Function prototypes */
+
+static INLINE void mvCesaSramDescrBuild(MV_U8 chan, MV_U32 config, int frag,
+					int cryptoOffset, int ivOffset, int cryptoLength,
+					int macOffset, int digestOffset, int macLength, int macTotalLen,
+					MV_CESA_REQ *pCesaReq, MV_DMA_DESC *pDmaDesc);
+
+static INLINE void mvCesaSramSaUpdate(MV_U8 chan, short sid, MV_DMA_DESC *pDmaDesc);
+
+static INLINE int mvCesaDmaCopyPrepare(MV_U8 chan, MV_CESA_MBUF *pMbuf, MV_U8 *pSramBuf,
+				       MV_DMA_DESC *pDmaDesc, MV_BOOL isToMbuf,
+				       int offset, int copySize, MV_BOOL skipFlush);
+
+static void mvCesaHmacIvGet(MV_CESA_MAC_MODE macMode, unsigned char key[], int keyLength,
+			    unsigned char innerIV[], unsigned char outerIV[]);
+
+static MV_STATUS mvCesaFragAuthComplete(MV_U8 chan, MV_CESA_REQ *pReq, MV_CESA_SA *pSA, int macDataSize);
+
+static MV_CESA_COMMAND *mvCesaCtrModeInit(void);
+
+static MV_STATUS mvCesaCtrModePrepare(MV_CESA_COMMAND *pCtrModeCmd, MV_CESA_COMMAND *pCmd);
+static MV_STATUS mvCesaCtrModeComplete(MV_CESA_COMMAND *pOrgCmd, MV_CESA_COMMAND *pCmd);
+static void mvCesaCtrModeFinish(MV_CESA_COMMAND *pCmd);
+
+static INLINE MV_STATUS mvCesaReqProcess(MV_U8 chan, MV_CESA_REQ *pReq);
+static MV_STATUS mvCesaFragReqProcess(MV_U8 chan, MV_CESA_REQ *pReq, MV_U8 frag);
+
+static INLINE MV_STATUS mvCesaParamCheck(MV_CESA_SA *pSA, MV_CESA_COMMAND *pCmd, MV_U8 *pFixOffset);
+static INLINE MV_STATUS mvCesaFragParamCheck(MV_U8 chan, MV_CESA_SA *pSA, MV_CESA_COMMAND *pCmd);
+
+static INLINE void mvCesaFragSizeFind(MV_CESA_SA *pSA, MV_CESA_REQ *pReq,
+				      int cryptoOffset, int macOffset,
+				      int *pCopySize, int *pCryptoDataSize, int *pMacDataSize);
+static MV_STATUS mvCesaMbufCacheUnmap(MV_CESA_MBUF *pMbuf, int offset, int size);
+static MV_STATUS mvCesaUpdateSADSize(MV_U32 size);
+
+/* Go to the next request in the request queue */
+static INLINE MV_CESA_REQ *MV_CESA_REQ_NEXT_PTR(MV_U8 chan, MV_CESA_REQ *pReq)
+{
+	if (pReq == pCesaReqLast[chan])
+		return pCesaReqFirst[chan];
+
+	return (pReq + 1);
+}
+
+/* Go to the previous request in the request queue */
+static INLINE MV_CESA_REQ *MV_CESA_REQ_PREV_PTR(MV_U8 chan, MV_CESA_REQ *pReq)
+{
+	if (pReq == pCesaReqFirst[chan])
+		return pCesaReqLast[chan];
+
+	return (pReq - 1);
+}
+
+static INLINE void mvCesaReqProcessStart(MV_U8 chan, MV_CESA_REQ *pReq)
+{
+	MV_32 frag;
+
+#ifdef MV_CESA_CHAIN_MODE
+	pReq->state = MV_CESA_CHAIN;
+#elif CONFIG_OF
+	if (mv_cesa_feature == CHAIN)
+		pReq->state = MV_CESA_CHAIN;
+	else
+		pReq->state = MV_CESA_PROCESS;
+#else
+	pReq->state = MV_CESA_PROCESS;
+#endif /* MV_CESA_CHAIN_MODE */
+
+	cesaStats.startCount++;
+	(pReq->use)++;
+
+	if (pReq->fragMode == MV_CESA_FRAG_NONE) {
+		frag = 0;
+	} else {
+		frag = pReq->frags.nextFrag;
+		pReq->frags.nextFrag++;
+	}
+
+	/* Enable TDMA engine */
+	MV_REG_WRITE(MV_CESA_TDMA_CURR_DESC_PTR_REG(chan), 0);
+	MV_REG_WRITE(MV_CESA_TDMA_NEXT_DESC_PTR_REG(chan),
+		     (MV_U32) mvCesaVirtToPhys(&pReq->dmaDescBuf, pReq->dma[frag].pDmaFirst));
+
+#if defined(MV_BRIDGE_SYNC_REORDER)
+	mvOsBridgeReorderWA();
+#endif
+
+	/* Start Accelerator */
+	/* For KW2/Z2, DSMP/Z1: Enable also bit[31] for SHA-2 support */
+	MV_REG_BIT_SET(MV_CESA_CMD_REG(chan), (MV_CESA_CMD_CHAN_ENABLE_MASK | sha2CmdVal));
+}
+
+/*******************************************************************************
+* mvCesaHalInit - Initialize the CESA driver
+*
+* DESCRIPTION:
+*       This function initialize the CESA driver.
+*       1) Session database
+*       2) Request queue
+*       4) DMA descriptor lists - one list per request. Each list
+*           has MV_CESA_MAX_DMA_DESC descriptors.
+*
+* INPUT:
+*       numOfSession    - maximum number of supported sessions
+*       queueDepth      - number of elements in the request queue.
+*	    osHandle	    - A handle used by the OS to allocate memory for the
+*			            module (Passed to the OS Services layer)
+*
+* RETURN:
+*       MV_OK           - Success
+*       MV_NO_RESOURCE  - Fail, can't allocate resources:
+*                         Session database, request queue,
+*                         DMA descriptors list, LRU cache database.
+*       MV_NOT_ALIGNED  - Sram base address is not 8 byte aligned.
+*
+*******************************************************************************/
+MV_STATUS mvCesaHalInit(int numOfSession, int queueDepth, void *osHandle, MV_CESA_HAL_DATA *halData)
+{
+	int i, req;
+	MV_U32 descOffsetReg, configReg;
+	MV_U8 chan;
+
+	cesaOsHandle = osHandle;
+	sha2CmdVal = 0;
+
+#ifdef CONFIG_OF
+	mvOsPrintf("mvCesaInit: channels=%d, session=%d, queue=%d\n",
+	    mv_cesa_channels, numOfSession, queueDepth);
+#else
+	mvOsPrintf("mvCesaInit: channels=%d, session=%d, queue=%d\n", MV_CESA_CHANNELS, numOfSession, queueDepth);
+#endif
+
+	/* Create initial Session database */
+	pCesaSAD = mvOsMalloc(sizeof(MV_CESA_SA *) * numOfSession);
+	if (pCesaSAD == NULL) {
+		mvOsPrintf("mvCesaInit: Can't allocate %u bytes for %d SAs\n",
+			   sizeof(MV_CESA_SA *) * numOfSession, numOfSession);
+		mvCesaFinish();
+		return MV_NO_RESOURCE;
+	}
+	memset(pCesaSAD, 0, sizeof(MV_CESA_SA *) * numOfSession);
+	cesaMaxSA = numOfSession;
+
+	ctrlModel = halData->ctrlModel;
+	ctrlRev = halData->ctrlRev;
+
+	/* Initiliaze per channel resources */
+#ifdef CONFIG_OF
+	for (chan = 0; chan < mv_cesa_channels; chan++) {
+#else
+	for (chan = 0; chan < MV_CESA_CHANNELS; chan++) {
+#endif
+
+		cesaSramVirtPtr[chan] = (MV_CESA_SRAM_MAP *) (halData->sramVirtBase[chan] + halData->sramOffset[chan]);
+
+		/* Create request queue */
+		pCesaReqFirst[chan] = mvOsMalloc(sizeof(MV_CESA_REQ) * queueDepth);
+		if (pCesaReqFirst[chan] == NULL) {
+			mvOsPrintf("mvCesaInit: Can't allocate %u bytes for %d requests\n",
+				   sizeof(MV_CESA_REQ) * queueDepth, queueDepth);
+			mvCesaFinish();
+			return MV_NO_RESOURCE;
+		}
+		memset(pCesaReqFirst[chan], 0, sizeof(MV_CESA_REQ) * queueDepth);
+		pCesaReqEmpty[chan] = pCesaReqFirst[chan];
+		pCesaReqLast[chan] = pCesaReqFirst[chan] + (queueDepth - 1);
+		pCesaReqProcess[chan] = pCesaReqEmpty[chan];
+#if defined(MV_CESA_INT_COALESCING_SUPPORT) || defined(CONFIG_OF)
+#ifdef CONFIG_OF
+		if (mv_cesa_feature == INT_COALESCING) {
+#endif /* CONFIG_OF */
+			pCesaReqStartNext[chan] = pCesaReqFirst[chan];
+			pCesaReqProcessCurr[chan] = NULL;
+#ifdef CONFIG_OF
+		}
+#endif /* CONFIG_OF */
+#endif /* MV_CESA_INT_COALESCING_SUPPORT || CONFIG_OF */
+		cesaQueueDepth[chan] = queueDepth;
+		cesaReqResources[chan] = queueDepth;
+		cesaLastSid[chan] = -1;
+#if defined(MV_CESA_CHAIN_MODE) || defined(CONFIG_OF)
+#ifdef CONFIG_OF
+		if (mv_cesa_feature == CHAIN) {
+#endif /* CONFIG_OF */
+			cesaChainLength[chan] = MAX_CESA_CHAIN_LENGTH;
+			chainReqNum[chan] = 0;
+			chainIndex[chan] = 0;
+			pNextActiveChain[chan] = NULL;
+			pEndCurrChain[chan] = NULL;
+			isFirstReq[chan] = MV_TRUE;
+#ifdef CONFIG_OF
+		}
+#endif /* CONFIG_OF */
+#endif /* MV_CESA_CHAIN_MODE || CONFIG_OF */
+
+		/* pSramBase must be 8 byte aligned */
+		if (MV_IS_NOT_ALIGN((MV_ULONG) cesaSramVirtPtr[chan], 8)) {
+			mvOsPrintf("mvCesaInit: pSramBase (%p) must be 8 byte aligned\n", cesaSramVirtPtr[chan]);
+			mvCesaFinish();
+			return MV_NOT_ALIGNED;
+		}
+
+		/* Clear registers */
+		MV_REG_WRITE(MV_CESA_CFG_REG(chan), 0);
+		MV_REG_WRITE(MV_CESA_ISR_CAUSE_REG(chan), 0);
+		MV_REG_WRITE(MV_CESA_ISR_MASK_REG(chan), 0);
+
+		/* Initialize DMA descriptor lists for all requests in Request queue */
+		descOffsetReg = configReg = 0;
+		for (req = 0; req < queueDepth; req++) {
+			int frag;
+			MV_CESA_REQ *pReq;
+			MV_DMA_DESC *pDmaDesc;
+
+			pReq = &pCesaReqFirst[chan][req];
+
+			pReq->cesaDescBuf.bufSize = sizeof(MV_CESA_DESC) * MV_CESA_MAX_REQ_FRAGS + CPU_D_CACHE_LINE_SIZE;
+
+			pReq->cesaDescBuf.bufVirtPtr = mvOsIoCachedMalloc(osHandle, pReq->cesaDescBuf.bufSize,
+							&pReq->cesaDescBuf.bufPhysAddr,
+							&pReq->cesaDescBuf.memHandle);
+			if (pReq->cesaDescBuf.bufVirtPtr == NULL) {
+				mvOsPrintf("mvCesaInit: req=%d, Can't allocate %d bytes for CESA descriptors\n",
+						req, pReq->cesaDescBuf.bufSize);
+				mvCesaFinish();
+				return MV_NO_RESOURCE;
+			}
+			memset(pReq->cesaDescBuf.bufVirtPtr, 0, pReq->cesaDescBuf.bufSize);
+
+			pReq->pCesaDesc = (MV_CESA_DESC *) MV_ALIGN_UP((MV_ULONG) pReq->cesaDescBuf.bufVirtPtr,
+							CPU_D_CACHE_LINE_SIZE);
+
+			pReq->dmaDescBuf.bufSize = sizeof(MV_DMA_DESC) * MV_CESA_MAX_DMA_DESC * MV_CESA_MAX_REQ_FRAGS +
+							CPU_D_CACHE_LINE_SIZE;
+
+			pReq->dmaDescBuf.bufVirtPtr = mvOsIoCachedMalloc(osHandle, pReq->dmaDescBuf.bufSize,
+							&pReq->dmaDescBuf.bufPhysAddr, &pReq->dmaDescBuf.memHandle);
+
+			if (pReq->dmaDescBuf.bufVirtPtr == NULL) {
+				mvOsPrintf("mvCesaInit: req=%d, Can't allocate %d bytes for DMA descriptor list\n",
+					req, pReq->dmaDescBuf.bufSize);
+				mvCesaFinish();
+				return MV_NO_RESOURCE;
+			}
+			memset(pReq->dmaDescBuf.bufVirtPtr, 0, pReq->dmaDescBuf.bufSize);
+			pDmaDesc = (MV_DMA_DESC *) MV_ALIGN_UP((MV_ULONG) pReq->dmaDescBuf.bufVirtPtr,
+						CPU_D_CACHE_LINE_SIZE);
+
+			for (frag = 0; frag < MV_CESA_MAX_REQ_FRAGS; frag++) {
+				MV_CESA_DMA *pDma = &pReq->dma[frag];
+
+				pDma->pDmaFirst = pDmaDesc;
+				pDma->pDmaLast = NULL;
+
+				for (i = 0; i < MV_CESA_MAX_DMA_DESC - 1; i++) {
+					/* link all DMA descriptors together */
+					pDma->pDmaFirst[i].phyNextDescPtr =
+						MV_32BIT_LE(mvCesaVirtToPhys(&pReq->dmaDescBuf, &pDmaDesc[i + 1]));
+				}
+				pDma->pDmaFirst[i].phyNextDescPtr = 0;
+				mvOsCacheFlush(cesaOsHandle, &pDma->pDmaFirst[0], MV_CESA_MAX_DMA_DESC * sizeof(MV_DMA_DESC));
+
+				pDmaDesc += MV_CESA_MAX_DMA_DESC;
+			}
+		}
+
+		/*mvCesaCryptoIvSet(NULL, MV_CESA_MAX_IV_LENGTH); */
+		descOffsetReg = (MV_U16)((MV_U8 *)&cesaSramVirtPtr[chan]->desc - mvCesaSramAddrGet(chan));
+		MV_REG_WRITE(MV_CESA_CHAN_DESC_OFFSET_REG(chan), descOffsetReg);
+
+		configReg |= (MV_CESA_CFG_WAIT_DMA_MASK | MV_CESA_CFG_ACT_DMA_MASK);
+
+#if defined(MV_CESA_CHAIN_MODE) || defined(CONFIG_OF)
+#ifdef CONFIG_OF
+		if (mv_cesa_feature == CHAIN) {
+#endif /* CONFIG_OF */
+			configReg |= MV_CESA_CFG_CHAIN_MODE_MASK;
+#ifdef CONFIG_OF
+		}
+#endif /* CONFIG_OF */
+#endif /* MV_CESA_CHAIN_MODE || CONFIG_OF */
+
+		/* Initialize TDMA engine */
+		MV_REG_WRITE(MV_CESA_TDMA_CTRL_REG(chan), MV_CESA_TDMA_CTRL_VALUE);
+		MV_REG_WRITE(MV_CESA_TDMA_BYTE_COUNT_REG(chan), 0);
+		MV_REG_WRITE(MV_CESA_TDMA_CURR_DESC_PTR_REG(chan), 0);
+
+		switch ((MV_U16)(ctrlModel & 0xff00)) {
+		case 0x6500: /* Avanta1 */
+			if (ctrlRev < 2) {
+				/* Parallel mode should be disabled('1') for chip rev. < A0 */
+				configReg |= MV_CESA_CFG_ENC_AUTH_PARALLEL_MODE_MASK;
+				sha2CmdVal = BIT31;
+			}
+			break;
+		case 0x6600: /* Avanta-LP */
+			if (ctrlRev > 2) {
+				MV_REG_BIT_SET(MV_CESA_TDMA_CTRL_REG(chan),
+						       MV_CESA_TDMA_OUTSTAND_OUT_OF_ORDER_3TRANS_BIT);
+				sha2CmdVal = BIT31;
+			}
+			break;
+		case 0x6700: /* A370 */
+			if (ctrlModel == 0x6720) {
+				MV_REG_BIT_SET(MV_CESA_TDMA_CTRL_REG(chan),
+					       MV_CESA_TDMA_OUTSTAND_OUT_OF_ORDER_3TRANS_BIT);
+				sha2CmdVal = BIT31;
+			} else {
+				/* Support maximum of 4 outstanding read transactions */
+				MV_REG_BIT_SET(MV_CESA_TDMA_CTRL_REG(chan), MV_CESA_TDMA_OUTSTAND_NEW_MODE_BIT);
+			}
+			break;
+		case 0x6800: /* A38x */
+			MV_REG_BIT_SET(MV_CESA_TDMA_CTRL_REG(chan), MV_CESA_TDMA_OUTSTAND_OUT_OF_ORDER_3TRANS_BIT);
+			sha2CmdVal = BIT31;
+			break;
+		case 0x7800: /* AXP */
+			if (ctrlRev < 1) { /* Z1 step */
+#ifdef AURORA_IO_CACHE_COHERENCY
+				/* No support for outstanding read with I/0 cache coherency on AXP/Z1 */
+				MV_REG_BIT_RESET(MV_CESA_TDMA_CTRL_REG(chan), MV_CESA_TDMA_OUTSTAND_READ_EN_MASK);
+#endif
+				/* Parallel mode should be disabled('1') for chip rev. < A0 */
+				configReg |= MV_CESA_CFG_ENC_AUTH_PARALLEL_MODE_MASK;
+			} else { /*  A0/B0 steps */
+				/* Support maximum of 3 outstanding read transactions */
+				MV_REG_BIT_SET(MV_CESA_TDMA_CTRL_REG(chan), MV_CESA_TDMA_OUTSTAND_OUT_OF_ORDER_3TRANS_BIT);
+			}
+			sha2CmdVal = BIT31;
+			break;
+		default:
+			mvOsPrintf("Error, chip revision(%d) no supported\n", halData->ctrlRev);
+			break;
+		}
+
+#if defined(MV_CESA_INT_COALESCING_SUPPORT) || defined(CONFIG_OF)
+		configReg |= MV_CESA_CFG_CHAIN_MODE_MASK;
+		/* Enable interrupt coalescing */
+#ifdef CONFIG_OF
+		if (mv_cesa_feature == INT_COALESCING) {
+			MV_REG_WRITE(MV_CESA_INT_COAL_TH_REG(chan),
+			    mv_cesa_threshold);
+			MV_REG_WRITE(MV_CESA_INT_TIME_TH_REG(chan),
+			    mv_cesa_time_threshold);
+		}
+#else /* CONFIG_OF */
+		MV_REG_WRITE(MV_CESA_INT_COAL_TH_REG(chan), MV_CESA_INT_COAL_THRESHOLD);
+		MV_REG_WRITE(MV_CESA_INT_TIME_TH_REG(chan), MV_CESA_INT_COAL_TIME_THRESHOLD);
+#endif /* CONFIG_OF */
+#endif /* MV_CESA_INT_COALESCING_SUPPORT || CONFIG_OF */
+
+		/* Set CESA configuration registers */
+		MV_REG_WRITE(MV_CESA_CFG_REG(chan), configReg);
+	}
+
+	mvCesaDebugStatsClear();
+	mvOsMemcpy(&cesaHalData, halData, sizeof(MV_CESA_HAL_DATA));
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaFinish - Shutdown the CESA driver
+*
+* DESCRIPTION:
+*       This function shutdown the CESA driver and free all allocted resources.
+*
+* INPUT:    None
+*
+* RETURN:
+*       MV_OK   - Success
+*       Other   - Fail
+*
+*******************************************************************************/
+MV_STATUS mvCesaFinish(void)
+{
+	int req, sid;
+	MV_CESA_REQ *pReq;
+	MV_U8 chan;
+
+	mvOsPrintf("mvCesaFinish:\n");
+
+#ifdef CONFIG_OF
+	for (chan = 0; chan < mv_cesa_channels; chan++) {
+#else
+	for (chan = 0; chan < MV_CESA_CHANNELS; chan++) {
+#endif
+
+		cesaSramVirtPtr[chan] = NULL;
+
+		MV_REG_WRITE(MV_CESA_CFG_REG(chan), 0);
+		MV_REG_WRITE(MV_CESA_ISR_CAUSE_REG(chan), 0);
+		MV_REG_WRITE(MV_CESA_ISR_MASK_REG(chan), 0);
+
+
+		/* Free all resources: DMA list, etc. */
+		for (req = 0; req < cesaQueueDepth[chan]; req++) {
+			pReq = &pCesaReqFirst[chan][req];
+			if (pReq->dmaDescBuf.bufVirtPtr != NULL) {
+				mvOsIoCachedFree(cesaOsHandle, pReq->dmaDescBuf.bufSize,
+					pReq->dmaDescBuf.bufPhysAddr,
+					pReq->dmaDescBuf.bufVirtPtr, pReq->dmaDescBuf.memHandle);
+			}
+			if (pReq->cesaDescBuf.bufVirtPtr != NULL) {
+				mvOsIoCachedFree(cesaOsHandle, pReq->cesaDescBuf.bufSize,
+					pReq->cesaDescBuf.bufPhysAddr,
+					pReq->cesaDescBuf.bufVirtPtr, pReq->cesaDescBuf.memHandle);
+			}
+
+			/* Free request queue */
+			if (pCesaReqFirst[chan] != NULL) {
+				mvOsFree(pCesaReqFirst[chan]);
+				pCesaReqFirst[chan] = pCesaReqLast[chan] = NULL;
+				pCesaReqEmpty[chan] = pCesaReqProcess[chan] = NULL;
+				cesaQueueDepth[chan] = cesaReqResources[chan] = 0;
+			}
+		}
+	}
+
+	/* Free SA database */
+	if (pCesaSAD != NULL) {
+		for (sid = 0; sid < cesaMaxSA; sid++) {
+			/* Free SRAM SA structure */
+			mvOsIoCachedFree(cesaOsHandle, pCesaSAD[sid]->sramSABuffSize,
+					 pCesaSAD[sid]->sramSAPhysAddr,
+					 pCesaSAD[sid]->sramSABuff, pCesaSAD[sid]->memHandle);
+			/* Free SA structure */
+			mvOsFree(pCesaSAD[sid]);
+			pCesaSAD[sid] = NULL;
+		}
+
+		cesaMaxSA = 0;
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaCryptoIvSet - Set IV value for Crypto algorithm working in CBC mode
+*
+* DESCRIPTION:
+*    This function set IV value using by Crypto algorithms in CBC mode.
+*   Each channel has its own IV value.
+*   This function gets IV value from the caller. If no IV value passed from
+*   the caller or only part of IV passed, the function will init the rest part
+*   of IV value (or the whole IV) by random value.
+*
+* INPUT:
+*       MV_U8*  pIV     - Pointer to IV value supplied by user. If pIV==NULL
+*                       the function will generate random IV value.
+*       int     ivSize  - size (in bytes) of IV provided by user. If ivSize is
+*                       smaller than maximum IV size, the function will complete
+*                       IV by random value.
+*
+* RETURN:
+*       MV_OK   - Success
+*       Other   - Fail
+*
+*******************************************************************************/
+MV_STATUS mvCesaCryptoIvSet(MV_U8 chan, MV_U8 *pIV, int ivSize)
+{
+	MV_U8 *pSramIV;
+
+	pSramIV = cesaSramVirtPtr[chan]->cryptoIV;
+	if (ivSize > MV_CESA_MAX_IV_LENGTH) {
+		mvOsPrintf("mvCesaCryptoIvSet: ivSize (%d) is too large\n", ivSize);
+		ivSize = MV_CESA_MAX_IV_LENGTH;
+	}
+	if (pIV != NULL) {
+		memcpy(pSramIV, pIV, ivSize);
+		ivSize = MV_CESA_MAX_IV_LENGTH - ivSize;
+		pSramIV += ivSize;
+	}
+
+	while (ivSize > 0) {
+		int size, mv_random = mvOsRand();
+
+		size = MV_MIN(ivSize, sizeof(mv_random));
+		memcpy(pSramIV, (void *)&mv_random, size);
+
+		pSramIV += size;
+		ivSize -= size;
+	}
+/*
+    mvOsCacheFlush(cesaOsHandle, cesaSramVirtPtr[chan]->cryptoIV, MV_CESA_MAX_IV_LENGTH);
+    mvOsCacheInvalidate(cesaOsHandle, cesaSramVirtPtr[chan]->cryptoIV, MV_CESA_MAX_IV_LENGTH);
+*/
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaSessionOpen - Open new uni-directional crypto session
+*
+* DESCRIPTION:
+*       This function open new session.
+*
+* INPUT:
+*       MV_CESA_OPEN_SESSION *pSession - pointer to new session input parameters
+*
+* OUTPUT:
+*       short           *pSid  - session ID, should be used for all future
+*                                   requests over this session.
+*
+* RETURN:
+*       MV_OK           - Session opend successfully.
+*       MV_FULL         - All sessions are in use, no free place in
+*                       SA database.
+*       MV_BAD_PARAM    - One of session input parameters is invalid.
+*
+*******************************************************************************/
+MV_STATUS mvCesaSessionOpen(MV_CESA_OPEN_SESSION *pSession, short *pSid)
+{
+	short sid;
+	MV_U32 config = 0;
+	int digestSize;
+	MV_BUF_INFO cesaSramSaBuf;
+
+	cesaStats.openedCount++;
+
+	/* Find free entry in SAD */
+	for (sid = 0; sid < cesaMaxSA; sid++)
+		if (pCesaSAD[sid] == NULL)
+			break;
+
+	/* No more sessions left ? */
+	if (sid == cesaMaxSA) {
+		if (MV_FAIL == mvCesaUpdateSADSize(cesaMaxSA * 2)) {
+			mvOsPrintf("mvCesaSessionOpen: SA Database is FULL\n");
+			return MV_FULL;
+		}
+	}
+
+	/* Allocate SA entry */
+	pCesaSAD[sid] = mvOsMalloc(sizeof(MV_CESA_SA));
+	if (pCesaSAD[sid] == NULL) {
+		mvOsPrintf("mvCesaSessionOpen: Can't allocate %d bytes for SA structures\n", sizeof(MV_CESA_SA));
+		return MV_FULL;
+	}
+	memset(pCesaSAD[sid], 0, sizeof(MV_CESA_SA));
+
+	/* Allocate image of sramSA in DRAM */
+	cesaSramSaBuf.bufSize = sizeof(MV_CESA_SRAM_SA) + CPU_D_CACHE_LINE_SIZE;
+
+	cesaSramSaBuf.bufVirtPtr = mvOsIoCachedMalloc(cesaOsHandle, cesaSramSaBuf.bufSize,
+						      &cesaSramSaBuf.bufPhysAddr, &cesaSramSaBuf.memHandle);
+
+	if (cesaSramSaBuf.bufVirtPtr == NULL) {
+		mvOsPrintf("mvCesaSessionOpen: Can't allocate %d bytes for sramSA structures\n", cesaSramSaBuf.bufSize);
+		return MV_FULL;
+	}
+	memset(cesaSramSaBuf.bufVirtPtr, 0, cesaSramSaBuf.bufSize);
+
+	/* Save allocation parameters */
+	pCesaSAD[sid]->sramSABuff = cesaSramSaBuf.bufVirtPtr;
+	pCesaSAD[sid]->sramSABuffSize = cesaSramSaBuf.bufSize;
+	pCesaSAD[sid]->memHandle = cesaSramSaBuf.memHandle;
+	pCesaSAD[sid]->pSramSA = (MV_CESA_SRAM_SA *) MV_ALIGN_UP((MV_ULONG) cesaSramSaBuf.bufVirtPtr,
+								 CPU_D_CACHE_LINE_SIZE);
+
+	/* Align physical address to the beginning of SRAM SA */
+	pCesaSAD[sid]->sramSAPhysAddr = MV_32BIT_LE(mvCesaVirtToPhys(&cesaSramSaBuf, pCesaSAD[sid]->pSramSA));
+
+	/* Check Input parameters for Open session */
+	if (pSession->operation >= MV_CESA_MAX_OPERATION) {
+		mvOsPrintf("mvCesaSessionOpen: Unexpected operation %d\n", pSession->operation);
+		return MV_BAD_PARAM;
+	}
+	config |= (pSession->operation << MV_CESA_OPERATION_OFFSET);
+
+	if ((pSession->direction != MV_CESA_DIR_ENCODE) && (pSession->direction != MV_CESA_DIR_DECODE)) {
+		mvOsPrintf("mvCesaSessionOpen: Unexpected direction %d\n", pSession->direction);
+		return MV_BAD_PARAM;
+	}
+	config |= (pSession->direction << MV_CESA_DIRECTION_BIT);
+	/* Clear SA entry */
+	/* memset(&pCesaSAD[sid], 0, sizeof(pCesaSAD[sid])); */
+
+	/* Check AUTH parameters and update SA entry */
+	if (pSession->operation != MV_CESA_CRYPTO_ONLY) {
+		/* For HMAC (MD5/SHA1/SHA2) - Maximum Key size is 64 bytes */
+		if ((pSession->macMode == MV_CESA_MAC_HMAC_MD5) || (pSession->macMode == MV_CESA_MAC_HMAC_SHA1) ||
+					(pSession->macMode == MV_CESA_MAC_HMAC_SHA2)) {
+			if (pSession->macKeyLength > MV_CESA_MAX_MAC_KEY_LENGTH) {
+				mvOsPrintf("mvCesaSessionOpen: macKeyLength %d is too large\n", pSession->macKeyLength);
+				return MV_BAD_PARAM;
+			}
+			mvCesaHmacIvGet(pSession->macMode, pSession->macKey, pSession->macKeyLength,
+					pCesaSAD[sid]->pSramSA->macInnerIV, pCesaSAD[sid]->pSramSA->macOuterIV);
+			pCesaSAD[sid]->macKeyLength = pSession->macKeyLength;
+		}
+		switch (pSession->macMode) {
+		case MV_CESA_MAC_MD5:
+		case MV_CESA_MAC_HMAC_MD5:
+			digestSize = MV_CESA_MD5_DIGEST_SIZE;
+			break;
+
+		case MV_CESA_MAC_SHA1:
+		case MV_CESA_MAC_HMAC_SHA1:
+			digestSize = MV_CESA_SHA1_DIGEST_SIZE;
+			break;
+
+		case MV_CESA_MAC_SHA2:
+		case MV_CESA_MAC_HMAC_SHA2:
+			digestSize = MV_CESA_SHA2_DIGEST_SIZE;
+			break;
+
+		default:
+			mvOsPrintf("mvCesaSessionOpen: Unexpected macMode %d\n", pSession->macMode);
+			return MV_BAD_PARAM;
+		}
+		config |= (pSession->macMode << MV_CESA_MAC_MODE_OFFSET);
+
+		/* Supported digest sizes:     */
+		/* MD5 - 16 bytes (128 bits),  */
+		/* SHA1 - 20 bytes (160 bits), */
+		/* SHA2 - 32 bytes (256 bits) or 12 bytes (96 bits) for all */
+		if ((pSession->digestSize != digestSize) && (pSession->digestSize != 12)) {
+			mvOsPrintf("mvCesaSessionOpen: Unexpected digest size %d\n", pSession->digestSize);
+			mvOsPrintf("\t Valid values [bytes]: MD5-16, SHA1-20, SHA2-32, All-12\n");
+			return MV_BAD_PARAM;
+		}
+		pCesaSAD[sid]->digestSize = pSession->digestSize;
+
+		if (pCesaSAD[sid]->digestSize == 12) {
+			/* Set MV_CESA_MAC_DIGEST_SIZE_BIT if digest size is 96 bits */
+			config |= (MV_CESA_MAC_DIGEST_96B << MV_CESA_MAC_DIGEST_SIZE_BIT);
+		}
+	}
+
+	/* Check CRYPTO parameters and update SA entry */
+	if (pSession->operation != MV_CESA_MAC_ONLY) {
+		switch (pSession->cryptoAlgorithm) {
+		case MV_CESA_CRYPTO_DES:
+			pCesaSAD[sid]->cryptoKeyLength = MV_CESA_DES_KEY_LENGTH;
+			pCesaSAD[sid]->cryptoBlockSize = MV_CESA_DES_BLOCK_SIZE;
+			break;
+
+		case MV_CESA_CRYPTO_3DES:
+			pCesaSAD[sid]->cryptoKeyLength = MV_CESA_3DES_KEY_LENGTH;
+			pCesaSAD[sid]->cryptoBlockSize = MV_CESA_DES_BLOCK_SIZE;
+			/* Only EDE mode is supported */
+			config |= (MV_CESA_CRYPTO_3DES_EDE << MV_CESA_CRYPTO_3DES_MODE_BIT);
+			break;
+
+		case MV_CESA_CRYPTO_AES:
+			switch (pSession->cryptoKeyLength) {
+			case 16:
+				pCesaSAD[sid]->cryptoKeyLength = MV_CESA_AES_128_KEY_LENGTH;
+				config |= (MV_CESA_CRYPTO_AES_KEY_128 << MV_CESA_CRYPTO_AES_KEY_LEN_OFFSET);
+				break;
+
+			case 24:
+				pCesaSAD[sid]->cryptoKeyLength = MV_CESA_AES_192_KEY_LENGTH;
+				config |= (MV_CESA_CRYPTO_AES_KEY_192 << MV_CESA_CRYPTO_AES_KEY_LEN_OFFSET);
+				break;
+
+			case 32:
+			default:
+				pCesaSAD[sid]->cryptoKeyLength = MV_CESA_AES_256_KEY_LENGTH;
+				config |= (MV_CESA_CRYPTO_AES_KEY_256 << MV_CESA_CRYPTO_AES_KEY_LEN_OFFSET);
+				break;
+			}
+			pCesaSAD[sid]->cryptoBlockSize = MV_CESA_AES_BLOCK_SIZE;
+			break;
+
+		default:
+			mvOsPrintf("mvCesaSessionOpen: Unexpected cryptoAlgorithm %d\n", pSession->cryptoAlgorithm);
+			return MV_BAD_PARAM;
+		}
+		config |= (pSession->cryptoAlgorithm << MV_CESA_CRYPTO_ALG_OFFSET);
+
+		if (pSession->cryptoKeyLength != pCesaSAD[sid]->cryptoKeyLength) {
+			mvOsPrintf("cesaSessionOpen: Wrong CryptoKeySize %d != %d\n",
+				   pSession->cryptoKeyLength, pCesaSAD[sid]->cryptoKeyLength);
+			return MV_BAD_PARAM;
+		}
+
+		/* Copy Crypto key */
+		if ((pSession->cryptoAlgorithm == MV_CESA_CRYPTO_AES) && (pSession->direction == MV_CESA_DIR_DECODE)) {
+			/* Crypto Key for AES decode is computed from original key material */
+			/* and depend on cryptoKeyLength (128/192/256 bits) */
+			aesMakeKey(pCesaSAD[sid]->pSramSA->cryptoKey, pSession->cryptoKey,
+				   pSession->cryptoKeyLength * 8, MV_CESA_AES_BLOCK_SIZE * 8);
+		} else {
+			/*panic("mvCesaSessionOpen2"); */
+			memcpy(pCesaSAD[sid]->pSramSA->cryptoKey, pSession->cryptoKey, pCesaSAD[sid]->cryptoKeyLength);
+
+		}
+
+		switch (pSession->cryptoMode) {
+		case MV_CESA_CRYPTO_ECB:
+			pCesaSAD[sid]->cryptoIvSize = 0;
+			break;
+
+		case MV_CESA_CRYPTO_CBC:
+			pCesaSAD[sid]->cryptoIvSize = pCesaSAD[sid]->cryptoBlockSize;
+			break;
+
+		case MV_CESA_CRYPTO_CTR:
+			/* Supported only for AES algorithm */
+			if (pSession->cryptoAlgorithm != MV_CESA_CRYPTO_AES) {
+				mvOsPrintf("mvCesaSessionOpen: CRYPTO CTR mode supported for AES only\n");
+				return MV_BAD_PARAM;
+			}
+			pCesaSAD[sid]->cryptoIvSize = 0;
+			pCesaSAD[sid]->ctrMode = 1;
+			/* Replace to ECB mode for HW */
+			pSession->cryptoMode = MV_CESA_CRYPTO_ECB;
+			break;
+
+		default:
+			mvOsPrintf("mvCesaSessionOpen: Unexpected cryptoMode %d\n", pSession->cryptoMode);
+			return MV_BAD_PARAM;
+		}
+
+		config |= (pSession->cryptoMode << MV_CESA_CRYPTO_MODE_BIT);
+	}
+	pCesaSAD[sid]->config = config;
+
+	mvOsCacheFlush(cesaOsHandle, pCesaSAD[sid]->pSramSA, sizeof(MV_CESA_SRAM_SA));
+	if (pSid != NULL)
+		*pSid = sid;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaSessionClose - Close active crypto session
+*
+* DESCRIPTION:
+*       This function closes existing session
+*
+* INPUT:
+*       short sid   - Unique identifier of the session to be closed
+*
+* RETURN:
+*       MV_OK        - Session closed successfully.
+*       MV_BAD_PARAM - Session identifier is out of valid range.
+*       MV_NOT_FOUND - There is no active session with such ID.
+*
+*******************************************************************************/
+MV_STATUS mvCesaSessionClose(short sid)
+{
+	MV_U8 chan;
+
+	cesaStats.closedCount++;
+
+	if (sid >= cesaMaxSA) {
+		mvOsPrintf("CESA Error: sid (%d) is too big\n", sid);
+		return MV_BAD_PARAM;
+	}
+
+	if (pCesaSAD[sid] == NULL) {
+		mvOsPrintf("CESA Warning: Session (sid=%d) is invalid\n", sid);
+		return MV_NOT_FOUND;
+	}
+
+#ifdef CONFIG_OF
+	for (chan = 0; chan < mv_cesa_channels; chan++) {
+#else
+	for (chan = 0; chan < MV_CESA_CHANNELS; chan++) {
+#endif
+		if (cesaLastSid[chan] == sid)
+			cesaLastSid[chan] = -1;
+	}
+
+	/* Free SA structures */
+	mvOsIoCachedFree(cesaOsHandle, pCesaSAD[sid]->sramSABuffSize,
+			 pCesaSAD[sid]->sramSAPhysAddr, pCesaSAD[sid]->sramSABuff, pCesaSAD[sid]->memHandle);
+	mvOsFree(pCesaSAD[sid]);
+
+	pCesaSAD[sid] = NULL;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaAction - Perform crypto operation
+*
+* DESCRIPTION:
+*       This function set new CESA request FIFO queue for further HW processing.
+*       The function checks request parameters before set new request to the queue.
+*       If one of the CESA channels is ready for processing the request will be
+*       passed to HW. When request processing is finished the CESA interrupt will
+*       be generated by HW. The caller should call mvCesaReadyGet() function to
+*       complete request processing and get result.
+*
+* INPUT:
+* 	MV_U8 chan		- channel ID.
+*       MV_CESA_COMMAND *pCmd   - pointer to new CESA request.
+*                               It includes pointers to Source and Destination
+*                               buffers, session identifier get from
+*                               mvCesaSessionOpen() function, pointer to caller
+*                               private data and all needed crypto parameters.
+*
+* RETURN:
+*       MV_OK             - request successfully added to request queue
+*                         and will be processed.
+*       MV_NO_MORE        - request successfully added to request queue and will
+*                         be processed, but request queue became Full and next
+*                         request will not be accepted.
+*       MV_NO_RESOURCE    - request queue is FULL and the request can not
+*                         be processed.
+*       MV_OUT_OF_CPU_MEM - memory allocation needed for request processing is
+*                         failed. Request can not be processed.
+*       MV_NOT_ALLOWED    - This mixed request (CRYPTO+MAC) can not be processed
+*                         as one request and should be splitted for two requests:
+*                         CRYPTO_ONLY and MAC_ONLY.
+*       MV_BAD_PARAM      - One of the request parameters is out of valid range.
+*                         The request can not be processed.
+*
+*******************************************************************************/
+MV_STATUS mvCesaAction(MV_U8 chan, MV_CESA_COMMAND *pCmd)
+{
+	MV_STATUS status;
+	MV_CESA_REQ *pReq = pCesaReqEmpty[chan];
+	int sid = pCmd->sessionId;
+	MV_CESA_SA *pSA = pCesaSAD[sid];
+#if defined(MV_CESA_CHAIN_MODE) || defined(CONFIG_OF)
+	MV_CESA_REQ *pFromReq;
+	MV_CESA_REQ *pToReq;
+#endif /* MV_CESA_CHAIN_MODE || CONFIG_OF */
+	cesaStats.reqCount++;
+
+	/* Check that the request queue is not FULL */
+	if (cesaReqResources[chan] == 0)
+		return MV_NO_RESOURCE;
+
+	if ((sid >= cesaMaxSA) || (pSA == NULL)) {
+		mvOsPrintf("CESA Action Error: Session sid=%d is INVALID\n", sid);
+		return MV_BAD_PARAM;
+	}
+	pSA->count++;
+
+	if (pSA->ctrMode) {
+		/* AES in CTR mode can't be mixed with Authentication */
+		if ((pSA->config & MV_CESA_OPERATION_MASK) != (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET)) {
+			mvOsPrintf("mvCesaAction : CRYPTO CTR mode can't be mixed with AUTH\n");
+			return MV_NOT_ALLOWED;
+		}
+		/* All other request parameters should not be checked because key stream */
+		/* (not user data) processed by AES HW engine */
+		pReq->pOrgCmd = pCmd;
+		/* Allocate temporary pCmd structure for Key stream */
+		pCmd = mvCesaCtrModeInit();
+		if (pCmd == NULL)
+			return MV_OUT_OF_CPU_MEM;
+
+		/* Prepare Key stream */
+		mvCesaCtrModePrepare(pCmd, pReq->pOrgCmd);
+		pReq->fixOffset = 0;
+	} else {
+		/* Check request parameters and calculae fixOffset */
+		status = mvCesaParamCheck(pSA, pCmd, &pReq->fixOffset);
+		if (status != MV_OK)
+			return status;
+	}
+	pReq->pCmd = pCmd;
+
+	/* Check if the packet need fragmentation */
+	if (pCmd->pSrc->mbufSize <= sizeof(cesaSramVirtPtr[chan]->buf)) {
+		/* request size is smaller than single buffer size */
+		pReq->fragMode = MV_CESA_FRAG_NONE;
+
+		/* Prepare NOT fragmented packets */
+		status = mvCesaReqProcess(chan, pReq);
+		if (status != MV_OK)
+			mvOsPrintf("CesaReady: ReqProcess error: pReq=%p, status=0x%x\n", pReq, status);
+#if defined(MV_CESA_CHAIN_MODE) || defined(CONFIG_OF)
+#ifdef CONFIG_OF
+		if (mv_cesa_feature == CHAIN) {
+#endif /* CONFIG_OF */
+			pReq->frags.numFrag = 1;
+#ifdef CONFIG_OF
+		}
+#endif /* CONFIG_OF */
+#endif /* MV_CESA_CHAIN_MODE || CONFIG_OF */
+	} else {
+		MV_U8 frag = 0;
+
+		/* request size is larger than buffer size - needs fragmentation */
+
+		/* Check restrictions for processing fragmented packets */
+		status = mvCesaFragParamCheck(chan, pSA, pCmd);
+		if (status != MV_OK)
+			return status;
+
+		pReq->fragMode = MV_CESA_FRAG_FIRST;
+		pReq->frags.nextFrag = 0;
+
+		/* Prepare Process Fragmented packets */
+		while (pReq->fragMode != MV_CESA_FRAG_LAST) {
+			if (frag >= MV_CESA_MAX_REQ_FRAGS) {
+				mvOsPrintf("mvCesaAction Error: Too large request frag=%d\n", frag);
+				return MV_OUT_OF_CPU_MEM;
+			}
+			status = mvCesaFragReqProcess(chan, pReq, frag);
+			if (status == MV_OK) {
+#if defined(MV_CESA_CHAIN_MODE) || defined(MV_CESA_INT_COALESCING_SUPPORT) || \
+							     defined(CONFIG_OF)
+#ifdef CONFIG_OF
+				if ((mv_cesa_feature == INT_COALESCING) ||
+						(mv_cesa_feature == CHAIN)) {
+#endif /* CONFIG_OF */
+					if (frag) {
+						pReq->dma[frag - 1].pDmaLast->phyNextDescPtr =
+						    MV_32BIT_LE(mvCesaVirtToPhys(&pReq->dmaDescBuf,
+							pReq->dma[frag].pDmaFirst));
+						mvOsCacheFlush(cesaOsHandle, pReq->dma[frag - 1].pDmaLast,
+						    sizeof(MV_DMA_DESC));
+					}
+#ifdef CONFIG_OF
+				}
+#endif /* CONFIG_OF */
+#endif /* MV_CESA_CHAIN_MODE || MV_CESA_INT_COALESCING_SUPPORT || CONFIG_OF*/
+				frag++;
+			}
+		}
+		pReq->frags.numFrag = frag;
+
+#if defined(MV_CESA_CHAIN_MODE) || defined(CONFIG_OF)
+#ifdef CONFIG_OF
+		if (mv_cesa_feature == CHAIN) {
+#endif /* CONFIG_OF */
+			if (chainReqNum[chan]) {
+				chainReqNum[chan] += pReq->frags.numFrag;
+				if (chainReqNum[chan] >= MAX_CESA_CHAIN_LENGTH)
+					chainReqNum[chan] =
+					    MAX_CESA_CHAIN_LENGTH;
+		}
+#ifdef CONFIG_OF
+		}
+#endif /* CONFIG_OF */
+#endif /* MV_CESA_CHAIN_MODE || CONFIG_OF*/
+	}
+
+	pReq->state = MV_CESA_PENDING;
+
+	pCesaReqEmpty[chan] = MV_CESA_REQ_NEXT_PTR(chan, pCesaReqEmpty[chan]);
+	cesaReqResources[chan] -= 1;
+
+/* #ifdef CESA_DEBUG */
+	if ((cesaQueueDepth[chan] - cesaReqResources[chan]) > cesaStats.maxReqCount)
+		cesaStats.maxReqCount = (cesaQueueDepth[chan] - cesaReqResources[chan]);
+/* #endif CESA_DEBUG */
+
+	cesaLastSid[chan] = sid;
+
+#if defined(MV_CESA_CHAIN_MODE) || defined(CONFIG_OF)
+#ifdef CONFIG_OF
+	if (mv_cesa_feature == CHAIN) {
+#endif /* CONFIG_OF */
+
+		/* Are we within chain bounderies and follows the first request ? */
+		if ((chainReqNum[chan] > 0) && (chainReqNum[chan] < MAX_CESA_CHAIN_LENGTH)) {
+			if (chainIndex[chan]) {
+				pFromReq = MV_CESA_REQ_PREV_PTR(chan, pReq);
+				pToReq = pReq;
+				pReq->state = MV_CESA_CHAIN;
+
+				/* assume concatenating is possible */
+				pFromReq->dma[pFromReq->frags.numFrag - 1].pDmaLast->phyNextDescPtr =
+				    MV_32BIT_LE(mvCesaVirtToPhys(&pToReq->dmaDescBuf, pToReq->dma[0].pDmaFirst));
+				mvOsCacheFlush(cesaOsHandle, pFromReq->dma[pFromReq->frags.numFrag - 1].pDmaLast,
+				    sizeof(MV_DMA_DESC));
+
+
+				/* align active & next pointers */
+				if (pNextActiveChain[chan]->state != MV_CESA_PENDING)
+					pEndCurrChain[chan] = pNextActiveChain[chan] =
+					    MV_CESA_REQ_NEXT_PTR(chan, pReq);
+			} else {	/* we have only one chain, start new one */
+				chainReqNum[chan] = 0;
+				chainIndex[chan]++;
+				/* align active & next pointers  */
+				if (pNextActiveChain[chan]->state != MV_CESA_PENDING)
+					pEndCurrChain[chan] = pNextActiveChain[chan] = pReq;
+			}
+		} else {
+			/* In case we concatenate full chain */
+			if (chainReqNum[chan] == MAX_CESA_CHAIN_LENGTH) {
+				chainIndex[chan]++;
+				if (pNextActiveChain[chan]->state != MV_CESA_PENDING)
+					pEndCurrChain[chan] = pNextActiveChain[chan] = pReq;
+				chainReqNum[chan] = 0;
+			}
+
+			pReq = pCesaReqProcess[chan];
+			if (pReq->state == MV_CESA_PENDING) {
+				pNextActiveChain[chan] = pReq;
+				pEndCurrChain[chan] = MV_CESA_REQ_NEXT_PTR(chan, pReq);
+				/* Start Process new request */
+				mvCesaReqProcessStart(chan, pReq);
+			}
+		}
+
+		chainReqNum[chan]++;
+
+		if ((chainIndex[chan] < MAX_CESA_CHAIN_LENGTH) && (chainReqNum[chan] > cesaStats.maxChainUsage))
+			cesaStats.maxChainUsage = chainReqNum[chan];
+#ifdef CONFIG_OF
+	}
+#endif /* CONFIG_OF */
+#endif /* MV_CESA_CHAIN_MODE) || CONFIG_OF */
+
+#if defined(MV_CESA_INT_COALESCING_SUPPORT) || defined(CONFIG_OF)
+#ifdef CONFIG_OF
+	if (mv_cesa_feature == INT_COALESCING) {
+#endif /* CONFIG_OF */
+
+		/* Check if processing of previous packet was completed */
+		if (!(MV_REG_READ(MV_CESA_STATUS_REG(chan)) & MV_CESA_STATUS_ACTIVE_MASK)) {
+			if (pCesaReqStartNext[chan]->state == MV_CESA_PENDING) {
+				mvCesaReqProcessStart(chan, pCesaReqStartNext[chan]);
+				pCesaReqProcessCurr[chan] = pCesaReqStartNext[chan];
+				pCesaReqStartNext[chan] = MV_CESA_REQ_NEXT_PTR(chan, pCesaReqStartNext[chan]);
+			}
+		}
+#ifdef CONFIG_OF
+	}
+#endif /* CONFIG_OF */
+#endif /* MV_CESA_INT_COALESCING_SUPPORT || CONFIG_OF */
+
+#if defined(MV_CESA_INT_PER_PACKET) || defined(CONFIG_OF)
+#ifdef CONFIG_OF
+	if (mv_cesa_feature == INT_PER_PACKET) {
+#endif /* CONFIG_OF */
+
+		/* Check status of CESA channels and process requests if possible */
+		pReq = pCesaReqProcess[chan];
+		if (pReq->state == MV_CESA_PENDING) {
+			/* Start Process new request */
+			mvCesaReqProcessStart(chan, pReq);
+		}
+#ifdef CONFIG_OF
+	}
+#endif /* CONFIG_OF */
+#endif /* MV_CESA_INT_PER_PACKET || CONFIG_OF */
+
+	/* If request queue became FULL - return MV_NO_MORE */
+	if (cesaReqResources[chan] == 0)
+		return MV_NO_MORE;
+
+	return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvCesaReadyGet - Get crypto request that processing is finished
+*
+* DESCRIPTION:
+*       This function complete request processing and return ready request to
+*       caller. To don't miss interrupts the caller must call this function
+*       while MV_OK or MV_TERMINATE values returned.
+*
+* INPUT:
+*   MV_U32          chanMap  - map of CESA channels finished thier job
+*                              accordingly with CESA Cause register.
+*   MV_CESA_RESULT* pResult  - pointer to structure contains information
+*                            about ready request. It includes pointer to
+*                            user private structure "pReqPrv", session identifier
+*                            for this request "sessionId" and return code.
+*                            Return code set to MV_FAIL if calculated digest value
+*                            on decode direction is different than digest value
+*                            in the packet.
+*
+* RETURN:
+*       MV_OK           - Success, ready request is returned.
+*       MV_NOT_READY    - Next request is not ready yet. New interrupt will
+*                       be generated for futher request processing.
+*       MV_EMPTY        - There is no more request for processing.
+*       MV_BUSY         - Fragmented request is not ready yet.
+*       MV_TERMINATE    - Call this function once more to complete processing
+*                       of fragmented request.
+*
+*******************************************************************************/
+MV_STATUS mvCesaReadyGet(MV_U8 chan, MV_CESA_RESULT *pResult)
+{
+	MV_STATUS status, readyStatus = MV_NOT_READY;
+	MV_U32 statusReg;
+	MV_CESA_REQ *pReq;
+	MV_CESA_SA *pSA;
+
+#if defined(MV_CESA_CHAIN_MODE) || defined(CONFIG_OF)
+#ifdef CONFIG_OF
+	if (mv_cesa_feature == CHAIN) {
+#endif /* CONFIG_OF */
+
+		if (isFirstReq[chan] == MV_TRUE) {
+
+			if (chainIndex[chan] == 0)
+				chainReqNum[chan] = 0;
+
+			isFirstReq[chan] = MV_FALSE;
+
+			if (pNextActiveChain[chan]->state == MV_CESA_PENDING) {
+
+				/* Start request Process */
+				mvCesaReqProcessStart(chan, pNextActiveChain[chan]);
+				pEndCurrChain[chan] = pNextActiveChain[chan];
+				if (chainIndex[chan] > 0)
+					chainIndex[chan]--;
+				/* Update pNextActiveChain to next chain head */
+				while (pNextActiveChain[chan]->state == MV_CESA_CHAIN)
+					pNextActiveChain[chan] = MV_CESA_REQ_NEXT_PTR(chan, pNextActiveChain[chan]);
+			}
+
+		}
+
+		/* Check if there are more processed requests - can we remove pEndCurrChain ??? */
+		if (pCesaReqProcess[chan] == pEndCurrChain[chan]) {
+
+			isFirstReq[chan] = MV_TRUE;
+			pEndCurrChain[chan] = pNextActiveChain[chan];
+			return MV_EMPTY;
+		}
+#ifdef CONFIG_OF
+	} else {
+		if (pCesaReqProcess[chan]->state != MV_CESA_PROCESS)
+			return MV_EMPTY;
+	}
+#endif
+#else
+	if (pCesaReqProcess[chan]->state != MV_CESA_PROCESS) {
+		return MV_EMPTY;
+	}
+#endif /* MV_CESA_CHAIN_MODE */
+
+#if defined(MV_CESA_INT_COALESCING_SUPPORT) || defined(CONFIG_OF)
+#ifdef CONFIG_OF
+	if (mv_cesa_feature == INT_COALESCING) {
+#endif /* CONFIG_OF */
+		statusReg = MV_REG_READ(MV_CESA_STATUS_REG(chan));
+		if ((statusReg & MV_CESA_STATUS_ACTIVE_MASK) &&
+			(pCesaReqProcessCurr[chan] == pCesaReqProcess[chan])) {
+			cesaStats.notReadyCount++;
+			return MV_NOT_READY;
+		}
+#ifdef CONFIG_OF
+	}
+#endif /* CONFIG_OF */
+#endif /* MV_CESA_INT_COALESCING_SUPPORT || CONFIG_OF */
+
+	cesaStats.readyCount++;
+
+	pReq = pCesaReqProcess[chan];
+	pSA = pCesaSAD[pReq->pCmd->sessionId];
+
+	pResult->retCode = MV_OK;
+	if (pReq->fragMode != MV_CESA_FRAG_NONE) {
+		MV_U8 *pNewDigest;
+		int frag;
+
+#if defined(MV_CESA_CHAIN_MODE) || defined(MV_CESA_INT_COALESCING_SUPPORT) || \
+							     defined(CONFIG_OF)
+		pReq->frags.nextFrag = 1;
+		while (pReq->frags.nextFrag <= pReq->frags.numFrag) {
+#endif
+
+			frag = (pReq->frags.nextFrag - 1);
+
+			/* Restore DMA descriptor list */
+			pReq->dma[frag].pDmaLast->phyNextDescPtr =
+			    MV_32BIT_LE(mvCesaVirtToPhys(&pReq->dmaDescBuf, &pReq->dma[frag].pDmaLast[1]));
+			pReq->dma[frag].pDmaLast = NULL;
+
+			/* Special processing for finished fragmented request */
+			if (pReq->frags.nextFrag >= pReq->frags.numFrag) {
+				mvCesaMbufCacheUnmap(pReq->pCmd->pDst, 0, pReq->pCmd->pDst->mbufSize);
+
+				/* Fragmented packet is ready */
+				if ((pSA->config & MV_CESA_OPERATION_MASK) !=
+				    (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET)) {
+					int macDataSize = pReq->pCmd->macLength - pReq->frags.macSize;
+
+					if (macDataSize != 0) {
+						/* Calculate all other blocks by SW */
+						mvCesaFragAuthComplete(chan, pReq, pSA, macDataSize);
+					}
+
+					/* Copy new digest from SRAM to the Destination buffer */
+					pNewDigest = cesaSramVirtPtr[chan]->buf + pReq->frags.newDigestOffset;
+					status = mvCesaCopyToMbuf(pNewDigest, pReq->pCmd->pDst,
+								  pReq->pCmd->digestOffset, pSA->digestSize);
+
+					/* For decryption: Compare new digest value with original one */
+					if ((pSA->config & MV_CESA_DIRECTION_MASK) ==
+					    (MV_CESA_DIR_DECODE << MV_CESA_DIRECTION_BIT)) {
+						if (memcmp(pNewDigest, pReq->frags.orgDigest, pSA->digestSize) != 0) {
+/*
+						mvOsPrintf("Digest error: chan=%d, newDigest=%p, orgDigest=%p, status = 0x%x\n",
+							chan, pNewDigest, pReq->frags.orgDigest, MV_REG_READ(MV_CESA_STATUS_REG));
+*/
+							/* Signiture verification is failed */
+							pResult->retCode = MV_FAIL;
+						}
+					}
+				}
+				readyStatus = MV_OK;
+			}
+#if defined(MV_CESA_CHAIN_MODE) || defined(MV_CESA_INT_COALESCING_SUPPORT) || \
+							     defined(CONFIG_OF)
+#ifdef CONFIG_OF
+		if ((mv_cesa_feature == INT_COALESCING) ||
+					(mv_cesa_feature == CHAIN))
+			pReq->frags.nextFrag++;
+		else
+			break;
+#else /* CONFIG_OF */
+			pReq->frags.nextFrag++;
+#endif /* CONFIG_OF */
+		}
+#endif /* MV_CESA_CHAIN_MODE || MV_CESA_INT_COALESCING_SUPPORT || CONFIG_OF */
+	} else {
+		mvCesaMbufCacheUnmap(pReq->pCmd->pDst, 0, pReq->pCmd->pDst->mbufSize);
+
+		/* Restore DMA descriptor list */
+		pReq->dma[0].pDmaLast->phyNextDescPtr =
+		    MV_32BIT_LE(mvCesaVirtToPhys(&pReq->dmaDescBuf, &pReq->dma[0].pDmaLast[1]));
+		pReq->dma[0].pDmaLast = NULL;
+		if (((pSA->config & MV_CESA_OPERATION_MASK) !=
+		     (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET)) &&
+		    ((pSA->config & MV_CESA_DIRECTION_MASK) == (MV_CESA_DIR_DECODE << MV_CESA_DIRECTION_BIT))) {
+			/* For AUTH on decode : Check Digest result in Status register */
+			statusReg = MV_REG_READ(MV_CESA_STATUS_REG(chan));
+			if (statusReg & MV_CESA_STATUS_DIGEST_ERR_MASK) {
+/*
+				mvOsPrintf("Digest error: chan=%d, status = 0x%x\n",
+						chan, statusReg);
+*/
+				/* Signiture verification is failed */
+				pResult->retCode = MV_FAIL;
+			}
+		}
+		readyStatus = MV_OK;
+	}
+
+	if (readyStatus == MV_OK) {
+		/* If Request is ready - Prepare pResult structure */
+		pResult->pReqPrv = pReq->pCmd->pReqPrv;
+		pResult->sessionId = pReq->pCmd->sessionId;
+		pResult->mbufSize = pReq->pCmd->pSrc->mbufSize;
+		pResult->reqId = pReq->pCmd->reqId;
+		pReq->state = MV_CESA_IDLE;
+		pCesaReqProcess[chan] = MV_CESA_REQ_NEXT_PTR(chan, pCesaReqProcess[chan]);
+		cesaReqResources[chan]++;
+
+		if (pSA->ctrMode) {
+			/* For AES CTR mode - complete processing and free allocated resources */
+			mvCesaCtrModeComplete(pReq->pOrgCmd, pReq->pCmd);
+			mvCesaCtrModeFinish(pReq->pCmd);
+			pReq->pOrgCmd = NULL;
+		}
+	}
+
+#if defined(MV_CESA_INT_PER_PACKET) || defined(CONFIG_OF)
+#ifdef CONFIG_OF
+	if (mv_cesa_feature == INT_PER_PACKET) {
+#endif /* CONFIG_OF */
+
+		if (pCesaReqProcess[chan]->state == MV_CESA_PENDING)
+			mvCesaReqProcessStart(chan, pCesaReqProcess[chan]);
+#ifdef CONFIG_OF
+	}
+#endif /* CONFIG_OF */
+#endif /* MV_CESA_INT_PER_PACKET || CONFIG_OF */
+
+#if defined(MV_CESA_INT_COALESCING_SUPPORT) || defined(CONFIG_OF)
+#ifdef CONFIG_OF
+	if (mv_cesa_feature == INT_COALESCING) {
+#endif /* CONFIG_OF */
+		statusReg = MV_REG_READ(MV_CESA_STATUS_REG(chan));
+		if (!(statusReg & MV_CESA_STATUS_ACTIVE_MASK)) {
+			if (pCesaReqStartNext[chan]->state == MV_CESA_PENDING) {
+				mvCesaReqProcessStart(chan, pCesaReqStartNext[chan]);
+				pCesaReqProcessCurr[chan] = pCesaReqStartNext[chan];
+				pCesaReqStartNext[chan] = MV_CESA_REQ_NEXT_PTR(chan, pCesaReqStartNext[chan]);
+			}
+		}
+#ifdef CONFIG_OF
+	}
+#endif /* CONFIG_OF */
+#endif /* MV_CESA_INT_COALESCING_SUPPORT || CONFIG_OF */
+	return readyStatus;
+}
+
+/***************** Functions to work with CESA_MBUF structure ******************/
+
+/*******************************************************************************
+* mvCesaMbufOffset - Locate offset in the Mbuf structure
+*
+* DESCRIPTION:
+*       This function locates offset inside Multi-Bufeer structure.
+*       It get fragment number and place in the fragment where the offset
+*       is located.
+*
+*
+* INPUT:
+*   MV_CESA_MBUF* pMbuf  - Pointer to multi-buffer structure
+*   int           offset - Offset from the beginning of the data presented by
+*                        the Mbuf structure.
+*
+* OUTPUT:
+*   int*        pBufOffset  - Offset from the beginning of the fragment where
+*                           the offset is located.
+*
+* RETURN:
+*       int - Number of fragment, where the offset is located\
+*
+*******************************************************************************/
+int mvCesaMbufOffset(MV_CESA_MBUF *pMbuf, int offset, int *pBufOffset)
+{
+	int frag = 0;
+
+	while (offset > 0) {
+		if (frag >= pMbuf->numFrags) {
+			mvOsPrintf("mvCesaMbufOffset: Error: frag (%d) > numFrags (%d)\n", frag, pMbuf->numFrags);
+			return MV_INVALID;
+		}
+		if (offset < pMbuf->pFrags[frag].bufSize)
+			break;
+
+		offset -= pMbuf->pFrags[frag].bufSize;
+		frag++;
+	}
+	if (pBufOffset != NULL)
+		*pBufOffset = offset;
+
+	return frag;
+}
+
+/*******************************************************************************
+* mvCesaCopyFromMbuf - Copy data from the Mbuf structure to continuous buffer
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*   MV_U8*          pDstBuf  - Pointer to continuous buffer, where data is
+*                              copied to.
+*   MV_CESA_MBUF*   pSrcMbuf - Pointer to multi-buffer structure where data is
+*                              copied from.
+*   int             offset   - Offset in the Mbuf structure where located first
+*                            byte of data should be copied.
+*   int             size     - Size of data should be copied
+*
+* RETURN:
+*       MV_OK           - Success, all data is copied successfully.
+*       MV_OUT_OF_RANGE - Failed, offset is out of Multi-buffer data range.
+*                         No data is copied.
+*       MV_EMPTY        - Multi-buffer structure has not enough data to copy
+*                       Data from the offset to end of Mbuf data is copied.
+*
+*******************************************************************************/
+MV_STATUS mvCesaCopyFromMbuf(MV_U8 *pDstBuf, MV_CESA_MBUF *pSrcMbuf, int offset, int size)
+{
+	int frag, fragOffset, bufSize;
+	MV_U8 *pBuf;
+
+	if (size == 0)
+		return MV_OK;
+
+	frag = mvCesaMbufOffset(pSrcMbuf, offset, &fragOffset);
+	if (frag == MV_INVALID) {
+		mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
+		return MV_OUT_OF_RANGE;
+	}
+
+	bufSize = pSrcMbuf->pFrags[frag].bufSize - fragOffset;
+	pBuf = pSrcMbuf->pFrags[frag].bufVirtPtr + fragOffset;
+	while (MV_TRUE) {
+		if (size <= bufSize) {
+			memcpy(pDstBuf, pBuf, size);
+			return MV_OK;
+		}
+		memcpy(pDstBuf, pBuf, bufSize);
+		size -= bufSize;
+		frag++;
+		pDstBuf += bufSize;
+		if (frag >= pSrcMbuf->numFrags)
+			break;
+
+		bufSize = pSrcMbuf->pFrags[frag].bufSize;
+		pBuf = pSrcMbuf->pFrags[frag].bufVirtPtr;
+	}
+	mvOsPrintf("mvCesaCopyFromMbuf: Mbuf is EMPTY - %d bytes isn't copied\n", size);
+	return MV_EMPTY;
+}
+
+/*******************************************************************************
+* mvCesaCopyToMbuf - Copy data from continuous buffer to the Mbuf structure
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*   MV_U8*          pSrcBuf  - Pointer to continuous buffer, where data is
+*                              copied from.
+*   MV_CESA_MBUF*   pDstMbuf - Pointer to multi-buffer structure where data is
+*                              copied to.
+*   int             offset   - Offset in the Mbuf structure where located first
+*                            byte of data should be copied.
+*   int             size     - Size of data should be copied
+*
+* RETURN:
+*       MV_OK           - Success, all data is copied successfully.
+*       MV_OUT_OF_RANGE - Failed, offset is out of Multi-buffer data range.
+*                         No data is copied.
+*       MV_FULL         - Multi-buffer structure has not enough place to copy
+*                       all data. Data from the offset to end of Mbuf data
+*                       is copied.
+*
+*******************************************************************************/
+MV_STATUS mvCesaCopyToMbuf(MV_U8 *pSrcBuf, MV_CESA_MBUF *pDstMbuf, int offset, int size)
+{
+	int frag, fragOffset, bufSize;
+	MV_U8 *pBuf;
+
+	if (size == 0)
+		return MV_OK;
+
+	frag = mvCesaMbufOffset(pDstMbuf, offset, &fragOffset);
+	if (frag == MV_INVALID) {
+		mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
+		return MV_OUT_OF_RANGE;
+	}
+
+	bufSize = pDstMbuf->pFrags[frag].bufSize - fragOffset;
+	pBuf = pDstMbuf->pFrags[frag].bufVirtPtr + fragOffset;
+	while (MV_TRUE) {
+		if (size <= bufSize) {
+			memcpy(pBuf, pSrcBuf, size);
+			return MV_OK;
+		}
+		memcpy(pBuf, pSrcBuf, bufSize);
+		size -= bufSize;
+		frag++;
+		pSrcBuf += bufSize;
+		if (frag >= pDstMbuf->numFrags)
+			break;
+
+		bufSize = pDstMbuf->pFrags[frag].bufSize;
+		pBuf = pDstMbuf->pFrags[frag].bufVirtPtr;
+	}
+	mvOsPrintf("mvCesaCopyToMbuf: Mbuf is FULL - %d bytes isn't copied\n", size);
+	return MV_FULL;
+}
+
+/*******************************************************************************
+* mvCesaMbufCopy - Copy data from one Mbuf structure to the other Mbuf structure
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*
+*   MV_CESA_MBUF*   pDstMbuf - Pointer to multi-buffer structure where data is
+*                              copied to.
+*   int      dstMbufOffset   - Offset in the dstMbuf structure where first byte
+*                            of data should be copied to.
+*   MV_CESA_MBUF*   pSrcMbuf - Pointer to multi-buffer structure where data is
+*                              copied from.
+*   int      srcMbufOffset   - Offset in the srcMbuf structure where first byte
+*                            of data should be copied from.
+*   int             size     - Size of data should be copied
+*
+* RETURN:
+*       MV_OK           - Success, all data is copied successfully.
+*       MV_OUT_OF_RANGE - Failed, srcMbufOffset or dstMbufOffset is out of
+*                       srcMbuf or dstMbuf structure correspondently.
+*                       No data is copied.
+*       MV_BAD_SIZE     - srcMbuf or dstMbuf structure is too small to copy
+*                       all data. Partial data is copied
+*
+*******************************************************************************/
+MV_STATUS mvCesaMbufCopy(MV_CESA_MBUF *pMbufDst, int dstMbufOffset,
+			 MV_CESA_MBUF *pMbufSrc, int srcMbufOffset, int size)
+{
+	int srcFrag, dstFrag, srcSize, dstSize, srcOffset, dstOffset;
+	int copySize;
+	MV_U8 *pSrc, *pDst;
+
+	if (size == 0)
+		return MV_OK;
+
+	srcFrag = mvCesaMbufOffset(pMbufSrc, srcMbufOffset, &srcOffset);
+	if (srcFrag == MV_INVALID) {
+		mvOsPrintf("CESA srcMbuf Error: offset (%d) out of range\n", srcMbufOffset);
+		return MV_OUT_OF_RANGE;
+	}
+	pSrc = pMbufSrc->pFrags[srcFrag].bufVirtPtr + srcOffset;
+	srcSize = pMbufSrc->pFrags[srcFrag].bufSize - srcOffset;
+
+	dstFrag = mvCesaMbufOffset(pMbufDst, dstMbufOffset, &dstOffset);
+	if (dstFrag == MV_INVALID) {
+		mvOsPrintf("CESA dstMbuf Error: offset (%d) out of range\n", dstMbufOffset);
+		return MV_OUT_OF_RANGE;
+	}
+	pDst = pMbufDst->pFrags[dstFrag].bufVirtPtr + dstOffset;
+	dstSize = pMbufDst->pFrags[dstFrag].bufSize - dstOffset;
+
+	while (size > 0) {
+		copySize = MV_MIN(srcSize, dstSize);
+		if (size <= copySize) {
+			memcpy(pDst, pSrc, size);
+			return MV_OK;
+		}
+		memcpy(pDst, pSrc, copySize);
+		size -= copySize;
+		srcSize -= copySize;
+		dstSize -= copySize;
+
+		if (srcSize == 0) {
+			srcFrag++;
+			if (srcFrag >= pMbufSrc->numFrags)
+				break;
+
+			pSrc = pMbufSrc->pFrags[srcFrag].bufVirtPtr;
+			srcSize = pMbufSrc->pFrags[srcFrag].bufSize;
+		}
+
+		if (dstSize == 0) {
+			dstFrag++;
+			if (dstFrag >= pMbufDst->numFrags)
+				break;
+
+			pDst = pMbufDst->pFrags[dstFrag].bufVirtPtr;
+			dstSize = pMbufDst->pFrags[dstFrag].bufSize;
+		}
+	}
+	mvOsPrintf("mvCesaMbufCopy: BAD size - %d bytes isn't copied\n", size);
+
+	return MV_BAD_SIZE;
+}
+
+MV_STATUS mvCesaUpdateSADSize(MV_U32 size)
+{
+	MV_CESA_SA **pNewCesaSAD = NULL;
+
+	/*mvOsPrintf("mvCesaUpdateSADSize: Increasing SA Database to %d sessions\n",size); */
+
+	/* Allocate new buffer to hold larger SAD */
+	pNewCesaSAD = mvOsMalloc(sizeof(MV_CESA_SA *) * size);
+	if (pNewCesaSAD == NULL) {
+		mvOsPrintf("mvCesaUpdateSADSize: Can't allocate %d bytes for new SAD buffer\n", size);
+		return MV_FAIL;
+	}
+	memset(pNewCesaSAD, 0, (sizeof(MV_CESA_SA *) * size));
+	mvOsMemcpy(pNewCesaSAD, pCesaSAD, (sizeof(MV_CESA_SA *) * cesaMaxSA));
+	mvOsFree(pCesaSAD);
+	pCesaSAD = pNewCesaSAD;
+	cesaMaxSA = size;
+
+	return MV_OK;
+}
+
+static MV_STATUS mvCesaMbufCacheUnmap(MV_CESA_MBUF *pMbuf, int offset, int size)
+{
+	int frag, fragOffset, bufSize;
+	MV_U8 *pBuf;
+
+	if (size == 0)
+		return MV_OK;
+
+	frag = mvCesaMbufOffset(pMbuf, offset, &fragOffset);
+	if (frag == MV_INVALID) {
+		mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
+		return MV_OUT_OF_RANGE;
+	}
+
+	bufSize = pMbuf->pFrags[frag].bufSize - fragOffset;
+	pBuf = pMbuf->pFrags[frag].bufVirtPtr + fragOffset;
+	while (MV_TRUE) {
+		if (size <= bufSize) {
+			mvOsCacheUnmap(cesaOsHandle, mvOsIoVirtToPhy(cesaOsHandle, pBuf), size);
+			return MV_OK;
+		}
+
+		mvOsCacheUnmap(cesaOsHandle, mvOsIoVirtToPhy(cesaOsHandle, pBuf), bufSize);
+		size -= bufSize;
+		frag++;
+		if (frag >= pMbuf->numFrags)
+			break;
+
+		bufSize = pMbuf->pFrags[frag].bufSize;
+		pBuf = pMbuf->pFrags[frag].bufVirtPtr;
+	}
+	mvOsPrintf("%s: Mbuf is FULL - %d bytes isn't Unmapped\n", __func__, size);
+	return MV_FULL;
+}
+
+/*************************************** Local Functions ******************************/
+
+/*******************************************************************************
+* mvCesaFragReqProcess - Process fragmented request
+*
+* DESCRIPTION:
+*       This function processes a fragment of fragmented request (First, Middle or Last)
+*
+*
+* INPUT:
+*       MV_CESA_REQ* pReq   - Pointer to the request in the request queue.
+*
+* RETURN:
+*       MV_OK        - The fragment is successfully passed to HW for processing.
+*       MV_TERMINATE - Means, that HW finished its work on this packet and no more
+*                    interrupts will be generated for this request.
+*                    Function mvCesaReadyGet() must be called to complete request
+*                    processing and get request result.
+*
+*******************************************************************************/
+static MV_STATUS mvCesaFragReqProcess(MV_U8 chan, MV_CESA_REQ *pReq, MV_U8 frag)
+{
+	int i, copySize, cryptoDataSize, macDataSize, sid;
+	int cryptoIvOffset, digestOffset;
+	MV_U32 config;
+	MV_CESA_COMMAND *pCmd = pReq->pCmd;
+	MV_CESA_SA *pSA;
+	MV_CESA_MBUF *pMbuf;
+	MV_DMA_DESC *pDmaDesc = pReq->dma[frag].pDmaFirst;
+	MV_U8 *pSramBuf = cesaSramVirtPtr[chan]->buf;
+	int macTotalLen = 0;
+	int fixOffset, cryptoOffset, macOffset;
+
+	cesaStats.fragCount++;
+
+	sid = pReq->pCmd->sessionId;
+
+	pSA = pCesaSAD[sid];
+
+	cryptoIvOffset = digestOffset = 0;
+	i = macDataSize = 0;
+	cryptoDataSize = 0;
+
+	/* First fragment processing */
+	if (pReq->fragMode == MV_CESA_FRAG_FIRST) {
+		/* pReq->frags monitors processing of fragmented request between fragments */
+		pReq->frags.bufOffset = 0;
+		pReq->frags.cryptoSize = 0;
+		pReq->frags.macSize = 0;
+
+		config = pSA->config | (MV_CESA_FRAG_FIRST << MV_CESA_FRAG_MODE_OFFSET);
+
+		/* fixOffset can be not equal to zero only for FIRST fragment */
+		fixOffset = pReq->fixOffset;
+		/* For FIRST fragment crypto and mac offsets are taken from pCmd */
+		cryptoOffset = pCmd->cryptoOffset;
+		macOffset = pCmd->macOffset;
+
+		copySize = sizeof(cesaSramVirtPtr[chan]->buf) - pReq->fixOffset;
+
+		/* Find fragment size: Must meet all requirements for CRYPTO and MAC
+		 * cryptoDataSize   - size of data will be encrypted/decrypted in this fragment
+		 * macDataSize      - size of data will be signed/verified in this fragment
+		 * copySize         - size of data will be copied from srcMbuf to SRAM and
+		 *                  back to dstMbuf for this fragment
+		 */
+		mvCesaFragSizeFind(pSA, pReq, cryptoOffset, macOffset, &copySize, &cryptoDataSize, &macDataSize);
+
+		if ((pSA->config & MV_CESA_OPERATION_MASK) != (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET)) {
+			/* CryptoIV special processing */
+			if ((pSA->config & MV_CESA_CRYPTO_MODE_MASK) == (MV_CESA_CRYPTO_CBC << MV_CESA_CRYPTO_MODE_BIT)) {
+				/* In CBC mode for encode direction when IV from user */
+				if ((pCmd->ivFromUser) &&
+				    ((pSA->config & MV_CESA_DIRECTION_MASK) ==
+				     (MV_CESA_DIR_ENCODE << MV_CESA_DIRECTION_BIT))) {
+
+					/* For Crypto Encode in CBC mode HW always takes IV from SRAM IVPointer,
+					 * (not from IVBufPointer). So when ivFromUser==1, we should copy IV from user place
+					 * in the buffer to SRAM IVPointer
+					 */
+					i += mvCesaDmaCopyPrepare(chan, pCmd->pSrc, cesaSramVirtPtr[chan]->cryptoIV,
+									&pDmaDesc[i], MV_FALSE, pCmd->ivOffset,
+									pSA->cryptoIvSize, pCmd->skipFlush);
+				}
+
+				/* Special processing when IV is not located in the first fragment */
+				if (pCmd->ivOffset > (copySize - pSA->cryptoIvSize)) {
+					/* Prepare dummy place for cryptoIV in SRAM */
+					cryptoIvOffset = cesaSramVirtPtr[chan]->tempCryptoIV - mvCesaSramAddrGet(chan);
+
+					/* For Decryption: Copy IV value from pCmd->ivOffset to Special SRAM place */
+					if ((pSA->config & MV_CESA_DIRECTION_MASK) ==
+					    (MV_CESA_DIR_DECODE << MV_CESA_DIRECTION_BIT)) {
+						i += mvCesaDmaCopyPrepare(chan, pCmd->pSrc,
+										cesaSramVirtPtr[chan]->tempCryptoIV,
+										&pDmaDesc[i], MV_FALSE, pCmd->ivOffset,
+										pSA->cryptoIvSize, pCmd->skipFlush);
+					} else {
+						/* For Encryption when IV is NOT from User: */
+						/* Copy IV from SRAM to buffer (pCmd->ivOffset) */
+						if (pCmd->ivFromUser == 0) {
+							/* copy IV value from cryptoIV to Buffer (pCmd->ivOffset) */
+							i += mvCesaDmaCopyPrepare(chan, pCmd->pSrc,
+								cesaSramVirtPtr[chan]->cryptoIV, &pDmaDesc[i],
+								MV_TRUE, pCmd->ivOffset, pSA->cryptoIvSize, pCmd->skipFlush);
+						}
+					}
+				} else {
+					cryptoIvOffset = pCmd->ivOffset;
+				}
+			}
+		}
+
+		if ((pSA->config & MV_CESA_OPERATION_MASK) != (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET)) {
+			/* MAC digest special processing on Decode direction */
+			if ((pSA->config & MV_CESA_DIRECTION_MASK) == (MV_CESA_DIR_DECODE << MV_CESA_DIRECTION_BIT)) {
+				/* Save digest from pCmd->digestOffset */
+				mvCesaCopyFromMbuf(pReq->frags.orgDigest,
+						   pCmd->pSrc, pCmd->digestOffset, pSA->digestSize);
+
+				/* If pCmd->digestOffset is not located on the first */
+				if (pCmd->digestOffset > (copySize - pSA->digestSize)) {
+					MV_U8 digestZero[MV_CESA_MAX_DIGEST_SIZE];
+
+					/* Set zeros to pCmd->digestOffset (DRAM) */
+					memset(digestZero, 0, MV_CESA_MAX_DIGEST_SIZE);
+					mvCesaCopyToMbuf(digestZero, pCmd->pSrc, pCmd->digestOffset, pSA->digestSize);
+
+					/* Prepare dummy place for digest in SRAM */
+					digestOffset = cesaSramVirtPtr[chan]->tempDigest - mvCesaSramAddrGet(chan);
+				} else {
+					digestOffset = pCmd->digestOffset;
+				}
+			}
+		}
+		/* Update SA in SRAM */
+		if (cesaLastSid[chan] != sid) {
+			mvCesaSramSaUpdate(chan, sid, &pDmaDesc[i]);
+			i++;
+		}
+
+		pReq->fragMode = MV_CESA_FRAG_MIDDLE;
+	} else {
+		/* Continue fragment */
+		fixOffset = 0;
+		cryptoOffset = 0;
+		macOffset = 0;
+		if ((pCmd->pSrc->mbufSize - pReq->frags.bufOffset) <= sizeof(cesaSramVirtPtr[chan]->buf)) {
+			/* Last fragment */
+			config = pSA->config | (MV_CESA_FRAG_LAST << MV_CESA_FRAG_MODE_OFFSET);
+			pReq->fragMode = MV_CESA_FRAG_LAST;
+			copySize = pCmd->pSrc->mbufSize - pReq->frags.bufOffset;
+
+			if ((pSA->config & MV_CESA_OPERATION_MASK) != (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET)) {
+				macDataSize = pCmd->macLength - pReq->frags.macSize;
+
+				/* If pCmd->digestOffset is not located on last fragment */
+				if (pCmd->digestOffset < pReq->frags.bufOffset) {
+					/* Prepare dummy place for digest in SRAM */
+					digestOffset = cesaSramVirtPtr[chan]->tempDigest - mvCesaSramAddrGet(chan);
+				} else {
+					digestOffset = pCmd->digestOffset - pReq->frags.bufOffset;
+				}
+				pReq->frags.newDigestOffset = digestOffset;
+				macTotalLen = pCmd->macLength;
+			}
+
+			if ((pSA->config & MV_CESA_OPERATION_MASK) != (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET))
+				cryptoDataSize = pCmd->cryptoLength - pReq->frags.cryptoSize;
+
+			/* cryptoIvOffset - don't care */
+		} else {
+			/* Middle fragment */
+			config = pSA->config | (MV_CESA_FRAG_MIDDLE << MV_CESA_FRAG_MODE_OFFSET);
+			copySize = sizeof(cesaSramVirtPtr[chan]->buf);
+			/* digestOffset and cryptoIvOffset - don't care */
+
+			/* Find fragment size */
+			mvCesaFragSizeFind(pSA, pReq, cryptoOffset, macOffset,
+					   &copySize, &cryptoDataSize, &macDataSize);
+		}
+	}
+
+	/********* Prepare DMA descriptors to copy from pSrc to SRAM *********/
+	pMbuf = pCmd->pSrc;
+	i += mvCesaDmaCopyPrepare(chan, pMbuf, pSramBuf + fixOffset, &pDmaDesc[i],
+				  MV_FALSE, pReq->frags.bufOffset, copySize, pCmd->skipFlush);
+
+	/* Prepare CESA descriptor to copy from DRAM to SRAM by DMA */
+	mvCesaSramDescrBuild(chan, config, frag,
+			     cryptoOffset + fixOffset, cryptoIvOffset + fixOffset,
+			     cryptoDataSize, macOffset + fixOffset,
+			     digestOffset + fixOffset, macDataSize, macTotalLen, pReq, &pDmaDesc[i]);
+	i++;
+
+	/* Add special descriptor Ownership for CPU */
+	pDmaDesc[i].byteCnt = 0;
+	pDmaDesc[i].phySrcAdd = 0;
+	pDmaDesc[i].phyDestAdd = 0;
+	i++;
+
+	/********* Prepare DMA descriptors to copy from SRAM to pDst *********/
+	pMbuf = pCmd->pDst;
+	i += mvCesaDmaCopyPrepare(chan, pMbuf, pSramBuf + fixOffset, &pDmaDesc[i],
+				  MV_TRUE, pReq->frags.bufOffset, copySize, pCmd->skipFlush);
+
+	/* Next field of Last DMA descriptor must be NULL */
+	pDmaDesc[i - 1].phyNextDescPtr = 0;
+	pReq->dma[frag].pDmaLast = &pDmaDesc[i - 1];
+	mvOsCacheFlush(cesaOsHandle, pReq->dma[frag].pDmaFirst, i * sizeof(MV_DMA_DESC));
+
+	/*mvCesaDebugDescriptor(&cesaSramVirtPtr[chan]->desc[frag]); */
+
+	pReq->frags.bufOffset += copySize;
+	pReq->frags.cryptoSize += cryptoDataSize;
+	pReq->frags.macSize += macDataSize;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaReqProcess - Process regular (Non-fragmented) request
+*
+* DESCRIPTION:
+*       This function processes the whole (not fragmented) request
+*
+* INPUT:
+*       MV_CESA_REQ* pReq   - Pointer to the request in the request queue.
+*
+* RETURN:
+*       MV_OK   - The request is successfully passed to HW for processing.
+*       Other   - Failure. The request will not be processed
+*
+*******************************************************************************/
+static MV_STATUS mvCesaReqProcess(MV_U8 chan, MV_CESA_REQ *pReq)
+{
+	MV_CESA_MBUF *pMbuf;
+	MV_DMA_DESC *pDmaDesc;
+	MV_U8 *pSramBuf;
+	int sid, i, fixOffset;
+	MV_CESA_SA *pSA;
+	MV_CESA_COMMAND *pCmd = pReq->pCmd;
+
+	cesaStats.procCount++;
+
+	sid = pCmd->sessionId;
+	pSA = pCesaSAD[sid];
+	pDmaDesc = pReq->dma[0].pDmaFirst;
+	pSramBuf = cesaSramVirtPtr[chan]->buf;
+	fixOffset = pReq->fixOffset;
+
+/*
+    mvOsPrintf("mvCesaReqProcess: sid=%d, pSA=%p, pDmaDesc=%p, pSramBuf=%p\n",
+			sid, pSA, pDmaDesc, pSramBuf);
+*/
+	i = 0;
+
+	/* Crypto IV Special processing in CBC mode for Encryption direction */
+	if (((pSA->config & MV_CESA_OPERATION_MASK) != (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET)) &&
+	    ((pSA->config & MV_CESA_CRYPTO_MODE_MASK) == (MV_CESA_CRYPTO_CBC << MV_CESA_CRYPTO_MODE_BIT)) &&
+	    ((pSA->config & MV_CESA_DIRECTION_MASK) == (MV_CESA_DIR_ENCODE << MV_CESA_DIRECTION_BIT)) &&
+	    (pCmd->ivFromUser)) {
+		/* For Crypto Encode in CBC mode HW always takes IV from SRAM IVPointer,
+		 * (not from IVBufPointer). So when ivFromUser==1, we should copy IV from user place
+		 * in the buffer to SRAM IVPointer
+		 */
+		i += mvCesaDmaCopyPrepare(chan, pCmd->pSrc, cesaSramVirtPtr[chan]->cryptoIV, &pDmaDesc[i],
+					  MV_FALSE, pCmd->ivOffset, pSA->cryptoIvSize, pCmd->skipFlush);
+	}
+
+	/* Update SA in SRAM */
+	if (cesaLastSid[chan] != sid) {
+		mvCesaSramSaUpdate(chan, sid, &pDmaDesc[i]);
+		i++;
+	}
+
+	/********* Prepare DMA descriptors to copy from pSrc to SRAM *********/
+	pMbuf = pCmd->pSrc;
+	i += mvCesaDmaCopyPrepare(chan, pMbuf, pSramBuf + fixOffset, &pDmaDesc[i],
+				  MV_FALSE, 0, pMbuf->mbufSize, pCmd->skipFlush);
+
+	/* Prepare Security Accelerator descriptor to SRAM words 0 - 7 */
+	mvCesaSramDescrBuild(chan, pSA->config, 0, pCmd->cryptoOffset + fixOffset,
+			     pCmd->ivOffset + fixOffset, pCmd->cryptoLength,
+			     pCmd->macOffset + fixOffset, pCmd->digestOffset + fixOffset,
+			     pCmd->macLength, pCmd->macLength, pReq, &pDmaDesc[i]);
+	i++;
+
+	/* Add special descriptor Ownership for CPU */
+	pDmaDesc[i].byteCnt = 0;
+	pDmaDesc[i].phySrcAdd = 0;
+	pDmaDesc[i].phyDestAdd = 0;
+	i++;
+
+	/********* Prepare DMA descriptors to copy from SRAM to pDst *********/
+	pMbuf = pCmd->pDst;
+	i += mvCesaDmaCopyPrepare(chan, pMbuf, pSramBuf + fixOffset, &pDmaDesc[i],
+				  MV_TRUE, 0, pMbuf->mbufSize, pCmd->skipFlush);
+
+	/* Next field of Last DMA descriptor must be NULL */
+	pDmaDesc[i - 1].phyNextDescPtr = 0;
+	pReq->dma[0].pDmaLast = &pDmaDesc[i - 1];
+	mvOsCacheFlush(cesaOsHandle, pReq->dma[0].pDmaFirst, i * sizeof(MV_DMA_DESC));
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaSramDescrBuild - Set CESA descriptor in SRAM
+*
+* DESCRIPTION:
+*       This function builds CESA descriptor in SRAM from all Command parameters
+*
+*
+* INPUT:
+*       int     chan            - CESA channel uses the descriptor
+*       MV_U32  config          - 32 bits of WORD_0 in CESA descriptor structure
+*       int     cryptoOffset    - Offset from the beginning of SRAM buffer where
+*                               data for encryption/decription is started.
+*       int     ivOffset        - Offset of crypto IV from the SRAM base. Valid only
+*                               for first fragment.
+*       int     cryptoLength    - Size (in bytes) of data for encryption/descryption
+*                               operation on this fragment.
+*       int     macOffset       - Offset from the beginning of SRAM buffer where
+*                               data for Authentication is started
+*       int     digestOffset    - Offset from the beginning of SRAM buffer where
+*                               digest is located. Valid for first and last fragments.
+*       int     macLength       - Size (in bytes) of data for Authentication
+*                               operation on this fragment.
+*       int     macTotalLen     - Toatl size (in bytes) of data for Authentication
+*                               operation on the whole request (packet). Valid for
+*                               last fragment only.
+*
+* RETURN:   None
+*
+*******************************************************************************/
+static void mvCesaSramDescrBuild(MV_U8 chan, MV_U32 config, int frag,
+				 int cryptoOffset, int ivOffset, int cryptoLength,
+				 int macOffset, int digestOffset, int macLength,
+				 int macTotalLen, MV_CESA_REQ *pReq, MV_DMA_DESC *pDmaDesc)
+{
+	MV_CESA_DESC *pCesaDesc = &pReq->pCesaDesc[frag];
+	MV_CESA_DESC *pSramDesc = &cesaSramVirtPtr[chan]->desc;
+	MV_U16 sramBufOffset = (MV_U16)((MV_U8 *)cesaSramVirtPtr[chan]->buf - mvCesaSramAddrGet(chan));
+
+	pCesaDesc->config = MV_32BIT_LE(config);
+
+	if ((config & MV_CESA_OPERATION_MASK) != (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET)) {
+		/* word 1 */
+		pCesaDesc->cryptoSrcOffset = MV_16BIT_LE(sramBufOffset + cryptoOffset);
+		pCesaDesc->cryptoDstOffset = MV_16BIT_LE(sramBufOffset + cryptoOffset);
+		/* word 2 */
+		pCesaDesc->cryptoDataLen = MV_16BIT_LE(cryptoLength);
+		/* word 3 */
+		pCesaDesc->cryptoKeyOffset = MV_16BIT_LE((MV_U16) (cesaSramVirtPtr[chan]->sramSA.cryptoKey -
+								   mvCesaSramAddrGet(chan)));
+		/* word 4 */
+		pCesaDesc->cryptoIvOffset = MV_16BIT_LE((MV_U16) (cesaSramVirtPtr[chan]->cryptoIV - mvCesaSramAddrGet(chan)));
+		pCesaDesc->cryptoIvBufOffset = MV_16BIT_LE(sramBufOffset + ivOffset);
+	}
+
+	if ((config & MV_CESA_OPERATION_MASK) != (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET)) {
+		/* word 5 */
+		pCesaDesc->macSrcOffset = MV_16BIT_LE(sramBufOffset + macOffset);
+		pCesaDesc->macTotalLen = MV_16BIT_LE(macTotalLen);
+
+		/* word 6 */
+		pCesaDesc->macDigestOffset = MV_16BIT_LE(sramBufOffset + digestOffset);
+		pCesaDesc->macDataLen = MV_16BIT_LE(macLength);
+
+		/* word 7 */
+		pCesaDesc->macInnerIvOffset = MV_16BIT_LE((MV_U16) (cesaSramVirtPtr[chan]->sramSA.macInnerIV -
+								    mvCesaSramAddrGet(chan)));
+		pCesaDesc->macOuterIvOffset = MV_16BIT_LE((MV_U16) (cesaSramVirtPtr[chan]->sramSA.macOuterIV -
+								    mvCesaSramAddrGet(chan)));
+	}
+	/* Prepare DMA descriptor to CESA descriptor from DRAM to SRAM */
+	pDmaDesc->phySrcAdd = MV_32BIT_LE(mvCesaVirtToPhys(&pReq->cesaDescBuf, pCesaDesc));
+	pDmaDesc->phyDestAdd = MV_32BIT_LE(mvCesaSramVirtToPhys(chan, NULL, (MV_U8 *) pSramDesc));
+	pDmaDesc->byteCnt = MV_32BIT_LE(sizeof(MV_CESA_DESC) | BIT31);
+
+	/* flush Source buffer */
+	mvOsCacheFlush(cesaOsHandle, pCesaDesc, sizeof(MV_CESA_DESC));
+}
+
+/*******************************************************************************
+* mvCesaSramSaUpdate - Move required SA information to SRAM if needed.
+*
+* DESCRIPTION:
+*   Copy to SRAM values of the required SA.
+*
+*
+* INPUT:
+*       short       sid          - Session ID needs SRAM Cache update
+*       MV_DMA_DESC *pDmaDesc   - Pointer to DMA descriptor used to
+*                                copy SA values from DRAM to SRAM.
+*
+* RETURN:
+*       MV_OK           - Cache entry for this SA copied to SRAM.
+*       MV_NO_CHANGE    - Cache entry for this SA already exist in SRAM
+*
+*******************************************************************************/
+static INLINE void mvCesaSramSaUpdate(MV_U8 chan, short sid, MV_DMA_DESC *pDmaDesc)
+{
+	MV_CESA_SA *pSA = pCesaSAD[sid];
+
+	/* Prepare DMA descriptor to Copy CACHE_SA from SA database in DRAM to SRAM */
+	pDmaDesc->byteCnt = MV_32BIT_LE(sizeof(MV_CESA_SRAM_SA) | BIT31);
+	pDmaDesc->phySrcAdd = pSA->sramSAPhysAddr;
+	pDmaDesc->phyDestAdd = MV_32BIT_LE(mvCesaSramVirtToPhys(chan, NULL, (MV_U8 *)&cesaSramVirtPtr[chan]->sramSA));
+
+	/* Source buffer is already flushed during OpenSession */
+	/*mvOsCacheFlush(cesaOsHandle, &pSA->sramSA, sizeof(MV_CESA_SRAM_SA)); */
+}
+
+/*******************************************************************************
+* mvCesaDmaCopyPrepare - prepare DMA descriptor list to copy data presented by
+*                       Mbuf structure from DRAM to SRAM
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*       MV_CESA_MBUF*   pMbuf       - pointer to Mbuf structure contains request
+*                                   data in DRAM
+*       MV_U8*          pSramBuf    - pointer to buffer in SRAM where data should
+*                                   be copied to.
+*       MV_DMA_DESC*    pDmaDesc   - pointer to first DMA descriptor for this copy.
+*                                   The function set number of DMA descriptors needed
+*                                   to copy the copySize bytes from Mbuf.
+*       MV_BOOL         isToMbuf    - Copy direction.
+*                                   MV_TRUE means copy from SRAM buffer to Mbuf in DRAM.
+*                                   MV_FALSE means copy from Mbuf in DRAM to SRAM buffer.
+*       int             offset      - Offset in the Mbuf structure that copy should be
+*                                   started from.
+*       int             copySize    - Size of data should be copied.
+*
+* RETURN:
+*       int  - number of DMA descriptors used for the copy.
+*
+*******************************************************************************/
+#ifndef MV_NETBSD
+static INLINE int mvCesaDmaCopyPrepare(MV_U8 chan, MV_CESA_MBUF *pMbuf, MV_U8 *pSramBuf,
+				       MV_DMA_DESC *pDmaDesc, MV_BOOL isToMbuf,
+				       int offset, int copySize, MV_BOOL skipFlush)
+{
+	int bufOffset, bufSize, size, frag, i;
+	MV_U8 *pBuf;
+
+	i = 0;
+
+	/* Calculate start place for copy: fragment number and offset in the fragment */
+	frag = mvCesaMbufOffset(pMbuf, offset, &bufOffset);
+	bufSize = pMbuf->pFrags[frag].bufSize - bufOffset;
+	pBuf = pMbuf->pFrags[frag].bufVirtPtr + bufOffset;
+
+	/* Size accumulate total copy size */
+	size = 0;
+
+	/* Create DMA lists to copy mBuf from pSrc to SRAM */
+	while (size < copySize) {
+		/* Find copy size for each DMA descriptor */
+		bufSize = MV_MIN(bufSize, (copySize - size));
+		pDmaDesc[i].byteCnt = MV_32BIT_LE(bufSize | BIT31);
+		if (isToMbuf) {
+			pDmaDesc[i].phyDestAdd = MV_32BIT_LE(mvOsIoVirtToPhy(cesaOsHandle, pBuf));
+			pDmaDesc[i].phySrcAdd = MV_32BIT_LE(mvCesaSramVirtToPhys(chan, NULL, (pSramBuf + size)));
+			/* invalidate the buffer */
+			if (skipFlush == MV_FALSE)
+				mvOsCacheInvalidate(cesaOsHandle, pBuf, bufSize);
+		} else {
+			pDmaDesc[i].phySrcAdd = MV_32BIT_LE(mvOsIoVirtToPhy(cesaOsHandle, pBuf));
+			pDmaDesc[i].phyDestAdd = MV_32BIT_LE(mvCesaSramVirtToPhys(chan, NULL, (pSramBuf + size)));
+			/* flush the buffer */
+			if (skipFlush == MV_FALSE)
+				mvOsCacheFlush(cesaOsHandle, pBuf, bufSize);
+		}
+
+		/* Count number of used DMA descriptors */
+		i++;
+		size += bufSize;
+
+		/* go to next fragment in the Mbuf */
+		frag++;
+		pBuf = pMbuf->pFrags[frag].bufVirtPtr;
+		bufSize = pMbuf->pFrags[frag].bufSize;
+	}
+	return i;
+}
+#else /* MV_NETBSD */
+static int mvCesaDmaCopyPrepare(MV_U8 chan, MV_CESA_MBUF *pMbuf, MV_U8 *pSramBuf,
+				MV_DMA_DESC *pDmaDesc, MV_BOOL isToMbuf, int offset, int copySize, MV_BOOL skipFlush)
+{
+	int bufOffset, bufSize, thisSize, size, frag, i;
+	MV_ULONG bufPhys, sramPhys;
+	MV_U8 *pBuf;
+
+	/*
+	 * Calculate start place for copy: fragment number and offset in
+	 * the fragment
+	 */
+	frag = mvCesaMbufOffset(pMbuf, offset, &bufOffset);
+
+	/*
+	 * Get SRAM physical address only once. We can update it in-place
+	 * as we build the descriptor chain.
+	 */
+	sramPhys = mvCesaSramVirtToPhys(chan, NULL, pSramBuf);
+
+	/*
+	 * 'size' accumulates total copy size, 'i' counts desccriptors.
+	 */
+	size = i = 0;
+
+	/* Create DMA lists to copy mBuf from pSrc to SRAM */
+	while (size < copySize) {
+		/*
+		 * Calculate # of bytes to copy from the current fragment,
+		 * and the pointer to the start of data
+		 */
+		bufSize = pMbuf->pFrags[frag].bufSize - bufOffset;
+		pBuf = pMbuf->pFrags[frag].bufVirtPtr + bufOffset;
+		bufOffset = 0;	/* First frag may be non-zero */
+		frag++;
+
+		/*
+		 * As long as there is data in the current fragment...
+		 */
+		while (bufSize > 0) {
+			/*
+			 * Ensure we don't cross an MMU page boundary.
+			 * XXX: This is NetBSD-specific, but it is a
+			 * quick and dirty way to fix the problem.
+			 * A true HAL would rely on the OS-specific
+			 * driver to do this...
+			 */
+			thisSize = PAGE_SIZE - (((MV_ULONG) pBuf) & (PAGE_SIZE - 1));
+			thisSize = MV_MIN(bufSize, thisSize);
+			/*
+			 * Make sure we don't copy more than requested
+			 */
+			if (thisSize > (copySize - size)) {
+				thisSize = copySize - size;
+				bufSize = 0;
+			}
+
+			/*
+			 * Physicall address of this fragment
+			 */
+			bufPhys = MV_32BIT_LE(mvOsIoVirtToPhy(cesaOsHandle, pBuf));
+
+			/*
+			 * Set up the descriptor
+			 */
+			pDmaDesc[i].byteCnt = MV_32BIT_LE(thisSize | BIT31);
+			if (isToMbuf) {
+				pDmaDesc[i].phyDestAdd = bufPhys;
+				pDmaDesc[i].phySrcAdd = MV_32BIT_LE(sramPhys);
+				/* invalidate the buffer */
+				if (skipFlush == MV_FALSE)
+					mvOsCacheInvalidate(cesaOsHandle, pBuf, thisSize);
+			} else {
+				pDmaDesc[i].phySrcAdd = bufPhys;
+				pDmaDesc[i].phyDestAdd = MV_32BIT_LE(sramPhys);
+				/* flush the buffer */
+				if (skipFlush == MV_FALSE)
+					mvOsCacheFlush(cesaOsHandle, pBuf, thisSize);
+			}
+
+			pDmaDesc[i].phyNextDescPtr = MV_32BIT_LE(mvOsIoVirtToPhy(cesaOsHandle, (&pDmaDesc[i + 1])));
+
+			/* flush the DMA desc */
+			mvOsCacheFlush(cesaOsHandle, &pDmaDesc[i], sizeof(MV_DMA_DESC));
+
+			/* Update state */
+			bufSize -= thisSize;
+			sramPhys += thisSize;
+			pBuf += thisSize;
+			size += thisSize;
+			i++;
+		}
+	}
+
+	return i;
+}
+#endif /* MV_NETBSD */
+/*******************************************************************************
+* mvCesaHmacIvGet - Calculate Inner and Outter values from HMAC key
+*
+* DESCRIPTION:
+*       This function calculate Inner and Outer values used for HMAC algorithm.
+*       This operation allows improve performance fro the whole HMAC processing.
+*
+* INPUT:
+*       MV_CESA_MAC_MODE    macMode     - Authentication mode: HMAC_MD5, HMAC_SHA1 or HMAC_SHA2.
+*       unsigned char       key[]       - Pointer to HMAC key.
+*       int                 keyLength   - Size of HMAC key (maximum 64 bytes)
+*
+* OUTPUT:
+*       unsigned char       innerIV[]   - HASH(key^inner)
+*       unsigned char       outerIV[]   - HASH(key^outter)
+*
+* RETURN:   None
+*
+*******************************************************************************/
+static void mvCesaHmacIvGet(MV_CESA_MAC_MODE macMode, unsigned char key[], int keyLength,
+			    unsigned char innerIV[], unsigned char outerIV[])
+{
+	unsigned char inner[MV_CESA_MAX_MAC_KEY_LENGTH];
+	unsigned char outer[MV_CESA_MAX_MAC_KEY_LENGTH];
+	int i, digestSize = 0;
+#if defined(MV_CPU_LE) || defined(MV_PPC)
+	MV_U32 swapped32, val32, *pVal32;
+#endif
+	for (i = 0; i < keyLength; i++) {
+		inner[i] = 0x36 ^ key[i];
+		outer[i] = 0x5c ^ key[i];
+	}
+
+	for (i = keyLength; i < MV_CESA_MAX_MAC_KEY_LENGTH; i++) {
+		inner[i] = 0x36;
+		outer[i] = 0x5c;
+	}
+	if (macMode == MV_CESA_MAC_HMAC_MD5) {
+		MV_MD5_CONTEXT ctx;
+
+		mvMD5Init(&ctx);
+		mvMD5Update(&ctx, inner, MV_CESA_MAX_MAC_KEY_LENGTH);
+
+		memcpy(innerIV, ctx.buf, MV_CESA_MD5_DIGEST_SIZE);
+		memset(&ctx, 0, sizeof(ctx));
+
+		mvMD5Init(&ctx);
+		mvMD5Update(&ctx, outer, MV_CESA_MAX_MAC_KEY_LENGTH);
+		memcpy(outerIV, ctx.buf, MV_CESA_MD5_DIGEST_SIZE);
+		memset(&ctx, 0, sizeof(ctx));
+		digestSize = MV_CESA_MD5_DIGEST_SIZE;
+	} else if (macMode == MV_CESA_MAC_HMAC_SHA1) {
+		MV_SHA1_CTX ctx;
+
+		mvSHA1Init(&ctx);
+		mvSHA1Update(&ctx, inner, MV_CESA_MAX_MAC_KEY_LENGTH);
+		memcpy(innerIV, ctx.state, MV_CESA_SHA1_DIGEST_SIZE);
+		memset(&ctx, 0, sizeof(ctx));
+
+		mvSHA1Init(&ctx);
+		mvSHA1Update(&ctx, outer, MV_CESA_MAX_MAC_KEY_LENGTH);
+		memcpy(outerIV, ctx.state, MV_CESA_SHA1_DIGEST_SIZE);
+		memset(&ctx, 0, sizeof(ctx));
+		digestSize = MV_CESA_SHA1_DIGEST_SIZE;
+	} else if (macMode == MV_CESA_MAC_HMAC_SHA2) {
+		sha256_context ctx;
+
+		mvSHA256Init(&ctx);
+		mvSHA256Update(&ctx, inner, MV_CESA_MAX_MAC_KEY_LENGTH);
+		memcpy(innerIV, ctx.state, MV_CESA_SHA2_DIGEST_SIZE);
+		memset(&ctx, 0, sizeof(ctx));
+
+		mvSHA256Init(&ctx);
+		mvSHA256Update(&ctx, outer, MV_CESA_MAX_MAC_KEY_LENGTH);
+		memcpy(outerIV, ctx.state, MV_CESA_SHA2_DIGEST_SIZE);
+		memset(&ctx, 0, sizeof(ctx));
+		digestSize = MV_CESA_SHA2_DIGEST_SIZE;
+	} else {
+		mvOsPrintf("hmacGetIV: Unexpected macMode %d\n", macMode);
+	}
+#if defined(MV_CPU_LE) || defined(MV_PPC)
+	/* 32 bits Swap of Inner and Outer values */
+	pVal32 = (MV_U32 *) innerIV;
+	for (i = 0; i < digestSize / 4; i++) {
+		val32 = *pVal32;
+		swapped32 = MV_BYTE_SWAP_32BIT(val32);
+		*pVal32 = swapped32;
+		pVal32++;
+	}
+	pVal32 = (MV_U32 *) outerIV;
+	for (i = 0; i < digestSize / 4; i++) {
+		val32 = *pVal32;
+		swapped32 = MV_BYTE_SWAP_32BIT(val32);
+		*pVal32 = swapped32;
+		pVal32++;
+	}
+#endif /* defined(MV_CPU_LE) || defined(MV_PPC) */
+}
+
+/*******************************************************************************
+* mvCesaFragSha1Complete - Complete SHA1 authentication started by HW using SW
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*       MV_CESA_MBUF*   pMbuf           - Pointer to Mbuf structure where data
+*                                       for SHA1 is placed.
+*       int             offset          - Offset in the Mbuf structure where
+*                                       unprocessed data for SHA1 is started.
+*       MV_U8*          pOuterIV        - Pointer to OUTER for this session.
+*                                       If pOuterIV==NULL - MAC mode is HASH_SHA1
+*                                       If pOuterIV!=NULL - MAC mode is HMAC_SHA1
+*       int             macLeftSize     - Size of unprocessed data for SHA1.
+*       int             macTotalSize    - Total size of data for SHA1 in the
+*                                       request (processed + unprocessed)
+*
+* OUTPUT:
+*       MV_U8*     pDigest  - Pointer to place where calculated Digest will
+*                           be stored.
+*
+* RETURN:   None
+*
+*******************************************************************************/
+static void mvCesaFragSha1Complete(MV_U8 chan, MV_CESA_MBUF *pMbuf, int offset,
+				   MV_U8 *pOuterIV, int macLeftSize, int macTotalSize, MV_U8 *pDigest)
+{
+	MV_SHA1_CTX ctx;
+	MV_U8 *pData;
+	int i, frag, fragOffset, size;
+
+	/* Read temporary Digest from HW */
+	for (i = 0; i < MV_CESA_SHA1_DIGEST_SIZE / 4; i++)
+		ctx.state[i] = MV_REG_READ(MV_CESA_AUTH_INIT_VAL_DIGEST_REG(chan, i));
+
+	/* Initialize MV_SHA1_CTX structure */
+	memset(ctx.buffer, 0, 64);
+	/* Set count[0] in bits. 32 bits is enough for 512 MBytes */
+	/* so count[1] is always 0 */
+	ctx.count[0] = ((macTotalSize - macLeftSize) * 8);
+	ctx.count[1] = 0;
+
+	/* If HMAC - add size of Inner block (64 bytes) ro count[0] */
+	if (pOuterIV != NULL)
+		ctx.count[0] += (64 * 8);
+
+	/* Get place of unprocessed data in the Mbuf structure */
+	frag = mvCesaMbufOffset(pMbuf, offset, &fragOffset);
+	if (frag == MV_INVALID) {
+		mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
+		return;
+	}
+
+	pData = pMbuf->pFrags[frag].bufVirtPtr + fragOffset;
+	size = pMbuf->pFrags[frag].bufSize - fragOffset;
+
+	/* Complete Inner part */
+	while (macLeftSize > 0) {
+		if (macLeftSize <= size) {
+			mvSHA1Update(&ctx, pData, macLeftSize);
+			break;
+		}
+		mvSHA1Update(&ctx, pData, size);
+		macLeftSize -= size;
+		frag++;
+		pData = pMbuf->pFrags[frag].bufVirtPtr;
+		size = pMbuf->pFrags[frag].bufSize;
+	}
+	mvSHA1Final(pDigest, &ctx);
+/*
+    mvOsPrintf("mvCesaFragSha1Complete: pOuterIV=%p, macLeftSize=%d, macTotalSize=%d\n",
+			pOuterIV, macLeftSize, macTotalSize);
+	mvDebugMemDump(pDigest, MV_CESA_SHA1_DIGEST_SIZE, 1);
+*/
+
+	if (pOuterIV != NULL) {
+		/* If HMAC - Complete Outer part */
+		for (i = 0; i < MV_CESA_SHA1_DIGEST_SIZE / 4; i++) {
+#if defined(MV_CPU_LE) || defined(MV_ARM)
+			ctx.state[i] = MV_BYTE_SWAP_32BIT(((MV_U32 *) pOuterIV)[i]);
+#else
+			ctx.state[i] = ((MV_U32 *) pOuterIV)[i];
+#endif
+		}
+		memset(ctx.buffer, 0, 64);
+
+		ctx.count[0] = 64 * 8;
+		ctx.count[1] = 0;
+		mvSHA1Update(&ctx, pDigest, MV_CESA_SHA1_DIGEST_SIZE);
+		mvSHA1Final(pDigest, &ctx);
+	}
+}
+
+/*******************************************************************************
+* mvCesaFragMd5Complete - Complete MD5 authentication started by HW using SW
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*       MV_CESA_MBUF*   pMbuf           - Pointer to Mbuf structure where data
+*                                       for SHA1 is placed.
+*       int             offset          - Offset in the Mbuf structure where
+*                                       unprocessed data for MD5 is started.
+*       MV_U8*          pOuterIV        - Pointer to OUTER for this session.
+*                                       If pOuterIV==NULL - MAC mode is HASH_MD5
+*                                       If pOuterIV!=NULL - MAC mode is HMAC_MD5
+*       int             macLeftSize     - Size of unprocessed data for MD5.
+*       int             macTotalSize    - Total size of data for MD5 in the
+*                                       request (processed + unprocessed)
+*
+* OUTPUT:
+*       MV_U8*     pDigest  - Pointer to place where calculated Digest will
+*                           be stored.
+*
+* RETURN:   None
+*
+*******************************************************************************/
+static void mvCesaFragMd5Complete(MV_U8 chan, MV_CESA_MBUF *pMbuf, int offset,
+				  MV_U8 *pOuterIV, int macLeftSize, int macTotalSize, MV_U8 *pDigest)
+{
+	MV_MD5_CONTEXT ctx;
+	MV_U8 *pData;
+	int i, frag, fragOffset, size;
+
+	/* Read temporary Digest from HW */
+	for (i = 0; i < MV_CESA_MD5_DIGEST_SIZE / 4; i++)
+		ctx.buf[i] = MV_REG_READ(MV_CESA_AUTH_INIT_VAL_DIGEST_REG(chan, i));
+
+	memset(ctx.in, 0, 64);
+
+	/* Set count[0] in bits. 32 bits is enough for 512 MBytes */
+	/* so count[1] is always 0 */
+	ctx.bits[0] = ((macTotalSize - macLeftSize) * 8);
+	ctx.bits[1] = 0;
+
+	/* If HMAC - add size of Inner block (64 bytes) ro count[0] */
+	if (pOuterIV != NULL)
+		ctx.bits[0] += (64 * 8);
+
+	frag = mvCesaMbufOffset(pMbuf, offset, &fragOffset);
+	if (frag == MV_INVALID) {
+		mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
+		return;
+	}
+
+	pData = pMbuf->pFrags[frag].bufVirtPtr + fragOffset;
+	size = pMbuf->pFrags[frag].bufSize - fragOffset;
+
+	/* Complete Inner part */
+	while (macLeftSize > 0) {
+		if (macLeftSize <= size) {
+			mvMD5Update(&ctx, pData, macLeftSize);
+			break;
+		}
+		mvMD5Update(&ctx, pData, size);
+		macLeftSize -= size;
+		frag++;
+		pData = pMbuf->pFrags[frag].bufVirtPtr;
+		size = pMbuf->pFrags[frag].bufSize;
+	}
+	mvMD5Final(pDigest, &ctx);
+
+/*
+    mvOsPrintf("mvCesaFragMd5Complete: pOuterIV=%p, macLeftSize=%d, macTotalSize=%d\n",
+				pOuterIV, macLeftSize, macTotalSize);
+    mvDebugMemDump(pDigest, MV_CESA_MD5_DIGEST_SIZE, 1);
+*/
+	if (pOuterIV != NULL) {
+		/* Complete Outer part */
+		for (i = 0; i < MV_CESA_MD5_DIGEST_SIZE / 4; i++) {
+#if defined(MV_CPU_LE) || defined(MV_ARM)
+			ctx.buf[i] = MV_BYTE_SWAP_32BIT(((MV_U32 *) pOuterIV)[i]);
+#else
+			ctx.buf[i] = ((MV_U32 *) pOuterIV)[i];
+#endif
+		}
+		memset(ctx.in, 0, 64);
+
+		ctx.bits[0] = 64 * 8;
+		ctx.bits[1] = 0;
+		mvMD5Update(&ctx, pDigest, MV_CESA_MD5_DIGEST_SIZE);
+		mvMD5Final(pDigest, &ctx);
+	}
+}
+
+/*******************************************************************************
+* mvCesaFragSha2Complete - Complete SHA2 authentication started by HW using SW
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*       MV_CESA_MBUF*   pMbuf           - Pointer to Mbuf structure where data
+*                                       for SHA2 is placed.
+*       int             offset          - Offset in the Mbuf structure where
+*                                       unprocessed data for SHA2 is started.
+*       MV_U8*          pOuterIV        - Pointer to OUTER for this session.
+*                                       If pOuterIV==NULL - MAC mode is HASH_SHA2
+*                                       If pOuterIV!=NULL - MAC mode is HMAC_SHA2
+*       int             macLeftSize     - Size of unprocessed data for SHA2.
+*       int             macTotalSize    - Total size of data for SHA2 in the
+*                                       request (processed + unprocessed)
+*
+* OUTPUT:
+*       MV_U8*     pDigest  - Pointer to place where calculated Digest will
+*                           be stored.
+*
+* RETURN:   None
+*
+*******************************************************************************/
+static void mvCesaFragSha2Complete(MV_U8 chan, MV_CESA_MBUF *pMbuf, int offset,
+				   MV_U8 *pOuterIV, int macLeftSize, int macTotalSize, MV_U8 *pDigest)
+{
+	sha256_context ctx;
+	MV_U8 *pData;
+	int i, frag, fragOffset, size;
+
+	/* Read temporary Digest from HW */
+	for (i = 0; i < MV_CESA_SHA2_DIGEST_SIZE / 4; i++)
+		ctx.state[i] = MV_REG_READ(MV_CESA_AUTH_INIT_VAL_DIGEST_REG(chan, i));
+
+	/* Initialize sha256_context structure */
+	memset(ctx.buffer, 0, 64);
+	/* Set total[0] in bits. 32 bits is enough for 512 MBytes */
+	/* so total[1] is always 0 */
+	ctx.total[0] = ((macTotalSize - macLeftSize) * 8);
+	ctx.total[1] = 0;
+
+	/* If HMAC - add size of Inner block (64 bytes) ro count[0] */
+	if (pOuterIV != NULL)
+		ctx.total[0] += (64 * 8);
+
+	/* Get place of unprocessed data in the Mbuf structure */
+	frag = mvCesaMbufOffset(pMbuf, offset, &fragOffset);
+	if (frag == MV_INVALID) {
+		mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
+		return;
+	}
+
+	pData = pMbuf->pFrags[frag].bufVirtPtr + fragOffset;
+	size = pMbuf->pFrags[frag].bufSize - fragOffset;
+
+	/* Complete Inner part */
+	while (macLeftSize > 0) {
+		if (macLeftSize <= size) {
+			mvSHA256Update(&ctx, pData, macLeftSize);
+			break;
+		}
+		mvSHA256Update(&ctx, pData, size);
+		macLeftSize -= size;
+		frag++;
+		pData = pMbuf->pFrags[frag].bufVirtPtr;
+		size = pMbuf->pFrags[frag].bufSize;
+	}
+	mvSHA256Finish(&ctx, pDigest);
+/*
+    mvOsPrintf("mvCesaFragSha2Complete: pOuterIV=%p, macLeftSize=%d, macTotalSize=%d\n",
+			pOuterIV, macLeftSize, macTotalSize);
+	mvDebugMemDump(pDigest, MV_CESA_SHA2_DIGEST_SIZE, 1);
+*/
+
+	if (pOuterIV != NULL) {
+		/* If HMAC - Complete Outer part */
+		for (i = 0; i < MV_CESA_SHA2_DIGEST_SIZE / 4; i++) {
+#if defined(MV_CPU_LE) || defined(MV_ARM)
+			ctx.state[i] = MV_BYTE_SWAP_32BIT(((MV_U32 *) pOuterIV)[i]);
+#else
+			ctx.state[i] = ((MV_U32 *) pOuterIV)[i];
+#endif
+		}
+		memset(ctx.buffer, 0, 64);
+
+		ctx.total[0] = 64 * 8;
+		ctx.total[1] = 0;
+		mvSHA256Update(&ctx, pDigest, MV_CESA_SHA2_DIGEST_SIZE);
+		mvSHA256Finish(&ctx, pDigest);
+	}
+}
+
+
+/*******************************************************************************
+* mvCesaFragAuthComplete -
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+* 	MB_U8		chan,
+*       MV_CESA_REQ*    pReq,
+*       MV_CESA_SA*     pSA,
+*       int             macDataSize
+*
+* RETURN:
+*       MV_STATUS
+*
+*******************************************************************************/
+static MV_STATUS mvCesaFragAuthComplete(MV_U8 chan, MV_CESA_REQ *pReq, MV_CESA_SA *pSA, int macDataSize)
+{
+	MV_CESA_COMMAND *pCmd = pReq->pCmd;
+	MV_U8 *pDigest;
+	MV_CESA_MAC_MODE macMode;
+	MV_U8 *pOuterIV = NULL;
+
+	/* Copy data from Source fragment to Destination */
+	if (pCmd->pSrc != pCmd->pDst)
+		mvCesaMbufCopy(pCmd->pDst, pReq->frags.bufOffset, pCmd->pSrc, pReq->frags.bufOffset, macDataSize);
+
+/*
+    mvCesaCopyFromMbuf(cesaSramVirtPtr[chan]->buf[0], pCmd->pSrc, pReq->frags.bufOffset, macDataSize);
+    mvCesaCopyToMbuf(cesaSramVirtPtr[chan]->buf[0], pCmd->pDst, pReq->frags.bufOffset, macDataSize);
+*/
+	pDigest = (mvCesaSramAddrGet(chan) + pReq->frags.newDigestOffset);
+
+	macMode = (pSA->config & MV_CESA_MAC_MODE_MASK) >> MV_CESA_MAC_MODE_OFFSET;
+/*
+    mvOsPrintf("macDataSize=%d, macLength=%d, digestOffset=%d, macMode=%d\n",
+		macDataSize, pCmd->macLength, pCmd->digestOffset, macMode);
+*/
+	switch (macMode) {
+	case MV_CESA_MAC_HMAC_MD5:
+		pOuterIV = pSA->pSramSA->macOuterIV;
+		/* fallthrough */
+
+	case MV_CESA_MAC_MD5:
+		mvCesaFragMd5Complete(chan, pCmd->pDst, pReq->frags.bufOffset, pOuterIV,
+				      macDataSize, pCmd->macLength, pDigest);
+		break;
+
+	case MV_CESA_MAC_HMAC_SHA1:
+		pOuterIV = pSA->pSramSA->macOuterIV;
+		/* fallthrough */
+
+	case MV_CESA_MAC_SHA1:
+		mvCesaFragSha1Complete(chan, pCmd->pDst, pReq->frags.bufOffset, pOuterIV,
+				       macDataSize, pCmd->macLength, pDigest);
+		break;
+
+	case MV_CESA_MAC_HMAC_SHA2:
+		pOuterIV = pSA->pSramSA->macOuterIV;
+		/* fallthrough */
+
+	case MV_CESA_MAC_SHA2:
+		mvCesaFragSha2Complete(chan, pCmd->pDst, pReq->frags.bufOffset, pOuterIV,
+				       macDataSize, pCmd->macLength, pDigest);
+		break;
+
+	default:
+		mvOsPrintf("mvCesaFragAuthComplete: Unexpected macMode %d\n", macMode);
+		return MV_BAD_PARAM;
+	}
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaCtrModeInit -
+*
+* DESCRIPTION:
+*
+*
+* INPUT: NONE
+*
+*
+* RETURN:
+*       MV_CESA_COMMAND*
+*
+*******************************************************************************/
+static MV_CESA_COMMAND *mvCesaCtrModeInit(void)
+{
+	MV_CESA_MBUF *pMbuf;
+	MV_U8 *pBuf;
+	MV_CESA_COMMAND *pCmd;
+
+	pBuf = mvOsMalloc(sizeof(MV_CESA_COMMAND) + sizeof(MV_CESA_MBUF) + sizeof(MV_BUF_INFO) + 100);
+	if (pBuf == NULL) {
+		mvOsPrintf("mvCesaCtrModeInit: Can't allocate %u bytes for CTR Mode\n",
+			   sizeof(MV_CESA_COMMAND) + sizeof(MV_CESA_MBUF) + sizeof(MV_BUF_INFO));
+		return NULL;
+	}
+	pCmd = (MV_CESA_COMMAND *)pBuf;
+	pBuf += sizeof(MV_CESA_COMMAND);
+
+	pMbuf = (MV_CESA_MBUF *)pBuf;
+	pBuf += sizeof(MV_CESA_MBUF);
+
+	pMbuf->pFrags = (MV_BUF_INFO *)pBuf;
+
+	pMbuf->numFrags = 1;
+	pCmd->pSrc = pMbuf;
+	pCmd->pDst = pMbuf;
+/*
+    mvOsPrintf("CtrModeInit: pCmd=%p, pSrc=%p, pDst=%p, pFrags=%p\n", pCmd, pCmd->pSrc, pCmd->pDst,
+			pMbuf->pFrags);
+*/
+	return pCmd;
+}
+
+/*******************************************************************************
+* mvCesaCtrModePrepare -
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*       MV_CESA_COMMAND *pCtrModeCmd, MV_CESA_COMMAND *pCmd
+*
+* RETURN:
+*       MV_STATUS
+*
+*******************************************************************************/
+static MV_STATUS mvCesaCtrModePrepare(MV_CESA_COMMAND *pCtrModeCmd, MV_CESA_COMMAND *pCmd)
+{
+	MV_CESA_MBUF *pMbuf;
+	MV_U8 *pBuf, *pIV;
+	MV_U32 counter, *pCounter;
+	int cryptoSize = MV_ALIGN_UP(pCmd->cryptoLength, MV_CESA_AES_BLOCK_SIZE);
+/*
+    mvOsPrintf("CtrModePrepare: pCmd=%p, pCtrSrc=%p, pCtrDst=%p, pOrgCmd=%p, pOrgSrc=%p, pOrgDst=%p\n",
+			pCmd, pCmd->pSrc, pCmd->pDst,
+			pCtrModeCmd, pCtrModeCmd->pSrc, pCtrModeCmd->pDst);
+*/
+	pMbuf = pCtrModeCmd->pSrc;
+
+	/* Allocate buffer for Key stream */
+	pBuf = mvOsIoCachedMalloc(cesaOsHandle, cryptoSize, &pMbuf->pFrags[0].bufPhysAddr, &pMbuf->pFrags[0].memHandle);
+	if (pBuf == NULL) {
+		mvOsPrintf("mvCesaCtrModePrepare: Can't allocate %d bytes\n", cryptoSize);
+		return MV_OUT_OF_CPU_MEM;
+	}
+	memset(pBuf, 0, cryptoSize);
+	mvOsCacheFlush(cesaOsHandle, pBuf, cryptoSize);
+
+	pMbuf->pFrags[0].bufVirtPtr = pBuf;
+	pMbuf->mbufSize = cryptoSize;
+	pMbuf->pFrags[0].bufSize = cryptoSize;
+
+	pCtrModeCmd->pReqPrv = pCmd->pReqPrv;
+	pCtrModeCmd->sessionId = pCmd->sessionId;
+
+	/* ivFromUser and ivOffset are don't care */
+	pCtrModeCmd->cryptoOffset = 0;
+	pCtrModeCmd->cryptoLength = cryptoSize;
+
+	/* digestOffset, macOffset and macLength are don't care */
+
+	mvCesaCopyFromMbuf(pBuf, pCmd->pSrc, pCmd->ivOffset, MV_CESA_AES_BLOCK_SIZE);
+	pCounter = (MV_U32 *)(pBuf + (MV_CESA_AES_BLOCK_SIZE - sizeof(counter)));
+	counter = *pCounter;
+	counter = MV_32BIT_BE(counter);
+	pIV = pBuf;
+	cryptoSize -= MV_CESA_AES_BLOCK_SIZE;
+
+	/* fill key stream */
+	while (cryptoSize > 0) {
+		pBuf += MV_CESA_AES_BLOCK_SIZE;
+		memcpy(pBuf, pIV, MV_CESA_AES_BLOCK_SIZE - sizeof(counter));
+		pCounter = (MV_U32 *)(pBuf + (MV_CESA_AES_BLOCK_SIZE - sizeof(counter)));
+		counter++;
+		*pCounter = MV_32BIT_BE(counter);
+		cryptoSize -= MV_CESA_AES_BLOCK_SIZE;
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaCtrModeComplete -
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*       MV_CESA_COMMAND *pOrgCmd, MV_CESA_COMMAND *pCmd
+*
+* RETURN:
+*       MV_STATUS
+*
+*******************************************************************************/
+static MV_STATUS mvCesaCtrModeComplete(MV_CESA_COMMAND *pOrgCmd, MV_CESA_COMMAND *pCmd)
+{
+	int srcFrag, dstFrag, srcOffset, dstOffset, keyOffset, srcSize, dstSize;
+	int cryptoSize = pCmd->cryptoLength;
+	MV_U8 *pSrc, *pDst, *pKey;
+	MV_STATUS status = MV_OK;
+/*
+    mvOsPrintf("CtrModeComplete: pCmd=%p, pCtrSrc=%p, pCtrDst=%p, pOrgCmd=%p, pOrgSrc=%p, pOrgDst=%p\n",
+			pCmd, pCmd->pSrc, pCmd->pDst,
+			pOrgCmd, pOrgCmd->pSrc, pOrgCmd->pDst);
+*/
+	/* XOR source data with key stream to destination data */
+	pKey = pCmd->pDst->pFrags[0].bufVirtPtr;
+	keyOffset = 0;
+
+	if ((pOrgCmd->pSrc != pOrgCmd->pDst) && (pOrgCmd->cryptoOffset > 0)) {
+		/* Copy Prefix from source buffer to destination buffer */
+
+		status = mvCesaMbufCopy(pOrgCmd->pDst, 0, pOrgCmd->pSrc, 0, pOrgCmd->cryptoOffset);
+/*
+		status = mvCesaCopyFromMbuf(tempBuf, pOrgCmd->pSrc, 0, pOrgCmd->cryptoOffset);
+		status = mvCesaCopyToMbuf(tempBuf, pOrgCmd->pDst, 0, pOrgCmd->cryptoOffset);
+*/
+	}
+
+	srcFrag = mvCesaMbufOffset(pOrgCmd->pSrc, pOrgCmd->cryptoOffset, &srcOffset);
+	pSrc = pOrgCmd->pSrc->pFrags[srcFrag].bufVirtPtr;
+	srcSize = pOrgCmd->pSrc->pFrags[srcFrag].bufSize;
+
+	dstFrag = mvCesaMbufOffset(pOrgCmd->pDst, pOrgCmd->cryptoOffset, &dstOffset);
+	pDst = pOrgCmd->pDst->pFrags[dstFrag].bufVirtPtr;
+	dstSize = pOrgCmd->pDst->pFrags[dstFrag].bufSize;
+
+	while (cryptoSize > 0) {
+		pDst[dstOffset] = (pSrc[srcOffset] ^ pKey[keyOffset]);
+
+		cryptoSize--;
+		dstOffset++;
+		srcOffset++;
+		keyOffset++;
+
+		if (srcOffset >= srcSize) {
+			srcFrag++;
+			srcOffset = 0;
+			pSrc = pOrgCmd->pSrc->pFrags[srcFrag].bufVirtPtr;
+			srcSize = pOrgCmd->pSrc->pFrags[srcFrag].bufSize;
+		}
+
+		if (dstOffset >= dstSize) {
+			dstFrag++;
+			dstOffset = 0;
+			pDst = pOrgCmd->pDst->pFrags[dstFrag].bufVirtPtr;
+			dstSize = pOrgCmd->pDst->pFrags[dstFrag].bufSize;
+		}
+	}
+
+	if (pOrgCmd->pSrc != pOrgCmd->pDst) {
+		/* Copy Suffix from source buffer to destination buffer */
+		srcOffset = pOrgCmd->cryptoOffset + pOrgCmd->cryptoLength;
+
+		if ((pOrgCmd->pDst->mbufSize - srcOffset) > 0) {
+			status = mvCesaMbufCopy(pOrgCmd->pDst, srcOffset,
+						pOrgCmd->pSrc, srcOffset, pOrgCmd->pDst->mbufSize - srcOffset);
+		}
+
+/*
+		status = mvCesaCopyFromMbuf(tempBuf, pOrgCmd->pSrc, srcOffset, pOrgCmd->pSrc->mbufSize - srcOffset);
+		status = mvCesaCopyToMbuf(tempBuf, pOrgCmd->pDst, srcOffset, pOrgCmd->pDst->mbufSize - srcOffset);
+*/
+	}
+
+	/* Free buffer used for Key stream */
+	mvOsIoCachedFree(cesaOsHandle, pCmd->pDst->pFrags[0].bufSize,
+			 pCmd->pDst->pFrags[0].bufPhysAddr,
+			 pCmd->pDst->pFrags[0].bufVirtPtr, pCmd->pDst->pFrags[0].memHandle);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaCtrModeFinish -
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*       MV_CESA_COMMAND* pCmd
+*
+* RETURN:
+*       MV_STATUS
+*
+*******************************************************************************/
+static void mvCesaCtrModeFinish(MV_CESA_COMMAND *pCmd)
+{
+	mvOsFree(pCmd);
+}
+
+/*******************************************************************************
+* mvCesaParamCheck -
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*       MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd, MV_U8* pFixOffset
+*
+* RETURN:
+*       MV_STATUS
+*
+*******************************************************************************/
+static MV_STATUS mvCesaParamCheck(MV_CESA_SA *pSA, MV_CESA_COMMAND *pCmd, MV_U8 *pFixOffset)
+{
+	MV_U8 fixOffset = 0xFF;
+
+/*
+	mvOsPrintf("mvCesaParamCheck:macOffset=%d digestOffset=%d cryptoOffset=%d ivOffset=%d"
+		"cryptoLength=%d cryptoBlockSize=%d mbufSize=%d\n",
+		pCmd->macOffset, pCmd->digestOffset, pCmd->cryptoOffset, pCmd->ivOffset,
+		pCmd->cryptoLength, pSA->cryptoBlockSize, pCmd->pSrc->mbufSize);
+*/
+
+	/* Check AUTH operation parameters */
+	if (((pSA->config & MV_CESA_OPERATION_MASK) != (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET))) {
+		/* MAC offset should be at least 4 byte aligned */
+		if (MV_IS_NOT_ALIGN(pCmd->macOffset, 4)) {
+			mvOsPrintf("mvCesaAction: macOffset %d must be 4 byte aligned\n", pCmd->macOffset);
+			return MV_BAD_PARAM;
+		}
+		/* Digest offset must be 4 byte aligned */
+		if (MV_IS_NOT_ALIGN(pCmd->digestOffset, 4)) {
+			mvOsPrintf("mvCesaAction: digestOffset %d must be 4 byte aligned\n", pCmd->digestOffset);
+			return MV_BAD_PARAM;
+		}
+		/* In addition all offsets should be the same alignment: 8 or 4 */
+		if (fixOffset == 0xFF) {
+			fixOffset = (pCmd->macOffset % 8);
+		} else {
+			if ((pCmd->macOffset % 8) != fixOffset) {
+				mvOsPrintf("mvCesaAction: macOffset %d mod 8 must be equal %d\n",
+					   pCmd->macOffset, fixOffset);
+				return MV_BAD_PARAM;
+			}
+		}
+		if ((pCmd->digestOffset % 8) != fixOffset) {
+			mvOsPrintf("mvCesaAction: digestOffset %d mod 8 must be equal %d\n",
+				   pCmd->digestOffset, fixOffset);
+			return MV_BAD_PARAM;
+		}
+	}
+	/* Check CRYPTO operation parameters */
+	if (((pSA->config & MV_CESA_OPERATION_MASK) != (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET))) {
+		/* CryptoOffset should be at least 4 byte aligned */
+		if (MV_IS_NOT_ALIGN(pCmd->cryptoOffset, 4)) {
+			mvOsPrintf("CesaAction: cryptoOffset=%d must be 4 byte aligned\n", pCmd->cryptoOffset);
+			return MV_BAD_PARAM;
+		}
+		/* cryptoLength should be the whole number of blocks */
+		if (MV_IS_NOT_ALIGN(pCmd->cryptoLength, pSA->cryptoBlockSize)) {
+			mvOsPrintf("mvCesaAction: cryptoLength=%d must be %d byte aligned\n",
+				   pCmd->cryptoLength, pSA->cryptoBlockSize);
+			return MV_BAD_PARAM;
+		}
+		if (fixOffset == 0xFF) {
+			fixOffset = (pCmd->cryptoOffset % 8);
+		} else {
+			/* In addition all offsets should be the same alignment: 8 or 4 */
+			if ((pCmd->cryptoOffset % 8) != fixOffset) {
+				mvOsPrintf("mvCesaAction: cryptoOffset %d mod 8 must be equal %d \n",
+					   pCmd->cryptoOffset, fixOffset);
+				return MV_BAD_PARAM;
+			}
+		}
+
+		/* check for CBC mode */
+		if (pSA->cryptoIvSize > 0) {
+			/* cryptoIV must not be part of CryptoLength */
+			if (((pCmd->ivOffset + pSA->cryptoIvSize) > pCmd->cryptoOffset) &&
+			    (pCmd->ivOffset < (pCmd->cryptoOffset + pCmd->cryptoLength))) {
+				mvOsPrintf
+				    ("mvCesaFragParamCheck: cryptoIvOffset (%d) is part of cryptoLength (%d+%d)\n",
+				     pCmd->ivOffset, pCmd->macOffset, pCmd->macLength);
+				return MV_BAD_PARAM;
+			}
+
+			/* ivOffset must be 4 byte aligned */
+			if (MV_IS_NOT_ALIGN(pCmd->ivOffset, 4)) {
+				mvOsPrintf("CesaAction: ivOffset=%d must be 4 byte aligned\n", pCmd->ivOffset);
+				return MV_BAD_PARAM;
+			}
+			/* In addition all offsets should be the same alignment: 8 or 4 */
+			if ((pCmd->ivOffset % 8) != fixOffset) {
+				mvOsPrintf("mvCesaAction: ivOffset %d mod 8 must be %d\n", pCmd->ivOffset, fixOffset);
+				return MV_BAD_PARAM;
+			}
+		}
+	}
+/*
+	if (fixOffset != 0) {
+		mvOsPrintf("%s: fixOffset = %d\n", __func__, fixOffset);
+		mvOsPrintf("macOff=%d digestOff=%d cryptoOff=%d ivOff=%d cryptoLen=%d cryptoBlockSize=%d mbufSize=%d\n",
+			pCmd->macOffset, pCmd->digestOffset, pCmd->cryptoOffset, pCmd->ivOffset,
+			pCmd->cryptoLength, pSA->cryptoBlockSize, pCmd->pSrc->mbufSize);
+	}
+*/
+	*pFixOffset = fixOffset;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaFragParamCheck -
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*       MV_U8 chan, MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd
+*
+* RETURN:
+*       MV_STATUS
+*
+*******************************************************************************/
+static MV_STATUS mvCesaFragParamCheck(MV_U8 chan, MV_CESA_SA *pSA, MV_CESA_COMMAND *pCmd)
+{
+	int offset;
+
+	if (((pSA->config & MV_CESA_OPERATION_MASK) != (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET))) {
+		/* macOffset must be less that SRAM buffer size */
+		if (pCmd->macOffset > (sizeof(cesaSramVirtPtr[chan]->buf) - MV_CESA_AUTH_BLOCK_SIZE)) {
+			mvOsPrintf("mvCesaFragParamCheck: macOffset is too large (%d)\n", pCmd->macOffset);
+			return MV_BAD_PARAM;
+		}
+		/* macOffset+macSize must be more than mbufSize - SRAM buffer size */
+		if (((pCmd->macOffset + pCmd->macLength) > pCmd->pSrc->mbufSize) ||
+		    ((pCmd->pSrc->mbufSize - (pCmd->macOffset + pCmd->macLength)) >= sizeof(cesaSramVirtPtr[chan]->buf))) {
+			mvOsPrintf("mvCesaFragParamCheck: macLength is too large (%d), mbufSize=%d\n",
+				   pCmd->macLength, pCmd->pSrc->mbufSize);
+			return MV_BAD_PARAM;
+		}
+	}
+
+	if (((pSA->config & MV_CESA_OPERATION_MASK) != (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET))) {
+		/* cryptoOffset must be less that SRAM buffer size */
+		/* 4 for possible fixOffset */
+		if ((pCmd->cryptoOffset + 4) > (sizeof(cesaSramVirtPtr[chan]->buf) - pSA->cryptoBlockSize)) {
+			mvOsPrintf("mvCesaFragParamCheck: cryptoOffset is too large (%d)\n", pCmd->cryptoOffset);
+			return MV_BAD_PARAM;
+		}
+
+		/* cryptoOffset+cryptoSize must be more than mbufSize - SRAM buffer size */
+		if (((pCmd->cryptoOffset + pCmd->cryptoLength) > pCmd->pSrc->mbufSize) ||
+		    ((pCmd->pSrc->mbufSize - (pCmd->cryptoOffset + pCmd->cryptoLength)) >=
+		     (sizeof(cesaSramVirtPtr[chan]->buf) - pSA->cryptoBlockSize))) {
+			mvOsPrintf("mvCesaFragParamCheck: cryptoLength is too large (%d), mbufSize=%d\n",
+				   pCmd->cryptoLength, pCmd->pSrc->mbufSize);
+			return MV_BAD_PARAM;
+		}
+	}
+
+	/* When MAC_THEN_CRYPTO or CRYPTO_THEN_MAC */
+	if (((pSA->config & MV_CESA_OPERATION_MASK) ==
+	     (MV_CESA_MAC_THEN_CRYPTO << MV_CESA_OPERATION_OFFSET)) ||
+	    ((pSA->config & MV_CESA_OPERATION_MASK) == (MV_CESA_CRYPTO_THEN_MAC << MV_CESA_OPERATION_OFFSET))) {
+
+		/* abs(cryptoOffset-macOffset) must be aligned cryptoBlockSize */
+		if (pCmd->cryptoOffset > pCmd->macOffset)
+			offset = pCmd->cryptoOffset - pCmd->macOffset;
+		else
+			offset = pCmd->macOffset - pCmd->cryptoOffset;
+
+		if (MV_IS_NOT_ALIGN(offset, pSA->cryptoBlockSize)) {
+/*
+		mvOsPrintf("mvCesaFragParamCheck: (cryptoOffset - macOffset) must be %d byte aligned\n",
+				pSA->cryptoBlockSize);
+*/
+			return MV_NOT_ALLOWED;
+		}
+		/* Digest must not be part of CryptoLength */
+		if (((pCmd->digestOffset + pSA->digestSize) > pCmd->cryptoOffset) &&
+		    (pCmd->digestOffset < (pCmd->cryptoOffset + pCmd->cryptoLength))) {
+/*
+		mvOsPrintf("mvCesaFragParamCheck: digestOffset (%d) is part of cryptoLength (%d+%d)\n",
+					pCmd->digestOffset, pCmd->cryptoOffset, pCmd->cryptoLength);
+*/
+			return MV_NOT_ALLOWED;
+		}
+	}
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaFragSizeFind -
+*
+* DESCRIPTION:
+*
+*
+* INPUT:
+*       MV_CESA_SA* pSA, MV_CESA_COMMAND *pCmd,
+*       int cryptoOffset, int macOffset,
+*
+* OUTPUT:
+*       int* pCopySize, int* pCryptoDataSize, int* pMacDataSize
+*
+* RETURN:
+*       MV_STATUS
+*
+*******************************************************************************/
+static void mvCesaFragSizeFind(MV_CESA_SA *pSA, MV_CESA_REQ *pReq,
+			       int cryptoOffset, int macOffset, int *pCopySize, int *pCryptoDataSize, int *pMacDataSize)
+{
+	MV_CESA_COMMAND *pCmd = pReq->pCmd;
+	int cryptoDataSize, macDataSize, copySize;
+
+	cryptoDataSize = macDataSize = 0;
+	copySize = *pCopySize;
+
+	if ((pSA->config & MV_CESA_OPERATION_MASK) != (MV_CESA_MAC_ONLY << MV_CESA_OPERATION_OFFSET)) {
+		cryptoDataSize = MV_MIN((copySize - cryptoOffset), (pCmd->cryptoLength - (pReq->frags.cryptoSize + 1)));
+
+		/* cryptoSize for each fragment must be the whole number of blocksSize */
+		if (MV_IS_NOT_ALIGN(cryptoDataSize, pSA->cryptoBlockSize)) {
+			cryptoDataSize = MV_ALIGN_DOWN(cryptoDataSize, pSA->cryptoBlockSize);
+			copySize = cryptoOffset + cryptoDataSize;
+		}
+	}
+	if ((pSA->config & MV_CESA_OPERATION_MASK) != (MV_CESA_CRYPTO_ONLY << MV_CESA_OPERATION_OFFSET)) {
+		macDataSize = MV_MIN((copySize - macOffset), (pCmd->macLength - (pReq->frags.macSize + 1)));
+
+		/* macSize for each fragment (except last) must be the whole number of blocksSize */
+		if (MV_IS_NOT_ALIGN(macDataSize, MV_CESA_AUTH_BLOCK_SIZE)) {
+			macDataSize = MV_ALIGN_DOWN(macDataSize, MV_CESA_AUTH_BLOCK_SIZE);
+			copySize = macOffset + macDataSize;
+		}
+		cryptoDataSize = copySize - cryptoOffset;
+	}
+	*pCopySize = copySize;
+
+	if (pCryptoDataSize != NULL)
+		*pCryptoDataSize = cryptoDataSize;
+
+	if (pMacDataSize != NULL)
+		*pMacDataSize = macDataSize;
+}
diff --git a/drivers/crypto/mvebu_cesa/hal/mvCesa.h b/drivers/crypto/mvebu_cesa/hal/mvCesa.h
new file mode 100644
index 000000000000..530232987595
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/hal/mvCesa.h
@@ -0,0 +1,381 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+		notice, this list of conditions and the following disclaimer in the
+		documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+		used to endorse or promote products derived from this software without
+		specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/*******************************************************************************
+* mvCesa.h - Header File for Cryptographic Engines and Security Accelerator
+*
+* DESCRIPTION:
+*       This header file contains macros typedefs and function declaration for
+*       the Marvell Cryptographic Engines and Security Accelerator.
+*
+*******************************************************************************/
+
+#ifndef __mvCesa_h__
+#define __mvCesa_h__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef CONFIG_OF
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#endif
+#include "mvSysCesaConfig.h"
+#include "mvCesaRegs.h"
+
+typedef enum {
+	MV_CESA_SPLIT_NONE = 0,
+	MV_CESA_SPLIT_FIRST = 1,
+	MV_CESA_SPLIT_SECOND = 2
+} MV_CESA_SPLIT;
+
+typedef enum {
+	CESA_NULL_POLICY = 0,
+	CESA_SINGLE_CHAN_POLICY,
+	CESA_DUAL_CHAN_BALANCED_POLICY,
+	CESA_WEIGHTED_CHAN_POLICY,
+	CESA_FLOW_ASSOC_CHAN_POLICY
+} MV_CESA_POLICY;
+
+typedef enum {
+	CESA_NULL_FLOW_TYPE = 0,
+	CESA_IPSEC_FLOW_TYPE,
+	CESA_SSL_FLOW_TYPE,
+	CESA_DISK_FLOW_TYPE,
+	CESA_NFPSEC_FLOW_TYPE
+} MV_CESA_FLOW_TYPE;
+
+typedef struct {
+	MV_ULONG sramPhysBase[MV_CESA_CHANNELS];
+	MV_U8 *sramVirtBase[MV_CESA_CHANNELS];
+	MV_U16 sramOffset[MV_CESA_CHANNELS];
+	MV_U16 ctrlModel;	/* Controller Model     */
+	MV_U8 ctrlRev;		/* Controller Revision  */
+} MV_CESA_HAL_DATA;
+
+/* Redefine MV_DMA_DESC structure */
+typedef struct _mvDmaDesc {
+	MV_U32 byteCnt;	/* The total number of bytes to transfer        */
+	MV_U32 phySrcAdd;	/* The physical source address                  */
+	MV_U32 phyDestAdd;	/* The physical destination address             */
+	MV_U32 phyNextDescPtr;	/* If we are using chain mode DMA transfer,     */
+	/* then this pointer should point to the        */
+	/* physical address of the next descriptor,     */
+	/* otherwise it should be NULL.                 */
+} MV_DMA_DESC;
+
+#define MV_CESA_AUTH_BLOCK_SIZE         64	/* bytes */
+
+#define MV_CESA_MD5_DIGEST_SIZE         16	/* bytes */
+#define MV_CESA_SHA1_DIGEST_SIZE        20	/* bytes */
+#define MV_CESA_SHA2_DIGEST_SIZE	32	/* bytes */
+
+#define MV_CESA_MAX_DIGEST_SIZE         MV_CESA_SHA2_DIGEST_SIZE
+
+#define MV_CESA_DES_KEY_LENGTH          8	/* bytes = 64 bits */
+#define MV_CESA_3DES_KEY_LENGTH         24	/* bytes = 192 bits */
+#define MV_CESA_AES_128_KEY_LENGTH      16	/* bytes = 128 bits */
+#define MV_CESA_AES_192_KEY_LENGTH      24	/* bytes = 192 bits */
+#define MV_CESA_AES_256_KEY_LENGTH      32	/* bytes = 256 bits */
+
+#define MV_CESA_MAX_CRYPTO_KEY_LENGTH   MV_CESA_AES_256_KEY_LENGTH
+
+#define MV_CESA_DES_BLOCK_SIZE          8	/* bytes = 64 bits */
+#define MV_CESA_3DES_BLOCK_SIZE         8	/* bytes = 64 bits */
+
+#define MV_CESA_AES_BLOCK_SIZE          16	/* bytes = 128 bits */
+
+#define MV_CESA_MAX_IV_LENGTH           MV_CESA_AES_BLOCK_SIZE
+
+#define MV_CESA_MAX_MAC_KEY_LENGTH      64	/* bytes */
+
+typedef struct {
+	MV_U8 cryptoKey[MV_CESA_MAX_CRYPTO_KEY_LENGTH];
+	MV_U8 macKey[MV_CESA_MAX_MAC_KEY_LENGTH];
+	MV_CESA_OPERATION operation;
+	MV_CESA_DIRECTION direction;
+	MV_CESA_CRYPTO_ALG cryptoAlgorithm;
+	MV_CESA_CRYPTO_MODE cryptoMode;
+	MV_U8 cryptoKeyLength;
+	MV_CESA_MAC_MODE macMode;
+	MV_U8 macKeyLength;
+	MV_U8 digestSize;
+} MV_CESA_OPEN_SESSION;
+
+typedef struct {
+	MV_BUF_INFO *pFrags;
+	MV_U16 numFrags;
+	MV_U16 mbufSize;
+} MV_CESA_MBUF;
+
+typedef struct {
+	void *pReqPrv;	/* instead of reqId */
+	MV_U32 retCode;
+	MV_16 sessionId;
+	MV_U16 mbufSize;
+	MV_U32 reqId;	/* Driver internal */
+	MV_U32 chanId;
+} MV_CESA_RESULT;
+
+typedef void (*MV_CESA_CALLBACK) (MV_CESA_RESULT *pResult);
+
+typedef struct {
+	void *pReqPrv;	/* instead of reqId */
+	MV_CESA_MBUF *pSrc;
+	MV_CESA_MBUF *pDst;
+	MV_CESA_CALLBACK *pFuncCB;
+	MV_16 sessionId;
+	MV_U16 ivFromUser;
+	MV_U16 ivOffset;
+	MV_U16 cryptoOffset;
+	MV_U16 cryptoLength;
+	MV_U16 digestOffset;
+	MV_U16 macOffset;
+	MV_U16 macLength;
+	MV_BOOL skipFlush;
+	MV_CESA_SPLIT split;
+	MV_U32 reqId;	/* Driver internal */
+	MV_CESA_FLOW_TYPE flowType;
+} MV_CESA_COMMAND;
+
+MV_STATUS mvCesaHalInit(int numOfSession, int queueDepth, void *osHandle, MV_CESA_HAL_DATA *halData);
+MV_STATUS mvCesaTdmaWinInit(MV_U8 chan, MV_UNIT_WIN_INFO *addrWinMap);
+MV_STATUS mvCesaFinish(void);
+MV_STATUS mvCesaSessionOpen(MV_CESA_OPEN_SESSION *pSession, short *pSid);
+MV_STATUS mvCesaSessionClose(short sid);
+MV_STATUS mvCesaCryptoIvSet(MV_U8 chan, MV_U8 *pIV, int ivSize);
+MV_STATUS mvCesaAction(MV_U8 chan, MV_CESA_COMMAND *pCmd);
+MV_STATUS mvCesaReadyGet(MV_U8 chan, MV_CESA_RESULT *pResult);
+int mvCesaMbufOffset(MV_CESA_MBUF *pMbuf, int offset, int *pBufOffset);
+MV_STATUS mvCesaCopyFromMbuf(MV_U8 *pDst, MV_CESA_MBUF *pSrcMbuf, int offset, int size);
+MV_STATUS mvCesaCopyToMbuf(MV_U8 *pSrc, MV_CESA_MBUF *pDstMbuf, int offset, int size);
+MV_STATUS mvCesaMbufCopy(MV_CESA_MBUF *pMbufDst, int dstMbufOffset,
+			MV_CESA_MBUF *pMbufSrc, int srcMbufOffset, int size);
+
+/********** Debug functions ********/
+void mvCesaDebugMbuf(const char *str, MV_CESA_MBUF *pMbuf, int offset, int size);
+void mvCesaDebugSA(short sid, int mode);
+void mvCesaDebugStats(void);
+void mvCesaDebugStatsClear(void);
+void mvCesaDebugRegs(void);
+void mvCesaDebugStatus(void);
+void mvCesaDebugQueue(int mode);
+void mvCesaDebugSram(int mode);
+void mvCesaDebugSAD(int mode);
+
+/********  CESA Private definitions ********/
+#define MV_CESA_TDMA_CTRL_VALUE       (MV_CESA_TDMA_DST_BURST_MASK(MV_CESA_TDMA_BURST_128B) 		\
+						| MV_CESA_TDMA_SRC_BURST_MASK(MV_CESA_TDMA_BURST_128B)  \
+						| MV_CESA_TDMA_OUTSTAND_READ_EN_MASK                    \
+						| MV_CESA_TDMA_NO_BYTE_SWAP_MASK			\
+						| MV_CESA_TDMA_ENABLE_MASK)
+
+#define MV_CESA_MAX_PKT_SIZE        (64 * 1024)
+#define MV_CESA_MAX_MBUF_FRAGS      20
+
+#define MV_CESA_MAX_REQ_FRAGS       ((MV_CESA_MAX_PKT_SIZE / MV_CESA_MAX_BUF_SIZE) + 1)
+
+#define MV_CESA_MAX_DMA_DESC    (MV_CESA_MAX_MBUF_FRAGS*2 + 5)
+
+#define MAX_CESA_CHAIN_LENGTH	20
+
+typedef enum {
+	MV_CESA_IDLE = 0,
+	MV_CESA_PENDING,
+	MV_CESA_PROCESS,
+	MV_CESA_READY,
+	MV_CESA_CHAIN,
+} MV_CESA_STATE;
+
+/* Session database */
+
+/* Map of Key materials of the session in SRAM.
+ * Each field must be 8 byte aligned
+ * Total size: 32 + 24 + 24 = 80 bytes
+ */
+typedef struct {
+	MV_U8 cryptoKey[MV_CESA_MAX_CRYPTO_KEY_LENGTH];
+	MV_U8 macInnerIV[MV_CESA_MAX_DIGEST_SIZE];
+	MV_U8 reservedInner[4];
+	MV_U8 macOuterIV[MV_CESA_MAX_DIGEST_SIZE];
+	MV_U8 reservedOuter[4];
+} MV_CESA_SRAM_SA;
+
+typedef struct {
+	MV_CESA_SRAM_SA *pSramSA;
+	MV_U8 *sramSABuff;	/* holds initial allocation virtual address */
+	MV_U32 sramSABuffSize;
+	MV_ULONG sramSAPhysAddr;	/* holds initial allocation physical address  */
+	MV_U32 memHandle;
+	MV_U32 config;
+	MV_U8 cryptoKeyLength;
+	MV_U8 cryptoIvSize;
+	MV_U8 cryptoBlockSize;
+	MV_U8 digestSize;
+	MV_U8 macKeyLength;
+	MV_U8 ctrMode;
+	MV_U32 count;
+} MV_CESA_SA;
+
+/* DMA list management */
+typedef struct {
+	MV_DMA_DESC *pDmaFirst;
+	MV_DMA_DESC *pDmaLast;
+} MV_CESA_DMA;
+
+typedef struct {
+	MV_U8 numFrag;
+	MV_U8 nextFrag;
+	int bufOffset;
+	int cryptoSize;
+	int macSize;
+	int newDigestOffset;
+	MV_U8 orgDigest[MV_CESA_MAX_DIGEST_SIZE];
+} MV_CESA_FRAGS;
+
+/* Request queue */
+typedef struct {
+	MV_U8 state;
+	MV_U8 fragMode;
+	MV_U8 fixOffset;
+	MV_CESA_COMMAND *pCmd;
+	MV_CESA_COMMAND *pOrgCmd;
+	MV_BUF_INFO dmaDescBuf;
+	MV_CESA_DMA dma[MV_CESA_MAX_REQ_FRAGS];
+	MV_BUF_INFO cesaDescBuf;
+	MV_CESA_DESC *pCesaDesc;
+	MV_CESA_FRAGS frags;
+	MV_32 use;
+} MV_CESA_REQ;
+
+/* SRAM map */
+/* Total SRAM size calculation */
+/*  SRAM size =
+ *              MV_CESA_MAX_BUF_SIZE  +
+ *              sizeof(MV_CESA_DESC)  +
+ *              MV_CESA_MAX_IV_LENGTH +
+ *              MV_CESA_MAX_IV_LENGTH +
+ *              MV_CESA_MAX_DIGEST_SIZE +
+ *              sizeof(MV_CESA_SRAM_SA)
+ *            = 1600 + 32 + 16 + 16 + 24 + 80 + 280 (reserved) = 2048 bytes
+ *            = 3200 + 32 + 16 + 16 + 24 + 80 + 728 (reserved) = 4096 bytes
+ */
+typedef struct {
+	MV_U8 buf[MV_CESA_MAX_BUF_SIZE];
+	MV_CESA_DESC desc;
+	MV_U8 cryptoIV[MV_CESA_MAX_IV_LENGTH];
+	MV_U8 tempCryptoIV[MV_CESA_MAX_IV_LENGTH];
+	MV_U8 tempDigest[MV_CESA_MAX_DIGEST_SIZE + 4];
+	MV_CESA_SRAM_SA sramSA;
+} MV_CESA_SRAM_MAP;
+
+typedef struct {
+	MV_U32 openedCount;
+	MV_U32 closedCount;
+	MV_U32 fragCount;
+	MV_U32 reqCount;
+	MV_U32 maxReqCount;
+	MV_U32 procCount;
+	MV_U32 readyCount;
+	MV_U32 notReadyCount;
+	MV_U32 startCount;
+#if defined(MV_CESA_CHAIN_MODE) || defined(CONFIG_OF)
+	MV_U32 maxChainUsage;
+#endif /* MV_CESA_CHAIN_MODE || CONFIG_OF */
+} MV_CESA_STATS;
+
+/* External variables */
+
+extern MV_CESA_STATS cesaStats;
+extern MV_CESA_FRAGS cesaFrags;
+extern MV_CESA_SA **pCesaSAD;
+extern MV_U32 cesaMaxSA;
+extern MV_CESA_REQ *pCesaReqFirst[MV_CESA_CHANNELS];
+extern MV_CESA_REQ *pCesaReqLast[MV_CESA_CHANNELS];
+extern MV_CESA_REQ *pCesaReqEmpty[MV_CESA_CHANNELS];
+extern MV_CESA_REQ *pCesaReqProcess[MV_CESA_CHANNELS];
+extern int cesaQueueDepth[MV_CESA_CHANNELS];
+extern int cesaReqResources[MV_CESA_CHANNELS];
+
+#if defined(MV_CESA_CHAIN_MODE) || defined(CONFIG_OF)
+	extern MV_U32 cesaChainLength[MV_CESA_CHANNELS];
+#endif /* MV_CESA_CHAIN_MODE || CONFIG_OF */
+
+extern MV_CESA_SRAM_MAP *cesaSramVirtPtr[MV_CESA_CHANNELS];
+
+static INLINE MV_ULONG mvCesaVirtToPhys(MV_BUF_INFO *pBufInfo, void *pVirt)
+{
+	return (pBufInfo->bufPhysAddr + ((MV_U8 *)pVirt - pBufInfo->bufVirtPtr));
+}
+
+/* Additional DEBUG functions */
+void mvCesaDebugSramSA(MV_CESA_SRAM_SA *pSramSA, int mode);
+void mvCesaDebugCmd(MV_CESA_COMMAND *pCmd, int mode);
+void mvCesaDebugDescriptor(MV_CESA_DESC *pDesc);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __mvCesa_h__ */
diff --git a/drivers/crypto/mvebu_cesa/hal/mvCesaAddrDec.c b/drivers/crypto/mvebu_cesa/hal/mvCesaAddrDec.c
new file mode 100644
index 000000000000..99c231277305
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/hal/mvCesaAddrDec.c
@@ -0,0 +1,289 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+		this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+		notice, this list of conditions and the following disclaimer in the
+		documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+		used to endorse or promote products derived from this software without
+		specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#include "mvCesa.h"
+#include "mvCesaRegs.h"
+
+MV_TARGET tdmaAddrDecPrioTable[] = {
+#if defined(MV_INCLUDE_SDRAM_CS0)
+	SDRAM_CS0,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS1)
+	SDRAM_CS1,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS2)
+	SDRAM_CS2,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS3)
+	SDRAM_CS3,
+#endif
+	TBL_TERM
+};
+
+static MV_STATUS cesaWinOverlapDetect(MV_U8 chan, MV_U32 winNum, MV_ADDR_WIN *pAddrWin);
+static MV_STATUS mvCesaTdmaWinRead(MV_U8 chan, MV_U32 winNum, MV_UNIT_WIN_INFO *pDecWin);
+static MV_STATUS mvCesaTdmaWinWrite(MV_U8 chan, MV_U32 winNum, MV_UNIT_WIN_INFO *pDecWin);
+
+/*******************************************************************************
+* mvCesaTdmaWinRead.
+*
+* DESCRIPTION:
+*       Read TDMA target address window.
+*
+* INPUT:
+*	chan - Channel ID.
+*       winNum - TDMA target address decode window number.
+*
+* OUTPUT:
+*       pDecWin - TDMA target window data structure.
+*
+* RETURN:
+*	MV_BAD_PARAM if winNum is invalid.
+*	MV_ERROR otherwise.
+*
+*******************************************************************************/
+static MV_STATUS mvCesaTdmaWinRead(MV_U8 chan, MV_U32 winNum, MV_UNIT_WIN_INFO *pDecWin)
+{
+	MV_U32 sizeReg, baseReg;
+	MV_U32 size;
+
+	/* Parameter checking   */
+	if (winNum >= MV_CESA_TDMA_ADDR_DEC_WIN) {
+		mvOsPrintf("%s : ERR. Invalid winNum %d\n", __func__, winNum);
+		return MV_NOT_SUPPORTED;
+	}
+
+	baseReg = MV_REG_READ(MV_CESA_TDMA_BASE_ADDR_REG(chan, winNum));
+	sizeReg = MV_REG_READ(MV_CESA_TDMA_WIN_CTRL_REG(chan, winNum));
+
+	/* Check if window is enabled   */
+	if (sizeReg & MV_CESA_TDMA_WIN_ENABLE_MASK) {
+		pDecWin->enable = MV_TRUE;
+
+		/* Extract window parameters from registers */
+		pDecWin->targetId = (sizeReg & MV_CESA_TDMA_WIN_TARGET_MASK) >> MV_CESA_TDMA_WIN_TARGET_OFFSET;
+		pDecWin->attrib = (sizeReg & MV_CESA_TDMA_WIN_ATTR_MASK) >> MV_CESA_TDMA_WIN_ATTR_OFFSET;
+
+		size = (sizeReg & MV_CESA_TDMA_WIN_SIZE_MASK) >> MV_CESA_TDMA_WIN_SIZE_OFFSET;
+		pDecWin->addrWin.size = (size + 1) * (1 << MV_CESA_TDMA_WIN_SIZE_OFFSET);
+		pDecWin->addrWin.baseLow = (baseReg & MV_CESA_TDMA_WIN_BASE_MASK);
+		pDecWin->addrWin.baseHigh = 0;
+	} else {
+		pDecWin->enable = MV_FALSE;
+	}
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvCesaTdmaWinWrite
+*
+* DESCRIPTION:
+*	This function writes the address decoding registers according to the
+*	given window configuration.
+*
+* INPUT:
+*	chan - Channel ID.
+*       pAddrDecWin - CESA TDMA target window data structure.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_OK on success,
+*	MV_BAD_PARAM if winNum is invalid or size is not a power of 2.
+*	MV_ERROR otherwise.
+*
+*******************************************************************************/
+static MV_STATUS mvCesaTdmaWinWrite(MV_U8 chan, MV_U32 winNum, MV_UNIT_WIN_INFO *pDecWin)
+{
+	MV_U32 sizeReg, baseReg;
+	MV_U32 size;
+
+	/* Parameter checking   */
+	if (winNum >= MV_CESA_TDMA_ADDR_DEC_WIN) {
+		mvOsPrintf("mvCesaTdmaWinSet: ERR. Invalid win num %d\n", winNum);
+		return MV_BAD_PARAM;
+	}
+
+	/* Check if the requested window overlapps with current windows     */
+	if (MV_TRUE == cesaWinOverlapDetect(chan, winNum, &pDecWin->addrWin)) {
+		mvOsPrintf("%s: ERR. Window %d overlap\n", __func__, winNum);
+		return MV_ERROR;
+	}
+
+	/* check if address is aligned to the size */
+	if (MV_IS_NOT_ALIGN(pDecWin->addrWin.baseLow, pDecWin->addrWin.size)) {
+		mvOsPrintf("mvCesaTdmaWinSet: Error setting CESA TDMA window %d.\n"
+			   "Address 0x%08x is unaligned to size 0x%x.\n",
+			   winNum, pDecWin->addrWin.baseLow, (MV_U32)pDecWin->addrWin.size);
+		return MV_ERROR;
+	}
+
+	if (!MV_IS_POWER_OF_2(pDecWin->addrWin.size)) {
+		mvOsPrintf("mvCesaTdmaWinWrite: Error setting CESA window %d. "
+			   "Window size is not a power to 2.", winNum);
+		return MV_BAD_PARAM;
+	}
+
+	size = (pDecWin->addrWin.size / (1 << MV_CESA_TDMA_WIN_SIZE_OFFSET)) - 1;
+
+	/* set Size, Attributes and TargetID */
+	sizeReg = (((pDecWin->targetId << MV_CESA_TDMA_WIN_TARGET_OFFSET) & MV_CESA_TDMA_WIN_TARGET_MASK) |
+		   ((pDecWin->attrib << MV_CESA_TDMA_WIN_ATTR_OFFSET) & MV_CESA_TDMA_WIN_ATTR_MASK) |
+		   ((size << MV_CESA_TDMA_WIN_SIZE_OFFSET) & MV_CESA_TDMA_WIN_SIZE_MASK));
+
+	if (pDecWin->enable == MV_TRUE)
+		sizeReg |= MV_CESA_TDMA_WIN_ENABLE_MASK;
+	else
+		sizeReg &= ~MV_CESA_TDMA_WIN_ENABLE_MASK;
+
+	/* Update Base value  */
+	baseReg = (pDecWin->addrWin.baseLow & MV_CESA_TDMA_WIN_BASE_MASK);
+
+	MV_REG_WRITE(MV_CESA_TDMA_WIN_CTRL_REG(chan, winNum), sizeReg);
+	MV_REG_WRITE(MV_CESA_TDMA_BASE_ADDR_REG(chan, winNum), baseReg);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* cesaWinOverlapDetect - Detect CESA TDMA address windows overlapping
+*
+* DESCRIPTION:
+*       An unpredicted behaviur is expected in case TDMA address decode
+*       windows overlapps.
+*       This function detects TDMA address decode windows overlapping of a
+*       specified window. The function does not check the window itself for
+*       overlapping. The function also skipps disabled address decode windows.
+*
+* INPUT:
+* 	chan	    - Channel ID
+*       winNum      - address decode window number.
+*       pAddrDecWin - An address decode window struct.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE     - if the given address window overlap current address
+*                   decode map,
+*       MV_FALSE    - otherwise, MV_ERROR if reading invalid data
+*                   from registers.
+*
+*******************************************************************************/
+static MV_STATUS cesaWinOverlapDetect(MV_U8 chan, MV_U32 winNum, MV_ADDR_WIN *pAddrWin)
+{
+	MV_U32 winNumIndex;
+	MV_UNIT_WIN_INFO addrDecWin;
+
+	for (winNumIndex = 0; winNumIndex < MV_CESA_TDMA_ADDR_DEC_WIN; winNumIndex++) {
+		/* Do not check window itself       */
+		if (winNumIndex == winNum)
+			continue;
+
+		/* Get window parameters    */
+		if (MV_OK != mvCesaTdmaWinRead(chan, winNumIndex, &addrDecWin)) {
+			mvOsPrintf("%s: ERR. TargetWinGet failed\n", __func__);
+			return MV_ERROR;
+		}
+
+		/* Do not check disabled windows    */
+		if (addrDecWin.enable == MV_FALSE)
+			continue;
+
+		if (MV_TRUE == mvWinOverlapTest(pAddrWin, &(addrDecWin.addrWin)))
+			return MV_TRUE;
+	}
+	return MV_FALSE;
+}
+
+MV_STATUS mvCesaTdmaWinInit(MV_U8 chan, MV_UNIT_WIN_INFO *addrWinMap)
+{
+	MV_U32 winNum;
+	MV_UNIT_WIN_INFO *addrDecWin;
+	MV_U32 winPrioIndex = 0;
+
+	/* First disable all address decode windows */
+	for (winNum = 0; winNum < MV_CESA_TDMA_ADDR_DEC_WIN; winNum++)
+		MV_REG_BIT_RESET(MV_CESA_TDMA_WIN_CTRL_REG(chan, winNum), MV_CESA_TDMA_WIN_ENABLE_MASK);
+
+	/* Go through all windows in user table until table terminator      */
+	winNum = 0;
+	while ((tdmaAddrDecPrioTable[winPrioIndex] != TBL_TERM) && (winNum < MV_CESA_TDMA_ADDR_DEC_WIN)) {
+
+		addrDecWin = &addrWinMap[tdmaAddrDecPrioTable[winPrioIndex]];
+		if (addrDecWin->enable == MV_TRUE) {
+			if (MV_OK != mvCesaTdmaWinWrite(chan, winNum, addrDecWin)) {
+				mvOsPrintf("mvCesaTdmaWinSet FAILED: winNum=%d\n", winNum);
+				return MV_ERROR;
+			}
+			winNum++;
+		}
+		winPrioIndex++;
+	}
+	return MV_OK;
+}
diff --git a/drivers/crypto/mvebu_cesa/hal/mvCesaDebug.c b/drivers/crypto/mvebu_cesa/hal/mvCesaDebug.c
new file mode 100644
index 000000000000..058152e477d3
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/hal/mvCesaDebug.c
@@ -0,0 +1,557 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+		this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+		notice, this list of conditions and the following disclaimer in the
+		documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+		used to endorse or promote products derived from this software without
+		specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "mvDebug.h"
+
+#ifdef CONFIG_OF
+#include "cesa_if.h"
+#include "mvMD5.h"
+#include "mvSHA1.h"
+#include "mvSHA256.h"
+#include "mvCesaRegs.h"
+#include "mvCesa.h"
+#include "AES/mvAes.h"
+#else
+#include "cesa/mvMD5.h"
+#include "cesa/mvSHA1.h"
+#include "cesa/mvSHA256.h"
+#include "cesa/mvCesaRegs.h"
+#include "cesa/mvCesa.h"
+#include "cesa/AES/mvAes.h"
+#endif /* CONFIG_OF */
+
+#define MV_CESA_VERSION		3
+
+static const char *mvCesaDebugStateStr(MV_CESA_STATE state)
+{
+	switch (state) {
+	case MV_CESA_IDLE:
+		return "Idle";
+
+	case MV_CESA_PENDING:
+		return "Pend";
+
+	case MV_CESA_PROCESS:
+		return "Proc";
+
+	case MV_CESA_READY:
+		return "Ready";
+
+	default:
+		break;
+	}
+	return "Unknown";
+}
+
+static const char *mvCesaDebugOperStr(MV_CESA_OPERATION oper)
+{
+	switch (oper) {
+	case MV_CESA_MAC_ONLY:
+		return "MacOnly";
+
+	case MV_CESA_CRYPTO_ONLY:
+		return "CryptoOnly";
+
+	case MV_CESA_MAC_THEN_CRYPTO:
+		return "MacCrypto";
+
+	case MV_CESA_CRYPTO_THEN_MAC:
+		return "CryptoMac";
+
+	default:
+		break;
+	}
+	return "Null";
+}
+
+static const char *mvCesaDebugCryptoAlgStr(MV_CESA_CRYPTO_ALG cryptoAlg)
+{
+	switch (cryptoAlg) {
+	case MV_CESA_CRYPTO_DES:
+		return "DES";
+
+	case MV_CESA_CRYPTO_3DES:
+		return "3DES";
+
+	case MV_CESA_CRYPTO_AES:
+		return "AES";
+
+	default:
+		break;
+	}
+	return "Null";
+}
+
+static const char *mvCesaDebugMacModeStr(MV_CESA_MAC_MODE macMode)
+{
+	switch (macMode) {
+	case MV_CESA_MAC_MD5:
+		return "MD5";
+
+	case MV_CESA_MAC_SHA1:
+		return "SHA1";
+
+	case MV_CESA_MAC_SHA2:
+		return "SHA2";
+
+	case MV_CESA_MAC_HMAC_MD5:
+		return "HMAC-MD5";
+
+	case MV_CESA_MAC_HMAC_SHA1:
+		return "HMAC_SHA1";
+
+	case MV_CESA_MAC_HMAC_SHA2:
+		return "HMAC_SHA2";
+
+	default:
+		break;
+	}
+	return "Null";
+}
+
+void mvCesaDebugCmd(MV_CESA_COMMAND *pCmd, int mode)
+{
+	mvOsPrintf("pCmd=%p, pReqPrv=%p, pSrc=%p, pDst=%p, pCB=%p, sid=%d\n",
+		   pCmd, pCmd->pReqPrv, pCmd->pSrc, pCmd->pDst, pCmd->pFuncCB, pCmd->sessionId);
+	mvOsPrintf("isUser=%d, ivOffs=%d, crOffs=%d, crLen=%d, digest=%d, macOffs=%d, macLen=%d\n",
+		   pCmd->ivFromUser, pCmd->ivOffset, pCmd->cryptoOffset, pCmd->cryptoLength,
+		   pCmd->digestOffset, pCmd->macOffset, pCmd->macLength);
+}
+
+/* no need to use in tool */
+void mvCesaDebugMbuf(const char *str, MV_CESA_MBUF *pMbuf, int offset, int size)
+{
+	int frag, len, fragOffset;
+
+	if (str != NULL)
+		mvOsPrintf("%s: pMbuf=%p, numFrags=%d, mbufSize=%d\n", str, pMbuf, pMbuf->numFrags, pMbuf->mbufSize);
+
+	frag = mvCesaMbufOffset(pMbuf, offset, &fragOffset);
+	if (frag == MV_INVALID) {
+		mvOsPrintf("CESA Mbuf Error: offset (%d) out of range\n", offset);
+		return;
+	}
+
+	for (; frag < pMbuf->numFrags; frag++) {
+		mvOsPrintf("#%2d. bufVirt=%p, bufSize=%d\n",
+			   frag, pMbuf->pFrags[frag].bufVirtPtr, pMbuf->pFrags[frag].bufSize);
+		if (size > 0) {
+			len = MV_MIN(pMbuf->pFrags[frag].bufSize, size);
+#ifdef CONFIG_OF
+			mv_debug_mem_dump(pMbuf->pFrags[frag].bufVirtPtr + fragOffset, len, 1);
+#else
+			mvDebugMemDump(pMbuf->pFrags[frag].bufVirtPtr + fragOffset, len, 1);
+#endif
+			size -= len;
+			fragOffset = 0;
+		}
+	}
+}
+
+void mvCesaDebugRegs(void)
+{
+	MV_U8 chan = 0;
+	MV_U8 i = 0;
+
+	mvOsPrintf("\t CESA Registers:\n");
+
+#ifdef CONFIG_OF
+	for (chan = 0; chan < mv_cesa_channels; chan++) {
+#else
+	for (chan = 0; chan < MV_CESA_CHANNELS; chan++) {
+#endif
+		mvOsPrintf("\n\nChannel %d:\n", chan);
+		mvOsPrintf("===========\n");
+		mvOsPrintf("MV_CESA_CMD_REG                     : 0x%X = 0x%08x\n",
+			MV_CESA_CMD_REG(chan), MV_REG_READ(MV_CESA_CMD_REG(chan)));
+
+		mvOsPrintf("MV_CESA_CHAN_DESC_OFFSET_REG        : 0x%X = 0x%08x\n",
+			MV_CESA_CHAN_DESC_OFFSET_REG(chan), MV_REG_READ(MV_CESA_CHAN_DESC_OFFSET_REG(chan)));
+
+		mvOsPrintf("MV_CESA_CFG_REG                     : 0x%X = 0x%08x\n",
+			MV_CESA_CFG_REG(chan), MV_REG_READ(MV_CESA_CFG_REG(chan)));
+
+		mvOsPrintf("MV_CESA_STATUS_REG                  : 0x%X = 0x%08x\n",
+			MV_CESA_STATUS_REG(chan), MV_REG_READ(MV_CESA_STATUS_REG(chan)));
+
+		mvOsPrintf("MV_CESA_ISR_CAUSE_REG               : 0x%X = 0x%08x\n",
+			MV_CESA_ISR_CAUSE_REG(chan), MV_REG_READ(MV_CESA_ISR_CAUSE_REG(chan)));
+
+		mvOsPrintf("MV_CESA_ISR_MASK_REG                : 0x%X = 0x%08x\n",
+			MV_CESA_ISR_MASK_REG(chan), MV_REG_READ(MV_CESA_ISR_MASK_REG(chan)));
+#if defined(MV_CESA_INT_COALESCING_SUPPORT) || defined(CONFIG_OF)
+#ifdef CONFIG_OF
+		if (mv_cesa_feature == INT_COALESCING) {
+#endif /* CONFIG_OF */
+			mvOsPrintf("%s\t: 0x%X = 0x%08x\n",
+			    "MV_CESA_INT_COAL_TH_REG",
+			    MV_CESA_INT_COAL_TH_REG(chan),
+			    MV_REG_READ(MV_CESA_INT_COAL_TH_REG(chan)));
+			mvOsPrintf("%s\t: 0x%X = 0x%08x\n",
+			    "MV_CESA_INT_TIME_TH_REG",
+			    MV_CESA_INT_TIME_TH_REG(chan),
+			    MV_REG_READ(MV_CESA_INT_TIME_TH_REG(chan)));
+#ifdef CONFIG_OF
+		}
+#endif /* CONFIG_OF */
+#endif /* MV_CESA_INT_COALESCING_SUPPORT || CONFIG_OF */
+#if (MV_CESA_VERSION >= 2)
+		mvOsPrintf("MV_CESA_TDMA_CTRL_REG               : 0x%X = 0x%08x\n",
+			MV_CESA_TDMA_CTRL_REG(chan), MV_REG_READ(MV_CESA_TDMA_CTRL_REG(chan)));
+
+		mvOsPrintf("MV_CESA_TDMA_BYTE_COUNT_REG         : 0x%X = 0x%08x\n",
+			MV_CESA_TDMA_BYTE_COUNT_REG(chan), MV_REG_READ(MV_CESA_TDMA_BYTE_COUNT_REG(chan)));
+
+		mvOsPrintf("MV_CESA_TDMA_SRC_ADDR_REG           : 0x%X = 0x%08x\n",
+			MV_CESA_TDMA_SRC_ADDR_REG(chan), MV_REG_READ(MV_CESA_TDMA_SRC_ADDR_REG(chan)));
+
+		mvOsPrintf("MV_CESA_TDMA_DST_ADDR_REG           : 0x%X = 0x%08x\n",
+			MV_CESA_TDMA_DST_ADDR_REG(chan), MV_REG_READ(MV_CESA_TDMA_DST_ADDR_REG(chan)));
+
+		mvOsPrintf("MV_CESA_TDMA_NEXT_DESC_PTR_REG      : 0x%X = 0x%08x\n",
+			MV_CESA_TDMA_NEXT_DESC_PTR_REG(chan), MV_REG_READ(MV_CESA_TDMA_NEXT_DESC_PTR_REG(chan)));
+
+		mvOsPrintf("MV_CESA_TDMA_CURR_DESC_PTR_REG      : 0x%X = 0x%08x\n",
+			MV_CESA_TDMA_CURR_DESC_PTR_REG(chan), MV_REG_READ(MV_CESA_TDMA_CURR_DESC_PTR_REG(chan)));
+
+		mvOsPrintf("MV_CESA_TDMA_ERROR_CAUSE_REG        : 0x%X = 0x%08x\n",
+			MV_CESA_TDMA_ERROR_CAUSE_REG(chan), MV_REG_READ(MV_CESA_TDMA_ERROR_CAUSE_REG(chan)));
+
+		mvOsPrintf("MV_CESA_TDMA_ERROR_MASK_REG         : 0x%X = 0x%08x\n",
+			MV_CESA_TDMA_ERROR_MASK_REG(chan),
+			MV_REG_READ(MV_CESA_TDMA_ERROR_MASK_REG(chan)));
+
+		mvOsPrintf("\n=========== decoding windows ===========\n");
+		for (i = 0; i < MV_CESA_TDMA_ADDR_DEC_WIN; i++) {
+			mvOsPrintf("%s\t: 0x%X = 0x%08x\n",
+			    "MV_CESA_TDMA_BASE_ADDR_REG",
+			    MV_CESA_TDMA_BASE_ADDR_REG(chan, i),
+			    MV_REG_READ(MV_CESA_TDMA_BASE_ADDR_REG(chan, i)));
+			mvOsPrintf("%s\t: 0x%X = 0x%08x\n",
+			    "MV_CESA_TDMA_WIN_CTRL_REG",
+			    MV_CESA_TDMA_WIN_CTRL_REG(chan, i),
+			    MV_REG_READ(MV_CESA_TDMA_WIN_CTRL_REG(chan, i)));
+		}
+
+#endif
+	}
+}
+
+void mvCesaDebugStatus(void)
+{
+	MV_U8 chan = 0;
+	mvOsPrintf("\n\t CESA Status\n\n");
+#ifdef CONFIG_OF
+	for (chan = 0; chan < mv_cesa_channels; chan++) {
+#else
+	for (chan = 0; chan < MV_CESA_CHANNELS; chan++) {
+#endif
+
+		mvOsPrintf("Channel %d: pReqQ=%p, qDepth=%d, reqSize=%d bytes, qRes=%d",
+			chan, pCesaReqFirst[chan], cesaQueueDepth[chan], (int)sizeof(MV_CESA_REQ), cesaReqResources[chan]);
+
+#if defined(MV_CESA_CHAIN_MODE) || defined(CONFIG_OF)
+
+#ifdef CONFIG_OF
+		if (mv_cesa_feature == CHAIN) {
+#endif /* CONFIG_OF */
+			mvOsPrintf(", chainLength=%u", cesaChainLength[chan]);
+#ifdef CONFIG_OF
+		}
+#endif /* CONFIG_OF */
+#endif /* MV_CESA_CHAIN_MODE || CONFIG_OF */
+
+		mvOsPrintf("\n");
+	}
+	mvOsPrintf("pSAD=%p, maxSA=%d, sizeSA=%d bytes\n", pCesaSAD, cesaMaxSA, (int)sizeof(MV_CESA_SA));
+
+	mvOsPrintf("\n");
+
+	mvCesaDebugRegs();
+	mvCesaDebugStats();
+	mvCesaDebugStatsClear();
+}
+
+void mvCesaDebugDescriptor(MV_CESA_DESC *pDesc)
+{
+	mvOsPrintf("config=0x%08x, crSrcOffs=0x%04x, crDstOffs=0x%04x\n",
+		pDesc->config, pDesc->cryptoSrcOffset, pDesc->cryptoDstOffset);
+
+	mvOsPrintf("crLen=0x%04x, crKeyOffs=0x%04x, ivOffs=0x%04x, ivBufOffs=0x%04x\n",
+		pDesc->cryptoDataLen, pDesc->cryptoKeyOffset, pDesc->cryptoIvOffset, pDesc->cryptoIvBufOffset);
+
+	mvOsPrintf("macSrc=0x%04x, digest=0x%04x, macLen=0x%04x, inIv=0x%04x, outIv=0x%04x\n",
+			pDesc->macSrcOffset, pDesc->macDigestOffset, pDesc->macDataLen,
+			pDesc->macInnerIvOffset, pDesc->macOuterIvOffset);
+}
+
+void mvCesaDebugQueue(int mode)
+{
+	MV_U8 chan = 0;
+
+	mvOsPrintf("\n\t CESA Request Queue:\n\n");
+
+#ifdef CONFIG_OF
+	for (chan = 0; chan < mv_cesa_channels; chan++) {
+#else
+	for (chan = 0; chan < MV_CESA_CHANNELS; chan++) {
+#endif
+		mvOsPrintf("\n\nChannel %d:\n", chan);
+		mvOsPrintf("===========\n");
+
+		mvOsPrintf("pFirstReq=%p, pLastReq=%p, qDepth=%d, reqSize=%d bytes\n",
+			pCesaReqFirst[chan], pCesaReqLast[chan], cesaQueueDepth[chan], (int)sizeof(MV_CESA_REQ));
+
+		mvOsPrintf("pEmpty=%p, pProcess=%p, qResources=%d\n", pCesaReqEmpty[chan],
+					pCesaReqProcess[chan], cesaReqResources[chan]);
+
+		if (mode != 0) {
+			int count = 0;
+			MV_CESA_REQ *pReq = pCesaReqFirst[chan];
+
+			for (count = 0; count < cesaQueueDepth[chan]; count++) {
+				/* Print out requsts */
+				mvOsPrintf("%02d. pReq=%p, state=%s, frag=0x%x, pCmd=%p, pDma=%p, pDesc=%p, reqId=%u, use=%u\n",
+					count, pReq, mvCesaDebugStateStr(pReq->state),
+					pReq->fragMode, pReq->pCmd, pReq->dma[0].pDmaFirst, &pReq->pCesaDesc[0],
+					pReq->pCmd->reqId, pReq->use);
+				if (pReq->fragMode != MV_CESA_FRAG_NONE) {
+					int frag;
+
+					mvOsPrintf("pFrags=%p, num=%d, next=%d, bufOffset=%d, cryptoSize=%d, macSize=%d\n",
+						&pReq->frags, pReq->frags.numFrag, pReq->frags.nextFrag,
+						pReq->frags.bufOffset, pReq->frags.cryptoSize, pReq->frags.macSize);
+					for (frag = 0; frag < pReq->frags.numFrag; frag++) {
+						mvOsPrintf("#%d: pDmaFirst=%p, pDesc=%p\n", frag,
+							pReq->dma[frag].pDmaFirst, &pReq->pCesaDesc[frag]);
+					}
+				}
+				if (mode > 1) {
+					/* Print out Command */
+					mvCesaDebugCmd(pReq->pCmd, mode);
+
+					/* Print out Descriptor */
+					mvCesaDebugDescriptor(&pReq->pCesaDesc[0]);
+				}
+				pReq++;
+			}
+		}
+	}
+}
+
+void mvCesaDebugSramSA(MV_CESA_SRAM_SA *pSramSA, int mode)
+{
+	if (pSramSA == NULL) {
+		mvOsPrintf("cesaSramSA: Unexpected pSramSA=%p\n", pSramSA);
+		return;
+	}
+	mvOsPrintf("pSramSA=%p, sizeSramSA=%d bytes\n", pSramSA, (int)sizeof(MV_CESA_SRAM_SA));
+
+	if (mode != 0) {
+		mvOsPrintf("cryptoKey=%p, maxCryptoKey=%d bytes\n", pSramSA->cryptoKey, MV_CESA_MAX_CRYPTO_KEY_LENGTH);
+#ifdef CONFIG_OF
+		mv_debug_mem_dump(pSramSA->cryptoKey, MV_CESA_MAX_CRYPTO_KEY_LENGTH, 1);
+#else
+		mvDebugMemDump(pSramSA->cryptoKey, MV_CESA_MAX_CRYPTO_KEY_LENGTH, 1);
+#endif
+
+		mvOsPrintf("macInnerIV=%p, maxInnerIV=%d bytes\n", pSramSA->macInnerIV, MV_CESA_MAX_DIGEST_SIZE);
+#ifdef CONFIG_OF
+		mv_debug_mem_dump(pSramSA->macInnerIV, MV_CESA_MAX_DIGEST_SIZE, 1);
+#else
+		mvDebugMemDump(pSramSA->macInnerIV, MV_CESA_MAX_DIGEST_SIZE, 1);
+#endif
+
+		mvOsPrintf("macOuterIV=%p, maxOuterIV=%d bytes\n", pSramSA->macOuterIV, MV_CESA_MAX_DIGEST_SIZE);
+#ifdef CONFIG_OF
+		mv_debug_mem_dump(pSramSA->macOuterIV, MV_CESA_MAX_DIGEST_SIZE, 1);
+#else
+		mvDebugMemDump(pSramSA->macOuterIV, MV_CESA_MAX_DIGEST_SIZE, 1);
+#endif
+	}
+}
+
+void mvCesaDebugSA(short sid, int mode)
+{
+	MV_CESA_OPERATION oper;
+	MV_CESA_DIRECTION dir;
+	MV_CESA_CRYPTO_ALG cryptoAlg;
+	MV_CESA_CRYPTO_MODE cryptoMode;
+	MV_CESA_MAC_MODE macMode;
+	MV_CESA_SA *pSA = pCesaSAD[sid];
+
+	if (pSA != NULL) {
+		/*if(((pSA->count != 0) && (mode > 0)) || (mode >= 2))
+		   { */
+		mvOsPrintf("\n\nCESA SA Entry #%d (%p) - %s (count=%d)\n",
+			   sid, pSA, (pSA != NULL) ? "Valid" : "Invalid", pSA->count);
+
+		oper = (pSA->config & MV_CESA_OPERATION_MASK) >> MV_CESA_OPERATION_OFFSET;
+		dir = (pSA->config & MV_CESA_DIRECTION_MASK) >> MV_CESA_DIRECTION_BIT;
+		mvOsPrintf("%s - %s ", mvCesaDebugOperStr(oper), (dir == MV_CESA_DIR_ENCODE) ? "Encode" : "Decode");
+		if (oper != MV_CESA_MAC_ONLY) {
+			cryptoAlg = (pSA->config & MV_CESA_CRYPTO_ALG_MASK) >> MV_CESA_CRYPTO_ALG_OFFSET;
+			cryptoMode = (pSA->config & MV_CESA_CRYPTO_MODE_MASK) >> MV_CESA_CRYPTO_MODE_BIT;
+			mvOsPrintf("- %s - %s ", mvCesaDebugCryptoAlgStr(cryptoAlg),
+				   (cryptoMode == MV_CESA_CRYPTO_ECB) ? "ECB" : "CBC");
+		}
+		if (oper != MV_CESA_CRYPTO_ONLY) {
+			macMode = (pSA->config & MV_CESA_MAC_MODE_MASK) >> MV_CESA_MAC_MODE_OFFSET;
+			mvOsPrintf("- %s ", mvCesaDebugMacModeStr(macMode));
+		}
+		mvOsPrintf("\n");
+
+		if (mode > 0) {
+			mvOsPrintf("config=0x%08x, cryptoKeySize=%d, digestSize=%d\n",
+				   pCesaSAD[sid]->config, pCesaSAD[sid]->cryptoKeyLength, pCesaSAD[sid]->digestSize);
+
+			mvCesaDebugSramSA(pCesaSAD[sid]->pSramSA, mode);
+		}
+		/*} */
+	}
+}
+
+ /**/ void mvCesaDebugSram(int mode)
+{
+	MV_U8 chan = 0;
+
+#ifdef CONFIG_OF
+	for (chan = 0; chan < mv_cesa_channels; chan++) {
+#else
+	for (chan = 0; chan < MV_CESA_CHANNELS; chan++) {
+#endif
+		mvOsPrintf("\n\nChannel %d:\n", chan);
+		mvOsPrintf("===========\n");
+
+		mvOsPrintf("\n\t SRAM contents: size=%d, pVirt=%p\n\n", (int)sizeof(MV_CESA_SRAM_MAP),
+						cesaSramVirtPtr[chan]);
+
+		mvOsPrintf("\n\t Sram buffer: size=%d, pVirt=%p\n", MV_CESA_MAX_BUF_SIZE, cesaSramVirtPtr[chan]->buf);
+		if (mode != 0)
+#ifdef CONFIG_OF
+			mv_debug_mem_dump(cesaSramVirtPtr[chan]->buf, 64, 1);
+#else
+			mvDebugMemDump(cesaSramVirtPtr[chan]->buf, 64, 1);
+#endif
+
+		mvOsPrintf("\n");
+		mvOsPrintf("\n\t Sram descriptor: size=%d, pVirt=%p\n", (int)sizeof(MV_CESA_DESC),
+						&cesaSramVirtPtr[chan]->desc);
+		if (mode != 0) {
+			mvOsPrintf("\n");
+			mvCesaDebugDescriptor(&cesaSramVirtPtr[chan]->desc);
+		}
+		mvOsPrintf("\n\t Sram IV: size=%d, pVirt=%p\n", MV_CESA_MAX_IV_LENGTH, &cesaSramVirtPtr[chan]->cryptoIV);
+		if (mode != 0) {
+			mvOsPrintf("\n");
+#ifdef CONFIG_OF
+			mv_debug_mem_dump(cesaSramVirtPtr[chan]->cryptoIV, MV_CESA_MAX_IV_LENGTH, 1);
+#else
+			mvDebugMemDump(cesaSramVirtPtr[chan]->cryptoIV, MV_CESA_MAX_IV_LENGTH, 1);
+#endif
+		}
+		mvOsPrintf("\n");
+		mvCesaDebugSramSA(&cesaSramVirtPtr[chan]->sramSA, 0);
+	}
+}
+
+void mvCesaDebugSAD(int mode)
+{
+	int sid;
+
+	mvOsPrintf("\n\t Cesa SAD status: pSAD=%p, maxSA=%d\n", pCesaSAD, cesaMaxSA);
+
+	for (sid = 0; sid < cesaMaxSA; sid++)
+		mvCesaDebugSA(sid, mode);
+}
+
+void mvCesaDebugStats(void)
+{
+	mvOsPrintf("\n\t Cesa Statistics\n");
+
+	mvOsPrintf("Opened=%u, Closed=%u\n", cesaStats.openedCount, cesaStats.closedCount);
+	mvOsPrintf("Req=%u, maxReq=%u, frags=%u, start=%u\n",
+		   cesaStats.reqCount, cesaStats.maxReqCount, cesaStats.fragCount, cesaStats.startCount);
+
+#if defined(MV_CESA_CHAIN_MODE) || defined(CONFIG_OF)
+#ifdef CONFIG_OF
+	if (mv_cesa_feature == CHAIN) {
+#endif /* CONFIG_OF */
+		mvOsPrintf("maxChainUsage=%u\n", cesaStats.maxChainUsage);
+#ifdef CONFIG_OF
+	}
+#endif /* CONFIG_OF */
+#endif /* MV_CESA_CHAIN_MODE || CONFIG_OF */
+
+	mvOsPrintf("\n");
+	mvOsPrintf("proc=%u, ready=%u, notReady=%u\n",
+		   cesaStats.procCount, cesaStats.readyCount, cesaStats.notReadyCount);
+}
+
+void mvCesaDebugStatsClear(void)
+{
+	memset(&cesaStats, 0, sizeof(cesaStats));
+}
diff --git a/drivers/crypto/mvebu_cesa/hal/mvCesaRegs.h b/drivers/crypto/mvebu_cesa/hal/mvCesaRegs.h
new file mode 100644
index 000000000000..05c2157a030a
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/hal/mvCesaRegs.h
@@ -0,0 +1,384 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+		notice, this list of conditions and the following disclaimer in the
+		documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+		used to endorse or promote products derived from this software without
+		specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __mvCesaRegs_h__
+#define __mvCesaRegs_h__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mvSysCesaConfig.h"
+
+	typedef struct {
+		/* word 0 */
+		MV_U32 config;
+		/* word 1 */
+		MV_U16 cryptoSrcOffset;
+		MV_U16 cryptoDstOffset;
+		/* word 2 */
+		MV_U16 cryptoDataLen;
+		MV_U16 reserved1;
+		/* word 3 */
+		MV_U16 cryptoKeyOffset;
+		MV_U16 reserved2;
+		/* word 4 */
+		MV_U16 cryptoIvOffset;
+		MV_U16 cryptoIvBufOffset;
+		/* word 5 */
+		MV_U16 macSrcOffset;
+		MV_U16 macTotalLen;
+		/* word 6 */
+		MV_U16 macDigestOffset;
+		MV_U16 macDataLen;
+		/* word 7 */
+		MV_U16 macInnerIvOffset;
+		MV_U16 macOuterIvOffset;
+	} MV_CESA_DESC;
+
+/* operation */
+	typedef enum {
+		MV_CESA_MAC_ONLY = 0,
+		MV_CESA_CRYPTO_ONLY = 1,
+		MV_CESA_MAC_THEN_CRYPTO = 2,
+		MV_CESA_CRYPTO_THEN_MAC = 3,
+		MV_CESA_MAX_OPERATION
+	} MV_CESA_OPERATION;
+
+#define MV_CESA_OPERATION_OFFSET        		0
+#define MV_CESA_OPERATION_MASK          		(0x3 << MV_CESA_OPERATION_OFFSET)
+
+/* mac algorithm */
+	typedef enum {
+		MV_CESA_MAC_NULL = 0,
+		MV_CESA_MAC_SHA2 = 1,
+		MV_CESA_MAC_HMAC_SHA2 = 3,
+		MV_CESA_MAC_MD5 = 4,
+		MV_CESA_MAC_SHA1 = 5,
+		MV_CESA_MAC_HMAC_MD5 = 6,
+		MV_CESA_MAC_HMAC_SHA1 = 7,
+	} MV_CESA_MAC_MODE;
+
+#define MV_CESA_MAC_MODE_OFFSET         		4
+#define MV_CESA_MAC_MODE_MASK           		(0x7 << MV_CESA_MAC_MODE_OFFSET)
+
+	typedef enum {
+		MV_CESA_MAC_DIGEST_FULL = 0,
+		MV_CESA_MAC_DIGEST_96B = 1,
+	} MV_CESA_MAC_DIGEST_SIZE;
+
+#define MV_CESA_MAC_DIGEST_SIZE_BIT     		7
+#define MV_CESA_MAC_DIGEST_SIZE_MASK    		(1 << MV_CESA_MAC_DIGEST_SIZE_BIT)
+
+	typedef enum {
+		MV_CESA_CRYPTO_NULL = 0,
+		MV_CESA_CRYPTO_DES = 1,
+		MV_CESA_CRYPTO_3DES = 2,
+		MV_CESA_CRYPTO_AES = 3,
+	} MV_CESA_CRYPTO_ALG;
+
+#define MV_CESA_CRYPTO_ALG_OFFSET       		8
+#define MV_CESA_CRYPTO_ALG_MASK         		(0x3 << MV_CESA_CRYPTO_ALG_OFFSET)
+
+/* direction */
+	typedef enum {
+		MV_CESA_DIR_ENCODE = 0,
+		MV_CESA_DIR_DECODE = 1,
+	} MV_CESA_DIRECTION;
+
+#define MV_CESA_DIRECTION_BIT           		12
+#define MV_CESA_DIRECTION_MASK          		(1 << MV_CESA_DIRECTION_BIT)
+
+/* crypto IV mode */
+	typedef enum {
+		MV_CESA_CRYPTO_ECB = 0,
+		MV_CESA_CRYPTO_CBC = 1,
+		/* NO HW Support */
+		MV_CESA_CRYPTO_CTR = 10,
+	} MV_CESA_CRYPTO_MODE;
+
+#define MV_CESA_CRYPTO_MODE_BIT         		16
+#define MV_CESA_CRYPTO_MODE_MASK        		(1 << MV_CESA_CRYPTO_MODE_BIT)
+
+/* 3DES mode */
+	typedef enum {
+		MV_CESA_CRYPTO_3DES_EEE = 0,
+		MV_CESA_CRYPTO_3DES_EDE = 1,
+	} MV_CESA_CRYPTO_3DES_MODE;
+
+#define MV_CESA_CRYPTO_3DES_MODE_BIT    		20
+#define MV_CESA_CRYPTO_3DES_MODE_MASK   		(1 << MV_CESA_CRYPTO_3DES_MODE_BIT)
+
+/* AES Key Length */
+	typedef enum {
+		MV_CESA_CRYPTO_AES_KEY_128 = 0,
+		MV_CESA_CRYPTO_AES_KEY_192 = 1,
+		MV_CESA_CRYPTO_AES_KEY_256 = 2,
+	} MV_CESA_CRYPTO_AES_KEY_LEN;
+
+#define MV_CESA_CRYPTO_AES_KEY_LEN_OFFSET   		24
+#define MV_CESA_CRYPTO_AES_KEY_LEN_MASK     		(0x3 << MV_CESA_CRYPTO_AES_KEY_LEN_OFFSET)
+
+/* Fragmentation mode */
+	typedef enum {
+		MV_CESA_FRAG_NONE = 0,
+		MV_CESA_FRAG_FIRST = 1,
+		MV_CESA_FRAG_LAST = 2,
+		MV_CESA_FRAG_MIDDLE = 3,
+	} MV_CESA_FRAG_MODE;
+
+#define MV_CESA_FRAG_MODE_OFFSET            		30
+#define MV_CESA_FRAG_MODE_MASK              		(0x3 << MV_CESA_FRAG_MODE_OFFSET)
+/*---------------------------------------------------------------------------*/
+
+/********** Security Accelerator Command Register **************/
+#define MV_CESA_CMD_REG(chan)               		(MV_CESA_REGS_BASE(chan) + 0xE00)
+
+#define MV_CESA_CMD_CHAN_ENABLE_BIT         		0
+#define MV_CESA_CMD_CHAN_ENABLE_MASK        		(1 << MV_CESA_CMD_CHAN_ENABLE_BIT)
+
+#define MV_CESA_CMD_CHAN_DISABLE_BIT        		2
+#define MV_CESA_CMD_CHAN_DISABLE_MASK       		(1 << MV_CESA_CMD_CHAN_DISABLE_BIT)
+
+/********** Security Accelerator Descriptor Pointers Register **********/
+#define MV_CESA_CHAN_DESC_OFFSET_REG(chan)  		(MV_CESA_REGS_BASE(chan) + 0xE04)
+
+/********** Security Accelerator Configuration Register **********/
+#define MV_CESA_CFG_REG(chan)                     	(MV_CESA_REGS_BASE(chan) + 0xE08)
+
+#define MV_CESA_CFG_STOP_DIGEST_ERR_BIT     		0
+#define MV_CESA_CFG_STOP_DIGEST_ERR_MASK    		(1 << MV_CESA_CFG_STOP_DIGEST_ERR_BIT)
+
+#define MV_CESA_CFG_WAIT_DMA_BIT            		7
+#define MV_CESA_CFG_WAIT_DMA_MASK           		(1 << MV_CESA_CFG_WAIT_DMA_BIT)
+
+#define MV_CESA_CFG_ACT_DMA_BIT             		9
+#define MV_CESA_CFG_ACT_DMA_MASK            		(1 << MV_CESA_CFG_ACT_DMA_BIT)
+
+#define MV_CESA_CFG_CHAIN_MODE_BIT          		11
+#define MV_CESA_CFG_CHAIN_MODE_MASK         		(1 << MV_CESA_CFG_CHAIN_MODE_BIT)
+
+#define MV_CESA_CFG_ENC_AUTH_PARALLEL_MODE_BIT		13
+#define MV_CESA_CFG_ENC_AUTH_PARALLEL_MODE_MASK		(1 << MV_CESA_CFG_ENC_AUTH_PARALLEL_MODE_BIT)
+
+/********** Security Accelerator Status Register ***********/
+#define MV_CESA_STATUS_REG(chan)            		(MV_CESA_REGS_BASE(chan) + 0xE0C)
+
+#define MV_CESA_STATUS_ACTIVE_BIT           		0
+#define MV_CESA_STATUS_ACTIVE_MASK          		(1 << MV_CESA_STATUS_ACTIVE_BIT)
+
+#define MV_CESA_STATUS_DIGEST_ERR_BIT       		8
+#define MV_CESA_STATUS_DIGEST_ERR_MASK      		(1 << MV_CESA_STATUS_DIGEST_ERR_BIT)
+
+/* Cryptographic Engines and Security Accelerator Interrupt Cause Register */
+#define MV_CESA_ISR_CAUSE_REG(chan)         		(MV_CESA_REGS_BASE(chan) + 0xE20)
+
+/* Cryptographic Engines and Security Accelerator Interrupt Mask Register */
+#define MV_CESA_ISR_MASK_REG(chan)          		(MV_CESA_REGS_BASE(chan) + 0xE24)
+
+#define MV_CESA_CAUSE_AUTH_MASK             		(1 << 0)
+#define MV_CESA_CAUSE_DES_MASK              		(1 << 1)
+#define MV_CESA_CAUSE_AES_ENCR_MASK         		(1 << 2)
+#define MV_CESA_CAUSE_AES_DECR_MASK         		(1 << 3)
+#define MV_CESA_CAUSE_DES_ALL_MASK          		(1 << 4)
+
+#define MV_CESA_CAUSE_ACC_BIT(chan)              	(5 + chan)
+#define MV_CESA_CAUSE_ACC_MASK(chan)              	(1 << MV_CESA_CAUSE_ACC_BIT(chan))
+
+#define MV_CESA_CAUSE_ACC_DMA_BIT           		7
+#define MV_CESA_CAUSE_ACC_DMA_MASK          		(1 << MV_CESA_CAUSE_ACC_DMA_BIT)
+#define MV_CESA_CAUSE_ACC_DMA_ALL_MASK      		(3 << MV_CESA_CAUSE_ACC_DMA_BIT)
+
+#define MV_CESA_CAUSE_DMA_COMPL_BIT         		9
+#define MV_CESA_CAUSE_DMA_COMPL_MASK        		(1 << MV_CESA_CAUSE_DMA_COMPL_BIT)
+
+#define MV_CESA_CAUSE_DMA_OWN_ERR_BIT       		10
+#define MV_CESA_CAUSE_DMA_OWN_ERR_MASK      		(1 << MV_CESA_CAUSE_DMA_OWN_ERR_BIT)
+
+#define MV_CESA_CAUSE_DMA_CHAIN_PKT_BIT     		11
+#define MV_CESA_CAUSE_DMA_CHAIN_PKT_MASK    		(1 << MV_CESA_CAUSE_DMA_CHAIN_PKT_BIT)
+
+#define MV_CESA_CAUSE_EOP_COAL_BIT			14
+#define MV_CESA_CAUSE_EOP_COAL_MASK			(1 << MV_CESA_CAUSE_EOP_COAL_BIT)
+
+/* MV_CESA_INT_COALESCING_SUPPORT */
+/* Cryptographic Interrupt Coalescing Threshold Register */
+#define MV_CESA_INT_COAL_TH_REG(chan)			(MV_CESA_REGS_BASE(chan) + 0xE30)
+#define MV_CESA_EOP_PACKET_COAL_TH_OFFSET		0
+#define MV_CESA_EOP_PACKET_COAL_TH_MASK			(0xff << MV_CESA_EOP_PACKET_COAL_TH_OFFSET)
+
+/* Cryptographic Interrupt Time Threshold Register */
+#define MV_CESA_INT_TIME_TH_REG(chan)			(MV_CESA_REGS_BASE(chan) + 0xE34)
+#define MV_CESA_EOP_TIME_TH_OFFSET			0
+#define MV_CESA_EOP_TIME_TH_MASK			(0xffffff << MV_CESA_EOP_TIME_TH_OFFSET)
+
+/* !MV_CESA_INT_COALESCING_SUPPORT */
+
+#define MV_CESA_AUTH_DATA_IN_REG(chan)      		(MV_CESA_REGS_BASE(chan) + 0xd38)
+#define MV_CESA_AUTH_BIT_COUNT_LOW_REG(chan)      	(MV_CESA_REGS_BASE(chan) + 0xd20)
+#define MV_CESA_AUTH_BIT_COUNT_HIGH_REG(chan)     	(MV_CESA_REGS_BASE(chan) + 0xd24)
+
+#define MV_CESA_AUTH_INIT_VAL_DIGEST_REG(chan, i)	(MV_CESA_REGS_BASE(chan) + 0xd00 + (i<<2))
+
+#define MV_CESA_AUTH_INIT_VAL_DIGEST_A_REG(chan)  	(MV_CESA_REGS_BASE(chan) + 0xd00)
+#define MV_CESA_AUTH_INIT_VAL_DIGEST_B_REG(chan)  	(MV_CESA_REGS_BASE(chan) + 0xd04)
+#define MV_CESA_AUTH_INIT_VAL_DIGEST_C_REG(chan)  	(MV_CESA_REGS_BASE(chan) + 0xd08)
+#define MV_CESA_AUTH_INIT_VAL_DIGEST_D_REG(chan)  	(MV_CESA_REGS_BASE(chan) + 0xd0c)
+#define MV_CESA_AUTH_INIT_VAL_DIGEST_E_REG(chan)  	(MV_CESA_REGS_BASE(chan) + 0xd10)
+#define MV_CESA_AUTH_COMMAND_REG(chan)            	(MV_CESA_REGS_BASE(chan) + 0xd18)
+
+#define MV_CESA_AUTH_ALGORITHM_BIT          		0
+#define MV_CESA_AUTH_ALGORITHM_MD5          		(0<<AUTH_ALGORITHM_BIT)
+#define MV_CESA_AUTH_ALGORITHM_SHA1         		(1<<AUTH_ALGORITHM_BIT)
+
+#define MV_CESA_AUTH_IV_MODE_BIT            		1
+#define MV_CESA_AUTH_IV_MODE_INIT           		(0<<AUTH_IV_MODE_BIT)
+#define MV_CESA_AUTH_IV_MODE_CONTINUE       		(1<<AUTH_IV_MODE_BIT)
+
+#define MV_CESA_AUTH_DATA_BYTE_SWAP_BIT     		2
+#define MV_CESA_AUTH_DATA_BYTE_SWAP_MASK    		(1<<AUTH_DATA_BYTE_SWAP_BIT)
+
+#define MV_CESA_AUTH_IV_BYTE_SWAP_BIT       		4
+#define MV_CESA_AUTH_IV_BYTE_SWAP_MASK      		(1<<AUTH_IV_BYTE_SWAP_BIT)
+
+#define MV_CESA_AUTH_TERMINATION_BIT        		31
+#define MV_CESA_AUTH_TERMINATION_MASK       		(1<<AUTH_TERMINATION_BIT)
+
+/*************** TDMA Control Register ************************************************/
+#define MV_CESA_TDMA_CTRL_REG(chan)               	(MV_CESA_TDMA_REGS_BASE(chan) + 0x840)
+
+#define MV_CESA_TDMA_BURST_32B              		3
+#define MV_CESA_TDMA_BURST_128B             		4
+
+#define MV_CESA_TDMA_DST_BURST_OFFSET       		0
+#define MV_CESA_TDMA_DST_BURST_ALL_MASK     		(0x7<<MV_CESA_TDMA_DST_BURST_OFFSET)
+#define MV_CESA_TDMA_DST_BURST_MASK(burst)  		((burst)<<MV_CESA_TDMA_DST_BURST_OFFSET)
+
+#define MV_CESA_TDMA_OUTSTAND_READ_EN_BIT   		4
+#define MV_CESA_TDMA_OUTSTAND_READ_EN_MASK  		(1<<MV_CESA_TDMA_OUTSTAND_READ_EN_BIT)
+
+#define MV_CESA_TDMA_SRC_BURST_OFFSET       		6
+#define MV_CESA_TDMA_SRC_BURST_ALL_MASK     		(0x7<<MV_CESA_TDMA_SRC_BURST_OFFSET)
+#define MV_CESA_TDMA_SRC_BURST_MASK(burst)  		((burst)<<MV_CESA_TDMA_SRC_BURST_OFFSET)
+
+#define MV_CESA_TDMA_CHAIN_MODE_BIT         		9
+#define MV_CESA_TDMA_NON_CHAIN_MODE_MASK    		(1<<MV_CESA_TDMA_CHAIN_MODE_BIT)
+
+#define MV_CESA_TDMA_BYTE_SWAP_BIT	    		11
+#define MV_CESA_TDMA_BYTE_SWAP_MASK	    		(0 << MV_CESA_TDMA_BYTE_SWAP_BIT)
+#define MV_CESA_TDMA_NO_BYTE_SWAP_MASK	    		(1 << MV_CESA_TDMA_BYTE_SWAP_BIT)
+
+#define MV_CESA_TDMA_ENABLE_BIT		    		12
+#define MV_CESA_TDMA_ENABLE_MASK            		(1<<MV_CESA_TDMA_ENABLE_BIT)
+
+#define MV_CESA_TDMA_FETCH_NEXT_DESC_BIT    		13
+#define MV_CESA_TDMA_FETCH_NEXT_DESC_MASK   		(1<<MV_CESA_TDMA_FETCH_NEXT_DESC_BIT)
+
+#define MV_CESA_TDMA_CHAN_ACTIVE_BIT	    		14
+#define MV_CESA_TDMA_CHAN_ACTIVE_MASK       		(1<<MV_CESA_TDMA_CHAN_ACTIVE_BIT)
+
+#define MV_CESA_TDMA_NUM_OF_OUTSTAND_OFFSET		16
+#define MV_CESA_TDMA_NUM_OF_OUTSTAND_MASK		(0x3 << MV_CESA_TDMA_NUM_OF_OUTSTAND_OFFSET)
+#define MV_CESA_TDMA_OUTSTAND_NORMAL_MODE_BIT		(0x0 << MV_CESA_TDMA_NUM_OF_OUTSTAND_OFFSET)
+#define MV_CESA_TDMA_OUTSTAND_OUT_OF_ORDER_2TRANS_BIT	(0x1 << MV_CESA_TDMA_NUM_OF_OUTSTAND_OFFSET)
+#define MV_CESA_TDMA_OUTSTAND_OUT_OF_ORDER_3TRANS_BIT	(0x2 << MV_CESA_TDMA_NUM_OF_OUTSTAND_OFFSET)
+#define MV_CESA_TDMA_OUTSTAND_NEW_MODE_BIT		(0x3 << MV_CESA_TDMA_NUM_OF_OUTSTAND_OFFSET)
+/*------------------------------------------------------------------------------------*/
+
+#define MV_CESA_TDMA_BYTE_COUNT_REG(chan)         	(MV_CESA_TDMA_REGS_BASE(chan) + 0x800)
+#define MV_CESA_TDMA_SRC_ADDR_REG(chan)           	(MV_CESA_TDMA_REGS_BASE(chan) + 0x810)
+#define MV_CESA_TDMA_DST_ADDR_REG(chan)           	(MV_CESA_TDMA_REGS_BASE(chan) + 0x820)
+#define MV_CESA_TDMA_NEXT_DESC_PTR_REG(chan)      	(MV_CESA_TDMA_REGS_BASE(chan) + 0x830)
+#define MV_CESA_TDMA_CURR_DESC_PTR_REG(chan)      	(MV_CESA_TDMA_REGS_BASE(chan) + 0x870)
+
+#define MV_CESA_TDMA_ERROR_CAUSE_REG(chan)        	(MV_CESA_TDMA_REGS_BASE(chan) + 0x8C8)
+#define MV_CESA_TDMA_ERROR_MASK_REG(chan)         	(MV_CESA_TDMA_REGS_BASE(chan) + 0x8CC)
+
+/*************** Address Decode Register ********************************************/
+
+#define MV_CESA_TDMA_ADDR_DEC_WIN           		4
+
+#define MV_CESA_TDMA_BASE_ADDR_REG(chan, win)     	(MV_CESA_TDMA_REGS_BASE(chan) + 0xa00 + (win<<3))
+
+#define MV_CESA_TDMA_WIN_CTRL_REG(chan, win)      	(MV_CESA_TDMA_REGS_BASE(chan) + 0xa04 + (win<<3))
+
+#define MV_CESA_TDMA_WIN_ENABLE_BIT         		0
+#define MV_CESA_TDMA_WIN_ENABLE_MASK        		(1 << MV_CESA_TDMA_WIN_ENABLE_BIT)
+
+#define MV_CESA_TDMA_WIN_TARGET_OFFSET      		4
+#define MV_CESA_TDMA_WIN_TARGET_MASK        		(0xf << MV_CESA_TDMA_WIN_TARGET_OFFSET)
+
+#define MV_CESA_TDMA_WIN_ATTR_OFFSET        		8
+#define MV_CESA_TDMA_WIN_ATTR_MASK          		(0xff << MV_CESA_TDMA_WIN_ATTR_OFFSET)
+
+#define MV_CESA_TDMA_WIN_SIZE_OFFSET        		16
+#define MV_CESA_TDMA_WIN_SIZE_MASK          		(0xFFFF << MV_CESA_TDMA_WIN_SIZE_OFFSET)
+
+#define MV_CESA_TDMA_WIN_BASE_OFFSET        		16
+#define MV_CESA_TDMA_WIN_BASE_MASK          		(0xFFFF << MV_CESA_TDMA_WIN_BASE_OFFSET)
+
+#ifdef __cplusplus
+}
+#endif
+#endif				/* __mvCesaRegs_h__ */
diff --git a/drivers/crypto/mvebu_cesa/hal/mvLru.c b/drivers/crypto/mvebu_cesa/hal/mvLru.c
new file mode 100644
index 000000000000..fe977616d3eb
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/hal/mvLru.c
@@ -0,0 +1,150 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+		notice, this list of conditions and the following disclaimer in the
+		documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+		used to endorse or promote products derived from this software without
+		specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "mvLru.h"
+/* LRU Cache support */
+
+/* Init LRU cache database */
+MV_LRU_CACHE *mvLruCacheInit(int numOfEntries)
+{
+	int i;
+	MV_LRU_CACHE *pLruCache;
+
+	pLruCache = mvOsMalloc(sizeof(MV_LRU_CACHE));
+
+	if (pLruCache == NULL)
+		return NULL;
+
+	memset(pLruCache, 0, sizeof(MV_LRU_CACHE));
+
+	pLruCache->table = mvOsMalloc(numOfEntries * sizeof(MV_LRU_ENTRY));
+	if (pLruCache->table == NULL) {
+		mvOsFree(pLruCache);
+		return NULL;
+	}
+	memset(pLruCache->table, 0, numOfEntries * sizeof(MV_LRU_ENTRY));
+	pLruCache->tableSize = numOfEntries;
+
+	for (i = 0; i < numOfEntries; i++) {
+		pLruCache->table[i].next = i + 1;
+		pLruCache->table[i].prev = i - 1;
+	}
+	pLruCache->least = 0;
+	pLruCache->most = numOfEntries - 1;
+
+	return pLruCache;
+}
+
+void mvLruCacheFinish(MV_LRU_CACHE *pLruCache)
+{
+	mvOsFree(pLruCache->table);
+	mvOsFree(pLruCache);
+}
+
+/* Update LRU cache database after using cache Index */
+void mvLruCacheIdxUpdate(MV_LRU_CACHE *pLruHndl, int cacheIdx)
+{
+	int prev, next;
+
+	if (cacheIdx == pLruHndl->most)
+		return;
+
+	next = pLruHndl->table[cacheIdx].next;
+	if (cacheIdx == pLruHndl->least) {
+		pLruHndl->least = next;
+	} else {
+		prev = pLruHndl->table[cacheIdx].prev;
+
+		pLruHndl->table[next].prev = prev;
+		pLruHndl->table[prev].next = next;
+	}
+
+	pLruHndl->table[pLruHndl->most].next = cacheIdx;
+	pLruHndl->table[cacheIdx].prev = pLruHndl->most;
+	pLruHndl->most = cacheIdx;
+}
+
+/* Delete LRU cache entry */
+void mvLruCacheIdxDelete(MV_LRU_CACHE *pLruHndl, int cacheIdx)
+{
+	int prev, next;
+
+	if (cacheIdx == pLruHndl->least)
+		return;
+
+	prev = pLruHndl->table[cacheIdx].prev;
+	if (cacheIdx == pLruHndl->most) {
+		pLruHndl->most = prev;
+	} else {
+		next = pLruHndl->table[cacheIdx].next;
+
+		pLruHndl->table[next].prev = prev;
+		pLruHndl->table[prev].next = next;
+	}
+	pLruHndl->table[pLruHndl->least].prev = cacheIdx;
+	pLruHndl->table[cacheIdx].next = pLruHndl->least;
+	pLruHndl->least = cacheIdx;
+}
diff --git a/drivers/crypto/mvebu_cesa/hal/mvLru.h b/drivers/crypto/mvebu_cesa/hal/mvLru.h
new file mode 100644
index 000000000000..15097d827add
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/hal/mvLru.h
@@ -0,0 +1,113 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+		this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+		notice, this list of conditions and the following disclaimer in the
+		documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+		used to endorse or promote products derived from this software without
+		specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+/*******************************************************************************
+* mvLru.h - Header File for Least Recently Used Cache algorithm
+*
+* DESCRIPTION:
+*       This header file contains macros typedefs and function declaration for
+*       the Least Recently Used Cache algorithm.
+*
+*******************************************************************************/
+
+#ifndef __mvLru_h__
+#define __mvLru_h__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct {
+	int next;
+	int prev;
+} MV_LRU_ENTRY;
+
+typedef struct {
+	int least;
+	int most;
+	MV_LRU_ENTRY *table;
+	int tableSize;
+} MV_LRU_CACHE;
+
+/* Find Cache index for replacement LRU */
+static INLINE int mvLruCacheIdxFind(MV_LRU_CACHE *pLruHndl)
+{
+	return pLruHndl->least;
+}
+/* Init LRU cache module */
+MV_LRU_CACHE *mvLruCacheInit(int numOfEntries);
+
+/* Finish LRU cache module */
+void mvLruCacheFinish(MV_LRU_CACHE *pLruHndl);
+
+/* Update LRU cache database after using cache Index */
+void mvLruCacheIdxUpdate(MV_LRU_CACHE *pLruHndl, int cacheIdx);
+
+/* Delete LRU cache entry */
+void mvLruCacheIdxDelete(MV_LRU_CACHE *pLruHndl, int cacheIdx);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __mvLru_h__ */
diff --git a/drivers/crypto/mvebu_cesa/hal/mvMD5.c b/drivers/crypto/mvebu_cesa/hal/mvMD5.c
new file mode 100644
index 000000000000..c9b235398998
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/hal/mvMD5.c
@@ -0,0 +1,355 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+		notice, this list of conditions and the following disclaimer in the
+		documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+		used to endorse or promote products derived from this software without
+		specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+/*
+ * This code implements the MD5 message-digest algorithm.
+ * The algorithm is due to Ron Rivest.  This code was
+ * written by Colin Plumb in 1993, no copyright is claimed.
+ * This code is in the public domain; do with it what you wish.
+ *
+ * Equivalent code is available from RSA Data Security, Inc.
+ * This code has been tested against that, and is equivalent,
+ * except that you don't need to include two pages of legalese
+ * with every copy.
+ *
+ * To compute the message digest of a chunk of bytes, declare an
+ * MD5Context structure, pass it to MD5Init, call MD5Update as
+ * needed on buffers full of bytes, and then call MD5Final, which
+ * will fill a supplied 16-byte array with the digest.
+ */
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "mvMD5.h"
+
+static void mvMD5Transform(MV_U32 buf[4], MV_U32 const in[MV_MD5_MAC_LEN]);
+
+#ifdef MV_CPU_LE
+#define mvByteReverse(buf, len)	/* Nothing */
+#else
+static void mvByteReverse(unsigned char *buf, unsigned longs);
+
+/*
+ * Note: this code is harmless on little-endian machines.
+ */
+static void mvByteReverse(unsigned char *buf, unsigned longs)
+{
+	MV_U32 t;
+
+	do {
+		t = (MV_U32) ((unsigned)buf[3] << 8 | buf[2]) << 16 | ((unsigned)buf[1] << 8 | buf[0]);
+		*(MV_U32 *)buf = t;
+		buf += 4;
+	} while (--longs);
+}
+#endif
+
+/*
+ * Start MD5 accumulation.  Set bit count to 0 and buffer to mysterious
+ * initialization constants.
+ */
+void mvMD5Init(MV_MD5_CONTEXT *ctx)
+{
+	ctx->buf[0] = 0x67452301;
+	ctx->buf[1] = 0xefcdab89;
+	ctx->buf[2] = 0x98badcfe;
+	ctx->buf[3] = 0x10325476;
+
+	ctx->bits[0] = 0;
+	ctx->bits[1] = 0;
+}
+
+/*
+ * Update context to reflect the concatenation of another buffer full
+ * of bytes.
+ */
+void mvMD5Update(MV_MD5_CONTEXT *ctx, unsigned char const *buf, unsigned len)
+{
+	MV_U32 t;
+
+	/* Update bitcount */
+
+	t = ctx->bits[0];
+	ctx->bits[0] = t + ((MV_U32)len << 3);
+	if (ctx->bits[0] < t)
+		ctx->bits[1]++;	/* Carry from low to high */
+	ctx->bits[1] += len >> 29;
+
+	t = (t >> 3) & 0x3f;	/* Bytes already in shsInfo->data */
+
+	/* Handle any leading odd-sized chunks */
+
+	if (t) {
+		unsigned char *p = (unsigned char *)ctx->in + t;
+
+		t = 64 - t;
+		if (len < t) {
+			memcpy(p, buf, len);
+			return;
+		}
+		memcpy(p, buf, t);
+		mvByteReverse(ctx->in, MV_MD5_MAC_LEN);
+		mvMD5Transform(ctx->buf, (MV_U32 *)ctx->in);
+		buf += t;
+		len -= t;
+	}
+	/* Process data in 64-byte chunks */
+
+	while (len >= 64) {
+		memcpy(ctx->in, buf, 64);
+		mvByteReverse(ctx->in, MV_MD5_MAC_LEN);
+		mvMD5Transform(ctx->buf, (MV_U32 *)ctx->in);
+		buf += 64;
+		len -= 64;
+	}
+
+	/* Handle any remaining bytes of data. */
+
+	memcpy(ctx->in, buf, len);
+}
+
+/*
+ * Final wrapup - pad to 64-byte boundary with the bit pattern
+ * 1 0* (64-bit count of bits processed, MSB-first)
+ */
+void mvMD5Final(unsigned char digest[MV_MD5_MAC_LEN], MV_MD5_CONTEXT *ctx)
+{
+	unsigned count;
+	unsigned char *p;
+
+	/* Compute number of bytes mod 64 */
+	count = (ctx->bits[0] >> 3) & 0x3F;
+
+	/* Set the first char of padding to 0x80.  This is safe since there is
+	   always at least one byte free */
+	p = ctx->in + count;
+	*p++ = 0x80;
+
+	/* Bytes of padding needed to make 64 bytes */
+	count = 64 - 1 - count;
+
+	/* Pad out to 56 mod 64 */
+	if (count < 8) {
+		/* Two lots of padding:  Pad the first block to 64 bytes */
+		memset(p, 0, count);
+		mvByteReverse(ctx->in, MV_MD5_MAC_LEN);
+		mvMD5Transform(ctx->buf, (MV_U32 *)ctx->in);
+
+		/* Now fill the next block with 56 bytes */
+		memset(ctx->in, 0, 56);
+	} else {
+		/* Pad block to 56 bytes */
+		memset(p, 0, count - 8);
+	}
+	mvByteReverse(ctx->in, 14);
+
+	/* Append length in bits and transform */
+	((MV_U32 *) ctx->in)[14] = ctx->bits[0];
+	((MV_U32 *) ctx->in)[15] = ctx->bits[1];
+
+	mvMD5Transform(ctx->buf, (MV_U32 *)ctx->in);
+	mvByteReverse((unsigned char *)ctx->buf, 4);
+	memcpy(digest, ctx->buf, MV_MD5_MAC_LEN);
+	memset(ctx, 0, sizeof(ctx));	/* In case it's sensitive */
+}
+
+/* The four core functions - F1 is optimized somewhat */
+
+/* #define F1(x, y, z) (x & y | ~x & z) */
+#define F1(x, y, z) (z ^ (x & (y ^ z)))
+#define F2(x, y, z) F1(z, x, y)
+#define F3(x, y, z) (x ^ y ^ z)
+#define F4(x, y, z) (y ^ (x | ~z))
+
+/* This is the central step in the MD5 algorithm. */
+#define MD5STEP(f, w, x, y, z, data, s) \
+	(w += f(x, y, z) + data,  w = w<<s | w>>(32-s),  w += x)
+
+/*
+ * The core of the MD5 algorithm, this alters an existing MD5 hash to
+ * reflect the addition of 16 longwords of new data.  MD5Update blocks
+ * the data and converts bytes into longwords for this routine.
+ */
+static void mvMD5Transform(MV_U32 buf[4], MV_U32 const in[MV_MD5_MAC_LEN])
+{
+	register MV_U32 a, b, c, d;
+
+	a = buf[0];
+	b = buf[1];
+	c = buf[2];
+	d = buf[3];
+
+	MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
+	MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
+	MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
+	MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
+	MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
+	MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
+	MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
+	MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
+	MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
+	MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
+	MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
+	MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
+	MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
+	MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
+	MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
+	MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
+
+	MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
+	MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
+	MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
+	MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
+	MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
+	MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
+	MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
+	MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
+	MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
+	MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
+	MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
+	MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
+	MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
+	MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
+	MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
+	MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
+
+	MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
+	MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
+	MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
+	MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
+	MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
+	MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
+	MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
+	MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
+	MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
+	MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
+	MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
+	MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
+	MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
+	MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
+	MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
+	MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
+
+	MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
+	MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
+	MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
+	MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
+	MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
+	MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
+	MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
+	MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
+	MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
+	MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
+	MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
+	MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
+	MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
+	MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
+	MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
+	MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
+
+	buf[0] += a;
+	buf[1] += b;
+	buf[2] += c;
+	buf[3] += d;
+}
+
+void mvMD5(unsigned char const *buf, unsigned len, unsigned char *digest)
+{
+	MV_MD5_CONTEXT ctx;
+
+	mvMD5Init(&ctx);
+	mvMD5Update(&ctx, buf, len);
+	mvMD5Final(digest, &ctx);
+}
+
+void mvHmacMd5(unsigned char const *text, int text_len, unsigned char const *key, int key_len, unsigned char *digest)
+{
+	int i;
+	MV_MD5_CONTEXT ctx;
+	unsigned char k_ipad[64 + 1];	/* inner padding - key XORd with ipad */
+	unsigned char k_opad[64 + 1];	/* outer padding - key XORd with opad */
+
+	/* start out by storing key in pads */
+	memset(k_ipad, 0, 64);
+	memcpy(k_ipad, key, key_len);
+	memset(k_opad, 0, 64);
+	memcpy(k_opad, key, key_len);
+
+	/* XOR key with ipad and opad values */
+	for (i = 0; i < 64; i++) {
+		k_ipad[i] ^= 0x36;
+		k_opad[i] ^= 0x5c;
+	}
+
+	/* perform inner MD5 */
+	mvMD5Init(&ctx);	/* init ctx for 1st pass */
+	mvMD5Update(&ctx, k_ipad, 64);	/* start with inner pad */
+	mvMD5Update(&ctx, text, text_len);	/* then text of datagram */
+	mvMD5Final(digest, &ctx);	/* finish up 1st pass */
+
+	/* perform outer MD5 */
+	mvMD5Init(&ctx);	/* init ctx for 2nd pass */
+	mvMD5Update(&ctx, k_opad, 64);	/* start with outer pad */
+	mvMD5Update(&ctx, digest, 16);	/* then results of 1st hash */
+	mvMD5Final(digest, &ctx);	/* finish up 2nd pass */
+}
diff --git a/drivers/crypto/mvebu_cesa/hal/mvMD5.h b/drivers/crypto/mvebu_cesa/hal/mvMD5.h
new file mode 100644
index 000000000000..051fb57d7d1c
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/hal/mvMD5.h
@@ -0,0 +1,92 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+		notice, this list of conditions and the following disclaimer in the
+		documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+		used to endorse or promote products derived from this software without
+		specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __mvMD5_h__
+#define __mvMD5_h__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MV_MD5_MAC_LEN 16
+
+	typedef struct {
+		MV_U32 buf[4];
+		MV_U32 bits[2];
+		MV_U8 in[64];
+	} MV_MD5_CONTEXT;
+
+	void mvMD5Init(MV_MD5_CONTEXT *context);
+	void mvMD5Update(MV_MD5_CONTEXT *context, unsigned char const *buf, unsigned len);
+	void mvMD5Final(unsigned char digest[16], MV_MD5_CONTEXT *context);
+
+	void mvMD5(unsigned char const *buf, unsigned len, unsigned char *digest);
+
+	void mvHmacMd5(unsigned char const *text, int text_len,
+		       unsigned char const *key, int key_len, unsigned char *digest);
+
+#ifdef __cplusplus
+}
+#endif
+#endif				/* __mvMD5_h__ */
diff --git a/drivers/crypto/mvebu_cesa/hal/mvSHA1.c b/drivers/crypto/mvebu_cesa/hal/mvSHA1.c
new file mode 100644
index 000000000000..8ccb8b8539f4
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/hal/mvSHA1.c
@@ -0,0 +1,301 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+		notice, this list of conditions and the following disclaimer in the
+		documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+		used to endorse or promote products derived from this software without
+		specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+/*
+ * SHA1 hash implementation and interface functions
+ * Copyright (c) 2003-2005, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Alternatively, this software may be distributed under the terms of BSD
+ * license.
+ *
+ * See README and COPYING for more details.
+ */
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "mvSHA1.h"
+
+#define SHA1HANDSOFF
+
+typedef union {
+	MV_U8 c[64];
+	MV_U32 l[16];
+
+} CHAR64LONG16;
+
+static void mvSHA1Transform(MV_U32 state[5], const MV_U8 *buffer);
+
+#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))
+
+#ifdef MV_CPU_LE
+#define blk0(i) (block->l[i] = (rol(block->l[i], 24) & 0xFF00FF00) | \
+		(rol(block->l[i], 8) & 0x00FF00FF))
+#else
+#define blk0(i) (block->l[i])
+#endif
+#define blk(i) (block->l[i & 15] = rol(block->l[(i + 13) & 15] ^ \
+		block->l[(i + 8) & 15] ^ block->l[(i + 2) & 15] ^ block->l[i & 15], 1))
+
+/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
+#define R0(v, w, x, y, z, i) \
+		z += ((w & (x ^ y)) ^ y) + blk0(i) + 0x5A827999 + rol(v, 5); \
+		w = rol(w, 30);
+#define R1(v, w, x, y, z, i) \
+		z += ((w & (x ^ y)) ^ y) + blk(i) + 0x5A827999 + rol(v, 5); \
+		w = rol(w, 30);
+#define R2(v, w, x, y, z, i) \
+		z += (w ^ x ^ y) + blk(i) + 0x6ED9EBA1 + rol(v, 5); w = rol(w, 30);
+#define R3(v, w, x, y, z, i) \
+		z += (((w | x) & y) | (w & x)) + blk(i) + 0x8F1BBCDC + rol(v, 5); \
+		w = rol(w, 30);
+#define R4(v, w, x, y, z, i) \
+		z += (w ^ x ^ y) + blk(i) + 0xCA62C1D6 + rol(v, 5); \
+		w = rol(w, 30);
+
+/* Hash a single 512-bit block. This is the core of the algorithm. */
+static void mvSHA1Transform(MV_U32 state[5], const MV_U8 *buffer)
+{
+	MV_U32 a, b, c, d, e;
+	CHAR64LONG16 *block;
+
+#ifdef SHA1HANDSOFF
+	static MV_U32 workspace[16];
+
+	block = (CHAR64LONG16 *) workspace;
+	memcpy(block, buffer, 64);
+#else
+	block = (CHAR64LONG16 *) buffer;
+#endif
+	/* Copy context->state[] to working vars */
+	a = state[0];
+	b = state[1];
+	c = state[2];
+	d = state[3];
+	e = state[4];
+	/* 4 rounds of 20 operations each. Loop unrolled. */
+	R0(a, b, c, d, e, 0);
+	R0(e, a, b, c, d, 1);
+	R0(d, e, a, b, c, 2);
+	R0(c, d, e, a, b, 3);
+	R0(b, c, d, e, a, 4);
+	R0(a, b, c, d, e, 5);
+	R0(e, a, b, c, d, 6);
+	R0(d, e, a, b, c, 7);
+	R0(c, d, e, a, b, 8);
+	R0(b, c, d, e, a, 9);
+	R0(a, b, c, d, e, 10);
+	R0(e, a, b, c, d, 11);
+	R0(d, e, a, b, c, 12);
+	R0(c, d, e, a, b, 13);
+	R0(b, c, d, e, a, 14);
+	R0(a, b, c, d, e, 15);
+	R1(e, a, b, c, d, 16);
+	R1(d, e, a, b, c, 17);
+	R1(c, d, e, a, b, 18);
+	R1(b, c, d, e, a, 19);
+	R2(a, b, c, d, e, 20);
+	R2(e, a, b, c, d, 21);
+	R2(d, e, a, b, c, 22);
+	R2(c, d, e, a, b, 23);
+	R2(b, c, d, e, a, 24);
+	R2(a, b, c, d, e, 25);
+	R2(e, a, b, c, d, 26);
+	R2(d, e, a, b, c, 27);
+	R2(c, d, e, a, b, 28);
+	R2(b, c, d, e, a, 29);
+	R2(a, b, c, d, e, 30);
+	R2(e, a, b, c, d, 31);
+	R2(d, e, a, b, c, 32);
+	R2(c, d, e, a, b, 33);
+	R2(b, c, d, e, a, 34);
+	R2(a, b, c, d, e, 35);
+	R2(e, a, b, c, d, 36);
+	R2(d, e, a, b, c, 37);
+	R2(c, d, e, a, b, 38);
+	R2(b, c, d, e, a, 39);
+	R3(a, b, c, d, e, 40);
+	R3(e, a, b, c, d, 41);
+	R3(d, e, a, b, c, 42);
+	R3(c, d, e, a, b, 43);
+	R3(b, c, d, e, a, 44);
+	R3(a, b, c, d, e, 45);
+	R3(e, a, b, c, d, 46);
+	R3(d, e, a, b, c, 47);
+	R3(c, d, e, a, b, 48);
+	R3(b, c, d, e, a, 49);
+	R3(a, b, c, d, e, 50);
+	R3(e, a, b, c, d, 51);
+	R3(d, e, a, b, c, 52);
+	R3(c, d, e, a, b, 53);
+	R3(b, c, d, e, a, 54);
+	R3(a, b, c, d, e, 55);
+	R3(e, a, b, c, d, 56);
+	R3(d, e, a, b, c, 57);
+	R3(c, d, e, a, b, 58);
+	R3(b, c, d, e, a, 59);
+	R4(a, b, c, d, e, 60);
+	R4(e, a, b, c, d, 61);
+	R4(d, e, a, b, c, 62);
+	R4(c, d, e, a, b, 63);
+	R4(b, c, d, e, a, 64);
+	R4(a, b, c, d, e, 65);
+	R4(e, a, b, c, d, 66);
+	R4(d, e, a, b, c, 67);
+	R4(c, d, e, a, b, 68);
+	R4(b, c, d, e, a, 69);
+	R4(a, b, c, d, e, 70);
+	R4(e, a, b, c, d, 71);
+	R4(d, e, a, b, c, 72);
+	R4(c, d, e, a, b, 73);
+	R4(b, c, d, e, a, 74);
+	R4(a, b, c, d, e, 75);
+	R4(e, a, b, c, d, 76);
+	R4(d, e, a, b, c, 77);
+	R4(c, d, e, a, b, 78);
+	R4(b, c, d, e, a, 79);
+	/* Add the working vars back into context.state[] */
+	state[0] += a;
+	state[1] += b;
+	state[2] += c;
+	state[3] += d;
+	state[4] += e;
+	/* Wipe variables */
+	a = b = c = d = e = 0;
+}
+
+void mvSHA1Init(MV_SHA1_CTX *context)
+{
+	/* SHA1 initialization constants */
+	context->state[0] = 0x67452301;
+	context->state[1] = 0xEFCDAB89;
+	context->state[2] = 0x98BADCFE;
+	context->state[3] = 0x10325476;
+	context->state[4] = 0xC3D2E1F0;
+	context->count[0] = context->count[1] = 0;
+}
+
+/* Run your data through this. */
+void mvSHA1Update(MV_SHA1_CTX *context, MV_U8 const *data, unsigned int len)
+{
+	MV_U32 i, j;
+
+	j = (context->count[0] >> 3) & 63;
+	context->count[0] += len << 3;
+	if (context->count[0] < (len << 3))
+		context->count[1]++;
+	context->count[1] += (len >> 29);
+	if ((j + len) > 63) {
+		memcpy(&context->buffer[j], data, (i = 64 - j));
+		mvSHA1Transform(context->state, context->buffer);
+		for (; i + 63 < len; i += 64)
+			mvSHA1Transform(context->state, &data[i]);
+		j = 0;
+	} else {
+		i = 0;
+	}
+	memcpy(&context->buffer[j], &data[i], len - i);
+}
+
+void mvSHA1Final(MV_U8 *digest, MV_SHA1_CTX *context)
+{
+	MV_U32 i;
+	MV_U8 finalcount[8];
+
+	for (i = 0; i < 8; i++)
+		finalcount[i] = (unsigned char)((context->count[(i >= 4 ? 0 : 1)] >> ((3 - (i & 3)) * 8)) & 255);
+	/* Endian independent */
+
+	mvSHA1Update(context, (const unsigned char *)"\200", 1);
+	while ((context->count[0] & 504) != 448)
+		mvSHA1Update(context, (const unsigned char *)"\0", 1);
+
+	mvSHA1Update(context, finalcount, 8);	/* Should cause a mvSHA1Transform()
+						 */
+	for (i = 0; i < 20; i++) {
+		digest[i] = (unsigned char)
+		    ((context->state[i >> 2] >> ((3 - (i & 3)) * 8)) & 255);
+	}
+	/* Wipe variables */
+	i = 0;
+	memset(context->buffer, 0, 64);
+	memset(context->state, 0, 20);
+	memset(context->count, 0, 8);
+	memset(finalcount, 0, 8);
+
+#ifdef SHA1HANDSOFF		/* make SHA1Transform overwrite it's own static vars */
+	mvSHA1Transform(context->state, context->buffer);
+#endif
+}
+
+void mvSHA1(MV_U8 const *buf, unsigned int len, MV_U8 *digest)
+{
+	MV_SHA1_CTX ctx;
+
+	mvSHA1Init(&ctx);
+	mvSHA1Update(&ctx, buf, len);
+	mvSHA1Final(digest, &ctx);
+}
diff --git a/drivers/crypto/mvebu_cesa/hal/mvSHA1.h b/drivers/crypto/mvebu_cesa/hal/mvSHA1.h
new file mode 100644
index 000000000000..1890c9b33194
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/hal/mvSHA1.h
@@ -0,0 +1,89 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+		notice, this list of conditions and the following disclaimer in the
+		documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+		used to endorse or promote products derived from this software without
+		specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __mvSHA1_h__
+#define __mvSHA1_h__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MV_SHA1_MAC_LEN 20
+
+	typedef struct {
+		MV_U32 state[5];
+		MV_U32 count[2];
+		MV_U8 buffer[64];
+	} MV_SHA1_CTX;
+
+	void mvSHA1Init(MV_SHA1_CTX *context);
+	void mvSHA1Update(MV_SHA1_CTX *context, MV_U8 const *buf, unsigned int len);
+	void mvSHA1Final(MV_U8 *digest, MV_SHA1_CTX *context);
+
+	void mvSHA1(MV_U8 const *buf, unsigned int len, MV_U8 *digest);
+
+#ifdef __cplusplus
+}
+#endif
+#endif				/* __mvSHA1_h__ */
diff --git a/drivers/crypto/mvebu_cesa/hal/mvSHA256.c b/drivers/crypto/mvebu_cesa/hal/mvSHA256.c
new file mode 100644
index 000000000000..02626a6c8752
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/hal/mvSHA256.c
@@ -0,0 +1,341 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+		notice, this list of conditions and the following disclaimer in the
+		documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+		used to endorse or promote products derived from this software without
+		specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+/*
+ * sha2.c
+ *
+ * Version 1.0.0beta1
+ *
+ * Written by Aaron D. Gifford <me@aarongifford.com>
+ *
+ * Copyright 2000 Aaron D. Gifford.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the copyright holder nor the names of contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) AND CONTRIBUTOR(S) ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR(S) OR CONTRIBUTOR(S) BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "mvSHA256.h"
+
+#define GET_UINT32(n, b, i)			\
+{						\
+    (n) = ((MV_U32) (b)[(i)] << 24)		\
+	| ((MV_U32) (b)[(i) + 1] << 16)		\
+	| ((MV_U32) (b)[(i) + 2] <<  8)		\
+	| ((MV_U32) (b)[(i) + 3]);		\
+}
+
+#define PUT_UINT32(n, b, i)			\
+{						\
+    (b)[(i)] = (MV_U8) ((n) >> 24);		\
+    (b)[(i) + 1] = (MV_U8) ((n) >> 16);		\
+    (b)[(i) + 2] = (MV_U8) ((n) >>  8);		\
+    (b)[(i) + 3] = (MV_U8) (n);			\
+}
+
+MV_VOID mvSHA256Init(sha256_context *ctx)
+{
+	ctx->total[0] = 0;
+	ctx->total[1] = 0;
+
+	ctx->state[0] = 0x6A09E667;
+	ctx->state[1] = 0xBB67AE85;
+	ctx->state[2] = 0x3C6EF372;
+	ctx->state[3] = 0xA54FF53A;
+	ctx->state[4] = 0x510E527F;
+	ctx->state[5] = 0x9B05688C;
+	ctx->state[6] = 0x1F83D9AB;
+	ctx->state[7] = 0x5BE0CD19;
+}
+
+MV_VOID sha256_process(sha256_context *ctx, MV_U8 data[64])
+{
+	MV_U32 temp1, temp2, W[64];
+	MV_U32 A, B, C, D, E, F, G, H;
+
+	GET_UINT32(W[0], data, 0);
+	GET_UINT32(W[1], data, 4);
+	GET_UINT32(W[2], data, 8);
+	GET_UINT32(W[3], data, 12);
+	GET_UINT32(W[4], data, 16);
+	GET_UINT32(W[5], data, 20);
+	GET_UINT32(W[6], data, 24);
+	GET_UINT32(W[7], data, 28);
+	GET_UINT32(W[8], data, 32);
+	GET_UINT32(W[9], data, 36);
+	GET_UINT32(W[10], data, 40);
+	GET_UINT32(W[11], data, 44);
+	GET_UINT32(W[12], data, 48);
+	GET_UINT32(W[13], data, 52);
+	GET_UINT32(W[14], data, 56);
+	GET_UINT32(W[15], data, 60);
+
+#define  SHR(x, n) ((x & 0xFFFFFFFF) >> n)
+#define ROTR(x, n) (SHR(x, n) | (x << (32 - n)))
+
+#define S0(x) (ROTR(x, 7) ^ ROTR(x, 18) ^  SHR(x, 3))
+#define S1(x) (ROTR(x, 17) ^ ROTR(x, 19) ^  SHR(x, 10))
+
+#define S2(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22))
+#define S3(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25))
+
+#define F0(x, y, z) ((x & y) | (z & (x | y)))
+#define F1(x, y, z) (z ^ (x & (y ^ z)))
+
+#define R(t)						\
+(							\
+    W[t] = S1(W[t -  2]) + W[t -  7] +			\
+	S0(W[t - 15]) + W[t - 16]			\
+)
+
+#define P(a, b, c, d, e, f, g, h, x, K)                  \
+{                                               	 \
+    temp1 = h + S3(e) + F1(e, f, g) + K + x;      	 \
+    temp2 = S2(a) + F0(a, b, c);                  	 \
+    d += temp1; h = temp1 + temp2;              	 \
+}
+
+	A = ctx->state[0];
+	B = ctx->state[1];
+	C = ctx->state[2];
+	D = ctx->state[3];
+	E = ctx->state[4];
+	F = ctx->state[5];
+	G = ctx->state[6];
+	H = ctx->state[7];
+
+	P(A, B, C, D, E, F, G, H, W[0], 0x428A2F98);
+	P(H, A, B, C, D, E, F, G, W[1], 0x71374491);
+	P(G, H, A, B, C, D, E, F, W[2], 0xB5C0FBCF);
+	P(F, G, H, A, B, C, D, E, W[3], 0xE9B5DBA5);
+	P(E, F, G, H, A, B, C, D, W[4], 0x3956C25B);
+	P(D, E, F, G, H, A, B, C, W[5], 0x59F111F1);
+	P(C, D, E, F, G, H, A, B, W[6], 0x923F82A4);
+	P(B, C, D, E, F, G, H, A, W[7], 0xAB1C5ED5);
+	P(A, B, C, D, E, F, G, H, W[8], 0xD807AA98);
+	P(H, A, B, C, D, E, F, G, W[9], 0x12835B01);
+	P(G, H, A, B, C, D, E, F, W[10], 0x243185BE);
+	P(F, G, H, A, B, C, D, E, W[11], 0x550C7DC3);
+	P(E, F, G, H, A, B, C, D, W[12], 0x72BE5D74);
+	P(D, E, F, G, H, A, B, C, W[13], 0x80DEB1FE);
+	P(C, D, E, F, G, H, A, B, W[14], 0x9BDC06A7);
+	P(B, C, D, E, F, G, H, A, W[15], 0xC19BF174);
+	P(A, B, C, D, E, F, G, H, R(16), 0xE49B69C1);
+	P(H, A, B, C, D, E, F, G, R(17), 0xEFBE4786);
+	P(G, H, A, B, C, D, E, F, R(18), 0x0FC19DC6);
+	P(F, G, H, A, B, C, D, E, R(19), 0x240CA1CC);
+	P(E, F, G, H, A, B, C, D, R(20), 0x2DE92C6F);
+	P(D, E, F, G, H, A, B, C, R(21), 0x4A7484AA);
+	P(C, D, E, F, G, H, A, B, R(22), 0x5CB0A9DC);
+	P(B, C, D, E, F, G, H, A, R(23), 0x76F988DA);
+	P(A, B, C, D, E, F, G, H, R(24), 0x983E5152);
+	P(H, A, B, C, D, E, F, G, R(25), 0xA831C66D);
+	P(G, H, A, B, C, D, E, F, R(26), 0xB00327C8);
+	P(F, G, H, A, B, C, D, E, R(27), 0xBF597FC7);
+	P(E, F, G, H, A, B, C, D, R(28), 0xC6E00BF3);
+	P(D, E, F, G, H, A, B, C, R(29), 0xD5A79147);
+	P(C, D, E, F, G, H, A, B, R(30), 0x06CA6351);
+	P(B, C, D, E, F, G, H, A, R(31), 0x14292967);
+	P(A, B, C, D, E, F, G, H, R(32), 0x27B70A85);
+	P(H, A, B, C, D, E, F, G, R(33), 0x2E1B2138);
+	P(G, H, A, B, C, D, E, F, R(34), 0x4D2C6DFC);
+	P(F, G, H, A, B, C, D, E, R(35), 0x53380D13);
+	P(E, F, G, H, A, B, C, D, R(36), 0x650A7354);
+	P(D, E, F, G, H, A, B, C, R(37), 0x766A0ABB);
+	P(C, D, E, F, G, H, A, B, R(38), 0x81C2C92E);
+	P(B, C, D, E, F, G, H, A, R(39), 0x92722C85);
+	P(A, B, C, D, E, F, G, H, R(40), 0xA2BFE8A1);
+	P(H, A, B, C, D, E, F, G, R(41), 0xA81A664B);
+	P(G, H, A, B, C, D, E, F, R(42), 0xC24B8B70);
+	P(F, G, H, A, B, C, D, E, R(43), 0xC76C51A3);
+	P(E, F, G, H, A, B, C, D, R(44), 0xD192E819);
+	P(D, E, F, G, H, A, B, C, R(45), 0xD6990624);
+	P(C, D, E, F, G, H, A, B, R(46), 0xF40E3585);
+	P(B, C, D, E, F, G, H, A, R(47), 0x106AA070);
+	P(A, B, C, D, E, F, G, H, R(48), 0x19A4C116);
+	P(H, A, B, C, D, E, F, G, R(49), 0x1E376C08);
+	P(G, H, A, B, C, D, E, F, R(50), 0x2748774C);
+	P(F, G, H, A, B, C, D, E, R(51), 0x34B0BCB5);
+	P(E, F, G, H, A, B, C, D, R(52), 0x391C0CB3);
+	P(D, E, F, G, H, A, B, C, R(53), 0x4ED8AA4A);
+	P(C, D, E, F, G, H, A, B, R(54), 0x5B9CCA4F);
+	P(B, C, D, E, F, G, H, A, R(55), 0x682E6FF3);
+	P(A, B, C, D, E, F, G, H, R(56), 0x748F82EE);
+	P(H, A, B, C, D, E, F, G, R(57), 0x78A5636F);
+	P(G, H, A, B, C, D, E, F, R(58), 0x84C87814);
+	P(F, G, H, A, B, C, D, E, R(59), 0x8CC70208);
+	P(E, F, G, H, A, B, C, D, R(60), 0x90BEFFFA);
+	P(D, E, F, G, H, A, B, C, R(61), 0xA4506CEB);
+	P(C, D, E, F, G, H, A, B, R(62), 0xBEF9A3F7);
+	P(B, C, D, E, F, G, H, A, R(63), 0xC67178F2);
+
+	ctx->state[0] += A;
+	ctx->state[1] += B;
+	ctx->state[2] += C;
+	ctx->state[3] += D;
+	ctx->state[4] += E;
+	ctx->state[5] += F;
+	ctx->state[6] += G;
+	ctx->state[7] += H;
+}
+
+MV_VOID mvSHA256Update(sha256_context *ctx, MV_U8 *input, MV_U32 length)
+{
+	MV_U32 left, fill;
+
+	if (!length)
+		return;
+
+	left = ctx->total[0] & 0x3F;
+	fill = 64 - left;
+
+	ctx->total[0] += length;
+	ctx->total[0] &= 0xFFFFFFFF;
+
+	if (ctx->total[0] < length)
+		ctx->total[1]++;
+
+	if (left && length >= fill) {
+		memcpy((MV_VOID *) (ctx->buffer + left), (MV_VOID *) input, fill);
+		sha256_process(ctx, ctx->buffer);
+		length -= fill;
+		input += fill;
+		left = 0;
+	}
+
+	while (length >= 64) {
+		sha256_process(ctx, input);
+		length -= 64;
+		input += 64;
+	}
+
+	if (length)
+		memcpy((MV_VOID *) (ctx->buffer + left), (MV_VOID *) input, length);
+}
+
+static MV_U8 sha256_padding[64] = {
+	0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+MV_VOID mvSHA256Finish(sha256_context *ctx, MV_U8 *digest)
+{
+	MV_U32 last, padn;
+	MV_U32 high, low;
+	MV_U8 msglen[8];
+
+	high = (ctx->total[0] >> 29)
+	    | (ctx->total[1] << 3);
+	low = (ctx->total[0] << 3);
+
+	PUT_UINT32(high, msglen, 0);
+	PUT_UINT32(low, msglen, 4);
+
+	last = ctx->total[0] & 0x3F;
+	padn = (last < 56) ? (56 - last) : (120 - last);
+
+	mvSHA256Update(ctx, sha256_padding, padn);
+	mvSHA256Update(ctx, msglen, 8);
+
+	PUT_UINT32(ctx->state[0], digest, 0);
+	PUT_UINT32(ctx->state[1], digest, 4);
+	PUT_UINT32(ctx->state[2], digest, 8);
+	PUT_UINT32(ctx->state[3], digest, 12);
+	PUT_UINT32(ctx->state[4], digest, 16);
+	PUT_UINT32(ctx->state[5], digest, 20);
+	PUT_UINT32(ctx->state[6], digest, 24);
+	PUT_UINT32(ctx->state[7], digest, 28);
+}
+
+MV_VOID mvSHA256(MV_U8 *buf, MV_U32 len, MV_U8 * digest)
+{
+	sha256_context ctx;
+
+	mvSHA256Init(&ctx);
+	mvSHA256Update(&ctx, buf, len);
+	mvSHA256Finish(&ctx, digest);
+}
diff --git a/drivers/crypto/mvebu_cesa/hal/mvSHA256.h b/drivers/crypto/mvebu_cesa/hal/mvSHA256.h
new file mode 100644
index 000000000000..ac91708b43e6
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/hal/mvSHA256.h
@@ -0,0 +1,87 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+		notice, this list of conditions and the following disclaimer in the
+		documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+		used to endorse or promote products derived from this software without
+		specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef _mvSHA256_H
+#define _mvSHA256_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct {
+    MV_U32 total[2];
+    MV_U32 state[8];
+    MV_U8 buffer[64];
+}
+sha256_context;
+
+MV_VOID mvSHA256Init(sha256_context *ctx);
+MV_VOID mvSHA256Update(sha256_context *ctx, MV_U8 *input, MV_U32 length);
+MV_VOID mvSHA256Finish(sha256_context *ctx, MV_U8 *digest);
+MV_VOID mvSHA256(MV_U8 *buf, MV_U32 len, MV_U8 *digest);
+
+#ifdef __cplusplus
+}
+#endif
+#endif				/* __mvSHA256_h__ */
diff --git a/drivers/crypto/mvebu_cesa/mvSysCesaConfig.h b/drivers/crypto/mvebu_cesa/mvSysCesaConfig.h
new file mode 100644
index 000000000000..84560b64386f
--- /dev/null
+++ b/drivers/crypto/mvebu_cesa/mvSysCesaConfig.h
@@ -0,0 +1,73 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+
+*******************************************************************************/
+/*******************************************************************************
+* mvSysCesaConfig.h - Marvell Cesa unit specific configurations
+*
+* DESCRIPTION:
+*       None.
+*
+* DEPENDENCIES:
+*       None.
+*
+*******************************************************************************/
+
+#ifndef __mvSysCesaConfig_h__
+#define __mvSysCesaConfig_h__
+
+/*
+** Base address for cesa registers.
+*/
+
+extern MV_U32 mv_cesa_base[], mv_cesa_tdma_base[];
+
+/* This enumerator defines the Marvell CESA feature*/
+enum cesa_mode {
+	CESA_UNKNOWN_M = -1,
+	CESA_OCF_M,
+	CESA_TEST_M
+};
+
+enum cesa_feature {
+	CESA_UNKNOWN = -1,
+	CHAIN = 0,
+	INT_COALESCING,
+	INT_PER_PACKET
+};
+
+extern enum cesa_mode mv_cesa_mode;
+extern u32 mv_cesa_time_threshold, mv_cesa_threshold, mv_cesa_channels;
+extern enum cesa_feature mv_cesa_feature;
+
+#define MV_CESA_REGS_BASE(chan)		(mv_cesa_base[chan])
+
+#define MV_CESA_TDMA_REGS_BASE(chan)	(mv_cesa_tdma_base[chan])
+
+/*
+ * MV_CESA_CHANNELS have to be known at compilation time, since it is used
+ * for table size declaration. It is defined for max available CESA channels
+ */
+#define MV_CESA_CHANNELS	2
+
+/*
+ * Use 2K of SRAM
+ */
+#define MV_CESA_MAX_BUF_SIZE	1600
+
+#endif /* __mvSysCesaConfig_h__ */
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 0ba5a95199d3..d946d830432c 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -133,6 +133,17 @@ config MV_XOR
 	---help---
 	  Enable support for the Marvell XOR engine.
 
+config MV_MEMCPY
+	bool "Marvell MEMCPY engine support"
+	depends on PLAT_ORION
+	select DMA_ENGINE
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+	help
+	  Enable support for the Marvell MEMCPY engine
+	  This driver only supports the memcpy and drops
+	  the support for memset and xor in favor of more
+	  efficient operation.
+
 config MX3_IPU
 	bool "MX3x Image Processing Unit support"
 	depends on ARCH_MXC
@@ -341,9 +352,19 @@ config NET_DMA
 	  Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
 	  say N.
 
+config SPLICE_NET_DMA_SUPPORT
+        bool "Net DMA support for splice"
+	depends on NET_DMA && !HIGHMEM
+	default n
+	help
+	  This enables the use of DMA engines in the network stack to
+	  offload splice operations, freeing CPU cycles.
+
+	  If unsure, say N.
+
 config ASYNC_TX_DMA
 	bool "Async_tx: Offload support for the async_tx api"
-	depends on DMA_ENGINE
+	depends on DMA_ENGINE && (!DMA_CACHE_RWFO || AURORA_IO_CACHE_COHERENCY)
 	help
 	  This allows the async_tx api to take advantage of offload engines for
 	  memcpy, memset, xor, and raid6 p+q operations.  If your platform has
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index a2b0df591f95..6845a61166e4 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
 obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
 obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
+obj-$(CONFIG_MV_MEMCPY) += mv_memcpy.o
 obj-$(CONFIG_MV_XOR) += mv_xor.o
 obj-$(CONFIG_DW_DMAC) += dw_dmac.o
 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 93f7992bee5c..043bb65176e4 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -65,11 +65,14 @@
 #include <linux/acpi.h>
 #include <linux/acpi_dma.h>
 #include <linux/of_dma.h>
+#include <linux/pagemap.h>
 
 static DEFINE_MUTEX(dma_list_mutex);
 static DEFINE_IDR(dma_idr);
 static LIST_HEAD(dma_device_list);
 static long dmaengine_ref_count;
+static struct page *temp_page = NULL;
+
 
 /* --- sysfs implementation --- */
 
@@ -924,6 +927,8 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 }
 EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
 
+#define DMA_ENGINE_MIN_OP_SIZE 128
+
 /**
  * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
  * @chan: DMA channel to offload copy to
@@ -947,6 +952,40 @@ dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 	dma_cookie_t cookie;
 	unsigned long flags;
 
+	if (!page) {
+		printk(KERN_ERR "%s page %p\n", __FUNCTION__, (void*)page);
+		return -EFAULT;
+	}
+	/*
+	  This code snippet is for Marvell XOR engine that supports operation on len < 128
+	  So if we get a copy operation smaller than 128, we use memcpy
+	  Also, we're creating a dummy dma operation in order to satisfy upper layers waiting
+	  for a valid cookie return code.
+	*/
+	if (len < DMA_ENGINE_MIN_OP_SIZE)
+	{
+		void * dst = kmap_atomic(page) + offset;
+		memcpy(dst, kdata, len);
+		kunmap_atomic(dst);
+
+		dma_src = dma_map_page(dev->dev, temp_page, 0, PAGE_SIZE, DMA_TO_DEVICE);
+		dma_dest = dma_map_page(dev->dev, temp_page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+
+		flags = DMA_CTRL_ACK;
+		tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, DMA_ENGINE_MIN_OP_SIZE, flags);
+
+		if (!tx) {
+			dma_unmap_page(dev->dev, dma_src, PAGE_SIZE, DMA_TO_DEVICE);
+			dma_unmap_page(dev->dev, dma_dest, PAGE_SIZE, DMA_FROM_DEVICE);
+			return -ENOMEM;
+		}
+
+		tx->callback = NULL;
+		cookie = tx->tx_submit(tx);
+
+		return cookie;
+	}
+
 	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
 	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
@@ -995,6 +1034,40 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 	dma_cookie_t cookie;
 	unsigned long flags;
 
+	if (!dest_pg || !src_pg) {
+		printk(KERN_ERR "%s dest_pg %p src_pg %p\n", __FUNCTION__, (void*)dest_pg, (void*)src_pg);
+		return -EFAULT;
+	}
+
+	/*
+	  This code snippet is for Marvell XOR engine that doesn't support operations on len < 128
+	  So if we get a copy operation smaller than 128, we use memcpy
+	  Also, we're creating a dummy dma operation in order to satisfy upper layers waiting
+	  for a valid cookie return code.
+	*/
+	if (len < DMA_ENGINE_MIN_OP_SIZE)
+	{
+		void * dst = kmap_atomic(dest_pg) + dest_off;
+		memcpy(dst, src_pg+src_off, len);
+		kunmap_atomic(dst);
+
+		dma_src = dma_map_page(dev->dev, temp_page, 0, PAGE_SIZE, DMA_TO_DEVICE);
+		dma_dest = dma_map_page(dev->dev, temp_page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+		flags = DMA_CTRL_ACK;
+		tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, DMA_ENGINE_MIN_OP_SIZE, flags);
+
+		if (!tx) {
+			dma_unmap_page(dev->dev, dma_src, PAGE_SIZE, DMA_TO_DEVICE);
+			dma_unmap_page(dev->dev, dma_dest, PAGE_SIZE, DMA_FROM_DEVICE);
+			return -ENOMEM;
+		}
+
+		tx->callback = NULL;
+		cookie = tx->tx_submit(tx);
+
+		return cookie;
+	}
+
 	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
 	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
 				DMA_FROM_DEVICE);
@@ -1092,6 +1165,9 @@ EXPORT_SYMBOL_GPL(dma_run_dependencies);
 
 static int __init dma_bus_init(void)
 {
+	temp_page = alloc_pages(GFP_KERNEL, 0);
+	if (!temp_page)
+                BUG();
 	return class_register(&dma_devclass);
 }
 arch_initcall(dma_bus_init);
diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c
index bb48a57c2fc1..a04a995ae0fc 100644
--- a/drivers/dma/iovlock.c
+++ b/drivers/dma/iovlock.c
@@ -121,6 +121,73 @@ out:
 	return NULL;
 }
 
+#ifdef CONFIG_SPLICE_NET_DMA_SUPPORT
+struct dma_pinned_list *dma_pin_kernel_iovec_pages(struct iovec *iov, size_t len)
+{
+	struct dma_pinned_list *local_list;
+	struct page **pages;
+	int i, j;
+	int nr_iovecs = 0;
+	int iovec_len_used = 0;
+	int iovec_pages_used = 0;
+
+	/* determine how many iovecs/pages there are, up front */
+	do {
+		iovec_len_used += iov[nr_iovecs].iov_len;
+		iovec_pages_used += num_pages_spanned(&iov[nr_iovecs]);
+		nr_iovecs++;
+	} while (iovec_len_used < len);
+
+	/* single kmalloc for pinned list, page_list[], and the page arrays */
+	local_list = kmalloc(sizeof(*local_list)
+		+ (nr_iovecs * sizeof (struct dma_page_list))
+		+ (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL);
+	if (!local_list)
+		goto out;
+
+	/* list of pages starts right after the page list array */
+	pages = (struct page **) &local_list->page_list[nr_iovecs];
+
+	local_list->nr_iovecs = 0;
+
+	for (i = 0; i < nr_iovecs; i++) {
+		struct dma_page_list *page_list = &local_list->page_list[i];
+		int offset;
+
+		len -= iov[i].iov_len;
+
+		if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len))
+			goto unpin;
+
+		page_list->nr_pages = num_pages_spanned(&iov[i]);
+		page_list->base_address = iov[i].iov_base;
+
+		page_list->pages = pages;
+		pages += page_list->nr_pages;
+
+		for (offset=0, j=0; j < page_list->nr_pages; j++, offset+=PAGE_SIZE) {
+			page_list->pages[j] = phys_to_page(__pa((unsigned int)page_list->base_address) + offset);
+		}
+		local_list->nr_iovecs = i + 1;
+	}
+
+	return local_list;
+
+unpin:
+	kfree(local_list);
+out:
+	return NULL;
+}
+
+void dma_unpin_kernel_iovec_pages(struct dma_pinned_list *pinned_list)
+{
+	if (!pinned_list)
+		return;
+
+	kfree(pinned_list);
+}
+#endif
+
 void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list)
 {
 	int i, j;
diff --git a/drivers/dma/mv_memcpy.c b/drivers/dma/mv_memcpy.c
new file mode 100644
index 000000000000..9772e02318a8
--- /dev/null
+++ b/drivers/dma/mv_memcpy.c
@@ -0,0 +1,581 @@
+/*
+ * offload engine driver for the Marvell XOR engine
+ * Copyright (C) 2007, 2008, Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/mbus.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/memory.h>
+#include <linux/debugfs.h>
+#include <linux/of.h>
+
+#include "mv_memcpy.h"
+
+#define MV_MEMCPY_NAME	"mv_memcpy"
+
+static struct dentry *dfs_root;
+static struct dentry *dfs_stats;
+
+struct {
+	unsigned int avg_busy_loops;
+	unsigned int avg_busy_cnt;
+	unsigned int issue_pen;
+	unsigned int prep;
+	unsigned int tx_status;
+	unsigned int break_chain;
+} mv_memcpy_stats;
+
+#define MV_MEMCPY_BANK_SIZE 8
+
+static void mv_memcpy_set_mode(struct mv_memcpy_chan *chan,
+			       enum dma_transaction_type type)
+{
+	u32 op_mode;
+	u32 config = readl_relaxed(MEMCPY_CONFIG(chan));
+
+	switch (type) {
+	case DMA_MEMCPY:
+		op_mode = MEMCPY_OPERATION_MODE_MEMCPY;
+		break;
+	default:
+		BUG();
+		return;
+	}
+
+	config &= ~0x7;
+	config |= op_mode;
+	writel_relaxed(config, MEMCPY_CONFIG(chan));
+}
+
+static void mv_memcpy_set_next_desc(struct mv_memcpy_chan *chan,
+				    u32 next_desc_addr)
+{
+	writel_relaxed(next_desc_addr, MEMCPY_NEXT_DESC(chan));
+}
+
+#define to_mv_memcpy_chans(chan)		\
+	container_of(chan, struct mv_memcpy_chans, common)
+
+static void mv_memcpy_chan_activate(struct mv_memcpy_chan *chan)
+{
+	writel_relaxed(1, MEMCPY_ACTIVATION(chan));
+}
+
+static char mv_memcpy_chan_is_busy(struct mv_memcpy_chan *chan)
+{
+	u32 state = readl_relaxed(MEMCPY_ACTIVATION(chan));
+
+	state = (state >> 4) & 0x3;
+	return (state == 1) ? 1 : 0;
+}
+
+static void mv_memcpy_free_bank(struct dma_chan *chan,
+				int channel_num,
+				int bank_num)
+{
+	struct mv_memcpy_chans *mv_chans = to_mv_memcpy_chans(chan);
+	struct mv_memcpy_chan *mv_chan = &mv_chans->chan[channel_num];
+	struct device *dev = mv_chans->common.device->dev;
+	struct dma_async_tx_descriptor *txd;
+	unsigned int buff_index;
+	int j;
+
+	for (j = 0; j < mv_chans->coalescing; j++) {
+		buff_index = (bank_num * MV_MEMCPY_BANK_SIZE) + j;
+		txd = &mv_chan->buff_info[buff_index].async_tx;
+		/* Unmap the dst buffer, if requested */
+		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+				dma_unmap_single(dev,
+						 mv_chan->buff_info[buff_index].dst_addr,
+						 mv_chan->buff_info[buff_index].len,
+						 DMA_FROM_DEVICE);
+
+			else
+				dma_unmap_page(dev,
+					       mv_chan->buff_info[buff_index].dst_addr,
+					       mv_chan->buff_info[buff_index].len,
+					       DMA_FROM_DEVICE);
+
+		}
+
+		/* Unmap the src buffer, if requested */
+		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+				dma_unmap_single(dev,
+						 mv_chan->buff_info[buff_index].src_addr,
+						 mv_chan->buff_info[buff_index].len,
+						 DMA_TO_DEVICE);
+			else
+				dma_unmap_page(dev,
+					       mv_chan->buff_info[buff_index].src_addr,
+					       mv_chan->buff_info[buff_index].len,
+					       DMA_TO_DEVICE);
+		}
+	}
+	return;
+}
+
+
+
+/************************ DMA engine API functions ****************************/
+
+/**
+ * mv_memcpy_status - poll the status of an MEMCPY transaction
+ * @chan: MEMCPY channel handle
+ * @cookie: MEMCPY transaction identifier
+ * @txstate: MEMCPY transactions state holder (or NULL)
+ */
+static enum dma_status mv_memcpy_tx_status(struct dma_chan *chan,
+					   dma_cookie_t cookie,
+					   struct dma_tx_state *txstate)
+{
+	struct mv_memcpy_chans *mv_chans = to_mv_memcpy_chans(chan);
+	struct mv_memcpy_chan *mv_chan;
+	unsigned long irq_flags;
+	struct mv_memcpy_desc *desc;
+	int i;
+	int last_index;
+
+	spin_lock_irqsave(&mv_chans->lock, irq_flags);
+	mv_memcpy_stats.tx_status++;
+
+	for (i = 0; i < mv_chans->num_channels; i++) {
+		mv_chan = &mv_chans->chan[i];
+
+		/* let the channels finish pending descriptors */
+		while (mv_memcpy_chan_is_busy(&mv_chans->chan[i]))
+			;
+
+		/* free bank */
+		if (mv_chan->active_bank != -1)
+			mv_memcpy_free_bank(chan, i, mv_chan->active_bank);
+
+		/* if we have some descriptors pending in the current bank chain flush them */
+		if (mv_chan->next_index) {
+			mv_memcpy_stats.break_chain++;
+			last_index = (mv_chan->next_bank * MV_MEMCPY_BANK_SIZE) + mv_chan->next_index - 1;
+
+			/* we're triggering so cut the chain */
+			desc = &mv_chan->dma_desc_pool_virt[last_index];
+			desc->phy_next_desc = 0x0;
+
+			mv_chan->active_bank = mv_chan->next_bank;
+
+			/* set the descriptor pointer */
+			mv_memcpy_set_next_desc(mv_chan, mv_chan->dma_desc_pool +
+						((mv_chan->active_bank * MV_MEMCPY_BANK_SIZE) *
+						 sizeof(struct mv_memcpy_desc)));
+
+			/* trigger channel */
+			wmb();
+			mv_memcpy_chan_activate(mv_chan);
+		}
+	}
+
+	for (i = 0; i < mv_chans->num_channels; i++) {
+		mv_chan = &mv_chans->chan[i];
+
+		/* if we have some descriptors pending in the current bank chain flush them */
+		if (mv_chan->next_index) {
+			/* let the channels finish pending descriptors */
+			while (mv_memcpy_chan_is_busy(mv_chan))
+				;
+
+			/* free bank */
+			mv_memcpy_free_bank(chan, i, mv_chan->active_bank);
+
+			/* nothing to clean on next round */
+			mv_chan->active_bank = -1;
+			/* switch bank */
+			mv_chan->next_bank = 1 - mv_chan->next_bank;
+			mv_chan->next_index = 0;
+		}
+	}
+
+	spin_unlock_irqrestore(&mv_chans->lock, irq_flags);
+	return DMA_SUCCESS;
+}
+
+static dma_cookie_t
+mv_memcpy_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	/*
+	 * FIXME: This is mostly odd and it looks like a bug.
+	 * A positive dma_cookie_t descriptor is a valid one,
+	 * so this '1' indicates a valid descriptor.
+	 */
+	return 1;
+}
+
+/* returns the number of allocated descriptors */
+static int mv_memcpy_alloc_chan_resources(struct dma_chan *chan)
+{
+	struct mv_memcpy_chans *mv_chans = to_mv_memcpy_chans(chan);
+
+	return mv_chans->num_channels * mv_chans->coalescing;
+}
+
+static struct dma_async_tx_descriptor *
+mv_memcpy_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+			  size_t len, unsigned long flags)
+{
+	struct mv_memcpy_chans *mv_chans = to_mv_memcpy_chans(chan);
+	struct mv_memcpy_chan *mv_chan = &mv_chans->chan[mv_chans->next_chan];
+	struct mv_memcpy_desc *desc;
+	u32 next_desc;
+	struct mv_memcpy_desc tmp_desc;
+	unsigned long irq_flags;
+	unsigned int trigger_chan = 0;
+	unsigned int current_desc;
+	int last_bank;
+
+	mv_memcpy_stats.prep++;
+
+	spin_lock_irqsave(&mv_chans->lock, irq_flags);
+
+	last_bank = mv_chan->active_bank;
+
+	current_desc = (mv_chan->next_bank * MV_MEMCPY_BANK_SIZE) + mv_chan->next_index;
+
+	/* fill the descriptor */
+	desc = &mv_chan->dma_desc_pool_virt[current_desc];
+	next_desc = mv_chan->dma_desc_pool +
+		((mv_chan->next_bank * MV_MEMCPY_BANK_SIZE + mv_chan->next_index + 1)
+		 * sizeof(struct mv_memcpy_desc));
+
+	tmp_desc.status = (0x1 << 31);
+	tmp_desc.desc_command = 0x1 | (0x1 << 31);
+	tmp_desc.byte_count = len;
+	tmp_desc.phy_dest_addr = dest;
+	tmp_desc.phy_src_addr[0] = src;
+	tmp_desc.phy_next_desc = next_desc;
+
+	/* incrementing next index and bank to use */
+	mv_chan->next_index = (mv_chan->next_index + 1) % mv_chans->coalescing;
+	if (!mv_chan->next_index) {
+		/* saving the bank to trigger */
+		mv_chan->active_bank = mv_chan->next_bank;
+		mv_chan->next_bank = 1 - mv_chan->next_bank;
+		trigger_chan = 1;
+		/* we're triggering so cut the chain */
+		tmp_desc.phy_next_desc = 0x0;
+	}
+
+	/* write desc */
+	memcpy(desc, &tmp_desc, sizeof(struct mv_memcpy_desc));
+
+	/* update sw chain */
+	mv_chan->buff_info[current_desc].src_addr = src;
+	mv_chan->buff_info[current_desc].dst_addr = dest;
+	mv_chan->buff_info[current_desc].len = len;
+	mv_chan->buff_info[current_desc].async_tx.flags = flags;
+	mv_chan->buff_info[current_desc].async_tx.tx_submit = mv_memcpy_tx_submit;
+	mv_chan->buff_info[current_desc].async_tx.phys = (dma_addr_t) desc;
+	mv_chan->buff_info[current_desc].async_tx.chan = chan;
+
+	/* trigger if needed */
+	if (trigger_chan) {
+
+		/* Check if engine is idle. */
+		while (mv_memcpy_chan_is_busy(mv_chan))
+			mv_memcpy_stats.avg_busy_loops++;
+
+		/* Set the descriptor pointer */
+		mv_memcpy_set_next_desc(mv_chan, mv_chan->dma_desc_pool +
+					((mv_chan->active_bank * MV_MEMCPY_BANK_SIZE) *
+					 sizeof(struct mv_memcpy_desc)));
+
+		/* trigger channel */
+		wmb();
+		mv_memcpy_chan_activate(mv_chan);
+
+		/* free bank */
+		if (mv_chan->active_bank != -1)
+			mv_memcpy_free_bank(chan, mv_chans->next_chan, last_bank);
+
+		/* set next channel */
+		mv_chans->next_chan = (mv_chans->next_chan + 1) % mv_chans->num_channels;
+	}
+
+	spin_unlock_irqrestore(&mv_chans->lock, irq_flags);
+
+	return &(mv_chan->buff_info[current_desc].async_tx);
+
+}
+
+static void mv_memcpy_free_chan_resources(struct dma_chan *chan)
+{
+	return;
+}
+
+static void mv_memcpy_issue_pending(struct dma_chan *chan)
+{
+	mv_memcpy_stats.issue_pen++;
+
+	return;
+}
+
+static int mv_memcpy_remove(struct platform_device *dev)
+{
+	/* FIXME: Empty remove? */
+	return 0;
+}
+
+static void
+mv_memcpy_conf_mbus_windows(void __iomem *base,
+			    const struct mbus_dram_target_info *dram)
+{
+	u32 win_enable = 0;
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		writel(0, base + WINDOW_BASE(i));
+		writel(0, base + WINDOW_SIZE(i));
+		if (i < 4)
+			writel(0, base + WINDOW_REMAP_HIGH(i));
+	}
+
+	for (i = 0; i < dram->num_cs; i++) {
+		const struct mbus_dram_window *cs = dram->cs + i;
+
+		writel((cs->base & 0xffff0000) |
+		       (cs->mbus_attr << 8) |
+		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
+		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));
+
+		win_enable |= (1 << i);
+		win_enable |= 3 << (16 + (2 * i));
+	}
+
+	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
+	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
+}
+
+static int mv_memcpy_debugfs_show(struct seq_file *seq, void *private)
+{
+	seq_printf(seq, "AVG busy loops            - %d\n", mv_memcpy_stats.avg_busy_loops);
+	seq_printf(seq, "chain broke               - %d\n", mv_memcpy_stats.break_chain);
+	seq_printf(seq, "mv_memcpy_issue_pending   - %d\n", mv_memcpy_stats.issue_pen);
+	seq_printf(seq, "mv_memcpy_prep_dma_memcpy - %d\n", mv_memcpy_stats.prep);
+	seq_printf(seq, "mv_memcpy_tx_status       - %d\n", mv_memcpy_stats.tx_status);
+	return 0;
+}
+
+static int mv_memcpy_debugfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, mv_memcpy_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations dfs_fops = {
+	.open		= mv_memcpy_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int mv_memcpy_debugfs_create(struct device *dev)
+{
+	int err;
+
+	if (!IS_ENABLED(CONFIG_DEBUG_FS))
+		return 0;
+
+	dfs_root = debugfs_create_dir(MV_MEMCPY_NAME, NULL);
+	if (IS_ERR_OR_NULL(dfs_root)) {
+		err = dfs_root ? -ENODEV : PTR_ERR(dfs_root);
+
+		dev_err(dev, "cannot create \"%s\" debugfs directory, err %d\n",
+			MV_MEMCPY_NAME, err);
+		return err;
+	}
+
+	dfs_stats = debugfs_create_file("mv_memcpy_stats", S_IRUSR,
+				   dfs_root, NULL, &dfs_fops);
+	if (IS_ERR_OR_NULL(dfs_stats))
+		goto out_remove;
+	return 0;
+
+out_remove:
+	debugfs_remove_recursive(dfs_root);
+	err = dfs_stats ? PTR_ERR(dfs_stats) : -ENODEV;
+	return err;
+}
+
+static int mv_memcpy_probe(struct platform_device *pdev)
+{
+	int ret, i, j, chan_id;
+	struct mv_memcpy_device *mv_dev;
+	struct mv_memcpy_chans *mv_chans;
+	struct mv_memcpy_chan *mv_chan;
+	struct dma_device *dma_dev;
+	struct resource *res;
+	const struct mbus_dram_target_info *dram;
+
+	dev_notice(&pdev->dev, "Marvell MEMCPY driver\n");
+
+	/* allocate device */
+	mv_dev = devm_kzalloc(&pdev->dev, sizeof(*mv_dev), GFP_KERNEL);
+	if (!mv_dev)
+		return -ENOMEM;
+
+	/* set number of engines allocated to this driver */
+	mv_dev->num_engines = pdev->num_resources;
+
+	/*
+	 * allocate channel
+	 * this is kind of a fake channel
+	 * it holds all the engines channels selected and appear to the
+	 * dma driver as one channel only, the driver handle the internal switching
+	 */
+	mv_chans = devm_kzalloc(&pdev->dev, sizeof(*mv_chans), GFP_KERNEL);
+	if (!mv_chans)
+		return -ENOMEM;
+
+	ret = of_property_read_u32(pdev->dev.of_node, "marvell,coalescing",
+				  &mv_chans->coalescing);
+	if (ret)
+		mv_chans->coalescing = 1;
+
+	mv_chans->device = mv_dev;
+	mv_chans->num_channels = mv_dev->num_engines * 2;
+	/* start work from channels #0 */
+	mv_chans->next_chan = 0;
+
+	dev_printk(KERN_NOTICE, &pdev->dev, "initiating %d engines\n", mv_dev->num_engines);
+
+	for (i = 0; i < mv_dev->num_engines; i++) {
+		/* get engine resources */
+		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+		if (!res)
+			return -ENODEV;
+
+		mv_dev->engine_base[i] = devm_ioremap(&pdev->dev, res->start,
+						      resource_size(res));
+		if (!mv_dev->engine_base[i])
+			return -EBUSY;
+
+		/*
+		 * (Re-)program MBUS remapping windows if we are asked to.
+		 */
+		dram = mv_mbus_dram_info();
+		if (dram)
+			mv_memcpy_conf_mbus_windows(mv_dev->engine_base[i], dram);
+
+		/* initialize both channels on selected engines */
+		for (j = 0; j < 2; j++) {
+			/* allocate coherent memory for hardware descriptors
+			 * note: writecombine gives slightly better performance, but
+			 * requires that we explicitly flush the writes
+			 */
+			chan_id = (i * 2) + j;
+
+			mv_chan = &mv_chans->chan[chan_id];
+
+			if (!mv_chan)
+				return -ENOMEM;
+
+			mv_chan->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
+									     MV_MEMCPY_POOL_SIZE,
+									     &mv_chan->dma_desc_pool,
+									     GFP_KERNEL);
+			if (!mv_chan->dma_desc_pool_virt)
+				return -ENOMEM;
+
+			/* internal channel id */
+			mv_chan->idx = j;
+			mv_chan->base = mv_dev->engine_base[i];
+
+			if (!mv_chan->base) {
+				ret = -ENOMEM;
+				goto err_free_dma;
+			}
+
+			/* set channels as memcpy */
+			mv_memcpy_set_mode(mv_chan, DMA_MEMCPY);
+
+			/* initialize indexes*/
+			mv_chan->next_bank = 0;
+			mv_chan->active_bank = -1;
+			mv_chan->next_index = 0;
+		}
+	}
+
+	/* general initializations */
+	dma_dev = &mv_dev->common;
+
+	/* only memcpy capability */
+	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+
+	/* save driver data */
+	mv_dev->pdev = pdev;
+	platform_set_drvdata(pdev, mv_dev);
+
+	INIT_LIST_HEAD(&dma_dev->channels);
+
+	/* set base routines */
+	dma_dev->device_alloc_chan_resources = mv_memcpy_alloc_chan_resources;
+	dma_dev->device_free_chan_resources = mv_memcpy_free_chan_resources;
+	dma_dev->device_tx_status = mv_memcpy_tx_status;
+	dma_dev->device_issue_pending = mv_memcpy_issue_pending;
+	dma_dev->dev = &pdev->dev;
+	dma_dev->device_prep_dma_memcpy = mv_memcpy_prep_dma_memcpy;
+
+	spin_lock_init(&mv_chans->lock);
+	mv_chans->common.device = dma_dev;
+
+	list_add_tail(&mv_chans->common.device_node, &dma_dev->channels);
+	dma_async_device_register(dma_dev);
+
+	mv_memcpy_debugfs_create(&pdev->dev);
+	return 0;
+
+err_free_dma:
+	for (i = 0; i < mv_chans->num_channels; i++)
+		dma_free_coherent(&mv_dev->pdev->dev, MV_MEMCPY_POOL_SIZE,
+				  mv_chans->chan[i].dma_desc_pool_virt, mv_chans->chan[i].dma_desc_pool);
+	return ret;
+}
+
+static struct of_device_id mv_memcpy_dt_ids[] = {
+       { .compatible = "marvell,orion-memcpy", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, mv_memcpy_dt_ids);
+
+static struct platform_driver mv_memcpy_driver = {
+	.probe		= mv_memcpy_probe,
+	.remove		= mv_memcpy_remove,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= MV_MEMCPY_NAME,
+		.of_match_table = of_match_ptr(mv_memcpy_dt_ids),
+	},
+};
+
+module_platform_driver(mv_memcpy_driver);
+
+MODULE_AUTHOR("Lior Amsalem <alior@marvell.com>");
+MODULE_DESCRIPTION("DMA engine driver for Marvell's memcpy engine");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/mv_memcpy.h b/drivers/dma/mv_memcpy.h
new file mode 100644
index 000000000000..ba3ec8bb1d9f
--- /dev/null
+++ b/drivers/dma/mv_memcpy.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2007, 2008, Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef MV_MEMCPY_H
+#define MV_MEMCPY_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+
+#define USE_TIMER
+#define MV_MEMCPY_POOL_SIZE		PAGE_SIZE
+#define MV_MEMCPY_SLOT_SIZE		64
+#define MV_MEMCPY_THRESHOLD		1
+
+#define MEMCPY_OPERATION_MODE_MEMCPY	2
+
+#define MEMCPY_CURR_DESC(chan)	(chan->base + 0x210 + (chan->idx * 4))
+#define MEMCPY_NEXT_DESC(chan)	(chan->base + 0x200 + (chan->idx * 4))
+#define MEMCPY_BYTE_COUNT(chan)	(chan->base + 0x220 + (chan->idx * 4))
+#define MEMCPY_DEST_POINTER(chan)	(chan->base + 0x2B0 + (chan->idx * 4))
+#define MEMCPY_BLOCK_SIZE(chan)	(chan->base + 0x2C0 + (chan->idx * 4))
+#define MEMCPY_INIT_VALUE_LOW(chan)	(chan->base + 0x2E0)
+#define MEMCPY_INIT_VALUE_HIGH(chan)	(chan->base + 0x2E4)
+
+#define MEMCPY_CONFIG(chan)	(chan->base + 0x10 + (chan->idx * 4))
+#define MEMCPY_ACTIVATION(chan)	(chan->base + 0x20 + (chan->idx * 4))
+#define MEMCPY_INTR_CAUSE(chan)	(chan->base + 0x30)
+#define MEMCPY_INTR_MASK(chan)	(chan->base + 0x40)
+#define MEMCPY_ERROR_CAUSE(chan)	(chan->base + 0x50)
+#define MEMCPY_ERROR_ADDR(chan)	(chan->base + 0x60)
+#define MEMCPY_INTR_MASK_VALUE	0x3F7
+
+#define WINDOW_BASE(w)		(0x250 + ((w) << 2))
+#define WINDOW_SIZE(w)		(0x270 + ((w) << 2))
+#define WINDOW_REMAP_HIGH(w)	(0x290 + ((w) << 2))
+#define WINDOW_BAR_ENABLE(chan)	(0x240 + ((chan) << 2))
+
+/* This structure describes MEMCPY descriptor size 64bytes */
+struct mv_memcpy_desc {
+	u32 status;		/* descriptor execution status */
+	u32 crc32_result;	/* result of CRC-32 calculation */
+	u32 desc_command;	/* type of operation to be carried out */
+	u32 phy_next_desc;	/* next descriptor address pointer */
+	u32 byte_count;		/* size of src/dst blocks in bytes */
+	u32 phy_dest_addr;	/* destination block address */
+	u32 phy_src_addr[8];	/* source block addresses */
+	u32 reserved0;
+	u32 reserved1;
+};
+
+struct mv_memcpy_chan {
+	dma_cookie_t completed_cookie;
+	void __iomem *base;
+	unsigned int idx;
+	dma_addr_t   dma_desc_pool;
+	struct mv_memcpy_desc *dma_desc_pool_virt;
+	unsigned int next_bank;
+	int active_bank;
+	unsigned int next_index;
+	unsigned int num_descs[2];
+	struct {
+		struct dma_async_tx_descriptor	async_tx;
+		dma_addr_t src_addr;
+		dma_addr_t dst_addr;
+		size_t     len;
+	} buff_info[16];
+};
+
+/**
+ * struct mv_memcpy_chan - internal representation of a MEMCPY channel
+ * @completed_cookie: identifier for the most recently completed operation
+ * @mmr_base: memory mapped register base
+ * @idx: the index of the memcpy channel
+ * @device: parent device
+ * @common: common dmaengine channel object members
+ * @dma_desc_pool: base of DMA descriptor region (DMA address)
+ * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
+ */
+struct mv_memcpy_chans {
+	struct mv_memcpy_device	*device;
+	struct dma_chan         common;
+	spinlock_t              lock; /* protects the descriptor slot pool */
+	int                     num_channels;
+	struct mv_memcpy_chan   chan[4];
+	unsigned int            next_chan;
+	unsigned int            coalescing;
+};
+
+/**
+ * struct mv_memcpy_device - internal representation of a MEMCPY device
+ * @pdev: Platform device
+ * @id: HW MEMCPY Device selector
+ * @common: embedded struct dma_device
+ * @lock: serializes enqueue/dequeue operations to the descriptors pool
+ */
+struct mv_memcpy_device {
+	struct platform_device  *pdev;
+	struct dma_device       common;
+	int                     num_engines;
+	void __iomem	        *engine_base[2];
+};
+
+/* Stores certain registers during suspend to RAM */
+struct mv_memcpy_save_regs {
+	int memcpy_config;
+	int interrupt_mask;
+};
+
+#define MV_MEMCPY_MIN_BYTE_COUNT (16)
+#define MEMCPY_MAX_BYTE_COUNT    ((16 * 1024 * 1024) - 1)
+#define MV_MEMCPY_MAX_BYTE_COUNT MEMCPY_MAX_BYTE_COUNT
+
+#endif
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index d64ae14f2706..a4d85b2b3061 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -30,10 +30,15 @@
 #include <linux/of_irq.h>
 #include <linux/irqdomain.h>
 #include <linux/platform_data/dma-mv_xor.h>
+#include <linux/crc32c.h>
 
 #include "dmaengine.h"
 #include "mv_xor.h"
 
+unsigned int dummy1[MV_XOR_MIN_BYTE_COUNT];
+unsigned int dummy2[MV_XOR_MIN_BYTE_COUNT];
+dma_addr_t dummy1_addr, dummy2_addr;
+
 static void mv_xor_issue_pending(struct dma_chan *chan);
 
 #define to_mv_xor_chan(chan)		\
@@ -48,10 +53,17 @@ static void mv_xor_issue_pending(struct dma_chan *chan);
 static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
 {
 	struct mv_xor_desc *hw_desc = desc->hw_desc;
+	u32 command = 0;
 
 	hw_desc->status = (1 << 31);
 	hw_desc->phy_next_desc = 0;
-	hw_desc->desc_command = (1 << 31);
+	if (flags & DMA_PREP_INTERRUPT)
+		command = (1 << 31);
+
+	if (desc->type == DMA_CRC32C)
+		command |= (1 << 30);	/* CRCLast */
+
+	hw_desc->desc_command = command;
 }
 
 static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
@@ -64,7 +76,7 @@ static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
 				int src_idx)
 {
 	struct mv_xor_desc *hw_desc = desc->hw_desc;
-	return hw_desc->phy_src_addr[src_idx];
+	return hw_desc->phy_src_addr[mv_phy_src_idx(src_idx)];
 }
 
 
@@ -83,17 +95,6 @@ static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
 	hw_desc->phy_next_desc = next_desc_addr;
 }
 
-static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
-{
-	struct mv_xor_desc *hw_desc = desc->hw_desc;
-	hw_desc->phy_next_desc = 0;
-}
-
-static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
-{
-	desc->value = val;
-}
-
 static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
 				  dma_addr_t addr)
 {
@@ -101,59 +102,36 @@ static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
 	hw_desc->phy_dest_addr = addr;
 }
 
-static int mv_chan_memset_slot_count(size_t len)
-{
-	return 1;
-}
-
-#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)
-
 static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
 				 int index, dma_addr_t addr)
 {
 	struct mv_xor_desc *hw_desc = desc->hw_desc;
-	hw_desc->phy_src_addr[index] = addr;
+	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
 	if (desc->type == DMA_XOR)
 		hw_desc->desc_command |= (1 << index);
 }
 
 static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
 {
-	return __raw_readl(XOR_CURR_DESC(chan));
+	return readl_relaxed(XOR_CURR_DESC(chan));
 }
 
 static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
 					u32 next_desc_addr)
 {
-	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
-}
-
-static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
-{
-	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
-}
-
-static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
-{
-	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
-}
-
-static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
-{
-	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
-	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
+	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
 }
 
 static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
 {
-	u32 val = __raw_readl(XOR_INTR_MASK(chan));
+	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
 	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
-	__raw_writel(val, XOR_INTR_MASK(chan));
+	writel_relaxed(val, XOR_INTR_MASK(chan));
 }
 
 static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
 {
-	u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
+	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
 	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
 	return intr_cause;
 }
@@ -168,35 +146,22 @@ static int mv_is_err_intr(u32 intr_cause)
 
 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
 {
-	u32 val = ~(1 << (chan->idx * 16));
+	u32 val = ~(3 << (chan->idx * 16));
 	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
-	__raw_writel(val, XOR_INTR_CAUSE(chan));
+	writel_relaxed(val, XOR_INTR_CAUSE(chan));
 }
 
 static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
 {
 	u32 val = 0xFFFF0000 >> (chan->idx * 16);
-	__raw_writel(val, XOR_INTR_CAUSE(chan));
-}
-
-static int mv_can_chain(struct mv_xor_desc_slot *desc)
-{
-	struct mv_xor_desc_slot *chain_old_tail = list_entry(
-		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);
-
-	if (chain_old_tail->type != desc->type)
-		return 0;
-	if (desc->type == DMA_MEMSET)
-		return 0;
-
-	return 1;
+	writel_relaxed(val, XOR_INTR_CAUSE(chan));
 }
 
 static void mv_set_mode(struct mv_xor_chan *chan,
 			       enum dma_transaction_type type)
 {
 	u32 op_mode;
-	u32 config = __raw_readl(XOR_CONFIG(chan));
+	u32 config = readl_relaxed(XOR_CONFIG(chan));
 
 	switch (type) {
 	case DMA_XOR:
@@ -205,8 +170,8 @@ static void mv_set_mode(struct mv_xor_chan *chan,
 	case DMA_MEMCPY:
 		op_mode = XOR_OPERATION_MODE_MEMCPY;
 		break;
-	case DMA_MEMSET:
-		op_mode = XOR_OPERATION_MODE_MEMSET;
+	case DMA_CRC32C:
+		op_mode = XOR_OPERATION_MODE_CRC32C;
 		break;
 	default:
 		dev_err(mv_chan_to_devp(chan),
@@ -218,49 +183,35 @@ static void mv_set_mode(struct mv_xor_chan *chan,
 
 	config &= ~0x7;
 	config |= op_mode;
-	__raw_writel(config, XOR_CONFIG(chan));
+
+#if defined(__BIG_ENDIAN)
+	config |= XOR_DESCRIPTOR_SWAP;
+#else
+	config &= ~XOR_DESCRIPTOR_SWAP;
+#endif
+
+	writel_relaxed(config, XOR_CONFIG(chan));
 	chan->current_type = type;
 }
 
 static void mv_chan_activate(struct mv_xor_chan *chan)
 {
-	u32 activation;
+	dev_dbg(mv_chan_to_devp(chan), "activate chan %d\n", chan->dmadev.dev_id);
 
-	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
-	activation = __raw_readl(XOR_ACTIVATION(chan));
-	activation |= 0x1;
-	__raw_writel(activation, XOR_ACTIVATION(chan));
+	/* writel ensures all descriptors are flushed before
+	   activation*/
+	writel(0x1, XOR_ACTIVATION(chan));
 }
 
 static char mv_chan_is_busy(struct mv_xor_chan *chan)
 {
-	u32 state = __raw_readl(XOR_ACTIVATION(chan));
+	u32 state = readl_relaxed(XOR_ACTIVATION(chan));
 
 	state = (state >> 4) & 0x3;
 
 	return (state == 1) ? 1 : 0;
 }
 
-static int mv_chan_xor_slot_count(size_t len, int src_cnt)
-{
-	return 1;
-}
-
-/**
- * mv_xor_free_slots - flags descriptor slots for reuse
- * @slot: Slot to free
- * Caller must hold &mv_chan->lock while calling this function
- */
-static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
-			      struct mv_xor_desc_slot *slot)
-{
-	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
-		__func__, __LINE__, slot);
-
-	slot->slots_per_op = 0;
-
-}
-
 /*
  * mv_xor_start_new_chain - program the engine to operate on new chain headed by
  * sw_desc
@@ -269,24 +220,12 @@ static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
 static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
 				   struct mv_xor_desc_slot *sw_desc)
 {
-	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
-		__func__, __LINE__, sw_desc);
-	if (sw_desc->type != mv_chan->current_type)
-		mv_set_mode(mv_chan, sw_desc->type);
-
-	if (sw_desc->type == DMA_MEMSET) {
-		/* for memset requests we need to program the engine, no
-		 * descriptors used.
-		 */
-		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
-		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
-		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
-		mv_chan_set_value(mv_chan, sw_desc->value);
-	} else {
-		/* set the hardware chain */
-		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
-	}
-	mv_chan->pending += sw_desc->slot_cnt;
+	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p phys %x\n",
+		__func__, __LINE__, sw_desc, sw_desc->async_tx.phys);
+
+	/* set the hardware chain */
+	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
+	mv_chan->pending++;
 	mv_xor_issue_pending(&mv_chan->dmachan);
 }
 
@@ -309,8 +248,8 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
 		/* unmap dma addresses
 		 * (unmap_single vs unmap_page?)
 		 */
-		if (desc->group_head && desc->unmap_len) {
-			struct mv_xor_desc_slot *unmap = desc->group_head;
+		if (desc->unmap_len) {
+			struct mv_xor_desc_slot *unmap = desc;
 			struct device *dev = mv_chan_to_devp(mv_chan);
 			u32 len = unmap->unmap_len;
 			enum dma_ctrl_flags flags = desc->async_tx.flags;
@@ -340,7 +279,6 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
 						       DMA_TO_DEVICE);
 				}
 			}
-			desc->group_head = NULL;
 		}
 	}
 
@@ -357,12 +295,10 @@ mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
 
 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
 	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
-				 completed_node) {
+				 node) {
 
-		if (async_tx_test_ack(&iter->async_tx)) {
-			list_del(&iter->completed_node);
-			mv_xor_free_slots(mv_chan, iter);
-		}
+		if (async_tx_test_ack(&iter->async_tx))
+			list_move_tail(&iter->node, &mv_chan->free_slots);
 	}
 	return 0;
 }
@@ -373,17 +309,16 @@ mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
 {
 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
 		__func__, __LINE__, desc, desc->async_tx.flags);
-	list_del(&desc->chain_node);
+
 	/* the client is allowed to attach dependent operations
 	 * until 'ack' is set
 	 */
-	if (!async_tx_test_ack(&desc->async_tx)) {
+	if (!async_tx_test_ack(&desc->async_tx))
 		/* move this slot to the completed_slots */
-		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
-		return 0;
-	}
+		list_move_tail(&desc->node, &mv_chan->completed_slots);
+	else
+		list_move_tail(&desc->node, &mv_chan->free_slots);
 
-	mv_xor_free_slots(mv_chan, desc);
 	return 0;
 }
 
@@ -393,7 +328,17 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
 	dma_cookie_t cookie = 0;
 	int busy = mv_chan_is_busy(mv_chan);
 	u32 current_desc = mv_chan_get_current_desc(mv_chan);
-	int seen_current = 0;
+	int current_cleaned = 0;
+	struct mv_xor_desc *hw_desc;
+	struct dma_chan *dma_chan;
+	dma_chan = &mv_chan->dmachan;
+
+	/* IO sync must be after reading the current_desc to unsure all descriptors are
+	 * updated currectly in DRAM, and no XOR -> DRAM transactions are buffered
+	 * this ensure all descriptors are synced to the current_desc position
+	 */
+	dma_sync_single_for_cpu(dma_chan->device->dev, (dma_addr_t) NULL,
+				(size_t) NULL, DMA_FROM_DEVICE);
 
 	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
 	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
@@ -404,39 +349,51 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
 	 */
 
 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
-					chain_node) {
-		prefetch(_iter);
-		prefetch(&_iter->async_tx);
+				 node) {
+		/* clean finished descriptors */
+		hw_desc = iter->hw_desc;
+		if (hw_desc->status & XOR_DESC_SUCCESS) {
+			if (iter->type == DMA_CRC32C) {
+				struct mv_xor_desc *hw_desc = iter->hw_desc;
+				BUG_ON(!iter->crc32_result);
+				*iter->crc32_result = ~hw_desc->crc32_result;
+			}
 
-		/* do not advance past the current descriptor loaded into the
-		 * hardware channel, subsequent descriptors are either in
-		 * process or have not been submitted
-		 */
-		if (seen_current)
-			break;
+			cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
 
-		/* stop the search if we reach the current descriptor and the
-		 * channel is busy
-		 */
-		if (iter->async_tx.phys == current_desc) {
-			seen_current = 1;
-			if (busy)
+			/* done processing desc, clean slot */
+			mv_xor_clean_slot(iter, mv_chan);
+
+			/* break if we did cleaned the current */
+			if (iter->async_tx.phys == current_desc) {
+				current_cleaned = 1;
+				break;
+			}
+		} else {
+			if (iter->async_tx.phys == current_desc) {
+				current_cleaned = 0;
 				break;
+			}
 		}
-
-		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);
-
-		if (mv_xor_clean_slot(iter, mv_chan))
-			break;
 	}
 
 	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
-		struct mv_xor_desc_slot *chain_head;
-		chain_head = list_entry(mv_chan->chain.next,
-					struct mv_xor_desc_slot,
-					chain_node);
-
-		mv_xor_start_new_chain(mv_chan, chain_head);
+		if (current_cleaned) {
+			/* current descriptor cleaned and removed, run from list head */
+			iter = list_entry(mv_chan->chain.next,
+						struct mv_xor_desc_slot,
+						node);
+			mv_xor_start_new_chain(mv_chan, iter);
+		} else {
+			if (!list_is_last(&iter->node, &mv_chan->chain)) {
+				/* descriptors are still waiting after current, trigger them */
+				iter = list_entry(iter->node.next, struct mv_xor_desc_slot, node);
+				mv_xor_start_new_chain(mv_chan, iter);
+			} else {
+				/* some descriptors are still waiting to be cleaned */
+				tasklet_schedule(&mv_chan->irq_tasklet);
+			}
+		}
 	}
 
 	if (cookie > 0)
@@ -457,81 +414,29 @@ static void mv_xor_tasklet(unsigned long data)
 	mv_xor_slot_cleanup(chan);
 }
 
-static struct mv_xor_desc_slot *
-mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
-		    int slots_per_op)
+static struct mv_xor_desc_slot *mv_xor_alloc_slot(struct mv_xor_chan *mv_chan)
 {
-	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
-	LIST_HEAD(chain);
-	int slots_found, retry = 0;
+	struct mv_xor_desc_slot *iter;
 
-	/* start search from the last allocated descrtiptor
-	 * if a contiguous allocation can not be found start searching
-	 * from the beginning of the list
-	 */
-retry:
-	slots_found = 0;
-	if (retry == 0)
-		iter = mv_chan->last_used;
-	else
-		iter = list_entry(&mv_chan->all_slots,
-			struct mv_xor_desc_slot,
-			slot_node);
-
-	list_for_each_entry_safe_continue(
-		iter, _iter, &mv_chan->all_slots, slot_node) {
-		prefetch(_iter);
-		prefetch(&_iter->async_tx);
-		if (iter->slots_per_op) {
-			/* give up after finding the first busy slot
-			 * on the second pass through the list
-			 */
-			if (retry)
-				break;
+	spin_lock_bh(&mv_chan->lock);
 
-			slots_found = 0;
-			continue;
-		}
+	if (!list_empty(&mv_chan->free_slots)) {
+		iter = list_first_entry(&mv_chan->free_slots,
+					struct mv_xor_desc_slot,
+					node);
 
-		/* start the allocation if the slot is correctly aligned */
-		if (!slots_found++)
-			alloc_start = iter;
-
-		if (slots_found == num_slots) {
-			struct mv_xor_desc_slot *alloc_tail = NULL;
-			struct mv_xor_desc_slot *last_used = NULL;
-			iter = alloc_start;
-			while (num_slots) {
-				int i;
-
-				/* pre-ack all but the last descriptor */
-				async_tx_ack(&iter->async_tx);
-
-				list_add_tail(&iter->chain_node, &chain);
-				alloc_tail = iter;
-				iter->async_tx.cookie = 0;
-				iter->slot_cnt = num_slots;
-				iter->xor_check_result = NULL;
-				for (i = 0; i < slots_per_op; i++) {
-					iter->slots_per_op = slots_per_op - i;
-					last_used = iter;
-					iter = list_entry(iter->slot_node.next,
-						struct mv_xor_desc_slot,
-						slot_node);
-				}
-				num_slots -= slots_per_op;
-			}
-			alloc_tail->group_head = alloc_start;
-			alloc_tail->async_tx.cookie = -EBUSY;
-			list_splice(&chain, &alloc_tail->tx_list);
-			mv_chan->last_used = last_used;
-			mv_desc_clear_next_desc(alloc_start);
-			mv_desc_clear_next_desc(alloc_tail);
-			return alloc_tail;
-		}
+		list_move_tail(&iter->node, &mv_chan->allocated_slots);
+
+		spin_unlock_bh(&mv_chan->lock);
+
+		/* pre-ack descriptor */
+		async_tx_ack(&iter->async_tx);
+		iter->async_tx.cookie = -EBUSY;
+
+		return iter;
 	}
-	if (!retry++)
-		goto retry;
+
+	spin_unlock_bh(&mv_chan->lock);
 
 	/* try to free some slots if the allocation fails */
 	tasklet_schedule(&mv_chan->irq_tasklet);
@@ -545,7 +450,7 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
 {
 	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
-	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
+	struct mv_xor_desc_slot *old_chain_tail;
 	dma_cookie_t cookie;
 	int new_hw_chain = 1;
 
@@ -553,30 +458,25 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
 		"%s sw_desc %p: async_tx %p\n",
 		__func__, sw_desc, &sw_desc->async_tx);
 
-	grp_start = sw_desc->group_head;
 
 	spin_lock_bh(&mv_chan->lock);
 	cookie = dma_cookie_assign(tx);
 
 	if (list_empty(&mv_chan->chain))
-		list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
+		list_move_tail(&sw_desc->node, &mv_chan->chain);
 	else {
 		new_hw_chain = 0;
 
 		old_chain_tail = list_entry(mv_chan->chain.prev,
 					    struct mv_xor_desc_slot,
-					    chain_node);
-		list_splice_init(&grp_start->tx_list,
-				 &old_chain_tail->chain_node);
-
-		if (!mv_can_chain(grp_start))
-			goto submit_done;
+					    node);
+		list_move_tail(&sw_desc->node, &mv_chan->chain);
 
 		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
 			old_chain_tail->async_tx.phys);
 
 		/* fix up the hardware chain */
-		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
+		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);
 
 		/* if the channel is not busy */
 		if (!mv_chan_is_busy(mv_chan)) {
@@ -591,9 +491,8 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
 	}
 
 	if (new_hw_chain)
-		mv_xor_start_new_chain(mv_chan, grp_start);
+		mv_xor_start_new_chain(mv_chan, sw_desc);
 
-submit_done:
 	spin_unlock_bh(&mv_chan->lock);
 
 	return cookie;
@@ -622,9 +521,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 
 		dma_async_tx_descriptor_init(&slot->async_tx, chan);
 		slot->async_tx.tx_submit = mv_xor_tx_submit;
-		INIT_LIST_HEAD(&slot->chain_node);
-		INIT_LIST_HEAD(&slot->slot_node);
-		INIT_LIST_HEAD(&slot->tx_list);
+		INIT_LIST_HEAD(&slot->node);
 		hw_desc = (char *) mv_chan->dma_desc_pool;
 		slot->async_tx.phys =
 			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
@@ -632,95 +529,77 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
 
 		spin_lock_bh(&mv_chan->lock);
 		mv_chan->slots_allocated = idx;
-		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
+		list_add_tail(&slot->node, &mv_chan->free_slots);
 		spin_unlock_bh(&mv_chan->lock);
 	}
 
-	if (mv_chan->slots_allocated && !mv_chan->last_used)
-		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
-					struct mv_xor_desc_slot,
-					slot_node);
-
 	dev_dbg(mv_chan_to_devp(mv_chan),
-		"allocated %d descriptor slots last_used: %p\n",
-		mv_chan->slots_allocated, mv_chan->last_used);
+		"allocated %d descriptor slots\n",
+		mv_chan->slots_allocated);
 
 	return mv_chan->slots_allocated ? : -ENOMEM;
 }
 
 static struct dma_async_tx_descriptor *
-mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
-		size_t len, unsigned long flags)
+mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
 {
 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
-	struct mv_xor_desc_slot *sw_desc, *grp_start;
-	int slot_cnt;
+	struct mv_xor_desc_slot *sw_desc;
 
 	dev_dbg(mv_chan_to_devp(mv_chan),
-		"%s dest: %x src %x len: %u flags: %ld\n",
-		__func__, dest, src, len, flags);
-	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
-		return NULL;
-
-	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
+		"%s flags: %ld\n",
+		__func__, flags);
 
-	spin_lock_bh(&mv_chan->lock);
-	slot_cnt = mv_chan_memcpy_slot_count(len);
-	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
+	sw_desc = mv_xor_alloc_slot(mv_chan);
 	if (sw_desc) {
-		sw_desc->type = DMA_MEMCPY;
+		sw_desc->type = DMA_XOR;
 		sw_desc->async_tx.flags = flags;
-		grp_start = sw_desc->group_head;
-		mv_desc_init(grp_start, flags);
-		mv_desc_set_byte_count(grp_start, len);
-		mv_desc_set_dest_addr(sw_desc->group_head, dest);
-		mv_desc_set_src_addr(grp_start, 0, src);
-		sw_desc->unmap_src_cnt = 1;
-		sw_desc->unmap_len = len;
+		mv_desc_init(sw_desc, DMA_PREP_INTERRUPT);
+		/* the byte count field is the same as in memcpy desc*/
+		mv_desc_set_byte_count(sw_desc, MV_XOR_MIN_BYTE_COUNT);
+		mv_desc_set_dest_addr(sw_desc, dummy1_addr);
+		sw_desc->unmap_src_cnt = 0;
+		sw_desc->unmap_len = 0;
+		mv_desc_set_src_addr(sw_desc, 1, dummy2_addr);
 	}
-	spin_unlock_bh(&mv_chan->lock);
 
 	dev_dbg(mv_chan_to_devp(mv_chan),
 		"%s sw_desc %p async_tx %p\n",
-		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);
-
+		__func__, sw_desc, &sw_desc->async_tx);
 	return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
 static struct dma_async_tx_descriptor *
-mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
-		       size_t len, unsigned long flags)
+mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
+		size_t len, unsigned long flags)
 {
 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
-	struct mv_xor_desc_slot *sw_desc, *grp_start;
-	int slot_cnt;
+	struct mv_xor_desc_slot *sw_desc;
 
 	dev_dbg(mv_chan_to_devp(mv_chan),
-		"%s dest: %x len: %u flags: %ld\n",
-		__func__, dest, len, flags);
+		"%s dest: %x src %x len: %u flags: %ld\n",
+		__func__, dest, src, len, flags);
 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
 		return NULL;
 
 	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
 
-	spin_lock_bh(&mv_chan->lock);
-	slot_cnt = mv_chan_memset_slot_count(len);
-	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
+	sw_desc = mv_xor_alloc_slot(mv_chan);
 	if (sw_desc) {
-		sw_desc->type = DMA_MEMSET;
+		sw_desc->type = DMA_XOR;
 		sw_desc->async_tx.flags = flags;
-		grp_start = sw_desc->group_head;
-		mv_desc_init(grp_start, flags);
-		mv_desc_set_byte_count(grp_start, len);
-		mv_desc_set_dest_addr(sw_desc->group_head, dest);
-		mv_desc_set_block_fill_val(grp_start, value);
+		mv_desc_init(sw_desc, flags);
+		mv_desc_set_byte_count(sw_desc, len);
+		mv_desc_set_dest_addr(sw_desc, dest);
+		mv_desc_set_src_addr(sw_desc, 0, src);
 		sw_desc->unmap_src_cnt = 1;
 		sw_desc->unmap_len = len;
 	}
-	spin_unlock_bh(&mv_chan->lock);
+
 	dev_dbg(mv_chan_to_devp(mv_chan),
-		"%s sw_desc %p async_tx %p \n",
-		__func__, sw_desc, &sw_desc->async_tx);
+		"%s sw_desc %p async_tx %p\n",
+		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);
+
 	return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
@@ -729,8 +608,7 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 		    unsigned int src_cnt, size_t len, unsigned long flags)
 {
 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
-	struct mv_xor_desc_slot *sw_desc, *grp_start;
-	int slot_cnt;
+	struct mv_xor_desc_slot *sw_desc;
 
 	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
 		return NULL;
@@ -741,29 +619,62 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
 		"%s src_cnt: %d len: dest %x %u flags: %ld\n",
 		__func__, src_cnt, len, dest, flags);
 
-	spin_lock_bh(&mv_chan->lock);
-	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
-	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
+	sw_desc = mv_xor_alloc_slot(mv_chan);
 	if (sw_desc) {
 		sw_desc->type = DMA_XOR;
 		sw_desc->async_tx.flags = flags;
-		grp_start = sw_desc->group_head;
-		mv_desc_init(grp_start, flags);
+		mv_desc_init(sw_desc, flags);
 		/* the byte count field is the same as in memcpy desc*/
-		mv_desc_set_byte_count(grp_start, len);
-		mv_desc_set_dest_addr(sw_desc->group_head, dest);
+		mv_desc_set_byte_count(sw_desc, len);
+		mv_desc_set_dest_addr(sw_desc, dest);
 		sw_desc->unmap_src_cnt = src_cnt;
 		sw_desc->unmap_len = len;
 		while (src_cnt--)
-			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
+			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
 	}
-	spin_unlock_bh(&mv_chan->lock);
+
 	dev_dbg(mv_chan_to_devp(mv_chan),
-		"%s sw_desc %p async_tx %p \n",
+		"%s sw_desc %p async_tx %p\n",
 		__func__, sw_desc, &sw_desc->async_tx);
 	return sw_desc ? &sw_desc->async_tx : NULL;
 }
 
+static struct dma_async_tx_descriptor *
+mv_xor_prep_dma_crc32c(struct dma_chan *chan, dma_addr_t src,
+		size_t len, u32 *seed, unsigned long flags)
+{
+	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
+	struct mv_xor_desc_slot *sw_desc;
+
+	dev_dbg(mv_chan_to_devp(mv_chan), "%s src: %x len: %u flags: %lx\n",
+		__func__, src, len, flags);
+
+	/* This HW only supports only ~0 seed
+	 * Check for data size limitations
+	 */
+	if (*seed != ~0 ||
+	    unlikely(len < MV_XOR_MIN_BYTE_COUNT) ||
+	    unlikely(len > XOR_MAX_BYTE_COUNT))
+		return NULL;
+
+	sw_desc = mv_xor_alloc_slot(mv_chan);
+	if (sw_desc) {
+		sw_desc->type = DMA_CRC32C;
+		sw_desc->async_tx.flags = flags;
+		mv_desc_init(sw_desc, flags);
+		mv_desc_set_byte_count(sw_desc, len);
+		mv_desc_set_src_addr(sw_desc, 0, src);
+		sw_desc->unmap_src_cnt = 1;
+		sw_desc->unmap_len = len;
+		sw_desc->crc32_result = seed;
+	}
+
+	dev_dbg(mv_chan_to_devp(mv_chan), "%s sw_desc %p async_tx %p\n",
+		__func__, sw_desc, &sw_desc->async_tx);
+
+	return sw_desc ? &sw_desc->async_tx : NULL;
+}
+
 static void mv_xor_free_chan_resources(struct dma_chan *chan)
 {
 	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
@@ -774,22 +685,26 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan)
 
 	spin_lock_bh(&mv_chan->lock);
 	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
-					chain_node) {
+					node) {
 		in_use_descs++;
-		list_del(&iter->chain_node);
+		list_move_tail(&iter->node, &mv_chan->free_slots);
 	}
 	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
-				 completed_node) {
+				 node) {
+		in_use_descs++;
+		list_move_tail(&iter->node, &mv_chan->free_slots);
+	}
+	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
+				 node) {
 		in_use_descs++;
-		list_del(&iter->completed_node);
+		list_move_tail(&iter->node, &mv_chan->free_slots);
 	}
 	list_for_each_entry_safe_reverse(
-		iter, _iter, &mv_chan->all_slots, slot_node) {
-		list_del(&iter->slot_node);
+		iter, _iter, &mv_chan->free_slots, node) {
+		list_del(&iter->node);
 		kfree(iter);
 		mv_chan->slots_allocated--;
 	}
-	mv_chan->last_used = NULL;
 
 	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
 		__func__, mv_chan->slots_allocated);
@@ -815,7 +730,9 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
 
 	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret == DMA_SUCCESS) {
+		spin_lock_bh(&mv_chan->lock);
 		mv_xor_clean_completed_slots(mv_chan);
+		spin_unlock_bh(&mv_chan->lock);
 		return ret;
 	}
 	mv_xor_slot_cleanup(mv_chan);
@@ -827,22 +744,22 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan)
 {
 	u32 val;
 
-	val = __raw_readl(XOR_CONFIG(chan));
+	val = readl_relaxed(XOR_CONFIG(chan));
 	dev_err(mv_chan_to_devp(chan), "config       0x%08x\n", val);
 
-	val = __raw_readl(XOR_ACTIVATION(chan));
+	val = readl_relaxed(XOR_ACTIVATION(chan));
 	dev_err(mv_chan_to_devp(chan), "activation   0x%08x\n", val);
 
-	val = __raw_readl(XOR_INTR_CAUSE(chan));
+	val = readl_relaxed(XOR_INTR_CAUSE(chan));
 	dev_err(mv_chan_to_devp(chan), "intr cause   0x%08x\n", val);
 
-	val = __raw_readl(XOR_INTR_MASK(chan));
+	val = readl_relaxed(XOR_INTR_MASK(chan));
 	dev_err(mv_chan_to_devp(chan), "intr mask    0x%08x\n", val);
 
-	val = __raw_readl(XOR_ERROR_CAUSE(chan));
+	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
 	dev_err(mv_chan_to_devp(chan), "error cause  0x%08x\n", val);
 
-	val = __raw_readl(XOR_ERROR_ADDR(chan));
+	val = readl_relaxed(XOR_ERROR_ADDR(chan));
 	dev_err(mv_chan_to_devp(chan), "error addr   0x%08x\n", val);
 }
 
@@ -963,6 +880,70 @@ out:
 	return err;
 }
 
+#define MV_XOR_CRC32_TEST_SIZE	PAGE_SIZE
+
+static int mv_xor_crc32_self_test(struct mv_xor_chan *mv_chan)
+{
+	int i;
+	void *src;
+	u32 sum;
+	dma_addr_t src_dma;
+	struct dma_chan *dma_chan;
+	dma_cookie_t cookie;
+	struct dma_async_tx_descriptor *tx;
+	int err = 0;
+
+	src = kmalloc(MV_XOR_CRC32_TEST_SIZE, GFP_KERNEL);
+	if (!src)
+		return -ENOMEM;
+
+	/* Fill in src buffer */
+	for (i = 0; i < MV_XOR_CRC32_TEST_SIZE; i++)
+		((u8 *) src)[i] = (u8)i;
+
+	dma_chan = &mv_chan->dmachan;
+
+	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
+		err = -ENODEV;
+		goto out;
+	}
+
+	src_dma = dma_map_single(dma_chan->device->dev, src,
+				 MV_XOR_CRC32_TEST_SIZE, DMA_TO_DEVICE);
+
+	sum = ~0;
+	tx = mv_xor_prep_dma_crc32c(dma_chan, src_dma,
+				    MV_XOR_CRC32_TEST_SIZE, &sum, 0);
+
+	if (unlikely(tx == (struct dma_async_tx_descriptor *)1))
+		BUG();
+
+	BUG_ON(!tx);
+
+	cookie = mv_xor_tx_submit(tx);
+	msleep(20);
+
+	if (mv_xor_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
+		dev_err(dma_chan->device->dev,
+			"Self-test crc32 timed out, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
+	if (crc32c(~(u32)0, src, MV_XOR_CRC32_TEST_SIZE) != sum) {
+		dev_err(dma_chan->device->dev,
+			"Self-test crc32c failed compare, disabling\n");
+		err = -ENODEV;
+		goto free_resources;
+	}
+
+free_resources:
+	mv_xor_free_chan_resources(dma_chan);
+out:
+	kfree(src);
+	return err;
+}
+
 #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
 static int
 mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
@@ -1137,12 +1118,14 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 	/* set prep routines based on capability */
 	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
 		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
-	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
-		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
 		dma_dev->max_xor = 8;
 		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
 	}
+	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
+		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
+	if (dma_has_cap(DMA_CRC32C, dma_dev->cap_mask))
+		dma_dev->device_prep_dma_crc32c = mv_xor_prep_dma_crc32c;
 
 	mv_chan->mmr_base = xordev->xor_base;
 	if (!mv_chan->mmr_base) {
@@ -1162,12 +1145,24 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 
 	mv_chan_unmask_interrupts(mv_chan);
 
-	mv_set_mode(mv_chan, DMA_MEMCPY);
+	if (dma_has_cap(DMA_CRC32C, dma_dev->cap_mask)) {
+		/* channel can support CRC or XOR mode only, not both */
+		if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) ||
+		    dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ||
+		    dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask)) {
+			BUG();
+			ret = -EINVAL;
+			goto err_free_dma;
+		}
+		mv_set_mode(mv_chan, DMA_CRC32C);
+	} else
+		mv_set_mode(mv_chan, DMA_XOR);
 
 	spin_lock_init(&mv_chan->lock);
 	INIT_LIST_HEAD(&mv_chan->chain);
 	INIT_LIST_HEAD(&mv_chan->completed_slots);
-	INIT_LIST_HEAD(&mv_chan->all_slots);
+	INIT_LIST_HEAD(&mv_chan->free_slots);
+	INIT_LIST_HEAD(&mv_chan->allocated_slots);
 	mv_chan->dmachan.device = dma_dev;
 	dma_cookie_init(&mv_chan->dmachan);
 
@@ -1187,11 +1182,18 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
 			goto err_free_irq;
 	}
 
+	if (dma_has_cap(DMA_CRC32C, dma_dev->cap_mask)) {
+		ret = mv_xor_crc32_self_test(mv_chan);
+		dev_dbg(&pdev->dev, "crc32 self test returned %d\n", ret);
+		if (ret)
+			goto err_free_irq;
+	}
+
 	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s%s)\n",
 		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
-		 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)  ? "fill " : "",
 		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
-		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
+		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "",
+		 dma_has_cap(DMA_CRC32C, dma_dev->cap_mask) ? "crc32c " : "");
 
 	dma_async_device_register(dma_dev);
 	return mv_chan;
@@ -1247,6 +1249,11 @@ static int mv_xor_probe(struct platform_device *pdev)
 
 	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");
 
+	dummy1_addr = dma_map_single(NULL, (void *)dummy1,
+				     MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
+	dummy2_addr = dma_map_single(NULL, (void *)dummy2,
+				     MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);
+
 	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
 	if (!xordev)
 		return -ENOMEM;
@@ -1298,10 +1305,10 @@ static int mv_xor_probe(struct platform_device *pdev)
 				dma_cap_set(DMA_MEMCPY, cap_mask);
 			if (of_property_read_bool(np, "dmacap,xor"))
 				dma_cap_set(DMA_XOR, cap_mask);
-			if (of_property_read_bool(np, "dmacap,memset"))
-				dma_cap_set(DMA_MEMSET, cap_mask);
 			if (of_property_read_bool(np, "dmacap,interrupt"))
 				dma_cap_set(DMA_INTERRUPT, cap_mask);
+			if (of_property_read_bool(np, "dmacap,crc32c"))
+				dma_cap_set(DMA_CRC32C, cap_mask);
 
 			irq = irq_of_parse_and_map(np, 0);
 			if (!irq) {
@@ -1384,6 +1391,49 @@ static int mv_xor_remove(struct platform_device *pdev)
 	return 0;
 }
 
+void mv_xor_shutdown(struct platform_device *pdev)
+{
+	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
+
+	if (!IS_ERR(xordev->clk)) {
+		clk_disable_unprepare(xordev->clk);
+		clk_put(xordev->clk);
+	}
+}
+
+static int mv_xor_suspend(struct platform_device *dev, pm_message_t state)
+{
+	struct mv_xor_device *xordev = platform_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
+		if (xordev->channels[i]) {
+			struct mv_xor_chan *mv_chan = xordev->channels[i];
+
+			mv_chan->suspend_regs.config = readl_relaxed(XOR_CONFIG(mv_chan));
+			mv_chan->suspend_regs.int_mask = readl_relaxed(XOR_INTR_MASK(mv_chan));
+		}
+	}
+	return 0;
+}
+
+static int mv_xor_resume(struct platform_device *dev)
+{
+	struct mv_xor_device *xordev = platform_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
+		if (xordev->channels[i]) {
+			struct mv_xor_chan *mv_chan = xordev->channels[i];
+
+			writel_relaxed(mv_chan->suspend_regs.config, XOR_CONFIG(mv_chan));
+			writel_relaxed(mv_chan->suspend_regs.int_mask, XOR_INTR_MASK(mv_chan));
+		}
+	}
+
+	return 0;
+}
+
 #ifdef CONFIG_OF
 static struct of_device_id mv_xor_dt_ids[] = {
        { .compatible = "marvell,orion-xor", },
@@ -1395,6 +1445,9 @@ MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
 static struct platform_driver mv_xor_driver = {
 	.probe		= mv_xor_probe,
 	.remove		= mv_xor_remove,
+	.shutdown	= mv_xor_shutdown,
+	.suspend	= mv_xor_suspend,
+	.resume		= mv_xor_resume,
 	.driver		= {
 		.owner	        = THIS_MODULE,
 		.name	        = MV_XOR_NAME,
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index c632a4761fcf..51f63ef5ff69 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -24,14 +24,18 @@
 #include <linux/interrupt.h>
 
 #define USE_TIMER
-#define MV_XOR_POOL_SIZE		PAGE_SIZE
 #define MV_XOR_SLOT_SIZE		64
+/* allocating 3072 descriptors for each pool */
+#define MV_XOR_POOL_SIZE		(MV_XOR_SLOT_SIZE*3072)
 #define MV_XOR_THRESHOLD		1
 #define MV_XOR_MAX_CHANNELS             2
 
+/* Values for the XOR_CONFIG register */
 #define XOR_OPERATION_MODE_XOR		0
+#define XOR_OPERATION_MODE_CRC32C	1
 #define XOR_OPERATION_MODE_MEMCPY	2
-#define XOR_OPERATION_MODE_MEMSET	4
+#define XOR_DESCRIPTOR_SWAP		BIT(14)
+#define XOR_DESC_SUCCESS		0x40000000
 
 #define XOR_CURR_DESC(chan)	(chan->mmr_base + 0x210 + (chan->idx * 4))
 #define XOR_NEXT_DESC(chan)	(chan->mmr_base + 0x200 + (chan->idx * 4))
@@ -47,7 +51,7 @@
 #define XOR_INTR_MASK(chan)	(chan->mmr_base + 0x40)
 #define XOR_ERROR_CAUSE(chan)	(chan->mmr_base + 0x50)
 #define XOR_ERROR_ADDR(chan)	(chan->mmr_base + 0x60)
-#define XOR_INTR_MASK_VALUE	0x3F5
+#define XOR_INTR_MASK_VALUE	0x3F7
 
 #define WINDOW_BASE(w)		(0x250 + ((w) << 2))
 #define WINDOW_SIZE(w)		(0x270 + ((w) << 2))
@@ -62,6 +66,13 @@ struct mv_xor_device {
 	struct mv_xor_chan   *channels[MV_XOR_MAX_CHANNELS];
 };
 
+
+/* Stores certain registers during suspend to RAM */
+struct mv_xor_suspend_regs {
+	int config;
+	int int_mask;
+};
+
 /**
  * struct mv_xor_chan - internal representation of a XOR channel
  * @pending: allows batching of hardware operations
@@ -69,11 +80,11 @@ struct mv_xor_device {
  * @mmr_base: memory mapped register base
  * @idx: the index of the xor channel
  * @chain: device chain view of the descriptors
+ * @free_slots: free slots usable by the channel
+ * @allocated_slots: slots allocated by the driver
  * @completed_slots: slots completed by HW but still need to be acked
  * @device: parent device
  * @common: common dmaengine channel object members
- * @last_used: place holder for allocation to continue from where it left off
- * @all_slots: complete domain of slots usable by the channel
  * @slots_allocated: records the actual size of the descriptor slot pool
  * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
  */
@@ -84,15 +95,16 @@ struct mv_xor_chan {
 	unsigned int		idx;
 	int                     irq;
 	enum dma_transaction_type	current_type;
+	struct mv_xor_suspend_regs	suspend_regs;
 	struct list_head	chain;
+	struct list_head	free_slots;
+	struct list_head	allocated_slots;
 	struct list_head	completed_slots;
 	dma_addr_t		dma_desc_pool;
 	void			*dma_desc_pool_virt;
 	size_t                  pool_size;
 	struct dma_device	dmadev;
 	struct dma_chan		dmachan;
-	struct mv_xor_desc_slot	*last_used;
-	struct list_head	all_slots;
 	int			slots_allocated;
 	struct tasklet_struct	irq_tasklet;
 #ifdef USE_TIMER
@@ -103,36 +115,25 @@ struct mv_xor_chan {
 
 /**
  * struct mv_xor_desc_slot - software descriptor
- * @slot_node: node on the mv_xor_chan.all_slots list
- * @chain_node: node on the mv_xor_chan.chain list
- * @completed_node: node on the mv_xor_chan.completed_slots list
+ * @node: node on the mv_xor_chan lists
  * @hw_desc: virtual address of the hardware descriptor chain
  * @phys: hardware address of the hardware descriptor chain
- * @group_head: first operation in a transaction
- * @slot_cnt: total slots used in an transaction (group of operations)
- * @slots_per_op: number of slots per operation
+ * @slot_used: slot in use or not
  * @idx: pool index
  * @unmap_src_cnt: number of xor sources
  * @unmap_len: transaction bytecount
- * @tx_list: list of slots that make up a multi-descriptor transaction
  * @async_tx: support for the async_tx api
  * @xor_check_result: result of zero sum
  * @crc32_result: result crc calculation
  */
 struct mv_xor_desc_slot {
-	struct list_head	slot_node;
-	struct list_head	chain_node;
-	struct list_head	completed_node;
+	struct list_head	node;
 	enum dma_transaction_type	type;
 	void			*hw_desc;
-	struct mv_xor_desc_slot	*group_head;
-	u16			slot_cnt;
-	u16			slots_per_op;
 	u16			idx;
 	u16			unmap_src_cnt;
 	u32			value;
 	size_t			unmap_len;
-	struct list_head	tx_list;
 	struct dma_async_tx_descriptor	async_tx;
 	union {
 		u32		*xor_check_result;
@@ -144,7 +145,16 @@ struct mv_xor_desc_slot {
 #endif
 };
 
-/* This structure describes XOR descriptor size 64bytes	*/
+/*
+ * This structure describes XOR descriptor size 64bytes. The
+ * mv_phy_src_idx() macro must be used when indexing the values of the
+ * phy_src_addr[] array. This is due to the fact that the 'descriptor
+ * swap' feature, used on big endian systems, swaps descriptors data
+ * within blocks of 8 bytes. So two consecutive values of the
+ * phy_src_addr[] array are actually swapped in big-endian, which
+ * explains the different mv_phy_src_idx() implementation.
+ */
+#if defined(__LITTLE_ENDIAN)
 struct mv_xor_desc {
 	u32 status;		/* descriptor execution status */
 	u32 crc32_result;	/* result of CRC-32 calculation */
@@ -156,6 +166,21 @@ struct mv_xor_desc {
 	u32 reserved0;
 	u32 reserved1;
 };
+#define mv_phy_src_idx(src_idx) (src_idx)
+#else
+struct mv_xor_desc {
+	u32 crc32_result;	/* result of CRC-32 calculation */
+	u32 status;		/* descriptor execution status */
+	u32 phy_next_desc;	/* next descriptor address pointer */
+	u32 desc_command;	/* type of operation to be carried out */
+	u32 phy_dest_addr;	/* destination block address */
+	u32 byte_count;		/* size of src/dst blocks in bytes */
+	u32 phy_src_addr[8];	/* source block addresses */
+	u32 reserved1;
+	u32 reserved0;
+};
+#define mv_phy_src_idx(src_idx) (src_idx ^ 1)
+#endif
 
 #define to_mv_sw_desc(addr_hw_desc)		\
 	container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 5c27da6a2853..39ab4bbc063d 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -44,6 +44,7 @@
 #include <linux/of_device.h>
 #include <linux/clk.h>
 #include <linux/pinctrl/consumer.h>
+#include <linux/irqchip/chained_irq.h>
 
 /*
  * GPIO unit register offsets.
@@ -82,6 +83,12 @@ struct mvebu_gpio_chip {
 	int		   irqbase;
 	struct irq_domain *domain;
 	int                soc_variant;
+	u32		out_reg;
+	u32		io_conf_reg;
+	u32		blink_en_reg;
+	u32		in_pol_reg;
+	u32		edge_mask_regs[4];
+	u32		level_mask_regs[4];
 };
 
 /*
@@ -438,12 +445,15 @@ static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type)
 static void mvebu_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
 {
 	struct mvebu_gpio_chip *mvchip = irq_get_handler_data(irq);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
 	u32 cause, type;
 	int i;
 
 	if (mvchip == NULL)
 		return;
 
+	chained_irq_enter(chip, desc);
+
 	cause = readl_relaxed(mvebu_gpioreg_data_in(mvchip)) &
 		readl_relaxed(mvebu_gpioreg_level_mask(mvchip));
 	cause |= readl_relaxed(mvebu_gpioreg_edge_cause(mvchip)) &
@@ -466,8 +476,11 @@ static void mvebu_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
 			polarity ^= 1 << i;
 			writel_relaxed(polarity, mvebu_gpioreg_in_pol(mvchip));
 		}
+
 		generic_handle_irq(irq);
 	}
+
+	chained_irq_exit(chip, desc);
 }
 
 #ifdef CONFIG_DEBUG_FS
@@ -547,6 +560,93 @@ static struct of_device_id mvebu_gpio_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, mvebu_gpio_of_match);
 
+static int mvebu_gpio_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct mvebu_gpio_chip *mvchip = platform_get_drvdata(pdev);
+	int i;
+
+	mvchip->out_reg = readl(mvebu_gpioreg_out(mvchip));
+	mvchip->io_conf_reg = readl(mvebu_gpioreg_io_conf(mvchip));
+	mvchip->blink_en_reg = readl(mvebu_gpioreg_blink(mvchip));
+	mvchip->in_pol_reg = readl(mvebu_gpioreg_in_pol(mvchip));
+
+	switch (mvchip->soc_variant) {
+	case MVEBU_GPIO_SOC_VARIANT_ORION:
+		mvchip->edge_mask_regs[0] =
+			readl(mvchip->membase + GPIO_EDGE_MASK_OFF);
+		mvchip->level_mask_regs[0] =
+			readl(mvchip->membase + GPIO_LEVEL_MASK_OFF);
+		break;
+	case MVEBU_GPIO_SOC_VARIANT_MV78200:
+		for (i = 0; i < 2; i++) {
+			mvchip->edge_mask_regs[i] =
+				readl(mvchip->membase +
+				      GPIO_EDGE_MASK_MV78200_OFF(i));
+			mvchip->level_mask_regs[i] =
+				readl(mvchip->membase +
+				      GPIO_LEVEL_MASK_MV78200_OFF(i));
+		}
+		break;
+	case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
+		for (i = 0; i < 4; i++) {
+			mvchip->edge_mask_regs[i] =
+				readl(mvchip->membase +
+				      GPIO_EDGE_MASK_ARMADAXP_OFF(i));
+			mvchip->level_mask_regs[i] =
+				readl(mvchip->membase +
+				      GPIO_LEVEL_MASK_ARMADAXP_OFF(i));
+		}
+		break;
+	default:
+		BUG();
+	}
+
+	return 0;
+}
+
+static int mvebu_gpio_resume(struct platform_device *pdev)
+{
+	struct mvebu_gpio_chip *mvchip = platform_get_drvdata(pdev);
+	int i;
+
+	writel(mvchip->out_reg, mvebu_gpioreg_out(mvchip));
+	writel(mvchip->io_conf_reg, mvebu_gpioreg_io_conf(mvchip));
+	writel(mvchip->blink_en_reg, mvebu_gpioreg_blink(mvchip));
+	writel(mvchip->in_pol_reg, mvebu_gpioreg_in_pol(mvchip));
+
+	switch (mvchip->soc_variant) {
+	case MVEBU_GPIO_SOC_VARIANT_ORION:
+		writel(mvchip->edge_mask_regs[0],
+		       mvchip->membase + GPIO_EDGE_MASK_OFF);
+		writel(mvchip->level_mask_regs[0],
+		       mvchip->membase + GPIO_LEVEL_MASK_OFF);
+		break;
+	case MVEBU_GPIO_SOC_VARIANT_MV78200:
+		for (i = 0; i < 2; i++) {
+			writel(mvchip->edge_mask_regs[i],
+			       mvchip->membase + GPIO_EDGE_MASK_MV78200_OFF(i));
+			writel(mvchip->level_mask_regs[i],
+			       mvchip->membase +
+			       GPIO_LEVEL_MASK_MV78200_OFF(i));
+		}
+		break;
+	case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
+		for (i = 0; i < 4; i++) {
+			writel(mvchip->edge_mask_regs[i],
+			       mvchip->membase +
+			       GPIO_EDGE_MASK_ARMADAXP_OFF(i));
+			writel(mvchip->level_mask_regs[i],
+			       mvchip->membase +
+			       GPIO_LEVEL_MASK_ARMADAXP_OFF(i));
+		}
+		break;
+	default:
+		BUG();
+	}
+
+	return 0;
+}
+
 static int mvebu_gpio_probe(struct platform_device *pdev)
 {
 	struct mvebu_gpio_chip *mvchip;
@@ -578,6 +678,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	}
 
+	platform_set_drvdata(pdev, mvchip);
+
 	if (of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios)) {
 		dev_err(&pdev->dev, "Missing ngpios OF property\n");
 		return -ENODEV;
@@ -735,6 +837,8 @@ static struct platform_driver mvebu_gpio_driver = {
 		.of_match_table = mvebu_gpio_of_match,
 	},
 	.probe		= mvebu_gpio_probe,
+	.suspend        = mvebu_gpio_suspend,
+	.resume         = mvebu_gpio_resume,
 };
 
 static int __init mvebu_gpio_init(void)
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 1a3abd6a0bfc..ac12609558f9 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -19,19 +19,16 @@
 #include <linux/platform_device.h>
 #include <linux/io.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/of_irq.h>
 #include <linux/of_i2c.h>
 #include <linux/clk.h>
 #include <linux/err.h>
+#include <linux/delay.h>
 
-/* Register defines */
-#define	MV64XXX_I2C_REG_SLAVE_ADDR			0x00
-#define	MV64XXX_I2C_REG_DATA				0x04
-#define	MV64XXX_I2C_REG_CONTROL				0x08
-#define	MV64XXX_I2C_REG_STATUS				0x0c
-#define	MV64XXX_I2C_REG_BAUD				0x0c
-#define	MV64XXX_I2C_REG_EXT_SLAVE_ADDR			0x10
-#define	MV64XXX_I2C_REG_SOFT_RESET			0x1c
+#define MV64XXX_I2C_ADDR_ADDR(val)			((val & 0x7f) << 1)
+#define MV64XXX_I2C_BAUD_DIV_N(val)			(val & 0x7)
+#define MV64XXX_I2C_BAUD_DIV_M(val)			((val & 0xf) << 3)
 
 #define	MV64XXX_I2C_REG_CONTROL_ACK			0x00000004
 #define	MV64XXX_I2C_REG_CONTROL_IFLG			0x00000008
@@ -59,6 +56,32 @@
 #define	MV64XXX_I2C_STATUS_MAST_RD_ADDR_2_NO_ACK	0xe8
 #define	MV64XXX_I2C_STATUS_NO_STATUS			0xf8
 
+/* Register defines (I2C bridge) */
+#define	MV64XXX_I2C_REG_TX_DATA_LO			0xc0
+#define	MV64XXX_I2C_REG_TX_DATA_HI			0xc4
+#define	MV64XXX_I2C_REG_RX_DATA_LO			0xc8
+#define	MV64XXX_I2C_REG_RX_DATA_HI			0xcc
+#define	MV64XXX_I2C_REG_BRIDGE_CONTROL			0xd0
+#define	MV64XXX_I2C_REG_BRIDGE_STATUS			0xd4
+#define	MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE		0xd8
+#define	MV64XXX_I2C_REG_BRIDGE_INTR_MASK		0xdC
+#define	MV64XXX_I2C_REG_BRIDGE_TIMING			0xe0
+
+/* Bridge Control values */
+#define	MV64XXX_I2C_BRIDGE_CONTROL_WR			0x00000001
+#define	MV64XXX_I2C_BRIDGE_CONTROL_RD			0x00000002
+#define	MV64XXX_I2C_BRIDGE_CONTROL_ADDR_SHIFT		2
+#define	MV64XXX_I2C_BRIDGE_CONTROL_ADDR_EXT		0x00001000
+#define	MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT	13
+#define	MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT	16
+#define	MV64XXX_I2C_BRIDGE_CONTROL_ENABLE		0x00080000
+
+/* Bridge Status values */
+#define	MV64XXX_I2C_BRIDGE_STATUS_ERROR			0x00000001
+#define	MV64XXX_I2C_STATUS_OFFLOAD_ERROR		0xf0000001
+#define	MV64XXX_I2C_STATUS_OFFLOAD_OK			0xf0000000
+
+
 /* Driver states */
 enum {
 	MV64XXX_I2C_STATE_INVALID,
@@ -77,23 +100,36 @@ enum {
 	MV64XXX_I2C_ACTION_CONTINUE,
 	MV64XXX_I2C_ACTION_SEND_START,
 	MV64XXX_I2C_ACTION_SEND_RESTART,
+	MV64XXX_I2C_ACTION_OFFLOAD_RESTART,
 	MV64XXX_I2C_ACTION_SEND_ADDR_1,
 	MV64XXX_I2C_ACTION_SEND_ADDR_2,
 	MV64XXX_I2C_ACTION_SEND_DATA,
 	MV64XXX_I2C_ACTION_RCV_DATA,
 	MV64XXX_I2C_ACTION_RCV_DATA_STOP,
 	MV64XXX_I2C_ACTION_SEND_STOP,
+	MV64XXX_I2C_ACTION_OFFLOAD_SEND_STOP,
+};
+
+struct mv64xxx_i2c_regs {
+	u8	addr;
+	u8	ext_addr;
+	u8	data;
+	u8	control;
+	u8	status;
+	u8	clock;
+	u8	soft_reset;
 };
 
 struct mv64xxx_i2c_data {
+	struct i2c_msg		*msgs;
+	int			num_msgs;
 	int			irq;
 	u32			state;
 	u32			action;
 	u32			aborting;
 	u32			cntl_bits;
 	void __iomem		*reg_base;
-	u32			reg_base_p;
-	u32			reg_size;
+	struct mv64xxx_i2c_regs	reg_offsets;
 	u32			addr1;
 	u32			addr2;
 	u32			bytes_left;
@@ -110,8 +146,121 @@ struct mv64xxx_i2c_data {
 	spinlock_t		lock;
 	struct i2c_msg		*msg;
 	struct i2c_adapter	adapter;
+	bool			offload_enabled;
+/* 5us delay in order to avoid repeated start timing violation */
+	bool			errata_delay;
+};
+
+static struct mv64xxx_i2c_regs mv64xxx_i2c_regs_mv64xxx = {
+	.addr		= 0x00,
+	.ext_addr	= 0x10,
+	.data		= 0x04,
+	.control	= 0x08,
+	.status		= 0x0c,
+	.clock		= 0x0c,
+	.soft_reset	= 0x1c,
 };
 
+static void
+mv64xxx_i2c_prepare_for_io(struct mv64xxx_i2c_data *drv_data,
+	struct i2c_msg *msg)
+{
+	u32	dir = 0;
+
+	drv_data->msg = msg;
+	drv_data->byte_posn = 0;
+	drv_data->bytes_left = msg->len;
+	drv_data->aborting = 0;
+	drv_data->rc = 0;
+	drv_data->cntl_bits = MV64XXX_I2C_REG_CONTROL_ACK |
+		MV64XXX_I2C_REG_CONTROL_INTEN | MV64XXX_I2C_REG_CONTROL_TWSIEN;
+
+	if (msg->flags & I2C_M_RD)
+		dir = 1;
+
+	if (msg->flags & I2C_M_TEN) {
+		drv_data->addr1 = 0xf0 | (((u32)msg->addr & 0x300) >> 7) | dir;
+		drv_data->addr2 = (u32)msg->addr & 0xff;
+	} else {
+		drv_data->addr1 = MV64XXX_I2C_ADDR_ADDR((u32)msg->addr) | dir;
+		drv_data->addr2 = 0;
+	}
+}
+
+static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
+{
+	unsigned long data_reg_hi = 0;
+	unsigned long data_reg_lo = 0;
+	unsigned long ctrl_reg;
+	struct i2c_msg *msg = drv_data->msgs;
+
+	if (!drv_data->offload_enabled)
+		return -EOPNOTSUPP;
+
+	drv_data->msg = msg;
+	drv_data->byte_posn = 0;
+	drv_data->bytes_left = msg->len;
+	drv_data->aborting = 0;
+	drv_data->rc = 0;
+	/* Only regular transactions can be offloaded */
+	if ((msg->flags & ~(I2C_M_TEN | I2C_M_RD)) != 0)
+		return -EINVAL;
+
+	/* Only 1-8 byte transfers can be offloaded */
+	if (msg->len < 1 || msg->len > 8)
+		return -EINVAL;
+
+	/* Build transaction */
+	ctrl_reg = MV64XXX_I2C_BRIDGE_CONTROL_ENABLE |
+		   (msg->addr << MV64XXX_I2C_BRIDGE_CONTROL_ADDR_SHIFT);
+
+	if ((msg->flags & I2C_M_TEN) != 0)
+		ctrl_reg |=  MV64XXX_I2C_BRIDGE_CONTROL_ADDR_EXT;
+
+	if ((msg->flags & I2C_M_RD) == 0) {
+		u8 local_buf[8] = { 0 };
+
+		memcpy(local_buf, msg->buf, msg->len);
+		data_reg_lo = cpu_to_le32(*((u32 *)local_buf));
+		data_reg_hi = cpu_to_le32(*((u32 *)(local_buf+4)));
+
+		ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR |
+		    (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT;
+
+		writel_relaxed(data_reg_lo,
+			drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO);
+		writel_relaxed(data_reg_hi,
+			drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI);
+
+	} else {
+		ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_RD |
+		    (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT;
+	}
+
+	/* Execute transaction */
+	writel(ctrl_reg, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL);
+
+	return 0;
+}
+
+static void
+mv64xxx_i2c_update_offload_data(struct mv64xxx_i2c_data *drv_data)
+{
+	struct i2c_msg *msg = drv_data->msg;
+
+	if (msg->flags & I2C_M_RD) {
+		u32 data_reg_lo = readl(drv_data->reg_base +
+				MV64XXX_I2C_REG_RX_DATA_LO);
+		u32 data_reg_hi = readl(drv_data->reg_base +
+				MV64XXX_I2C_REG_RX_DATA_HI);
+		u8 local_buf[8] = { 0 };
+
+		*((u32 *)local_buf) = le32_to_cpu(data_reg_lo);
+		*((u32 *)(local_buf+4)) = le32_to_cpu(data_reg_hi);
+		memcpy(msg->buf, local_buf, msg->len);
+	}
+
+}
 /*
  *****************************************************************************
  *
@@ -124,13 +273,22 @@ struct mv64xxx_i2c_data {
 static void
 mv64xxx_i2c_hw_init(struct mv64xxx_i2c_data *drv_data)
 {
-	writel(0, drv_data->reg_base + MV64XXX_I2C_REG_SOFT_RESET);
-	writel((((drv_data->freq_m & 0xf) << 3) | (drv_data->freq_n & 0x7)),
-		drv_data->reg_base + MV64XXX_I2C_REG_BAUD);
-	writel(0, drv_data->reg_base + MV64XXX_I2C_REG_SLAVE_ADDR);
-	writel(0, drv_data->reg_base + MV64XXX_I2C_REG_EXT_SLAVE_ADDR);
+	if (drv_data->offload_enabled) {
+		writel(0, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL);
+		writel(0, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_TIMING);
+		writel(0, drv_data->reg_base +
+			MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE);
+		writel(0, drv_data->reg_base +
+			MV64XXX_I2C_REG_BRIDGE_INTR_MASK);
+	}
+
+	writel(0, drv_data->reg_base + drv_data->reg_offsets.soft_reset);
+	writel(MV64XXX_I2C_BAUD_DIV_M(drv_data->freq_m) | MV64XXX_I2C_BAUD_DIV_N(drv_data->freq_n),
+		drv_data->reg_base + drv_data->reg_offsets.clock);
+	writel(0, drv_data->reg_base + drv_data->reg_offsets.addr);
+	writel(0, drv_data->reg_base + drv_data->reg_offsets.ext_addr);
 	writel(MV64XXX_I2C_REG_CONTROL_TWSIEN | MV64XXX_I2C_REG_CONTROL_STOP,
-		drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
+		drv_data->reg_base + drv_data->reg_offsets.control);
 	drv_data->state = MV64XXX_I2C_STATE_IDLE;
 }
 
@@ -170,7 +328,7 @@ mv64xxx_i2c_fsm(struct mv64xxx_i2c_data *drv_data, u32 status)
 		if ((drv_data->bytes_left == 0)
 				|| (drv_data->aborting
 					&& (drv_data->byte_posn != 0))) {
-			if (drv_data->send_stop) {
+			if (drv_data->send_stop || drv_data->aborting) {
 				drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP;
 				drv_data->state = MV64XXX_I2C_STATE_IDLE;
 			} else {
@@ -227,7 +385,17 @@ mv64xxx_i2c_fsm(struct mv64xxx_i2c_data *drv_data, u32 status)
 		/* Doesn't seem to be a device at other end */
 		drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP;
 		drv_data->state = MV64XXX_I2C_STATE_IDLE;
-		drv_data->rc = -ENODEV;
+		drv_data->rc = -ENXIO;
+		break;
+
+	case MV64XXX_I2C_STATUS_OFFLOAD_OK:
+		if (drv_data->send_stop || drv_data->aborting) {
+			drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_SEND_STOP;
+			drv_data->state = MV64XXX_I2C_STATE_IDLE;
+		} else {
+			drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_RESTART;
+			drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_RESTART;
+		}
 		break;
 
 	default:
@@ -246,60 +414,90 @@ static void
 mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
 {
 	switch(drv_data->action) {
+	case MV64XXX_I2C_ACTION_OFFLOAD_RESTART:
+		mv64xxx_i2c_update_offload_data(drv_data);
+		writel(0, drv_data->reg_base +	MV64XXX_I2C_REG_BRIDGE_CONTROL);
+		writel(0, drv_data->reg_base +
+			MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE);
+		/* FALLTHRU */
 	case MV64XXX_I2C_ACTION_SEND_RESTART:
-		drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START;
-		drv_data->cntl_bits &= ~MV64XXX_I2C_REG_CONTROL_INTEN;
-		writel(drv_data->cntl_bits,
-			drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
-		drv_data->block = 0;
-		wake_up(&drv_data->waitq);
+		/* We should only get here if we have further messages */
+		BUG_ON(drv_data->num_msgs == 0);
+
+		drv_data->msgs++;
+		drv_data->num_msgs--;
+		if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
+			drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START;
+			writel(drv_data->cntl_bits,
+			drv_data->reg_base + drv_data->reg_offsets.control);
+
+			/* Setup for the next message */
+			mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
+		}
+		if (drv_data->errata_delay)
+			udelay(5);
+
+		/*
+		 * We're never at the start of the message here, and by this
+		 * time it's already too late to do any protocol mangling.
+		 * Thankfully, do not advertise support for that feature.
+		 */
+		drv_data->send_stop = drv_data->num_msgs == 1;
 		break;
 
 	case MV64XXX_I2C_ACTION_CONTINUE:
 		writel(drv_data->cntl_bits,
-			drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
+			drv_data->reg_base + drv_data->reg_offsets.control);
 		break;
 
 	case MV64XXX_I2C_ACTION_SEND_START:
-		writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
-			drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
+		/* Can we offload this msg ? */
+		if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
+			/* No, switch to standard path */
+			mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
+			writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
+				drv_data->reg_base + drv_data->reg_offsets.control);
+		}
 		break;
 
 	case MV64XXX_I2C_ACTION_SEND_ADDR_1:
 		writel(drv_data->addr1,
-			drv_data->reg_base + MV64XXX_I2C_REG_DATA);
+			drv_data->reg_base + drv_data->reg_offsets.data);
 		writel(drv_data->cntl_bits,
-			drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
+			drv_data->reg_base + drv_data->reg_offsets.control);
 		break;
 
 	case MV64XXX_I2C_ACTION_SEND_ADDR_2:
 		writel(drv_data->addr2,
-			drv_data->reg_base + MV64XXX_I2C_REG_DATA);
+			drv_data->reg_base + drv_data->reg_offsets.data);
 		writel(drv_data->cntl_bits,
-			drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
+			drv_data->reg_base + drv_data->reg_offsets.control);
 		break;
 
 	case MV64XXX_I2C_ACTION_SEND_DATA:
 		writel(drv_data->msg->buf[drv_data->byte_posn++],
-			drv_data->reg_base + MV64XXX_I2C_REG_DATA);
+			drv_data->reg_base + drv_data->reg_offsets.data);
 		writel(drv_data->cntl_bits,
-			drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
+			drv_data->reg_base + drv_data->reg_offsets.control);
 		break;
 
 	case MV64XXX_I2C_ACTION_RCV_DATA:
 		drv_data->msg->buf[drv_data->byte_posn++] =
-			readl(drv_data->reg_base + MV64XXX_I2C_REG_DATA);
+			readl(drv_data->reg_base + drv_data->reg_offsets.data);
 		writel(drv_data->cntl_bits,
-			drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
+			drv_data->reg_base + drv_data->reg_offsets.control);
 		break;
 
 	case MV64XXX_I2C_ACTION_RCV_DATA_STOP:
 		drv_data->msg->buf[drv_data->byte_posn++] =
-			readl(drv_data->reg_base + MV64XXX_I2C_REG_DATA);
+			readl(drv_data->reg_base + drv_data->reg_offsets.data);
 		drv_data->cntl_bits &= ~MV64XXX_I2C_REG_CONTROL_INTEN;
 		writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP,
-			drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
+			drv_data->reg_base + drv_data->reg_offsets.control);
 		drv_data->block = 0;
+		if (drv_data->errata_delay)
+			udelay(5);
+
 		wake_up(&drv_data->waitq);
 		break;
 
@@ -309,11 +507,21 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
 			"mv64xxx_i2c_do_action: Invalid action: %d\n",
 			drv_data->action);
 		drv_data->rc = -EIO;
+
 		/* FALLTHRU */
 	case MV64XXX_I2C_ACTION_SEND_STOP:
 		drv_data->cntl_bits &= ~MV64XXX_I2C_REG_CONTROL_INTEN;
 		writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP,
-			drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
+			drv_data->reg_base + drv_data->reg_offsets.control);
+		drv_data->block = 0;
+		wake_up(&drv_data->waitq);
+		break;
+
+	case MV64XXX_I2C_ACTION_OFFLOAD_SEND_STOP:
+		mv64xxx_i2c_update_offload_data(drv_data);
+		writel(0, drv_data->reg_base +	MV64XXX_I2C_REG_BRIDGE_CONTROL);
+		writel(0, drv_data->reg_base +
+			MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE);
 		drv_data->block = 0;
 		wake_up(&drv_data->waitq);
 		break;
@@ -329,9 +537,24 @@ mv64xxx_i2c_intr(int irq, void *dev_id)
 	irqreturn_t	rc = IRQ_NONE;
 
 	spin_lock_irqsave(&drv_data->lock, flags);
-	while (readl(drv_data->reg_base + MV64XXX_I2C_REG_CONTROL) &
+
+	if (drv_data->offload_enabled) {
+		while (readl(drv_data->reg_base +
+				MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE)) {
+			int reg_status = readl(drv_data->reg_base +
+					MV64XXX_I2C_REG_BRIDGE_STATUS);
+			if (reg_status & MV64XXX_I2C_BRIDGE_STATUS_ERROR)
+				status = MV64XXX_I2C_STATUS_OFFLOAD_ERROR;
+			else
+				status = MV64XXX_I2C_STATUS_OFFLOAD_OK;
+			mv64xxx_i2c_fsm(drv_data, status);
+			mv64xxx_i2c_do_action(drv_data);
+			rc = IRQ_HANDLED;
+		}
+	}
+	while (readl(drv_data->reg_base + drv_data->reg_offsets.control) &
 						MV64XXX_I2C_REG_CONTROL_IFLG) {
-		status = readl(drv_data->reg_base + MV64XXX_I2C_REG_STATUS);
+		status = readl(drv_data->reg_base + drv_data->reg_offsets.status);
 		mv64xxx_i2c_fsm(drv_data, status);
 		mv64xxx_i2c_do_action(drv_data);
 		rc = IRQ_HANDLED;
@@ -348,32 +571,6 @@ mv64xxx_i2c_intr(int irq, void *dev_id)
  *
  *****************************************************************************
  */
-static void
-mv64xxx_i2c_prepare_for_io(struct mv64xxx_i2c_data *drv_data,
-	struct i2c_msg *msg)
-{
-	u32	dir = 0;
-
-	drv_data->msg = msg;
-	drv_data->byte_posn = 0;
-	drv_data->bytes_left = msg->len;
-	drv_data->aborting = 0;
-	drv_data->rc = 0;
-	drv_data->cntl_bits = MV64XXX_I2C_REG_CONTROL_ACK |
-		MV64XXX_I2C_REG_CONTROL_INTEN | MV64XXX_I2C_REG_CONTROL_TWSIEN;
-
-	if (msg->flags & I2C_M_RD)
-		dir = 1;
-
-	if (msg->flags & I2C_M_TEN) {
-		drv_data->addr1 = 0xf0 | (((u32)msg->addr & 0x300) >> 7) | dir;
-		drv_data->addr2 = (u32)msg->addr & 0xff;
-	} else {
-		drv_data->addr1 = ((u32)msg->addr & 0x7f) << 1 | dir;
-		drv_data->addr2 = 0;
-	}
-}
-
 static void
 mv64xxx_i2c_wait_for_completion(struct mv64xxx_i2c_data *drv_data)
 {
@@ -414,36 +611,14 @@ mv64xxx_i2c_wait_for_completion(struct mv64xxx_i2c_data *drv_data)
 
 static int
 mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg,
-				int is_first, int is_last)
+				int is_last)
 {
 	unsigned long	flags;
 
 	spin_lock_irqsave(&drv_data->lock, flags);
-	mv64xxx_i2c_prepare_for_io(drv_data, msg);
 
-	if (unlikely(msg->flags & I2C_M_NOSTART)) { /* Skip start/addr phases */
-		if (drv_data->msg->flags & I2C_M_RD) {
-			/* No action to do, wait for slave to send a byte */
-			drv_data->action = MV64XXX_I2C_ACTION_CONTINUE;
-			drv_data->state =
-				MV64XXX_I2C_STATE_WAITING_FOR_SLAVE_DATA;
-		} else {
-			drv_data->action = MV64XXX_I2C_ACTION_SEND_DATA;
-			drv_data->state =
-				MV64XXX_I2C_STATE_WAITING_FOR_SLAVE_ACK;
-			drv_data->bytes_left--;
-		}
-	} else {
-		if (is_first) {
-			drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
-			drv_data->state =
-				MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
-		} else {
-			drv_data->action = MV64XXX_I2C_ACTION_SEND_ADDR_1;
-			drv_data->state =
-				MV64XXX_I2C_STATE_WAITING_FOR_ADDR_1_ACK;
-		}
-	}
+	drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
+	drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
 
 	drv_data->send_stop = is_last;
 	drv_data->block = 1;
@@ -471,16 +646,27 @@ static int
 mv64xxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
 {
 	struct mv64xxx_i2c_data *drv_data = i2c_get_adapdata(adap);
-	int	i, rc;
+	int rc, ret = num;
 
-	for (i = 0; i < num; i++) {
-		rc = mv64xxx_i2c_execute_msg(drv_data, &msgs[i],
-						i == 0, i + 1 == num);
-		if (rc < 0)
-			return rc;
-	}
+	BUG_ON(drv_data->msgs != NULL);
+	drv_data->msgs = msgs;
+	drv_data->num_msgs = num;
+
+	rc = mv64xxx_i2c_execute_msg(drv_data, &msgs[0], num == 1);
+	if (rc < 0)
+		ret = rc;
+
+	/* Sleep for >=5ms after sending the STOP condition which starts the
+	 * internal write cycle. This is needed while working with some EEPROMs
+	 * which max Twr = 5ms (Write Cycle Time).
+	 */
+	if (!(msgs->flags & I2C_M_RD))
+		usleep_range(5000, 5500);
+
+	drv_data->num_msgs = 0;
+	drv_data->msgs = NULL;
 
-	return num;
+	return ret;
 }
 
 static const struct i2c_algorithm mv64xxx_i2c_algo = {
@@ -495,39 +681,13 @@ static const struct i2c_algorithm mv64xxx_i2c_algo = {
  *
  *****************************************************************************
  */
-static int
-mv64xxx_i2c_map_regs(struct platform_device *pd,
-	struct mv64xxx_i2c_data *drv_data)
-{
-	int size;
-	struct resource	*r = platform_get_resource(pd, IORESOURCE_MEM, 0);
-
-	if (!r)
-		return -ENODEV;
-
-	size = resource_size(r);
-
-	if (!request_mem_region(r->start, size, drv_data->adapter.name))
-		return -EBUSY;
-
-	drv_data->reg_base = ioremap(r->start, size);
-	drv_data->reg_base_p = r->start;
-	drv_data->reg_size = size;
-
-	return 0;
-}
-
-static void
-mv64xxx_i2c_unmap_regs(struct mv64xxx_i2c_data *drv_data)
-{
-	if (drv_data->reg_base) {
-		iounmap(drv_data->reg_base);
-		release_mem_region(drv_data->reg_base_p, drv_data->reg_size);
-	}
-
-	drv_data->reg_base = NULL;
-	drv_data->reg_base_p = 0;
-}
+static const struct of_device_id mv64xxx_i2c_of_match_table[] = {
+	{ .compatible = "marvell,mv64xxx-i2c", .data = &mv64xxx_i2c_regs_mv64xxx},
+	{ .compatible = "marvell,mv78230-i2c", .data = &mv64xxx_i2c_regs_mv64xxx},
+	{ .compatible = "marvell,mv78230-a0-i2c", .data = &mv64xxx_i2c_regs_mv64xxx},
+	{}
+};
+MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
 
 #ifdef CONFIG_OF
 static int
@@ -562,9 +722,11 @@ mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n,
 
 static int
 mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
-		  struct device_node *np)
+		  struct device *dev)
 {
-	u32 bus_freq, tclk;
+	const struct of_device_id *device;
+	struct device_node *np = dev->of_node;
+	u32 bus_freq, tclk, timeout;
 	int rc = 0;
 
 	/* CLK is mandatory when using DT to describe the i2c bus. We
@@ -580,7 +742,11 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
 		goto out;
 	}
 	tclk = clk_get_rate(drv_data->clk);
-	of_property_read_u32(np, "clock-frequency", &bus_freq);
+
+	rc = of_property_read_u32(np, "clock-frequency", &bus_freq);
+	if (rc)
+		bus_freq = 100000; /* 100kHz by default */
+
 	if (!mv64xxx_find_baud_factors(bus_freq, tclk,
 				       &drv_data->freq_n, &drv_data->freq_m)) {
 		rc = -EINVAL;
@@ -588,10 +754,29 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
 	}
 	drv_data->irq = irq_of_parse_and_map(np, 0);
 
-	/* Its not yet defined how timeouts will be specified in device tree.
-	 * So hard code the value to 1 second.
+	if (of_property_read_u32(np, "timeout-ms", &timeout))
+		timeout = 1000; /* 1000ms by default */
+	drv_data->adapter.timeout = msecs_to_jiffies(timeout);
+
+	device = of_match_device(mv64xxx_i2c_of_match_table, dev);
+	if (!device)
+		return -ENODEV;
+
+	memcpy(&drv_data->reg_offsets, device->data, sizeof(drv_data->reg_offsets));
+
+	/*
+	 * For controllers embedded in new SoCs activate the
+	 * Transaction Generator support and the errata fix.
 	 */
-	drv_data->adapter.timeout = HZ;
+	if (of_device_is_compatible(np, "marvell,mv78230-i2c")) {
+		drv_data->offload_enabled = true;
+		drv_data->errata_delay = true;
+	}
+
+	if (of_device_is_compatible(np, "marvell,mv78230-a0-i2c")) {
+		drv_data->offload_enabled = false;
+		drv_data->errata_delay = true;
+	}
 out:
 	return rc;
 #endif
@@ -599,7 +784,7 @@ out:
 #else /* CONFIG_OF */
 static int
 mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
-		  struct device_node *np)
+		  struct device *dev)
 {
 	return -ENODEV;
 }
@@ -610,19 +795,21 @@ mv64xxx_i2c_probe(struct platform_device *pd)
 {
 	struct mv64xxx_i2c_data		*drv_data;
 	struct mv64xxx_i2c_pdata	*pdata = pd->dev.platform_data;
+	struct resource	*r;
 	int	rc;
 
 	if ((!pdata && !pd->dev.of_node))
 		return -ENODEV;
 
-	drv_data = kzalloc(sizeof(struct mv64xxx_i2c_data), GFP_KERNEL);
+	drv_data = devm_kzalloc(&pd->dev, sizeof(struct mv64xxx_i2c_data),
+				GFP_KERNEL);
 	if (!drv_data)
 		return -ENOMEM;
 
-	if (mv64xxx_i2c_map_regs(pd, drv_data)) {
-		rc = -ENODEV;
-		goto exit_kfree;
-	}
+	r = platform_get_resource(pd, IORESOURCE_MEM, 0);
+	drv_data->reg_base = devm_ioremap_resource(&pd->dev, r);
+	if (IS_ERR(drv_data->reg_base))
+		return PTR_ERR(drv_data->reg_base);
 
 	strlcpy(drv_data->adapter.name, MV64XXX_I2C_CTLR_NAME " adapter",
 		sizeof(drv_data->adapter.name));
@@ -632,7 +819,7 @@ mv64xxx_i2c_probe(struct platform_device *pd)
 
 #if defined(CONFIG_HAVE_CLK)
 	/* Not all platforms have a clk */
-	drv_data->clk = clk_get(&pd->dev, NULL);
+	drv_data->clk = devm_clk_get(&pd->dev, NULL);
 	if (!IS_ERR(drv_data->clk)) {
 		clk_prepare(drv_data->clk);
 		clk_enable(drv_data->clk);
@@ -643,14 +830,16 @@ mv64xxx_i2c_probe(struct platform_device *pd)
 		drv_data->freq_n = pdata->freq_n;
 		drv_data->irq = platform_get_irq(pd, 0);
 		drv_data->adapter.timeout = msecs_to_jiffies(pdata->timeout);
+		drv_data->offload_enabled = false;
+		memcpy(&drv_data->reg_offsets, &mv64xxx_i2c_regs_mv64xxx, sizeof(drv_data->reg_offsets));
 	} else if (pd->dev.of_node) {
-		rc = mv64xxx_of_config(drv_data, pd->dev.of_node);
+		rc = mv64xxx_of_config(drv_data, &pd->dev);
 		if (rc)
-			goto exit_unmap_regs;
+			goto exit_clk;
 	}
 	if (drv_data->irq < 0) {
 		rc = -ENXIO;
-		goto exit_unmap_regs;
+		goto exit_clk;
 	}
 
 	drv_data->adapter.dev.parent = &pd->dev;
@@ -664,13 +853,13 @@ mv64xxx_i2c_probe(struct platform_device *pd)
 
 	mv64xxx_i2c_hw_init(drv_data);
 
-	if (request_irq(drv_data->irq, mv64xxx_i2c_intr, 0,
-			MV64XXX_I2C_CTLR_NAME, drv_data)) {
+	rc = request_irq(drv_data->irq, mv64xxx_i2c_intr, 0,
+			 MV64XXX_I2C_CTLR_NAME, drv_data);
+	if (rc) {
 		dev_err(&drv_data->adapter.dev,
-			"mv64xxx: Can't register intr handler irq: %d\n",
-			drv_data->irq);
-		rc = -EINVAL;
-		goto exit_unmap_regs;
+			"mv64xxx: Can't register intr handler irq%d: %d\n",
+			drv_data->irq, rc);
+		goto exit_clk;
 	} else if ((rc = i2c_add_numbered_adapter(&drv_data->adapter)) != 0) {
 		dev_err(&drv_data->adapter.dev,
 			"mv64xxx: Can't add i2c adapter, rc: %d\n", -rc);
@@ -681,9 +870,9 @@ mv64xxx_i2c_probe(struct platform_device *pd)
 
 	return 0;
 
-	exit_free_irq:
-		free_irq(drv_data->irq, drv_data);
-	exit_unmap_regs:
+exit_free_irq:
+	free_irq(drv_data->irq, drv_data);
+exit_clk:
 #if defined(CONFIG_HAVE_CLK)
 	/* Not all platforms have a clk */
 	if (!IS_ERR(drv_data->clk)) {
@@ -691,9 +880,6 @@ mv64xxx_i2c_probe(struct platform_device *pd)
 		clk_unprepare(drv_data->clk);
 	}
 #endif
-		mv64xxx_i2c_unmap_regs(drv_data);
-	exit_kfree:
-		kfree(drv_data);
 	return rc;
 }
 
@@ -704,7 +890,6 @@ mv64xxx_i2c_remove(struct platform_device *dev)
 
 	i2c_del_adapter(&drv_data->adapter);
 	free_irq(drv_data->irq, drv_data);
-	mv64xxx_i2c_unmap_regs(drv_data);
 #if defined(CONFIG_HAVE_CLK)
 	/* Not all platforms have a clk */
 	if (!IS_ERR(drv_data->clk)) {
@@ -712,20 +897,25 @@ mv64xxx_i2c_remove(struct platform_device *dev)
 		clk_unprepare(drv_data->clk);
 	}
 #endif
-	kfree(drv_data);
 
 	return 0;
 }
 
-static const struct of_device_id mv64xxx_i2c_of_match_table[] = {
-	{ .compatible = "marvell,mv64xxx-i2c", },
-	{}
-};
-MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
+static int mv64xxx_i2c_resume(struct platform_device *dev)
+{
+	struct mv64xxx_i2c_data *drv_data = platform_get_drvdata(dev);
+
+	mv64xxx_i2c_hw_init(drv_data);
+
+	return 0;
+}
 
 static struct platform_driver mv64xxx_i2c_driver = {
 	.probe	= mv64xxx_i2c_probe,
 	.remove	= mv64xxx_i2c_remove,
+#ifdef CONFIG_PM
+	.resume = mv64xxx_i2c_resume,
+#endif
 	.driver	= {
 		.owner	= THIS_MODULE,
 		.name	= MV64XXX_I2C_CTLR_NAME,
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index a51ee009ed83..0d438f5eefb1 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -18,10 +18,15 @@
 #include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
 #include <linux/io.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/of_pci.h>
 #include <linux/irqdomain.h>
+#include <linux/slab.h>
+#include <linux/syscore_ops.h>
+#include <linux/msi.h>
 #include <asm/mach/arch.h>
 #include <asm/exception.h>
 #include <asm/smp_plat.h>
@@ -32,6 +37,7 @@
 /* Interrupt Controller Registers Map */
 #define ARMADA_370_XP_INT_SET_MASK_OFFS		(0x48)
 #define ARMADA_370_XP_INT_CLEAR_MASK_OFFS	(0x4C)
+#define ARMADA_370_XP_INT_CPU_SUBSYS_MASK_OFFS	(0x54)
 
 #define ARMADA_370_XP_INT_CONTROL		(0x00)
 #define ARMADA_370_XP_INT_SET_ENABLE_OFFS	(0x30)
@@ -39,6 +45,7 @@
 #define ARMADA_370_XP_INT_SOURCE_CTL(irq)	(0x100 + irq*4)
 
 #define ARMADA_370_XP_CPU_INTACK_OFFS		(0x44)
+#define ARMADA_375_PPI_CAUSE			(0x10)
 
 #define ARMADA_370_XP_SW_TRIG_INT_OFFS           (0x4)
 #define ARMADA_370_XP_IN_DRBEL_MSK_OFFS          (0xc)
@@ -47,16 +54,34 @@
 #define ARMADA_370_XP_MAX_PER_CPU_IRQS		(28)
 
 #define ARMADA_370_XP_TIMER0_PER_CPU_IRQ	(5)
+#define ARMADA_370_XP_CPU_SUBSYS_PERF_CNT	(3)
+
+#define ARMADA_370_XP_GBE0_PER_CPU_IRQ		(8)
+#define ARMADA_370_XP_GBE3_PER_CPU_IRQ		(15)
 
 #define IPI_DOORBELL_START                      (0)
 #define IPI_DOORBELL_END                        (8)
 #define IPI_DOORBELL_MASK                       0xFF
+#define PCI_MSI_DOORBELL_START                  (16)
+#define PCI_MSI_DOORBELL_NR                     (16)
+#define PCI_MSI_DOORBELL_END                    (32)
+#define PCI_MSI_DOORBELL_MASK                   0xFFFF0000
 
+#ifdef CONFIG_SMP
 static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+#endif
 
+static void __iomem *cpus_int_base;
 static void __iomem *per_cpu_int_base;
 static void __iomem *main_int_base;
 static struct irq_domain *armada_370_xp_mpic_domain;
+static u32 doorbell_mask_reg;
+#ifdef CONFIG_PCI_MSI
+static struct irq_domain *armada_370_xp_msi_domain;
+static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
+static DEFINE_MUTEX(msi_used_lock);
+static phys_addr_t msi_doorbell_addr;
+#endif
 
 /*
  * In SMP mode:
@@ -67,9 +92,26 @@ static void armada_370_xp_irq_mask(struct irq_data *d)
 {
 	irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
+#ifdef CONFIG_SMP
+	int cpu;
+
+	if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS)
+#else
 	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
+#endif
 		writel(hwirq, main_int_base +
 				ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
+#ifdef CONFIG_SMP
+	/* In case of Network Per CPU IRQ and SMP - Mask all CPUs */
+	else if ((hwirq >= ARMADA_370_XP_GBE0_PER_CPU_IRQ) &&
+		 (hwirq <= ARMADA_370_XP_GBE3_PER_CPU_IRQ) &&
+		 (nr_cpu_ids > 1)) {
+		for_each_possible_cpu(cpu) {
+			if (cpumask_test_cpu(cpu, d->affinity))
+				writel(hwirq, cpus_int_base + 0x100 * cpu + ARMADA_370_XP_INT_SET_MASK_OFFS);
+		}
+	}
+#endif
 	else
 		writel(hwirq, per_cpu_int_base +
 				ARMADA_370_XP_INT_SET_MASK_OFFS);
@@ -79,14 +121,169 @@ static void armada_370_xp_irq_unmask(struct irq_data *d)
 {
 	irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
+#ifdef CONFIG_SMP
+	int cpu;
+
+	if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS)
+#else
 	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
+#endif
 		writel(hwirq, main_int_base +
 				ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+#ifdef CONFIG_SMP
+	/* In case of Network Per CPU IRQ and SMP - Set correct affinity to the IRQ */
+	else if ((hwirq >= ARMADA_370_XP_GBE0_PER_CPU_IRQ) &&
+		 (hwirq <= ARMADA_370_XP_GBE3_PER_CPU_IRQ) &&
+		 (nr_cpu_ids > 1)) {
+		for_each_possible_cpu(cpu) {
+			if (cpumask_test_cpu(cpu, d->affinity))
+				writel(hwirq, cpus_int_base + 0x100 * cpu + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+		}
+	}
+#endif
 	else
 		writel(hwirq, per_cpu_int_base +
 				ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
 }
 
+#ifdef CONFIG_PCI_MSI
+
+static int armada_370_xp_alloc_msi(void)
+{
+	int hwirq;
+
+	mutex_lock(&msi_used_lock);
+	hwirq = find_first_zero_bit(&msi_used, PCI_MSI_DOORBELL_NR);
+	if (hwirq >= PCI_MSI_DOORBELL_NR)
+		hwirq = -ENOSPC;
+	else
+		set_bit(hwirq, msi_used);
+	mutex_unlock(&msi_used_lock);
+
+	return hwirq;
+}
+
+static void armada_370_xp_free_msi(int hwirq)
+{
+	mutex_lock(&msi_used_lock);
+	if (!test_bit(hwirq, msi_used))
+		pr_err("trying to free unused MSI#%d\n", hwirq);
+	else
+		clear_bit(hwirq, msi_used);
+	mutex_unlock(&msi_used_lock);
+}
+
+static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
+				       struct pci_dev *pdev,
+				       struct msi_desc *desc)
+{
+	struct msi_msg msg;
+	irq_hw_number_t hwirq;
+	int virq;
+
+	hwirq = armada_370_xp_alloc_msi();
+	if (hwirq < 0)
+		return hwirq;
+
+	virq = irq_create_mapping(armada_370_xp_msi_domain, hwirq);
+	if (!virq) {
+		armada_370_xp_free_msi(hwirq);
+		return -EINVAL;
+	}
+
+	irq_set_msi_desc(virq, desc);
+
+	msg.address_lo = msi_doorbell_addr;
+	msg.address_hi = 0;
+	msg.data = 0xf00 | (hwirq + 16);
+
+	write_msi_msg(virq, &msg);
+	return 0;
+}
+
+static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
+					   unsigned int irq)
+{
+	struct irq_data *d = irq_get_irq_data(irq);
+	irq_dispose_mapping(irq);
+	armada_370_xp_free_msi(d->hwirq);
+}
+
+static struct irq_chip armada_370_xp_msi_irq_chip = {
+	.name = "armada_370_xp_msi_irq",
+	.irq_enable = unmask_msi_irq,
+	.irq_disable = mask_msi_irq,
+	.irq_mask = mask_msi_irq,
+	.irq_unmask = unmask_msi_irq,
+};
+
+static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq,
+				 irq_hw_number_t hw)
+{
+	irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip,
+				 handle_simple_irq);
+	set_irq_flags(virq, IRQF_VALID);
+
+	return 0;
+}
+
+static const struct irq_domain_ops armada_370_xp_msi_irq_ops = {
+	.map = armada_370_xp_msi_map,
+};
+
+static int armada_370_xp_msi_init(struct device_node *node,
+				  phys_addr_t main_int_phys_base)
+{
+	struct msi_chip *msi_chip;
+	u32 reg;
+	int ret;
+
+	msi_doorbell_addr = main_int_phys_base +
+		ARMADA_370_XP_SW_TRIG_INT_OFFS;
+
+	msi_chip = kzalloc(sizeof(*msi_chip), GFP_KERNEL);
+	if (!msi_chip)
+		return -ENOMEM;
+
+	msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
+	msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
+	msi_chip->of_node = node;
+
+	armada_370_xp_msi_domain =
+		irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR,
+				      &armada_370_xp_msi_irq_ops,
+				      NULL);
+	if (!armada_370_xp_msi_domain) {
+		kfree(msi_chip);
+		return -ENOMEM;
+	}
+
+	ret = of_pci_msi_chip_add(msi_chip);
+	if (ret < 0) {
+		irq_domain_remove(armada_370_xp_msi_domain);
+		kfree(msi_chip);
+		return ret;
+	}
+
+	reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS)
+		| PCI_MSI_DOORBELL_MASK;
+
+	writel(reg, per_cpu_int_base +
+	       ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+
+	/* Unmask IPI interrupt */
+	writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+
+	return 0;
+}
+#else
+static inline int armada_370_xp_msi_init(struct device_node *node,
+					 phys_addr_t main_int_phys_base)
+{
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_SMP
 static int armada_xp_set_affinity(struct irq_data *d,
 				  const struct cpumask *mask_val, bool force)
@@ -107,8 +304,10 @@ static int armada_xp_set_affinity(struct irq_data *d,
 	 * Forbid mutlicore interrupt affinity
 	 * This is required since the MPIC HW doesn't limit
 	 * several CPUs from acknowledging the same interrupt.
+	 * Note: Allow GBE interrupt set affinity.
 	 */
-	if (count > 1)
+	if (count > 1 && (hwirq < ARMADA_370_XP_GBE0_PER_CPU_IRQ ||
+			  hwirq > ARMADA_370_XP_GBE3_PER_CPU_IRQ))
 		return -EINVAL;
 
 	for_each_cpu(cpu, cpu_online_mask)
@@ -136,18 +335,43 @@ static struct irq_chip armada_370_xp_irq_chip = {
 #endif
 };
 
+static void armada_370_xp_enable_pmu_irq(void *data)
+{
+	unsigned long cpuid = smp_processor_id();
+	writel(1 << cpuid, per_cpu_int_base +
+				ARMADA_370_XP_INT_CPU_SUBSYS_MASK_OFFS);
+}
+
 static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
 				      unsigned int virq, irq_hw_number_t hw)
 {
 	armada_370_xp_irq_mask(irq_get_irq_data(virq));
+#ifdef CONFIG_SMP
+	if (hw > ARMADA_370_XP_MAX_PER_CPU_IRQS)
+#else
 	if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
+#endif
 		writel(hw, per_cpu_int_base +
 			ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
 	else
 		writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
 	irq_set_status_flags(virq, IRQ_LEVEL);
 
-	if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
+	/*
+	 * Setup CPU Subsystem Mask registers.
+	 * Enable only Performance Counter Overflow interrupts.
+	 */
+	if (hw == ARMADA_370_XP_CPU_SUBSYS_PERF_CNT)
+		on_each_cpu_mask(cpu_online_mask,
+			armada_370_xp_enable_pmu_irq, NULL, 1);
+
+	/*
+	 * Setup Timer0 and CPU Subsystem Summary as Per-CPU interrupts.
+	 * Although the CPU Subsystem Cause contains also global IRQs,
+	 * we will use only Performance Counter Overflow interrupts.
+	 */
+	if ((hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) ||
+				(hw == ARMADA_370_XP_CPU_SUBSYS_PERF_CNT)) {
 		irq_set_percpu_devid(virq);
 		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
 					handle_percpu_devid_irq);
@@ -201,6 +425,82 @@ static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
 	.xlate = irq_domain_xlate_onecell,
 };
 
+#ifdef CONFIG_PCI_MSI
+/*
+ * This function handles MSI for the case where the MPIC is a slave of
+ * another interrupt controller. We unfortunately cannot factorize
+ * this with the MSI handling code in armada_370_xp_handle_irq()
+ * because this function must call handle_IRQ() while the below
+ * function calls generic_handle_irq().
+ */
+static void armada_370_xp_mpic_handle_cascade_msi(void)
+{
+	u32 msimask, msinr;
+
+	msimask = readl_relaxed(per_cpu_int_base +
+				ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
+		& PCI_MSI_DOORBELL_MASK;
+
+	writel(~msimask, per_cpu_int_base +
+	       ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+
+	for (msinr = PCI_MSI_DOORBELL_START;
+	     msinr < PCI_MSI_DOORBELL_END; msinr++) {
+		int irq;
+
+		if (!(msimask & BIT(msinr)))
+			continue;
+
+		irq = irq_find_mapping(armada_370_xp_msi_domain,
+				       msinr - 16);
+		generic_handle_irq(irq);
+	}
+}
+#else
+static void armada_370_xp_mpic_handle_cascade_msi(void) { }
+#endif
+
+static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq,
+						  struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_get_chip(irq);
+	unsigned long irqmap, irqn, cpuid, irqsrc;
+	unsigned int cascade_irq;
+#ifdef CONFIG_SMP
+	struct irq_data *irqd;
+#endif
+
+	chained_irq_enter(chip, desc);
+
+	cpuid = raw_smp_processor_id();
+	irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE);
+	for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
+
+		irqsrc = readl_relaxed(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(irqn));
+		/*
+		 * Check if the interrupt is not masked on current CPU.
+		 * Test IRQ (0-1) and FIQ (8-9) mask bits.
+		 */
+		if ((irqsrc & (0x101 << cpuid)) == 0)
+			continue;
+
+		if (irqn == 1) {
+			armada_370_xp_mpic_handle_cascade_msi();
+		} else {
+			cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn);
+#ifdef CONFIG_SMP
+			irqd = irq_get_irq_data(cascade_irq);
+			if (cpumask_test_cpu(cpuid, irqd->affinity))
+				generic_handle_irq(cascade_irq);
+#else
+			generic_handle_irq(cascade_irq);
+#endif
+		}
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
 static asmlinkage void __exception_irq_entry
 armada_370_xp_handle_irq(struct pt_regs *regs)
 {
@@ -214,12 +514,39 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
 		if (irqnr > 1022)
 			break;
 
-		if (irqnr > 0) {
+		if (irqnr > 1) {
 			irqnr =	irq_find_mapping(armada_370_xp_mpic_domain,
 					irqnr);
 			handle_IRQ(irqnr, regs);
 			continue;
 		}
+
+#ifdef CONFIG_PCI_MSI
+		/* MSI handling */
+		if (irqnr == 1) {
+			u32 msimask, msinr;
+
+			msimask = readl_relaxed(per_cpu_int_base +
+						ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
+				& PCI_MSI_DOORBELL_MASK;
+
+			writel(~msimask, per_cpu_int_base +
+			       ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
+
+			for (msinr = PCI_MSI_DOORBELL_START;
+			     msinr < PCI_MSI_DOORBELL_END; msinr++) {
+				int irq;
+
+				if (!(msimask & BIT(msinr)))
+					continue;
+
+				irq = irq_find_mapping(armada_370_xp_msi_domain,
+						       msinr - 16);
+				handle_IRQ(irq, regs);
+			}
+		}
+#endif
+
 #ifdef CONFIG_SMP
 		/* IPI Handling */
 		if (irqnr == 0) {
@@ -245,27 +572,96 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
 	} while (1);
 }
 
+static int armada_370_xp_mpic_suspend(void)
+{
+	doorbell_mask_reg = readl(per_cpu_int_base +
+				  ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+	return 0;
+}
+
+static void armada_370_xp_mpic_resume(void)
+{
+	int nirqs;
+	irq_hw_number_t irq;
+
+	/* Re-enable interrupts */
+	nirqs = (readl(main_int_base + ARMADA_370_XP_INT_CONTROL) >> 2) & 0x3ff;
+	for (irq = 0; irq < nirqs; irq++) {
+		struct irq_data *data;
+		int virq;
+
+		virq = irq_linear_revmap(armada_370_xp_mpic_domain, irq);
+		if (virq == 0)
+			continue;
+
+		if (irq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
+			writel(irq, per_cpu_int_base +
+			       ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+		else
+			writel(irq, main_int_base +
+			       ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+
+		data = irq_get_irq_data(virq);
+		if (!irqd_irq_disabled(data))
+			armada_370_xp_irq_unmask(data);
+	}
+
+	/* Reconfigure doorbells for IPIs and MSIs */
+	writel(doorbell_mask_reg,
+	       per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS);
+	if (doorbell_mask_reg & IPI_DOORBELL_MASK)
+		writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+	if (doorbell_mask_reg & PCI_MSI_DOORBELL_MASK)
+		writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+}
+
+struct syscore_ops armada_370_xp_mpic_syscore_ops = {
+	.suspend	= armada_370_xp_mpic_suspend,
+	.resume		= armada_370_xp_mpic_resume,
+};
+
 static int __init armada_370_xp_mpic_of_init(struct device_node *node,
 					     struct device_node *parent)
 {
+	struct resource main_int_res, per_cpu_int_res, cpus_int_res;
+	int parent_irq;
 	u32 control;
 
-	main_int_base = of_iomap(node, 0);
-	per_cpu_int_base = of_iomap(node, 1);
+	BUG_ON(of_address_to_resource(node, 0, &main_int_res));
+	BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res));
 
+	BUG_ON(!request_mem_region(main_int_res.start,
+				   resource_size(&main_int_res),
+				   node->full_name));
+	BUG_ON(!request_mem_region(per_cpu_int_res.start,
+				   resource_size(&per_cpu_int_res),
+				   node->full_name));
+
+	main_int_base = ioremap(main_int_res.start,
+				resource_size(&main_int_res));
 	BUG_ON(!main_int_base);
+
+	per_cpu_int_base = ioremap(per_cpu_int_res.start,
+				   resource_size(&per_cpu_int_res));
 	BUG_ON(!per_cpu_int_base);
 
+	if (nr_cpu_ids > 1) {
+		BUG_ON(of_address_to_resource(node, 2, &cpus_int_res));
+		BUG_ON(!request_mem_region(cpus_int_res.start,
+					   resource_size(&cpus_int_res),
+					   node->full_name));
+		cpus_int_base = ioremap(cpus_int_res.start,
+					resource_size(&cpus_int_res));
+		BUG_ON(!cpus_int_base);
+	}
+
 	control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
 
 	armada_370_xp_mpic_domain =
 		irq_domain_add_linear(node, (control >> 2) & 0x3ff,
 				&armada_370_xp_mpic_irq_ops, NULL);
 
-	if (!armada_370_xp_mpic_domain)
-		panic("Unable to add Armada_370_Xp MPIC irq domain (DT)\n");
-
-	irq_set_default_host(armada_370_xp_mpic_domain);
+	BUG_ON(!armada_370_xp_mpic_domain);
 
 #ifdef CONFIG_SMP
 	armada_xp_mpic_smp_cpu_init();
@@ -280,7 +676,18 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
 
 #endif
 
-	set_handle_irq(armada_370_xp_handle_irq);
+	armada_370_xp_msi_init(node, main_int_res.start);
+
+	parent_irq = irq_of_parse_and_map(node, 0);
+	if (parent_irq <= 0) {
+		irq_set_default_host(armada_370_xp_mpic_domain);
+		set_handle_irq(armada_370_xp_handle_irq);
+	} else {
+		irq_set_chained_handler(parent_irq,
+					armada_370_xp_mpic_handle_cascade_irq);
+	}
+
+	register_syscore_ops(&armada_370_xp_mpic_syscore_ops);
 
 	return 0;
 }
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 5a2c75499824..7da0152c63fe 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -848,7 +848,7 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
 	else
 		set_bit_le(bit, kaddr);
 	kunmap_atomic(kaddr);
-	pr_debug("set file bit %lu page %lu\n", bit, page->index);
+	pr_debug("set file bit %lu page %llu\n", bit, page->index);
 	/* record page number so it gets flushed to disk when unplug occurs */
 	set_page_attr(bitmap, page->index, BITMAP_PAGE_DIRTY);
 }
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 6d2d41ae9e32..2e5cdfdd6bfa 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1,7 +1,7 @@
 /*
  * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
  * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
- * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the GPL.
  */
@@ -18,84 +18,66 @@
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
 #include <linux/backing-dev.h>
-#include <linux/percpu.h>
-#include <linux/atomic.h>
+#include <asm/atomic.h>
 #include <linux/scatterlist.h>
 #include <asm/page.h>
 #include <asm/unaligned.h>
-#include <crypto/hash.h>
-#include <crypto/md5.h>
-#include <crypto/algapi.h>
 
 #include <linux/device-mapper.h>
 
+#if defined(CONFIG_OCF_DM_CRYPT)
+extern int cesaReqResources[];
+extern void DUMP_OCF_POOL(void);
+
+//#define DM_DEBUG
+#undef DM_DEBUG
+#ifdef DM_DEBUG
+#define dmprintk printk
+#else
+#define dmprintk(fmt,args...)
+#endif
+
+#include <../crypto/ocf/cryptodev.h>
+#endif
+
 #define DM_MSG_PREFIX "crypt"
+#define MESG_STR(x) x, sizeof(x)
+
+extern int crypto_debug;
+/*
+ * per bio private data
+ */
+struct crypt_io {
+	struct dm_target *target;
+	struct bio *base_bio;
+	struct work_struct work;
+	atomic_t pending;
+	int error;
+	int post_process;
+};
 
 /*
  * context holding the current state of a multi-part conversion
  */
 struct convert_context {
-	struct completion restart;
 	struct bio *bio_in;
 	struct bio *bio_out;
 	unsigned int offset_in;
 	unsigned int offset_out;
 	unsigned int idx_in;
 	unsigned int idx_out;
-	sector_t cc_sector;
-	atomic_t cc_pending;
-};
-
-/*
- * per bio private data
- */
-struct dm_crypt_io {
-	struct crypt_config *cc;
-	struct bio *base_bio;
-	struct work_struct work;
-
-	struct convert_context ctx;
-
-	atomic_t io_pending;
-	int error;
 	sector_t sector;
-	struct dm_crypt_io *base_io;
-};
-
-struct dm_crypt_request {
-	struct convert_context *ctx;
-	struct scatterlist sg_in;
-	struct scatterlist sg_out;
-	sector_t iv_sector;
+	int write;
 };
 
 struct crypt_config;
 
 struct crypt_iv_operations {
 	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
-		   const char *opts);
+	           const char *opts);
 	void (*dtr)(struct crypt_config *cc);
-	int (*init)(struct crypt_config *cc);
-	int (*wipe)(struct crypt_config *cc);
-	int (*generator)(struct crypt_config *cc, u8 *iv,
-			 struct dm_crypt_request *dmreq);
-	int (*post)(struct crypt_config *cc, u8 *iv,
-		    struct dm_crypt_request *dmreq);
-};
-
-struct iv_essiv_private {
-	struct crypto_hash *hash_tfm;
-	u8 *salt;
-};
-
-struct iv_benbi_private {
-	int shift;
-};
-
-#define LMK_SEED_SIZE 64 /* hash + 0 */
-struct iv_lmk_private {
-	struct crypto_shash *hash_tfm;
-	u8 *seed;
+	const char *(*status)(struct crypt_config *cc);
+	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
 };
 
 /*
@@ -103,108 +85,59 @@ struct iv_lmk_private {
  * and encrypts / decrypts at the same time.
  */
 enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
-
-/*
- * Duplicated per-CPU state for cipher.
- */
-struct crypt_cpu {
-	struct ablkcipher_request *req;
-};
-
-/*
- * The fields in here must be read only after initialization,
- * changing state should be in crypt_cpu.
- */
 struct crypt_config {
 	struct dm_dev *dev;
 	sector_t start;
 
 	/*
-	 * pool for per bio private data, crypto requests and
-	 * encryption requeusts/buffer pages
+	 * pool for per bio private data and
+	 * for encryption buffer pages
 	 */
 	mempool_t *io_pool;
-	mempool_t *req_pool;
 	mempool_t *page_pool;
 	struct bio_set *bs;
 
-	struct workqueue_struct *io_queue;
-	struct workqueue_struct *crypt_queue;
-
-	char *cipher;
-	char *cipher_string;
-
+	/*
+	 * crypto related data
+	 */
 	struct crypt_iv_operations *iv_gen_ops;
+	char *iv_mode;
 	union {
-		struct iv_essiv_private essiv;
-		struct iv_benbi_private benbi;
-		struct iv_lmk_private lmk;
+		struct crypto_cipher *essiv_tfm;
+		int benbi_shift;
 	} iv_gen_private;
 	sector_t iv_offset;
 	unsigned int iv_size;
 
-	/*
-	 * Duplicated per cpu state. Access through
-	 * per_cpu_ptr() only.
-	 */
-	struct crypt_cpu __percpu *cpu;
-
-	/* ESSIV: struct crypto_cipher *essiv_tfm */
-	void *iv_private;
-	struct crypto_ablkcipher **tfms;
-	unsigned tfms_count;
-
-	/*
-	 * Layout of each crypto request:
-	 *
-	 *   struct ablkcipher_request
-	 *      context
-	 *      padding
-	 *   struct dm_crypt_request
-	 *      padding
-	 *   IV
-	 *
-	 * The padding is added so that dm_crypt_request and the IV are
-	 * correctly aligned.
-	 */
-	unsigned int dmreq_start;
-
+	char cipher[CRYPTO_MAX_ALG_NAME];
+	char chainmode[CRYPTO_MAX_ALG_NAME];
+#if defined(CONFIG_OCF_DM_CRYPT)
+	struct cryptoini 	cr_dm;    		/* OCF session */
+	uint64_t 	 	ocf_cryptoid;		/* OCF sesssion ID */
+#else
+	struct crypto_blkcipher *tfm;
+#endif
 	unsigned long flags;
 	unsigned int key_size;
-	unsigned int key_parts;
 	u8 key[0];
 };
 
-#define MIN_IOS        16
+#define MIN_IOS        256
 #define MIN_POOL_PAGES 32
+#define MIN_BIO_PAGES  8
 
+static unsigned int _crypt_requests;
+static DEFINE_SPINLOCK(_crypt_lock);
+static wait_queue_head_t _crypt_waitq;
 static struct kmem_cache *_crypt_io_pool;
 
-static void clone_init(struct dm_crypt_io *, struct bio *);
-static void kcryptd_queue_crypt(struct dm_crypt_io *io);
-static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
-
-static struct crypt_cpu *this_crypt_config(struct crypt_config *cc)
-{
-	return this_cpu_ptr(cc->cpu);
-}
-
-/*
- * Use this to access cipher attributes that are the same for each CPU.
- */
-static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
-{
-	return cc->tfms[0];
-}
+static void clone_init(struct crypt_io *, struct bio *);
 
 /*
  * Different IV generation algorithms:
  *
  * plain: the initial vector is the 32-bit little-endian version of the sector
- *        number, padded with zeros if necessary.
- *
- * plain64: the initial vector is the 64-bit little-endian version of the sector
- *        number, padded with zeros if necessary.
+ *        number, padded with zeros if neccessary.
  *
  * essiv: "encrypted sector|salt initial vector", the sector number is
  *        encrypted with the bulk cipher using a salt as key. The salt
@@ -216,203 +149,112 @@ static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
  * null: the initial vector is always zero.  Provides compatibility with
  *       obsolete loop_fish2 devices.  Do not use for new devices.
  *
- * lmk:  Compatible implementation of the block chaining mode used
- *       by the Loop-AES block device encryption system
- *       designed by Jari Ruusu. See http://loop-aes.sourceforge.net/
- *       It operates on full 512 byte sectors and uses CBC
- *       with an IV derived from the sector number, the data and
- *       optionally extra IV seed.
- *       This means that after decryption the first block
- *       of sector must be tweaked according to decrypted data.
- *       Loop-AES can use three encryption schemes:
- *         version 1: is plain aes-cbc mode
- *         version 2: uses 64 multikey scheme with lmk IV generator
- *         version 3: the same as version 2 with additional IV seed
- *                   (it uses 65 keys, last key is used as IV seed)
- *
  * plumb: unimplemented, see:
  * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
  */
 
-static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv,
-			      struct dm_crypt_request *dmreq)
+static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 {
 	memset(iv, 0, cc->iv_size);
-	*(__le32 *)iv = cpu_to_le32(dmreq->iv_sector & 0xffffffff);
+	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);
 
 	return 0;
 }
 
-static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
-				struct dm_crypt_request *dmreq)
-{
-	memset(iv, 0, cc->iv_size);
-	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
-
-	return 0;
-}
-
-/* Initialise ESSIV - compute salt but no local memory allocations */
-static int crypt_iv_essiv_init(struct crypt_config *cc)
+static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
+	                      const char *opts)
 {
-	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
+	struct crypto_cipher *essiv_tfm;
+	struct crypto_hash *hash_tfm;
 	struct hash_desc desc;
 	struct scatterlist sg;
-	struct crypto_cipher *essiv_tfm;
+	unsigned int saltsize;
+	u8 *salt;
 	int err;
 
-	sg_init_one(&sg, cc->key, cc->key_size);
-	desc.tfm = essiv->hash_tfm;
-	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
-	if (err)
-		return err;
-
-	essiv_tfm = cc->iv_private;
-
-	err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
-			    crypto_hash_digestsize(essiv->hash_tfm));
-	if (err)
-		return err;
-
-	return 0;
-}
-
-/* Wipe salt and reset key derived from volume key */
-static int crypt_iv_essiv_wipe(struct crypt_config *cc)
-{
-	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
-	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
-	struct crypto_cipher *essiv_tfm;
-	int r, err = 0;
+	if (opts == NULL) {
+		ti->error = "Digest algorithm missing for ESSIV mode";
+		return -EINVAL;
+	}
 
-	memset(essiv->salt, 0, salt_size);
+	/* Hash the cipher key with the given hash algorithm */
+	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(hash_tfm)) {
+		ti->error = "Error initializing ESSIV hash";
+		return PTR_ERR(hash_tfm);
+	}
 
-	essiv_tfm = cc->iv_private;
-	r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
-	if (r)
-		err = r;
+	saltsize = crypto_hash_digestsize(hash_tfm);
+	salt = kmalloc(saltsize, GFP_KERNEL);
+	if (salt == NULL) {
+		ti->error = "Error kmallocing salt storage in ESSIV";
+		crypto_free_hash(hash_tfm);
+		return -ENOMEM;
+	}
 
-	return err;
-}
+	sg_init_one(&sg, cc->key, cc->key_size);
+	desc.tfm = hash_tfm;
+	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+	err = crypto_hash_digest(&desc, &sg, cc->key_size, salt);
+	crypto_free_hash(hash_tfm);
 
-/* Set up per cpu cipher state */
-static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
-					     struct dm_target *ti,
-					     u8 *salt, unsigned saltsize)
-{
-	struct crypto_cipher *essiv_tfm;
-	int err;
+	if (err) {
+		ti->error = "Error calculating hash in ESSIV";
+		kfree(salt);
+		return err;
+	}
 
 	/* Setup the essiv_tfm with the given salt */
 	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(essiv_tfm)) {
 		ti->error = "Error allocating crypto tfm for ESSIV";
-		return essiv_tfm;
+		kfree(salt);
+		return PTR_ERR(essiv_tfm);
 	}
-
+#if  defined(CONFIG_OCF_DM_CRYPT)
+	if (crypto_cipher_blocksize(essiv_tfm) != cc->iv_size) {
+#else
 	if (crypto_cipher_blocksize(essiv_tfm) !=
-	    crypto_ablkcipher_ivsize(any_tfm(cc))) {
+	    crypto_blkcipher_ivsize(cc->tfm)) {
+#endif
 		ti->error = "Block size of ESSIV cipher does "
-			    "not match IV size of block cipher";
+			        "not match IV size of block cipher";
 		crypto_free_cipher(essiv_tfm);
-		return ERR_PTR(-EINVAL);
+		kfree(salt);
+		return -EINVAL;
 	}
-
 	err = crypto_cipher_setkey(essiv_tfm, salt, saltsize);
 	if (err) {
 		ti->error = "Failed to set key for ESSIV cipher";
 		crypto_free_cipher(essiv_tfm);
-		return ERR_PTR(err);
+		kfree(salt);
+		return err;
 	}
+	kfree(salt);
 
-	return essiv_tfm;
+	cc->iv_gen_private.essiv_tfm = essiv_tfm;
+	return 0;
 }
 
 static void crypt_iv_essiv_dtr(struct crypt_config *cc)
 {
-	struct crypto_cipher *essiv_tfm;
-	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
-
-	crypto_free_hash(essiv->hash_tfm);
-	essiv->hash_tfm = NULL;
-
-	kzfree(essiv->salt);
-	essiv->salt = NULL;
-
-	essiv_tfm = cc->iv_private;
-
-	if (essiv_tfm)
-		crypto_free_cipher(essiv_tfm);
-
-	cc->iv_private = NULL;
-}
-
-static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
-			      const char *opts)
-{
-	struct crypto_cipher *essiv_tfm = NULL;
-	struct crypto_hash *hash_tfm = NULL;
-	u8 *salt = NULL;
-	int err;
-
-	if (!opts) {
-		ti->error = "Digest algorithm missing for ESSIV mode";
-		return -EINVAL;
-	}
-
-	/* Allocate hash algorithm */
-	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(hash_tfm)) {
-		ti->error = "Error initializing ESSIV hash";
-		err = PTR_ERR(hash_tfm);
-		goto bad;
-	}
-
-	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
-	if (!salt) {
-		ti->error = "Error kmallocing salt storage in ESSIV";
-		err = -ENOMEM;
-		goto bad;
-	}
-
-	cc->iv_gen_private.essiv.salt = salt;
-	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
-
-	essiv_tfm = setup_essiv_cpu(cc, ti, salt,
-				crypto_hash_digestsize(hash_tfm));
-	if (IS_ERR(essiv_tfm)) {
-		crypt_iv_essiv_dtr(cc);
-		return PTR_ERR(essiv_tfm);
-	}
-	cc->iv_private = essiv_tfm;
-
-	return 0;
-
-bad:
-	if (hash_tfm && !IS_ERR(hash_tfm))
-		crypto_free_hash(hash_tfm);
-	kfree(salt);
-	return err;
+	crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
+	cc->iv_gen_private.essiv_tfm = NULL;
 }
 
-static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
-			      struct dm_crypt_request *dmreq)
+static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 {
-	struct crypto_cipher *essiv_tfm = cc->iv_private;
-
 	memset(iv, 0, cc->iv_size);
-	*(__le64 *)iv = cpu_to_le64(dmreq->iv_sector);
-	crypto_cipher_encrypt_one(essiv_tfm, iv, iv);
-
+	*(u64 *)iv = cpu_to_le64(sector);
+	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
 	return 0;
 }
 
+#if !defined(CONFIG_OCF_DM_CRYPT)
 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
 			      const char *opts)
 {
-	unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
+	unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
 	int log = ilog2(bs);
 
 	/* we need to calculate how far we must shift the sector count
@@ -428,7 +270,7 @@ static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
 		return -EINVAL;
 	}
 
-	cc->iv_gen_private.benbi.shift = 9 - log;
+	cc->iv_gen_private.benbi_shift = 9 - log;
 
 	return 0;
 }
@@ -437,412 +279,425 @@ static void crypt_iv_benbi_dtr(struct crypt_config *cc)
 {
 }
 
-static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv,
-			      struct dm_crypt_request *dmreq)
+static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 {
 	__be64 val;
 
 	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
 
-	val = cpu_to_be64(((u64)dmreq->iv_sector << cc->iv_gen_private.benbi.shift) + 1);
+	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
 	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
 
 	return 0;
 }
+#endif
 
-static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv,
-			     struct dm_crypt_request *dmreq)
+static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
 {
 	memset(iv, 0, cc->iv_size);
 
 	return 0;
 }
 
-static void crypt_iv_lmk_dtr(struct crypt_config *cc)
-{
-	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+static struct crypt_iv_operations crypt_iv_plain_ops = {
+	.generator = crypt_iv_plain_gen
+};
 
-	if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
-		crypto_free_shash(lmk->hash_tfm);
-	lmk->hash_tfm = NULL;
+static struct crypt_iv_operations crypt_iv_essiv_ops = {
+	.ctr       = crypt_iv_essiv_ctr,
+	.dtr       = crypt_iv_essiv_dtr,
+	.generator = crypt_iv_essiv_gen
+};
 
-	kzfree(lmk->seed);
-	lmk->seed = NULL;
-}
+#if !defined(CONFIG_OCF_DM_CRYPT)
+static struct crypt_iv_operations crypt_iv_benbi_ops = {
+	.ctr	   = crypt_iv_benbi_ctr,
+	.dtr	   = crypt_iv_benbi_dtr,
+	.generator = crypt_iv_benbi_gen
+};
+#endif
 
-static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
-			    const char *opts)
-{
-	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+static struct crypt_iv_operations crypt_iv_null_ops = {
+	.generator = crypt_iv_null_gen
+};
 
-	lmk->hash_tfm = crypto_alloc_shash("md5", 0, 0);
-	if (IS_ERR(lmk->hash_tfm)) {
-		ti->error = "Error initializing LMK hash";
-		return PTR_ERR(lmk->hash_tfm);
-	}
+#if defined(CONFIG_OCF_DM_CRYPT)
+static void dec_pending(struct crypt_io *io, int error);
 
-	/* No seed in LMK version 2 */
-	if (cc->key_parts == cc->tfms_count) {
-		lmk->seed = NULL;
-		return 0;
-	}
+struct ocf_wr_priv {
+	u32 		 	dm_ocf_wr_completed;	/* Num of wr completions */
+	u32 		 	dm_ocf_wr_pending;	/* Num of wr pendings */
+	wait_queue_head_t	dm_ocf_wr_queue;	/* waiting Q, for wr completion */
+};
 
-	lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
-	if (!lmk->seed) {
-		crypt_iv_lmk_dtr(cc);
-		ti->error = "Error kmallocing seed storage in LMK";
-		return -ENOMEM;
-	}
+/* WARN: ordering between processes is not guaranteed due to 'wake' handling */
+static int dm_ocf_wr_cb(struct cryptop *crp)
+{
+	struct ocf_wr_priv *ocf_wr_priv;
+	unsigned long flags;
 
-	return 0;
-}
+	if(crp == NULL) {
+		printk("dm_ocf_wr_cb: crp is NULL!! \n");
+		return 0;
+	}
 
-static int crypt_iv_lmk_init(struct crypt_config *cc)
-{
-	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
-	int subkey_size = cc->key_size / cc->key_parts;
+	ocf_wr_priv = (struct ocf_wr_priv*)crp->crp_opaque;
 
-	/* LMK seed is on the position of LMK_KEYS + 1 key */
-	if (lmk->seed)
-		memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
-		       crypto_shash_digestsize(lmk->hash_tfm));
+	ocf_wr_priv->dm_ocf_wr_completed++;
 
-	return 0;
-}
+	/* if no more pending for read, wake up the read task. */
+	if(ocf_wr_priv->dm_ocf_wr_completed == ocf_wr_priv->dm_ocf_wr_pending)
+		wake_up(&ocf_wr_priv->dm_ocf_wr_queue);
 
-static int crypt_iv_lmk_wipe(struct crypt_config *cc)
-{
-	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
+	crypto_freereq(crp);
 
-	if (lmk->seed)
-		memset(lmk->seed, 0, LMK_SEED_SIZE);
+	spin_lock_irqsave(&_crypt_lock, flags);
+	if (_crypt_requests > 0)
+		_crypt_requests -= 1;
+	spin_unlock_irqrestore(&_crypt_lock, flags);
 
+	wake_up(&_crypt_waitq);
 	return 0;
 }
 
-static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
-			    struct dm_crypt_request *dmreq,
-			    u8 *data)
+static int dm_ocf_rd_cb(struct cryptop *crp)
 {
-	struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
-	struct {
-		struct shash_desc desc;
-		char ctx[crypto_shash_descsize(lmk->hash_tfm)];
-	} sdesc;
-	struct md5_state md5state;
-	u32 buf[4];
-	int i, r;
-
-	sdesc.desc.tfm = lmk->hash_tfm;
-	sdesc.desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	r = crypto_shash_init(&sdesc.desc);
-	if (r)
-		return r;
-
-	if (lmk->seed) {
-		r = crypto_shash_update(&sdesc.desc, lmk->seed, LMK_SEED_SIZE);
-		if (r)
-			return r;
+	struct crypt_io *io;
+	unsigned long flags;
+
+	if(crp == NULL) {
+		printk("dm_ocf_rd_cb: crp is NULL!! \n");
+		return 0;
 	}
 
-	/* Sector is always 512B, block size 16, add data of blocks 1-31 */
-	r = crypto_shash_update(&sdesc.desc, data + 16, 16 * 31);
-	if (r)
-		return r;
-
-	/* Sector is cropped to 56 bits here */
-	buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
-	buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
-	buf[2] = cpu_to_le32(4024);
-	buf[3] = 0;
-	r = crypto_shash_update(&sdesc.desc, (u8 *)buf, sizeof(buf));
-	if (r)
-		return r;
-
-	/* No MD5 padding here */
-	r = crypto_shash_export(&sdesc.desc, &md5state);
-	if (r)
-		return r;
-
-	for (i = 0; i < MD5_HASH_WORDS; i++)
-		__cpu_to_le32s(&md5state.hash[i]);
-	memcpy(iv, &md5state.hash, cc->iv_size);
+	io = (struct crypt_io *)crp->crp_opaque;
 
-	return 0;
-}
+	crypto_freereq(crp);
 
-static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
-			    struct dm_crypt_request *dmreq)
-{
-	u8 *src;
-	int r = 0;
+	if(io != NULL)
+		dec_pending(io, 0);
 
-	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
-		src = kmap_atomic(sg_page(&dmreq->sg_in));
-		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
-		kunmap_atomic(src);
-	} else
-		memset(iv, 0, cc->iv_size);
+	spin_lock_irqsave(&_crypt_lock, flags);
+	if (_crypt_requests > 0)
+		_crypt_requests -= 1;
+	spin_unlock_irqrestore(&_crypt_lock, flags);
 
-	return r;
+	wake_up(&_crypt_waitq);
+	return 0;
 }
 
-static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
-			     struct dm_crypt_request *dmreq)
+static inline int dm_ocf_process(struct crypt_config *cc, struct scatterlist *out,
+		struct scatterlist *in, unsigned int len, u8 *iv, int iv_size, int write, void *priv)
 {
-	u8 *dst;
-	int r;
-
-	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
-		return 0;
+	struct cryptop *crp;
+	struct cryptodesc *crda = NULL;
+	unsigned long flags;
+	unsigned int cr;
 
-	dst = kmap_atomic(sg_page(&dmreq->sg_out));
-	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);
+	if(!iv) {
+		printk("dm_ocf_process: only CBC mode is supported\n");
+		return -EPERM;
+	}
 
-	/* Tweak the first block of plaintext sector */
-	if (!r)
-		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);
+	crp = crypto_getreq(1);	 /* only encryption/decryption */
+	if (!crp) {
+		printk("dm_ocf_process: crypto_getreq failed!!\n");
+		return -ENOMEM;
+	}
 
-	kunmap_atomic(dst);
-	return r;
-}
+	crda = crp->crp_desc;
 
-static struct crypt_iv_operations crypt_iv_plain_ops = {
-	.generator = crypt_iv_plain_gen
-};
-
-static struct crypt_iv_operations crypt_iv_plain64_ops = {
-	.generator = crypt_iv_plain64_gen
-};
+	crda->crd_flags  = (write)? CRD_F_ENCRYPT: 0;
+	crda->crd_alg    = cc->cr_dm.cri_alg;
+	crda->crd_skip   = 0;
+	crda->crd_len    = len;
+	crda->crd_inject = 0; /* NA */
+	crda->crd_klen   = cc->cr_dm.cri_klen;
+	crda->crd_key    = cc->cr_dm.cri_key;
 
-static struct crypt_iv_operations crypt_iv_essiv_ops = {
-	.ctr       = crypt_iv_essiv_ctr,
-	.dtr       = crypt_iv_essiv_dtr,
-	.init      = crypt_iv_essiv_init,
-	.wipe      = crypt_iv_essiv_wipe,
-	.generator = crypt_iv_essiv_gen
-};
-
-static struct crypt_iv_operations crypt_iv_benbi_ops = {
-	.ctr	   = crypt_iv_benbi_ctr,
-	.dtr	   = crypt_iv_benbi_dtr,
-	.generator = crypt_iv_benbi_gen
-};
+	if (iv) {
+		crda->crd_flags |= (CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT);
+		if( iv_size > EALG_MAX_BLOCK_LEN ) {
+			printk("dm_ocf_process: iv is too big!!\n");
+		}
+		memcpy(&crda->crd_iv, iv, iv_size);
+	}
 
-static struct crypt_iv_operations crypt_iv_null_ops = {
-	.generator = crypt_iv_null_gen
-};
+	/* according to the current implementation the in and the out are the same buffer for read, and different for write*/
+	if (sg_virt(out) != sg_virt(in)) {
+		memcpy(sg_virt(out), sg_virt(in), len);
+		dmprintk("dm_ocf_process: copy buffers!! \n");
+	}
 
-static struct crypt_iv_operations crypt_iv_lmk_ops = {
-	.ctr	   = crypt_iv_lmk_ctr,
-	.dtr	   = crypt_iv_lmk_dtr,
-	.init	   = crypt_iv_lmk_init,
-	.wipe	   = crypt_iv_lmk_wipe,
-	.generator = crypt_iv_lmk_gen,
-	.post	   = crypt_iv_lmk_post
-};
+	dmprintk("len: %d\n",len);
+	crp->crp_ilen = len; /* Total input length */
+	crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_BATCH;
+	crp->crp_buf = sg_virt(out);
+	crp->crp_opaque = priv;
+	if (write) {
+       crp->crp_callback = dm_ocf_wr_cb;
+	}
+	else {
+		crp->crp_callback = dm_ocf_rd_cb;
+	}
+	crp->crp_sid = cc->ocf_cryptoid;
+
+	spin_lock_irqsave(&_crypt_lock, flags);
+	while (crypto_dispatch(crp) != 0) {
+		if (_crypt_requests == 0) {
+			spin_unlock_irqrestore(&_crypt_lock, flags);
+			schedule();
+			spin_lock_irqsave(&_crypt_lock, flags);
+		} else {
+			cr = _crypt_requests;
+			spin_unlock_irqrestore(&_crypt_lock, flags);
+			wait_event(_crypt_waitq, _crypt_requests < cr);
+			spin_lock_irqsave(&_crypt_lock, flags);
+		}
+	}
+	_crypt_requests += 1;
+	spin_unlock_irqrestore(&_crypt_lock, flags);
 
-static void crypt_convert_init(struct crypt_config *cc,
-			       struct convert_context *ctx,
-			       struct bio *bio_out, struct bio *bio_in,
-			       sector_t sector)
-{
-	ctx->bio_in = bio_in;
-	ctx->bio_out = bio_out;
-	ctx->offset_in = 0;
-	ctx->offset_out = 0;
-	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
-	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
-	ctx->cc_sector = sector + cc->iv_offset;
-	init_completion(&ctx->restart);
+	return 0;
 }
 
-static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
-					     struct ablkcipher_request *req)
+static inline int
+ocf_crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
+                          struct scatterlist *in, unsigned int length,
+                          int write, sector_t sector, void *priv)
 {
-	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
-}
+	u8 iv[cc->iv_size];
+	int r;
+	if (cc->iv_gen_ops) {
+		r = cc->iv_gen_ops->generator(cc, iv, sector);
+		if (r < 0)
+			return r;
+		r = dm_ocf_process(cc, out, in, length, iv, cc->iv_size, write, priv);
+	} else {
+		r = dm_ocf_process(cc, out, in, length, NULL, 0, write, priv);
+	}
 
-static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
-					       struct dm_crypt_request *dmreq)
-{
-	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
+	return r;
 }
 
-static u8 *iv_of_dmreq(struct crypt_config *cc,
-		       struct dm_crypt_request *dmreq)
+/*
+ * Encrypt / decrypt data from one bio to another one (can be the same one)
+ */
+static int ocf_crypt_convert(struct crypt_config *cc,
+                         struct convert_context *ctx, struct crypt_io *io)
 {
-	return (u8 *)ALIGN((unsigned long)(dmreq + 1),
-		crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
-}
+	int r = 0;
+	long wr_timeout = 30000;
+	long wr_tm;
+	int num = 0;
+	void *priv = NULL;
+	struct ocf_wr_priv *ocf_wr_priv = NULL;
+	if (ctx->write) {
+		ocf_wr_priv = kmalloc(sizeof(struct ocf_wr_priv),GFP_KERNEL);
+		if(!ocf_wr_priv) {
+			printk("ocf_crypt_convert: out of memory \n");
+			return -ENOMEM;
+		}
+		ocf_wr_priv->dm_ocf_wr_pending = 0;
+		ocf_wr_priv->dm_ocf_wr_completed = 0;
+		init_waitqueue_head(&ocf_wr_priv->dm_ocf_wr_queue);
+		priv = ocf_wr_priv;
+	}
 
-static int crypt_convert_block(struct crypt_config *cc,
-			       struct convert_context *ctx,
-			       struct ablkcipher_request *req)
-{
-	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
-	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
-	struct dm_crypt_request *dmreq;
-	u8 *iv;
-	int r;
+	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
+	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
+		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
+		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
+		struct scatterlist sg_in, sg_out;
+		sg_init_table(&sg_in, 1);
+		sg_set_page(&sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
+				bv_in->bv_offset + ctx->offset_in);
+
+		sg_init_table(&sg_out, 1);
+		sg_set_page(&sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
+				bv_out->bv_offset + ctx->offset_out);
+
+		ctx->offset_in += sg_in.length;
+		if (ctx->offset_in >= bv_in->bv_len) {
+			ctx->offset_in = 0;
+			ctx->idx_in++;
+		}
 
-	dmreq = dmreq_of_req(cc, req);
-	iv = iv_of_dmreq(cc, dmreq);
+		ctx->offset_out += sg_out.length;
+		if (ctx->offset_out >= bv_out->bv_len) {
+			ctx->offset_out = 0;
+			ctx->idx_out++;
+		}
 
-	dmreq->iv_sector = ctx->cc_sector;
-	dmreq->ctx = ctx;
-	sg_init_table(&dmreq->sg_in, 1);
-	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
-		    bv_in->bv_offset + ctx->offset_in);
+		if(ctx->write) {
+			num++;
+		}
+		/* if last read in the context - send the io, so the OCF read callback will release the IO. */
+		else if(!(ctx->idx_in < ctx->bio_in->bi_vcnt && ctx->idx_out < ctx->bio_out->bi_vcnt)) {
+			priv = io;
+		}
 
-	sg_init_table(&dmreq->sg_out, 1);
-	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
-		    bv_out->bv_offset + ctx->offset_out);
+		r = ocf_crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
+		                              ctx->write, ctx->sector, priv);
+		if (r < 0){
+			printk("ocf_crypt_convert: ocf_crypt_convert_scatterlist failed \n");
+			break;
+		}
 
-	ctx->offset_in += 1 << SECTOR_SHIFT;
-	if (ctx->offset_in >= bv_in->bv_len) {
-		ctx->offset_in = 0;
-		ctx->idx_in++;
+		ctx->sector++;
 	}
 
-	ctx->offset_out += 1 << SECTOR_SHIFT;
-	if (ctx->offset_out >= bv_out->bv_len) {
-		ctx->offset_out = 0;
-		ctx->idx_out++;
+	if (ctx->write) {
+		ocf_wr_priv->dm_ocf_wr_pending += num;
+		wr_tm = wait_event_timeout(ocf_wr_priv->dm_ocf_wr_queue,
+				(ocf_wr_priv->dm_ocf_wr_pending == ocf_wr_priv->dm_ocf_wr_completed)
+									, msecs_to_jiffies(wr_timeout) );
+		if (!wr_tm) {
+			printk("ocf_crypt_convert: wr work was not finished in %ld msecs, %d pending %d completed.\n",
+				wr_timeout, ocf_wr_priv->dm_ocf_wr_pending, ocf_wr_priv->dm_ocf_wr_completed);
+		}
+		kfree(ocf_wr_priv);
 	}
 
+	return r;
+}
+
+#else /* CONFIG_OCF_DM_CRYPT */
+
+static int
+crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
+                          struct scatterlist *in, unsigned int length,
+                          int write, sector_t sector)
+{
+	u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
+	struct blkcipher_desc desc = {
+		.tfm = cc->tfm,
+		.info = iv,
+		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
+	};
+	int r;
 	if (cc->iv_gen_ops) {
-		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
+		r = cc->iv_gen_ops->generator(cc, iv, sector);
 		if (r < 0)
 			return r;
-	}
 
-	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
-				     1 << SECTOR_SHIFT, iv);
-
-	if (bio_data_dir(ctx->bio_in) == WRITE)
-		r = crypto_ablkcipher_encrypt(req);
-	else
-		r = crypto_ablkcipher_decrypt(req);
-
-	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
-		r = cc->iv_gen_ops->post(cc, iv, dmreq);
+		if (write)
+			r = crypto_blkcipher_encrypt_iv(&desc, out, in, length);
+		else
+			r = crypto_blkcipher_decrypt_iv(&desc, out, in, length);
+	} else {
+		if (write)
+			r = crypto_blkcipher_encrypt(&desc, out, in, length);
+		else
+			r = crypto_blkcipher_decrypt(&desc, out, in, length);
+	}
 
 	return r;
 }
 
-static void kcryptd_async_done(struct crypto_async_request *async_req,
-			       int error);
+#endif
 
-static void crypt_alloc_req(struct crypt_config *cc,
-			    struct convert_context *ctx)
+static void
+crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx,
+                   struct bio *bio_out, struct bio *bio_in,
+                   sector_t sector, int write)
 {
-	struct crypt_cpu *this_cc = this_crypt_config(cc);
-	unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
-
-	if (!this_cc->req)
-		this_cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
-
-	ablkcipher_request_set_tfm(this_cc->req, cc->tfms[key_index]);
-	ablkcipher_request_set_callback(this_cc->req,
-	    CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
-	    kcryptd_async_done, dmreq_of_req(cc, this_cc->req));
+	ctx->bio_in = bio_in;
+	ctx->bio_out = bio_out;
+	ctx->offset_in = 0;
+	ctx->offset_out = 0;
+	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
+	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
+	ctx->sector = sector + cc->iv_offset;
+	ctx->write = write;
 }
 
+#if !defined(CONFIG_OCF_DM_CRYPT)
 /*
  * Encrypt / decrypt data from one bio to another one (can be the same one)
  */
 static int crypt_convert(struct crypt_config *cc,
-			 struct convert_context *ctx)
+                         struct convert_context *ctx)
 {
-	struct crypt_cpu *this_cc = this_crypt_config(cc);
-	int r;
-
-	atomic_set(&ctx->cc_pending, 1);
-
+	int r = 0;
 	while(ctx->idx_in < ctx->bio_in->bi_vcnt &&
 	      ctx->idx_out < ctx->bio_out->bi_vcnt) {
+		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
+		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
+		struct scatterlist sg_in, sg_out;
+		sg_init_table(&sg_in, 1);
+		sg_set_page(&sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
+				bv_in->bv_offset + ctx->offset_in);
+		sg_init_table(&sg_out, 1);
+		sg_set_page(&sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
+				bv_out->bv_offset + ctx->offset_out);
+
+		ctx->offset_in += sg_in.length;
+		if (ctx->offset_in >= bv_in->bv_len) {
+			ctx->offset_in = 0;
+			ctx->idx_in++;
+		}
 
-		crypt_alloc_req(cc, ctx);
-
-		atomic_inc(&ctx->cc_pending);
-
-		r = crypt_convert_block(cc, ctx, this_cc->req);
-
-		switch (r) {
-		/* async */
-		case -EBUSY:
-			wait_for_completion(&ctx->restart);
-			INIT_COMPLETION(ctx->restart);
-			/* fall through*/
-		case -EINPROGRESS:
-			this_cc->req = NULL;
-			ctx->cc_sector++;
-			continue;
+		ctx->offset_out += sg_out.length;
+		if (ctx->offset_out >= bv_out->bv_len) {
+			ctx->offset_out = 0;
+			ctx->idx_out++;
+		}
 
-		/* sync */
-		case 0:
-			atomic_dec(&ctx->cc_pending);
-			ctx->cc_sector++;
-			cond_resched();
-			continue;
+		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
+		                              ctx->write, ctx->sector);
+		if (r < 0)
+			break;
 
-		/* error */
-		default:
-			atomic_dec(&ctx->cc_pending);
-			return r;
-		}
+		ctx->sector++;
 	}
 
-	return 0;
+	return r;
 }
+#endif
 
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
- * May return a smaller bio when running out of pages, indicated by
- * *out_of_pages set to 1.
+ * May return a smaller bio when running out of pages
  */
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
-				      unsigned *out_of_pages)
+static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size)
 {
-	struct crypt_config *cc = io->cc;
+	struct crypt_config *cc = io->target->private;
 	struct bio *clone;
 	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
-	unsigned i, len;
-	struct page *page;
-
+	gfp_t gfp_mask = GFP_NOIO;
+	unsigned int i;
 	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
 	if (!clone)
 		return NULL;
 
 	clone_init(io, clone);
-	*out_of_pages = 0;
 
 	for (i = 0; i < nr_iovecs; i++) {
-		page = mempool_alloc(cc->page_pool, gfp_mask);
-		if (!page) {
-			*out_of_pages = 1;
+		struct bio_vec *bv = bio_iovec_idx(clone, i);
+
+		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
+		if (!bv->bv_page)
 			break;
-		}
 
 		/*
-		 * If additional pages cannot be allocated without waiting,
-		 * return a partially-allocated bio.  The caller will then try
-		 * to allocate more bios while submitting this partial bio.
+		 * if additional pages cannot be allocated without waiting,
+		 * return a partially allocated bio, the caller will then try
+		 * to allocate additional bios while submitting this partial bio
 		 */
-		gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
-
-		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
+		if (i == (MIN_BIO_PAGES - 1))
+			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
 
-		if (!bio_add_page(clone, page, len, 0)) {
-			mempool_free(page, cc->page_pool);
-			break;
-		}
+		bv->bv_offset = 0;
+		if (size > PAGE_SIZE)
+			bv->bv_len = PAGE_SIZE;
+		else
+			bv->bv_len = size;
 
-		size -= len;
+		clone->bi_size += bv->bv_len;
+		clone->bi_vcnt++;
+		size -= bv->bv_len;
 	}
 
 	if (!clone->bi_size) {
@@ -853,113 +708,140 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
 	return clone;
 }
 
-static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
+static void crypt_free_buffer_pages(struct crypt_config *cc,
+                                    struct bio *clone)
 {
 	unsigned int i;
 	struct bio_vec *bv;
-
-	bio_for_each_segment_all(bv, clone, i) {
+	for (i = 0; i < clone->bi_vcnt; i++) {
+		bv = bio_iovec_idx(clone, i);
 		BUG_ON(!bv->bv_page);
 		mempool_free(bv->bv_page, cc->page_pool);
 		bv->bv_page = NULL;
 	}
 }
 
-static struct dm_crypt_io *crypt_io_alloc(struct crypt_config *cc,
-					  struct bio *bio, sector_t sector)
-{
-	struct dm_crypt_io *io;
-
-	io = mempool_alloc(cc->io_pool, GFP_NOIO);
-	io->cc = cc;
-	io->base_bio = bio;
-	io->sector = sector;
-	io->error = 0;
-	io->base_io = NULL;
-	atomic_set(&io->io_pending, 0);
-
-	return io;
-}
-
-static void crypt_inc_pending(struct dm_crypt_io *io)
-{
-	atomic_inc(&io->io_pending);
-}
-
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
- * If base_io is set, wait for the last fragment to complete.
  */
-static void crypt_dec_pending(struct dm_crypt_io *io)
+static void dec_pending(struct crypt_io *io, int error)
 {
-	struct crypt_config *cc = io->cc;
-	struct bio *base_bio = io->base_bio;
-	struct dm_crypt_io *base_io = io->base_io;
-	int error = io->error;
+	struct crypt_config *cc = (struct crypt_config *) io->target->private;
+	struct bio_vec *tovec, *fromvec;
+	struct bio *bio = io->base_bio;
+#ifdef CONFIG_HIGHMEM
+	struct bio *origbio;
+	unsigned long flags;
+	char *vfrom, *vto;
+	unsigned int i;
+#endif /* CONFIG_HIGHMEM */
+
+	if (error < 0)
+		io->error = error;
 
-	if (!atomic_dec_and_test(&io->io_pending))
+	if (!atomic_dec_and_test(&io->pending))
 		return;
 
-	mempool_free(io, cc->io_pool);
+#ifdef CONFIG_HIGHMEM
+	if (bio_flagged(bio, BIO_BOUNCED)) {
+		origbio = bio->bi_private;
+
+		/* We have bounced bio, so copy data back if it is necessary */
+		if (bio_data_dir(bio) == READ) {
+			__bio_for_each_segment(tovec, origbio, i, 0) {
+				fromvec = bio->bi_io_vec + i;
+
+				/* Page not bounced */
+				if (tovec->bv_page == fromvec->bv_page)
+					continue;
+
+				/*
+				 * Page bounced - we have to copy data.
+				 * We are using tovec->bv_offset and
+				 * tovec->bv_len as originals might
+				 * have been modified.
+				 */
+				vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
+				local_irq_save(flags);
+
+				vto = kmap_atomic(tovec->bv_page);
+				memcpy(vto + tovec->bv_offset, vfrom, tovec->bv_len);
+				kunmap_atomic(vto);
+
+				local_irq_restore(flags);
+			}
+		}
 
-	if (likely(!base_io))
-		bio_endio(base_bio, error);
-	else {
-		if (error && !base_io->error)
-			base_io->error = error;
-		crypt_dec_pending(base_io);
+		/* Free bounced pages */
+		__bio_for_each_segment(fromvec, bio, i, 0) {
+			tovec = origbio->bi_io_vec + i;
+
+			/* Page not bounced */
+			if (tovec->bv_page == fromvec->bv_page)
+				continue;
+
+			/* Page bounced: free it! */
+			mempool_free(fromvec->bv_page, cc->page_pool);
+		}
+
+		/* Release our bounced bio */
+		bio_put(bio);
+		bio = origbio;
 	}
+#endif /* CONFIG_HIGHMEM */
+
+	bio_endio(bio, io->error);
+	mempool_free(io, cc->io_pool);
 }
 
 /*
- * kcryptd/kcryptd_io:
+ * kcryptd:
  *
  * Needed because it would be very unwise to do decryption in an
  * interrupt context.
- *
- * kcryptd performs the actual encryption or decryption.
- *
- * kcryptd_io performs the IO submission.
- *
- * They must be separated as otherwise the final stages could be
- * starved by new requests which can block in the first stages due
- * to memory allocation.
- *
- * The work is done per CPU global for all dm-crypt instances.
- * They should not depend on each other and do not block.
  */
-static void crypt_endio(struct bio *clone, int error)
-{
-	struct dm_crypt_io *io = clone->bi_private;
-	struct crypt_config *cc = io->cc;
-	unsigned rw = bio_data_dir(clone);
+static struct workqueue_struct *_kcryptd_workqueue;
+static void kcryptd_do_work(struct work_struct *work);
 
-	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
-		error = -EIO;
+static void kcryptd_queue_io(struct crypt_io *io)
+{
+	INIT_WORK(&io->work, kcryptd_do_work);
+	queue_work(_kcryptd_workqueue, &io->work);
+}
 
+static void crypt_endio(struct bio *clone, int error)
+{
+	struct crypt_io *io = clone->bi_private;
+	struct crypt_config *cc = io->target->private;
+	unsigned read_io = bio_data_dir(clone) == READ;
 	/*
-	 * free the processed pages
+	 * free the processed pages, even if
+	 * it's only a partially completed write
 	 */
-	if (rw == WRITE)
+	if (!read_io)
 		crypt_free_buffer_pages(cc, clone);
 
-	bio_put(clone);
-
-	if (rw == READ && !error) {
-		kcryptd_queue_crypt(io);
-		return;
+	if (!read_io) {
+		goto out;
 	}
+	if (unlikely(!bio_flagged(clone, BIO_UPTODATE))) {
+		error = -EIO;
+		goto out;
+	}
+	bio_put(clone);
+	io->post_process = 1;
+	kcryptd_queue_io(io);
+	return;
 
-	if (unlikely(error))
-		io->error = error;
-
-	crypt_dec_pending(io);
+out:
+	bio_put(clone);
+	dec_pending(io, error);
 }
 
-static void clone_init(struct dm_crypt_io *io, struct bio *clone)
+static void clone_init(struct crypt_io *io, struct bio *clone)
 {
-	struct crypt_config *cc = io->cc;
+	struct crypt_config *cc = io->target->private;
 
 	clone->bi_private = io;
 	clone->bi_end_io  = crypt_endio;
@@ -967,246 +849,136 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
 	clone->bi_rw      = io->base_bio->bi_rw;
 }
 
-static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
+static void process_read(struct crypt_io *io)
 {
-	struct crypt_config *cc = io->cc;
+	struct crypt_config *cc = io->target->private;
 	struct bio *base_bio = io->base_bio;
 	struct bio *clone;
+	sector_t sector = base_bio->bi_sector - io->target->begin;
+	atomic_inc(&io->pending);
 
 	/*
 	 * The block layer might modify the bvec array, so always
 	 * copy the required bvecs because we need the original
 	 * one in order to decrypt the whole bio data *afterwards*.
 	 */
-	clone = bio_clone_bioset(base_bio, gfp, cc->bs);
-	if (!clone)
-		return 1;
-
-	crypt_inc_pending(io);
-
-	clone_init(io, clone);
-	clone->bi_sector = cc->start + io->sector;
-
-	generic_make_request(clone);
-	return 0;
-}
-
-static void kcryptd_io_write(struct dm_crypt_io *io)
-{
-	struct bio *clone = io->ctx.bio_out;
-	generic_make_request(clone);
-}
-
-static void kcryptd_io(struct work_struct *work)
-{
-	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
-
-	if (bio_data_dir(io->base_bio) == READ) {
-		crypt_inc_pending(io);
-		if (kcryptd_io_read(io, GFP_NOIO))
-			io->error = -ENOMEM;
-		crypt_dec_pending(io);
-	} else
-		kcryptd_io_write(io);
-}
-
-static void kcryptd_queue_io(struct dm_crypt_io *io)
-{
-	struct crypt_config *cc = io->cc;
-
-	INIT_WORK(&io->work, kcryptd_io);
-	queue_work(cc->io_queue, &io->work);
-}
-
-static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
-{
-	struct bio *clone = io->ctx.bio_out;
-	struct crypt_config *cc = io->cc;
-
-	if (unlikely(io->error < 0)) {
-		crypt_free_buffer_pages(cc, clone);
-		bio_put(clone);
-		crypt_dec_pending(io);
+	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
+	if (unlikely(!clone)) {
+		dec_pending(io, -ENOMEM);
 		return;
 	}
 
-	/* crypt_convert should have filled the clone bio */
-	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
-
-	clone->bi_sector = cc->start + io->sector;
+	clone_init(io, clone);
+	clone->bi_idx = 0;
+	clone->bi_vcnt = bio_segments(base_bio);
+	clone->bi_size = base_bio->bi_size;
+	clone->bi_sector = cc->start + sector;
+	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
+	       sizeof(struct bio_vec) * clone->bi_vcnt);
 
-	if (async)
-		kcryptd_queue_io(io);
-	else
-		generic_make_request(clone);
+	generic_make_request(clone);
 }
 
-static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
+static void process_write(struct crypt_io *io)
 {
-	struct crypt_config *cc = io->cc;
+	struct crypt_config *cc = io->target->private;
+	struct bio *base_bio = io->base_bio;
 	struct bio *clone;
-	struct dm_crypt_io *new_io;
-	int crypt_finished;
-	unsigned out_of_pages = 0;
-	unsigned remaining = io->base_bio->bi_size;
-	sector_t sector = io->sector;
-	int r;
+	struct convert_context ctx;
+	unsigned remaining = base_bio->bi_size;
+	sector_t sector = base_bio->bi_sector - io->target->begin;
+	atomic_inc(&io->pending);
 
-	/*
-	 * Prevent io from disappearing until this function completes.
-	 */
-	crypt_inc_pending(io);
-	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
+	crypt_convert_init(cc, &ctx, NULL, base_bio, sector, 1);
 
 	/*
 	 * The allocated buffers can be smaller than the whole bio,
 	 * so repeat the whole process until all the data can be handled.
 	 */
 	while (remaining) {
-		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
+		clone = crypt_alloc_buffer(io, remaining);
 		if (unlikely(!clone)) {
-			io->error = -ENOMEM;
-			break;
+			dec_pending(io, -ENOMEM);
+			return;
+		}
+
+		ctx.bio_out = clone;
+		ctx.idx_out = 0;
+#if defined(CONFIG_OCF_DM_CRYPT)
+	if (unlikely(ocf_crypt_convert(cc, &ctx, io)< 0)) {
+#else
+		if (unlikely(crypt_convert(cc, &ctx) < 0)) {
+#endif
+			crypt_free_buffer_pages(cc, clone);
+			bio_put(clone);
+			dec_pending(io, -EIO);
+			return;
 		}
 
-		io->ctx.bio_out = clone;
-		io->ctx.idx_out = 0;
+		/* crypt_convert should have filled the clone bio */
+		BUG_ON(ctx.idx_out < clone->bi_vcnt);
 
+		clone->bi_sector = cc->start + sector;
 		remaining -= clone->bi_size;
 		sector += bio_sectors(clone);
 
-		crypt_inc_pending(io);
-
-		r = crypt_convert(cc, &io->ctx);
-		if (r < 0)
-			io->error = -EIO;
-
-		crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
-
-		/* Encryption was already finished, submit io now */
-		if (crypt_finished) {
-			kcryptd_crypt_write_io_submit(io, 0);
-
-			/*
-			 * If there was an error, do not try next fragments.
-			 * For async, error is processed in async handler.
-			 */
-			if (unlikely(r < 0))
-				break;
-
-			io->sector = sector;
-		}
+		/* Grab another reference to the io struct
+		 * before we kick off the request */
+		if (remaining)
+			atomic_inc(&io->pending);
 
-		/*
-		 * Out of memory -> run queues
-		 * But don't wait if split was due to the io size restriction
-		 */
-		if (unlikely(out_of_pages))
-			congestion_wait(BLK_RW_ASYNC, HZ/100);
+		generic_make_request(clone);
 
-		/*
-		 * With async crypto it is unsafe to share the crypto context
-		 * between fragments, so switch to a new dm_crypt_io structure.
-		 */
-		if (unlikely(!crypt_finished && remaining)) {
-			new_io = crypt_io_alloc(io->cc, io->base_bio,
-						sector);
-			crypt_inc_pending(new_io);
-			crypt_convert_init(cc, &new_io->ctx, NULL,
-					   io->base_bio, sector);
-			new_io->ctx.idx_in = io->ctx.idx_in;
-			new_io->ctx.offset_in = io->ctx.offset_in;
-
-			/*
-			 * Fragments after the first use the base_io
-			 * pending count.
-			 */
-			if (!io->base_io)
-				new_io->base_io = io;
-			else {
-				new_io->base_io = io->base_io;
-				crypt_inc_pending(io->base_io);
-				crypt_dec_pending(io);
-			}
+		/* Do not reference clone after this - it
+		 * may be gone already. */
 
-			io = new_io;
-		}
+		/* out of memory -> run queues */
+		if (remaining)
+			congestion_wait(WRITE, HZ/100);
 	}
-
-	crypt_dec_pending(io);
 }
 
-static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
+static void process_read_endio(struct crypt_io *io)
 {
-	crypt_dec_pending(io);
-}
-
-static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
-{
-	struct crypt_config *cc = io->cc;
-	int r = 0;
-
-	crypt_inc_pending(io);
+	struct crypt_config *cc = io->target->private;
+	struct convert_context ctx;
+#if defined(CONFIG_OCF_DM_CRYPT)
+	u32 r;
+#endif
 
-	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
-			   io->sector);
+	crypt_convert_init(cc, &ctx, io->base_bio, io->base_bio,
+			   io->base_bio->bi_sector - io->target->begin, 0);
 
-	r = crypt_convert(cc, &io->ctx);
-	if (r < 0)
-		io->error = -EIO;
+#if defined(CONFIG_OCF_DM_CRYPT)
+	r = ocf_crypt_convert(cc, &ctx, io);
+	if (r < 0) {
+		u32 rd_failed_timeout = 500;
+		wait_queue_head_t dm_ocf_rd_failed_queu;
 
-	if (atomic_dec_and_test(&io->ctx.cc_pending))
-		kcryptd_crypt_read_done(io);
+		init_waitqueue_head(&dm_ocf_rd_failed_queu);
 
-	crypt_dec_pending(io);
-}
+		/* wait a bit before freeing the io, maybe few requests are still in process */
+		wait_event_timeout(dm_ocf_rd_failed_queu, 0, msecs_to_jiffies(rd_failed_timeout) );
 
-static void kcryptd_async_done(struct crypto_async_request *async_req,
-			       int error)
-{
-	struct dm_crypt_request *dmreq = async_req->data;
-	struct convert_context *ctx = dmreq->ctx;
-	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
-	struct crypt_config *cc = io->cc;
+		dec_pending(io, r);
 
-	if (error == -EINPROGRESS) {
-		complete(&ctx->restart);
-		return;
 	}
-
-	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
-		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
-
-	if (error < 0)
-		io->error = -EIO;
-
-	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
-
-	if (!atomic_dec_and_test(&ctx->cc_pending))
-		return;
-
-	if (bio_data_dir(io->base_bio) == READ)
-		kcryptd_crypt_read_done(io);
-	else
-		kcryptd_crypt_write_io_submit(io, 1);
+#else
+	dec_pending(io, crypt_convert(cc, &ctx));
+#endif
 }
 
-static void kcryptd_crypt(struct work_struct *work)
+static void kcryptd_do_work(struct work_struct *work)
 {
-	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
-
-	if (bio_data_dir(io->base_bio) == READ)
-		kcryptd_crypt_read_convert(io);
+	struct crypt_io *io = container_of(work, struct crypt_io, work);
+	if (io->post_process) {
+		process_read_endio(io);
+	}
+	else if (bio_data_dir(io->base_bio) == READ) {
+		process_read(io);
+	}
 	else
-		kcryptd_crypt_write_convert(io);
-}
-
-static void kcryptd_queue_crypt(struct dm_crypt_io *io)
-{
-	struct crypt_config *cc = io->cc;
-
-	INIT_WORK(&io->work, kcryptd_crypt);
-	queue_work(cc->crypt_queue, &io->work);
+		process_write(io);
 }
 
 /*
@@ -1215,15 +987,17 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
 static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
 {
 	char buffer[3];
+	char *endp;
 	unsigned int i;
-
 	buffer[2] = '\0';
 
 	for (i = 0; i < size; i++) {
 		buffer[0] = *hex++;
 		buffer[1] = *hex++;
 
-		if (kstrtou8(buffer, 16, &key[i]))
+		key[i] = (u8)simple_strtoul(buffer, &endp, 16);
+
+		if (endp != &buffer[2])
 			return -EINVAL;
 	}
 
@@ -1233,471 +1007,370 @@ static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
 	return 0;
 }
 
-static void crypt_free_tfms(struct crypt_config *cc)
-{
-	unsigned i;
-
-	if (!cc->tfms)
-		return;
-
-	for (i = 0; i < cc->tfms_count; i++)
-		if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
-			crypto_free_ablkcipher(cc->tfms[i]);
-			cc->tfms[i] = NULL;
-		}
-
-	kfree(cc->tfms);
-	cc->tfms = NULL;
-}
-
-static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
-{
-	unsigned i;
-	int err;
-
-	cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
-			   GFP_KERNEL);
-	if (!cc->tfms)
-		return -ENOMEM;
-
-	for (i = 0; i < cc->tfms_count; i++) {
-		cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
-		if (IS_ERR(cc->tfms[i])) {
-			err = PTR_ERR(cc->tfms[i]);
-			crypt_free_tfms(cc);
-			return err;
-		}
-	}
-
-	return 0;
-}
-
-static int crypt_setkey_allcpus(struct crypt_config *cc)
-{
-	unsigned subkey_size = cc->key_size >> ilog2(cc->tfms_count);
-	int err = 0, i, r;
-
-	for (i = 0; i < cc->tfms_count; i++) {
-		r = crypto_ablkcipher_setkey(cc->tfms[i],
-					     cc->key + (i * subkey_size),
-					     subkey_size);
-		if (r)
-			err = r;
-	}
-
-	return err;
-}
-
 static int crypt_set_key(struct crypt_config *cc, char *key)
 {
-	int r = -EINVAL;
-	int key_string_len = strlen(key);
-
-	/* The key size may not be changed. */
-	if (cc->key_size != (key_string_len >> 1))
-		goto out;
+	unsigned key_size = strlen(key) >> 1;
+	if (cc->key_size && cc->key_size != key_size)
+		return -EINVAL;
 
-	/* Hyphen (which gives a key_size of zero) means there is no key. */
-	if (!cc->key_size && strcmp(key, "-"))
-		goto out;
+	cc->key_size = key_size; /* initial settings */
 
-	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
-		goto out;
+	if ((!key_size && strcmp(key, "-")) ||
+	    (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
+		return -EINVAL;
 
 	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
 
-	r = crypt_setkey_allcpus(cc);
-
-out:
-	/* Hex key string not needed after here, so wipe it. */
-	memset(key, '0', key_string_len);
-
-	return r;
+	return 0;
 }
 
 static int crypt_wipe_key(struct crypt_config *cc)
 {
 	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
 	memset(&cc->key, 0, cc->key_size * sizeof(u8));
-
-	return crypt_setkey_allcpus(cc);
-}
-
-static void crypt_dtr(struct dm_target *ti)
-{
-	struct crypt_config *cc = ti->private;
-	struct crypt_cpu *cpu_cc;
-	int cpu;
-
-	ti->private = NULL;
-
-	if (!cc)
-		return;
-
-	if (cc->io_queue)
-		destroy_workqueue(cc->io_queue);
-	if (cc->crypt_queue)
-		destroy_workqueue(cc->crypt_queue);
-
-	if (cc->cpu)
-		for_each_possible_cpu(cpu) {
-			cpu_cc = per_cpu_ptr(cc->cpu, cpu);
-			if (cpu_cc->req)
-				mempool_free(cpu_cc->req, cc->req_pool);
-		}
-
-	crypt_free_tfms(cc);
-
-	if (cc->bs)
-		bioset_free(cc->bs);
-
-	if (cc->page_pool)
-		mempool_destroy(cc->page_pool);
-	if (cc->req_pool)
-		mempool_destroy(cc->req_pool);
-	if (cc->io_pool)
-		mempool_destroy(cc->io_pool);
-
-	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
-		cc->iv_gen_ops->dtr(cc);
-
-	if (cc->dev)
-		dm_put_device(ti, cc->dev);
-
-	if (cc->cpu)
-		free_percpu(cc->cpu);
-
-	kzfree(cc->cipher);
-	kzfree(cc->cipher_string);
-
-	/* Must zero key material before freeing */
-	kzfree(cc);
+	return 0;
 }
 
-static int crypt_ctr_cipher(struct dm_target *ti,
-			    char *cipher_in, char *key)
+/*
+ * Construct an encryption mapping:
+ * <cipher> <key> <iv_offset> <dev_path> <start>
+ */
+static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
-	struct crypt_config *cc = ti->private;
-	char *tmp, *cipher, *chainmode, *ivmode, *ivopts, *keycount;
-	char *cipher_api = NULL;
-	int ret = -EINVAL;
-	char dummy;
-
-	/* Convert to crypto api definition? */
-	if (strchr(cipher_in, '(')) {
-		ti->error = "Bad cipher specification";
-		return -EINVAL;
-	}
-
-	cc->cipher_string = kstrdup(cipher_in, GFP_KERNEL);
-	if (!cc->cipher_string)
-		goto bad_mem;
-
-	/*
-	 * Legacy dm-crypt cipher specification
-	 * cipher[:keycount]-mode-iv:ivopts
-	 */
-	tmp = cipher_in;
-	keycount = strsep(&tmp, "-");
-	cipher = strsep(&keycount, ":");
-
-	if (!keycount)
-		cc->tfms_count = 1;
-	else if (sscanf(keycount, "%u%c", &cc->tfms_count, &dummy) != 1 ||
-		 !is_power_of_2(cc->tfms_count)) {
-		ti->error = "Bad cipher key count specification";
+	struct crypt_config *cc;
+#ifndef CONFIG_OCF_DM_CRYPT
+	struct crypto_blkcipher *tfm;
+#endif
+	char *tmp;
+	char *cipher;
+	char *chainmode;
+	char *ivmode;
+	char *ivopts;
+	unsigned int key_size;
+	unsigned long long tmpll;
+	if (argc != 5) {
+		ti->error = "Not enough arguments";
 		return -EINVAL;
 	}
-	cc->key_parts = cc->tfms_count;
-
-	cc->cipher = kstrdup(cipher, GFP_KERNEL);
-	if (!cc->cipher)
-		goto bad_mem;
 
+	tmp = argv[0];
+	cipher = strsep(&tmp, "-");
 	chainmode = strsep(&tmp, "-");
 	ivopts = strsep(&tmp, "-");
 	ivmode = strsep(&ivopts, ":");
 
 	if (tmp)
-		DMWARN("Ignoring unexpected additional cipher options");
+		DMWARN("Unexpected additional cipher options");
 
-	cc->cpu = __alloc_percpu(sizeof(*(cc->cpu)),
-				 __alignof__(struct crypt_cpu));
-	if (!cc->cpu) {
-		ti->error = "Cannot allocate per cpu state";
-		goto bad_mem;
+	key_size = strlen(argv[1]) >> 1;
+
+	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
+	if (cc == NULL) {
+		ti->error =
+			"Cannot allocate transparent encryption context";
+		return -ENOMEM;
 	}
 
-	/*
-	 * For compatibility with the original dm-crypt mapping format, if
-	 * only the cipher name is supplied, use cbc-plain.
-	 */
-	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
+	if (crypt_set_key(cc, argv[1])) {
+		ti->error = "Error decoding key";
+		goto bad1;
+	}
+
+	/* Compatiblity mode for old dm-crypt cipher strings */
+	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
 		chainmode = "cbc";
 		ivmode = "plain";
 	}
 
 	if (strcmp(chainmode, "ecb") && !ivmode) {
-		ti->error = "IV mechanism required";
-		return -EINVAL;
+		ti->error = "This chaining mode requires an IV mechanism";
+		goto bad1;
 	}
 
-	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
-	if (!cipher_api)
-		goto bad_mem;
-
-	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
-		       "%s(%s)", chainmode, cipher);
-	if (ret < 0) {
-		kfree(cipher_api);
-		goto bad_mem;
+	if (snprintf(cc->cipher, CRYPTO_MAX_ALG_NAME, "%s(%s)", chainmode,
+		     cipher) >= CRYPTO_MAX_ALG_NAME) {
+		ti->error = "Chain mode + cipher name is too long";
+		goto bad1;
 	}
 
-	/* Allocate cipher */
-	ret = crypt_alloc_tfms(cc, cipher_api);
-	if (ret < 0) {
-		ti->error = "Error allocating crypto tfm";
-		goto bad;
-	}
+#if defined(CONFIG_OCF_DM_CRYPT)
+	/* prepare a new OCF session */
+        memset(&cc->cr_dm, 0, sizeof(struct cryptoini));
 
-	/* Initialize and set key */
-	ret = crypt_set_key(cc, key);
-	if (ret < 0) {
-		ti->error = "Error decoding and setting key";
-		goto bad;
+	if((strcmp(cipher,"aes") == 0) && (strcmp(chainmode, "cbc") == 0))
+		cc->cr_dm.cri_alg  = CRYPTO_AES_CBC;
+	else if((strcmp(cipher,"des") == 0) && (strcmp(chainmode, "cbc") == 0))
+		cc->cr_dm.cri_alg  = CRYPTO_DES_CBC;
+	else if((strcmp(cipher,"des3_ede") == 0) && (strcmp(chainmode, "cbc") == 0))
+		cc->cr_dm.cri_alg  = CRYPTO_3DES_CBC;
+	else {
+		ti->error = DM_MSG_PREFIX "using OCF: unknown cipher or bad chain mode";
+		goto bad1;
 	}
 
-	/* Initialize IV */
-	cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
-	if (cc->iv_size)
-		/* at least a 64 bit sector number should fit in our buffer */
-		cc->iv_size = max(cc->iv_size,
-				  (unsigned int)(sizeof(u64) / sizeof(u8)));
-	else if (ivmode) {
-		DMWARN("Selected cipher does not support IVs");
-		ivmode = NULL;
+	/*strcpy(cc->cipher, cipher);*/
+	dmprintk("key size is %d\n",cc->key_size);
+        cc->cr_dm.cri_klen = cc->key_size*8;
+        cc->cr_dm.cri_key  = cc->key;
+        cc->cr_dm.cri_next = NULL;
+
+        if(crypto_newsession(&cc->ocf_cryptoid, &cc->cr_dm, 0)){
+		dmprintk("crypt_ctr: crypto_newsession failed\n");
+                ti->error = DM_MSG_PREFIX "crypto_newsession failed";
+                goto bad2;
+        }
+
+#else
+
+	tfm = crypto_alloc_blkcipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(tfm)) {
+		ti->error = "Error allocating crypto tfm";
+		goto bad1;
 	}
+#endif
+	strcpy(cc->cipher, cipher);
+	strcpy(cc->chainmode, chainmode);
+#if !defined(CONFIG_OCF_DM_CRYPT)
+	cc->tfm = tfm;
+#endif
+
+	/*
+	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
+	 * See comments at iv code
+	 */
 
-	/* Choose ivmode, see comments at iv code. */
 	if (ivmode == NULL)
 		cc->iv_gen_ops = NULL;
 	else if (strcmp(ivmode, "plain") == 0)
 		cc->iv_gen_ops = &crypt_iv_plain_ops;
-	else if (strcmp(ivmode, "plain64") == 0)
-		cc->iv_gen_ops = &crypt_iv_plain64_ops;
 	else if (strcmp(ivmode, "essiv") == 0)
 		cc->iv_gen_ops = &crypt_iv_essiv_ops;
+#if !defined(CONFIG_OCF_DM_CRYPT)
 	else if (strcmp(ivmode, "benbi") == 0)
 		cc->iv_gen_ops = &crypt_iv_benbi_ops;
+#endif
 	else if (strcmp(ivmode, "null") == 0)
 		cc->iv_gen_ops = &crypt_iv_null_ops;
-	else if (strcmp(ivmode, "lmk") == 0) {
-		cc->iv_gen_ops = &crypt_iv_lmk_ops;
-		/* Version 2 and 3 is recognised according
-		 * to length of provided multi-key string.
-		 * If present (version 3), last key is used as IV seed.
-		 */
-		if (cc->key_size % cc->key_parts)
-			cc->key_parts++;
-	} else {
-		ret = -EINVAL;
+	else {
 		ti->error = "Invalid IV mode";
-		goto bad;
-	}
-
-	/* Allocate IV */
-	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
-		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
-		if (ret < 0) {
-			ti->error = "Error creating IV";
-			goto bad;
-		}
+		goto bad2;
 	}
 
-	/* Initialize IV (set keys for ESSIV etc) */
-	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
-		ret = cc->iv_gen_ops->init(cc);
-		if (ret < 0) {
-			ti->error = "Error initialising IV";
-			goto bad;
-		}
+#if defined(CONFIG_OCF_DM_CRYPT)
+	switch (cc->cr_dm.cri_alg) {
+		case CRYPTO_AES_CBC:
+			cc->iv_size = 16;
+			break;
+		default:
+			cc->iv_size = 8;
+			break;
 	}
 
-	ret = 0;
-bad:
-	kfree(cipher_api);
-	return ret;
-
-bad_mem:
-	ti->error = "Cannot allocate cipher strings";
-	return -ENOMEM;
-}
-
-/*
- * Construct an encryption mapping:
- * <cipher> <key> <iv_offset> <dev_path> <start>
- */
-static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
-{
-	struct crypt_config *cc;
-	unsigned int key_size, opt_params;
-	unsigned long long tmpll;
-	int ret;
-	struct dm_arg_set as;
-	const char *opt_string;
-	char dummy;
-
-	static struct dm_arg _args[] = {
-		{0, 1, "Invalid number of feature args"},
-	};
-
-	if (argc < 5) {
-		ti->error = "Not enough arguments";
-		return -EINVAL;
-	}
+	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
+	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
+		goto bad2;
+#else
 
-	key_size = strlen(argv[1]) >> 1;
+	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
+	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
+		goto bad2;
 
-	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
-	if (!cc) {
-		ti->error = "Cannot allocate encryption context";
-		return -ENOMEM;
+	cc->iv_size = crypto_blkcipher_ivsize(tfm);
+	if (cc->iv_size)
+		/* at least a 64 bit sector number should fit in our buffer */
+		cc->iv_size = max(cc->iv_size,
+		                  (unsigned int)(sizeof(u64) / sizeof(u8)));
+	else {
+		if (cc->iv_gen_ops) {
+			DMWARN("Selected cipher does not support IVs");
+			if (cc->iv_gen_ops->dtr)
+				cc->iv_gen_ops->dtr(cc);
+			cc->iv_gen_ops = NULL;
+		}
 	}
-	cc->key_size = key_size;
-
-	ti->private = cc;
-	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
-	if (ret < 0)
-		goto bad;
-
-	ret = -ENOMEM;
+#endif
 	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
 	if (!cc->io_pool) {
 		ti->error = "Cannot allocate crypt io mempool";
-		goto bad;
-	}
-
-	cc->dmreq_start = sizeof(struct ablkcipher_request);
-	cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
-	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
-	cc->dmreq_start += crypto_ablkcipher_alignmask(any_tfm(cc)) &
-			   ~(crypto_tfm_ctx_alignment() - 1);
-
-	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
-			sizeof(struct dm_crypt_request) + cc->iv_size);
-	if (!cc->req_pool) {
-		ti->error = "Cannot allocate crypt request mempool";
-		goto bad;
+		goto bad3;
 	}
 
 	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
 	if (!cc->page_pool) {
 		ti->error = "Cannot allocate page mempool";
-		goto bad;
+		goto bad4;
 	}
 
-	cc->bs = bioset_create(MIN_IOS, 0);
+	cc->bs = bioset_create(MIN_IOS, MIN_IOS);
 	if (!cc->bs) {
 		ti->error = "Cannot allocate crypt bioset";
-		goto bad;
+		goto bad_bs;
+	}
+#if !defined(CONFIG_OCF_DM_CRYPT)
+	if (crypto_blkcipher_setkey(tfm, cc->key, key_size) < 0) {
+		ti->error = "Error setting key";
+		goto bad5;
 	}
+#endif
 
-	ret = -EINVAL;
-	if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
+	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
 		ti->error = "Invalid iv_offset sector";
-		goto bad;
+		goto bad5;
 	}
 	cc->iv_offset = tmpll;
 
-	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
-		ti->error = "Device lookup failed";
-		goto bad;
-	}
-
-	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) {
+	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
 		ti->error = "Invalid device sector";
-		goto bad;
+		goto bad5;
 	}
 	cc->start = tmpll;
 
-	argv += 5;
-	argc -= 5;
-
-	/* Optional parameters */
-	if (argc) {
-		as.argc = argc;
-		as.argv = argv;
-
-		ret = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
-		if (ret)
-			goto bad;
-
-		opt_string = dm_shift_arg(&as);
+	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
+								&cc->dev)) {
+		ti->error = "Device lookup failed";
+		goto bad5;
+	}
 
-		if (opt_params == 1 && opt_string &&
-		    !strcasecmp(opt_string, "allow_discards"))
-			ti->num_discard_bios = 1;
-		else if (opt_params) {
-			ret = -EINVAL;
-			ti->error = "Invalid feature arguments";
-			goto bad;
+	if (ivmode && cc->iv_gen_ops) {
+		if (ivopts)
+			*(ivopts - 1) = ':';
+		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
+		if (!cc->iv_mode) {
+			ti->error = "Error kmallocing iv_mode string";
+			goto bad5;
 		}
-	}
+		strcpy(cc->iv_mode, ivmode);
+	} else
+		cc->iv_mode = NULL;
 
-	ret = -ENOMEM;
-	cc->io_queue = alloc_workqueue("kcryptd_io",
-				       WQ_NON_REENTRANT|
-				       WQ_MEM_RECLAIM,
-				       1);
-	if (!cc->io_queue) {
-		ti->error = "Couldn't create kcryptd io queue";
-		goto bad;
-	}
+	ti->private = cc;
+	return 0;
 
-	cc->crypt_queue = alloc_workqueue("kcryptd",
-					  WQ_NON_REENTRANT|
-					  WQ_CPU_INTENSIVE|
-					  WQ_MEM_RECLAIM,
-					  1);
-	if (!cc->crypt_queue) {
-		ti->error = "Couldn't create kcryptd queue";
-		goto bad;
-	}
+bad5:
+	bioset_free(cc->bs);
+bad_bs:
+	mempool_destroy(cc->page_pool);
+bad4:
+	mempool_destroy(cc->io_pool);
+bad3:
+	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
+		cc->iv_gen_ops->dtr(cc);
+bad2:
+#if defined(CONFIG_OCF_DM_CRYPT)
+	crypto_freesession(cc->ocf_cryptoid);
+#else
+	crypto_free_blkcipher(tfm);
+#endif
+bad1:
+	/* Must zero key material before freeing */
+	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
+	kfree(cc);
+	return -EINVAL;
+}
+
+static void crypt_dtr(struct dm_target *ti)
+{
+	struct crypt_config *cc = (struct crypt_config *) ti->private;
+	flush_workqueue(_kcryptd_workqueue);
 
-	ti->num_flush_bios = 1;
-	ti->discard_zeroes_data_unsupported = true;
+	bioset_free(cc->bs);
+	mempool_destroy(cc->page_pool);
+	mempool_destroy(cc->io_pool);
 
-	return 0;
+	kfree(cc->iv_mode);
+	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
+		cc->iv_gen_ops->dtr(cc);
+#if defined(CONFIG_OCF_DM_CRYPT)
+	crypto_freesession(cc->ocf_cryptoid);
+#else
+	crypto_free_blkcipher(cc->tfm);
+#endif
+	dm_put_device(ti, cc->dev);
 
-bad:
-	crypt_dtr(ti);
-	return ret;
+	/* Must zero key material before freeing */
+	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
+	kfree(cc);
 }
 
 static int crypt_map(struct dm_target *ti, struct bio *bio)
 {
-	struct dm_crypt_io *io;
 	struct crypt_config *cc = ti->private;
+	struct crypt_io *io;
+#ifdef CONFIG_HIGHMEM
+	struct bio *newbio = NULL;
+	struct bio_vec *from, *to;
+	struct page *page;
+	char *vto, *vfrom;
+	unsigned int i;
+#endif /* CONFIG_HIGHMEM */
+
+	io = mempool_alloc(cc->io_pool, GFP_NOIO);
 
 	/*
-	 * If bio is REQ_FLUSH or REQ_DISCARD, just bypass crypt queues.
-	 * - for REQ_FLUSH device-mapper core ensures that no IO is in-flight
-	 * - for REQ_DISCARD caller must use flush if IO ordering matters
+	 * Because OCF and CESA do not support high memory
+	 * we have to create bounce pages if request
+	 * with data in high memory arrives.
 	 */
-	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
-		bio->bi_bdev = cc->dev->bdev;
-		if (bio_sectors(bio))
-			bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
-		return DM_MAPIO_REMAPPED;
+
+#ifdef CONFIG_HIGHMEM
+	/* Check if we have to bounce */
+	bio_for_each_segment(from, bio, i) {
+		page = from->bv_page;
+
+		if (!PageHighMem(page))
+			continue;
+
+		/* We have to bounce */
+		if (newbio == NULL) {
+			newbio = bio_alloc(GFP_NOIO, bio->bi_vcnt);
+			memset(newbio->bi_io_vec, 0, bio->bi_vcnt *
+							sizeof(struct bio_vec));
+		}
+
+		/* Allocate new vector */
+		to = newbio->bi_io_vec + i;
+		to->bv_page = mempool_alloc(cc->page_pool, GFP_NOIO);
+		to->bv_len = from->bv_len;
+		to->bv_offset = from->bv_offset;
+
+		/* Copy data if this is required */
+		if (bio_data_dir(bio) == WRITE) {
+			vto = page_address(to->bv_page) + to->bv_offset;
+			vfrom = kmap(from->bv_page) + from->bv_offset;
+			memcpy(vto, vfrom, to->bv_len);
+			kunmap(from->bv_page);
+		}
 	}
 
-	io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));
+	/* We have at least one page bounced */
+	if (newbio != NULL) {
+		__bio_for_each_segment(from, bio, i, 0) {
+			to = bio_iovec_idx(newbio, i);
+			if (!to->bv_page) {
+				to->bv_page = from->bv_page;
+				to->bv_len = from->bv_len;
+				to->bv_offset = from->bv_offset;
+			}
+		}
 
-	if (bio_data_dir(io->base_bio) == READ) {
-		if (kcryptd_io_read(io, GFP_NOWAIT))
-			kcryptd_queue_io(io);
-	} else
-		kcryptd_queue_crypt(io);
+		newbio->bi_bdev = bio->bi_bdev;
+		newbio->bi_sector = bio->bi_sector;
+		newbio->bi_rw = bio->bi_rw;
+		newbio->bi_vcnt = bio->bi_vcnt;
+		newbio->bi_idx = bio->bi_idx;
+		newbio->bi_size = bio->bi_size;
+
+		newbio->bi_flags |= (1 << BIO_BOUNCED);
+		newbio->bi_private = bio;
+		bio = newbio;
+	}
+#endif /* CONFIG_HIGHMEM */
+
+	io->target = ti;
+	io->base_bio = bio;
+	io->error = io->post_process = 0;
+	atomic_set(&io->pending, 0);
+	kcryptd_queue_io(io);
 
 	return DM_MAPIO_SUBMITTED;
 }
@@ -1714,7 +1387,11 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
 		break;
 
 	case STATUSTYPE_TABLE:
-		DMEMIT("%s ", cc->cipher_string);
+		if (cc->iv_mode)
+			DMEMIT("%s-%s-%s ", cc->cipher, cc->chainmode,
+			       cc->iv_mode);
+		else
+			DMEMIT("%s-%s ", cc->cipher, cc->chainmode);
 
 		if (cc->key_size > 0)
 			for (i = 0; i < cc->key_size; i++)
@@ -1724,10 +1401,6 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
 
 		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
 				cc->dev->name, (unsigned long long)cc->start);
-
-		if (ti->num_discard_bios)
-			DMEMIT(" 1 allow_discards");
-
 		break;
 	}
 }
@@ -1735,14 +1408,12 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
 static void crypt_postsuspend(struct dm_target *ti)
 {
 	struct crypt_config *cc = ti->private;
-
 	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
 }
 
 static int crypt_preresume(struct dm_target *ti)
 {
 	struct crypt_config *cc = ti->private;
-
 	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
 		DMERR("aborting resume - crypt key is not set.");
 		return -EAGAIN;
@@ -1765,32 +1436,18 @@ static void crypt_resume(struct dm_target *ti)
 static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
 {
 	struct crypt_config *cc = ti->private;
-	int ret = -EINVAL;
-
 	if (argc < 2)
 		goto error;
 
-	if (!strcasecmp(argv[0], "key")) {
+	if (!strnicmp(argv[0], MESG_STR("key"))) {
 		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
 			DMWARN("not suspended during key manipulation.");
 			return -EINVAL;
 		}
-		if (argc == 3 && !strcasecmp(argv[1], "set")) {
-			ret = crypt_set_key(cc, argv[2]);
-			if (ret)
-				return ret;
-			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
-				ret = cc->iv_gen_ops->init(cc);
-			return ret;
-		}
-		if (argc == 2 && !strcasecmp(argv[1], "wipe")) {
-			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
-				ret = cc->iv_gen_ops->wipe(cc);
-				if (ret)
-					return ret;
-			}
+		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set")))
+			return crypt_set_key(cc, argv[2]);
+		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe")))
 			return crypt_wipe_key(cc);
-		}
 	}
 
 error:
@@ -1803,12 +1460,11 @@ static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
 {
 	struct crypt_config *cc = ti->private;
 	struct request_queue *q = bdev_get_queue(cc->dev->bdev);
-
 	if (!q->merge_bvec_fn)
 		return max_size;
 
 	bvm->bi_bdev = cc->dev->bdev;
-	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);
+	bvm->bi_sector = cc->start + bvm->bi_sector - ti->begin;
 
 	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
 }
@@ -1817,13 +1473,13 @@ static int crypt_iterate_devices(struct dm_target *ti,
 				 iterate_devices_callout_fn fn, void *data)
 {
 	struct crypt_config *cc = ti->private;
-
 	return fn(ti, cc->dev, cc->start, ti->len, data);
 }
 
+
 static struct target_type crypt_target = {
 	.name   = "crypt",
-	.version = {1, 12, 1},
+	.version = {1, 5, 1},
 	.module = THIS_MODULE,
 	.ctr    = crypt_ctr,
 	.dtr    = crypt_dtr,
@@ -1840,23 +1496,43 @@ static struct target_type crypt_target = {
 static int __init dm_crypt_init(void)
 {
 	int r;
-
-	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
+	_crypt_io_pool = KMEM_CACHE(crypt_io, 0);
 	if (!_crypt_io_pool)
 		return -ENOMEM;
 
+	_kcryptd_workqueue = create_workqueue("kcryptd");
+	if (!_kcryptd_workqueue) {
+		r = -ENOMEM;
+		DMERR("couldn't create kcryptd");
+		goto bad1;
+	}
+
 	r = dm_register_target(&crypt_target);
 	if (r < 0) {
 		DMERR("register failed %d", r);
-		kmem_cache_destroy(_crypt_io_pool);
+		goto bad2;
 	}
 
+	_crypt_requests = 0;
+	init_waitqueue_head(&_crypt_waitq);
+
+#ifdef CONFIG_OCF_DM_CRYPT
+	printk("dm_crypt using the OCF package.\n");
+#endif
+
+	return 0;
+
+bad2:
+	destroy_workqueue(_kcryptd_workqueue);
+bad1:
+	kmem_cache_destroy(_crypt_io_pool);
 	return r;
 }
 
 static void __exit dm_crypt_exit(void)
 {
 	dm_unregister_target(&crypt_target);
+	destroy_workqueue(_kcryptd_workqueue);
 	kmem_cache_destroy(_crypt_io_pool);
 }
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
old mode 100644
new mode 100755
index a2dda416c9cb..e022a356d3e3
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -524,6 +524,9 @@ void mddev_init(struct mddev *mddev)
 	mddev->resync_min = 0;
 	mddev->resync_max = MaxSector;
 	mddev->level = LEVEL_NONE;
+#ifdef ALPHA_CUSTOMIZE
+	mddev->parallel_resync = 1;
+#endif
 }
 EXPORT_SYMBOL_GPL(mddev_init);
 
@@ -7587,8 +7590,18 @@ void md_do_sync(struct md_thread *thread)
 			/((jiffies-mddev->resync_mark)/HZ +1) +1;
 
 		if (currspeed > speed_min(mddev)) {
+#ifdef ALPHA_CUSTOMIZE
+			int i_speed_max = speed_max(mddev);
+			int i_is_idle = is_mddev_idle(mddev, 0);
+			if( i_speed_max == 222222 ){
+				printk(KERN_INFO "md: speed_max=%d, is_idle=%d\n", i_speed_max, i_is_idle);
+			}
+			if ((currspeed > i_speed_max) || !i_is_idle) {
+#else
 			if ((currspeed > speed_max(mddev)) ||
 					!is_mddev_idle(mddev, 0)) {
+
+#endif
 				msleep(500);
 				goto repeat;
 			}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
old mode 100644
new mode 100755
index e73740b55aea..1ce78e0e3265
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -9,7 +9,7 @@
  *
  * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
  *
- * Fixes to reconstruction by Jakob Ã˜stergaard" <jakob@ostenfeld.dk>
+ * Fixes to reconstruction by Jakob ?stergaard" <jakob@ostenfeld.dk>
  * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
  *
  * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
@@ -2912,11 +2912,18 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
 	md_set_array_sectors(mddev, newsize);
 	set_capacity(mddev->gendisk, mddev->array_sectors);
 	revalidate_disk(mddev->gendisk);
+	/*
+	 *  after expansion complete and capacity grew up, skip resync progress
+	 *  because resync progress is also skipped before when RAID created
+	 */
+#ifdef ALPHA_CUSTOMIZE
+#else
 	if (sectors > mddev->dev_sectors &&
 	    mddev->recovery_cp > mddev->dev_sectors) {
 		mddev->recovery_cp = mddev->dev_sectors;
 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	}
+#endif
 	mddev->dev_sectors = sectors;
 	mddev->resync_max_sectors = sectors;
 	return 0;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
old mode 100644
new mode 100755
index 5e3c25d4562c..2ea80e18117e
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4282,6 +4282,16 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 	const int rw = bio_data_dir(bi);
 	int remaining;
 
+#ifdef ALPHA_CUSTOMIZE // to avoid system hang when unplug 2 disks from RAID5 at the same time
+	if( mddev->degraded > conf->max_degraded ){
+//		printk(KERN_ALERT
+//				"md/raid:%s: degraded(%d) > max_degraded(%d), request skipped.\n",
+//				mdname(mddev), mddev->degraded, conf->max_degraded);
+		bio_endio(bi, 0);
+		return;
+	}
+#endif
+
 	if (unlikely(bi->bi_rw & REQ_FLUSH)) {
 		md_flush_request(mddev, bi);
 		return;
@@ -5894,6 +5904,10 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
 	md_set_array_sectors(mddev, newsize);
 	set_capacity(mddev->gendisk, mddev->array_sectors);
 	revalidate_disk(mddev->gendisk);
+	/*
+	 *  after expansion complete and capacity grew up, skip resync progress
+	 *  because resync progress is also skipped before when RAID created
+	 */
 	if (sectors > mddev->dev_sectors &&
 	    mddev->recovery_cp > mddev->dev_sectors) {
 		mddev->recovery_cp = mddev->dev_sectors;
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 067f31174a0e..29a11db365bc 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -20,6 +20,16 @@ config TI_EMIF
 	  parameters and other settings during frequency, voltage and
 	  temperature changes
 
+config MVEBU_DEVBUS
+	bool "Marvell EBU Device Bus Controller"
+	default y
+	depends on PLAT_ORION && OF
+	help
+	  This driver is for the Device Bus controller available in some
+	  Marvell EBU SoCs such as Discovery (mv78xx0), Orion (88f5xxx) and
+	  Armada 370 and Armada XP. This controller allows to handle flash
+	  devices such as NOR, NAND, SRAM, and FPGA.
+
 config TEGRA20_MC
 	bool "Tegra20 Memory Controller(MC) driver"
 	default y
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index 9cce5d70ed52..969d923dad93 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -6,5 +6,6 @@ ifeq ($(CONFIG_DDR),y)
 obj-$(CONFIG_OF)		+= of_memory.o
 endif
 obj-$(CONFIG_TI_EMIF)		+= emif.o
+obj-$(CONFIG_MVEBU_DEVBUS)	+= mvebu-devbus.o
 obj-$(CONFIG_TEGRA20_MC)	+= tegra20-mc.o
 obj-$(CONFIG_TEGRA30_MC)	+= tegra30-mc.o
diff --git a/drivers/memory/mvebu-devbus.c b/drivers/memory/mvebu-devbus.c
new file mode 100644
index 000000000000..94c92482fd8f
--- /dev/null
+++ b/drivers/memory/mvebu-devbus.c
@@ -0,0 +1,280 @@
+/*
+ * Marvell EBU SoC Device Bus Controller
+ * (memory controller for NOR/NAND/SRAM/FPGA devices)
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/mbus.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+/* Register definitions */
+#define DEV_WIDTH_BIT		30
+#define BADR_SKEW_BIT		28
+#define RD_HOLD_BIT		23
+#define ACC_NEXT_BIT		17
+#define RD_SETUP_BIT		12
+#define ACC_FIRST_BIT		6
+
+#define SYNC_ENABLE_BIT		24
+#define WR_HIGH_BIT		16
+#define WR_LOW_BIT		8
+
+#define READ_PARAM_OFFSET	0x0
+#define WRITE_PARAM_OFFSET	0x4
+
+static const char * const devbus_wins[] = {
+	"devbus-boot",
+	"devbus-cs0",
+	"devbus-cs1",
+	"devbus-cs2",
+	"devbus-cs3",
+};
+
+struct devbus_read_params {
+	u32 bus_width;
+	u32 badr_skew;
+	u32 turn_off;
+	u32 acc_first;
+	u32 acc_next;
+	u32 rd_setup;
+	u32 rd_hold;
+};
+
+struct devbus_write_params {
+	u32 sync_enable;
+	u32 wr_high;
+	u32 wr_low;
+	u32 ale_wr;
+};
+
+struct devbus {
+	struct device *dev;
+	void __iomem *base;
+	unsigned long tick_ps;
+};
+
+static int get_timing_param_ps(struct devbus *devbus,
+			       struct device_node *node,
+			       const char *name,
+			       u32 *ticks)
+{
+	u32 time_ps;
+	int err;
+
+	err = of_property_read_u32(node, name, &time_ps);
+	if (err < 0) {
+		dev_err(devbus->dev, "%s has no '%s' property\n",
+			name, node->full_name);
+		return err;
+	}
+
+	*ticks = (time_ps + devbus->tick_ps - 1) / devbus->tick_ps;
+
+	dev_dbg(devbus->dev, "%s: %u ps -> 0x%x\n",
+		name, time_ps, *ticks);
+	return 0;
+}
+
+static int devbus_set_timing_params(struct devbus *devbus,
+				    struct device_node *node)
+{
+	struct devbus_read_params r;
+	struct devbus_write_params w;
+	u32 value;
+	int err;
+
+	dev_dbg(devbus->dev, "Setting timing parameter, tick is %lu ps\n",
+		devbus->tick_ps);
+
+	/* Get read timings */
+	err = of_property_read_u32(node, "devbus,bus-width", &r.bus_width);
+	if (err < 0) {
+		dev_err(devbus->dev,
+			"%s has no 'devbus,bus-width' property\n",
+			node->full_name);
+		return err;
+	}
+	/* Convert bit width to byte width */
+	r.bus_width /= 8;
+
+	err = get_timing_param_ps(devbus, node, "devbus,badr-skew-ps",
+				 &r.badr_skew);
+	if (err < 0)
+		return err;
+
+	err = get_timing_param_ps(devbus, node, "devbus,turn-off-ps",
+				 &r.turn_off);
+	if (err < 0)
+		return err;
+
+	err = get_timing_param_ps(devbus, node, "devbus,acc-first-ps",
+				 &r.acc_first);
+	if (err < 0)
+		return err;
+
+	err = get_timing_param_ps(devbus, node, "devbus,acc-next-ps",
+				 &r.acc_next);
+	if (err < 0)
+		return err;
+
+	err = get_timing_param_ps(devbus, node, "devbus,rd-setup-ps",
+				 &r.rd_setup);
+	if (err < 0)
+		return err;
+
+	err = get_timing_param_ps(devbus, node, "devbus,rd-hold-ps",
+				 &r.rd_hold);
+	if (err < 0)
+		return err;
+
+	/* Get write timings */
+	err = of_property_read_u32(node, "devbus,sync-enable",
+				  &w.sync_enable);
+	if (err < 0) {
+		dev_err(devbus->dev,
+			"%s has no 'devbus,sync-enable' property\n",
+			node->full_name);
+		return err;
+	}
+
+	err = get_timing_param_ps(devbus, node, "devbus,ale-wr-ps",
+				 &w.ale_wr);
+	if (err < 0)
+		return err;
+
+	err = get_timing_param_ps(devbus, node, "devbus,wr-low-ps",
+				 &w.wr_low);
+	if (err < 0)
+		return err;
+
+	err = get_timing_param_ps(devbus, node, "devbus,wr-high-ps",
+				 &w.wr_high);
+	if (err < 0)
+		return err;
+
+	/* Set read timings */
+	value = r.bus_width << DEV_WIDTH_BIT |
+		r.badr_skew << BADR_SKEW_BIT |
+		r.rd_hold   << RD_HOLD_BIT   |
+		r.acc_next  << ACC_NEXT_BIT  |
+		r.rd_setup  << RD_SETUP_BIT  |
+		r.acc_first << ACC_FIRST_BIT |
+		r.turn_off;
+
+	dev_dbg(devbus->dev, "read parameters register 0x%p = 0x%x\n",
+		devbus->base + READ_PARAM_OFFSET,
+		value);
+
+	writel(value, devbus->base + READ_PARAM_OFFSET);
+
+	/* Set write timings */
+	value = w.sync_enable  << SYNC_ENABLE_BIT |
+		w.wr_low       << WR_LOW_BIT      |
+		w.wr_high      << WR_HIGH_BIT     |
+		w.ale_wr;
+
+	dev_dbg(devbus->dev, "write parameters register: 0x%p = 0x%x\n",
+		devbus->base + WRITE_PARAM_OFFSET,
+		value);
+
+	writel(value, devbus->base + WRITE_PARAM_OFFSET);
+
+	return 0;
+}
+
+static int mvebu_devbus_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *node = pdev->dev.of_node;
+	struct devbus *devbus;
+	struct resource *res;
+	struct clk *clk;
+	unsigned long rate;
+	int err;
+
+	devbus = devm_kzalloc(&pdev->dev, sizeof(struct devbus), GFP_KERNEL);
+	if (!devbus)
+		return -ENOMEM;
+
+	devbus->dev = dev;
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	devbus->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(devbus->base))
+		return PTR_ERR(devbus->base);
+
+	clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+	clk_prepare_enable(clk);
+
+	/*
+	 * Obtain clock period in picoseconds,
+	 * we need this in order to convert timing
+	 * parameters from cycles to picoseconds.
+	 */
+	rate = clk_get_rate(clk) / 1000;
+	devbus->tick_ps = 1000000000 / rate;
+
+	/* Read the device tree node and set the new timing parameters */
+	err = devbus_set_timing_params(devbus, node);
+	if (err < 0)
+		return err;
+
+	/*
+	 * We need to create a child device explicitly from here to
+	 * guarantee that the child will be probed after the timing
+	 * parameters for the bus are written.
+	 */
+	err = of_platform_populate(node, NULL, NULL, dev);
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static const struct of_device_id mvebu_devbus_of_match[] = {
+	{ .compatible = "marvell,mvebu-devbus" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mvebu_devbus_of_match);
+
+static struct platform_driver mvebu_devbus_driver = {
+	.probe		= mvebu_devbus_probe,
+	.driver		= {
+		.name	= "mvebu-devbus",
+		.owner	= THIS_MODULE,
+		.of_match_table = mvebu_devbus_of_match,
+	},
+};
+
+static int __init mvebu_devbus_init(void)
+{
+	return platform_driver_register(&mvebu_devbus_driver);
+}
+module_init(mvebu_devbus_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Ezequiel Garcia <ezequiel.garcia@free-electrons.com>");
+MODULE_DESCRIPTION("Marvell EBU SoC Device Bus controller");
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 8960fc846c77..e50aab2e4782 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -858,6 +858,15 @@ static int mvsd_resume(struct platform_device *dev)
 #define mvsd_resume	NULL
 #endif
 
+void mvsd_shutdown(struct platform_device *pdev)
+{
+	struct mmc_host *mmc = platform_get_drvdata(pdev);
+	struct mvsd_host *host = mmc_priv(mmc);
+
+	if (!IS_ERR(host->clk))
+		clk_disable_unprepare(host->clk);
+}
+
 static const struct of_device_id mvsdio_dt_ids[] = {
 	{ .compatible = "marvell,orion-sdio" },
 	{ /* sentinel */ }
@@ -868,6 +877,7 @@ static struct platform_driver mvsd_driver = {
 	.remove		= __exit_p(mvsd_remove),
 	.suspend	= mvsd_suspend,
 	.resume		= mvsd_resume,
+	.shutdown	= mvsd_shutdown,
 	.driver		= {
 		.name	= DRIVER_NAME,
 		.of_match_table = mvsdio_dt_ids,
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 1ae358e0662d..bd1851a6a202 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -34,6 +34,7 @@
 #include <linux/of_gpio.h>
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
+#include <linux/mbus.h>
 
 #include "sdhci.h"
 #include "sdhci-pltfm.h"
@@ -57,6 +58,49 @@
 #define SDCE_MISC_INT		(1<<2)
 #define SDCE_MISC_INT_EN	(1<<1)
 
+/*
+ * These registers are relative to the second register region, for the
+ * MBus bridge.
+ */
+#define SDHCI_WINDOW_CTRL(i)	(0x80 + ((i) << 3))
+#define SDHCI_WINDOW_BASE(i)	(0x84 + ((i) << 3))
+#define SDHCI_MAX_WIN_NUM	8
+
+/* Fields below belong to SDIO3 Configuration Register (third register region)
+ */
+#define SDIO3_CONF_CLK_INV	BIT(0)
+#define SDIO3_CONF_SD_FB_CLK	BIT(2)
+
+static int mv_conf_mbus_windows(struct device *dev, void __iomem *regs,
+				const struct mbus_dram_target_info *dram)
+{
+	int i;
+
+	if (!dram) {
+		dev_err(dev, "no mbus dram info\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < SDHCI_MAX_WIN_NUM; i++) {
+		writel(0, regs + SDHCI_WINDOW_CTRL(i));
+		writel(0, regs + SDHCI_WINDOW_BASE(i));
+	}
+
+	for (i = 0; i < dram->num_cs; i++) {
+		const struct mbus_dram_window *cs = dram->cs + i;
+
+		/* Write size, attributes and target id to control register */
+		writel(((cs->size - 1) & 0xffff0000) |
+			(cs->mbus_attr << 8) |
+			(dram->mbus_dram_target_id << 4) | 1,
+			regs + SDHCI_WINDOW_CTRL(i));
+		/* Write base address to base register */
+		writel(cs->base, regs + SDHCI_WINDOW_BASE(i));
+	}
+
+	return 0;
+}
+
 static void pxav3_set_private_registers(struct sdhci_host *host, u8 mask)
 {
 	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
@@ -131,7 +175,12 @@ static void pxav3_gen_init_74_clocks(struct sdhci_host *host, u8 power_mode)
 
 static int pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
 {
+	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+	struct device_node *np = pdev->dev.of_node;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_pxa *pxa = pltfm_host->priv;
 	u16 ctrl_2;
+	u8 reg_val;
 
 	/*
 	 * Set V18_EN -- UHS modes do not work without this.
@@ -159,6 +208,22 @@ static int pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
 		break;
 	}
 
+	/* Update SDIO3 Configuration register according to
+	 * erratum 'FE-2946959'.
+	 */
+	if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
+		reg_val = readb(pxa->sdio3_conf_reg);
+		if (uhs == MMC_TIMING_UHS_SDR50 ||
+		    uhs == MMC_TIMING_UHS_DDR50) {
+			reg_val &= ~SDIO3_CONF_CLK_INV;
+			reg_val |= SDIO3_CONF_SD_FB_CLK;
+		} else {
+			reg_val |= SDIO3_CONF_CLK_INV;
+			reg_val &= ~SDIO3_CONF_SD_FB_CLK;
+		}
+		writeb(reg_val, pxa->sdio3_conf_reg);
+	}
+
 	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
 	dev_dbg(mmc_dev(host->mmc),
 		"%s uhs = %d, ctrl_2 = %04X\n",
@@ -178,7 +243,8 @@ static struct sdhci_pltfm_data sdhci_pxav3_pdata = {
 	.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK
 		| SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
 		| SDHCI_QUIRK_32BIT_ADMA_SIZE
-		| SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN,
+		| SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN
+		| SDHCI_QUIRK_MISSING_CAPS,
 	.ops = &pxav3_sdhci_ops,
 };
 
@@ -186,6 +252,11 @@ static struct sdhci_pltfm_data sdhci_pxav3_pdata = {
 static const struct of_device_id sdhci_pxav3_of_match[] = {
 	{
 		.compatible = "mrvl,pxav3-mmc",
+		.data = &sdhci_pxav3_pdata,
+	},
+	{
+		.compatible = "marvell,armada-380-sdhci",
+		.data = &sdhci_pxav3_pdata,
 	},
 	{},
 };
@@ -219,9 +290,12 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
 	struct sdhci_pltfm_host *pltfm_host;
 	struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
 	struct device *dev = &pdev->dev;
+	struct device_node *np = pdev->dev.of_node;
 	struct sdhci_host *host = NULL;
 	struct sdhci_pxa *pxa = NULL;
+	struct resource *res;
 	const struct of_device_id *match;
+	const struct sdhci_pltfm_data *sdhci_pltfm_data;
 
 	int ret;
 	struct clk *clk;
@@ -230,11 +304,41 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
 	if (!pxa)
 		return -ENOMEM;
 
-	host = sdhci_pltfm_init(pdev, &sdhci_pxav3_pdata);
+	match = of_match_device(of_match_ptr(sdhci_pxav3_of_match), &pdev->dev);
+
+	if (match)
+		sdhci_pltfm_data = match->data;
+	else
+		sdhci_pltfm_data = &sdhci_pxav3_pdata;
+
+	host = sdhci_pltfm_init(pdev, sdhci_pltfm_data);
 	if (IS_ERR(host)) {
 		kfree(pxa);
 		return PTR_ERR(host);
 	}
+
+	if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		pxa->mbus_win_regs = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(pxa->mbus_win_regs)) {
+			ret = PTR_ERR(pxa->mbus_win_regs);
+			goto err_clk_get;
+		}
+
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+		pxa->sdio3_conf_reg = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(pxa->sdio3_conf_reg)) {
+			ret = PTR_ERR(pxa->sdio3_conf_reg);
+			goto err_clk_get;
+		}
+
+		ret = mv_conf_mbus_windows(&pdev->dev, pxa->mbus_win_regs,
+					   mv_mbus_dram_info());
+		if (ret < 0)
+			goto err_clk_get;
+	}
+
+
 	pltfm_host = sdhci_priv(host);
 	pltfm_host->priv = pxa;
 
@@ -250,11 +354,26 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
 	/* enable 1/8V DDR capable */
 	host->mmc->caps |= MMC_CAP_1_8V_DDR;
 
-	match = of_match_device(of_match_ptr(sdhci_pxav3_of_match), &pdev->dev);
 	if (match) {
 		mmc_of_parse(host->mmc);
 		sdhci_get_of_property(pdev);
 		pdata = pxav3_get_mmc_pdata(dev);
+		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
+
+		/* Modify capabilities of Armada 38x SDHCI controller according
+		 * to erratum ERR-7878951:
+		 */
+		if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
+			if (of_get_property(np, "no-1-8-v", NULL)) {
+				host->caps &= ~SDHCI_CAN_VDD_180;
+				host->mmc->caps &= ~MMC_CAP_1_8V_DDR;
+			} else
+				host->caps &= ~SDHCI_CAN_VDD_330;
+
+			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
+					 SDHCI_USE_SDR50_TUNING);
+		}
 	} else if (pdata) {
 		/* on-chip device */
 		if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
@@ -362,6 +481,13 @@ static int sdhci_pxav3_resume(struct device *dev)
 {
 	int ret;
 	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_pxa *pxa = pltfm_host->priv;
+	struct device_node *np = dev->of_node;
+
+	if (of_device_is_compatible(np, "marvell,armada-380-sdhci"))
+		ret = mv_conf_mbus_windows(dev, pxa->mbus_win_regs,
+					   mv_mbus_dram_info());
 
 	pm_runtime_get_sync(dev);
 	ret = sdhci_resume_host(host);
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 50543f166215..5b8cf397b8f6 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -353,11 +353,11 @@ config MTD_NAND_ATMEL
 	  on Atmel AT91 and AVR32 processors.
 
 config MTD_NAND_PXA3xx
-	tristate "Support for NAND flash devices on PXA3xx"
-	depends on PXA3xx || ARCH_MMP
+	tristate "NAND support on PXA3xx and Armada 370/XP"
+	depends on PXA3xx || ARCH_MMP || PLAT_ORION
 	help
 	  This enables the driver for the NAND flash device found on
-	  PXA3xx processors
+	  PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2).
 
 config MTD_NAND_SLC_LPC32XX
 	tristate "NXP LPC32xx SLC Controller"
@@ -424,6 +424,18 @@ config MTD_NAND_BCM47XXNFLASH
 	  registered by bcma as platform devices. This enables driver for
 	  NAND flash memories. For now only BCM4706 is supported.
 
+config MTD_NAND_MVEBU_NFC
+	bool "MTD driver for the Armada Nand Flash Controller"
+	depends on ARCH_MVEBU
+	default n
+	help
+	  This enables the driver for the NAND flash controller found in
+	  the Marvell Armada SoC devices.
+
+if MTD_NAND_MVEBU_NFC = â€œyâ€
+	source â€œdrivers/mtd/nand/mvebu_nfc/Kconfigâ€
+endif
+
 config MTD_NAND_PLATFORM
 	tristate "Support for generic platform NAND driver"
 	depends on HAS_IOMEM
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index bb8189172f62..702e8a901519 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -50,5 +50,6 @@ obj-$(CONFIG_MTD_NAND_JZ4740)		+= jz4740_nand.o
 obj-$(CONFIG_MTD_NAND_GPMI_NAND)	+= gpmi-nand/
 obj-$(CONFIG_MTD_NAND_XWAY)		+= xway_nand.o
 obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH)	+= bcm47xxnflash/
+obj-$(CONFIG_MTD_NAND_MVEBU_NFC)	+= mvebu_nfc/
 
 nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/mvebu_nfc/Kconfig b/drivers/mtd/nand/mvebu_nfc/Kconfig
new file mode 100644
index 000000000000..407ec6f818b9
--- /dev/null
+++ b/drivers/mtd/nand/mvebu_nfc/Kconfig
@@ -0,0 +1,52 @@
+config MTD_NAND_MARVELL
+	bool "NAND Flash support for Marvell Armada SoC"
+	depends on MACH_ARMADA_370_XP
+	default y
+	help
+	  This option enables the
+	  MTD support for NAND
+	  Flash on Marvell Armada
+	  machines.
+
+config MTD_NAND_NFC
+	tristate "MTD driver for the Armada Nand Flash Controller"
+	depends on MTD_NAND_MARVELL
+	default y
+	help
+	  This enables the driver
+	  for the NAND flash controller
+	  found in the Marvell
+	  Armada SoC devices.
+
+config MTD_NAND_NFC_GANG_SUPPORT
+	bool "NAND Ganged mode support for the NFC"
+	depends on MTD_NAND_NFC
+	default n
+	help
+	  This option enables the support for 2x8bit ganged mode in
+	  Marvell's NFC HAL driver and the MTD stack.
+
+config MTD_NAND_NFC_MLC_SUPPORT
+	bool "NAND MLC devices support for the NFC"
+	depends on MTD_NAND_NFC
+	default y
+	help
+	  This option allows support for Nand devices with non-standard
+	  page/oob layout. These devices are detected incorrectly with
+	  standard autodetection mechanism based on the READ_ID command.
+
+config MTD_NAND_NFC_INIT_RESET
+	bool "NAND Enable Reset on Initialization"
+	depends on MTD_NAND_NFC
+	default y
+	help
+	  This option forces NAND reset command on initialization. This
+	  is required by certain NAND vendors (Micron).
+
+config MTD_NAND_NFC_NEGLECT_RNB
+	bool "NAND Neglect Read/Busy Signal"
+	depends on MTD_NAND_NFC
+	default n
+	help
+	  This option forces allows operation withou the RnBx signal. In this
+	  mode, tR taken as worst case in every operation
diff --git a/drivers/mtd/nand/mvebu_nfc/Makefile b/drivers/mtd/nand/mvebu_nfc/Makefile
new file mode 100644
index 000000000000..4d299227bd04
--- /dev/null
+++ b/drivers/mtd/nand/mvebu_nfc/Makefile
@@ -0,0 +1,28 @@
+#
+# Makefile for the Marvell CESA driver
+#
+
+CPU_ARCH	= ARM
+ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
+ENDIAN      = BE
+else
+ENDIAN      = LE
+endif
+
+MVEBU_NFC_FLAGS := -DMV_LINUX -DMV_CPU_$(ENDIAN) -DMV_$(CPU_ARCH)
+
+ccflags-y       += $(MVEBU_NFC_FLAGS)
+
+NFC_DIR		:= drivers/mtd/nand/mvebu_nfc
+
+INCLUDE_DIRS	+= -I$(NFC_DIR)
+INCLUDE_DIRS	+= -I$(NFC_DIR)/hal
+INCLUDE_DIRS	+= -I$(srctree)/arch/arm/mach-mvebu/linux_oss
+INCLUDE_DIRS	+= -I$(srctree)/arch/arm/mach-mvebu/include/mach
+
+ccflags-y	+= $(INCLUDE_DIRS)
+
+armada_nand-y	+= nand_nfc.o
+armada_nand-y	+= hal/mvNfc.o
+
+obj-$(CONFIG_MTD_NAND_NFC)	+= armada_nand.o
diff --git a/drivers/mtd/nand/mvebu_nfc/hal/mvNfc.c b/drivers/mtd/nand/mvebu_nfc/hal/mvNfc.c
new file mode 100644
index 000000000000..9a35e10efd45
--- /dev/null
+++ b/drivers/mtd/nand/mvebu_nfc/hal/mvNfc.c
@@ -0,0 +1,3278 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "mvNfcRegs.h"
+#ifdef MV_INCLUDE_PDMA
+#include "pdma/mvPdma.h"
+#include "pdma/mvPdmaRegs.h"
+#endif
+#include "mvNfc.h"
+
+#ifdef _DEBUG__
+#define DB(x)	x
+#else
+#define DB(x)
+#endif
+/*************/
+/* Constants */
+/*************/
+
+#define NFC_NATIVE_READ_ID_CMD		0x0090
+#define NFC_READ_ID_ADDR_LEN		1
+#define NFC_ERASE_ADDR_LEN		3
+#define NFC_SP_READ_ADDR_LEN		3
+#define NFC_SP_BIG_READ_ADDR_LEN	4
+#define NFC_LP_READ_ADDR_LEN		5
+#define NFC_BLOCK_ADDR_BITS		0xFFFFFF
+#define NFC_SP_COL_OFFS			0
+#define NFC_SP_COL_MASK			(0xFF << NFC_SP_COL_OFFS)
+#define NFC_LP_COL_OFFS			0
+#define NFC_LP_COL_MASK			(0xFFFF << NFC_SP_COL_OFFS)
+#define NFC_SP_PG_OFFS			8
+#define NFC_SP_PG_MASK			(0xFFFFFF << NFC_SP_PG_OFFS)
+#define NFC_LP_PG_OFFS			16
+#define NFC_LP_PG_MASK			(0xFFFF << NFC_LP_PG_OFFS)
+#define NFC_PG_CNT_OFFS			8
+#define NFC_PG_CNT_MASK			(0xFF << NFC_PG_CNT_OFFS)
+
+/* NAND special features bitmask definition.	*/
+#define NFC_FLAGS_NONE				0x0
+#define NFC_FLAGS_ONFI_MODE_3_SET	0x1
+#define NFC_CLOCK_UPSCALE_200M		0x2
+
+/* End of NAND special features definitions.	*/
+
+#define NFC_READ_ID_PDMA_DATA_LEN	32
+#define NFC_READ_STATUS_PDMA_DATA_LEN	32
+#define NFC_READ_ID_PIO_DATA_LEN	8
+#define NFC_READ_STATUS_PIO_DATA_LEN	8
+#define NFC_RW_SP_PDMA_DATA_LEN		544
+#define NFC_RW_SP_NO_ECC_DATA_LEN	528
+#define NFC_RW_SP_HMNG_ECC_DATA_LEN	520
+#define NFC_RW_SP_G_NO_ECC_DATA_LEN	528
+#define NFC_RW_SP_G_HMNG_ECC_DATA_LEN	526
+
+#define NFC_RW_LP_PDMA_DATA_LEN		2112
+
+#define NFC_RW_LP_NO_ECC_DATA_LEN	2112
+#define NFC_RW_LP_HMNG_ECC_DATA_LEN	2088
+#define NFC_RW_LP_BCH_ECC_DATA_LEN	2080
+
+#define NFC_RW_LP_G_NO_ECC_DATA_LEN	2112
+#define NFC_RW_LP_G_HMNG_ECC_DATA_LEN	2088
+#define NFC_RW_LP_G_BCH_ECC_DATA_LEN	2080
+
+#define NFC_RW_LP_BCH1K_ECC_DATA_LEN	1024
+#define NFC_RW_LP_BCH704B_ECC_DATA_LEN	704
+#define NFC_RW_LP_BCH512B_ECC_DATA_LEN	512
+
+#define NFC_CMD_STRUCT_SIZE		(sizeof(MV_NFC_CMD))
+#define NFC_CMD_BUFF_SIZE(cmdb_0)	((cmdb_0 & NFC_CB0_LEN_OVRD_MASK) ? 16 : 12)
+#define NFC_CMD_BUFF_ADDR		(NFC_COMMAND_BUFF_0_REG_4PDMA)
+#define NFC_DATA_BUFF_ADDR		(NFC_DATA_BUFF_REG_4PDMA)
+
+
+#define TIMING_MAX_tADL		0x1f
+#define TIMING_DEF_SEL_CNTR	0x1
+#define TIMING_MAX_RD_CNT_DEL	0x0
+#define TIMING_MAX_tCH		0x7
+#define TIMING_MAX_tCS		0x7
+#define TIMING_MAX_tWH		0x7
+#define TIMING_MAX_tWP		0x7
+#define TIMING_MAX_etRP		0x1
+#define TIMING_MAX_tRH		0x7
+#define TIMING_MAX_tRP		0x7
+
+#define MV_NDTR0CS0_REG		((TIMING_MAX_tADL << 27) | \
+				 (TIMING_DEF_SEL_CNTR << 26) | \
+				 (TIMING_MAX_RD_CNT_DEL << 22) | \
+				 (TIMING_MAX_tCH << 19) | \
+				 (TIMING_MAX_tCS << 16) | \
+				 (TIMING_MAX_tWH << 11) | \
+				 (TIMING_MAX_tWP << 8) | \
+				 (TIMING_MAX_etRP << 6) | \
+				 (TIMING_MAX_tRH << 3) | \
+				 (TIMING_MAX_tRP))
+
+#define TIMING_tR		0xff
+#define TIMING_WAIT_MODE	0x1	/* Work with RnB signal (1) or ignore it (0) */
+#define TIMING_PRESCALE		0x0	/* no prescalling */
+#define TIMING_MAX_tRHW		0x0
+#define TIMING_MAX_tWHR		0xf
+#define TIMING_MAX_tAR		0xf
+
+#define MV_NDTR1CS0_REG		((TIMING_tR << 16) | \
+				 (TIMING_WAIT_MODE << 15) | \
+				 (TIMING_PRESCALE << 14) | \
+				 (TIMING_MAX_tRHW << 8) | \
+				 (TIMING_MAX_tWHR << 4) | \
+				 (TIMING_MAX_tAR))
+
+
+/**********/
+/* Macros */
+/**********/
+#define ns_clk(ns, ns2clk)	((ns % ns2clk) ? (MV_U32)((ns/ns2clk)+1) : (MV_U32)(ns/ns2clk))
+#define maxx(a, b)		((a > b) ? a : b)
+#define check_limit(val, pwr)	((val > ((1 << pwr)-1)) ? ((1 << pwr)-1) : val)
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define MV_LE32_TO_CPU(x)	le32_to_cpu(x)
+#define MV_CPU_TO_LE32(x)	cpu_to_le32(x)
+#else
+#define MV_LE32_TO_CPU(x)	(x)
+#define MV_CPU_TO_LE32(x)	(x)
+#endif
+
+#define DBGPRINT(x)	printk(x)
+#define DBGLVL		KERN_INFO
+
+
+
+#ifndef MV_NAND_REG_BIT_SET
+#define MV_NAND_REG_BIT_SET	MV_REG_BIT_SET
+#endif
+#ifndef MV_NAND_REG_BIT_RESET
+#define MV_NAND_REG_BIT_RESET	MV_REG_BIT_RESET
+#endif
+
+#ifndef MV_NAND_REG_WRITE
+#define MV_NAND_REG_WRITE	MV_REG_WRITE
+#endif
+
+#ifndef MV_NAND_REG_READ
+#define MV_NAND_REG_READ	MV_REG_READ
+#endif
+
+
+/***********/
+/* Typedef */
+/***********/
+
+/* Flash Timing Parameters */
+typedef struct {
+	/* Flash Timing */
+	MV_U32 tADL;		/* Address to write data delay */
+	MV_U32 tCH;		/* Enable signal hold time */
+	MV_U32 tCS;		/* Enable signal setup time */
+	MV_U32 tWC;		/* ND_nWS cycle duration */
+	MV_U32 tWH;		/* ND_nWE high duration */
+	MV_U32 tWP;		/* ND_nWE pulse time */
+	MV_U32 tRC;		/* ND_nRE cycle duration */
+	MV_U32 tRH;		/* ND_nRE high duration */
+	MV_U32 tRP;		/* ND_nRE pulse width */
+	MV_U32 tR;		/* ND_nWE high to ND_nRE low for read */
+	MV_U32 tWHR;		/* ND_nWE high to ND_nRE low for status read */
+	MV_U32 tAR;		/* ND_ALE low to ND_nRE low delay */
+	MV_U32 tRHW;		/* ND_nRE high to ND_nWE low delay */
+	/* Physical Layout */
+	MV_U32 pgPrBlk;		/* Pages per block */
+	MV_U32 pgSz;		/* Page size */
+	MV_U32 oobSz;		/* Page size */
+	MV_U32 blkNum;		/* Number of blocks per device */
+	MV_U32 id;		/* Manufacturer and device IDs */
+	MV_U32 seqDis;		/* Enable/Disable sequential multipage read */
+	MV_8 *model;		/* Flash Model string */
+	MV_U32 bb_page;		/* Page containing bad block marking */
+	MV_U32 flags;		/* Special features configuration.	*/
+} MV_NFC_FLASH_INFO;
+
+/* Flash command set */
+typedef struct {
+	MV_U16 read1;
+	MV_U16 exitCacheRead;
+	MV_U16 cacheReadRand;
+	MV_U16 cacheReadSeq;
+	MV_U16 read2;
+	MV_U16 program;
+	MV_U16 readStatus;
+	MV_U16 readId;
+	MV_U16 erase;
+	MV_U16 multiplaneErase;
+	MV_U16 reset;
+	MV_U16 lock;
+	MV_U16 unlock;
+	MV_U16 lockStatus;
+} MV_NFC_FLASH_CMD_SET;
+
+/* ONFI Mode type */
+typedef enum {
+	MV_NFC_ONFI_MODE_0,
+	MV_NFC_ONFI_MODE_1,
+	MV_NFC_ONFI_MODE_2,
+	MV_NFC_ONFI_MODE_3,
+	MV_NFC_ONFI_MODE_4,
+	MV_NFC_ONFI_MODE_5
+} MV_NFC_ONFI_MODE;
+
+/********/
+/* Data */
+/********/
+
+/* Defined Flash Types */
+MV_NFC_FLASH_INFO flashDeviceInfo[] = {
+   {			/* Hynnix 4Gb */
+	.tADL = 70,		/* tADL, Address to write data delay */
+	.tCH = 5,		/* tCH, Enable signal hold time */
+	.tCS = 20,		/* tCS, Enable signal setup time */
+	.tWC = 25,		/* tWC, ND_nWE cycle duration */
+	.tWH = 10,		/* tWH, ND_nWE high duration */
+	.tWP = 12,		/* tWP, ND_nWE pulse time */
+	.tRC = 25,		/* tWC, ND_nRE cycle duration */
+	.tRH = 10,		/* tRH, ND_nRE high duration */   //BLUE_ADD , this item not sure
+	.tRP = 12,		/* tRP, ND_nRE pulse width */
+	.tR = 25000,		/* tR = tR+tRR+tWB+1, ND_nWE high to ND_nRE low for read - 25000+20+100+1 */
+	.tWHR = 60,		/* tWHR, ND_nWE high to ND_nRE low delay for status read */
+	.tAR = 10,		/* tAR, ND_ALE low to ND_nRE low delay */
+	.tRHW = 100,		/* tRHW, ND_nRE high to ND_nWE low delay */
+	.pgPrBlk = 64,		/* Pages per block - detected */
+	.pgSz = 2048,		/* Page size */
+	.oobSz = 64,		/* Spare size */
+	.blkNum = 4096,		/* Number of blocks/sectors in the flash */
+	.id = 0xDCAD,		/* Device ID 0xDevice,Vendor */
+	.model = "Hynix H27(U_S)4G8 4Gb 8bit",
+	.bb_page = 63,		/* Manufacturer Bad block marking page in block */
+	.flags = NFC_CLOCK_UPSCALE_200M
+	},
+   {                       /* Mxic 4Gb */
+    .tADL = 70,              /* tADL, Address to write data delay */
+    .tCH = 5,          /* tCH, Enable signal hold time */
+    .tCS = 15,         /* tCS, Enable signal setup time */
+    .tWC = 20,               /* tWC, ND_nWE cycle duration */
+    .tWH = 7,         /* tWH, ND_nWE high duration */
+    .tWP = 10,               /* tWP, ND_nWE pulse time */
+    .tRC = 20,                /* tWC, ND_nRE cycle duration */
+    .tRH = 7,          /* tRH, ND_nRE high duration */
+    .tRP = 10,         /* tRP, ND_nRE pulse width */
+    .tR = 25000,             /* tR = tR+tRR+tWB+1, ND_nWE high to ND_nRE low for read - 25000+20+100+1 */
+    .tWHR = 60,             /* tWHR, ND_nWE high to ND_nRE low delay for status read */
+    .tAR = 10,                /* tAR, ND_ALE low to ND_nRE low delay */
+    .tRHW = 60,           /* tRHW, ND_nRE high to ND_nWE low delay */
+    .pgPrBlk = 64,          /* Pages per block - detected */
+    .pgSz = 2048,           /* Page size */
+    .oobSz = 64,             /* Spare size */
+    .blkNum = 4096,              /* Number of blocks/sectors in the flash */
+    .id = 0xDCC2,          /* Device ID 0xDevice,Vendor */
+    .model = "Micron 4Gb 8bit",
+    .bb_page = 63,                /* Manufacturer Bad block marking page in block */
+    .flags = NFC_CLOCK_UPSCALE_200M
+    },
+  {                       /* spansion 4Gb */
+  .tADL = 70,              /* tADL, Address to write data delay */
+  .tCH = 5,          /* tCH, Enable signal hold time */
+  .tCS = 20,         /* tCS, Enable signal setup time */
+  .tWC = 25,               /* tWC, ND_nWE cycle duration */
+  .tWH = 10,               /* tWH, ND_nWE high duration */
+  .tWP = 12,               /* tWP, ND_nWE pulse time */
+  .tRC = 25,                /* tWC, ND_nRE cycle duration */
+  .tRH = 10,                /* tRH, ND_nRE high duration */
+  .tRP = 12,         /* tRP, ND_nRE pulse width */
+  .tR = 25000,             /* tR = data transfer from cell to register, maximum 60,000ns */
+  .tWHR = 60,             /* tWHR, ND_nWE high to ND_nRE low delay for status read */
+  .tAR = 10,                /* tAR, ND_ALE low to ND_nRE low delay */
+  .tRHW = 100,           /* tRHW, ND_nRE high to ND_nWE low delay 32 clocks */
+  .pgPrBlk = 64,          /* Pages per block - detected */
+  .pgSz = 2048,           /* Page size */
+  .oobSz = 64,             /* Spare size */
+  .blkNum = 2048,              /* Number of blocks/sectors in the flash */
+  .id = 0xdc01,            /* Device ID 0xDevice,Vendor */ /* 0x9AA8 when run through JTAG */
+  .model = "spansion 4Gb 8bit",
+  .bb_page = 0,          /* Manufacturer Bad block marking page in block */
+  .flags = NFC_CLOCK_UPSCALE_200M
+  },
+	{			/* Micron 4Gb */
+	.tADL = 70,		/* tADL, Address to write data delay */
+	.tCH = 5,		/* tCH, Enable signal hold time */
+	.tCS = 15,		/* tCS, Enable signal setup time */
+	.tWC = 20,		/* tWC, ND_nWE cycle duration */
+	.tWH = 7,		/* tWH, ND_nWE high duration */
+	.tWP = 10,		/* tWP, ND_nWE pulse time */
+	.tRC = 20,		/* tWC, ND_nRE cycle duration */
+	.tRH = 7,		/* tRH, ND_nRE high duration */
+	.tRP = 10,		/* tRP, ND_nRE pulse width */
+	.tR = 25000,		/* tR = tR+tRR+tWB+1, ND_nWE high to ND_nRE low for read - 25000+20+100+1 */
+	.tWHR = 60,		/* tWHR, ND_nWE high to ND_nRE low delay for status read */
+	.tAR = 10,		/* tAR, ND_ALE low to ND_nRE low delay */
+	.tRHW = 100,		/* tRHW, ND_nRE high to ND_nWE low delay */
+	.pgPrBlk = 64,		/* Pages per block - detected */
+	.pgSz = 2048,		/* Page size */
+	.oobSz = 64,		/* Spare size */
+	.blkNum = 4096,		/* Number of blocks/sectors in the flash */
+	.id = 0xDC2C,		/* Device ID 0xDevice,Vendor */
+	.model = "Micron 4Gb 8bit",
+	.bb_page = 63,		/* Manufacturer Bad block marking page in block */
+	.flags = NFC_CLOCK_UPSCALE_200M
+	},
+
+	{			/* ST 1Gb */
+	.tADL = 100,		/* tADL, Address to write data delay */
+	.tCH = 5,		/* tCH, Enable signal hold time */
+	.tCS = 20,		/* tCS, Enable signal setup time */
+	.tWC = 30,		/* tWC, ND_nWE cycle duration */
+	.tWH = 10,		/* tWH, ND_nWE high duration */
+	.tWP = 15,		/* tWP, ND_nWE pulse time */
+	.tRC = 25,		/* tWC, ND_nRE cycle duration */
+	.tRH = 10,		/* tRH, ND_nRE high duration */
+	.tRP = 15,		/* tRP, ND_nRE pulse width */
+	.tR = 25000,		/* tR = tR+tRR+tWB+1, ND_nWE high to ND_nRE low for read - 25000+20+100+1 */
+	.tWHR = 60,		/* tWHR, ND_nWE high to ND_nRE low delay for status read */
+	.tAR = 10,		/* tAR, ND_ALE low to ND_nRE low delay */
+	.tRHW = 30,		/* tRHW, ND_nRE high to ND_nWE low delay */
+	.pgPrBlk = 64,		/* Pages per block - detected */
+	.pgSz = 2048,		/* Page size */
+	.oobSz = 64,		/* Spare size */
+	.blkNum = 1024,		/* Number of blocks/sectors in the flash */
+	.id = 0xF120,		/* Device ID 0xDevice,Vendor */
+	.model = "ST 1Gb 8bit",
+	.bb_page = 63,		/* Manufacturer Bad block marking page in block */
+	.flags = NFC_CLOCK_UPSCALE_200M
+	},
+
+	{			/* ST 8Gb */
+	 .tADL = 0,		/* tADL, Address to write data delay */
+	 .tCH = 5,		/* tCH, Enable signal hold time */
+	 .tCS = 20,		/* tCS, Enable signal setup time */
+	 .tWC = 24,		/* tWC, ND_nWE cycle duration */
+	 .tWH = 12,		/* tWH, ND_nWE high duration */
+	 .tWP = 12,		/* tWP, ND_nWE pulse time */
+	 .tRC = 24,		/* tWC, ND_nRE cycle duration */
+	 .tRH = 12,		/* tRH, ND_nRE high duration */
+	 .tRP = 12,		/* tRP, ND_nRE pulse width */
+	 .tR = 25121,		/* tR = tR+tRR+tWB+1, ND_nWE high to ND_nRE low for read - 25000+20+100+1 */
+	 .tWHR = 60,		/* tWHR, ND_nWE high to ND_nRE low delay for status read */
+	 .tAR = 10,		/* tAR, ND_ALE low to ND_nRE low delay */
+	 .tRHW = 48,		/* tRHW, ND_nRE high to ND_nWE low delay */
+	 .pgPrBlk = 64,		/* Pages per block - detected */
+	 .pgSz = 2048,		/* Page size */
+	 .oobSz = 64,		/* Spare size */
+	 .blkNum = 2048,	/* Number of blocks/sectors in the flash */
+	 .id = 0xD320,		/* Device ID 0xDevice,Vendor */
+	 .model = "ST 8Gb 8bit",
+	 .bb_page = 63,		/* Manufacturer Bad block marking page in block */
+	 .flags = NFC_CLOCK_UPSCALE_200M
+	 },
+	{			/* ST 4Gb */
+	 .tADL = 70,		/* tADL, Address to write data delay */
+	 .tCH = 5,		/* tCH, Enable signal hold time */
+	 .tCS = 20,		/* tCS, Enable signal setup time */
+	 .tWC = 22,		/* tWC, ND_nWE cycle duration */
+	 .tWH = 10,		/* tWH, ND_nWE high duration */
+	 .tWP = 12,		/* tWP, ND_nWE pulse time */
+	 .tRC = 24,		/* tWC, ND_nRE cycle duration */
+	 .tRH = 12,		/* tRH, ND_nRE high duration */
+	 .tRP = 12,		/* tRP, ND_nRE pulse width */
+	 .tR = 25121,		/* tR = tR+tRR+tWB+1, ND_nWE high to ND_nRE low for read - 25000+20+100+1 */
+	 .tWHR = 60,		/* tWHR, ND_nWE high to ND_nRE low delay for status read */
+	 .tAR = 10,		/* tAR, ND_ALE low to ND_nRE low delay */
+	 .tRHW = 100,		/* tRHW, ND_nRE high to ND_nWE low delay */
+	 .pgPrBlk = 64,		/* Pages per block - detected */
+	 .pgSz = 2048,		/* Page size */
+	 .oobSz = 64,		/* Spare size */
+	 .blkNum = 2048,	/* Number of blocks/sectors in the flash */
+	 .id = 0xDC20,		/* Device ID 0xDevice,Vendor */
+	 .model = "NM 4Gb 8bit",
+	 .bb_page = 0,		/* Manufacturer Bad block marking page in block */
+	 .flags = NFC_CLOCK_UPSCALE_200M
+	 },
+	{			/* ST 32Gb */
+	 .tADL = 0,		/* tADL, Address to write data delay */
+	 .tCH = 5,		/* tCH, Enable signal hold time */
+	 .tCS = 20,		/* tCS, Enable signal setup time */
+	 .tWC = 22,		/* tWC, ND_nWE cycle duration */
+	 .tWH = 10,		/* tWH, ND_nWE high duration */
+	 .tWP = 12,		/* tWP, ND_nWE pulse time */
+	 .tRC = 22,		/* tWC, ND_nRE cycle duration */
+	 .tRH = 10,		/* tRH, ND_nRE high duration */
+	 .tRP = 12,		/* tRP, ND_nRE pulse width */
+	 .tR = 25121,		/* tR = tR+tRR+tWB+1, ND_nWE high to ND_nRE low for read - 25000+20+100+1 */
+	 .tWHR = 80,		/* tWHR, ND_nWE high to ND_nRE low delay for status read */
+	 .tAR = 10,		/* tAR, ND_ALE low to ND_nRE low delay */
+	 .tRHW = 48,		/* tRHW, ND_nRE high to ND_nWE low delay */
+	 .pgPrBlk = 64,		/* Pages per block - detected */
+	 .pgSz = 4096,		/* Page size */
+	 .oobSz = 128,		/* Spare size */
+	 .blkNum = 16384,	/* Number of blocks/sectors in the flash */
+	 .id = 0xD520,		/* Device ID 0xVendor,device */
+	 .model = "ST 32Gb 8bit",
+	 .bb_page = 63,		/* Manufacturer Bad block marking page in block */
+	 .flags = NFC_CLOCK_UPSCALE_200M
+	 },
+
+	{			/* Samsung 16Gb */
+	 .tADL = 90,		/* tADL, Address to write data delay */
+	 .tCH = 0,		/* tCH, Enable signal hold time */
+	 .tCS = 5,		/* tCS, Enable signal setup time */
+	 .tWC = 22,		/* tWC, ND_nWE cycle duration */
+	 .tWH = 10,		/* tWH, ND_nWE high duration */
+	 .tWP = 12,		/* tWP, ND_nWE pulse time */
+	 .tRC = 24,		/* tWC, ND_nRE cycle duration */
+	 .tRH = 12,		/* tRH, ND_nRE high duration */
+	 .tRP = 12,		/* tRP, ND_nRE pulse width */
+	 .tR = 49146,		/* tR = data transfer from cell to register, maximum 60,000ns */
+	 .tWHR = 66,		/* tWHR, ND_nWE high to ND_nRE low delay for status read */
+	 .tAR = 66,		/* tAR, ND_ALE low to ND_nRE low delay */
+	 .tRHW = 32,		/* tRHW, ND_nRE high to ND_nWE low delay 32 clocks */
+	 .pgPrBlk = 128,	/* Pages per block - detected */
+	 .pgSz = 2048,		/* Page size */
+	 .oobSz = 64,		/* Spare size */
+	 .blkNum = 8192,	/* Number of blocks/sectors in the flash */
+	 .id = 0xD5EC,		/* Device ID 0xDevice,Vendor */
+	 .model = "Samsung 16Gb 8bit",
+	 .bb_page = 127,	/* Manufacturer Bad block marking page in block */
+	 .flags = NFC_CLOCK_UPSCALE_200M
+	 },
+
+	{			/* Samsung 2Gb */
+	.tADL = 90,		/* tADL, Address to write data delay */
+	.tCH = 10,		/* tCH, Enable signal hold time */
+	.tCS = 0,		/* tCS, Enable signal setup time */
+	.tWC = 40,		/* tWC, ND_nWE cycle duration */
+	.tWH = 15,		/* tWH, ND_nWE high duration */
+	.tWP = 25,		/* tWP, ND_nWE pulse time */
+	.tRC = 40,		/* tWC, ND_nRE cycle duration */
+	.tRH = 15,		/* tRH, ND_nRE high duration */
+	.tRP = 25,		/* tRP, ND_nRE pulse width */
+	.tR = 25000,		/* tR = data transfer from cell to register, maximum 60,000ns */
+	.tWHR = 60,		/* tWHR, ND_nWE high to ND_nRE low delay for status read */
+	.tAR = 10,		/* tAR, ND_ALE low to ND_nRE low delay */
+	.tRHW = 30,		/* tRHW, ND_nRE high to ND_nWE low delay 32 clocks */
+	.pgPrBlk = 128,		/* Pages per block - detected */
+	.pgSz = 2048,		/* Page size */
+	.oobSz = 64,		/* Spare size */
+	.blkNum = 1024,		/* Number of blocks/sectors in the flash */
+	.id = 0xDAEC,		/* Device ID 0xDevice,Vendor */ /* 0x9AA8 when run through JTAG */
+	.model = "Samsung 2Gb 8bit",
+	.bb_page = 0,		/* Manufacturer Bad block marking page in block */
+	 .flags = NFC_CLOCK_UPSCALE_200M
+	},
+
+	{			/* Samsung 8Gb */
+	.tADL = 100,		/* tADL, Address to write data delay */
+	.tCH = 5,		/* tCH, Enable signal hold time */
+	.tCS = 20,		/* tCS, Enable signal setup time */
+	.tWC = 22,		/* tWC, ND_nWE cycle duration */
+	.tWH = 10,		/* tWH, ND_nWE high duration */
+	.tWP = 12,		/* tWP, ND_nWE pulse time */
+	.tRC = 22,		/* tWC, ND_nRE cycle duration */
+	.tRH = 10,		/* tRH, ND_nRE high duration */
+	.tRP = 12,		/* tRP, ND_nRE pulse width */
+	.tR = 25000,		/* tR = data transfer from cell to register, maximum 60,000ns */
+	.tWHR = 60,		/* tWHR, ND_nWE high to ND_nRE low delay for status read */
+	.tAR = 10,		/* tAR, ND_ALE low to ND_nRE low delay */
+	.tRHW = 100,		/* tRHW, ND_nRE high to ND_nWE low delay 32 clocks */
+	.pgPrBlk = 64,		/* Pages per block - detected */
+	.pgSz = 4096,		/* Page size */
+	.oobSz = 128,		/* Spare size */
+	.blkNum = 4096,		/* Number of blocks/sectors in the flash */
+	.id = 0xD3EC,		/* Device ID 0xDevice,Vendor */ /* 0x9AA8 when run through JTAG */
+	.model = "Samsung 8Gb 8bit",
+	.bb_page = 0,		/* Manufacturer Bad block marking page in block */
+	 .flags = NFC_CLOCK_UPSCALE_200M
+	},
+
+	{			/* Samsung 4Gb */
+	.tADL = 70,		/* tADL, Address to write data delay */
+	.tCH = 5,		/* tCH, Enable signal hold time */
+	.tCS = 20,		/* tCS, Enable signal setup time */
+	.tWC = 22,		/* tWC, ND_nWE cycle duration */
+	.tWH = 10,		/* tWH, ND_nWE high duration */
+	.tWP = 12,		/* tWP, ND_nWE pulse time */
+	.tRC = 22,		/* tWC, ND_nRE cycle duration */
+	.tRH = 10,		/* tRH, ND_nRE high duration */
+	.tRP = 12,		/* tRP, ND_nRE pulse width */
+	.tR = 25000,		/* tR = data transfer from cell to register, maximum 60,000ns */
+	.tWHR = 60,		/* tWHR, ND_nWE high to ND_nRE low delay for status read */
+	.tAR = 10,		/* tAR, ND_ALE low to ND_nRE low delay */
+	.tRHW = 100,		/* tRHW, ND_nRE high to ND_nWE low delay 32 clocks */
+	.pgPrBlk = 64,		/* Pages per block - detected */
+	.pgSz = 2048,		/* Page size */
+	.oobSz = 64,		/* Spare size */
+	.blkNum = 2048,		/* Number of blocks/sectors in the flash */
+	.id = 0xDCEC,		/* Device ID 0xDevice,Vendor */ /* 0x9AA8 when run through JTAG */
+	.model = "Samsung 4Gb 8bit",
+	.bb_page = 0,		/* Manufacturer Bad block marking page in block */
+	.flags = NFC_CLOCK_UPSCALE_200M
+	},
+
+	{			/* Samsung 32Gb */
+	 .tADL = 0,		/* tADL, Address to write data delay */
+	 .tCH = 5,		/* tCH, Enable signal hold time */
+	 .tCS = 20,		/* tCS, Enable signal setup time */
+	 .tWC = 25,		/* tWC, ND_nWE cycle duration */
+	 .tWH = 10,		/* tWH, ND_nWE high duration */
+	 .tWP = 15,		/* tWP, ND_nWE pulse time */
+	 .tRC = 30,		/* tWC, ND_nRE cycle duration */
+	 .tRH = 15,		/* tRH, ND_nRE high duration */
+	 .tRP = 15,		/* tRP, ND_nRE pulse width */
+	 .tR = 60000,		/* tR = data transfer from cell to register, maximum 60,000ns */
+	 .tWHR = 60,		/* tWHR, ND_nWE high to ND_nRE low delay for status read */
+	 .tAR = 10,		/* tAR, ND_ALE low to ND_nRE low delay */
+	 .tRHW = 48,		/* tRHW, ND_nRE high to ND_nWE low delay */
+	 .pgPrBlk = 128,	/* Pages per block - detected */
+	 .pgSz = 4096,		/* Page size */
+	 .oobSz = 128,		/* Spare size */
+	 .blkNum = 8192,	/* Number of blocks/sectors in the flash */
+	 .id = 0xD7EC,		/* Device ID 0xDevice,Vendor */
+	 .model = "Samsung 32Gb 8bit",
+	 .bb_page = 127,	/* Manufacturer Bad block marking page in block */
+	 .flags = NFC_CLOCK_UPSCALE_200M
+	 },
+	{			/* Micron 64Gb */
+	 .tADL = 0,		/* tADL, Address to write data delay */
+	 .tCH = 20,		/* tCH, Enable signal hold time */
+	 .tCS = 20,		/* tCS, Enable signal setup time */
+	 .tWC = 90,		/* tWC, ND_nWE cycle duration */
+	 .tWH = 45,		/* tWH, ND_nWE high duration */
+	 .tWP = 45,		/* tWP, ND_nWE pulse time */
+	 .tRC = 90,		/* tWC, ND_nRE cycle duration */
+	 .tRH = 45,		/* tRH, ND_nRE high duration */
+	 .tRP = 45,		/* tRP, ND_nRE pulse width */
+	 .tR = 0,		/* tR = data transfer from cell to register */
+	 .tWHR = 90,		/* tWHR, ND_nWE high to ND_nRE low delay for status read */
+	 .tAR = 65,		/* tAR, ND_ALE low to ND_nRE low delay */
+	 .tRHW = 32,		/* tRHW, ND_nRE high to ND_nWE low delay */
+	 .pgPrBlk = 256,	/* Pages per block - detected */
+	 .pgSz = 8192,		/* Page size */
+	 .oobSz = 448,		/* Spare size */
+	 .blkNum = 4096,	/* Number of blocks/sectors in the flash */
+	 .id = 0x882C,		/* Device ID 0xDevice,Vendor */
+	 .model = "Micron 64Gb 8bit",
+	 .bb_page = 0,		/* Manufacturer Bad block marking page in block */
+	 .flags = NFC_CLOCK_UPSCALE_200M
+	 },
+	{			/* Hinyx 8Gb */
+	.tADL = 0,		/* tADL, Address to write data delay */
+	.tCH = 5,		/* tCH, Enable signal hold time */
+	.tCS = 20,		/* tCS, Enable signal setup time */
+	.tWC = 22,		/* tWC, ND_nWE cycle duration */
+	.tWH = 10,		/* tWH, ND_nWE high duration */
+	.tWP = 12,		/* tWP, ND_nWE pulse time */
+	.tRC = 22,		/* tWC, ND_nRE cycle duration */
+	.tRH = 10,		/* tRH, ND_nRE high duration */
+	.tRP = 12,		/* tRP, ND_nRE pulse width */
+	.tR = 25,		/* tR = data transfer from cell to register */
+	.tWHR = 80,		/* tWHR, ND_nWE high to ND_nRE low delay for status read */
+	.tAR = 10,		/* tAR, ND_ALE low to ND_nRE low delay */
+	.tRHW = 100,		/* tRHW, ND_nRE high to ND_nWE low delay */
+	.pgPrBlk = 64,		/* Pages per block - detected */
+	.pgSz = 2048,		/* Page size */
+	.oobSz = 64,		/* Spare size */
+	.blkNum = 8192,		/* Number of blocks/sectors in the flash */
+	.id = 0xDCAD,		/* Device ID 0xDevice,Vendor */
+	.model = "Hynix 8Gb 8bit",
+	.bb_page = 0,		/* Manufacturer Bad block marking page in block */
+	 .flags = NFC_CLOCK_UPSCALE_200M
+	},
+	/* Timing used is ONFI Mode 2 (28Mhz) */
+	{			/* Micron 8Gb */
+	.tADL = 100,		/* tADL, Address to write data delay */
+	.tCH = 10,		/* tCH, Enable signal hold time */
+	.tCS = 25,		/* tCS, Enable signal setup time */
+	.tWC = 35,		/* tWC, ND_nWE cycle duration */
+	.tWH = 17,		/* tWH, ND_nWE high duration */
+	.tWP = 20,		/* tWP, ND_nWE pulse time */
+	.tRC = 35,		/* tWC, ND_nRE cycle duration */
+	.tRH = 17,		/* tRH, ND_nRE high duration */
+	.tRP = 17,		/* tRP, ND_nRE pulse width */
+	.tR = 25241,		/* tR = data transfer from cell to register tR = tR+tRR+tWB+1 */
+	.tWHR = 60,		/* tWHR, ND_nWE high to ND_nRE low delay for status read */
+	.tAR = 10,		/* tAR, ND_ALE low to ND_nRE low delay */
+	.tRHW = 100,		/* tRHW, ND_nRE high to ND_nWE low delay */
+	.pgPrBlk = 128,		/* Pages per block - detected */
+	.pgSz = 4096,		/* Page size */
+	.oobSz = 224,		/* Spare size */
+	.blkNum = 2048,		/* Number of blocks/sectors in the flash */
+	.id = 0x382C,		/* Device ID 0xDevice,Vendor */
+	.model = "Micron 8Gb 8bit ABABA",
+	.bb_page = 0,		/* Manufacturer Bad block marking page in block */
+	.flags = (NFC_CLOCK_UPSCALE_200M | NFC_FLAGS_ONFI_MODE_3_SET)
+	},
+	{			/* Micron 8Gb ABACA  */
+		/* timing Asynchronous mode 3 */
+	.tADL = 100,		/* tADL, Address to write data delay */
+	.tCH = 10,		/* tCH, Enable signal hold time */
+	.tCS = 25,		/* tCS, Enable signal setup time */
+	.tWC = 35,		/* tWC, ND_nWE cycle duration, limited to 35 by the ARMADA-XP CPU */
+	.tWH = 17,		/* tWH, ND_nWE high duration */
+	.tWP = 15,		/* tWP, ND_nWE pulse time */
+	.tRC = 35,		/* tRC, ND_nRE cycle duration, limited to 35 by the ARMADA-XP CPU */
+	.tRH = 17,		/* tRH, ND_nRE high duration */
+	.tRP = 15,		/* tRP, ND_nRE pulse width */
+	.tR = 25241,		/* tR = data transfer from cell to register tR = tR+tRR+tWB+1 */
+	.tWHR = 60,		/* tWHR, ND_nWE high to ND_nRE low delay for status read */
+	.tAR = 10,		/* tAR, ND_ALE low to ND_nRE low delay */
+	.tRHW = 100,		/* tRHW, ND_nRE high to ND_nWE low delay */
+	.pgPrBlk = 64,		/* Pages per block - detected */
+	.pgSz = 4096,		/* Page size */
+	.oobSz = 224,		/* Spare size */
+	.blkNum = 4096,		/* Number of blocks/sectors in the flash */
+	.id = 0xd32C,		/* Device ID 0xDevice,Vendor */
+	.model = "Micron 8Gb 8bit ABACA",
+	.bb_page = 0,		/* Manufacturer Bad block marking page in block */
+	.flags = (NFC_CLOCK_UPSCALE_200M)
+	},
+	{	/* Micron 2Gb ABAFA  */
+		/* 3.3v parametrs */
+	.tADL = 70,		/* tADL, Address to write data delay */
+	.tCH = 5,		/* tCH, Enable signal hold time */
+	.tCS = 15,		/* tCS, Enable signal setup time */
+	.tWC = 20,		/* tWC, ND_nWE cycle duration, limited to 35 by the ARMADA-XP CPU */
+	.tWH = 7,		/* tWH, ND_nWE high duration */
+	.tWP = 10,		/* tWP, ND_nWE pulse time */
+	.tRC = 21,		/* tRC, ND_nRE cycle duration, limited to 35 by the ARMADA-XP CPU */
+	.tRH = 7,		/* tRH, ND_nRE high duration */
+	.tRP = 10,		/* tRP, ND_nRE pulse width */
+	.tR = 25121,	/* tR = data transfer from cell to register tR = tR+tRR+tWB+1 */
+	.tWHR = 60,		/* tWHR, ND_nWE high to ND_nRE low delay for status read */
+	.tAR = 10,		/* tAR, ND_ALE low to ND_nRE low delay */
+	.tRHW = 100,		/* tRHW, ND_nRE high to ND_nWE low delay */
+	.pgPrBlk = 64,		/* Pages per block - detected */
+	.pgSz = 2048,		/* Page size */
+	.oobSz = 224,		/* Spare size */
+	.blkNum = 2048,		/* Number of blocks/sectors in the flash */
+	.id = 0xDA2C,		/* Device ID 0xDevice,Vendor */
+	.model = "Micron 2Gb 8bit ABAFA",
+	.bb_page = 0,		/* Manufacturer Bad block marking page in block */
+	.flags = NFC_CLOCK_UPSCALE_200M
+	}
+};
+
+/* Defined Command set */
+#define	MV_NFC_FLASH_SP_CMD_SET_IDX		0
+#define		MV_NFC_FLASH_LP_CMD_SET_IDX		1
+static MV_NFC_FLASH_CMD_SET flashCmdSet[] = {
+	{
+	 .read1 = 0x0000,
+	 .read2 = 0x0050,
+	 .program = 0x1080,
+	 .readStatus = 0x0070,
+	 .readId = 0x0090,
+	 .erase = 0xD060,
+	 .multiplaneErase = 0xD160,
+	 .reset = 0x00FF,
+	 .lock = 0x002A,
+	 .unlock = 0x2423,
+	 .lockStatus = 0x007A,
+	 },
+	{
+	 .read1 = 0x3000,
+	 .exitCacheRead = 0x003f,
+	 .cacheReadRand = 0x3100,
+	 .cacheReadSeq = 0x0031,
+	 .read2 = 0x0050,
+	 .program = 0x1080,
+	 .readStatus = 0x0070,
+	 .readId = 0x0090,
+	 .erase = 0xD060,
+	 .multiplaneErase = 0xD160,
+	 .reset = 0x00FF,
+	 .lock = 0x002A,
+	 .unlock = 0x2423,
+	 .lockStatus = 0x007A,
+	 }
+};
+
+/*#define MV_NFC_REG_DBG*/
+#ifdef MV_NFC_REG_DBG
+MV_U32 mvNfcDbgFlag = 1;
+
+MV_U32 nfc_dbg_read(MV_U32 addr)
+{
+	MV_U32 reg = MV_MEMIO_LE32_READ((INTER_REGS_BASE | addr));
+	if (mvNfcDbgFlag)
+		mvOsPrintf("NFC read  0x%08x = %08x\n", addr, reg);
+	return reg;
+}
+
+MV_VOID nfc_dbg_write(MV_U32 addr, MV_U32 val)
+{
+	MV_MEMIO_LE32_WRITE((INTER_REGS_BASE | addr), (val));
+
+	if (mvNfcDbgFlag)
+		mvOsPrintf("NFC write 0x%08x = %08x\n", addr, val);
+}
+
+#undef MV_NAND_REG_READ
+#undef MV_NAND_REG_WRITE
+#define MV_NAND_REG_READ(x)		nfc_dbg_read(x)
+#define MV_NAND_REG_WRITE(x, y)		nfc_dbg_write(x, y)
+#endif
+
+/**************/
+/* Prototypes */
+/**************/
+static MV_STATUS mvDfcWait4Complete(MV_U32 statMask, MV_U32 usec);
+static MV_STATUS mvNfcReadIdNative(MV_NFC_CHIP_SEL cs, MV_U16 *id);
+static MV_STATUS mvNfcTimingSet(MV_U32 nand_clock, MV_NFC_FLASH_INFO *flInfo);
+static MV_U32 mvNfcColBits(MV_U32 pg_size);
+static MV_STATUS mvNfcDeviceFeatureSet(MV_NFC_CTRL *nfcCtrl, MV_U8 cmd, MV_U8 addr, MV_U32 data0, MV_U32 data1);
+static MV_STATUS mvNfcDeviceFeatureGet(MV_NFC_CTRL *nfcCtrl, MV_U8 cmd, MV_U8 addr, MV_U32 *data0, MV_U32 *data1);
+static MV_STATUS mvNfcDeviceModeSet(MV_NFC_CTRL *nfcCtrl, MV_NFC_ONFI_MODE mode);
+static MV_STATUS mvNfcReadParamPage(struct parameter_page_t *ppage);
+
+/**************/
+/* Local Data */
+/**************/
+struct parameter_page_t paramPage;
+
+/*******************************************************************************
+* mvNfcInit
+*
+* DESCRIPTION:
+*       Initialize the NAND controller unit, and perform a detection of the
+*	attached NAND device.
+*
+* INPUT:
+*	nfcInfo  - Flash information parameters.
+*
+* OUTPUT:
+*	nfcCtrl  - Nand control and status information to be held by the user
+*		    and passed to all other APIs.
+*
+* RETURN:
+*       MV_OK		- On success,
+*	MV_BAD_PARAM	- The required ECC mode not supported by flash.
+*	MV_NOT_SUPPORTED- The underlying flash device is not supported by HAL.
+*	MV_TIMEOUT	- Error accessing the underlying flahs device.
+*	MV_FAIL		- On failure
+*******************************************************************************/
+MV_STATUS mvNfcInit(MV_NFC_INFO *nfcInfo, MV_NFC_CTRL *nfcCtrl, struct MV_NFC_HAL_DATA *halData)
+{
+	MV_U32 ctrl_reg;
+	MV_STATUS ret;
+	MV_U16 read_id = 0;
+	MV_U32 i;
+	MV_U32 nand_clock;
+	/* Initial register values */
+	ctrl_reg = 0;
+	/*
+	 Reduce NAND clock for supporting slower flashes for initialization
+	 ECC engine clock = (2Ghz / divider)
+	 NFC clock = ECC clock / 2
+	 */
+	nand_clock = halData->mvCtrlNandClkSetFunction(_100MHz); /* Go down to 100MHz */
+	if (nand_clock != _100MHz)
+		DB(mvOsPrintf("%s: Warning: set NFC Clock frequency to %dHz instead of %dHz\n",
+						__func__, nand_clock, _100MHz));
+
+	DB(mvOsPrintf("mvNfcInit: set nand clock to %d\n", nand_clock));
+
+	/* Relax Timing configurations to avoid timing violations after flash reset */
+	MV_NAND_REG_WRITE(NFC_TIMING_0_REG, MV_NDTR0CS0_REG);
+	MV_NAND_REG_WRITE(NFC_TIMING_1_REG, MV_NDTR1CS0_REG);
+
+	/* make sure ECC is disabled at this point - will be enabled only when issuing certain commands */
+	MV_NAND_REG_BIT_RESET(NFC_CONTROL_REG, NFC_CTRL_ECC_EN_MASK);
+	if (nfcInfo->eccMode != MV_NFC_ECC_HAMMING)
+		MV_NAND_REG_BIT_RESET(NFC_ECC_CONTROL_REG, NFC_ECC_BCH_EN_MASK);
+
+	if ((nfcInfo->eccMode == MV_NFC_ECC_BCH_1K) ||
+	    (nfcInfo->eccMode == MV_NFC_ECC_BCH_704B) || (nfcInfo->eccMode == MV_NFC_ECC_BCH_512B))
+		/* Disable spare */
+		ctrl_reg &= ~NFC_CTRL_SPARE_EN_MASK;
+	else
+		/* Enable spare */
+		ctrl_reg |= NFC_CTRL_SPARE_EN_MASK;
+
+	ctrl_reg &= ~NFC_CTRL_ECC_EN_MASK;
+
+	/* Configure flash interface */
+	if (nfcInfo->ifMode == MV_NFC_IF_1X16) {
+		nfcCtrl->flashWidth = 16;
+		nfcCtrl->dfcWidth = 16;
+		ctrl_reg |= (NFC_CTRL_DWIDTH_M_MASK | NFC_CTRL_DWIDTH_C_MASK);
+	} else if (nfcInfo->ifMode == MV_NFC_IF_2X8) {
+		nfcCtrl->flashWidth = 8;
+		nfcCtrl->dfcWidth = 16;
+		ctrl_reg |= NFC_CTRL_DWIDTH_C_MASK;
+	} else {
+		nfcCtrl->flashWidth = 8;
+		nfcCtrl->dfcWidth = 8;
+	}
+
+	/* Configure initial READ-ID byte count */
+	ctrl_reg |= (0x2 << NFC_CTRL_RD_ID_CNT_OFFS);
+
+	/* Configure the Arbiter */
+	ctrl_reg |= NFC_CTRL_ND_ARB_EN_MASK;
+
+	/* Write registers before device detection */
+	MV_NAND_REG_WRITE(NFC_CONTROL_REG, ctrl_reg);
+
+#ifdef MTD_NAND_NFC_INIT_RESET
+	/* reset the device */
+	ret = mvNfcReset();
+	if (ret != MV_OK)
+		return ret;
+#endif
+
+	/* Read the device ID */
+	ret = mvNfcReadIdNative(nfcCtrl->currCs, &read_id);
+	if (ret != MV_OK)
+		return ret;
+
+	/* Look for device ID in knwon device table */
+	for (i = 0; i < (sizeof(flashDeviceInfo) / sizeof(MV_NFC_FLASH_INFO)); i++) {
+		if (flashDeviceInfo[i].id == read_id)
+			break;
+	}
+	if (i == (sizeof(flashDeviceInfo) / sizeof(MV_NFC_FLASH_INFO)))
+		return MV_NOT_SUPPORTED;
+	else
+		nfcCtrl->flashIdx = i;
+
+	/* In case of ONFI Mode set needed */
+	if (flashDeviceInfo[i].flags & NFC_FLAGS_ONFI_MODE_3_SET) {
+		ret = mvNfcDeviceModeSet(nfcCtrl, MV_NFC_ONFI_MODE_3);
+		if (ret != MV_OK)
+			return ret;
+		if (MV_OK == mvNfcReadParamPage(&paramPage)) {
+			DB(mvNfcPrintParamPage());
+			switch (paramPage.num_ECC_bits) {
+			case 1:
+				nfcInfo->eccMode = MV_NFC_ECC_HAMMING;
+				break;
+			case 4:
+				nfcInfo->eccMode = MV_NFC_ECC_BCH_2K;
+				break;
+			case 8:
+				nfcInfo->eccMode = MV_NFC_ECC_BCH_1K;
+				break;
+			case 24:
+			case 12:
+				nfcInfo->eccMode = MV_NFC_ECC_BCH_704B;
+				break;
+			case 16:
+				nfcInfo->eccMode = MV_NFC_ECC_BCH_512B;
+				break;
+			default:
+				nfcInfo->eccMode = MV_NFC_ECC_DISABLE;
+				break;
+			}
+		} else
+			mvOsPrintf("mvNfcReadParamPage (EC comand) return error\n");
+	}
+
+	/* Critical Initialization done. Raise NFC clock if needed */
+	if (flashDeviceInfo[i].flags & NFC_CLOCK_UPSCALE_200M) {
+		nand_clock = halData->mvCtrlNandClkSetFunction(_200MHz); /* raise NFC clk to 200MHz */
+		if (nand_clock != _200MHz)
+			DB(mvOsPrintf("%s: Warning: set NFC Clock frequency to %dHz instead of %dHz\n",
+							__func__, nand_clock, _200MHz));
+	}
+
+	DB(mvOsPrintf("mvNfcInit: set nand clock to %d\n", nand_clock));
+
+	/* Configure the command set based on page size */
+	if (flashDeviceInfo[i].pgSz < MV_NFC_2KB_PAGE)
+		nfcCtrl->cmdsetIdx = MV_NFC_FLASH_SP_CMD_SET_IDX;
+	else
+		nfcCtrl->cmdsetIdx = MV_NFC_FLASH_LP_CMD_SET_IDX;
+
+	/* calculate Timing parameters */
+	ret = mvNfcTimingSet(nand_clock, &flashDeviceInfo[i]);
+	if (ret != MV_OK) {
+		DB(mvOsPrintf("mvNfcInit: mvNfcTimingSet failed for clock %d\n", nand_clock));
+		return ret;
+	}
+
+	/* Configure the control register based on the device detected */
+	ctrl_reg = MV_NAND_REG_READ(NFC_CONTROL_REG);
+
+	/* Configure DMA */
+	if (nfcInfo->ioMode == MV_NFC_PDMA_ACCESS)
+		ctrl_reg |= NFC_CTRL_DMA_EN_MASK;
+	else
+		ctrl_reg &= ~NFC_CTRL_DMA_EN_MASK;
+
+	/* Configure Page size */
+	ctrl_reg &= ~NFC_CTRL_PAGE_SZ_MASK;
+	switch (flashDeviceInfo[i].pgSz) {
+	case MV_NFC_512B_PAGE:
+		ctrl_reg |= NFC_CTRL_PAGE_SZ_512B;
+		break;
+
+	case MV_NFC_2KB_PAGE:
+	case MV_NFC_4KB_PAGE:
+	case MV_NFC_8KB_PAGE:
+		ctrl_reg |= NFC_CTRL_PAGE_SZ_2KB;
+		break;
+
+	default:
+		return MV_BAD_PARAM;
+	}
+
+	/* Disable sequential read if indicated */
+	if (flashDeviceInfo[i].seqDis)
+		ctrl_reg |= NFC_CTRL_SEQ_DIS_MASK;
+	else
+		ctrl_reg &= ~NFC_CTRL_SEQ_DIS_MASK;
+
+	/* Configure the READ-ID count and row address start based on page size */
+	ctrl_reg &= ~(NFC_CTRL_RD_ID_CNT_MASK | NFC_CTRL_RA_START_MASK);
+	if (flashDeviceInfo[i].pgSz >= MV_NFC_2KB_PAGE) {
+		ctrl_reg |= NFC_CTRL_RD_ID_CNT_LP;
+		ctrl_reg |= NFC_CTRL_RA_START_MASK;
+	} else {
+		ctrl_reg |= NFC_CTRL_RD_ID_CNT_SP;
+	}
+
+	/* Confiugre pages per block */
+	ctrl_reg &= ~NFC_CTRL_PG_PER_BLK_MASK;
+	switch (flashDeviceInfo[i].pgPrBlk) {
+	case 32:
+		ctrl_reg |= NFC_CTRL_PG_PER_BLK_32;
+		break;
+
+	case 64:
+		ctrl_reg |= NFC_CTRL_PG_PER_BLK_64;
+		break;
+
+	case 128:
+		ctrl_reg |= NFC_CTRL_PG_PER_BLK_128;
+		break;
+
+	case 256:
+		ctrl_reg |= NFC_CTRL_PG_PER_BLK_256;
+		break;
+
+	default:
+		return MV_BAD_PARAM;
+	}
+
+	/* Write the updated control register */
+	MV_NAND_REG_WRITE(NFC_CONTROL_REG, ctrl_reg);
+
+#ifdef MV_INCLUDE_PDMA
+	/* DMA resource allocation */
+	if (nfcInfo->ioMode == MV_NFC_PDMA_ACCESS) {
+		/* Allocate command buffer */
+		nfcCtrl->cmdBuff.bufVirtPtr =
+		     mvOsIoUncachedMalloc(nfcInfo->osHandle, (NFC_CMD_STRUCT_SIZE * MV_NFC_MAX_DESC_CHAIN),
+					  &nfcCtrl->cmdBuff.bufPhysAddr, &nfcCtrl->cmdBuff.memHandle);
+		if (nfcCtrl->cmdBuff.bufVirtPtr == NULL)
+			return MV_OUT_OF_CPU_MEM;
+		nfcCtrl->cmdBuff.bufSize = (NFC_CMD_STRUCT_SIZE * MV_NFC_MAX_DESC_CHAIN);
+		nfcCtrl->cmdBuff.dataSize = (NFC_CMD_STRUCT_SIZE * MV_NFC_MAX_DESC_CHAIN);
+
+		/* Allocate command DMA descriptors */
+		nfcCtrl->cmdDescBuff.bufVirtPtr =
+		     mvOsIoUncachedMalloc(nfcInfo->osHandle, (MV_PDMA_DESC_SIZE * (MV_NFC_MAX_DESC_CHAIN + 1)),
+					  &nfcCtrl->cmdDescBuff.bufPhysAddr, &nfcCtrl->cmdDescBuff.memHandle);
+		if (nfcCtrl->cmdDescBuff.bufVirtPtr == NULL)
+			return MV_OUT_OF_CPU_MEM;
+		/* verify allignment to 128bits */
+		if ((MV_U32) nfcCtrl->cmdDescBuff.bufVirtPtr & 0xF) {
+			nfcCtrl->cmdDescBuff.bufVirtPtr =
+			    (MV_U8 *) (((MV_U32) nfcCtrl->cmdDescBuff.bufVirtPtr & ~0xF) + MV_PDMA_DESC_SIZE);
+			nfcCtrl->cmdDescBuff.bufPhysAddr =
+			    ((nfcCtrl->cmdDescBuff.bufPhysAddr & ~0xF) + MV_PDMA_DESC_SIZE);
+		}
+		nfcCtrl->cmdDescBuff.bufSize = (MV_PDMA_DESC_SIZE * MV_NFC_MAX_DESC_CHAIN);
+		nfcCtrl->cmdDescBuff.dataSize = (MV_PDMA_DESC_SIZE * MV_NFC_MAX_DESC_CHAIN);
+
+		/* Allocate data DMA descriptors */
+		nfcCtrl->dataDescBuff.bufVirtPtr =
+		     mvOsIoUncachedMalloc(nfcInfo->osHandle, (MV_PDMA_DESC_SIZE * (MV_NFC_MAX_DESC_CHAIN + 1)),
+					  &nfcCtrl->dataDescBuff.bufPhysAddr,
+					  &nfcCtrl->dataDescBuff.memHandle);
+		if (nfcCtrl->dataDescBuff.bufVirtPtr == NULL)
+			return MV_OUT_OF_CPU_MEM;
+		/* verify allignment to 128bits */
+		if ((MV_U32) nfcCtrl->dataDescBuff.bufVirtPtr & 0xF) {
+			nfcCtrl->dataDescBuff.bufVirtPtr =
+			    (MV_U8 *) (((MV_U32) nfcCtrl->dataDescBuff.bufVirtPtr & ~0xF) + MV_PDMA_DESC_SIZE);
+			nfcCtrl->dataDescBuff.bufPhysAddr =
+			    ((nfcCtrl->dataDescBuff.bufPhysAddr & ~0xF) + MV_PDMA_DESC_SIZE);
+		}
+		nfcCtrl->dataDescBuff.bufSize = (MV_PDMA_DESC_SIZE * MV_NFC_MAX_DESC_CHAIN);
+		nfcCtrl->dataDescBuff.dataSize = (MV_PDMA_DESC_SIZE * MV_NFC_MAX_DESC_CHAIN);
+
+		/* Allocate Data DMA channel */
+		if (mvPdmaChanAlloc(MV_PDMA_NAND_DATA, nfcInfo->dataPdmaIntMask, &nfcCtrl->dataChanHndl) != MV_OK)
+			return MV_NO_RESOURCE;
+
+		/* Allocate Command DMA channel */
+		if (mvPdmaChanAlloc(MV_PDMA_NAND_COMMAND, nfcInfo->cmdPdmaIntMask, &nfcCtrl->cmdChanHndl) != MV_OK)
+			return MV_NO_RESOURCE;
+	}
+#endif
+
+	/* Initialize remaining fields in the CTRL structure */
+	nfcCtrl->autoStatusRead = nfcInfo->autoStatusRead;
+	nfcCtrl->readyBypass = nfcInfo->readyBypass;
+	nfcCtrl->ioMode = nfcInfo->ioMode;
+	nfcCtrl->eccMode = nfcInfo->eccMode;
+	nfcCtrl->ifMode = nfcInfo->ifMode;
+	nfcCtrl->currCs = MV_NFC_CS_NONE;
+	nfcCtrl->regsPhysAddr = nfcInfo->regsPhysAddr;
+#ifdef MV_INCLUDE_PDMA
+	nfcCtrl->dataPdmaIntMask = nfcInfo->dataPdmaIntMask;
+	nfcCtrl->cmdPdmaIntMask = nfcInfo->cmdPdmaIntMask;
+#endif
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNfcSelectChip
+*
+* DESCRIPTION:
+*       Set the currently active chip for next commands.
+*
+* INPUT:
+*	nfcCtrl  - Nand control structure.
+*	chip	 - The chip number to operate on.
+*
+* OUTPUT:
+*	None.
+*
+* RETURN:
+*       MV_OK	- On success,
+*	MV_FAIL	- On failure
+*******************************************************************************/
+MV_STATUS mvNfcSelectChip(MV_NFC_CTRL *nfcCtrl, MV_NFC_CHIP_SEL chip)
+{
+	nfcCtrl->currCs = chip;
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNfcDataLength
+*
+* DESCRIPTION:
+*       Get the length of data based on the NFC configuration
+*
+* INPUT:
+*	nfcCtrl  - Nand control structure.
+*	cmd	 - Command to be executed
+*
+* OUTPUT:
+*	data_len - length of data to be transfered
+*
+* RETURN:
+*       MV_OK	- On success,
+*	MV_FAIL	- On failure
+*******************************************************************************/
+MV_STATUS mvNfcDataLength(MV_NFC_CTRL *nfcCtrl, MV_NFC_CMD_TYPE cmd, MV_U32 *data_len)
+{
+	/* Decide read data size based on page size */
+	if (flashDeviceInfo[nfcCtrl->flashIdx].pgSz < MV_NFC_2KB_PAGE) {	/* Small Page */
+		if (nfcCtrl->ifMode == MV_NFC_IF_2X8) {
+			if (nfcCtrl->eccMode == MV_NFC_ECC_HAMMING)
+				*data_len = NFC_RW_SP_G_HMNG_ECC_DATA_LEN;
+			else	/* No ECC */
+				*data_len = NFC_RW_SP_G_NO_ECC_DATA_LEN;
+		} else {
+			if (nfcCtrl->eccMode == MV_NFC_ECC_HAMMING)
+				*data_len = NFC_RW_SP_HMNG_ECC_DATA_LEN;
+			else	/* No ECC */
+				*data_len = NFC_RW_SP_NO_ECC_DATA_LEN;
+		}
+	} else {		/* Large Page */
+
+		if (nfcCtrl->ifMode == MV_NFC_IF_2X8) {
+			if (nfcCtrl->eccMode == MV_NFC_ECC_BCH_2K)
+				*data_len = NFC_RW_LP_G_BCH_ECC_DATA_LEN;
+			else if (nfcCtrl->eccMode == MV_NFC_ECC_BCH_1K)
+				*data_len = NFC_RW_LP_BCH1K_ECC_DATA_LEN;
+			else if (nfcCtrl->eccMode == MV_NFC_ECC_BCH_704B)
+				*data_len = NFC_RW_LP_BCH704B_ECC_DATA_LEN;
+			else if (nfcCtrl->eccMode == MV_NFC_ECC_BCH_512B)
+				*data_len = NFC_RW_LP_BCH512B_ECC_DATA_LEN;
+			else if (nfcCtrl->eccMode == MV_NFC_ECC_HAMMING)
+				*data_len = NFC_RW_LP_G_HMNG_ECC_DATA_LEN;
+			else	/* No ECC */
+				*data_len = NFC_RW_LP_G_NO_ECC_DATA_LEN;
+		} else {
+			if (nfcCtrl->eccMode == MV_NFC_ECC_BCH_2K)
+				*data_len = NFC_RW_LP_BCH_ECC_DATA_LEN;
+			else if (nfcCtrl->eccMode == MV_NFC_ECC_BCH_1K)
+				*data_len = NFC_RW_LP_BCH1K_ECC_DATA_LEN;
+			else if (nfcCtrl->eccMode == MV_NFC_ECC_BCH_704B)
+				*data_len = NFC_RW_LP_BCH704B_ECC_DATA_LEN;
+			else if (nfcCtrl->eccMode == MV_NFC_ECC_BCH_512B)
+				*data_len = NFC_RW_LP_BCH512B_ECC_DATA_LEN;
+			else if (nfcCtrl->eccMode == MV_NFC_ECC_HAMMING)
+				*data_len = NFC_RW_LP_HMNG_ECC_DATA_LEN;
+			else	/* No ECC */
+				*data_len = NFC_RW_LP_NO_ECC_DATA_LEN;
+		}
+	}
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNfcTransferDataLength
+*
+* DESCRIPTION:
+*       Get the length of data to be transfered based on the command type and
+*	NFC configuration
+*
+* INPUT:
+*	nfcCtrl  - Nand control structure.
+*	cmd	 - Command to be executed
+*
+* OUTPUT:
+*	data_len - length of data to be transfered
+*
+* RETURN:
+*       MV_OK	- On success,
+*	MV_FAIL	- On failure
+*******************************************************************************/
+MV_STATUS mvNfcTransferDataLength(MV_NFC_CTRL *nfcCtrl, MV_NFC_CMD_TYPE cmd, MV_U32 *data_len)
+{
+	switch (cmd) {
+	case MV_NFC_CMD_READ_ID:
+		if (nfcCtrl->ioMode == MV_NFC_PDMA_ACCESS)
+			*data_len = NFC_READ_ID_PDMA_DATA_LEN;
+		else
+			*data_len = NFC_READ_ID_PIO_DATA_LEN;
+		break;
+
+	case MV_NFC_CMD_READ_STATUS:
+		if (nfcCtrl->ioMode == MV_NFC_PDMA_ACCESS)
+			*data_len = NFC_READ_STATUS_PDMA_DATA_LEN;
+		else
+			*data_len = NFC_READ_STATUS_PIO_DATA_LEN;
+		break;
+
+	case MV_NFC_CMD_READ_MONOLITHIC:	/* Read a single 512B or 2KB page */
+	case MV_NFC_CMD_READ_MULTIPLE:
+	case MV_NFC_CMD_READ_NAKED:
+	case MV_NFC_CMD_READ_LAST_NAKED:
+	case MV_NFC_CMD_READ_DISPATCH:
+	case MV_NFC_CMD_WRITE_MONOLITHIC:	/* Program a single page of 512B or 2KB */
+	case MV_NFC_CMD_WRITE_MULTIPLE:
+	case MV_NFC_CMD_WRITE_NAKED:
+	case MV_NFC_CMD_WRITE_LAST_NAKED:
+	case MV_NFC_CMD_WRITE_DISPATCH:
+	case MV_NFC_CMD_EXIT_CACHE_READ:
+	case MV_NFC_CMD_CACHE_READ_SEQ:
+	case MV_NFC_CMD_CACHE_READ_START:
+		if (nfcCtrl->ioMode == MV_NFC_PDMA_ACCESS) {
+			/* Decide read data size based on page size */
+			if (flashDeviceInfo[nfcCtrl->flashIdx].pgSz < MV_NFC_2KB_PAGE) {	/* Small Page */
+				*data_len = NFC_RW_SP_PDMA_DATA_LEN;
+			} else {	/* Large Page */
+
+				if (nfcCtrl->eccMode == MV_NFC_ECC_BCH_2K)
+					*data_len = NFC_RW_LP_BCH_ECC_DATA_LEN;
+				else if (nfcCtrl->eccMode == MV_NFC_ECC_BCH_1K)
+					*data_len = NFC_RW_LP_BCH1K_ECC_DATA_LEN;
+				else if (nfcCtrl->eccMode == MV_NFC_ECC_BCH_704B)
+					*data_len = NFC_RW_LP_BCH704B_ECC_DATA_LEN;
+				else if (nfcCtrl->eccMode == MV_NFC_ECC_BCH_512B)
+					*data_len = NFC_RW_LP_BCH512B_ECC_DATA_LEN;
+				else	/* Hamming and No-Ecc */
+					*data_len = NFC_RW_LP_PDMA_DATA_LEN;
+			}
+		} else {	/* PIO mode */
+
+			/* Decide read data size based on page size */
+			if (flashDeviceInfo[nfcCtrl->flashIdx].pgSz < MV_NFC_2KB_PAGE) {	/* Small Page */
+				if (nfcCtrl->ifMode == MV_NFC_IF_2X8) {
+					if (nfcCtrl->eccMode == MV_NFC_ECC_HAMMING)
+						*data_len = NFC_RW_SP_G_HMNG_ECC_DATA_LEN;
+					else	/* No ECC */
+						*data_len = NFC_RW_SP_G_NO_ECC_DATA_LEN;
+				} else {
+					if (nfcCtrl->eccMode == MV_NFC_ECC_HAMMING)
+						*data_len = NFC_RW_SP_HMNG_ECC_DATA_LEN;
+					else	/* No ECC */
+						*data_len = NFC_RW_SP_NO_ECC_DATA_LEN;
+				}
+			} else {	/* Large Page */
+
+				if (nfcCtrl->ifMode == MV_NFC_IF_2X8) {
+					if (nfcCtrl->eccMode == MV_NFC_ECC_BCH_2K)
+						*data_len = NFC_RW_LP_G_BCH_ECC_DATA_LEN;
+					else if (nfcCtrl->eccMode == MV_NFC_ECC_BCH_1K)
+						*data_len = NFC_RW_LP_BCH1K_ECC_DATA_LEN;
+					else if (nfcCtrl->eccMode == MV_NFC_ECC_BCH_704B)
+						*data_len = NFC_RW_LP_BCH704B_ECC_DATA_LEN;
+					else if (nfcCtrl->eccMode == MV_NFC_ECC_BCH_512B)
+						*data_len = NFC_RW_LP_BCH512B_ECC_DATA_LEN;
+					else if (nfcCtrl->eccMode == MV_NFC_ECC_HAMMING)
+						*data_len = NFC_RW_LP_G_HMNG_ECC_DATA_LEN;
+					else	/* No ECC */
+						*data_len = NFC_RW_LP_G_NO_ECC_DATA_LEN;
+				} else {
+					if (nfcCtrl->eccMode == MV_NFC_ECC_BCH_2K)
+						*data_len = NFC_RW_LP_BCH_ECC_DATA_LEN;
+					else if (nfcCtrl->eccMode == MV_NFC_ECC_BCH_1K)
+						*data_len = NFC_RW_LP_BCH1K_ECC_DATA_LEN;
+					else if (nfcCtrl->eccMode == MV_NFC_ECC_BCH_704B)
+						*data_len = NFC_RW_LP_BCH704B_ECC_DATA_LEN;
+					else if (nfcCtrl->eccMode == MV_NFC_ECC_BCH_512B)
+						*data_len = NFC_RW_LP_BCH512B_ECC_DATA_LEN;
+					else if (nfcCtrl->eccMode == MV_NFC_ECC_HAMMING)
+						*data_len = NFC_RW_LP_HMNG_ECC_DATA_LEN;
+					else	/* No ECC */
+						*data_len = NFC_RW_LP_NO_ECC_DATA_LEN;
+				}
+			}
+		}
+		break;
+
+	case MV_NFC_CMD_ERASE:
+	case MV_NFC_CMD_MULTIPLANE_ERASE:
+	case MV_NFC_CMD_RESET:
+	case MV_NFC_CMD_WRITE_DISPATCH_START:
+	case MV_NFC_CMD_WRITE_DISPATCH_END:
+		return MV_BAD_PARAM;
+
+	default:
+		return MV_BAD_PARAM;
+
+	};
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNfcBuildCommand
+*
+* DESCRIPTION:
+*	Build the command buffer
+*
+* INPUT:
+*	nfcCtrl	- Nand control structure.
+*	cmd	- Command to be executed
+*	cmdb	- Command buffer cmdb[0:3] to fill
+*
+* OUTPUT:
+*	cmdb	- Command buffer filled
+*
+* RETURN:
+*	None
+*******************************************************************************/
+static MV_STATUS mvNfcBuildCommand(MV_NFC_CTRL *nfcCtrl, MV_NFC_MULTI_CMD *descInfo, MV_U32 *cmdb)
+{
+	cmdb[0] = 0;
+	cmdb[1] = 0;
+	cmdb[2] = 0;
+	cmdb[3] = 0;
+	if (nfcCtrl->autoStatusRead)
+		cmdb[0] |= NFC_CB0_AUTO_RS_MASK;
+
+	if ((nfcCtrl->currCs == MV_NFC_CS_1) || (nfcCtrl->currCs == MV_NFC_CS_3))
+		cmdb[0] |= NFC_CB0_CSEL_MASK;
+
+	if ((nfcCtrl->currCs == MV_NFC_CS_2) || (nfcCtrl->currCs == MV_NFC_CS_3))
+		cmdb[2] |= NFC_CB2_CS_2_3_SELECT_MASK;
+
+	if (nfcCtrl->readyBypass)
+		cmdb[0] |= NFC_CB0_RDY_BYP_MASK;
+
+	switch (descInfo->cmd) {
+	case MV_NFC_CMD_READ_ID:
+		cmdb[0] |= (flashCmdSet[nfcCtrl->cmdsetIdx].readId & (NFC_CB0_CMD1_MASK | NFC_CB0_CMD2_MASK));
+		cmdb[0] |= ((NFC_READ_ID_ADDR_LEN << NFC_CB0_ADDR_CYC_OFFS) & NFC_CB0_ADDR_CYC_MASK);
+		cmdb[0] |= NFC_CB0_CMD_TYPE_READ_ID;
+		break;
+
+	case MV_NFC_CMD_READ_STATUS:
+		cmdb[0] |= (flashCmdSet[nfcCtrl->cmdsetIdx].readStatus & (NFC_CB0_CMD1_MASK | NFC_CB0_CMD2_MASK));
+		cmdb[0] |= NFC_CB0_CMD_TYPE_STATUS;
+		break;
+
+	case MV_NFC_CMD_ERASE:
+	case MV_NFC_CMD_MULTIPLANE_ERASE:
+
+		if (descInfo->cmd == MV_NFC_CMD_ERASE)
+			cmdb[0] |= (flashCmdSet[nfcCtrl->cmdsetIdx].erase & (NFC_CB0_CMD1_MASK | NFC_CB0_CMD2_MASK));
+		if (descInfo->cmd == MV_NFC_CMD_MULTIPLANE_ERASE)
+			cmdb[0] |=
+			    (flashCmdSet[nfcCtrl->cmdsetIdx].multiplaneErase & (NFC_CB0_CMD1_MASK | NFC_CB0_CMD2_MASK));
+
+		cmdb[0] |= ((NFC_ERASE_ADDR_LEN << NFC_CB0_ADDR_CYC_OFFS) & NFC_CB0_ADDR_CYC_MASK);
+		cmdb[0] |= NFC_CB0_DBC_MASK;
+		cmdb[0] |= NFC_CB0_CMD_TYPE_ERASE;
+		cmdb[1] |= (descInfo->pageAddr & NFC_BLOCK_ADDR_BITS);
+		break;
+
+	case MV_NFC_CMD_RESET:
+		cmdb[0] |= (flashCmdSet[nfcCtrl->cmdsetIdx].reset & (NFC_CB0_CMD1_MASK | NFC_CB0_CMD2_MASK));
+		cmdb[0] |= NFC_CB0_CMD_TYPE_RESET;
+		break;
+
+	case MV_NFC_CMD_CACHE_READ_SEQ:
+		cmdb[0] = (flashCmdSet[nfcCtrl->cmdsetIdx].cacheReadSeq & (NFC_CB0_CMD1_MASK | NFC_CB0_CMD2_MASK));
+		break;
+
+	case MV_NFC_CMD_CACHE_READ_RAND:
+		cmdb[0] = (flashCmdSet[nfcCtrl->cmdsetIdx].cacheReadRand & (NFC_CB0_CMD1_MASK | NFC_CB0_CMD2_MASK));
+		if (flashDeviceInfo[nfcCtrl->flashIdx].pgSz < MV_NFC_2KB_PAGE) {
+			cmdb[1] |= ((descInfo->pageAddr << NFC_SP_PG_OFFS) & NFC_SP_PG_MASK);
+			if (descInfo->pageAddr & ~NFC_SP_PG_MASK)
+				cmdb[0] |=
+				    ((NFC_SP_BIG_READ_ADDR_LEN << NFC_CB0_ADDR_CYC_OFFS) & NFC_CB0_ADDR_CYC_MASK);
+			else
+				cmdb[0] |= ((NFC_SP_READ_ADDR_LEN << NFC_CB0_ADDR_CYC_OFFS) & NFC_CB0_ADDR_CYC_MASK);
+		} else {
+			cmdb[0] |= ((NFC_LP_READ_ADDR_LEN << NFC_CB0_ADDR_CYC_OFFS) & NFC_CB0_ADDR_CYC_MASK);
+			cmdb[0] |= NFC_CB0_DBC_MASK;
+			cmdb[1] |= ((descInfo->pageAddr << NFC_LP_PG_OFFS) & NFC_LP_PG_MASK);
+			cmdb[2] |= (descInfo->pageAddr >> (32 - NFC_LP_PG_OFFS));
+		}
+		cmdb[0] |= NFC_CB0_CMD_TYPE_READ;
+		break;
+
+	case MV_NFC_CMD_EXIT_CACHE_READ:
+		cmdb[0] |= (flashCmdSet[nfcCtrl->cmdsetIdx].exitCacheRead & (NFC_CB0_CMD1_MASK | NFC_CB0_CMD2_MASK));
+		break;
+
+	case MV_NFC_CMD_CACHE_READ_START:
+		cmdb[0] |= (flashCmdSet[nfcCtrl->cmdsetIdx].read1 & (NFC_CB0_CMD1_MASK | NFC_CB0_CMD2_MASK));
+		if (flashDeviceInfo[nfcCtrl->flashIdx].pgSz < MV_NFC_2KB_PAGE) {
+			cmdb[1] |= ((descInfo->pageAddr << NFC_SP_PG_OFFS) & NFC_SP_PG_MASK);
+			if (descInfo->pageAddr & ~NFC_SP_PG_MASK)
+				cmdb[0] |=
+				    ((NFC_SP_BIG_READ_ADDR_LEN << NFC_CB0_ADDR_CYC_OFFS) & NFC_CB0_ADDR_CYC_MASK);
+			else
+				cmdb[0] |= ((NFC_SP_READ_ADDR_LEN << NFC_CB0_ADDR_CYC_OFFS) & NFC_CB0_ADDR_CYC_MASK);
+		} else {
+			cmdb[0] |= ((NFC_LP_READ_ADDR_LEN << NFC_CB0_ADDR_CYC_OFFS) & NFC_CB0_ADDR_CYC_MASK);
+			cmdb[0] |= NFC_CB0_DBC_MASK;
+			cmdb[1] |= ((descInfo->pageAddr << NFC_LP_PG_OFFS) & NFC_LP_PG_MASK);
+			cmdb[2] |= (descInfo->pageAddr >> (32 - NFC_LP_PG_OFFS));
+		}
+		cmdb[0] |= NFC_CB0_CMD_TYPE_READ;
+		cmdb[0] |= NFC_CB0_LEN_OVRD_MASK;
+		break;
+
+	case MV_NFC_CMD_READ_MONOLITHIC:	/* Read a single 512B or 2KB page */
+	case MV_NFC_CMD_READ_MULTIPLE:
+	case MV_NFC_CMD_READ_NAKED:
+	case MV_NFC_CMD_READ_LAST_NAKED:
+	case MV_NFC_CMD_READ_DISPATCH:
+		cmdb[0] |= (flashCmdSet[nfcCtrl->cmdsetIdx].read1 & (NFC_CB0_CMD1_MASK | NFC_CB0_CMD2_MASK));
+		if (flashDeviceInfo[nfcCtrl->flashIdx].pgSz < MV_NFC_2KB_PAGE) {
+			cmdb[1] |= ((descInfo->pageAddr << NFC_SP_PG_OFFS) & NFC_SP_PG_MASK);
+			if (descInfo->pageAddr & ~NFC_SP_PG_MASK)
+				cmdb[0] |=
+				    ((NFC_SP_BIG_READ_ADDR_LEN << NFC_CB0_ADDR_CYC_OFFS) & NFC_CB0_ADDR_CYC_MASK);
+			else
+				cmdb[0] |= ((NFC_SP_READ_ADDR_LEN << NFC_CB0_ADDR_CYC_OFFS) & NFC_CB0_ADDR_CYC_MASK);
+		} else {
+			cmdb[0] |= ((NFC_LP_READ_ADDR_LEN << NFC_CB0_ADDR_CYC_OFFS) & NFC_CB0_ADDR_CYC_MASK);
+			cmdb[0] |= NFC_CB0_DBC_MASK;
+			cmdb[1] |= ((descInfo->pageAddr << NFC_LP_PG_OFFS) & NFC_LP_PG_MASK);
+			cmdb[2] |= (descInfo->pageAddr >> (32 - NFC_LP_PG_OFFS));
+		}
+		cmdb[0] |= NFC_CB0_CMD_TYPE_READ;
+
+		if (descInfo->length) {
+			cmdb[0] |= NFC_CB0_LEN_OVRD_MASK;
+			cmdb[3] |= (descInfo->length & 0xFFFF);
+		}
+
+		/* Check for extended command syntax */
+		switch (descInfo->cmd) {
+		case MV_NFC_CMD_READ_MULTIPLE:
+			cmdb[0] |= NFC_CB0_CMD_XTYPE_MULTIPLE;
+			break;
+		case MV_NFC_CMD_READ_NAKED:
+			cmdb[0] |= NFC_CB0_CMD_XTYPE_NAKED;
+			break;
+		case MV_NFC_CMD_READ_LAST_NAKED:
+			cmdb[0] |= NFC_CB0_CMD_XTYPE_LAST_NAKED;
+			break;
+		case MV_NFC_CMD_READ_DISPATCH:
+			cmdb[0] |= NFC_CB0_CMD_XTYPE_DISPATCH;
+			break;
+		default:
+			break;
+		};
+		break;
+
+	case MV_NFC_CMD_WRITE_MONOLITHIC:	/* Program a single page of 512B or 2KB */
+	case MV_NFC_CMD_WRITE_MULTIPLE:
+		/*case MV_NFC_CMD_WRITE_NAKED: */
+	case MV_NFC_CMD_WRITE_LAST_NAKED:
+	case MV_NFC_CMD_WRITE_DISPATCH:
+		cmdb[0] |= (flashCmdSet[nfcCtrl->cmdsetIdx].program & (NFC_CB0_CMD1_MASK | NFC_CB0_CMD2_MASK));
+		if (flashDeviceInfo[nfcCtrl->flashIdx].pgSz < MV_NFC_2KB_PAGE) {
+			if (descInfo->pageAddr & ~NFC_SP_PG_MASK)
+				cmdb[0] |=
+				    ((NFC_SP_BIG_READ_ADDR_LEN << NFC_CB0_ADDR_CYC_OFFS) & NFC_CB0_ADDR_CYC_MASK);
+			else
+				cmdb[0] |= ((NFC_SP_READ_ADDR_LEN << NFC_CB0_ADDR_CYC_OFFS) & NFC_CB0_ADDR_CYC_MASK);
+			cmdb[1] |= ((descInfo->pageAddr << NFC_SP_PG_OFFS) & NFC_SP_PG_MASK);
+		} else {
+			cmdb[0] |= ((NFC_LP_READ_ADDR_LEN << NFC_CB0_ADDR_CYC_OFFS) & NFC_CB0_ADDR_CYC_MASK);
+			cmdb[1] |= ((descInfo->pageAddr << NFC_LP_PG_OFFS) & NFC_LP_PG_MASK);
+			cmdb[2] |= (descInfo->pageAddr >> (32 - NFC_LP_PG_OFFS));
+		}
+		cmdb[0] |= NFC_CB0_DBC_MASK;
+		cmdb[0] |= NFC_CB0_CMD_TYPE_WRITE;
+
+		/* Check for extended syntax */
+		switch (descInfo->cmd) {
+		case MV_NFC_CMD_WRITE_MULTIPLE:
+			cmdb[0] |= NFC_CB0_CMD_XTYPE_MULTIPLE;
+			break;
+		case MV_NFC_CMD_WRITE_NAKED:
+			cmdb[0] |= NFC_CB0_CMD_XTYPE_NAKED;
+			break;
+		case MV_NFC_CMD_WRITE_LAST_NAKED:
+			cmdb[0] |= NFC_CB0_CMD_XTYPE_LAST_NAKED;
+			break;
+		case MV_NFC_CMD_WRITE_DISPATCH:
+			cmdb[0] |= NFC_CB0_CMD_XTYPE_DISPATCH;
+			break;
+		default:
+			break;
+		};
+		break;
+
+	case MV_NFC_CMD_WRITE_DISPATCH_START:
+		cmdb[0] |= (flashCmdSet[nfcCtrl->cmdsetIdx].program & NFC_CB0_CMD1_MASK);
+		if (flashDeviceInfo[nfcCtrl->flashIdx].pgSz < MV_NFC_2KB_PAGE) {
+			if (descInfo->pageAddr & ~NFC_SP_PG_MASK)
+				cmdb[0] |=
+				    ((NFC_SP_BIG_READ_ADDR_LEN << NFC_CB0_ADDR_CYC_OFFS) & NFC_CB0_ADDR_CYC_MASK);
+			else
+				cmdb[0] |= ((NFC_SP_READ_ADDR_LEN << NFC_CB0_ADDR_CYC_OFFS) & NFC_CB0_ADDR_CYC_MASK);
+			cmdb[1] |= ((descInfo->pageAddr << NFC_SP_PG_OFFS) & NFC_SP_PG_MASK);
+		} else {
+			cmdb[0] |= ((NFC_LP_READ_ADDR_LEN << NFC_CB0_ADDR_CYC_OFFS) & NFC_CB0_ADDR_CYC_MASK);
+			cmdb[1] |= ((descInfo->pageAddr << NFC_LP_PG_OFFS) & NFC_LP_PG_MASK);
+			cmdb[2] |= (descInfo->pageAddr >> (32 - NFC_LP_PG_OFFS));
+		}
+		cmdb[0] |= NFC_CB0_CMD_TYPE_WRITE;
+		cmdb[0] |= NFC_CB0_CMD_XTYPE_DISPATCH;
+		break;
+
+	case MV_NFC_CMD_WRITE_NAKED:
+		cmdb[0] |= NFC_CB0_CMD_TYPE_WRITE;
+		cmdb[0] |= NFC_CB0_CMD_XTYPE_NAKED;
+		if (descInfo->length) {
+			cmdb[0] |= NFC_CB0_LEN_OVRD_MASK;
+			cmdb[3] |= (descInfo->length & 0xFFFF);
+		}
+		break;
+
+	case MV_NFC_CMD_WRITE_DISPATCH_END:
+		cmdb[0] |= ((flashCmdSet[nfcCtrl->cmdsetIdx].program >> 8) & NFC_CB0_CMD1_MASK);
+		cmdb[0] |= NFC_CB0_CMD_TYPE_WRITE;
+		cmdb[0] |= NFC_CB0_CMD_XTYPE_DISPATCH;
+		break;
+
+	default:
+		return MV_BAD_PARAM;
+	}
+
+	/* update page count */
+	cmdb[2] |= (((descInfo->pageCount - 1) << NFC_PG_CNT_OFFS) & NFC_PG_CNT_MASK);
+
+	return MV_OK;
+}
+
+#ifdef MV_INCLUDE_PDMA
+/*******************************************************************************
+* mvNfcCommandMultiple
+*
+* DESCRIPTION:
+*       Issue a command to the NAND controller.
+*
+* INPUT:
+*	nfcCtrl  - Nand control structure.
+*	cmd	 - The command to issue.
+*	pageAddr - The page number to perform the command on (If the command
+*		   requires a flash offset), block address in erase.
+*
+* OUTPUT:
+*	None.
+*
+* RETURN:
+*       MV_OK	   - On success,
+*	MV_TIMEOUT - Timeout while waiting for command request.
+*	MV_FAIL	   - On failure
+*******************************************************************************/
+MV_STATUS mvNfcCommandMultiple(MV_NFC_CTRL *nfcCtrl, MV_NFC_MULTI_CMD *descInfo, MV_U32 descCnt)
+{
+	MV_U32 reg, i, buff;
+	MV_U32 errCode = MV_OK;
+	MV_U32 cmdb[4];
+	MV_NFC_CMD *cmdVirtPtr = (MV_NFC_CMD *) nfcCtrl->cmdBuff.bufVirtPtr;
+	MV_NFC_CMD *cmdPhysPtr = (MV_NFC_CMD *) nfcCtrl->cmdBuff.bufPhysAddr;
+
+	MV_PDMA_DESC *cmdDescVirtPtr = (MV_PDMA_DESC *) nfcCtrl->cmdDescBuff.bufVirtPtr;
+	MV_PDMA_DESC *cmdDescPhysPtr = (MV_PDMA_DESC *) nfcCtrl->cmdDescBuff.bufPhysAddr;
+	MV_PDMA_DESC *dataDescVirtPtr = (MV_PDMA_DESC *) nfcCtrl->dataDescBuff.bufVirtPtr;
+	MV_PDMA_DESC *dataDescPhysPtr = (MV_PDMA_DESC *) nfcCtrl->dataDescBuff.bufPhysAddr;
+	MV_U32 xferLen;
+	MV_U32 dataDescCount = 0;
+	MV_U32 nPage;
+	MV_U32 timeout = 10000;
+	MV_STATUS ret;
+
+	/* Check MAX descriptor count */
+	if (descCnt > MV_NFC_MAX_DESC_CHAIN)
+		return MV_BAD_PARAM;
+
+	/* If not in PDMA fail operation */
+	if (nfcCtrl->ioMode != MV_NFC_PDMA_ACCESS)
+		return MV_BAD_PARAM;
+
+	/* Check that a chip was selected */
+	if (nfcCtrl->currCs == MV_NFC_CS_NONE)
+		return MV_FAIL;
+
+	/* Start the whole command chain through setting the ND_RUN */
+	/* Setting ND_RUN bit to start the new transaction - verify that controller in idle state */
+	while (timeout > 0) {
+		reg = MV_NAND_REG_READ(NFC_CONTROL_REG);
+		if (!(reg & NFC_CTRL_ND_RUN_MASK))
+			break;
+		timeout--;
+	}
+	if (timeout == 0)
+		return MV_BAD_STATE;
+
+	for (i = 0; i < descCnt; i++) {
+		if ((descInfo[i].cmd != MV_NFC_CMD_ERASE) &&
+		    (descInfo[i].cmd != MV_NFC_CMD_MULTIPLANE_ERASE) &&
+		    (descInfo[i].cmd != MV_NFC_CMD_RESET) &&
+		    (descInfo[i].cmd != MV_NFC_CMD_EXIT_CACHE_READ) &&
+		    (descInfo[i].cmd != MV_NFC_CMD_CACHE_READ_START) &&
+		    (descInfo[i].cmd != MV_NFC_CMD_READ_DISPATCH) &&
+		    (descInfo[i].cmd != MV_NFC_CMD_WRITE_DISPATCH_START) &&
+		    (descInfo[i].cmd != MV_NFC_CMD_WRITE_DISPATCH_END)) {
+			/* Get transfer data length for this command type */
+			mvNfcTransferDataLength(nfcCtrl, descInfo[i].cmd, &xferLen);
+			if (errCode != MV_OK)
+				return errCode;
+		}
+
+		if (nfcCtrl->eccMode != MV_NFC_ECC_DISABLE) {
+			if ((descInfo[i].cmd == MV_NFC_CMD_READ_ID) || (descInfo[i].cmd == MV_NFC_CMD_READ_STATUS) ||
+			    (descInfo[i].cmd == MV_NFC_CMD_ERASE) || (descInfo[i].cmd == MV_NFC_CMD_RESET)) {
+				/* disable ECC for these commands */
+				MV_NAND_REG_BIT_RESET(NFC_CONTROL_REG, NFC_CTRL_ECC_EN_MASK);
+				if (nfcCtrl->eccMode != MV_NFC_ECC_HAMMING)
+					MV_NAND_REG_BIT_RESET(NFC_ECC_CONTROL_REG, NFC_ECC_BCH_EN_MASK);
+			} else {
+				/* enable ECC for all other commands */
+				MV_NAND_REG_BIT_SET(NFC_CONTROL_REG, NFC_CTRL_ECC_EN_MASK);
+				if (nfcCtrl->eccMode != MV_NFC_ECC_HAMMING)
+					MV_NAND_REG_BIT_SET(NFC_ECC_CONTROL_REG, NFC_ECC_BCH_EN_MASK);
+			}
+		}
+
+		/* Build the command buffer */
+		ret = mvNfcBuildCommand(nfcCtrl, &descInfo[i], cmdb);
+		if (ret != MV_OK)
+			return ret;
+
+		/* Fill Command data */
+		cmdVirtPtr[i].cmdb0 = cmdb[0];
+		cmdVirtPtr[i].cmdb1 = cmdb[1];
+		cmdVirtPtr[i].cmdb2 = cmdb[2];
+		cmdVirtPtr[i].cmdb3 = cmdb[3];
+
+		/* Hook to the previous descriptor if exists */
+		if (i != 0) {
+			cmdDescVirtPtr[i - 1].physDescPtr = (MV_U32) &cmdDescPhysPtr[i];
+			cmdVirtPtr[i - 1].cmdb0 |= NFC_CB0_NEXT_CMD_MASK;
+		}
+
+		/* Fill Command Descriptor */
+		cmdDescVirtPtr[i].physDescPtr = 0x1;
+		cmdDescVirtPtr[i].physSrcAddr = (MV_U32) &cmdPhysPtr[i];
+		cmdDescVirtPtr[i].physDestAddr = nfcCtrl->regsPhysAddr + NFC_CMD_BUFF_ADDR;
+		cmdDescVirtPtr[i].commandValue = mvPdmaCommandRegCalc(&nfcCtrl->cmdChanHndl, MV_PDMA_MEM_TO_PERIPH,
+								      NFC_CMD_BUFF_SIZE(cmdb[0]));
+
+		/* Check if data dma need to be operated for this command */
+		if ((descInfo[i].cmd != MV_NFC_CMD_ERASE) &&
+		    (descInfo[i].cmd != MV_NFC_CMD_MULTIPLANE_ERASE) &&
+		    (descInfo[i].cmd != MV_NFC_CMD_RESET) &&
+		    (descInfo[i].cmd != MV_NFC_CMD_EXIT_CACHE_READ) &&
+		    (descInfo[i].cmd != MV_NFC_CMD_CACHE_READ_START) &&
+		    (descInfo[i].cmd != MV_NFC_CMD_READ_DISPATCH) &&
+		    (descInfo[i].cmd != MV_NFC_CMD_WRITE_DISPATCH_START) &&
+		    (descInfo[i].cmd != MV_NFC_CMD_WRITE_DISPATCH_END)) {
+			for (nPage = 0; nPage < descInfo[i].pageCount; nPage++) {
+				if (dataDescCount != 0)
+					dataDescVirtPtr[dataDescCount - 1].physDescPtr =
+					    (MV_U32) &dataDescPhysPtr[dataDescCount];
+				/* Fill Data Descriptor */
+				if ((descInfo[i].cmd == MV_NFC_CMD_READ_MONOLITHIC) ||
+				    (descInfo[i].cmd == MV_NFC_CMD_READ_MULTIPLE) ||
+				    (descInfo[i].cmd == MV_NFC_CMD_CACHE_READ_SEQ) ||
+				    (descInfo[i].cmd == MV_NFC_CMD_EXIT_CACHE_READ) ||
+				    (descInfo[i].cmd == MV_NFC_CMD_CACHE_READ_RAND) ||
+				    (descInfo[i].cmd == MV_NFC_CMD_READ_NAKED) ||
+				    (descInfo[i].cmd == MV_NFC_CMD_READ_LAST_NAKED) ||
+				    (descInfo[i].cmd == MV_NFC_CMD_READ_DISPATCH) ||
+				    (descInfo[i].cmd == MV_NFC_CMD_READ_ID) ||
+				    (descInfo[i].cmd == MV_NFC_CMD_READ_STATUS)) {
+					if (descInfo[i].numSgBuffs == 1) {
+						/* A single buffer, use physAddr */
+						dataDescVirtPtr[dataDescCount].physSrcAddr =
+						    nfcCtrl->regsPhysAddr + NFC_DATA_BUFF_ADDR;
+						dataDescVirtPtr[dataDescCount].physDestAddr =
+						    descInfo[i].physAddr + nPage * xferLen;
+						dataDescVirtPtr[dataDescCount].commandValue =
+						    mvPdmaCommandRegCalc(&nfcCtrl->dataChanHndl, MV_PDMA_PERIPH_TO_MEM,
+									 (descInfo[i].length ? descInfo[i].
+									  length : xferLen));
+					} else {
+						/* Scatter-gather operation, use sgBuffAdd */
+						for (buff = 0; buff < descInfo[i].numSgBuffs; buff++) {
+							if (buff != 0)
+								dataDescVirtPtr[dataDescCount - 1].physDescPtr =
+								    (MV_U32) &dataDescPhysPtr[dataDescCount];
+							dataDescVirtPtr[dataDescCount].physSrcAddr =
+							    nfcCtrl->regsPhysAddr + NFC_DATA_BUFF_ADDR;
+							dataDescVirtPtr[dataDescCount].physDestAddr =
+							    descInfo[i].sgBuffAddr[buff];
+							dataDescVirtPtr[dataDescCount].commandValue =
+							    mvPdmaCommandRegCalc(&nfcCtrl->dataChanHndl,
+										 MV_PDMA_PERIPH_TO_MEM,
+										 descInfo[i].sgBuffSize[buff]);
+							dataDescCount++;
+						}
+						dataDescCount--;
+					}
+				} else {	/* Write */
+
+					if (descInfo[i].numSgBuffs == 1) {
+						/* A single buffer, use physAddr */
+						dataDescVirtPtr[dataDescCount].physSrcAddr =
+						    descInfo[i].physAddr + nPage * xferLen;
+						dataDescVirtPtr[dataDescCount].physDestAddr =
+						    nfcCtrl->regsPhysAddr + NFC_DATA_BUFF_ADDR;
+						dataDescVirtPtr[dataDescCount].commandValue =
+						    mvPdmaCommandRegCalc(&nfcCtrl->dataChanHndl, MV_PDMA_MEM_TO_PERIPH,
+									 (descInfo[i].length ? descInfo[i].
+									  length : xferLen));
+					} else {
+						/* Scatter-gather operation, use sgBuffAdd */
+						for (buff = 0; buff < descInfo[i].numSgBuffs; buff++) {
+							if (buff != 0)
+								dataDescVirtPtr[dataDescCount - 1].physDescPtr =
+								    (MV_U32) &dataDescPhysPtr[dataDescCount];
+							dataDescVirtPtr[dataDescCount].physSrcAddr =
+							    descInfo[i].sgBuffAddr[buff];
+							dataDescVirtPtr[dataDescCount].physDestAddr =
+							    nfcCtrl->regsPhysAddr + NFC_DATA_BUFF_ADDR;
+							dataDescVirtPtr[dataDescCount].commandValue =
+							    mvPdmaCommandRegCalc(&nfcCtrl->dataChanHndl,
+										 MV_PDMA_MEM_TO_PERIPH,
+										 descInfo[i].sgBuffSize[buff]);
+							dataDescCount++;
+						}
+						dataDescCount--;
+					}
+				}
+
+				dataDescVirtPtr[dataDescCount].physDescPtr = 0x1;
+				dataDescCount++;
+
+				if (dataDescCount > MV_NFC_MAX_DESC_CHAIN)
+					return MV_OUT_OF_RANGE;
+			}
+		}
+	}
+
+#if 0
+	DBGPRINT((DBGLVL "\ncmdDescPhysPtr  = %08x, Count = %d\n", (MV_U32) cmdDescPhysPtr, descCnt));
+	for (nPage = 0; nPage < descCnt; nPage++) {
+		DBGPRINT((DBGLVL "    Command[%d] physDescPtr  = %08x\n", nPage, cmdDescVirtPtr[nPage].physDescPtr));
+		DBGPRINT((DBGLVL "    Command[%d] physSrcAddr  = %08x\n", nPage, cmdDescVirtPtr[nPage].physSrcAddr));
+		DBGPRINT((DBGLVL "    Command[%d] physDestAddr = %08x\n", nPage, cmdDescVirtPtr[nPage].physDestAddr));
+		DBGPRINT((DBGLVL "    Command[%d] commandValue = %08x\n", nPage, cmdDescVirtPtr[nPage].commandValue));
+		DBGPRINT((DBGLVL "      NDCB0 = %08x, NDCB1 = %08x, NDCB2 = %08x, NDCB3 = %08x\n",
+			  cmdVirtPtr[nPage].cmdb0, cmdVirtPtr[nPage].cmdb1, cmdVirtPtr[nPage].cmdb2,
+			  cmdVirtPtr[nPage].cmdb3));
+	}
+
+	DBGPRINT((DBGLVL "dataDescPhysPtr  = %08x, Count = %d\n", (MV_U32) dataDescPhysPtr, dataDescCount));
+	for (nPage = 0; nPage < dataDescCount; nPage++) {
+		DBGPRINT((DBGLVL "    Data[%d] physDescPtr  = %08x\n", nPage, dataDescVirtPtr[nPage].physDescPtr));
+		DBGPRINT((DBGLVL "    Data[%d] physSrcAddr  = %08x\n", nPage, dataDescVirtPtr[nPage].physSrcAddr));
+		DBGPRINT((DBGLVL "    Data[%d] physDestAddr = %08x\n", nPage, dataDescVirtPtr[nPage].physDestAddr));
+		DBGPRINT((DBGLVL "    Data[%d] commandValue = %08x\n", nPage, dataDescVirtPtr[nPage].commandValue));
+	}
+#endif
+	if (dataDescCount) {
+		/* enable interrupts in the last data descriptor. */
+		mvPdmaCommandIntrEnable(&nfcCtrl->dataChanHndl, &(dataDescVirtPtr[dataDescCount - 1].commandValue));
+		/* operate the data DMA */
+		if (mvPdmaChanTransfer(&nfcCtrl->dataChanHndl, MV_PDMA_PERIPH_TO_MEM,
+				       0, 0, 0, (MV_U32) dataDescPhysPtr) != MV_OK)
+			return MV_HW_ERROR;
+	}
+
+	/* operate the command DMA */
+	if (mvPdmaChanTransfer(&nfcCtrl->cmdChanHndl, MV_PDMA_MEM_TO_PERIPH, 0, 0, 0, (MV_U32) cmdDescPhysPtr) != MV_OK)
+		return MV_HW_ERROR;
+
+	/* Clear all old events on the status register */
+	reg = MV_NAND_REG_READ(NFC_STATUS_REG);
+	MV_NAND_REG_WRITE(NFC_STATUS_REG, reg);
+
+	/* Start the whole command chain through setting the ND_RUN */
+	/* Setting ND_RUN bit to start the new transaction - verify that controller in idle state */
+	while (timeout > 0) {
+		reg = MV_NAND_REG_READ(NFC_CONTROL_REG);
+		if (!(reg & NFC_CTRL_ND_RUN_MASK))
+			break;
+		timeout--;
+	}
+	if (timeout == 0)
+		return MV_BAD_STATE;
+
+	reg |= NFC_CTRL_ND_RUN_MASK;
+	MV_NAND_REG_WRITE(NFC_CONTROL_REG, reg);
+
+	return MV_OK;
+}
+#endif
+
+/*******************************************************************************
+* mvNfcCommandPio
+*
+* DESCRIPTION:
+*       Issue a command to the NAND controller.
+*
+* INPUT:
+*	nfcCtrl   - Nand control structure.
+*	cmd_descr - The command to issue, page address, page number, data length
+*
+* OUTPUT:
+*	None.
+*
+* RETURN:
+*       MV_OK	   - On success,
+*	MV_TIMEOUT - Timeout while waiting for command request.
+*	MV_FAIL	   - On failure
+*******************************************************************************/
+MV_STATUS mvNfcCommandPio(MV_NFC_CTRL *nfcCtrl, MV_NFC_MULTI_CMD *cmd_desc, MV_BOOL next)
+{
+	MV_U32 reg;
+	MV_U32 errCode = MV_OK;
+	MV_U32 cmdb_pio[4];
+	MV_U32 *cmdb;
+	MV_U32 timeout = 10000;
+	MV_STATUS ret;
+
+	/* Check that a chip was selected */
+	if (nfcCtrl->currCs == MV_NFC_CS_NONE)
+		return MV_FAIL;
+
+	/* Clear all old events on the status register */
+	reg = MV_NAND_REG_READ(NFC_STATUS_REG);
+	MV_NAND_REG_WRITE(NFC_STATUS_REG, reg);
+
+	/* Setting ND_RUN bit to start the new transaction - verify that controller in idle state */
+	while (timeout > 0) {
+		reg = MV_NAND_REG_READ(NFC_CONTROL_REG);
+		if (!(reg & NFC_CTRL_ND_RUN_MASK))
+			break;
+		timeout--;
+	}
+
+	if (timeout == 0)
+		return MV_BAD_STATE;
+
+	reg |= NFC_CTRL_ND_RUN_MASK;
+	MV_NAND_REG_WRITE(NFC_CONTROL_REG, reg);
+
+	/* Wait for Command WRITE request */
+	mvDfcWait4Complete(NFC_SR_WRCMDREQ_MASK, 1);
+	if (errCode != MV_OK)
+		return errCode;
+
+	/* Build 12 byte Command */
+	if (nfcCtrl->ioMode == MV_NFC_PDMA_ACCESS)
+		cmdb = (MV_U32 *) nfcCtrl->cmdBuff.bufVirtPtr;
+	else			/* PIO mode */
+		cmdb = cmdb_pio;
+
+	if (nfcCtrl->eccMode != MV_NFC_ECC_DISABLE) {
+		switch (cmd_desc->cmd) {
+		case MV_NFC_CMD_READ_MONOLITHIC:
+		case MV_NFC_CMD_READ_MULTIPLE:
+		case MV_NFC_CMD_READ_NAKED:
+		case MV_NFC_CMD_READ_LAST_NAKED:
+		case MV_NFC_CMD_WRITE_MONOLITHIC:
+		case MV_NFC_CMD_WRITE_MULTIPLE:
+		case MV_NFC_CMD_WRITE_NAKED:
+		case MV_NFC_CMD_WRITE_LAST_NAKED:
+			if (nfcCtrl->eccMode != MV_NFC_ECC_DISABLE) {
+				MV_NAND_REG_BIT_SET(NFC_CONTROL_REG, NFC_CTRL_ECC_EN_MASK);
+				if (nfcCtrl->eccMode != MV_NFC_ECC_HAMMING)
+					MV_NAND_REG_BIT_SET(NFC_ECC_CONTROL_REG, NFC_ECC_BCH_EN_MASK);
+			}
+			break;
+
+		default:
+			/* disable ECC for non-data commands */
+			MV_NAND_REG_BIT_RESET(NFC_CONTROL_REG, NFC_CTRL_ECC_EN_MASK);
+			MV_NAND_REG_BIT_RESET(NFC_ECC_CONTROL_REG, NFC_ECC_BCH_EN_MASK);
+			break;
+		};
+	}
+
+	/* Build the command buffer */
+	ret = mvNfcBuildCommand(nfcCtrl, cmd_desc, cmdb);
+	if (ret != MV_OK)
+		return ret;
+
+	/* If next command, link to it */
+	if (next)
+		cmdb[0] |= NFC_CB0_NEXT_CMD_MASK;
+
+	/* issue command */
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, cmdb[0]);
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, cmdb[1]);
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, cmdb[2]);
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, cmdb[3]);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNfcStatusGet
+*
+* DESCRIPTION:
+*       Retrieve the NAND controller status to monitor the NAND access sequence.
+*
+* INPUT:
+*	nfcCtrl  - Nand control structure.
+*	cmd	 - The last issued command to get the status for.
+*
+* OUTPUT:
+*	value	- Relevant only if one of the MV_NFC_STATUS_BBD OR
+*		  MV_NFC_STATUS_COR_ERROR errors is turned on.
+*		  For MV_NFC_STATUS_COR_ERROR: Holds the errors count.
+*		  For MV_NFC_STATUS_BBD: Holds the bad block address.
+*		  If error value is not desired, pass NULL as input.
+*
+* RETURN:
+*	A bitmask of the MV_NFC_STATUS_XXX status bits.
+*******************************************************************************/
+MV_U32 mvNfcStatusGet(MV_NFC_CTRL *nfcCtrl, MV_NFC_CMD_TYPE cmd, MV_U32 *value)
+{
+	MV_U32 reg, ret;
+
+	reg = MV_NAND_REG_READ(NFC_STATUS_REG);
+	if (reg == 0)
+		return 0;
+
+	if (value)
+		*value = ((reg & NFC_SR_ERR_CNT_MASK) >> NFC_SR_ERR_CNT_OFFS);
+
+	if ((nfcCtrl->currCs == MV_NFC_CS_0) || (nfcCtrl->currCs == MV_NFC_CS_2)) {
+		/* Clear out all non related interrupts */
+		reg &= (NFC_SR_CS0_BBD_MASK | NFC_SR_CS0_CMDD_MASK | NFC_SR_CS0_PAGED_MASK |
+			NFC_SR_RDY0_MASK | NFC_SR_WRCMDREQ_MASK | NFC_SR_RDDREQ_MASK |
+			NFC_SR_WRDREQ_MASK | NFC_SR_CORERR_MASK | NFC_SR_UNCERR_MASK);
+
+		ret = (reg & (NFC_SR_WRCMDREQ_MASK | NFC_SR_RDDREQ_MASK |
+			      NFC_SR_WRDREQ_MASK | NFC_SR_CORERR_MASK | NFC_SR_UNCERR_MASK));
+
+		if (reg & NFC_SR_CS0_BBD_MASK)
+			ret |= MV_NFC_STATUS_BBD;
+		if (reg & NFC_SR_CS0_CMDD_MASK)
+			ret |= MV_NFC_STATUS_CMDD;
+		if (reg & NFC_SR_CS0_PAGED_MASK)
+			ret |= MV_NFC_STATUS_PAGED;
+		if (reg & NFC_SR_RDY0_MASK)
+			ret |= MV_NFC_STATUS_RDY;
+	} else if ((nfcCtrl->currCs == MV_NFC_CS_1) || (nfcCtrl->currCs == MV_NFC_CS_3)) {
+		reg &= (NFC_SR_CS1_BBD_MASK | NFC_SR_CS1_CMDD_MASK | NFC_SR_CS1_PAGED_MASK |
+			NFC_SR_RDY1_MASK | NFC_SR_WRCMDREQ_MASK | NFC_SR_RDDREQ_MASK |
+			NFC_SR_WRDREQ_MASK | NFC_SR_CORERR_MASK | NFC_SR_UNCERR_MASK);
+
+		ret = (reg & (NFC_SR_WRCMDREQ_MASK | NFC_SR_RDDREQ_MASK |
+			      NFC_SR_WRDREQ_MASK | NFC_SR_CORERR_MASK | NFC_SR_UNCERR_MASK));
+
+		if (reg & NFC_SR_CS1_BBD_MASK)
+			ret |= MV_NFC_STATUS_BBD;
+		if (reg & NFC_SR_CS1_CMDD_MASK)
+			ret |= MV_NFC_STATUS_CMDD;
+		if (reg & NFC_SR_CS1_PAGED_MASK)
+			ret |= MV_NFC_STATUS_PAGED;
+		if (reg & NFC_SR_RDY1_MASK)
+			ret |= MV_NFC_STATUS_RDY;
+	} else {
+		reg &= (NFC_SR_WRCMDREQ_MASK | NFC_SR_RDDREQ_MASK |
+			NFC_SR_WRDREQ_MASK | NFC_SR_CORERR_MASK | NFC_SR_UNCERR_MASK);
+
+		ret = reg;
+	}
+
+	/* Clear out all reported events */
+	MV_NAND_REG_WRITE(NFC_STATUS_REG, reg);
+
+	return ret;
+}
+
+/*******************************************************************************
+* mvNfcIntrSet
+*
+* DESCRIPTION:
+*       Enable / Disable a given set of the Nand controller interrupts.
+*
+* INPUT:
+*	inatMask - A bitmask of the interrupts to enable / disable.
+*	enable	 - MV_TRUE: Unmask the interrupts
+*		   MV_FALSE: Mask the interrupts.
+*
+* OUTPUT:
+*	None.
+*
+* RETURN:
+*       MV_OK	- On success,
+*	MV_FAIL	- On failure
+*******************************************************************************/
+MV_STATUS mvNfcIntrSet(MV_NFC_CTRL *nfcCtrl, MV_U32 intMask, MV_BOOL enable)
+{
+	MV_U32 reg;
+	MV_U32 msk = (intMask & (NFC_SR_WRCMDREQ_MASK | NFC_SR_RDDREQ_MASK | NFC_SR_WRDREQ_MASK |
+				 NFC_SR_CORERR_MASK | NFC_SR_UNCERR_MASK));
+
+	if ((nfcCtrl->currCs == MV_NFC_CS_0) || (nfcCtrl->currCs == MV_NFC_CS_2)) {
+		if (intMask & MV_NFC_STATUS_BBD)
+			msk |= NFC_SR_CS0_BBD_MASK;
+		if (intMask & MV_NFC_STATUS_CMDD)
+			msk |= NFC_SR_CS0_CMDD_MASK;
+		if (intMask & MV_NFC_STATUS_PAGED)
+			msk |= NFC_SR_CS0_PAGED_MASK;
+		if (intMask & MV_NFC_STATUS_RDY)
+			msk |= NFC_SR_RDY0_MASK;
+	} else if ((nfcCtrl->currCs == MV_NFC_CS_1) || (nfcCtrl->currCs == MV_NFC_CS_3)) {
+		if (intMask & MV_NFC_STATUS_BBD)
+			msk |= NFC_SR_CS1_BBD_MASK;
+		if (intMask & MV_NFC_STATUS_CMDD)
+			msk |= NFC_SR_CS1_CMDD_MASK;
+		if (intMask & MV_NFC_STATUS_PAGED)
+			msk |= NFC_SR_CS1_PAGED_MASK;
+		if (intMask & MV_NFC_STATUS_RDY)
+			msk |= NFC_SR_RDY0_MASK;
+	}
+
+	reg = MV_NAND_REG_READ(NFC_CONTROL_REG);
+	if (enable)
+		reg &= ~msk;
+	else
+		reg |= msk;
+
+	MV_NAND_REG_WRITE(NFC_CONTROL_REG, reg);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNfcIntrToStatusConvert
+*
+* DESCRIPTION:
+*       Convert logical interrupt bit-mask to status register bit-mask.
+*
+* INPUT:
+*	nfcCtrl	- NAND control structure.
+*	intMask - Logical interrupts bitmask.
+*
+* OUTPUT:
+*	bitmask of interrupt status register bits.
+*
+* RETURN:
+*	None.
+*******************************************************************************/
+MV_U32 mvNfcIntrToStatusConvert(MV_NFC_CTRL *nfcCtrl, MV_U32 intMask)
+{
+	MV_U32 msk = (intMask & (NFC_SR_WRCMDREQ_MASK | NFC_SR_RDDREQ_MASK | NFC_SR_WRDREQ_MASK |
+				 NFC_SR_CORERR_MASK | NFC_SR_UNCERR_MASK));
+
+	if ((nfcCtrl->currCs == MV_NFC_CS_0) || (nfcCtrl->currCs == MV_NFC_CS_2)) {
+		if (intMask & MV_NFC_STATUS_BBD)
+			msk |= NFC_SR_CS0_BBD_MASK;
+		if (intMask & MV_NFC_STATUS_CMDD)
+			msk |= NFC_SR_CS0_CMDD_MASK;
+		if (intMask & MV_NFC_STATUS_PAGED)
+			msk |= NFC_SR_CS0_PAGED_MASK;
+		if (intMask & MV_NFC_STATUS_RDY)
+			msk |= NFC_SR_RDY0_MASK;
+	} else if ((nfcCtrl->currCs == MV_NFC_CS_1) || (nfcCtrl->currCs == MV_NFC_CS_3)) {
+		if (intMask & MV_NFC_STATUS_BBD)
+			msk |= NFC_SR_CS1_BBD_MASK;
+		if (intMask & MV_NFC_STATUS_CMDD)
+			msk |= NFC_SR_CS1_CMDD_MASK;
+		if (intMask & MV_NFC_STATUS_PAGED)
+			msk |= NFC_SR_CS1_PAGED_MASK;
+		if (intMask & MV_NFC_STATUS_RDY)
+			msk |= NFC_SR_RDY0_MASK;
+	}
+
+	return msk;
+
+}
+
+/*******************************************************************************
+* mvNfcReadWrite
+*
+* DESCRIPTION:
+*       Perform a read / write operation of a previously issued command.
+*	When working in PIO mode, this function will perform the read / write
+*	operation from / to the supplied buffer.
+*	when working in PDMA mode, this function will trigger the PDMA to start
+*	the data transfer.
+*	In all cases, the user is responsible to make sure that the data
+*	transfer operation was done successfully by polling the command done bit.
+*	Before calling this function, the Data-Read/Write request interrupts
+*	should be disabled (the one relevant to the command being processed).
+*
+* INPUT:
+*	nfcCtrl     - Nand control structure.
+*	cmd	    - The previously issued command.
+*	virtBufAddr - [Relevant only when working in PIO mode]
+*		      The virtual address of the buffer to read to / write from.
+*	physBufAddr - [Relevant only when working in PDMA mode]
+*		      The physical address of the buffer to read to / write from.
+*		      The buffer should be cache coherent for PDMA access.
+*
+* OUTPUT:
+*	None.
+*
+* RETURN:
+*       MV_OK	- On success,
+*	MV_FAIL	- On failure
+*******************************************************************************/
+MV_STATUS mvNfcReadWrite(MV_NFC_CTRL *nfcCtrl, MV_NFC_CMD_TYPE cmd, MV_U32 *virtBufAddr, MV_U32 physBuffAddr)
+{
+	MV_U32 data_len = 0;
+	MV_U32 i;
+	MV_STATUS errCode;
+
+	errCode = mvNfcTransferDataLength(nfcCtrl, cmd, &data_len);
+	if (errCode != MV_OK)
+		return errCode;
+
+	switch (cmd) {
+	case MV_NFC_CMD_READ_ID:
+	case MV_NFC_CMD_READ_STATUS:
+	case MV_NFC_CMD_READ_MONOLITHIC:	/* Read a single 512B or 2KB page */
+	case MV_NFC_CMD_READ_MULTIPLE:
+	case MV_NFC_CMD_READ_NAKED:
+	case MV_NFC_CMD_READ_LAST_NAKED:
+	case MV_NFC_CMD_READ_DISPATCH:
+		/* Issue command based on IO mode */
+		if (nfcCtrl->ioMode == MV_NFC_PDMA_ACCESS) {
+#ifdef MV_INCLUDE_PDMA
+			/* operate the DMA */
+			if (mvPdmaChanTransfer(&nfcCtrl->dataChanHndl, MV_PDMA_PERIPH_TO_MEM,
+					       nfcCtrl->regsPhysAddr + NFC_DATA_BUFF_ADDR,
+					       physBuffAddr, data_len, 0) != MV_OK)
+				return MV_HW_ERROR;
+#else
+			return MV_NOT_SUPPORTED;
+#endif
+		} else {	/* PIO mode */
+
+			for (i = 0; i < data_len; i += 4) {
+				*virtBufAddr = MV_LE32_TO_CPU(MV_NAND_REG_READ(NFC_DATA_BUFF_REG));
+				virtBufAddr++;
+			}
+		}
+		break;
+
+	case MV_NFC_CMD_WRITE_MONOLITHIC:	/* Program a single page of 512B or 2KB */
+	case MV_NFC_CMD_WRITE_MULTIPLE:
+	case MV_NFC_CMD_WRITE_NAKED:
+	case MV_NFC_CMD_WRITE_LAST_NAKED:
+	case MV_NFC_CMD_WRITE_DISPATCH:
+		/* Issue command based on IO mode */
+		if (nfcCtrl->ioMode == MV_NFC_PDMA_ACCESS) {
+#ifdef MV_INCLUDE_PDMA
+			/* operate the DMA */
+			if (mvPdmaChanTransfer(&nfcCtrl->dataChanHndl, MV_PDMA_MEM_TO_PERIPH,
+					       physBuffAddr, nfcCtrl->regsPhysAddr + NFC_DATA_BUFF_ADDR,
+					       data_len, 0) != MV_OK)
+				return MV_HW_ERROR;
+#else
+			return MV_NOT_SUPPORTED;
+#endif
+		} else {	/* PIO mode */
+
+			for (i = 0; i < data_len; i += 4) {
+				MV_NAND_REG_WRITE(NFC_DATA_BUFF_REG, MV_CPU_TO_LE32(*virtBufAddr));
+				virtBufAddr++;
+			}
+		}
+		break;
+
+	default:
+		return MV_BAD_PARAM;
+	};
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNfcReadWritePio
+*
+* DESCRIPTION:
+*       Perform PIO read / write operation to the specified buffer.
+*
+* INPUT:
+*	nfcCtrl     - Nand control structure.
+*	buff        - The virtual address of the buffer to read to / write from.
+*	data_len    - Byte count to transfer
+*	mode        - Read / Write/ None
+*
+* OUTPUT:
+*	None.
+*
+* RETURN:
+*	None.
+*******************************************************************************/
+MV_VOID mvNfcReadWritePio(MV_NFC_CTRL *nfcCtrl, MV_U32 *buff, MV_U32 data_len, MV_NFC_PIO_RW_MODE mode)
+{
+	MV_U32 i;
+
+	switch (mode) {
+	case MV_NFC_PIO_READ:
+		for (i = 0; i < data_len; i += 4) {
+			*buff = MV_LE32_TO_CPU(MV_NAND_REG_READ(NFC_DATA_BUFF_REG));
+			buff++;
+			/* For BCH ECC check if RDDREQ bit is set every 32 bytes */
+			if (((nfcCtrl->eccMode == MV_NFC_ECC_BCH_2K) ||
+			     (nfcCtrl->eccMode == MV_NFC_ECC_BCH_1K) ||
+			     (nfcCtrl->eccMode == MV_NFC_ECC_BCH_704B) ||
+			     (nfcCtrl->eccMode == MV_NFC_ECC_BCH_512B)) &&
+			     ((i & 0x1f) == 0) && (i > 0)) {
+				if (mvDfcWait4Complete(NFC_SR_RDDREQ_MASK, 10) != MV_OK)
+					break;
+			}
+		}
+		break;
+
+	case MV_NFC_PIO_WRITE:	/* Program a single page of 512B or 2KB */
+		for (i = 0; i < data_len; i += 4) {
+			MV_NAND_REG_WRITE(NFC_DATA_BUFF_REG, MV_CPU_TO_LE32(*buff));
+			buff++;
+		}
+		break;
+
+	default:
+		/* nothing to do */
+		break;
+	};
+}
+
+/*******************************************************************************
+* mvNfcAddress2RowConvert
+*
+* DESCRIPTION:
+*       Convert an absolute flash address to row index.
+*
+* INPUT:
+*	nfcCtrl  - Nand control structure.
+*	address	 - The absolute flash address.
+*
+* OUTPUT:
+*	row	 - The row number corresponding to the given address.
+*	colOffset- The column offset within the row.
+*
+* RETURN:
+*	None
+*******************************************************************************/
+MV_VOID mvNfcAddress2RowConvert(MV_NFC_CTRL *nfcCtrl, MV_U32 address, MV_U32 *row, MV_U32 *colOffset)
+{
+
+	if (flashDeviceInfo[nfcCtrl->flashIdx].pgSz < MV_NFC_2KB_PAGE) {	/* Small Page */
+		*colOffset = (address & 0xFF);
+		*row = (address >> 9);
+	} else {		/* Large Page */
+
+		*colOffset = (address & (flashDeviceInfo[nfcCtrl->flashIdx].pgSz - 1));
+
+		/* Calculate the page bits */
+		*row = (address >> mvNfcColBits(flashDeviceInfo[nfcCtrl->flashIdx].pgSz));
+	}
+}
+
+/*******************************************************************************
+* mvNfcAddress2BlockConvert
+*
+* DESCRIPTION:
+*       Convert an absolute flash address to erasable block address
+*
+* INPUT:
+*	nfcCtrl  - Nand control structure.
+*	address	 - The absolute flash address.
+*
+* OUTPUT:
+*	blk - block address
+*
+* RETURN:
+*	None
+*******************************************************************************/
+MV_VOID mvNfcAddress2BlockConvert(MV_NFC_CTRL *nfcCtrl, MV_U32 address, MV_U32 *blk)
+{
+	*blk = (address / (flashDeviceInfo[nfcCtrl->flashIdx].pgSz * flashDeviceInfo[nfcCtrl->flashIdx].pgPrBlk));
+}
+
+/*******************************************************************************
+* mvNfcAddress2BlockConvert
+*
+* DESCRIPTION:
+*       Convert an absolute flash address to erasable block address
+*
+* INPUT:
+*	nfcCtrl  - Nand control structure.
+*	address	 - The absolute flash address.
+*
+* OUTPUT:
+*	blk - block address
+*
+* RETURN:
+*	None
+*******************************************************************************/
+MV_8 *mvNfcFlashModelGet(MV_NFC_CTRL *nfcCtrl)
+{
+	static MV_8 *unk_dev = "Unknown Flash Device";
+
+	if (nfcCtrl->flashIdx >= (sizeof(flashDeviceInfo) / sizeof(MV_NFC_FLASH_INFO)))
+		return unk_dev;
+
+	return flashDeviceInfo[nfcCtrl->flashIdx].model;
+}
+
+/*******************************************************************************
+* mvNfcFlashPageSizeGet
+*
+* DESCRIPTION:
+*       Retrieve the logical page size of a given flash.
+*
+* INPUT:
+*	nfcCtrl  - Nand control structure.
+*
+* OUTPUT:
+*	size - Flash page size in bytes.
+*	totalSize - Page size including spare area.
+*		    (Pass NULL if not needed).
+*
+* RETURN:
+*	MV_NOT_FOUND - Bad flash index.
+*******************************************************************************/
+MV_STATUS mvNfcFlashPageSizeGet(MV_NFC_CTRL *nfcCtrl, MV_U32 *size, MV_U32 *totalSize)
+{
+	if (nfcCtrl->flashIdx >= (sizeof(flashDeviceInfo) / sizeof(MV_NFC_FLASH_INFO)))
+		return MV_NOT_FOUND;
+	if (size == NULL)
+		return MV_BAD_PTR;
+
+	if (nfcCtrl->ifMode == MV_NFC_IF_2X8)
+		*size = flashDeviceInfo[nfcCtrl->flashIdx].pgSz << 1;
+	else
+		*size = flashDeviceInfo[nfcCtrl->flashIdx].pgSz;
+
+	if (totalSize) {
+		mvNfcTransferDataLength(nfcCtrl, MV_NFC_CMD_READ_MONOLITHIC, totalSize);
+		if (nfcCtrl->ifMode == MV_NFC_IF_2X8)
+			*totalSize = (*totalSize) << 1;
+		if (flashDeviceInfo[nfcCtrl->flashIdx].pgSz > MV_NFC_2KB_PAGE)
+			*totalSize = (*totalSize) << 1;
+	}
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNfcFlashBlockSizeGet
+*
+* DESCRIPTION:
+*       Retrieve the logical block size of a given flash.
+*
+* INPUT:
+*	nfcCtrl  - Nand control structure.
+*
+* OUTPUT:
+*	size - Flash size in bytes.
+*
+* RETURN:
+*	MV_NOT_FOUND - Bad flash index.
+*******************************************************************************/
+MV_STATUS mvNfcFlashBlockSizeGet(MV_NFC_CTRL *nfcCtrl, MV_U32 *size)
+{
+	if (nfcCtrl->flashIdx >= (sizeof(flashDeviceInfo) / sizeof(MV_NFC_FLASH_INFO)))
+		return MV_NOT_FOUND;
+	if (size == NULL)
+		return MV_BAD_PTR;
+
+	if (nfcCtrl->ifMode == MV_NFC_IF_2X8)
+		*size = ((flashDeviceInfo[nfcCtrl->flashIdx].pgSz << 1) * flashDeviceInfo[nfcCtrl->flashIdx].pgPrBlk);
+	else
+		*size = (flashDeviceInfo[nfcCtrl->flashIdx].pgSz * flashDeviceInfo[nfcCtrl->flashIdx].pgPrBlk);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNfcFlashBlockNumGet
+*
+* DESCRIPTION:
+*       Retrieve the number of logical blocks of a given flash.
+*
+* INPUT:
+*	nfcCtrl  - Nand control structure.
+*
+* OUTPUT:
+*	numBlocks - Flash number of blocks.
+*
+* RETURN:
+*	MV_NOT_FOUND - Bad flash index.
+*******************************************************************************/
+MV_STATUS mvNfcFlashBlockNumGet(MV_NFC_CTRL *nfcCtrl, MV_U32 *numBlocks)
+{
+	if (nfcCtrl->flashIdx >= (sizeof(flashDeviceInfo) / sizeof(MV_NFC_FLASH_INFO)))
+		return MV_NOT_FOUND;
+	if (numBlocks == NULL)
+		return MV_BAD_PTR;
+
+	*numBlocks = flashDeviceInfo[nfcCtrl->flashIdx].blkNum;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNfcFlashIdGet
+*
+* DESCRIPTION:
+*       Retrieve the flash device ID.
+*
+* INPUT:
+*	nfcCtrl  - Nand control structure.
+*
+* OUTPUT:
+*	flashId - Flash ID.
+*
+* RETURN:
+*	MV_NOT_FOUND - Bad flash index.
+*******************************************************************************/
+MV_STATUS mvNfcFlashIdGet(MV_NFC_CTRL *nfcCtrl, MV_U32 *flashId)
+{
+	if (nfcCtrl->flashIdx >= (sizeof(flashDeviceInfo) / sizeof(MV_NFC_FLASH_INFO)))
+		return MV_NOT_FOUND;
+
+	if (flashId == NULL)
+		return MV_BAD_PTR;
+
+	*flashId = flashDeviceInfo[nfcCtrl->flashIdx].id;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNfcUnitStateStore - Store the NFC Unit state.
+*
+* DESCRIPTION:
+*       This function stores the NFC unit registers before the unit is suspended.
+*	The stored registers are placed into the input buffer which will be used for
+*	the restore operation.
+*
+* INPUT:
+*       regsData	- Buffer to store the unit state registers (Must
+*			  include at least 64 entries)
+*	len		- Number of entries in regsData input buffer.
+*
+* OUTPUT:
+*       regsData	- Unit state registers. The registers are stored in
+*			  pairs of (reg, value).
+*       len		- Number of entries in regsData buffer (Must be even).
+*
+* RETURS:
+*       MV_ERROR on failure.
+*       MV_OK on success.
+*
+*******************************************************************************/
+MV_STATUS mvNfcUnitStateStore(MV_U32 *stateData, MV_U32 *len)
+{
+	MV_U32 i;
+
+	if ((stateData == NULL) || (len == NULL))
+		return MV_BAD_PARAM;
+
+	i = 0;
+
+	stateData[i++] = NFC_CONTROL_REG;
+	stateData[i++] = MV_NAND_REG_READ(NFC_CONTROL_REG);
+
+	stateData[i++] = NFC_TIMING_0_REG;
+	stateData[i++] = MV_NAND_REG_READ(NFC_TIMING_0_REG);
+
+	stateData[i++] = NFC_TIMING_1_REG;
+	stateData[i++] = MV_NAND_REG_READ(NFC_TIMING_1_REG);
+
+	stateData[i++] = NFC_ECC_CONTROL_REG;
+	stateData[i++] = MV_NAND_REG_READ(NFC_ECC_CONTROL_REG);
+	*len = i;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvDfcWait4Complete
+*
+* DESCRIPTION:
+*       Wait for event or process to complete
+*
+* INPUT:
+*	statMask: bit to wait from in status register NDSR
+*	usec: Max uSec to wait for event
+*
+* OUTPUT:
+*	None.
+*
+* RETURN:
+*	MV_OK		- On success,
+*	MV_TIMEOUT	- Error accessing the underlying flahs device.
+*******************************************************************************/
+static MV_STATUS mvDfcWait4Complete(MV_U32 statMask, MV_U32 usec)
+{
+	MV_U32 i, sts;
+
+	for (i = 0; i < usec; i++) {
+		sts = (MV_NAND_REG_READ(NFC_STATUS_REG) & statMask);
+		if (sts) {
+			MV_NAND_REG_WRITE(NFC_STATUS_REG, sts);
+			return MV_OK;
+		}
+		mvOsUDelay(1);
+	}
+
+	return MV_TIMEOUT;
+}
+
+/*******************************************************************************
+* mvNfcDeviceFeatureSet
+*
+* DESCRIPTION:
+*       Set a NAND device feature according to user's request.
+*
+* INPUT:
+*	nfcCtrl	- NFC control structure.
+*	cmd	- Command to be sent to NAND device.
+*	addr	- Address of the special feature.
+*	data0	- First 4 bytes of data to be written.
+*	data1	- Bytes 4-7 of data.
+*
+* OUTPUT:
+*	None.
+*
+* RETURN:
+*	MV_OK		- On success,
+*	MV_TIMEOUT	- Error accessing the underlying flahs device.
+*******************************************************************************/
+static MV_STATUS mvNfcDeviceFeatureSet(MV_NFC_CTRL *nfcCtrl, MV_U8 cmd, MV_U8 addr, MV_U32 data0, MV_U32 data1)
+{
+	MV_U32 reg;
+	MV_U32 errCode = MV_OK;
+	MV_U32 timeout = 10000;
+
+	/* Clear all old events on the status register */
+	reg = MV_NAND_REG_READ(NFC_STATUS_REG);
+	MV_NAND_REG_WRITE(NFC_STATUS_REG, reg);
+
+	/* Setting ND_RUN bit to start the new transaction */
+	reg = MV_NAND_REG_READ(NFC_CONTROL_REG);
+	reg |= NFC_CTRL_ND_RUN_MASK;
+	MV_NAND_REG_WRITE(NFC_CONTROL_REG, reg);
+
+	/* Wait for Command WRITE request */
+	errCode = mvDfcWait4Complete(NFC_SR_WRCMDREQ_MASK, 1);
+	if (errCode != MV_OK)
+		goto Error_1;
+
+	reg = MV_NAND_REG_READ(NFC_STATUS_REG);
+	MV_NAND_REG_WRITE(NFC_STATUS_REG, reg);
+
+	/* Send Naked Command Dispatch Command*/
+	reg = cmd;
+	reg |= (0x1 << NFC_CB0_ADDR_CYC_OFFS);
+	reg |= NFC_CB0_CMD_XTYPE_MULTIPLE;
+	reg |= NFC_CB0_CMD_TYPE_WRITE;
+	reg |= NFC_CB0_LEN_OVRD_MASK;
+
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, reg);
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, addr);
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, 0x0);
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, 0x8);
+
+	/* Wait for Data READ request */
+	errCode = mvDfcWait4Complete(NFC_SR_WRDREQ_MASK, 10);
+	if (errCode != MV_OK)
+		return errCode;
+
+	mvOsUDelay(100);
+
+	MV_NAND_REG_WRITE(NFC_DATA_BUFF_REG, data0);
+	MV_NAND_REG_WRITE(NFC_DATA_BUFF_REG, data1);
+
+	/* Wait for Data READ request */
+	errCode = mvDfcWait4Complete(NFC_SR_RDY0_MASK, 10);
+	if (errCode != MV_OK)
+		return errCode;
+
+	/* Wait for ND_RUN bit to get cleared. */
+	while (timeout > 0) {
+		reg = MV_NAND_REG_READ(NFC_CONTROL_REG);
+		if (!(reg & NFC_CTRL_ND_RUN_MASK))
+			break;
+		timeout--;
+	}
+	if (timeout == 0)
+		return MV_BAD_STATE;
+
+Error_1:
+	return errCode;
+}
+
+/*******************************************************************************
+* mvNfcDeviceFeatureGet
+*
+* DESCRIPTION:
+*       Get a NAND device feature according to user's request.
+*
+* INPUT:
+*	nfcCtrl	- NFC control structure.
+*	cmd	- Command to be sent to NAND device.
+*	addr	- Address of the special feature.
+*
+* OUTPUT:
+*	data0	- First 4 bytes of the data.
+*	data1	- Bytes 4-7 of data.
+*
+* RETURN:
+*	MV_OK		- On success,
+*	MV_TIMEOUT	- Error accessing the underlying flahs device.
+*******************************************************************************/
+static MV_STATUS mvNfcDeviceFeatureGet(MV_NFC_CTRL *nfcCtrl, MV_U8 cmd, MV_U8 addr, MV_U32 *data0, MV_U32 *data1)
+{
+	MV_U32 reg;
+	MV_U32 errCode = MV_OK;
+	MV_U32 timeout = 10000;
+
+	/* Clear all old events on the status register */
+	reg = MV_NAND_REG_READ(NFC_STATUS_REG);
+	MV_NAND_REG_WRITE(NFC_STATUS_REG, reg);
+
+	/* Setting ND_RUN bit to start the new transaction */
+	reg = MV_NAND_REG_READ(NFC_CONTROL_REG);
+	reg |= NFC_CTRL_ND_RUN_MASK;
+	MV_NAND_REG_WRITE(NFC_CONTROL_REG, reg);
+
+	/* Wait for Command WRITE request */
+	errCode = mvDfcWait4Complete(NFC_SR_WRCMDREQ_MASK, 1);
+	if (errCode != MV_OK)
+		goto Error_2;
+
+	reg = MV_NAND_REG_READ(NFC_STATUS_REG);
+	MV_NAND_REG_WRITE(NFC_STATUS_REG, reg);
+
+	/* Send Read Command */
+	reg = cmd;
+	reg |= (0x1 << NFC_CB0_ADDR_CYC_OFFS);
+	reg |= NFC_CB0_CMD_XTYPE_MULTIPLE;
+	reg |= NFC_CB0_CMD_TYPE_READ;
+	reg |= NFC_CB0_LEN_OVRD_MASK;
+
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, reg);
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, addr);
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, 0x0);
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, 0x8);
+
+	/* Wait for READY */
+	errCode = mvDfcWait4Complete(NFC_SR_RDY0_MASK, 10);
+	if (errCode != MV_OK)
+		return errCode;
+
+	udelay(500);
+	/* Send Last-Naked Read Command */
+	reg = 0x0;
+	reg |= NFC_CB0_CMD_XTYPE_LAST_NAKED;
+	reg |= NFC_CB0_CMD_TYPE_READ;
+	reg |= NFC_CB0_LEN_OVRD_MASK;
+
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, reg);
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, 0x0);
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, 0x0);
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, 0x8);
+
+	/* Wait for Data READ request */
+	errCode = mvDfcWait4Complete(NFC_SR_RDDREQ_MASK, 100);
+	if (errCode != MV_OK)
+		return errCode;
+
+	/*  Read the data + read 4 bogus bytes */
+	*data0 = MV_NAND_REG_READ(NFC_DATA_BUFF_REG);
+	*data1 = MV_NAND_REG_READ(NFC_DATA_BUFF_REG);
+
+	/* Wait for ND_RUN bit to get cleared. */
+	while (timeout > 0) {
+		reg = MV_NAND_REG_READ(NFC_CONTROL_REG);
+		if (!(reg & NFC_CTRL_ND_RUN_MASK))
+			break;
+		timeout--;
+	}
+	if (timeout == 0)
+		return MV_BAD_STATE;
+Error_2:
+	return errCode;
+}
+
+/*******************************************************************************
+* mvNfcDeviceModeSet
+*
+* DESCRIPTION:
+*       Change flash working mode according to the flags
+*	field.
+*
+* INPUT:
+*	nfcCtrl	- NFC control structure.
+*	flInfo  - flash info structure
+*
+* OUTPUT:
+*	None.
+*
+* RETURN:
+*	MV_OK		- On success,
+*	MV_BAD_VALUE	- Wrong mode
+*	MV_TIMEOUT	- Error accessing the underlying flahs device.
+*******************************************************************************/
+static MV_STATUS mvNfcDeviceModeSet(MV_NFC_CTRL *nfcCtrl, MV_NFC_ONFI_MODE mode)
+{
+	MV_STATUS	ret;
+	MV_U32		d0 = 0, d1 = 0;
+
+	if (mode == MV_NFC_ONFI_MODE_3) {
+		/* Switch to faster mode */
+		ret = mvNfcDeviceFeatureSet(nfcCtrl, 0xEF, 0x01, 0x03, 0);
+		if (ret != MV_OK)
+			return ret;
+
+		/* Verify mode setting */
+		mvNfcDeviceFeatureGet(nfcCtrl, 0xEE, 0x01, &d0, &d1);
+		if (d0 != 3)
+			return MV_BAD_VALUE;
+	} else
+		return MV_FAIL;
+
+	return MV_OK;
+}
+
+
+MV_STATUS mvNfcReset(void)
+{
+	MV_U32 reg;
+	MV_U32 errCode = MV_OK;
+	MV_U32 timeout = 10000;
+
+	/* Clear all old events on the status register */
+	reg = MV_NAND_REG_READ(NFC_STATUS_REG);
+	MV_NAND_REG_WRITE(NFC_STATUS_REG, reg);
+
+	/* Setting ND_RUN bit to start the new transaction */
+	reg = MV_NAND_REG_READ(NFC_CONTROL_REG);
+	reg |= NFC_CTRL_ND_RUN_MASK;
+	MV_NAND_REG_WRITE(NFC_CONTROL_REG, reg);
+
+	/* Wait for Command WRITE request */
+	errCode = mvDfcWait4Complete(NFC_SR_WRCMDREQ_MASK, 1);
+	if (errCode != MV_OK)
+		goto Error_3;
+
+	/* Send Command */
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, 0x00A000FF);	/* DFC_NDCB0_RESET */
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, 0x0);
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, 0x0);
+
+	/* Wait for Command completion */
+	errCode = mvDfcWait4Complete(NFC_SR_RDY0_MASK, 1000);
+	if (errCode != MV_OK)
+		goto Error_3;
+
+	/* Wait for ND_RUN bit to get cleared. */
+	while (timeout > 0) {
+		reg = MV_NAND_REG_READ(NFC_CONTROL_REG);
+		if (!(reg & NFC_CTRL_ND_RUN_MASK))
+			break;
+		timeout--;
+	}
+	if (timeout == 0)
+		return MV_BAD_STATE;
+
+Error_3:
+	return errCode;
+}
+
+/*******************************************************************************
+* mvNfcReadIdNative
+*
+* DESCRIPTION:
+*       Read the flash Manufacturer and device ID in PIO mode.
+*
+* INPUT:
+*	None.
+*
+* OUTPUT:
+*	id: Manufacturer and Device Id detected (valid only if return is MV_OK).
+*
+* RETURN:
+*	MV_OK		- On success,
+*	MV_TIMEOUT	- Error accessing the underlying flahs device.
+*	MV_FAIL		- On failure
+*******************************************************************************/
+static MV_STATUS mvNfcReadIdNative(MV_NFC_CHIP_SEL cs, MV_U16 *id)
+{
+	MV_U32 reg, cmdb0 = 0, cmdb2 = 0;
+	MV_U32 errCode = MV_OK;
+
+	/* Clear all old events on the status register */
+	reg = MV_NAND_REG_READ(NFC_STATUS_REG);
+	MV_NAND_REG_WRITE(NFC_STATUS_REG, reg);
+
+	/* Setting ND_RUN bit to start the new transaction */
+	reg = MV_NAND_REG_READ(NFC_CONTROL_REG);
+	reg |= NFC_CTRL_ND_RUN_MASK;
+	MV_NAND_REG_WRITE(NFC_CONTROL_REG, reg);
+
+	/* Wait for Command WRITE request */
+	errCode = mvDfcWait4Complete(NFC_SR_WRCMDREQ_MASK, 1);
+	if (errCode != MV_OK)
+		return errCode;
+
+	/* Send Command */
+	reg = NFC_NATIVE_READ_ID_CMD;
+	reg |= (0x1 << NFC_CB0_ADDR_CYC_OFFS);
+	reg |= NFC_CB0_CMD_TYPE_READ_ID;
+	cmdb0 = reg;
+	if ((cs == MV_NFC_CS_1) || (cs == MV_NFC_CS_3))
+		cmdb0 |= NFC_CB0_CSEL_MASK;
+
+	if ((cs == MV_NFC_CS_2) || (cs == MV_NFC_CS_3))
+		cmdb2 |= NFC_CB2_CS_2_3_SELECT_MASK;
+
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, cmdb0);
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, 0x0);
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, cmdb2);
+
+	/* Wait for Data READ request */
+	mvDfcWait4Complete(NFC_SR_RDDREQ_MASK, 10);
+	if (errCode != MV_OK)
+		return errCode;
+
+	/*  Read the read ID bytes. + read 4 bogus bytes */
+	*id = (MV_U16) (MV_NAND_REG_READ(NFC_DATA_BUFF_REG) & 0xFFFF);
+	reg = MV_NAND_REG_READ(NFC_DATA_BUFF_REG);	/* dummy read to complete 8 bytes */
+
+	reg = MV_NAND_REG_READ(NFC_CONTROL_REG);
+	if (reg & NFC_CTRL_ND_RUN_MASK) {
+		MV_NAND_REG_WRITE(NFC_CONTROL_REG, (reg & ~NFC_CTRL_ND_RUN_MASK));
+		return MV_BAD_STATE;
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+ * mvNfcTimingSet
+ *
+ * DESCRIPTION:
+ *	Set all flash timing parameters for optimized operation
+ *
+ * INPUT:
+ *	nand_clock - nand clock frequency,
+ *	flInfo - timing information
+ *
+ * OUTPUT:
+ *	None.
+ *
+ * RETURN:
+ *	MV_OK		-On success,
+ *	MV_FAIL		-On failure
+ *******************************************************************************/
+static MV_STATUS mvNfcTimingSet(MV_U32 nand_clock, MV_NFC_FLASH_INFO *flInfo)
+{
+	MV_U32 reg, i;
+	MV_U32 clk2ns;
+	MV_U32 trc, trp, trh, twc, twp, twh;
+	MV_U32 tadl_nfc, tch_nfc, tcs_nfc, twh_nfc, twp_nfc, trh_nfc, trp_nfc;
+	MV_U32 tr_nfc, trhw_nfc, twhr_nfc, tar_nfc;
+	MV_U32 tr_pre_nfc = 0;
+/*	MV_U32 ret = MV_OK; */
+
+	clk2ns = DIV_ROUND_UP(_1GHz, nand_clock);
+
+	/* Calculate legal read timing */
+	trc = ns_clk(flInfo->tRC, clk2ns);
+	trp = ns_clk(flInfo->tRP, clk2ns);
+	trh = ns_clk(flInfo->tRH, clk2ns);
+	if (trc > (trp + trh))
+		trh = (trc - trp);
+
+	/* Calculate legal write timing */
+	twc = ns_clk(flInfo->tWC, clk2ns);
+	twp = ns_clk(flInfo->tWP, clk2ns);
+	twh = ns_clk(flInfo->tWH, clk2ns);
+	if (twc > (twp + twh))
+		twh = (twc - twp);
+
+	/* Calculate the timing configurations for register0 */
+	tadl_nfc = (ns_clk(flInfo->tADL, clk2ns) - maxx(ns_clk(flInfo->tCH, clk2ns), twh));
+	tch_nfc = (ns_clk(flInfo->tCH, clk2ns) - 1);
+	tcs_nfc = (ns_clk(flInfo->tCS, clk2ns) - twp - 1);
+	twh_nfc = (twh - 1);
+	twp_nfc = (twp - 1);
+	trh_nfc = (trh - 1);
+	trp_nfc = (trp - 1);
+
+	if (check_limit(tadl_nfc, 5) != tadl_nfc) {
+		/* ret = MV_OUT_OF_RANGE; */
+		tadl_nfc = check_limit(tadl_nfc, 5);
+	}
+
+	if (check_limit(tch_nfc, 3) != tch_nfc) {
+		/* ret = MV_OUT_OF_RANGE; */
+		tch_nfc = check_limit(tch_nfc, 3);
+	}
+
+	if (check_limit(tcs_nfc, 3) != tcs_nfc) {
+		/* ret = MV_OUT_OF_RANGE; */
+		tcs_nfc = check_limit(tcs_nfc, 3);
+	}
+
+	if (check_limit(twh_nfc, 3) != twh_nfc) {
+		/* ret = MV_OUT_OF_RANGE; */
+		twh_nfc = check_limit(twh_nfc, 3);
+	}
+
+	if (check_limit(twp_nfc, 3) != twp_nfc) {
+		/* ret = MV_OUT_OF_RANGE; */
+		twp_nfc = check_limit(twp_nfc, 3);
+	}
+
+	if (check_limit(trh_nfc, 3) != trh_nfc) {
+		/* ret = MV_OUT_OF_RANGE; */
+		trh_nfc = check_limit(trh_nfc, 3);
+	}
+
+	if (check_limit(trp_nfc, 4) != trp_nfc) {
+		/* ret = MV_OUT_OF_RANGE; */
+		trp_nfc = check_limit(trp_nfc, 4);
+	}
+
+	reg =  ((tadl_nfc << NFC_TMNG0_TADL_OFFS) |
+		(0x1 << NFC_TMNG0_SEL_CNTR_OFFS) |
+		(0x4 << NFC_TMNG0_RD_CNT_DEL_OFFS) |
+		(tch_nfc << NFC_TMNG0_TCH_OFFS) |
+		(tcs_nfc << NFC_TMNG0_TCS_OFFS) |
+		(twh_nfc << NFC_TMNG0_TWH_OFFS) |
+		(twp_nfc << NFC_TMNG0_TWP_OFFS) |
+		(0x0 << NFC_TMNG0_SEL_NRE_EDGE_OFFS) |
+		((trp_nfc >> 3) << NFC_TMNG0_ETRP_OFFS) |
+		(trh_nfc << NFC_TMNG0_TRH_OFFS) |
+		((trp_nfc & 0x7) << NFC_TMNG0_TRP_OFFS));
+	MV_NAND_REG_WRITE(NFC_TIMING_0_REG, reg);
+
+	/* Calculate the timing configurations for register1 */
+	tr_nfc = (ns_clk(flInfo->tR, clk2ns) - tch_nfc - 3);
+	trhw_nfc = (ns_clk(flInfo->tRHW, clk2ns) % 16) ? ((ns_clk(flInfo->tRHW,
+					clk2ns) / 16) + 1) : (ns_clk(flInfo->tRHW, clk2ns) / 16);
+
+	/*
+	 * For simplicity Assuming that tar == twhr
+	 * loop over all 16 possible values of tWHR_NFC and find smallest possible value (if possible!!!)
+	 */
+	twhr_nfc = 17; /* big number */
+	for (i = 0; i < 16; i++) {
+		if ((maxx(twh_nfc, tch_nfc) + maxx(i, maxx(0, i - maxx(twh_nfc, tch_nfc))) + 2) >=
+		     ns_clk(flInfo->tWHR, clk2ns))
+			if (twhr_nfc > i)
+				twhr_nfc = i;
+	}
+
+	if (twhr_nfc >= 16) {
+		twhr_nfc = 15; /* worst case - best we can do */
+		/* ret = MV_OUT_OF_RANGE; */
+	}
+
+	tar_nfc = twhr_nfc; /* our initial assumption */
+
+	if (tr_nfc > 0xFFFF) {
+		tr_pre_nfc = 1;
+		tr_nfc = ((tr_nfc % 16) ? ((tr_nfc/16) + 1) : (tr_nfc/16));
+	}
+
+#ifndef MTD_NAND_NFC_NEGLECT_RNB
+	/* If RnBx signal is used, then override tR to a very small and legal value */
+	tr_nfc = 0xFF;
+	tr_pre_nfc = 0;
+#endif
+
+	if (check_limit(tr_nfc, 16) != tr_nfc) {
+		/* ret = MV_OUT_OF_RANGE; */
+		tr_nfc = check_limit(tr_nfc, 16);
+	}
+
+	if (check_limit(trhw_nfc, 2) != trhw_nfc) {
+		/* ret = MV_OUT_OF_RANGE; */
+		trhw_nfc = check_limit(trhw_nfc, 2);
+	}
+
+	if (check_limit(twhr_nfc, 4) != twhr_nfc) {
+		/* ret = MV_OUT_OF_RANGE; */
+		twhr_nfc = check_limit(twhr_nfc, 4);
+	}
+
+	if (check_limit(tar_nfc, 4) != tar_nfc) {
+		/* ret = MV_OUT_OF_RANGE; */
+		tar_nfc = check_limit(tar_nfc, 5);
+	}
+
+	reg = ((tr_nfc << NFC_TMNG1_TR_OFFS) |
+		(tr_pre_nfc << NFC_TMNG1_PRESCALE_OFFS) |
+		(trhw_nfc << NFC_TMNG1_TRHW_OFFS) |
+		(twhr_nfc << NFC_TMNG1_TWHR_OFFS) |
+		(tar_nfc << NFC_TMNG1_TAR_OFFS));
+#ifndef MTD_NAND_NFC_NEGLECT_RNB
+	reg |= (0x1 << NFC_TMNG1_WAIT_MODE_OFFS);
+#endif
+	MV_NAND_REG_WRITE(NFC_TIMING_1_REG, reg);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNfcColBits
+*
+* DESCRIPTION:
+*       Calculate number of bits representing column part of the address
+*
+* INPUT:
+	pg_size: page size
+*
+* OUTPUT:
+*	None.
+*
+* RETURN:
+*	Number of bits representing a column
+*******************************************************************************/
+static MV_U32 mvNfcColBits(MV_U32 pg_size)
+{
+	MV_U32 shift = 0;
+	while (pg_size) {
+		++shift;
+		pg_size >>= 1;
+	};
+
+	return shift - 1;
+}
+
+/*******************************************************************************
+* mvNfcEccModeSet
+*
+* DESCRIPTION:
+*       Set the ECC mode at runtime to BCH, Hamming or No Ecc.
+*
+* INPUT:
+*	nfcCtrl  - Nand control structure.
+*	MV_NFC_ECC_MODE eccMode: ECC type (BCH, Hamming or No Ecc)
+*
+* OUTPUT:
+*	None.
+*
+* RETURN:
+*	previous ECC mode.
+*******************************************************************************/
+MV_NFC_ECC_MODE mvNfcEccModeSet(MV_NFC_CTRL *nfcCtrl, MV_NFC_ECC_MODE eccMode)
+{
+	MV_NFC_ECC_MODE prevEccMode;
+
+	prevEccMode = nfcCtrl->eccMode;
+	nfcCtrl->eccMode = eccMode;
+	return prevEccMode;
+}
+
+/*******************************************************************************
+* mvNfcBadBlockPageNumber
+*
+* DESCRIPTION:
+*       Get the page number within the block holding the bad block indication
+*
+* INPUT:
+*	nfcCtrl  - Nand control structure.
+*
+* OUTPUT:
+*	None
+*
+* RETURN:
+*       page number having the bad block indicator
+*******************************************************************************/
+MV_U32 mvNfcBadBlockPageNumber(MV_NFC_CTRL *nfcCtrl)
+{
+	return flashDeviceInfo[nfcCtrl->flashIdx].bb_page;
+}
+
+
+/*******************************************************************************/
+#ifdef MV_CPU_LE
+#define build_uint16(byte1, byte0)	((MV_U16) ((byte0 << 8) | byte1));
+#define build_uint32(byte3, byte2, byte1, byte0)	\
+		((MV_U32) ((byte0 << 24) | (byte1 << 16) | (byte2 << 8) | byte3));
+#endif
+/*******************************************************************************/
+#ifdef MV_CPU_BE
+#define build_uint16(byte1, byte0) ((MV_U16) ((byte1 << 8) | byte0));
+#define build_uint32(byte3, byte2, byte1, byte0)	\
+		((MV_U32) ((byte3 << 24) | (byte2 << 16) | (byte1 << 8) | byte0));
+#endif
+/*******************************************************************************
+* mvNfcReadParamPage
+*
+* DESCRIPTION:
+*	The READ PARAMETER PAGE (ECh) command is used to read the ONFI parameter
+*	page programmed into the target. This command is accepted by the target
+*	only when all die (LUNs) on the target are idle
+*
+* INPUT:
+*	nfcCtrl	- NFC control structure.
+*	buf	- buff (size 256).
+*
+* OUTPUT:
+*	data0	- First 4 bytes of the data.
+*	data1	- Bytes 4-7 of data.
+*
+* RETURN:
+*	MV_OK		- On success,
+*	MV_TIMEOUT	- Error accessing the underlying flahs device.
+*******************************************************************************/
+static MV_STATUS mvNfcReadParamPage(struct parameter_page_t *ppage)
+{
+	MV_U32 reg, i;
+	MV_U8 rbuf[NUM_OF_PPAGE_BYTES];
+	MV_U32 *pBuf = (MV_U32 *)rbuf;
+
+	MV_U32 errCode = MV_OK;
+	MV_U32 timeout = 10000;
+
+	/* Clear all old events on the status register */
+	reg = MV_NAND_REG_READ(NFC_STATUS_REG);
+	MV_NAND_REG_WRITE(NFC_STATUS_REG, reg);
+
+	/* Setting ND_RUN bit to start the new transaction */
+	reg = MV_NAND_REG_READ(NFC_CONTROL_REG);
+	reg |= NFC_CTRL_ND_RUN_MASK;
+	MV_NAND_REG_WRITE(NFC_CONTROL_REG, reg);
+
+	/* Wait for Command WRITE request */
+	errCode = mvDfcWait4Complete(NFC_SR_WRCMDREQ_MASK, 1);
+	if (errCode != MV_OK)
+		return errCode;
+
+	/* Send Read Command */
+	reg = 0xEC;
+	reg |= (0x1 << NFC_CB0_ADDR_CYC_OFFS);
+	reg |= NFC_CB0_CMD_XTYPE_MULTIPLE;
+	reg |= NFC_CB0_CMD_TYPE_READ;
+	reg |= NFC_CB0_LEN_OVRD_MASK;
+
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, reg);
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, 0);
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, 0);
+	MV_NAND_REG_WRITE(NFC_COMMAND_BUFF_0_REG, 128);
+
+	/* Wait for READY */
+	errCode = mvDfcWait4Complete(NFC_SR_RDY0_MASK, 100);
+	if (errCode != MV_OK)
+		return errCode;
+	mvOsUDelay(100);
+
+	/*  Read the data 129 bytes */
+	for (i = 0; i < (NUM_OF_PPAGE_BYTES / 4); i++)
+		*pBuf++ = MV_NAND_REG_READ(NFC_DATA_BUFF_REG);
+
+	/* Wait for ND_RUN bit to get cleared. */
+	while (timeout > 0) {
+		reg = MV_NAND_REG_READ(NFC_CONTROL_REG);
+		if (!(reg & NFC_CTRL_ND_RUN_MASK))
+			break;
+		timeout--;
+	}
+	if (timeout == 0)
+		return MV_BAD_STATE;
+    /*
+     * Fill the parameter page data structure in the right way
+     */
+
+    /* Parameter page signature (ONFI) */
+	mvOsMemset(ppage, 0, sizeof(struct parameter_page_t));
+	mvOsMemcpy(ppage->signature, rbuf, 4);
+
+	/* check if the buffer contains a valid ONFI parameter page */
+	if (strcmp(ppage->signature, "ONFI"))
+		return MV_BAD_PARAM;
+
+	ppage->rev_num = build_uint16(rbuf[4], rbuf[5]);         /* Revision number */
+	ppage->feature = build_uint16(rbuf[6], rbuf[7]);         /* Features supported */
+	ppage->command = build_uint16(rbuf[8], rbuf[9]);         /* Optional commands supported */
+	mvOsMemcpy(ppage->manufacturer, &rbuf[32], 13);         /* Device manufacturer */
+	mvOsMemcpy(ppage->model, &rbuf[44], 21);                /* Device part number */
+	ppage->jedec_id = rbuf[64];                             /* Manufacturer ID (Micron = 2Ch) */
+	ppage->date_code = build_uint16(rbuf[65], rbuf[66]);     /* Date code */
+
+	/* Number of data bytes per page */
+	ppage->data_bytes_per_page = build_uint32(rbuf[80], rbuf[81], rbuf[82], rbuf[83]);
+
+	/* Number of spare bytes per page */
+	ppage->spare_bytes_per_page = build_uint16(rbuf[84], rbuf[85]);
+
+	/* Number of data bytes per partial page */
+	ppage->data_bytes_per_partial_page = build_uint32(rbuf[86], rbuf[87], rbuf[88], rbuf[89]);
+
+	/* Number of spare bytes per partial page */
+	ppage->spare_bytes_per_partial_page = build_uint16(rbuf[90], rbuf[91]);
+
+	/* Number of pages per block */
+	ppage->pages_per_block = build_uint32(rbuf[92], rbuf[93], rbuf[94], rbuf[95]);
+
+	/* Number of blocks per unit */
+	ppage->blocks_per_lun = build_uint32(rbuf[96], rbuf[97], rbuf[98], rbuf[99]);
+
+	ppage->luns_per_ce = rbuf[100];				/* Number of logical units (LUN) per chip enable */
+	ppage->num_addr_cycles = rbuf[101];			/*Number of address cycles */
+	ppage->bit_per_cell = rbuf[102];			/* Number of bits per cell (1 = SLC; >1= MLC) */
+	ppage->max_bad_blocks_per_lun = build_uint16(rbuf[103], rbuf[104]); /* Bad blocks maximum per unit */
+	ppage->block_endurance = build_uint16(rbuf[105], rbuf[106]);	/* Block endurance */
+	ppage->guarenteed_valid_blocks = rbuf[107];		/* Guaranteed valid blocks at beginning of target */
+
+	/* Block endurance for guaranteed valid blocks */
+	ppage->guarenteed_valid_blocks = build_uint16(rbuf[108], rbuf[109]);
+	ppage->num_programs_per_page = rbuf[110];		/* Number of programs per page */
+	ppage->partial_prog_attr = rbuf[111];			/* Partial programming attributes */
+	ppage->num_ECC_bits = rbuf[112];			/* Number of bits ECC bits */
+	ppage->num_interleaved_addr_bits = rbuf[113];		/* Number of interleaved address bits */
+	ppage->interleaved_op_attr = rbuf[114];			/* Interleaved operation attributes */
+
+	return errCode;
+}
+
+/*******************************************************************************
+* mvNfcPrintParamPage
+*
+* DESCRIPTION:
+*       Print the READ PARAMETER PAGE (ECh - the ONFI parameter )
+*
+* INPUT:
+*	struct parameter_page_t
+*
+* OUTPUT:
+*
+* RETURN:
+*******************************************************************************/
+void mvNfcPrintParamPage(void)
+{
+	struct parameter_page_t *ppage = &paramPage;
+
+	if (strcmp(ppage->signature, "ONFI") != 0)
+		return;
+
+	mvOsPrintf("ONFI structure\n");
+	mvOsPrintf("signature = %s\n", ppage->signature);
+	mvOsPrintf("Revision number = 0x%x, \tFeatures supported =0x%x\n", ppage->rev_num, ppage->feature);
+	mvOsPrintf("Optional commands supported=0x%x\n", ppage->command);
+	mvOsPrintf("manufacturer = %s\n", ppage->manufacturer);
+	mvOsPrintf("model = %s\n", ppage->model);
+	mvOsPrintf("data bytes per page= %d\n", ppage->data_bytes_per_page);
+
+	mvOsPrintf("spare bytes per page = %d\n", ppage->spare_bytes_per_page);
+
+	mvOsPrintf("data bytes per partial page = %d\n", ppage->data_bytes_per_partial_page);
+
+	mvOsPrintf("spare bytes per partial page = %d\n", ppage->spare_bytes_per_partial_page);
+	mvOsPrintf("pages per block = %d\n", ppage->pages_per_block);
+	mvOsPrintf("blocks per unit = %d\n", ppage->blocks_per_lun);
+	mvOsPrintf("Number of logical units (LUN) per chip enable = %d\n", ppage->luns_per_ce);
+	mvOsPrintf("Number of address cycles = %d\n", ppage->num_addr_cycles);
+	mvOsPrintf("Number of bits per cell (1 = SLC; >1= MLC)  = %d\n", ppage->bit_per_cell);
+	mvOsPrintf("Bad blocks maximum per unit= %d\n", ppage->max_bad_blocks_per_lun);
+	mvOsPrintf("block endurance = %d\n", ppage->block_endurance);
+	mvOsPrintf("Guaranteed valid blocks at beginning of target = %d\n", ppage->guarenteed_valid_blocks);
+
+	/* Block endurance for guaranteed valid blocks */
+	mvOsPrintf("Block endurance for guaranteed valid blocks  = %d\n", ppage->guarenteed_valid_blocks);
+	mvOsPrintf("Number of programs per page = %d\n", ppage->num_programs_per_page);
+	mvOsPrintf("Partial programming attributes = %d\n", ppage->partial_prog_attr);
+	mvOsPrintf("Number of bits ECC bits = %d\n", ppage->num_ECC_bits);
+	mvOsPrintf("Number of interleaved address bits = %d\n", ppage->num_interleaved_addr_bits);
+	mvOsPrintf("Interleaved operation attributes = %d\n", ppage->interleaved_op_attr);
+}
diff --git a/drivers/mtd/nand/mvebu_nfc/hal/mvNfc.h b/drivers/mtd/nand/mvebu_nfc/hal/mvNfc.h
new file mode 100644
index 000000000000..a2ed8be62d30
--- /dev/null
+++ b/drivers/mtd/nand/mvebu_nfc/hal/mvNfc.h
@@ -0,0 +1,463 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCMVNFCH
+#define __INCMVNFCH
+
+/******************************************************************************
+ * Usage model:
+ *	The following describes the usage model for the below APIs.
+ *	the sequences below does not handle errors, and assume all stages pass
+ *	successfully.
+ *	Nand Read of 2 pages, PDMA mode:
+ *		- mvNfcInit(MV_NFC_PDMA_ACCESS)
+ *		- mvNfcSelectChip(...)
+ *		- [If interrupt mode]
+ *		  Enable RD_DATA_REQ & CMD_DONE interrupts.
+ *		- mvNfcCommandIssue(MV_NFC_CMD_READ)
+ *		- [In interrupt mode]
+ *		  Block till one of the above interrupts is triggered.
+ *		- [In polling mode]
+ *		  Poll on mvNfcStatusGet() till STATUS_RDD_REQ is returned.
+ *		- [In interrupt mode]
+ *		  Disable the RD_DATA_REQ interrupt.
+ *		- mvNfcReadWrite()
+ *		- Block till CMD_DONE interrupt is issued (Or Error).
+ *		  OR
+ *		  Poll on mvNfcStatusGet() till CMD_DONE is returned. (Or Error).
+ *		- Wait for PDMA done interrupt to signal data ready in buffer.
+ *		==> At this stage, data is ready in the read buffer.
+ *		- [If interrupt mode]
+ *		  Enable RD_DATA_REQ & CMD_DONE interrupts.
+ *		- mvNfcCommandIssue(MV_NFC_CMD_READ_LAST_NAKED)
+ *		- [In interrupt mode]
+ *		  Block till one of the above interrupts is triggered.
+ *		- [In polling mode]
+ *		  Poll on mvNfcStatusGet() till STATUS_RDD_REQ is returned.
+ *		- [In interrupt mode]
+ *		  Disable the RD_DATA_REQ interrupt.
+ *		- mvNfcReadWrite()
+ *		- Block till CMD_DONE interrupt is issued (Or Error).
+ *		  OR
+ *		  Poll on mvNfcStatusGet() till CMD_DONE is returned. (Or Error).
+ *		- Wait for PDMA done interrupt to signal data ready in buffer.
+ *		==> At this stage, second page data is ready in the read buffer.
+ *		- mvNfcSelectChip(MV_NFC_CS_NONE)
+ *
+ *	Nand Write of single page, PIO mode:
+ *		- mvNfcInit(MV_NFC_PIO_ACCESS)
+ *		- mvNfcSelectChip(...)
+ *		- [If interrupt mode]
+ *		  Enable WR_DATA_REQ & CMD_DONE interrupts.
+ *		- mvNfcCommandIssue(MV_NFC_CMD_WRITE_MONOLITHIC)
+ *		- [In interrupt mode]
+ *		  Block till one of the above interrupts is triggered.
+ *		- [In polling mode]
+ *		  Poll on mvNfcStatusGet() till STATUS_WRD_REQ is returned.
+ *		- [In interrupt mode]
+ *		  Disable the WR_DATA_REQ interrupt.
+ *		- mvNfcReadWrite()
+ *		- Block till CMD_DONE interrupt is issued (Or Error).
+ *		  OR
+ *		  Poll on mvNfcStatusGet() till CMD_DONE is returned. (Or Error).
+ *		==> At this stage, data was written to the flash device.
+ *		- mvNfcSelectChip(MV_NFC_CS_NONE)
+ *
+ *	Nand Erase of a single block:
+ *		- mvNfcInit(...)
+ *		- mvNfcSelectChip(...)
+ *		- [If interrupt mode]
+ *		  Enable CMD_DONE interrupts.
+ *		- mvNfcCommandIssue(MV_NFC_CMD_ERASE)
+ *		- [In interrupt mode]
+ *		  Block till the above interrupt is triggered.
+ *		- [In polling mode]
+ *		  Poll on mvNfcStatusGet() till STATUS_CMD_DONE is returned.
+ *		==> At this stage, flash block was erased from the flash device.
+ *		- mvNfcSelectChip(MV_NFC_CS_NONE)
+ *
+ ******************************************************************************/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if !defined(CONFIG_OF)
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#include "mvSysNfcConfig.h"
+#endif
+
+/********************************/
+/* Enums and structures		*/
+/********************************/
+
+/* Maximum Chain length */
+#define MV_NFC_MAX_DESC_CHAIN		0x800
+
+/* Supported page sizes */
+#define MV_NFC_512B_PAGE		512
+#define MV_NFC_2KB_PAGE			2048
+#define MV_NFC_4KB_PAGE			4096
+#define MV_NFC_8KB_PAGE			8192
+
+#define MV_NFC_MAX_CHUNK_SIZE		(2048)
+
+/* Nand controller status bits.		*/
+#define MV_NFC_STATUS_CMD_REQ		0x1
+#define MV_NFC_STATUS_RDD_REQ		0x2
+#define MV_NFC_STATUS_WRD_REQ		0x4
+#define MV_NFC_STATUS_COR_ERROR		0x8
+#define MV_NFC_STATUS_UNC_ERROR		0x10
+#define MV_NFC_STATUS_BBD		0x20	/* Bad Block Detected */
+#define MV_NFC_STATUS_CMDD		0x80	/* Command Done */
+#define MV_NFC_STATUS_PAGED		0x200	/* Page Done */
+#define MV_NFC_STATUS_RDY		0x800	/* Device Ready */
+
+/* Nand controller interrupt bits.	*/
+#define MV_NFC_WR_CMD_REQ_INT		0x1
+#define MV_NFC_RD_DATA_REQ_INT		0x2
+#define MV_NFC_WR_DATA_REQ_INT		0x4
+#define MV_NFC_CORR_ERR_INT		0x8
+#define MV_NFC_UNCORR_ERR_INT		0x10
+#define MV_NFC_CS1_BAD_BLK_DETECT_INT	0x20
+#define MV_NFC_CS0_BAD_BLK_DETECT_INT	0x40
+#define MV_NFC_CS1_CMD_DONE_INT		0x80
+#define MV_NFC_CS0_CMD_DONE_INT		0x100
+#define MV_NFC_DEVICE_READY_INT		0x800
+
+/* Max number of buffers chunks for as single read / write operation */
+#define MV_NFC_RW_MAX_BUFF_NUM		16
+
+#define NUM_OF_PPAGE_BYTES		128
+
+/* ECC mode options.			*/
+typedef enum {
+	MV_NFC_ECC_HAMMING = 0,		/* 1 bit */
+	MV_NFC_ECC_BCH_2K,		/* 4 bit */
+	MV_NFC_ECC_BCH_1K,		/* 8 bit */
+	MV_NFC_ECC_BCH_704B,		/* 12 bit */
+	MV_NFC_ECC_BCH_512B,		/* 16 bit */
+	MV_NFC_ECC_DISABLE,
+	MV_NFC_ECC_MAX_CNT
+} MV_NFC_ECC_MODE;
+
+typedef enum {
+	MV_NFC_PIO_ACCESS,
+	MV_NFC_PDMA_ACCESS
+} MV_NFC_IO_MODE;
+
+typedef enum {
+	MV_NFC_PIO_READ,
+	MV_NFC_PIO_WRITE,
+	MV_NFC_PIO_NONE
+} MV_NFC_PIO_RW_MODE;
+
+
+typedef enum {
+	MV_NFC_IF_1X8,
+	MV_NFC_IF_1X16,
+	MV_NFC_IF_2X8
+} MV_NFC_IF_MODE;
+
+
+/* Flash device CS.			*/
+typedef enum {
+	MV_NFC_CS_0,
+	MV_NFC_CS_1,
+	MV_NFC_CS_2,
+	MV_NFC_CS_3,
+	MV_NFC_CS_NONE
+} MV_NFC_CHIP_SEL;
+
+
+/*
+ *	ioMode		The access mode by which the unit will operate (PDMA / PIO).
+ *	eccMode		The ECC mode to configure the controller to.
+ *	ifMode		The NAND chip connection mode, 8-bit / 16-bit / gang mode.
+ *	autoStatusRead	Whether to automatically read the flash status after each
+ *			erase / write commands.
+ *	tclk		System TCLK.
+ *	readyBypass	Whether to wait for the RnB sugnal to be deasserted after
+ *			waiting the tR or skip it and move directly to the next step.
+ *	osHandle	OS specific handle used for allocating command buffer
+ *	regsPhysAddr	Physical address of internal registers (used in DMA
+ *			mode only)
+ *	dataPdmaIntMask Interrupt mask for PDMA data channel (used in DMA mode
+ *			only).
+ *	cmdPdmaIntMask	Interrupt mask for PDMA command channel (used in DMA
+ *			mode only).
+ */
+typedef struct {
+	MV_NFC_IO_MODE		ioMode;
+	MV_NFC_ECC_MODE		eccMode;
+	MV_NFC_IF_MODE		ifMode;
+	MV_BOOL			autoStatusRead;
+	MV_U32			tclk;
+	MV_BOOL			readyBypass;
+	MV_VOID			*osHandle;
+	MV_U32			regsPhysAddr;
+#ifdef MV_INCLUDE_PDMA
+	MV_U32			dataPdmaIntMask;
+	MV_U32			cmdPdmaIntMask;
+#endif
+} MV_NFC_INFO;
+
+
+typedef enum {
+	MV_NFC_CMD_READ_ID = 0,
+	MV_NFC_CMD_READ_STATUS,
+	MV_NFC_CMD_ERASE,
+	MV_NFC_CMD_MULTIPLANE_ERASE,
+	MV_NFC_CMD_RESET,
+
+	MV_NFC_CMD_CACHE_READ_SEQ,
+	MV_NFC_CMD_CACHE_READ_RAND,
+	MV_NFC_CMD_EXIT_CACHE_READ,
+	MV_NFC_CMD_CACHE_READ_START,
+	MV_NFC_CMD_READ_MONOLITHIC,
+	MV_NFC_CMD_READ_MULTIPLE,
+	MV_NFC_CMD_READ_NAKED,
+	MV_NFC_CMD_READ_LAST_NAKED,
+	MV_NFC_CMD_READ_DISPATCH,
+
+	MV_NFC_CMD_WRITE_MONOLITHIC,
+	MV_NFC_CMD_WRITE_MULTIPLE,
+	MV_NFC_CMD_WRITE_NAKED,
+	MV_NFC_CMD_WRITE_LAST_NAKED,
+	MV_NFC_CMD_WRITE_DISPATCH,
+	MV_NFC_CMD_WRITE_DISPATCH_START,
+	MV_NFC_CMD_WRITE_DISPATCH_END,
+
+	MV_NFC_CMD_COUNT	/* This should be the last enum */
+
+} MV_NFC_CMD_TYPE;
+
+
+/*
+ * Nand information structure.
+ *	flashId		The ID of the flash information structure representing the timing
+ *			and physical layout data of the flash device.
+ *	cmdsetId	The ID of the command-set structure holding the access
+ *			commands for the flash device.
+ *	flashWidth	Flash device interface width in bits.
+ *	autoStatusRead	Whether to automatically read the flash status after each
+ *			erase / write commands.
+ *	tclk		System TCLK.
+ *	readyBypass	Whether to wait for the RnB signal to be deasserted after
+ *			waiting the tR or skip it and move directly to the next step.
+ *	ioMode		Controller access mode (PDMA / PIO).
+ *	eccMode		Flash ECC mode (Hamming, BCH, None).
+ *	ifMode		Flash interface mode.
+ *	currC		The current flash CS currently being accessed.
+ *	dataChanHndl	Pointer to the data DMA channel
+ *	cmdChanHndl	Pointer to the command DMA Channel
+ *	cmdBuff		Command buffer information (used in DMA only)
+ *	regsPhysAddr	Physical address of internal registers (used in DMA
+ *			mode only)
+ *	dataPdmaIntMask Interrupt mask for PDMA data channel (used in DMA mode
+ *			only).
+ *	cmdPdmaIntMask	Interrupt mask for PDMA command channel (used in DMA
+ *			mode only).
+ */
+typedef struct {
+	MV_U32		flashIdx;
+	MV_U32		cmdsetIdx;
+	MV_U32		flashWidth;
+	MV_U32		dfcWidth;
+	MV_BOOL		autoStatusRead;
+	MV_BOOL		readyBypass;
+	MV_NFC_IO_MODE	ioMode;
+	MV_NFC_ECC_MODE	eccMode;
+	MV_NFC_IF_MODE	ifMode;
+	MV_NFC_CHIP_SEL	currCs;
+#ifdef MV_INCLUDE_PDMA
+	MV_PDMA_CHANNEL	dataChanHndl;
+	MV_PDMA_CHANNEL	cmdChanHndl;
+#endif
+	MV_BUF_INFO	cmdBuff;
+	MV_BUF_INFO	cmdDescBuff;
+	MV_BUF_INFO	dataDescBuff;
+	MV_U32		regsPhysAddr;
+#ifdef MV_INCLUDE_PDMA
+	MV_U32		dataPdmaIntMask;
+	MV_U32		cmdPdmaIntMask;
+#endif
+} MV_NFC_CTRL;
+
+/*
+ * Nand multi command information structure.
+ *	cmd		The command to be issued.
+ *	pageAddr	The flash page address to operate on.
+ *	pageCount	Number of pages to read / write.
+ *	virtAddr	The virtual address of the buffer to copy data to
+ *			from (For relevant commands).
+ *	physAddr	The physical address of the buffer to copy data to
+ *			from (For relevant commands).
+ *	The following parameters might only be used when working in Gagned PDMA
+ *	and the pageCount must be set to 1.
+ *	For ganged mode, the use might need to split the NAND stack read
+ *	write buffer into several buffers according to what the HW expects.
+ *	e.g. NAND stack expects data in the following format:
+ *	---------------------------
+ *	| Data (4K) | Spare | ECC |
+ *	---------------------------
+ *	While NAND controller expects data to be in the following format:
+ *	-----------------------------------------------------
+ *	| Data (2K) | Spare | ECC | Data (2K) | Spare | ECC |
+ *	-----------------------------------------------------
+ *	numSgBuffs	Number of buffers to split the HW buffer into
+ *			If 1, then buffOffset & buffSize are ignored.
+ *	sgBuffAddr	Array holding the address of the buffers into which the
+ *			HW data should be split (Or read into).
+ *	sgBuffSize	Array holding the size of each sub-buffer, entry "i"
+ *			represents the size in bytes of the buffer starting at
+ *			offset buffOffset[i].
+ */
+typedef struct {
+	MV_NFC_CMD_TYPE	cmd;
+	MV_U32		pageAddr;
+	MV_U32		pageCount;
+	MV_U32		*virtAddr;
+	MV_U32		physAddr;
+	MV_U32		numSgBuffs;
+	MV_U32		sgBuffAddr[MV_NFC_RW_MAX_BUFF_NUM];
+	MV_U32		*sgBuffAddrVirt[MV_NFC_RW_MAX_BUFF_NUM];
+	MV_U32		sgBuffSize[MV_NFC_RW_MAX_BUFF_NUM];
+	MV_U32		length;
+} MV_NFC_MULTI_CMD;
+
+typedef struct {
+	MV_U32		cmdb0;
+	MV_U32		cmdb1;
+	MV_U32		cmdb2;
+	MV_U32		cmdb3;
+} MV_NFC_CMD;
+
+struct MV_NFC_HAL_DATA {
+	int (*mvCtrlNandClkSetFunction) (int);    /* Controller NAND clock div  */
+};
+/** Micron MT29F NAND driver (ONFI):  Parameter Page Data   */
+struct parameter_page_t {
+	char signature[5];			/* Parameter page signature (ONFI) */
+	MV_U16 rev_num;				/* Revision number */
+	MV_U16 feature;				/* Features supported */
+	MV_U16 command;				/* Optional commands supported */
+	char manufacturer[13];			/* Device manufacturer */
+	char model[21];				/* Device part number */
+	MV_U8 jedec_id;				/* Manufacturer ID (Micron = 2Ch) */
+	MV_U16 date_code;			/* Date code */
+	MV_U32 data_bytes_per_page;		/* Number of data bytes per page */
+	MV_U16 spare_bytes_per_page;		/* Number of spare bytes per page */
+	MV_U32 data_bytes_per_partial_page;	/* Number of data bytes per partial page */
+	MV_U16 spare_bytes_per_partial_page;	/* Number of spare bytes per partial page */
+	MV_U32 pages_per_block;			/* Number of pages per block */
+	MV_U32 blocks_per_lun;			/* Number of blocks per unit */
+	MV_U8 luns_per_ce;			/* Number of logical units (LUN) per chip enable */
+	MV_U8 num_addr_cycles;			/* Number of address cycles */
+	MV_U8 bit_per_cell;			/* Number of bits per cell (1 = SLC; >1= MLC) */
+	MV_U16 max_bad_blocks_per_lun;		/* Bad blocks maximum per unit */
+	MV_U16 block_endurance;			/* Block endurance */
+	MV_U8 guarenteed_valid_blocks;		/* Guaranteed valid blocks at beginning of target */
+	MV_U16 block_endurance_guarenteed_valid; /* Block endurance for guaranteed valid blocks */
+	MV_U8 num_programs_per_page;		/* Number of programs per page */
+	MV_U8 partial_prog_attr;		/* Partial programming attributes */
+	MV_U8 num_ECC_bits;			/* Number of bits ECC bits */
+	MV_U8 num_interleaved_addr_bits;	/* Number of interleaved address bits */
+	MV_U8 interleaved_op_attr;		/* Interleaved operation attributes */
+};
+
+
+/********************************/
+/* Functions API		*/
+/********************************/
+MV_STATUS mvNfcInit(MV_NFC_INFO *nfcInfo, MV_NFC_CTRL *nfcCtrl, struct MV_NFC_HAL_DATA *halData);
+MV_STATUS mvNfcSelectChip(MV_NFC_CTRL *nfcCtrl, MV_NFC_CHIP_SEL chip);
+MV_STATUS mvNfcCommandPio(MV_NFC_CTRL *nfcCtrl, MV_NFC_MULTI_CMD *cmd_desc, MV_BOOL next);
+MV_STATUS mvNfcCommandMultiple(MV_NFC_CTRL *nfcCtrl, MV_NFC_MULTI_CMD *descInfo, MV_U32 descCnt);
+MV_U32    mvNfcStatusGet(MV_NFC_CTRL *nfcCtrl, MV_NFC_CMD_TYPE cmd, MV_U32 *value);
+MV_STATUS mvNfcIntrSet(MV_NFC_CTRL *nfcCtrl, MV_U32 intMask, MV_BOOL enable);
+MV_STATUS mvNfcReadWrite(MV_NFC_CTRL *nfcCtrl, MV_NFC_CMD_TYPE cmd, MV_U32 *virtBufAddr, MV_U32 physBuffAddr);
+MV_VOID   mvNfcReadWritePio(MV_NFC_CTRL *nfcCtrl, MV_U32 *buff, MV_U32 data_len, MV_NFC_PIO_RW_MODE mode);
+MV_VOID   mvNfcAddress2RowConvert(MV_NFC_CTRL *nfcCtrl, MV_U32 address, MV_U32 *row, MV_U32 *colOffset);
+MV_VOID   mvNfcAddress2BlockConvert(MV_NFC_CTRL *nfcCtrl, MV_U32 address, MV_U32 *blk);
+MV_8     *mvNfcFlashModelGet(MV_NFC_CTRL *nfcCtrl);
+MV_STATUS mvNfcFlashPageSizeGet(MV_NFC_CTRL *nfcCtrl, MV_U32 *size, MV_U32 *totalSize);
+MV_STATUS mvNfcFlashBlockSizeGet(MV_NFC_CTRL *nfcCtrl, MV_U32 *size);
+MV_STATUS mvNfcFlashBlockNumGet(MV_NFC_CTRL *nfcCtrl, MV_U32 *numBlocks);
+MV_STATUS mvNfcDataLength(MV_NFC_CTRL *nfcCtrl, MV_NFC_CMD_TYPE cmd, MV_U32 *data_len);
+MV_STATUS mvNfcTransferDataLength(MV_NFC_CTRL *nfcCtrl, MV_NFC_CMD_TYPE cmd, MV_U32 *data_len);
+MV_STATUS mvNfcFlashIdGet(MV_NFC_CTRL *nfcCtrl, MV_U32 *flashId);
+MV_STATUS mvNfcUnitStateStore(MV_U32 *stateData, MV_U32 *len);
+MV_NFC_ECC_MODE mvNfcEccModeSet(MV_NFC_CTRL *nfcCtrl, MV_NFC_ECC_MODE eccMode);
+MV_U32    mvNfcBadBlockPageNumber(MV_NFC_CTRL *nfcCtrl);
+MV_STATUS mvNfcReset(void);
+void mvNfcPrintParamPage(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* __INCMVNFCH */
diff --git a/drivers/mtd/nand/mvebu_nfc/hal/mvNfcRegs.h b/drivers/mtd/nand/mvebu_nfc/hal/mvNfcRegs.h
new file mode 100644
index 000000000000..213b0cebad41
--- /dev/null
+++ b/drivers/mtd/nand/mvebu_nfc/hal/mvNfcRegs.h
@@ -0,0 +1,313 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCMVNFCREGSH
+#define __INCMVNFCREGSH
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "mvSysNfcConfig.h"
+
+/* NAND Flash Control Register */
+#define	NFC_CONTROL_REG			(MV_NFC_REGS_BASE + 0x0)
+#define	NFC_CTRL_WRCMDREQ_MASK		(0x1 << 0)
+#define NFC_CTRL_RDDREQ_MASK		(0x1 << 1)
+#define NFC_CTRL_WRDREQ_MASK		(0x1 << 2)
+#define NFC_CTRL_CORRERR_MASK		(0x1 << 3)
+#define NFC_CTRL_UNCERR_MASK		(0x1 << 4)
+#define NFC_CTRL_CS1_BBD_MASK		(0x1 << 5)
+#define NFC_CTRL_CS0_BBD_MASK		(0x1 << 6)
+#define NFC_CTRL_CS1_CMDD_MASK		(0x1 << 7)
+#define NFC_CTRL_CS0_CMDD_MASK		(0x1 << 8)
+#define NFC_CTRL_CS1_PAGED_MASK		(0x1 << 9)
+#define NFC_CTRL_CS0_PAGED_MASK		(0x1 << 10)
+#define NFC_CTRL_RDY_MASK		(0x1 << 11)
+#define NFC_CTRL_ND_ARB_EN_MASK		(0x1 << 12)
+#define NFC_CTRL_PG_PER_BLK_OFFS	13
+#define NFC_CTRL_PG_PER_BLK_MASK	(0x3 << NFC_CTRL_PG_PER_BLK_OFFS)
+#define NFC_CTRL_PG_PER_BLK_32		(0x0 << NFC_CTRL_PG_PER_BLK_OFFS)
+#define NFC_CTRL_PG_PER_BLK_64		(0x2 << NFC_CTRL_PG_PER_BLK_OFFS)
+#define NFC_CTRL_PG_PER_BLK_128		(0x1 << NFC_CTRL_PG_PER_BLK_OFFS)
+#define NFC_CTRL_PG_PER_BLK_256		(0x3 << NFC_CTRL_PG_PER_BLK_OFFS)
+#define NFC_CTRL_RA_START_MASK		(0x1 << 15)
+#define NFC_CTRL_RD_ID_CNT_OFFS		16
+#define NFC_CTRL_RD_ID_CNT_MASK		(0x7 << NFC_CTRL_RD_ID_CNT_OFFS)
+#define NFC_CTRL_RD_ID_CNT_SP		(0x2 << NFC_CTRL_RD_ID_CNT_OFFS)
+#define NFC_CTRL_RD_ID_CNT_LP		(0x4 << NFC_CTRL_RD_ID_CNT_OFFS)
+#define NFC_CTRL_CLR_PG_CNT_MASK	(0x1 << 20)
+#define NFC_CTRL_FORCE_CSX_MASK		(0x1 << 21)
+#define NFC_CTRL_ND_STOP_MASK		(0x1 << 22)
+#define NFC_CTRL_SEQ_DIS_MASK		(0x1 << 23)
+#define NFC_CTRL_PAGE_SZ_OFFS		24
+#define NFC_CTRL_PAGE_SZ_MASK		(0x3 << NFC_CTRL_PAGE_SZ_OFFS)
+#define NFC_CTRL_PAGE_SZ_512B		(0x0 << NFC_CTRL_PAGE_SZ_OFFS)
+#define NFC_CTRL_PAGE_SZ_2KB		(0x1 << NFC_CTRL_PAGE_SZ_OFFS)
+#define NFC_CTRL_DWIDTH_M_MASK		(0x1 << 26)
+#define NFC_CTRL_DWIDTH_C_MASK		(0x1 << 27)
+#define NFC_CTRL_ND_RUN_MASK		(0x1 << 28)
+#define NFC_CTRL_DMA_EN_MASK		(0x1 << 29)
+#define NFC_CTRL_ECC_EN_MASK		(0x1 << 30)
+#define NFC_CTRL_SPARE_EN_MASK		(0x1 << 31)
+
+/* NAND Interface Timing Parameter 0 Register */
+#define NFC_TIMING_0_REG		(MV_NFC_REGS_BASE + 0x4)
+#define NFC_TMNG0_TRP_OFFS		0
+#define NFC_TMNG0_TRP_MASK		(0x7 << NFC_TMNG0_TRP_OFFS)
+#define NFC_TMNG0_TRH_OFFS		3
+#define NFC_TMNG0_TRH_MASK		(0x7 << NFC_TMNG0_TRH_OFFS)
+#define NFC_TMNG0_ETRP_OFFS		6
+#define NFC_TMNG0_SEL_NRE_EDGE_OFFS	7
+#define NFC_TMNG0_TWP_OFFS		8
+#define NFC_TMNG0_TWP_MASK		(0x7 << NFC_TMNG0_TWP_OFFS)
+#define NFC_TMNG0_TWH_OFFS		11
+#define NFC_TMNG0_TWH_MASK		(0x7 << NFC_TMNG0_TWH_OFFS)
+#define NFC_TMNG0_TCS_OFFS		16
+#define NFC_TMNG0_TCS_MASK		(0x7 << NFC_TMNG0_TCS_OFFS)
+#define NFC_TMNG0_TCH_OFFS		19
+#define NFC_TMNG0_TCH_MASK		(0x7 << NFC_TMNG0_TCH_OFFS)
+#define NFC_TMNG0_RD_CNT_DEL_OFFS	22
+#define NFC_TMNG0_RD_CNT_DEL_MASK	(0xF << NFC_TMNG0_RD_CNT_DEL_OFFS)
+#define NFC_TMNG0_SEL_CNTR_OFFS		26
+#define NFC_TMNG0_TADL_OFFS		27
+#define NFC_TMNG0_TADL_MASK		(0x1F << NFC_TMNG0_TADL_OFFS)
+
+/* NAND Interface Timing Parameter 1 Register */
+#define NFC_TIMING_1_REG		(MV_NFC_REGS_BASE + 0xC)
+#define NFC_TMNG1_TAR_OFFS		0
+#define NFC_TMNG1_TAR_MASK		(0xF << NFC_TMNG1_TAR_OFFS)
+#define NFC_TMNG1_TWHR_OFFS		4
+#define NFC_TMNG1_TWHR_MASK		(0xF << NFC_TMNG1_TWHR_OFFS)
+#define NFC_TMNG1_TRHW_OFFS		8
+#define NFC_TMNG1_TRHW_MASK		(0x3 << NFC_TMNG1_TRHW_OFFS)
+#define NFC_TMNG1_PRESCALE_OFFS		14
+#define NFC_TMNG1_WAIT_MODE_OFFS	15
+#define NFC_TMNG1_TR_OFFS		16
+#define NFC_TMNG1_TR_MASK		(0xFFFF << NFC_TMNG1_TR_OFFS)
+
+/* NAND Controller Status Register - NDSR */
+#define NFC_STATUS_REG			(MV_NFC_REGS_BASE + 0x14)
+#define NFC_SR_WRCMDREQ_MASK		(0x1 << 0)
+#define NFC_SR_RDDREQ_MASK		(0x1 << 1)
+#define NFC_SR_WRDREQ_MASK		(0x1 << 2)
+#define NFC_SR_CORERR_MASK		(0x1 << 3)
+#define NFC_SR_UNCERR_MASK		(0x1 << 4)
+#define NFC_SR_CS1_BBD_MASK		(0x1 << 5)
+#define NFC_SR_CS0_BBD_MASK		(0x1 << 6)
+#define NFC_SR_CS1_CMDD_MASK		(0x1 << 7)
+#define NFC_SR_CS0_CMDD_MASK		(0x1 << 8)
+#define NFC_SR_CS1_PAGED_MASK		(0x1 << 9)
+#define NFC_SR_CS0_PAGED_MASK		(0x1 << 10)
+#define NFC_SR_RDY0_MASK		(0x1 << 11)
+#define NFC_SR_RDY1_MASK		(0x1 << 12)
+#define NFC_SR_ALLIRQ_MASK		(0x1FFF << 0)
+#define NFC_SR_TRUSTVIO_MASK		(0x1 << 15)
+#define NFC_SR_ERR_CNT_OFFS		16
+#define NFC_SR_ERR_CNT_MASK		(0x1F << NFC_SR_ERR_CNT_OFFS)
+
+/* NAND Controller Page Count Register */
+#define NFC_PAGE_COUNT_REG		(MV_NFC_REGS_BASE + 0x18)
+#define NFC_PCR_PG_CNT_0_OFFS		0
+#define NFC_PCR_PG_CNT_0_MASK		(0xFF << NFC_PCR_PG_CNT_0_OFFS)
+#define NFC_PCR_PG_CNT_1_OFFS		16
+#define NFC_PCR_PG_CNT_1_MASK		(0xFF << NFC_PCR_PG_CNT_1_OFFS)
+
+/* NAND Controller Bad Block 0 Register */
+#define NFC_BAD_BLOCK_0_REG		(MV_NFC_REGS_BASE + 0x1C)
+
+/* NAND Controller Bad Block 1 Register */
+#define NFC_BAD_BLOCK_1_REG		(MV_NFC_REGS_BASE + 0x20)
+
+/* NAND ECC Controle Register */
+#define NFC_ECC_CONTROL_REG		(MV_NFC_REGS_BASE + 0x28)
+#define NFC_ECC_BCH_EN_MASK		(0x1 << 0)
+#define NFC_ECC_THRESHOLD_OFFS		1
+#define NFC_ECC_THRESHOLF_MASK		(0x3F << NFC_ECC_THRESHOLD_OFFS)
+#define NFC_ECC_SPARE_OFFS		7
+#define NFC_ECC_SPARE_MASK		(0xFF << NFC_ECC_SPARE_OFFS)
+
+/* NAND Busy Length Count */
+#define NFC_BUSY_LEN_CNT_REG		(MV_NFC_REGS_BASE + 0x2C)
+#define NFC_BUSY_CNT_0_OFFS		0
+#define NFC_BUSY_CNT_0_MASK		(0xFFFF << NFC_BUSY_CNT_0_OFFS)
+#define NFC_BUSY_CNT_1_OFFS		16
+#define NFC_BUSY_CNT_1_MASK		(0xFFFF << NFC_BUSY_CNT_1_OFFS)
+
+/* NAND Mutex Lock */
+#define NFC_MUTEX_LOCK_REG		(MV_NFC_REGS_BASE + 0x30)
+#define NFC_MUTEX_LOCK_MASK		(0x1 << 0)
+
+/* NAND Partition Command Match */
+#define NFC_PART_CMD_MACTH_1_REG	(MV_NFC_REGS_BASE + 0x34)
+#define NFC_PART_CMD_MACTH_2_REG	(MV_NFC_REGS_BASE + 0x38)
+#define NFC_PART_CMD_MACTH_3_REG	(MV_NFC_REGS_BASE + 0x3C)
+#define NFC_CMDMAT_CMD1_MATCH_OFFS	0
+#define NFC_CMDMAT_CMD1_MATCH_MASK	(0xFF << NFC_CMDMAT_CMD1_MATCH_OFFS)
+#define NFC_CMDMAT_CMD1_ROWADD_MASK	(0x1 << 8)
+#define NFC_CMDMAT_CMD1_NKDDIS_MASK	(0x1 << 9)
+#define NFC_CMDMAT_CMD2_MATCH_OFFS	10
+#define NFC_CMDMAT_CMD2_MATCH_MASK	(0xFF << NFC_CMDMAT_CMD2_MATCH_OFFS)
+#define NFC_CMDMAT_CMD2_ROWADD_MASK	(0x1 << 18)
+#define NFC_CMDMAT_CMD2_NKDDIS_MASK	(0x1 << 19)
+#define NFC_CMDMAT_CMD3_MATCH_OFFS	20
+#define NFC_CMDMAT_CMD3_MATCH_MASK	(0xFF << NFC_CMDMAT_CMD3_MATCH_OFFS)
+#define NFC_CMDMAT_CMD3_ROWADD_MASK	(0x1 << 28)
+#define NFC_CMDMAT_CMD3_NKDDIS_MASK	(0x1 << 29)
+#define NFC_CMDMAT_VALID_CNT_OFFS	30
+#define NFC_CMDMAT_VALID_CNT_MASK	(0x3 << NFC_CMDMAT_VALID_CNT_OFFS)
+
+/* NAND Controller Data Buffer */
+#define NFC_DATA_BUFF_REG_4PDMA		(MV_NFC_REGS_OFFSET + 0x40)
+#define NFC_DATA_BUFF_REG		(MV_NFC_REGS_BASE + 0x40)
+
+/* NAND Controller Command Buffer 0 */
+#define NFC_COMMAND_BUFF_0_REG_4PDMA	(MV_NFC_REGS_OFFSET + 0x48)
+#define NFC_COMMAND_BUFF_0_REG		(MV_NFC_REGS_BASE + 0x48)
+#define NFC_CB0_CMD1_OFFS		0
+#define NFC_CB0_CMD1_MASK		(0xFF << NFC_CB0_CMD1_OFFS)
+#define NFC_CB0_CMD2_OFFS		8
+#define NFC_CB0_CMD2_MASK		(0xFF << NFC_CB0_CMD2_OFFS)
+#define NFC_CB0_ADDR_CYC_OFFS		16
+#define NFC_CB0_ADDR_CYC_MASK		(0x7 << NFC_CB0_ADDR_CYC_OFFS)
+#define NFC_CB0_DBC_MASK			(0x1 << 19)
+#define NFC_CB0_NEXT_CMD_MASK		(0x1 << 20)
+#define NFC_CB0_CMD_TYPE_OFFS		21
+#define NFC_CB0_CMD_TYPE_MASK		(0x7 << NFC_CB0_CMD_TYPE_OFFS)
+#define NFC_CB0_CMD_TYPE_READ		(0x0 << NFC_CB0_CMD_TYPE_OFFS)
+#define NFC_CB0_CMD_TYPE_WRITE		(0x1 << NFC_CB0_CMD_TYPE_OFFS)
+#define NFC_CB0_CMD_TYPE_ERASE		(0x2 << NFC_CB0_CMD_TYPE_OFFS)
+#define NFC_CB0_CMD_TYPE_READ_ID	(0x3 << NFC_CB0_CMD_TYPE_OFFS)
+#define NFC_CB0_CMD_TYPE_STATUS		(0x4 << NFC_CB0_CMD_TYPE_OFFS)
+#define NFC_CB0_CMD_TYPE_RESET		(0x5 << NFC_CB0_CMD_TYPE_OFFS)
+#define NFC_CB0_CMD_TYPE_NAKED_CMD	(0x6 << NFC_CB0_CMD_TYPE_OFFS)
+#define NFC_CB0_CMD_TYPE_NAKED_ADDR	(0x7 << NFC_CB0_CMD_TYPE_OFFS)
+#define NFC_CB0_CSEL_MASK		(0x1 << 24)
+#define NFC_CB0_AUTO_RS_MASK		(0x1 << 25)
+#define NFC_CB0_ST_ROW_EN_MASK		(0x1 << 26)
+#define NFC_CB0_RDY_BYP_MASK		(0x1 << 27)
+#define NFC_CB0_LEN_OVRD_MASK		(0x1 << 28)
+#define NFC_CB0_CMD_XTYPE_OFFS		29
+#define NFC_CB0_CMD_XTYPE_MASK		(0x7 << NFC_CB0_CMD_XTYPE_OFFS)
+#define NFC_CB0_CMD_XTYPE_MONOLITHIC	(0x0 << NFC_CB0_CMD_XTYPE_OFFS)
+#define NFC_CB0_CMD_XTYPE_LAST_NAKED	(0x1 << NFC_CB0_CMD_XTYPE_OFFS)
+#define NFC_CB0_CMD_XTYPE_MULTIPLE	(0x4 << NFC_CB0_CMD_XTYPE_OFFS)
+#define NFC_CB0_CMD_XTYPE_NAKED		(0x5 << NFC_CB0_CMD_XTYPE_OFFS)
+#define NFC_CB0_CMD_XTYPE_DISPATCH	(0x6 << NFC_CB0_CMD_XTYPE_OFFS)
+
+/* NAND Controller Command Buffer 1 */
+#define NFC_COMMAND_BUFF_1_REG		(MV_NFC_REGS_BASE + 0x4C)
+#define NFC_CB1_ADDR1_OFFS		0
+#define NFC_CB1_ADDR1_MASK		(0xFF << NFC_CB1_ADDR1_OFFS)
+#define NFC_CB1_ADDR2_OFFS		8
+#define NFC_CB1_ADDR2_MASK		(0xFF << NFC_CB1_ADDR2_OFFS)
+#define NFC_CB1_ADDR3_OFFS		16
+#define NFC_CB1_ADDR3_MASK		(0xFF << NFC_CB1_ADDR3_OFFS)
+#define NFC_CB1_ADDR4_OFFS		24
+#define NFC_CB1_ADDR4_MASK		(0xFF << NFC_CB1_ADDR4_OFFS)
+
+/* NAND Controller Command Buffer 2 */
+#define NFC_COMMAND_BUFF_2_REG		(MV_NFC_REGS_BASE + 0x50)
+#define NFC_CB2_ADDR5_OFFS		0
+#define NFC_CB2_ADDR5_MASK		(0xFF << NFC_CB2_ADDR5_OFFS)
+#define NFC_CB2_CS_2_3_SELECT_MASK	(0x80 << NFC_CB2_ADDR5_OFFS)
+#define NFC_CB2_PAGE_CNT_OFFS		8
+#define NFC_CB2_PAGE_CNT_MASK		(0xFF << NFC_CB2_PAGE_CNT_OFFS)
+#define NFC_CB2_ST_CMD_OFFS		16
+#define NFC_CB2_ST_CMD_MASK		(0xFF << NFC_CB2_ST_CMD_OFFS)
+#define NFC_CB2_ST_MASK_OFFS		24
+#define NFC_CB2_ST_MASK_MASK		(0xFF << NFC_CB2_ST_MASK_OFFS)
+
+/* NAND Controller Command Buffer 3 */
+#define NFC_COMMAND_BUFF_3_REG		(MV_NFC_REGS_BASE + 0x54)
+#define NFC_CB3_NDLENCNT_OFFS		0
+#define NFC_CB3_NDLENCNT_MASK		(0xFFFF << NFC_CB3_NDLENCNT_OFFS)
+#define NFC_CB3_ADDR6_OFFS		16
+#define NFC_CB3_ADDR6_MASK		(0xFF << NFC_CB3_ADDR6_OFFS)
+#define NFC_CB3_ADDR7_OFFS		24
+#define NFC_CB3_ADDR7_MASK		(0xFF << NFC_CB3_ADDR7_OFFS)
+
+/* NAND Arbitration Control */
+#define NFC_ARB_CONTROL_REG		(MV_NFC_REGS_BASE + 0x5C)
+#define NFC_ARB_CNT_OFFS		0
+#define NFC_ARB_CNT_MASK		(0xFFFF << NFC_ARB_CNT_OFFS)
+
+/* NAND Partition Table for Chip Select */
+#define NFC_PART_TBL_4CS_REG(x)		(MV_NFC_REGS_BASE + (x * 0x4))
+#define NFC_PT4CS_BLOCKADD_OFFS		0
+#define NFC_PT4CS_BLOCKADD_MASK		(0xFFFFFF << NFC_PT4CS_BLOCKADD_OFFS)
+#define NFC_PT4CS_TRUSTED_MASK		(0x1 << 29)
+#define NFC_PT4CS_LOCK_MASK		(0x1 << 30)
+#define NFC_PT4CS_VALID_MASK		(0x1 << 31)
+
+/* NAND XBAR2AXI Bridge Configuration Register */
+#define NFC_XBAR2AXI_BRDG_CFG_REG	(MV_NFC_REGS_BASE + 0x1022C)
+#define NFC_XBC_CS_EXPAND_EN_MASK	(0x1 << 2)
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* __INCMVNFCREGSH */
diff --git a/drivers/mtd/nand/mvebu_nfc/mvSysNfcConfig.h b/drivers/mtd/nand/mvebu_nfc/mvSysNfcConfig.h
new file mode 100644
index 000000000000..5da1310479d5
--- /dev/null
+++ b/drivers/mtd/nand/mvebu_nfc/mvSysNfcConfig.h
@@ -0,0 +1,43 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+
+*******************************************************************************/
+/*******************************************************************************
+* mvSysNfcConfig.h - Marvell NFC unit specific configurations
+*
+* DESCRIPTION:
+*       None.
+*
+* DEPENDENCIES:
+*       None.
+*
+*******************************************************************************/
+
+/*
+ * Driver global variable holding the base address of the unit, which is
+ * initialized during driver probe
+ */
+extern MV_U32 mv_nand_base;
+
+#define MV_NFC_REGS_BASE	mv_nand_base
+
+/*
+ * This is being kept to satisfy HAL requirements. The definition is only used
+ * in MV_MEMIO_LE32_WRITE/READ, which already include mv_nand_base
+ */
+#define INTER_REGS_BASE		0x0
diff --git a/drivers/mtd/nand/mvebu_nfc/nand_nfc.c b/drivers/mtd/nand/mvebu_nfc/nand_nfc.c
new file mode 100644
index 000000000000..d181d1e9c605
--- /dev/null
+++ b/drivers/mtd/nand/mvebu_nfc/nand_nfc.c
@@ -0,0 +1,1870 @@
+/*
+ * nand_nfc.c
+ *
+ * Copyright c 2005 Intel Corporation
+ * Copyright c 2006 Marvell International Ltd.
+ *
+ * This driver is based on the PXA drivers/mtd/nand/pxa3xx_nand.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <asm/dma.h>
+#include "mvCommon.h"
+#include "mvOs.h"
+
+#ifndef CONFIG_OF
+#error nand_nfc driver supports only DT configuration
+#endif
+
+#ifdef CONFIG_MV_INCLUDE_PDMA
+#include <asm/hardware/pxa-dma.h>
+#include "pdma/mvPdma.h"
+#include "pdma/mvPdmaRegs.h"
+#endif
+#include "nand_nfc.h"
+
+#define	DRIVER_NAME	"armada-nand"
+
+#define NFC_DPRINT(x)		/* printk (x) */
+#define PRINT_LVL		KERN_DEBUG
+
+#define	CHIP_DELAY_TIMEOUT	(20 * HZ/10)
+#define NFC_MAX_NUM_OF_DESCR	(33) /* worst case in 8K ganaged */
+#define NFC_8BIT1K_ECC_SPARE	(32)
+
+#define NFC_SR_MASK		(0xfff)
+#define NFC_SR_BBD_MASK		(NFC_SR_CS0_BBD_MASK | NFC_SR_CS1_BBD_MASK)
+
+#define ARMADA_MAIN_PLL_FREQ	2000000000
+
+char *cmd_text[] = {
+	"MV_NFC_CMD_READ_ID",
+	"MV_NFC_CMD_READ_STATUS",
+	"MV_NFC_CMD_ERASE",
+	"MV_NFC_CMD_MULTIPLANE_ERASE",
+	"MV_NFC_CMD_RESET",
+
+	"MV_NFC_CMD_CACHE_READ_SEQ",
+	"MV_NFC_CMD_CACHE_READ_RAND",
+	"MV_NFC_CMD_EXIT_CACHE_READ",
+	"MV_NFC_CMD_CACHE_READ_START",
+	"MV_NFC_CMD_READ_MONOLITHIC",
+	"MV_NFC_CMD_READ_MULTIPLE",
+	"MV_NFC_CMD_READ_NAKED",
+	"MV_NFC_CMD_READ_LAST_NAKED",
+	"MV_NFC_CMD_READ_DISPATCH",
+
+	"MV_NFC_CMD_WRITE_MONOLITHIC",
+	"MV_NFC_CMD_WRITE_MULTIPLE",
+	"MV_NFC_CMD_WRITE_NAKED",
+	"MV_NFC_CMD_WRITE_LAST_NAKED",
+	"MV_NFC_CMD_WRITE_DISPATCH",
+	"MV_NFC_CMD_WRITE_DISPATCH_START",
+	"MV_NFC_CMD_WRITE_DISPATCH_END",
+
+	"MV_NFC_CMD_COUNT"	/* This should be the last enum */
+
+};
+
+MV_U32 pg_sz[NFC_PAGE_SIZE_MAX_CNT] = {512, 2048, 4096, 8192, 16384};
+MV_U32 mv_nand_base;
+struct clk *ecc_clk;
+
+/* error code and state */
+enum {
+	ERR_NONE	= 0,
+	ERR_DMABUSERR	= -1,
+	ERR_CMD_TO	= -2,
+	ERR_DATA_TO	= -3,
+	ERR_DBERR	= -4,
+	ERR_BBD		= -5,
+};
+
+enum {
+	STATE_READY	= 0,
+	STATE_CMD_HANDLE,
+	STATE_DMA_READING,
+	STATE_DMA_WRITING,
+	STATE_DMA_DONE,
+	STATE_PIO_READING,
+	STATE_PIO_WRITING,
+};
+
+struct orion_nfc_info {
+	struct platform_device	 *pdev;
+
+	void __iomem		*mmio_base;
+	unsigned int		mmio_phys_base;
+
+	unsigned int		irq;
+
+	struct clk		*aux_clk;
+
+	unsigned int		buf_start;
+	unsigned int		buf_count;
+
+	unsigned char		*data_buff;
+	dma_addr_t		data_buff_phys;
+	size_t			data_buff_size;
+
+	/* saved column/page_addr during CMD_SEQIN */
+	int			seqin_column;
+	int			seqin_page_addr;
+
+	/* relate to the command */
+	unsigned int		state;
+	unsigned int		use_dma;	/* use DMA ? */
+
+	/* flash information */
+	unsigned int		nfc_width;	/* Width of NFC 16/8 bits	*/
+	const char		*nfc_mode;	/* NAND mode - normal or ganged	*/
+	unsigned int		num_cs;		/* Number of NAND devices	*/
+						/* chip-selects.		*/
+	MV_NFC_ECC_MODE		ecc_type;
+	enum nfc_page_size	page_size;
+	uint32_t		page_per_block;	/* Pages per block (PG_PER_BLK) */
+	uint32_t		flash_width;	/* Width of Flash memory (DWIDTH_M) */
+	size_t			read_id_bytes;
+
+	size_t			data_size;	/* data size in FIFO */
+	size_t			read_size;
+	int			retcode;
+	uint32_t		dscr;		/* IRQ events - status */
+	struct completion	cmd_complete;
+
+	int			chained_cmd;
+	uint32_t		column;
+	uint32_t		page_addr;
+	MV_NFC_CMD_TYPE		cmd;
+	MV_NFC_CTRL		nfcCtrl;
+
+	/* RW buffer chunks config */
+	MV_U32			sgBuffAddr[MV_NFC_RW_MAX_BUFF_NUM];
+	MV_U32			sgBuffSize[MV_NFC_RW_MAX_BUFF_NUM];
+	MV_U32			sgNumBuffs;
+
+	/* suspend / resume data */
+	MV_U32			nfcUnitData[128];
+	MV_U32			nfcDataLen;
+	MV_U32			pdmaUnitData[128];
+	MV_U32			pdmaDataLen;
+};
+
+/*
+ * ECC Layout
+ */
+
+static struct nand_ecclayout ecc_latout_512B_hamming = {
+	.eccbytes = 6,
+	.eccpos = {8, 9, 10, 11, 12, 13 },
+	.oobfree = { {2, 6} }
+};
+
+static struct nand_ecclayout ecc_layout_2KB_hamming = {
+	.eccbytes = 24,
+	.eccpos = {
+		40, 41, 42, 43, 44, 45, 46, 47,
+		48, 49, 50, 51, 52, 53, 54, 55,
+		56, 57, 58, 59, 60, 61, 62, 63},
+	.oobfree = { {2, 38} }
+};
+
+static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
+	.eccbytes = 32,
+	.eccpos = {
+		32, 33, 34, 35, 36, 37, 38, 39,
+		40, 41, 42, 43, 44, 45, 46, 47,
+		48, 49, 50, 51, 52, 53, 54, 55,
+		56, 57, 58, 59, 60, 61, 62, 63},
+	.oobfree = { {2, 30} }
+};
+
+static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
+	.eccbytes = 64,
+	.eccpos = {
+		32,  33,  34,  35,  36,  37,  38,  39,
+		40,  41,  42,  43,  44,  45,  46,  47,
+		48,  49,  50,  51,  52,  53,  54,  55,
+		56,  57,  58,  59,  60,  61,  62,  63,
+		96,  97,  98,  99,  100, 101, 102, 103,
+		104, 105, 106, 107, 108, 109, 110, 111,
+		112, 113, 114, 115, 116, 117, 118, 119,
+		120, 121, 122, 123, 124, 125, 126, 127},
+	/* Bootrom looks in bytes 0 & 5 for bad blocks */
+	.oobfree = { {1, 4}, {6, 26}, { 64, 32} }
+};
+
+static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
+	.eccbytes = 128,
+	.eccpos = {
+		32,  33,  34,  35,  36,  37,  38,  39,
+		40,  41,  42,  43,  44,  45,  46,  47,
+		48,  49,  50,  51,  52,  53,  54,  55,
+		56,  57,  58,  59,  60,  61,  62,  63,
+
+		96,  97,  98,  99,  100, 101, 102, 103,
+		104, 105, 106, 107, 108, 109, 110, 111,
+		112, 113, 114, 115, 116, 117, 118, 119,
+		120, 121, 122, 123, 124, 125, 126, 127,
+
+		160, 161, 162, 163, 164, 165, 166, 167,
+		168, 169, 170, 171, 172, 173, 174, 175,
+		176, 177, 178, 179, 180, 181, 182, 183,
+		184, 185, 186, 187, 188, 189, 190, 191,
+
+		224, 225, 226, 227, 228, 229, 230, 231,
+		232, 233, 234, 235, 236, 237, 238, 239,
+		240, 241, 242, 243, 244, 245, 246, 247,
+		248, 249, 250, 251, 252, 253, 254, 255},
+
+	/* Bootrom looks in bytes 0 & 5 for bad blocks */
+	.oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
+};
+
+static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
+	.eccbytes = 64,
+	.eccpos = {
+		32,  33,  34,  35,  36,  37,  38,  39,
+		40,  41,  42,  43,  44,  45,  46,  47,
+		48,  49,  50,  51,  52,  53,  54,  55,
+		56,  57,  58,  59,  60,  61,  62,  63},
+	/* Bootrom looks in bytes 0 & 5 for bad blocks */
+	.oobfree = { {1, 4}, {6, 26},  }
+};
+
+static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
+	.eccbytes = 160,
+	.eccpos = {
+		128, 129, 130, 131, 132, 133, 134, 135,
+		136, 137, 138, 139, 140, 141, 142, 143,
+		144, 145, 146, 147, 148, 149, 150, 151,
+		152, 153, 154, 155, 156, 157, 158, 159},
+	/* Bootrom looks in bytes 0 & 5 for bad blocks */
+	.oobfree = { {1, 4}, {6, 122},  }
+};
+
+static struct nand_ecclayout ecc_layout_8KB_bch12bit = {
+	.eccbytes = 0,
+	.eccpos = { },
+	/* Bootrom looks in bytes 0 & 5 for bad blocks */
+	.oobfree = { {1, 4}, {6, 58}, }
+};
+
+static struct nand_ecclayout ecc_layout_16KB_bch12bit = {
+	.eccbytes = 0,
+	.eccpos = { },
+	/* Bootrom looks in bytes 0 & 5 for bad blocks */
+	.oobfree = { {1, 4}, {6, 122},  }
+};
+
+/*
+ * Define bad block scan pattern when scanning a device for factory
+ * marked blocks.
+ */
+static uint8_t mv_scan_pattern[] = { 0xff, 0xff };
+
+static struct nand_bbt_descr mv_sp_bb = {
+	.options = NAND_BBT_SCANMVCUSTOM,
+	.offs = 5,
+	.len = 1,
+	.pattern = mv_scan_pattern
+};
+
+static struct nand_bbt_descr mv_lp_bb = {
+	.options = NAND_BBT_SCANMVCUSTOM,
+	.offs = 0,
+	.len = 2,
+	.pattern = mv_scan_pattern
+};
+
+/*
+ * Lookup Tables
+ */
+
+struct orion_nfc_naked_info {
+
+	struct nand_ecclayout	*ecc_layout;
+	struct nand_bbt_descr	*bb_info;
+	uint32_t		bb_bytepos;
+	uint32_t		chunk_size;
+	uint32_t		chunk_spare;
+	uint32_t		chunk_cnt;
+	uint32_t		last_chunk_size;
+	uint32_t		last_chunk_spare;
+};
+
+		/* PageSize */		/* ECc Type */
+static struct orion_nfc_naked_info orion_nfc_naked_info_lkup[NFC_PAGE_SIZE_MAX_CNT][MV_NFC_ECC_MAX_CNT] = {
+	/* 512B Pages */
+	{  {	/* Hamming */
+		&ecc_latout_512B_hamming, &mv_sp_bb, 512, 512, 16, 1, 0, 0
+	}, {	/* BCH 4bit */
+		NULL, NULL, 0, 0, 0, 0, 0, 0
+	}, {	/* BCH 8bit */
+		NULL, NULL, 0, 0, 0, 0, 0, 0
+	}, {	/* BCH 12bit */
+		NULL, NULL, 0, 0, 0, 0, 0, 0
+	}, {	/* BCH 16bit */
+		NULL, NULL, 0, 0, 0, 0, 0, 0
+	}, {	/* No ECC */
+		NULL, NULL, 0, 0, 0, 0, 0, 0
+	}  },
+	/* 2KB Pages */
+	{  {	/* Hamming */
+		&ecc_layout_2KB_hamming, &mv_lp_bb, 2048, 2048, 40, 1, 0, 0
+	}, {	/* BCH 4bit */
+		&ecc_layout_2KB_bch4bit, &mv_lp_bb, 2048, 2048, 32, 1, 0, 0
+	}, {	/* BCH 8bit */
+		NULL, NULL, 2018, 1024, 0, 1, 1024, 32
+	}, {	/* BCH 12bit */
+		NULL, NULL, 1988, 704, 0, 2, 640, 0
+	}, {	/* BCH 16bit */
+		NULL, NULL, 1958, 512, 0, 4, 0, 32
+	}, {	/* No ECC */
+		NULL, NULL, 0, 0, 0, 0, 0, 0
+	}  },
+	/* 4KB Pages */
+	{  {	/* Hamming */
+		NULL, 0, 0, 0, 0, 0, 0, 0
+	}, {	/* BCH 4bit */
+		&ecc_layout_4KB_bch4bit, &mv_lp_bb, 4034, 2048, 32, 2, 0, 0
+	}, {	/* BCH 8bit */
+		&ecc_layout_4KB_bch8bit, &mv_lp_bb, 4006, 1024, 0, 4, 0, 64
+	}, {	/* BCH 12bit */
+		NULL, NULL, 3946, 704,  0, 5, 576, 32
+	}, {	/* BCH 16bit */
+		NULL, NULL, 3886, 512, 0, 8, 0, 32
+	}, {	/* No ECC */
+		NULL, NULL, 0, 0, 0, 0, 0, 0
+	}  },
+	/* 8KB Pages */
+	{  {	/* Hamming */
+		NULL, 0, 0, 0, 0, 0, 0, 0
+	}, {	/* BCH 4bit */
+		&ecc_layout_8KB_bch4bit, &mv_lp_bb, 8102, 2048, 32, 4, 0, 0
+	}, {	/* BCH 8bit */
+		&ecc_layout_8KB_bch8bit, &mv_lp_bb, 7982, 1024, 0, 8, 0, 160
+	}, {	/* BCH 12bit */
+		&ecc_layout_8KB_bch12bit, &mv_lp_bb, 7862, 704, 0, 11, 448, 64
+	}, {	/* BCH 16bit */
+		NULL, NULL, 7742, 512, 0, 16, 0, 32
+	}, {	/* No ECC */
+		NULL, NULL, 0, 0, 0, 0, 0, 0
+	}  },
+	/* 16KB Pages */
+	{  {	/* Hamming */
+		NULL, NULL, 0, 0, 0, 0, 0, 0
+	}, {	/* BCH 4bit */
+		NULL, NULL, 15914, 2048, 32, 8, 0, 0
+	}, {	/* BCH 8bit */
+		NULL, NULL, 15930, 1024, 0, 16, 0, 352
+	}, {	/* BCH 12bit */
+		&ecc_layout_16KB_bch12bit, &mv_lp_bb, 15724, 704, 0, 23, 192, 128
+	}, {	/* BCH 16bit */
+		NULL, NULL, 15484, 512, 0, 32, 0, 32
+	}, {	/* No ECC */
+		NULL, NULL, 0, 0, 0, 0, 0, 0
+	}  } };
+
+
+#define ECC_LAYOUT	(orion_nfc_naked_info_lkup[info->page_size][info->ecc_type].ecc_layout)
+#define BB_INFO		(orion_nfc_naked_info_lkup[info->page_size][info->ecc_type].bb_info)
+#define	BB_BYTE_POS	(orion_nfc_naked_info_lkup[info->page_size][info->ecc_type].bb_bytepos)
+#define CHUNK_CNT	(orion_nfc_naked_info_lkup[info->page_size][info->ecc_type].chunk_cnt)
+#define CHUNK_SZ	(orion_nfc_naked_info_lkup[info->page_size][info->ecc_type].chunk_size)
+#define CHUNK_SPR	(orion_nfc_naked_info_lkup[info->page_size][info->ecc_type].chunk_spare)
+#define LST_CHUNK_SZ	(orion_nfc_naked_info_lkup[info->page_size][info->ecc_type].last_chunk_size)
+#define LST_CHUNK_SPR	(orion_nfc_naked_info_lkup[info->page_size][info->ecc_type].last_chunk_spare)
+
+struct orion_nfc_cmd_info {
+
+	uint32_t		events_p1;	/* post command events */
+	uint32_t		events_p2;	/* post data events */
+	MV_NFC_PIO_RW_MODE	rw;
+};
+
+static struct orion_nfc_cmd_info orion_nfc_cmd_info_lkup[MV_NFC_CMD_COUNT] = {
+	/* Phase 1 interrupts */			/* Phase 2 interrupts */
+									/* Read/Write */  /* MV_NFC_CMD_xxxxxx */
+	{(NFC_SR_RDDREQ_MASK),				(0),
+									MV_NFC_PIO_READ}, /* READ_ID */
+	{(NFC_SR_RDDREQ_MASK),				(0),
+									MV_NFC_PIO_READ}, /* READ_STATUS */
+	{(0),						(MV_NFC_STATUS_RDY | MV_NFC_STATUS_BBD),
+									MV_NFC_PIO_NONE}, /* ERASE */
+	{(0),						(0),
+									MV_NFC_PIO_NONE}, /* MULTIPLANE_ERASE */
+	{(0),						(MV_NFC_STATUS_RDY),
+									MV_NFC_PIO_NONE}, /* RESET */
+	{(0),						(0),
+									MV_NFC_PIO_READ}, /* CACHE_READ_SEQ */
+	{(0),						(0),
+									MV_NFC_PIO_READ}, /* CACHE_READ_RAND */
+	{(0),						(0),
+									MV_NFC_PIO_NONE}, /* EXIT_CACHE_READ */
+	{(0),						(0),
+									MV_NFC_PIO_READ}, /* CACHE_READ_START */
+	{(NFC_SR_RDDREQ_MASK | NFC_SR_UNCERR_MASK),	(0),
+									MV_NFC_PIO_READ}, /* READ_MONOLITHIC */
+	{(0),						(0),
+									MV_NFC_PIO_READ}, /* READ_MULTIPLE */
+	{(NFC_SR_RDDREQ_MASK | NFC_SR_UNCERR_MASK),	(0),
+									MV_NFC_PIO_READ}, /* READ_NAKED */
+	{(NFC_SR_RDDREQ_MASK | NFC_SR_UNCERR_MASK),	(0),
+									MV_NFC_PIO_READ}, /* READ_LAST_NAKED */
+	{(0),						(0),
+									MV_NFC_PIO_NONE}, /* READ_DISPATCH */
+	{(MV_NFC_STATUS_WRD_REQ),			(MV_NFC_STATUS_RDY | MV_NFC_STATUS_BBD),
+									MV_NFC_PIO_WRITE},/* WRITE_MONOLITHIC */
+	{(0),						(0),
+									MV_NFC_PIO_WRITE},/* WRITE_MULTIPLE */
+	{(MV_NFC_STATUS_WRD_REQ),			(MV_NFC_STATUS_PAGED),
+									MV_NFC_PIO_WRITE},/* WRITE_NAKED */
+	{(0),						(0),
+									MV_NFC_PIO_WRITE},/* WRITE_LAST_NAKED */
+	{(0),						(0),
+									MV_NFC_PIO_NONE}, /* WRITE_DISPATCH */
+	{(MV_NFC_STATUS_CMDD),				(0),
+									MV_NFC_PIO_NONE}, /* WRITE_DISPATCH_START */
+	{(0),						(MV_NFC_STATUS_RDY | MV_NFC_STATUS_BBD),
+									MV_NFC_PIO_NONE}, /* WRITE_DISPATCH_END */
+};
+
+static int prepare_read_prog_cmd(struct orion_nfc_info *info,
+			int column, int page_addr)
+{
+	MV_U32 size;
+
+	if (mvNfcFlashPageSizeGet(&info->nfcCtrl, &size, &info->data_size)
+	    != MV_OK)
+		return -EINVAL;
+
+	return 0;
+}
+int orion_nfc_wait_for_completion_timeout(struct orion_nfc_info *info, int timeout)
+{
+	return wait_for_completion_timeout(&info->cmd_complete, timeout);
+
+}
+
+#ifdef CONFIG_MV_INCLUDE_PDMA
+static void orion_nfc_data_dma_irq(int irq, void *data)
+{
+	struct orion_nfc_info *info = data;
+	uint32_t dcsr, intr;
+	int channel = info->nfcCtrl.dataChanHndl.chanNumber;
+
+	intr = MV_REG_READ(PDMA_INTR_CAUSE_REG);
+	dcsr = MV_REG_READ(PDMA_CTRL_STATUS_REG(channel));
+	MV_REG_WRITE(PDMA_CTRL_STATUS_REG(channel), dcsr);
+
+	NFC_DPRINT((PRINT_LVL "orion_nfc_data_dma_irq(0x%x, 0x%x) - 1.\n", dcsr, intr));
+
+	if (info->chained_cmd) {
+		if (dcsr & DCSR_BUSERRINTR) {
+			info->retcode = ERR_DMABUSERR;
+			complete(&info->cmd_complete);
+		}
+		if ((info->state == STATE_DMA_READING) && (dcsr & DCSR_ENDINTR)) {
+			info->state = STATE_READY;
+			complete(&info->cmd_complete);
+		}
+		return;
+	}
+
+	if (dcsr & DCSR_BUSERRINTR) {
+		info->retcode = ERR_DMABUSERR;
+		complete(&info->cmd_complete);
+	}
+
+	if (info->state == STATE_DMA_WRITING) {
+		info->state = STATE_DMA_DONE;
+		mvNfcIntrSet(&info->nfcCtrl,  MV_NFC_STATUS_BBD | MV_NFC_STATUS_RDY , MV_TRUE);
+	} else {
+		info->state = STATE_READY;
+		complete(&info->cmd_complete);
+	}
+
+	return;
+}
+#endif
+
+static irqreturn_t orion_nfc_irq_pio(int irq, void *devid)
+{
+	struct orion_nfc_info *info = devid;
+
+	/* Disable all interrupts */
+	mvNfcIntrSet(&info->nfcCtrl, 0xFFF, MV_FALSE);
+
+	/* Clear the interrupt and pass the status UP */
+	info->dscr = MV_REG_READ(NFC_STATUS_REG);
+	NFC_DPRINT((PRINT_LVL ">>> orion_nfc_irq_pio(0x%x)\n", info->dscr));
+	MV_REG_WRITE(NFC_STATUS_REG, info->dscr);
+	complete(&info->cmd_complete);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_MV_INCLUDE_PDMA
+static irqreturn_t orion_nfc_irq_dma(int irq, void *devid)
+{
+	struct orion_nfc_info *info = devid;
+	unsigned int status;
+
+	status = MV_REG_READ(NFC_STATUS_REG);
+
+	NFC_DPRINT((PRINT_LVL "orion_nfc_irq_dma(0x%x) - 1.\n", status));
+
+	if (!info->chained_cmd) {
+		if (status & (NFC_SR_RDDREQ_MASK | NFC_SR_UNCERR_MASK)) {
+			if (status & NFC_SR_UNCERR_MASK)
+				info->retcode = ERR_DBERR;
+			mvNfcIntrSet(&info->nfcCtrl, NFC_SR_RDDREQ_MASK | NFC_SR_UNCERR_MASK, MV_FALSE);
+			if (info->use_dma) {
+				info->state = STATE_DMA_READING;
+				mvNfcReadWrite(&info->nfcCtrl, info->cmd,
+						(MV_U32 *)info->data_buff, info->data_buff_phys);
+			} else {
+				info->state = STATE_PIO_READING;
+				complete(&info->cmd_complete);
+			}
+		} else if (status & NFC_SR_WRDREQ_MASK) {
+			mvNfcIntrSet(&info->nfcCtrl, NFC_SR_WRDREQ_MASK, MV_FALSE);
+			if (info->use_dma) {
+				info->state = STATE_DMA_WRITING;
+				NFC_DPRINT((PRINT_LVL "Calling mvNfcReadWrite().\n"));
+				if (mvNfcReadWrite(&info->nfcCtrl, info->cmd,
+						   (MV_U32 *)info->data_buff,
+						   info->data_buff_phys)
+				    != MV_OK)
+					pr_err("mvNfcReadWrite() failed.\n");
+			} else {
+				info->state = STATE_PIO_WRITING;
+				complete(&info->cmd_complete);
+			}
+		} else if (status & (NFC_SR_BBD_MASK | MV_NFC_CS0_CMD_DONE_INT |
+				     NFC_SR_RDY0_MASK | MV_NFC_CS1_CMD_DONE_INT |
+				     NFC_SR_RDY1_MASK)) {
+			if (status & NFC_SR_BBD_MASK)
+				info->retcode = ERR_BBD;
+			mvNfcIntrSet(&info->nfcCtrl,  MV_NFC_STATUS_BBD |
+					MV_NFC_STATUS_CMDD | MV_NFC_STATUS_RDY,
+					MV_FALSE);
+			info->state = STATE_READY;
+			complete(&info->cmd_complete);
+		}
+	} else if (status & (NFC_SR_BBD_MASK | NFC_SR_RDY0_MASK |
+				NFC_SR_RDY1_MASK | NFC_SR_UNCERR_MASK)) {
+		if (status & (NFC_SR_BBD_MASK | NFC_SR_UNCERR_MASK))
+			info->retcode = ERR_DBERR;
+		mvNfcIntrSet(&info->nfcCtrl, MV_NFC_STATUS_BBD |
+				MV_NFC_STATUS_RDY | MV_NFC_STATUS_CMDD,
+				MV_FALSE);
+		if ((info->state != STATE_DMA_READING) ||
+		    (info->retcode == ERR_DBERR)) {
+			info->state = STATE_READY;
+			complete(&info->cmd_complete);
+		}
+	}
+	MV_REG_WRITE(NFC_STATUS_REG, status);
+	return IRQ_HANDLED;
+}
+#endif
+
+static int orion_nfc_cmd_prepare(struct orion_nfc_info *info,
+		MV_NFC_MULTI_CMD *descInfo, u32 *numCmds)
+{
+	MV_U32	i;
+	MV_NFC_MULTI_CMD *currDesc;
+
+	currDesc = descInfo;
+	if (info->cmd == MV_NFC_CMD_READ_MONOLITHIC) {
+		/* Main Chunks */
+		for (i = 0; i < CHUNK_CNT; i++) {
+			if (i == 0)
+				currDesc->cmd = MV_NFC_CMD_READ_MONOLITHIC;
+			else if ((i == (CHUNK_CNT-1)) && (LST_CHUNK_SZ == 0) && (LST_CHUNK_SPR == 0))
+				currDesc->cmd = MV_NFC_CMD_READ_LAST_NAKED;
+			else
+				currDesc->cmd = MV_NFC_CMD_READ_NAKED;
+
+			currDesc->pageAddr = info->page_addr;
+			currDesc->pageCount = 1;
+			currDesc->virtAddr = (MV_U32 *)(info->data_buff + (i * CHUNK_SZ));
+			currDesc->physAddr = info->data_buff_phys + (i * CHUNK_SZ);
+			currDesc->length = (CHUNK_SZ + CHUNK_SPR);
+
+			if (CHUNK_SPR == 0)
+				currDesc->numSgBuffs = 1;
+			else {
+				currDesc->numSgBuffs = 2;
+				currDesc->sgBuffAddr[0] = (info->data_buff_phys + (i * CHUNK_SZ));
+				currDesc->sgBuffAddrVirt[0] = (MV_U32 *)(info->data_buff + (i * CHUNK_SZ));
+				currDesc->sgBuffSize[0] = CHUNK_SZ;
+				currDesc->sgBuffAddr[1] = (info->data_buff_phys + (CHUNK_SZ * CHUNK_CNT) +
+										LST_CHUNK_SZ + (i * CHUNK_SPR));
+				currDesc->sgBuffAddrVirt[1] = (MV_U32 *)(info->data_buff + (CHUNK_SZ * CHUNK_CNT) +
+										LST_CHUNK_SZ + (i * CHUNK_SPR));
+				currDesc->sgBuffSize[1] = CHUNK_SPR;
+			}
+
+			currDesc++;
+		}
+
+		/* Last chunk if existing */
+		if ((LST_CHUNK_SZ != 0) || (LST_CHUNK_SPR != 0)) {
+			currDesc->cmd = MV_NFC_CMD_READ_LAST_NAKED;
+			currDesc->pageAddr = info->page_addr;
+			currDesc->pageCount = 1;
+			currDesc->length = (LST_CHUNK_SPR + LST_CHUNK_SZ);
+
+			if ((LST_CHUNK_SZ == 0) && (LST_CHUNK_SPR != 0)) {		/* Spare only */
+				currDesc->virtAddr = (MV_U32 *)(info->data_buff + (CHUNK_SZ * CHUNK_CNT) +
+									LST_CHUNK_SZ + (CHUNK_SPR * CHUNK_CNT));
+				currDesc->physAddr = info->data_buff_phys + (CHUNK_SZ * CHUNK_CNT) +
+									LST_CHUNK_SZ + (CHUNK_SPR * CHUNK_CNT);
+				currDesc->numSgBuffs = 1;
+				currDesc->length = LST_CHUNK_SPR;
+			} else if ((LST_CHUNK_SZ != 0) && (LST_CHUNK_SPR == 0)) {	/* Data only */
+				currDesc->virtAddr = (MV_U32 *)(info->data_buff + (CHUNK_SZ * CHUNK_CNT));
+				currDesc->physAddr = info->data_buff_phys + (CHUNK_SZ * CHUNK_CNT);
+				currDesc->numSgBuffs = 1;
+				currDesc->length = LST_CHUNK_SZ;
+			} else {	/* Both spare and data */
+				currDesc->numSgBuffs = 2;
+				currDesc->sgBuffAddr[0] = (info->data_buff_phys + (CHUNK_SZ * CHUNK_CNT));
+				currDesc->sgBuffAddrVirt[0] = (MV_U32 *)(info->data_buff + (CHUNK_SZ * CHUNK_CNT));
+				currDesc->sgBuffSize[0] = LST_CHUNK_SZ;
+				currDesc->sgBuffAddr[1] = (info->data_buff_phys + (CHUNK_SZ * CHUNK_CNT) +
+										LST_CHUNK_SZ + (CHUNK_SPR * CHUNK_CNT));
+				currDesc->sgBuffAddrVirt[1] =  (MV_U32 *)(info->data_buff + (CHUNK_SZ * CHUNK_CNT) +
+										LST_CHUNK_SZ + (CHUNK_SPR * CHUNK_CNT));
+				currDesc->sgBuffSize[1] = LST_CHUNK_SPR;
+			}
+			currDesc++;
+		}
+
+		*numCmds = CHUNK_CNT + (((LST_CHUNK_SZ) || (LST_CHUNK_SPR)) ? 1 : 0);
+	} else if (info->cmd == MV_NFC_CMD_WRITE_MONOLITHIC) {
+		/* Write Dispatch */
+		currDesc->cmd = MV_NFC_CMD_WRITE_DISPATCH_START;
+		currDesc->pageAddr = info->page_addr;
+		currDesc->pageCount = 1;
+		currDesc->numSgBuffs = 1;
+		currDesc->length = 0;
+		currDesc++;
+
+		/* Main Chunks */
+		for (i = 0; i < CHUNK_CNT; i++) {
+			currDesc->cmd = MV_NFC_CMD_WRITE_NAKED;
+			currDesc->pageAddr = info->page_addr;
+			currDesc->pageCount = 1;
+			currDesc->virtAddr = (MV_U32 *)(info->data_buff + (i * CHUNK_SZ));
+			currDesc->physAddr = info->data_buff_phys + (i * CHUNK_SZ);
+			currDesc->length = (CHUNK_SZ + CHUNK_SPR);
+
+			if (CHUNK_SPR == 0)
+				currDesc->numSgBuffs = 1;
+			else {
+				currDesc->numSgBuffs = 2;
+				currDesc->sgBuffAddr[0] = (info->data_buff_phys + (i * CHUNK_SZ));
+				currDesc->sgBuffAddrVirt[0] = (MV_U32 *)(info->data_buff + (i * CHUNK_SZ));
+				currDesc->sgBuffSize[0] = CHUNK_SZ;
+				currDesc->sgBuffAddr[1] = (info->data_buff_phys + (CHUNK_SZ * CHUNK_CNT) +
+										LST_CHUNK_SZ + (i * CHUNK_SPR));
+				currDesc->sgBuffAddrVirt[1] = (MV_U32 *)(info->data_buff + (CHUNK_SZ * CHUNK_CNT) +
+										LST_CHUNK_SZ + (i * CHUNK_SPR));
+				currDesc->sgBuffSize[1] = CHUNK_SPR;
+			}
+
+			currDesc++;
+		}
+
+		/* Last chunk if existing */
+		if ((LST_CHUNK_SZ != 0) || (LST_CHUNK_SPR != 0)) {
+			currDesc->cmd = MV_NFC_CMD_WRITE_NAKED;
+			currDesc->pageAddr = info->page_addr;
+			currDesc->pageCount = 1;
+			currDesc->length = (LST_CHUNK_SZ + LST_CHUNK_SPR);
+
+			if ((LST_CHUNK_SZ == 0) && (LST_CHUNK_SPR != 0)) {		/* Spare only */
+				currDesc->virtAddr = (MV_U32 *)(info->data_buff + (CHUNK_SZ * CHUNK_CNT) +
+									LST_CHUNK_SZ + (CHUNK_SPR * CHUNK_CNT));
+				currDesc->physAddr = info->data_buff_phys + (CHUNK_SZ * CHUNK_CNT) +
+									LST_CHUNK_SZ + (CHUNK_SPR * CHUNK_CNT);
+				currDesc->numSgBuffs = 1;
+			} else if ((LST_CHUNK_SZ != 0) && (LST_CHUNK_SPR == 0)) {	/* Data only */
+				currDesc->virtAddr = (MV_U32 *)(info->data_buff + (CHUNK_SZ * CHUNK_CNT));
+				currDesc->physAddr = info->data_buff_phys + (CHUNK_SZ * CHUNK_CNT);
+				currDesc->numSgBuffs = 1;
+			} else {	/* Both spare and data */
+				currDesc->numSgBuffs = 2;
+				currDesc->sgBuffAddr[0] = (info->data_buff_phys + (CHUNK_SZ * CHUNK_CNT));
+				currDesc->sgBuffAddrVirt[0] = (MV_U32 *)(info->data_buff + (CHUNK_SZ * CHUNK_CNT));
+				currDesc->sgBuffSize[0] = LST_CHUNK_SZ;
+				currDesc->sgBuffAddr[1] = (info->data_buff_phys + (CHUNK_SZ * CHUNK_CNT) +
+										LST_CHUNK_SZ + (CHUNK_SPR * CHUNK_CNT));
+				currDesc->sgBuffAddrVirt[1] = (MV_U32 *)(info->data_buff + (CHUNK_SZ * CHUNK_CNT) +
+										LST_CHUNK_SZ + (CHUNK_SPR * CHUNK_CNT));
+				currDesc->sgBuffSize[1] = LST_CHUNK_SPR;
+			}
+			currDesc++;
+		}
+
+		/* Write Dispatch END */
+		currDesc->cmd = MV_NFC_CMD_WRITE_DISPATCH_END;
+		currDesc->pageAddr = info->page_addr;
+		currDesc->pageCount = 1;
+		currDesc->numSgBuffs = 1;
+		currDesc->length = 0;
+
+		*numCmds = CHUNK_CNT + (((LST_CHUNK_SZ) || (LST_CHUNK_SPR)) ? 1 : 0) + 2;
+	} else {
+		descInfo[0].cmd = info->cmd;
+		descInfo[0].pageAddr = info->page_addr;
+		descInfo[0].pageCount = 1;
+		descInfo[0].virtAddr = (MV_U32 *)info->data_buff;
+		descInfo[0].physAddr = info->data_buff_phys;
+		descInfo[0].numSgBuffs = 1;
+		descInfo[0].length = info->data_size;
+		*numCmds = 1;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_MV_INCLUDE_PDM
+static int orion_nfc_do_cmd_dma(struct orion_nfc_info *info,
+		uint32_t event)
+{
+	uint32_t ndcr;
+	int ret, timeout = CHIP_DELAY_TIMEOUT;
+	MV_STATUS status;
+	MV_U32	numCmds;
+
+	/* static allocation to avoid stack overflow*/
+	static MV_NFC_MULTI_CMD descInfo[NFC_MAX_NUM_OF_DESCR];
+
+	/* Clear all status bits. */
+	MV_REG_WRITE(NFC_STATUS_REG, NFC_SR_MASK);
+
+	mvNfcIntrSet(&info->nfcCtrl, event, MV_TRUE);
+
+	NFC_DPRINT((PRINT_LVL "\nAbout to issue dma cmd %d (cs %d) - 0x%x.\n",
+				info->cmd, info->nfcCtrl.currCs,
+				MV_REG_READ(NFC_CONTROL_REG)));
+	if ((info->cmd == MV_NFC_CMD_READ_MONOLITHIC) ||
+	    (info->cmd == MV_NFC_CMD_READ_ID) ||
+	    (info->cmd == MV_NFC_CMD_READ_STATUS))
+		info->state = STATE_DMA_READING;
+	else
+		info->state = STATE_CMD_HANDLE;
+	info->chained_cmd = 1;
+
+	orion_nfc_cmd_prepare(info, descInfo, &numCmds);
+
+	status = mvNfcCommandMultiple(&info->nfcCtrl, descInfo, numCmds);
+	if (status != MV_OK) {
+		pr_err("nfcCmdMultiple() failed for cmd %d (%d).\n",
+				info->cmd, status);
+		goto fail;
+	}
+
+	NFC_DPRINT((PRINT_LVL "After issue command %d - 0x%x.\n",
+				info->cmd, MV_REG_READ(NFC_STATUS_REG)));
+
+	ret = orion_nfc_wait_for_completion_timeout(info, timeout);
+	if (!ret) {
+		pr_err("Cmd %d execution timed out (0x%x) - cs %d.\n",
+				info->cmd, MV_REG_READ(NFC_STATUS_REG),
+				info->nfcCtrl.currCs);
+		info->retcode = ERR_CMD_TO;
+		goto fail_stop;
+	}
+
+	mvNfcIntrSet(&info->nfcCtrl, event | MV_NFC_STATUS_CMDD, MV_FALSE);
+
+	while (MV_PDMA_CHANNEL_STOPPED !=
+			mvPdmaChannelStateGet(&info->nfcCtrl.dataChanHndl)) {
+		if (info->retcode == ERR_NONE)
+			BUG();
+
+	}
+
+	return 0;
+
+fail_stop:
+	ndcr = MV_REG_READ(NFC_CONTROL_REG);
+	MV_REG_WRITE(NFC_CONTROL_REG, ndcr & ~NFC_CTRL_ND_RUN_MASK);
+	udelay(10);
+fail:
+	return -ETIMEDOUT;
+}
+#endif
+
+static int orion_nfc_error_check(struct orion_nfc_info *info)
+{
+	switch (info->cmd) {
+	case MV_NFC_CMD_ERASE:
+	case MV_NFC_CMD_MULTIPLANE_ERASE:
+	case MV_NFC_CMD_WRITE_MONOLITHIC:
+	case MV_NFC_CMD_WRITE_MULTIPLE:
+	case MV_NFC_CMD_WRITE_NAKED:
+	case MV_NFC_CMD_WRITE_LAST_NAKED:
+	case MV_NFC_CMD_WRITE_DISPATCH:
+	case MV_NFC_CMD_WRITE_DISPATCH_START:
+	case MV_NFC_CMD_WRITE_DISPATCH_END:
+		if (info->dscr & (MV_NFC_CS0_BAD_BLK_DETECT_INT | MV_NFC_CS1_BAD_BLK_DETECT_INT)) {
+			info->retcode = ERR_BBD;
+			return 1;
+		}
+		break;
+
+	case MV_NFC_CMD_CACHE_READ_SEQ:
+	case MV_NFC_CMD_CACHE_READ_RAND:
+	case MV_NFC_CMD_EXIT_CACHE_READ:
+	case MV_NFC_CMD_CACHE_READ_START:
+	case MV_NFC_CMD_READ_MONOLITHIC:
+	case MV_NFC_CMD_READ_MULTIPLE:
+	case MV_NFC_CMD_READ_NAKED:
+	case MV_NFC_CMD_READ_LAST_NAKED:
+	case MV_NFC_CMD_READ_DISPATCH:
+		if (info->dscr & MV_NFC_UNCORR_ERR_INT) {
+			info->dscr = ERR_DBERR;
+			return 1;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	info->retcode = ERR_NONE;
+	return 0;
+}
+
+/* ==================================================================================================
+ *           STEP  1		|   STEP  2   |   STEP  3   |   STEP  4   |   STEP  5   |   STEP 6
+ *           COMMAND		|   WAIT FOR  |   CHK ERRS  |     PIO     |   WAIT FOR  |   CHK ERRS
+ * =========================|=============|=============|=============|=============|============
+ *   READ MONOLITHIC		|   RDDREQ    |   UNCERR    |    READ     |     NONE    |    NONE
+ *   READ NAKED				|   RDDREQ    |   UNCERR    |    READ     |     NONE    |    NONE
+ *   READ LAST NAKED		|   RDDREQ    |   UNCERR    |    READ     |     NONE    |    NONE
+ *   WRITE MONOLITHIC		|   WRDREQ    |    NONE     |    WRITE    |     RDY     |    BBD
+ *   WRITE DISPATCH START	|   CMDD      |    NONE     |    NONE     |     NONE    |    NONE
+ *   WRITE NAKED			|   WRDREQ    |    NONE     |    WRITE    |     PAGED   |    NONE
+ *   WRITE DISPATCH END		|   NONE      |    NONE     |    NONE     |     RDY     |    BBD
+ *   ERASE					|   NONE      |    NONE     |    NONE     |     RDY     |    BBD
+ *   READ ID				|   RDDREQ    |    NONE     |    READ     |     NONE    |    NONE
+ *   READ STAT				|   RDDREQ    |    NONE     |    READ     |     NONE    |    NONE
+ *   RESET					|   NONE      |    NONE     |    NONE     |     RDY     |    NONE
+ */
+static int orion_nfc_do_cmd_pio(struct orion_nfc_info *info)
+{
+	int timeout = CHIP_DELAY_TIMEOUT;
+	MV_STATUS status;
+	MV_U32	i, j, numCmds;
+	MV_U32 ndcr;
+
+	/* static allocation to avoid stack overflow */
+	static MV_NFC_MULTI_CMD descInfo[NFC_MAX_NUM_OF_DESCR];
+
+	/* Clear all status bits */
+	MV_REG_WRITE(NFC_STATUS_REG, NFC_SR_MASK);
+
+	NFC_DPRINT((PRINT_LVL "\nStarting PIO command %d (cs %d) - NDCR=0x%08x\n",
+		   info->cmd, info->nfcCtrl.currCs, MV_REG_READ(NFC_CONTROL_REG)));
+
+	/* Build the chain of commands */
+	orion_nfc_cmd_prepare(info, descInfo, &numCmds);
+	NFC_DPRINT((PRINT_LVL "Prepared %d commands in sequence\n", numCmds));
+
+	/* Execute the commands */
+	for (i = 0; i < numCmds; i++) {
+		/* Verify that command is supported in PIO mode */
+		if ((orion_nfc_cmd_info_lkup[descInfo[i].cmd].events_p1 == 0) &&
+		    (orion_nfc_cmd_info_lkup[descInfo[i].cmd].events_p2 == 0)) {
+			goto fail_stop;
+		}
+
+		/* clear the return code */
+		info->dscr = 0;
+
+		/* STEP1: Initiate the command */
+		NFC_DPRINT((PRINT_LVL "About to issue Descriptor #%d (command %d, pageaddr 0x%x, length %d).\n",
+			    i, descInfo[i].cmd, descInfo[i].pageAddr, descInfo[i].length));
+		status = mvNfcCommandPio(&info->nfcCtrl, &descInfo[i], MV_FALSE);
+		if (status != MV_OK) {
+			pr_err("mvNfcCommandPio() failed for command %d (%d).\n", descInfo[i].cmd, status);
+			goto fail_stop;
+		}
+		NFC_DPRINT((PRINT_LVL "After issue command %d (NDSR=0x%x)\n",
+			   descInfo[i].cmd, MV_REG_READ(NFC_STATUS_REG)));
+
+		/* Check if command phase interrupts events are needed */
+		if (orion_nfc_cmd_info_lkup[descInfo[i].cmd].events_p1) {
+			/* Enable necessary interrupts for command phase */
+			NFC_DPRINT((PRINT_LVL "Enabling part1 interrupts (IRQs 0x%x)\n",
+				   orion_nfc_cmd_info_lkup[descInfo[i].cmd].events_p1));
+			mvNfcIntrSet(&info->nfcCtrl, orion_nfc_cmd_info_lkup[descInfo[i].cmd].events_p1, MV_TRUE);
+
+			/* STEP2: wait for interrupt */
+			if (!orion_nfc_wait_for_completion_timeout(info, timeout)) {
+				pr_err("command %d execution timed out (CS %d, NDCR=0x%x, NDSR=0x%x).\n",
+				       descInfo[i].cmd, info->nfcCtrl.currCs, MV_REG_READ(NFC_CONTROL_REG),
+				       MV_REG_READ(NFC_STATUS_REG));
+				info->retcode = ERR_CMD_TO;
+				goto fail_stop;
+			}
+
+			/* STEP3: Check for errors */
+			if (orion_nfc_error_check(info)) {
+				NFC_DPRINT((PRINT_LVL "Command level errors (DSCR=%08x, retcode=%d)\n",
+					   info->dscr, info->retcode));
+				goto fail_stop;
+			}
+		}
+
+		/* STEP4: PIO Read/Write data if needed */
+		if (descInfo[i].numSgBuffs > 1) {
+			for (j = 0; j < descInfo[i].numSgBuffs; j++) {
+				NFC_DPRINT((PRINT_LVL "Starting SG#%d PIO Read/Write (%d bytes, R/W mode %d)\n", j,
+					    descInfo[i].sgBuffSize[j],
+					    orion_nfc_cmd_info_lkup[descInfo[i].cmd].rw));
+				mvNfcReadWritePio(&info->nfcCtrl, descInfo[i].sgBuffAddrVirt[j],
+						  descInfo[i].sgBuffSize[j],
+						  orion_nfc_cmd_info_lkup[descInfo[i].cmd].rw);
+			}
+		} else {
+			NFC_DPRINT((PRINT_LVL "Starting nonSG PIO Read/Write (%d bytes, R/W mode %d)\n",
+				    descInfo[i].length, orion_nfc_cmd_info_lkup[descInfo[i].cmd].rw));
+			mvNfcReadWritePio(&info->nfcCtrl, descInfo[i].virtAddr,
+					  descInfo[i].length, orion_nfc_cmd_info_lkup[descInfo[i].cmd].rw);
+		}
+
+		/* check if data phase events are needed */
+		if (orion_nfc_cmd_info_lkup[descInfo[i].cmd].events_p2) {
+			/* Enable the RDY interrupt to close the transaction */
+			NFC_DPRINT((PRINT_LVL "Enabling part2 interrupts (IRQs 0x%x)\n",
+				   orion_nfc_cmd_info_lkup[descInfo[i].cmd].events_p2));
+			mvNfcIntrSet(&info->nfcCtrl, orion_nfc_cmd_info_lkup[descInfo[i].cmd].events_p2, MV_TRUE);
+
+			/* STEP5: Wait for transaction to finish */
+			if (!orion_nfc_wait_for_completion_timeout(info, timeout)) {
+				pr_err("command %d execution timed out (NDCR=0x%08x, NDSR=0x%08x, NDECCCTRL=0x%08x)\n",
+				       descInfo[i].cmd, MV_REG_READ(NFC_CONTROL_REG), MV_REG_READ(NFC_STATUS_REG),
+				       MV_REG_READ(NFC_ECC_CONTROL_REG));
+				info->retcode = ERR_DATA_TO;
+				goto fail_stop;
+			}
+
+			/* STEP6: Check for errors BB errors (in erase) */
+			if (orion_nfc_error_check(info)) {
+				NFC_DPRINT((PRINT_LVL "Data level errors (DSCR=0x%08x, retcode=%d)\n",
+					   info->dscr, info->retcode));
+				goto fail_stop;
+			}
+		}
+
+		/* Fallback - in case the NFC did not reach the idle state */
+		ndcr = MV_REG_READ(NFC_CONTROL_REG);
+		if (ndcr & NFC_CTRL_ND_RUN_MASK) {
+			NFC_DPRINT((PRINT_LVL "WRONG NFC STAUS: command %d, NDCR=0x%08x, NDSR=0x%08x, NDECCCTRL=0x%08x)\n",
+				   info->cmd, MV_REG_READ(NFC_CONTROL_REG), MV_REG_READ(NFC_STATUS_REG),
+				   MV_REG_READ(NFC_ECC_CONTROL_REG)));
+			MV_REG_WRITE(NFC_CONTROL_REG, (ndcr & ~NFC_CTRL_ND_RUN_MASK));
+		}
+	}
+
+	NFC_DPRINT((PRINT_LVL "Command done (NDCR=0x%08x, NDSR=0x%08x)\n",
+		   MV_REG_READ(NFC_CONTROL_REG), MV_REG_READ(NFC_STATUS_REG)));
+	info->retcode = ERR_NONE;
+
+	return 0;
+
+fail_stop:
+	ndcr = MV_REG_READ(NFC_CONTROL_REG);
+	if (ndcr & NFC_CTRL_ND_RUN_MASK) {
+		pr_err("WRONG NFC STAUS: command %d, NDCR=0x%08x, NDSR=0x%08x, NDECCCTRL=0x%08x)\n",
+		       info->cmd, MV_REG_READ(NFC_CONTROL_REG), MV_REG_READ(NFC_STATUS_REG),
+		       MV_REG_READ(NFC_ECC_CONTROL_REG));
+		MV_REG_WRITE(NFC_CONTROL_REG, (ndcr & ~NFC_CTRL_ND_RUN_MASK));
+	}
+	mvNfcIntrSet(&info->nfcCtrl, 0xFFF, MV_FALSE);
+	udelay(10);
+	return -ETIMEDOUT;
+}
+
+static int orion_nfc_dev_ready(struct mtd_info *mtd)
+{
+	return (MV_REG_READ(NFC_STATUS_REG) & (NFC_SR_RDY0_MASK | NFC_SR_RDY1_MASK)) ? 1 : 0;
+}
+
+static inline int is_buf_blank(uint8_t *buf, size_t len)
+{
+	for (; len > 0; len--)
+		if (*buf++ != 0xff)
+			return 0;
+	return 1;
+}
+
+static void orion_nfc_cmdfunc(struct mtd_info *mtd, unsigned command,
+				int column, int page_addr)
+{
+	struct orion_nfc_info *info = (struct orion_nfc_info *)((struct nand_chip *)mtd->priv)->priv;
+
+	info->data_size = 0;
+	info->state = STATE_READY;
+	info->chained_cmd = 0;
+	info->retcode = ERR_NONE;
+
+	init_completion(&info->cmd_complete);
+
+	switch (command) {
+	case NAND_CMD_READOOB:
+		info->buf_count = mtd->writesize + mtd->oobsize;
+		info->buf_start = mtd->writesize + column;
+		info->cmd = MV_NFC_CMD_READ_MONOLITHIC;
+		info->column = column;
+		info->page_addr = page_addr;
+		if (prepare_read_prog_cmd(info, column, page_addr))
+			break;
+
+		if (info->use_dma)
+#ifdef CONFIG_MV_INCLUDE_PDM
+			orion_nfc_do_cmd_dma(info, MV_NFC_STATUS_RDY | NFC_SR_UNCERR_MASK);
+#else
+			pr_err("DMA mode not supported!\n");
+#endif
+		else
+			orion_nfc_do_cmd_pio(info);
+
+		/* We only are OOB, so if the data has error, does not matter */
+		if (info->retcode == ERR_DBERR)
+			info->retcode = ERR_NONE;
+		break;
+
+	case NAND_CMD_READ0:
+		info->buf_start = column;
+		info->buf_count = mtd->writesize + mtd->oobsize;
+		memset(info->data_buff, 0xff, info->buf_count);
+		info->cmd = MV_NFC_CMD_READ_MONOLITHIC;
+		info->column = column;
+		info->page_addr = page_addr;
+
+		if (prepare_read_prog_cmd(info, column, page_addr))
+			break;
+
+		if (info->use_dma)
+#ifdef CONFIG_MV_INCLUDE_PDM
+			orion_nfc_do_cmd_dma(info, MV_NFC_STATUS_RDY | NFC_SR_UNCERR_MASK);
+#else
+			pr_err("DMA mode not supported!\n");
+#endif
+		else
+			orion_nfc_do_cmd_pio(info);
+
+		if (info->retcode == ERR_DBERR) {
+			/* for blank page (all 0xff), HW will calculate its ECC as
+			 * 0, which is different from the ECC information within
+			 * OOB, ignore such double bit errors
+			 */
+			if (is_buf_blank(info->data_buff, mtd->writesize))
+				info->retcode = ERR_NONE;
+			else
+				printk(PRINT_LVL "%s: retCode == ERR_DBERR\n", __func__);
+		}
+		break;
+	case NAND_CMD_SEQIN:
+		info->buf_start = column;
+		info->buf_count = mtd->writesize + mtd->oobsize;
+		memset(info->data_buff + mtd->writesize, 0xff, mtd->oobsize);
+
+		/* save column/page_addr for next CMD_PAGEPROG */
+		info->seqin_column = column;
+		info->seqin_page_addr = page_addr;
+		break;
+	case NAND_CMD_PAGEPROG:
+		info->column = info->seqin_column;
+		info->page_addr = info->seqin_page_addr;
+		info->cmd = MV_NFC_CMD_WRITE_MONOLITHIC;
+		if (prepare_read_prog_cmd(info,
+				info->seqin_column, info->seqin_page_addr)) {
+			pr_err("prepare_read_prog_cmd() failed.\n");
+			break;
+		}
+
+		if (info->use_dma)
+#ifdef CONFIG_MV_INCLUDE_PDM
+			orion_nfc_do_cmd_dma(info, MV_NFC_STATUS_RDY);
+#else
+			pr_err("DMA mode not supported!\n");
+#endif
+		else
+			orion_nfc_do_cmd_pio(info);
+
+		break;
+	case NAND_CMD_ERASE1:
+		info->column = 0;
+		info->page_addr = page_addr;
+		info->cmd = MV_NFC_CMD_ERASE;
+
+		if (info->use_dma)
+#ifdef CONFIG_MV_INCLUDE_PDM
+			orion_nfc_do_cmd_dma(info, MV_NFC_STATUS_BBD | MV_NFC_STATUS_RDY);
+#else
+			pr_err("DMA mode not supported!\n");
+#endif
+		else
+			orion_nfc_do_cmd_pio(info);
+
+		break;
+	case NAND_CMD_ERASE2:
+		break;
+	case NAND_CMD_READID:
+	case NAND_CMD_STATUS:
+		info->buf_start = 0;
+		info->buf_count = (command == NAND_CMD_READID) ?
+				info->read_id_bytes : 1;
+		info->data_size = 8;
+		info->column = 0;
+		info->page_addr = 0;
+		info->cmd = (command == NAND_CMD_READID) ?
+			MV_NFC_CMD_READ_ID : MV_NFC_CMD_READ_STATUS;
+
+		if (info->use_dma)
+#ifdef CONFIG_MV_INCLUDE_PDM
+			orion_nfc_do_cmd_dma(info, MV_NFC_STATUS_RDY);
+#else
+			pr_err("DMA mode not supported!\n");
+#endif
+		else
+			orion_nfc_do_cmd_pio(info);
+
+		break;
+	case NAND_CMD_RESET:
+#if 0
+		int ret = 0;
+
+		info->column = 0;
+		info->page_addr = 0;
+		info->cmd = MV_NFC_CMD_RESET;
+
+		if (info->use_dma)
+#ifdef CONFIG_MV_INCLUDE_PDM
+			ret = orion_nfc_do_cmd_dma(info, MV_NFC_STATUS_CMDD);
+#else
+			pr_err("DMA mode not supported!\n");
+#endif
+		else
+			ret = orion_nfc_do_cmd_pio(info);
+
+		if (ret == 0) {
+			int timeout = 2;
+			uint32_t ndcr;
+
+			while (timeout--) {
+				if (MV_REG_READ(NFC_STATUS_REG) & (NFC_SR_RDY0_MASK | NFC_SR_RDY1_MASK))
+					break;
+				msleep(10);
+			}
+
+			ndcr = MV_REG_READ(NFC_CONTROL_REG);
+			MV_REG_WRITE(NFC_CONTROL_REG, ndcr & ~NFC_CTRL_ND_RUN_MASK);
+		}
+#else
+#ifdef MTD_NAND_NFC_INIT_RESET
+		if (mvNfcReset() != MV_OK)
+			pr_err("Device reset failed.\n");
+#endif
+#endif
+		break;
+	default:
+		pr_err("non-supported command.\n");
+		break;
+	}
+
+	if (info->retcode == ERR_DBERR) {
+		pr_err("double bit error @ page %08x (%d)\n",
+		       page_addr, info->cmd);
+		info->retcode = ERR_NONE;
+	}
+}
+
+static uint8_t orion_nfc_read_byte(struct mtd_info *mtd)
+{
+	struct orion_nfc_info *info = (struct orion_nfc_info *)((struct nand_chip *)mtd->priv)->priv;
+	char retval = 0xFF;
+
+	if (info->buf_start < info->buf_count)
+		/* Has just send a new command? */
+		retval = info->data_buff[info->buf_start++];
+	return retval;
+}
+
+static u16 orion_nfc_read_word(struct mtd_info *mtd)
+{
+	struct orion_nfc_info *info = (struct orion_nfc_info *)((struct nand_chip *)mtd->priv)->priv;
+	u16 retval = 0xFFFF;
+
+	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
+		retval = *((u16 *)(info->data_buff+info->buf_start));
+		info->buf_start += 2;
+	} else
+		pr_err("\n%s: returning 0xFFFF (%d, %d).\n", __func__, info->buf_start, info->buf_count);
+
+	return retval;
+}
+
+static void orion_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	struct orion_nfc_info *info = (struct orion_nfc_info *)((struct nand_chip *)mtd->priv)->priv;
+	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
+
+	memcpy(buf, info->data_buff + info->buf_start, real_len);
+	info->buf_start += real_len;
+}
+
+static void orion_nfc_write_buf(struct mtd_info *mtd,
+		const uint8_t *buf, int len)
+{
+	struct orion_nfc_info *info = (struct orion_nfc_info *)((struct nand_chip *)mtd->priv)->priv;
+	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
+
+	memcpy(info->data_buff + info->buf_start, buf, real_len);
+	info->buf_start += real_len;
+}
+
+static void orion_nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+	struct orion_nfc_info *info = (struct orion_nfc_info *)((struct nand_chip *)mtd->priv)->priv;
+	mvNfcSelectChip(&info->nfcCtrl, MV_NFC_CS_0 + chip);
+	return;
+}
+
+static int orion_nfc_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
+{
+	struct orion_nfc_info *info = (struct orion_nfc_info *)((struct nand_chip *)mtd->priv)->priv;
+
+	/* orion_nfc_send_command has waited for command complete */
+	if (this->state == FL_WRITING || this->state == FL_ERASING) {
+		if (info->retcode == ERR_NONE)
+			return 0;
+		else {
+			/*
+			 * any error make it return 0x01 which will tell
+			 * the caller the erase and write fail
+			 */
+			return 0x01;
+		}
+	}
+
+	return 0;
+}
+
+static void orion_nfc_ecc_hwctl(struct mtd_info *mtd, int mode)
+{
+	return;
+}
+
+static int orion_nfc_ecc_calculate(struct mtd_info *mtd,
+		const uint8_t *dat, uint8_t *ecc_code)
+{
+	return 0;
+}
+
+static int orion_nfc_ecc_correct(struct mtd_info *mtd,
+		uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+	struct orion_nfc_info *info = (struct orion_nfc_info *)((struct nand_chip *)mtd->priv)->priv;
+	/*
+	 * Any error include ERR_SEND_CMD, ERR_DBERR, ERR_BUSERR, we
+	 * consider it as a ecc error which will tell the caller the
+	 * read fail We have distinguish all the errors, but the
+	 * nand_read_ecc only check this function return value
+	 */
+	if (info->retcode != ERR_NONE)
+		return -1;
+
+	return 0;
+}
+
+static int orion_nfc_detect_flash(struct orion_nfc_info *info)
+{
+	MV_U32 my_page_size;
+
+	mvNfcFlashPageSizeGet(&info->nfcCtrl, &my_page_size, NULL);
+
+	/* Translate page size to enum */
+	switch (my_page_size) {
+	case 512:
+		info->page_size = NFC_PAGE_512B;
+		break;
+
+	case 2048:
+		info->page_size = NFC_PAGE_2KB;
+		break;
+
+	case 4096:
+		info->page_size = NFC_PAGE_4KB;
+		break;
+
+	case 8192:
+		info->page_size = NFC_PAGE_8KB;
+		break;
+
+	case 16384:
+		info->page_size = NFC_PAGE_16KB;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	info->flash_width = info->nfc_width;
+	if (info->flash_width != 16 && info->flash_width != 8)
+		return -EINVAL;
+
+	/* calculate flash information */
+	info->read_id_bytes = (pg_sz[info->page_size] >= 2048) ? 4 : 2;
+
+	return 0;
+}
+
+/* the maximum possible buffer size for ganaged 8K page with OOB data
+ * is: 2 * (8K + Spare) ==> to be alligned allocate 5 MMU (4K) pages
+ */
+#define MAX_BUFF_SIZE	(PAGE_SIZE * 5)
+
+static int orion_nfc_init_buff(struct orion_nfc_info *info)
+{
+	struct platform_device *pdev = info->pdev;
+
+	if (info->use_dma == 0) {
+		info->data_buff = devm_kzalloc(&pdev->dev, MAX_BUFF_SIZE,
+					       GFP_KERNEL);
+		if (info->data_buff == NULL)
+			return -ENOMEM;
+		return 0;
+	}
+
+	info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE,
+				&info->data_buff_phys, GFP_KERNEL);
+	if (info->data_buff == NULL) {
+		dev_err(&pdev->dev, "failed to allocate dma buffer\n");
+		return -ENOMEM;
+	}
+	memset(info->data_buff, 0xff, MAX_BUFF_SIZE);
+
+#ifdef CONFIG_MV_INCLUDE_PDM
+	if (pxa_request_dma_intr("nand-data", info->nfcCtrl.dataChanHndl.chanNumber,
+			orion_nfc_data_dma_irq, info) < 0) {
+		dev_err(&pdev->dev, "failed to request PDMA IRQ\n");
+		return -ENOMEM;
+	}
+#endif
+	return 0;
+}
+
+static uint8_t mv_bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
+static uint8_t mv_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
+
+static struct nand_bbt_descr mvbbt_main_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION,
+	.offs =	8,
+	.len = 6,
+	.veroffs = 14,
+	.maxblocks = 8,		/* Last 8 blocks in each chip */
+	.pattern = mv_bbt_pattern
+};
+
+static struct nand_bbt_descr mvbbt_mirror_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION,
+	.offs =	8,
+	.len = 6,
+	.veroffs = 14,
+	.maxblocks = 8,		/* Last 8 blocks in each chip */
+	.pattern = mv_mirror_pattern
+};
+
+
+static int orion_nfc_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	struct nand_chip *chip = mtd->priv;
+	uint8_t buf[6] = {0, 0, 0, 0, 0, 0};
+	int block, ret = 0;
+	loff_t page_addr;
+
+	/* Get block number */
+	block = (int)(ofs >> chip->bbt_erase_shift);
+	if (chip->bbt)
+		chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
+	ret = nand_update_bbt(mtd, ofs);
+
+	if (ret == 0) {
+		/* Get address of the next block */
+		ofs += mtd->erasesize;
+		ofs &= ~(mtd->erasesize - 1);
+
+		/* Get start of oob in last page */
+		ofs -= mtd->oobsize;
+
+		page_addr = ofs;
+		do_div(page_addr, mtd->writesize);
+
+		orion_nfc_cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize,
+				page_addr);
+		orion_nfc_write_buf(mtd, buf, 6);
+		orion_nfc_cmdfunc(mtd, NAND_CMD_PAGEPROG, 0, page_addr);
+	}
+
+	return ret;
+}
+
+
+static void orion_nfc_init_nand(struct nand_chip *nand, struct orion_nfc_info *info)
+{
+
+	if (info->nfc_width == 16)
+		nand->bbt_options	= (NAND_BBT_USE_FLASH |  NAND_BUSWIDTH_16);
+	else
+		nand->bbt_options	= NAND_BBT_USE_FLASH;
+
+#if CONFIG_MTD_NAND_NFC_MLC_SUPPORT
+	nand->oobsize_ovrd	= ((CHUNK_SPR * CHUNK_CNT) + LST_CHUNK_SPR);
+	nand->bb_location	= BB_BYTE_POS;
+	nand->bb_page		= mvNfcBadBlockPageNumber(&info->nfcCtrl);
+#endif
+	nand->waitfunc		= orion_nfc_waitfunc;
+	nand->select_chip	= orion_nfc_select_chip;
+	nand->dev_ready		= orion_nfc_dev_ready;
+	nand->cmdfunc		= orion_nfc_cmdfunc;
+	nand->read_word		= orion_nfc_read_word;
+	nand->read_byte		= orion_nfc_read_byte;
+	nand->read_buf		= orion_nfc_read_buf;
+	nand->write_buf		= orion_nfc_write_buf;
+	nand->block_markbad	= orion_nfc_markbad;
+	nand->ecc.mode		= NAND_ECC_HW;
+	nand->ecc.hwctl		= orion_nfc_ecc_hwctl;
+	nand->ecc.calculate	= orion_nfc_ecc_calculate;
+	nand->ecc.correct	= orion_nfc_ecc_correct;
+	nand->ecc.size		= pg_sz[info->page_size];
+	nand->ecc.layout	= ECC_LAYOUT;
+	/* Driver has to set ecc.strength when using hardware ECC */
+	switch (info->ecc_type) {
+	case (MV_NFC_ECC_HAMMING):
+		nand->ecc.strength = 1;
+		break;
+	case (MV_NFC_ECC_BCH_2K):
+		nand->ecc.strength = 4;
+		break;
+	case (MV_NFC_ECC_BCH_1K):
+		nand->ecc.strength = 8;
+		break;
+	case (MV_NFC_ECC_BCH_704B):
+		nand->ecc.strength = 12;
+		break;
+	case (MV_NFC_ECC_BCH_512B):
+		nand->ecc.strength = 16;
+		break;
+	default:
+		nand->ecc.strength = 0;
+	}
+	nand->bbt_td		= &mvbbt_main_descr;
+	nand->bbt_md		= &mvbbt_mirror_descr;
+	nand->badblock_pattern	= BB_INFO;
+	nand->chip_delay	= 25;
+}
+
+static int mvCtrlNandClkSet(int nfc_clk_freq)
+{
+	/* NAND clock is derived from ecc_clk according to equation
+	 * nfc_clk_freq = ecc_clk / 2
+	 */
+	clk_set_rate(ecc_clk, nfc_clk_freq * 2);
+
+	/* Return calculated nand clock frequency */
+	nfc_clk_freq = clk_get_rate(ecc_clk) / 2;
+
+	return nfc_clk_freq;
+}
+
+static MV_STATUS mvSysNfcInit(MV_NFC_INFO *nfcInfo, MV_NFC_CTRL *nfcCtrl)
+{
+	struct MV_NFC_HAL_DATA halData;
+
+	memset(&halData, 0, sizeof(halData));
+
+	halData.mvCtrlNandClkSetFunction = mvCtrlNandClkSet;
+
+	return mvNfcInit(nfcInfo, nfcCtrl, &halData);
+}
+
+static int orion_nfc_probe(struct platform_device *pdev)
+{
+	struct orion_nfc_info *info;
+	struct nand_chip *nand;
+	struct mtd_info *mtd;
+	struct resource *r;
+	int nr_parts = 0;
+	int ret, irq;
+	char *stat[2] = {"Disabled", "Enabled"};
+	char *ecc_stat[] = {"Hamming", "BCH 4bit", "BCH 8bit", "BCH 12bit", "BCH 16bit", "No"};
+	struct mtd_part_parser_data ppdata = {};
+	struct mtd_partition *parts = NULL;
+	struct device_node *np = pdev->dev.of_node;
+	MV_NFC_INFO nfcInfo;
+	MV_STATUS status;
+	MV_U32 mv_nand_offset;
+
+	/* Allocate all data: mtd_info -> nand_chip -> orion_nfc_info */
+	mtd = devm_kzalloc(&pdev->dev, sizeof(struct mtd_info), GFP_KERNEL);
+	if (!mtd) {
+		dev_err(&pdev->dev, "failed to allocate memory for mtd_info\n");
+		return -ENOMEM;
+	}
+
+	info = devm_kzalloc(&pdev->dev, sizeof(struct orion_nfc_info),
+			    GFP_KERNEL);
+	if (!info) {
+		dev_err(&pdev->dev, "failed to allocate memory for orion_nfc_info\n");
+		return -ENOMEM;
+	}
+
+	nand = devm_kzalloc(&pdev->dev, sizeof(struct nand_chip), GFP_KERNEL);
+	if (!nand) {
+		dev_err(&pdev->dev, "failed to allocate memory for nand_chip\n");
+		return -ENOMEM;
+	}
+
+	ecc_clk = devm_clk_get(&pdev->dev, "ecc_clk");
+	if (IS_ERR(ecc_clk)) {
+		dev_err(&pdev->dev, "failed to get nand clock\n");
+		return PTR_ERR(ecc_clk);
+	}
+	ret = clk_prepare_enable(ecc_clk);
+	if (ret < 0)
+		goto fail_put_nand_clk;
+
+	if (of_device_is_compatible(np, "marvell,armada-375-nand")) {
+		info->aux_clk = devm_clk_get(&pdev->dev, "gateclk");
+		if (IS_ERR(info->aux_clk)) {
+			dev_err(&pdev->dev, "failed to get auxiliary clock\n");
+			ret = PTR_ERR(info->aux_clk);
+			goto fail_put_nand_clk;
+		}
+		ret = clk_prepare_enable(info->aux_clk);
+		if (ret < 0)
+			goto fail_put_clk;
+	}
+
+	/* Hookup pointers */
+	info->pdev = pdev;
+	nand->priv = info;
+	mtd->priv = nand;
+	mtd->name = DRIVER_NAME;
+	mtd->owner = THIS_MODULE;
+
+	/* Parse DT tree and acquire all necessary data */
+	ret = 0;
+	ret |= of_property_read_u32(np, "nfc,nfc-dma", &info->use_dma);
+	ret |= of_property_read_u32(np, "nfc,nfc-width", &info->nfc_width);
+	ret |= of_property_read_u32(np, "nfc,ecc-type", &info->ecc_type);
+	ret |= of_property_read_u32(np, "nfc,num-cs", &info->num_cs);
+	ret |= of_property_read_u32(np, "reg", &mv_nand_offset);
+
+	/* Determine the NAND Flash Controller mode for later usage */
+	info->nfc_mode = of_get_property(np, "nfc,nfc-mode", NULL);
+	if (!info->nfc_mode || (strncmp(info->nfc_mode, "normal", 6) &&
+	    strncmp(info->nfc_mode, "ganged", 6))) {
+		ret = -EINVAL;
+		goto fail_put_clk;
+	}
+
+	if (ret != 0) {
+		dev_err(&pdev->dev,
+		    "missing or bad NAND configuration from device tree\n");
+		ret = -ENOENT;
+		goto fail_put_clk;
+	}
+
+	/* Get IRQ from FDT and map it to the Linux IRQ number */
+	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (irq == 0) {
+		dev_err(&pdev->dev,
+		    "IRQ number missing in device tree or can't be mapped\n");
+		ret = -ENOENT;
+		goto fail_put_clk;
+	}
+	/* Save acquired IRQ mapping */
+	info->irq = irq;
+
+	dev_info(&pdev->dev, "Initialize HAL based NFC in %dbit mode with DMA %s using %s ECC\n",
+			  info->nfc_width, stat[info->use_dma], ecc_stat[info->ecc_type]);
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (r == NULL) {
+		dev_err(&pdev->dev, "no IO memory resource defined\n");
+		ret = -ENODEV;
+		goto fail_dispose_irq;
+	}
+
+	r = devm_request_mem_region(&pdev->dev, r->start, r->end - r->start + 1,
+				    pdev->name);
+	if (r == NULL) {
+		dev_err(&pdev->dev, "failed to request memory resource\n");
+		ret = -EBUSY;
+		goto fail_dispose_irq;
+	}
+
+	info->mmio_base = devm_ioremap(&pdev->dev, r->start,
+				       r->end - r->start + 1);
+	if (info->mmio_base == NULL) {
+		dev_err(&pdev->dev, "ioremap() failed\n");
+		ret = -ENODEV;
+		goto fail_dispose_irq;
+	}
+
+	info->mmio_phys_base = r->start;
+
+#ifdef CONFIG_MV_INCLUDE_PDMA
+	if (mvPdmaHalInit(MV_PDMA_MAX_CHANNELS_NUM) != MV_OK) {
+		dev_err(&pdev->dev, "mvPdmaHalInit() failed.\n");
+		goto fail_dispose_irq;
+	}
+#endif
+	/* Initialize NFC HAL */
+	mv_nand_base = (MV_U32)info->mmio_base;
+	nfcInfo.ioMode = (info->use_dma ? MV_NFC_PDMA_ACCESS : MV_NFC_PIO_ACCESS);
+	nfcInfo.eccMode = info->ecc_type;
+
+	if (strncmp(info->nfc_mode, "normal", 6) == 0)
+		nfcInfo.ifMode = ((info->nfc_width == 8) ? MV_NFC_IF_1X8 : MV_NFC_IF_1X16);
+	else
+		nfcInfo.ifMode = MV_NFC_IF_2X8;
+	nfcInfo.autoStatusRead = MV_FALSE;
+	nfcInfo.readyBypass = MV_FALSE;
+	nfcInfo.osHandle = NULL;
+	nfcInfo.regsPhysAddr = mv_nand_base - mv_nand_offset;
+#ifdef CONFIG_MV_INCLUDE_PDMA
+	nfcInfo.dataPdmaIntMask = MV_PDMA_END_OF_RX_INTR_EN | MV_PDMA_END_INTR_EN;
+	nfcInfo.cmdPdmaIntMask = 0x0;
+#endif
+
+	status = mvSysNfcInit(&nfcInfo, &info->nfcCtrl);
+	if (status != MV_OK) {
+		dev_err(&pdev->dev, "mvNfcInit() failed. Returned %d\n",
+				status);
+		goto fail_dispose_irq;
+	}
+
+	mvNfcSelectChip(&info->nfcCtrl, MV_NFC_CS_0);
+	mvNfcIntrSet(&info->nfcCtrl,  0xFFF, MV_FALSE);
+	mvNfcSelectChip(&info->nfcCtrl, MV_NFC_CS_1);
+	mvNfcIntrSet(&info->nfcCtrl,  0xFFF, MV_FALSE);
+	mvNfcSelectChip(&info->nfcCtrl, MV_NFC_CS_NONE);
+
+	ret = orion_nfc_init_buff(info);
+	if (ret)
+		goto fail_dispose_irq;
+
+	/* Clear all old events on the status register */
+	MV_REG_WRITE(NFC_STATUS_REG, MV_REG_READ(NFC_STATUS_REG));
+	if (info->use_dma)
+#ifdef CONFIG_MV_INCLUDE_PDMA
+		ret = request_irq(irq, orion_nfc_irq_dma, IRQF_DISABLED,
+				pdev->name, info);
+#else
+		pr_err("DMA mode not supported!\n");
+#endif
+	else
+		ret = request_irq(irq, orion_nfc_irq_pio, IRQF_DISABLED,
+				pdev->name, info);
+
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to request IRQ\n");
+		goto fail_free_buf;
+	}
+
+	ret = orion_nfc_detect_flash(info);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to detect flash\n");
+		ret = -ENODEV;
+		goto fail_free_irq;
+	}
+
+	orion_nfc_init_nand(nand, info);
+
+	if (nand->ecc.layout == NULL) {
+		dev_err(&pdev->dev, "Undefined ECC layout for selected nand device\n");
+		ret = -ENXIO;
+		goto fail_free_irq;
+	}
+
+	platform_set_drvdata(pdev, mtd);
+
+	if (nand_scan(mtd, info->num_cs)) {
+		dev_err(&pdev->dev, "failed to scan nand\n");
+		ret = -ENXIO;
+		goto fail_free_irq;
+	}
+
+	ppdata.of_node = pdev->dev.of_node;
+	ret = mtd_device_parse_register(mtd, NULL, &ppdata, parts,
+					nr_parts);
+	if (ret == 0)
+		return ret;
+	else
+		dev_err(&pdev->dev, "MTD device registration filed.\n");
+
+	nand_release(mtd);
+
+fail_free_irq:
+	free_irq(irq, info);
+fail_free_buf:
+	if (info->use_dma)
+		dma_free_coherent(&pdev->dev, info->data_buff_size,
+			info->data_buff, info->data_buff_phys);
+fail_dispose_irq:
+	irq_dispose_mapping(info->irq);
+fail_put_clk:
+	if (of_device_is_compatible(np, "marvell,armada-375-nand"))
+		clk_disable_unprepare(info->aux_clk);
+fail_put_nand_clk:
+	clk_disable_unprepare(ecc_clk);
+	return ret;
+}
+
+static int orion_nfc_remove(struct platform_device *pdev)
+{
+	struct mtd_info *mtd = platform_get_drvdata(pdev);
+	struct orion_nfc_info *info = (struct orion_nfc_info *)((struct nand_chip *)mtd->priv)->priv;
+	struct device_node *np = pdev->dev.of_node;
+
+	platform_set_drvdata(pdev, NULL);
+
+	clk_disable_unprepare(ecc_clk);
+	if (of_device_is_compatible(np, "marvell,armada-375-nand"))
+		clk_disable_unprepare(info->aux_clk);
+
+	/*del_mtd_device(mtd);*/
+	free_irq(info->irq, info);
+	irq_dispose_mapping(info->irq);
+
+	if (info->use_dma)
+		dma_free_writecombine(&pdev->dev, info->data_buff_size,
+				info->data_buff, info->data_buff_phys);
+
+	if (mtd)
+		mtd_device_unregister(mtd);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int orion_nfc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
+	struct orion_nfc_info *info = (struct orion_nfc_info *)((struct nand_chip *)mtd->priv)->priv;
+
+	if (info->state != STATE_READY) {
+		dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
+		return -EAGAIN;
+	}
+
+#ifdef CONFIG_MV_INCLUDE_PDMA
+	/* Store PDMA registers.	*/
+	info->pdmaDataLen = 128;
+	mvPdmaUnitStateStore(info->pdmaUnitData, &info->pdmaDataLen);
+#endif
+
+	/* Store NFC registers.	*/
+	info->nfcDataLen = 128;
+	mvNfcUnitStateStore(info->nfcUnitData, &info->nfcDataLen);
+#if 0
+	clk_disable(info->clk);
+#endif
+
+	return 0;
+}
+
+static int orion_nfc_resume(struct platform_device *pdev)
+{
+	struct mtd_info *mtd = (struct mtd_info *)platform_get_drvdata(pdev);
+	struct orion_nfc_info *info = (struct orion_nfc_info *)((struct nand_chip *)mtd->priv)->priv;
+	MV_U32	i;
+#if 0
+	clk_enable(info->clk);
+#endif
+#ifdef CONFIG_MV_INCLUDE_PDMA
+	/* restore PDMA registers */
+	for (i = 0; i < info->pdmaDataLen; i += 2)
+		MV_REG_WRITE(info->pdmaUnitData[i], info->pdmaUnitData[i+1]);
+#endif
+	/* Clear all NAND interrupts */
+	MV_REG_WRITE(NFC_STATUS_REG, MV_REG_READ(NFC_STATUS_REG));
+
+	/* restore NAND registers */
+	for (i = 0; i < info->nfcDataLen; i += 2)
+		MV_REG_WRITE(info->nfcUnitData[i], info->nfcUnitData[i+1]);
+
+	return 0;
+}
+#else
+#define orion_nfc_suspend	NULL
+#define orion_nfc_resume	NULL
+#endif
+
+static struct of_device_id mv_nfc_dt_ids[] = {
+	{ .compatible = "marvell,armada-nand", },
+	{ .compatible = "marvell,armada-375-nand", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mv_nfc_dt_ids);
+
+static struct platform_driver orion_nfc_driver = {
+	.driver = {
+		.name	= DRIVER_NAME,
+		.owner	= THIS_MODULE,
+		.of_match_table = of_match_ptr(mv_nfc_dt_ids),
+	},
+	.probe		= orion_nfc_probe,
+	.remove		= orion_nfc_remove,
+	.suspend	= orion_nfc_suspend,
+	.resume		= orion_nfc_resume,
+};
+
+static int __init orion_nfc_init(void)
+{
+	return platform_driver_register(&orion_nfc_driver);
+}
+module_init(orion_nfc_init);
+
+static void __exit orion_nfc_exit(void)
+{
+	platform_driver_unregister(&orion_nfc_driver);
+}
+module_exit(orion_nfc_exit);
+
+MODULE_ALIAS(DRIVER_NAME);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Armada NAND controller driver");
diff --git a/drivers/mtd/nand/mvebu_nfc/nand_nfc.h b/drivers/mtd/nand/mvebu_nfc/nand_nfc.h
new file mode 100644
index 000000000000..d871ce256799
--- /dev/null
+++ b/drivers/mtd/nand/mvebu_nfc/nand_nfc.h
@@ -0,0 +1,35 @@
+#ifndef __ASM_ARCH_ORION_NFC_H
+#define __ASM_ARCH_ORION_NFC_H
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include "mvCommon.h"
+#include "mvOs.h"
+#ifdef CONFIG_MV_INCLUDE_PDMA
+#include "pdma/mvPdma.h"
+#endif
+#include "hal/mvNfc.h"
+#include "hal/mvNfcRegs.h"
+
+enum nfc_page_size {
+	NFC_PAGE_512B = 0,
+	NFC_PAGE_2KB,
+	NFC_PAGE_4KB,
+	NFC_PAGE_8KB,
+	NFC_PAGE_16KB,
+	NFC_PAGE_SIZE_MAX_CNT
+};
+
+struct nfc_platform_data {
+	unsigned int		tclk;		/* Clock supplied to NFC */
+	unsigned int		nfc_width;	/* Width of NFC 16/8 bits */
+	unsigned int		num_devs;	/* Number of NAND devices
+						   (2 for ganged mode).   */
+	unsigned int		num_cs;		/* Number of NAND devices
+						   chip-selects.	  */
+	unsigned int		use_dma;	/* Enable/Disable DMA 1/0 */
+	MV_NFC_ECC_MODE		ecc_type;
+	struct mtd_partition	*parts;
+	unsigned int		nr_parts;
+};
+#endif /* __ASM_ARCH_ORION_NFC_H */
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 633db8830c13..7165b2bb9ac8 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2848,6 +2848,76 @@ static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
 	return crc;
 }
 
+/* Parse the Extended Parameter Page. */
+static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
+		struct nand_chip *chip, struct nand_onfi_params *p)
+{
+	struct onfi_ext_param_page *ep;
+	struct onfi_ext_section *s;
+	struct onfi_ext_ecc_info *ecc;
+	uint8_t *cursor;
+	int ret = -EINVAL;
+	int len;
+	int i;
+
+	len = le16_to_cpu(p->ext_param_page_length) * 16;
+	ep = kmalloc(len, GFP_KERNEL);
+	if (!ep)
+		return -ENOMEM;
+
+	/* Send our own NAND_CMD_PARAM. */
+	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
+
+	/* Use the Change Read Column command to skip the ONFI param pages. */
+	chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+			sizeof(*p) * p->num_of_param_pages , -1);
+
+	/* Read out the Extended Parameter Page. */
+	chip->read_buf(mtd, (uint8_t *)ep, len);
+	if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
+		!= le16_to_cpu(ep->crc))) {
+		pr_debug("fail in the CRC.\n");
+		goto ext_out;
+	}
+
+	/*
+	 * Check the signature.
+	 * Do not strictly follow the ONFI spec, maybe changed in future.
+	 */
+	if (strncmp(ep->sig, "EPPS", 4)) {
+		pr_debug("The signature is invalid.\n");
+		goto ext_out;
+	}
+
+	/* find the ECC section. */
+	cursor = (uint8_t *)(ep + 1);
+	for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
+		s = ep->sections + i;
+		if (s->type == ONFI_SECTION_TYPE_2)
+			break;
+		cursor += s->length * 16;
+	}
+	if (i == ONFI_EXT_SECTION_MAX) {
+		pr_debug("We can not find the ECC section.\n");
+		goto ext_out;
+	}
+
+	/* get the info we want. */
+	ecc = (struct onfi_ext_ecc_info *)cursor;
+
+	if (ecc->codeword_size) {
+		chip->ecc_strength_ds = ecc->ecc_bits;
+		chip->ecc_step_ds = 1 << ecc->codeword_size;
+	}
+
+	pr_info("ONFI extended param page detected.\n");
+	ret = 0;
+
+ext_out:
+	kfree(ep);
+	return ret;
+}
+
 /*
  * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
  */
@@ -2924,6 +2994,26 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
 	if (le16_to_cpu(p->features) & 1)
 		*busw = NAND_BUSWIDTH_16;
 
+	if (p->ecc_bits != 0xff) {
+		chip->ecc_strength_ds = p->ecc_bits;
+		chip->ecc_step_ds = 512;
+	} else if (chip->onfi_version >= 21 &&
+		(onfi_feature(chip) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
+
+		/*
+		 * The nand_flash_detect_ext_param_page() uses the
+		 * Change Read Column command which maybe not supported
+		 * by the chip->cmdfunc. So try to update the chip->cmdfunc
+		 * now. We do not replace user supplied command function.
+		 */
+		if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
+			chip->cmdfunc = nand_command_lp;
+
+		/* The Extended Parameter Page is supported since ONFI 2.1. */
+		if (nand_flash_detect_ext_param_page(mtd, chip, p))
+			pr_info("Failed to detect the extended param page.\n");
+	}
+
 	pr_info("ONFI flash detected\n");
 	return 1;
 }
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 683813a46a90..f505c37db0f1 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -114,6 +114,8 @@ struct nand_flash_dev nand_flash_ids[] = {
 	/* 8 Gigabit */
 	EXTENDED_ID_NAND("NAND 1GiB 1,8V 8-bit",  0xA3, 1024, LP_OPTIONS),
 	EXTENDED_ID_NAND("NAND 1GiB 3,3V 8-bit",  0xD3, 1024, LP_OPTIONS),
+	{"NAND 1GiB 3,3V 8-bit", { { .dev_id = 0x38 } },
+		  4096, 1024, 524288, LP_OPTIONS},
 	EXTENDED_ID_NAND("NAND 1GiB 1,8V 16-bit", 0xB3, 1024, LP_OPTIONS16),
 	EXTENDED_ID_NAND("NAND 1GiB 3,3V 16-bit", 0xC3, 1024, LP_OPTIONS16),
 
@@ -125,7 +127,16 @@ struct nand_flash_dev nand_flash_ids[] = {
 
 	/* 32 Gigabit */
 	EXTENDED_ID_NAND("NAND 4GiB 1,8V 8-bit",  0xA7, 4096, LP_OPTIONS),
+#ifdef CONFIG_MTD_NAND_NFC_MLC_SUPPORT
+	/* 32 Gigabit - wrongly detected due to changes in READ_ID decoding */
+	{"NAND 4GiB 3,3V 8-bit", { { .dev_id = 0xD7 } },
+		4096, 4096, 524288, LP_OPTIONS},
+	/* 32 Gigabit - wrongly detected due to changes in READ_ID decoding */
+	{"NAND 8GiB 3,3V 8-bit", { { .dev_id = 0x88 } },
+		8192, 8192, 2097152, LP_OPTIONS},
+#else
 	EXTENDED_ID_NAND("NAND 4GiB 3,3V 8-bit",  0xD7, 4096, LP_OPTIONS),
+#endif
 	EXTENDED_ID_NAND("NAND 4GiB 1,8V 16-bit", 0xB7, 4096, LP_OPTIONS16),
 	EXTENDED_ID_NAND("NAND 4GiB 3,3V 16-bit", 0xC7, 4096, LP_OPTIONS16),
 
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index dec80ca6a5ce..2a7a0b27ac38 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -7,6 +7,8 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
+ *
+ * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
  */
 
 #include <linux/kernel.h>
@@ -24,14 +26,30 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/of_mtd.h>
+
+#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
+#define ARCH_HAS_DMA
+#endif
 
+#ifdef ARCH_HAS_DMA
 #include <mach/dma.h>
+#endif
+
 #include <linux/platform_data/mtd-nand-pxa3xx.h>
 
+#define NAND_DEV_READY_TIMEOUT  50
 #define	CHIP_DELAY_TIMEOUT	(2 * HZ/10)
 #define NAND_STOP_DELAY		(2 * HZ/50)
 #define PAGE_CHUNK_SIZE		(2048)
 
+/*
+ * Define a buffer size for the initial command that detects the flash device:
+ * STATUS, READID and PARAM. The largest of these is the PARAM command,
+ * needing 256 bytes.
+ */
+#define INIT_BUFFER_SIZE	256
+
 /* registers and bit definitions */
 #define NDCR		(0x00) /* Control register */
 #define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
@@ -40,6 +58,7 @@
 #define NDPCR		(0x18) /* Page Count Register */
 #define NDBDR0		(0x1C) /* Bad Block Register 0 */
 #define NDBDR1		(0x20) /* Bad Block Register 1 */
+#define NDECCCTRL	(0x28) /* ECC control */
 #define NDDB		(0x40) /* Data Buffer */
 #define NDCB0		(0x48) /* Command Buffer0 */
 #define NDCB1		(0x4C) /* Command Buffer1 */
@@ -66,6 +85,9 @@
 #define NDCR_INT_MASK           (0xFFF)
 
 #define NDSR_MASK		(0xfff)
+#define NDSR_ERR_CNT_OFF	(16)
+#define NDSR_ERR_CNT_MASK       (0x1f)
+#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
 #define NDSR_RDY                (0x1 << 12)
 #define NDSR_FLASH_RDY          (0x1 << 11)
 #define NDSR_CS0_PAGED		(0x1 << 10)
@@ -74,15 +96,18 @@
 #define NDSR_CS1_CMDD		(0x1 << 7)
 #define NDSR_CS0_BBD		(0x1 << 6)
 #define NDSR_CS1_BBD		(0x1 << 5)
-#define NDSR_DBERR		(0x1 << 4)
-#define NDSR_SBERR		(0x1 << 3)
+#define NDSR_UNCORERR		(0x1 << 4)
+#define NDSR_CORERR		(0x1 << 3)
 #define NDSR_WRDREQ		(0x1 << 2)
 #define NDSR_RDDREQ		(0x1 << 1)
 #define NDSR_WRCMDREQ		(0x1)
 
+#define NDCB0_LEN_OVRD		(0x1 << 28)
 #define NDCB0_ST_ROW_EN         (0x1 << 26)
 #define NDCB0_AUTO_RS		(0x1 << 25)
 #define NDCB0_CSEL		(0x1 << 24)
+#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
+#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
 #define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
 #define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
 #define NDCB0_NC		(0x1 << 20)
@@ -93,6 +118,14 @@
 #define NDCB0_CMD1_MASK		(0xff)
 #define NDCB0_ADDR_CYC_SHIFT	(16)
 
+#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
+#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
+#define EXT_CMD_TYPE_READ	4 /* Read */
+#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
+#define EXT_CMD_TYPE_FINAL	3 /* Final command */
+#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
+#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */
+
 /* macros for registers read/write */
 #define nand_writel(info, off, val)	\
 	__raw_writel((val), (info)->mmio_base + (off))
@@ -105,9 +138,9 @@ enum {
 	ERR_NONE	= 0,
 	ERR_DMABUSERR	= -1,
 	ERR_SENDCMD	= -2,
-	ERR_DBERR	= -3,
+	ERR_UNCORERR	= -3,
 	ERR_BBERR	= -4,
-	ERR_SBERR	= -5,
+	ERR_CORERR	= -5,
 };
 
 enum {
@@ -123,14 +156,17 @@ enum {
 	STATE_READY,
 };
 
+enum pxa3xx_nand_variant {
+	PXA3XX_NAND_VARIANT_PXA,
+	PXA3XX_NAND_VARIANT_ARMADA370,
+};
+
 struct pxa3xx_nand_host {
 	struct nand_chip	chip;
-	struct pxa3xx_nand_cmdset *cmdset;
 	struct mtd_info         *mtd;
 	void			*info_data;
 
 	/* page size of attached chip */
-	unsigned int		page_size;
 	int			use_ecc;
 	int			cs;
 
@@ -139,10 +175,6 @@ struct pxa3xx_nand_host {
 	unsigned int		row_addr_cycles;
 	size_t			read_id_bytes;
 
-	/* cached register value */
-	uint32_t		reg_ndcr;
-	uint32_t		ndtr0cs0;
-	uint32_t		ndtr1cs0;
 };
 
 struct pxa3xx_nand_info {
@@ -152,10 +184,13 @@ struct pxa3xx_nand_info {
 	struct clk		*clk;
 	void __iomem		*mmio_base;
 	unsigned long		mmio_phys;
-	struct completion	cmd_complete;
+	struct completion	cmd_complete, dev_ready;
 
 	unsigned int 		buf_start;
 	unsigned int		buf_count;
+	unsigned int		buf_size;
+	unsigned int		data_buff_pos;
+	unsigned int		oob_buff_pos;
 
 	/* DMA information */
 	int			drcmr_dat;
@@ -171,43 +206,44 @@ struct pxa3xx_nand_info {
 	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
 	unsigned int		state;
 
+	/*
+	 * This driver supports NFCv1 (as found in PXA SoC)
+	 * and NFCv2 (as found in Armada 370/XP SoC).
+	 */
+	enum pxa3xx_nand_variant variant;
+
 	int			cs;
 	int			use_ecc;	/* use HW ECC ? */
+	int			ecc_bch;	/* using BCH ECC? */
 	int			use_dma;	/* use DMA ? */
-	int			is_ready;
+	int			use_spare;	/* use spare ? */
+	int			need_wait;
 
-	unsigned int		page_size;	/* page size of attached chip */
-	unsigned int		data_size;	/* data size in FIFO */
+	unsigned int		data_size;	/* data to be read from FIFO */
+	unsigned int		chunk_size;	/* split commands chunk size */
 	unsigned int		oob_size;
+	unsigned int		spare_size;
+	unsigned int		ecc_size;
+	unsigned int		ecc_err_cnt;
+	unsigned int		max_bitflips;
 	int 			retcode;
 
+	/* cached register value */
+	uint32_t		reg_ndcr;
+	uint32_t		ndtr0cs0;
+	uint32_t		ndtr1cs0;
+
 	/* generated NDCBx register values */
 	uint32_t		ndcb0;
 	uint32_t		ndcb1;
 	uint32_t		ndcb2;
+	uint32_t		ndcb3;
 };
 
 static bool use_dma = 1;
 module_param(use_dma, bool, 0444);
 MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
 
-/*
- * Default NAND flash controller configuration setup by the
- * bootloader. This configuration is used only when pdata->keep_config is set
- */
-static struct pxa3xx_nand_cmdset default_cmdset = {
-	.read1		= 0x3000,
-	.read2		= 0x0050,
-	.program	= 0x1080,
-	.read_status	= 0x0070,
-	.read_id	= 0x0090,
-	.erase		= 0xD060,
-	.reset		= 0x00FF,
-	.lock		= 0x002A,
-	.unlock		= 0x2423,
-	.lock_status	= 0x007A,
-};
-
 static struct pxa3xx_nand_timing timing[] = {
 	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
 	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
@@ -227,11 +263,67 @@ static struct pxa3xx_nand_flash builtin_flash_types[] = {
 { "256MiB 16-bit", 0xba20,  64, 2048, 16, 16, 2048, &timing[3] },
 };
 
+static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
+static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION,
+	.offs =	8,
+	.len = 6,
+	.veroffs = 14,
+	.maxblocks = 8,		/* Last 8 blocks in each chip */
+	.pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION,
+	.offs =	8,
+	.len = 6,
+	.veroffs = 14,
+	.maxblocks = 8,		/* Last 8 blocks in each chip */
+	.pattern = bbt_mirror_pattern
+};
+
+static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
+	.eccbytes = 32,
+	.eccpos = {
+		32, 33, 34, 35, 36, 37, 38, 39,
+		40, 41, 42, 43, 44, 45, 46, 47,
+		48, 49, 50, 51, 52, 53, 54, 55,
+		56, 57, 58, 59, 60, 61, 62, 63},
+	.oobfree = { {2, 30} }
+};
+
+static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
+	.eccbytes = 64,
+	.eccpos = {
+		32,  33,  34,  35,  36,  37,  38,  39,
+		40,  41,  42,  43,  44,  45,  46,  47,
+		48,  49,  50,  51,  52,  53,  54,  55,
+		56,  57,  58,  59,  60,  61,  62,  63,
+		96,  97,  98,  99,  100, 101, 102, 103,
+		104, 105, 106, 107, 108, 109, 110, 111,
+		112, 113, 114, 115, 116, 117, 118, 119,
+		120, 121, 122, 123, 124, 125, 126, 127},
+	/* Bootrom looks in bytes 0 & 5 for bad blocks */
+	.oobfree = { {6, 26}, { 64, 32} }
+};
+
+static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
+	.eccbytes = 128,
+	.eccpos = {
+		32,  33,  34,  35,  36,  37,  38,  39,
+		40,  41,  42,  43,  44,  45,  46,  47,
+		48,  49,  50,  51,  52,  53,  54,  55,
+		56,  57,  58,  59,  60,  61,  62,  63},
+	.oobfree = { }
+};
+
 /* Define a default flash type setting serve as flash detecting only */
 #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
 
-const char *mtd_names[] = {"pxa3xx_nand-0", "pxa3xx_nand-1", NULL};
-
 #define NDTR0_tCH(c)	(min((c), 7) << 19)
 #define NDTR0_tCS(c)	(min((c), 7) << 16)
 #define NDTR0_tWH(c)	(min((c), 7) << 11)
@@ -246,6 +338,29 @@ const char *mtd_names[] = {"pxa3xx_nand-0", "pxa3xx_nand-1", NULL};
 /* convert nano-seconds to nand flash controller clock cycles */
 #define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
 
+static struct of_device_id pxa3xx_nand_dt_ids[] = {
+	{
+		.compatible = "marvell,pxa3xx-nand",
+		.data       = (void *)PXA3XX_NAND_VARIANT_PXA,
+	},
+	{
+		.compatible = "marvell,armada370-nand",
+		.data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
+
+static enum pxa3xx_nand_variant
+pxa3xx_nand_get_variant(struct platform_device *pdev)
+{
+	const struct of_device_id *of_id =
+			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
+	if (!of_id)
+		return PXA3XX_NAND_VARIANT_PXA;
+	return (enum pxa3xx_nand_variant)of_id->data;
+}
+
 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
 				   const struct pxa3xx_nand_timing *t)
 {
@@ -264,31 +379,29 @@ static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
 		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
 		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
 
-	host->ndtr0cs0 = ndtr0;
-	host->ndtr1cs0 = ndtr1;
+	info->ndtr0cs0 = ndtr0;
+	info->ndtr1cs0 = ndtr1;
 	nand_writel(info, NDTR0CS0, ndtr0);
 	nand_writel(info, NDTR1CS0, ndtr1);
 }
 
-static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
+/*
+ * Set the data and OOB size, depending on the selected
+ * spare and ECC configuration.
+ * Only applicable to READ0, READOOB and PAGEPROG commands.
+ */
+static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
+				struct mtd_info *mtd)
 {
-	struct pxa3xx_nand_host *host = info->host[info->cs];
-	int oob_enable = host->reg_ndcr & NDCR_SPARE_EN;
+	int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
 
-	info->data_size = host->page_size;
-	if (!oob_enable) {
-		info->oob_size = 0;
+	info->data_size = mtd->writesize;
+	if (!oob_enable)
 		return;
-	}
 
-	switch (host->page_size) {
-	case 2048:
-		info->oob_size = (info->use_ecc) ? 40 : 64;
-		break;
-	case 512:
-		info->oob_size = (info->use_ecc) ? 8 : 16;
-		break;
-	}
+	info->oob_size = info->spare_size;
+	if (!info->use_ecc)
+		info->oob_size += info->ecc_size;
 }
 
 /**
@@ -299,12 +412,30 @@ static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
  */
 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
 {
-	struct pxa3xx_nand_host *host = info->host[info->cs];
 	uint32_t ndcr;
 
-	ndcr = host->reg_ndcr;
-	ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
-	ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
+	ndcr = info->reg_ndcr;
+
+	if (info->use_ecc) {
+		ndcr |= NDCR_ECC_EN;
+		if (info->ecc_bch)
+			nand_writel(info, NDECCCTRL, 0x1);
+	} else {
+		ndcr &= ~NDCR_ECC_EN;
+		if (info->ecc_bch)
+			nand_writel(info, NDECCCTRL, 0x0);
+	}
+
+	if (info->use_dma)
+		ndcr |= NDCR_DMA_EN;
+	else
+		ndcr &= ~NDCR_DMA_EN;
+
+	if (info->use_spare)
+		ndcr |= NDCR_SPARE_EN;
+	else
+		ndcr &= ~NDCR_SPARE_EN;
+
 	ndcr |= NDCR_ND_RUN;
 
 	/* clear status bits and run */
@@ -333,7 +464,8 @@ static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
 	nand_writel(info, NDSR, NDSR_MASK);
 }
 
-static void enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
+static void __maybe_unused
+enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
 {
 	uint32_t ndcr;
 
@@ -351,28 +483,42 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
 
 static void handle_data_pio(struct pxa3xx_nand_info *info)
 {
+	unsigned int do_bytes = min(info->data_size, info->chunk_size);
+
 	switch (info->state) {
 	case STATE_PIO_WRITING:
-		__raw_writesl(info->mmio_base + NDDB, info->data_buff,
-				DIV_ROUND_UP(info->data_size, 4));
+		__raw_writesl(info->mmio_base + NDDB,
+			      info->data_buff + info->data_buff_pos,
+			      DIV_ROUND_UP(do_bytes, 4));
+
 		if (info->oob_size > 0)
-			__raw_writesl(info->mmio_base + NDDB, info->oob_buff,
-					DIV_ROUND_UP(info->oob_size, 4));
+			__raw_writesl(info->mmio_base + NDDB,
+				      info->oob_buff + info->oob_buff_pos,
+				      DIV_ROUND_UP(info->oob_size, 4));
 		break;
 	case STATE_PIO_READING:
-		__raw_readsl(info->mmio_base + NDDB, info->data_buff,
-				DIV_ROUND_UP(info->data_size, 4));
+		__raw_readsl(info->mmio_base + NDDB,
+			     info->data_buff + info->data_buff_pos,
+			     DIV_ROUND_UP(do_bytes, 4));
+
 		if (info->oob_size > 0)
-			__raw_readsl(info->mmio_base + NDDB, info->oob_buff,
-					DIV_ROUND_UP(info->oob_size, 4));
+			__raw_readsl(info->mmio_base + NDDB,
+				     info->oob_buff + info->oob_buff_pos,
+				     DIV_ROUND_UP(info->oob_size, 4));
 		break;
 	default:
 		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
 				info->state);
 		BUG();
 	}
+
+	/* Update buffer pointers for multi-page read/write */
+	info->data_buff_pos += do_bytes;
+	info->oob_buff_pos += info->oob_size;
+	info->data_size -= do_bytes;
 }
 
+#ifdef ARCH_HAS_DMA
 static void start_data_dma(struct pxa3xx_nand_info *info)
 {
 	struct pxa_dma_desc *desc = info->data_desc;
@@ -419,11 +565,15 @@ static void pxa3xx_nand_data_dma_irq(int channel, void *data)
 	enable_int(info, NDCR_INT_MASK);
 	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
 }
+#else
+static void start_data_dma(struct pxa3xx_nand_info *info)
+{}
+#endif
 
 static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
 {
 	struct pxa3xx_nand_info *info = devid;
-	unsigned int status, is_completed = 0;
+	unsigned int status, is_completed = 0, is_ready = 0;
 	unsigned int ready, cmd_done;
 
 	if (info->cs == 0) {
@@ -436,10 +586,25 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
 
 	status = nand_readl(info, NDSR);
 
-	if (status & NDSR_DBERR)
-		info->retcode = ERR_DBERR;
-	if (status & NDSR_SBERR)
-		info->retcode = ERR_SBERR;
+	if (status & NDSR_UNCORERR)
+		info->retcode = ERR_UNCORERR;
+	if (status & NDSR_CORERR) {
+		info->retcode = ERR_CORERR;
+		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
+		    info->ecc_bch)
+			info->ecc_err_cnt = NDSR_ERR_CNT(status);
+		else
+			info->ecc_err_cnt = 1;
+
+		/*
+		 * Each chunk composing a page is corrected independently,
+		 * and we need to store maximum number of corrected bitflips
+		 * to return it to the MTD layer in ecc.read_page().
+		 */
+		info->max_bitflips = max_t(unsigned int,
+					   info->max_bitflips,
+					   info->ecc_err_cnt);
+	}
 	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
 		/* whether use dma to transfer data */
 		if (info->use_dma) {
@@ -459,23 +624,38 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
 		is_completed = 1;
 	}
 	if (status & ready) {
-		info->is_ready = 1;
 		info->state = STATE_READY;
+		is_ready = 1;
 	}
 
 	if (status & NDSR_WRCMDREQ) {
 		nand_writel(info, NDSR, NDSR_WRCMDREQ);
 		status &= ~NDSR_WRCMDREQ;
 		info->state = STATE_CMD_HANDLE;
+
+		/*
+		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
+		 * must be loaded by writing directly either 12 or 16
+		 * bytes directly to NDCB0, four bytes at a time.
+		 *
+		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
+		 * but each NDCBx register can be read.
+		 */
 		nand_writel(info, NDCB0, info->ndcb0);
 		nand_writel(info, NDCB0, info->ndcb1);
 		nand_writel(info, NDCB0, info->ndcb2);
+
+		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
+		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+			nand_writel(info, NDCB0, info->ndcb3);
 	}
 
 	/* clear NDSR to let the controller exit the IRQ */
 	nand_writel(info, NDSR, status);
 	if (is_completed)
 		complete(&info->cmd_complete);
+	if (is_ready)
+		complete(&info->dev_ready);
 NORMAL_IRQ_EXIT:
 	return IRQ_HANDLED;
 }
@@ -488,40 +668,53 @@ static inline int is_buf_blank(uint8_t *buf, size_t len)
 	return 1;
 }
 
-static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
-		uint16_t column, int page_addr)
+static void set_command_address(struct pxa3xx_nand_info *info,
+		unsigned int page_size, uint16_t column, int page_addr)
 {
-	uint16_t cmd;
-	int addr_cycle, exec_cmd;
-	struct pxa3xx_nand_host *host;
-	struct mtd_info *mtd;
+	/* small page addr setting */
+	if (page_size < PAGE_CHUNK_SIZE) {
+		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
+				| (column & 0xFF);
 
-	host = info->host[info->cs];
-	mtd = host->mtd;
-	addr_cycle = 0;
-	exec_cmd = 1;
+		info->ndcb2 = 0;
+	} else {
+		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
+				| (column & 0xFFFF);
+
+		if (page_addr & 0xFF0000)
+			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
+		else
+			info->ndcb2 = 0;
+	}
+}
+
+static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
+{
+	struct pxa3xx_nand_host *host = info->host[info->cs];
+	struct mtd_info *mtd = host->mtd;
 
 	/* reset data and oob column point to handle data */
 	info->buf_start		= 0;
 	info->buf_count		= 0;
 	info->oob_size		= 0;
+	info->data_buff_pos	= 0;
+	info->oob_buff_pos	= 0;
 	info->use_ecc		= 0;
-	info->is_ready		= 0;
+	info->use_spare		= 1;
 	info->retcode		= ERR_NONE;
-	if (info->cs != 0)
-		info->ndcb0 = NDCB0_CSEL;
-	else
-		info->ndcb0 = 0;
+	info->ecc_err_cnt	= 0;
+	info->ndcb3		= 0;
+	info->need_wait		= 0;
 
 	switch (command) {
 	case NAND_CMD_READ0:
 	case NAND_CMD_PAGEPROG:
 		info->use_ecc = 1;
 	case NAND_CMD_READOOB:
-		pxa3xx_set_datasize(info);
+		pxa3xx_set_datasize(info, mtd);
 		break;
-	case NAND_CMD_SEQIN:
-		exec_cmd = 0;
+	case NAND_CMD_PARAM:
+		info->use_spare = 0;
 		break;
 	default:
 		info->ndcb1 = 0;
@@ -529,48 +722,90 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
 		break;
 	}
 
+	/*
+	 * If we are about to issue a read command, or about to set
+	 * the write address, then clean the data buffer.
+	 */
+	if (command == NAND_CMD_READ0 ||
+	    command == NAND_CMD_READOOB ||
+	    command == NAND_CMD_SEQIN) {
+
+		info->buf_count = mtd->writesize + mtd->oobsize;
+		memset(info->data_buff, 0xFF, info->buf_count);
+	}
+
+}
+
+static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
+		int ext_cmd_type, uint16_t column, int page_addr)
+{
+	int addr_cycle, exec_cmd;
+	struct pxa3xx_nand_host *host;
+	struct mtd_info *mtd;
+
+	host = info->host[info->cs];
+	mtd = host->mtd;
+	addr_cycle = 0;
+	exec_cmd = 1;
+
+	if (info->cs != 0)
+		info->ndcb0 = NDCB0_CSEL;
+	else
+		info->ndcb0 = 0;
+
+	if (command == NAND_CMD_SEQIN)
+		exec_cmd = 0;
+
 	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
 				    + host->col_addr_cycles);
 
 	switch (command) {
 	case NAND_CMD_READOOB:
 	case NAND_CMD_READ0:
-		cmd = host->cmdset->read1;
+		info->buf_start = column;
+		info->ndcb0 |= NDCB0_CMD_TYPE(0)
+				| addr_cycle
+				| NAND_CMD_READ0;
+
 		if (command == NAND_CMD_READOOB)
-			info->buf_start = mtd->writesize + column;
-		else
-			info->buf_start = column;
+			info->buf_start += mtd->writesize;
 
-		if (unlikely(host->page_size < PAGE_CHUNK_SIZE))
-			info->ndcb0 |= NDCB0_CMD_TYPE(0)
-					| addr_cycle
-					| (cmd & NDCB0_CMD1_MASK);
-		else
-			info->ndcb0 |= NDCB0_CMD_TYPE(0)
-					| NDCB0_DBC
-					| addr_cycle
-					| cmd;
+		/*
+		 * Multiple page read needs an 'extended command type' field,
+		 * which is either naked-read or last-read according to the
+		 * state.
+		 */
+		if (mtd->writesize == PAGE_CHUNK_SIZE) {
+			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
+		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
+			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
+					| NDCB0_LEN_OVRD
+					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
+			info->ndcb3 = info->chunk_size +
+				      info->oob_size;
+		}
+
+		set_command_address(info, mtd->writesize, column, page_addr);
+		break;
 
 	case NAND_CMD_SEQIN:
-		/* small page addr setting */
-		if (unlikely(host->page_size < PAGE_CHUNK_SIZE)) {
-			info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
-					| (column & 0xFF);
 
-			info->ndcb2 = 0;
-		} else {
-			info->ndcb1 = ((page_addr & 0xFFFF) << 16)
-					| (column & 0xFFFF);
+		info->buf_start = column;
+		set_command_address(info, mtd->writesize, 0, page_addr);
 
-			if (page_addr & 0xFF0000)
-				info->ndcb2 = (page_addr & 0xFF0000) >> 16;
-			else
-				info->ndcb2 = 0;
+		/*
+		 * Multiple page programming needs to execute the initial
+		 * SEQIN command that sets the page address.
+		 */
+		if (mtd->writesize > PAGE_CHUNK_SIZE) {
+			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
+				| addr_cycle
+				| command;
+			/* No data transfer in this case */
+			info->data_size = 0;
+			exec_cmd = 1;
 		}
-
-		info->buf_count = mtd->writesize + mtd->oobsize;
-		memset(info->data_buff, 0xFF, info->buf_count);
-
 		break;
 
 	case NAND_CMD_PAGEPROG:
@@ -580,49 +815,85 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
 			break;
 		}
 
-		cmd = host->cmdset->program;
-		info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
-				| NDCB0_AUTO_RS
-				| NDCB0_ST_ROW_EN
-				| NDCB0_DBC
-				| cmd
-				| addr_cycle;
+		/* Second command setting for large pages */
+		if (mtd->writesize > PAGE_CHUNK_SIZE) {
+			/*
+			 * Multiple page write uses the 'extended command'
+			 * field. This can be used to issue a command dispatch
+			 * or a naked-write depending on the current stage.
+			 */
+			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+					| NDCB0_LEN_OVRD
+					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
+			info->ndcb3 = info->chunk_size +
+				      info->oob_size;
+
+			/*
+			 * This is the command dispatch that completes a chunked
+			 * page program operation.
+			 */
+			if (info->data_size == 0) {
+				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
+					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
+					| command;
+				info->ndcb1 = 0;
+				info->ndcb2 = 0;
+				info->ndcb3 = 0;
+			}
+		} else {
+			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+					| NDCB0_AUTO_RS
+					| NDCB0_ST_ROW_EN
+					| NDCB0_DBC
+					| (NAND_CMD_PAGEPROG << 8)
+					| NAND_CMD_SEQIN
+					| addr_cycle;
+		}
+		break;
+
+	case NAND_CMD_PARAM:
+		info->buf_count = 256;
+		info->ndcb0 |= NDCB0_CMD_TYPE(0)
+				| NDCB0_ADDR_CYC(1)
+				| NDCB0_LEN_OVRD
+				| command;
+		info->ndcb1 = (column & 0xFF);
+		info->ndcb3 = 256;
+		info->data_size = 256;
 		break;
 
 	case NAND_CMD_READID:
-		cmd = host->cmdset->read_id;
 		info->buf_count = host->read_id_bytes;
 		info->ndcb0 |= NDCB0_CMD_TYPE(3)
 				| NDCB0_ADDR_CYC(1)
-				| cmd;
+				| command;
+		info->ndcb1 = (column & 0xFF);
 
 		info->data_size = 8;
 		break;
 	case NAND_CMD_STATUS:
-		cmd = host->cmdset->read_status;
 		info->buf_count = 1;
 		info->ndcb0 |= NDCB0_CMD_TYPE(4)
 				| NDCB0_ADDR_CYC(1)
-				| cmd;
+				| command;
 
 		info->data_size = 8;
 		break;
 
 	case NAND_CMD_ERASE1:
-		cmd = host->cmdset->erase;
 		info->ndcb0 |= NDCB0_CMD_TYPE(2)
 				| NDCB0_AUTO_RS
 				| NDCB0_ADDR_CYC(3)
 				| NDCB0_DBC
-				| cmd;
+				| (NAND_CMD_ERASE2 << 8)
+				| NAND_CMD_ERASE1;
 		info->ndcb1 = page_addr;
 		info->ndcb2 = 0;
 
 		break;
 	case NAND_CMD_RESET:
-		cmd = host->cmdset->reset;
 		info->ndcb0 |= NDCB0_CMD_TYPE(5)
-				| cmd;
+				| command;
 
 		break;
 
@@ -640,8 +911,8 @@ static int prepare_command_pool(struct pxa3xx_nand_info *info, int command,
 	return exec_cmd;
 }
 
-static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
-				int column, int page_addr)
+static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
+			 int column, int page_addr)
 {
 	struct pxa3xx_nand_host *host = mtd->priv;
 	struct pxa3xx_nand_info *info = host->info_data;
@@ -652,7 +923,7 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
 	 * "byte" address into a "word" address appropriate
 	 * for indexing a word-oriented device
 	 */
-	if (host->reg_ndcr & NDCR_DWIDTH_M)
+	if (info->reg_ndcr & NDCR_DWIDTH_M)
 		column /= 2;
 
 	/*
@@ -662,14 +933,19 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
 	 */
 	if (info->cs != host->cs) {
 		info->cs = host->cs;
-		nand_writel(info, NDTR0CS0, host->ndtr0cs0);
-		nand_writel(info, NDTR1CS0, host->ndtr1cs0);
+		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
+		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
 	}
 
+	prepare_start_command(info, command);
+
 	info->state = STATE_PREPARED;
-	exec_cmd = prepare_command_pool(info, command, column, page_addr);
+	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
+
 	if (exec_cmd) {
 		init_completion(&info->cmd_complete);
+		init_completion(&info->dev_ready);
+		info->need_wait = 1;
 		pxa3xx_nand_start(info);
 
 		ret = wait_for_completion_timeout(&info->cmd_complete,
@@ -683,6 +959,117 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
 	info->state = STATE_IDLE;
 }
 
+static void nand_cmdfunc_extended(struct mtd_info *mtd,
+				  const unsigned command,
+				  int column, int page_addr)
+{
+	struct pxa3xx_nand_host *host = mtd->priv;
+	struct pxa3xx_nand_info *info = host->info_data;
+	int ret, exec_cmd, ext_cmd_type;
+
+	/*
+	 * if this is a x16 device then convert the input
+	 * "byte" address into a "word" address appropriate
+	 * for indexing a word-oriented device
+	 */
+	if (info->reg_ndcr & NDCR_DWIDTH_M)
+		column /= 2;
+
+	/*
+	 * There may be different NAND chip hooked to
+	 * different chip select, so check whether
+	 * chip select has been changed, if yes, reset the timing
+	 */
+	if (info->cs != host->cs) {
+		info->cs = host->cs;
+		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
+		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
+	}
+
+	/* Select the extended command for the first command */
+	switch (command) {
+	case NAND_CMD_READ0:
+	case NAND_CMD_READOOB:
+		ext_cmd_type = EXT_CMD_TYPE_MONO;
+		break;
+	case NAND_CMD_SEQIN:
+		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
+		break;
+	case NAND_CMD_PAGEPROG:
+		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
+		break;
+	default:
+		ext_cmd_type = 0;
+		break;
+	}
+
+	prepare_start_command(info, command);
+
+	/*
+	 * Prepare the "is ready" completion before starting a command
+	 * transaction sequence. If the command is not executed the
+	 * completion will be completed, see below.
+	 *
+	 * We can do that inside the loop because the command variable
+	 * is invariant and thus so is the exec_cmd.
+	 */
+	info->need_wait = 1;
+	init_completion(&info->dev_ready);
+	do {
+		info->state = STATE_PREPARED;
+		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
+					       column, page_addr);
+		if (!exec_cmd) {
+			info->need_wait = 0;
+			complete(&info->dev_ready);
+			break;
+		}
+
+		init_completion(&info->cmd_complete);
+		pxa3xx_nand_start(info);
+
+		ret = wait_for_completion_timeout(&info->cmd_complete,
+				CHIP_DELAY_TIMEOUT);
+		if (!ret) {
+			dev_err(&info->pdev->dev, "Wait time out!!!\n");
+			/* Stop State Machine for next command cycle */
+			pxa3xx_nand_stop(info);
+			break;
+		}
+
+		/* Check if the sequence is complete */
+		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
+			break;
+
+		/*
+		 * After a splitted program command sequence has issued
+		 * the command dispatch, the command sequence is complete.
+		 */
+		if (info->data_size == 0 &&
+		    command == NAND_CMD_PAGEPROG &&
+		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
+			break;
+
+		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
+			/* Last read: issue a 'last naked read' */
+			if (info->data_size == info->chunk_size)
+				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
+			else
+				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
+
+		/*
+		 * If a splitted program command has no more data to transfer,
+		 * the command dispatch must be issued to complete.
+		 */
+		} else if (command == NAND_CMD_PAGEPROG &&
+			   info->data_size == 0) {
+				ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
+		}
+	} while (1);
+
+	info->state = STATE_IDLE;
+}
+
 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
 		struct nand_chip *chip, const uint8_t *buf, int oob_required)
 {
@@ -702,20 +1089,14 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
 	chip->read_buf(mtd, buf, mtd->writesize);
 	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
 
-	if (info->retcode == ERR_SBERR) {
-		switch (info->use_ecc) {
-		case 1:
-			mtd->ecc_stats.corrected++;
-			break;
-		case 0:
-		default:
-			break;
-		}
-	} else if (info->retcode == ERR_DBERR) {
+	if (info->retcode == ERR_CORERR && info->use_ecc) {
+		mtd->ecc_stats.corrected += info->ecc_err_cnt;
+
+	} else if (info->retcode == ERR_UNCORERR) {
 		/*
 		 * for blank page (all 0xff), HW will calculate its ECC as
 		 * 0, which is different from the ECC information within
-		 * OOB, ignore such double bit errors
+		 * OOB, ignore such uncorrectable errors
 		 */
 		if (is_buf_blank(buf, mtd->writesize))
 			info->retcode = ERR_NONE;
@@ -723,7 +1104,7 @@ static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
 			mtd->ecc_stats.failed++;
 	}
 
-	return 0;
+	return info->max_bitflips;
 }
 
 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
@@ -782,28 +1163,34 @@ static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
 {
 	struct pxa3xx_nand_host *host = mtd->priv;
 	struct pxa3xx_nand_info *info = host->info_data;
+	int ret;
+
+	if (info->need_wait) {
+		ret = wait_for_completion_timeout(&info->dev_ready,
+				CHIP_DELAY_TIMEOUT);
+		info->need_wait = 0;
+		if (!ret) {
+			dev_err(&info->pdev->dev, "Ready time out!!!\n");
+			return NAND_STATUS_FAIL;
+		}
+	}
 
 	/* pxa3xx_nand_send_command has waited for command complete */
 	if (this->state == FL_WRITING || this->state == FL_ERASING) {
 		if (info->retcode == ERR_NONE)
 			return 0;
-		else {
-			/*
-			 * any error make it return 0x01 which will tell
-			 * the caller the erase and write fail
-			 */
-			return 0x01;
-		}
+		else
+			return NAND_STATUS_FAIL;
 	}
 
-	return 0;
+	return NAND_STATUS_READY;
 }
 
 static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
 				    const struct pxa3xx_nand_flash *f)
 {
 	struct platform_device *pdev = info->pdev;
-	struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
+	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	struct pxa3xx_nand_host *host = info->host[info->cs];
 	uint32_t ndcr = 0x0; /* enable all interrupts */
 
@@ -818,8 +1205,6 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
 	}
 
 	/* calculate flash information */
-	host->cmdset = &default_cmdset;
-	host->page_size = f->page_size;
 	host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
 
 	/* calculate addressing information */
@@ -840,7 +1225,7 @@ static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info,
 	ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
 	ndcr |= NDCR_SPARE_EN; /* enable spare by default */
 
-	host->reg_ndcr = ndcr;
+	info->reg_ndcr = ndcr;
 
 	pxa3xx_nand_set_timing(host, f->timing);
 	return 0;
@@ -856,41 +1241,35 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
 	uint32_t ndcr = nand_readl(info, NDCR);
 
 	if (ndcr & NDCR_PAGE_SZ) {
-		host->page_size = 2048;
+		/* Controller's FIFO size */
+		info->chunk_size = 2048;
 		host->read_id_bytes = 4;
 	} else {
-		host->page_size = 512;
+		info->chunk_size = 512;
 		host->read_id_bytes = 2;
 	}
 
-	host->reg_ndcr = ndcr & ~NDCR_INT_MASK;
-	host->cmdset = &default_cmdset;
-
-	host->ndtr0cs0 = nand_readl(info, NDTR0CS0);
-	host->ndtr1cs0 = nand_readl(info, NDTR1CS0);
-
+	/* Set an initial chunk size */
+	info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
+	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
+	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
 	return 0;
 }
 
-/* the maximum possible buffer size for large page with OOB data
- * is: 2048 + 64 = 2112 bytes, allocate a page here for both the
- * data buffer and the DMA descriptor
- */
-#define MAX_BUFF_SIZE	PAGE_SIZE
-
+#ifdef ARCH_HAS_DMA
 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
 {
 	struct platform_device *pdev = info->pdev;
-	int data_desc_offset = MAX_BUFF_SIZE - sizeof(struct pxa_dma_desc);
+	int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);
 
 	if (use_dma == 0) {
-		info->data_buff = kmalloc(MAX_BUFF_SIZE, GFP_KERNEL);
+		info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
 		if (info->data_buff == NULL)
 			return -ENOMEM;
 		return 0;
 	}
 
-	info->data_buff = dma_alloc_coherent(&pdev->dev, MAX_BUFF_SIZE,
+	info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
 				&info->data_buff_phys, GFP_KERNEL);
 	if (info->data_buff == NULL) {
 		dev_err(&pdev->dev, "failed to allocate dma buffer\n");
@@ -904,29 +1283,131 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
 				pxa3xx_nand_data_dma_irq, info);
 	if (info->data_dma_ch < 0) {
 		dev_err(&pdev->dev, "failed to request data dma\n");
-		dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
+		dma_free_coherent(&pdev->dev, info->buf_size,
 				info->data_buff, info->data_buff_phys);
 		return info->data_dma_ch;
 	}
 
+	/*
+	 * Now that DMA buffers are allocated we turn on
+	 * DMA proper for I/O operations.
+	 */
+	info->use_dma = 1;
+	return 0;
+}
+
+static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
+{
+	struct platform_device *pdev = info->pdev;
+	if (info->use_dma) {
+		pxa_free_dma(info->data_dma_ch);
+		dma_free_coherent(&pdev->dev, info->buf_size,
+				  info->data_buff, info->data_buff_phys);
+	} else {
+		kfree(info->data_buff);
+	}
+}
+#else
+static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
+{
+	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
+	if (info->data_buff == NULL)
+		return -ENOMEM;
 	return 0;
 }
 
+static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
+{
+	kfree(info->data_buff);
+}
+#endif
+
 static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
 {
 	struct mtd_info *mtd;
+	struct nand_chip *chip;
 	int ret;
+
 	mtd = info->host[info->cs]->mtd;
+	chip = mtd->priv;
+
 	/* use the common timing to make a try */
 	ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
 	if (ret)
 		return ret;
 
-	pxa3xx_nand_cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
-	if (info->is_ready)
-		return 0;
+	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
+	ret = chip->waitfunc(mtd, chip);
+	if (ret & NAND_STATUS_FAIL)
+		return -ENODEV;
 
-	return -ENODEV;
+	return 0;
+}
+
+static int pxa_ecc_init(struct pxa3xx_nand_info *info,
+			struct nand_ecc_ctrl *ecc,
+			int strength, int ecc_stepsize, int page_size)
+{
+	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
+		info->chunk_size = 2048;
+		info->spare_size = 40;
+		info->ecc_size = 24;
+		ecc->mode = NAND_ECC_HW;
+		ecc->size = 512;
+		ecc->strength = 1;
+		return 1;
+
+	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
+		info->chunk_size = 512;
+		info->spare_size = 8;
+		info->ecc_size = 8;
+		ecc->mode = NAND_ECC_HW;
+		ecc->size = 512;
+		ecc->strength = 1;
+		return 1;
+
+	/*
+	 * Required ECC: 4-bit correction per 512 bytes
+	 * Select: 16-bit correction per 2048 bytes
+	 */
+	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
+		info->ecc_bch = 1;
+		info->chunk_size = 2048;
+		info->spare_size = 32;
+		info->ecc_size = 32;
+		ecc->mode = NAND_ECC_HW;
+		ecc->size = info->chunk_size;
+		ecc->layout = &ecc_layout_2KB_bch4bit;
+		ecc->strength = 16;
+		return 1;
+
+	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
+		info->ecc_bch = 1;
+		info->chunk_size = 2048;
+		info->spare_size = 32;
+		info->ecc_size = 32;
+		ecc->mode = NAND_ECC_HW;
+		ecc->size = info->chunk_size;
+		ecc->layout = &ecc_layout_4KB_bch4bit;
+		ecc->strength = 16;
+		return 1;
+
+	/*
+	 * Required ECC: 8-bit correction per 512 bytes
+	 * Select: 16-bit correction per 1024 bytes
+	 */
+	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
+		info->ecc_bch = 1;
+		info->chunk_size = 1024;
+		info->spare_size = 0;
+		info->ecc_size = 32;
+		ecc->mode = NAND_ECC_HW;
+		ecc->size = info->chunk_size;
+		ecc->layout = &ecc_layout_4KB_bch8bit;
+		ecc->strength = 16;
+		return 1;
+	}
+	return 0;
 }
 
 static int pxa3xx_nand_scan(struct mtd_info *mtd)
@@ -934,13 +1415,14 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
 	struct pxa3xx_nand_host *host = mtd->priv;
 	struct pxa3xx_nand_info *info = host->info_data;
 	struct platform_device *pdev = info->pdev;
-	struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
+	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
 	struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
 	const struct pxa3xx_nand_flash *f = NULL;
 	struct nand_chip *chip = mtd->priv;
 	uint32_t id = -1;
 	uint64_t chipsize;
 	int i, ret, num;
+	uint16_t ecc_strength, ecc_step;
 
 	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
 		goto KEEP_CONFIG;
@@ -999,28 +1481,80 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd)
 	pxa3xx_flash_ids[1].name = NULL;
 	def = pxa3xx_flash_ids;
 KEEP_CONFIG:
-	chip->ecc.mode = NAND_ECC_HW;
-	chip->ecc.size = host->page_size;
-	chip->ecc.strength = 1;
-
-	if (host->reg_ndcr & NDCR_DWIDTH_M)
+	if (info->reg_ndcr & NDCR_DWIDTH_M)
 		chip->options |= NAND_BUSWIDTH_16;
 
+	/* Device detection must be done with ECC disabled */
+	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+		nand_writel(info, NDECCCTRL, 0x0);
+
 	if (nand_scan_ident(mtd, 1, def))
 		return -ENODEV;
+
+	if (pdata->flash_bbt) {
+		/*
+		 * We'll use a bad block table stored in-flash and don't
+		 * allow writing the bad block marker to the flash.
+		 */
+		chip->bbt_options |= NAND_BBT_USE_FLASH |
+				     NAND_BBT_NO_OOB_BBM;
+		chip->bbt_td = &bbt_main_descr;
+		chip->bbt_md = &bbt_mirror_descr;
+	}
+
+	/*
+	 * If the page size is bigger than the FIFO size, let's check
+	 * we are given the right variant and then switch to the extended
+	 * (aka splitted) command handling,
+	 */
+	if (mtd->writesize > PAGE_CHUNK_SIZE) {
+		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
+			chip->cmdfunc = nand_cmdfunc_extended;
+		} else {
+			dev_err(&info->pdev->dev,
+				"unsupported page size on this variant\n");
+			return -ENODEV;
+		}
+	}
+
+	ecc_strength = chip->ecc_strength_ds;
+	ecc_step = chip->ecc_step_ds;
+
+	/* Set default ECC strength requirements on non-ONFI devices */
+	if (ecc_strength < 1 && ecc_step < 1) {
+		ecc_strength = 1;
+		ecc_step = 512;
+	}
+
+	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
+			   ecc_step, mtd->writesize);
+	if (!ret) {
+		dev_err(&info->pdev->dev,
+			"ECC strength %d at page size %d is not supported\n",
+			chip->ecc_strength_ds, mtd->writesize);
+		return -ENODEV;
+	}
+
 	/* calculate addressing information */
 	if (mtd->writesize >= 2048)
 		host->col_addr_cycles = 2;
 	else
 		host->col_addr_cycles = 1;
 
+	/* release the initial buffer */
+	kfree(info->data_buff);
+
+	/* allocate the real data + oob buffer */
+	info->buf_size = mtd->writesize + mtd->oobsize;
+	ret = pxa3xx_nand_init_buff(info);
+	if (ret)
+		return ret;
 	info->oob_buff = info->data_buff + mtd->writesize;
+
 	if ((mtd->size >> chip->page_shift) > 65536)
 		host->row_addr_cycles = 3;
 	else
 		host->row_addr_cycles = 2;
-
-	mtd->name = mtd_names[0];
 	return nand_scan_tail(mtd);
 }
 
@@ -1034,15 +1568,14 @@ static int alloc_nand_resource(struct platform_device *pdev)
 	struct resource *r;
 	int ret, irq, cs;
 
-	pdata = pdev->dev.platform_data;
-	info = kzalloc(sizeof(*info) + (sizeof(*mtd) +
-		       sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
-	if (!info) {
-		dev_err(&pdev->dev, "failed to allocate memory\n");
+	pdata = dev_get_platdata(&pdev->dev);
+	info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) +
+			    sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
+	if (!info)
 		return -ENOMEM;
-	}
 
 	info->pdev = pdev;
+	info->variant = pxa3xx_nand_get_variant(pdev);
 	for (cs = 0; cs < pdata->num_cs; cs++) {
 		mtd = (struct mtd_info *)((unsigned int)&info[1] +
 		      (sizeof(*mtd) + sizeof(*host)) * cs);
@@ -1060,87 +1593,83 @@ static int alloc_nand_resource(struct platform_device *pdev)
 		chip->controller        = &info->controller;
 		chip->waitfunc		= pxa3xx_nand_waitfunc;
 		chip->select_chip	= pxa3xx_nand_select_chip;
-		chip->cmdfunc		= pxa3xx_nand_cmdfunc;
 		chip->read_word		= pxa3xx_nand_read_word;
 		chip->read_byte		= pxa3xx_nand_read_byte;
 		chip->read_buf		= pxa3xx_nand_read_buf;
 		chip->write_buf		= pxa3xx_nand_write_buf;
+		chip->options		|= NAND_NO_SUBPAGE_WRITE;
+		chip->cmdfunc		= nand_cmdfunc;
 	}
 
 	spin_lock_init(&chip->controller->lock);
 	init_waitqueue_head(&chip->controller->wq);
-	info->clk = clk_get(&pdev->dev, NULL);
+	info->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(info->clk)) {
 		dev_err(&pdev->dev, "failed to get nand clock\n");
-		ret = PTR_ERR(info->clk);
-		goto fail_free_mtd;
+		return PTR_ERR(info->clk);
 	}
-	clk_enable(info->clk);
-
-	/*
-	 * This is a dirty hack to make this driver work from devicetree
-	 * bindings. It can be removed once we have a prober DMA controller
-	 * framework for DT.
-	 */
-	if (pdev->dev.of_node && cpu_is_pxa3xx()) {
-		info->drcmr_dat = 97;
-		info->drcmr_cmd = 99;
-	} else {
-		r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-		if (r == NULL) {
-			dev_err(&pdev->dev, "no resource defined for data DMA\n");
-			ret = -ENXIO;
-			goto fail_put_clk;
-		}
-		info->drcmr_dat = r->start;
+	ret = clk_prepare_enable(info->clk);
+	if (ret < 0)
+		return ret;
 
-		r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-		if (r == NULL) {
-			dev_err(&pdev->dev, "no resource defined for command DMA\n");
-			ret = -ENXIO;
-			goto fail_put_clk;
+	if (use_dma) {
+		/*
+		 * This is a dirty hack to make this driver work from
+		 * devicetree bindings. It can be removed once we have
+		 * a prober DMA controller framework for DT.
+		 */
+		if (pdev->dev.of_node &&
+		    of_machine_is_compatible("marvell,pxa3xx")) {
+			info->drcmr_dat = 97;
+			info->drcmr_cmd = 99;
+		} else {
+			r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+			if (r == NULL) {
+				dev_err(&pdev->dev,
+					"no resource defined for data DMA\n");
+				ret = -ENXIO;
+				goto fail_disable_clk;
+			}
+			info->drcmr_dat = r->start;
+
+			r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+			if (r == NULL) {
+				dev_err(&pdev->dev,
+					"no resource defined for cmd DMA\n");
+				ret = -ENXIO;
+				goto fail_disable_clk;
+			}
+			info->drcmr_cmd = r->start;
 		}
-		info->drcmr_cmd = r->start;
 	}
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
 		dev_err(&pdev->dev, "no IRQ resource defined\n");
 		ret = -ENXIO;
-		goto fail_put_clk;
+		goto fail_disable_clk;
 	}
 
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (r == NULL) {
-		dev_err(&pdev->dev, "no IO memory resource defined\n");
-		ret = -ENODEV;
-		goto fail_put_clk;
-	}
-
-	r = request_mem_region(r->start, resource_size(r), pdev->name);
-	if (r == NULL) {
-		dev_err(&pdev->dev, "failed to request memory resource\n");
-		ret = -EBUSY;
-		goto fail_put_clk;
-	}
-
-	info->mmio_base = ioremap(r->start, resource_size(r));
-	if (info->mmio_base == NULL) {
-		dev_err(&pdev->dev, "ioremap() failed\n");
-		ret = -ENODEV;
-		goto fail_free_res;
+	info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(info->mmio_base)) {
+		ret = PTR_ERR(info->mmio_base);
+		goto fail_disable_clk;
 	}
 	info->mmio_phys = r->start;
 
-	ret = pxa3xx_nand_init_buff(info);
-	if (ret)
-		goto fail_free_io;
+	/* Allocate a buffer to allow flash detection */
+	info->buf_size = INIT_BUFFER_SIZE;
+	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
+	if (info->data_buff == NULL) {
+		ret = -ENOMEM;
+		goto fail_disable_clk;
+	}
 
 	/* initialize all interrupts to be disabled */
 	disable_int(info, NDSR_MASK);
 
-	ret = request_irq(irq, pxa3xx_nand_irq, IRQF_DISABLED,
-			  pdev->name, info);
+	ret = request_irq(irq, pxa3xx_nand_irq, 0, pdev->name, info);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "failed to request IRQ\n");
 		goto fail_free_buf;
@@ -1152,21 +1681,9 @@ static int alloc_nand_resource(struct platform_device *pdev)
 
 fail_free_buf:
 	free_irq(irq, info);
-	if (use_dma) {
-		pxa_free_dma(info->data_dma_ch);
-		dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
-			info->data_buff, info->data_buff_phys);
-	} else
-		kfree(info->data_buff);
-fail_free_io:
-	iounmap(info->mmio_base);
-fail_free_res:
-	release_mem_region(r->start, resource_size(r));
-fail_put_clk:
-	clk_disable(info->clk);
-	clk_put(info->clk);
-fail_free_mtd:
-	kfree(info);
+	kfree(info->data_buff);
+fail_disable_clk:
+	clk_disable_unprepare(info->clk);
 	return ret;
 }
 
@@ -1174,45 +1691,25 @@ static int pxa3xx_nand_remove(struct platform_device *pdev)
 {
 	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
 	struct pxa3xx_nand_platform_data *pdata;
-	struct resource *r;
 	int irq, cs;
 
 	if (!info)
 		return 0;
 
-	pdata = pdev->dev.platform_data;
-	platform_set_drvdata(pdev, NULL);
+	pdata = dev_get_platdata(&pdev->dev);
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq >= 0)
 		free_irq(irq, info);
-	if (use_dma) {
-		pxa_free_dma(info->data_dma_ch);
-		dma_free_writecombine(&pdev->dev, MAX_BUFF_SIZE,
-				info->data_buff, info->data_buff_phys);
-	} else
-		kfree(info->data_buff);
+	pxa3xx_nand_free_buff(info);
 
-	iounmap(info->mmio_base);
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	release_mem_region(r->start, resource_size(r));
-
-	clk_disable(info->clk);
-	clk_put(info->clk);
+	clk_disable_unprepare(info->clk);
 
 	for (cs = 0; cs < pdata->num_cs; cs++)
 		nand_release(info->host[cs]->mtd);
-	kfree(info);
 	return 0;
 }
 
-#ifdef CONFIG_OF
-static struct of_device_id pxa3xx_nand_dt_ids[] = {
-	{ .compatible = "marvell,pxa3xx-nand" },
-	{}
-};
-MODULE_DEVICE_TABLE(of, i2c_pxa_dt_ids);
-
 static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
 {
 	struct pxa3xx_nand_platform_data *pdata;
@@ -1232,17 +1729,12 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
 	if (of_get_property(np, "marvell,nand-keep-config", NULL))
 		pdata->keep_config = 1;
 	of_property_read_u32(np, "num-cs", &pdata->num_cs);
+	pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
 
 	pdev->dev.platform_data = pdata;
 
 	return 0;
 }
-#else
-static inline int pxa3xx_nand_probe_dt(struct platform_device *pdev)
-{
-	return 0;
-}
-#endif
 
 static int pxa3xx_nand_probe(struct platform_device *pdev)
 {
@@ -1251,11 +1743,18 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
 	struct pxa3xx_nand_info *info;
 	int ret, cs, probe_success;
 
+#ifndef ARCH_HAS_DMA
+	if (use_dma) {
+		use_dma = 0;
+		dev_warn(&pdev->dev,
+			 "This platform can't do DMA on this device\n");
+	}
+#endif
 	ret = pxa3xx_nand_probe_dt(pdev);
 	if (ret)
 		return ret;
 
-	pdata = pdev->dev.platform_data;
+	pdata = dev_get_platdata(&pdev->dev);
 	if (!pdata) {
 		dev_err(&pdev->dev, "no platform data defined\n");
 		return -ENODEV;
@@ -1270,8 +1769,16 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
 	info = platform_get_drvdata(pdev);
 	probe_success = 0;
 	for (cs = 0; cs < pdata->num_cs; cs++) {
+		struct mtd_info *mtd = info->host[cs]->mtd;
+
+		/*
+		 * The mtd name matches the one used in 'mtdparts' kernel
+		 * parameter. This name cannot be changed or otherwise
+		 * user's mtd partitions configuration would get broken.
+		 */
+		mtd->name = "pxa3xx_nand-0";
 		info->cs = cs;
-		ret = pxa3xx_nand_scan(info->host[cs]->mtd);
+		ret = pxa3xx_nand_scan(mtd);
 		if (ret) {
 			dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
 				cs);
@@ -1279,7 +1786,7 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
 		}
 
 		ppdata.of_node = pdev->dev.of_node;
-		ret = mtd_device_parse_register(info->host[cs]->mtd, NULL,
+		ret = mtd_device_parse_register(mtd, NULL,
 						&ppdata, pdata->parts[cs],
 						pdata->nr_parts[cs]);
 		if (!ret)
@@ -1302,7 +1809,7 @@ static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
 	struct mtd_info *mtd;
 	int cs;
 
-	pdata = pdev->dev.platform_data;
+	pdata = dev_get_platdata(&pdev->dev);
 	if (info->state) {
 		dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
 		return -EAGAIN;
@@ -1323,7 +1830,7 @@ static int pxa3xx_nand_resume(struct platform_device *pdev)
 	struct mtd_info *mtd;
 	int cs;
 
-	pdata = pdev->dev.platform_data;
+	pdata = dev_get_platdata(&pdev->dev);
 	/* We don't want to handle interrupt without calling mtd routine */
 	disable_int(info, NDCR_INT_MASK);
 
@@ -1356,7 +1863,7 @@ static int pxa3xx_nand_resume(struct platform_device *pdev)
 static struct platform_driver pxa3xx_nand_driver = {
 	.driver = {
 		.name	= "pxa3xx-nand",
-		.of_match_table = of_match_ptr(pxa3xx_nand_dt_ids),
+		.of_match_table = pxa3xx_nand_dt_ids,
 	},
 	.probe		= pxa3xx_nand_probe,
 	.remove		= pxa3xx_nand_remove,
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index ed956e08d38b..7c16e68226f2 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -86,6 +86,7 @@ config LANTIQ_ETOP
 	  Support for the MII0 inside the Lantiq SoC
 
 source "drivers/net/ethernet/marvell/Kconfig"
+source "drivers/net/ethernet/mvebu_net/Kconfig"
 source "drivers/net/ethernet/mellanox/Kconfig"
 source "drivers/net/ethernet/micrel/Kconfig"
 source "drivers/net/ethernet/microchip/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 8268d85f9448..b5d4477ce5f3 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -2,6 +2,8 @@
 # Makefile for the Linux network Ethernet device drivers.
 #
 
+obj-$(CONFIG_NET_VENDOR_MVEBU) += mvebu_net/
+obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
 obj-$(CONFIG_NET_VENDOR_3COM) += 3com/
 obj-$(CONFIG_NET_VENDOR_8390) += 8390/
 obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
@@ -36,7 +38,6 @@ obj-$(CONFIG_IP1000) += icplus/
 obj-$(CONFIG_JME) += jme.o
 obj-$(CONFIG_KORINA) += korina.o
 obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
-obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
 obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
 obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
 obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index a49e81bdf8e8..3630f0596f5a 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -53,6 +53,17 @@ config MVNETA
 	  driver, which should be used for the older Marvell SoCs
 	  (Dove, Orion, Discovery, Kirkwood).
 
+config MVPP2
+	tristate "Marvell Armada 375 network interface support"
+	depends on MACH_ARMADA_375
+	select MVMDIO
+	---help---
+	  This driver supports the network interface units in the
+	  Marvell ARMADA 375 SoC family.
+
+	  Note that this driver is distinct from both the mv643xx_eth
+	  and the mvneta drivers.
+
 config PXA168_ETH
 	tristate "Marvell pxa168 ethernet support"
 	depends on CPU_PXA168
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 5c4a7765ff0e..f6425bd2884b 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -5,6 +5,7 @@
 obj-$(CONFIG_MVMDIO) += mvmdio.o
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
 obj-$(CONFIG_MVNETA) += mvneta.o
+obj-$(CONFIG_MVPP2) += mvpp2.o
 obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
 obj-$(CONFIG_SKGE) += skge.o
 obj-$(CONFIG_SKY2) += sky2.o
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a602aeeb3acb..89ef4c952252 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1341,7 +1341,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
 
 		dev_kfree_skb_any(skb);
 		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
-				 rx_desc->data_size, DMA_FROM_DEVICE);
+				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
 	}
 
 	if (rx_done)
@@ -1387,7 +1387,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
 		}
 
 		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
-				 rx_desc->data_size, DMA_FROM_DEVICE);
+				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
 
 		rx_bytes = rx_desc->data_size -
 			(ETH_FCS_LEN + MVNETA_MH_SIZE);
@@ -2260,6 +2260,21 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
 	return 0;
 }
 
+/* Get mac address */
+static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
+{
+	u32 mac_addr_l, mac_addr_h;
+
+	mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
+	mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
+	addr[0] = (mac_addr_h >> 24) & 0xFF;
+	addr[1] = (mac_addr_h >> 16) & 0xFF;
+	addr[2] = (mac_addr_h >> 8) & 0xFF;
+	addr[3] = mac_addr_h & 0xFF;
+	addr[4] = (mac_addr_l >> 8) & 0xFF;
+	addr[5] = mac_addr_l & 0xFF;
+}
+
 /* Handle setting mac address */
 static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
 {
@@ -2345,8 +2360,12 @@ static int mvneta_mdio_probe(struct mvneta_port *pp)
 {
 	struct phy_device *phy_dev;
 
-	phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
-				 pp->phy_interface);
+	if (pp->phy_node)
+		phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
+					 pp->phy_interface);
+	else
+		phy_dev = of_phy_connect_fixed_link(pp->dev, mvneta_adjust_link,
+						    pp->phy_interface);
 	if (!phy_dev) {
 		netdev_err(pp->dev, "could not find the PHY\n");
 		return -ENODEV;
@@ -2678,7 +2697,9 @@ static int mvneta_probe(struct platform_device *pdev)
 	u32 phy_addr;
 	struct mvneta_port *pp;
 	struct net_device *dev;
-	const char *mac_addr;
+	const char *dt_mac_addr;
+	char hw_mac_addr[ETH_ALEN];
+	const char *mac_from;
 	int phy_mode;
 	int err;
 
@@ -2702,9 +2723,13 @@ static int mvneta_probe(struct platform_device *pdev)
 
 	phy_node = of_parse_phandle(dn, "phy", 0);
 	if (!phy_node) {
-		dev_err(&pdev->dev, "no associated PHY\n");
-		err = -ENODEV;
-		goto err_free_irq;
+		/* No 'phy' found, see if we have a 'fixed-link'
+		 * property */
+		err = of_phy_register_fixed_link(dn);
+		if (err < 0) {
+			dev_err(&pdev->dev, "no 'phy' or 'fixed-link' properties\n");
+			goto err_free_irq;
+		}
 	}
 
 	phy_mode = of_get_phy_mode(dn);
@@ -2714,13 +2739,6 @@ static int mvneta_probe(struct platform_device *pdev)
 		goto err_free_irq;
 	}
 
-	mac_addr = of_get_mac_address(dn);
-
-	if (!mac_addr || !is_valid_ether_addr(mac_addr))
-		eth_hw_addr_random(dev);
-	else
-		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
-
 	dev->tx_queue_len = MVNETA_MAX_TXD;
 	dev->watchdog_timeo = 5 * HZ;
 	dev->netdev_ops = &mvneta_netdev_ops;
@@ -2751,6 +2769,21 @@ static int mvneta_probe(struct platform_device *pdev)
 
 	clk_prepare_enable(pp->clk);
 
+	dt_mac_addr = of_get_mac_address(dn);
+	if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
+		mac_from = "device tree";
+		memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
+	} else {
+		mvneta_get_mac_addr(pp, hw_mac_addr);
+		if (is_valid_ether_addr(hw_mac_addr)) {
+			mac_from = "hardware";
+			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
+		} else {
+			mac_from = "random";
+			eth_hw_addr_random(dev);
+		}
+	}
+
 	pp->tx_done_timer.data = (unsigned long)dev;
 
 	pp->tx_ring_size = MVNETA_MAX_TXD;
@@ -2783,7 +2816,8 @@ static int mvneta_probe(struct platform_device *pdev)
 		goto err_deinit;
 	}
 
-	netdev_info(dev, "mac: %pM\n", dev->dev_addr);
+	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
+		    dev->dev_addr);
 
 	platform_set_drvdata(pdev, pp->dev);
 
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
new file mode 100644
index 000000000000..ef5e0eff41c2
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -0,0 +1,6571 @@
+/*
+ * Driver for Marvell PPv2 network controller for Armada 375 SoC.
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Marcin Wojtas <mw@semihalf.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/inetdevice.h>
+#include <linux/mbus.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/cpumask.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <uapi/linux/ppp_defs.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+
+/* #define MVPP2_DEBUGS */
+#ifdef MVPP2_DEBUGS
+#define dprintk(a...) printk(a)
+#else
+#define dprintk(a...)
+#endif
+
+/* Packet Processor registers */
+
+/* RX Fifo Registers */
+#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
+#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
+#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
+#define MVPP2_RX_FIFO_INIT_REG			0x64
+
+/* RX DMA Top Registers */
+#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
+#define     MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
+#define     MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
+#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
+#define     MVPP2_POOL_BUF_SIZE_OFFSET		5
+#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
+#define     MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
+#define     MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
+#define     MVPP2_RXQ_POOL_SHORT_OFFS		20
+#define     MVPP2_RXQ_POOL_SHORT_MASK		0x700000
+#define     MVPP2_RXQ_POOL_LONG_OFFS		24
+#define     MVPP2_RXQ_POOL_LONG_MASK		0x7000000
+#define     MVPP2_RXQ_PACKET_OFFSET_OFFS	28
+#define     MVPP2_RXQ_PACKET_OFFSET_MASK	0x70000000
+#define     MVPP2_RXQ_DISABLE_MASK		BIT(31)
+
+/* Parser Registers */
+#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
+#define     MVPP2_PRS_PORT_LU_MAX		0xf
+#define     MVPP2_PRS_PORT_LU_MASK(port)	(0xff << ((port) * 4))
+#define     MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
+#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
+#define     MVPP2_PRS_INIT_OFF_MASK(port)	(0x3f << (((port) % 4) * 8))
+#define     MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
+#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
+#define     MVPP2_PRS_MAX_LOOP_MASK(port)	(0xff << (((port) % 4) * 8))
+#define     MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
+#define MVPP2_PRS_TCAM_IDX_REG			0x1100
+#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
+#define     MVPP2_PRS_TCAM_INV_MASK		BIT(31)
+#define MVPP2_PRS_SRAM_IDX_REG			0x1200
+#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
+#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
+#define     MVPP2_PRS_TCAM_EN_MASK		BIT(0)
+
+/* Classifier Registers */
+#define MVPP2_CLS_MODE_REG			0x1800
+#define     MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
+#define MVPP2_CLS_PORT_WAY_REG			0x1810
+#define     MVPP2_CLS_PORT_WAY_MASK(port)	(1 << (port))
+#define MVPP2_CLS_LKP_INDEX_REG			0x1814
+#define     MVPP2_CLS_LKP_INDEX_WAY_OFFS	6
+#define MVPP2_CLS_LKP_TBL_REG			0x1818
+#define     MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
+#define     MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
+#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
+#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
+#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
+#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
+#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
+#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS	3
+#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK	0x7
+#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
+#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
+#define     MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))
+
+
+/* Descriptor Manager Top Registers */
+#define MVPP2_RXQ_NUM_REG			0x2040
+#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
+#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
+#define     MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
+#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
+#define     MVPP2_RXQ_NUM_PROCESSED_OFFSET	0
+#define     MVPP2_RXQ_NUM_NEW_OFFSET		16
+#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
+#define     MVPP2_RXQ_OCCUPIED_MASK		0x3fff
+#define     MVPP2_RXQ_NON_OCCUPIED_OFFSET	16
+#define     MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
+#define MVPP2_RXQ_THRESH_REG			0x204c
+#define     MVPP2_OCCUPIED_THRESH_OFFSET	0
+#define     MVPP2_OCCUPIED_THRESH_MASK		0x3fff
+#define MVPP2_RXQ_INDEX_REG			0x2050
+#define MVPP2_TXQ_NUM_REG			0x2080
+#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
+#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
+#define     MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
+#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
+#define MVPP2_TXQ_THRESH_REG			0x2094
+#define     MVPP2_TRANSMITTED_THRESH_OFFSET	16
+#define     MVPP2_TRANSMITTED_THRESH_MASK	0x3fff0000
+#define MVPP2_TXQ_INDEX_REG			0x2098
+#define MVPP2_TXQ_PREF_BUF_REG			0x209c
+#define     MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
+#define     MVPP2_PREF_BUF_SIZE_4		(BIT(12) | BIT(13))
+#define     MVPP2_PREF_BUF_SIZE_16		(BIT(12) | BIT(14))
+#define     MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
+#define     MVPP2_TXQ_DRAIN_EN_MASK		BIT(31)
+#define MVPP2_TXQ_PENDING_REG			0x20a0
+#define     MVPP2_TXQ_PENDING_MASK		0x3fff
+#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
+#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
+#define     MVPP2_TRANSMITTED_COUNT_OFFSET	16
+#define     MVPP2_TRANSMITTED_COUNT_MASK	0x3fff0000
+#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
+#define     MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
+#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
+#define     MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
+#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
+#define     MVPP2_TXQ_RSVD_CLR_OFFSET		16
+#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
+#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
+#define     MVPP2_AGGR_TXQ_DESC_SIZE_MASK	0x3ff0
+#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
+#define     MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
+#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))
+
+/* MBUS bridge registers */
+#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
+#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
+#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
+#define MVPP2_BASE_ADDR_ENABLE			0x4060
+
+/* Interrupt Cause and Mask registers */
+#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
+#define MVPP2_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))
+#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
+#define     MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
+#define     MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
+#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
+#define     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
+#define     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
+#define     MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
+#define     MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
+#define     MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
+#define     MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
+#define     MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
+#define     MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
+#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
+#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
+#define     MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
+#define     MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
+#define     MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
+#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0
+
+/* Buffer Manager registers */
+#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
+#define     MVPP2_BM_POOL_BASE_ADDR_MASK	0xfffff80
+#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
+#define     MVPP2_BM_POOL_SIZE_MASK		0xfff0
+#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
+#define     MVPP2_BM_POOL_GET_READ_PTR_MASK	0xfff0
+#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
+#define     MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
+#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
+#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
+#define     MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
+#define     MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
+#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
+#define     MVPP2_BM_START_MASK			BIT(0)
+#define     MVPP2_BM_STOP_MASK			BIT(1)
+#define     MVPP2_BM_STATE_MASK			BIT(4)
+#define     MVPP2_BM_LOW_THRESH_OFFS		8
+#define     MVPP2_BM_LOW_THRESH_MASK		0x7f00
+#define     MVPP2_BM_LOW_THRESH_VALUE(val)	((val) << \
+						MVPP2_BM_LOW_THRESH_OFFS)
+#define     MVPP2_BM_HIGH_THRESH_OFFS		16
+#define     MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
+#define     MVPP2_BM_HIGH_THRESH_VALUE(val)	((val) << \
+						MVPP2_BM_HIGH_THRESH_OFFS)
+#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
+#define     MVPP2_BM_RELEASED_DELAY_MASK	BIT(0)
+#define     MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
+#define     MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
+#define     MVPP2_BM_BPPE_FULL_MASK		BIT(3)
+#define     MVPP2_BM_AVAILABLE_BP_LOW_MASK	BIT(4)
+#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
+#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
+#define     MVPP2_BM_PHY_ALLOC_GRNTD_MASK	BIT(0)
+#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
+#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
+#define     MVPP2_BM_PHY_RLS_MC_BUFF_MASK	BIT(0)
+#define     MVPP2_BM_PHY_RLS_PRIO_EN_MASK	BIT(1)
+#define     MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
+#define MVPP2_BM_VIRT_RLS_REG			0x64c0
+#define MVPP2_BM_MC_RLS_REG			0x64c4
+#define     MVPP2_BM_MC_ID_MASK			0xfff
+#define     MVPP2_BM_FORCE_RELEASE_MASK		BIT(12)
+
+/* TX Scheduler registers */
+#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
+#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
+#define     MVPP2_TXP_SCHED_ENQ_MASK		0xff
+#define     MVPP2_TXP_SCHED_DISQ_OFFSET		8
+#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
+#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
+#define MVPP2_TXP_SCHED_MTU_REG			0x801c
+#define     MVPP2_TXP_MTU_MAX			0x7FFFF
+#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
+#define     MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
+#define     MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
+#define     MVPP2_TXP_REFILL_PERIOD_MASK(v)	((v) << 20)
+#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
+#define     MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
+#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
+#define     MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
+#define     MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
+#define     MVPP2_TXQ_REFILL_PERIOD_MASK(v)	((v) << 20)
+#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
+#define     MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
+#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
+#define     MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff
+
+/* TX general registers */
+#define MVPP2_TX_SNOOP_REG			0x8800
+#define MVPP2_TX_PORT_FLUSH_REG			0x8810
+#define     MVPP2_TX_PORT_FLUSH_MASK(port)	(1 << (port))
+
+/* LMS registers */
+#define MVPP2_SRC_ADDR_MIDDLE			0x24
+#define MVPP2_SRC_ADDR_HIGH			0x28
+#define MVPP2_PHY_AN_CFG0_REG			0x34
+#define     MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
+#define MVPP2_MIB_COUNTERS_BASE(port)		(0x1000 + ((port) >> 1) * \
+						0x400 + (port) * 0x400)
+#define     MVPP2_MIB_LATE_COLLISION		0x7c
+#define MVPP2_ISR_SUM_MASK_REG			0x220c
+#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
+#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT		0x27
+
+/* Per-port registers */
+#define MVPP2_GMAC_CTRL_0_REG			0x0
+#define      MVPP2_GMAC_PORT_EN_MASK		BIT(0)
+#define      MVPP2_GMAC_MAX_RX_SIZE_OFFS	2
+#define      MVPP2_GMAC_MAX_RX_SIZE_MASK	0x7ffc
+#define      MVPP2_GMAC_MIB_CNTR_EN_MASK	BIT(15)
+#define MVPP2_GMAC_CTRL_1_REG			0x4
+#define      MVPP2_GMAC_PERIODIC_XON_EN_MASK	BIT(1)
+#define      MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
+#define      MVPP2_GMAC_PCS_LB_EN_BIT		6
+#define      MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
+#define      MVPP2_GMAC_SA_LOW_OFFS		7
+#define MVPP2_GMAC_CTRL_2_REG			0x8
+#define      MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
+#define      MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
+#define      MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
+#define      MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
+#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
+#define      MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
+#define      MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
+#define      MVPP2_GMAC_CONFIG_MII_SPEED	BIT(5)
+#define      MVPP2_GMAC_CONFIG_GMII_SPEED	BIT(6)
+#define      MVPP2_GMAC_AN_SPEED_EN		BIT(7)
+#define      MVPP2_GMAC_FC_ADV_EN		BIT(9)
+#define      MVPP2_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
+#define      MVPP2_GMAC_AN_DUPLEX_EN		BIT(13)
+#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
+#define      MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS	6
+#define      MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
+#define      MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
+					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
+
+#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff
+
+/* Descriptor ring Macros */
+#define MVPP2_QUEUE_NEXT_DESC(q, index) \
+	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
+
+/* Various constants */
+
+/* Coalescing */
+#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
+#define MVPP2_RX_COAL_PKTS		32
+#define MVPP2_RX_COAL_USEC		100
+
+/* The two bytes Marvell header. Either contains a special value used
+ * by Marvell switches when a specific hardware mode is enabled (not
+ * supported by this driver) or is filled automatically by zeroes on
+ * the RX side. Those two bytes being at the front of the Ethernet
+ * header, they allow to have the IP header aligned on a 4 bytes
+ * boundary automatically: the hardware skips those two bytes on its
+ * own.
+ */
+#define MVPP2_MH_SIZE			2
+#define MVPP2_ETH_TYPE_LEN		2
+#define MVPP2_PPPOE_HDR_SIZE		8
+#define MVPP2_VLAN_TAG_LEN		4
+
+/* Lbtd 802.3 type */
+#define MVPP2_IP_LBDT_TYPE		0xfffa
+
+#define MVPP2_CPU_D_CACHE_LINE_SIZE	32
+#define MVPP2_TX_CSUM_MAX_SIZE		9800
+
+/* Timeout constants */
+#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
+#define MVPP2_RX_DISABLE_TIMEOUT_MSEC	1000
+#define MVPP2_TX_FIFO_EMPTY_TIMEOUT	10000
+#define MVPP2_PORT_DISABLE_WAIT_TCLOCKS	5000
+#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000
+
+#define MVPP2_TX_MTU_MAX		0x7ffff
+
+/* Rx port number of PON port */
+#define MVPP2_PON_PORT_ID		7
+
+/* Maximum number of T-CONTs of PON port */
+#define MVPP2_MAX_TCONT			16
+
+/* Maximum number of supported ports */
+#define MVPP2_MAX_PORTS			4
+
+/* Maximum number of TXQs used by single port */
+#define MVPP2_MAX_TXQ			8
+
+/* Maximum number of TXQs used by single port */
+#define MVPP2_MAX_RXQ			8
+
+/* Total number of RXQs available to all ports */
+#define MVPP2_RXQ_TOTAL_NUM		(MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
+
+/* Max number of Rx descriptors */
+#define MVPP2_MAX_RXD			128
+
+/* Max number of Tx descriptors */
+#define MVPP2_MAX_TXD			1024
+
+/* Amount of Tx descriptors that can be reserved at once by CPU */
+#define MVPP2_CPU_DESC_CHUNK		64
+
+/* Max number of Tx descriptors in each aggregated queue */
+#define MVPP2_AGGR_TXQ_SIZE		256
+
+/* Descriptor aligned size */
+#define MVPP2_DESC_ALIGNED_SIZE		32
+
+/* Descriptor alignment mask */
+#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)
+
+/* RX FIFO constants */
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE	0x2000
+#define MVPP2_RX_FIFO_PORT_ATTR_SIZE	0x80
+#define MVPP2_RX_FIFO_PORT_MIN_PKT	0x80
+
+/* RX buffer constants */
+#define MVPP2_SKB_SHINFO_SIZE \
+	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+
+#define MVPP2_RX_PKT_SIZE(mtu) \
+	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
+	      ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)
+
+#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
+#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
+#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
+	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
+
+#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)
+
+/* IPv6 max L3 address size */
+#define MVPP2_MAX_L3_ADDR_SIZE		16
+
+/* Port flags */
+#define MVPP2_F_LOOPBACK		BIT(0)
+
+/* Marvell tag types */
+enum mvpp2_tag_type {
+	MVPP2_TAG_TYPE_NONE = 0,
+	MVPP2_TAG_TYPE_MH   = 1,
+	MVPP2_TAG_TYPE_DSA  = 2,
+	MVPP2_TAG_TYPE_EDSA = 3,
+	MVPP2_TAG_TYPE_VLAN = 4,
+	MVPP2_TAG_TYPE_LAST = 5
+};
+
+/* Parser constants */
+#define MVPP2_PRS_TCAM_SRAM_SIZE	256
+#define MVPP2_PRS_TCAM_WORDS		6
+#define MVPP2_PRS_SRAM_WORDS		4
+#define MVPP2_PRS_FLOW_ID_SIZE		64
+#define MVPP2_PRS_FLOW_ID_MASK		0x3f
+#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
+#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
+#define MVPP2_PRS_IPV4_HEAD		0x40
+#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
+#define MVPP2_PRS_IPV4_MC		0xe0
+#define MVPP2_PRS_IPV4_MC_MASK		0xf0
+#define MVPP2_PRS_IPV4_BC_MASK		0xff
+#define MVPP2_PRS_IPV4_IHL		0x5
+#define MVPP2_PRS_IPV4_IHL_MASK		0xf
+#define MVPP2_PRS_IPV6_MC		0xff
+#define MVPP2_PRS_IPV6_MC_MASK		0xff
+#define MVPP2_PRS_IPV6_HOP_MASK		0xff
+#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
+#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
+#define MVPP2_PRS_DBL_VLANS_MAX		100
+/* Tcam structure:
+ * - lookup ID - 4 bits
+ * - port ID - 1 byte
+ * - additional information - 1 byte
+ * - header data - 8 bytes
+ * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
+ */
+#define MVPP2_PRS_AI_BITS			8
+#define MVPP2_PRS_PORT_MASK			0xff
+#define MVPP2_PRS_LU_MASK			0xf
+#define MVPP2_PRS_TCAM_DATA_BYTE(offs)		\
+				    (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
+#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
+					      (((offs) * 2) - ((offs) % 2)  + 2)
+#define MVPP2_PRS_TCAM_AI_BYTE			16
+#define MVPP2_PRS_TCAM_PORT_BYTE		17
+#define MVPP2_PRS_TCAM_LU_BYTE			20
+#define MVPP2_PRS_TCAM_EN_OFFS(offs)		((offs) + 2)
+#define MVPP2_PRS_TCAM_INV_WORD			5
+/* Tcam entries ID */
+#define MVPP2_PE_DROP_ALL		0
+#define MVPP2_PE_FIRST_FREE_TID		1
+#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
+#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
+#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
+#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
+#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
+#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
+#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
+#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
+#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
+#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
+#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
+#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
+#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
+#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
+#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
+#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
+#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
+#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
+#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
+#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
+#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
+#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
+#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
+#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
+#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+/* Sram structure
+ * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
+ */
+#define MVPP2_PRS_SRAM_RI_OFFS			0
+#define MVPP2_PRS_SRAM_RI_WORD			0
+#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
+#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
+#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
+#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
+#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
+#define MVPP2_PRS_SRAM_UDF_OFFS			73
+#define MVPP2_PRS_SRAM_UDF_BITS			8
+#define MVPP2_PRS_SRAM_UDF_MASK			0xff
+#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
+#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
+#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
+#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
+#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
+#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
+#define MVPP2_PRS_SRAM_AI_OFFS			90
+#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
+#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
+#define MVPP2_PRS_SRAM_AI_MASK			0xff
+#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
+#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
+#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
+#define MVPP2_PRS_SRAM_LU_GEN_BIT		111
+/* Sram result info bits assignment */
+#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
+#define MVPP2_PRS_RI_DSA_MASK			0x2
+#define MVPP2_PRS_RI_VLAN_MASK			0xc
+#define MVPP2_PRS_RI_VLAN_NONE			~(BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
+#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
+#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
+#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
+#define MVPP2_PRS_RI_L2_CAST_MASK		0x600
+#define MVPP2_PRS_RI_L2_UCAST			~(BIT(9) | BIT(10))
+#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
+#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
+#define MVPP2_PRS_RI_PPPOE_MASK			0x800
+#define MVPP2_PRS_RI_L3_PROTO_MASK		0x7000
+#define MVPP2_PRS_RI_L3_UN			~(BIT(12) | BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_IP4			BIT(12)
+#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
+#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
+#define MVPP2_PRS_RI_L3_IP6			BIT(14)
+#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
+#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_ADDR_MASK		0x18000
+#define MVPP2_PRS_RI_L3_UCAST			~(BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
+#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
+#define MVPP2_PRS_RI_UDF3_MASK			0x300000
+#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
+#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
+#define MVPP2_PRS_RI_L4_TCP			BIT(22)
+#define MVPP2_PRS_RI_L4_UDP			BIT(23)
+#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
+#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
+#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
+#define MVPP2_PRS_RI_DROP_MASK			0x80000000
+/* Sram additional info bits assignment */
+#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
+#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
+#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
+#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
+#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
+#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
+#define MVPP2_PRS_SINGLE_VLAN_AI		0
+#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)
+
+/* DSA/EDSA type */
+#define MVPP2_PRS_TAGGED		true
+#define MVPP2_PRS_UNTAGGED		false
+#define MVPP2_PRS_EDSA			true
+#define MVPP2_PRS_DSA			false
+
+/* MAC entries, shadow udf */
+enum mvpp2_prs_udf {
+	MVPP2_PRS_UDF_MAC_DEF,
+	MVPP2_PRS_UDF_MAC_RANGE,
+	MVPP2_PRS_UDF_L2_DEF,
+	MVPP2_PRS_UDF_L2_DEF_COPY,
+	MVPP2_PRS_UDF_L2_USER,
+};
+
+/* Lookup ID */
+enum mvpp2_prs_lookup {
+	MVPP2_PRS_LU_MH,
+	MVPP2_PRS_LU_MAC,
+	MVPP2_PRS_LU_DSA,
+	MVPP2_PRS_LU_VLAN,
+	MVPP2_PRS_LU_L2,
+	MVPP2_PRS_LU_PPPOE,
+	MVPP2_PRS_LU_IP4,
+	MVPP2_PRS_LU_IP6,
+	MVPP2_PRS_LU_FLOWS,
+	MVPP2_PRS_LU_LAST,
+};
+
+/* L3 cast enum */
+enum mvpp2_prs_l3_cast {
+	MVPP2_PRS_L3_UNI_CAST,
+	MVPP2_PRS_L3_MULTI_CAST,
+	MVPP2_PRS_L3_BROAD_CAST
+};
+
+/* Multicast MAC kinds */
+enum mvpp2_prs_mac_mc {
+	MVPP2_PRS_IP4_MAC_MC,
+	MVPP2_PRS_IP6_MAC_MC,
+	MVPP2_PRS_MAX_MAC_MC
+};
+
+/* Classifier constants */
+#define MVPP2_CLS_FLOWS_TBL_SIZE	512
+#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
+#define MVPP2_CLS_LKP_TBL_SIZE		64
+
+/* BM constants */
+#define MVPP2_BM_POOLS_NUM		8
+#define MVPP2_BM_LONG_BUF_NUM		1024
+#define MVPP2_BM_SHORT_BUF_NUM		2048
+#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
+#define MVPP2_BM_POOL_PTR_ALIGN		128
+#define MVPP2_BM_SWF_LONG_POOL(port)	((port > 2) ? 2 : port)
+#define MVPP2_BM_SWF_SHORT_POOL		3
+
+/* BM cookie (32 bits) definition */
+#define MVPP2_BM_COOKIE_POOL_OFFS	8
+#define MVPP2_BM_COOKIE_CPU_OFFS	24
+
+/* BM short pool packet size
+ * These value assure that for SWF the total number
+ * of bytes allocated for each buffer will be 512
+ */
+#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)
+
+enum mvpp2_bm_type {
+	MVPP2_BM_FREE,
+	MVPP2_BM_SWF_LONG,
+	MVPP2_BM_SWF_SHORT
+};
+
+/* Definitions */
+
+/* Shared Packet Processor resources */
+struct mvpp2 {
+	/* Shared registers' base addresses */
+	void __iomem *base;
+	void __iomem *lms_base;
+
+	/* Common clocks */
+	struct clk *pp_clk;
+	struct clk *gop_clk;
+
+	/* List of pointers to port structures */
+	struct mvpp2_port **port_list;
+
+	/* Aggregated TXQs */
+	struct mvpp2_tx_queue *aggr_txqs;
+
+	/* BM pools */
+	struct mvpp2_bm_pool *bm_pools;
+
+	/* PRS shadow table */
+	struct mvpp2_prs_shadow *prs_shadow;
+	/* PRS auxiliary table for double vlan entries control */
+	bool *prs_double_vlans;
+
+	/* Tclk value */
+	u32 tclk;
+
+	spinlock_t lock;
+};
+
+struct mvpp2_pcpu_stats {
+	struct	u64_stats_sync syncp;
+	u64	rx_packets;
+	u64	rx_bytes;
+	u64	tx_packets;
+	u64	tx_bytes;
+};
+
+struct mvpp2_port {
+	u8 id;
+
+	struct mvpp2 *pp2;
+
+	/* Per-port registers' base address */
+	void __iomem *base;
+
+	struct mvpp2_rx_queue **rxqs;
+	struct mvpp2_tx_queue **txqs;
+	struct net_device *dev;
+
+	int pkt_size;
+
+	u32 pending_cause_rx;
+	struct napi_struct napi;
+
+	/* Flags */
+	unsigned long flags;
+
+	u8 mcast_count[256];
+	u16 tx_ring_size;
+	u16 rx_ring_size;
+	struct mvpp2_pcpu_stats *stats;
+
+	struct phy_device *phy_dev;
+	phy_interface_t phy_interface;
+	struct device_node *phy_node;
+	unsigned int link;
+	unsigned int duplex;
+	unsigned int speed;
+
+	struct mvpp2_bm_pool *pool_long;
+	struct mvpp2_bm_pool *pool_short;
+
+	/* Number of physical transmit ports - should be set to 1 for
+	 * ethernet ports and 16 for PON port
+	 */
+	u8 txp_num;
+
+	/* Index of first port's physical RXQ */
+	u8 first_rxq;
+};
+
+/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
+ * layout of the transmit and reception DMA descriptors, and their
+ * layout is therefore defined by the hardware design
+ */
+
+#define MVPP2_TXD_L3_OFF_SHIFT		0
+#define MVPP2_TXD_IP_HLEN_SHIFT		8
+#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
+#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
+#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
+#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
+#define MVPP2_TXD_L4_UDP		BIT(24)
+#define MVPP2_TXD_L3_IP6		BIT(26)
+#define MVPP2_TXD_L_DESC		BIT(28)
+#define MVPP2_TXD_F_DESC		BIT(29)
+
+#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
+#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
+#define MVPP2_RXD_ERR_CRC		0x0
+#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
+#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
+#define MVPP2_RXD_BM_POOL_ID_OFFS	16
+#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
+#define MVPP2_RXD_HWF_SYNC		BIT(21)
+#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
+#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
+#define MVPP2_RXD_L4_TCP		BIT(25)
+#define MVPP2_RXD_L4_UDP		BIT(26)
+#define MVPP2_RXD_L3_IP4		BIT(28)
+#define MVPP2_RXD_L3_IP6		BIT(30)
+#define MVPP2_RXD_BUF_HDR		BIT(31)
+
+struct mvpp2_tx_desc {
+	u32 command;		/* Options used by HW for packet transmitting.*/
+	u8  packet_offset;	/* the offset from the buffer beginning	*/
+	u8  phys_txq;		/* destination queue ID			*/
+	u16 data_size;		/* data size of transmitted packet in bytes */
+	u32 buf_phys_addr;	/* physical addr of transmitted buffer	*/
+	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
+	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
+	u32 reserved2;		/* reserved (for future use)		*/
+};
+
+struct mvpp2_rx_desc {
+	u32 status;		/* info about received packet		*/
+	u16 reserved1;		/* parser_info (for future use, PnC)	*/
+	u16 data_size;		/* size of received packet in bytes	*/
+	u32 buf_phys_addr;	/* physical address of the buffer	*/
+	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
+	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
+	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
+	u8  reserved4;		/* bm_qset (for future use, BM)		*/
+	u8  reserved5;
+	u16 reserved6;		/* classify_info (for future use, PnC)	*/
+	u32 reserved7;		/* flow_id (for future use, PnC) */
+	u32 reserved8;
+};
+
+/* Per-CPU Tx queue control */
+struct mvpp2_txq_pcpu {
+	int cpu;
+
+	/* Number of Tx DMA descriptors in the descriptor ring */
+	int size;
+
+	/* Number of currently used Tx DMA descriptor in the
+	 * descriptor ring
+	 */
+	int count;
+
+	/* Number of Tx DMA descriptors reserved for each CPU */
+	int reserved_num;
+
+	/* Array of transmitted skb */
+	struct sk_buff **tx_skb;
+
+	/* Index of last TX DMA descriptor that was inserted */
+	int txq_put_index;
+
+	/* Index of the TX DMA descriptor to be cleaned up */
+	int txq_get_index;
+};
+
+struct mvpp2_tx_queue {
+	/* Physical number of this Tx queue */
+	u8 id;
+
+	/* Logical number of this Tx queue */
+	u8 log_id;
+
+	/* Number of port's egress port - 0 for ethernet, 0-15 for PON */
+	u8 txp;
+
+	/* Number of Tx DMA descriptors in the descriptor ring */
+	int size;
+
+	/* Number of Tx DMA descriptors to be used in software forwarding */
+	int swf_size;
+
+	/* Number of currently used Tx DMA descriptor in the
+	 * descriptor ring
+	 */
+	int count;
+
+	/* Per-CPU control of physical Tx queues */
+	struct mvpp2_txq_pcpu __percpu *pcpu;
+
+	/* Array of transmitted skb */
+	struct sk_buff **tx_skb;
+
+	u32 done_pkts_coal;
+
+	/* Virtual address of thex Tx DMA descriptors array */
+	struct mvpp2_tx_desc *descs;
+
+	/* DMA address of the Tx DMA descriptors array */
+	dma_addr_t descs_phys;
+
+	/* Index of the last Tx DMA descriptor */
+	int last_desc;
+
+	/* Index of the next Tx DMA descriptor to process */
+	int next_desc_to_proc;
+
+	struct mvpp2 *pp2;
+};
+
+struct mvpp2_rx_queue {
+	/* RX queue number, in the range 0-31 for physical RXQs */
+	u8 id;
+
+	/* Num of rx descriptors in the rx descriptor ring */
+	int size;
+
+	u32 pkts_coal;
+	u32 time_coal;
+
+	/* Virtual address of the RX DMA descriptors array */
+	struct mvpp2_rx_desc *descs;
+
+	/* DMA address of the RX DMA descriptors array */
+	dma_addr_t descs_phys;
+
+	/* Index of the last RX DMA descriptor */
+	int last_desc;
+
+	/* Index of the next RX DMA descriptor to process */
+	int next_desc_to_proc;
+
+	/* ID of port to which physical RXQ is mapped */
+	int port;
+
+	/* Port's logic RXQ number to which physical RXQ is mapped */
+	int logic_rxq;
+};
+
+union mvpp2_prs_tcam_entry {
+	u32 word[MVPP2_PRS_TCAM_WORDS];
+	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
+};
+
+union mvpp2_prs_sram_entry {
+	u32 word[MVPP2_PRS_SRAM_WORDS];
+	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
+};
+
+struct mvpp2_prs_entry {
+	u32 index;
+	union mvpp2_prs_tcam_entry tcam;
+	union mvpp2_prs_sram_entry sram;
+};
+
+struct mvpp2_prs_shadow {
+	bool valid;
+	bool finish;
+
+	/* Lookup ID */
+	int lu;
+
+	/* User defined offset */
+	int udf;
+
+	/* Result info */
+	u32 ri;
+	u32 ri_mask;
+};
+
+struct mvpp2_cls_flow_entry {
+	u32 index;
+	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
+};
+
+struct mvpp2_cls_lkp_entry {
+	u32 lkpid;
+	u32 way;
+	u32 data;
+};
+
+struct mvpp2_bm_pool {
+	/* Pool number in the range 0-7 */
+	int id;
+	enum mvpp2_bm_type type;
+
+	/* Buffer Pointers Pool External (BPPE) size */
+	int size;
+	/* Number of buffers for this pool */
+	int buf_num;
+	/* Pool buffer size */
+	int buf_size;
+	/* Packet size */
+	int pkt_size;
+
+	/* BPPE virtual base address */
+	u32 *virt_addr;
+	/* BPPE physical base address */
+	dma_addr_t phys_addr;
+
+	/* Ports using BM pool */
+	u32 port_map;
+
+	/* Occupied buffers indicator */
+	atomic_t in_use;
+	int in_use_thresh;
+
+	spinlock_t lock;
+};
+
+struct mvpp2_buff_hdr {
+	u32 next_buff_phys_addr;
+	u32 next_buff_virt_addr;
+	u16 byte_count;
+	u16 info;
+	u8  reserved1;		/* bm_qset (for future use, BM)		*/
+};
+
+/* Buffer header info bits */
+#define MVPP2_B_HDR_INFO_MC_ID_MASK	0xfff
+#define MVPP2_B_HDR_INFO_MC_ID(info)	((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
+#define MVPP2_B_HDR_INFO_LAST_OFFS	12
+#define MVPP2_B_HDR_INFO_LAST_MASK	BIT(12)
+#define MVPP2_B_HDR_INFO_IS_LAST(info) \
+	   ((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
+
+/* Static declaractions */
+
+/* Number of RXQs used by single port */
+static int rxq_number = MVPP2_MAX_RXQ;
+/* Number of TXQs used by single port */
+static int txq_number = MVPP2_MAX_TXQ;
+
+#define MVPP2_DRIVER_NAME "mvpp2"
+#define MVPP2_DRIVER_VERSION "1.0"
+
+/* Utility/helper methods */
+
+static void mvpp2_write(struct mvpp2 *pp2, u32 offset, u32 data)
+{
+	writel(data, pp2->base + offset);
+}
+
+static u32 mvpp2_read(struct mvpp2 *pp2, u32 offset)
+{
+	return readl(pp2->base + offset);
+}
+
+static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
+{
+	txq_pcpu->txq_get_index++;
+	if (txq_pcpu->txq_get_index == txq_pcpu->size)
+		txq_pcpu->txq_get_index = 0;
+}
+
+static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
+			      struct sk_buff *skb)
+{
+	txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
+	txq_pcpu->txq_put_index++;
+	if (txq_pcpu->txq_put_index == txq_pcpu->size)
+		txq_pcpu->txq_put_index = 0;
+}
+
+static inline void mvpp2_mib_counters_clear(struct mvpp2_port *pp)
+{
+	int i;
+	u32 dummy;
+
+	/* Perform dummy reads from MIB counters */
+	for (i = 0; i < MVPP2_MIB_LATE_COLLISION; i += 4)
+		dummy = readl(pp->pp2->lms_base +
+			      (MVPP2_MIB_COUNTERS_BASE(pp->id) + i));
+}
+
+/* Get number of physical egress port */
+static int mvpp2_egress_port(struct mvpp2_port *pp, int txp)
+{
+	return MVPP2_MAX_TCONT + pp->id + txp;
+}
+
+/* Get number of physical TXQ */
+static inline int mvpp2_txq_phys(int port, int txq)
+{
+	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
+}
+
+/* Parser configuration routines */
+
+/* Update parser tcam and sram hw entries */
+static int mvpp2_prs_hw_write(struct mvpp2 *pp2, struct mvpp2_prs_entry *pe)
+{
+	int i;
+
+	if (!pe)
+		return -ENOMEM;
+
+	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+		return -EINVAL;
+
+	/* Clear entry invalidation bit */
+	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
+
+	/* Write tcam index - indirect access */
+	mvpp2_write(pp2, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+		mvpp2_write(pp2, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
+
+	/* Write sram index - indirect access */
+	mvpp2_write(pp2, MVPP2_PRS_SRAM_IDX_REG, pe->index);
+	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+		mvpp2_write(pp2, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
+
+	dprintk("Write parser entry %d\n", pe->index);
+
+	return 0;
+}
+
+/* Read tcam entry from hw */
+static int mvpp2_prs_hw_read(struct mvpp2 *pp2, struct mvpp2_prs_entry *pe)
+{
+	int i;
+
+	if (!pe)
+		return -ENOMEM;
+
+	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+		return -EINVAL;
+
+	/* Write tcam index - indirect access */
+	mvpp2_write(pp2, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+
+	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(pp2,
+			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
+	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
+		return MVPP2_PRS_TCAM_ENTRY_INVALID;
+
+	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+		pe->tcam.word[i] = mvpp2_read(pp2, MVPP2_PRS_TCAM_DATA_REG(i));
+
+	/* Write sram index - indirect access */
+	mvpp2_write(pp2, MVPP2_PRS_SRAM_IDX_REG, pe->index);
+
+	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+		pe->sram.word[i] = mvpp2_read(pp2, MVPP2_PRS_SRAM_DATA_REG(i));
+
+	dprintk("Read parser entry %d\n", pe->index);
+
+	return 0;
+}
+
+/* Invalidate tcam hw entry */
+static void mvpp2_prs_hw_inv(struct mvpp2 *pp2, int index)
+{
+	/* Write index - indirect access */
+	mvpp2_write(pp2, MVPP2_PRS_TCAM_IDX_REG, index);
+	mvpp2_write(pp2, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
+		    MVPP2_PRS_TCAM_INV_MASK);
+}
+
+/* Enable shadow table entry and set its lookup ID */
+static void mvpp2_prs_shadow_set(struct mvpp2 *pp2, int index, int lu)
+{
+	pp2->prs_shadow[index].valid = true;
+	pp2->prs_shadow[index].lu = lu;
+}
+
+/* Update ri fields in shadow table entry */
+static void mvpp2_prs_shadow_ri_set(struct mvpp2 *pp2, int index,
+				    unsigned int ri, unsigned int ri_mask)
+{
+	pp2->prs_shadow[index].ri_mask = ri_mask;
+	pp2->prs_shadow[index].ri = ri;
+}
+
+/* Update lookup field in tcam sw entry */
+static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
+{
+	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
+	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE)] =
+							      MVPP2_PRS_LU_MASK;
+}
+
+/* Update mask for single port in tcam sw entry */
+static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
+				    unsigned int port, bool add)
+{
+	if (add)
+		pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE)]
+								&= ~(1 << port);
+	else
+		pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE)]
+								 |= (1 << port);
+}
+/* Update port map in tcam sw entry */
+static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
+					unsigned int ports)
+{
+	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
+	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE)] &=
+					  (unsigned char)(~MVPP2_PRS_PORT_MASK);
+	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE)] |=
+					       ((~ports) & MVPP2_PRS_PORT_MASK);
+}
+
+/* Obtain port map from tcam sw entry */
+static void mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe,
+					unsigned int *ports)
+{
+	*ports =
+	    (~pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE)]) &
+	    MVPP2_PRS_PORT_MASK;
+}
+
+/* Set byte of data and its enable bits in tcam sw entry */
+static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
+					 unsigned int offs, unsigned char byte,
+					 unsigned char enable)
+{
+	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
+	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
+}
+
+/* Clear byte of data and its enable bits in tcam sw entry */
+static void mvpp2_prs_tcam_data_byte_clear(struct mvpp2_prs_entry *pe,
+					 unsigned int offs)
+{
+	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = 0x0;
+	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = 0x0;
+}
+
+/* Get byte of data and its enable bits from tcam sw entry */
+static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
+					 unsigned int offs, unsigned char *byte,
+					 unsigned char *enable)
+{
+	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
+	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
+}
+
+/* Compare tcam data bytes with a pattern */
+static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe,
+					   unsigned int offs, unsigned int size,
+					   unsigned char *bytes)
+{
+	unsigned char byte;
+	unsigned char mask;
+	int index;
+
+	for (index = 0; index < size; index++) {
+		mvpp2_prs_tcam_data_byte_get(pe, offs + index, &byte, &mask);
+
+		if (byte != bytes[index])
+			return -1;
+	}
+	return 0;
+}
+
+/* Update ai bits in tcam sw entry */
+static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
+				     unsigned int bits, unsigned int enable)
+{
+	int i;
+
+	for (i = 0; i < MVPP2_PRS_AI_BITS; i++)
+		if (enable & BIT(i)) {
+			if (bits & BIT(i))
+				pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE] |=
+								       (1 << i);
+			else
+				pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE] &=
+								      ~(1 << i);
+		}
+
+	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_AI_BYTE)] |= enable;
+}
+
+/* Get ai bits from tcam sw entry */
+static void mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe, unsigned int *bits,
+				unsigned int *enable)
+{
+	*bits = pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
+	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_AI_BYTE)];
+}
+
+/* Set ethertype in tcam sw entry */
+static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
+				  unsigned short ethertype)
+{
+	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
+	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
+}
+
+/* Set bits in sram sw entry */
+static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
+				    int val)
+{
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
+}
+
+/* Clear bits in sram sw entry */
+static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
+				      int val)
+{
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
+}
+
+/* Update ri bits in sram sw entry */
+static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
+				     unsigned int bits, unsigned int mask)
+{
+	unsigned int i;
+
+	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++)
+		if (mask & BIT(i)) {
+			if (bits & BIT(i))
+				mvpp2_prs_sram_bits_set(pe,
+					      MVPP2_PRS_SRAM_RI_OFFS + i, 1);
+			else
+				mvpp2_prs_sram_bits_clear(pe,
+					      MVPP2_PRS_SRAM_RI_OFFS + i, 1);
+			mvpp2_prs_sram_bits_set(pe,
+					 MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
+		}
+}
+
+/* Obtain ri bits and their mask from sram sw entry */
+static void mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe,
+				  unsigned int *bits, unsigned int *mask)
+{
+	*bits = pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
+	*mask = pe->sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD];
+}
+
+/* Update ai bits in sram sw entry */
+static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
+				     unsigned int bits, unsigned int mask)
+{
+	unsigned int i;
+
+	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++)
+		if (mask & BIT(i)) {
+			if (bits & BIT(i))
+				mvpp2_prs_sram_bits_set(pe,
+					      MVPP2_PRS_SRAM_AI_OFFS + i, 1);
+			else
+				mvpp2_prs_sram_bits_clear(pe,
+					      MVPP2_PRS_SRAM_AI_OFFS + i, 1);
+			mvpp2_prs_sram_bits_set(pe,
+					 MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
+		}
+}
+
+/* Read ai bits from sram sw entry */
+static void mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe,
+				  unsigned int *bits, unsigned int *enable)
+{
+	*bits = (pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS)]
+		>> (MVPP2_PRS_SRAM_AI_OFFS % 8)) |
+		(pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS +
+		MVPP2_PRS_SRAM_AI_CTRL_BITS)] <<
+		(8 - (MVPP2_PRS_SRAM_AI_OFFS % 8)));
+
+	*enable = (pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_CTRL_OFFS)]
+		  >> (MVPP2_PRS_SRAM_AI_CTRL_OFFS % 8)) |
+		  (pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_CTRL_OFFS +
+		  MVPP2_PRS_SRAM_AI_CTRL_BITS)] <<
+		  (8 - (MVPP2_PRS_SRAM_AI_CTRL_OFFS % 8)));
+
+	*bits &= MVPP2_PRS_SRAM_AI_MASK;
+	*enable &= MVPP2_PRS_SRAM_AI_MASK;
+}
+
+/* In sram sw entry set lookup ID field of the tcam key to be used in the next
+ * lookup interation
+ */
+static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
+				       unsigned int lu)
+{
+	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_NEXT_LU_OFFS,
+				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
+	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_NEXT_LU_OFFS, lu);
+}
+
+/* In the sram sw entry set sign and value of the next lookup offset
+ * and the offset value generated to the classifier
+ */
+static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
+				     unsigned int op)
+{
+	/* Set sign */
+	if (shift < 0) {
+		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
+		shift = 0 - shift;
+	} else
+		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
+
+	/* Set value */
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
+							   (unsigned char)shift;
+
+	/* Reset and set operation */
+	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
+				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
+	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
+
+	/* Set base offset as current */
+	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
+}
+
+/* In the sram sw entry set sign and value of the user defined offset
+ * generated to the classifier
+ */
+static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
+				      unsigned int type, int offset,
+				      unsigned int op)
+{
+	/* Set sign */
+	if (offset < 0) {
+		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
+		offset = 0 - offset;
+	} else
+		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
+
+	/* Set value */
+	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
+				  MVPP2_PRS_SRAM_UDF_MASK);
+	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
+					MVPP2_PRS_SRAM_UDF_BITS)] &=
+	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
+					MVPP2_PRS_SRAM_UDF_BITS)] |=
+				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
+
+	/* Set offset type */
+	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
+				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
+	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
+
+	/* Set offset operation */
+	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
+	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
+
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
+					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
+					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
+				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
+
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
+					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
+			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
+
+	/* Set base offset as current */
+	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
+}
+
+/* Find parser flow entry */
+static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *pp2, int flow)
+{
+	struct mvpp2_prs_entry *pe;
+	unsigned int enable;
+	unsigned int bits;
+	int tid;
+
+	pe = kzalloc(sizeof(struct mvpp2_prs_entry), GFP_KERNEL);
+	if (!pe)
+		return NULL;
+	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
+
+	/* Go through the all entires with MVPP2_PRS_LU_FLOWS */
+	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
+		if ((!pp2->prs_shadow[tid].valid) ||
+		    (pp2->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS))
+			continue;
+
+		pe->index = tid;
+		mvpp2_prs_hw_read(pp2, pe);
+		mvpp2_prs_sram_ai_get(pe, &bits, &enable);
+
+		/* Sram store classification lookup ID in AI bits [5:0] */
+		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
+			return pe;
+	}
+	kfree(pe);
+
+	return NULL;
+}
+
+/* Return first free tcam index, seeking from start to end
+ * If start < end - seek bottom-->up
+ * If start > end - seek up-->bottom
+ */
+static int mvpp2_prs_tcam_first_free(struct mvpp2 *pp2, int start, int end)
+{
+	int tid;
+	bool found = false;
+
+	if (start < end)
+		for (tid = start; tid <= end; tid++) {
+			if (!pp2->prs_shadow[tid].valid) {
+				found = true;
+				break;
+			}
+		}
+	else
+		for (tid = start; tid >= end; tid--) {
+			if (!pp2->prs_shadow[tid].valid) {
+				found = true;
+				break;
+			}
+		}
+
+	if (found && (tid < MVPP2_PRS_TCAM_SRAM_SIZE) && (tid > -1))
+		return tid;
+	else
+		return -ERANGE;
+}
+
+/* Enable/disable dropping all mac da's */
+static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *pp2, int port, bool add)
+{
+	struct mvpp2_prs_entry pe;
+
+	if (pp2->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
+		/* Entry exist - update port only */
+		pe.index = MVPP2_PE_DROP_ALL;
+		mvpp2_prs_hw_read(pp2, &pe);
+	} else {
+		/* Entry doesn't exist - create new */
+		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+		pe.index = MVPP2_PE_DROP_ALL;
+
+		/* Non-promiscuous mode for all ports - DROP unknown packets */
+		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+					 MVPP2_PRS_RI_DROP_MASK);
+
+		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+		/* Update shadow table */
+		mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_MAC);
+
+		/* Mask all ports */
+		mvpp2_prs_tcam_port_map_set(&pe, 0);
+	}
+
+	/* Update port mask */
+	mvpp2_prs_tcam_port_set(&pe, port, add);
+
+	mvpp2_prs_hw_write(pp2, &pe);
+}
+
+/* Set port to promiscuous mode */
+static void mvpp2_prs_mac_promisc_set(struct mvpp2 *pp2, int port, bool add)
+{
+	struct mvpp2_prs_entry pe;
+
+	/* Promiscous mode - Accept unknown packets */
+
+	if (pp2->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
+		/* Entry exist - update port only */
+		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
+		mvpp2_prs_hw_read(pp2, &pe);
+	} else {
+		/* Entry doesn't exist - create new */
+		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
+
+		/* Continue - set next lookup */
+		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
+
+		/* Set result info bits */
+		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
+					 MVPP2_PRS_RI_L2_CAST_MASK);
+
+		/* Shift to ethertype */
+		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
+					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+		/* Mask all ports */
+		mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+		/* Update shadow table */
+		mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_MAC);
+	}
+
+	/* Update port mask */
+	mvpp2_prs_tcam_port_set(&pe, port, add);
+
+	mvpp2_prs_hw_write(pp2, &pe);
+}
+
+/* Accept all multicast */
+static void mvpp2_prs_mac_all_multi_set(struct mvpp2 *pp2, int port, bool add)
+{
+	struct mvpp2_prs_entry pe;
+	unsigned int index = 0;
+	unsigned int i;
+
+	/* Ethernet multicast address first byte is
+	 * 0x01 for IPv4 and 0x33 for IPv6
+	 */
+	unsigned char da_mc[MVPP2_PRS_MAX_MAC_MC] = { 0x01, 0x33 };
+
+	for (i = MVPP2_PRS_IP4_MAC_MC; i < MVPP2_PRS_MAX_MAC_MC; i++) {
+		if (i == MVPP2_PRS_IP4_MAC_MC)
+			index = MVPP2_PE_MAC_MC_ALL;
+		else
+			index = MVPP2_PE_MAC_MC_IP6;
+
+		if (pp2->prs_shadow[index].valid) {
+			/* Entry exist - update port only */
+			pe.index = index;
+			mvpp2_prs_hw_read(pp2, &pe);
+		} else {
+			/* Entry doesn't exist - create new */
+			memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+			mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+			pe.index = index;
+
+			/* Continue - set next lookup */
+			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
+
+			/* Set result info bits */
+			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
+						 MVPP2_PRS_RI_L2_CAST_MASK);
+
+			/* Update tcam entry data first byte */
+			mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc[i], 0xff);
+
+			/* Shift to ethertype */
+			mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
+					       MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+			/* Mask all ports */
+			mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+			/* Update shadow table */
+			mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_MAC);
+		}
+
+		/* Update port mask */
+		mvpp2_prs_tcam_port_set(&pe, port, add);
+
+		mvpp2_prs_hw_write(pp2, &pe);
+	}
+}
+
+/* Set entry for dsa packets */
+static void mvpp2_prs_dsa_tag_set(struct mvpp2 *pp2, int port, bool add,
+				  bool tagged, bool extend)
+{
+	struct mvpp2_prs_entry pe;
+	int tid;
+	int shift;
+
+	if (extend) {
+		if (tagged)
+			tid = MVPP2_PE_EDSA_TAGGED;
+		else
+			tid = MVPP2_PE_EDSA_UNTAGGED;
+		shift = 8;
+	} else {
+		if (tagged)
+			tid = MVPP2_PE_DSA_TAGGED;
+		else
+			tid = MVPP2_PE_DSA_UNTAGGED;
+		shift = 4;
+	}
+
+	if (pp2->prs_shadow[tid].valid) {
+		/* Entry exist - update port only */
+		pe.index = tid;
+		mvpp2_prs_hw_read(pp2, &pe);
+	} else {
+		/* Entry doesn't exist - create new */
+		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+		pe.index = tid;
+
+		/* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag*/
+		mvpp2_prs_sram_shift_set(&pe, shift,
+					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+		/* Update shadow table */
+		mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_DSA);
+
+		if (tagged) {
+			/* Set tagged bit in DSA tag */
+			mvpp2_prs_tcam_data_byte_set(&pe, 0,
+						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
+						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
+			/* Clear all ai bits for next iteration */
+			mvpp2_prs_sram_ai_update(&pe, 0,
+						 MVPP2_PRS_SRAM_AI_MASK);
+			/* If packet is tagged continue check vlans */
+			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+		} else {
+			/* Set result info bits to 'no vlans' */
+			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+						 MVPP2_PRS_RI_VLAN_MASK);
+			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+		}
+
+		/* Mask all ports */
+		mvpp2_prs_tcam_port_map_set(&pe, 0);
+	}
+
+	/* Update port mask */
+	mvpp2_prs_tcam_port_set(&pe, port, add);
+
+	mvpp2_prs_hw_write(pp2, &pe);
+}
+
+/* Set entry for dsa ethertype */
+static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *pp2, int port,
+					   bool add, bool tagged, bool extend)
+{
+	struct mvpp2_prs_entry pe;
+	int tid;
+	int shift;
+	int port_mask;
+
+	if (extend) {
+		if (tagged)
+			tid = MVPP2_PE_ETYPE_EDSA_TAGGED;
+		else
+			tid = MVPP2_PE_ETYPE_EDSA_UNTAGGED;
+		port_mask = 0;
+		shift = 8;
+	} else {
+		if (tagged)
+			tid = MVPP2_PE_ETYPE_DSA_TAGGED;
+		else
+			tid = MVPP2_PE_ETYPE_DSA_UNTAGGED;
+		port_mask = MVPP2_PRS_PORT_MASK;
+		shift = 4;
+	}
+
+	if (pp2->prs_shadow[tid].valid) {
+		/* Entry exist - update port only */
+		pe.index = tid;
+		mvpp2_prs_hw_read(pp2, &pe);
+	} else {
+		/* Entry doesn't exist - create new */
+		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+		pe.index = tid;
+
+		/* Set ethertype*/
+		mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
+		mvpp2_prs_match_etype(&pe, 2, 0);
+
+		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
+				       MVPP2_PRS_RI_DSA_MASK);
+		/* Shift ethertype + 2 byte reserved + tag*/
+		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
+					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+		/* Update shadow table */
+		mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_DSA);
+
+		if (tagged) {
+			/* Set tagged bit in DSA tag */
+			mvpp2_prs_tcam_data_byte_set(&pe,
+						 MVPP2_ETH_TYPE_LEN + 2 + 3,
+						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
+						 MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
+			/* Clear all ai bits for next iteration */
+			mvpp2_prs_sram_ai_update(&pe, 0,
+						 MVPP2_PRS_SRAM_AI_MASK);
+			/* If packet is tagged continue check vlans */
+			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+		} else {
+			/* Set result info bits to 'no vlans' */
+			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+						 MVPP2_PRS_RI_VLAN_MASK);
+			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+		}
+		/* Mask/unmask all ports, depending on dsa type */
+		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
+	}
+
+	/* Update port mask */
+	mvpp2_prs_tcam_port_set(&pe, port, add);
+
+	mvpp2_prs_hw_write(pp2, &pe);
+}
+
+/* Search for existing single/triple vlan entry */
+static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *pp2,
+						   unsigned short tpid, int ai)
+{
+	struct mvpp2_prs_entry *pe;
+	unsigned int ri_bits;
+	unsigned int ai_bits;
+	unsigned int enable;
+	unsigned char tpid_arr[2];
+	int tid;
+
+	pe = kzalloc(sizeof(struct mvpp2_prs_entry), GFP_KERNEL);
+	if (!pe)
+		return NULL;
+	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+
+	tpid_arr[0] = ((unsigned char *)&tpid)[1];
+	tpid_arr[1] = ((unsigned char *)&tpid)[0];
+
+	/* Go through the all entries with MVPP2_PRS_LU_VLAN */
+	for (tid = MVPP2_PE_FIRST_FREE_TID;
+	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+		if ((!pp2->prs_shadow[tid].valid) ||
+		    (pp2->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN))
+			continue;
+
+		pe->index = tid;
+
+		mvpp2_prs_hw_read(pp2, pe);
+		if (!mvpp2_prs_tcam_data_cmp(pe, 0, 2, tpid_arr)) {
+			/* Get vlan type */
+			mvpp2_prs_sram_ri_get(pe, &ri_bits, &enable);
+			ri_bits = (ri_bits & MVPP2_PRS_RI_VLAN_MASK);
+
+			/* Get current ai value from tcam */
+			mvpp2_prs_tcam_ai_get(pe, &ai_bits, &enable);
+			/* Clear double vlan bit */
+			ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
+
+			if (ai != ai_bits)
+				continue;
+
+			if ((ri_bits == MVPP2_PRS_RI_VLAN_SINGLE) ||
+			    (ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE))
+				return pe;
+		}
+	}
+	kfree(pe);
+
+	return NULL;
+}
+
+/* Add/update single/triple vlan entry */
+static int mvpp2_prs_vlan_add(struct mvpp2 *pp2, unsigned short tpid, int ai,
+			      unsigned int port_map)
+{
+	struct mvpp2_prs_entry *pe = NULL;
+	unsigned int bits;
+	unsigned int enable;
+	int tid_aux;
+	int tid;
+
+	pe = mvpp2_prs_vlan_find(pp2, tpid, ai);
+
+	if (!pe) {
+		/* Create new tcam entry */
+		tid = mvpp2_prs_tcam_first_free(pp2, MVPP2_PE_LAST_FREE_TID,
+						MVPP2_PE_FIRST_FREE_TID);
+		if (tid == -ERANGE) {
+			dprintk("%s: No free TCAM entry\n", __func__);
+			return tid;
+		}
+
+		pe = kzalloc(sizeof(struct mvpp2_prs_entry), GFP_KERNEL);
+		if (!pe)
+			return -ENOMEM;
+
+		/* Get last double vlan tid */
+		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
+		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
+			if ((!pp2->prs_shadow[tid_aux].valid) ||
+			    (pp2->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN))
+				continue;
+
+			pe->index = tid_aux;
+			mvpp2_prs_hw_read(pp2, pe);
+			mvpp2_prs_sram_ri_get(pe, &bits, &enable);
+			if ((bits & MVPP2_PRS_RI_VLAN_MASK) ==
+			    MVPP2_PRS_RI_VLAN_DOUBLE)
+				break;
+		}
+		if (tid <= tid_aux) {
+			dprintk("%s: Too much triple or single vlans entries\n",
+				   __func__);
+			return -1;
+		}
+
+		memset(pe, 0 , sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+		pe->index = tid;
+
+		mvpp2_prs_match_etype(pe, 0, tpid);
+
+		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
+		/* Shift 4 bytes - skip 1 vlan tag */
+		mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
+					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+		/* Clear all ai bits for next iteration */
+		mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
+			dprintk("%s: Adding single-VLAN\n", __func__);
+			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
+						 MVPP2_PRS_RI_VLAN_MASK);
+		} else {
+			dprintk("%s: Adding triple-VLAN-%d\n", __func__, ai);
+			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
+			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
+						 MVPP2_PRS_RI_VLAN_MASK);
+		}
+		mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
+
+		mvpp2_prs_shadow_set(pp2, pe->index, MVPP2_PRS_LU_VLAN);
+	}
+	/* Update ports' mask */
+	mvpp2_prs_tcam_port_map_set(pe, port_map);
+
+	mvpp2_prs_hw_write(pp2, pe);
+
+	kfree(pe);
+
+	return 0;
+}
+
+/* Get first free double vlan ai number */
+static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *pp2)
+{
+	int i;
+
+	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++)
+		if (!pp2->prs_double_vlans[i])
+			return i;
+
+	return -ERANGE;
+}
+
+/* Search for existing double vlan entry */
+static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *pp2,
+							  unsigned short tpid1,
+							  unsigned short tpid2)
+{
+	struct mvpp2_prs_entry *pe;
+	unsigned int enable;
+	unsigned int bits;
+	unsigned char tpid_arr1[2];
+	unsigned char tpid_arr2[2];
+	int tid;
+
+	tpid_arr1[0] = ((unsigned char *)&tpid1)[1];
+	tpid_arr1[1] = ((unsigned char *)&tpid1)[0];
+
+	tpid_arr2[0] = ((unsigned char *)&tpid2)[1];
+	tpid_arr2[1] = ((unsigned char *)&tpid2)[0];
+
+	pe = kzalloc(sizeof(struct mvpp2_prs_entry), GFP_KERNEL);
+	if (!pe)
+		return NULL;
+	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+
+	/* Go through the all entries with MVPP2_PRS_LU_VLAN */
+	for (tid = MVPP2_PE_FIRST_FREE_TID;
+	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+		if ((!pp2->prs_shadow[tid].valid) ||
+		    (pp2->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN))
+			continue;
+
+		pe->index = tid;
+		mvpp2_prs_hw_read(pp2, pe);
+
+		if (!mvpp2_prs_tcam_data_cmp(pe, 0, 2, tpid_arr1) &&
+		    !mvpp2_prs_tcam_data_cmp(pe, 4, 2, tpid_arr2)) {
+			mvpp2_prs_sram_ri_get(pe, &bits, &enable);
+			if ((bits & MVPP2_PRS_RI_VLAN_MASK) ==
+			    MVPP2_PRS_RI_VLAN_DOUBLE)
+				return pe;
+		}
+	}
+	kfree(pe);
+
+	return NULL;
+}
+
+/* Add or update double vlan entry */
+static int mvpp2_prs_double_vlan_add(struct mvpp2 *pp2, unsigned short tpid1,
+				     unsigned short tpid2,
+				     unsigned int port_map)
+{
+	struct mvpp2_prs_entry *pe = NULL;
+	unsigned int bits;
+	unsigned int enable;
+	int tid_aux;
+	int tid;
+	int ai;
+
+	pe = mvpp2_prs_double_vlan_find(pp2, tpid1, tpid2);
+
+	if (!pe) {
+		/* Create new tcam entry */
+		tid = mvpp2_prs_tcam_first_free(pp2, MVPP2_PE_FIRST_FREE_TID,
+						MVPP2_PE_LAST_FREE_TID);
+		if (tid == -ERANGE) {
+			dprintk("%s: No free TCAM entry\n", __func__);
+			return tid;
+		}
+
+		pe = kzalloc(sizeof(struct mvpp2_prs_entry), GFP_KERNEL);
+		if (!pe)
+			return -ENOMEM;
+
+		/* Set ai value for new double vlan entry */
+		ai = mvpp2_prs_double_vlan_ai_free_get(pp2);
+		if (ai == -ERANGE) {
+			dprintk("%s: Can't add one more double vlan entry\n",
+				__func__);
+			return ai;
+		}
+
+		/* Get first single/triple vlan tid */
+		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
+		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
+			if ((!pp2->prs_shadow[tid_aux].valid) ||
+			    (pp2->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN))
+				continue;
+
+			pe->index = tid_aux;
+			mvpp2_prs_hw_read(pp2, pe);
+			mvpp2_prs_sram_ri_get(pe, &bits, &enable);
+			bits &= MVPP2_PRS_RI_VLAN_MASK;
+			if ((bits == MVPP2_PRS_RI_VLAN_SINGLE) ||
+			    (bits == MVPP2_PRS_RI_VLAN_TRIPLE))
+				break;
+		}
+		if (tid >= tid_aux) {
+			dprintk("%s: Double vlans entries overlapping\n",
+				__func__);
+			return -ERANGE;
+		}
+
+		memset(pe, 0 , sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+		pe->index = tid;
+
+		pp2->prs_double_vlans[ai] = true;
+
+		mvpp2_prs_match_etype(pe, 0, tpid1);
+		mvpp2_prs_match_etype(pe, 4, tpid2);
+
+		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
+		/* Shift 8 bytes - skip 2 vlan tags */
+		mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
+					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
+					 MVPP2_PRS_RI_VLAN_MASK);
+		mvpp2_prs_sram_ai_update(pe, (ai | MVPP2_PRS_DBL_VLAN_AI_BIT),
+					 MVPP2_PRS_SRAM_AI_MASK);
+
+		mvpp2_prs_shadow_set(pp2, pe->index, MVPP2_PRS_LU_VLAN);
+	}
+	/* Update ports' mask */
+	mvpp2_prs_tcam_port_map_set(pe, port_map);
+
+	mvpp2_prs_hw_write(pp2, pe);
+
+	kfree(pe);
+
+	return 0;
+}
+
+/* IPv4 header parsing for fragmentation and L4 offset */
+static int mvpp2_prs_ip4_proto(struct mvpp2 *pp2, unsigned short proto,
+			       unsigned int ri, unsigned int ri_mask)
+{
+	struct mvpp2_prs_entry pe;
+	int tid;
+
+	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
+	    (proto != IPPROTO_IGMP)) {
+		dprintk("%s: IPv4 unsupported protocol %d\n", __func__, proto);
+		return -EINVAL;
+	}
+
+	/* Fragmented packet */
+	tid = mvpp2_prs_tcam_first_free(pp2, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid == -ERANGE) {
+		dprintk("%s: No free TCAM entry\n", __func__);
+		return tid;
+	}
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	pe.index = tid;
+
+	/* Set next lu to IPv4 */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	/* Set L4 offset */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+				  sizeof(struct iphdr) - 4,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+				 MVPP2_PRS_IPV4_DIP_AI_BIT);
+	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
+				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
+
+	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
+	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	/* Not fragmented packet */
+	tid = mvpp2_prs_tcam_first_free(pp2, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid == -ERANGE) {
+		dprintk("%s: No free TCAM entry\n", __func__);
+		return tid;
+	}
+
+	pe.index = tid;
+	/* Clear ri before updating */
+	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
+
+	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
+	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	return 0;
+}
+
+/* IPv4 L3 multicast or broadcast */
+static int mvpp2_prs_ip4_cast(struct mvpp2 *pp2, unsigned short l3_cast)
+{
+	struct mvpp2_prs_entry pe;
+	int mask;
+	int tid;
+
+	tid = mvpp2_prs_tcam_first_free(pp2, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid == -ERANGE) {
+		dprintk("%s: No free TCAM entry\n", __func__);
+		return tid;
+	}
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	pe.index = tid;
+
+	switch (l3_cast) {
+	case MVPP2_PRS_L3_MULTI_CAST:
+		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
+					     MVPP2_PRS_IPV4_MC_MASK);
+		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
+					 MVPP2_PRS_RI_L3_ADDR_MASK);
+		break;
+	case  MVPP2_PRS_L3_BROAD_CAST:
+		mask = MVPP2_PRS_IPV4_BC_MASK;
+		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
+		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
+		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
+		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
+		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
+					 MVPP2_PRS_RI_L3_ADDR_MASK);
+		break;
+	default:
+		dprintk("%s: Invalid Input\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Finished: go to flowid generation */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
+	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+				 MVPP2_PRS_IPV4_DIP_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	return 0;
+}
+
+/* Set entries for protocols over IPv6  */
+static int mvpp2_prs_ip6_proto(struct mvpp2 *pp2, unsigned short proto,
+			       unsigned int ri, unsigned int ri_mask)
+{
+	struct mvpp2_prs_entry pe;
+	int tid;
+
+	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
+	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP)) {
+		dprintk("%s: IPv6 unsupported protocol %d\n", __func__, proto);
+		return -EINVAL;
+	}
+
+	tid = mvpp2_prs_tcam_first_free(pp2, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid == -ERANGE) {
+		dprintk("%s: No free TCAM entry\n", __func__);
+		return tid;
+	}
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	pe.index = tid;
+
+	/* Finished: go to flowid generation */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+				  sizeof(struct ipv6hdr) - 6,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
+	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Write HW */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	return 0;
+}
+
+/* IPv6 L3 multicast entry */
+static int mvpp2_prs_ip6_cast(struct mvpp2 *pp2, unsigned short l3_cast)
+{
+	struct mvpp2_prs_entry pe;
+	int tid;
+
+	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST) {
+		dprintk("%s: Invalid Input\n", __func__);
+		return -EINVAL;
+	}
+
+	tid = mvpp2_prs_tcam_first_free(pp2, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid == -ERANGE) {
+		dprintk("%s: No free TCAM entry\n", __func__);
+		return tid;
+	}
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	pe.index = tid;
+
+	/* Finished: go to flowid generation */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
+				 MVPP2_PRS_RI_L3_ADDR_MASK);
+	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+	/* Shift back to IPv6 NH */
+	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
+				     MVPP2_PRS_IPV6_MC_MASK);
+	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	return 0;
+}
+
+/* Parser per-port initialization */
+static void mvpp2_prs_hw_port_init(struct mvpp2 *pp2, int port, int lu_first,
+			   int lu_max, int offset)
+{
+	u32 reg_val;
+
+	/* Set lookup ID */
+	reg_val = mvpp2_read(pp2, MVPP2_PRS_INIT_LOOKUP_REG);
+	reg_val &= ~MVPP2_PRS_PORT_LU_MASK(port);
+	reg_val |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
+	mvpp2_write(pp2, MVPP2_PRS_INIT_LOOKUP_REG, reg_val);
+
+	/* Set maximum number of loops for packet received from port */
+	reg_val = mvpp2_read(pp2, MVPP2_PRS_MAX_LOOP_REG(port));
+	reg_val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
+	reg_val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
+	mvpp2_write(pp2, MVPP2_PRS_MAX_LOOP_REG(port), reg_val);
+
+	/* Set initial offset for packet header extraction for the first
+	 * searching loop
+	 */
+	reg_val = mvpp2_read(pp2, MVPP2_PRS_INIT_OFFS_REG(port));
+	reg_val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
+	reg_val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
+	mvpp2_write(pp2, MVPP2_PRS_INIT_OFFS_REG(port), reg_val);
+}
+
+/* Default flow entries initialization for all ports */
+static void mvpp2_prs_def_flow_init(struct mvpp2 *pp2)
+{
+	struct mvpp2_prs_entry pe;
+	int port;
+
+	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
+		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
+
+		/* Mask all ports */
+		mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+		/* Set flow ID*/
+		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
+		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+		/* Update shadow table and hw entry */
+		mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_FLOWS);
+		mvpp2_prs_hw_write(pp2, &pe);
+	}
+}
+
+/* Set default entry for Marvell Header field */
+static void mvpp2_prs_mh_init(struct mvpp2 *pp2)
+{
+	struct mvpp2_prs_entry pe;
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+
+	pe.index = MVPP2_PE_MH_DEFAULT;
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
+	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
+				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_MH);
+	mvpp2_prs_hw_write(pp2, &pe);
+}
+
+/* Set default entires (place holder) for promiscuous, non-promiscuous and
+ * multicast MAC addresses
+ */
+static void mvpp2_prs_mac_init(struct mvpp2 *pp2)
+{
+	struct mvpp2_prs_entry pe;
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+
+	/* Non-promiscuous mode for all ports - DROP unknown packets */
+	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+				 MVPP2_PRS_RI_DROP_MASK);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_MAC);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	/* place holders only - no ports */
+	mvpp2_prs_mac_drop_all_set(pp2, 0, false);
+	mvpp2_prs_mac_promisc_set(pp2, 0, false);
+	mvpp2_prs_mac_all_multi_set(pp2, 0, false);
+}
+
+/* Set default entries for various types of dsa packets */
+static void mvpp2_prs_dsa_init(struct mvpp2 *pp2)
+{
+	struct mvpp2_prs_entry pe;
+
+	/* None tagged EDSA entry - place holder */
+	mvpp2_prs_dsa_tag_set(pp2, 0, false, MVPP2_PRS_UNTAGGED,
+			      MVPP2_PRS_EDSA);
+
+	/* Tagged EDSA entry - place holder */
+	mvpp2_prs_dsa_tag_set(pp2, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+
+	/* None tagged DSA entry - place holder */
+	mvpp2_prs_dsa_tag_set(pp2, 0, false, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+
+	/* Tagged DSA entry - place holder */
+	mvpp2_prs_dsa_tag_set(pp2, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+
+	/* None tagged EDSA ethertype entry - place holder*/
+	mvpp2_prs_dsa_tag_ethertype_set(pp2, 0, false,
+					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+
+	/* Tagged EDSA ethertype entry - place holder*/
+	mvpp2_prs_dsa_tag_ethertype_set(pp2, 0, false,
+					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+
+	/* None tagged DSA ethertype entry */
+	mvpp2_prs_dsa_tag_ethertype_set(pp2, 0, true,
+					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+
+	/* Tagged DSA ethertype entry */
+	mvpp2_prs_dsa_tag_ethertype_set(pp2, 0, true,
+					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+
+	/* Set default entry, in case DSA or EDSA tag not found */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+	pe.index = MVPP2_PE_DSA_DEFAULT;
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+
+	/* Shift 0 bytes */
+	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_MAC);
+
+	/* Clear all sram ai bits for next iteration */
+	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	mvpp2_prs_hw_write(pp2, &pe);
+}
+
+/* Match basic ethertypes */
+static int mvpp2_prs_etype_init(struct mvpp2 *pp2)
+{
+	struct mvpp2_prs_entry pe;
+	int tid;
+
+	/* Ethertype: PPPoE */
+	tid = mvpp2_prs_tcam_first_free(pp2, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid == -ERANGE) {
+		dprintk("%s: No free TCAM entry (PPPoE)\n", __func__);
+		return tid;
+	}
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+	pe.index = tid;
+
+	mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
+
+	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
+				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
+				 MVPP2_PRS_RI_PPPOE_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_L2);
+	pp2->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+	pp2->prs_shadow[pe.index].finish = false;
+	mvpp2_prs_shadow_ri_set(pp2, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
+				MVPP2_PRS_RI_PPPOE_MASK);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	/* Ethertype: ARP */
+	tid = mvpp2_prs_tcam_first_free(pp2, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid == -ERANGE) {
+		dprintk("%s: No free TCAM entry (ARP)\n", __func__);
+		return tid;
+	}
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+	pe.index = tid;
+
+	mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
+
+	/* Generate flow in the next iteration*/
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+	/* Set L3 offset */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+				  MVPP2_ETH_TYPE_LEN,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_L2);
+	pp2->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+	pp2->prs_shadow[pe.index].finish = true;
+	mvpp2_prs_shadow_ri_set(pp2, pe.index, MVPP2_PRS_RI_L3_ARP,
+				MVPP2_PRS_RI_L3_PROTO_MASK);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	/* Ethertype: LBTD */
+	tid = mvpp2_prs_tcam_first_free(pp2, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid == -ERANGE) {
+		dprintk("%s: No free TCAM entry (LBTD)\n", __func__);
+		return tid;
+	}
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+	pe.index = tid;
+
+	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
+
+	/* Generate flow in the next iteration*/
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+				 MVPP2_PRS_RI_CPU_CODE_MASK |
+				 MVPP2_PRS_RI_UDF3_MASK);
+	/* Set L3 offset */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+				  MVPP2_ETH_TYPE_LEN,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_L2);
+	pp2->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+	pp2->prs_shadow[pe.index].finish = true;
+	mvpp2_prs_shadow_ri_set(pp2, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+				MVPP2_PRS_RI_CPU_CODE_MASK |
+				MVPP2_PRS_RI_UDF3_MASK);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	/* Ethertype: IPv4 without options */
+	tid = mvpp2_prs_tcam_first_free(pp2, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid == -ERANGE) {
+		dprintk("%s: No free TCAM entry (IPv4 wo options)\n", __func__);
+		return tid;
+	}
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+	pe.index = tid;
+
+	mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
+	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
+				     MVPP2_PRS_IPV4_HEAD_MASK |
+				     MVPP2_PRS_IPV4_IHL_MASK);
+
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+	/* Skip eth_type + 4 bytes of IP header */
+	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	/* Set L3 offset */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+				  MVPP2_ETH_TYPE_LEN,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_L2);
+	pp2->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+	pp2->prs_shadow[pe.index].finish = false;
+	mvpp2_prs_shadow_ri_set(pp2, pe.index, MVPP2_PRS_RI_L3_IP4,
+				MVPP2_PRS_RI_L3_PROTO_MASK);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	/* Ethertype: IPv4 with options */
+	tid = mvpp2_prs_tcam_first_free(pp2, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid == -ERANGE) {
+		dprintk("%s: No free TCAM entry (IPv4 w options)\n", __func__);
+		return tid;
+	}
+
+	pe.index = tid;
+
+	/* Clear tcam data before updating */
+	mvpp2_prs_tcam_data_byte_clear(&pe, MVPP2_ETH_TYPE_LEN);
+	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+				     MVPP2_PRS_IPV4_HEAD,
+				     MVPP2_PRS_IPV4_HEAD_MASK);
+
+	/* Clear ri before updating */
+	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_L2);
+	pp2->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+	pp2->prs_shadow[pe.index].finish = false;
+	mvpp2_prs_shadow_ri_set(pp2, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
+				MVPP2_PRS_RI_L3_PROTO_MASK);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	/* Ethertype: IPv6 without options */
+	tid = mvpp2_prs_tcam_first_free(pp2, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid == -ERANGE) {
+		dprintk("%s: No free TCAM entry (IPv6)\n", __func__);
+		return tid;
+	}
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+	pe.index = tid;
+
+	mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
+
+	/* Skip DIP of IPV6 header */
+	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
+				 MVPP2_MAX_L3_ADDR_SIZE,
+				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+	/* Set L3 offset */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+				  MVPP2_ETH_TYPE_LEN,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_L2);
+	pp2->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+	pp2->prs_shadow[pe.index].finish = false;
+	mvpp2_prs_shadow_ri_set(pp2, pe.index, MVPP2_PRS_RI_L3_IP6,
+				MVPP2_PRS_RI_L3_PROTO_MASK);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+	pe.index = MVPP2_PE_ETH_TYPE_UN;
+
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Generate flow in the next iteration*/
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+	/* Set L3 offset even it's unknown L3 */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+				  MVPP2_ETH_TYPE_LEN,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_L2);
+	pp2->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+	pp2->prs_shadow[pe.index].finish = true;
+	mvpp2_prs_shadow_ri_set(pp2, pe.index, MVPP2_PRS_RI_L3_UN,
+				MVPP2_PRS_RI_L3_PROTO_MASK);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	return 0;
+}
+
+/* Configure vlan entries and detect up to 2 successive VLAN tags.
+ * Possible options:
+ * 0x8100, 0x88A8
+ * 0x8100, 0x8100
+ * 0x8100
+ * 0x88A8
+ */
+static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *pp2)
+{
+	struct mvpp2_prs_entry pe;
+	int err;
+
+	pp2->prs_double_vlans = devm_kzalloc(&pdev->dev, sizeof(bool) *
+					     MVPP2_PRS_DBL_VLANS_MAX,
+					     GFP_KERNEL);
+	if (!pp2->prs_double_vlans)
+		return -ENOMEM;
+
+	/* Double VLAN: 0x8100, 0x88A8 */
+	err = mvpp2_prs_double_vlan_add(pp2, ETH_P_8021Q, ETH_P_8021AD,
+					MVPP2_PRS_PORT_MASK);
+	if (err)
+		return err;
+
+	/* Double VLAN: 0x8100, 0x8100 */
+	err = mvpp2_prs_double_vlan_add(pp2, ETH_P_8021Q, ETH_P_8021Q,
+					MVPP2_PRS_PORT_MASK);
+	if (err)
+		return err;
+
+	/* Single VLAN: 0x88a8 */
+	err = mvpp2_prs_vlan_add(pp2, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
+				 MVPP2_PRS_PORT_MASK);
+	if (err)
+		return err;
+
+	/* Single VLAN: 0x8100 */
+	err = mvpp2_prs_vlan_add(pp2, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
+				 MVPP2_PRS_PORT_MASK);
+	if (err)
+		return err;
+
+	/* Set default double vlan entry */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+	pe.index = MVPP2_PE_VLAN_DBL;
+
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+	/* Clear ai for next iterations */
+	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
+				 MVPP2_PRS_RI_VLAN_MASK);
+
+	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
+				 MVPP2_PRS_DBL_VLAN_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_VLAN);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	/* Set default vlan none entry */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+	pe.index = MVPP2_PE_VLAN_NONE;
+
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+				 MVPP2_PRS_RI_VLAN_MASK);
+
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_VLAN);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	return 0;
+}
+
+/* Set entries for PPPoE ethertype */
+static int mvpp2_prs_pppoe_init(struct mvpp2 *pp2)
+{
+	struct mvpp2_prs_entry pe;
+	int tid;
+
+	/* IPv4 over PPPoE with options */
+	tid = mvpp2_prs_tcam_first_free(pp2, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid == -ERANGE) {
+		dprintk("%s: No free TCAM entry (IPv4 w options)\n", __func__);
+		return tid;
+	}
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+	pe.index = tid;
+
+	mvpp2_prs_match_etype(&pe, 0, PPP_IP);
+
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+	/* Skip eth_type + 4 bytes of IP header */
+	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	/* Set L3 offset */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+				  MVPP2_ETH_TYPE_LEN,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_PPPOE);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	/* IPv4 over PPPoE without options */
+	tid = mvpp2_prs_tcam_first_free(pp2, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid == -ERANGE) {
+		dprintk("%s: No free TCAM entry (IPv4 wo options)\n", __func__);
+		return tid;
+	}
+
+	pe.index = tid;
+
+	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
+				     MVPP2_PRS_IPV4_HEAD_MASK |
+				     MVPP2_PRS_IPV4_IHL_MASK);
+
+	/* Clear ri before updating */
+	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_PPPOE);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	/* IPv6 over PPPoE */
+	tid = mvpp2_prs_tcam_first_free(pp2, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid == -ERANGE) {
+		dprintk("%s: No free TCAM entry (IPv6)\n", __func__);
+		return tid;
+	}
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+	pe.index = tid;
+
+	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
+
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+	/* Skip eth_type + 4 bytes of IPv6 header */
+	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	/* Set L3 offset */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+				  MVPP2_ETH_TYPE_LEN,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_PPPOE);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	/* Non-IP over PPPoE */
+	tid = mvpp2_prs_tcam_first_free(pp2, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid == -ERANGE) {
+		dprintk("%s: No free TCAM entry (non-IP)\n", __func__);
+		return tid;
+	}
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+	pe.index = tid;
+
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+
+	/* Finished: go to flowid generation */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	/* Set L3 offset even if it's unknown L3 */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+				  MVPP2_ETH_TYPE_LEN,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_PPPOE);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	return 0;
+}
+
+/* Initialize entries for IPv4 */
+static int mvpp2_prs_ip4_init(struct mvpp2 *pp2)
+{
+	struct mvpp2_prs_entry pe;
+	int err;
+
+	/* Set entries for TCP, UDP and IGMP over IPv4 */
+	err = mvpp2_prs_ip4_proto(pp2, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
+				  MVPP2_PRS_RI_L4_PROTO_MASK);
+	if (err)
+		return err;
+
+	err = mvpp2_prs_ip4_proto(pp2, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
+				  MVPP2_PRS_RI_L4_PROTO_MASK);
+	if (err)
+		return err;
+
+	err = mvpp2_prs_ip4_proto(pp2, IPPROTO_IGMP,
+				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+				  MVPP2_PRS_RI_CPU_CODE_MASK |
+				  MVPP2_PRS_RI_UDF3_MASK);
+	if (err)
+		return err;
+
+	/* IPv4 Broadcast */
+	err = mvpp2_prs_ip4_cast(pp2, MVPP2_PRS_L3_BROAD_CAST);
+	if (err)
+		return err;
+
+	/* IPv4 Multicast */
+	err = mvpp2_prs_ip4_cast(pp2, MVPP2_PRS_L3_MULTI_CAST);
+	if (err)
+		return err;
+
+	/* Default IPv4 entry for unknown protocols */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	pe.index = MVPP2_PE_IP4_PROTO_UN;
+
+	/* Set next lu to IPv4 */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	/* Set L4 offset */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+				  sizeof(struct iphdr) - 4,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+				 MVPP2_PRS_IPV4_DIP_AI_BIT);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+				 MVPP2_PRS_RI_L4_PROTO_MASK);
+
+	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	/* Default IPv4 entry for unicast address */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	pe.index = MVPP2_PE_IP4_ADDR_UN;
+
+	/* Finished: go to flowid generation */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
+				 MVPP2_PRS_RI_L3_ADDR_MASK);
+
+	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+				 MVPP2_PRS_IPV4_DIP_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	return 0;
+}
+
+/* Initialize entries for IPv6 */
+static int mvpp2_prs_ip6_init(struct mvpp2 *pp2)
+{
+	struct mvpp2_prs_entry pe;
+	int tid;
+	int err;
+
+	/* Set entries for TCP, UDP and ICMP over IPv6 */
+	err = mvpp2_prs_ip6_proto(pp2, IPPROTO_TCP,
+				  MVPP2_PRS_RI_L4_TCP,
+				  MVPP2_PRS_RI_L4_PROTO_MASK);
+	if (err)
+		return err;
+
+	err = mvpp2_prs_ip6_proto(pp2, IPPROTO_UDP,
+				  MVPP2_PRS_RI_L4_UDP,
+				  MVPP2_PRS_RI_L4_PROTO_MASK);
+	if (err)
+		return err;
+
+	err = mvpp2_prs_ip6_proto(pp2, IPPROTO_ICMPV6,
+				  MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+				  MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+				  MVPP2_PRS_RI_CPU_CODE_MASK |
+				  MVPP2_PRS_RI_UDF3_MASK);
+	if (err)
+		return err;
+
+	/* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
+	/* Result Info: UDF7=1, DS lite */
+	err = mvpp2_prs_ip6_proto(pp2, IPPROTO_IPIP, MVPP2_PRS_RI_UDF7_IP6_LITE,
+				  MVPP2_PRS_RI_UDF7_MASK);
+	if (err)
+		return err;
+
+	/* IPv6 multicast */
+	err = mvpp2_prs_ip6_cast(pp2, MVPP2_PRS_L3_MULTI_CAST);
+	if (err)
+		return err;
+
+	/* Entry for checking hop limit */
+	tid = mvpp2_prs_tcam_first_free(pp2, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid == -ERANGE) {
+		dprintk("%s: No free TCAM entry\n", __func__);
+		return tid;
+	}
+
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	pe.index = tid;
+
+	/* Finished: go to flowid generation */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
+				 MVPP2_PRS_RI_DROP_MASK,
+				 MVPP2_PRS_RI_L3_PROTO_MASK |
+				 MVPP2_PRS_RI_DROP_MASK);
+
+	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
+	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	/* Default IPv6 entry for unknown protocols */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	pe.index = MVPP2_PE_IP6_PROTO_UN;
+
+	/* Finished: go to flowid generation */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+				 MVPP2_PRS_RI_L4_PROTO_MASK);
+	/* Set L4 offset relatively to our current place */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+				  sizeof(struct ipv6hdr) - 4,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	/* Default IPv6 entry for unknown ext protocols */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
+
+	/* Finished: go to flowid generation */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+				 MVPP2_PRS_RI_L4_PROTO_MASK);
+
+	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
+				 MVPP2_PRS_IPV6_EXT_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	/* Default IPv6 entry for unicast address */
+	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	pe.index = MVPP2_PE_IP6_ADDR_UN;
+
+	/* Finished: go to IPv6 again */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
+				 MVPP2_PRS_RI_L3_ADDR_MASK);
+	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+	/* Shift back to IPV6 NH */
+	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(pp2, pe.index, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_hw_write(pp2, &pe);
+
+	return 0;
+}
+
+/* Parser default initialization */
+static int mvpp2_prs_default_init(struct platform_device *pdev,
+				  struct mvpp2 *pp2)
+{
+	int port;
+	int err;
+	int index;
+	int i;
+
+	/* Enable tcam table */
+	mvpp2_write(pp2, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
+
+	/* Clear all tcam and sram entries */
+	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
+		mvpp2_write(pp2, MVPP2_PRS_TCAM_IDX_REG, index);
+		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+			mvpp2_write(pp2, MVPP2_PRS_TCAM_DATA_REG(i), 0);
+
+		mvpp2_write(pp2, MVPP2_PRS_SRAM_IDX_REG, index);
+		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+			mvpp2_write(pp2, MVPP2_PRS_SRAM_DATA_REG(i), 0);
+	}
+
+	/* Invalidate all tcam entries */
+	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
+		mvpp2_prs_hw_inv(pp2, index);
+
+	pp2->prs_shadow = devm_kzalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE *
+				       sizeof(struct mvpp2_prs_shadow),
+				       GFP_KERNEL);
+	if (!pp2->prs_shadow)
+		return -ENOMEM;
+
+	/* Always start from lookup = 0 */
+	for (port = 0; port < MVPP2_MAX_PORTS; port++)
+		mvpp2_prs_hw_port_init(pp2, port, MVPP2_PRS_LU_MH,
+				       MVPP2_PRS_PORT_LU_MAX, 0);
+
+	mvpp2_prs_def_flow_init(pp2);
+
+	mvpp2_prs_mh_init(pp2);
+
+	mvpp2_prs_mac_init(pp2);
+
+	mvpp2_prs_dsa_init(pp2);
+
+	err = mvpp2_prs_etype_init(pp2);
+	if (err)
+		return err;
+
+	err = mvpp2_prs_vlan_init(pdev, pp2);
+	if (err)
+		return err;
+
+	err = mvpp2_prs_pppoe_init(pp2);
+	if (err)
+		return err;
+
+	err = mvpp2_prs_ip6_init(pp2);
+	if (err)
+		return err;
+
+	err = mvpp2_prs_ip4_init(pp2);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+/* Compare MAC DA with tcam entry data */
+static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
+				       const u8 *da, unsigned char *mask)
+{
+	int index;
+	unsigned char tcam_byte;
+	unsigned char tcam_mask;
+
+	for (index = 0; index < ETH_ALEN; index++) {
+		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
+		if (tcam_mask != mask[index])
+			return false;
+
+		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
+			return false;
+	}
+
+	return true;
+}
+
+/* Find tcam entry with matched pair <MAC DA, port> */
+static struct mvpp2_prs_entry *mvpp2_prs_mac_da_range_find(struct mvpp2 *pp2,
+							   int pmap,
+							   const u8 *da,
+							   unsigned char *mask,
+							   int udf_type)
+{
+	struct mvpp2_prs_entry *pe;
+	unsigned int entry_pmap;
+	int tid;
+
+	pe = kzalloc(sizeof(struct mvpp2_prs_entry), GFP_KERNEL);
+	if (!pe)
+		return NULL;
+	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
+
+	dprintk("%s: pmap = 0x%x, mac = %02x:%02x:%02x:%02x:%02x:%02x\n",
+		__func__, pmap, da[0], da[1], da[2], da[3], da[4], da[5]);
+
+	/* Go through the all entires with MVPP2_PRS_LU_MAC */
+	for (tid = MVPP2_PE_FIRST_FREE_TID;
+	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+		if (!pp2->prs_shadow[tid].valid ||
+		    (pp2->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
+		    (pp2->prs_shadow[tid].udf != udf_type))
+			continue;
+
+		pe->index = tid;
+		mvpp2_prs_hw_read(pp2, pe);
+		mvpp2_prs_tcam_port_map_get(pe, &entry_pmap);
+
+		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
+		    (entry_pmap == pmap)) {
+			dprintk("%s: entry found\n", __func__);
+			return pe;
+		}
+	}
+	kfree(pe);
+
+	return NULL;
+}
+
+/* Update parser's mac da entry */
+static int mvpp2_prs_mac_da_accept(struct mvpp2 *pp2, int port,
+				   const u8 *da, bool add)
+{
+	struct mvpp2_prs_entry *pe = NULL;
+	unsigned int pmap;
+	unsigned int len;
+	unsigned int ri;
+	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+	int tid;
+
+	/* Scan TCAM and see if entry with this <MAC DA, port> already exist */
+	pe = mvpp2_prs_mac_da_range_find(pp2, (1 << port), da, mask,
+					 MVPP2_PRS_UDF_MAC_DEF);
+
+	/* No such entry */
+	if (!pe) {
+		if (!add) {
+			dprintk("%s: The entry already doesn't exist\n",
+				__func__);
+			return 0;
+		}
+
+		/* Create new TCAM entry */
+		/* Find first range mac entry*/
+		for (tid = MVPP2_PE_FIRST_FREE_TID;
+		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
+			if (pp2->prs_shadow[tid].valid &&
+			    (pp2->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
+			    (pp2->prs_shadow[tid].udf ==
+						       MVPP2_PRS_UDF_MAC_RANGE))
+				break;
+
+		/* Go through the all entries from first to last */
+		tid = mvpp2_prs_tcam_first_free(pp2, MVPP2_PE_FIRST_FREE_TID,
+						tid - 1);
+		if (tid == -ERANGE) {
+			dprintk("%s: No free TCAM entry\n", __func__);
+			return tid;
+		}
+
+		pe = kzalloc(sizeof(struct mvpp2_prs_entry), GFP_KERNEL);
+		if (!pe)
+			return -1;
+		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
+		pe->index = tid;
+
+		/* Mask all ports */
+		mvpp2_prs_tcam_port_map_set(pe, 0);
+	}
+
+	/* Update port mask */
+	mvpp2_prs_tcam_port_set(pe, port, add);
+
+	/* Invalidate the entry if no ports are left enabled */
+	mvpp2_prs_tcam_port_map_get(pe, &pmap);
+	if (pmap == 0) {
+		if (add) {
+			kfree(pe);
+			dprintk("%s: Wrong port map value\n", __func__);
+			return -1;
+		}
+		mvpp2_prs_hw_inv(pp2, pe->index);
+		pp2->prs_shadow[pe->index].valid = false;
+		kfree(pe);
+		return 0;
+	}
+
+	/* Continue - set next lookup */
+	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
+
+	/* Set match on DA */
+	len = ETH_ALEN;
+	while (len--)
+		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
+
+	/* Set result info bits */
+	if (is_broadcast_ether_addr(da)) {
+		ri = MVPP2_PRS_RI_L2_BCAST;
+		dprintk("%s: bcast-port-%d\n", __func__, port);
+	} else if (is_multicast_ether_addr(da)) {
+		ri = MVPP2_PRS_RI_L2_MCAST;
+		dprintk("%s: mcast-port-%d\n", __func__, port);
+	} else {
+		ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
+		dprintk("%s: ucast-port-%d\n", __func__, port);
+	}
+	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
+				 MVPP2_PRS_RI_MAC_ME_MASK);
+	mvpp2_prs_shadow_ri_set(pp2, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
+				MVPP2_PRS_RI_MAC_ME_MASK);
+
+	/* Shift to ethertype */
+	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
+				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+	/* Update shadow table and hw entry */
+	pp2->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
+	mvpp2_prs_shadow_set(pp2, pe->index, MVPP2_PRS_LU_MAC);
+	mvpp2_prs_hw_write(pp2, pe);
+
+	kfree(pe);
+
+	return 0;
+}
+
+static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
+{
+	struct mvpp2_port *pp = netdev_priv(dev);
+	int err;
+
+	/* Remove old parser entry */
+	err = mvpp2_prs_mac_da_accept(pp->pp2, pp->id, dev->dev_addr, false);
+	if (err)
+		return err;
+
+	/* Add new parser entry */
+	err = mvpp2_prs_mac_da_accept(pp->pp2, pp->id, da, true);
+	if (err)
+		return err;
+
+	/* Set addr in the device */
+	memcpy(dev->dev_addr, da, ETH_ALEN);
+
+	return 0;
+}
+
+/* Delete all port's multicast simple (not range) entries */
+static void mvpp2_prs_mcast_del_all(struct mvpp2 *pp2, int port)
+{
+	struct mvpp2_prs_entry pe;
+	int index;
+	int tid;
+	unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
+
+	for (tid = MVPP2_PE_FIRST_FREE_TID;
+	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+		if (!pp2->prs_shadow[tid].valid ||
+		    (pp2->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
+		    (pp2->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
+			continue;
+
+		/* Only simple mac entries */
+		pe.index = tid;
+		mvpp2_prs_hw_read(pp2, &pe);
+
+		/* Read mac addr from entry */
+		for (index = 0; index < ETH_ALEN; index++)
+			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
+						     &da_mask[index]);
+
+		if (is_broadcast_ether_addr(da))
+			continue;
+
+		if (is_multicast_ether_addr(da))
+			/* Delete entry */
+			mvpp2_prs_mac_da_accept(pp2, port, da, false);
+	}
+}
+
+static int mvpp2_prs_tag_mode_set(struct mvpp2 *pp2, int port, int type)
+{
+	switch (type) {
+
+	case MVPP2_TAG_TYPE_EDSA:
+		/* Add port to EDSA entries */
+		mvpp2_prs_dsa_tag_set(pp2, port, true,
+				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+		mvpp2_prs_dsa_tag_set(pp2, port, true,
+				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+		/* Remove port from DSA entries */
+		mvpp2_prs_dsa_tag_set(pp2, port, false,
+				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+		mvpp2_prs_dsa_tag_set(pp2, port, false,
+				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+		break;
+
+	case MVPP2_TAG_TYPE_DSA:
+		/* Add port to DSA entries */
+		mvpp2_prs_dsa_tag_set(pp2, port, true,
+				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+		mvpp2_prs_dsa_tag_set(pp2, port, true,
+				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+		/* Remove port from EDSA entries */
+		mvpp2_prs_dsa_tag_set(pp2, port, false,
+				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+		mvpp2_prs_dsa_tag_set(pp2, port, false,
+				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+		break;
+
+	case MVPP2_TAG_TYPE_MH:
+	case MVPP2_TAG_TYPE_NONE:
+		/* Remove port form EDSA and DSA entries */
+		mvpp2_prs_dsa_tag_set(pp2, port, false,
+				      MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+		mvpp2_prs_dsa_tag_set(pp2, port, false,
+				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+		mvpp2_prs_dsa_tag_set(pp2, port, false,
+				      MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+		mvpp2_prs_dsa_tag_set(pp2, port, false,
+				      MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+		break;
+
+	default:
+		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Set prs flow for the port */
+static int mvpp2_prs_def_flow(struct mvpp2_port *pp)
+{
+	struct mvpp2_prs_entry *pe;
+	int tid;
+
+	pe = mvpp2_prs_flow_find(pp->pp2, pp->id);
+
+	/* Such entry not exist */
+	if (!pe) {
+		/* Go through the all entires from last to first */
+		tid = mvpp2_prs_tcam_first_free(pp->pp2,
+					       MVPP2_PE_LAST_FREE_TID,
+					       MVPP2_PE_FIRST_FREE_TID);
+		if (tid == -ERANGE) {
+			dprintk("%s: No free TCAM entry\n", __func__);
+			return tid;
+		}
+
+		pe = kzalloc(sizeof(struct mvpp2_prs_entry), GFP_KERNEL);
+		if (!pe)
+			return -ENOMEM;
+
+		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
+		pe->index = tid;
+
+		/* Set flow ID*/
+		mvpp2_prs_sram_ai_update(pe, pp->id, MVPP2_PRS_FLOW_ID_MASK);
+		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+		/* Update shadow table */
+		mvpp2_prs_shadow_set(pp->pp2, pe->index, MVPP2_PRS_LU_FLOWS);
+
+	}
+
+	mvpp2_prs_tcam_port_map_set(pe, (1 << pp->id));
+	mvpp2_prs_hw_write(pp->pp2, pe);
+	kfree(pe);
+
+	return 0;
+}
+
+/* Classifier configuration routines */
+
+/* Update classification flow table registers */
+static void mvpp2_cls_hw_flow_write(struct mvpp2 *pp2,
+				    struct mvpp2_cls_flow_entry *fe)
+{
+	/* Write to index reg - indirect access */
+	mvpp2_write(pp2, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
+
+	mvpp2_write(pp2, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
+	mvpp2_write(pp2, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
+	mvpp2_write(pp2, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
+}
+
+/* Update classification lookup table register */
+static void mvpp2_cls_hw_lkp_write(struct mvpp2 *pp2,
+				   struct mvpp2_cls_lkp_entry *le)
+{
+	u32 reg_val = 0;
+
+	/* Write to index reg - indirect access */
+	reg_val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
+	mvpp2_write(pp2, MVPP2_CLS_LKP_INDEX_REG, reg_val);
+
+	mvpp2_write(pp2, MVPP2_CLS_LKP_TBL_REG, le->data);
+}
+
+/* Classifier default initialization */
+static void mvpp2_cls_default_init(struct mvpp2 *pp2)
+{
+	struct mvpp2_cls_lkp_entry le;
+	struct mvpp2_cls_flow_entry fe;
+	int index;
+
+	/* Enable Classifier */
+	mvpp2_write(pp2, MVPP2_CLS_MODE_REG,
+		    MVPP2_CLS_MODE_ACTIVE_MASK);
+
+	/* Clear cls flow table */
+	fe.data[0] = 0;
+	fe.data[1] = 0;
+	fe.data[2] = 0;
+	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
+		fe.index = index;
+		mvpp2_cls_hw_flow_write(pp2, &fe);
+	}
+
+	/* Clear cls lookup table */
+	le.data = 0;
+	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
+		le.lkpid = index;
+		le.way = 0;
+		mvpp2_cls_hw_lkp_write(pp2, &le);
+		le.way = 1;
+		mvpp2_cls_hw_lkp_write(pp2, &le);
+	}
+}
+
+/* Port's classifier configuration */
+static void mvpp2_cls_hw_port_def_config(struct mvpp2_port *pp, int way,
+					 int lkpid)
+{
+	struct mvpp2_cls_lkp_entry le;
+	u32 reg_val;
+
+	/* Set way for the port */
+	reg_val = mvpp2_read(pp->pp2, MVPP2_CLS_PORT_WAY_REG);
+	if (way)
+		reg_val |= MVPP2_CLS_PORT_WAY_MASK(pp->id);
+	else
+		reg_val &= ~MVPP2_CLS_PORT_WAY_MASK(pp->id);
+	mvpp2_write(pp->pp2, MVPP2_CLS_PORT_WAY_REG, reg_val);
+
+	/* The entry to be accessed in lookupid decoding table
+	 * is acording to way and lkpid
+	 */
+	le.way = way;
+	le.lkpid = lkpid;
+	le.data = 0;
+
+	/* Set initial CPU queue for receiving packets */
+	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
+	le.data |= pp->first_rxq;
+
+	/* Disable classification engines */
+	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
+
+	/* Update lookupid table entry */
+	mvpp2_cls_hw_lkp_write(pp->pp2, &le);
+}
+
+/* Set CPU queue number for oversize packets */
+static void mvpp2_cls_hw_oversize_rxq_set(struct mvpp2_port *pp)
+{
+	u32 reg_val;
+
+	mvpp2_write(pp->pp2, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(pp->id),
+		    pp->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
+
+	mvpp2_write(pp->pp2, MVPP2_CLS_SWFWD_P2HQ_REG(pp->id),
+		    (pp->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
+
+	reg_val = mvpp2_read(pp->pp2, MVPP2_CLS_SWFWD_PCTRL_REG);
+	reg_val |= MVPP2_CLS_SWFWD_PCTRL_MASK(pp->id);
+	mvpp2_write(pp->pp2, MVPP2_CLS_SWFWD_PCTRL_REG, reg_val);
+}
+
+/* Buffer Manager configuration routines */
+
+/* Create pool */
+static int mvpp2_bm_pool_create(struct platform_device *pdev,
+				struct mvpp2 *pp2,
+				struct mvpp2_bm_pool *bm_pool, int size)
+{
+	int size_bytes;
+	u32 reg_val;
+
+	size_bytes = sizeof(u32) * size;
+	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
+						&bm_pool->phys_addr,
+						GFP_KERNEL);
+	if (!bm_pool->virt_addr)
+		return -ENOMEM;
+
+	if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) {
+		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
+				  bm_pool->phys_addr);
+		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
+			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
+		return -ENOMEM;
+	}
+
+	mvpp2_write(pp2, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
+		    bm_pool->phys_addr);
+	mvpp2_write(pp2, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
+
+	reg_val = mvpp2_read(pp2, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
+	reg_val |= MVPP2_BM_START_MASK;
+	mvpp2_write(pp2, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), reg_val);
+
+	bm_pool->type = MVPP2_BM_FREE;
+	bm_pool->size = size;
+	bm_pool->pkt_size = 0;
+	bm_pool->buf_num = 0;
+	atomic_set(&bm_pool->in_use, 0);
+	spin_lock_init(&bm_pool->lock);
+
+	return 0;
+}
+
+/* Set pool buffer size */
+static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *pp2,
+				      struct mvpp2_bm_pool *bm_pool,
+				      int buf_size)
+{
+	u32 reg_val;
+
+	bm_pool->buf_size = buf_size;
+
+	reg_val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
+	mvpp2_write(pp2, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), reg_val);
+}
+
+/* Free "num" buffers from the pool */
+static int mvpp2_bm_bufs_free(struct mvpp2 *pp2,
+			      struct mvpp2_bm_pool *bm_pool, int num)
+{
+	int i;
+
+	if (num >= bm_pool->buf_num)
+		/* Free all buffers from the pool */
+		num = bm_pool->buf_num;
+
+	for (i = 0; i < num; i++) {
+		u32 vaddr;
+
+		/* Get buffer virtual adress (indirect access) */
+		mvpp2_read(pp2, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
+		vaddr = mvpp2_read(pp2, MVPP2_BM_VIRT_ALLOC_REG);
+		if (!vaddr)
+			break;
+		dev_kfree_skb_any((struct sk_buff *)vaddr);
+	}
+
+	/* Update BM driver with number of buffers removed from pool */
+	bm_pool->buf_num -= i;
+	return i;
+}
+
+/* Cleanup pool */
+static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
+				 struct mvpp2 *pp2,
+				 struct mvpp2_bm_pool *bm_pool)
+{
+	int num;
+	u32 reg_val;
+
+	num = mvpp2_bm_bufs_free(pp2, bm_pool, bm_pool->buf_num);
+	if (num != bm_pool->buf_num) {
+		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
+		return 0;
+	}
+
+	reg_val = mvpp2_read(pp2, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
+	reg_val |= MVPP2_BM_STOP_MASK;
+	mvpp2_write(pp2, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), reg_val);
+
+	dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
+			  bm_pool->virt_addr,
+			  bm_pool->phys_addr);
+	return 0;
+}
+
+static int mvpp2_bm_pools_init(struct platform_device *pdev,
+			       struct mvpp2 *pp2)
+{
+	int i, err, size;
+	struct mvpp2_bm_pool *bm_pool;
+
+	/* Create all pools with maximum size */
+	size = MVPP2_BM_POOL_SIZE_MAX;
+	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+		bm_pool = &pp2->bm_pools[i];
+		bm_pool->id = i;
+		err = mvpp2_bm_pool_create(pdev, pp2, bm_pool, size);
+		if (err)
+			goto err_unroll_pools;
+		mvpp2_bm_pool_bufsize_set(pp2, bm_pool, 0);
+	}
+	return 0;
+
+err_unroll_pools:
+	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
+	for (i = i - 1; i >= 0; i--)
+		mvpp2_bm_pool_destroy(pdev, pp2, &pp2->bm_pools[i]);
+	return err;
+}
+
+static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *pp2)
+{
+	int i, err;
+
+	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+		/* Mask BM all interrupts */
+		mvpp2_write(pp2, MVPP2_BM_INTR_MASK_REG(i), 0);
+		/* Clear BM cause register */
+		mvpp2_write(pp2, MVPP2_BM_INTR_CAUSE_REG(i), 0);
+	}
+
+	/* Allocate and initialize BM pools */
+	pp2->bm_pools = devm_kzalloc(&pdev->dev, MVPP2_BM_POOLS_NUM *
+				     sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
+	if (!pp2->bm_pools)
+		return -ENOMEM;
+
+	err = mvpp2_bm_pools_init(pdev, pp2);
+	if (err < 0)
+		return err;
+	return 0;
+}
+
+/* Attach long pool to rxq */
+static void mvpp2_rxq_long_pool_set(struct mvpp2_port *pp,
+				    int lrxq, int long_pool)
+{
+	u32 reg_val;
+	int prxq;
+
+	/* Get queue physical ID */
+	prxq = pp->rxqs[lrxq]->id;
+
+	reg_val = mvpp2_read(pp->pp2, MVPP2_RXQ_CONFIG_REG(prxq));
+	reg_val &= ~MVPP2_RXQ_POOL_LONG_MASK;
+	reg_val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
+		    MVPP2_RXQ_POOL_LONG_MASK);
+
+	mvpp2_write(pp->pp2, MVPP2_RXQ_CONFIG_REG(prxq), reg_val);
+}
+
+/* Attach short pool to rxq */
+static void mvpp2_rxq_short_pool_set(struct mvpp2_port *pp,
+				     int lrxq, int short_pool)
+{
+	u32 reg_val;
+	int prxq;
+
+	/* Get queue physical ID */
+	prxq = pp->rxqs[lrxq]->id;
+
+	reg_val = mvpp2_read(pp->pp2, MVPP2_RXQ_CONFIG_REG(prxq));
+	reg_val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
+	reg_val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
+		    MVPP2_RXQ_POOL_SHORT_MASK);
+
+	mvpp2_write(pp->pp2, MVPP2_RXQ_CONFIG_REG(prxq), reg_val);
+}
+
+/* Allocate skb for BM pool */
+static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *pp,
+				       struct mvpp2_bm_pool *bm_pool,
+				       dma_addr_t *buf_phys_addr,
+				       gfp_t gfp_mask)
+{
+	struct sk_buff *skb;
+	dma_addr_t phys_addr;
+
+	skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask);
+	if (!skb)
+		return NULL;
+
+	phys_addr = dma_map_single(pp->dev->dev.parent, skb->head,
+				    MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
+				    DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
+		dev_kfree_skb_any(skb);
+		return NULL;
+	}
+	*buf_phys_addr = phys_addr;
+
+	return skb;
+}
+
+/* Set pool number in a BM cookie */
+static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
+{
+	u32 bm;
+
+	bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
+	bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
+
+	return bm;
+}
+
+/* Get pool number from a BM cookie */
+static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
+{
+	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
+}
+
+/* Release buffer to BM */
+static inline void mvpp2_bm_pool_put(struct mvpp2_port *pp, int pool,
+				     u32 buf_phys_addr, u32 buf_virt_addr)
+{
+	mvpp2_write(pp->pp2, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
+	mvpp2_write(pp->pp2, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
+}
+
+/* Release multicast buffer */
+static void mvpp2_bm_pool_mc_put(struct mvpp2_port *pp, int pool,
+				 u32 buf_phys_addr, u32 buf_virt_addr,
+				 int mc_id, int is_force)
+{
+	u32 reg_val = 0;
+
+	reg_val |= (mc_id & MVPP2_BM_MC_ID_MASK);
+	if (is_force)
+		reg_val |= MVPP2_BM_FORCE_RELEASE_MASK;
+
+	mvpp2_write(pp->pp2, MVPP2_BM_MC_RLS_REG, reg_val);
+	mvpp2_bm_pool_put(pp, pool,
+			  buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
+			  buf_virt_addr);
+}
+
+/* Refill BM pool */
+static void mvpp2_pool_refill(struct mvpp2_port *pp, u32 bm,
+			      u32 phys_addr, u32 cookie)
+{
+	int pool = mvpp2_bm_cookie_pool_get(bm);
+	unsigned long flags;
+
+	/* It's very odd to need irq_save/restore here,
+	 * TODO: Check?
+	 */
+	local_irq_save(flags);
+
+	mvpp2_bm_pool_put(pp, pool, phys_addr, cookie);
+
+	local_irq_restore(flags);
+}
+
+/* Allocate buffers for the pool */
+static int mvpp2_bm_bufs_add(struct mvpp2_port *pp,
+			     struct mvpp2_bm_pool *bm_pool, int buf_num)
+{
+	struct sk_buff *skb;
+	int i, buf_size, total_size;
+	u32 bm;
+	dma_addr_t phys_addr;
+
+	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
+	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
+
+	if (buf_num < 0 ||
+	   (buf_num + bm_pool->buf_num > bm_pool->size)) {
+		netdev_err(pp->dev,
+			   "cannot allocate %d buffers for pool %d\n",
+			   buf_num, bm_pool->id);
+		return 0;
+	}
+
+	bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id);
+	for (i = 0; i < buf_num; i++) {
+		skb = mvpp2_skb_alloc(pp, bm_pool, &phys_addr, GFP_KERNEL);
+		if (!skb)
+			break;
+
+		mvpp2_pool_refill(pp, bm, (u32)phys_addr, (u32)skb);
+	}
+
+	/* Update BM driver with number of buffers added to pool */
+	bm_pool->buf_num += i;
+	bm_pool->in_use_thresh = bm_pool->buf_num / 4;
+
+	netdev_dbg(pp->dev,
+		   "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
+		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
+		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
+
+	netdev_dbg(pp->dev,
+		   "%s pool %d: %d of %d buffers added\n",
+		   bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
+		   bm_pool->id, i, buf_num);
+	return i;
+}
+
+/* Notify the driver that BM pool is being used as specific type and return the
+ * pool pointer on success
+ */
+static struct mvpp2_bm_pool *mvpp2_bm_pool_use(struct mvpp2_port *pp, int pool,
+					 enum mvpp2_bm_type type, int pkt_size)
+{
+	unsigned long flags = 0;
+	struct mvpp2_bm_pool *new_pool = &pp->pp2->bm_pools[pool];
+	int num;
+
+	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
+		netdev_err(pp->dev, "mixing pool types is forbidden\n");
+		return NULL;
+	}
+
+	spin_lock_irqsave(&new_pool->lock, flags);
+
+	if (new_pool->type == MVPP2_BM_FREE)
+		new_pool->type = type;
+
+	/* Allocate buffers in case BM pool is used as long pool, but packet
+	 * size doesn't match MTU or BM pool hasn't being used yet
+	 */
+	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
+	    (new_pool->pkt_size == 0)) {
+		int pkts_num;
+
+		/* Set default buffer number or free all the buffers in case
+		 * the pool is not empty
+		 */
+		pkts_num = new_pool->buf_num;
+		if (pkts_num == 0)
+			pkts_num = type == MVPP2_BM_SWF_LONG ?
+				   MVPP2_BM_LONG_BUF_NUM :
+				   MVPP2_BM_SHORT_BUF_NUM;
+		else
+			mvpp2_bm_bufs_free(pp->pp2, new_pool, pkts_num);
+
+		new_pool->pkt_size = pkt_size;
+
+		/* Allocate buffers for this pool */
+		num = mvpp2_bm_bufs_add(pp, new_pool, pkts_num);
+		if (num != pkts_num) {
+			WARN(1, "pool %d: %d of %d allocated\n",
+			     new_pool->id, num, pkts_num);
+			/* We need to undo the bufs_add() allocations */
+			spin_unlock_irqrestore(&new_pool->lock, flags);
+			return NULL;
+		}
+	}
+
+	mvpp2_bm_pool_bufsize_set(pp->pp2, new_pool,
+				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
+
+	spin_unlock_irqrestore(&new_pool->lock, flags);
+
+	return new_pool;
+}
+
+/* Initialize pools for swf */
+static int mvpp2_swf_bm_pool_init(struct mvpp2_port *pp)
+{
+	unsigned long flags = 0;
+	int rxq;
+
+	if (!pp->pool_long) {
+		pp->pool_long =
+			mvpp2_bm_pool_use(pp, MVPP2_BM_SWF_LONG_POOL(pp->id),
+					  MVPP2_BM_SWF_LONG,
+					  pp->pkt_size);
+		if (!pp->pool_long)
+			return -ENOMEM;
+
+		spin_lock_irqsave(&pp->pool_long->lock, flags);
+		pp->pool_long->port_map |= (1 << pp->id);
+		spin_unlock_irqrestore(&pp->pool_long->lock, flags);
+
+		for (rxq = 0; rxq < rxq_number; rxq++)
+			mvpp2_rxq_long_pool_set(pp, rxq, pp->pool_long->id);
+	}
+
+	if (!pp->pool_short) {
+		pp->pool_short =
+			mvpp2_bm_pool_use(pp, MVPP2_BM_SWF_SHORT_POOL,
+					  MVPP2_BM_SWF_SHORT,
+					  MVPP2_BM_SHORT_PKT_SIZE);
+		if (!pp->pool_short)
+			return -ENOMEM;
+
+		spin_lock_irqsave(&pp->pool_short->lock, flags);
+		pp->pool_short->port_map |= (1 << pp->id);
+		spin_unlock_irqrestore(&pp->pool_short->lock, flags);
+
+		for (rxq = 0; rxq < rxq_number; rxq++)
+			mvpp2_rxq_short_pool_set(pp, rxq, pp->pool_short->id);
+	}
+
+	return 0;
+}
+
+static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
+{
+	struct mvpp2_port *pp = netdev_priv(dev);
+	struct mvpp2_bm_pool *port_pool = pp->pool_long;
+	int num, pkts_num = port_pool->buf_num;
+	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+
+	if (mtu == dev->mtu)
+		goto mtu_out;
+
+	/* Update BM pool with new buffer size */
+	num = mvpp2_bm_bufs_free(pp->pp2, port_pool, pkts_num);
+	if (num != pkts_num) {
+		WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
+		return -EIO;
+	}
+
+	port_pool->pkt_size = pkt_size;
+	num = mvpp2_bm_bufs_add(pp, port_pool, pkts_num);
+	if (num != pkts_num) {
+		WARN(1, "pool %d: %d of %d allocated\n",
+		     port_pool->id, num, pkts_num);
+		return -EIO;
+	}
+
+	mvpp2_bm_pool_bufsize_set(pp->pp2, port_pool,
+				  MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
+
+mtu_out:
+	dev->mtu = mtu;
+	netdev_update_features(dev);
+	return 0;
+}
+
+static inline void mvpp2_cpu_interrupts_enable(struct mvpp2_port *pp, int cpu)
+{
+	int cpu_mask = 1 << cpu;
+
+	mvpp2_write(pp->pp2, MVPP2_ISR_ENABLE_REG(pp->id),
+		    MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
+}
+
+static inline void mvpp2_cpu_interrupts_disable(struct mvpp2_port *pp, int cpu)
+{
+	int cpu_mask = 1 << cpu;
+
+	mvpp2_write(pp->pp2, MVPP2_ISR_ENABLE_REG(pp->id),
+		    MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
+}
+
+/* Mask the current CPU's Rx/Tx interrupts */
+static void mvpp2_interrupts_mask(void *arg)
+{
+	struct mvpp2_port *pp = arg;
+
+	mvpp2_write(pp->pp2, MVPP2_ISR_RX_TX_MASK_REG(pp->id), 0);
+}
+
+/* Unmask the current CPU's Rx/Tx interrupts */
+static void mvpp2_interrupts_unmask(void *arg)
+{
+	struct mvpp2_port *pp = arg;
+
+	mvpp2_write(pp->pp2, MVPP2_ISR_RX_TX_MASK_REG(pp->id),
+		    (MVPP2_CAUSE_MISC_SUM_MASK |
+		     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
+		     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
+}
+
+/* Port configuration routines */
+
+static void mvpp2_port_mii_set(struct mvpp2_port *pp)
+{
+	u32 val;
+
+	val = readl(pp->base + MVPP2_GMAC_CTRL_2_REG);
+
+	switch (pp->phy_interface) {
+	case PHY_INTERFACE_MODE_SGMII:
+		val |= MVPP2_GMAC_INBAND_AN_MASK;
+		break;
+	case PHY_INTERFACE_MODE_RGMII:
+		val |= MVPP2_GMAC_PORT_RGMII_MASK;
+	default:
+		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
+	}
+
+	writel(val, pp->base + MVPP2_GMAC_CTRL_2_REG);
+}
+
+static void mvpp2_port_fc_adv_enable(struct mvpp2_port *pp)
+{
+	u32 val;
+
+	val = readl(pp->base + MVPP2_GMAC_AUTONEG_CONFIG);
+	val |= MVPP2_GMAC_FC_ADV_EN;
+	writel(val, pp->base + MVPP2_GMAC_AUTONEG_CONFIG);
+}
+
+static void mvpp2_port_enable(struct mvpp2_port *pp)
+{
+	u32 reg_val;
+
+	reg_val = readl(pp->base + MVPP2_GMAC_CTRL_0_REG);
+	reg_val |= MVPP2_GMAC_PORT_EN_MASK;
+	reg_val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
+	writel(reg_val, pp->base + MVPP2_GMAC_CTRL_0_REG);
+}
+
+static void mvpp2_port_disable(struct mvpp2_port *pp)
+{
+	u32 reg_val;
+
+	reg_val = readl(pp->base + MVPP2_GMAC_CTRL_0_REG);
+	reg_val &= ~(MVPP2_GMAC_PORT_EN_MASK);
+	writel(reg_val, pp->base + MVPP2_GMAC_CTRL_0_REG);
+}
+
+/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
+/* TODO: Only one user? Just disable? */
+static void mvpp2_port_periodic_xon_set(struct mvpp2_port *pp, bool enable)
+{
+	u32 reg_val;
+
+	reg_val = readl(pp->base + MVPP2_GMAC_CTRL_1_REG);
+
+	if (enable)
+		reg_val |= MVPP2_GMAC_PERIODIC_XON_EN_MASK;
+	else
+		reg_val &= ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
+
+	writel(reg_val, pp->base + MVPP2_GMAC_CTRL_1_REG);
+}
+
+/* Configure loopback port */
+static void mvpp2_port_loopback_set(struct mvpp2_port *pp)
+{
+	u32 reg_val;
+
+	reg_val = readl(pp->base + MVPP2_GMAC_CTRL_1_REG);
+
+	if (pp->speed == 1000)
+		reg_val |= MVPP2_GMAC_GMII_LB_EN_MASK;
+	else
+		reg_val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
+
+	if (pp->phy_interface == PHY_INTERFACE_MODE_SGMII)
+		reg_val |= MVPP2_GMAC_PCS_LB_EN_MASK;
+	else
+		reg_val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
+
+	writel(reg_val, pp->base + MVPP2_GMAC_CTRL_1_REG);
+}
+
+/* Enable/disable port reset state */
+/* TODO: We have only one user, revisit this */
+static void mvpp2_port_reset_set(struct mvpp2_port *pp, bool set_reset)
+{
+	u32 reg_val;
+
+	reg_val = readl(pp->base + MVPP2_GMAC_CTRL_2_REG);
+	reg_val &= ~MVPP2_GMAC_PORT_RESET_MASK;
+
+	if (set_reset)
+		reg_val |= MVPP2_GMAC_PORT_RESET_MASK;
+	else
+		reg_val &= ~MVPP2_GMAC_PORT_RESET_MASK;
+
+	writel(reg_val, pp->base + MVPP2_GMAC_CTRL_2_REG);
+
+	if (!set_reset)
+		while (readl(pp->base + MVPP2_GMAC_CTRL_2_REG) &
+		       MVPP2_GMAC_PORT_RESET_MASK)
+			continue;
+}
+
+/* Change maximum receive size of the port */
+static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *pp)
+{
+	int reg_val;
+
+	reg_val = readl(pp->base + MVPP2_GMAC_CTRL_0_REG);
+	reg_val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
+	reg_val |= (((pp->pkt_size - MVPP2_MH_SIZE) / 2) <<
+		    MVPP2_GMAC_MAX_RX_SIZE_OFFS);
+	writel(reg_val, pp->base + MVPP2_GMAC_CTRL_0_REG);
+}
+
+/* Set defaults to the MVPP2 port */
+static void mvpp2_defaults_set(struct mvpp2_port *pp)
+{
+	int reg_val;
+	int cpu;
+	int txp;
+	int queue;
+	int ptxq;
+	int lrxq;
+	int tx_port_num;
+
+	/* Configure port to loopback if needed */
+	if (pp->flags & MVPP2_F_LOOPBACK)
+		mvpp2_port_loopback_set(pp);
+
+	/* Update TX FIFO MIN Threshold */
+	reg_val = readl(pp->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+	reg_val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
+	/* Min. TX threshold must be less than minimal packet length */
+	reg_val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
+	writel(reg_val, pp->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+
+	for (txp = 0; txp < pp->txp_num; txp++) {
+		/* Disable Legacy WRR, Disable EJP, Release from reset */
+		tx_port_num = mvpp2_egress_port(pp, txp);
+		mvpp2_write(pp->pp2, MVPP2_TXP_SCHED_PORT_INDEX_REG,
+			    tx_port_num);
+		mvpp2_write(pp->pp2, MVPP2_TXP_SCHED_CMD_1_REG, 0);
+
+		/* Close bandwidth for all queues */
+		for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
+			ptxq = mvpp2_txq_phys(pp->id, queue);
+			mvpp2_write(pp->pp2,
+				    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
+		}
+
+		/* Set refill period to 1 usec, refill tokens
+		 * and bucket size to maximum
+		 */
+		mvpp2_write(pp->pp2, MVPP2_TXP_SCHED_PERIOD_REG,
+			    pp->pp2->tclk / 1000000);
+		reg_val = mvpp2_read(pp->pp2, MVPP2_TXP_SCHED_REFILL_REG);
+		reg_val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
+		reg_val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
+		reg_val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
+		mvpp2_write(pp->pp2, MVPP2_TXP_SCHED_REFILL_REG, reg_val);
+		reg_val = MVPP2_TXP_TOKEN_SIZE_MAX;
+		mvpp2_write(pp->pp2, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, reg_val);
+	}
+	/* Set MaximumLowLatencyPacketSize value to 256 */
+	mvpp2_write(pp->pp2, MVPP2_RX_CTRL_REG(pp->id),
+		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
+		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
+
+	/* Enable Rx cache snoop */
+	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
+		queue = pp->rxqs[lrxq]->id;
+		reg_val = mvpp2_read(pp->pp2, MVPP2_RXQ_CONFIG_REG(queue));
+		reg_val |= MVPP2_SNOOP_PKT_SIZE_MASK |
+			   MVPP2_SNOOP_BUF_HDR_MASK;
+		mvpp2_write(pp->pp2, MVPP2_RXQ_CONFIG_REG(queue), reg_val);
+	}
+
+	/* At default, mask all interrupts to all present cpus */
+	for_each_present_cpu(cpu)
+		mvpp2_cpu_interrupts_disable(pp, cpu);
+}
+
+/* Enable/disable receiving packets */
+static void mvpp2_ingress_enable(struct mvpp2_port *pp, bool en)
+{
+	u32 reg_val;
+	int lrxq, queue;
+
+	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
+		queue = pp->rxqs[lrxq]->id;
+		reg_val = mvpp2_read(pp->pp2, MVPP2_RXQ_CONFIG_REG(queue));
+		if (en)
+			reg_val &= ~MVPP2_RXQ_DISABLE_MASK;
+		else
+			reg_val |= MVPP2_RXQ_DISABLE_MASK;
+		mvpp2_write(pp->pp2, MVPP2_RXQ_CONFIG_REG(queue), reg_val);
+	}
+}
+
+/* Disable transmit via physical egress queue
+ * - HW doesn't take descriptors from DRAM
+ */
+static void mvpp2_txp_disable(struct mvpp2_port *pp, int txp)
+{
+	u32 reg_data;
+	int delay;
+	int tx_port_num = mvpp2_egress_port(pp, txp);
+
+	/* Issue stop command for active channels only */
+	mvpp2_write(pp->pp2, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+	reg_data = (mvpp2_read(pp->pp2, MVPP2_TXP_SCHED_Q_CMD_REG)) &
+		    MVPP2_TXP_SCHED_ENQ_MASK;
+	if (reg_data != 0)
+		mvpp2_write(pp->pp2, MVPP2_TXP_SCHED_Q_CMD_REG,
+			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
+
+	/* Wait for all Tx activity to terminate. */
+	delay = 0;
+	do {
+		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
+			netdev_warn(pp->dev,
+				    "Tx stop timed out, status=0x%08x\n",
+				    reg_data);
+			break;
+		}
+		mdelay(1);
+		delay++;
+
+		/* Check port TX Command register that all
+		 * Tx queues are stopped
+		 */
+		reg_data = mvpp2_read(pp->pp2, MVPP2_TXP_SCHED_Q_CMD_REG);
+	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
+}
+
+/* Enable transmit via physical egress queue
+ * - HW starts take descriptors from DRAM
+ */
+static void mvpp2_txp_enable(struct mvpp2_port *pp, int txp)
+{
+	u32 qmap;
+	int queue;
+	int tx_port_num = mvpp2_egress_port(pp, txp);
+
+	/* Enable all initialized TXs. */
+	qmap = 0;
+	for (queue = 0; queue < txq_number; queue++) {
+		struct mvpp2_tx_queue *txq = pp->txqs[queue];
+		if (txq->descs != NULL)
+			qmap |= (1 << queue);
+	}
+
+	mvpp2_write(pp->pp2, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+	mvpp2_write(pp->pp2, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
+}
+
+/* Enable/disable fetching descriptors from initialized TXQs */
+static void mvpp2_egress_enable(struct mvpp2_port *pp, bool en)
+{
+	int txp;
+
+	for (txp = 0; txp < pp->txp_num; txp++)
+		if (en)
+			/* Enable all physical TXQs */
+			mvpp2_txp_enable(pp, txp);
+		else
+			/* Disable all physical TXQs */
+			mvpp2_txp_disable(pp, txp);
+}
+
+/* Rx descriptors helper methods */
+
+/* Get number of Rx descriptors occupied by received packets */
+static inline int
+mvpp2_rxq_received(struct mvpp2_port *pp, int rxq_id)
+{
+	u32 val = mvpp2_read(pp->pp2, MVPP2_RXQ_STATUS_REG(rxq_id));
+	return val & MVPP2_RXQ_OCCUPIED_MASK;
+}
+
+/* Update Rx queue status with the number of occupied and available
+ * Rx descriptor slots.
+ */
+static inline void
+mvpp2_rxq_status_update(struct mvpp2_port *pp, int rxq_id,
+			int used_count, int free_count)
+{
+	/* Decrement the number of used descriptors and increment count
+	 * increment the number of free descriptors.
+	 */
+	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
+	mvpp2_write(pp->pp2, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
+}
+
+/* Get pointer to next RX descriptor to be processed by SW */
+static inline struct mvpp2_rx_desc *
+mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
+{
+	int rx_desc = rxq->next_desc_to_proc;
+
+	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
+	prefetch(rxq->descs + rxq->next_desc_to_proc);
+	return rxq->descs + rx_desc;
+}
+
+/* Set rx queue offset */
+static void mvpp2_rxq_offset_set(struct mvpp2_port *pp,
+				 int prxq, int offset)
+{
+	u32 reg_val;
+
+	/* Convert offset from bytes to units of 32 bytes */
+	offset = offset >> 5;
+
+	reg_val = mvpp2_read(pp->pp2, MVPP2_RXQ_CONFIG_REG(prxq));
+	reg_val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
+
+	/* Offset is in */
+	reg_val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
+		    MVPP2_RXQ_PACKET_OFFSET_MASK);
+
+	mvpp2_write(pp->pp2, MVPP2_RXQ_CONFIG_REG(prxq), reg_val);
+}
+
+/* Obtain BM cookie information from descriptor */
+static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
+{
+	int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
+		   MVPP2_RXD_BM_POOL_ID_OFFS;
+	int cpu = smp_processor_id();
+
+	return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
+	       ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
+}
+
+/* Tx descriptors helper methods */
+
+/* Get number of Tx descriptors waiting to be transmitted by HW */
+static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *pp,
+				       struct mvpp2_tx_queue *txq)
+{
+	u32 reg_val;
+
+	mvpp2_write(pp->pp2, MVPP2_TXQ_NUM_REG, txq->id);
+	reg_val = mvpp2_read(pp->pp2, MVPP2_TXQ_PENDING_REG);
+
+	return reg_val & MVPP2_TXQ_PENDING_MASK;
+}
+
+/* Get pointer to next Tx descriptor to be processed (send) by HW */
+static struct mvpp2_tx_desc *
+mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
+{
+	int tx_desc = txq->next_desc_to_proc;
+
+	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
+	return txq->descs + tx_desc;
+}
+
+/* Update HW with number of aggregated Tx descriptors to be sent */
+static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *pp, int pending)
+{
+	/* aggregated access - relevant TXQ number is written in TX desc */
+	mvpp2_write(pp->pp2, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
+}
+
+/* Get number of occupied aggregated Tx descriptors */
+static u32 mvpp2_aggr_txq_pend_desc_num_get(struct mvpp2 *pp2, int cpu)
+{
+	u32 reg_val;
+
+	reg_val = mvpp2_read(pp2, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
+
+	return reg_val & MVPP2_AGGR_TXQ_PENDING_MASK;
+}
+
+/* Check if there are enough free descriptors in aggregated txq. If not, try to
+ * update number of occupied descriptors from HW and repeat checking.
+ */
+static int mvpp2_aggr_desc_num_check(struct mvpp2 *pp2,
+				     struct mvpp2_tx_queue *aggr_txq, int num)
+{
+	if ((aggr_txq->count + num) > aggr_txq->size) {
+		aggr_txq->count = mvpp2_aggr_txq_pend_desc_num_get(pp2,
+							    smp_processor_id());
+	}
+	if ((aggr_txq->count + num) > aggr_txq->size)
+		return -ERANGE;
+
+	return 0;
+}
+
+/* Reserved Tx descriptors allocation request */
+static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *pp2,
+					 struct mvpp2_tx_queue *txq, int num)
+{
+	u32 reg_val;
+
+	reg_val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
+	mvpp2_write(pp2, MVPP2_TXQ_RSVD_REQ_REG, reg_val);
+
+	reg_val = mvpp2_read(pp2, MVPP2_TXQ_RSVD_RSLT_REG);
+
+	return reg_val & MVPP2_TXQ_RSVD_RSLT_MASK;
+}
+
+/* Check if there are enough reserved descriptors for transmission. If not, try
+ * to reqest chunk of reserved descriptors and check again.
+ */
+static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *pp2,
+					    struct mvpp2_tx_queue *txq,
+					    struct mvpp2_txq_pcpu *txq_pcpu,
+					    int num)
+{
+	struct mvpp2_txq_pcpu *txq_pcpu_aux;
+
+	if (txq_pcpu->reserved_num < num) {
+		int req, cpu, new_reserved, desc_count = 0;
+
+		/* Compute total of used descriptors */
+		for_each_present_cpu(cpu) {
+			txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
+			desc_count += txq_pcpu_aux->count;
+			desc_count += txq_pcpu_aux->reserved_num;
+		}
+
+		req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
+		desc_count += req;
+
+		if (desc_count > txq->swf_size)
+			return -ERANGE;
+
+		new_reserved = mvpp2_txq_alloc_reserved_desc(pp2, txq, req);
+
+		txq_pcpu->reserved_num += new_reserved;
+
+		if (txq_pcpu->reserved_num < num)
+			return -ERANGE;
+	}
+
+	return 0;
+}
+
+/* Release the last allocated Tx descriptor. Useful to handle DMA
+ * mapping failures in the Tx path.
+ */
+static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
+{
+	if (txq->next_desc_to_proc == 0)
+		txq->next_desc_to_proc = txq->last_desc - 1;
+	else
+		txq->next_desc_to_proc--;
+}
+
+/* Set Tx descriptors fields relevant for CSUM calculation */
+static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
+				int ip_hdr_len, int l4_proto)
+{
+	u32 command;
+
+	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
+	 * G_L4_chk, L4_type required only for checksum calculation
+	 */
+	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
+	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
+	command |= MVPP2_TXD_IP_CSUM_DISABLE;
+
+	if (l3_proto == swab16(ETH_P_IP)) {
+		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
+		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
+	} else
+		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
+
+	if (l4_proto == IPPROTO_TCP) {
+		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
+		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
+	} else if (l4_proto == IPPROTO_UDP) {
+		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
+		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
+	} else
+		command |= MVPP2_TXD_L4_CSUM_NOT;
+
+	return command;
+}
+
+/* Get number of sent descriptors and decrement counter.
+ * The number of sent descriptors is returned.
+ * Per-CPU access
+ */
+static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *pp,
+					   struct mvpp2_tx_queue *txq)
+{
+	u32 reg_val;
+
+	/* Reading status reg resets transmitted descriptor counter */
+	reg_val = mvpp2_read(pp->pp2, MVPP2_TXQ_SENT_REG(txq->id));
+
+	return (reg_val & MVPP2_TRANSMITTED_COUNT_MASK) >>
+		MVPP2_TRANSMITTED_COUNT_OFFSET;
+}
+
+static void mvpp2_txq_sent_counter_clear(void *arg)
+{
+	struct mvpp2_tx_queue *txq = arg;
+	struct mvpp2 *pp2 = txq->pp2;
+	int reg_val;
+
+	reg_val = mvpp2_read(pp2, MVPP2_TXQ_SENT_REG(txq->id));
+}
+
+/* Queues helepr methods */
+
+/* Set max sizes for Tx queues */
+static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *pp, int txp)
+{
+	u32	reg_val, size, mtu;
+	int	txq, tx_port_num;
+
+	mtu = pp->pkt_size * 8;
+	if (mtu > MVPP2_TXP_MTU_MAX)
+		mtu = MVPP2_TXP_MTU_MAX;
+
+	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
+	mtu = 3 * mtu;
+
+	/* Indirect access to registers */
+	tx_port_num = mvpp2_egress_port(pp, txp);
+	mvpp2_write(pp->pp2, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+
+	/* Set MTU */
+	reg_val = mvpp2_read(pp->pp2, MVPP2_TXP_SCHED_MTU_REG);
+	reg_val &= ~MVPP2_TXP_MTU_MAX;
+	reg_val |= mtu;
+	mvpp2_write(pp->pp2, MVPP2_TXP_SCHED_MTU_REG, reg_val);
+
+	/* TXP token size and all TXQs token size must be larger that MTU */
+	reg_val = mvpp2_read(pp->pp2, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
+	size = reg_val & MVPP2_TXP_TOKEN_SIZE_MAX;
+	if (size < mtu) {
+		size = mtu;
+		reg_val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
+		reg_val |= size;
+		mvpp2_write(pp->pp2, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, reg_val);
+	}
+	for (txq = 0; txq < txq_number; txq++) {
+		reg_val = mvpp2_read(pp->pp2,
+				     MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
+		size = reg_val & MVPP2_TXQ_TOKEN_SIZE_MAX;
+		if (size < mtu) {
+			size = mtu;
+			reg_val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
+			reg_val |= size;
+			mvpp2_write(pp->pp2,
+				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
+				    reg_val);
+		}
+	}
+}
+
+/* Set the number of packets that will be received before Rx interrupt
+ * will be generated by HW.
+ */
+static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *pp,
+				   struct mvpp2_rx_queue *rxq, u32 pkts)
+{
+	u32 val;
+
+	val = (pkts & MVPP2_OCCUPIED_THRESH_MASK);
+	mvpp2_write(pp->pp2, MVPP2_RXQ_NUM_REG, rxq->id);
+	mvpp2_write(pp->pp2, MVPP2_RXQ_THRESH_REG, val);
+
+	rxq->pkts_coal = pkts;
+}
+
+/* Set the time delay in usec before RX interrupt will be generated by
+ * HW.
+ */
+static void mvpp2_rx_time_coal_set(struct mvpp2_port *pp,
+				    struct mvpp2_rx_queue *rxq, u32 usec)
+{
+	u32 val;
+
+	val = (pp->pp2->tclk / 1000000) * usec;
+	mvpp2_write(pp->pp2, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
+
+	rxq->time_coal = usec;
+}
+
+/* Set threshold for TX_DONE pkts coalescing */
+static void mvpp2_tx_done_pkts_coal_set(void *arg)
+{
+	struct mvpp2_tx_queue *txq = arg;
+	struct mvpp2 *pp2 = txq->pp2;
+	u32 pkts = txq->done_pkts_coal;
+	u32 reg_val;
+
+	reg_val = (pkts << MVPP2_TRANSMITTED_THRESH_OFFSET) &
+		   MVPP2_TRANSMITTED_THRESH_MASK;
+	mvpp2_write(pp2, MVPP2_TXQ_NUM_REG, txq->id);
+	mvpp2_write(pp2, MVPP2_TXQ_THRESH_REG, reg_val);
+}
+
+/* Free Tx queue skbuffs */
+static void mvpp2_txq_bufs_free(struct mvpp2_port *pp,
+				struct mvpp2_tx_queue *txq,
+				struct mvpp2_txq_pcpu *txq_pcpu, int num)
+{
+	int i;
+
+	for (i = 0; i < num; i++) {
+		struct mvpp2_tx_desc *tx_desc = txq->descs +
+							txq_pcpu->txq_get_index;
+		struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
+
+		mvpp2_txq_inc_get(txq_pcpu);
+
+		if (!skb)
+			continue;
+
+		dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
+				 tx_desc->data_size, DMA_TO_DEVICE);
+		dev_kfree_skb_any(skb);
+	}
+}
+
+static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *pp,
+							u32 cause)
+{
+	int queue = fls(cause) - 1;
+	return pp->rxqs[queue];
+}
+
+static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *pp,
+							u32 cause)
+{
+	int queue = fls(cause >> 16) - 1;
+	return pp->txqs[queue];
+}
+
+/* Handle end of transmission */
+static void mvpp2_txq_done(struct mvpp2_port *pp, struct mvpp2_tx_queue *txq,
+			   struct mvpp2_txq_pcpu *txq_pcpu)
+{
+	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->log_id);
+	int tx_done;
+
+	if (txq_pcpu->cpu != smp_processor_id())
+		netdev_err(pp->dev, "wrong cpu on the end of Tx processing\n");
+
+	tx_done = mvpp2_txq_sent_desc_proc(pp, txq);
+	if (!tx_done)
+		return;
+	mvpp2_txq_bufs_free(pp, txq, txq_pcpu, tx_done);
+
+	txq_pcpu->count -= tx_done;
+
+	if (netif_tx_queue_stopped(nq))
+		if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
+			netif_tx_wake_queue(nq);
+
+	return;
+}
+
+/* Rx/Tx queue initialization/cleanup methods */
+
+/* Allocate and initialize descriptors for aggr TXQ */
+static int mvpp2_aggr_txq_init(struct platform_device *pdev,
+			       struct mvpp2_tx_queue *aggr_txq,
+			       int desc_num, int cpu,
+			       struct mvpp2 *pp2)
+{
+	/* Allocate memory for TX descriptors */
+	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
+				desc_num * MVPP2_DESC_ALIGNED_SIZE,
+				&aggr_txq->descs_phys, GFP_KERNEL);
+	if (!aggr_txq->descs)
+		return -ENOMEM;
+
+	/* Make sure descriptor address is cache line size aligned  */
+	BUG_ON(aggr_txq->descs !=
+	       PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
+
+	aggr_txq->last_desc = aggr_txq->size - 1;
+
+	/* Aggr TXQ no reset WA */
+	aggr_txq->next_desc_to_proc = mvpp2_read(pp2,
+						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));
+
+	/* Set Tx descriptors queue starting address */
+	/* indirect access */
+	mvpp2_write(pp2, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
+		    aggr_txq->descs_phys);
+	mvpp2_write(pp2, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
+
+	return 0;
+}
+
+/* Create a specified Rx queue */
+static int mvpp2_rxq_init(struct mvpp2_port *pp,
+			   struct mvpp2_rx_queue *rxq)
+
+{
+	rxq->size = pp->rx_ring_size;
+
+	/* Allocate memory for RX descriptors */
+	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
+					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
+					&rxq->descs_phys, GFP_KERNEL);
+	if (!rxq->descs)
+		return -ENOMEM;
+
+	BUG_ON(rxq->descs !=
+	       PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
+
+	rxq->last_desc = rxq->size - 1;
+
+	/* Zero occupied and non-occupied counters - direct access */
+	mvpp2_write(pp->pp2, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
+
+	/* Set Rx descriptors queue starting address - indirect access */
+	mvpp2_write(pp->pp2, MVPP2_RXQ_NUM_REG, rxq->id);
+	mvpp2_write(pp->pp2, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
+	mvpp2_write(pp->pp2, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
+	mvpp2_write(pp->pp2, MVPP2_RXQ_INDEX_REG, 0);
+
+	/* Set Offset */
+	mvpp2_rxq_offset_set(pp, rxq->id, NET_SKB_PAD);
+
+	/* Set coalescing pkts and time */
+	mvpp2_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
+	mvpp2_rx_time_coal_set(pp, rxq, rxq->time_coal);
+
+	/* Add number of descriptors ready for receiving packets */
+	mvpp2_rxq_status_update(pp, rxq->id, 0, rxq->size);
+
+	return 0;
+}
+
+/* Push packets received by the RXQ to BM pool */
+static void mvpp2_rxq_drop_pkts(struct mvpp2_port *pp,
+				struct mvpp2_rx_queue *rxq)
+{
+	int rx_received, i;
+
+	rx_received = mvpp2_rxq_received(pp, rxq->id);
+	if (!rx_received)
+		return;
+
+	for (i = 0; i < rx_received; i++) {
+		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
+		struct mvpp2_bm_pool *bm_pool;
+		int pool;
+		u32 bm;
+
+		bm = mvpp2_bm_cookie_build(rx_desc);
+		pool = mvpp2_bm_cookie_pool_get(bm);
+		bm_pool = &pp->pp2->bm_pools[pool];
+
+		mvpp2_pool_refill(pp, bm, rx_desc->buf_phys_addr,
+				  rx_desc->buf_cookie);
+	}
+	mvpp2_rxq_status_update(pp, rxq->id, rx_received, rx_received);
+}
+
+/* Cleanup Rx queue */
+static void mvpp2_rxq_deinit(struct mvpp2_port *pp,
+			     struct mvpp2_rx_queue *rxq)
+{
+	mvpp2_rxq_drop_pkts(pp, rxq);
+
+	if (rxq->descs)
+		dma_free_coherent(pp->dev->dev.parent,
+				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
+				  rxq->descs,
+				  rxq->descs_phys);
+
+	rxq->descs             = NULL;
+	rxq->last_desc         = 0;
+	rxq->next_desc_to_proc = 0;
+	rxq->descs_phys        = 0;
+
+	/* Clear Rx descriptors queue starting address and size;
+	 * free descriptor number
+	 */
+	mvpp2_write(pp->pp2, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
+	mvpp2_write(pp->pp2, MVPP2_RXQ_NUM_REG, rxq->id);
+	mvpp2_write(pp->pp2, MVPP2_RXQ_DESC_ADDR_REG, 0);
+	mvpp2_write(pp->pp2, MVPP2_RXQ_DESC_SIZE_REG, 0);
+}
+
+/* Create and initialize a Tx queue */
+static int mvpp2_txq_init(struct mvpp2_port *pp, int txp,
+			  struct mvpp2_tx_queue *txq)
+{
+	u32 reg_val;
+	int cpu, desc;
+	int desc_per_txq;
+	int tx_port_num;
+	struct mvpp2_txq_pcpu *txq_pcpu;
+
+	/* Allocate memory for Tx descriptors */
+	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
+				txq->size * MVPP2_DESC_ALIGNED_SIZE,
+				&txq->descs_phys, GFP_KERNEL);
+	if (!txq->descs)
+		return -ENOMEM;
+
+	/* Make sure descriptor address is cache line size aligned  */
+	BUG_ON(txq->descs !=
+	       PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
+
+	txq->last_desc = txq->size - 1;
+
+	/* Set Tx descriptors queue starting address - indirect access */
+	mvpp2_write(pp->pp2, MVPP2_TXQ_NUM_REG, txq->id);
+	mvpp2_write(pp->pp2, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
+	mvpp2_write(pp->pp2, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
+					     MVPP2_TXQ_DESC_SIZE_MASK);
+	mvpp2_write(pp->pp2, MVPP2_TXQ_INDEX_REG, 0);
+	mvpp2_write(pp->pp2, MVPP2_TXQ_RSVD_CLR_REG,
+		    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
+	reg_val = mvpp2_read(pp->pp2, MVPP2_TXQ_PENDING_REG);
+	reg_val &= ~MVPP2_TXQ_PENDING_MASK;
+	mvpp2_write(pp->pp2, MVPP2_TXQ_PENDING_REG, reg_val);
+
+	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
+	 * for each existing TXQ.
+	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
+	 * GBE ports assumed to be continious from 0 to MVPP2_MAX_PORTS
+	 */
+	desc_per_txq = 16;
+	desc = (pp->id * MVPP2_MAX_TXQ * desc_per_txq) +
+	       (txq->log_id * desc_per_txq);
+
+	mvpp2_write(pp->pp2, MVPP2_TXQ_PREF_BUF_REG,
+		    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
+		    MVPP2_PREF_BUF_THRESH(desc_per_txq/2));
+
+	/* WRR / EJP configuration - indirect access */
+	tx_port_num = mvpp2_egress_port(pp, txp);
+	mvpp2_write(pp->pp2, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+
+	reg_val = mvpp2_read(pp->pp2, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
+	reg_val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
+	reg_val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
+	reg_val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
+	mvpp2_write(pp->pp2, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), reg_val);
+
+	reg_val = MVPP2_TXQ_TOKEN_SIZE_MAX;
+	mvpp2_write(pp->pp2, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
+		    reg_val);
+
+	for_each_present_cpu(cpu) {
+		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+		txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
+					   sizeof(*txq_pcpu->tx_skb),
+					   GFP_KERNEL);
+		if (!txq_pcpu->tx_skb) {
+			dma_free_coherent(pp->dev->dev.parent,
+					  txq->size * MVPP2_DESC_ALIGNED_SIZE,
+					  txq->descs, txq->descs_phys);
+			return -ENOMEM;
+		}
+
+		txq_pcpu->count = 0;
+		txq_pcpu->reserved_num = 0;
+		txq_pcpu->txq_put_index = 0;
+		txq_pcpu->txq_get_index = 0;
+	}
+
+	on_each_cpu(mvpp2_txq_sent_counter_clear, txq, 1);
+	on_each_cpu(mvpp2_tx_done_pkts_coal_set, txq, 1);
+
+	return 0;
+}
+
+/* Free allocated TXQ resources */
+static void mvpp2_txq_deinit(struct mvpp2_port *pp,
+			     struct mvpp2_tx_queue *txq)
+{
+	struct mvpp2_txq_pcpu *txq_pcpu;
+	int cpu;
+
+	for_each_present_cpu(cpu) {
+		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+		kfree(txq_pcpu->tx_skb);
+	}
+
+	if (txq->descs)
+		dma_free_coherent(pp->dev->dev.parent,
+				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
+				  txq->descs, txq->descs_phys);
+
+	txq->descs             = NULL;
+	txq->last_desc         = 0;
+	txq->next_desc_to_proc = 0;
+	txq->descs_phys        = 0;
+
+	/* Set minimum bandwidth for disabled TXQs */
+	mvpp2_write(pp->pp2, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
+
+	/* Set Tx descriptors queue starting address and size */
+	mvpp2_write(pp->pp2, MVPP2_TXQ_NUM_REG, txq->id);
+	mvpp2_write(pp->pp2, MVPP2_TXQ_DESC_ADDR_REG, 0);
+	mvpp2_write(pp->pp2, MVPP2_TXQ_DESC_SIZE_REG, 0);
+}
+
+/* Cleanup Tx ports */
+static void mvpp2_txp_clean(struct mvpp2_port *pp, int txp,
+			    struct mvpp2_tx_queue *txq)
+{
+	struct mvpp2_txq_pcpu *txq_pcpu;
+	int delay, pending, cpu;
+	u32 reg_val;
+
+	mvpp2_write(pp->pp2, MVPP2_TXQ_NUM_REG, txq->id);
+	reg_val = mvpp2_read(pp->pp2, MVPP2_TXQ_PREF_BUF_REG);
+	reg_val |= MVPP2_TXQ_DRAIN_EN_MASK;
+	mvpp2_write(pp->pp2, MVPP2_TXQ_PREF_BUF_REG, reg_val);
+
+	/* The napi queue has been stopped so wait for all packets
+	 * to be transmitted.
+	 */
+	delay = 0;
+	do {
+		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
+			netdev_warn(pp->dev,
+				    "port %d: cleaning queue %d timed out\n",
+				    pp->id, txq->log_id);
+			break;
+		}
+		mdelay(1);
+		delay++;
+
+		pending = mvpp2_txq_pend_desc_num_get(pp, txq);
+	} while (pending);
+
+	reg_val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
+	mvpp2_write(pp->pp2, MVPP2_TXQ_PREF_BUF_REG, reg_val);
+
+	on_each_cpu(mvpp2_txq_sent_counter_clear, txq, 1);
+
+	for_each_present_cpu(cpu) {
+		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+
+		/* Release all packets */
+		mvpp2_txq_bufs_free(pp, txq, txq_pcpu, txq_pcpu->count);
+
+		/* Reset queue */
+		txq_pcpu->count = 0;
+		txq_pcpu->txq_put_index = 0;
+		txq_pcpu->txq_get_index = 0;
+	}
+}
+
+/* Cleanup all Tx queues */
+static void mvpp2_cleanup_txqs(struct mvpp2_port *pp)
+{
+	struct mvpp2_tx_queue *txq;
+	int txp, queue;
+	u32 reg_val;
+
+	reg_val = mvpp2_read(pp->pp2, MVPP2_TX_PORT_FLUSH_REG);
+
+	/* Reset Tx ports and delete Tx queues */
+	for (txp = 0; txp < pp->txp_num; txp++) {
+		reg_val |= MVPP2_TX_PORT_FLUSH_MASK(pp->id);
+		mvpp2_write(pp->pp2, MVPP2_TX_PORT_FLUSH_REG, reg_val);
+
+		for (queue = 0; queue < txq_number; queue++) {
+			txq = pp->txqs[txp * txq_number + queue];
+			mvpp2_txp_clean(pp, txp, txq);
+			mvpp2_txq_deinit(pp, txq);
+		}
+
+		reg_val &= ~MVPP2_TX_PORT_FLUSH_MASK(pp->id);
+		mvpp2_write(pp->pp2, MVPP2_TX_PORT_FLUSH_REG, reg_val);
+	}
+}
+
+/* Cleanup all Rx queues */
+static void mvpp2_cleanup_rxqs(struct mvpp2_port *pp)
+{
+	int queue;
+
+	for (queue = 0; queue < rxq_number; queue++)
+		mvpp2_rxq_deinit(pp, pp->rxqs[queue]);
+}
+
+/* Init all Rx queues for port */
+static int mvpp2_setup_rxqs(struct mvpp2_port *pp)
+{
+	int queue, err;
+
+	for (queue = 0; queue < rxq_number; queue++) {
+		err = mvpp2_rxq_init(pp, pp->rxqs[queue]);
+		if (err)
+			goto err_cleanup;
+	}
+	return 0;
+
+err_cleanup:
+	mvpp2_cleanup_rxqs(pp);
+	return err;
+}
+
+/* Init all tx queues for port */
+static int mvpp2_setup_txqs(struct mvpp2_port *pp)
+{
+	struct mvpp2_tx_queue *txq;
+	int txp, queue, err;
+
+	for (txp = 0; txp < pp->txp_num; txp++) {
+		for (queue = 0; queue < txq_number; queue++) {
+			txq = pp->txqs[txp * txq_number + queue];
+			err = mvpp2_txq_init(pp, txp, txq);
+			if (err)
+				goto err_cleanup;
+		}
+	}
+	return 0;
+
+err_cleanup:
+	mvpp2_cleanup_txqs(pp);
+	return err;
+}
+
+/* The callback for per-port interrupt */
+static irqreturn_t mvpp2_isr(int irq, void *dev_id)
+{
+	struct mvpp2_port *pp = (struct mvpp2_port *)dev_id;
+	int cpu;
+
+	for_each_present_cpu(cpu)
+		mvpp2_cpu_interrupts_disable(pp, cpu);
+
+	napi_schedule(&pp->napi);
+
+	return IRQ_HANDLED;
+}
+
+/* Adjust link */
+void mvpp2_link_event(struct net_device *dev)
+{
+	struct mvpp2_port *pp = netdev_priv(dev);
+	struct phy_device *phydev = pp->phy_dev;
+	int status_change = 0;
+	u32 val;
+
+	if (phydev->link) {
+		if ((pp->speed != phydev->speed) ||
+		    (pp->duplex != phydev->duplex)) {
+			u32 val;
+
+			val = readl(pp->base + MVPP2_GMAC_AUTONEG_CONFIG);
+			val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
+				 MVPP2_GMAC_CONFIG_GMII_SPEED |
+				 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
+				 MVPP2_GMAC_AN_SPEED_EN |
+				 MVPP2_GMAC_AN_DUPLEX_EN);
+
+			if (phydev->duplex)
+				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+
+			if (phydev->speed == SPEED_1000)
+				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
+			else
+				val |= MVPP2_GMAC_CONFIG_MII_SPEED;
+
+			writel(val, pp->base + MVPP2_GMAC_AUTONEG_CONFIG);
+
+			pp->duplex = phydev->duplex;
+			pp->speed  = phydev->speed;
+		}
+	}
+
+	if (phydev->link != pp->link) {
+		if (!phydev->link) {
+			pp->duplex = -1;
+			pp->speed = 0;
+		}
+
+		pp->link = phydev->link;
+		status_change = 1;
+	}
+
+	if (status_change) {
+		if (phydev->link) {
+			val = readl(pp->base + MVPP2_GMAC_AUTONEG_CONFIG);
+			val |= (MVPP2_GMAC_FORCE_LINK_PASS |
+				MVPP2_GMAC_FORCE_LINK_DOWN);
+			writel(val, pp->base + MVPP2_GMAC_AUTONEG_CONFIG);
+			mvpp2_egress_enable(pp, true);
+			mvpp2_ingress_enable(pp, true);
+			netdev_info(pp->dev, "link up");
+		} else {
+			mvpp2_ingress_enable(pp, false);
+			mvpp2_egress_enable(pp, false);
+			netdev_info(pp->dev, "link down\n");
+		}
+	}
+}
+
+/* Main RX/TX processing routines */
+
+/* Display more error info */
+static void mvpp2_rx_error(struct mvpp2_port *pp,
+			   struct mvpp2_rx_desc *rx_desc)
+{
+	u32 status = rx_desc->status;
+
+	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
+	case MVPP2_RXD_ERR_CRC:
+		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
+			   status, rx_desc->data_size);
+		break;
+	case MVPP2_RXD_ERR_OVERRUN:
+		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
+			   status, rx_desc->data_size);
+		break;
+	case MVPP2_RXD_ERR_RESOURCE:
+		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
+			   status, rx_desc->data_size);
+		break;
+	}
+}
+
+/* Handle RX checksum offload */
+static void mvpp2_rx_csum(struct mvpp2_port *pp, u32 status,
+			  struct sk_buff *skb)
+{
+	if (((status & MVPP2_RXD_L3_IP4) &&
+	    !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
+	    (status & MVPP2_RXD_L3_IP6))
+		if (((status & MVPP2_RXD_L4_UDP) ||
+		     (status & MVPP2_RXD_L4_TCP)) &&
+		     (status & MVPP2_RXD_L4_CSUM_OK)) {
+			skb->csum = 0;
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			return;
+		}
+
+	skb->ip_summed = CHECKSUM_NONE;
+}
+
+/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
+static int mvpp2_rx_refill(struct mvpp2_port *pp, struct mvpp2_bm_pool *bm_pool,
+			   u32 bm, int is_recycle)
+{
+	struct sk_buff *skb;
+	dma_addr_t phys_addr;
+
+	if (is_recycle &&
+	   (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh))
+		return 0;
+
+	/* No recycle or too many buffers are in use, so allocate a new skb */
+	skb = mvpp2_skb_alloc(pp, bm_pool, &phys_addr, GFP_ATOMIC);
+	if (!skb)
+		return -ENOMEM;
+
+	mvpp2_pool_refill(pp, bm, (u32)phys_addr, (u32)skb);
+	atomic_dec(&bm_pool->in_use);
+	return 0;
+}
+
+/* Handle tx checksum */
+static u32 mvpp2_skb_tx_csum(struct mvpp2_port *pp, struct sk_buff *skb)
+{
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		int ip_hdr_len = 0;
+		u8 l4_proto;
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			struct iphdr *ip4h = ip_hdr(skb);
+
+			/* Calculate IPv4 checksum and L4 checksum */
+			ip_hdr_len = ip4h->ihl;
+			l4_proto = ip4h->protocol;
+		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+			struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+			/* Read l4_protocol from one of IPv6 extra headers */
+			if (skb_network_header_len(skb) > 0)
+				ip_hdr_len = (skb_network_header_len(skb) >> 2);
+			l4_proto = ip6h->nexthdr;
+		} else
+			return MVPP2_TXD_L4_CSUM_NOT;
+
+		return mvpp2_txq_desc_csum(skb_network_offset(skb),
+				skb->protocol, ip_hdr_len, l4_proto);
+	}
+
+	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
+}
+
+static void mvpp2_buff_hdr_rx(struct mvpp2_port *pp,
+			      struct mvpp2_rx_desc *rx_desc)
+{
+	struct mvpp2_buff_hdr *buff_hdr;
+	struct sk_buff *skb;
+	u32 rx_status = rx_desc->status;
+	u32 buff_phys_addr;
+	u32 buff_virt_addr;
+	u32 buff_phys_addr_next;
+	u32 buff_virt_addr_next;
+	int mc_id;
+	int pool_id;
+
+	pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
+		   MVPP2_RXD_BM_POOL_ID_OFFS;
+	buff_phys_addr = rx_desc->buf_phys_addr;
+	buff_virt_addr = rx_desc->buf_cookie;
+
+	do {
+		skb = (struct sk_buff *)buff_virt_addr;
+		buff_hdr = (struct mvpp2_buff_hdr *)skb->head;
+
+		mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);
+
+		buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
+		buff_virt_addr_next = buff_hdr->next_buff_virt_addr;
+
+		/* Release buffer */
+		mvpp2_bm_pool_mc_put(pp, pool_id, buff_phys_addr,
+				     buff_virt_addr, mc_id, 0);
+
+		buff_phys_addr = buff_phys_addr_next;
+		buff_virt_addr = buff_virt_addr_next;
+
+	} while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
+}
+
+/* Main rx processing */
+static int mvpp2_rx(struct mvpp2_port *pp, int rx_todo,
+		    struct mvpp2_rx_queue *rxq)
+{
+	struct net_device *dev = pp->dev;
+	int rx_received, rx_filled, i;
+	u32 rcvd_pkts = 0;
+	u32 rcvd_bytes = 0;
+
+	/* Get number of received packets and clamp the to-do */
+	rx_received = mvpp2_rxq_received(pp, rxq->id);
+	if (rx_todo > rx_received)
+		rx_todo = rx_received;
+
+	rx_filled = 0;
+	for (i = 0; i < rx_todo; i++) {
+		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
+		struct mvpp2_bm_pool *bm_pool;
+		struct sk_buff *skb;
+		u32 bm, rx_status;
+		int pool, rx_bytes, err;
+
+		rx_filled++;
+		rx_status = rx_desc->status;
+		rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
+
+		bm = mvpp2_bm_cookie_build(rx_desc);
+		pool = mvpp2_bm_cookie_pool_get(bm);
+		bm_pool = &pp->pp2->bm_pools[pool];
+		/* Check if buffer header is used */
+		if (rx_status & MVPP2_RXD_BUF_HDR) {
+			mvpp2_buff_hdr_rx(pp, rx_desc);
+			continue;
+		}
+
+		/* In case of an error, release the requested buffer pointer
+		 * to the Buffer Manager. This request process is controlled
+		 * by the hardware, and the information about the buffer is
+		 * comprised by the RX descriptor.
+		 */
+		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
+			dev->stats.rx_errors++;
+			mvpp2_rx_error(pp, rx_desc);
+			mvpp2_pool_refill(pp, bm, rx_desc->buf_phys_addr,
+					  rx_desc->buf_cookie);
+			continue;
+		}
+
+		skb = (struct sk_buff *)rx_desc->buf_cookie;
+
+		rcvd_pkts++;
+		rcvd_bytes += rx_bytes;
+		atomic_inc(&bm_pool->in_use);
+
+		skb_reserve(skb, MVPP2_MH_SIZE);
+		skb_put(skb, rx_bytes);
+		skb->protocol = eth_type_trans(skb, dev);
+		mvpp2_rx_csum(pp, rx_status, skb);
+
+		napi_gro_receive(&pp->napi, skb);
+
+		err = mvpp2_rx_refill(pp, bm_pool, bm, 0);
+		if (err) {
+			netdev_err(pp->dev, "failed to refill BM pools\n");
+			rx_filled--;
+		}
+	}
+
+	if (rcvd_pkts) {
+		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+		u64_stats_update_begin(&stats->syncp);
+		stats->rx_packets += rcvd_pkts;
+		stats->rx_bytes   += rcvd_bytes;
+		u64_stats_update_end(&stats->syncp);
+	}
+
+	/* Update Rx queue management counters */
+	wmb();
+	mvpp2_rxq_status_update(pp, rxq->id, rx_todo, rx_filled);
+
+	return rx_todo;
+}
+
+/* Handle tx fragmentation processing */
+static int mvpp2_tx_frag_process(struct mvpp2_port *pp, struct sk_buff *skb,
+				 struct mvpp2_tx_queue *aggr_txq,
+				 struct mvpp2_tx_queue *txq)
+{
+	struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
+	struct mvpp2_tx_desc *tx_desc;
+	int i;
+	dma_addr_t buf_phys_addr;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		void *addr = page_address(frag->page.p) + frag->page_offset;
+		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+		tx_desc->phys_txq = txq->id;
+		tx_desc->data_size = frag->size;
+
+		buf_phys_addr = dma_map_single(pp->dev->dev.parent, addr,
+					       tx_desc->data_size,
+					       DMA_TO_DEVICE);
+		if (dma_mapping_error(pp->dev->dev.parent, buf_phys_addr)) {
+			mvpp2_txq_desc_put(txq);
+			goto error;
+		}
+
+		tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
+		tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);
+
+		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
+			/* Last descriptor */
+			tx_desc->command = MVPP2_TXD_L_DESC;
+			mvpp2_txq_inc_put(txq_pcpu, skb);
+		} else {
+			/* Descriptor in the middle: Not First, Not Last */
+			tx_desc->command = 0;
+			mvpp2_txq_inc_put(txq_pcpu, NULL);
+		}
+	}
+
+	return 0;
+
+error:
+	/* Release all descriptors that were used to map fragments of
+	 * this packet, as well as the corresponding DMA mappings
+	 */
+	for (i = i - 1; i >= 0; i--) {
+		tx_desc = txq->descs + i;
+		dma_unmap_single(pp->dev->dev.parent,
+				 tx_desc->buf_phys_addr,
+				 tx_desc->data_size,
+				 DMA_TO_DEVICE);
+		mvpp2_txq_desc_put(txq);
+	}
+
+	return -ENOMEM;
+}
+
+/* Main tx processing */
+static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct mvpp2_port *pp = netdev_priv(dev);
+	struct mvpp2_tx_queue *txq, *aggr_txq;
+	struct mvpp2_txq_pcpu *txq_pcpu;
+	struct mvpp2_tx_desc *tx_desc;
+	dma_addr_t buf_phys_addr;
+	int frags = 0;
+	u16 txq_id;
+	u32 tx_cmd;
+
+	if (!netif_running(dev))
+		goto out;
+
+	txq_id = skb_get_queue_mapping(skb);
+	txq = pp->txqs[txq_id];
+	txq_pcpu = this_cpu_ptr(txq->pcpu);
+	aggr_txq = &pp->pp2->aggr_txqs[smp_processor_id()];
+
+	frags = skb_shinfo(skb)->nr_frags + 1;
+
+	/* Check number of available descriptors */
+	if (mvpp2_aggr_desc_num_check(pp->pp2, aggr_txq, frags) ||
+	    mvpp2_txq_reserved_desc_num_proc(pp->pp2, txq, txq_pcpu, frags)) {
+		frags = 0;
+		goto out;
+	}
+
+	/* Get a descriptor for the first part of the packet */
+	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+	tx_desc->phys_txq = txq->id;
+	tx_desc->data_size = skb_headlen(skb);
+
+	buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
+				       tx_desc->data_size, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
+		mvpp2_txq_desc_put(txq);
+		frags = 0;
+		goto out;
+	}
+	tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
+	tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;
+
+	tx_cmd = mvpp2_skb_tx_csum(pp, skb);
+
+	if (frags == 1) {
+		/* First and Last descriptor */
+		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
+		tx_desc->command = tx_cmd;
+		mvpp2_txq_inc_put(txq_pcpu, skb);
+	} else {
+		/* First but not Last */
+		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
+		tx_desc->command = tx_cmd;
+		mvpp2_txq_inc_put(txq_pcpu, NULL);
+
+		/* Continue with other skb fragments */
+		if (mvpp2_tx_frag_process(pp, skb, aggr_txq, txq)) {
+			dma_unmap_single(dev->dev.parent,
+					 tx_desc->buf_phys_addr,
+					 tx_desc->data_size,
+					 DMA_TO_DEVICE);
+			mvpp2_txq_desc_put(txq);
+			frags = 0;
+			goto out;
+		}
+	}
+
+	txq_pcpu->reserved_num -= frags;
+	txq_pcpu->count += frags;
+	aggr_txq->count += frags;
+
+	/* Enable transmit */
+	wmb();
+	mvpp2_aggr_txq_pend_desc_add(pp, frags);
+
+	if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
+		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+		netif_tx_stop_queue(nq);
+	}
+out:
+	if (frags > 0) {
+		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+
+		u64_stats_update_begin(&stats->syncp);
+		stats->tx_packets++;
+		stats->tx_bytes += skb->len;
+		u64_stats_update_end(&stats->syncp);
+	} else {
+		dev->stats.tx_dropped++;
+		dev_kfree_skb_any(skb);
+	}
+
+	return NETDEV_TX_OK;
+}
+
+static inline void mvpp2_cause_error(struct net_device *dev, int cause)
+{
+	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
+		netdev_err(dev, "FCS error\n");
+	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
+		netdev_err(dev, "rx fifo overrun error\n");
+	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
+		netdev_err(dev, "tx fifo underrun error\n");
+}
+
+static void mvpp2_handle_cpu(void *arg)
+{
+	struct mvpp2_port *pp = arg;
+	u32 cause_rx_tx, cause_tx, cause_misc;
+
+	/* Rx/Tx cause register
+	 *
+	 * Bits 0-15: each bit indicates received packets on the Rx queue
+	 * (bit 0 is for Rx queue 0).
+	 *
+	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
+	 * (bit 16 is for Tx queue 0).
+	 *
+	 * Each CPU has its own Rx/Tx cause register
+	 */
+	cause_rx_tx = mvpp2_read(pp->pp2, MVPP2_ISR_RX_TX_CAUSE_REG(pp->id));
+	cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
+
+	if (cause_misc) {
+		mvpp2_cause_error(pp->dev, cause_misc);
+
+		/* Clear the cause register */
+		mvpp2_write(pp->pp2, MVPP2_ISR_MISC_CAUSE_REG, 0);
+		mvpp2_write(pp->pp2, MVPP2_ISR_RX_TX_CAUSE_REG(pp->id),
+			    cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
+	}
+
+	/* Release TX descriptors */
+	if (cause_tx) {
+		struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(pp, cause_tx);
+		struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
+
+		if (!txq)
+			return;
+
+		if (txq_pcpu->count)
+			mvpp2_txq_done(pp, txq, txq_pcpu);
+	}
+}
+
+static int mvpp2_poll(struct napi_struct *napi, int budget)
+{
+	u32 cause_rx_tx, cause_rx;
+	int rx_done = 0;
+	struct mvpp2_port *pp = netdev_priv(napi->dev);
+
+	if (!netif_running(pp->dev)) {
+		napi_complete(napi);
+		return rx_done;
+	}
+
+	on_each_cpu(mvpp2_handle_cpu, pp, 1);
+
+	cause_rx_tx = mvpp2_read(pp->pp2, MVPP2_ISR_RX_TX_CAUSE_REG(pp->id));
+	cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
+
+	/* Process RX packets */
+	cause_rx |= pp->pending_cause_rx;
+	while (cause_rx && budget > 0) {
+		int count;
+		struct mvpp2_rx_queue *rxq;
+
+		rxq = mvpp2_get_rx_queue(pp, cause_rx);
+		if (!rxq)
+			break;
+
+		count = mvpp2_rx(pp, budget, rxq);
+		rx_done += count;
+		budget -= count;
+		if (budget > 0) {
+			/* Clear the bit associated to this Rx queue
+			 * so that next iteration will continue from
+			 * the next Rx queue.
+			 */
+			cause_rx &= ~(1 << rxq->logic_rxq);
+		}
+	}
+
+	if (budget > 0) {
+		int cpu;
+
+		cause_rx = 0;
+		napi_complete(napi);
+
+		/* TODO: Check this wmb() */
+		wmb();
+		for_each_present_cpu(cpu)
+			mvpp2_cpu_interrupts_enable(pp, cpu);
+	}
+	pp->pending_cause_rx = cause_rx;
+	return rx_done;
+}
+
+/* Set hw internals when starting port */
+static int mvpp2_start_dev(struct mvpp2_port *pp)
+{
+	int cpu, txp, err;
+
+	mvpp2_gmac_max_rx_size_set(pp);
+
+	/* Allocate the Rx/Tx queues */
+	err = mvpp2_setup_rxqs(pp);
+	if (err)
+		return err;
+	err = mvpp2_setup_txqs(pp);
+	if (err)
+		goto err_cleanup_rxqs;
+
+	for (txp = 0; txp < pp->txp_num; txp++)
+		mvpp2_txp_max_tx_size_set(pp, txp);
+
+	/* Initialize pools for swf */
+	err = mvpp2_swf_bm_pool_init(pp);
+	if (err)
+		goto err_cleanup_txqs;
+
+	napi_enable(&pp->napi);
+
+	/* Unmask and enable interrupts on all CPUs */
+	on_each_cpu(mvpp2_interrupts_unmask, pp, 1);
+	for_each_present_cpu(cpu)
+		mvpp2_cpu_interrupts_enable(pp, cpu);
+
+	mvpp2_port_enable(pp);
+	phy_start(pp->phy_dev);
+	netif_tx_start_all_queues(pp->dev);
+
+	return 0;
+
+err_cleanup_txqs:
+	mvpp2_cleanup_txqs(pp);
+err_cleanup_rxqs:
+	mvpp2_cleanup_rxqs(pp);
+	return err;
+}
+
+/* Set hw internals when stopping port */
+static void mvpp2_stop_dev(struct mvpp2_port *pp)
+{
+	int cpu;
+
+	/* Stop new packets from arriving to RXQs */
+	mvpp2_ingress_enable(pp, false);
+
+	mdelay(10);
+
+	/* Disable and mask interrupts on all CPUs */
+	for_each_present_cpu(cpu)
+		mvpp2_cpu_interrupts_disable(pp, cpu);
+	on_each_cpu(mvpp2_interrupts_mask, pp, 1);
+
+	napi_disable(&pp->napi);
+
+	netif_carrier_off(pp->dev);
+	netif_tx_stop_all_queues(pp->dev);
+
+	mvpp2_cleanup_rxqs(pp);
+	mvpp2_cleanup_txqs(pp);
+
+	mvpp2_egress_enable(pp, false);
+	mvpp2_port_disable(pp);
+	phy_stop(pp->phy_dev);
+}
+
+/* Return positive if MTU is valid */
+static inline int mvpp2_check_mtu_valid(struct net_device *dev, int mtu)
+{
+	if (mtu < 68) {
+		netdev_err(dev, "cannot change mtu to less than 68\n");
+		return -EINVAL;
+	}
+
+	/* 9676 == 9700 - 20 and rounding to 8 */
+	if (mtu > 9676) {
+		netdev_info(dev, "illegal MTU value %d, round to 9676\n", mtu);
+		mtu = 9676;
+	}
+
+	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
+		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
+			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
+		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
+	}
+
+	return mtu;
+}
+
+static void mvpp2_get_mac_address(struct mvpp2_port *pp, unsigned char *addr)
+{
+	u32 mac_addr_l, mac_addr_m, mac_addr_h;
+
+	mac_addr_l = readl(pp->base + MVPP2_GMAC_CTRL_1_REG);
+	mac_addr_m = readl(pp->pp2->lms_base + MVPP2_SRC_ADDR_MIDDLE);
+	mac_addr_h = readl(pp->pp2->lms_base + MVPP2_SRC_ADDR_HIGH);
+	addr[0] = (mac_addr_h >> 24) & 0xFF;
+	addr[1] = (mac_addr_h >> 16) & 0xFF;
+	addr[2] = (mac_addr_h >> 8) & 0xFF;
+	addr[3] = mac_addr_h & 0xFF;
+	addr[4] = mac_addr_m & 0xFF;
+	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
+}
+
+static int mvpp2_phy_connect(struct mvpp2_port *pp)
+{
+	struct phy_device *phy_dev;
+
+	phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvpp2_link_event, 0,
+				 pp->phy_interface);
+	if (!phy_dev) {
+		netdev_err(pp->dev, "cannot connect to phy\n");
+		return -ENODEV;
+	}
+	phy_dev->supported &= PHY_GBIT_FEATURES;
+	phy_dev->advertising = phy_dev->supported;
+
+	pp->phy_dev = phy_dev;
+	pp->link    = 0;
+	pp->duplex  = 0;
+	pp->speed   = 0;
+
+	return 0;
+}
+
+static void mvpp2_phy_disconnect(struct mvpp2_port *pp)
+{
+	phy_disconnect(pp->phy_dev);
+	pp->phy_dev = NULL;
+}
+
+/* Start the port */
+static int mvpp2_open(struct net_device *dev)
+{
+	struct mvpp2_port *pp = netdev_priv(dev);
+	unsigned char mac_bcast[ETH_ALEN] = { 0xff, 0xff, 0xff,
+					      0xff, 0xff, 0xff };
+	int err;
+
+	err = mvpp2_prs_mac_da_accept(pp->pp2, pp->id, mac_bcast, true);
+	if (err) {
+		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
+		return err;
+	}
+	err = mvpp2_prs_mac_da_accept(pp->pp2, pp->id, dev->dev_addr, true);
+	if (err) {
+		netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
+		return err;
+	}
+	err = mvpp2_prs_tag_mode_set(pp->pp2, pp->id, MVPP2_TAG_TYPE_MH);
+	if (err) {
+		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
+		return err;
+	}
+	err = mvpp2_prs_def_flow(pp);
+	if (err) {
+		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
+		return err;
+	}
+
+	/* Configure Rx packet size according to the current MTU */
+	pp->pkt_size = MVPP2_RX_PKT_SIZE(pp->dev->mtu);
+
+	err = request_irq(dev->irq, mvpp2_isr, 0, dev->name, pp);
+	if (err) {
+		netdev_err(pp->dev, "cannot request irq %d\n", dev->irq);
+		return err;
+	}
+
+	/* In default link is down */
+	netif_carrier_off(pp->dev);
+
+	err = mvpp2_phy_connect(pp);
+	if (err < 0)
+		goto err_free_irq;
+
+	err = mvpp2_start_dev(pp);
+	if (err < 0)
+		goto err_phy_disconnect;
+
+	return 0;
+
+err_phy_disconnect:
+	mvpp2_phy_disconnect(pp);
+err_free_irq:
+	free_irq(pp->dev->irq, pp);
+	return err;
+}
+
+static int mvpp2_stop(struct net_device *dev)
+{
+	struct mvpp2_port *pp = netdev_priv(dev);
+
+	mvpp2_stop_dev(pp);
+	mvpp2_phy_disconnect(pp);
+	free_irq(dev->irq, pp);
+	return 0;
+}
+
+void mvpp2_set_rx_mode(struct net_device *dev)
+{
+	struct mvpp2_port *pp = netdev_priv(dev);
+	struct netdev_hw_addr *ha;
+
+	mvpp2_prs_mac_promisc_set(pp->pp2, pp->id, dev->flags & IFF_PROMISC);
+	mvpp2_prs_mac_all_multi_set(pp->pp2, pp->id, dev->flags & IFF_ALLMULTI);
+
+	/* Remove all pp->id's mcast enries */
+	mvpp2_prs_mcast_del_all(pp->pp2, pp->id);
+
+	if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev))
+		netdev_for_each_mc_addr(ha, dev)
+			mvpp2_prs_mac_da_accept(pp->pp2, pp->id,
+						ha->addr, true);
+}
+
+static int mvpp2_set_mac_address(struct net_device *dev, void *p)
+{
+	struct mvpp2_port *pp = netdev_priv(dev);
+	const struct sockaddr *addr = p;
+	int err;
+
+	if (!is_valid_ether_addr(addr->sa_data)) {
+		err = -EADDRNOTAVAIL;
+		goto error;
+	}
+
+	if (!netif_running(dev)) {
+		err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
+		if (err)
+			goto error;
+		return 0;
+	}
+
+	mvpp2_stop_dev(pp);
+
+	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
+	if (err)
+		goto error;
+
+	err = mvpp2_start_dev(pp);
+	if (err)
+		goto error;
+	mvpp2_egress_enable(pp, true);
+	mvpp2_ingress_enable(pp, true);
+
+	return 0;
+
+error:
+	netdev_err(dev, "fail to change MAC address\n");
+	return err;
+}
+
+static int mvpp2_change_mtu(struct net_device *dev, int mtu)
+{
+	struct mvpp2_port *pp = netdev_priv(dev);
+	int err;
+
+	mtu = mvpp2_check_mtu_valid(dev, mtu);
+	if (mtu < 0) {
+		err = mtu;
+		goto error;
+	}
+
+	if (!netif_running(dev)) {
+		err = mvpp2_bm_update_mtu(dev, mtu);
+		if (err)
+			goto error;
+		pp->pkt_size =  MVPP2_RX_PKT_SIZE(mtu);
+		mvpp2_gmac_max_rx_size_set(pp);
+		return 0;
+	}
+
+	mvpp2_stop_dev(pp);
+
+	err = mvpp2_bm_update_mtu(dev, mtu);
+	if (err)
+		goto error;
+
+	pp->pkt_size =  MVPP2_RX_PKT_SIZE(mtu);
+
+	err = mvpp2_start_dev(pp);
+	if (err)
+		goto error;
+	mvpp2_egress_enable(pp, true);
+	mvpp2_ingress_enable(pp, true);
+
+	return 0;
+
+error:
+	netdev_err(dev, "fail to change MTU");
+	return err;
+}
+
+struct rtnl_link_stats64 *mvpp2_get_stats64(struct net_device *dev,
+					    struct rtnl_link_stats64 *stats)
+{
+	struct mvpp2_port *pp = netdev_priv(dev);
+	unsigned int start;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct mvpp2_pcpu_stats *cpu_stats;
+		u64 rx_packets;
+		u64 rx_bytes;
+		u64 tx_packets;
+		u64 tx_bytes;
+
+		cpu_stats = per_cpu_ptr(pp->stats, cpu);
+		do {
+			start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
+			rx_packets = cpu_stats->rx_packets;
+			rx_bytes   = cpu_stats->rx_bytes;
+			tx_packets = cpu_stats->tx_packets;
+			tx_bytes   = cpu_stats->tx_bytes;
+		} while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
+
+		stats->rx_packets += rx_packets;
+		stats->rx_bytes   += rx_bytes;
+		stats->tx_packets += tx_packets;
+		stats->tx_bytes   += tx_bytes;
+	}
+
+	stats->rx_errors	= dev->stats.rx_errors;
+	stats->rx_dropped	= dev->stats.rx_dropped;
+	stats->tx_dropped	= dev->stats.tx_dropped;
+
+	return stats;
+}
+
+/* Ethtool methods */
+
+/* Get settings (phy address, speed) for ethtools */
+int mvpp2_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct mvpp2_port *pp = netdev_priv(dev);
+
+	if (!pp->phy_dev)
+		return -ENODEV;
+	return phy_ethtool_gset(pp->phy_dev, cmd);
+}
+
+/* Set settings (phy address, speed) for ethtools */
+int mvpp2_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct mvpp2_port *pp = netdev_priv(dev);
+
+	if (!pp->phy_dev)
+		return -ENODEV;
+	return phy_ethtool_sset(pp->phy_dev, cmd);
+}
+
+/* Set interrupt coalescing for ethtools */
+static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
+				      struct ethtool_coalesce *c)
+{
+	struct mvpp2_port *pp = netdev_priv(dev);
+	int queue;
+	int txp;
+
+	for (queue = 0; queue < rxq_number; queue++) {
+		struct mvpp2_rx_queue *rxq = pp->rxqs[queue];
+		rxq->time_coal = c->rx_coalesce_usecs;
+		rxq->pkts_coal = c->rx_max_coalesced_frames;
+		mvpp2_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
+		mvpp2_rx_time_coal_set(pp, rxq, rxq->time_coal);
+	}
+
+	for (txp = 0; txp < pp->txp_num; txp++) {
+		for (queue = 0; queue < txq_number; queue++) {
+			struct mvpp2_tx_queue *txq =
+					     pp->txqs[txp * txq_number + queue];
+			txq->done_pkts_coal = c->tx_max_coalesced_frames;
+			on_each_cpu(mvpp2_tx_done_pkts_coal_set, txq, 1);
+		}
+	}
+
+	return 0;
+}
+
+/* get coalescing for ethtools */
+static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
+				      struct ethtool_coalesce *c)
+{
+	struct mvpp2_port *pp = netdev_priv(dev);
+	c->rx_coalesce_usecs        = pp->rxqs[0]->time_coal;
+	c->rx_max_coalesced_frames  = pp->rxqs[0]->pkts_coal;
+	c->tx_max_coalesced_frames =  pp->txqs[0]->done_pkts_coal;
+	return 0;
+}
+
+static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
+				      struct ethtool_drvinfo *drvinfo)
+{
+	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
+		sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
+		sizeof(drvinfo->version));
+	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
+		sizeof(drvinfo->bus_info));
+}
+
+static void mvpp2_ethtool_get_ringparam(struct net_device *netdev,
+					struct ethtool_ringparam *ring)
+{
+	struct mvpp2_port *pp = netdev_priv(netdev);
+
+	ring->rx_max_pending = MVPP2_MAX_RXD;
+	ring->tx_max_pending = MVPP2_MAX_TXD;
+	ring->rx_pending = pp->rx_ring_size;
+	ring->tx_pending = pp->tx_ring_size;
+}
+
+static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
+				       struct ethtool_ringparam *ring)
+{
+	struct mvpp2_port *pp = netdev_priv(dev);
+
+	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
+		return -EINVAL;
+	pp->rx_ring_size = ring->rx_pending < MVPP2_MAX_RXD ?
+		ring->rx_pending : MVPP2_MAX_RXD;
+	pp->tx_ring_size = ring->tx_pending < MVPP2_MAX_TXD ?
+		ring->tx_pending : MVPP2_MAX_TXD;
+
+	if (netif_running(dev)) {
+		mvpp2_stop(dev);
+		if (mvpp2_open(dev)) {
+			netdev_err(dev,
+				   "cannot change ring parameter\n");
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+/* Device ops */
+
+static const struct net_device_ops mvpp2_netdev_ops = {
+	.ndo_open            = mvpp2_open,
+	.ndo_stop            = mvpp2_stop,
+	.ndo_start_xmit      = mvpp2_tx,
+	.ndo_set_rx_mode     = mvpp2_set_rx_mode,
+	.ndo_set_mac_address = mvpp2_set_mac_address,
+	.ndo_change_mtu      = mvpp2_change_mtu,
+	.ndo_get_stats64     = mvpp2_get_stats64,
+};
+
+const struct ethtool_ops mvpp2_eth_tool_ops = {
+	.get_link       = ethtool_op_get_link,
+	.get_settings   = mvpp2_ethtool_get_settings,
+	.set_settings   = mvpp2_ethtool_set_settings,
+	.set_coalesce   = mvpp2_ethtool_set_coalesce,
+	.get_coalesce   = mvpp2_ethtool_get_coalesce,
+	.get_drvinfo    = mvpp2_ethtool_get_drvinfo,
+	.get_ringparam  = mvpp2_ethtool_get_ringparam,
+	.set_ringparam	= mvpp2_ethtool_set_ringparam,
+};
+
+/* Driver initialization */
+
+static void mvpp2_port_power_up(struct mvpp2_port *pp)
+{
+	mvpp2_port_mii_set(pp);
+	mvpp2_port_periodic_xon_set(pp, false);
+	mvpp2_port_fc_adv_enable(pp);
+	mvpp2_port_reset_set(pp, false);
+}
+
+/* Initialize port HW */
+static int mvpp2_port_init(struct mvpp2_port *pp)
+{
+	struct device *dev = pp->dev->dev.parent;
+	struct mvpp2 *pp2 = pp->pp2;
+	struct mvpp2_txq_pcpu *txq_pcpu;
+	int queue, txp, cpu;
+
+	if (pp->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
+		return -EINVAL;
+
+	/* Disable port */
+	mvpp2_egress_enable(pp, false);
+	mvpp2_port_disable(pp);
+
+	pp->txqs = devm_kzalloc(dev, pp->txp_num * txq_number *
+				sizeof(struct mvpp2_tx_queue *), GFP_KERNEL);
+	if (!pp->txqs)
+		return -ENOMEM;
+
+	/* Associate physical Tx queues to this port and initialize.
+	 * The mapping is predefined.
+	 */
+	for (txp = 0; txp < pp->txp_num; txp++) {
+		for (queue = 0; queue < txq_number; queue++) {
+			int txq_idx = txp * txq_number + queue;
+			int queue_phy_id = mvpp2_txq_phys(pp->id, queue);
+			struct mvpp2_tx_queue *txq;
+
+			txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
+			if (!txq)
+				return -ENOMEM;
+
+			txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
+			if (!txq->pcpu)
+				return -ENOMEM;
+
+			txq->id = queue_phy_id;
+			txq->log_id = queue;
+			txq->txp = txp;
+			txq->size = pp->tx_ring_size;
+			txq->swf_size = txq->size - (num_present_cpus() *
+							  MVPP2_CPU_DESC_CHUNK);
+			txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
+			for_each_present_cpu(cpu) {
+				txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+				txq_pcpu->cpu = cpu;
+				txq_pcpu->size = txq->size;
+			}
+
+			txq->pp2 = pp2;
+			pp->txqs[txq_idx] = txq;
+		}
+	}
+
+	pp->rxqs = devm_kzalloc(dev, rxq_number *
+				sizeof(struct mvpp2_rx_queue *), GFP_KERNEL);
+	if (!pp->rxqs)
+		return -ENOMEM;
+
+	/* Allocate and initialize Rx queue for this port */
+	for (queue = 0; queue < rxq_number; queue++) {
+		struct mvpp2_rx_queue *rxq;
+		/* Map physical RXQ to port's logical RXQ */
+		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
+		if (!rxq)
+			return -ENOMEM;
+		/* Map this Rx queue to a physical queue */
+		rxq->id = pp->first_rxq + queue;
+		rxq->port = pp->id;
+		rxq->logic_rxq = queue;
+
+		pp->rxqs[queue] = rxq;
+	}
+
+	/* Configure Rx queue group interrupt for this port */
+	mvpp2_write(pp2, MVPP2_ISR_RXQ_GROUP_REG(pp->id), rxq_number);
+
+	/* Create Rx descriptor rings */
+	for (queue = 0; queue < rxq_number; queue++) {
+		struct mvpp2_rx_queue *rxq = pp->rxqs[queue];
+		rxq->size = pp->rx_ring_size;
+		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
+		rxq->time_coal = MVPP2_RX_COAL_USEC;
+	}
+
+	mvpp2_ingress_enable(pp, false);
+
+	/* Port default configuration */
+	mvpp2_defaults_set(pp);
+
+	/* Port's classifier configuration */
+	mvpp2_cls_hw_oversize_rxq_set(pp);
+	mvpp2_cls_hw_port_def_config(pp, 0, pp->id);
+
+	return 0;
+}
+
+/* Ports initialization */
+static int mvpp2_port_probe(struct platform_device *pdev,
+			    struct device_node *port_node,
+			    struct mvpp2 *pp2,
+			    int *next_first_rxq)
+{
+	struct device_node *phy_node;
+	struct mvpp2_port *pp;
+	struct net_device *dev;
+	struct resource *res;
+	const char *dt_mac_addr;
+	const char *mac_from;
+	char hw_mac_addr[ETH_ALEN];
+	u32 id;
+	int features;
+	int phy_mode;
+	int pp2_common_regs_num = 2;
+	int err, i;
+
+	dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
+				 rxq_number);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->irq = irq_of_parse_and_map(port_node, 0);
+	if (dev->irq == 0) {
+		err = -EINVAL;
+		goto err_free_netdev;
+	}
+
+	phy_node = of_parse_phandle(port_node, "phy", 0);
+	if (!phy_node) {
+		dev_err(&pdev->dev, "missing phy\n");
+		err = -ENODEV;
+		goto err_free_irq;
+	}
+
+	phy_mode = of_get_phy_mode(port_node);
+	if (phy_mode < 0) {
+		dev_err(&pdev->dev, "incorrect phy mode\n");
+		err = phy_mode;
+		goto err_free_irq;
+	}
+
+	if (of_property_read_u32(port_node, "port-id", &id)) {
+		err = -EINVAL;
+		dev_err(&pdev->dev, "missing port-id value\n");
+		goto err_free_irq;
+	}
+
+	dev->tx_queue_len = MVPP2_MAX_TXD;
+	dev->watchdog_timeo = 5 * HZ;
+	dev->netdev_ops = &mvpp2_netdev_ops;
+	dev->ethtool_ops = &mvpp2_eth_tool_ops;
+
+	pp = netdev_priv(dev);
+
+	if (of_property_read_bool(port_node, "marvell,loopback"))
+		pp->flags |= MVPP2_F_LOOPBACK;
+
+	pp->pp2 = pp2;
+	pp->id = id;
+	pp->txp_num = 1;
+	pp->first_rxq = *next_first_rxq;
+	pp->phy_node = phy_node;
+	pp->phy_interface = phy_mode;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM,
+				    pp2_common_regs_num + id);
+	pp->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pp->base)) {
+		err = PTR_ERR(pp->base);
+		dev_err(&pdev->dev, "cannot obtain port base address\n");
+		goto err_free_irq;
+	}
+
+	/* Alloc per-cpu stats */
+	pp->stats = alloc_percpu(struct mvpp2_pcpu_stats);
+	if (!pp->stats) {
+		err = -ENOMEM;
+		goto err_free_irq;
+	}
+
+	dt_mac_addr = of_get_mac_address(port_node);
+	if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
+		mac_from = "device tree";
+		memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
+	} else {
+		mvpp2_get_mac_address(pp, hw_mac_addr);
+		if (is_valid_ether_addr(hw_mac_addr)) {
+			mac_from = "hardware";
+			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
+		} else {
+			mac_from = "random";
+			eth_hw_addr_random(dev);
+		}
+	}
+
+	pp->tx_ring_size = MVPP2_MAX_TXD;
+	pp->rx_ring_size = MVPP2_MAX_RXD;
+	pp->dev = dev;
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	err = mvpp2_port_init(pp);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to init port %d\n", id);
+		goto err_free_stats;
+	}
+	mvpp2_port_power_up(pp);
+
+	netif_napi_add(dev, &pp->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
+	features = NETIF_F_SG | NETIF_F_IP_CSUM;
+	dev->features = features | NETIF_F_RXCSUM;
+	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
+	dev->vlan_features |= features;
+
+	err = register_netdev(dev);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to register netdev\n");
+		goto err_free_txq_pcpu;
+	}
+	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
+
+	/* Increment the first Rx queue number to be used by the next port */
+	*next_first_rxq += rxq_number;
+	pp2->port_list[id] = pp;
+	return 0;
+
+err_free_txq_pcpu:
+	for (i = 0; i < txq_number; i++)
+		free_percpu(pp->txqs[i]->pcpu);
+err_free_stats:
+	free_percpu(pp->stats);
+err_free_irq:
+	irq_dispose_mapping(dev->irq);
+err_free_netdev:
+	free_netdev(dev);
+	return err;
+}
+
+/* Ports removal routine */
+static void mvpp2_port_remove(struct mvpp2_port *pp)
+{
+	int i;
+
+	unregister_netdev(pp->dev);
+	free_percpu(pp->stats);
+	for (i = 0; i < txq_number; i++)
+		free_percpu(pp->txqs[i]->pcpu);
+	irq_dispose_mapping(pp->dev->irq);
+	free_netdev(pp->dev);
+}
+
+/* Initialize decoding windows */
+static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
+				    struct mvpp2 *pp2)
+{
+	u32 win_enable;
+	int i;
+
+	for (i = 0; i < 6; i++) {
+		mvpp2_write(pp2, MVPP2_WIN_BASE(i), 0);
+		mvpp2_write(pp2, MVPP2_WIN_SIZE(i), 0);
+
+		if (i < 4)
+			mvpp2_write(pp2, MVPP2_WIN_REMAP(i), 0);
+	}
+
+	win_enable = 0;
+
+	for (i = 0; i < dram->num_cs; i++) {
+		const struct mbus_dram_window *cs = dram->cs + i;
+		mvpp2_write(pp2, MVPP2_WIN_BASE(i),
+			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
+			    dram->mbus_dram_target_id);
+
+		mvpp2_write(pp2, MVPP2_WIN_SIZE(i),
+			    (cs->size - 1) & 0xffff0000);
+
+		win_enable |= (1 << i);
+	}
+
+	mvpp2_write(pp2, MVPP2_BASE_ADDR_ENABLE, win_enable);
+}
+
+/* Initialize Rx FIFO's */
+static void mvpp2_rx_fifo_init(struct mvpp2 *pp2)
+{
+	int port;
+
+	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
+		mvpp2_write(pp2, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
+			    MVPP2_RX_FIFO_PORT_DATA_SIZE);
+		mvpp2_write(pp2, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
+			    MVPP2_RX_FIFO_PORT_ATTR_SIZE);
+	}
+
+	mvpp2_write(pp2, MVPP2_RX_MIN_PKT_SIZE_REG, MVPP2_RX_FIFO_PORT_MIN_PKT);
+	mvpp2_write(pp2, MVPP2_RX_FIFO_INIT_REG, 0x1);
+}
+
+/* Initialize network controller common part HW */
+static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *pp2)
+{
+	const struct mbus_dram_target_info *dram_target_info;
+	int err, i;
+	u32 val;
+
+	/* Checks for hardware constraints */
+	if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
+			      (txq_number > MVPP2_MAX_TXQ)) {
+		dev_err(&pdev->dev, "invalid queue size parameter\n");
+		return -EINVAL;
+	}
+
+	/* MBUS windows configuration */
+	dram_target_info = mv_mbus_dram_info();
+	if (dram_target_info)
+		mvpp2_conf_mbus_windows(dram_target_info, pp2);
+
+	/* Disable HW PHY polling */
+	val = readl(pp2->lms_base + MVPP2_PHY_AN_CFG0_REG);
+	val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
+	writel(val, pp2->lms_base + MVPP2_PHY_AN_CFG0_REG);
+
+	/* Allocate and initialize aggregated TXQs */
+	pp2->aggr_txqs = devm_kzalloc(&pdev->dev, num_present_cpus() *
+				      sizeof(struct mvpp2_tx_queue),
+				      GFP_KERNEL);
+	if (!pp2->aggr_txqs)
+		return -ENOMEM;
+
+	for_each_present_cpu(i) {
+		pp2->aggr_txqs[i].id = i;
+		pp2->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
+		err = mvpp2_aggr_txq_init(pdev, &pp2->aggr_txqs[i],
+					  MVPP2_AGGR_TXQ_SIZE, i, pp2);
+		if (err < 0)
+			return err;
+	}
+
+	/* Rx Fifo Init */
+	mvpp2_rx_fifo_init(pp2);
+
+	/* Reset Rx queue group interrupt configuration */
+	for (i = 0; i < MVPP2_MAX_PORTS; i++)
+		mvpp2_write(pp2, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);
+
+	writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
+	       pp2->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
+
+	/* Allow cache snoop when transmiting packets */
+	mvpp2_write(pp2, MVPP2_TX_SNOOP_REG, 0x1);
+
+	/* Buffer Manager initialization */
+	err = mvpp2_bm_init(pdev, pp2);
+	if (err < 0)
+		return err;
+
+	/* Parser default initialization */
+	err = mvpp2_prs_default_init(pdev, pp2);
+	if (err < 0)
+		return err;
+
+	/* Classifier default initialization */
+	mvpp2_cls_default_init(pp2);
+
+	return 0;
+}
+
+static int mvpp2_probe(struct platform_device *pdev)
+{
+	struct device_node *dn = pdev->dev.of_node;
+	struct device_node *port_node;
+	struct mvpp2 *pp2;
+	struct resource *res;
+	int port_count, first_rxq;
+	int err;
+
+	pp2 = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
+	if (!pp2)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pp2->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pp2->base))
+		return PTR_ERR(pp2->base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	pp2->lms_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pp2->lms_base))
+		return PTR_ERR(pp2->lms_base);
+
+	pp2->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
+	if (IS_ERR(pp2->pp_clk))
+		return PTR_ERR(pp2->pp_clk);
+	err = clk_prepare_enable(pp2->pp_clk);
+	if (err < 0)
+		return err;
+
+	pp2->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
+	if (IS_ERR(pp2->gop_clk)) {
+		err = PTR_ERR(pp2->gop_clk);
+		goto err_pp_clk;
+	}
+	err = clk_prepare_enable(pp2->gop_clk);
+	if (err < 0)
+		goto err_pp_clk;
+
+	/* Get system's tclk rate */
+	pp2->tclk = clk_get_rate(pp2->pp_clk);
+
+	/* Initialize network controller common part HW */
+	err = mvpp2_init(pdev, pp2);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to initialize controller\n");
+		goto err_gop_clk;
+	}
+
+	port_count = of_get_available_child_count(dn);
+	if (port_count == 0) {
+		dev_err(&pdev->dev, "no ports enabled\n");
+		goto err_gop_clk;
+	}
+
+	pp2->port_list = devm_kzalloc(&pdev->dev, port_count *
+				      sizeof(struct mvpp2_port *),
+				      GFP_KERNEL);
+	if (!pp2->port_list) {
+		err = -ENOMEM;
+		goto err_gop_clk;
+	}
+
+	/* Initialize ports */
+	first_rxq = 0;
+	for_each_available_child_of_node(dn, port_node) {
+		err = mvpp2_port_probe(pdev, port_node, pp2, &first_rxq);
+		if (err < 0)
+			goto err_gop_clk;
+	}
+
+	platform_set_drvdata(pdev, pp2);
+	return 0;
+
+err_gop_clk:
+	clk_disable_unprepare(pp2->gop_clk);
+err_pp_clk:
+	clk_disable_unprepare(pp2->pp_clk);
+	return err;
+}
+
+static int mvpp2_remove(struct platform_device *pdev)
+{
+	struct mvpp2 *pp2 = platform_get_drvdata(pdev);
+	struct device_node *dn = pdev->dev.of_node;
+	struct device_node *port_node;
+	int i = 0;
+
+	for_each_available_child_of_node(dn, port_node) {
+		if (pp2->port_list[i])
+			mvpp2_port_remove(pp2->port_list[i]);
+		i++;
+	}
+
+	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+		struct mvpp2_bm_pool *bm_pool = &pp2->bm_pools[i];
+		mvpp2_bm_pool_destroy(pdev, pp2, bm_pool);
+	}
+
+	for_each_present_cpu(i) {
+		struct mvpp2_tx_queue *aggr_txq = &pp2->aggr_txqs[i];
+		dma_free_coherent(&pdev->dev,
+				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+				  aggr_txq->descs,
+				  aggr_txq->descs_phys);
+	}
+
+	clk_disable_unprepare(pp2->pp_clk);
+	clk_disable_unprepare(pp2->gop_clk);
+
+	return 0;
+}
+
+static const struct of_device_id mvpp2_match[] = {
+	{ .compatible = "marvell,armada-375-pp2" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, mvpp2_match);
+
+static struct platform_driver mvpp2_driver = {
+	.probe = mvpp2_probe,
+	.remove = mvpp2_remove,
+	.driver = {
+		.name = MVPP2_DRIVER_NAME,
+		.of_match_table = mvpp2_match,
+	},
+};
+
+module_platform_driver(mvpp2_driver);
+
+MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
+MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
+MODULE_LICENSE("GPL");
+
+module_param(rxq_number, int, S_IRUGO);
+module_param(txq_number, int, S_IRUGO);
diff --git a/drivers/net/ethernet/mvebu_net/.gitignore b/drivers/net/ethernet/mvebu_net/.gitignore
new file mode 100644
index 000000000000..96a1e8b9c9f0
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/.gitignore
@@ -0,0 +1,90 @@
+
+#
+# NOTE! Don't add files that are generated in specific
+# subdirectories here. Add them in the ".gitignore" file
+# in that subdirectory instead.
+#
+# NOTE! Please use 'git ls-files -i --exclude-standard'
+# command after changing this file, to see if there are
+# any tracked files which get ignored after the change.
+#
+# Normal rules
+#
+.*
+*.o
+*.o.*
+*.a
+*.s
+*.ko
+*.so
+*.so.dbg
+*.mod.c
+*.i
+*.lst
+*.symtypes
+*.order
+modules.builtin
+*.elf
+*.bin
+*.gz
+*.bz2
+*.lzma
+*.xz
+*.lzo
+*.patch
+*.gcno
+
+#
+#
+#
+
+#
+# Top-level generic files
+#
+/tags
+/TAGS
+vmlinux
+vmlinuz
+System.map
+Module.markers
+Module.symvers
+
+#
+# Debian directory (make deb-pkg)
+#
+/debian/
+
+#
+# git files that we don't want to ignore even it they are dot-files
+#
+!.gitignore
+!.mailmap
+
+#
+# Generated include files
+#
+include/config
+include/linux/version.h
+include/generated
+arch/*/include/generated
+
+# stgit generated dirs
+patches-*
+
+# quilt's files
+patches
+series
+
+# cscope files
+cscope.*
+ncscope.*
+
+# gnu global files
+GPATH
+GRTAGS
+GSYMS
+GTAGS
+
+*.orig
+*~
+\#*#
diff --git a/drivers/net/ethernet/mvebu_net/Kconfig b/drivers/net/ethernet/mvebu_net/Kconfig
new file mode 100644
index 000000000000..e97630643338
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/Kconfig
@@ -0,0 +1,97 @@
+#
+# Marvell SoC network devices
+#
+
+config NET_VENDOR_MVEBU
+	bool "Marvell SoC network devices"
+	default y
+	depends on ARCH_MVEBU
+	---help---
+	  If you have a network (Ethernet) card belonging to this class, say Y
+	  and read the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+if NET_VENDOR_MVEBU
+
+menu "Marvell Network SKB Features"
+
+config  NET_SKB_HEADROOM
+	int "SKB headroom size"
+	default 64
+	---help---
+	Customize SKB headroom size. Must be power of 2.
+
+config NET_SKB_RECYCLE
+	bool "Skb recycle"
+	default y
+	---help---
+	Work-in-progress and experimental.
+
+	This option enables skb's to be returned via a callback at kfree to
+	the allocator to make a fastpath for very skb consuming network
+	applications.
+
+endmenu
+
+menu "Marvell Network PON Support"
+
+config MV_INCLUDE_PON
+	bool "PON include"
+	default n
+	---help---
+	Whether PON is included.
+
+endmenu
+
+menu "Marvell Network Debug Control"
+
+config MV_ETH_DEBUG_CODE
+	bool "Add run-time debug code"
+	default n
+	---help---
+	Enable run-time enable/disable enter debug code blocks
+
+endmenu
+
+config MV_ETH_NETA
+	tristate "Marvell Armada 380 network interface support"
+	depends on MACH_ARMADA_380
+	---help---
+	  This driver supports the network interface units in the
+	  Marvell ARMADA 38x SoC family.
+
+if MV_ETH_NETA
+source drivers/net/ethernet/mvebu_net/neta/Kconfig
+endif
+
+config MV_ETH_PP2
+	tristate "Marvell Armada 375 network interface support"
+	depends on MACH_ARMADA_375
+	---help---
+	  This driver supports the network interface units in the
+	  Marvell ARMADA 375 SoC family.
+
+if MV_ETH_PP2
+source drivers/net/ethernet/mvebu_net/pp2/Kconfig
+endif
+
+config MV_ETH_INCLUDE_PHY
+	bool "Choose to compile Ethernet PHY support"
+	depends on MV_ETH_NETA || MV_ETH_PP2
+	default y
+	---help---
+	Use Marvell propriotary PHY driver for access PHY registers.
+	All kind of Marvell network drivers need it.
+	Driver provides APIs for reset PHY, restart autonegotiation and
+	change autonegotiation parameters of the PHY.
+	The driver located uner directory mvebu_net/phy
+
+config MV_ETH_INCLUDE_NETMUX
+	bool "Choose to compile Marvell NetMux support"
+	depends on MV_ETH_NETA || MV_ETH_PP2
+	default y
+	---help---
+	Use Marvell propriotary NETMUX driver for Virtual Networking interfaces support.
+	The driver located uner directory mvebu_net/netmux
+
+endif # NET_VENDOR_MVEBU
diff --git a/drivers/net/ethernet/mvebu_net/Makefile b/drivers/net/ethernet/mvebu_net/Makefile
new file mode 100644
index 000000000000..d3d83ebb4d19
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/Makefile
@@ -0,0 +1,40 @@
+# Makefile for mvebu_net Marvell network drivers
+
+
+CPU_ARCH    = ARM
+ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
+ENDIAN      = BE
+else
+ENDIAN      = LE
+endif
+
+MVEBU_NET_FLAGS := -DMV_LINUX -DMV_CPU_$(ENDIAN) -DMV_$(CPU_ARCH)
+export MVEBU_NET_FLAGS
+
+ccflags-y       += $(MVEBU_NET_FLAGS)
+
+PLAT_DIR 	:= drivers/net/ethernet/mvebu_net
+export PLAT_DIR
+
+INCLUDE_DIRS	+= -I$(PLAT_DIR)
+INCLUDE_DIRS	+= -I$(PLAT_DIR)/common
+INCLUDE_DIRS	+= -I$(srctree)/arch/arm/mach-mvebu/linux_oss
+INCLUDE_DIRS	+= -I$(PLAT_DIR)/switch
+INCLUDE_DIRS	+= -I$(PLAT_DIR)/netmux
+INCLUDE_DIRS	+= -I$(PLAT_DIR)/phy
+INCLUDE_DIRS	+= -I$(srctree)/arch/arm/mach-mvebu
+export INCLUDE_DIRS
+
+ccflags-y	+= $(INCLUDE_DIRS)
+
+obj-y	+= common/mvCommon.o common/mvStack.o common/mvDebug.o
+
+obj-$(CONFIG_MV_ETH_PP2_1)		+= common/mvList.o
+
+obj-$(CONFIG_MV_ETH_INCLUDE_PHY)	+= phy/mvEthPhy.o phy/phy_sysfs.o
+
+obj-$(CONFIG_MV_ETH_INCLUDE_NETMUX)	+= netmux/
+
+obj-$(CONFIG_MV_ETH_NETA) += neta/
+obj-$(CONFIG_MV_ETH_PP2) += pp2/
+
diff --git a/drivers/net/ethernet/mvebu_net/common/.gitignore b/drivers/net/ethernet/mvebu_net/common/.gitignore
new file mode 100644
index 000000000000..b33b8f2fab2e
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/common/.gitignore
@@ -0,0 +1,5 @@
+*.o
+*.o.*
+*.rej
+*.orig
+*.su
diff --git a/drivers/net/ethernet/mvebu_net/common/mv802_3.h b/drivers/net/ethernet/mvebu_net/common/mv802_3.h
new file mode 100644
index 000000000000..04419fb434c7
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/common/mv802_3.h
@@ -0,0 +1,309 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmv802_3h
+#define __INCmv802_3h
+
+/* includes */
+#include "mvTypes.h"
+
+/* Defines */
+#define MV_MAX_ETH_DATA     1500
+#define MV_ETH_MH_SIZE      2
+#define MV_ETH_DSA_SIZE     4
+#define MV_ETH_EDSA_SIZE    8
+#define MV_ETH_VLAN_SIZE    4
+#define MV_ETH_CRC_SIZE     4
+
+/* Marvell tag types */
+typedef enum {
+	MV_TAG_TYPE_NONE = 0,
+	MV_TAG_TYPE_MH   = 1,
+	MV_TAG_TYPE_DSA  = 2,
+	MV_TAG_TYPE_EDSA = 3,
+	MV_TAG_TYPE_VLAN = 4,
+	MV_TAG_TYPE_LAST = 5
+} MV_TAG_TYPE;
+
+typedef union mv_tag {
+	MV_U32 edsa[2];
+	MV_U32 dsa;
+	MV_U32 vlan;
+	MV_U16 mh;
+} MV_TAG;
+
+typedef struct mv_mux_tag {
+	MV_TAG_TYPE tag_type;
+	MV_TAG tx_tag;
+	MV_TAG rx_tag_ptrn;
+	MV_TAG rx_tag_mask;
+} MV_MUX_TAG;
+
+typedef enum {
+	MV_PRESET_TRANSPARENT    = 0,
+	MV_PRESET_SINGLE_VLAN    = 1,
+	MV_PRESET_PER_PORT_VLAN  = 2,
+} MV_SWITCH_PRESET_TYPE;
+
+/* 802.3 types */
+#define MV_IP_TYPE                  0x0800
+#define MV_IP_ARP_TYPE              0x0806
+#define MV_IP_LBDT_TYPE             0xfffa
+#define MV_IP6_TYPE                 0x86dd
+#define MV_APPLE_TALK_ARP_TYPE      0x80F3
+#define MV_NOVELL_IPX_TYPE          0x8137
+#define MV_EAPOL_TYPE               0x888e
+#define MV_VLAN_TYPE                0x8100
+#define MV_VLAN_1_TYPE              0x88A8
+#define MV_PPPOE_TYPE               0x8864
+
+/* PPPoE protocol type */
+#define MV_IP_PPP  0x0021
+#define MV_IP6_PPP 0x0057
+/* Encapsulation header for RFC1042 and Ethernet_tunnel */
+
+#define MV_RFC1042_SNAP_HEADER     { 0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00 }
+
+#define MV_ETH_SNAP_LSB             0xF8
+
+#define MV_MAC_ADDR_SIZE    (6)
+#define MV_MAC_STR_SIZE     (20)
+#define MV_LLC_HLEN         (6)
+#define MV_VLAN_HLEN        (4)
+#define MV_ETH_TYPE_LEN     (2)
+#define MV_ETH_ALEN         (MV_MAC_ADDR_SIZE + MV_MAC_ADDR_SIZE + MV_ETH_TYPE_LEN)
+#define MV_PPP_HDR_SIZE     (2)
+#define MV_PPPOE_HDR_SIZE   (8) /* PPP header is 2, PPPoE header is 6 */
+
+/* This macro checks for a multicast mac address    */
+#define MV_IS_MULTICAST_MAC(mac)  (((mac)[0] & 0x1) == 1)
+
+/* IPv4 */
+#define MV_INET 2
+/* IPv6 */
+#define MV_INET6 10
+
+#define MV_MAX_IPV4_ADDR_SIZE   (4)
+#define MV_MAX_L3_ADDR_SIZE     (16)    /* IPv4: 4, IPv6: 16 */
+
+/* This macro checks for an broadcast mac address     */
+#define MV_IS_BROADCAST_MAC(mac)	    \
+	(((mac)[0] == 0xFF) &&	     \
+	 ((mac)[1] == 0xFF) &&	     \
+	 ((mac)[2] == 0xFF) &&	     \
+	 ((mac)[3] == 0xFF) &&	     \
+	 ((mac)[4] == 0xFF) &&	     \
+	 ((mac)[5] == 0xFF))
+
+/* Typedefs */
+typedef struct {
+	MV_U8 pDA[MV_MAC_ADDR_SIZE];
+	MV_U8 pSA[MV_MAC_ADDR_SIZE];
+	MV_U16 typeOrLen;
+
+} MV_802_3_HEADER;
+
+/* 8 bytes - PPPoE header + PPP header */
+typedef struct {
+	MV_U8 version;
+	MV_U8 code;
+	MV_U16 session;
+	MV_U16 len;
+	MV_U16 proto;
+} PPPoE_HEADER;
+
+enum {
+	MV_IP_PROTO_NULL = 0,           /* Dummy protocol for TCP               */
+	MV_IP_PROTO_ICMP = 1,           /* Internet Control Message Protocol    */
+	MV_IP_PROTO_IGMP = 2,           /* Internet Group Management Protocol   */
+	MV_IP_PROTO_IPIP = 4,           /* IPIP tunnels (older KA9Q tunnels use 94) */
+	MV_IP_PROTO_TCP = 6,            /* Transmission Control Protocol        */
+	MV_IP_PROTO_EGP = 8,            /* Exterior Gateway Protocol            */
+	MV_IP_PROTO_PUP = 12,           /* PUP protocol                         */
+	MV_IP_PROTO_UDP = 17,           /* User Datagram Protocol               */
+	MV_IP_PROTO_IDP = 22,           /* XNS IDP protocol                     */
+	MV_IP_PROTO_DCCP = 33,          /* Datagram Congestion Control Protocol */
+	MV_IP_PROTO_IPV6 = 41,          /* IPv6-in-IPv4 tunnelling              */
+	MV_IP_PROTO_RH = 43,            /* Routing Header protocol              */
+	MV_IP_PROTO_FH = 44,            /* Fragment Header protocol             */
+	MV_IP_PROTO_RSVP = 46,          /* RSVP protocol                        */
+	MV_IP_PROTO_GRE = 47,           /* Cisco GRE tunnels (rfc 1701,1702)    */
+	MV_IP_PROTO_ESP = 50,           /* Encapsulation Security Payload protocol */
+	MV_IP_PROTO_AH = 51,            /* Authentication Header protocol       */
+	MV_IP_PROTO_ICMPV6 = 58, /* Internet Group Management Protocol V6 */
+	MV_IP_PROTO_DH = 60,            /* Destination Options Header protocol  */
+	MV_IP_PROTO_BEETPH = 94,        /* IP option pseudo header for BEET     */
+	MV_IP_PROTO_PIM = 103,
+	MV_IP_PROTO_COMP = 108,         /* Compression Header protocol          */
+	MV_IP_PROTO_ZERO_HOP = 114,     /* Any 0 hop protocol (IANA)            */
+	MV_IP_PROTO_SCTP = 132,         /* Stream Control Transport Protocol    */
+	MV_IP_PROTO_MH = 135,           /* Mobility Header protocol             */
+	MV_IP_PROTO_UDPLITE = 136,      /* UDP-Lite (RFC 3828)                  */
+
+	MV_IP_PROTO_RAW = 255,          /* Raw IP packets                       */
+	MV_IP_PROTO_MAX
+};
+
+#define MV_IP4_FRAG_OFFSET_MASK 0x1FFF
+#define MV_IP4_DF_FLAG_MASK     0x4000
+#define MV_IP4_MF_FLAG_MASK     0x2000
+
+typedef struct {
+	MV_U8 version;
+	MV_U8 tos;
+	MV_U16 totalLength;
+	MV_U16 identifier;
+	MV_U16 fragmentCtrl;
+	MV_U8 ttl;
+	MV_U8 protocol;
+	MV_U16 checksum;
+	MV_U32 srcIP;
+	MV_U32 dstIP;
+
+} MV_IP_HEADER;
+
+typedef struct {
+	MV_U32 verClassFlow;
+	MV_U16 payloadLength;
+	MV_U8 protocol;
+	MV_U8 hoplimit;
+	MV_U8 srcAddr[16];
+	MV_U8 dstAddr[16];
+
+} MV_IP6_HEADER;
+
+typedef struct {
+	int family;
+	int ipOffset;
+	int ipHdrLen;
+	MV_U16 ipLen;
+	MV_U8 ipProto;
+	MV_U8 reserved;
+	union {
+		char          *l3;
+		MV_IP_HEADER  *ip4;
+		MV_IP6_HEADER *ip6;
+	} ip_hdr;
+} MV_IP_HEADER_INFO;
+
+typedef struct {
+	MV_U8 protocol;
+	MV_U8 length;
+	MV_U16 reserverd;
+	MV_U32 spi;
+	MV_U32 seqNum;
+} MV_AH_HEADER;
+
+typedef struct {
+	MV_U32 spi;
+	MV_U32 seqNum;
+} MV_ESP_HEADER;
+
+#define MV_ICMP_ECHOREPLY          0    /* Echo Reply                   */
+#define MV_ICMP_DEST_UNREACH       3    /* Destination Unreachable      */
+#define MV_ICMP_SOURCE_QUENCH      4    /* Source Quench                */
+#define MV_ICMP_REDIRECT           5    /* Redirect (change route)      */
+#define MV_ICMP_ECHO               8    /* Echo Request                 */
+#define MV_ICMP_TIME_EXCEEDED      11   /* Time Exceeded                */
+#define MV_ICMP_PARAMETERPROB      12   /* Parameter Problem            */
+#define MV_ICMP_TIMESTAMP          13   /* Timestamp Request            */
+#define MV_ICMP_TIMESTAMPREPLY     14   /* Timestamp Reply              */
+#define MV_ICMP_INFO_REQUEST       15   /* Information Request          */
+#define MV_ICMP_INFO_REPLY         16   /* Information Reply            */
+#define MV_ICMP_ADDRESS            17   /* Address Mask Request         */
+#define MV_ICMP_ADDRESSREPLY       18   /* Address Mask Reply           */
+
+typedef struct {
+	MV_U8 type;
+	MV_U8 code;
+	MV_U16 checksum;
+	MV_U16 id;
+	MV_U16 sequence;
+
+} MV_ICMP_ECHO_HEADER;
+
+#define MV_TCP_FLAG_FIN         (1 << 0)
+#define MV_TCP_FLAG_RST         (1 << 2)
+
+typedef struct {
+	MV_U16 source;
+	MV_U16 dest;
+	MV_U32 seq;
+	MV_U32 ack_seq;
+	MV_U16 flags;
+	MV_U16 window;
+	MV_U16 chksum;
+	MV_U16 urg_offset;
+
+} MV_TCP_HEADER;
+
+typedef struct {
+	MV_U16 source;
+	MV_U16 dest;
+	MV_U16 len;
+	MV_U16 check;
+
+} MV_UDP_HEADER;
+
+#endif /* __INCmv802_3h */
\ No newline at end of file
diff --git a/drivers/net/ethernet/mvebu_net/common/mvCommon.c b/drivers/net/ethernet/mvebu_net/common/mvCommon.c
new file mode 100644
index 000000000000..ac852f8c4ee7
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/common/mvCommon.c
@@ -0,0 +1,417 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCommon.h"
+#include "mvOs.h"
+
+int mvCharToHex(char ch)
+{
+	if ((ch >= '0') && (ch <= '9'))
+		return (ch - '0');
+
+	if ((ch >= 'a') && (ch <= 'f'))
+		return (ch - 'a') + 10;
+
+	if ((ch >= 'A') && (ch <= 'F'))
+		return (ch - 'A') + 10;
+
+	return -1;
+}
+
+int mvCharToDigit(char ch)
+{
+	if ((ch >= '0') && (ch <= '9'))
+		return (ch - '0');
+
+	return -1;
+}
+
+/*******************************************************************************
+* mvMacStrToHex - Convert MAC format string to hex.
+*
+* DESCRIPTION:
+*		This function convert MAC format string to hex.
+*
+* INPUT:
+*       macStr - MAC address string. Fornat of address string is
+*                uu:vv:ww:xx:yy:zz, where ":" can be any delimiter.
+*
+* OUTPUT:
+*       macHex - MAC in hex format.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_STATUS mvMacStrToHex(const char *macStr, MV_U8 *macHex)
+{
+	int i;
+	char tmp[3];
+
+	for (i = 0; i < MV_MAC_ADDR_SIZE; i++) {
+		tmp[0] = macStr[(i * 3) + 0];
+		tmp[1] = macStr[(i * 3) + 1];
+		tmp[2] = '\0';
+		macHex[i] = (MV_U8) (strtol(tmp, NULL, 16));
+	}
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvMacHexToStr - Convert MAC in hex format to string format.
+*
+* DESCRIPTION:
+*		This function convert MAC in hex format to string format.
+*
+* INPUT:
+*       macHex - MAC in hex format.
+*
+* OUTPUT:
+*       macStr - MAC address string. String format is uu:vv:ww:xx:yy:zz.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_STATUS mvMacHexToStr(MV_U8 *macHex, char *macStr)
+{
+	int i;
+
+	for (i = 0; i < MV_MAC_ADDR_SIZE; i++)
+		mvOsSPrintf(&macStr[i * 3], "%02x:", macHex[i]);
+
+	macStr[(i * 3) - 1] = '\0';
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvSizePrint - Print the given size with size unit description.
+*
+* DESCRIPTION:
+*		This function print the given size with size unit description.
+*       FOr example when size paramter is 0x180000, the function prints:
+*       "size 1MB+500KB"
+*
+* INPUT:
+*       size - Size in bytes.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_VOID mvSizePrint(MV_U64 size)
+{
+	mvOsOutput("size ");
+
+	if (size >= _1G) {
+		mvOsOutput("%3lldGB ", (MV_U64)(size >> 30));
+		size &= (MV_U64)(_1G - 1);
+		if (size)
+			mvOsOutput("+");
+	}
+	if (size >= _1M) {
+		mvOsOutput("%3lldMB ", size / _1M);
+		size %= _1M;
+		if (size)
+			mvOsOutput("+");
+	}
+	if (size >= _1K) {
+		mvOsOutput("%3lldKB ", size / _1K);
+		size %= _1K;
+		if (size)
+			mvOsOutput("+");
+	}
+	if (size > 0)
+		mvOsOutput("%3lldB ", size);
+
+}
+
+/*******************************************************************************
+* mvHexToBin - Convert hex to binary
+*
+* DESCRIPTION:
+*		This function Convert hex to binary.
+*
+* INPUT:
+*       pHexStr - hex buffer pointer.
+*       size    - Size to convert.
+*
+* OUTPUT:
+*       pBin - Binary buffer pointer.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+MV_VOID mvHexToBin(const char *pHexStr, MV_U8 *pBin, int size)
+{
+	int j, i;
+	char tmp[3];
+	MV_U8 byte;
+
+	for (j = 0, i = 0; j < size; j++, i += 2) {
+		tmp[0] = pHexStr[i];
+		tmp[1] = pHexStr[i + 1];
+		tmp[2] = '\0';
+		byte = (MV_U8) (strtol(tmp, NULL, 16) & 0xFF);
+		pBin[j] = byte;
+	}
+}
+
+void mvAsciiToHex(const char *asciiStr, char *hexStr)
+{
+	int i = 0;
+
+	while (asciiStr[i] != 0) {
+		mvOsSPrintf(&hexStr[i * 2], "%02x", asciiStr[i]);
+		i++;
+	}
+	hexStr[i * 2] = 0;
+}
+
+void mvBinToHex(const MV_U8 *bin, char *hexStr, int size)
+{
+	int i;
+
+	for (i = 0; i < size; i++)
+		mvOsSPrintf(&hexStr[i * 2], "%02x", bin[i]);
+
+	hexStr[i * 2] = '\0';
+}
+
+void mvBinToAscii(const MV_U8 *bin, char *asciiStr, int size)
+{
+	int i;
+
+	for (i = 0; i < size; i++)
+		mvOsSPrintf(&asciiStr[i * 2], "%c", bin[i]);
+
+	asciiStr[i * 2] = '\0';
+}
+
+/*******************************************************************************
+* mvLog2 -
+*
+* DESCRIPTION:
+*	Calculate the Log2 of a given number.
+*
+* INPUT:
+*       num - A number to calculate the Log2 for.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       Log 2 of the input number, or 0xFFFFFFFF if input is 0.
+*
+*******************************************************************************/
+MV_U32 mvLog2(MV_U32 num)
+{
+	MV_U32 result = 0;
+	if (num == 0)
+		return 0xFFFFFFFF;
+	while (num != 1) {
+		num = num >> 1;
+		result++;
+	}
+	return result;
+}
+
+/*******************************************************************************
+* mvWinOverlapTest
+*
+* DESCRIPTION:
+*       This function checks the given two address windows for overlaping.
+*
+* INPUT:
+*       pAddrWin1 - Address window 1.
+*       pAddrWin2 - Address window 2.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if address window overlaps, MV_FALSE otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvWinOverlapTest(MV_ADDR_WIN *pAddrWin1, MV_ADDR_WIN *pAddrWin2)
+{
+	/* Need to cancel overlap testing, because we use the
+	** MBUS Bridge Windows to access IO windows, and thus there will be
+	** always an overlap between the IO & DRAM windows.
+	*/
+	return MV_FALSE;
+#if 0
+	/* this code can only be enabled if physical DRAM size is smaller
+	** or equal to 3GB for debug purposes.\
+	*/
+	MV_U32 winBase1, winBase2;
+	MV_U32 winTop1, winTop2;
+
+	/* check if we have overflow than 4G */
+	if (((0xffffffff - pAddrWin1->baseLow) < pAddrWin1->size - 1) ||
+	    ((0xffffffff - pAddrWin2->baseLow) < pAddrWin2->size - 1)) {
+		return MV_TRUE;
+	}
+
+	winBase1 = pAddrWin1->baseLow;
+	winBase2 = pAddrWin2->baseLow;
+	winTop1 = winBase1 + pAddrWin1->size - 1;
+	winTop2 = winBase2 + pAddrWin2->size - 1;
+
+	if (((winBase1 <= winTop2) && (winTop2 <= winTop1)) || ((winBase1 <= winBase2) && (winBase2 <= winTop1)))
+		return MV_TRUE;
+	else
+		return MV_FALSE;
+#endif
+}
+
+/*******************************************************************************
+* mvWinWithinWinTest
+*
+* DESCRIPTION:
+*       This function checks the given win1 boundries is within win2 boundries.
+*
+* INPUT:
+*       pAddrWin1 - Address window 1.
+*       pAddrWin2 - Address window 2.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if found win1 inside win2, MV_FALSE otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvWinWithinWinTest(MV_ADDR_WIN *pAddrWin1, MV_ADDR_WIN *pAddrWin2)
+{
+	MV_U64 winBase1, winBase2;
+	MV_U64 winTop1, winTop2;
+
+	winBase1 = ((MV_U64)pAddrWin1->baseHigh << 32) + (MV_U32)pAddrWin1->baseLow;
+	winBase2 = ((MV_U64)pAddrWin2->baseHigh << 32) + (MV_U32)pAddrWin2->baseLow;
+	winTop1 = winBase1 + (MV_U64)pAddrWin1->size - 1;
+	winTop2 = winBase2 + (MV_U64)pAddrWin2->size - 1;
+
+	if (((winBase1 >= winBase2) && (winBase1 <= winTop2)) || ((winTop1 >= winBase2) && (winTop1 <= winTop2)))
+		return MV_TRUE;
+	else
+		return MV_FALSE;
+}
+
+/*******************************************************************************
+* mvReverseBits
+*
+* DESCRIPTION:
+*       This function Reverts the direction of the bits (LSB to MSB and vice versa)
+*
+* INPUT:
+*	num - MV_U8 number to revert
+*
+* OUTPUT:
+*       Reverted number
+*
+* RETURN:
+*	None
+*
+*******************************************************************************/
+MV_U8 mvReverseBits(MV_U8 num)
+{
+	num = (num & 0xF0) >> 4 | (num & 0x0F) << 4;
+	num = (num & 0xCC) >> 2 | (num & 0x33) << 2;
+	num = (num & 0xAA) >> 1 | (num & 0x55) << 1;
+	return num;
+}
+/*******************************************************************************
+* mvCountMaskBits
+*
+* DESCRIPTION:
+*       This function count 1 in mask bit
+*
+* INPUT:
+*	num - MV_U8 number to count
+*
+* OUTPUT:
+*       None
+*
+* RETURN:
+*	number of 1 in mask
+*
+*******************************************************************************/
+MV_U32 mvCountMaskBits(MV_U8 mask)
+{
+	int i;
+	MV_U32 c = 0;
+
+	for (i = 0; i < 8; i++) {
+		if (mask & 1)
+			c++;
+		mask = mask >> 1;
+	}
+	return c;
+}
diff --git a/drivers/net/ethernet/mvebu_net/common/mvCommon.h b/drivers/net/ethernet/mvebu_net/common/mvCommon.h
new file mode 100644
index 000000000000..405fd3c6befc
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/common/mvCommon.h
@@ -0,0 +1,420 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvCommonh
+#define __INCmvCommonh
+
+#ifdef __cplusplus
+extern "C" {
+#endif	/* __cplusplus */
+
+#include "mvTypes.h"
+#include "mvDeviceId.h"
+#ifndef MV_ASMLANGUAGE
+#include "mv802_3.h"
+#include "mvVideo.h"
+
+/* The golden ration: an arbitrary value */
+#define MV_JHASH_GOLDEN_RATIO           0x9e3779b9
+
+#define MV_JHASH_MIX(a, b, c)        \
+{                                   \
+    a -= b; a -= c; a ^= (c>>13);   \
+    b -= c; b -= a; b ^= (a<<8);    \
+    c -= a; c -= b; c ^= (b>>13);   \
+    a -= b; a -= c; a ^= (c>>12);   \
+    b -= c; b -= a; b ^= (a<<16);   \
+    c -= a; c -= b; c ^= (b>>5);    \
+    a -= b; a -= c; a ^= (c>>3);    \
+    b -= c; b -= a; b ^= (a<<10);   \
+    c -= a; c -= b; c ^= (b>>15);   \
+}
+
+#ifdef MV_VXWORKS
+static __inline__ MV_U32 mv_jhash_3words(MV_U32 a, MV_U32 b, MV_U32 c, MV_U32 initval)
+#else
+static inline MV_U32 mv_jhash_3words(MV_U32 a, MV_U32 b, MV_U32 c, MV_U32 initval)
+
+#endif
+{
+	a += MV_JHASH_GOLDEN_RATIO;
+	b += MV_JHASH_GOLDEN_RATIO;
+	c += initval;
+	MV_JHASH_MIX(a, b, c);
+
+	return c;
+}
+#endif
+
+
+/* Swap tool */
+
+/* 16bit nibble swap. For example 0x1234 -> 0x2143                          */
+#define MV_NIBBLE_SWAP_16BIT(X)        (((X&0xf) << 4) |     \
+					((X&0xf0) >> 4) |    \
+					((X&0xf00) << 4) |   \
+					((X&0xf000) >> 4))
+
+/* 32bit nibble swap. For example 0x12345678 -> 0x21436587                  */
+#define MV_NIBBLE_SWAP_32BIT(X)		(((X&0xf) << 4) |       \
+					((X&0xf0) >> 4) |      \
+					((X&0xf00) << 4) |     \
+					((X&0xf000) >> 4) |    \
+					((X&0xf0000) << 4) |   \
+					((X&0xf00000) >> 4) |  \
+					((X&0xf000000) << 4) | \
+					((X&0xf0000000) >> 4))
+
+/* 16bit byte swap. For example 0x1234->0x3412                             */
+#define MV_BYTE_SWAP_16BIT(X) ((((X)&0xff)<<8) | (((X)&0xff00)>>8))
+
+/* 32bit byte swap. For example 0x12345678->0x78563412                    */
+#define MV_BYTE_SWAP_32BIT(X)  ((((X)&0xff)<<24) |                       \
+				(((X)&0xff00)<<8) |                      \
+				(((X)&0xff0000)>>8) |                    \
+				(((X)&0xff000000)>>24))
+
+/* 64bit byte swap. For example 0x11223344.55667788 -> 0x88776655.44332211  */
+#define MV_BYTE_SWAP_64BIT(X) ((l64) ((((X)&0xffULL)<<56) |             \
+				      (((X)&0xff00ULL)<<40) |           \
+				      (((X)&0xff0000ULL)<<24) |         \
+				      (((X)&0xff000000ULL)<<8) |        \
+				      (((X)&0xff00000000ULL)>>8) |      \
+				      (((X)&0xff0000000000ULL)>>24) |   \
+				      (((X)&0xff000000000000ULL)>>40) | \
+				      (((X)&0xff00000000000000ULL)>>56)))
+
+/* Endianess macros.                                                        */
+#if defined(MV_CPU_LE)
+#define MV_16BIT_LE(X)  (X)
+#define MV_32BIT_LE(X)  (X)
+#define MV_64BIT_LE(X)  (X)
+#define MV_16BIT_BE(X)  MV_BYTE_SWAP_16BIT(X)
+#define MV_32BIT_BE(X)  MV_BYTE_SWAP_32BIT(X)
+#define MV_64BIT_BE(X)  MV_BYTE_SWAP_64BIT(X)
+#elif defined(MV_CPU_BE)
+#define MV_16BIT_LE(X)  MV_BYTE_SWAP_16BIT(X)
+#define MV_32BIT_LE(X)  MV_BYTE_SWAP_32BIT(X)
+#define MV_64BIT_LE(X)  MV_BYTE_SWAP_64BIT(X)
+#define MV_16BIT_BE(X)  (X)
+#define MV_32BIT_BE(X)  (X)
+#define MV_64BIT_BE(X)  (X)
+#else
+#error "CPU endianess isn't defined!\n"
+#endif
+
+/* Bit field definitions */
+#define NO_BIT      0x00000000
+
+/* avoid redefinition of bits */
+#ifndef BIT0
+
+#define BIT0        0x00000001
+#define BIT1        0x00000002
+#define BIT2        0x00000004
+#define BIT3        0x00000008
+#define BIT4        0x00000010
+#define BIT5        0x00000020
+#define BIT6        0x00000040
+#define BIT7        0x00000080
+#define BIT8        0x00000100
+#define BIT9        0x00000200
+#define BIT10       0x00000400
+#define BIT11       0x00000800
+#define BIT12       0x00001000
+#define BIT13       0x00002000
+#define BIT14       0x00004000
+#define BIT15       0x00008000
+#define BIT16       0x00010000
+#define BIT17       0x00020000
+#define BIT18       0x00040000
+#define BIT19       0x00080000
+#define BIT20       0x00100000
+#define BIT21       0x00200000
+#define BIT22       0x00400000
+#define BIT23       0x00800000
+#define BIT24       0x01000000
+#define BIT25       0x02000000
+#define BIT26       0x04000000
+#define BIT27       0x08000000
+#define BIT28       0x10000000
+#define BIT29       0x20000000
+#define BIT30       0x40000000
+#define BIT31       0x80000000
+
+#endif /* BIT0 */
+/* Handy sizes */
+#define _1K         0x00000400
+#define _2K         0x00000800
+#define _4K         0x00001000
+#define _8K         0x00002000
+#define _16K        0x00004000
+#define _32K        0x00008000
+#define _64K        0x00010000
+#define _128K       0x00020000
+#define _256K       0x00040000
+#define _512K       0x00080000
+
+#define _1M         0x00100000
+#define _2M         0x00200000
+#define _4M         0x00400000
+#define _8M         0x00800000
+#define _16M        0x01000000
+#define _32M        0x02000000
+#define _64M        0x04000000
+#define _128M       0x08000000
+#define _256M       0x10000000
+#define _512M       0x20000000
+
+#define _1G         0x40000000
+#define _2G         0x80000000
+
+/* Tclock and Sys clock define */
+#define _100MHz     100000000
+#define _125MHz     125000000
+#define _133MHz     133333334
+#define _150MHz     150000000
+#define _160MHz     160000000
+#define _166MHz     166666667
+#define _175MHz     175000000
+#define _178MHz     178000000
+#define _183MHz     183333334
+#define _187MHz     187000000
+#define _192MHz     192000000
+#define _194MHz     194000000
+#define _200MHz     200000000
+#define _233MHz     233333334
+#define _250MHz     250000000
+#define _266MHz     266666667
+#define _300MHz     300000000
+#define _800MHz     800000000
+#define _1GHz       1000000000UL
+#define _2GHz       2000000000UL
+
+/* Supported clocks */
+#define MV_BOARD_TCLK_100MHZ	100000000
+#define MV_BOARD_TCLK_125MHZ	125000000
+#define MV_BOARD_TCLK_133MHZ	133333333
+#define MV_BOARD_TCLK_150MHZ	150000000
+#define MV_BOARD_TCLK_166MHZ	166666667
+#define MV_BOARD_TCLK_200MHZ	200000000
+#define MV_BOARD_TCLK_250MHZ	250000000
+
+#define MV_BOARD_SYSCLK_100MHZ	100000000
+#define MV_BOARD_SYSCLK_125MHZ	125000000
+#define MV_BOARD_SYSCLK_133MHZ	133333333
+#define MV_BOARD_SYSCLK_150MHZ	150000000
+#define MV_BOARD_SYSCLK_166MHZ	166666667
+#define MV_BOARD_SYSCLK_200MHZ	200000000
+#define MV_BOARD_SYSCLK_233MHZ	233333333
+#define MV_BOARD_SYSCLK_250MHZ	250000000
+#define MV_BOARD_SYSCLK_267MHZ	266666667
+#define MV_BOARD_SYSCLK_300MHZ	300000000
+#define MV_BOARD_SYSCLK_333MHZ	333333334
+#define MV_BOARD_SYSCLK_400MHZ	400000000
+
+#define MV_BOARD_REFCLK_25MHZ	 25000000
+
+/* For better address window table readability */
+#define EN			MV_TRUE
+#define DIS			MV_FALSE
+#define N_A			-1	/* Not applicable */
+
+/* Cache configuration options for memory (DRAM, SRAM, ... ) */
+
+/* Memory uncached, HW or SW cache coherency is not needed */
+#define MV_UNCACHED             0
+/* Memory cached, HW cache coherency supported in WriteThrough mode */
+#define MV_CACHE_COHER_HW_WT    1
+/* Memory cached, HW cache coherency supported in WriteBack mode */
+#define MV_CACHE_COHER_HW_WB    2
+/* Memory cached, No HW cache coherency, Cache coherency must be in SW */
+#define MV_CACHE_COHER_SW       3
+
+/* Macro for testing aligment. Positive if number is NOT aligned   */
+#define MV_IS_NOT_ALIGN(number, align)      ((number) & ((align) - 1))
+
+/* Macro for alignment up. For example, MV_ALIGN_UP(0x0330, 0x20) = 0x0340   */
+#define MV_ALIGN_UP(number, align)                                          \
+(((number) & ((align) - 1)) ? (((number) + (align)) & ~((align)-1)) : (number))
+
+/* Macro for alignment down. For example, MV_ALIGN_UP(0x0330, 0x20) = 0x0320 */
+#define MV_ALIGN_DOWN(number, align) ((number) & ~((align)-1))
+
+/* This macro returns absolute value                                        */
+#define MV_ABS(number)  (((int)(number) < 0) ? -(int)(number) : (int)(number))
+
+/* Bit fields manipulation macros                                           */
+
+/* An integer word which its 'x' bit is set                                 */
+#define MV_BIT_MASK(bitNum)         (1 << (bitNum))
+
+/* Checks wheter bit 'x' in integer word is set                             */
+#define MV_BIT_CHECK(word, bitNum)  ((word) & MV_BIT_MASK(bitNum))
+
+/* Clear (reset) bit 'x' in integer word (RMW - Read-Modify-Write)          */
+#define MV_BIT_CLEAR(word, bitNum)  ((word) &= ~(MV_BIT_MASK(bitNum)))
+
+/* Set bit 'x' in integer word (RMW)                                        */
+#define MV_BIT_SET(word, bitNum)    ((word) |= MV_BIT_MASK(bitNum))
+
+/* Invert bit 'x' in integer word (RMW)                                     */
+#define MV_BIT_INV(word, bitNum)    ((word) ^= MV_BIT_MASK(bitNum))
+
+/* Get the min between 'a' or 'b'                                           */
+#define MV_MIN(a, b)    (((a) < (b)) ? (a) : (b))
+
+/* Get the max between 'a' or 'b'                                           */
+#define MV_MAX(a, b)    (((a) < (b)) ? (b) : (a))
+
+#define mvOsDivide(num, div)	\
+({				\
+	int i = 0, rem = (num);	\
+	while (rem >= (div)) {	\
+		rem -= (div);	\
+		i++;		\
+	}			\
+	(i);			\
+})
+
+#define mvOsReminder(num, div)	\
+({				\
+	int rem = (num);	\
+	while (rem >= (div))	\
+		rem -= (div);	\
+	(rem);			\
+})
+
+#define MV_MACQUAD_FMT "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x"
+
+#define MV_MACQUAD(addr) \
+	((unsigned char *)addr)[0], \
+	((unsigned char *)addr)[1], \
+	((unsigned char *)addr)[2], \
+	((unsigned char *)addr)[3], \
+	((unsigned char *)addr)[4], \
+	((unsigned char *)addr)[5]
+
+#define MV_IPQUAD_FMT         "%u.%u.%u.%u"
+#define MV_IPQUAD(ip)         ip[0], ip[1], ip[2], ip[3]
+
+#define MV_IP_QUAD(ipAddr)    ((ipAddr >> 24) & 0xFF), ((ipAddr >> 16) & 0xFF), \
+				((ipAddr >> 8) & 0xFF), ((ipAddr >> 0) & 0xFF)
+
+#define MV_IP6_FMT		"%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x"
+#define MV_IP6_ARG(L3)		L3[0], L3[1], L3[2], L3[3],	\
+				L3[4], L3[5], L3[6], L3[7],	\
+				L3[8], L3[9], L3[10], L3[11],	\
+				L3[12], L3[13], L3[14], L3[15]
+
+#define MV_IS_POWER_OF_2(num) ((num != 0) && ((num & (num - 1)) == 0))
+
+#define MV_GET_BIT(word, bitNum) (((word) & (1 << (bitNum))) >> (bitNum))
+
+#define MV_SET_BIT(word, bitNum, bitVal) (((word) & ~(1 << (bitNum))) | (bitVal << bitNum))
+
+#define MV_ARRAY_SIZE(a)                    ((sizeof(a)) / (sizeof(a[0])))
+
+#define MV_IF_NULL_RET_STR(ptr, rc, format, ...) { \
+	if (ptr == NULL) {\
+		pr_err("(error) %s(%d) (rc=%d): "format, __func__, __LINE__, rc, ##__VA_ARGS__);\
+		return rc;\
+	} \
+}
+#define MV_IF_NULL_STR(ptr, format, ...) { \
+	if (ptr == NULL) {\
+		pr_err("(error) %s(%d): "format, __func__, __LINE__, ##__VA_ARGS__);\
+		return;\
+	} \
+}
+
+#ifndef MV_ASMLANGUAGE
+/* mvCommon API list */
+
+int mvCharToHex(char ch);
+int mvCharToDigit(char ch);
+
+MV_VOID mvHexToBin(const char *pHexStr, MV_U8 *pBin, int size);
+void mvAsciiToHex(const char *asciiStr, char *hexStr);
+void mvBinToHex(const MV_U8 *bin, char *hexStr, int size);
+void mvBinToAscii(const MV_U8 *bin, char *asciiStr, int size);
+MV_U8 mvReverseBits(MV_U8 num);
+MV_U32 mvCountMaskBits(MV_U8 mask);
+
+MV_STATUS mvMacStrToHex(const char *macStr, MV_U8 *macHex);
+MV_STATUS mvMacHexToStr(MV_U8 *macHex, char *macStr);
+void mvSizePrint(MV_U64);
+
+MV_U32 mvLog2(MV_U32 num);
+
+MV_STATUS mvWinOverlapTest(MV_ADDR_WIN *pAddrWin1, MV_ADDR_WIN *pAddrWin2);
+MV_STATUS mvWinWithinWinTest(MV_ADDR_WIN *pAddrWin1, MV_ADDR_WIN *pAddrWin2);
+
+#endif /* MV_ASMLANGUAGE */
+
+#ifdef __cplusplus
+}
+#endif	/* __cplusplus */
+
+#endif /* __INCmvCommonh */
diff --git a/drivers/net/ethernet/mvebu_net/common/mvCopyright.h b/drivers/net/ethernet/mvebu_net/common/mvCopyright.h
new file mode 100644
index 000000000000..458562b5da45
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/common/mvCopyright.h
@@ -0,0 +1,97 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the three
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+Boston, MA 02110-1301 USA or on the worldwide web at
+http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+
+********************************************************************************
+Marvell GNU General Public License FreeRTOS Exception
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the Lesser
+General Public License Version 2.1 plus the following FreeRTOS exception.
+An independent module is a module which is not derived from or based on
+FreeRTOS.
+Clause 1:
+Linking FreeRTOS statically or dynamically with other modules is making a
+combined work based on FreeRTOS. Thus, the terms and conditions of the GNU
+General Public License cover the whole combination.
+As a special exception, the copyright holder of FreeRTOS gives you permission
+to link FreeRTOS with independent modules that communicate with FreeRTOS solely
+through the FreeRTOS API interface, regardless of the license terms of these
+independent modules, and to copy and distribute the resulting combined work
+under terms of your choice, provided that:
+1. Every copy of the combined work is accompanied by a written statement that
+details to the recipient the version of FreeRTOS used and an offer by yourself
+to provide the FreeRTOS source code (including any modifications you may have
+made) should the recipient request it.
+2. The combined work is not itself an RTOS, scheduler, kernel or related
+product.
+3. The independent modules add significant and primary functionality to
+FreeRTOS and do not merely extend the existing functionality already present in
+FreeRTOS.
+Clause 2:
+FreeRTOS may not be used for any competitive or comparative purpose, including
+the publication of any form of run time or compile time metric, without the
+express permission of Real Time Engineers Ltd. (this is the norm within the
+industry and is intended to ensure information accuracy).
+
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	* Redistributions of source code must retain the above copyright notice,
+	  this list of conditions and the following disclaimer.
+
+	* Redistributions in binary form must reproduce the above copyright
+	  notice, this list of conditions and the following disclaimer in the
+	  documentation and/or other materials provided with the distribution.
+
+	* Neither the name of Marvell nor the names of its contributors may be
+	  used to endorse or promote products derived from this software without
+	  specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
diff --git a/drivers/net/ethernet/mvebu_net/common/mvDebug.c b/drivers/net/ethernet/mvebu_net/common/mvDebug.c
new file mode 100644
index 000000000000..e2eea49a2eab
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/common/mvDebug.c
@@ -0,0 +1,275 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/* includes */
+#include "mv802_3.h"
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "mvDebug.h"
+
+/* Global variables effect on behave MV_DEBUG_PRINT and MV_DEBUG_CODE macros
+ * mvDebug  - map of bits (one for each module) bit=1 means enable
+ *          debug code and messages for this module
+ * mvModuleDebug - array of 32 bits varables one for each module
+ */
+MV_U32 mvDebug = 0;
+MV_U32 mvDebugModules[MV_MODULE_MAX];
+
+/* Init mvModuleDebug array to default values */
+void mvDebugInit(void)
+{
+	int bit;
+
+	mvDebug = 0;
+	for (bit = 0; bit < MV_MODULE_MAX; bit++) {
+		mvDebugModules[bit] = MV_DEBUG_FLAG_ERR | MV_DEBUG_FLAG_STATS;
+		mvDebug |= MV_BIT_MASK(bit);
+	}
+}
+
+void mvDebugModuleEnable(MV_MODULE_ID module, MV_BOOL isEnable)
+{
+	if (isEnable)
+		MV_BIT_SET(mvDebug, module);
+	else
+		MV_BIT_CLEAR(mvDebug, module);
+}
+
+void mvDebugModuleSetFlags(MV_MODULE_ID module, MV_U32 flags)
+{
+	mvDebugModules[module] |= flags;
+}
+
+void mvDebugModuleClearFlags(MV_MODULE_ID module, MV_U32 flags)
+{
+	mvDebugModules[module] &= ~flags;
+}
+
+/* Dump memory in specific format:
+ * address: X1X1X1X1 X2X2X2X2 ... X8X8X8X8
+ */
+void mvDebugMemDump(void *addr, int size, int access)
+{
+	int i, j;
+	MV_U32 memAddr = (MV_U32) addr;
+
+	if (access == 0)
+		access = 1;
+
+	if ((access != 4) && (access != 2) && (access != 1)) {
+		mvOsPrintf("%d wrong access size. Access must be 1 or 2 or 4\n", access);
+		return;
+	}
+	memAddr = MV_ALIGN_DOWN((unsigned int)addr, 4);
+	size = MV_ALIGN_UP(size, 4);
+	addr = (void *)MV_ALIGN_DOWN((unsigned int)addr, access);
+	while (size > 0) {
+		mvOsPrintf("%08x: ", memAddr);
+		i = 0;
+		/* 32 bytes in the line */
+		while (i < 32) {
+			if (memAddr >= (MV_U32) addr) {
+				switch (access) {
+				case 1:
+					mvOsPrintf("%02x ", MV_MEMIO8_READ(memAddr));
+					break;
+
+				case 2:
+					mvOsPrintf("%04x ", MV_MEMIO16_READ(memAddr));
+					break;
+
+				case 4:
+					mvOsPrintf("%08x ", MV_MEMIO32_READ(memAddr));
+					break;
+				}
+			} else {
+				for (j = 0; j < (access * 2 + 1); j++)
+					mvOsPrintf(" ");
+			}
+			i += access;
+			memAddr += access;
+			size -= access;
+			if (size <= 0)
+				break;
+		}
+		mvOsPrintf("\n");
+	}
+}
+
+void mvDebugPrintBufInfo(BUF_INFO *pBufInfo, int size, int access)
+{
+	if (pBufInfo == NULL) {
+		mvOsPrintf("\n!!! pBufInfo = NULL\n");
+		return;
+	}
+	mvOsPrintf("\n*** pBufInfo=0x%x, cmdSts=0x%08x, pBuf=0x%x, bufSize=%d\n",
+		   (unsigned int)pBufInfo,
+		   (unsigned int)pBufInfo->cmdSts, (unsigned int)pBufInfo->pBuff, (unsigned int)pBufInfo->bufSize);
+	mvOsPrintf("pData=0x%x, byteCnt=%d, pNext=0x%x, uInfo1=0x%x, uInfo2=0x%x\n",
+		   (unsigned int)pBufInfo->pData,
+		   (unsigned int)pBufInfo->byteCnt,
+		   (unsigned int)pBufInfo->pNextBufInfo,
+		   (unsigned int)pBufInfo->userInfo1, (unsigned int)pBufInfo->userInfo2);
+	if (pBufInfo->pData != NULL) {
+		if (size > pBufInfo->byteCnt)
+			size = pBufInfo->byteCnt;
+		mvDebugMemDump(pBufInfo->pData, size, access);
+	}
+}
+
+void mvDebugPrintPktInfo(MV_PKT_INFO *pPktInfo, int size, int access)
+{
+	int frag, len;
+
+	if (pPktInfo == NULL) {
+		mvOsPrintf("\n!!! pPktInfo = NULL\n");
+		return;
+	}
+	mvOsPrintf("\npPkt=%p, stat=0x%08x, numFr=%d, size=%d, pFr=%p, osInfo=0x%lx\n",
+		   pPktInfo, pPktInfo->status, pPktInfo->numFrags, pPktInfo->pktSize,
+		   pPktInfo->pFrags, pPktInfo->osInfo);
+
+	for (frag = 0; frag < pPktInfo->numFrags; frag++) {
+		mvOsPrintf("#%2d. bufVirt=%p, bufSize=%d\n",
+			   frag, pPktInfo->pFrags[frag].bufVirtPtr, pPktInfo->pFrags[frag].bufSize);
+		if (size > 0) {
+			len = MV_MIN((int)pPktInfo->pFrags[frag].bufSize, size);
+			mvDebugMemDump(pPktInfo->pFrags[frag].bufVirtPtr, len, access);
+			size -= len;
+		}
+	}
+
+}
+
+void mvDebugPrintIpAddr(MV_U32 ipAddr)
+{
+	mvOsPrintf("%d.%d.%d.%d", ((ipAddr >> 24) & 0xFF), ((ipAddr >> 16) & 0xFF),
+		   ((ipAddr >> 8) & 0xFF), ((ipAddr >> 0) & 0xFF));
+}
+
+void mvDebugPrintMacAddr(const MV_U8 *pMacAddr)
+{
+	int i;
+
+	mvOsPrintf("%02x", (unsigned int)pMacAddr[0]);
+	for (i = 1; i < MV_MAC_ADDR_SIZE; i++)
+		mvOsPrintf(":%02x", pMacAddr[i]);
+
+	/* mvOsPrintf("\n"); */
+}
+
+/******* There are three functions deals with MV_DEBUG_TIMES structure ********/
+
+/* Reset MV_DEBUG_TIMES entry */
+void mvDebugResetTimeEntry(MV_DEBUG_TIMES *pTimeEntry, int count, char *pName)
+{
+	pTimeEntry->begin = 0;
+	pTimeEntry->count = count;
+	pTimeEntry->end = 0;
+	pTimeEntry->left = pTimeEntry->count;
+	pTimeEntry->total = 0;
+	pTimeEntry->min = 0xFFFFFFFF;
+	pTimeEntry->max = 0x0;
+	strncpy(pTimeEntry->name, pName, sizeof(pTimeEntry->name) - 1);
+	pTimeEntry->name[sizeof(pTimeEntry->name) - 1] = '\0';
+}
+
+/* Print out MV_DEBUG_TIMES entry */
+void mvDebugPrintTimeEntry(MV_DEBUG_TIMES *pTimeEntry, MV_BOOL isTitle)
+{
+	int num;
+
+	if (isTitle == MV_TRUE)
+		mvOsPrintf("Event         NumOfEvents       TotalTime         Average       Min       Max\n");
+
+	num = pTimeEntry->count - pTimeEntry->left;
+	if (num > 0) {
+		mvOsPrintf("%-11s     %6u          0x%08lx        %6lu     %6lu    %6lu\n",
+			   pTimeEntry->name, num, pTimeEntry->total, pTimeEntry->total / num,
+			   pTimeEntry->min, pTimeEntry->max);
+	}
+}
+
+/* Update MV_DEBUG_TIMES entry */
+void mvDebugUpdateTimeEntry(MV_DEBUG_TIMES *pTimeEntry)
+{
+	MV_U32 delta;
+
+	if (pTimeEntry->left > 0) {
+		if (pTimeEntry->end <= pTimeEntry->begin)
+			delta = pTimeEntry->begin - pTimeEntry->end;
+		else
+			delta = ((MV_U32) 0x10000 - pTimeEntry->end) + pTimeEntry->begin;
+
+		pTimeEntry->total += delta;
+
+		if (delta < pTimeEntry->min)
+			pTimeEntry->min = delta;
+
+		if (delta > pTimeEntry->max)
+			pTimeEntry->max = delta;
+
+		pTimeEntry->left--;
+	}
+}
diff --git a/drivers/net/ethernet/mvebu_net/common/mvDebug.h b/drivers/net/ethernet/mvebu_net/common/mvDebug.h
new file mode 100644
index 000000000000..595054e9ebd2
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/common/mvDebug.h
@@ -0,0 +1,169 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvDebugh
+#define __INCmvDebugh
+
+/* includes */
+#include "mvTypes.h"
+
+typedef enum {
+	MV_MODULE_INVALID = -1,
+	MV_MODULE_ETH = 0,
+	MV_MODULE_IDMA,
+	MV_MODULE_XOR,
+	MV_MODULE_TWASI,
+	MV_MODULE_MGI,
+	MV_MODULE_USB,
+	MV_MODULE_CESA,
+
+	MV_MODULE_MAX
+} MV_MODULE_ID;
+
+/* Define generic flags useful for most of modules */
+#define MV_DEBUG_FLAG_ALL   (0)
+#define MV_DEBUG_FLAG_INIT  (1 << 0)
+#define MV_DEBUG_FLAG_RX    (1 << 1)
+#define MV_DEBUG_FLAG_TX    (1 << 2)
+#define MV_DEBUG_FLAG_ERR   (1 << 3)
+#define MV_DEBUG_FLAG_TRACE (1 << 4)
+#define MV_DEBUG_FLAG_DUMP  (1 << 5)
+#define MV_DEBUG_FLAG_CACHE (1 << 6)
+#define MV_DEBUG_FLAG_IOCTL (1 << 7)
+#define MV_DEBUG_FLAG_STATS (1 << 8)
+
+extern MV_U32 mvDebug;
+extern MV_U32 mvDebugModules[MV_MODULE_MAX];
+
+#ifdef MV_DEBUG
+# define MV_DEBUG_PRINT(module, flags, msg)     mvOsPrintf msg
+# define MV_DEBUG_CODE(module, flags, code)     code
+#elif defined(MV_RT_DEBUG)
+# define MV_DEBUG_PRINT(module, flags, msg)                    \
+    if ((mvDebug & (1<<(module))) &&                           \
+	((mvDebugModules[(module)] & (flags)) == (flags)))     \
+	mvOsPrintf msg
+# define MV_DEBUG_CODE(module, flags, code)                    \
+    if ((mvDebug & (1<<(module))) &&                           \
+	((mvDebugModules[(module)] & (flags)) == (flags)))     \
+	code
+#else
+# define MV_DEBUG_PRINT(module, flags, msg)
+# define MV_DEBUG_CODE(module, flags, code)
+#endif
+
+/* typedefs */
+
+/*  time measurement structure used to check how much time pass between
+ *  two points
+ */
+typedef struct {
+	char name[20];		/* name of the entry */
+	unsigned long begin;	/* time measured on begin point */
+	unsigned long end;	/* time measured on end point */
+	unsigned long total;	/* Accumulated time */
+	unsigned long left;	/* The rest measurement actions */
+	unsigned long count;	/* Maximum measurement actions */
+	unsigned long min;	/* Minimum time from begin to end */
+	unsigned long max;	/* Maximum time from begin to end */
+} MV_DEBUG_TIMES;
+
+/* mvDebug.h API list */
+
+/****** Error Recording ******/
+
+/* Dump memory in specific format:
+ * address: X1X1X1X1 X2X2X2X2 ... X8X8X8X8
+ */
+void mvDebugMemDump(void *addr, int size, int access);
+
+void mvDebugPrintBufInfo(BUF_INFO *pBufInfo, int size, int access);
+
+void mvDebugPrintPktInfo(MV_PKT_INFO *pPktInfo, int size, int access);
+
+void mvDebugPrintIpAddr(MV_U32 ipAddr);
+
+void mvDebugPrintMacAddr(const MV_U8 *pMacAddr);
+
+/**** There are three functions deals with MV_DEBUG_TIMES structure ****/
+
+/* Reset MV_DEBUG_TIMES entry */
+void mvDebugResetTimeEntry(MV_DEBUG_TIMES *pTimeEntry, int count, char *name);
+
+/* Update MV_DEBUG_TIMES entry */
+void mvDebugUpdateTimeEntry(MV_DEBUG_TIMES *pTimeEntry);
+
+/* Print out MV_DEBUG_TIMES entry */
+void mvDebugPrintTimeEntry(MV_DEBUG_TIMES *pTimeEntry, MV_BOOL isTitle);
+
+/******** General ***********/
+
+/* Change value of mvDebugPrint global variable */
+
+void mvDebugInit(void);
+void mvDebugModuleEnable(MV_MODULE_ID module, MV_BOOL isEnable);
+void mvDebugModuleSetFlags(MV_MODULE_ID module, MV_U32 flags);
+void mvDebugModuleClearFlags(MV_MODULE_ID module, MV_U32 flags);
+
+#endif /* __INCmvDebug.h */
diff --git a/drivers/net/ethernet/mvebu_net/common/mvDeviceId.h b/drivers/net/ethernet/mvebu_net/common/mvDeviceId.h
new file mode 100755
index 000000000000..4c84e156be74
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/common/mvDeviceId.h
@@ -0,0 +1,470 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvDeviceIdh
+#define __INCmvDeviceIdh
+
+#ifdef __cplusplus
+extern "C" {
+#endif	/* __cplusplus */
+
+/* defines  */
+#define MARVELL_VEN_ID			0x11ab
+#define MV_INVALID_DEV_ID		0xffff
+
+/* Disco-3 */
+#define MV64460_DEV_ID          	0x6480
+#define MV64460B_DEV_ID         	0x6485
+#define MV64430_DEV_ID          	0x6420
+
+/* Disco-5 */
+#define MV64560_DEV_ID          	0x6450
+
+/* Disco-6 */
+#define MV64660_DEV_ID          	0x6460
+
+/* Orion */
+#define MV_1181_DEV_ID          	0x1181
+#define MV_5181_DEV_ID          	0x5181
+#define MV_5281_DEV_ID          	0x5281
+#define MV_5182_DEV_ID          	0x5182
+#define MV_8660_DEV_ID          	0x8660
+#define MV_5180_DEV_ID          	0x5180
+#define MV_5082_DEV_ID          	0x5082
+#define MV_1281_DEV_ID          	0x1281
+#define MV_6082_DEV_ID          	0x6082
+#define MV_6183_DEV_ID          	0x6183
+#define MV_6183L_DEV_ID          	0x6083
+
+#define MV_5281_D0_REV          	0x4
+#define MV_5281_D0_ID           	((MV_5281_DEV_ID << 16) | MV_5281_D0_REV)
+#define MV_5281_D0_NAME         "88F5281 D0"
+
+#define MV_5281_D1_REV          	0x5
+#define MV_5281_D1_ID           	((MV_5281_DEV_ID << 16) | MV_5281_D1_REV)
+#define MV_5281_D1_NAME         "88F5281 D1"
+
+#define MV_5281_D2_REV          	0x6
+#define MV_5281_D2_ID           	((MV_5281_DEV_ID << 16) | MV_5281_D2_REV)
+#define MV_5281_D2_NAME         "88F5281 D2"
+
+#define MV_5181L_A0_REV         	0x8	/* need for PCIE Er */
+#define MV_5181_A1_REV          	0x1	/* for USB Er .. */
+#define MV_5181_B0_REV          	0x2
+#define MV_5181_B1_REV          	0x3
+#define MV_5182_A1_REV          	0x1
+#define MV_5180N_B1_REV         	0x3
+#define MV_5181L_A0_ID          	((MV_5181_DEV_ID << 16) | MV_5181L_A0_REV)
+
+/* kw */
+#define MV_6281_DEV_ID          	0x6281
+#define MV_6282_DEV_ID          	0x1155
+#define MV_6192_DEV_ID          	0x6192
+#define MV_6190_DEV_ID          	0x6190
+#define MV_6180_DEV_ID          	0x6180
+#define MV_6280_DEV_ID          	0x6280
+
+#define MV_6281_A0_REV         		0x2
+#define MV_6281_A0_ID          		((MV_6281_DEV_ID << 16) | MV_6281_A0_REV)
+#define MV_6281_A0_NAME         	"88F6281 A0"
+
+#define MV_6192_A0_REV         		0x2
+#define MV_6192_A0_ID          		((MV_6192_DEV_ID << 16) | MV_6192_A0_REV)
+#define MV_6192_A0_NAME         	"88F6192 A0"
+
+#define MV_6190_A0_REV         		0x2
+#define MV_6190_A0_ID          		((MV_6190_DEV_ID << 16) | MV_6190_A0_REV)
+#define MV_6190_A0_NAME         	"88F6190 A0"
+
+#define MV_6180_A0_REV         		0x2
+#define MV_6180_A0_ID          		((MV_6180_DEV_ID << 16) | MV_6180_A0_REV)
+#define MV_6180_A0_NAME         	"88F6180 A0"
+
+#define MV_6281_A1_REV              0x3
+#define MV_6281_A1_ID               ((MV_6281_DEV_ID << 16) | MV_6281_A1_REV)
+#define MV_6281_A1_NAME             "88F6281 A1"
+
+#define MV_6282_A1_REV              0x3
+#define MV_6282_A1_ID               ((MV_6282_DEV_ID << 16) | MV_6282_A1_REV)
+#define MV_6282_A1_NAME             "88F6282 A1"
+
+#define MV_6280_A1_REV         		0x3
+#define MV_6280_A1_ID          		((MV_6280_DEV_ID << 16) | MV_6280_A0_REV)
+#define MV_6280_A1_NAME         	"88F6280 A1"
+
+#define MV_6192_A1_REV              0x3
+#define MV_6192_A1_ID               ((MV_6192_DEV_ID << 16) | MV_6192_A1_REV)
+#define MV_6192_A1_NAME             "88F6192 A1"
+
+#define MV_6190_A1_REV              0x3
+#define MV_6190_A1_ID               ((MV_6190_DEV_ID << 16) | MV_6190_A1_REV)
+#define MV_6190_A1_NAME             "88F6190 A1"
+
+#define MV_6180_A1_REV              0x3
+#define MV_6180_A1_ID               ((MV_6180_DEV_ID << 16) | MV_6180_A1_REV)
+#define MV_6180_A1_NAME             "88F6180 A1"
+
+#define MV_88F6XXX_A0_REV         	0x2
+#define MV_88F6XXX_A1_REV         	0x3
+/* Disco-Duo */
+#define MV_78XX0_ZY_DEV_ID       0x6381
+#define MV_78XX0_ZY_NAME         "MV78X00"
+
+#define MV_78XX0_Z0_REV         0x1
+#define MV_78XX0_Z0_ID          ((MV_78XX0_ZY_DEV_ID << 16) | MV_78XX0_Z0_REV)
+#define MV_78XX0_Z0_NAME        "78X00 Z0"
+
+#define MV_78XX0_Y0_REV         0x2
+#define MV_78XX0_Y0_ID          ((MV_78XX0_ZY_DEV_ID << 16) | MV_78XX0_Y0_REV)
+#define MV_78XX0_Y0_NAME        "78X00 Y0"
+
+#define MV_78XX0_DEV_ID       	0x7800
+#define MV_78XX0_NAME         	"MV78X00"
+
+#define MV_76100_DEV_ID      	0x7610
+#define MV_78200_DEV_ID      	0x7820
+#define MV_78100_DEV_ID      	0x7810
+#define MV_78XX0_A0_REV		0x1
+#define MV_78XX0_A1_REV		0x2
+
+#define MV_76100_NAME		"MV76100"
+#define MV_78100_NAME		"MV78100"
+#define MV_78200_NAME		"MV78200"
+
+#define MV_76100_A0_ID		((MV_76100_DEV_ID << 16) | MV_78XX0_A0_REV)
+#define MV_78100_A0_ID		((MV_78100_DEV_ID << 16) | MV_78XX0_A0_REV)
+#define MV_78200_A0_ID		((MV_78200_DEV_ID << 16) | MV_78XX0_A0_REV)
+
+#define MV_76100_A1_ID		((MV_76100_DEV_ID << 16) | MV_78XX0_A1_REV)
+#define MV_78100_A1_ID		((MV_78100_DEV_ID << 16) | MV_78XX0_A1_REV)
+#define MV_78200_A1_ID		((MV_78200_DEV_ID << 16) | MV_78XX0_A1_REV)
+
+#define MV_76100_A0_NAME	"MV76100 A0"
+#define MV_78100_A0_NAME	"MV78100 A0"
+#define MV_78200_A0_NAME	"MV78200 A0"
+#define MV_78XX0_A0_NAME	"MV78XX0 A0"
+
+#define MV_76100_A1_NAME	"MV76100 A1"
+#define MV_78100_A1_NAME	"MV78100 A1"
+#define MV_78200_A1_NAME	"MV78200 A1"
+#define MV_78XX0_A1_NAME	"MV78XX0 A1"
+
+/*MV88F632X family*/
+#define MV_6321_DEV_ID      	0x6321
+#define MV_6322_DEV_ID      	0x6322
+#define MV_6323_DEV_ID      	0x6323
+
+#define MV_6321_NAME		"88F6321"
+#define MV_6322_NAME		"88F6322"
+#define MV_6323_NAME		"88F6323"
+
+#define MV_632X_A1_REV		0x2
+
+#define MV_6321_A1_ID		((MV_6321_DEV_ID << 16) | MV_632X_A1_REV)
+#define MV_6322_A1_ID		((MV_6322_DEV_ID << 16) | MV_632X_A1_REV)
+#define MV_6323_A1_ID		((MV_6323_DEV_ID << 16) | MV_632X_A1_REV)
+
+#define MV_6321_A1_NAME		"88F6321 A1"
+#define MV_6322_A1_NAME		"88F6322 A1"
+#define MV_6323_A1_NAME		"88F6323 A1"
+
+/*MV88F6500 family*/
+#define MV_65XX_DEV_ID		0x6500
+#define MV_6510_DEV_ID		0x6510
+#define MV_6530_DEV_ID		0x6530
+#define MV_6550_DEV_ID		0x6550
+#define MV_6560_DEV_ID		0x6560
+
+#define MV_6510_Z0_REV         		0x1
+#define MV_6510_Z0_ID          		((MV_6510_DEV_ID << 16) | MV_6510_Z0_REV)
+#define MV_6510_Z0_NAME         	"88F6510 Z0"
+
+#define MV_6530_Z0_REV         		0x1
+#define MV_6530_Z0_ID          		((MV_6530_DEV_ID << 16) | MV_6530_Z0_REV)
+#define MV_6530_Z0_NAME         	"88F6530 Z0"
+
+#define MV_6550_Z0_REV         		0x1
+#define MV_6550_Z0_ID          		((MV_6550_DEV_ID << 16) | MV_6550_Z0_REV)
+#define MV_6550_Z0_NAME         	"88F6550 Z0"
+
+#define MV_6560_Z0_REV         		0x1
+#define MV_6560_Z0_ID          		((MV_6560_DEV_ID << 16) | MV_6560_Z0_REV)
+#define MV_6560_Z0_NAME         	"88F6560 Z0"
+
+
+/* KW40 */
+#define MV_67XX			0x6700
+#define MV_6710_DEV_ID		0x6710
+
+#define MV_6710_Z1_REV		0x0
+#define MV_6710_Z1_ID		((MV_6710_DEV_ID << 16) | MV_6710_Z1_REV)
+#define MV_6710_Z1_NAME		"MV6710 Z1"
+#define MV_6710_A0_REV          0x0
+#define MV_6710_A0_ID           ((MV_6710_DEV_ID << 16) | MV_6710_A0_REV)
+#define MV_6710_A0_NAME         "MV6710 A0"
+
+#define MV_6710_A1_REV          0x1
+#define MV_6710_A1_ID           ((MV_6710_DEV_ID << 16) | MV_6710_A1_REV)
+#define MV_6710_A1_NAME         "MV6710 A1"
+
+#define MV_6W11_DEV_ID          0x6711
+#define MV_6W11_A0_REV          0x0
+#define MV_6W11_A0_ID           ((MV_6W11_DEV_ID << 16) | MV_6W11_A0_REV)
+#define MV_6W11_A0_NAME         "MV6W11 A0"
+
+#define MV_6W11_A1_REV          0x1
+#define MV_6W11_A1_ID           ((MV_6W11_DEV_ID << 16) | MV_6W11_A1_REV)
+#define MV_6W11_A1_NAME         "MV6W11 A1"
+
+#define MV_6707_DEV_ID          0x6707
+#define MV_6707_A0_REV          0x0
+#define MV_6707_A0_ID           ((MV_6707_DEV_ID << 16) | MV_6707_A0_REV)
+#define MV_6707_A0_NAME         "MV6707 A0"
+
+#define MV_6707_A1_REV          0x1
+#define MV_6707_A1_ID           ((MV_6707_DEV_ID << 16) | MV_6707_A1_REV)
+#define MV_6707_A1_NAME         "MV6707 A1"
+
+
+/* Armada XP Family */
+#define MV_78XX0		0x78000
+#define MV_78130_DEV_ID		0x7813
+#define MV_78160_DEV_ID		0x7816
+#define MV_78230_DEV_ID		0x7823
+#define MV_78260_DEV_ID		0x7826
+#define MV_78460_DEV_ID		0x7846
+#define MV_78000_DEV_ID		0x7888
+
+#define MV_FPGA_DEV_ID		0x2107
+
+#define MV_78XX0_Z1_REV		0x0
+
+#define MV_78130_Z1_ID		((MV_78130_DEV_ID << 16) | MV_78XX0_Z1_REV)
+#define MV_78130_Z1_NAME	"MV78130 Z1"
+
+#define MV_78160_Z1_ID		((MV_78160_DEV_ID << 16) | MV_78XX0_Z1_REV)
+#define MV_78160_Z1_NAME	"MV78160 Z1"
+
+#define MV_78230_Z1_ID		((MV_78230_DEV_ID << 16) | MV_78XX0_Z1_REV)
+#define MV_78230_Z1_NAME	"MV78230 Z1"
+
+#define MV_78260_Z1_ID		((MV_78260_DEV_ID << 16) | MV_78XX0_Z1_REV)
+#define MV_78260_Z1_NAME	"MV78260 Z1"
+
+#define MV_78460_Z1_ID		((MV_78460_DEV_ID << 16) | MV_78XX0_Z1_REV)
+#define MV_78460_Z1_NAME	"MV78460 Z1"
+
+#define MV_78XX0_A0_REV		0x1
+
+#define MV_78130_A0_ID         ((MV_78130_DEV_ID << 16) | MV_78XX0_A0_REV)
+#define MV_78130_A0_NAME       "MV78130 A0"
+
+#define MV_78160_A0_ID         ((MV_78160_DEV_ID << 16) | MV_78XX0_A0_REV)
+#define MV_78160_A0_NAME       "MV78160 A0"
+
+#define MV_78230_A0_ID         ((MV_78230_DEV_ID << 16) | MV_78XX0_A0_REV)
+#define MV_78230_A0_NAME       "MV78230 A0"
+
+#define MV_78260_A0_ID         ((MV_78260_DEV_ID << 16) | MV_78XX0_A0_REV)
+#define MV_78260_A0_NAME       "MV78260 A0"
+
+#define MV_78460_A0_ID         ((MV_78460_DEV_ID << 16) | MV_78XX0_A0_REV)
+#define MV_78460_A0_NAME       "MV78460 A0"
+
+#define MV_78XX0_B0_REV		0x2
+
+#define MV_78130_B0_ID         ((MV_78130_DEV_ID << 16) | MV_78XX0_B0_REV)
+#define MV_78130_B0_NAME       "MV78130 B0"
+
+#define MV_78160_B0_ID         ((MV_78160_DEV_ID << 16) | MV_78XX0_B0_REV)
+#define MV_78160_B0_NAME       "MV78160 B0"
+
+#define MV_78230_B0_ID         ((MV_78230_DEV_ID << 16) | MV_78XX0_B0_REV)
+#define MV_78230_B0_NAME       "MV78230 B0"
+
+#define MV_78260_B0_ID         ((MV_78260_DEV_ID << 16) | MV_78XX0_B0_REV)
+#define MV_78260_B0_NAME       "MV78260 B0"
+
+#define MV_78460_B0_ID         ((MV_78460_DEV_ID << 16) | MV_78XX0_B0_REV)
+#define MV_78460_B0_NAME       "MV78460 B0"
+
+/* Avanta LP Family */
+#define MV_88F66X0		0x6600
+#define MV_6610_DEV_ID		0x6610
+#define MV_6610F_DEV_ID		0x610F
+#define MV_6650_DEV_ID		0x6650
+#define MV_6650F_DEV_ID		0x650F
+#define MV_6658_DEV_ID		0x6658
+#define MV_6660_DEV_ID		0x6660
+#define MV_6665_DEV_ID		0x6665
+
+#define MV_88F66X0_Z1_ID	0x0
+#define MV_88F66X0_Z1_NAME      "Z1"
+#define MV_88F66X0_Z2_ID	0x1
+#define MV_88F66X0_Z2_NAME      "Z2"
+#define MV_88F66X0_Z3_ID	0x2
+#define MV_88F66X0_Z3_NAME      "Z3"
+#define MV_88F66XX_A0_ID	0x3
+#define MV_88F66XX_A0_NAME	"A0"
+
+#define MV_88F66X0_ID_ARRAY { \
+	MV_88F66X0_Z1_NAME,\
+	MV_88F66X0_Z2_NAME,\
+	MV_88F66X0_Z3_NAME,\
+	MV_88F66XX_A0_NAME \
+};
+
+/* Armada 375 Family */
+#define MV_88F67X0			0x6700
+#define MV_6720_DEV_ID		0x6720
+#define MV_88F6720_Z1_ID	0x0
+#define MV_88F6720_Z1_NAME	"Z1"
+#define MV_88F6720_Z2_ID	0x1
+#define MV_88F6720_Z2_NAME      "Z2"
+#define MV_88F6720_Z3_ID	0x2
+#define MV_88F6720_Z3_NAME      "Z3"
+#define MV_88F672X_A0_ID	0x3
+#define MV_88F672X_A0_NAME	"A0"
+
+#define MV_88F672X_ID_ARRAY { \
+	MV_88F6720_Z1_NAME,\
+	MV_88F6720_Z2_NAME,\
+	MV_88F6720_Z3_NAME,\
+	MV_88F672X_A0_NAME \
+};
+
+/* Armada 38x Family */
+#define MV_88F68XX		0x6800
+#define MV_6810_DEV_ID		0x6810
+#define MV_6811_DEV_ID		0x6811
+#define MV_6820_DEV_ID		0x6820
+#define MV_6828_DEV_ID		0x6828
+
+/* A38x revisions */
+#define MV_88F68XX_Z1_ID		0x0
+#define MV_88F68XX_Z1_NAME		"Z1"
+#define MV_88F68XX_A0_ID		0x4
+#define MV_88F68XX_A0_NAME		"A0"
+
+/* A39x revisions */
+#define MV_88F69XX_Z1_ID		0x2
+#define MV_88F69XX_Z1_NAME		"Z1"
+
+#define MV_88F68XX_69XX_ID_ARRAY { \
+	MV_88F68XX_Z1_NAME,\
+	NULL,\
+	MV_88F69XX_Z1_NAME,\
+	NULL,\
+	MV_88F68XX_A0_NAME,\
+};
+
+/* Armada 39x Family */
+#define MV_88F69XX		0x6900
+#define MV_6920_DEV_ID		0x6920
+#define MV_6928_DEV_ID		0x6928
+
+/* BobCat2  Family */
+#define MV_BOBCAT2_DEV_ID		0xFC00
+
+/* BobCat2  Revisions */
+#define MV_BOBCAT2_A0_ID		0x0
+#define MV_BOBCAT2_A0_NAME		"A0"
+#define MV_BOBCAT2_B0_ID		0x1
+#define MV_BOBCAT2_B0_NAME		"B0"
+
+#define MV_BOBCAT2_ID_ARRAY { \
+	MV_BOBCAT2_A0_NAME,\
+	MV_BOBCAT2_B0_NAME,\
+}
+
+ /* Lion2  Family */
+#define MV_LION2_DEV_ID		0x8000
+
+/* AlleyCat3  Family */
+#define MV_ALLEYCAT3_DEV_ID		0xF400
+
+/* AlleyCat3  Revisions */
+#define MV_ALLEYCAT3_A0_ID		0x3
+#define MV_ALLEYCAT3_A0_NAME	"A0"
+#define MV_ALLEYCAT3_A1_ID		0x4
+#define MV_ALLEYCAT3_A1_NAME	"A1"
+
+#define MV_ALLEYCAT3_ID_ARRAY { \
+	NULL,\
+	NULL,\
+	NULL,\
+	MV_ALLEYCAT3_A0_NAME,\
+	MV_ALLEYCAT3_A1_NAME,\
+}
+
+ /* IDT Swicth  */
+#define PCI_VENDOR_ID_IDT_SWITCH	0x111D
+#define MV_IDT_SWITCH_DEV_ID_808E	0x808E
+#define MV_IDT_SWITCH_DEV_ID_802B	0x802B
+
+#ifdef __cplusplus
+}
+#endif	/* __cplusplus */
+
+#endif				/* __INCmvDeviceIdh */
diff --git a/drivers/net/ethernet/mvebu_net/common/mvHalVer.h b/drivers/net/ethernet/mvebu_net/common/mvHalVer.h
new file mode 100644
index 000000000000..e6f3906fbe4e
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/common/mvHalVer.h
@@ -0,0 +1,72 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvHalVerh
+#define __INCmvHalVerh
+
+/* Defines */
+#define MV_HAL_VERSION			"FEROCEON_HAL_3_1_7"
+#define MV_RELEASE_BASELINE		"SoCandControllers_FEROCEON_RELEASE_7_9_2009_KW_4_3_4_DD_2_1_4_6183_1_1_4"
+
+#endif /* __INCmvHalVerh */
diff --git a/drivers/net/ethernet/mvebu_net/common/mvIpc.c b/drivers/net/ethernet/mvebu_net/common/mvIpc.c
new file mode 100644
index 000000000000..bd633d07f45c
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/common/mvIpc.c
@@ -0,0 +1,1395 @@
+/*******************************************************************************
+   Copyright (C) Marvell International Ltd. and its affiliates
+
+   This software file (the "File") is owned and distributed by Marvell
+   International Ltd. and/or its affiliates ("Marvell") under the following
+   alternative licensing terms.  Once you have made an election to distribute the
+   File under one of the following license alternatives, please (i) delete this
+   introductory statement regarding license alternatives, (ii) delete the two
+   license alternatives that you have not elected to use and (iii) preserve the
+   Marvell copyright notice above.
+
+********************************************************************************
+   Marvell Commercial License Option
+
+   If you received this File from Marvell and you have entered into a commercial
+   license agreement (a "Commercial License") with Marvell, the File is licensed
+   to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+   Marvell GPL License Option
+
+   If you received this File from Marvell, you may opt to use, redistribute and/or
+   modify this File in accordance with the terms and conditions of the General
+   Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+   available along with the File in the license.txt file or by writing to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+   on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+   THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+   WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+   DISCLAIMED.  The GPL License provides additional details about this warranty
+   disclaimer.
+********************************************************************************
+   Marvell BSD License Option
+
+   If you received this File from Marvell, you may opt to use, redistribute and/or
+   modify this File under the following licensing terms.
+   Redistribution and use in source and binary forms, with or without modification,
+   are permitted provided that the following conditions are met:
+
+*   Redistributions of source code must retain the above copyright notice,
+		this list of conditions and the following disclaimer.
+
+*   Redistributions in binary form must reproduce the above copyright
+        notice, this list of conditions and the following disclaimer in the
+        documentation and/or other materials provided with the distribution.
+
+*   Neither the name of Marvell nor the names of its contributors may be
+        used to endorse or promote products derived from this software without
+        specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+   ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+   DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+   ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+   (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+   LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+   ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvTypes.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "ctrlEnv/sys/mvCpuIf.h"
+#include "cpu/mvCpu.h"
+#include "mvIpc.h"
+#include "mvOs.h"
+
+#include "mv_ipc_common.h"
+
+//#define MV_IPC_DEBUG
+#ifdef MV_IPC_DEBUG
+#define mvIpcDbgPrintf mvOsPrintf
+#else
+#define mvIpcDbgPrintf(x ...)
+#endif
+
+#define mvIpcDbgWrite(x, y)  (x = y);
+/*#define mvIpcDbgWrite(x, y)*/
+
+#define mvIpcErrPrintf mvOsPrintf
+
+/*main data structure - links array*/
+MV_IPC_LINK mv_ipc_links[MV_IPC_LINKS_NUM];
+
+/***********************************************************************************
+ * mvIpcChannelsOffsetsFix
+ *
+ * DESCRIPTION:
+ *		This add base address to all addresses in link and channel structures
+ *
+ * INPUT:
+ *		link  - Link structure to be fixed
+ *		base  - Base address
+ * OUTPUT:
+ *       None
+ * RETURN:
+ *		status
+ *
+ ************************************************************************************/
+static MV_VOID mvIpcChannelsOffsetsFix(MV_IPC_LINK *link, MV_U32 base)
+{
+	int chnIdx;
+
+	/*Fixup all offsets to shmem to local addresses*/
+	for (chnIdx = 0; chnIdx < link->numOfChannels; chnIdx++) {
+		link->channels[chnIdx].rxMsgQueVa =
+			(MV_IPC_MSG *)(base + (MV_U32)link->channels[chnIdx].rxMsgQueVa);
+		link->channels[chnIdx].txMsgQueVa =
+			(MV_IPC_MSG *)(base + (MV_U32)link->channels[chnIdx].txMsgQueVa);
+
+		link->channels[chnIdx].rxCtrlMsg    = &link->channels[chnIdx].rxMsgQueVa[0];
+		link->channels[chnIdx].txCtrlMsg    = &link->channels[chnIdx].txMsgQueVa[0];
+
+		link->channels[chnIdx].txMessageFlag += base;
+		link->channels[chnIdx].rxMessageFlag += base;
+	}
+
+	link->txSharedHeapAddr += base;
+	link->rxSharedHeapAddr += base;
+}
+
+/***********************************************************************************
+ * mvIpcSlaveConfig
+ *
+ * DESCRIPTION:
+ *		This routine read configuration from shared memory and fill local
+ *		structires with data configured by Master.
+ *		Can be called from mvIpcInit or postponed by and called from mvIpcOpenChannel
+ *
+ * INPUT:
+ *		linkId  - Link id to be configred
+ * OUTPUT:
+ *       None
+ * RETURN:
+ *		status
+ *
+ ************************************************************************************/
+static MV_STATUS mvIpcSlaveConfig(MV_U32 linkId)
+{
+	MV_U32 chnIdx;
+	MV_IPC_LINK *link;
+	MV_U32 tempAddr;
+	MV_IPC_MSG *tempQueVa;
+
+	/* Verify parameters */
+	if (linkId > MV_IPC_LINKS_NUM) {
+		mvIpcErrPrintf("IPC ERROR: IPC Init: Bad link id %d\n", linkId);
+		return MV_FALSE;
+	}
+
+	link = &mv_ipc_links[linkId];
+	/*Read link structure from shared mem*/
+	mvOsMemcpy(link, mvIpcGetShmemAddr(linkId), sizeof(MV_IPC_LINK));
+
+	/*Override local paramters for link*/
+	link->nodeId            = mvIpcWhoAmI();
+	link->shmemBaseAddr = (MV_U32)mvIpcGetShmemAddr(linkId);
+	link->remoteNodeId      = mvIpcGetlinkRemoteNodeId(linkId);
+	link->channels = mvOsMalloc(sizeof(MV_IPC_CHANNEL) * link->numOfChannels);
+
+	/*Swap rx and tx fields for Heap region partition*/
+	tempAddr = link->txSharedHeapAddr;
+	link->txSharedHeapAddr = link->rxSharedHeapAddr;
+	link->rxSharedHeapAddr = tempAddr;
+	tempAddr = link->txSharedHeapSize;
+	link->txSharedHeapSize = link->rxSharedHeapSize;
+	link->rxSharedHeapSize = tempAddr;
+
+	/* Initialize all channels */
+	for (chnIdx = 0; chnIdx < link->numOfChannels; chnIdx++) {
+		/*Read channel structure from shared mem*/
+		mvOsMemcpy(&link->channels[chnIdx],
+			   (MV_VOID *)(link->shmemBaseAddr + sizeof(MV_IPC_LINK) + (chnIdx * sizeof(MV_IPC_CHANNEL))),
+			   sizeof(MV_IPC_CHANNEL));
+
+		link->channels[chnIdx].state        = MV_CHN_CLOSED;
+		link->channels[chnIdx].txEnable     = MV_FALSE;
+		link->channels[chnIdx].rxEnable     = MV_FALSE;
+		link->channels[chnIdx].nextRxMsgIdx = 1;
+		link->channels[chnIdx].nextTxMsgIdx = 1;
+
+		/*Swap RX and TX queue start */
+		tempQueVa = link->channels[chnIdx].rxMsgQueVa;
+		link->channels[chnIdx].rxMsgQueVa = link->channels[chnIdx].txMsgQueVa;
+		link->channels[chnIdx].txMsgQueVa   = tempQueVa;
+
+		mvIpcDbgPrintf("IPC HAL: Init channel %d with RxQ = 0x%08x; TxQ = 0x%08x\n",
+			       chnIdx, (unsigned int)link->channels[chnIdx].rxMsgQueVa,
+			       (unsigned int)link->channels[chnIdx].txMsgQueVa);
+
+		/*Set rx and tx functions*/
+		link->channels[chnIdx].sendTrigger = mvIpcGetChnTxHwPtr(linkId);
+		link->channels[chnIdx].registerChnInISR = mvIpcGetChnRxHwPtr(linkId);
+
+		tempAddr = link->channels[chnIdx].txMessageFlag;
+		link->channels[chnIdx].txMessageFlag = link->channels[chnIdx].rxMessageFlag;
+		link->channels[chnIdx].rxMessageFlag = tempAddr;
+	}
+
+	/*Fixup all offsets to shmem to local addresses*/
+	mvIpcChannelsOffsetsFix(link, link->shmemBaseAddr);
+
+	return MV_OK;
+}
+
+/***********************************************************************************
+ * mvIpcLinkStart
+ *
+ * DESCRIPTION:
+ *		Initializes the IPC mechanism. reset all queues and sets global variables
+ *
+ * INPUT:
+ *		linkId  - Link id to be configred
+ * OUTPUT:
+ *       None
+ * RETURN:
+ *		MV_OK or MV_ERROR
+ *
+ ************************************************************************************/
+MV_STATUS mvIpcLinkStart(MV_U32 linkId)
+{
+	MV_U32 chnIdx;
+	MV_IPC_LINK             *link;
+	/*runningOffset is offset in shared memory,
+	used to compute addreses of queues and heap*/
+	MV_U32 runningOffset = 0, flagsOffset;
+	MV_U32 heapSize;
+
+	/* Verify parameters */
+	if (linkId > MV_IPC_LINKS_NUM) {
+		mvIpcErrPrintf("IPC ERROR: IPC Init: Bad link id %d\n", linkId);
+		return MV_FALSE;
+	}
+
+	link = &mv_ipc_links[linkId];
+
+	if (MV_TRUE == mvIpcGetlinkMaster(linkId)) {
+		/*master configuration*/
+
+		link->nodeId            = mvIpcWhoAmI();
+		link->shmemBaseAddr =   (MV_U32)mvIpcGetShmemAddr(linkId);
+		link->shmemSize =               (MV_U32)mvIpcGetShmemSize(linkId);
+		link->numOfChannels     = mvIpcChnNum(linkId);
+		link->remoteNodeId      = mvIpcGetlinkRemoteNodeId(linkId);
+		link->channels = mvOsMalloc(sizeof(MV_IPC_CHANNEL) * link->numOfChannels);
+
+		/*Skip the control structures in Shared mem*/
+		/*Note: all pointers to shmem will be offsets,
+		after controle structures will be copied ti shmem, them will be fixed to addresses*/
+		runningOffset += sizeof(MV_IPC_LINK);
+		runningOffset += sizeof(MV_IPC_CHANNEL) * link->numOfChannels;
+		/*Skip the RX/TX flags in Shared mem*/
+		flagsOffset = runningOffset;
+		runningOffset += 2 * sizeof(MV_U32) * link->numOfChannels;
+
+		/* Initialize all channels */
+		for (chnIdx = 0; chnIdx < link->numOfChannels; chnIdx++) {
+			link->channels[chnIdx].state        = MV_CHN_CLOSED;
+			link->channels[chnIdx].txEnable     = MV_FALSE;
+			link->channels[chnIdx].rxEnable     = MV_FALSE;
+			link->channels[chnIdx].queSizeInMsg = mvIpcGetChnQueueSize(linkId, chnIdx);
+			link->channels[chnIdx].nextRxMsgIdx = 1;
+			link->channels[chnIdx].nextTxMsgIdx = 1;
+
+			/*set RX queue start move offset to queue size * message size*/
+			link->channels[chnIdx].rxMsgQueVa   = (MV_IPC_MSG *)runningOffset;
+			runningOffset += link->channels[chnIdx].queSizeInMsg * sizeof(MV_IPC_MSG);
+
+			/*set TX queue start move offset to queue size * message size*/
+			link->channels[chnIdx].txMsgQueVa   = (MV_IPC_MSG *)runningOffset;
+			runningOffset += link->channels[chnIdx].queSizeInMsg * sizeof(MV_IPC_MSG);
+
+			mvOsMemset((MV_VOID *)(link->shmemBaseAddr + (MV_U32)link->channels[chnIdx].rxMsgQueVa), 0,
+				   link->channels[chnIdx].queSizeInMsg * sizeof(MV_IPC_MSG));
+			mvOsMemset((MV_VOID *)(link->shmemBaseAddr + (MV_U32)link->channels[chnIdx].txMsgQueVa), 0,
+				   link->channels[chnIdx].queSizeInMsg * sizeof(MV_IPC_MSG));
+
+			mvIpcDbgPrintf("IPC HAL: Init channel %d with RxQ = 0x%08x; TxQ = 0x%08x\n",
+				       chnIdx, (unsigned int)link->channels[chnIdx].rxMsgQueVa,
+				       (unsigned int)link->channels[chnIdx].txMsgQueVa);
+
+			/*Set rx and tx functions*/
+			link->channels[chnIdx].sendTrigger = mvIpcGetChnTxHwPtr(linkId);
+			link->channels[chnIdx].registerChnInISR = mvIpcGetChnRxHwPtr(linkId);
+
+			link->channels[chnIdx].txMessageFlag = flagsOffset + 2 * chnIdx * sizeof(MV_U32);
+			link->channels[chnIdx].rxMessageFlag = flagsOffset + (2 * chnIdx + 1) * sizeof(MV_U32);
+		}
+
+		/*Check if we have enouth shared memory for all channels*/
+		if (runningOffset > mvIpcGetShmemSize(linkId)) {
+			mvIpcDbgPrintf("IPC HAL: Init channels allocated 0x%X bytes, shmem is 0x%X bytes\n",
+				       runningOffset, mvIpcGetShmemSize(linkId));
+
+			return MV_FAIL;
+		}
+
+		/*Heap region partition*/
+		heapSize = mvIpcGetShmemSize(linkId) - runningOffset;
+		link->txSharedHeapAddr = runningOffset;
+		link->txSharedHeapSize = (heapSize * mvIpcGetFreeMemMasterPercent(linkId))/100;
+		runningOffset += link->txSharedHeapSize;
+		link->rxSharedHeapAddr = runningOffset;
+		link->rxSharedHeapSize = mvIpcGetShmemSize(linkId) - runningOffset;
+
+		/*Link and channel structures ready, copy channels first to shared mem*/
+		runningOffset = sizeof(MV_IPC_LINK);
+		for (chnIdx = 0; chnIdx < link->numOfChannels; chnIdx++) {
+			mvOsMemcpy((MV_VOID *)(link->shmemBaseAddr + runningOffset),
+				   &link->channels[chnIdx], sizeof(MV_IPC_CHANNEL));
+			runningOffset += sizeof(MV_IPC_CHANNEL);
+		}
+
+		/*Set magic value in link structures and copy to shared memory,
+		this is ready state for client */
+		link->masterConfigDone = MV_IPC_MASTER_CONFIG_MAGIC;
+		mvOsMemcpy((MV_VOID *)link->shmemBaseAddr, link, sizeof(MV_IPC_LINK));
+
+		/*Fixup all offsets to shmem to local addresses*/
+		mvIpcChannelsOffsetsFix(link, link->shmemBaseAddr);
+
+		mvIpcDbgPrintf("IPC HAL: Initialized interface as Master\n");
+	} else {
+		/*Slave configuration*/
+
+		/*Read link structure from shared mem*/
+		mvOsMemcpy((MV_VOID *)link, mvIpcGetShmemAddr(linkId), sizeof(MV_IPC_LINK));
+		if (link->masterConfigDone == MV_IPC_MASTER_CONFIG_MAGIC) {
+			/*Master finished the init, Slave get the configuration*/
+			mvIpcSlaveConfig(linkId);
+
+			/*Clear magic*/
+			link->masterConfigDone = 0;
+			mvOsMemcpy((MV_VOID *)link->shmemBaseAddr, link, sizeof(MV_IPC_LINK));
+			link->slaveLinkInitialized = 0;
+
+			mvIpcDbgPrintf("IPC HAL: Initialized interface as Slave\n");
+		} else {
+			/*postpone the Slave init, will be done in mvIpcOpenChannel*/
+			link->slaveLinkInitialized = MV_IPC_MASTER_CONFIG_MAGIC;
+			mvIpcDbgPrintf("IPC HAL: Initialized interface as Slave, config postponed\n");
+		}
+	}
+
+	return MV_OK;
+}
+
+/***********************************************************************************
+ * mvIpcClose
+ *
+ * DESCRIPTION:
+ *		Closes all IPC channels
+ *
+ * INPUT:
+ *		linkId  - Link id to be configred
+ * OUTPUT:
+ *       None
+ * RETURN:
+ *		MV_OK or MV_ERROR
+ *
+ ************************************************************************************/
+MV_STATUS mvIpcClose(MV_U32 linkId)
+{
+	MV_IPC_LINK             *link;
+	MV_U32 chnIdx;
+
+	/* Verify parameters */
+	if (linkId > MV_IPC_LINKS_NUM) {
+		mvIpcErrPrintf("IPC ERROR: IPC close: Bad link id %d\n", linkId);
+		return MV_FALSE;
+	}
+
+	link = &mv_ipc_links[linkId];
+
+	/* De-activate all channels */
+	for (chnIdx = 0; chnIdx < link->numOfChannels; chnIdx++) {
+		if (link->channels[chnIdx].state == MV_CHN_ATTACHED)
+			mvIpcDettachChannel(linkId, chnIdx);
+
+		if (link->channels[chnIdx].state == MV_CHN_OPEN)
+			mvIpcCloseChannel(linkId, chnIdx);
+	}
+
+	mvIpcDbgPrintf("IPC HAL: CLosed IPC interface\n");
+
+	return MV_OK;
+}
+
+/***********************************************************************************
+ * mvIpcOpenChannel
+ *
+ * DESCRIPTION:
+ *		Opens a ipc channel and prepares it for receiving messages
+ *
+ * INPUT:
+ *		linkId  - Link id to open
+ *		chnId - the channel ID to open
+ * OUTPUT:
+ *       None
+ * RETURN:
+ *		MV_OK or MV_ERROR
+ *		MV_NOT_STARTED if slave and master still not wake.
+ *
+ ************************************************************************************/
+MV_STATUS mvIpcOpenChannel(MV_U32 linkId, MV_U32 chnId, MV_IPC_RX_CLBK rx_clbk)
+{
+	MV_IPC_LINK             *link;
+	MV_IPC_CHANNEL  *chn;
+	MV_U32 msgId;
+	MV_STATUS status;
+
+	/* Verify parameters */
+	if (linkId > MV_IPC_LINKS_NUM) {
+		mvIpcErrPrintf("IPC ERROR: Open Chn: Bad link id %d\n", chnId);
+		return MV_FALSE;
+	}
+
+	link = &mv_ipc_links[linkId];
+
+	/*Check if posponed Slave init needed*/
+	if (link->slaveLinkInitialized == MV_IPC_MASTER_CONFIG_MAGIC) {
+		/*Read link structure from shared mem*/
+		mvOsMemcpy((MV_VOID *)link, mvIpcGetShmemAddr(linkId), sizeof(MV_IPC_LINK));
+		if (link->masterConfigDone == MV_IPC_MASTER_CONFIG_MAGIC) {
+			/*Master finished the init, Slave get the configuration*/
+			status = mvIpcSlaveConfig(linkId);
+			mvIpcErrPrintf("IPC MESSG: Open Chn:Postponed init done with status %d\n", status);
+
+			/*Clear magic*/
+			link->masterConfigDone = 0;
+			mvOsMemcpy((MV_VOID *)link->shmemBaseAddr, link, sizeof(MV_IPC_LINK));
+			link->slaveLinkInitialized = 0;
+		} else {
+			/*Master still not wake, cannot open the channel*/
+			mvIpcErrPrintf("IPC WARNG: Open Chn: Master not ready\n");
+			link->slaveLinkInitialized = MV_IPC_MASTER_CONFIG_MAGIC;
+			return MV_NOT_STARTED;
+		}
+	}
+
+	if (chnId > link->numOfChannels) {
+		mvIpcErrPrintf("IPC ERROR: Open Chn: Bad channel id %d\n", chnId);
+		return MV_FALSE;
+	}
+
+	chn = &link->channels[chnId];
+
+	if (chn->state != MV_CHN_CLOSED) {
+		mvIpcErrPrintf("IPC ERROR: Can't open channel %d. It is already open %d\n",
+			       chnId, chn->state);
+		return MV_ERROR;
+	}
+
+	/* Initialize the transmit queue */
+	for (msgId = 0; msgId < chn->queSizeInMsg; msgId++)
+		chn->txMsgQueVa[msgId].isUsed = MV_FALSE;
+
+	/* Initialize channel members */
+	chn->state                = MV_CHN_OPEN;
+	chn->nextRxMsgIdx = 1;
+	chn->nextTxMsgIdx = 1;
+	chn->rxEnable     = MV_TRUE;
+	chn->rxCallback   = rx_clbk;
+
+	mvIpcDbgPrintf("IPC HAL: Opened channel %d successfully\n", chnId);
+
+	return MV_OK;
+}
+
+/***********************************************************************************
+ * mvIpcAckAttach
+ *
+ * DESCRIPTION:
+ *		Acknowledges and Attach request from receiver.
+ *
+ * INPUT:
+ *		linkId  - the link ID
+ *		chnId - the channel ID
+ *		cpuId - the CPU ID to attach to
+ *		acknowledge - do i need to acknowledge the message
+ * OUTPUT:
+ *       None
+ * RETURN:
+ *		MV_OK or MV_ERROR
+ *
+ ************************************************************************************/
+static MV_STATUS mvIpcAckAttach(MV_U32 linkId, MV_U32 chnId, MV_BOOL acknowledge)
+{
+	MV_IPC_LINK             *link;
+	MV_IPC_CHANNEL  *chn;
+	MV_IPC_MSG attachMsg;
+	MV_STATUS status;
+
+	/* Verify parameters */
+	if (linkId > MV_IPC_LINKS_NUM) {
+		mvIpcErrPrintf("IPC ERROR: Ack attach: Bad link id %d\n", chnId);
+		return MV_FALSE;
+	}
+
+	link = &mv_ipc_links[linkId];
+
+	if (chnId > link->numOfChannels) {
+		mvIpcErrPrintf("IPC ERROR:Ack attach: Bad channel id %d\n", chnId);
+		return MV_FALSE;
+	}
+
+	chn = &link->channels[chnId];
+
+	/* Cannot acknowledge remote attach until local attach was requested*/
+	if ((chn->state != MV_CHN_ATTACHED) && (chn->state != MV_CHN_LINKING)) {
+		mvIpcDbgPrintf("IPC HAL: Can't acknowledge attach. channel in state %d\n", chn->state);
+		return MV_ERROR;
+	}
+
+	if (acknowledge == MV_TRUE) {
+		/* Check that channel is not already coupled to another CPU*/
+		if (chn->remoteNodeId != link->remoteNodeId) {
+			mvIpcDbgPrintf("IPC HAL: Can't acknowledge attach. CPU %d != %d\n",
+				       chn->remoteNodeId, link->remoteNodeId);
+			return MV_ERROR;
+		}
+
+		mvIpcDbgPrintf("IPC HAL: Acknowledging attach from CPU %d\n", link->remoteNodeId);
+
+		/* Send the attach acknowledge message */
+		attachMsg.type  = IPC_MSG_ATTACH_ACK;
+		attachMsg.value = link->remoteNodeId;
+		attachMsg.size  = 0;
+		attachMsg.ptr   = 0;
+		status = mvIpcTxCtrlMsg(linkId, chnId, &attachMsg);
+		if (status != MV_OK) {
+			mvIpcErrPrintf("IPC ERROR: Cannot Send attach acknowledge message\n");
+			return MV_ERROR;
+		}
+	}
+
+	/* Now change my own state to attached */
+	chn->state = MV_CHN_ATTACHED;
+
+	return MV_OK;
+}
+
+/***********************************************************************************
+ * mvIpcAckDetach
+ *
+ * DESCRIPTION:
+ *		Acknowledges detach request from receiver. this closes the channel for
+ *		transmission and resets the queues
+ *
+ * INPUT:
+ *		linkId  - the link ID
+ *		chnId - the channel ID
+ *		acknowledge - do i need to acknowledge the message
+ * OUTPUT:
+ *       None
+ * RETURN:
+ *		MV_OK or MV_ERROR
+ *
+ ************************************************************************************/
+static MV_STATUS mvIpcAckDetach(MV_U32 linkId, MV_U32 chnId, MV_BOOL acknowledge)
+{
+	MV_IPC_LINK             *link;
+	MV_IPC_CHANNEL *chn;
+	MV_IPC_MSG dettachMsg;
+	MV_STATUS status;
+	MV_U32 msgId;
+
+	/* Verify parameters */
+	if (linkId > MV_IPC_LINKS_NUM) {
+		mvIpcErrPrintf("IPC ERROR: Ack detach: Bad link id %d\n", chnId);
+		return MV_FALSE;
+	}
+
+	link = &mv_ipc_links[linkId];
+
+	if (chnId > link->numOfChannels) {
+		mvIpcErrPrintf("IPC ERROR:Ack detach: Bad channel id %d\n", chnId);
+		return MV_FALSE;
+	}
+
+	chn = &link->channels[chnId];
+
+	/* Cannot acknowledge remote detach until local attach was requested*/
+	if ((chn->state != MV_CHN_ATTACHED) && (chn->state != MV_CHN_UNLINKING)) {
+		mvIpcDbgPrintf("IPC HAL: Can't acknowledge detach. channel in state %d\n", chn->state);
+		return MV_ERROR;
+	}
+
+	if (acknowledge == MV_TRUE) {
+		/* Send the attach acknowledge message */
+		dettachMsg.type  = IPC_MSG_DETACH_ACK;
+		dettachMsg.size  = 0;
+		dettachMsg.ptr   = 0;
+		dettachMsg.value = 0;
+
+		status = mvIpcTxCtrlMsg(linkId, chnId, &dettachMsg);
+		if (status != MV_OK) {
+			mvIpcErrPrintf("IPC ERROR: Cannot Send dettach acknowledge message\n");
+			return MV_ERROR;
+		}
+	}
+
+	/* Now change my own state to attached */
+	chn->state                = MV_CHN_OPEN;
+	chn->txEnable     = MV_FALSE;
+	chn->nextRxMsgIdx = 1;
+	chn->nextTxMsgIdx = 1;
+
+	/* Initialize the transmit queue */
+	for (msgId = 1; msgId < chn->queSizeInMsg; msgId++)
+		chn->txMsgQueVa[msgId].isUsed = MV_FALSE;
+
+	return MV_OK;
+
+	mvIpcDbgPrintf("IPC HAL: Acknowledging dettach message\n");
+}
+
+/***********************************************************************************
+ * mvIpcReqAttach
+ *
+ * DESCRIPTION:
+ *		Ask receiver to acknowledge attach request. To verify reception, message
+ *		transmission is possible only after receiver acknowledges the attach
+ *
+ * INPUT:
+ *		chn   - pointer to channel structure
+ *		chnId - the channel ID
+ * OUTPUT:
+ *       None
+ * RETURN:
+ *		MV_OK or MV_ERROR
+ *
+ ************************************************************************************/
+static MV_STATUS mvIpcReqAttach(MV_U32 linkId, MV_IPC_CHANNEL *chn, MV_U32 chnId)
+{
+	MV_IPC_MSG attachMsg;
+	MV_STATUS status;
+	int backoff = 10, timeout = 10;
+
+	mvIpcDbgPrintf("IPC HAL: Requesting attach from cpu %d\n", chn->remoteNodeId);
+
+	/* Send the attach message */
+	attachMsg.type  = IPC_MSG_ATTACH_REQ;
+	attachMsg.value = mvIpcWhoAmI();
+	status = mvIpcTxCtrlMsg(linkId, chnId, &attachMsg);
+	if (status != MV_OK) {
+		mvIpcErrPrintf("IPC ERROR: Cannot Send attach req message\n");
+		return MV_ERROR;
+	}
+
+	/* Give the receiver 10 seconds to reply */
+	while ((chn->state != MV_CHN_ATTACHED) && timeout) {
+		udelay(backoff);
+		timeout--;
+	}
+
+	if (chn->state != MV_CHN_ATTACHED) {
+		mvIpcDbgPrintf("IPC HAL: Cannot complete attach sequence. no reply from receiver after %d usec\n",
+			       timeout * backoff);
+		return MV_ERROR;
+	}
+
+	mvIpcDbgPrintf("IPC HAL: Attached channel %d\n", chnId);
+
+	return MV_OK;
+}
+
+/***********************************************************************************
+ * mvIpcAttachChannel
+ *
+ * DESCRIPTION:
+ *		Attempts to attach the TX queue to a remote CPU by sending a ATTACH ACK
+ *		messages to receiver. if the message is acknowledged the the channel state
+ *		becomes attached and message transmission is enabled.
+ *
+ * INPUT:
+ *		linkId  - the link ID
+ *		chnId           - The channel ID
+ *		remoteNodeId - CPU ID of receiver
+ * OUTPUT:
+ *		attached   - indicates if channel is attached
+ * RETURN:
+ *		MV_OK or MV_ERROR
+ *
+ ************************************************************************************/
+MV_STATUS mvIpcAttachChannel(MV_U32 linkId, MV_U32 chnId, MV_U32 remoteNodeId, MV_BOOL *attached)
+{
+	MV_IPC_LINK             *link;
+	MV_IPC_CHANNEL *chn;
+	MV_U32 msgId;
+	MV_STATUS status;
+
+	(*attached) = 0;
+
+	/* Verify parameters */
+	if (linkId > MV_IPC_LINKS_NUM) {
+		mvIpcErrPrintf("IPC ERROR: Chn attach: Bad link id %d\n", chnId);
+		return MV_FALSE;
+	}
+
+	link = &mv_ipc_links[linkId];
+
+	if (chnId > link->numOfChannels) {
+		mvIpcErrPrintf("IPC ERROR: Chn attach: Bad channel id %d\n", chnId);
+		return MV_FALSE;
+	}
+
+	chn = &link->channels[chnId];
+
+	if (chn->state == MV_CHN_CLOSED) {
+		mvIpcErrPrintf("IPC ERROR: Can't attach channel %d. It is closed\n", chnId);
+		return MV_ERROR;
+	}
+
+	if (chn->state == MV_CHN_ATTACHED) {
+		(*attached) = 1;
+		return MV_OK;
+	}
+
+	chn->state                = MV_CHN_LINKING;
+	chn->remoteNodeId  = remoteNodeId;
+	chn->txEnable     = MV_TRUE;
+
+	/* Initialize the transmit queue */
+	for (msgId = 1; msgId < chn->queSizeInMsg; msgId++)
+		chn->txMsgQueVa[msgId].isUsed = MV_FALSE;
+
+	/* Send req for attach to other side */
+	status = mvIpcReqAttach(linkId, chn, chnId);
+	if (status == MV_OK) {
+		(*attached) = 1;
+		mvIpcDbgPrintf("IPC HAL: Attached channel %d to link %d\n", chnId, linkId);
+	}
+
+	return MV_OK;
+}
+
+/***********************************************************************************
+ * mvIpcDettachChannel
+ *
+ * DESCRIPTION:
+ *		Detaches the channel from remote cpu. it notifies the remote cpu by sending
+ *		control message and waits for acknowledge. after calling this function
+ *		data messages cannot be sent anymore
+ *
+ * INPUT:
+ *		linkId  - the link ID
+ *		chnId           - The channel ID
+ * OUTPUT:
+ *       None
+ * RETURN:
+ *		MV_OK or MV_ERROR
+ *
+ ************************************************************************************/
+MV_STATUS mvIpcDettachChannel(MV_U32 linkId, MV_U32 chnId)
+{
+	MV_IPC_LINK             *link;
+	MV_IPC_CHANNEL *chn;
+	MV_IPC_MSG msg;
+	MV_STATUS status;
+
+	/* Verify parameters */
+	if (linkId > MV_IPC_LINKS_NUM) {
+		mvIpcErrPrintf("IPC ERROR: Chn detach: Bad link id %d\n", chnId);
+		return MV_FALSE;
+	}
+
+	link = &mv_ipc_links[linkId];
+
+	if (chnId > link->numOfChannels) {
+		mvIpcErrPrintf("IPC ERROR: Chn detach: Bad channel id %d\n", chnId);
+		return MV_FALSE;
+	}
+
+	chn = &link->channels[chnId];
+
+	if (chn->state != MV_CHN_ATTACHED) {
+		mvIpcErrPrintf("IPC ERROR: Detach: channel %d is not attached\n", chnId);
+		return MV_ERROR;
+	}
+
+	msg.type  = IPC_MSG_DETACH_REQ;
+	msg.size  = 0;
+	msg.ptr   = 0;
+	msg.value = 0;
+
+	status = mvIpcTxCtrlMsg(linkId, chnId, &msg);
+	if (status != MV_OK) {
+		mvIpcErrPrintf("IPC ERROR: Cannot Send detach request message\n");
+		return MV_ERROR;
+	}
+
+	chn->remoteNodeId  = 0;
+	chn->state        = MV_CHN_UNLINKING;
+
+	return MV_OK;
+}
+
+/***********************************************************************************
+ * mvIpcCloseChannel - CLose and IPC channel
+ *
+ * DESCRIPTION:
+ *		Closes the      IPC channels. this disables the channels ability to receive messages
+ *
+ * INPUT:
+ *		linkId          - the link ID
+ *		chnId           - The channel ID
+ * OUTPUT:
+ *       None
+ * RETURN:
+ *		MV_OK or MV_ERROR
+ *
+ ************************************************************************************/
+MV_STATUS mvIpcCloseChannel(MV_U32 linkId, MV_U32 chnId)
+{
+	MV_IPC_LINK             *link;
+	MV_IPC_CHANNEL *chn;
+
+	/* Verify parameters */
+	if (linkId > MV_IPC_LINKS_NUM) {
+		mvIpcErrPrintf("IPC ERROR: Chn close: Bad link id %d\n", chnId);
+		return MV_FALSE;
+	}
+
+	link = &mv_ipc_links[linkId];
+
+	if (chnId > link->numOfChannels) {
+		mvIpcErrPrintf("IPC ERROR: Chn close: Bad channel id %d\n", chnId);
+		return MV_FALSE;
+	}
+
+	chn = &link->channels[chnId];
+
+	if (chn->state == MV_CHN_CLOSED) {
+		mvIpcErrPrintf("IPC ERROR: Close channel: Channel %d is already closed\n", chnId);
+		return MV_ERROR;
+	}
+
+	chn->state       = MV_CHN_CLOSED;
+	chn->txEnable    = MV_FALSE;
+	chn->rxEnable    = MV_FALSE;
+	chn->remoteNodeId = 0;
+
+	mvIpcDbgPrintf("IPC HAL: Closed channel %d successfully\n", chnId);
+
+	return MV_OK;
+}
+
+/***********************************************************************************
+ * mvIpcIsTxReady
+ *
+ * DESCRIPTION:
+ *		Checks if the channel is ready to transmit
+ *
+ * INPUT:
+ *		chnId           - The channel ID
+ * OUTPUT:
+ *       None
+ * RETURN:
+ *		MV_OK or MV_ERROR
+ *
+ ************************************************************************************/
+MV_BOOL mvIpcIsTxReady(MV_U32 linkId, MV_U32 chnId)
+{
+	MV_IPC_LINK             *link;
+	MV_IPC_CHANNEL *chn;
+
+	/* Verify parameters */
+	if (linkId > MV_IPC_LINKS_NUM) {
+		mvIpcErrPrintf("IPC ERROR: Chn is ready: Bad link id %d\n", chnId);
+		return MV_FALSE;
+	}
+
+	link = &mv_ipc_links[linkId];
+
+	if (chnId > link->numOfChannels) {
+		mvIpcErrPrintf("IPC ERROR: Chn is ready: Bad channel id %d\n", chnId);
+		return MV_FALSE;
+	}
+
+	chn = &link->channels[chnId];
+
+	if (chn->state != MV_CHN_ATTACHED) {
+		mvIpcErrPrintf("IPC ERROR: Tx Test: channel not attached, state is %d\n", chn->state);
+		return MV_FALSE;
+	}
+
+	/* Is next message still used by receiver, yes means full queue or bug */
+	if (chn->txMsgQueVa[chn->nextTxMsgIdx].isUsed != MV_FALSE) {
+		mvIpcDbgPrintf("IPC HAL: Tx Test: Can't send, Msg %d used flag = %d\n",
+			       chn->nextTxMsgIdx, chn->txMsgQueVa[chn->nextTxMsgIdx].isUsed);
+		return MV_FALSE;
+	}
+
+	return MV_TRUE;
+}
+
+/***********************************************************************************
+ * mvIpcTxCtrlMsg
+ *
+ * DESCRIPTION:
+ *		Sends a control message to other side. these messages are not forwarded
+ *		to user
+ *
+ * INPUT:
+ *		linkId  - the link ID
+ *		chnId - The channel ID
+ *		inMsg - Pointer to message to send
+ * OUTPUT:
+ *       None
+ * RETURN:
+ *		MV_OK or MV_ERROR
+ *
+ ************************************************************************************/
+MV_STATUS mvIpcTxCtrlMsg(MV_U32 linkId, MV_U32 chnId, MV_IPC_MSG *inMsg)
+{
+	MV_IPC_LINK             *link;
+	MV_IPC_CHANNEL *chn;
+
+	if (linkId > MV_IPC_LINKS_NUM) {
+		mvIpcErrPrintf("IPC ERROR: Tx Ctrl Msg: Bad link id %d\n", chnId);
+		return MV_FALSE;
+	}
+
+	link = &mv_ipc_links[linkId];
+
+	if (chnId > link->numOfChannels) {
+		mvIpcErrPrintf("IPC ERROR: Tx Ctr Msg: Bad channel id %d\n", chnId);
+		return MV_FALSE;
+	}
+
+	chn = &link->channels[chnId];
+
+	if (chn->txEnable == MV_FALSE) {
+		mvIpcErrPrintf("IPC ERROR: Tx Ctrl msg: Tx not enabled\n");
+		return MV_ERROR;
+	}
+
+	/* Write the message and pass */
+	chn->txCtrlMsg->type  = inMsg->type;
+	chn->txCtrlMsg->size  = inMsg->size;
+	chn->txCtrlMsg->ptr   = inMsg->ptr;
+	chn->txCtrlMsg->value = inMsg->value;
+
+	/* Make sure the msg values are written before the used flag
+	 * to ensure the polling receiver will get valid message once
+	 * it detects isUsed == MV_TRUE.
+	 */
+	mvOsSync();
+
+	chn->txCtrlMsg->isUsed   = MV_TRUE;
+
+	mvIpcDbgWrite(chn->txCtrlMsg->align[0], MV_IPC_HAND_SHAKE_MAGIC);
+	mvIpcDbgWrite(chn->txCtrlMsg->align[1], 0);
+	mvIpcDbgWrite(chn->txCtrlMsg->align[2], 0);
+
+	mvIpcDbgPrintf("IPC HAL: Sent control message 0x%8x on channel %d to link %d\n",
+			(int)chn->txCtrlMsg, chnId, linkId);
+
+	/*Raise the TX ready flag and send the trigger*/
+	*((MV_U32 *)chn->txMessageFlag) = 0x1;
+	chn->sendTrigger(chn->remoteNodeId, chnId);
+
+	return MV_OK;
+}
+
+/***********************************************************************************
+ * mvIpcTxMsg
+ *
+ * DESCRIPTION:
+ *		Main transmit function
+ *
+ * INPUT:
+ *		linkId  - the link ID
+ *		chnId - The channel ID
+ *		inMsg - Pointer to message to send
+ * OUTPUT:
+ *       None
+ * RETURN:
+ *		MV_OK or MV_ERROR
+ *
+ ************************************************************************************/
+MV_STATUS mvIpcTxMsg(MV_U32 linkId, MV_U32 chnId, MV_IPC_MSG *inMsg)
+{
+	MV_IPC_LINK             *link;
+	MV_IPC_CHANNEL *chn;
+	MV_IPC_MSG     *currMsg;
+
+	if (linkId > MV_IPC_LINKS_NUM) {
+		mvIpcErrPrintf("IPC ERROR: Tx Msg: Bad link id %d\n", chnId);
+		return MV_FALSE;
+	}
+
+	link = &mv_ipc_links[linkId];
+
+	if (chnId > link->numOfChannels) {
+		mvIpcErrPrintf("IPC ERROR: Tx Msg: Bad channel id %d\n", chnId);
+		return MV_FALSE;
+	}
+
+	chn = &link->channels[chnId];
+
+	/*Test if TX ready to send*/
+	if (chn->state != MV_CHN_ATTACHED) {
+		mvIpcErrPrintf("IPC ERROR: Tx Msg: channel not attached, state is %d\n", chn->state);
+		return MV_FALSE;
+	}
+
+	/* Is next message still used by receiver, yes means full queue or bug */
+	if (chn->txMsgQueVa[chn->nextTxMsgIdx].isUsed != MV_FALSE) {
+		mvIpcDbgPrintf("IPC HAL: Tx Msg: Can't send, Msg %d used flag = %d\n",
+			chn->nextTxMsgIdx, chn->txMsgQueVa[chn->nextTxMsgIdx].isUsed);
+		return MV_FALSE;
+	}
+
+	/* Write the message */
+	currMsg  = &chn->txMsgQueVa[chn->nextTxMsgIdx];
+
+	currMsg->type  = inMsg->type;
+	currMsg->size  = inMsg->size;
+	currMsg->ptr   = inMsg->ptr;
+	currMsg->value = inMsg->value;
+
+	/* Make sure the msg values are written before the used flag
+	 * to ensure the polling receiver will get valid message once
+	 * it detects isUsed == MV_TRUE.
+	 */
+	mvOsSync();
+
+	/* Pass ownership to remote cpu */
+	currMsg->isUsed   = MV_TRUE;
+
+	mvIpcDbgWrite(currMsg->align[0], MV_IPC_HAND_SHAKE_MAGIC);
+	mvIpcDbgWrite(currMsg->align[1], 0);
+	mvIpcDbgWrite(currMsg->align[2], 0);
+
+	chn->nextTxMsgIdx++;
+	if (chn->nextTxMsgIdx == chn->queSizeInMsg)
+		chn->nextTxMsgIdx = 1;
+
+	mvIpcDbgPrintf("IPC HAL: Sent message %d on channel %d to link %d\n",
+			chn->nextTxMsgIdx - 1, chnId, linkId);
+
+	/*Raise the TX ready flag and send the trigger*/
+	*((MV_U32 *)chn->txMessageFlag) = 0x1;
+	chn->sendTrigger(chn->remoteNodeId, chnId);
+
+	return MV_OK;
+}
+
+/***********************************************************************************
+ * mvIpcRxCtrlMsg
+ *
+ * DESCRIPTION:
+ *		This routine initializes IPC channel: setup receive queue and enable data receiving
+ *		This routine receives IPC control structure (ipcCtrl) as input parameter.
+ *		The following ipcCtrl members must be initialized prior calling this function:
+ *
+ * INPUT:
+ *		linkId  - the link ID
+ *		chnId - The channel ID
+ *		msg   - Pointer to received control message
+ * OUTPUT:
+ *       None
+ * RETURN:
+ *		void
+ *
+ ************************************************************************************/
+static void mvIpcRxCtrlMsg(MV_U32 linkId, MV_U32 chnId, MV_IPC_MSG *msg)
+{
+	mvIpcDbgPrintf("IPC HAL: Processing control message %d\n", msg->type);
+
+	switch (msg->type) {
+	case IPC_MSG_ATTACH_REQ:
+		mvIpcAckAttach(linkId, chnId, MV_TRUE);
+		break;
+
+	case IPC_MSG_ATTACH_ACK:
+		mvIpcAckAttach(linkId, chnId, MV_FALSE);
+		break;
+
+	case IPC_MSG_DETACH_REQ:
+		mvIpcAckDetach(linkId, chnId, MV_TRUE);
+		break;
+
+	case IPC_MSG_DETACH_ACK:
+		mvIpcAckDetach(linkId, chnId, MV_FALSE);
+		break;
+
+	default:
+		mvIpcDbgPrintf("IPC HAL: Unknown internal message type %d\n", msg->type);
+	}
+
+	mvIpcDbgWrite(msg->align[2], MV_IPC_HAND_SHAKE_MAGIC);
+
+	mvIpcReleaseMsg(linkId, chnId, msg);
+}
+
+/***********************************************************************************
+ * mvIpcDisableChnRx
+ *
+ * DESCRIPTION:
+ *		Masks the given channel in ISR
+ *
+ * INPUT:
+ *		linkId  - the link ID
+ *		chnId - The channel ID
+ * OUTPUT:
+ *       None
+ * RETURN:
+ *		MV_OK or MV_ERROR
+ *
+ ************************************************************************************/
+MV_VOID mvIpcDisableChnRx(MV_U32 linkId, MV_U32 chnId)
+{
+	MV_IPC_LINK             *link;
+	MV_IPC_CHANNEL *chn;
+
+	if (linkId > MV_IPC_LINKS_NUM) {
+		mvIpcErrPrintf("IPC ERROR:  Dis Chn RX: Bad link id %d\n", chnId);
+		return;
+	}
+
+	link = &mv_ipc_links[linkId];
+
+	if (chnId > link->numOfChannels) {
+		mvIpcErrPrintf("IPC ERROR: Dis Chn RX: Bad channel id %d\n", chnId);
+		return;
+	}
+
+	chn = &link->channels[chnId];
+
+	chn->registerChnInISR(linkId, chnId, MV_FALSE);
+
+	mvIpcDbgPrintf("IPC HAL: Disabled ISR for link %d, channel %d\n", linkId, chnId);
+	return;
+}
+
+/***********************************************************************************
+ * mvIpcEnableChnRx
+ *
+ * DESCRIPTION:
+ *		Unmasks the given channel in ISR
+ *
+ * INPUT:
+ *		linkId  - the link ID
+ *		chnId - The channel ID
+ * OUTPUT:
+ *       None
+ * RETURN:
+ *		MV_OK or MV_ERROR
+ *
+ ************************************************************************************/
+MV_VOID mvIpcEnableChnRx(MV_U32 linkId, MV_U32 chnId)
+{
+	MV_IPC_LINK             *link;
+	MV_IPC_CHANNEL *chn;
+
+	if (linkId > MV_IPC_LINKS_NUM) {
+		mvIpcErrPrintf("IPC ERROR:  Ena Chn RX: Bad link id %d\n", chnId);
+		return;
+	}
+
+	link = &mv_ipc_links[linkId];
+
+	if (chnId > link->numOfChannels) {
+		mvIpcErrPrintf("IPC ERROR: Ena Chn RX: Bad channel id %d\n", chnId);
+		return;
+	}
+
+	chn = &link->channels[chnId];
+
+	chn->registerChnInISR(linkId, chnId, MV_TRUE);
+
+	mvIpcDbgPrintf("IPC HAL: Enabled ISR for link %d, channel %d\n", linkId, chnId);
+	return;
+}
+
+/***********************************************************************************
+ * mvIpcRxMsg
+ *
+ * DESCRIPTION:
+ *		Main Rx routine - should be called from interrupt routine
+ *
+ * INPUT:
+ *		drblNum  - number of doorbel received
+ * OUTPUT:
+ *		linkId  - the link ID
+ *       chnId - the channel id that received a message
+ *       outMsg   - pointer to the message received
+ * RETURN:
+ *		MV_TRUE  - if a message was received
+ *		MV_FALSE - if no message exists
+ *
+ ************************************************************************************/
+MV_STATUS mvIpcRxMsg(MV_U32 linkId, MV_U32 chnId)
+{
+	MV_IPC_LINK             *link;
+	MV_IPC_CHANNEL  *chn;
+	MV_IPC_MSG     *currMsg;
+	MV_U32 msgIndx;
+
+	if (linkId > MV_IPC_LINKS_NUM) {
+		mvIpcErrPrintf("IPC ERROR: Rx msg: Bad link id %d\n", chnId);
+		return MV_FAIL;
+	}
+
+	link = &mv_ipc_links[linkId];
+
+	if (chnId > link->numOfChannels) {
+		mvIpcErrPrintf("IPC ERROR: Rx msg: Bad channel id %d\n", chnId);
+		return MV_FAIL;
+	}
+
+	chn = &link->channels[chnId];
+
+	if (chn->state == MV_CHN_CLOSED)
+		return MV_FALSE;
+
+	/* First process control messages like attach, detach, close */
+	if (chn->rxCtrlMsg->isUsed == MV_TRUE)
+		mvIpcRxCtrlMsg(linkId, chnId, chn->rxCtrlMsg);
+
+	msgIndx = chn->nextRxMsgIdx;
+	currMsg = &chn->rxMsgQueVa[msgIndx];
+
+	// Check for unread data messages in queue */
+	if (currMsg->isUsed != MV_TRUE) {
+		/*No more messages, disable RX ready flag*/
+		*((MV_U32 *)chn->rxMessageFlag) = 0x0;
+		return MV_NO_MORE;
+	}
+
+	/* Increment msg idx to keep in sync with sender */
+	chn->nextRxMsgIdx++;
+	if (chn->nextRxMsgIdx == chn->queSizeInMsg)
+		chn->nextRxMsgIdx = 1;
+
+	// Check if channel is ready to receive messages */
+	if (chn->state < MV_CHN_OPEN) {
+		mvIpcErrPrintf("IPC ERROR: Rx msg: Channel not ready, state = %d\n", chn->state);
+		return MV_FAIL;
+	}
+
+	mvIpcDbgWrite(currMsg->align[2], MV_IPC_HAND_SHAKE_MAGIC);
+
+	/* Now process user messages */
+	mvIpcDbgPrintf("IPC HAL: Received message %d on channel %d\n",
+			chn->nextRxMsgIdx - 1, chnId);
+
+	/*Call user function to care the message*/
+	chn->rxCallback(currMsg);
+
+	return MV_OK;
+}
+
+/***********************************************************************************
+ * mvIpcRxMsgFlagCheck
+ *
+ * DESCRIPTION:
+ *		Check if RX flag raided
+ *
+ * INPUT:
+ *		linkId  - the link ID
+ *       chnId - the channel id that received a message
+ * OUTPUT:
+ * RETURN:
+ *		MV_TRUE  - if a RX flag raised
+ *		MV_FALSE - if no RX waiting
+ *
+ ************************************************************************************/
+MV_BOOL mvIpcRxMsgFlagCheck(MV_U32 linkId, MV_U32 chnId)
+{
+	MV_IPC_LINK             *link;
+	MV_IPC_CHANNEL  *chn;
+
+	if (linkId > MV_IPC_LINKS_NUM) {
+		mvIpcErrPrintf("IPC ERROR: Rx msg: Bad link id %d\n", chnId);
+		return MV_FALSE;
+	}
+
+	link = &mv_ipc_links[linkId];
+
+	if (chnId > link->numOfChannels) {
+		mvIpcErrPrintf("IPC ERROR: Rx msg: Bad channel id %d\n", chnId);
+		return MV_FALSE;
+	}
+
+	chn = &link->channels[chnId];
+
+	if (chn->state == MV_CHN_CLOSED)
+		return MV_FALSE;
+
+	if (*((MV_U32 *)chn->rxMessageFlag) == 0x1)
+		return MV_TRUE;
+	else
+		return MV_FALSE;
+}
+
+/***********************************************************************************
+ * mvIpcReleaseMsg
+ *
+ * DESCRIPTION:
+ *		Return ownership on message to transmitter
+ *
+ * INPUT:
+ *		linkId  - the link ID
+ *		chnId - The channel ID
+ *		msg   - Pointer to message to release
+ * OUTPUT:
+ *       None
+ * RETURN:
+ *		MV_OK or MV_ERROR
+ *
+ ************************************************************************************/
+MV_STATUS mvIpcReleaseMsg(MV_U32 linkId, MV_U32 chnId, MV_IPC_MSG *msg)
+{
+	MV_IPC_LINK             *link;
+	MV_IPC_CHANNEL *chn;
+
+	if (linkId > MV_IPC_LINKS_NUM) {
+		mvIpcErrPrintf("IPC ERROR: Tx Msg: Bad link id %d\n", chnId);
+		return MV_FALSE;
+	}
+
+	link = &mv_ipc_links[linkId];
+
+	if (chnId > link->numOfChannels) {
+		mvIpcErrPrintf("IPC ERROR: Tx Msg: Bad channel id %d\n", chnId);
+		return MV_FALSE;
+	}
+
+	chn = &link->channels[chnId];
+
+	if (chn->state == MV_CHN_CLOSED) {
+		mvIpcErrPrintf("IPC ERROR: Msg release: Inactive channel id %d\n", chnId);
+		return MV_ERROR;
+	}
+
+	if (msg->isUsed == MV_FALSE) {
+		mvIpcErrPrintf("IPC ERROR: Msg release: Msg %d owned by %d\n",
+			chn->nextRxMsgIdx, msg->isUsed);
+		return MV_ERROR;
+	}
+
+	msg->isUsed   = MV_FALSE;
+	mvIpcDbgWrite(msg->align[1], MV_IPC_HAND_SHAKE_MAGIC);
+
+	mvIpcDbgPrintf("IPC HAL: Released message 0x%8x on channel %d\n", (int)msg, chnId);
+
+	return MV_OK;
+}
+
+/***********************************************************************************
+ * mvIpcShmemMalloc
+ *
+ * DESCRIPTION:
+ *		Malloc buffer in shared memory heap for TX buffers
+ *		(Sequentual malloc, no free allowed)
+ *
+ * INPUT:
+ *		linkId  - the link ID
+ *		size - requested buffer size
+ * OUTPUT:
+ *       offset of the buffer
+ * RETURN:
+ *		MV_OK or MV_ERROR
+ *
+ ************************************************************************************/
+MV_VOID *mvIpcShmemMalloc(MV_U32 linkId, MV_U32 size)
+{
+	MV_IPC_LINK             *link;
+	MV_VOID *ptr;
+
+	if (linkId > MV_IPC_LINKS_NUM) {
+		mvIpcErrPrintf("IPC ERROR: Tx Msg: Bad link id %d\n", linkId);
+		return MV_FALSE;
+	}
+
+	link = &mv_ipc_links[linkId];
+
+	if (size > link->txSharedHeapSize)
+		return NULL;
+
+	ptr = (MV_VOID *)link->txSharedHeapAddr;
+
+	link->txSharedHeapAddr  += size;
+	link->txSharedHeapSize -= size;
+
+	return ptr;
+}
diff --git a/drivers/net/ethernet/mvebu_net/common/mvIpc.h b/drivers/net/ethernet/mvebu_net/common/mvIpc.h
new file mode 100644
index 000000000000..fe91bad7e18c
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/common/mvIpc.h
@@ -0,0 +1,158 @@
+/*******************************************************************************
+   Copyright (C) Marvell International Ltd. and its affiliates
+
+   This software file (the "File") is owned and distributed by Marvell
+   International Ltd. and/or its affiliates ("Marvell") under the following
+   alternative licensing terms.  Once you have made an election to distribute the
+   File under one of the following license alternatives, please (i) delete this
+   introductory statement regarding license alternatives, (ii) delete the two
+   license alternatives that you have not elected to use and (iii) preserve the
+   Marvell copyright notice above.
+
+********************************************************************************
+   Marvell Commercial License Option
+
+   If you received this File from Marvell and you have entered into a commercial
+   license agreement (a "Commercial License") with Marvell, the File is licensed
+   to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+   Marvell GPL License Option
+
+   If you received this File from Marvell, you may opt to use, redistribute and/or
+   modify this File in accordance with the terms and conditions of the General
+   Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+   available along with the File in the license.txt file or by writing to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+   on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+   THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+   WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+   DISCLAIMED.  The GPL License provides additional details about this warranty
+   disclaimer.
+********************************************************************************
+   Marvell BSD License Option
+
+   If you received this File from Marvell, you may opt to use, redistribute and/or
+   modify this File under the following licensing terms.
+   Redistribution and use in source and binary forms, with or without modification,
+   are permitted provided that the following conditions are met:
+
+*   Redistributions of source code must retain the above copyright notice,
+		this list of conditions and the following disclaimer.
+
+*   Redistributions in binary form must reproduce the above copyright
+		notice, this list of conditions and the following disclaimer in the
+		documentation and/or other materials provided with the distribution.
+
+*   Neither the name of Marvell nor the names of its contributors may be
+		used to endorse or promote products derived from this software without
+		specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+   ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+   DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+   ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+   (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+   LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+   ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#ifndef __mvIpc_h
+#define __mvIpc_h
+
+/*Channel statuses*/
+typedef enum {
+	MV_CHN_CLOSED =  0,
+	MV_CHN_OPEN,
+	MV_CHN_LINKING,
+	MV_CHN_UNLINKING,
+	MV_CHN_ATTACHED
+
+}MV_IPC_CHN_STATE;
+
+/*Message struct(channel queue entry)*/
+typedef struct __ipc_message_struct {
+	MV_U32 type;
+	MV_U32 size;                    /*buffer size*/
+	MV_VOID *ptr;                   /*buffer virtual address for Rx side*/
+	MV_U32 value;                   /*User data*/
+	MV_U32 isUsed;                  /*CPU Id and optional oob message*/
+	MV_U32 align[3];                /* Align message size to cache line */
+} MV_IPC_MSG;
+
+/*Function types*/
+typedef int (*MV_IPC_RX_CLBK)(MV_IPC_MSG *msg);
+typedef MV_VOID (*MV_IPC_SEND_TRIGGER)(MV_U32 linkId, MV_U32 chnId);
+typedef MV_VOID (*MV_IPC_RX_CHANNEL_REGISTER)(MV_U32 linkId, MV_U32 chnId, MV_BOOL enable);
+
+/*Channel struct*/
+typedef struct __ipc_channel_struct {
+	MV_IPC_MSG *rxMsgQueVa;         /*buffer virtual address for Rx side*/
+	MV_IPC_MSG *txMsgQueVa;         /*buffer virtual address for Tx side*/
+	MV_IPC_MSG *rxCtrlMsg;          /*buffer virtual address for Rx side*/
+	MV_IPC_MSG *txCtrlMsg;          /*buffer virtual address for Tx side*/
+	MV_U32 nextRxMsgIdx;
+	MV_U32 nextTxMsgIdx;
+	MV_U32 queSizeInMsg;
+	MV_U32 remoteNodeId;
+	MV_BOOL txEnable;
+	MV_BOOL rxEnable;
+	MV_IPC_CHN_STATE state;
+
+	MV_U32 txMessageFlag;                           /*Shared memory flag raised for message in queue*/
+	MV_U32 rxMessageFlag;                           /*Shared memory flag raised for message in queue*/
+
+	MV_IPC_RX_CLBK rxCallback;                      /*Called for for each RX*/
+	MV_IPC_SEND_TRIGGER sendTrigger;                /*Trigger to remote node to start RX*/
+	MV_IPC_RX_CHANNEL_REGISTER registerChnInISR;    /*Register the channel in RX ISR/Timer*/
+} MV_IPC_CHANNEL;
+
+/*Magic for masterConfigDone, wrote by master and clean by slave*/
+#define MV_IPC_MASTER_CONFIG_MAGIC      0x12345678
+#define MV_IPC_HAND_SHAKE_MAGIC         0x87654321
+/*Link struct(hold array of channels)*/
+typedef struct __ipc_link_struct {
+	MV_IPC_CHANNEL *channels;       /*Array of channels*/
+	MV_U32 numOfChannels;           /*Number of channels*/
+	MV_U32 shmemBaseAddr;           /*Shared mem physycal addr*/
+	MV_U32 shmemSize;               /*Shared mem physycal addr*/
+	MV_U32 nodeId;                  /*I node ID*/
+	MV_U32 remoteNodeId;            /*remote node ID*/
+	MV_U32 txSharedHeapAddr;        /*offset of heap node memory*/
+	MV_U32 txSharedHeapSize;        /*size of heap node memory*/
+	MV_U32 rxSharedHeapAddr;        /*offset of heap for remote node memory*/
+	MV_U32 rxSharedHeapSize;        /*size of heap node memory*/
+	MV_U32 masterConfigDone;        /*if master finished the configuration*/
+	MV_U32 slaveLinkInitialized;   /*if master not finished the configuration
+									and configuration was postponed by slave*/
+} MV_IPC_LINK;
+
+/*Control messages types*/
+typedef enum {
+	IPC_MSG_ATTACH_REQ = 0,
+	IPC_MSG_ATTACH_ACK,
+	IPC_MSG_DETACH_REQ,
+	IPC_MSG_DETACH_ACK
+}MV_IPC_CTRL_MSG_TYPE;
+
+MV_STATUS mvIpcLinkStart(MV_U32 linkId);
+MV_STATUS mvIpcClose(MV_U32 linkId);
+MV_STATUS mvIpcOpenChannel(MV_U32 linkId, MV_U32 chnId, MV_IPC_RX_CLBK rx_clbk);
+MV_STATUS mvIpcCloseChannel(MV_U32 linkId, MV_U32 chnId);
+MV_STATUS mvIpcAttachChannel(MV_U32 linkId, MV_U32 chnId, MV_U32 remoteCpuId, MV_BOOL *attached);
+MV_STATUS mvIpcDettachChannel(MV_U32 linkId, MV_U32 chnId);
+MV_BOOL   mvIpcIsTxReady(MV_U32 linkId, MV_U32 chnId);
+MV_STATUS mvIpcTxMsg(MV_U32 linkId, MV_U32 chnId, MV_IPC_MSG *inMsg);
+MV_STATUS mvIpcTxCtrlMsg(MV_U32 linkId, MV_U32 chnId, MV_IPC_MSG *inMsg);
+MV_STATUS mvIpcRxMsg(MV_U32 linkId, MV_U32 chnId);
+MV_BOOL mvIpcRxMsgFlagCheck(MV_U32 linkId, MV_U32 chnId);
+MV_STATUS mvIpcReleaseMsg(MV_U32 linkId, MV_U32 chnId, MV_IPC_MSG *msg);
+MV_VOID   mvIpcDisableChnRx(MV_U32 linkId, MV_U32 chnId);
+MV_VOID   mvIpcEnableChnRx(MV_U32 linkId, MV_U32 chnId);
+MV_VOID *mvIpcShmemMalloc(MV_U32 linkId, MV_U32 size);
+
+#endif /*__mvIpc_h */
diff --git a/drivers/net/ethernet/mvebu_net/common/mvList.c b/drivers/net/ethernet/mvebu_net/common/mvList.c
new file mode 100644
index 000000000000..541b190bbeae
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/common/mvList.c
@@ -0,0 +1,116 @@
+#include "mvCopyright.h"
+
+/********************************************************************************
+* mvList.c - Implementation File for Linked List.
+*
+* DESCRIPTION:
+*     This file implements basic Linked List functionality.
+*
+*******************************************************************************/
+
+#include "mvList.h"
+
+/* Create a Linked List by allocating the list head */
+/* Returns the head of the list if successful, NULL otherwise */
+MV_LIST_ELEMENT *mvListCreate(MV_VOID)
+{
+	MV_LIST_ELEMENT *head = (MV_LIST_ELEMENT *)mvOsMalloc(sizeof(MV_LIST_ELEMENT));
+
+	if (head) {
+		head->prev = NULL;
+		head->next = NULL;
+		head->data = 0;
+	}
+
+#ifdef MV_LIST_SANITY_CHECKS
+	if (!head)
+		mvOsPrintf("%s ERROR: memory allocation for new list failed\n", __func__);
+#endif /* MV_LIST_SANITY_CHECKS */
+
+	return head;
+}
+
+/* Delete all elements in a given list and free the list head */
+MV_STATUS mvListDestroy(MV_LIST_ELEMENT *head)
+{
+	MV_LIST_ELEMENT *curr, *tmp;
+
+#ifdef MV_LIST_SANITY_CHECKS
+	/* sanity check */
+	if (!head) {
+		mvOsPrintf("%s ERROR: trying to destroy uninitialized list\n", __func__);
+		return MV_ERROR;
+	}
+#endif /* MV_LIST_SANITY_CHECKS */
+
+	/* delete all elements in the list */
+	/* skip list head, it never contains real data */
+	curr = head->next;
+	while (curr) {
+		tmp = curr;
+		curr = curr->next;
+		mvListDel(tmp);
+	}
+
+	/* free the list head */
+	mvOsFree(head);
+
+	return MV_OK;
+}
+
+/* Count the number of elements in the list (not including the head) */
+MV_LONG mvListElementsCount(MV_LIST_ELEMENT *head)
+{
+	MV_LONG count = 0;
+	MV_LIST_ELEMENT *curr;
+
+#ifdef MV_LIST_SANITY_CHECKS
+	/* sanity check */
+	if (!head) {
+		mvOsPrintf("%s ERROR: trying to count elements in an uninitialized list\n", __func__);
+		return -1;
+	}
+#endif /* MV_LIST_SANITY_CHECKS */
+
+	/* skip list head, it's not a real element */
+	for (curr = head->next; curr != NULL; curr = curr->next)
+		count++;
+
+	return count;
+}
+
+/* Print all list elements */
+MV_VOID mvListPrint(MV_LIST_ELEMENT *head)
+{
+	MV_LIST_ELEMENT *curr;
+
+	/* skip list head, it never contains real data */
+	for (curr = head->next; curr != NULL; curr = curr->next) {
+		mvOsPrintf("%lu ", curr->data);
+		MV_LIST_DBG("element = %p, prev = %p, next = %p, data = %lu\n", curr, curr->prev, curr->next, curr->data);
+	}
+	mvOsPrintf("\n");
+}
+
+/* simple self-contained test */
+MV_VOID mvListTest(MV_VOID)
+{
+	int i;
+	MV_LIST_ELEMENT *list_elements[10];
+	MV_LIST_ELEMENT *head = mvListCreate();
+
+	mvOsPrintf("\n\n----- mvListTest -----\n\n");
+
+	for (i = 0; i < 10; i++)
+		list_elements[i] = mvListAddHead(head, i);
+
+	mvListPrint(head);
+
+	mvListDel(list_elements[0]);
+	mvListDel(list_elements[9]);
+	mvListDel(list_elements[4]);
+
+	mvListPrint(head);
+
+	mvListDestroy(head);
+}
diff --git a/drivers/net/ethernet/mvebu_net/common/mvList.h b/drivers/net/ethernet/mvebu_net/common/mvList.h
new file mode 100644
index 000000000000..ad2665537505
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/common/mvList.h
@@ -0,0 +1,127 @@
+#include "mvCopyright.h"
+
+/********************************************************************************
+* mvList.h - Header File for Linked List.
+*
+* DESCRIPTION:
+*     This file defines basic Linked List functionality.
+*
+*******************************************************************************/
+
+#ifndef __mvList_h__
+#define __mvList_h__
+
+#include "mvCommon.h"
+#include "mvOs.h"
+
+/* Un-comment the next line to use sanity checks in the code */
+/* #define MV_LIST_SANITY_CHECKS */
+
+/* Un-comment the next line to enable debug prints */
+/* #define MV_LIST_DEBUG */
+
+#ifdef MV_LIST_DEBUG
+#define MV_LIST_DBG(fmt, arg...) mvOsPrintf(fmt, ##arg)
+#else
+#define MV_LIST_DBG(fmt, arg...)
+#endif
+
+typedef struct mv_list_element {
+	struct mv_list_element *prev;
+	struct mv_list_element *next;
+	MV_ULONG data;
+
+} MV_LIST_ELEMENT;
+
+/* Returns the first matching element in the list, NULL if not found */
+static INLINE MV_LIST_ELEMENT *mvListFind(MV_LIST_ELEMENT *head, MV_ULONG data)
+{
+	MV_LIST_ELEMENT *curr;
+
+	/* skip list head, it never contains real data */
+	for (curr = head->next; curr != NULL; curr = curr->next) {
+		if (curr->data == data)
+			return curr;
+	}
+	return NULL;
+}
+
+/* Add a new element at the top of the list (right after head) */
+/* The list head will point to this new element */
+/* Returns pointer to new element if successful, NULL otherwise */
+static INLINE MV_LIST_ELEMENT *mvListAddHead(MV_LIST_ELEMENT *head, MV_ULONG data)
+{
+	MV_LIST_ELEMENT *element;
+
+#ifdef MV_LIST_SANITY_CHECKS
+	/* sanity check */
+	if (!head) {
+		mvOsPrintf("%s ERROR: trying to add an element to an uninitialized list\n", __func__);
+		return NULL;
+	}
+#endif /* MV_LIST_SANITY_CHECKS */
+
+	element = mvOsMalloc(sizeof(MV_LIST_ELEMENT));
+	if (element) {
+		element->data = data;
+		element->next = head->next;
+		element->prev = head;
+		if (head->next)
+			head->next->prev = element;
+
+		head->next = element;
+
+		MV_LIST_DBG("Adding new element %p: data = %lu, next = %p, prev = %p\n",
+				element, element->data, element->next, element->prev);
+	}
+
+#ifdef MV_LIST_SANITY_CHECKS
+	if (!element)
+		mvOsPrintf("%s ERROR: memory allocation for new element failed\n", __func__);
+#endif /* MV_LIST_SANITY_CHECKS */
+
+	return element;
+}
+
+/* Delete an element from a list */
+/* Return the deleted element data */
+static INLINE MV_ULONG mvListDel(MV_LIST_ELEMENT *element)
+{
+	MV_LIST_ELEMENT *prev;
+	MV_LIST_ELEMENT *next;
+	MV_ULONG data;
+
+#ifdef MV_LIST_SANITY_CHECKS
+	/* sanity check */
+	if (!element) {
+		mvOsPrintf("%s ERROR: trying to delete a NULL element\n", __func__);
+		return 0;
+	}
+#endif /* MV_LIST_SANITY_CHECKS */
+
+	prev = element->prev;
+	next = element->next;
+	data = element->data;
+
+	MV_LIST_DBG("Deleting element %p, data = %lu, prev = %p, next = %p\n", element, element->data, prev, next);
+
+	mvOsFree(element);
+
+	if (prev)
+		prev->next = next;
+	else
+		mvOsPrintf("%s ERROR: trying to delete an element when prev == NULL\n", __func__);
+
+	if (next)
+		next->prev = prev;
+
+	return data;
+}
+
+MV_LIST_ELEMENT *mvListCreate(MV_VOID);
+MV_STATUS mvListDestroy(MV_LIST_ELEMENT *head);
+MV_LONG mvListElementsCount(MV_LIST_ELEMENT *head);
+MV_VOID mvListPrint(MV_LIST_ELEMENT *head);
+MV_VOID mvListTest(MV_VOID);
+
+#endif /* __mvList_h__ */
diff --git a/drivers/net/ethernet/mvebu_net/common/mvStack.c b/drivers/net/ethernet/mvebu_net/common/mvStack.c
new file mode 100644
index 000000000000..a2a0b0bd19c7
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/common/mvStack.c
@@ -0,0 +1,85 @@
+#include "mvCopyright.h"
+
+/********************************************************************************
+* mvQueue.c
+*
+* FILENAME:    $Workfile: mvStack.c $
+* LAST UPDATE: $Modtime:  $
+*
+* DESCRIPTION:
+*     This file implements simple Stack LIFO functionality.
+*******************************************************************************/
+
+/* includes */
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "mvTypes.h"
+#include "mvDebug.h"
+#include "mvStack.h"
+
+/* defines  */
+
+/* Public functions */
+
+/* Purpose: Create new stack
+ * Inputs:
+ *	- MV_U32	noOfElements	- maximum number of elements in the stack.
+ *                              Each element 4 bytes size
+ * Return: void* - pointer to created stack.
+ */
+void *mvStackCreate(int numOfElements)
+{
+	MV_STACK *pStack;
+	MV_U32 *pStackElements;
+
+	pStack = (MV_STACK *)mvOsMalloc(sizeof(MV_STACK));
+	pStackElements = (MV_U32 *)mvOsMalloc(numOfElements * sizeof(MV_U32));
+	if ((pStack == NULL) || (pStackElements == NULL)) {
+		mvOsPrintf("mvStack: Can't create new stack\n");
+		if (pStack)
+			mvOsFree(pStack);
+		if (pStackElements)
+			mvOsFree(pStackElements);
+		return NULL;
+	}
+	memset(pStackElements, 0, numOfElements * sizeof(MV_U32));
+	pStack->numOfElements = numOfElements;
+	pStack->stackIdx = 0;
+	pStack->stackElements = pStackElements;
+
+	return pStack;
+}
+
+/* Purpose: Delete existing stack
+ * Inputs:
+ *	- void* 	stackHndl 	- Stack handle as returned by "mvStackCreate()" function
+ *
+ * Return: MV_STATUS  	MV_NOT_FOUND - Failure. StackHandle is not valid.
+ *			MV_OK        - Success.
+ */
+MV_STATUS mvStackDelete(void *stackHndl)
+{
+	MV_STACK *pStack = (MV_STACK *) stackHndl;
+
+	if ((pStack == NULL) || (pStack->stackElements == NULL))
+		return MV_NOT_FOUND;
+
+	mvOsFree(pStack->stackElements);
+	mvOsFree(pStack);
+
+	return MV_OK;
+}
+
+/* PrintOut status of the stack */
+void mvStackStatus(void *stackHndl, MV_BOOL isPrintElements)
+{
+	int i;
+	MV_STACK *pStack = (MV_STACK *) stackHndl;
+
+	mvOsPrintf("StackHandle=%p, pElements=%p, numElements=%d, stackIdx=%d\n",
+		   stackHndl, pStack->stackElements, pStack->numOfElements, pStack->stackIdx);
+	if (isPrintElements == MV_TRUE) {
+		for (i = 0; i < pStack->stackIdx; i++)
+			mvOsPrintf("%3d. Value=0x%x\n", i, pStack->stackElements[i]);
+	}
+}
diff --git a/drivers/net/ethernet/mvebu_net/common/mvStack.h b/drivers/net/ethernet/mvebu_net/common/mvStack.h
new file mode 100644
index 000000000000..353d52a695d3
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/common/mvStack.h
@@ -0,0 +1,122 @@
+#include "mvCopyright.h"
+
+/********************************************************************************
+* mvStack.h - Header File for :
+*
+* FILENAME:    $Workfile: mvStack.h $
+* LAST UPDATE: $Modtime:  $
+*
+* DESCRIPTION:
+*     This file defines simple Stack (LIFO) functionality.
+*
+*******************************************************************************/
+
+#ifndef __mvStack_h__
+#define __mvStack_h__
+
+/* includes */
+#include "mvTypes.h"
+
+/* defines  */
+
+/* typedefs */
+/* Data structure describes general purpose Stack */
+typedef struct {
+	int stackIdx;
+	int numOfElements;
+	MV_U32 *stackElements;
+} MV_STACK;
+
+static INLINE MV_BOOL mvStackIsFull(void *stackHndl)
+{
+	MV_STACK *pStack = (MV_STACK *) stackHndl;
+
+	if (pStack->stackIdx == pStack->numOfElements)
+		return MV_TRUE;
+
+	return MV_FALSE;
+}
+
+static INLINE MV_BOOL mvStackIsEmpty(void *stackHndl)
+{
+	MV_STACK *pStack = (MV_STACK *) stackHndl;
+
+	if (pStack->stackIdx == 0)
+		return MV_TRUE;
+
+	return MV_FALSE;
+}
+
+/* Purpose: Push new element to stack
+ * Inputs:
+ *	- void* 	stackHndl 	- Stack handle as returned by "mvStackCreate()" function.
+ *	- MV_U32	value		- New element.
+ *
+ * Return: MV_STATUS  	MV_FULL - Failure. Stack is full.
+ *						MV_OK   - Success. Element is put to stack.
+ */
+static INLINE void mvStackPush(void *stackHndl, MV_U32 value)
+{
+	MV_STACK *pStack = (MV_STACK *) stackHndl;
+
+#ifdef MV_RT_DEBUG
+	if (pStack->stackIdx == pStack->numOfElements) {
+		mvOsPrintf("mvStackPush: Stack is FULL\n");
+		return;
+	}
+#endif /* MV_RT_DEBUG */
+
+	pStack->stackElements[pStack->stackIdx] = value;
+	pStack->stackIdx++;
+}
+
+/* Purpose: Pop element from the top of stack and copy it to "pValue"
+ * Inputs:
+ *	- void* 	stackHndl 	- Stack handle as returned by "mvStackCreate()" function.
+ *	- MV_U32	value		- Element in the top of stack.
+ *
+ * Return: MV_STATUS  	MV_EMPTY - Failure. Stack is empty.
+ *						MV_OK    - Success. Element is removed from the stack and
+ *									copied to pValue argument
+ */
+static INLINE MV_U32 mvStackPop(void *stackHndl)
+{
+	MV_STACK *pStack = (MV_STACK *) stackHndl;
+
+#ifdef MV_RT_DEBUG
+	if (pStack->stackIdx == 0) {
+		mvOsPrintf("mvStackPop: Stack is EMPTY\n");
+		return 0;
+	}
+#endif /* MV_RT_DEBUG */
+
+	pStack->stackIdx--;
+	return pStack->stackElements[pStack->stackIdx];
+}
+
+static INLINE int mvStackIndex(void *stackHndl)
+{
+	MV_STACK *pStack = (MV_STACK *) stackHndl;
+
+	return pStack->stackIdx;
+}
+
+static INLINE int mvStackFreeElements(void *stackHndl)
+{
+	MV_STACK *pStack = (MV_STACK *) stackHndl;
+
+	return (pStack->numOfElements - pStack->stackIdx);
+}
+
+/* mvStack.h API list */
+
+/* Create new Stack */
+void *mvStackCreate(int numOfElements);
+
+/* Delete existing stack */
+MV_STATUS mvStackDelete(void *stackHndl);
+
+/* Print status of the stack */
+void mvStackStatus(void *stackHndl, MV_BOOL isPrintElements);
+
+#endif /* __mvStack_h__ */
diff --git a/drivers/net/ethernet/mvebu_net/common/mvTypes.h b/drivers/net/ethernet/mvebu_net/common/mvTypes.h
new file mode 100644
index 000000000000..14a45e5d4680
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/common/mvTypes.h
@@ -0,0 +1,270 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCmvTypesh
+#define __INCmvTypesh
+
+/* Defines */
+
+/* The following is a list of Marvell status    */
+#define MV_ERROR		    (-1)
+#define MV_OK			    (0)	/* Operation succeeded                   */
+#define MV_FAIL			    (1)	/* Operation failed                      */
+#define MV_BAD_VALUE        (2)	/* Illegal value (general)               */
+#define MV_OUT_OF_RANGE     (3)	/* The value is out of range             */
+#define MV_BAD_PARAM        (4)	/* Illegal parameter in function called  */
+#define MV_BAD_PTR          (5)	/* Illegal pointer value                 */
+#define MV_BAD_SIZE         (6)	/* Illegal size                          */
+#define MV_BAD_STATE        (7)	/* Illegal state of state machine        */
+#define MV_SET_ERROR        (8)	/* Set operation failed                  */
+#define MV_GET_ERROR        (9)	/* Get operation failed                  */
+#define MV_CREATE_ERROR     (10)	/* Fail while creating an item           */
+#define MV_NOT_FOUND        (11)	/* Item not found                        */
+#define MV_NO_MORE          (12)	/* No more items found                   */
+#define MV_NO_SUCH          (13)	/* No such item                          */
+#define MV_TIMEOUT          (14)	/* Time Out                              */
+#define MV_NO_CHANGE        (15)	/* Parameter(s) is already in this value */
+#define MV_NOT_SUPPORTED    (16)	/* This request is not support           */
+#define MV_NOT_IMPLEMENTED  (17)	/* Request supported but not implemented */
+#define MV_NOT_INITIALIZED  (18)	/* The item is not initialized           */
+#define MV_NO_RESOURCE      (19)	/* Resource not available (memory ...)   */
+#define MV_FULL             (20)	/* Item is full (Queue or table etc...)  */
+#define MV_EMPTY            (21)	/* Item is empty (Queue or table etc...) */
+#define MV_INIT_ERROR       (22)	/* Error occured while INIT process      */
+#define MV_HW_ERROR         (23)	/* Hardware error                        */
+#define MV_TX_ERROR         (24)	/* Transmit operation not succeeded      */
+#define MV_RX_ERROR         (25)	/* Recieve operation not succeeded       */
+#define MV_NOT_READY	    (26)	/* The other side is not ready yet       */
+#define MV_ALREADY_EXIST    (27)	/* Tried to create existing item         */
+#define MV_OUT_OF_CPU_MEM   (28)	/* Cpu memory allocation failed.         */
+#define MV_NOT_STARTED      (29)	/* Not started yet                       */
+#define MV_BUSY             (30)	/* Item is busy.                         */
+#define MV_TERMINATE        (31)	/* Item terminates it's work.            */
+#define MV_NOT_ALIGNED      (32)	/* Wrong alignment                       */
+#define MV_NOT_ALLOWED      (33)	/* Operation NOT allowed                 */
+#define MV_WRITE_PROTECT    (34)	/* Write protected                       */
+#define MV_DROPPED          (35)	/* Packet dropped                        */
+#define MV_STOLEN           (36)	/* Packet stolen */
+#define MV_CONTINUE         (37)        /* Continue */
+#define MV_RETRY		    (38)	/* Operation failed need retry           */
+
+#define MV_INVALID  (int)(-1)
+
+#define MV_FALSE	0
+#define MV_TRUE     (!(MV_FALSE))
+
+#ifndef NULL
+#define NULL ((void *)0)
+#endif
+
+#ifndef MV_ASMLANGUAGE
+/* typedefs */
+
+typedef char MV_8;
+typedef unsigned char MV_U8;
+
+typedef int MV_32;
+typedef unsigned int MV_U32;
+
+typedef short MV_16;
+typedef unsigned short MV_U16;
+
+#ifdef MV_PPC64
+typedef long MV_64;
+typedef unsigned long MV_U64;
+#else
+typedef long long MV_64;
+typedef unsigned long long MV_U64;
+#endif
+
+typedef long MV_LONG;		/* 32/64 */
+typedef unsigned long MV_ULONG;	/* 32/64 */
+
+typedef int MV_STATUS;
+typedef int MV_BOOL;
+typedef void MV_VOID;
+typedef float MV_FLOAT;
+
+typedef int (*MV_FUNCPTR) (void);	/* ptr to function returning int   */
+typedef void (*MV_VOIDFUNCPTR) (void);	/* ptr to function returning void  */
+typedef double (*MV_DBLFUNCPTR) (void);	/* ptr to function returning double */
+typedef float (*MV_FLTFUNCPTR) (void);	/* ptr to function returning float */
+
+typedef MV_U32 MV_KHZ;
+typedef MV_U32 MV_MHZ;
+typedef MV_U32 MV_HZ;
+
+/* This enumerator describes the set of commands that can be applied on   	*/
+/* an engine (e.g. IDMA, XOR). Appling a comman depends on the current   	*/
+/* status (see MV_STATE enumerator)                      					*/
+/* Start can be applied only when status is IDLE                         */
+/* Stop can be applied only when status is IDLE, ACTIVE or PAUSED        */
+/* Pause can be applied only when status is ACTIVE                          */
+/* Restart can be applied only when status is PAUSED                        */
+typedef enum _mvCommand {
+	MV_START,		/* Start     */
+	MV_STOP,		/* Stop     */
+	MV_PAUSE,		/* Pause    */
+	MV_RESTART		/* Restart  */
+} MV_COMMAND;
+
+/* This enumerator describes the set of state conditions.					*/
+/* Moving from one state to other is stricted.   							*/
+typedef enum _mvState {
+	MV_IDLE,
+	MV_ACTIVE,
+	MV_PAUSED,
+	MV_UNDEFINED_STATE
+} MV_STATE;
+
+typedef enum {
+	ETH_MAC_SPEED_10M,
+	ETH_MAC_SPEED_100M,
+	ETH_MAC_SPEED_1000M,
+	ETH_MAC_SPEED_AUTO
+
+} MV_ETH_MAC_SPEED;
+
+/* This structure describes address space window. Window base can be        */
+/* 64 bit, window size up to 4GB                                            */
+typedef struct _mvAddrWin {
+	MV_U32 baseLow;		/* 32bit base low       */
+	MV_U32 baseHigh;	/* 32bit base high      */
+	MV_U64 size;		/* 64bit size           */
+} MV_ADDR_WIN;
+
+/* This binary enumerator describes protection attribute status             */
+typedef enum _mvProtRight {
+	ALLOWED,		/* Protection attribute allowed                         */
+	FORBIDDEN		/* Protection attribute forbidden                       */
+} MV_PROT_RIGHT;
+
+/* Unified struct for Rx and Tx packet operations. The user is required to 	*/
+/* be familier only with Tx/Rx descriptor command status.               	*/
+typedef struct _bufInfo {
+	MV_U32 cmdSts;		/* Tx/Rx command status                                     */
+	MV_U16 byteCnt;		/* Size of valid data in the buffer     */
+	MV_U16 bufSize;		/* Total size of the buffer             */
+	MV_U8 *pBuff;		/* Pointer to Buffer                    */
+	MV_U8 *pData;		/* Pointer to data in the Buffer        */
+	MV_U32 userInfo1;	/* Tx/Rx attached user information 1    */
+	MV_U32 userInfo2;	/* Tx/Rx attached user information 2    */
+	struct _bufInfo *pNextBufInfo;	/* Next buffer in packet            */
+} BUF_INFO;
+
+/* This structure contains information describing one of buffers
+ * (fragments) they are built Ethernet packet.
+ */
+typedef struct {
+	MV_U8 *bufVirtPtr;
+	MV_ULONG bufPhysAddr;
+	MV_U32 bufSize;
+	MV_U32 dataSize;
+	MV_U32 memHandle;
+	MV_32 bufAddrShift;
+} MV_BUF_INFO;
+
+/* This structure contains information describing Ethernet packet.
+ * The packet can be divided for few buffers (fragments)
+ */
+typedef struct {
+	MV_ULONG osInfo;
+	MV_BUF_INFO *pFrags;
+	MV_U32 status;
+	MV_U16 pktSize;
+	MV_U16 numFrags;
+	MV_U32 ownerId;
+	MV_U32 fragIP;
+	MV_U32 txq;
+} MV_PKT_INFO;
+
+/* This structure describes SoC units address decode window	*/
+typedef struct {
+	MV_ADDR_WIN addrWin;	/* An address window */
+	MV_BOOL enable;		/* Address decode window is enabled/disabled    */
+	MV_U8 attrib;		/* chip select attributes */
+	MV_U8 targetId;		/* Target Id of this MV_TARGET */
+} MV_UNIT_WIN_INFO;
+
+/* This structure describes access rights for Access protection windows     */
+/* that can be found in IDMA, XOR, Ethernet and MPSC units.                 */
+/* Note that the permission enumerator coresponds to its register format.   */
+/* For example, Read only premission is presented as "1" in register field. */
+typedef enum _mvAccessRights {
+	NO_ACCESS_ALLOWED = 0,	/* No access allowed            */
+	READ_ONLY = 1,		/* Read only permission         */
+	ACC_RESERVED = 2,	/* Reserved access right                */
+	FULL_ACCESS = 3,	/* Read and Write permission    */
+	MAX_ACC_RIGHTS
+} MV_ACCESS_RIGHTS;
+
+typedef struct _mvDecRegs {
+	MV_U32 baseReg;
+	MV_U32 baseRegHigh;
+	MV_U32 ctrlReg;
+} MV_DEC_REGS;
+
+#endif /* MV_ASMLANGUAGE */
+
+#endif /* __INCmvTypesh */
diff --git a/drivers/net/ethernet/mvebu_net/common/mvVideo.h b/drivers/net/ethernet/mvebu_net/common/mvVideo.h
new file mode 100644
index 000000000000..d1f6e86cc099
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/common/mvVideo.h
@@ -0,0 +1,121 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#ifndef __INCmvVideoh
+#define __INCmvVideoh
+
+/* includes */
+
+/* Defines */
+
+typedef struct mvVideoResolution {
+	MV_U32 width;
+	MV_U32 height;
+	char *name;
+} MV_VIDEO_RESOLUTION;
+
+/*
+ * Basic window sizes.
+ */
+
+#define VGA_WIDTH	640
+#define VGA_HEIGHT	480
+#define QVGA_WIDTH	320
+#define QVGA_HEIGHT	240
+#define CIF_WIDTH	352
+#define CIF_HEIGHT	288
+#define QCIF_WIDTH	176
+#define	QCIF_HEIGHT	144
+
+#define MV_VIDEO_RESOLUTION_VGA {VGA_WIDTH, VGA_HEIGHT, "VGA"}
+#define MV_VIDEO_RESOLUTION_QVGA {QVGA_WIDTH, QVGA_HEIGHT, "QVGA"}
+#define MV_VIDEO_RESOLUTION_CIF {CIF_WIDTH, CIF_HEIGHT, "CIF"}
+#define MV_VIDEO_RESOLUTION_QCIF {QCIF_WIDTH, QCIF_HEIGHT, "QCIF"}
+
+/* Pixel format */
+typedef enum mvPixFormatId {
+	MV_PIX_FORMAT_ID_RGB444 = 0x444,
+	MV_PIX_FORMAT_ID_RGB555 = 0x555,
+	MV_PIX_FORMAT_ID_RGB565 = 0x565,
+	MV_PIX_FORMAT_ID_YUV422 = 0x422,
+	MV_PIX_FORMAT_ID_RAW_BAYER = 0x111
+} MV_PIX_FORMAT_ID;
+
+typedef struct mvPixelFormat {
+	MV_PIX_FORMAT_ID id;
+	char *name;
+} MV_PIXEL_FORMAT;
+
+/* known formats */
+#define MV_PIXEL_FORMAT_RGB444	{MV_PIX_FORMAT_ID_RGB444, "RGB444"}
+#define MV_PIXEL_FORMAT_RGB555  {MV_PIX_FORMAT_ID_RGB555, "RGB555"}
+#define MV_PIXEL_FORMAT_RGB565  {MV_PIX_FORMAT_ID_RGB565, "RGB565"}
+#define MV_PIXEL_FORMAT_YUV422  {MV_PIX_FORMAT_ID_YUV422, "YUV422"}
+#define MV_PIXEL_FORMAT_RAW_BAYER  {MV_PIX_FORMAT_ID_RAW_BAYER, "RAW BAYER"}
+
+typedef struct {
+	MV_PIXEL_FORMAT pixelFormat;
+	MV_VIDEO_RESOLUTION resolution;
+} MV_IMAGE_FORMAT;
+
+#endif /* __INCmvVideoh */
diff --git a/drivers/net/ethernet/mvebu_net/linux/mv_neta.h b/drivers/net/ethernet/mvebu_net/linux/mv_neta.h
new file mode 100644
index 000000000000..a9898d1133bc
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/linux/mv_neta.h
@@ -0,0 +1,126 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+/*  mv_neta.h */
+
+#ifndef LINUX_MV_NETA_H
+#define LINUX_MV_NETA_H
+
+#define MV_NETA_PORT_NAME	"mv_neta_port"
+struct mv_neta_pdata {
+	/* Global parameters common for all ports */
+	unsigned int  tclk;
+	unsigned int  pclk;
+	int           max_port;
+	int           max_cpu;
+	unsigned int  ctrl_model;
+	unsigned int  ctrl_rev;
+
+	/* Per port parameters */
+	unsigned int  cpu_mask;
+	int           mtu;
+
+	/* Whether a PHY is present, and if yes, at which address. */
+	int      phy_addr;
+
+	/* Maximum packet size for L4 checksum generation */
+	int      tx_csum_limit;
+
+	/* Use this MAC address if it is valid */
+	u8       mac_addr[6];
+
+	/*
+	* If speed is 0, autonegotiation is enabled.
+	*   Valid values for speed: 0, SPEED_10, SPEED_100, SPEED_1000.
+	*   Valid values for duplex: DUPLEX_HALF, DUPLEX_FULL.
+	*/
+	int      speed;
+	int      duplex;
+
+	/* Port configuration: indicates if this port is LB, and if PCS block is active */
+	int     lb_enable;
+	int     is_sgmii;
+	int     is_rgmii;
+
+	/* port interrupt line number */
+	int     irq;
+
+	/*
+	* How many RX/TX queues to use.
+	*/
+	int      rx_queue_count;
+	int      tx_queue_count;
+
+	/*
+	* Override default RX/TX queue sizes if nonzero.
+	*/
+	int      rx_queue_size;
+	int      tx_queue_size;
+	/* PNC TCAM size*/
+#ifdef CONFIG_MV_ETH_PNC
+	unsigned int pnc_tcam_size;
+#endif
+};
+
+
+#endif  /* LINUX_MV_NETA_H */
diff --git a/drivers/net/ethernet/mvebu_net/linux/mv_pp2.h b/drivers/net/ethernet/mvebu_net/linux/mv_pp2.h
new file mode 100644
index 000000000000..0b595f505267
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/linux/mv_pp2.h
@@ -0,0 +1,127 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+/*  mv_pp2.h */
+
+#ifndef LINUX_MV_PP2_H
+#define LINUX_MV_PP2_H
+
+#define MV_PP2_PORT_NAME	"mv_pp2_port"
+
+/* valid values for flags */
+#define MV_PP2_PDATA_F_SGMII		0x1 /* MAC connected to PHY via SGMII, PCS block is active */
+#define MV_PP2_PDATA_F_RGMII		0x2 /* MAC connected to PHY via RGMII */
+#define MV_PP2_PDATA_F_LB		0x4 /* This port is serve as LoopBack port */
+#define MV_PP2_PDATA_F_LINUX_CONNECT	0x8 /* This port is connected to Linux */
+
+struct mv_pp2_pdata {
+
+	/* Global parameters common for all ports */
+	unsigned int  tclk;
+	int           max_port;
+
+	/* Controller Model (Device ID) and Revision */
+	unsigned int  ctrl_model;
+	unsigned int  ctrl_rev;
+
+	/* Per port parameters */
+	unsigned int  cpu_mask;
+	int           mtu;
+
+	/* Whether a PHY is present, and if yes, at which address. */
+	int      phy_addr;
+
+	/* Use this MAC address if it is valid */
+	u8       mac_addr[6];
+
+	/*
+	* If speed is 0, autonegotiation is enabled.
+	*   Valid values for speed: 0, SPEED_10, SPEED_100, SPEED_1000.
+	*   Valid values for duplex: DUPLEX_HALF, DUPLEX_FULL.
+	*/
+	int      speed;
+	int      duplex;
+
+	int	     is_sgmii;
+	int	     is_rgmii;
+
+	/* port interrupt line number */
+	int		 irq;
+
+	/*
+	* How many RX/TX queues to use.
+	*/
+	int      rx_queue_count;
+	int      tx_queue_count;
+
+	/*
+	* Override default RX/TX queue sizes if nonzero.
+	*/
+	int      rx_queue_size;
+	int      tx_queue_size;
+
+	unsigned int flags;
+};
+
+
+#endif  /* LINUX_MV_PP2_H */
diff --git a/drivers/net/ethernet/mvebu_net/mvNetConfig.h b/drivers/net/ethernet/mvebu_net/mvNetConfig.h
new file mode 100644
index 000000000000..742f07f0bc47
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/mvNetConfig.h
@@ -0,0 +1,136 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __mv_net_config_h__
+#define __mv_net_config_h__
+
+#define MV_ETH_MAX_PORTS                4
+
+#if defined(CONFIG_MV_ETH_PP2) || defined(CONFIG_MV_ETH_PP2_MODULE)
+
+#define INTER_REGS_PHYS_BASE		0xF1000000
+#define INTER_REGS_VIRT_BASE		0xFBC00000
+
+#define PP2_CPU0_VIRT_BASE		0xFEC00000
+#define PP2_CPU1_VIRT_BASE		0xFEC10000
+
+#define MV_PP2_PON_EXIST
+#define MV_PP2_PON_PORT_ID              7
+#define MV_PP2_MAX_RXQ                  16      /* Maximum number of RXQs can be mapped to each port */
+#define MV_PP2_MAX_TXQ                  8
+#define MV_PP2_RXQ_TOTAL_NUM            32      /* Total number of RXQs for usage by all ports */
+#define MV_PP2_MAX_TCONT                16      /* Maximum number of TCONTs supported by PON port */
+#define MV_PP2_TX_CSUM_MAX_SIZE         1790
+
+#define IRQ_GLOBAL_GOP			82 /* Group of Ports (GOP) */
+#define IRQ_GLOBAL_NET_WAKE_UP		112 /* WOL interrupt */
+
+#define MV_ETH_TCLK			(166666667)
+
+#ifdef CONFIG_OF
+extern int pp2_vbase;
+extern int eth_vbase;
+extern int pp2_port_vbase[MV_ETH_MAX_PORTS];
+#define MV_PP2_REG_BASE                 (pp2_vbase)
+#define MV_ETH_BASE_ADDR                (eth_vbase)
+#define GOP_REG_BASE(port)		(pp2_port_vbase[port])
+#else
+#define MV_PP2_REG_BASE                 (0xF0000)
+#define MV_ETH_BASE_ADDR                (0xC0000)
+#define GOP_REG_BASE(port)		(MV_ETH_BASE_ADDR + 0x4000 + ((port) / 2) * 0x3000 + ((port) % 2) * 0x1000)
+#endif
+
+#define LMS_REG_BASE                    (MV_ETH_BASE_ADDR)
+#define MIB_COUNTERS_REG_BASE           (MV_ETH_BASE_ADDR + 0x1000)
+#define GOP_MNG_REG_BASE                (MV_ETH_BASE_ADDR + 0x3000)
+#define MV_PON_REGS_OFFSET              (MV_ETH_BASE_ADDR + 0x8000)
+
+#endif /* PP2 */
+
+#if defined(CONFIG_MV_ETH_NETA) || defined(CONFIG_MV_ETH_NETA_MODULE)
+
+#define MV_PON_PORT(p)			MV_FALSE
+#define MV_ETH_MAX_TCONT		1
+
+#define MV_ETH_MAX_RXQ			8
+#define MV_ETH_MAX_TXQ			8
+#define MV_ETH_TX_CSUM_MAX_SIZE		9800
+#define MV_PNC_TCAM_LINES		1024    /* TCAM num of entries */
+#define MV_BM_WIN_ID		12
+#define MV_PNC_WIN_ID		11
+#define MV_BM_WIN_ATTR		0x4
+#define MV_PNC_WIN_ATTR		0x4
+
+/* New GMAC module is used */
+#define MV_ETH_GMAC_NEW
+/* New WRR/EJP module is used */
+#define MV_ETH_WRR_NEW
+/* IPv6 parsing support for Legacy parser */
+#define MV_ETH_LEGACY_PARSER_IPV6
+#define MV_ETH_PNC_NEW
+#define MV_ETH_PNC_LB
+
+#endif /* CONFIG_MV_ETH_NETA */
+
+#endif /* __mv_net_config_h__ */
diff --git a/drivers/net/ethernet/mvebu_net/neta/.gitignore b/drivers/net/ethernet/mvebu_net/neta/.gitignore
new file mode 100644
index 000000000000..1d5b2ead4159
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/.gitignore
@@ -0,0 +1,97 @@
+#
+# NOTE! Don't add files that are generated in specific
+# subdirectories here. Add them in the ".gitignore" file
+# in that subdirectory instead.
+#
+# NOTE! Please use 'git ls-files -i --exclude-standard'
+# command after changing this file, to see if there are
+# any tracked files which get ignored after the change.
+#
+# Normal rules
+#
+.*
+*.o
+*.o.*
+*.a
+*.s
+*.ko
+*.so
+*.so.dbg
+*.mod.c
+*.i
+*.lst
+*.symtypes
+*.order
+modules.builtin
+*.elf
+*.bin
+*.gz
+*.bz2
+*.lzma
+*.xz
+*.lzo
+*.patch
+*.gcno
+
+#
+#
+#
+arch/arm/mach-armadaxp/armada_xp_family/
+arch/arm/mach-armada370/armada_370_family/
+arch/arm/mach-armada375/armada_375_family/
+arch/arm/mach-armada380/armada_380_family/
+arch/arm/plat-armada/common/
+arch/arm/plat-armada/mv_hal/
+arch/arm/plat-armada/mv_drivers_lsp/mv_pp2/
+arch/arm/plat-armada/mv_drivers_lsp/mv_neta/
+#
+# Top-level generic files
+#
+/tags
+/TAGS
+/linux
+/vmlinux
+/vmlinuz
+/System.map
+/Module.markers
+/Module.symvers
+
+#
+# Debian directory (make deb-pkg)
+#
+/debian/
+
+#
+# git files that we don't want to ignore even it they are dot-files
+#
+!.gitignore
+!.mailmap
+
+#
+# Generated include files
+#
+include/config
+include/linux/version.h
+include/generated
+arch/*/include/generated
+
+# stgit generated dirs
+patches-*
+
+# quilt's files
+patches
+series
+
+# cscope files
+cscope.*
+ncscope.*
+
+# gnu global files
+GPATH
+GRTAGS
+GSYMS
+GTAGS
+
+*.orig
+*~
+\#*#
diff --git a/drivers/net/ethernet/mvebu_net/neta/Kconfig b/drivers/net/ethernet/mvebu_net/neta/Kconfig
new file mode 100644
index 000000000000..22dcc563540f
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/Kconfig
@@ -0,0 +1,875 @@
+config MV_ETH_PORTS_NUM
+	int "Number of Marvell GbE ports"
+	depends on MV_ETH_NETA
+	default 1
+	---help---
+	Number of Marvell GbE ports supported by NETA driver
+
+menu "BM configuration"
+
+config MV_ETH_BM
+	depends on MV_ETH_NETA && (ARCH_FEROCEON_KW2 || ARCH_ARMADA_XP || ARCH_ARMADA38X || ARCH_MVEBU)
+	bool "Buffer Management support (BM)"
+	default y
+        ---help---
+	Enable/Disable hardware buffer management support for NETA driver.
+	if 'y' - BM support is enabled for Hardware Forwarding
+	To enable BM support for Software forwarding MV_ETH_BM_CPU should
+	be set to 'y' too.
+
+config MV_ETH_BM_CPU
+	depends on MV_ETH_BM
+	bool "Use BM for CPU processed traffic"
+	default y
+	---help---
+	BM pools is used for traffic processed by CPU and HWF both
+
+config MV_ETH_BM_0_PKT_SIZE
+	depends on MV_ETH_BM
+	int "Packet size [bytes] can use buffers from pool #0"
+	default 0
+	---help---
+	0 - means that packet size for the pool will be defined accordingly
+        with MTU of the port that use this pool.
+
+config MV_ETH_BM_1_PKT_SIZE
+	depends on MV_ETH_BM
+	int "Packet size [bytes] can use buffers from pool #1"
+	default 0
+        ---help---
+        0 - means that packet size for the pool will be defined accordingly
+        with MTU of the port that use this pool.
+
+config MV_ETH_BM_2_PKT_SIZE
+	depends on MV_ETH_BM
+	int "Packet size [bytes] can use buffers from pool #2"
+	default 0
+	---help---
+        0 - means that packet size for the pool will be defined accordingly
+        with MTU of the port that use this pool.
+
+config MV_ETH_BM_3_PKT_SIZE
+	depends on MV_ETH_BM
+	int "Packet size [bytes] can use buffers from pool #3"
+	default 256 if (MV_ETH_PORTS_NUM != 4)
+	default 0
+	---help---
+        0 - means that packet size for the pool will be defined accordingly
+        with MTU of the port that use this pool.
+
+menuconfig  MV_ETH_BM_PORT_0
+        depends on (MV_ETH_BM && (MV_ETH_PORTS_NUM != 0))
+        bool "BM configuration for GbE #0"
+        default y
+        ---help---
+
+config  MV_ETH_BM_PORT_0_LONG_POOL
+	int "Long BM pool for GbE #0"
+	depends on MV_ETH_BM_PORT_0
+	range -1 3 if MV_ETH_BM_CPU
+	range 0 3 if !MV_ETH_BM_CPU
+	default 0
+	---help---
+	BM pool to be used for GbE #0 port to process long packets
+	-1 means that port will choose BM pool closest to required buffers size.
+
+config  MV_ETH_BM_PORT_0_SHORT_POOL
+        int "Short BM pool for GbE #0"
+        depends on MV_ETH_BM_PORT_0
+        range 0 3
+        default 3 if (MV_ETH_PORTS_NUM != 4)
+	default 0
+	---help---
+	BM pool to be used for GbE #0 port to process short packets
+
+config  MV_ETH_BM_PORT_0_LONG_BUF_NUM
+        int "Number of buffers for Long pool of GbE #0"
+        depends on MV_ETH_BM_PORT_0
+        range 128 16384
+        default 2048
+        ---help---
+	Number of long buffers allocated for this port.
+
+config  MV_ETH_BM_PORT_0_SHORT_BUF_NUM
+        int "Number of buffers for Short pool of GbE #0"
+        depends on MV_ETH_BM_PORT_0 && (MV_ETH_BM_PORT_0_LONG_POOL != MV_ETH_BM_PORT_0_SHORT_POOL)
+        range 128 16384
+        default 3072
+	---help---
+        Number of short buffers allocated for this port.
+
+menuconfig  MV_ETH_BM_PORT_1
+        depends on (MV_ETH_BM && (MV_ETH_PORTS_NUM != 0) && (MV_ETH_PORTS_NUM != 1))
+        bool "BM configuration for GbE #1"
+        default y
+        ---help---
+
+config  MV_ETH_BM_PORT_1_LONG_POOL
+        int "Long BM pool for GbE #1"
+        depends on MV_ETH_BM_PORT_1
+	range -1 3 if MV_ETH_BM_CPU
+	range 0 3 if !MV_ETH_BM_CPU
+        default 1
+        ---help---
+        BM pool to be used for GbE #1 port to process long packets.
+	-1 means that port will choose BM pool closest to required buffers size.
+
+config  MV_ETH_BM_PORT_1_SHORT_POOL
+        int "Short BM pool for GbE #1"
+        depends on MV_ETH_BM_PORT_1
+        range 0 3
+        default 3 if (MV_ETH_PORTS_NUM != 4)
+	default 1
+        ---help---
+        BM pool to be used for GbE #1 port to process short packets.
+
+config  MV_ETH_BM_PORT_1_LONG_BUF_NUM
+        int "Number of buffers for Long pool of GbE #1"
+        depends on MV_ETH_BM_PORT_1
+        range 128 16384
+        default 2048
+        ---help---
+	Number of long buffers allocated for this port.
+
+config  MV_ETH_BM_PORT_1_SHORT_BUF_NUM
+        int "Number of buffers for Short pool of GbE #1"
+        depends on MV_ETH_BM_PORT_1 && (MV_ETH_BM_PORT_1_LONG_POOL != MV_ETH_BM_PORT_1_SHORT_POOL)
+        range 128 16384
+        default 3072
+        ---help---
+	Number of short buffers allocated for this port.
+
+menuconfig  MV_ETH_BM_PORT_2
+        depends on (MV_ETH_BM && (MV_ETH_PORTS_NUM != 0) && (MV_ETH_PORTS_NUM != 1) &&  (MV_ETH_PORTS_NUM != 2))
+        bool "BM configuration for GbE #2"
+        default y
+        ---help---
+
+config  MV_ETH_BM_PORT_2_LONG_POOL
+        int "Long BM pool for GbE #2"
+        depends on MV_ETH_BM_PORT_2
+        range -1 3 if MV_ETH_BM_CPU
+        range 0 3 if !MV_ETH_BM_CPU
+        default 2
+        ---help---
+        BM pool to be used for GbE #2 port to process long packets.
+	-1 means that port will choose BM pool closest to required buffers size.
+
+config  MV_ETH_BM_PORT_2_SHORT_POOL
+        int "Short BM pool for GbE #2"
+        depends on MV_ETH_BM_PORT_2
+        range 0 3
+        default 3 if (MV_ETH_PORTS_NUM != 4)
+        default 2
+        ---help---
+	BM pool to be used for GbE #2 port to process short packets.
+
+config  MV_ETH_BM_PORT_2_LONG_BUF_NUM
+        int "Number of buffers for Long pool of GbE #2"
+        depends on MV_ETH_BM_PORT_2
+        range 128 16384
+        default 2048
+        ---help---
+        Number of long buffers allocated for this port.
+
+config  MV_ETH_BM_PORT_2_SHORT_BUF_NUM
+        int "Number of buffers for Short pool of GbE #2"
+        depends on MV_ETH_BM_PORT_2 && (MV_ETH_BM_PORT_2_LONG_POOL != MV_ETH_BM_PORT_2_SHORT_POOL)
+        range 128 16384
+        default 3072
+        ---help---
+        Number of short buffers allocated for this port.
+
+menuconfig  MV_ETH_BM_PORT_3
+        depends on (MV_ETH_BM && (MV_ETH_PORTS_NUM != 0) && (MV_ETH_PORTS_NUM != 1) && (MV_ETH_PORTS_NUM != 2) && (MV_ETH_PORTS_NUM != 3))
+        bool "BM configuration for GbE #3"
+        default y
+        ---help---
+
+config  MV_ETH_BM_PORT_3_LONG_POOL
+        int "Long BM pool for GbE #3"
+        depends on MV_ETH_BM_PORT_3
+        range -1 3 if MV_ETH_BM_CPU
+        range 0 3 if !MV_ETH_BM_CPU
+        default 3
+        ---help---
+        BM pool to be used for GbE #3 port to process long packets.
+	-1 means that port will choose BM pool closest to required buffers size.
+
+config  MV_ETH_BM_PORT_3_SHORT_POOL
+        int "Short BM pool for GbE #3"
+        depends on MV_ETH_BM_PORT_3
+        range 0 3
+        default 3
+        ---help---
+	BM pool to be used for GbE #3 port to process short packets.
+
+config  MV_ETH_BM_PORT_3_LONG_BUF_NUM
+        int "Number of buffers for Long pool of GbE #3"
+        depends on MV_ETH_BM_PORT_3
+        range 128 16384
+        default 2048
+        ---help---
+        Number of long buffers allocated for this port.
+
+config  MV_ETH_BM_PORT_3_SHORT_BUF_NUM
+        int "Number of buffers for Short pool of GbE #3"
+        depends on MV_ETH_BM_PORT_3 && (MV_ETH_BM_PORT_3_LONG_POOL != MV_ETH_BM_PORT_3_SHORT_POOL)
+        range 128 16384
+        default 3072
+        ---help---
+        Number of short buffers allocated for this port.
+
+endmenu
+
+menuconfig MV_ETH_PNC
+	depends on MV_ETH_NETA && (ARCH_FEROCEON_KW2 || ARCH_ARMADA_XP || ARCH_MVEBU)
+	bool "PnC support"
+	default y
+	---help---
+	PnC module is used for parse of incoming traffic.
+	If PnC is involved, incoming traffic is parsed by PnC, legacy mode is bypass.
+	On LK-3.10 legacy and pnc co-exist, user can select it in FDT.
+	On other LK version, it depends on corresponding SoC and config file.
+
+config MV_ETH_PNC_MCAST_NUM
+        depends on MV_ETH_PNC
+        int "Use PnC for Multicast MAC addresses filtering"
+        default 8
+        ---help---
+	Number of Multicast addresses can be matched and accepted
+	for all ports
+
+config MV_ETH_PNC_VLAN_PRIO
+        depends on MV_ETH_PNC
+        int "Use PnC for VLAN priority mapping"
+        range 0 32
+        default 7
+        ---help---
+	Number of VLAN priorities can be mapped to different RXQs for all ports
+
+config MV_ETH_PNC_ETYPE
+        depends on MV_ETH_PNC
+        int "Use PnC for extra ETYPE detection"
+        default 0
+        ---help---
+        Number of extra ETYPEs can be detected in addition to
+	ARP, IPv4, IPv6, PPPoE are detected by default.
+
+config MV_ETH_PNC_DSCP_PRIO
+        depends on MV_ETH_PNC
+        int "Use PnC for DSCP priority mapping"
+        range 0 256
+        default 16
+        ---help---
+        Number of DSCP priorities can be mapped to different RXQs for all ports
+
+config MV_ETH_PNC_L3_FLOW
+	depends on MV_ETH_PNC
+        bool "Use PNC for L3 Flows detection"
+        default n
+        ---help---
+        Use PNC rules for IPv4 and IPv6 Flows processing.
+        When enabled, MV_ETH_PNC_WOL will be disabled.
+
+config MV_ETH_PNC_L3_FLOW_LINES
+	depends on MV_ETH_PNC_L3_FLOW
+        int "Number of PNC L3 flows entries"
+        range 20 256
+        default 25
+        ---help---
+        Number of PNC L3 flows entries
+
+config MV_ETH_PNC_WOL
+	depends on MV_ETH_PNC
+	bool "Use PNC for Wake On LAN support"
+	default n
+	---help---
+	Use PNC rules for TCAM filtering for Wake on LAN support.
+
+menuconfig MV_ETH_HWF
+	depends on (MV_ETH_PNC && MV_ETH_BM)
+        bool "Hardware Forwarding support (HWF)"
+        default y
+        ---help---
+
+config MV_ETH_HWF_TXQ_DROP
+	depends on MV_ETH_HWF
+	int "HWF Drop Threshold [%]"
+	default 60
+	---help---
+
+config MV_ETH_HWF_TXQ_DROP_RND
+        depends on MV_ETH_HWF
+        int "HWF Drop Random Generator bits"
+        default 0
+        ---help---
+
+config MV_ETH_PMT
+        depends on (MV_ETH_NETA && ARCH_FEROCEON_KW2)
+        bool "Packet Modification Table (PMT)"
+        default n
+        ---help---
+
+config MV_ETH_PMT_FLOWS
+	int "Number of different flows can be set to PMT"
+	depends on MV_ETH_PMT
+	default 256
+	---help---
+	Depend on total number of PMT entries and
+	number of PMT entires reserved for each flow.
+	MV_ETH_PMT_SIZE >= (MV_ETH_PMT_FLOWS * (MV_ETH_PMT_CMD_PER_FLOW + 1))
+
+config MV_ETH_PMT_CMD_PER_FLOW
+        int "Number of PMT entries reserved for each flow"
+        depends on MV_ETH_PMT
+        default 12
+        ---help---
+        Depend on total number of PMT entries and
+        number of flows to be supported.
+	MV_ETH_PMT_SIZE >= (MV_ETH_PMT_FLOWS * (MV_ETH_PMT_CMD_PER_FLOW + 1))
+
+menu "Network Interface configuration"
+
+config  MV_ETH_0_MTU
+	int "Giga port #0 MTU value"
+	depends on (MV_ETH_PORTS_NUM != 0)
+ 	default 1500
+        ---help---
+	Default MTU value for Marvell GbE port #0
+
+config  MV_ETH_0_MACADDR
+        string "Giga port #0 MAC address"
+        depends on (MV_ETH_PORTS_NUM != 0)
+        default "00:00:00:00:00:80"
+        ---help---
+        Default MAC address for Marvell GbE port #0
+
+config  MV_ETH_1_MTU
+        int "Giga port #1 MTU value"
+        depends on (MV_ETH_PORTS_NUM != 0) && (MV_ETH_PORTS_NUM != 1)
+        default 1500
+        ---help---
+	Default MTU value for Marvell GbE port #1
+
+config  MV_ETH_1_MACADDR
+        string "Giga port #1 MAC address"
+        depends on (MV_ETH_PORTS_NUM != 0) && (MV_ETH_PORTS_NUM != 1)
+        default "00:00:00:00:00:81"
+        ---help---
+        Default MAC address for Marvell GbE port #1
+
+config  MV_ETH_2_MTU
+        int "Giga port #2 MTU value"
+        depends on (MV_ETH_PORTS_NUM != 0) && (MV_ETH_PORTS_NUM != 1) && (MV_ETH_PORTS_NUM != 2)
+        default 1500
+        ---help---
+        Default MTU value for Marvell GbE port #2
+
+config  MV_ETH_2_MACADDR
+        string "Giga port #2 MAC address"
+        depends on (MV_ETH_PORTS_NUM != 0) && (MV_ETH_PORTS_NUM != 1) && (MV_ETH_PORTS_NUM != 2)
+        default "00:00:00:00:00:82"
+        ---help---
+        Default MAC address for Marvell GbE port #2
+
+config  MV_ETH_3_MTU
+        int "Giga port #3 MTU value"
+        depends on (MV_ETH_PORTS_NUM != 0) && (MV_ETH_PORTS_NUM != 1) && (MV_ETH_PORTS_NUM != 2) && (MV_ETH_PORTS_NUM != 3)
+        default 1500
+        ---help---
+        Default MTU value for Marvell GbE port #3
+
+config  MV_ETH_3_MACADDR
+        string "Giga port #3 MAC address"
+        depends on (MV_ETH_PORTS_NUM != 0) && (MV_ETH_PORTS_NUM != 1) && (MV_ETH_PORTS_NUM != 2) && (MV_ETH_PORTS_NUM != 3)
+        default "00:00:00:00:00:83"
+        ---help---
+        Default MAC address for Marvell GbE port #3
+
+endmenu
+
+menu "Rx/Tx Queue configuration"
+
+config  MV_ETH_RXQ
+        int "Number of RX queues"
+        default 1
+        ---help---
+          Multiple RX queue support.
+
+config  MV_ETH_TXQ
+        int "Number of TX queues"
+        default 1
+        ---help---
+          Multiple TX queue support.
+
+config MV_ETH_RXQ_DESC
+	int "Number of Rx descriptors"
+	depends on (MV_ETH_PORTS_NUM != 0)
+	default 128
+        ---help---
+	The number of Rx descriptors in each Rx queue.
+
+config MV_ETH_RXQ_DEF
+        int "Default RXQ to recieve packets"
+        default 0
+        ---help---
+
+config MV_ETH_TXQ_DESC
+	int "Number of Tx descriptors"
+	depends on (MV_ETH_PORTS_NUM != 0)
+	default 532
+        ---help---
+	The number of Tx descriptors in each Tx queue.
+
+config MV_ETH_TXQ_DEF
+        int "Default TXQ to send local generated packets"
+        default 0
+        ---help---
+
+endmenu
+
+menu "IP/TCP/UDP Offloading"
+
+config  MV_ETH_TX_CSUM_OFFLOAD
+        bool "L3/L4 TX checksum offload support for Marvell network interface"
+        default y
+        ---help---
+	Marvell network driver compiled with TCP/UDP over IPv4/IPv6 TX checksum offload support.
+
+config MV_ETH_TX_CSUM_OFFLOAD_DEF
+	depends on MV_ETH_TX_CSUM_OFFLOAD
+        bool "Default value for L3/L4 TX checksum offload: enable/disable"
+        default y
+        ---help---
+	Can be changed in run-time using ethtool
+
+config  MV_ETH_RX_CSUM_OFFLOAD
+        bool "L3/L4 RX checksum offload support for Marvell network interface"
+        default y
+        ---help---
+        Marvell network driver compiled with TCP/UDP over IPv4/IPv6 RX checksum offload support.
+
+config MV_ETH_RX_CSUM_OFFLOAD_DEF
+	depends on MV_ETH_RX_CSUM_OFFLOAD
+        bool "Default value for L3/L4 RX checksum offload: enable/disable"
+        default y
+        ---help---
+	Can be changed in run-time using ethtool
+
+config  MV_ETH_GRO
+        bool "GRO Support for Marvell network interface"
+	default y
+        ---help---
+        Marvell network driver compiled with GRO (Generic Receive Offload) support.
+
+config  MV_ETH_GRO_DEF
+	depends on MV_ETH_GRO
+        bool "Default value for GRO feature: enable/disable"
+	default n
+        ---help---
+        Can be changed in run-time using ethtool
+
+config  MV_ETH_TSO
+        bool "TSO Support for Marvell network interface"
+	default y
+        ---help---
+        Marvell network driver compiled with TSO (TCP Segmentation Offload) support.
+
+config  MV_ETH_TSO_DEF
+	depends on MV_ETH_TSO
+        bool "Default value for TSO feature: enable/disable"
+	default n
+        ---help---
+	Can be changed in run-time using ethtool
+
+endmenu
+
+menu "Control and Statistics"
+
+config  MV_NETA_DEBUG_CODE
+	depends on MV_ETH_DEBUG_CODE
+	bool "Add run-time debug code"
+	default n
+	---help---
+	Enable run-time enable/disable enter debug code blocks
+
+config  MV_ETH_STAT_ERR
+        bool "Collect error statistics"
+        default y
+	---help---
+	Marvell network interface driver collect minimal number of statistics.
+	Only for error conditions. Can be displayed using mv_eth_tool.
+
+config  MV_ETH_STAT_INF
+        bool "Collect event statistics"
+        default y
+        ---help---
+	Marvell network interface driver collect event statistics.
+	Provide more information about driver functionality and almost doesn't
+	effect performance. Can be displayed using mv_eth_tool.
+
+config  MV_ETH_STAT_DBG
+        bool "Collect debug statistics"
+        default n
+        ---help---
+	Marvell network interface driver collect a lot of statistics.
+	Used for Debug mode. Decrease performance. Can be displayed using mv_eth_tool.
+
+config  MV_ETH_STAT_DIST
+        bool "Collect debug distribution statistics"
+        default n
+        ---help---
+        Marvell network interface driver collect a lot of statistics.
+        Used for Debug mode. Decrease performance. Can be displayed using mv_eth_tool.
+
+config  MV_LINUX_COUNTERS_DISABLE
+	bool "Disable collection of SNMP statistics and Netfilter Contract statistics"
+	default n
+	---help---
+	Disable collection of SNMP statistics and Netfilter Contract statistics to improve performance.
+
+endmenu
+
+menu "Advanced Features"
+
+config MV_NETA_SKB_RECYCLE
+	depends on NET_SKB_RECYCLE
+	bool "NETA Skb recycle"
+	default y
+	---help---
+	Work-in-progress and experimental.
+
+	This option enables skb's to be returned via a callback at kfree to
+	the allocator to make a fastpath for very skb consuming network
+	applications.
+
+config MV_NETA_SKB_RECYCLE_DEF
+	depends on MV_NETA_SKB_RECYCLE
+	int "Default value for SKB recycle:  0 - disable, 1 - enable"
+	default 1
+	---help---
+
+config MV_NETA_TXDONE_PROCESS_METHOD
+	bool "TX_DONE event process method"
+	default y
+	help
+	  It's used for choosing TX_DONE event process method
+	  MV_NETA_TXDONE_ISR means processing TX_DONE event in interrupt mode
+	  MV_NETA_TXDONE_IN_TIMER means using regular timer to process TX_DONE event in polling mode
+	  MV_NETA_TXDONE_IN_HRTIMER means using high-resolution timer to process TX_DONE event in polling mode
+
+choice
+	prompt "TX_DONE event process method"
+	depends on MV_NETA_TXDONE_PROCESS_METHOD
+	default MV_NETA_TXDONE_IN_HRTIMER
+
+	config  MV_NETA_TXDONE_ISR
+		bool "Use interrupt to process TX_DONE event"
+		---help---
+		When chosen TX_DONE event will be process in interrupt mode
+		When unchosen TX_DONE event will be processed in polling mode
+
+	config MV_NETA_TXDONE_IN_TIMER
+		bool "Use regular timer to process TX_DONE event"
+		---help---
+		When chosen TX_DONE event will be process by regular timer in polling mode.
+
+	config MV_NETA_TXDONE_IN_HRTIMER
+		depends on HIGH_RES_TIMERS
+		bool "Use high resolution timer to process TX_DONE event"
+		---help---
+		When chosen TX_DONE event will be process by high resolution timer in polling mode.
+		High resolution timer can support higher precision in ns level.
+		If high resolution timer is enabled, TX processing
+		can free SKB memory much faster.
+
+endchoice
+
+config MV_NETA_TX_DONE_HIGH_RES_TIMER_PERIOD
+	depends on MV_NETA_TXDONE_IN_HRTIMER
+        int "Periodical Tx Done high resolution timer period"
+        default 1000
+        range 10 10000
+        ---help---
+        Periodical high resolution timer period for Tx Done operation in [usec]
+        Its precision is much higher than regular timer whose higest precision is 10 msec
+        Tx done high resolution timer's higest precison is 10 usec
+        Must be larger than or equal to 10 and be smaller than or equal to 10000.
+
+config  MV_NETA_TX_DONE_TIMER_PERIOD
+	depends on MV_NETA_TXDONE_IN_TIMER
+        int "Periodical Tx Done timer period"
+        default 10
+        ---help---
+          Periodical timer period for Tx Done operation in [msec].
+
+config  MV_ETH_TXDONE_COAL_PKTS
+	int "Threshold for TX_DONE event trigger"
+	default 16
+	---help---
+	Number of packets will be sent before TX_DONE event will be triggered
+	by interrupt or polling.
+
+config  MV_ETH_RX_COAL_PKTS
+        int "Threshold [number of packets] for RX interrupt"
+        default 32
+        ---help---
+        Number of packets will be received before RX interrupt will be generated by HW.
+
+config  MV_ETH_RX_COAL_USEC
+        int "Threshold [usec] for RX interrupt"
+        default 100
+        ---help---
+        Time delay in usec before RX interrupt will be generated by HW if number of
+	received packets larger than 0 but smaller than MV_ETH_RX_COAL_PKTS
+
+config  MV_ETH_RX_DESC_PREFETCH
+	bool "Enable RX descriptor prefetch"
+	default n
+	---help---
+	Use pld instruction to prefetch one RX descriptor ahead
+
+config  MV_ETH_RX_PKT_PREFETCH
+        bool "Enable RX packet prefetch"
+        default n
+        ---help---
+        Use pld instruction to prefetch first two cache lines of received packet data
+
+config MV_ETH_RX_SPECIAL
+	depends on MV_ETH_PNC
+        bool "Enable special RX processing"
+        default n
+        ---help---
+        Enable special RX processing for packets with RI_RX_SEPCIAL PNC result info bit set
+
+config MV_ETH_TX_SPECIAL
+	bool "Enable special TX processing"
+ 	default n
+	---help---
+	Enable special TX processing for packets with signal header (SH)
+
+config MV_ETH_L2FW
+	bool "L2 Forwarding support"
+	default n
+	---help---
+	Enable L2 Forwarding support for received packets.
+	Three modes are supported: Send packet without change, Swap MAC DA<->SA,
+	Copy the whole packet and swap MAC
+
+config MV_ETH_L2SEC
+	bool "L2 Forwarding IPSec support"
+	depends on MV_ETH_L2FW
+	default n
+	---help---
+	Handle encrypted packets with CESA.
+
+config MV_ETH_L2FW_DEBUG
+	depends on MV_ETH_L2FW
+	bool "Add run-time L2FW debug code"
+	default n
+	---help---
+	Enable L2FW run-time enable/disable enter debug code blocks
+
+config MV_ETH_RX_POLL_WEIGHT
+	int "poll weight for the RX poll() function"
+	default 64
+	range 1 255
+	---help---
+	poll weight for the RX poll() function; must be less or equal to 255
+
+config MV_ETH_EXTRA_BUF_SIZE
+	int "Extra buffer size in bytes"
+	default 120
+	range 120 16384
+	---help---
+	Size of buffers allocated for extra pool and used in special cases like TSO,
+	fragmentattion and others
+
+config MV_ETH_EXTRA_BUF_NUM
+        int "Number of extra buffers allocated for each port"
+        default MV_ETH_TXQ_DESC
+	---help---
+	Number of extra buffers allocated for each port
+endmenu
+
+menu "Network Fast Processing (NFP) support"
+
+config  MV_ETH_NFP
+	depends on MV_ETH_NETA
+        bool "NFP support"
+	default n
+        ---help---
+        Choosing this option will enable Network Fast Processing support.
+	Kernel image will be able to use NFP modules.
+	NFP provided as different package and must be compiled separately.
+	NFP support include two modules:
+	NFP core functionality and NFP dynamic learning.
+
+config MV_ETH_NFP_HOOKS
+        bool "NFP IP stack Hooks"
+	depends on MV_ETH_NFP
+	default y
+        ---help---
+        Choosing this option will enable NFP Dynamic Learning.
+	Marvell specific code was added to few files
+	in Linux Network Stack. Without this configration option only
+	static NFP configuration is enabled.
+
+config MV_ETH_NFP_EXT
+	bool "Support NFP for External (non GBE) network interfaces"
+	depends on MV_ETH_NFP
+	default n
+	---help---
+	Enable NFP support for External (non GBE) network interfaces.
+	It doesn't require special changes in external network drivers,
+	but NFP can be used only for NAPI capable drivers.
+	Leave default if unsure.
+
+config MV_ETH_NFP_EXT_NUM
+	depends on MV_ETH_NFP_EXT
+	int "Maximum number of External (non-Gbe) interfaces"
+	default 1
+	range 1 4
+endmenu
+
+menuconfig MV_ETH_NAPI
+        bool "NAPI configuration"
+        default y
+	---help---
+	This menu used for NAPI groups configuration.
+	Leave default if RSS support is not required.
+	Enable create multiple NAPI groups per port and attach RXQs and CPUs to NAPI groups.
+	Each CPU and each RXQ can be attached to single NAPI group.
+
+config  MV_ETH_NAPI_GROUPS
+        int "Number of NAPI instances can be used per port"
+	depends on MV_ETH_NAPI
+	range 1 NR_CPUS if SMP
+	range 1 1 if !SMP
+        default 1
+        ---help---
+	Different RXQs and TXQs can be processed by different CPU using different NAPI instances
+
+menu "NAPI group #0 configuration"
+	depends on MV_ETH_NAPI
+
+config MV_ETH_GROUP0_CPU
+	hex "CPU affinity for group0"
+	range 0x0 0xf if (SMP && (NR_CPUS=4))
+	range 0x0 0x7 if (SMP && (NR_CPUS=3))
+	range 0x0 0x3 if (SMP && (NR_CPUS=2))
+	range 0x0 0x1 if !SMP
+	default 0xf
+
+config MV_ETH_GROUP0_RXQ
+	hex "RXQ affinity for group0"
+	range 0x0 0xff
+	default 0xff
+endmenu
+
+menu "NAPI group #1 configuration"
+        depends on MV_ETH_NAPI
+	depends on (MV_ETH_NAPI_GROUPS != 1)
+
+config MV_ETH_GROUP1_CPU
+	hex "CPU affinity for group1"
+	range 0x0 0xf if (SMP && (NR_CPUS=4))
+	range 0x0 0x7 if (SMP && (NR_CPUS=3))
+	range 0x0 0x3 if (SMP && (NR_CPUS=2))
+	range 0x0 0x1 if !SMP
+	default 0x0
+
+config MV_ETH_GROUP1_RXQ
+	hex "RXQ affinity for group1"
+	range 0x0 0xff
+	default 0x0
+endmenu
+
+menu "NAPI group #2 configuration"
+	depends on MV_ETH_NAPI
+	depends on (MV_ETH_NAPI_GROUPS != 1) && (MV_ETH_NAPI_GROUPS != 2)
+
+config MV_ETH_GROUP2_CPU
+	hex "CPU affinity for group2"
+	range 0x0 0xf if (SMP && (NR_CPUS=4))
+	range 0x0 0x7 if (SMP && (NR_CPUS=3))
+	range 0x0 0x3 if (SMP && (NR_CPUS=2))
+	range 0x0 0x1 if !SMP
+	default 0x0
+
+config MV_ETH_GROUP2_RXQ
+	hex "RXQ affinity for group2"
+	range 0x0 0xff
+	default 0x0
+endmenu
+
+menu "NAPI group #3 configuration"
+	depends on MV_ETH_NAPI
+	depends on (MV_ETH_NAPI_GROUPS != 1) && (MV_ETH_NAPI_GROUPS != 2) && (MV_ETH_NAPI_GROUPS != 3)
+
+config MV_ETH_GROUP3_CPU
+	hex "CPU affinity for group3"
+	range 0x0 0xf if (SMP && (NR_CPUS=4))
+	range 0x0 0x7 if (SMP && (NR_CPUS=3))
+	range 0x0 0x3 if (SMP && (NR_CPUS=2))
+	range 0x0 0x1 if !SMP
+	default 0x0
+
+config MV_ETH_GROUP3_RXQ
+	hex "RXQ affinity for group3"
+	range 0x0 0xff
+	default 0x0
+endmenu
+
+menu "PON support for Network driver"
+
+config MV_PON
+        bool "PON support"
+        depends on MV_ETH_NETA && MV_INCLUDE_PON
+        ---help---
+        Choose this option to support PON port in Marvell network driver
+
+config MV_PON_TXP_DEF
+        int "Default T-CONT to send local generated packets"
+        depends on MV_PON
+        default 0
+        ---help---
+
+config MV_PON_TXQ_DEF
+        int "Default TXQ to send local generated packets"
+        depends on MV_PON
+        default 0
+        ---help---
+
+endmenu
+
+menu "ERRATA / WA"
+
+config MV_ETH_ERRATA_SMI_ACCESS
+        bool "use SMI port 1 instead of SMI port 0"
+        depends on ARMADA_XP_REV_Z1
+        default y
+        ---help---
+        Using SMI port 1 instead of SMI port 0 prevents the
+        link up/link down on a different port than the port on which
+        an action such as changing speed or starting autonegotiation is done.
+        (NetA BTS #313, DSMP LSP #42, ARMADA XP Z1).
+
+config MV_ETH_REDUCE_BURST_SIZE_WA
+        bool "Limit burst size of RX and TX transactions to 16B in IOCC mode"
+        depends on AURORA_IO_CACHE_COHERENCY
+        default y
+        ---help---
+        To avoid CPU make invalidate which might cause hang, force partial
+        read/write by limiting the burst size to 16B since it is less than the
+        size of a cache line.
+
+config MV_ETH_BE_WA
+        bool "WA for RX and TX descriptors swap in HW issue"
+        depends on CPU_ENDIAN_BE32
+        default n
+        ---help---
+        If set swap of RX and TX descriptors in BE mode will be done by driver
+
+endmenu
diff --git a/drivers/net/ethernet/mvebu_net/neta/Makefile b/drivers/net/ethernet/mvebu_net/neta/Makefile
new file mode 100644
index 000000000000..eef4370896f4
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/Makefile
@@ -0,0 +1,90 @@
+#
+# Makefile for the Marvell Gigabit Ethernet driver
+#
+
+ifeq ($(CONFIG_ARCH_MVEBU),y)
+NETA_HAL_DIR = hal
+else
+NETA_HAL_DIR = ../../mv_hal/neta
+endif
+
+NETA_GBE_DIR   = $(NETA_HAL_DIR)/gbe
+NETA_BM_DIR    = $(NETA_HAL_DIR)/bm
+NETA_PNC_DIR   = $(NETA_HAL_DIR)/pnc
+NETA_PMT_DIR   = $(NETA_HAL_DIR)/pmt
+
+mv_neta-objs += $(NETA_GBE_DIR)/mvNeta.o $(NETA_GBE_DIR)/mvNetaDebug.o
+
+ifeq ($(CONFIG_MV_ETH_PNC),y)
+mv_neta-objs += $(NETA_PNC_DIR)/mvTcam.o  $(NETA_PNC_DIR)/mvPncAging.o \
+		$(NETA_PNC_DIR)/mvPnc.o $(NETA_PNC_DIR)/mvPncLb.o
+mv_neta-objs += pnc/pnc_sysfs.o
+
+ifeq ($(CONFIG_MV_ETH_PNC_L3_FLOW),y)
+mv_neta-objs += $(NETA_PNC_DIR)/mvPncRxq.o pnc/rxq_map_sysfs.o
+endif
+
+ifeq ($(CONFIG_MV_ETH_PNC_WOL),y)
+mv_neta-objs += $(NETA_PNC_DIR)/mvPncWol.o pnc/wol_sysfs.o
+endif
+endif
+
+ifeq ($(CONFIG_MV_ETH_BM),y)
+mv_neta-objs += $(NETA_BM_DIR)/mvBm.o bm/bm_sysfs.o bm/mv_eth_bm.o
+endif
+
+ifeq ($(CONFIG_MV_ETH_HWF),y)
+mv_neta-objs += hwf/hwf_bm.o
+endif
+
+ifeq ($(CONFIG_MV_ETH_PMT),y)
+mv_neta-objs += $(NETA_PMT_DIR)/mvPmt.o pmt/pmt_sysfs.o
+endif
+
+ifeq ($(CONFIG_MV_ETH_HWF),y)
+mv_neta-objs += $(NETA_GBE_DIR)/mvHwf.o hwf/hwf_sysfs.o
+endif
+
+mv_neta-objs += net_dev/mv_netdev.o net_dev/mv_ethernet.o net_dev/mv_eth_tool.o
+mv_neta-objs += net_dev/mv_eth_sysfs.o net_dev/mv_eth_rx_sysfs.o net_dev/mv_eth_tx_sysfs.o
+mv_neta-objs += net_dev/mv_eth_tx_sched_sysfs.o net_dev/mv_eth_qos_sysfs.o net_dev/mv_eth_rss_sysfs.o
+
+ifeq ($(CONFIG_MV_ETH_L2FW),y)
+mv_neta-objs += l2fw/l2fw_sysfs.o l2fw/mv_eth_l2fw.o
+
+ifeq ($(CONFIG_MV_ETH_L2SEC),y)
+mv_neta-objs += l2fw/mv_eth_l2sec.o
+endif
+endif
+
+ifeq ($(CONFIG_MV_PON),y)
+mv_neta-objs += mv_pon_sysfs.o
+endif
+
+ifeq ($(CONFIG_ARCH_MVEBU),y)
+
+ccflags-y       += $(MVEBU_NET_FLAGS)
+
+ccflags-y	+= $(INCLUDE_DIRS)
+
+ccflags-y       += -I$(PLAT_DIR)/neta
+ccflags-y       += -I$(PLAT_DIR)/neta/hal
+
+else
+
+ifneq ($(MACHINE),)
+include $(srctree)/$(MACHINE)/config/mvRules.mk
+endif
+
+mv_neta-objs 	+= $(NETA_GBE_DIR)/mvNetaAddrDec.o
+
+ccflags-y       += -I$(PLAT_PATH_I)/$(HAL_NETA_DIR)
+ccflags-y       += -I$(PLAT_PATH_I)/$(HAL_ETHPHY_DIR)
+ccflags-y       += -I$(PLAT_PATH_I)/$(LSP_MUX_DIR)
+endif
+
+ifeq ($(NETMAP),y)
+ccflags-y       += -DCONFIG_NETMAP -I$(NETMAP_DIR) -I$(NETMAP_DIR)/../sys
+endif
+
+obj-$(CONFIG_MV_ETH_NETA) += mv_neta.o
diff --git a/drivers/net/ethernet/mvebu_net/neta/bm/bm_sysfs.c b/drivers/net/ethernet/mvebu_net/neta/bm/bm_sysfs.c
new file mode 100644
index 000000000000..d7c49b9f3621
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/bm/bm_sysfs.c
@@ -0,0 +1,141 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include "gbe/mvNeta.h"
+#include "net_dev/mv_netdev.h"
+#include "bm/mvBm.h"
+
+static ssize_t bm_help(char *buf)
+{
+	int off = 0;
+
+	off += sprintf(buf+off, "cat                regs         - show BM registers\n");
+	off += sprintf(buf+off, "cat                stat         - show BM status\n");
+	off += sprintf(buf+off, "cat                config       - show compile-time BM configuration\n");
+	off += sprintf(buf+off, "echo p v           > dump       - dump BM pool <p>. v=0-brief, v=1-full\n");
+	off += sprintf(buf+off, "echo p s           > size       - set packet size <s> to BM pool <p>\n");
+
+	return off;
+}
+
+static ssize_t bm_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	int          err = 0;
+	const char   *name = attr->attr.name;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "help"))
+		return bm_help(buf);
+	else if (!strcmp(name, "regs"))
+		mvNetaBmRegs();
+	else if (!strcmp(name, "stat"))
+		mvNetaBmStatus();
+	else if (!strcmp(name, "config"))
+		mv_eth_bm_config_print();
+	else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+	return err;
+}
+static ssize_t bm_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0, pool = 0, val = 0;
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%d %d", &pool, &val);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "dump")) {
+		mvNetaBmPoolDump(pool, val);
+	} else if (!strcmp(name, "size")) {
+		err = mv_eth_ctrl_pool_size_set(pool, val);
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+	local_irq_restore(flags);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(size,   S_IWUSR, NULL, bm_store);
+static DEVICE_ATTR(dump,   S_IWUSR, NULL, bm_store);
+static DEVICE_ATTR(config, S_IRUSR, bm_show, NULL);
+static DEVICE_ATTR(stat,   S_IRUSR, bm_show, NULL);
+static DEVICE_ATTR(regs,   S_IRUSR, bm_show, NULL);
+static DEVICE_ATTR(help,   S_IRUSR, bm_show, NULL);
+
+static struct attribute *bm_attrs[] = {
+	&dev_attr_size.attr,
+	&dev_attr_dump.attr,
+	&dev_attr_config.attr,
+	&dev_attr_regs.attr,
+	&dev_attr_stat.attr,
+	&dev_attr_help.attr,
+	NULL
+};
+
+static struct attribute_group bm_group = {
+	.name = "bm",
+	.attrs = bm_attrs,
+};
+
+int mv_neta_bm_sysfs_init(struct kobject *neta_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(neta_kobj, &bm_group);
+	if (err)
+		printk(KERN_INFO "sysfs group %s failed %d\n", bm_group.name, err);
+
+	return err;
+}
+
+int mv_neta_bm_sysfs_exit(struct kobject *neta_kobj)
+{
+	sysfs_remove_group(neta_kobj, &bm_group);
+
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/neta/bm/mv_eth_bm.c b/drivers/net/ethernet/mvebu_net/neta/bm/mv_eth_bm.c
new file mode 100644
index 000000000000..88ca9843662a
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/bm/mv_eth_bm.c
@@ -0,0 +1,186 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include "gbe/mvNeta.h"
+#include "bm/mvBm.h"
+
+#include "net_dev/mv_netdev.h"
+
+typedef struct {
+	int             pool_pkt_size[MV_BM_POOLS];
+	MV_BM_CONFIG	port_config[CONFIG_MV_ETH_PORTS_NUM];
+} MV_ETH_BM_CONFIG;
+
+static MV_ETH_BM_CONFIG mv_eth_bm_config;
+
+int mv_eth_bm_config_pkt_size_get(int pool)
+{
+	if (mvNetaMaxCheck(pool, MV_BM_POOLS, "bm_pool"))
+		return -EINVAL;
+
+	return mv_eth_bm_config.pool_pkt_size[pool];
+}
+
+int mv_eth_bm_config_pkt_size_set(int pool, int pkt_size)
+{
+	if (mvNetaMaxCheck(pool, MV_BM_POOLS, "bm_pool"))
+		return -EINVAL;
+
+	mv_eth_bm_config.pool_pkt_size[pool] = pkt_size;
+	return 0;
+}
+
+int mv_eth_bm_config_long_pool_get(int port)
+{
+	if (mvNetaPortCheck(port))
+		return -EINVAL;
+
+	return mv_eth_bm_config.port_config[port].longPool;
+}
+
+int mv_eth_bm_config_long_buf_num_get(int port)
+{
+	if (mvNetaPortCheck(port))
+		return -EINVAL;
+
+	return mv_eth_bm_config.port_config[port].longBufNum;
+}
+
+int mv_eth_bm_config_short_pool_get(int port)
+{
+	if (mvNetaPortCheck(port))
+		return -EINVAL;
+
+	return mv_eth_bm_config.port_config[port].shortPool;
+}
+
+int mv_eth_bm_config_short_buf_num_get(int port)
+{
+	if (mvNetaPortCheck(port))
+		return -EINVAL;
+
+	return mv_eth_bm_config.port_config[port].shortBufNum;
+}
+
+/* Once time call: init configuration structure accordingly with compile time parameters */
+MV_STATUS mv_eth_bm_config_get(void)
+{
+	MV_BM_CONFIG *bmConfig;
+	int           port;
+
+	mv_eth_bm_config.pool_pkt_size[0] = CONFIG_MV_ETH_BM_0_PKT_SIZE;
+	mv_eth_bm_config.pool_pkt_size[1] = CONFIG_MV_ETH_BM_1_PKT_SIZE;
+	mv_eth_bm_config.pool_pkt_size[2] = CONFIG_MV_ETH_BM_2_PKT_SIZE;
+	mv_eth_bm_config.pool_pkt_size[3] = CONFIG_MV_ETH_BM_3_PKT_SIZE;
+
+#ifdef CONFIG_MV_ETH_BM_PORT_0
+	port = 0;
+	bmConfig = &mv_eth_bm_config.port_config[port];
+	memset(bmConfig, 0, sizeof(MV_BM_CONFIG));
+	bmConfig->valid = 1;
+	bmConfig->longPool = CONFIG_MV_ETH_BM_PORT_0_LONG_POOL;
+	bmConfig->shortPool = CONFIG_MV_ETH_BM_PORT_0_SHORT_POOL;
+	bmConfig->longBufNum = CONFIG_MV_ETH_BM_PORT_0_LONG_BUF_NUM;
+
+#if (CONFIG_MV_ETH_BM_PORT_0_SHORT_POOL != CONFIG_MV_ETH_BM_PORT_0_LONG_POOL)
+	bmConfig->shortBufNum = CONFIG_MV_ETH_BM_PORT_0_SHORT_BUF_NUM;
+#endif /* CONFIG_MV_ETH_BM_PORT_0_SHORT_POOL != CONFIG_MV_ETH_BM_PORT_0_LONG_POOL */
+#endif /* CONFIG_MV_ETH_BM_PORT_0 */
+
+#ifdef CONFIG_MV_ETH_BM_PORT_1
+	port = 1;
+	bmConfig = &mv_eth_bm_config.port_config[port];
+	memset(bmConfig, 0, sizeof(MV_BM_CONFIG));
+	bmConfig->valid = 1;
+	bmConfig->longPool = CONFIG_MV_ETH_BM_PORT_1_LONG_POOL;
+	bmConfig->shortPool = CONFIG_MV_ETH_BM_PORT_1_SHORT_POOL;
+	bmConfig->longBufNum = CONFIG_MV_ETH_BM_PORT_1_LONG_BUF_NUM;
+
+#if (CONFIG_MV_ETH_BM_PORT_1_SHORT_POOL != CONFIG_MV_ETH_BM_PORT_1_LONG_POOL)
+	bmConfig->shortBufNum = CONFIG_MV_ETH_BM_PORT_1_SHORT_BUF_NUM;
+#endif /* CONFIG_MV_ETH_BM_PORT_1_SHORT_POOL != CONFIG_MV_ETH_BM_PORT_1_LONG_POOL */
+#endif /* CONFIG_MV_ETH_BM_PORT_1 */
+
+#ifdef CONFIG_MV_ETH_BM_PORT_2
+	port = 2;
+	bmConfig = &mv_eth_bm_config.port_config[port];
+	memset(bmConfig, 0, sizeof(MV_BM_CONFIG));
+	bmConfig->valid = 1;
+	bmConfig->longPool = CONFIG_MV_ETH_BM_PORT_2_LONG_POOL;
+	bmConfig->shortPool = CONFIG_MV_ETH_BM_PORT_2_SHORT_POOL;
+	bmConfig->longBufNum = CONFIG_MV_ETH_BM_PORT_2_LONG_BUF_NUM;
+
+#if (CONFIG_MV_ETH_BM_PORT_2_SHORT_POOL != CONFIG_MV_ETH_BM_PORT_2_LONG_POOL)
+	bmConfig->shortBufNum = CONFIG_MV_ETH_BM_PORT_2_SHORT_BUF_NUM;
+#endif /* CONFIG_MV_ETH_BM_PORT_2_SHORT_POOL != CONFIG_MV_ETH_BM_PORT_2_LONG_POOL */
+#endif /* CONFIG_MV_ETH_BM_PORT_2 */
+
+	#ifdef CONFIG_MV_ETH_BM_PORT_3
+	port = 3;
+	bmConfig = &mv_eth_bm_config.port_config[port];
+	memset(bmConfig, 0, sizeof(MV_BM_CONFIG));
+	bmConfig->valid = 1;
+	bmConfig->longPool = CONFIG_MV_ETH_BM_PORT_3_LONG_POOL;
+	bmConfig->shortPool = CONFIG_MV_ETH_BM_PORT_3_SHORT_POOL;
+	bmConfig->longBufNum = CONFIG_MV_ETH_BM_PORT_3_LONG_BUF_NUM;
+
+#if (CONFIG_MV_ETH_BM_PORT_3_SHORT_POOL != CONFIG_MV_ETH_BM_PORT_3_LONG_POOL)
+	bmConfig->shortBufNum = CONFIG_MV_ETH_BM_PORT_3_SHORT_BUF_NUM;
+#endif /* CONFIG_MV_ETH_BM_PORT_3_SHORT_POOL != CONFIG_MV_ETH_BM_PORT_3_LONG_POOL */
+#endif /* CONFIG_MV_ETH_BM_PORT_3 */
+
+	return MV_OK;
+}
+
+void mv_eth_bm_config_print(void)
+{
+	int           i;
+	MV_BM_CONFIG *bmConfig;
+
+	mvOsPrintf("BM compile time configuration\n");
+	for (i = 0; i < MV_BM_POOLS; i++)
+		mvOsPrintf("pool %d: pkt_size = %d bytes\n", i, mv_eth_bm_config.pool_pkt_size[i]);
+
+	mvOsPrintf("\n");
+	mvOsPrintf("port:  longPool  shortPool  longBufNum  shortBufNum\n");
+	for (i = 0; i < CONFIG_MV_ETH_PORTS_NUM; i++) {
+		bmConfig = &mv_eth_bm_config.port_config[i];
+		if (bmConfig->valid)
+			mvOsPrintf("  %2d:   %4d       %4d        %4d         %4d\n",
+				i, bmConfig->longPool, bmConfig->shortPool,
+				bmConfig->longBufNum, bmConfig->shortBufNum);
+	}
+	mvOsPrintf("\n");
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/neta/hal/.gitignore b/drivers/net/ethernet/mvebu_net/neta/hal/.gitignore
new file mode 100644
index 000000000000..1d5b2ead4159
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hal/.gitignore
@@ -0,0 +1,97 @@
+#
+# NOTE! Don't add files that are generated in specific
+# subdirectories here. Add them in the ".gitignore" file
+# in that subdirectory instead.
+#
+# NOTE! Please use 'git ls-files -i --exclude-standard'
+# command after changing this file, to see if there are
+# any tracked files which get ignored after the change.
+#
+# Normal rules
+#
+.*
+*.o
+*.o.*
+*.a
+*.s
+*.ko
+*.so
+*.so.dbg
+*.mod.c
+*.i
+*.lst
+*.symtypes
+*.order
+modules.builtin
+*.elf
+*.bin
+*.gz
+*.bz2
+*.lzma
+*.xz
+*.lzo
+*.patch
+*.gcno
+
+#
+#
+#
+arch/arm/mach-armadaxp/armada_xp_family/
+arch/arm/mach-armada370/armada_370_family/
+arch/arm/mach-armada375/armada_375_family/
+arch/arm/mach-armada380/armada_380_family/
+arch/arm/plat-armada/common/
+arch/arm/plat-armada/mv_hal/
+arch/arm/plat-armada/mv_drivers_lsp/mv_pp2/
+arch/arm/plat-armada/mv_drivers_lsp/mv_neta/
+#
+# Top-level generic files
+#
+/tags
+/TAGS
+/linux
+/vmlinux
+/vmlinuz
+/System.map
+/Module.markers
+/Module.symvers
+
+#
+# Debian directory (make deb-pkg)
+#
+/debian/
+
+#
+# git files that we don't want to ignore even it they are dot-files
+#
+!.gitignore
+!.mailmap
+
+#
+# Generated include files
+#
+include/config
+include/linux/version.h
+include/generated
+arch/*/include/generated
+
+# stgit generated dirs
+patches-*
+
+# quilt's files
+patches
+series
+
+# cscope files
+cscope.*
+ncscope.*
+
+# gnu global files
+GPATH
+GRTAGS
+GSYMS
+GTAGS
+
+*.orig
+*~
+\#*#
diff --git a/drivers/net/ethernet/mvebu_net/neta/hal/bm/mvBm.c b/drivers/net/ethernet/mvebu_net/neta/hal/bm/mvBm.c
new file mode 100644
index 000000000000..a461f7574209
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hal/bm/mvBm.c
@@ -0,0 +1,481 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCommon.h"		/* Should be included before mvSysHwConfig */
+#include "mvTypes.h"
+#include "mv802_3.h"
+#include "mvDebug.h"
+#include "mvCommon.h"
+#include "mvOs.h"
+
+#ifdef CONFIG_ARCH_MVEBU
+#include "mvebu-soc-id.h"
+#include "mvNetConfig.h"
+#else
+#include "mvSysEthConfig.h"
+#endif
+
+#include "mvBm.h"
+
+MV_U8 *mvBmVirtBase = 0;
+static MV_BM_POOL	mvBmPools[MV_BM_POOLS];
+
+/* Initialize Hardware Buffer management unit */
+MV_STATUS mvNetaBmInit(MV_U8 *virtBase)
+{
+	mvBmVirtBase = virtBase;
+
+	mvNetaBmRegsInit();
+
+	memset(mvBmPools, 0, sizeof(mvBmPools));
+
+	return MV_OK;
+}
+
+void mvNetaBmRegsInit(void)
+{
+	MV_U32 regVal;
+
+	/* Mask BM all interrupts */
+	MV_REG_WRITE(MV_BM_INTR_MASK_REG, 0);
+
+	/* Clear BM cause register */
+	MV_REG_WRITE(MV_BM_INTR_CAUSE_REG, 0);
+
+	/* Set BM configuration register */
+	regVal = MV_REG_READ(MV_BM_CONFIG_REG);
+
+	/* Reduce MaxInBurstSize from 32 BPs to 16 BPs */
+	regVal &= ~MV_BM_MAX_IN_BURST_SIZE_MASK;
+	regVal |= MV_BM_MAX_IN_BURST_SIZE_16BP;
+	MV_REG_WRITE(MV_BM_CONFIG_REG, regVal);
+
+	return;
+}
+
+MV_STATUS mvNetaBmControl(MV_COMMAND cmd)
+{
+	MV_U32 regVal = 0;
+
+	switch (cmd) {
+	case MV_START:
+		regVal = MV_BM_START_MASK;
+		break;
+
+	case MV_STOP:
+		regVal = MV_BM_STOP_MASK;
+		break;
+
+	case MV_PAUSE:
+		regVal = MV_BM_PAUSE_MASK;
+		break;
+
+	default:
+		mvOsPrintf("bmControl: Unknown command %d\n", cmd);
+		return MV_FAIL;
+	}
+	MV_REG_WRITE(MV_BM_COMMAND_REG, regVal);
+	return MV_OK;
+}
+
+MV_STATE mvNetaBmStateGet(void)
+{
+	MV_U32 regVal;
+	MV_STATE state;
+
+	regVal = MV_REG_READ(MV_BM_COMMAND_REG);
+
+	switch ((regVal >> MV_BM_STATUS_OFFS) & MV_BM_STATUS_ALL_MASK) {
+	case MV_BM_STATUS_ACTIVE:
+		state = MV_ACTIVE;
+		break;
+
+	case MV_BM_STATUS_NOT_ACTIVE:
+		state = MV_IDLE;
+		break;
+
+	case MV_BM_STATUS_PAUSED:
+		state = MV_PAUSED;
+		break;
+
+	default:
+		mvOsPrintf("bmStateGet: Unexpected state 0x%x\n", regVal);
+		state = MV_UNDEFINED_STATE;
+	}
+	return state;
+}
+
+void mvNetaBmConfigSet(MV_U32 mask)
+{
+	MV_U32	regVal;
+
+	regVal = MV_REG_READ(MV_BM_CONFIG_REG);
+	regVal |= mask;
+	MV_REG_WRITE(MV_BM_CONFIG_REG, regVal);
+}
+
+void mvNetaBmConfigClear(MV_U32 mask)
+{
+	MV_U32	regVal;
+
+	regVal = MV_REG_READ(MV_BM_CONFIG_REG);
+	regVal &= ~mask;
+	MV_REG_WRITE(MV_BM_CONFIG_REG, regVal);
+}
+
+void mvNetaBmPoolTargetSet(int pool, MV_U8 targetId, MV_U8 attr)
+{
+	MV_U32 regVal;
+
+	/* validate poolId */
+	if ((pool < 0) || (pool >= MV_BM_POOLS)) {
+		mvOsPrintf("bmPoolId = %d is invalid \n", pool);
+		return;
+	}
+	/* Read modify write */
+	regVal = MV_REG_READ(MV_BM_XBAR_POOL_REG(pool));
+
+	regVal &= ~MV_BM_TARGET_ID_MASK(pool);
+	regVal &= ~MV_BM_XBAR_ATTR_MASK(pool);
+	regVal |= MV_BM_TARGET_ID_VAL(pool, targetId);
+	regVal |= MV_BM_XBAR_ATTR_VAL(pool, attr);
+
+	MV_REG_WRITE(MV_BM_XBAR_POOL_REG(pool), regVal);
+}
+
+void mvNetaBmPoolEnable(int pool)
+{
+	MV_U32 regVal;
+
+	/* validate poolId */
+	if ((pool < 0) || (pool >= MV_BM_POOLS)) {
+		mvOsPrintf("bmPoolId = %d is invalid \n", pool);
+		return;
+	}
+	regVal = MV_REG_READ(MV_BM_POOL_BASE_REG(pool));
+	regVal |= MV_BM_POOL_ENABLE_MASK;
+	MV_REG_WRITE(MV_BM_POOL_BASE_REG(pool), regVal);
+
+	/* Clear BM cause register */
+	MV_REG_WRITE(MV_BM_INTR_CAUSE_REG, 0);
+
+}
+
+void mvNetaBmPoolDisable(int pool)
+{
+	MV_U32 regVal;
+
+	/* validate poolId */
+	if ((pool < 0) || (pool >= MV_BM_POOLS)) {
+		mvOsPrintf("bmPoolId = %d is invalid \n", pool);
+		return;
+	}
+	regVal = MV_REG_READ(MV_BM_POOL_BASE_REG(pool));
+	regVal &= ~MV_BM_POOL_ENABLE_MASK;
+	MV_REG_WRITE(MV_BM_POOL_BASE_REG(pool), regVal);
+}
+
+MV_BOOL mvNetaBmPoolIsEnabled(int pool)
+{
+	MV_U32 regVal;
+
+	/* validate poolId */
+	if ((pool < 0) || (pool >= MV_BM_POOLS)) {
+		mvOsPrintf("bmPoolId = %d is invalid \n", pool);
+		return MV_FALSE;
+	}
+	regVal = MV_REG_READ(MV_BM_POOL_BASE_REG(pool));
+	return (regVal & MV_BM_POOL_ENABLE_MASK);
+}
+
+/* Configure BM specific pool of "capacity" size. */
+MV_STATUS mvNetaBmPoolInit(int pool, void *virtPoolBase, MV_ULONG physPoolBase, int capacity)
+{
+	MV_BM_POOL	*pBmPool;
+
+	/* validate poolId */
+	if ((pool < 0) || (pool >= MV_BM_POOLS)) {
+		mvOsPrintf("bmPoolId = %d is invalid \n", pool);
+		return MV_BAD_PARAM;
+	}
+	/* poolBase must be 4 byte aligned */
+	if (MV_IS_NOT_ALIGN(physPoolBase, MV_BM_POOL_PTR_ALIGN)) {
+		mvOsPrintf("bmPoolBase = 0x%lx is not aligned 4 bytes\n", physPoolBase);
+		return MV_NOT_ALIGNED;
+	}
+	/* Minimum pool capacity is 128 entries */
+	if (capacity < MV_BM_POOL_CAP_MIN) {
+		mvOsPrintf("bmPool capacity = %d is smaller than minimum (%d)\n", capacity, MV_BM_POOL_CAP_MIN);
+		return MV_BAD_SIZE;
+	}
+	/* Update data structure */
+	pBmPool = &mvBmPools[pool];
+	if (pBmPool->pVirt != NULL) {
+		mvOsPrintf("bmPool = %d is already busy\n", pool);
+		/* necessary for power managemaent resume process*/
+		/*return MV_BUSY;*/
+	}
+
+	pBmPool->pool = pool;
+	pBmPool->capacity = capacity;
+	pBmPool->pVirt = virtPoolBase;
+	pBmPool->physAddr = physPoolBase;
+
+	/* Maximum pool capacity is 16K entries (2^14) */
+	if (capacity > MV_BM_POOL_CAP_MAX) {
+		mvOsPrintf("bmPool capacity = %d is larger than maximum (%d)\n", capacity, MV_BM_POOL_CAP_MAX);
+		return MV_BAD_SIZE;
+	}
+
+	/* Set poolBase address */
+	MV_REG_WRITE(MV_BM_POOL_BASE_REG(pool), physPoolBase);
+
+	/* Set Read pointer to 0 */
+	MV_REG_WRITE(MV_BM_POOL_READ_PTR_REG(pool), 0);
+
+	/* Set Read pointer to 0 */
+	MV_REG_WRITE(MV_BM_POOL_WRITE_PTR_REG(pool), 0);
+
+	/* Set Pool size */
+	MV_REG_WRITE(MV_BM_POOL_SIZE_REG(pool), MV_BM_POOL_SIZE_VAL(capacity));
+
+	return MV_OK;
+}
+
+MV_STATUS mvNetaBmPoolBufferSizeSet(int pool, int buf_size)
+{
+	MV_BM_POOL *pBmPool;
+
+	/* validate poolId */
+	if ((pool < 0) || (pool >= MV_BM_POOLS)) {
+		mvOsPrintf("bmPoolId = %d is invalid \n", pool);
+		return MV_BAD_PARAM;
+	}
+	pBmPool = &mvBmPools[pool];
+
+	pBmPool->bufSize = buf_size;
+
+	return MV_OK;
+}
+
+MV_STATUS mvNetaBmPoolBufNumUpdate(int pool, int buf_num, int add)
+{
+	MV_BM_POOL *pBmPool;
+
+	/* validate poolId */
+	if ((pool < 0) || (pool >= MV_BM_POOLS)) {
+		mvOsPrintf("bmPoolId = %d is invalid \n", pool);
+		return MV_BAD_PARAM;
+	}
+
+	pBmPool = &mvBmPools[pool];
+	if (pBmPool->bufSize == 0) {
+		mvOsPrintf("bmPoolId = %d has unknown buf_size\n", pool);
+		return MV_BAD_PARAM;
+	}
+
+	if (add)
+		pBmPool->bufNum += buf_num;
+	else
+		pBmPool->bufNum -= buf_num;
+
+	return MV_OK;
+}
+
+void mvNetaBmPoolPrint(int pool)
+{
+	MV_BM_POOL *pBmPool;
+
+	/* validate poolId */
+	if ((pool < 0) || (pool >= MV_BM_POOLS)) {
+		mvOsPrintf("bmPoolId = %d is invalid \n", pool);
+		return;
+	}
+
+	pBmPool = &mvBmPools[pool];
+	if (pBmPool->pVirt == NULL) {
+		mvOsPrintf("bmPool = %d is not created yet\n", pool);
+		return;
+	}
+
+	mvOsPrintf("  %2d:     %4d       %4d       %4d      %p      0x%08x\n",
+						pBmPool->pool, pBmPool->capacity, pBmPool->bufSize, pBmPool->bufNum,
+						pBmPool->pVirt, (unsigned)pBmPool->physAddr);
+}
+
+void mvNetaBmStatus(void)
+{
+	int i;
+
+	mvOsPrintf("BM Pools status\n");
+	mvOsPrintf("pool:    capacity    bufSize    bufNum      virtPtr       physAddr\n");
+	for (i = 0; i < MV_BM_POOLS; i++)
+		mvNetaBmPoolPrint(i);
+}
+
+void mvNetaBmPoolDump(int pool, int mode)
+{
+	MV_U32     regVal;
+	MV_ULONG   *pBufAddr;
+	MV_BM_POOL *pBmPool;
+	int setReadIdx, getReadIdx, setWriteIdx, getWriteIdx, freeBuffs, i;
+
+	/* validate poolId */
+	if ((pool < 0) || (pool >= MV_BM_POOLS)) {
+		mvOsPrintf("bmPoolId = %d is invalid \n", pool);
+		return;
+	}
+
+	pBmPool = &mvBmPools[pool];
+	if (pBmPool->pVirt == NULL) {
+		mvOsPrintf("bmPool = %d is not created yet\n", pool);
+		return;
+	}
+
+	mvOsPrintf("\n[NETA BM: pool=%d, mode=%d]\n", pool, mode);
+
+	mvOsPrintf("poolBase=%p (0x%x), capacity=%d, buf_num=%d, buf_size=%d\n",
+		   pBmPool->pVirt, (unsigned)pBmPool->physAddr, pBmPool->capacity, pBmPool->bufNum, pBmPool->bufSize);
+
+	regVal = MV_REG_READ(MV_BM_POOL_READ_PTR_REG(pool));
+	setReadIdx = ((regVal & MV_BM_POOL_SET_READ_PTR_MASK) >> MV_BM_POOL_SET_READ_PTR_OFFS) / 4;
+	getReadIdx = ((regVal & MV_BM_POOL_GET_READ_PTR_MASK) >> MV_BM_POOL_GET_READ_PTR_OFFS) / 4;
+
+	regVal = MV_REG_READ(MV_BM_POOL_WRITE_PTR_REG(pool));
+	setWriteIdx = ((regVal & MV_BM_POOL_SET_WRITE_PTR_MASK) >> MV_BM_POOL_SET_WRITE_PTR_OFFS) / 4;
+	getWriteIdx = ((regVal & MV_BM_POOL_GET_WRITE_PTR_MASK) >> MV_BM_POOL_GET_WRITE_PTR_OFFS) / 4;
+	if (getWriteIdx >= getReadIdx)
+		freeBuffs = getWriteIdx - getReadIdx;
+	else
+		freeBuffs = (pBmPool->capacity - getReadIdx) + getWriteIdx;
+
+	mvOsPrintf("nextToRead: set=%d, get=%d, nextToWrite: set=%d, get=%d, freeBuffs=%d\n",
+		setReadIdx, getReadIdx, setWriteIdx, getWriteIdx, freeBuffs);
+
+	if (mode > 0) {
+		/* Print the content of BM pool */
+		i = getReadIdx;
+		while (i != getWriteIdx) {
+			pBufAddr = (MV_ULONG *)pBmPool->pVirt + i;
+			mvOsPrintf("%3d. pBufAddr=%p, bufAddr=%08x\n",
+				   i, pBufAddr, (MV_U32)(*pBufAddr));
+			i++;
+			if (i == pBmPool->capacity)
+				i = 0;
+		}
+	}
+}
+
+void mvNetaBmRegs(void)
+{
+	int pool;
+
+	mvOsPrintf("\n\t Hardware Buffer Management Registers:\n");
+
+	mvOsPrintf("MV_BM_CONFIG_REG                : 0x%X = 0x%08x\n",
+		   MV_BM_CONFIG_REG, MV_REG_READ(MV_BM_CONFIG_REG));
+
+	mvOsPrintf("MV_BM_COMMAND_REG               : 0x%X = 0x%08x\n",
+		   MV_BM_COMMAND_REG, MV_REG_READ(MV_BM_COMMAND_REG));
+
+	mvOsPrintf("MV_BM_INTR_CAUSE_REG            : 0x%X = 0x%08x\n",
+		   MV_BM_INTR_CAUSE_REG, MV_REG_READ(MV_BM_INTR_CAUSE_REG));
+
+	mvOsPrintf("MV_BM_INTR_MASK_REG             : 0x%X = 0x%08x\n",
+		   MV_BM_INTR_MASK_REG, MV_REG_READ(MV_BM_INTR_MASK_REG));
+
+	mvOsPrintf("MV_BM_XBAR_01_REG               : 0x%X = 0x%08x\n",
+		   MV_BM_XBAR_01_REG, MV_REG_READ(MV_BM_XBAR_01_REG));
+
+	mvOsPrintf("MV_BM_XBAR_23_REG               : 0x%X = 0x%08x\n",
+		   MV_BM_XBAR_23_REG, MV_REG_READ(MV_BM_XBAR_23_REG));
+
+	for (pool = 0; pool < MV_BM_POOLS; pool++) {
+		mvOsPrintf("\n\t BM Pool #%d registers:\n", pool);
+
+		mvOsPrintf("MV_BM_POOL_BASE_REG             : 0x%X = 0x%08x\n",
+			MV_BM_POOL_BASE_REG(pool), MV_REG_READ(MV_BM_POOL_BASE_REG(pool)));
+
+		mvOsPrintf("MV_BM_POOL_READ_PTR_REG         : 0x%X = 0x%08x\n",
+			MV_BM_POOL_READ_PTR_REG(pool), MV_REG_READ(MV_BM_POOL_READ_PTR_REG(pool)));
+
+		mvOsPrintf("MV_BM_POOL_WRITE_PTR_REG        : 0x%X = 0x%08x\n",
+			MV_BM_POOL_WRITE_PTR_REG(pool), MV_REG_READ(MV_BM_POOL_WRITE_PTR_REG(pool)));
+
+		mvOsPrintf("MV_BM_POOL_SIZE_REG             : 0x%X = 0x%08x\n",
+			MV_BM_POOL_SIZE_REG(pool), MV_REG_READ(MV_BM_POOL_SIZE_REG(pool)));
+	}
+	mvOsPrintf("\n");
+
+	mvOsPrintf("MV_BM_DEBUG_REG                 : 0x%X = 0x%08x\n", MV_BM_DEBUG_REG, MV_REG_READ(MV_BM_DEBUG_REG));
+
+	mvOsPrintf("MV_BM_READ_PTR_REG              : 0x%X = 0x%08x\n",
+		   MV_BM_READ_PTR_REG, MV_REG_READ(MV_BM_READ_PTR_REG));
+
+	mvOsPrintf("MV_BM_WRITE_PTR_REG             : 0x%X = 0x%08x\n",
+		   MV_BM_WRITE_PTR_REG, MV_REG_READ(MV_BM_WRITE_PTR_REG));
+
+	mvOsPrintf("\n");
+
+}
diff --git a/drivers/net/ethernet/mvebu_net/neta/hal/bm/mvBm.h b/drivers/net/ethernet/mvebu_net/neta/hal/bm/mvBm.h
new file mode 100644
index 000000000000..4ad95fdedaae
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hal/bm/mvBm.h
@@ -0,0 +1,135 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __mvBm_h__
+#define __mvBm_h__
+
+/* includes */
+#include "mvTypes.h"
+#include "mvCommon.h"
+#include "mvStack.h"
+#include "mv802_3.h"
+
+#include "mvBmRegs.h"
+
+typedef struct {
+	int valid;
+	int longPool;
+	int shortPool;
+	int longBufNum;
+	int shortBufNum;
+
+} MV_BM_CONFIG;
+
+typedef struct {
+	int         pool;
+	int         capacity;
+	int         bufNum;
+	int         bufSize;
+	MV_U32      *pVirt;
+	MV_ULONG    physAddr;
+} MV_BM_POOL;
+
+extern MV_U8 *mvBmVirtBase;
+/* defines */
+
+/* bits[8-9] of address define pool 0-3 */
+#define BM_POOL_ACCESS_OFFS     8
+
+/* INLINE functions */
+static INLINE void mvBmPoolPut(int poolId, MV_ULONG bufPhysAddr)
+{
+	volatile MV_U32 *poolAddr = (MV_U32 *)((unsigned)mvBmVirtBase | (poolId << BM_POOL_ACCESS_OFFS));
+
+	*poolAddr = MV_32BIT_LE((MV_U32)bufPhysAddr);
+}
+
+static INLINE MV_ULONG mvBmPoolGet(int poolId)
+{
+	volatile MV_U32 *poolAddr = (MV_U32 *)((unsigned)mvBmVirtBase | (poolId << BM_POOL_ACCESS_OFFS));
+	MV_U32	bufPhysAddr = *poolAddr;
+
+	return (MV_ULONG)(MV_32BIT_LE(bufPhysAddr));
+}
+
+/* prototypes */
+MV_STATUS mvNetaBmInit(MV_U8 *virtBase);
+void      mvNetaBmRegsInit(void);
+void      mvNetaBmConfigSet(MV_U32 mask);
+void      mvNetaBmConfigClear(MV_U32 mask);
+MV_STATUS mvNetaBmControl(MV_COMMAND cmd);
+MV_STATE  mvNetaBmStateGet(void);
+void      mvNetaBmPoolTargetSet(int pool, MV_U8 targetId, MV_U8 attr);
+void      mvNetaBmPoolEnable(int pool);
+void      mvNetaBmPoolDisable(int pool);
+MV_BOOL   mvNetaBmPoolIsEnabled(int pool);
+MV_STATUS mvNetaBmPoolInit(int pool, void *virtPoolBase, MV_ULONG physPoolBase, int capacity);
+MV_STATUS mvNetaBmPoolBufNumUpdate(int pool, int buf_num, int add);
+MV_STATUS mvNetaBmPoolBufferSizeSet(int pool, int buf_size);
+void      mvNetaBmRegs(void);
+void      mvNetaBmStatus(void);
+void      mvNetaBmPoolDump(int pool, int mode);
+void      mvNetaBmPoolPrint(int pool);
+
+#endif /* __mvBm_h__ */
diff --git a/drivers/net/ethernet/mvebu_net/neta/hal/bm/mvBmRegs.h b/drivers/net/ethernet/mvebu_net/neta/hal/bm/mvBmRegs.h
new file mode 100644
index 000000000000..5742544a3c46
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hal/bm/mvBmRegs.h
@@ -0,0 +1,235 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __mvBmRegs_h__
+#define __mvBmRegs_h__
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#ifdef CONFIG_OF
+extern int bm_reg_vbase;
+#define MV_BM_REG_BASE			(bm_reg_vbase)
+#endif
+
+#define MV_BM_POOLS                 4
+#define MV_BM_POOL_CAP_MAX          (16*1024 - MV_BM_POOL_PTR_ALIGN/4)
+#define MV_BM_POOL_CAP_MIN          128
+#define MV_BM_POOL_PTR_ALIGN        128
+
+/* BM Configuration Register */
+#define MV_BM_CONFIG_REG                (MV_BM_REG_BASE + 0x0)
+
+#define MV_BM_SRC_BURST_SIZE_OFFS       0
+#define MV_BM_SRC_BURST_SIZE_MASK       (3 << MV_BM_SRC_BURST_SIZE_OFFS)
+#define MV_BM_SRC_BURST_SIZE_128B       (0 << MV_BM_SRC_BURST_SIZE_OFFS)
+#define MV_BM_SRC_BURST_SIZE_32B        (1 << MV_BM_SRC_BURST_SIZE_OFFS)
+
+#define MV_BM_DST_BURST_SIZE_OFFS       2
+#define MV_BM_DST_BURST_SIZE_MASK       (3 << MV_BM_DST_BURST_SIZE_OFFS)
+#define MV_BM_DST_BURST_SIZE_128B       (0 << MV_BM_DST_BURST_SIZE_OFFS)
+#define MV_BM_DST_BURST_SIZE_32B        (1 << MV_BM_DST_BURST_SIZE_OFFS)
+
+#define MV_BM_DST_SWAP_BIT              4
+#define MV_BM_DST_SWAP_MASK             (1 << MV_BM_DST_SWAP_BIT)
+
+#define MV_BM_SRC_SWAP_BIT              5
+#define MV_BM_SRC_SWAP_MASK             (1 << MV_BM_SRC_SWAP_BIT)
+
+#define MV_BM_LOW_THRESH_OFFS           8
+#define MV_BM_LOW_THRESH_MASK           (0xF << MV_BM_LOW_THRESH_OFFS)
+#define MV_BM_LOW_THRESH_VALUE(val)     ((val) << MV_BM_LOW_THRESH_OFFS)
+
+#define MV_BM_HIGH_THRESH_OFFS          12
+#define MV_BM_HIGH_THRESH_MASK          (0xF << MV_BM_HIGH_THRESH_OFFS)
+#define MV_BM_HIGH_THRESH_VALUE(val)    ((val) << MV_BM_HIGH_THRESH_OFFS)
+
+#define MV_BM_MAX_IN_BURST_SIZE_OFFS    17
+#define MV_BM_MAX_IN_BURST_SIZE_MASK    (3 << MV_BM_MAX_IN_BURST_SIZE_OFFS)
+#define MV_BM_MAX_IN_BURST_SIZE_32BP    (0 << MV_BM_MAX_IN_BURST_SIZE_OFFS)
+#define MV_BM_MAX_IN_BURST_SIZE_24BP    (1 << MV_BM_MAX_IN_BURST_SIZE_OFFS)
+#define MV_BM_MAX_IN_BURST_SIZE_16BP    (2 << MV_BM_MAX_IN_BURST_SIZE_OFFS)
+#define MV_BM_MAX_IN_BURST_SIZE_8BP     (3 << MV_BM_MAX_IN_BURST_SIZE_OFFS)
+
+#define MV_BM_EMPTY_LIMIT_BIT			19
+#define MV_BM_EMPTY_LIMIT_MASK			(1 << MV_BM_EMPTY_LIMIT_BIT)
+
+
+/* BM Activation Register */
+#define MV_BM_COMMAND_REG               (MV_BM_REG_BASE + 0x4)
+
+#define MV_BM_START_BIT                 0
+#define MV_BM_START_MASK                (1 << MV_BM_START_BIT)
+
+#define MV_BM_STOP_BIT                  1
+#define MV_BM_STOP_MASK                 (1 << MV_BM_STOP_BIT)
+
+#define MV_BM_PAUSE_BIT                 2
+#define MV_BM_PAUSE_MASK                (1 << MV_BM_PAUSE_BIT)
+
+#define MV_BM_STATUS_OFFS               4
+#define MV_BM_STATUS_ALL_MASK           (0x3)
+#define MV_BM_STATUS_NOT_ACTIVE         (0x0)
+#define MV_BM_STATUS_ACTIVE             (0x1)
+#define MV_BM_STATUS_PAUSED             (0x2)
+#define MV_BM_STATUS_MASK(status)       ((status) << MV_BM_STATUS_OFFS)
+
+/* BM Xbar interface Register */
+#define MV_BM_XBAR_01_REG               (MV_BM_REG_BASE + 0x8)
+#define MV_BM_XBAR_23_REG               (MV_BM_REG_BASE + 0xC)
+
+#define MV_BM_XBAR_POOL_REG(pool)       (((pool) < 2) ? MV_BM_XBAR_01_REG : MV_BM_XBAR_23_REG)
+
+#define MV_BM_TARGET_ID_OFFS(pool)      (((pool) & 1) ? 16 : 0)
+#define MV_BM_TARGET_ID_MASK(pool)      (0xF << MV_BM_TARGET_ID_OFFS(pool))
+#define MV_BM_TARGET_ID_VAL(pool, id)   ((id) << MV_BM_TARGET_ID_OFFS(pool))
+
+#define MV_BM_XBAR_ATTR_OFFS(pool)      (((pool) & 1) ? 20 : 4)
+#define MV_BM_XBAR_ATTR_MASK(pool)      (0xFF << MV_BM_XBAR_ATTR_OFFS(pool))
+#define MV_BM_XBAR_ATTR_VAL(pool, attr) ((attr) << MV_BM_XBAR_ATTR_OFFS(pool))
+
+/* Address of External Buffer Pointers Pool Register */
+#define MV_BM_POOL_BASE_REG(pool)       (MV_BM_REG_BASE + 0x10 + ((pool) << 4))
+
+#define MV_BM_POOL_ENABLE_BIT           0
+#define MV_BM_POOL_ENABLE_MASK          (1 << MV_BM_POOL_ENABLE_BIT)
+
+#define MV_BM_POOL_BASE_ADDR_OFFS       2
+#define MV_BM_POOL_BASE_ADDR_MASK       (0x3FFFFFFF << MV_BM_POOL_BASE_ADDR_OFFS)
+
+/* External Buffer Pointers Pool RD pointer Register */
+#define MV_BM_POOL_READ_PTR_REG(pool)   (MV_BM_REG_BASE + 0x14 + ((pool) << 4))
+
+#define MV_BM_POOL_SET_READ_PTR_OFFS    0
+#define MV_BM_POOL_SET_READ_PTR_MASK    (0xFFFC << MV_BM_POOL_SET_READ_PTR_OFFS)
+#define MV_BM_POOL_SET_READ_PTR(val)    ((val) << MV_BM_POOL_SET_READ_PTR_OFFS)
+
+#define MV_BM_POOL_GET_READ_PTR_OFFS    16
+#define MV_BM_POOL_GET_READ_PTR_MASK    (0xFFFC << MV_BM_POOL_GET_READ_PTR_OFFS)
+
+
+/* External Buffer Pointers Pool WR pointer */
+#define MV_BM_POOL_WRITE_PTR_REG(pool)  (MV_BM_REG_BASE + 0x18 + ((pool) << 4))
+
+#define MV_BM_POOL_SET_WRITE_PTR_OFFS   0
+#define MV_BM_POOL_SET_WRITE_PTR_MASK   (0xFFFC << MV_BM_POOL_SET_WRITE_PTR_OFFS)
+#define MV_BM_POOL_SET_WRITE_PTR(val)   ((val) << MV_BM_POOL_SET_WRITE_PTR_OFFS)
+
+#define MV_BM_POOL_GET_WRITE_PTR_OFFS   16
+#define MV_BM_POOL_GET_WRITE_PTR_MASK   (0xFFFC << MV_BM_POOL_GET_WRITE_PTR_OFFS)
+
+/* External Buffer Pointers Pool Size Register */
+#define MV_BM_POOL_SIZE_REG(pool)       (MV_BM_REG_BASE + 0x1C + ((pool) << 4))
+
+#define MV_BM_POOL_SIZE_OFFS            0
+#define MV_BM_POOL_SIZE_MASK            (0x3FFF << MV_BM_POOL_SIZE_OFFS)
+#define MV_BM_POOL_SIZE_VAL(size)       ((size) << MV_BM_POOL_SIZE_OFFS)
+
+
+/* BM Interrupt Cause Register */
+#define MV_BM_INTR_CAUSE_REG            (MV_BM_REG_BASE + 0x50)
+
+#define MV_BM_CAUSE_FREE_FAIL_BIT(p)    (0 + ((p) * 6))
+#define MV_BM_CAUSE_FREE_FAIL_MASK(p)   (1 << MV_BM_CAUSE_FREE_FAIL_BIT(p))
+
+#define MV_BM_CAUSE_ALLOC_FAIL_BIT(p)   (1 + ((p) * 6))
+#define MV_BM_CAUSE_ALLOC_FAIL_MASK(p)  (1 << MV_BM_CAUSE_ALLOC_FAIL_BIT(p))
+
+#define MV_BM_CAUSE_POOL_EMPTY_BIT(p)   (2 + ((p) * 6))
+#define MV_BM_CAUSE_POOL_EMPTY_MASK(p)  (1 << MV_BM_CAUSE_POOL_EMPTY_BIT(p))
+
+#define MV_BM_CAUSE_POOL_FULL_BIT(p)    (3 + ((p) * 6))
+#define MV_BM_CAUSE_POOL_FULL_MASK(p)   (1 << MV_BM_CAUSE_POOL_FULL_BIT(p))
+
+#define MV_BM_CAUSE_INT_PAR_ERR_BIT     27
+#define MV_BM_CAUSE_INT_PAR_ERR_MASK    (1 << MV_BM_CAUSE_INT_PAR_ERR_MASK)
+
+#define MV_BM_CAUSE_XBAR_PAR_ERR_BIT    28
+#define MV_BM_CAUSE_XBAR_PAR_ERR_MASK   (1 << MV_BM_CAUSE_XBAR_PAR_ERR_MASK)
+
+#define MV_BM_CAUSE_STOPPED_BIT         29
+#define MV_BM_CAUSE_STOPPED_MASK        (1 << MV_BM_CAUSE_STOPPED_MASK)
+
+#define MV_BM_CAUSE_PAUSED_BIT          30
+#define MV_BM_CAUSE_PAUSED_MASK         (1 << MV_BM_CAUSE_PAUSED_MASK)
+
+#define MV_BM_CAUSE_SUMMARY_BIT         31
+#define MV_BM_CAUSE_SUMMARY_MASK        (1 << MV_BM_CAUSE_SUMMARY_MASK)
+
+/* BM interrupt Mask Register */
+#define MV_BM_INTR_MASK_REG             (MV_BM_REG_BASE + 0x54)
+
+#define MV_BM_DEBUG_REG                 (MV_BM_REG_BASE + 0x60)
+#define MV_BM_READ_PTR_REG              (MV_BM_REG_BASE + 0x64)
+#define MV_BM_WRITE_PTR_REG             (MV_BM_REG_BASE + 0x68)
+
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __mvBmRegs_h__ */
diff --git a/drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvEthRegs.h b/drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvEthRegs.h
new file mode 100755
index 000000000000..23d163905d33
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvEthRegs.h
@@ -0,0 +1,569 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __INCmvEthRegsh
+#define __INCmvEthRegsh
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "mvNetaRegs.h"
+
+
+#define ETH_MAX_DECODE_WIN              6
+#define ETH_MAX_HIGH_ADDR_REMAP_WIN     4
+
+/****************************************/
+/*        Ethernet Unit Registers       */
+/****************************************/
+#ifdef CONFIG_OF
+extern int port_vbase[MV_ETH_MAX_PORTS];
+
+#define ETH_REG_BASE(port)                  port_vbase[port]
+#else /* CONFIG_OF */
+#define ETH_REG_BASE(port)                  MV_ETH_REGS_BASE(port)
+#endif /* CONFIG_OF */
+
+#define ETH_PHY_ADDR_REG(port)              (ETH_REG_BASE(port) + 0x2000)
+#define ETH_SMI_REG(port)                   (ETH_REG_BASE(port) + 0x2004)
+#define ETH_UNIT_DEF_ADDR_REG(port)         (ETH_REG_BASE(port) + 0x2008)
+#define ETH_UNIT_DEF_ID_REG(port)           (ETH_REG_BASE(port) + 0x200c)
+#define ETH_UNIT_RESERVED(port)             (ETH_REG_BASE(port) + 0x2014)
+#define ETH_UNIT_INTR_CAUSE_REG(port)       (ETH_REG_BASE(port) + 0x2080)
+#define ETH_UNIT_INTR_MASK_REG(port)        (ETH_REG_BASE(port) + 0x2084)
+
+#define ETH_UNIT_ERROR_ADDR_REG(port)       (ETH_REG_BASE(port) + 0x2094)
+#define ETH_UNIT_INT_ADDR_ERROR_REG(port)   (ETH_REG_BASE(port) + 0x2098)
+
+/* Ethernet Unit Control (EUC) register */
+#define ETH_UNIT_CONTROL_REG(port)          (ETH_REG_BASE(port) + 0x20B0)
+
+#define ETH_PHY_POLLING_ENABLE_BIT          1
+#define ETH_PHY_POLLING_ENABLE_MASK        (1 << ETH_PHY_POLLING_ENABLE_BIT)
+
+#define ETH_UNIT_PORT_RESET_BIT             24
+#define ETH_UNIT_PORT_RESET_MASK            (1 << ETH_UNIT_PORT_RESET_BIT)
+/*-----------------------------------------------------------------------------------------------*/
+
+/**** Address decode registers ****/
+
+#define ETH_WIN_BASE_REG(port, win)         (ETH_REG_BASE(port) + 0x2200 + ((win) << 3))
+#define ETH_WIN_SIZE_REG(port, win)         (ETH_REG_BASE(port) + 0x2204 + ((win) << 3))
+#define ETH_WIN_REMAP_REG(port, win)        (ETH_REG_BASE(port) + 0x2280 + ((win) << 2))
+#define ETH_BASE_ADDR_ENABLE_REG(port)      (ETH_REG_BASE(port) + 0x2290)
+#define ETH_ACCESS_PROTECT_REG(port)        (ETH_REG_BASE(port) + 0x2294)
+
+/* The target associated with this window*/
+#define ETH_WIN_TARGET_OFFS                 0
+#define ETH_WIN_TARGET_MASK                 (0xf << ETH_WIN_TARGET_OFFS)
+/* The target attributes associated with window */
+#define ETH_WIN_ATTR_OFFS                   8
+#define ETH_WIN_ATTR_MASK                   (0xff << ETH_WIN_ATTR_OFFS)
+
+/* The Base address associated with window */
+#define ETH_WIN_BASE_OFFS		            16
+#define ETH_WIN_BASE_MASK		            (0xFFFF << ETH_WIN_BASE_OFFS)
+
+#define ETH_WIN_SIZE_OFFS		            16
+#define ETH_WIN_SIZE_MASK		            (0xFFFF << ETH_WIN_SIZE_OFFS)
+
+ /* Ethernet Port Access Protect Register (EPAPR) */
+#define ETH_PROT_NO_ACCESS                  0
+#define ETH_PROT_READ_ONLY                  1
+#define ETH_PROT_FULL_ACCESS                3
+#define ETH_PROT_WIN_OFFS(winNum)           (2 * (winNum))
+#define ETH_PROT_WIN_MASK(winNum)           (0x3 << ETH_PROT_WIN_OFFS(winNum))
+/*-----------------------------------------------------------------------------------------------*/
+
+
+/***** Port Configuration reg (PxCR) *****/
+#define ETH_PORT_CONFIG_REG(port)           (ETH_REG_BASE(port) + 0x2400)
+
+#define ETH_UNICAST_PROMISCUOUS_MODE_BIT    0
+#define ETH_UNICAST_PROMISCUOUS_MODE_MASK   (1 << ETH_UNICAST_PROMISCUOUS_MODE_BIT)
+
+#define ETH_DEF_RX_QUEUE_OFFSET             1
+#define ETH_DEF_RX_QUEUE_ALL_MASK           (0x7 << ETH_DEF_RX_QUEUE_OFFSET)
+#define ETH_DEF_RX_QUEUE_MASK(queue)        ((queue) << ETH_DEF_RX_QUEUE_OFFSET)
+
+#define ETH_DEF_RX_ARP_QUEUE_OFFSET         4
+#define ETH_DEF_RX_ARP_QUEUE_ALL_MASK       (0x7 << ETH_DEF_RX_ARP_QUEUE_OFFSET)
+#define ETH_DEF_RX_ARP_QUEUE_MASK(queue)    ((queue) << ETH_DEF_RX_ARP_QUEUE_OFFSET)
+
+#define ETH_REJECT_NOT_IP_ARP_BCAST_BIT     7
+#define ETH_REJECT_NOT_IP_ARP_BCAST_MASK    (1 << ETH_REJECT_NOT_IP_ARP_BCAST_BIT)
+
+#define ETH_REJECT_IP_BCAST_BIT             8
+#define ETH_REJECT_IP_BCAST_MASK            (1 << ETH_REJECT_IP_BCAST_BIT)
+
+#define ETH_REJECT_ARP_BCAST_BIT            9
+#define ETH_REJECT_ARP_BCAST_MASK           (1 << ETH_REJECT_ARP_BCAST_BIT)
+
+#define ETH_TX_NO_SET_ERROR_SUMMARY_BIT     12
+#define ETH_TX_NO_SET_ERROR_SUMMARY_MASK    (1 << ETH_TX_NO_SET_ERROR_SUMMARY_BIT)
+
+#define ETH_CAPTURE_TCP_FRAMES_ENABLE_BIT   14
+#define ETH_CAPTURE_TCP_FRAMES_ENABLE_MASK  (1 << ETH_CAPTURE_TCP_FRAMES_ENABLE_BIT)
+
+#define ETH_CAPTURE_UDP_FRAMES_ENABLE_BIT   15
+#define ETH_CAPTURE_UDP_FRAMES_ENABLE_MASK  (1 << ETH_CAPTURE_UDP_FRAMES_ENABLE_BIT)
+
+#define ETH_DEF_RX_TCP_QUEUE_OFFSET         16
+#define ETH_DEF_RX_TCP_QUEUE_ALL_MASK       (0x7 << ETH_DEF_RX_TCP_QUEUE_OFFSET)
+#define ETH_DEF_RX_TCP_QUEUE_MASK(queue)    ((queue) << ETH_DEF_RX_TCP_QUEUE_OFFSET)
+
+#define ETH_DEF_RX_UDP_QUEUE_OFFSET         19
+#define ETH_DEF_RX_UDP_QUEUE_ALL_MASK       (0x7 << ETH_DEF_RX_UDP_QUEUE_OFFSET)
+#define ETH_DEF_RX_UDP_QUEUE_MASK(queue)    ((queue) << ETH_DEF_RX_UDP_QUEUE_OFFSET)
+
+#define ETH_DEF_RX_BPDU_QUEUE_OFFSET        22
+#define ETH_DEF_RX_BPDU_QUEUE_ALL_MASK      (0x7 << ETH_DEF_RX_BPDU_QUEUE_OFFSET)
+#define ETH_DEF_RX_BPDU_QUEUE_MASK(queue)   ((queue) << ETH_DEF_RX_BPDU_QUEUE_OFFSET)
+
+#define ETH_RX_CHECKSUM_MODE_OFFSET         25
+#define ETH_RX_CHECKSUM_NO_PSEUDO_HDR       (0 << ETH_RX_CHECKSUM_MODE_OFFSET)
+#define ETH_RX_CHECKSUM_WITH_PSEUDO_HDR     (1 << ETH_RX_CHECKSUM_MODE_OFFSET)
+/*-----------------------------------------------------------------------------------------------*/
+
+/***** Port Configuration Extend reg (PxCXR) *****/
+#define ETH_PORT_CONFIG_EXTEND_REG(port)    (ETH_REG_BASE(port) + 0x2404)
+
+#define ETH_CAPTURE_SPAN_BPDU_ENABLE_BIT    1
+#define ETH_CAPTURE_SPAN_BPDU_ENABLE_MASK   (1 << ETH_CAPTURE_SPAN_BPDU_ENABLE_BIT)
+
+#define ETH_TX_DISABLE_GEN_CRC_BIT          3
+#define ETH_TX_DISABLE_GEN_CRC_MASK         (1 << ETH_TX_DISABLE_GEN_CRC_BIT)
+/*-----------------------------------------------------------------------------------------------*/
+
+#define ETH_VLAN_ETHER_TYPE_REG(port)       (ETH_REG_BASE(port) + 0x2410)
+#define ETH_MAC_ADDR_LOW_REG(port)          (ETH_REG_BASE(port) + 0x2414)
+#define ETH_MAC_ADDR_HIGH_REG(port)         (ETH_REG_BASE(port) + 0x2418)
+
+
+/***** Port Sdma Configuration reg (SDCR) *****/
+#define ETH_SDMA_CONFIG_REG(port)           (ETH_REG_BASE(port) + 0x241c)
+
+#define ETH_RX_FRAME_INTERRUPT_BIT          0
+#define ETH_RX_FRAME_INTERRUPT_MASK         (1 << ETH_RX_FRAME_INTERRUPT_BIT)
+
+#define ETH_BURST_SIZE_1_64BIT_VALUE        0
+#define ETH_BURST_SIZE_2_64BIT_VALUE        1
+#define ETH_BURST_SIZE_4_64BIT_VALUE        2
+#define ETH_BURST_SIZE_8_64BIT_VALUE        3
+#define ETH_BURST_SIZE_16_64BIT_VALUE       4
+
+#define ETH_RX_BURST_SIZE_OFFSET            1
+#define ETH_RX_BURST_SIZE_ALL_MASK          (0x7 << ETH_RX_BURST_SIZE_OFFSET)
+#define ETH_RX_BURST_SIZE_MASK(burst)       ((burst) << ETH_RX_BURST_SIZE_OFFSET)
+
+#define ETH_RX_NO_DATA_SWAP_BIT             4
+#define ETH_RX_NO_DATA_SWAP_MASK            (1 << ETH_RX_NO_DATA_SWAP_BIT)
+#define ETH_RX_DATA_SWAP_MASK               (0 << ETH_RX_NO_DATA_SWAP_BIT)
+
+#define ETH_TX_NO_DATA_SWAP_BIT             5
+#define ETH_TX_NO_DATA_SWAP_MASK            (1 << ETH_TX_NO_DATA_SWAP_BIT)
+#define ETH_TX_DATA_SWAP_MASK               (0 << ETH_TX_NO_DATA_SWAP_BIT)
+
+#define ETH_DESC_SWAP_BIT                   6
+#define ETH_DESC_SWAP_MASK                  (1 << ETH_DESC_SWAP_BIT)
+#define ETH_NO_DESC_SWAP_MASK               (0 << ETH_DESC_SWAP_BIT)
+
+#define ETH_TX_BURST_SIZE_OFFSET            22
+#define ETH_TX_BURST_SIZE_ALL_MASK          (0x7 << ETH_TX_BURST_SIZE_OFFSET)
+#define ETH_TX_BURST_SIZE_MASK(burst)       ((burst) << ETH_TX_BURST_SIZE_OFFSET)
+/*-----------------------------------------------------------------------------------------------*/
+
+#define ETH_DIFF_SERV_PRIO_REG(port, code)  (ETH_REG_BASE(port) + 0x2420  + ((code) << 2))
+
+/* Port Serial Control0 register (PSC0) */
+#define ETH_PORT_SERIAL_CTRL_REG(port)      (ETH_REG_BASE(port) + 0x243c)
+
+#define ETH_TX_FC_MODE_OFFSET               5
+#define ETH_TX_FC_MODE_MASK                 (3 << ETH_TX_FC_MODE_OFFSET)
+#define ETH_TX_FC_NO_PAUSE                  (0 << ETH_TX_FC_MODE_OFFSET)
+#define ETH_TX_FC_SEND_PAUSE                (1 << ETH_TX_FC_MODE_OFFSET)
+
+#define ETH_TX_BP_MODE_OFFSET               7
+#define ETH_TX_BP_MODE_MASK                 (3 << ETH_TX_BP_MODE_OFFSET)
+#define ETH_TX_BP_NO_JAM                    (0 << ETH_TX_BP_MODE_OFFSET)
+#define ETH_TX_BP_SEND_JAM                  (1 << ETH_TX_BP_MODE_OFFSET)
+
+#define ETH_RETRANSMIT_FOREVER_BIT          11
+#define ETH_RETRANSMIT_FOREVER_MASK         (1 << ETH_RETRANSMIT_FOREVER_BIT)
+
+#define ETH_DTE_ADVERT_BIT                  14
+#define ETH_DTE_ADVERT_MASK                 (1 << ETH_DTE_ADVERT_BIT)
+
+/* Other bits are different for new GMAC and old GMAC modules */
+#ifdef MV_ETH_GMAC_NEW
+
+#define ETH_IGNORE_RX_ERR_BIT               28
+#define ETH_IGNORE_RX_ERR_MASK              (1 << ETH_IGNORE_RX_ERR_BIT)
+
+#define ETH_IGNORE_COL_BIT                  29
+#define ETH_IGNORE_COL_MASK                 (1 << ETH_IGNORE_COL_BIT)
+
+#define ETH_IGNORE_CARRIER_SENSE_BIT        30
+#define ETH_IGNORE_CARRIER_SENSE_MASK       (1 << ETH_IGNORE_CARRIER_SENSE_BIT)
+
+#else /* Old GMAC */
+
+#define ETH_PORT_ENABLE_BIT                 0
+#define ETH_PORT_ENABLE_MASK                (1 << ETH_PORT_ENABLE_BIT)
+
+#define ETH_FORCE_LINK_PASS_BIT             1
+#define ETH_FORCE_LINK_PASS_MASK            (1 << ETH_FORCE_LINK_PASS_BIT)
+
+#define ETH_DISABLE_DUPLEX_AUTO_NEG_BIT     2
+#define ETH_DISABLE_DUPLEX_AUTO_NEG_MASK    (1 << ETH_DISABLE_DUPLEX_AUTO_NEG_BIT)
+
+#define ETH_DISABLE_FC_AUTO_NEG_BIT         3
+#define ETH_DISABLE_FC_AUTO_NEG_MASK        (1 << ETH_DISABLE_FC_AUTO_NEG_BIT)
+
+#define ETH_ADVERTISE_SYM_FC_BIT            4
+#define ETH_ADVERTISE_SYM_FC_MASK           (1 << ETH_ADVERTISE_SYM_FC_BIT)
+
+#define ETH_DO_NOT_FORCE_LINK_FAIL_BIT      10
+#define ETH_DO_NOT_FORCE_LINK_FAIL_MASK     (1 << ETH_DO_NOT_FORCE_LINK_FAIL_BIT)
+
+#define ETH_DISABLE_SPEED_AUTO_NEG_BIT      13
+#define ETH_DISABLE_SPEED_AUTO_NEG_MASK     (1 << ETH_DISABLE_SPEED_AUTO_NEG_BIT)
+
+#define ETH_MAX_RX_PACKET_SIZE_OFFSET       17
+#define ETH_MAX_RX_PACKET_SIZE_MASK         (7 << ETH_MAX_RX_PACKET_SIZE_OFFSET)
+#define ETH_MAX_RX_PACKET_1518BYTE          (0 << ETH_MAX_RX_PACKET_SIZE_OFFSET)
+#define ETH_MAX_RX_PACKET_1522BYTE          (1 << ETH_MAX_RX_PACKET_SIZE_OFFSET)
+#define ETH_MAX_RX_PACKET_1552BYTE          (2 << ETH_MAX_RX_PACKET_SIZE_OFFSET)
+#define ETH_MAX_RX_PACKET_9022BYTE          (3 << ETH_MAX_RX_PACKET_SIZE_OFFSET)
+#define ETH_MAX_RX_PACKET_9192BYTE          (4 << ETH_MAX_RX_PACKET_SIZE_OFFSET)
+#define ETH_MAX_RX_PACKET_9700BYTE          (5 << ETH_MAX_RX_PACKET_SIZE_OFFSET)
+
+#define ETH_SET_FULL_DUPLEX_BIT             21
+#define ETH_SET_FULL_DUPLEX_MASK            (1 << ETH_SET_FULL_DUPLEX_BIT)
+
+#define ETH_SET_FLOW_CTRL_BIT               22
+#define ETH_SET_FLOW_CTRL_MASK              (1 << ETH_SET_FLOW_CTRL_BIT)
+
+#define ETH_SET_GMII_SPEED_1000_BIT         23
+#define ETH_SET_GMII_SPEED_1000_MASK        (1 << ETH_SET_GMII_SPEED_1000_BIT)
+
+#define ETH_SET_MII_SPEED_100_BIT           24
+#define ETH_SET_MII_SPEED_100_MASK          (1 << ETH_SET_MII_SPEED_100_BIT)
+
+#endif /* MV_ETH_GMAC_NEW */
+/*-----------------------------------------------------------------------------------------------*/
+
+#define ETH_VLAN_TAG_TO_PRIO_REG(port)      (ETH_REG_BASE(port) + 0x2440)
+
+/* Ethernet Type Priority register */
+#define ETH_TYPE_PRIO_REG(port)             (ETH_REG_BASE(port) + 0x24BC)
+
+#define ETH_TYPE_PRIO_ENABLE_BIT            0
+#define ETH_TYPE_PRIO_FORCE_BIT             1
+
+#define ETH_TYPE_PRIO_RXQ_OFFS              2
+#define ETH_TYPE_PRIO_RXQ_ALL_MASK          (0x7 << ETH_TYPE_PRIO_RXQ_OFFS)
+#define ETH_TYPE_PRIO_RXQ_MASK(rxq)         ((rxq) << ETH_TYPE_PRIO_RXQ_OFFS)
+
+#define ETH_TYPE_PRIO_VALUE_OFFS            5
+#define ETH_TYPE_PRIO_VALUE_ALL_MASK        (0xFFFF << ETH_TYPE_PRIO_VALUE_OFFS)
+#define ETH_TYPE_PRIO_VALUE_MASK(type)      (type << ETH_TYPE_PRIO_VALUE_OFFS)
+
+#define ETH_FORCE_UNICAST_BIT               21
+#define ETH_FORCE_UNICAST_MASK              (1 << ETH_FORCE_UNICAST_BIT)
+/*-----------------------------------------------------------------------------------------------*/
+
+/***** Ethernet Port Status reg (PSR) *****/
+#define ETH_PORT_STATUS_REG(port)           (ETH_REG_BASE(port) + 0x2444)
+
+/* Other bits are different for new GMAC and old GMAC modules */
+#ifdef MV_ETH_GMAC_NEW
+
+#define ETH_TX_IN_PROGRESS_OFFS             0
+#define ETH_TX_IN_PROGRESS_MASK(txp)        (1 << ((txp) + ETH_TX_IN_PROGRESS_OFFS))
+#define ETH_TX_IN_PROGRESS_ALL_MASK         (0xFF << ETH_TX_IN_PROGRESS_OFFS)
+
+#define ETH_TX_FIFO_EMPTY_OFFS              8
+#define ETH_TX_FIFO_EMPTY_MASK(txp)         (1 << ((txp) + ETH_TX_FIFO_EMPTY_OFFS))
+#define ETH_TX_FIFO_EMPTY_ALL_MASK          (0xFF << ETH_TX_FIFO_EMPTY_OFFS)
+
+#define ETH_RX_FIFO_EMPTY_BIT               16
+#define ETH_RX_FIFO_EMPTY_MASK              (1 << ETH_RX_FIFO_EMPTY_BIT)
+
+#else /* Old GMAC */
+
+#define ETH_LINK_UP_BIT                     1
+#define ETH_LINK_UP_MASK                    (1 << ETH_LINK_UP_BIT)
+
+#define ETH_FULL_DUPLEX_BIT                 2
+#define ETH_FULL_DUPLEX_MASK                (1 << ETH_FULL_DUPLEX_BIT)
+
+#define ETH_FLOW_CTRL_ENABLED_BIT        	3
+#define ETH_FLOW_CTRL_ENABLED_MASK       	(1 << ETH_FLOW_CTRL_ENABLED_BIT)
+
+#define ETH_GMII_SPEED_1000_BIT             4
+#define ETH_GMII_SPEED_1000_MASK            (1 << ETH_GMII_SPEED_1000_BIT)
+
+#define ETH_MII_SPEED_100_BIT               5
+#define ETH_MII_SPEED_100_MASK              (1 << ETH_MII_SPEED_100_BIT)
+
+#define ETH_TX_IN_PROGRESS_BIT              7
+#define ETH_TX_IN_PROGRESS_MASK             (1 << ETH_TX_IN_PROGRESS_BIT)
+
+#define ETH_TX_FIFO_EMPTY_BIT               10
+#define ETH_TX_FIFO_EMPTY_MASK              (1 << ETH_TX_FIFO_EMPTY_BIT)
+
+#define ETH_RX_FIFO_EMPTY_BIT               12
+#define ETH_RX_FIFO_EMPTY_MASK              (1 << ETH_RX_FIFO_EMPTY_BIT)
+
+#define PON_TX_IN_PROGRESS_OFFS             0
+#define PON_TX_IN_PROGRESS_MASK(txp)        (1 << ((txp) + PON_TX_IN_PROGRESS_OFFS))
+#define PON_TX_IN_PROGRESS_ALL_MASK         (0xFF << PON_TX_IN_PROGRESS_OFFS)
+
+#define PON_TX_FIFO_EMPTY_OFFS              8
+#define PON_TX_FIFO_EMPTY_MASK(txp)         (1 << ((txp) + PON_TX_FIFO_EMPTY_OFFS))
+#define PON_TX_FIFO_EMPTY_ALL_MASK          (0xFF << PON_TX_FIFO_EMPTY_OFFS)
+
+#endif /* MV_ETH_GMAC_NEW */
+/*-----------------------------------------------------------------------------------------------*/
+
+
+/***** Transmit Queue Command (TxQC) register *****/
+#define ETH_TX_QUEUE_COMMAND_REG(p, txp)    (NETA_TX_REG_BASE((p), (txp)) + 0x0048)
+
+#define ETH_TXQ_ENABLE_OFFSET               0
+#define ETH_TXQ_ENABLE_MASK                 (0x000000FF << ETH_TXQ_ENABLE_OFFSET)
+
+#define ETH_TXQ_DISABLE_OFFSET              8
+#define ETH_TXQ_DISABLE_MASK                (0x000000FF << ETH_TXQ_DISABLE_OFFSET)
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Marvell Header Register */
+#define ETH_PORT_MARVELL_HEADER_REG(port)   (ETH_REG_BASE(port) + 0x2454)
+
+#define ETH_MH_EN_BIT                       0
+#define ETH_MH_EN_MASK                      (1 << ETH_MH_EN_BIT)
+
+#define ETH_DSA_EN_OFFS                     10
+#define ETH_DSA_EN_MASK                     (3 << ETH_DSA_EN_OFFS)
+#define ETH_DSA_MASK                        (1 << ETH_DSA_EN_OFFS)
+#define ETH_DSA_EXT_MASK                    (2 << ETH_DSA_EN_OFFS)
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Ethernet Cause Register */
+#define ETH_INTR_CAUSE_REG(port)            (ETH_REG_BASE(port) + 0x2460)
+
+#define ETH_CAUSE_RX_READY_SUM_BIT          0
+#define ETH_CAUSE_EXTEND_BIT                1
+
+#define ETH_CAUSE_RX_READY_OFFSET           2
+#define ETH_CAUSE_RX_READY_BIT(queue)       (ETH_CAUSE_RX_READY_OFFSET + (queue))
+#define ETH_CAUSE_RX_READY_MASK(queue)      (1 << (ETH_CAUSE_RX_READY_BIT(queue)))
+
+#define ETH_CAUSE_RX_ERROR_SUM_BIT          10
+#define ETH_CAUSE_RX_ERROR_OFFSET           11
+#define ETH_CAUSE_RX_ERROR_BIT(queue)       (ETH_CAUSE_RX_ERROR_OFFSET + (queue))
+#define ETH_CAUSE_RX_ERROR_MASK(queue)      (1 << (ETH_CAUSE_RX_ERROR_BIT(queue)))
+
+#define ETH_CAUSE_TX_END_BIT                19
+#define ETH_CAUSE_SUM_BIT                   31
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Ethernet Cause Extended Register */
+#define ETH_INTR_CAUSE_EXT_REG(port)        (ETH_REG_BASE(port) + 0x2464)
+
+#define ETH_CAUSE_TX_BUF_OFFSET             0
+#define ETH_CAUSE_TX_BUF_BIT(queue)         (ETH_CAUSE_TX_BUF_OFFSET + (queue))
+#define ETH_CAUSE_TX_BUF_MASK(queue)        (1 << (ETH_CAUSE_TX_BUF_BIT(queue)))
+
+#define ETH_CAUSE_TX_ERROR_OFFSET           8
+#define ETH_CAUSE_TX_ERROR_BIT(queue)       (ETH_CAUSE_TX_ERROR_OFFSET + (queue))
+#define ETH_CAUSE_TX_ERROR_MASK(queue)      (1 << (ETH_CAUSE_TX_ERROR_BIT(queue)))
+
+#define ETH_CAUSE_PHY_STATUS_CHANGE_BIT     16
+#define ETH_CAUSE_RX_OVERRUN_BIT            18
+#define ETH_CAUSE_TX_UNDERRUN_BIT           19
+#define ETH_CAUSE_LINK_STATE_CHANGE_BIT     20
+#define ETH_CAUSE_INTERNAL_ADDR_ERR_BIT     23
+#define ETH_CAUSE_EXTEND_SUM_BIT            31
+/*-----------------------------------------------------------------------------------------------*/
+
+#define ETH_INTR_MASK_REG(port)             (ETH_REG_BASE(port) + 0x2468)
+#define ETH_INTR_MASK_EXT_REG(port)         (ETH_REG_BASE(port) + 0x246c)
+#define ETH_RX_MINIMAL_FRAME_SIZE_REG(port) (ETH_REG_BASE(port) + 0x247c)
+#define ETH_RX_DISCARD_PKTS_CNTR_REG(port)  (ETH_REG_BASE(port) + 0x2484)
+#define ETH_RX_OVERRUN_PKTS_CNTR_REG(port)  (ETH_REG_BASE(port) + 0x2488)
+#define ETH_INTERNAL_ADDR_ERROR_REG(port)   (ETH_REG_BASE(port) + 0x2494)
+
+/***** Receive Queue Command (RxQC) register *****/
+#define ETH_RX_QUEUE_COMMAND_REG(port)      (ETH_REG_BASE(port) + 0x2680)
+
+#define ETH_RXQ_ENABLE_OFFSET               0
+#define ETH_RXQ_ENABLE_MASK                 (0x000000FF << ETH_RXQ_ENABLE_OFFSET)
+
+#define ETH_RXQ_DISABLE_OFFSET              8
+#define ETH_RXQ_DISABLE_MASK                (0x000000FF << ETH_RXQ_DISABLE_OFFSET)
+/*-----------------------------------------------------------------------------------------------*/
+
+#define ETH_MIB_COUNTERS_BASE(port, txp)    (ETH_REG_BASE(port) + 0x3000 + ((txp) * 0x80))
+#define ETH_DA_FILTER_SPEC_MCAST_BASE(port) (ETH_REG_BASE(port) + 0x3400)
+#define ETH_DA_FILTER_OTH_MCAST_BASE(port)  (ETH_REG_BASE(port) + 0x3500)
+#define ETH_DA_FILTER_UCAST_BASE(port)      (ETH_REG_BASE(port) + 0x3600)
+
+/* Phy address register definitions */
+#define ETH_PHY_ADDR_OFFS          0
+#define ETH_PHY_ADDR_MASK          (0x1f << ETH_PHY_ADDR_OFFS)
+
+/* MIB Counters register definitions */
+#define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW    0x0
+#define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH   0x4
+#define ETH_MIB_BAD_OCTETS_RECEIVED         0x8
+#define ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR   0xc
+#define ETH_MIB_GOOD_FRAMES_RECEIVED        0x10
+#define ETH_MIB_BAD_FRAMES_RECEIVED         0x14
+#define ETH_MIB_BROADCAST_FRAMES_RECEIVED   0x18
+#define ETH_MIB_MULTICAST_FRAMES_RECEIVED   0x1c
+#define ETH_MIB_FRAMES_64_OCTETS            0x20
+#define ETH_MIB_FRAMES_65_TO_127_OCTETS     0x24
+#define ETH_MIB_FRAMES_128_TO_255_OCTETS    0x28
+#define ETH_MIB_FRAMES_256_TO_511_OCTETS    0x2c
+#define ETH_MIB_FRAMES_512_TO_1023_OCTETS   0x30
+#define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS   0x34
+#define ETH_MIB_GOOD_OCTETS_SENT_LOW        0x38
+#define ETH_MIB_GOOD_OCTETS_SENT_HIGH       0x3c
+#define ETH_MIB_GOOD_FRAMES_SENT            0x40
+#define ETH_MIB_EXCESSIVE_COLLISION         0x44
+#define ETH_MIB_MULTICAST_FRAMES_SENT       0x48
+#define ETH_MIB_BROADCAST_FRAMES_SENT       0x4c
+#define ETH_MIB_UNREC_MAC_CONTROL_RECEIVED  0x50
+#define ETH_MIB_FC_SENT                     0x54
+#define ETH_MIB_GOOD_FC_RECEIVED            0x58
+#define ETH_MIB_BAD_FC_RECEIVED             0x5c
+#define ETH_MIB_UNDERSIZE_RECEIVED          0x60
+#define ETH_MIB_FRAGMENTS_RECEIVED          0x64
+#define ETH_MIB_OVERSIZE_RECEIVED           0x68
+#define ETH_MIB_JABBER_RECEIVED             0x6c
+#define ETH_MIB_MAC_RECEIVE_ERROR           0x70
+#define ETH_MIB_BAD_CRC_EVENT               0x74
+#define ETH_MIB_COLLISION                   0x78
+#define ETH_MIB_LATE_COLLISION              0x7c
+
+
+#ifndef MV_ETH_GMAC_NEW
+/*****************************************************/
+/*        Registers are not exist in new GMAC    */
+/*****************************************************/
+#define ETH_MII_SERIAL_PARAM_REG(port)      (ETH_REG_BASE(port) + 0x2408)
+#define ETH_GMII_SERIAL_PARAM_REG(port)     (ETH_REG_BASE(port) + 0x240c)
+
+/* Port Serial Control1 (PSC1) */
+#define ETH_PORT_SERIAL_CTRL_1_REG(port)    (ETH_REG_BASE(port) + 0x244c)
+#define ETH_PSC_ENABLE_BIT                  2
+#define ETH_PSC_ENABLE_MASK                 (1 << ETH_PSC_ENABLE_BIT)
+
+#define ETH_RGMII_ENABLE_BIT                3
+#define ETH_RGMII_ENABLE_MASK               (1 << ETH_RGMII_ENABLE_BIT)
+
+#define ETH_PORT_RESET_BIT                  4
+#define ETH_PORT_RESET_MASK                 (1 << ETH_PORT_RESET_BIT)
+
+#define ETH_INBAND_AUTO_NEG_ENABLE_BIT      6
+#define ETH_INBAND_AUTO_NEG_ENABLE_MASK     (1 << ETH_INBAND_AUTO_NEG_ENABLE_BIT)
+
+#define ETH_INBAND_AUTO_NEG_BYPASS_BIT      7
+#define ETH_INBAND_AUTO_NEG_BYPASS_MASK     (1 << ETH_INBAND_AUTO_NEG_BYPASS_BIT)
+
+#define ETH_INBAND_AUTO_NEG_START_BIT       8
+#define ETH_INBAND_AUTO_NEG_START_MASK      (1 << ETH_INBAND_AUTO_NEG_START_BIT)
+
+#define ETH_PORT_TYPE_BIT                   11
+#define ETH_PORT_TYPE_1000BasedX_MASK       (1 << ETH_PORT_TYPE_BIT)
+
+#define ETH_SGMII_MODE_BIT                  12
+#define ETH_1000BaseX_MODE_MASK             (0 << ETH_SGMII_MODE_BIT)
+#define ETH_SGMII_MODE_MASK                 (1 << ETH_SGMII_MODE_BIT)
+
+#define ETH_MGMII_MODE_BIT                  13
+
+#define ETH_EN_MII_ODD_PRE_BIT		        22
+#define ETH_EN_MII_ODD_PRE_MASK		        (1 << ETH_EN_MII_ODD_PRE_BIT)
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Ethernet Port Status1 (PS1) */
+#define ETH_PORT_STATUS_1_REG(port)         (ETH_REG_BASE(port) + 0x2450)
+#define ETH_AUTO_NEG_DONE_BIT               4
+#define ETH_AUTO_NEG_DONE_MASK              (1 << ETH_AUTO_NEG_DONE_BIT)
+/*-----------------------------------------------------------------------------------------------*/
+
+#define ETH_PORT_FIFO_PARAMS_REG(port)      (ETH_REG_BASE(port) + 0x2458)
+
+#endif /* MV_ETH_GMAC_NEW */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __INCmvEthRegsh */
diff --git a/drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvHwf.c b/drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvHwf.c
new file mode 100755
index 000000000000..f693f2908be8
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvHwf.c
@@ -0,0 +1,384 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCommon.h"		/* Should be included before mvSysHwConfig */
+#include "mvTypes.h"
+#include "mvDebug.h"
+#include "mvOs.h"
+#include "mvNeta.h"
+#include "bm/mvBm.h"
+
+/*#define HWF_DBG mvOsPrintf*/
+#define HWF_DBG(X...)
+
+/*******************************************************************************
+* mvNetaHwfInit - Init HWF registers of the port
+* DESCRIPTION:
+*
+* INPUT:
+*       int			port - NETA port number
+*
+* RETURN:   MV_STATUS
+*               MV_OK - Success, Others - Failure
+*
+* NOTE:
+*******************************************************************************/
+MV_STATUS mvNetaHwfInit(int port)
+{
+	int					p, txp;
+	MV_U32				regVal;
+	MV_NETA_PORT_CTRL	*pPortCtrl;
+
+	if ((port < 0) || (port >= mvNetaHalData.maxPort)) {
+		mvOsPrintf("%s: port %d is out of range\n", __func__, port);
+		return MV_OUT_OF_RANGE;
+	}
+
+	pPortCtrl = mvNetaPortHndlGet(port);
+	if (pPortCtrl == NULL) {
+		mvOsPrintf("%s: port %d is not initialized\n", __func__, port);
+		return MV_FAIL;
+	}
+
+	/* Set TX Port base addresses */
+	for (p = 0; p < mvNetaHalData.maxPort; p++) {
+		pPortCtrl = mvNetaPortHndlGet(p);
+		if (pPortCtrl == NULL)
+			continue;
+
+		for (txp = 0; txp < pPortCtrl->txpNum; txp++) {
+			regVal = MV_REG_READ(NETA_HWF_TXP_CFG_REG(port, (p + txp)));
+			regVal &= ~NETA_TXP_BASE_ADDR_MASK(p + txp);
+			regVal |= ((NETA_TX_REG_BASE(p, txp) >> 10) << NETA_TXP_BASE_ADDR_OFFS(p + txp));
+			MV_REG_WRITE(NETA_HWF_TXP_CFG_REG(port, (p + txp)), regVal);
+		}
+	}
+	/* Init HWF RX Control register */
+	regVal = NETA_GEM_PID_SRC_FLOW_ID;
+	MV_REG_WRITE(NETA_HWF_RX_CTRL_REG(port), regVal);
+
+	/* Set Small TX Gap */
+	MV_REG_WRITE(NETA_HWF_TX_GAP_REG(port), NETA_HWF_SMALL_TX_GAP_MASK);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+ * mvNetaHwfBmPoolsSet - Set short and long pools to be used by HWF of the port
+ *
+ * INPUT:
+ *       int        port	- port number
+ *       int        short_pool	- BM pool for short buffers
+ *       int        long_pool	- BM pool for long buffers
+ *
+ * RETURN:   MV_STATUS
+ *               MV_OK - Success, Others - Failure
+ *
+ *******************************************************************************/
+MV_STATUS mvNetaHwfBmPoolsSet(int port, int short_pool, int long_pool)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(NETA_HWF_RX_CTRL_REG(port));
+
+	regVal &= ~NETA_HWF_LONG_POOL_MASK;
+	regVal |= NETA_HWF_LONG_POOL_ID(long_pool);
+
+	regVal &= ~NETA_HWF_SHORT_POOL_MASK;
+	regVal |= NETA_HWF_SHORT_POOL_ID(short_pool);
+
+	MV_REG_WRITE(NETA_HWF_RX_CTRL_REG(port), regVal);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+ * mvNetaHwfEnable - Enable / Disable HWF of the port
+ * DESCRIPTION:
+ *
+ * INPUT:
+ *       int        port   - port number
+ *       int        enable - 0 - disable, 1 - enable
+ *
+ * RETURN:   MV_STATUS
+ *               MV_OK - Success, Others - Failure
+ *
+ * NOTE:
+ *******************************************************************************/
+MV_STATUS mvNetaHwfEnable(int port, int enable)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(NETA_HWF_RX_CTRL_REG(port));
+	if (enable)
+		regVal |= NETA_HWF_ENABLE_MASK;
+	else
+		regVal &= ~NETA_HWF_ENABLE_MASK;
+
+	MV_REG_WRITE(NETA_HWF_RX_CTRL_REG(port), regVal);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+ * mvNetaHwfTxqInit - Set TXQ base address and size, set default Drop configuration
+ * DESCRIPTION:
+ *
+ * INPUT:
+ *       int        rx_port:            RX port number
+ *       int        tx_port, txp, txq:  port, TCONT and TXQ numbers
+ *
+ * RETURN:   MV_STATUS
+ *               MV_OK - Success, Others - Failure
+ *
+ * NOTE:
+ *******************************************************************************/
+MV_STATUS mvNetaHwfTxqInit(int tx_port, int txp, int txq)
+{
+	MV_U32				regVal;
+	MV_NETA_PORT_CTRL	*pPortCtrl;
+	MV_NETA_QUEUE_CTRL	*pQueueCtrl;
+	int					port, dropThresh;
+
+	pPortCtrl = mvNetaPortHndlGet(tx_port);
+	if (pPortCtrl == NULL) {
+		mvOsPrintf("%s: port %d is not initialized\n", __func__, tx_port);
+		return MV_NOT_INITIALIZED;
+	}
+
+	pQueueCtrl = &pPortCtrl->pTxQueue[txp * CONFIG_MV_ETH_TXQ + txq].queueCtrl;
+
+	if (pQueueCtrl->pFirst == NULL) {
+		mvOsPrintf("%s: tx_port=%d, txp=%d, txq=%d is not initialized\n",
+					__func__, tx_port, txp, txq);
+		return MV_NOT_INITIALIZED;
+	}
+
+	for (port = 0; port < mvNetaHalData.maxPort; port++) {
+
+		pPortCtrl = mvNetaPortHndlGet(port);
+		if (pPortCtrl == NULL)
+			continue;
+
+		regVal = NETA_HWF_TX_PORT_MASK(tx_port + txp) | NETA_HWF_TXQ_MASK(txq);
+		MV_REG_WRITE(NETA_HWF_TX_PTR_REG(port), regVal);
+		MV_REG_WRITE(NETA_HWF_TXQ_BASE_REG(port), pQueueCtrl->descBuf.bufPhysAddr);
+		MV_REG_WRITE(NETA_HWF_TXQ_SIZE_REG(port), pQueueCtrl->lastDesc + 1);
+
+		dropThresh = (CONFIG_MV_ETH_HWF_TXQ_DROP * (pQueueCtrl->lastDesc + 1)) / 100;
+		regVal = (dropThresh << NETA_YELLOW_DROP_THRESH_OFFS) |
+			    (CONFIG_MV_ETH_HWF_TXQ_DROP_RND << NETA_YELLOW_DROP_RND_GEN_OFFS);
+
+		MV_REG_WRITE(NETA_HWF_DROP_TH_REG(port), regVal);
+	}
+	return MV_OK;
+}
+
+MV_STATUS mvNetaHwfTxqNextIndexGet(int port, int tx_port, int txp, int txq, int *val)
+{
+	MV_U32				regVal;
+
+	regVal = NETA_HWF_TX_PORT_MASK(tx_port + txp) | NETA_HWF_TXQ_MASK(txq) | NETA_HWF_REG_MASK(3);
+	MV_REG_WRITE(NETA_HWF_TX_PTR_REG(port), regVal);
+
+	regVal = MV_REG_READ(NETA_HWF_MEMORY_REG(port));
+	if (val)
+		*val = (int)((regVal >> 16) & 0x3fff);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+ * mvNetaHwfTxqEnable - Enable / Disable HWF from the rx_port to tx_port/txp/txq
+ * DESCRIPTION:
+ *
+ * INPUT:
+ *       int        rx_port:            RX port number
+ *       int        tx_port, txp, txq:  port, TCONT and TXQ numbers
+ *       int        enable:             0 - disable, 1 - enable
+ *
+ * RETURN:   MV_STATUS
+ *               MV_OK - Success, Others - Failure
+ *
+ * NOTE:
+ *******************************************************************************/
+MV_STATUS mvNetaHwfTxqEnable(int port, int tx_port, int txp, int txq, int enable)
+{
+	MV_U32 regVal;
+
+	/* Enable HWF for each TXQ */
+	regVal = NETA_HWF_TX_PORT_MASK(tx_port + txp) | NETA_HWF_TXQ_MASK(txq);
+	MV_REG_WRITE(NETA_HWF_TX_PTR_REG(port), regVal);
+
+	MV_REG_WRITE(NETA_HWF_TXQ_ENABLE_REG(port), enable << NETA_HWF_TXQ_ENABLE_BIT);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+ * mvNetaHwfTxqDropSet - Set HWF drop threshold
+ * DESCRIPTION:
+ *
+ * INPUT:
+ *       int        rx_port:            RX port number
+ *       int        tx_port, txp, txq:  port, TCONT and TXQ numbers
+ *       int        thresh, bits		drop configuration
+ *
+ * RETURN:   MV_STATUS
+ *               MV_OK - Success, Others - Failure
+ *
+ * NOTE:
+ *******************************************************************************/
+MV_STATUS mvNetaHwfTxqDropSet(int port, int tx_port, int txp, int txq, int thresh, int bits)
+{
+	MV_U32 regVal, dropThresh;
+	MV_NETA_PORT_CTRL *pPortCtrl;
+	MV_NETA_QUEUE_CTRL *pQueueCtrl;
+
+	pPortCtrl = mvNetaPortHndlGet(tx_port);
+	if (pPortCtrl == NULL)
+		return MV_FAIL;
+
+	pQueueCtrl = &pPortCtrl->pTxQueue[txp * CONFIG_MV_ETH_TXQ + txq].queueCtrl;
+	if (pQueueCtrl->pFirst == NULL)
+		return MV_FAIL;
+
+	/* Set HWF Drop parameters for specific TXQ */
+	regVal = NETA_HWF_TX_PORT_MASK(tx_port + txp) | NETA_HWF_TXQ_MASK(txq);
+	MV_REG_WRITE(NETA_HWF_TX_PTR_REG(port), regVal);
+
+	dropThresh = (thresh * (pQueueCtrl->lastDesc + 1)) / 100;
+	regVal = (dropThresh << NETA_YELLOW_DROP_THRESH_OFFS) | (bits << NETA_YELLOW_DROP_RND_GEN_OFFS);
+
+	MV_REG_WRITE(NETA_HWF_DROP_TH_REG(port), regVal);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+ * mvNetaHwfMhSrcSet - Select MH source on TX during HWF (PNC or field in
+ * 			HWF RX control register.
+ * DESCRIPTION:
+ *
+ * INPUT:
+ *       int        port;	port number
+ *       int        mh_src; 0 - register field, 1 - PNC result info bits
+ *
+ * RETURN:   MV_STATUS
+ *               MV_OK - Success, Others - Failure
+ *
+ * NOTE:
+ *******************************************************************************/
+MV_STATUS mvNetaHwfMhSrcSet(int port, MV_NETA_HWF_MH_SRC mh_src)
+{
+	MV_U32	regVal;
+
+	regVal = MV_REG_READ(NETA_HWF_RX_CTRL_REG(port));
+
+	switch (mh_src) {
+
+	case MV_NETA_HWF_MH_REG:
+		regVal &= ~NETA_MH_SRC_PNC_MASK;
+		break;
+
+	case MV_NETA_HWF_MH_PNC:
+		regVal |= NETA_MH_SRC_PNC_MASK;
+		break;
+
+	default:
+		mvOsPrintf("port=%d: Unexpected HWF MH source = %d value\n", port, mh_src);
+		return MV_BAD_PARAM;
+	}
+	MV_REG_WRITE(NETA_HWF_RX_CTRL_REG(port), regVal);
+	return MV_OK;
+
+}
+
+/*******************************************************************************
+ * mvNetaHwfMhSelSet - Set MH value on TX during HWF.
+ *
+ * DESCRIPTION:
+ *
+ * INPUT:
+ *       int        port;		port number
+ *       int        mh_sel_nask;	use the following values as mask
+ *					NETA_MH_DONT_CHANGE
+ *					NETA_MH_REPLACE_GPON_HDR
+ *					NETA_MH_REPLACE_MH_REG(r)
+ * RETURN:   MV_STATUS
+ *               MV_OK - Success, Others - Failure
+ *
+ * NOTE:
+ *******************************************************************************/
+MV_STATUS mvNetaHwfMhSelSet(int port, MV_U8 mh_sel_mask)
+{
+	MV_U32	regVal;
+
+	regVal = MV_REG_READ(NETA_HWF_RX_CTRL_REG(port));
+	regVal &= ~NETA_MH_SEL_MASK;
+	regVal |= (mh_sel_mask & NETA_MH_SEL_MASK);
+
+	MV_REG_WRITE(NETA_HWF_RX_CTRL_REG(port), regVal);
+	return MV_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvNeta.c b/drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvNeta.c
new file mode 100644
index 000000000000..fbfa2772eeb2
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvNeta.c
@@ -0,0 +1,3489 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCommon.h"		/* Should be included before mvSysHwConfig */
+#include "mvTypes.h"
+#include "mv802_3.h"
+#include "mvDebug.h"
+#include "mvOs.h"
+
+#include "mvNeta.h"
+#include "pnc/mvPnc.h"
+#include "bm/mvBm.h"
+
+/* This array holds the control structure of each port */
+MV_NETA_PORT_CTRL **mvNetaPortCtrl = NULL;
+MV_NETA_HAL_DATA mvNetaHalData;
+
+/* Bitmap of NETA dymamic capabilities, such as PnC, BM, HWF and PME
+	 * Pnc - 0x01
+	 * BM  - 0x02
+	 * HWF - 0x04
+	 * PME - 0x08
+*/
+unsigned int neta_cap_bitmap = 0x0;
+
+/* Function prototypes */
+/* Legacy parse function start */
+static MV_BOOL netaSetUcastAddr(int port, MV_U8 lastNibble, int queue);
+static MV_BOOL netaSetSpecialMcastAddr(int port, MV_U8 lastByte, int queue);
+static MV_BOOL netaSetOtherMcastAddr(int port, MV_U8 crc8, int queue);
+/* Legacy parse function end */
+
+static void mvNetaPortSgmiiConfig(int port, MV_BOOL isInband);
+static MV_U8 *mvNetaDescrMemoryAlloc(MV_NETA_PORT_CTRL * pPortCtrl, int descSize,
+				   MV_ULONG *pPhysAddr, MV_U32 *memHandle);
+static void mvNetaDescrMemoryFree(MV_NETA_PORT_CTRL *pPortCtrl, MV_BUF_INFO *pDescBuf);
+static void mvNetaDescRingReset(MV_NETA_QUEUE_CTRL *pQueueHndl);
+
+#define TX_DISABLE_TIMEOUT_MSEC     1000
+#define RX_DISABLE_TIMEOUT_MSEC     1000
+#define TX_FIFO_EMPTY_TIMEOUT_MSEC  10000
+#define PORT_DISABLE_WAIT_TCLOCKS   5000
+
+
+int mvNetaMaxCheck(int value, int limit, char *name)
+{
+	if ((value < 0) || (value >= limit)) {
+		mvOsPrintf("%s %d is out of range [0..%d]\n",
+			name ? name : "value", value, (limit - 1));
+		return 1;
+	}
+	return 0;
+}
+
+int mvNetaPortCheck(int port)
+{
+	return mvNetaMaxCheck(port, mvNetaHalData.maxPort, "port");
+}
+
+int mvNetaTxpCheck(int port, int txp)
+{
+	int txpMax = 1;
+
+	if (mvNetaPortCheck(port))
+		return 1;
+
+	if (MV_PON_PORT(port))
+		txpMax = MV_ETH_MAX_TCONT;
+
+	return mvNetaMaxCheck(txp, txpMax, "txp");
+}
+
+int mvNetaCpuCheck(int cpu)
+{
+	return mvNetaMaxCheck(cpu, NETA_MAX_CPU_REGS, "cpu");
+}
+
+/******************************************************************************/
+/*                      Port Initialization functions                         */
+/******************************************************************************/
+
+/*******************************************************************************
+* mvNetaPortInit - Initialize the NETA port
+*
+* DESCRIPTION:
+*       This function initializes the NETA port.
+*       1) Allocates and initializes internal port control structure.
+*       2) Creates RX and TX descriptor rings.
+*       3) Disables RX and TX operations, clears cause registers and
+*	   masks all interrupts.
+*       4) Sets all registers to default values and cleans all MAC tables.
+*
+* INPUT:
+*       int			portNo          - NETA port number
+*
+* RETURN:
+*       void* - NETA port handler that should be passed to most other
+*               functions dealing with this port.
+*
+* NOTE: This function is called once per port when loading the NETA module.
+*******************************************************************************/
+void *mvNetaPortInit(int portNo, void *osHandle)
+{
+	MV_NETA_PORT_CTRL *pPortCtrl;
+
+	/* Check validity of parameters */
+	if ((portNo < 0) || (portNo >= mvNetaHalData.maxPort)) {
+		mvOsPrintf("EthPort #%d: Bad initialization parameters\n", portNo);
+		return NULL;
+	}
+
+	pPortCtrl = (MV_NETA_PORT_CTRL *)mvOsMalloc(sizeof(MV_NETA_PORT_CTRL));
+	if (pPortCtrl == NULL) {
+		mvOsPrintf("EthDrv: Can't allocate %dB for port #%d control structure!\n",
+			   (int)sizeof(MV_NETA_PORT_CTRL), portNo);
+		return NULL;
+	}
+
+	memset(pPortCtrl, 0, sizeof(MV_NETA_PORT_CTRL));
+	mvNetaPortCtrl[portNo] = pPortCtrl;
+
+	pPortCtrl->portNo = portNo;
+	pPortCtrl->osHandle = osHandle;
+
+	pPortCtrl->txpNum = 1;
+
+#ifdef CONFIG_MV_PON
+	if (MV_PON_PORT(portNo))
+		pPortCtrl->txpNum = MV_ETH_MAX_TCONT;
+#endif /* CONFIG_MV_PON */
+
+	pPortCtrl->rxqNum = CONFIG_MV_ETH_RXQ;
+	pPortCtrl->txqNum = CONFIG_MV_ETH_TXQ;
+
+	/* Allocate RXQ and TXQ structures */
+	pPortCtrl->pRxQueue = mvOsMalloc(pPortCtrl->rxqNum * sizeof(MV_NETA_RXQ_CTRL));
+	if (pPortCtrl->pRxQueue == NULL) {
+		mvOsPrintf("mvNeta port%d: Can't allocate %d Bytes for %d RXQs controls\n",
+			   portNo, (int)pPortCtrl->rxqNum * sizeof(MV_NETA_RXQ_CTRL), pPortCtrl->rxqNum);
+		return NULL;
+	}
+	memset(pPortCtrl->pRxQueue, 0, pPortCtrl->rxqNum * sizeof(MV_NETA_RXQ_CTRL));
+
+	pPortCtrl->pTxQueue = mvOsMalloc(pPortCtrl->txpNum * pPortCtrl->txqNum * sizeof(MV_NETA_TXQ_CTRL));
+	if (pPortCtrl->pTxQueue == NULL) {
+		mvOsPrintf("mvNeta port%d: Can't allocate %d Bytes for %d TXQs controls\n",
+			   portNo, (int)pPortCtrl->txqNum * pPortCtrl->txpNum * sizeof(MV_NETA_TXQ_CTRL),
+			   pPortCtrl->txqNum * pPortCtrl->txpNum);
+		return NULL;
+	}
+	memset(pPortCtrl->pTxQueue, 0, pPortCtrl->txpNum * pPortCtrl->txqNum * sizeof(MV_NETA_TXQ_CTRL));
+
+	/* Disable port */
+	mvNetaPortDisable(portNo);
+	mvNetaDefaultsSet(portNo);
+
+	return pPortCtrl;
+}
+
+/*******************************************************************************
+* mvNetaPortDestroy - Free the memory allocated for a NETA port
+*
+* DESCRIPTION:
+*       This function frees the memory allocated for the NETA port in mvNetaPortInit().
+*
+* INPUT:
+*       int			portNo          - NETA port number
+*
+*******************************************************************************/
+void mvNetaPortDestroy(int portNo)
+{
+	MV_NETA_PORT_CTRL *pPortCtrl = mvNetaPortHndlGet(portNo);
+
+	if (pPortCtrl->pTxQueue)
+		mvOsFree(pPortCtrl->pTxQueue);
+
+	if (pPortCtrl->pRxQueue)
+		mvOsFree(pPortCtrl->pRxQueue);
+
+	if (pPortCtrl)
+		mvOsFree(pPortCtrl);
+
+	mvNetaPortCtrl[portNo] = NULL;
+}
+
+
+/*******************************************************************************
+* mvNetaAccMode - Get NETA Acceleration mode
+*
+* DESCRIPTION:
+*
+* INPUT:
+*
+* RETURN:
+*       int - NETA Acceleration mode
+*
+* NOTE: This function is called once on loading the NETA module.
+*******************************************************************************/
+int mvNetaAccMode(void)
+{
+	int mode;
+
+	if (MV_NETA_BM_CAP() && MV_NETA_PNC_CAP())
+		mode = NETA_ACC_MODE_MASK(NETA_ACC_MODE_EXT_PNC_BMU);
+	else if (MV_NETA_BM_CAP())
+		mode = NETA_ACC_MODE_MASK(NETA_ACC_MODE_EXT_BMU);
+	else if (MV_NETA_PNC_CAP())
+		mode = NETA_ACC_MODE_MASK(NETA_ACC_MODE_EXT_PNC);
+	else
+		mode = NETA_ACC_MODE_MASK(NETA_ACC_MODE_EXT);
+
+	return mode;
+}
+
+/*******************************************************************************
+* mvNetaDefaultsSet - Set defaults to the NETA port
+*
+* DESCRIPTION:
+*       This function sets default values to the NETA port.
+*       1) Clears interrupt Cause and Mask registers.
+*       2) Clears all MAC tables.
+*       3) Sets defaults to all registers.
+*       4) Resets RX and TX descriptor rings.
+*       5) Resets PHY.
+*
+* INPUT:
+*   int     portNo		- Port number.
+*
+* RETURN:   MV_STATUS
+*               MV_OK - Success, Others - Failure
+* NOTE:
+*   This function updates all the port configurations except those set
+*   initialy by the OsGlue by MV_NETA_PORT_INIT.
+*   This function can be called after portDown to return the port settings
+*   to defaults.
+*******************************************************************************/
+MV_STATUS mvNetaDefaultsSet(int port)
+{
+	int cpu;
+	int queue, txp;
+	MV_U32 regVal;
+	MV_NETA_PORT_CTRL *pPortCtrl = mvNetaPortHndlGet(port);
+
+	/* Clear all Cause registers */
+	MV_REG_WRITE(NETA_INTR_NEW_CAUSE_REG(port), 0);
+	MV_REG_WRITE(NETA_INTR_OLD_CAUSE_REG(port), 0);
+	MV_REG_WRITE(NETA_INTR_MISC_CAUSE_REG(port), 0);
+
+	/* Mask all interrupts */
+	MV_REG_WRITE(NETA_INTR_NEW_MASK_REG(port), 0);
+	MV_REG_WRITE(NETA_INTR_OLD_MASK_REG(port), 0);
+	MV_REG_WRITE(NETA_INTR_MISC_MASK_REG(port), 0);
+
+	MV_REG_WRITE(NETA_INTR_ENABLE_REG(port), 0);
+
+	/* Enable MBUS Retry bit16 */
+	MV_REG_WRITE(NETA_MBUS_RETRY_REG(port), NETA_MBUS_RETRY_CYCLES(0x20));
+
+	/* Set CPU queue access map - all CPUs have access to all RX queues and to all TX queues */
+
+	for (cpu = 0; cpu < NETA_MAX_CPU_REGS; cpu++)
+		if (MV_BIT_CHECK(mvNetaHalData.cpuMask, cpu))
+			MV_REG_WRITE(NETA_CPU_MAP_REG(port, cpu), (NETA_CPU_RXQ_ACCESS_ALL_MASK | NETA_CPU_TXQ_ACCESS_ALL_MASK));
+
+	/* Reset RX and TX DMAs */
+	MV_REG_WRITE(NETA_PORT_RX_RESET_REG(port), NETA_PORT_RX_DMA_RESET_MASK);
+
+	for (txp = 0; txp < pPortCtrl->txpNum; txp++) {
+		MV_REG_WRITE(NETA_PORT_TX_RESET_REG(port, txp), NETA_PORT_TX_DMA_RESET_MASK);
+
+#ifdef CONFIG_MV_PON
+		if ((txp * MV_ETH_MAX_TXQ % 32) == 0) {
+			MV_REG_WRITE(GPON_TXQ_INTR_NEW_CAUSE_REG(txp * MV_ETH_MAX_TXQ), 0);
+			MV_REG_WRITE(GPON_TXQ_INTR_NEW_MASK_REG(txp * MV_ETH_MAX_TXQ), 0);
+			MV_REG_WRITE(GPON_TXQ_INTR_OLD_CAUSE_REG(txp * MV_ETH_MAX_TXQ), 0);
+			MV_REG_WRITE(GPON_TXQ_INTR_OLD_MASK_REG(txp * MV_ETH_MAX_TXQ), 0);
+			MV_REG_WRITE(GPON_TXQ_INTR_ERR_CAUSE_REG(txp * MV_ETH_MAX_TXQ), 0);
+			MV_REG_WRITE(GPON_TXQ_INTR_ERR_MASK_REG(txp * MV_ETH_MAX_TXQ), 0);
+
+			MV_REG_WRITE(GPON_TXQ_INTR_ENABLE_REG(txp * MV_ETH_MAX_TXQ), 0xFFFFFFFF);
+		}
+#endif /* CONFIG_MV_PON */
+
+		/* Disable Legacy WRR, Disable EJP, Release from reset */
+		MV_REG_WRITE(NETA_TX_CMD_1_REG(port, txp), 0);
+
+		/* Close bandwidth for all queues */
+		for (queue = 0; queue < MV_ETH_MAX_TXQ; queue++)
+			MV_REG_WRITE(NETA_TXQ_TOKEN_CNTR_REG(port, txp, queue),  0);
+
+		/* Set basic period to  1 usec */
+		MV_REG_WRITE(NETA_TX_REFILL_PERIOD_REG(port, txp),  mvNetaHalData.tClk / 1000000);
+		mvNetaTxpRateMaxSet(port, txp);
+
+		MV_REG_WRITE(NETA_PORT_TX_RESET_REG(port, txp), 0);
+	}
+
+	MV_REG_WRITE(NETA_PORT_RX_RESET_REG(port), 0);
+
+	/* Set Port Acceleration Mode */
+	regVal = mvNetaAccMode();
+	MV_REG_WRITE(NETA_ACC_MODE_REG(port), regVal);
+
+#ifdef CONFIG_MV_ETH_BM
+	/* Set address of Buffer Management Unit */
+	if (MV_NETA_BM_CAP())
+		MV_REG_WRITE(NETA_BM_ADDR_REG(port), mvNetaHalData.bmPhysBase);
+#endif /* CONFIG_MV_ETH_BM */
+
+	/* Update value of portCfg register accordingly with all RxQueue types */
+	regVal = PORT_CONFIG_VALUE(CONFIG_MV_ETH_RXQ_DEF);
+	MV_REG_WRITE(ETH_PORT_CONFIG_REG(port), regVal);
+
+	regVal = PORT_CONFIG_EXTEND_VALUE;
+	MV_REG_WRITE(ETH_PORT_CONFIG_EXTEND_REG(port), regVal);
+
+	if (MV_PON_PORT(port))
+		MV_REG_WRITE(ETH_RX_MINIMAL_FRAME_SIZE_REG(port), 40);
+	else
+		MV_REG_WRITE(ETH_RX_MINIMAL_FRAME_SIZE_REG(port), 64);
+
+#ifndef MV_ETH_GMAC_NEW
+	if (!MV_PON_PORT(port)) {
+		regVal = PORT_SERIAL_CONTROL_VALUE;
+
+		regVal &= ~ETH_MAX_RX_PACKET_SIZE_MASK;
+		regVal |= ETH_MAX_RX_PACKET_1522BYTE;
+
+		MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(port), regVal);
+
+		/* Allow receiving packes with odd number of preamble nibbles */
+		regVal = MV_REG_READ(ETH_PORT_SERIAL_CTRL_1_REG(port));
+		regVal |= ETH_EN_MII_ODD_PRE_MASK;
+		MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_1_REG(port), regVal);
+	}
+#endif /* !MV_ETH_GMAC_NEW */
+
+	/* build PORT_SDMA_CONFIG_REG */
+	regVal = 0;
+
+#ifdef CONFIG_MV_ETH_REDUCE_BURST_SIZE_WA
+	/* This is a WA for the IOCC HW BUG involve in using 128B burst size */
+	regVal |= ETH_TX_BURST_SIZE_MASK(ETH_BURST_SIZE_2_64BIT_VALUE);
+	regVal |= ETH_RX_BURST_SIZE_MASK(ETH_BURST_SIZE_2_64BIT_VALUE);
+#else
+	/* Default burst size */
+	regVal |= ETH_TX_BURST_SIZE_MASK(ETH_BURST_SIZE_16_64BIT_VALUE);
+	regVal |= ETH_RX_BURST_SIZE_MASK(ETH_BURST_SIZE_16_64BIT_VALUE);
+#endif /* CONFIG_MV_ETH_REDUCE_BURST_SIZE_WA */
+
+#if defined(MV_CPU_BE) && !defined(CONFIG_MV_ETH_BE_WA)
+    /* big endian */
+    regVal |= (ETH_RX_NO_DATA_SWAP_MASK | ETH_TX_NO_DATA_SWAP_MASK | ETH_DESC_SWAP_MASK);
+#else /* MV_CPU_LE */
+    /* little endian */
+	regVal |= (ETH_RX_NO_DATA_SWAP_MASK | ETH_TX_NO_DATA_SWAP_MASK | ETH_NO_DESC_SWAP_MASK);
+#endif /* MV_CPU_BE && !CONFIG_MV_ETH_BE_WA */
+
+	/* Assign port SDMA configuration */
+	MV_REG_WRITE(ETH_SDMA_CONFIG_REG(port), regVal);
+
+	mvNetaSetUcastTable(port, -1);
+	mvNetaSetSpecialMcastTable(port, -1);
+	mvNetaSetOtherMcastTable(port, -1);
+
+#if defined(CONFIG_MV_PON) && defined(MV_PON_MIB_SUPPORT)
+	if (MV_PON_PORT(port))
+		/* Set default MibNo = 0 for PON RX counters */
+		mvNetaPonRxMibDefault(0);
+#endif /* CONFIG_MV_PON && MV_PON_MIB_SUPPORT */
+
+	/* Set port interrupt enable register - default enable all */
+	MV_REG_WRITE(NETA_INTR_ENABLE_REG(port),
+		     (NETA_RXQ_PKT_INTR_ENABLE_ALL_MASK | NETA_TXQ_PKT_INTR_ENABLE_ALL_MASK));
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNetaHalInit - Initialize the HAL and the NETA unit
+*
+* DESCRIPTION:
+*       This function:
+*	1) Initializes HAL global data structures.
+*       2) Clears and disables NETA unit interrupts.
+*
+* INPUT:  NONE
+*
+* RETURN: NONE
+*
+* NOTE: this function is called once in the boot process.
+*******************************************************************************/
+MV_STATUS mvNetaHalInit(MV_NETA_HAL_DATA *halData)
+{
+	int port;
+
+	mvNetaHalData = *halData;
+
+	/* Allocate port data structures */
+	mvNetaPortCtrl = mvOsMalloc(mvNetaHalData.maxPort * sizeof(MV_NETA_PORT_CTRL *));
+	if (mvNetaPortCtrl == NULL) {
+		mvOsPrintf("%s: Can't allocate %d bytes for %d ports\n", __func__,
+			   mvNetaHalData.maxPort * sizeof(MV_NETA_PORT_CTRL), mvNetaHalData.maxPort);
+		return MV_FAIL;
+	}
+	for (port = 0; port < mvNetaHalData.maxPort; port++)
+		mvNetaPortCtrl[port] = NULL;
+
+#ifdef CONFIG_MV_ETH_BM
+	if (MV_NETA_BM_CAP())
+		mvNetaBmInit(mvNetaHalData.bmVirtBase);
+#endif /* CONFIG_MV_ETH_BM */
+
+#ifdef CONFIG_MV_ETH_PNC
+	if (MV_NETA_PNC_CAP())
+		mvPncInit(mvNetaHalData.pncVirtBase, mvNetaHalData.pncTcamSize);
+#endif /* CONFIG_MV_ETH_PNC */
+
+	return MV_OK;
+}
+
+/* Update CPUs that can process packets incoming to specific RXQ */
+MV_STATUS	mvNetaRxqCpuMaskSet(int port, int rxq_mask, int cpu)
+{
+	MV_U32	regVal;
+
+	if (mvNetaPortCheck(port))
+		return MV_ERROR;
+
+	if (mvNetaCpuCheck(cpu))
+		return MV_ERROR;
+
+	regVal = MV_REG_READ(NETA_CPU_MAP_REG(port, cpu));
+	regVal &= ~NETA_CPU_RXQ_ACCESS_ALL_MASK;
+	regVal |= (rxq_mask << NETA_CPU_RXQ_ACCESS_OFFS);
+	MV_REG_WRITE(NETA_CPU_MAP_REG(port, cpu), regVal);
+
+	return MV_OK;
+}
+
+/* Update specific CPU that can process packets outcoming to TXQs */
+MV_STATUS	mvNetaTxqCpuMaskSet(int port, int txq_mask, int cpu)
+{
+	MV_U32	regVal;
+
+	if (mvNetaPortCheck(port))
+		return MV_ERROR;
+
+	if (mvNetaCpuCheck(cpu))
+		return MV_ERROR;
+
+	regVal = MV_REG_READ(NETA_CPU_MAP_REG(port, cpu));
+	regVal &= ~NETA_CPU_TXQ_ACCESS_ALL_MASK;
+	regVal |= (txq_mask << NETA_CPU_TXQ_ACCESS_OFFS);
+	MV_REG_WRITE(NETA_CPU_MAP_REG(port, cpu), regVal);
+
+	return MV_OK;
+}
+
+/*****************************************************************/
+/* Functions below are different for old and new version of GMAC */
+/*****************************************************************/
+
+#ifdef MV_ETH_GMAC_NEW
+
+MV_STATUS       mvEthGmacRgmiiSet(int port, int enable)
+{
+	MV_U32  regVal;
+
+	regVal = MV_REG_READ(NETA_GMAC_CTRL_2_REG(port));
+	if (enable)
+		regVal |= NETA_GMAC_PORT_RGMII_MASK;
+	else
+		regVal &= ~NETA_GMAC_PORT_RGMII_MASK;
+
+	MV_REG_WRITE(NETA_GMAC_CTRL_2_REG(port), regVal);
+
+	return MV_OK;
+}
+
+static void mvNetaPortSgmiiConfig(int port, MV_BOOL isInband)
+{
+	MV_U32 regVal;
+
+
+	regVal = MV_REG_READ(NETA_GMAC_CTRL_2_REG(port));
+	regVal |= (NETA_GMAC_PSC_ENABLE_MASK);
+	MV_REG_WRITE(NETA_GMAC_CTRL_2_REG(port), regVal);
+
+	if (isInband) {
+
+		/* set Inband AN enable in MAC Control 2 */
+		regVal = MV_REG_READ(NETA_GMAC_CTRL_2_REG(port));
+		regVal |= NETA_GMAC_INBAND_AN_MODE_MASK;
+		MV_REG_WRITE(NETA_GMAC_CTRL_2_REG(port), regVal);
+
+		/* set portType to SGMII (encoding) in MAC Control 0 */
+		regVal = MV_REG_READ(NETA_GMAC_CTRL_0_REG(port));
+		regVal &= ~NETA_GMAC_PORT_TYPE_MASK;
+		regVal |= NETA_GMAC_PORT_TYPE_SGMII;
+		MV_REG_WRITE(NETA_GMAC_CTRL_0_REG(port), regVal);
+
+		/* in case of SGMII mode enable InBand AutoNeg */
+		regVal = MV_REG_READ(NETA_GMAC_AN_CTRL_REG(port));
+		regVal &= ~NETA_ENABLE_FLOW_CONTROL_AUTO_NEG_MASK;
+		regVal |= NETA_INBAND_AN_EN_MASK;
+		MV_REG_WRITE(NETA_GMAC_AN_CTRL_REG(port), regVal);
+
+		/* Enable 1MS clock generation for SGMII */
+		regVal = MV_REG_READ(NETA_GMAC_CLOCK_DIVIDER_REG(port));
+		regVal |= NETA_GMAC_1MS_CLOCK_ENABLE_BIT_MASK;
+		MV_REG_WRITE(NETA_GMAC_CLOCK_DIVIDER_REG(port), regVal);
+
+	}
+
+}
+
+
+void mvNetaPortPowerUp(int port, MV_BOOL isSgmii, MV_BOOL isRgmii, MV_BOOL isInband)
+{
+	MV_U32 regVal;
+
+	/* MAC Cause register should be cleared */
+	MV_REG_WRITE(ETH_UNIT_INTR_CAUSE_REG(port), 0);
+
+	if (isSgmii)
+		mvNetaPortSgmiiConfig(port, isInband);
+
+	mvEthGmacRgmiiSet(port, isRgmii);
+
+	/* Cancel Port Reset */
+	regVal = MV_REG_READ(NETA_GMAC_CTRL_2_REG(port));
+	regVal &= (~NETA_GMAC_PORT_RESET_MASK);
+	MV_REG_WRITE(NETA_GMAC_CTRL_2_REG(port), regVal);
+	while ((MV_REG_READ(NETA_GMAC_CTRL_2_REG(port)) & NETA_GMAC_PORT_RESET_MASK) != 0)
+		continue;
+}
+
+void mvNetaPortPowerDown(int port)
+{
+}
+
+/******************************************************************************/
+/*                          Port Configuration functions                      */
+/******************************************************************************/
+
+/*******************************************************************************
+* mvNetaMaxRxSizeSet -
+*
+* DESCRIPTION:
+*       Change maximum receive size of the port. This configuration will take place
+*       imidiately.
+*
+* INPUT:
+*
+* RETURN:
+*******************************************************************************/
+MV_STATUS mvNetaMaxRxSizeSet(int portNo, int maxRxSize)
+{
+    MV_U32		regVal;
+
+	if (!MV_PON_PORT(portNo)) {
+
+		regVal =  MV_REG_READ(NETA_GMAC_CTRL_0_REG(portNo));
+		regVal &= ~NETA_GMAC_MAX_RX_SIZE_MASK;
+		regVal |= (((maxRxSize - MV_ETH_MH_SIZE) / 2) << NETA_GMAC_MAX_RX_SIZE_OFFS);
+		MV_REG_WRITE(NETA_GMAC_CTRL_0_REG(portNo), regVal);
+/*
+		mvOsPrintf("%s: port=%d, maxRxSize=%d, regAddr=0x%x, regVal=0x%x\n",
+			__func__, portNo, maxRxSize, NETA_GMAC_CTRL_0_REG(portNo), regVal);
+*/
+	}
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNetaForceLinkModeSet -
+*
+* DESCRIPTION:
+*       Sets "Force Link Pass" and "Do Not Force Link Fail" bits.
+* 	Note: This function should only be called when the port is disabled.
+*
+* INPUT:
+* 	int		portNo			- port number
+* 	MV_BOOL force_link_pass	- Force Link Pass
+* 	MV_BOOL force_link_fail - Force Link Failure
+*		0, 0 - normal state: detect link via PHY and connector
+*		1, 1 - prohibited state.
+*
+* RETURN:
+*******************************************************************************/
+MV_STATUS mvNetaForceLinkModeSet(int portNo, MV_BOOL force_link_up, MV_BOOL force_link_down)
+{
+	MV_U32	regVal;
+
+	if ((portNo < 0) || (portNo >= mvNetaHalData.maxPort))
+		return MV_BAD_PARAM;
+
+	/* Can't force link pass and link fail at the same time */
+	if ((force_link_up) && (force_link_down))
+		return MV_BAD_PARAM;
+
+	regVal = MV_REG_READ(NETA_GMAC_AN_CTRL_REG(portNo));
+
+	if (force_link_up)
+		regVal |= NETA_FORCE_LINK_PASS_MASK;
+	else
+		regVal &= ~NETA_FORCE_LINK_PASS_MASK;
+
+	if (force_link_down)
+		regVal |= NETA_FORCE_LINK_FAIL_MASK;
+	else
+		regVal &= ~NETA_FORCE_LINK_FAIL_MASK;
+
+	MV_REG_WRITE(NETA_GMAC_AN_CTRL_REG(portNo), regVal);
+
+    return MV_OK;
+}
+
+/*******************************************************************************
+* mvNetaSpeedDuplexSet -
+*
+* DESCRIPTION:
+*       Sets port speed to Auto Negotiation / 1000 / 100 / 10 Mbps.
+*	Sets port duplex to Auto Negotiation / Full / Half Duplex.
+*
+* INPUT:
+* 	int portNo - port number
+* 	MV_ETH_PORT_SPEED speed - port speed
+*	MV_ETH_PORT_DUPLEX duplex - port duplex mode
+*
+* RETURN:
+*******************************************************************************/
+MV_STATUS mvNetaSpeedDuplexSet(int portNo, MV_ETH_PORT_SPEED speed, MV_ETH_PORT_DUPLEX duplex)
+{
+	MV_U32 regVal;
+
+	if ((portNo < 0) || (portNo >= mvNetaHalData.maxPort))
+		return MV_BAD_PARAM;
+
+	/* Check validity */
+	if ((speed == MV_ETH_SPEED_1000) && (duplex == MV_ETH_DUPLEX_HALF))
+		return MV_BAD_PARAM;
+
+	regVal = MV_REG_READ(NETA_GMAC_AN_CTRL_REG(portNo));
+
+	switch (speed) {
+	case MV_ETH_SPEED_AN:
+		regVal |= NETA_ENABLE_SPEED_AUTO_NEG_MASK;
+		/* the other bits don't matter in this case */
+		break;
+	case MV_ETH_SPEED_1000:
+		regVal &= ~NETA_ENABLE_SPEED_AUTO_NEG_MASK;
+		regVal |= NETA_SET_GMII_SPEED_1000_MASK;
+		regVal &= ~NETA_SET_MII_SPEED_100_MASK;
+		/* the 100/10 bit doesn't matter in this case */
+		break;
+	case MV_ETH_SPEED_100:
+		regVal &= ~NETA_ENABLE_SPEED_AUTO_NEG_MASK;
+		regVal &= ~NETA_SET_GMII_SPEED_1000_MASK;
+		regVal |= NETA_SET_MII_SPEED_100_MASK;
+		break;
+	case MV_ETH_SPEED_10:
+		regVal &= ~NETA_ENABLE_SPEED_AUTO_NEG_MASK;
+		regVal &= ~NETA_SET_GMII_SPEED_1000_MASK;
+		regVal &= ~NETA_SET_MII_SPEED_100_MASK;
+		break;
+	default:
+		mvOsPrintf("Unexpected Speed value %d\n", speed);
+		return MV_BAD_PARAM;
+	}
+
+	switch (duplex) {
+	case MV_ETH_DUPLEX_AN:
+		regVal  |= NETA_ENABLE_DUPLEX_AUTO_NEG_MASK;
+		/* the other bits don't matter in this case */
+		break;
+	case MV_ETH_DUPLEX_HALF:
+		regVal &= ~NETA_ENABLE_DUPLEX_AUTO_NEG_MASK;
+		regVal &= ~NETA_SET_FULL_DUPLEX_MASK;
+		break;
+	case MV_ETH_DUPLEX_FULL:
+		regVal &= ~NETA_ENABLE_DUPLEX_AUTO_NEG_MASK;
+		regVal |= NETA_SET_FULL_DUPLEX_MASK;
+		break;
+	default:
+		mvOsPrintf("Unexpected Duplex value %d\n", duplex);
+		return MV_BAD_PARAM;
+	}
+
+	MV_REG_WRITE(NETA_GMAC_AN_CTRL_REG(portNo), regVal);
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNetaSpeedDuplexGet -
+*
+* DESCRIPTION:
+*       Gets port speed
+*	Gets port duplex
+*
+* INPUT:
+* 	int portNo - port number
+* OUTPUT:
+* 	MV_ETH_PORT_SPEED *speed - port speed
+*	MV_ETH_PORT_DUPLEX *duplex - port duplex mode
+*
+* RETURN:
+*******************************************************************************/
+MV_STATUS mvNetaSpeedDuplexGet(int portNo, MV_ETH_PORT_SPEED *speed, MV_ETH_PORT_DUPLEX *duplex)
+{
+	MV_U32 regVal;
+	if ((portNo < 0) || (portNo >= mvNetaHalData.maxPort))
+		return MV_BAD_PARAM;
+
+	/* Check validity */
+	if (!speed || !duplex)
+		return MV_BAD_PARAM;
+
+	regVal = MV_REG_READ(NETA_GMAC_AN_CTRL_REG(portNo));
+	if (regVal & NETA_ENABLE_SPEED_AUTO_NEG_MASK)
+		*speed = MV_ETH_SPEED_AN;
+	else if (regVal & NETA_SET_GMII_SPEED_1000_MASK)
+		*speed = MV_ETH_SPEED_1000;
+	else if (regVal & NETA_SET_MII_SPEED_100_MASK)
+		*speed = MV_ETH_SPEED_100;
+	else
+		*speed = MV_ETH_SPEED_10;
+
+	if (regVal & NETA_ENABLE_DUPLEX_AUTO_NEG_MASK)
+		*duplex = MV_ETH_DUPLEX_AN;
+	else if (regVal & NETA_SET_FULL_DUPLEX_MASK)
+		*duplex = MV_ETH_DUPLEX_FULL;
+	else
+		*duplex = MV_ETH_DUPLEX_HALF;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNetaFlowCtrlSet - Set Flow Control of the port.
+*
+* DESCRIPTION:
+*       This function configures the port's Flow Control properties.
+*
+* INPUT:
+*       int				port		- Port number
+*       MV_ETH_PORT_FC  flowControl - Flow control of the port.
+*
+* RETURN:   MV_STATUS
+*       MV_OK           - Success
+*       MV_OUT_OF_RANGE - Failed. Port is out of valid range
+*       MV_BAD_VALUE    - Value flowControl parameters is not valid
+*
+*******************************************************************************/
+MV_STATUS mvNetaFlowCtrlSet(int port, MV_ETH_PORT_FC flowControl)
+{
+	MV_U32 regVal;
+
+	if ((port < 0) || (port >= mvNetaHalData.maxPort))
+		return MV_OUT_OF_RANGE;
+
+	regVal = MV_REG_READ(NETA_GMAC_AN_CTRL_REG(port));
+
+	switch (flowControl) {
+	case MV_ETH_FC_AN_NO:
+		regVal |= NETA_ENABLE_FLOW_CONTROL_AUTO_NEG_MASK;
+		regVal &= ~NETA_FLOW_CONTROL_ADVERTISE_MASK;
+		regVal &= ~NETA_FLOW_CONTROL_ASYMETRIC_MASK;
+		break;
+
+	case MV_ETH_FC_AN_SYM:
+		regVal |= NETA_ENABLE_FLOW_CONTROL_AUTO_NEG_MASK;
+		regVal |= NETA_FLOW_CONTROL_ADVERTISE_MASK;
+		regVal &= ~NETA_FLOW_CONTROL_ASYMETRIC_MASK;
+		break;
+
+	case MV_ETH_FC_AN_ASYM:
+		regVal |= NETA_ENABLE_FLOW_CONTROL_AUTO_NEG_MASK;
+		regVal |= NETA_FLOW_CONTROL_ADVERTISE_MASK;
+		regVal |= NETA_FLOW_CONTROL_ASYMETRIC_MASK;
+		break;
+
+	case MV_ETH_FC_DISABLE:
+		regVal &= ~NETA_ENABLE_FLOW_CONTROL_AUTO_NEG_MASK;
+		regVal &= ~NETA_SET_FLOW_CONTROL_MASK;
+		break;
+
+	case MV_ETH_FC_ENABLE:
+		regVal &= ~NETA_ENABLE_FLOW_CONTROL_AUTO_NEG_MASK;
+		regVal |= NETA_SET_FLOW_CONTROL_MASK;
+		break;
+
+	default:
+		mvOsPrintf("ethDrv: Unexpected FlowControl value %d\n", flowControl);
+		return MV_BAD_VALUE;
+	}
+
+	MV_REG_WRITE(NETA_GMAC_AN_CTRL_REG(port), regVal);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNetaFlowCtrlGet - Get Flow Control configuration of the port.
+*
+* DESCRIPTION:
+*       This function returns the port's Flow Control properties.
+*
+* INPUT:
+*       int				port		- Port number
+*
+* OUTPUT:
+*       MV_ETH_PORT_FC  *flowCntrl	- Flow control of the port.
+*
+* RETURN:   MV_STATUS
+*       MV_OK           - Success
+*       MV_OUT_OF_RANGE - Failed. Port is out of valid range
+*
+*******************************************************************************/
+MV_STATUS mvNetaFlowCtrlGet(int port, MV_ETH_PORT_FC *pFlowCntrl)
+{
+	MV_U32 regVal;
+
+	if ((port < 0) || (port >= mvNetaHalData.maxPort))
+		return MV_OUT_OF_RANGE;
+
+	regVal = MV_REG_READ(NETA_GMAC_AN_CTRL_REG(port));
+
+	if (regVal & NETA_ENABLE_FLOW_CONTROL_AUTO_NEG_MASK) {
+		/* Auto negotiation is enabled */
+		if (regVal & NETA_FLOW_CONTROL_ADVERTISE_MASK) {
+			if (regVal & NETA_FLOW_CONTROL_ASYMETRIC_MASK)
+				*pFlowCntrl = MV_ETH_FC_AN_ASYM;
+			else
+				*pFlowCntrl = MV_ETH_FC_AN_SYM;
+		} else
+			*pFlowCntrl = MV_ETH_FC_AN_NO;
+	} else {
+		/* Auto negotiation is disabled */
+		if (regVal & NETA_SET_FLOW_CONTROL_MASK)
+			*pFlowCntrl = MV_ETH_FC_ENABLE;
+		else
+			*pFlowCntrl = MV_ETH_FC_DISABLE;
+	}
+	return MV_OK;
+}
+
+MV_STATUS mvNetaPortEnable(int port)
+{
+	if (!MV_PON_PORT(port)) {
+		MV_U32 regVal;
+
+		/* Enable port */
+		regVal = MV_REG_READ(NETA_GMAC_CTRL_0_REG(port));
+		regVal |= NETA_GMAC_PORT_EN_MASK;
+
+		MV_REG_WRITE(NETA_GMAC_CTRL_0_REG(port), regVal);
+
+		/* If Link is UP, Start RX and TX traffic */
+		if (MV_REG_READ(NETA_GMAC_STATUS_REG(port)) & NETA_GMAC_LINK_UP_MASK)
+			return mvNetaPortUp(port);
+	}
+	return MV_NOT_READY;
+}
+
+MV_STATUS mvNetaPortDisable(int port)
+{
+	MV_U32 regData;
+
+	mvNetaPortDown(port);
+
+	if (!MV_PON_PORT(port)) {
+		/* Reset the Enable bit in the Serial Control Register */
+		regData = MV_REG_READ(NETA_GMAC_CTRL_0_REG(port));
+		regData &= ~(NETA_GMAC_PORT_EN_MASK);
+		MV_REG_WRITE(NETA_GMAC_CTRL_0_REG(port), regData);
+	}
+	/* Wait about 200 usec */
+	mvOsUDelay(200);
+
+	return MV_OK;
+}
+
+MV_BOOL mvNetaLinkIsUp(int port)
+{
+	MV_U32	regVal;
+
+	if (MV_PON_PORT(port))
+		return MV_TRUE;
+
+	regVal = MV_REG_READ(NETA_GMAC_STATUS_REG(port));
+	if (regVal & NETA_GMAC_LINK_UP_MASK)
+		return MV_TRUE;
+
+	return MV_FALSE;
+}
+
+MV_STATUS mvNetaLinkStatus(int port, MV_ETH_PORT_STATUS *pStatus)
+{
+	MV_U32 regVal;
+
+	if (MV_PON_PORT(port)) {
+		/* FIXME: --BK */
+		pStatus->linkup = MV_TRUE;
+		pStatus->speed = MV_ETH_SPEED_1000;
+		pStatus->duplex = MV_ETH_DUPLEX_FULL;
+		pStatus->rxFc = MV_ETH_FC_DISABLE;
+		pStatus->txFc = MV_ETH_FC_DISABLE;
+		return MV_OK;
+	}
+
+	regVal = MV_REG_READ(NETA_GMAC_STATUS_REG(port));
+
+	if (regVal & NETA_GMAC_SPEED_1000_MASK)
+		pStatus->speed = MV_ETH_SPEED_1000;
+	else if (regVal & NETA_GMAC_SPEED_100_MASK)
+		pStatus->speed = MV_ETH_SPEED_100;
+	else
+		pStatus->speed = MV_ETH_SPEED_10;
+
+	if (regVal & NETA_GMAC_LINK_UP_MASK)
+		pStatus->linkup = MV_TRUE;
+	else
+		pStatus->linkup = MV_FALSE;
+
+	if (regVal & NETA_GMAC_FULL_DUPLEX_MASK)
+		pStatus->duplex = MV_ETH_DUPLEX_FULL;
+	else
+		pStatus->duplex = MV_ETH_DUPLEX_HALF;
+
+	if (regVal & NETA_TX_FLOW_CTRL_ACTIVE_MASK)
+		pStatus->txFc = MV_ETH_FC_ACTIVE;
+	else if (regVal & NETA_TX_FLOW_CTRL_ENABLE_MASK)
+		pStatus->txFc = MV_ETH_FC_ENABLE;
+	else
+		pStatus->txFc = MV_ETH_FC_DISABLE;
+
+	if (regVal & NETA_RX_FLOW_CTRL_ACTIVE_MASK)
+		pStatus->rxFc = MV_ETH_FC_ACTIVE;
+	else if (regVal & NETA_RX_FLOW_CTRL_ENABLE_MASK)
+		pStatus->rxFc = MV_ETH_FC_ENABLE;
+	else
+		pStatus->rxFc = MV_ETH_FC_DISABLE;
+
+	return MV_OK;
+}
+
+/* Set Low Power Idle mode for GMAC port */
+MV_STATUS           mvNetaGmacLpiSet(int port, int mode)
+{
+	if (!MV_PON_PORT(port))	{
+		MV_U32  regVal;
+
+		regVal = MV_REG_READ(NETA_LOW_POWER_CTRL_1_REG(port));
+		if (mode)
+			regVal |= NETA_LPI_REQUEST_EN_MASK;
+		else
+			regVal &= ~NETA_LPI_REQUEST_EN_MASK;
+
+		MV_REG_WRITE(NETA_LOW_POWER_CTRL_1_REG(port), regVal);
+
+		return MV_OK;
+	}
+	return MV_FAIL;
+}
+
+#else	/* Old GMAC functions */
+
+static void mvNetaPortSgmiiConfig(int port, MV_BOOL isInband)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(ETH_PORT_SERIAL_CTRL_1_REG(port));
+	regVal |= (/*ETH_SGMII_MODE_MASK |*/ ETH_PSC_ENABLE_MASK /*| ETH_INBAND_AUTO_NEG_ENABLE_MASK */);
+	/* regVal &= (~ETH_INBAND_AUTO_NEG_BYPASS_MASK); */
+	MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_1_REG(port), regVal);
+}
+
+void mvNetaPortPowerUp(int port, MV_BOOL isSgmii, MV_BOOL isRgmii,  MV_BOOL isInband)
+{
+	MV_U32 regVal;
+
+	/* MAC Cause register should be cleared */
+	MV_REG_WRITE(ETH_UNIT_INTR_CAUSE_REG(port), 0);
+
+
+	if (isSgmii)
+		mvNetaPortSgmiiConfig(port, isInband);
+
+	/* Cancel Port Reset */
+	regVal = MV_REG_READ(ETH_PORT_SERIAL_CTRL_1_REG(port));
+	regVal &= (~ETH_PORT_RESET_MASK);
+
+	if (isRgmii)
+		regVal |= ETH_RGMII_ENABLE_MASK;
+	else
+		regVal &= (~ETH_RGMII_ENABLE_MASK);
+
+	MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_1_REG(port), regVal);
+	while ((MV_REG_READ(ETH_PORT_SERIAL_CTRL_1_REG(port)) & ETH_PORT_RESET_MASK) != 0)
+		continue;
+}
+
+void mvNetaPortPowerDown(int port)
+{
+}
+
+/******************************************************************************/
+/*                          Port Configuration functions                      */
+/******************************************************************************/
+
+/*******************************************************************************
+* netaMruGet - Get MRU configuration for Max Rx packet size.
+*
+* INPUT:
+*           MV_U32 maxRxPktSize - max  packet size.
+*
+* RETURN:   MV_U32 - MRU configuration.
+*
+*******************************************************************************/
+static MV_U32 netaMruGet(MV_U32 maxRxPktSize)
+{
+	MV_U32 portSerialCtrlReg = 0;
+
+	if (maxRxPktSize > 9192)
+		portSerialCtrlReg |= ETH_MAX_RX_PACKET_9700BYTE;
+	else if (maxRxPktSize > 9022)
+		portSerialCtrlReg |= ETH_MAX_RX_PACKET_9192BYTE;
+	else if (maxRxPktSize > 1552)
+		portSerialCtrlReg |= ETH_MAX_RX_PACKET_9022BYTE;
+	else if (maxRxPktSize > 1522)
+		portSerialCtrlReg |= ETH_MAX_RX_PACKET_1552BYTE;
+	else if (maxRxPktSize > 1518)
+		portSerialCtrlReg |= ETH_MAX_RX_PACKET_1522BYTE;
+	else
+		portSerialCtrlReg |= ETH_MAX_RX_PACKET_1518BYTE;
+
+	return portSerialCtrlReg;
+}
+
+/*******************************************************************************
+* mvNetaMaxRxSizeSet -
+*
+* DESCRIPTION:
+*       Change maximum receive size of the port. This configuration will take place
+*       imidiately.
+*
+* INPUT:
+*
+* RETURN:
+*******************************************************************************/
+MV_STATUS mvNetaMaxRxSizeSet(int portNo, int maxRxSize)
+{
+	MV_U32 portSerialCtrlReg;
+
+	portSerialCtrlReg = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(portNo));
+	portSerialCtrlReg &= ~ETH_MAX_RX_PACKET_SIZE_MASK;
+	portSerialCtrlReg |= netaMruGet(maxRxSize);
+	MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(portNo), portSerialCtrlReg);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNetaForceLinkModeSet -
+*
+* DESCRIPTION:
+*       Sets "Force Link Pass" and "Do Not Force Link Fail" bits.
+* 	Note: This function should only be called when the port is disabled.
+*
+* INPUT:
+* 	int portNo - port number
+* 	MV_BOOL force_link_pass - value for Force Link Pass bit (bit 1): 0 or 1
+* 	MV_BOOL do_not_force_link_fail - value for Do Not Force Link Fail bit (bit 10): 0 or 1
+*
+* RETURN:
+*******************************************************************************/
+MV_STATUS mvNetaForceLinkModeSet(int portNo, MV_BOOL force_link_up, MV_BOOL force_link_down)
+{
+	MV_U32 portSerialCtrlReg;
+
+	if ((portNo < 0) || (portNo >= mvNetaHalData.maxPort))
+		return MV_BAD_PARAM;
+
+	/* Can't force link pass and link fail at the same time */
+	if ((force_link_up) && (force_link_down))
+		return MV_BAD_PARAM;
+
+	portSerialCtrlReg = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(portNo));
+
+	if (force_link_up)
+		portSerialCtrlReg |= ETH_FORCE_LINK_PASS_MASK | ETH_DO_NOT_FORCE_LINK_FAIL_MASK;
+	else if (force_link_down)
+		portSerialCtrlReg &= ~(ETH_FORCE_LINK_PASS_MASK | ETH_DO_NOT_FORCE_LINK_FAIL_MASK);
+
+	MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(portNo), portSerialCtrlReg);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNetaSpeedDuplexSet -
+*
+* DESCRIPTION:
+*       Sets port speed to Auto Negotiation / 1000 / 100 / 10 Mbps.
+*	Sets port duplex to Auto Negotiation / Full / Half Duplex.
+*
+* INPUT:
+* 	int portNo - port number
+* 	MV_ETH_PORT_SPEED speed - port speed
+*	MV_ETH_PORT_DUPLEX duplex - port duplex mode
+*
+* RETURN:
+*******************************************************************************/
+MV_STATUS mvNetaSpeedDuplexSet(int portNo, MV_ETH_PORT_SPEED speed, MV_ETH_PORT_DUPLEX duplex)
+{
+	MV_U32 portSerialCtrlReg;
+
+	if ((portNo < 0) || (portNo >= mvNetaHalData.maxPort))
+		return MV_BAD_PARAM;
+
+	/* Check validity */
+	if ((speed == MV_ETH_SPEED_1000) && (duplex == MV_ETH_DUPLEX_HALF))
+		return MV_BAD_PARAM;
+
+	portSerialCtrlReg = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(portNo));
+
+	switch (speed) {
+	case MV_ETH_SPEED_AN:
+		portSerialCtrlReg &= ~ETH_DISABLE_SPEED_AUTO_NEG_MASK;
+		/* the other bits don't matter in this case */
+		break;
+	case MV_ETH_SPEED_1000:
+		portSerialCtrlReg |= ETH_DISABLE_SPEED_AUTO_NEG_MASK;
+		portSerialCtrlReg |= ETH_SET_GMII_SPEED_1000_MASK;
+		/* the 100/10 bit doesn't matter in this case */
+		break;
+	case MV_ETH_SPEED_100:
+		portSerialCtrlReg |= ETH_DISABLE_SPEED_AUTO_NEG_MASK;
+		portSerialCtrlReg &= ~ETH_SET_GMII_SPEED_1000_MASK;
+		portSerialCtrlReg |= ETH_SET_MII_SPEED_100_MASK;
+		break;
+	case MV_ETH_SPEED_10:
+		portSerialCtrlReg |= ETH_DISABLE_SPEED_AUTO_NEG_MASK;
+		portSerialCtrlReg &= ~ETH_SET_GMII_SPEED_1000_MASK;
+		portSerialCtrlReg &= ~ETH_SET_MII_SPEED_100_MASK;
+		break;
+	default:
+		mvOsPrintf("Unexpected Speed value %d\n", speed);
+		return MV_BAD_PARAM;
+	}
+
+	switch (duplex) {
+	case MV_ETH_DUPLEX_AN:
+		portSerialCtrlReg &= ~ETH_DISABLE_DUPLEX_AUTO_NEG_MASK;
+		/* the other bits don't matter in this case */
+		break;
+	case MV_ETH_DUPLEX_HALF:
+		portSerialCtrlReg |= ETH_DISABLE_DUPLEX_AUTO_NEG_MASK;
+		portSerialCtrlReg &= ~ETH_SET_FULL_DUPLEX_MASK;
+		break;
+	case MV_ETH_DUPLEX_FULL:
+		portSerialCtrlReg |= ETH_DISABLE_DUPLEX_AUTO_NEG_MASK;
+		portSerialCtrlReg |= ETH_SET_FULL_DUPLEX_MASK;
+		break;
+	default:
+		mvOsPrintf("Unexpected Duplex value %d\n", duplex);
+		return MV_BAD_PARAM;
+	}
+
+	MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(portNo), portSerialCtrlReg);
+
+	return MV_OK;
+
+}
+
+/*******************************************************************************
+* mvNetaSpeedDuplexGet -
+*
+* DESCRIPTION:
+*       Gets port speed
+*	Gets port duplex
+*
+* INPUT:
+* 	int portNo - port number
+* OUTPUT:
+* 	MV_ETH_PORT_SPEED *speed - port speed
+*	MV_ETH_PORT_DUPLEX *duplex - port duplex mode
+*
+* RETURN:
+*******************************************************************************/
+MV_STATUS mvNetaSpeedDuplexGet(int portNo, MV_ETH_PORT_SPEED *speed, MV_ETH_PORT_DUPLEX *duplex)
+{
+	MV_U32 regVal;
+	if ((portNo < 0) || (portNo >= mvNetaHalData.maxPort))
+		return MV_BAD_PARAM;
+
+	/* Check validity */
+	if (!speed || !duplex)
+		return MV_BAD_PARAM;
+
+	regVal = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(portNo));
+	if (!(regVal & ETH_DISABLE_SPEED_AUTO_NEG_MASK))
+		*speed = MV_ETH_SPEED_AN;
+	else if (regVal & ETH_SET_GMII_SPEED_1000_MASK)
+		*speed = MV_ETH_SPEED_1000;
+	else if (regVal & ETH_SET_MII_SPEED_100_MASK)
+		*speed = MV_ETH_SPEED_100;
+	else
+		*speed = MV_ETH_SPEED_10;
+
+	if (!(regVal & ETH_DISABLE_DUPLEX_AUTO_NEG_MASK))
+		*duplex = MV_ETH_DUPLEX_AN;
+	else if (regVal & ETH_SET_FULL_DUPLEX_MASK)
+		*duplex = MV_ETH_DUPLEX_FULL;
+	else
+		*duplex = MV_ETH_DUPLEX_HALF;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNetaFlowCtrlSet - Set Flow Control of the port.
+*
+* DESCRIPTION:
+*       This function configures the port's Flow Control properties.
+*
+* INPUT:
+*       int				port		- Port number
+*       MV_ETH_PORT_FC  flowControl - Flow control of the port.
+*
+* RETURN:   MV_STATUS
+*       MV_OK           - Success
+*       MV_OUT_OF_RANGE - Failed. Port is out of valid range
+*       MV_BAD_VALUE    - Value flowControl parameters is not valid
+*
+*******************************************************************************/
+MV_STATUS mvNetaFlowCtrlSet(int port, MV_ETH_PORT_FC flowControl)
+{
+	MV_U32 regVal;
+
+	if ((port < 0) || (port >= mvNetaHalData.maxPort))
+		return MV_OUT_OF_RANGE;
+
+	regVal = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(port));
+
+	switch (flowControl) {
+	case MV_ETH_FC_AN_NO:
+		regVal &= ~ETH_DISABLE_FC_AUTO_NEG_MASK;
+		regVal &= ~ETH_ADVERTISE_SYM_FC_MASK;
+		break;
+
+	case MV_ETH_FC_AN_SYM:
+		regVal &= ~ETH_DISABLE_FC_AUTO_NEG_MASK;
+		regVal |= ETH_ADVERTISE_SYM_FC_MASK;
+		break;
+
+	case MV_ETH_FC_DISABLE:
+		regVal |= ETH_DISABLE_FC_AUTO_NEG_MASK;
+		regVal &= ~ETH_SET_FLOW_CTRL_MASK;
+		break;
+
+	case MV_ETH_FC_ENABLE:
+		regVal |= ETH_DISABLE_FC_AUTO_NEG_MASK;
+		regVal |= ETH_SET_FLOW_CTRL_MASK;
+		break;
+
+	default:
+		mvOsPrintf("ethDrv: Unexpected FlowControl value %d\n", flowControl);
+		return MV_BAD_VALUE;
+	}
+
+	MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(port), regVal);
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNetaFlowCtrlGet - Get Flow Control configuration of the port.
+*
+* DESCRIPTION:
+*       This function returns the port's Flow Control properties.
+*
+* INPUT:
+*       int				port		- Port number
+*
+* OUTPUT:
+*       MV_ETH_PORT_FC  *flowCntrl	- Flow control of the port.
+*
+* RETURN:   MV_STATUS
+*       MV_OK           - Success
+*       MV_OUT_OF_RANGE - Failed. Port is out of valid range
+*
+*******************************************************************************/
+MV_STATUS mvNetaFlowCtrlGet(int port, MV_ETH_PORT_FC *pFlowCntrl)
+{
+	MV_U32 regVal;
+
+	if ((port < 0) || (port >= mvNetaHalData.maxPort))
+		return MV_OUT_OF_RANGE;
+
+	regVal = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(port));
+
+	if (regVal & ETH_DISABLE_FC_AUTO_NEG_MASK) {
+		/* Auto negotiation is disabled */
+		if (regVal & ETH_SET_FLOW_CTRL_MASK)
+			*pFlowCntrl = MV_ETH_FC_ENABLE;
+		else
+			*pFlowCntrl = MV_ETH_FC_DISABLE;
+	} else {
+		/* Auto negotiation is enabled */
+		if (regVal & ETH_ADVERTISE_SYM_FC_MASK)
+			*pFlowCntrl = MV_ETH_FC_AN_SYM;
+		else
+			*pFlowCntrl = MV_ETH_FC_AN_NO;
+	}
+	return MV_OK;
+}
+
+MV_STATUS mvNetaPortEnable(int port)
+{
+	if (!MV_PON_PORT(port)) {
+		MV_U32 regVal;
+
+		/* Enable port */
+		regVal = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(port));
+		regVal |= (ETH_DO_NOT_FORCE_LINK_FAIL_MASK | ETH_PORT_ENABLE_MASK);
+
+		MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(port), regVal);
+
+		/* If Link is UP, Start RX and TX traffic */
+		if (MV_REG_READ(ETH_PORT_STATUS_REG(port)) & ETH_LINK_UP_MASK)
+			return mvNetaPortUp(port);
+	}
+	return MV_NOT_READY;
+}
+
+MV_STATUS mvNetaPortDisable(int port)
+{
+	MV_U32 regData;
+
+	mvNetaPortDown(port);
+
+	if (!MV_PON_PORT(port)) {
+		/* Reset the Enable bit in the Serial Control Register */
+		regData = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(port));
+		regData &= ~(ETH_PORT_ENABLE_MASK);
+		MV_REG_WRITE(ETH_PORT_SERIAL_CTRL_REG(port), regData);
+	}
+	/* Wait about 200 usec */
+	mvOsUDelay(200);
+
+	return MV_OK;
+}
+
+MV_BOOL mvNetaLinkIsUp(int port)
+{
+	MV_U32	regVal;
+
+	if (MV_PON_PORT(port))
+		return MV_TRUE;
+
+	regVal = MV_REG_READ(ETH_PORT_STATUS_REG(port));
+	if (regVal & ETH_LINK_UP_MASK)
+		return MV_TRUE;
+
+	return MV_FALSE;
+}
+
+MV_STATUS mvNetaLinkStatus(int port, MV_ETH_PORT_STATUS *pStatus)
+{
+	MV_U32 regVal;
+
+	if (MV_PON_PORT(port)) {
+		/* FIXME: --BK */
+		pStatus->linkup = MV_TRUE;
+		pStatus->speed = MV_ETH_SPEED_1000;
+		pStatus->duplex = MV_ETH_DUPLEX_FULL;
+		pStatus->rxFc = MV_ETH_FC_DISABLE;
+		pStatus->txFc = MV_ETH_FC_DISABLE;
+		return MV_OK;
+	}
+
+	regVal = MV_REG_READ(ETH_PORT_STATUS_REG(port));
+
+	if (regVal & ETH_GMII_SPEED_1000_MASK)
+		pStatus->speed = MV_ETH_SPEED_1000;
+	else if (regVal & ETH_MII_SPEED_100_MASK)
+		pStatus->speed = MV_ETH_SPEED_100;
+	else
+		pStatus->speed = MV_ETH_SPEED_10;
+
+	if (regVal & ETH_LINK_UP_MASK)
+		pStatus->linkup = MV_TRUE;
+	else
+		pStatus->linkup = MV_FALSE;
+
+	if (regVal & ETH_FULL_DUPLEX_MASK)
+		pStatus->duplex = MV_ETH_DUPLEX_FULL;
+	else
+		pStatus->duplex = MV_ETH_DUPLEX_HALF;
+
+	pStatus->txFc = MV_ETH_FC_DISABLE;
+	if (regVal & ETH_FLOW_CTRL_ENABLED_MASK)
+		pStatus->rxFc = MV_ETH_FC_ENABLE;
+	else
+		pStatus->rxFc = MV_ETH_FC_DISABLE;
+
+	return MV_OK;
+}
+
+#endif /* MV_ETH_GMAC_NEW */
+
+/******************************************************************************/
+/*                      MAC Filtering functions                               */
+/******************************************************************************/
+
+/************************ Legacy parse function start *******************************/
+/*******************************************************************************
+* netaSetUcastAddr - This function Set the port unicast address table
+*
+* DESCRIPTION:
+*       This function locates the proper entry in the Unicast table for the
+*       specified MAC nibble and sets its properties according to function
+*       parameters.
+*
+* INPUT:
+*       int     portNo		- Port number.
+*       MV_U8   lastNibble	- Unicast MAC Address last nibble.
+*       int     queue		- Rx queue number for this MAC address.
+*			value "-1" means remove address.
+*
+* OUTPUT:
+*       This function add/removes MAC addresses from the port unicast address
+*       table.
+*
+* RETURN:
+*       MV_TRUE is output succeeded.
+*       MV_FALSE if option parameter is invalid.
+*
+*******************************************************************************/
+static MV_BOOL netaSetUcastAddr(int portNo, MV_U8 lastNibble, int queue)
+{
+	unsigned int unicastReg;
+	unsigned int tblOffset;
+	unsigned int regOffset;
+
+	/* Locate the Unicast table entry */
+	lastNibble = (0xf & lastNibble);
+	tblOffset = (lastNibble / 4) * 4;	/* Register offset from unicast table base */
+	regOffset = lastNibble % 4;	/* Entry offset within the above register */
+
+	unicastReg = MV_REG_READ((ETH_DA_FILTER_UCAST_BASE(portNo) + tblOffset));
+
+	if (queue == -1) {
+		/* Clear accepts frame bit at specified unicast DA table entry */
+		unicastReg &= ~(0xFF << (8 * regOffset));
+	} else {
+		unicastReg &= ~(0xFF << (8 * regOffset));
+		unicastReg |= ((0x01 | (queue << 1)) << (8 * regOffset));
+	}
+	MV_REG_WRITE((ETH_DA_FILTER_UCAST_BASE(portNo) + tblOffset), unicastReg);
+
+	return MV_TRUE;
+}
+
+/*******************************************************************************
+* netaSetSpecialMcastAddr - Special Multicast address settings.
+*
+* DESCRIPTION:
+*       This routine controls the MV device special MAC multicast support.
+*       The Special Multicast Table for MAC addresses supports MAC of the form
+*       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
+*       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
+*       Table entries in the DA-Filter table.
+*       This function set the Special Multicast Table appropriate entry
+*       according to the argument given.
+*
+* INPUT:
+*       int     port      Port number.
+*       unsigned char   mcByte      Multicast addr last byte (MAC DA[7:0] bits).
+*       int          queue      Rx queue number for this MAC address.
+*       int             option      0 = Add, 1 = remove address.
+*
+* OUTPUT:
+*       See description.
+*
+* RETURN:
+*       MV_TRUE is output succeeded.
+*       MV_FALSE if option parameter is invalid.
+*
+*******************************************************************************/
+static MV_BOOL netaSetSpecialMcastAddr(int port, MV_U8 lastByte, int queue)
+{
+	unsigned int smcTableReg;
+	unsigned int tblOffset;
+	unsigned int regOffset;
+
+	/* Locate the SMC table entry */
+	tblOffset = (lastByte / 4);	/* Register offset from SMC table base    */
+	regOffset = lastByte % 4;	/* Entry offset within the above register */
+
+	smcTableReg = MV_REG_READ((ETH_DA_FILTER_SPEC_MCAST_BASE(port) + tblOffset * 4));
+
+	if (queue == -1) {
+		/* Clear accepts frame bit at specified Special DA table entry */
+		smcTableReg &= ~(0xFF << (8 * regOffset));
+	} else {
+		smcTableReg &= ~(0xFF << (8 * regOffset));
+		smcTableReg |= ((0x01 | (queue << 1)) << (8 * regOffset));
+	}
+	MV_REG_WRITE((ETH_DA_FILTER_SPEC_MCAST_BASE(port) + tblOffset * 4), smcTableReg);
+
+	return MV_TRUE;
+}
+
+/*******************************************************************************
+* netaSetOtherMcastAddr - Multicast address settings.
+*
+* DESCRIPTION:
+*       This routine controls the MV device Other MAC multicast support.
+*       The Other Multicast Table is used for multicast of another type.
+*       A CRC-8bit is used as an index to the Other Multicast Table entries
+*       in the DA-Filter table.
+*       The function gets the CRC-8bit value from the calling routine and
+*       set the Other Multicast Table appropriate entry according to the
+*       CRC-8 argument given.
+*
+* INPUT:
+*       int     port        Port number.
+*       MV_U8   crc8        A CRC-8bit (Polynomial: x^8+x^2+x^1+1).
+*       int     queue       Rx queue number for this MAC address.
+*
+* OUTPUT:
+*       See description.
+*
+* RETURN:
+*       MV_TRUE is output succeeded.
+*       MV_FALSE if option parameter is invalid.
+*
+*******************************************************************************/
+static MV_BOOL netaSetOtherMcastAddr(int port, MV_U8 crc8, int queue)
+{
+	unsigned int omcTableReg;
+	unsigned int tblOffset;
+	unsigned int regOffset;
+
+	/* Locate the OMC table entry */
+	tblOffset = (crc8 / 4) * 4;	/* Register offset from OMC table base    */
+	regOffset = crc8 % 4;	/* Entry offset within the above register */
+
+	omcTableReg = MV_REG_READ((ETH_DA_FILTER_OTH_MCAST_BASE(port) + tblOffset));
+
+	if (queue == -1) {
+		/* Clear accepts frame bit at specified Other DA table entry */
+		omcTableReg &= ~(0xFF << (8 * regOffset));
+	} else {
+		omcTableReg &= ~(0xFF << (8 * regOffset));
+		omcTableReg |= ((0x01 | (queue << 1)) << (8 * regOffset));
+	}
+
+	MV_REG_WRITE((ETH_DA_FILTER_OTH_MCAST_BASE(port) + tblOffset), omcTableReg);
+
+	return MV_TRUE;
+}
+
+/*******************************************************************************
+* mvNetaRxUnicastPromiscSet - Configure Fitering mode of Ethernet port
+*
+* DESCRIPTION:
+*       This routine used to free buffers attached to the Rx ring and should
+*       be called only when Giga Ethernet port is Down
+*
+* INPUT:
+*		int			portNo			- Port number.
+*       MV_BOOL     isPromisc       - Promiscous mode
+*                                   MV_TRUE  - accept all Broadcast, Multicast
+*                                              and Unicast packets
+*                                   MV_FALSE - accept all Broadcast,
+*                                              specially added Multicast and
+*                                              single Unicast packets
+*
+* RETURN:   MV_STATUS   MV_OK - Success, Other - Failure
+*
+*******************************************************************************/
+MV_STATUS mvNetaRxUnicastPromiscSet(int port, MV_BOOL isPromisc)
+{
+	MV_U32 portCfgReg, regVal;
+
+	portCfgReg = MV_REG_READ(ETH_PORT_CONFIG_REG(port));
+	regVal = MV_REG_READ(ETH_TYPE_PRIO_REG(port));
+
+	/* Set / Clear UPM bit in port configuration register */
+	if (isPromisc == MV_TRUE) {
+		/* Accept all Unicast addresses */
+		portCfgReg |= ETH_UNICAST_PROMISCUOUS_MODE_MASK;
+		regVal |= ETH_FORCE_UNICAST_MASK;
+		MV_REG_WRITE(ETH_MAC_ADDR_LOW_REG(port), 0xFFFF);
+		MV_REG_WRITE(ETH_MAC_ADDR_HIGH_REG(port), 0xFFFFFFFF);
+	} else {
+		/* Reject all Unicast addresses */
+		portCfgReg &= ~ETH_UNICAST_PROMISCUOUS_MODE_MASK;
+		regVal &= ~ETH_FORCE_UNICAST_MASK;
+	}
+	MV_REG_WRITE(ETH_PORT_CONFIG_REG(port), portCfgReg);
+	MV_REG_WRITE(ETH_TYPE_PRIO_REG(port), regVal);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNetaMacAddrSet - This function Set the port Unicast address.
+*
+* DESCRIPTION:
+*       This function Set the port Ethernet MAC address. This address
+*       will be used to send Pause frames if enabled. Packets with this
+*       address will be accepted and dispatched to default RX queue
+*
+* INPUT:
+*       int*    port    - Ethernet port.
+*       char*   pAddr   - Address to be set
+*
+* RETURN:   MV_STATUS
+*               MV_OK - Success,  Other - Faulure
+*
+*******************************************************************************/
+MV_STATUS mvNetaMacAddrSet(int portNo, unsigned char *pAddr, int queue)
+{
+	unsigned int macH;
+	unsigned int macL;
+
+	if (queue >= CONFIG_MV_ETH_RXQ) {
+		mvOsPrintf("ethDrv: RX queue #%d is out of range\n", queue);
+		return MV_BAD_PARAM;
+	}
+
+	if (queue != -1) {
+		macL = (pAddr[4] << 8) | (pAddr[5]);
+		macH = (pAddr[0] << 24) | (pAddr[1] << 16) | (pAddr[2] << 8) | (pAddr[3] << 0);
+
+		MV_REG_WRITE(ETH_MAC_ADDR_LOW_REG(portNo), macL);
+		MV_REG_WRITE(ETH_MAC_ADDR_HIGH_REG(portNo), macH);
+	}
+
+	/* Accept frames of this address */
+	netaSetUcastAddr(portNo, pAddr[5], queue);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNetaMacAddrGet - This function returns the port Unicast address.
+*
+* DESCRIPTION:
+*       This function returns the port Ethernet MAC address.
+*
+* INPUT:
+*       int     portNo          - Ethernet port number.
+*       char*   pAddr           - Pointer where address will be written to
+*
+* RETURN:   MV_STATUS
+*               MV_OK - Success,  Other - Faulure
+*
+*******************************************************************************/
+MV_STATUS mvNetaMacAddrGet(int portNo, unsigned char *pAddr)
+{
+	unsigned int macH;
+	unsigned int macL;
+
+	if (pAddr == NULL) {
+		mvOsPrintf("mvNetaMacAddrGet: NULL pointer.\n");
+		return MV_BAD_PARAM;
+	}
+
+	macH = MV_REG_READ(ETH_MAC_ADDR_HIGH_REG(portNo));
+	macL = MV_REG_READ(ETH_MAC_ADDR_LOW_REG(portNo));
+	pAddr[0] = (macH >> 24) & 0xff;
+	pAddr[1] = (macH >> 16) & 0xff;
+	pAddr[2] = (macH >> 8) & 0xff;
+	pAddr[3] = macH & 0xff;
+	pAddr[4] = (macL >> 8) & 0xff;
+	pAddr[5] = macL & 0xff;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNetaMcastCrc8Get - Calculate CRC8 of MAC address.
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       MV_U8*  pAddr           - Address to calculate CRC-8
+*
+* RETURN: MV_U8 - CRC-8 of this MAC address
+*
+*******************************************************************************/
+static MV_U8 mvNetaMcastCrc8Get(MV_U8 *pAddr)
+{
+	unsigned int macH;
+	unsigned int macL;
+	int macArray[48];
+	int crc[8];
+	int i;
+	unsigned char crcResult = 0;
+
+	/* Calculate CRC-8 out of the given address */
+	macH = (pAddr[0] << 8) | (pAddr[1]);
+	macL = (pAddr[2] << 24) | (pAddr[3] << 16) | (pAddr[4] << 8) | (pAddr[5] << 0);
+
+	for (i = 0; i < 32; i++)
+		macArray[i] = (macL >> i) & 0x1;
+
+	for (i = 32; i < 48; i++)
+		macArray[i] = (macH >> (i - 32)) & 0x1;
+
+	crc[0] = macArray[45] ^ macArray[43] ^ macArray[40] ^ macArray[39] ^
+	    macArray[35] ^ macArray[34] ^ macArray[31] ^ macArray[30] ^
+	    macArray[28] ^ macArray[23] ^ macArray[21] ^ macArray[19] ^
+	    macArray[18] ^ macArray[16] ^ macArray[14] ^ macArray[12] ^
+	    macArray[8] ^ macArray[7] ^ macArray[6] ^ macArray[0];
+
+	crc[1] = macArray[46] ^ macArray[45] ^ macArray[44] ^ macArray[43] ^
+	    macArray[41] ^ macArray[39] ^ macArray[36] ^ macArray[34] ^
+	    macArray[32] ^ macArray[30] ^ macArray[29] ^ macArray[28] ^
+	    macArray[24] ^ macArray[23] ^ macArray[22] ^ macArray[21] ^
+	    macArray[20] ^ macArray[18] ^ macArray[17] ^ macArray[16] ^
+	    macArray[15] ^ macArray[14] ^ macArray[13] ^ macArray[12] ^
+	    macArray[9] ^ macArray[6] ^ macArray[1] ^ macArray[0];
+
+	crc[2] = macArray[47] ^ macArray[46] ^ macArray[44] ^ macArray[43] ^
+	    macArray[42] ^ macArray[39] ^ macArray[37] ^ macArray[34] ^
+	    macArray[33] ^ macArray[29] ^ macArray[28] ^ macArray[25] ^
+	    macArray[24] ^ macArray[22] ^ macArray[17] ^ macArray[15] ^
+	    macArray[13] ^ macArray[12] ^ macArray[10] ^ macArray[8] ^
+	    macArray[6] ^ macArray[2] ^ macArray[1] ^ macArray[0];
+
+	crc[3] = macArray[47] ^ macArray[45] ^ macArray[44] ^ macArray[43] ^
+	    macArray[40] ^ macArray[38] ^ macArray[35] ^ macArray[34] ^
+	    macArray[30] ^ macArray[29] ^ macArray[26] ^ macArray[25] ^
+	    macArray[23] ^ macArray[18] ^ macArray[16] ^ macArray[14] ^
+	    macArray[13] ^ macArray[11] ^ macArray[9] ^ macArray[7] ^ macArray[3] ^ macArray[2] ^ macArray[1];
+
+	crc[4] = macArray[46] ^ macArray[45] ^ macArray[44] ^ macArray[41] ^
+	    macArray[39] ^ macArray[36] ^ macArray[35] ^ macArray[31] ^
+	    macArray[30] ^ macArray[27] ^ macArray[26] ^ macArray[24] ^
+	    macArray[19] ^ macArray[17] ^ macArray[15] ^ macArray[14] ^
+	    macArray[12] ^ macArray[10] ^ macArray[8] ^ macArray[4] ^ macArray[3] ^ macArray[2];
+
+	crc[5] = macArray[47] ^ macArray[46] ^ macArray[45] ^ macArray[42] ^
+	    macArray[40] ^ macArray[37] ^ macArray[36] ^ macArray[32] ^
+	    macArray[31] ^ macArray[28] ^ macArray[27] ^ macArray[25] ^
+	    macArray[20] ^ macArray[18] ^ macArray[16] ^ macArray[15] ^
+	    macArray[13] ^ macArray[11] ^ macArray[9] ^ macArray[5] ^ macArray[4] ^ macArray[3];
+
+	crc[6] = macArray[47] ^ macArray[46] ^ macArray[43] ^ macArray[41] ^
+	    macArray[38] ^ macArray[37] ^ macArray[33] ^ macArray[32] ^
+	    macArray[29] ^ macArray[28] ^ macArray[26] ^ macArray[21] ^
+	    macArray[19] ^ macArray[17] ^ macArray[16] ^ macArray[14] ^
+	    macArray[12] ^ macArray[10] ^ macArray[6] ^ macArray[5] ^ macArray[4];
+
+	crc[7] = macArray[47] ^ macArray[44] ^ macArray[42] ^ macArray[39] ^
+	    macArray[38] ^ macArray[34] ^ macArray[33] ^ macArray[30] ^
+	    macArray[29] ^ macArray[27] ^ macArray[22] ^ macArray[20] ^
+	    macArray[18] ^ macArray[17] ^ macArray[15] ^ macArray[13] ^
+	    macArray[11] ^ macArray[7] ^ macArray[6] ^ macArray[5];
+
+	for (i = 0; i < 8; i++)
+		crcResult = crcResult | (crc[i] << i);
+
+	return crcResult;
+}
+
+/*******************************************************************************
+* mvNetaMcastAddrSet - Multicast address settings.
+*
+* DESCRIPTION:
+*       This API controls the MV device MAC multicast support.
+*       The MV device supports multicast using two tables:
+*       1) Special Multicast Table for MAC addresses of the form
+*          0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
+*          The MAC DA[7:0] bits are used as a pointer to the Special Multicast
+*          Table entries in the DA-Filter table.
+*          In this case, the function calls netaPortSmcAddr() routine to set the
+*          Special Multicast Table.
+*       2) Other Multicast Table for multicast of another type. A CRC-8bit
+*          is used as an index to the Other Multicast Table entries in the
+*          DA-Filter table.
+*          In this case, the function calculates the CRC-8bit value and calls
+*          netaPortOmcAddr() routine to set the Other Multicast Table.
+*
+* INPUT:
+*       void*   port            - Ethernet port.
+*       MV_U8*  pAddr           - Address to be set
+*       int     queue           - RX queue to capture all packets with this
+*                               Multicast MAC address.
+*                               -1 means delete this Multicast address.
+*
+* RETURN: MV_STATUS
+*       MV_TRUE - Success, Other - Failure
+*
+*******************************************************************************/
+MV_STATUS mvNetaMcastAddrSet(int port, MV_U8 *pAddr, int queue)
+{
+	if (queue >= CONFIG_MV_ETH_RXQ) {
+		mvOsPrintf("ethPort %d: RX queue #%d is out of range\n", port, queue);
+		return MV_BAD_PARAM;
+	}
+
+	if ((pAddr[0] == 0x01) && (pAddr[1] == 0x00) && (pAddr[2] == 0x5E) && (pAddr[3] == 0x00) && (pAddr[4] == 0x00)) {
+		netaSetSpecialMcastAddr(port, pAddr[5], queue);
+	} else {
+		unsigned char crcResult = 0;
+		MV_NETA_PORT_CTRL *pPortCtrl = mvNetaPortHndlGet(port);
+
+		crcResult = mvNetaMcastCrc8Get(pAddr);
+
+		/* Check Add counter for this CRC value */
+		if (queue == -1) {
+			if (pPortCtrl->mcastCount[crcResult] == 0) {
+				mvOsPrintf("ethPort #%d: No valid Mcast for crc8=0x%02x\n", port, (unsigned)crcResult);
+				return MV_NO_SUCH;
+			}
+
+			pPortCtrl->mcastCount[crcResult]--;
+			if (pPortCtrl->mcastCount[crcResult] != 0) {
+				mvNetaDebugPrintf("ethPort #%d: Left %d valid Mcast for crc8=0x%02x\n",
+					   pPortCtrl->portNo, pPortCtrl->mcastCount[crcResult], (unsigned)crcResult);
+				return MV_NO_CHANGE;
+			}
+		} else {
+			pPortCtrl->mcastCount[crcResult]++;
+			if (pPortCtrl->mcastCount[crcResult] > 1) {
+				mvNetaDebugPrintf("ethPort #%d: Exist %d valid Mcast for crc8=0x%02x\n",
+					   port, pPortCtrl->mcastCount[crcResult], (unsigned)crcResult);
+				return MV_NO_CHANGE;
+			}
+		}
+		netaSetOtherMcastAddr(port, crcResult, queue);
+	}
+	return MV_OK;
+}
+/************************ Legacy parse function end *******************************/
+
+/*******************************************************************************
+* mvNetaSetUcastTable - Unicast address settings.
+*
+* DESCRIPTION:
+*      Set all entries in the Unicast MAC Table queue==-1 means reject all
+* INPUT:
+*
+* RETURN:
+*
+*******************************************************************************/
+void mvNetaSetUcastTable(int port, int queue)
+{
+	int offset;
+	MV_U32 regValue;
+
+	if (queue == -1) {
+		regValue = 0;
+	} else {
+		regValue = (((0x01 | (queue << 1)) << 0) |
+			    ((0x01 | (queue << 1)) << 8) |
+			    ((0x01 | (queue << 1)) << 16) | ((0x01 | (queue << 1)) << 24));
+	}
+
+	for (offset = 0; offset <= 0xC; offset += 4)
+		MV_REG_WRITE((ETH_DA_FILTER_UCAST_BASE(port) + offset), regValue);
+}
+
+/*******************************************************************************
+* mvNetaSetSpecialMcastTable - Special Multicast address settings.
+*
+* DESCRIPTION:
+*   Set all entries to the Special Multicast MAC Table. queue==-1 means reject all
+* INPUT:
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_VOID mvNetaSetSpecialMcastTable(int portNo, int queue)
+{
+	int offset;
+	MV_U32 regValue;
+
+	if (queue == -1) {
+		regValue = 0;
+	} else {
+		regValue = (((0x01 | (queue << 1)) << 0) |
+			    ((0x01 | (queue << 1)) << 8) |
+			    ((0x01 | (queue << 1)) << 16) | ((0x01 | (queue << 1)) << 24));
+	}
+
+	for (offset = 0; offset <= 0xFC; offset += 4)
+		MV_REG_WRITE((ETH_DA_FILTER_SPEC_MCAST_BASE(portNo) + offset), regValue);
+}
+
+/*******************************************************************************
+* mvNetaSetOtherMcastTable - Other Multicast address settings.
+*
+* DESCRIPTION:
+*   Set all entries to the Other Multicast MAC Table. queue==-1 means reject all
+* INPUT:
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_VOID mvNetaSetOtherMcastTable(int portNo, int queue)
+{
+	int offset;
+	MV_U32 regValue;
+	MV_NETA_PORT_CTRL *pPortCtrl = mvNetaPortHndlGet(portNo);
+
+	if (queue == -1) {
+		memset(pPortCtrl->mcastCount, 0, sizeof(pPortCtrl->mcastCount));
+		regValue = 0;
+	} else {
+		memset(pPortCtrl->mcastCount, 1, sizeof(pPortCtrl->mcastCount));
+		regValue = (((0x01 | (queue << 1)) << 0) |
+			    ((0x01 | (queue << 1)) << 8) |
+			    ((0x01 | (queue << 1)) << 16) | ((0x01 | (queue << 1)) << 24));
+	}
+
+	for (offset = 0; offset <= 0xFC; offset += 4)
+		MV_REG_WRITE((ETH_DA_FILTER_OTH_MCAST_BASE(portNo) + offset), regValue);
+}
+
+/************************ Legacy parse function start *******************************/
+/*******************************************************************************
+* mvNetaTosToRxqSet - Map packets with special TOS value to special RX queue
+*
+* DESCRIPTION:
+*
+* INPUT:
+*		int     portNo		- Port number.
+*       int     tos         - TOS value in the IP header of the packet
+*       int     rxq         - RX Queue for packets with the configured TOS value
+*                           Negative value (-1) means no special processing for these packets,
+*                           so they will be processed as regular packets.
+*
+* RETURN:   MV_STATUS
+*******************************************************************************/
+MV_STATUS   mvNetaTosToRxqSet(int port, int tos, int rxq)
+{
+	MV_U32          regValue;
+	int             regIdx, regOffs;
+
+	if ((rxq < 0) || (rxq >= MV_ETH_MAX_RXQ)) {
+		mvOsPrintf("eth_%d: RX queue #%d is out of range\n", port, rxq);
+		return MV_BAD_PARAM;
+	}
+	if (tos > 0xFF) {
+		mvOsPrintf("eth_%d: tos=0x%x is out of range\n", port, tos);
+		return MV_BAD_PARAM;
+	}
+	regIdx  = mvOsDivide(tos >> 2, 10);
+	regOffs = mvOsReminder(tos >> 2, 10);
+
+	regValue = MV_REG_READ(ETH_DIFF_SERV_PRIO_REG(port, regIdx));
+	regValue &= ~(0x7 << (regOffs*3));
+	regValue |= (rxq << (regOffs*3));
+
+	MV_REG_WRITE(ETH_DIFF_SERV_PRIO_REG(port, regIdx), regValue);
+	return MV_OK;
+}
+
+int     mvNetaTosToRxqGet(int port, int tos)
+{
+	MV_U32          regValue;
+	int             regIdx, regOffs, rxq;
+
+	if (tos > 0xFF) {
+		mvOsPrintf("eth_%d: tos=0x%x is out of range\n", port, tos);
+		return -1;
+	}
+	regIdx  = mvOsDivide(tos >> 2, 10);
+	regOffs = mvOsReminder(tos >> 2, 10);
+
+	regValue = MV_REG_READ(ETH_DIFF_SERV_PRIO_REG(port, regIdx));
+	rxq = (regValue >> (regOffs * 3));
+	rxq &= 0x7;
+
+	return rxq;
+}
+
+/*******************************************************************************
+* mvNetaVprioToRxqSet - Map packets with special VLAN priority to special RX queue
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       int     portNo  - Port number.
+*       int     vprio   - Vlan Priority value in packet header
+*       int     rxq     - RX Queue for packets with the configured TOS value
+*                         Negative value (-1) means no special processing for these packets,
+*                         so they will be processed as regular packets.
+*
+* RETURN:   MV_STATUS
+*******************************************************************************/
+MV_STATUS   mvNetaVprioToRxqSet(int port, int vprio, int rxq)
+{
+	MV_U32          regValue;
+
+	if ((rxq < 0) || (rxq >= MV_ETH_MAX_RXQ)) {
+		mvOsPrintf("eth_%d: RX queue #%d is out of range\n", port, rxq);
+		return MV_BAD_PARAM;
+	}
+	if (vprio > 0x7) {
+		mvOsPrintf("eth_%d: vprio=0x%x is out of range\n", port, vprio);
+		return MV_BAD_PARAM;
+	}
+
+	regValue = MV_REG_READ(ETH_VLAN_TAG_TO_PRIO_REG(port));
+	regValue &= ~(0x7 << (vprio * 3));
+	regValue |= (rxq << (vprio * 3));
+
+	MV_REG_WRITE(ETH_VLAN_TAG_TO_PRIO_REG(port), regValue);
+	return MV_OK;
+}
+
+int     mvNetaVprioToRxqGet(int port, int vprio)
+{
+	MV_U32          regValue;
+	int             rxq;
+
+	if (vprio > 0x7) {
+		mvOsPrintf("eth_%d: vprio=0x%x is out of range\n", port, vprio);
+		return -1;
+	}
+
+	regValue = MV_REG_READ(ETH_VLAN_TAG_TO_PRIO_REG(port));
+	rxq = (regValue >> (vprio * 3));
+	rxq &= 0x7;
+
+	return rxq;
+}
+/************************ Legacy parse function end *******************************/
+
+/******************************************************************************/
+/*                         PHY Control Functions                              */
+/******************************************************************************/
+
+/*******************************************************************************
+* mvNetaPhyAddrSet - Set the ethernet port PHY address.
+*
+* DESCRIPTION:
+*       This routine set the ethernet port PHY address according to given
+*       parameter.
+*
+* INPUT:
+*       int     portNo		- Port number.
+*       int     phyAddr     - PHY address
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+void mvNetaPhyAddrSet(int port, int phyAddr)
+{
+	unsigned int regData;
+
+	regData = MV_REG_READ(ETH_PHY_ADDR_REG(port));
+
+	regData &= ~ETH_PHY_ADDR_MASK;
+	regData |= phyAddr;
+
+	MV_REG_WRITE(ETH_PHY_ADDR_REG(port), regData);
+
+	/* Enable PHY polling */
+	regData = MV_REG_READ(ETH_UNIT_CONTROL_REG(port));
+	regData |= ETH_PHY_POLLING_ENABLE_MASK;
+	MV_REG_WRITE(ETH_UNIT_CONTROL_REG(port), regData);
+
+	return;
+}
+/*******************************************************************************
+* mvNetaPhyAddrPollingDisable - disable PHY polling
+*
+* DESCRIPTION:
+*       This routine diable PHY polling
+*
+* INPUT:
+*       int     portNo		- Port number.
+*
+* RETURN:
+*       None.
+*
+*******************************************************************************/
+void mvNetaPhyAddrPollingDisable(int port)
+{
+	unsigned int regData;
+
+	/* Enable PHY polling */
+	regData = MV_REG_READ(ETH_UNIT_CONTROL_REG(port));
+	regData &= ~ETH_PHY_POLLING_ENABLE_MASK;
+	MV_REG_WRITE(ETH_UNIT_CONTROL_REG(port), regData);
+
+	return;
+}
+
+/*******************************************************************************
+* mvNetaPhyAddrGet - Get the ethernet port PHY address.
+*
+* DESCRIPTION:
+*       This routine returns the given ethernet port PHY address.
+*
+* INPUT:
+*   int     portNo		- Port number.
+*
+*
+* RETURN: int - PHY address.
+*
+*******************************************************************************/
+int mvNetaPhyAddrGet(int port)
+{
+	unsigned int 	regData;
+	int		phy;
+
+	regData = MV_REG_READ(ETH_PHY_ADDR_REG(port));
+
+	phy = (regData >> (5 * port));
+	phy &= 0x1F;
+	return phy;
+}
+
+/******************************************************************************/
+/*                Descriptor handling Functions                               */
+/******************************************************************************/
+
+/*******************************************************************************
+* mvNetaDescRingReset -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*       MV_NETA_PORT_CTRL	*pPortCtrl	NETA Port Control srtucture.
+*       int			queue		Number of Rx queue.
+*
+* OUTPUT:
+*
+* RETURN: None
+*
+*******************************************************************************/
+static void mvNetaDescRingReset(MV_NETA_QUEUE_CTRL *pQueueCtrl)
+{
+	int		descrNum = (pQueueCtrl->lastDesc + 1);
+	char	*pDesc = pQueueCtrl->pFirst;
+
+	if (pDesc == NULL)
+		return;
+
+	/* reset ring of descriptors */
+	memset(pDesc, 0, (descrNum * NETA_DESC_ALIGNED_SIZE));
+	pQueueCtrl->nextToProc = 0;
+}
+
+
+/* Reset all RXQs */
+void mvNetaRxReset(int port)
+{
+	int rxq;
+	MV_NETA_RXQ_CTRL *pRxqCtrl;
+	MV_NETA_PORT_CTRL *pPortCtrl = mvNetaPortCtrl[port];
+
+	MV_REG_WRITE(NETA_PORT_RX_RESET_REG(port), NETA_PORT_RX_DMA_RESET_MASK);
+	for (rxq = 0; rxq < pPortCtrl->rxqNum ; rxq++) {
+		pRxqCtrl = mvNetaRxqHndlGet(port, rxq);
+		/* Check queue is initialized or not, if not init, skip reset */
+		if (NULL == pRxqCtrl->queueCtrl.pFirst)
+			continue;
+		mvNetaDescRingReset(&pRxqCtrl->queueCtrl);
+		mvOsCacheFlush(pPortCtrl->osHandle, pRxqCtrl->queueCtrl.pFirst,
+		((pRxqCtrl->queueCtrl.lastDesc + 1) * NETA_DESC_ALIGNED_SIZE));
+	}
+	MV_REG_WRITE(NETA_PORT_RX_RESET_REG(port), 0);
+}
+
+/* Reset all TXQs */
+void mvNetaTxpReset(int port, int txp)
+{
+	int txq;
+	MV_NETA_TXQ_CTRL *pTxqCtrl;
+	MV_NETA_PORT_CTRL *pPortCtrl = mvNetaPortCtrl[port];
+
+	MV_REG_WRITE(NETA_PORT_TX_RESET_REG(port, txp), NETA_PORT_TX_DMA_RESET_MASK);
+	for (txq = 0; txq < pPortCtrl->txqNum; txq++) {
+		pTxqCtrl = mvNetaTxqHndlGet(port, txp, txq);
+		/* Check queue is initialized or not, if not init, skip reset */
+		if (NULL == pTxqCtrl->queueCtrl.pFirst)
+			continue;
+		mvNetaDescRingReset(&pTxqCtrl->queueCtrl);
+		mvOsCacheFlush(pPortCtrl->osHandle, pTxqCtrl->queueCtrl.pFirst,
+		((pTxqCtrl->queueCtrl.lastDesc + 1) * NETA_DESC_ALIGNED_SIZE));
+	}
+	MV_REG_WRITE(NETA_PORT_TX_RESET_REG(port, txp), 0);
+}
+
+/*******************************************************************************
+* mvNetaRxqInit -
+*
+* DESCRIPTION:
+*
+* INPUT:
+*   int     portNo		- Port number.
+*   int		queue		- Number of Rx queue.
+*	int		descrNum	- Number of descriptors
+*
+* OUTPUT:
+*
+* RETURN: None
+*
+*******************************************************************************/
+MV_NETA_RXQ_CTRL *mvNetaRxqInit(int port, int queue, int descrNum)
+{
+	MV_NETA_PORT_CTRL *pPortCtrl = mvNetaPortCtrl[port];
+	MV_NETA_RXQ_CTRL *pRxqCtrl = &pPortCtrl->pRxQueue[queue];
+	MV_NETA_QUEUE_CTRL *pQueueCtrl = &pRxqCtrl->queueCtrl;
+	int descSize;
+
+	/* Allocate memory for RX descriptors */
+	descSize = ((descrNum * NETA_DESC_ALIGNED_SIZE) + CPU_D_CACHE_LINE_SIZE);
+	pQueueCtrl->descBuf.bufVirtPtr =
+	    mvNetaDescrMemoryAlloc(pPortCtrl, descSize, &pQueueCtrl->descBuf.bufPhysAddr, &pQueueCtrl->descBuf.memHandle);
+
+	pQueueCtrl->descBuf.bufSize = descSize;
+
+	if (pQueueCtrl->descBuf.bufVirtPtr == NULL) {
+		mvOsPrintf("EthPort #%d, rxQ=%d: Can't allocate %d bytes for %d RX descr\n",
+			   pPortCtrl->portNo, queue, descSize, descrNum);
+		return NULL;
+	}
+
+	/* Make sure descriptor address is cache line size aligned  */
+	pQueueCtrl->pFirst = (char *)MV_ALIGN_UP((MV_ULONG) pQueueCtrl->descBuf.bufVirtPtr, CPU_D_CACHE_LINE_SIZE);
+
+	pQueueCtrl->lastDesc = (descrNum - 1);
+
+	mvNetaDescRingReset(pQueueCtrl);
+	mvOsCacheFlush(pPortCtrl->osHandle, pQueueCtrl->pFirst, (pQueueCtrl->lastDesc + 1) * NETA_DESC_ALIGNED_SIZE);
+
+	mvNetaRxqAddrSet(port, queue, descrNum);
+
+	return pRxqCtrl;
+}
+
+/* Set Rx descriptors queue starting address */
+void mvNetaRxqAddrSet(int port, int queue, int descrNum)
+{
+	MV_NETA_PORT_CTRL *pPortCtrl = mvNetaPortCtrl[port];
+	MV_NETA_RXQ_CTRL *pRxqCtrl = &pPortCtrl->pRxQueue[queue];
+	MV_NETA_QUEUE_CTRL *pQueueCtrl = &pRxqCtrl->queueCtrl;
+
+	/* Check queue is initialized or not, if not init, return */
+	if (NULL == pQueueCtrl->pFirst)
+		return;
+
+	/* Set Rx descriptors queue starting address */
+	MV_REG_WRITE(NETA_RXQ_BASE_ADDR_REG(pPortCtrl->portNo, queue),
+		     netaDescVirtToPhys(pQueueCtrl, (MV_U8 *)pQueueCtrl->pFirst));
+	MV_REG_WRITE(NETA_RXQ_SIZE_REG(pPortCtrl->portNo, queue), descrNum);
+}
+
+/*******************************************************************************
+* mvNetaTxqInit - Allocate required memory and initialize TXQ descriptor ring.
+*
+* DESCRIPTION:
+*
+* INPUT:
+*		int     portNo		- Port number.
+*		int		txp			- Number of T-CONT instance.
+*		int		queue		- Number of Tx queue.
+*		int		descrNum	- Number of descriptors
+*
+* OUTPUT:
+*
+* RETURN: None
+*
+*******************************************************************************/
+MV_NETA_TXQ_CTRL *mvNetaTxqInit(int port, int txp, int queue, int descrNum)
+{
+	MV_NETA_PORT_CTRL *pPortCtrl = mvNetaPortCtrl[port];
+	MV_NETA_TXQ_CTRL *pTxqCtrl;
+	MV_NETA_QUEUE_CTRL *pQueueCtrl;
+	int descSize;
+
+	pTxqCtrl = mvNetaTxqHndlGet(port, txp, queue);
+	pQueueCtrl = &pTxqCtrl->queueCtrl;
+
+	/* Allocate memory for TX descriptors */
+	descSize = ((descrNum * NETA_DESC_ALIGNED_SIZE) + CPU_D_CACHE_LINE_SIZE);
+	pQueueCtrl->descBuf.bufVirtPtr =
+	    mvNetaDescrMemoryAlloc(pPortCtrl, descSize, &pQueueCtrl->descBuf.bufPhysAddr, &pQueueCtrl->descBuf.memHandle);
+
+	pQueueCtrl->descBuf.bufSize = descSize;
+
+	if (pQueueCtrl->descBuf.bufVirtPtr == NULL) {
+		mvOsPrintf("EthPort #%d, txQ=%d: Can't allocate %d bytes for %d TX descr\n",
+			   pPortCtrl->portNo, queue, descSize, descrNum);
+		return NULL;
+	}
+
+	/* Make sure descriptor address is cache line size aligned  */
+	pQueueCtrl->pFirst = (char *)MV_ALIGN_UP((MV_ULONG) pQueueCtrl->descBuf.bufVirtPtr, CPU_D_CACHE_LINE_SIZE);
+
+	pQueueCtrl->lastDesc = (descrNum - 1);
+
+	mvNetaDescRingReset(pQueueCtrl);
+	mvOsCacheFlush(pPortCtrl->osHandle, pQueueCtrl->pFirst, (pQueueCtrl->lastDesc + 1) * NETA_DESC_ALIGNED_SIZE);
+
+	mvNetaTxqBandwidthSet(port, txp, queue);
+
+	mvNetaTxqAddrSet(port, txp, queue, descrNum);
+
+	return pTxqCtrl;
+}
+
+
+/* Set Rx descriptors queue starting address */
+void mvNetaTxqAddrSet(int port, int txp, int queue, int descrNum)
+{
+	MV_NETA_TXQ_CTRL *pTxqCtrl;
+	MV_NETA_QUEUE_CTRL *pQueueCtrl;
+
+	pTxqCtrl = mvNetaTxqHndlGet(port, txp, queue);
+	pQueueCtrl = &pTxqCtrl->queueCtrl;
+
+	/* Check queue is initialized or not, if not init, return */
+	if (NULL == pQueueCtrl->pFirst)
+		return;
+
+	/* Set Tx descriptors queue starting address */
+	MV_REG_WRITE(NETA_TXQ_BASE_ADDR_REG(port, txp, queue), netaDescVirtToPhys(pQueueCtrl, (MV_U8 *)pQueueCtrl->pFirst));
+
+	MV_REG_WRITE(NETA_TXQ_SIZE_REG(port, txp, queue), NETA_TXQ_DESC_NUM_MASK(descrNum));
+}
+
+/* Set maximum bandwidth for TX port */
+void mvNetaTxpRateMaxSet(int port, int txp)
+{
+	MV_U32 regVal = NETA_TXP_REFILL_TOKENS_ALL_MASK | NETA_TXP_REFILL_PERIOD_MASK(1);
+
+	MV_REG_WRITE(NETA_TXP_REFILL_REG(port, txp), regVal);
+	MV_REG_WRITE(NETA_TXP_TOKEN_CNTR_REG(port, txp), NETA_TXP_TOKEN_CNTR_MAX);
+}
+
+/* Set maximum bandwidth for enabled TXQs */
+void mvNetaTxqBandwidthSet(int port, int txp,  int queue)
+{
+	MV_U32 regVal = NETA_TXQ_REFILL_TOKENS_ALL_MASK | NETA_TXQ_REFILL_PERIOD_MASK(1);
+
+	MV_REG_WRITE(NETA_TXQ_REFILL_REG(port, txp, queue), regVal);
+	MV_REG_WRITE(NETA_TXQ_TOKEN_CNTR_REG(port, txp, queue), NETA_TXQ_TOKEN_CNTR_MAX);
+}
+
+
+/*******************************************************************************
+* mvNetaRxqDelete - Delete RXQ and free memory allocated for descriptors ring.
+*
+* DESCRIPTION:
+*
+* INPUT:
+*		int     port		- Port number.
+*		int		queue		- Number of RX queue.
+*
+* OUTPUT:
+*
+* RETURN: None
+*
+*******************************************************************************/
+void mvNetaRxqDelete(int port, int queue)
+{
+	MV_NETA_PORT_CTRL *pPortCtrl =  mvNetaPortCtrl[port];
+	MV_NETA_QUEUE_CTRL *pQueueCtrl = &pPortCtrl->pRxQueue[queue].queueCtrl;
+
+	mvNetaDescrMemoryFree(pPortCtrl, &pQueueCtrl->descBuf);
+
+	memset(pQueueCtrl, 0, sizeof(*pQueueCtrl));
+
+	/* Clear Rx descriptors queue starting address and size */
+	MV_REG_WRITE(NETA_RXQ_BASE_ADDR_REG(port, queue), 0);
+	MV_REG_WRITE(NETA_RXQ_SIZE_REG(port, queue), 0);
+}
+
+/*******************************************************************************
+* mvNetaTxqDelete - Delete TXQ and free memory allocated for descriptors ring.
+*
+* DESCRIPTION:
+*
+* INPUT:
+*		int     port		- Port number.
+*		int		txp			- Number of T-CONT instance.
+*		int		queue		- Number of Tx queue.
+*
+* OUTPUT:
+*
+* RETURN: None
+*
+*******************************************************************************/
+void mvNetaTxqDelete(int port, int txp, int queue)
+{
+	MV_NETA_PORT_CTRL *pPortCtrl =  mvNetaPortCtrl[port];
+	MV_NETA_QUEUE_CTRL *pQueueCtrl = &pPortCtrl->pTxQueue[queue].queueCtrl;
+
+	mvNetaDescrMemoryFree(pPortCtrl, &pQueueCtrl->descBuf);
+
+	memset(pQueueCtrl, 0, sizeof(*pQueueCtrl));
+
+	/* Set minimum bandwidth for disabled TXQs */
+	MV_REG_WRITE(NETA_TXQ_TOKEN_CNTR_REG(port, txp, queue), 0);
+
+	/* Set Tx descriptors queue starting address and size */
+	MV_REG_WRITE(NETA_TXQ_BASE_ADDR_REG(port, txp, queue), 0);
+	MV_REG_WRITE(NETA_TXQ_SIZE_REG(port, txp, queue), 0);
+}
+
+
+/*******************************************************************************
+* mvNetaDescrMemoryFree - Free memory allocated for RX and TX descriptors.
+*
+* DESCRIPTION:
+*       This function frees memory allocated for RX and TX descriptors.
+*
+* INPUT:
+*
+* RETURN: None
+*
+*******************************************************************************/
+static void mvNetaDescrMemoryFree(MV_NETA_PORT_CTRL *pPortCtrl, MV_BUF_INFO *pDescBuf)
+{
+	if ((pDescBuf == NULL) || (pDescBuf->bufVirtPtr == NULL))
+		return;
+
+#ifdef ETH_DESCR_UNCACHED
+	mvOsIoUncachedFree(pPortCtrl->osHandle, pDescBuf->bufSize, pDescBuf->bufPhysAddr,
+			   pDescBuf->bufVirtPtr, pDescBuf->memHandle);
+#else
+	mvOsIoCachedFree(pPortCtrl->osHandle, pDescBuf->bufSize, pDescBuf->bufPhysAddr,
+			 pDescBuf->bufVirtPtr, pDescBuf->memHandle);
+#endif /* ETH_DESCR_UNCACHED */
+}
+
+/*******************************************************************************
+* mvNetaDescrMemoryAlloc - Allocate memory for RX and TX descriptors.
+*
+* DESCRIPTION:
+*       This function allocates memory for RX and TX descriptors.
+*
+* INPUT:
+*
+* RETURN: None
+*
+*******************************************************************************/
+static MV_U8 *mvNetaDescrMemoryAlloc(MV_NETA_PORT_CTRL *pPortCtrl, int descSize,
+				   MV_ULONG *pPhysAddr, MV_U32 *memHandle)
+{
+	MV_U8 *pVirt;
+
+#ifdef ETH_DESCR_UNCACHED
+	pVirt = (MV_U8 *)mvOsIoUncachedMalloc(pPortCtrl->osHandle, descSize, pPhysAddr, memHandle);
+#else
+	pVirt = (MV_U8 *)mvOsIoCachedMalloc(pPortCtrl->osHandle, descSize, pPhysAddr, memHandle);
+#endif /* ETH_DESCR_UNCACHED */
+
+	if (pVirt)
+		memset(pVirt, 0, descSize);
+
+	return pVirt;
+}
+
+/***************** Configuration functions ************************/
+
+MV_STATUS mvNetaMhSet(int port, MV_NETA_MH_MODE mh)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(ETH_PORT_MARVELL_HEADER_REG(port));
+	/* Clear relevant fields */
+	regVal &= ~(ETH_DSA_EN_MASK | ETH_MH_EN_MASK);
+	switch (mh) {
+	case MV_NETA_MH_NONE:
+		break;
+
+	case MV_NETA_MH:
+		regVal |= ETH_MH_EN_MASK;
+		break;
+
+	case MV_NETA_DSA:
+		regVal |= ETH_DSA_MASK;
+		break;
+
+	case MV_NETA_DSA_EXT:
+		regVal |= ETH_DSA_EXT_MASK;
+
+	default:
+		mvOsPrintf("port=%d: Unexpected MH = %d value\n", port, mh);
+		return MV_BAD_PARAM;
+	}
+	MV_REG_WRITE(ETH_PORT_MARVELL_HEADER_REG(port), regVal);
+	return MV_OK;
+}
+
+MV_STATUS mvNetaTagSet(int port, MV_TAG_TYPE mh)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(ETH_PORT_MARVELL_HEADER_REG(port));
+	/* Clear relevant fields */
+	regVal &= ~(ETH_DSA_EN_MASK | ETH_MH_EN_MASK);
+	switch (mh) {
+	case MV_TAG_TYPE_NONE:
+		break;
+
+	case MV_TAG_TYPE_MH:
+		regVal |= ETH_MH_EN_MASK;
+		break;
+
+	case MV_TAG_TYPE_DSA:
+		regVal |= ETH_DSA_MASK;
+		break;
+
+	case MV_TAG_TYPE_EDSA:
+		regVal |= ETH_DSA_EXT_MASK;
+
+	default:
+		mvOsPrintf("port=%d: Unexpected MH = %d value\n", port, mh);
+		return MV_BAD_PARAM;
+	}
+	MV_REG_WRITE(ETH_PORT_MARVELL_HEADER_REG(port), regVal);
+	return MV_OK;
+}
+
+/* Set one of NETA_TX_MAX_MH_REGS registers */
+MV_STATUS mvNetaTxMhRegSet(int port, int txp, int reg, MV_U16 mh)
+{
+	/* Check params */
+	if (mvNetaTxpCheck(port, txp))
+		return MV_BAD_PARAM;
+
+	if (reg >= NETA_TX_MAX_MH_REGS)
+		return MV_BAD_PARAM;
+
+	/* Write register */
+	MV_REG_WRITE(NETA_TX_MH_REG(port, txp, reg), (MV_U32)mh);
+		return MV_OK;
+}
+
+
+MV_STATUS mvNetaRxqBufSizeSet(int port, int rxq, int bufSize)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(NETA_RXQ_SIZE_REG(port, rxq));
+
+	regVal &= ~NETA_RXQ_BUF_SIZE_MASK;
+	regVal |= ((bufSize >> 3) << NETA_RXQ_BUF_SIZE_OFFS);
+
+	MV_REG_WRITE(NETA_RXQ_SIZE_REG(port, rxq), regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS mvNetaRxqTimeCoalSet(int port, int rxq, MV_U32 uSec)
+{
+	MV_U32 regVal;
+
+	regVal = (mvNetaHalData.tClk / 1000000) * uSec;
+
+	MV_REG_WRITE(NETA_RXQ_INTR_TIME_COAL_REG(port, rxq), regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS mvNetaRxqPktsCoalSet(int port, int rxq, MV_U32 pkts)
+{
+	MV_REG_WRITE(NETA_RXQ_THRESHOLD_REG(port, rxq),
+		     (NETA_RXQ_OCCUPIED_DESC_MASK(pkts) | NETA_RXQ_NON_OCCUPIED_DESC_MASK(0)));
+
+	return MV_OK;
+}
+
+MV_STATUS mvNetaTxDonePktsCoalSet(int port, int txp, int txq, MV_U32 pkts)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(NETA_TXQ_SIZE_REG(port, txp, txq));
+
+	regVal &= ~NETA_TXQ_SENT_DESC_TRESH_ALL_MASK;
+	regVal |= NETA_TXQ_SENT_DESC_TRESH_MASK(pkts);
+
+	MV_REG_WRITE(NETA_TXQ_SIZE_REG(port, txp, txq), regVal);
+
+	return MV_OK;
+}
+
+MV_U32 mvNetaRxqTimeCoalGet(int port, int rxq)
+{
+	MV_U32 regVal, uSec;
+
+	regVal = MV_REG_READ(NETA_RXQ_INTR_TIME_COAL_REG(port, rxq));
+
+	uSec = regVal / (mvNetaHalData.tClk / 1000000);
+
+	return uSec;
+}
+
+MV_U32 mvNetaRxqPktsCoalGet(int port, int rxq)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(NETA_RXQ_THRESHOLD_REG(port, rxq));
+
+	return ((regVal & NETA_RXQ_OCCUPIED_DESC_ALL_MASK) >> NETA_RXQ_OCCUPIED_DESC_OFFS);
+}
+
+MV_U32 mvNetaTxDonePktsCoalGet(int port, int txp, int txq)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(NETA_TXQ_SIZE_REG(port, txp, txq));
+
+	return ((regVal & NETA_TXQ_SENT_DESC_TRESH_ALL_MASK) >> NETA_TXQ_SENT_DESC_TRESH_OFFS);
+}
+
+/*******************************************************************************
+* mvNetaPortUp - Start the Ethernet port RX and TX activity.
+*
+* DESCRIPTION:
+*       This routine start Rx and Tx activity:
+*
+*       Note: Each Rx and Tx queue descriptor's list must be initialized prior
+*       to calling this function (use etherInitTxDescRing for Tx queues and
+*       etherInitRxDescRing for Rx queues).
+*
+* INPUT:
+*		int     portNo		- Port number.
+*
+* RETURN:   MV_STATUS
+*           MV_OK - Success, Others - Failure.
+*
+*******************************************************************************/
+MV_STATUS mvNetaPortUp(int port)
+{
+	int queue, txp;
+	MV_U32 qMap;
+	MV_NETA_PORT_CTRL *pPortCtrl = mvNetaPortHndlGet(port);
+	MV_NETA_QUEUE_CTRL *pQueueCtrl;
+
+	/* Enable all initialized TXs. */
+	for (txp = 0; txp < pPortCtrl->txpNum; txp++) {
+		mvNetaMibCountersClear(port, txp);
+
+		qMap = 0;
+		for (queue = 0; queue < CONFIG_MV_ETH_TXQ; queue++) {
+			pQueueCtrl = &pPortCtrl->pTxQueue[txp * CONFIG_MV_ETH_TXQ + queue].queueCtrl;
+
+			if (pQueueCtrl->pFirst != NULL)
+				qMap |= (1 << queue);
+		}
+		MV_REG_WRITE(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl->portNo, txp), qMap);
+	}
+	/* Enable all initialized RXQs. */
+	qMap = 0;
+	for (queue = 0; queue < CONFIG_MV_ETH_RXQ; queue++) {
+		pQueueCtrl = &pPortCtrl->pRxQueue[queue].queueCtrl;
+
+		if (pQueueCtrl->pFirst != NULL)
+			qMap |= (1 << queue);
+	}
+	MV_REG_WRITE(ETH_RX_QUEUE_COMMAND_REG(pPortCtrl->portNo), qMap);
+
+/*
+	mvOsPrintf("Start TX port activity: regData=0x%x (0x%x)\n",
+		pPortCtrl->txqMap, MV_REG_READ(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl->portNo)));
+*/
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNetaPortDown - Stop the Ethernet port activity.
+*
+* DESCRIPTION:
+*
+* INPUT:
+*		int     portNo		- Port number.
+*
+* RETURN:   MV_STATUS
+*               MV_OK - Success, Others - Failure.
+*
+* NOTE : used for port link down.
+*******************************************************************************/
+MV_STATUS mvNetaPortDown(int port)
+{
+	int	          txp;
+	MV_NETA_PORT_CTRL *pPortCtrl = mvNetaPortHndlGet(port);
+	MV_U32 		  regData, txFifoEmptyMask = 0, txInProgMask = 0;
+	int 		  mDelay;
+
+	/* Stop Rx port activity. Check port Rx activity. */
+	regData = (MV_REG_READ(ETH_RX_QUEUE_COMMAND_REG(port))) & ETH_RXQ_ENABLE_MASK;
+	if (regData != 0) {
+		/* Issue stop command for active channels only */
+		MV_REG_WRITE(ETH_RX_QUEUE_COMMAND_REG(port), (regData << ETH_RXQ_DISABLE_OFFSET));
+	}
+
+	if (!MV_PON_PORT(port)) {
+		/* Wait for all Rx activity to terminate. */
+		mDelay = 0;
+		do {
+			if (mDelay >= RX_DISABLE_TIMEOUT_MSEC) {
+				mvOsPrintf("ethPort_%d: TIMEOUT for RX stopped !!! rxQueueCmd - 0x08%x\n", port, regData);
+				break;
+			}
+			mvOsDelay(1);
+			mDelay++;
+
+			/* Check port RX Command register that all Rx queues are stopped */
+			regData = MV_REG_READ(ETH_RX_QUEUE_COMMAND_REG(port));
+		} while (regData & 0xFF);
+	}
+
+	if (!MV_PON_PORT(port)) {
+		/* Stop Tx port activity. Check port Tx activity. */
+		for (txp = 0; txp < pPortCtrl->txpNum; txp++) {
+			/* Issue stop command for active channels only */
+			regData = (MV_REG_READ(ETH_TX_QUEUE_COMMAND_REG(port, txp))) & ETH_TXQ_ENABLE_MASK;
+			if (regData != 0)
+				MV_REG_WRITE(ETH_TX_QUEUE_COMMAND_REG(port, txp), (regData << ETH_TXQ_DISABLE_OFFSET));
+
+			/* Wait for all Tx activity to terminate. */
+			mDelay = 0;
+			do {
+				if (mDelay >= TX_DISABLE_TIMEOUT_MSEC) {
+					mvOsPrintf("port=%d, txp=%d: TIMEOUT for TX stopped !!! txQueueCmd - 0x%08x\n",
+						   port, txp, regData);
+					break;
+				}
+				mvOsDelay(1);
+				mDelay++;
+
+				/* Check port TX Command register that all Tx queues are stopped */
+				regData = MV_REG_READ(ETH_TX_QUEUE_COMMAND_REG(port, txp));
+			} while (regData & 0xFF);
+#ifdef MV_ETH_GMAC_NEW
+			txFifoEmptyMask |= ETH_TX_FIFO_EMPTY_MASK(txp);
+			txInProgMask    |= ETH_TX_IN_PROGRESS_MASK(txp);
+#else
+			if (MV_PON_PORT(port)) {
+				txFifoEmptyMask |= PON_TX_FIFO_EMPTY_MASK(txp);
+				txInProgMask |= PON_TX_IN_PROGRESS_MASK(txp);
+			} else {
+				txFifoEmptyMask = ETH_TX_FIFO_EMPTY_MASK;
+				txInProgMask = ETH_TX_IN_PROGRESS_MASK;
+			}
+#endif /* MV_ETH_GMAC_NEW */
+		}
+
+		/* Double check to Verify that TX FIFO is Empty */
+		mDelay = 0;
+		while (MV_TRUE) {
+			do {
+				if (mDelay >= TX_FIFO_EMPTY_TIMEOUT_MSEC) {
+					mvOsPrintf("\n port=%d, TX FIFO empty timeout. status=0x08%x, empty=0x%x, inProg=0x%x\n",
+						port, regData, txFifoEmptyMask, txInProgMask);
+					break;
+				}
+				mvOsDelay(1);
+				mDelay++;
+
+				regData = MV_REG_READ(ETH_PORT_STATUS_REG(port));
+			} while (((regData & txFifoEmptyMask) != txFifoEmptyMask) || ((regData & txInProgMask) != 0));
+
+			if (mDelay >= TX_FIFO_EMPTY_TIMEOUT_MSEC)
+				break;
+
+			/* Double check */
+			regData = MV_REG_READ(ETH_PORT_STATUS_REG(port));
+			if (((regData & txFifoEmptyMask) == txFifoEmptyMask) && ((regData & txInProgMask) == 0)) {
+				break;
+			} else
+				mvOsPrintf("port=%d: TX FIFO Empty double check failed. %d msec, status=0x%x, empty=0x%x, inProg=0x%x\n",
+					 port, mDelay, regData, txFifoEmptyMask, txInProgMask);
+		}
+	}
+	/* Wait about 200 usec */
+	mvOsUDelay(200);
+
+	return MV_OK;
+}
+
+
+MV_STATUS mvNetaRxqOffsetSet(int port, int rxq, int offset)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(NETA_RXQ_CONFIG_REG(port, rxq));
+	regVal &= ~NETA_RXQ_PACKET_OFFSET_ALL_MASK;
+
+	/* Offset is in */
+	regVal |= NETA_RXQ_PACKET_OFFSET_MASK(offset >> 3);
+
+	MV_REG_WRITE(NETA_RXQ_CONFIG_REG(port, rxq), regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS mvNetaBmPoolBufSizeSet(int port, int pool, int bufsize)
+{
+	MV_U32 regVal;
+
+	regVal = MV_ALIGN_UP(bufsize, NETA_POOL_BUF_SIZE_ALIGN);
+	MV_REG_WRITE(NETA_POOL_BUF_SIZE_REG(port, pool), regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS mvNetaRxqBmEnable(int port, int rxq, int shortPool, int longPool)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(NETA_RXQ_CONFIG_REG(port, rxq));
+
+	regVal &= ~(NETA_RXQ_SHORT_POOL_ID_MASK | NETA_RXQ_LONG_POOL_ID_MASK);
+	regVal |= (shortPool << NETA_RXQ_SHORT_POOL_ID_OFFS);
+	regVal |= (longPool << NETA_RXQ_LONG_POOL_ID_OFFS);
+	regVal |= NETA_RXQ_HW_BUF_ALLOC_MASK;
+
+	MV_REG_WRITE(NETA_RXQ_CONFIG_REG(port, rxq), regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS mvNetaRxqBmDisable(int port, int rxq)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(NETA_RXQ_CONFIG_REG(port, rxq));
+
+	regVal &= ~NETA_RXQ_HW_BUF_ALLOC_MASK;
+
+	MV_REG_WRITE(NETA_RXQ_CONFIG_REG(port, rxq), regVal);
+
+	return MV_OK;
+}
+
+/******************************************************************************/
+/*                        WRR / EJP configuration routines                    */
+/******************************************************************************/
+
+/* Set maximum burst rate (using IPG configuration) */
+MV_STATUS mvNetaTxpEjpBurstRateSet(int port, int txp, int txq, int rate)
+{
+
+	if (mvNetaTxpCheck(port, txp))
+		return MV_BAD_PARAM;
+
+	/* Only TXQs 2 and 3 are valid */
+	if ((txq != 2) && (txq != 3)) {
+		mvOsPrintf("%s: txq=%d is INVALID. Only TXQs 2 and 3 are supported\n", __func__, txq);
+		return MV_BAD_PARAM;
+	}
+
+	mvOsPrintf("Not supported\n");
+
+	return MV_OK;
+}
+
+/* Set maximum packet size for each one of EJP priorities (IsoLo, Async) */
+MV_STATUS mvNetaTxpEjpMaxPktSizeSet(int port, int txp, int type, int size)
+{
+	if (mvNetaTxpCheck(port, txp))
+		return MV_BAD_PARAM;
+
+	mvOsPrintf("Not supported\n");
+
+	/* TBD */
+	return MV_OK;
+}
+
+/* TBD - Set Transmit speed for EJP calculations */
+MV_STATUS mvNetaTxpEjpTxSpeedSet(int port, int txp, int type, int speed)
+{
+
+	if (mvNetaTxpCheck(port, txp))
+		return MV_BAD_PARAM;
+
+	/* TBD */
+	mvOsPrintf("Not supported\n");
+
+	return MV_OK;
+}
+
+/* Calculate period and tokens accordingly with required rate and accuracy */
+MV_STATUS mvNetaRateCalc(int rate, unsigned int accuracy, unsigned int *pPeriod, unsigned int *pTokens)
+{
+	/* Calculate refill tokens and period - rate [Kbps] = tokens [bits] * 1000 / period [usec] */
+	/* Assume:  Tclock [MHz] / BasicRefillNoOfClocks = 1 */
+	unsigned int period, tokens, calc;
+
+	if (rate == 0) {
+		/* Disable traffic from the port: tokens = 0 */
+		if (pPeriod != NULL)
+			*pPeriod = 1000;
+
+		if (pTokens != NULL)
+			*pTokens = 0;
+
+		return MV_OK;
+	}
+
+	/* Find values of "period" and "tokens" match "rate" and "accuracy" when period is minimal */
+	for (period = 1; period <= 1000; period++) {
+		tokens = 1;
+		while (MV_TRUE)	{
+			calc = (tokens * 1000) / period;
+			if (((MV_ABS(calc - rate) * 100) / rate) <= accuracy) {
+				if (pPeriod != NULL)
+					*pPeriod = period;
+
+				if (pTokens != NULL)
+					*pTokens = tokens;
+
+				return MV_OK;
+			}
+			if (calc > rate)
+				break;
+
+			tokens++;
+		}
+	}
+	return MV_FAIL;
+}
+
+/* Enable / Disable EJP mode */
+MV_STATUS mvNetaTxpEjpSet(int port, int txp, int enable)
+{
+	MV_U32  regVal;
+
+	if (mvNetaTxpCheck(port, txp))
+		return MV_BAD_PARAM;
+
+	if (enable)
+		regVal = NETA_TX_EJP_ENABLE_MASK;
+	else
+		regVal = 0;
+
+	MV_REG_WRITE(NETA_TX_CMD_1_REG(port, txp), regVal);
+
+	return MV_OK;
+}
+
+
+
+/* Set TXQ to work in FIX priority mode */
+MV_STATUS mvNetaTxqFixPrioSet(int port, int txp, int txq)
+{
+	MV_U32 regVal;
+
+	if (mvNetaTxpCheck(port, txp))
+		return MV_BAD_PARAM;
+
+	if (mvNetaMaxCheck(txq, MV_ETH_MAX_TXQ, "txq"))
+		return MV_BAD_PARAM;
+
+	regVal = MV_REG_READ(NETA_TX_FIXED_PRIO_CFG_REG(port, txp));
+	regVal |= (1 << txq);
+	MV_REG_WRITE(NETA_TX_FIXED_PRIO_CFG_REG(port, txp), regVal);
+
+	return MV_OK;
+}
+
+/* Set TXQ to work in WRR mode and set relative weight. */
+/*   Weight range [1..N] */
+MV_STATUS mvNetaTxqWrrPrioSet(int port, int txp, int txq, int weight)
+{
+	MV_U32 regVal, mtu;
+
+	if (mvNetaTxpCheck(port, txp))
+		return MV_BAD_PARAM;
+
+	if (mvNetaMaxCheck(txq, MV_ETH_MAX_TXQ, "txq"))
+		return MV_BAD_PARAM;
+
+	/* Weight * 256 bytes * 8 bits must be larger then MTU [bits] */
+	mtu = MV_REG_READ(NETA_TXP_MTU_REG(port, txp));
+	/* MTU [bits] -> MTU [256 bytes] */
+	mtu = ((mtu / 8) / 256) + 1;
+/*
+	mvOsPrintf("%s: port=%d, txp=%d, txq=%d, weight=%d, mtu=%d\n",
+			__func__, port, txp, txq, weight, mtu);
+*/
+	if ((weight < mtu) || (weight > NETA_TXQ_WRR_WEIGHT_MAX)) {
+		mvOsPrintf("%s Error: weight=%d is out of range %d...%d\n",
+				__func__, weight, mtu, NETA_TXQ_WRR_WEIGHT_MAX);
+		return MV_FAIL;
+	}
+
+	regVal = MV_REG_READ(NETA_TXQ_WRR_ARBITER_REG(port, txp, txq));
+
+	regVal &= ~NETA_TXQ_WRR_WEIGHT_ALL_MASK;
+	regVal |= NETA_TXQ_WRR_WEIGHT_MASK(weight);
+	MV_REG_WRITE(NETA_TXQ_WRR_ARBITER_REG(port, txp, txq), regVal);
+
+	regVal = MV_REG_READ(NETA_TX_FIXED_PRIO_CFG_REG(port, txp));
+	regVal &= ~(1 << txq);
+	MV_REG_WRITE(NETA_TX_FIXED_PRIO_CFG_REG(port, txp), regVal);
+
+	return MV_OK;
+}
+
+/* Set minimum number of tockens to start transmit for TX port
+ *   maxTxSize [bytes]    - maximum packet size can be sent via this TX port
+ */
+MV_STATUS   mvNetaTxpMaxTxSizeSet(int port, int txp, int maxTxSize)
+{
+	MV_U32	regVal, size, mtu;
+	int		txq;
+
+	if (mvNetaTxpCheck(port, txp))
+		return MV_BAD_PARAM;
+
+	mtu = maxTxSize * 8;
+	if (mtu > NETA_TXP_MTU_MAX)
+		mtu = NETA_TXP_MTU_MAX;
+
+	/* set MTU */
+	regVal = MV_REG_READ(NETA_TXP_MTU_REG(port, txp));
+	regVal &= ~NETA_TXP_MTU_ALL_MASK;
+	regVal |= NETA_TXP_MTU_MASK(mtu);
+
+	MV_REG_WRITE(NETA_TXP_MTU_REG(port, txp), regVal);
+
+	/* TXP token size and all TXQs token size must be larger that MTU */
+	regVal = MV_REG_READ(NETA_TXP_TOKEN_SIZE_REG(port, txp));
+	size = regVal & NETA_TXP_TOKEN_SIZE_MAX;
+	if (size < mtu) {
+		size = mtu;
+		regVal &= ~NETA_TXP_TOKEN_SIZE_MAX;
+		regVal |= size;
+		MV_REG_WRITE(NETA_TXP_TOKEN_SIZE_REG(port, txp), regVal);
+	}
+	for (txq = 0; txq < CONFIG_MV_ETH_TXQ; txq++) {
+		regVal = MV_REG_READ(NETA_TXQ_TOKEN_SIZE_REG(port, txp, txq));
+		size = regVal & NETA_TXQ_TOKEN_SIZE_MAX;
+		if (size < mtu) {
+			size = mtu;
+			regVal &= ~NETA_TXQ_TOKEN_SIZE_MAX;
+			regVal |= size;
+			MV_REG_WRITE(NETA_TXQ_TOKEN_SIZE_REG(port, txp, txq), regVal);
+		}
+	}
+	return MV_OK;
+}
+
+/* Set bandwidth limitation for TX port
+ *   rate [Kbps]    - steady state TX bandwidth limitation
+ */
+MV_STATUS   mvNetaTxpRateSet(int port, int txp, int rate)
+{
+	MV_U32		regVal;
+	unsigned int	tokens, period, accuracy = 0;
+	MV_STATUS	status;
+
+	if (mvNetaTxpCheck(port, txp))
+		return MV_BAD_PARAM;
+
+	regVal = MV_REG_READ(NETA_TX_REFILL_PERIOD_REG(port, txp));
+
+	status = mvNetaRateCalc(rate, accuracy, &period, &tokens);
+	if (status != MV_OK) {
+		mvOsPrintf("%s: Can't provide rate of %d [Kbps] with accuracy of %d [%%]\n",
+				__func__, rate, accuracy);
+		return status;
+	}
+	if (tokens > NETA_TXP_REFILL_TOKENS_MAX)
+		tokens = NETA_TXP_REFILL_TOKENS_MAX;
+
+	if (period > NETA_TXP_REFILL_PERIOD_MAX)
+		period = NETA_TXP_REFILL_PERIOD_MAX;
+
+	regVal = MV_REG_READ(NETA_TXP_REFILL_REG(port, txp));
+
+	regVal &= ~NETA_TXP_REFILL_TOKENS_ALL_MASK;
+	regVal |= NETA_TXP_REFILL_TOKENS_MASK(tokens);
+
+	regVal &= ~NETA_TXP_REFILL_PERIOD_ALL_MASK;
+	regVal |= NETA_TXP_REFILL_PERIOD_MASK(period);
+
+	MV_REG_WRITE(NETA_TXP_REFILL_REG(port, txp), regVal);
+
+	return MV_OK;
+}
+
+/* Set maximum burst size for TX port
+ *   burst [bytes] - number of bytes to be sent with maximum possible TX rate,
+ *                    before TX rate limitation will take place.
+ */
+MV_STATUS mvNetaTxpBurstSet(int port, int txp, int burst)
+{
+	MV_U32  size, mtu;
+
+	if (mvNetaTxpCheck(port, txp))
+		return MV_BAD_PARAM;
+
+	/* Calulate Token Bucket Size */
+	size = 8 * burst;
+
+	if (size > NETA_TXP_TOKEN_SIZE_MAX)
+		size = NETA_TXP_TOKEN_SIZE_MAX;
+
+	/* Token bucket size must be larger then MTU */
+	mtu = MV_REG_READ(NETA_TXP_MTU_REG(port, txp));
+	if (mtu > size) {
+		mvOsPrintf("%s Error: Bucket size (%d bytes) < MTU (%d bytes)\n",
+					__func__, (size / 8), (mtu / 8));
+		return MV_BAD_PARAM;
+	}
+	MV_REG_WRITE(NETA_TXP_TOKEN_SIZE_REG(port, txp), size);
+
+	return MV_OK;
+}
+
+/* Set bandwidth limitation for TXQ
+ *   rate  [Kbps]  - steady state TX rate limitation
+ */
+MV_STATUS   mvNetaTxqRateSet(int port, int txp, int txq, int rate)
+{
+	MV_U32		regVal;
+	unsigned int	period, tokens, accuracy = 0;
+	MV_STATUS	status;
+
+	if (mvNetaTxpCheck(port, txp))
+		return MV_BAD_PARAM;
+
+	if (mvNetaMaxCheck(txq, MV_ETH_MAX_TXQ, "txq"))
+		return MV_BAD_PARAM;
+
+	status = mvNetaRateCalc(rate, accuracy, &period, &tokens);
+	if (status != MV_OK) {
+		mvOsPrintf("%s: Can't provide rate of %d [Kbps] with accuracy of %d [%%]\n",
+				__func__, rate, accuracy);
+		return status;
+	}
+
+	if (tokens > NETA_TXQ_REFILL_TOKENS_MAX)
+		tokens = NETA_TXQ_REFILL_TOKENS_MAX;
+
+	if (period > NETA_TXQ_REFILL_PERIOD_MAX)
+		period = NETA_TXQ_REFILL_PERIOD_MAX;
+
+	regVal = MV_REG_READ(NETA_TXQ_REFILL_REG(port, txp, txq));
+
+	regVal &= ~NETA_TXQ_REFILL_TOKENS_ALL_MASK;
+	regVal |= NETA_TXQ_REFILL_TOKENS_MASK(tokens);
+
+	regVal &= ~NETA_TXQ_REFILL_PERIOD_ALL_MASK;
+	regVal |= NETA_TXQ_REFILL_PERIOD_MASK(period);
+
+	MV_REG_WRITE(NETA_TXQ_REFILL_REG(port, txp, txq), regVal);
+
+	return MV_OK;
+}
+
+/* Set maximum burst size for TX port
+ *   burst [bytes] - number of bytes to be sent with maximum possible TX rate,
+ *                    before TX bandwidth limitation will take place.
+ */
+MV_STATUS mvNetaTxqBurstSet(int port, int txp, int txq, int burst)
+{
+	MV_U32  size, mtu;
+
+	if (mvNetaTxpCheck(port, txp))
+		return MV_BAD_PARAM;
+
+	if (mvNetaMaxCheck(txq, MV_ETH_MAX_TXQ, "txq"))
+		return MV_BAD_PARAM;
+
+	/* Calulate Tocket Bucket Size */
+	size = 8 * burst;
+
+	if (size > NETA_TXQ_TOKEN_SIZE_MAX)
+		size = NETA_TXQ_TOKEN_SIZE_MAX;
+
+	/* Tocken bucket size must be larger then MTU */
+	mtu = MV_REG_READ(NETA_TXP_MTU_REG(port, txp));
+	if (mtu > size) {
+		mvOsPrintf("%s Error: Bucket size (%d bytes) < MTU (%d bytes)\n",
+					__func__, (size / 8), (mtu / 8));
+		return MV_BAD_PARAM;
+	}
+
+	MV_REG_WRITE(NETA_TXQ_TOKEN_SIZE_REG(port, txp, txq), size);
+
+	return MV_OK;
+}
+
+/************************ Legacy parse function start *******************************/
+/******************************************************************************/
+/*                        RX Dispatching configuration routines               */
+/******************************************************************************/
+
+MV_STATUS mvNetaTcpRxq(int port, int rxq)
+{
+	MV_U32 portCfgReg;
+
+	if ((rxq < 0) || (rxq >= MV_ETH_MAX_RXQ)) {
+		mvOsPrintf("ethDrv: RX queue #%d is out of range\n", rxq);
+		return MV_BAD_PARAM;
+	}
+	portCfgReg = MV_REG_READ(ETH_PORT_CONFIG_REG(port));
+
+	portCfgReg &= ~ETH_DEF_RX_TCP_QUEUE_ALL_MASK;
+	portCfgReg |= ETH_DEF_RX_TCP_QUEUE_MASK(rxq);
+	portCfgReg |= ETH_CAPTURE_TCP_FRAMES_ENABLE_MASK;
+
+	MV_REG_WRITE(ETH_PORT_CONFIG_REG(port), portCfgReg);
+
+	return MV_OK;
+}
+
+MV_STATUS mvNetaUdpRxq(int port, int rxq)
+{
+	MV_U32 portCfgReg;
+
+	if ((rxq < 0) || (rxq >= MV_ETH_MAX_RXQ)) {
+		mvOsPrintf("ethDrv: RX queue #%d is out of range\n", rxq);
+		return MV_BAD_PARAM;
+	}
+
+	portCfgReg = MV_REG_READ(ETH_PORT_CONFIG_REG(port));
+
+	portCfgReg &= ~ETH_DEF_RX_UDP_QUEUE_ALL_MASK;
+	portCfgReg |= ETH_DEF_RX_UDP_QUEUE_MASK(rxq);
+	portCfgReg |= ETH_CAPTURE_UDP_FRAMES_ENABLE_MASK;
+
+	MV_REG_WRITE(ETH_PORT_CONFIG_REG(port), portCfgReg);
+
+	return MV_OK;
+}
+
+MV_STATUS mvNetaArpRxq(int port, int rxq)
+{
+	MV_U32 portCfgReg;
+
+	if ((rxq < 0) || (rxq >= MV_ETH_MAX_RXQ)) {
+		mvOsPrintf("ethDrv: RX queue #%d is out of range\n", rxq);
+		return MV_BAD_PARAM;
+	}
+
+	portCfgReg = MV_REG_READ(ETH_PORT_CONFIG_REG(port));
+
+	portCfgReg &= ~ETH_DEF_RX_ARP_QUEUE_ALL_MASK;
+	portCfgReg |= ETH_DEF_RX_ARP_QUEUE_MASK(rxq);
+	portCfgReg &= (~ETH_REJECT_ARP_BCAST_MASK);
+
+	MV_REG_WRITE(ETH_PORT_CONFIG_REG(port), portCfgReg);
+
+	return MV_OK;
+}
+
+MV_STATUS mvNetaBpduRxq(int port, int rxq)
+{
+	MV_U32 portCfgReg;
+	MV_U32 portCfgExtReg;
+
+	if ((rxq < 0) || (rxq >= MV_ETH_MAX_RXQ)) {
+		mvOsPrintf("ethDrv: RX queue #%d is out of range\n", rxq);
+		return MV_BAD_PARAM;
+	}
+
+	portCfgExtReg = MV_REG_READ(ETH_PORT_CONFIG_EXTEND_REG(port));
+	portCfgReg = MV_REG_READ(ETH_PORT_CONFIG_REG(port));
+
+	portCfgReg &= ~ETH_DEF_RX_BPDU_QUEUE_ALL_MASK;
+	portCfgReg |= ETH_DEF_RX_BPDU_QUEUE_MASK(rxq);
+
+	MV_REG_WRITE(ETH_PORT_CONFIG_REG(port), portCfgReg);
+
+	portCfgExtReg |= ETH_CAPTURE_SPAN_BPDU_ENABLE_MASK;
+
+	MV_REG_WRITE(ETH_PORT_CONFIG_EXTEND_REG(port), portCfgExtReg);
+
+	return MV_OK;
+}
+/************************ Legacy parse function end *******************************/
+
+/******************************************************************************/
+/*                      MIB Counters functions                                */
+/******************************************************************************/
+
+/*******************************************************************************
+* mvNetaMibCounterRead - Read a MIB counter
+*
+* DESCRIPTION:
+*       This function reads a MIB counter of a specific ethernet port.
+*       NOTE - Read from ETH_MIB_GOOD_OCTETS_RECEIVED_LOW or
+*              ETH_MIB_GOOD_OCTETS_SENT_LOW counters will return 64 bits value,
+*              so pHigh32 pointer should not be NULL in this case.
+*
+* INPUT:
+*       port        - Ethernet Port number.
+*       mib         - MIB number
+*       mibOffset   - MIB counter offset.
+*
+* OUTPUT:
+*       MV_U32*       pHigh32 - pointer to place where 32 most significant bits
+*                             of the counter will be stored.
+*
+* RETURN:
+*       32 low sgnificant bits of MIB counter value.
+*
+*******************************************************************************/
+MV_U32 mvNetaMibCounterRead(int port, int mib, unsigned int mibOffset, MV_U32 *pHigh32)
+{
+	MV_U32 valLow32, valHigh32;
+
+	valLow32 = MV_REG_READ(ETH_MIB_COUNTERS_BASE(port, mib) + mibOffset);
+
+	/* Implement FEr ETH. Erroneous Value when Reading the Upper 32-bits    */
+	/* of a 64-bit MIB Counter.                                             */
+	if ((mibOffset == ETH_MIB_GOOD_OCTETS_RECEIVED_LOW) || (mibOffset == ETH_MIB_GOOD_OCTETS_SENT_LOW)) {
+		valHigh32 = MV_REG_READ(ETH_MIB_COUNTERS_BASE(port, mib) + mibOffset + 4);
+		if (pHigh32 != NULL)
+			*pHigh32 = valHigh32;
+	}
+	return valLow32;
+}
+
+/*******************************************************************************
+* mvNetaMibCountersClear - Clear all MIB counters
+*
+* DESCRIPTION:
+*       This function clears all MIB counters
+*
+* INPUT:
+*       port      - Ethernet Port number.
+*       mib       - MIB number
+*
+*
+* RETURN:   void
+*
+*******************************************************************************/
+void mvNetaMibCountersClear(int port, int mib)
+{
+	int i;
+
+#if defined(CONFIG_MV_PON) && !defined(MV_PON_MIB_SUPPORT)
+	if (MV_PON_PORT(port))
+		return;
+#endif /* CONFIG_MV_PON && !MV_PON_MIB_SUPPORT */
+
+	/* Perform dummy reads from MIB counters */
+	for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION; i += 4)
+		 MV_REG_READ((ETH_MIB_COUNTERS_BASE(port, mib) + i));
+}
+
+#if defined(CONFIG_MV_PON) && defined(MV_PON_MIB_SUPPORT)
+
+/* Set default MIB counters set for RX packets. mib==-1 means don't count */
+MV_STATUS   mvNetaPonRxMibDefault(int mib)
+{
+	MV_U32  regVal = 0;
+
+	regVal = MV_REG_READ(NETA_PON_MIB_RX_DEF_REG);
+	if (mib == -1) {
+		/* Don't count default packets that not match Gem portID */
+		regVal &= ~NETA_PON_MIB_RX_VALID_MASK;
+	} else {
+		if (mvNetaMaxCheck(mib, MV_ETH_MAX_TCONT, "tcont"))
+			return MV_BAD_PARAM;
+
+		regVal &= ~NETA_PON_MIB_RX_MIB_NO_MASK;
+		regVal |= NETA_PON_MIB_RX_VALID_MASK | NETA_PON_MIB_RX_MIB_NO(mib);
+	}
+	MV_REG_WRITE(NETA_PON_MIB_RX_DEF_REG, regVal);
+
+	return MV_OK;
+}
+
+/* Set MIB counters set used for RX packets with special gemPid. mib==-1 means delete entry */
+MV_STATUS   mvNetaPonRxMibGemPid(int mib, MV_U16 gemPid)
+{
+	MV_U32	regVal;
+	int	i, free = -1;
+
+	if ((mib != -1) && mvNetaMaxCheck(mib, MV_ETH_MAX_TCONT, "tcont"))
+		return MV_BAD_PARAM;
+
+	/* look for gemPid if exist of first free entry */
+	for (i = 0; i < NETA_PON_MIB_MAX_GEM_PID; i++) {
+		regVal = MV_REG_READ(NETA_PON_MIB_RX_CTRL_REG(i));
+		if ((regVal & NETA_PON_MIB_RX_VALID_MASK) &&
+		    ((regVal & NETA_PON_MIB_RX_GEM_PID_ALL_MASK) ==
+			NETA_PON_MIB_RX_GEM_PID_MASK(gemPid))) {
+			/* Entry for this gemPid exist */
+			if (mib == -1) {
+				/* Delete entry */
+				regVal &= ~NETA_PON_MIB_RX_VALID_MASK;
+			} else {
+				/* update mibNo */
+				regVal &= ~NETA_PON_MIB_RX_MIB_NO_MASK;
+				regVal |= NETA_PON_MIB_RX_MIB_NO(mib);
+			}
+			MV_REG_WRITE(NETA_PON_MIB_RX_CTRL_REG(i), regVal);
+
+			return MV_OK;
+		}
+		if ((free == -1) && ((regVal & NETA_PON_MIB_RX_VALID_MASK) == 0)) {
+			/* remember first invalid entry */
+			free = i;
+		}
+	}
+	if (mib == -1)	{
+		mvOsPrintf("%s: Can't delete entry for gemPid=0x%x - NOT found\n",
+					__func__, gemPid);
+		return MV_NOT_FOUND;
+	}
+	if (free == -1)	{
+		mvOsPrintf("%s: No free entry for gemPid=0x%x, rxMib=%d\n",
+					__func__, gemPid, mib);
+		return MV_FULL;
+	}
+	regVal = NETA_PON_MIB_RX_VALID_MASK | NETA_PON_MIB_RX_MIB_NO(mib) | NETA_PON_MIB_RX_GEM_PID_MASK(gemPid);
+	MV_REG_WRITE(NETA_PON_MIB_RX_CTRL_REG(free), regVal);
+
+    return MV_OK;
+}
+#endif /* CONFIG_MV_PON && MV_PON_MIB_SUPPORT */
diff --git a/drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvNeta.h b/drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvNeta.h
new file mode 100644
index 000000000000..14df9356d571
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvNeta.h
@@ -0,0 +1,873 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __mvNeta_h__
+#define __mvNeta_h__
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "mvTypes.h"
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "mvNetaRegs.h"
+#include "mvEthRegs.h"
+
+#ifdef CONFIG_MV_ETH_PNC
+# include "pnc/mvPnc.h"
+#endif /* CONFIG_MV_ETH_PNC */
+
+#ifdef CONFIG_MV_NETA_DEBUG_CODE
+# define mvNetaDebugPrintf      mvOsPrintf
+#else
+# define mvNetaDebugPrintf(msg, ...)
+#endif /* CONFIG_MV_NETA_DEBUG_CODE */
+
+#ifndef MV_ETH_MAX_TCONT
+# define MV_ETH_MAX_TCONT 1
+#endif
+
+#ifdef CONFIG_MV_ETH_NFP
+
+#ifdef CONFIG_MV_ETH_NFP_EXT
+# define NFP_EXT_NUM 	CONFIG_MV_ETH_NFP_EXT_NUM
+#else
+# define NFP_EXT_NUM 	0
+#endif
+
+#define NFP_MAX_PORTS   (MV_ETH_MAX_PORTS + NFP_EXT_NUM)
+
+typedef struct {
+	void   *dev;
+	MV_U32 tx_cmd;
+	MV_U32 diffL4[2];
+	MV_U8  *pWrite;
+	MV_U16 flags;
+	MV_U16 mtu;
+	short  shift;
+	MV_U8  txp;
+	MV_U8  txq;
+	MV_IP_HEADER_INFO ipInfo;
+	void   *privateData;
+} MV_NFP_RESULT;
+
+#define MV_NFP_RES_TXP_VALID       0x0001
+#define MV_NFP_RES_TXQ_VALID       0x0002
+#define MV_NFP_RES_IP_INFO_VALID   0x0004
+#define MV_NFP_RES_NETDEV_EXT      0x0010
+#define MV_NFP_RES_L4_CSUM_NEEDED  0x0020
+
+#endif /* CONFIG_MV_ETH_NFP */
+
+#define NETA_RX_IP_IS_FRAG(status)     ((status) & NETA_RX_IP4_FRAG_MASK)
+#define NETA_RX_IP_SET_FRAG(rxd)       ((rxd)->status |= NETA_RX_IP4_FRAG_MASK)
+
+#define NETA_RX_L4_CSUM_IS_OK(status)  ((status) & NETA_RX_L4_CSUM_OK_MASK)
+#define	NETA_RX_L4_CSUM_SET_OK(rxd)    ((rxd)->status |= NETA_RX_L4_CSUM_OK_MASK)
+
+#define NETA_RX_GET_IPHDR_OFFSET(rxd)       (((rxd)->status & NETA_RX_L3_OFFSET_MASK) >> NETA_RX_L3_OFFSET_OFFS)
+#define NETA_RX_SET_IPHDR_OFFSET(rxd, offs) ((rxd)->status |= ((offs) << NETA_RX_L3_OFFSET_OFFS) & NETA_RX_L3_OFFSET_MASK)
+
+#define NETA_RX_GET_IPHDR_HDRLEN(rxd)       (((rxd)->status & NETA_RX_IP_HLEN_MASK) >> NETA_RX_IP_HLEN_OFFS)
+#define NETA_RX_SET_IPHDR_HDRLEN(rxd, hlen) ((rxd)->status |= ((hlen) << NETA_RX_IP_HLEN_OFFS) & NETA_RX_IP_HLEN_MASK)
+
+#ifdef CONFIG_MV_ETH_PNC
+
+#define NETA_RX_IS_PPPOE(rxd)          ((rxd)->pncInfo & NETA_PNC_PPPOE)
+#define NETA_RX_SET_PPPOE(rxd)         ((rxd)->pncInfo |= NETA_PNC_PPPOE)
+
+#define NETA_RX_IS_VLAN(rxd)           ((rxd)->pncInfo & NETA_PNC_VLAN)
+#define NETA_RX_SET_VLAN(rxd)          ((rxd)->pncInfo |= NETA_PNC_VLAN)
+
+#define NETA_RX_L3_IS_IP4(status)      (((status) & NETA_RX_L3_MASK) == NETA_RX_L3_IP4)
+#define NETA_RX_L3_SET_IP4(rxd)        ((rxd)->status |= NETA_RX_L3_IP4)
+
+#define NETA_RX_L3_IS_IP4_ERR(status)  (((status) & NETA_RX_L3_MASK) == NETA_RX_L3_IP4_ERR)
+#define NETA_RX_L3_SET_IP4_ERR(rxd)    ((rxd)->status |= NETA_RX_L3_IP4_ERR)
+
+#define NETA_RX_L3_IS_IP6(status)      (((status) & NETA_RX_L3_MASK) == NETA_RX_L3_IP6)
+#define NETA_RX_L3_SET_IP6(rxd)        ((rxd)->status |= NETA_RX_L3_IP6)
+
+#define NETA_RX_L3_IS_UN(status)       (((status) & NETA_RX_L3_MASK) == NETA_RX_L3_UN)
+#define NETA_RX_L3_SET_UN(rxd)         ((rxd)->status |= NETA_RX_L3_UN)
+
+#define NETA_RX_L4_IS_TCP(status)      (((status) & NETA_RX_L4_MASK) == NETA_RX_L4_TCP)
+#define NETA_RX_L4_SET_TCP(rxd)        ((rxd)->status |= NETA_RX_L4_TCP)
+
+#define NETA_RX_L4_IS_UDP(status)      (((status) & NETA_RX_L4_MASK) == NETA_RX_L4_UDP)
+#define NETA_RX_L4_SET_UDP(rxd)        ((rxd)->status |= NETA_RX_L4_UDP)
+
+#define NETA_RX_L4_IS_OTHER(status)    (((status) & NETA_RX_L4_MASK) == NETA_RX_L4_OTHER)
+#define NETA_RX_L4_SET_OTHER(rxd)      ((rxd)->status |= NETA_RX_L4_OTHER)
+
+#else /* LEGACY parser */
+
+#define NETA_RX_IS_PPPOE(rxd)          (MV_FALSE)
+#define NETA_RX_SET_PPPOE(rxd)
+
+#define NETA_RX_IS_VLAN(rxd)           ((rxd)->status & ETH_RX_VLAN_TAGGED_FRAME_MASK)
+#define NETA_RX_SET_VLAN(rxd)          ((rxd)->status |= ETH_RX_VLAN_TAGGED_FRAME_MASK)
+
+#ifdef MV_ETH_LEGACY_PARSER_IPV6
+
+#define NETA_RX_L3_IS_IP4(status)      (((status) & NETA_RX_L3_MASK) == NETA_RX_L3_IP4)
+#define NETA_RX_L3_SET_IP4(rxd)        ((rxd)->status |= NETA_RX_L3_IP4)
+
+#define NETA_RX_L3_IS_IP4_ERR(status)  (((status) & NETA_RX_L3_MASK) == NETA_RX_L3_IP4_ERR)
+#define NETA_RX_L3_SET_IP4_ERR(rxd)    ((rxd)->status |= NETA_RX_L3_IP4_ERR)
+
+#define NETA_RX_L3_IS_IP6(status)      (((status) & NETA_RX_L3_MASK) == NETA_RX_L3_IP6)
+#define NETA_RX_L3_SET_IP6(rxd)        ((rxd)->status |= NETA_RX_L3_IP6)
+
+#define NETA_RX_L3_IS_UN(status)       (((status) & NETA_RX_L3_MASK) == NETA_RX_L3_UN)
+#define NETA_RX_L3_SET_UN(rxd)         ((rxd)->status |= NETA_RX_L3_UN)
+
+#else
+
+#define NETA_RX_L3_IS_IP4(status)      ((status) & ETH_RX_IP_HEADER_OK_MASK)
+#define NETA_RX_L3_SET_IP4(rxd)        ((rxd)->status |= (ETH_RX_IP_HEADER_OK_MASK | ETH_RX_IP_FRAME_TYPE_MASK))
+
+#define NETA_RX_L3_IS_IP4_ERR(status)  (((status) & ETH_RX_IP_FRAME_TYPE_MASK) &&	\
+					!((status) & ETH_RX_IP_HEADER_OK_MASK))
+
+#define NETA_RX_L3_SET_IP4_ERR(rxd) do {                \
+	((rxd)->status |= ETH_RX_IP_FRAME_TYPE_MASK);	\
+	((rxd)->status &= ~ETH_RX_IP_HEADER_OK_MASK);   \
+} while (0)
+
+#define NETA_RX_L3_IS_IP6(status)      (MV_FALSE)
+#define NETA_RX_L3_SET_IP6(rxd)        NETA_RX_L3_SET_UN(rxd)
+
+#define NETA_RX_L3_IS_UN(status)       (((status) & ETH_RX_IP_FRAME_TYPE_MASK) == 0)
+#define NETA_RX_L3_SET_UN(rxd)         ((rxd)->status &= ~ETH_RX_IP_FRAME_TYPE_MASK)
+
+#endif /* MV_ETH_LEGACY_PARSER_IPV6 */
+
+#define NETA_RX_L4_IS_TCP(status)      (((status) & ETH_RX_L4_TYPE_MASK) == ETH_RX_L4_TCP_TYPE)
+#define NETA_RX_L4_SET_TCP(rxd)        ((rxd)->status |= ETH_RX_L4_TCP_TYPE)
+
+#define NETA_RX_L4_IS_UDP(status)      (((status) & ETH_RX_L4_TYPE_MASK) == ETH_RX_L4_UDP_TYPE)
+#define NETA_RX_L4_SET_UDP(rxd)        ((rxd)->status |= ETH_RX_L4_UDP_TYPE)
+
+#define NETA_RX_L4_IS_OTHER(status)    (((status) & ETH_RX_L4_TYPE_MASK) == ETH_RX_L4_OTHER_TYPE)
+#define NETA_RX_L4_SET_OTHER(rxd)      ((rxd)->status |= ETH_RX_L4_OTHER_TYPE)
+
+#endif	/* CONFIG_MV_ETH_PNC */
+
+/* Default port configuration value */
+#define PORT_CONFIG_VALUE(rxQ)			\
+	(					\
+	ETH_DEF_RX_QUEUE_MASK(rxQ) |		\
+	ETH_DEF_RX_ARP_QUEUE_MASK(rxQ) |	\
+	ETH_DEF_RX_TCP_QUEUE_MASK(rxQ) |	\
+	ETH_DEF_RX_UDP_QUEUE_MASK(rxQ) |	\
+	ETH_DEF_RX_BPDU_QUEUE_MASK(rxQ) |	\
+	ETH_TX_NO_SET_ERROR_SUMMARY_MASK |	\
+	ETH_RX_CHECKSUM_WITH_PSEUDO_HDR		\
+	)
+
+/* Default port extend configuration value */
+#define PORT_CONFIG_EXTEND_VALUE            0
+
+#ifdef MV_ETH_GMAC_NEW
+#define PORT_SERIAL_CONTROL_VALUE				0
+#else
+#define PORT_SERIAL_CONTROL_VALUE		\
+	(					\
+	ETH_DISABLE_FC_AUTO_NEG_MASK |		\
+	BIT9 |					\
+	ETH_DO_NOT_FORCE_LINK_FAIL_MASK |	\
+	ETH_SET_FULL_DUPLEX_MASK		\
+	)
+#endif /* MV_ETH_GMAC_NEW */
+
+typedef enum {
+	MV_ETH_SPEED_AN,
+	MV_ETH_SPEED_10,
+	MV_ETH_SPEED_100,
+	MV_ETH_SPEED_1000
+} MV_ETH_PORT_SPEED;
+
+typedef enum {
+	MV_ETH_DUPLEX_AN,
+	MV_ETH_DUPLEX_HALF,
+	MV_ETH_DUPLEX_FULL
+} MV_ETH_PORT_DUPLEX;
+
+typedef enum {
+	MV_ETH_FC_AN_NO,
+	MV_ETH_FC_AN_SYM,
+	MV_ETH_FC_AN_ASYM,
+	MV_ETH_FC_DISABLE,
+	MV_ETH_FC_ENABLE,
+	MV_ETH_FC_ACTIVE
+
+} MV_ETH_PORT_FC;
+
+
+typedef enum {
+	MV_ETH_PRIO_FIXED = 0,	/* Fixed priority mode */
+	MV_ETH_PRIO_WRR = 1	/* Weighted round robin priority mode */
+} MV_ETH_PRIO_MODE;
+
+/* Ethernet port specific infomation */
+typedef struct eth_link_status {
+	MV_BOOL				linkup;
+	MV_ETH_PORT_SPEED	speed;
+	MV_ETH_PORT_DUPLEX	duplex;
+	MV_ETH_PORT_FC		rxFc;
+	MV_ETH_PORT_FC		txFc;
+
+} MV_ETH_PORT_STATUS;
+
+typedef enum {
+	MV_NETA_MH_NONE = 0,
+	MV_NETA_MH = 1,
+	MV_NETA_DSA = 2,
+	MV_NETA_DSA_EXT = 3
+} MV_NETA_MH_MODE;
+
+typedef struct {
+	MV_U32 maxPort;
+	MV_U32 tClk;
+	MV_U32 cpuMask;
+	MV_BOOL	iocc;
+	MV_U16 ctrlModel;       /* Controller Model     */
+	MV_U8  ctrlRev;         /* Controller Revision  */
+
+#ifdef CONFIG_MV_ETH_BM
+	MV_ULONG bmPhysBase;
+	MV_U8 *bmVirtBase;
+#endif /* CONFIG_MV_ETH_BM */
+
+#ifdef CONFIG_MV_ETH_PNC
+	MV_U32 pncTcamSize;
+	MV_ULONG pncPhysBase;
+	MV_U8 *pncVirtBase;
+#endif /* CONFIG_MV_ETH_PNC */
+
+	/* Obsolete fields - unused */
+	MV_U32 pClk;
+	MV_U32 portMask;
+	int    maxCPUs;
+} MV_NETA_HAL_DATA;
+
+typedef struct eth_pbuf {
+	void *osInfo;
+	MV_ULONG physAddr;
+	MV_U8 *pBuf;
+	MV_U16 bytes;
+	MV_U16 offset;
+	MV_U8  pool;
+	MV_U8  reserved;
+	MV_U16 vlanId;
+} MV_ETH_PKT;
+
+typedef struct {
+	char *pFirst;
+	int lastDesc;
+	int nextToProc;
+	int descSize;
+	MV_BUF_INFO descBuf;
+
+} MV_NETA_QUEUE_CTRL;
+
+#define MV_NETA_QUEUE_DESC_PTR(pQueueCtrl, descIdx)                 \
+    ((pQueueCtrl)->pFirst + ((descIdx) * NETA_DESC_ALIGNED_SIZE))
+
+#define MV_NETA_QUEUE_NEXT_DESC(pQueueCtrl, descIdx)  \
+    (((descIdx) < (pQueueCtrl)->lastDesc) ? ((descIdx) + 1) : 0)
+
+#define MV_NETA_QUEUE_PREV_DESC(pQueueCtrl, descIdx)  \
+    (((descIdx) > 0) ? ((descIdx) - 1) : (pQueueCtrl)->lastDesc)
+
+typedef struct {
+	MV_NETA_QUEUE_CTRL queueCtrl;
+
+} MV_NETA_RXQ_CTRL;
+
+typedef struct {
+	MV_NETA_QUEUE_CTRL queueCtrl;
+
+} MV_NETA_TXQ_CTRL;
+
+typedef struct {
+	int portNo;
+	MV_NETA_RXQ_CTRL *pRxQueue;
+	MV_NETA_TXQ_CTRL *pTxQueue;
+	int rxqNum;
+	int txpNum;
+	int txqNum;
+	MV_U8 mcastCount[256];
+	void *osHandle;
+} MV_NETA_PORT_CTRL;
+
+extern MV_NETA_PORT_CTRL **mvNetaPortCtrl;
+extern MV_NETA_HAL_DATA mvNetaHalData;
+extern unsigned int neta_cap_bitmap;
+/* NETA dynamic capabilities bitmap define */
+#define MV_ETH_CAP_PNC		(0x00000001)
+#define MV_ETH_CAP_BM		(0x00000002)
+#define MV_ETH_CAP_HWF		(0x00000004)
+#define MV_ETH_CAP_PME		(0x00000008)
+/* NETA dynamic capabilities macro */
+#define MV_NETA_BM_CAP()	(MV_ETH_CAP_BM & neta_cap_bitmap)
+#define MV_NETA_PNC_CAP()	(MV_ETH_CAP_PNC & neta_cap_bitmap)
+#define MV_NETA_HWF_CAP()	(MV_ETH_CAP_HWF & neta_cap_bitmap)
+#define MV_NETA_PMT_CAP()	(MV_ETH_CAP_PME & neta_cap_bitmap)
+
+/* Get Giga port handler */
+static INLINE MV_NETA_PORT_CTRL *mvNetaPortHndlGet(int port)
+{
+	return mvNetaPortCtrl[port];
+}
+
+/* Get RX queue handler */
+static INLINE MV_NETA_RXQ_CTRL *mvNetaRxqHndlGet(int port, int rxq)
+{
+	return &mvNetaPortCtrl[port]->pRxQueue[rxq];
+}
+
+/* Get TX queue handler */
+static INLINE MV_NETA_TXQ_CTRL *mvNetaTxqHndlGet(int port, int txp, int txq)
+{
+	MV_NETA_PORT_CTRL *pPortCtrl = mvNetaPortCtrl[port];
+
+	return &pPortCtrl->pTxQueue[txp * pPortCtrl->txqNum + txq];
+}
+
+#if defined(MV_CPU_BE) && defined(CONFIG_MV_ETH_BE_WA)
+/* Swap RX descriptor to be BE */
+static INLINE void mvNetaRxqDescSwap(NETA_RX_DESC *pRxDesc)
+{
+	pRxDesc->status = MV_BYTE_SWAP_32BIT(pRxDesc->status);
+	pRxDesc->pncInfo = MV_BYTE_SWAP_16BIT(pRxDesc->pncInfo);
+	pRxDesc->dataSize =  MV_BYTE_SWAP_16BIT(pRxDesc->dataSize);
+	pRxDesc->bufPhysAddr = MV_BYTE_SWAP_32BIT(pRxDesc->bufPhysAddr);
+	pRxDesc->pncFlowId = MV_BYTE_SWAP_32BIT(pRxDesc->pncFlowId);
+	/* pRxDesc->bufCookie = MV_BYTE_SWAP_32BIT(pRxDesc->bufCookie); */
+	pRxDesc->prefetchCmd = MV_BYTE_SWAP_16BIT(pRxDesc->prefetchCmd);
+	pRxDesc->csumL4 = MV_BYTE_SWAP_16BIT(pRxDesc->csumL4);
+	pRxDesc->pncExtra = MV_BYTE_SWAP_32BIT(pRxDesc->pncExtra);
+	pRxDesc->hw_cmd = MV_BYTE_SWAP_32BIT(pRxDesc->hw_cmd);
+}
+
+/* Swap TX descriptor to be BE */
+static INLINE void mvNetaTxqDescSwap(NETA_TX_DESC *pTxDesc)
+{
+	pTxDesc->command = MV_BYTE_SWAP_32BIT(pTxDesc->command);
+	pTxDesc->csumL4 = MV_BYTE_SWAP_16BIT(pTxDesc->csumL4);
+	pTxDesc->dataSize = MV_BYTE_SWAP_16BIT(pTxDesc->dataSize);
+	pTxDesc->bufPhysAddr = MV_BYTE_SWAP_32BIT(pTxDesc->bufPhysAddr);
+	pTxDesc->hw_cmd = MV_BYTE_SWAP_32BIT(pTxDesc->hw_cmd);
+}
+#else
+static INLINE void mvNetaRxqDescSwap(NETA_RX_DESC *pRxDesc)
+{
+}
+static INLINE void mvNetaTxqDescSwap(NETA_TX_DESC *pTxDesc)
+{
+}
+#endif /* MV_CPU_BE &&  CONFIG_MV_ETH_BE_WA */
+
+/* Get number of RX descriptors occupied by received packets */
+static INLINE int mvNetaRxqBusyDescNumGet(int port, int rxq)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(NETA_RXQ_STATUS_REG(port, rxq));
+
+	return (regVal & NETA_RXQ_OCCUPIED_DESC_ALL_MASK) >> NETA_RXQ_OCCUPIED_DESC_OFFS;
+}
+
+/* Get number of free RX descriptors ready to received new packets */
+static INLINE int mvNetaRxqFreeDescNumGet(int port, int rxq)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(NETA_RXQ_STATUS_REG(port, rxq));
+
+	return (regVal & NETA_RXQ_NON_OCCUPIED_DESC_ALL_MASK) >> NETA_RXQ_NON_OCCUPIED_DESC_OFFS;
+}
+
+/* Update HW with number of RX descriptors processed by SW:
+ *    - decrement number of occupied descriptors
+ *    - increment number of Non-occupied descriptors
+ */
+static INLINE void mvNetaRxqDescNumUpdate(int port, int rxq, int rx_done, int rx_filled)
+{
+	MV_U32 regVal;
+
+	if ((rx_done <= 0xFF) && (rx_filled <= 0xFF)) {
+		regVal = (rx_done << NETA_RXQ_DEC_OCCUPIED_OFFS) | (rx_filled << NETA_RXQ_ADD_NON_OCCUPIED_OFFS);
+		MV_REG_WRITE(NETA_RXQ_STATUS_UPDATE_REG(port, rxq), regVal);
+		return;
+	}
+
+	/* Only 255 descriptors can be added at once */
+	while ((rx_done > 0) || (rx_filled > 0)) {
+		if (rx_done <= 0xFF) {
+			regVal = (rx_done << NETA_RXQ_DEC_OCCUPIED_OFFS);
+			rx_done = 0;
+		} else {
+			regVal = (0xFF << NETA_RXQ_DEC_OCCUPIED_OFFS);
+			rx_done -= 0xFF;
+		}
+		if (rx_filled <= 0xFF) {
+			regVal |= (rx_filled << NETA_RXQ_ADD_NON_OCCUPIED_OFFS);
+			rx_filled = 0;
+		} else {
+			regVal |= (0xFF << NETA_RXQ_ADD_NON_OCCUPIED_OFFS);
+			rx_filled -= 0xFF;
+		}
+		MV_REG_WRITE(NETA_RXQ_STATUS_UPDATE_REG(port, rxq), regVal);
+	}
+}
+
+/* Add number of descriptors are ready to receive new packets */
+static INLINE void mvNetaRxqNonOccupDescAdd(int port, int rxq, int rx_desc)
+{
+	MV_U32	regVal;
+
+	/* Only 255 descriptors can be added at once */
+	while (rx_desc > 0xFF) {
+		regVal = (0xFF << NETA_RXQ_ADD_NON_OCCUPIED_OFFS);
+		MV_REG_WRITE(NETA_RXQ_STATUS_UPDATE_REG(port, rxq), regVal);
+		rx_desc = rx_desc - 0xFF;
+	}
+	regVal = (rx_desc << NETA_RXQ_ADD_NON_OCCUPIED_OFFS);
+	MV_REG_WRITE(NETA_RXQ_STATUS_UPDATE_REG(port, rxq), regVal);
+}
+
+/* Decrement number of processed descriptors */
+static INLINE void mvNetaRxqOccupDescDec(int port, int rxq, int rx_desc)
+{
+	MV_U32	regVal;
+
+	/* Only 255 descriptors can be updated at once */
+	while (rx_desc > 0xFF) {
+		regVal = (0xFF << NETA_RXQ_DEC_OCCUPIED_OFFS);
+		MV_REG_WRITE(NETA_RXQ_STATUS_UPDATE_REG(port, rxq), regVal);
+		rx_desc = rx_desc - 0xFF;
+	}
+	regVal = (rx_desc << NETA_RXQ_DEC_OCCUPIED_OFFS);
+	MV_REG_WRITE(NETA_RXQ_STATUS_UPDATE_REG(port, rxq), regVal);
+}
+
+/* Decrement sent descriptors counter */
+static INLINE void mvNetaTxqSentDescDec(int port, int txp, int txq, int sent_desc)
+{
+	MV_U32 regVal;
+
+	/* Only 255 TX descriptors can be updated at once */
+	while (sent_desc > 0xFF) {
+		regVal = (0xFF << NETA_TXQ_DEC_SENT_OFFS);
+		MV_REG_WRITE(NETA_TXQ_UPDATE_REG(port, txp, txq), regVal);
+		sent_desc = sent_desc - 0xFF;
+	}
+	regVal = (sent_desc << NETA_TXQ_DEC_SENT_OFFS);
+	MV_REG_WRITE(NETA_TXQ_UPDATE_REG(port, txp, txq), regVal);
+}
+
+/* Get number of TX descriptors already sent by HW */
+static INLINE int mvNetaTxqSentDescNumGet(int port, int txp, int txq)
+{
+	MV_U32  regVal;
+	int     sent_desc;
+
+	regVal = MV_REG_READ(NETA_TXQ_STATUS_REG(port, txp, txq));
+	sent_desc = (regVal & NETA_TXQ_SENT_DESC_MASK) >> NETA_TXQ_SENT_DESC_OFFS;
+
+	return sent_desc;
+}
+
+/* Invalidate TXQ descriptor - buffer will not be sent, buffer will not be returned */
+static INLINE void mvNetaTxqDescInv(NETA_TX_DESC *pTxDesc)
+{
+	pTxDesc->command |= NETA_TX_HWF_MASK;
+	pTxDesc->command &= ~NETA_TX_BM_ENABLE_MASK;
+	pTxDesc->hw_cmd |= NETA_TX_ES_MASK;
+}
+
+/* Return: 1 - TX descriptor is valid, 0 - TX descriptor is invalid */
+static INLINE int mvNetaTxqDescIsValid(NETA_TX_DESC *pTxDesc)
+{
+	return ((pTxDesc->hw_cmd & NETA_TX_ES_MASK) == 0);
+}
+
+/* Get index of descripotor to be processed next in the specific TXQ */
+static INLINE int mvNetaTxqNextIndexGet(int port, int txp, int txq)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(NETA_TXQ_INDEX_REG(port, txp, txq));
+
+	return (regVal & NETA_TXQ_NEXT_DESC_INDEX_MASK) >> NETA_TXQ_NEXT_DESC_INDEX_OFFS;
+}
+
+/* Get number of TX descriptors didn't send by HW yet and waiting for TX */
+static INLINE int mvNetaTxqPendDescNumGet(int port, int txp, int txq)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(NETA_TXQ_STATUS_REG(port, txp, txq));
+
+	return (regVal & NETA_TXQ_PENDING_DESC_MASK) >> NETA_TXQ_PENDING_DESC_OFFS;
+}
+
+/* Update HW with number of TX descriptors to be sent */
+static INLINE void mvNetaTxqPendDescAdd(int port, int txp, int txq, int pend_desc)
+{
+	MV_U32 regVal;
+
+	/* Only 255 descriptors can be added at once - we don't check it for performance */
+	while (pend_desc > 0xFF) {
+		regVal = (0xFF << NETA_TXQ_ADD_PENDING_OFFS);
+		MV_REG_WRITE(NETA_TXQ_UPDATE_REG(port, txp, txq), regVal);
+		pend_desc = pend_desc - 0xFF;
+	}
+	regVal = (pend_desc << NETA_TXQ_ADD_PENDING_OFFS);
+	MV_REG_WRITE(NETA_TXQ_UPDATE_REG(port, txp, txq), regVal);
+}
+
+/* Get number of sent descriptors and descrement counter. Number of sent descriptors is returned. */
+static INLINE int mvNetaTxqSentDescProc(int port, int txp, int txq)
+{
+	int sent_desc;
+
+	/* Get number of sent descriptors */
+	sent_desc = mvNetaTxqSentDescNumGet(port, txp, txq);
+	/* Decrement sent descriptors counter */
+	if (sent_desc)
+		mvNetaTxqSentDescDec(port, txp, txq, sent_desc);
+
+	return sent_desc;
+}
+
+/* Get pointer to next RX descriptor to be processed by SW */
+static INLINE NETA_RX_DESC *mvNetaRxqNextDescGet(MV_NETA_RXQ_CTRL *pRxq)
+{
+	NETA_RX_DESC	*pRxDesc;
+	int				rxDesc = pRxq->queueCtrl.nextToProc;
+
+	pRxq->queueCtrl.nextToProc = MV_NETA_QUEUE_NEXT_DESC(&(pRxq->queueCtrl), rxDesc);
+
+	pRxDesc = ((NETA_RX_DESC *)pRxq->queueCtrl.pFirst) + rxDesc;
+
+	return pRxDesc;
+}
+
+static INLINE NETA_RX_DESC *mvNetaRxqDescGet(MV_NETA_RXQ_CTRL *pRxq)
+{
+	NETA_RX_DESC	*pRxDesc;
+
+	pRxDesc = ((NETA_RX_DESC *)pRxq->queueCtrl.pFirst) + pRxq->queueCtrl.nextToProc;
+
+	return pRxDesc;
+}
+
+/* Refill RX descriptor (when BM is not supported) */
+static INLINE void mvNetaRxDescFill(NETA_RX_DESC *pRxDesc, MV_U32 physAddr, MV_U32 cookie)
+{
+	pRxDesc->bufCookie = (MV_U32)cookie;
+
+#if defined(CONFIG_MV_ETH_BE_WA)
+	pRxDesc->bufPhysAddr = MV_32BIT_LE(physAddr);
+#else
+	pRxDesc->bufPhysAddr = physAddr;
+#endif /* CONFIG_MV_ETH_BE_WA */
+}
+
+/* Get pointer to next TX descriptor to be processed (send) by HW */
+static INLINE NETA_TX_DESC *mvNetaTxqNextDescGet(MV_NETA_TXQ_CTRL *pTxq)
+{
+	int txDesc = pTxq->queueCtrl.nextToProc;
+
+	pTxq->queueCtrl.nextToProc = MV_NETA_QUEUE_NEXT_DESC(&(pTxq->queueCtrl), txDesc);
+
+	return ((NETA_TX_DESC *) pTxq->queueCtrl.pFirst) + txDesc;
+}
+
+/* Get pointer to previous TX descriptor in the ring for rollback when needed */
+static INLINE NETA_TX_DESC *mvNetaTxqPrevDescGet(MV_NETA_TXQ_CTRL *pTxq)
+{
+	int txDesc = pTxq->queueCtrl.nextToProc;
+
+	pTxq->queueCtrl.nextToProc = MV_NETA_QUEUE_PREV_DESC(&(pTxq->queueCtrl), txDesc);
+
+	return ((NETA_TX_DESC *) pTxq->queueCtrl.pFirst) + txDesc;
+}
+
+/* Set TXQ descriptors fields relevant for CSUM calculation */
+static INLINE MV_U32 mvNetaTxqDescCsum(int l3_offs, int l3_proto, int ip_hdr_len, int l4_proto)
+{
+	MV_U32 command;
+
+	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, G_L4_chk, L4_type */
+	/* required only for checksum calculation */
+	command = (l3_offs << NETA_TX_L3_OFFSET_OFFS);
+	command |= (ip_hdr_len << NETA_TX_IP_HLEN_OFFS);
+
+	if (l3_proto == MV_16BIT_BE(MV_IP_TYPE))
+		command |= (NETA_TX_L3_IP4 | NETA_TX_IP_CSUM_MASK);
+	else
+		command |= NETA_TX_L3_IP6;
+
+	if (l4_proto == MV_IP_PROTO_TCP)
+		command |= (NETA_TX_L4_TCP | NETA_TX_L4_CSUM_FULL);
+	else if (l4_proto == MV_IP_PROTO_UDP)
+		command |= (NETA_TX_L4_UDP | NETA_TX_L4_CSUM_FULL);
+	else
+		command |= NETA_TX_L4_CSUM_NOT;
+
+	return command;
+}
+
+static INLINE MV_ULONG netaDescVirtToPhys(MV_NETA_QUEUE_CTRL *pQueueCtrl, MV_U8 *pDesc)
+{
+	return (pQueueCtrl->descBuf.bufPhysAddr + (pDesc - pQueueCtrl->descBuf.bufVirtPtr));
+}
+
+
+#ifdef CONFIG_MV_PON
+static INLINE void mvNetaPonTxqBytesAdd(int port, int txp, int txq, int bytes)
+{
+	MV_U32	regVal;
+
+	regVal = (NETA_TX_NEW_BYTES_MASK(bytes) | NETA_TX_NEW_BYTES_TXQ_MASK(txq) | NETA_TX_NEW_BYTES_COLOR_GREEN);
+
+	MV_REG_WRITE(NETA_TX_ADD_BYTES_REG(port, txp), regVal);
+}
+#endif /* CONFIG_MV_PON */
+
+/* Function prototypes */
+MV_STATUS 	mvNetaHalInit(MV_NETA_HAL_DATA *halData);
+
+MV_STATUS 	mvNetaWinInit(MV_U32 port, MV_UNIT_WIN_INFO *addrWinMap);
+MV_STATUS 	mvNetaWinWrite(MV_U32 port, MV_U32 winNum, MV_UNIT_WIN_INFO *pAddrDecWin);
+MV_STATUS 	mvNetaWinRead(MV_U32 port, MV_U32 winNum, MV_UNIT_WIN_INFO *pAddrDecWin);
+MV_STATUS 	mvNetaWinEnable(MV_U32 port, MV_U32 winNum, MV_BOOL enable);
+
+int 		mvNetaAccMode(void);
+MV_STATUS 	mvNetaMemMapGet(MV_ULONG physAddr, MV_U8 *pTarget, MV_U8 *pAttr);
+
+void		*mvNetaPortInit(int port, void *osHandle);
+void		mvNetaPortDestroy(int portNo);
+MV_NETA_TXQ_CTRL 	*mvNetaTxqInit(int port, int txp, int queue, int descrNum);
+MV_NETA_RXQ_CTRL 	*mvNetaRxqInit(int port, int queue, int descrNum);
+void		mvNetaTxqDelete(int port, int txp, int queue);
+void		mvNetaRxqDelete(int port, int queue);
+void		mvNetaRxqAddrSet(int port, int queue, int descrNum);
+void 		mvNetaTxqAddrSet(int port, int txp, int queue, int descrNum);
+void            mvNetaTxpRateMaxSet(int port, int txp);
+void 		mvNetaTxqBandwidthSet(int port, int txp,  int queue);
+
+void mvNetaRxReset(int port);
+void mvNetaTxpReset(int port, int txp);
+
+MV_STATUS	mvNetaPortDisable(int port);
+MV_STATUS	mvNetaPortEnable(int port);
+MV_STATUS	mvNetaPortUp(int port);
+MV_STATUS	mvNetaPortDown(int port);
+
+MV_BOOL		mvNetaLinkIsUp(int port);
+MV_STATUS	mvNetaLinkStatus(int port, MV_ETH_PORT_STATUS *pStatus);
+MV_STATUS	mvNetaDefaultsSet(int port);
+MV_STATUS	mvNetaForceLinkModeSet(int portNo, MV_BOOL force_link_pass, MV_BOOL force_link_fail);
+MV_STATUS	mvNetaSpeedDuplexSet(int portNo, MV_ETH_PORT_SPEED speed, MV_ETH_PORT_DUPLEX duplex);
+MV_STATUS 	mvNetaSpeedDuplexGet(int portNo, MV_ETH_PORT_SPEED *speed, MV_ETH_PORT_DUPLEX *duplex);
+
+
+void		mvNetaCpuDump(int port, int cpu, int RxTx);
+MV_STATUS	mvNetaTxqCpuMaskSet(int port, int txq_mask, int cpu);
+MV_STATUS	mvNetaRxqCpuMaskSet(int port, int rxq_mask, int cpu);
+
+
+void		mvNetaSetOtherMcastTable(int portNo, int queue);
+void		mvNetaSetUcastTable(int port, int queue);
+void		mvNetaSetSpecialMcastTable(int portNo, int queue);
+
+/************************ Legacy parse function start *******************************/
+MV_STATUS	mvNetaRxUnicastPromiscSet(int port, MV_BOOL isPromisc);
+
+MV_STATUS	mvNetaMcastAddrSet(int port, MV_U8 *pAddr, int queue);
+MV_STATUS	mvNetaMacAddrGet(int portNo, unsigned char *pAddr);
+
+MV_STATUS	mvNetaTosToRxqSet(int port, int rxq, int tos);
+int			mvNetaTosToRxqGet(int port, int tos);
+
+MV_STATUS   mvNetaVprioToRxqSet(int port, int vprio, int rxq);
+int	    	mvNetaVprioToRxqGet(int port, int vprio);
+
+MV_STATUS	mvNetaTcpRxq(int port, int rxq);
+MV_STATUS	mvNetaUdpRxq(int port, int rxq);
+MV_STATUS	mvNetaArpRxq(int port, int rxq);
+MV_STATUS	mvNetaBpduRxq(int port, int rxq);
+/************************ Legacy parse function end *******************************/
+
+void 		mvNetaPhyAddrSet(int port, int phyAddr);
+int 		mvNetaPhyAddrGet(int port);
+void		mvNetaPhyAddrPollingDisable(int port);
+
+void 		mvNetaPortPowerDown(int port);
+void		mvNetaPortPowerUp(int port, MV_BOOL isSgmii, MV_BOOL isRgmii, MV_BOOL isInband);
+
+/* Interrupt Coalesting functions */
+MV_STATUS mvNetaRxqTimeCoalSet(int port, int rxq, MV_U32 uSec);
+MV_STATUS mvNetaRxqPktsCoalSet(int port, int rxq, MV_U32 pkts);
+MV_STATUS mvNetaTxDonePktsCoalSet(int port, int txp, int txq, MV_U32 pkts);
+MV_U32 mvNetaRxqTimeCoalGet(int port, int rxq);
+MV_U32 mvNetaRxqPktsCoalGet(int port, int rxq);
+MV_U32 mvNetaTxDonePktsCoalGet(int port, int txp, int txq);
+
+MV_STATUS mvNetaRxqBufSizeSet(int port, int rxq, int bufSize);
+MV_STATUS mvNetaMhSet(int port, MV_NETA_MH_MODE mh);
+MV_STATUS mvNetaTagSet(int port, MV_TAG_TYPE mh);
+MV_STATUS mvNetaTxMhRegSet(int port, int txp, int reg, MV_U16 mh);
+MV_STATUS mvNetaMaxRxSizeSet(int port, int maxRxSize);
+MV_STATUS mvNetaMacAddrSet(int port, unsigned char *pAddr, int queue);
+
+MV_STATUS mvNetaRxqOffsetSet(int port, int rxq, int offset);
+MV_STATUS mvNetaBmPoolBufSizeSet(int port, int pool, int bufsize);
+MV_STATUS mvNetaRxqBmEnable(int port, int rxq, int smallPool, int largePool);
+MV_STATUS mvNetaRxqBmDisable(int port, int rxq);
+
+MV_STATUS mvNetaTxpEjpSet(int port, int txp, int enable);
+MV_STATUS mvNetaTxqFixPrioSet(int port, int txp, int txq);
+MV_STATUS mvNetaTxqWrrPrioSet(int port, int txp, int txq, int weight);
+MV_STATUS mvNetaTxpMaxTxSizeSet(int port, int txp, int maxTxSize);
+MV_STATUS mvNetaTxpRateSet(int port, int txp, int bw);
+MV_STATUS mvNetaTxqRateSet(int port, int txp, int txq, int bw);
+MV_STATUS mvNetaTxpBurstSet(int port, int txp, int burst);
+MV_STATUS mvNetaTxqBurstSet(int port, int txp, int txq, int burst);
+MV_STATUS mvNetaTxpEjpBurstRateSet(int port, int txp, int txq, int rate);
+MV_STATUS mvNetaTxpEjpMaxPktSizeSet(int port, int txp, int type, int size);
+MV_STATUS mvNetaTxpEjpTxSpeedSet(int port, int txp, int type, int speed);
+
+int mvNetaPortCheck(int port);
+int mvNetaTxpCheck(int port, int txp);
+int mvNetaMaxCheck(int num, int limit, char *name);
+
+void mvNetaMibCountersClear(int port, int txp);
+MV_U32 mvNetaMibCounterRead(int port, int txp, unsigned int mibOffset, MV_U32 *pHigh32);
+
+void mvEthPortRegs(int port);
+void mvEthPortUcastShow(int port);
+void mvEthPortMcastShow(int port);
+void mvNetaPortRegs(int port);
+void mvNetaPncRegs(void);
+void mvNetaTxpRegs(int port, int txp);
+void mvNetaRxqRegs(int port, int rxq);
+void mvNetaTxqRegs(int port, int txp, int txq);
+void mvNetaPortStatus(int port);
+void mvNetaRxqShow(int port, int rxq, int mode);
+void mvNetaTxqShow(int port, int txp, int txq, int mode);
+
+void mvEthTxpWrrRegs(int port, int txp);
+void mvEthRegs(int port);
+void mvEthPortCounters(int port, int mib);
+void mvEthPortRmonCounters(int port, int mib);
+
+MV_STATUS mvNetaFlowCtrlSet(int port, MV_ETH_PORT_FC flowControl);
+MV_STATUS mvNetaFlowCtrlGet(int port, MV_ETH_PORT_FC *flowControl);
+
+#ifdef MV_ETH_GMAC_NEW
+MV_STATUS   mvEthGmacRgmiiSet(int port, int enable);
+MV_STATUS	mvNetaGmacLpiSet(int port, int mode);
+void	    mvNetaGmacRegs(int port);
+#endif /* MV_ETH_GMAC_NEW */
+
+#ifdef CONFIG_MV_PON
+void        mvNetaPonTxpRegs(int port, int txp);
+MV_STATUS   mvNetaPonRxMibDefault(int mib);
+MV_STATUS   mvNetaPonRxMibGemPid(int mib, MV_U16 gemPid);
+#endif /* CONFIG_MV_PON */
+
+#ifdef CONFIG_MV_ETH_HWF
+typedef enum {
+    MV_NETA_HWF_MH_REG = 0,
+    MV_NETA_HWF_MH_PNC = 1
+} MV_NETA_HWF_MH_SRC;
+
+MV_STATUS mvNetaHwfInit(int port);
+MV_STATUS mvNetaHwfEnable(int port, int enable);
+MV_STATUS mvNetaHwfBmPoolsSet(int port, int short_pool, int long_pool);
+MV_STATUS mvNetaHwfTxqInit(int p, int txp, int txq);
+MV_STATUS mvNetaHwfTxqEnable(int port, int p, int txp, int txq, int enable);
+MV_STATUS mvNetaHwfTxqDropSet(int port, int p, int txp, int txq, int thresh, int bits);
+MV_STATUS mvNetaHwfMhSrcSet(int port, MV_NETA_HWF_MH_SRC src);
+MV_STATUS mvNetaHwfMhSelSet(int port, MV_U8 mhSel);
+MV_STATUS mvNetaHwfTxqNextIndexGet(int port, int tx_port, int txp, int txq, int *val);
+
+void mvNetaHwfRxpRegs(int port);
+void mvNetaHwfTxpRegs(int port, int p, int txp);
+void mvNetaHwfTxpCntrs(int port, int p, int txp);
+#endif /* CONFIG_MV_ETH_HWF */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __mvNeta_h__ */
diff --git a/drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvNetaAddrDec.c b/drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvNetaAddrDec.c
new file mode 100755
index 000000000000..3838ebce5c3e
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvNetaAddrDec.c
@@ -0,0 +1,431 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "mvSysEthConfig.h"
+
+#include "mvNeta.h"
+
+MV_TARGET ethAddrDecPrioTab[] = {
+#if defined(MV_INCLUDE_SDRAM_CS0)
+	SDRAM_CS0,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS1)
+	SDRAM_CS1,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS2)
+	SDRAM_CS2,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS3)
+	SDRAM_CS3,
+#endif
+#if defined(CONFIG_MV_ETH_BM)
+	PNC_BM,
+#endif
+	TBL_TERM
+};
+
+static MV_STATUS ethWinOverlapDetect(MV_U32 port, MV_U32 winNum, MV_ADDR_WIN *pAddrWin);
+
+/*******************************************************************************
+* mvNetaWinInit
+*
+* DESCRIPTION:
+*	This function initialize ETH window decode unit. It set the default
+*	address decode windows of the unit.
+*
+* INPUT:
+*	port	 : The port number to initialize the address decoding for.
+*	addWinMap: An array holding the address decoding information for the
+*		    system.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_ERROR if setting fail.
+*******************************************************************************/
+MV_STATUS mvNetaWinInit(MV_U32 port, MV_UNIT_WIN_INFO *addrWinMap)
+{
+	MV_U32 winNum, winPrioIndex = 0, i, regVal = 0;
+	MV_UNIT_WIN_INFO *addrDecWin;
+	MV_U32 accessProtReg = 0;
+
+	/* Initiate Ethernet address decode */
+	/* First disable all address decode windows */
+	for (winNum = 0; winNum < ETH_MAX_DECODE_WIN; winNum++)
+		regVal |= MV_BIT_MASK(winNum);
+
+	MV_REG_WRITE(ETH_BASE_ADDR_ENABLE_REG(port), regVal);
+
+	/* Go through all windows in user table until table terminator      */
+	for (winNum = 0; ((ethAddrDecPrioTab[winPrioIndex] != TBL_TERM) && (winNum < ETH_MAX_DECODE_WIN));) {
+		addrDecWin = &addrWinMap[ethAddrDecPrioTab[winPrioIndex]];
+
+		if (addrDecWin->enable == MV_TRUE) {
+			if (MV_OK != mvNetaWinWrite(port, winNum, addrDecWin)) {
+				mvOsPrintf("mvNetaWinInit failed: winNum=%d (%d, %d)\n",
+					   winNum, winPrioIndex, ethAddrDecPrioTab[winPrioIndex]);
+				return MV_ERROR;
+			}
+			winNum++;
+		}
+		winPrioIndex++;
+	}
+
+	/* set full access to all windows. */
+	for (i = 0; i < winNum; i++)
+		accessProtReg |= (FULL_ACCESS << (i * 2));
+
+	MV_REG_WRITE(ETH_ACCESS_PROTECT_REG(port), accessProtReg);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNetaWinWrite
+*
+* DESCRIPTION:
+*	This function writes the address decoding registers according to the
+*	given window configuration.
+*
+* INPUT:
+*	unit	    - The Ethernet unit number to configure.
+*       winNum	    - ETH target address decode window number.
+*       pAddrDecWin - ETH target window data structure.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_OK on success,
+*	MV_BAD_PARAM if winNum is invalid.
+*	MV_ERROR otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvNetaWinWrite(MV_U32 port, MV_U32 winNum, MV_UNIT_WIN_INFO *pAddrDecWin)
+{
+	MV_U64 size;
+	MV_U32 alignment;
+	MV_U32 baseReg, sizeReg;
+
+	/* Parameter checking   */
+	if (winNum >= ETH_MAX_DECODE_WIN) {
+		mvOsPrintf("mvNetaWinSet: ERR. Invalid win num %d\n", winNum);
+		return MV_BAD_PARAM;
+	}
+
+	size = pAddrDecWin->addrWin.size;
+	if (size == 0) {
+		mvOsPrintf("%s: ERR. Invalid window size %lld\n",	__func__, size);
+		return MV_BAD_PARAM;
+	}
+	if (!MV_IS_POWER_OF_2(size)) {
+		/* try to get a good size */
+		pAddrDecWin->addrWin.size = 1 << (mvLog2(size) + 1);
+		mvOsPrintf("%s: WARN. Wrong window size %lld, rounding to %lld\n",
+			__func__, size, pAddrDecWin->addrWin.size);
+		size = pAddrDecWin->addrWin.size;
+	}
+
+	/* Check if the requested window overlapps with current windows     */
+	if (MV_TRUE == ethWinOverlapDetect(port, winNum, &pAddrDecWin->addrWin)) {
+		mvOsPrintf("%s: ERR. Window %d overlap\n", __func__, winNum);
+		return MV_ERROR;
+	}
+
+	/* check if address is aligned to the size */
+	if (MV_IS_NOT_ALIGN(pAddrDecWin->addrWin.baseLow, pAddrDecWin->addrWin.size)) {
+		mvOsPrintf("mvNetaWinSet: Error setting Ethernet window %d.\n"
+			   "Address 0x%08x is unaligned to size 0x%llx.\n",
+			   winNum, pAddrDecWin->addrWin.baseLow, pAddrDecWin->addrWin.size);
+		return MV_ERROR;
+	}
+
+	baseReg = (pAddrDecWin->addrWin.baseLow & ETH_WIN_BASE_MASK);
+	sizeReg = MV_REG_READ(ETH_WIN_SIZE_REG(port, winNum));
+
+	/* set size */
+	alignment = 1 << ETH_WIN_SIZE_OFFS;
+	sizeReg &= ~ETH_WIN_SIZE_MASK;
+	sizeReg |= (((size / alignment) - 1) << ETH_WIN_SIZE_OFFS);
+
+	/* set attributes */
+	baseReg &= ~ETH_WIN_ATTR_MASK;
+	baseReg |= pAddrDecWin->attrib << ETH_WIN_ATTR_OFFS;
+
+	/* set target ID */
+	baseReg &= ~ETH_WIN_TARGET_MASK;
+	baseReg |= pAddrDecWin->targetId << ETH_WIN_TARGET_OFFS;
+
+	/* for the safe side we disable the window before writing the new
+	   values */
+	mvNetaWinEnable(port, winNum, MV_FALSE);
+	MV_REG_WRITE(ETH_WIN_BASE_REG(port, winNum), baseReg);
+
+	/* Write to address decode Size Register                            */
+	MV_REG_WRITE(ETH_WIN_SIZE_REG(port, winNum), sizeReg);
+
+	/* Enable address decode target window                              */
+	if (pAddrDecWin->enable == MV_TRUE)
+		mvNetaWinEnable(port, winNum, MV_TRUE);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* ethWinOverlapDetect - Detect ETH address windows overlapping
+*
+* DESCRIPTION:
+*       An unpredicted behaviur is expected in case ETH address decode
+*       windows overlapps.
+*       This function detects ETH address decode windows overlapping of a
+*       specified window. The function does not check the window itself for
+*       overlapping. The function also skipps disabled address decode windows.
+*
+* INPUT:
+*       winNum      - address decode window number.
+*       pAddrDecWin - An address decode window struct.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if the given address window overlap current address
+*       decode map, MV_FALSE otherwise, MV_ERROR if reading invalid data
+*       from registers.
+*
+*******************************************************************************/
+static MV_STATUS ethWinOverlapDetect(MV_U32 port, MV_U32 winNum, MV_ADDR_WIN *pAddrWin)
+{
+	MV_U32 baseAddrEnableReg;
+	MV_U32 winNumIndex;
+	MV_UNIT_WIN_INFO addrDecWin;
+
+	/* Read base address enable register. Do not check disabled windows     */
+	baseAddrEnableReg = MV_REG_READ(ETH_BASE_ADDR_ENABLE_REG(port));
+
+	for (winNumIndex = 0; winNumIndex < ETH_MAX_DECODE_WIN; winNumIndex++) {
+		/* Do not check window itself           */
+		if (winNumIndex == winNum)
+			continue;
+
+		/* Do not check disabled windows        */
+		if (baseAddrEnableReg & (1 << winNumIndex))
+			continue;
+
+		/* Get window parameters        */
+		if (MV_OK != mvNetaWinRead(port, winNumIndex, &addrDecWin)) {
+			mvOsPrintf("ethWinOverlapDetect: ERR. TargetWinGet failed\n");
+			return MV_ERROR;
+		}
+
+		if (MV_TRUE == mvWinOverlapTest(pAddrWin, &(addrDecWin.addrWin)))
+			return MV_TRUE;
+	}
+	return MV_FALSE;
+}
+
+/*******************************************************************************
+* mvNetaWinRead
+*
+* DESCRIPTION:
+*       Read Ethernet peripheral target address window.
+*
+* INPUT:
+*       winNum - ETH to target address decode window number.
+*
+* OUTPUT:
+*       pAddrDecWin - ETH target window data structure.
+*
+* RETURN:
+*	MV_BAD_PARAM if winNum is invalid.
+*	MV_ERROR otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvNetaWinRead(MV_U32 port, MV_U32 winNum, MV_UNIT_WIN_INFO *pAddrDecWin)
+{
+	MV_U32 baseReg, sizeReg;
+	MV_U32 alignment, size;
+
+	/* Parameter checking   */
+	if (winNum >= ETH_MAX_DECODE_WIN) {
+		mvOsPrintf("mvNetaWinGet: ERR. Invalid winNum %d\n", winNum);
+		return MV_NOT_SUPPORTED;
+	}
+
+	baseReg = MV_REG_READ(ETH_WIN_BASE_REG(port, winNum));
+	sizeReg = MV_REG_READ(ETH_WIN_SIZE_REG(port, winNum));
+
+	alignment = 1 << ETH_WIN_SIZE_OFFS;
+	size = (sizeReg & ETH_WIN_SIZE_MASK) >> ETH_WIN_SIZE_OFFS;
+	pAddrDecWin->addrWin.size = (size + 1) * alignment;
+
+	/* Extract base address                                     */
+	pAddrDecWin->addrWin.baseLow = baseReg & ETH_WIN_BASE_MASK;
+	pAddrDecWin->addrWin.baseHigh = 0;
+
+	/* attrib and targetId */
+	pAddrDecWin->attrib = (baseReg & ETH_WIN_ATTR_MASK) >> ETH_WIN_ATTR_OFFS;
+	pAddrDecWin->targetId = (baseReg & ETH_WIN_TARGET_MASK) >> ETH_WIN_TARGET_OFFS;
+
+	/* Check if window is enabled   */
+	if (~(MV_REG_READ(ETH_BASE_ADDR_ENABLE_REG(port))) & (1 << winNum))
+		pAddrDecWin->enable = MV_TRUE;
+	else
+		pAddrDecWin->enable = MV_FALSE;
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNetaWinEnable - Enable/disable a ETH to target address window
+*
+* DESCRIPTION:
+*       This function enable/disable a ETH to target address window.
+*       According to parameter 'enable' the routine will enable the
+*       window, thus enabling ETH accesses (before enabling the window it is
+*       tested for overlapping). Otherwise, the window will be disabled.
+*
+* INPUT:
+*       winNum - ETH to target address decode window number.
+*       enable - Enable/disable parameter.
+*
+* OUTPUT:
+*       N/A
+*
+* RETURN:
+*       MV_ERROR if decode window number was wrong or enabled window overlapps.
+*
+*******************************************************************************/
+MV_STATUS mvNetaWinEnable(MV_U32 port, MV_U32 winNum, MV_BOOL enable)
+{
+	/* Parameter checking   */
+	if (winNum >= ETH_MAX_DECODE_WIN) {
+		mvOsPrintf("mvNetaTargetWinEnable:ERR. Invalid winNum%d\n", winNum);
+		return MV_ERROR;
+	}
+
+	if (enable == MV_TRUE)
+		MV_REG_BIT_RESET(ETH_BASE_ADDR_ENABLE_REG(port), (1 << winNum));
+	else
+		/* Disable address decode target window                             */
+		MV_REG_BIT_SET(ETH_BASE_ADDR_ENABLE_REG(port), (1 << winNum));
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNetaProtWinSet - Set access protection of Ethernet to target window.
+*
+* DESCRIPTION:
+*       Each Ethernet port can be configured with access attributes for each
+*       of the Ethenret to target windows (address decode windows). This
+*       function sets access attributes to a given window for the given channel.
+*
+* INPUTS:
+*       ethPort   - ETH channel number. See MV_ETH_CHANNEL enumerator.
+*       winNum - IETH to target address decode window number.
+*       access - IETH access rights. See MV_ACCESS_RIGHTS enumerator.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_ERROR in case window number is invalid or access right reserved.
+*
+*******************************************************************************/
+MV_STATUS mvNetaProtWinSet(MV_U32 portNo, MV_U32 winNum, MV_ACCESS_RIGHTS access)
+{
+	MV_U32 protReg;
+
+	/* Parameter checking   */
+	if (portNo >= MV_ETH_MAX_PORTS) {
+		mvOsPrintf("mvNetaProtWinSet:ERR. Invalid port number %d\n", portNo);
+		return MV_ERROR;
+	}
+
+	if (winNum >= ETH_MAX_DECODE_WIN) {
+		mvOsPrintf("mvNetaProtWinSet:ERR. Invalid winNum%d\n", winNum);
+		return MV_ERROR;
+	}
+
+	if ((access == ACC_RESERVED) || (access >= MAX_ACC_RIGHTS)) {
+		mvOsPrintf("mvNetaProtWinSet:ERR. Inv access param %d\n", access);
+		return MV_ERROR;
+	}
+	/* Read current protection register */
+	protReg = MV_REG_READ(ETH_ACCESS_PROTECT_REG(portNo));
+
+	/* Clear protection window field */
+	protReg &= ~(ETH_PROT_WIN_MASK(winNum));
+
+	/* Set new protection field value */
+	protReg |= (access << (ETH_PROT_WIN_OFFS(winNum)));
+
+	/* Write protection register back   */
+	MV_REG_WRITE(ETH_ACCESS_PROTECT_REG(portNo), protReg);
+
+	return MV_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvNetaDebug.c b/drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvNetaDebug.c
new file mode 100644
index 000000000000..368e9a523463
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvNetaDebug.c
@@ -0,0 +1,809 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+/*******************************************************************************
+* mvNetaDebug.c - Source file for user friendly debug functions
+*
+* DESCRIPTION:
+*
+* DEPENDENCIES:
+*       None.
+*
+*******************************************************************************/
+
+#include "mvOs.h"
+#include "mvCommon.h"
+#include "mvTypes.h"
+#include "mv802_3.h"
+#include "mvDebug.h"
+
+#include "mvNeta.h"
+#include "bm/mvBm.h"
+#include "pnc/mvTcam.h"
+
+static void mvEthRegPrint(MV_U32 reg_addr, char *reg_name)
+{
+	mvOsPrintf("  %-32s: 0x%x = 0x%08x\n", reg_name, reg_addr, MV_REG_READ(reg_addr));
+}
+
+static void mvEthRegPrint0(MV_U32 reg_addr, char *reg_name)
+{
+	mvOsPrintf("  %-32s: %u\n", reg_name, MV_REG_READ(reg_addr));
+}
+
+static void mvEthRegPrint2(MV_U32 reg_addr, char *reg_name, MV_U32 index)
+{
+	char buf[64];
+
+	mvOsSPrintf(buf, "%s[%d]", reg_name, index);
+	mvOsPrintf("  %-32s: 0x%x = 0x%08x\n", buf, reg_addr, MV_REG_READ(reg_addr));
+}
+
+static void mvEthMibPrint(int port, int mib, MV_U32 offset, char *mib_name)
+{
+	MV_U32 regVaLo, regValHi = 0;
+
+	regVaLo = mvNetaMibCounterRead(port, mib, offset, &regValHi);
+
+	if (!regValHi)
+		mvOsPrintf("  %-32s: %u\n", mib_name, regVaLo);
+	else
+		mvOsPrintf("  t%-32s: 0x%08x%08x\n", mib_name, regValHi, regVaLo);
+}
+
+void mvEthTxpWrrRegs(int port, int txp)
+{
+	int queue;
+
+	if (mvNetaTxpCheck(port, txp))
+		return;
+
+	mvOsPrintf("\n[TXP WRR/EJP registers: port=%d, txp=%d]\n", port, txp);
+	mvEthRegPrint(ETH_TX_QUEUE_COMMAND_REG(port, txp), "ETH_TX_QUEUE_COMMAND_REG");
+
+	mvEthRegPrint(NETA_TX_CMD_1_REG(port, txp),          "NETA_TX_CMD_1_REG");
+	mvEthRegPrint(NETA_TX_FIXED_PRIO_CFG_REG(port, txp), "NETA_TX_FIXED_PRIO_CFG_REG");
+	mvEthRegPrint(NETA_TX_REFILL_PERIOD_REG(port, txp),  "NETA_TX_REFILL_PERIOD_REG");
+	mvEthRegPrint(NETA_TXP_MTU_REG(port, txp),           "NETA_TXP_MTU_REG");
+	mvEthRegPrint(NETA_TXP_REFILL_REG(port, txp),        "NETA_TXP_REFILL_REG");
+	mvEthRegPrint(NETA_TXP_TOKEN_SIZE_REG(port, txp),    "NETA_TXP_TOKEN_SIZE_REG");
+	mvEthRegPrint(NETA_TXP_TOKEN_CNTR_REG(port, txp),    "NETA_TXP_TOKEN_CNTR_REG");
+	mvEthRegPrint(NETA_TXP_EJP_HI_LO_REG(port, txp),     "NETA_TXP_EJP_HI_LO_REG");
+	mvEthRegPrint(NETA_TXP_EJP_HI_ASYNC_REG(port, txp),  "NETA_TXP_EJP_HI_ASYNC_REG");
+	mvEthRegPrint(NETA_TXP_EJP_LO_ASYNC_REG(port, txp),  "NETA_TXP_EJP_LO_ASYNC_REG");
+	mvEthRegPrint(NETA_TXP_EJP_SPEED_REG(port, txp),     "NETA_TXP_EJP_SPEED_REG");
+
+	for (queue = 0; queue < MV_ETH_MAX_TXQ; queue++) {
+		mvOsPrintf("\n[TXQ WRR/EJP registers: port=%d, txp=%d, txq=%d]\n", port, txp, queue);
+		mvEthRegPrint(NETA_TXQ_REFILL_REG(port, txp, queue), "NETA_TXQ_REFILL_REG");
+		mvEthRegPrint(NETA_TXQ_TOKEN_SIZE_REG(port, txp, queue), "NETA_TXQ_TOKEN_SIZE_REG");
+		mvEthRegPrint(NETA_TXQ_TOKEN_CNTR_REG(port, txp, queue), "NETA_TXQ_TOKEN_CNTR_REG");
+		mvEthRegPrint(NETA_TXQ_WRR_ARBITER_REG(port, txp, queue), "NETA_TXQ_WRR_ARBITER_REG");
+		if ((queue == 2) || (queue == 3))
+			mvEthRegPrint(NETA_TXQ_EJP_IPG_REG(port, txp, queue), "NETA_TXQ_EJP_IPG_REG");
+	}
+}
+
+/* Print important registers of Ethernet port */
+void mvEthPortRegs(int port)
+{
+	int txp;
+	MV_NETA_PORT_CTRL *pPortCtrl = mvNetaPortHndlGet(port);
+
+	if (mvNetaPortCheck(port))
+		return;
+
+	mvEthRegPrint(ETH_PORT_STATUS_REG(port), "ETH_PORT_STATUS_REG");
+	mvEthRegPrint(ETH_PORT_SERIAL_CTRL_REG(port), "ETH_PORT_SERIAL_CTRL_REG");
+	mvEthRegPrint(ETH_PORT_CONFIG_REG(port), "ETH_PORT_CONFIG_REG");
+	mvEthRegPrint(ETH_PORT_CONFIG_EXTEND_REG(port), "ETH_PORT_CONFIG_EXTEND_REG");
+	mvEthRegPrint(ETH_SDMA_CONFIG_REG(port), "ETH_SDMA_CONFIG_REG");
+	mvEthRegPrint(ETH_RX_MINIMAL_FRAME_SIZE_REG(port), "ETH_RX_MINIMAL_FRAME_SIZE_REG");
+	mvEthRegPrint(ETH_INTR_CAUSE_REG(port), "ETH_INTR_CAUSE_REG");
+	mvEthRegPrint(ETH_INTR_CAUSE_EXT_REG(port), "ETH_INTR_CAUSE_EXT_REG");
+	mvEthRegPrint(ETH_INTR_MASK_REG(port), "ETH_INTR_MASK_REG");
+	mvEthRegPrint(ETH_INTR_MASK_EXT_REG(port), "ETH_INTR_MASK_EXT_REG");
+
+	mvEthRegPrint(ETH_RX_QUEUE_COMMAND_REG(port), "ETH_RX_QUEUE_COMMAND_REG");
+	for (txp = 0; txp < pPortCtrl->txpNum; txp++)
+		mvEthRegPrint(ETH_TX_QUEUE_COMMAND_REG(port, txp), "ETH_TX_QUEUE_COMMAND_REG");
+}
+
+/* Print Giga Ethernet UNIT registers */
+void mvEthRegs(int port)
+{
+	int win;
+	MV_U32	regValue;
+
+	if (mvNetaPortCheck(port))
+		return;
+
+	mvEthRegPrint(ETH_PHY_ADDR_REG(port), "ETH_PHY_ADDR_REG");
+	mvEthRegPrint(ETH_UNIT_INTR_CAUSE_REG(port), "ETH_UNIT_INTR_CAUSE_REG");
+	mvEthRegPrint(ETH_UNIT_INTR_MASK_REG(port), "ETH_UNIT_INTR_MASK_REG");
+	mvEthRegPrint(ETH_UNIT_ERROR_ADDR_REG(port), "ETH_UNIT_ERROR_ADDR_REG");
+	mvEthRegPrint(ETH_UNIT_INT_ADDR_ERROR_REG(port), "ETH_UNIT_INT_ADDR_ERROR_REG");
+	mvEthRegPrint(ETH_BASE_ADDR_ENABLE_REG(port), "ETH_BASE_ADDR_ENABLE_REG");
+	mvEthRegPrint(ETH_ACCESS_PROTECT_REG(port), "ETH_ACCESS_PROTECT_REG");
+
+	regValue = MV_REG_READ(ETH_BASE_ADDR_ENABLE_REG(port));
+	for (win = 0; win < ETH_MAX_DECODE_WIN; win++) {
+		if (regValue & (1 << win))
+			continue; /* window is disable */
+		mvOsPrintf("win[%d]\n", win);
+		mvEthRegPrint(ETH_WIN_BASE_REG(port, win), "ETH_WIN_BASE_REG");
+		mvEthRegPrint(ETH_WIN_SIZE_REG(port, win), "ETH_WIN_SIZE_REG");
+	}
+}
+
+#ifdef MV_ETH_GMAC_NEW
+void    mvNetaGmacRegs(int port)
+{
+	if (mvNetaPortCheck(port))
+		return;
+
+	if (MV_PON_PORT(port)) {
+		mvOsPrintf("Not supported for PON port #%d \n", port);
+		return;
+	}
+	mvEthRegPrint(ETH_PORT_STATUS_REG(port),        "ETH_PORT_STATUS_REG");
+	mvEthRegPrint(ETH_PORT_SERIAL_CTRL_REG(port),   "ETH_PORT_SERIAL_CTRL_REG");
+	mvEthRegPrint(NETA_GMAC_CTRL_0_REG(port),       "NETA_GMAC_CTRL_0_REG");
+	mvEthRegPrint(NETA_GMAC_CTRL_1_REG(port),       "NETA_GMAC_CTRL_1_REG");
+	mvEthRegPrint(NETA_GMAC_CTRL_2_REG(port),       "NETA_GMAC_CTRL_2_REG");
+	mvEthRegPrint(NETA_GMAC_AN_CTRL_REG(port),      "NETA_GMAC_AN_CTRL_REG");
+	mvEthRegPrint(NETA_GMAC_STATUS_REG(port),       "NETA_GMAC_STATUS_REG");
+	mvEthRegPrint(NETA_GMAC_SERIAL_REG(port),       "NETA_GMAC_SERIAL_REG");
+	mvEthRegPrint(NETA_GMAC_FIFO_PARAM_0_REG(port), "NETA_GMAC_FIFO_PARAM_0_REG");
+	mvEthRegPrint(NETA_GMAC_FIFO_PARAM_1_REG(port), "NETA_GMAC_FIFO_PARAM_1_REG");
+	mvEthRegPrint(NETA_GMAC_CAUSE_REG(port),        "NETA_GMAC_CAUSE_REG");
+	mvEthRegPrint(NETA_GMAC_MASK_REG(port),         "NETA_GMAC_MASK_REG");
+	mvEthRegPrint(NETA_GMAC_MIB_CTRL_REG(port),     "NETA_GMAC_MIB_CTRL_REG");
+}
+#endif /* MV_ETH_GMAC_NEW */
+
+void mvNetaRxqRegs(int port, int rxq)
+{
+	if (mvNetaPortCheck(port))
+		return;
+
+	if (mvNetaMaxCheck(rxq, MV_ETH_MAX_RXQ, "rxq"))
+		return;
+
+	mvOsPrintf("\n[NetA Rx: port=%d, rxq=%d]\n", port, rxq);
+	mvEthRegPrint(ETH_RX_QUEUE_COMMAND_REG(port), "ETH_RX_QUEUE_COMMAND_REG");
+	mvEthRegPrint(NETA_RXQ_CONFIG_REG(port, rxq), "NETA_RXQ_CONFIG_REG");
+	mvEthRegPrint(NETA_RXQ_INTR_TIME_COAL_REG(port, rxq), "NETA_RXQ_INTR_TIME_COAL_REG");
+	mvEthRegPrint(NETA_RXQ_BASE_ADDR_REG(port, rxq), "NETA_RXQ_BASE_ADDR_REG");
+	mvEthRegPrint(NETA_RXQ_SIZE_REG(port, rxq), "NETA_RXQ_SIZE_REG");
+	mvEthRegPrint(NETA_RXQ_THRESHOLD_REG(port, rxq), "NETA_RXQ_THRESHOLD_REG");
+	mvEthRegPrint(NETA_RXQ_STATUS_REG(port, rxq), "NETA_RXQ_STATUS_REG");
+	mvEthRegPrint(NETA_RXQ_INDEX_REG(port, rxq), "NETA_RXQ_INDEX_REG");
+}
+
+void mvNetaTxqRegs(int port, int txp, int txq)
+{
+	if (mvNetaTxpCheck(port, txp))
+		return;
+
+	if (mvNetaMaxCheck(txq, MV_ETH_MAX_TXQ, "txq"))
+		return;
+
+	mvOsPrintf("\n[NetA Tx: port=%d, txp=%d, txq=%d]\n", port, txp, txq);
+	mvEthRegPrint(NETA_TXQ_BASE_ADDR_REG(port, txp, txq), "NETA_TXQ_BASE_ADDR_REG");
+	mvEthRegPrint(NETA_TXQ_SIZE_REG(port, txp, txq), "NETA_TXQ_SIZE_REG");
+	mvEthRegPrint(NETA_TXQ_STATUS_REG(port, txp, txq), "NETA_TXQ_STATUS_REG");
+	mvEthRegPrint(NETA_TXQ_INDEX_REG(port, txp, txq), "NETA_TXQ_INDEX_REG");
+	mvEthRegPrint(NETA_TXQ_SENT_DESC_REG(port, txp, txq), "NETA_TXQ_SENT_DESC_REG");
+}
+
+void mvNetaTxpRegs(int port, int txp)
+{
+	int queue;
+
+	if (mvNetaTxpCheck(port, txp))
+		return;
+
+	mvOsPrintf("\n[NetA Tx: port=%d, txp=%d]\n", port, txp);
+	mvEthRegPrint(ETH_TX_QUEUE_COMMAND_REG(port, txp), "ETH_TX_QUEUE_COMMAND_REG");
+	for (queue = 0; queue < CONFIG_MV_ETH_TXQ; queue++)
+		mvNetaTxqRegs(port, txp, queue);
+}
+
+#ifdef CONFIG_MV_ETH_PNC
+void mvNetaPncRegs(void)
+{
+	mvEthRegPrint(MV_PNC_LOOP_CTRL_REG, "PNC_LOOP_CTRL_REG");
+	mvEthRegPrint(MV_PNC_TCAM_CTRL_REG, "PNC_TCAM_CTRL_REG");
+	mvEthRegPrint(MV_PNC_INIT_OFFS_REG, "PNC_INIT_OFFS_REG");
+	mvEthRegPrint(MV_PNC_INIT_LOOKUP_REG, "PNC_INIT_LOOKUP_REG");
+	mvEthRegPrint(MV_PNC_CAUSE_REG, "PNC_CAUSE_REG");
+	mvEthRegPrint(MV_PNC_MASK_REG, "PNC_MASK_REG");
+	mvEthRegPrint(MV_PNC_HIT_SEQ0_REG, "PNC_HIT_SEQ0_REG");
+	mvEthRegPrint(MV_PNC_HIT_SEQ1_REG, "PNC_HIT_SEQ1_REG");
+	mvEthRegPrint(MV_PNC_HIT_SEQ2_REG, "PNC_HIT_SEQ2_REG");
+	mvEthRegPrint(MV_PNC_XBAR_RET_REG, "PNC_XBAR_RET_REG");
+
+#ifdef MV_ETH_PNC_AGING
+	{
+		int     i;
+
+		mvEthRegPrint(MV_PNC_AGING_CTRL_REG,  "PNC_AGING_CTRL_REG");
+		mvEthRegPrint(MV_PNC_AGING_HI_THRESH_REG,  "PNC_AGING_HI_THRESH_REG");
+		mvOsPrintf("\n");
+		for (i = 0; i < MV_PNC_AGING_MAX_GROUP; i++)
+			mvEthRegPrint2(MV_PNC_AGING_LO_THRESH_REG(i), "PNC_AGING_LO_THRESH_REG", i);
+	}
+#endif /* MV_ETH_PNC_AGING */
+
+#ifdef MV_ETH_PNC_LB
+	mvEthRegPrint(MV_PNC_LB_CRC_INIT_REG, "PNC_LB_CRC_INIT_REG");
+#endif /* MV_ETH_PNC_LB */
+}
+#endif /* CONFIG_MV_ETH_PNC */
+
+#ifdef CONFIG_MV_ETH_PMT
+void mvNetaPmtRegs(int port, int txp)
+{
+	int i;
+
+	if (mvNetaTxpCheck(port, txp))
+		return;
+
+	mvOsPrintf("\n[NetA PMT registers: port=%d, txp=%d]\n", port, txp);
+
+#ifdef MV_ETH_PMT_NEW
+	mvEthRegPrint(NETA_TX_PMT_ACCESS_REG(port), "NETA_TX_PMT_ACCESS_REG");
+	mvEthRegPrint(NETA_TX_PMT_FIFO_THRESH_REG(port), "NETA_TX_PMT_FIFO_THRESH_REG");
+	mvEthRegPrint(NETA_TX_PMT_MTU_REG(port), "NETA_TX_PMT_MTU_REG");
+
+	mvOsPrintf("\n");
+	for (i = 0; i < NETA_TX_PMT_MAX_ETHER_TYPES; i++)
+		mvEthRegPrint2(NETA_TX_PMT_ETHER_TYPE_REG(port, i), "NETA_TX_PMT_ETHER_TYPE_REG", i);
+
+	mvOsPrintf("\n");
+	mvEthRegPrint(NETA_TX_PMT_DEF_VLAN_CFG_REG(port), "NETA_TX_PMT_DEF_VLAN_CFG_REG");
+	mvEthRegPrint(NETA_TX_PMT_DEF_DSA_1_CFG_REG(port), "NETA_TX_PMT_DEF_DSA_1_CFG_REG");
+	mvEthRegPrint(NETA_TX_PMT_DEF_DSA_2_CFG_REG(port), "NETA_TX_PMT_DEF_DSA_2_CFG_REG");
+	mvEthRegPrint(NETA_TX_PMT_DEF_DSA_SRC_DEV_REG(port), "NETA_TX_PMT_DEF_DSA_SRC_DEV_REG");
+
+	mvEthRegPrint(NETA_TX_PMT_TTL_ZERO_CNTR_REG(port), "NETA_TX_PMT_TTL_ZERO_CNTR_REG");
+	mvEthRegPrint(NETA_TX_PMT_TTL_ZERO_CNTR_REG(port), "NETA_TX_PMT_TTL_ZERO_CNTR_REG");
+
+	mvOsPrintf("\n");
+	mvEthRegPrint(NETA_TX_PMT_PPPOE_TYPE_REG(port), "NETA_TX_PMT_PPPOE_TYPE_REG");
+	mvEthRegPrint(NETA_TX_PMT_PPPOE_DATA_REG(port), "NETA_TX_PMT_PPPOE_DATA_REG");
+	mvEthRegPrint(NETA_TX_PMT_PPPOE_LEN_REG(port), "NETA_TX_PMT_PPPOE_LEN_REG");
+	mvEthRegPrint(NETA_TX_PMT_PPPOE_PROTO_REG(port), "NETA_TX_PMT_PPPOE_PROTO_REG");
+	mvOsPrintf("\n");
+
+	mvEthRegPrint(NETA_TX_PMT_CONFIG_REG(port), "NETA_TX_PMT_CONFIG_REG");
+	mvEthRegPrint(NETA_TX_PMT_STATUS_1_REG(port), "NETA_TX_PMT_STATUS_1_REG");
+	mvEthRegPrint(NETA_TX_PMT_STATUS_2_REG(port), "NETA_TX_PMT_STATUS_2_REG");
+#else
+	for (i = 0; i < NETA_TX_MAX_MH_REGS; i++)
+		mvEthRegPrint2(NETA_TX_MH_REG(port, txp, i), "NETA_TX_MH_REG", i);
+
+	mvEthRegPrint(NETA_TX_DSA_SRC_DEV_REG(port, txp), "NETA_TX_DSA_SRC_DEV_REG");
+
+	for (i = 0; i < NETA_TX_MAX_ETH_TYPE_REGS; i++)
+		mvEthRegPrint2(NETA_TX_ETH_TYPE_REG(port, txp, i), "NETA_TX_ETH_TYPE_REG", i);
+#endif /* MV_ETH_PMT_NEW */
+}
+#endif /* CONFIG_MV_ETH_PMT */
+
+void mvNetaPortRegs(int port)
+{
+	int i;
+	MV_NETA_PORT_CTRL *pPortCtrl = mvNetaPortHndlGet(port);
+
+	if (mvNetaPortCheck(port))
+		return;
+
+	/* Per CPU register */
+	for (i = 0; i < NETA_MAX_CPU_REGS; i++)
+		mvEthRegPrint2(NETA_CPU_MAP_REG(port, i), "NETA_CPU_MAP_REG", i);
+
+	/* Per BM pool registers */
+	for (i = 0; i < MV_BM_POOLS; i++)
+		mvEthRegPrint2(NETA_POOL_BUF_SIZE_REG(port, i), "NETA_POOL_BUF_SIZE_REG", i);
+
+	/* Per port registers */
+	mvEthRegPrint(NETA_VERSION_REG(port), "NETA_VERSION_REG");
+	mvEthRegPrint(NETA_PORT_RX_RESET_REG(port), "NETA_PORT_RX_RESET_REG");
+	for (i = 0; i < pPortCtrl->txpNum; i++)
+		mvEthRegPrint(NETA_PORT_TX_RESET_REG(port, i), "NETA_PORT_TX_RESET_REG");
+	mvEthRegPrint(NETA_BM_ADDR_REG(port), "NETA_BM_ADDR_REG");
+	mvEthRegPrint(NETA_ACC_MODE_REG(port), "NETA_ACC_MODE_REG");
+	mvEthRegPrint(NETA_INTR_NEW_CAUSE_REG(port), "NETA_INTR_NEW_CAUSE_REG");
+	mvEthRegPrint(NETA_INTR_NEW_MASK_REG(port), "NETA_INTR_NEW_MASK_REG");
+	mvEthRegPrint(NETA_INTR_MISC_CAUSE_REG(port), "NETA_INTR_MISC_CAUSE_REG");
+	mvEthRegPrint(NETA_INTR_MISC_MASK_REG(port), "NETA_INTR_MISC_MASK_REG");
+	mvEthRegPrint(NETA_INTR_ENABLE_REG(port), "NETA_INTR_ENABLE_REG");
+}
+
+/* Print status of Ethernet port */
+void mvNetaPortStatus(int port)
+{
+	MV_ETH_PORT_STATUS	link;
+	MV_NETA_PORT_CTRL 	*pPortCtrl;
+
+	if (mvNetaPortCheck(port))
+		return;
+
+	pPortCtrl = mvNetaPortHndlGet(port);
+	mvOsPrintf("\n[Link: port=%d, ctrl=%p]\n", port, pPortCtrl);
+	if (!pPortCtrl)
+		return;
+
+	if (MV_PON_PORT(port)) {
+		mvOsPrintf("GPON port %d link is up\n", port);
+	} else {
+
+		mvNetaLinkStatus(port, &link);
+
+		if (link.linkup) {
+			mvOsPrintf("link up");
+			mvOsPrintf(", %s duplex", (link.duplex == MV_ETH_DUPLEX_FULL) ? "full" : "half");
+			mvOsPrintf(", speed ");
+
+			if (link.speed == MV_ETH_SPEED_1000)
+				mvOsPrintf("1 Gbps\n");
+			else if (link.speed == MV_ETH_SPEED_100)
+				mvOsPrintf("100 Mbps\n");
+			else
+				mvOsPrintf("10 Mbps\n");
+
+			mvOsPrintf("rxFC - %s, txFC - %s\n",
+				(link.rxFc == MV_ETH_FC_DISABLE) ? "disabled" : "enabled",
+				(link.txFc == MV_ETH_FC_DISABLE) ? "disabled" : "enabled");
+		} else
+			mvOsPrintf("link down\n");
+	}
+#ifndef CONFIG_MV_ETH_PNC
+	MV_U32	regValue = MV_REG_READ(ETH_PORT_CONFIG_REG(port));
+
+	mvOsPrintf("default queue: rx=%d, arp=%d, bpdu=%d, tcp=%d, udp=%d\n",
+	   (regValue & ETH_DEF_RX_QUEUE_ALL_MASK) >> ETH_DEF_RX_QUEUE_OFFSET,
+	   (regValue & ETH_DEF_RX_ARP_QUEUE_ALL_MASK) >> ETH_DEF_RX_ARP_QUEUE_OFFSET,
+	   (regValue & ETH_DEF_RX_BPDU_QUEUE_ALL_MASK) >> ETH_DEF_RX_BPDU_QUEUE_OFFSET,
+	   (regValue & ETH_DEF_RX_TCP_QUEUE_ALL_MASK) >> ETH_DEF_RX_TCP_QUEUE_OFFSET,
+	   (regValue & ETH_DEF_RX_UDP_QUEUE_ALL_MASK) >> ETH_DEF_RX_UDP_QUEUE_OFFSET);
+#else /* CONFIG_MV_ETH_PNC */
+	if (!MV_NETA_PNC_CAP()) {
+		MV_U32	regValue = MV_REG_READ(ETH_PORT_CONFIG_REG(port));
+
+		mvOsPrintf("default queue: rx=%d, arp=%d, bpdu=%d, tcp=%d, udp=%d\n",
+		   (regValue & ETH_DEF_RX_QUEUE_ALL_MASK) >> ETH_DEF_RX_QUEUE_OFFSET,
+		   (regValue & ETH_DEF_RX_ARP_QUEUE_ALL_MASK) >> ETH_DEF_RX_ARP_QUEUE_OFFSET,
+		   (regValue & ETH_DEF_RX_BPDU_QUEUE_ALL_MASK) >> ETH_DEF_RX_BPDU_QUEUE_OFFSET,
+		   (regValue & ETH_DEF_RX_TCP_QUEUE_ALL_MASK) >> ETH_DEF_RX_TCP_QUEUE_OFFSET,
+		   (regValue & ETH_DEF_RX_UDP_QUEUE_ALL_MASK) >> ETH_DEF_RX_UDP_QUEUE_OFFSET);
+	}
+#endif /* CONFIG_MV_ETH_PNC */
+}
+
+void mvNetaRxqShow(int port, int rxq, int mode)
+{
+	MV_NETA_PORT_CTRL *pPortCtrl;
+	MV_NETA_QUEUE_CTRL *pQueueCtrl;
+
+	if (mvNetaPortCheck(port))
+		return;
+
+	if (mvNetaMaxCheck(rxq, MV_ETH_MAX_RXQ, "rxq"))
+		return;
+
+	pPortCtrl = mvNetaPortHndlGet(port);
+	if (!pPortCtrl)
+		return;
+
+	pQueueCtrl = &pPortCtrl->pRxQueue[rxq].queueCtrl;
+	mvOsPrintf("\n[NetA RxQ: port=%d, rxq=%d]\n", port, rxq);
+
+	if (!pQueueCtrl->pFirst) {
+		mvOsPrintf("rx queue %d wasn't created\n", rxq);
+		return;
+	}
+
+	mvOsPrintf("intr_coal: %d [pkts] or %d [usec]\n",
+		mvNetaRxqPktsCoalGet(port, rxq), mvNetaRxqTimeCoalGet(port, rxq));
+
+	mvOsPrintf("pFirst=%p (0x%x), numOfDescr=%d\n",
+		   pQueueCtrl->pFirst,
+		   (MV_U32) netaDescVirtToPhys(pQueueCtrl, (MV_U8 *) pQueueCtrl->pFirst), pQueueCtrl->lastDesc + 1);
+
+	mvOsPrintf("nextToProc=%d (%p), rxqOccupied=%d, rxqNonOccupied=%d\n",
+		   pQueueCtrl->nextToProc,
+		   MV_NETA_QUEUE_DESC_PTR(pQueueCtrl, pQueueCtrl->nextToProc),
+		   mvNetaRxqBusyDescNumGet(port, rxq), mvNetaRxqFreeDescNumGet(port, rxq));
+
+	if (mode > 0) {
+		int i;
+		NETA_RX_DESC *pRxDesc;
+
+		for (i = 0; i <= pQueueCtrl->lastDesc; i++) {
+			pRxDesc = (NETA_RX_DESC *) MV_NETA_QUEUE_DESC_PTR(pQueueCtrl, i);
+
+			mvOsPrintf("%3d. desc=%p, status=%08x, data=%4d, bufAddr=%08x, bufCookie=%08x\n",
+				   i, pRxDesc, pRxDesc->status,
+				   pRxDesc->dataSize, (MV_U32) pRxDesc->bufPhysAddr, (MV_U32) pRxDesc->bufCookie);
+
+			mvOsCacheLineInv(NULL, pRxDesc);
+		}
+	}
+}
+
+void mvNetaTxqShow(int port, int txp, int txq, int mode)
+{
+	MV_NETA_PORT_CTRL *pPortCtrl;
+	MV_NETA_TXQ_CTRL *pTxqCtrl;
+	MV_NETA_QUEUE_CTRL *pQueueCtrl;
+
+	if (mvNetaTxpCheck(port, txp))
+		return;
+
+	pPortCtrl = mvNetaPortHndlGet(port);
+	if (!pPortCtrl)
+		return;
+
+	if (mvNetaMaxCheck(txq, MV_ETH_MAX_TXQ, "txq"))
+		return;
+
+	mvOsPrintf("\n[NetA TxQ: port=%d, txp=%d, txq=%d]\n", port, txp, txq);
+
+	pTxqCtrl = mvNetaTxqHndlGet(port, txp, txq);
+	pQueueCtrl = &pTxqCtrl->queueCtrl;
+
+	if (!pQueueCtrl->pFirst) {
+		mvOsPrintf("tx queue %d wasn't created\n", txq);
+		return;
+	}
+
+	mvOsPrintf("pFirst=%p (0x%x), numOfDescr=%d\n",
+		   pQueueCtrl->pFirst,
+		   (MV_U32) netaDescVirtToPhys(pQueueCtrl, (MV_U8 *) pQueueCtrl->pFirst), pQueueCtrl->lastDesc + 1);
+
+	mvOsPrintf("nextToProc=%d (%p), txqSent=%d, txqPending=%d\n",
+		   pQueueCtrl->nextToProc,
+		   MV_NETA_QUEUE_DESC_PTR(pQueueCtrl, pQueueCtrl->nextToProc),
+		   mvNetaTxqSentDescNumGet(port, txp, txq), mvNetaTxqPendDescNumGet(port, txp, txq));
+
+	if (mode > 0) {
+		int i;
+		NETA_TX_DESC *pTxDesc;
+
+		for (i = 0; i <= pQueueCtrl->lastDesc; i++) {
+			pTxDesc = (NETA_TX_DESC *) MV_NETA_QUEUE_DESC_PTR(pQueueCtrl, i);
+
+			mvOsPrintf("%3d. pTxDesc=%p, cmd=%08x, data=%4d, bufAddr=%08x, gponinfo=%x\n",
+				   i, pTxDesc, pTxDesc->command, pTxDesc->dataSize,
+				   (MV_U32) pTxDesc->bufPhysAddr, pTxDesc->hw_cmd);
+
+			mvOsCacheLineInv(NULL, pTxDesc);
+		}
+	}
+}
+
+/* Print counters of the Ethernet port */
+void mvEthPortCounters(int port, int mib)
+{
+#ifndef MV_PON_MIB_SUPPORT
+	if (MV_PON_PORT(port)) {
+		mvOsPrintf("%s: not supported for PON port\n", __func__);
+		return;
+	}
+#endif /* !MV_PON_MIB_SUPPORT */
+
+	if (mvNetaTxpCheck(port, mib))
+		return;
+
+	if (!mvNetaPortHndlGet(port))
+		return;
+
+	mvOsPrintf("\nMIBs: port=%d, mib=%d\n", port, mib);
+
+#ifdef CONFIG_MV_PON
+	if (MV_PON_PORT(port)) {
+		int	i;
+
+		for (i = 0; i < NETA_PON_MIB_MAX_GEM_PID; i++)
+			mvEthRegPrint2(NETA_PON_MIB_RX_CTRL_REG(i), "NETA_PON_MIB_RX_CTRL_REG", i);
+
+		mvEthRegPrint(NETA_PON_MIB_RX_DEF_REG, "NETA_PON_MIB_RX_DEF_REG");
+	}
+#endif /* CONFIG_MV_PON */
+
+	mvOsPrintf("\n[Rx]\n");
+	mvEthMibPrint(port, mib, ETH_MIB_GOOD_FRAMES_RECEIVED, "GOOD_FRAMES_RECEIVED");
+	mvEthMibPrint(port, mib, ETH_MIB_BAD_FRAMES_RECEIVED, "BAD_FRAMES_RECEIVED");
+	mvEthMibPrint(port, mib, ETH_MIB_BROADCAST_FRAMES_RECEIVED, "BROADCAST_FRAMES_RECEIVED");
+	mvEthMibPrint(port, mib, ETH_MIB_MULTICAST_FRAMES_RECEIVED, "MULTICAST_FRAMES_RECEIVED");
+	mvEthMibPrint(port, mib, ETH_MIB_GOOD_OCTETS_RECEIVED_LOW, "GOOD_OCTETS_RECEIVED");
+	mvOsPrintf("\n[Rx Errors]\n");
+	mvEthMibPrint(port, mib, ETH_MIB_BAD_OCTETS_RECEIVED, "BAD_OCTETS_RECEIVED");
+	mvEthMibPrint(port, mib, ETH_MIB_UNDERSIZE_RECEIVED, "UNDERSIZE_RECEIVED");
+	mvEthMibPrint(port, mib, ETH_MIB_FRAGMENTS_RECEIVED, "FRAGMENTS_RECEIVED");
+	mvEthMibPrint(port, mib, ETH_MIB_OVERSIZE_RECEIVED, "OVERSIZE_RECEIVED");
+	mvEthMibPrint(port, mib, ETH_MIB_JABBER_RECEIVED, "JABBER_RECEIVED");
+	mvEthMibPrint(port, mib, ETH_MIB_MAC_RECEIVE_ERROR, "MAC_RECEIVE_ERROR");
+	mvEthMibPrint(port, mib, ETH_MIB_BAD_CRC_EVENT, "BAD_CRC_EVENT");
+	mvEthRegPrint0(ETH_RX_DISCARD_PKTS_CNTR_REG(port), "RX_DISCARD_PKTS_CNTR_REG");
+	mvEthRegPrint0(ETH_RX_OVERRUN_PKTS_CNTR_REG(port), "RX_OVERRUN_PKTS_CNTR_REG");
+	mvOsPrintf("\n[Tx]\n");
+	mvEthMibPrint(port, mib, ETH_MIB_GOOD_FRAMES_SENT, "GOOD_FRAMES_SENT");
+	mvEthMibPrint(port, mib, ETH_MIB_BROADCAST_FRAMES_SENT, "BROADCAST_FRAMES_SENT");
+	mvEthMibPrint(port, mib, ETH_MIB_MULTICAST_FRAMES_SENT, "MULTICAST_FRAMES_SENT");
+	mvEthMibPrint(port, mib, ETH_MIB_GOOD_OCTETS_SENT_LOW, "GOOD_OCTETS_SENT");
+	mvOsPrintf("\n[Tx Errors]\n");
+	mvEthMibPrint(port, mib, ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR, "INTERNAL_MAC_TRANSMIT_ERR");
+	mvEthMibPrint(port, mib, ETH_MIB_EXCESSIVE_COLLISION, "EXCESSIVE_COLLISION");
+	mvEthMibPrint(port, mib, ETH_MIB_COLLISION, "COLLISION");
+	mvEthMibPrint(port, mib, ETH_MIB_LATE_COLLISION, "LATE_COLLISION");
+#ifdef MV_ETH_PMT_NEW
+	mvEthRegPrint0(NETA_TX_BAD_FCS_CNTR_REG(port, mib), "NETA_TX_BAD_FCS_CNTR_REG");
+	mvEthRegPrint0(NETA_TX_DROP_CNTR_REG(port, mib), "NETA_TX_DROP_CNTR_REG");
+#endif
+	mvOsPrintf("\n[FC control]\n");
+	mvEthMibPrint(port, mib, ETH_MIB_UNREC_MAC_CONTROL_RECEIVED, "UNREC_MAC_CONTROL_RECEIVED");
+	mvEthMibPrint(port, mib, ETH_MIB_GOOD_FC_RECEIVED, "GOOD_FC_RECEIVED");
+	mvEthMibPrint(port, mib, ETH_MIB_BAD_FC_RECEIVED, "BAD_FC_RECEIVED");
+	mvEthMibPrint(port, mib, ETH_MIB_FC_SENT, "FC_SENT");
+	mvOsPrintf("\n");
+}
+
+/* Print RMON counters of the Ethernet port */
+void mvEthPortRmonCounters(int port, int mib)
+{
+	void	*pHndl;
+
+	if (mvNetaTxpCheck(port, mib))
+		return;
+
+	pHndl = mvNetaPortHndlGet(port);
+	if (!pHndl)
+		return;
+
+	mvOsPrintf("\n[RMON]: port=%d, mib=%d\n", port, mib);
+
+	mvEthMibPrint(port, mib, ETH_MIB_FRAMES_64_OCTETS, "0...64");
+	mvEthMibPrint(port, mib, ETH_MIB_FRAMES_65_TO_127_OCTETS, "65...127");
+	mvEthMibPrint(port, mib, ETH_MIB_FRAMES_128_TO_255_OCTETS, "128...255");
+	mvEthMibPrint(port, mib, ETH_MIB_FRAMES_256_TO_511_OCTETS, "256...511");
+	mvEthMibPrint(port, mib, ETH_MIB_FRAMES_512_TO_1023_OCTETS, "512...1023");
+	mvEthMibPrint(port, mib, ETH_MIB_FRAMES_1024_TO_MAX_OCTETS, "1024...Max");
+}
+
+void mvEthPortUcastShow(int port)
+{
+	MV_U32 unicastReg, macL, macH;
+	int i, j;
+
+	macL = MV_REG_READ(ETH_MAC_ADDR_LOW_REG(port));
+	macH = MV_REG_READ(ETH_MAC_ADDR_HIGH_REG(port));
+
+	mvOsPrintf("\nUnicast MAC Table: port=%d %02x:%02x:%02x:%02x:%02x:%02x\n",
+		   port, ((macH >> 24) & 0xff), ((macH >> 16) & 0xff),
+		   ((macH >> 8) & 0xff), (macH & 0xff), ((macL >> 8) & 0xff), (macL & 0xff));
+
+	for (i = 0; i < 4; i++) {
+		unicastReg = MV_REG_READ((ETH_DA_FILTER_UCAST_BASE(port) + i * 4));
+		for (j = 0; j < 4; j++) {
+			MV_U8 macEntry = (unicastReg >> (8 * j)) & 0xFF;
+			mvOsPrintf("%X: %8s, Q = %d\n", i * 4 + j,
+				   (macEntry & BIT0) ? "accept" : "reject", (macEntry >> 1) & 0x7);
+		}
+	}
+}
+
+void mvEthPortMcastShow(int port)
+{
+	int tblIdx, regIdx;
+	MV_U32 regVal;
+	MV_NETA_PORT_CTRL *pPortCtrl;
+
+	if (mvNetaPortCheck(port))
+		return;
+
+	pPortCtrl = mvNetaPortHndlGet(port);
+	if (!pPortCtrl)
+		return;
+
+	mvOsPrintf("\nSpecial (IP) Multicast Table port=%d: 01:00:5E:00:00:XX\n", port);
+
+	for (tblIdx = 0; tblIdx < (256 / 4); tblIdx++) {
+		regVal = MV_REG_READ((ETH_DA_FILTER_SPEC_MCAST_BASE(port) + tblIdx * 4));
+		for (regIdx = 0; regIdx < 4; regIdx++) {
+			if ((regVal & (0x01 << (regIdx * 8))) != 0) {
+				mvOsPrintf("0x%02X: accepted, rxQ = %d\n",
+					   tblIdx * 4 + regIdx, ((regVal >> (regIdx * 8 + 1)) & 0x07));
+			}
+		}
+	}
+
+	mvOsPrintf("\nOther Multicast Table: port=%d\n", port);
+	for (tblIdx = 0; tblIdx < (256 / 4); tblIdx++) {
+		regVal = MV_REG_READ((ETH_DA_FILTER_OTH_MCAST_BASE(port) + tblIdx * 4));
+		for (regIdx = 0; regIdx < 4; regIdx++) {
+			if ((regVal & (0x01 << (regIdx * 8))) != 0) {
+				mvOsPrintf("crc8=0x%02X: accepted, rxq = %d, ref=%d\n",
+					   tblIdx * 4 + regIdx, ((regVal >> (regIdx * 8 + 1)) & 0x07),
+					   pPortCtrl->mcastCount[tblIdx * 4 + regIdx]);
+			}
+		}
+	}
+}
+
+#ifdef CONFIG_MV_ETH_HWF
+void mvNetaHwfTxpCntrs(int port, int p, int txp)
+{
+	int txq;
+	MV_U32 regVal;
+
+	if (mvNetaPortCheck(port) || mvNetaPortCheck(p))
+		return;
+
+	if (mvNetaTxpCheck(p, txp))
+		return;
+
+	mvOsPrintf("\n[HWF Counters: port=%d]\n", port);
+
+	for (txq = 0; txq < CONFIG_MV_ETH_TXQ; txq++) {
+		mvOsPrintf("\n[HWF: hwf_txp=%d, txq=%d]\n", p + txp, txq);
+
+		regVal = NETA_HWF_TX_PORT_MASK(p + txp) | NETA_HWF_TXQ_MASK(txq);
+		MV_REG_WRITE(NETA_HWF_TX_PTR_REG(port), regVal);
+
+		mvEthRegPrint(NETA_HWF_ACCEPTED_CNTR(port), "NETA_HWF_ACCEPTED_CNTR");
+		mvEthRegPrint(NETA_HWF_YELLOW_DROP_CNTR(port), "NETA_HWF_YELLOW_DROP_CNTR");
+		mvEthRegPrint(NETA_HWF_GREEN_DROP_CNTR(port), "NETA_HWF_GREEN_DROP_CNTR");
+		mvEthRegPrint(NETA_HWF_THRESH_DROP_CNTR(port), "NETA_HWF_THRESH_DROP_CNTR");
+	}
+}
+
+void mvNetaHwfRxpRegs(int port)
+{
+	int txpNum, txp;
+
+	if (mvNetaPortCheck(port))
+		return;
+
+	mvOsPrintf("\n[HWF Config: port=%d]\n", port);
+	mvEthRegPrint(NETA_HWF_RX_CTRL_REG(port), "NETA_HWF_RX_CTRL_REG");
+	mvEthRegPrint(NETA_HWF_RX_THRESH_REG(port), "NETA_HWF_RX_THRESH_REG");
+
+	/* Calculate total number of txp */
+	txpNum = 2;
+
+#ifdef CONFIG_MV_PON
+	txpNum += MV_ETH_MAX_TCONT();
+#endif /* CONFIG_MV_PON */
+
+	for (txp = 0; txp < txpNum; txp += 2)
+		mvEthRegPrint2(NETA_HWF_TXP_CFG_REG(port, txp), "NETA_HWF_TXP_CFG_REG", txp);
+}
+
+void mvNetaHwfTxpRegs(int port, int p, int txp)
+{
+	int txq;
+	MV_U32 regVal;
+
+	if (mvNetaPortCheck(port) || mvNetaPortCheck(p))
+		return;
+
+	if (mvNetaTxpCheck(p, txp))
+		return;
+
+	for (txq = 0; txq < CONFIG_MV_ETH_TXQ; txq++) {
+		mvOsPrintf("\n[HWF: hwf_txp=%d, txq=%d]\n", p + txp, txq);
+		regVal = NETA_HWF_TX_PORT_MASK(p + txp) | NETA_HWF_TXQ_MASK(txq);
+		MV_REG_WRITE(NETA_HWF_TX_PTR_REG(port), regVal);
+
+		mvEthRegPrint(NETA_HWF_DROP_TH_REG(port), "NETA_HWF_DROP_TH_REG");
+		mvEthRegPrint(NETA_HWF_TXQ_BASE_REG(port), "NETA_HWF_TXQ_BASE_REG");
+		mvEthRegPrint(NETA_HWF_TXQ_SIZE_REG(port), "NETA_HWF_TXQ_SIZE_REG");
+		mvEthRegPrint(NETA_HWF_TXQ_ENABLE_REG(port), "NETA_HWF_TXQ_ENABLE_REG");
+	}
+}
+#endif /* CONFIG_MV_ETH_HWF */
+
+void mvNetaCpuDump(int port, int cpu, int rxTx)
+{
+	MV_U32 regVal = MV_REG_READ(NETA_CPU_MAP_REG(port, cpu));
+	int j;
+	static const char  *qType[] = {"RXQ", "TXQ"};
+
+	if (rxTx > 1 || rxTx < 0) {
+		mvOsPrintf("Error - invalid queue type %d , valid values are 0 for TXQ or 1 for RXQ\n", rxTx);
+		return;
+	}
+
+	if (rxTx == 1)
+		regVal >>= 8;
+
+	for (j = 0; j < CONFIG_MV_ETH_RXQ; j++) {
+		if (regVal & 1)
+			mvOsPrintf("%s-%d ", qType[rxTx], j);
+		else
+			mvOsPrintf("       ");
+
+		regVal >>= 1;
+	}
+	mvOsPrintf("\n");
+}
+
+#ifdef CONFIG_MV_PON
+void mvNetaPonTxpRegs(int port, int txp)
+{
+	int txq;
+
+	if (mvNetaTxpCheck(port, txp))
+		return;
+
+	mvOsPrintf("\n[NetA PON TXQ Bytes registers: port=%d, txp=%d]\n", port, txp);
+	for (txq = 0; txq < MV_ETH_MAX_TXQ; txq++)
+		mvEthRegPrint2(NETA_TXQ_NEW_BYTES_REG(port, txp, txq), "NETA_TXQ_NEW_BYTES_REG", txq);
+}
+#endif /* CONFIG_MV_PON */
+
diff --git a/drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvNetaRegs.h b/drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvNetaRegs.h
new file mode 100644
index 000000000000..6262dc6bb8e2
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hal/gbe/mvNetaRegs.h
@@ -0,0 +1,1256 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __mvNetaRegs_h__
+#define __mvNetaRegs_h__
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#ifdef CONFIG_ARCH_MVEBU
+#include "mvebu-soc-id.h"
+#include "mvNetConfig.h"
+#else
+#include "mvSysEthConfig.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#endif
+
+#ifdef CONFIG_OF
+extern int port_vbase[MV_ETH_MAX_PORTS];
+#define NETA_REG_BASE(port)					port_vbase[port]
+/******* SERDES registers **************************************************/
+#define SGMII_SERDES_CFG_REG(p)				(NETA_REG_BASE(p) + 0x24A0)
+#else
+#define NETA_REG_BASE(port) 				MV_ETH_REGS_BASE(port)
+#endif /* CONFIG_OF */
+
+/************************** NETA TX Registers ******************************/
+
+#ifdef CONFIG_MV_PON
+#define NETA_TX_REG_BASE(p, txp)  	(MV_PON_PORT(p) ? \
+					(MV_PON_REG_BASE + 0x4000 + ((txp) >> 1) * 0x2000 + ((txp) & 0x1) * 0x400) :  \
+					(NETA_REG_BASE(p) + 0x2400))
+#else
+#define NETA_TX_REG_BASE(p, txp)   	(NETA_REG_BASE(p) + 0x2400)
+#endif /* CONFIG_MV_PON */
+
+/************************** NETA RX Registers ******************************/
+
+/* PxRXyC: Port RX queues Configuration Register */
+#define NETA_RXQ_CONFIG_REG(p, q)           (NETA_REG_BASE(p) + 0x1400 + ((q) << 2))
+
+#define NETA_RXQ_HW_BUF_ALLOC_BIT           0
+#define NETA_RXQ_HW_BUF_ALLOC_MASK          (1 << NETA_RXQ_HW_BUF_ALLOC_BIT)
+
+#define NETA_RXQ_SHORT_POOL_ID_OFFS         4
+#define NETA_RXQ_SHORT_POOL_ID_MASK         (0x3 << NETA_RXQ_SHORT_POOL_ID_OFFS)
+
+#define NETA_RXQ_LONG_POOL_ID_OFFS          6
+#define NETA_RXQ_LONG_POOL_ID_MASK          (0x3 << NETA_RXQ_LONG_POOL_ID_OFFS)
+
+#define NETA_RXQ_PACKET_OFFSET_OFFS         8
+#define NETA_RXQ_PACKET_OFFSET_ALL_MASK     (0xF << NETA_RXQ_PACKET_OFFSET_OFFS)
+#define NETA_RXQ_PACKET_OFFSET_MASK(offs)   ((offs) << NETA_RXQ_PACKET_OFFSET_OFFS)
+
+
+#define NETA_RXQ_INTR_ENABLE_BIT            15
+#define NETA_RXQ_INTR_ENABLE_MASK           (0x1 << NETA_RXQ_INTR_ENABLE_BIT)
+
+/* ????? What about PREFETCH commands 1, 2, 3 */
+#define NETA_RXQ_PREFETCH_MODE_BIT          16
+#define NETA_RXQ_PREFETCH_PNC               (0 << NETA_RXQ_PREFETCH_MODE_BIT)
+#define NETA_RXQ_PREFETCH_CMD_0             (1 << NETA_RXQ_PREFETCH_MODE_BIT)
+/*-------------------------------------------------------------------------------*/
+
+#define NETA_RXQ_SNOOP_REG(p, q)            (NETA_REG_BASE(p) + 0x1420 + ((q) << 2))
+
+#define NETA_RXQ_SNOOP_BYTES_OFFS           0
+#define NETA_RXQ_SNOOP_BYTES_MASK           (0x3FFF << NETA_RXQ_SNOOP_BYTES_OFFS)
+
+#define NETA_RXQ_L2_DEPOSIT_BYTES_OFFS      16
+#define NETA_RXQ_L2_DEPOSIT_BYTES_MASK      (0x3FFF << NETA_RXQ_L2_DEPOSIT_BYTES_OFFS)
+
+
+#define NETA_RXQ_PREFETCH_01_REG(p, q)      (NETA_REG_BASE(p) + 0x1440 + ((q) << 2))
+#define NETA_RXQ_PREFETCH_23_REG(p, q)      (NETA_REG_BASE(p) + 0x1460 + ((q) << 2))
+
+#define NETA_RXQ_PREFETCH_CMD_OFFS(cmd)     (((cmd) & 1) ? 16 : 0)
+#define NETA_RXQ_PREFETCH_CMD_MASK(cmd)     (0xFFFF << NETA_RXQ_PREFETCH_CMD_OFFS(cmd))
+/*-------------------------------------------------------------------------------*/
+
+#define NETA_RXQ_BASE_ADDR_REG(p, q)        (NETA_REG_BASE(p) + 0x1480 + ((q) << 2))
+#define NETA_RXQ_SIZE_REG(p, q)             (NETA_REG_BASE(p) + 0x14A0 + ((q) << 2))
+
+#define NETA_RXQ_DESC_NUM_OFFS              0
+#define NETA_RXQ_DESC_NUM_MASK              (0x3FFF << NETA_RXQ_DESC_NUM_OFFS)
+
+#define NETA_RXQ_BUF_SIZE_OFFS              19
+#define NETA_RXQ_BUF_SIZE_MASK              (0x1FFF << NETA_RXQ_BUF_SIZE_OFFS)
+/*-------------------------------------------------------------------------------*/
+
+#define NETA_RXQ_THRESHOLD_REG(p, q)        (NETA_REG_BASE(p) + 0x14C0 + ((q) << 2))
+#define NETA_RXQ_STATUS_REG(p, q)           (NETA_REG_BASE(p) + 0x14E0 + ((q) << 2))
+
+#define NETA_RXQ_OCCUPIED_DESC_OFFS         0
+#define NETA_RXQ_OCCUPIED_DESC_ALL_MASK     (0x3FFF << NETA_RXQ_OCCUPIED_DESC_OFFS)
+#define NETA_RXQ_OCCUPIED_DESC_MASK(val)    ((val) << NETA_RXQ_OCCUPIED_DESC_OFFS)
+
+#define NETA_RXQ_NON_OCCUPIED_DESC_OFFS     16
+#define NETA_RXQ_NON_OCCUPIED_DESC_ALL_MASK (0x3FFF << NETA_RXQ_NON_OCCUPIED_DESC_OFFS)
+#define NETA_RXQ_NON_OCCUPIED_DESC_MASK(v)  ((v) << NETA_RXQ_NON_OCCUPIED_DESC_OFFS)
+/*-------------------------------------------------------------------------------*/
+
+#define NETA_RXQ_STATUS_UPDATE_REG(p, q)    (NETA_REG_BASE(p) + 0x1500 + ((q) << 2))
+
+/* Decrement OCCUPIED Descriptors counter */
+#define NETA_RXQ_DEC_OCCUPIED_OFFS          0
+#define NETA_RXQ_DEC_OCCUPIED_MASK          (0xFF << NETA_RXQ_DEC_OCCUPIED_OFFS)
+
+/* Increment NON_OCCUPIED Descriptors counter */
+#define NETA_RXQ_ADD_NON_OCCUPIED_OFFS      16
+#define NETA_RXQ_ADD_NON_OCCUPIED_MASK      (0xFF << NETA_RXQ_ADD_NON_OCCUPIED_OFFS)
+/*-------------------------------------------------------------------------------*/
+
+/* Port RX queues Descriptors Index Register (a register per RX Queue) */
+#define NETA_RXQ_INDEX_REG(p, q)            (NETA_REG_BASE(p) + 0x1520 + ((q) << 2))
+
+#define NETA_RXQ_NEXT_DESC_INDEX_OFFS       0
+#define NETA_RXQ_NEXT_DESC_INDEX_MASK       (0x3FFF << NETA_RXQ_NEXT_DESC_INDEX_OFFS)
+/*-------------------------------------------------------------------------------*/
+
+/* Port Pool-N Buffer Size Register - 8 bytes alignment */
+#define NETA_POOL_BUF_SIZE_REG(p, pool)     (NETA_REG_BASE(p) + 0x1700 + ((pool) << 2))
+#define NETA_POOL_BUF_SIZE_ALIGN            8
+#define NETA_POOL_BUF_SIZE_OFFS             3
+#define NETA_POOL_BUF_SIZE_MASK             (0x1FFF << NETA_POOL_BUF_SIZE_OFFS)
+/*-------------------------------------------------------------------------------*/
+
+/* Port RX Flow Control register */
+#define NETA_FLOW_CONTROL_REG(p)            (NETA_REG_BASE(p) + 0x1710)
+
+#define NETA_PRIO_PAUSE_PKT_GEN_BIT         0
+#define NETA_PRIO_PAUSE_PKT_GEN_GIGA        (0 << NETA_PRIO_PAUSE_PKT_GEN_BIT)
+#define NETA_PRIO_PAUSE_PKT_GEN_CPU         (1 << NETA_PRIO_PAUSE_PKT_GEN_BIT)
+
+#define NETA_PRIO_TX_PAUSE_BIT              1
+#define NETA_PRIO_TX_PAUSE_GIGA             (0 << NETA_PRIO_TX_PAUSE_BIT)
+#define NETA_PRIO_TX_PAUSE_CPU              (1 << NETA_PRIO_TX_PAUSE_BIT)
+/*-------------------------------------------------------------------------------*/
+
+/* Port TX pause control register */
+#define NETA_TX_PAUSE_REG(p)                (NETA_REG_BASE(p) + 0x1714)
+
+/* ????? One register for all TXQs - problem for multi-core */
+#define NETA_TXQ_PAUSE_ENABLE_OFFS          0
+#define NETA_TXQ_PAUSE_ENABLE_ALL_MASK      (0xFF << NETA_TXQ_PAUSE_ENABLE_OFFS)
+#define NETA_TXQ_PAUSE_ENABLE_MASK(q)       ((1 << q) << NETA_TXQ_PAUSE_ENABLE_OFFS)
+/*-------------------------------------------------------------------------------*/
+
+/* Port Flow Control generation control register */
+#define NETA_FC_GEN_REG(p)                  (NETA_REG_BASE(p) + 0x1718)
+
+#define NETA_PAUSE_PKT_GEN_DATA_BIT         0
+#define NETA_PAUSE_PKT_GEN_DATA_OFF         (0 << NETA_PAUSE_PKT_GEN_DATA_BIT)
+#define NETA_PAUSE_PKT_GEN_DATA_ON          (1 << NETA_PAUSE_PKT_GEN_DATA_BIT)
+
+#define NETA_TXQ_PAUSE_PKT_GEN_OFFS         4
+#define NETA_TXQ_PAUSE_PKT_GEN_ALL_MASK     (0x7 << NETA_TXQ_PAUSE_PKT_GEN_OFFS)
+#define NETA_TXQ_PAUSE_PKT_GEN_MASK(q)      ((1 << q) << NETA_TXQ_PAUSE_PKT_GEN_OFFS)
+
+#define NETA_RX_DEBUG_REG(p)                (NETA_REG_BASE(p) + 0x17f0)
+/* RXQ memory dump: offset = 1c00 - 1cbc */
+
+/* PxRXINIT: Port RX Initialization Register */
+#define NETA_PORT_RX_RESET_REG(p)           (NETA_REG_BASE(p) + 0x1cc0)
+
+#define NETA_PORT_RX_DMA_RESET_BIT          0
+#define NETA_PORT_RX_DMA_RESET_MASK         (1 << NETA_PORT_RX_DMA_RESET_BIT)
+/*-------------------------------------------------------------------------------*/
+
+
+#define NETA_HWF_RX_CTRL_REG(p)             (NETA_REG_BASE(p) + 0x1d00)
+
+#define NETA_COLOR_SRC_SEL_BIT				0
+#define NETA_COLOR_SRC_SEL_MASK				(1 << NETA_COLOR_SRC_SEL_BIT)
+
+#define NETA_GEM_PID_SRC_SEL_OFFS           4
+#define NETA_GEM_PID_SRC_SEL_MASK           (7 << NETA_GEM_PID_SRC_SEL_OFFS)
+#define NETA_GEM_PID_SRC_GPON_HDR           (0 << NETA_GEM_PID_SRC_SEL_OFFS)
+#define NETA_GEM_PID_SRC_EXT_DSA_TAG        (1 << NETA_GEM_PID_SRC_SEL_OFFS)
+#define NETA_GEM_PID_SRC_FLOW_ID            (2 << NETA_GEM_PID_SRC_SEL_OFFS)
+#define NETA_GEM_PID_SRC_DSA_TAG            (3 << NETA_GEM_PID_SRC_SEL_OFFS)
+#define NETA_GEM_PID_SRC_ZERO               (4 << NETA_GEM_PID_SRC_SEL_OFFS)
+
+#define NETA_TXQ_SRC_SEL_BIT                8
+#define NETA_TXQ_SRC_SEL_MASK               (1 << NETA_TXQ_SRC_SEL_BIT)
+#define NETA_TXQ_SRC_FLOW_ID                (0 << NETA_TXQ_SRC_SEL_BIT)
+#define NETA_TXQ_SRC_RES_INFO               (1 << NETA_TXQ_SRC_SEL_BIT)
+
+#ifdef MV_ETH_PMT_NEW
+
+#define NETA_MH_SEL_OFFS                    12
+#define NETA_MH_SEL_MASK                    (0xF << NETA_MH_SEL_OFFS)
+#define NETA_MH_DONT_CHANGE                 (0 << NETA_MH_SEL_OFFS)
+#define NETA_MH_REPLACE_GPON_HDR            (1 << NETA_MH_SEL_OFFS)
+#define NETA_MH_REPLACE_MH_REG(r)           (((r) + 1) << NETA_MH_SEL_OFFS)
+
+#define NETA_MH_SRC_PNC_BIT                 16
+#define NETA_MH_SRC_PNC_MASK                (1 << NETA_MH_SRC_PNC_BIT)
+
+#define NETA_HWF_ENABLE_BIT                 17
+#define NETA_HWF_ENABLE_MASK                (1 << NETA_HWF_ENABLE_BIT)
+
+#else
+
+#define NETA_MH_SEL_OFFS                    12
+#define NETA_MH_SEL_MASK                    (0x7 << NETA_MH_SEL_OFFS)
+#define NETA_MH_DONT_CHANGE                 (0 << NETA_MH_SEL_OFFS)
+#define NETA_MH_REPLACE_GPON_HDR            (1 << NETA_MH_SEL_OFFS)
+#define NETA_MH_REPLACE_MH_REG(r)           (((r) + 1) << NETA_MH_SEL_OFFS)
+
+#define NETA_MH_SRC_PNC_BIT                 15
+#define NETA_MH_SRC_PNC_MASK                (1 << NETA_MH_SRC_PNC_BIT)
+
+#define NETA_HWF_ENABLE_BIT                 16
+#define NETA_HWF_ENABLE_MASK                (1 << NETA_HWF_ENABLE_BIT)
+
+#endif /* MV_ETH_PMT_NEW */
+
+#define NETA_HWF_SHORT_POOL_OFFS            20
+#define NETA_HWF_SHORT_POOL_MASK            (3 << NETA_HWF_SHORT_POOL_OFFS)
+#define NETA_HWF_SHORT_POOL_ID(pool)        ((pool) << NETA_HWF_SHORT_POOL_OFFS)
+
+#define NETA_HWF_LONG_POOL_OFFS             22
+#define NETA_HWF_LONG_POOL_MASK             (3 << NETA_HWF_LONG_POOL_OFFS)
+#define NETA_HWF_LONG_POOL_ID(pool)         ((pool) << NETA_HWF_LONG_POOL_OFFS)
+/*-----------------------------------------------------------------------------------*/
+
+#define NETA_HWF_RX_THRESH_REG(p)           (NETA_REG_BASE(p) + 0x1d04)
+
+#ifdef MV_ETH_PMT_NEW
+
+#define NETA_HWF_RX_FIFO_WORDS_OFFS         0
+#define NETA_HWF_RX_FIFO_WORDS_MASK         (0x3FF << NETA_HWF_RX_FIFO_WORDS_OFFS)
+
+#define NETA_HWF_RX_FIFO_PKTS_OFFS          16
+#define NETA_HWF_RX_FIFO_PKTS_MASK          (0x7F << NETA_HWF_RX_FIFO_PKTS_OFFS)
+
+#else
+
+#define NETA_HWF_RX_FIFO_WORDS_OFFS         0
+#define NETA_HWF_RX_FIFO_WORDS_MASK         (0xFF << NETA_HWF_RX_FIFO_WORDS_OFFS)
+
+#define NETA_HWF_RX_FIFO_PKTS_OFFS          8
+#define NETA_HWF_RX_FIFO_PKTS_MASK          (0x1F << NETA_HWF_RX_FIFO_PKTS_OFFS)
+
+#endif /* MV_ETH_PMT_NEW */
+/*-----------------------------------------------------------------------------------*/
+
+
+#define NETA_HWF_TXP_CFG_REG(p, txp)        (NETA_REG_BASE(p) + 0x1d10 + ((txp) >> 1) * 4)
+
+#define NETA_TXP_BASE_ADDR_OFFS(txp)        (((txp) & 0x1) ? 18 : 2)
+#define NETA_TXP_BASE_ADDR_MASK(txp)        (0xFFFF << NETA_TXP_BASE_ADDR_OFFS(txp))
+/*-----------------------------------------------------------------------------------*/
+
+#define NETA_HWF_TX_PTR_REG(p)              (NETA_REG_BASE(p) + 0x1d30)
+
+#define NETA_HWF_TX_PORT_OFFS               11
+#define NETA_HWF_TX_PORT_ALL_MASK           (0xF << NETA_HWF_TX_PORT_OFFS)
+#define NETA_HWF_TX_PORT_MASK(txp)          ((txp) << NETA_HWF_TX_PORT_OFFS)
+
+#define NETA_HWF_TXQ_OFFS                   8
+#define NETA_HWF_TXQ_ALL_MASK               (0x7 << NETA_HWF_TXQ_OFFS)
+#define NETA_HWF_TXQ_MASK(txq)              ((txq) << NETA_HWF_TXQ_OFFS)
+
+#define NETA_HWF_REG_OFFS                   0
+#define NETA_HWF_REG_ALL_MASK               (0x7 << NETA_HWF_REG_OFFS)
+#define NETA_HWF_REG_MASK(reg)              ((reg) << NETA_HWF_REG_OFFS)
+/*-----------------------------------------------------------------------------------*/
+
+#define NETA_HWF_DROP_TH_REG(p)             (NETA_REG_BASE(p) + 0x1d40)
+
+#define NETA_YELLOW_DROP_THRESH_OFFS        0
+#define NETA_YELLOW_DROP_THRESH_MASK        (0x3fff << NETA_YELLOW_DROP_THRESH_OFFS)
+
+#define NETA_YELLOW_DROP_RND_GEN_OFFS       16
+#define NETA_YELLOW_DROP_RND_GEN_MASK       (0xf << NETA_YELLOW_DROP_RND_GEN_OFFS)
+/*-----------------------------------------------------------------------------------*/
+
+#define NETA_HWF_TXQ_BASE_REG(p)            (NETA_REG_BASE(p) + 0x1d44)
+/*-----------------------------------------------------------------------------------*/
+
+#define NETA_HWF_TXQ_SIZE_REG(p)            (NETA_REG_BASE(p) + 0x1d48)
+
+#define NETA_HWF_TXQ_SIZE_OFFS              0
+#define NETA_HWF_TXQ_SIZE_MASK              (0x3fff << NETA_HWF_TXQ_SIZE_OFFS)
+/*-----------------------------------------------------------------------------------*/
+
+#define NETA_HWF_TXQ_ENABLE_REG(p)          (NETA_REG_BASE(p) + 0x1d4c)
+
+#define NETA_HWF_TXQ_ENABLE_BIT             0
+#define NETA_HWF_TXQ_ENABLE_MASK            (1 << NETA_HWF_TXQ_ENABLE_BIT)
+/*-----------------------------------------------------------------------------------*/
+
+#define NETA_HWF_ACCEPTED_CNTR(p)           (NETA_REG_BASE(p) + 0x1d50)
+#define NETA_HWF_YELLOW_DROP_CNTR(p)        (NETA_REG_BASE(p) + 0x1d54)
+#define NETA_HWF_GREEN_DROP_CNTR(p)         (NETA_REG_BASE(p) + 0x1d58)
+#define NETA_HWF_THRESH_DROP_CNTR(p)        (NETA_REG_BASE(p) + 0x1d5c)
+
+#define NETA_HWF_MEMORY_REG(p)				(NETA_REG_BASE(p) + 0x1d60)
+
+/* Hardware Forwarding TX access gap register */
+#define NETA_HWF_TX_GAP_REG(p)				(NETA_REG_BASE(p) + 0x1d6C)
+
+#define NETA_HWF_SMALL_TX_GAP_BIT			0
+#define NETA_HWF_SMALL_TX_GAP_MASK			(1 << NETA_HWF_SMALL_TX_GAP_BIT)
+/*-----------------------------------------------------------------------------------*/
+
+
+
+/**************************** NETA General Registers ***********************/
+
+/* Cross Bar registers per Giga Unit */
+#define NETA_MBUS_RETRY_REG(p)              (NETA_REG_BASE((p) & ~0x1) + 0x2010)
+
+#define NETA_MBUS_RETRY_DISABLE_BIT			16
+#define NETA_MBUS_RETRY_DISABLE_MASK		(1 << NETA_MBUS_RETRY_DISABLE_BIT)
+
+#define NETA_MBUS_RETRY_CYCLES_OFFS			0
+#define NETA_MBUS_RETRY_CYCLES_MASK			(0xFF << NETA_MBUS_RETRY_CYCLES_OFFS)
+#define NETA_MBUS_RETRY_CYCLES(val)			((val) << NETA_MBUS_RETRY_CYCLES_OFFS)
+/*-------------------------------------------------------------------------------*/
+
+#define NETA_MBUS_ARBITER_REG(p)            (NETA_REG_BASE((p) & ~0x1) + 0x20C0)
+/*-------------------------------------------------------------------------------*/
+
+/* PACC - Port Acceleration Register */
+#define NETA_ACC_MODE_REG(p)                (NETA_REG_BASE(p) + 0x2500)
+
+#define NETA_ACC_MODE_OFFS                  0
+#define NETA_ACC_MODE_ALL_MASK              (7 << NETA_ACC_MODE_OFFS)
+#define NETA_ACC_MODE_MASK(mode)            ((mode) << NETA_ACC_MODE_OFFS)
+#define NETA_ACC_MODE_LEGACY                0
+#define NETA_ACC_MODE_EXT                   1
+#define NETA_ACC_MODE_EXT_BMU               2
+#define NETA_ACC_MODE_EXT_PNC               3
+#define NETA_ACC_MODE_EXT_PNC_BMU           4
+/*-------------------------------------------------------------------------------*/
+
+#define NETA_BM_ADDR_REG(p)                 (NETA_REG_BASE(p) + 0x2504)
+/*-------------------------------------------------------------------------------*/
+
+/* RXQs and TXQs to CPU mapping */
+#define NETA_MAX_CPU_REGS                   4
+#define NETA_CPU_MAP_REG(p, cpu)            (NETA_REG_BASE(p) + 0x2540 + ((cpu) << 2))
+
+#define NETA_CPU_RXQ_ACCESS_OFFS            0
+#define NETA_CPU_RXQ_ACCESS_ALL_MASK        (0xFF << NETA_CPU_RXQ_ACCESS_OFFS)
+#define NETA_CPU_RXQ_ACCESS_MASK(q)         (1 << (NETA_CPU_RXQ_ACCESS_OFFS + (q)))
+
+#define NETA_CPU_TXQ_ACCESS_OFFS            8
+#define NETA_CPU_TXQ_ACCESS_ALL_MASK        (0xFF << NETA_CPU_TXQ_ACCESS_OFFS)
+#define NETA_CPU_TXQ_ACCESS_MASK(q)         (1 << (NETA_CPU_TXQ_ACCESS_OFFS + (q)))
+/*-------------------------------------------------------------------------------*/
+
+/* Interrupt coalescing mechanism */
+#define NETA_RXQ_INTR_TIME_COAL_REG(p, q)   (NETA_REG_BASE(p) + 0x2580 + ((q) << 2))
+
+/* Exception Interrupt Port/Queue Cause register */
+#define NETA_INTR_NEW_CAUSE_REG(p)          (NETA_REG_BASE(p) + 0x25A0)
+#define NETA_INTR_NEW_MASK_REG(p)           (NETA_REG_BASE(p) + 0x25A4)
+
+#ifdef CONFIG_MV_PON
+#   define GPON_CAUSE_TXQ_SENT_SUM_OFFS     0
+#   define GPON_CAUSE_TXQ_SENT_SUM_MASK     (3 << GPON_CAUSE_TXQ_SENT_SUM_OFFS)
+#endif /* CONFIG_MV_PON */
+
+#define NETA_CAUSE_TXQ_SENT_DESC_OFFS       0
+#define NETA_CAUSE_TXQ_SENT_DESC_BIT(q)     (NETA_CAUSE_TXQ_SENT_DESC_OFFS + (q))
+#define NETA_CAUSE_TXQ_SENT_DESC_ALL_MASK   (0xFF << NETA_CAUSE_TXQ_SENT_DESC_OFFS)
+#define NETA_CAUSE_TXQ_SENT_DESC_MASK(q)    (1 << (NETA_CAUSE_TXQ_SENT_DESC_BIT(q)))
+
+#define NETA_CAUSE_RXQ_OCCUP_DESC_OFFS      8
+#define NETA_CAUSE_RXQ_OCCUP_DESC_BIT(q)    (NETA_CAUSE_RXQ_OCCUP_DESC_OFFS + (q))
+#define NETA_CAUSE_RXQ_OCCUP_DESC_ALL_MASK  (0xFF << NETA_CAUSE_RXQ_OCCUP_DESC_OFFS)
+#define NETA_CAUSE_RXQ_OCCUP_DESC_MASK(q)   (1 << (NETA_CAUSE_RXQ_OCCUP_DESC_BIT(q)))
+
+#define NETA_CAUSE_RXQ_FREE_DESC_OFFS       16
+#define NETA_CAUSE_RXQ_FREE_DESC_BIT(q)     (NETA_CAUSE_RXQ_FREE_DESC_OFFS + (q))
+#define NETA_CAUSE_RXQ_FREE_DESC_MASK(q)    (1 << (NETA_CAUSE_RXQ_FREE_DESC_BIT(q)))
+
+#define NETA_CAUSE_OLD_REG_SUM_BIT          29
+#define NETA_CAUSE_OLD_REG_SUM_MASK         (1 << NETA_CAUSE_OLD_REG_SUM_BIT)
+
+#define NETA_CAUSE_TX_ERR_SUM_BIT           30
+#define NETA_CAUSE_TX_ERR_SUM_MASK          (1 << NETA_CAUSE_TX_ERR_SUM_BIT)
+
+#define NETA_CAUSE_MISC_SUM_BIT             31
+#define NETA_CAUSE_MISC_SUM_MASK            (1 << NETA_CAUSE_MISC_SUM_BIT)
+
+#define NETA_CAUSE_TXQ_SENT_DESC_TXP_SUM    4 /* How many Tx ports are summarized by each bit */
+/*-------------------------------------------------------------------------------*/
+
+/* Data Path Port/Queue Cause Register */
+#define NETA_INTR_OLD_CAUSE_REG(p)          (NETA_REG_BASE(p) + 0x25A8)
+#define NETA_INTR_OLD_MASK_REG(p)           (NETA_REG_BASE(p) + 0x25AC)
+
+#ifdef CONFIG_MV_PON
+#   define GPON_CAUSE_TXQ_BUF_OFFS          0
+#   define GPON_CAUSE_TXQ_BUF_MASK          (3 << GPON_CAUSE_TXQ_BUF_OFFS)
+#endif /* CONFIG_MV_PON */
+
+#define NETA_CAUSE_TXQ_BUF_OFFS             0
+#define NETA_CAUSE_TXQ_BUF_BIT(q)           (NETA_CAUSE_TXQ_BUF_OFFS + (q))
+#define NETA_CAUSE_TXQ_BUF_ALL_MASK         (0xFF << NETA_CAUSE_TXQ_BUF_OFFS)
+#define NETA_CAUSE_TXQ_BUF_MASK(q)          (1 << (NETA_CAUSE_TXQ_BUF_BIT(q)))
+
+#define NETA_CAUSE_RXQ_PKT_OFFS             8
+#define NETA_CAUSE_RXQ_PKT_BIT(q)           (NETA_CAUSE_RXQ_PKT_OFFS + (q))
+#define NETA_CAUSE_RXQ_PKT_ALL_MASK         (0xFF << NETA_CAUSE_RXQ_PKT_OFFS)
+#define NETA_CAUSE_RXQ_PKT_MASK(q)          (1 << (NETA_CAUSE_RXQ_PKT_BIT(q)))
+
+#define NETA_CAUSE_RXQ_ERROR_OFFS           16
+#define NETA_CAUSE_RXQ_ERROR_BIT(q)         (NETA_CAUSE_RXQ_ERROR_OFFS + (q))
+#define NETA_CAUSE_RXQ_ERROR_ALL_MASK       (0xFF << NETA_CAUSE_RXQ_ERROR_OFFS)
+#define NETA_CAUSE_RXQ_ERROR_MASK(q)        (1 << (NETA_CAUSE_RXQ_ERROR_BIT(q)))
+
+#define NETA_CAUSE_NEW_REG_SUM_BIT          29
+#define NETA_CAUSE_NEW_REG_SUM_MASK         (1 << NETA_CAUSE_NEW_REG_SUM_BIT)
+/*-------------------------------------------------------------------------------*/
+
+/* Misc Port Cause Register */
+#define NETA_INTR_MISC_CAUSE_REG(p)         (NETA_REG_BASE(p) + 0x25B0)
+#define NETA_INTR_MISC_MASK_REG(p)          (NETA_REG_BASE(p) + 0x25B4)
+
+#define NETA_CAUSE_PHY_STATUS_CHANGE_BIT    0
+#define NETA_CAUSE_PHY_STATUS_CHANGE_MASK   (1 << NETA_CAUSE_PHY_STATUS_CHANGE_BIT)
+
+#define NETA_CAUSE_LINK_CHANGE_BIT          1
+#define NETA_CAUSE_LINK_CHANGE_MASK         (1 << NETA_CAUSE_LINK_CHANGE_BIT)
+
+#define NETA_CAUSE_PTP_BIT                  4
+
+#define NETA_CAUSE_INTERNAL_ADDR_ERR_BIT    7
+#define NETA_CAUSE_RX_OVERRUN_BIT           8
+#define NETA_CAUSE_RX_CRC_ERROR_BIT         9
+#define NETA_CAUSE_RX_LARGE_PKT_BIT         10
+#define NETA_CAUSE_TX_UNDERUN_BIT           11
+#define NETA_CAUSE_PRBS_ERR_BIT             12
+#define NETA_CAUSE_PSC_SYNC_CHANGE_BIT      13
+#define NETA_CAUSE_SERDES_SYNC_ERR_BIT      14
+
+#define NETA_CAUSE_BMU_ALLOC_ERR_OFFS       16
+#define NETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK   (0xF << NETA_CAUSE_BMU_ALLOC_ERR_OFFS)
+#define NETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (NETA_CAUSE_BMU_ALLOC_ERR_OFFS + (pool)))
+
+#define NETA_CAUSE_TXQ_ERROR_OFFS           24
+#define NETA_CAUSE_TXQ_ERROR_BIT(q)         (NETA_CAUSE_TXQ_ERROR_OFFS + (q))
+#define NETA_CAUSE_TXQ_ERROR_ALL_MASK       (0xFF << NETA_CAUSE_TXQ_ERROR_OFFS)
+#define NETA_CAUSE_TXQ_ERROR_MASK(q)        (1 << (NETA_CAUSE_TXQ_ERROR_BIT(q)))
+
+#ifdef CONFIG_MV_PON
+#   define GPON_CAUSE_TXQ_ERROR_OFFS        24
+#   define GPON_CAUSE_TXQ_ERROR_MASK        (0x3 << GPON_CAUSE_TXQ_ERROR_OFFS)
+#endif /* CONFIG_MV_PON */
+/*-------------------------------------------------------------------------------*/
+
+/* one register for all queues - problem for Multi-Core */
+#define NETA_INTR_ENABLE_REG(p)             (NETA_REG_BASE(p) + 0x25B8)
+
+#define NETA_RXQ_PKT_INTR_ENABLE_OFFS       0
+#define NETA_RXQ_PKT_INTR_ENABLE_ALL_MASK   (0xFF << NETA_RXQ_PKT_INTR_ENABLE_OFFS)
+#define NETA_RXQ_PKT_INTR_ENABLE_MASK(q)    ((1 << NETA_RXQ_PKT_INTR_ENABLE_OFFS + (q))
+
+#define NETA_TXQ_PKT_INTR_ENABLE_OFFS       8
+#define NETA_TXQ_PKT_INTR_ENABLE_ALL_MASK   (0xFF << NETA_TXQ_PKT_INTR_ENABLE_OFFS)
+#define NETA_TXQ_PKT_INTR_ENABLE_MASK(q)    ((1 << NETA_TXQ_PKT_INTR_ENABLE_OFFS + (q))
+/*-------------------------------------------------------------------------------*/
+
+#define NETA_VERSION_REG(p)                 (NETA_REG_BASE(p) + 0x25BC)
+
+#define NETA_VERSION_OFFS                   0
+#define NETA_VERSION_MASK                   (0xFF << NETA_VERSION_OFFS)
+
+/* Serdes registres: 0x72E00-0x72FFC */
+
+#ifdef CONFIG_MV_PON
+/* Extra registers for GPON port only */
+#   define GPON_TXQ_INTR_ENABLE_REG(txq)    (MV_PON_REG_BASE + 0x0480 +  (txq / 32) * 4)
+#   define GPON_TXQ_INTR_NEW_CAUSE_REG(txq) (MV_PON_REG_BASE + 0x0500 +  (txq / 32) * 8)
+#   define GPON_TXQ_INTR_NEW_MASK_REG(txq)  (MV_PON_REG_BASE + 0x0504 +  (txq / 32) * 8)
+#   define GPON_TXQ_INTR_OLD_CAUSE_REG(txq) (MV_PON_REG_BASE + 0x0540 +  (txq / 32) * 8)
+#   define GPON_TXQ_INTR_OLD_MASK_REG(txq)  (MV_PON_REG_BASE + 0x0544 +  (txq / 32) * 8)
+#   define GPON_TXQ_INTR_ERR_CAUSE_REG(txq) (MV_PON_REG_BASE + 0x0580 +  (txq / 32) * 8)
+#   define GPON_TXQ_INTR_ERR_MASK_REG(txq)  (MV_PON_REG_BASE + 0x0584 +  (txq / 32) * 8)
+#endif /* CONFIG_MV_PON */
+/*-------------------------------------------------------------------------------*/
+
+#ifdef MV_ETH_GMAC_NEW
+
+/******* New GigE MAC registers *******/
+#define NETA_GMAC_CTRL_0_REG(p)             (NETA_REG_BASE(p) + 0x2C00)
+
+#define NETA_GMAC_PORT_EN_BIT               0
+#define NETA_GMAC_PORT_EN_MASK              (1 << NETA_GMAC_PORT_EN_BIT)
+
+#define NETA_GMAC_PORT_TYPE_BIT             1
+#define NETA_GMAC_PORT_TYPE_MASK            (1 << NETA_GMAC_PORT_TYPE_BIT)
+#define NETA_GMAC_PORT_TYPE_SGMII           (0 << NETA_GMAC_PORT_TYPE_BIT)
+#define NETA_GMAC_PORT_TYPE_1000X           (1 << NETA_GMAC_PORT_TYPE_BIT)
+
+#define NETA_GMAC_MAX_RX_SIZE_OFFS          2
+#define NETA_GMAC_MAX_RX_SIZE_MASK          (0x1FFF << NETA_GMAC_MAX_RX_SIZE_OFFS)
+
+#define NETA_GMAC_MIB_CNTR_EN_BIT           15
+#define NETA_GMAC_MIB_CNTR_EN_MASK          (1 << NETA_GMAC_MIB_CNTR_EN_BIT)
+/*-------------------------------------------------------------------------------*/
+
+#define NETA_GMAC_CTRL_1_REG(p)             (NETA_REG_BASE(p) + 0x2C04)
+
+#define NETA_GMAC_CTRL_2_REG(p)             (NETA_REG_BASE(p) + 0x2C08)
+
+#define NETA_GMAC_INBAND_AN_MODE_BIT		0
+#define NETA_GMAC_INBAND_AN_MODE_MASK		(1 << NETA_GMAC_INBAND_AN_MODE_BIT)
+
+#define NETA_GMAC_PSC_ENABLE_BIT            3
+#define NETA_GMAC_PSC_ENABLE_MASK           (1 << NETA_GMAC_PSC_ENABLE_BIT)
+
+#define NETA_GMAC_PORT_RGMII_BIT            4
+#define NETA_GMAC_PORT_RGMII_MASK           (1 << NETA_GMAC_PORT_RGMII_BIT)
+
+#define NETA_GMAC_PORT_RESET_BIT            6
+#define NETA_GMAC_PORT_RESET_MASK           (1 << NETA_GMAC_PORT_RESET_BIT)
+/*-------------------------------------------------------------------------------*/
+
+#define NETA_GMAC_AN_CTRL_REG(p)                (NETA_REG_BASE(p) + 0x2C0C)
+
+#define NETA_FORCE_LINK_FAIL_BIT                0
+#define NETA_FORCE_LINK_FAIL_MASK               (1 << NETA_FORCE_LINK_FAIL_BIT)
+
+#define NETA_FORCE_LINK_PASS_BIT                1
+#define NETA_FORCE_LINK_PASS_MASK               (1 << NETA_FORCE_LINK_PASS_BIT)
+
+#define NETA_INBAND_AN_EN_BIT					2
+#define NETA_INBAND_AN_EN_MASK					(1 << NETA_INBAND_AN_EN_BIT)
+
+#define NETA_SET_MII_SPEED_100_BIT              5
+#define NETA_SET_MII_SPEED_100_MASK             (1 << NETA_SET_MII_SPEED_100_BIT)
+
+#define NETA_SET_GMII_SPEED_1000_BIT            6
+#define NETA_SET_GMII_SPEED_1000_MASK           (1 << NETA_SET_GMII_SPEED_1000_BIT)
+
+#define NETA_ENABLE_SPEED_AUTO_NEG_BIT          7
+#define NETA_ENABLE_SPEED_AUTO_NEG_MASK         (1 << NETA_ENABLE_SPEED_AUTO_NEG_BIT)
+
+#define NETA_SET_FLOW_CONTROL_BIT               8
+#define NETA_SET_FLOW_CONTROL_MASK              (1 << NETA_SET_FLOW_CONTROL_BIT)
+
+#define NETA_FLOW_CONTROL_ADVERTISE_BIT         9
+#define NETA_FLOW_CONTROL_ADVERTISE_MASK        (1 << NETA_FLOW_CONTROL_ADVERTISE_BIT)
+
+#define NETA_FLOW_CONTROL_ASYMETRIC_BIT         10
+#define NETA_FLOW_CONTROL_ASYMETRIC_MASK        (1 << NETA_FLOW_CONTROL_ASYMETRIC_BIT)
+
+#define NETA_ENABLE_FLOW_CONTROL_AUTO_NEG_BIT   11
+#define NETA_ENABLE_FLOW_CONTROL_AUTO_NEG_MASK  (1 << NETA_ENABLE_FLOW_CONTROL_AUTO_NEG_BIT)
+
+#define NETA_SET_FULL_DUPLEX_BIT                12
+#define NETA_SET_FULL_DUPLEX_MASK               (1 << NETA_SET_FULL_DUPLEX_BIT)
+
+#define NETA_ENABLE_DUPLEX_AUTO_NEG_BIT         13
+#define NETA_ENABLE_DUPLEX_AUTO_NEG_MASK        (1 << NETA_ENABLE_DUPLEX_AUTO_NEG_BIT)
+
+/*-------------------------------------------------------------------------------*/
+
+#define NETA_GMAC_STATUS_REG(p)                 (NETA_REG_BASE(p) + 0x2C10)
+
+#define NETA_GMAC_LINK_UP_BIT               0
+#define NETA_GMAC_LINK_UP_MASK              (1 << NETA_GMAC_LINK_UP_BIT)
+
+#define NETA_GMAC_SPEED_1000_BIT            1
+#define NETA_GMAC_SPEED_1000_MASK           (1 << NETA_GMAC_SPEED_1000_BIT)
+
+#define NETA_GMAC_SPEED_100_BIT             2
+#define NETA_GMAC_SPEED_100_MASK            (1 << NETA_GMAC_SPEED_100_BIT)
+
+#define NETA_GMAC_FULL_DUPLEX_BIT           3
+#define NETA_GMAC_FULL_DUPLEX_MASK          (1 << NETA_GMAC_FULL_DUPLEX_BIT)
+
+#define NETA_RX_FLOW_CTRL_ENABLE_BIT        4
+#define NETA_RX_FLOW_CTRL_ENABLE_MASK       (1 << NETA_RX_FLOW_CTRL_ENABLE_BIT)
+
+#define NETA_TX_FLOW_CTRL_ENABLE_BIT        5
+#define NETA_TX_FLOW_CTRL_ENABLE_MASK       (1 << NETA_TX_FLOW_CTRL_ENABLE_BIT)
+
+#define NETA_RX_FLOW_CTRL_ACTIVE_BIT        6
+#define NETA_RX_FLOW_CTRL_ACTIVE_MASK       (1 << NETA_RX_FLOW_CTRL_ACTIVE_BIT)
+
+#define NETA_TX_FLOW_CTRL_ACTIVE_BIT        7
+#define NETA_TX_FLOW_CTRL_ACTIVE_MASK       (1 << NETA_TX_FLOW_CTRL_ACTIVE_BIT)
+/*-------------------------------------------------------------------------------*/
+
+#define NETA_GMAC_CLOCK_DIVIDER_REG(p)		(NETA_REG_BASE(p) + 0x24f4)
+
+#define NETA_GMAC_1MS_CLOCK_ENABLE_BIT		31
+#define NETA_GMAC_1MS_CLOCK_ENABLE_BIT_MASK	(1 << NETA_GMAC_1MS_CLOCK_ENABLE_BIT)
+
+/*-------------------------------------------------------------------------------*/
+
+#define NETA_GMAC_SERIAL_REG(p)             (NETA_REG_BASE(p) + 0x2C14)
+
+#define NETA_GMAC_FIFO_PARAM_0_REG(p)       (NETA_REG_BASE(p) + 0x2C18)
+#define NETA_GMAC_FIFO_PARAM_1_REG(p)       (NETA_REG_BASE(p) + 0x2C1C)
+
+#define NETA_GMAC_CAUSE_REG(p)              (NETA_REG_BASE(p) + 0x2C20)
+#define NETA_GMAC_MASK_REG(p)               (NETA_REG_BASE(p) + 0x2C24)
+
+#define NETA_GMAC_SERDES_CFG_0_REG(p)       (NETA_REG_BASE(p) + 0x2C28)
+#define NETA_GMAC_SERDES_CFG_1_REG(p)       (NETA_REG_BASE(p) + 0x2C2C)
+#define NETA_GMAC_SERDES_CFG_2_REG(p)       (NETA_REG_BASE(p) + 0x2C30)
+#define NETA_GMAC_SERDES_CFG_3_REG(p)       (NETA_REG_BASE(p) + 0x2C34)
+
+#define NETA_GMAC_PRBS_STATUS_REG(p)        (NETA_REG_BASE(p) + 0x2C38)
+#define NETA_GMAC_PRBS_ERR_CNTR_REG(p)      (NETA_REG_BASE(p) + 0x2C3C)
+
+#define NETA_GMAC_STATUS_1_REG(p)           (NETA_REG_BASE(p) + 0x2C40)
+
+#define NETA_GMAC_MIB_CTRL_REG(p)           (NETA_REG_BASE(p) + 0x2C44)
+#define NETA_GMAC_CTRL_3_REG(p)             (NETA_REG_BASE(p) + 0x2C48)
+
+#define NETA_GMAC_QSGMII_REG(p)             (NETA_REG_BASE(p) + 0x2C4C)
+#define NETA_GMAC_QSGMII_STATUS_REG(p)      (NETA_REG_BASE(p) + 0x2C50)
+#define NETA_GMAC_QSGMII_ERR_CNTR_REG(p)    (NETA_REG_BASE(p) + 0x2C54)
+
+/* 8 FC Timer registers: 0x2c58 .. 0x2c74 */
+#define NETA_GMAC_FC_TIMER_REG(p, r)        (NETA_REG_BASE(p) + 0x2C58 + ((r) << 2))
+
+/* 4 DSA Tag registers: 0x2c78 .. 0x2c84 */
+#define NETA_GMAC_DSA_TAG_REG(p, r)         (NETA_REG_BASE(p) + 0x2C78 + ((r) << 2))
+
+#define NETA_GMAC_FC_WIN_0_REG(p)           (NETA_REG_BASE(p) + 0x2C88)
+#define NETA_GMAC_FC_WIN_1_REG(p)           (NETA_REG_BASE(p) + 0x2C8C)
+
+#define NETA_GMAC_CTRL_4_REG(p)             (NETA_REG_BASE(p) + 0x2C90)
+
+#define NETA_GMAC_SERIAL_1_REG(p)           (NETA_REG_BASE(p) + 0x2C94)
+
+#define NETA_LOW_POWER_CTRL_0_REG(p)        (NETA_REG_BASE(p) + 0x2CC0)
+
+/* Low Power Idle Control #1 register */
+#define NETA_LOW_POWER_CTRL_1_REG(p)        (NETA_REG_BASE(p) + 0x2CC4)
+
+#define NETA_LPI_REQUEST_EN_BIT             0
+#define NETA_LPI_REQUEST_EN_MASK            (1 << NETA_LPI_REQUEST_EN_BIT)
+
+#define NETA_LPI_REQUEST_FORCE_BIT          1
+#define NETA_LPI_REQUEST_FORCE_MASK         (1 << NETA_LPI_REQUEST_FORCE_BIT)
+
+#define NETA_LPI_MANUAL_MODE_BIT            2
+#define NETA_LPI_MANUAL_MODE_MASK           (1 << NETA_LPI_MANUAL_MODE_BIT)
+/*-------------------------------------------------------------------------------*/
+
+#define NETA_LOW_POWER_CTRL_2_REG(p)        (NETA_REG_BASE(p) + 0x2CC8)
+#define NETA_LOW_POWER_STATUS_REG(p)        (NETA_REG_BASE(p) + 0x2CCC)
+#define NETA_LOW_POWER_CNTR_REG(p)          (NETA_REG_BASE(p) + 0x2CD0)
+
+#endif /* MV_ETH_GMAC_NEW */
+
+
+#ifdef MV_PON_MIB_SUPPORT
+/* Special registers for PON MIB support */
+#define NETA_PON_MIB_MAX_GEM_PID            32
+#define NETA_PON_MIB_RX_CTRL_REG(idx)       (MV_PON_REG_BASE + 0x3800 + ((idx) << 2))
+#define NETA_PON_MIB_RX_DEF_REG             (MV_PON_REG_BASE + 0x3880)
+
+#define NETA_PON_MIB_RX_GEM_PID_OFFS        0
+#define NETA_PON_MIB_RX_GEM_PID_ALL_MASK    (0xFFF << NETA_PON_MIB_RX_GEM_PID_OFFS)
+#define NETA_PON_MIB_RX_GEM_PID_MASK(pid)   ((pid) << NETA_PON_MIB_RX_GEM_PID_OFFS)
+
+#define NETA_PON_MIB_RX_MIB_NO_OFFS         12
+#define NETA_PON_MIB_RX_MIB_NO_MASK         (0x7 << NETA_PON_MIB_RX_MIB_NO_OFFS)
+#define NETA_PON_MIB_RX_MIB_NO(mib)         ((mib) << NETA_PON_MIB_RX_MIB_NO_OFFS)
+
+#define NETA_PON_MIB_RX_VALID_BIT           15
+#define NETA_PON_MIB_RX_VALID_MASK          (1 << NETA_PON_MIB_RX_VALID_BIT)
+
+#endif /* MV_PON_MIB_SUPPORT */
+
+/******************************** NETA TX Registers *****************************/
+
+#define NETA_TXQ_BASE_ADDR_REG(p, txp, q)   (NETA_TX_REG_BASE((p), (txp)) + 0x1800 + ((q) << 2))
+
+#define NETA_TXQ_SIZE_REG(p, txp, q)        (NETA_TX_REG_BASE((p), (txp)) + 0x1820 + ((q) << 2))
+
+#define NETA_TXQ_DESC_NUM_OFFS              0
+#define NETA_TXQ_DESC_NUM_ALL_MASK          (0x3FFF << NETA_TXQ_DESC_NUM_OFFS)
+#define NETA_TXQ_DESC_NUM_MASK(size)        ((size) << NETA_TXQ_DESC_NUM_OFFS)
+
+#define NETA_TXQ_SENT_DESC_TRESH_OFFS       16
+#define NETA_TXQ_SENT_DESC_TRESH_ALL_MASK   (0x3FFF << NETA_TXQ_SENT_DESC_TRESH_OFFS)
+#define NETA_TXQ_SENT_DESC_TRESH_MASK(coal) ((coal) << NETA_TXQ_SENT_DESC_TRESH_OFFS)
+
+#define NETA_TXQ_STATUS_REG(p, txp, q)      (NETA_TX_REG_BASE((p), (txp)) + 0x1840 + ((q) << 2))
+
+#define NETA_TXQ_PENDING_DESC_OFFS          0
+#define NETA_TXQ_PENDING_DESC_MASK          (0x3FFF << NETA_TXQ_PENDING_DESC_OFFS)
+
+#define NETA_TXQ_SENT_DESC_OFFS             16
+#define NETA_TXQ_SENT_DESC_MASK             (0x3FFF << NETA_TXQ_SENT_DESC_OFFS)
+
+#define NETA_TXQ_UPDATE_REG(p, txp, q)      (NETA_TX_REG_BASE((p), (txp)) + 0x1860 + ((q) << 2))
+
+#define NETA_TXQ_ADD_PENDING_OFFS           0
+#define NETA_TXQ_ADD_PENDING_MASK           (0xFF << NETA_TXQ_ADD_PENDING_OFFS)
+
+#define NETA_TXQ_DEC_SENT_OFFS              16
+#define NETA_TXQ_DEC_SENT_MASK              (0xFF << NETA_TXQ_DEC_SENT_OFFS)
+
+#define NETA_TXQ_INDEX_REG(p, txp, q)       (NETA_TX_REG_BASE((p), (txp)) + 0x1880 + ((q) << 2))
+
+#define NETA_TXQ_NEXT_DESC_INDEX_OFFS       0
+#define NETA_TXQ_NEXT_DESC_INDEX_MASK       (0x3FFF << NETA_TXQ_NEXT_DESC_INDEX_OFFS)
+
+#define NETA_TXQ_SENT_DESC_REG(p, txp, q)   (NETA_TX_REG_BASE((p), (txp)) + 0x18A0 + ((q) << 2))
+/* Use NETA_TXQ_SENT_DESC_OFFS and NETA_TXQ_SENT_DESC_MASK */
+
+#ifdef MV_ETH_PMT_NEW
+#define NETA_TX_BAD_FCS_CNTR_REG(p, txp)    (NETA_TX_REG_BASE((p), (txp)) + 0x18C0)
+#define NETA_TX_DROP_CNTR_REG(p, txp)       (NETA_TX_REG_BASE((p), (txp)) + 0x18C4)
+#endif /* MV_ETH_PMT_NEW */
+
+#define NETA_PORT_TX_RESET_REG(p, txp)      (NETA_TX_REG_BASE((p), (txp)) + 0x18F0)
+
+#define NETA_PORT_TX_DMA_RESET_BIT          0
+#define NETA_PORT_TX_DMA_RESET_MASK         (1 << NETA_PORT_TX_DMA_RESET_BIT)
+
+#ifdef MV_ETH_PMT_NEW
+
+#define NETA_TX_ADD_BYTES_REG(p, txp)       (NETA_TX_REG_BASE((p), (txp)) + 0x18FC)
+
+#define NETA_TX_NEW_BYTES_OFFS              0
+#define NETA_TX_NEW_BYTES_ALL_MASK          (0xFFFF << NETA_TX_NEW_BYTES_OFFS)
+#define NETA_TX_NEW_BYTES_MASK(bytes)       ((bytes) << NETA_TX_NEW_BYTES_OFFS)
+
+#define NETA_TX_NEW_BYTES_TXQ_OFFS          28
+#define NETA_TX_NEW_BYTES_TXQ_MASK(txq)     ((txq) << NETA_TX_NEW_BYTES_TXQ_OFFS)
+
+#define NETA_TX_NEW_BYTES_COLOR_BIT         31
+#define NETA_TX_NEW_BYTES_COLOR_GREEN       (0 << NETA_TX_NEW_BYTES_COLOR_BIT)
+#define NETA_TX_NEW_BYTES_COLOR_YELLOW      (1 << NETA_TX_NEW_BYTES_COLOR_BIT)
+/*-------------------------------------------------------------------------------*/
+
+#define NETA_TXQ_NEW_BYTES_REG(p, txp, txq) (NETA_TX_REG_BASE((p), (txp)) + 0x1900 + ((txq) << 2))
+
+#define NETA_TX_MAX_MH_REGS                 15
+#define NETA_TX_MH_REG(p, txp, idx)         (NETA_TX_REG_BASE((p), (txp)) + 0x1944 + ((idx) << 2))
+
+/*************** Packet Modification Registers *******************/
+#define NETA_TX_PMT_ACCESS_REG(p)           (NETA_TX_REG_BASE((p), 0) + 0x1980)
+#define NETA_TX_PMT_FIFO_THRESH_REG(p)      (NETA_TX_REG_BASE((p), 0) + 0x1984)
+#define NETA_TX_PMT_MTU_REG(p)              (NETA_TX_REG_BASE((p), 0) + 0x1988)
+
+#define NETA_TX_PMT_MAX_ETHER_TYPES         4
+#define NETA_TX_PMT_ETHER_TYPE_REG(p, i)    (NETA_TX_REG_BASE((p), 0) + 0x1990 + ((i) << 2))
+
+#define NETA_TX_PMT_DEF_VLAN_CFG_REG(p)     (NETA_TX_REG_BASE((p), 0) + 0x19a0)
+#define NETA_TX_PMT_DEF_DSA_1_CFG_REG(p)    (NETA_TX_REG_BASE((p), 0) + 0x19a4)
+#define NETA_TX_PMT_DEF_DSA_2_CFG_REG(p)    (NETA_TX_REG_BASE((p), 0) + 0x19a8)
+#define NETA_TX_PMT_DEF_DSA_SRC_DEV_REG(p)  (NETA_TX_REG_BASE((p), 0) + 0x19ac)
+
+#define NETA_TX_PMT_TTL_ZERO_FRWD_REG(p)    (NETA_TX_REG_BASE((p), 0) + 0x19b0)
+#define NETA_TX_PMT_TTL_ZERO_CNTR_REG(p)    (NETA_TX_REG_BASE((p), 0) + 0x19b4)
+
+#define NETA_TX_PMT_PPPOE_TYPE_REG(p)       (NETA_TX_REG_BASE((p), 0) + 0x19c0)
+#define NETA_TX_PMT_PPPOE_DATA_REG(p)       (NETA_TX_REG_BASE((p), 0) + 0x19c4)
+#define NETA_TX_PMT_PPPOE_LEN_REG(p)        (NETA_TX_REG_BASE((p), 0) + 0x19c8)
+#define NETA_TX_PMT_PPPOE_PROTO_REG(p)      (NETA_TX_REG_BASE((p), 0) + 0x19cc)
+
+#define NETA_TX_PMT_CONFIG_REG(p)           (NETA_TX_REG_BASE((p), 0) + 0x19d0)
+#define NETA_TX_PMT_STATUS_1_REG(p)         (NETA_TX_REG_BASE((p), 0) + 0x19d4)
+#define NETA_TX_PMT_STATUS_2_REG(p)         (NETA_TX_REG_BASE((p), 0) + 0x19d8)
+
+#else
+
+#define NETA_TX_COLOR_ADD_BYTES_REG(p, txp) (NETA_TX_REG_BASE((p), (txp)) + 0x1900)
+#define NETA_TX_GREEN_BYTES_REG(p, txp)     (NETA_TX_REG_BASE((p), (txp)) + 0x1908)
+#define NETA_TX_YELLOW_BYTES_REG(p, txp)    (NETA_TX_REG_BASE((p), (txp)) + 0x190c)
+
+#define NETA_TX_MAX_MH_REGS                 6
+#define NETA_TX_MH_REG(p, txp, idx)         (NETA_TX_REG_BASE((p), (txp)) + 0x1910 + ((idx) << 2))
+
+#define NETA_TX_DSA_SRC_DEV_REG(p, txp)     (NETA_TX_REG_BASE((p), (txp)) + 0x192C)
+
+#define NETA_TX_MAX_ETH_TYPE_REGS           4
+#define NETA_TX_ETH_TYPE_REG(p, txp, idx)   (NETA_TX_REG_BASE((p), (txp)) + 0x1930 + ((idx) << 2))
+
+#define NETA_TX_PMT_SIZE		256
+#define NETA_TX_PMT_W0_MASK		0xFFFF
+#define NETA_TX_PMT_W1_MASK		0xFFFFFFFF
+#define NETA_TX_PMT_W2_MASK		0x7FFFFFF
+#define NETA_TX_PMT_REG(p)		(NETA_TX_REG_BASE(p, 0) + 0x1940)
+#define NETA_TX_PMT_W0_REG(p)		(NETA_TX_REG_BASE(p, 0) + 0x1944)
+#define NETA_TX_PMT_W1_REG(p)		(NETA_TX_REG_BASE(p, 0) + 0x1948)
+#define NETA_TX_PMT_W2_REG(p)		(NETA_TX_REG_BASE(p, 0) + 0x194c)
+
+#endif /* MV_ETH_PMT_NEW */
+
+/*********************** New TX WRR EJP Registers ********************************/
+
+#define NETA_TX_CMD_1_REG(p, txp)           (NETA_TX_REG_BASE((p), (txp)) + 0x1a00)
+
+#define NETA_TX_EJP_RESET_BIT               0
+#define NETA_TX_EJP_RESET_MASK              (1 << NETA_TX_EJP_RESET_BIT)
+
+#define NETA_TX_PTP_SYNC_BIT                1
+#define NETA_TX_PTP_SYNC_MASK               (1 << NETA_TX_EJP_RESET_BIT)
+
+#define NETA_TX_EJP_ENABLE_BIT              2
+#define NETA_TX_EJP_ENABLE_MASK             (1 << NETA_TX_EJP_ENABLE_BIT)
+
+#define NETA_TX_LEGACY_WRR_BIT              3
+#define NETA_TX_LEGACY_WRR_MASK             (1 << NETA_TX_LEGACY_WRR_BIT)
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Transmit Queue Fixed Priority Configuration (TQFPC) */
+#define NETA_TX_FIXED_PRIO_CFG_REG(p, txp)  (NETA_TX_REG_BASE((p), (txp)) + 0x1a04)
+
+#define NETA_TX_FIXED_PRIO_OFFS             0
+#define NETA_TX_FIXED_PRIO_MASK             (0xFF << NETA_TX_FIXED_PRIO_OFFS)
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Basic Refill No of Clocks (BRC) */
+#define NETA_TX_REFILL_PERIOD_REG(p, txp)   (NETA_TX_REG_BASE((p), (txp)) + 0x1a08)
+
+#define NETA_TX_REFILL_CLOCKS_OFFS          0
+#define NETA_TX_REFILL_CLOCKS_MIN           16
+#define NETA_TX_REFILL_CLOCKS_MASK          (0xFFFF << NETA_TX_REFILL_RATE_CLOCKS_MASK)
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Port Maximum Transmit Unit (PMTU) */
+#define NETA_TXP_MTU_REG(p, txp)            (NETA_TX_REG_BASE((p), (txp)) + 0x1a0c)
+
+#define NETA_TXP_MTU_OFFS                   0
+#define NETA_TXP_MTU_MAX                    0x3FFFF
+#define NETA_TXP_MTU_ALL_MASK               (NETA_TXP_MTU_MAX << NETA_TXP_MTU_OFFS)
+#define NETA_TXP_MTU_MASK(mtu)              ((mtu) << NETA_TXP_MTU_OFFS)
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Port Bucket Refill (PRefill) */
+#define NETA_TXP_REFILL_REG(p, txp)         (NETA_TX_REG_BASE((p), (txp)) + 0x1a10)
+
+#define NETA_TXP_REFILL_TOKENS_OFFS         0
+#define NETA_TXP_REFILL_TOKENS_MAX          0x7FFFF
+#define NETA_TXP_REFILL_TOKENS_ALL_MASK     (NETA_TXP_REFILL_TOKENS_MAX << NETA_TXP_REFILL_TOKENS_OFFS)
+#define NETA_TXP_REFILL_TOKENS_MASK(val)    ((val) << NETA_TXP_REFILL_TOKENS_OFFS)
+
+#define NETA_TXP_REFILL_PERIOD_OFFS         20
+#define NETA_TXP_REFILL_PERIOD_MAX          0x3FF
+#define NETA_TXP_REFILL_PERIOD_ALL_MASK     (NETA_TXP_REFILL_PERIOD_MAX << NETA_TXP_REFILL_PERIOD_OFFS)
+#define NETA_TXP_REFILL_PERIOD_MASK(val)    ((val) << NETA_TXP_REFILL_PERIOD_OFFS)
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Port Maximum Token Bucket Size (PMTBS) */
+#define NETA_TXP_TOKEN_SIZE_REG(p, txp)     (NETA_TX_REG_BASE((p), (txp)) + 0x1a14)
+#define NETA_TXP_TOKEN_SIZE_MAX             0xFFFFFFFF
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Port Token Bucket Counter (PMTBS) */
+#define NETA_TXP_TOKEN_CNTR_REG(p, txp)     (NETA_TX_REG_BASE((p), (txp)) + 0x1a18)
+#define NETA_TXP_TOKEN_CNTR_MAX             0xFFFFFFFF
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Queue Bucket Refill (QRefill) */
+#define NETA_TXQ_REFILL_REG(p, txp, q)      (NETA_TX_REG_BASE((p), (txp)) + 0x1a20 + ((q) << 2))
+
+#define NETA_TXQ_REFILL_TOKENS_OFFS         0
+#define NETA_TXQ_REFILL_TOKENS_MAX          0x7FFFF
+#define NETA_TXQ_REFILL_TOKENS_ALL_MASK     (NETA_TXQ_REFILL_TOKENS_MAX << NETA_TXQ_REFILL_TOKENS_OFFS)
+#define NETA_TXQ_REFILL_TOKENS_MASK(val)    ((val) << NETA_TXQ_REFILL_TOKENS_OFFS)
+
+#define NETA_TXQ_REFILL_PERIOD_OFFS         20
+#define NETA_TXQ_REFILL_PERIOD_MAX          0x3FF
+#define NETA_TXQ_REFILL_PERIOD_ALL_MASK     (NETA_TXQ_REFILL_PERIOD_MAX << NETA_TXQ_REFILL_PERIOD_OFFS)
+#define NETA_TXQ_REFILL_PERIOD_MASK(val)    ((val) << NETA_TXQ_REFILL_PERIOD_OFFS)
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Queue Maximum Token Bucket Size (QMTBS) */
+#define NETA_TXQ_TOKEN_SIZE_REG(p, txp, q)  (NETA_TX_REG_BASE((p), (txp)) + 0x1a40 + ((q) << 2))
+#define NETA_TXQ_TOKEN_SIZE_MAX             0x7FFFFFFF
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Queue Token Bucket Counter (PMTBS) */
+#define NETA_TXQ_TOKEN_CNTR_REG(p, txp, q)  (NETA_TX_REG_BASE((p), (txp)) + 0x1a60 + ((q) << 2))
+#define NETA_TXQ_TOKEN_CNTR_MAX             0xFFFFFFFF
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Transmit Queue Arbiter Configuration (TQxAC) */
+#define NETA_TXQ_WRR_ARBITER_REG(p, txp, q) (NETA_TX_REG_BASE((p), (txp)) + 0x1a80 + ((q) << 2))
+
+#define NETA_TXQ_WRR_WEIGHT_OFFS            0
+#define NETA_TXQ_WRR_WEIGHT_MAX             0xFF
+#define NETA_TXQ_WRR_WEIGHT_ALL_MASK        (NETA_TXQ_WRR_WEIGHT_MAX << NETA_TXQ_WRR_WEIGHT_OFFS)
+#define NETA_TXQ_WRR_WEIGHT_MASK(weigth)    ((weigth) << NETA_TXQ_WRR_WEIGHT_OFFS)
+
+#define NETA_TXQ_WRR_BYTE_COUNT_OFFS        8
+#define NETA_TXQ_WRR_BYTE_COUNT_MASK        (0x3FFFF << NETA_TXQ_WRR_BYTE_COUNT_OFFS)
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Transmission Queue IPG (TQxIPG) */
+#define NETA_TXQ_EJP_IPG_REG(p, txp, q)     (NETA_TX_REG_BASE((p), (txp)) + 0x1aa0 + ((q) << 2))
+
+#define NETA_TXQ_EJP_IPG_OFFS               0
+#define NETA_TXQ_EJP_IPG_MASK               (0x3FFF << NETA_TXQ_EJP_IPG_OFFS)
+/*-----------------------------------------------------------------------------------------------*/
+
+#define NETA_TXP_EJP_HI_LO_REG(p, txp)      (NETA_TX_REG_BASE((p), (txp)) + 0x1ab0)
+#define NETA_TXP_EJP_HI_ASYNC_REG(p, txp)   (NETA_TX_REG_BASE((p), (txp)) + 0x1ab4)
+#define NETA_TXP_EJP_LO_ASYNC_REG(p, txp)   (NETA_TX_REG_BASE((p), (txp)) + 0x1ab8)
+#define NETA_TXP_EJP_SPEED_REG(p, txp)      (NETA_TX_REG_BASE((p), (txp)) + 0x1abc)
+/*-----------------------------------------------------------------------------------------------*/
+
+/******************** NETA RX EXTENDED DESCRIPTOR ********************************/
+
+#define NETA_DESC_ALIGNED_SIZE	            32
+
+#if defined(MV_CPU_BE) && !defined(CONFIG_MV_ETH_BE_WA)
+
+typedef struct neta_rx_desc {
+	MV_U16  dataSize;
+	MV_U16  pncInfo;
+	MV_U32  status;
+	MV_U32  pncFlowId;
+	MV_U32  bufPhysAddr;
+	MV_U16  csumL4;
+	MV_U16  prefetchCmd;
+	MV_U32  bufCookie;
+	MV_U32  hw_cmd;
+	MV_U32  pncExtra;
+} NETA_RX_DESC;
+
+#else
+
+typedef struct neta_rx_desc {
+	MV_U32  status;
+	MV_U16  pncInfo;
+	MV_U16  dataSize;
+	MV_U32  bufPhysAddr;
+	MV_U32  pncFlowId;
+	MV_U32  bufCookie;
+	MV_U16  prefetchCmd;
+	MV_U16  csumL4;
+	MV_U32  pncExtra;
+	MV_U32  hw_cmd;
+} NETA_RX_DESC;
+
+#endif /* MV_CPU_BE && !CONFIG_MV_ETH_BE_WA */
+
+/* "status" word fileds definition */
+#define NETA_RX_L3_OFFSET_OFFS              0
+#define NETA_RX_L3_OFFSET_MASK              (0x7F << NETA_RX_L3_OFFSET_OFFS)
+
+#define NETA_RX_IP_HLEN_OFFS                8
+#define NETA_RX_IP_HLEN_MASK                (0x1F << NETA_RX_IP_HLEN_OFFS)
+
+#define NETA_RX_BM_POOL_ID_OFFS             13
+#define NETA_RX_BM_POOL_ALL_MASK            (0x3 << NETA_RX_BM_POOL_ID_OFFS)
+#define NETA_RX_BM_POOL_ID_MASK(pool)       ((pool) << NETA_RX_BM_POOL_ID_OFFS)
+
+#define NETA_RX_ES_BIT                      16
+#define NETA_RX_ES_MASK                     (1 << NETA_RX_ES_BIT)
+
+#define NETA_RX_ERR_CODE_OFFS               17
+#define NETA_RX_ERR_CODE_MASK               (3 << NETA_RX_ERR_CODE_OFFS)
+#define NETA_RX_ERR_CRC                     (0 << NETA_RX_ERR_CODE_OFFS)
+#define NETA_RX_ERR_OVERRUN                 (1 << NETA_RX_ERR_CODE_OFFS)
+#define NETA_RX_ERR_LEN                     (2 << NETA_RX_ERR_CODE_OFFS)
+#define NETA_RX_ERR_RESOURCE                (3 << NETA_RX_ERR_CODE_OFFS)
+
+#define NETA_RX_F_DESC_BIT                  26
+#define NETA_RX_F_DESC_MASK                 (1 << NETA_RX_F_DESC_BIT)
+
+#define NETA_RX_L_DESC_BIT                  27
+#define NETA_RX_L_DESC_MASK                 (1 << NETA_RX_L_DESC_BIT)
+
+#define NETA_RX_L4_CSUM_OK_BIT              30
+#define NETA_RX_L4_CSUM_OK_MASK             (1 << NETA_RX_L4_CSUM_OK_BIT)
+
+#define NETA_RX_IP4_FRAG_BIT                31
+#define NETA_RX_IP4_FRAG_MASK               (1 << NETA_RX_IP4_FRAG_BIT)
+
+#ifdef CONFIG_MV_ETH_PNC
+
+#define NETA_RX_L3_OFFS                     24
+#define NETA_RX_L3_MASK                     (3 << NETA_RX_L3_OFFS)
+#define NETA_RX_L3_UN                       (0 << NETA_RX_L3_OFFS)
+#define NETA_RX_L3_IP6                      (1 << NETA_RX_L3_OFFS)
+#define NETA_RX_L3_IP4                      (2 << NETA_RX_L3_OFFS)
+#define NETA_RX_L3_IP4_ERR                  (3 << NETA_RX_L3_OFFS)
+
+#define NETA_RX_L4_OFFS                     28
+#define NETA_RX_L4_MASK                     (3 << NETA_RX_L4_OFFS)
+#define NETA_RX_L4_TCP                      (0 << NETA_RX_L4_OFFS)
+#define NETA_RX_L4_UDP                      (1 << NETA_RX_L4_OFFS)
+#define NETA_RX_L4_OTHER                    (2 << NETA_RX_L4_OFFS)
+
+/* Bits of "pncExtra" field */
+#define NETA_RX_PNC_ENABLED_BIT             0
+#define NETA_RX_PNC_ENABLED_MASK            (1 << NETA_RX_PNC_ENABLED_BIT)
+
+#define NETA_RX_PNC_LOOPS_OFFS              1
+#define NETA_RX_PNC_LOOPS_MASK              (0xF << NETA_RX_PNC_LOOPS_OFFS)
+
+#define NETA_PNC_STATUS_OFFS                5
+#define NETA_PNC_STATUS_MASK                (3 << NETA_PNC_STATUS_OFFS)
+
+#define NETA_PNC_RI_EXTRA_OFFS              16
+#define NETA_PNC_RI_EXTRA_MASK              (0xFFF << NETA_PNC_RI_EXTRA_OFFS)
+/*---------------------------------------------------------------------------*/
+
+#else
+
+#define ETH_RX_VLAN_TAGGED_FRAME_BIT        19
+#define ETH_RX_VLAN_TAGGED_FRAME_MASK       (1 << ETH_RX_VLAN_TAGGED_FRAME_BIT)
+
+#define ETH_RX_BPDU_FRAME_BIT               20
+#define ETH_RX_BPDU_FRAME_MASK              (1 << ETH_RX_BPDU_FRAME_BIT)
+
+#define ETH_RX_L4_TYPE_OFFSET               21
+#define ETH_RX_L4_TYPE_MASK                 (3 << ETH_RX_L4_TYPE_OFFSET)
+#define ETH_RX_L4_TCP_TYPE                  (0 << ETH_RX_L4_TYPE_OFFSET)
+#define ETH_RX_L4_UDP_TYPE                  (1 << ETH_RX_L4_TYPE_OFFSET)
+#define ETH_RX_L4_OTHER_TYPE                (2 << ETH_RX_L4_TYPE_OFFSET)
+
+#define ETH_RX_NOT_LLC_SNAP_FORMAT_BIT      23
+#define ETH_RX_NOT_LLC_SNAP_FORMAT_MASK     (1 << ETH_RX_NOT_LLC_SNAP_FORMAT_BIT)
+
+#ifdef MV_ETH_LEGACY_PARSER_IPV6
+
+#define NETA_RX_L3_OFFS                     24
+#define NETA_RX_L3_MASK                     (3 << NETA_RX_L3_OFFS)
+#define NETA_RX_L3_UN                       (0 << NETA_RX_L3_OFFS)
+#define NETA_RX_L3_IP6                      (2 << NETA_RX_L3_OFFS)
+#define NETA_RX_L3_IP4                      (3 << NETA_RX_L3_OFFS)
+#define NETA_RX_L3_IP4_ERR                  (1 << NETA_RX_L3_OFFS)
+
+#else
+
+#define ETH_RX_IP_FRAME_TYPE_BIT            24
+#define ETH_RX_IP_FRAME_TYPE_MASK           (1 << ETH_RX_IP_FRAME_TYPE_BIT)
+
+#define ETH_RX_IP_HEADER_OK_BIT             25
+#define ETH_RX_IP_HEADER_OK_MASK            (1 << ETH_RX_IP_HEADER_OK_BIT)
+
+#endif /* MV_ETH_LEGACY_PARSER_IPV6 */
+
+#define ETH_RX_UNKNOWN_DA_BIT               28
+#define ETH_RX_UNKNOWN_DA_MASK              (1 << ETH_RX_UNKNOWN_DA_BIT)
+
+#endif /* CONFIG_MV_ETH_PNC */
+/*---------------------------------------------------------------------------*/
+
+
+/* Bit map of "hw_cmd" field */
+#define NETA_RX_COLOR_BIT                   3
+#define NETA_RX_COLOR_MASK                  (1 << NETA_RX_COLOR_BIT)
+#define NETA_RX_COLOR_GREEN                 (0 << NETA_RX_COLOR_BIT)
+#define NETA_RX_COLOR_YELLOW                (1 << NETA_RX_COLOR_BIT)
+
+#define NETA_RX_DSA_OFFS                    4
+#define NETA_RX_DSA_MASK                    (3 << NETA_RX_DSA_OFFS)
+#define NETA_RX_DSA_NONE                    (0 << NETA_RX_DSA_OFFS)
+#define NETA_RX_DSA                         (1 << NETA_RX_DSA_OFFS)
+#define NETA_RX_DSA_E                       (2 << NETA_RX_DSA_OFFS)
+
+#define NETA_RX_GEM_PID_OFFS                8
+#define NETA_RX_GEM_PID_MASK                (0xFFF << NETA_RX_GEM_PID_OFFS)
+/*---------------------------------------------------------------------------*/
+
+
+/******************** NETA TX EXTENDED DESCRIPTOR ********************************/
+
+#if defined(MV_CPU_BE) && !defined(CONFIG_MV_ETH_BE_WA)
+
+typedef struct neta_tx_desc {
+	MV_U16  dataSize;
+	MV_U16  csumL4;
+	MV_U32  command;
+	MV_U32  hw_cmd;
+	MV_U32  bufPhysAddr;
+	MV_U32  reserved[4];
+} NETA_TX_DESC;
+
+#else
+
+typedef struct neta_tx_desc {
+	MV_U32  command;
+	MV_U16  csumL4;
+	MV_U16  dataSize;
+	MV_U32  bufPhysAddr;
+	MV_U32  hw_cmd;
+	MV_U32  reserved[4];
+} NETA_TX_DESC;
+
+#endif /* MV_CPU_BE && !CONFIG_MV_ETH_BE_WA */
+
+/* "command" word fileds definition */
+#define NETA_TX_L3_OFFSET_OFFS              0
+#define NETA_TX_L3_OFFSET_MASK              (0x7F << NETA_TX_L3_OFFSET_OFFS)
+
+#define NETA_TX_GEM_OEM_BIT                 7
+#define NETA_TX_GEM_OEM_MASK                (1 << NETA_TX_GEM_OEM_BIT)
+
+#define NETA_TX_IP_HLEN_OFFS                8
+#define NETA_TX_IP_HLEN_MASK                (0x1F << NETA_TX_IP_HLEN_OFFS)
+
+#define NETA_TX_BM_POOL_ID_OFFS             13
+#define NETA_TX_BM_POOL_ID_ALL_MASK         (0x3 << NETA_TX_BM_POOL_ID_OFFS)
+#define NETA_TX_BM_POOL_ID_MASK(pool)       ((pool) << NETA_TX_BM_POOL_ID_OFFS)
+
+#define NETA_TX_HWF_BIT                     15
+#define NETA_TX_HWF_MASK                    (1 << NETA_TX_HWF_BIT)
+
+#define NETA_TX_L4_BIT                      16
+#define NETA_TX_L4_TCP                      (0 << NETA_TX_L4_BIT)
+#define NETA_TX_L4_UDP                      (1 << NETA_TX_L4_BIT)
+
+#define NETA_TX_L3_BIT                      17
+#define NETA_TX_L3_IP4                      (0 << NETA_TX_L3_BIT)
+#define NETA_TX_L3_IP6                      (1 << NETA_TX_L3_BIT)
+
+#define NETA_TX_IP_CSUM_BIT                 18
+#define NETA_TX_IP_CSUM_MASK                (1 << NETA_TX_IP_CSUM_BIT)
+
+#define NETA_TX_Z_PAD_BIT                   19
+#define NETA_TX_Z_PAD_MASK                  (1 << NETA_TX_Z_PAD_BIT)
+
+#define NETA_TX_L_DESC_BIT                  20
+#define NETA_TX_L_DESC_MASK                 (1 << NETA_TX_L_DESC_BIT)
+
+#define NETA_TX_F_DESC_BIT                  21
+#define NETA_TX_F_DESC_MASK                 (1 << NETA_TX_F_DESC_BIT)
+
+#define NETA_TX_BM_ENABLE_BIT               22
+#define NETA_TX_BM_ENABLE_MASK              (1 << NETA_TX_BM_ENABLE_BIT)
+
+
+#define NETA_TX_PKT_OFFSET_OFFS             23
+#define NETA_TX_PKT_OFFSET_MAX				0x7F
+#define NETA_TX_PKT_OFFSET_ALL_MASK         (NETA_TX_PKT_OFFSET_MAX << NETA_TX_PKT_OFFSET_OFFS)
+#define NETA_TX_PKT_OFFSET_MASK(offset)     (((offset) << NETA_TX_PKT_OFFSET_OFFS) & NETA_TX_PKT_OFFSET_ALL_MASK)
+
+#define NETA_TX_L4_CSUM_BIT                 30
+#define NETA_TX_L4_CSUM_MASK                (3 << NETA_TX_L4_CSUM_BIT)
+#define NETA_TX_L4_CSUM_PART                (0 << NETA_TX_L4_CSUM_BIT)
+#define NETA_TX_L4_CSUM_FULL                (1 << NETA_TX_L4_CSUM_BIT)
+#define NETA_TX_L4_CSUM_NOT                 (2 << NETA_TX_L4_CSUM_BIT)
+
+#define NETA_TX_FLZ_DESC_MASK               (NETA_TX_F_DESC_MASK | NETA_TX_L_DESC_MASK | NETA_TX_Z_PAD_MASK)
+/*-------------------------------------------------------------------------------*/
+
+/* "hw_cmd" field definition */
+#define NETA_TX_ES_BIT                      0
+#define NETA_TX_ES_MASK                     (1 << NETA_TX_ES_BIT)
+
+#define NETA_TX_ERR_CODE_OFFS               1
+#define NETA_TX_ERR_CODE_MASK               (3 << NETA_TX_ERR_CODE_OFFS)
+#define NETA_TX_ERR_LATE_COLLISION          (0 << NETA_TX_ERR_CODE_OFFS)
+#define NETA_TX_ERR_UNDERRUN                (1 << NETA_TX_ERR_CODE_OFFS)
+#define NETA_TX_ERR_EXCE_COLLISION          (2 << NETA_RX_ERR_CODE_OFFS)
+
+#define NETA_TX_COLOR_BIT                   3
+#define NETA_TX_COLOR_GREEN                 (0 << NETA_TX_COLOR_BIT)
+#define NETA_TX_COLOR_YELLOW                (1 << NETA_TX_COLOR_BIT)
+
+#define NETA_TX_MH_SEL_OFFS                 4
+
+#ifdef MV_ETH_PMT_NEW
+#define NETA_TX_MH_SEL_MASK                 (0xF << NETA_TX_MH_SEL_OFFS)
+#else
+#define NETA_TX_MH_SEL_MASK                 (0x7 << NETA_TX_MH_SEL_OFFS)
+#endif /* MV_ETH_PMT_NEW */
+
+#define NETA_TX_MH_UNCHANGE                 (0 << NETA_TX_MH_SEL_OFFS)
+
+#define NETA_TX_GEM_PID_OFFS                8
+#define NETA_TX_GEM_PID_MASK                (0xFFF << NETA_TX_GEM_PID_OFFS)
+
+#define NETA_TX_MOD_CMD_OFFS                20
+#define NETA_TX_MOD_CMD_MASK                (0x3FF << NETA_TX_MOD_CMD_OFFS)
+
+#define NETA_TX_DSA_OFFS                    30
+#define NETA_TX_DSA_MASK                    (3 << NETA_TX_DSA_OFFS)
+#define NETA_TX_DSA_NONE                    (0 << NETA_TX_DSA_OFFS)
+#define NETA_TX_DSA                         (1 << NETA_TX_DSA_OFFS) /* normal dsa */
+#define NETA_TX_DSA_E                       (2 << NETA_TX_DSA_OFFS) /* extended dsa */
+/*-------------------------------------------------------------------------------*/
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __mvNetaRegs_h__ */
diff --git a/drivers/net/ethernet/mvebu_net/neta/hal/pmt/mvPmt.c b/drivers/net/ethernet/mvebu_net/neta/hal/pmt/mvPmt.c
new file mode 100755
index 000000000000..dce977ad3ae1
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hal/pmt/mvPmt.c
@@ -0,0 +1,639 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCommon.h"  /* Should be included before mvSysHwConfig */
+#include "mvTypes.h"
+#include "mvDebug.h"
+#include "mvOs.h"
+
+#include "gbe/mvNeta.h"
+
+#include "mvPmt.h"
+
+MV_NETA_PMT	**mvPmtBase = NULL;
+
+/* #define PMT_DBG mvOsPrintf */
+#define PMT_DBG(X...)
+
+static char mvPmtCmdNames[MV_ETH_PMT_SIZE][PMT_TEXT] = {
+
+	[MV_NETA_CMD_NONE]          = "NO_MOD",
+	[MV_NETA_CMD_ADD_2B]        = "ADD_2B",
+	[MV_NETA_CMD_CFG_VLAN]      = "CFG_VLAN",
+	[MV_NETA_CMD_ADD_VLAN]      = "ADD_VLAN",
+	[MV_NETA_CMD_CFG_DSA_1]     = "CFG_DSA_1",
+	[MV_NETA_CMD_CFG_DSA_2]     = "CFG_DSA_2",
+	[MV_NETA_CMD_ADD_DSA]       = "ADD_DSA",
+	[MV_NETA_CMD_DEL_BYTES]     = "DEL_BYTES",
+	[MV_NETA_CMD_REPLACE_2B]    = "REPLACE_2B",
+	[MV_NETA_CMD_REPLACE_LSB]   = "REPLACE_LSB",
+	[MV_NETA_CMD_REPLACE_MSB]   = "REPLACE_MSB",
+	[MV_NETA_CMD_REPLACE_VLAN]  = "REPLACE_VLAN",
+	[MV_NETA_CMD_DEC_LSB]       = "DEC_LSB",
+	[MV_NETA_CMD_DEC_MSB]       = "DEC_MSB",
+	[MV_NETA_CMD_ADD_CALC_LEN]  = "ADD_CALC_LEN",
+	[MV_NETA_CMD_REPLACE_LEN]   = "REPLACE_LEN",
+	[MV_NETA_CMD_IPV4_CSUM]     = "IPV4_CSUM",
+	[MV_NETA_CMD_L4_CSUM]       = "L4_CSUM",
+	[MV_NETA_CMD_SKIP]          = "SKIP",
+	[MV_NETA_CMD_JUMP]          = "JUMP",
+	[MV_NETA_CMD_JUMP_SKIP]     = "JUMP_SKIP",
+	[MV_NETA_CMD_JUMP_SUB]      = "JUMP_SUB",
+	[MV_NETA_CMD_PPPOE]         = "PPPOE",
+	[MV_NETA_CMD_STORE]         = "STORE",
+};
+
+/*******************************************************************************
+* mvNetaPmtWrite - Add entry to Packet Modification Table
+*
+* INPUT:
+*       int			port    - NETA port number
+*       int			idx     - PMT entry index to write to
+*       MV_NETA_PMT	pEntry  - PMT entry
+*
+* RETURN:   MV_STATUS
+*               MV_OK - Success, Others - Failure
+*******************************************************************************/
+MV_STATUS   mvNetaPmtWrite(int port, int idx, MV_NETA_PMT *pEntry)
+{
+	MV_NETA_PMT	*pBase;
+
+	if ((port < 0) || (port >= mvNetaHalData.maxPort)) {
+		mvOsPrintf("%s: port %d is out of range\n", __func__, port);
+		return MV_OUT_OF_RANGE;
+	}
+
+	if ((idx < 0) || (idx >= MV_ETH_PMT_SIZE)) {
+		mvOsPrintf("%s: entry %d is out of range\n", __func__, idx);
+		return MV_OUT_OF_RANGE;
+	}
+	if ((mvPmtBase == NULL) || (mvPmtBase[port] == NULL)) {
+		mvOsPrintf("%s: PMT for port #%d is not initialized\n", __func__, port);
+		return MV_INIT_ERROR;
+	}
+	pBase = mvPmtBase[port];
+	pBase[idx].word = pEntry->word;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNetaPmtRead - Read entry from Packet Modification Table
+*
+* INPUT:
+*       int			port - NETA port number
+*       int			idx  - PMT entry index to read from
+* OUTPUT:
+*       MV_NETA_PMT	pEntry - PMT entry
+*
+* RETURN:   MV_STATUS
+*               MV_OK - Success, Others - Failure
+*******************************************************************************/
+MV_STATUS mvNetaPmtRead(int port, int idx, MV_NETA_PMT *pEntry)
+{
+	MV_NETA_PMT	*pBase;
+
+	if ((port < 0) || (port >= mvNetaHalData.maxPort)) {
+		mvOsPrintf("%s: port %d is out of range\n", __func__, port);
+		return MV_OUT_OF_RANGE;
+	}
+
+	if ((idx < 0) || (idx >= MV_ETH_PMT_SIZE)) {
+		mvOsPrintf("%s: entry %d is out of range\n", __func__, idx);
+		return MV_OUT_OF_RANGE;
+	}
+	if ((mvPmtBase == NULL) || (mvPmtBase[port] == NULL)) {
+		mvOsPrintf("%s: PMT for port #%d is not initialized\n", __func__, port);
+		return MV_INIT_ERROR;
+	}
+	pBase = mvPmtBase[port];
+	pEntry->word = pBase[idx].word;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNetaPmtClear - Clear Packet Modification Table
+*
+* INPUT:
+*       int			port - NETA port number
+*
+* RETURN:   MV_STATUS
+*               MV_OK - Success, Others - Failure
+*******************************************************************************/
+MV_STATUS   mvNetaPmtClear(int port)
+{
+	int         idx;
+	MV_NETA_PMT entry;
+
+	if ((port < 0) || (port >= mvNetaHalData.maxPort)) {
+		mvOsPrintf("%s: port %d is out of range\n", __func__, port);
+		return MV_OUT_OF_RANGE;
+	}
+
+	MV_NETA_PMT_INVALID_SET(&entry);
+	for (idx = 0; idx < MV_ETH_PMT_SIZE; idx++)
+		mvNetaPmtWrite(port, idx, &entry);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNetaPmtInit - Init Packet Modification Table driver
+*
+* INPUT:
+*       int			port - NETA port number
+*
+* RETURN:   MV_STATUS
+*               MV_OK - Success, Others - Failure
+*******************************************************************************/
+MV_STATUS   mvNetaPmtInit(int port, MV_NETA_PMT *pBase)
+{
+	if ((port < 0) || (port >= mvNetaHalData.maxPort)) {
+		mvOsPrintf("%s: port %d is out of range\n", __func__, port);
+		return MV_OUT_OF_RANGE;
+	}
+
+	if (mvPmtBase == NULL) {
+		mvPmtBase = mvOsMalloc(mvNetaHalData.maxPort * sizeof(MV_NETA_PMT *));
+		if (mvPmtBase == NULL) {
+			mvOsPrintf("%s: Allocation failed\n", __func__);
+			return MV_OUT_OF_CPU_MEM;
+		}
+		memset(mvPmtBase, 0, mvNetaHalData.maxPort * sizeof(MV_NETA_PMT *));
+	}
+	mvPmtBase[port] = pBase;
+
+	mvNetaPmtClear(port);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvNetaPmtDestroy - Free PMT Base memory
+*
+* INPUT:
+*
+* RETURN:   void
+*******************************************************************************/
+MV_VOID   mvNetaPmtDestroy(MV_VOID)
+{
+	if (mvPmtBase)
+		mvOsFree(mvPmtBase);
+}
+
+/*******************************************************************************
+* mvNetaPmtEntryPrint - Print PMT entry
+*
+* INPUT:
+*       MV_NETA_PMT*    pEntry - PMT entry to be printed
+*
+* RETURN:   void
+*******************************************************************************/
+void    mvNetaPmtEntryPrint(MV_NETA_PMT *pEntry)
+{
+	mvOsPrintf("%04x %04x: %s",
+		MV_NETA_PMT_CTRL_GET(pEntry), MV_NETA_PMT_DATA_GET(pEntry),
+		mvPmtCmdNames[MV_NETA_PMT_CMD_GET(pEntry)]);
+
+	if (pEntry->word & MV_NETA_PMT_IP4_CSUM_MASK)
+		mvOsPrintf(", IPv4 csum");
+
+	if (pEntry->word & MV_NETA_PMT_L4_CSUM_MASK)
+		mvOsPrintf(", L4 csum");
+
+	if (pEntry->word & MV_NETA_PMT_LAST_MASK)
+		mvOsPrintf(", Last");
+
+	mvOsPrintf("\n");
+}
+
+/*******************************************************************************
+* mvNetaPmtDump - Dump Packet Modification Table
+*
+* INPUT:
+*       int			port    - NETA port number
+*       int         flags   -
+*
+* RETURN:   void
+*******************************************************************************/
+void   mvNetaPmtDump(int port, int flags)
+{
+	int             idx, count = 0;
+	MV_NETA_PMT 	entry;
+	MV_STATUS       status;
+
+	if ((port < 0) || (port >= mvNetaHalData.maxPort)) {
+		mvOsPrintf("%s: port %d is out of range\n", __func__, port);
+		return;
+	}
+
+	for (idx = 0; idx < MV_ETH_PMT_SIZE; idx++) {
+		status = mvNetaPmtRead(port, idx, &entry);
+		if (status != MV_OK) {
+			mvOsPrintf("%s failed: port=%d, idx=%d, status=%d\n",
+					__func__, port, idx, status);
+			return;
+		}
+		if ((flags & PMT_PRINT_VALID_FLAG) && !MV_NETA_PMT_IS_VALID(&entry))
+			continue;
+
+		count++;
+		mvOsPrintf("[%3d]: ", idx);
+		mvNetaPmtEntryPrint(&entry);
+	}
+
+	if (!count)
+		mvOsPrintf("PMT is empty, %d entries\n", MV_ETH_PMT_SIZE);
+}
+
+/*******************************************************************************
+* mvNetaPmtAdd2Bytes - Set PMT entry with "add 2 bytes" command
+*
+* INPUT:
+*       MV_U16 data         - 2 bytes of data to be added
+*
+* OUTPUT:
+*       MV_NETA_PMT* pEntry - PMT entry to be set
+*
+* RETURN:   void
+*******************************************************************************/
+void    mvNetaPmtAdd2Bytes(MV_NETA_PMT *pEntry, MV_U16 data)
+{
+	MV_NETA_PMT_CMD_SET(pEntry, MV_NETA_CMD_ADD_2B);
+	MV_NETA_PMT_DATA_SET(pEntry, data);
+}
+
+/*******************************************************************************
+* mvNetaPmtReplace2Bytes - Set PMT entry with "Replace 2 bytes" command
+*
+* INPUT:
+*       MV_U16 data         - 2 bytes of data to be replaced
+*
+* OUTPUT:
+*       MV_NETA_PMT* pEntry - PMT entry to be set
+*
+* RETURN:   void
+*******************************************************************************/
+void    mvNetaPmtReplace2Bytes(MV_NETA_PMT *pEntry, MV_U16 data)
+{
+	MV_NETA_PMT_CMD_SET(pEntry, MV_NETA_CMD_REPLACE_2B);
+	MV_NETA_PMT_DATA_SET(pEntry, data);
+}
+
+/*******************************************************************************
+* mvNetaPmtDelShorts - Set PMT entry with "Delete" command
+*
+* INPUT:
+*       MV_U8   toDelete    - number of shorts to be deleted
+*       MV_U8   skipBefore  - number of shorts to be skipped before delete
+*       MV_U8   skipAfter   - number of shorts to be skipped after delete
+*
+* OUTPUT:
+*       MV_NETA_PMT* pEntry - PMT entry to be set
+*
+* RETURN:   void
+*******************************************************************************/
+void    mvNetaPmtDelShorts(MV_NETA_PMT *pEntry, MV_U8 toDelete,
+				MV_U8 skipBefore, MV_U8 skipAfter)
+{
+	MV_U16  data;
+
+	MV_NETA_PMT_CMD_SET(pEntry, MV_NETA_CMD_DEL_BYTES);
+
+	data = MV_NETA_PMT_DEL_SHORTS(toDelete) |
+		MV_NETA_PMT_DEL_SKIP_B(skipBefore) |
+		MV_NETA_PMT_DEL_SKIP_A(skipAfter);
+
+	MV_NETA_PMT_DATA_SET(pEntry, data);
+}
+
+/* Set update checksum flags to PMT entry */
+void    mvNetaPmtFlags(MV_NETA_PMT *pEntry, int last, int ipv4, int l4)
+{
+	if (last)
+		pEntry->word |= MV_NETA_PMT_LAST_MASK;
+
+	if (ipv4)
+		pEntry->word |= MV_NETA_PMT_IP4_CSUM_MASK;
+
+	if (l4)
+		pEntry->word |= MV_NETA_PMT_L4_CSUM_MASK;
+}
+
+/* Set Last flag to PMT entry */
+void    mvNetaPmtLastFlag(MV_NETA_PMT *pEntry, int last)
+{
+	if (last)
+		pEntry->word |= MV_NETA_PMT_LAST_MASK;
+	else
+		pEntry->word &= ~MV_NETA_PMT_LAST_MASK;
+}
+
+/*******************************************************************************
+* mvNetaPmtReplaceLSB - Set PMT entry with "Replace LSB" command
+*
+* INPUT:
+*       MV_U8 value    - value to be placed
+*       MV_U8 mask     - mask defines which bits to be replaced
+*
+* OUTPUT:
+*       MV_NETA_PMT* pEntry - PMT entry to be set
+*
+* RETURN:   void
+*******************************************************************************/
+void    mvNetaPmtReplaceLSB(MV_NETA_PMT *pEntry, MV_U8 value, MV_U8 mask)
+{
+	MV_U16  data;
+
+	MV_NETA_PMT_CMD_SET(pEntry, MV_NETA_CMD_REPLACE_LSB);
+
+	data = (value << 0) | (mask << 8);
+
+	MV_NETA_PMT_DATA_SET(pEntry, data);
+}
+
+/*******************************************************************************
+* mvNetaPmtReplaceMSB - Set PMT entry with "Replace MSB" command
+*
+* INPUT:
+*       MV_U8 value    - value to be placed
+*       MV_U8 mask     - mask defines which bits to be replaced
+*
+* OUTPUT:
+*       MV_NETA_PMT* pEntry - PMT entry to be set
+*
+* RETURN:   void
+*******************************************************************************/
+void    mvNetaPmtReplaceMSB(MV_NETA_PMT *pEntry, MV_U8 value, MV_U8 mask)
+{
+	MV_U16  data;
+
+	MV_NETA_PMT_CMD_SET(pEntry, MV_NETA_CMD_REPLACE_MSB);
+
+	data = (value << 0) | (mask << 8);
+
+	MV_NETA_PMT_DATA_SET(pEntry, data);
+}
+
+/*******************************************************************************
+* mvNetaPmtSkip - Set PMT entry with "Skip" command
+*
+* INPUT:
+*       MV_U16 shorts   - number of shorts to be skipped
+*
+* OUTPUT:
+*       MV_NETA_PMT* pEntry - PMT entry to be set
+*
+* RETURN:   void
+*******************************************************************************/
+void    mvNetaPmtSkip(MV_NETA_PMT *pEntry, MV_U16 shorts)
+{
+	MV_U16  data;
+
+	MV_NETA_PMT_CMD_SET(pEntry, MV_NETA_CMD_SKIP);
+
+	data = MV_NETA_PMT_CALC_LEN_DATA(shorts * 2);
+	data |= MV_NETA_PMT_CALC_LEN_0_ZERO;
+	data |= MV_NETA_PMT_CALC_LEN_1(MV_NETA_PMT_ZERO_ADD);
+	data |= MV_NETA_PMT_CALC_LEN_2(MV_NETA_PMT_ZERO_ADD);
+	data |= MV_NETA_PMT_CALC_LEN_3_ADD_MASK;
+
+	MV_NETA_PMT_DATA_SET(pEntry, data);
+}
+
+/*******************************************************************************
+* mvNetaPmtJump - Set PMT entry with "Jump" command
+*
+* INPUT:
+*       MV_U16 target   - PMT entry to jump to
+*
+* OUTPUT:
+*       MV_NETA_PMT* pEntry - PMT entry to be set
+*
+* RETURN:   void
+*******************************************************************************/
+void    mvNetaPmtJump(MV_NETA_PMT *pEntry, MV_U16 target, int type, int cond)
+{
+	MV_U16  data;
+
+	if (type == 0) {
+		MV_NETA_PMT_CMD_SET(pEntry, MV_NETA_CMD_JUMP);
+	} else if (type == 1) {
+		MV_NETA_PMT_CMD_SET(pEntry, MV_NETA_CMD_JUMP_SKIP);
+	} else if (type == 2) {
+		MV_NETA_PMT_CMD_SET(pEntry, MV_NETA_CMD_JUMP_SUB);
+	} else {
+		mvOsPrintf("%s - Unexpected type = %d\n", __func__, type);
+		return;
+	}
+
+	data = target;
+	if (cond == 1)
+		data |= MV_NETA_PMT_IP4_CSUM_MASK;
+	else if (cond == 2)
+		data |= MV_NETA_PMT_L4_CSUM_MASK;
+
+	MV_NETA_PMT_DATA_SET(pEntry, data);
+}
+
+
+/*******************************************************************************
+* mvNetaPmtDecLSB - Set PMT entry with "Decrement LSB" command
+*
+* INPUT:
+*       MV_U8   skipBefore  - number of shorts to be skipped before delete
+*       MV_U8   skipAfter   - number of shorts to be skipped after delete
+*
+* OUTPUT:
+*       MV_NETA_PMT* pEntry - PMT entry to be set
+*
+* RETURN:   void
+*******************************************************************************/
+void        mvNetaPmtDecLSB(MV_NETA_PMT *pEntry, MV_U8 skipBefore, MV_U8 skipAfter)
+{
+	MV_U16  data;
+
+	MV_NETA_PMT_CMD_SET(pEntry, MV_NETA_CMD_DEC_LSB);
+
+	data =  MV_NETA_PMT_DEL_SKIP_B(skipBefore) |
+		MV_NETA_PMT_DEL_SKIP_A(skipAfter);
+
+	MV_NETA_PMT_DATA_SET(pEntry, data);
+}
+
+/*******************************************************************************
+* mvNetaPmtDecMSB - Set PMT entry with "Decrement MSB" command
+*
+* INPUT:
+*       MV_U8   skipBefore  - number of shorts to be skipped before delete
+*       MV_U8   skipAfter   - number of shorts to be skipped after delete
+*
+* OUTPUT:
+*       MV_NETA_PMT* pEntry - PMT entry to be set
+*
+* RETURN:   void
+*******************************************************************************/
+void        mvNetaPmtDecMSB(MV_NETA_PMT *pEntry, MV_U8 skipBefore, MV_U8 skipAfter)
+{
+	MV_U16  data;
+
+	MV_NETA_PMT_CMD_SET(pEntry, MV_NETA_CMD_DEC_MSB);
+
+	data =  MV_NETA_PMT_DEL_SKIP_B(skipBefore) |
+		MV_NETA_PMT_DEL_SKIP_A(skipAfter);
+
+	MV_NETA_PMT_DATA_SET(pEntry, data);
+}
+
+/*******************************************************************************
+* mvNetaPmtReplaceIPv4csum - Set PMT entry with "Replace IP checksum" command
+*
+* INPUT:
+*       MV_U16   data
+*
+* OUTPUT:
+*       MV_NETA_PMT* pEntry - PMT entry to be set
+*
+* RETURN:   void
+*******************************************************************************/
+void        mvNetaPmtReplaceIPv4csum(MV_NETA_PMT *pEntry, MV_U16 data)
+{
+	MV_NETA_PMT_CMD_SET(pEntry, MV_NETA_CMD_IPV4_CSUM);
+	MV_NETA_PMT_DATA_SET(pEntry, data);
+}
+
+/*******************************************************************************
+* mvNetaPmtReplaceL4csum - Set PMT entry with "Replace TCP/UDP checksum" command
+*
+* INPUT:
+*       MV_U16   data
+*
+* OUTPUT:
+*       MV_NETA_PMT* pEntry - PMT entry to be set
+*
+* RETURN:   void
+*******************************************************************************/
+void        mvNetaPmtReplaceL4csum(MV_NETA_PMT *pEntry, MV_U16 data)
+{
+	MV_NETA_PMT_CMD_SET(pEntry, MV_NETA_CMD_L4_CSUM);
+	MV_NETA_PMT_DATA_SET(pEntry, data);
+}
+
+/**************************************************************/
+/* High level PMT configuration functions - multiple commands */
+/**************************************************************/
+
+/* Configure PMT to decrement TTL in IPv4 header - 2 entries */
+int     mvNetaPmtTtlDec(int port, int idx, int ip_offs, int isLast)
+{
+	MV_NETA_PMT     pmtEntry;
+
+	/* Skip to TTL and Decrement - Set flag for IP csum */
+	MV_NETA_PMT_CLEAR(&pmtEntry);
+	mvNetaPmtDecMSB(&pmtEntry, (ip_offs + 8)/2, 0);
+	mvNetaPmtFlags(&pmtEntry, 0, 1, 0);
+	mvNetaPmtWrite(port, idx, &pmtEntry);
+	idx++;
+
+	/* Update IP checksum */
+	MV_NETA_PMT_CLEAR(&pmtEntry);
+	mvNetaPmtReplaceIPv4csum(&pmtEntry, 0);
+	if (isLast)
+		mvNetaPmtLastFlag(&pmtEntry, 1);
+
+	mvNetaPmtWrite(port, idx, &pmtEntry);
+
+	return idx;
+}
+
+/* Configure PMT to replace bytes in the packet: minimum 2 bytes - 1 entry for each 2 bytes */
+int     mvNetaPmtDataReplace(int port, int idx, int offset,
+				 MV_U8 *data, int bytes, int isLast)
+{
+	int             i;
+	MV_U16          u16;
+	MV_NETA_PMT     pmtEntry;
+
+	if (offset > 0) {
+		/* Skip command first */
+		MV_NETA_PMT_CLEAR(&pmtEntry);
+		mvNetaPmtSkip(&pmtEntry, offset/2);
+		mvNetaPmtWrite(port, idx, &pmtEntry);
+		idx++;
+	}
+	for (i = 0; i < bytes; i += 2) {
+		/* Replace */
+		MV_NETA_PMT_CLEAR(&pmtEntry);
+		u16 = ((data[i] << 8) | data[i+1]);
+		mvNetaPmtReplace2Bytes(&pmtEntry, u16);
+		if (isLast && (i == bytes))
+			mvNetaPmtLastFlag(&pmtEntry, 1);
+
+		mvNetaPmtWrite(port, idx, &pmtEntry);
+		idx++;
+	}
+
+	return idx;
+}
diff --git a/drivers/net/ethernet/mvebu_net/neta/hal/pmt/mvPmt.h b/drivers/net/ethernet/mvebu_net/neta/hal/pmt/mvPmt.h
new file mode 100755
index 000000000000..3d704fee49af
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hal/pmt/mvPmt.h
@@ -0,0 +1,261 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	    this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __mvPmt_h__
+#define __mvPmt_h__
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "mvTypes.h"
+#include "mvCommon.h"
+#include "mvOs.h"
+
+#define PMT_TEXT    16
+
+#define PMT_PRINT_VALID_FLAG    0x01
+
+
+typedef union mv_neta_pmt_t {
+	MV_U32	word;
+
+} MV_NETA_PMT;
+
+#define MV_NETA_PMT_DATA_OFFS       0
+#define MV_NETA_PMT_DATA_BITS       16
+#define MV_NETA_PMT_DATA_MASK       (((1 << MV_NETA_PMT_DATA_BITS) - 1) << MV_NETA_PMT_DATA_OFFS)
+
+#define MV_NETA_PMT_CTRL_OFFS       16
+#define MV_NETA_PMT_CTRL_BITS       16
+#define MV_NETA_PMT_CTRL_MASK       (((1 << MV_NETA_PMT_CTRL_BITS) - 1) << MV_NETA_PMT_CTRL_OFFS)
+
+#define MV_NETA_PMT_CMD_OFFS        16
+#define MV_NETA_PMT_CMD_BITS        5
+#define MV_NETA_PMT_CMD_ALL_MASK    (((1 << MV_NETA_PMT_CMD_BITS) - 1) << MV_NETA_PMT_CMD_OFFS)
+#define MV_NETA_PMT_CMD_MASK(cmd)   ((cmd) << MV_NETA_PMT_CMD_OFFS)
+
+enum {
+    MV_NETA_CMD_NONE        = 0,
+    MV_NETA_CMD_ADD_2B,
+    MV_NETA_CMD_CFG_VLAN,
+    MV_NETA_CMD_ADD_VLAN,
+    MV_NETA_CMD_CFG_DSA_1,
+    MV_NETA_CMD_CFG_DSA_2,
+    MV_NETA_CMD_ADD_DSA,
+    MV_NETA_CMD_DEL_BYTES,
+    MV_NETA_CMD_REPLACE_2B,
+    MV_NETA_CMD_REPLACE_LSB,
+    MV_NETA_CMD_REPLACE_MSB,
+    MV_NETA_CMD_REPLACE_VLAN,
+    MV_NETA_CMD_DEC_LSB,
+    MV_NETA_CMD_DEC_MSB,
+    MV_NETA_CMD_ADD_CALC_LEN,
+    MV_NETA_CMD_REPLACE_LEN,
+    MV_NETA_CMD_IPV4_CSUM,
+    MV_NETA_CMD_L4_CSUM,
+    MV_NETA_CMD_SKIP,
+    MV_NETA_CMD_JUMP,
+    MV_NETA_CMD_JUMP_SKIP,
+    MV_NETA_CMD_JUMP_SUB,
+    MV_NETA_CMD_PPPOE,
+    MV_NETA_CMD_STORE,
+};
+
+#define MV_NETA_PMT_IP4_CSUM_BIT    21
+#define MV_NETA_PMT_IP4_CSUM_MASK   (1 << MV_NETA_PMT_IP4_CSUM_BIT)
+
+#define MV_NETA_PMT_L4_CSUM_BIT     22
+#define MV_NETA_PMT_L4_CSUM_MASK    (1 << MV_NETA_PMT_L4_CSUM_BIT)
+
+#define MV_NETA_PMT_LAST_BIT        23
+#define MV_NETA_PMT_LAST_MASK       (1 << MV_NETA_PMT_LAST_BIT)
+
+
+/*********** Command special defines ************/
+
+/* Bits for MV_NETA_CMD_DEL_BYTES command */
+/* [7:0] - number of words (2 bytes) to delete */
+#define MV_NETA_PMT_DEL_SHORTS_OFFS         0
+#define MV_NETA_PMT_DEL_SHORTS_MASK         (0xFF << MV_NETA_PMT_DEL_SHORTS_OFFS)
+#define MV_NETA_PMT_DEL_SHORTS(size)        (((size) << MV_NETA_PMT_DEL_SHORTS_OFFS) & MV_NETA_PMT_DEL_SHORTS_MASK)
+
+/* [11:8] - number of words (2 bytes) to skip before the delete command */
+#define MV_NETA_PMT_DEL_SKIP_B_OFFS         8
+#define MV_NETA_PMT_DEL_SKIP_B_MASK         (0xF << MV_NETA_PMT_DEL_SKIP_B_OFFS)
+#define MV_NETA_PMT_DEL_SKIP_B(size)        (((size) << MV_NETA_PMT_DEL_SKIP_B_OFFS) & MV_NETA_PMT_DEL_SKIP_B_MASK)
+
+/* [15:12] - number of words (2 bytes) to skip after the delete command */
+#define MV_NETA_PMT_DEL_SKIP_A_OFFS         12
+#define MV_NETA_PMT_DEL_SKIP_A_MASK         (0xF << MV_NETA_PMT_DEL_SKIP_A_OFFS)
+#define MV_NETA_PMT_DEL_SKIP_A(size)        (((size) << MV_NETA_PMT_DEL_SKIP_A_OFFS) & MV_NETA_PMT_DEL_SKIP_A_MASK)
+/*-----------------------------------------------------------------------------------------------------------------*/
+
+/* Bits for Add Calculated length operation */
+/* Used for commands: Add Calculated length, Replace length, Skip, */
+#define MV_NETA_PMT_ZERO_ADD                0
+#define MV_NETA_PMT_DATA_ADD                2
+#define MV_NETA_PMT_DATA_SUB                3
+
+#define MV_NETA_PMT_CALC_LEN_0_OFFS         14
+#define MV_NETA_PMT_CALC_LEN_0_MASK         (3 << MV_NETA_PMT_CALC_LEN_0_OFFS)
+#define MV_NETA_PMT_CALC_LEN_0_ZERO         (0 << MV_NETA_PMT_CALC_LEN_0_OFFS)
+#define MV_NETA_PMT_CALC_LEN_0_TX_DESC      (1 << MV_NETA_PMT_CALC_LEN_0_OFFS)
+#define MV_NETA_PMT_CALC_LEN_0_TX_PKT       (2 << MV_NETA_PMT_CALC_LEN_0_OFFS)
+#define MV_NETA_PMT_CALC_LEN_0_STORE        (3 << MV_NETA_PMT_CALC_LEN_0_OFFS)
+
+#define MV_NETA_PMT_CALC_LEN_1_OFFS         12
+#define MV_NETA_PMT_CALC_LEN_1_MASK         (3 << MV_NETA_PMT_CALC_LEN_1_OFFS)
+#define MV_NETA_PMT_CALC_LEN_1(op)          ((op) << MV_NETA_PMT_CALC_LEN_1_OFFS)
+
+#define MV_NETA_PMT_CALC_LEN_2_OFFS         10
+#define MV_NETA_PMT_CALC_LEN_2_MASK         (3 << MV_NETA_PMT_CALC_LEN_2_OFFS)
+#define MV_NETA_PMT_CALC_LEN_2(op)          ((op) << MV_NETA_PMT_CALC_LEN_2_OFFS)
+
+#define MV_NETA_PMT_CALC_LEN_3_BIT          9
+#define MV_NETA_PMT_CALC_LEN_3_ADD_MASK     (0 << MV_NETA_PMT_CALC_LEN_3_BIT)
+#define MV_NETA_PMT_CALC_LEN_3_SUB_MASK     (1 << MV_NETA_PMT_CALC_LEN_3_BIT)
+
+#define MV_NETA_PMT_CALC_LEN_DATA_OFFS      0
+#define MV_NETA_PMT_CALC_LEN_DATA_MASK      (0x1FF << MV_NETA_PMT_CALC_LEN_DATA_OFFS)
+#define MV_NETA_PMT_CALC_LEN_DATA(data)     ((data) << MV_NETA_PMT_CALC_LEN_DATA_OFFS)
+/*-----------------------------------------------------------------------------------------------------------------*/
+
+/* Bits for MV_NETA_CMD_DEC_LSB and MV_NETA_CMD_DEC_MSB commands */
+#define MV_NETA_PMT_DEC_SKIP_A_OFFS         0
+#define MV_NETA_PMT_DEC_SKIP_A_MASK         (0xFF << MV_NETA_PMT_DEC_SKIP_A_OFFS)
+#define MV_NETA_PMT_DEC_SKIP_A(size)        (((size) << MV_NETA_PMT_DEC_SKIP_A_OFFS) & MV_NETA_PMT_DEC_SKIP_A_MASK)
+
+#define MV_NETA_PMT_DEC_SKIP_B_OFFS         8
+#define MV_NETA_PMT_DEC_SKIP_B_MASK         (0xFF << MV_NETA_PMT_DEC_SKIP_B_OFFS)
+#define MV_NETA_PMT_DEC_SKIP_B(size)        (((size) << MV_NETA_PMT_DEC_SKIP_B_OFFS) & MV_NETA_PMT_DEC_SKIP_B_MASK)
+/*-----------------------------------------------------------------------------------------------------------------*/
+
+#define MV_NETA_PMT_CLEAR(pmt)          \
+		(pmt)->word = 0;
+
+#define MV_NETA_PMT_IS_VALID(pmt)        \
+		((((pmt)->word & MV_NETA_PMT_CMD_ALL_MASK) >> MV_NETA_PMT_CMD_OFFS) != MV_NETA_CMD_NONE)
+
+#define MV_NETA_PMT_INVALID_SET(pmt)        \
+		((pmt)->word = MV_NETA_PMT_CMD_MASK(MV_NETA_CMD_NONE) | MV_NETA_PMT_LAST_MASK);
+
+#define MV_NETA_PMT_CTRL_GET(pmt)           \
+		(MV_U16)(((pmt)->word & MV_NETA_PMT_CTRL_MASK) >> MV_NETA_PMT_CTRL_OFFS)
+
+#define MV_NETA_PMT_CMD_GET(pmt)           \
+		(((pmt)->word & MV_NETA_PMT_CMD_ALL_MASK) >> MV_NETA_PMT_CMD_OFFS)
+
+#define MV_NETA_PMT_DATA_GET(pmt)           \
+		(MV_U16)(((pmt)->word & MV_NETA_PMT_DATA_MASK) >> MV_NETA_PMT_DATA_OFFS)
+
+#define MV_NETA_PMT_CMD_SET(pmt, cmd)                       \
+		(pmt)->word &= ~MV_NETA_PMT_CMD_ALL_MASK;       \
+		(pmt)->word |= MV_NETA_PMT_CMD_MASK(cmd);
+
+#define MV_NETA_PMT_DATA_SET(pmt, data)                         \
+		(pmt)->word &= ~MV_NETA_PMT_DATA_MASK;              \
+		(pmt)->word |= ((data) << MV_NETA_PMT_DATA_OFFS);
+
+
+MV_STATUS   mvNetaPmtWrite(int port, int idx, MV_NETA_PMT *pEntry);
+MV_STATUS   mvNetaPmtRead(int port, int idx, MV_NETA_PMT *pEntry);
+MV_STATUS   mvNetaPmtClear(int port);
+MV_STATUS   mvNetaPmtInit(int port, MV_NETA_PMT *pBase);
+MV_VOID	    mvNetaPmtDestroy(MV_VOID);
+MV_VOID	    mvNetaPmtDump(int port, int flags);
+MV_VOID        mvNetaPmtRegs(int port, int txp);
+
+MV_VOID        mvNetaPmtEntryPrint(MV_NETA_PMT *pEntry);
+
+MV_VOID        mvNetaPmtAdd2Bytes(MV_NETA_PMT *pEntry, MV_U16 data);
+MV_VOID        mvNetaPmtReplace2Bytes(MV_NETA_PMT *pEntry, MV_U16 data);
+MV_VOID        mvNetaPmtDelShorts(MV_NETA_PMT *pEntry, MV_U8 bDelete,
+				MV_U8 skipBefore, MV_U8 skipAfter);
+MV_VOID        mvNetaPmtReplaceLSB(MV_NETA_PMT *pEntry, MV_U8 value, MV_U8 mask);
+MV_VOID        mvNetaPmtReplaceMSB(MV_NETA_PMT *pEntry, MV_U8 value, MV_U8 mask);
+
+MV_VOID        mvNetaPmtDecLSB(MV_NETA_PMT *pEntry, MV_U8 skipBefore, MV_U8 skipAfter);
+MV_VOID        mvNetaPmtDecMSB(MV_NETA_PMT *pEntry, MV_U8 skipBefore, MV_U8 skipAfter);
+
+MV_VOID        mvNetaPmtLastFlag(MV_NETA_PMT *pEntry, int last);
+MV_VOID        mvNetaPmtFlags(MV_NETA_PMT *pEntry, int last, int ipv4, int l4);
+MV_VOID        mvNetaPmtSkip(MV_NETA_PMT *pEntry, MV_U16 shorts);
+MV_VOID        mvNetaPmtReplaceIPv4csum(MV_NETA_PMT *pEntry, MV_U16 data);
+MV_VOID        mvNetaPmtReplaceL4csum(MV_NETA_PMT *pEntry, MV_U16 data);
+MV_VOID        mvNetaPmtJump(MV_NETA_PMT *pEntry, MV_U16 target, int type, int cond);
+
+/* High level PMT configurations */
+int         mvNetaPmtTtlDec(int port, int idx, int ip_offs, int isLast);
+int         mvNetaPmtDataReplace(int port, int idx, int offset,
+				 MV_U8 *data, int bytes, int isLast);
+
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __mvPmt_h__ */
diff --git a/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPnc.c b/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPnc.c
new file mode 100644
index 000000000000..904809de1282
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPnc.c
@@ -0,0 +1,1487 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvOs.h"
+#include "mvCommon.h"
+#include "mv802_3.h"
+#ifndef CONFIG_OF
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#endif
+
+#include "gbe/mvNetaRegs.h"
+#include "gbe/mvEthRegs.h"
+
+#include "mvPnc.h"
+#include "mvTcam.h"
+
+/*
+ * PNC debug
+ */
+/*#define PNC_DBG mvOsPrintf*/
+#define PNC_DBG(X...)
+
+/*
+ * PNC errors
+ */
+#define PNC_ERR mvOsPrintf
+/*#define PNC_ERR(X...)*/
+
+/*
+ * Local variables
+ */
+static int   pnc_inited = 0;
+static int rxq_mac_bc = CONFIG_MV_ETH_RXQ_DEF;
+static int rxq_mac_mc = CONFIG_MV_ETH_RXQ_DEF;
+static int rxq_vlan = CONFIG_MV_ETH_RXQ_DEF;
+static int rxq_ip6 = CONFIG_MV_ETH_RXQ_DEF;
+static int rxq_ip4 = CONFIG_MV_ETH_RXQ_DEF;
+static int rxq_ip4_tcp = CONFIG_MV_ETH_RXQ_DEF;
+static int rxq_ip4_udp = CONFIG_MV_ETH_RXQ_DEF;
+static int rxq_arp 		= CONFIG_MV_ETH_RXQ_DEF;
+
+/* xlate gbe port number to port value in pnc entry */
+struct gbe_pnc_port_mapping gbe_pnc_map[PORT_BITS];
+struct gbe_pnc_port_mapping gbe_pnc_map_kw2[] = {
+		{.gbe_port = 0, .pnc_port = 2},
+		{.gbe_port = 1, .pnc_port = 4},
+		{.gbe_port = 2, .pnc_port = 0},
+};
+struct gbe_pnc_port_mapping gbe_pnc_map_axp[] = {
+		{.gbe_port = 0, .pnc_port = 0},
+		{.gbe_port = 1, .pnc_port = 4},
+		{.gbe_port = 2, .pnc_port = 2},
+		{.gbe_port = 3, .pnc_port = 3},
+		{.gbe_port = 0, .pnc_port = 1},
+};
+struct gbe_pnc_port_mapping gbe_pnc_map_38x[] = {
+		{.gbe_port = 0, .pnc_port = 0},
+		{.gbe_port = 1, .pnc_port = 2},
+		{.gbe_port = 2, .pnc_port = 4},
+};
+
+
+#ifdef CONFIG_OF
+int pnc_gbe_port_map_init(unsigned int ctrl_model, unsigned int ctrl_rev)
+{
+
+	memset(&gbe_pnc_map, 0xff, sizeof(gbe_pnc_map));
+
+	if (ctrl_model == MV78230_DEV_ID
+		|| ctrl_model == MV78260_DEV_ID
+		|| ctrl_model == MV78460_DEV_ID) {
+		/* Armada XP ID */
+		memcpy(&gbe_pnc_map, &gbe_pnc_map_axp, sizeof(gbe_pnc_map_axp));
+	} else if (ctrl_model == MV88F6510_DEV_ID
+		|| ctrl_model == MV88F6530_DEV_ID
+		|| ctrl_model == MV88F6601_DEV_ID
+		|| ctrl_model == MV88F6560_DEV_ID) {
+		/* Armada KW2 ID */
+		memcpy(&gbe_pnc_map, &gbe_pnc_map_kw2, sizeof(gbe_pnc_map_kw2));
+	} else if (ctrl_model == MV88F6810_DEV_ID
+		|| ctrl_model == MV88F6811_DEV_ID
+		|| ctrl_model == MV88F6820_DEV_ID
+		|| ctrl_model == MV88F6828_DEV_ID) {
+		/* Armada A38x ID */
+		memcpy(&gbe_pnc_map, &gbe_pnc_map_38x, sizeof(gbe_pnc_map_38x));
+	} else {
+		mvOsPrintf("%s: ctrl_model=%x is not supported\n", __func__, ctrl_model);
+		return -1;
+	}
+	return 0;
+}
+#else
+int pnc_gbe_port_map_init(unsigned int ctrl_model, unsigned int ctrl_rev)
+{
+	memset(&gbe_pnc_map, 0xff, sizeof(gbe_pnc_map));
+
+#ifdef CONFIG_ARCH_FEROCEON_KW2
+	/* Armada KW2 ID */
+	memcpy(&gbe_pnc_map, &gbe_pnc_map_kw2, sizeof(gbe_pnc_map_kw2));
+#elif defined(CONFIG_ARCH_ARMADA38X)
+	/* Armada A38x ID */
+	memcpy(&gbe_pnc_map, &gbe_pnc_map_38x, sizeof(gbe_pnc_map_38x));
+#else
+	/* Armada XP ID */
+	memcpy(&gbe_pnc_map, &gbe_pnc_map_axp, sizeof(gbe_pnc_map_axp));
+#endif
+
+	return 0;
+}
+#endif
+
+int pnc_port_map(int pnc_port)
+{
+	int loop;
+
+	for (loop = 0; loop < PORT_BITS; loop++)
+		if (gbe_pnc_map[loop].pnc_port == pnc_port)
+			return gbe_pnc_map[loop].gbe_port;
+
+	mvOsPrintf("%s: pnc_port=%d is out of range\n", __func__, pnc_port);
+	return -1;
+}
+
+int pnc_eth_port_map(int eth_port)
+{
+	int loop;
+
+	for (loop = 0; loop < PORT_BITS; loop++)
+		if (gbe_pnc_map[loop].gbe_port == eth_port)
+			return gbe_pnc_map[loop].pnc_port;
+
+	mvOsPrintf("%s: eth_port=%d is out of range\n", __func__, eth_port);
+	return -1;
+}
+
+int pnc_te_del(unsigned int tid)
+{
+	PNC_DBG("%s [%d]\n", __func__, tid);
+
+	tcam_hw_inv(tid);
+
+	return 0;
+}
+
+/* pnc port setting: data: 0 for all bits, mask: 0 - for accepted ports, 1 - for rejected ports */
+int pnc_port_mask_check(unsigned int mask, int eth_port)
+{
+	int pnc_port = pnc_eth_port_map(eth_port);
+
+	if (pnc_port < 0)
+		return 0;
+
+	if (mask & (1 << pnc_port))
+		return 0;
+
+	return 1;
+}
+
+unsigned int pnc_port_mask_update(unsigned int mask, int eth_port, int add)
+{
+	int pnc_port = pnc_eth_port_map(eth_port);
+
+	if (pnc_port < 0)
+		return mask;
+
+	if (add)
+		mask &= ~(1 << pnc_port);
+	else
+		mask |= (1 << pnc_port);
+
+	return mask;
+}
+
+unsigned int pnc_port_mask(int eth_port)
+{
+	unsigned int mask;
+	int pnc_port = pnc_eth_port_map(eth_port);
+
+	if (pnc_port < 0)
+		return 0;
+
+	mask = (~(1 << pnc_port)) & PORT_MASK;
+	return mask;
+}
+
+/* Get TCAM entry if valid, NULL if invalid */
+struct tcam_entry *pnc_tcam_entry_get(int tid)
+{
+	struct tcam_entry *te;
+
+	te = tcam_sw_alloc(0);
+
+	tcam_hw_read(te, tid);
+
+	if (te->ctrl.flags & TCAM_F_INV) {
+		tcam_sw_free(te);
+		return NULL;
+	}
+	return te;
+}
+
+int pnc_tcam_port_update(int tid, int eth_port, int add)
+{
+	struct tcam_entry *te;
+	unsigned int data, mask;
+
+	te = pnc_tcam_entry_get(tid);
+	if (te == NULL) {
+		mvOsPrintf("%s: TCAM entry #%d is invalid\n", __func__, tid);
+		return -1;
+	}
+	tcam_sw_get_port(te, &data, &mask);
+	mask = pnc_port_mask_update(mask, eth_port, add);
+	tcam_sw_set_port(te, data, mask);
+	tcam_hw_write(te, tid);
+	tcam_sw_free(te);
+
+	return 0;
+}
+
+/******************************************************************************
+ *
+ * MAC Address Section
+ *
+ ******************************************************************************
+ */
+
+/*
+ * pnc_mac_fc_drop - Add Flow Control MAC address match rule to the MAC section
+ * to drop PAUSE frames arriving without Marvell Header on all ports
+ */
+static void pnc_mac_fc_drop(void)
+{
+	struct tcam_entry *te = NULL;
+	unsigned char da[6] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
+	unsigned int len = MV_MAC_ADDR_SIZE;
+
+	te = tcam_sw_alloc(TCAM_LU_MAC);
+
+	/* set match on DA */
+	while (len--)
+		tcam_sw_set_byte(te, len, da[len]);
+
+	/* port id match */
+	tcam_sw_set_port(te, 0, 0);	/* all ports */
+
+	/* result info bit */
+	sram_sw_set_rinfo(te, RI_DROP, RI_DROP);
+
+	tcam_sw_text(te, "flow control");
+	sram_sw_set_lookup_done(te, 1);
+
+	tcam_hw_write(te, TE_MAC_FLOW_CTRL);
+	tcam_sw_free(te);
+}
+
+/*
+ * pnc_mac_da - Add DA MAC address match rule to the MAC section
+ * @da: destination MAC address
+ * @len: destination MAC address length to match on: 0..6
+ * @port_mask: source port id: 0..1F or ANY
+ * @rxq: rx queue
+ * @rinfo: result info bits to set
+ */
+static struct tcam_entry *pnc_mac_da(unsigned char *da, unsigned int len,
+				     unsigned int port_mask, int rxq, unsigned int rinfo)
+{
+	struct tcam_entry *te = NULL;
+
+	if (len > MV_MAC_ADDR_SIZE)
+		goto out;
+
+	if (rinfo >= BIT24)
+		goto out;
+
+	te = tcam_sw_alloc(TCAM_LU_MAC);
+
+	/* set match on DA */
+	while (len--)
+		tcam_sw_set_byte(te, MV_ETH_MH_SIZE + len, da[len]);
+
+	/* port id match */
+	tcam_sw_set_port(te, 0, port_mask);
+
+	/* result info bit */
+	sram_sw_set_rinfo(te, rinfo, rinfo);
+
+	/* set rx queue */
+	sram_sw_set_rxq(te, rxq, 0);
+
+	/* shift to ethertype */
+	sram_sw_set_shift_update(te, 0, MV_ETH_MH_SIZE + 2 * MV_MAC_ADDR_SIZE);
+	sram_sw_set_next_lookup(te, TCAM_LU_L2);
+out:
+	return te;
+}
+
+/*
+ * pnc_mac_me - Add DA MAC address of port
+ * @mac: destination MAC address or NULL for promiscuous
+ * @port: ingress giga port number
+ */
+int pnc_mac_me(unsigned int port, unsigned char *mac, int rxq)
+{
+	struct tcam_entry *te;
+	int len = MV_MAC_ADDR_SIZE;
+	char text[TCAM_TEXT];
+	unsigned int port_mask = pnc_port_mask(port);
+
+	if (port_mask < 0)
+		return 1;
+
+	if (!mac)
+		len = 0;
+
+	te = pnc_mac_da(mac, len, port_mask, rxq, RI_DA_ME);
+	sprintf(text, "%s%d", "ucast_me", port);
+	tcam_sw_text(te, text);
+
+	tcam_hw_write(te, TE_MAC_ME + port);
+	tcam_sw_free(te);
+
+	return 0;
+}
+
+/*
+ * pnc_mcast_all - Accept all MAC multicast of port
+ * @port: ingress giga port number.
+ * @en: 1 - Accept ALL MCAST, 0 - Discard ALL MCAST
+ */
+int pnc_mcast_all(unsigned int port, int en)
+{
+	struct tcam_entry *te;
+	unsigned int data, mask;
+
+	te = pnc_tcam_entry_get(TE_MAC_MC_ALL);
+	if (te == NULL) {
+		mvOsPrintf("%s: MC_ALL entry (tid=%d) is invalid\n", __func__, TE_MAC_MC_ALL);
+		return 1;
+	}
+
+	/* Update port mask */
+	tcam_sw_get_port(te, &data, &mask);
+	mask = pnc_port_mask_update(mask, port, en);
+
+	tcam_sw_set_port(te, data, mask);
+	tcam_sw_text(te, "mcast_all");
+
+	tcam_hw_write(te, TE_MAC_MC_ALL);
+	tcam_sw_free(te);
+
+	return 0;
+}
+
+/*
+ * pnc_mcast_me - Add DA MAC address of port
+ * @mac: Multicast MAC DA or NULL to delete all Multicast DAs for this port
+ * @port: ingress giga port number
+ */
+int pnc_mcast_me(unsigned int port, unsigned char *mac)
+{
+	struct tcam_entry *te;
+	int tid, empty = -1;
+	unsigned int data, mask;
+
+	if (mac == NULL) {
+		/* Delete all Multicast addresses for this port */
+		for (tid = (TE_MAC_MC_ALL + 1); tid <= TE_MAC_MC_L; tid++) {
+			/* Check TCAM entry */
+			te = pnc_tcam_entry_get(tid);
+			if (te != NULL) {
+				/* delete entry if belong specific port */
+				tcam_sw_get_port(te, &data, &mask);
+				mask = pnc_port_mask_update(mask, port, 0);
+				if (mask == PORT_MASK) {	/* No valid ports */
+					tcam_hw_inv(tid);
+				} else {
+					tcam_sw_set_port(te, data, mask);
+					tcam_hw_write(te, tid);
+				}
+				tcam_sw_free(te);
+			}
+		}
+		return 0;
+	}
+
+	/* Add new Multicast DA for this port */
+	for (tid = (TE_MAC_MC_ALL + 1); tid <= TE_MAC_MC_L; tid++) {
+		te = pnc_tcam_entry_get(tid);
+
+		/* Remember first Empty entry */
+		if (te == NULL) {
+			if (empty == -1)
+				empty = tid;
+
+			continue;
+		}
+
+		/* Find existing TCAM entry with this DA */
+		if (tcam_sw_cmp_bytes(te, MV_ETH_MH_SIZE, MV_MAC_ADDR_SIZE, mac) == 0) {
+			/* check and update port mask */
+			tcam_sw_get_port(te, &data, &mask);
+			mask = pnc_port_mask_update(mask, port, 1);
+			tcam_sw_set_port(te, data, mask);
+
+			tcam_hw_write(te, tid);
+			tcam_sw_free(te);
+			return 0;
+		}
+		tcam_sw_free(te);
+	}
+
+	/* Not found existing entry and no free TCAM entry - Failed */
+	if (empty == -1)
+		return 1;
+
+	/* Not found existing entry - add to free TCAM entry */
+	te = pnc_mac_da(mac, MV_MAC_ADDR_SIZE, pnc_port_mask(port), rxq_mac_mc, RI_DA_MC);
+	tcam_sw_text(te, "mcast_me");
+
+	tcam_hw_write(te, empty);
+	tcam_sw_free(te);
+	return 0;
+}
+
+/*
+ * pnc_mac_init - GE init phase configuration
+ */
+static int pnc_mac_init(void)
+{
+	struct tcam_entry *te;
+	unsigned char da_mc[6] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
+	unsigned char da_bc[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+	PNC_DBG("%s\n", __func__);
+
+	/* broadcast - for all ports */
+	te = pnc_mac_da(da_bc, 6, 0, rxq_mac_bc, RI_DA_BC);
+	tcam_sw_text(te, "bcast");
+
+	tcam_hw_write(te, TE_MAC_BC);
+	tcam_sw_free(te);
+
+	/* flow control PAUSE frames - discard for all ports by default */
+	pnc_mac_fc_drop();
+
+	/* All Multicast - no ports by default */
+	te = pnc_mac_da(da_mc, 1, PORT_MASK, rxq_mac_mc, RI_DA_MC);
+	tcam_sw_text(te, "mcast_all");
+
+	tcam_hw_write(te, TE_MAC_MC_ALL);
+	tcam_sw_free(te);
+
+	/* end of section */
+	te = tcam_sw_alloc(TCAM_LU_MAC);
+	sram_sw_set_flowid(te, FLOWID_EOF_LU_MAC, FLOWID_CTRL_LOW_HALF_MASK);
+
+	/* Non-promiscous mode - DROP unknown packets */
+	sram_sw_set_rinfo(te, RI_DROP, RI_DROP);
+	sram_sw_set_lookup_done(te, 1);
+	tcam_sw_text(te, "mac_eof");
+
+	tcam_hw_write(te, TE_MAC_EOF);
+	tcam_sw_free(te);
+
+	return 0;
+}
+
+/******************************************************************************
+ *
+ * L2 Section
+ *
+ ******************************************************************************
+ */
+
+/*
+ * Helper: match ethertype
+ */
+static void pnc_match_etype(struct tcam_entry *te, unsigned short ethertype)
+{
+	tcam_sw_set_byte(te, 0, ethertype >> 8);
+	tcam_sw_set_byte(te, 1, ethertype & 0xFF);
+}
+
+/* Set VLAN priority entry */
+int pnc_vlan_prio_set(int port, int prio, int rxq)
+{
+#if (CONFIG_MV_ETH_PNC_VLAN_PRIO > 0)
+	struct tcam_entry *te;
+	unsigned int pdata, pmask;
+	int q, tid, empty = -1;
+
+	PNC_DBG("%s\n", __func__);
+
+	/* check validity */
+	if ((prio < 0) || (prio > 7))
+		return 1;
+
+	if ((rxq < -1) || (rxq > CONFIG_MV_ETH_RXQ))
+		return 1;
+
+	/* Find match TCAM entry */
+	for (tid = TE_VLAN_PRIO; tid <= TE_VLAN_PRIO_END; tid++) {
+		te = pnc_tcam_entry_get(tid);
+		/* Remember first Empty entry */
+		if (te == NULL) {
+			if (empty == -1)
+				empty = tid;
+			continue;
+		}
+		/* find VLAN entry with the same priority */
+		if (tcam_sw_cmp_byte(te, 2, ((unsigned char)prio << 5)) == 0) {
+			tcam_sw_get_port(te, &pdata, &pmask);
+			if (rxq == -1) {
+				if (!pnc_port_mask_check(pmask, port)) {
+					tcam_sw_free(te);
+					continue;
+				}
+				pmask = pnc_port_mask_update(pmask, port, 0);
+				if (pmask == PORT_MASK) {	/* No valid ports */
+					tcam_hw_inv(tid);
+				} else {
+					tcam_sw_set_port(te, pdata, pmask);
+					tcam_hw_write(te, tid);
+				}
+			} else {
+				q = sram_sw_get_rxq(te, NULL);
+				if (rxq == q) {
+					/* Add port to this entry */
+					pmask = pnc_port_mask_update(pmask, port, 1);
+					tcam_sw_set_port(te, pdata, pmask);
+					tcam_hw_write(te, tid);
+				} else {
+					/* Update RXQ */
+					pmask = pnc_port_mask_update(pmask, port, 0);
+					if (pmask == PORT_MASK) {
+						/* No valid ports - use the same entry */
+						pmask = pnc_port_mask_update(pmask, port, 1);
+						tcam_sw_set_port(te, pdata, pmask);
+						sram_sw_set_rxq(te, rxq, 0);
+						tcam_hw_write(te, tid);
+						tcam_sw_free(te);
+					} else {
+						tcam_sw_free(te);
+						continue;
+					}
+				}
+			}
+			tcam_sw_free(te);
+			return 0;
+		}
+		tcam_sw_free(te);
+	}
+	if (rxq == -1) {
+		mvOsPrintf("%s: Entry not found - vprio=%d, rxq=%d\n",
+					__func__, prio, rxq);
+		return 1;
+	}
+
+	/* Not found existing entry and no free TCAM entry - Failed */
+	if (empty == -1) {
+		mvOsPrintf("%s: No free place - vprio=%d, rxq=%d\n",
+					__func__, prio, rxq);
+		return 1;
+	}
+
+	/* Not found existing entry - add to free TCAM entry */
+	te = tcam_sw_alloc(TCAM_LU_L2);
+	pnc_match_etype(te, MV_VLAN_TYPE);
+	tcam_sw_set_byte(te, 2, prio << 5);
+	tcam_sw_set_mask(te, 2, 7 << 5);
+	tcam_sw_text(te, "vlan_prio");
+
+	sram_sw_set_rinfo(te, RI_VLAN, RI_VLAN);
+	sram_sw_set_next_lookup(te, TCAM_LU_L2);
+	sram_sw_set_shift_update(te, 0, MV_VLAN_HLEN);
+	sram_sw_set_rxq(te, rxq, 0);
+
+	/* single port mask */
+	pmask = pnc_port_mask(port);
+	tcam_sw_set_port(te, 0, pmask);
+
+	tcam_sw_text(te, "vlan_prio");
+
+	tcam_hw_write(te, empty);
+	tcam_sw_free(te);
+
+	return 0;
+#else
+	return -1;
+#endif /* CONFIG_MV_ETH_PNC_VLAN_PRIO > 0 */
+}
+
+int pnc_vlan_init(void)
+{
+	struct tcam_entry *te;
+	int tid;
+
+	PNC_DBG("%s\n", __func__);
+
+	/* Set default VLAN entry */
+	tid = TE_VLAN_EOF;
+	te = tcam_sw_alloc(TCAM_LU_L2);
+	pnc_match_etype(te, MV_VLAN_TYPE);
+	tcam_sw_set_mask(te, 2, 0);
+	tcam_sw_text(te, "vlan_def");
+
+	sram_sw_set_rxq(te, rxq_vlan, 0);
+
+	sram_sw_set_rinfo(te, RI_VLAN, RI_VLAN);
+	sram_sw_set_next_lookup(te, TCAM_LU_L2);
+	sram_sw_set_shift_update(te, 0, MV_VLAN_HLEN);
+
+	tcam_hw_write(te, tid);
+	tcam_sw_free(te);
+
+	return 0;
+}
+
+/******************************************************************************
+ *
+ * Ethertype Section
+ *
+ ******************************************************************************
+ */
+/* match arp */
+void pnc_etype_arp(int rxq)
+{
+	struct tcam_entry *te;
+
+	rxq_arp = rxq;
+	te = tcam_sw_alloc(TCAM_LU_L2);
+	pnc_match_etype(te, MV_IP_ARP_TYPE);
+	sram_sw_set_lookup_done(te, 1);
+	sram_sw_set_rxq(te, rxq_arp, 0);
+	tcam_sw_text(te, "etype_arp");
+
+	tcam_hw_write(te, TE_ETYPE_ARP);
+	tcam_sw_free(te);
+}
+
+/* match ip4 */
+static void pnc_etype_ip4(void)
+{
+	struct tcam_entry *te;
+
+	te = tcam_sw_alloc(TCAM_LU_L2);
+	pnc_match_etype(te, MV_IP_TYPE);
+	sram_sw_set_shift_update(te, 0, MV_ETH_TYPE_LEN);
+	sram_sw_set_next_lookup(te, TCAM_LU_IP4);
+	tcam_sw_text(te, "etype_ipv4");
+
+	tcam_hw_write(te, TE_ETYPE_IP4);
+	tcam_sw_free(te);
+}
+
+/* match ip6 */
+static void pnc_etype_ip6(void)
+{
+	struct tcam_entry *te;
+
+	te = tcam_sw_alloc(TCAM_LU_L2);
+	pnc_match_etype(te, MV_IP6_TYPE);
+	sram_sw_set_shift_update(te, 0, MV_ETH_TYPE_LEN);
+	sram_sw_set_next_lookup(te, TCAM_LU_IP6);
+	tcam_sw_text(te, "etype_ipv6");
+
+	tcam_hw_write(te, TE_ETYPE_IP6);
+	tcam_sw_free(te);
+}
+
+/* match pppoe */
+static void pnc_etype_pppoe(void)
+{
+	struct tcam_entry *te;
+
+	/* IPv4 over PPPoE */
+	te = tcam_sw_alloc(TCAM_LU_L2);
+	pnc_match_etype(te, MV_PPPOE_TYPE);
+	tcam_sw_set_byte(te, MV_PPPOE_HDR_SIZE, 0x00);
+	tcam_sw_set_byte(te, MV_PPPOE_HDR_SIZE + 1, 0x21);
+
+	sram_sw_set_shift_update(te, 0, MV_ETH_TYPE_LEN + MV_PPPOE_HDR_SIZE);
+	sram_sw_set_next_lookup(te, TCAM_LU_IP4);
+	sram_sw_set_rinfo(te, RI_PPPOE, RI_PPPOE);
+	tcam_sw_text(te, "pppoe_ip4");
+
+	tcam_hw_write(te, TE_PPPOE_IP4);
+	tcam_sw_free(te);
+
+	/* IPv6 over PPPoE */
+	te = tcam_sw_alloc(TCAM_LU_L2);
+	pnc_match_etype(te, MV_PPPOE_TYPE);
+
+	tcam_sw_set_byte(te, MV_PPPOE_HDR_SIZE, 0x00);
+	tcam_sw_set_byte(te, MV_PPPOE_HDR_SIZE + 1, 0x57);
+
+	sram_sw_set_shift_update(te, 0, MV_ETH_TYPE_LEN + MV_PPPOE_HDR_SIZE);
+	sram_sw_set_next_lookup(te, TCAM_LU_IP6);
+	sram_sw_set_rinfo(te, RI_PPPOE, RI_PPPOE);
+	tcam_sw_text(te, "pppoe_ip6");
+
+	tcam_hw_write(te, TE_PPPOE_IP6);
+	tcam_sw_free(te);
+}
+
+/*
+ * pnc_etype_init - match basic ethertypes
+ */
+static int pnc_etype_init(void)
+{
+	struct tcam_entry *te;
+	int tid;
+
+	PNC_DBG("%s\n", __func__);
+
+	pnc_etype_arp(CONFIG_MV_ETH_RXQ_DEF);
+	pnc_etype_ip4();
+	pnc_etype_ip6();
+	pnc_etype_pppoe();
+
+	/* add custom ethertypes here */
+	tid = TE_ETYPE;
+
+	ERR_ON_OOR(--tid >= TE_ETYPE_EOF);
+
+	/* end of section */
+	te = tcam_sw_alloc(TCAM_LU_L2);
+	sram_sw_set_flowid(te, FLOWID_EOF_LU_L2, FLOWID_CTRL_LOW_HALF_MASK);
+	sram_sw_set_rxq(te, CONFIG_MV_ETH_RXQ_DEF, 0);
+	sram_sw_set_lookup_done(te, 1);
+	tcam_sw_text(te, "etype_eof");
+
+	tcam_hw_write(te, TE_ETYPE_EOF);
+	tcam_sw_free(te);
+
+	return 0;
+}
+
+/******************************************************************************
+ *
+ * IPv4 Section
+ *
+ ******************************************************************************
+ */
+
+static void pnc_ip4_flow_next_lookup_set(struct tcam_entry *te)
+{
+#ifdef CONFIG_MV_ETH_PNC_L3_FLOW
+	sram_sw_set_next_lookup(te, TCAM_LU_FLOW_IP4);
+#else
+	sram_sw_set_next_lookup(te, TCAM_LU_L4);
+#endif /* CONFIG_MV_ETH_PNC_L3_FLOW */
+}
+
+/*
+ * pnc_ip4_tos - Add TOS priority rules
+ */
+int pnc_ip4_dscp(int port, unsigned char dscp, unsigned char mask, int rxq)
+{
+#if (CONFIG_MV_ETH_PNC_DSCP_PRIO > 0)
+	struct tcam_entry *te;
+	unsigned int pdata, pmask;
+	int tid, q, empty = -1;
+
+	if ((rxq < -1) || (rxq > CONFIG_MV_ETH_RXQ))
+		return 1;
+
+	for (tid = TE_IP4_DSCP; tid <= TE_IP4_DSCP_END; tid++) {
+		PNC_DBG("%s: tid=%d, dscp=0x%02x, mask=0x%02x, rxq=%d\n", __func__, tid, dscp, mask, rxq);
+
+		te = pnc_tcam_entry_get(tid);
+		/* Remember first Empty entry */
+		if (te == NULL) {
+			if (empty == -1)
+				empty = tid;
+			continue;
+		}
+		/* Find existing entry for this TOS */
+		if (tcam_sw_cmp_bytes(te, 1, 1, &dscp) == 0) {
+			tcam_sw_get_port(te, &pdata, &pmask);
+			if (rxq == -1) {
+				if (!pnc_port_mask_check(pmask, port)) {
+					tcam_sw_free(te);
+					continue;
+				}
+				pmask = pnc_port_mask_update(pmask, port, 0);
+				if (pmask == PORT_MASK) {	/* No valid ports */
+					tcam_hw_inv(tid);
+				} else {
+					tcam_sw_set_port(te, pdata, pmask);
+					tcam_hw_write(te, tid);
+				}
+			} else {
+				q = sram_sw_get_rxq(te, NULL);
+				if (rxq == q) {
+					/* Add port to this entry */
+					pmask = pnc_port_mask_update(pmask, port, 1);
+					tcam_sw_set_port(te, pdata, pmask);
+					tcam_hw_write(te, tid);
+				} else {
+					/* Update RXQ */
+					pmask = pnc_port_mask_update(pmask, port, 0);
+					if (pmask == PORT_MASK) {
+						/* No valid ports - use the same entry */
+						pmask = pnc_port_mask_update(pmask, port, 1);
+						tcam_sw_set_port(te, pdata, pmask);
+						sram_sw_set_rxq(te, rxq, 0);
+						tcam_hw_write(te, tid);
+					} else {
+						tcam_sw_free(te);
+						continue;
+					}
+				}
+			}
+			tcam_sw_free(te);
+			return 0;
+		}
+		tcam_sw_free(te);
+	}
+
+	if (rxq == -1) {
+		mvOsPrintf("%s: Entry not found - tos=0x%x, rxq=%d\n",
+					__func__, dscp, rxq);
+		return 1;
+	}
+
+	/* Not found existing entry and no free TCAM entry - Failed */
+	if (empty == -1) {
+		mvOsPrintf("%s: No free place - tos=0x%x, rxq=%d\n",
+					__func__, dscp, rxq);
+		return 1;
+	}
+
+	/* Not found existing entry - add to free TCAM entry */
+	te = tcam_sw_alloc(TCAM_LU_IP4);
+	tcam_sw_set_byte(te, 1, (MV_U8) dscp);
+	tcam_sw_set_mask(te, 1, (MV_U8) mask);
+	sram_sw_set_rxq(te, rxq, 0);
+	pmask = pnc_port_mask(port);
+	tcam_sw_set_port(te, 0, pmask);
+	sram_sw_set_next_lookup(te, TCAM_LU_IP4);
+	tcam_sw_set_ainfo(te, 0, AI_DONE_MASK);
+	sram_sw_set_ainfo(te, AI_DONE_MASK, AI_DONE_MASK);
+
+	tcam_sw_text(te, "ipv4_tos");
+
+	tcam_hw_write(te, empty);
+	tcam_sw_free(te);
+	return 0;
+#else
+	return -1;
+#endif /* (CONFIG_MV_ETH_PNC_DSCP_PRIO > 0) */
+}
+
+
+/* IPv4/TCP header parsing for fragmentation and L4 offset.  */
+void pnc_ip4_tcp(int rxq)
+{
+	struct tcam_entry *te;
+
+	PNC_DBG("%s\n", __func__);
+	rxq_ip4_tcp = rxq;
+
+	/* TCP, FRAG=0 normal */
+	te = tcam_sw_alloc(TCAM_LU_IP4);
+	tcam_sw_set_byte(te, 9, MV_IP_PROTO_TCP);
+	tcam_sw_set_byte(te, 6, 0x00);
+	tcam_sw_set_mask(te, 6, 0x3F);
+	tcam_sw_set_byte(te, 7, 0x00);
+	tcam_sw_set_mask(te, 7, 0xFF);
+	sram_sw_set_shift_update(te, 1, SHIFT_IP4_HLEN);
+	sram_sw_set_rinfo(te, (RI_L3_IP4 | RI_L4_TCP), (RI_L3_IP4 | RI_L4_TCP));
+	sram_sw_set_rxq(te, rxq_ip4_tcp, 0);
+	sram_sw_set_ainfo(te, 0, AI_DONE_MASK);
+	pnc_ip4_flow_next_lookup_set(te);
+
+	tcam_sw_text(te, "ipv4_tcp");
+
+	tcam_hw_write(te, TE_IP4_TCP);
+	tcam_sw_free(te);
+
+	/* TCP, FRAG=1 any */
+	te = tcam_sw_alloc(TCAM_LU_IP4);
+	tcam_sw_set_byte(te, 9, MV_IP_PROTO_TCP);
+	sram_sw_set_shift_update(te, 1, SHIFT_IP4_HLEN);
+	sram_sw_set_rinfo(te, (RI_L3_IP4_FRAG | RI_L4_TCP), (RI_L3_IP4_FRAG | RI_L4_TCP));
+	sram_sw_set_rxq(te, rxq_ip4_tcp, 0);
+	sram_sw_set_ainfo(te, 0, AI_DONE_MASK);
+	pnc_ip4_flow_next_lookup_set(te);
+	tcam_sw_text(te, "ipv4_tcp_fr");
+
+	tcam_hw_write(te, TE_IP4_TCP_FRAG);
+	tcam_sw_free(te);
+}
+
+/* IPv4/UDP header parsing for fragmentation and L4 offset. */
+void pnc_ip4_udp(int rxq)
+{
+	struct tcam_entry *te;
+
+	PNC_DBG("%s\n", __func__);
+	rxq_ip4_udp = rxq;
+
+	/* UDP, FRAG=0 normal */
+	te = tcam_sw_alloc(TCAM_LU_IP4);
+	tcam_sw_set_byte(te, 9, MV_IP_PROTO_UDP);
+	tcam_sw_set_byte(te, 6, 0x00);
+	tcam_sw_set_mask(te, 6, 0x3F);
+	tcam_sw_set_byte(te, 7, 0x00);
+	tcam_sw_set_mask(te, 7, 0xFF);
+	sram_sw_set_shift_update(te, 1, SHIFT_IP4_HLEN);
+	sram_sw_set_rinfo(te, (RI_L3_IP4 | RI_L4_UDP), (RI_L3_IP4 | RI_L4_UDP));
+	sram_sw_set_rxq(te, rxq_ip4_udp, 0);
+	sram_sw_set_ainfo(te, 0, AI_DONE_MASK);
+	pnc_ip4_flow_next_lookup_set(te);
+	tcam_sw_text(te, "ipv4_udp");
+
+	tcam_hw_write(te, TE_IP4_UDP);
+	tcam_sw_free(te);
+
+	/* UDP, FRAG=1 any */
+	te = tcam_sw_alloc(TCAM_LU_IP4);
+	tcam_sw_set_byte(te, 9, MV_IP_PROTO_UDP);
+	sram_sw_set_shift_update(te, 1, SHIFT_IP4_HLEN);
+	sram_sw_set_rinfo(te, (RI_L3_IP4_FRAG | RI_L4_UDP), (RI_L3_IP4_FRAG | RI_L4_UDP));
+	sram_sw_set_rxq(te, rxq_ip4_udp, 0);
+	sram_sw_set_ainfo(te, 0, AI_DONE_MASK);
+	pnc_ip4_flow_next_lookup_set(te);
+	tcam_sw_text(te, "ipv4_udp_fr");
+
+	tcam_hw_write(te, TE_IP4_UDP_FRAG);
+	tcam_sw_free(te);
+}
+
+/* IPv4 - end of section  */
+static void pnc_ip4_end(void)
+{
+	struct tcam_entry *te;
+
+	PNC_DBG("%s\n", __func__);
+
+	te = tcam_sw_alloc(TCAM_LU_IP4);
+	sram_sw_set_rinfo(te, (RI_L3_IP4 | RI_L4_UN), (RI_L3_IP4 | RI_L4_UN));
+	sram_sw_set_rxq(te, rxq_ip4, 0);
+	sram_sw_set_lookup_done(te, 1);
+	sram_sw_set_flowid(te, FLOWID_EOF_LU_IP4, FLOWID_CTRL_LOW_HALF_MASK);
+
+	tcam_sw_text(te, "ipv4_eof");
+
+	tcam_hw_write(te, TE_IP4_EOF);
+	tcam_sw_free(te);
+}
+
+int pnc_ip4_init(void)
+{
+	PNC_DBG("%s\n", __func__);
+
+	pnc_ip4_tcp(CONFIG_MV_ETH_RXQ_DEF);
+	pnc_ip4_udp(CONFIG_MV_ETH_RXQ_DEF);
+	/*pnc_ip4_esp();*/
+	pnc_ip4_end();
+
+	return 0;
+}
+
+/******************************************************************************
+ *
+ * IPv6 Section
+ *
+ *******************************************************************************
+ */
+
+static void pnc_ip6_flow_next_lookup_set(struct tcam_entry *te)
+{
+#ifdef CONFIG_MV_ETH_PNC_L3_FLOW
+	sram_sw_set_next_lookup(te, TCAM_LU_FLOW_IP6_A);
+#else
+	sram_sw_set_next_lookup(te, TCAM_LU_L4);
+#endif /* CONFIG_MV_ETH_PNC_L3_FLOW */
+}
+
+/* IPv6 - detect TCP */
+static void pnc_ip6_tcp(void)
+{
+	struct tcam_entry *te;
+
+	PNC_DBG("%s\n", __func__);
+
+	/* TCP without extension headers */
+	te = tcam_sw_alloc(TCAM_LU_IP6);
+	tcam_sw_set_byte(te, 6, MV_IP_PROTO_TCP);
+	sram_sw_set_shift_update(te, 1, sizeof(MV_IP6_HEADER));
+	pnc_ip6_flow_next_lookup_set(te);
+	sram_sw_set_rinfo(te, (RI_L3_IP6 | RI_L4_TCP), (RI_L3_IP6 | RI_L4_TCP));
+	sram_sw_set_rxq(te, rxq_ip6, 0);
+	tcam_sw_text(te, "ipv6_tcp");
+
+	tcam_hw_write(te, TE_IP6_TCP);
+	tcam_sw_free(te);
+}
+
+/* IPv6 - detect UDP */
+static void pnc_ip6_udp(void)
+{
+	struct tcam_entry *te;
+
+	PNC_DBG("%s\n", __func__);
+
+	/* UDP without extension headers */
+	te = tcam_sw_alloc(TCAM_LU_IP6);
+	tcam_sw_set_byte(te, 6, MV_IP_PROTO_UDP);
+	sram_sw_set_shift_update(te, 1, sizeof(MV_IP6_HEADER));
+	pnc_ip6_flow_next_lookup_set(te);
+
+	sram_sw_set_rinfo(te, (RI_L3_IP6 | RI_L4_UDP), (RI_L3_IP6 | RI_L4_UDP));
+	sram_sw_set_rxq(te, rxq_ip6, 0);
+	tcam_sw_text(te, "ipv6_udp");
+
+	tcam_hw_write(te, TE_IP6_UDP);
+	tcam_sw_free(te);
+}
+
+/* IPv6 - end of section  */
+static void pnc_ip6_end(void)
+{
+	struct tcam_entry *te;
+
+	PNC_DBG("%s\n", __func__);
+
+	te = tcam_sw_alloc(TCAM_LU_IP6);
+	sram_sw_set_shift_update(te, 1, sizeof(MV_IP6_HEADER));
+	sram_sw_set_rinfo(te, (RI_L3_IP6 | RI_L4_UN), (RI_L3_IP6 | RI_L4_UN));
+	sram_sw_set_rxq(te, rxq_ip6, 0);
+	sram_sw_set_lookup_done(te, 1);
+	sram_sw_set_flowid(te, FLOWID_EOF_LU_IP6, FLOWID_CTRL_LOW_HALF_MASK);
+	tcam_sw_text(te, "ipv6_eof");
+
+	tcam_hw_write(te, TE_IP6_EOF);
+	tcam_sw_free(te);
+}
+
+int pnc_ip6_init(void)
+{
+	PNC_DBG("%s\n", __func__);
+
+	pnc_ip6_tcp();
+	pnc_ip6_udp();
+
+	pnc_ip6_end();
+
+	return 0;
+}
+
+#ifdef CONFIG_MV_ETH_PNC_L3_FLOW
+/******************************************************************************
+ *
+ * L3 Flows Section
+ *
+ ******************************************************************************
+ */
+static int pnc_flow_init(void)
+{
+	struct tcam_entry *te;
+
+	PNC_DBG("%s\n", __func__);
+
+	/* end of section for IPv4 */
+	te = tcam_sw_alloc(TCAM_LU_FLOW_IP4);
+	sram_sw_set_lookup_done(te, 1);
+	sram_sw_set_flowid(te, FLOWID_EOF_LU_FLOW_IP4, FLOWID_CTRL_LOW_HALF_MASK);
+	tcam_sw_text(te, "flow_ip4_eof");
+
+	tcam_hw_write(te, TE_FLOW_IP4_EOF);
+	tcam_sw_free(te);
+
+	/* end of section for IPv6_A */
+	te = tcam_sw_alloc(TCAM_LU_FLOW_IP6_A);
+	sram_sw_set_lookup_done(te, 1);
+	sram_sw_set_flowid(te, FLOWID_EOF_LU_FLOW_IP6_A, FLOWID_CTRL_LOW_HALF_MASK);
+	tcam_sw_text(te, "flow_ip6_A_eof");
+
+	tcam_hw_write(te, TE_FLOW_IP6_A_EOF);
+	tcam_sw_free(te);
+
+	/* end of section for IPv6_B */
+	te = tcam_sw_alloc(TCAM_LU_FLOW_IP6_B);
+	sram_sw_set_lookup_done(te, 1);
+	sram_sw_set_flowid(te, FLOWID_EOF_LU_FLOW_IP6_B, FLOWID_CTRL_LOW_HALF_MASK);
+	tcam_sw_text(te, "flow_ip6_B_eof");
+
+	tcam_hw_write(te, TE_FLOW_IP6_B_EOF);
+	tcam_sw_free(te);
+
+	return 0;
+}
+
+/* require 2 TCAM entries for macth */
+int pnc_ipv6_2_tuples_add(unsigned int tid1, unsigned int tid2, unsigned int flow_id,
+					      MV_U8 unique, MV_U8 *sip, MV_U8 *dip, unsigned int rxq)
+{
+	struct tcam_entry   *te;
+	int                 i;
+
+	if ((tid1 < TE_FLOW_L3) || (tid1 > TE_FLOW_L3_END))
+		ERR_ON_OOR(1);
+
+	if ((tid2 < TE_FLOW_L3) || (tid2 > TE_FLOW_L3_END))
+		ERR_ON_OOR(2);
+
+	te = tcam_sw_alloc(TCAM_LU_FLOW_IP6_A);
+	for (i = 0; i < 16; i++)
+		tcam_sw_set_byte(te, 8+i, sip[i]);
+
+	sram_sw_set_shift_update(te, 2, 24);
+
+	sram_sw_set_next_lookup_shift(te, 2);
+
+	sram_sw_set_next_lookup(te, TCAM_LU_FLOW_IP6_B);
+
+	sram_sw_set_ainfo(te, unique, AI_MASK);
+	tcam_sw_text(te, "ipv6_2t_A");
+
+	tcam_hw_write(te, tid1);
+	tcam_sw_free(te);
+
+	te = tcam_sw_alloc(TCAM_LU_FLOW_IP6_B);
+	for (i = 0; i < 16; i++)
+		tcam_sw_set_byte(te, i, dip[i]);
+
+	sram_sw_set_lookup_done(te, 1);
+	sram_sw_set_flowid(te, flow_id, FLOWID_CTRL_FULL_MASK);
+
+	if (PNC_FLOWID_IS_HWF(flow_id))	{
+		/* Overwrite RXQ - FIXME */
+		sram_sw_set_rxq(te, rxq, 1);
+	} else {
+		sram_sw_set_rxq(te, rxq, 0);
+	}
+
+	sram_sw_set_rinfo(te, RI_L3_FLOW, RI_L3_FLOW);
+
+	tcam_sw_set_ainfo(te, unique, AI_MASK);
+	tcam_sw_text(te, "ipv6_2t_B");
+
+	tcam_hw_write(te, tid2);
+	tcam_sw_free(te);
+
+	return 0;
+}
+
+int pnc_ipv4_2_tuples_add(unsigned int tid, unsigned int flow_id, unsigned int sip, unsigned int dip, unsigned int rxq)
+{
+	struct tcam_entry *te;
+
+	PNC_DBG("%s [%d] flow=%d " MV_IPQUAD_FMT "->" MV_IPQUAD_FMT "\n",
+		__func__, tid, flow_id, MV_IPQUAD(((MV_U8 *)&sip)), MV_IPQUAD(((MV_U8 *)&dip)));
+
+	if (tid < TE_FLOW_L3)
+		ERR_ON_OOR(1);
+
+	if (tid > TE_FLOW_L3_END)
+		ERR_ON_OOR(1);
+
+	te = tcam_sw_alloc(TCAM_LU_FLOW_IP4);
+
+	tcam_sw_set_byte(te, 12, (sip >> 0) & 0xFF);
+	tcam_sw_set_byte(te, 13, (sip >> 8) & 0xFF);
+	tcam_sw_set_byte(te, 14, (sip >> 16) & 0xFF);
+	tcam_sw_set_byte(te, 15, (sip >> 24) & 0xFF);
+
+	tcam_sw_set_byte(te, 16, (dip >> 0) & 0xFF);
+	tcam_sw_set_byte(te, 17, (dip >> 8) & 0xFF);
+	tcam_sw_set_byte(te, 18, (dip >> 16) & 0xFF);
+	tcam_sw_set_byte(te, 19, (dip >> 24) & 0xFF);
+
+	sram_sw_set_lookup_done(te, 1);
+	sram_sw_set_flowid(te, flow_id, FLOWID_CTRL_FULL_MASK);
+	sram_sw_set_rxq(te, rxq, 0);
+	sram_sw_set_rinfo(te, RI_L3_FLOW, RI_L3_FLOW);
+	tcam_sw_text(te, "ipv4_2t");
+
+	tcam_hw_write(te, tid);
+	tcam_sw_free(te);
+
+#ifdef MV_ETH_PNC_AGING
+	mvPncAgingCntrGroupSet(tid, 3);
+#endif
+
+	return 0;
+}
+
+int pnc_ipv4_5_tuples_add(unsigned int tid, unsigned int flow_id,
+			  unsigned int sip, unsigned int dip, unsigned int proto, unsigned int ports, unsigned int rxq)
+{
+	struct tcam_entry *te;
+
+	PNC_DBG("%s [%d] flow=%d " MV_IPQUAD_FMT "->" MV_IPQUAD_FMT ", ports=0x%x, proto=%d\n",
+		__func__, tid, flow_id, MV_IPQUAD(((MV_U8 *)&sip)), MV_IPQUAD(((MV_U8 *)&dip)), ports, proto);
+
+	if (tid < TE_FLOW_L3)
+		ERR_ON_OOR(1);
+
+	if (tid > TE_FLOW_L3_END)
+		ERR_ON_OOR(1);
+
+	/* sanity check */
+
+	te = tcam_sw_alloc(TCAM_LU_FLOW_IP4);
+
+	tcam_sw_set_byte(te, 9, proto);
+
+	tcam_sw_set_byte(te, 12, (sip >> 0) & 0xFF);
+	tcam_sw_set_byte(te, 13, (sip >> 8) & 0xFF);
+	tcam_sw_set_byte(te, 14, (sip >> 16) & 0xFF);
+	tcam_sw_set_byte(te, 15, (sip >> 24) & 0xFF);
+
+	tcam_sw_set_byte(te, 16, (dip >> 0) & 0xFF);
+	tcam_sw_set_byte(te, 17, (dip >> 8) & 0xFF);
+	tcam_sw_set_byte(te, 18, (dip >> 16) & 0xFF);
+	tcam_sw_set_byte(te, 19, (dip >> 24) & 0xFF);
+
+	tcam_sw_set_byte(te, 20, (ports >> 0) & 0xFF);
+	tcam_sw_set_byte(te, 21, (ports >> 8) & 0xFF);
+	tcam_sw_set_byte(te, 22, (ports >> 16) & 0xFF);
+	tcam_sw_set_byte(te, 23, (ports >> 24) & 0xFF);
+
+	sram_sw_set_lookup_done(te, 1);
+	sram_sw_set_flowid(te, flow_id, FLOWID_CTRL_FULL_MASK);
+	sram_sw_set_rxq(te, rxq, 0);
+	sram_sw_set_rinfo(te, RI_L3_FLOW, RI_L3_FLOW);
+	tcam_sw_text(te, "ipv4_5t");
+
+	tcam_hw_write(te, tid);
+	tcam_sw_free(te);
+
+#ifdef MV_ETH_PNC_AGING
+	mvPncAgingCntrGroupSet(tid, 2);
+#endif
+
+	return 0;
+}
+#else
+int pnc_l4_end(void)
+{
+	struct tcam_entry *te;
+
+	PNC_DBG("%s\n", __func__);
+
+	te = tcam_sw_alloc(TCAM_LU_L4);
+	sram_sw_set_lookup_done(te, 1);
+	sram_sw_set_flowid(te, FLOWID_EOF_LU_L4, FLOWID_CTRL_LOW_HALF_MASK);
+
+	tcam_sw_text(te, "l4_eof");
+
+	tcam_hw_write(te, TE_L4_EOF);
+	tcam_sw_free(te);
+	return 0;
+}
+#endif /* CONFIG_MV_ETH_PNC_L3_FLOW */
+
+/******************************************************************************
+ *
+ * PnC Init
+ *
+ ******************************************************************************
+ */
+int pnc_default_init(void)
+{
+	int     rc, port;
+	MV_U32	regVal = 0;
+
+	PNC_DBG("%s\n", __func__);
+
+	rc = tcam_hw_init();
+	if (rc)
+		goto out;
+
+	/* Mask all interrupts */
+	MV_REG_WRITE(MV_PNC_MASK_REG, 0xffffffff);
+
+	/* Clear all interrupts */
+	MV_REG_WRITE(MV_PNC_CAUSE_REG, 0x0);
+
+	/* Always start from lookup = 0 */
+	for (port = 0; port < PORT_BITS; port++)
+		regVal |= MV_PNC_PORT_LU_INIT_VAL(port, TCAM_LU_MAC);
+
+	MV_REG_WRITE(MV_PNC_INIT_LOOKUP_REG, regVal);
+
+	rc = pnc_mac_init();
+	if (rc)
+		goto out;
+
+	rc = pnc_vlan_init();
+	if (rc)
+		goto out;
+
+	rc = pnc_etype_init();
+	if (rc)
+		goto out;
+
+	rc = pnc_ip4_init();
+	if (rc)
+		goto out;
+
+	rc = pnc_ip6_init();
+	if (rc)
+		goto out;
+
+#ifdef CONFIG_MV_ETH_PNC_L3_FLOW
+	rc = pnc_flow_init();
+	if (rc)
+		goto out;
+#else
+	rc = pnc_l4_end();
+	if (rc)
+		goto out;
+#endif /* CONFIG_MV_ETH_PNC_L3_FLOW */
+
+#ifdef CONFIG_MV_ETH_PNC_WOL
+	mv_pnc_wol_init();
+#endif /* CONFIG_MV_ETH_PNC_WOL */
+
+	pnc_inited = 1;
+out:
+	return rc;
+}
+
+static void pnc_port_sprintf(struct tcam_entry *te, char *buf)
+{
+	int p, offs;
+	unsigned int data, mask;
+
+	tcam_sw_get_port(te, &data, &mask);
+	if (mask == PORT_MASK)
+		mvOsSPrintf(buf, "None");
+	else if (mask == 0)
+		mvOsSPrintf(buf, "All");
+	else {
+		offs = 0;
+		for (p = 0; p < PORT_BITS; p++) {
+			if ((mask & (1 << p)) == 0)
+				offs += mvOsSPrintf(buf + offs, " %d,", pnc_port_map(p));
+		}
+	}
+}
+
+void pnc_vlan_prio_show(int port)
+{
+#if (CONFIG_MV_ETH_PNC_VLAN_PRIO > 0)
+	struct tcam_entry *te;
+	int tid;
+	unsigned char prio;
+	char buf[16];
+
+	mvOsPrintf("Prio   Mask       Ports     RXQ    Name\n");
+	for (tid = TE_VLAN_PRIO; tid <= TE_VLAN_PRIO_END; tid++) {
+		te = pnc_tcam_entry_get(tid);
+		if (te) {
+			prio = *(te->data.u.byte + 2);
+			mvOsPrintf("0x%02x", prio >> 5);
+			prio = *(te->mask.u.byte + 2);
+			mvOsPrintf("   0x%02x", prio >> 5);
+			pnc_port_sprintf(te, buf);
+			mvOsPrintf(" %12s", buf);
+			mvOsPrintf("     %d", sram_sw_get_rxq(te, NULL));
+			mvOsPrintf("     %s\n", te->ctrl.text);
+			tcam_sw_free(te);
+		}
+	}
+#endif /* (CONFIG_MV_ETH_PNC_VLAN_PRIO > 0) */
+	return;
+}
+void pnc_ipv4_dscp_show(int port)
+{
+#if (CONFIG_MV_ETH_PNC_DSCP_PRIO > 0)
+	struct tcam_entry *te;
+	int tid;
+	unsigned char tos;
+	char buf[16];
+
+	mvOsPrintf("TOS    Mask       Ports   RXQ    Name\n");
+	for (tid = TE_IP4_DSCP; tid <= TE_IP4_DSCP_END; tid++) {
+		te = pnc_tcam_entry_get(tid);
+		if (te) {
+			tos = *(te->data.u.byte + 1);
+			mvOsPrintf("0x%02x", tos);
+			tos = *(te->mask.u.byte + 1);
+			mvOsPrintf("   0x%02x", tos);
+			pnc_port_sprintf(te, buf);
+			mvOsPrintf(" %10s", buf);
+			mvOsPrintf("     %d", sram_sw_get_rxq(te, NULL));
+			mvOsPrintf("     %s\n", te->ctrl.text);
+			tcam_sw_free(te);
+		}
+	}
+#endif /* CONFIG_MV_ETH_PNC_DSCP_PRIO > 0 */
+	return;
+}
+
+void pnc_mac_show(void)
+{
+	int tid;
+	struct tcam_entry *te;
+	char *mac;
+	char buf[16];
+
+	mvOsPrintf("     Addr                   Mask         Ports    RXQ   Name\n");
+	for (tid = TE_MAC_BC; tid < TE_MAC_EOF; tid++) {
+		te = pnc_tcam_entry_get(tid);
+		if (te) {
+			mac = te->data.u.byte + MV_ETH_MH_SIZE;
+			mvOsPrintf(MV_MACQUAD_FMT, MV_MACQUAD(mac));
+			mac = te->mask.u.byte + MV_ETH_MH_SIZE;
+			mvOsPrintf("   " MV_MACQUAD_FMT, MV_MACQUAD(mac));
+
+			pnc_port_sprintf(te, buf);
+			mvOsPrintf(" %10s", buf);
+			mvOsPrintf("     %d", sram_sw_get_rxq(te, NULL));
+			mvOsPrintf("     %s\n", te->ctrl.text);
+			tcam_sw_free(te);
+		}
+	}
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPnc.h b/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPnc.h
new file mode 100644
index 000000000000..b095c6fb4549
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPnc.h
@@ -0,0 +1,375 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __MV_PNC_H__
+#define __MV_PNC_H__
+
+
+#include "mvTcam.h"
+
+/*
+ * Errors
+ */
+#define ERR_ON_OOR(cond) if (cond) { mvOsPrintf("%s: out of range\n", __func__);  return PNC_ERR_OOR; }
+#define WARN_ON_OOR(cond) if (cond) { mvOsPrintf("%s: out of range\n", __func__);  return; }
+#define WARN_ON_OOM(cond) if (cond) { mvOsPrintf("%s: out of memory\n", __func__); return NULL; }
+
+ /*
+ * Errors assigment
+ */
+#define PNC_ERR_OOR			1			/* out of range error */
+#define PNC_ERR_INV			1			/* invalid parameter */
+
+ /* Result info bits assigment */
+#define RI_DROP	            (BIT0)		/* drop */
+
+#define RI_L4_OFFS     	    1
+#define RI_L4_MASK     	    (3 << RI_L4_OFFS)
+#define RI_L4_TCP           (0 << RI_L4_OFFS)
+#define RI_L4_UDP           (1 << RI_L4_OFFS)
+#define RI_L4_UN            (2 << RI_L4_OFFS)
+
+#define RI_L3_OFFS     	    3
+#define RI_L3_MASK     	    (7 << RI_L3_OFFS)
+#define RI_L3_UN            (0 << RI_L3_OFFS)
+#define RI_L3_IP6           (1 << RI_L3_OFFS)
+#define RI_L3_IP4_FRAG      (2 << RI_L3_OFFS)
+#define RI_L3_IP4           (3 << RI_L3_OFFS)
+#define RI_L3_IP4_FRAG_F    (6 << RI_L3_OFFS)
+
+#define RI_MCAST_OFFS       6
+#define RI_MCAST_MASK       (3 << RI_MCAST_OFFS)
+#define RI_MCAST_SPEC       (0 << RI_MCAST_OFFS)
+#define RI_MCAST_PNC_SPEC   (1 << RI_MCAST_OFFS)
+#define RI_MCAST_OTHER      (2 << RI_MCAST_OFFS)
+#define RI_MCAST_PNC_OTHER  (3 << RI_MCAST_OFFS)
+#define RI_MCAST_PNC_ONLY   (4 << RI_MCAST_OFFS)
+
+#define RI_DA_MC            (BIT10)	/* multicast */
+#define RI_DA_BC            (BIT11)	/* broadcast */
+#define RI_DA_ME            (BIT12)	/* unicast */
+#define RI_PPPOE            (BIT13)	/* PPPoE */
+/* BITS 14-17 are reserved for setting MH field in HWF packets (TXz_MH_reg_1-15) */
+#define RI_L3_FLOW          (BIT18)     /* L3 flow is found */
+#define RI_VLAN             (BIT19)     /* VLAN */
+#define RI_RX_SPECIAL       (BIT20)     /* Packet for special RX processing */
+
+ /* Additional info bits assigment */
+#define AI_DONE_BIT         0
+#define AI_DONE_MASK        (1 << AI_DONE_BIT)
+
+/* PnC result info */
+#define NETA_PNC_VLAN       (RI_VLAN  >> 9)
+#define NETA_PNC_DA_MC      (RI_DA_MC >> 9)
+#define NETA_PNC_DA_BC      (RI_DA_BC >> 9)
+#define NETA_PNC_DA_UC      (RI_DA_ME >> 9)
+#define NETA_PNC_PPPOE      (RI_PPPOE >> 9)
+#define NETA_PNC_RX_SPECIAL (RI_RX_SPECIAL >> 9)
+
+/*---------------------------------------------------------------------------*/
+
+MV_STATUS   mvPncInit(MV_U8 *pncVirtBase, MV_U32 pncTcamSize);
+
+#ifdef CONFIG_MV_ETH_PNC
+/*
+ * TCAM topology definition.
+ * The TCAM is divided into sections per protocol encapsulation.
+ * Usually each section is designed to be to a lookup.
+ * Change sizes of sections according to the target product.
+ */
+enum {
+	/* MAC Lookup including Marvell/PON header */
+	TE_MH,         		/* Match marvell header */
+	TE_MAC_BC,	        /* broadcast */
+	TE_MAC_FLOW_CTRL,   /* Flow Control PAUSE frames */
+	TE_MAC_MC_ALL,      /* first multicast entry (always reserved for all MCASTs) */
+	TE_MAC_MC_L = TE_MAC_MC_ALL + CONFIG_MV_ETH_PNC_MCAST_NUM,    /* last multicast entry */
+	TE_MAC_ME,	        /* mac to me per port */
+	TE_MAC_ME_END = TE_MAC_ME + CONFIG_MV_ETH_PORTS_NUM - 1,
+	TE_MAC_EOF,
+
+    /* VLAN Lookup */
+#if (CONFIG_MV_ETH_PNC_VLAN_PRIO > 0)
+	TE_VLAN_PRIO,
+	TE_VLAN_PRIO_END = TE_VLAN_PRIO + CONFIG_MV_ETH_PNC_VLAN_PRIO,
+#endif /* (CONFIG_MV_ETH_PNC_VLAN_PRIO > 0) */
+	TE_VLAN_EOF,
+
+    /* Ethertype Lookup */
+	TE_ETYPE_ARP,
+	TE_ETYPE_IP4,
+	TE_ETYPE_IP6,
+	TE_PPPOE_IP4,
+	TE_PPPOE_IP6,
+	TE_ETYPE,	/* custom ethertype */
+	TE_ETYPE_EOF = TE_ETYPE + CONFIG_MV_ETH_PNC_ETYPE,
+
+	/* IP4 Lookup */
+#if (CONFIG_MV_ETH_PNC_DSCP_PRIO > 0)
+	TE_IP4_DSCP,
+	TE_IP4_DSCP_END = TE_IP4_DSCP + CONFIG_MV_ETH_PNC_DSCP_PRIO - 1,
+#endif /* CONFIG_MV_ETH_PNC_DSCP_PRIO > 0 */
+
+	TE_IP4_TCP,
+	TE_IP4_TCP_FRAG,
+	TE_IP4_UDP,
+	TE_IP4_UDP_FRAG,
+	TE_IP4_IGMP,
+	TE_IP4_ESP,
+	TE_IP4_EOF,
+
+	/* IP6 Lookup */
+	TE_IP6_TCP,
+	TE_IP6_UDP,
+	TE_IP6_EOF,
+
+#ifdef CONFIG_MV_ETH_PNC_L3_FLOW
+	/* Session Lookup for IPv4 and IPv6 */
+	TE_FLOW_L3,
+	TE_FLOW_L3_END = TE_FLOW_L3 + CONFIG_MV_ETH_PNC_L3_FLOW_LINES - 1,
+	TE_FLOW_IP4_EOF,
+	TE_FLOW_IP6_A_EOF,
+	TE_FLOW_IP6_B_EOF,
+#else
+	TE_L4_EOF,
+#endif /* CONFIG_MV_ETH_PNC_L3_FLOW */
+
+#ifdef CONFIG_MV_ETH_PNC_WOL
+	TE_WOL,
+	/*TE_WOL_EOF is always the last line of the TCAM table, it is dynamic, redefined it to macro */
+#endif /* CONFIG_MV_ETH_PNC_WOL */
+};
+
+
+enum {
+	TCAM_LU_MAC,
+	TCAM_LU_L2,
+	TCAM_LU_IP4,
+	TCAM_LU_IP6,
+#ifdef CONFIG_MV_ETH_PNC_L3_FLOW
+	TCAM_LU_FLOW_IP4,
+	TCAM_LU_FLOW_IP6_A,
+	TCAM_LU_FLOW_IP6_B,
+#else
+	TCAM_LU_L4,
+#endif /* CONFIG_MV_ETH_PNC_L3_FLOW */
+
+#ifdef CONFIG_MV_ETH_PNC_WOL
+	TCAM_LU_WOL,
+#endif /* CONFIG_MV_ETH_PNC_WOL */
+};
+
+struct gbe_pnc_port_mapping {
+	int gbe_port;
+	int pnc_port;
+};
+
+/*
+ * Pre-defined FlowId assigment
+ */
+#define FLOWID_EOF_LU_MAC 			0xFFF0
+#define FLOWID_EOF_LU_L2			0xFFF2
+#define FLOWID_EOF_LU_IP4			0xFFF4
+#define FLOWID_EOF_LU_IP6			0xFFF6
+#define FLOWID_EOF_LU_L4			0xFFF7
+#define FLOWID_EOF_LU_FLOW_IP4		0xFFF8
+#define FLOWID_EOF_LU_FLOW_IP6_A	0xFFFA
+#define FLOWID_EOF_LU_FLOW_IP6_B	0xFFFB
+
+#define FLOWID_CTRL_FULL_MASK       FLOW_CTRL_MASK
+#define FLOWID_CTRL_LOW_HALF_MASK   FLOW_CTRL_HALF_MASK
+#define FLOWID_CTRL_HIGH_HALF_MASK  (FLOW_CTRL_MASK & ~FLOW_CTRL_HALF_MASK)
+
+/************ FlowID field detalization for HWF support ***********************/
+
+/* TXP for HWF: 0 - no HWF, 1 - Giga0, 2 - Giga1, 3..10 PON - TCONTs 0..7 */
+#define PNC_FLOWID_HWF_TXP_OFFS    24
+#define PNC_FLOWID_HWF_TXP_MASK    (0xF << PNC_FLOWID_HWF_TXP_OFFS)
+
+#define PNC_FLOWID_HWF_GEM_OFFS    12
+#define PNC_FLOWID_HWF_GEM_MASK    (0xFFF << PNC_FLOWID_HWF_GEM_OFFS)
+
+#define PNC_FLOWID_HWF_MOD_OFFS    0
+#define PNC_FLOWID_HWF_MOD_MASK    (0x3FF << PNC_FLOWID_HWF_MOD_OFFS)
+
+#define PNC_FLOWID_IS_HWF(flowid)	(((flowid) & PNC_FLOWID_HWF_TXP_MASK) != 0)
+/*---------------------------------------------------------------------------*/
+
+/*
+ * Export API
+ */
+
+int pnc_default_init(void);
+
+int pnc_te_del(unsigned int tid);
+
+struct tcam_entry *pnc_tcam_entry_get(int tid);
+int pnc_gbe_port_map_init(unsigned int ctrl_model, unsigned int ctrl_rev);
+
+int          pnc_eth_port_map(int eth_port);
+int          pnc_port_map(int pnc_port);
+
+unsigned int pnc_port_mask_update(unsigned int mask, int eth_port, int add);
+unsigned int pnc_port_mask(int eth_port);
+int          pnc_tcam_port_update(int tid, int eth_port, int add);
+
+
+/* Set number of Rx queues */
+void pnc_rxq_max(int rxq_max);
+
+/* Assign Rx queue to a protocol */
+int pnc_rxq_proto(unsigned int proto, unsigned int rxq);
+
+/* Get availible range on section */
+int pnc_rule_range(int sec, int *first, int *last);
+
+/* Get section for specific rule */
+int pnc_rule_sec(int tid);
+
+/* Delete rule */
+int pnc_rule_del(int tid);
+
+/* Set MAC address of a port, or NULL for promiscuous */
+int pnc_mac_me(unsigned int port, unsigned char *mac, int rxq);
+
+/* Set Multicast MAC address to be accepted on the port */
+int pnc_mcast_me(unsigned int port, unsigned char *mac);
+
+/* Set VLAN priority entry */
+int pnc_vlan_prio_set(int port, int prio, int rxq);
+void pnc_vlan_prio_show(int port);
+
+/* match arp */
+void pnc_etype_arp(int rxq);
+
+/* TCP/IP header parsing for fragmentation and L4 offset.  */
+void pnc_ip4_tcp(int rxq);
+
+/* IPv4/UDP header parsing for fragmentation and L4 offset. */
+void pnc_ip4_udp(int rxq);
+
+/* Enable / Disable accept ALL Multicast */
+int pnc_mcast_all(unsigned int port, int en);
+
+void    pnc_mac_show(void);
+
+#ifdef CONFIG_MV_ETH_PNC_L3_FLOW
+int pnc_ip4_2tuple_rxq(unsigned int eth_port, unsigned int sip, unsigned int dip, int rxq);
+int pnc_ip4_5tuple_rxq(unsigned int eth_port, unsigned int sip, unsigned int dip, unsigned int ports,
+						unsigned int proto, int rxq);
+int pnc_rxq_map_dump(void);
+#endif /* CONFIG_MV_ETH_PNC_L3_FLOW */
+
+/* Add TOS priority rule */
+int     pnc_ip4_dscp(int port, unsigned char dscp, unsigned char mask, int rxq);
+void    pnc_ipv4_dscp_show(int port);
+
+
+/* 2 tuple match */
+int pnc_ipv4_2_tuples_add(unsigned int tid, unsigned int flow_hi,
+			      unsigned int sip, unsigned int dip, unsigned int rxq);
+
+int pnc_ipv6_2_tuples_add(unsigned int tid1, unsigned int tid2, unsigned int flow_id,
+					      MV_U8 unique, MV_U8 *sip, MV_U8 *dip, unsigned int rxq);
+
+/* 5 tuple match */
+int pnc_ipv4_5_tuples_add(unsigned int tid, unsigned int flow_hi,
+				unsigned int sip, unsigned int dip,
+				unsigned int proto, unsigned int ports, unsigned int rxq);
+
+#ifdef CONFIG_MV_ETH_PNC_WOL
+void mv_pnc_wol_init(void);
+int  mv_pnc_wol_rule_set(int port, char *data, char *mask, int size);
+int  mv_pnc_wol_rule_del(int idx);
+int  mv_pnc_wol_rule_del_all(int port);
+
+int  mv_pnc_wol_pkt_match(int port, char *data, int size, int *ruleId);
+void mv_pnc_wol_sleep(int port);
+void mv_pnc_wol_wakeup(int port);
+int  mv_pnc_wol_rule_dump(int idx);
+void mv_pnc_wol_dump(void);
+#endif /* CONFIG_MV_ETH_PNC_WOL */
+
+#ifdef MV_ETH_PNC_AGING
+MV_U32  mvPncAgingCntrRead(int tid);
+void    mvPncAgingCntrWrite(int tid, MV_U32 w32);
+void    mvPncAgingDump(int all);
+void    mvPncAgingReset(void);
+void    mvPncAgingScannerDump(void);
+void    mvPncAgingCntrClear(int tid);
+void    mvPncAgingCntrGroupSet(int tid, int gr);
+void    mvPncAgingCounterClear(int tid, int gr);
+#endif /* MV_ETH_PNC_AGING */
+
+#ifdef MV_ETH_PNC_LB
+void    mvPncLbDump(void);
+int     mvPncLbRxqSet(int hash, int rxq);
+int		mvPncLbModeIp4(int mode);
+int		mvPncLbModeIp6(int mode);
+int		mvPncLbModeL4(int mode);
+#endif /* MV_ETH_PNC_LB */
+
+#endif /* CONFIG_MV_ETH_PNC */
+
+#endif /*__MV_PNC_H__ */
diff --git a/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPncAging.c b/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPncAging.c
new file mode 100644
index 000000000000..0e37bf520514
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPncAging.c
@@ -0,0 +1,315 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvOs.h"
+#include "mvCommon.h"
+
+#include "gbe/mvNetaRegs.h"
+
+#include "mvPnc.h"
+#include "mvTcam.h"
+
+#ifdef MV_ETH_PNC_AGING
+
+#define PNC_AGING_CNTRS_ADDR_MASK       (0 << 12)
+#define PNC_AGING_GROUPS_ADDR_MASK      (1 << 12)
+#define PNC_AGING_SCANNER_ADDR_MASK     (2 << 12)
+
+#define PNC_AGING_CNTR_IDX_ADDR_OFFS    2
+#define PNC_AGING_GROUP_ADDR_OFFS       2
+#define PNC_AGING_LOG_ADDR_OFFS         5
+
+#define PNC_AGING_CNTR_OFFS             0
+#define PNC_AGING_CNTR_MAX              0x3ffffff
+#define PNC_AGING_CNTR_MASK             (PNC_AGING_CNTR_MAX << PNC_AGING_CNTR_OFFS)
+
+#define PNC_AGING_GROUP_OFFS            26
+#define PNC_AGING_GROUP_ALL_MASK        (0x3 << PNC_AGING_GROUP_OFFS)
+#define PNC_AGING_GROUP_MASK(gr)        ((gr) << PNC_AGING_GROUP_OFFS)
+
+#define PNC_AGING_READ_LU_LOG_BIT       28
+#define PNC_AGING_READ_LU_LOG_MASK      (1 << PNC_AGING_READ_LU_LOG_BIT)
+
+#define PNC_AGING_READ_MU_LOG_BIT       29
+#define PNC_AGING_READ_MU_LOG_MASK      (1 << PNC_AGING_READ_MU_LOG_BIT)
+
+#define PNC_AGING_SKIP_LU_SCAN_BIT      30
+#define PNC_AGING_SKIP_LU_SCAN_MASK     (1 << PNC_AGING_SKIP_LU_SCAN_BIT)
+
+#define PNC_AGING_SKIP_MU_SCAN_BIT      31
+#define PNC_AGING_SKIP_MU_SCAN_MASK     (1 << PNC_AGING_SKIP_MU_SCAN_BIT)
+
+#define PNC_AGING_LOG_CNTR_IDX_OFFS     0
+#define PNC_AGING_LOG_CNTR_IDX_MASK     (0x3FF << PNC_AGING_LOG_CNTR_IDX_OFFS)
+
+#define PNC_AGING_LOG_VALID_BIT         31
+#define PNC_AGING_LOG_VALID_MASK        (1 << PNC_AGING_LOG_VALID_BIT)
+
+void    mvPncAgingCntrWrite(int tid, MV_U32 w32)
+{
+	MV_U32  va;
+
+	WARN_ON_OOR(tid >= MV_PNC_TCAM_SIZE());
+
+	va = (MV_U32)mvPncVirtBase;
+	va |= PNC_AGING_ACCESS_MASK;
+	va |= PNC_AGING_CNTRS_ADDR_MASK;
+	va |= (tid << PNC_AGING_CNTR_IDX_ADDR_OFFS);
+/*
+	mvOsPrintf("%s: tid=%d, va=0x%x, w32=0x%08x\n",
+		__func__, tid, va, w32);
+*/
+	MV_MEMIO32_WRITE(va, w32);
+}
+
+
+MV_U32  mvPncAgingCntrRead(int tid)
+{
+	MV_U32  va, w32;
+
+	ERR_ON_OOR(tid >= MV_PNC_TCAM_SIZE());
+
+	va = (MV_U32)mvPncVirtBase;
+	va |= PNC_AGING_ACCESS_MASK;
+	va |= PNC_AGING_CNTRS_ADDR_MASK;
+	va |= (tid << PNC_AGING_CNTR_IDX_ADDR_OFFS);
+
+	w32 = MV_MEMIO32_READ(va);
+/*
+	mvOsPrintf("%s: tid=%d, va=0x%x, w32=0x%08x\n",
+		__func__, tid, va, w32);
+*/
+	return w32;
+}
+
+MV_U32  mvPncAgingGroupCntrRead(int group)
+{
+	MV_U32  va, w32;
+
+	ERR_ON_OOR(group >= MV_PNC_AGING_MAX_GROUP);
+
+	va = (MV_U32)mvPncVirtBase;
+	va |= PNC_AGING_ACCESS_MASK;
+	va |= PNC_AGING_GROUPS_ADDR_MASK;
+	va |= (group << PNC_AGING_GROUP_ADDR_OFFS);
+
+	w32 = MV_MEMIO32_READ(va);
+
+	return w32;
+}
+
+void    mvPncAgingGroupCntrClear(int group)
+{
+	MV_U32  w32;
+
+	WARN_ON_OOR(group >= MV_PNC_AGING_MAX_GROUP);
+
+	w32 = MV_REG_READ(MV_PNC_AGING_CTRL_REG);
+	w32 |= MV_PNC_AGING_GROUP_RESET(group);
+	MV_REG_WRITE(MV_PNC_AGING_CTRL_REG, w32);
+}
+
+MV_U32  mvPncAgingLogEntryRead(int group, int mostly)
+{
+	MV_U32  va, w32;
+
+	ERR_ON_OOR(group >= MV_PNC_AGING_MAX_GROUP);
+
+	va = (MV_U32)mvPncVirtBase;
+	va |= PNC_AGING_ACCESS_MASK;
+	va |= PNC_AGING_SCANNER_ADDR_MASK;
+	va |= ((MV_PNC_AGING_MAX_GROUP * mostly + group) << PNC_AGING_LOG_ADDR_OFFS);
+
+	w32 = MV_MEMIO32_READ(va);
+
+	return w32;
+}
+
+void    mvPncAgingCntrShow(int tid, MV_32 w32)
+{
+	mvOsPrintf("[%3d] (%-12s): gr=%d - %10u", tid, tcam_text[tid],
+		((w32 & PNC_AGING_GROUP_ALL_MASK) >> PNC_AGING_GROUP_OFFS),
+		((w32 & PNC_AGING_CNTR_MASK) >> PNC_AGING_CNTR_OFFS));
+
+	if (w32 & PNC_AGING_READ_LU_LOG_MASK)
+		mvOsPrintf(", LU_READ");
+
+	if (w32 & PNC_AGING_READ_MU_LOG_MASK)
+		mvOsPrintf(", MU_READ");
+
+	if (w32 & PNC_AGING_SKIP_LU_SCAN_MASK)
+		mvOsPrintf(", LU_SKIP");
+
+	if (w32 & PNC_AGING_SKIP_MU_SCAN_MASK)
+		mvOsPrintf(", MU_SKIP");
+
+	mvOsPrintf("\n");
+}
+
+void    mvPncAgingDump(int all)
+{
+	int     tid, gr;
+	MV_U32  cntrVal;
+
+	mvOsPrintf("TCAM entries Aging counters: %s\n", all ? "ALL" : "Non ZERO");
+	for (tid = 0; tid < MV_PNC_TCAM_SIZE(); tid++) {
+		cntrVal = mvPncAgingCntrRead(tid);
+
+		if (all || (cntrVal & PNC_AGING_CNTR_MASK))
+			mvPncAgingCntrShow(tid, cntrVal);
+	}
+	mvOsPrintf("Aging Counters Summary per group: \n");
+	for (gr = 0; gr < MV_PNC_AGING_MAX_GROUP; gr++)
+		mvOsPrintf("group #%d: %10u\n", gr, mvPncAgingGroupCntrRead(gr));
+}
+
+static MV_U32  mvPncScannerLog[MV_PNC_TCAM_LINES];
+static MV_U32  mvPncAgingCntrs[MV_PNC_TCAM_LINES];
+
+void    mvPncAgingScannerDump(void)
+{
+	int     i, j, gr;
+	MV_U32  w32;
+
+	mvOsPrintf("Scanner LU Log entries for aging counters:\n");
+	for (gr = 0; gr < MV_PNC_AGING_MAX_GROUP; gr++) {
+		i = 0;
+		mvOsPrintf("LU group #%d:\n", gr);
+		while (i < MV_PNC_TCAM_SIZE()) {
+			w32 = mvPncAgingLogEntryRead(gr, 0);
+			if ((w32 & PNC_AGING_LOG_VALID_MASK) == 0)
+				break;
+
+			mvOsDelay(20);
+			mvPncAgingCntrs[i] = mvPncAgingCntrRead(w32 & PNC_AGING_LOG_CNTR_IDX_MASK);
+			mvPncScannerLog[i] = w32;
+			i++;
+		}
+		for (j = 0; j < i; j++) {
+			mvOsPrintf("%d: 0x%08x - tid=%u, 0x%08x - cntr=%u\n",
+				j, mvPncScannerLog[j], mvPncScannerLog[j] & PNC_AGING_LOG_CNTR_IDX_MASK,
+				mvPncAgingCntrs[j],
+				(mvPncAgingCntrs[j] & PNC_AGING_CNTR_MASK) >> PNC_AGING_CNTR_OFFS);
+		}
+	}
+
+	mvOsPrintf("\n");
+	mvOsPrintf("Scanner MU Log entries for aging counters:\n");
+	for (gr = 0; gr < MV_PNC_AGING_MAX_GROUP; gr++) {
+		i = 0;
+		mvOsPrintf("MU group #%d:\n", gr);
+		while (i < MV_PNC_TCAM_SIZE()) {
+			w32 = mvPncAgingLogEntryRead(gr, 1);
+			/*mvOsDelay(1);*/
+			if ((w32 & PNC_AGING_LOG_VALID_MASK) == 0)
+				break;
+
+			mvOsDelay(20);
+			mvPncAgingCntrs[i] = mvPncAgingCntrRead(w32 & PNC_AGING_LOG_CNTR_IDX_MASK);
+			mvPncScannerLog[i] = w32;
+			i++;
+		}
+		for (j = 0; j < i; j++) {
+			mvOsPrintf("%d: 0x%08x - tid=%u, 0x%08x - cntr=%u\n",
+				j, mvPncScannerLog[j], mvPncScannerLog[j] & PNC_AGING_LOG_CNTR_IDX_MASK,
+				mvPncAgingCntrs[j],
+				(mvPncAgingCntrs[j] & PNC_AGING_CNTR_MASK) >> PNC_AGING_CNTR_OFFS);
+		}
+	}
+}
+
+void    mvPncAgingCntrClear(int tid)
+{
+	MV_U32  w32;
+
+	w32 = mvPncAgingCntrRead(tid);
+
+	w32 &= ~PNC_AGING_CNTR_MASK;
+	w32 &= ~(PNC_AGING_READ_LU_LOG_MASK | PNC_AGING_READ_MU_LOG_MASK);
+
+	mvPncAgingCntrWrite(tid, w32);
+}
+
+void    mvPncAgingCntrGroupSet(int tid, int gr)
+{
+	MV_U32  w32;
+
+	w32 = PNC_AGING_GROUP_MASK(gr);
+
+	/*mvOsPrintf("%s: tid=%d, gr=%d, w32=0x%x\n", __FUNCTION__, tid, gr, w32);*/
+	mvPncAgingCntrWrite(tid, w32);
+}
+
+/* Reset all Aging counters */
+void    mvPncAgingReset(void)
+{
+	int tid, gr;
+
+	for (tid = 0; tid < MV_PNC_TCAM_SIZE(); tid++)
+		mvPncAgingCntrClear(tid);
+
+	for (gr = 0; gr < MV_PNC_AGING_MAX_GROUP; gr++)
+		mvPncAgingGroupCntrClear(gr);
+}
+#endif /* MV_ETH_PNC_AGING */
diff --git a/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPncLb.c b/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPncLb.c
new file mode 100755
index 000000000000..01ed15c04774
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPncLb.c
@@ -0,0 +1,159 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+
+#include "mvOs.h"
+#include "mvCommon.h"
+
+#include "gbe/mvNetaRegs.h"
+
+#include "mvPnc.h"
+#include "mvTcam.h"
+
+#ifdef MV_ETH_PNC_LB
+
+void    mvPncLbDump(void)
+{
+	MV_U32	regVal;
+	int i, j, rxq;
+
+	MV_REG_WRITE(MV_PNC_LB_TBL_ACCESS_REG, 0);
+	mvOsPrintf("Hash:    rxq    rxq    rxq    rxq\n");
+	for (i = 0; i <= MV_PNC_LB_TBL_ADDR_MASK; i++) {
+		/* Each read returns 4 hash entries */
+		regVal = MV_REG_READ(MV_PNC_LB_TBL_ACCESS_REG);
+		/* Extract data */
+		regVal = (regVal & MV_PNC_LB_TBL_DATA_MASK) >> MV_PNC_LB_TBL_DATA_OFFS;
+		mvOsPrintf("%4d:    ", (i * 4));
+		for (j = 0; j < 4; j++) {
+			rxq = regVal & 7;
+			mvOsPrintf("%3d   ", rxq);
+			regVal = regVal >> 3;
+		}
+		mvOsPrintf("\n");
+	}
+}
+
+int    mvPncLbRxqSet(int hash, int rxq)
+{
+	MV_U32 regVal, entry, index;
+
+	entry = (hash / 4) & MV_PNC_LB_TBL_ADDR_MASK;
+	index = (hash & 3);
+
+	MV_REG_WRITE(MV_PNC_LB_TBL_ACCESS_REG, entry);
+	regVal = MV_REG_READ(MV_PNC_LB_TBL_ACCESS_REG);
+
+	regVal &= ~MV_PNC_LB_TBL_ADDR_MASK;
+	regVal |= entry;
+	regVal &= ~((7 << (index * 3)) << MV_PNC_LB_TBL_DATA_OFFS);
+	regVal |= ((rxq << (index * 3)) << MV_PNC_LB_TBL_DATA_OFFS);
+	regVal |= MV_PNC_LB_TBL_WRITE_TRIG_MASK;
+	MV_REG_WRITE(MV_PNC_LB_TBL_ACCESS_REG, regVal);
+
+	return 0;
+}
+
+int		mvPncLbModeIp4(int mode)
+{
+	int lb;
+	struct tcam_entry te;
+
+	switch (mode) {
+	case 0:
+		lb = LB_DISABLE_VALUE;
+		break;
+	case 1:
+		lb = LB_2_TUPLE_VALUE;
+		break;
+	case 2:
+	default:
+		mvOsPrintf("%s: %d - unexpected mode value\n", __func__, mode);
+		return 1;
+	}
+	tcam_hw_read(&te, TE_IP4_EOF);
+	sram_sw_set_load_balance(&te, lb);
+	tcam_hw_write(&te, TE_IP4_EOF);
+
+	return 0;
+}
+
+int	mvPncLbModeIp6(int mode)
+{
+	int lb;
+	struct tcam_entry te;
+
+	switch (mode) {
+	case 0:
+		lb = LB_DISABLE_VALUE;
+		break;
+	case 1:
+		lb = LB_2_TUPLE_VALUE;
+		break;
+	case 2:
+	default:
+		mvOsPrintf("%s: %d - unexpected mode value\n", __func__, mode);
+		return 1;
+	}
+	tcam_hw_read(&te, TE_IP6_EOF);
+	sram_sw_set_load_balance(&te, lb);
+	tcam_hw_write(&te, TE_IP6_EOF);
+
+	return 0;
+}
+
+int	mvPncLbModeL4(int mode)
+{
+	int lb;
+	struct tcam_entry te;
+
+	switch (mode) {
+	case 0:
+		lb = LB_DISABLE_VALUE;
+		break;
+	case 1:
+		lb = LB_2_TUPLE_VALUE;
+		break;
+	case 2:
+		lb = LB_4_TUPLE_VALUE;
+		break;
+	default:
+		mvOsPrintf("%s: %d - unexpected mode value\n", __func__, mode);
+		return 1;
+	}
+
+#ifdef CONFIG_MV_ETH_PNC_L3_FLOW
+	mvOsPrintf("%s: Not supported\n", __func__);
+	return 1;
+#else
+	tcam_hw_read(&te, TE_L4_EOF);
+	sram_sw_set_load_balance(&te, lb);
+	tcam_hw_write(&te, TE_L4_EOF);
+	return 0;
+#endif /* CONFIG_MV_ETH_PNC_L3_FLOW */
+}
+#endif /* MV_ETH_PNC_LB */
diff --git a/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPncRxq.c b/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPncRxq.c
new file mode 100644
index 000000000000..612bad6efbf5
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPncRxq.c
@@ -0,0 +1,414 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvOs.h"
+#include "mvCommon.h"
+#include "mv802_3.h"
+#ifndef CONFIG_OF
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#endif
+
+#include "gbe/mvNetaRegs.h"
+#include "gbe/mvEthRegs.h"
+
+#include "mvPnc.h"
+#include "mvTcam.h"
+
+#ifdef CONFIG_MV_ETH_PNC_L3_FLOW
+int first_2tuple_rule = TE_FLOW_L3_END + 1;
+int last_5tuple_rule = TE_FLOW_L3 - 1;
+
+extern int pnc_port_mask_check(unsigned int mask, int eth_port);
+
+static INLINE struct tcam_entry *pnc_create_2t_entry(unsigned int sip, unsigned int dip)
+{
+	struct tcam_entry *te = tcam_sw_alloc(TCAM_LU_FLOW_IP4);
+
+	tcam_sw_set_byte(te, 12, (sip >> 0) & 0xFF);
+	tcam_sw_set_byte(te, 13, (sip >> 8) & 0xFF);
+	tcam_sw_set_byte(te, 14, (sip >> 16) & 0xFF);
+	tcam_sw_set_byte(te, 15, (sip >> 24) & 0xFF);
+
+	tcam_sw_set_byte(te, 16, (dip >> 0) & 0xFF);
+	tcam_sw_set_byte(te, 17, (dip >> 8) & 0xFF);
+	tcam_sw_set_byte(te, 18, (dip >> 16) & 0xFF);
+	tcam_sw_set_byte(te, 19, (dip >> 24) & 0xFF);
+
+	return te;
+}
+
+static INLINE int tcam_sw_cmp_2tuple(struct tcam_entry *te, unsigned int sip, unsigned int dip)
+{
+	return !((tcam_sw_cmp_bytes(te, 12, 4, (unsigned char *)&sip) == 0)
+			&& (tcam_sw_cmp_bytes(te, 16, 4, (unsigned char *)&dip) == 0));
+}
+
+static INLINE int tcam_sw_cmp_5tuple(struct tcam_entry *te, unsigned int sip, unsigned int dip,
+								unsigned int ports, unsigned int proto)
+{
+	if (tcam_sw_cmp_2tuple(te, sip, dip) != 0)
+		return 1;
+
+	return !((tcam_sw_cmp_bytes(te, 9, 1, (unsigned char *)&proto) == 0) &&
+			(tcam_sw_cmp_bytes(te, 20, 2, (unsigned char *)&ports) == 0));
+}
+#endif /* CONFIG_MV_ETH_PNC_L3_FLOW */
+
+static INLINE int pnc_mask_to_port(unsigned int mask)
+{
+#ifdef CONFIG_ARCH_FEROCEON_KW2
+	switch (mask) {
+	case 27:
+		return 0;
+	case 15:
+		return 1;
+	case 30:
+		return 2;
+	default:
+		return -1;
+	}
+#else
+	switch (mask) {
+	case 30:
+		return 0;
+	case 15:
+		return 1;
+	case 27:
+		return 2;
+	case 23:
+		return 3;
+	default:
+		return -1;
+	}
+#endif /* MV_ETH_PNC_NEW */
+}
+
+/*
+ * pnc_ip4_2tuple - Add 2-tuple priority rules
+ */
+int pnc_ip4_2tuple_rxq(unsigned int eth_port, unsigned int sip, unsigned int dip, int rxq)
+{
+#ifdef CONFIG_MV_ETH_PNC_L3_FLOW
+	struct tcam_entry *te;
+	unsigned int pdata, pmask;
+	int tid, empty = -1, min_index_occupied = TE_FLOW_L3_END + 1;
+
+	if (rxq < -2 || rxq >= CONFIG_MV_ETH_RXQ || eth_port >= CONFIG_MV_ETH_PORTS_NUM)
+		return 1;
+
+	for (tid = TE_FLOW_L3_END; tid > last_5tuple_rule; tid--) {
+		te = pnc_tcam_entry_get(tid);
+		/* Remember first Empty entry */
+		if (te == NULL) {
+			if (empty == -1)
+				empty = tid;
+			continue;
+		}
+
+		/* Find existing entry for this rule */
+		if (tcam_sw_cmp_2tuple(te, sip, dip) == 0) {
+			tcam_sw_get_port(te, &pdata, &pmask);
+			if (rxq == -2) { /* delete rule */
+				if (!pnc_port_mask_check(pmask, eth_port)) {
+					printk(KERN_ERR "%s: rule is not associated with this port (%d)\n", __func__, eth_port);
+					tcam_sw_free(te);
+					return 1;
+				}
+				if (first_2tuple_rule == tid)
+					first_2tuple_rule = min_index_occupied;
+				pnc_te_del(tid);
+				tcam_sw_free(te);
+				return 0;
+			}
+
+			if (!pnc_port_mask_check(pmask, eth_port)) { /* rule is already associated with another port */
+				printk(KERN_ERR "%s: rule is already associated with port %d\n",
+									__func__, pnc_mask_to_port(pmask));
+				return 1;
+			}
+			if (rxq == -1) { /* set rule to drop mode */
+				sram_sw_set_rinfo(te, RI_DROP, RI_DROP);
+				sram_sw_set_lookup_done(te, 1);
+				tcam_hw_write(te, tid);
+			} else { /* update rxq */
+				sram_sw_set_rinfo(te, 0, RI_DROP);
+				sram_sw_set_rinfo(te, RI_L3_FLOW, RI_L3_FLOW);
+				sram_sw_set_rxq(te, rxq, 0);
+				tcam_hw_write(te, tid);
+			}
+
+			tcam_sw_free(te);
+			return 0;
+		}
+		min_index_occupied = tid;
+		tcam_sw_free(te);
+	}
+
+	/* Add rule to PNC */
+	if (rxq == -2) {
+		mvOsPrintf("%s: Entry not found - sip=0x%x, dip=0x%x, rxq=%d\n", __func__, sip, dip, rxq);
+		return 1;
+	}
+	/* Not found existing entry and no free TCAM entry - Failed */
+	if ((empty == -1) || (empty <= last_5tuple_rule)) {
+		mvOsPrintf("%s: No free place - sip=0x%x, dip=0x%x, rxq=%d\n", __func__, sip, dip, rxq);
+		return 1;
+	}
+
+	/* update upper border of 2 tuple rules */
+	if (first_2tuple_rule > empty)
+		first_2tuple_rule = empty;
+
+	te = pnc_create_2t_entry(sip, dip);
+	pmask = pnc_port_mask(eth_port);
+	tcam_sw_set_port(te, 0, pmask);
+	sram_sw_set_lookup_done(te, 1);
+	tcam_sw_text(te, "ipv4_2t");
+
+	if (rxq == -1) {
+		sram_sw_set_rinfo(te, RI_DROP, RI_DROP);
+		sram_sw_set_lookup_done(te, 1);
+	} else {
+		sram_sw_set_rinfo(te, 0, RI_DROP);
+		sram_sw_set_rinfo(te, RI_L3_FLOW, RI_L3_FLOW);
+		sram_sw_set_rxq(te, rxq, 0);
+	}
+
+	tcam_hw_write(te, empty);
+	tcam_sw_free(te);
+
+	return 0;
+#else
+	return -1;
+#endif /* CONFIG_MV_ETH_PNC_L3_FLOW */
+}
+
+/*
+ * pnc_ip4_5tuple - Add 5-tuple priority rules
+ */
+int pnc_ip4_5tuple_rxq(unsigned int eth_port, unsigned int sip, unsigned int dip, unsigned int ports,
+						unsigned int proto, int rxq)
+{
+#ifdef CONFIG_MV_ETH_PNC_L3_FLOW
+	struct tcam_entry *te;
+	unsigned int pdata, pmask;
+	int tid, empty = -1, max_index_occupied = TE_FLOW_L3 - 1;
+
+	if (rxq < -2 || rxq >= CONFIG_MV_ETH_RXQ || eth_port >= CONFIG_MV_ETH_PORTS_NUM)
+		return 1;
+
+	for (tid = TE_FLOW_L3; tid < first_2tuple_rule; tid++) {
+		te = pnc_tcam_entry_get(tid);
+		/* Remember first Empty entry */
+		if (te == NULL) {
+			if (empty == -1)
+				empty = tid;
+			continue;
+		}
+		/* Find existing entry for this rule */
+		if (tcam_sw_cmp_5tuple(te, sip, dip, ports, proto) == 0) {
+			tcam_sw_get_port(te, &pdata, &pmask);
+			if (rxq == -2) { /* delete rule */
+				if (!pnc_port_mask_check(pmask, eth_port)) {
+					printk(KERN_ERR "%s: rule is not associated with this port (%d)\n", __func__, eth_port);
+					tcam_sw_free(te);
+					return 1;
+				}
+				if (last_5tuple_rule == tid)
+					last_5tuple_rule = max_index_occupied;
+				pnc_te_del(tid);
+				tcam_sw_free(te);
+				return 0;
+			}
+
+			if (!pnc_port_mask_check(pmask, eth_port)) { /* rule is already associated with another port */
+				printk(KERN_ERR "%s: rule is already associated with port %d\n",
+									__func__, pnc_mask_to_port(pmask));
+				return 1;
+			}
+			if (rxq == -1) { /* set rule to drop mode */
+				sram_sw_set_rinfo(te, RI_DROP, RI_DROP);
+				sram_sw_set_lookup_done(te, 1);
+				tcam_hw_write(te, tid);
+			} else { /* update rxq */
+				sram_sw_set_rinfo(te, 0, RI_DROP);
+				sram_sw_set_rinfo(te, RI_L3_FLOW, RI_L3_FLOW);
+				sram_sw_set_rxq(te, rxq, 0);
+				tcam_hw_write(te, tid);
+			}
+
+			tcam_sw_free(te);
+			return 0;
+		}
+		max_index_occupied = tid;
+		tcam_sw_free(te);
+	}
+
+	/* Add rule to PNC */
+	if (rxq == -2) {
+		mvOsPrintf("%s: Entry not found - sip=0x%x, dip=0x%x, ports=0x%x, proto=%d, rxq=%d\n",
+				__func__, sip, dip, ports, proto, rxq);
+		return 1;
+	}
+	/* Not found existing entry and no free TCAM entry - Failed */
+	if ((empty == -1) || (empty >= first_2tuple_rule)) {
+		mvOsPrintf("%s: No free place - sip=0x%x, dip=0x%x, ports=0x%x, proto=%d, rxq=%d\n",
+				__func__, sip, dip, ports, proto, rxq);
+		return 1;
+	}
+
+	/* update lower border of 5 tuple rules */
+	if (last_5tuple_rule < empty)
+		last_5tuple_rule = empty;
+
+	te = pnc_create_2t_entry(sip, dip);
+
+	tcam_sw_set_byte(te, 9, proto);
+	tcam_sw_set_byte(te, 20, (ports >> 0) & 0xFF);
+	tcam_sw_set_byte(te, 21, (ports >> 8) & 0xFF);
+	tcam_sw_set_byte(te, 22, (ports >> 16) & 0xFF);
+	tcam_sw_set_byte(te, 23, (ports >> 24) & 0xFF);
+	pmask = pnc_port_mask(eth_port);
+	tcam_sw_set_port(te, 0, pmask);
+	sram_sw_set_lookup_done(te, 1);
+	tcam_sw_text(te, "ipv4_5t");
+
+	if (rxq == -1) {
+		sram_sw_set_rinfo(te, RI_DROP, RI_DROP);
+		sram_sw_set_lookup_done(te, 1);
+	} else {
+		sram_sw_set_rinfo(te, 0, RI_DROP);
+		sram_sw_set_rinfo(te, RI_L3_FLOW, RI_L3_FLOW);
+		sram_sw_set_rxq(te, rxq, 0);
+	}
+
+	tcam_hw_write(te, empty);
+	tcam_sw_free(te);
+
+	return 0;
+#else
+	return -1;
+#endif /* CONFIG_MV_ETH_PNC_L3_FLOW */
+}
+
+
+/*
+ * pnc_rxq_map_dump - Dump all rules
+ */
+int pnc_rxq_map_dump()
+{
+#ifdef CONFIG_MV_ETH_PNC_L3_FLOW
+	struct tcam_entry *te;
+	unsigned int tid, sport, dport, word, shift, rinfo, mask, data;
+	unsigned char sip[4], dip[4], sip_buf[16], dip_buf[16], *proto;
+
+	mvOsPrintf(" Tid   Sip               Dip               Sport   Dport   Proto   Rxq    Port   Name\n");
+	for (tid = TE_FLOW_L3; tid <= TE_FLOW_L3_END; tid++) {
+		te = pnc_tcam_entry_get(tid);
+		/* Remember first Empty entry */
+		if (te) {
+			memset(sip_buf, 0, 16);
+			memset(dip_buf, 0, 16);
+
+			sip[0] = *(te->data.u.byte + 12);
+			sip[1] = *(te->data.u.byte + 13);
+			sip[2] = *(te->data.u.byte + 14);
+			sip[3] = *(te->data.u.byte + 15);
+			dip[0] = *(te->data.u.byte + 16);
+			dip[1] = *(te->data.u.byte + 17);
+			dip[2] = *(te->data.u.byte + 18);
+			dip[3] = *(te->data.u.byte + 19);
+			mvOsSPrintf(sip_buf, "%d.%d.%d.%d", sip[0], sip[1], sip[2], sip[3]);
+			mvOsSPrintf(dip_buf, "%d.%d.%d.%d", dip[0], dip[1], dip[2], dip[3]);
+			mvOsPrintf(" %-3d   %-15s   %-15s   ", tid, sip_buf, dip_buf);
+
+			if (te->ctrl.text[5] == '5') {
+				sport = MV_BYTE_SWAP_16BIT(*((u16 *)(te->data.u.byte + 20)));
+				dport = MV_BYTE_SWAP_16BIT(*((u16 *)(te->data.u.byte + 22)));
+				proto = (*(te->data.u.byte + 9) == 6) ? "TCP" : "UDP";
+				mvOsPrintf("%-5d   %-5d   %-5s   ", sport, dport, proto);
+			} else
+				mvOsPrintf("-----   -----   -----   ");
+
+			word = RI_VALUE_OFFS / 32;
+			shift = RI_VALUE_OFFS % 32;
+			rinfo = (te->sram.word[word] >> shift) & ((1 << RI_BITS) - 1);
+			if (rinfo & 1)
+				mvOsPrintf("DROP   ");
+			else
+				mvOsPrintf("%-4d   ", sram_sw_get_rxq(te, NULL));
+
+			tcam_sw_get_port(te, &data, &mask);
+			mvOsPrintf("%-4d   ", pnc_mask_to_port(mask));
+			mvOsPrintf("%s\n", te->ctrl.text);
+
+			tcam_sw_free(te);
+		}
+	}
+
+	return 0;
+#else
+	return -1;
+#endif /* CONFIG_MV_ETH_PNC_L3_FLOW */
+}
diff --git a/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPncWol.c b/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPncWol.c
new file mode 100644
index 000000000000..31d6470adc5e
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvPncWol.c
@@ -0,0 +1,505 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvOs.h"
+#include "mvCommon.h"
+#include "mv802_3.h"
+#ifndef CONFIG_OF
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#endif
+
+#include "gbe/mvNeta.h"
+
+#include "mvPnc.h"
+#include "mvTcam.h"
+
+/* PNC debug */
+/*#define PNC_DBG mvOsPrintf*/
+#define PNC_DBG(X...)
+
+/* PNC errors */
+#define PNC_ERR mvOsPrintf
+/*#define PNC_ERR(X...)*/
+
+#define MV_PNC_MAX_RULES			128
+
+/* up to 5 tids can be set for one rule */
+#define MV_PNC_LOOKUP_MAX				(MV_PNC_TOTAL_DATA_SIZE / MV_PNC_LOOKUP_DATA_SIZE)
+
+typedef struct {
+
+	int		id;
+	int		port_mask;
+	char	data[MV_PNC_TOTAL_DATA_SIZE];
+	char	mask[MV_PNC_TOTAL_DATA_SIZE];
+	int		size;
+	int		tids[MV_PNC_LOOKUP_MAX];
+} MV_PNC_WOL_RULE;
+
+/* Support up to 128 WoL rules */
+static MV_PNC_WOL_RULE	*mv_pnc_wol_tbl[MV_PNC_MAX_RULES];
+
+/* Return values:
+ * 0..4 - number of exactly match TCAM enries (24 bytes)
+ * 5    - totally the same rule,
+ * -1   - partial match (not supported)
+ */
+int mv_pnc_rule_cmp(MV_PNC_WOL_RULE *pNew, MV_PNC_WOL_RULE *pExist)
+{
+	int		offset, i, lookup, non_equal_byte, non_equal_lookup;
+	MV_U8	mask;
+
+	offset = 0;
+	lookup = 0;
+	non_equal_byte = -1;
+	non_equal_lookup = -1;
+	while (offset < pNew->size) {
+
+		for (i = 0; i < MV_PNC_LOOKUP_DATA_SIZE; i++) {
+
+			mask = pNew->mask[offset + i] & pExist->mask[offset + i];
+			if ((pNew->data[offset + i] & mask) != (pExist->data[offset + i] & mask)) {
+				/* Different */
+				mvOsPrintf("#%d different on lookup #%d byte #%d: new:%02x & %02x, exist:%02x & %02x\n",
+							pExist->id, lookup, offset + i, pNew->data[offset + i], pNew->mask[offset + i],
+							pExist->data[offset + i], pExist->mask[offset + i]);
+				return lookup;
+			}
+			if (non_equal_byte == -1) {
+				if ((pNew->mask[offset + i] != pExist->mask[offset + i]) ||
+					(pNew->data[offset + i] != pExist->data[offset + i])) {
+					/* Entries are different in this byte */
+
+					/* Remember lookup where rules are different */
+					if (non_equal_lookup == -1)
+						non_equal_lookup = lookup;
+
+					if ((pNew->mask[offset + i] != pExist->mask[offset + i]) && (pNew->mask[offset + i] != 0xFF)) {
+						/* New rule is superset of the existing rule for this byte */
+						non_equal_byte = offset + i;
+					}
+				}
+			}
+		}
+		offset += MV_PNC_LOOKUP_DATA_SIZE;
+		lookup++;
+	}
+	if (non_equal_byte != -1) {
+		mvOsPrintf("#%d non equal on lookup=%d, byte=%d: new %02x & %02x, exist %02x & %02x\n",
+				pExist->id, non_equal_lookup, non_equal_byte, pNew->data[non_equal_byte], pNew->mask[non_equal_byte],
+				pExist->data[non_equal_byte], pExist->mask[non_equal_byte]);
+
+		if (non_equal_lookup < (lookup - 1)) {
+			/* Rejected */
+			mvOsPrintf("rejected: non_equal_lookup #%d < last_lookup #%d\n", non_equal_lookup, lookup - 1);
+			return -1;
+		} else {
+			mvOsPrintf("pass: non_equal_lookup #%d == last_lookup #%d\n", non_equal_lookup, lookup - 1);
+			return non_equal_lookup;
+		}
+	}
+	if (non_equal_lookup == -1) {
+		/* rules are the equal */
+		mvOsPrintf("#%d equal - number of lookups=%d\n", pExist->id, lookup);
+		return MV_PNC_LOOKUP_MAX;
+	} else {
+		/* New rule is superset of existing rule */
+		mvOsPrintf("#%d is superset on lookup #%d\n", pExist->id, non_equal_lookup);
+		return lookup;
+	}
+}
+
+void mv_pnc_wol_init(void)
+{
+	struct tcam_entry *te;
+
+	memset(mv_pnc_wol_tbl, 0, sizeof(mv_pnc_wol_tbl));
+
+	/* Set default entires for each one of LU used for WoL */
+	te = tcam_sw_alloc(TCAM_LU_WOL);
+	tcam_sw_set_lookup_all(te);
+	sram_sw_set_rinfo(te, RI_DROP, RI_DROP);
+	sram_sw_set_lookup_done(te, 1);
+	tcam_sw_text(te, "wol_eof");
+
+	tcam_hw_write(te, TE_WOL_EOF);
+	tcam_sw_free(te);
+}
+
+/* Add WoL rule to TCAM */
+int mv_pnc_wol_rule_set(int port, char *data, char *mask, int size)
+{
+	int               tid, i, free, lookup, match_lu, offset;
+	MV_PNC_WOL_RULE   *pWolRule, *pNewRule, *pMatchRule;
+
+	/* Check parameters validity */
+	if (mvNetaPortCheck(port))
+		return -1;
+
+	if (mvNetaMaxCheck(size, (MV_PNC_TOTAL_DATA_SIZE + 1), "data_size"))
+		return -1;
+
+	/* Save WoL rule in mv_pnc_wol_tbl */
+	pNewRule = mvOsMalloc(sizeof(MV_PNC_WOL_RULE));
+	if (pNewRule == NULL) {
+		mvOsPrintf("%s: port=%d, size=%d - Can't allocate %d bytes\n",
+				__func__, port, size, sizeof(sizeof(MV_PNC_WOL_RULE)));
+		return -2;
+	}
+	memset(pNewRule, 0, sizeof(MV_PNC_WOL_RULE));
+	pNewRule->port_mask = (1 << port);
+	memcpy(pNewRule->data, data, size);
+	memcpy(pNewRule->mask, mask, size);
+
+	/* complete with don't care */
+	memset(&pNewRule->mask[size], 0, MV_PNC_TOTAL_DATA_SIZE - size);
+
+	/* remember last byte that mask != 0 */
+	pNewRule->size = 0;
+	for (i = 0; i < MV_PNC_TOTAL_DATA_SIZE; i++) {
+		if (pNewRule->mask[i] != 0)
+			pNewRule->size = i + 1;
+	}
+
+	/* Check if such rule already exist */
+	free = -1;
+	pMatchRule = NULL;
+	match_lu = 0;
+	for (i = 0; i < MV_PNC_MAX_RULES; i++) {
+
+		pWolRule = mv_pnc_wol_tbl[i];
+		if (pWolRule == NULL) {
+			/* Rememeber first free place */
+			if (free == -1)
+				free = i;
+
+			continue;
+		}
+		lookup = mv_pnc_rule_cmp(pNewRule, pWolRule);
+		if (lookup < 0) {
+			/* Rules are partilly different - not supported */
+			mvOsPrintf("%s: port=%d, size=%d - WoL rule partial match other rule\n",
+						__func__, port, size);
+			mvOsFree(pNewRule);
+			return -3;
+		}
+
+		if (lookup == MV_PNC_LOOKUP_MAX) {
+			/* The same rule exist - update port mask for all TCAM entries of the rule */
+			pWolRule->port_mask |= (1 << port);
+			for (lookup = 0; lookup < MV_PNC_LOOKUP_MAX; lookup++) {
+				if (pWolRule->tids[lookup] != 0)
+					pnc_tcam_port_update(pWolRule->tids[lookup], port, 1);
+			}
+			mvOsPrintf("%s: port=%d, size=%d - WoL rule already exist\n", __func__, port, size);
+			mvOsFree(pNewRule);
+			return i;
+		}
+		/* remember maximum match lookup and matched rule */
+		if (lookup > match_lu) {
+			match_lu = lookup;
+			pMatchRule = pWolRule;
+		}
+	}
+	if (free == -1) {
+		mvOsPrintf("%s: port=%d, size=%d - No free place\n", __func__, port, size);
+		mvOsFree(pNewRule);
+		return -MV_FULL;
+	}
+
+	/* Set WoL rule to TCAM */
+	pNewRule->id = free;
+	tid = TE_WOL;
+
+	offset = 0;
+	for (lookup = 0; lookup < MV_PNC_LOOKUP_MAX; lookup++) {
+		char              name[TCAM_TEXT];
+		struct tcam_entry *te;
+		unsigned int mask;
+
+		if (lookup < match_lu) {
+			pNewRule->tids[lookup] = pMatchRule->tids[lookup];
+			offset += MV_PNC_LOOKUP_DATA_SIZE;
+
+			/* Update port mask */
+			pnc_tcam_port_update(pNewRule->tids[lookup], port, 1);
+			continue;
+		}
+
+		if (offset >= pNewRule->size)
+			break;
+
+		/* Set free TCAM entry */
+		for (; tid < (MV_PNC_TCAM_SIZE() - 1); tid++) {
+
+			te = pnc_tcam_entry_get(tid);
+			if (te != NULL) {
+				tcam_sw_free(te);
+				continue;
+			}
+
+			te = tcam_sw_alloc(TCAM_LU_WOL + lookup);
+
+			for (i = 0; i < MV_PNC_LOOKUP_DATA_SIZE; i++) {
+				tcam_sw_set_byte(te, i, pNewRule->data[offset + i]);
+				tcam_sw_set_mask(te, i, pNewRule->mask[offset + i]);
+			}
+
+			/* Set AI */
+			if (lookup == 0)
+				sram_sw_set_ainfo(te, pNewRule->id, AI_MASK);
+			else if (lookup > match_lu)
+				tcam_sw_set_ainfo(te, pNewRule->id, AI_MASK);
+			else {
+				tcam_sw_set_ainfo(te, pMatchRule->id, AI_MASK);
+				sram_sw_set_ainfo(te, pNewRule->id, AI_MASK);
+			}
+			/* set port mask */
+			mask = pnc_port_mask(port);
+			tcam_sw_set_port(te, 0, mask);
+
+			sprintf(name, "wol_%d", pNewRule->id);
+			tcam_sw_text(te, name);
+
+			if ((offset + i) >= pNewRule->size) {
+				/* Last TCAM entry */
+				sram_sw_set_lookup_done(te, 1);
+			} else {
+				sram_sw_set_shift_update(te, 0, MV_PNC_LOOKUP_DATA_SIZE);
+				sram_sw_set_next_lookup(te, TCAM_LU_WOL + lookup + 1);
+			}
+			offset += MV_PNC_LOOKUP_DATA_SIZE;
+
+			pNewRule->tids[lookup] = tid;
+			tcam_hw_write(te, tid);
+			tcam_sw_free(te);
+			break;
+		}
+	}
+
+	mv_pnc_wol_tbl[pNewRule->id] = pNewRule;
+	mvOsPrintf("%s: port=%d, size=%d - New rule added [%d] = %p, \n",
+				__func__, port, size, pNewRule->id, pNewRule);
+	return pNewRule->id;
+}
+
+/* Delete specific WoL rule (maybe more than one TCAM entry) */
+int mv_pnc_wol_rule_del(int idx)
+{
+#if 0
+	int lookup, tid;
+	MV_PNC_WOL_RULE *pWolRule;
+
+	pWolRule = mv_pnc_wol_tbl[idx];
+	if (pWolRule == NULL)
+		return 1;
+
+	/* Invalidate TCAM entries */
+	for (lookup = 0; lookup < pWolRule->maxLookup; lookup++) {
+		tid = pNewRule->tids[lookup];
+
+		/* FIXME: Decrement reference count of TID, if last invalidate - TCAM entry */
+		pnc_te_del(tid);
+	}
+#endif
+	mvOsPrintf("Not supported\n");
+	return 0;
+}
+
+int mv_pnc_wol_rule_del_all(int port)
+{
+	int i;
+	MV_PNC_WOL_RULE *pWolRule;
+
+	if (mvNetaPortCheck(port))
+		return -1;
+
+	for (i = 0; i < MV_PNC_MAX_RULES; i++) {
+		pWolRule = mv_pnc_wol_tbl[i];
+		if (pWolRule != NULL) {
+			mvOsFree(pWolRule);
+			mv_pnc_wol_tbl[i] = NULL;
+		}
+	}
+	/* Set free TCAM entry */
+	for (i = TE_WOL; i < (MV_PNC_TCAM_SIZE() - 1); i++)
+		pnc_te_del(i);
+
+	return 0;
+}
+
+void mv_pnc_wol_sleep(int port)
+{
+	MV_U32 regVal;
+	int    pnc_port = pnc_eth_port_map(port);
+
+	regVal = MV_REG_READ(MV_PNC_INIT_LOOKUP_REG);
+
+	regVal &= ~MV_PNC_PORT_LU_INIT_MASK(pnc_port);
+	regVal |= MV_PNC_PORT_LU_INIT_VAL(pnc_port, TCAM_LU_WOL);
+
+	MV_REG_WRITE(MV_PNC_INIT_LOOKUP_REG, regVal);
+}
+
+void mv_pnc_wol_wakeup(int port)
+{
+	MV_U32 regVal;
+	int    pnc_port = pnc_eth_port_map(port);
+
+	regVal = MV_REG_READ(MV_PNC_INIT_LOOKUP_REG);
+
+	regVal &= ~MV_PNC_PORT_LU_INIT_MASK(pnc_port);
+	regVal |= MV_PNC_PORT_LU_INIT_VAL(pnc_port, TCAM_LU_MAC);
+
+	MV_REG_WRITE(MV_PNC_INIT_LOOKUP_REG, regVal);
+}
+
+int mv_pnc_wol_rule_dump(int idx)
+{
+	int	i;
+	MV_PNC_WOL_RULE *pWolRule;
+
+	if (mvNetaMaxCheck(idx, MV_PNC_MAX_RULES, "pnc_rules"))
+		return -1;
+
+	pWolRule = mv_pnc_wol_tbl[idx];
+	if (pWolRule == NULL)
+		return 1;
+
+	mvOsPrintf("[%3d]: id=%d, port_mask=0x%x, size=%d, tids=[",
+				idx, pWolRule->id, pWolRule->port_mask, pWolRule->size);
+	for (i = 0; i < MV_PNC_LOOKUP_MAX; i++) {
+		if (pWolRule->tids[i] == 0)
+			break;
+		mvOsPrintf(" %d", pWolRule->tids[i]);
+	}
+	mvOsPrintf("]\n");
+
+	mvOsPrintf(" offs: ");
+	for (i = 0;  i < MV_PNC_LOOKUP_DATA_SIZE; i++)
+		mvOsPrintf("%02d", i);
+	mvOsPrintf("\n");
+
+	mvOsPrintf(" data: ");
+	i = 0;
+	while (i < pWolRule->size) {
+		mvOsPrintf("%02x", pWolRule->data[i++]);
+		if ((i % MV_PNC_LOOKUP_DATA_SIZE) == 0)
+			mvOsPrintf("\n       ");
+	}
+	mvOsPrintf("\n");
+
+	mvOsPrintf(" mask: ");
+	i = 0;
+	while (i < pWolRule->size) {
+		mvOsPrintf("%02x", pWolRule->mask[i++]);
+		if ((i % MV_PNC_LOOKUP_DATA_SIZE) == 0)
+			mvOsPrintf("\n       ");
+	}
+	mvOsPrintf("\n\n");
+
+	return 0;
+}
+
+void mv_pnc_wol_dump(void)
+{
+	int				i;
+
+	mvOsPrintf("WoL rules table\n");
+
+	for (i = 0; i < MV_PNC_MAX_RULES; i++)
+		mv_pnc_wol_rule_dump(i);
+}
+
+
+int  mv_pnc_wol_pkt_match(int port, char *data, int size, int *ruleId)
+{
+	int               i, j;
+	MV_PNC_WOL_RULE   *pWolRule;
+
+	/* Check if data match one of existing rules */
+	for (i = 0; i < MV_PNC_MAX_RULES; i++) {
+
+		pWolRule = mv_pnc_wol_tbl[i];
+		if (pWolRule == NULL)
+			continue;
+
+		/* packet size must be more or equal than rule size */
+		if (size < pWolRule->size)
+			continue;
+
+		for (j = 0; j < pWolRule->size; j++) {
+			if ((data[j] & pWolRule->mask[j]) != (pWolRule->data[j] & pWolRule->mask[j]))
+				break;
+		}
+
+		if (j == pWolRule->size) {
+			/* rule matched */
+			if (ruleId != NULL)
+				*ruleId = i;
+			return 1;
+		}
+	}
+	return 0;
+}
diff --git a/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvTcam.c b/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvTcam.c
new file mode 100644
index 000000000000..5d3c4fbfee52
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvTcam.c
@@ -0,0 +1,931 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvOs.h"
+#include "mvCommon.h"
+
+#include "gbe/mvNetaRegs.h"
+#include "mvPnc.h"
+#include "mvTcam.h"
+
+#define DWORD_LEN       32
+
+#define TCAM_DBG(x...) if (tcam_ctl_flags & TCAM_F_DEBUG) mvOsPrintf(x)
+/*#define TCAM_DBG(x...)*/
+
+
+/*
+ * SW control flags
+ */
+static int tcam_ctl_flags;
+#define TCAM_F_DEBUG	0x1
+#define TCAM_F_WRITE	0x2
+
+/*
+ * Keep short text per entry
+ */
+char tcam_text[MV_PNC_TCAM_LINES][TCAM_TEXT];
+
+MV_U8 *mvPncVirtBase = NULL;
+
+unsigned int tcam_line_num;
+
+MV_STATUS mvPncInit(MV_U8 *pncVirtBase, MV_U32 pncTcamSize)
+{
+	mvPncVirtBase = pncVirtBase;
+	tcam_line_num = pncTcamSize;
+
+	mvOsPrintf("mvPncVirtBase = 0x%p, pncTcamSize = %d\n", pncVirtBase, pncTcamSize);
+	return MV_OK;
+}
+
+/*
+ * Low-Level API: TCAM
+ */
+void tcam_sw_clear(struct tcam_entry *te)
+{
+	memset(te, 0, sizeof(struct tcam_entry));
+}
+
+/*
+ *@ainfo : match to `1` on additional info bits 6..0
+ */
+void tcam_sw_set_ainfo(struct tcam_entry *te, unsigned int bits, unsigned int mask)
+{
+	int i;
+	MV_U32 key = te->data.u.word[AI_WORD];
+	MV_U32 val;
+
+	WARN_ON_OOR(bits > AI_MASK);
+	WARN_ON_OOR(mask > AI_MASK);
+
+	for (i = 0; i < AI_BITS; i++) {
+		if (mask & (1 << i)) {
+
+			val = 1 << (i + AI_OFFS);
+
+			if (bits & (1 << i))
+				key |= val;
+			else
+				key &= ~val;
+		}
+	}
+	te->data.u.word[AI_WORD] = key;
+	te->mask.u.word[AI_WORD] |= mask << AI_OFFS;
+}
+
+static int tcam_sw_dump_ainfo(struct tcam_entry *te, char *buf)
+{
+	int i, data, mask;
+	int off = 0;
+
+	mask = ((te->mask.u.word[AI_WORD] >> AI_OFFS) & AI_MASK);
+	if (mask == 0)
+		return off;
+
+	data = ((te->data.u.word[AI_WORD] >> AI_OFFS) & AI_MASK);
+	off += mvOsSPrintf(buf + off, " AI=");
+	for (i = 0; i < AI_BITS; i++)
+		if (mask & (1 << i))
+			off += mvOsSPrintf(buf + off, "%d",
+							((data & (1 << i)) != 0));
+		else
+			off += mvOsSPrintf(buf + off, "x");
+
+	return off;
+}
+
+/*
+ *@port : port
+ */
+void tcam_sw_set_port(struct tcam_entry *te, unsigned int port, unsigned int mask)
+{
+	WARN_ON_OOR(port > PORT_MASK);
+	WARN_ON_OOR(mask > PORT_MASK);
+
+	te->data.u.word[PORT_WORD] &= ~(PORT_MASK << PORT_OFFS);
+	te->mask.u.word[PORT_WORD] &= ~(PORT_MASK << PORT_OFFS);
+
+	te->data.u.word[PORT_WORD] |= port << PORT_OFFS;
+	te->mask.u.word[PORT_WORD] |= mask << PORT_OFFS;
+}
+
+void tcam_sw_get_port(struct tcam_entry *te, unsigned int *port, unsigned int *mask)
+{
+	*port = (te->data.u.word[PORT_WORD] >> PORT_OFFS) & PORT_MASK;
+	*mask = (te->mask.u.word[PORT_WORD] >> PORT_OFFS) & PORT_MASK;
+}
+
+void tcam_sw_set_lookup(struct tcam_entry *te, unsigned int lookup)
+{
+	WARN_ON_OOR(lookup > LU_MASK);
+	te->data.u.word[PORT_WORD] &= ~(LU_MASK << LU_OFFS);
+	te->data.u.word[PORT_WORD] |= lookup << LU_OFFS;
+	te->mask.u.word[PORT_WORD] |= LU_MASK << LU_OFFS;
+}
+
+void tcam_sw_set_lookup_all(struct tcam_entry *te)
+{
+	te->data.u.word[PORT_WORD] &= ~(LU_MASK << LU_OFFS);
+	te->mask.u.word[PORT_WORD] &= ~(LU_MASK << LU_OFFS);
+}
+
+void tcam_sw_get_lookup(struct tcam_entry *te, unsigned int *lookup, unsigned int *mask)
+{
+	*lookup = (te->data.u.word[PORT_WORD] >> LU_OFFS) & LU_MASK;
+	*mask = (te->mask.u.word[PORT_WORD] >> LU_OFFS) & LU_MASK;
+}
+
+/* offset:23..0 */
+void tcam_sw_set_byte(struct tcam_entry *te, unsigned int offset, unsigned char data)
+{
+	WARN_ON_OOR(offset >= ((TCAM_LEN - 1) * 4));
+
+	te->data.u.byte[offset] = data;
+	te->mask.u.byte[offset] = 0xFF;
+}
+
+void tcam_sw_set_mask(struct tcam_entry *te, unsigned int offset, unsigned char mask)
+{
+	WARN_ON_OOR(offset >= ((TCAM_LEN - 1) * 4));
+
+	te->mask.u.byte[offset] = mask;
+}
+
+int tcam_sw_cmp_byte(struct tcam_entry *te, unsigned int offset, unsigned char data)
+{
+	unsigned char mask;
+
+	ERR_ON_OOR(offset >= ((TCAM_LEN - 1) * 4));
+
+	mask = te->mask.u.byte[offset];
+
+	if ((te->data.u.byte[offset] & mask) == (data & mask))
+		return 0;
+
+	return 1;
+}
+
+int tcam_sw_cmp_bytes(struct tcam_entry *te, unsigned int offset, unsigned int size, unsigned char *data)
+{
+	int i;
+
+	ERR_ON_OOR((offset + size) >= ((TCAM_LEN - 1) * 4));
+
+	for (i = 0; i < size; i++) {
+		if (tcam_sw_cmp_byte(te, offset + i, data[i]))
+			return 1;
+	}
+	return 0;
+}
+
+/*
+ * Low-Level API: SRAM
+ */
+void sram_sw_set_flowid(struct tcam_entry *te, unsigned int flowid,
+			unsigned int mask)
+{
+	unsigned int i;
+
+	WARN_ON_OOR(mask > FLOW_CTRL_MASK);
+
+	for (i = 0; i < FLOW_CTRL_BITS; i++) {
+		if (mask & (1 << i)) {
+			te->sram.word[0] &= ~(FLOW_PART_MASK << (i * FLOW_PART_BITS));
+			te->sram.word[0] |= flowid & (FLOW_PART_MASK << (i * FLOW_PART_BITS));
+			te->sram.word[1] |= 1 << i;
+		}
+	}
+}
+
+void sram_sw_set_flowid_partial(struct tcam_entry *te, unsigned int flowid,
+				unsigned int idx)
+{
+	WARN_ON_OOR(idx >= FLOW_CTRL_BITS);
+	WARN_ON_OOR(flowid > FLOW_PART_MASK);
+
+	te->sram.word[0] &= ~(FLOW_PART_MASK << (idx * FLOW_PART_BITS));
+	te->sram.word[0] |= (flowid << (idx * FLOW_PART_BITS));
+	te->sram.word[1] |= 1 << idx;
+}
+
+void sram_sw_set_rinfo(struct tcam_entry *te, unsigned int rinfo, unsigned int mask)
+{
+	unsigned int word;
+	unsigned int i;
+
+	WARN_ON_OOR(rinfo > RI_MASK);
+
+	for (i = 0; i < RI_BITS; i++) {
+		if (mask & (1 << i)) {
+
+			word = (RI_VALUE_OFFS + i) / DWORD_LEN;
+			if (rinfo & (1 << i))
+				te->sram.word[word] |= (1 << ((i + RI_VALUE_OFFS) % DWORD_LEN));
+			else
+				te->sram.word[word] &= ~(1 << ((i + RI_VALUE_OFFS) % DWORD_LEN));
+
+			word = (RI_MASK_OFFS + i) / DWORD_LEN;
+			te->sram.word[word] |= (1 << ((i + RI_MASK_OFFS) % DWORD_LEN));
+		}
+	}
+}
+
+#ifdef MV_ETH_PNC_NEW
+
+#ifdef MV_ETH_PNC_LB
+void sram_sw_set_load_balance(struct tcam_entry *te, unsigned int value)
+{
+	unsigned int word;
+
+	WARN_ON_OOR(value > LB_QUEUE_MASK);
+
+	word = LB_QUEUE_OFFS / DWORD_LEN;
+	te->sram.word[word] &= ~(LB_QUEUE_MASK << (LB_QUEUE_OFFS % DWORD_LEN));
+	te->sram.word[word] |= value << (LB_QUEUE_OFFS % DWORD_LEN);
+}
+
+static int sram_sw_dump_load_balance(struct tcam_entry *te, char *buf)
+{
+	unsigned int word, value;
+
+	word = LB_QUEUE_OFFS / DWORD_LEN;
+	value = te->sram.word[word] >> (LB_QUEUE_OFFS % DWORD_LEN);
+	value &= LB_QUEUE_MASK;
+
+	if (value)
+		return mvOsSPrintf(buf, " LB=%d", value);
+
+	return 0;
+}
+#endif /* MV_ETH_PNC_LB */
+
+void sram_sw_set_rinfo_extra(struct tcam_entry *te, unsigned int ri_extra)
+{
+	unsigned int word, value;
+	unsigned int i, c;
+
+	WARN_ON_OOR(ri_extra > RI_EXTRA_MASK);
+
+	for (c = i = 0; i < RI_EXTRA_BITS; i += 2, c++)	{
+		value = ((ri_extra >> i) & 3);
+		if (value) {
+			word = (RI_EXTRA_VALUE_OFFS + i) / DWORD_LEN;
+			te->sram.word[word] &= ~(3 << ((i + RI_EXTRA_VALUE_OFFS) % DWORD_LEN));
+			te->sram.word[word] |= value << ((i + RI_EXTRA_VALUE_OFFS) % DWORD_LEN);
+
+			word = (RI_EXTRA_CTRL_OFFS + c) / DWORD_LEN;
+			te->sram.word[word] |= 1 << ((c + RI_EXTRA_CTRL_OFFS) % DWORD_LEN);
+		}
+	}
+}
+#endif /* MV_ETH_PNC_NEW */
+
+static int sram_sw_dump_rinfo(struct tcam_entry *te, char *buf)
+{
+	unsigned int word, shift, rinfo;
+	int i, off = 0;
+
+	word = RI_VALUE_OFFS / DWORD_LEN;
+	shift = RI_VALUE_OFFS % DWORD_LEN;
+	rinfo = (te->sram.word[word] >> shift) & ((1 << RI_BITS) - 1);
+
+	for (i = 0; i < RI_BITS; i++)
+		if (rinfo & (1 << i))
+			off += mvOsSPrintf(buf + off, " R%d", i);
+
+	return off;
+}
+
+void sram_sw_set_shift_update(struct tcam_entry *te, unsigned int index, unsigned int value)
+{
+	unsigned int word;
+
+	WARN_ON_OOR(index > SHIFT_IDX_MASK);	/* 0x7  */
+	WARN_ON_OOR(value > SHIFT_VAL_MASK);	/* 0x7F */
+
+	/* Reset value prior to set new one */
+	word = SHIFT_IDX_OFFS / DWORD_LEN;
+	te->sram.word[word] &= ~(SHIFT_IDX_MASK << (SHIFT_IDX_OFFS % DWORD_LEN));
+	te->sram.word[word] |= index << (SHIFT_IDX_OFFS % DWORD_LEN);
+
+	word = SHIFT_VAL_OFFS / DWORD_LEN;
+	te->sram.word[word] &= ~(SHIFT_VAL_MASK << (SHIFT_VAL_OFFS % DWORD_LEN));
+	te->sram.word[word] |= value << (SHIFT_VAL_OFFS % DWORD_LEN);
+
+	TCAM_DBG("%s: w=%x i=0x%x v=0x%x\n", __func__, word, index, value);
+}
+
+static int sram_sw_dump_shift_update(struct tcam_entry *te, char *buf)
+{
+	unsigned int word;
+	unsigned int index;
+	unsigned int value;
+
+	word = SHIFT_VAL_OFFS / DWORD_LEN;
+	value = te->sram.word[word] >> (SHIFT_VAL_OFFS % DWORD_LEN);
+	value &= SHIFT_VAL_MASK;
+
+	word = SHIFT_IDX_OFFS / DWORD_LEN;
+	index = te->sram.word[word] >> (SHIFT_IDX_OFFS % DWORD_LEN);
+	index &= SHIFT_IDX_MASK;
+
+	if (value)
+		return mvOsSPrintf(buf, " [%d]=%d", index, value);
+
+	return 0;
+}
+
+/* rxq:95..93 info:92 */
+void sram_sw_set_rxq(struct tcam_entry *te, unsigned int rxq, unsigned int force)
+{
+	unsigned int word;
+
+	WARN_ON_OOR(rxq > RXQ_MASK);
+
+	if (force) {
+		word = RXQ_INFO_OFFS / DWORD_LEN;
+		te->sram.word[word] |= 1 << (RXQ_INFO_OFFS % DWORD_LEN);
+	}
+
+	word = RXQ_QUEUE_OFFS / DWORD_LEN;
+	te->sram.word[word] &= ~(RXQ_MASK << (RXQ_QUEUE_OFFS % DWORD_LEN));
+	te->sram.word[word] |= rxq << (RXQ_QUEUE_OFFS % DWORD_LEN);
+}
+
+unsigned int sram_sw_get_rxq(struct tcam_entry *te, unsigned int *force)
+{
+	unsigned int word;
+	unsigned int rxq;
+
+	word = RXQ_INFO_OFFS / DWORD_LEN;
+	if (force)
+		*force = te->sram.word[word] & (1 << (RXQ_INFO_OFFS % DWORD_LEN));
+
+	word = RXQ_QUEUE_OFFS / DWORD_LEN;
+	rxq = te->sram.word[word] >> (RXQ_QUEUE_OFFS % DWORD_LEN);
+	rxq &= RXQ_MASK;
+
+	return rxq;
+}
+
+static int sram_sw_dump_rxq(struct tcam_entry *te, char *buf)
+{
+	unsigned int rxq, force;
+
+	rxq = sram_sw_get_rxq(te, &force);
+	if (rxq)
+		return mvOsSPrintf(buf, " %sQ%d", force ? "f" : "", rxq);
+
+	return 0;
+}
+
+/* index */
+void sram_sw_set_next_lookup_shift(struct tcam_entry *te, unsigned int index)
+{
+	unsigned int word;
+
+	WARN_ON_OOR(index > SHIFT_IDX_MASK);
+
+	word = NEXT_LU_SHIFT_OFFS / DWORD_LEN;
+	te->sram.word[word] |= index << (NEXT_LU_SHIFT_OFFS % DWORD_LEN);
+}
+
+static int sram_sw_dump_next_lookup_shift(struct tcam_entry *te, char *buf)
+{
+	unsigned int word, value;
+
+	word = NEXT_LU_SHIFT_OFFS / DWORD_LEN;
+	value = te->sram.word[word] >> (NEXT_LU_SHIFT_OFFS % DWORD_LEN);
+	value &= SHIFT_IDX_MASK;
+
+	if (value)
+		return mvOsSPrintf(buf, " SH=%d", value);
+
+	return 0;
+}
+
+/* done */
+void sram_sw_set_lookup_done(struct tcam_entry *te, unsigned int value)
+{
+	unsigned int word;
+
+	word = LU_DONE_OFFS / DWORD_LEN;
+	if (value)
+		te->sram.word[word] |= 1 << (LU_DONE_OFFS % DWORD_LEN);
+	else
+		te->sram.word[word] &= ~(1 << (LU_DONE_OFFS % DWORD_LEN));
+}
+
+/* index:91..89 val:88..82 */
+void sram_sw_set_ainfo(struct tcam_entry *te, unsigned int bits, unsigned int mask)
+{
+	unsigned int word;
+	unsigned int i;
+
+	WARN_ON_OOR(bits > AI_MASK);
+	WARN_ON_OOR(mask > AI_MASK);
+
+	for (i = 0; i < AI_BITS; i++)
+		if (mask & (1 << i)) {
+			word = (AI_VALUE_OFFS + i) / DWORD_LEN;
+			if (bits & (1 << i))
+				te->sram.word[word] |= (1 << ((i + AI_VALUE_OFFS) % DWORD_LEN));
+			else
+				te->sram.word[word] &= ~(1 << ((i + AI_VALUE_OFFS) % DWORD_LEN));
+
+			word = (AI_MASK_OFFS + i) / DWORD_LEN;
+			te->sram.word[word] |= 1 << ((i + AI_MASK_OFFS) % DWORD_LEN);
+		}
+}
+
+static int sram_sw_dump_ainfo(struct tcam_entry *te, char *buf)
+{
+	unsigned int word, shift, data, mask;
+	int i, off = 0;
+
+	word = AI_VALUE_OFFS / DWORD_LEN;
+	shift = AI_VALUE_OFFS % DWORD_LEN;
+	data = ((te->sram.word[word] >> shift) & AI_MASK);
+	shift = AI_MASK_OFFS % DWORD_LEN;
+	mask = ((te->sram.word[word] >> shift) & AI_MASK);
+
+	if (mask) {
+		off += mvOsSPrintf(buf + off, " AI=");
+		for (i = 0; i < AI_BITS; i++) {
+			if (mask & (1 << i))
+				off += mvOsSPrintf(buf + off, "%d", ((data & (1 << i)) != 0));
+			else
+				off += mvOsSPrintf(buf + off, "x");
+		}
+	}
+	return off;
+}
+
+/* 121..118 */
+void sram_sw_set_next_lookup(struct tcam_entry *te, unsigned int lookup)
+{
+	unsigned int word;
+
+	WARN_ON_OOR(lookup > LU_MASK);
+
+	word = LU_ID_OFFS / DWORD_LEN;
+	te->sram.word[word] |= lookup << (LU_ID_OFFS % DWORD_LEN);
+}
+static int sram_sw_dump_next_lookup(struct tcam_entry *te, char *buf)
+{
+	unsigned int word;
+	unsigned int lookup;
+
+	word = LU_DONE_OFFS / DWORD_LEN;
+	lookup = te->sram.word[word] >> (LU_DONE_OFFS % DWORD_LEN);
+	lookup &= 0x1;
+
+	if (lookup)
+		return mvOsSPrintf(buf, " LU=D");
+
+	word = LU_ID_OFFS / DWORD_LEN;
+	lookup = te->sram.word[word] >> (LU_ID_OFFS % DWORD_LEN);
+	lookup &= LU_MASK;
+
+	if (lookup)
+		return mvOsSPrintf(buf, " LU=%d", lookup);
+
+	return 0;
+}
+
+/*
+ * tcam_sw_alloc - allocate new TCAM entry
+ * @lookup: lookup section
+ */
+struct tcam_entry *tcam_sw_alloc(unsigned int lookup)
+{
+	struct tcam_entry *te = mvOsMalloc(sizeof(struct tcam_entry));
+
+	WARN_ON_OOM(!te);
+
+	tcam_sw_clear(te);
+	tcam_sw_set_lookup(te, lookup);
+	sram_sw_set_shift_update(te, 7, 0);
+
+	return te;
+}
+
+void tcam_sw_free(struct tcam_entry *te)
+{
+	mvOsFree(te);
+}
+
+void tcam_sw_text(struct tcam_entry *te, char *text)
+{
+	strncpy(te->ctrl.text, text, TCAM_TEXT);
+	te->ctrl.text[TCAM_TEXT - 1] = 0;
+}
+
+int tcam_sw_dump(struct tcam_entry *te, char *buf)
+{
+	unsigned int *word;
+	unsigned int off = 0;
+	MV_U32       w32;
+	int			 i;
+
+	/* hw entry id */
+	off += mvOsSPrintf(buf + off, "[%4d] ", te->ctrl.index);
+
+	word = (unsigned int *)&te->data;
+	i = TCAM_LEN - 1;
+	off += mvOsSPrintf(buf+off, "%4.4x ", word[i--] & 0xFFFF);
+
+	while (i >= 0) {
+		w32 = word[i--];
+		off += mvOsSPrintf(buf+off, "%8.8x ", MV_32BIT_LE_FAST(w32));
+	}
+	off += mvOsSPrintf(buf+off, "| ");
+
+	word = (unsigned int *)&te->sram;
+	off += mvOsSPrintf(buf+off, SRAM_FMT, SRAM_VAL(word));
+
+	off += sram_sw_dump_next_lookup(te, buf + off);
+	off += sram_sw_dump_next_lookup_shift(te, buf + off);
+	off += sram_sw_dump_rinfo(te, buf + off);
+	off += sram_sw_dump_ainfo(te, buf + off);
+	off += sram_sw_dump_shift_update(te, buf + off);
+	off += sram_sw_dump_rxq(te, buf + off);
+
+#ifdef MV_ETH_PNC_LB
+	off += sram_sw_dump_load_balance(te, buf + off);
+#endif /* MV_ETH_PNC_LB */
+
+	off += (te->ctrl.flags & TCAM_F_INV) ? mvOsSPrintf(buf + off, " [inv]") : 0;
+	off += mvOsSPrintf(buf + off, "\n       ");
+
+	word = (unsigned int *)&te->mask;
+	i = TCAM_LEN - 1;
+	off += mvOsSPrintf(buf+off, "%4.4x ", word[i--] & 0xFFFF);
+
+	while (i >= 0) {
+		w32 = word[i--];
+		off += mvOsSPrintf(buf+off, "%8.8x ", MV_32BIT_LE_FAST(w32));
+	}
+
+	off += mvOsSPrintf(buf + off, "   (%s)", te->ctrl.text);
+	off += tcam_sw_dump_ainfo(te, buf + off);
+	off += mvOsSPrintf(buf + off, "\n");
+
+	return off;
+}
+
+/*
+ * tcam_hw_inv - invalidate TCAM entry on HW
+ * @tid: entry index
+ */
+void tcam_hw_inv(int tid)
+{
+	MV_U32 va;
+
+	WARN_ON_OOR(tid >= MV_PNC_TCAM_SIZE());
+	va = (MV_U32) mvPncVirtBase;
+	va |= PNC_TCAM_ACCESS_MASK;
+	va |= (tid << TCAM_LINE_INDEX_OFFS);
+	va |= (0xd << TCAM_WORD_ENTRY_OFFS);
+
+	MV_MEMIO_LE32_WRITE(va, 1);
+	TCAM_DBG("%s: (inv) 0x%8x <-- 0x%x [%2x]\n", __func__, va, 1, tid);
+}
+
+void tcam_hw_inv_all(void)
+{
+	MV_U32 va;
+	int tid = MV_PNC_TCAM_SIZE();
+
+	while (tid--) {
+		va = (MV_U32) mvPncVirtBase;
+		va |= PNC_TCAM_ACCESS_MASK;
+		va |= (tid << TCAM_LINE_INDEX_OFFS);
+		va |= (0xd << TCAM_WORD_ENTRY_OFFS);
+
+		MV_MEMIO_LE32_WRITE(va, 1);
+		TCAM_DBG("%s: (inv) 0x%8x <-- 0x%x [%2x]\n", __func__, va, 1, tid);
+	}
+}
+
+/*
+ * tcam_hw_write - install TCAM entry on HW
+ * @tid: entry index
+ */
+int tcam_hw_write(struct tcam_entry *te, int tid)
+{
+	MV_U32 i, va, w32;
+
+	TCAM_DBG("%s: tid=0x%x\n", __func__, tid);
+	ERR_ON_OOR(tid >= MV_PNC_TCAM_SIZE());
+
+	/* sram */
+	for (i = 0; i < SRAM_LEN; i++) {
+		w32 = te->sram.word[i];
+		/* last word triggers hardware */
+		if (tcam_ctl_flags & TCAM_F_WRITE || w32 || (i == (SRAM_LEN - 1))) {
+			va = (MV_U32) mvPncVirtBase;
+			va |= PNC_SRAM_ACCESS_MASK;
+			va |= (tid << TCAM_LINE_INDEX_OFFS);
+			va |= (i << TCAM_WORD_ENTRY_OFFS);
+			MV_MEMIO_LE32_WRITE(va, w32);
+			TCAM_DBG("%s: (sram) 0x%8x <-- 0x%x\n", __func__, va, w32);
+		}
+	}
+
+	/* tcam */
+	for (i = 0; i < (TCAM_LEN - 1); i++) {
+		w32 = te->data.u.word[i];
+
+		if (tcam_ctl_flags & TCAM_F_WRITE || w32) {
+			va = (MV_U32) mvPncVirtBase;
+			va |= PNC_TCAM_ACCESS_MASK;
+			va |= (tid << TCAM_LINE_INDEX_OFFS);
+			va |= ((2 * i) << TCAM_WORD_ENTRY_OFFS);
+
+			MV_MEMIO32_WRITE(va, w32);
+			TCAM_DBG("%s: (tcam data) 0x%08x <-- 0x%08x\n", __func__, va, w32);
+		}
+	}
+
+	/* mask */
+	for (i = 0; i < (TCAM_LEN - 1); i++) {
+		w32 = te->mask.u.word[i];
+
+		if (tcam_ctl_flags & TCAM_F_WRITE || w32) {
+			va = (MV_U32) mvPncVirtBase;
+			va |= PNC_TCAM_ACCESS_MASK;
+			va |= (tid << TCAM_LINE_INDEX_OFFS);
+			va |= ((2 * i + 1) << TCAM_WORD_ENTRY_OFFS);
+
+			MV_MEMIO32_WRITE(va, w32);
+			TCAM_DBG("%s: (tcam mask) 0x%08x <-- 0x%08x\n", __func__, va, w32);
+		}
+	}
+
+	va = (MV_U32) mvPncVirtBase;
+	va |= PNC_TCAM_ACCESS_MASK;
+	va |= (tid << TCAM_LINE_INDEX_OFFS);
+	va |= (0xc << TCAM_WORD_ENTRY_OFFS);
+
+	w32 = te->data.u.word[TCAM_LEN - 1] & 0xFFFF;
+	w32 |= (te->mask.u.word[TCAM_LEN - 1] << 16);
+
+	MV_MEMIO_LE32_WRITE(va, w32);
+	TCAM_DBG("%s: (last) 0x%8x <-- 0x%x\n", __func__, va, w32);
+
+	/* FIXME: perf hit */
+	if (te->ctrl.text[0]) {
+		TCAM_DBG("%s: (text) <-- %s\n", __func__, te->ctrl.text);
+		strncpy(tcam_text[tid], te->ctrl.text, TCAM_TEXT);
+		tcam_text[tid][TCAM_TEXT - 1] = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * tcam_hw_read - load TCAM entry from HW
+ * @tid: entry index
+ */
+int tcam_hw_read(struct tcam_entry *te, int tid)
+{
+	MV_U32 i, va, w32;
+
+	TCAM_DBG("%s: tid=0x%x\n", __func__, tid);
+	ERR_ON_OOR(tid >= MV_PNC_TCAM_SIZE());
+
+	te->ctrl.index = tid;
+
+	/* sram */
+	for (i = 0; i < SRAM_LEN; i++) {
+		va = (MV_U32) mvPncVirtBase;
+		va |= PNC_SRAM_ACCESS_MASK;
+		va |= (tid << TCAM_LINE_INDEX_OFFS);
+		va |= (i << TCAM_WORD_ENTRY_OFFS);
+
+		te->sram.word[i] = w32 = MV_MEMIO_LE32_READ(va);
+		TCAM_DBG("%s: (sram) 0x%8x --> 0x%x\n", __func__, va, w32);
+	}
+
+	/* tcam */
+	for (i = 0; i < (TCAM_LEN - 1); i++) {
+		va = (MV_U32) mvPncVirtBase;
+		va |= PNC_TCAM_ACCESS_MASK;
+		va |= (tid << TCAM_LINE_INDEX_OFFS);
+		va |= ((2 * i) << TCAM_WORD_ENTRY_OFFS);
+
+		te->data.u.word[i] = w32 = MV_MEMIO32_READ(va);
+		TCAM_DBG("%s: (tcam data) 0x%8x --> 0x%x\n", __func__, va, w32);
+	}
+
+	/* mask */
+	for (i = 0; i < (TCAM_LEN - 1); i++) {
+		va = (MV_U32) mvPncVirtBase;
+		va |= PNC_TCAM_ACCESS_MASK;
+		va |= (tid << TCAM_LINE_INDEX_OFFS);
+		va |= ((2 * i + 1) << TCAM_WORD_ENTRY_OFFS);
+
+		te->mask.u.word[i] = w32 = MV_MEMIO32_READ(va);
+		TCAM_DBG("%s: (tcam mask) 0x%8x --> 0x%x\n", __func__, va, w32);
+	}
+
+	va = (MV_U32) mvPncVirtBase;
+	va |= PNC_TCAM_ACCESS_MASK;
+	va |= (tid << TCAM_LINE_INDEX_OFFS);
+	va |= (0xc << TCAM_WORD_ENTRY_OFFS);
+
+	w32 = MV_MEMIO_LE32_READ(va);
+	te->data.u.word[TCAM_LEN - 1] = w32 & 0xFFFF;
+	te->mask.u.word[TCAM_LEN - 1] = w32 >> 16;
+	TCAM_DBG("%s: (last) 0x%8x --> 0x%x\n", __func__, va, w32);
+
+	va = (MV_U32) mvPncVirtBase;
+	va |= PNC_TCAM_ACCESS_MASK;
+	va |= (tid << TCAM_LINE_INDEX_OFFS);
+	va |= (0xd << TCAM_WORD_ENTRY_OFFS);
+
+	w32 = MV_MEMIO_LE32_READ(va);
+	te->ctrl.flags = w32 & TCAM_F_INV;
+	TCAM_DBG("%s: (inv) 0x%8x --> 0x%x\n", __func__, va, w32);
+
+	/* text */
+	TCAM_DBG("%s: (text) --> %s\n", __func__, tcam_text[tid]);
+	strncpy(te->ctrl.text, tcam_text[tid], TCAM_TEXT);
+	te->ctrl.text[TCAM_TEXT - 1] = 0;
+
+	return 0;
+}
+
+/*
+ * tcam_hw_record - record enable
+ */
+void tcam_hw_record(int port)
+{
+	TCAM_DBG("%s: port %d 0x%x <-- 1\n", __func__, port, MV_PNC_HIT_SEQ0_REG);
+	MV_REG_WRITE(MV_PNC_HIT_SEQ0_REG, (port << 1) | 1);
+}
+
+/*
+ * tcam_hw_hits - dump hit sequence
+ */
+int tcam_hw_hits(char *buf)
+{
+	MV_U32 i, off = 0;
+
+	off += mvOsSPrintf(buf + off, "seq hit\n");
+	off += mvOsSPrintf(buf + off, "--- ---\n");
+
+	i = MV_REG_READ(MV_PNC_HIT_SEQ0_REG);
+	off += mvOsSPrintf(buf + off, "0 - %d\n", (i >> 10) & 0x3FF);
+	off += mvOsSPrintf(buf + off, "1 - %d\n", (i >> 20) & 0x3FF);
+
+	i = MV_REG_READ(MV_PNC_HIT_SEQ1_REG);
+	off += mvOsSPrintf(buf + off, "2 - %d\n", (i >> 0) & 0x3FF);
+	off += mvOsSPrintf(buf + off, "3 - %d\n", (i >> 10) & 0x3FF);
+	off += mvOsSPrintf(buf + off, "4 - %d\n", (i >> 20) & 0x3FF);
+
+	i = MV_REG_READ(MV_PNC_HIT_SEQ2_REG);
+	off += mvOsSPrintf(buf + off, "5 - %d\n", (i >> 0) & 0x3FF);
+	off += mvOsSPrintf(buf + off, "6 - %d\n", (i >> 10) & 0x3FF);
+	off += mvOsSPrintf(buf + off, "7 - %d\n", (i >> 20) & 0x3FF);
+
+	return off;
+}
+
+void tcam_hw_debug(int en)
+{
+	tcam_ctl_flags = en;
+}
+
+/*
+ * tcam_hw_dump - print out TCAM registers
+ * @all - whether to dump all entries or valid only
+ */
+int tcam_hw_dump(int all)
+{
+	int i;
+	struct tcam_entry te;
+	char buff[1024];
+
+	for (i = 0; i < MV_PNC_TCAM_SIZE(); i++) {
+		tcam_sw_clear(&te);
+		tcam_hw_read(&te, i);
+		if (!all && (te.ctrl.flags & TCAM_F_INV))
+			continue;
+		tcam_sw_dump(&te, buff);
+		mvOsPrintf(buff);
+	}
+
+	return 0;
+}
+
+/******************************************************************************
+ *
+ * HW Init
+ *
+ ******************************************************************************
+ */
+int tcam_hw_init(void)
+{
+	int i;
+	MV_U32	regVal;
+	struct tcam_entry te;
+
+	/* Check TCAM size */
+	if (MV_PNC_TCAM_SIZE() > MV_PNC_TCAM_LINES) {
+		mvOsPrintf("MV_PNC_TCAM_SIZE()-%d must be less or equal than MV_PNC_TCAM_LINES\n", MV_PNC_TCAM_SIZE());
+		return -1;
+	}
+
+	/* Power on TCAM arrays accordingly with MV_PNC_TCAM_SIZE() */
+	regVal = MV_REG_READ(MV_PNC_TCAM_CTRL_REG);
+	for (i = 0; i < (MV_PNC_TCAM_LINES / MV_PNC_TCAM_ARRAY_SIZE); i++) {
+		if ((i * MV_PNC_TCAM_ARRAY_SIZE) < MV_PNC_TCAM_SIZE())
+			regVal |= MV_PNC_TCAM_POWER_UP(i); /* Power ON */
+		else
+			regVal &= ~MV_PNC_TCAM_POWER_UP(i); /* Power OFF */
+	}
+	MV_REG_WRITE(MV_PNC_TCAM_CTRL_REG, regVal);
+
+	tcam_sw_clear(&te);
+	sram_sw_set_lookup_done(&te, 1);
+
+	/* Perform full write */
+	tcam_ctl_flags = TCAM_F_WRITE;
+
+	for (i = 0; i < MV_PNC_TCAM_SIZE(); i++) {
+		sram_sw_set_flowid(&te, i, FLOW_CTRL_MASK);
+		tcam_sw_text(&te, "empty");
+		tcam_hw_write(&te, i);
+		tcam_hw_inv(i);
+	}
+
+	/* Back to partial write */
+	tcam_ctl_flags = 0;
+
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvTcam.h b/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvTcam.h
new file mode 100644
index 000000000000..70cb1f80fb18
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hal/pnc/mvTcam.h
@@ -0,0 +1,390 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __MV_TCAM_H__
+#define __MV_TCAM_H__
+
+/************************** NETA PNC Registers ******************************/
+
+#ifdef CONFIG_OF
+extern int pnc_reg_vbase;
+#define MV_PNC_REG_BASE			(pnc_reg_vbase)
+#endif
+extern unsigned int tcam_line_num;
+#define MV_PNC_TCAM_SIZE() (tcam_line_num)
+#ifdef CONFIG_MV_ETH_PNC_WOL
+#define TE_WOL_EOF (tcam_line_num - 1)
+#endif
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PNC_LOOP_CTRL_REG 				(MV_PNC_REG_BASE + 0x00)
+
+#define MV_PNC_TCAM_CTRL_REG 				(MV_PNC_REG_BASE + 0x04)
+
+#define MV_PNC_TCAM_POWER_UP_OFFS			0
+#define MV_PNC_TCAM_POWER_UP(array)			(1 << ((array) + MV_PNC_TCAM_POWER_UP_OFFS))
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PNC_INIT_OFFS_REG 				(MV_PNC_REG_BASE + 0x08)
+
+#define MV_PNC_PORT_BUF_INIT_BITS			6
+#define MV_PNC_PORT_BUF_INIT_MAX			((1 << MV_PNC_PORT_BUF_INIT_BITS) - 1)
+#define MV_PNC_PORT_BUF_INIT_MASK(port)		(MV_PNC_PORT_BUF_INIT_MAX << ((port) * MV_PNC_PORT_BUF_INIT_BITS))
+#define MV_PNC_PORT_BUF_INIT_VAL(port, val)	((val) << ((port) * MV_PNC_PORT_BUF_INIT_BITS))
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PNC_INIT_LOOKUP_REG 				(MV_PNC_REG_BASE + 0x0C)
+
+#define MV_PNC_PORT_LU_INIT_BITS			4
+#define MV_PNC_PORT_LU_INIT_MAX				((1 << MV_PNC_PORT_LU_INIT_BITS) - 1)
+#define MV_PNC_PORT_LU_INIT_MASK(port)		(MV_PNC_PORT_BUF_INIT_MAX << ((port) * MV_PNC_PORT_LU_INIT_BITS))
+#define MV_PNC_PORT_LU_INIT_VAL(port, val)	((val) << ((port) * MV_PNC_PORT_LU_INIT_BITS))
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PNC_CAUSE_REG 					(MV_PNC_REG_BASE + 0x10)
+#define MV_PNC_MASK_REG 					(MV_PNC_REG_BASE + 0x14)
+
+#define MV_PNC_HIT_SEQ0_REG					(MV_PNC_REG_BASE + 0x18)
+
+#define MV_PNC_HIT_ENABLE_BIT				0
+#define MV_PNC_HIT_ENABLE_MASK				(1 << MV_PNC_HIT_ENABLE_BIT)
+
+#define MV_PNC_HIT_PORT_OFFS				1
+#define MV_PNC_HIT_PORT_BITS				3
+#define MV_PNC_HIT_PORT_MAX					((1 << MV_PNC_HIT_PORT_BITS) - 1)
+#define MV_PNC_HIT_PORT_MASK				(MV_PNC_HIT_PORT_MAX << MV_PNC_HIT_PORT_OFFS)
+#define MV_PNC_HIT_PORT(port)				(port << MV_PNC_HIT_PORT_OFFS)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PNC_HIT_SEQ1_REG					(MV_PNC_REG_BASE + 0x1C)
+#define MV_PNC_HIT_SEQ2_REG					(MV_PNC_REG_BASE + 0x20)
+#define MV_PNC_XBAR_RET_REG					(MV_PNC_REG_BASE + 0x24)
+/*-------------------------------------------------------------------------------*/
+
+
+#ifdef MV_ETH_PNC_AGING
+#define MV_PNC_AGING_MAX_GROUP              4
+
+/* Aging Control register */
+#define MV_PNC_AGING_CTRL_REG               (MV_PNC_REG_BASE + 0x28)
+
+#define MV_PNC_AGING_RESET_ON_READ_BIT      0
+#define MV_PNC_AGING_RESET_ON_READ_MASK     (1 << MV_PNC_AGING_RESET_ON_READ_BIT)
+
+#define MV_PNC_AGING_SCAN_VALID_BIT         1
+#define MV_PNC_AGING_SCAN_VALID_MASK        (1 << MV_PNC_AGING_SCAN_VALID_BIT)
+
+#define MV_PNC_AGING_GROUP_RESET_OFFS       2
+#define MV_PNC_AGING_GROUP_RESET_MASK       (0xF << MV_PNC_AGING_GROUP_RESET_OFFS)
+#define MV_PNC_AGING_GROUP_RESET(gr)        (1 << (gr + MV_PNC_AGING_GROUP_RESET_OFFS))
+
+#define MV_PNC_AGING_SCAN_START_BIT         6
+#define MV_PNC_AGING_SCAN_START_MASK        (1 << MV_PNC_AGING_SCAN_START_BIT)
+
+#define MV_PNC_AGING_SCAN_DISABLE_BIT       7
+#define MV_PNC_AGING_SCAN_DISABLE_MASK      (1 << MV_PNC_AGING_SCAN_DISABLE_BIT)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PNC_AGING_LO_THRESH_REG(gr)      (MV_PNC_REG_BASE + 0x2C + ((gr) << 2))
+#define MV_PNC_AGING_HI_THRESH_REG          (MV_PNC_REG_BASE + 0x3C)
+/*-------------------------------------------------------------------------------*/
+#endif /* MV_ETH_PNC_AGING */
+
+#ifdef MV_ETH_PNC_LB
+
+#define MV_PNC_LB_TBL_ACCESS_REG            (MV_PNC_REG_BASE + 0x40)
+
+#define MV_PNC_LB_TBL_ADDR_OFFS             0
+#define MV_PNC_LB_TBL_ADDR_MASK             (0x3F << MV_PNC_LB_TBL_ADDR_OFFS)
+
+#define MV_PNC_LB_TBL_DATA_OFFS             6
+#define MV_PNC_LB_TBL_DATA_MASK             (0xFFF << MV_PNC_LB_TBL_DATA_OFFS)
+
+#define MV_PNC_LB_TBL_WRITE_TRIG_BIT        18
+#define MV_PNC_LB_TBL_WRITE_TRIG_MASK       (1 << MV_PNC_LB_TBL_WRITE_TRIG_BIT)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PNC_LB_CRC_INIT_REG              (MV_PNC_REG_BASE + 0x44)
+#endif /* MV_ETH_PNC_LB */
+
+
+#define MV_PNC_TCAM_ARRAY_SIZE		256
+#define MV_PNC_TOTAL_DATA_SIZE		120
+#define MV_PNC_LOOKUP_DATA_SIZE		24
+
+#ifdef MV_ETH_PNC_NEW
+#define PNC_TCAM_ACCESS_MASK        (BIT18)
+#define PNC_SRAM_ACCESS_MASK        (BIT18 | BIT16)
+#define PNC_AGING_ACCESS_MASK       (BIT18 | BIT17)
+#define SRAM_LEN                    5 /* SRAM in words */
+#define SRAM_FMT                    "%5.5x %8.8x %8.8x %8.8x %8.8x"
+#define SRAM_VAL(p)                 p[4] & 0xFFFFF, p[3], p[2], p[1], p[0]
+#else
+#define PNC_TCAM_ACCESS_MASK        (BIT17)
+#define PNC_SRAM_ACCESS_MASK        (BIT17 | BIT16)
+#define SRAM_LEN                    4 /* SRAM in words */
+#define SRAM_FMT                    "%8.8x %8.8x %8.8x %8.8x"
+#define SRAM_VAL(p)                 p[3], p[2], p[1], p[0]
+#endif /* MV_ETH_PNC_NEW */
+
+#define TCAM_LEN                    7 /* TCAM key/mask in words */
+
+#define TCAM_LINE_INDEX_OFFS	6
+#define TCAM_WORD_ENTRY_OFFS	2
+
+#define AI_WORD					6
+#define AI_OFFS					0
+#define AI_BITS  				7
+#define AI_MASK					((1 << AI_BITS) - 1)
+
+#define PORT_WORD				6
+#define PORT_OFFS				7
+#define PORT_BITS  				5
+#define PORT_MASK				((1 << PORT_BITS) - 1)
+
+#define LU_WORD					6
+#define LU_OFFS					12
+#define LU_BITS  				4
+#define LU_MASK					((1 << LU_BITS) - 1)
+
+#ifdef MV_ETH_PNC_NEW
+#define RI_EXTRA_BITS  		    12
+#define RI_EXTRA_MASK			((1 << RI_EXTRA_BITS) - 1)
+
+#define FLOW_CTRL_BITS          8
+#define FLOW_PART_BITS          4
+#else
+#define FLOW_CTRL_BITS          2
+#define FLOW_PART_BITS          16
+#endif /* MV_ETH_PNC_NEW */
+
+#define FLOW_CTRL_MASK          ((1 << FLOW_CTRL_BITS) - 1)
+#define FLOW_CTRL_HALF_MASK     ((1 << (FLOW_CTRL_BITS / 2)) - 1)
+#define FLOW_PART_MASK          ((1 << FLOW_PART_BITS) - 1)
+
+#define RI_BITS  				24
+#define RI_MASK					((1 << RI_BITS) - 1)
+
+#define SHIFT_VAL_BITS			7
+#define SHIFT_VAL_MASK			((1 << SHIFT_VAL_BITS) - 1)
+#define SHIFT_IDX_BITS			3
+#define SHIFT_IDX_MASK			((1 << SHIFT_IDX_BITS) - 1)
+#define RXQ_BITS				3
+#define RXQ_MASK				((1 << RXQ_BITS) - 1)
+
+#define LB_QUEUE_BITS           2
+#define LB_QUEUE_MASK			((1 << LB_QUEUE_BITS) - 1)
+#define LB_DISABLE_VALUE		0
+#define LB_2_TUPLE_VALUE		1
+#define LB_4_TUPLE_VALUE		2
+
+#define FLOW_VALUE_OFFS 		0   /* 32 bits */
+#define FLOW_CTRL_OFFS			32  /* 8 bits */
+
+/* PNC SRAM Layout */
+#ifdef MV_ETH_PNC_NEW
+#define RI_VALUE_OFFS 			40  /* 24 bits */
+#define RI_MASK_OFFS  			64  /* 24 bits */
+
+#define RI_EXTRA_VALUE_OFFS 	88  /* 12 bits */
+#define RI_EXTRA_CTRL_OFFS  	100 /* 6 bits */
+
+#define SHIFT_VAL_OFFS 			106	/* 7 bits - shift update value offset */
+#define SHIFT_IDX_OFFS 			113	/* 3 bits - shift update index offset */
+#define RXQ_INFO_OFFS  			116 /* 1 bit */
+#define RXQ_QUEUE_OFFS 			117 /* 3 bits */
+#define LB_QUEUE_OFFS           120 /* 2 bits - load balancing queue info */
+#define NEXT_LU_SHIFT_OFFS  	122 /* 3 bits */
+#define LU_DONE_OFFS  			125 /* 1 bit */
+#define KEY_TYPE_OFFS           126 /* 4 bits */
+#define AI_VALUE_OFFS 			130 /* 7 bits */
+#define AI_MASK_OFFS  			137 /* 7 bits */
+#define LU_ID_OFFS  			144 /* 4 bits */
+#else /* Old PNC version (z1) */
+#define RI_VALUE_OFFS 			34
+#define RI_MASK_OFFS  			58
+#define SHIFT_VAL_OFFS 			82	/* shift update value offset */
+#define SHIFT_IDX_OFFS 			89	/* shift update index offset */
+#define RXQ_INFO_OFFS  			92
+#define RXQ_QUEUE_OFFS 			93
+#define NEXT_LU_SHIFT_OFFS  	96
+#define LU_DONE_OFFS  			99
+#define AI_VALUE_OFFS 			104
+#define AI_MASK_OFFS  			111
+#define LU_ID_OFFS  			118
+#endif /* MV_ETH_PNC_NEW */
+
+#define SHIFT_IP4_HLEN			126 /* IPv4 dynamic shift index */
+#define SHIFT_IP6_HLEN			127 /* IPv6 dynamic shift index */
+
+/*
+ * TCAM misc/control
+ */
+#define TCAM_F_INV 				1
+#define TCAM_TEXT				16
+
+/*
+ * TCAM control
+ */
+struct tcam_ctrl {
+	unsigned int index;
+	unsigned int flags;
+	unsigned char text[TCAM_TEXT];
+};
+
+/*
+ * TCAM key
+ */
+struct tcam_data {
+	union {
+		unsigned int word[TCAM_LEN];
+		unsigned char byte[TCAM_LEN*4];
+	} u;
+};
+
+/*
+ * TCAM mask
+ */
+struct tcam_mask {
+	union {
+		unsigned int word[TCAM_LEN];
+		unsigned char byte[TCAM_LEN*4];
+	} u;
+};
+
+/*
+ * SRAM entry
+ */
+struct sram_entry {
+	unsigned int word[SRAM_LEN];
+};
+
+/*
+ * TCAM entry
+ */
+struct tcam_entry {
+	struct tcam_data data;
+	struct tcam_mask mask;
+	struct sram_entry sram;
+	struct tcam_ctrl ctrl;
+}  __attribute__((packed));
+
+#ifdef CONFIG_MV_ETH_PNC
+/* PnC Global variale */
+extern char tcam_text[MV_PNC_TCAM_LINES][TCAM_TEXT];
+extern MV_U8 *mvPncVirtBase;
+#endif
+
+/*
+ * TCAM Low Level API
+ */
+#ifdef MV_ETH_PNC_NEW
+void sram_sw_set_rinfo_extra(struct tcam_entry *te, unsigned int ri_extra);
+void sram_sw_set_load_balance(struct tcam_entry *te, unsigned int value);
+#endif /* MV_ETH_PNC_NEW */
+
+struct tcam_entry *tcam_sw_alloc(unsigned int section);
+void tcam_sw_free(struct tcam_entry *te);
+int tcam_sw_dump(struct tcam_entry *te, char *buf);
+void tcam_sw_clear(struct tcam_entry *te);
+
+void tcam_sw_set_port(struct tcam_entry *te, unsigned int port, unsigned int mask);
+void tcam_sw_get_port(struct tcam_entry *te, unsigned int *port, unsigned int *mask);
+
+void tcam_sw_set_lookup_all(struct tcam_entry *te);
+void tcam_sw_set_lookup(struct tcam_entry *te, unsigned int lookup);
+void tcam_sw_get_lookup(struct tcam_entry *te, unsigned int *lookup, unsigned int *mask);
+void tcam_sw_set_ainfo(struct tcam_entry *te, unsigned int bits, unsigned int mask);
+void tcam_sw_set_byte(struct tcam_entry *te, unsigned int offset, unsigned char data);
+
+int  tcam_sw_cmp_byte(struct tcam_entry *te, unsigned int offset, unsigned char data);
+int  tcam_sw_cmp_bytes(struct tcam_entry *te, unsigned int offset, unsigned int size, unsigned char *data);
+
+void tcam_sw_set_mask(struct tcam_entry *te, unsigned int offset, unsigned char mask);
+void sram_sw_set_rinfo(struct tcam_entry *te, unsigned int rinfo, unsigned int mask);
+void sram_sw_set_shift_update(struct tcam_entry *te, unsigned int index, unsigned int value);
+void sram_sw_set_rxq(struct tcam_entry *te, unsigned int rxq, unsigned int force);
+
+unsigned int sram_sw_get_rxq(struct tcam_entry *te, unsigned int *force);
+
+void sram_sw_set_next_lookup_shift(struct tcam_entry *te, unsigned int index);
+void sram_sw_set_lookup_done(struct tcam_entry *te, unsigned int value);
+void sram_sw_set_next_lookup_shift(struct tcam_entry *te, unsigned int value);
+void sram_sw_set_ainfo(struct tcam_entry *te, unsigned int bits, unsigned int mask);
+void sram_sw_set_next_lookup(struct tcam_entry *te, unsigned int lookup);
+void sram_sw_set_flowid(struct tcam_entry *te, unsigned int flowid, unsigned int mask);
+void sram_sw_set_flowid_partial(struct tcam_entry *te, unsigned int flowid, unsigned int idx);
+void tcam_sw_text(struct tcam_entry *te, char *text);
+int tcam_hw_write(struct tcam_entry *te, int tid);
+int tcam_hw_read(struct tcam_entry *te, int tid);
+void tcam_hw_inv(int tid);
+void tcam_hw_inv_all(void);
+void tcam_hw_debug(int);
+int tcam_hw_dump(int);
+int tcam_hw_hits(char *buf);
+void tcam_hw_record(int);
+int tcam_hw_init(void);
+
+#endif
+
diff --git a/drivers/net/ethernet/mvebu_net/neta/hwf/hwf_bm.c b/drivers/net/ethernet/mvebu_net/neta/hwf/hwf_bm.c
new file mode 100644
index 000000000000..818b0c655439
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hwf/hwf_bm.c
@@ -0,0 +1,190 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include "gbe/mvNeta.h"
+#include "bm/mvBm.h"
+
+#include "net_dev/mv_netdev.h"
+
+static MV_BM_POOL	hwfBmPool[MV_BM_POOLS];
+
+
+static int mv_eth_hwf_pool_add(MV_BM_POOL *pBmPool, int bufNum)
+{
+	int      i;
+	void     *pVirt;
+	MV_ULONG physAddr;
+
+	/* Check total number of buffers doesn't exceed capacity */
+	if ((bufNum < 0) ||
+		((bufNum + pBmPool->bufNum) > (pBmPool->capacity))) {
+
+		mvOsPrintf("%s: to many %d buffers for BM pool #%d: capacity=%d, buf_num=%d\n",
+			   __func__, bufNum, pBmPool->pool, pBmPool->capacity, pBmPool->bufNum);
+		return 0;
+	}
+	/* Allocate buffers for the pool */
+	for (i = 0; i < bufNum; i++) {
+
+		pVirt = mvOsIoCachedMalloc(NULL, pBmPool->bufSize, &physAddr, NULL);
+		if (pVirt == NULL) {
+			mvOsPrintf("%s: Warning! Not all buffers of %d bytes allocated\n",
+						__func__, pBmPool->bufSize);
+			break;
+		}
+		mvBmPoolPut(pBmPool->pool, (MV_ULONG) physAddr);
+	}
+	pBmPool->bufNum += i;
+
+	mvOsPrintf("BM pool #%d for HWF: bufSize=%4d - %4d of %4d buffers added\n",
+	       pBmPool->pool, pBmPool->bufSize, i, bufNum);
+
+	return i;
+}
+
+/*******************************************************************************
+ * mv_eth_hwf_bm_create - create BM pools used by the port for HWF only
+ *
+ * INPUT:
+ *       int        port	- port number
+ *
+ * RETURN:   MV_STATUS
+ *               MV_OK - Success, Others - Failure
+ *
+ *******************************************************************************/
+MV_STATUS mv_eth_hwf_bm_create(int port, int mtuPktSize)
+{
+	static bool		isFirst = true;
+	MV_BM_POOL		*pBmPool;
+	int				long_pool, short_pool;
+	int				long_buf_size, short_buf_size;
+
+	/* Check validity of the parameters */
+	if (mvNetaPortCheck(port))
+		return MV_FAIL;
+
+	/* For the first time - clean hwfBmPool array */
+	if (isFirst == true) {
+		memset(&hwfBmPool, 0, sizeof(hwfBmPool));
+		isFirst = false;
+	}
+
+	long_pool = mv_eth_bm_config_long_pool_get(port);
+	/* Check validity of the parameters */
+	if (mvNetaMaxCheck(long_pool,  MV_BM_POOLS, "bm pool"))
+		return MV_FAIL;
+
+	/* For HWF Packet offset in the packet is 8 bytes */
+	long_buf_size = mv_eth_bm_config_pkt_size_get(long_pool);
+	if (long_buf_size == 0)
+		long_buf_size = mtuPktSize + 8;
+
+	/* Check validity of the parameters */
+	if (long_buf_size < (mtuPktSize + 8))
+		return MV_FAIL;
+
+	/* Create long pool */
+	pBmPool = &hwfBmPool[long_pool];
+	if (pBmPool->pVirt == NULL) {
+		/* Allocate new pool */
+		pBmPool->pVirt = mv_eth_bm_pool_create(long_pool, MV_BM_POOL_CAP_MAX, &pBmPool->physAddr);
+		if (pBmPool->pVirt == NULL) {
+			mvOsPrintf("%s: Can't allocate %d bytes for Long pool #%d of port #%d\n",
+					__func__, MV_BM_POOL_CAP_MAX * sizeof(MV_U32), long_pool, port);
+			return MV_OUT_OF_CPU_MEM;
+		}
+		pBmPool->pool = long_pool;
+		pBmPool->capacity = MV_BM_POOL_CAP_MAX;
+		pBmPool->bufSize = long_buf_size;
+		mvNetaBmPoolBufSizeSet(port, long_pool, long_buf_size);
+	} else {
+		/* Share pool with other port - check buffer size */
+		if (long_buf_size > pBmPool->bufSize) {
+			/* The BM pool doesn't match the mtuPktSize */
+			mvOsPrintf("%s: longBufSize=%d is too match for the pool #%d (%d bytes)\n",
+						__func__, long_buf_size, pBmPool->pool, pBmPool->bufSize);
+			return MV_FAIL;
+		}
+	}
+	mv_eth_hwf_pool_add(pBmPool, mv_eth_bm_config_long_buf_num_get(port));
+
+	/* Create short pool */
+	short_pool = mv_eth_bm_config_short_pool_get(port);
+	short_buf_size = mv_eth_bm_config_pkt_size_get(short_pool);
+	if (short_pool != long_pool) {
+		pBmPool = &hwfBmPool[short_pool];
+		if (pBmPool->pVirt == NULL) {
+			/* Allocate new pool */
+			pBmPool->pVirt = mv_eth_bm_pool_create(short_pool, MV_BM_POOL_CAP_MAX, &pBmPool->physAddr);
+			if (pBmPool->pVirt == NULL) {
+				mvOsPrintf("%s: Can't allocate %d bytes for Short pool #%d of port #%d\n",
+						__func__, MV_BM_POOL_CAP_MAX * sizeof(MV_U32), short_pool, port);
+				return MV_OUT_OF_CPU_MEM;
+			}
+			pBmPool->pool = short_pool;
+			pBmPool->capacity = MV_BM_POOL_CAP_MAX;
+			pBmPool->bufSize = short_buf_size;
+			mvNetaBmPoolBufSizeSet(port, short_pool, short_buf_size);
+		} else {
+			/* Share pool with other port - check buffer size */
+			if (short_buf_size > pBmPool->bufSize) {
+				/* The BM pool doesn't match the mtuPktSize */
+				mvOsPrintf("%s: shortBufSize=%d is too match for the pool #%d (%d bytes)\n",
+							__func__, short_buf_size, pBmPool->pool, pBmPool->bufSize);
+				return MV_FAIL;
+			}
+		}
+		/* Add buffers to short pool */
+		mv_eth_hwf_pool_add(pBmPool, mv_eth_bm_config_short_buf_num_get(port));
+	}
+	mvNetaHwfBmPoolsSet(port, short_pool, long_pool);
+	return MV_OK;
+}
+
+void mv_hwf_bm_dump(void)
+{
+	int          i;
+	MV_BM_POOL   *bmPool;
+
+	mvOsPrintf("HWF BM Pools configuration\n");
+	mvOsPrintf("pool:    capacity    bufSize    bufNum      virtPtr       physAddr\n");
+	for (i = 0; i < MV_BM_POOLS; i++) {
+		bmPool = &hwfBmPool[i];
+		if (bmPool->pVirt)
+			mvOsPrintf("  %2d:     %4d       %4d       %4d      %p      0x%08x\n",
+						bmPool->pool, bmPool->capacity, bmPool->bufSize, bmPool->bufNum,
+						bmPool->pVirt, (unsigned)bmPool->physAddr);
+	}
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/neta/hwf/hwf_sysfs.c b/drivers/net/ethernet/mvebu_net/neta/hwf/hwf_sysfs.c
new file mode 100644
index 000000000000..308ea1a4dccc
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/hwf/hwf_sysfs.c
@@ -0,0 +1,154 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include "gbe/mvNeta.h"
+#include "net_dev/mv_netdev.h"
+
+static ssize_t hwf_help(char *buf)
+{
+	int off = 0;
+
+	off += mvOsSPrintf(buf+off, "cat                      help  - print this help\n");
+#ifndef CONFIG_MV_ETH_BM_CPU
+	off += mvOsSPrintf(buf+off, "cat                      bm    - print HWF BM information\n");
+#endif
+	off += mvOsSPrintf(buf+off, "echo rxp p txp         > regs  - print HWF registers of port <p>\n");
+	off += mvOsSPrintf(buf+off, "echo rxp p txp         > cntrs - print HWF counters of port <p>\n");
+	off += mvOsSPrintf(buf+off, "echo rxp p txp txq en  > en    - enable HWF from <rxp> to specific <txq>\n");
+	off += mvOsSPrintf(buf+off, "echo rxp p txp txq a b > drop  - set HWF drop threshold <a> and Random bits <b>\n");
+
+	return off;
+}
+
+static ssize_t hwf_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	const char   *name = attr->attr.name;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "help"))
+		return hwf_help(buf);
+#ifndef CONFIG_MV_ETH_BM_CPU
+	else if (!strcmp(name, "bm"))
+		mv_hwf_bm_dump();
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+	return 0;
+}
+static ssize_t hwf_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0, rxp = 0, p = 0, txp = 0, txq = 0, a = 0, b = 0;
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%d %d %d %d %d %d", &rxp, &p, &txp, &txq, &a, &b);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "regs")) {
+		mvNetaHwfRxpRegs(rxp);
+		mvNetaHwfTxpRegs(rxp, p, txp);
+	} else if (!strcmp(name, "cntrs")) {
+		mvNetaHwfTxpCntrs(rxp, p, txp);
+	} else if (!strcmp(name, "en")) {
+		if (a)        {
+			/* Set txp/txq ownership to HWF */
+			if (mv_eth_ctrl_txq_hwf_own(p, txp, txq, rxp)) {
+				printk(KERN_ERR "%s failed: p=%d, txp=%d, txq=%d\n",
+					__func__, p, txp, txq);
+				return -EINVAL;
+			}
+		} else
+			mv_eth_ctrl_txq_hwf_own(p, txp, txq, -1);
+
+		mvNetaHwfTxqEnable(rxp, p, txp, txq, a);
+	} else if (!strcmp(name, "drop")) {
+		mvNetaHwfTxqDropSet(rxp, p, txp, txq, a, b);
+	} else
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(regs,  S_IWUSR, hwf_show, hwf_store);
+static DEVICE_ATTR(cntrs, S_IWUSR, hwf_show, hwf_store);
+static DEVICE_ATTR(en,    S_IWUSR, hwf_show, hwf_store);
+static DEVICE_ATTR(drop,  S_IWUSR, hwf_show, hwf_store);
+static DEVICE_ATTR(bm,    S_IRUSR, hwf_show, hwf_store);
+static DEVICE_ATTR(help,  S_IRUSR, hwf_show, hwf_store);
+
+static struct attribute *hwf_attrs[] = {
+	&dev_attr_regs.attr,
+	&dev_attr_cntrs.attr,
+	&dev_attr_en.attr,
+	&dev_attr_drop.attr,
+	&dev_attr_bm.attr,
+	&dev_attr_help.attr,
+	NULL
+};
+
+static struct attribute_group hwf_group = {
+	.name = "hwf",
+	.attrs = hwf_attrs,
+};
+
+int mv_neta_hwf_sysfs_init(struct kobject *neta_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(neta_kobj, &hwf_group);
+	if (err)
+		printk(KERN_INFO "sysfs group %s failed %d\n", hwf_group.name, err);
+
+	return err;
+}
+
+int mv_neta_hwf_sysfs_exit(struct kobject *neta_kobj)
+{
+	sysfs_remove_group(neta_kobj, &hwf_group);
+
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/neta/l2fw/l2fw_sysfs.c b/drivers/net/ethernet/mvebu_net/neta/l2fw/l2fw_sysfs.c
new file mode 100644
index 000000000000..78f261f16738
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/l2fw/l2fw_sysfs.c
@@ -0,0 +1,259 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include "mvTypes.h"
+#include "mv_eth_l2fw.h"
+#ifdef CONFIG_MV_ETH_L2SEC
+#include "mv_eth_l2sec.h"
+#endif
+#include "linux/inet.h"
+
+
+static ssize_t l2fw_help(char *buf)
+{
+	int off = 0;
+
+	off += sprintf(buf+off, "cat rules_dump                - display L2fw rules DB\n");
+	off += sprintf(buf+off, "cat ports_dump                - display L2fw ports DB\n");
+	off += sprintf(buf+off, "cat stats                     - show debug information\n");
+
+	/* inputs in decimal */
+	off += sprintf(buf+off, "echo rxp txp mode > l2fw      - set L2FW mode: 0-dis,1-as_is,2-swap,3-copy,4-ipsec\n");
+#ifdef CONFIG_MV_INCLUDE_XOR
+	off += sprintf(buf+off, "echo rxp thresh   > l2fw_xor  - set XOR threshold in bytes for port <rxp>\n");
+	#endif
+	off += sprintf(buf+off, "echo rxp en       > lookup    - enable/disable hash lookup for <rxp>\n");
+	off += sprintf(buf+off, "echo 1            > flush     - flush L2fw rules DB\n");
+
+	/* inputs in hex */
+	off += sprintf(buf+off, "echo sip dip txp  > rule_add  - set rule for SIP and DIP pair. [x.x.x.x]\n");
+
+#ifdef CONFIG_MV_ETH_L2SEC
+	off += sprintf(buf+off, "echo p chan       > cesa_chan - set cesa channel <chan> for port <p>.\n");
+#endif
+
+	return off;
+}
+
+static ssize_t l2fw_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+    const char	*name = attr->attr.name;
+    int             off = 0;
+
+    if (!capable(CAP_NET_ADMIN))
+	return -EPERM;
+
+	if (!strcmp(name, "help")) {
+	    off = l2fw_help(buf);
+		return off;
+	} else if (!strcmp(name, "rules_dump")) {
+		l2fw_rules_dump();
+		return off;
+	} else if (!strcmp(name, "ports_dump")) {
+		l2fw_ports_dump();
+		return off;
+	} else if (!strcmp(name, "stats")) {
+		l2fw_stats();
+		return off;
+	}
+
+	return off;
+}
+
+
+
+static ssize_t l2fw_hex_store(struct device *dev, struct device_attribute *attr,
+				const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    addr1, addr2;
+	int port;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+	err = addr1 = addr2 = port = 0;
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "flush")) {
+		l2fw_flush();
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t l2fw_ip_store(struct device *dev,
+			 struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char *name = attr->attr.name;
+
+	unsigned int err = 0;
+	unsigned int srcIp = 0, dstIp = 0;
+	unsigned char *sipArr = (unsigned char *)&srcIp;
+	unsigned char *dipArr = (unsigned char *)&dstIp;
+	int port;
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%hhu.%hhu.%hhu.%hhu %hhu.%hhu.%hhu.%hhu %d",
+		sipArr, sipArr+1, sipArr+2, sipArr+3,
+		dipArr, dipArr+1, dipArr+2, dipArr+3, &port);
+
+	printk(KERN_INFO "0x%x->0x%x in %s\n", srcIp, dstIp, __func__);
+	local_irq_save(flags);
+
+	if (!strcmp(name, "l2fw_add_ip"))
+		l2fw_add(srcIp, dstIp, port);
+	else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+
+
+static ssize_t l2fw_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char	*name = attr->attr.name;
+	int             err;
+
+	unsigned int    a, b, c;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	err = a = b = c = 0;
+	sscanf(buf, "%d %d %d", &a, &b, &c);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "lookup"))
+		l2fw_lookupEn(a, b);
+	else if (!strcmp(name, "l2fw"))
+		l2fw(c, a, b);
+#ifdef CONFIG_MV_INCLUDE_XOR
+	else if (!strcmp(name, "l2fw_xor"))
+		l2fw_xor(a, b);
+#endif
+#ifdef CONFIG_MV_ETH_L2SEC
+	else if (!strcmp(name, "cesa_chan"))
+		err = mv_l2sec_set_cesa_chan(a, b);
+#endif
+	local_irq_restore(flags);
+
+	if (err)
+		mvOsPrintf("%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+
+}
+
+
+static DEVICE_ATTR(l2fw,		S_IWUSR, l2fw_show, l2fw_store);
+#ifdef CONFIG_MV_INCLUDE_XOR
+static DEVICE_ATTR(l2fw_xor,		S_IWUSR, l2fw_show, l2fw_store);
+#endif
+static DEVICE_ATTR(lookup,		S_IWUSR, l2fw_show, l2fw_store);
+static DEVICE_ATTR(l2fw_add_ip,		S_IWUSR, l2fw_show, l2fw_ip_store);
+static DEVICE_ATTR(help,		S_IRUSR, l2fw_show,  NULL);
+static DEVICE_ATTR(rules_dump,		S_IRUSR, l2fw_show,  NULL);
+static DEVICE_ATTR(ports_dump,		S_IRUSR, l2fw_show,  NULL);
+static DEVICE_ATTR(flush,		S_IWUSR, NULL,	l2fw_hex_store);
+static DEVICE_ATTR(stats,		S_IRUSR, l2fw_show, NULL);
+
+#ifdef CONFIG_MV_ETH_L2SEC
+static DEVICE_ATTR(cesa_chan,		S_IWUSR, NULL,  l2fw_store);
+#endif
+
+
+
+static struct attribute *l2fw_attrs[] = {
+	&dev_attr_l2fw.attr,
+#ifdef CONFIG_MV_INCLUDE_XOR
+	&dev_attr_l2fw_xor.attr,
+#endif
+	&dev_attr_lookup.attr,
+	&dev_attr_l2fw_add_ip.attr,
+	&dev_attr_help.attr,
+	&dev_attr_rules_dump.attr,
+	&dev_attr_ports_dump.attr,
+	&dev_attr_flush.attr,
+	&dev_attr_stats.attr,
+#ifdef CONFIG_MV_ETH_L2SEC
+	&dev_attr_cesa_chan.attr,
+#endif
+	NULL
+};
+
+static struct attribute_group l2fw_group = {
+	.name = "l2fw",
+	.attrs = l2fw_attrs,
+};
+
+int mv_neta_l2fw_sysfs_init(struct kobject *neta_kobj)
+{
+	int err = 0;
+
+	err = sysfs_create_group(neta_kobj, &l2fw_group);
+	if (err)
+		printk(KERN_INFO "sysfs group %s failed %d\n", l2fw_group.name, err);
+
+	return err;
+}
+
+int mv_neta_l2fw_sysfs_exit(struct kobject *neta_kobj)
+{
+	sysfs_remove_group(neta_kobj, &l2fw_group);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/mvebu_net/neta/l2fw/mv_eth_l2fw.c b/drivers/net/ethernet/mvebu_net/neta/l2fw/mv_eth_l2fw.c
new file mode 100644
index 000000000000..cfd07e86df9c
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/l2fw/mv_eth_l2fw.c
@@ -0,0 +1,928 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+
+#ifdef CONFIG_MV_INCLUDE_XOR
+#include "xor/mvXor.h"
+#include "xor/mvXorRegs.h"
+#include "mv_hal_if/mvSysXorApi.h"
+#endif /* CONFIG_MV_INCLUDE_XOR */
+
+#include "mvOs.h"
+#include "mv_eth_l2fw.h"
+#include "gbe/mvNeta.h"
+#include "gbe/mvNetaRegs.h"
+#include "mvDebug.h"
+#ifdef CONFIG_ARCH_MVEBU
+#include "mvNetConfig.h"
+#else /* CONFIG_ARCH_MVEBU */
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#endif /* CONFIG_ARCH_MVEBU */
+
+#include "gbe/mvNeta.h"
+
+#ifdef CONFIG_MV_ETH_L2SEC
+#include "mv_eth_l2sec.h"
+#endif
+
+static int numHashEntries;
+
+struct eth_pbuf *mv_eth_pool_get(struct bm_pool *pool);
+
+static int mv_eth_ports_l2fw_num;
+
+static L2FW_RULE **l2fw_hash = NULL;
+
+static MV_U32 l2fw_jhash_iv;
+
+#ifdef CONFIG_MV_INCLUDE_XOR
+static MV_XOR_DESC *eth_xor_desc;
+static MV_LONG      eth_xor_desc_phys_addr;
+#endif /* CONFIG_MV_INCLUDE_XOR */
+
+struct eth_port_l2fw **mv_eth_ports_l2fw;
+static inline int       mv_eth_l2fw_rx(struct eth_port *pp, int rx_todo, int rxq);
+static inline MV_STATUS mv_eth_l2fw_tx(struct eth_pbuf *pkt, struct eth_port *pp,
+					   int withXor, struct neta_rx_desc *rx_desc);
+
+
+static L2FW_RULE *l2fw_lookup(MV_U32 srcIP, MV_U32 dstIP)
+{
+	MV_U32 hash;
+	L2FW_RULE *rule;
+
+	hash = mv_jhash_3words(srcIP, dstIP, (MV_U32) 0, l2fw_jhash_iv);
+	hash &= L2FW_HASH_MASK;
+	rule = l2fw_hash[hash];
+
+#ifdef CONFIG_MV_ETH_L2FW_DEBUG
+	if (rule)
+		printk(KERN_INFO "rule is not NULL in %s\n", __func__);
+	else
+		printk(KERN_INFO "rule is NULL in %s\n", __func__);
+#endif
+	while (rule) {
+		if ((rule->srcIP == srcIP) && (rule->dstIP == dstIP))
+			return rule;
+
+		rule = rule->next;
+	}
+
+	return NULL;
+}
+
+void l2fw_show_numHashEntries(void)
+{
+	mvOsPrintf("number of Hash Entries is %d \n", numHashEntries);
+
+}
+
+
+void l2fw_flush(void)
+{
+	MV_U32 i = 0;
+	mvOsPrintf("\nFlushing L2fw Rule Database: \n");
+	mvOsPrintf("*******************************\n");
+	for (i = 0; i < L2FW_HASH_SIZE; i++)
+		l2fw_hash[i] = NULL;
+	numHashEntries = 0;
+}
+
+
+void l2fw_rules_dump(void)
+{
+	MV_U32 i = 0;
+	L2FW_RULE *currRule;
+	MV_U8	  *srcIP, *dstIP;
+
+	mvOsPrintf("\nPrinting L2fw Rule Database: \n");
+	mvOsPrintf("*******************************\n");
+
+	for (i = 0; i < L2FW_HASH_SIZE; i++) {
+		currRule = l2fw_hash[i];
+		srcIP = (MV_U8 *)&(currRule->srcIP);
+		dstIP = (MV_U8 *)&(currRule->dstIP);
+
+		while (currRule != NULL) {
+			mvOsPrintf("%u.%u.%u.%u->%u.%u.%u.%u     out port=%d (hash=%x)\n",
+				MV_IPQUAD(srcIP), MV_IPQUAD(dstIP),
+				currRule->port, i);
+			currRule = currRule->next;
+		}
+	}
+
+}
+
+void l2fw_ports_dump(void)
+{
+	MV_U32 rx_port = 0;
+	struct eth_port_l2fw *ppl2fw;
+
+	mvOsPrintf("\nPrinting L2fw ports Database:\n");
+	mvOsPrintf("*******************************\n");
+
+	for (rx_port = 0; rx_port < mv_eth_ports_l2fw_num; rx_port++) {
+		ppl2fw = mv_eth_ports_l2fw[rx_port];
+		mvOsPrintf("rx_port=%d cmd = %d tx_port=%d lookup=%d xor_threshold = %d\n",
+				rx_port, ppl2fw->cmd, ppl2fw->txPort, ppl2fw->lookupEn, ppl2fw->xorThreshold);
+
+	}
+}
+
+
+MV_STATUS l2fw_add(MV_U32 srcIP, MV_U32 dstIP, int port)
+{
+	L2FW_RULE *l2fw_rule;
+	MV_U8	  *srcIPchr, *dstIPchr;
+
+	MV_U32 hash = mv_jhash_3words(srcIP, dstIP, (MV_U32) 0, l2fw_jhash_iv);
+	hash &= L2FW_HASH_MASK;
+	if (numHashEntries == L2FW_HASH_SIZE) {
+		printk(KERN_INFO "cannot add entry, hash table is full, there are %d entires \n", L2FW_HASH_SIZE);
+		return MV_ERROR;
+	}
+
+	srcIPchr = (MV_U8 *)&(srcIP);
+	dstIPchr = (MV_U8 *)&(dstIP);
+
+#ifdef CONFIG_MV_ETH_L2FW_DEBUG
+	mvOsPrintf("srcIP=%x dstIP=%x in %s\n", srcIP, dstIP, __func__);
+	mvOsPrintf("srcIp = %u.%u.%u.%u in %s\n", MV_IPQUAD(srcIPchr), __func__);
+	mvOsPrintf("dstIp = %u.%u.%u.%u in %s\n", MV_IPQUAD(dstIPchr), __func__);
+#endif
+
+	l2fw_rule = l2fw_lookup(srcIP, dstIP);
+	if (l2fw_rule) {
+		/* overwite port */
+		l2fw_rule->port = port;
+		return MV_OK;
+	}
+
+	l2fw_rule = (L2FW_RULE *)mvOsMalloc(sizeof(L2FW_RULE));
+	if (!l2fw_rule) {
+		mvOsPrintf("%s: OOM\n", __func__);
+		return MV_FAIL;
+	}
+#ifdef CONFIG_MV_ETH_L2FW_DEBUG
+	mvOsPrintf("adding a rule to l2fw hash in %s\n", __func__);
+#endif
+	l2fw_rule->srcIP = srcIP;
+	l2fw_rule->dstIP = dstIP;
+	l2fw_rule->port = port;
+
+	l2fw_rule->next = l2fw_hash[hash];
+	l2fw_hash[hash] = l2fw_rule;
+	numHashEntries++;
+    return MV_OK;
+}
+
+
+#ifdef CONFIG_MV_INCLUDE_XOR
+static void dump_xor(void)
+{
+	mvOsPrintf(" CHANNEL_ARBITER_REG %08x\n",
+		MV_REG_READ(XOR_CHANNEL_ARBITER_REG(1)));
+	mvOsPrintf(" CONFIG_REG          %08x\n",
+		MV_REG_READ(XOR_CONFIG_REG(1, XOR_CHAN(0))));
+	mvOsPrintf(" ACTIVATION_REG      %08x\n",
+		MV_REG_READ(XOR_ACTIVATION_REG(1, XOR_CHAN(0))));
+	mvOsPrintf(" CAUSE_REG           %08x\n",
+		MV_REG_READ(XOR_CAUSE_REG(1)));
+	mvOsPrintf(" MASK_REG            %08x\n",
+		MV_REG_READ(XOR_MASK_REG(1)));
+	mvOsPrintf(" ERROR_CAUSE_REG     %08x\n",
+		MV_REG_READ(XOR_ERROR_CAUSE_REG(1)));
+	mvOsPrintf(" ERROR_ADDR_REG      %08x\n",
+		MV_REG_READ(XOR_ERROR_ADDR_REG(1)));
+	mvOsPrintf(" NEXT_DESC_PTR_REG   %08x\n",
+		MV_REG_READ(XOR_NEXT_DESC_PTR_REG(1, XOR_CHAN(0))));
+	mvOsPrintf(" CURR_DESC_PTR_REG   %08x\n",
+		MV_REG_READ(XOR_CURR_DESC_PTR_REG(1, XOR_CHAN(0))));
+	mvOsPrintf(" BYTE_COUNT_REG      %08x\n\n",
+		MV_REG_READ(XOR_BYTE_COUNT_REG(1, XOR_CHAN(0))));
+	mvOsPrintf("  %08x\n\n", XOR_WINDOW_CTRL_REG(1, XOR_CHAN(0))) ;
+		mvOsPrintf(" XOR_WINDOW_CTRL_REG      %08x\n\n",
+		MV_REG_READ(XOR_WINDOW_CTRL_REG(1, XOR_CHAN(0)))) ;
+}
+#endif
+
+
+static int mv_eth_poll_l2fw(struct napi_struct *napi, int budget)
+{
+	int rx_done = 0;
+	MV_U32 causeRxTx;
+	struct eth_port *pp = MV_ETH_PRIV(napi->dev);
+
+	STAT_INFO(pp->stats.poll[smp_processor_id()]++);
+
+	/* Read cause register */
+	causeRxTx = MV_REG_READ(NETA_INTR_NEW_CAUSE_REG(pp->port)) &
+	    (MV_ETH_MISC_SUM_INTR_MASK | MV_ETH_TXDONE_INTR_MASK |
+		 MV_ETH_RX_INTR_MASK);
+
+	if (causeRxTx & MV_ETH_MISC_SUM_INTR_MASK) {
+		MV_U32 causeMisc;
+
+		/* Process MISC events - Link, etc ??? */
+		causeRxTx &= ~MV_ETH_MISC_SUM_INTR_MASK;
+		causeMisc = MV_REG_READ(NETA_INTR_MISC_CAUSE_REG(pp->port));
+
+		if (causeMisc & NETA_CAUSE_LINK_CHANGE_MASK)
+			mv_eth_link_event(pp, 1);
+		MV_REG_WRITE(NETA_INTR_MISC_CAUSE_REG(pp->port), 0);
+	}
+
+	causeRxTx |= pp->cpu_config[smp_processor_id()]->causeRxTx;
+#ifdef CONFIG_MV_NETA_TXDONE_ISR
+	if (causeRxTx & MV_ETH_TXDONE_INTR_MASK) {
+		/* TX_DONE process */
+
+		mv_eth_tx_done_gbe(pp,
+				(causeRxTx & MV_ETH_TXDONE_INTR_MASK));
+
+		causeRxTx &= ~MV_ETH_TXDONE_INTR_MASK;
+	}
+#endif /* CONFIG_MV_NETA_TXDONE_ISR */
+
+#if (CONFIG_MV_ETH_RXQ > 1)
+	while ((causeRxTx != 0) && (budget > 0)) {
+		int count, rx_queue;
+
+		rx_queue = mv_eth_rx_policy(causeRxTx);
+		if (rx_queue == -1)
+			break;
+
+		count = mv_eth_l2fw_rx(pp, budget, rx_queue);
+		rx_done += count;
+		budget -= count;
+		if (budget > 0)
+			causeRxTx &=
+			 ~((1 << rx_queue) << NETA_CAUSE_RXQ_OCCUP_DESC_OFFS);
+	}
+#else
+	rx_done = mv_eth_l2fw_rx(pp, budget, CONFIG_MV_ETH_RXQ_DEF);
+	budget -= rx_done;
+#endif /* (CONFIG_MV_ETH_RXQ > 1) */
+
+
+	if (budget > 0) {
+		unsigned long flags;
+		causeRxTx = 0;
+
+		napi_complete(napi);
+		STAT_INFO(pp->stats.poll_exit[smp_processor_id()]++);
+
+		local_irq_save(flags);
+		MV_REG_WRITE(NETA_INTR_NEW_MASK_REG(pp->port),
+			(MV_ETH_MISC_SUM_INTR_MASK | MV_ETH_TXDONE_INTR_MASK |
+				  MV_ETH_RX_INTR_MASK));
+
+		local_irq_restore(flags);
+	}
+	pp->cpu_config[smp_processor_id()]->causeRxTx = causeRxTx;
+
+	return rx_done;
+}
+
+
+void mv_eth_set_l2fw(struct eth_port_l2fw *ppl2fw, int cmd, int rx_port, int tx_port)
+{
+	struct eth_port *pp;
+	struct net_device *dev;
+	int group;
+
+#ifndef CONFIG_MV_ETH_L2SEC
+	if (cmd == CMD_L2FW_CESA) {
+		mvOsPrintf("Invalid command (%d) - Ipsec is not defined (%s)\n", cmd, __func__);
+		return;
+	}
+#endif
+	pp = mv_eth_ports[rx_port];
+	if (!pp) {
+		mvOsPrintf("pp is NULL in setting L2FW (%s)\n", __func__);
+		return;
+	}
+
+	dev = pp->dev;
+	if (dev == NULL) {
+		mvOsPrintf("device is NULL in setting L2FW (%s)\n", __func__);
+		return;
+	}
+	if (!test_bit(MV_ETH_F_STARTED_BIT, &(pp->flags))) {
+		mvOsPrintf("Device is down for port=%d ; MV_ETH_F_STARTED_BIT is not set in %s\n", rx_port, __func__);
+		mvOsPrintf("Cannot set to L2FW mode in %s\n", __func__);
+		return;
+	}
+
+	if (cmd == ppl2fw->cmd) {
+		ppl2fw->txPort = tx_port;
+		return;
+	}
+
+	if ((cmd != CMD_L2FW_DISABLE) && (ppl2fw->cmd != CMD_L2FW_DISABLE) && (ppl2fw->cmd != CMD_L2FW_LAST)) {
+		ppl2fw->txPort = tx_port;
+		ppl2fw->cmd	= cmd;
+		return;
+	}
+
+	/*TODO disconnect from linux in case that command != 0, connact back if cmd == 0
+	 use netif_carrier_on/netif_carrier_off
+	 netif_tx_stop_all_queues/netif_tx_wake_all_queues
+	*/
+
+	ppl2fw->txPort = tx_port;
+	ppl2fw->cmd	= cmd;
+
+	for (group = 0; group < CONFIG_MV_ETH_NAPI_GROUPS; group++) {
+		if (test_bit(MV_ETH_F_STARTED_BIT, &(pp->flags)))
+			napi_disable(pp->napiGroup[group]);
+
+		netif_napi_del(pp->napiGroup[group]);
+
+		if (cmd == CMD_L2FW_DISABLE)
+			netif_napi_add(dev, pp->napiGroup[group], mv_eth_poll, pp->weight);
+		else
+			netif_napi_add(dev, pp->napiGroup[group], mv_eth_poll_l2fw, pp->weight);
+
+		if (test_bit(MV_ETH_F_STARTED_BIT, &(pp->flags)))
+			napi_enable(pp->napiGroup[group]);
+	}
+
+}
+
+static inline struct eth_pbuf *l2fw_swap_mac(struct eth_pbuf *pRxPktInfo)
+{
+	MV_U16 *pSrc;
+	int i;
+	MV_U16 swap;
+	pSrc = (MV_U16 *)(pRxPktInfo->pBuf + pRxPktInfo->offset + MV_ETH_MH_SIZE);
+
+	for (i = 0; i < 3; i++) {
+		swap = pSrc[i];
+		pSrc[i] = pSrc[i+3];
+		pSrc[i+3] = swap;
+		}
+
+	return  pRxPktInfo;
+}
+
+static inline void l2fw_copy_mac(struct eth_pbuf *pRxPktInfo,
+					 struct eth_pbuf *pTxPktInfo)
+	{
+	/* copy 30 bytes (start after MH header) */
+	/* 12 for SA + DA */
+	/* 18 for the rest */
+	MV_U16 *pSrc;
+	MV_U16 *pDst;
+	int i;
+	pSrc = (MV_U16 *)(pRxPktInfo->pBuf + pRxPktInfo->offset + MV_ETH_MH_SIZE);
+	pDst = (MV_U16 *)(pTxPktInfo->pBuf + pTxPktInfo->offset + MV_ETH_MH_SIZE);
+
+	/* swap mac SA and DA */
+	for (i = 0; i < 3; i++) {
+		pDst[i]   = pSrc[i+3];
+		pDst[i+3] = pSrc[i];
+		}
+	for (i = 6; i < 15; i++)
+		pDst[i] = pSrc[i];
+	}
+
+static inline void l2fw_copy_and_swap_mac(struct eth_pbuf *pRxPktInfo, struct eth_pbuf *pTxPktInfo)
+{
+	MV_U16 *pSrc;
+	MV_U16 *pDst;
+	int i;
+
+	pSrc = (MV_U16 *)(pRxPktInfo->pBuf +  pRxPktInfo->offset + MV_ETH_MH_SIZE);
+	pDst = (MV_U16 *)(pTxPktInfo->pBuf +  pTxPktInfo->offset + MV_ETH_MH_SIZE);
+	for (i = 0; i < 3; i++) {
+		pDst[i]   = pSrc[i+3];
+		pDst[i+3] = pSrc[i];
+		}
+}
+
+static inline
+struct eth_pbuf *eth_l2fw_copy_packet_withoutXor(struct eth_pbuf *pRxPktInfo)
+{
+	MV_U8 *pSrc;
+	MV_U8 *pDst;
+	struct bm_pool *pool;
+	struct eth_pbuf *pTxPktInfo;
+
+	mvOsCacheInvalidate(NULL, pRxPktInfo->pBuf + pRxPktInfo->offset,
+						pRxPktInfo->bytes);
+
+	pool = &mv_eth_pool[pRxPktInfo->pool];
+	pTxPktInfo = mv_eth_pool_get(pool);
+	if (pTxPktInfo == NULL) {
+		mvOsPrintf("pTxPktInfo == NULL in %s\n", __func__);
+		return NULL;
+		}
+	pSrc = pRxPktInfo->pBuf +  pRxPktInfo->offset + MV_ETH_MH_SIZE;
+	pDst = pTxPktInfo->pBuf +  pTxPktInfo->offset + MV_ETH_MH_SIZE;
+
+	memcpy(pDst+12, pSrc+12, pRxPktInfo->bytes-12);
+	l2fw_copy_and_swap_mac(pRxPktInfo, pTxPktInfo);
+	pTxPktInfo->bytes = pRxPktInfo->bytes;
+	mvOsCacheFlush(NULL, pTxPktInfo->pBuf + pTxPktInfo->offset, pTxPktInfo->bytes);
+
+	return pTxPktInfo;
+}
+
+#ifdef CONFIG_MV_INCLUDE_XOR
+static inline
+struct eth_pbuf *eth_l2fw_copy_packet_withXor(struct eth_pbuf *pRxPktInfo)
+{
+	struct bm_pool *pool;
+	struct eth_pbuf *pTxPktInfo;
+
+	pool = &mv_eth_pool[pRxPktInfo->pool];
+	pTxPktInfo = mv_eth_pool_get(pool);
+	if (pTxPktInfo == NULL) {
+		mvOsPrintf("pTxPktInfo == NULL in %s\n", __func__);
+		return NULL;
+		}
+
+	/* sync between giga and XOR to avoid errors (like checksum errors in TX)
+	   when working with IOCC */
+
+	mvOsCacheIoSync(NULL);
+
+	eth_xor_desc->srcAdd0    = pRxPktInfo->physAddr + pRxPktInfo->offset + MV_ETH_MH_SIZE + 30;
+	eth_xor_desc->phyDestAdd = pTxPktInfo->physAddr + pTxPktInfo->offset + MV_ETH_MH_SIZE + 30;
+
+	eth_xor_desc->byteCnt    = pRxPktInfo->bytes - 30;
+
+	eth_xor_desc->phyNextDescPtr = 0;
+	eth_xor_desc->status         = BIT31;
+	/* we had changed only the first part of eth_xor_desc, so flush only one
+	 line of cache */
+	mvOsCacheLineFlush(NULL, eth_xor_desc);
+	MV_REG_WRITE(XOR_NEXT_DESC_PTR_REG(1, XOR_CHAN(0)), eth_xor_desc_phys_addr);
+
+	MV_REG_WRITE(XOR_ACTIVATION_REG(1, XOR_CHAN(0)), XEXACTR_XESTART_MASK);
+
+	mvOsCacheLineInv(NULL, pRxPktInfo->pBuf + pRxPktInfo->offset);
+	l2fw_copy_mac(pRxPktInfo, pTxPktInfo);
+	mvOsCacheLineFlush(NULL, pTxPktInfo->pBuf + pTxPktInfo->offset);
+
+	/* Update TxPktInfo */
+	pTxPktInfo->bytes = pRxPktInfo->bytes;
+	return pTxPktInfo;
+}
+
+void setXorDesc(void)
+{
+	unsigned int mode;
+	eth_xor_desc = mvOsMalloc(sizeof(MV_XOR_DESC) + XEXDPR_DST_PTR_DMA_MASK + 32);
+	eth_xor_desc = (MV_XOR_DESC *)MV_ALIGN_UP((MV_U32)eth_xor_desc, XEXDPR_DST_PTR_DMA_MASK+1);
+	eth_xor_desc_phys_addr = mvOsIoVirtToPhys(NULL, eth_xor_desc);
+	mvSysXorInit();
+
+	mode = MV_REG_READ(XOR_CONFIG_REG(1, XOR_CHAN(0)));
+	mode &= ~XEXCR_OPERATION_MODE_MASK;
+	mode |= XEXCR_OPERATION_MODE_DMA;
+	MV_REG_WRITE(XOR_CONFIG_REG(1, XOR_CHAN(0)), mode);
+
+    MV_REG_WRITE(XOR_NEXT_DESC_PTR_REG(1, XOR_CHAN(0)), eth_xor_desc_phys_addr);
+	dump_xor();
+	/* TODO mask xor intterupts*/
+}
+
+static inline int xorReady(void)
+{
+	int timeout = 0;
+
+	while (!(MV_REG_READ(XOR_CAUSE_REG(1)) & XOR_CAUSE_DONE_MASK(XOR_CHAN(0)))) {
+		if (timeout > 0x100000) {
+			mvOsPrintf("XOR timeout\n");
+			return 0;
+			}
+		timeout++;
+	}
+
+	/* Clear int */
+	MV_REG_WRITE(XOR_CAUSE_REG(1), ~(XOR_CAUSE_DONE_MASK(XOR_CHAN(0))));
+
+	return 1;
+}
+#endif /* CONFIG_MV_INCLUDE_XOR */
+
+
+void l2fw(int cmd, int rx_port, int tx_port)
+{
+	struct eth_port_l2fw *ppl2fw;
+	int max_port = CONFIG_MV_ETH_PORTS_NUM - 1;
+
+	ppl2fw = mv_eth_ports_l2fw[rx_port];
+
+	if ((cmd < CMD_L2FW_DISABLE) || (cmd > CMD_L2FW_LAST)) {
+		mvOsPrintf("Error: invalid command %d\n", cmd);
+		return;
+	}
+
+	if ((rx_port > max_port) || (rx_port < 0)) {
+		mvOsPrintf("Error: invalid rx port %d\n", rx_port);
+		return;
+	}
+
+	if ((tx_port > max_port) || (tx_port < 0)) {
+		mvOsPrintf("Error: invalid tx port %d\n", tx_port);
+		return;
+	}
+
+	pr_info("cmd=%d rx_port=%d tx_port=%d in %s\n", cmd, rx_port, tx_port, __func__);
+
+	mv_eth_set_l2fw(ppl2fw, cmd, rx_port, tx_port);
+}
+
+#ifdef CONFIG_MV_INCLUDE_XOR
+void l2fw_xor(int rx_port, int threshold)
+{
+	int max_port = CONFIG_MV_ETH_PORTS_NUM - 1;
+
+	if (rx_port > max_port) {
+		mvOsPrintf("Error: invalid rx port %d\n", rx_port);
+		return;
+	}
+
+	mvOsPrintf("setting port %d threshold to %d in %s\n", rx_port, threshold, __func__);
+	mv_eth_ports_l2fw[rx_port]->xorThreshold = threshold;
+}
+#endif /* CONFIG_MV_INCLUDE_XOR */
+
+void l2fw_lookupEn(int rx_port, int enable)
+{
+	int max_port = CONFIG_MV_ETH_PORTS_NUM - 1;
+
+	if (rx_port > max_port) {
+		mvOsPrintf("Error: invalid rx port %d\n", rx_port);
+		return;
+	}
+	mvOsPrintf("setting port %d lookup mode to %s in %s\n", rx_port, (enable == 1) ? "enable" : "disable", __func__);
+	mv_eth_ports_l2fw[rx_port]->lookupEn = enable;
+}
+
+void l2fw_stats(void)
+{
+	int i;
+	for (i = 0; i < CONFIG_MV_ETH_PORTS_NUM; i++) {
+		mvOsPrintf("number of errors in port[%d]=%d\n", i, mv_eth_ports_l2fw[i]->statErr);
+		mvOsPrintf("number of drops  in port[%d]=%d\n", i, mv_eth_ports_l2fw[i]->statDrop);
+	}
+#ifdef CONFIG_MV_ETH_L2SEC
+	mv_l2sec_stats();
+#endif
+}
+
+static inline MV_STATUS mv_eth_l2fw_tx(struct eth_pbuf *pkt, struct eth_port *pp, int withXor,
+									   struct neta_rx_desc *rx_desc)
+{
+	struct neta_tx_desc *tx_desc;
+	u32 tx_cmd = 0;
+	struct tx_queue *txq_ctrl;
+	unsigned long flags = 0;
+
+	/* assigning different txq for each rx port , to avoid waiting on the
+	same txq lock when traffic on several rx ports are destined to the same
+	outgoing interface */
+	int txq = pp->cpu_config[smp_processor_id()]->txq;
+
+	txq_ctrl = &pp->txq_ctrl[pp->txp * CONFIG_MV_ETH_TXQ + txq];
+
+	mv_eth_lock(txq_ctrl, flags);
+
+	if (txq_ctrl->txq_count >= mv_ctrl_txdone)
+		mv_eth_txq_done(pp, txq_ctrl);
+	/* Get next descriptor for tx, single buffer, so FIRST & LAST */
+	tx_desc = mv_eth_tx_desc_get(txq_ctrl, 1);
+	if (tx_desc == NULL) {
+
+		mv_eth_unlock(txq_ctrl, flags);
+
+		/*read_unlock(&pp->rwlock);*/
+		/* No resources: Drop */
+		pp->dev->stats.tx_dropped++;
+#ifdef CONFIG_MV_INCLUDE_XOR
+		if (withXor)
+			xorReady();
+#endif /* CONFIG_MV_INCLUDE_XOR */
+		return MV_DROPPED;
+	}
+	txq_ctrl->txq_count++;
+
+#ifdef CONFIG_MV_ETH_BM_CPU
+	if (MV_NETA_BM_CAP()) {
+		tx_cmd |= NETA_TX_BM_ENABLE_MASK | NETA_TX_BM_POOL_ID_MASK(pkt->pool);
+		txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = (u32) NULL;
+	} else {
+		txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = (u32) pkt;
+	}
+#else
+	txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = (u32) pkt;
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+	mv_eth_shadow_inc_put(txq_ctrl);
+
+	tx_desc->command = tx_cmd | NETA_TX_L4_CSUM_NOT |
+		NETA_TX_FLZ_DESC_MASK | NETA_TX_F_DESC_MASK
+		| NETA_TX_L_DESC_MASK |
+		NETA_TX_PKT_OFFSET_MASK(pkt->offset + MV_ETH_MH_SIZE);
+
+	tx_desc->dataSize    = pkt->bytes;
+	tx_desc->bufPhysAddr = pkt->physAddr;
+
+	mv_eth_tx_desc_flush(pp, tx_desc);
+
+#ifdef CONFIG_MV_INCLUDE_XOR
+	if (withXor) {
+		if (!xorReady()) {
+			mvOsPrintf("MV_DROPPED in %s\n", __func__);
+
+			mv_eth_unlock(txq_ctrl, flags);
+
+			/*read_unlock(&pp->rwlock);*/
+			return MV_DROPPED;
+		}
+	}
+#endif /* CONFIG_MV_INCLUDE_XOR */
+	mv_neta_wmb();
+	mvNetaTxqPendDescAdd(pp->port, pp->txp, txq, 1);
+
+	mv_eth_unlock(txq_ctrl, flags);
+
+	return MV_OK;
+}
+
+
+static inline int mv_eth_l2fw_rx(struct eth_port *pp, int rx_todo, int rxq)
+{
+	struct eth_port  *new_pp;
+	L2FW_RULE *l2fw_rule;
+	MV_NETA_RXQ_CTRL *rx_ctrl = pp->rxq_ctrl[rxq].q;
+	int rx_done, rx_filled;
+	struct neta_rx_desc *rx_desc;
+	u32 rx_status = MV_OK;
+	struct eth_pbuf *pkt;
+	struct eth_pbuf *newpkt = NULL;
+	struct bm_pool *pool;
+	MV_STATUS status = MV_OK;
+	struct eth_port_l2fw *ppl2fw = mv_eth_ports_l2fw[pp->port];
+	MV_IP_HEADER *pIph = NULL;
+	MV_U8 *pData;
+	int	ipOffset;
+
+	rx_done = mvNetaRxqBusyDescNumGet(pp->port, rxq);
+	mvOsCacheIoSync(NULL);
+	if (rx_todo > rx_done)
+		rx_todo = rx_done;
+	rx_done = 0;
+	rx_filled = 0;
+
+	/* Fairness NAPI loop */
+	while (rx_done < rx_todo) {
+#ifdef CONFIG_MV_ETH_RX_DESC_PREFETCH
+		rx_desc = mv_eth_rx_prefetch(pp, rx_ctrl, rx_done, rx_todo);
+		if (!rx_desc)
+			printk(KERN_INFO "rx_desc is NULL in %s\n", __func__);
+#else
+		rx_desc = mvNetaRxqNextDescGet(rx_ctrl);
+		mvOsCacheLineInv(NULL, rx_desc);
+		prefetch(rx_desc);
+#endif /* CONFIG_MV_ETH_RX_DESC_PREFETCH */
+
+		rx_done++;
+		rx_filled++;
+
+		pkt = (struct eth_pbuf *)rx_desc->bufCookie;
+		if (!pkt) {
+			printk(KERN_INFO "pkt is NULL in ; rx_done=%d %s\n", rx_done, __func__);
+			return rx_done;
+		}
+
+		pool = &mv_eth_pool[pkt->pool];
+		rx_status = rx_desc->status;
+		if (((rx_status & NETA_RX_FL_DESC_MASK) != NETA_RX_FL_DESC_MASK) ||
+			(rx_status & NETA_RX_ES_MASK)) {
+			STAT_ERR(pp->stats.rx_error++);
+
+			if (pp->dev)
+				pp->dev->stats.rx_errors++;
+
+			mv_eth_rxq_refill(pp, rxq, pkt, pool, rx_desc);
+			continue;
+		}
+
+		pkt->bytes = rx_desc->dataSize - (MV_ETH_CRC_SIZE + MV_ETH_MH_SIZE);
+
+		pData = pkt->pBuf + pkt->offset;
+
+#ifdef CONFIG_MV_ETH_PNC
+		if (MV_NETA_PNC_CAP()) {
+			ipOffset = NETA_RX_GET_IPHDR_OFFSET(rx_desc);
+		} else {
+			if ((rx_desc->status & ETH_RX_VLAN_TAGGED_FRAME_MASK))
+				ipOffset = MV_ETH_MH_SIZE + sizeof(MV_802_3_HEADER) + MV_VLAN_HLEN;
+			else
+				ipOffset = MV_ETH_MH_SIZE + sizeof(MV_802_3_HEADER);
+		}
+#else
+		if ((rx_desc->status & ETH_RX_VLAN_TAGGED_FRAME_MASK))
+			ipOffset = MV_ETH_MH_SIZE + sizeof(MV_802_3_HEADER) + MV_VLAN_HLEN;
+		else
+			ipOffset = MV_ETH_MH_SIZE + sizeof(MV_802_3_HEADER);
+#endif
+
+		pIph = (MV_IP_HEADER *)(pData + ipOffset);
+		if (pIph == NULL) {
+			printk(KERN_INFO "pIph==NULL in %s\n", __func__);
+			continue;
+		}
+#ifdef CONFIG_MV_ETH_L2FW_DEBUG
+		if (pIph) {
+			MV_U8 *srcIP, *dstIP;
+			srcIP = (MV_U8 *)&(pIph->srcIP);
+			dstIP = (MV_U8 *)&(pIph->dstIP);
+			printk(KERN_INFO "%u.%u.%u.%u->%u.%u.%u.%u in %s\n", MV_IPQUAD(srcIP), MV_IPQUAD(dstIP), __func__);
+			printk(KERN_INFO "0x%x->0x%x in %s\n", pIph->srcIP, pIph->dstIP, __func__);
+		} else
+			printk(KERN_INFO "pIph is NULL in %s\n", __func__);
+#endif
+
+		if (ppl2fw->lookupEn) {
+			l2fw_rule = l2fw_lookup(pIph->srcIP, pIph->dstIP);
+
+			if (!l2fw_rule) {
+
+#ifdef CONFIG_MV_ETH_L2FW_DEBUG
+				printk(KERN_INFO "l2fw_lookup() failed in %s\n", __func__);
+#endif
+
+				new_pp  = mv_eth_ports[ppl2fw->txPort];
+			} else
+				new_pp  = mv_eth_ports[l2fw_rule->port];
+		} else
+			new_pp  = mv_eth_ports[ppl2fw->txPort];
+
+		switch (ppl2fw->cmd) {
+		case CMD_L2FW_AS_IS:
+			status = mv_eth_l2fw_tx(pkt, new_pp, 0, rx_desc);
+			break;
+
+		case CMD_L2FW_SWAP_MAC:
+			mvOsCacheLineInv(NULL, pkt->pBuf + pkt->offset);
+			l2fw_swap_mac(pkt);
+			mvOsCacheLineFlush(NULL, pkt->pBuf+pkt->offset);
+			status = mv_eth_l2fw_tx(pkt, new_pp, 0, rx_desc);
+			break;
+
+		case CMD_L2FW_COPY_SWAP:
+#ifdef CONFIG_MV_INCLUDE_XOR
+			if (pkt->bytes >= ppl2fw->xorThreshold) {
+				newpkt = eth_l2fw_copy_packet_withXor(pkt);
+				if (newpkt)
+					status = mv_eth_l2fw_tx(newpkt, new_pp, 1, rx_desc);
+				else
+					status = MV_ERROR;
+			} else
+#endif /* CONFIG_MV_INCLUDE_XOR */
+			{
+					newpkt = eth_l2fw_copy_packet_withoutXor(pkt);
+					if (newpkt)
+						status = mv_eth_l2fw_tx(newpkt, new_pp, 0, rx_desc);
+					else
+						status = MV_ERROR;
+			}
+			break;
+#ifdef CONFIG_MV_ETH_L2SEC
+		case CMD_L2FW_CESA:
+			status = mv_l2sec_handle_esp(pkt, rx_desc, new_pp, pp->port);
+			break;
+#endif
+		default:
+			pr_err("WARNING:in %s invalid mode %d for rx port %d\n",
+				__func__, ppl2fw->cmd, pp->port);
+			mv_eth_rxq_refill(pp, rxq, pkt, pool, rx_desc);
+		} /*of switch*/
+
+		if (status == MV_OK) {
+			if (mv_eth_pool_bm(pool)) {
+				/* BM - no refill */
+				mvOsCacheLineInv(NULL, rx_desc);
+			} else {
+				if (mv_eth_refill(pp, rxq, NULL, pool, rx_desc)) {
+					printk(KERN_ERR "%s: Linux processing - Can't refill\n", __func__);
+					pp->rxq_ctrl[rxq].missed++;
+				}
+			}
+			/* we do not need the pkt , we do not do anything with it*/
+			if  (ppl2fw->cmd == CMD_L2FW_COPY_SWAP)
+				mv_eth_pool_put(pool, pkt);
+
+			continue;
+
+		} else if (status == MV_DROPPED) {
+			ppl2fw->statDrop++;
+			mv_eth_rxq_refill(pp, rxq, pkt, pool, rx_desc);
+			if (ppl2fw->cmd == CMD_L2FW_COPY_SWAP)
+				mv_eth_pool_put(pool, newpkt);
+
+			continue;
+
+		} else if (status == MV_ERROR) {
+			ppl2fw->statErr++;
+			mv_eth_rxq_refill(pp, rxq, pkt, pool, rx_desc);
+		}
+
+
+	} /* of while */
+
+	/* Update RxQ management counters */
+	mv_neta_wmb();
+	mvNetaRxqDescNumUpdate(pp->port, rxq, rx_done, rx_filled);
+
+	return rx_done;
+}
+
+int mv_l2fw_init(void)
+{
+	int size, port;
+	MV_U32 bytes;
+	MV_U32 regVal;
+	mv_eth_ports_l2fw_num = MV_ETH_MAX_PORTS; /* mvCtrlEthMaxPortGet();*/
+	mvOsPrintf("in %s: mv_eth_ports_l2fw_num=%d\n", __func__, mv_eth_ports_l2fw_num);
+
+	size = mv_eth_ports_l2fw_num * sizeof(struct eth_port_l2fw *);
+	mv_eth_ports_l2fw = mvOsMalloc(size);
+	if (!mv_eth_ports_l2fw)
+		goto oom;
+	memset(mv_eth_ports_l2fw, 0, size);
+	for (port = 0; port < mv_eth_ports_l2fw_num; port++) {
+		mv_eth_ports_l2fw[port] =
+			mvOsMalloc(sizeof(struct eth_port_l2fw));
+		if (!mv_eth_ports_l2fw[port])
+			goto oom1;
+		mv_eth_ports_l2fw[port]->cmd    = CMD_L2FW_LAST/*CMD_L2FW_DISABLE*/;
+		mv_eth_ports_l2fw[port]->txPort = -1;
+		mv_eth_ports_l2fw[port]->lookupEn = 0;
+		mv_eth_ports_l2fw[port]->xorThreshold = XOR_THRESHOLD_DEF;
+		mv_eth_ports_l2fw[port]->statErr = 0;
+		mv_eth_ports_l2fw[port]->statDrop = 0;
+	}
+
+	bytes = sizeof(L2FW_RULE *) * L2FW_HASH_SIZE;
+	l2fw_jhash_iv = mvOsRand();
+
+	l2fw_hash = (L2FW_RULE **)mvOsMalloc(bytes);
+	if (l2fw_hash == NULL) {
+		mvOsPrintf("l2fw hash: not enough memory\n");
+		return MV_NO_RESOURCE;
+	}
+
+	mvOsMemset(l2fw_hash, 0, bytes);
+
+	mvOsPrintf("L2FW hash init %d entries, %d bytes\n", L2FW_HASH_SIZE, bytes);
+	regVal = 0;
+#ifdef CONFIG_MV_ETH_L2SEC
+	mv_l2sec_cesa_init();
+#endif
+
+#ifdef CONFIG_MV_INCLUDE_XOR
+	setXorDesc();
+#endif
+	return 0;
+oom:
+	mvOsPrintf("%s: out of memory in L2FW initialization\n", __func__);
+oom1:
+	mvOsFree(mv_eth_ports_l2fw);
+	return -ENOMEM;
+
+}
diff --git a/drivers/net/ethernet/mvebu_net/neta/l2fw/mv_eth_l2fw.h b/drivers/net/ethernet/mvebu_net/neta/l2fw/mv_eth_l2fw.h
new file mode 100644
index 000000000000..232314d19497
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/l2fw/mv_eth_l2fw.h
@@ -0,0 +1,76 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+
+#ifndef L2FW_MV_ETH_L2FW_H
+#define L2FW_MV_ETH_L2FW_H
+
+#include "mvOs.h"
+#include "net_dev/mv_netdev.h"
+
+#define	L2FW_HASH_SIZE   (1 << 17)
+#define	L2FW_HASH_MASK   (L2FW_HASH_SIZE - 1)
+
+/* L2fw defines */
+#define CMD_L2FW_DISABLE			0
+#define CMD_L2FW_AS_IS				1
+#define CMD_L2FW_SWAP_MAC			2
+#define CMD_L2FW_COPY_SWAP			3
+#define CMD_L2FW_CESA				4
+#define CMD_L2FW_LAST				5
+
+#define XOR_CAUSE_DONE_MASK(chan) ((BIT0|BIT1) << (chan * 16))
+#define XOR_THRESHOLD_DEF			2000;
+
+struct eth_port_l2fw {
+	int cmd;
+	int lookupEn;
+	int xorThreshold;
+	int txPort;
+	/* stats */
+	int statErr;
+	int statDrop;
+};
+
+typedef struct l2fw_rule {
+	MV_U32 srcIP;
+	MV_U32 dstIP;
+	MV_U8 port;
+	struct l2fw_rule *next;
+} L2FW_RULE;
+
+MV_STATUS l2fw_add(MV_U32 srcIP, MV_U32 dstIP, int port);
+
+void l2fw(int cmd, int rx_port, int tx_port);
+void l2fw_xor(int rx_port, int threshold);
+void l2fw_lookupEn(int rx_port, int enable);
+void l2fw_flush(void);
+void l2fw_rules_dump(void);
+void l2fw_ports_dump(void);
+void l2fw_stats(void);
+
+#endif
diff --git a/drivers/net/ethernet/mvebu_net/neta/l2fw/mv_eth_l2sec.c b/drivers/net/ethernet/mvebu_net/neta/l2fw/mv_eth_l2sec.c
new file mode 100644
index 000000000000..d5247e72ae17
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/l2fw/mv_eth_l2sec.c
@@ -0,0 +1,689 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+
+#include "mvOs.h"
+#include  <linux/interrupt.h>
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "mv_neta/net_dev/mv_netdev.h"
+#include "mv_eth_l2sec.h"
+#include "mv_eth_l2fw.h"
+#include "mvDebug.h"
+#include "gbe/mvNetaRegs.h"
+#include <linux/spinlock.h>
+
+static struct tasklet_struct l2sec_tasklet;
+static MV_L2FW_SEC_CESA_PRIV *req_array[MV_L2FW_SEC_REQ_Q_SIZE];
+unsigned int req_empty;
+unsigned int req_ready;
+atomic_t req_count;
+spinlock_t cesa_lock[CESA_CHAN];
+
+static int cesaChanPort[CONFIG_MV_ETH_PORTS_NUM];
+
+static MV_BUF_INFO *pBufInfoArray[CESA_CHAN];
+
+static int cesaPrivIndx[CESA_CHAN];
+static int cesaCmdIndx[CESA_CHAN];
+int cesaFullResBuf[CESA_CHAN];
+
+MV_L2FW_SEC_CESA_PRIV *cesaPrivArray[CESA_CHAN];
+MV_CESA_COMMAND *cesaCmdArray[CESA_CHAN];
+static MV_CESA_MBUF *cesaMbufArray[CESA_CHAN];
+void *cesaOSHandle;
+
+
+static MV_L2FW_SEC_SA_ENTRY sa;
+
+
+#define MALLOC_AND_CLEAR(_ptr_, _size_) {\
+	(_ptr_) = mvOsMalloc(_size_);\
+	if ((_ptr_) == NULL) {\
+		mvOsPrintf("Can't allocate %d bytes of memory\n", (_size_));\
+		return;\
+	 } \
+	memset((_ptr_), 0, (_size_));\
+}
+
+
+void mv_l2sec_stats()
+{
+	int chan;
+	for (chan = 0; chan < CESA_CHAN; chan++)
+		printk(KERN_INFO "number of l2sec channel %d full result buffer events = %d\n", chan, cesaFullResBuf[chan]);
+}
+
+void printEspHdr(MV_ESP_HEADER *pEspHdr)
+{
+	printk(KERN_INFO "pEspHdr->spi=%d in %s\n"  , pEspHdr->spi, __func__);
+	printk(KERN_INFO "pEspHdr->seqNum=%d in %s\n", pEspHdr->seqNum, __func__);
+}
+
+void printIpHdr(MV_IP_HEADER *pIpHdr)
+{
+	MV_U8	  *srcIP, *dstIP;
+
+	srcIP = (MV_U8 *)&(pIpHdr->srcIP);
+	dstIP = (MV_U8 *)&(pIpHdr->dstIP);
+
+	pr_info("%u.%u.%u.%u->%u.%u.%u.%u in %s\n", MV_IPQUAD(srcIP), MV_IPQUAD(dstIP), __func__);
+	pr_info("MV_16BIT_BE(pIpHdr->totalLength)=%d  in %s\n", MV_16BIT_BE(pIpHdr->totalLength), __func__);
+	pr_info("pIpHdr->protocol=%d\n", pIpHdr->protocol);
+}
+
+static inline MV_STATUS mv_eth_l2sec_tx(struct eth_pbuf *pkt, struct eth_port *pp)
+{
+	struct neta_tx_desc *tx_desc;
+	struct tx_queue *txq_ctrl;
+	int l3_status;
+
+	/* assigning different txq for each rx port , to avoid waiting on the
+	same txq lock when traffic on several rx ports are destined to the same
+	outgoing interface */
+	int txq = 0;
+	txq_ctrl = &pp->txq_ctrl[pp->txp * CONFIG_MV_ETH_TXQ + txq];
+
+	if (txq_ctrl->txq_count >= mv_ctrl_txdone)
+		mv_eth_txq_done(pp, txq_ctrl);
+
+	/* Get next descriptor for tx, single buffer, so FIRST & LAST */
+	tx_desc = mv_eth_tx_desc_get(txq_ctrl, 1);
+
+	if (tx_desc == NULL) {
+		/* No resources: Drop */
+		pp->dev->stats.tx_dropped++;
+		return MV_DROPPED;
+	}
+	txq_ctrl->txq_count++;
+
+#ifdef CONFIG_MV_ETH_BM_CPU
+	if (MV_NETA_BM_CAP()) {
+		tx_cmd |= NETA_TX_BM_ENABLE_MASK | NETA_TX_BM_POOL_ID_MASK(pkt->pool);
+		txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = (u32) NULL;
+	} else {
+		txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = (u32) pkt;
+	}
+#else
+	txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = (u32) pkt;
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+	mv_eth_shadow_inc_put(txq_ctrl);
+	l3_status = (0xE << NETA_TX_L3_OFFSET_OFFS) | NETA_TX_IP_CSUM_MASK | (0x5 << NETA_TX_IP_HLEN_OFFS);
+
+	tx_desc->command = l3_status | NETA_TX_L4_CSUM_NOT | NETA_TX_FLZ_DESC_MASK | NETA_TX_F_DESC_MASK
+				| NETA_TX_L_DESC_MASK | NETA_TX_PKT_OFFSET_MASK(pkt->offset + MV_ETH_MH_SIZE);
+
+	tx_desc->dataSize    = pkt->bytes;
+	tx_desc->bufPhysAddr = pkt->physAddr;
+	mv_eth_tx_desc_flush(pp, tx_desc);
+	mvNetaTxqPendDescAdd(pp->port, pp->txp, 0, 1);
+
+	return MV_OK;
+}
+
+static inline void mv_l2sec_complete_out(unsigned long data)
+
+{
+	MV_L2FW_SEC_CESA_PRIV *sec_cesa_priv = (MV_L2FW_SEC_CESA_PRIV *)data;
+	MV_U32            ifout;
+	MV_BUF_INFO       *pBuf;
+	struct eth_port   *pp;
+	struct eth_pbuf   *pPkt;
+	int oldOfsset;
+	MV_STATUS status = MV_FAIL;
+	static int counterOfFailed = 0;
+
+	if (!sec_cesa_priv) {
+		printk(KERN_INFO "sec_cesa_priv is NULL in %s\n", __func__);
+		return;
+	}
+	ifout = sec_cesa_priv->ifout;
+
+	pBuf = sec_cesa_priv->pBufInfo;
+	if (!pBuf) {
+		printk(KERN_INFO "pBuf is NULL in %s\n", __func__);
+		return;
+	}
+	pPkt = sec_cesa_priv->pPkt;
+	if (!pPkt) {
+		printk(KERN_INFO "!pPkt) in %s\n", __func__);
+		return;
+	}
+	pPkt->bytes    = pBuf->dataSize;
+	pPkt->bytes   += MV_L2FW_SEC_ESP_OFFSET;
+	oldOfsset      = pPkt->offset;
+	pPkt->offset   = pPkt->offset - (sizeof(MV_ESP_HEADER) + sizeof(MV_IP_HEADER) + MV_CESA_AES_BLOCK_SIZE);
+
+	pp     = mv_eth_ports[ifout];
+
+	status = mv_eth_l2sec_tx(pPkt, pp);
+
+	pPkt->offset = oldOfsset;
+
+	if (status == MV_DROPPED) {
+		struct bm_pool *pool = &mv_eth_pool[pPkt->pool];
+		counterOfFailed++;
+		mv_eth_pool_put(pool, pPkt);
+	 }
+}
+
+int mv_l2sec_set_cesa_chan(int port, int cesaChan)
+{
+	if (cesaChan > (MV_CESA_CHANNELS - 1)) {
+		pr_info("non permitted value for CESA channel\n");
+		return -EINVAL;
+	}
+
+	pr_info("setting cesaChan to %d for port=%d\n", cesaChan, port);
+
+	cesaChanPort[port] = cesaChan;
+
+	return 0;
+}
+
+MV_STATUS my_mvSysCesaInit(int numOfSession, int queueDepth, void *osHandle)
+{
+
+	MV_CESA_HAL_DATA halData;
+	MV_UNIT_WIN_INFO addrWinMap[MAX_TARGETS + 1];
+	MV_STATUS status;
+	MV_U8 chan;
+
+	status = mvCtrlAddrWinMapBuild(addrWinMap, MAX_TARGETS + 1);
+
+	if (status == MV_OK) {
+		for (chan = 0; chan < MV_CESA_CHANNELS; chan++) {
+			status = mvCesaTdmaWinInit(chan, addrWinMap);
+			if (status != MV_OK) {
+				mvOsPrintf("Error, unable to initialize CESA windows for channel(%d)\n", chan);
+				break;
+			}
+			halData.sramPhysBase[chan] = (MV_ULONG)mv_crypto_virt_base_get(chan);
+			halData.sramVirtBase[chan] = (MV_U8 *)mv_crypto_virt_base_get(chan);
+			halData.sramOffset[chan] = 0;
+		}
+
+		if (status == MV_OK) {
+		halData.ctrlModel = mvCtrlModelGet();
+		halData.ctrlRev = mvCtrlRevGet();
+			status = mvCesaHalInit(numOfSession, queueDepth,
+					osHandle, &halData);
+		}
+	}
+
+	return status;
+
+}
+
+void mv_l2sec_cesa_start(void)
+{
+
+	MV_CESA_MBUF *pMbufSrc[CESA_CHAN], *pMbufDst[CESA_CHAN];
+	MV_BUF_INFO *pCesaBufs[CESA_CHAN], *pFragsSrc[CESA_CHAN], *pFragsDst[CESA_CHAN];
+	MV_CESA_COMMAND *cesaCmdArrTmp;
+	MV_BUF_INFO *pCesaBufsTmp;
+	int chan;
+	int i, j, idx;
+	char *pBuf;
+
+	for (chan = 0; chan < CESA_CHAN; chan++) {
+		MALLOC_AND_CLEAR(cesaCmdArray[chan], sizeof(MV_CESA_COMMAND) * CESA_DEF_REQ_SIZE);
+		MALLOC_AND_CLEAR(pMbufSrc[chan], sizeof(MV_CESA_MBUF) * CESA_DEF_REQ_SIZE);
+		MALLOC_AND_CLEAR(pFragsSrc[chan], sizeof(MV_BUF_INFO) * L2SEC_CESA_BUF_NUM * CESA_DEF_REQ_SIZE);
+		MALLOC_AND_CLEAR(pMbufDst[chan], sizeof(MV_CESA_MBUF) * CESA_DEF_REQ_SIZE);
+		MALLOC_AND_CLEAR(pFragsDst[chan], sizeof(MV_BUF_INFO) * L2SEC_CESA_BUF_NUM * CESA_DEF_REQ_SIZE);
+		MALLOC_AND_CLEAR(pCesaBufs[chan], sizeof(MV_BUF_INFO) * L2SEC_CESA_BUF_NUM * CESA_DEF_REQ_SIZE);
+
+		idx = 0;
+		pCesaBufsTmp = pCesaBufs[chan];
+		cesaCmdArrTmp = cesaCmdArray[chan];
+
+		for (i = 0; i < CESA_DEF_REQ_SIZE; i++) {
+			pBuf = mvOsIoCachedMalloc(cesaOSHandle, L2SEC_CESA_BUF_SIZE * L2SEC_CESA_BUF_NUM * 2,
+					  &pCesaBufsTmp[i].bufPhysAddr, &pCesaBufsTmp[i].memHandle);
+			if (pBuf == NULL) {
+				mvOsPrintf("testStart: Can't malloc %d bytes for pBuf\n", L2SEC_CESA_BUF_SIZE * L2SEC_CESA_BUF_NUM * 2);
+				return;
+			}
+
+			memset(pBuf, 0, L2SEC_CESA_BUF_SIZE * L2SEC_CESA_BUF_NUM * 2);
+			mvOsCacheFlush(cesaOSHandle, pBuf, L2SEC_CESA_BUF_SIZE * L2SEC_CESA_BUF_NUM * 2);
+			if (pBuf == NULL) {
+				mvOsPrintf("Can't allocate %d bytes for req_%d buffers\n",
+						L2SEC_CESA_BUF_SIZE * L2SEC_CESA_BUF_NUM * 2, i);
+				return;
+			}
+
+			pCesaBufsTmp[i].bufVirtPtr = (MV_U8 *) pBuf;
+			pCesaBufsTmp[i].bufSize = L2SEC_CESA_BUF_SIZE * L2SEC_CESA_BUF_NUM * 2;
+
+			cesaCmdArrTmp[i].pSrc = &pMbufSrc[chan][i];
+			cesaCmdArrTmp[i].pSrc->pFrags = &pFragsSrc[chan][idx];
+			cesaCmdArrTmp[i].pSrc->numFrags = L2SEC_CESA_BUF_NUM;
+			cesaCmdArrTmp[i].pSrc->mbufSize = 0;
+
+			cesaCmdArrTmp[i].pDst = &pMbufDst[chan][i];
+			cesaCmdArrTmp[i].pDst->pFrags = &pFragsDst[chan][idx];
+			cesaCmdArrTmp[i].pDst->numFrags = L2SEC_CESA_BUF_NUM;
+			cesaCmdArrTmp[i].pDst->mbufSize = 0;
+
+			for (j = 0; j < L2SEC_CESA_BUF_NUM; j++) {
+				cesaCmdArrTmp[i].pSrc->pFrags[j].bufVirtPtr = (MV_U8 *) pBuf;
+				cesaCmdArrTmp[i].pSrc->pFrags[j].bufSize = L2SEC_CESA_BUF_SIZE;
+				pBuf += L2SEC_CESA_BUF_SIZE;
+				cesaCmdArrTmp[i].pDst->pFrags[j].bufVirtPtr = (MV_U8 *) pBuf;
+
+				cesaCmdArrTmp[i].pDst->pFrags[j].bufSize = L2SEC_CESA_BUF_SIZE;
+				pBuf += L2SEC_CESA_BUF_SIZE;
+			}
+		idx += L2SEC_CESA_BUF_NUM;
+		}
+
+		MALLOC_AND_CLEAR(cesaMbufArray[chan], sizeof(MV_CESA_MBUF) * CESA_DEF_REQ_SIZE);
+		MALLOC_AND_CLEAR(pBufInfoArray[chan], sizeof(MV_BUF_INFO) * MV_L2FW_SEC_REQ_Q_SIZE);
+		MALLOC_AND_CLEAR(cesaPrivArray[chan],
+				sizeof(MV_L2FW_SEC_CESA_PRIV) * (CESA_DEF_REQ_SIZE + MV_L2FW_SEC_REQ_Q_SIZE));
+
+	} /*for chan*/
+
+	printk(KERN_INFO "start finished in %s\n", __func__);
+}
+
+/*
+ * nfp sec Interrupt handler routine.
+ */
+
+
+
+static irqreturn_t
+mv_l2sec_interrupt_handler(int irq, void *arg)
+{
+	MV_CESA_RESULT  	result;
+	MV_STATUS               status;
+	int chan;
+
+	chan = (irq == CESA_IRQ(0)) ? 0 : 1;
+
+	/* clear interrupts */
+	MV_REG_WRITE(MV_CESA_ISR_CAUSE_REG(chan), 0);
+#ifndef CONFIG_MV_CESA_INT_PER_PACKET
+	while (1) {
+#endif
+	/* Get Ready requests */
+	status = mvCesaReadyGet(chan, &result);
+	if (status != MV_OK) {
+#ifdef CONFIG_MV_CESA_INT_PER_PACKET
+		printk(KERN_ERR "ERROR: Ready get return %d\n", status);
+		return IRQ_HANDLED;
+#else
+			break;
+#endif
+	}
+	/* handle result */
+	if (atomic_read(&req_count) > (MV_L2FW_SEC_REQ_Q_SIZE - 4)) {
+		/*must take sure that no tx_done will happen on the same time.. */
+		MV_L2FW_SEC_CESA_PRIV *req_priv = (MV_L2FW_SEC_CESA_PRIV *)result.pReqPrv;
+		struct eth_pbuf *pPkt = req_priv->pPkt;
+		struct bm_pool *pool = &mv_eth_pool[pPkt->pool];
+		printk(KERN_ERR "Error: Q request is full - TBD test.\n");
+		mv_eth_pool_put(pool, pPkt);
+		cesaFullResBuf[chan]++;
+		return IRQ_HANDLED;
+	}
+
+	req_array[req_empty] = (MV_L2FW_SEC_CESA_PRIV *)result.pReqPrv;
+	req_empty = (req_empty + 1) % MV_L2FW_SEC_REQ_Q_SIZE;
+	atomic_inc(&req_count);
+#ifndef CONFIG_MV_CESA_INT_PER_PACKET
+	}
+#endif
+	tasklet_hi_schedule(&l2sec_tasklet);
+
+	return IRQ_HANDLED;
+}
+
+void mv_l2sec_req_handler(unsigned long dummy)
+{
+	int req_count_init = atomic_read(&req_count);
+	int counter = req_count_init;
+
+	while (counter != 0) {
+		mv_l2sec_complete_out((unsigned long)req_array[req_ready]);
+		req_ready = (req_ready + 1) % MV_L2FW_SEC_REQ_Q_SIZE;
+		counter--;
+	}
+	atomic_sub(req_count_init, &req_count);
+
+}
+
+
+void mv_l2sec_open_cesa_session(void)
+{
+	unsigned char sha1Key[]  = {0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
+								0x24, 0x68, 0xac, 0xe0, 0x24, 0x68, 0xac, 0xe0,
+								0x13, 0x57, 0x9b, 0xdf};
+	/* sizeof(cryptoKey) should be 128 for AES-128 */
+	unsigned char cryptoKey[] = {0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
+									0x02, 0x46, 0x8a, 0xce, 0x13, 0x57, 0x9b, 0xdf};
+
+	int i;
+	MV_L2FW_SEC_SA_ENTRY sa;
+	MV_CESA_OPEN_SESSION os;
+	unsigned short digest_size = 0;
+	memset(&sa, 0, sizeof(MV_L2FW_SEC_SA_ENTRY));
+	memset(&os, 0, sizeof(MV_CESA_OPEN_SESSION));
+
+	os.operation       = MV_CESA_MAC_THEN_CRYPTO;
+	os.cryptoAlgorithm = MV_CESA_CRYPTO_AES;
+	os.macMode         = MV_CESA_MAC_HMAC_SHA1;
+	digest_size        = MV_CESA_SHA1_DIGEST_SIZE;
+	os.cryptoMode      = MV_CESA_CRYPTO_CBC;
+
+	for (i = 0; i < sizeof(cryptoKey); i++)
+		os.cryptoKey[i] = cryptoKey[i];
+
+	os.cryptoKeyLength = sizeof(cryptoKey);
+
+	for (i = 0; i < sizeof(sha1Key); i++)
+		os.macKey[i] = sha1Key[i];
+	os.macKeyLength = sizeof(sha1Key);
+	os.digestSize = digest_size;
+
+	if (mvCesaSessionOpen(&os, (short *)&(sa.sid)))
+		printk(KERN_INFO "mvCesaSessionOpen failed in %s\n", __func__);
+}
+
+static void mv_l2sec_casa_param_init(void)
+{
+	const u8 da_addr[] = {0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff};
+	const u8 sa_addr[] = {0xab, 0xac, 0xad, 0xae, 0xaf, 0xaa};
+
+	memset(&sa, 0, sizeof(MV_L2FW_SEC_SA_ENTRY));
+	sa.digestSize = MV_CESA_SHA1_DIGEST_SIZE;
+	sa.ivSize = MV_CESA_AES_BLOCK_SIZE;
+	sa.spi = 3;
+
+	sa.tunProt = MV_L2FW_SEC_TUNNEL;
+	sa.encap   = MV_L2FW_SEC_ESP;
+	sa.seqNum  = 4;
+	sa.tunnelHdr.sIp = 0x6400A8C0;
+	sa.tunnelHdr.dIp = 0x6401A8C0;
+	sa.tunnelHdr.outIfIndex = 0;
+	sa.lifeTime = 0;
+
+	sa.secOp = MV_L2FW_SEC_ENCRYPT;
+	memcpy(sa.tunnelHdr.dstMac, da_addr, 6);
+	memcpy(sa.tunnelHdr.srcMac, sa_addr, 6);
+
+}
+
+int mv_l2sec_cesa_init(void)
+{
+	int chan, mask;
+	printk(KERN_INFO "%s: start.\n", __func__);
+	if (mvCtrlPwrClckGet(CESA_UNIT_ID, 0) == MV_FALSE)
+		return 0;
+
+	if (MV_OK != my_mvSysCesaInit(1, 256, NULL)) {
+		pr_err("%s: cesa init failed.\n", __func__);
+		return EINVAL;
+	}
+
+#ifdef CONFIG_MV_CESA_INT_COALESCING_SUPPORT
+	mask = MV_CESA_CAUSE_EOP_COAL_MASK;
+#else
+	mask = MV_CESA_CAUSE_ACC_DMA_MASK;
+#endif
+
+	for (chan = 0 ; chan < CESA_CHAN; chan++) {
+		/* clear and unmask channel 0 interrupt */
+		MV_REG_WRITE(MV_CESA_ISR_CAUSE_REG(chan), 0);
+		MV_REG_WRITE(MV_CESA_ISR_MASK_REG(chan), mask);
+
+		/* register channel 0 interrupt */
+		if (request_irq(CESA_IRQ(chan), mv_l2sec_interrupt_handler, (IRQF_DISABLED), "cesa", NULL)) {
+			printk(KERN_INFO "%s: cannot assign irq %x\n", __func__, CESA_IRQ(chan));
+			return -EINVAL;
+		}
+
+		cesaChanPort[chan] = 0;
+		cesaPrivIndx[chan] = 0;
+		cesaCmdIndx[chan] = 0;
+		cesaFullResBuf[chan] = 0;
+		spin_lock_init(&cesa_lock[chan]);
+	}
+
+	tasklet_init(&l2sec_tasklet, mv_l2sec_req_handler, (unsigned long) 0);
+	atomic_set(&req_count, 0);
+
+	mv_l2sec_casa_param_init();
+	mv_l2sec_open_cesa_session();
+	mv_l2sec_cesa_start();
+	printk(KERN_INFO "%s: done.\n", __func__);
+
+	return 0;
+}
+
+
+void mv_l2sec_build_tunnel(MV_BUF_INFO *pBuf, MV_L2FW_SEC_SA_ENTRY *pSAEntry)
+{
+	MV_IP_HEADER *pIpHdr, *pIntIpHdr;
+	MV_U16 newIpTotalLength;
+
+	newIpTotalLength = pBuf->dataSize - sizeof(MV_802_3_HEADER);
+	pIpHdr = (MV_IP_HEADER *) (pBuf->bufVirtPtr + sizeof(MV_802_3_HEADER));
+
+	pIntIpHdr = (MV_IP_HEADER *) ((MV_U8 *) (pIpHdr) + sizeof(MV_IP_HEADER) + sizeof(MV_ESP_HEADER) +
+				      pSAEntry->ivSize);
+
+	/* TBD - review below settings in RFC */
+	pIpHdr->version = 0x45;
+	pIpHdr->tos = 0;
+	pIpHdr->checksum = 0;
+	pIpHdr->totalLength = MV_16BIT_BE(newIpTotalLength);
+	pIpHdr->identifier = 0;
+	pIpHdr->fragmentCtrl = 0;
+	pIpHdr->ttl = pIntIpHdr->ttl - 1;
+	pIpHdr->protocol = MV_IP_PROTO_ESP;
+	pIpHdr->srcIP = pSAEntry->tunnelHdr.sIp;
+	pIpHdr->dstIP = pSAEntry->tunnelHdr.dIp;
+
+	return;
+}
+
+
+/* Append sequence number and spi, save some space for IV */
+void mv_l2sec_build_esp_hdr(MV_BUF_INFO *pBuf, MV_L2FW_SEC_SA_ENTRY *pSAEntry)
+{
+	MV_ESP_HEADER *pEspHdr;
+
+	pEspHdr = (MV_ESP_HEADER *) (pBuf->bufVirtPtr + sizeof(MV_802_3_HEADER) + sizeof(MV_IP_HEADER));
+	pEspHdr->spi = pSAEntry->spi;
+	pSAEntry->seqNum++;
+	pEspHdr->seqNum = MV_32BIT_BE(pSAEntry->seqNum);
+}
+
+void mv_l2sec_build_mac(MV_BUF_INFO *pBuf, MV_L2FW_SEC_SA_ENTRY *pSAEntry)
+{
+	MV_802_3_HEADER *pMacHdr;
+	pMacHdr = (MV_802_3_HEADER *) ((MV_U8 *) (pBuf->bufVirtPtr));
+
+	memcpy(pMacHdr, &pSAEntry->tunnelHdr.dstMac, 12);
+	pMacHdr->typeOrLen = 0x08;/* stands for IP protocol code 16bit swapped */
+	return;
+}
+
+
+MV_STATUS mv_l2sec_esp_process(struct eth_pbuf *pPkt, MV_BUF_INFO *pBuf, MV_L2FW_SEC_SA_ENTRY *pSAEntry,
+				struct eth_port *newpp, int channel, int inPort)
+{
+	MV_CESA_COMMAND	*pCesaCmd;
+	MV_CESA_MBUF *pCesaMbuf;
+	MV_L2FW_SEC_CESA_PRIV *pCesaPriv;
+	MV_STATUS status;
+	MV_IP_HEADER *pIpHdr;
+	int cmdIndx = cesaCmdIndx[channel];
+	int privIndx = cesaPrivIndx[channel];
+
+	pCesaCmd  = &cesaCmdArray[channel][cmdIndx];
+	pCesaMbuf = &cesaMbufArray[channel][cmdIndx];
+
+	cmdIndx = (cmdIndx + 1) % CESA_DEF_REQ_SIZE;
+	cesaCmdIndx[channel] = cmdIndx;
+
+	pCesaPriv = &cesaPrivArray[channel][privIndx];
+
+	privIndx = (privIndx + 1) % (CESA_DEF_REQ_SIZE + MV_L2FW_SEC_REQ_Q_SIZE);
+	cesaPrivIndx[channel] = privIndx;
+
+	pCesaPriv->pBufInfo = pBuf;
+	pCesaPriv->pSaEntry = pSAEntry;
+	pCesaPriv->pCesaCmd = pCesaCmd;
+
+	pCesaPriv->pPkt   = pPkt;
+	pCesaPriv->ifout  = newpp->port;
+	pCesaPriv->inPort = inPort;
+	/*
+	 *  Fix, encrypt/decrypt the IP payload only, --BK 20091027
+	 */
+	pIpHdr = (MV_IP_HEADER *)(pBuf->bufVirtPtr + sizeof(MV_802_3_HEADER));
+	pBuf->dataSize = MV_16BIT_BE(pIpHdr->totalLength) + sizeof(MV_802_3_HEADER);
+
+	/* after next command, pBuf->bufVirtPtr will point to ESP */
+	pBuf->bufVirtPtr += MV_L2FW_SEC_ESP_OFFSET;
+	pBuf->bufPhysAddr += MV_L2FW_SEC_ESP_OFFSET;
+	pBuf->dataSize -= MV_L2FW_SEC_ESP_OFFSET;
+
+	pBuf->bufAddrShift -= MV_L2FW_SEC_ESP_OFFSET;
+	pCesaMbuf->pFrags = pBuf;
+	pCesaMbuf->numFrags = 1;
+	pCesaMbuf->mbufSize = pBuf->dataSize;
+
+	pCesaMbuf->pFrags->bufSize = pBuf->dataSize;
+
+	pCesaCmd->pReqPrv = (void *)pCesaPriv;
+	pCesaCmd->sessionId = pSAEntry->sid;
+	pCesaCmd->pSrc = pCesaMbuf;
+	pCesaCmd->pDst = pCesaMbuf;
+	pCesaCmd->skipFlush = MV_TRUE;
+
+	/* Assume ESP */
+	pCesaCmd->cryptoOffset = sizeof(MV_ESP_HEADER) + pSAEntry->ivSize;
+	pCesaCmd->cryptoLength =  pBuf->dataSize - (sizeof(MV_ESP_HEADER)
+				  + pSAEntry->ivSize + pSAEntry->digestSize);
+	pCesaCmd->ivFromUser = 0; /* relevant for encode only */
+	pCesaCmd->ivOffset = sizeof(MV_ESP_HEADER);
+	pCesaCmd->macOffset = 0;
+	pCesaCmd->macLength = pBuf->dataSize - pSAEntry->digestSize;
+
+
+	if ((pCesaCmd->digestOffset != 0) && ((pCesaCmd->digestOffset%4)))  {
+		printk(KERN_INFO "pBuf->dataSize=%d pSAEntry->digestSize=%d in %s\n",
+			pBuf->dataSize, pSAEntry->digestSize, __func__);
+		printk(KERN_INFO "pCesaCmd->digestOffset=%d in %s\n",
+			pCesaCmd->digestOffset, __func__);
+	}
+
+	pCesaCmd->digestOffset = pBuf->dataSize - pSAEntry->digestSize ;
+
+	disable_irq(CESA_IRQ(channel));
+	status = mvCesaAction(channel, pCesaCmd);
+	enable_irq(CESA_IRQ(channel));
+
+
+	if (status != MV_OK) {
+		pSAEntry->stats.rejected++;
+		mvOsPrintf("%s: mvCesaAction failed %d\n", __func__, status);
+	}
+	return status;
+}
+
+MV_STATUS mv_l2sec_out_going(struct eth_pbuf *pkt, MV_BUF_INFO *pBuf, MV_L2FW_SEC_SA_ENTRY *pSAEntry,
+			struct eth_port *new_pp, int inPort, int chan)
+{
+	MV_U8 *pTmp;
+	MV_U32 cryptoSize, encBlockMod, dSize;
+	/* CESA Q is full drop. */
+	if (cesaReqResources[chan] <= 1)
+		return MV_DROPPED;
+
+	cryptoSize = pBuf->dataSize - sizeof(MV_802_3_HEADER);
+
+	/* Align buffer address to beginning of new packet - TBD handle VLAN tag, LLC */
+	dSize = pSAEntry->ivSize + sizeof(MV_ESP_HEADER) + sizeof(MV_IP_HEADER);
+	pBuf->bufVirtPtr -= dSize;
+	pBuf->bufPhysAddr -= dSize;
+	pBuf->dataSize += dSize;
+	pBuf->bufAddrShift += dSize;
+
+	encBlockMod = (cryptoSize % MV_L2FW_SEC_ENC_BLOCK_SIZE);
+	/* leave space for padLen + Protocol */
+	if (encBlockMod > 14) {
+		encBlockMod =  MV_L2FW_SEC_ENC_BLOCK_SIZE - encBlockMod;
+		encBlockMod += MV_L2FW_SEC_ENC_BLOCK_SIZE;
+	} else
+		encBlockMod =  MV_L2FW_SEC_ENC_BLOCK_SIZE - encBlockMod;
+
+	pBuf->dataSize += encBlockMod;
+
+	pTmp = pBuf->bufVirtPtr + pBuf->dataSize;
+	memset(pTmp - encBlockMod, 0, encBlockMod - 2);
+	*((MV_U8 *)(pTmp-2)) = (MV_U8)(encBlockMod-2);
+	*((MV_U8 *)(pTmp-1)) = (MV_U8)4;
+
+	pBuf->dataSize += pSAEntry->digestSize;
+
+	mv_l2sec_build_esp_hdr(pBuf, pSAEntry);
+	mv_l2sec_build_tunnel(pBuf, pSAEntry);
+	mv_l2sec_build_mac(pBuf, pSAEntry);
+
+	return mv_l2sec_esp_process(pkt, pBuf, pSAEntry, new_pp, chan, inPort);
+}
+
+MV_STATUS mv_l2sec_handle_esp(struct eth_pbuf *pkt, struct neta_rx_desc *rx_desc, struct eth_port  *new_pp, int inPort)
+{
+	MV_STATUS res;
+	int chan = cesaChanPort[inPort];
+	MV_BUF_INFO *pBufInfoArr = pBufInfoArray[chan];
+	int cmdIndx = cesaCmdIndx[chan];
+	spin_lock(&cesa_lock[chan]);
+
+	pBufInfoArr[cmdIndx].bufAddrShift = 0;
+	pBufInfoArr[cmdIndx].dataSize    = pkt->bytes;
+
+	pBufInfoArr[cmdIndx].bufSize     = pkt->bytes;
+	pBufInfoArr[cmdIndx].bufVirtPtr  = pkt->pBuf + pkt->offset + MV_ETH_MH_SIZE;
+
+	pBufInfoArr[cmdIndx].bufPhysAddr = mvOsIoVirtToPhy(NULL, pBufInfoArr[cmdIndx].bufVirtPtr);
+	pBufInfoArr[cmdIndx].memHandle   = 0;
+
+	res = mv_l2sec_out_going(pkt, &pBufInfoArr[cmdIndx], &sa, new_pp, inPort, chan);
+
+	spin_unlock(&cesa_lock[chan]);
+	return res;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/neta/l2fw/mv_eth_l2sec.h b/drivers/net/ethernet/mvebu_net/neta/l2fw/mv_eth_l2sec.h
new file mode 100644
index 000000000000..04d7964370c3
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/l2fw/mv_eth_l2sec.h
@@ -0,0 +1,124 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+
+#ifndef L2SEC_MV_ETH_L2SEC_H
+#define L2SEC_MV_ETH_L2SEC_H
+#include "cesa/mvCesa.h"
+
+extern u32 mv_crypto_virt_base_get(void);
+
+/* IPSec defines */
+#define MV_L2FW_SEC_MAX_PACKET		1540
+#define MV_L2FW_SEC_ENC_BLOCK_SIZE	16
+#define MV_L2FW_SEC_ESP_OFFSET		34
+
+#define L2SEC_CESA_BUF_NUM	1	/* CESA_DEF_BUF_NUM */
+#define L2SEC_CESA_BUF_SIZE  1500	/* CESA_DEF_BUF_SIZE */
+
+
+/* IPSec Enumerators */
+typedef enum {
+	MV_L2FW_SEC_TUNNEL = 0,
+	MV_L2FW_SEC_TRANSPORT,
+} MV_L2FW_SEC_PROT;
+
+typedef enum {
+	MV_L2FW_SEC_ESP = 0,
+	MV_L2FW_SEC_AH,
+} MV_L2FW_SEC_ENCAP;
+
+
+typedef enum {
+	MV_L2FW_SEC_ENCRYPT = 0,
+	MV_L2FW_SEC_DECRYPT,
+} MV_L2FW_SEC_OP;
+
+struct mv_l2fw_sa_stats {
+	MV_U32 encrypt;
+	MV_U32 decrypt;
+	MV_U32 rejected;	/* slow path */
+	MV_U32 dropped;		/* packet drop */
+	MV_U32 bytes;
+} MV_L2FW_SA_STATS;
+
+/* IPSec Structures */
+struct mv_l2fw_sec_tunnel_hdr {
+	MV_U32 sIp;			/* BE */
+	MV_U32 dIp;			/* BE */
+	/* dstMac should be 2 byte aligned */
+	MV_U8 dstMac[MV_MAC_ADDR_SIZE];	/* BE */
+	MV_U8 srcMac[MV_MAC_ADDR_SIZE];	/* BE */
+	MV_U8 outIfIndex;
+} MV_L2FW_SEC_TUNNEL_HDR;
+
+struct mv_l2fw_sec_sa_entry {
+	MV_U32 spi;			/* BE */
+	MV_L2FW_SEC_PROT tunProt;
+	MV_L2FW_SEC_ENCAP encap;
+	MV_U16 sid;
+	MV_U32 seqNum;			/* LE  */
+	struct mv_l2fw_sec_tunnel_hdr tunnelHdr;
+	MV_U32 lifeTime;
+	MV_U8 ivSize;
+	MV_U8 cipherBlockSize;
+	MV_U8 digestSize;
+	MV_L2FW_SEC_OP secOp;
+	struct mv_l2fw_sa_stats stats;
+} MV_L2FW_SEC_SA_ENTRY;
+
+
+#define CESA_0    0
+#define CESA_1    1
+
+/* define number of channels */
+#ifdef CONFIG_ARMADA_XP
+#define CESA_CHAN 2
+#else
+#define CESA_CHAN 1
+#endif
+
+
+#define MV_L2FW_SEC_REQ_Q_SIZE   1000
+#define CESA_DEF_REQ_SIZE       (256*4)
+
+struct mv_l2fw_sec_cesa_priv {
+	struct mv_l2fw_sec_sa_entry *pSaEntry;
+	MV_BUF_INFO *pBufInfo;
+	MV_U8 orgDigest[MV_CESA_MAX_DIGEST_SIZE];
+	MV_CESA_COMMAND *pCesaCmd;
+	struct eth_pbuf *pPkt;
+	int ifout;
+	int ownerId;
+	int inPort;
+} MV_L2FW_SEC_CESA_PRIV;
+
+MV_STATUS mv_l2sec_handle_esp(struct eth_pbuf *pkt, struct neta_rx_desc *rx_desc, struct eth_port  *new_pp, int inPort);
+int mv_l2sec_cesa_init(void);
+void mv_l2sec_stats(void);
+int mv_l2sec_set_cesa_chan(int port, int cesaChan);
+#endif /*L2SEC_MV_ETH_L2SEC_H*/
diff --git a/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_nfp.c b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_nfp.c
new file mode 100644
index 000000000000..38707f8e0084
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_nfp.c
@@ -0,0 +1,1072 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+
+#include "mvCommon.h"
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/inetdevice.h>
+#include <linux/mv_nfp.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "mvOs.h"
+#include "mvDebug.h"
+#include "dbg-trace.h"
+#include "mvSysHwConfig.h"
+#include "boardEnv/mvBoardEnvLib.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#include "eth-phy/mvEthPhy.h"
+#include "mvSysEthPhyApi.h"
+#include "mvSysNetaApi.h"
+
+#include "gbe/mvNeta.h"
+#include "bm/mvBm.h"
+#include "pnc/mvPnc.h"
+#include "pnc/mvTcam.h"
+#include "pmt/mvPmt.h"
+
+#include "mv_switch.h"
+#include "mv_netdev.h"
+#include "mv_eth_tool.h"
+#include "cpu/mvCpuCntrs.h"
+
+
+#ifdef CONFIG_MV_ETH_NFP_EXT
+int                mv_ctrl_nfp_ext_port[NFP_EXT_NUM];
+int                mv_ctrl_nfp_ext_en[NFP_EXT_NUM];
+struct net_device *mv_ctrl_nfp_ext_netdev[NFP_EXT_NUM];
+
+static void mv_eth_nfp_ext_skb_destructor(struct sk_buff *skb)
+{
+	consume_skb(skb_shinfo(skb)->destructor_arg);
+}
+
+static int mv_eth_nfp_ext_tx(struct eth_port *pp, struct eth_pbuf *pkt, MV_NFP_RESULT *res);
+#endif /* CONFIG_MV_ETH_NFP_EXT */
+
+static INLINE int mv_eth_nfp_need_fragment(MV_NFP_RESULT *res)
+{
+	if (res->flags & MV_NFP_RES_IP_INFO_VALID)
+		return (res->ipInfo.ipLen > res->mtu);
+
+	return 0;
+}
+
+/* Enable NFP */
+int mv_eth_nfp_ctrl(struct net_device *dev, int en)
+{
+	struct eth_port *pp = MV_ETH_PRIV(dev);
+
+	if (pp == NULL)
+		return 1;
+
+	if (en) {
+		pp->flags |= MV_ETH_F_NFP_EN;
+		printk(KERN_INFO "%s: NFP enabled\n", dev->name);
+	} else {
+		pp->flags &= ~MV_ETH_F_NFP_EN;
+		printk(KERN_INFO "%s: NFP disabled\n", dev->name);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(mv_eth_nfp_ctrl);
+
+#ifdef CONFIG_MV_ETH_NFP_EXT
+int mv_eth_nfp_ext_add(struct net_device *dev, int port)
+{
+	int i;
+
+	/* find free place in mv_ctrl_nfp_ext_netdev */
+	for (i = 0; i < NFP_EXT_NUM; i++) {
+		if (mv_ctrl_nfp_ext_netdev[i] == NULL) {
+			mv_ctrl_nfp_ext_netdev[i] = dev;
+			mv_ctrl_nfp_ext_port[i] = port;
+			mv_ctrl_nfp_ext_en[i] = 0;
+			return 0;
+		}
+	}
+	printk(KERN_INFO "External interface %s can't be bound to NFP\n", dev->name);
+	return 1;
+}
+EXPORT_SYMBOL(mv_eth_nfp_ext_add);
+
+int mv_eth_nfp_ext_del(struct net_device *dev)
+{
+	int i;
+
+	/* find free place in mv_ctrl_nfp_ext_netdev */
+	for (i = 0; i < NFP_EXT_NUM; i++) {
+		if (mv_ctrl_nfp_ext_netdev[i] == dev) {
+			mv_ctrl_nfp_ext_netdev[i] = NULL;
+			return 0;
+		}
+	}
+	printk(KERN_INFO "External interface %s is not bound to NFP\n", dev->name);
+	return 1;
+}
+EXPORT_SYMBOL(mv_eth_nfp_ext_del);
+
+int mv_eth_nfp_ext_ctrl(struct net_device *dev, int en)
+{
+	int i;
+
+	/* find net_device in mv_ctrl_nfp_ext_netdev */
+	for (i = 0; i < NFP_EXT_NUM; i++) {
+		if (mv_ctrl_nfp_ext_netdev[i] == dev) {
+			if (en)
+				printk(KERN_INFO "%s: NFP enabled for external interface\n", dev->name);
+			 else
+				printk(KERN_INFO "%s: NFP disabled for external interface\n", dev->name);
+
+			mv_ctrl_nfp_ext_en[i] = en;
+			return 0;
+		}
+	}
+	printk(KERN_INFO "External interface %s is not bind to NFP\n", dev->name);
+	return 1;
+}
+EXPORT_SYMBOL(mv_eth_nfp_ext_ctrl);
+#else
+int mv_eth_nfp_ext_add(struct net_device *dev, int port)
+{
+	printk(KERN_INFO "NFP doesn't support external interfaces\n");
+	return 1;
+}
+EXPORT_SYMBOL(mv_eth_nfp_ext_add);
+
+int mv_eth_nfp_ext_del(struct net_device *dev)
+{
+	printk(KERN_INFO "NFP doesn't support external interfaces\n");
+	return 1;
+}
+EXPORT_SYMBOL(mv_eth_nfp_ext_del);
+
+int mv_eth_nfp_ext_ctrl(struct net_device *dev, int en)
+{
+	printk(KERN_INFO "NFP doesn't support external interfaces\n");
+	return 1;
+}
+EXPORT_SYMBOL(mv_eth_nfp_ext_ctrl);
+#endif /* CONFIG_MV_ETH_NFP_EXT */
+
+
+static inline int mv_eth_frag_build_hdr_desc(struct eth_port *priv, struct tx_queue *txq_ctrl,
+					MV_U8 *pktData, int mac_hdr_len, int ip_hdr_len,
+					     int frag_size, int left_len, int frag_offset)
+{
+	struct neta_tx_desc *tx_desc;
+	struct iphdr        *iph;
+	MV_U8               *data;
+	int                 align;
+	MV_U16              frag_ctrl;
+
+	tx_desc = mv_eth_tx_desc_get(txq_ctrl, 1);
+	if (tx_desc == NULL)
+		return -1;
+
+	txq_ctrl->txq_count++;
+
+	data = mv_eth_extra_pool_get(priv);
+	if (data == NULL)
+		return -1;
+
+	tx_desc->command = mvNetaTxqDescCsum(mac_hdr_len, MV_16BIT_BE(MV_IP_TYPE), ip_hdr_len, 0);
+	tx_desc->command |= NETA_TX_F_DESC_MASK;
+	tx_desc->dataSize = mac_hdr_len + ip_hdr_len;
+
+	txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = ((MV_ULONG)data | MV_ETH_SHADOW_EXT);
+	mv_eth_shadow_inc_put(txq_ctrl);
+
+	/* Check for IP header alignment */
+	align = 4 - (mac_hdr_len & 3);
+	data += align;
+	memcpy(data, pktData, mac_hdr_len + ip_hdr_len);
+
+	iph = (struct iphdr *)(data + mac_hdr_len);
+
+	iph->tot_len = htons(frag_size + ip_hdr_len);
+
+	/* update frag_offset and MF flag in IP header - packet can be already fragmented */
+	frag_ctrl = ntohs(iph->frag_off);
+	frag_offset += ((frag_ctrl & IP_OFFSET) << 3);
+	frag_ctrl &= ~IP_OFFSET;
+	frag_ctrl |= ((frag_offset >> 3) & IP_OFFSET);
+
+	if (((frag_ctrl & IP_MF) == 0) && (left_len != frag_size))
+		frag_ctrl |= IP_MF;
+
+	iph->frag_off = htons(frag_ctrl);
+
+	/* if it was PPPoE, update the PPPoE payload fields  */
+	if ((*((char *)iph - MV_PPPOE_HDR_SIZE - 1) == 0x64) &&
+		(*((char *)iph - MV_PPPOE_HDR_SIZE - 2) == 0x88)) {
+		PPPoE_HEADER *pPPPNew = (PPPoE_HEADER *)((char *)iph - MV_PPPOE_HDR_SIZE);
+		pPPPNew->len = htons(frag_size + ip_hdr_len + MV_PPP_HDR_SIZE);
+	}
+	tx_desc->bufPhysAddr = mvOsCacheFlush(NULL, data, tx_desc->dataSize);
+	mv_eth_tx_desc_flush(tx_desc);
+
+	return 0;
+}
+
+static inline int mv_eth_frag_build_data_desc(struct tx_queue *txq_ctrl, MV_U8 *frag_ptr, int frag_size,
+						int data_left, struct eth_pbuf *pkt)
+{
+	struct neta_tx_desc *tx_desc;
+
+	tx_desc = mv_eth_tx_desc_get(txq_ctrl, 1);
+	if (tx_desc == NULL)
+		return -1;
+
+	txq_ctrl->txq_count++;
+	tx_desc->dataSize = frag_size;
+	tx_desc->bufPhysAddr = pkt->physAddr + (frag_ptr - pkt->pBuf);
+	tx_desc->command = (NETA_TX_L_DESC_MASK | NETA_TX_Z_PAD_MASK);
+
+	if (frag_size == data_left)
+		txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = (u32) pkt;
+	else
+		txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = 0;
+
+	mv_eth_shadow_inc_put(txq_ctrl);
+	mv_eth_tx_desc_flush(tx_desc);
+
+	return 0;
+}
+
+static int mv_eth_nfp_fragment_tx(struct eth_port *pp, struct net_device *dev, MV_NFP_RESULT *res,
+					   struct tx_queue *txq_ctrl, struct eth_pbuf *pkt)
+{
+	MV_IP_HEADER_INFO *pIpInfo = &res->ipInfo;
+	int   pkt_offset = (pkt->offset + res->shift);
+	int   ip_offset = (pIpInfo->ipOffset - res->shift);
+	int   frag_size = MV_ALIGN_DOWN((res->mtu - res->ipInfo.ipHdrLen), 8);
+	int   data_left = pIpInfo->ipLen - res->ipInfo.ipHdrLen;
+	int   pktNum = (data_left / frag_size) + ((data_left % frag_size) ? 1 : 0);
+	MV_U8 *pData = pkt->pBuf + pkt_offset;
+	MV_U8 *payloadStart = pData + ip_offset + pIpInfo->ipHdrLen;
+	MV_U8 *frag_ptr = payloadStart;
+	int   i, total_bytes = 0;
+	int   save_txq_count = txq_ctrl->txq_count;
+
+	if ((txq_ctrl->txq_count + (pktNum * 2)) >= txq_ctrl->txq_size) {
+		/*
+		printk(KERN_ERR "%s: no TX descriptors - txq_count=%d, len=%d, frag_size=%d\n",
+					__func__, txq_ctrl->txq_count, data_left, frag_size);
+		*/
+		STAT_ERR(txq_ctrl->stats.txq_err++);
+		goto outNoTxDesc;
+	}
+
+	for (i = 0; i < pktNum; i++) {
+
+		if (mv_eth_frag_build_hdr_desc(pp, txq_ctrl, pData, ip_offset, pIpInfo->ipHdrLen,
+					frag_size, data_left, frag_ptr - payloadStart))
+			goto outNoTxDesc;
+
+		total_bytes += (ip_offset + pIpInfo->ipHdrLen);
+
+		if (mv_eth_frag_build_data_desc(txq_ctrl, frag_ptr, frag_size, data_left, pkt))
+			goto outNoTxDesc;
+
+		total_bytes += frag_size;
+		frag_ptr += frag_size;
+		data_left -= frag_size;
+		frag_size = MV_MIN(frag_size, data_left);
+	}
+	/* Flush + Invalidate cache for MAC + IP header + L4 header */
+	pData = pkt->pBuf + pkt->offset;
+	if (res->shift < 0)
+		pData += res->shift;
+
+	mvOsCacheMultiLineFlushInv(NULL, pData, (res->pWrite - pData));
+
+#ifdef CONFIG_MV_PON
+	if (MV_PON_PORT(pp->port))
+		mvNetaPonTxqBytesAdd(pp->port, txq_ctrl->txp, txq_ctrl->txq, total_bytes);
+#endif /* CONFIG_MV_PON */
+
+	dev->stats.tx_packets += pktNum;
+	dev->stats.tx_bytes += total_bytes;
+	STAT_DBG(txq_ctrl->stats.txq_tx += (pktNum * 2));
+
+	mvNetaTxqPendDescAdd(pp->port, txq_ctrl->txp, txq_ctrl->txq, pktNum * 2);
+
+	return pktNum * 2;
+
+outNoTxDesc:
+	while (save_txq_count < txq_ctrl->txq_count) {
+		txq_ctrl->txq_count--;
+		mv_eth_shadow_dec_put(txq_ctrl);
+		mvNetaTxqPrevDescGet(txq_ctrl->q);
+	}
+	/* Invalidate cache for MAC + IP header + L4 header */
+	pData = pkt->pBuf + pkt->offset;
+	if (res->shift < 0)
+		pData += res->shift;
+
+	mvOsCacheMultiLineInv(NULL, pData, (res->pWrite - pData));
+
+	return 0;
+}
+
+
+static MV_STATUS mv_eth_nfp_tx(struct eth_pbuf *pkt, MV_NFP_RESULT *res)
+{
+	struct net_device *dev = (struct net_device *)res->dev;
+	struct eth_port *pp = MV_ETH_PRIV(dev);
+	struct neta_tx_desc *tx_desc;
+	u32 tx_cmd, physAddr;
+	MV_STATUS status = MV_OK;
+	struct tx_queue *txq_ctrl;
+	int use_bm, pkt_offset, frags = 1;
+
+	if (!test_bit(MV_ETH_F_STARTED_BIT, &(pp->flags))) {
+		STAT_INFO(pp->stats.netdev_stop++);
+#ifdef CONFIG_MV_NETA_DEBUG_CODE
+		if (pp->flags & MV_ETH_F_DBG_TX)
+			printk(KERN_ERR "%s: STARTED_BIT = 0 , packet is dropped.\n", __func__);
+#endif /* CONFIG_MV_NETA_DEBUG_CODE */
+		return MV_DROPPED;
+	}
+
+	/* Get TxQ to send packet */
+	/* Check TXQ classification */
+	if ((res->flags & MV_NFP_RES_TXQ_VALID) == 0)
+		res->txq = pp->cpu_config[smp_processor_id()]->txq;
+
+	if ((res->flags & MV_NFP_RES_TXP_VALID) == 0)
+		res->txp = pp->txp;
+
+	txq_ctrl = &pp->txq_ctrl[res->txp * CONFIG_MV_ETH_TXQ + res->txq];
+
+	if (txq_ctrl->flags & MV_ETH_F_TX_SHARED)
+		spin_lock(&txq_ctrl->queue_lock);
+
+	/* Do fragmentation if needed */
+	if (mv_eth_nfp_need_fragment(res)) {
+		frags = mv_eth_nfp_fragment_tx(pp, dev, res, txq_ctrl, pkt);
+		if (frags == 0) {
+			dev->stats.tx_dropped++;
+			status = MV_DROPPED;
+		}
+		STAT_INFO(pp->stats.tx_fragment++);
+		goto out;
+	}
+
+	/* Get next descriptor for tx, single buffer, so FIRST & LAST */
+	tx_desc = mv_eth_tx_desc_get(txq_ctrl, 1);
+	if (tx_desc == NULL) {
+
+		/* No resources: Drop */
+		dev->stats.tx_dropped++;
+		status = MV_DROPPED;
+		goto out;
+	}
+
+	if (res->flags & MV_NFP_RES_L4_CSUM_NEEDED) {
+		MV_U8 *pData = pkt->pBuf + pkt->offset;
+
+		if (res->shift < 0)
+			pData += res->shift;
+
+		mvOsCacheMultiLineFlushInv(NULL, pData, (res->pWrite - pData));
+	}
+
+	txq_ctrl->txq_count++;
+
+	/* tx_cmd - word accumulated by NFP processing */
+	tx_cmd = res->tx_cmd;
+
+	if (res->flags & MV_NFP_RES_IP_INFO_VALID) {
+		if (res->ipInfo.family == MV_INET) {
+			tx_cmd |= NETA_TX_L3_IP4 | NETA_TX_IP_CSUM_MASK |
+				((res->ipInfo.ipOffset - res->shift) << NETA_TX_L3_OFFSET_OFFS) |
+				((res->ipInfo.ipHdrLen >> 2) << NETA_TX_IP_HLEN_OFFS);
+		} else {
+			tx_cmd |= NETA_TX_L3_IP6 |
+				((res->ipInfo.ipOffset - res->shift) << NETA_TX_L3_OFFSET_OFFS) |
+				((res->ipInfo.ipHdrLen >> 2) << NETA_TX_IP_HLEN_OFFS);
+		}
+	}
+
+#ifdef CONFIG_MV_ETH_BM_CPU
+	if (MV_NETA_BM_CAP())
+		use_bm = 1;
+	else
+		use_bm = 0;
+#else
+	use_bm = 0;
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+	pkt_offset = pkt->offset + res->shift;
+	physAddr = pkt->physAddr;
+	if (pkt_offset > NETA_TX_PKT_OFFSET_MAX) {
+		use_bm = 0;
+		physAddr += pkt_offset;
+		pkt_offset = 0;
+	}
+
+	if ((pkt->pool >= 0) && (pkt->pool < MV_ETH_BM_POOLS)) {
+		if (use_bm) {
+			tx_cmd |= NETA_TX_BM_ENABLE_MASK | NETA_TX_BM_POOL_ID_MASK(pkt->pool);
+			txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = (u32) NULL;
+		} else
+			txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = (u32) pkt;
+	} else {
+		/* skb from external interface */
+		txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = ((u32)pkt->osInfo | MV_ETH_SHADOW_SKB);
+	}
+
+	mv_eth_shadow_inc_put(txq_ctrl);
+
+	tx_cmd |= NETA_TX_PKT_OFFSET_MASK(pkt_offset);
+
+	tx_desc->command = tx_cmd | NETA_TX_FLZ_DESC_MASK;
+	tx_desc->dataSize = pkt->bytes;
+	tx_desc->bufPhysAddr = physAddr;
+
+	/* FIXME: PON only? --BK */
+	tx_desc->hw_cmd = pp->hw_cmd;
+
+#ifdef CONFIG_MV_NETA_DEBUG_CODE
+	if (pp->flags & MV_ETH_F_DBG_TX) {
+		printk(KERN_ERR "%s - nfp_tx_%lu: port=%d, txp=%d, txq=%d\n",
+		       dev->name, dev->stats.tx_packets, pp->port, res->txp, res->txq);
+		mv_eth_tx_desc_print(tx_desc);
+		mv_eth_pkt_print(pkt);
+	}
+#endif /* CONFIG_MV_NETA_DEBUG_CODE */
+
+	mv_eth_tx_desc_flush(tx_desc);
+
+#ifdef CONFIG_MV_PON
+	if (MV_PON_PORT(pp->port))
+		mvNetaPonTxqBytesAdd(pp->port, res->txp, res->txq, pkt->bytes);
+#endif /* CONFIG_MV_PON */
+
+	/* Enable transmit by update PENDING counter */
+	mvNetaTxqPendDescAdd(pp->port, res->txp, res->txq, 1);
+
+	/* FIXME: stats includes MH --BK */
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += pkt->bytes;
+	STAT_DBG(txq_ctrl->stats.txq_tx++);
+
+out:
+#ifndef CONFIG_MV_NETA_TXDONE_ISR
+	if (txq_ctrl->txq_count >= mv_ctrl_txdone) {
+		u32 tx_done = mv_eth_txq_done(pp, txq_ctrl);
+
+		STAT_DIST((tx_done < pp->dist_stats.tx_done_dist_size) ? pp->dist_stats.tx_done_dist[tx_done]++ : 0);
+	}
+	/* If after calling mv_eth_txq_done, txq_ctrl->txq_count equals frags, we need to set the timer */
+	if ((txq_ctrl->txq_count == frags) && (frags > 0))
+		mv_eth_add_tx_done_timer(pp->cpu_config[smp_processor_id()]);
+
+#endif /* CONFIG_MV_NETA_TXDONE_ISR */
+
+	if (txq_ctrl->flags & MV_ETH_F_TX_SHARED)
+		spin_unlock(&txq_ctrl->queue_lock);
+
+	return status;
+}
+
+/* Main NFP function returns the following error codes:
+ *  MV_OK - packet processed and sent successfully by NFP
+ *  MV_TERMINATE - packet can't be processed by NFP - pass to Linux processing
+ *  MV_DROPPED - packet processed by NFP, but not sent (dropped)
+ */
+MV_STATUS mv_eth_nfp(struct eth_port *pp, int rxq, struct neta_rx_desc *rx_desc,
+				struct eth_pbuf *pkt, struct bm_pool *pool)
+{
+	MV_STATUS       status;
+	MV_NFP_RESULT   res;
+	bool            tx_external = false;
+
+#ifdef CONFIG_MV_NETA_DEBUG_CODE
+	if (pp->flags & MV_ETH_F_DBG_RX) {
+		mv_eth_rx_desc_print(rx_desc);
+		mv_eth_pkt_print(pkt);
+	}
+#endif /* CONFIG_MV_NETA_DEBUG_CODE */
+
+	status = nfp_core_p->nfp_rx(pp->port, rx_desc, pkt, &res);
+	tx_external = (res.flags & MV_NFP_RES_NETDEV_EXT);
+
+	if (status == MV_OK) {
+
+		if (res.flags & MV_NFP_RES_L4_CSUM_NEEDED) {
+			MV_IP_HEADER_INFO *pIpInfo = &res.ipInfo;
+			MV_U8 *pIpHdr = pIpInfo->ip_hdr.l3;
+
+			if (pIpInfo->ipProto == MV_IP_PROTO_TCP) {
+				MV_TCP_HEADER *pTcpHdr = (MV_TCP_HEADER *) ((char *)pIpHdr + pIpInfo->ipHdrLen);
+
+				pTcpHdr->chksum = csum_fold(csum_partial((char *)res.diffL4, sizeof(res.diffL4),
+									~csum_unfold(pTcpHdr->chksum)));
+				res.pWrite = (MV_U8 *)pTcpHdr + sizeof(MV_TCP_HEADER);
+			} else {
+				MV_UDP_HEADER *pUdpHdr = (MV_UDP_HEADER *) ((char *)pIpHdr + pIpInfo->ipHdrLen);
+
+				pUdpHdr->check = csum_fold(csum_partial((char *)res.diffL4, sizeof(res.diffL4),
+									~csum_unfold(pUdpHdr->check)));
+				res.pWrite = (MV_U8 *)pUdpHdr + sizeof(MV_UDP_HEADER);
+			}
+		}
+
+#ifdef CONFIG_MV_ETH_NFP_EXT
+		if  (tx_external) {
+			/* INT RX -> EXT TX */
+			mv_eth_nfp_ext_tx(pp, pkt, &res);
+			status = MV_OK;
+		} else
+#endif /* CONFIG_MV_ETH_NFP_EXT */
+			/* INT RX -> INT TX */
+			status = mv_eth_nfp_tx(pkt, &res);
+	}
+	if (status == MV_OK) {
+		STAT_DBG(pp->stats.rx_nfp++);
+
+		/* Packet transmited - refill now */
+		if (!tx_external && mv_eth_pool_bm(pool)) {
+			/* BM - no refill */
+			mvOsCacheLineInv(NULL, rx_desc);
+			return MV_OK;
+		}
+
+		if (!tx_external || mv_eth_is_recycle())
+			pkt = NULL;
+
+		if (mv_eth_refill(pp, rxq, pkt, pool, rx_desc)) {
+			printk(KERN_ERR "Linux processing - Can't refill\n");
+			pp->rxq_ctrl[rxq].missed++;
+			mv_eth_add_cleanup_timer(pp->cpu_config[smp_processor_id()]);
+			return MV_FAIL;
+		}
+		return MV_OK;
+	}
+	if (status == MV_DROPPED) {
+		/* Refill the same buffer */
+		STAT_DBG(pp->stats.rx_nfp_drop++);
+		mv_eth_rxq_refill(pp, rxq, pkt, pool, rx_desc);
+		return MV_OK;
+	}
+	return status;
+}
+
+#ifdef CONFIG_MV_ETH_NFP_EXT
+static int mv_eth_nfp_ext_tx_fragment(struct net_device *dev, struct sk_buff *skb, MV_NFP_RESULT *res)
+{
+	unsigned int      dlen, doff, error, flen, fsize, l, max_dlen, max_plen;
+	unsigned int      hdrlen, offset;
+	struct iphdr      *ip, *nip;
+	struct sk_buff    *new;
+	struct page       *page;
+	int               mac_header_len;
+	MV_IP_HEADER_INFO *pIpInfo = &res->ipInfo;
+
+	max_plen = dev->mtu + dev->hard_header_len;
+
+	SKB_LINEAR_ASSERT(skb);
+
+	mac_header_len = (pIpInfo->ipOffset - res->shift);
+	ip = (struct iphdr *)(skb->data + mac_header_len);
+
+	hdrlen = mac_header_len + res->ipInfo.ipHdrLen;
+
+	doff = hdrlen;
+	dlen = skb_headlen(skb) - hdrlen;
+	offset = ntohs(ip->frag_off) & IP_OFFSET;
+	max_dlen = (max_plen - hdrlen) & ~0x07;
+
+	do {
+		new = dev_alloc_skb(hdrlen);
+		if (!new)
+			break;
+
+		/* Setup new packet metadata */
+		new->protocol = IPPROTO_IP;
+		new->ip_summed = CHECKSUM_PARTIAL;
+		skb_set_network_header(new, mac_header_len);
+
+		/* Copy original IP header */
+		memcpy(skb_put(new, hdrlen), skb->data, hdrlen);
+
+		/* Append data portion */
+		fsize = flen = min(max_dlen, dlen);
+
+		skb_get(skb);
+		skb_shinfo(new)->destructor_arg = skb;
+		new->destructor = mv_eth_nfp_ext_skb_destructor;
+
+		while (fsize) {
+			l = PAGE_SIZE - ((unsigned long)(skb->data + doff) & ~PAGE_MASK);
+			if (l > fsize)
+				l = fsize;
+
+			page = virt_to_page(skb->data + doff);
+			get_page(page);
+			skb_add_rx_frag(new, skb_shinfo(new)->nr_frags, page,
+					(unsigned long)(skb->data + doff) &
+								~PAGE_MASK, l);
+			dlen -= l;
+			doff += l;
+			fsize -= l;
+		}
+
+		/* Fixup IP header */
+		nip = ip_hdr(new);
+		nip->tot_len = htons((4 * ip->ihl) + flen);
+		nip->frag_off = htons(offset |
+				(dlen ? IP_MF : (IP_MF & ntohs(ip->frag_off))));
+
+		/* if it was PPPoE, update the PPPoE payload fields
+		adapted from  mv_eth_frag_build_hdr_desc */
+		if ((*((char *)nip - MV_PPPOE_HDR_SIZE - 1) == 0x64) &&
+			(*((char *)nip - MV_PPPOE_HDR_SIZE - 2) == 0x88)) {
+			PPPoE_HEADER *pPPPNew = (PPPoE_HEADER *)((char *)nip - MV_PPPOE_HDR_SIZE);
+			pPPPNew->len = htons(flen + 4*ip->ihl + MV_PPP_HDR_SIZE);
+	    }
+
+		offset += flen / 8;
+
+		/* Recalculate IP checksum */
+		new->ip_summed = CHECKSUM_NONE;
+		nip->check = 0;
+		nip->check = ip_fast_csum(nip, nip->ihl);
+
+		/* TX packet */
+		error = dev->netdev_ops->ndo_start_xmit(new, dev);
+		if (error)
+			break;
+	} while (dlen);
+
+	if (!new)
+		return -ENOMEM;
+
+	if (error) {
+		consume_skb(new);
+		return error;
+	}
+
+	/* We are no longer use original skb */
+	consume_skb(skb);
+	return 0;
+}
+
+static int mv_eth_nfp_ext_tx(struct eth_port *pp, struct eth_pbuf *pkt, MV_NFP_RESULT *res)
+{
+	struct sk_buff *skb;
+	struct net_device *dev = (struct net_device *)res->dev;
+
+	/* prepare SKB for transmit */
+	skb = (struct sk_buff *)(pkt->osInfo);
+
+	skb->data += res->shift;
+	skb->tail = skb->data + pkt->bytes ;
+	skb->len = pkt->bytes;
+
+	skb_reset_mac_header(skb);
+	skb_reset_network_header(skb);
+
+	if (res->flags & MV_NFP_RES_IP_INFO_VALID) {
+
+		if (res->ipInfo.family == MV_INET) {
+			struct iphdr *iph = (struct iphdr *)res->ipInfo.ip_hdr.ip4;
+
+			if (mv_eth_nfp_need_fragment(res))
+				return mv_eth_nfp_ext_tx_fragment(dev, skb, res);
+
+			/* Recalculate IP checksum for IPv4 if necessary */
+			skb->ip_summed = CHECKSUM_NONE;
+			iph->check = 0;
+			iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+		}
+		skb_set_network_header(skb, res->ipInfo.ipOffset - res->shift);
+	}
+
+	if (pp) {
+		/* ingress port is GBE */
+#ifdef ETH_SKB_DEBUG
+		mv_eth_skb_check(skb);
+#endif /* ETH_SKB_DEBUG */
+
+#ifdef CONFIG_MV_NETA_SKB_RECYCLE
+		if (mv_eth_is_recycle()) {
+			skb->skb_recycle = mv_eth_skb_recycle;
+			skb->hw_cookie = (__u32)pkt;
+		}
+#endif /* CONFIG_MV_NETA_SKB_RECYCLE */
+	}
+	return dev->netdev_ops->ndo_start_xmit(skb, dev);
+}
+
+
+static MV_STATUS mv_eth_nfp_ext_rxd_from_info(MV_EXT_PKT_INFO *pktInfo, struct neta_rx_desc *rxd)
+{
+	if (pktInfo->flags & MV_EXT_VLAN_EXIST_MASK)
+		NETA_RX_SET_VLAN(rxd);
+
+	if (pktInfo->flags & MV_EXT_PPP_EXIST_MASK)
+		NETA_RX_SET_PPPOE(rxd);
+
+	if (pktInfo->l3_type == ETH_P_IP)
+		NETA_RX_L3_SET_IP4(rxd);
+	else if (pktInfo->l3_type == ETH_P_IPV6)
+		NETA_RX_L3_SET_IP6(rxd);
+	else {
+		NETA_RX_L3_SET_UN(rxd);
+		return MV_OK;
+	}
+
+	if (pktInfo->flags & MV_EXT_IP_FRAG_MASK)
+		NETA_RX_IP_SET_FRAG(rxd);
+
+
+	if (!pktInfo->l3_offset || !pktInfo->l3_hdrlen)
+		return -1;
+
+	NETA_RX_SET_IPHDR_OFFSET(rxd, pktInfo->l3_offset + MV_ETH_MH_SIZE);
+	NETA_RX_SET_IPHDR_HDRLEN(rxd, (pktInfo->l3_hdrlen >> 2));
+
+	if ((pktInfo->flags & MV_EXT_L3_VALID_MASK) == 0) {
+		NETA_RX_L3_SET_IP4_ERR(rxd);
+		return MV_OK;
+	}
+
+	switch (pktInfo->l4_proto) {
+	case IPPROTO_TCP:
+		NETA_RX_L4_SET_TCP(rxd);
+		break;
+
+	case IPPROTO_UDP:
+		NETA_RX_L4_SET_UDP(rxd);
+		break;
+
+	default:
+		NETA_RX_L4_SET_OTHER(rxd);
+		break;
+	}
+
+	if (pktInfo->flags & MV_EXT_L4_VALID_MASK)
+		NETA_RX_L4_CSUM_SET_OK(rxd);
+
+	return MV_OK;
+}
+
+
+static MV_STATUS mv_eth_nfp_ext_rxd_from_ipv4(int ofs, struct iphdr *iph, struct sk_buff *skb, struct neta_rx_desc *rxd)
+{
+	int l4_proto = 0;
+	int hdrlen;
+	int tmp;
+
+	NETA_RX_L3_SET_IP4(rxd);
+	hdrlen = iph->ihl << 2;
+	NETA_RX_SET_IPHDR_HDRLEN(rxd, iph->ihl);
+
+	if (ip_fast_csum((unsigned char *)iph, iph->ihl)) {
+		NETA_RX_L3_SET_IP4_ERR(rxd);
+		return MV_OK;
+	}
+
+	switch ((l4_proto = iph->protocol)) {
+	case IPPROTO_TCP:
+		NETA_RX_L4_SET_TCP(rxd);
+		break;
+	case IPPROTO_UDP:
+		NETA_RX_L4_SET_UDP(rxd);
+		break;
+	default:
+		NETA_RX_L4_SET_OTHER(rxd);
+		l4_proto = 0;
+		break;
+	}
+
+	tmp = ntohs(iph->frag_off);
+	if ((tmp & IP_MF) != 0 || (tmp & IP_OFFSET) != 0) {
+		NETA_RX_IP_SET_FRAG(rxd);
+		return MV_OK; /* cannot checksum fragmented */
+	}
+
+	if (!l4_proto)
+		return MV_OK; /* can't proceed without l4_proto in {UDP, TCP} */
+
+	if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+		NETA_RX_L4_CSUM_SET_OK(rxd);
+		return MV_OK;
+	}
+
+	if (l4_proto == IPPROTO_UDP) {
+		struct udphdr *uh = (struct udphdr *)((char *)iph + hdrlen);
+
+		if (uh->check == 0)
+			NETA_RX_L4_CSUM_SET_OK(rxd);
+	}
+
+	/* Complete checksum with pseudo header */
+	if (skb->ip_summed == CHECKSUM_COMPLETE) {
+		if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len - hdrlen - ofs,
+			       l4_proto, skb->csum)) {
+			NETA_RX_L4_CSUM_SET_OK(rxd);
+			return MV_OK;
+		}
+	}
+
+	return MV_OK;
+}
+
+static MV_STATUS mv_eth_nfp_ext_rxd_from_ipv6(int ofs, struct sk_buff *skb, struct neta_rx_desc *rxd)
+{
+	struct ipv6hdr *ip6h;
+	int l4_proto = 0;
+	int hdrlen;
+	__u8 nexthdr;
+
+	NETA_RX_L3_SET_IP6(rxd);
+
+	hdrlen = sizeof(struct ipv6hdr);
+	NETA_RX_SET_IPHDR_HDRLEN(rxd, (hdrlen >> 2));
+
+	ip6h = (struct ipv6hdr *)(skb->data + ofs);
+
+	nexthdr = ip6h->nexthdr;
+
+	/* No support for extension headers. Only TCP or UDP */
+	if (nexthdr == NEXTHDR_TCP) {
+		l4_proto = IPPROTO_TCP;
+		NETA_RX_L4_SET_TCP(rxd);
+	} else if (nexthdr == NEXTHDR_UDP) {
+		l4_proto = IPPROTO_UDP;
+		NETA_RX_L4_SET_UDP(rxd);
+	} else {
+		NETA_RX_L4_SET_OTHER(rxd);
+		return MV_OK;
+	}
+
+	if (skb->ip_summed == CHECKSUM_COMPLETE) {
+		if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, skb->len,
+				      l4_proto , skb->csum)) {
+			NETA_RX_L4_CSUM_SET_OK(rxd);
+			return MV_OK;
+		}
+	}
+	return MV_OK;
+}
+
+
+static MV_STATUS mv_eth_nfp_ext_rxd_build(struct sk_buff *skb, MV_EXT_PKT_INFO *pktInfo, struct neta_rx_desc *rxd)
+{
+	struct iphdr *iph;
+	int l3_proto = 0;
+	int ofs = 0;
+	MV_U16 tmp;
+
+	rxd->status = 0;
+	rxd->pncInfo = 0;
+
+	if (pktInfo)
+		return mv_eth_nfp_ext_rxd_from_info(pktInfo, rxd);
+
+	tmp = ntohs(skb->protocol);
+
+ ll:
+	switch (tmp) {
+	case ETH_P_IP:
+	case ETH_P_IPV6:
+		l3_proto = tmp;
+		break;
+
+	case ETH_P_PPP_SES:
+		NETA_RX_SET_PPPOE(rxd);
+		ofs += MV_PPPOE_HDR_SIZE;
+		switch (tmp = ntohs(*((MV_U16 *)&skb->data[ofs - 2]))) {
+		case 0x0021:
+			l3_proto = ETH_P_IP;
+			break;
+		case 0x0057:
+			l3_proto = ETH_P_IPV6;
+			break;
+		default:
+			goto non_ip;
+		}
+		break;
+
+	case ETH_P_8021Q:
+		/* Don't support double VLAN for now */
+		if (NETA_RX_IS_VLAN(rxd))
+			goto non_ip;
+
+		NETA_RX_SET_VLAN(rxd);
+		ofs = MV_VLAN_HLEN;
+
+		tmp = ntohs(*((MV_U16 *)&skb->data[2]));
+			goto ll;
+
+	default:
+	  goto non_ip;
+	}
+
+#ifndef CONFIG_MV_ETH_PNC
+	if (MV_NETA_PNC_CAP())
+		rxd->status |= ETH_RX_NOT_LLC_SNAP_FORMAT_MASK;
+#endif /* CONFIG_MV_ETH_PNC */
+
+	NETA_RX_SET_IPHDR_OFFSET(rxd, ETH_HLEN + MV_ETH_MH_SIZE + ofs);
+
+	iph = (struct iphdr *)(skb->data + ofs);
+
+	if (l3_proto == ETH_P_IP)
+		return mv_eth_nfp_ext_rxd_from_ipv4(ofs, iph, skb, rxd);
+
+	return mv_eth_nfp_ext_rxd_from_ipv6(ofs, skb, rxd);
+
+non_ip:
+	 NETA_RX_L3_SET_UN(rxd);
+	 return MV_OK;
+}
+
+void mv_eth_nfp_ext_pkt_info_print(MV_EXT_PKT_INFO *pktInfo)
+{
+	if (pktInfo == NULL)
+		return;
+
+	if (pktInfo->flags & MV_EXT_VLAN_EXIST_MASK)
+		printk(KERN_INFO "VLAN");
+
+	if (pktInfo->flags & MV_EXT_PPP_EXIST_MASK)
+		printk(KERN_INFO " PPPoE");
+
+	if (pktInfo->l3_type == ETH_P_IP)
+		printk(KERN_INFO " ipv4");
+	else if (pktInfo->l3_type == ETH_P_IPV6)
+		printk(KERN_INFO " ipv6");
+	else
+		printk(KERN_INFO " non-ip");
+
+	if (pktInfo->flags & MV_EXT_IP_FRAG_MASK)
+		printk(KERN_INFO " FRAG");
+
+	if (pktInfo->flags & MV_EXT_L3_VALID_MASK)
+		printk(KERN_INFO " L3CSUM_OK");
+
+	printk(" offset=%d, hdrlen=%d", pktInfo->l3_offset, pktInfo->l3_hdrlen);
+
+	if (pktInfo->l4_proto == IPPROTO_TCP)
+		printk(KERN_INFO " TCP");
+	else if (pktInfo->l4_proto == IPPROTO_UDP)
+		printk(KERN_INFO " UDP");
+
+	if (pktInfo->flags & MV_EXT_L4_VALID_MASK)
+		printk(KERN_INFO " L4CSUM_OK");
+
+	printk(KERN_INFO "\n");
+}
+
+
+/* Return values:   0 - packet successfully processed by NFP (transmitted or dropped) */
+/*                  1 - packet can't be processed by NFP  */
+/*                  2 - skb is not valid for NFP (not enough headroom or nonlinear) */
+/*                  3 - not enough info in pktInfo   */
+int mv_eth_nfp_ext(struct net_device *dev, struct sk_buff *skb, MV_EXT_PKT_INFO *pktInfo)
+{
+	MV_STATUS           status;
+	MV_NFP_RESULT       res;
+	struct neta_rx_desc rx_desc;
+	struct eth_pbuf     pkt;
+	int                 err = 1;
+	int                 i, port = -1;
+
+#define NEEDED_HEADROOM (MV_PPPOE_HDR_SIZE + MV_VLAN_HLEN)
+
+	/* Check that NFP is enabled for this external interface */
+	for (i = 0; i < NFP_EXT_NUM; i++) {
+		if ((mv_ctrl_nfp_ext_netdev[i] == dev) && (mv_ctrl_nfp_ext_en[i])) {
+			port = mv_ctrl_nfp_ext_port[i];
+			break;
+		}
+	}
+	if (port == -1) /* NFP is disabled */
+		return 1;
+
+	if (skb_is_nonlinear(skb)) {
+		printk(KERN_ERR "%s: skb=%p is nonlinear\n", __func__, skb);
+		return 2;
+	}
+
+	/* Prepare pkt structure */
+	pkt.offset = skb_headroom(skb) - (ETH_HLEN + MV_ETH_MH_SIZE);
+	if (pkt.offset < NEEDED_HEADROOM) {
+		/* we don't know at this stage if there will be added any of vlans or pppoe or both */
+		printk(KERN_ERR "%s: Possible problem: not enough headroom: %d < %d\n",
+				__func__, pkt.offset, NEEDED_HEADROOM);
+		return 2;
+	}
+
+	pkt.pBuf = skb->head;
+	pkt.bytes = skb->len + ETH_HLEN + MV_ETH_MH_SIZE;
+
+	/* Set invalid pool to prevent BM usage */
+	pkt.pool = MV_ETH_BM_POOLS;
+	pkt.physAddr = mvOsIoVirtToPhys(NULL, skb->head);
+	pkt.osInfo = (void *)skb;
+
+	/* prepare rx_desc structure */
+	status = mv_eth_nfp_ext_rxd_build(skb, pktInfo,  &rx_desc);
+	if (status != MV_OK)
+		return 3;
+
+/*	read_lock(&nfp_lock);*/
+	status = nfp_core_p->nfp_rx(port, &rx_desc, &pkt, &res);
+
+/*	read_unlock(&nfp_lock);*/
+
+	if (status == MV_OK) {
+		if  (res.flags & MV_NFP_RES_NETDEV_EXT) {
+			/* EXT RX -> EXT TX */
+			mv_eth_nfp_ext_tx(NULL, &pkt, &res);
+		} else {
+			/* EXT RX -> INT TX */
+			mvOsCacheFlush(NULL, pkt.pBuf + pkt.offset, pkt.bytes);
+			status = mv_eth_nfp_tx(&pkt, &res);
+			if (status != MV_OK)
+				dev_kfree_skb_any(skb);
+		}
+		err = 0;
+	} else if (status == MV_DROPPED) {
+		dev_kfree_skb_any(skb);
+		err = 0;
+	}
+	return err;
+}
+#endif /* CONFIG_MV_ETH_NFP_EXT */
diff --git a/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_qos_sysfs.c b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_qos_sysfs.c
new file mode 100644
index 000000000000..712f697aa504
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_qos_sysfs.c
@@ -0,0 +1,299 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "gbe/mvNeta.h"
+#include "mv_eth_sysfs.h"
+#include "mv_netdev.h"
+
+static ssize_t mv_eth_help(char *b)
+{
+	int o = 0; /* buffer offset */
+	int s = PAGE_SIZE; /* buffer size */
+
+	o += scnprintf(b+o, s-o, "p, txq, rxq, cpu                      - are dec numbers\n");
+	o += scnprintf(b+o, s-o, "v, tos                                - are hex numbers\n");
+	o += scnprintf(b+o, s-o, "\n");
+
+	o += scnprintf(b+o, s-o, "echo p             > tos           - show RX and TX TOS map for port <p>\n");
+	o += scnprintf(b+o, s-o, "echo p             > vprio         - show VLAN priority map for port <p>\n");
+	o += scnprintf(b+o, s-o, "echo p rxq tos     > rxq_tos       - set <rxq> for incoming IP packets with <tos>\n");
+	o += scnprintf(b+o, s-o, "echo p rxq t       > rxq_type      - set RXQ for different packet types. t=0-bpdu, 1-arp, 2-tcp, 3-udp\n");
+	o += scnprintf(b+o, s-o, "echo p rxq prio    > rxq_vlan      - set <rxq> for incoming VLAN packets with <prio>\n");
+	o += scnprintf(b+o, s-o, "echo p txq cpu tos > txq_tos       - set <txq> for outgoing IP packets with <tos> handeled by <cpu>\n");
+
+	return o;
+}
+
+
+static ssize_t mv_eth_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	off = mv_eth_help(buf);
+
+	return off;
+}
+
+int pnc_run_rxq_type(int port, int q, int t)
+{
+	void *port_hndl = mvNetaPortHndlGet(port);
+
+	if (port_hndl == NULL)
+		return 1;
+#ifdef CONFIG_MV_ETH_PNC
+	if (MV_NETA_PNC_CAP()) {
+		if (!mv_eth_pnc_ctrl_en) {
+			pr_err("%s: PNC control is not supported\n", __func__);
+			return 1;
+		}
+
+		switch (t) {
+		case 1:
+			pnc_etype_arp(q);
+			break;
+		case 2:
+			pnc_ip4_tcp(q);
+			break;
+		case 3:
+			pnc_ip4_udp(q);
+			break;
+		default:
+			pr_err("unsupported packet type: value=%d\n", t);
+			return 1;
+		}
+	}
+#endif /* CONFIG_MV_ETH_PNC */
+	return 0;
+}
+
+int neta_run_rxq_type(int port, int q, int t)
+{
+	void *port_hndl = mvNetaPortHndlGet(port);
+
+	if (port_hndl == NULL)
+		return 1;
+	switch (t) {
+	case 0:
+		mvNetaBpduRxq(port, q);
+		break;
+	case 1:
+		mvNetaArpRxq(port, q);
+		break;
+	case 2:
+		mvNetaTcpRxq(port, q);
+		break;
+	case 3:
+		mvNetaUdpRxq(port, q);
+		break;
+	default:
+		pr_err("unknown packet type: value=%d\n", t);
+		return 1;
+	}
+
+	return 0;
+}
+
+static ssize_t mv_eth_port_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read port and value */
+	err = p = 0;
+	sscanf(buf, "%d", &p);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "tos")) {
+		mv_eth_tos_map_show(p);
+	} else if (!strcmp(name, "vprio")) {
+		mv_eth_vlan_prio_show(p);
+	} else {
+		err = 1;
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		pr_err("%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_eth_3_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char	*name = attr->attr.name;
+	int		err;
+	unsigned int	p, i, v;
+	unsigned long	flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	err = p = i = v = 0;
+	sscanf(buf, "%d %d %d", &p, &i, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "rxq_type")) {
+		if (MV_NETA_PNC_CAP())
+			err = pnc_run_rxq_type(p, i, v);
+		else
+			err = neta_run_rxq_type(p, i, v);
+	} else {
+		err = 1;
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		pr_err("%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_eth_3_hex_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, i, v;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	err = p = i = v = 0;
+	sscanf(buf, "%d %d %x", &p, &i, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "rxq_tos")) {
+		err = mv_eth_rxq_tos_map_set(p, i, v);
+	} else if (!strcmp(name, "rxq_vlan")) {
+		err = mv_eth_rxq_vlan_prio_set(p, i, v);
+	} else {
+		err = 1;
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_eth_4_hex_store(struct device *dev,
+				struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, cpu, txq, v;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	err = p = cpu = txq = v = 0;
+	sscanf(buf, "%d %d %d %x", &p, &txq, &cpu, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "txq_tos")) {
+		err = mv_eth_txq_tos_map_set(p, txq, cpu, v);
+	} else {
+		err = 1;
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+	local_irq_restore(flags);
+
+	if (err)
+		pr_err("%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(help,        S_IRUSR, mv_eth_show, NULL);
+static DEVICE_ATTR(tos,         S_IWUSR, mv_eth_show, mv_eth_port_store);
+static DEVICE_ATTR(vprio,       S_IWUSR, mv_eth_show, mv_eth_port_store);
+static DEVICE_ATTR(rxq_tos,       S_IWUSR, NULL, mv_eth_3_hex_store);
+static DEVICE_ATTR(rxq_type,      S_IWUSR, NULL, mv_eth_3_store);
+static DEVICE_ATTR(rxq_vlan,      S_IWUSR, NULL, mv_eth_3_hex_store);
+static DEVICE_ATTR(txq_tos,     S_IWUSR, mv_eth_show, mv_eth_4_hex_store);
+
+
+static struct attribute *mv_eth_qos_attrs[] = {
+	&dev_attr_help.attr,
+	&dev_attr_tos.attr,
+	&dev_attr_vprio.attr,
+	&dev_attr_rxq_tos.attr,
+	&dev_attr_rxq_type.attr,
+	&dev_attr_rxq_vlan.attr,
+	&dev_attr_txq_tos.attr,
+	NULL
+};
+
+static struct attribute_group mv_eth_qos_group = {
+	.name = "qos",
+	.attrs = mv_eth_qos_attrs,
+};
+
+int mv_neta_qos_sysfs_init(struct kobject *gbe_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(gbe_kobj, &mv_eth_qos_group);
+	if (err)
+		pr_err("sysfs group %s failed %d\n", mv_eth_qos_group.name, err);
+
+	return err;
+}
+
+int mv_neta_qos_sysfs_exit(struct kobject *gbe_kobj)
+{
+	sysfs_remove_group(gbe_kobj, &mv_eth_qos_group);
+	return 0;
+}
diff --git a/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_rss_sysfs.c b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_rss_sysfs.c
new file mode 100644
index 000000000000..d9bd7b00e720
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_rss_sysfs.c
@@ -0,0 +1,164 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "mv_eth_sysfs.h"
+#include "mv_netdev.h"
+
+static ssize_t mv_eth_help(char *b)
+{
+	int o = 0; /* buffer offset */
+	int s = PAGE_SIZE; /* buffer size */
+
+	o += scnprintf(b+o, s-o, "p gr                               - are dec numbers\n");
+	o += scnprintf(b+o, s-o, "mask                               - are hex numbers\n");
+	o += scnprintf(b+o, s-o, "\n");
+
+	o += scnprintf(b+o, s-o, "echo p             > napi          - show port NAPI groups: CPUs and RXQs\n");
+	o += scnprintf(b+o, s-o, "echo p gr mask     > cpu_group     - set <cpus mask>  for <port/napi group>.\n");
+	o += scnprintf(b+o, s-o, "echo p gr mask     > rxq_group     - set  <rxqs mask> for <port/napi group>.\n");
+
+	return o;
+}
+
+static ssize_t mv_eth_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	off = mv_eth_help(buf);
+
+	return off;
+}
+
+static ssize_t mv_eth_port_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read port and value */
+	err = p = 0;
+	sscanf(buf, "%d", &p);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "napi")) {
+		mv_eth_napi_group_show(p);
+	} else {
+		err = 1;
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		pr_err("%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_eth_3_hex_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, i, v;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	err = p = i = v = 0;
+	sscanf(buf, "%d %d %x", &p, &i, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "cpu_group")) {
+		err = mv_eth_napi_set_cpu_affinity(p, i, v);
+	} else if (!strcmp(name, "rxq_group")) {
+		err = mv_eth_napi_set_rxq_affinity(p, i, v);
+	} else {
+		err = 1;
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(help,        S_IRUSR, mv_eth_show, NULL);
+static DEVICE_ATTR(napi,        S_IWUSR, mv_eth_show, mv_eth_port_store);
+static DEVICE_ATTR(cpu_group,   S_IWUSR, mv_eth_show, mv_eth_3_hex_store);
+static DEVICE_ATTR(rxq_group,   S_IWUSR, mv_eth_show, mv_eth_3_hex_store);
+
+
+static struct attribute *mv_eth_rss_attrs[] = {
+	&dev_attr_help.attr,
+	&dev_attr_napi.attr,
+	&dev_attr_cpu_group.attr,
+	&dev_attr_rxq_group.attr,
+	NULL
+};
+
+static struct attribute_group mv_eth_rss_group = {
+	.name = "rss",
+	.attrs = mv_eth_rss_attrs,
+};
+
+int mv_neta_rss_sysfs_init(struct kobject *gbe_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(gbe_kobj, &mv_eth_rss_group);
+	if (err)
+		pr_err("sysfs group %s failed %d\n", mv_eth_rss_group.name, err);
+
+	return err;
+}
+
+int mv_neta_rss_sysfs_exit(struct kobject *gbe_kobj)
+{
+	sysfs_remove_group(gbe_kobj, &mv_eth_rss_group);
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_rx_sysfs.c b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_rx_sysfs.c
new file mode 100644
index 000000000000..f1966d2364e5
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_rx_sysfs.c
@@ -0,0 +1,156 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "mv_eth_sysfs.h"
+#include "mv_netdev.h"
+
+static ssize_t mv_eth_help(char *b)
+{
+	int o = 0; /* buffer offset */
+	int s = PAGE_SIZE; /* buffer size */
+
+	o += scnprintf(b+o, s-o, "p, rxq, d                             - are dec numbers\n");
+	o += scnprintf(b+o, s-o, "\n");
+
+	o += scnprintf(b+o, s-o, "echo p rxq         > rxq_regs      - show RXQ registers for <p/rxq>\n");
+	o += scnprintf(b+o, s-o, "echo p rxq d       > rxq           - show RXQ descriptors ring for <p/rxq>. d=0-brief, d=1-full\n");
+	o += scnprintf(b+o, s-o, "echo p rxq d       > rxq_size      - set number of descriptors <d> for <port/rxq>.\n");
+	o += scnprintf(b+o, s-o, "echo p d           > rx_weight     - set weight for the poll function; <d> - new weight, max val: 255\n");
+	o += scnprintf(b+o, s-o, "echo p             > rx_reset      - reset RX part of the port <p>\n");
+	o += scnprintf(b+o, s-o, "echo p rxq d       > rxq_pkts_coal - set RXQ interrupt coalesing. <d> - number of received packets\n");
+	o += scnprintf(b+o, s-o, "echo p rxq d       > rxq_time_coal - set RXQ interrupt coalesing. <d> - time in microseconds\n");
+
+	return o;
+}
+
+static ssize_t mv_eth_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	off = mv_eth_help(buf);
+
+	return off;
+}
+
+static ssize_t mv_eth_3_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char	*name = attr->attr.name;
+	int		err;
+	unsigned int	p, i, v;
+	unsigned long	flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	err = p = i = v = 0;
+	sscanf(buf, "%d %d %d", &p, &i, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "rxq_size"))
+		err = mv_eth_ctrl_rxq_size_set(p, i, v);
+	else if (!strcmp(name, "rxq_pkts_coal"))
+		err = mv_eth_rx_pkts_coal_set(p, i, v);
+	else if (!strcmp(name, "rxq_time_coal"))
+		err = mv_eth_rx_time_coal_set(p, i, v);
+	else if (!strcmp(name, "rxq"))
+		mvNetaRxqShow(p, i, v);
+	else if (!strcmp(name, "rxq_regs"))
+		mvNetaRxqRegs(p, i);
+	else if (!strcmp(name, "rx_reset"))
+		err = mv_eth_rx_reset(p);
+	else if (!strcmp(name, "rx_weight"))
+		err = mv_eth_ctrl_set_poll_rx_weight(p, i);
+	else {
+		err = 1;
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		pr_err("%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(help,          S_IRUSR, mv_eth_show, NULL);
+static DEVICE_ATTR(rxq_size,      S_IWUSR, NULL, mv_eth_3_store);
+static DEVICE_ATTR(rxq_pkts_coal, S_IWUSR, NULL, mv_eth_3_store);
+static DEVICE_ATTR(rxq_time_coal, S_IWUSR, NULL, mv_eth_3_store);
+static DEVICE_ATTR(rxq,           S_IWUSR, NULL, mv_eth_3_store);
+static DEVICE_ATTR(rxq_regs,      S_IWUSR, NULL, mv_eth_3_store);
+static DEVICE_ATTR(rx_reset,      S_IWUSR, NULL, mv_eth_3_store);
+static DEVICE_ATTR(rx_weight,     S_IWUSR, NULL, mv_eth_3_store);
+
+
+static struct attribute *mv_eth_attrs[] = {
+	&dev_attr_help.attr,
+	&dev_attr_rxq_size.attr,
+	&dev_attr_rxq_pkts_coal.attr,
+	&dev_attr_rxq_time_coal.attr,
+	&dev_attr_rxq.attr,
+	&dev_attr_rxq_regs.attr,
+	&dev_attr_rx_reset.attr,
+	&dev_attr_rx_weight.attr,
+	NULL
+};
+
+static struct attribute_group mv_eth_rx_group = {
+	.name = "rx",
+	.attrs = mv_eth_attrs,
+};
+
+int mv_neta_rx_sysfs_init(struct kobject *gbe_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(gbe_kobj, &mv_eth_rx_group);
+	if (err)
+		pr_err("sysfs group %s failed %d\n", mv_eth_rx_group.name, err);
+
+	return err;
+}
+
+int mv_neta_rx_sysfs_exit(struct kobject *gbe_kobj)
+{
+	sysfs_remove_group(gbe_kobj, &mv_eth_rx_group);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_sysfs.c b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_sysfs.c
new file mode 100644
index 000000000000..b96d7d42ff0c
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_sysfs.c
@@ -0,0 +1,357 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "gbe/mvNeta.h"
+#include "mv_netdev.h"
+#include "mv_eth_sysfs.h"
+
+static ssize_t mv_eth_help(char *b)
+{
+	int o = 0; /* buffer offset */
+	int s = PAGE_SIZE; /* buffer size */
+
+	o += scnprintf(b+o, s-o, "p, txp, d, l, s                    - are dec numbers\n");
+	o += scnprintf(b+o, s-o, "v                                  - are hex numbers\n");
+	o += scnprintf(b+o, s-o, "\n");
+
+	o += scnprintf(b+o, s-o, "cat                ports           - show all ports info\n");
+	o += scnprintf(b+o, s-o, "cd                 rx              - move to RX sysfs directory\n");
+	o += scnprintf(b+o, s-o, "cd                 tx              - move to TX sysfs directory\n");
+	o += scnprintf(b+o, s-o, "cd                 tx_sched        - move to TX Scheduler sysfs directory\n");
+	o += scnprintf(b+o, s-o, "cd                 qos             - move to QoS sysfs directory\n");
+	o += scnprintf(b+o, s-o, "cd                 rss             - move to RSS sysfs directory\n");
+	o += scnprintf(b+o, s-o, "echo p d           > stack         - show pools stack for port <p>. d=0-brief, d=1-full\n");
+	o += scnprintf(b+o, s-o, "echo p             > port          - show a port info\n");
+	o += scnprintf(b+o, s-o, "echo [if_name]     > netdev        - show <if_name> net_device status\n");
+	o += scnprintf(b+o, s-o, "echo p             > stats         - show a port statistics\n");
+	o += scnprintf(b+o, s-o, "echo p txp         > cntrs         - show a port counters\n");
+	o += scnprintf(b+o, s-o, "echo p             > mac           - show MAC info for port <p>\n");
+	o += scnprintf(b+o, s-o, "echo p             > p_regs        - show port registers for <p>\n");
+#ifdef MV_ETH_GMAC_NEW
+	o += scnprintf(b+o, s-o, "echo p             > gmac_regs     - show gmac registers for <p>\n");
+#endif /* MV_ETH_GMAC_NEW */
+#ifdef CONFIG_MV_ETH_PNC
+	if (MV_NETA_PNC_CAP())
+		o += scnprintf(b+o, s-o, "echo {0|1}         > pnc           - enable / disable PNC access\n");
+#endif /* CONFIG_MV_ETH_PNC */
+	o += scnprintf(b+o, s-o, "echo {0|1}         > skb           - enable / disable SKB recycle\n");
+	o += scnprintf(b+o, s-o, "echo p v           > debug         - bit0:rx, bit1:tx, bit2:isr, bit3:poll, bit4:dump\n");
+	o += scnprintf(b+o, s-o, "echo p l s         > buf_num       - set number of long <l> and short <s> buffers allocated for port <p>\n");
+	o += scnprintf(b+o, s-o, "echo p wol         > pm_mode       - set port <p> pm mode. 1 wol, 0 suspend.\n");
+
+	return o;
+}
+
+static ssize_t mv_eth_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	const char      *name = attr->attr.name;
+	unsigned int    p;
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "ports")) {
+		mv_eth_status_print();
+
+		for (p = 0; p <= CONFIG_MV_ETH_PORTS_NUM; p++)
+			mv_eth_port_status_print(p);
+	} else {
+		off = mv_eth_help(buf);
+	}
+
+	return off;
+}
+
+static ssize_t mv_eth_netdev_store(struct device *dev,
+				struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char        *name = attr->attr.name;
+	int               err = 0;
+	char              dev_name[IFNAMSIZ];
+	struct net_device *netdev;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%s", dev_name);
+	netdev = dev_get_by_name(&init_net, dev_name);
+	if (netdev == NULL) {
+		pr_err("%s: network interface <%s> doesn't exist\n",
+			__func__, dev_name);
+		err = 1;
+	} else {
+		if (!strcmp(name, "netdev"))
+			mv_eth_netdev_print(netdev);
+		else {
+			err = 1;
+			pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+		}
+		dev_put(netdev);
+	}
+	if (err)
+		pr_err("%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_eth_port_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, v;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read port and value */
+	err = p = v = 0;
+	sscanf(buf, "%d %x", &p, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "debug")) {
+		err = mv_eth_ctrl_flag(p, MV_ETH_F_DBG_RX,   v & 0x1);
+		err = mv_eth_ctrl_flag(p, MV_ETH_F_DBG_TX,   v & 0x2);
+		err = mv_eth_ctrl_flag(p, MV_ETH_F_DBG_ISR,  v & 0x4);
+		err = mv_eth_ctrl_flag(p, MV_ETH_F_DBG_POLL, v & 0x8);
+		err = mv_eth_ctrl_flag(p, MV_ETH_F_DBG_DUMP, v & 0x10);
+	} else if (!strcmp(name, "skb")) {
+		mv_eth_ctrl_recycle(p);
+	} else if (!strcmp(name, "port")) {
+		mv_eth_status_print();
+		mvNetaPortStatus(p);
+		mv_eth_port_status_print(p);
+	} else if (!strcmp(name, "stack")) {
+		mv_eth_stack_print(p, v);
+	} else if (!strcmp(name, "stats")) {
+		mv_eth_port_stats_print(p);
+	} else if (!strcmp(name, "mac")) {
+		mv_eth_mac_show(p);
+	} else if (!strcmp(name, "p_regs")) {
+		pr_info("\n[NetA Port: port=%d]\n", p);
+		mvEthRegs(p);
+		pr_info("\n");
+		mvEthPortRegs(p);
+		mvNetaPortRegs(p);
+#ifdef MV_ETH_GMAC_NEW
+	} else if (!strcmp(name, "gmac_regs")) {
+		mvNetaGmacRegs(p);
+#endif /* MV_ETH_GMAC_NEW */
+#ifdef CONFIG_MV_ETH_PNC
+	} else if (!strcmp(name, "pnc")) {
+		mv_eth_ctrl_pnc(p);
+#endif /* CONFIG_MV_ETH_PNC */
+	} else if (!strcmp(name, "pm_mode")) {
+		err = mv_eth_wol_mode_set(p, v);
+	} else {
+		err = 1;
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		pr_err("%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_eth_3_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, i, v;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	err = p = i = v = 0;
+	sscanf(buf, "%d %d %d", &p, &i, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "buf_num")) {
+		err = mv_eth_ctrl_port_buf_num_set(p, i, v);
+	} else {
+		err = 1;
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		pr_err("%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_eth_2_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, txp;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	err = p = txp = 0;
+	sscanf(buf, "%d %d", &p, &txp);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "cntrs")) {
+		mvEthPortCounters(p, txp);
+		mvEthPortRmonCounters(p, txp);
+	} else {
+		err = 1;
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+	local_irq_restore(flags);
+
+	if (err)
+		pr_err("%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(buf_num,     S_IWUSR, mv_eth_show, mv_eth_3_store);
+static DEVICE_ATTR(debug,       S_IWUSR, mv_eth_show, mv_eth_port_store);
+static DEVICE_ATTR(cntrs,       S_IWUSR, mv_eth_show, mv_eth_2_store);
+static DEVICE_ATTR(port,        S_IWUSR, mv_eth_show, mv_eth_port_store);
+static DEVICE_ATTR(stack,        S_IWUSR, mv_eth_show, mv_eth_port_store);
+static DEVICE_ATTR(mac,         S_IWUSR, mv_eth_show, mv_eth_port_store);
+static DEVICE_ATTR(stats,       S_IWUSR, mv_eth_show, mv_eth_port_store);
+static DEVICE_ATTR(skb,	        S_IWUSR, mv_eth_show, mv_eth_port_store);
+static DEVICE_ATTR(ports,       S_IRUSR, mv_eth_show, NULL);
+static DEVICE_ATTR(help,        S_IRUSR, mv_eth_show, NULL);
+static DEVICE_ATTR(p_regs,      S_IWUSR, mv_eth_show, mv_eth_port_store);
+static DEVICE_ATTR(gmac_regs,   S_IWUSR, mv_eth_show, mv_eth_port_store);
+#ifdef CONFIG_MV_ETH_PNC
+static DEVICE_ATTR(pnc,         S_IWUSR, NULL, mv_eth_port_store);
+#endif /* CONFIG_MV_ETH_PNC */
+static DEVICE_ATTR(pm_mode,	S_IWUSR, mv_eth_show, mv_eth_port_store);
+static DEVICE_ATTR(netdev,       S_IWUSR, NULL, mv_eth_netdev_store);
+
+static struct attribute *mv_eth_attrs[] = {
+
+	&dev_attr_buf_num.attr,
+	&dev_attr_debug.attr,
+	&dev_attr_port.attr,
+	&dev_attr_stack.attr,
+	&dev_attr_stats.attr,
+	&dev_attr_cntrs.attr,
+	&dev_attr_ports.attr,
+	&dev_attr_netdev.attr,
+	&dev_attr_mac.attr,
+	&dev_attr_skb.attr,
+	&dev_attr_p_regs.attr,
+	&dev_attr_gmac_regs.attr,
+	&dev_attr_help.attr,
+#ifdef CONFIG_MV_ETH_PNC
+    &dev_attr_pnc.attr,
+#endif /* CONFIG_MV_ETH_PNC */
+	&dev_attr_pm_mode.attr,
+	NULL
+};
+
+static struct attribute_group mv_eth_group = {
+	.attrs = mv_eth_attrs,
+};
+
+static struct kobject *gbe_kobj;
+
+int mv_neta_gbe_sysfs_init(struct kobject *neta_kobj)
+{
+	int err;
+
+	gbe_kobj = kobject_create_and_add("gbe", neta_kobj);
+	if (!gbe_kobj) {
+		pr_err("%s: cannot create gbe kobject\n", __func__);
+		return -ENOMEM;
+	}
+
+	err = sysfs_create_group(gbe_kobj, &mv_eth_group);
+	if (err) {
+		pr_info("sysfs group failed %d\n", err);
+		return err;
+	}
+
+	err = mv_neta_rx_sysfs_init(gbe_kobj);
+	if (err) {
+		pr_info("sysfs group failed %d\n", err);
+		return err;
+	}
+
+	err = mv_neta_tx_sysfs_init(gbe_kobj);
+	if (err) {
+		pr_info("sysfs group failed %d\n", err);
+		return err;
+	}
+
+	err = mv_neta_tx_sched_sysfs_init(gbe_kobj);
+	if (err) {
+		pr_info("sysfs group failed %d\n", err);
+		return err;
+	}
+
+	err = mv_neta_qos_sysfs_init(gbe_kobj);
+	if (err) {
+		pr_info("sysfs group failed %d\n", err);
+		return err;
+	}
+
+	err = mv_neta_rss_sysfs_init(gbe_kobj);
+	if (err) {
+		pr_info("sysfs group failed %d\n", err);
+		return err;
+	}
+
+	return err;
+}
+
+int mv_neta_gbe_sysfs_exit(struct kobject *neta_kobj)
+{
+	sysfs_remove_group(neta_kobj, &mv_eth_group);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_sysfs.h b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_sysfs.h
new file mode 100644
index 000000000000..e410258fe2ba
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_sysfs.h
@@ -0,0 +1,72 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#ifndef __mv_eth_sysfs_h__
+#define __mv_eth_sysfs_h__
+
+
+/* Subdirectories of neta menu */
+
+int mv_neta_pme_sysfs_init(struct kobject *pp2_kobj);
+int mv_neta_pme_sysfs_exit(struct kobject *pp2_kobj);
+
+int mv_neta_gbe_sysfs_init(struct kobject *pp2_kobj);
+int mv_neta_gbe_sysfs_exit(struct kobject *pp2_kobj);
+
+int mv_neta_bm_sysfs_init(struct kobject *gbe_kobj);
+int mv_neta_bm_sysfs_exit(struct kobject *gbe_kobj);
+int mv_neta_hwf_sysfs_init(struct kobject *gbe_kobj);
+int mv_neta_hwf_sysfs_exit(struct kobject *gbe_kobj);
+int mv_neta_pnc_sysfs_init(struct kobject *gbe_kobj);
+int mv_neta_pnc_sysfs_exit(struct kobject *gbe_kobj);
+
+int mv_neta_wol_sysfs_init(struct kobject *gbe_kobj);
+int mv_neta_wol_sysfs_exit(struct kobject *gbe_kobj);
+
+int mv_neta_pon_sysfs_init(struct kobject *gbe_kobj);
+int mv_neta_pon_sysfs_exit(struct kobject *gbe_kobj);
+#ifdef CONFIG_MV_ETH_L2FW
+int mv_neta_l2fw_sysfs_init(struct kobject *neta_kobj);
+int mv_neta_l2fw_sysfs_exit(struct kobject *neta_kobj);
+#endif
+
+int mv_neta_rx_sysfs_init(struct kobject *gbe_kobj);
+int mv_neta_rx_sysfs_exit(struct kobject *gbe_kobj);
+
+int mv_neta_tx_sysfs_init(struct kobject *gbe_kobj);
+int mv_neta_tx_sysfs_exit(struct kobject *gbe_kobj);
+
+int mv_neta_tx_sched_sysfs_init(struct kobject *gbe_kobj);
+int mv_neta_tx_sched_sysfs_exit(struct kobject *gbe_kobj);
+
+int mv_neta_qos_sysfs_init(struct kobject *gbe_kobj);
+int mv_neta_qos_sysfs_exit(struct kobject *gbe_kobj);
+
+int mv_neta_rss_sysfs_init(struct kobject *gbe_kobj);
+int mv_neta_rss_sysfs_exit(struct kobject *gbe_kobj);
+
+#endif /* __mv_eth_sysfs_h__ */
diff --git a/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_tool.c b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_tool.c
new file mode 100644
index 000000000000..f9fa0af70a98
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_tool.c
@@ -0,0 +1,908 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+
+#include "mvCommon.h"
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <linux/mii.h>
+
+#include "mvOs.h"
+#include "mvDebug.h"
+#include "mvEthPhy.h"
+
+#include "gbe/mvNeta.h"
+#include "bm/mvBm.h"
+
+#include "mv_switch.h"
+#include "mv_netdev.h"
+
+#include "mvOs.h"
+
+#ifdef CONFIG_MV_ETH_PNC
+#include "pnc/mvPnc.h"
+#endif /* CONFIG_MV_ETH_PNC */
+
+
+#define MV_ETH_TOOL_AN_TIMEOUT	5000
+
+static int isSwitch(struct eth_port *priv)
+{
+	return priv->tagged;
+}
+
+
+/******************************************************************************
+* mv_eth_tool_restore_settings
+* Description:
+*	restore saved speed/dublex/an settings
+* INPUT:
+*	netdev		Network device structure pointer
+* OUTPUT
+*	None
+* RETURN:
+*	0 for success
+*
+*******************************************************************************/
+int mv_eth_tool_restore_settings(struct net_device *netdev)
+{
+	struct eth_port 	*priv = MV_ETH_PRIV(netdev);
+	int			phy_speed, phy_duplex;
+	MV_U32			phy_addr = priv->plat_data->phy_addr;
+	MV_ETH_PORT_SPEED	mac_speed;
+	MV_ETH_PORT_DUPLEX	mac_duplex;
+	int			err = -EINVAL;
+
+	 if ((priv == NULL) || (isSwitch(priv)))
+		 return -EOPNOTSUPP;
+
+	switch (priv->speed_cfg) {
+	case SPEED_10:
+		phy_speed  = 0;
+		mac_speed = MV_ETH_SPEED_10;
+		break;
+	case SPEED_100:
+		phy_speed  = 1;
+		mac_speed = MV_ETH_SPEED_100;
+		break;
+	case SPEED_1000:
+		phy_speed  = 2;
+		mac_speed = MV_ETH_SPEED_1000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (priv->duplex_cfg) {
+	case DUPLEX_HALF:
+		phy_duplex = 0;
+		mac_duplex = MV_ETH_DUPLEX_HALF;
+		break;
+	case DUPLEX_FULL:
+		phy_duplex = 1;
+		mac_duplex = MV_ETH_DUPLEX_FULL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (priv->autoneg_cfg == AUTONEG_ENABLE) {
+		err = mvNetaSpeedDuplexSet(priv->port, MV_ETH_SPEED_AN, MV_ETH_DUPLEX_AN);
+		if (!err)
+			err = mvEthPhyAdvertiseSet(phy_addr, priv->advertise_cfg);
+		/* Restart AN on PHY enables it */
+		if (!err) {
+			err = mvNetaFlowCtrlSet(priv->port, MV_ETH_FC_AN_SYM);
+			if (!err) {
+				err = mvEthPhyRestartAN(phy_addr, MV_ETH_TOOL_AN_TIMEOUT);
+				if (err == MV_TIMEOUT) {
+					MV_ETH_PORT_STATUS ps;
+
+					mvNetaLinkStatus(priv->port, &ps);
+
+					if (!ps.linkup)
+						err = 0;
+				}
+			}
+		}
+	} else if (priv->autoneg_cfg == AUTONEG_DISABLE) {
+		err = mvEthPhyDisableAN(phy_addr, phy_speed, phy_duplex);
+		if (!err)
+			err = mvNetaFlowCtrlSet(priv->port, MV_ETH_FC_ENABLE);
+		if (!err)
+			err = mvNetaSpeedDuplexSet(priv->port, mac_speed, mac_duplex);
+	} else
+		err = -EINVAL;
+
+	return err;
+}
+
+
+
+
+/******************************************************************************
+* mv_eth_tool_get_settings
+* Description:
+*	ethtool get standard port settings
+* INPUT:
+*	netdev		Network device structure pointer
+* OUTPUT
+*	cmd		command (settings)
+* RETURN:
+*	0 for success
+*
+*******************************************************************************/
+int mv_eth_tool_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+	struct eth_port 	*priv = MV_ETH_PRIV(netdev);
+	u16			lp_ad, stat1000;
+	MV_U32			phy_addr;
+	MV_ETH_PORT_SPEED 	speed;
+	MV_ETH_PORT_DUPLEX 	duplex;
+	MV_ETH_PORT_STATUS      status;
+
+	if ((priv == NULL) || (isSwitch(priv)) || (MV_PON_PORT(priv->port))) {
+		printk(KERN_ERR "%s is not supported on %s\n", __func__, netdev->name);
+		return -EOPNOTSUPP;
+	}
+
+	cmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half
+			| SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII
+			| SUPPORTED_1000baseT_Full);
+
+	phy_addr = priv->plat_data->phy_addr;
+
+	mvNetaLinkStatus(priv->port, &status);
+
+	if (status.linkup != MV_TRUE) {
+		/* set to Unknown */
+		cmd->speed  = -1;
+		cmd->duplex = -1;
+	} else {
+		switch (status.speed) {
+		case MV_ETH_SPEED_1000:
+			cmd->speed = SPEED_1000;
+			break;
+		case MV_ETH_SPEED_100:
+			cmd->speed = SPEED_100;
+			break;
+		case MV_ETH_SPEED_10:
+			cmd->speed = SPEED_10;
+			break;
+		default:
+			return -EINVAL;
+		}
+		if (status.duplex == MV_ETH_DUPLEX_FULL)
+			cmd->duplex = 1;
+		else
+			cmd->duplex = 0;
+	}
+
+	cmd->port = PORT_MII;
+	cmd->phy_address = phy_addr;
+	cmd->transceiver = XCVR_INTERNAL;
+	/* check if speed and duplex are AN */
+	mvNetaSpeedDuplexGet(priv->port, &speed, &duplex);
+	if (speed == MV_ETH_SPEED_AN && duplex == MV_ETH_DUPLEX_AN) {
+		cmd->lp_advertising = cmd->advertising = 0;
+		cmd->autoneg = AUTONEG_ENABLE;
+		mvEthPhyAdvertiseGet(phy_addr, (MV_U16 *)&(cmd->advertising));
+
+		mvEthPhyRegRead(phy_addr, MII_LPA, &lp_ad);
+		if (lp_ad & LPA_LPACK)
+			cmd->lp_advertising |= ADVERTISED_Autoneg;
+		if (lp_ad & ADVERTISE_10HALF)
+			cmd->lp_advertising |= ADVERTISED_10baseT_Half;
+		if (lp_ad & ADVERTISE_10FULL)
+			cmd->lp_advertising |= ADVERTISED_10baseT_Full;
+		if (lp_ad & ADVERTISE_100HALF)
+			cmd->lp_advertising |= ADVERTISED_100baseT_Half;
+		if (lp_ad & ADVERTISE_100FULL)
+			cmd->lp_advertising |= ADVERTISED_100baseT_Full;
+
+		mvEthPhyRegRead(phy_addr, MII_STAT1000, &stat1000);
+		if (stat1000 & LPA_1000HALF)
+			cmd->lp_advertising |= ADVERTISED_1000baseT_Half;
+		if (stat1000 & LPA_1000FULL)
+			cmd->lp_advertising |= ADVERTISED_1000baseT_Full;
+	} else
+		cmd->autoneg = AUTONEG_DISABLE;
+
+	return 0;
+}
+
+
+/******************************************************************************
+* mv_eth_tool_set_settings
+* Description:
+*	ethtool set standard port settings
+* INPUT:
+*	netdev		Network device structure pointer
+*	cmd		command (settings)
+* OUTPUT
+*	None
+* RETURN:
+*	0 for success
+*
+*******************************************************************************/
+int mv_eth_tool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct eth_port *priv = MV_ETH_PRIV(dev);
+	int _speed, _duplex, _autoneg, _advertise, err;
+
+	if ((priv == NULL) || (isSwitch(priv)) || (MV_PON_PORT(priv->port))) {
+		printk(KERN_ERR "%s is not supported on %s\n", __func__, dev->name);
+		return -EOPNOTSUPP;
+	}
+
+	_duplex  = priv->duplex_cfg;
+	_speed   = priv->speed_cfg;
+	_autoneg = priv->autoneg_cfg;
+	_advertise = priv->advertise_cfg;
+
+	priv->duplex_cfg = cmd->duplex;
+	priv->speed_cfg = cmd->speed;
+	priv->autoneg_cfg = cmd->autoneg;
+	priv->advertise_cfg = cmd->advertising;
+	err = mv_eth_tool_restore_settings(dev);
+
+	if (err) {
+		priv->duplex_cfg = _duplex;
+		priv->speed_cfg = _speed;
+		priv->autoneg_cfg = _autoneg;
+		priv->advertise_cfg = _advertise;
+	}
+	return err;
+}
+
+/******************************************************************************
+* mv_eth_tool_get_regs_len
+* Description:
+*	ethtool get registers array length
+* INPUT:
+*	netdev		Network device structure pointer
+* OUTPUT
+*	None
+* RETURN:
+*	registers array length
+*
+*******************************************************************************/
+int mv_eth_tool_get_regs_len(struct net_device *netdev)
+{
+#define MV_ETH_TOOL_REGS_LEN 32
+
+	return (MV_ETH_TOOL_REGS_LEN * sizeof(uint32_t));
+}
+
+
+/******************************************************************************
+* mv_eth_tool_get_drvinfo
+* Description:
+*	ethtool get driver information
+* INPUT:
+*	netdev		Network device structure pointer
+*	info		driver information
+* OUTPUT
+*	info		driver information
+* RETURN:
+*	None
+*
+*******************************************************************************/
+void mv_eth_tool_get_drvinfo(struct net_device *netdev,
+			     struct ethtool_drvinfo *info)
+{
+	strcpy(info->driver, "mv_eth");
+	/*strcpy(info->version, LSP_VERSION);*/
+	strcpy(info->fw_version, "N/A");
+	strcpy(info->bus_info, "Mbus");
+/*   TBD
+	info->n_stats = MV_ETH_TOOL_STATS_LEN;
+*/
+	info->testinfo_len = 0;
+	info->regdump_len = mv_eth_tool_get_regs_len(netdev);
+	info->eedump_len = 0;
+}
+
+
+/******************************************************************************
+* mv_eth_tool_get_regs
+* Description:
+*	ethtool get registers array
+* INPUT:
+*	netdev		Network device structure pointer
+*	regs		registers information
+* OUTPUT
+*	p		registers array
+* RETURN:
+*	None
+*
+*******************************************************************************/
+void mv_eth_tool_get_regs(struct net_device *netdev,
+			  struct ethtool_regs *regs, void *p)
+{
+	struct eth_port	*priv = MV_ETH_PRIV(netdev);
+	uint32_t 	*regs_buff = p;
+
+	if ((priv == NULL) || MV_PON_PORT(priv->port)) {
+		printk(KERN_ERR "%s is not supported on %s\n", __func__, netdev->name);
+		return;
+	}
+
+	memset(p, 0, MV_ETH_TOOL_REGS_LEN * sizeof(uint32_t));
+
+	regs->version = priv->plat_data->ctrl_rev;
+
+	/* ETH port registers */
+	regs_buff[0]  = MV_REG_READ(ETH_PORT_STATUS_REG(priv->port));
+	regs_buff[1]  = MV_REG_READ(ETH_PORT_SERIAL_CTRL_REG(priv->port));
+	regs_buff[2]  = MV_REG_READ(ETH_PORT_CONFIG_REG(priv->port));
+	regs_buff[3]  = MV_REG_READ(ETH_PORT_CONFIG_EXTEND_REG(priv->port));
+	regs_buff[4]  = MV_REG_READ(ETH_SDMA_CONFIG_REG(priv->port));
+/*	regs_buff[5]  = MV_REG_READ(ETH_TX_FIFO_URGENT_THRESH_REG(priv->port)); */
+	regs_buff[6]  = MV_REG_READ(ETH_RX_QUEUE_COMMAND_REG(priv->port));
+	/* regs_buff[7]  = MV_REG_READ(ETH_TX_QUEUE_COMMAND_REG(priv->port)); */
+	regs_buff[8]  = MV_REG_READ(ETH_INTR_CAUSE_REG(priv->port));
+	regs_buff[9]  = MV_REG_READ(ETH_INTR_CAUSE_EXT_REG(priv->port));
+	regs_buff[10] = MV_REG_READ(ETH_INTR_MASK_REG(priv->port));
+	regs_buff[11] = MV_REG_READ(ETH_INTR_MASK_EXT_REG(priv->port));
+	/* ETH Unit registers */
+	regs_buff[16] = MV_REG_READ(ETH_PHY_ADDR_REG(priv->port));
+	regs_buff[17] = MV_REG_READ(ETH_UNIT_INTR_CAUSE_REG(priv->port));
+	regs_buff[18] = MV_REG_READ(ETH_UNIT_INTR_MASK_REG(priv->port));
+	regs_buff[19] = MV_REG_READ(ETH_UNIT_ERROR_ADDR_REG(priv->port));
+	regs_buff[20] = MV_REG_READ(ETH_UNIT_INT_ADDR_ERROR_REG(priv->port));
+
+}
+
+
+
+
+/******************************************************************************
+* mv_eth_tool_nway_reset
+* Description:
+*	ethtool restart auto negotiation
+* INPUT:
+*	netdev		Network device structure pointer
+* OUTPUT
+*	None
+* RETURN:
+*	0 on success
+*
+*******************************************************************************/
+int mv_eth_tool_nway_reset(struct net_device *netdev)
+{
+	struct eth_port *priv = MV_ETH_PRIV(netdev);
+	MV_U32	        phy_addr;
+
+	if ((priv == NULL) || (isSwitch(priv)) || (MV_PON_PORT(priv->port))) {
+		printk(KERN_ERR "interface %s is not supported\n", netdev->name);
+		return -EOPNOTSUPP;
+	}
+
+	phy_addr = priv->plat_data->phy_addr;
+	if (mvEthPhyRestartAN(phy_addr, MV_ETH_TOOL_AN_TIMEOUT) != MV_OK)
+		return -EINVAL;
+
+	return 0;
+}
+
+/******************************************************************************
+* mv_eth_tool_get_link
+* Description:
+*	ethtool get link status
+* INPUT:
+*	netdev		Network device structure pointer
+* OUTPUT
+*	None
+* RETURN:
+*	0 if link is down, 1 if link is up
+*
+*******************************************************************************/
+u32 mv_eth_tool_get_link(struct net_device *netdev)
+{
+	struct eth_port     *pp = MV_ETH_PRIV(netdev);
+
+	if (pp == NULL) {
+		printk(KERN_ERR "interface %s is not supported\n", netdev->name);
+		return -EOPNOTSUPP;
+	}
+
+#ifdef CONFIG_MV_PON
+	if (MV_PON_PORT(pp->port))
+		return mv_pon_link_status();
+#endif /* CONFIG_MV_PON */
+
+	return mvNetaLinkIsUp(pp->port);
+}
+/******************************************************************************
+* mv_eth_tool_get_coalesce
+* Description:
+*	ethtool get RX/TX coalesce parameters
+* INPUT:
+*	netdev		Network device structure pointer
+* OUTPUT
+*	cmd		Coalesce parameters
+* RETURN:
+*	0 on success
+*
+*******************************************************************************/
+int mv_eth_tool_get_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *cmd)
+{
+	struct eth_port *pp = MV_ETH_PRIV(netdev);
+	/* get coal parameters only for rxq=0, txp=txq=0 !!!
+	   notice that if you use ethtool to set coal, then all queues have the same value */
+	cmd->rx_coalesce_usecs = pp->rx_time_coal_cfg;
+	cmd->rx_max_coalesced_frames = pp->rx_pkts_coal_cfg;
+	cmd->tx_max_coalesced_frames = pp->tx_pkts_coal_cfg;
+
+	/* Adaptive RX coalescing parameters */
+	cmd->rx_coalesce_usecs_low = pp->rx_time_low_coal_cfg;
+	cmd->rx_coalesce_usecs_high = pp->rx_time_high_coal_cfg;
+	cmd->rx_max_coalesced_frames_low = pp->rx_pkts_low_coal_cfg;
+	cmd->rx_max_coalesced_frames_high = pp->rx_pkts_high_coal_cfg;
+	cmd->pkt_rate_low = pp->pkt_rate_low_cfg;
+	cmd->pkt_rate_high = pp->pkt_rate_high_cfg;
+	cmd->rate_sample_interval = pp->rate_sample_cfg;
+	cmd->use_adaptive_rx_coalesce = pp->rx_adaptive_coal_cfg;
+
+	return 0;
+}
+
+/******************************************************************************
+* mv_eth_tool_set_coalesce
+* Description:
+*	ethtool set RX/TX coalesce parameters
+* INPUT:
+*	netdev		Network device structure pointer
+*	cmd		Coalesce parameters
+* OUTPUT
+*	None
+* RETURN:
+*	0 on success
+*
+*******************************************************************************/
+int mv_eth_tool_set_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *cmd)
+{
+	struct eth_port *pp = MV_ETH_PRIV(netdev);
+	int rxq, txp, txq;
+
+	/* can't set rx coalesce with both 0 pkts and 0 usecs,  tx coalesce supports only pkts */
+	if ((!cmd->rx_coalesce_usecs && !cmd->rx_max_coalesced_frames) || (!cmd->tx_max_coalesced_frames))
+		return -EPERM;
+
+	if (!cmd->use_adaptive_rx_coalesce) {
+		for (rxq = 0; rxq < CONFIG_MV_ETH_RXQ; rxq++) {
+			mv_eth_rx_pkts_coal_set(pp->port, rxq, cmd->rx_max_coalesced_frames);
+			mv_eth_rx_time_coal_set(pp->port, rxq, cmd->rx_coalesce_usecs);
+		}
+	}
+
+	pp->rx_time_coal_cfg = cmd->rx_coalesce_usecs;
+	pp->rx_pkts_coal_cfg = cmd->rx_max_coalesced_frames;
+	for (txp = 0; txp < pp->txp_num; txp++)
+		for (txq = 0; txq < CONFIG_MV_ETH_TXQ; txq++)
+			mv_eth_tx_done_pkts_coal_set(pp->port, txp, txq, cmd->tx_max_coalesced_frames);
+	pp->tx_pkts_coal_cfg = cmd->tx_max_coalesced_frames;
+
+	/* Adaptive RX coalescing parameters */
+	pp->rx_time_low_coal_cfg = cmd->rx_coalesce_usecs_low;
+	pp->rx_time_high_coal_cfg = cmd->rx_coalesce_usecs_high;
+	pp->rx_pkts_low_coal_cfg = cmd->rx_max_coalesced_frames_low;
+	pp->rx_pkts_high_coal_cfg = cmd->rx_max_coalesced_frames_high;
+	pp->pkt_rate_low_cfg = cmd->pkt_rate_low;
+	pp->pkt_rate_high_cfg = cmd->pkt_rate_high;
+
+	if (cmd->rate_sample_interval > 0)
+		pp->rate_sample_cfg = cmd->rate_sample_interval;
+
+	/* check if adaptive rx is on - reset rate calculation parameters */
+	if (!pp->rx_adaptive_coal_cfg && cmd->use_adaptive_rx_coalesce) {
+		pp->rx_timestamp = jiffies;
+		pp->rx_rate_pkts = 0;
+	}
+	pp->rx_adaptive_coal_cfg = cmd->use_adaptive_rx_coalesce;
+	pp->rate_current = 0; /* Unknown */
+
+	return 0;
+}
+
+
+/******************************************************************************
+* mv_eth_tool_get_ringparam
+* Description:
+*	ethtool get ring parameters
+* INPUT:
+*	netdev		Network device structure pointer
+* OUTPUT
+*	ring		Ring paranmeters
+* RETURN:
+*	None
+*
+*******************************************************************************/
+void mv_eth_tool_get_ringparam(struct net_device *netdev,
+				struct ethtool_ringparam *ring)
+{
+/*	printk("in %s \n",__FUNCTION__); */
+}
+
+/******************************************************************************
+* mv_eth_tool_get_pauseparam
+* Description:
+*	ethtool get pause parameters
+* INPUT:
+*	netdev		Network device structure pointer
+* OUTPUT
+*	pause		Pause paranmeters
+* RETURN:
+*	None
+*
+*******************************************************************************/
+void mv_eth_tool_get_pauseparam(struct net_device *netdev,
+				struct ethtool_pauseparam *pause)
+{
+	struct eth_port      *priv = MV_ETH_PRIV(netdev);
+	int                  port = priv->port;
+	MV_ETH_PORT_STATUS   portStatus;
+	MV_ETH_PORT_FC       flowCtrl;
+
+	if ((priv == NULL) || (isSwitch(priv)) || (MV_PON_PORT(priv->port))) {
+		printk(KERN_ERR "%s is not supported on %s\n", __func__, netdev->name);
+		return;
+	}
+
+	mvNetaFlowCtrlGet(port, &flowCtrl);
+	if ((flowCtrl == MV_ETH_FC_AN_NO) || (flowCtrl == MV_ETH_FC_AN_SYM) || (flowCtrl == MV_ETH_FC_AN_ASYM))
+		pause->autoneg = AUTONEG_ENABLE;
+	else
+		pause->autoneg = AUTONEG_DISABLE;
+
+	mvNetaLinkStatus(port, &portStatus);
+	if (portStatus.rxFc == MV_ETH_FC_DISABLE)
+		pause->rx_pause = 0;
+	else
+		pause->rx_pause = 1;
+
+	if (portStatus.txFc == MV_ETH_FC_DISABLE)
+		pause->tx_pause = 0;
+	else
+		pause->tx_pause = 1;
+}
+
+
+
+
+/******************************************************************************
+* mv_eth_tool_set_pauseparam
+* Description:
+*	ethtool configure pause parameters
+* INPUT:
+*	netdev		Network device structure pointer
+*	pause		Pause paranmeters
+* OUTPUT
+*	None
+* RETURN:
+*	0 on success
+*
+*******************************************************************************/
+int mv_eth_tool_set_pauseparam(struct net_device *netdev,
+				struct ethtool_pauseparam *pause)
+{
+	struct eth_port *priv = MV_ETH_PRIV(netdev);
+	int				port = priv->port;
+	MV_U32			phy_addr;
+	MV_STATUS		status = MV_FAIL;
+
+	if ((priv == NULL) || (isSwitch(priv)) || (MV_PON_PORT(priv->port))) {
+		printk(KERN_ERR "%s is not supported on %s\n", __func__, netdev->name);
+		return -EOPNOTSUPP;
+	}
+
+	if (pause->rx_pause && pause->tx_pause) { /* Enable FC */
+		if (pause->autoneg) { /* autoneg enable */
+			status = mvNetaFlowCtrlSet(port, MV_ETH_FC_AN_SYM);
+		} else { /* autoneg disable */
+			status = mvNetaFlowCtrlSet(port, MV_ETH_FC_ENABLE);
+		}
+	} else if (!pause->rx_pause && !pause->tx_pause) { /* Disable FC */
+		if (pause->autoneg) { /* autoneg enable */
+			status = mvNetaFlowCtrlSet(port, MV_ETH_FC_AN_NO);
+		} else { /* autoneg disable */
+			status = mvNetaFlowCtrlSet(port, MV_ETH_FC_DISABLE);
+		}
+	}
+	/* Only symmetric change for RX and TX flow control is allowed */
+	if (status == MV_OK) {
+		phy_addr = priv->plat_data->phy_addr;
+		status = mvEthPhyRestartAN(phy_addr, MV_ETH_TOOL_AN_TIMEOUT);
+	}
+	if (status != MV_OK)
+		return -EINVAL;
+
+	return 0;
+}
+
+/******************************************************************************
+* mv_eth_tool_get_strings
+* Description:
+*	ethtool get strings (used for statistics and self-test descriptions)
+* INPUT:
+*	netdev		Network device structure pointer
+*	stringset	strings parameters
+* OUTPUT
+*	data		output data
+* RETURN:
+*	None
+*
+*******************************************************************************/
+void mv_eth_tool_get_strings(struct net_device *netdev,
+			     uint32_t stringset, uint8_t *data)
+{
+}
+
+/******************************************************************************
+* mv_eth_tool_get_stats_count
+* Description:
+*	ethtool get statistics count (number of stat. array entries)
+* INPUT:
+*	netdev		Network device structure pointer
+* OUTPUT
+*	None
+* RETURN:
+*	statistics count
+*
+*******************************************************************************/
+int mv_eth_tool_get_stats_count(struct net_device *netdev)
+{
+	return 0;
+}
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)
+
+static u32 mv_eth_tool_get_rxfh_indir_size(struct net_device *netdev)
+{
+#if defined(MV_ETH_PNC_LB) && defined(CONFIG_MV_ETH_PNC)
+	struct eth_port *priv = MV_ETH_PRIV(netdev);
+	return ARRAY_SIZE(priv->rx_indir_table);
+#else
+	return 0;
+#endif
+}
+
+static int mv_eth_tool_get_rxfh_indir(struct net_device *netdev,
+					u32 *indir)
+{
+#if defined(MV_ETH_PNC_LB) && defined(CONFIG_MV_ETH_PNC)
+	struct eth_port *priv = MV_ETH_PRIV(netdev);
+	size_t copy_size = ARRAY_SIZE(priv->rx_indir_table);
+
+	if (!MV_NETA_PNC_CAP())
+		return -EOPNOTSUPP;
+
+	memcpy(indir, priv->rx_indir_table,
+	       copy_size * sizeof(u32));
+	return 0;
+#else
+	return -EOPNOTSUPP;
+#endif
+}
+
+
+static int mv_eth_tool_set_rxfh_indir(struct net_device *netdev,
+				   const u32 *indir)
+{
+#if defined(MV_ETH_PNC_LB) && defined(CONFIG_MV_ETH_PNC)
+	int i;
+	struct eth_port *priv = MV_ETH_PRIV(netdev);
+	if (MV_NETA_PNC_CAP()) {
+		for (i = 0; i < ARRAY_SIZE(priv->rx_indir_table); i++) {
+			priv->rx_indir_table[i] = indir[i];
+			mvPncLbRxqSet(i, priv->rx_indir_table[i]);
+		}
+		return 0;
+	} else {
+		return -EOPNOTSUPP;
+	}
+#else
+	return -EOPNOTSUPP;
+#endif
+}
+
+#else /* KERNEL_VERSION(3, 3, 0) */
+
+static int mv_eth_tool_get_rxfh_indir(struct net_device *netdev,
+					struct ethtool_rxfh_indir *indir)
+{
+#if defined(MV_ETH_PNC_LB) && defined(CONFIG_MV_ETH_PNC)
+		struct eth_port *priv = MV_ETH_PRIV(netdev);
+		size_t copy_size =
+			min_t(size_t, indir->size, ARRAY_SIZE(priv->rx_indir_table));
+
+		if (!MV_NETA_PNC_CAP())
+			return -EOPNOTSUPP;
+
+		indir->size = ARRAY_SIZE(priv->rx_indir_table);
+
+		memcpy(indir->ring_index, priv->rx_indir_table,
+		       copy_size * sizeof(indir->ring_index[0]));
+		return 0;
+#else
+		return -EOPNOTSUPP;
+#endif
+}
+
+static int mv_eth_tool_set_rxfh_indir(struct net_device *netdev,
+				   const struct ethtool_rxfh_indir *indir)
+{
+#if defined(MV_ETH_PNC_LB) && defined(CONFIG_MV_ETH_PNC)
+	int i;
+	struct eth_port 	*priv = MV_ETH_PRIV(netdev);
+	if (MV_NETA_PNC_CAP()) {
+		for (i = 0; i < indir->size; i++) {
+			priv->rx_indir_table[i] = indir->ring_index[i];
+			mvPncLbRxqSet(i, priv->rx_indir_table[i]);
+		}
+		return 0;
+	} else {
+		return -EOPNOTSUPP;
+	}
+#else
+	return -EOPNOTSUPP;
+#endif
+}
+#endif /* KERNEL_VERSION(3, 3, 0) */
+#endif /* KERNEL_VERSION(2, 6, 35) */
+
+static int mv_eth_tool_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+									u32 *rules)
+{
+	if (info->cmd == ETHTOOL_GRXRINGS) {
+		struct eth_port *pp = MV_ETH_PRIV(dev);
+		if (pp)
+			info->data = ARRAY_SIZE(pp->rx_indir_table);
+	}
+	return 0;
+}
+
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 33)))
+/******************************************************************************
+* mv_eth_tool_set_rx_ntuple
+* Description:
+*	ethtool set mapping from 2t/5t rule to rxq/drop
+*	ignore mask parameters (assume mask=0xFF for each byte provided)
+*	support only tcp4 / udp4 protocols
+*	support only full 2t/5t rules:
+*		** 2t - must provide src-ip, dst-ip
+*		** 5t - must provide src-ip, dst-ip, src-port, dst-port
+* INPUT:
+*	netdev		Network device structure pointer
+*	ntuple
+* OUTPUT
+*	None
+* RETURN:
+*
+*******************************************************************************/
+static int mv_eth_tool_set_rx_ntuple(struct net_device *dev, struct ethtool_rx_ntuple *ntuple)
+{
+#ifdef CONFIG_MV_ETH_PNC_L3_FLOW
+	unsigned int sip, dip, ports, sport, dport, proto;
+	struct eth_port *pp;
+
+	if ((ntuple->fs.flow_type != TCP_V4_FLOW) && (ntuple->fs.flow_type != UDP_V4_FLOW))
+		return -EOPNOTSUPP;
+
+	if ((ntuple->fs.action >= CONFIG_MV_ETH_RXQ) || (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR))
+		return -EINVAL;
+
+	if (ntuple->fs.flow_type == TCP_V4_FLOW)
+		proto = 6; /* tcp */
+	else
+		proto = 17; /* udp */
+
+	sip = ntuple->fs.h_u.tcp_ip4_spec.ip4src;
+	dip = ntuple->fs.h_u.tcp_ip4_spec.ip4dst;
+	sport = ntuple->fs.h_u.tcp_ip4_spec.psrc;
+	dport = ntuple->fs.h_u.tcp_ip4_spec.pdst;
+	if (!sip || !dip)
+		return -EINVAL;
+
+	pp = MV_ETH_PRIV(dev);
+	if (!sport || !dport) { /* 2-tuple */
+		pnc_ip4_2tuple_rxq(pp->port, sip, dip, ntuple->fs.action);
+	} else {
+		ports = (dport << 16) | ((sport << 16) >> 16);
+		pnc_ip4_5tuple_rxq(pp->port, sip, dip, ports, proto, ntuple->fs.action);
+	}
+
+	return 0;
+#else
+	return 1;
+#endif /* CONFIG_MV_ETH_PNC_L3_FLOW */
+}
+#endif
+
+/******************************************************************************
+* mv_eth_tool_get_ethtool_stats
+* Description:
+*	ethtool get statistics
+* INPUT:
+*	netdev		Network device structure pointer
+*	stats		stats parameters
+* OUTPUT
+*	data		output data
+* RETURN:
+*	None
+*
+*******************************************************************************/
+void mv_eth_tool_get_ethtool_stats(struct net_device *netdev,
+				   struct ethtool_stats *stats, uint64_t *data)
+{
+
+}
+
+const struct ethtool_ops mv_eth_tool_ops = {
+	.get_settings				= mv_eth_tool_get_settings,
+	.set_settings				= mv_eth_tool_set_settings,
+	.get_drvinfo				= mv_eth_tool_get_drvinfo,
+	.get_regs_len				= mv_eth_tool_get_regs_len,
+	.get_regs				= mv_eth_tool_get_regs,
+	.nway_reset				= mv_eth_tool_nway_reset,
+	.get_link				= mv_eth_tool_get_link,
+	.get_coalesce				= mv_eth_tool_get_coalesce,
+	.set_coalesce				= mv_eth_tool_set_coalesce,
+	.get_ringparam  			= mv_eth_tool_get_ringparam,
+	.get_pauseparam				= mv_eth_tool_get_pauseparam,
+	.set_pauseparam				= mv_eth_tool_set_pauseparam,
+	.get_strings				= mv_eth_tool_get_strings,/*TODO: complete implementation */
+	.get_ethtool_stats			= mv_eth_tool_get_ethtool_stats,
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 32)
+	.get_stats_count			= mv_eth_tool_get_stats_count,
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)
+	.get_rxfh_indir_size			= mv_eth_tool_get_rxfh_indir_size,
+#endif
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35)
+	.get_rxfh_indir				= mv_eth_tool_get_rxfh_indir,
+	.set_rxfh_indir				= mv_eth_tool_set_rxfh_indir,
+#endif
+	.get_rxnfc				= mv_eth_tool_get_rxnfc,
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)) && (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 33)))
+	.set_rx_ntuple				= mv_eth_tool_set_rx_ntuple,
+#endif
+};
+
diff --git a/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_tool.h b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_tool.h
new file mode 100644
index 000000000000..3c8f3ce1d190
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_tool.h
@@ -0,0 +1,35 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#ifndef __MV_ETH_TOOL_H__
+#define __MV_ETH_TOOL_H__
+
+#include <linux/ethtool.h>
+
+extern const struct ethtool_ops mv_eth_tool_ops;
+
+#endif
diff --git a/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_tx_sched_sysfs.c b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_tx_sched_sysfs.c
new file mode 100644
index 000000000000..03692903a5c6
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_tx_sched_sysfs.c
@@ -0,0 +1,186 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "mv_eth_sysfs.h"
+#include "mv_netdev.h"
+
+static ssize_t mv_eth_help(char *b)
+{
+	int o = 0; /* buffer offset */
+	int s = PAGE_SIZE; /* buffer size */
+
+	o += scnprintf(b+o, s-o, "p, txp, txq, d                        - are dec numbers\n");
+	o += scnprintf(b+o, s-o, "\n");
+
+	o += scnprintf(b+o, s-o, "echo p txp         > wrr_regs      - show WRR registers for <p/txp>\n");
+	o += scnprintf(b+o, s-o, "echo p txp {0|1}   > ejp           - enable/disable EJP mode for <port/txp>\n");
+	o += scnprintf(b+o, s-o, "echo p txp d       > txp_rate      - set outgoing rate <d> in [kbps] for <port/txp>\n");
+	o += scnprintf(b+o, s-o, "echo p txp d       > txp_burst     - set maximum burst <d> in [Bytes] for <port/txp>\n");
+	o += scnprintf(b+o, s-o, "echo p txp txq d   > txq_rate      - set outgoing rate <d> in [kbps] for <port/txp/txq>\n");
+	o += scnprintf(b+o, s-o, "echo p txp txq d   > txq_burst     - set maximum burst <d> in [Bytes] for <port/txp/txq>\n");
+	o += scnprintf(b+o, s-o, "echo p txp txq d   > txq_wrr       - set outgoing WRR weight for <port/txp/txq>. <d=0> - fixed\n");
+
+	return o;
+}
+
+static ssize_t mv_eth_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	off = mv_eth_help(buf);
+
+	return off;
+}
+static ssize_t mv_eth_3_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, i, v;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	err = p = i = v = 0;
+	sscanf(buf, "%d %d %d", &p, &i, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "txp_rate")) {
+		err = mvNetaTxpRateSet(p, i, v);
+	} else if (!strcmp(name, "txp_burst")) {
+		err = mvNetaTxpBurstSet(p, i, v);
+	} else if (!strcmp(name, "ejp")) {
+		err = mvNetaTxpEjpSet(p, i, v);
+	} else {
+		err = 1;
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		pr_err("%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_eth_4_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, txp, txq, v;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	err = p = txp = txq = v = 0;
+	sscanf(buf, "%d %d %d %d", &p, &txp, &txq, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "wrr_regs")) {
+		mvEthTxpWrrRegs(p, txp);
+	} else if (!strcmp(name, "txq_rate")) {
+		err = mvNetaTxqRateSet(p, txp, txq, v);
+	} else if (!strcmp(name, "txq_burst")) {
+		err = mvNetaTxqBurstSet(p, txp, txq, v);
+	} else if (!strcmp(name, "txq_wrr")) {
+		if (v == 0)
+			err = mvNetaTxqFixPrioSet(p, txp, txq);
+		else
+			err = mvNetaTxqWrrPrioSet(p, txp, txq, v);
+	} else {
+		err = 1;
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+	local_irq_restore(flags);
+
+	if (err)
+		pr_err("%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(help,           S_IRUSR, mv_eth_show, NULL);
+static DEVICE_ATTR(wrr_regs,       S_IWUSR, NULL, mv_eth_4_store);
+static DEVICE_ATTR(ejp,            S_IWUSR, NULL, mv_eth_3_store);
+static DEVICE_ATTR(txp_rate,       S_IWUSR, NULL, mv_eth_3_store);
+static DEVICE_ATTR(txp_burst,      S_IWUSR, NULL, mv_eth_3_store);
+static DEVICE_ATTR(txq_rate,       S_IWUSR, NULL, mv_eth_4_store);
+static DEVICE_ATTR(txq_burst,      S_IWUSR, NULL, mv_eth_4_store);
+static DEVICE_ATTR(txq_wrr,        S_IWUSR, NULL, mv_eth_4_store);
+
+static struct attribute *mv_eth_tx_sched_attrs[] = {
+	&dev_attr_help.attr,
+	&dev_attr_wrr_regs.attr,
+	&dev_attr_ejp.attr,
+	&dev_attr_txp_rate.attr,
+	&dev_attr_txp_burst.attr,
+	&dev_attr_txq_rate.attr,
+	&dev_attr_txq_burst.attr,
+	&dev_attr_txq_wrr.attr,
+	NULL
+};
+
+static struct attribute_group mv_eth_tx_sched_group = {
+	.name = "tx_sched",
+	.attrs = mv_eth_tx_sched_attrs,
+};
+
+int mv_neta_tx_sched_sysfs_init(struct kobject *gbe_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(gbe_kobj, &mv_eth_tx_sched_group);
+	if (err)
+		pr_err("sysfs group %s failed %d\n", mv_eth_tx_sched_group.name, err);
+
+	return err;
+}
+
+int mv_neta_tx_sched_sysfs_exit(struct kobject *gbe_kobj)
+{
+	sysfs_remove_group(gbe_kobj, &mv_eth_tx_sched_group);
+
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_tx_sysfs.c b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_tx_sysfs.c
new file mode 100644
index 000000000000..e8c35482cef3
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_eth_tx_sysfs.c
@@ -0,0 +1,297 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "mv_eth_sysfs.h"
+#include "mv_netdev.h"
+
+
+static ssize_t mv_eth_help(char *b)
+{
+	int o = 0; /* buffer offset */
+	int s = PAGE_SIZE; /* buffer size */
+
+	o += scnprintf(b+o, s-o, "p, txp, txq, cpu, d                   - are dec numbers\n");
+	o += scnprintf(b+o, s-o, "v, mask                               - are hex numbers\n");
+	o += scnprintf(b+o, s-o, "\n");
+
+	o += scnprintf(b+o, s-o, "echo p txp         > txp_regs      - show TX registers for <p/txp>\n");
+	o += scnprintf(b+o, s-o, "echo p txp txq     > txq_regs      - show TXQ registers for <p/txp/txq>\n");
+	o += scnprintf(b+o, s-o, "echo p txp txq d   > txq           - show TXQ descriptors ring for <p/txp/txq>. d=0-brief, d=1-full\n");
+	o += scnprintf(b+o, s-o, "echo p txp         > txp_reset     - reset TX part of the port <p/txp>\n");
+	o += scnprintf(b+o, s-o, "echo p txp txq     > txq_clean     - clean TXQ <p/txp/txq> - free descriptors and buffers\n");
+	o += scnprintf(b+o, s-o, "echo p txp txq cpu > txq_def       - set default <txp/txq> for packets sent to port <p> by <cpu>\n");
+	o += scnprintf(b+o, s-o, "echo p txp txq d   > txq_size      - set number of descriptors <d> for <port/txp/txq>.\n");
+	o += scnprintf(b+o, s-o, "echo p txp txq d   > txq_coal      - set TXP/TXQ interrupt coalesing. <d> - number of sent packets\n");
+	o += scnprintf(b+o, s-o, "echo p cpu mask    > txq_mask      - set cpu <cpu> accessible txq bitmap <mask>.\n");
+	o += scnprintf(b+o, s-o, "echo p txp txq d   > txq_shared    - set/reset shared bit for <port/txp/txq>. <d> - 1/0 for set/reset.\n");
+	o += scnprintf(b+o, s-o, "echo d             > tx_done       - set threshold <d> to start tx_done operations\n");
+	o += scnprintf(b+o, s-o, "echo p {0|1}       > mh_en         - enable Marvell Header\n");
+	o += scnprintf(b+o, s-o, "echo p {0|1}       > tx_nopad      - disable zero padding on transmit\n");
+	o += scnprintf(b+o, s-o, "echo p v           > tx_mh_2B      - set 2 bytes of Marvell Header for transmit\n");
+	o += scnprintf(b+o, s-o, "echo p v           > tx_cmd        - set 4 bytes of TX descriptor offset 0xc\n");
+#ifdef CONFIG_MV_NETA_TXDONE_IN_HRTIMER
+	o += scnprintf(b+o, s-o, "echo period        > tx_period     - set Tx Done high resolution timer period\n");
+	o += scnprintf(b+o, s-o, "					period: period range is [%u, %u], unit usec\n",
+								MV_ETH_HRTIMER_PERIOD_MIN, MV_ETH_HRTIMER_PERIOD_MAX);
+#endif
+
+	return o;
+}
+
+static ssize_t mv_eth_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	off = mv_eth_help(buf);
+
+	return off;
+}
+
+static ssize_t mv_eth_port_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, v;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read port and value */
+	err = p = v = 0;
+	sscanf(buf, "%d %x", &p, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "tx_cmd")) {
+		err = mv_eth_ctrl_tx_cmd(p, v);
+	} else if (!strcmp(name, "mh_en")) {
+		err = mv_eth_ctrl_flag(p, MV_ETH_F_MH, v);
+	} else if (!strcmp(name, "tx_mh_2B")) {
+		err = mv_eth_ctrl_tx_mh(p, MV_16BIT_BE((u16)v));
+	} else if (!strcmp(name, "tx_nopad")) {
+		err = mv_eth_ctrl_flag(p, MV_ETH_F_NO_PAD, v);
+	} else {
+		err = 1;
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		pr_err("%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_eth_3_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, i, v;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	err = p = i = v = 0;
+	sscanf(buf, "%d %d %d", &p, &i, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "txp_reset")) {
+		err = mv_eth_txp_reset(p, i);
+	} else if (!strcmp(name, "tx_done")) {
+		mv_eth_ctrl_txdone(p);
+	} else if (!strcmp(name, "tx_period")) {
+#ifdef CONFIG_MV_NETA_TXDONE_IN_HRTIMER
+		err = mv_eth_tx_done_hrtimer_period_set(p);
+#endif
+	} else {
+		err = 1;
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		pr_err("%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_eth_3_hex_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, i, v;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	err = p = i = v = 0;
+	sscanf(buf, "%d %d %x", &p, &i, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "txq_mask")) {
+		err = mv_eth_cpu_txq_mask_set(p, i, v);
+	} else {
+		err = 1;
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_eth_4_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, txp, txq, v;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	err = p = txp = txq = v = 0;
+	sscanf(buf, "%d %d %d %d", &p, &txp, &txq, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "txq_def")) {
+		err = mv_eth_ctrl_txq_cpu_def(p, txp, txq, v);
+	} else if (!strcmp(name, "txp_regs")) {
+		mvNetaTxpRegs(p, txp);
+	} else if (!strcmp(name, "txq_size")) {
+		err = mv_eth_ctrl_txq_size_set(p, txp, txq, v);
+	} else if (!strcmp(name, "txq_coal")) {
+		mv_eth_tx_done_pkts_coal_set(p, txp, txq, v);
+	} else if (!strcmp(name, "txq")) {
+		mvNetaTxqShow(p, txp, txq, v);
+	} else if (!strcmp(name, "txq_regs")) {
+		mvNetaTxqRegs(p, txp, txq);
+	} else if (!strcmp(name, "txq_clean")) {
+		err = mv_eth_txq_clean(p, txp, txq);
+	} else if (!strcmp(name, "txq_shared")) {
+		err = mv_eth_shared_set(p, txp, txq, v);
+	} else {
+		err = 1;
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+	local_irq_restore(flags);
+
+	if (err)
+		pr_err("%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(help,           S_IRUSR, mv_eth_show, NULL);
+static DEVICE_ATTR(txp_regs,       S_IWUSR, NULL, mv_eth_4_store);
+static DEVICE_ATTR(txq_regs,       S_IWUSR, NULL, mv_eth_4_store);
+static DEVICE_ATTR(txq,            S_IWUSR, NULL, mv_eth_4_store);
+static DEVICE_ATTR(tx_mh_2B,       S_IWUSR, NULL, mv_eth_port_store);
+static DEVICE_ATTR(tx_cmd,         S_IWUSR, NULL, mv_eth_port_store);
+static DEVICE_ATTR(txp_reset,      S_IWUSR, NULL, mv_eth_3_store);
+static DEVICE_ATTR(txq_clean,      S_IWUSR, NULL, mv_eth_4_store);
+static DEVICE_ATTR(txq_def,        S_IWUSR, NULL, mv_eth_4_store);
+static DEVICE_ATTR(txq_size,       S_IWUSR, NULL, mv_eth_4_store);
+static DEVICE_ATTR(txq_coal,       S_IWUSR, NULL, mv_eth_4_store);
+static DEVICE_ATTR(mh_en,          S_IWUSR, NULL, mv_eth_port_store);
+static DEVICE_ATTR(tx_done,        S_IWUSR, NULL, mv_eth_3_store);
+#ifdef CONFIG_MV_NETA_TXDONE_IN_HRTIMER
+static DEVICE_ATTR(tx_period,      S_IWUSR, NULL, mv_eth_3_store);
+#endif
+static DEVICE_ATTR(txq_mask,       S_IWUSR, NULL, mv_eth_3_hex_store);
+static DEVICE_ATTR(txq_shared,     S_IWUSR, NULL, mv_eth_4_store);
+static DEVICE_ATTR(tx_nopad,       S_IWUSR, NULL, mv_eth_port_store);
+
+static struct attribute *mv_eth_tx_attrs[] = {
+	&dev_attr_help.attr,
+	&dev_attr_txp_regs.attr,
+	&dev_attr_txq_regs.attr,
+	&dev_attr_txq.attr,
+	&dev_attr_tx_mh_2B.attr,
+	&dev_attr_tx_cmd.attr,
+	&dev_attr_txp_reset.attr,
+	&dev_attr_txq_clean.attr,
+	&dev_attr_txq_def.attr,
+	&dev_attr_txq_size.attr,
+	&dev_attr_txq_coal.attr,
+	&dev_attr_mh_en.attr,
+	&dev_attr_tx_done.attr,
+#ifdef CONFIG_MV_NETA_TXDONE_IN_HRTIMER
+	&dev_attr_tx_period.attr,
+#endif
+	&dev_attr_txq_mask.attr,
+	&dev_attr_txq_shared.attr,
+	&dev_attr_tx_nopad.attr,
+	NULL
+};
+
+static struct attribute_group mv_eth_tx_group = {
+	.name = "tx",
+	.attrs = mv_eth_tx_attrs,
+};
+
+int mv_neta_tx_sysfs_init(struct kobject *gbe_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(gbe_kobj, &mv_eth_tx_group);
+	if (err)
+		pr_err("sysfs group %s failed %d\n", mv_eth_tx_group.name, err);
+
+	return err;
+}
+
+int mv_neta_tx_sysfs_exit(struct kobject *gbe_kobj)
+{
+	sysfs_remove_group(gbe_kobj, &mv_eth_tx_group);
+
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_ethernet.c b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_ethernet.c
new file mode 100644
index 000000000000..11736ccd14df
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_ethernet.c
@@ -0,0 +1,435 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+
+#include "mvCommon.h"
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+
+#include "mvOs.h"
+
+#include "mvEthPhy.h"
+#include "gbe/mvNeta.h"
+#include "pnc/mvPnc.h"
+
+#include "mv_netdev.h"
+
+static int mv_eth_set_mac_addr_internals(struct net_device *dev, void *addr);
+
+/***********************************************************
+ * mv_eth_start --                                         *
+ *   start a network device. connect and enable interrupts *
+ *   set hw defaults. fill rx buffers. restart phy link    *
+ *   auto neg. set device link flags. report status.       *
+ ***********************************************************/
+int mv_eth_start(struct net_device *dev)
+{
+	struct eth_port *priv = MV_ETH_PRIV(dev);
+	int group;
+
+	/* in default link is down */
+	netif_carrier_off(dev);
+
+	/* Stop the TX queue - it will be enabled upon PHY status change after link-up interrupt/timer */
+	netif_tx_stop_all_queues(dev);
+
+	/* fill rx buffers, start rx/tx activity, set coalescing */
+	if (mv_eth_start_internals(priv, dev->mtu) != 0) {
+		printk(KERN_ERR "%s: start internals failed\n", dev->name);
+		goto error;
+	}
+
+	/* enable polling on the port, must be used after netif_poll_disable */
+	if (priv->flags & MV_ETH_F_CONNECT_LINUX)
+		for (group = 0; group < CONFIG_MV_ETH_NAPI_GROUPS; group++)
+			napi_enable(priv->napiGroup[group]);
+
+	if (priv->flags & MV_ETH_F_LINK_UP) {
+		if (mv_eth_ctrl_is_tx_enabled(priv)) {
+			netif_carrier_on(dev);
+			netif_tx_wake_all_queues(dev);
+		}
+		printk(KERN_NOTICE "%s: link up\n", dev->name);
+	}
+	if (priv->flags & MV_ETH_F_CONNECT_LINUX) {
+		/* connect to port interrupt line */
+		if (request_irq(dev->irq, mv_eth_isr, (IRQF_DISABLED), dev->name, priv)) {
+			printk(KERN_ERR "cannot request irq %d for %s port %d\n", dev->irq, dev->name, priv->port);
+			if (priv->flags & MV_ETH_F_CONNECT_LINUX)
+				napi_disable(priv->napiGroup[CPU_GROUP_DEF]);
+			goto error;
+		}
+
+		/* unmask interrupts */
+		on_each_cpu(mv_eth_interrupts_unmask, priv, 1);
+
+		printk(KERN_NOTICE "%s: started\n", dev->name);
+	}
+	return 0;
+error:
+	printk(KERN_ERR "%s: start failed\n", dev->name);
+	return -1;
+}
+
+/***********************************************************
+ * mv_eth_stop --                                          *
+ *   stop interface with linux core. stop port activity.   *
+ *   free skb's from rings.                                *
+ ***********************************************************/
+int mv_eth_stop(struct net_device *dev)
+{
+	struct eth_port *priv = MV_ETH_PRIV(dev);
+	struct cpu_ctrl *cpuCtrl;
+	int group, cpu;
+
+	/* first make sure that the port finished its Rx polling - see tg3 */
+	for (group = 0; group < CONFIG_MV_ETH_NAPI_GROUPS; group++)
+		napi_disable(priv->napiGroup[group]);
+
+	/* stop upper layer */
+	netif_carrier_off(dev);
+	netif_tx_stop_all_queues(dev);
+
+	/* stop tx/rx activity, mask all interrupts, relese skb in rings,*/
+	mv_eth_stop_internals(priv);
+	for_each_possible_cpu(cpu) {
+		cpuCtrl = priv->cpu_config[cpu];
+#if defined(CONFIG_MV_NETA_TXDONE_IN_HRTIMER)
+		hrtimer_cancel(&cpuCtrl->tx_done_timer);
+		clear_bit(MV_ETH_F_TX_DONE_TIMER_BIT, &(cpuCtrl->flags));
+#elif defined(CONFIG_MV_NETA_TXDONE_IN_TIMER)
+		del_timer(&cpuCtrl->tx_done_timer);
+		clear_bit(MV_ETH_F_TX_DONE_TIMER_BIT, &(cpuCtrl->flags));
+#endif
+		del_timer(&cpuCtrl->cleanup_timer);
+		clear_bit(MV_ETH_F_CLEANUP_TIMER_BIT, &(cpuCtrl->flags));
+	}
+
+	if (dev->irq != 0)
+		free_irq(dev->irq, priv);
+
+	printk(KERN_NOTICE "%s: stopped\n", dev->name);
+
+	return 0;
+}
+
+
+int mv_eth_change_mtu(struct net_device *dev, int mtu)
+{
+	int old_mtu = dev->mtu;
+
+	mtu = mv_eth_check_mtu_valid(dev, mtu);
+	if (mtu < 0)
+		return -EINVAL;
+
+	if (!netif_running(dev)) {
+		if (mv_eth_change_mtu_internals(dev, mtu) == -1)
+			goto error;
+
+		printk(KERN_NOTICE "%s: change mtu %d (buffer-size %d) to %d (buffer-size %d)\n",
+				dev->name, old_mtu, RX_PKT_SIZE(old_mtu),
+				dev->mtu, RX_PKT_SIZE(dev->mtu));
+		return 0;
+	}
+
+	if (mv_eth_check_mtu_internals(dev, mtu))
+		goto error;
+
+	if (mv_eth_stop(dev)) {
+		printk(KERN_ERR "%s: stop interface failed\n", dev->name);
+		goto error;
+	}
+
+	if (mv_eth_change_mtu_internals(dev, mtu) == -1) {
+		printk(KERN_ERR "%s change mtu internals failed\n", dev->name);
+		goto error;
+	}
+
+	if (mv_eth_start(dev)) {
+		printk(KERN_ERR "%s: start interface failed\n", dev->name);
+		goto error;
+	}
+	printk(KERN_NOTICE "%s: change mtu %d (buffer-size %d) to %d (buffer-size %d)\n",
+				dev->name, old_mtu, RX_PKT_SIZE(old_mtu), dev->mtu,
+				RX_PKT_SIZE(dev->mtu));
+	return 0;
+
+error:
+	printk(KERN_ERR "%s: change mtu failed\n", dev->name);
+	return -1;
+}
+
+/***********************************************************
+ * eth_set_mac_addr --                                   *
+ *   stop port activity. set new addr in device and hw.    *
+ *   restart port activity.                                *
+ ***********************************************************/
+static int mv_eth_set_mac_addr_internals(struct net_device *dev, void *addr)
+{
+	struct eth_port *priv = MV_ETH_PRIV(dev);
+	u8              *mac = &(((u8 *)addr)[2]);  /* skip on first 2B (ether HW addr type) */
+	int             i;
+
+#ifdef CONFIG_MV_ETH_PNC
+	if (MV_NETA_PNC_CAP()) {
+		if (mv_eth_pnc_ctrl_en) {
+			if (pnc_mac_me(priv->port, mac, CONFIG_MV_ETH_RXQ_DEF)) {
+				pr_err("%s: ethSetMacAddr failed\n", dev->name);
+				return -1;
+			}
+		} else {
+			pr_err("%s: PNC control is disabled\n", __func__);
+			return -1;
+		}
+	} else {
+		/* remove previous address table entry */
+		if (mvNetaMacAddrSet(priv->port, dev->dev_addr, -1) != MV_OK) {
+			pr_err("%s: ethSetMacAddr failed\n", dev->name);
+			return -1;
+		}
+
+		/* set new addr in hw */
+		if (mvNetaMacAddrSet(priv->port, mac, CONFIG_MV_ETH_RXQ_DEF) != MV_OK) {
+			pr_err("%s: ethSetMacAddr failed\n", dev->name);
+			return -1;
+		}
+	}
+#else
+	/* remove previous address table entry */
+	if (mvNetaMacAddrSet(priv->port, dev->dev_addr, -1) != MV_OK) {
+		pr_err("%s: ethSetMacAddr failed\n", dev->name);
+		return -1;
+	}
+
+	/* set new addr in hw */
+	if (mvNetaMacAddrSet(priv->port, mac, CONFIG_MV_ETH_RXQ_DEF) != MV_OK) {
+		pr_err("%s: ethSetMacAddr failed\n", dev->name);
+		return -1;
+	}
+#endif
+	/* set addr in the device */
+	for (i = 0; i < 6; i++)
+		dev->dev_addr[i] = mac[i];
+
+	printk(KERN_NOTICE "%s: mac address changed\n", dev->name);
+
+	return 0;
+}
+
+#ifdef CONFIG_MV_ETH_PNC
+void mv_eth_set_multicast_list_pnc(struct net_device *dev)
+{
+	struct eth_port     *priv = MV_ETH_PRIV(dev);
+	int                 rxq = CONFIG_MV_ETH_RXQ_DEF;
+/*
+	printk("%s - mv_eth_set_multicast_list: flags=0x%x, mc_count=%d\n",
+		dev->name, dev->flags, dev->mc_count);
+*/
+	if (!mv_eth_pnc_ctrl_en) {
+		printk(KERN_ERR "%s: PNC control is disabled\n", __func__);
+		return;
+	}
+
+	if (dev->flags & IFF_PROMISC) {
+		/* Accept all */
+		pnc_mac_me(priv->port, NULL, rxq);
+		pnc_mcast_all(priv->port, 1);
+	} else {
+		/* Accept Unicast to me */
+		pnc_mac_me(priv->port, dev->dev_addr, rxq);
+
+		if (dev->flags & IFF_ALLMULTI) {
+			/* Accept all multicast */
+			pnc_mcast_all(priv->port, 1);
+		} else {
+			/* Accept only initialized Multicast */
+			pnc_mcast_all(priv->port, 0);
+			pnc_mcast_me(priv->port, NULL);
+
+			/* Number of entires for all ports is restricted by CONFIG_MV_ETH_PNC_MCAST_NUM */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34)
+			if (!netdev_mc_empty(dev)) {
+				struct netdev_hw_addr *ha;
+
+				netdev_for_each_mc_addr(ha, dev) {
+					if (pnc_mcast_me(priv->port, ha->addr)) {
+						printk(KERN_ERR "%s: Mcast init failed\n", dev->name);
+						break;
+					}
+				}
+			}
+#else
+			{
+				struct dev_mc_list *curr_addr = dev->mc_list;
+				int                i;
+				for (i = 0; i < dev->mc_count; i++, curr_addr = curr_addr->next) {
+					if (!curr_addr)
+						break;
+					if (pnc_mcast_me(priv->port, curr_addr->dmi_addr)) {
+						printk(KERN_ERR "%s: Mcast init failed - %d of %d\n",
+							dev->name, i, dev->mc_count);
+						break;
+					}
+				}
+			}
+#endif /* KERNEL_VERSION >= 2.6.34 */
+		}
+	}
+}
+#endif
+
+void mv_eth_set_multicast_list_legacy(struct net_device *dev)
+{
+	struct eth_port    *priv = MV_ETH_PRIV(dev);
+	int                queue = CONFIG_MV_ETH_RXQ_DEF;
+
+	if (dev->flags & IFF_PROMISC) {
+		/* Accept all: Multicast + Unicast */
+		mvNetaRxUnicastPromiscSet(priv->port, MV_TRUE);
+		mvNetaSetUcastTable(priv->port, queue);
+		mvNetaSetSpecialMcastTable(priv->port, queue);
+		mvNetaSetOtherMcastTable(priv->port, queue);
+	} else {
+		/* Accept single Unicast */
+		mvNetaRxUnicastPromiscSet(priv->port, MV_FALSE);
+		mvNetaSetUcastTable(priv->port, -1);
+		if (mvNetaMacAddrSet(priv->port, dev->dev_addr, queue) != MV_OK)
+			printk(KERN_ERR "%s: netaSetMacAddr failed\n", dev->name);
+
+		if (dev->flags & IFF_ALLMULTI) {
+			/* Accept all Multicast */
+			mvNetaSetSpecialMcastTable(priv->port, queue);
+			mvNetaSetOtherMcastTable(priv->port, queue);
+		} else {
+			/* Accept only initialized Multicast */
+			mvNetaSetSpecialMcastTable(priv->port, -1);
+			mvNetaSetOtherMcastTable(priv->port, -1);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34)
+			if (!netdev_mc_empty(dev)) {
+				struct netdev_hw_addr *ha;
+
+				netdev_for_each_mc_addr(ha, dev) {
+					mvNetaMcastAddrSet(priv->port, ha->addr, queue);
+				}
+			}
+#else
+			{
+				struct dev_mc_list *curr_addr = dev->mc_list;
+				int                i;
+				for (i = 0; i < dev->mc_count; i++, curr_addr = curr_addr->next) {
+					if (!curr_addr)
+						break;
+					mvNetaMcastAddrSet(priv->port, curr_addr->dmi_addr, queue);
+				}
+			}
+#endif /* KERNEL_VERSION >= 2.6.34 */
+		}
+	}
+}
+
+void mv_eth_set_multicast_list(struct net_device *dev)
+{
+#ifdef CONFIG_MV_ETH_PNC
+	if (MV_NETA_PNC_CAP())
+		mv_eth_set_multicast_list_pnc(dev);
+	else
+		mv_eth_set_multicast_list_legacy(dev);
+#else
+	mv_eth_set_multicast_list_legacy(dev);
+#endif /* CONFIG_MV_ETH_PNC */
+}
+
+int     mv_eth_set_mac_addr(struct net_device *dev, void *addr)
+{
+	if (!netif_running(dev)) {
+		if (mv_eth_set_mac_addr_internals(dev, addr) == -1)
+			goto error;
+		return 0;
+	}
+
+	if (mv_eth_stop(dev)) {
+		printk(KERN_ERR "%s: stop interface failed\n", dev->name);
+		goto error;
+	}
+
+	if (mv_eth_set_mac_addr_internals(dev, addr) == -1)
+		goto error;
+
+	if (mv_eth_start(dev)) {
+		printk(KERN_ERR "%s: start interface failed\n", dev->name);
+		goto error;
+	}
+
+	return 0;
+
+error:
+	printk(KERN_ERR "%s: set mac addr failed\n", dev->name);
+	return -1;
+}
+
+
+/************************************************************
+ * mv_eth_open -- Restore MAC address and call to   *
+ *                mv_eth_start                               *
+ ************************************************************/
+int mv_eth_open(struct net_device *dev)
+{
+	struct eth_port	*priv = MV_ETH_PRIV(dev);
+	int         queue = CONFIG_MV_ETH_RXQ_DEF;
+
+#ifdef CONFIG_MV_ETH_PNC
+	if (MV_NETA_PNC_CAP()) {
+		if (mv_eth_pnc_ctrl_en) {
+			if (pnc_mac_me(priv->port, dev->dev_addr, queue)) {
+				pr_err("%s: ethSetMacAddr failed\n", dev->name);
+				return -1;
+			}
+		} else
+			pr_err("%s: PNC control is disabled\n", __func__);
+	} else {/* Legacy parser */
+		if (mvNetaMacAddrSet(priv->port, dev->dev_addr, queue) != MV_OK) {
+			pr_err("%s: ethSetMacAddr failed\n", dev->name);
+			return -1;
+		}
+	}
+#else /* Legacy parser */
+	if (mvNetaMacAddrSet(priv->port, dev->dev_addr, queue) != MV_OK) {
+		pr_err("%s: ethSetMacAddr failed\n", dev->name);
+		return -1;
+	}
+#endif /* CONFIG_MV_ETH_PNC */
+
+	if (mv_eth_start(dev)) {
+		pr_err("%s: start interface failed\n", dev->name);
+		return -1;
+	}
+	return 0;
+}
diff --git a/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_netdev.c b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_netdev.c
new file mode 100755
index 000000000000..588ad01d5aac
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_netdev.c
@@ -0,0 +1,7514 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+
+#include "mvCommon.h"
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/inetdevice.h>
+#include <linux/mv_neta.h>
+#include <linux/mbus.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <linux/module.h>
+#include "mvOs.h"
+#include "mvDebug.h"
+#include "mvEthPhy.h"
+
+#include "gbe/mvNeta.h"
+#include "bm/mvBm.h"
+#include "pnc/mvPnc.h"
+#include "pnc/mvTcam.h"
+#include "pmt/mvPmt.h"
+#include "mv_mux_netdev.h"
+
+#include "mv_netdev.h"
+#include "mv_eth_tool.h"
+#include "mv_eth_sysfs.h"
+
+#ifdef CONFIG_OF
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#endif /* CONFIG_OF */
+
+#ifdef CONFIG_MV_NETA_TXDONE_IN_HRTIMER
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#endif
+
+#ifdef CONFIG_ARCH_MVEBU
+#include "mvebu-soc-id.h"
+#include "mvNetConfig.h"
+#else
+#include "mvSysEthConfig.h"
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#endif /* CONFIG_ARCH_MVEBU */
+
+#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE)
+#include <mv_neta_netmap.h>
+#endif
+
+#ifdef CONFIG_OF
+int port_vbase[MV_ETH_MAX_PORTS];
+int bm_reg_vbase, pnc_reg_vbase;
+static u32 pnc_phyaddr_base;
+static u32 pnc_win_size;
+static u32 bm_phyaddr_base;
+static u32 bm_win_size;
+#endif /* CONFIG_OF */
+
+static struct mv_mux_eth_ops mux_eth_ops;
+
+#ifdef CONFIG_MV_CPU_PERF_CNTRS
+#include "cpu/mvCpuCntrs.h"
+MV_CPU_CNTRS_EVENT	*event0 = NULL;
+MV_CPU_CNTRS_EVENT	*event1 = NULL;
+MV_CPU_CNTRS_EVENT	*event2 = NULL;
+MV_CPU_CNTRS_EVENT	*event3 = NULL;
+MV_CPU_CNTRS_EVENT	*event4 = NULL;
+MV_CPU_CNTRS_EVENT	*event5 = NULL;
+#endif /* CONFIG_MV_CPU_PERF_CNTRS */
+
+static struct  platform_device *neta_sysfs;
+unsigned int ext_switch_port_mask = 0;
+
+void handle_group_affinity(int port);
+void set_rxq_affinity(struct eth_port *pp, MV_U32 rxqAffinity, int group);
+static inline int mv_eth_tx_policy(struct eth_port *pp, struct sk_buff *skb);
+
+/* uncomment if you want to debug the SKB recycle feature */
+/* #define ETH_SKB_DEBUG */
+
+static int pm_flag;
+static int wol_ports_bmp;
+
+#ifdef CONFIG_MV_ETH_PNC
+unsigned int mv_eth_pnc_ctrl_en = 1;
+
+int mv_eth_ctrl_pnc(int en)
+{
+	mv_eth_pnc_ctrl_en = en;
+	return 0;
+}
+#endif /* CONFIG_MV_ETH_PNC */
+
+//alpha.jack.20140707+
+int mv_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	unsigned long	*data;
+	unsigned long offset;
+	u16 phy_reg_data;
+
+	data = (unsigned long *)rq->ifr_data;
+
+	switch(cmd)
+	{
+		case SIOCDEVPRIVATE+0:
+			switch( data[0])
+			{
+				case 0: /* read/write phy */
+					switch( data[1])
+					{
+						case 0: /* read phy */
+							offset = data[3];
+							mvEthPhyRegRead( data[2], offset ,&phy_reg_data);
+							*data = phy_reg_data;
+							break;
+
+						case 1: /* write phy */
+							offset = data[3];
+							phy_reg_data = (u16)*(data+4);
+							mvEthPhyRegWrite(data[2], offset, phy_reg_data );
+							break;
+					}
+					break;
+			}
+			break;
+
+		 default:
+		 	return -EOPNOTSUPP;
+
+	}
+
+	return 0;
+}
+//alpha end
+#ifdef CONFIG_MV_NETA_SKB_RECYCLE
+int mv_ctrl_recycle = CONFIG_MV_NETA_SKB_RECYCLE_DEF;
+EXPORT_SYMBOL(mv_ctrl_recycle);
+
+int mv_eth_ctrl_recycle(int en)
+{
+	mv_ctrl_recycle = en;
+	return 0;
+}
+#else
+int mv_eth_ctrl_recycle(int en)
+{
+	printk(KERN_ERR "SKB recycle is not supported\n");
+	return 1;
+}
+#endif /* CONFIG_MV_NETA_SKB_RECYCLE */
+
+extern u8 mvMacAddr[CONFIG_MV_ETH_PORTS_NUM][MV_MAC_ADDR_SIZE];
+extern u16 mvMtu[CONFIG_MV_ETH_PORTS_NUM];
+static const u8 *mac_src[CONFIG_MV_ETH_PORTS_NUM];
+
+extern unsigned int switch_enabled_ports;
+
+struct bm_pool mv_eth_pool[MV_ETH_BM_POOLS];
+struct eth_port **mv_eth_ports;
+
+/* Global device used for global cache operation */
+static struct device *neta_global_dev;
+
+int mv_ctrl_txdone = CONFIG_MV_ETH_TXDONE_COAL_PKTS;
+EXPORT_SYMBOL(mv_ctrl_txdone);
+
+/*
+ * Static declarations
+ */
+static int mv_eth_ports_num = 0;
+
+static int mv_eth_initialized = 0;
+
+#ifdef CONFIG_MV_NETA_TXDONE_IN_HRTIMER
+static unsigned int mv_eth_tx_done_hrtimer_period_us = CONFIG_MV_NETA_TX_DONE_HIGH_RES_TIMER_PERIOD;
+#endif
+
+/*
+ * Local functions
+ */
+static void mv_eth_txq_delete(struct eth_port *pp, struct tx_queue *txq_ctrl);
+static void mv_eth_tx_timeout(struct net_device *dev);
+static int  mv_eth_tx(struct sk_buff *skb, struct net_device *dev);
+static void mv_eth_tx_frag_process(struct eth_port *pp, struct sk_buff *skb, struct tx_queue *txq_ctrl, u16 flags);
+static int mv_eth_rxq_fill(struct eth_port *pp, int rxq, int num);
+
+static void mv_eth_config_show(void);
+static int  mv_eth_priv_init(struct eth_port *pp, int port);
+static void mv_eth_priv_cleanup(struct eth_port *pp);
+static int  mv_eth_hal_init(struct eth_port *pp);
+struct net_device *mv_eth_netdev_init(struct platform_device *pdev);
+static void mv_eth_netdev_init_features(struct net_device *dev);
+
+static MV_STATUS mv_eth_pool_create(int pool, int capacity);
+static int mv_eth_pool_add(struct eth_port *pp, int pool, int buf_num);
+static int mv_eth_pool_free(int pool, int num);
+static int mv_eth_pool_destroy(int pool);
+
+#ifdef CONFIG_MV_ETH_TSO
+int mv_eth_tx_tso(struct sk_buff *skb, struct net_device *dev, struct mv_eth_tx_spec *tx_spec,
+		struct tx_queue *txq_ctrl);
+#endif
+
+/* Get the configuration string from the Kernel Command Line */
+static char *port0_config_str = NULL, *port1_config_str = NULL, *port2_config_str = NULL, *port3_config_str = NULL;
+int mv_eth_cmdline_port0_config(char *s);
+__setup("mv_port0_config=", mv_eth_cmdline_port0_config);
+int mv_eth_cmdline_port1_config(char *s);
+__setup("mv_port1_config=", mv_eth_cmdline_port1_config);
+int mv_eth_cmdline_port2_config(char *s);
+__setup("mv_port2_config=", mv_eth_cmdline_port2_config);
+int mv_eth_cmdline_port3_config(char *s);
+__setup("mv_port3_config=", mv_eth_cmdline_port3_config);
+
+#ifdef CONFIG_MV_NETA_TXDONE_IN_HRTIMER
+unsigned int mv_eth_tx_done_hrtimer_period_get(void)
+{
+	return mv_eth_tx_done_hrtimer_period_us;
+}
+
+int mv_eth_tx_done_hrtimer_period_set(unsigned int period)
+{
+	if ((period < MV_ETH_HRTIMER_PERIOD_MIN) || (period > MV_ETH_HRTIMER_PERIOD_MAX)) {
+		pr_info("period should be in [%u, %u]\n", MV_ETH_HRTIMER_PERIOD_MIN, MV_ETH_HRTIMER_PERIOD_MAX);
+		return -EINVAL;
+	}
+
+	mv_eth_tx_done_hrtimer_period_us = period;
+	return 0;
+}
+#endif
+
+int mv_eth_cmdline_port0_config(char *s)
+{
+	port0_config_str = s;
+	return 1;
+}
+
+int mv_eth_cmdline_port1_config(char *s)
+{
+	port1_config_str = s;
+	return 1;
+}
+
+int mv_eth_cmdline_port2_config(char *s)
+{
+	port2_config_str = s;
+	return 1;
+}
+
+int mv_eth_cmdline_port3_config(char *s)
+{
+	port3_config_str = s;
+	return 1;
+}
+void mv_eth_stack_print(int port, MV_BOOL isPrintElements)
+{
+	struct eth_port *pp;
+
+	if (mvNetaPortCheck(port))
+		return;
+
+	pp = mv_eth_port_by_id(port);
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return;
+	}
+
+	if (pp->pool_long == NULL) {
+		pr_err("%s: Error - long pool is null\n", __func__);
+		return;
+	}
+
+	pr_info("Long pool (%d) stack\n", pp->pool_long->pool);
+	mvStackStatus(pp->pool_long->stack, isPrintElements);
+
+#ifdef CONFIG_MV_ETH_BM_CPU
+	if (MV_NETA_BM_CAP()) {
+		if (pp->pool_short == NULL) {
+			pr_err("%s: Error - short pool is null\n", __func__);
+			return;
+		}
+		pr_info("Short pool (%d) stack\n", pp->pool_short->pool);
+		mvStackStatus(pp->pool_short->stack, isPrintElements);
+	}
+#endif /* CONFIG_MV_ETH_BM_CPU */
+}
+
+
+/*****************************************
+ *          Adaptive coalescing          *
+ *****************************************/
+static void mv_eth_adaptive_rx_update(struct eth_port *pp)
+{
+	unsigned long period = jiffies - pp->rx_timestamp;
+
+	if (period >= (pp->rate_sample_cfg * HZ)) {
+		int i;
+		unsigned long rate = pp->rx_rate_pkts * HZ / period;
+
+		if (rate < pp->pkt_rate_low_cfg) {
+			if (pp->rate_current != 1) {
+				pp->rate_current = 1;
+				for (i = 0; i < CONFIG_MV_ETH_RXQ; i++) {
+					mv_eth_rx_time_coal_set(pp->port, i, pp->rx_time_low_coal_cfg);
+					mv_eth_rx_pkts_coal_set(pp->port, i, pp->rx_pkts_low_coal_cfg);
+				}
+			}
+		} else if (rate > pp->pkt_rate_high_cfg) {
+			if (pp->rate_current != 3) {
+				pp->rate_current = 3;
+				for (i = 0; i < CONFIG_MV_ETH_RXQ; i++) {
+					mv_eth_rx_time_coal_set(pp->port, i, pp->rx_time_high_coal_cfg);
+					mv_eth_rx_pkts_coal_set(pp->port, i, pp->rx_pkts_high_coal_cfg);
+				}
+			}
+		} else {
+			if (pp->rate_current != 2) {
+				pp->rate_current = 2;
+				for (i = 0; i < CONFIG_MV_ETH_RXQ; i++) {
+					mv_eth_rx_time_coal_set(pp->port, i, pp->rx_time_coal_cfg);
+					mv_eth_rx_pkts_coal_set(pp->port, i, pp->rx_pkts_coal_cfg);
+				}
+			}
+		}
+		pp->rx_rate_pkts = 0;
+		pp->rx_timestamp = jiffies;
+	}
+}
+
+/*****************************************
+ *            MUX function                *
+ *****************************************/
+
+static int mv_eth_tag_type_set(int port, int type)
+{
+	struct eth_port *pp;
+
+	if (mvNetaPortCheck(port))
+		return -EINVAL;
+
+	pp = mv_eth_port_by_id(port);
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return -EINVAL;
+	}
+
+	if ((type == MV_TAG_TYPE_MH) || (type == MV_TAG_TYPE_DSA) || (type == MV_TAG_TYPE_EDSA))
+		mvNetaMhSet(port, type);
+
+	pp->tagged = (type == MV_TAG_TYPE_NONE) ? MV_FALSE : MV_TRUE;
+
+	return 0;
+}
+
+void set_cpu_affinity(struct eth_port *pp, MV_U32 cpuAffinity, int group)
+{
+	int cpu;
+	struct cpu_ctrl	*cpuCtrl;
+	MV_U32 rxqAffinity = 0;
+
+	/* nothing to do when cpuAffinity == 0 */
+	if (cpuAffinity == 0)
+		return;
+
+	/* First, read affinity of the target group, in case it contains CPUs */
+	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+		cpuCtrl = pp->cpu_config[cpu];
+		if (!(MV_BIT_CHECK(pp->cpu_mask, cpu)))
+			continue;
+		if (cpuCtrl->napiCpuGroup == group) {
+			rxqAffinity = MV_REG_READ(NETA_CPU_MAP_REG(pp->port, cpu)) & 0xff;
+			break;
+		}
+	}
+	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+		if (cpuAffinity & 1) {
+			cpuCtrl = pp->cpu_config[cpu];
+			cpuCtrl->napi = pp->napiGroup[group];
+			cpuCtrl->napiCpuGroup = group;
+			cpuCtrl->cpuRxqMask = rxqAffinity;
+			/* set rxq affinity of the target group */
+			mvNetaRxqCpuMaskSet(pp->port, rxqAffinity, cpu);
+		}
+		cpuAffinity >>= 1;
+	}
+}
+
+int group_has_cpus(struct eth_port *pp, int group)
+{
+	int cpu;
+	struct cpu_ctrl	*cpuCtrl;
+
+	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+		if (!(MV_BIT_CHECK(pp->cpu_mask, cpu)))
+			continue;
+
+		cpuCtrl = pp->cpu_config[cpu];
+
+		if (cpuCtrl->napiCpuGroup == group)
+			return 1;
+	}
+
+	/* the group contains no CPU */
+	return 0;
+}
+
+void set_rxq_affinity(struct eth_port *pp, MV_U32 rxqAffinity, int group)
+{
+	int rxq, cpu;
+	MV_U32 regVal;
+	MV_U32 tmpRxqAffinity;
+	int groupHasCpus;
+	int cpuInGroup;
+	struct cpu_ctrl	*cpuCtrl;
+
+	/* nothing to do when rxqAffinity == 0 */
+	if (rxqAffinity == 0)
+		return;
+
+	groupHasCpus = group_has_cpus(pp, group);
+
+	if (!groupHasCpus) {
+		printk(KERN_ERR "%s: operation not performed; group %d has no cpu \n", __func__, group);
+		return;
+	}
+
+	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+		if (!(MV_BIT_CHECK(pp->cpu_mask, cpu)))
+			continue;
+		tmpRxqAffinity = rxqAffinity;
+
+		regVal = MV_REG_READ(NETA_CPU_MAP_REG(pp->port, cpu));
+		cpuCtrl = pp->cpu_config[cpu];
+
+		if (cpuCtrl->napiCpuGroup == group) {
+			cpuInGroup = 1;
+			/* init TXQ Access Enable bits */
+			regVal = regVal & 0xff00;
+		} else {
+			cpuInGroup = 0;
+		}
+
+		for (rxq = 0; rxq < CONFIG_MV_ETH_RXQ; rxq++) {
+			/* set rxq affinity for this cpu */
+			if (tmpRxqAffinity & 1) {
+				if (cpuInGroup)
+					regVal |= NETA_CPU_RXQ_ACCESS_MASK(rxq);
+				else
+					regVal &= ~NETA_CPU_RXQ_ACCESS_MASK(rxq);
+			}
+			tmpRxqAffinity >>= 1;
+		}
+
+		MV_REG_WRITE(NETA_CPU_MAP_REG(pp->port, cpu), regVal);
+		cpuCtrl->cpuRxqMask = regVal;
+	}
+}
+
+static int mv_eth_port_config_parse(struct eth_port *pp)
+{
+	char *str;
+
+	printk(KERN_ERR "\n");
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", pp->port);
+		return -1;
+	}
+
+	switch (pp->port) {
+	case 0:
+		str = port0_config_str;
+		break;
+	case 1:
+		str = port1_config_str;
+		break;
+	case 2:
+		str = port2_config_str;
+		break;
+	case 3:
+		str = port3_config_str;
+		break;
+	default:
+		printk(KERN_ERR "  o mv_eth_port_config_parse: got unknown port %d\n", pp->port);
+		return -1;
+	}
+
+	if (str != NULL) {
+		if ((!strcmp(str, "disconnected")) || (!strcmp(str, "Disconnected"))) {
+			printk(KERN_ERR "  o Port %d is disconnected from Linux netdevice\n", pp->port);
+			clear_bit(MV_ETH_F_CONNECT_LINUX_BIT, &(pp->flags));
+			return 0;
+		}
+	}
+
+	printk(KERN_ERR "  o Port %d is connected to Linux netdevice\n", pp->port);
+	set_bit(MV_ETH_F_CONNECT_LINUX_BIT, &(pp->flags));
+	return 0;
+}
+
+#ifdef ETH_SKB_DEBUG
+struct sk_buff *mv_eth_skb_debug[MV_BM_POOL_CAP_MAX * MV_ETH_BM_POOLS];
+static spinlock_t skb_debug_lock;
+
+void mv_eth_skb_check(struct sk_buff *skb)
+{
+	int i;
+	struct sk_buff *temp;
+	unsigned long flags;
+
+	if (skb == NULL)
+		printk(KERN_ERR "mv_eth_skb_check: got NULL SKB\n");
+
+	spin_lock_irqsave(&skb_debug_lock, flags);
+
+	i = *((u32 *)&skb->cb[0]);
+
+	if ((i >= 0) && (i < MV_BM_POOL_CAP_MAX * MV_ETH_BM_POOLS)) {
+		temp = mv_eth_skb_debug[i];
+		if (mv_eth_skb_debug[i] != skb) {
+			printk(KERN_ERR "mv_eth_skb_check: Unexpected skb: %p (%d) != %p (%d)\n",
+			       skb, i, temp, *((u32 *)&temp->cb[0]));
+		}
+		mv_eth_skb_debug[i] = NULL;
+	} else {
+		printk(KERN_ERR "mv_eth_skb_check: skb->cb=%d is out of range\n", i);
+	}
+
+	spin_unlock_irqrestore(&skb_debug_lock, flags);
+}
+
+void mv_eth_skb_save(struct sk_buff *skb, const char *s)
+{
+	int i;
+	int saved = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&skb_debug_lock, flags);
+
+	for (i = 0; i < MV_BM_POOL_CAP_MAX * MV_ETH_BM_POOLS; i++) {
+		if (mv_eth_skb_debug[i] == skb) {
+			printk(KERN_ERR "%s: mv_eth_skb_debug Duplicate: i=%d, skb=%p\n", s, i, skb);
+			mv_eth_skb_print(skb);
+		}
+
+		if ((!saved) && (mv_eth_skb_debug[i] == NULL)) {
+			mv_eth_skb_debug[i] = skb;
+			*((u32 *)&skb->cb[0]) = i;
+			saved = 1;
+		}
+	}
+
+	spin_unlock_irqrestore(&skb_debug_lock, flags);
+
+	if ((i == MV_BM_POOL_CAP_MAX * MV_ETH_BM_POOLS) && (!saved))
+		printk(KERN_ERR "mv_eth_skb_debug is FULL, skb=%p\n", skb);
+}
+#endif /* ETH_SKB_DEBUG */
+
+struct eth_port *mv_eth_port_by_id(unsigned int port)
+{
+	if (port < mv_eth_ports_num)
+		return mv_eth_ports[port];
+
+	return NULL;
+}
+
+static inline int mv_eth_skb_mh_add(struct sk_buff *skb, u16 mh)
+{
+	/* sanity: Check that there is place for MH in the buffer */
+	if (skb_headroom(skb) < MV_ETH_MH_SIZE) {
+		printk(KERN_ERR "%s: skb (%p) doesn't have place for MH, head=%p, data=%p\n",
+		       __func__, skb, skb->head, skb->data);
+		return 1;
+	}
+
+	/* Prepare place for MH header */
+	skb->len += MV_ETH_MH_SIZE;
+	skb->data -= MV_ETH_MH_SIZE;
+	*((u16 *) skb->data) = mh;
+
+	return 0;
+}
+
+static inline int mv_eth_mh_skb_skip(struct sk_buff *skb)
+{
+	__skb_pull(skb, MV_ETH_MH_SIZE);
+	return MV_ETH_MH_SIZE;
+}
+
+void mv_eth_ctrl_txdone(int num)
+{
+	mv_ctrl_txdone = num;
+}
+
+int mv_eth_ctrl_flag(int port, u32 flag, u32 val)
+{
+	struct eth_port *pp = mv_eth_port_by_id(port);
+	u32 bit_flag = (fls(flag) - 1);
+
+	if (!pp)
+		return -ENODEV;
+
+	if (val)
+		set_bit(bit_flag, &(pp->flags));
+	else
+		clear_bit(bit_flag, &(pp->flags));
+
+	if (flag == MV_ETH_F_MH)
+		mvNetaMhSet(pp->port, val ? MV_TAG_TYPE_MH : MV_TAG_TYPE_NONE);
+
+	return 0;
+}
+
+int mv_eth_ctrl_port_buf_num_set(int port, int long_num, int short_num)
+{
+	struct eth_port *pp = mv_eth_port_by_id(port);
+
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return -EINVAL;
+	}
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		printk(KERN_ERR "Port %d must be stopped before\n", port);
+		return -EINVAL;
+	}
+	if (pp->pool_long != NULL) {
+		/* Update number of buffers in existing pool (allocate or free) */
+		if (pp->pool_long_num > long_num)
+			mv_eth_pool_free(pp->pool_long->pool, pp->pool_long_num - long_num);
+		else if (long_num > pp->pool_long_num)
+			mv_eth_pool_add(pp, pp->pool_long->pool, long_num - pp->pool_long_num);
+	}
+	pp->pool_long_num = long_num;
+
+#ifdef CONFIG_MV_ETH_BM_CPU
+	if (MV_NETA_BM_CAP()) {
+		if (pp->pool_short != NULL) {
+			/* Update number of buffers in existing pool (allocate or free) */
+			if (pp->pool_short_num > short_num)
+				mv_eth_pool_free(pp->pool_short->pool, pp->pool_short_num - short_num);
+			else if (short_num > pp->pool_short_num)
+				mv_eth_pool_add(pp, pp->pool_short->pool, short_num - pp->pool_short_num);
+		}
+		pp->pool_short_num = short_num;
+	}
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+	return 0;
+}
+
+#ifdef CONFIG_MV_ETH_BM
+/* Set pkt_size for the pool. Check that pool not in use (all ports are stopped) */
+/* Free all buffers from the pool */
+/* Detach the pool from all ports */
+int mv_eth_ctrl_pool_size_set(int pool, int pkt_size)
+{
+#ifdef CONFIG_MV_ETH_BM_CPU
+	int port;
+	struct bm_pool *ppool;
+	struct eth_port *pp;
+
+	if (MV_NETA_BM_CAP()) {
+		if (mvNetaMaxCheck(pool, MV_ETH_BM_POOLS, "bm_pool"))
+			return -EINVAL;
+
+		ppool = &mv_eth_pool[pool];
+
+		for (port = 0; port < mv_eth_ports_num; port++) {
+			/* Check that all ports using this pool are stopped */
+			if (ppool->port_map & (1 << port)) {
+				pp = mv_eth_port_by_id(port);
+
+				if (pp->flags & MV_ETH_F_STARTED) {
+					pr_err("Port %d use pool #%d and must be stopped before change pkt_size\n",
+						port, pool);
+					return -EINVAL;
+				}
+			}
+		}
+		for (port = 0; port < mv_eth_ports_num; port++) {
+			/* Free all buffers and detach pool */
+			if (ppool->port_map & (1 << port)) {
+				pp = mv_eth_port_by_id(port);
+
+				if (ppool == pp->pool_long) {
+					mv_eth_pool_free(pool, pp->pool_long_num);
+					ppool->port_map &= ~(1 << pp->port);
+					pp->pool_long = NULL;
+				}
+				if (ppool == pp->pool_short) {
+					mv_eth_pool_free(pool, pp->pool_short_num);
+					ppool->port_map &= ~(1 << pp->port);
+					pp->pool_short = NULL;
+				}
+			}
+		}
+		ppool->pkt_size = pkt_size;
+	}
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+	mv_eth_bm_config_pkt_size_set(pool, pkt_size);
+	if (pkt_size == 0)
+		mvNetaBmPoolBufferSizeSet(pool, 0);
+	else
+		mvNetaBmPoolBufferSizeSet(pool, RX_BUF_SIZE(pkt_size));
+
+	return 0;
+}
+#endif /* CONFIG_MV_ETH_BM */
+
+int mv_eth_ctrl_set_poll_rx_weight(int port, u32 weight)
+{
+	struct cpu_ctrl	*cpuCtrl;
+	struct eth_port *pp = mv_eth_port_by_id(port);
+	int cpu;
+
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return -EINVAL;
+	}
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		printk(KERN_ERR "Port %d must be stopped before\n", port);
+		return -EINVAL;
+	}
+
+	if (weight > 255)
+		weight = 255;
+	pp->weight = weight;
+
+	for_each_possible_cpu(cpu) {
+		cpuCtrl = pp->cpu_config[cpu];
+		if (cpuCtrl->napi)
+			cpuCtrl->napi->weight = pp->weight;
+	}
+
+	return 0;
+}
+
+int mv_eth_ctrl_rxq_size_set(int port, int rxq, int value)
+{
+	struct eth_port *pp;
+	struct rx_queue	*rxq_ctrl;
+
+	if (mvNetaPortCheck(port))
+		return -EINVAL;
+
+	if (mvNetaMaxCheck(rxq, CONFIG_MV_ETH_RXQ, "rxq"))
+		return -EINVAL;
+
+	if ((value <= 0) || (value > 0x3FFF)) {
+		pr_err("RXQ size %d is out of range\n", value);
+		return -EINVAL;
+	}
+
+	pp = mv_eth_port_by_id(port);
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return -EINVAL;
+	}
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		pr_err("Port %d must be stopped before\n", port);
+		return -EINVAL;
+	}
+	rxq_ctrl = &pp->rxq_ctrl[rxq];
+	if ((rxq_ctrl->q) && (rxq_ctrl->rxq_size != value)) {
+		/* Reset is required when RXQ ring size is changed */
+		mv_eth_rx_reset(pp->port);
+
+		mvNetaRxqDelete(pp->port, rxq);
+		rxq_ctrl->q = NULL;
+	}
+	pp->rxq_ctrl[rxq].rxq_size = value;
+
+	/* New RXQ will be created during mv_eth_start_internals */
+	return 0;
+}
+
+int mv_eth_ctrl_txq_size_set(int port, int txp, int txq, int value)
+{
+	struct tx_queue *txq_ctrl;
+	struct eth_port *pp;
+
+	if (mvNetaTxpCheck(port, txp))
+		return -EINVAL;
+
+	if (mvNetaMaxCheck(txq, CONFIG_MV_ETH_TXQ, "txq"))
+		return -EINVAL;
+
+	if ((value <= 0) || (value > 0x3FFF)) {
+		pr_err("TXQ size %d is out of range\n", value);
+		return -EINVAL;
+	}
+
+	pp = mv_eth_port_by_id(port);
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return -EINVAL;
+	}
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		pr_err("Port %d must be stopped before\n", port);
+		return -EINVAL;
+	}
+	txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + txq];
+	if ((txq_ctrl->q) && (txq_ctrl->txq_size != value)) {
+		/* Reset of port/txp is required to change TXQ ring size */
+		if ((mvNetaTxqNextIndexGet(pp->port, txq_ctrl->txp, txq_ctrl->txq) != 0) ||
+			(mvNetaTxqPendDescNumGet(pp->port, txq_ctrl->txp, txq_ctrl->txq) != 0) ||
+			(mvNetaTxqSentDescNumGet(pp->port, txq_ctrl->txp, txq_ctrl->txq) != 0)) {
+			pr_err("%s: port=%d, txp=%d, txq=%d must be in its initial state\n",
+				__func__, port, txq_ctrl->txp, txq_ctrl->txq);
+			return -EINVAL;
+		}
+		mv_eth_txq_delete(pp, txq_ctrl);
+	}
+	txq_ctrl->txq_size = value;
+
+	/* New TXQ will be created during mv_eth_start_internals */
+	return 0;
+}
+
+int mv_eth_ctrl_txq_mode_get(int port, int txp, int txq, int *value)
+{
+	int cpu, mode = MV_ETH_TXQ_FREE, val = 0;
+	struct tx_queue *txq_ctrl;
+	struct eth_port *pp = mv_eth_port_by_id(port);
+
+	if (pp == NULL)
+		return -ENODEV;
+
+	txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + txq];
+	for_each_possible_cpu(cpu)
+		if (txq_ctrl->cpu_owner[cpu]) {
+			mode = MV_ETH_TXQ_CPU;
+			val += txq_ctrl->cpu_owner[cpu];
+		}
+	if ((mode == MV_ETH_TXQ_FREE) && (txq_ctrl->hwf_rxp < (MV_U8) mv_eth_ports_num)) {
+		mode = MV_ETH_TXQ_HWF;
+		val = txq_ctrl->hwf_rxp;
+	}
+	if (value)
+		*value = val;
+
+	return mode;
+}
+
+/* Increment/Decrement CPU ownership for this TXQ */
+int mv_eth_ctrl_txq_cpu_own(int port, int txp, int txq, int add, int cpu)
+{
+	int mode;
+	struct tx_queue *txq_ctrl;
+	struct cpu_ctrl	*cpuCtrl;
+	struct eth_port *pp = mv_eth_port_by_id(port);
+
+	if ((pp == NULL) || (pp->txq_ctrl == NULL))
+		return -ENODEV;
+
+	/* Check that new txp/txq can be allocated for CPU */
+	mode = mv_eth_ctrl_txq_mode_get(port, txp, txq, NULL);
+
+	txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + txq];
+	cpuCtrl = pp->cpu_config[cpu];
+
+	if (add) {
+		if ((mode != MV_ETH_TXQ_CPU) && (mode != MV_ETH_TXQ_FREE))
+			return -EINVAL;
+
+		txq_ctrl->cpu_owner[cpu]++;
+
+	} else {
+		if (mode != MV_ETH_TXQ_CPU)
+			return -EINVAL;
+
+		txq_ctrl->cpu_owner[cpu]--;
+	}
+
+	mv_eth_txq_update_shared(txq_ctrl, pp);
+
+	return 0;
+}
+
+/* Set TXQ ownership to HWF from the RX port.  rxp=-1 - free TXQ ownership */
+int mv_eth_ctrl_txq_hwf_own(int port, int txp, int txq, int rxp)
+{
+	int mode;
+	struct tx_queue *txq_ctrl;
+	struct eth_port *pp = mv_eth_port_by_id(port);
+
+	if ((pp == NULL) || (pp->txq_ctrl == NULL))
+		return -ENODEV;
+
+	/* Check that new txp/txq can be allocated for HWF */
+	mode = mv_eth_ctrl_txq_mode_get(port, txp, txq, NULL);
+
+	txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + txq];
+
+	if (rxp == -1) {
+		if (mode != MV_ETH_TXQ_HWF)
+			return -EINVAL;
+	} else {
+		if ((mode != MV_ETH_TXQ_HWF) && (mode != MV_ETH_TXQ_FREE))
+			return -EINVAL;
+	}
+
+	txq_ctrl->hwf_rxp = (MV_U8) rxp;
+
+	return 0;
+}
+
+/* set or clear shared bit for this txq, txp=1 for pon , 0 for gbe */
+int mv_eth_shared_set(int port, int txp, int txq, int value)
+{
+	struct tx_queue *txq_ctrl;
+	struct eth_port *pp = mv_eth_port_by_id(port);
+
+	if ((value < 0) || (value > 1)) {
+		pr_err("%s: Invalid value %d, should be 0 or 1\n", __func__, value);
+		return -EINVAL;
+	}
+
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return -EINVAL;
+	}
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		printk(KERN_ERR "Port %d must be stopped before\n", port);
+		return -EINVAL;
+	}
+
+	txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + txq];
+
+	if (txq_ctrl == NULL) {
+		printk(KERN_ERR "%s: txq_ctrl is null \n", __func__);
+		return -EINVAL;
+	}
+
+	value ? (txq_ctrl->flags |= MV_ETH_F_TX_SHARED) : (txq_ctrl->flags &= ~MV_ETH_F_TX_SHARED);
+
+	return MV_OK;
+}
+
+/* Set TXQ for CPU originated packets */
+int mv_eth_ctrl_txq_cpu_def(int port, int txp, int txq, int cpu)
+{
+	struct cpu_ctrl	*cpuCtrl;
+	struct eth_port *pp;
+
+	if ((cpu >= nr_cpu_ids) || (cpu < 0)) {
+		pr_err("cpu #%d is out of range: from 0 to %d\n",
+			cpu, nr_cpu_ids - 1);
+		return -EINVAL;
+	}
+
+	if (mvNetaTxpCheck(port, txp))
+		return -EINVAL;
+
+	pp = mv_eth_port_by_id(port);
+	if ((pp == NULL) || (pp->txq_ctrl == NULL)) {
+		pr_err("Port %d does not exist\n", port);
+		return -ENODEV;
+	}
+	cpuCtrl = pp->cpu_config[cpu];
+
+	/* Check that new txq can be allocated for CPU */
+	if (!(MV_BIT_CHECK(cpuCtrl->cpuTxqMask, txq)))	{
+		printk(KERN_ERR "Txq #%d can not allocated for cpu #%d\n", txq, cpu);
+		return -EINVAL;
+	}
+
+	if (test_bit(MV_ETH_F_STARTED_BIT, &(pp->flags))) {
+		/* Decrement CPU ownership for old txq */
+		mv_eth_ctrl_txq_cpu_own(port, pp->txp, cpuCtrl->txq, 0, cpu);
+
+		if (txq != -1) {
+			if (mvNetaMaxCheck(txq, CONFIG_MV_ETH_TXQ, "txq"))
+				return -EINVAL;
+
+			/* Increment CPU ownership for new txq */
+			if (mv_eth_ctrl_txq_cpu_own(port, txp, txq, 1, cpu))
+				return -EINVAL;
+		}
+	}
+	pp->txp = txp;
+	cpuCtrl->txq = txq;
+
+	return 0;
+}
+
+
+int	mv_eth_cpu_txq_mask_set(int port, int cpu, int txqMask)
+{
+	struct tx_queue *txq_ctrl;
+	int i;
+	struct cpu_ctrl	*cpuCtrl;
+	struct eth_port *pp = mv_eth_port_by_id(port);
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		printk(KERN_ERR "Port %d must be stopped before\n", port);
+		return -EINVAL;
+	}
+
+	if ((cpu >= nr_cpu_ids) || (cpu < 0)) {
+		printk(KERN_ERR "cpu #%d is out of range: from 0 to %d\n",
+			cpu, nr_cpu_ids - 1);
+		return -EINVAL;
+	}
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return MV_FAIL;
+	}
+
+	if (!(MV_BIT_CHECK(pp->cpu_mask, cpu)))	{
+		printk(KERN_ERR "%s:Error- Cpu #%d masked for port  #%d\n", __func__, cpu, port);
+		return -EINVAL;
+	}
+
+	cpuCtrl = pp->cpu_config[cpu];
+
+	/* validate that default txq is not masked by the new txqMask Value */
+	if (!(MV_BIT_CHECK(txqMask, cpuCtrl->txq))) {
+		printk(KERN_ERR "Error: port %d default txq %d can not be masked.\n", port, cpuCtrl->txq);
+		return -EINVAL;
+	}
+
+	/* validate that txq values in tos map are not masked by the new txqMask Value */
+	for (i = 0; i < 256; i++)
+		if (cpuCtrl->txq_tos_map[i] != MV_ETH_TXQ_INVALID)
+			if (!(MV_BIT_CHECK(txqMask, cpuCtrl->txq_tos_map[i]))) {
+				printk(KERN_WARNING "Warning: port %d tos 0h%x mapped to txq %d ,this rule delete due to new masked value (0X%x).\n",
+					port, i, cpuCtrl->txq_tos_map[i], txqMask);
+				txq_ctrl = &pp->txq_ctrl[pp->txp * CONFIG_MV_ETH_TXQ + cpuCtrl->txq_tos_map[i]];
+				txq_ctrl->cpu_owner[cpu]--;
+				mv_eth_txq_update_shared(txq_ctrl, pp);
+				cpuCtrl->txq_tos_map[i] = MV_ETH_TXQ_INVALID;
+			}
+
+	/* nfp validation - can not mask nfp rules*/
+	for (i = 0; i < CONFIG_MV_ETH_TXQ; i++)
+		if (!(MV_BIT_CHECK(txqMask, i))) {
+			txq_ctrl = &pp->txq_ctrl[pp->txp * CONFIG_MV_ETH_TXQ + i];
+			if ((txq_ctrl != NULL) && (txq_ctrl->nfpCounter != 0)) {
+				printk(KERN_ERR "Error: port %d txq %d ruled by NFP, can not be masked.\n", port, i);
+				return -EINVAL;
+			}
+		}
+
+	mvNetaTxqCpuMaskSet(port, txqMask, cpu);
+	cpuCtrl->cpuTxqMask = txqMask;
+
+	return 0;
+}
+
+
+int mv_eth_ctrl_tx_cmd(int port, u32 tx_cmd)
+{
+	struct eth_port *pp = mv_eth_port_by_id(port);
+
+	if (!pp)
+		return -ENODEV;
+
+	pp->hw_cmd = tx_cmd;
+
+	return 0;
+}
+
+int mv_eth_ctrl_tx_mh(int port, u16 mh)
+{
+	struct eth_port *pp = mv_eth_port_by_id(port);
+
+	if (!pp)
+		return -ENODEV;
+
+	pp->tx_mh = mh;
+
+	return 0;
+}
+
+#ifdef CONFIG_MV_ETH_TX_SPECIAL
+/* Register special transmit check function */
+void mv_eth_tx_special_check_func(int port,
+					int (*func)(int port, struct net_device *dev, struct sk_buff *skb,
+								struct mv_eth_tx_spec *tx_spec_out))
+{
+	struct eth_port *pp = mv_eth_port_by_id(port);
+
+	if (pp)
+		pp->tx_special_check = func;
+}
+#endif /* CONFIG_MV_ETH_TX_SPECIAL */
+
+#ifdef CONFIG_MV_ETH_RX_SPECIAL
+/* Register special transmit check function */
+void mv_eth_rx_special_proc_func(int port, void (*func)(int port, int rxq, struct net_device *dev,
+							struct sk_buff *skb, struct neta_rx_desc *rx_desc))
+{
+	struct eth_port *pp = mv_eth_port_by_id(port);
+
+	if (pp)
+		pp->rx_special_proc = func;
+}
+#endif /* CONFIG_MV_ETH_RX_SPECIAL */
+
+static inline u16 mv_eth_select_txq(struct net_device *dev, struct sk_buff *skb)
+{
+	struct eth_port *pp = MV_ETH_PRIV(dev);
+	return mv_eth_tx_policy(pp, skb);
+}
+
+/* Update network device features after changing MTU.	*/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 25)
+static u32 mv_eth_netdev_fix_features(struct net_device *dev, u32 features)
+#else
+static netdev_features_t mv_eth_netdev_fix_features(struct net_device *dev, netdev_features_t features)
+#endif
+{
+#ifdef CONFIG_MV_ETH_TX_CSUM_OFFLOAD
+	struct eth_port *pp = MV_ETH_PRIV(dev);
+
+	if (dev->mtu > pp->plat_data->tx_csum_limit) {
+		if (features & (NETIF_F_IP_CSUM | NETIF_F_TSO)) {
+			features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
+			printk(KERN_ERR "%s: NETIF_F_IP_CSUM and NETIF_F_TSO not supported for mtu larger %d bytes\n",
+					dev->name, pp->plat_data->tx_csum_limit);
+		}
+	}
+#endif /* CONFIG_MV_ETH_TX_CSUM_OFFLOAD */
+	return features;
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 25)
+static int mv_eth_netdev_set_features(struct net_device *dev, u32 features)
+#else
+static int mv_eth_netdev_set_features(struct net_device *dev, netdev_features_t features)
+#endif
+{
+	u32 changed = dev->features ^ features;
+
+/*
+	pr_info("%s: dev->features=0x%x, features=0x%x, changed=0x%x\n",
+		 __func__, dev->features, features, changed);
+*/
+	if (changed == 0)
+		return 0;
+
+#if defined(MV_ETH_PNC_LB) && defined(CONFIG_MV_ETH_PNC)
+	if (MV_NETA_PNC_CAP() && (changed & NETIF_F_RXHASH)) {
+		if (features & NETIF_F_RXHASH) {
+			dev->features |= NETIF_F_RXHASH;
+			mvPncLbModeIp4(LB_2_TUPLE_VALUE);
+			mvPncLbModeIp6(LB_2_TUPLE_VALUE);
+			mvPncLbModeL4(LB_4_TUPLE_VALUE);
+		} else {
+			dev->features |= ~NETIF_F_RXHASH;
+			mvPncLbModeIp4(LB_DISABLE_VALUE);
+			mvPncLbModeIp6(LB_DISABLE_VALUE);
+			mvPncLbModeL4(LB_DISABLE_VALUE);
+		}
+	}
+#endif /* MV_ETH_PNC_LB && CONFIG_MV_ETH_PNC */
+
+	return 0;
+}
+
+static const struct net_device_ops mv_eth_netdev_ops = {
+	.ndo_open = mv_eth_open,
+	.ndo_stop = mv_eth_stop,
+	.ndo_start_xmit = mv_eth_tx,
+	.ndo_set_rx_mode = mv_eth_set_multicast_list,
+	.ndo_set_mac_address = mv_eth_set_mac_addr,
+	.ndo_change_mtu = mv_eth_change_mtu,
+	.ndo_tx_timeout = mv_eth_tx_timeout,
+	.ndo_select_queue = mv_eth_select_txq,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+	.ndo_fix_features = mv_eth_netdev_fix_features,
+	.ndo_set_features = mv_eth_netdev_set_features,
+#endif
+  .ndo_do_ioctl		= mv_eth_ioctl, //alpha.jack.20140707+
+};
+
+
+void mv_eth_link_status_print(int port)
+{
+	MV_ETH_PORT_STATUS link;
+
+	mvNetaLinkStatus(port, &link);
+#ifdef CONFIG_MV_PON
+	if (MV_PON_PORT(port))
+		link.linkup = mv_pon_link_status();
+#endif /* CONFIG_MV_PON */
+
+	if (link.linkup) {
+		printk(KERN_CONT "link up");
+		printk(KERN_CONT ", %s duplex", (link.duplex == MV_ETH_DUPLEX_FULL) ? "full" : "half");
+		printk(KERN_CONT ", speed ");
+
+		if (link.speed == MV_ETH_SPEED_1000)
+			printk(KERN_CONT "1 Gbps\n");
+		else if (link.speed == MV_ETH_SPEED_100)
+			printk(KERN_CONT "100 Mbps\n");
+		else
+			printk(KERN_CONT "10 Mbps\n");
+	} else
+		printk(KERN_CONT "link down\n");
+}
+
+static void mv_eth_rx_error(struct eth_port *pp, struct neta_rx_desc *rx_desc)
+{
+	STAT_ERR(pp->stats.rx_error++);
+
+	if (pp->dev)
+		pp->dev->stats.rx_errors++;
+
+#ifdef CONFIG_MV_NETA_DEBUG_CODE
+	if ((pp->flags & MV_ETH_F_DBG_RX) == 0)
+		return;
+
+	if (!printk_ratelimit())
+		return;
+
+	if ((rx_desc->status & NETA_RX_FL_DESC_MASK) != NETA_RX_FL_DESC_MASK) {
+		printk(KERN_ERR "giga #%d: bad rx status %08x (buffer oversize), size=%d\n",
+				pp->port, rx_desc->status, rx_desc->dataSize);
+		return;
+	}
+
+	switch (rx_desc->status & NETA_RX_ERR_CODE_MASK) {
+	case NETA_RX_ERR_CRC:
+		printk(KERN_ERR "giga #%d: bad rx status %08x (crc error), size=%d\n",
+				pp->port, rx_desc->status, rx_desc->dataSize);
+		break;
+	case NETA_RX_ERR_OVERRUN:
+		printk(KERN_ERR "giga #%d: bad rx status %08x (overrun error), size=%d\n",
+				pp->port, rx_desc->status, rx_desc->dataSize);
+		break;
+	case NETA_RX_ERR_LEN:
+		printk(KERN_ERR "giga #%d: bad rx status %08x (max frame length error), size=%d\n",
+				pp->port, rx_desc->status, rx_desc->dataSize);
+		break;
+	case NETA_RX_ERR_RESOURCE:
+		printk(KERN_ERR "giga #%d: bad rx status %08x (resource error), size=%d\n",
+				pp->port, rx_desc->status, rx_desc->dataSize);
+		break;
+	}
+	mv_eth_rx_desc_print(rx_desc);
+#endif /* CONFIG_MV_NETA_DEBUG_CODE */
+}
+
+void mv_eth_skb_print(struct sk_buff *skb)
+{
+	printk(KERN_ERR "skb=%p: head=%p, data=%p, tail=%p, end=%p\n", skb, skb->head, skb->data, skb->tail, skb->end);
+	printk(KERN_ERR "\t mac=%p, network=%p, transport=%p\n",
+			skb->mac_header, skb->network_header, skb->transport_header);
+	printk(KERN_ERR "\t truesize=%d, len=%d, data_len=%d, mac_len=%d\n",
+		skb->truesize, skb->len, skb->data_len, skb->mac_len);
+	printk(KERN_ERR "\t users=%d, dataref=%d, nr_frags=%d, gso_size=%d, gso_segs=%d\n",
+	       atomic_read(&skb->users), atomic_read(&skb_shinfo(skb)->dataref),
+	       skb_shinfo(skb)->nr_frags, skb_shinfo(skb)->gso_size, skb_shinfo(skb)->gso_segs);
+	printk(KERN_ERR "\t proto=%d, ip_summed=%d, priority=%d\n", ntohs(skb->protocol), skb->ip_summed, skb->priority);
+#ifdef CONFIG_MV_NETA_SKB_RECYCLE
+	printk(KERN_ERR "\t skb_recycle=%p, hw_cookie=0x%x\n", skb->skb_recycle, skb->hw_cookie);
+#endif /* CONFIG_MV_NETA_SKB_RECYCLE */
+}
+
+void mv_eth_rx_desc_print(struct neta_rx_desc *desc)
+{
+	int i;
+	u32 *words = (u32 *) desc;
+
+	printk(KERN_ERR "RX desc - %p: ", desc);
+	for (i = 0; i < 8; i++)
+		printk(KERN_CONT "%8.8x ", *words++);
+	printk(KERN_CONT "\n");
+
+	if (desc->status & NETA_RX_IP4_FRAG_MASK)
+		printk(KERN_ERR "Frag, ");
+
+	printk(KERN_CONT "size=%d, L3_offs=%d, IP_hlen=%d, L4_csum=%s, L3=",
+	       desc->dataSize,
+	       (desc->status & NETA_RX_L3_OFFSET_MASK) >> NETA_RX_L3_OFFSET_OFFS,
+	       (desc->status & NETA_RX_IP_HLEN_MASK) >> NETA_RX_IP_HLEN_OFFS,
+	       (desc->status & NETA_RX_L4_CSUM_OK_MASK) ? "Ok" : "Bad");
+
+	if (NETA_RX_L3_IS_IP4(desc->status))
+		printk(KERN_CONT "IPv4, ");
+	else if (NETA_RX_L3_IS_IP4_ERR(desc->status))
+		printk(KERN_CONT "IPv4 bad, ");
+	else if (NETA_RX_L3_IS_IP6(desc->status))
+		printk(KERN_CONT "IPv6, ");
+	else
+		printk(KERN_CONT "Unknown, ");
+
+	printk(KERN_CONT "L4=");
+	if (NETA_RX_L4_IS_TCP(desc->status))
+		printk(KERN_CONT "TCP");
+	else if (NETA_RX_L4_IS_UDP(desc->status))
+		printk(KERN_CONT "UDP");
+	else
+		printk(KERN_CONT "Unknown");
+	printk(KERN_CONT "\n");
+
+#ifdef CONFIG_MV_ETH_PNC
+	if (MV_NETA_PNC_CAP()) {
+		pr_err("RINFO: ");
+		if (desc->pncInfo & NETA_PNC_DA_MC)
+			pr_cont("DA_MC, ");
+		if (desc->pncInfo & NETA_PNC_DA_BC)
+			pr_cont("DA_BC, ");
+		if (desc->pncInfo & NETA_PNC_DA_UC)
+			pr_cont("DA_UC, ");
+		if (desc->pncInfo & NETA_PNC_VLAN)
+			pr_cont("VLAN, ");
+		if (desc->pncInfo & NETA_PNC_PPPOE)
+			pr_cont("PPPOE, ");
+		if (desc->pncInfo & NETA_PNC_RX_SPECIAL)
+			pr_cont("RX_SPEC, ");
+	}
+#endif /* CONFIG_MV_ETH_PNC */
+
+	printk(KERN_CONT "\n");
+}
+EXPORT_SYMBOL(mv_eth_rx_desc_print);
+
+void mv_eth_tx_desc_print(struct neta_tx_desc *desc)
+{
+	int i;
+	u32 *words = (u32 *) desc;
+
+	printk(KERN_ERR "TX desc - %p: ", desc);
+	for (i = 0; i < 8; i++)
+		printk(KERN_CONT "%8.8x ", *words++);
+	printk(KERN_CONT "\n");
+}
+EXPORT_SYMBOL(mv_eth_tx_desc_print);
+
+void mv_eth_pkt_print(struct eth_port *pp, struct eth_pbuf *pkt)
+{
+	printk(KERN_ERR "pkt: len=%d off=%d pool=%d "
+	       "skb=%p pa=%lx buf=%p\n",
+	       pkt->bytes, pkt->offset, pkt->pool,
+	       pkt->osInfo, pkt->physAddr, pkt->pBuf);
+
+	mvDebugMemDump(pkt->pBuf + pkt->offset, 64, 1);
+	mvOsCacheInvalidate(pp->dev->dev.parent, pkt->pBuf + pkt->offset, 64);
+}
+EXPORT_SYMBOL(mv_eth_pkt_print);
+
+static inline void mv_eth_rx_csum(struct eth_port *pp, struct neta_rx_desc *rx_desc, struct sk_buff *skb)
+{
+#if defined(CONFIG_MV_ETH_RX_CSUM_OFFLOAD)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+	if (pp->dev->features & NETIF_F_RXCSUM) {
+
+		if ((NETA_RX_L3_IS_IP4(rx_desc->status) ||
+	      NETA_RX_L3_IS_IP6(rx_desc->status)) && (rx_desc->status & NETA_RX_L4_CSUM_OK_MASK)) {
+			skb->csum = 0;
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+			STAT_DBG(pp->stats.rx_csum_hw++);
+			return;
+		}
+	}
+#endif
+#endif /* CONFIG_MV_ETH_RX_CSUM_OFFLOAD */
+
+	skb->ip_summed = CHECKSUM_NONE;
+	STAT_DBG(pp->stats.rx_csum_sw++);
+}
+
+static inline int mv_eth_tx_done_policy(u32 cause)
+{
+	return fls(cause >> NETA_CAUSE_TXQ_SENT_DESC_OFFS) - 1;
+}
+
+inline int mv_eth_rx_policy(u32 cause)
+{
+	return fls(cause >> NETA_CAUSE_RXQ_OCCUP_DESC_OFFS) - 1;
+}
+
+static inline int mv_eth_txq_tos_map_get(struct eth_port *pp, MV_U8 tos, MV_U8 cpu)
+{
+	MV_U8 q = pp->cpu_config[cpu]->txq_tos_map[tos];
+
+	if (q == MV_ETH_TXQ_INVALID)
+		return pp->cpu_config[smp_processor_id()]->txq;
+
+	return q;
+}
+
+static inline int mv_eth_tx_policy(struct eth_port *pp, struct sk_buff *skb)
+{
+	int txq = pp->cpu_config[smp_processor_id()]->txq;
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *iph = ip_hdr(skb);
+
+		txq = mv_eth_txq_tos_map_get(pp, iph->tos, smp_processor_id());
+	}
+	return txq;
+}
+
+#ifdef CONFIG_MV_NETA_SKB_RECYCLE
+int mv_eth_skb_recycle(struct sk_buff *skb)
+{
+	struct eth_pbuf *pkt = (struct eth_pbuf *)(skb->hw_cookie & ~BIT(0));
+	struct bm_pool  *pool;
+	int             status = 0;
+
+#ifdef CONFIG_MV_NETA_DEBUG_CODE
+	/* Sanity check */
+	if ((pkt->pool < 0) || (pkt->pool >= MV_ETH_BM_POOLS)) {
+		pr_err("BM pool #%d is out of range: 0  .. %d\n",
+			pkt->pool, MV_ETH_BM_POOLS);
+		goto err;
+	}
+#endif /* CONFIG_MV_NETA_DEBUG_CODE */
+
+	pool = &mv_eth_pool[pkt->pool];
+	if (skb->hw_cookie & BIT(0)) {
+		/* hw_cookie is not valid for recycle */
+		STAT_DBG(pool->stats.skb_hw_cookie_err++);
+		goto err;
+	}
+
+#if defined(CONFIG_MV_ETH_BM_CPU)
+	if (MV_NETA_BM_CAP()) {
+		/* Check that first 4 bytes of the buffer contain hw_cookie */
+		if (*((MV_U32 *) skb->head) != (MV_U32)pkt) {
+			/*
+			pr_err("%s: Wrong skb->head=%p (0x%x) != hw_cookie=%p\n",
+				__func__, skb->head, *((MV_U32 *) skb->head), pkt);
+			*/
+			STAT_DBG(pool->stats.skb_hw_cookie_err++);
+			goto err;
+		}
+	}
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+	/* Check validity of skb->head - some Linux functions (skb_expand_head) reallocate it */
+	if (skb->head != pkt->pBuf) {
+		/*
+		pr_err("%s: skb=%p, pkt=%p, Wrong skb->head=%p != pkt->pBuf=%p\n",
+			__func__, skb, pkt, skb->head, pkt->pBuf);
+		*/
+		STAT_DBG(pool->stats.skb_hw_cookie_err++);
+		goto err;
+	}
+	/*
+	WA for Linux network stack issue that prevent skb recycle.
+	If dev_kfree_skb_any called from interrupt context or interrupts disabled context
+	skb->users will be zero when skb_recycle callback function is called.
+	In such case skb_recycle_check function returns error because skb->users != 1.
+	*/
+	if (atomic_read(&skb->users) == 0)
+		atomic_set(&skb->users, 1);
+
+	if (skb_recycle_check(skb, pool->pkt_size)) {
+#ifdef CONFIG_MV_NETA_DEBUG_CODE
+		/* Sanity check */
+		if (SKB_TRUESIZE(skb->end - skb->head) != skb->truesize) {
+			printk(KERN_ERR "%s: skb=%p, Wrong SKB_TRUESIZE(end - head)=%d\n",
+				__func__, skb, SKB_TRUESIZE(skb->end - skb->head));
+			mv_eth_skb_print(skb);
+		}
+#endif /* CONFIG_MV_NETA_DEBUG_CODE */
+
+		STAT_DBG(pool->stats.skb_recycled_ok++);
+		mvOsCacheInvalidate(neta_global_dev->parent, skb->head, RX_BUF_SIZE(pool->pkt_size));
+
+		status = mv_eth_pool_put(pool, pkt);
+
+#ifdef ETH_SKB_DEBUG
+		if (status == 0)
+			mv_eth_skb_save(skb, "recycle");
+#endif /* ETH_SKB_DEBUG */
+
+		return 0;
+	}
+	STAT_DBG(pool->stats.skb_recycled_err++);
+
+	/* printk(KERN_ERR "mv_eth_skb_recycle failed: pool=%d, pkt=%p, skb=%p\n", pkt->pool, pkt, skb); */
+err:
+	mvOsFree(pkt);
+	skb->hw_cookie = 0;
+	skb->skb_recycle = NULL;
+
+	return 1;
+}
+EXPORT_SYMBOL(mv_eth_skb_recycle);
+
+#endif /* CONFIG_MV_NETA_SKB_RECYCLE */
+
+static struct sk_buff *mv_eth_skb_alloc(struct eth_port *pp, struct bm_pool *pool,
+					struct eth_pbuf *pkt, gfp_t gfp_mask)
+{
+	struct sk_buff *skb;
+
+	skb = __dev_alloc_skb(pool->pkt_size, GFP_DMA | gfp_mask);
+	if (!skb) {
+		STAT_ERR(pool->stats.skb_alloc_oom++);
+		return NULL;
+	}
+	STAT_DBG(pool->stats.skb_alloc_ok++);
+
+#ifdef ETH_SKB_DEBUG
+	mv_eth_skb_save(skb, "alloc");
+#endif /* ETH_SKB_DEBUG */
+
+#ifdef CONFIG_MV_ETH_BM_CPU
+	if (MV_NETA_BM_CAP()) {
+		/* Save pkt as first 4 bytes in the buffer */
+#if !defined(CONFIG_MV_ETH_BE_WA)
+		*((MV_U32 *) skb->head) = MV_32BIT_LE((MV_U32)pkt);
+#else
+		*((MV_U32 *) skb->head) = (MV_U32)pkt;
+#endif /* !CONFIG_MV_ETH_BE_WA */
+		mvOsCacheLineFlush(pp->dev->dev.parent, skb->head);
+	}
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+	pkt->osInfo = (void *)skb;
+	pkt->pBuf = skb->head;
+	pkt->bytes = 0;
+	pkt->physAddr = mvOsCacheInvalidate(pp->dev->dev.parent, skb->head, RX_BUF_SIZE(pool->pkt_size));
+	pkt->offset = NET_SKB_PAD;
+	pkt->pool = pool->pool;
+
+	return skb;
+}
+
+static inline void mv_eth_txq_buf_free(struct eth_port *pp, u32 shadow)
+{
+	if (!shadow)
+		return;
+
+	if (shadow & MV_ETH_SHADOW_SKB) {
+		shadow &= ~MV_ETH_SHADOW_SKB;
+		dev_kfree_skb_any((struct sk_buff *)shadow);
+		STAT_DBG(pp->stats.tx_skb_free++);
+	} else {
+		if (shadow & MV_ETH_SHADOW_EXT) {
+			shadow &= ~MV_ETH_SHADOW_EXT;
+			mv_eth_extra_pool_put(pp, (void *)shadow);
+		} else {
+			/* packet from NFP without BM */
+			struct eth_pbuf *pkt = (struct eth_pbuf *)shadow;
+			struct bm_pool *pool = &mv_eth_pool[pkt->pool];
+
+			if (mv_eth_pool_bm(pool)) {
+				/* Refill BM pool */
+				STAT_DBG(pool->stats.bm_put++);
+				mvBmPoolPut(pkt->pool, (MV_ULONG) pkt->physAddr);
+			} else {
+				mv_eth_pool_put(pool, pkt);
+			}
+		}
+	}
+}
+
+static inline void mv_eth_txq_cpu_clean(struct eth_port *pp, struct tx_queue *txq_ctrl)
+{
+	int hw_txq_i, last_txq_i, i, count;
+	u32 shadow;
+
+	hw_txq_i = mvNetaTxqNextIndexGet(pp->port, txq_ctrl->txp, txq_ctrl->txq);
+	last_txq_i = txq_ctrl->shadow_txq_put_i;
+
+	i = hw_txq_i;
+	count = 0;
+	while (i != last_txq_i) {
+		shadow = txq_ctrl->shadow_txq[i];
+		mv_eth_txq_buf_free(pp, shadow);
+		txq_ctrl->shadow_txq[i] = (u32)NULL;
+
+		i = MV_NETA_QUEUE_NEXT_DESC(&txq_ctrl->q->queueCtrl, i);
+		count++;
+	}
+	if (count > 0) {
+		pr_info("\n%s: port=%d, txp=%d, txq=%d, mode=CPU\n",
+			__func__, pp->port, txq_ctrl->txp, txq_ctrl->txq);
+		pr_info("Free %d buffers: from desc=%d to desc=%d, tx_count=%d\n",
+			count, hw_txq_i, last_txq_i, txq_ctrl->txq_count);
+	}
+}
+
+#ifdef CONFIG_MV_ETH_HWF
+static inline void mv_eth_txq_hwf_clean(struct eth_port *pp, struct tx_queue *txq_ctrl, int rx_port)
+{
+	int pool, hw_txq_i, last_txq_i, i, count;
+	struct neta_tx_desc *tx_desc;
+
+	hw_txq_i = mvNetaTxqNextIndexGet(pp->port, txq_ctrl->txp, txq_ctrl->txq);
+
+	if (mvNetaHwfTxqNextIndexGet(rx_port, pp->port, txq_ctrl->txp, txq_ctrl->txq, &last_txq_i) != MV_OK) {
+		printk(KERN_ERR "%s: mvNetaHwfTxqNextIndexGet failed\n", __func__);
+		return;
+	}
+
+	i = hw_txq_i;
+	count = 0;
+	while (i != last_txq_i) {
+		tx_desc = (struct neta_tx_desc *)MV_NETA_QUEUE_DESC_PTR(&txq_ctrl->q->queueCtrl, i);
+		if (mvNetaTxqDescIsValid(tx_desc)) {
+			mvNetaTxqDescInv(tx_desc);
+			mv_eth_tx_desc_flush(pp, tx_desc);
+
+			pool = (tx_desc->command & NETA_TX_BM_POOL_ID_ALL_MASK) >> NETA_TX_BM_POOL_ID_OFFS;
+			mvBmPoolPut(pool, (MV_ULONG)tx_desc->bufPhysAddr);
+			count++;
+		}
+		i = MV_NETA_QUEUE_NEXT_DESC(&txq_ctrl->q->queueCtrl, i);
+	}
+	if (count > 0) {
+		pr_info("\n%s: port=%d, txp=%d, txq=%d, mode=HWF-%d\n",
+			__func__, pp->port, txq_ctrl->txp, txq_ctrl->txq, rx_port);
+		pr_info("Free %d buffers to BM: from desc=%d to desc=%d\n",
+			count, hw_txq_i, last_txq_i);
+	}
+}
+#endif /* CONFIG_MV_ETH_HWF */
+
+int mv_eth_txq_clean(int port, int txp, int txq)
+{
+	int mode, rx_port;
+	struct eth_port *pp;
+	struct tx_queue *txq_ctrl;
+
+	if (mvNetaTxpCheck(port, txp))
+		return -EINVAL;
+
+	if (mvNetaMaxCheck(txq, CONFIG_MV_ETH_TXQ, "txq"))
+		return -EINVAL;
+
+	pp = mv_eth_port_by_id(port);
+	if ((pp == NULL) || (pp->txq_ctrl == NULL))
+		return -ENODEV;
+
+	txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + txq];
+
+	mode = mv_eth_ctrl_txq_mode_get(pp->port, txq_ctrl->txp, txq_ctrl->txq, &rx_port);
+	if (mode == MV_ETH_TXQ_CPU)
+		mv_eth_txq_cpu_clean(pp, txq_ctrl);
+#ifdef CONFIG_MV_ETH_HWF
+	else if (mode == MV_ETH_TXQ_HWF && MV_NETA_HWF_CAP())
+		mv_eth_txq_hwf_clean(pp, txq_ctrl, rx_port);
+#endif /* CONFIG_MV_ETH_HWF */
+
+	return 0;
+}
+
+static inline void mv_eth_txq_bufs_free(struct eth_port *pp, struct tx_queue *txq_ctrl, int num)
+{
+	u32 shadow;
+	int i;
+
+	/* Free buffers that was not freed automatically by BM */
+	for (i = 0; i < num; i++) {
+		shadow = txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_get_i];
+		mv_eth_shadow_inc_get(txq_ctrl);
+		mv_eth_txq_buf_free(pp, shadow);
+	}
+}
+
+inline u32 mv_eth_txq_done(struct eth_port *pp, struct tx_queue *txq_ctrl)
+{
+	int tx_done;
+
+	tx_done = mvNetaTxqSentDescProc(pp->port, txq_ctrl->txp, txq_ctrl->txq);
+	if (!tx_done)
+		return tx_done;
+/*
+	printk(KERN_ERR "tx_done: txq_count=%d, port=%d, txp=%d, txq=%d, tx_done=%d\n",
+			txq_ctrl->txq_count, pp->port, txq_ctrl->txp, txq_ctrl->txq, tx_done);
+*/
+	if (!mv_eth_txq_bm(txq_ctrl))
+		mv_eth_txq_bufs_free(pp, txq_ctrl, tx_done);
+
+	txq_ctrl->txq_count -= tx_done;
+	STAT_DBG(txq_ctrl->stats.txq_txdone += tx_done);
+
+	return tx_done;
+}
+EXPORT_SYMBOL(mv_eth_txq_done);
+
+inline struct eth_pbuf *mv_eth_pool_get(struct eth_port *pp, struct bm_pool *pool)
+{
+	struct eth_pbuf *pkt = NULL;
+	struct sk_buff *skb;
+	unsigned long flags = 0;
+
+	MV_ETH_LOCK(&pool->lock, flags);
+
+	if (mvStackIndex(pool->stack) > 0) {
+		STAT_DBG(pool->stats.stack_get++);
+		pkt = (struct eth_pbuf *)mvStackPop(pool->stack);
+	} else
+		STAT_ERR(pool->stats.stack_empty++);
+
+	MV_ETH_UNLOCK(&pool->lock, flags);
+	if (pkt)
+		return pkt;
+
+	/* Try to allocate new pkt + skb */
+	pkt = mvOsMalloc(sizeof(struct eth_pbuf));
+	if (pkt) {
+		skb = mv_eth_skb_alloc(pp, pool, pkt, GFP_ATOMIC);
+		if (!skb) {
+			mvOsFree(pkt);
+			pkt = NULL;
+		}
+	}
+	return pkt;
+}
+
+/* Reuse pkt if possible, allocate new skb and move BM pool or RXQ ring */
+inline int mv_eth_refill(struct eth_port *pp, int rxq,
+				struct eth_pbuf *pkt, struct bm_pool *pool, struct neta_rx_desc *rx_desc)
+{
+	if (pkt == NULL) {
+		pkt = mv_eth_pool_get(pp, pool);
+		if (pkt == NULL)
+			return 1;
+	} else {
+		struct sk_buff *skb;
+
+		/* No recycle -  alloc new skb */
+		skb = mv_eth_skb_alloc(pp, pool, pkt, GFP_ATOMIC);
+		if (!skb) {
+			mvOsFree(pkt);
+			pool->missed++;
+			mv_eth_add_cleanup_timer(pp->cpu_config[smp_processor_id()]);
+			return 1;
+		}
+	}
+	mv_eth_rxq_refill(pp, rxq, pkt, pool, rx_desc);
+
+	return 0;
+}
+EXPORT_SYMBOL(mv_eth_refill);
+
+static inline MV_U32 mv_eth_skb_tx_csum(struct eth_port *pp, struct sk_buff *skb)
+{
+#ifdef CONFIG_MV_ETH_TX_CSUM_OFFLOAD
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		int   ip_hdr_len = 0;
+		MV_U8 l4_proto;
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			struct iphdr *ip4h = ip_hdr(skb);
+
+			/* Calculate IPv4 checksum and L4 checksum */
+			ip_hdr_len = ip4h->ihl;
+			l4_proto = ip4h->protocol;
+		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+			/* If not IPv4 - must be ETH_P_IPV6 - Calculate only L4 checksum */
+			struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+			/* Read l4_protocol from one of IPv6 extra headers ?????? */
+			if (skb_network_header_len(skb) > 0)
+				ip_hdr_len = (skb_network_header_len(skb) >> 2);
+			l4_proto = ip6h->nexthdr;
+		} else {
+			STAT_DBG(pp->stats.tx_csum_sw++);
+			return NETA_TX_L4_CSUM_NOT;
+		}
+		STAT_DBG(pp->stats.tx_csum_hw++);
+
+		return mvNetaTxqDescCsum(skb_network_offset(skb), skb->protocol, ip_hdr_len, l4_proto);
+	}
+#endif /* CONFIG_MV_ETH_TX_CSUM_OFFLOAD */
+
+	STAT_DBG(pp->stats.tx_csum_sw++);
+	return NETA_TX_L4_CSUM_NOT;
+}
+
+#ifdef CONFIG_MV_ETH_RX_DESC_PREFETCH
+inline struct neta_rx_desc *mv_eth_rx_prefetch(struct eth_port *pp, MV_NETA_RXQ_CTRL *rx_ctrl,
+									  int rx_done, int rx_todo)
+{
+	struct neta_rx_desc	*rx_desc, *next_desc;
+
+	rx_desc = mvNetaRxqNextDescGet(rx_ctrl);
+	if (rx_done == 0) {
+		/* First descriptor in the NAPI loop */
+		mvOsCacheLineInv(pp->dev->dev.parent, rx_desc);
+		prefetch(rx_desc);
+	}
+	if ((rx_done + 1) == rx_todo) {
+		/* Last descriptor in the NAPI loop - prefetch are not needed */
+		return rx_desc;
+	}
+	/* Prefetch next descriptor */
+	next_desc = mvNetaRxqDescGet(rx_ctrl);
+	mvOsCacheLineInv(pp->dev->dev.parent, next_desc);
+	prefetch(next_desc);
+
+	return rx_desc;
+}
+#endif /* CONFIG_MV_ETH_RX_DESC_PREFETCH */
+
+static inline int mv_eth_rx(struct eth_port *pp, int rx_todo, int rxq, struct napi_struct *napi)
+{
+	struct net_device *dev;
+	MV_NETA_RXQ_CTRL *rx_ctrl = pp->rxq_ctrl[rxq].q;
+	int rx_done, rx_filled, err;
+	struct neta_rx_desc *rx_desc;
+	u32 rx_status;
+	int rx_bytes;
+	struct eth_pbuf *pkt;
+	struct sk_buff *skb;
+	struct bm_pool *pool;
+#ifdef CONFIG_NETMAP
+	if (pp->flags & MV_ETH_F_IFCAP_NETMAP) {
+		int netmap_done;
+		if (netmap_rx_irq(pp->dev, 0, &netmap_done))
+			return 1; /* seems to be ignored */
+	}
+#endif /* CONFIG_NETMAP */
+	/* Get number of received packets */
+	rx_done = mvNetaRxqBusyDescNumGet(pp->port, rxq);
+	mvOsCacheIoSync(pp->dev->dev.parent);
+
+	if (rx_todo > rx_done)
+		rx_todo = rx_done;
+
+	rx_done = 0;
+	rx_filled = 0;
+
+	/* Fairness NAPI loop */
+	while (rx_done < rx_todo) {
+
+#ifdef CONFIG_MV_ETH_RX_DESC_PREFETCH
+		rx_desc = mv_eth_rx_prefetch(pp, rx_ctrl, rx_done, rx_todo);
+#else
+		rx_desc = mvNetaRxqNextDescGet(rx_ctrl);
+		mvOsCacheLineInv(pp->dev->dev.parent, rx_desc);
+
+		prefetch(rx_desc);
+#endif /* CONFIG_MV_ETH_RX_DESC_PREFETCH */
+
+		rx_done++;
+		rx_filled++;
+
+#if defined(MV_CPU_BE)
+		mvNetaRxqDescSwap(rx_desc);
+#endif /* MV_CPU_BE */
+
+#ifdef CONFIG_MV_NETA_DEBUG_CODE
+		if (pp->flags & MV_ETH_F_DBG_RX) {
+			printk(KERN_ERR "\n%s: port=%d, cpu=%d\n", __func__, pp->port, smp_processor_id());
+			mv_eth_rx_desc_print(rx_desc);
+		}
+#endif /* CONFIG_MV_NETA_DEBUG_CODE */
+
+		rx_status = rx_desc->status;
+		pkt = (struct eth_pbuf *)rx_desc->bufCookie;
+		pool = &mv_eth_pool[pkt->pool];
+
+		if (((rx_status & NETA_RX_FL_DESC_MASK) != NETA_RX_FL_DESC_MASK) ||
+			(rx_status & NETA_RX_ES_MASK)) {
+
+			mv_eth_rx_error(pp, rx_desc);
+
+			mv_eth_rxq_refill(pp, rxq, pkt, pool, rx_desc);
+			continue;
+		}
+
+		/* Speculative ICache prefetch WA: should be replaced with dma_unmap_single (invalidate l2) */
+		mvOsCacheMultiLineInv(pp->dev->dev.parent, pkt->pBuf + pkt->offset, rx_desc->dataSize);
+
+#ifdef CONFIG_MV_ETH_RX_PKT_PREFETCH
+		prefetch(pkt->pBuf + pkt->offset);
+		prefetch(pkt->pBuf + pkt->offset + CPU_D_CACHE_LINE_SIZE);
+#endif /* CONFIG_MV_ETH_RX_PKT_PREFETCH */
+
+		dev = pp->dev;
+
+		STAT_DBG(pp->stats.rxq[rxq]++);
+		dev->stats.rx_packets++;
+
+		rx_bytes = rx_desc->dataSize - MV_ETH_CRC_SIZE;
+		dev->stats.rx_bytes += rx_bytes;
+
+#ifndef CONFIG_MV_ETH_PNC
+	/* Update IP offset and IP header len in RX descriptor */
+	if (MV_NETA_PNC_CAP() && NETA_RX_L3_IS_IP4(rx_desc->status)) {
+		int ip_offset;
+
+		if ((rx_desc->status & ETH_RX_VLAN_TAGGED_FRAME_MASK))
+			ip_offset = MV_ETH_MH_SIZE + sizeof(MV_802_3_HEADER) + MV_VLAN_HLEN;
+		else
+			ip_offset = MV_ETH_MH_SIZE + sizeof(MV_802_3_HEADER);
+
+		NETA_RX_SET_IPHDR_OFFSET(rx_desc, ip_offset);
+		NETA_RX_SET_IPHDR_HDRLEN(rx_desc, 5);
+	}
+#endif /* !CONFIG_MV_ETH_PNC */
+
+#ifdef CONFIG_MV_NETA_DEBUG_CODE
+		if (pp->flags & MV_ETH_F_DBG_RX) {
+			printk(KERN_ERR "pkt=%p, pBuf=%p, ksize=%d\n", pkt, pkt->pBuf, ksize(pkt->pBuf));
+			mvDebugMemDump(pkt->pBuf + pkt->offset, 64, 1);
+		}
+#endif /* CONFIG_MV_NETA_DEBUG_CODE */
+
+#if defined(CONFIG_MV_ETH_PNC) && defined(CONFIG_MV_ETH_RX_SPECIAL)
+		/* Special RX processing */
+		if (MV_NETA_PNC_CAP() && (rx_desc->pncInfo & NETA_PNC_RX_SPECIAL)) {
+			if (pp->rx_special_proc) {
+				pp->rx_special_proc(pp->port, rxq, dev, (struct sk_buff *)(pkt->osInfo), rx_desc);
+				STAT_INFO(pp->stats.rx_special++);
+
+				/* Refill processing */
+				err = mv_eth_refill(pp, rxq, pkt, pool, rx_desc);
+				if (err) {
+					printk(KERN_ERR "Linux processing - Can't refill\n");
+					pp->rxq_ctrl[rxq].missed++;
+					rx_filled--;
+				}
+				continue;
+			}
+		}
+#endif /* CONFIG_MV_ETH_PNC && CONFIG_MV_ETH_RX_SPECIAL */
+
+#if defined(CONFIG_MV_ETH_NFP)
+		if (pp->flags & MV_ETH_F_NFP_EN) {
+			MV_STATUS status;
+
+			pkt->bytes = rx_bytes;
+			pkt->offset = NET_SKB_PAD;
+
+			status = mv_eth_nfp(pp, rxq, rx_desc, pkt, pool);
+			if (status == MV_OK)
+				continue;
+			if (status == MV_FAIL) {
+				rx_filled--;
+				continue;
+			}
+			/* MV_TERMINATE - packet returned to slow path */
+		}
+#endif /* CONFIG_MV_ETH_NFP */
+
+		/* Linux processing */
+		skb = (struct sk_buff *)(pkt->osInfo);
+
+		/* Linux processing */
+		__skb_put(skb, rx_bytes);
+
+#ifdef ETH_SKB_DEBUG
+		mv_eth_skb_check(skb);
+#endif /* ETH_SKB_DEBUG */
+
+#ifdef CONFIG_MV_NETA_SKB_RECYCLE
+		if (mv_eth_is_recycle()) {
+			skb->skb_recycle = mv_eth_skb_recycle;
+			skb->hw_cookie = (__u32)pkt;
+			pkt = NULL;
+		}
+#endif /* CONFIG_MV_NETA_SKB_RECYCLE */
+
+		mv_eth_rx_csum(pp, rx_desc, skb);
+
+		if (pp->tagged) {
+			mv_mux_rx(skb, pp->port, napi);
+			STAT_DBG(pp->stats.rx_tagged++);
+			skb = NULL;
+		} else {
+			dev->stats.rx_bytes -= mv_eth_mh_skb_skip(skb);
+			skb->protocol = eth_type_trans(skb, dev);
+		}
+
+
+#ifdef CONFIG_MV_ETH_GRO
+		if (skb && (dev->features & NETIF_F_GRO)) {
+			STAT_DBG(pp->stats.rx_gro++);
+			STAT_DBG(pp->stats.rx_gro_bytes += skb->len);
+
+			rx_status = napi_gro_receive(pp->cpu_config[smp_processor_id()]->napi, skb);
+			skb = NULL;
+		}
+#endif /* CONFIG_MV_ETH_GRO */
+
+		if (skb) {
+			STAT_DBG(pp->stats.rx_netif++);
+			rx_status = netif_receive_skb(skb);
+			STAT_DBG((rx_status == 0) ? 0 : pp->stats.rx_drop_sw++);
+		}
+
+		/* Refill processing: */
+		err = mv_eth_refill(pp, rxq, pkt, pool, rx_desc);
+		if (err) {
+			printk(KERN_ERR "Linux processing - Can't refill\n");
+			pp->rxq_ctrl[rxq].missed++;
+			mv_eth_add_cleanup_timer(pp->cpu_config[smp_processor_id()]);
+			rx_filled--;
+		}
+	}
+
+	/* Update RxQ management counters */
+	mv_neta_wmb();
+	mvNetaRxqDescNumUpdate(pp->port, rxq, rx_done, rx_filled);
+
+	return rx_done;
+}
+
+static int mv_eth_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct eth_port *pp = MV_ETH_PRIV(dev);
+	int frags = 0;
+	bool tx_spec_ready = false;
+	struct mv_eth_tx_spec tx_spec;
+	u32 tx_cmd;
+
+	struct tx_queue *txq_ctrl = NULL;
+	struct neta_tx_desc *tx_desc;
+	unsigned long flags = 0;
+
+	if (!test_bit(MV_ETH_F_STARTED_BIT, &(pp->flags))) {
+		STAT_INFO(pp->stats.netdev_stop++);
+
+#ifdef CONFIG_MV_NETA_DEBUG_CODE
+		if (pp->flags & MV_ETH_F_DBG_TX)
+			printk(KERN_ERR "%s: STARTED_BIT = 0, packet is dropped.\n", __func__);
+#endif /* CONFIG_MV_NETA_DEBUG_CODE */
+		goto out;
+	}
+
+	if (!(netif_running(dev))) {
+		printk(KERN_ERR "!netif_running() in %s\n", __func__);
+		goto out;
+	}
+
+#if defined(CONFIG_MV_ETH_TX_SPECIAL)
+	if (pp->tx_special_check) {
+
+		if (pp->tx_special_check(pp->port, dev, skb, &tx_spec)) {
+			STAT_INFO(pp->stats.tx_special++);
+			if (tx_spec.tx_func) {
+				tx_spec.tx_func(skb->data, skb->len, &tx_spec);
+				goto out;
+			} else {
+				/* Check validity of tx_spec txp/txq must be CPU owned */
+				tx_spec_ready = true;
+			}
+		}
+	}
+#endif /* CONFIG_MV_ETH_TX_SPECIAL */
+
+	/* In case this port is tagged, check if SKB is tagged - i.e. SKB's source is MUX interface */
+	if (pp->tagged && (!MV_MUX_SKB_IS_TAGGED(skb))) {
+#ifdef CONFIG_MV_NETA_DEBUG_CODE
+		if (pp->flags & MV_ETH_F_DBG_TX)
+			pr_err("%s: port %d is tagged, skb not from MUX interface - packet is dropped.\n",
+				__func__, pp->port);
+#endif /* CONFIG_MV_NETA_DEBUG_CODE */
+
+		goto out;
+	}
+
+	/* Get TXQ (without BM) to send packet generated by Linux */
+	if (tx_spec_ready == false) {
+		tx_spec.txp = pp->txp;
+		tx_spec.txq = mv_eth_tx_policy(pp, skb);
+		tx_spec.hw_cmd = pp->hw_cmd;
+		tx_spec.flags = pp->flags;
+	}
+
+	txq_ctrl = &pp->txq_ctrl[tx_spec.txp * CONFIG_MV_ETH_TXQ + tx_spec.txq];
+	if (txq_ctrl == NULL) {
+		printk(KERN_ERR "%s: invalidate txp/txq (%d/%d)\n", __func__, tx_spec.txp, tx_spec.txq);
+		goto out;
+	}
+	mv_eth_lock(txq_ctrl, flags);
+
+#ifdef CONFIG_MV_ETH_TSO
+	/* GSO/TSO */
+	if (skb_is_gso(skb)) {
+		frags = mv_eth_tx_tso(skb, dev, &tx_spec, txq_ctrl);
+		goto out;
+	}
+#endif /* CONFIG_MV_ETH_TSO */
+
+	frags = skb_shinfo(skb)->nr_frags + 1;
+
+	if (tx_spec.flags & MV_ETH_F_MH) {
+		if (mv_eth_skb_mh_add(skb, pp->tx_mh)) {
+			frags = 0;
+			goto out;
+		}
+	}
+
+	tx_desc = mv_eth_tx_desc_get(txq_ctrl, frags);
+	if (tx_desc == NULL) {
+		frags = 0;
+		goto out;
+	}
+
+	/* Don't use BM for Linux packets: NETA_TX_BM_ENABLE_MASK = 0 */
+	/* NETA_TX_PKT_OFFSET_MASK = 0 - for all descriptors */
+	tx_cmd = mv_eth_skb_tx_csum(pp, skb);
+
+#ifdef CONFIG_MV_PON
+	tx_desc->hw_cmd = tx_spec.hw_cmd;
+#endif
+
+	/* FIXME: beware of nonlinear --BK */
+	tx_desc->dataSize = skb_headlen(skb);
+
+	tx_desc->bufPhysAddr = mvOsCacheFlush(pp->dev->dev.parent, skb->data, tx_desc->dataSize);
+
+	if (frags == 1) {
+		/*
+		 * First and Last descriptor
+		 */
+		if (tx_spec.flags & MV_ETH_F_NO_PAD)
+			tx_cmd |= NETA_TX_F_DESC_MASK | NETA_TX_L_DESC_MASK;
+		else
+			tx_cmd |= NETA_TX_FLZ_DESC_MASK;
+
+		tx_desc->command = tx_cmd;
+		mv_eth_tx_desc_flush(pp, tx_desc);
+
+		txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = ((MV_ULONG) skb | MV_ETH_SHADOW_SKB);
+		mv_eth_shadow_inc_put(txq_ctrl);
+	} else {
+
+		/* First but not Last */
+		tx_cmd |= NETA_TX_F_DESC_MASK;
+
+		txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = 0;
+		mv_eth_shadow_inc_put(txq_ctrl);
+
+		tx_desc->command = tx_cmd;
+		mv_eth_tx_desc_flush(pp, tx_desc);
+
+		/* Continue with other skb fragments */
+		mv_eth_tx_frag_process(pp, skb, txq_ctrl, tx_spec.flags);
+		STAT_DBG(pp->stats.tx_sg++);
+	}
+/*
+	printk(KERN_ERR "tx: frags=%d, tx_desc[0x0]=%x [0xc]=%x, wr_id=%d, rd_id=%d, skb=%p\n",
+			frags, tx_desc->command,tx_desc->hw_cmd,
+			txq_ctrl->shadow_txq_put_i, txq_ctrl->shadow_txq_get_i, skb);
+*/
+	txq_ctrl->txq_count += frags;
+
+#ifdef CONFIG_MV_NETA_DEBUG_CODE
+	if (pp->flags & MV_ETH_F_DBG_TX) {
+		printk(KERN_ERR "\n");
+		printk(KERN_ERR "%s - eth_tx_%lu: cpu=%d, in_intr=0x%lx, port=%d, txp=%d, txq=%d\n",
+		       dev->name, dev->stats.tx_packets, smp_processor_id(),
+			in_interrupt(), pp->port, tx_spec.txp, tx_spec.txq);
+		printk(KERN_ERR "\t skb=%p, head=%p, data=%p, size=%d\n", skb, skb->head, skb->data, skb->len);
+		mv_eth_tx_desc_print(tx_desc);
+		/*mv_eth_skb_print(skb);*/
+		mvDebugMemDump(skb->data, 64, 1);
+	}
+#endif /* CONFIG_MV_NETA_DEBUG_CODE */
+
+#ifdef CONFIG_MV_PON
+	if (MV_PON_PORT(pp->port))
+		mvNetaPonTxqBytesAdd(pp->port, tx_spec.txp, tx_spec.txq, skb->len);
+#endif /* CONFIG_MV_PON */
+
+	/* Enable transmit */
+	mv_neta_wmb();
+	mvNetaTxqPendDescAdd(pp->port, tx_spec.txp, tx_spec.txq, frags);
+
+	STAT_DBG(txq_ctrl->stats.txq_tx += frags);
+
+out:
+	if (frags > 0) {
+		dev->stats.tx_packets++;
+		dev->stats.tx_bytes += skb->len;
+	} else {
+		dev->stats.tx_dropped++;
+		dev_kfree_skb_any(skb);
+	}
+
+#ifndef CONFIG_MV_NETA_TXDONE_ISR
+	if (txq_ctrl) {
+		if (txq_ctrl->txq_count >= mv_ctrl_txdone) {
+			u32 tx_done = mv_eth_txq_done(pp, txq_ctrl);
+
+			STAT_DIST((tx_done < pp->dist_stats.tx_done_dist_size) ? pp->dist_stats.tx_done_dist[tx_done]++ : 0);
+
+		}
+		/* If after calling mv_eth_txq_done, txq_ctrl->txq_count equals frags, we need to set the timer */
+		if ((txq_ctrl->txq_count > 0)  && (txq_ctrl->txq_count <= frags) && (frags > 0)) {
+			struct cpu_ctrl *cpuCtrl = pp->cpu_config[smp_processor_id()];
+
+			mv_eth_add_tx_done_timer(cpuCtrl);
+		}
+	}
+#endif /* CONFIG_MV_NETA_TXDONE_ISR */
+
+	if (txq_ctrl)
+		mv_eth_unlock(txq_ctrl, flags);
+
+	return NETDEV_TX_OK;
+}
+
+#ifdef CONFIG_MV_ETH_TSO
+/* Validate TSO */
+static inline int mv_eth_tso_validate(struct sk_buff *skb, struct net_device *dev)
+{
+	if (!(dev->features & NETIF_F_TSO)) {
+		printk(KERN_ERR "error: (skb_is_gso(skb) returns true but features is not NETIF_F_TSO\n");
+		return 1;
+	}
+
+	if (skb_shinfo(skb)->frag_list != NULL) {
+		printk(KERN_ERR "***** ERROR: frag_list is not null\n");
+		return 1;
+	}
+
+	if (skb_shinfo(skb)->gso_segs == 1) {
+		printk(KERN_ERR "***** ERROR: only one TSO segment\n");
+		return 1;
+	}
+
+	if (skb->len <= skb_shinfo(skb)->gso_size) {
+		printk(KERN_ERR "***** ERROR: total_len (%d) less than gso_size (%d)\n", skb->len, skb_shinfo(skb)->gso_size);
+		return 1;
+	}
+	if ((htons(ETH_P_IP) != skb->protocol) || (ip_hdr(skb)->protocol != IPPROTO_TCP) || (tcp_hdr(skb) == NULL)) {
+		printk(KERN_ERR "***** ERROR: Protocol is not TCP over IP\n");
+		return 1;
+	}
+	return 0;
+}
+
+static inline int mv_eth_tso_build_hdr_desc(struct eth_port *pp, struct neta_tx_desc *tx_desc,
+					     struct sk_buff *skb,
+					     struct tx_queue *txq_ctrl, u16 *mh, int hdr_len, int size,
+					     MV_U32 tcp_seq, MV_U16 ip_id, int left_len)
+{
+	struct iphdr *iph;
+	struct tcphdr *tcph;
+	MV_U8 *data, *mac;
+	int mac_hdr_len = skb_network_offset(skb);
+
+	data = mv_eth_extra_pool_get(pp);
+	if (!data)
+		return 0;
+
+	txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = ((MV_ULONG)data | MV_ETH_SHADOW_EXT);
+
+	/* Reserve 2 bytes for IP header alignment */
+	mac = data + MV_ETH_MH_SIZE;
+	iph = (struct iphdr *)(mac + mac_hdr_len);
+
+	memcpy(mac, skb->data, hdr_len);
+
+	if (iph) {
+		iph->id = htons(ip_id);
+		iph->tot_len = htons(size + hdr_len - mac_hdr_len);
+	}
+
+	tcph = (struct tcphdr *)(mac + skb_transport_offset(skb));
+	tcph->seq = htonl(tcp_seq);
+
+	if (left_len) {
+		/* Clear all special flags for not last packet */
+		tcph->psh = 0;
+		tcph->fin = 0;
+		tcph->rst = 0;
+	}
+
+	if (mh) {
+		/* Start tarnsmit from MH - add 2 bytes to size */
+		*((MV_U16 *)data) = *mh;
+		/* increment ip_offset field in TX descriptor by 2 bytes */
+		mac_hdr_len += MV_ETH_MH_SIZE;
+		hdr_len += MV_ETH_MH_SIZE;
+	} else {
+		/* Start transmit from MAC */
+		data = mac;
+	}
+
+	tx_desc->dataSize = hdr_len;
+	tx_desc->command = mvNetaTxqDescCsum(mac_hdr_len, skb->protocol, ((u8 *)tcph - (u8 *)iph) >> 2, IPPROTO_TCP);
+	tx_desc->command |= NETA_TX_F_DESC_MASK;
+
+	tx_desc->bufPhysAddr = mvOsCacheFlush(pp->dev->dev.parent, data, tx_desc->dataSize);
+	mv_eth_shadow_inc_put(txq_ctrl);
+
+	mv_eth_tx_desc_flush(pp, tx_desc);
+
+	return hdr_len;
+}
+
+static inline int mv_eth_tso_build_data_desc(struct eth_port *pp, struct neta_tx_desc *tx_desc, struct sk_buff *skb,
+					     struct tx_queue *txq_ctrl, char *frag_ptr,
+					     int frag_size, int data_left, int total_left)
+{
+	int size;
+
+	size = MV_MIN(frag_size, data_left);
+
+	tx_desc->dataSize = size;
+	tx_desc->bufPhysAddr = mvOsCacheFlush(pp->dev->dev.parent, frag_ptr, size);
+	tx_desc->command = 0;
+	txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = 0;
+
+	if (size == data_left) {
+		/* last descriptor in the TCP packet */
+		tx_desc->command = NETA_TX_L_DESC_MASK;
+
+		if (total_left == 0) {
+			/* last descriptor in SKB */
+			txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = ((MV_ULONG) skb | MV_ETH_SHADOW_SKB);
+		}
+	}
+	mv_eth_shadow_inc_put(txq_ctrl);
+	mv_eth_tx_desc_flush(pp, tx_desc);
+
+	return size;
+}
+
+/***********************************************************
+ * mv_eth_tx_tso --                                        *
+ *   send a packet.                                        *
+ ***********************************************************/
+int mv_eth_tx_tso(struct sk_buff *skb, struct net_device *dev,
+		struct mv_eth_tx_spec *tx_spec, struct tx_queue *txq_ctrl)
+{
+	int frag = 0;
+	int total_len, hdr_len, size, frag_size, data_left;
+	char *frag_ptr;
+	int totalDescNum, totalBytes = 0;
+	struct neta_tx_desc *tx_desc;
+	MV_U16 ip_id;
+	MV_U32 tcp_seq = 0;
+	skb_frag_t *skb_frag_ptr;
+	const struct tcphdr *th = tcp_hdr(skb);
+	struct eth_port *priv = MV_ETH_PRIV(dev);
+	MV_U16 *mh = NULL;
+	int i;
+
+	STAT_DBG(priv->stats.tx_tso++);
+/*
+	printk(KERN_ERR "mv_eth_tx_tso_%d ENTER: skb=%p, total_len=%d\n", priv->stats.tx_tso, skb, skb->len);
+*/
+	if (mv_eth_tso_validate(skb, dev))
+		return 0;
+
+	/* Calculate expected number of TX descriptors */
+	totalDescNum = skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;
+
+	if ((txq_ctrl->txq_count + totalDescNum) >= txq_ctrl->txq_size) {
+/*
+		printk(KERN_ERR "%s: no TX descriptors - txq_count=%d, len=%d, nr_frags=%d, gso_segs=%d\n",
+					__func__, txq_ctrl->txq_count, skb->len, skb_shinfo(skb)->nr_frags,
+					skb_shinfo(skb)->gso_segs);
+*/
+		STAT_ERR(txq_ctrl->stats.txq_err++);
+		return 0;
+	}
+
+	total_len = skb->len;
+	hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+
+	total_len -= hdr_len;
+	ip_id = ntohs(ip_hdr(skb)->id);
+	tcp_seq = ntohl(th->seq);
+
+	frag_size = skb_headlen(skb);
+	frag_ptr = skb->data;
+
+	if (frag_size < hdr_len) {
+		printk(KERN_ERR "***** ERROR: frag_size=%d, hdr_len=%d\n", frag_size, hdr_len);
+		return 0;
+	}
+
+	frag_size -= hdr_len;
+	frag_ptr += hdr_len;
+	if (frag_size == 0) {
+		skb_frag_ptr = &skb_shinfo(skb)->frags[frag];
+
+		/* Move to next segment */
+		frag_size = skb_frag_ptr->size;
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3, 1, 10)
+		frag_ptr = page_address(skb_frag_ptr->page.p) + skb_frag_ptr->page_offset;
+#else
+		frag_ptr = page_address(skb_frag_ptr->page) + skb_frag_ptr->page_offset;
+#endif
+		frag++;
+	}
+	totalDescNum = 0;
+
+	while (total_len > 0) {
+		data_left = MV_MIN(skb_shinfo(skb)->gso_size, total_len);
+
+		tx_desc = mv_eth_tx_desc_get(txq_ctrl, 1);
+		if (tx_desc == NULL)
+			goto outNoTxDesc;
+
+		totalDescNum++;
+		total_len -= data_left;
+		txq_ctrl->txq_count++;
+
+		if (tx_spec->flags & MV_ETH_F_MH)
+			mh = &priv->tx_mh;
+		/* prepare packet headers: MAC + IP + TCP */
+		size = mv_eth_tso_build_hdr_desc(priv, tx_desc, skb, txq_ctrl, mh,
+					hdr_len, data_left, tcp_seq, ip_id, total_len);
+		if (size == 0)
+			goto outNoTxDesc;
+
+		totalBytes += size;
+/*
+		printk(KERN_ERR "Header desc: tx_desc=%p, skb=%p, hdr_len=%d, data_left=%d\n",
+						tx_desc, skb, hdr_len, data_left);
+*/
+		ip_id++;
+
+		while (data_left > 0) {
+			tx_desc = mv_eth_tx_desc_get(txq_ctrl, 1);
+			if (tx_desc == NULL)
+				goto outNoTxDesc;
+
+			totalDescNum++;
+			txq_ctrl->txq_count++;
+
+			size = mv_eth_tso_build_data_desc(priv, tx_desc, skb, txq_ctrl,
+							  frag_ptr, frag_size, data_left, total_len);
+			totalBytes += size;
+/*
+			printk(KERN_ERR "Data desc: tx_desc=%p, skb=%p, size=%d, frag_size=%d, data_left=%d\n",
+							tx_desc, skb, size, frag_size, data_left);
+ */
+			data_left -= size;
+			tcp_seq += size;
+
+			frag_size -= size;
+			frag_ptr += size;
+
+			if ((frag_size == 0) && (frag < skb_shinfo(skb)->nr_frags)) {
+				skb_frag_ptr = &skb_shinfo(skb)->frags[frag];
+
+				/* Move to next segment */
+				frag_size = skb_frag_ptr->size;
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3, 1, 10)
+				frag_ptr = page_address(skb_frag_ptr->page.p) + skb_frag_ptr->page_offset;
+#else
+				frag_ptr = page_address(skb_frag_ptr->page) + skb_frag_ptr->page_offset;
+#endif
+				frag++;
+			}
+		}		/* of while data_left > 0 */
+	}			/* of while (total_len > 0) */
+
+#ifdef CONFIG_MV_PON
+	if (MV_PON_PORT(priv->port))
+		mvNetaPonTxqBytesAdd(priv->port, txq_ctrl->txp, txq_ctrl->txq, totalBytes);
+#endif /* CONFIG_MV_PON */
+
+	STAT_DBG(priv->stats.tx_tso_bytes += totalBytes);
+	STAT_DBG(txq_ctrl->stats.txq_tx += totalDescNum);
+
+	mv_neta_wmb();
+	mvNetaTxqPendDescAdd(priv->port, txq_ctrl->txp, txq_ctrl->txq, totalDescNum);
+/*
+	printk(KERN_ERR "mv_eth_tx_tso EXIT: totalDescNum=%d\n", totalDescNum);
+*/
+	return totalDescNum;
+
+outNoTxDesc:
+	/* No enough TX descriptors for the whole skb - rollback */
+	printk(KERN_ERR "%s: No TX descriptors - rollback %d, txq_count=%d, nr_frags=%d, skb=%p, len=%d, gso_segs=%d\n",
+			__func__, totalDescNum, txq_ctrl->txq_count, skb_shinfo(skb)->nr_frags,
+			skb, skb->len, skb_shinfo(skb)->gso_segs);
+
+	for (i = 0; i < totalDescNum; i++) {
+		txq_ctrl->txq_count--;
+		mv_eth_shadow_dec_put(txq_ctrl);
+		mvNetaTxqPrevDescGet(txq_ctrl->q);
+	}
+	return MV_OK;
+}
+#endif /* CONFIG_MV_ETH_TSO */
+
+/* Drop packets received by the RXQ and free buffers */
+static void mv_eth_rxq_drop_pkts(struct eth_port *pp, int rxq)
+{
+	struct neta_rx_desc *rx_desc;
+	struct eth_pbuf     *pkt;
+	struct bm_pool      *pool;
+	int	                rx_done, i;
+	MV_NETA_RXQ_CTRL    *rx_ctrl = pp->rxq_ctrl[rxq].q;
+
+	if (rx_ctrl == NULL)
+		return;
+
+	rx_done = mvNetaRxqBusyDescNumGet(pp->port, rxq);
+	mvOsCacheIoSync(pp->dev->dev.parent);
+
+	for (i = 0; i < rx_done; i++) {
+		rx_desc = mvNetaRxqNextDescGet(rx_ctrl);
+		mvOsCacheLineInv(pp->dev->dev.parent, rx_desc);
+
+#if defined(MV_CPU_BE)
+		mvNetaRxqDescSwap(rx_desc);
+#endif /* MV_CPU_BE */
+
+		pkt = (struct eth_pbuf *)rx_desc->bufCookie;
+		pool = &mv_eth_pool[pkt->pool];
+		mv_eth_rxq_refill(pp, rxq, pkt, pool, rx_desc);
+	}
+	if (rx_done) {
+		mv_neta_wmb();
+		mvNetaRxqDescNumUpdate(pp->port, rxq, rx_done, rx_done);
+	}
+}
+
+static void mv_eth_txq_done_force(struct eth_port *pp, struct tx_queue *txq_ctrl)
+{
+	int tx_done = txq_ctrl->txq_count;
+
+	mv_eth_txq_bufs_free(pp, txq_ctrl, tx_done);
+
+	txq_ctrl->txq_count -= tx_done;
+	STAT_DBG(txq_ctrl->stats.txq_txdone += tx_done);
+}
+
+inline u32 mv_eth_tx_done_pon(struct eth_port *pp, int *tx_todo)
+{
+	int txp, txq;
+	struct tx_queue *txq_ctrl;
+	unsigned long flags = 0;
+
+	u32 tx_done = 0;
+
+	*tx_todo = 0;
+
+	STAT_INFO(pp->stats.tx_done++);
+
+	/* simply go over all TX ports and TX queues */
+	txp = pp->txp_num;
+	while (txp--) {
+		txq = CONFIG_MV_ETH_TXQ;
+
+		while (txq--) {
+			txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + txq];
+			mv_eth_lock(txq_ctrl, flags);
+			if ((txq_ctrl) && (txq_ctrl->txq_count)) {
+				tx_done += mv_eth_txq_done(pp, txq_ctrl);
+				*tx_todo += txq_ctrl->txq_count;
+			}
+			mv_eth_unlock(txq_ctrl, flags);
+		}
+	}
+
+	STAT_DIST((tx_done < pp->dist_stats.tx_done_dist_size) ? pp->dist_stats.tx_done_dist[tx_done]++ : 0);
+
+	return tx_done;
+}
+
+
+inline u32 mv_eth_tx_done_gbe(struct eth_port *pp, u32 cause_tx_done, int *tx_todo)
+{
+	int txq;
+	struct tx_queue *txq_ctrl;
+	unsigned long flags = 0;
+	u32 tx_done = 0;
+
+	*tx_todo = 0;
+
+	STAT_INFO(pp->stats.tx_done++);
+
+	while (cause_tx_done != 0) {
+
+		/* For GbE ports we get TX Buffers Threshold Cross per queue in bits [7:0] */
+		txq = mv_eth_tx_done_policy(cause_tx_done);
+
+		if (txq == -1)
+			break;
+
+		txq_ctrl = &pp->txq_ctrl[txq];
+
+		if (txq_ctrl == NULL) {
+			printk(KERN_ERR "%s: txq_ctrl = NULL, txq=%d\n", __func__, txq);
+			return -EINVAL;
+		}
+
+		mv_eth_lock(txq_ctrl, flags);
+
+		if ((txq_ctrl) && (txq_ctrl->txq_count)) {
+			tx_done += mv_eth_txq_done(pp, txq_ctrl);
+			*tx_todo += txq_ctrl->txq_count;
+		}
+		cause_tx_done &= ~((1 << txq) << NETA_CAUSE_TXQ_SENT_DESC_OFFS);
+
+		mv_eth_unlock(txq_ctrl, flags);
+	}
+
+	STAT_DIST((tx_done < pp->dist_stats.tx_done_dist_size) ? pp->dist_stats.tx_done_dist[tx_done]++ : 0);
+
+	return tx_done;
+}
+
+
+static void mv_eth_tx_frag_process(struct eth_port *pp, struct sk_buff *skb, struct tx_queue *txq_ctrl,	u16 flags)
+{
+	int i;
+	struct neta_tx_desc *tx_desc;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		tx_desc = mvNetaTxqNextDescGet(txq_ctrl->q);
+
+		/* NETA_TX_BM_ENABLE_MASK = 0 */
+		/* NETA_TX_PKT_OFFSET_MASK = 0 */
+		tx_desc->dataSize = frag->size;
+		tx_desc->bufPhysAddr =
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3, 1, 10)
+			mvOsCacheFlush(pp->dev->dev.parent, page_address(frag->page.p) + frag->page_offset,
+			tx_desc->dataSize);
+#else
+			mvOsCacheFlush(pp->dev->dev.parent, page_address(frag->page) + frag->page_offset,
+			tx_desc->dataSize);
+#endif
+		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
+			/* Last descriptor */
+			if (flags & MV_ETH_F_NO_PAD)
+				tx_desc->command = NETA_TX_L_DESC_MASK;
+			else
+				tx_desc->command = (NETA_TX_L_DESC_MASK | NETA_TX_Z_PAD_MASK);
+
+			txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = ((MV_ULONG) skb | MV_ETH_SHADOW_SKB);
+			mv_eth_shadow_inc_put(txq_ctrl);
+		} else {
+			/* Descriptor in the middle: Not First, Not Last */
+			tx_desc->command = 0;
+
+			txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = 0;
+			mv_eth_shadow_inc_put(txq_ctrl);
+		}
+
+		mv_eth_tx_desc_flush(pp, tx_desc);
+	}
+}
+
+/***********************************************************
+ * mv_eth_port_pools_free                                  *
+ *   per port - free all the buffers from pools		   *
+ *   disable pool if empty				   *
+ ***********************************************************/
+static int mv_eth_port_pools_free(int port)
+{
+	struct eth_port *pp;
+
+	pp = mv_eth_port_by_id(port);
+	if (!pp)
+		return MV_OK;
+
+	if (pp->pool_long) {
+		mv_eth_pool_free(pp->pool_long->pool, pp->pool_long_num);
+#ifndef CONFIG_MV_ETH_BM_CPU
+	}
+#else
+		if (MV_NETA_BM_CAP()) {
+			if (pp->pool_long->buf_num == 0)
+				mvNetaBmPoolDisable(pp->pool_long->pool);
+
+			/*empty pools*/
+			if (pp->pool_short && (pp->pool_long->pool != pp->pool_short->pool)) {
+				mv_eth_pool_free(pp->pool_short->pool, pp->pool_short_num);
+				if (pp->pool_short->buf_num == 0)
+					mvNetaBmPoolDisable(pp->pool_short->pool);
+			}
+		}
+	}
+#endif /*CONFIG_MV_ETH_BM_CPU*/
+	return MV_OK;
+}
+
+/* Free "num" buffers from the pool */
+static int mv_eth_pool_free(int pool, int num)
+{
+	struct eth_pbuf *pkt;
+	int i = 0;
+	struct bm_pool *ppool = &mv_eth_pool[pool];
+	unsigned long flags = 0;
+	bool free_all = false;
+
+	MV_ETH_LOCK(&ppool->lock, flags);
+
+	if (num >= ppool->buf_num) {
+		/* Free all buffers from the pool */
+		free_all = true;
+		num = ppool->buf_num;
+	}
+
+#ifdef CONFIG_MV_ETH_BM_CPU
+	if (mv_eth_pool_bm(ppool)) {
+
+		if (free_all)
+			mvNetaBmConfigSet(MV_BM_EMPTY_LIMIT_MASK);
+
+		while (i < num) {
+			MV_U32 *va;
+			MV_U32 pa = mvBmPoolGet(pool);
+
+			if (pa == 0)
+				break;
+
+			va = phys_to_virt(pa);
+			pkt = (struct eth_pbuf *)*va;
+#if !defined(CONFIG_MV_ETH_BE_WA)
+			pkt = (struct eth_pbuf *)MV_32BIT_LE((MV_U32)pkt);
+#endif /* !CONFIG_MV_ETH_BE_WA */
+
+			if (pkt) {
+				mv_eth_pkt_free(pkt);
+#ifdef ETH_SKB_DEBUG
+				mv_eth_skb_check((struct sk_buff *)pkt->osInfo);
+#endif /* ETH_SKB_DEBUG */
+			}
+			i++;
+		}
+		printk(KERN_ERR "bm pool #%d: pkt_size=%d, buf_size=%d - %d of %d buffers free\n",
+			pool, ppool->pkt_size, RX_BUF_SIZE(ppool->pkt_size), i, num);
+
+		if (free_all)
+			mvNetaBmConfigClear(MV_BM_EMPTY_LIMIT_MASK);
+	}
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+	ppool->buf_num -= num;
+#ifdef CONFIG_MV_ETH_BM
+	mvNetaBmPoolBufNumUpdate(pool, num, 0);
+#endif
+	/* Free buffers from the pool stack too */
+	if (free_all)
+		num = mvStackIndex(ppool->stack);
+	else if (mv_eth_pool_bm(ppool))
+		num = 0;
+
+	i = 0;
+	while (i < num) {
+		/* sanity check */
+		if (mvStackIndex(ppool->stack) == 0) {
+			printk(KERN_ERR "%s: No more buffers in the stack\n", __func__);
+			break;
+		}
+		pkt = (struct eth_pbuf *)mvStackPop(ppool->stack);
+		if (pkt) {
+			mv_eth_pkt_free(pkt);
+#ifdef ETH_SKB_DEBUG
+			mv_eth_skb_check((struct sk_buff *)pkt->osInfo);
+#endif /* ETH_SKB_DEBUG */
+		}
+		i++;
+	}
+	if (i > 0)
+		printk(KERN_ERR "stack pool #%d: pkt_size=%d, buf_size=%d - %d of %d buffers free\n",
+			pool, ppool->pkt_size, RX_BUF_SIZE(ppool->pkt_size), i, num);
+
+	MV_ETH_UNLOCK(&ppool->lock, flags);
+
+	return i;
+}
+
+
+static int mv_eth_pool_destroy(int pool)
+{
+	int num, status = 0;
+	struct bm_pool *ppool = &mv_eth_pool[pool];
+
+	num = mv_eth_pool_free(pool, ppool->buf_num);
+	if (num != ppool->buf_num) {
+		printk(KERN_ERR "Warning: could not free all buffers in pool %d while destroying pool\n", pool);
+		return MV_ERROR;
+	}
+
+	status = mvStackDelete(ppool->stack);
+
+#ifdef CONFIG_MV_ETH_BM_CPU
+	if (MV_NETA_BM_CAP()) {
+		mvNetaBmPoolDisable(pool);
+
+		/* Note: we don't free the bm_pool here ! */
+		if (ppool->bm_pool)
+			mvOsFree(ppool->bm_pool);
+	}
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+	memset(ppool, 0, sizeof(struct bm_pool));
+
+	return status;
+}
+
+
+static int mv_eth_pool_add(struct eth_port *pp, int pool, int buf_num)
+{
+	struct bm_pool *bm_pool;
+	struct sk_buff *skb;
+	struct eth_pbuf *pkt;
+	int i;
+	unsigned long flags = 0;
+
+	if ((pool < 0) || (pool >= MV_ETH_BM_POOLS)) {
+		printk(KERN_ERR "%s: invalid pool number %d\n", __func__, pool);
+		return 0;
+	}
+
+	bm_pool = &mv_eth_pool[pool];
+
+	/* Check buffer size */
+	if (bm_pool->pkt_size == 0) {
+		printk(KERN_ERR "%s: invalid pool #%d state: pkt_size=%d, buf_size=%d, buf_num=%d\n",
+		       __func__, pool, bm_pool->pkt_size, RX_BUF_SIZE(bm_pool->pkt_size), bm_pool->buf_num);
+		return 0;
+	}
+
+	/* Insure buf_num is smaller than capacity */
+	if ((buf_num < 0) || ((buf_num + bm_pool->buf_num) > (bm_pool->capacity))) {
+
+		printk(KERN_ERR "%s: can't add %d buffers into bm_pool=%d: capacity=%d, buf_num=%d\n",
+		       __func__, buf_num, pool, bm_pool->capacity, bm_pool->buf_num);
+		return 0;
+	}
+
+	MV_ETH_LOCK(&bm_pool->lock, flags);
+
+	for (i = 0; i < buf_num; i++) {
+		pkt = mvOsMalloc(sizeof(struct eth_pbuf));
+		if (!pkt) {
+			printk(KERN_ERR "%s: can't allocate %d bytes\n", __func__, sizeof(struct eth_pbuf));
+			break;
+		}
+
+		skb = mv_eth_skb_alloc(pp, bm_pool, pkt, GFP_KERNEL);
+		if (!skb) {
+			kfree(pkt);
+			break;
+		}
+/*
+	printk(KERN_ERR "skb_alloc_%d: pool=%d, skb=%p, pkt=%p, head=%p (%lx), skb->truesize=%d\n",
+				i, bm_pool->pool, skb, pkt, pkt->pBuf, pkt->physAddr, skb->truesize);
+*/
+
+#ifdef CONFIG_MV_ETH_BM_CPU
+		if (MV_NETA_BM_CAP()) {
+			mvBmPoolPut(pool, (MV_ULONG) pkt->physAddr);
+			STAT_DBG(bm_pool->stats.bm_put++);
+		} else {
+			mvStackPush(bm_pool->stack, (MV_U32) pkt);
+			STAT_DBG(bm_pool->stats.stack_put++);
+		}
+#else
+		mvStackPush(bm_pool->stack, (MV_U32) pkt);
+		STAT_DBG(bm_pool->stats.stack_put++);
+#endif /* CONFIG_MV_ETH_BM_CPU */
+	}
+	bm_pool->buf_num += i;
+#ifdef CONFIG_MV_ETH_BM
+	mvNetaBmPoolBufNumUpdate(pool, i, 1);
+#endif
+	printk(KERN_ERR "pool #%d: pkt_size=%d, buf_size=%d - %d of %d buffers added\n",
+	       pool, bm_pool->pkt_size, RX_BUF_SIZE(bm_pool->pkt_size), i, buf_num);
+
+	MV_ETH_UNLOCK(&bm_pool->lock, flags);
+
+	return i;
+}
+
+#ifdef CONFIG_MV_ETH_BM
+void	*mv_eth_bm_pool_create(int pool, int capacity, MV_ULONG *pPhysAddr)
+{
+		MV_ULONG			physAddr;
+		MV_UNIT_WIN_INFO	winInfo;
+		void				*pVirt;
+		MV_STATUS			status;
+
+		pVirt = mvOsIoUncachedMalloc(NULL, sizeof(MV_U32) * capacity, &physAddr, NULL);
+		if (pVirt == NULL) {
+			mvOsPrintf("%s: Can't allocate %d bytes for Long pool #%d\n",
+					__func__, MV_BM_POOL_CAP_MAX * sizeof(MV_U32), pool);
+			return NULL;
+		}
+
+		/* Pool address must be MV_BM_POOL_PTR_ALIGN bytes aligned */
+		if (MV_IS_NOT_ALIGN((unsigned)pVirt, MV_BM_POOL_PTR_ALIGN)) {
+			mvOsPrintf("memory allocated for BM pool #%d is not %d bytes aligned\n",
+						pool, MV_BM_POOL_PTR_ALIGN);
+			mvOsIoCachedFree(NULL, sizeof(MV_U32) * capacity, physAddr, pVirt, 0);
+			return NULL;
+		}
+		status = mvNetaBmPoolInit(pool, pVirt, physAddr, capacity);
+		if (status != MV_OK) {
+			mvOsPrintf("%s: Can't init #%d BM pool. status=%d\n", __func__, pool, status);
+			mvOsIoCachedFree(NULL, sizeof(MV_U32) * capacity, physAddr, pVirt, 0);
+			return NULL;
+		}
+#ifdef CONFIG_ARCH_MVEBU
+		status = mvebu_mbus_get_addr_win_info(physAddr, &winInfo.targetId, &winInfo.attrib);
+#else
+		status = mvCtrlAddrWinInfoGet(&winInfo, physAddr);
+#endif
+		if (status != MV_OK) {
+			printk(KERN_ERR "%s: Can't map BM pool #%d. phys_addr=0x%x, status=%d\n",
+			       __func__, pool, (unsigned)physAddr, status);
+			mvOsIoCachedFree(NULL, sizeof(MV_U32) * capacity, physAddr, pVirt, 0);
+			return NULL;
+		}
+		mvNetaBmPoolTargetSet(pool, winInfo.targetId, winInfo.attrib);
+		mvNetaBmPoolEnable(pool);
+
+		if (pPhysAddr != NULL)
+			*pPhysAddr = physAddr;
+
+		return pVirt;
+}
+#endif /* CONFIG_MV_ETH_BM */
+
+static MV_STATUS mv_eth_pool_create(int pool, int capacity)
+{
+	struct bm_pool *bm_pool;
+
+	if ((pool < 0) || (pool >= MV_ETH_BM_POOLS)) {
+		printk(KERN_ERR "%s: pool=%d is out of range\n", __func__, pool);
+		return MV_BAD_VALUE;
+	}
+
+	bm_pool = &mv_eth_pool[pool];
+	memset(bm_pool, 0, sizeof(struct bm_pool));
+
+#ifdef CONFIG_MV_ETH_BM_CPU
+	if (MV_NETA_BM_CAP()) {
+		bm_pool->bm_pool = mv_eth_bm_pool_create(pool, capacity, &bm_pool->physAddr);
+		if (bm_pool->bm_pool == NULL)
+			return MV_FAIL;
+	}
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+	/* Create Stack as container of alloacted skbs for SKB_RECYCLE and for RXQs working without BM support */
+	bm_pool->stack = mvStackCreate(capacity);
+
+	if (bm_pool->stack == NULL) {
+		printk(KERN_ERR "Can't create MV_STACK structure for %d elements\n", capacity);
+		return MV_OUT_OF_CPU_MEM;
+	}
+
+	bm_pool->pool = pool;
+	bm_pool->capacity = capacity;
+	bm_pool->pkt_size = 0;
+	bm_pool->buf_num = 0;
+	spin_lock_init(&bm_pool->lock);
+
+	return MV_OK;
+}
+
+/* Interrupt handling */
+irqreturn_t mv_eth_isr(int irq, void *dev_id)
+{
+	struct eth_port *pp = (struct eth_port *)dev_id;
+	int cpu = smp_processor_id();
+	struct napi_struct *napi = pp->cpu_config[cpu]->napi;
+	u32 regVal;
+
+#ifdef CONFIG_MV_NETA_DEBUG_CODE
+	if (pp->flags & MV_ETH_F_DBG_ISR) {
+		printk(KERN_ERR "%s: port=%d, cpu=%d, mask=0x%x, cause=0x%x\n",
+			__func__, pp->port, cpu,
+			MV_REG_READ(NETA_INTR_NEW_MASK_REG(pp->port)), MV_REG_READ(NETA_INTR_NEW_CAUSE_REG(pp->port)));
+	}
+#endif /* CONFIG_MV_NETA_DEBUG_CODE */
+
+	STAT_INFO(pp->stats.irq[cpu]++);
+
+	/* Mask all interrupts */
+	MV_REG_WRITE(NETA_INTR_NEW_MASK_REG(pp->port), 0);
+	/* To be sure that itterrupt already masked Dummy read is required */
+	/* MV_REG_READ(NETA_INTR_NEW_MASK_REG(pp->port));*/
+
+	/* Verify that the device not already on the polling list */
+	if (napi_schedule_prep(napi)) {
+		/* schedule the work (rx+txdone+link) out of interrupt contxet */
+		__napi_schedule(napi);
+	} else {
+		STAT_INFO(pp->stats.irq_err[cpu]++);
+#ifdef CONFIG_MV_NETA_DEBUG_CODE
+		pr_warning("%s: IRQ=%d, port=%d, cpu=%d - NAPI already scheduled\n",
+			__func__, irq, pp->port, cpu);
+#endif /* CONFIG_MV_NETA_DEBUG_CODE */
+	}
+
+	/*
+	* Ensure mask register write is completed by issuing a read.
+	* dsb() instruction cannot be used on registers since they are in
+	* MBUS domain
+	* Need for Aramada38x. Dosn't need for AXP and A370
+	*/
+	if ((pp->plat_data->ctrl_model == MV_6810_DEV_ID) ||
+	    (pp->plat_data->ctrl_model == MV_6811_DEV_ID) ||
+	    (pp->plat_data->ctrl_model == MV_6820_DEV_ID) ||
+	    (pp->plat_data->ctrl_model == MV_6828_DEV_ID)) {
+		regVal = MV_REG_READ(NETA_INTR_NEW_MASK_REG(pp->port));
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* Enable / Disable HWF to the TXQs of the [port] that in MV_ETH_TXQ_HWF mode */
+int mv_eth_hwf_ctrl(int port, MV_BOOL enable)
+{
+#ifdef CONFIG_MV_ETH_HWF
+	int txp, txq, rx_port, mode;
+	struct eth_port *pp;
+
+	if (!MV_NETA_HWF_CAP())
+		return 0;
+
+	if (mvNetaPortCheck(port))
+		return -EINVAL;
+
+	pp = mv_eth_port_by_id(port);
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return -ENODEV;
+	}
+
+	for (txp = 0; txp < pp->txp_num; txp++) {
+		for (txq = 0; txq < CONFIG_MV_ETH_TXQ; txq++) {
+			mode = mv_eth_ctrl_txq_mode_get(port, txp, txq, &rx_port);
+			if (mode == MV_ETH_TXQ_HWF)
+				mvNetaHwfTxqEnable(rx_port, port, txp, txq, enable);
+		}
+	}
+#endif /* CONFIG_MV_ETH_HWF */
+	return 0;
+}
+
+void mv_eth_link_event(struct eth_port *pp, int print)
+{
+	struct net_device *dev = pp->dev;
+	bool              link_is_up;
+
+	STAT_INFO(pp->stats.link++);
+
+	/* Check Link status on ethernet port */
+#ifdef CONFIG_MV_PON
+	if (MV_PON_PORT(pp->port))
+		link_is_up = mv_pon_link_status();
+	else
+#endif /* CONFIG_MV_PON */
+		link_is_up = mvNetaLinkIsUp(pp->port);
+
+	if (link_is_up) {
+		mvNetaPortUp(pp->port);
+		set_bit(MV_ETH_F_LINK_UP_BIT, &(pp->flags));
+
+		if (mv_eth_ctrl_is_tx_enabled(pp)) {
+			if (dev) {
+				netif_carrier_on(dev);
+				netif_tx_wake_all_queues(dev);
+			}
+		}
+		mv_eth_hwf_ctrl(pp->port, MV_TRUE);
+	} else {
+		mv_eth_hwf_ctrl(pp->port, MV_FALSE);
+
+		if (dev) {
+			netif_carrier_off(dev);
+			netif_tx_stop_all_queues(dev);
+		}
+		mvNetaPortDown(pp->port);
+		clear_bit(MV_ETH_F_LINK_UP_BIT, &(pp->flags));
+	}
+
+	if (print) {
+		if (dev)
+			printk(KERN_ERR "%s: ", dev->name);
+		else
+			printk(KERN_ERR "%s: ", "none");
+
+		mv_eth_link_status_print(pp->port);
+	}
+}
+
+/***********************************************************************************************/
+int mv_eth_poll(struct napi_struct *napi, int budget)
+{
+	int rx_done = 0;
+	MV_U32 causeRxTx;
+	struct eth_port *pp = MV_ETH_PRIV(napi->dev);
+	struct cpu_ctrl *cpuCtrl = pp->cpu_config[smp_processor_id()];
+
+#ifdef CONFIG_MV_NETA_DEBUG_CODE
+	if (pp->flags & MV_ETH_F_DBG_POLL) {
+		printk(KERN_ERR "%s ENTER: port=%d, cpu=%d, mask=0x%x, cause=0x%x\n",
+			__func__, pp->port, smp_processor_id(),
+			MV_REG_READ(NETA_INTR_NEW_MASK_REG(pp->port)), MV_REG_READ(NETA_INTR_NEW_CAUSE_REG(pp->port)));
+	}
+#endif /* CONFIG_MV_NETA_DEBUG_CODE */
+
+	if (!test_bit(MV_ETH_F_STARTED_BIT, &(pp->flags))) {
+		STAT_INFO(pp->stats.netdev_stop++);
+
+#ifdef CONFIG_MV_NETA_DEBUG_CODE
+		if (pp->flags & MV_ETH_F_DBG_RX)
+			printk(KERN_ERR "%s: STARTED_BIT = 0, poll completed.\n", __func__);
+#endif /* CONFIG_MV_NETA_DEBUG_CODE */
+
+		napi_complete(napi);
+		STAT_INFO(pp->stats.poll_exit[smp_processor_id()]++);
+		return rx_done;
+	}
+
+	STAT_INFO(pp->stats.poll[smp_processor_id()]++);
+
+	/* Read cause register */
+	causeRxTx = MV_REG_READ(NETA_INTR_NEW_CAUSE_REG(pp->port)) &
+	    (MV_ETH_MISC_SUM_INTR_MASK | MV_ETH_TXDONE_INTR_MASK | MV_ETH_RX_INTR_MASK);
+
+	if (causeRxTx & MV_ETH_MISC_SUM_INTR_MASK) {
+		MV_U32 causeMisc;
+
+		/* Process MISC events - Link, etc ??? */
+		causeRxTx &= ~MV_ETH_MISC_SUM_INTR_MASK;
+		causeMisc = MV_REG_READ(NETA_INTR_MISC_CAUSE_REG(pp->port));
+
+		if (causeMisc & NETA_CAUSE_LINK_CHANGE_MASK)
+			mv_eth_link_event(pp, 1);
+
+		MV_REG_WRITE(NETA_INTR_MISC_CAUSE_REG(pp->port), 0);
+	}
+	causeRxTx |= cpuCtrl->causeRxTx;
+
+#ifdef CONFIG_MV_NETA_TXDONE_ISR
+	if (causeRxTx & MV_ETH_TXDONE_INTR_MASK) {
+		int tx_todo = 0;
+		/* TX_DONE process */
+
+		if (MV_PON_PORT(pp->port))
+			mv_eth_tx_done_pon(pp, &tx_todo);
+		else
+			mv_eth_tx_done_gbe(pp, (causeRxTx & MV_ETH_TXDONE_INTR_MASK), &tx_todo);
+
+		causeRxTx &= ~MV_ETH_TXDONE_INTR_MASK;
+	}
+#endif /* CONFIG_MV_NETA_TXDONE_ISR */
+
+#if (CONFIG_MV_ETH_RXQ > 1)
+	while ((causeRxTx != 0) && (budget > 0)) {
+		int count, rx_queue;
+
+		rx_queue = mv_eth_rx_policy(causeRxTx);
+		if (rx_queue == -1)
+			break;
+
+		count = mv_eth_rx(pp, budget, rx_queue, napi);
+		rx_done += count;
+		budget -= count;
+		if (budget > 0)
+			causeRxTx &= ~((1 << rx_queue) << NETA_CAUSE_RXQ_OCCUP_DESC_OFFS);
+	}
+#else
+	rx_done = mv_eth_rx(pp, budget, CONFIG_MV_ETH_RXQ_DEF, napi);
+	budget -= rx_done;
+#endif /* (CONFIG_MV_ETH_RXQ > 1) */
+
+	/* Maintain RX packets rate if adaptive RX coalescing is enabled */
+	if (pp->rx_adaptive_coal_cfg)
+		pp->rx_rate_pkts += rx_done;
+
+	STAT_DIST((rx_done < pp->dist_stats.rx_dist_size) ? pp->dist_stats.rx_dist[rx_done]++ : 0);
+
+#ifdef CONFIG_MV_NETA_DEBUG_CODE
+	if (pp->flags & MV_ETH_F_DBG_POLL) {
+		printk(KERN_ERR "%s  EXIT: port=%d, cpu=%d, budget=%d, rx_done=%d\n",
+			__func__, pp->port, smp_processor_id(), budget, rx_done);
+	}
+#endif /* CONFIG_MV_NETA_DEBUG_CODE */
+
+	if (budget > 0) {
+		unsigned long flags;
+
+		causeRxTx = 0;
+
+		napi_complete(napi);
+
+		STAT_INFO(pp->stats.poll_exit[smp_processor_id()]++);
+
+		/* adapt RX coalescing according to packets rate */
+		if (pp->rx_adaptive_coal_cfg)
+			mv_eth_adaptive_rx_update(pp);
+
+		if (!(pp->flags & MV_ETH_F_IFCAP_NETMAP)) {
+			local_irq_save(flags);
+
+			mv_neta_wmb();
+			MV_REG_WRITE(NETA_INTR_NEW_MASK_REG(pp->port),
+			     (MV_ETH_MISC_SUM_INTR_MASK | MV_ETH_TXDONE_INTR_MASK | MV_ETH_RX_INTR_MASK));
+
+			local_irq_restore(flags);
+		}
+	}
+	cpuCtrl->causeRxTx = causeRxTx;
+	return rx_done;
+}
+
+static void mv_eth_cpu_counters_init(void)
+{
+#ifdef CONFIG_MV_CPU_PERF_CNTRS
+
+	mvCpuCntrsInitialize();
+
+#ifdef CONFIG_PLAT_ARMADA
+	/*  cycles counter via special CCNT counter */
+	mvCpuCntrsProgram(0, MV_CPU_CNTRS_CYCLES, "Cycles", 13);
+
+	/* instruction counters */
+	mvCpuCntrsProgram(1, MV_CPU_CNTRS_INSTRUCTIONS, "Instr", 13);
+	/* mvCpuCntrsProgram(0, MV_CPU_CNTRS_DCACHE_READ_HIT, "DcRdHit", 0); */
+
+	/* ICache misses counter */
+	mvCpuCntrsProgram(2, MV_CPU_CNTRS_ICACHE_READ_MISS, "IcMiss", 0);
+
+	/* DCache read misses counter */
+	mvCpuCntrsProgram(3, MV_CPU_CNTRS_DCACHE_READ_MISS, "DcRdMiss", 0);
+
+	/* DCache write misses counter */
+	mvCpuCntrsProgram(4, MV_CPU_CNTRS_DCACHE_WRITE_MISS, "DcWrMiss", 0);
+
+	/* DTLB Miss counter */
+	mvCpuCntrsProgram(5, MV_CPU_CNTRS_DTLB_MISS, "dTlbMiss", 0);
+
+	/* mvCpuCntrsProgram(3, MV_CPU_CNTRS_TLB_MISS, "TlbMiss", 0); */
+#else /* CONFIG_FEROCEON */
+	/* 0 - instruction counters */
+	mvCpuCntrsProgram(0, MV_CPU_CNTRS_INSTRUCTIONS, "Instr", 16);
+	/* mvCpuCntrsProgram(0, MV_CPU_CNTRS_DCACHE_READ_HIT, "DcRdHit", 0); */
+
+	/* 1 - ICache misses counter */
+	mvCpuCntrsProgram(1, MV_CPU_CNTRS_ICACHE_READ_MISS, "IcMiss", 0);
+
+	/* 2 - cycles counter */
+	mvCpuCntrsProgram(2, MV_CPU_CNTRS_CYCLES, "Cycles", 18);
+
+	/* 3 - DCache read misses counter */
+	mvCpuCntrsProgram(3, MV_CPU_CNTRS_DCACHE_READ_MISS, "DcRdMiss", 0);
+	/* mvCpuCntrsProgram(3, MV_CPU_CNTRS_TLB_MISS, "TlbMiss", 0); */
+#endif /* CONFIG_PLAT_ARMADA */
+
+	event0 = mvCpuCntrsEventCreate("RX_DESC_PREF", 100000);
+	event1 = mvCpuCntrsEventCreate("RX_DESC_READ", 100000);
+	event2 = mvCpuCntrsEventCreate("RX_BUF_INV", 100000);
+	event3 = mvCpuCntrsEventCreate("RX_DESC_FILL", 100000);
+	event4 = mvCpuCntrsEventCreate("TX_START", 100000);
+	event5 = mvCpuCntrsEventCreate("RX_BUF_INV", 100000);
+	if ((event0 == NULL) || (event1 == NULL) || (event2 == NULL) ||
+		(event3 == NULL) || (event4 == NULL) || (event5 == NULL))
+		printk(KERN_ERR "Can't create cpu counter events\n");
+#endif /* CONFIG_MV_CPU_PERF_CNTRS */
+}
+
+void mv_eth_port_promisc_set(int port)
+{
+#ifdef CONFIG_MV_ETH_PNC
+	if (MV_NETA_PNC_CAP()) {
+		/* Accept all */
+		if (mv_eth_pnc_ctrl_en) {
+			pnc_mac_me(port, NULL, CONFIG_MV_ETH_RXQ_DEF);
+			pnc_mcast_all(port, 1);
+		} else {
+			pr_err("%s: PNC control is disabled\n", __func__);
+		}
+	} else { /* Legacy parser */
+		mvNetaRxUnicastPromiscSet(port, MV_TRUE);
+		mvNetaSetUcastTable(port, CONFIG_MV_ETH_RXQ_DEF);
+		mvNetaSetSpecialMcastTable(port, CONFIG_MV_ETH_RXQ_DEF);
+		mvNetaSetOtherMcastTable(port, CONFIG_MV_ETH_RXQ_DEF);
+	}
+#else /* Legacy parser */
+	mvNetaRxUnicastPromiscSet(port, MV_TRUE);
+	mvNetaSetUcastTable(port, CONFIG_MV_ETH_RXQ_DEF);
+	mvNetaSetSpecialMcastTable(port, CONFIG_MV_ETH_RXQ_DEF);
+	mvNetaSetOtherMcastTable(port, CONFIG_MV_ETH_RXQ_DEF);
+#endif /* CONFIG_MV_ETH_PNC */
+}
+
+void mv_eth_port_filtering_cleanup(int port)
+{
+#ifdef CONFIG_MV_ETH_PNC
+	static bool is_first = true;
+
+	if (MV_NETA_PNC_CAP()) {
+		/* clean TCAM only one, no need to do this per port. */
+		if (is_first) {
+			tcam_hw_init();
+			is_first = false;
+		}
+	} else {
+		mvNetaRxUnicastPromiscSet(port, MV_FALSE);
+		mvNetaSetUcastTable(port, -1);
+		mvNetaSetSpecialMcastTable(port, -1);
+		mvNetaSetOtherMcastTable(port, -1);
+	}
+#else
+	mvNetaRxUnicastPromiscSet(port, MV_FALSE);
+	mvNetaSetUcastTable(port, -1);
+	mvNetaSetSpecialMcastTable(port, -1);
+	mvNetaSetOtherMcastTable(port, -1);
+#endif /* CONFIG_MV_ETH_PNC */
+}
+
+
+static MV_STATUS mv_eth_bm_pools_init(void)
+{
+	int i, j;
+	MV_STATUS status;
+
+	/* Get compile time configuration */
+#ifdef CONFIG_MV_ETH_BM
+	if (MV_NETA_BM_CAP()) {
+		mvNetaBmControl(MV_START);
+		mv_eth_bm_config_get();
+	}
+#endif /* CONFIG_MV_ETH_BM */
+
+	/* Create all pools with maximum capacity */
+	for (i = 0; i < MV_ETH_BM_POOLS; i++) {
+		status = mv_eth_pool_create(i, MV_BM_POOL_CAP_MAX);
+		if (status != MV_OK) {
+			printk(KERN_ERR "%s: can't create bm_pool=%d - capacity=%d\n", __func__, i, MV_BM_POOL_CAP_MAX);
+			for (j = 0; j < i; j++)
+				mv_eth_pool_destroy(j);
+			return status;
+		}
+#ifdef CONFIG_MV_ETH_BM_CPU
+		if (MV_NETA_BM_CAP()) {
+			mv_eth_pool[i].pkt_size = mv_eth_bm_config_pkt_size_get(i);
+			if (mv_eth_pool[i].pkt_size == 0)
+				mvNetaBmPoolBufferSizeSet(i, 0);
+			else
+				mvNetaBmPoolBufferSizeSet(i, RX_BUF_SIZE(mv_eth_pool[i].pkt_size));
+		} else {
+			mv_eth_pool[i].pkt_size = 0;
+		}
+#else
+		mv_eth_pool[i].pkt_size = 0;
+#endif /* CONFIG_MV_ETH_BM */
+	}
+	return MV_OK;
+}
+
+static int mv_eth_port_link_speed_fc(int port, MV_ETH_PORT_SPEED port_speed, int en_force)
+{
+	struct eth_port *pp = mv_eth_port_by_id(port);
+
+	if (en_force) {
+		if (mvNetaSpeedDuplexSet(port, port_speed, MV_ETH_DUPLEX_FULL)) {
+			printk(KERN_ERR "mvEthSpeedDuplexSet failed\n");
+			return -EIO;
+		}
+		if (mvNetaFlowCtrlSet(port, MV_ETH_FC_ENABLE)) {
+			printk(KERN_ERR "mvEthFlowCtrlSet failed\n");
+			return -EIO;
+		}
+		if (mvNetaForceLinkModeSet(port, 1, 0)) {
+			printk(KERN_ERR "mvEthForceLinkModeSet failed\n");
+			return -EIO;
+		}
+
+		set_bit(MV_ETH_F_FORCE_LINK_BIT, &(pp->flags));
+
+	} else {
+		if (mvNetaForceLinkModeSet(port, 0, 0)) {
+			printk(KERN_ERR "mvEthForceLinkModeSet failed\n");
+			return -EIO;
+		}
+		if (mvNetaSpeedDuplexSet(port, MV_ETH_SPEED_AN, MV_ETH_DUPLEX_AN)) {
+			printk(KERN_ERR "mvEthSpeedDuplexSet failed\n");
+			return -EIO;
+		}
+		if (mvNetaFlowCtrlSet(port, MV_ETH_FC_AN_SYM)) {
+			printk(KERN_ERR "mvEthFlowCtrlSet failed\n");
+			return -EIO;
+		}
+
+		clear_bit(MV_ETH_F_FORCE_LINK_BIT, &(pp->flags));
+	}
+	return 0;
+}
+
+/* Get mac address */
+static void mv_eth_get_mac_addr(int port, unsigned char *addr)
+{
+	u32 mac_addr_l, mac_addr_h;
+
+	mac_addr_l = MV_REG_READ(ETH_MAC_ADDR_LOW_REG(port));
+	mac_addr_h = MV_REG_READ(ETH_MAC_ADDR_HIGH_REG(port));
+	addr[0] = (mac_addr_h >> 24) & 0xFF;
+	addr[1] = (mac_addr_h >> 16) & 0xFF;
+	addr[2] = (mac_addr_h >> 8) & 0xFF;
+	addr[3] = mac_addr_h & 0xFF;
+	addr[4] = (mac_addr_l >> 8) & 0xFF;
+	addr[5] = mac_addr_l & 0xFF;
+}
+
+/* Note: call this function only after mv_eth_ports_num is initialized */
+static int mv_eth_load_network_interfaces(struct platform_device *pdev)
+{
+	u32 port;
+	struct eth_port *pp;
+	int err = 0;
+	struct net_device *dev;
+	struct mv_neta_pdata *plat_data = (struct mv_neta_pdata *)pdev->dev.platform_data;
+
+	port = pdev->id;
+	if (plat_data->tx_csum_limit == 0)
+		plat_data->tx_csum_limit = MV_ETH_TX_CSUM_MAX_SIZE;
+
+	pr_info("  o Loading network interface(s) for port #%d: cpu_mask=0x%x, tx_csum_limit=%d\n",
+			port, plat_data->cpu_mask, plat_data->tx_csum_limit);
+
+	dev = mv_eth_netdev_init(pdev);
+
+	if (!dev) {
+		printk(KERN_ERR "%s: can't create netdevice\n", __func__);
+		mv_eth_priv_cleanup(pp);
+		return -EIO;
+	}
+
+	pp = (struct eth_port *)netdev_priv(dev);
+
+	mv_eth_ports[port] = pp;
+
+	/* set port's speed, duplex, fc */
+	if (!MV_PON_PORT(pp->port)) {
+		/* force link, speed and duplex if necessary (e.g. Switch is connected) based on board information */
+		switch (plat_data->speed) {
+		case SPEED_10:
+			err = mv_eth_port_link_speed_fc(pp->port, MV_ETH_SPEED_10, 1);
+			break;
+		case SPEED_100:
+			err = mv_eth_port_link_speed_fc(pp->port, MV_ETH_SPEED_100, 1);
+			break;
+		case SPEED_1000:
+			err = mv_eth_port_link_speed_fc(pp->port, MV_ETH_SPEED_1000, 1);
+			break;
+		case 0:
+			err = mv_eth_port_link_speed_fc(pp->port, MV_ETH_SPEED_AN, 0);
+			break;
+		default:
+			/* do nothing */
+			break;
+		}
+		if (err)
+			return err;
+	}
+
+	if (mv_eth_hal_init(pp)) {
+		printk(KERN_ERR "%s: can't init eth hal\n", __func__);
+		mv_eth_priv_cleanup(pp);
+		return -EIO;
+	}
+#ifdef CONFIG_MV_ETH_HWF
+	if (MV_NETA_HWF_CAP())
+		mvNetaHwfInit(port);
+#endif /* CONFIG_MV_ETH_HWF */
+
+#ifdef CONFIG_MV_ETH_PMT
+	if (MV_NETA_PMT_CAP()) {
+		if (MV_PON_PORT(port))
+			mvNetaPmtInit(port, (MV_NETA_PMT *)ioremap(PMT_PON_PHYS_BASE, PMT_MEM_SIZE));
+		else
+			mvNetaPmtInit(port, (MV_NETA_PMT *)ioremap(PMT_GIGA_PHYS_BASE + port * 0x40000, PMT_MEM_SIZE));
+	}
+#endif /* CONFIG_MV_ETH_PMT */
+
+	pr_info("\t%s p=%d: mtu=%d, mac=" MV_MACQUAD_FMT " (%s)\n",
+		MV_PON_PORT(port) ? "pon" : "giga", port, dev->mtu, MV_MACQUAD(dev->dev_addr), mac_src[port]);
+
+	handle_group_affinity(port);
+
+	mux_eth_ops.set_tag_type = mv_eth_tag_type_set;
+	mv_mux_eth_attach(pp->port, pp->dev, &mux_eth_ops);
+
+#ifdef CONFIG_NETMAP
+	mv_neta_netmap_attach(pp);
+#endif /* CONFIG_NETMAP */
+
+	/* Call mv_eth_open specifically for ports not connected to Linux netdevice */
+	if (!(pp->flags & MV_ETH_F_CONNECT_LINUX))
+		mv_eth_open(pp->dev);
+
+	return MV_OK;
+}
+
+int mv_eth_resume_network_interfaces(struct eth_port *pp)
+{
+	int cpu;
+	int err = 0;
+
+	if (!MV_PON_PORT(pp->port)) {
+		int phyAddr;
+		/* Set the board information regarding PHY address */
+		phyAddr = pp->plat_data->phy_addr;
+		mvNetaPhyAddrSet(pp->port, phyAddr);
+	}
+	mvNetaPortDisable(pp->port);
+	mvNetaDefaultsSet(pp->port);
+
+	if (pp->flags & MV_ETH_F_MH)
+		mvNetaMhSet(pp->port, MV_TAG_TYPE_MH);
+
+	/*TODO: remove to mv_mux_resume */
+	if (pp->tagged) {
+		/* set this port to be in promiscuous mode. MAC filtering is performed by the Switch/Mux */
+		mv_eth_port_promisc_set(pp->port);
+	}
+
+	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+		/* set queue mask per cpu */
+		mvNetaRxqCpuMaskSet(pp->port, pp->cpu_config[cpu]->cpuRxqMask, cpu);
+		mvNetaTxqCpuMaskSet(pp->port, pp->cpu_config[cpu]->cpuTxqMask, cpu);
+	}
+
+	/* set port's speed, duplex, fc */
+	if (!MV_PON_PORT(pp->port)) {
+		/* force link, speed and duplex if necessary (e.g. Switch is connected) based on board information */
+		switch (pp->plat_data->speed) {
+		case SPEED_10:
+			err = mv_eth_port_link_speed_fc(pp->port, MV_ETH_SPEED_10, 1);
+			break;
+		case SPEED_100:
+			err = mv_eth_port_link_speed_fc(pp->port, MV_ETH_SPEED_100, 1);
+			break;
+		case SPEED_1000:
+			err = mv_eth_port_link_speed_fc(pp->port, MV_ETH_SPEED_1000, 1);
+			break;
+		case 0:
+			err = mv_eth_port_link_speed_fc(pp->port, MV_ETH_SPEED_AN, 0);
+			break;
+		default:
+			/* do nothing */
+			break;
+		}
+		if (err)
+			return err;
+	}
+
+	return MV_OK;
+}
+
+#ifdef CONFIG_MV_ETH_BM
+int     mv_eth_bm_pool_restore(struct bm_pool *bm_pool)
+{
+		MV_UNIT_WIN_INFO        winInfo;
+		MV_STATUS               status;
+		int pool = bm_pool->pool;
+
+		mvNetaBmPoolInit(bm_pool->pool, bm_pool->bm_pool, bm_pool->physAddr, bm_pool->capacity);
+#ifdef CONFIG_ARCH_MVEBU
+		status = mvebu_mbus_get_addr_win_info(bm_pool->physAddr, &winInfo.targetId, &winInfo.attrib);
+#else
+		status = mvCtrlAddrWinInfoGet(&winInfo, bm_pool->physAddr);
+#endif
+		if (status != MV_OK) {
+			printk(KERN_ERR "%s: Can't map BM pool #%d. phys_addr=0x%x, status=%d\n",
+				__func__, bm_pool->pool, (unsigned)bm_pool->physAddr, status);
+			mvOsIoCachedFree(NULL, sizeof(MV_U32) *  bm_pool->capacity, bm_pool->physAddr, bm_pool->bm_pool, 0);
+			return MV_ERROR;
+		}
+		mvNetaBmPoolTargetSet(pool, winInfo.targetId, winInfo.attrib);
+		mvNetaBmPoolEnable(pool);
+
+		return MV_OK;
+}
+#endif /*CONFIG_MV_ETH_BM*/
+
+
+/* Refill port pools */
+static int mv_eth_resume_port_pools(struct eth_port *pp)
+{
+	int num;
+
+	if (!pp)
+		return -ENODEV;
+
+
+	/* fill long pool */
+	if (pp->pool_long) {
+		num = mv_eth_pool_add(pp, pp->pool_long->pool, pp->pool_long_num);
+
+		if (num != pp->pool_long_num) {
+			printk(KERN_ERR "%s FAILED long: pool=%d, pkt_size=%d, only %d of %d allocated\n",
+			       __func__, pp->pool_long->pool, pp->pool_long->pkt_size, num, pp->pool_long_num);
+			return MV_ERROR;
+		}
+
+#ifndef CONFIG_MV_ETH_BM_CPU
+	} /*fill long pool */
+#else
+		if (MV_NETA_BM_CAP())
+			mvNetaBmPoolBufSizeSet(pp->port, pp->pool_long->pool, RX_BUF_SIZE(pp->pool_long->pkt_size));
+	}
+
+	if (MV_NETA_BM_CAP() && pp->pool_short) {
+		if (pp->pool_short->pool != pp->pool_long->pool) {
+				/* fill short pool */
+				num = mv_eth_pool_add(pp, pp->pool_short->pool, pp->pool_short_num);
+				if (num != pp->pool_short_num) {
+					printk(KERN_ERR "%s FAILED short: pool=%d, pkt_size=%d - %d of %d buffers added\n",
+					   __func__, pp->pool_short->pool, pp->pool_short->pkt_size, num, pp->pool_short_num);
+					return MV_ERROR;
+				}
+
+				mvNetaBmPoolBufSizeSet(pp->port, pp->pool_short->pool, RX_BUF_SIZE(pp->pool_short->pkt_size));
+
+		} else {
+
+			int dummy_short_pool = (pp->pool_short->pool + 1) % MV_BM_POOLS;
+			/* To disable short pool we choose unused pool and set pkt size to 0 (buffer size = pkt offset) */
+			mvNetaBmPoolBufSizeSet(pp->port, dummy_short_pool, NET_SKB_PAD);
+
+		}
+	}
+
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+	return MV_OK;
+}
+
+static int mv_eth_resume_rxq_txq(struct eth_port *pp, int mtu)
+{
+	int rxq, txp, txq = 0;
+
+
+	if (pp->plat_data->is_sgmii)
+		MV_REG_WRITE(SGMII_SERDES_CFG_REG(pp->port), pp->sgmii_serdes);
+
+	for (txp = 0; txp < pp->txp_num; txp++)
+		mvNetaTxpReset(pp->port, txp);
+
+	mvNetaRxReset(pp->port);
+
+	for (rxq = 0; rxq < CONFIG_MV_ETH_RXQ; rxq++) {
+
+		if (pp->rxq_ctrl[rxq].q) {
+			/* Set Rx descriptors queue starting address */
+			mvNetaRxqAddrSet(pp->port, rxq,  pp->rxq_ctrl[rxq].rxq_size);
+
+			/* Set Offset */
+			mvNetaRxqOffsetSet(pp->port, rxq, NET_SKB_PAD);
+
+			/* Set coalescing pkts and time */
+			mv_eth_rx_pkts_coal_set(pp->port, rxq, pp->rxq_ctrl[rxq].rxq_pkts_coal);
+			mv_eth_rx_time_coal_set(pp->port, rxq, pp->rxq_ctrl[rxq].rxq_time_coal);
+
+#if defined(CONFIG_MV_ETH_BM_CPU)
+			if (MV_NETA_BM_CAP()) {
+				/* Enable / Disable - BM support */
+				if (pp->pool_long && pp->pool_short) {
+
+					if (pp->pool_short->pool == pp->pool_long->pool) {
+						int dummy_short_pool = (pp->pool_short->pool + 1) % MV_BM_POOLS;
+
+					/* To disable short pool we choose unused pool and set pkt size to 0 (buffer size = pkt offset) */
+						mvNetaRxqBmEnable(pp->port, rxq, dummy_short_pool, pp->pool_long->pool);
+					} else
+						mvNetaRxqBmEnable(pp->port, rxq, pp->pool_short->pool,
+								       pp->pool_long->pool);
+				}
+			} else {
+				/* Fill RXQ with buffers from RX pool */
+				mvNetaRxqBufSizeSet(pp->port, rxq, RX_BUF_SIZE(pp->pool_long->pkt_size));
+				mvNetaRxqBmDisable(pp->port, rxq);
+			}
+#else
+			/* Fill RXQ with buffers from RX pool */
+			mvNetaRxqBufSizeSet(pp->port, rxq, RX_BUF_SIZE(pp->pool_long->pkt_size));
+			mvNetaRxqBmDisable(pp->port, rxq);
+#endif /* CONFIG_MV_ETH_BM_CPU */
+			if (mvNetaRxqFreeDescNumGet(pp->port, rxq) == 0)
+				mv_eth_rxq_fill(pp, rxq, pp->rxq_ctrl[rxq].rxq_size);
+		}
+	}
+
+	for (txp = 0; txp < pp->txp_num; txp++) {
+		for (txq = 0; txq < CONFIG_MV_ETH_TXQ; txq++) {
+			struct tx_queue *txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + txq];
+
+			if (txq_ctrl->q != NULL) {
+				mvNetaTxqAddrSet(pp->port, txq_ctrl->txp, txq_ctrl->txq, txq_ctrl->txq_size);
+				mv_eth_tx_done_pkts_coal_set(pp->port, txp, txq,
+							pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + txq].txq_done_pkts_coal);
+			}
+			mvNetaTxqBandwidthSet(pp->port, txp, txq);
+		}
+		mvNetaTxpRateMaxSet(pp->port, txp);
+		mvNetaTxpMaxTxSizeSet(pp->port, txp, RX_PKT_SIZE(mtu));
+	}
+
+	return MV_OK;
+}
+
+/**********************************************************
+ * mv_eth_pnc_resume                                      *
+ **********************************************************/
+#ifdef CONFIG_MV_ETH_PNC
+static void mv_eth_pnc_resume(void)
+{
+	/* TODO - in clock standby ,DO we want to keep old pnc TCAM/SRAM entries ? */
+	if (wol_ports_bmp != 0)
+		return;
+
+	/* Not in WOL, clock standby or suspend to ram mode*/
+	tcam_hw_init();
+
+	if (pnc_default_init())
+		printk(KERN_ERR "%s: Warning PNC init failed\n", __func__);
+
+	/* TODO: load balancing resume */
+}
+#endif /* CONFIG_MV_ETH_PNC */
+
+/***********************************************************
+ * mv_eth_port_resume                                      *
+ ***********************************************************/
+
+int mv_eth_port_resume(int port)
+{
+	struct eth_port *pp;
+	int cpu;
+
+	pp = mv_eth_port_by_id(port);
+
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return  MV_ERROR;
+	}
+
+	if (!(pp->flags & MV_ETH_F_SUSPEND)) {
+		printk(KERN_ERR "%s: port %d is not suspend.\n", __func__, port);
+		return MV_ERROR;
+	}
+	mvNetaPortPowerUp(port, pp->plat_data->is_sgmii, pp->plat_data->is_rgmii, (pp->plat_data->phy_addr == -1));
+
+	mv_eth_win_init(port);
+
+	mv_eth_resume_network_interfaces(pp);
+
+	/* only once for all ports*/
+	if (pm_flag == 0) {
+#ifdef CONFIG_MV_ETH_BM
+		struct bm_pool *ppool;
+		int pool;
+
+		if (MV_NETA_BM_CAP()) {
+			mvNetaBmControl(MV_START);
+
+			mvNetaBmRegsInit();
+
+			for (pool = 0; pool < MV_ETH_BM_POOLS; pool++) {
+				ppool = &mv_eth_pool[pool];
+				if (mv_eth_bm_pool_restore(ppool)) {
+					pr_err("%s: port #%d pool #%d resrote failed.\n", __func__, port, pool);
+					return MV_ERROR;
+				}
+			}
+		}
+#endif /*CONFIG_MV_ETH_BM*/
+
+#ifdef CONFIG_MV_ETH_PNC
+		if (MV_NETA_PNC_CAP())
+			mv_eth_pnc_resume();
+#endif /* CONFIG_MV_ETH_PNC */
+
+		pm_flag = 1;
+	}
+
+	if (pp->flags & MV_ETH_F_STARTED_OLD)
+		(*pp->dev->netdev_ops->ndo_set_rx_mode)(pp->dev);
+
+	for_each_possible_cpu(cpu)
+		pp->cpu_config[cpu]->causeRxTx = 0;
+
+	set_bit(MV_ETH_F_STARTED_BIT, &(pp->flags));
+
+	mv_eth_resume_port_pools(pp);
+
+	mv_eth_resume_rxq_txq(pp, pp->dev->mtu);
+
+	if (pp->flags & MV_ETH_F_STARTED_OLD) {
+		mv_eth_resume_internals(pp, pp->dev->mtu);
+		clear_bit(MV_ETH_F_STARTED_OLD_BIT, &(pp->flags));
+		if (pp->flags & MV_ETH_F_CONNECT_LINUX) {
+			on_each_cpu(mv_eth_interrupts_unmask, pp, 1);
+		}
+	} else
+		clear_bit(MV_ETH_F_STARTED_BIT, &(pp->flags));
+
+
+	clear_bit(MV_ETH_F_SUSPEND_BIT, &(pp->flags));
+
+	printk(KERN_NOTICE "Exit suspend mode on port #%d\n", port);
+
+	return MV_OK;
+}
+
+void    mv_eth_hal_shared_init(struct mv_neta_pdata *plat_data)
+{
+	MV_NETA_HAL_DATA halData;
+	MV_U32 bm_phy_base, bm_size;
+	MV_U32 pnc_phy_base, pnc_size;
+	int ret;
+	unsigned int dev = 0, rev = 0;
+
+	bm_phy_base = 0;
+	pnc_phy_base = 0;
+	bm_size = 0;
+	pnc_size = 0;
+	memset(&halData, 0, sizeof(halData));
+
+	halData.maxPort = plat_data->max_port;
+	halData.pClk = plat_data->pclk;
+	halData.tClk = plat_data->tclk;
+	halData.maxCPUs = plat_data->max_cpu;
+	halData.cpuMask = plat_data->cpu_mask;
+	halData.iocc = arch_is_coherent();
+	halData.ctrlModel = plat_data->ctrl_model;
+	halData.ctrlRev = plat_data->ctrl_rev;
+
+#ifdef CONFIG_ARCH_MVEBU
+	dev = plat_data->ctrl_model;
+	rev = plat_data->ctrl_rev;
+
+	if (MV_NETA_BM_CAP()) {
+		ret = mvebu_mbus_win_addr_get(MV_BM_WIN_ID, MV_BM_WIN_ATTR, &bm_phy_base, &bm_size);
+		if (ret) {
+			pr_err("%s: get BM mbus window failed, error: %d.\n", __func__, ret);
+			return;
+		}
+		halData.bmPhysBase = bm_phy_base;
+		halData.bmVirtBase = (MV_U8 *)ioremap(bm_phy_base, bm_size);
+	}
+	if (MV_NETA_PNC_CAP()) {
+		/* on AXP, PNC and BM share the same window */
+		if ((dev == MV78230_DEV_ID)
+			|| (dev == MV78260_DEV_ID)
+			|| (dev == MV78460_DEV_ID)) {
+			if (!MV_NETA_BM_CAP()) {
+				ret = mvebu_mbus_win_addr_get(MV_BM_WIN_ID, MV_BM_WIN_ATTR, &bm_phy_base, &bm_size);
+				if (ret) {
+					pr_err("%s: get BM mbus window failed, error: %d.\n", __func__, ret);
+					return;
+				}
+			}
+			halData.pncPhysBase = bm_phy_base;
+			halData.pncVirtBase = (MV_U8 *)ioremap(bm_phy_base, bm_size);
+		} else {
+			ret = mvebu_mbus_win_addr_get(MV_PNC_WIN_ID, MV_PNC_WIN_ATTR, &pnc_phy_base, &pnc_size);
+			if (ret) {
+				pr_err("%s: get PNC mbus window failed, error: %d.\n", __func__, ret);
+				return;
+			}
+			halData.pncPhysBase = pnc_phy_base;
+			halData.pncVirtBase = (MV_U8 *)ioremap(pnc_phy_base, pnc_size);
+		}
+		halData.pncTcamSize = plat_data->pnc_tcam_size;
+	}
+#else
+#ifdef CONFIG_MV_ETH_BM
+	halData.bmPhysBase = PNC_BM_PHYS_BASE;
+	halData.bmVirtBase = (MV_U8 *)ioremap(PNC_BM_PHYS_BASE, PNC_BM_SIZE);
+#endif /* CONFIG_MV_ETH_BM */
+
+#ifdef CONFIG_MV_ETH_PNC
+	halData.pncTcamSize = plat_data->pnc_tcam_size;
+	halData.pncPhysBase = PNC_BM_PHYS_BASE;
+	halData.pncVirtBase = (MV_U8 *)ioremap(PNC_BM_PHYS_BASE, PNC_BM_SIZE);
+#endif /* CONFIG_MV_ETH_PNC */
+
+#endif /* CONFIG_ARCH_MVEBU */
+
+	mvNetaHalInit(&halData);
+
+	return;
+}
+
+
+/***********************************************************
+ * mv_eth_win_init --                                      *
+ *   Win initilization                                     *
+ ***********************************************************/
+#ifdef CONFIG_ARCH_MVEBU
+int mv_eth_win_set(int port, u32 base, u32 size, u8 trg_id, u8 win_attr, u32 *enable, u32 *protection)
+{
+	u32 baseReg, sizeReg;
+	u32 alignment;
+	u8 i, attr;
+
+	/* Find the windown not enabled */
+	for (i = 0; i < ETH_MAX_DECODE_WIN; i++) {
+		if (*enable & (1 << i))
+			break;
+	}
+	if (i == ETH_MAX_DECODE_WIN) {
+		pr_err("%s: No window is available\n", __func__);
+		return MV_ERROR;
+	}
+
+	/* check if address is aligned to the size */
+	if (MV_IS_NOT_ALIGN(base, size)) {
+		pr_err("%s: Error setting eth port%d window%d.\n"
+		   "Address 0x%08x is not aligned to size 0x%x.\n",
+		   __func__, port, i, base, size);
+		return MV_ERROR;
+	}
+
+	if (!MV_IS_POWER_OF_2(size)) {
+		pr_err("%s:Error setting eth port%d window%d.\n"
+			"Window size %u is not a power to 2.\n",
+			__func__, port, i, size);
+		return MV_ERROR;
+	}
+
+	attr = win_attr;
+
+#ifdef CONFIG_MV_SUPPORT_L2_DEPOSIT
+	if (trg_id == TARGET_DDR) {
+		/* Setting DRAM windows attribute to :
+			0x3 - Shared transaction + L2 write allocate (L2 Deposit) */
+		attr &= ~(0x30);
+		attr |= 0x30;
+	}
+#endif
+
+	baseReg = (base & ETH_WIN_BASE_MASK);
+	sizeReg = MV_REG_READ(ETH_WIN_SIZE_REG(port, i));
+
+	/* set size */
+	alignment = 1 << ETH_WIN_SIZE_OFFS;
+	sizeReg &= ~ETH_WIN_SIZE_MASK;
+	sizeReg |= (((size / alignment) - 1) << ETH_WIN_SIZE_OFFS);
+
+	/* set attributes */
+	baseReg &= ~ETH_WIN_ATTR_MASK;
+	baseReg |= attr << ETH_WIN_ATTR_OFFS;
+
+	/* set target ID */
+	baseReg &= ~ETH_WIN_TARGET_MASK;
+	baseReg |= trg_id << ETH_WIN_TARGET_OFFS;
+
+	MV_REG_WRITE(ETH_WIN_BASE_REG(port, i), baseReg);
+	MV_REG_WRITE(ETH_WIN_SIZE_REG(port, i), sizeReg);
+
+	*enable &= ~(1 << i);
+	*protection |= (FULL_ACCESS << (i * 2));
+
+	return MV_OK;
+}
+
+int mv_eth_pnc_win_get(u32 *phyaddr_base, u32 *win_size)
+{
+	static bool first_time = true;
+	int ret = 0;
+
+	/* get addr from fdt only once, since during eth resume (S2RAM feature), it might crash system */
+	if (first_time) {
+		ret = mvebu_mbus_win_addr_get(MV_PNC_WIN_ID, MV_PNC_WIN_ATTR, phyaddr_base, win_size);
+		pnc_phyaddr_base = *phyaddr_base;
+		pnc_win_size = *win_size;
+		first_time = false;
+	} else {
+		*phyaddr_base = pnc_phyaddr_base;
+		*win_size = pnc_win_size;
+	}
+	return ret;
+}
+
+int mv_eth_bm_win_get(u32 *phyaddr_base, u32 *win_size)
+{
+	static bool first_time = true;
+	int ret = 0;
+
+	/* get addr from fdt only once, since during eth resume (S2RAM feature), it might crash system */
+	if (first_time) {
+		ret = mvebu_mbus_win_addr_get(MV_BM_WIN_ID, MV_BM_WIN_ATTR, phyaddr_base, win_size);
+		bm_phyaddr_base = *phyaddr_base;
+		bm_win_size = *win_size;
+		first_time = false;
+	} else {
+		*phyaddr_base = bm_phyaddr_base;
+		*win_size = bm_win_size;
+	}
+	return ret;
+}
+
+void	mv_eth_win_init(int port)
+{
+	const struct mbus_dram_target_info *dram;
+	u32 phyaddr_base, win_size;
+	int i, ret;
+	u32 enable = 0, protect = 0;
+	unsigned int dev, rev;
+
+	/* Get SoC ID */
+	if (mvebu_get_soc_id(&dev, &rev))
+		return;
+
+	/* First disable all address decode windows */
+	enable = (1 << ETH_MAX_DECODE_WIN) - 1;
+	MV_REG_WRITE(ETH_BASE_ADDR_ENABLE_REG(port), enable);
+
+	/* Clear Base/Size/Remap registers for all windows */
+	for (i = 0; i < ETH_MAX_DECODE_WIN; i++) {
+		MV_REG_WRITE(ETH_WIN_BASE_REG(port, i), 0);
+		MV_REG_WRITE(ETH_WIN_SIZE_REG(port, i), 0);
+
+		if (i < ETH_MAX_HIGH_ADDR_REMAP_WIN)
+			MV_REG_WRITE(ETH_WIN_REMAP_REG(port, i), 0);
+	}
+
+	/* set dram window */
+	dram = mv_mbus_dram_info();
+	if (!dram) {
+		pr_err("%s: No DRAM information\n", __func__);
+		return;
+	}
+	for (i = 0; i < dram->num_cs; i++) {
+		const struct mbus_dram_window *cs = dram->cs + i;
+		ret = mv_eth_win_set(port, cs->base, cs->size, dram->mbus_dram_target_id, cs->mbus_attr,
+				     &enable, &protect);
+		if (ret) {
+			pr_err("%s: eth window set fail\n", __func__);
+			return;
+		}
+	}
+
+	/* set BM and PnC window */
+	if (MV_NETA_BM_CAP()) {
+		ret = mv_eth_bm_win_get(&phyaddr_base, &win_size);
+		if (ret) {
+			pr_err("%s: BM window addr info get fail\n", __func__);
+			return;
+		}
+		ret = mv_eth_win_set(port, phyaddr_base, win_size, MV_BM_WIN_ID, MV_BM_WIN_ATTR,
+				     &enable, &protect);
+		if (ret) {
+			pr_err("%s: BM window set fail\n", __func__);
+			return;
+		}
+	}
+
+	if (MV_NETA_PNC_CAP()) {
+		/* on AXP, PNC and BM share the same window */
+		if ((dev == MV78230_DEV_ID)
+			|| (dev == MV78260_DEV_ID)
+			|| (dev == MV78460_DEV_ID)) {
+			if (!MV_NETA_BM_CAP()) {
+				ret = mv_eth_bm_win_get(&phyaddr_base, &win_size);
+				if (ret) {
+					pr_err("%s: BM window addr info get fail\n", __func__);
+					return;
+				}
+				ret = mv_eth_win_set(port, phyaddr_base, win_size, MV_BM_WIN_ID, MV_BM_WIN_ATTR,
+							 &enable, &protect);
+				if (ret) {
+					pr_err("%s: BM window set fail\n", __func__);
+					return;
+				}
+			}
+		} else {
+			ret = mv_eth_pnc_win_get(&phyaddr_base, &win_size);
+			if (ret) {
+				pr_err("%s: PNC window addr info get fail\n", __func__);
+				return;
+			}
+			ret = mv_eth_win_set(port, phyaddr_base, win_size, MV_PNC_WIN_ID, MV_PNC_WIN_ATTR,
+					     &enable, &protect);
+			if (ret) {
+				pr_err("%s: PNC window set fail\n", __func__);
+				return;
+			}
+		}
+	}
+
+	/* Set window protection */
+	MV_REG_WRITE(ETH_ACCESS_PROTECT_REG(port), protect);
+	/* Enable window */
+	MV_REG_WRITE(ETH_BASE_ADDR_ENABLE_REG(port), enable);
+}
+
+#else /* !CONFIG_ARCH_MVEBU */
+
+void 	mv_eth_win_init(int port)
+{
+	MV_UNIT_WIN_INFO addrWinMap[MAX_TARGETS + 1];
+	MV_STATUS status;
+	int i;
+
+	status = mvCtrlAddrWinMapBuild(addrWinMap, MAX_TARGETS + 1);
+	if (status != MV_OK)
+		return;
+
+	for (i = 0; i < MAX_TARGETS; i++) {
+		if (addrWinMap[i].enable == MV_FALSE)
+			continue;
+
+#ifdef CONFIG_MV_SUPPORT_L2_DEPOSIT
+		/* Setting DRAM windows attribute to :
+		   0x3 - Shared transaction + L2 write allocate (L2 Deposit) */
+		if (MV_TARGET_IS_DRAM(i)) {
+			addrWinMap[i].attrib &= ~(0x30);
+			addrWinMap[i].attrib |= 0x30;
+		}
+#endif
+	}
+	mvNetaWinInit(port, addrWinMap);
+	return;
+}
+#endif /* CONFIG_ARCH_MVEBU */
+
+/***********************************************************
+ * mv_eth_port_suspend                                     *
+ *   main driver initialization. loading the interfaces.   *
+ ***********************************************************/
+int mv_eth_port_suspend(int port)
+{
+	struct eth_port *pp;
+	int txp;
+
+
+	pp = mv_eth_port_by_id(port);
+	if (!pp)
+		return MV_OK;
+
+	if (pp->flags & MV_ETH_F_SUSPEND) {
+		printk(KERN_ERR "%s: port %d is allready suspend.\n", __func__, port);
+		return MV_ERROR;
+	}
+
+	if (pp->plat_data->is_sgmii)
+		pp->sgmii_serdes = MV_REG_READ(SGMII_SERDES_CFG_REG(port));
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		set_bit(MV_ETH_F_STARTED_OLD_BIT, &(pp->flags));
+		clear_bit(MV_ETH_F_STARTED_BIT, &(pp->flags));
+		mv_eth_suspend_internals(pp);
+	} else
+		clear_bit(MV_ETH_F_STARTED_OLD_BIT, &(pp->flags));
+
+
+#ifdef CONFIG_MV_ETH_HWF
+	if (MV_NETA_HWF_CAP()) {
+		mvNetaHwfEnable(pp->port, 0);
+	} else {
+		/* Reset TX port, transmit all pending packets */
+		for (txp = 0; txp < pp->txp_num; txp++)
+			mv_eth_txp_reset(pp->port, txp);
+	}
+#else
+	/* Reset TX port, transmit all pending packets */
+	for (txp = 0; txp < pp->txp_num; txp++)
+		mv_eth_txp_reset(pp->port, txp);
+#endif  /* !CONFIG_MV_ETH_HWF */
+
+	/* Reset RX port, free the empty buffers form queue */
+	mv_eth_rx_reset(pp->port);
+
+	mv_eth_port_pools_free(port);
+
+	set_bit(MV_ETH_F_SUSPEND_BIT, &(pp->flags));
+
+	printk(KERN_NOTICE "Enter suspend mode on port #%d\n", port);
+	return MV_OK;
+}
+
+/***********************************************************
+ * mv_eth_wol_mode_set --                                   *
+ *   set wol_mode. (power menegment mod)		    *
+ ***********************************************************/
+int	mv_eth_wol_mode_set(int port, int mode)
+{
+	struct eth_port *pp = mv_eth_port_by_id(port);
+
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return -EINVAL;
+	}
+
+	if ((mode < 0) || (mode > 1)) {
+		printk(KERN_ERR "%s: mode = %d, Invalid value.\n", __func__, mode);
+		return -EINVAL;
+	}
+
+	if (pp->flags & MV_ETH_F_SUSPEND) {
+		printk(KERN_ERR "Port %d must resumed before\n", port);
+		return -EINVAL;
+	}
+	pp->pm_mode = mode;
+
+	if (mode)
+#ifdef CONFIG_MV_ETH_PNC_WOL
+		wol_ports_bmp |= (1 << port);
+#else
+		;
+#endif
+	else
+		wol_ports_bmp &= ~(1 << port);
+
+	return MV_OK;
+}
+
+static void mv_eth_sysfs_exit(void)
+{
+	struct device *pd;
+
+	pd = bus_find_device_by_name(&platform_bus_type, NULL, "neta");
+	if (!pd) {
+		printk(KERN_ERR"%s: cannot find pp2 device\n", __func__);
+		return;
+	}
+#ifdef CONFIG_MV_ETH_L2FW
+	mv_neta_l2fw_sysfs_exit(&pd->kobj);
+#endif
+
+#ifdef CONFIG_MV_ETH_PNC_WOL
+	if (MV_NETA_PNC_CAP())
+		mv_neta_wol_sysfs_exit(&pd->kobj);
+#endif
+
+#ifdef CONFIG_MV_ETH_PNC
+	if (MV_NETA_PNC_CAP())
+		mv_neta_pnc_sysfs_exit(&pd->kobj);
+#endif
+
+#ifdef CONFIG_MV_ETH_BM
+	if (MV_NETA_BM_CAP())
+		mv_neta_bm_sysfs_exit(&pd->kobj);
+#endif
+
+#ifdef CONFIG_MV_ETH_HWF
+	if (MV_NETA_HWF_CAP())
+		mv_neta_hwf_sysfs_exit(&pd->kobj);
+#endif
+
+#ifdef CONFIG_MV_PON
+	mv_neta_pon_sysfs_exit(&pd->kobj);
+#endif
+	platform_device_unregister(neta_sysfs);
+}
+
+static int mv_eth_sysfs_init(void)
+{
+	struct device *pd;
+
+	pd = bus_find_device_by_name(&platform_bus_type, NULL, "neta");
+	if (!pd) {
+		neta_sysfs = platform_device_register_simple("neta", -1, NULL, 0);
+		pd = bus_find_device_by_name(&platform_bus_type, NULL, "neta");
+	}
+
+	if (!pd) {
+		printk(KERN_ERR"%s: cannot find neta device\n", __func__);
+		return -1;
+	}
+
+	mv_neta_gbe_sysfs_init(&pd->kobj);
+
+#ifdef CONFIG_MV_PON
+	mv_neta_pon_sysfs_init(&pd->kobj);
+#endif
+
+#ifdef CONFIG_MV_ETH_HWF
+	if (MV_NETA_HWF_CAP())
+		mv_neta_hwf_sysfs_init(&pd->kobj);
+#endif
+
+#ifdef CONFIG_MV_ETH_BM
+	if (MV_NETA_BM_CAP())
+		mv_neta_bm_sysfs_init(&pd->kobj);
+#endif
+
+#ifdef CONFIG_MV_ETH_PNC
+	if (MV_NETA_PNC_CAP())
+		mv_neta_pnc_sysfs_init(&pd->kobj);
+#endif
+
+#ifdef CONFIG_MV_ETH_PNC_WOL
+	if (MV_NETA_PNC_CAP())
+		mv_neta_wol_sysfs_init(&pd->kobj);
+#endif
+
+#ifdef CONFIG_MV_ETH_L2FW
+	mv_neta_l2fw_sysfs_init(&pd->kobj);
+#endif
+
+	return 0;
+}
+
+/***********************************************************
+ * mv_eth_shared_probe --                                         *
+ *   main driver initialization. loading the interfaces.   *
+ ***********************************************************/
+static int	mv_eth_shared_probe(struct mv_neta_pdata *plat_data)
+{
+	int size;
+
+#ifdef ETH_SKB_DEBUG
+	memset(mv_eth_skb_debug, 0, sizeof(mv_eth_skb_debug));
+	spin_lock_init(&skb_debug_lock);
+#endif
+
+	mv_eth_sysfs_init();
+
+	pr_info("SoC: model = 0x%x, revision = 0x%x\n",
+		plat_data->ctrl_model, plat_data->ctrl_rev);
+
+	/* init MAC Unit */
+	mv_eth_hal_shared_init(plat_data);
+
+	mv_eth_ports_num = plat_data->max_port;
+	if (mv_eth_ports_num > CONFIG_MV_ETH_PORTS_NUM)
+		mv_eth_ports_num = CONFIG_MV_ETH_PORTS_NUM;
+
+	mv_eth_config_show();
+
+	size = mv_eth_ports_num * sizeof(struct eth_port *);
+	mv_eth_ports = mvOsMalloc(size);
+	if (!mv_eth_ports)
+		goto oom;
+
+	memset(mv_eth_ports, 0, size);
+
+	if (mv_eth_bm_pools_init())
+		goto oom;
+
+#ifdef CONFIG_MV_ETH_PNC
+	if (MV_NETA_PNC_CAP()) {
+		/* init gbe pnc port mapping */
+		if (pnc_gbe_port_map_init(plat_data->ctrl_model, plat_data->ctrl_rev)) {
+			pr_err("%s: PNC GBE port mapping init failed\n", __func__);
+			goto oom;
+		}
+
+		if (mv_eth_pnc_ctrl_en) {
+			if (pnc_default_init())
+				pr_err("%s: Warning PNC init failed\n", __func__);
+		} else
+			pr_err("%s: PNC control is disabled\n", __func__);
+	}
+#endif /* CONFIG_MV_ETH_PNC */
+
+#ifdef CONFIG_MV_ETH_L2FW
+	mv_l2fw_init();
+#endif
+
+	mv_eth_cpu_counters_init();
+
+	mv_eth_initialized = 1;
+
+	return 0;
+
+oom:
+	if (mv_eth_ports)
+		mvOsFree(mv_eth_ports);
+
+	printk(KERN_ERR "%s: out of memory\n", __func__);
+	return -ENOMEM;
+}
+
+#ifdef CONFIG_OF
+static int pnc_bm_initialize;
+static unsigned int pnc_tcam_line_num;
+static int mv_eth_port_num_get(struct platform_device *pdev);
+
+static int mv_eth_neta_cap_verify(unsigned int neta_cap_bm)
+{
+	unsigned int dev, rev;
+
+	/* Get SoC ID */
+	if (mvebu_get_soc_id(&dev, &rev))
+		return MV_FAIL;
+
+	/* According to SoC type to check dynamic neta capabilities */
+	switch (dev) {
+	/* Armada 370 ID */
+	case MV6710_DEV_ID:
+	case MV6707_DEV_ID:
+		if (neta_cap_bm == 0)
+			return 0;
+		else
+			goto err;
+		break;
+
+	/* Armada XP ID */
+	case MV78230_DEV_ID:
+	case MV78260_DEV_ID:
+	case MV78460_DEV_ID:
+	/* KW2 ID */
+	case MV88F6510_DEV_ID:
+	case MV88F6530_DEV_ID:
+	case MV88F6560_DEV_ID:
+	case MV88F6601_DEV_ID:
+		if (neta_cap_bm == (MV_ETH_CAP_PNC | MV_ETH_CAP_BM | MV_ETH_CAP_HWF | MV_ETH_CAP_PME) ||
+		    neta_cap_bm == (MV_ETH_CAP_PNC | MV_ETH_CAP_BM | MV_ETH_CAP_HWF) ||
+		    neta_cap_bm == (MV_ETH_CAP_PNC | MV_ETH_CAP_BM) ||
+		    neta_cap_bm == MV_ETH_CAP_PNC ||
+		    neta_cap_bm == MV_ETH_CAP_BM ||
+		    neta_cap_bm == 0)
+			return 0;
+		else
+			goto err;
+		break;
+
+	/* Armada A38x ID */
+	case MV88F6810_DEV_ID:
+	case MV88F6811_DEV_ID:
+	case MV88F6820_DEV_ID:
+	case MV88F6828_DEV_ID:
+		if (((rev == MV88F68xx_Z1_REV) && (neta_cap_bm == MV_ETH_CAP_BM || neta_cap_bm == 0)) ||
+		    ((rev == MV88F68xx_A0_REV) && (neta_cap_bm == (MV_ETH_CAP_PNC | MV_ETH_CAP_BM) ||
+						neta_cap_bm == MV_ETH_CAP_PNC ||
+						neta_cap_bm == MV_ETH_CAP_BM ||
+						neta_cap_bm == 0)))
+			return 0;
+		else
+			goto err;
+		break;
+
+	default:
+		goto err;
+		break;
+	}
+
+	return 0;
+
+err:
+	pr_err("Error: invalid NETA capability 0x%x for SoC with dev_id-0x%x, rev_id-%d\n", neta_cap_bm, dev, rev);
+	return -1;
+}
+
+static struct of_device_id of_bm_pnc_table[] = {
+		{ .compatible = "marvell,neta_bm_pnc" },
+};
+
+static int mv_eth_pnc_bm_init(struct mv_neta_pdata *plat_data)
+{
+	struct device_node *bm_pnc_np;
+	struct clk *clk;
+	unsigned int dev, rev;
+
+	if (pnc_bm_initialize > 0) {
+		if (plat_data)
+			plat_data->pnc_tcam_size = pnc_tcam_line_num;
+		return MV_OK;
+	}
+
+	/* Get SoC ID */
+	if (mvebu_get_soc_id(&dev, &rev))
+		return MV_FAIL;
+
+	/* BM&PNC memory iomap */
+	bm_pnc_np = of_find_matching_node(NULL, of_bm_pnc_table);
+	if (bm_pnc_np) {
+		/* Get NETA dynamic capabilities supported  */
+		if (of_property_read_u32(bm_pnc_np, "neta_cap_bm", &neta_cap_bitmap)) {
+			pr_err("could not get bitmap of neta capability\n");
+			return -1;
+		}
+		/* NETA dynamic capability verify */
+		if (mv_eth_neta_cap_verify(neta_cap_bitmap)) {
+			pr_err("NETA capability verify not pass\n");
+			return -1;
+		}
+
+		/* Get PnC TCAM line number */
+		if (MV_NETA_PNC_CAP() && plat_data != NULL) {
+			if (of_property_read_u32(bm_pnc_np, "pnc_tcam_size", &pnc_tcam_line_num)) {
+				pr_err("could not get pnc tcam size\n");
+				return -1;
+			}
+			plat_data->pnc_tcam_size = pnc_tcam_line_num;
+		}
+
+		if (MV_NETA_BM_CAP()) {
+			/* IO map */
+			bm_reg_vbase = (int)of_iomap(bm_pnc_np, 0);
+
+			/* Enable BM gate clock */
+			clk = of_clk_get(bm_pnc_np, 0);
+
+			clk_prepare_enable(clk);
+		}
+
+		if (MV_NETA_PNC_CAP()) {
+			/* IO map */
+			pnc_reg_vbase = (int)of_iomap(bm_pnc_np, 1);
+			/* on AXP, PNC and BM share the same clock */
+			if ((dev == MV78230_DEV_ID)
+				|| (dev == MV78260_DEV_ID)
+				|| (dev == MV78460_DEV_ID)) {
+				/* Enable BM gate clock */
+				if (!MV_NETA_BM_CAP()) {
+					clk = of_clk_get(bm_pnc_np, 0);
+					clk_prepare_enable(clk);
+				}
+			} else {
+				/* Enable PNC gate clock */
+				clk = of_clk_get(bm_pnc_np, 1);
+				clk_prepare_enable(clk);
+			}
+		}
+	} else {
+		bm_reg_vbase = 0;
+		pnc_reg_vbase = 0;
+		neta_cap_bitmap = 0;
+	}
+
+	pnc_bm_initialize++;
+
+	return MV_OK;
+}
+
+static struct mv_neta_pdata *mv_plat_data_get(struct platform_device *pdev)
+{
+	struct mv_neta_pdata *plat_data;
+	struct device_node *np = pdev->dev.of_node;
+	struct device_node *phy_node;
+	void __iomem *base_addr;
+	struct resource *res;
+	struct clk *clk;
+	phy_interface_t phy_mode;
+	const char *mac_addr = NULL;
+	u32 ctrl_model, ctrl_rev;
+
+	/* Get port number */
+	if (of_property_read_u32(np, "eth,port-num", &pdev->id)) {
+		pr_err("could not get port number\n");
+		return NULL;
+	}
+
+	plat_data = kzalloc(sizeof(struct mv_neta_pdata), GFP_KERNEL);
+	if (plat_data == NULL) {
+		pr_err("could not allocate memory for plat_data\n");
+		return NULL;
+	}
+	memset(plat_data, 0, sizeof(struct mv_neta_pdata));
+
+	/* Get the register base */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		pr_err("could not get resource information\n");
+		return NULL;
+	}
+
+	/* build virtual port base address */
+	base_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!base_addr) {
+		pr_err("could not map neta registers\n");
+		return NULL;
+	}
+	port_vbase[pdev->id] = (int)base_addr;
+
+	/* get IRQ number */
+	if (pdev->dev.of_node) {
+		plat_data->irq = irq_of_parse_and_map(np, 0);
+		if (plat_data->irq == 0) {
+			pr_err("could not get IRQ number\n");
+			return NULL;
+		}
+	} else {
+		res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+		if (res == NULL) {
+			pr_err("could not get IRQ number\n");
+			return NULL;
+		}
+		plat_data->irq = res->start;
+	}
+	/* get MAC address */
+	mac_addr = of_get_mac_address(np);
+	if (mac_addr != NULL)
+		memcpy(plat_data->mac_addr, mac_addr, MV_MAC_ADDR_SIZE);
+
+	/* get phy smi address */
+	phy_node = of_parse_phandle(np, "phy", 0);
+	if (!phy_node) {
+		pr_err("no associated PHY\n");
+		return NULL;
+	}
+	if (of_property_read_u32(phy_node, "reg", &plat_data->phy_addr)) {
+		pr_err("could not PHY SMI address\n");
+		return NULL;
+	}
+
+	/* FDT does not support the '-1' convention of the inband fake phy address */
+	/* In case of FDT, use '999' phy address to represent inband mode */
+	if (plat_data->phy_addr == 999)
+		plat_data->phy_addr = -1;
+
+	/* Get port MTU */
+	if (of_property_read_u32(np, "eth,port-mtu", &plat_data->mtu)) {
+		pr_err("could not get MTU\n");
+		return NULL;
+	}
+	/* Get TX checksum offload limit */
+	if (of_property_read_u32(np, "tx-csum-limit", &plat_data->tx_csum_limit))
+		plat_data->tx_csum_limit = MV_ETH_TX_CSUM_MAX_SIZE;
+
+	/* Initialize PnC and BM module */
+	if (mv_eth_pnc_bm_init(plat_data)) {
+		pr_err("pnc and bm init fail\n");
+		return NULL;
+	}
+
+	/* Get port PHY mode */
+	phy_mode = of_get_phy_mode(np);
+	if (phy_mode < 0) {
+		pr_err("unknown PHY mode\n");
+		return NULL;
+	}
+	switch (phy_mode) {
+	case PHY_INTERFACE_MODE_SGMII:
+		plat_data->is_sgmii = 1;
+		plat_data->is_rgmii = 0;
+	break;
+	case PHY_INTERFACE_MODE_RGMII:
+	case PHY_INTERFACE_MODE_RGMII_ID:
+		plat_data->is_sgmii = 0;
+		plat_data->is_rgmii = 1;
+	break;
+	default:
+		pr_err("unsupported PHY mode (%d)\n", phy_mode);
+		return NULL;
+	}
+
+	/* Global Parameters */
+	plat_data->tclk = 166666667;    /*mvBoardTclkGet();*/
+	plat_data->max_port = mv_eth_port_num_get(pdev);
+
+	/* Per port parameters */
+	plat_data->cpu_mask  = (1 << nr_cpu_ids) - 1;
+	plat_data->duplex = DUPLEX_FULL;
+	plat_data->speed = MV_ETH_SPEED_AN;
+
+	/* Get SoC ID */
+	if (mvebu_get_soc_id(&ctrl_model, &ctrl_rev)) {
+		mvOsPrintf("%s: get soc_id failed\n", __func__);
+		return NULL;
+	}
+	plat_data->ctrl_model = ctrl_model;
+	plat_data->ctrl_rev = ctrl_rev;
+
+	pdev->dev.platform_data = plat_data;
+
+	clk = devm_clk_get(&pdev->dev, 0);
+	clk_prepare_enable(clk);
+
+	return plat_data;
+}
+#endif /* CONFIG_OF */
+
+/***********************************************************
+ * mv_eth_probe --                                         *
+ *   main driver initialization. loading the interfaces.   *
+ ***********************************************************/
+static int mv_eth_probe(struct platform_device *pdev)
+{
+	int port;
+
+#ifdef CONFIG_OF
+	struct mv_neta_pdata *plat_data = mv_plat_data_get(pdev);
+	pdev->dev.platform_data = plat_data;
+#else
+	struct mv_neta_pdata *plat_data = (struct mv_neta_pdata *)pdev->dev.platform_data;
+	struct resource *res;
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (res == NULL) {
+		pr_err("could not get IRQ number\n");
+		return -ENODEV;
+	}
+	plat_data->irq = res->start;
+
+	/* Initialize NETA capability bitmap */
+	neta_cap_bitmap = 0x0;
+
+#ifdef CONFIG_MV_ETH_PNC
+	neta_cap_bitmap |= MV_ETH_CAP_PNC;
+#endif
+
+#ifdef CONFIG_MV_ETH_BM
+	neta_cap_bitmap |= MV_ETH_CAP_BM;
+#endif
+
+#ifdef CONFIG_MV_ETH_HWF
+	neta_cap_bitmap |= MV_ETH_CAP_HWF;
+#endif
+
+#ifdef CONFIG_MV_ETH_PME
+	neta_cap_bitmap |= MV_ETH_CAP_PME;
+#endif
+
+#endif /* CONFIG_OF */
+
+	if (plat_data == NULL)
+		return -ENODEV;
+
+	port = pdev->id;
+	if (!mv_eth_initialized) {
+		neta_global_dev = &pdev->dev;
+		if (mv_eth_shared_probe(plat_data))
+			return -ENODEV;
+	}
+
+#ifdef CONFIG_OF
+	/* init SMI register */
+	mvEthPhySmiAddrSet(ETH_SMI_REG(port));
+#endif
+
+	pr_info("port #%d: is_sgmii=%d, is_rgmii=%d, phy_addr=%d\n",
+		port, plat_data->is_sgmii, plat_data->is_rgmii, plat_data->phy_addr);
+	mvNetaPortPowerUp(port, plat_data->is_sgmii, plat_data->is_rgmii, (plat_data->phy_addr == -1));
+
+	mv_eth_win_init(port);
+
+	if (mv_eth_load_network_interfaces(pdev))
+		return -ENODEV;
+
+	printk(KERN_ERR "\n");
+
+	return 0;
+}
+
+/***********************************************************
+ * mv_eth_tx_timeout --                                    *
+ *   nothing to be done (?)                                *
+ ***********************************************************/
+static void mv_eth_tx_timeout(struct net_device *dev)
+{
+#ifdef CONFIG_MV_ETH_STAT_ERR
+	struct eth_port *pp = MV_ETH_PRIV(dev);
+
+	pp->stats.tx_timeout++;
+#endif /* #ifdef CONFIG_MV_ETH_STAT_ERR */
+
+	printk(KERN_INFO "%s: tx timeout\n", dev->name);
+}
+
+/***************************************************************
+ * mv_eth_netdev_init -- Allocate and initialize net_device    *
+ *                   structure                                 *
+ ***************************************************************/
+struct net_device *mv_eth_netdev_init(struct platform_device *pdev)
+{
+	int cpu, i;
+	struct net_device *dev;
+	struct eth_port *pp;
+	struct cpu_ctrl	*cpuCtrl;
+	int port = pdev->id;
+	struct mv_neta_pdata *plat_data = (struct mv_neta_pdata *)pdev->dev.platform_data;
+
+	dev = alloc_etherdev_mq(sizeof(struct eth_port), CONFIG_MV_ETH_TXQ);
+	if (!dev)
+		return NULL;
+
+	pp = (struct eth_port *)netdev_priv(dev);
+	if (!pp)
+		return NULL;
+
+	memset(pp, 0, sizeof(struct eth_port));
+	pp->dev = dev;
+	pp->plat_data = plat_data;
+	pp->cpu_mask = plat_data->cpu_mask;
+
+	dev->mtu = plat_data->mtu;
+
+	if (!is_valid_ether_addr(plat_data->mac_addr)) {
+		mv_eth_get_mac_addr(port, plat_data->mac_addr);
+		if (is_valid_ether_addr(plat_data->mac_addr)) {
+			memcpy(dev->dev_addr, plat_data->mac_addr, MV_MAC_ADDR_SIZE);
+			mac_src[port] = "hw config";
+		} else {
+#ifdef CONFIG_OF
+			eth_hw_addr_random(dev);
+			mac_src[port] = "random";
+#else
+			memset(dev->dev_addr, 0, MV_MAC_ADDR_SIZE);
+			mac_src[port] = "invalid";
+#endif /* CONFIG_OF */
+		}
+	} else {
+		memcpy(dev->dev_addr, plat_data->mac_addr, MV_MAC_ADDR_SIZE);
+		mac_src[port] = "platform";
+	}
+	dev->irq = plat_data->irq;
+
+	dev->tx_queue_len = CONFIG_MV_ETH_TXQ_DESC;
+	dev->watchdog_timeo = 5 * HZ;
+	dev->netdev_ops = &mv_eth_netdev_ops;
+  strcpy(dev->name ,"egiga%d");  //alpha.jack.200140707 modified
+
+	if (mv_eth_priv_init(pp, port)) {
+		mv_eth_priv_cleanup(pp);
+		return NULL;
+	}
+
+	/* Default NAPI initialization */
+	for (i = 0; i < CONFIG_MV_ETH_NAPI_GROUPS; i++) {
+		pp->napiGroup[i] = kmalloc(sizeof(struct napi_struct), GFP_KERNEL);
+		memset(pp->napiGroup[i], 0, sizeof(struct napi_struct));
+	}
+
+	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+		cpuCtrl = pp->cpu_config[cpu];
+		cpuCtrl->napiCpuGroup = 0;
+		cpuCtrl->napi         = NULL;
+	}
+
+	/* Add NAPI default group */
+	if (pp->flags & MV_ETH_F_CONNECT_LINUX) {
+		for (i = 0; i < CONFIG_MV_ETH_NAPI_GROUPS; i++)
+			netif_napi_add(dev, pp->napiGroup[i], mv_eth_poll, pp->weight);
+	}
+
+	SET_ETHTOOL_OPS(dev, &mv_eth_tool_ops);
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	if (pp->flags & MV_ETH_F_CONNECT_LINUX) {
+		mv_eth_netdev_init_features(dev);
+		if (register_netdev(dev)) {
+			printk(KERN_ERR "failed to register %s\n", dev->name);
+			free_netdev(dev);
+			return NULL;
+		} else {
+
+			/* register_netdev() always sets NETIF_F_GRO via NETIF_F_SOFT_FEATURES */
+
+#ifndef CONFIG_MV_ETH_GRO_DEF
+			dev->features &= ~NETIF_F_GRO;
+#endif /* CONFIG_MV_ETH_GRO_DEF */
+
+			printk(KERN_ERR "    o %s, ifindex = %d, GbE port = %d", dev->name, dev->ifindex, pp->port);
+
+			printk(KERN_CONT "\n");
+		}
+	}
+	platform_set_drvdata(pdev, pp->dev);
+
+	return dev;
+}
+
+bool mv_eth_netdev_find(unsigned int dev_idx)
+{
+	int port;
+
+	for (port = 0; port < mv_eth_ports_num; port++) {
+		struct eth_port *pp = mv_eth_port_by_id(port);
+
+		if (pp && pp->dev && (pp->dev->ifindex == dev_idx))
+			return true;
+	}
+	return false;
+}
+EXPORT_SYMBOL(mv_eth_netdev_find);
+
+int mv_eth_hal_init(struct eth_port *pp)
+{
+	int rxq, txp, txq, size, cpu;
+	struct tx_queue *txq_ctrl;
+	struct rx_queue *rxq_ctrl;
+
+	if (!MV_PON_PORT(pp->port)) {
+		int phyAddr;
+
+		/* Set the board information regarding PHY address */
+		phyAddr = pp->plat_data->phy_addr;
+		mvNetaPhyAddrSet(pp->port, phyAddr);
+	}
+
+	/* Init port */
+	pp->port_ctrl = mvNetaPortInit(pp->port, pp->dev->dev.parent);
+	if (!pp->port_ctrl) {
+		printk(KERN_ERR "%s: failed to load port=%d\n", __func__, pp->port);
+		return -ENODEV;
+	}
+
+	size = pp->txp_num * CONFIG_MV_ETH_TXQ * sizeof(struct tx_queue);
+	pp->txq_ctrl = mvOsMalloc(size);
+	if (!pp->txq_ctrl)
+		goto oom;
+
+	memset(pp->txq_ctrl, 0, size);
+
+	/* Create TX descriptor rings */
+	for (txp = 0; txp < pp->txp_num; txp++) {
+		for (txq = 0; txq < CONFIG_MV_ETH_TXQ; txq++) {
+			txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + txq];
+
+			txq_ctrl->q = NULL;
+			txq_ctrl->hwf_rxp = 0xFF;
+			txq_ctrl->txp = txp;
+			txq_ctrl->txq = txq;
+			txq_ctrl->txq_size = CONFIG_MV_ETH_TXQ_DESC;
+			txq_ctrl->txq_count = 0;
+			txq_ctrl->bm_only = MV_FALSE;
+
+			txq_ctrl->shadow_txq_put_i = 0;
+			txq_ctrl->shadow_txq_get_i = 0;
+			txq_ctrl->txq_done_pkts_coal = mv_ctrl_txdone;
+			txq_ctrl->flags = MV_ETH_F_TX_SHARED;
+			txq_ctrl->nfpCounter = 0;
+			for_each_possible_cpu(cpu)
+				txq_ctrl->cpu_owner[cpu] = 0;
+		}
+	}
+
+	pp->rxq_ctrl = mvOsMalloc(CONFIG_MV_ETH_RXQ * sizeof(struct rx_queue));
+	if (!pp->rxq_ctrl)
+		goto oom;
+
+	memset(pp->rxq_ctrl, 0, CONFIG_MV_ETH_RXQ * sizeof(struct rx_queue));
+
+	/* Create Rx descriptor rings */
+	for (rxq = 0; rxq < CONFIG_MV_ETH_RXQ; rxq++) {
+		rxq_ctrl = &pp->rxq_ctrl[rxq];
+		rxq_ctrl->rxq_size = CONFIG_MV_ETH_RXQ_DESC;
+		rxq_ctrl->rxq_pkts_coal = CONFIG_MV_ETH_RX_COAL_PKTS;
+		rxq_ctrl->rxq_time_coal = CONFIG_MV_ETH_RX_COAL_USEC;
+	}
+
+	if (pp->flags & MV_ETH_F_MH)
+		mvNetaMhSet(pp->port, MV_TAG_TYPE_MH);
+
+	/* Configure defaults */
+	pp->autoneg_cfg  = AUTONEG_ENABLE;
+	pp->speed_cfg    = SPEED_1000;
+	pp->duplex_cfg  = DUPLEX_FULL;
+	pp->advertise_cfg = 0x2f;
+	pp->rx_time_coal_cfg = CONFIG_MV_ETH_RX_COAL_USEC;
+	pp->rx_pkts_coal_cfg = CONFIG_MV_ETH_RX_COAL_PKTS;
+	pp->tx_pkts_coal_cfg = mv_ctrl_txdone;
+	pp->rx_time_low_coal_cfg = CONFIG_MV_ETH_RX_COAL_USEC >> 2;
+	pp->rx_time_high_coal_cfg = CONFIG_MV_ETH_RX_COAL_USEC << 2;
+	pp->rx_pkts_low_coal_cfg = CONFIG_MV_ETH_RX_COAL_PKTS;
+	pp->rx_pkts_high_coal_cfg = CONFIG_MV_ETH_RX_COAL_PKTS;
+	pp->pkt_rate_low_cfg = 1000;
+	pp->pkt_rate_high_cfg = 50000;
+	pp->rate_sample_cfg = 5;
+	pp->rate_current = 0; /* Unknown */
+
+	return 0;
+oom:
+	printk(KERN_ERR "%s: port=%d: out of memory\n", __func__, pp->port);
+	return -ENODEV;
+}
+
+/* Show network driver configuration */
+void mv_eth_config_show(void)
+{
+	/* Check restrictions */
+#if (CONFIG_MV_ETH_PORTS_NUM > MV_ETH_MAX_PORTS)
+#   error "CONFIG_MV_ETH_PORTS_NUM is large than MV_ETH_MAX_PORTS"
+#endif
+
+#if (CONFIG_MV_ETH_RXQ > MV_ETH_MAX_RXQ)
+#   error "CONFIG_MV_ETH_RXQ is large than MV_ETH_MAX_RXQ"
+#endif
+
+#if CONFIG_MV_ETH_TXQ > MV_ETH_MAX_TXQ
+#   error "CONFIG_MV_ETH_TXQ is large than MV_ETH_MAX_TXQ"
+#endif
+
+#if defined(CONFIG_MV_ETH_TSO) && !defined(CONFIG_MV_ETH_TX_CSUM_OFFLOAD)
+#   error "If GSO enabled - TX checksum offload must be enabled too"
+#endif
+
+	pr_info("  o %d Giga ports supported\n", mv_eth_ports_num);
+
+#ifdef CONFIG_MV_PON
+	pr_info("  o Giga PON port is #%d: - %d TCONTs supported\n", MV_PON_PORT_ID, MV_ETH_MAX_TCONT());
+#endif
+
+#ifdef CONFIG_MV_NETA_SKB_RECYCLE
+	pr_info("  o SKB recycle supported (%s)\n", mv_ctrl_recycle ? "Enabled" : "Disabled");
+#endif
+
+#ifdef CONFIG_MV_ETH_NETA
+	pr_info("  o NETA acceleration mode %d\n", mvNetaAccMode());
+#endif
+
+#ifdef CONFIG_MV_ETH_BM_CPU
+	if (MV_NETA_BM_CAP())
+		pr_info("  o BM supported for CPU: %d BM pools\n", MV_ETH_BM_POOLS);
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+#ifdef CONFIG_MV_ETH_PNC
+	if (MV_NETA_PNC_CAP())
+		pr_info("  o PnC supported (%s)\n", mv_eth_pnc_ctrl_en ? "Enabled" : "Disabled");
+#endif
+
+#ifdef CONFIG_MV_ETH_HWF
+	if (MV_NETA_HWF_CAP())
+		pr_info("  o HWF supported\n");
+#endif
+
+#ifdef CONFIG_MV_ETH_PMT
+	if (MV_NETA_PMT_CAP())
+		pr_info("  o PMT supported\n");
+#endif
+
+	pr_info("  o RX Queue support: %d Queues * %d Descriptors\n", CONFIG_MV_ETH_RXQ, CONFIG_MV_ETH_RXQ_DESC);
+
+	pr_info("  o TX Queue support: %d Queues * %d Descriptors\n", CONFIG_MV_ETH_TXQ, CONFIG_MV_ETH_TXQ_DESC);
+
+#if defined(CONFIG_MV_ETH_TSO)
+	pr_info("  o GSO supported\n");
+#endif /* CONFIG_MV_ETH_TSO */
+
+#if defined(CONFIG_MV_ETH_GRO)
+	pr_info("  o GRO supported\n");
+#endif /* CONFIG_MV_ETH_GRO */
+
+#if defined(CONFIG_MV_ETH_RX_CSUM_OFFLOAD)
+	pr_info("  o Receive checksum offload supported\n");
+#endif
+#if defined(CONFIG_MV_ETH_TX_CSUM_OFFLOAD)
+	pr_info("  o Transmit checksum offload supported\n");
+#endif
+
+#if defined(CONFIG_MV_ETH_NFP)
+	pr_info("  o NFP is supported\n");
+#endif /* CONFIG_MV_ETH_NFP */
+
+#if defined(CONFIG_MV_ETH_NFP_HOOKS)
+	pr_info("  o NFP Hooks are supported\n");
+#endif /* CONFIG_MV_ETH_NFP_HOOKS */
+
+#if defined(CONFIG_MV_ETH_NFP_EXT)
+	pr_info("  o NFP External drivers supported: up to %d interfaces\n", NFP_EXT_NUM);
+#endif /* CONFIG_MV_ETH_NFP_EXT */
+
+#ifdef CONFIG_MV_ETH_STAT_ERR
+	pr_info("  o Driver ERROR statistics enabled\n");
+#endif
+
+#ifdef CONFIG_MV_ETH_STAT_INF
+	pr_info("  o Driver INFO statistics enabled\n");
+#endif
+
+#ifdef CONFIG_MV_ETH_STAT_DBG
+	pr_info("  o Driver DEBUG statistics enabled\n");
+#endif
+
+#ifdef ETH_DEBUG
+	pr_info("  o Driver debug messages enabled\n");
+#endif
+
+#if defined(CONFIG_MV_ETH_SWITCH)
+	pr_info("  o Switch support enabled\n");
+
+#endif /* CONFIG_MV_ETH_SWITCH */
+
+	pr_info("\n");
+}
+
+/* Set network device features on initialization. Take into account default compile time configuration. */
+static void mv_eth_netdev_init_features(struct net_device *dev)
+{
+	dev->features |= NETIF_F_SG | NETIF_F_LLTX;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+	dev->hw_features |= NETIF_F_SG;
+#endif
+
+#ifdef CONFIG_MV_ETH_PNC_L3_FLOW
+	dev->features |= NETIF_F_NTUPLE;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+	dev->hw_features |= NETIF_F_NTUPLE;
+#endif
+#endif /* CONFIG_MV_ETH_PNC_L3_FLOW */
+
+#if defined(MV_ETH_PNC_LB) && defined(CONFIG_MV_ETH_PNC)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+	if (MV_NETA_PNC_CAP())
+		dev->hw_features |= NETIF_F_RXHASH;
+#endif
+#endif
+
+#ifdef CONFIG_MV_ETH_TX_CSUM_OFFLOAD
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+	dev->hw_features |= NETIF_F_IP_CSUM;
+#endif
+#ifdef CONFIG_MV_ETH_TX_CSUM_OFFLOAD_DEF
+	dev->features |= NETIF_F_IP_CSUM;
+#endif /* CONFIG_MV_ETH_TX_CSUM_OFFLOAD_DEF */
+#endif /* CONFIG_MV_ETH_TX_CSUM_OFFLOAD */
+
+#ifdef CONFIG_MV_ETH_RX_CSUM_OFFLOAD
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+	dev->hw_features |= NETIF_F_RXCSUM;
+#endif
+#ifdef CONFIG_MV_ETH_RX_CSUM_OFFLOAD_DEF
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+	dev->features |= NETIF_F_RXCSUM;
+#endif
+#endif /* CONFIG_MV_ETH_RX_CSUM_OFFLOAD_DEF */
+#endif /* CONFIG_MV_ETH_RX_CSUM_OFFLOAD */
+
+#ifdef CONFIG_MV_ETH_TSO
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+	dev->hw_features |= NETIF_F_TSO;
+#endif
+#ifdef CONFIG_MV_ETH_TSO_DEF
+	dev->features |= NETIF_F_TSO;
+#endif /* CONFIG_MV_ETH_TSO_DEF */
+#endif /* CONFIG_MV_ETH_TSO */
+}
+
+int mv_eth_napi_set_cpu_affinity(int port, int group, int affinity)
+{
+	struct eth_port *pp = mv_eth_port_by_id(port);
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return -1;
+	}
+
+	if (group >= CONFIG_MV_ETH_NAPI_GROUPS) {
+		printk(KERN_ERR "%s: group number is higher than %d\n", __func__, CONFIG_MV_ETH_NAPI_GROUPS-1);
+		return -1;
+		}
+	if (pp->flags & MV_ETH_F_STARTED) {
+		printk(KERN_ERR "Port %d must be stopped before\n", port);
+		return -EINVAL;
+	}
+	set_cpu_affinity(pp, affinity, group);
+	return 0;
+
+}
+void handle_group_affinity(int port)
+{
+	int group;
+	struct eth_port *pp;
+	MV_U32 group_cpu_affinity[CONFIG_MV_ETH_NAPI_GROUPS];
+	MV_U32 rxq_affinity[CONFIG_MV_ETH_NAPI_GROUPS];
+
+	group_cpu_affinity[0] = CONFIG_MV_ETH_GROUP0_CPU;
+	rxq_affinity[0] 	  = CONFIG_MV_ETH_GROUP0_RXQ;
+
+#ifdef CONFIG_MV_ETH_GROUP1_CPU
+		group_cpu_affinity[1] = CONFIG_MV_ETH_GROUP1_CPU;
+		rxq_affinity[1] 	  = CONFIG_MV_ETH_GROUP1_RXQ;
+#endif
+
+#ifdef CONFIG_MV_ETH_GROUP2_CPU
+		group_cpu_affinity[2] = CONFIG_MV_ETH_GROUP2_CPU;
+		rxq_affinity[2] 	  = CONFIG_MV_ETH_GROUP2_RXQ;
+#endif
+
+#ifdef CONFIG_MV_ETH_GROUP3_CPU
+		group_cpu_affinity[3] = CONFIG_MV_ETH_GROUP3_CPU;
+		rxq_affinity[3] 	  = CONFIG_MV_ETH_GROUP3_RXQ;
+#endif
+
+	pp = mv_eth_port_by_id(port);
+	if (pp == NULL)
+		return;
+
+	for (group = 0; group < CONFIG_MV_ETH_NAPI_GROUPS; group++)
+		set_cpu_affinity(pp, group_cpu_affinity[group], group);
+	for (group = 0; group < CONFIG_MV_ETH_NAPI_GROUPS; group++)
+		set_rxq_affinity(pp, rxq_affinity[group], group);
+}
+
+int	mv_eth_napi_set_rxq_affinity(int port, int group, int rxqAffinity)
+{
+	struct eth_port *pp = mv_eth_port_by_id(port);
+
+	if (pp == NULL) {
+		printk(KERN_ERR "%s: pp is null \n", __func__);
+		return MV_FAIL;
+	}
+	if (group >= CONFIG_MV_ETH_NAPI_GROUPS) {
+		printk(KERN_ERR "%s: group number is higher than %d\n", __func__, CONFIG_MV_ETH_NAPI_GROUPS-1);
+		return -1;
+		}
+	if (pp->flags & MV_ETH_F_STARTED) {
+		printk(KERN_ERR "Port %d must be stopped before\n", port);
+		return -EINVAL;
+	}
+
+	set_rxq_affinity(pp, rxqAffinity, group);
+	return 0;
+}
+
+
+void mv_eth_napi_group_show(int port)
+{
+	int cpu, group;
+	struct cpu_ctrl	*cpuCtrl;
+	struct eth_port *pp = mv_eth_port_by_id(port);
+
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return;
+	}
+	for (group = 0; group < CONFIG_MV_ETH_NAPI_GROUPS; group++) {
+		printk(KERN_INFO "group=%d:\n", group);
+		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+			cpuCtrl = pp->cpu_config[cpu];
+			if (!(MV_BIT_CHECK(pp->cpu_mask, cpu)))
+				continue;
+			if (cpuCtrl->napiCpuGroup == group) {
+				printk(KERN_INFO "   CPU%d ", cpu);
+				mvNetaCpuDump(port, cpu, 0);
+				printk(KERN_INFO "\n");
+			}
+		}
+		printk(KERN_INFO "\n");
+	}
+}
+
+void mv_eth_priv_cleanup(struct eth_port *pp)
+{
+	/* TODO */
+}
+
+#ifdef CONFIG_MV_ETH_BM_CPU
+static struct bm_pool *mv_eth_long_pool_get(struct eth_port *pp, int pkt_size)
+{
+	int             pool, i;
+	struct bm_pool	*bm_pool, *temp_pool = NULL;
+	unsigned long   flags = 0;
+
+	if (MV_NETA_BM_CAP()) {
+		pool = mv_eth_bm_config_long_pool_get(pp->port);
+		if (pool != -1) /* constant long pool for the port */
+			return &mv_eth_pool[pool];
+
+		/* look for free pool pkt_size == 0. First check pool == pp->port */
+		/* if no free pool choose larger than required */
+		for (i = 0; i < MV_ETH_BM_POOLS; i++) {
+			pool = (pp->port + i) % MV_ETH_BM_POOLS;
+			bm_pool = &mv_eth_pool[pool];
+
+			MV_ETH_LOCK(&bm_pool->lock, flags);
+
+			if (bm_pool->pkt_size == 0) {
+				/* found free pool */
+
+				MV_ETH_UNLOCK(&bm_pool->lock, flags);
+				return bm_pool;
+			}
+			if (bm_pool->pkt_size >= pkt_size) {
+				if (temp_pool == NULL)
+					temp_pool = bm_pool;
+				else if (bm_pool->pkt_size < temp_pool->pkt_size)
+					temp_pool = bm_pool;
+			}
+			MV_ETH_UNLOCK(&bm_pool->lock, flags);
+		}
+		return temp_pool;
+	} else {
+		return &mv_eth_pool[pp->port];
+	}
+}
+#else
+static struct bm_pool *mv_eth_long_pool_get(struct eth_port *pp, int pkt_size)
+{
+	return &mv_eth_pool[pp->port];
+}
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+static int mv_eth_no_bm_cpu_rxq_fill(struct eth_port *pp, int rxq, int num)
+{
+	int i;
+	struct eth_pbuf *pkt;
+	struct bm_pool *bm_pool;
+	MV_NETA_RXQ_CTRL *rx_ctrl;
+	struct neta_rx_desc *rx_desc;
+
+	bm_pool = pp->pool_long;
+
+	rx_ctrl = pp->rxq_ctrl[rxq].q;
+	if (!rx_ctrl) {
+		printk(KERN_ERR "%s: rxq %d is not initialized\n", __func__, rxq);
+		return 0;
+	}
+
+	for (i = 0; i < num; i++) {
+		pkt = mv_eth_pool_get(pp, bm_pool);
+		if (pkt) {
+			rx_desc = (struct neta_rx_desc *)MV_NETA_QUEUE_DESC_PTR(&rx_ctrl->queueCtrl, i);
+			memset(rx_desc, 0, sizeof(struct neta_rx_desc));
+
+			mvNetaRxDescFill(rx_desc, pkt->physAddr, (MV_U32)pkt);
+			mvOsCacheLineFlush(pp->dev->dev.parent, rx_desc);
+		} else {
+			printk(KERN_ERR "%s: rxq %d, %d of %d buffers are filled\n", __func__, rxq, i, num);
+			break;
+		}
+	}
+
+	return i;
+}
+
+static int mv_eth_rxq_fill(struct eth_port *pp, int rxq, int num)
+{
+	int i;
+
+#ifndef CONFIG_MV_ETH_BM_CPU
+	i = mv_eth_no_bm_cpu_rxq_fill(pp, rxq, num);
+#else
+	if (MV_NETA_BM_CAP())
+		i = num;
+	else
+		i = mv_eth_no_bm_cpu_rxq_fill(pp, rxq, num);
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+	mvNetaRxqNonOccupDescAdd(pp->port, rxq, i);
+
+	return i;
+}
+
+static int mv_eth_txq_create(struct eth_port *pp, struct tx_queue *txq_ctrl)
+{
+	txq_ctrl->q = mvNetaTxqInit(pp->port, txq_ctrl->txp, txq_ctrl->txq, txq_ctrl->txq_size);
+	if (txq_ctrl->q == NULL) {
+		printk(KERN_ERR "%s: can't create TxQ - port=%d, txp=%d, txq=%d, desc=%d\n",
+		       __func__, pp->port, txq_ctrl->txp, txq_ctrl->txp, txq_ctrl->txq_size);
+		return -ENODEV;
+	}
+
+	txq_ctrl->shadow_txq = mvOsMalloc(txq_ctrl->txq_size * sizeof(MV_ULONG));
+	if (txq_ctrl->shadow_txq == NULL)
+		goto no_mem;
+
+	/* reset txq */
+	txq_ctrl->txq_count = 0;
+	txq_ctrl->shadow_txq_put_i = 0;
+	txq_ctrl->shadow_txq_get_i = 0;
+
+#ifdef CONFIG_MV_ETH_HWF
+	if (MV_NETA_HWF_CAP())
+		mvNetaHwfTxqInit(pp->port, txq_ctrl->txp, txq_ctrl->txq);
+#endif /* CONFIG_MV_ETH_HWF */
+
+	return 0;
+
+no_mem:
+	mv_eth_txq_delete(pp, txq_ctrl);
+	return -ENOMEM;
+}
+
+
+static int mv_force_port_link_speed_fc(int port, MV_ETH_PORT_SPEED port_speed, int en_force)
+{
+	struct eth_port *pp = mv_eth_port_by_id(port);
+
+	if (en_force) {
+		if (mvNetaForceLinkModeSet(port, 1, 0)) {
+			printk(KERN_ERR "mvNetaForceLinkModeSet failed\n");
+			return -EIO;
+		}
+		if (mvNetaSpeedDuplexSet(port, port_speed, MV_ETH_DUPLEX_FULL)) {
+			printk(KERN_ERR "mvNetaSpeedDuplexSet failed\n");
+			return -EIO;
+		}
+		if (mvNetaFlowCtrlSet(port, MV_ETH_FC_ENABLE)) {
+			printk(KERN_ERR "mvNetaFlowCtrlSet failed\n");
+			return -EIO;
+		}
+		printk(KERN_ERR "*************FORCE LINK**************\n");
+		set_bit(MV_ETH_F_FORCE_LINK_BIT, &(pp->flags));
+
+	} else {
+		if (mvNetaForceLinkModeSet(port, 0, 0)) {
+			printk(KERN_ERR "mvNetaForceLinkModeSet failed\n");
+			return -EIO;
+		}
+		if (mvNetaSpeedDuplexSet(port, MV_ETH_SPEED_AN, MV_ETH_DUPLEX_AN)) {
+			printk(KERN_ERR "mvNetaSpeedDuplexSet failed\n");
+			return -EIO;
+		}
+		if (mvNetaFlowCtrlSet(port, MV_ETH_FC_AN_SYM)) {
+			printk(KERN_ERR "mvNetaFlowCtrlSet failed\n");
+			return -EIO;
+		}
+		printk(KERN_ERR "*************CLEAR FORCE LINK**************\n");
+
+		clear_bit(MV_ETH_F_FORCE_LINK_BIT, &(pp->flags));
+	}
+	return 0;
+}
+
+static void mv_eth_txq_delete(struct eth_port *pp, struct tx_queue *txq_ctrl)
+{
+	if (txq_ctrl->shadow_txq) {
+		mvOsFree(txq_ctrl->shadow_txq);
+		txq_ctrl->shadow_txq = NULL;
+	}
+
+	if (txq_ctrl->q) {
+		mvNetaTxqDelete(pp->port, txq_ctrl->txp, txq_ctrl->txq);
+		txq_ctrl->q = NULL;
+	}
+}
+
+/* Free all packets pending transmit from all TXQs and reset TX port */
+int mv_eth_txp_reset(int port, int txp)
+{
+	struct eth_port *pp;
+	int queue;
+
+	if (mvNetaTxpCheck(port, txp))
+		return -EINVAL;
+
+	pp = mv_eth_port_by_id(port);
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return -ENODEV;
+	}
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		printk(KERN_ERR "Port %d must be stopped before\n", port);
+		return -EINVAL;
+	}
+
+	/* free the skb's in the hal tx ring */
+	for (queue = 0; queue < CONFIG_MV_ETH_TXQ; queue++) {
+		struct tx_queue *txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + queue];
+
+		if (txq_ctrl->q) {
+			int mode, rx_port;
+
+			mode = mv_eth_ctrl_txq_mode_get(pp->port, txp, queue, &rx_port);
+			if (mode == MV_ETH_TXQ_CPU) {
+				/* Free all buffers in TXQ */
+				mv_eth_txq_done_force(pp, txq_ctrl);
+				/* reset txq */
+				txq_ctrl->shadow_txq_put_i = 0;
+				txq_ctrl->shadow_txq_get_i = 0;
+			}
+#ifdef CONFIG_MV_ETH_HWF
+			else if (mode == MV_ETH_TXQ_HWF && MV_NETA_HWF_CAP())
+				mv_eth_txq_hwf_clean(pp, txq_ctrl, rx_port);
+#endif /* CONFIG_MV_ETH_HWF */
+			else
+				printk(KERN_ERR "%s: port=%d, txp=%d, txq=%d is not in use\n",
+					__func__, pp->port, txp, queue);
+		}
+	}
+	mvNetaTxpReset(port, txp);
+
+	return 0;
+}
+
+/* Free received packets from all RXQs and reset RX of the port */
+int mv_eth_rx_reset(int port)
+{
+	struct eth_port *pp = mv_eth_port_by_id(port);
+	int rxq = 0;
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		printk(KERN_ERR "Port %d must be stopped before\n", port);
+		return -EINVAL;
+	}
+
+#ifndef CONFIG_MV_ETH_BM_CPU
+	{
+		for (rxq = 0; rxq < CONFIG_MV_ETH_RXQ; rxq++) {
+			struct eth_pbuf *pkt;
+			struct neta_rx_desc *rx_desc;
+			struct bm_pool *pool;
+			int i, rx_done;
+			MV_NETA_RXQ_CTRL *rx_ctrl = pp->rxq_ctrl[rxq].q;
+
+			if (rx_ctrl == NULL)
+				continue;
+
+			rx_done = mvNetaRxqFreeDescNumGet(pp->port, rxq);
+			mvOsCacheIoSync(pp->dev->dev.parent);
+			for (i = 0; i < rx_done; i++) {
+				rx_desc = mvNetaRxqNextDescGet(rx_ctrl);
+				mvOsCacheLineInv(pp->dev->dev.parent, rx_desc);
+
+#if defined(MV_CPU_BE)
+				mvNetaRxqDescSwap(rx_desc);
+#endif /* MV_CPU_BE */
+
+				pkt = (struct eth_pbuf *)rx_desc->bufCookie;
+				pool = &mv_eth_pool[pkt->pool];
+				mv_eth_pool_put(pool, pkt);
+			}
+		}
+	}
+#else
+	if (!MV_NETA_BM_CAP()) {
+		for (rxq = 0; rxq < CONFIG_MV_ETH_RXQ; rxq++) {
+			struct eth_pbuf *pkt;
+			struct neta_rx_desc *rx_desc;
+			struct bm_pool *pool;
+			int i, rx_done;
+			MV_NETA_RXQ_CTRL *rx_ctrl = pp->rxq_ctrl[rxq].q;
+
+			if (rx_ctrl == NULL)
+				continue;
+
+			rx_done = mvNetaRxqFreeDescNumGet(pp->port, rxq);
+			mvOsCacheIoSync(pp->dev->dev.parent);
+			for (i = 0; i < rx_done; i++) {
+				rx_desc = mvNetaRxqNextDescGet(rx_ctrl);
+				mvOsCacheLineInv(pp->dev->dev.parent, rx_desc);
+
+#if defined(MV_CPU_BE)
+				mvNetaRxqDescSwap(rx_desc);
+#endif /* MV_CPU_BE */
+
+				pkt = (struct eth_pbuf *)rx_desc->bufCookie;
+				pool = &mv_eth_pool[pkt->pool];
+				mv_eth_pool_put(pool, pkt);
+			}
+		}
+	}
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+	mvNetaRxReset(port);
+	return 0;
+}
+
+/***********************************************************
+ * coal set functions		                           *
+ ***********************************************************/
+MV_STATUS mv_eth_rx_pkts_coal_set(int port, int rxq, MV_U32 value)
+{
+	MV_STATUS status;
+	struct eth_port *pp = mv_eth_port_by_id(port);
+
+	status = mvNetaRxqPktsCoalSet(port, rxq, value);
+	if (status == MV_OK)
+		pp->rxq_ctrl[rxq].rxq_pkts_coal = value;
+	return status;
+}
+
+MV_STATUS mv_eth_rx_time_coal_set(int port, int rxq, MV_U32 value)
+{
+	MV_STATUS status;
+	struct eth_port *pp = mv_eth_port_by_id(port);
+
+	status = mvNetaRxqTimeCoalSet(port, rxq, value);
+	if (status == MV_OK)
+		pp->rxq_ctrl[rxq].rxq_time_coal = value;
+	return status;
+}
+
+MV_STATUS mv_eth_tx_done_pkts_coal_set(int port, int txp, int txq, MV_U32 value)
+{
+	MV_STATUS status;
+	struct eth_port *pp = mv_eth_port_by_id(port);
+
+	status = mvNetaTxDonePktsCoalSet(port, txp, txq, value);
+	if (status == MV_OK)
+		pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + txq].txq_done_pkts_coal = value;
+	return status;
+}
+
+/***********************************************************
+ * mv_eth_start_internals --                               *
+ *   fill rx buffers. start rx/tx activity. set coalesing. *
+ *   clear and unmask interrupt bits                       *
+ ***********************************************************/
+int mv_eth_start_internals(struct eth_port *pp, int mtu)
+{
+	unsigned int status;
+	struct cpu_ctrl	*cpuCtrl;
+	int rxq, txp, txq, num, err = 0;
+	int pkt_size = RX_PKT_SIZE(mtu);
+
+	if (test_bit(MV_ETH_F_STARTED_BIT, &(pp->flags))) {
+		STAT_ERR(pp->stats.state_err++);
+		printk(KERN_ERR "%s: port %d, wrong state: STARTED_BIT = 1\n", __func__, pp->port);
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (mvNetaMaxRxSizeSet(pp->port, RX_PKT_SIZE(mtu))) {
+		printk(KERN_ERR "%s: can't set maxRxSize=%d for port=%d, mtu=%d\n",
+		       __func__, RX_PKT_SIZE(mtu), pp->port, mtu);
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (mv_eth_ctrl_is_tx_enabled(pp)) {
+		int cpu;
+		for_each_possible_cpu(cpu) {
+			if (!(MV_BIT_CHECK(pp->cpu_mask, cpu)))
+				continue;
+
+			cpuCtrl = pp->cpu_config[cpu];
+
+			if (!(MV_BIT_CHECK(cpuCtrl->cpuTxqMask, cpuCtrl->txq))) {
+				printk(KERN_ERR "%s: error , port #%d txq #%d is masked for cpu #%d (mask= 0X%x).\n",
+					__func__, pp->port, cpuCtrl->txq, cpu, cpuCtrl->cpuTxqMask);
+				err = -EINVAL;
+				goto out;
+			}
+
+			if (mv_eth_ctrl_txq_cpu_own(pp->port, pp->txp, cpuCtrl->txq, 1, cpu) < 0) {
+				err = -EINVAL;
+				goto out;
+			}
+		}
+	}
+
+	/* Allocate buffers for Long buffers pool */
+	if (pp->pool_long == NULL) {
+		struct bm_pool *new_pool;
+
+		new_pool = mv_eth_long_pool_get(pp, pkt_size);
+		if (new_pool == NULL) {
+			printk(KERN_ERR "%s FAILED: port=%d, Can't find pool for pkt_size=%d\n",
+			       __func__, pp->port, pkt_size);
+			err = -ENOMEM;
+			goto out;
+		}
+		if (new_pool->pkt_size == 0) {
+			new_pool->pkt_size = pkt_size;
+#ifdef CONFIG_MV_ETH_BM_CPU
+			if (MV_NETA_BM_CAP())
+				mvNetaBmPoolBufferSizeSet(new_pool->pool, RX_BUF_SIZE(pkt_size));
+#endif /* CONFIG_MV_ETH_BM_CPU */
+		}
+		if (new_pool->pkt_size < pkt_size) {
+			printk(KERN_ERR "%s FAILED: port=%d, long pool #%d, pkt_size=%d less than required %d\n",
+					__func__, pp->port, new_pool->pool, new_pool->pkt_size, pkt_size);
+			err = -ENOMEM;
+			goto out;
+		}
+		pp->pool_long = new_pool;
+		pp->pool_long->port_map |= (1 << pp->port);
+
+		num = mv_eth_pool_add(pp, pp->pool_long->pool, pp->pool_long_num);
+		if (num != pp->pool_long_num) {
+			printk(KERN_ERR "%s FAILED: mtu=%d, pool=%d, pkt_size=%d, only %d of %d allocated\n",
+			       __func__, mtu, pp->pool_long->pool, pp->pool_long->pkt_size, num, pp->pool_long_num);
+			err = -ENOMEM;
+			goto out;
+		}
+	}
+
+#ifdef CONFIG_MV_ETH_BM_CPU
+	if (MV_NETA_BM_CAP()) {
+		mvNetaBmPoolBufSizeSet(pp->port, pp->pool_long->pool, RX_BUF_SIZE(pp->pool_long->pkt_size));
+
+		if (pp->pool_short == NULL) {
+			int short_pool = mv_eth_bm_config_short_pool_get(pp->port);
+
+			/* Allocate packets for short pool */
+			if (short_pool < 0) {
+				err = -EINVAL;
+				goto out;
+			}
+			pp->pool_short = &mv_eth_pool[short_pool];
+			pp->pool_short->port_map |= (1 << pp->port);
+			if (pp->pool_short->pool != pp->pool_long->pool) {
+				num = mv_eth_pool_add(pp, pp->pool_short->pool, pp->pool_short_num);
+				if (num != pp->pool_short_num) {
+					pr_err("%s FAILED: pool=%d, pkt_size=%d - %d of %d buffers added\n",
+						 __func__, short_pool, pp->pool_short->pkt_size, num,
+						 pp->pool_short_num);
+					err = -ENOMEM;
+					goto out;
+				}
+				mvNetaBmPoolBufSizeSet(pp->port, pp->pool_short->pool,
+							 RX_BUF_SIZE(pp->pool_short->pkt_size));
+			} else {
+				int dummy_short_pool = (pp->pool_short->pool + 1) % MV_BM_POOLS;
+
+			/* To disable short pool we choose unused pool and set pkt size to 0 (buffer size=pkt offset) */
+				mvNetaBmPoolBufSizeSet(pp->port, dummy_short_pool, NET_SKB_PAD);
+			}
+		}
+	}
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+	for (rxq = 0; rxq < CONFIG_MV_ETH_RXQ; rxq++) {
+		if (pp->rxq_ctrl[rxq].q == NULL) {
+			pp->rxq_ctrl[rxq].q = mvNetaRxqInit(pp->port, rxq, pp->rxq_ctrl[rxq].rxq_size);
+			if (!pp->rxq_ctrl[rxq].q) {
+				printk(KERN_ERR "%s: can't create RxQ port=%d, rxq=%d, desc=%d\n",
+				       __func__, pp->port, rxq, pp->rxq_ctrl[rxq].rxq_size);
+				err = -ENODEV;
+				goto out;
+			}
+		}
+
+		/* Set Offset */
+		mvNetaRxqOffsetSet(pp->port, rxq, NET_SKB_PAD);
+
+		/* Set coalescing pkts and time */
+		mv_eth_rx_pkts_coal_set(pp->port, rxq, pp->rxq_ctrl[rxq].rxq_pkts_coal);
+		mv_eth_rx_time_coal_set(pp->port, rxq, pp->rxq_ctrl[rxq].rxq_time_coal);
+
+#ifdef CONFIG_MV_ETH_BM_CPU
+		if (MV_NETA_BM_CAP()) {
+			/* Enable / Disable - BM support */
+			if (pp->pool_short->pool == pp->pool_long->pool) {
+				int dummy_short_pool = (pp->pool_short->pool + 1) % MV_BM_POOLS;
+
+			/* To disable short pool we choose unused pool and set pkt size to 0 (buffer size=pkt offset) */
+				mvNetaRxqBmEnable(pp->port, rxq, dummy_short_pool, pp->pool_long->pool);
+			} else
+				mvNetaRxqBmEnable(pp->port, rxq, pp->pool_short->pool, pp->pool_long->pool);
+		} else {
+			/* Fill RXQ with buffers from RX pool */
+			mvNetaRxqBufSizeSet(pp->port, rxq, RX_BUF_SIZE(pkt_size));
+			mvNetaRxqBmDisable(pp->port, rxq);
+		}
+#else
+		/* Fill RXQ with buffers from RX pool */
+		mvNetaRxqBufSizeSet(pp->port, rxq, RX_BUF_SIZE(pkt_size));
+		mvNetaRxqBmDisable(pp->port, rxq);
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+		if (!(pp->flags & MV_ETH_F_IFCAP_NETMAP)) {
+			if ((mvNetaRxqFreeDescNumGet(pp->port, rxq) == 0))
+				mv_eth_rxq_fill(pp, rxq, pp->rxq_ctrl[rxq].rxq_size);
+		} else {
+			mvNetaRxqNonOccupDescAdd(pp->port, rxq, pp->rxq_ctrl[rxq].rxq_size);
+#ifdef CONFIG_NETMAP
+			if (neta_netmap_rxq_init_buffers(pp, rxq))
+				return MV_ERROR;
+#endif
+		}
+	}
+
+	for (txp = 0; txp < pp->txp_num; txp++) {
+		for (txq = 0; txq < CONFIG_MV_ETH_TXQ; txq++) {
+			struct tx_queue *txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + txq];
+
+			if ((txq_ctrl->q == NULL) && (txq_ctrl->txq_size > 0)) {
+				err = mv_eth_txq_create(pp, txq_ctrl);
+				if (err)
+					goto out;
+				spin_lock_init(&txq_ctrl->queue_lock);
+			}
+			mv_eth_tx_done_pkts_coal_set(pp->port, txp, txq,
+					pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + txq].txq_done_pkts_coal);
+#ifdef CONFIG_NETMAP
+			if (neta_netmap_txq_init_buffers(pp, txp, txq))
+				return MV_ERROR;
+#endif /* CONFIG_NETMAP */
+
+		}
+		mvNetaTxpMaxTxSizeSet(pp->port, txp, RX_PKT_SIZE(mtu));
+	}
+
+#ifdef CONFIG_MV_ETH_HWF
+	if (MV_NETA_HWF_CAP()) {
+#ifdef CONFIG_MV_ETH_BM_CPU
+		if (MV_NETA_BM_CAP())
+			mvNetaHwfBmPoolsSet(pp->port, pp->pool_short->pool, pp->pool_long->pool);
+		else
+			mv_eth_hwf_bm_create(pp->port, RX_PKT_SIZE(mtu));
+#else
+		mv_eth_hwf_bm_create(pp->port, RX_PKT_SIZE(mtu));
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+		mvNetaHwfEnable(pp->port, 1);
+	}
+#endif /* CONFIG_MV_ETH_HWF */
+
+	/* start the hal - rx/tx activity */
+	status = mvNetaPortEnable(pp->port);
+	if (status == MV_OK)
+		set_bit(MV_ETH_F_LINK_UP_BIT, &(pp->flags));
+#ifdef CONFIG_MV_PON
+	else if (MV_PON_PORT(pp->port) && (mv_pon_link_status() == MV_TRUE)) {
+		mvNetaPortUp(pp->port);
+		set_bit(MV_ETH_F_LINK_UP_BIT, &(pp->flags));
+	}
+#endif /* CONFIG_MV_PON */
+
+	set_bit(MV_ETH_F_STARTED_BIT, &(pp->flags));
+
+ out:
+	return err;
+}
+
+
+
+int mv_eth_resume_internals(struct eth_port *pp, int mtu)
+{
+
+	unsigned int status;
+
+	mvNetaMaxRxSizeSet(pp->port, RX_PKT_SIZE(mtu));
+
+#ifdef CONFIG_MV_ETH_HWF
+	if (MV_NETA_HWF_CAP()) {
+#ifdef CONFIG_MV_ETH_BM_CPU
+		if (MV_NETA_BM_CAP()) {
+			if (pp->pool_long && pp->pool_short)
+				mvNetaHwfBmPoolsSet(pp->port, pp->pool_short->pool, pp->pool_long->pool);
+		} else {
+			/* TODO - update func if we want to support HWF */
+			mv_eth_hwf_bm_create(pp->port, RX_PKT_SIZE(mtu));
+		}
+#else
+		/* TODO - update func if we want to support HWF */
+		mv_eth_hwf_bm_create(pp->port, RX_PKT_SIZE(mtu));
+#endif /* CONFIG_MV_ETH_BM_CPU */
+		mvNetaHwfEnable(pp->port, 1);
+	}
+
+#endif /* CONFIG_MV_ETH_HWF */
+
+	/* start the hal - rx/tx activity */
+	status = mvNetaPortEnable(pp->port);
+	if (status == MV_OK)
+		set_bit(MV_ETH_F_LINK_UP_BIT, &(pp->flags));
+
+#ifdef CONFIG_MV_PON
+	else if (MV_PON_PORT(pp->port) && (mv_pon_link_status() == MV_TRUE)) {
+		mvNetaPortUp(pp->port);
+		set_bit(MV_ETH_F_LINK_UP_BIT, &(pp->flags));
+	}
+#endif /* CONFIG_MV_PON */
+
+	return MV_OK;
+
+}
+
+
+/***********************************************************
+ * mv_eth_suspend_internals --                             *
+ *   stop port rx/tx activity. free skb's from rx/tx rings.*
+ ***********************************************************/
+int mv_eth_suspend_internals(struct eth_port *pp)
+{
+	int cpu;
+
+	/* stop the port activity*/
+	if (mvNetaPortDisable(pp->port) != MV_OK) {
+		printk(KERN_ERR "%s: GbE port %d: mvNetaPortDisable failed\n", __func__, pp->port);
+		return MV_ERROR;
+	}
+
+	/* mask all interrupts */
+	on_each_cpu(mv_eth_interrupts_mask, pp, 1);
+
+	for_each_possible_cpu(cpu) {
+		pp->cpu_config[cpu]->causeRxTx = 0;
+		if (pp->cpu_config[cpu]->napi)
+			/* TODO: check napi status, MV_ETH_F_STARTED_OLD_BIT is not exactly the bit we should look at */
+			if (test_bit(MV_ETH_F_STARTED_OLD_BIT, &(pp->flags)))
+				napi_synchronize(pp->cpu_config[cpu]->napi);
+	}
+
+	mdelay(10);
+
+	return MV_OK;
+}
+
+
+/***********************************************************
+ * mv_eth_stop_internals --                                *
+ *   stop port rx/tx activity. free skb's from rx/tx rings.*
+ ***********************************************************/
+int mv_eth_stop_internals(struct eth_port *pp)
+{
+
+	int txp, queue;
+	struct cpu_ctrl	*cpuCtrl;
+
+	if (!test_and_clear_bit(MV_ETH_F_STARTED_BIT, &(pp->flags))) {
+		STAT_ERR(pp->stats.state_err++);
+		printk(KERN_ERR "%s: port %d, wrong state: STARTED_BIT = 0.\n", __func__, pp->port);
+		goto error;
+	}
+
+	/* stop the port activity, mask all interrupts */
+	if (mvNetaPortDisable(pp->port) != MV_OK) {
+		printk(KERN_ERR "GbE port %d: ethPortDisable failed\n", pp->port);
+		goto error;
+	}
+
+	on_each_cpu(mv_eth_interrupts_mask, pp, 1);
+
+	mdelay(10);
+
+#ifdef CONFIG_MV_ETH_HWF
+	if (MV_NETA_HWF_CAP())
+		mvNetaHwfEnable(pp->port, 0);
+#endif /* CONFIG_MV_ETH_HWF */
+
+	for (txp = 0; txp < pp->txp_num; txp++)
+		for (queue = 0; queue < CONFIG_MV_ETH_TXQ; queue++)
+			mv_eth_txq_clean(pp->port, txp, queue);
+
+	if (mv_eth_ctrl_is_tx_enabled(pp)) {
+		int cpu;
+		for_each_possible_cpu(cpu) {
+			cpuCtrl = pp->cpu_config[cpu];
+			if (MV_BIT_CHECK(pp->cpu_mask, cpu))
+				if (MV_BIT_CHECK(cpuCtrl->cpuTxqMask, cpuCtrl->txq))
+					mv_eth_ctrl_txq_cpu_own(pp->port, pp->txp, cpuCtrl->txq, 0, cpu);
+		}
+	}
+
+	/* free the skb's in the hal rx ring */
+	for (queue = 0; queue < CONFIG_MV_ETH_RXQ; queue++)
+		mv_eth_rxq_drop_pkts(pp, queue);
+
+	return 0;
+
+error:
+	printk(KERN_ERR "GbE port %d: stop internals failed\n", pp->port);
+	return -1;
+}
+
+/* return positive if MTU is valid */
+int mv_eth_check_mtu_valid(struct net_device *dev, int mtu)
+{
+	if (mtu < 68) {
+		printk(KERN_INFO "MTU must be at least 68, change mtu failed\n");
+		return -EINVAL;
+	}
+	if (mtu > 9676 /* 9700 - 20 and rounding to 8 */) {
+		printk(KERN_ERR "%s: Illegal MTU value %d, ", dev->name, mtu);
+		mtu = 9676;
+		printk(KERN_CONT " rounding MTU to: %d \n", mtu);
+	}
+
+	if (MV_IS_NOT_ALIGN(RX_PKT_SIZE(mtu), 8)) {
+		printk(KERN_ERR "%s: Illegal MTU value %d, ", dev->name, mtu);
+		mtu = MV_ALIGN_UP(RX_PKT_SIZE(mtu), 8);
+		printk(KERN_CONT " rounding MTU to: %d \n", mtu);
+	}
+	return mtu;
+}
+
+/* Check if MTU can be changed */
+int mv_eth_check_mtu_internals(struct net_device *dev, int mtu)
+{
+	struct eth_port *pp = MV_ETH_PRIV(dev);
+	struct bm_pool	*new_pool = NULL;
+
+	new_pool = mv_eth_long_pool_get(pp, RX_PKT_SIZE(mtu));
+
+	/* If no pool for new MTU or shared pool is used - MTU can be changed only when interface is stopped */
+	if (new_pool == NULL) {
+		printk(KERN_ERR "%s: No BM pool available for MTU=%d\n", __func__, mtu);
+		return -EPERM;
+	}
+#ifdef CONFIG_MV_ETH_BM_CPU
+	if (MV_NETA_BM_CAP() && new_pool->pkt_size < RX_PKT_SIZE(mtu)) {
+		if (mv_eth_bm_config_pkt_size_get(new_pool->pool) != 0) {
+			printk(KERN_ERR "%s: BM pool #%d - pkt_size = %d less than required for MTU=%d and can't be changed\n",
+						__func__, new_pool->pool, new_pool->pkt_size, mtu);
+			return -EPERM;
+		}
+		/* Pool packet size can be changed for new MTU, but pool is shared */
+		if ((new_pool == pp->pool_long) && (pp->pool_long->port_map != (1 << pp->port))) {
+			/* Shared pool */
+			printk(KERN_ERR "%s: bmPool=%d is shared port_map=0x%x. Stop all ports uses this pool before change MTU\n",
+						__func__, pp->pool_long->pool, pp->pool_long->port_map);
+			return -EPERM;
+		}
+	}
+#endif /* CONFIG_MV_ETH_BM_CPU */
+	return 0;
+}
+
+/***********************************************************
+ * mv_eth_change_mtu_internals --                          *
+ *   stop port activity. release skb from rings. set new   *
+ *   mtu in device and hw. restart port activity and       *
+ *   and fill rx-buiffers with size according to new mtu.  *
+ ***********************************************************/
+int mv_eth_change_mtu_internals(struct net_device *dev, int mtu)
+{
+	struct bm_pool	*new_pool = NULL;
+	struct eth_port *pp = MV_ETH_PRIV(dev);
+	int             config_pkt_size;
+
+	if (test_bit(MV_ETH_F_STARTED_BIT, &(pp->flags))) {
+		STAT_ERR(pp->stats.state_err++);
+		if (pp->flags & MV_ETH_F_DBG_RX)
+			printk(KERN_ERR "%s: port %d, STARTED_BIT = 0, Invalid value.\n", __func__, pp->port);
+		return MV_ERROR;
+	}
+
+	if ((mtu != dev->mtu) && (pp->pool_long)) {
+		/* If long pool assigned and MTU really changed and can't use old pool - free buffers */
+
+		config_pkt_size = 0;
+#ifdef CONFIG_MV_ETH_BM_CPU
+		if (MV_NETA_BM_CAP()) {
+			new_pool = mv_eth_long_pool_get(pp, RX_PKT_SIZE(mtu));
+			if (new_pool != NULL)
+				config_pkt_size = mv_eth_bm_config_pkt_size_get(new_pool->pool);
+		} else {
+			/* If BM is not used always free buffers */
+			new_pool = NULL;
+		}
+#else
+		/* If BM is not used always free buffers */
+		new_pool = NULL;
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+		/* Free all buffers from long pool */
+		if ((new_pool == NULL) || (new_pool->pkt_size < RX_PKT_SIZE(mtu)) || (pp->pool_long != new_pool) ||
+			((new_pool->pkt_size > RX_PKT_SIZE(mtu)) && (config_pkt_size == 0))) {
+			mv_eth_rx_reset(pp->port);
+			mv_eth_pool_free(pp->pool_long->pool, pp->pool_long_num);
+
+#ifdef CONFIG_MV_ETH_BM_CPU
+			if (MV_NETA_BM_CAP()) {
+				/* redefine pool pkt_size */
+				if (pp->pool_long->buf_num == 0) {
+					pp->pool_long->pkt_size = config_pkt_size;
+
+					if (pp->pool_long->pkt_size == 0)
+						mvNetaBmPoolBufferSizeSet(pp->pool_long->pool, 0);
+					else
+						mvNetaBmPoolBufferSizeSet(pp->pool_long->pool,
+									  RX_BUF_SIZE(pp->pool_long->pkt_size));
+				}
+			} else {
+				pp->pool_long->pkt_size = config_pkt_size;
+			}
+#else
+			pp->pool_long->pkt_size = config_pkt_size;
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+			pp->pool_long->port_map &= ~(1 << pp->port);
+			pp->pool_long = NULL;
+		}
+
+		/* DIMA debug; Free all buffers from short pool */
+/*
+		if (pp->pool_short) {
+			mv_eth_pool_free(pp->pool_short->pool, pp->pool_short_num);
+			pp->pool_short = NULL;
+		}
+*/
+	}
+	dev->mtu = mtu;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
+	netdev_update_features(dev);
+#else
+	netdev_features_change(dev);
+#endif
+
+	return 0;
+}
+
+#ifdef CONFIG_MV_NETA_TXDONE_IN_HRTIMER
+/***********************************************************
+ * mv_eth_tx_done_hr_timer_callback --			   *
+ *   callback for tx_done hrtimer                          *
+ ***********************************************************/
+enum hrtimer_restart mv_eth_tx_done_hr_timer_callback(struct hrtimer *timer)
+{
+	struct cpu_ctrl *cpuCtrl = container_of(timer, struct cpu_ctrl, tx_done_timer);
+
+	tasklet_schedule(&cpuCtrl->tx_done_tasklet);
+
+	return HRTIMER_NORESTART;
+}
+#endif
+
+#ifndef CONFIG_MV_NETA_TXDONE_ISR
+/***********************************************************
+ * mv_eth_tx_done_timer_callback --			   *
+ *   N msec periodic callback for tx_done                  *
+ ***********************************************************/
+static void mv_eth_tx_done_timer_callback(unsigned long data)
+{
+	struct cpu_ctrl *cpuCtrl = (struct cpu_ctrl *)data;
+	struct eth_port *pp = cpuCtrl->pp;
+	int tx_done = 0, tx_todo = 0;
+	unsigned int txq_mask;
+
+	STAT_INFO(pp->stats.tx_done_timer_event[smp_processor_id()]++);
+
+	clear_bit(MV_ETH_F_TX_DONE_TIMER_BIT, &(cpuCtrl->flags));
+
+	if (!test_bit(MV_ETH_F_STARTED_BIT, &(pp->flags))) {
+		STAT_INFO(pp->stats.netdev_stop++);
+
+#ifdef CONFIG_MV_NETA_DEBUG_CODE
+		if (pp->flags & MV_ETH_F_DBG_TX)
+			printk(KERN_ERR "%s: port #%d is stopped, STARTED_BIT = 0, exit timer.\n", __func__, pp->port);
+#endif /* CONFIG_MV_NETA_DEBUG_CODE */
+
+		return;
+	}
+
+	if (MV_PON_PORT(pp->port))
+		tx_done = mv_eth_tx_done_pon(pp, &tx_todo);
+	else {
+		/* check all possible queues, as there is no indication from interrupt */
+		txq_mask = ((1 << CONFIG_MV_ETH_TXQ) - 1) & cpuCtrl->cpuTxqOwner;
+		tx_done = mv_eth_tx_done_gbe(pp, txq_mask, &tx_todo);
+	}
+
+	if (cpuCtrl->cpu != smp_processor_id()) {
+		pr_warning("%s: Called on other CPU - %d != %d\n", __func__, cpuCtrl->cpu, smp_processor_id());
+		cpuCtrl = pp->cpu_config[smp_processor_id()];
+	}
+	if (tx_todo > 0)
+		mv_eth_add_tx_done_timer(cpuCtrl);
+}
+#endif /* !CONFIG_MV_NETA_TXDONE_ISR */
+
+/***********************************************************
+ * mv_eth_cleanup_timer_callback --			   *
+ *   N msec periodic callback for error cleanup            *
+ ***********************************************************/
+static void mv_eth_cleanup_timer_callback(unsigned long data)
+{
+	struct cpu_ctrl *cpuCtrl = (struct cpu_ctrl *)data;
+	struct eth_port *pp = cpuCtrl->pp;
+	struct net_device *dev = pp->dev;
+
+	STAT_INFO(pp->stats.cleanup_timer++);
+
+	clear_bit(MV_ETH_F_CLEANUP_TIMER_BIT, &(cpuCtrl->flags));
+
+	if (!test_bit(MV_ETH_F_STARTED_BIT, &(pp->flags)))
+		return;
+
+	if (cpuCtrl->cpu != smp_processor_id()) {
+		pr_warn("%s: Called on other CPU - %d != %d\n", __func__, cpuCtrl->cpu, smp_processor_id());
+		cpuCtrl = pp->cpu_config[smp_processor_id()];
+	}
+
+	/* FIXME: check bm_pool->missed and pp->rxq_ctrl[rxq].missed counters and allocate */
+	/* re-add timer if necessary (check bm_pool->missed and pp->rxq_ctrl[rxq].missed   */
+}
+
+void mv_eth_mac_show(int port)
+{
+	struct eth_port *pp;
+
+	if (mvNetaPortCheck(port))
+		return;
+
+	pp = mv_eth_port_by_id(port);
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return;
+	}
+
+#ifdef CONFIG_MV_ETH_PNC
+	if (MV_NETA_PNC_CAP()) {
+		if (mv_eth_pnc_ctrl_en) {
+			mvOsPrintf("PnC MAC Rules - port #%d:\n", port);
+			pnc_mac_show();
+		} else
+			mvOsPrintf("%s: PNC control is disabled\n", __func__);
+	} else {/* Legacy parser */
+		mvEthPortUcastShow(port);
+		mvEthPortMcastShow(port);
+	}
+#else /* Legacy parser */
+	mvEthPortUcastShow(port);
+	mvEthPortMcastShow(port);
+#endif /* CONFIG_MV_ETH_PNC */
+}
+
+void mv_eth_vlan_prio_show(int port)
+{
+	struct eth_port *pp;
+
+	if (mvNetaPortCheck(port))
+		return;
+
+	pp = mv_eth_port_by_id(port);
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return;
+	}
+
+#ifdef CONFIG_MV_ETH_PNC
+	if (MV_NETA_PNC_CAP()) {
+		if (mv_eth_pnc_ctrl_en) {
+			mvOsPrintf("PnC VLAN Priority Rules - port #%d:\n", port);
+			pnc_vlan_prio_show(port);
+		} else
+			mvOsPrintf("%s: PNC control is disabled\n", __func__);
+	} else { /* Legacy parser */
+		int prio, rxq;
+
+		mvOsPrintf("Legacy VLAN Priority Rules - port #%d:\n", port);
+		for (prio = 0; prio <= 0x7; prio++) {
+
+			rxq = mvNetaVprioToRxqGet(port, prio);
+			if (rxq > 0)
+				pr_info("prio=%d: rxq=%d\n", prio, rxq);
+		}
+	}
+#else /* Legacy parser */
+	{
+		int prio, rxq;
+
+		mvOsPrintf("Legacy VLAN Priority Rules - port #%d:\n", port);
+		for (prio = 0; prio <= 0x7 ; prio++) {
+
+			rxq = mvNetaVprioToRxqGet(port, prio);
+			if (rxq > 0)
+				printk(KERN_INFO "prio=%d: rxq=%d\n", prio, rxq);
+		}
+	}
+#endif /* CONFIG_MV_ETH_PNC */
+}
+
+void mv_eth_tos_map_show(int port)
+{
+	int tos, txq, cpu;
+	struct cpu_ctrl *cpuCtrl;
+	struct eth_port *pp;
+
+	if (mvNetaPortCheck(port))
+		return;
+
+	pp = mv_eth_port_by_id(port);
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return;
+	}
+
+#ifdef CONFIG_MV_ETH_PNC
+	if (MV_NETA_PNC_CAP()) {
+		if (mv_eth_pnc_ctrl_en) {
+			mvOsPrintf("PnC    TOS (DSCP) => RXQ Mapping Rules - port #%d:\n", port);
+			pnc_ipv4_dscp_show(port);
+		} else
+			mvOsPrintf("%s: PNC control is disabled\n", __func__);
+	} else {
+		mvOsPrintf("Legacy TOS (DSCP) => RXQ Mapping Rules - port #%d:\n", port);
+		for (tos = 0; tos < 0xFF; tos += 0x4) {
+			int rxq;
+
+			rxq = mvNetaTosToRxqGet(port, tos);
+			if (rxq > 0)
+				pr_err("      0x%02x (0x%02x) => %d\n",
+					tos, tos >> 2, rxq);
+		}
+	}
+#else
+	mvOsPrintf("Legacy TOS (DSCP) => RXQ Mapping Rules - port #%d:\n", port);
+	for (tos = 0; tos < 0xFF; tos += 0x4) {
+		int rxq;
+
+		rxq = mvNetaTosToRxqGet(port, tos);
+		if (rxq > 0)
+			printk(KERN_ERR "      0x%02x (0x%02x) => %d\n",
+					tos, tos >> 2, rxq);
+	}
+#endif /* CONFIG_MV_ETH_PNC */
+	for_each_possible_cpu(cpu) {
+		printk(KERN_ERR "\n");
+		printk(KERN_ERR " TOS => TXQ map for port #%d cpu #%d\n", port, cpu);
+		cpuCtrl = pp->cpu_config[cpu];
+		for (tos = 0; tos < sizeof(cpuCtrl->txq_tos_map); tos++) {
+			txq = cpuCtrl->txq_tos_map[tos];
+			if (txq != MV_ETH_TXQ_INVALID)
+				printk(KERN_ERR "0x%02x => %d\n", tos, txq);
+		}
+	}
+}
+
+int mv_eth_rxq_tos_map_set(int port, int rxq, unsigned char tos)
+{
+	int status = -1;
+	struct eth_port *pp;
+
+	if (mvNetaPortCheck(port))
+		return -EINVAL;
+
+	pp = mv_eth_port_by_id(port);
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return 1;
+	}
+
+#ifdef CONFIG_MV_ETH_PNC
+	if (MV_NETA_PNC_CAP()) {
+		if (mv_eth_pnc_ctrl_en)
+			status = pnc_ip4_dscp(port, tos, 0xFF, rxq);
+		else
+			mvOsPrintf("%s: PNC control is disabled\n", __func__);
+	} else {/* Legacy parser */
+		status = mvNetaTosToRxqSet(port, tos, rxq);
+	}
+#else /* Legacy parser */
+	status = mvNetaTosToRxqSet(port, tos, rxq);
+#endif /* CONFIG_MV_ETH_PNC */
+
+	if (status == 0)
+		printk(KERN_ERR "Succeeded\n");
+	else if (status == -1)
+		printk(KERN_ERR "Not supported\n");
+	else
+		printk(KERN_ERR "Failed\n");
+
+	return status;
+}
+
+int mv_eth_rxq_vlan_prio_set(int port, int rxq, unsigned char prio)
+{
+	int status = -1;
+
+#ifdef CONFIG_MV_ETH_PNC
+	if (MV_NETA_PNC_CAP()) {
+		if (mv_eth_pnc_ctrl_en)
+			status = pnc_vlan_prio_set(port, prio, rxq);
+		else
+			mvOsPrintf("%s: PNC control is disabled\n", __func__);
+	} else {/* Legacy parser */
+		status = mvNetaVprioToRxqSet(port, prio, rxq);
+	}
+#else /* Legacy parser */
+	status = mvNetaVprioToRxqSet(port, prio, rxq);
+#endif /* CONFIG_MV_ETH_PNC */
+
+	if (status == 0)
+		printk(KERN_ERR "Succeeded\n");
+	else if (status == -1)
+		printk(KERN_ERR "Not supported\n");
+	else
+		printk(KERN_ERR "Failed\n");
+
+	return status;
+}
+
+/* Set TXQ for special TOS value. txq=-1 - use default TXQ for this port */
+int mv_eth_txq_tos_map_set(int port, int txq, int cpu, unsigned int tos)
+{
+	MV_U8 old_txq;
+	struct cpu_ctrl	*cpuCtrl;
+	struct eth_port *pp = mv_eth_port_by_id(port);
+
+	if (mvNetaPortCheck(port))
+		return -EINVAL;
+
+	if ((pp == NULL) || (pp->txq_ctrl == NULL)) {
+		pr_err("Port %d does not exist\n", port);
+		return -ENODEV;
+	}
+	if ((cpu >= nr_cpu_ids) || (cpu < 0)) {
+		printk(KERN_ERR "cpu #%d is out of range: from 0 to %d\n",
+			cpu, nr_cpu_ids - 1);
+		return -EINVAL;
+	}
+
+	if (!(MV_BIT_CHECK(pp->cpu_mask, cpu))) {
+		printk(KERN_ERR "%s: Error, cpu #%d is masked\n", __func__, cpu);
+		return -EINVAL;
+	}
+	if ((tos > 0xFF) || (tos < 0)) {
+		printk(KERN_ERR "TOS 0x%x is out of range: from 0 to 0xFF\n", tos);
+		return -EINVAL;
+	}
+	cpuCtrl = pp->cpu_config[cpu];
+	old_txq = cpuCtrl->txq_tos_map[tos];
+
+	/* The same txq - do nothing */
+	if (old_txq == (MV_U8) txq)
+		return MV_OK;
+
+	if (txq == -1) {
+		/* delete tos to txq mapping - free TXQ */
+		if (mv_eth_ctrl_txq_cpu_own(port, pp->txp, old_txq, 0, cpu))
+			return -EINVAL;
+
+		cpuCtrl->txq_tos_map[tos] = MV_ETH_TXQ_INVALID;
+		printk(KERN_ERR "Successfully deleted\n");
+		return MV_OK;
+	}
+
+	if (mvNetaMaxCheck(txq, CONFIG_MV_ETH_TXQ, "txq"))
+		return -EINVAL;
+
+	/* Check that new txq can be allocated for cpu */
+	if (!(MV_BIT_CHECK(cpuCtrl->cpuTxqMask, txq))) {
+		printk(KERN_ERR "%s: Error, Txq #%d masked for cpu #%d\n", __func__, txq, cpu);
+		return -EINVAL;
+	}
+
+	if (mv_eth_ctrl_txq_cpu_own(port, pp->txp, txq, 1, cpu))
+		return -EINVAL;
+
+	cpuCtrl->txq_tos_map[tos] = (MV_U8) txq;
+	printk(KERN_ERR "Successfully added\n");
+	return MV_OK;
+}
+
+static int mv_eth_priv_init(struct eth_port *pp, int port)
+{
+	int cpu, i;
+	struct cpu_ctrl	*cpuCtrl;
+	u8	*ext_buf;
+
+	/* Default field per cpu initialization */
+	for (i = 0; i < nr_cpu_ids; i++) {
+		pp->cpu_config[i] = kmalloc(sizeof(struct cpu_ctrl), GFP_KERNEL);
+		memset(pp->cpu_config[i], 0, sizeof(struct cpu_ctrl));
+	}
+
+	pp->port = port;
+	pp->txp_num = 1;
+	pp->txp = 0;
+	pp->pm_mode = 0;
+	for_each_possible_cpu(cpu) {
+		cpuCtrl = pp->cpu_config[cpu];
+		cpuCtrl->txq = CONFIG_MV_ETH_TXQ_DEF;
+		cpuCtrl->cpuTxqOwner = (1 << CONFIG_MV_ETH_TXQ_DEF);
+		cpuCtrl->cpuTxqMask = 0xFF;
+		mvNetaTxqCpuMaskSet(port, 0xFF , cpu);
+		cpuCtrl->pp = pp;
+		cpuCtrl->cpu = cpu;
+	}
+
+	pp->flags = 0;
+
+#ifdef CONFIG_MV_ETH_BM_CPU
+	if (MV_NETA_BM_CAP()) {
+		pp->pool_long_num = mv_eth_bm_config_long_buf_num_get(port);
+		if (pp->pool_long_num > MV_BM_POOL_CAP_MAX)
+			pp->pool_long_num = MV_BM_POOL_CAP_MAX;
+
+		pp->pool_short_num = mv_eth_bm_config_short_buf_num_get(port);
+		if (pp->pool_short_num > MV_BM_POOL_CAP_MAX)
+			pp->pool_short_num = MV_BM_POOL_CAP_MAX;
+	} else {
+		pp->pool_long_num = CONFIG_MV_ETH_RXQ * CONFIG_MV_ETH_RXQ_DESC * 2;
+	}
+#else
+	pp->pool_long_num = CONFIG_MV_ETH_RXQ * CONFIG_MV_ETH_RXQ_DESC * 2;
+#endif /* CONFIG_MV_ETH_BM_CPU */
+	for_each_possible_cpu(cpu) {
+		cpuCtrl = pp->cpu_config[cpu];
+		for (i = 0; i < 256; i++) {
+			cpuCtrl->txq_tos_map[i] = MV_ETH_TXQ_INVALID;
+#ifdef CONFIG_MV_ETH_TX_SPECIAL
+		pp->tx_special_check = NULL;
+#endif /* CONFIG_MV_ETH_TX_SPECIAL */
+		}
+	}
+
+	mv_eth_port_config_parse(pp);
+
+#ifdef CONFIG_MV_PON
+	if (MV_PON_PORT(port)) {
+		set_bit(MV_ETH_F_MH_BIT, &(pp->flags));
+		pp->txp_num = MV_ETH_MAX_TCONT();
+		pp->txp = CONFIG_MV_PON_TXP_DEF;
+		for_each_possible_cpu(i)
+			pp->cpu_config[i]->txq = CONFIG_MV_PON_TXQ_DEF;
+	}
+#endif /* CONFIG_MV_PON */
+
+	for_each_possible_cpu(cpu) {
+		cpuCtrl = pp->cpu_config[cpu];
+#if defined(CONFIG_MV_NETA_TXDONE_IN_HRTIMER)
+		memset(&cpuCtrl->tx_done_timer, 0, sizeof(struct hrtimer));
+		hrtimer_init(&cpuCtrl->tx_done_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+		cpuCtrl->tx_done_timer.function = mv_eth_tx_done_hr_timer_callback;
+		tasklet_init(&cpuCtrl->tx_done_tasklet, mv_eth_tx_done_timer_callback,
+			     (unsigned long) cpuCtrl);
+#elif defined(CONFIG_MV_NETA_TXDONE_IN_TIMER)
+		memset(&cpuCtrl->tx_done_timer, 0, sizeof(struct timer_list));
+		cpuCtrl->tx_done_timer.function = mv_eth_tx_done_timer_callback;
+		cpuCtrl->tx_done_timer.data = (unsigned long)cpuCtrl;
+		init_timer(&cpuCtrl->tx_done_timer);
+#endif
+		memset(&cpuCtrl->cleanup_timer, 0, sizeof(struct timer_list));
+		cpuCtrl->cleanup_timer.function = mv_eth_cleanup_timer_callback;
+		cpuCtrl->cleanup_timer.data = (unsigned long)cpuCtrl;
+		init_timer(&cpuCtrl->cleanup_timer);
+		clear_bit(MV_ETH_F_TX_DONE_TIMER_BIT, &(cpuCtrl->flags));
+		clear_bit(MV_ETH_F_CLEANUP_TIMER_BIT, &(cpuCtrl->flags));
+	}
+
+	pp->weight = CONFIG_MV_ETH_RX_POLL_WEIGHT;
+
+	/* Init pool of external buffers for TSO, fragmentation, etc */
+	spin_lock_init(&pp->extLock);
+	pp->extBufSize = CONFIG_MV_ETH_EXTRA_BUF_SIZE;
+	pp->extArrStack = mvStackCreate(CONFIG_MV_ETH_EXTRA_BUF_NUM);
+	if (pp->extArrStack == NULL) {
+		printk(KERN_ERR "Error: failed create  extArrStack for port #%d\n", port);
+		return -ENOMEM;
+	}
+	for (i = 0; i < CONFIG_MV_ETH_EXTRA_BUF_NUM; i++) {
+		ext_buf = mvOsMalloc(CONFIG_MV_ETH_EXTRA_BUF_SIZE);
+		if (ext_buf == NULL) {
+			printk(KERN_WARNING "%s Warning: %d of %d extra buffers allocated\n",
+				__func__, i, CONFIG_MV_ETH_EXTRA_BUF_NUM);
+			break;
+		}
+		mvStackPush(pp->extArrStack, (MV_U32)ext_buf);
+	}
+
+#ifdef CONFIG_MV_ETH_STAT_DIST
+	pp->dist_stats.rx_dist = mvOsMalloc(sizeof(u32) * (CONFIG_MV_ETH_RXQ * CONFIG_MV_ETH_RXQ_DESC + 1));
+	if (pp->dist_stats.rx_dist != NULL) {
+		pp->dist_stats.rx_dist_size = CONFIG_MV_ETH_RXQ * CONFIG_MV_ETH_RXQ_DESC + 1;
+		memset(pp->dist_stats.rx_dist, 0, sizeof(u32) * pp->dist_stats.rx_dist_size);
+	} else
+		printk(KERN_ERR "ethPort #%d: Can't allocate %d bytes for rx_dist\n",
+		       pp->port, sizeof(u32) * (CONFIG_MV_ETH_RXQ * CONFIG_MV_ETH_RXQ_DESC + 1));
+
+	pp->dist_stats.tx_done_dist =
+	    mvOsMalloc(sizeof(u32) * (pp->txp_num * CONFIG_MV_ETH_TXQ * CONFIG_MV_ETH_TXQ_DESC + 1));
+	if (pp->dist_stats.tx_done_dist != NULL) {
+		pp->dist_stats.tx_done_dist_size = pp->txp_num * CONFIG_MV_ETH_TXQ * CONFIG_MV_ETH_TXQ_DESC + 1;
+		memset(pp->dist_stats.tx_done_dist, 0, sizeof(u32) * pp->dist_stats.tx_done_dist_size);
+	} else
+		printk(KERN_ERR "ethPort #%d: Can't allocate %d bytes for tx_done_dist\n",
+		       pp->port, sizeof(u32) * (pp->txp_num * CONFIG_MV_ETH_TXQ * CONFIG_MV_ETH_TXQ_DESC + 1));
+#endif /* CONFIG_MV_ETH_STAT_DIST */
+
+	return 0;
+}
+
+/***********************************************************************************
+ ***  noqueue net device
+ ***********************************************************************************/
+extern struct Qdisc noop_qdisc;
+void mv_eth_set_noqueue(struct net_device *dev, int enable)
+{
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+
+	if (dev->flags & IFF_UP) {
+		printk(KERN_ERR "%s: device or resource busy, take it down\n", dev->name);
+		return;
+	}
+	dev->tx_queue_len = enable ? 0 : CONFIG_MV_ETH_TXQ_DESC;
+
+	if (txq)
+		txq->qdisc_sleeping = &noop_qdisc;
+	else
+		printk(KERN_ERR "%s: txq #0 is NULL\n", dev->name);
+
+	printk(KERN_ERR "%s: device tx queue len is %d\n", dev->name, (int)dev->tx_queue_len);
+
+}
+
+/***********************************************************************************
+ ***  print RX bm_pool status
+ ***********************************************************************************/
+void mv_eth_pool_status_print(int pool)
+{
+	struct bm_pool *bm_pool = &mv_eth_pool[pool];
+
+	printk(KERN_ERR "\nRX Pool #%d: pkt_size=%d, BM-HW support - %s\n",
+	       pool, bm_pool->pkt_size, mv_eth_pool_bm(bm_pool) ? "Yes" : "No");
+
+	printk(KERN_ERR "bm_pool=%p, stack=%p, capacity=%d, buf_num=%d, port_map=0x%x missed=%d\n",
+	       bm_pool->bm_pool, bm_pool->stack, bm_pool->capacity, bm_pool->buf_num,
+		   bm_pool->port_map, bm_pool->missed);
+
+#ifdef CONFIG_MV_ETH_STAT_ERR
+	printk(KERN_ERR "Errors: skb_alloc_oom=%u, stack_empty=%u, stack_full=%u\n",
+	       bm_pool->stats.skb_alloc_oom, bm_pool->stats.stack_empty, bm_pool->stats.stack_full);
+#endif /* #ifdef CONFIG_MV_ETH_STAT_ERR */
+
+#ifdef CONFIG_MV_ETH_STAT_DBG
+	pr_info("     skb_alloc_ok=%u, bm_put=%u, stack_put=%u, stack_get=%u\n",
+	       bm_pool->stats.skb_alloc_ok, bm_pool->stats.bm_put, bm_pool->stats.stack_put, bm_pool->stats.stack_get);
+
+	pr_info("     skb_recycled_ok=%u, skb_recycled_err=%u, skb_hw_cookie_err=%u\n",
+	       bm_pool->stats.skb_recycled_ok, bm_pool->stats.skb_recycled_err, bm_pool->stats.skb_hw_cookie_err);
+#endif /* CONFIG_MV_ETH_STAT_DBG */
+
+	if (bm_pool->stack)
+		mvStackStatus(bm_pool->stack, 0);
+
+	memset(&bm_pool->stats, 0, sizeof(bm_pool->stats));
+}
+
+
+/***********************************************************************************
+ ***  print ext pool status
+ ***********************************************************************************/
+void mv_eth_ext_pool_print(struct eth_port *pp)
+{
+	printk(KERN_ERR "\nExt Pool Stack: bufSize = %u bytes\n", pp->extBufSize);
+	mvStackStatus(pp->extArrStack, 0);
+}
+
+/***********************************************************************************
+ ***  print net device status
+ ***********************************************************************************/
+void mv_eth_netdev_print(struct net_device *dev)
+{
+	pr_info("%s net_device status:\n\n", dev->name);
+	pr_info("ifIdx=%d, mtu=%u, pkt_size=%d, buf_size=%d, MAC=" MV_MACQUAD_FMT "\n",
+	       dev->ifindex, dev->mtu, RX_PKT_SIZE(dev->mtu),
+		RX_BUF_SIZE(RX_PKT_SIZE(dev->mtu)), MV_MACQUAD(dev->dev_addr));
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+	pr_info("features=0x%x, hw_features=0x%x, wanted_features=0x%x, vlan_features=0x%x\n",
+			(unsigned int)(dev->features), (unsigned int)(dev->hw_features),
+			(unsigned int)(dev->wanted_features), (unsigned int)(dev->vlan_features));
+#else
+	pr_info("features=0x%x, vlan_features=0x%x\n",
+		 (unsigned int)(dev->features), (unsigned int)(dev->vlan_features));
+#endif
+
+	pr_info("flags=0x%x, gflags=0x%x, priv_flags=0x%x: running=%d, oper_up=%d\n",
+		(unsigned int)(dev->flags), (unsigned int)(dev->gflags), (unsigned int)(dev->priv_flags),
+		netif_running(dev), netif_oper_up(dev));
+	pr_info("uc_promisc=%d, promiscuity=%d, allmulti=%d\n", dev->uc_promisc, dev->promiscuity, dev->allmulti);
+
+	if (mv_eth_netdev_find(dev->ifindex)) {
+		struct eth_port *pp = MV_ETH_PRIV(dev);
+		if (pp->tagged)
+			mv_mux_netdev_print_all(pp->port);
+	} else {
+		/* Check if this is mux netdevice */
+		if (mv_mux_netdev_find(dev->ifindex) != -1)
+			mv_mux_netdev_print(dev);
+	}
+}
+
+void mv_eth_status_print(void)
+{
+	printk(KERN_ERR "totals: ports=%d\n", mv_eth_ports_num);
+
+#ifdef CONFIG_MV_NETA_SKB_RECYCLE
+	printk(KERN_ERR "SKB recycle = %s\n", mv_ctrl_recycle ? "Enabled" : "Disabled");
+#endif /* CONFIG_MV_NETA_SKB_RECYCLE */
+
+#ifdef CONFIG_MV_ETH_PNC
+	if (MV_NETA_PNC_CAP())
+		pr_err("PnC control = %s\n", mv_eth_pnc_ctrl_en ? "Enabled" : "Disabled");
+#endif /* CONFIG_MV_ETH_PNC */
+}
+
+/***********************************************************************************
+ ***  print Ethernet port status
+ ***********************************************************************************/
+void mv_eth_port_status_print(unsigned int port)
+{
+	int txp, q;
+	struct eth_port *pp = mv_eth_port_by_id(port);
+	struct tx_queue *txq_ctrl;
+	struct cpu_ctrl	*cpuCtrl;
+
+	if (!pp)
+		return;
+
+	pr_info("\nport=%d, flags=0x%lx, rx_weight=%d, %s\n", port, pp->flags, pp->weight,
+			pp->tagged ? "tagged" : "untagged");
+
+	if (pp->flags & MV_ETH_F_CONNECT_LINUX)
+		printk(KERN_ERR "%s: ", pp->dev->name);
+	else
+		printk(KERN_ERR "port %d: ", port);
+
+	mv_eth_link_status_print(port);
+
+#ifdef CONFIG_MV_ETH_NFP
+	printk(KERN_ERR "NFP = ");
+	if (pp->flags & MV_ETH_F_NFP_EN)
+		printk(KERN_CONT "Enabled\n");
+	else
+		printk(KERN_CONT "Disabled\n");
+#endif /* CONFIG_MV_ETH_NFP */
+	if (pp->pm_mode == 1)
+		printk(KERN_CONT "pm - wol\n");
+	else
+		printk(KERN_CONT "pm - suspend\n");
+
+	printk(KERN_ERR "rxq_coal(pkts)[ q]   = ");
+	for (q = 0; q < CONFIG_MV_ETH_RXQ; q++)
+		printk(KERN_CONT "%3d ", mvNetaRxqPktsCoalGet(port, q));
+
+	printk(KERN_CONT "\n");
+	printk(KERN_ERR "rxq_coal(usec)[ q]   = ");
+	for (q = 0; q < CONFIG_MV_ETH_RXQ; q++)
+		printk(KERN_CONT "%3d ", mvNetaRxqTimeCoalGet(port, q));
+
+	printk(KERN_CONT "\n");
+	printk(KERN_ERR "rxq_desc(num)[ q]    = ");
+	for (q = 0; q < CONFIG_MV_ETH_RXQ; q++)
+		printk(KERN_CONT "%3d ", pp->rxq_ctrl[q].rxq_size);
+
+	printk(KERN_CONT "\n");
+	for (txp = 0; txp < pp->txp_num; txp++) {
+		printk(KERN_ERR "txq_coal(pkts)[%2d.q] = ", txp);
+		for (q = 0; q < CONFIG_MV_ETH_TXQ; q++)
+			printk(KERN_CONT "%3d ", mvNetaTxDonePktsCoalGet(port, txp, q));
+		printk(KERN_CONT "\n");
+
+		printk(KERN_ERR "txq_mod(F,C,H)[%2d.q] = ", txp);
+		for (q = 0; q < CONFIG_MV_ETH_TXQ; q++) {
+			int val, mode;
+
+			mode = mv_eth_ctrl_txq_mode_get(port, txp, q, &val);
+			if (mode == MV_ETH_TXQ_CPU)
+				printk(KERN_CONT " C%-d ", val);
+			else if (mode == MV_ETH_TXQ_HWF)
+				printk(KERN_CONT " H%-d ", val);
+			else
+				printk(KERN_CONT "  F ");
+		}
+		printk(KERN_CONT "\n");
+
+		printk(KERN_ERR "txq_desc(num) [%2d.q] = ", txp);
+		for (q = 0; q < CONFIG_MV_ETH_TXQ; q++) {
+			struct tx_queue *txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + q];
+			printk(KERN_CONT "%3d ", txq_ctrl->txq_size);
+		}
+		printk(KERN_CONT "\n");
+	}
+	printk(KERN_ERR "\n");
+
+#if defined(CONFIG_MV_NETA_TXDONE_ISR)
+	printk(KERN_ERR "Do tx_done in NAPI context triggered by ISR\n");
+	for (txp = 0; txp < pp->txp_num; txp++) {
+		printk(KERN_ERR "txcoal(pkts)[%2d.q] = ", txp);
+		for (q = 0; q < CONFIG_MV_ETH_TXQ; q++)
+			printk(KERN_CONT "%3d ", mvNetaTxDonePktsCoalGet(port, txp, q));
+		printk(KERN_CONT "\n");
+	}
+	printk(KERN_ERR "\n");
+#elif defined(CONFIG_MV_NETA_TXDONE_IN_HRTIMER)
+	pr_err("Do tx_done in TX or high-resolution Timer's tasklet: tx_done_threshold=%d timer_interval=%d usec\n",
+		mv_ctrl_txdone, mv_eth_tx_done_hrtimer_period_get());
+#elif defined(CONFIG_MV_NETA_TXDONE_IN_TIMER)
+	pr_err("Do tx_done in TX or regular Timer context: tx_done_threshold=%d timer_interval=%d msec\n",
+		mv_ctrl_txdone, CONFIG_MV_NETA_TX_DONE_TIMER_PERIOD);
+#endif /* CONFIG_MV_NETA_TXDONE_ISR */
+
+	printk(KERN_ERR "txp=%d, zero_pad=%s, mh_en=%s (0x%04x), tx_cmd=0x%08x\n",
+	       pp->txp, (pp->flags & MV_ETH_F_NO_PAD) ? "Disabled" : "Enabled",
+	       (pp->flags & MV_ETH_F_MH) ? "Enabled" : "Disabled", pp->tx_mh, pp->hw_cmd);
+
+	printk(KERN_CONT "\n");
+#ifdef CONFIG_MV_NETA_TXDONE_ISR
+	pr_cont("CPU:  txq  causeRxTx   napi txqMask txqOwner flags\n");
+#else
+	pr_cont("CPU:  txq  causeRxTx   napi txqMask txqOwner flags  timer\n");
+#endif
+	{
+		int cpu;
+		for_each_possible_cpu(cpu) {
+			cpuCtrl = pp->cpu_config[cpu];
+			if (cpuCtrl != NULL)
+#if defined(CONFIG_MV_NETA_TXDONE_ISR)
+				pr_err("  %d:   %d   0x%08x   %d    0x%02x    0x%02x    0x%02x\n",
+					cpu, cpuCtrl->txq, cpuCtrl->causeRxTx,
+					test_bit(NAPI_STATE_SCHED, &cpuCtrl->napi->state),
+					cpuCtrl->cpuTxqMask, cpuCtrl->cpuTxqOwner,
+					(unsigned)cpuCtrl->flags);
+#elif defined(CONFIG_MV_NETA_TXDONE_IN_HRTIMER)
+				pr_err("  %d:   %d   0x%08x   %d    0x%02x    0x%02x    0x%02x    %d\n",
+					cpu, cpuCtrl->txq, cpuCtrl->causeRxTx,
+					test_bit(NAPI_STATE_SCHED, &cpuCtrl->napi->state),
+					cpuCtrl->cpuTxqMask, cpuCtrl->cpuTxqOwner,
+					(unsigned)cpuCtrl->flags, !(hrtimer_active(&cpuCtrl->tx_done_timer)));
+#elif defined(CONFIG_MV_NETA_TXDONE_IN_TIMER)
+				pr_err("  %d:   %d   0x%08x   %d    0x%02x    0x%02x    0x%02x    %d\n",
+					cpu, cpuCtrl->txq, cpuCtrl->causeRxTx,
+					test_bit(NAPI_STATE_SCHED, &cpuCtrl->napi->state),
+					cpuCtrl->cpuTxqMask, cpuCtrl->cpuTxqOwner,
+					(unsigned)cpuCtrl->flags, timer_pending(&cpuCtrl->tx_done_timer));
+#endif
+		}
+	}
+
+	printk(KERN_CONT "\n");
+	printk(KERN_CONT "TXQ: SharedFlag  nfpCounter   cpu_owner\n");
+
+	for (q = 0; q < CONFIG_MV_ETH_TXQ; q++) {
+		int cpu;
+
+		txq_ctrl = &pp->txq_ctrl[pp->txp * CONFIG_MV_ETH_TXQ + q];
+		if (txq_ctrl != NULL)
+			printk(KERN_CONT " %d:     %2lu        %d",
+				q, (txq_ctrl->flags & MV_ETH_F_TX_SHARED), txq_ctrl->nfpCounter);
+
+			printk(KERN_CONT "        [");
+			for_each_possible_cpu(cpu)
+				printk(KERN_CONT "%2d ", txq_ctrl->cpu_owner[cpu]);
+
+			printk(KERN_CONT "]\n");
+	}
+	printk(KERN_CONT "\n");
+
+	if (pp->dev)
+		mv_eth_netdev_print(pp->dev);
+
+	if (pp->tagged)
+		mv_mux_netdev_print_all(port);
+}
+
+/***********************************************************************************
+ ***  print port statistics
+ ***********************************************************************************/
+void mv_eth_port_stats_print(unsigned int port)
+{
+	struct eth_port *pp = mv_eth_port_by_id(port);
+	struct port_stats *stat = NULL;
+	struct tx_queue *txq_ctrl;
+	int txp, queue, cpu = smp_processor_id();
+	u32 total_rx_ok, total_rx_fill_ok;
+
+	pr_info("\n====================================================\n");
+	pr_info("ethPort_%d: Statistics (running on cpu#%d)", port, cpu);
+	pr_info("----------------------------------------------------\n\n");
+
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return;
+	}
+	stat = &(pp->stats);
+
+#ifdef CONFIG_MV_ETH_STAT_ERR
+	printk(KERN_ERR "Errors:\n");
+	printk(KERN_ERR "rx_error......................%10u\n", stat->rx_error);
+	printk(KERN_ERR "tx_timeout....................%10u\n", stat->tx_timeout);
+	printk(KERN_ERR "tx_netif_stop.................%10u\n", stat->netif_stop);
+	printk(KERN_ERR "netif_wake....................%10u\n", stat->netif_wake);
+	printk(KERN_ERR "ext_stack_empty...............%10u\n", stat->ext_stack_empty);
+	printk(KERN_ERR "ext_stack_full ...............%10u\n", stat->ext_stack_full);
+	printk(KERN_ERR "state_err.....................%10u\n", stat->state_err);
+#endif /* CONFIG_MV_ETH_STAT_ERR */
+
+#ifdef CONFIG_MV_ETH_STAT_INF
+	pr_info("\nEvents:\n");
+
+	pr_info("irq[cpu]            = ");
+	for_each_possible_cpu(cpu)
+		printk(KERN_CONT "%8d ", stat->irq[cpu]);
+
+	pr_info("irq_none[cpu]       = ");
+	for_each_possible_cpu(cpu)
+		printk(KERN_CONT "%8d ", stat->irq_err[cpu]);
+
+	pr_info("poll[cpu]           = ");
+	for_each_possible_cpu(cpu)
+		printk(KERN_CONT "%8d ", stat->poll[cpu]);
+
+	pr_info("poll_exit[cpu]      = ");
+	for_each_possible_cpu(cpu)
+		printk(KERN_CONT "%8d ", stat->poll_exit[cpu]);
+
+	pr_info("tx_timer_event[cpu] = ");
+	for_each_possible_cpu(cpu)
+		printk(KERN_CONT "%8d ", stat->tx_done_timer_event[cpu]);
+
+	pr_info("tx_timer_add[cpu]   = ");
+	for_each_possible_cpu(cpu)
+		printk(KERN_CONT "%8d ", stat->tx_done_timer_add[cpu]);
+
+	pr_info("\n");
+	printk(KERN_ERR "tx_fragmentation..............%10u\n", stat->tx_fragment);
+	printk(KERN_ERR "tx_done_event.................%10u\n", stat->tx_done);
+	printk(KERN_ERR "cleanup_timer_event...........%10u\n", stat->cleanup_timer);
+	printk(KERN_ERR "link..........................%10u\n", stat->link);
+	printk(KERN_ERR "netdev_stop...................%10u\n", stat->netdev_stop);
+#ifdef CONFIG_MV_ETH_RX_SPECIAL
+	printk(KERN_ERR "rx_special....................%10u\n", stat->rx_special);
+#endif /* CONFIG_MV_ETH_RX_SPECIAL */
+#ifdef CONFIG_MV_ETH_TX_SPECIAL
+	printk(KERN_ERR "tx_special....................%10u\n", stat->tx_special);
+#endif /* CONFIG_MV_ETH_TX_SPECIAL */
+#endif /* CONFIG_MV_ETH_STAT_INF */
+
+	printk(KERN_ERR "\n");
+	total_rx_ok = total_rx_fill_ok = 0;
+	printk(KERN_ERR "RXQ:       rx_ok      rx_fill_ok     missed\n\n");
+	for (queue = 0; queue < CONFIG_MV_ETH_RXQ; queue++) {
+		u32 rxq_ok = 0, rxq_fill = 0;
+
+#ifdef CONFIG_MV_ETH_STAT_DBG
+		rxq_ok = stat->rxq[queue];
+		rxq_fill = stat->rxq_fill[queue];
+#endif /* CONFIG_MV_ETH_STAT_DBG */
+
+		printk(KERN_ERR "%3d:  %10u    %10u          %d\n",
+			queue, rxq_ok, rxq_fill,
+			pp->rxq_ctrl[queue].missed);
+		total_rx_ok += rxq_ok;
+		total_rx_fill_ok += rxq_fill;
+	}
+	printk(KERN_ERR "SUM:  %10u    %10u\n", total_rx_ok, total_rx_fill_ok);
+
+#ifdef CONFIG_MV_ETH_STAT_DBG
+	{
+		printk(KERN_ERR "\n====================================================\n");
+		printk(KERN_ERR "ethPort_%d: Debug statistics", port);
+		printk(KERN_CONT "\n-------------------------------\n");
+
+		printk(KERN_ERR "\n");
+
+		printk(KERN_ERR "rx_nfp....................%10u\n", stat->rx_nfp);
+		printk(KERN_ERR "rx_nfp_drop...............%10u\n", stat->rx_nfp_drop);
+
+		printk(KERN_ERR "rx_gro....................%10u\n", stat->rx_gro);
+		printk(KERN_ERR "rx_gro_bytes .............%10u\n", stat->rx_gro_bytes);
+
+		printk(KERN_ERR "tx_tso....................%10u\n", stat->tx_tso);
+		printk(KERN_ERR "tx_tso_bytes .............%10u\n", stat->tx_tso_bytes);
+
+		printk(KERN_ERR "rx_netif..................%10u\n", stat->rx_netif);
+		printk(KERN_ERR "rx_drop_sw................%10u\n", stat->rx_drop_sw);
+		printk(KERN_ERR "rx_csum_hw................%10u\n", stat->rx_csum_hw);
+		printk(KERN_ERR "rx_csum_sw................%10u\n", stat->rx_csum_sw);
+
+
+		printk(KERN_ERR "tx_skb_free...............%10u\n", stat->tx_skb_free);
+		printk(KERN_ERR "tx_sg.....................%10u\n", stat->tx_sg);
+		printk(KERN_ERR "tx_csum_hw................%10u\n", stat->tx_csum_hw);
+		printk(KERN_ERR "tx_csum_sw................%10u\n", stat->tx_csum_sw);
+
+		printk(KERN_ERR "ext_stack_get.............%10u\n", stat->ext_stack_get);
+		printk(KERN_ERR "ext_stack_put ............%10u\n", stat->ext_stack_put);
+
+		printk(KERN_ERR "\n");
+	}
+#endif /* CONFIG_MV_ETH_STAT_DBG */
+
+	printk(KERN_ERR "\n");
+	printk(KERN_ERR "TXP-TXQ:  count        send          done      no_resource\n\n");
+
+	for (txp = 0; txp < pp->txp_num; txp++) {
+		for (queue = 0; queue < CONFIG_MV_ETH_TXQ; queue++) {
+			u32 txq_tx = 0, txq_txdone = 0, txq_err = 0;
+
+			txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + queue];
+#ifdef CONFIG_MV_ETH_STAT_DBG
+			txq_tx = txq_ctrl->stats.txq_tx;
+			txq_txdone =  txq_ctrl->stats.txq_txdone;
+#endif /* CONFIG_MV_ETH_STAT_DBG */
+#ifdef CONFIG_MV_ETH_STAT_ERR
+			txq_err = txq_ctrl->stats.txq_err;
+#endif /* CONFIG_MV_ETH_STAT_ERR */
+
+			printk(KERN_ERR "%d-%d:      %3d    %10u    %10u    %10u\n",
+			       txp, queue, txq_ctrl->txq_count, txq_tx,
+			       txq_txdone, txq_err);
+
+			memset(&txq_ctrl->stats, 0, sizeof(txq_ctrl->stats));
+		}
+	}
+	printk(KERN_ERR "\n\n");
+
+	memset(stat, 0, sizeof(struct port_stats));
+
+	/* RX pool statistics */
+#ifdef CONFIG_MV_ETH_BM_CPU
+	if (MV_NETA_BM_CAP() && pp->pool_short)
+		mv_eth_pool_status_print(pp->pool_short->pool);
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+	if (pp->pool_long)
+		mv_eth_pool_status_print(pp->pool_long->pool);
+
+		mv_eth_ext_pool_print(pp);
+
+#ifdef CONFIG_MV_ETH_STAT_DIST
+	{
+		int i;
+		struct dist_stats *dist_stats = &(pp->dist_stats);
+
+		if (dist_stats->rx_dist) {
+			printk(KERN_ERR "\n      Linux Path RX distribution\n");
+			for (i = 0; i < dist_stats->rx_dist_size; i++) {
+				if (dist_stats->rx_dist[i] != 0) {
+					printk(KERN_ERR "%3d RxPkts - %u times\n", i, dist_stats->rx_dist[i]);
+					dist_stats->rx_dist[i] = 0;
+				}
+			}
+		}
+
+		if (dist_stats->tx_done_dist) {
+			printk(KERN_ERR "\n      tx-done distribution\n");
+			for (i = 0; i < dist_stats->tx_done_dist_size; i++) {
+				if (dist_stats->tx_done_dist[i] != 0) {
+					printk(KERN_ERR "%3d TxDoneDesc - %u times\n", i, dist_stats->tx_done_dist[i]);
+					dist_stats->tx_done_dist[i] = 0;
+				}
+			}
+		}
+#ifdef CONFIG_MV_ETH_TSO
+		if (dist_stats->tx_tso_dist) {
+			printk(KERN_ERR "\n      TSO stats\n");
+			for (i = 0; i < dist_stats->tx_tso_dist_size; i++) {
+				if (dist_stats->tx_tso_dist[i] != 0) {
+					printk(KERN_ERR "%3d KBytes - %u times\n", i, dist_stats->tx_tso_dist[i]);
+					dist_stats->tx_tso_dist[i] = 0;
+				}
+			}
+		}
+#endif /* CONFIG_MV_ETH_TSO */
+	}
+#endif /* CONFIG_MV_ETH_STAT_DIST */
+}
+
+
+static int mv_eth_port_cleanup(int port)
+{
+	int txp, txq, rxq, i;
+	struct eth_port *pp;
+	struct tx_queue *txq_ctrl;
+	struct rx_queue *rxq_ctrl;
+
+	pp = mv_eth_port_by_id(port);
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return -1;
+	}
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		printk(KERN_ERR "%s: port %d is started, cannot cleanup\n", __func__, port);
+		return -1;
+	}
+
+	/* Reset Tx ports */
+	for (txp = 0; txp < pp->txp_num; txp++) {
+		if (mv_eth_txp_reset(port, txp))
+			printk(KERN_ERR "Warning: Port %d Tx port %d reset failed\n", port, txp);
+	}
+
+	/* Delete Tx queues */
+	for (txp = 0; txp < pp->txp_num; txp++) {
+		for (txq = 0; txq < CONFIG_MV_ETH_TXQ; txq++) {
+			txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_ETH_TXQ + txq];
+			mv_eth_txq_delete(pp, txq_ctrl);
+		}
+	}
+
+	mvOsFree(pp->txq_ctrl);
+	pp->txq_ctrl = NULL;
+
+#ifdef CONFIG_MV_ETH_STAT_DIST
+	/* Free Tx Done distribution statistics */
+	mvOsFree(pp->dist_stats.tx_done_dist);
+#endif
+
+	/* Reset RX ports */
+	if (mv_eth_rx_reset(port))
+		printk(KERN_ERR "Warning: Rx port %d reset failed\n", port);
+
+	/* Delete Rx queues */
+	for (rxq = 0; rxq < CONFIG_MV_ETH_RXQ; rxq++) {
+		rxq_ctrl = &pp->rxq_ctrl[rxq];
+		mvNetaRxqDelete(pp->port, rxq);
+		rxq_ctrl->q = NULL;
+	}
+
+	mvOsFree(pp->rxq_ctrl);
+	pp->rxq_ctrl = NULL;
+
+#ifdef CONFIG_MV_ETH_STAT_DIST
+	/* Free Rx distribution statistics */
+	mvOsFree(pp->dist_stats.rx_dist);
+#endif
+
+	/* Free buffer pools */
+	if (pp->pool_long) {
+		mv_eth_pool_free(pp->pool_long->pool, pp->pool_long_num);
+		pp->pool_long->port_map &= ~(1 << pp->port);
+		pp->pool_long = NULL;
+	}
+#ifdef CONFIG_MV_ETH_BM_CPU
+	if (MV_NETA_BM_CAP() && pp->pool_short) {
+		mv_eth_pool_free(pp->pool_short->pool, pp->pool_short_num);
+		pp->pool_short->port_map &= ~(1 << pp->port);
+		pp->pool_short = NULL;
+	}
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+	/* Clear Marvell Header related modes - will be set again if needed on re-init */
+	mvNetaMhSet(port, MV_TAG_TYPE_NONE);
+
+	/* Clear any forced link, speed and duplex */
+	mv_force_port_link_speed_fc(port, MV_ETH_SPEED_AN, 0);
+
+	mvNetaPortDestroy(port);
+
+	if (pp->flags & MV_ETH_F_CONNECT_LINUX)
+		for (i = 0; i < CONFIG_MV_ETH_NAPI_GROUPS; i++)
+			netif_napi_del(pp->napiGroup[i]);
+
+	return 0;
+}
+
+
+int mv_eth_all_ports_cleanup(void)
+{
+	int port, pool, status = 0;
+
+	for (port = 0; port < mv_eth_ports_num; port++) {
+		status = mv_eth_port_cleanup(port);
+		if (status != 0) {
+			printk(KERN_ERR "Error: mv_eth_port_cleanup failed on port %d, stopping all ports cleanup\n", port);
+			return status;
+		}
+	}
+
+	for (pool = 0; pool < MV_ETH_BM_POOLS; pool++)
+		mv_eth_pool_destroy(pool);
+
+	for (port = 0; port < mv_eth_ports_num; port++) {
+		if (mv_eth_ports[port])
+			mvOsFree(mv_eth_ports[port]);
+	}
+
+	memset(mv_eth_ports, 0, (mv_eth_ports_num * sizeof(struct eth_port *)));
+	/* Note: not freeing mv_eth_ports - we will reuse them */
+
+	return 0;
+}
+
+#ifdef CONFIG_MV_ETH_PNC_WOL
+
+#define DEF_WOL_SIZE	42
+MV_U8	wol_data[DEF_WOL_SIZE] = { 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+				   0x00, 0x00, 0x00, 0x00, 0x00, 0x39, 0x08, 0x00,
+				   0x45, 0x00, 0x00, 0x4E, 0x00, 0x00, 0x00, 0x00,
+				   0x00, 0x11, 0x00, 0x00, 0xc0, 0xa8, 0x01, 0xFA,
+				   0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0x00, 0x89,
+				   0x00, 0x3A };
+
+MV_U8	wol_mask[DEF_WOL_SIZE] = { 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+				   0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+				   0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+				   0x00, 0xff, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+				   0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+				   0xff, 0xff };
+
+int mv_eth_wol_pkts_check(int port)
+{
+	struct eth_port	    *pp = mv_eth_port_by_id(port);
+	struct neta_rx_desc *rx_desc;
+	struct eth_pbuf     *pkt;
+	struct bm_pool      *pool;
+	int                 rxq, rx_done, i, wakeup, ruleId;
+	MV_NETA_RXQ_CTRL    *rx_ctrl;
+
+	wakeup = 0;
+	for (rxq = 0; rxq < CONFIG_MV_ETH_RXQ; rxq++) {
+		rx_ctrl = pp->rxq_ctrl[rxq].q;
+
+		if (rx_ctrl == NULL)
+			continue;
+
+		rx_done = mvNetaRxqBusyDescNumGet(pp->port, rxq);
+
+		for (i = 0; i < rx_done; i++) {
+			rx_desc = mvNetaRxqNextDescGet(rx_ctrl);
+			mvOsCacheLineInv(pp->dev->dev.parent, rx_desc);
+
+#if defined(MV_CPU_BE)
+			mvNetaRxqDescSwap(rx_desc);
+#endif /* MV_CPU_BE */
+
+			pkt = (struct eth_pbuf *)rx_desc->bufCookie;
+			mvOsCacheInvalidate(pp->dev->dev.parent, pkt->pBuf + pkt->offset, rx_desc->dataSize);
+
+			if (mv_pnc_wol_pkt_match(pp->port, pkt->pBuf + pkt->offset, rx_desc->dataSize, &ruleId))
+				wakeup = 1;
+
+			pool = &mv_eth_pool[pkt->pool];
+			mv_eth_rxq_refill(pp, rxq, pkt, pool, rx_desc);
+
+			if (wakeup) {
+				printk(KERN_INFO "packet match WoL rule=%d found on port=%d, rxq=%d\n",
+						ruleId, port, rxq);
+				i++;
+				break;
+			}
+		}
+		if (i) {
+			mvNetaRxqDescNumUpdate(pp->port, rxq, i, i);
+			printk(KERN_INFO "port=%d, rxq=%d: %d of %d packets dropped\n", port, rxq, i, rx_done);
+		}
+		if (wakeup) {
+			/* Failed enter WoL mode */
+			return 1;
+		}
+	}
+	return 0;
+}
+
+void mv_eth_wol_wakeup(int port)
+{
+	int rxq;
+
+
+	/* Restore RXQ coalescing */
+	for (rxq = 0; rxq < CONFIG_MV_ETH_RXQ; rxq++) {
+		mvNetaRxqPktsCoalSet(port, rxq, CONFIG_MV_ETH_RX_COAL_PKTS);
+		mvNetaRxqTimeCoalSet(port, rxq, CONFIG_MV_ETH_RX_COAL_USEC);
+	}
+
+	/* Set PnC to Active filtering mode */
+	mv_pnc_wol_wakeup(port);
+	printk(KERN_INFO "Exit wakeOnLan mode on port #%d\n", port);
+
+}
+
+int mv_eth_wol_sleep(int port)
+{
+	int rxq, cpu;
+	struct eth_port *pp;
+
+	/* Set PnC to WoL filtering mode */
+	mv_pnc_wol_sleep(port);
+
+	pp = mv_eth_port_by_id(port);
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return 1;
+	}
+
+	mv_eth_interrupts_mask(pp);
+
+	/* wait until all napi stop transmit */
+	for_each_possible_cpu(cpu) {
+		if (pp->cpu_config[cpu]->napi)
+			napi_synchronize(pp->cpu_config[cpu]->napi);
+	}
+
+	/* Check received packets in all RXQs */
+	/* If match one of WoL pattern - wakeup, not match - drop */
+	if (mv_eth_wol_pkts_check(port)) {
+		/* Set PNC to Active filtering mode */
+		mv_pnc_wol_wakeup(port);
+		printk(KERN_INFO "Failed to enter wakeOnLan mode on port #%d\n", port);
+		return 1;
+	}
+	printk(KERN_INFO "Enter wakeOnLan mode on port #%d\n", port);
+
+	/* Set RXQ coalescing to minimum */
+	for (rxq = 0; rxq < CONFIG_MV_ETH_RXQ; rxq++) {
+		mvNetaRxqPktsCoalSet(port, rxq, 0);
+		mvNetaRxqTimeCoalSet(port, rxq, 0);
+	}
+
+	mv_eth_interrupts_unmask(pp);
+
+	return 0;
+}
+#endif /* CONFIG_MV_ETH_PNC_WOL */
+
+
+#ifdef CONFIG_MV_PON
+/* PON link status api */
+PONLINKSTATUSPOLLFUNC pon_link_status_polling_func;
+
+void pon_link_status_notify_func(MV_BOOL link_state)
+{
+	struct eth_port *pon_port = mv_eth_port_by_id(MV_PON_PORT_ID_GET());
+
+	if ((pon_port->flags & MV_ETH_F_STARTED) == 0) {
+		/* Ignore link event if port is down - link status will be updated on start */
+#ifdef CONFIG_MV_NETA_DEBUG_CODE
+		pr_info("PON port: Link event (%s) when port is down\n",
+			link_state ? "Up" : "Down");
+#endif /* CONFIG_MV_NETA_DEBUG_CODE */
+		return;
+	}
+	mv_eth_link_event(pon_port, 1);
+}
+
+/* called by PON module */
+void mv_pon_link_state_register(PONLINKSTATUSPOLLFUNC poll_func, PONLINKSTATUSNOTIFYFUNC *notify_func)
+{
+	pon_link_status_polling_func = poll_func;
+	*notify_func = pon_link_status_notify_func;
+}
+
+MV_BOOL mv_pon_link_status(void)
+{
+	if (pon_link_status_polling_func != NULL)
+		return pon_link_status_polling_func();
+	printk(KERN_ERR "pon_link_status_polling_func is uninitialized\n");
+	return MV_FALSE;
+}
+#endif /* CONFIG_MV_PON */
+
+/* Support for platform driver */
+
+#ifdef CONFIG_PM
+
+int mv_eth_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct eth_port *pp;
+	int port = pdev->id;
+
+	pm_flag = 0;
+
+	pp = mv_eth_port_by_id(port);
+	if (!pp)
+		return 0;
+
+	if (pp->pm_mode == 0) {
+		if (mv_eth_port_suspend(port)) {
+			printk(KERN_ERR "%s: port #%d suspend failed.\n", __func__, port);
+			return MV_ERROR;
+		}
+
+		/* BUG WA - if port 0 clock is down, we can't interrupt by magic packet */
+		if ((port != 0) || (wol_ports_bmp == 0)) {
+			/* Set Port Power State to 0 */
+#ifdef CONFIG_ARCH_MVEBU
+			{
+				/* get the relevant clock from the clk lib */
+				struct clk *clk;
+				clk = devm_clk_get(&pdev->dev, NULL);
+				clk_disable(clk);
+			}
+#else
+			mvCtrlPwrClckSet(ETH_GIG_UNIT_ID, port, 0);
+#endif
+		}
+	} else {
+
+#ifdef CONFIG_MV_ETH_PNC_WOL
+		if (pp->flags & MV_ETH_F_STARTED)
+			if (mv_eth_wol_sleep(port)) {
+				printk(KERN_ERR "%s: port #%d  WOL failed.\n", __func__, port);
+				return MV_ERROR;
+			}
+#else
+		printk(KERN_INFO "%s:WARNING port #%d in WOL mode but PNC WOL is not defined.\n", __func__, port);
+
+#endif /*CONFIG_MV_ETH_PNC_WOL*/
+	}
+
+	return MV_OK;
+}
+
+
+int mv_eth_resume(struct platform_device *pdev)
+{
+	struct eth_port *pp;
+	int port = pdev->id;
+
+	pp = mv_eth_port_by_id(port);
+	if (!pp)
+		return 0;
+
+	if (pp->pm_mode == 0) {
+		/* Set Port Power State to 1 */
+#ifdef CONFIG_ARCH_MVEBU
+		{
+			struct clk *clk;
+			clk = devm_clk_get(&pdev->dev, NULL);
+			clk_enable(clk);
+		}
+#else
+		mvCtrlPwrClckSet(ETH_GIG_UNIT_ID, port, 1);
+#endif
+
+		mdelay(10);
+		if (mv_eth_port_resume(port)) {
+			printk(KERN_ERR "%s: port #%d resume failed.\n", __func__, port);
+			return MV_ERROR;
+		}
+	} else
+#ifdef CONFIG_MV_ETH_PNC_WOL
+		mv_eth_wol_wakeup(port);
+#else
+		printk(KERN_ERR "%s:WARNING port #%d in WOL mode but PNC WOL is not defined.\n", __func__, port);
+#endif /*CONFIG_MV_ETH_PNC_WOL*/
+
+	return MV_OK;
+}
+
+
+#endif	/*CONFIG_PM*/
+
+static int mv_eth_shared_remove(void)
+{
+	mv_eth_sysfs_exit();
+	return 0;
+}
+
+static int mv_eth_remove(struct platform_device *pdev)
+{
+	int port = pdev->id;
+	struct eth_port *pp = mv_eth_port_by_id(port);
+
+	printk(KERN_INFO "Removing Marvell Ethernet Driver - port #%d\n", port);
+	if (pp == NULL)
+		pr_err("Port %d does not exist\n", port);
+
+	mv_eth_priv_cleanup(pp);
+
+#ifdef CONFIG_OF
+	mv_eth_shared_remove();
+#endif /* CONFIG_OF */
+
+#ifdef CONFIG_NETMAP
+	netmap_detach(pp->dev);
+#endif /* CONFIG_NETMAP */
+
+	return 0;
+}
+
+static void mv_eth_shutdown(struct platform_device *pdev)
+{
+	printk(KERN_INFO "Shutting Down Marvell Ethernet Driver\n");
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id mv_neta_match[] = {
+	{ .compatible = "marvell,neta" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, mv_neta_match);
+
+static int mv_eth_port_num_get(struct platform_device *pdev)
+{
+	int port_num = 0;
+	int tbl_id;
+	struct device_node *np = pdev->dev.of_node;
+
+	for (tbl_id = 0; tbl_id < (sizeof(mv_neta_match) / sizeof(struct of_device_id)); tbl_id++) {
+		for_each_compatible_node(np, NULL, mv_neta_match[tbl_id].compatible)
+			port_num++;
+	}
+
+	return port_num;
+}
+#endif /* CONFIG_OF */
+
+static struct platform_driver mv_eth_driver = {
+	.probe = mv_eth_probe,
+	.remove = mv_eth_remove,
+	.shutdown = mv_eth_shutdown,
+#ifdef CONFIG_PM
+	.suspend = mv_eth_suspend,
+	.resume = mv_eth_resume,
+#endif /* CONFIG_PM */
+	.driver = {
+		.name = MV_NETA_PORT_NAME,
+#ifdef CONFIG_OF
+		.of_match_table = mv_neta_match,
+#endif /* CONFIG_OF */
+	},
+};
+
+#ifdef CONFIG_OF
+module_platform_driver(mv_eth_driver);
+#else
+static int __init mv_eth_init_module(void)
+{
+	int err = platform_driver_register(&mv_eth_driver);
+
+	return err;
+}
+module_init(mv_eth_init_module);
+
+static void __exit mv_eth_exit_module(void)
+{
+	platform_driver_unregister(&mv_eth_driver);
+	mv_eth_shared_remove();
+}
+module_exit(mv_eth_exit_module);
+#endif /* CONFIG_OF */
+
+
+MODULE_DESCRIPTION("Marvell Ethernet Driver - www.marvell.com");
+MODULE_AUTHOR("Dmitri Epshtein <dima@marvell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_netdev.h b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_netdev.h
new file mode 100644
index 000000000000..af3c6389995b
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_netdev.h
@@ -0,0 +1,891 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#ifndef __mv_netdev_h__
+#define __mv_netdev_h__
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/mv_neta.h>
+#include <net/ip.h>
+#include <linux/interrupt.h>
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "mv802_3.h"
+#include "mvStack.h"
+
+#include "gbe/mvNeta.h"
+#include "bm/mvBmRegs.h"
+#include "bm/mvBm.h"
+
+#define MV_ETH_MAX_NETDEV_NUM	24
+
+/******************************************************
+ * driver statistics control --                       *
+ ******************************************************/
+#ifdef CONFIG_MV_ETH_STAT_ERR
+#define STAT_ERR(c) c
+#else
+#define STAT_ERR(c)
+#endif
+
+#ifdef CONFIG_MV_ETH_STAT_INF
+#define STAT_INFO(c) c
+#else
+#define STAT_INFO(c)
+#endif
+
+#ifdef CONFIG_MV_ETH_STAT_DBG
+#define STAT_DBG(c) c
+#else
+#define STAT_DBG(c)
+#endif
+
+#ifdef CONFIG_MV_ETH_STAT_DIST
+#define STAT_DIST(c) c
+#else
+#define STAT_DIST(c)
+#endif
+
+#ifdef CONFIG_MV_ETH_PNC
+extern unsigned int mv_eth_pnc_ctrl_en;
+int mv_eth_ctrl_pnc(int en);
+#endif /* CONFIG_MV_ETH_PNC */
+
+extern int mv_ctrl_txdone;
+
+/****************************************************************************
+ * Rx buffer size: MTU + 2(Marvell Header) + 4(VLAN) + 14(MAC hdr) + 4(CRC) *
+ ****************************************************************************/
+#define RX_PKT_SIZE(mtu) \
+		MV_ALIGN_UP((mtu) + 2 + 4 + ETH_HLEN + 4, CPU_D_CACHE_LINE_SIZE)
+
+#define RX_BUF_SIZE(pkt_size)   ((pkt_size) + NET_SKB_PAD)
+
+
+#ifdef CONFIG_MV_NETA_SKB_RECYCLE
+extern int mv_ctrl_recycle;
+
+#define mv_eth_is_recycle()     (mv_ctrl_recycle)
+int mv_eth_skb_recycle(struct sk_buff *skb);
+#else
+#define mv_eth_is_recycle()     0
+#endif /* CONFIG_MV_NETA_SKB_RECYCLE */
+
+/******************************************************
+ * interrupt control --                               *
+ ******************************************************/
+#ifdef CONFIG_MV_NETA_TXDONE_ISR
+#define MV_ETH_TXDONE_INTR_MASK       (((1 << CONFIG_MV_ETH_TXQ) - 1) << NETA_CAUSE_TXQ_SENT_DESC_OFFS)
+#else
+#define MV_ETH_TXDONE_INTR_MASK       0
+#endif
+
+#define MV_ETH_MISC_SUM_INTR_MASK     (NETA_CAUSE_TX_ERR_SUM_MASK | NETA_CAUSE_MISC_SUM_MASK)
+#define MV_ETH_RX_INTR_MASK           (((1 << CONFIG_MV_ETH_RXQ) - 1) << NETA_CAUSE_RXQ_OCCUP_DESC_OFFS)
+#define NETA_RX_FL_DESC_MASK          (NETA_RX_F_DESC_MASK|NETA_RX_L_DESC_MASK)
+
+/* NAPI CPU defualt group */
+#define CPU_GROUP_DEF 0
+
+#define MV_ETH_TRYLOCK(lock, flags)                           \
+	(in_interrupt() ? spin_trylock((lock)) :              \
+		spin_trylock_irqsave((lock), (flags)))
+
+#define MV_ETH_LOCK(lock, flags)                              \
+{                                                             \
+	if (in_interrupt())                                   \
+		spin_lock((lock));                            \
+	else                                                  \
+		spin_lock_irqsave((lock), (flags));           \
+}
+
+#define MV_ETH_UNLOCK(lock, flags)                            \
+{                                                             \
+	if (in_interrupt())                                   \
+		spin_unlock((lock));                          \
+	else                                                  \
+		spin_unlock_irqrestore((lock), (flags));      \
+}
+
+#define MV_ETH_LIGHT_LOCK(flags)                              \
+	if (!in_interrupt())                                  \
+		local_irq_save(flags);
+
+#define MV_ETH_LIGHT_UNLOCK(flags)	                      \
+	if (!in_interrupt())                                  \
+		local_irq_restore(flags);
+
+
+#define mv_eth_lock(txq_ctrl, flags)			     \
+{							     \
+	if (txq_ctrl->flags & MV_ETH_F_TX_SHARED)	     \
+		MV_ETH_LOCK(&txq_ctrl->queue_lock, flags)    \
+	else                                                 \
+		MV_ETH_LIGHT_LOCK(flags)		     \
+}
+
+#define mv_eth_unlock(txq_ctrl, flags)                        \
+{							      \
+	if (txq_ctrl->flags & MV_ETH_F_TX_SHARED)	      \
+		MV_ETH_UNLOCK(&txq_ctrl->queue_lock, flags)   \
+	else                                                  \
+		MV_ETH_LIGHT_UNLOCK(flags)		      \
+}
+
+#if defined(CONFIG_CPU_SHEEVA_PJ4B_V7) || defined(CONFIG_CPU_SHEEVA_PJ4B_V6)
+#  define mv_neta_wmb()
+#else
+#  define mv_neta_wmb() wmb()
+#endif
+
+/******************************************************
+ * rx / tx queues --                                  *
+ ******************************************************/
+/*
+ * Debug statistics
+ */
+
+struct txq_stats {
+#ifdef CONFIG_MV_ETH_STAT_ERR
+	u32 txq_err;
+#endif /* CONFIG_MV_ETH_STAT_ERR */
+#ifdef CONFIG_MV_ETH_STAT_DBG
+	u32 txq_tx;
+	u32 txq_txdone;
+#endif /* CONFIG_MV_ETH_STAT_DBG */
+};
+
+struct port_stats {
+
+#ifdef CONFIG_MV_ETH_STAT_ERR
+	u32 rx_error;
+	u32 tx_timeout;
+	u32 netif_stop;
+	u32 ext_stack_empty;
+	u32 ext_stack_full;
+	u32 netif_wake;
+	u32 state_err;
+#endif /* CONFIG_MV_ETH_STAT_ERR */
+
+#ifdef CONFIG_MV_ETH_STAT_INF
+	u32 irq[CONFIG_NR_CPUS];
+	u32 irq_err[CONFIG_NR_CPUS];
+	u32 poll[CONFIG_NR_CPUS];
+	u32 poll_exit[CONFIG_NR_CPUS];
+	u32 tx_done_timer_event[CONFIG_NR_CPUS];
+	u32 tx_done_timer_add[CONFIG_NR_CPUS];
+	u32 tx_fragment;
+	u32 tx_done;
+	u32 cleanup_timer;
+	u32 link;
+	u32 netdev_stop;
+
+#ifdef CONFIG_MV_ETH_RX_SPECIAL
+	u32 rx_special;
+#endif /* CONFIG_MV_ETH_RX_SPECIAL */
+
+#ifdef CONFIG_MV_ETH_TX_SPECIAL
+	u32	tx_special;
+#endif /* CONFIG_MV_ETH_TX_SPECIAL */
+
+#endif /* CONFIG_MV_ETH_STAT_INF */
+
+#ifdef CONFIG_MV_ETH_STAT_DBG
+	u32 rxq[CONFIG_MV_ETH_RXQ];
+	u32 rx_tagged;
+	u32 rxq_fill[CONFIG_MV_ETH_RXQ];
+	u32 rx_netif;
+	u32 rx_nfp;
+	u32 rx_nfp_drop;
+	u32 rx_gro;
+	u32 rx_gro_bytes;
+	u32 rx_drop_sw;
+	u32 rx_csum_hw;
+	u32 rx_csum_sw;
+	u32 tx_csum_hw;
+	u32 tx_csum_sw;
+	u32 tx_skb_free;
+	u32 tx_sg;
+	u32 tx_tso;
+	u32 tx_tso_bytes;
+	u32 ext_stack_put;
+	u32 ext_stack_get;
+#endif /* CONFIG_MV_ETH_STAT_DBG */
+};
+
+/* Used for define type of data saved in shadow: SKB or eth_pbuf or nothing */
+#define MV_ETH_SHADOW_SKB		0x1
+#define MV_ETH_SHADOW_EXT		0x2
+
+/* Masks used for pp->flags */
+#define MV_ETH_F_STARTED_BIT        0
+#define MV_ETH_F_MH_BIT             1
+#define MV_ETH_F_NO_PAD_BIT         2
+#define MV_ETH_F_DBG_RX_BIT         3
+#define MV_ETH_F_DBG_TX_BIT         4
+#define MV_ETH_F_EXT_SWITCH_BIT	    5	/* port is connected to the Switch without the Gateway driver */
+#define MV_ETH_F_CONNECT_LINUX_BIT  6	/* port is connected to Linux netdevice */
+#define MV_ETH_F_LINK_UP_BIT        7
+#define MV_ETH_F_DBG_DUMP_BIT       8
+#define MV_ETH_F_DBG_ISR_BIT        9
+#define MV_ETH_F_DBG_POLL_BIT       10
+#define MV_ETH_F_NFP_EN_BIT         11
+#define MV_ETH_F_SUSPEND_BIT        12
+#define MV_ETH_F_STARTED_OLD_BIT    13 /*STARTED_BIT value before suspend */
+#define MV_ETH_F_FORCE_LINK_BIT     14
+#define MV_ETH_F_IFCAP_NETMAP_BIT   15
+
+#define MV_ETH_F_STARTED           (1 << MV_ETH_F_STARTED_BIT)
+#define MV_ETH_F_SWITCH            (1 << MV_ETH_F_SWITCH_BIT)
+#define MV_ETH_F_MH                (1 << MV_ETH_F_MH_BIT)
+#define MV_ETH_F_NO_PAD            (1 << MV_ETH_F_NO_PAD_BIT)
+#define MV_ETH_F_DBG_RX            (1 << MV_ETH_F_DBG_RX_BIT)
+#define MV_ETH_F_DBG_TX            (1 << MV_ETH_F_DBG_TX_BIT)
+#define MV_ETH_F_EXT_SWITCH        (1 << MV_ETH_F_EXT_SWITCH_BIT)
+#define MV_ETH_F_CONNECT_LINUX     (1 << MV_ETH_F_CONNECT_LINUX_BIT)
+#define MV_ETH_F_LINK_UP           (1 << MV_ETH_F_LINK_UP_BIT)
+#define MV_ETH_F_DBG_DUMP          (1 << MV_ETH_F_DBG_DUMP_BIT)
+#define MV_ETH_F_DBG_ISR           (1 << MV_ETH_F_DBG_ISR_BIT)
+#define MV_ETH_F_DBG_POLL          (1 << MV_ETH_F_DBG_POLL_BIT)
+#define MV_ETH_F_NFP_EN            (1 << MV_ETH_F_NFP_EN_BIT)
+#define MV_ETH_F_SUSPEND           (1 << MV_ETH_F_SUSPEND_BIT)
+#define MV_ETH_F_STARTED_OLD       (1 << MV_ETH_F_STARTED_OLD_BIT)
+#define MV_ETH_F_FORCE_LINK        (1 << MV_ETH_F_FORCE_LINK_BIT)
+#define MV_ETH_F_IFCAP_NETMAP      (1 << MV_ETH_F_IFCAP_NETMAP_BIT)
+
+/* Masks used for cpu_ctrl->flags */
+#define MV_ETH_F_TX_DONE_TIMER_BIT  0
+#define MV_ETH_F_CLEANUP_TIMER_BIT  1
+
+#define MV_ETH_F_TX_DONE_TIMER		(1 << MV_ETH_F_TX_DONE_TIMER_BIT)	/* 0x01 */
+#define MV_ETH_F_CLEANUP_TIMER		(1 << MV_ETH_F_CLEANUP_TIMER_BIT)	/* 0x02 */
+
+/* Masks used for tx_queue->flags */
+#define MV_ETH_F_TX_SHARED_BIT  0
+
+#define MV_ETH_F_TX_SHARED		(1 << MV_ETH_F_TX_SHARED_BIT)	/* 0x01 */
+
+
+
+/* One of three TXQ states */
+#define MV_ETH_TXQ_FREE         0
+#define MV_ETH_TXQ_CPU          1
+#define MV_ETH_TXQ_HWF          2
+
+#define MV_ETH_TXQ_INVALID		0xFF
+
+struct mv_eth_tx_spec {
+	u32		hw_cmd;	/* tx_desc offset = 0xC */
+	u16		flags;
+	u8		txp;
+	u8		txq;
+#ifdef CONFIG_MV_ETH_TX_SPECIAL
+	void		(*tx_func) (u8 *data, int size, struct mv_eth_tx_spec *tx_spec);
+#endif
+};
+
+struct tx_queue {
+	MV_NETA_TXQ_CTRL   *q;
+	u8                  cpu_owner[CONFIG_NR_CPUS]; /* counter */
+	u8                  hwf_rxp;
+	u8                  txp;
+	u8                  txq;
+	int                 txq_size;
+	int                 txq_count;
+	int                 bm_only;
+	u32                 *shadow_txq; /* can be MV_ETH_PKT* or struct skbuf* */
+	int                 shadow_txq_put_i;
+	int                 shadow_txq_get_i;
+	struct txq_stats    stats;
+	spinlock_t          queue_lock;
+	MV_U32              txq_done_pkts_coal;
+	unsigned long       flags;
+	int		    nfpCounter;
+};
+
+struct rx_queue {
+	MV_NETA_RXQ_CTRL    *q;
+	int                 rxq_size;
+	int                 missed;
+	MV_U32	            rxq_pkts_coal;
+	MV_U32	            rxq_time_coal;
+};
+
+struct dist_stats {
+	u32     *rx_dist;
+	int     rx_dist_size;
+	u32     *tx_done_dist;
+	int     tx_done_dist_size;
+	u32     *tx_tso_dist;
+	int     tx_tso_dist_size;
+};
+
+struct cpu_ctrl {
+	MV_U8  			cpuTxqMask;
+	MV_U8			cpuRxqMask;
+	MV_U8			cpuTxqOwner;
+	MV_U8  			txq_tos_map[256];
+	MV_U32			causeRxTx;
+	struct eth_port		*pp;
+	struct napi_struct	*napi;
+	int			napiCpuGroup;
+	int             	txq;
+	int                     cpu;
+#if defined(CONFIG_MV_NETA_TXDONE_IN_HRTIMER)
+	struct hrtimer		tx_done_timer;
+	struct tasklet_struct	tx_done_tasklet;
+#elif defined(CONFIG_MV_NETA_TXDONE_IN_TIMER)
+	struct timer_list	tx_done_timer;
+#endif
+	struct timer_list   	cleanup_timer;
+	unsigned long       	flags;
+
+};
+
+struct eth_port {
+	int                 port;
+	bool                tagged; /* NONE/MH/DSA/EDSA/VLAN */
+	struct mv_neta_pdata *plat_data;
+	MV_NETA_PORT_CTRL   *port_ctrl;
+	struct rx_queue     *rxq_ctrl;
+	struct tx_queue     *txq_ctrl;
+	int                 txp_num;
+	struct net_device   *dev;
+	rwlock_t            rwlock;
+	struct bm_pool      *pool_long;
+	int                 pool_long_num;
+#ifdef CONFIG_MV_ETH_BM_CPU
+	struct bm_pool      *pool_short;
+	int                 pool_short_num;
+#endif /* CONFIG_MV_ETH_BM_CPU */
+	struct napi_struct  *napiGroup[CONFIG_MV_ETH_NAPI_GROUPS];
+	unsigned long       flags;	/* MH, TIMER, etc. */
+	u32                 hw_cmd;	/* offset 0xc in TX descriptor */
+	int                 txp;
+	u16                 tx_mh;	/* 2B MH */
+	struct port_stats   stats;
+	struct dist_stats   dist_stats;
+	int                 weight;
+	MV_STACK            *extArrStack;
+	int                 extBufSize;
+	spinlock_t          extLock;
+	/* Ethtool parameters */
+	__u16               speed_cfg;
+	__u8                duplex_cfg;
+	__u8                autoneg_cfg;
+	__u16		        advertise_cfg;
+	__u32               rx_time_coal_cfg;
+	__u32               rx_pkts_coal_cfg;
+	__u32               tx_pkts_coal_cfg;
+	__u32               rx_time_low_coal_cfg;
+	__u32               rx_time_high_coal_cfg;
+	__u32               rx_pkts_low_coal_cfg;
+	__u32               rx_pkts_high_coal_cfg;
+	__u32               pkt_rate_low_cfg;
+	__u32               pkt_rate_high_cfg;
+	__u32               rate_current; /* unknown (0), low (1), normal (2), high (3) */
+	__u32               rate_sample_cfg;
+	__u32               rx_adaptive_coal_cfg;
+	/* Rate calculate */
+	unsigned long	    rx_rate_pkts;
+	unsigned long	    rx_timestamp;
+#ifdef CONFIG_MV_ETH_RX_SPECIAL
+	void    (*rx_special_proc)(int port, int rxq, struct net_device *dev,
+					struct sk_buff *skb, struct neta_rx_desc *rx_desc);
+#endif /* CONFIG_MV_ETH_RX_SPECIAL */
+#ifdef CONFIG_MV_ETH_TX_SPECIAL
+	int     (*tx_special_check)(int port, struct net_device *dev, struct sk_buff *skb,
+					struct mv_eth_tx_spec *tx_spec_out);
+#endif /* CONFIG_MV_ETH_TX_SPECIAL */
+
+	MV_U32              cpu_mask;
+	MV_U32              rx_indir_table[256];
+	struct cpu_ctrl	    *cpu_config[CONFIG_NR_CPUS];
+	MV_U32              sgmii_serdes;
+	int	                pm_mode;
+};
+
+struct eth_netdev {
+	u16     tx_vlan_mh;		/* 2B MH */
+	u16     vlan_grp_id;		/* vlan group ID */
+	u16     port_map;		/* switch port map */
+	u16     link_map;		/* switch port link map */
+	u16     cpu_port;		/* switch CPU port */
+	u16     group;
+};
+
+struct eth_dev_priv {
+	struct eth_port     *port_p;
+	struct eth_netdev   *netdev_p;
+};
+
+#define MV_ETH_PRIV(dev)        ((struct eth_port *)(netdev_priv(dev)))
+#define MV_DEV_STAT(dev)        (&((dev)->stats))
+
+/* define which Switch ports are relevant */
+#define SWITCH_CONNECTED_PORTS_MASK	0x7F
+
+#define MV_SWITCH_ID_0			0
+#define MV_ETH_PORT_0			0
+#define MV_ETH_PORT_1			1
+
+struct pool_stats {
+#ifdef CONFIG_MV_ETH_STAT_ERR
+	u32 skb_alloc_oom;
+	u32 stack_empty;
+	u32 stack_full;
+#endif /* CONFIG_MV_ETH_STAT_ERR */
+
+#ifdef CONFIG_MV_ETH_STAT_DBG
+	u32 bm_put;
+	u32 stack_put;
+	u32 stack_get;
+	u32 skb_alloc_ok;
+	u32 skb_recycled_ok;
+	u32 skb_recycled_err;
+	u32 skb_hw_cookie_err;
+#endif /* CONFIG_MV_ETH_STAT_DBG */
+};
+
+struct bm_pool {
+	int         pool;
+	int         capacity;
+	int         buf_num;
+	int         pkt_size;
+	MV_ULONG    physAddr;
+	u32         *bm_pool;
+	MV_STACK    *stack;
+	spinlock_t  lock;
+	u32         port_map;
+	int         missed;		/* FIXME: move to stats */
+	struct pool_stats  stats;
+};
+
+#ifdef CONFIG_MV_ETH_BM_CPU
+#define MV_ETH_BM_POOLS	        MV_BM_POOLS
+#define mv_eth_pool_bm(p)       (MV_NETA_BM_CAP() ? (p->bm_pool) : 0)
+#define mv_eth_txq_bm(q)        (MV_NETA_BM_CAP() ? (q->bm_only) : 0)
+#else
+#define MV_ETH_BM_POOLS		CONFIG_MV_ETH_PORTS_NUM
+#define mv_eth_pool_bm(p)       0
+#define mv_eth_txq_bm(q)        0
+#endif /* CONFIG_MV_ETH_BM_CPU */
+
+#ifdef CONFIG_MV_NETA_TXDONE_IN_HRTIMER
+#define MV_ETH_HRTIMER_PERIOD_MIN	(10)
+#define MV_ETH_HRTIMER_PERIOD_MAX	(10000)
+unsigned int mv_eth_tx_done_hrtimer_period_get(void);
+int mv_eth_tx_done_hrtimer_period_set(unsigned int period);
+#endif
+
+#ifdef CONFIG_MV_ETH_BM
+MV_STATUS mv_eth_bm_config_get(void);
+int mv_eth_bm_config_pkt_size_get(int pool);
+int mv_eth_bm_config_pkt_size_set(int pool, int pkt_size);
+int mv_eth_bm_config_short_pool_get(int port);
+int mv_eth_bm_config_short_buf_num_get(int port);
+int mv_eth_bm_config_long_pool_get(int port);
+int mv_eth_bm_config_long_buf_num_get(int port);
+void mv_eth_bm_config_print(void);
+#endif /* CONFIG_MV_ETH_BM */
+
+void mv_eth_stack_print(int port, MV_BOOL isPrintElements);
+extern struct bm_pool mv_eth_pool[MV_ETH_BM_POOLS];
+extern struct eth_port **mv_eth_ports;
+
+static inline void mv_eth_interrupts_unmask(void *arg)
+{
+	struct eth_port *pp = arg;
+
+	/* unmask interrupts */
+	if (!test_bit(MV_ETH_F_FORCE_LINK_BIT, &(pp->flags)))
+		MV_REG_WRITE(NETA_INTR_MISC_MASK_REG(pp->port), NETA_CAUSE_LINK_CHANGE_MASK);
+
+	MV_REG_WRITE(NETA_INTR_NEW_MASK_REG(pp->port),
+		(MV_ETH_MISC_SUM_INTR_MASK |
+		MV_ETH_TXDONE_INTR_MASK |
+		MV_ETH_RX_INTR_MASK));
+}
+
+static inline void mv_eth_interrupts_mask(void *arg)
+{
+	struct eth_port *pp = arg;
+
+	/* clear all ethernet port interrupts */
+	MV_REG_WRITE(NETA_INTR_MISC_CAUSE_REG(pp->port), 0);
+	MV_REG_WRITE(NETA_INTR_OLD_CAUSE_REG(pp->port), 0);
+
+	/* mask all ethernet port interrupts */
+	MV_REG_WRITE(NETA_INTR_NEW_MASK_REG(pp->port), 0);
+	MV_REG_WRITE(NETA_INTR_OLD_MASK_REG(pp->port), 0);
+	MV_REG_WRITE(NETA_INTR_MISC_MASK_REG(pp->port), 0);
+}
+
+
+static inline void mv_eth_txq_update_shared(struct tx_queue *txq_ctrl, struct eth_port *pp)
+{
+	int numOfRefCpu, cpu;
+	struct cpu_ctrl	*cpuCtrl;
+
+	numOfRefCpu = 0;
+
+	for_each_possible_cpu(cpu) {
+		cpuCtrl = pp->cpu_config[cpu];
+
+		if (txq_ctrl->cpu_owner[cpu] == 0)
+			cpuCtrl->cpuTxqOwner &= ~(1 << txq_ctrl->txq);
+		else {
+			numOfRefCpu++;
+			cpuCtrl->cpuTxqOwner |= (1 << txq_ctrl->txq);
+		}
+	}
+
+	if ((txq_ctrl->nfpCounter != 0) || (numOfRefCpu > 1))
+		txq_ctrl->flags |=  MV_ETH_F_TX_SHARED;
+	else
+		txq_ctrl->flags &= ~MV_ETH_F_TX_SHARED;
+}
+
+static inline int mv_eth_ctrl_is_tx_enabled(struct eth_port *pp)
+{
+	if (!pp)
+		return -ENODEV;
+
+	if (pp->flags & MV_ETH_F_CONNECT_LINUX)
+		return 1;
+
+	return 0;
+}
+
+static inline struct neta_tx_desc *mv_eth_tx_desc_get(struct tx_queue *txq_ctrl, int num)
+{
+	/* Is enough TX descriptors to send packet */
+	if ((txq_ctrl->txq_count + num) >= txq_ctrl->txq_size) {
+		/*
+		printk(KERN_ERR "eth_tx: txq_ctrl->txq=%d - no_resource: txq_count=%d, txq_size=%d, num=%d\n",
+			txq_ctrl->txq, txq_ctrl->txq_count, txq_ctrl->txq_size, num);
+		*/
+		STAT_ERR(txq_ctrl->stats.txq_err++);
+		return NULL;
+	}
+	return mvNetaTxqNextDescGet(txq_ctrl->q);
+}
+
+static inline void mv_eth_tx_desc_flush(struct eth_port *pp, struct neta_tx_desc *tx_desc)
+{
+#if defined(MV_CPU_BE)
+	mvNetaTxqDescSwap(tx_desc);
+#endif /* MV_CPU_BE */
+
+	mvOsCacheLineFlush(pp->dev->dev.parent, tx_desc);
+}
+
+
+static inline void *mv_eth_extra_pool_get(struct eth_port *pp)
+{
+	void *ext_buf;
+
+	spin_lock(&pp->extLock);
+	if (mvStackIndex(pp->extArrStack) == 0) {
+		STAT_ERR(pp->stats.ext_stack_empty++);
+		ext_buf = mvOsMalloc(CONFIG_MV_ETH_EXTRA_BUF_SIZE);
+	} else {
+		STAT_DBG(pp->stats.ext_stack_get++);
+		ext_buf = (void *)mvStackPop(pp->extArrStack);
+	}
+	spin_unlock(&pp->extLock);
+
+	return ext_buf;
+}
+
+static inline int mv_eth_extra_pool_put(struct eth_port *pp, void *ext_buf)
+{
+	spin_lock(&pp->extLock);
+	if (mvStackIsFull(pp->extArrStack)) {
+		STAT_ERR(pp->stats.ext_stack_full++);
+		spin_unlock(&pp->extLock);
+		mvOsFree(ext_buf);
+		return 1;
+	}
+	mvStackPush(pp->extArrStack, (MV_U32)ext_buf);
+	STAT_DBG(pp->stats.ext_stack_put++);
+	spin_unlock(&pp->extLock);
+	return 0;
+}
+
+static inline void mv_eth_add_cleanup_timer(struct cpu_ctrl *cpuCtrl)
+{
+	if (test_and_set_bit(MV_ETH_F_CLEANUP_TIMER_BIT, &(cpuCtrl->flags)) == 0) {
+		cpuCtrl->cleanup_timer.expires = jiffies + ((HZ * 10) / 1000); /* ms */
+		add_timer_on(&cpuCtrl->cleanup_timer, smp_processor_id());
+	}
+}
+
+#if defined(CONFIG_MV_NETA_TXDONE_IN_HRTIMER)
+static inline void mv_eth_add_tx_done_timer(struct cpu_ctrl *cpuCtrl)
+{
+	ktime_t interval;
+	unsigned long delay_in_ns = mv_eth_tx_done_hrtimer_period_get() * 1000; /*the func return value is in us unit*/
+
+	if (test_and_set_bit(MV_ETH_F_TX_DONE_TIMER_BIT, &(cpuCtrl->flags)) == 0) {
+		STAT_INFO(cpuCtrl->pp->stats.tx_done_timer_add[smp_processor_id()]++);
+		interval = ktime_set(0, delay_in_ns);
+		hrtimer_start(&cpuCtrl->tx_done_timer, interval, HRTIMER_MODE_REL_PINNED);
+	}
+}
+#elif defined(CONFIG_MV_NETA_TXDONE_IN_TIMER)
+static inline void mv_eth_add_tx_done_timer(struct cpu_ctrl *cpuCtrl)
+{
+	if (test_and_set_bit(MV_ETH_F_TX_DONE_TIMER_BIT, &(cpuCtrl->flags)) == 0) {
+
+		cpuCtrl->tx_done_timer.expires = jiffies + ((HZ * CONFIG_MV_NETA_TX_DONE_TIMER_PERIOD) / 1000); /* ms */
+		STAT_INFO(cpuCtrl->pp->stats.tx_done_timer_add[smp_processor_id()]++);
+		add_timer_on(&cpuCtrl->tx_done_timer, smp_processor_id());
+	}
+}
+#endif
+
+static inline void mv_eth_shadow_inc_get(struct tx_queue *txq)
+{
+	txq->shadow_txq_get_i++;
+	if (txq->shadow_txq_get_i == txq->txq_size)
+		txq->shadow_txq_get_i = 0;
+}
+
+static inline void mv_eth_shadow_inc_put(struct tx_queue *txq)
+{
+	txq->shadow_txq_put_i++;
+	if (txq->shadow_txq_put_i == txq->txq_size)
+		txq->shadow_txq_put_i = 0;
+}
+
+static inline void mv_eth_shadow_dec_put(struct tx_queue *txq)
+{
+	if (txq->shadow_txq_put_i == 0)
+		txq->shadow_txq_put_i = txq->txq_size - 1;
+	else
+		txq->shadow_txq_put_i--;
+}
+
+/* Free pkt + skb pair */
+static inline void mv_eth_pkt_free(struct eth_pbuf *pkt)
+{
+	struct sk_buff *skb = (struct sk_buff *)pkt->osInfo;
+
+#ifdef CONFIG_MV_NETA_SKB_RECYCLE
+	skb->skb_recycle = NULL;
+	skb->hw_cookie = 0;
+#endif /* CONFIG_MV_NETA_SKB_RECYCLE */
+
+	dev_kfree_skb_any(skb);
+	mvOsFree(pkt);
+}
+
+static inline int mv_eth_pool_put(struct bm_pool *pool, struct eth_pbuf *pkt)
+{
+	unsigned long flags = 0;
+
+	MV_ETH_LOCK(&pool->lock, flags);
+	if (mvStackIsFull(pool->stack)) {
+		STAT_ERR(pool->stats.stack_full++);
+		MV_ETH_UNLOCK(&pool->lock, flags);
+
+		/* free pkt+skb */
+		mv_eth_pkt_free(pkt);
+		return 1;
+	}
+	mvStackPush(pool->stack, (MV_U32) pkt);
+	STAT_DBG(pool->stats.stack_put++);
+	MV_ETH_UNLOCK(&pool->lock, flags);
+	return 0;
+}
+
+
+/* Pass pkt to BM Pool or RXQ ring */
+static inline void mv_eth_rxq_refill(struct eth_port *pp, int rxq,
+				     struct eth_pbuf *pkt, struct bm_pool *pool, struct neta_rx_desc *rx_desc)
+{
+	if (mv_eth_pool_bm(pool)) {
+		/* Refill BM pool */
+		STAT_DBG(pool->stats.bm_put++);
+		mvBmPoolPut(pkt->pool, (MV_ULONG) pkt->physAddr);
+		mvOsCacheLineInv(pp->dev->dev.parent, rx_desc);
+	} else {
+		/* Refill Rx descriptor */
+		STAT_DBG(pp->stats.rxq_fill[rxq]++);
+		mvNetaRxDescFill(rx_desc, pkt->physAddr, (MV_U32)pkt);
+		mvOsCacheLineFlush(pp->dev->dev.parent, rx_desc);
+	}
+}
+
+/******************************************************
+ * Function prototypes --                             *
+ ******************************************************/
+int         mv_eth_stop(struct net_device *dev);
+int         mv_eth_start(struct net_device *dev);
+int         mv_eth_change_mtu(struct net_device *dev, int mtu);
+int         mv_eth_check_mtu_internals(struct net_device *dev, int mtu);
+int         mv_eth_check_mtu_valid(struct net_device *dev, int mtu);
+
+int         mv_eth_set_mac_addr(struct net_device *dev, void *mac);
+void        mv_eth_set_multicast_list(struct net_device *dev);
+int         mv_eth_open(struct net_device *dev);
+int         mv_eth_port_suspend(int port);
+int         mv_eth_port_resume(int port);
+int         mv_eth_resume_clock(int port);
+int         mv_eth_suspend_clock(int port);
+int         mv_eth_suspend_internals(struct eth_port *pp);
+int         mv_eth_resume_internals(struct eth_port *pp, int mtu);
+int         mv_eth_restore_registers(struct eth_port *pp, int mtu);
+
+void        mv_eth_win_init(int port);
+int         mv_eth_resume_network_interfaces(struct eth_port *pp);
+int         mv_eth_wol_mode_set(int port, int mode);
+
+int	    mv_eth_cpu_txq_mask_set(int port, int cpu, int txqMask);
+
+irqreturn_t mv_eth_isr(int irq, void *dev_id);
+int         mv_eth_start_internals(struct eth_port *pp, int mtu);
+int         mv_eth_stop_internals(struct eth_port *pp);
+int         mv_eth_change_mtu_internals(struct net_device *netdev, int mtu);
+
+int         mv_eth_rx_reset(int port);
+int         mv_eth_txp_reset(int port, int txp);
+int         mv_eth_txq_clean(int port, int txp, int txq);
+
+MV_STATUS   mv_eth_rx_pkts_coal_set(int port, int rxq, MV_U32 value);
+MV_STATUS   mv_eth_rx_time_coal_set(int port, int rxq, MV_U32 value);
+MV_STATUS   mv_eth_tx_done_pkts_coal_set(int port, int txp, int txq, MV_U32 value);
+
+struct eth_port     *mv_eth_port_by_id(unsigned int port);
+struct net_device   *mv_eth_netdev_by_id(unsigned int idx);
+bool                 mv_eth_netdev_find(unsigned int if_index);
+
+void        mv_eth_mac_show(int port);
+void        mv_eth_tos_map_show(int port);
+int         mv_eth_rxq_tos_map_set(int port, int rxq, unsigned char tos);
+int         mv_eth_txq_tos_map_set(int port, int txq, int cpu, unsigned int tos);
+int         mv_eth_napi_set_cpu_affinity(int port, int group, int affinity);
+int         mv_eth_napi_set_rxq_affinity(int port, int group, int rxq);
+void        mv_eth_napi_group_show(int port);
+
+int         mv_eth_rxq_vlan_prio_set(int port, int rxq, unsigned char prio);
+void        mv_eth_vlan_prio_show(int port);
+
+void        mv_eth_netdev_print(struct net_device *netdev);
+void        mv_eth_status_print(void);
+void        mv_eth_port_status_print(unsigned int port);
+void        mv_eth_port_stats_print(unsigned int port);
+void        mv_eth_pool_status_print(int pool);
+
+void        mv_eth_set_noqueue(struct net_device *dev, int enable);
+
+void        mv_eth_ctrl_hwf(int en);
+int         mv_eth_ctrl_recycle(int en);
+void        mv_eth_ctrl_txdone(int num);
+int         mv_eth_ctrl_tx_mh(int port, u16 mh);
+int         mv_eth_ctrl_tx_cmd(int port, u32 cmd);
+int         mv_eth_ctrl_txq_cpu_def(int port, int txp, int txq, int cpu);
+int         mv_eth_ctrl_txq_mode_get(int port, int txp, int txq, int *rx_port);
+int         mv_eth_ctrl_txq_cpu_own(int port, int txp, int txq, int add, int cpu);
+int         mv_eth_ctrl_txq_hwf_own(int port, int txp, int txq, int rxp);
+int         mv_eth_ctrl_flag(int port, u32 flag, u32 val);
+int         mv_eth_ctrl_txq_size_set(int port, int txp, int txq, int value);
+int         mv_eth_ctrl_rxq_size_set(int port, int rxq, int value);
+int         mv_eth_ctrl_port_buf_num_set(int port, int long_num, int short_num);
+int         mv_eth_ctrl_pool_size_set(int pool, int pkt_size);
+int         mv_eth_ctrl_set_poll_rx_weight(int port, u32 weight);
+int         mv_eth_shared_set(int port, int txp, int txq, int value);
+void        mv_eth_tx_desc_print(struct neta_tx_desc *desc);
+void        mv_eth_pkt_print(struct eth_port *pp, struct eth_pbuf *pkt);
+void        mv_eth_rx_desc_print(struct neta_rx_desc *desc);
+void        mv_eth_skb_print(struct sk_buff *skb);
+void        mv_eth_link_status_print(int port);
+
+#ifdef CONFIG_MV_PON
+typedef MV_BOOL(*PONLINKSTATUSPOLLFUNC)(void);		  /* prototype for PON link status polling function */
+typedef void   (*PONLINKSTATUSNOTIFYFUNC)(MV_BOOL state); /* prototype for PON link status notification function */
+
+MV_BOOL mv_pon_link_status(void);
+void mv_pon_link_state_register(PONLINKSTATUSPOLLFUNC poll_func, PONLINKSTATUSNOTIFYFUNC *notify_func);
+void mv_pon_ctrl_omci_type(MV_U16 type);
+void mv_pon_ctrl_omci_rx_gh(int en);
+void mv_pon_omci_print(void);
+
+#endif /* CONFIG_MV_PON */
+
+#ifdef CONFIG_MV_ETH_TX_SPECIAL
+void        mv_eth_tx_special_check_func(int port, int (*func)(int port, struct net_device *dev,
+				  struct sk_buff *skb, struct mv_eth_tx_spec *tx_spec_out));
+#endif /* CONFIG_MV_ETH_TX_SPECIAL */
+
+#ifdef CONFIG_MV_ETH_RX_SPECIAL
+void        mv_eth_rx_special_proc_func(int port, void (*func)(int port, int rxq, struct net_device *dev,
+							struct sk_buff *skb, struct neta_rx_desc *rx_desc));
+#endif /* CONFIG_MV_ETH_RX_SPECIAL */
+
+int  mv_eth_poll(struct napi_struct *napi, int budget);
+void mv_eth_link_event(struct eth_port *pp, int print);
+
+int mv_eth_rx_policy(u32 cause);
+int mv_eth_refill(struct eth_port *pp, int rxq,
+				struct eth_pbuf *pkt, struct bm_pool *pool, struct neta_rx_desc *rx_desc);
+u32 mv_eth_txq_done(struct eth_port *pp, struct tx_queue *txq_ctrl);
+u32 mv_eth_tx_done_gbe(struct eth_port *pp, u32 cause_tx_done, int *tx_todo);
+u32 mv_eth_tx_done_pon(struct eth_port *pp, int *tx_todo);
+
+#ifdef CONFIG_MV_ETH_RX_DESC_PREFETCH
+struct neta_rx_desc *mv_eth_rx_prefetch(struct eth_port *pp,
+						MV_NETA_RXQ_CTRL *rx_ctrl, int rx_done, int rx_todo);
+#endif /* CONFIG_MV_ETH_RX_DESC_PREFETCH */
+
+#ifdef CONFIG_MV_ETH_BM
+void	*mv_eth_bm_pool_create(int pool, int capacity, MV_ULONG *physAddr);
+#endif /* CONFIG_MV_ETH_BM */
+
+#ifdef CONFIG_MV_ETH_HWF
+MV_STATUS mv_eth_hwf_bm_create(int port, int mtuPktSize);
+void      mv_hwf_bm_dump(void);
+#endif /* CONFIG_MV_ETH_HWF && !CONFIG_MV_ETH_BM_CPU */
+
+#ifdef CONFIG_MV_ETH_L2FW
+int         mv_l2fw_init(void);
+#endif
+
+#ifdef CONFIG_MV_ETH_NFP
+int         mv_eth_nfp_ctrl(struct net_device *dev, int en);
+int         mv_eth_nfp_ext_ctrl(struct net_device *dev, int en);
+int         mv_eth_nfp_ext_add(struct net_device *dev, int port);
+int         mv_eth_nfp_ext_del(struct net_device *dev);
+MV_STATUS   mv_eth_nfp(struct eth_port *pp, int rxq, struct neta_rx_desc *rx_desc,
+					struct eth_pbuf *pkt, struct bm_pool *pool);
+#endif /* CONFIG_MV_ETH_NFP */
+
+#endif /* __mv_netdev_h__ */
diff --git a/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_pon_sysfs.c b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_pon_sysfs.c
new file mode 100644
index 000000000000..089e59aa1762
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/net_dev/mv_pon_sysfs.c
@@ -0,0 +1,168 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "gbe/mvNeta.h"
+#include "pnc/mvPnc.h"
+
+#include "mv_netdev.h"
+
+static ssize_t mv_pon_help(char *buf)
+{
+	int off = 0;
+
+	off += sprintf(buf+off, "cat help                   - show this help\n");
+	off += sprintf(buf+off, "echo p txp   > txp_regs    - show TX registers for <p/txp>\n");
+
+#ifdef MV_PON_MIB_SUPPORT
+	off += sprintf(buf+off, "echo mib gp  > mib_gpid    - MIB set <mib> for incoming packets with GemPID <gp>\n");
+	off += sprintf(buf+off, "echo mib     > mib_def     - MIB set <mib> for incoming packets not matched any GemPID\n");
+#endif /* MV_PON_MIB_SUPPORT */
+
+	return off;
+}
+
+
+static ssize_t mv_pon_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	int        off = 0;
+	const char *name = attr->attr.name;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "help"))
+		off = mv_pon_help(buf);
+	else
+		off = mv_pon_help(buf);
+
+	return off;
+}
+
+static ssize_t mv_pon_1_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	unsigned int    v;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read input */
+	v = 0;
+
+	sscanf(buf, "%x", &v);
+
+	local_irq_save(flags);
+
+#ifdef MV_PON_MIB_SUPPORT
+	if (!strcmp(name, "mib_def")) {
+		mvNetaPonRxMibDefault(v);
+	} else
+#endif /* MV_PON_MIB_SUPPORT */
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+
+	local_irq_restore(flags);
+
+	return len;
+}
+
+static ssize_t mv_pon_2_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char	*name = attr->attr.name;
+	unsigned int    p, v;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read input */
+	v = 0;
+	sscanf(buf, "%d %x", &p, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "txp_regs"))
+		mvNetaPonTxpRegs(p, v);
+#ifdef MV_PON_MIB_SUPPORT
+	else if (!strcmp(name, "mib_gpid"))
+		mvNetaPonRxMibGemPid(p, v);
+#endif /* MV_PON_MIB_SUPPORT */
+	else
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+
+	local_irq_restore(flags);
+
+	return len;
+}
+
+static DEVICE_ATTR(txp_regs,   S_IWUSR, mv_pon_show, mv_pon_2_store);
+static DEVICE_ATTR(mib_gpid,   S_IWUSR, mv_pon_show, mv_pon_2_store);
+static DEVICE_ATTR(mib_def,    S_IWUSR, mv_pon_show, mv_pon_1_store);
+static DEVICE_ATTR(help,       S_IRUSR, mv_pon_show, NULL);
+
+static struct attribute *mv_pon_attrs[] = {
+	&dev_attr_txp_regs.attr,
+	&dev_attr_mib_def.attr,
+	&dev_attr_mib_gpid.attr,
+	&dev_attr_help.attr,
+	NULL
+};
+
+static struct attribute_group mv_pon_group = {
+	.name = "pon",
+	.attrs = mv_pon_attrs,
+};
+
+int mv_neta_pon_sysfs_init(struct kobject *neta_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(neta_kobj, &pon_group);
+	if (err)
+		printk(KERN_INFO "sysfs group %s failed %d\n", pon_group.name, err);
+
+	return err;
+}
+
+int mv_neta_pon_sysfs_exit(struct kobject *neta_kobj)
+{
+	sysfs_remove_group(pp2_kobj, &plcr_group);
+
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/neta/pmt/pmt_sysfs.c b/drivers/net/ethernet/mvebu_net/neta/pmt/pmt_sysfs.c
new file mode 100644
index 000000000000..90bc6a7eba50
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/pmt/pmt_sysfs.c
@@ -0,0 +1,289 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include "pmt/mvPmt.h"
+
+static MV_NETA_PMT  mv_neta_pmt_e;
+
+static ssize_t pmt_help(char *buf)
+{
+	int off = 0;
+
+	off += mvOsSPrintf(buf+off, "p, i, a, b, c - are dec numbers\n");
+	off += mvOsSPrintf(buf+off, "v, m          - are hex numbers\n");
+	off += mvOsSPrintf(buf+off, "\n");
+
+	off += mvOsSPrintf(buf+off, "cat          help          - Show this help\n");
+	off += mvOsSPrintf(buf+off, "cat          sw_dump       - Show sw PMT etry\n");
+	off += mvOsSPrintf(buf+off, "cat          sw_clear      - Clear sw PMT etry\n");
+	off += mvOsSPrintf(buf+off, "echo v     > sw_word       - Set 4 bytes value <v> to sw entry\n");
+	off += mvOsSPrintf(buf+off, "echo p     > hw_regs       - Show PMT registers\n");
+	off += mvOsSPrintf(buf+off, "echo p     > hw_dump       - Dump valid PMT entries of the port <p>\n");
+	off += mvOsSPrintf(buf+off, "echo p     > hw_dump_all   - Dump all PMT entries of the port <p>\n");
+	off += mvOsSPrintf(buf+off, "echo p i   > hw_read       - Read PMT entry <i> on port <p> into sw entry\n");
+	off += mvOsSPrintf(buf+off, "echo p i   > hw_write      - Write sw entry into PMT entry <i> on port <p>\n");
+	off += mvOsSPrintf(buf+off, "echo p i   > hw_inv        - Disable PMT entry <i> on port <p>\n");
+	off += mvOsSPrintf(buf+off, "echo p     > hw_inv_all    - Disable all PMT entries on port <p>\n");
+	off += mvOsSPrintf(buf+off, "echo 0|1   > s_last        - Set/Clear last bit\n");
+	off += mvOsSPrintf(buf+off, "echo a b c > s_flags       - Set/Clear flags: <a>-last, <b>-ipv4csum, <c>-l4csum\n");
+	off += mvOsSPrintf(buf+off, "echo v     > s_rep_2b      - Replace 2 bytes with value <v>\n");
+	off += mvOsSPrintf(buf+off, "echo v     > s_add_2b      - Add 2 bytes with value <v> to sw entry\n");
+	off += mvOsSPrintf(buf+off, "echo a b c > s_del_2b      - Delete <a> bytes, Skip <b> bytes before, Skip <c> bytes after\n");
+	off += mvOsSPrintf(buf+off, "echo v m   > s_rep_lsb     - Replace LSB with value <v> and mask <m>\n");
+	off += mvOsSPrintf(buf+off, "echo v m   > s_rep_msb     - Replace MSB with value <v> and mask <m>\n");
+	off += mvOsSPrintf(buf+off, "echo v     > s_ip_csum     - Replace IP checksum. <v> used as additional info\n");
+	off += mvOsSPrintf(buf+off, "echo v     > s_l4_csum     - Replace TCP/UDP checksum. <v> used as additional info\n");
+	off += mvOsSPrintf(buf+off, "echo a b   > s_dec_lsb     - Decrement LSB, Skip <a> bytes before, Skip <b> bytes after\n");
+	off += mvOsSPrintf(buf+off, "echo a b   > s_dec_msb     - Decrement MSB, Skip <a> bytes before, Skip <b> bytes after\n");
+	off += mvOsSPrintf(buf+off, "echo a     > s_skip        - Skip <a> bytes. Must be even\n");
+	off += mvOsSPrintf(buf+off, "echo a b c > s_jump        - Jump to entry <a>. <b>=1-skip,2-subroutine. <c>=1-green,2-yellow\n");
+
+	return off;
+}
+
+static ssize_t pmt_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	const char  *name = attr->attr.name;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "help"))
+		return pmt_help(buf);
+
+	if (!strcmp(name, "sw_dump")) {
+		mvNetaPmtEntryPrint(&mv_neta_pmt_e);
+	} else if (!strcmp(name, "sw_clear")) {
+		MV_NETA_PMT_CLEAR(&mv_neta_pmt_e);
+	}
+
+	return 0;
+}
+
+static ssize_t pmt_hw_store(struct device *dev,
+			struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0, p = 0, i = 0;
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%d %d", &p, &i);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "hw_dump")) {
+		mvNetaPmtDump(p, 1);
+	} else if (!strcmp(name, "hw_dump_all")) {
+		mvNetaPmtDump(p, 0);
+	} else if (!strcmp(name, "hw_regs")) {
+		mvNetaPmtRegs(p, i);
+	} else if (!strcmp(name, "hw_write")) {
+		err = mvNetaPmtWrite(p, i, &mv_neta_pmt_e);
+	} else if (!strcmp(name, "hw_read")) {
+		err = mvNetaPmtRead(p, i, &mv_neta_pmt_e);
+	} else if (!strcmp(name, "hw_inv")) {
+		MV_NETA_PMT_INVALID_SET(&mv_neta_pmt_e);
+		err = mvNetaPmtWrite(p, i, &mv_neta_pmt_e);
+	} else if (!strcmp(name, "hw_inv_all")) {
+		err = mvNetaPmtClear(p);
+	} else
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t pmt_sw_dec_store(struct device *dev,
+			struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char	*name = attr->attr.name;
+	unsigned int    err = 0, a = 0, b = 0, c = 0;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%d %d %d", &a, &b, &c);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "s_last")) {
+		mvNetaPmtLastFlag(&mv_neta_pmt_e, a);
+	} else if (!strcmp(name, "s_flags")) {
+		mvNetaPmtFlags(&mv_neta_pmt_e, a, b, c);
+	} else if (!strcmp(name, "s_del")) {
+		mvNetaPmtDelShorts(&mv_neta_pmt_e, a/2, b/2, c/2);
+	} else if (!strcmp(name, "s_skip")) {
+		mvNetaPmtSkip(&mv_neta_pmt_e, a/2);
+	} else if (!strcmp(name, "s_dec_lsb")) {
+		mvNetaPmtDecLSB(&mv_neta_pmt_e, a/2, b/2);
+	} else if (!strcmp(name, "s_dec_msb")) {
+		mvNetaPmtDecMSB(&mv_neta_pmt_e, a/2, b/2);
+	} else if (!strcmp(name, "s_jump")) {
+		mvNetaPmtJump(&mv_neta_pmt_e, a, b, c);
+	} else
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t pmt_sw_hex_store(struct device *dev,
+				struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char	*name = attr->attr.name;
+	unsigned int    err = 0, v = 0, a = 0;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%x %d", &v, &a);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "sw_word")) {
+		mv_neta_pmt_e.word = v;
+	} else if (!strcmp(name, "s_rep_2b")) {
+		mvNetaPmtReplace2Bytes(&mv_neta_pmt_e, v);
+	} else if (!strcmp(name, "s_add_2b")) {
+		mvNetaPmtAdd2Bytes(&mv_neta_pmt_e, v);
+	} else if (!strcmp(name, "s_rep_lsb")) {
+		mvNetaPmtReplaceLSB(&mv_neta_pmt_e, v, a);
+	} else if (!strcmp(name, "s_rep_msb")) {
+		mvNetaPmtReplaceMSB(&mv_neta_pmt_e, v, a);
+	} else if (!strcmp(name, "s_ip_csum")) {
+		mvNetaPmtReplaceIPv4csum(&mv_neta_pmt_e, v);
+	} else if (!strcmp(name, "s_l4_csum")) {
+		mvNetaPmtReplaceL4csum(&mv_neta_pmt_e, v);
+	} else
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(help,        S_IRUSR, pmt_show, NULL);
+static DEVICE_ATTR(sw_dump,     S_IRUSR, pmt_show, NULL);
+static DEVICE_ATTR(sw_clear,    S_IRUSR, pmt_show, NULL);
+static DEVICE_ATTR(hw_regs,     S_IWUSR, pmt_show, pmt_hw_store);
+static DEVICE_ATTR(hw_write,    S_IWUSR, pmt_show, pmt_hw_store);
+static DEVICE_ATTR(hw_read,     S_IWUSR, pmt_show, pmt_hw_store);
+static DEVICE_ATTR(hw_dump,     S_IWUSR, pmt_show, pmt_hw_store);
+static DEVICE_ATTR(hw_dump_all, S_IWUSR, pmt_show, pmt_hw_store);
+static DEVICE_ATTR(hw_inv,      S_IWUSR, pmt_show, pmt_hw_store);
+static DEVICE_ATTR(hw_inv_all,  S_IWUSR, pmt_show, pmt_hw_store);
+static DEVICE_ATTR(sw_word,     S_IWUSR, pmt_show, pmt_sw_hex_store);
+static DEVICE_ATTR(s_rep_2b,    S_IWUSR, pmt_show, pmt_sw_hex_store);
+static DEVICE_ATTR(s_rep_lsb,   S_IWUSR, pmt_show, pmt_sw_hex_store);
+static DEVICE_ATTR(s_rep_msb,   S_IWUSR, pmt_show, pmt_sw_hex_store);
+static DEVICE_ATTR(s_ip_csum,   S_IWUSR, pmt_show, pmt_sw_hex_store);
+static DEVICE_ATTR(s_l4_csum,   S_IWUSR, pmt_show, pmt_sw_hex_store);
+static DEVICE_ATTR(s_dec_lsb,   S_IWUSR, pmt_show, pmt_sw_dec_store);
+static DEVICE_ATTR(s_dec_msb,   S_IWUSR, pmt_show, pmt_sw_dec_store);
+static DEVICE_ATTR(s_add_2b,    S_IWUSR, pmt_show, pmt_sw_hex_store);
+static DEVICE_ATTR(s_del,       S_IWUSR, pmt_show, pmt_sw_dec_store);
+static DEVICE_ATTR(s_last,      S_IWUSR, pmt_show, pmt_sw_dec_store);
+static DEVICE_ATTR(s_flags,     S_IWUSR, pmt_show, pmt_sw_dec_store);
+static DEVICE_ATTR(s_skip,      S_IWUSR, pmt_show, pmt_sw_dec_store);
+static DEVICE_ATTR(s_jump,      S_IWUSR, pmt_show, pmt_sw_dec_store);
+
+
+static struct attribute *pmt_attrs[] = {
+	&dev_attr_help.attr,
+	&dev_attr_sw_dump.attr,
+	&dev_attr_sw_clear.attr,
+	&dev_attr_hw_regs.attr,
+	&dev_attr_hw_write.attr,
+	&dev_attr_hw_read.attr,
+	&dev_attr_hw_dump.attr,
+	&dev_attr_hw_dump_all.attr,
+	&dev_attr_hw_inv.attr,
+	&dev_attr_hw_inv_all.attr,
+	&dev_attr_sw_word.attr,
+	&dev_attr_s_rep_2b.attr,
+	&dev_attr_s_add_2b.attr,
+	&dev_attr_s_del.attr,
+	&dev_attr_s_rep_lsb.attr,
+	&dev_attr_s_rep_msb.attr,
+	&dev_attr_s_ip_csum.attr,
+	&dev_attr_s_l4_csum.attr,
+	&dev_attr_s_dec_lsb.attr,
+	&dev_attr_s_dec_msb.attr,
+	&dev_attr_s_last.attr,
+	&dev_attr_s_flags.attr,
+	&dev_attr_s_skip.attr,
+	&dev_attr_s_jump.attr,
+	NULL
+};
+
+static struct attribute_group pmt_group = {
+	.name = "pmt",
+	.attrs = pmt_attrs,
+};
+
+int mv_neta_pme_sysfs_init(struct kobject *pp2_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(neta_kobj, &pme_group);
+	if (err)
+		printk(KERN_INFO "sysfs group %s failed %d\n", pmt_group.name, err);
+
+	return err;
+}
+
+int mv_neta_pme_sysfs_exit(struct kobject *neta_kobj)
+{
+	sysfs_remove_group(neta_kobj, &pme_group);
+
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/neta/pnc/pnc_sysfs.c b/drivers/net/ethernet/mvebu_net/neta/pnc/pnc_sysfs.c
new file mode 100644
index 000000000000..9ca5e2dc129b
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/pnc/pnc_sysfs.c
@@ -0,0 +1,402 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/version.h>
+
+#include "mvOs.h"
+#include "mvCommon.h"
+#ifndef CONFIG_ARCH_MVEBU
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#endif
+
+#include "gbe/mvNeta.h"
+
+#include "pnc/mvPnc.h"
+#include "pnc/mvTcam.h"
+
+#ifdef CONFIG_MV_ETH_PNC_L3_FLOW
+#include "pnc_sysfs.h"
+#endif /* CONFIG_MV_ETH_PNC_L3_FLOW */
+
+static struct tcam_entry te;
+
+static ssize_t tcam_help(char *buf)
+{
+	int off = 0;
+
+	off += mvOsSPrintf(buf+off, "cat <file>\n");
+	off += mvOsSPrintf(buf+off, " sw_dump         - dump sw entry\n");
+	off += mvOsSPrintf(buf+off, " hw_dump         - dump valid entries\n");
+	off += mvOsSPrintf(buf+off, " hw_regs         - dump registers\n");
+	off += mvOsSPrintf(buf+off, " hw_hits         - decode hit sequences\n");
+
+#ifdef MV_ETH_PNC_LB
+	off += mvOsSPrintf(buf+off, " lb_dump         - dump load balancing hash entries\n");
+#endif /* MV_ETH_PNC_LB */
+#ifdef MV_ETH_PNC_AGING
+	off += mvOsSPrintf(buf+off, " age_dump        - dump non-zero aging counters\n");
+	off += mvOsSPrintf(buf+off, " age_dump_all    - dump all aging counters\n");
+	off += mvOsSPrintf(buf+off, " age_scan        - dump aging Scanner log\n");
+	off += mvOsSPrintf(buf+off, " age_reset       - reset all aging counters\n");
+#endif /* MV_ETH_PNC_AGING */
+
+	off += mvOsSPrintf(buf+off, "echo a > <file>\n");
+	off += mvOsSPrintf(buf+off, " hw_write        - write sw entry into tcam entry <a>\n");
+	off += mvOsSPrintf(buf+off, " hw_read         - read tcam entry <a> into sw entry\n");
+	off += mvOsSPrintf(buf+off, " hw_inv          - disable tcam entry <a>\n");
+	off += mvOsSPrintf(buf+off, " hw_inv_all      - disable all tcam entries\n");
+	off += mvOsSPrintf(buf+off, " hw_hits         - start recording for port <a>\n");
+
+#ifdef MV_ETH_PNC_LB
+	off += mvOsSPrintf(buf+off, " lb_ip4          - set LB mode <a> for ipv4 traffic: 0-disable, 1-2tuple\n");
+	off += mvOsSPrintf(buf+off, " lb_ip6          - set LB mode <a> for ipv6 traffic: 0-disable, 1-2tuple\n");
+	off += mvOsSPrintf(buf+off, " lb_l4           - set LB mode <a> for TCP/UDP traffic: : 0-disable, 1-2tuple, 2-4tuple\n");
+#endif /* MV_ETH_PNC_LB */
+
+#ifdef MV_ETH_PNC_AGING
+	off += mvOsSPrintf(buf+off, " age_clear       - clear aging counter for tcam entry <a>\n");
+	off += mvOsSPrintf(buf+off, " age_cntr        - show aging counter for tcam entry <a>\n");
+#endif /* MV_ETH_PNC_AGING */
+
+	off += mvOsSPrintf(buf+off, "echo a b > <file>\n");
+	off += mvOsSPrintf(buf+off, " t_offset_byte   - on offset <a> match value <b>\n");
+	off += mvOsSPrintf(buf+off, " t_offset_mask   - on offset <a> use mask <b>\n");
+	off += mvOsSPrintf(buf+off, " t_port          - match port value <a> with mask <b>\n");
+	off += mvOsSPrintf(buf+off, " t_ainfo         - match ainfo value <a> with mask <b>\n");
+	off += mvOsSPrintf(buf+off, " s_shift_update  - fill sram shift index <a> with value <b>\n");
+	off += mvOsSPrintf(buf+off, " s_rinfo         - set rinfo value <a> with mask <b>\n");
+	off += mvOsSPrintf(buf+off, " s_ainfo         - set ainfo value <a> with mask <b>\n");
+	off += mvOsSPrintf(buf+off, " s_flowid        - fill sram flowid nibbles <b> from value <a>\n");
+	off += mvOsSPrintf(buf+off, " s_flowid_part   - fill sram flowid part <b> with value <a>\n");
+
+#ifdef MV_ETH_PNC_NEW
+	off += mvOsSPrintf(buf+off, " s_rinfo_extra   - set 2 bits value <a> to extra result info offset <b>\n");
+#endif /* MV_ETH_PNC_NEW */
+
+#ifdef MV_ETH_PNC_LB
+	off += mvOsSPrintf(buf+off, " lb_rxq          - set rxq <b> for hash value <a>\n");
+#endif /* MV_ETH_PNC_LB */
+
+#ifdef MV_ETH_PNC_AGING
+	off += mvOsSPrintf(buf+off, " age_gr_set      - set group <b> of aging counter for tcam entry <a>\n");
+#endif /* MV_ETH_PNC_AGING */
+
+	return off;
+}
+
+static ssize_t tcam_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	const char   *name = attr->attr.name;
+	unsigned int v, m;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "t_port")) {
+		tcam_sw_get_port(&te, &v, &m);
+		return mvOsSPrintf(buf, "value:0x%x mask:0x%x\n", v, m);
+	} else if (!strcmp(name, "t_lookup")) {
+		tcam_sw_get_lookup(&te, &v, &m);
+		return mvOsSPrintf(buf, "value:0x%x mask:0x%x\n", v, m);
+	} else if (!strcmp(name, "sw_dump"))
+		return tcam_sw_dump(&te, buf);
+	else if (!strcmp(name, "hw_dump"))
+		return tcam_hw_dump(0);
+	else if (!strcmp(name, "hw_dump_all"))
+		return tcam_hw_dump(1);
+	else if (!strcmp(name, "hw_regs"))
+		mvNetaPncRegs();
+	else if (!strcmp(name, "hw_hits"))
+		return tcam_hw_hits(buf);
+#ifdef MV_ETH_PNC_AGING
+	else if (!strcmp(name, "age_dump"))
+		mvPncAgingDump(0);
+	else if (!strcmp(name, "age_dump_all"))
+		mvPncAgingDump(1);
+	else if (!strcmp(name, "age_scan"))
+		mvPncAgingScannerDump();
+	else if (!strcmp(name, "age_reset"))
+		mvPncAgingReset();
+#endif /* MV_ETH_PNC_AGING */
+#ifdef MV_ETH_PNC_LB
+	else if (!strcmp(name, "lb_dump"))
+		mvPncLbDump();
+#endif /* MV_ETH_PNC_LB */
+	else if (!strcmp(name, "help"))
+		return tcam_help(buf);
+
+	return 0;
+}
+static ssize_t tcam_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0, a = 0, b = 0;
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%x %x", &a, &b);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "hw_write"))
+		tcam_hw_write(&te, a);
+	else if (!strcmp(name, "hw_read"))
+		tcam_hw_read(&te, a);
+	else if (!strcmp(name, "hw_debug"))
+		tcam_hw_debug(a);
+	else if (!strcmp(name, "hw_inv"))
+		tcam_hw_inv(a);
+	else if (!strcmp(name, "hw_inv_all"))
+		tcam_hw_inv_all();
+	else if (!strcmp(name, "hw_hits"))
+		tcam_hw_record(a);
+#ifdef MV_ETH_PNC_LB
+	else if (!strcmp(name, "lb_ip4"))
+		mvPncLbModeIp4(a);
+	else if (!strcmp(name, "lb_ip6"))
+		mvPncLbModeIp6(a);
+	else if (!strcmp(name, "lb_l4"))
+		mvPncLbModeL4(a);
+#endif /* MV_ETH_PNC_LB */
+#ifdef MV_ETH_PNC_AGING
+	else if (!strcmp(name, "age_clear"))
+		mvPncAgingCntrClear(a);
+	else if (!strcmp(name, "age_cntr")) {
+		b = mvPncAgingCntrRead(a);
+		printk(KERN_INFO "tid=%d: age_cntr = 0x%08x\n", a, b);
+	}
+#endif /* MV_ETH_PNC_AGING */
+	else if (!strcmp(name, "sw_clear"))
+		tcam_sw_clear(&te);
+	else if (!strcmp(name, "sw_text")) {
+		/* Remove last byte (new line) from the buffer */
+		int  len = strlen(buf);
+		char *temp = mvOsMalloc(len + 1);
+
+		strncpy(temp, buf, len-1);
+		temp[len-1] = 0;
+		tcam_sw_text(&te, temp);
+		mvOsFree(temp);
+	} else if (!strcmp(name, "t_port"))
+		tcam_sw_set_port(&te, a, b);
+	else if (!strcmp(name, "t_lookup"))
+		tcam_sw_set_lookup(&te, a);
+	else if (!strcmp(name, "t_ainfo_0"))
+		tcam_sw_set_ainfo(&te, 0<<a, 1<<a);
+	else if (!strcmp(name, "t_ainfo_1"))
+		tcam_sw_set_ainfo(&te, 1<<a, 1<<a);
+	else if (!strcmp(name, "t_ainfo"))
+		tcam_sw_set_ainfo(&te, a, b);
+	else if (!strcmp(name, "t_offset_byte"))
+		tcam_sw_set_byte(&te, a, b);
+	else if (!strcmp(name, "t_offset_mask"))
+		tcam_sw_set_mask(&te, a, b);
+	else if (!strcmp(name, "s_lookup"))
+		sram_sw_set_next_lookup(&te, a);
+	else if (!strcmp(name, "s_ainfo"))
+		sram_sw_set_ainfo(&te, a, b);
+	else if (!strcmp(name, "s_rinfo"))
+		sram_sw_set_rinfo(&te, a, b);
+	else if (!strcmp(name, "s_lookup_done"))
+		sram_sw_set_lookup_done(&te, a);
+	else if (!strcmp(name, "s_next_lookup_shift"))
+		sram_sw_set_next_lookup_shift(&te, a);
+	else if (!strcmp(name, "s_rxq"))
+		sram_sw_set_rxq(&te, a, b);
+	else if (!strcmp(name, "s_shift_update"))
+		sram_sw_set_shift_update(&te, a, b);
+#ifdef MV_ETH_PNC_NEW
+	else if (!strcmp(name, "s_rinfo_extra"))
+		sram_sw_set_rinfo_extra(&te, a << (b & ~1));
+#endif /* MV_ETH_PNC_NEW */
+	else if (!strcmp(name, "s_flowid"))
+		sram_sw_set_flowid(&te, a, b);
+	else if (!strcmp(name, "s_flowid_part"))
+		sram_sw_set_flowid_partial(&te, a, b);
+#ifdef MV_ETH_PNC_AGING
+	else if (!strcmp(name, "age_gr_set"))
+		mvPncAgingCntrGroupSet(a, b);
+#endif /* MV_ETH_PNC_AGING */
+#ifdef MV_ETH_PNC_LB
+	else if (!strcmp(name, "lb_rxq"))
+		err = mvPncLbRxqSet(a, b);
+#endif /* MV_ETH_PNC_LB */
+	else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+#ifdef MV_ETH_PNC_AGING
+static DEVICE_ATTR(age_dump,     S_IRUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(age_dump_all, S_IRUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(age_scan,     S_IRUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(age_reset,    S_IRUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(age_clear,    S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(age_cntr,     S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(age_gr_set,   S_IWUSR, tcam_show, tcam_store);
+#endif /* MV_ETH_PNC_AGING */
+
+#ifdef MV_ETH_PNC_NEW
+static DEVICE_ATTR(s_rinfo_extra, S_IWUSR, tcam_show, tcam_store);
+#endif
+
+static DEVICE_ATTR(hw_write,    S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(hw_read,     S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(hw_debug,    S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(hw_inv,      S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(hw_inv_all,  S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(hw_dump,     S_IRUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(hw_dump_all, S_IRUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(hw_regs,     S_IRUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(hw_hits,     S_IRUSR | S_IWUSR, tcam_show, tcam_store);
+
+#ifdef MV_ETH_PNC_LB
+static DEVICE_ATTR(lb_dump,     S_IRUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(lb_rxq,      S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(lb_ip4,      S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(lb_ip6,      S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(lb_l4,       S_IWUSR, tcam_show, tcam_store);
+#endif /* MV_ETH_PNC_LB */
+
+static DEVICE_ATTR(sw_dump,     S_IRUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(sw_clear,    S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(sw_text,     S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(t_port,      S_IRUSR | S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(t_lookup,    S_IRUSR | S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(t_ainfo_0,   S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(t_ainfo_1,   S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(t_ainfo,     S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(t_offset_byte, S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(t_offset_mask, S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(s_lookup,    S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(s_ainfo,     S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(s_lookup_done, S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(s_next_lookup_shift, S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(s_rxq,       S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(s_shift_update, S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(s_rinfo,     S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(s_flowid, 	S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(s_flowid_part, S_IWUSR, tcam_show, tcam_store);
+static DEVICE_ATTR(help,        S_IRUSR, tcam_show, tcam_store);
+
+static struct attribute *pnc_attrs[] = {
+#ifdef MV_ETH_PNC_AGING
+    &dev_attr_age_dump.attr,
+    &dev_attr_age_dump_all.attr,
+    &dev_attr_age_scan.attr,
+    &dev_attr_age_reset.attr,
+    &dev_attr_age_clear.attr,
+    &dev_attr_age_cntr.attr,
+    &dev_attr_age_gr_set.attr,
+#endif /* MV_ETH_PNC_AGING */
+
+#ifdef MV_ETH_PNC_NEW
+    &dev_attr_s_rinfo_extra.attr,
+#endif /* MV_ETH_PNC_NEW */
+
+    &dev_attr_hw_write.attr,
+    &dev_attr_hw_read.attr,
+    &dev_attr_hw_debug.attr,
+    &dev_attr_hw_inv.attr,
+    &dev_attr_hw_inv_all.attr,
+    &dev_attr_hw_dump.attr,
+    &dev_attr_hw_dump_all.attr,
+    &dev_attr_hw_regs.attr,
+    &dev_attr_hw_hits.attr,
+
+#ifdef MV_ETH_PNC_LB
+    &dev_attr_lb_dump.attr,
+	&dev_attr_lb_rxq.attr,
+	&dev_attr_lb_ip4.attr,
+	&dev_attr_lb_ip6.attr,
+	&dev_attr_lb_l4.attr,
+#endif /* MV_ETH_PNC_LB */
+
+    &dev_attr_sw_dump.attr,
+    &dev_attr_sw_clear.attr,
+    &dev_attr_sw_text.attr,
+    &dev_attr_t_port.attr,
+    &dev_attr_t_lookup.attr,
+    &dev_attr_t_ainfo_0.attr,
+    &dev_attr_t_ainfo.attr,
+    &dev_attr_t_ainfo_1.attr,
+    &dev_attr_t_offset_byte.attr,
+    &dev_attr_t_offset_mask.attr,
+    &dev_attr_s_lookup.attr,
+    &dev_attr_s_ainfo.attr,
+    &dev_attr_s_lookup_done.attr,
+    &dev_attr_s_next_lookup_shift.attr,
+    &dev_attr_s_rxq.attr,
+    &dev_attr_s_shift_update.attr,
+    &dev_attr_s_rinfo.attr,
+    &dev_attr_s_flowid.attr,
+    &dev_attr_s_flowid_part.attr,
+    &dev_attr_help.attr,
+    NULL
+};
+
+static struct attribute_group pnc_group = {
+	.name = "pnc",
+	.attrs = pnc_attrs,
+};
+
+int mv_neta_pnc_sysfs_init(struct kobject *neta_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(neta_kobj, &pnc_group);
+	if (err)
+		pr_err("sysfs group %s failed %d\n", pnc_group.name, err);
+
+	return err;
+}
+
+int mv_neta_pnc_sysfs_exit(struct kobject *neta_kobj)
+{
+	sysfs_remove_group(neta_kobj, &pnc_group);
+
+	return 0;
+}
+
+
diff --git a/drivers/net/ethernet/mvebu_net/neta/pnc/pnc_sysfs.h b/drivers/net/ethernet/mvebu_net/neta/pnc/pnc_sysfs.h
new file mode 100644
index 000000000000..dc2835d46626
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/pnc/pnc_sysfs.h
@@ -0,0 +1,33 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#ifndef __PNC_SYSFS_H__
+#define __PNC_SYSFS_H__
+
+int rxq_map_sysfs_init(struct kobject *kobj);
+
+#endif /* __PNC_SYSFS_H__ */
diff --git a/drivers/net/ethernet/mvebu_net/neta/pnc/rxq_map_sysfs.c b/drivers/net/ethernet/mvebu_net/neta/pnc/rxq_map_sysfs.c
new file mode 100644
index 000000000000..aee27090f57c
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/pnc/rxq_map_sysfs.c
@@ -0,0 +1,196 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/version.h>
+
+#include "mvOs.h"
+#include "mvCommon.h"
+#ifndef CONFIG_OF
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#endif
+
+#include "gbe/mvNeta.h"
+#include "pnc/mvPnc.h"
+
+#include "net_dev/mv_netdev.h"
+
+
+static ssize_t rxq_map_help(char *buf)
+{
+	int off = 0;
+
+	off += sprintf(buf+off, "cat                                   dump_all  - dump all rxq mapping rules\n");
+	off += sprintf(buf+off, "echo port sip dip rxq               > ip4_rxq   - add new mapping rule from <sip> <dip> to <rxq> via <port>\n");
+	off += sprintf(buf+off, "echo port sip dip                   > ip4_drop  - add new mapping rule from <sip> <dip> to drop via <port>\n");
+	off += sprintf(buf+off, "echo port sip dip                   > ip4_del   - delete existing rule with <sip> <dip>\n");
+
+	off += sprintf(buf+off, "echo port sip dip sport dport rxq   > udp4_rxq  - add new mapping rule from 5-tuple to <rxq> via <port>\n");
+	off += sprintf(buf+off, "echo port sip dip sport dport       > udp4_drop - add new mapping rule from 5-tuple to drop via <port>\n");
+	off += sprintf(buf+off, "echo port sip dip sport dport       > udp4_del  - delete existing rule\n");
+
+	off += sprintf(buf+off, "echo port sip dip sport dport rxq   > tcp4_rxq  - add new mapping rule from 5-tuple to <rxq> via <port>\n");
+	off += sprintf(buf+off, "echo port sip dip sport dport       > tcp4_drop - add new mapping rule from 5-tuple to drop via <port>\n");
+	off += sprintf(buf+off, "echo port sip dip sport dport       > tcp4_del  - delete existing rule\n");
+
+	off += sprintf(buf+off, "\nparameters: sip/dip = xxx.xxx.xxx.xxx\n");
+
+	return off;
+}
+
+static ssize_t rxq_map_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	const char   *name = attr->attr.name;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "dump_all"))
+		pnc_rxq_map_dump();
+	else if (!strcmp(name, "help"))
+		return rxq_map_help(buf);
+
+	return 0;
+}
+
+
+static ssize_t rxq_map_2t_store(struct device *dev,
+			 struct device_attribute *attr, const char *buf, size_t len)
+{
+	unsigned int res = 0, err = 0, sip, dip, port;
+	int rxq;
+	const char *name = attr->attr.name;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	res = sscanf(buf, "%d %hhu.%hhu.%hhu.%hhu %hhu.%hhu.%hhu.%hhu %d", &port, (unsigned char *)(&sip), (unsigned char *)(&sip) + 1,
+		(unsigned char *)(&sip) + 2, (unsigned char *)(&sip) + 3, (unsigned char *)(&dip), (unsigned char *)(&dip) + 1,
+		(unsigned char *)(&dip) + 2, (unsigned char *)(&dip) + 3, &rxq);
+	if (res < 9)
+		return -EINVAL;
+
+	if (!strcmp(name, "ip4_drop"))
+		rxq = -1;
+	else if (!strcmp(name, "ip4_del"))
+		rxq = -2;
+
+	err = pnc_ip4_2tuple_rxq(port, sip, dip, rxq);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t rxq_map_5t_store(struct device *dev,
+			 struct device_attribute *attr, const char *buf, size_t len)
+{
+	unsigned int res = 0, err = 0, sip, dip, ports, proto, port;
+	int rxq;
+	const char *name = attr->attr.name;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	res = sscanf(buf, "%d %hhu.%hhu.%hhu.%hhu %hhu.%hhu.%hhu.%hhu %hu %hu %d", &port, (unsigned char *)(&sip),
+		(unsigned char *)(&sip) + 1, (unsigned char *)(&sip) + 2, (unsigned char *)(&sip) + 3, (unsigned char *)(&dip),
+		(unsigned char *)(&dip) + 1, (unsigned char *)(&dip) + 2, (unsigned char *)(&dip) + 3,
+		(u16 *)&ports + 1, (u16 *)&ports, &rxq);
+	if (res < 11)
+		return -EINVAL;
+
+	if (!strcmp(name, "udp4_drop") || !strcmp(name, "tcp4_drop"))
+		rxq = -1;
+	else if (!strcmp(name, "udp4_del") || !strcmp(name, "tcp4_del"))
+		rxq = -2;
+
+	if (name[0] == 't')
+		proto = 6; /* tcp */
+	else
+		proto = 17; /* udp */
+
+	err = pnc_ip4_5tuple_rxq(port, sip, dip, MV_BYTE_SWAP_32BIT(ports), proto, rxq);
+
+	return err ? -EINVAL : len;
+}
+
+
+static DEVICE_ATTR(help,		S_IRUSR, rxq_map_show, rxq_map_2t_store);
+static DEVICE_ATTR(dump_all,		S_IRUSR, rxq_map_show, rxq_map_2t_store);
+//static DEVICE_ATTR(dump,        S_IWUSR, rxq_map_show, rxq_map_2t_store);
+static DEVICE_ATTR(ip4_rxq,	S_IWUSR, rxq_map_show, rxq_map_2t_store);
+static DEVICE_ATTR(ip4_drop,	S_IWUSR, rxq_map_show, rxq_map_2t_store);
+static DEVICE_ATTR(ip4_del,	S_IWUSR, rxq_map_show, rxq_map_2t_store);
+static DEVICE_ATTR(udp4_rxq,	S_IWUSR, rxq_map_show, rxq_map_5t_store);
+static DEVICE_ATTR(udp4_drop,	S_IWUSR, rxq_map_show, rxq_map_5t_store);
+static DEVICE_ATTR(udp4_del,	S_IWUSR, rxq_map_show, rxq_map_5t_store);
+static DEVICE_ATTR(tcp4_rxq,	S_IWUSR, rxq_map_show, rxq_map_5t_store);
+static DEVICE_ATTR(tcp4_drop,	S_IWUSR, rxq_map_show, rxq_map_5t_store);
+static DEVICE_ATTR(tcp4_del,	S_IWUSR, rxq_map_show, rxq_map_5t_store);
+
+static struct attribute *rxq_map_attrs[] = {
+	&dev_attr_help.attr,
+	&dev_attr_dump_all.attr,
+	&dev_attr_ip4_rxq.attr,
+	&dev_attr_ip4_drop.attr,
+	&dev_attr_ip4_del.attr,
+	&dev_attr_udp4_rxq.attr,
+	&dev_attr_udp4_drop.attr,
+	&dev_attr_udp4_del.attr,
+	&dev_attr_tcp4_rxq.attr,
+	&dev_attr_tcp4_drop.attr,
+	&dev_attr_tcp4_del.attr,
+	NULL
+};
+
+static struct attribute_group rxq_map_group = {
+	.name = "rxq_map",
+	.attrs = rxq_map_attrs,
+};
+
+int rxq_map_sysfs_init(struct kobject *kobj)
+{
+	int err;
+
+	err = sysfs_create_group(kobj, &rxq_map_group);
+	if (err) {
+		printk(KERN_INFO "sysfs group failed %d\n", err);
+		goto out;
+	}
+out:
+	return err;
+}
+
+
+
+MODULE_AUTHOR("Yoni Farhadian");
+MODULE_DESCRIPTION("PNC rule to rxq map for Marvell NetA");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/mvebu_net/neta/pnc/wol_sysfs.c b/drivers/net/ethernet/mvebu_net/neta/pnc/wol_sysfs.c
new file mode 100644
index 000000000000..0c7444ddce18
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/neta/pnc/wol_sysfs.c
@@ -0,0 +1,190 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include "mvOs.h"
+#include "mvCommon.h"
+#ifndef CONFIG_ARCH_MVEBU
+#include "ctrlEnv/mvCtrlEnvLib.h"
+#endif
+
+#include "gbe/mvNeta.h"
+#include "pnc/mvPnc.h"
+
+#include "net_dev/mv_netdev.h"
+
+static char	wol_data[MV_PNC_TOTAL_DATA_SIZE];
+static int  wol_data_size = 0;
+static char	wol_mask[MV_PNC_TOTAL_DATA_SIZE];
+static int  wol_mask_size = 0;
+
+extern void mv_eth_wol_wakeup(int port);
+extern int mv_eth_wol_sleep(int port);
+
+static ssize_t wol_help(char *buf)
+{
+	int off = 0;
+
+	off += sprintf(buf+off, "cat                  dump_all      - dump all wol rules\n");
+	off += sprintf(buf+off, "echo idx           > dump          - dump rule <idx>\n");
+	off += sprintf(buf+off, "echo port          > sleep         - enter WoL mode on <port>\n");
+	off += sprintf(buf+off, "echo port          > wakeup        - exit WoL mode on <port>\n");
+	off += sprintf(buf+off, "echo str           > data          - set data string\n");
+	off += sprintf(buf+off, "echo str           > mask          - set mask string\n");
+	off += sprintf(buf+off, "echo port          > add           - add new rule with <data> and <mask> on <port>\n");
+	off += sprintf(buf+off, "echo idx           > del           - delete existing WoL rule <idx>\n");
+	off += sprintf(buf+off, "echo port          > del_all       - delete all WoL rules added to <port>\n");
+
+	return off;
+}
+
+static ssize_t wol_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	const char   *name = attr->attr.name;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "dump_all"))
+		mv_pnc_wol_dump();
+	else if (!strcmp(name, "help"))
+		return wol_help(buf);
+
+	return 0;
+}
+
+static ssize_t wol_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  p, size, err = 0;
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%d", &p);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "sleep"))
+		mv_eth_wol_sleep(p);
+	else if (!strcmp(name, "wakeup"))
+		mv_eth_wol_wakeup(p);
+	else if (!strcmp(name, "dump")) {
+		if (mv_pnc_wol_rule_dump(p))
+			printk(KERN_INFO "WoL rule #%d doesn't exist\n", p);
+	} else if (!strcmp(name, "data")) {
+		memset(wol_data, 0, sizeof(wol_data));
+		size = strlen(buf) / 2;
+		if (size > sizeof(wol_data))
+			size = sizeof(wol_data);
+		mvHexToBin(buf, wol_data, size);
+		wol_data_size = size;
+	} else if (!strcmp(name, "mask")) {
+		memset(wol_mask, 0, sizeof(wol_mask));
+		size = strlen(buf) / 2;
+		if (size > sizeof(wol_mask))
+			size = sizeof(wol_mask);
+		mvHexToBin(buf, wol_mask, size);
+		wol_mask_size = size;
+	} else if (!strcmp(name, "add")) {
+		int idx;
+		idx = mv_pnc_wol_rule_set(p, wol_data, wol_mask, MV_MIN(wol_data_size, wol_mask_size));
+		if (idx < 0)
+			err = 1;
+	} else if (!strcmp(name, "del")) {
+		err = mv_pnc_wol_rule_del(p);
+	} else if (!strcmp(name, "del_all")) {
+		err = mv_pnc_wol_rule_del_all(p);
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(help,        S_IRUSR, wol_show, wol_store);
+static DEVICE_ATTR(dump_all,    S_IRUSR, wol_show, wol_store);
+static DEVICE_ATTR(dump,        S_IWUSR, wol_show, wol_store);
+static DEVICE_ATTR(data,        S_IWUSR, wol_show, wol_store);
+static DEVICE_ATTR(mask,        S_IWUSR, wol_show, wol_store);
+static DEVICE_ATTR(add,         S_IWUSR, wol_show, wol_store);
+static DEVICE_ATTR(del,         S_IWUSR, wol_show, wol_store);
+static DEVICE_ATTR(del_all,     S_IWUSR, wol_show, wol_store);
+static DEVICE_ATTR(sleep,       S_IWUSR, wol_show, wol_store);
+static DEVICE_ATTR(wakeup,      S_IWUSR, wol_show, wol_store);
+
+static struct attribute *wol_attrs[] = {
+    &dev_attr_help.attr,
+    &dev_attr_dump_all.attr,
+    &dev_attr_dump.attr,
+    &dev_attr_data.attr,
+    &dev_attr_mask.attr,
+    &dev_attr_add.attr,
+    &dev_attr_del.attr,
+    &dev_attr_del_all.attr,
+    &dev_attr_sleep.attr,
+    &dev_attr_wakeup.attr,
+    NULL
+};
+
+static struct attribute_group wol_group = {
+	.name = "wol",
+	.attrs = wol_attrs,
+};
+
+int mv_neta_wol_sysfs_init(struct kobject *neta_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(neta_kobj, &wol_group);
+	if (err)
+		pr_err("sysfs group %s failed %d\n", wol_group.name, err);
+
+	return err;
+}
+
+int mv_neta_wol_sysfs_exit(struct kobject *neta_kobj)
+{
+	sysfs_remove_group(neta_kobj, &wol_group);
+
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/netmux/.gitignore b/drivers/net/ethernet/mvebu_net/netmux/.gitignore
new file mode 100644
index 000000000000..60319acce7d0
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/netmux/.gitignore
@@ -0,0 +1,96 @@
+
+#
+# NOTE! Don't add files that are generated in specific
+# subdirectories here. Add them in the ".gitignore" file
+# in that subdirectory instead.
+#
+# NOTE! Please use 'git ls-files -i --exclude-standard'
+# command after changing this file, to see if there are
+# any tracked files which get ignored after the change.
+#
+# Normal rules
+#
+.*
+*.o
+*.o.*
+*.a
+*.s
+*.ko
+*.so
+*.so.dbg
+*.mod.c
+*.i
+*.lst
+*.symtypes
+*.order
+modules.builtin
+*.elf
+*.bin
+*.gz
+*.bz2
+*.lzma
+*.xz
+*.lzo
+*.patch
+*.gcno
+
+#
+#
+#
+arch/arm/mach-armadaxp/armada_xp_family/
+arch/arm/mach-avantalp/avanta_lp_family/
+arch/arm/plat-armada/common/
+arch/arm/plat-armada/mv_hal/
+arch/arm/plat-armada/mv_drivers_lsp/mv_pp2/
+
+#
+# Top-level generic files
+#
+/tags
+/TAGS
+/linux
+/vmlinux
+/vmlinuz
+/System.map
+/Module.markers
+/Module.symvers
+
+#
+# Debian directory (make deb-pkg)
+#
+/debian/
+
+#
+# git files that we don't want to ignore even it they are dot-files
+#
+!.gitignore
+!.mailmap
+
+#
+# Generated include files
+#
+include/config
+include/linux/version.h
+include/generated
+arch/*/include/generated
+
+# stgit generated dirs
+patches-*
+
+# quilt's files
+patches
+series
+
+# cscope files
+cscope.*
+ncscope.*
+
+# gnu global files
+GPATH
+GRTAGS
+GSYMS
+GTAGS
+
+*.orig
+*~
+\#*#
diff --git a/drivers/net/ethernet/mvebu_net/netmux/Makefile b/drivers/net/ethernet/mvebu_net/netmux/Makefile
new file mode 100644
index 000000000000..a6a38099f226
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/netmux/Makefile
@@ -0,0 +1,22 @@
+#
+# Makefile for the Marvell Virtual Netwrok interfaces library
+#
+
+
+ifeq ($(CONFIG_ARCH_MVEBU),y)
+
+ccflags-y       += $(MVEBU_NET_FLAGS)
+ccflags-y       += $(INCLUDE_DIRS)
+
+else
+
+ifneq ($(MACHINE),)
+include $(srctree)/$(MACHINE)/config/mvRules.mk
+endif
+
+ccflags-y       += -I$(PLAT_PATH_I)/$(LSP_MUX_DIR)
+
+endif # CONFIG_ARCH_MVEBU
+
+
+obj-y	+= mv_mux_netdev.o mv_mux_sysfs.o mv_mux_tool.o
diff --git a/drivers/net/ethernet/mvebu_net/netmux/mv_mux_netdev.c b/drivers/net/ethernet/mvebu_net/netmux/mv_mux_netdev.c
new file mode 100644
index 000000000000..5ef86dc1a5a8
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/netmux/mv_mux_netdev.c
@@ -0,0 +1,1538 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+
+#include "mvCommon.h"
+#include <linux/platform_device.h>
+#include <linux/module.h>
+
+#ifdef CONFIG_ARCH_MVEBU
+#include "mvNetConfig.h"
+#else
+#include "ctrlEnv/mvCtrlEnvSpec.h"
+#endif
+
+#include "mvDebug.h"
+#include "mv_switch.h"
+
+#include "mv_mux_netdev.h"
+#include "mv_mux_tool.h"
+
+static struct notifier_block mux_notifier_block __read_mostly;
+static const struct net_device_ops mv_mux_netdev_ops;
+static struct  mv_mux_switch_port  mux_switch_shadow;
+struct  mv_mux_eth_port mux_eth_shadow[MV_ETH_MAX_PORTS];
+
+/* switch functions that called from mux */
+static const struct  mv_mux_switch_ops *switch_ops;
+
+/* count mux devices number */
+static int mux_init_cnt;
+
+/* ppv2/neta functions that called from mux */
+static struct  mv_mux_eth_ops	*eth_ops;
+
+/* mux functions that called from switch */
+static const struct  mv_switch_mux_ops mux_ops;
+
+static inline struct net_device *mv_mux_rx_netdev_get(int port, struct sk_buff *skb);
+static inline int mv_mux_rx_tag_remove(struct net_device *dev, struct sk_buff *skb);
+static inline int mv_mux_tx_skb_tag_add(struct net_device *dev, struct sk_buff *skb);
+static int mv_mux_netdev_delete_all(int port);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
+void netif_stacked_transfer_operstate(const struct net_device *rootdev,
+										struct net_device *dev)
+{
+	if (rootdev->operstate == IF_OPER_DORMANT)
+		netif_dormant_on(dev);
+	else
+		netif_dormant_off(dev);
+
+	if (netif_carrier_ok(rootdev)) {
+		if (!netif_carrier_ok(dev))
+			netif_carrier_on(dev);
+	} else {
+		if (netif_carrier_ok(dev))
+			netif_carrier_off(dev);
+	}
+}
+#endif
+/*-----------------------------------------------------------------------------------------*/
+/*----------------------------     MANAGER      -------------------------------------------*/
+/*-----------------------------------------------------------------------------------------*/
+static int mv_mux_mgr_create(char *name, int gbe_port, int group, MV_MUX_TAG *tag)
+{
+	struct net_device *mux_dev;
+	unsigned char broadcast[MV_MAC_ADDR_SIZE] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+	char *unicast;
+
+	mux_dev = mv_mux_netdev_alloc(name, group, tag);
+	if (mux_dev == NULL) {
+		printk(KERN_ERR "%s: mv_mux_netdev_alloc falied\n", __func__);
+		return MV_FAIL;
+	}
+	mv_mux_netdev_add(gbe_port, mux_dev);
+
+	/* update switch group's cookie for mux ops */
+	if (switch_ops && switch_ops->group_cookie_set)
+		switch_ops->group_cookie_set(group, mux_dev);
+
+	/* update switch's DB with mux's MAC addresses (bcast, ucast) */
+	unicast = mv_mux_get_mac(mux_dev);
+
+	if (switch_ops && switch_ops->mac_addr_set) {
+		switch_ops->mac_addr_set(group, unicast, 1);
+		switch_ops->mac_addr_set(group, broadcast, 1);
+	}
+
+	return 0;
+}
+/*-----------------------------------------------------------------------------------------*/
+
+static int mv_mux_mgr_init(MV_SWITCH_PRESET_TYPE preset, int vid, MV_TAG_TYPE tag_mode, int gbe_port)
+{
+	char name[7] = {0, 0, 0, 0, 0, 0, 0};
+	MV_MUX_TAG tag;
+	unsigned int g;
+
+	for (g = 0; g < MV_SWITCH_DB_NUM; g++) {
+		/* get tag data according to switch */
+		if (switch_ops && switch_ops->tag_get)
+			if (switch_ops->tag_get(g, tag_mode, preset, vid, &tag)) {
+				/* group g enabled */
+				sprintf(name, "mux%d", g);
+				/* create new mux device */
+				mv_mux_mgr_create(name, gbe_port, g, &tag);
+			}
+
+	}
+
+	return 0;
+}
+/*-----------------------------------------------------------------------------------------*/
+
+static int mv_mux_mgr_probe(int gbe_port)
+{
+	MV_TAG_TYPE tag_mode = mux_switch_shadow.tag_type;
+	MV_SWITCH_PRESET_TYPE preset = mux_switch_shadow.preset;
+	int vid = mux_switch_shadow.vid;
+
+	/* config switch according to preset mode */
+	if (switch_ops && switch_ops->preset_init)
+		switch_ops->preset_init(tag_mode, preset, vid);
+
+	/* update netdev port with tag type */
+	mv_mux_tag_type_set(gbe_port, tag_mode);
+
+	/* config mux interfaces according to preset mode */
+	mv_mux_mgr_init(preset, vid, tag_mode, gbe_port);
+
+	if (tag_mode != MV_TAG_TYPE_NONE) {
+		rtnl_lock();
+		dev_set_promiscuity(mux_eth_shadow[gbe_port].root, 1);
+		rtnl_unlock();
+	}
+
+	if (switch_ops && switch_ops->interrupt_unmask)
+		switch_ops->interrupt_unmask();
+
+	printk(KERN_ERR "port #%d establish switch connection\n\n", gbe_port);
+
+	return 0;
+}
+
+/*-----------------------------------------------------------------------------------------*/
+/*----------------------------    MUX DRIVER    -------------------------------------------*/
+/*-----------------------------------------------------------------------------------------*/
+
+/*-----------------------------------------------------------------------------------------*/
+int mv_mux_switch_ops_set(const struct mv_mux_switch_ops *switch_ops_ptr)
+{
+	switch_ops = switch_ops_ptr;
+
+	return 0;
+}
+
+static inline bool mv_mux_internal_switch(int port)
+{
+	/* note: in external switch - attach return false */
+	return ((mux_switch_shadow.attach) && (mux_switch_shadow.gbe_port == port));
+}
+
+void mv_mux_shadow_print(int gbe_port)
+{
+	struct mv_mux_eth_port shadow;
+	static const char * const tags[] = {"None", "mh", "dsa", "edas", "vlan"};
+
+	if (mux_eth_shadow[gbe_port].root == NULL)
+		printk(KERN_ERR "gbe port %d is not attached.\n", gbe_port);
+
+	shadow = mux_eth_shadow[gbe_port];
+		printk(KERN_ERR "\n");
+		printk(KERN_ERR "port #%d: tag type=%s, switch_dev=0x%p, root_dev = 0x%p, flags=0x%x\n",
+			gbe_port, tags[shadow.tag_type],
+			shadow.switch_dev, shadow.root, (unsigned int)shadow.flags);
+
+	mv_mux_netdev_print_all(gbe_port);
+}
+
+/*-----------------------------------------------------------------------------------------*/
+void mv_mux_switch_attach(int gbe_port, int preset, int vid, int tag, int switch_port)
+{
+	/* allready attach */
+	if (mux_switch_shadow.attach)
+		return;
+
+	mux_switch_shadow.tag_type = tag;
+	mux_switch_shadow.preset = preset;
+	mux_switch_shadow.vid = vid;
+	mux_switch_shadow.switch_port = switch_port;
+	mux_switch_shadow.gbe_port = gbe_port;
+	mux_switch_shadow.attach = MV_TRUE;
+	/* Update MTU when activating master interface */
+	mux_switch_shadow.mtu = -1;
+
+#ifdef CONFIG_MV_INCLUDE_SWITCH
+	mv_switch_mux_ops_set(&mux_ops);
+#endif
+
+	if (mux_eth_shadow[gbe_port].root)
+		/* gbe port already attached */
+		mv_mux_mgr_probe(gbe_port);
+}
+
+
+void mv_mux_eth_attach(int port, struct net_device *root, struct mv_mux_eth_ops *ops)
+{
+	/* allready attach */
+	if (mux_eth_shadow[port].root)
+		return;
+
+	/* update root device in shadow */
+	mux_eth_shadow[port].root = root;
+
+	/* update ops structure */
+	eth_ops = ops;
+
+	if (mux_switch_shadow.attach && (mux_switch_shadow.gbe_port == port))
+		/* switch already attached */
+		mv_mux_mgr_probe(port);
+}
+EXPORT_SYMBOL(mv_mux_eth_attach);
+
+void mv_mux_eth_detach(int port)
+{
+	/* allready deattach */
+	if (mux_eth_shadow[port].root == NULL)
+		return;
+
+	/* delete all attached mux devices */
+	mv_mux_netdev_delete_all(port);
+
+	/* clear port data */
+	memset(&mux_eth_shadow[port], 0, sizeof(struct mv_mux_eth_port));
+}
+EXPORT_SYMBOL(mv_mux_eth_detach);
+/*-----------------------------------------------------------------------------------------*/
+
+int mv_mux_netdev_find(unsigned int dev_idx)
+{
+	int port;
+	struct net_device *root;
+
+	for (port = 0; port < MV_ETH_MAX_PORTS; port++) {
+		root = mux_eth_shadow[port].root;
+
+		if (root && (root->ifindex == dev_idx))
+			return port;
+	}
+	return -1;
+}
+EXPORT_SYMBOL(mv_mux_netdev_find);
+/*-----------------------------------------------------------------------------------------*/
+int mv_mux_update_link(void *cookie, int link_up)
+{
+	struct net_device *mux_dev = (struct net_device *)cookie;
+
+	(link_up) ? netif_carrier_on(mux_dev) : netif_carrier_off(mux_dev);
+
+	return 0;
+}
+
+/*-----------------------------------------------------------------------------------------*/
+static inline int mv_mux_get_tag_size(MV_TAG_TYPE type)
+{
+	static const int size_arr[] = {0, MV_ETH_MH_SIZE,
+					MV_ETH_DSA_SIZE,
+					MV_TAG_TYPE_EDSA,
+					MV_TAG_TYPE_VLAN};
+	return size_arr[type];
+}
+/*-----------------------------------------------------------------------------------------*/
+
+int mv_mux_rx(struct sk_buff *skb, int port, struct napi_struct *napi)
+{
+	struct net_device *mux_dev;
+	int    len;
+
+	mux_dev = mv_mux_rx_netdev_get(port, skb);
+
+	if (mux_dev == NULL)
+		goto out;
+
+	/* mux device is down */
+	if (!(mux_dev->flags & IFF_UP))
+		goto out1;
+
+	/* remove tag*/
+	len = mv_mux_rx_tag_remove(mux_dev, skb);
+	mux_dev->stats.rx_packets++;
+	mux_dev->stats.rx_bytes += skb->len;
+
+#ifdef CONFIG_MV_ETH_DEBUG_CODE
+	if (mux_eth_shadow[port].flags & MV_MUX_F_DBG_RX) {
+		struct mux_netdev *pmux_priv = MV_MUX_PRIV(mux_dev);
+		pr_err("\n%s - %s: port=%d, cpu=%d, pkt_size=%d, shift=%d\n",
+			mux_dev->name, __func__, pmux_priv->port, smp_processor_id(), skb->len, len);
+		/* mv_eth_skb_print(skb); */
+		mvDebugMemDump(skb->data, 64, 1);
+	}
+#endif /* CONFIG_MV_ETH_DEBUG_CODE */
+
+/*
+#ifdef ETH_SKB_DEBUG
+		mv_eth_skb_check(skb);
+#endif
+*/
+	skb->protocol = eth_type_trans(skb, mux_dev);
+
+	if (mux_dev->features & NETIF_F_GRO) {
+		/*
+		TODO update mux priv gro counters
+		STAT_DBG(pp->stats.rx_gro++);
+		STAT_DBG(pp->stats.rx_gro_bytes += skb->len);
+	`	*/
+		return napi_gro_receive(napi, skb);
+	}
+
+
+	return netif_receive_skb(skb);
+
+out1:
+	mux_dev->stats.rx_dropped++;
+out:
+	kfree_skb(skb);
+	return 0;
+}
+EXPORT_SYMBOL(mv_mux_rx);
+
+/*-----------------------------------------------------------------------------------------*/
+
+static int mv_mux_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct mux_netdev *pmux_priv = MV_MUX_PRIV(dev);
+
+	if (!(netif_running(dev))) {
+		printk(KERN_ERR "!netif_running() in %s\n", __func__);
+		goto out;
+	}
+
+	if (mv_mux_tx_skb_tag_add(dev, skb)) {
+		printk(KERN_ERR "%s: mv_mux_tx_skb_tag_add failed.\n", __func__);
+		goto out;
+	}
+
+#ifdef CONFIG_MV_ETH_DEBUG_CODE
+	if (mux_eth_shadow[pmux_priv->port].flags & MV_MUX_F_DBG_TX) {
+		pr_err("\n%s - %s_%lu: port=%d, cpu=%d, in_intr=0x%lx\n",
+			dev->name, __func__, dev->stats.tx_packets, pmux_priv->port,
+			smp_processor_id(), in_interrupt());
+		/* mv_eth_skb_print(skb); */
+		mvDebugMemDump(skb->data, 64, 1);
+	}
+#endif /* CONFIG_MV_ETH_DEBUG_CODE */
+
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
+
+	/* assign the packet to the hw interface */
+	skb->dev = mux_eth_shadow[pmux_priv->port].root;
+
+	/* mark skb as tagged skb */
+	MV_MUX_SKB_TAG_SET(skb);
+
+	/*tell Linux to pass it to its device */
+	return dev_queue_xmit(skb);
+
+out:
+	dev->stats.tx_dropped++;
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+/*-----------------------------------------------------------------------------------------*/
+/* Return mux device mac address							   */
+/*-----------------------------------------------------------------------------------------*/
+char *mv_mux_get_mac(struct net_device *mux_dev)
+{
+
+	if (!mux_dev) {
+		printk(KERN_ERR "%s: mux net device is NULL.\n", __func__);
+		return NULL;
+	}
+
+	return mux_dev->dev_addr;
+}
+/*-----------------------------------------------------------------------------------------*/
+
+static void mv_mux_set_rx_mode(struct net_device *mux_dev)
+{
+	struct mux_netdev *pmux_priv = MV_MUX_PRIV(mux_dev);
+
+	if (!mv_mux_internal_switch(pmux_priv->port) || (switch_ops == NULL))
+		return;
+
+	if (switch_ops->promisc_set)
+		if (switch_ops->promisc_set(pmux_priv->idx, (mux_dev->flags & IFF_PROMISC) ? 1 : 0))
+			pr_err("%s: Set promiscuous mode failed\n", mux_dev->name);
+
+	/* IFF_ALLMULTI is not supported by switch */
+
+	/* remove all mcast enries */
+	 if (switch_ops->all_mcast_del)
+		if (switch_ops->all_mcast_del(pmux_priv->idx))
+			pr_err("%s: Delete all Mcast failed\n", mux_dev->name);
+
+	if (mux_dev->flags & IFF_MULTICAST) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34)
+		if (!netdev_mc_empty(mux_dev)) {
+			struct netdev_hw_addr *ha;
+
+			netdev_for_each_mc_addr(ha, mux_dev) {
+				if (switch_ops->mac_addr_set) {
+					if (switch_ops->mac_addr_set(pmux_priv->idx, ha->addr, 1)) {
+						pr_err("%s: Mcast init failed\n", mux_dev->name);
+						break;
+					}
+				}
+			}
+		}
+#else
+		struct dev_mc_list *curr_addr = mux_dev->mc_list;
+		int                i;
+		for (i = 0; i < mux_dev->mc_count; i++, curr_addr = curr_addr->next) {
+			if (!curr_addr)
+				break;
+			if (switch_ops->mac_addr_set) {
+				if (switch_ops->mac_addr_set(pmux_priv->idx, curr_addr->dmi_addr, 1)) {
+					pr_err("%s: Mcast init failed\n", mux_dev->name);
+					break;
+				}
+			}
+		}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34) */
+	}
+}
+/*-----------------------------------------------------------------------------------------*/
+
+static int mv_mux_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	/*TODO compleate implementation*/
+	printk(KERN_ERR "Not supported yet.\n");
+	return 0;
+}
+/*-----------------------------------------------------------------------------------------*/
+
+static void mv_mux_switch_mtu_update(int mtu)
+{
+	int pkt_size, tag_size = 0;
+	MV_TAG_TYPE tag_type =  mux_switch_shadow.tag_type;
+
+	if (tag_type == MV_TAG_TYPE_MH)
+		tag_size = MV_ETH_MH_SIZE;
+	else if (tag_type == MV_TAG_TYPE_DSA)
+		tag_size = MV_ETH_DSA_SIZE;
+
+	pkt_size = mtu + tag_size + MV_ETH_ALEN + MV_ETH_VLAN_SIZE + MV_ETH_CRC_SIZE;
+
+	if (switch_ops && switch_ops->jumbo_mode_set)
+		switch_ops->jumbo_mode_set(pkt_size);
+}
+
+/*-----------------------------------------------------------------------------------------*/
+
+int mv_mux_close(struct net_device *dev)
+{
+	struct mux_netdev *pmux_priv = MV_MUX_PRIV(dev);
+	struct net_device *root = mux_eth_shadow[pmux_priv->port].root;
+
+	netif_stacked_transfer_operstate(root, dev);
+	netif_tx_stop_all_queues(dev);
+
+	if (mv_mux_internal_switch(pmux_priv->port))
+		if (switch_ops && switch_ops->group_disable)
+			switch_ops->group_disable(pmux_priv->idx);
+
+	printk(KERN_NOTICE "%s: stopped\n", dev->name);
+
+	return MV_OK;
+}
+/*-----------------------------------------------------------------------------------------*/
+
+int mv_mux_open(struct net_device *dev)
+{
+	struct mux_netdev *pmux_priv = MV_MUX_PRIV(dev);
+	struct net_device *root = mux_eth_shadow[pmux_priv->port].root;
+
+	if (!root) {
+		printk(KERN_ERR "%s:Invalid operation, set master before up.\n", __func__);
+		return MV_ERROR;
+	}
+
+	/* if master is close */
+	if (!(root->flags & IFF_UP)) {
+		printk(KERN_ERR "%s:Invalid operation, port %d is down.\n", __func__, pmux_priv->port);
+		return MV_ERROR;
+	}
+
+	netif_stacked_transfer_operstate(root, dev);
+	netif_tx_wake_all_queues(dev);
+
+	if (dev->mtu > root->mtu)
+		dev->mtu = root->mtu;
+
+	if (mv_mux_internal_switch(pmux_priv->port))
+		if (switch_ops && switch_ops->group_enable)
+			switch_ops->group_enable(pmux_priv->idx);
+
+	printk(KERN_NOTICE "%s: started\n", dev->name);
+
+	return MV_OK;
+
+}
+/*-----------------------------------------------------------------------------------------*/
+
+static int mv_mux_set_mac(struct net_device *mux_dev, void *addr)
+{
+	struct mux_netdev *pmux_priv = MV_MUX_PRIV(mux_dev);
+
+	u8 *mac = &(((u8 *)addr)[2]);  /* skip on first 2B (ether HW addr type) */
+
+	mv_mux_close(mux_dev);
+
+	/*TODO: update parser/PNC - mac filtering*/
+
+	if (mv_mux_internal_switch(pmux_priv->port))
+		if (switch_ops && switch_ops->mac_addr_set) {
+
+			/* delete old mac */
+			if (switch_ops->mac_addr_set(pmux_priv->idx, mux_dev->dev_addr, 0))
+				return MV_ERROR;
+
+			/* set new mac */
+			if (switch_ops->mac_addr_set(pmux_priv->idx, mac, 1))
+				return MV_ERROR;
+		}
+
+	memcpy(mux_dev->dev_addr, mac, ETH_ALEN);
+
+	mv_mux_open(mux_dev);
+
+	return 0;
+}
+/*-----------------------------------------------------------------------------------------*/
+
+int mv_mux_mtu_change(struct net_device *mux_dev, int mtu)
+{
+	struct mux_netdev *pmux_priv = MV_MUX_PRIV(mux_dev);
+	struct net_device *root = mux_eth_shadow[pmux_priv->port].root;
+
+	if (root->mtu < mtu) {
+		printk(KERN_ERR "Invalid mtu value.\n");
+		return MV_ERROR;
+	}
+
+	mux_dev->mtu = mtu;
+	return MV_OK;
+}
+
+/*-----------------------------------------------------------------------------------------*/
+/* Create new mux device, if device is allready exist just change tag value                */
+/* mv_mux_netdev_add should called after mv_mux_netdev_alloc                               */
+/*-----------------------------------------------------------------------------------------*/
+struct net_device *mv_mux_netdev_alloc(char *name, int idx, MV_MUX_TAG *tag_cfg)
+{
+	struct net_device *mux_dev;
+	struct mux_netdev *pmux_priv;
+
+	if (name == NULL) {
+		printk(KERN_ERR "%s: mux net device name is missig.\n", __func__);
+		return NULL;
+	}
+
+	mux_dev = dev_get_by_name(&init_net, name);
+
+
+	if (!mux_dev) {
+		/* new net device */
+		mux_dev = alloc_netdev(sizeof(struct mux_netdev), name, ether_setup);
+		if (!mux_dev) {
+			printk(KERN_ERR "%s: out of memory, net device allocation failed.\n", __func__);
+			return NULL;
+		}
+		/* allocation succeed */
+		mux_dev->irq = NO_IRQ;
+		/* must set netdev_ops before registration */
+		mux_dev->netdev_ops = &mv_mux_netdev_ops;
+
+		if (register_netdev(mux_dev)) {
+			printk(KERN_ERR "%s: failed to register %s\n", __func__, mux_dev->name);
+			free_netdev(mux_dev);
+			return NULL;
+		}
+
+		/*initialization for new net device*/
+		pmux_priv = MV_MUX_PRIV(mux_dev);
+		memset(pmux_priv, 0, sizeof(struct mux_netdev));
+		pmux_priv->port = -1;
+		pmux_priv->next = NULL;
+	} else
+		dev_put(mux_dev);
+
+	pmux_priv = MV_MUX_PRIV(mux_dev);
+
+	if (tag_cfg == NULL) {
+		memset(pmux_priv, 0, sizeof(struct mux_netdev));
+		pmux_priv->port = -1;
+		pmux_priv->next = NULL;
+	} else{
+		/* next, pp not changed*/
+		pmux_priv->tx_tag = tag_cfg->tx_tag;
+		pmux_priv->rx_tag_ptrn = tag_cfg->rx_tag_ptrn;
+		pmux_priv->rx_tag_mask = tag_cfg->rx_tag_mask;
+	}
+	pmux_priv->idx = idx;
+	return mux_dev;
+}
+
+/*-----------------------------------------------------------------------------------------*/
+/* Init mux device features								   */
+/*-----------------------------------------------------------------------------------------*/
+static inline void mv_mux_init_features(struct net_device *mux_dev)
+{
+	struct mux_netdev *pmux_priv = MV_MUX_PRIV(mux_dev);
+	struct net_device *root = mux_eth_shadow[pmux_priv->port].root;
+
+	mux_dev->features = root->features;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+	mux_dev->hw_features = root->hw_features & ~NETIF_F_RXCSUM;
+	mux_dev->wanted_features = root->wanted_features;
+#endif
+	mux_dev->vlan_features = root->vlan_features;
+}
+/*-----------------------------------------------------------------------------------------*/
+/* mv_mux_transfer_features								   */
+/* update features when root features are changed					   */
+/*-----------------------------------------------------------------------------------------*/
+static void mv_mux_transfer_features(struct net_device *root, struct net_device *mux_dev)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+	mux_dev->features &= ~NETIF_F_RXCSUM;
+	mux_dev->features |=  (root->features & NETIF_F_RXCSUM);
+#endif
+
+	mux_dev->features &= ~NETIF_F_IP_CSUM;
+	mux_dev->features |=  (root->features & NETIF_F_IP_CSUM);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+	mux_dev->hw_features &= ~NETIF_F_IP_CSUM;
+	mux_dev->hw_features |=  (root->features & NETIF_F_IP_CSUM);
+#endif
+
+	mux_dev->features &= ~NETIF_F_TSO;
+	mux_dev->features |=  (root->features & NETIF_F_TSO);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+	mux_dev->hw_features &= ~NETIF_F_TSO;
+	mux_dev->hw_features |=  (root->features & NETIF_F_TSO);
+#endif
+
+	mux_dev->features &= ~NETIF_F_SG;
+	mux_dev->features |=  (root->features & NETIF_F_SG);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+	mux_dev->hw_features &= ~NETIF_F_SG;
+	mux_dev->hw_features |=  (root->features & NETIF_F_SG);
+#endif
+}
+/*----------------------------------------------------------------------------------------*/
+/* Function attache mux device to root device,						  */
+/* Set mux mac address and features according to root device				  */
+/*----------------------------------------------------------------------------------------*/
+static struct net_device *mv_mux_netdev_init(int port, struct net_device *mux_dev)
+{
+	struct mux_netdev *pmux_priv;
+	struct net_device *root = mux_eth_shadow[port].root;
+	int tag_type = mux_eth_shadow[port].tag_type;
+
+	if (root == NULL)
+		return NULL;
+/*
+	if (pp && !(pp->flags & MV_ETH_F_CONNECT_LINUX)) {
+		printk(KERN_ERR "%s: root device is not connect to linux.\n", __func__);
+		return NULL;
+	}
+*/
+	if (!mux_dev) {
+		printk(KERN_ERR "%s: mux net device is NULL.\n", __func__);
+		return NULL;
+	}
+
+	/* set skb header size , avoid from skb reallocation*/
+	mux_dev->hard_header_len = root->hard_header_len +
+					mv_mux_get_tag_size(tag_type);
+
+	/* Copy MAC address and MTU from root netdevice */
+	mux_dev->mtu = root->mtu;
+	pmux_priv = MV_MUX_PRIV(mux_dev);
+	pmux_priv->port = port;
+	memcpy(mux_dev->dev_addr, root->dev_addr, MV_MAC_ADDR_SIZE);
+
+	/* TODO: handle features */
+	mv_mux_init_features(mux_dev);
+
+	SET_ETHTOOL_OPS(mux_dev, &mv_mux_tool_ops);
+
+	return mux_dev;
+}
+/*-----------------------------------------------------------------------------------------*/
+struct net_device *mv_mux_switch_ptr_get(int port)
+{
+	return mux_eth_shadow[port].switch_dev;
+}
+/*-----------------------------------------------------------------------------------------*/
+int mv_mux_tag_type_get(int port)
+{
+	return mux_eth_shadow[port].tag_type;
+}
+
+/*-----------------------------------------------------------------------------------------*/
+
+struct net_device *mv_mux_netdev_add(int port, struct net_device *mux_dev)
+{
+	struct net_device *dev_temp;
+	struct net_device *switch_dev;
+
+	struct mux_netdev *pdev;
+
+	if (mux_eth_shadow[port].root == NULL)
+		return NULL;
+
+	mux_dev = mv_mux_netdev_init(port, mux_dev);
+
+	if (mux_dev == NULL)
+		return NULL;
+
+	switch_dev = mux_eth_shadow[port].switch_dev;
+
+	if (switch_dev == NULL) {
+		/* First tag netdev */
+		mux_eth_shadow[port].switch_dev = mux_dev;
+	} else {
+		pdev = MV_MUX_PRIV(switch_dev);
+		dev_temp = switch_dev;
+		while ((mux_dev != dev_temp) && (pdev->next != NULL)) {
+			dev_temp = pdev->next;
+			pdev = MV_MUX_PRIV(dev_temp);
+		}
+		/*check whether mux_dev is already in the physical port*/
+		if (mux_dev == dev_temp)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34)
+			printk(KERN_INFO "%s: this mux interface is already in port %d\n", mux_dev->name, port);
+#else
+			netdev_info(mux_dev, "this mux interface is already in port %d\n", port);
+#endif
+		else
+			pdev->next = mux_dev;
+	}
+
+	if (!mux_init_cnt)
+		if (register_netdevice_notifier(&mux_notifier_block) < 0)
+			unregister_netdevice_notifier(&mux_notifier_block);
+
+	mux_init_cnt++;
+
+	return mux_dev;
+}
+
+/*-----------------------------------------------------------------------------------------*/
+int mv_mux_tag_type_set(int port, int type)
+{
+	unsigned int flgs;
+	struct net_device *root;
+
+
+	if ((type < MV_TAG_TYPE_NONE) || (type >= MV_TAG_TYPE_LAST)) {
+		printk(KERN_INFO "%s: Invalid tag type %d\n", __func__, type);
+		return MV_ERROR;
+	}
+	root = mux_eth_shadow[port].root;
+	/* port not initialized */
+	if (root == NULL)
+		return MV_ERROR;
+
+	/* No change in tag type */
+	if (mux_eth_shadow[port].tag_type == type)
+		return MV_OK;
+
+	flgs = root->flags;
+
+	if (flgs & IFF_UP) {
+		printk(KERN_ERR "%s: root device (%s) must stopped before.\n", __func__, root->name);
+		return MV_ERROR;
+	}
+
+	/* delete all attached virtual interfaces */
+	if (mv_mux_netdev_delete_all(port))
+		return MV_ERROR;
+
+	mux_eth_shadow[port].tag_type = type;
+
+	if (eth_ops && eth_ops->set_tag_type)
+		eth_ops->set_tag_type(port, mux_eth_shadow[port].tag_type);
+
+	return MV_OK;
+}
+
+/*-----------------------------------------------------------------------------------------*/
+/* Delete mux device                                                                       */
+/*	remove device from port linked list						   */
+/*	free mux device                                                                    */
+/*-----------------------------------------------------------------------------------------*/
+
+int mv_mux_netdev_delete(struct net_device *mux_dev)
+{
+	struct net_device *pdev_curr, *pdev_prev = NULL;
+	struct mux_netdev *pdev_tmp_curr, *pdev_tmp_prev, *pdev;
+	struct net_device *root = NULL;
+	int flgs, port;
+
+	if (mux_dev == NULL) {
+		printk(KERN_ERR "%s: mux net device is NULL.\n", __func__);
+		return MV_ERROR;
+	}
+	pdev = MV_MUX_PRIV(mux_dev);
+	port = pdev->port;
+
+	if (port != -1)
+		root = mux_eth_shadow[pdev->port].root;
+
+	/*not attached to gbe port*/
+	if (root == NULL) {
+		synchronize_net();
+		unregister_netdev(mux_dev);
+		free_netdev(mux_dev);
+		/*
+		we don't need to decrease here mux_init_cnt
+		mux_init_cnt incease only in mv_mux_netdev_add
+		when mux attached to gbe port
+		*/
+		return MV_OK;
+	}
+
+	flgs = mux_dev->flags;
+	if (flgs & IFF_UP) {
+		printk(KERN_ERR "%s: root device (%s) must stopped before.\n", __func__, root->name);
+		return MV_ERROR;
+	}
+
+	pdev_curr = mux_eth_shadow[port].switch_dev;
+
+	while (pdev_curr != NULL) {
+
+		pdev_tmp_curr = MV_MUX_PRIV(pdev_curr);
+
+		if (pdev_curr == mux_dev) {
+			if (pdev_curr == mux_eth_shadow[port].switch_dev) {
+				/* first element*/
+				mux_eth_shadow[port].switch_dev = pdev_tmp_curr->next;
+			} else {
+				pdev_tmp_prev = MV_MUX_PRIV(pdev_prev);
+				pdev_tmp_prev->next = pdev_tmp_curr->next;
+			}
+			/* delet current */
+			synchronize_net();
+			unregister_netdev(mux_dev);
+			printk(KERN_ERR "%s has been removed.\n", mux_dev->name);
+			free_netdev(mux_dev);
+
+			mux_init_cnt--;
+
+			if (!mux_init_cnt)
+				unregister_netdevice_notifier(&mux_notifier_block);
+
+			return MV_OK;
+
+		} else {
+			pdev_prev = pdev_curr;
+			pdev_curr = pdev_tmp_curr->next;
+		}
+	}
+	/* mux_dev not found */
+	return MV_ERROR;
+}
+
+/*-----------------------------------------------------------------------------------------*/
+static int mv_mux_netdev_delete_all(int port)
+{
+	/* delete all attached mux devices */
+	struct net_device *mux_dev;
+
+	if (mux_eth_shadow[port].root == NULL)
+		return MV_ERROR;
+
+	/* delete all attached mux devices */
+	mux_dev = mux_eth_shadow[port].switch_dev;
+	while (mux_dev) {
+		if (mv_mux_netdev_delete(mux_dev))
+			return MV_ERROR;
+
+		mux_dev = mux_eth_shadow[port].switch_dev;
+	}
+
+	return MV_OK;
+}
+
+/*-----------------------------------------------------------------------------------------*/
+static int mux_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
+{
+	struct net_device *mux_dev, *dev = ptr;
+	struct mux_netdev *pdev_priv;
+	int tag_type;
+	int port = 0;
+	int flgs;
+
+	/*recognize if marvell event */
+	port = mv_mux_netdev_find(dev->ifindex);
+
+	if (port == -1)
+		goto out;
+
+	tag_type = mux_eth_shadow[port].tag_type;
+
+	/* exit - if transparent mode */
+	if (mv_mux_internal_switch(port) && (tag_type == MV_TAG_TYPE_NONE))
+		goto out;
+
+	switch (event) {
+
+	case NETDEV_CHANGE:
+		mux_dev = mux_eth_shadow[port].switch_dev;
+		while (mux_dev != NULL) {
+			pdev_priv = MV_MUX_PRIV(mux_dev);
+			if (mv_mux_internal_switch(port)) {
+				/* In case of internal switch, link is determined by switch */
+				/*In HGU mode, mux may be created by sysfs cmd and then pdev_priv->idx will be -1*/
+				if (switch_ops && switch_ops->link_status_get
+					&& (pdev_priv->idx != MV_MUX_UNKNOWN_GROUP)) {
+					int link_up = switch_ops->link_status_get(pdev_priv->idx);
+					mv_mux_update_link(mux_dev, link_up);
+				}
+			} else {
+				/* In case of external switch, propagate real device link state to mux devices */
+				/* change state*/
+				netif_stacked_transfer_operstate(dev, mux_dev);
+			}
+			mux_dev = pdev_priv->next;
+		}
+		break;
+
+	case NETDEV_CHANGEADDR:
+		/* Propagate real device mac adress to mux devices */
+		mux_dev = mux_eth_shadow[port].switch_dev;
+
+		while (mux_dev != NULL) {
+			pdev_priv = MV_MUX_PRIV(mux_dev);
+			/* May be called without an actual change */
+			if (!compare_ether_addr(mux_dev->dev_addr, dev->dev_addr)) {
+				mux_dev = pdev_priv->next;
+				continue;
+			}
+			memcpy(mux_dev->dev_addr, dev->dev_addr, ETH_ALEN);
+			mux_dev = pdev_priv->next;
+		}
+		break;
+
+	case NETDEV_CHANGEMTU:
+		mux_dev = mux_eth_shadow[port].switch_dev;
+
+		while (mux_dev != NULL) {
+			pdev_priv = MV_MUX_PRIV(mux_dev);
+			dev_set_mtu(mux_dev, dev->mtu);
+			mux_dev = pdev_priv->next;
+		}
+
+		if (mv_mux_internal_switch(port)) {
+			mux_switch_shadow.mtu = dev->mtu;
+			mv_mux_switch_mtu_update(dev->mtu);
+		}
+
+		break;
+
+	case NETDEV_DOWN:
+		/* Master down - Put all mux devices for this dev in the down state too.  */
+		mux_dev = mux_eth_shadow[port].switch_dev;
+
+		while (mux_dev != NULL) {
+			pdev_priv = MV_MUX_PRIV(mux_dev);
+			flgs = mux_dev->flags;
+			if (!(flgs & IFF_UP)) {
+				mux_dev = pdev_priv->next;
+				continue;
+			}
+			/* dev_change_flags call to mv_mux_close*/
+			dev_change_flags(mux_dev, flgs & ~IFF_UP);
+			mux_dev = pdev_priv->next;
+		}
+		break;
+
+	case NETDEV_UP:
+		/* Check for MTU updates */
+		if (mv_mux_internal_switch(port) &&
+			((mux_switch_shadow.mtu == -1) || (mux_switch_shadow.mtu > dev->mtu))) {
+				mux_switch_shadow.mtu = dev->mtu;
+				mv_mux_switch_mtu_update(dev->mtu);
+		}
+
+		break;
+
+	case NETDEV_FEAT_CHANGE:
+		/* Master features changed - Propagate device features to underlying device */
+		mux_dev = mux_eth_shadow[port].switch_dev;
+		while (mux_dev != NULL) {
+			pdev_priv = MV_MUX_PRIV(mux_dev);
+			mv_mux_transfer_features(mux_eth_shadow[port].root, mux_dev);
+			mux_dev = pdev_priv->next;
+		}
+		break;
+	} /*switch*/
+out:
+	return NOTIFY_DONE;
+}
+/*-----------------------------------------------------------------------------------------*/
+static struct notifier_block mux_notifier_block __read_mostly = {
+	.notifier_call = mux_device_event,
+};
+/*-----------------------------------------------------------------------------------------*/
+
+bool mv_mux_netdev_link_status(struct net_device *dev)
+{
+	return netif_carrier_ok(dev) ? true : false;
+}
+
+/*-----------------------------------------------------------------------------------------*/
+
+void mv_mux_vlan_set(MV_MUX_TAG *mux_cfg, unsigned int vid)
+{
+
+	mux_cfg->tx_tag.vlan = MV_32BIT_BE((MV_VLAN_TYPE << 16) | vid);
+	mux_cfg->rx_tag_ptrn.vlan = MV_32BIT_BE((MV_VLAN_TYPE << 16) | vid);
+
+	/*mask priority*/
+	mux_cfg->rx_tag_mask.vlan = MV_32BIT_BE(0xFFFF0FFF);
+
+	mux_cfg->tag_type = MV_TAG_TYPE_VLAN;
+}
+
+/*-----------------------------------------------------------------------------------------*/
+void mv_mux_cfg_get(struct net_device *mux_dev, MV_MUX_TAG *mux_cfg)
+{
+	if (mux_dev) {
+		struct mux_netdev *pmux_priv;
+		pmux_priv = MV_MUX_PRIV(mux_dev);
+		mux_cfg->tx_tag = pmux_priv->tx_tag;
+		mux_cfg->rx_tag_ptrn = pmux_priv->rx_tag_ptrn;
+		mux_cfg->rx_tag_mask = pmux_priv->rx_tag_mask;
+	} else
+		memset(mux_cfg, 0, sizeof(MV_MUX_TAG));
+}
+/*-----------------------------------------------------------------------------------------*/
+
+static inline struct net_device *mv_mux_mh_netdev_get(int port, MV_TAG *tag)
+{
+	struct net_device *dev = mux_eth_shadow[port].switch_dev;
+	struct mux_netdev *pdev;
+
+	while (dev != NULL) {
+		pdev = MV_MUX_PRIV(dev);
+		if ((tag->mh & pdev->rx_tag_mask.mh) == pdev->rx_tag_ptrn.mh)
+			return dev;
+
+		dev = pdev->next;
+	}
+	printk(KERN_ERR "%s: MH=0x%04x match no interfaces\n", __func__, tag->mh);
+	return NULL;
+}
+
+/*-----------------------------------------------------------------------------------------*/
+
+static inline struct net_device *mv_mux_vlan_netdev_get(int port, MV_TAG *tag)
+{
+	struct net_device *dev = mux_eth_shadow[port].switch_dev;
+	struct mux_netdev *pdev;
+
+	while (dev != NULL) {
+		pdev = MV_MUX_PRIV(dev);
+#ifdef CONFIG_MV_ETH_DEBUG_CODE
+		if (mux_eth_shadow[port].flags & MV_MUX_F_DBG_RX)
+			printk(KERN_ERR "pkt tag = 0x%x, rx_tag_ptrn = 0x%x, rx_tag_mask = 0x%x\n",
+				 tag->vlan, pdev->rx_tag_ptrn.vlan, pdev->rx_tag_mask.vlan);
+#endif
+		if ((tag->vlan & pdev->rx_tag_mask.vlan) ==
+			(pdev->rx_tag_ptrn.vlan & pdev->rx_tag_mask.vlan))
+			return dev;
+
+		dev = pdev->next;
+	}
+#ifdef CONFIG_MV_ETH_DEBUG_CODE
+	printk(KERN_ERR "%s:Error TAG=0x%08x match no interfaces\n", __func__, tag->vlan);
+#endif
+
+	return NULL;
+}
+
+/*-----------------------------------------------------------------------------------------*/
+
+static inline struct net_device *mv_mux_dsa_netdev_get(int port, MV_TAG *tag)
+{
+	/*
+	   MV_TAG.vlan and MV_TAG.dsa size are equal.
+	   MV_TAG type is union.
+	   We can use in the same functins.
+	*/
+
+	return mv_mux_vlan_netdev_get(port, tag);
+}
+
+/*-----------------------------------------------------------------------------------------*/
+
+static inline struct net_device *mv_mux_edsa_netdev_get(int port, MV_TAG *tag)
+{
+	struct net_device *dev = mux_eth_shadow[port].switch_dev;
+	struct mux_netdev *pdev;
+
+	while (dev != NULL) {
+		pdev = MV_MUX_PRIV(dev);
+#ifdef CONFIG_MV_ETH_DEBUG_CODE
+		if (mux_eth_shadow[port].flags & MV_MUX_F_DBG_RX)
+			printk(KERN_ERR "pkt tag = 0x%x %x, rx_tag_ptrn = 0x%x %x, rx_tag_mask = 0x%x %x\n",
+				 tag->edsa[0], tag->edsa[1], pdev->rx_tag_ptrn.edsa[0], pdev->rx_tag_ptrn.edsa[1],
+				 pdev->rx_tag_mask.edsa[0], pdev->rx_tag_mask.edsa[1]);
+#endif
+		/* compare tags */
+		if (((tag->edsa[0] & pdev->rx_tag_mask.edsa[0]) ==
+			(pdev->rx_tag_ptrn.edsa[0] & pdev->rx_tag_mask.edsa[0])) &&
+			((tag->edsa[1] & pdev->rx_tag_mask.edsa[1]) ==
+			(pdev->rx_tag_ptrn.edsa[1] & pdev->rx_tag_mask.edsa[1])))
+				return dev;
+
+		dev = pdev->next;
+	}
+#ifdef CONFIG_MV_ETH_DEBUG_CODE
+	printk(KERN_ERR "%s:Error TAG=0x%08x match no interfaces\n", __func__, tag->vlan);
+#endif
+
+	return NULL;
+}
+
+/*-----------------------------------------------------------------------------------------*/
+
+
+static inline struct net_device *mv_mux_rx_netdev_get(int port, struct sk_buff *skb)
+{
+	struct net_device *dev;
+	MV_TAG tag;
+	MV_U8 *data = skb->data;
+	int tag_type = mux_eth_shadow[port].tag_type;
+
+	/* skb->data point to MH */
+	switch (tag_type) {
+
+	case MV_TAG_TYPE_MH:
+		tag.mh = *(MV_U16 *)data;
+		dev = mv_mux_mh_netdev_get(port, &tag);
+		break;
+
+	case MV_TAG_TYPE_VLAN:
+		tag.vlan = *(MV_U32 *)(data + MV_ETH_MH_SIZE + (2 * MV_MAC_ADDR_SIZE));
+		dev = mv_mux_vlan_netdev_get(port, &tag);
+		break;
+
+	case MV_TAG_TYPE_DSA:
+		tag.dsa = *(MV_U32 *)(data + MV_ETH_MH_SIZE + (2 * MV_MAC_ADDR_SIZE));
+		dev = mv_mux_dsa_netdev_get(port, &tag);
+		break;
+
+	case MV_TAG_TYPE_EDSA:
+		tag.edsa[0] = *(MV_U32 *)(data + MV_ETH_MH_SIZE + (2 * MV_MAC_ADDR_SIZE));
+		tag.edsa[1] = *(MV_U32 *)(data + MV_ETH_MH_SIZE + (2 * MV_MAC_ADDR_SIZE) + 4);
+		dev = mv_mux_edsa_netdev_get(port, &tag);
+		break;
+
+	default:
+		printk(KERN_ERR "%s: unexpected port mode = %d\n", __func__, tag_type);
+		return NULL;
+	}
+
+	return dev;
+}
+
+/*-----------------------------------------------------------------------------------------*/
+static inline int mv_mux_mh_skb_remove(struct sk_buff *skb)
+{
+	__skb_pull(skb, MV_ETH_MH_SIZE);
+	return MV_ETH_MH_SIZE;
+}
+
+/*-----------------------------------------------------------------------------------------*/
+
+static inline int mv_mux_vlan_skb_remove(struct sk_buff *skb)
+{
+	/* memmove use temporrary array, no overlap problem*/
+	memmove(skb->data + MV_VLAN_HLEN, skb->data, (2 * MV_MAC_ADDR_SIZE) + MV_ETH_MH_SIZE);
+
+	__skb_pull(skb, MV_VLAN_HLEN);
+
+	return MV_ETH_VLAN_SIZE;
+}
+/*-----------------------------------------------------------------------------------------*/
+
+static inline int mv_mux_dsa_skb_remove(struct sk_buff *skb)
+{
+	/* memmove use temporrary array, no overlap problem*/
+	memmove(skb->data + MV_ETH_DSA_SIZE, skb->data, (2 * MV_MAC_ADDR_SIZE) + MV_ETH_MH_SIZE);
+
+	__skb_pull(skb, MV_ETH_DSA_SIZE);
+
+	return MV_ETH_DSA_SIZE;
+}
+/*-----------------------------------------------------------------------------------------*/
+
+static inline int mv_mux_edsa_skb_remove(struct sk_buff *skb)
+{
+	/* memmove use temporrary array, no overlap problem*/
+	memmove(skb->data + MV_ETH_EDSA_SIZE, skb->data, (2 * MV_MAC_ADDR_SIZE) + MV_ETH_MH_SIZE);
+
+	__skb_pull(skb, MV_ETH_EDSA_SIZE);
+
+	return MV_ETH_EDSA_SIZE;
+}
+
+/*-----------------------------------------------------------------------------------------*/
+
+static inline int mv_mux_rx_tag_remove(struct net_device *dev, struct sk_buff *skb)
+{
+	int shift = 0;
+	struct mux_netdev *pdev = MV_MUX_PRIV(dev);
+	int tag_type = mux_eth_shadow[pdev->port].tag_type;
+
+	if (pdev->leave_tag == true)
+		return 0;
+
+	switch (tag_type) {
+
+	case MV_TAG_TYPE_MH:
+		break;
+
+	case MV_TAG_TYPE_VLAN:
+		shift = mv_mux_vlan_skb_remove(skb);
+		break;
+
+	case MV_TAG_TYPE_DSA:
+		shift = mv_mux_dsa_skb_remove(skb);
+		break;
+
+	case MV_TAG_TYPE_EDSA:
+		shift = mv_mux_edsa_skb_remove(skb);
+		break;
+
+	default:
+		printk(KERN_ERR "%s: unexpected port mode = %d\n", __func__, tag_type);
+		return -1;
+	}
+	/* MH exist in packet anycase - Skip it */
+	shift += mv_mux_mh_skb_remove(skb);
+
+	return shift;
+}
+
+
+/*-----------------------------------------------------------------------------------------*/
+
+static inline int mv_eth_skb_mh_add(struct sk_buff *skb, u16 mh)
+{
+
+	/* sanity: Check that there is place for MH in the buffer */
+	if (skb_headroom(skb) < MV_ETH_MH_SIZE) {
+		printk(KERN_ERR "%s: skb (%p) doesn't have place for MH, head=%p, data=%p\n",
+		       __func__, skb, skb->head, skb->data);
+		return 1;
+	}
+
+	/* Prepare place for MH header */
+	__skb_push(skb, MV_ETH_MH_SIZE);
+
+	*((u16 *) skb->data) = mh;
+
+	return MV_OK;
+}
+
+static inline int mv_mux_tx_skb_mh_add(struct net_device *dev, struct sk_buff *skb)
+{
+	struct mux_netdev *pdev = MV_MUX_PRIV(dev);
+
+	return mv_eth_skb_mh_add(skb, pdev->tx_tag.mh);
+}
+
+/*-----------------------------------------------------------------------------------------*/
+
+static inline int mv_mux_skb_vlan_add(struct sk_buff *skb, unsigned int vlan)
+{
+	unsigned char *pvlan;
+/*
+	TODO: add stat counter to mux_pp
+		mean that there is not enough bytes in header room
+		to push vlan, skb_cow will realloc skb
+
+	if (skb_headroom(skb) < MV_VLAN_HLEN) {
+		mux_skb_tx_realloc++;
+	}
+*/
+	if (skb_cow(skb, MV_VLAN_HLEN)) {
+		printk(KERN_ERR "%s: skb (%p) headroom < VLAN_HDR, skb_head=%p, skb_data=%p\n",
+		       __func__, skb, skb->head, skb->data);
+		return 1;
+	}
+
+	__skb_push(skb, MV_VLAN_HLEN);
+
+	memmove(skb->data, skb->data + MV_VLAN_HLEN, 2 * MV_MAC_ADDR_SIZE);
+
+	pvlan = skb->data + (2 * MV_MAC_ADDR_SIZE);
+	*(MV_U32 *)pvlan = vlan;
+
+	return MV_OK;
+}
+
+static inline int mv_mux_tx_skb_vlan_add(struct net_device *dev, struct sk_buff *skb)
+{
+	struct mux_netdev *pdev = MV_MUX_PRIV(dev);
+
+	return mv_mux_skb_vlan_add(skb, pdev->tx_tag.vlan);
+}
+
+
+/*-----------------------------------------------------------------------------------------*/
+
+static inline int mv_mux_tx_skb_dsa_add(struct net_device *dev, struct sk_buff *skb)
+{
+	/* both DSA and VLAN are 4 bytes tags, placed in the same offset in the packet */
+	return mv_mux_tx_skb_vlan_add(dev, skb);
+}
+
+/*-----------------------------------------------------------------------------------------*/
+
+static inline int mv_mux_skb_edsa_add(struct sk_buff *skb, unsigned int edsaL, unsigned int edsaH)
+{
+	unsigned char *pedsa;
+
+	if (skb_cow(skb, MV_ETH_EDSA_SIZE)) {
+		printk(KERN_ERR "%s: skb (%p) headroom < VLAN_HDR, skb_head=%p, skb_data=%p\n",
+		       __func__, skb, skb->head, skb->data);
+		return 1;
+	}
+
+	__skb_push(skb, MV_ETH_EDSA_SIZE);
+
+	memmove(skb->data, skb->data + MV_ETH_EDSA_SIZE, 2 * MV_MAC_ADDR_SIZE);
+
+	pedsa = skb->data + (2 * MV_MAC_ADDR_SIZE);
+	*(MV_U32 *)pedsa = edsaL;
+	*((MV_U32 *)pedsa + 1) = edsaH;
+
+	return MV_OK;
+}
+
+static inline int mv_mux_tx_skb_edsa_add(struct net_device *dev, struct sk_buff *skb)
+{
+	struct mux_netdev *pdev = MV_MUX_PRIV(dev);
+
+	return mv_mux_skb_edsa_add(skb, pdev->tx_tag.edsa[0], pdev->tx_tag.edsa[1]);
+}
+
+/*-----------------------------------------------------------------------------------------*/
+
+static inline int mv_mux_tx_skb_tag_add(struct net_device *dev, struct sk_buff *skb)
+{
+	struct mux_netdev *pdev = MV_MUX_PRIV(dev);
+	int tag_type = mux_eth_shadow[pdev->port].tag_type;
+	int err = 0;
+
+	switch (tag_type) {
+
+	case MV_TAG_TYPE_MH:
+		err = mv_mux_tx_skb_mh_add(dev, skb);
+		break;
+	case MV_TAG_TYPE_VLAN:
+		err = mv_mux_tx_skb_vlan_add(dev, skb);
+		break;
+	case MV_TAG_TYPE_DSA:
+		err = mv_mux_tx_skb_dsa_add(dev, skb);
+		break;
+	case MV_TAG_TYPE_EDSA:
+		err = mv_mux_tx_skb_edsa_add(dev, skb);
+		break;
+	default:
+		printk(KERN_ERR "%s: unexpected port mode = %d\n", __func__, tag_type);
+		err = 1;
+	}
+	return err;
+}
+
+/*--------------------------------------------------------------------------------------*/
+/* Print mux device data								*/
+/*--------------------------------------------------------------------------------------*/
+
+void mv_mux_netdev_print(struct net_device *mux_dev)
+{
+	struct mux_netdev *pdev;
+	int tag_type;
+
+	if (!mux_dev) {
+		printk(KERN_ERR "%s:device in NULL.\n", __func__);
+		return;
+	}
+
+	if (mv_mux_netdev_find(mux_dev->ifindex) != -1) {
+		printk(KERN_ERR "%s: %s is not mux device.\n", __func__, mux_dev->name);
+		return;
+	}
+
+	pdev = MV_MUX_PRIV(mux_dev);
+
+	if (!pdev || (pdev->port == -1)) {
+		printk(KERN_ERR "%s: device must be conncted to physical port\n", __func__);
+		return;
+	}
+	tag_type = mux_eth_shadow[pdev->port].tag_type;
+	switch (tag_type) {
+
+	case MV_TAG_TYPE_VLAN:
+		printk(KERN_ERR "%s: port=%d, pdev=%p, tx_vlan=0x%08x, rx_vlan=0x%08x, rx_mask=0x%08x\n",
+			mux_dev->name, pdev->port, pdev, pdev->tx_tag.vlan,
+			pdev->rx_tag_ptrn.vlan, pdev->rx_tag_mask.vlan);
+		break;
+
+	case MV_TAG_TYPE_DSA:
+		printk(KERN_ERR "%s: port=%d, pdev=%p: tx_dsa=0x%08x, rx_dsa=0x%08x, rx_mask=0x%08x\n",
+			mux_dev->name, pdev->port, pdev, pdev->tx_tag.dsa,
+			pdev->rx_tag_ptrn.dsa, pdev->rx_tag_mask.dsa);
+		break;
+
+	case MV_TAG_TYPE_MH:
+		printk(KERN_ERR "%s: port=%d, pdev=%p: tx_mh=0x%04x, rx_mh=0x%04x, rx_mask=0x%04x\n",
+			mux_dev->name, pdev->port, pdev, pdev->tx_tag.mh, pdev->rx_tag_ptrn.mh, pdev->rx_tag_mask.mh);
+		break;
+
+	case MV_TAG_TYPE_EDSA:
+		printk(KERN_ERR "%s: port=%d, pdev=%p: tx_edsa=0x%08x %08x, rx_edsa=0x%08x %08x, rx_mask=0x%08x %08x\n",
+			mux_dev->name, pdev->port, pdev, pdev->tx_tag.edsa[1], pdev->tx_tag.edsa[0],
+			pdev->rx_tag_ptrn.edsa[1], pdev->rx_tag_ptrn.edsa[0],
+			pdev->rx_tag_mask.edsa[1], pdev->rx_tag_mask.edsa[0]);
+		break;
+
+	default:
+		printk(KERN_ERR "%s: Error, Unknown tag type\n", __func__);
+	}
+}
+EXPORT_SYMBOL(mv_mux_netdev_print);
+
+/*--------------------------------------------------------------------------------------*/
+/* Print all port's mux devices data							*/
+/*--------------------------------------------------------------------------------------*/
+void mv_mux_netdev_print_all(int port)
+{
+	struct net_device *dev;
+	struct mux_netdev *dev_priv;
+
+	dev = mux_eth_shadow[port].root;
+
+	if (!dev)
+		return;
+
+	dev = mux_eth_shadow[port].switch_dev;
+
+	while (dev != NULL) {
+		mv_mux_netdev_print(dev);
+		dev_priv = MV_MUX_PRIV(dev);
+		dev = dev_priv->next;
+		printk(KERN_CONT "\n");
+	}
+}
+EXPORT_SYMBOL(mv_mux_netdev_print_all);
+/*-----------------------------------------------------------------------------------------*/
+int mv_mux_ctrl_dbg_flag(int port, u32 flag, u32 val)
+{
+#ifdef CONFIG_MV_ETH_DEBUG_CODE
+	struct net_device *root = mux_eth_shadow[port].root;
+	u32 bit_flag = (fls(flag) - 1);
+
+	if (!root)
+		return -ENODEV;
+
+	if (val)
+		set_bit(bit_flag, (unsigned long *)&(mux_eth_shadow[port].flags));
+	else
+		clear_bit(bit_flag, (unsigned long *)&(mux_eth_shadow[port].flags));
+#endif /* CONFIG_MV_ETH_DEBUG_CODE */
+
+	return 0;
+}
+/*-----------------------------------------------------------------------------------------*/
+static const struct net_device_ops mv_mux_netdev_ops = {
+	.ndo_open		= mv_mux_open,
+	.ndo_stop		= mv_mux_close,
+	.ndo_start_xmit		= mv_mux_xmit,
+	.ndo_set_mac_address	= mv_mux_set_mac,
+	.ndo_do_ioctl		= mv_mux_ioctl,
+	.ndo_set_rx_mode	= mv_mux_set_rx_mode,
+	.ndo_change_mtu		= mv_mux_mtu_change,
+};
+/*-----------------------------------------------------------------------------------------*/
+static const struct mv_switch_mux_ops mux_ops =  {
+	.update_link = mv_mux_update_link,
+};
diff --git a/drivers/net/ethernet/mvebu_net/netmux/mv_mux_netdev.h b/drivers/net/ethernet/mvebu_net/netmux/mv_mux_netdev.h
new file mode 100644
index 000000000000..80fcabd642b6
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/netmux/mv_mux_netdev.h
@@ -0,0 +1,138 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#ifndef __mv_tag_netdev_h__
+#define __mv_tag_netdev_h__
+
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <net/ip.h>
+
+#include "mvCommon.h"
+#include "mvTypes.h"
+#include "mvOs.h"
+#include "mv802_3.h"
+
+#define MV_MUX_SKB_TAG_VAL		(0xabcd)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)
+#define MV_MUX_SKB_TAG_SET(skb)		(skb->iif = (MV_MUX_SKB_TAG_VAL))
+#define MV_MUX_SKB_IS_TAGGED(skb)	(skb->iif == (MV_MUX_SKB_TAG_VAL))
+#else
+#define MV_MUX_SKB_TAG_SET(skb)		(skb->skb_iif = (MV_MUX_SKB_TAG_VAL))
+#define MV_MUX_SKB_IS_TAGGED(skb)	(skb->skb_iif == (MV_MUX_SKB_TAG_VAL))
+#endif
+/*MV_MUX_UNKNOWN_GROUP is usedfor sysfs creating mux device.*/
+/*e.g. when switch is in HGU mode, then mux device is created in -1 group*/
+#define MV_MUX_UNKNOWN_GROUP		(-1)
+
+extern const struct ethtool_ops mv_mux_tool_ops;
+
+struct mux_netdev {
+	int	idx;
+	int	port;
+	bool    leave_tag;
+	MV_TAG  tx_tag;
+	MV_TAG  rx_tag_ptrn;
+	MV_TAG  rx_tag_mask;
+	struct  net_device *next;
+};
+
+#define MV_MUX_PRIV(dev)        ((struct mux_netdev *)(netdev_priv(dev)))
+
+
+struct mv_mux_eth_port {
+	int    tag_type;
+	struct net_device *switch_dev;
+	struct net_device *root;
+	unsigned long flags;
+};
+
+#define MV_MUX_F_DBG_RX_BIT         0
+#define MV_MUX_F_DBG_TX_BIT         1
+
+#define MV_MUX_F_DBG_RX            (1 << MV_MUX_F_DBG_RX_BIT)
+#define MV_MUX_F_DBG_TX            (1 << MV_MUX_F_DBG_TX_BIT)
+
+struct mv_mux_switch_port {
+	int    tag_type;
+	int    preset;
+	int    vid;
+	int    switch_port;
+	int    gbe_port;
+	int    mtu;
+	bool   attach;
+};
+
+
+/* operations requested by switch device from mux device */
+struct mv_switch_mux_ops {
+	int	(*update_link)(void *cookie, int link_up);
+};
+
+/* operations requested by mux device from switch device */
+struct mv_mux_switch_ops {
+	int	(*promisc_set)(int db, u8 promisc_on);
+	int	(*jumbo_mode_set)(int max_size);
+	int	(*group_disable)(int db);
+	int	(*group_enable)(int db);
+	int	(*link_status_get)(int db);
+	int     (*all_mcast_del)(int db);
+	int	(*mac_addr_set)(int db, unsigned char *mac_addr, unsigned char op);
+	int	(*group_cookie_set)(int db, void *cookie);
+	bool	(*tag_get)(int db, MV_TAG_TYPE tag_mode, MV_SWITCH_PRESET_TYPE preset, int vid, MV_MUX_TAG *tag);
+	int	(*preset_init)(MV_TAG_TYPE tag_mode, MV_SWITCH_PRESET_TYPE preset, int vid);
+	void	(*interrupt_unmask)(void);
+};
+
+struct mv_mux_eth_ops {
+	int	(*set_tag_type)(int port, int tag_type);
+};
+
+int mv_mux_update_link(void *cookie, int link_up);
+struct net_device *mv_mux_netdev_add(int port, struct net_device *mux_dev);
+struct net_device *mv_mux_netdev_alloc(char *name, int idx, MV_MUX_TAG *tag_cfg);
+char *mv_mux_get_mac(struct net_device *mux_dev);
+int mv_mux_netdev_delete(struct net_device *mux_dev);
+int mv_mux_tag_type_set(int port, int type);
+void mv_mux_vlan_set(MV_MUX_TAG *mux_cfg, unsigned int vid);
+void mv_mux_cfg_get(struct net_device *mux_dev, MV_MUX_TAG *mux_cfg);
+int mv_mux_rx(struct sk_buff *skb, int port, struct napi_struct *napi);
+void mv_mux_netdev_print(struct net_device *mux_dev);
+void mv_mux_netdev_print_all(int port);
+void mv_mux_shadow_print(int gbe_port);
+struct net_device *mv_mux_switch_ptr_get(int port);
+int mv_mux_ctrl_dbg_flag(int port, u32 flag, u32 val);
+void mv_mux_eth_attach(int port, struct net_device *root, struct mv_mux_eth_ops *ops);
+void mv_mux_switch_attach(int gbe_port, int preset, int vid, int tag, int switch_port);
+void mv_mux_eth_detach(int port);
+int mv_mux_switch_ops_set(const struct mv_mux_switch_ops *switch_ops_ptr);
+int mv_mux_netdev_find(unsigned int dev_idx);
+
+
+#endif /* __mv_tag_netdev_h__ */
diff --git a/drivers/net/ethernet/mvebu_net/netmux/mv_mux_sysfs.c b/drivers/net/ethernet/mvebu_net/netmux/mv_mux_sysfs.c
new file mode 100644
index 000000000000..558d01a99de9
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/netmux/mv_mux_sysfs.c
@@ -0,0 +1,252 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "mv_mux_netdev.h"
+MV_MUX_TAG mux_cfg;
+
+static ssize_t mv_mux_help(char *b)
+{
+	int o = 0;
+	o += sprintf(b+o, "echo p             > dump         - Show gbe port [p] info\n");
+	o += sprintf(b+o, "echo name          > mux_dump     - Show virt interface device info\n");
+	o += sprintf(b+o, "echo name p        > add          - Attach to gbe port [p] new virtual interface\n");
+	o += sprintf(b+o, "echo name          > del          - Remove virt interface\n");
+	o += sprintf(b+o, "\n");
+	o += sprintf(b+o, "echo p tag         > tag_type     - Set port p tag type 0-NONE,1-MH,2-DSA,3-EDSA,4-VID\n");
+	o += sprintf(b+o, "echo name vid      > mux_vid      - Set virt interface vid value.\n");
+	o += sprintf(b+o, "echo name mh       > mh_tx        - Set virt interface MH tX tag\n");
+	o += sprintf(b+o, "echo name mh mask  > mh_rx        - Set virt interface MH RX tag and mask\n");
+	o += sprintf(b+o, "echo name dsa      > dsa_tx       - Set virt interface DSA TX tag\n");
+	o += sprintf(b+o, "echo name dsa mask > dsa_rx       - Set virt interface DSA RX tag and mask\n");
+	o += sprintf(b+o, "echo name wL wH    > edsa_tx      - Set virt interface EDSA TX tag\n");
+	o += sprintf(b+o, "echo name wL wH    > edsa_rx      - Set virt interface EDSA RX tag\n");
+	o += sprintf(b+o, "echo name wL wH    > edsa_rx_mask - Set virt interface EDSA RX mask tag\n");
+
+#ifdef CONFIG_MV_ETH_DEBUG_CODE
+	o += sprintf(b+o, "echo p hex         > debug        - bit0:rx, bit1:tx\n");
+#endif
+	o += sprintf(b+o, "\n");
+	o += sprintf(b+o, "params: name-interface name,  mh-2 bytes value(hex), dsa,edsa,vid-4 bytes value(hex)\n");
+
+	return o;
+}
+
+
+static ssize_t mv_mux_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+/*
+	const char      *name = attr->attr.name;
+	int             off = 0;
+*/
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	return mv_mux_help(buf);
+}
+
+
+static ssize_t mv_mux_netdev_store(struct device *dev,
+					struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct net_device *mux_dev;
+	const char        *name = attr->attr.name;
+	int               a = 0, b = 0, err = 0;
+	char              dev_name[IFNAMSIZ];
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%s %x %x", dev_name, &a, &b);
+	mux_dev = dev_get_by_name(&init_net, dev_name);
+
+	if (mux_dev)
+		dev_put(mux_dev);
+
+	if (!strcmp(name, "mux_dump")) {
+		mv_mux_netdev_print(mux_dev);
+
+	} else if (!strcmp(name, "mux_vid")) {
+		mv_mux_vlan_set(&mux_cfg, a);
+		err = mv_mux_netdev_alloc(dev_name, MV_MUX_UNKNOWN_GROUP, &mux_cfg) ? 0 : 1;
+
+	} else if (!strcmp(name, "mh_rx")) {
+		mv_mux_cfg_get(mux_dev, &mux_cfg);
+		mux_cfg.rx_tag_ptrn.mh = MV_16BIT_BE((MV_U16)a);
+		mux_cfg.rx_tag_mask.mh = MV_16BIT_BE((MV_U16)b);
+		err = mv_mux_netdev_alloc(dev_name, MV_MUX_UNKNOWN_GROUP, &mux_cfg) ? 0 : 1;
+
+	} else if (!strcmp(name, "dsa_rx")) {
+		mv_mux_cfg_get(mux_dev, &mux_cfg);
+		mux_cfg.rx_tag_ptrn.dsa = a;
+		mux_cfg.rx_tag_mask.dsa = b;
+		err = mv_mux_netdev_alloc(dev_name, MV_MUX_UNKNOWN_GROUP, &mux_cfg) ? 0 : 1;
+
+	} else if (!strcmp(name, "edsa_rx")) {
+		mv_mux_cfg_get(mux_dev, &mux_cfg);
+		mux_cfg.rx_tag_ptrn.edsa[0] = a;
+		mux_cfg.rx_tag_ptrn.edsa[1] = b;
+		err = mv_mux_netdev_alloc(dev_name, MV_MUX_UNKNOWN_GROUP, &mux_cfg) ? 0 : 1;
+
+	} else if (!strcmp(name, "edsa_rx_mask")) {
+		mv_mux_cfg_get(mux_dev, &mux_cfg);
+		mux_cfg.rx_tag_mask.edsa[0] = a;
+		mux_cfg.rx_tag_mask.edsa[1] = b;
+		err = mv_mux_netdev_alloc(dev_name, MV_MUX_UNKNOWN_GROUP, &mux_cfg) ? 0 : 1;
+
+	}  else if (!strcmp(name, "mh_tx")) {
+		mv_mux_cfg_get(mux_dev, &mux_cfg);
+		mux_cfg.tx_tag.mh = MV_16BIT_BE((MV_U16)a);
+		err = mv_mux_netdev_alloc(dev_name, MV_MUX_UNKNOWN_GROUP, &mux_cfg) ? 0 : 1;
+
+	}  else if (!strcmp(name, "dsa_tx")) {
+		mv_mux_cfg_get(mux_dev, &mux_cfg);
+		mux_cfg.tx_tag.dsa = a;
+		err = mv_mux_netdev_alloc(dev_name, MV_MUX_UNKNOWN_GROUP, &mux_cfg) ? 0 : 1;
+
+	} else if (!strcmp(name, "edsa_tx")) {
+		mv_mux_cfg_get(mux_dev, &mux_cfg);
+		mux_cfg.tx_tag.edsa[0] = a;
+		mux_cfg.tx_tag.edsa[1] = b;
+		err = mv_mux_netdev_alloc(dev_name, MV_MUX_UNKNOWN_GROUP, &mux_cfg) ? 0 : 1;
+
+	} else if (!strcmp(name, "add")) {
+		err =  mv_mux_netdev_add(a, mux_dev) ? 0 : 1;
+
+	} else if (!strcmp(name, "del"))
+		err = mv_mux_netdev_delete(mux_dev);
+
+	if (err)
+		printk(KERN_ERR "%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_mux_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    a, b;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	a = b = err = 0;
+
+	sscanf(buf, "%x %x", &a, &b);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "tag_type")) {
+		mv_mux_tag_type_set(a, b);
+
+	} else if (!strcmp(name, "dump")) {
+		mv_mux_shadow_print(a);
+
+#ifdef CONFIG_MV_ETH_DEBUG_CODE
+	} else if (!strcmp(name, "debug")) {
+		err = mv_mux_ctrl_dbg_flag(a, MV_MUX_F_DBG_RX,   b & 0x1);
+		err = mv_mux_ctrl_dbg_flag(a, MV_MUX_F_DBG_TX,   b & 0x2);
+#endif
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+static DEVICE_ATTR(add,          S_IWUSR, mv_mux_show, mv_mux_netdev_store);
+static DEVICE_ATTR(del,          S_IWUSR, mv_mux_show, mv_mux_netdev_store);
+static DEVICE_ATTR(mux_vid,      S_IWUSR, mv_mux_show, mv_mux_netdev_store);
+static DEVICE_ATTR(mh_rx,        S_IWUSR, mv_mux_show, mv_mux_netdev_store);
+static DEVICE_ATTR(dsa_rx,       S_IWUSR, mv_mux_show, mv_mux_netdev_store);
+static DEVICE_ATTR(edsa_rx,      S_IWUSR, mv_mux_show, mv_mux_netdev_store);
+static DEVICE_ATTR(edsa_rx_mask, S_IWUSR, mv_mux_show, mv_mux_netdev_store);
+static DEVICE_ATTR(mh_tx,        S_IWUSR, mv_mux_show, mv_mux_netdev_store);
+static DEVICE_ATTR(dsa_tx,       S_IWUSR, mv_mux_show, mv_mux_netdev_store);
+static DEVICE_ATTR(edsa_tx,      S_IWUSR, mv_mux_show, mv_mux_netdev_store);
+static DEVICE_ATTR(tag_type,     S_IWUSR, mv_mux_show, mv_mux_store);
+static DEVICE_ATTR(dump,         S_IWUSR, mv_mux_show, mv_mux_store);
+static DEVICE_ATTR(debug,        S_IWUSR, mv_mux_show, mv_mux_store);
+static DEVICE_ATTR(mux_dump,     S_IWUSR, mv_mux_show, mv_mux_netdev_store);
+static DEVICE_ATTR(help,         S_IRUSR, mv_mux_show, NULL);
+
+
+static struct attribute *mv_mux_attrs[] = {
+
+	&dev_attr_add.attr,
+	&dev_attr_del.attr,
+	&dev_attr_mux_vid.attr,
+	&dev_attr_mh_rx.attr,
+	&dev_attr_dsa_rx.attr,
+	&dev_attr_edsa_rx.attr,
+	&dev_attr_edsa_rx_mask.attr,
+	&dev_attr_mh_tx.attr,
+	&dev_attr_dsa_tx.attr,
+	&dev_attr_edsa_tx.attr,
+	&dev_attr_tag_type.attr,
+	&dev_attr_dump.attr,
+	&dev_attr_help.attr,
+	&dev_attr_mux_dump.attr,
+	&dev_attr_debug.attr,
+	NULL
+};
+
+static struct attribute_group mv_mux_group = {
+	.name = "mv_mux",
+	.attrs = mv_mux_attrs,
+};
+
+int __init mv_mux_sysfs_init(void)
+{
+	int err;
+	struct device *pd;
+
+	pd = &platform_bus;
+	err = sysfs_create_group(&pd->kobj, &mv_mux_group);
+	if (err)
+		pr_err("Init sysfs group %s failed %d\n", mv_mux_group.name, err);
+
+	return err;
+}
+
+
+module_init(mv_mux_sysfs_init);
+
+MODULE_AUTHOR("Uri Eliyahu");
+MODULE_DESCRIPTION("sysfs for marvell GbE");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/mvebu_net/netmux/mv_mux_tool.c b/drivers/net/ethernet/mvebu_net/netmux/mv_mux_tool.c
new file mode 100644
index 000000000000..7e4551bba008
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/netmux/mv_mux_tool.c
@@ -0,0 +1,241 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mv_mux_tool.h"
+#include <linux/ethtool.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0)
+static int __ethtool_get_settings(struct net_device *dev, void __user *useraddr)
+{
+	struct ethtool_cmd cmd = { ETHTOOL_GSET };
+	int err;
+
+	if (!dev->ethtool_ops->get_settings)
+		return -EOPNOTSUPP;
+
+	err = dev->ethtool_ops->get_settings(dev, &cmd);
+	if (err < 0)
+		return err;
+
+	if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
+		return -EFAULT;
+	return 0;
+}
+#endif
+
+/******************************************************************************
+*mv_mux_tool_get_settings
+*Description:
+*	ethtool	get standard port settings
+*INPUT:
+*	netdev	Network device structure pointer
+*OUTPUT
+*	cmd	command (settings)
+*RETURN:
+*	0 for success
+*
+*******************************************************************************/
+int mv_mux_tool_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+	struct mux_netdev *pmux_priv = MV_MUX_PRIV(netdev);
+	struct net_device *root = mux_eth_shadow[pmux_priv->port].root;
+
+	if (!root)
+		return -ENETUNREACH;
+
+	return __ethtool_get_settings(root, cmd);
+}
+/******************************************************************************
+*mv_mux_tool_get_drvinfo
+*Description:
+*	ethtool get driver information
+*INPUT:
+*	netdev	Network device structure pointer
+*	info	driver information
+*OUTPUT
+*	info	driver information
+*RETURN:
+*	None
+*
+*******************************************************************************/
+void mv_mux_tool_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
+{
+	struct mux_netdev *pmux_priv = MV_MUX_PRIV(netdev);
+	struct net_device *root = mux_eth_shadow[pmux_priv->port].root;
+
+	if (!root || !root->ethtool_ops || !root->ethtool_ops->get_drvinfo)
+		return;
+
+
+	root->ethtool_ops->get_drvinfo(root, info);
+}
+
+/******************************************************************************
+*mv_mux_tool_get_coalesce
+*Description:
+*	ethtool get RX/TX coalesce parameters
+*INPUT:
+*	netdev	Network device structure pointer
+*OUTPUT
+*	cmd	Coalesce parameters
+*RETURN:
+*	0 on success
+*
+*******************************************************************************/
+int mv_mux_tool_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *cmd)
+{
+	struct mux_netdev *pmux_priv = MV_MUX_PRIV(netdev);
+	struct net_device *root = mux_eth_shadow[pmux_priv->port].root;
+
+	if (!root || !root->ethtool_ops || !root->ethtool_ops->get_coalesce)
+		return -ENETUNREACH;
+
+	return root->ethtool_ops->get_coalesce(root, cmd);
+
+}
+/******************************************************************************
+*mv_mux_tool_get_pauseparam
+*Description:
+*	ethtool get pause parameters
+*INPUT:
+*	netdev	Network device structure pointer
+*OUTPUT
+*	pause	Pause paranmeters
+*RETURN:
+*	None
+*
+*******************************************************************************/
+void mv_mux_tool_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+{
+	struct mux_netdev *pmux_priv = MV_MUX_PRIV(netdev);
+	struct net_device *root = mux_eth_shadow[pmux_priv->port].root;
+
+	if (!root || !root->ethtool_ops || !root->ethtool_ops->get_pauseparam)
+		return;
+
+	root->ethtool_ops->get_pauseparam(root, pause);
+}
+
+
+/******************************************************************************
+* mv_mux_tool_nway_reset
+* Description:
+*	ethtool restart auto negotiation
+* INPUT:
+*	netdev	Network device structure pointer
+* OUTPUT
+*	None
+* RETURN:
+*	0 on success
+*
+*******************************************************************************/
+#ifdef CONFIG_MV_INCLUDE_SWITCH
+int mv_mux_tool_nway_reset(struct net_device *mux_dev)
+{
+	struct mux_netdev *pdev_priv;
+
+	pdev_priv = MV_MUX_PRIV(mux_dev);
+	/* restart group autoneg */
+	if (mv_switch_group_restart_autoneg(pdev_priv->idx))
+		return -EINVAL;
+
+	return 0;
+}
+#endif
+
+/******************************************************************************
+* mv_mux_tool_get_link
+* Description:
+*	ethtool get link status
+* INPUT:
+*	netdev	Network device structure pointer
+* OUTPUT
+*	None
+* RETURN:
+*	0 if link is down, 1 if link is up
+*
+*******************************************************************************/
+#ifdef CONFIG_MV_INCLUDE_SWITCH
+u32 mv_mux_tool_get_link(struct net_device *mux_dev)
+{
+	struct mux_netdev *pdev_priv;
+
+	pdev_priv = MV_MUX_PRIV(mux_dev);
+
+	return mv_switch_link_status_get(pdev_priv->idx);
+}
+#endif
+
+
+const struct ethtool_ops mv_mux_tool_ops = {
+	.get_settings	= mv_mux_tool_get_settings,
+	.get_pauseparam	= mv_mux_tool_get_pauseparam,
+	.get_coalesce	= mv_mux_tool_get_coalesce,
+	.get_link	= ethtool_op_get_link,
+	.get_drvinfo	= mv_mux_tool_get_drvinfo,
+#ifdef CONFIG_MV_INCLUDE_SWITCH
+	.nway_reset	= mv_mux_tool_nway_reset,
+	.get_link	= mv_mux_tool_get_link,
+#endif
+};
diff --git a/drivers/net/ethernet/mvebu_net/netmux/mv_mux_tool.h b/drivers/net/ethernet/mvebu_net/netmux/mv_mux_tool.h
new file mode 100644
index 000000000000..35f5dad0bc21
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/netmux/mv_mux_tool.h
@@ -0,0 +1,40 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#ifndef NET_DEV_MV_MUX_TOOL_H
+#define NET_DEV_MV_MUX_TOOL_H
+
+#include <linux/version.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#include "mv_switch.h"
+
+#include "mv_mux_netdev.h"
+
+extern struct  mv_mux_eth_port mux_eth_shadow[];
+
+#endif
diff --git a/drivers/net/ethernet/mvebu_net/phy/mvEthPhy.c b/drivers/net/ethernet/mvebu_net/phy/mvEthPhy.c
new file mode 100644
index 000000000000..1e903e5586c8
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/phy/mvEthPhy.c
@@ -0,0 +1,669 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "mvEthPhyRegs.h"
+#include "mvEthPhy.h"
+
+#include "mvNetConfig.h"
+
+#undef DEBUG
+#ifdef DEBUG
+#define DB(x) x
+#define DB2(x) x
+#else
+#define DB(x)
+#define DB2(x)
+#endif /* DEBUG */
+
+static MV_U32 mvEthPhySmiReg;
+
+MV_STATUS mvEthPhySmiAddrSet(MV_U32 smi_addr)
+{
+	mvEthPhySmiReg = smi_addr;
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvEthPhyRegRead - Read from ethernet phy register.
+*
+* DESCRIPTION:
+*       This function reads ethernet phy register.
+*
+* INPUT:
+*       phyAddr - Phy address.
+*       regOffs - Phy register offset.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       16bit phy register value, or 0xffff on error
+*
+*******************************************************************************/
+MV_STATUS mvEthPhyRegRead(MV_U32 phyAddr, MV_U32 regOffs, MV_U16 *data)
+{
+	MV_U32 			smiReg;
+	volatile MV_U32 timeout;
+
+	/* check parameters */
+	if ((phyAddr << ETH_PHY_SMI_DEV_ADDR_OFFS) & ~ETH_PHY_SMI_DEV_ADDR_MASK) {
+		mvOsPrintf("mvEthPhyRegRead: Err. Illegal PHY device address %d\n",
+				phyAddr);
+		return MV_FAIL;
+	}
+	if ((regOffs <<  ETH_PHY_SMI_REG_ADDR_OFFS) & ~ETH_PHY_SMI_REG_ADDR_MASK) {
+		mvOsPrintf("mvEthPhyRegRead: Err. Illegal PHY register offset %d\n",
+				regOffs);
+		return MV_FAIL;
+	}
+
+	timeout = ETH_PHY_TIMEOUT;
+	/* wait till the SMI is not busy*/
+	do {
+		/* read smi register */
+		smiReg = MV_REG_READ(mvEthPhySmiReg);
+		if (timeout-- == 0) {
+			mvOsPrintf("mvEthPhyRegRead: SMI busy timeout\n");
+			return MV_FAIL;
+		}
+	} while (smiReg & ETH_PHY_SMI_BUSY_MASK);
+
+	/* fill the phy address and regiser offset and read opcode */
+	smiReg = (phyAddr <<  ETH_PHY_SMI_DEV_ADDR_OFFS) | (regOffs << ETH_PHY_SMI_REG_ADDR_OFFS)|
+			   ETH_PHY_SMI_OPCODE_READ;
+
+	/* write the smi register */
+	MV_REG_WRITE(mvEthPhySmiReg, smiReg);
+
+	timeout = ETH_PHY_TIMEOUT;
+
+	/*wait till readed value is ready */
+	do {
+		/* read smi register */
+		smiReg = MV_REG_READ(mvEthPhySmiReg);
+
+		if (timeout-- == 0) {
+			mvOsPrintf("mvEthPhyRegRead: SMI read-valid timeout\n");
+			return MV_FAIL;
+		}
+	} while (!(smiReg & ETH_PHY_SMI_READ_VALID_MASK));
+
+	/* Wait for the data to update in the SMI register */
+	for (timeout = 0; timeout < ETH_PHY_TIMEOUT; timeout++)
+		;
+
+	*data = (MV_U16)(MV_REG_READ(mvEthPhySmiReg) & ETH_PHY_SMI_DATA_MASK);
+
+	return MV_OK;
+}
+
+MV_STATUS mvEthPhyRegPrint(MV_U32 phyAddr, MV_U32 regOffs)
+{
+	MV_U16      data;
+	MV_STATUS   status;
+
+	status = mvEthPhyRegRead(phyAddr, regOffs, &data);
+	if (status != MV_OK)
+		mvOsPrintf("Read failed - status = %d\n", status);
+
+	mvOsPrintf("phy=0x%x, reg=0x%x: 0x%04x\n", phyAddr, regOffs, data);
+
+	return status;
+}
+
+void mvEthPhyRegs(int phyAddr)
+{
+	mvOsPrintf("[ETH-PHY #%d registers]\n\n", phyAddr);
+
+	mvEthPhyRegPrint(phyAddr, ETH_PHY_CTRL_REG);
+	mvEthPhyRegPrint(phyAddr, ETH_PHY_STATUS_REG);
+	mvEthPhyRegPrint(phyAddr, ETH_PHY_AUTONEGO_AD_REG);
+	mvEthPhyRegPrint(phyAddr, ETH_PHY_LINK_PARTNER_CAP_REG);
+	mvEthPhyRegPrint(phyAddr, ETH_PHY_1000BASE_T_CTRL_REG);
+	mvEthPhyRegPrint(phyAddr, ETH_PHY_1000BASE_T_STATUS_REG);
+	mvEthPhyRegPrint(phyAddr, ETH_PHY_EXTENDED_STATUS_REG);
+	mvEthPhyRegPrint(phyAddr, ETH_PHY_SPEC_CTRL_REG);
+	mvEthPhyRegPrint(phyAddr, ETH_PHY_SPEC_STATUS_REG);
+}
+
+/*******************************************************************************
+* mvEthPhyRegWrite - Write to ethernet phy register.
+*
+* DESCRIPTION:
+*       This function write to ethernet phy register.
+*
+* INPUT:
+*       phyAddr - Phy address.
+*       regOffs - Phy register offset.
+*       data    - 16bit data.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_OK if write succeed, MV_BAD_PARAM on bad parameters , MV_ERROR on error .
+*		MV_TIMEOUT on timeout
+*
+*******************************************************************************/
+MV_STATUS mvEthPhyRegWrite(MV_U32 phyAddr, MV_U32 regOffs, MV_U16 data)
+{
+	MV_U32		smiReg;
+	volatile MV_U32 timeout;
+
+	/* check parameters */
+	if ((phyAddr <<  ETH_PHY_SMI_DEV_ADDR_OFFS) & ~ETH_PHY_SMI_DEV_ADDR_MASK) {
+		mvOsPrintf("mvEthPhyRegWrite: Err. Illegal phy address 0x%x\n", phyAddr);
+		return MV_BAD_PARAM;
+	}
+	if ((regOffs <<  ETH_PHY_SMI_REG_ADDR_OFFS) & ~ETH_PHY_SMI_REG_ADDR_MASK) {
+		mvOsPrintf("mvEthPhyRegWrite: Err. Illegal register offset 0x%x\n", regOffs);
+		return MV_BAD_PARAM;
+	}
+
+	timeout = ETH_PHY_TIMEOUT;
+
+	/* wait till the SMI is not busy*/
+	do {
+		/* read smi register */
+		smiReg = MV_REG_READ(mvEthPhySmiReg);
+		if (timeout-- == 0) {
+			mvOsPrintf("mvEthPhyRegWrite: SMI busy timeout\n");
+		return MV_TIMEOUT;
+		}
+	} while (smiReg & ETH_PHY_SMI_BUSY_MASK);
+
+	/* fill the phy address and regiser offset and write opcode and data*/
+	smiReg = (data << ETH_PHY_SMI_DATA_OFFS);
+	smiReg |= (phyAddr <<  ETH_PHY_SMI_DEV_ADDR_OFFS) | (regOffs << ETH_PHY_SMI_REG_ADDR_OFFS);
+	smiReg &= ~ETH_PHY_SMI_OPCODE_READ;
+
+	/* write the smi register */
+	DB(printf("%s: phyAddr=0x%x offset = 0x%x data=0x%x\n", __func__, phyAddr, regOffs, data));
+	DB(printf("%s: mvEthPhySmiReg = 0x%x smiReg=0x%x\n", __func__, mvEthPhySmiReg, smiReg));
+	MV_REG_WRITE(mvEthPhySmiReg, smiReg);
+
+	return MV_OK;
+}
+
+
+/*******************************************************************************
+* mvEthPhyReset - Reset ethernet Phy.
+*
+* DESCRIPTION:
+*       This function resets a given ethernet Phy.
+*
+* INPUT:
+*       phyAddr - Phy address.
+*       timeout - in millisec
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:   MV_OK       - Success
+*           MV_TIMEOUT  - Timeout
+*
+*******************************************************************************/
+MV_STATUS mvEthPhyReset(MV_U32 phyAddr, int timeout)
+{
+	MV_U16  phyRegData;
+
+	/* Reset the PHY */
+	if (mvEthPhyRegRead(phyAddr, ETH_PHY_CTRL_REG, &phyRegData) != MV_OK)
+		return MV_FAIL;
+
+	/* Set bit 15 to reset the PHY */
+	phyRegData |= ETH_PHY_CTRL_RESET_MASK;
+	mvEthPhyRegWrite(phyAddr, ETH_PHY_CTRL_REG, phyRegData);
+
+	/* Wait untill Reset completed */
+	while (timeout > 0) {
+		mvOsSleep(100);
+		timeout -= 100;
+
+		if (mvEthPhyRegRead(phyAddr, ETH_PHY_CTRL_REG, &phyRegData) != MV_OK)
+			return MV_FAIL;
+
+		if ((phyRegData & ETH_PHY_CTRL_RESET_MASK) == 0)
+			return MV_OK;
+	}
+	return MV_TIMEOUT;
+}
+
+
+/*******************************************************************************
+* mvEthPhyRestartAN - Restart ethernet Phy Auto-Negotiation.
+*
+* DESCRIPTION:
+*       This function resets a given ethernet Phy.
+*
+* INPUT:
+*       phyAddr - Phy address.
+*       timeout - in millisec; 0 - no timeout (don't wait)
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:   MV_OK       - Success
+*           MV_TIMEOUT  - Timeout
+*
+*******************************************************************************/
+MV_STATUS mvEthPhyRestartAN(MV_U32 phyAddr, int timeout)
+{
+	MV_U16  phyRegData;
+
+	/* Reset the PHY */
+	if (mvEthPhyRegRead(phyAddr, ETH_PHY_CTRL_REG, &phyRegData) != MV_OK)
+		return MV_FAIL;
+
+	/* Set bit 12 to Enable autonegotiation of the PHY */
+	phyRegData |= ETH_PHY_CTRL_AN_ENABLE_MASK;
+
+	/* Set bit 9 to Restart autonegotiation of the PHY */
+	phyRegData |= ETH_PHY_CTRL_AN_RESTART_MASK;
+	mvEthPhyRegWrite(phyAddr, ETH_PHY_CTRL_REG, phyRegData);
+
+	if (timeout == 0)
+		return MV_OK;
+
+	/* Wait untill Auotonegotiation completed */
+	while (timeout > 0) {
+		mvOsSleep(100);
+		timeout -= 100;
+
+		if (mvEthPhyRegRead(phyAddr, ETH_PHY_STATUS_REG, &phyRegData) != MV_OK)
+			return MV_FAIL;
+
+		if (phyRegData & ETH_PHY_STATUS_AN_DONE_MASK)
+			return MV_OK;
+	}
+	return MV_TIMEOUT;
+}
+
+
+/*******************************************************************************
+* mvEthPhyDisableAN - Disable Phy Auto-Negotiation and set forced Speed and Duplex
+*
+* DESCRIPTION:
+*       This function disable AN and set duplex and speed.
+*
+* INPUT:
+*       phyAddr - Phy address.
+*       speed   - port speed. 0 - 10 Mbps, 1-100 Mbps, 2 - 1000 Mbps
+*       duplex  - port duplex. 0 - Half duplex, 1 - Full duplex
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:   MV_OK   - Success
+*           MV_FAIL - Failure
+*
+*******************************************************************************/
+MV_STATUS mvEthPhyDisableAN(MV_U32 phyAddr, int speed, int duplex)
+{
+	MV_U16  phyRegData;
+
+	if (mvEthPhyRegRead(phyAddr, ETH_PHY_CTRL_REG, &phyRegData) != MV_OK)
+		return MV_FAIL;
+
+	switch (speed) {
+	case 0: /* 10 Mbps */
+			phyRegData &= ~ETH_PHY_CTRL_SPEED_LSB_MASK;
+			phyRegData &= ~ETH_PHY_CTRL_SPEED_MSB_MASK;
+			break;
+
+	case 1: /* 100 Mbps */
+			phyRegData |= ETH_PHY_CTRL_SPEED_LSB_MASK;
+			phyRegData &= ~ETH_PHY_CTRL_SPEED_MSB_MASK;
+			break;
+
+	case 2: /* 1000 Mbps */
+			phyRegData &= ~ETH_PHY_CTRL_SPEED_LSB_MASK;
+			phyRegData |= ETH_PHY_CTRL_SPEED_MSB_MASK;
+			break;
+
+	default:
+			mvOsOutput("Unexpected speed = %d\n", speed);
+			return MV_FAIL;
+	}
+
+	switch (duplex) {
+	case 0: /* half duplex */
+			phyRegData &= ~ETH_PHY_CTRL_DUPLEX_MASK;
+			break;
+
+	case 1: /* full duplex */
+			phyRegData |= ETH_PHY_CTRL_DUPLEX_MASK;
+			break;
+
+	default:
+			mvOsOutput("Unexpected duplex = %d\n", duplex);
+			return MV_FAIL;
+	}
+	/* Clear bit 12 to Disable autonegotiation of the PHY */
+	phyRegData &= ~ETH_PHY_CTRL_AN_ENABLE_MASK;
+
+	/* Clear bit 9 to DISABLE, Restart autonegotiation of the PHY */
+	phyRegData &= ~ETH_PHY_CTRL_AN_RESTART_MASK;
+	mvEthPhyRegWrite(phyAddr, ETH_PHY_CTRL_REG, phyRegData);
+
+	return MV_OK;
+}
+
+MV_STATUS   mvEthPhyLoopback(MV_U32 phyAddr, MV_BOOL isEnable)
+{
+	MV_U16      regVal, ctrlVal;
+	MV_STATUS   status;
+
+	/* Set loopback speed and duplex accordingly with current */
+	/* Bits: 6, 8, 13 */
+	if (mvEthPhyRegRead(phyAddr, ETH_PHY_CTRL_REG, &ctrlVal) != MV_OK)
+		return MV_FAIL;
+
+	if (isEnable) {
+		/* Select page 2 */
+		mvEthPhyRegWrite(phyAddr, 22, 2);
+
+		mvEthPhyRegRead(phyAddr, 21, &regVal);
+		regVal &= ~(ETH_PHY_CTRL_DUPLEX_MASK | ETH_PHY_CTRL_SPEED_LSB_MASK |
+				ETH_PHY_CTRL_SPEED_MSB_MASK | ETH_PHY_CTRL_AN_ENABLE_MASK);
+		regVal |= (ctrlVal & (ETH_PHY_CTRL_DUPLEX_MASK | ETH_PHY_CTRL_SPEED_LSB_MASK |
+					ETH_PHY_CTRL_SPEED_MSB_MASK | ETH_PHY_CTRL_AN_ENABLE_MASK));
+		mvEthPhyRegWrite(phyAddr, 21, regVal);
+
+		/* Select page 0 */
+		mvEthPhyRegWrite(phyAddr, 22, 0);
+
+		/* Disable Energy detection   R16[9:8] = 00 */
+		/* Disable MDI/MDIX crossover R16[6:5] = 00 */
+		mvEthPhyRegRead(phyAddr, ETH_PHY_SPEC_CTRL_REG, &regVal);
+		regVal &= ~(BIT5 | BIT6 | BIT8 | BIT9);
+		mvEthPhyRegWrite(phyAddr, ETH_PHY_SPEC_CTRL_REG, regVal);
+
+		status = mvEthPhyReset(phyAddr, 1000);
+		if (status != MV_OK) {
+			mvOsPrintf("mvEthPhyReset failed: status=0x%x\n", status);
+			return status;
+		}
+
+		/* Set loopback */
+		ctrlVal |= ETH_PHY_CTRL_LOOPBACK_MASK;
+		mvEthPhyRegWrite(phyAddr, ETH_PHY_CTRL_REG, ctrlVal);
+	} else {
+		/* Cancel Loopback */
+		ctrlVal &= ~ETH_PHY_CTRL_LOOPBACK_MASK;
+		mvEthPhyRegWrite(phyAddr, ETH_PHY_CTRL_REG, ctrlVal);
+
+		status = mvEthPhyReset(phyAddr, 1000);
+		if (status != MV_OK) {
+			mvOsPrintf("mvEthPhyReset failed: status=0x%x\n", status);
+			return status;
+		}
+
+		/* Enable Energy detection   R16[9:8] = 11 */
+		/* Enable MDI/MDIX crossover R16[6:5] = 11 */
+		mvEthPhyRegRead(phyAddr, ETH_PHY_SPEC_CTRL_REG, &regVal);
+		regVal |= (BIT5 | BIT6 | BIT8 | BIT9);
+		mvEthPhyRegWrite(phyAddr, ETH_PHY_SPEC_CTRL_REG, regVal);
+
+		status = mvEthPhyReset(phyAddr, 1000);
+		if (status != MV_OK) {
+			mvOsPrintf("mvEthPhyReset failed: status=0x%x\n", status);
+			return status;
+		}
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvEthPhyCheckLink -
+*
+* DESCRIPTION:
+*	check link in phy port
+*
+* INPUT:
+*       phyAddr - Phy address.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:   MV_TRUE if link is up, MV_FALSE if down
+*
+*******************************************************************************/
+MV_BOOL mvEthPhyCheckLink(MV_U32 phyAddr)
+{
+	MV_U16 val_st, val_ctrl, val_spec_st;
+
+	/* read status reg */
+	if (mvEthPhyRegRead(phyAddr, ETH_PHY_STATUS_REG, &val_st) != MV_OK)
+		return MV_FALSE;
+
+	/* read control reg */
+	if (mvEthPhyRegRead(phyAddr, ETH_PHY_CTRL_REG, &val_ctrl) != MV_OK)
+		return MV_FALSE;
+
+	/* read special status reg */
+	if (mvEthPhyRegRead(phyAddr, ETH_PHY_SPEC_STATUS_REG, &val_spec_st) != MV_OK)
+		return MV_FALSE;
+
+	/* Check for PHY exist */
+	if ((val_ctrl == ETH_PHY_SMI_DATA_MASK) && (val_st & ETH_PHY_SMI_DATA_MASK))
+		return MV_FALSE;
+
+
+	if (val_ctrl & ETH_PHY_CTRL_AN_ENABLE_MASK) {
+		if (val_st & ETH_PHY_STATUS_AN_DONE_MASK)
+			return MV_TRUE;
+		else
+			return MV_FALSE;
+	} else {
+		if (val_spec_st & ETH_PHY_SPEC_STATUS_LINK_MASK)
+			return MV_TRUE;
+	}
+	return MV_FALSE;
+}
+
+/*******************************************************************************
+* mvEthPhyPrintStatus -
+*
+* DESCRIPTION:
+*	print port Speed, Duplex, Auto-negotiation, Link.
+*
+* INPUT:
+*       phyAddr - Phy address.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:   16bit phy register value, or 0xffff on error
+*
+*******************************************************************************/
+MV_STATUS	mvEthPhyPrintStatus(MV_U32 phyAddr)
+{
+	MV_U16 val;
+
+	/* read control reg */
+	if (mvEthPhyRegRead(phyAddr, ETH_PHY_CTRL_REG, &val) != MV_OK)
+		return MV_ERROR;
+
+	if (val & ETH_PHY_CTRL_AN_ENABLE_MASK)
+		mvOsOutput("Auto negotiation: Enabled\n");
+	else
+		mvOsOutput("Auto negotiation: Disabled\n");
+
+
+	/* read specific status reg */
+	if (mvEthPhyRegRead(phyAddr, ETH_PHY_SPEC_STATUS_REG, &val) != MV_OK)
+		return MV_ERROR;
+
+	switch (val & ETH_PHY_SPEC_STATUS_SPEED_MASK) {
+	case ETH_PHY_SPEC_STATUS_SPEED_1000MBPS:
+			mvOsOutput("Speed: 1000 Mbps\n");
+			break;
+	case ETH_PHY_SPEC_STATUS_SPEED_100MBPS:
+			mvOsOutput("Speed: 100 Mbps\n");
+			break;
+	case ETH_PHY_SPEC_STATUS_SPEED_10MBPS:
+			mvOsOutput("Speed: 10 Mbps\n");
+			break;
+	default:
+			mvOsOutput("Speed: Uknown\n");
+			break;
+
+	}
+
+	if (val & ETH_PHY_SPEC_STATUS_DUPLEX_MASK)
+		mvOsOutput("Duplex: Full\n");
+	else
+		mvOsOutput("Duplex: Half\n");
+
+
+	if (val & ETH_PHY_SPEC_STATUS_LINK_MASK)
+		mvOsOutput("Link: up\n");
+	else
+		mvOsOutput("Link: down\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvEthPhyAdvertiseSet -
+*
+* DESCRIPTION:
+*	Set advertisement mode
+*
+* INPUT:
+*       phyAddr - Phy address.
+*	advertise -	0x1: 10 half
+*			0x2: 10 full
+*			0x4: 100 half
+*			0x8: 100 full
+*			0x10: 1000 half
+*			0x20: 1000 full
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_STATUS mvEthPhyAdvertiseSet(MV_U32 phyAddr, MV_U16 advertise)
+{
+	MV_U16 regVal, tmp;
+	/* 10 - 100 */
+	mvEthPhyRegRead(phyAddr, ETH_PHY_AUTONEGO_AD_REG, &regVal);
+
+	regVal = regVal & (~ETH_PHY_10_100_BASE_ADVERTISE_MASK);
+	tmp = (advertise & 0xf);
+	regVal = regVal | (tmp << ETH_PHY_10_100_BASE_ADVERTISE_OFFSET);
+	mvEthPhyRegWrite(phyAddr, ETH_PHY_AUTONEGO_AD_REG, regVal);
+	/* 1000 */
+	mvEthPhyRegRead(phyAddr, ETH_PHY_1000BASE_T_CTRL_REG, &regVal);
+	/* keep only bits 4-5 + shift to the right */
+	tmp = ((advertise >> 4) & 0x3);
+	regVal = regVal & (~ETH_PHY_1000BASE_ADVERTISE_MASK);
+	regVal = regVal | (tmp << ETH_PHY_1000BASE_ADVERTISE_OFFSET);
+	mvEthPhyRegWrite(phyAddr, ETH_PHY_1000BASE_T_CTRL_REG, regVal);
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvEthPhyAdvertiseGet -
+*
+* DESCRIPTION:
+*	Get advertisement mode
+*
+* INPUT:
+*       phyAddr - Phy address.
+*
+* OUTPUT:
+*	advertise -	0x1: 10 half
+*			0x2: 10 full
+*			0x4: 100 half
+*			0x8: 100 full
+*			0x10: 1000 half
+*			0x20: 1000 full
+*
+* RETURN:
+*
+*******************************************************************************/
+MV_STATUS mvEthPhyAdvertiseGet(MV_U32 phyAddr, MV_U16 *advertise)
+{
+	MV_U16 regVal, tmp;
+
+	if (advertise == NULL)
+		return MV_BAD_PARAM;
+
+	/* 10 - 100 */
+	mvEthPhyRegRead(phyAddr, ETH_PHY_AUTONEGO_AD_REG, &regVal);
+	tmp = ((regVal & ETH_PHY_10_100_BASE_ADVERTISE_MASK) >> ETH_PHY_10_100_BASE_ADVERTISE_OFFSET);
+	/* 1000 */
+	mvEthPhyRegRead(phyAddr, ETH_PHY_1000BASE_T_CTRL_REG, &regVal);
+	tmp |= (((regVal & ETH_PHY_1000BASE_ADVERTISE_MASK) >> ETH_PHY_1000BASE_ADVERTISE_OFFSET) << 4);
+	*advertise = tmp;
+
+	return MV_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/phy/mvEthPhy.h b/drivers/net/ethernet/mvebu_net/phy/mvEthPhy.h
new file mode 100644
index 000000000000..f93ef7eabb3f
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/phy/mvEthPhy.h
@@ -0,0 +1,123 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCETHPHYH
+#define __INCETHPHYH
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define MV_PHY_88E3061  0x1      /* E3061, E3081 */
+#define MV_PHY_88E104X  0x2      /* E1040, E1041, E1042 */
+#define MV_PHY_88E10X0  0x4      /* E1000, E1010, E1020 */
+#define MV_PHY_88E10X0S 0x5      /* E1000S, E1010S, E1020S */
+#define MV_PHY_88E1011  0x6      /* E1011, E1011S */
+#define MV_PHY_88E3082  0x8
+#define MV_PHY_88E1112  0x9
+#define MV_PHY_88E1149  0xA
+#define MV_PHY_88E1121  0xB
+#define MV_PHY_88E1111  0xC      /* E1111, E1115 */
+#define MV_PHY_88E114X  0xD
+#define MV_PHY_88E1181  0xE
+#define MV_PHY_88E1340S 0x1C     /* 88E1340S */
+#define MV_PHY_88E1512  0x1D
+#define MV_PHY_88E1340  0x1E     /* 88E1340/x0a */
+#define MV_PHY_88E1543  0x2A     /* 88E15453 */
+#define MV_PHY_88E154X  0x2B     /* 88E1545M */
+#define MV_PHY_88E1340M 0x1F     /* 88E1340M/x0a */
+#define MV_PHY_88E1116R 0x24
+#define MV_PHY_88E1116  0x21     /* E1116, E1116R */
+#define MV_PHY_88E3016_88E3019  0x22     /* E3015, E3016, E3018, 88E3019 */
+#define MV_PHY_88E1240  0x23
+#define MV_PHY_88E1149R 0x25
+#define MV_PHY_88E1119R 0x28    /* 88E1119R */
+#define MV_PHY_88E1310  0x29    /* 88E1310 */
+#define MV_PHY_KW2_INTERNAL_GE		0x2b
+#define MV_PHY_KW2_INTERNAL_3FE		0x26
+#define MV_PHY_ALP_INTERNAL_QUAD_GE	0x0
+
+#define MV_IS_MARVELL_OUI(_reg2, _reg3)		\
+	(((_reg2) == 0x0141) && (((_reg3)&0xFC00) == 0x0C00))
+
+MV_STATUS mvEthPhySmiAddrSet(MV_U32 smi_addr);
+MV_STATUS	mvEthPhyRegRead(MV_U32 phyAddr, MV_U32 regOffs, MV_U16 *data);
+MV_STATUS	mvEthPhyRegPrint(MV_U32 phyAddr, MV_U32 regOffs);
+void		mvEthPhyRegs(int phyAddr);
+MV_STATUS	mvEthPhyRegWrite(MV_U32 phyAddr, MV_U32 regOffs, MV_U16 data);
+MV_STATUS	mvEthPhyReset(MV_U32 phyAddr, int timeout);
+MV_STATUS	mvEthPhyRestartAN(MV_U32 phyAddr, int timeout);
+MV_STATUS	mvEthPhyDisableAN(MV_U32 phyAddr, int speed, int duplex);
+MV_STATUS	mvEthPhyLoopback(MV_U32 phyAddr, MV_BOOL isEnable);
+MV_BOOL		mvEthPhyCheckLink(MV_U32 phyAddr);
+MV_STATUS	mvEthPhyPrintStatus(MV_U32 phyAddr);
+MV_STATUS	mvEthPhyAdvertiseSet(MV_U32 phyAddr, MV_U16 advertise);
+MV_STATUS	mvEthPhyAdvertiseGet(MV_U32 phyAddr, MV_U16 *advertise);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* #ifndef __INCETHPHYH */
diff --git a/drivers/net/ethernet/mvebu_net/phy/mvEthPhyRegs.h b/drivers/net/ethernet/mvebu_net/phy/mvEthPhyRegs.h
new file mode 100644
index 000000000000..e0539b24c197
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/phy/mvEthPhyRegs.h
@@ -0,0 +1,176 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	 specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __INCethphyregsh
+#define __INCethphyregsh
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* defines */
+#define ETH_PHY_TIMEOUT		    10000
+
+/* registers offsetes defines */
+
+/* SMI register fields (ETH_PHY_SMI_REG) */
+
+#define ETH_PHY_SMI_DATA_OFFS	        0 /* Data */
+#define ETH_PHY_SMI_DATA_MASK	        (0xffff << ETH_PHY_SMI_DATA_OFFS)
+
+#define ETH_PHY_SMI_DEV_ADDR_OFFS	    16 /* PHY device address */
+#define ETH_PHY_SMI_DEV_ADDR_MASK       (0x1f << ETH_PHY_SMI_DEV_ADDR_OFFS)
+
+#define ETH_PHY_SMI_REG_ADDR_OFFS	    21 /* PHY device register address */
+#define ETH_PHY_SMI_REG_ADDR_MASK	    (0x1f << ETH_PHY_SMI_REG_ADDR_OFFS)
+
+#define ETH_PHY_SMI_OPCODE_OFFS	        26	/* Write/Read opcode */
+#define ETH_PHY_SMI_OPCODE_MASK	        (3 << ETH_PHY_SMI_OPCODE_OFFS)
+#define ETH_PHY_SMI_OPCODE_WRITE        (0 << ETH_PHY_SMI_OPCODE_OFFS)
+#define ETH_PHY_SMI_OPCODE_READ         (1 << ETH_PHY_SMI_OPCODE_OFFS)
+
+#define ETH_PHY_SMI_READ_VALID_BIT	    27	/* Read Valid  */
+#define ETH_PHY_SMI_READ_VALID_MASK	    (1 << ETH_PHY_SMI_READ_VALID_BIT)
+
+#define ETH_PHY_SMI_BUSY_BIT		    28  /* Busy */
+#define ETH_PHY_SMI_BUSY_MASK		    (1 << ETH_PHY_SMI_BUSY_BIT)
+
+/* PHY registers and bits */
+#define ETH_PHY_CTRL_REG                0
+#define ETH_PHY_STATUS_REG              1
+#define ETH_PHY_AUTONEGO_AD_REG		4
+#define ETH_PHY_LINK_PARTNER_CAP_REG	5
+#define ETH_PHY_1000BASE_T_CTRL_REG	9
+#define ETH_PHY_1000BASE_T_STATUS_REG	10
+#define ETH_PHY_EXTENDED_STATUS_REG	15
+#define ETH_PHY_SPEC_CTRL_REG           16
+#define ETH_PHY_SPEC_STATUS_REG         17
+
+/* ETH_PHY_CTRL_REG bits */
+#define ETH_PHY_CTRL_SPEED_MSB_BIT      6
+#define ETH_PHY_CTRL_SPEED_MSB_MASK     (1 << ETH_PHY_CTRL_SPEED_MSB_BIT)
+
+#define ETH_PHY_CTRL_COLISION_TEST_BIT  7
+#define ETH_PHY_CTRL_COLISION_TEST_MASK (1 << ETH_PHY_CTRL_COLISION_TEST_BIT)
+
+#define ETH_PHY_CTRL_DUPLEX_BIT         8
+#define ETH_PHY_CTRL_DUPLEX_MASK        (1 << ETH_PHY_CTRL_DUPLEX_BIT)
+
+#define ETH_PHY_CTRL_AN_RESTART_BIT     9
+#define ETH_PHY_CTRL_AN_RESTART_MASK    (1 << ETH_PHY_CTRL_AN_RESTART_BIT)
+
+#define ETH_PHY_CTRL_ISOLATE_BIT        10
+#define ETH_PHY_CTRL_ISOLATE_MASK       (1 << ETH_PHY_CTRL_ISOLATE_BIT)
+
+#define ETH_PHY_CTRL_POWER_DOWN_BIT     11
+#define ETH_PHY_CTRL_POWER_DOWN_MASK    (1 << ETH_PHY_CTRL_POWER_DOWN_BIT)
+
+#define ETH_PHY_CTRL_AN_ENABLE_BIT      12
+#define ETH_PHY_CTRL_AN_ENABLE_MASK     (1 << ETH_PHY_CTRL_AN_ENABLE_BIT)
+
+#define ETH_PHY_CTRL_SPEED_LSB_BIT	    13
+#define ETH_PHY_CTRL_SPEED_LSB_MASK	    (1 << ETH_PHY_CTRL_SPEED_LSB_BIT)
+
+#define ETH_PHY_CTRL_LOOPBACK_BIT	    14
+#define ETH_PHY_CTRL_LOOPBACK_MASK	    (1 << ETH_PHY_CTRL_LOOPBACK_BIT)
+
+#define ETH_PHY_CTRL_RESET_BIT          15
+#define ETH_PHY_CTRL_RESET_MASK         (1 << ETH_PHY_CTRL_RESET_BIT)
+
+/* ETH_PHY_STATUS_REG bits */
+#define ETH_PHY_STATUS_AN_DONE_BIT      5
+#define ETH_PHY_STATUS_AN_DONE_MASK     (1 << ETH_PHY_STATUS_AN_DONE_BIT)
+
+/* ETH_PHY_AUTONEGO_AD_REG bits */
+#define ETH_PHY_10_100_BASE_ADVERTISE_OFFSET	5
+#define ETH_PHY_10_100_BASE_ADVERTISE_MASK	(0xf << ETH_PHY_10_100_BASE_ADVERTISE_OFFSET)
+
+/* ETH_PHY_1000BASE_T_CTRL_REG bits */
+#define ETH_PHY_1000BASE_ADVERTISE_OFFSET	8
+#define ETH_PHY_1000BASE_ADVERTISE_MASK		(0x3 << ETH_PHY_1000BASE_ADVERTISE_OFFSET)
+
+/* ETH_PHY_SPEC_STATUS_REG bits */
+#define ETH_PHY_SPEC_STATUS_SPEED_OFFS		14
+#define ETH_PHY_SPEC_STATUS_SPEED_MASK		(0x3 << ETH_PHY_SPEC_STATUS_SPEED_OFFS)
+
+#define ETH_PHY_SPEC_STATUS_SPEED_10MBPS	(0x0 << ETH_PHY_SPEC_STATUS_SPEED_OFFS)
+#define ETH_PHY_SPEC_STATUS_SPEED_100MBPS	(0x1 << ETH_PHY_SPEC_STATUS_SPEED_OFFS)
+#define ETH_PHY_SPEC_STATUS_SPEED_1000MBPS	(0x2 << ETH_PHY_SPEC_STATUS_SPEED_OFFS)
+
+
+#define ETH_PHY_SPEC_STATUS_DUPLEX_BIT		13
+#define ETH_PHY_SPEC_STATUS_DUPLEX_MASK		(0x1 << ETH_PHY_SPEC_STATUS_DUPLEX_BIT)
+
+#define ETH_PHY_SPEC_STATUS_LINK_BIT		10
+#define ETH_PHY_SPEC_STATUS_LINK_MASK		(0x1 << ETH_PHY_SPEC_STATUS_LINK_BIT)
+
+/* ETH_PHY_SPEC_STATUS_REG bits */
+#define ETH_PHY_LED_ACT_LNK_DV              0x4109
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* __INCethphyregsh */
diff --git a/drivers/net/ethernet/mvebu_net/phy/phy_sysfs.c b/drivers/net/ethernet/mvebu_net/phy/phy_sysfs.c
new file mode 100644
index 000000000000..722f0ae4b7a4
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/phy/phy_sysfs.c
@@ -0,0 +1,172 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include "mvCommon.h"
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <linux/mbus.h>
+#include <linux/inetdevice.h>
+#include <linux/interrupt.h>
+
+#include "mvTypes.h"
+
+#include "mvEthPhy.h"
+
+static ssize_t phy_help(char *buf)
+{
+	int off = 0;
+
+	off += sprintf(buf+off, "echo a       > status        - print phy status <a-phy address>.\n");
+	off += sprintf(buf+off, "echo a r     > read_reg      - read phy <a-phy address> register <r-hex>\n");
+	off += sprintf(buf+off, "echo a r v   > write_reg     - write value <v-hex> to  phy (a-phy address) register <r-hex>\n");
+	off += sprintf(buf+off, "echo a       > restart_an    - restart phy <a-phy address> Auto-Negotiation.\n");
+
+	return off;
+}
+
+static ssize_t phy_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	int          err = 0;
+	const char   *name = attr->attr.name;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "help"))
+		return phy_help(buf);
+	else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+	return err;
+}
+
+static ssize_t phy_store_hex(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0, p = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%x", &p);
+
+
+	if (!strcmp(name, "status")) {
+		err = mvEthPhyPrintStatus(p);
+	} else if (!strcmp(name, "restart_an")) {
+		err = mvEthPhyRestartAN(p, 0 /* time out */);
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	return err ? -EINVAL : len;
+}
+
+
+static ssize_t phy_3_hex_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, reg, val;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	err = p = reg = val = 0;
+	sscanf(buf, "%x %x %x", &p, &reg, &val);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "read_reg")) {
+		err = mvEthPhyRegPrint(p, reg);
+	} else if (!strcmp(name, "write_reg")) {
+		err = mvEthPhyRegWrite(p, reg, val);
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	return err ? -EINVAL : len;
+}
+
+
+
+static DEVICE_ATTR(status, S_IWUSR, NULL, phy_store_hex);
+static DEVICE_ATTR(restart_an, S_IWUSR, NULL, phy_store_hex);
+static DEVICE_ATTR(read_reg, S_IWUSR, NULL, phy_3_hex_store);
+static DEVICE_ATTR(write_reg, S_IWUSR, NULL, phy_3_hex_store);
+static DEVICE_ATTR(help,   S_IRUSR, phy_show, NULL);
+
+
+static struct attribute *phy_attrs[] = {
+	&dev_attr_status.attr,
+	&dev_attr_read_reg.attr,
+	&dev_attr_write_reg.attr,
+	&dev_attr_restart_an.attr,
+	&dev_attr_help.attr,
+	NULL
+};
+
+static struct attribute_group phy_group = {
+	.name = "mv_phy",
+	.attrs = phy_attrs,
+};
+
+int __init phy_sysfs_init(void)
+{
+		int err;
+		struct device *pd;
+
+		pd = &platform_bus;
+
+		err = sysfs_create_group(&pd->kobj, &phy_group);
+		if (err) {
+			printk(KERN_INFO "sysfs group failed %d\n", err);
+			goto out;
+		}
+out:
+		return err;
+}
+
+module_init(phy_sysfs_init);
+
+MODULE_AUTHOR("Uri Eliyahu");
+MODULE_DESCRIPTION("Phy sysfs commands");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/mvebu_net/pp2/.gitignore b/drivers/net/ethernet/mvebu_net/pp2/.gitignore
new file mode 100644
index 000000000000..60319acce7d0
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/.gitignore
@@ -0,0 +1,96 @@
+
+#
+# NOTE! Don't add files that are generated in specific
+# subdirectories here. Add them in the ".gitignore" file
+# in that subdirectory instead.
+#
+# NOTE! Please use 'git ls-files -i --exclude-standard'
+# command after changing this file, to see if there are
+# any tracked files which get ignored after the change.
+#
+# Normal rules
+#
+.*
+*.o
+*.o.*
+*.a
+*.s
+*.ko
+*.so
+*.so.dbg
+*.mod.c
+*.i
+*.lst
+*.symtypes
+*.order
+modules.builtin
+*.elf
+*.bin
+*.gz
+*.bz2
+*.lzma
+*.xz
+*.lzo
+*.patch
+*.gcno
+
+#
+#
+#
+arch/arm/mach-armadaxp/armada_xp_family/
+arch/arm/mach-avantalp/avanta_lp_family/
+arch/arm/plat-armada/common/
+arch/arm/plat-armada/mv_hal/
+arch/arm/plat-armada/mv_drivers_lsp/mv_pp2/
+
+#
+# Top-level generic files
+#
+/tags
+/TAGS
+/linux
+/vmlinux
+/vmlinuz
+/System.map
+/Module.markers
+/Module.symvers
+
+#
+# Debian directory (make deb-pkg)
+#
+/debian/
+
+#
+# git files that we don't want to ignore even it they are dot-files
+#
+!.gitignore
+!.mailmap
+
+#
+# Generated include files
+#
+include/config
+include/linux/version.h
+include/generated
+arch/*/include/generated
+
+# stgit generated dirs
+patches-*
+
+# quilt's files
+patches
+series
+
+# cscope files
+cscope.*
+ncscope.*
+
+# gnu global files
+GPATH
+GRTAGS
+GSYMS
+GTAGS
+
+*.orig
+*~
+\#*#
diff --git a/drivers/net/ethernet/mvebu_net/pp2/Kconfig b/drivers/net/ethernet/mvebu_net/pp2/Kconfig
new file mode 100644
index 000000000000..6fbc50c60cca
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/Kconfig
@@ -0,0 +1,498 @@
+config MV_ETH_PP2_1
+	bool "Support PPv2.1 version (A0)"
+	default n
+	---help---
+	PPv2.1 support (Avanta-LP A0)
+	PPv2.1 enable various features such as:
+	* Qsets support
+	* TX descriptors chunks mechanism
+	* See PPv2.1 MAS for more info
+
+config MV_PP2_HWF
+	bool "Enable PPv2 Harware Forwarding"
+	default y
+	---help---
+	Enable Hardware:
+	Configuration is changable in Run-Time
+
+config MV_ETH_PP2_CLS2
+	bool
+	default y
+	---help---
+
+config MV_ETH_PP2_CLS3
+	bool
+	default y
+	---help---
+
+config MV_ETH_PP2_CLS4
+	bool
+	default y
+	---help---
+
+config MV_ETH_PP2_CLS_MC
+	bool
+	default y
+	---help---
+
+menu "PP2 BM configuration"
+
+choice
+	prompt "PP2 BM pool assignment mode"
+	default MV_PP2_BM_SWF_HWF_MODE
+	---help---
+	Determines how BM pools are divided among GBE ports
+
+config MV_PP2_BM_SWF_HWF_MODE
+	bool "SWF / HWF seperation"
+	---help---
+	BM pools are not shared between SWF and HWF.
+	Short pools are shared between all ports.
+	When BM pool is used only by HWF, then SW can allocate smaller buffers for same packet size
+	Configuration is changable in Run-Time
+
+config MV_PP2_BM_PER_PORT_MODE
+	bool "Per Port"
+	---help---
+	BM pools are shared beteen HWF and SWF.
+	BM pools are not shared between different ports.
+	When BM pool is used only by HWF, then SW can allocate smaller buffers for same packet size
+	Configuration is changable in Run-Time
+
+endchoice
+
+config MV_PP2_BM_LONG_BUF_NUM
+	int "number of buffers for PP2 BM long pool"
+	default 1024
+	---help---
+	The number of long buffers is relevant for all long BM pools
+
+config MV_PP2_BM_SHORT_BUF_NUM
+	int "number of buffers for PP2 BM short pool"
+	default 2048
+	---help---
+	The number of short buffers is relevant for all short BM pools
+
+endmenu
+
+menu "PP2 Rx/Tx Queue configuration"
+
+config MV_PP2_RXQ
+	int "Number of RX queues per port"
+	default 8
+	---help---
+	Multiple RX queue support.
+
+config  MV_PP2_TXQ
+	int "Number of TX queues per port"
+	default 8
+	---help---
+	Multiple TX queue support.
+
+config MV_PP2_RXQ_DESC
+	int "Number of Rx descriptors"
+	depends on MV_ETH_PP2
+	default 256
+	---help---
+	The number of Rx descriptors in each Rx queue.
+
+config MV_PP2_RXQ_DEF
+        int "Default RXQ to recieve packets"
+        default 0
+        ---help---
+
+config MV_PP2_TXQ_DESC
+	int "Number of Tx descriptors"
+	depends on MV_ETH_PP2
+	default 1024
+        ---help---
+	The number of Tx descriptors in each Tx queue.
+
+config MV_PP2_TXQ_DEF
+        int "Default TXQ to send local generated packets"
+        default 0
+        ---help---
+
+config MV_PP2_TXQ_CPU_CHUNK
+	int "Number of TX descriptors allocated per CPU"
+	depends on MV_ETH_PP2_1
+	default 64
+        ---help---
+	The number of TXQ descriptors each CPU will allocate each time when
+	no enough allocated descriptors to transmit the current packet.
+	The total number of TXQ descriptors (MV_PP2_TXQ_DESC) must be at least
+	3 * nr_cpu_ids * MV_PP2_TXQ_CPU_CHUNK
+
+config MV_PP2_TXQ_HWF_DESC
+	int "Number of HWF Tx descriptors"
+	depends on (MV_ETH_PP2 && !MV_ETH_PP2_1)
+	default 16
+        ---help---
+	The number of HWF dedicated Tx descriptors in each Tx queue.
+
+config MV_PP2_AGGR_TXQ_SIZE
+	int "Number of aggregated Tx descriptors"
+	depends on MV_ETH_PP2
+	default 256
+        ---help---
+	The number of Tx descriptors in each aggregated Tx queue.
+
+config MV_PP2_TEMP_TXQ_SIZE
+	int "Number of temporary Txq descriptors (for switching between HWF and SWF)"
+	depends on (MV_ETH_PP2 && MV_PP2_HWF)
+	default 512
+        ---help---
+
+config MV_PP2_TEMP_TXQ_HWF_SIZE
+	int "Number of temporary Txq HWF descriptors (for switching between HWF and SWF)"
+	depends on (MV_ETH_PP2 && MV_PP2_HWF)
+	default 256
+        ---help---
+
+endmenu
+
+menu "PP2 IP/TCP/UDP Offloading"
+
+config  MV_PP2_TSO
+	bool "TSO Support for Marvell network interface"
+	default y
+	---help---
+	Marvell network driver compiled with TSO (TCP Segmentation Offload) support.
+	Configuration is changable in Run-Time
+
+endmenu
+
+menu "PP2 Control and Statistics"
+
+config  MV_PP2_DEBUG_CODE
+	bool "Add run-time debug code"
+	default n
+	---help---
+	Enable debug code blocks in key places of PP2 driver.
+	By default the debug code do nothing but can be activated in run-time
+	by sysfs command under directory: "/sys/devices/platform/neta"
+	"echo [p] [hex]   > debug  - b0:rx, b1:tx, b2:isr, b3:poll, b4:dump, b5:b_hdr"
+
+config  MV_PP2_STAT_ERR
+        bool "Collect error statistics"
+        default y
+	---help---
+	Marvell network interface driver collect minimal number of statistics.
+	Only for error conditions. Can be displayed using mv_eth_tool.
+
+config  MV_PP2_STAT_INF
+        bool "Collect event statistics"
+        default y
+        ---help---
+	Marvell network interface driver collect event statistics.
+	Provide more information about driver functionality and almost doesn't
+	effect performance. Can be displayed using mv_eth_tool.
+
+config  MV_PP2_STAT_DBG
+        bool "Collect debug statistics"
+        default n
+        ---help---
+	Marvell network interface driver collect a lot of statistics.
+	Used for Debug mode. Decrease performance. Can be displayed using mv_eth_tool.
+
+config  MV_PP2_STAT_DIST
+        bool "Collect debug distribution statistics"
+        default n
+        ---help---
+        Marvell network interface driver collect a lot of statistics.
+        Used for Debug mode. Decrease performance. Can be displayed using mv_eth_tool.
+
+endmenu
+
+menu "Advanced Features"
+
+config MV_PP2_SKB_RECYCLE
+	depends on NET_SKB_RECYCLE
+	bool "PP2 Skb recycle"
+	default y
+	---help---
+	Work-in-progress and experimental.
+
+	This option enables skb's to be returned via a callback at kfree to
+	the allocator to make a fastpath for very skb consuming network
+	applications.
+
+config MV_PP2_SKB_RECYCLE_DEF
+	depends on MV_PP2_SKB_RECYCLE
+	int "Default value for SKB recycle:  0 - disable, 1 - enable"
+	default 1
+	---help---
+	Default value for skb recycle capability.
+
+config MV_PP2_TXDONE_PROCESS_METHOD
+	bool "TX_DONE event process method"
+	default y
+	help
+	  It's used for choosing TX_DONE event process method
+	  MV_PP2_TXDONE_ISR means processing TX_DONE event in interrupt mode
+	  MV_PP2_TXDONE_IN_TIMER means using regular timer to process TX_DONE event in polling mode
+	  MV_PP2_TXDONE_IN_HRTIMER means using high-resolution timer to process TX_DONE event in polling mode
+
+choice
+	prompt "TX_DONE event process method"
+	depends on MV_PP2_TXDONE_PROCESS_METHOD
+	default MV_PP2_TXDONE_IN_HRTIMER
+
+	config  MV_PP2_TXDONE_ISR
+		bool "Use interrupt to process TX_DONE event"
+		---help---
+		When chosen TX_DONE event will be process in interrupt mode
+		When unchosen TX_DONE event will be processed in polling mode
+
+	config MV_PP2_TXDONE_IN_TIMER
+		bool "Use regular timer to process TX_DONE event"
+		---help---
+		When chosen TX_DONE event will be process by regular timer in polling mode.
+
+	config MV_PP2_TXDONE_IN_HRTIMER
+		depends on HIGH_RES_TIMERS
+		bool "Use high resolution timer to process TX_DONE event"
+		---help---
+		When chosen TX_DONE event will be process by high resolution timer in polling mode.
+		High resolution timer can support higher precision in ns level.
+		If high resolution timer is enabled, TX processing
+		can free SKB memory much faster.
+
+endchoice
+
+config MV_PP2_TX_DONE_HIGH_RES_TIMER_PERIOD
+	depends on MV_PP2_TXDONE_IN_HRTIMER
+        int "Periodical Tx Done high resolution timer period"
+        default 1000
+        range 10 10000
+        ---help---
+        Periodical high resolution timer period for Tx Done operation in [usec]
+        Its precision is much higher than regular timer whose higest precision is 10 msec
+        Tx done high resolution timer's higest precison is 10 usec
+        Must be larger than or equal to 10 and be smaller than or equal to 10000.
+
+config  MV_PP2_TX_DONE_TIMER_PERIOD
+	depends on MV_PP2_TXDONE_IN_TIMER
+        int "Periodical Tx Done timer period"
+        default 10
+        ---help---
+          Periodical timer period for Tx Done operation in [msec].
+
+
+config MV_PP2_TXDONE_COAL_PKTS
+	int "Threshold for TX_DONE event trigger"
+	default 16
+	---help---
+	Number of packets will be sent before TX_DONE event will be triggered
+	by interrupt or polling.
+
+config MV_PP2_RX_COAL_PKTS
+        int "Threshold [number of packets] for RX interrupt"
+        default 32
+        ---help---
+        Number of packets will be received before RX interrupt will be generated by HW.
+
+config MV_PP2_RX_COAL_USEC
+        int "Threshold [usec] for RX interrupt"
+        default 100
+        ---help---
+        Time delay in usec before RX interrupt will be generated by HW if number of
+	received packets larger than 0 but smaller than MV_ETH_RX_COAL_PKTS
+
+config MV_PP2_RX_DESC_PREFETCH
+	bool "Enable RX descriptor prefetch"
+	default n
+	---help---
+	Default value for RX descriptor prefetch.
+	When enabled PP2 driver uses "pld" instruction to prefetch one RX descriptor ahead.
+	This feature can be enabled/disabled in run-time by sysfs command under directory:
+	"/sys/devices/platform/neta/rx".
+	"echo [p] [m] > prefetch    - set RX prefetch mode for port [p]"
+	where [m]: 0-disable, 1-descriptor, 2-packet header, 3-both
+
+config MV_PP2_RX_PKT_PREFETCH
+	bool "Enable RX packet prefetch"
+	default n
+	---help---
+	Default value for first two cache lines of received packet prefetch.
+	When enabled PP2 driver uses "pld" instruction to prefetch first two cache lines
+	of received packet data.
+	This feature can be enabled/disabled in run-time by sysfs command under directory:
+	"/sys/devices/platform/neta/rx".
+	"echo [p] [m] > prefetch    - set RX prefetch mode for port [p]"
+	where [m]: 0-disable, 1-descriptor, 2-packet header, 3-both
+
+config MV_PP2_RX_SPECIAL
+	bool "Support special RX processing"
+	default n
+	---help---
+	Add source code for special RX processing of packets marked by parser.
+	To enable this feature two additional configurations must be done.
+	1. Parser must be configured to mark packets which need special processing.
+	2. Callback function for special RX processing must be registered (per port).
+	If unsure, say N.
+
+config MV_PP2_TX_SPECIAL
+	bool "Support special TX processing"
+	default n
+	---help---
+	Add source code for special TX processing of packets per egress port.
+	To enable this feature callback function for special TX processing
+	must be registered (per port).
+	If unsure, say N.
+
+config MV_PP2_L2FW
+	bool "L2 Forwarding support"
+	default n
+	---help---
+	Enable L2 Forwarding support for received packets.
+	Three modes are supported: Send packet without change, Swap MAC DA<->SA,
+	Copy the whole packet and swap MAC
+
+config MV_PP2_L2FW_XOR
+        bool "L2 Forwarding XOR support"
+        depends on MV_PP2_L2FW && MV_INCLUDE_XOR
+        default n
+        ---help---
+        Enable using XOR engine to copy ingress packets during L2FW processing.
+	If enabled XOR engine will be used to copy packet when packet size is
+	larger than XOR threshold (default value is 2000 bytes).
+	XOR threshold can be changes using sysfs command.
+
+config MV_PP2_L2SEC
+	bool "L2 Forwarding IPSec support"
+	depends on MV_PP2_L2FW
+	default n
+	---help---
+	Handle encrypted packets with CESA.
+
+config MV_PP2_L2FW_DEBUG
+	depends on (MV_PP2_L2FW && MV_PP2_DEBUG_CODE)
+	bool "Add run-time L2FW debug code"
+	default n
+	---help---
+	Enable L2FW run-time enable/disable enter debug code blocks
+
+config MV_PP2_RX_POLL_WEIGHT
+	int "poll weight for the RX poll() function"
+	default 64
+	range 1 255
+	---help---
+	poll weight for the RX poll() function; must be less or equal to 255
+
+config MV_PP2_EXTRA_BUF_SIZE
+	int "Extra buffer size in bytes"
+	default 120
+	range 120 16384
+	---help---
+	Size of buffers allocated for extra pool and used in special cases like TSO,
+	fragmentattion and others
+
+config MV_PP2_EXTRA_BUF_NUM
+        int "Number of extra buffers allocated for each port"
+        default MV_PP2_TXQ_DESC
+	---help---
+	Number of extra buffers allocated for each port
+endmenu
+
+menu "PON support for Network driver"
+
+config MV_PP2_PON
+	bool "PP2 PON support"
+	depends on MV_ETH_PP2 && MV_INCLUDE_PON
+	---help---
+	Choose this option to support PON port in Marvell network driver.
+
+config MV_PP2_PON_TXP_DEF
+	int "Default T-CONT to send local generated packets"
+	depends on MV_PP2_PON
+	default 0
+	---help---
+	Define default T-CONT to send local generated packets
+
+config MV_PP2_PON_TXQ_DEF
+	int "Default TXQ to send local generated packets"
+	depends on MV_PP2_PON
+	default 0
+	---help---
+	Define default TXQ to send local generated packets
+
+endmenu
+
+menu "PP2 ERRATA / WA"
+
+config MV_PP2_SWF_HWF_CORRUPTION_WA
+        bool "Prevent data corruption in IOCC mode"
+        depends on (AURORA_IO_CACHE_COHERENCY && MV_PP2_HWF)
+        default y
+        ---help---
+	Enable this feature to avoid data corruption in IOCC mode
+	when HWF and SWF traffic use buffers from the same BM pools.
+
+endmenu
+
+menu "SoC CPH support"
+
+config  MV_CPH
+        tristate "Support for Marvell CPU Packet Handler Driver"
+        depends on MV_PP2_TX_SPECIAL
+        default n
+        ---help---
+        CPH is designed mainly for PON product (GPON/EPON),
+        which basically has two features:
+        1. Set special skb->protocol value based on user configure rules.
+        2. Forward SWF packets to special TCONT and GEM port (GPON), LLID (EPON).
+
+comment "CPH Driver Options"
+
+config  MV_CPH_IGMP_HANDLE
+        bool "Enable MV_CPH IGMP handling"
+	depends on MV_CPH
+        default n
+        ---help---
+        Enable CPH to handle IGMP as protocol packet,
+        and setspecial skb->protocol value based on
+        user configure rules. So it could be trapped
+        by RAW SOCKET.
+
+config  MV_CPH_MLD_HANDLE
+        bool "Enable MV_CPH MLD handling"
+	depends on MV_CPH
+        default n
+        ---help---
+        Enable CPH to handle MLD as protocol packet,
+        and set special skb->protocol value based
+        on user configure rules. So it could be
+        trapped by RAW SOCKET.
+
+config  MV_CPH_BC_HANDLE
+        bool "Enable MV_CPH broadcast handling"
+	depends on MV_CPH
+        default n
+        ---help---
+        Enable CPH to handle Broadcast as protocol
+        packet, and set special skb->protocol value
+        based on user configure rules.So it could be
+        trapped by RAW SOCKET
+
+config  MV_CPH_UDP_SAMPLE_HANDLE
+        bool "Enable MV_CPH sample UDP handling"
+	depends on MV_CPH
+        default n
+        ---help---
+        Enable CPH to handle sample UDP as protocol
+        packet, and set special skb->protocol value
+        based on user configure rules. So it could be
+        trapped by RAW SOCKET.
+
+config  MV_CPH_FLOW_MAP_HANDLE
+        bool "Enable MV_CPH flow mapping handling"
+	depends on MV_CPH
+        default n
+        ---help---
+        Enable CPH to handle flow mapping, and forward
+        upstream SWF packets to special TCONT and
+        GEM port (GPON), LLID (EPON) based on user
+        configure rules.
+
+endmenu
diff --git a/drivers/net/ethernet/mvebu_net/pp2/Makefile b/drivers/net/ethernet/mvebu_net/pp2/Makefile
new file mode 100644
index 000000000000..7d98b7876d8e
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/Makefile
@@ -0,0 +1,98 @@
+#
+# Makefile for the Marvell Gigabit Ethernet driver
+#
+
+ifeq ($(CONFIG_ARCH_MVEBU),y)
+PP2_HAL_DIR = hal
+else
+PP2_HAL_DIR = ../../mv_hal/pp2
+endif
+
+PP2_GMAC_DIR  = $(PP2_HAL_DIR)/gmac
+PP2_GBE_DIR   = $(PP2_HAL_DIR)/gbe
+PP2_BM_DIR    = $(PP2_HAL_DIR)/bm
+PP2_PRS_DIR   = $(PP2_HAL_DIR)/prs
+PP2_CLS_DIR   = $(PP2_HAL_DIR)/cls
+PP2_PME_DIR   = $(PP2_HAL_DIR)/pme
+PP2_PLCR_DIR  = $(PP2_HAL_DIR)/plcr
+PP2_WOL_DIR   = $(PP2_HAL_DIR)/wol
+PP2_DPI_DIR   = $(PP2_HAL_DIR)/dpi
+PP2_COMMON_DIR= $(PP2_HAL_DIR)/common
+PP2_CPH_DIR   = $(PP2_HAL_DIR)/cph
+
+mv_pp2-objs +=	$(PP2_GMAC_DIR)/mvEthGmacApi.o
+mv_pp2-objs +=	$(PP2_GBE_DIR)/mvPp2Gbe.o $(PP2_GBE_DIR)/mvPp2GbeDebug.o
+#mv_pp2-objs +=  $(PP2_GBE_DIR)/mvPp2AddrDec.o
+mv_pp2-objs +=	$(PP2_BM_DIR)/mvBm.o
+mv_pp2-objs += 	$(PP2_PRS_DIR)/mvPp2PrsHw.o $(PP2_PRS_DIR)/mvPp2Prs.o
+mv_pp2-objs += 	$(PP2_CLS_DIR)/mvPp2ClsHw.o $(PP2_CLS_DIR)/mvPp2Cls2Hw.o \
+		$(PP2_CLS_DIR)/mvPp2Cls3Hw.o $(PP2_CLS_DIR)/mvPp2Cls4Hw.o \
+		$(PP2_CLS_DIR)/mvPp2ClsMcHw.o $(PP2_CLS_DIR)/mvPp2Classifier.o
+mv_pp2-objs += 	$(PP2_PME_DIR)/mvPp2PmeHw.o
+mv_pp2-objs += 	$(PP2_PLCR_DIR)/mvPp2PlcrHw.o
+mv_pp2-objs +=	$(PP2_BM_DIR)/mvBm.o
+mv_pp2-objs +=  $(PP2_WOL_DIR)/mvPp2Wol.o
+mv_pp2-objs +=  $(PP2_DPI_DIR)/mvPp2DpiHw.o
+mv_pp2-objs +=  $(PP2_COMMON_DIR)/mvPp2Common.o
+
+mv_pp2-objs += net_dev/mv_netdev.o net_dev/mv_ethernet.o net_dev/mv_eth_sysfs.o net_dev/mv_eth_tool.o
+mv_pp2-objs += net_dev/mv_eth_rx_sysfs.o net_dev/mv_eth_tx_sysfs.o net_dev/mv_eth_tx_sched_sysfs.o
+mv_pp2-objs += net_dev/mv_eth_pme_sysfs.o net_dev/mv_eth_pon_sysfs.o
+mv_pp2-objs += net_dev/mv_eth_bm_sysfs.o net_dev/mv_eth_qos_sysfs.o net_dev/mv_eth_dbg_sysfs.o
+
+ifeq ($(CONFIG_MV_PP2_HWF),y)
+mv_pp2-objs += net_dev/mv_eth_hwf_sysfs.o
+endif
+
+mv_pp2-objs += prs/prs_low_sysfs.o prs/prs_high_sysfs.o
+mv_pp2-objs += cls/cls_sysfs.o cls/cls2_sysfs.o cls/cls3_sysfs.o cls/cls4_sysfs.o cls/cls_mc_sysfs.o
+mv_pp2-objs += pme/pme_sysfs.o
+mv_pp2-objs += plcr/plcr_sysfs.o
+mv_pp2-objs += wol/wol_sysfs.o
+mv_pp2-objs += dpi/dpi_sysfs.o
+
+ifeq ($(CONFIG_MV_PP2_L2FW),y)
+mv_pp2-objs += l2fw/l2fw_sysfs.o l2fw/mv_eth_l2fw.o
+endif
+
+ifeq ($(CONFIG_MV_CPH),y)
+mv_pp2-objs += cph/mv_cph_api.o
+mv_pp2-objs += cph/mv_cph_app.o
+mv_pp2-objs += cph/mv_cph_db.o
+mv_pp2-objs += cph/mv_cph_dev.o
+mv_pp2-objs += cph/mv_cph_flow.o
+mv_pp2-objs += cph/mv_cph_infra.o
+mv_pp2-objs += cph/mv_cph_mod.o
+mv_pp2-objs += cph/mv_cph_netdev.o
+mv_pp2-objs += cph/mv_cph_sysfs.o
+endif
+
+ifeq ($(CONFIG_ARCH_MVEBU),y)
+
+ccflags-y       += $(MVEBU_NET_FLAGS)
+
+ccflags-y	+= $(INCLUDE_DIRS)
+
+ccflags-y       += -I$(PLAT_DIR)/pp2
+ccflags-y       += -I$(PLAT_DIR)/pp2/hal
+
+else
+
+ifneq ($(MACHINE),)
+include $(srctree)/$(MACHINE)/config/mvRules.mk
+endif
+
+ccflags-y       += -I$(PLAT_PATH_I)/$(HAL_PP2_DIR)
+ccflags-y       += -I$(PLAT_PATH_I)/$(HAL_ETHPHY_DIR)
+ccflags-y       += -I$(PLAT_PATH_I)/$(LSP_MUX_DIR)
+endif
+
+ifeq ($(NETMAP),y)
+ccflags-y       += -DCONFIG_NETMAP -I$(NETMAP_DIR) -I$(NETMAP_DIR)/../sys
+endif
+
+ifeq ($(CONFIG_NETMAP),y)
+ccflags-y       += -Inet/netmap
+endif
+
+obj-$(CONFIG_MV_ETH_PP2) += mv_pp2.o
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cls/cls2_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/cls/cls2_sysfs.c
new file mode 100644
index 000000000000..92dd4472938e
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cls/cls2_sysfs.c
@@ -0,0 +1,348 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include "mvOs.h"
+#include "mvCommon.h"
+#include "cls/mvPp2Cls2Hw.h"
+
+
+static MV_PP2_CLS_C2_QOS_ENTRY		qos_entry;
+static MV_PP2_CLS_C2_ENTRY		act_entry;
+
+
+
+
+static ssize_t mv_cls_help(char *buf)
+{
+	int off = 0;
+
+	off += scnprintf(buf + off, PAGE_SIZE, "cat  qos_sw_dump  - dump QoS table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "cat  prio_hw_dump - dump all QoS priority tables from HW.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "cat  dscp_hw_dump - dump all QoS dscp tables from HW.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "cat  act_sw_dump  - dump action table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "cat  act_hw_dump  - dump all action table enrties from HW.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "cat  hw_regs      - dump classifier C2 registers.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "cat  cnt_dump     - dump all hit counters that are not zeroed.\n");
+
+	off += scnprintf(buf + off, PAGE_SIZE, "\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo 1             > qos_sw_clear           - clear QoS table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo 1             > act_sw_clear           - clear action table SW entry.\n");
+
+	off += scnprintf(buf + off, PAGE_SIZE, "\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo id s ln       > qos_hw_write           - write QoS table SW entry into HW <id,s,ln>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo id s ln       > qos_hw_read            - read QoS table entry from HW <id,s,ln>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo prio          > qos_sw_prio            - set priority <prio> value to QoS table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo dscp          > qos_sw_dscp            - set DSCP <dscp> value to QoS table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo color         > qos_sw_color           - set color value to QoS table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo id            > qos_sw_gemid           - set GemPortId <id> value to QoS table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo q             > qos_sw_queue           - set queue number <q> value to QoS table SW entry.\n");
+
+	off += scnprintf(buf + off, PAGE_SIZE, "\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo idx           > act_hw_write           - write action table SW entry into HW <idx>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo idx           > act_hw_read            - read action table entry from HW <idx> into SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo idx           > act_hw_inv             - invalidate C2 entry <idx> in hw.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo               > act_hw_inv_all         - invalidate all C2 entries in HW.\n");
+
+	off += scnprintf(buf + off, PAGE_SIZE, "echo o d m         > act_sw_byte            - set byte <d,m> to TCAM offset <o> to action table SW entry.\n");
+
+	off += scnprintf(buf + off, PAGE_SIZE, "echo id sel        > act_sw_qos             - set QoS table <id,sel> to action table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd from      > act_sw_color           - set color command <cmd> to action table SW.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                              <from> - source for color command.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd prio from > act_sw_prio            - set priority command <cmd> and value <prio> to action\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                              table SW entry. <from> - source for priority command.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd prio from > act_sw_dscp            - set DSCP command <cmd> and value <dscp> to action\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                              table SW entry. <from> - source for DSCP command.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd gpid from > act_sw_gpid            - set GemPortID command <cmd> and value <gpid> to action\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                              table SW entry. <from> - source for GemPortID command.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd q from    > act_sw_qh              - set queue high command <cmd> and value <q> to action\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                              table software entry. <from>-source for Queue High command.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd q from    > act_sw_ql              - set queue low command <cmd> and value <q> to action\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                              table software entry. <from> -source for Queue Low command.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd q from    > act_sw_queue           - set full queue command <cmd> and value <q> to action\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                              table software entry.  <from> -source for Queue command.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd           > act_sw_hwf             - set Forwarding command <cmd> to action table SW entry.\n");
+#ifdef CONFIG_MV_ETH_PP2_1
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd id bank   > act_sw_pol             - set PolicerID command <cmd> bank and number <id> to action table SW entry.\n");
+#else
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd id        > act_sw_pol             - set PolicerID command <cmd> and number <id> to action table SW entry.\n");
+#endif
+	off += scnprintf(buf + off, PAGE_SIZE, "echo en            > act_sw_flowid          - set FlowID enable/disable <1/0> to action table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo d i cs        > act_sw_mdf             - set modification parameters to action table SW entry\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                              data pointer <d>, instruction pointrt <i>,\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                              <cs> enable L4 checksum generation.\n");
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	off += scnprintf(buf + off, PAGE_SIZE, "echo idx           > act_sw_mtu             - set MTU index to action table SW entry\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo miss id       > act_sw_sq              - set miss bit and instruction ID to action table SW entry\n");
+#endif
+
+/*TODO ppv2.1: ADD sysfs command for mvPp2ClsC2SeqSet */
+	off += scnprintf(buf + off, PAGE_SIZE, "echo id cnt        > act_sw_dup             - set packet duplication parameters <id,cnt> to action table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo               > cnt_clr_all            - clear all hit counters from action tabe.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo idx           > cnt_read               - show hit counter for action table entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "\n");
+	return off;
+}
+
+
+static ssize_t mv_cls_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	const char      *name = attr->attr.name;
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "qos_sw_dump"))
+		off += mvPp2ClsC2QosSwDump(&qos_entry);
+	else if (!strcmp(name, "prio_hw_dump"))
+		off += mvPp2ClsC2QosPrioHwDump();
+	else if (!strcmp(name, "dscp_hw_dump"))
+		off += mvPp2ClsC2QosDscpHwDump();
+	else if (!strcmp(name, "act_sw_dump"))
+		off += mvPp2ClsC2SwDump(&act_entry);
+	else if (!strcmp(name, "act_hw_dump"))
+		off += mvPp2ClsC2HwDump();
+	else if (!strcmp(name, "cnt_dump"))
+		off += mvPp2ClsC2HitCntrsDump();
+	else if (!strcmp(name, "hw_regs"))
+		off += mvPp2ClsC2RegsDump();
+	else
+		off += mv_cls_help(buf);
+
+	return off;
+}
+
+
+static ssize_t mv_cls_store(struct device *dev,
+				struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0, a = 0, b = 0, c = 0, d = 0, e = 0;
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%x %x %x %x %x", &a, &b, &c, &d, &e);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "act_hw_inv_all"))
+		mvPp2ClsC2HwInvAll();
+	else if (!strcmp(name, "act_hw_inv"))
+		mvPp2ClsC2HwInv(a);
+	else if (!strcmp(name, "qos_sw_clear"))
+		mvPp2ClsC2QosSwClear(&qos_entry);
+	else if (!strcmp(name, "qos_hw_write"))
+		mvPp2ClsC2QosHwWrite(a, b, c, &qos_entry);
+	else if (!strcmp(name, "qos_hw_read"))
+		mvPp2ClsC2QosHwRead(a, b, c, &qos_entry);
+	else if (!strcmp(name, "qos_sw_prio"))
+		mvPp2ClsC2QosPrioSet(&qos_entry, a);
+	else if (!strcmp(name, "qos_sw_dscp"))
+		mvPp2ClsC2QosDscpSet(&qos_entry, a);
+	else if (!strcmp(name, "qos_sw_color"))
+		mvPp2ClsC2QosColorSet(&qos_entry, a);
+	else if (!strcmp(name, "qos_sw_gemid"))
+		mvPp2ClsC2QosGpidSet(&qos_entry, a);
+	else if (!strcmp(name, "qos_sw_queue"))
+		mvPp2ClsC2QosQueueSet(&qos_entry, a);
+	else if (!strcmp(name, "act_sw_clear"))
+		mvPp2ClsC2SwClear(&act_entry);
+	else if (!strcmp(name, "act_hw_write"))
+		mvPp2ClsC2HwWrite(a, &act_entry);
+	else if (!strcmp(name, "act_hw_read"))
+		mvPp2ClsC2HwRead(a, &act_entry);
+	else if (!strcmp(name, "act_sw_byte"))
+		mvPp2ClsC2TcamByteSet(&act_entry, a, b, c);
+	else if (!strcmp(name, "act_sw_qos"))
+		mvPp2ClsC2QosTblSet(&act_entry, a, b);
+	else if (!strcmp(name, "act_sw_color"))
+		mvPp2ClsC2ColorSet(&act_entry, a, b);
+	else if (!strcmp(name, "act_sw_prio"))
+		mvPp2ClsC2PrioSet(&act_entry, a, b, c);
+	else if (!strcmp(name, "act_sw_dscp"))
+		mvPp2ClsC2DscpSet(&act_entry, a, b, c);
+	else if (!strcmp(name, "act_sw_gpid"))
+		mvPp2ClsC2GpidSet(&act_entry, a, b, c);
+	else if (!strcmp(name, "act_sw_qh"))
+		mvPp2ClsC2QueueHighSet(&act_entry, a, b, c);
+	else if (!strcmp(name, "act_sw_ql"))
+		mvPp2ClsC2QueueLowSet(&act_entry, a, b, c);
+	else if (!strcmp(name, "act_sw_queue"))
+		mvPp2ClsC2QueueSet(&act_entry, a, b, c);
+	else if (!strcmp(name, "act_sw_hwf"))
+		mvPp2ClsC2ForwardSet(&act_entry, a);
+	else if (!strcmp(name, "act_sw_pol"))
+#ifdef CONFIG_MV_ETH_PP2_1
+		mvPp2ClsC2PolicerSet(&act_entry, a, b, c);
+#else
+		mvPp2ClsC2PolicerSet(&act_entry, a, b);
+#endif
+	else if (!strcmp(name, "act_sw_mdf"))
+		mvPp2ClsC2ModSet(&act_entry, a, b, c);
+	else if (!strcmp(name, "act_sw_mtu"))/*PPv2.1 new feature MAS 3.7*/
+		mvPp2ClsC2MtuSet(&act_entry, a);
+	else if (!strcmp(name, "act_sw_dup"))
+		mvPp2ClsC2DupSet(&act_entry, a, b);
+	else if (!strcmp(name, "act_sw_sq"))/*PPv2.1 new feature MAS 3.14*/
+		mvPp2ClsC2SeqSet(&act_entry, a, b);
+	else if (!strcmp(name, "cnt_clr_all"))
+		mvPp2ClsC2HitCntrsClearAll();
+	else if (!strcmp(name, "act_sw_flowid"))
+		mvPp2ClsC2FlowIdEn(&act_entry, a);
+	else if (!strcmp(name, "cnt_read"))
+		mvPp2ClsC2HitCntrRead(a, NULL);
+	else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+
+static DEVICE_ATTR(prio_hw_dump,		S_IRUSR, mv_cls_show, NULL);
+static DEVICE_ATTR(dscp_hw_dump,		S_IRUSR, mv_cls_show, NULL);
+static DEVICE_ATTR(qos_sw_dump,			S_IRUSR, mv_cls_show, NULL);
+static DEVICE_ATTR(act_sw_dump,			S_IRUSR, mv_cls_show, NULL);
+static DEVICE_ATTR(act_hw_dump,			S_IRUSR, mv_cls_show, NULL);
+static DEVICE_ATTR(cnt_dump,			S_IRUSR, mv_cls_show, NULL);
+static DEVICE_ATTR(hw_regs,			S_IRUSR, mv_cls_show, NULL);
+static DEVICE_ATTR(help,			S_IRUSR, mv_cls_show, NULL);
+
+static DEVICE_ATTR(qos_sw_clear,		S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(qos_hw_write,		S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(qos_hw_read,			S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(qos_sw_prio,			S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(qos_sw_dscp,			S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(qos_sw_color,		S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(qos_sw_gemid,		S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(qos_sw_queue,		S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(act_hw_inv,			S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(act_hw_inv_all,		S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(act_sw_clear,		S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(act_hw_write,		S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(act_hw_read,			S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(act_sw_byte,			S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(act_sw_color,		S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(act_sw_prio,			S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(act_sw_dscp,			S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(act_sw_gpid,			S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(act_sw_qh,			S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(act_sw_ql,			S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(act_sw_queue,		S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(act_sw_hwf,			S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(act_sw_pol,			S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(act_sw_mdf,			S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(act_sw_mtu,			S_IWUSR, mv_cls_show, mv_cls_store);/*PPv2.1 new feature MAS 3.7*/
+static DEVICE_ATTR(act_sw_dup,			S_IWUSR, mv_cls_show, mv_cls_store);/*PPv2.1 new feature MAS 3.14*/
+static DEVICE_ATTR(act_sw_sq,			S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(cnt_clr_all,			S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(act_sw_qos,			S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(cnt_read,			S_IWUSR, mv_cls_show, mv_cls_store);
+static DEVICE_ATTR(act_sw_flowid,		S_IWUSR, mv_cls_show, mv_cls_store);
+
+
+static struct attribute *cls2_attrs[] = {
+	&dev_attr_prio_hw_dump.attr,
+	&dev_attr_dscp_hw_dump.attr,
+	&dev_attr_qos_sw_dump.attr,
+	&dev_attr_act_sw_dump.attr,
+	&dev_attr_act_hw_dump.attr,
+	&dev_attr_cnt_dump.attr,
+	&dev_attr_hw_regs.attr,
+	&dev_attr_help.attr,
+	&dev_attr_qos_sw_clear.attr,
+	&dev_attr_qos_hw_write.attr,
+	&dev_attr_qos_hw_read.attr,
+	&dev_attr_qos_sw_prio.attr,
+	&dev_attr_qos_sw_dscp.attr,
+	&dev_attr_qos_sw_color.attr,
+	&dev_attr_qos_sw_gemid.attr,
+	&dev_attr_qos_sw_queue.attr,
+	&dev_attr_act_hw_inv.attr,
+	&dev_attr_act_hw_inv_all.attr,
+	&dev_attr_act_sw_clear.attr,
+	&dev_attr_act_hw_write.attr,
+	&dev_attr_act_hw_read.attr,
+	&dev_attr_act_sw_byte.attr,
+	&dev_attr_act_sw_color.attr,
+	&dev_attr_act_sw_prio.attr,
+	&dev_attr_act_sw_dscp.attr,
+	&dev_attr_act_sw_gpid.attr,
+	&dev_attr_act_sw_qh.attr,
+	&dev_attr_act_sw_ql.attr,
+	&dev_attr_act_sw_queue.attr,
+	&dev_attr_act_sw_hwf.attr,
+	&dev_attr_act_sw_pol.attr,
+	&dev_attr_act_sw_mdf.attr,
+	&dev_attr_act_sw_mtu.attr,/*PPv2.1 new feature MAS 3.7*/
+	&dev_attr_act_sw_dup.attr,
+	&dev_attr_act_sw_sq.attr,/*PPv2.1 new feature MAS 3.14*/
+	&dev_attr_cnt_clr_all.attr,
+	&dev_attr_act_sw_qos.attr,
+	&dev_attr_cnt_read.attr,
+	&dev_attr_act_sw_flowid.attr,
+	NULL
+};
+
+static struct attribute_group cls2_group = {
+	.name = "cls2",
+	.attrs = cls2_attrs,
+};
+
+int mv_pp2_cls2_sysfs_init(struct kobject *pp2_kobj)
+{
+	int err = 0;
+
+	err = sysfs_create_group(pp2_kobj, &cls2_group);
+	if (err)
+		printk(KERN_INFO "sysfs group %s failed %d\n", cls2_group.name, err);
+
+	return err;
+}
+
+int mv_pp2_cls2_sysfs_exit(struct kobject *pp2_kobj)
+{
+	sysfs_remove_group(pp2_kobj, &cls2_group);
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cls/cls3_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/cls/cls3_sysfs.c
new file mode 100644
index 000000000000..02294c6b777c
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cls/cls3_sysfs.c
@@ -0,0 +1,401 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This SW file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+SW Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include "mvOs.h"
+#include "mvCommon.h"
+#include "cls/mvPp2Cls3Hw.h"
+
+static MV_PP2_CLS_C3_ENTRY		c3;
+
+
+static ssize_t mv_cls3_help(char *buf)
+{
+	int off = 0;
+	off += scnprintf(buf + off, PAGE_SIZE, "cat             hw_dump        - Dump all occupied entries from HW.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "cat             hw_ext_dump    - Dump all occupied extension table entries from HW.\n");
+#ifdef CONFIG_MV_ETH_PP2_1
+	off += scnprintf(buf + off, PAGE_SIZE, "cat             hw_ms_dump     - Dump all miss table entires from HW.\n");
+#endif
+	off += scnprintf(buf + off, PAGE_SIZE, "cat             sw_dump        - Dump SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "cat             sc_res_dump    - Dump all valid scan results from HW.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "cat             sc_regs        - Dump scan registers.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "cat             hw_query       - Get query for HEK in the SW entry and show result.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "cat             cnt_read_all   - Dump all hit counters for all changed indices and miss entries\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "\n");
+#ifdef CONFIG_MV_ETH_PP2_1
+	off += scnprintf(buf + off, PAGE_SIZE, "echo lkp_type   > hw_ms_add    - Write entry from SW into HW miss table <lkp_type>\n");
+#endif
+	off += scnprintf(buf + off, PAGE_SIZE, "echo depth      > hw_query_add - Get query for HEK in the SW entry and Write entry into HW hash entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                 free entry search depth <depth>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo idx        > hw_read      - Read entry from HW <idx> into SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo idx extIdx > hw_add       - Write entry from SW into HW hash table <idx>\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                 external table entry index <extIdx> optionally used for long entries.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo idx        > hw_del       - Delete entry from HW <idx>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo 1          > hw_del_all   - Delete all c3 entries from HW.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo 1          > sw_clear     - Clear SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo val        > sw_init_cnt  - Set initial hit counter value <val> (in units of 64 hits) to SW.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo info       > key_sw_l4    - Set L4 information <info> to SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo type       > key_sw_lkp_type - Set key lookup type to SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo id type    > key_sw_port  - Set key port ID <id> and port ID type to SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo size       > key_sw_size  - Set key HEK size port ID <id> and port ID type to SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo o d        > key_sw_byte  - Set byte of HEK data <d> and offset <o> to SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo o d        > key_sw_word  - Set byte of HEK data <d> and offset <o> to SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd         > act_sw_color - Set color command <cmd> to action table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd qh      > act_sw_qh    - Set Queue High command <cmd> and value <qh> to action table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd ql      > act_sw_ql    - Set Queue Low command <cmd> and value <ql> to action table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd q       > act_sw_queue - Set full Queue command <cmd> and value <q> to action table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd         > act_sw_fwd   - Set Forwarding command <cmd> to action table SW entry.\n");
+#ifdef CONFIG_MV_ETH_PP2_1
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd id bnk  > act_sw_pol   - Set PolicerID command <cmd> bank <bnk> and number <id> to action table SW entry.\n");
+#else
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd id      > act_sw_pol   - Set PolicerID command <cmd> and number <id> to action table SW entry.\n");
+#endif
+
+	off += scnprintf(buf + off, PAGE_SIZE, "echo en          > act_sw_flowid- Set FlowID enable/disable <1/0> to action table SW entry.\n");
+#ifdef CONFIG_MV_ETH_PP2_1
+	off += scnprintf(buf + off, PAGE_SIZE, "echo idx         > act_sw_mtu   - Set MTU index to action table SW entry\n");
+#endif
+	off += scnprintf(buf + off, PAGE_SIZE, "echo d i cs      > act_sw_mdf   - Set modification parameters to action table SW entry data pointer <d>\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                  instruction offset <i>, <cs> enable L4 checksum generation\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo id cnt      > act_sw_dup   - Set packet duplication parameters <id, cnt> to action SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo id off bits > act_sw_sq    - Write sequence id <id> to offset <off> (in bits), id bits size <bits>\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                  to action SW entry\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo idx         > cnt_read     - Show hit counter for action table entry <idx>.\n");
+#ifdef CONFIG_MV_ETH_PP2_1
+	off += scnprintf(buf + off, PAGE_SIZE, "echo lkp_type    > cnt_ms_read  - Show hit counter for action table miss entry <lkp_type>.\n");
+#endif
+	off += scnprintf(buf + off, PAGE_SIZE, "echo 1           > cnt_clr_all  - Clear hit counters for all action table entries.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo t           > cnt_clr_lkp  - Clear hit counters for all action table entries with lookup type <t>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo 1           > sc_start     - Start new multi-hash scanning.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo m t         > sc_thresh    - Set scan threshold <t> and mode to above <m=1> or below <m=0> thresh.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo e           > sc_clear_before - clear hit counter before scan enable <e=1> or disable<e=0> in HW.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo t           > sc_lkp       - Set lookup type <t> for scan operation, <t=-1> all entries are scanned\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo idx         > sc_start_idx - Set scan start entry <idx> in HW.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo time        > sc_delay     - Set scan delay <time> in HW.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo  idx        > sc_res_read  - Show result entry <idx> form scan result table in HW.\n");
+
+	return off;
+}
+
+
+static ssize_t mv_cls3_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	const char      *name = attr->attr.name;
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+	if (!strcmp(name, "hw_dump"))
+		mvPp2ClsC3HwDump();
+	else if (!strcmp(name, "hw_ms_dump"))
+		mvPp2ClsC3HwMissDump();
+	else if (!strcmp(name, "hw_ext_dump"))
+		mvPp2ClsC3HwExtDump();
+	else if (!strcmp(name, "sw_dump"))
+		mvPp2ClsC3SwDump(&c3);
+	else if (!strcmp(name, "sc_res_dump"))
+		mvPp2ClsC3ScanResDump();
+	else if (!strcmp(name, "sc_regs"))
+		mvPp2ClsC3ScanRegs();
+	else if (!strcmp(name, "hw_query"))
+		mvPp2ClsC3HwQuery(&c3, NULL, NULL);
+	else if (!strcmp(name, "cnt_read_all"))
+		mvPp2ClsC3HitCntrsReadAll();
+	else
+		off += mv_cls3_help(buf);
+
+	return off;
+}
+
+
+
+static ssize_t mv_cls3_store(struct device *dev,
+				struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0, a = 0, b = 0, c = 0, d = 0;
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%x %x %x %x", &a, &b, &c, &d);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "hw_read"))
+		mvPp2ClsC3HwRead(&c3, a);
+	else if (!strcmp(name, "hw_query_add"))
+		mvPp2ClsC3HwQueryAdd(&c3, a, NULL);
+	else if (!strcmp(name, "hw_add"))
+		mvPp2ClsC3HwAdd(&c3, a, b);
+	else if (!strcmp(name, "hw_ms_add"))/*PPv2.1 new feature MAS 3.12*/
+		mvPp2ClsC3HwMissAdd(&c3, a);
+	else if (!strcmp(name, "hw_del"))
+		mvPp2ClsC3HwDel(a);
+	else if (!strcmp(name, "hw_del_all"))
+		mvPp2ClsC3HwDelAll();
+	else if (!strcmp(name, "sw_clear"))
+		mvPp2ClsC3SwClear(&c3);
+	else if (!strcmp(name, "sw_init_cnt"))
+		mvPp2ClsC3HwInitCtrSet(a);
+	else if (!strcmp(name, "key_sw_l4"))
+		 mvPp2ClsC3SwL4infoSet(&c3, a);
+	else if (!strcmp(name, "key_sw_lkp_type"))
+		mvPp2ClsC3SwLkpTypeSet(&c3, a);
+	else if (!strcmp(name, "key_sw_port"))
+		mvPp2ClsC3SwPortIDSet(&c3, b, a);
+	else if (!strcmp(name, "key_sw_size"))
+		mvPp2ClsC3SwHekSizeSet(&c3, a);
+	else if (!strcmp(name, "key_sw_byte"))
+		mvPp2ClsC3SwHekByteSet(&c3, a, b);
+	else if (!strcmp(name, "key_sw_word"))
+		mvPp2ClsC3SwHekWordSet(&c3, a, b);
+	else if (!strcmp(name, "act_sw_color"))
+		mvPp2ClsC3ColorSet(&c3, a);
+	else if (!strcmp(name, "act_sw_qh"))
+		mvPp2ClsC3QueueHighSet(&c3, a, b);
+	else if (!strcmp(name, "act_sw_ql"))
+		mvPp2ClsC3QueueLowSet(&c3, a, b);
+	else if (!strcmp(name, "act_sw_queue"))
+		mvPp2ClsC3QueueSet(&c3, a, b);
+	else if (!strcmp(name, "act_sw_fwd"))
+		mvPp2ClsC3ForwardSet(&c3, a);
+	else if (!strcmp(name, "act_sw_pol"))
+#ifdef CONFIG_MV_ETH_PP2_1
+		mvPp2ClsC3PolicerSet(&c3, a, b, c);
+#else
+		mvPp2ClsC3PolicerSet(&c3, a, b);
+#endif
+	else if (!strcmp(name, "act_sw_flowid"))
+		mvPp2ClsC3FlowIdEn(&c3, a);
+	else if (!strcmp(name, "act_sw_mdf"))
+		mvPp2ClsC3ModSet(&c3, a, b, c);
+	else if (!strcmp(name, "act_sw_mtu"))/*PPv2.1 new feature MAS 3.7*/
+		mvPp2ClsC3MtuSet(&c3, a);
+	else if (!strcmp(name, "act_sw_dup"))
+		mvPp2ClsC3DupSet(&c3, a, b);
+	else if (!strcmp(name, "act_sw_sq"))/*PPv2.1 new feature MAS 3.4*/
+		mvPp2ClsC3SeqSet(&c3, a, b, c);
+	else if (!strcmp(name, "cnt_read"))
+		mvPp2ClsC3HitCntrsRead(a, NULL);
+	else if (!strcmp(name, "cnt_ms_read"))
+		mvPp2ClsC3HitCntrsMissRead(a, NULL);
+	else if (!strcmp(name, "cnt_clr_all"))
+		mvPp2ClsC3HitCntrsClearAll();
+	else if (!strcmp(name, "cnt_clr_lkp"))
+		mvPp2ClsC3HitCntrsClear(a);
+	else if (!strcmp(name, "sc_start"))
+		mvPp2ClsC3ScanStart();
+	else if (!strcmp(name, "sc_thresh"))
+		mvPp2ClsC3ScanThreshSet(a, b);
+	else if (!strcmp(name, "sc_clear_before"))
+		mvPp2ClsC3ScanClearBeforeEnSet(a);
+	else if (!strcmp(name, "sc_start_idx"))
+		mvPp2ClsC3ScanStartIndexSet(a);
+	else if (!strcmp(name, "sc_delay"))
+		mvPp2ClsC3ScanDelaySet(a);
+	else if (!strcmp(name, "sc_res_read"))
+		mvPp2ClsC3ScanResRead(a, NULL, NULL);
+	else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_cls3_signed_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0;
+	unsigned long flags;
+	int           a = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%d", &a);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "sc_lkp"))
+		mvPp2ClsC3ScanLkpTypeSet(a);
+	else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+
+static DEVICE_ATTR(hw_dump,		S_IRUSR, mv_cls3_show, NULL);
+static DEVICE_ATTR(hw_ms_dump,		S_IRUSR, mv_cls3_show, NULL);/*PPv2.1 new feature MAS 3.7*/
+static DEVICE_ATTR(hw_ext_dump,		S_IRUSR, mv_cls3_show, NULL);
+static DEVICE_ATTR(sw_dump,		S_IRUSR, mv_cls3_show, NULL);
+static DEVICE_ATTR(sc_res_dump,		S_IRUSR, mv_cls3_show, NULL);
+static DEVICE_ATTR(sc_regs,		S_IRUSR, mv_cls3_show, NULL);
+static DEVICE_ATTR(hw_query,		S_IRUSR, mv_cls3_show, NULL);
+static DEVICE_ATTR(cnt_read_all,	S_IRUSR, mv_cls3_show, NULL);
+static DEVICE_ATTR(help,		S_IRUSR, mv_cls3_show, NULL);
+
+static DEVICE_ATTR(hw_query_add,	S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(hw_read,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(hw_add,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(hw_ms_add,		S_IWUSR, NULL, mv_cls3_store);/*PPv2.1 new feature MAS 3.12*/
+static DEVICE_ATTR(hw_del,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(hw_del_all,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(sw_clear,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(sw_init_cnt,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(key_sw_l4,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(key_sw_lkp_type,	S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(key_sw_port,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(key_sw_size,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(key_sw_byte,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(key_sw_word,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(act_sw_color,	S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(act_sw_qh,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(act_sw_ql,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(act_sw_queue,	S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(act_sw_fwd,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(act_sw_pol,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(act_sw_mdf,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(act_sw_flowid,	S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(act_sw_mtu,		S_IWUSR, NULL, mv_cls3_store);/*PPv2.1 new feature MAS 3.7*/
+static DEVICE_ATTR(act_sw_dup,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(act_sw_sq,		S_IWUSR, NULL, mv_cls3_store);/*PPv2.1 new feature MAS 3.14*/
+static DEVICE_ATTR(cnt_read,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(cnt_ms_read,		S_IWUSR, NULL, mv_cls3_store);/*PPv2.1 new feature MAS 3.12*/
+static DEVICE_ATTR(cnt_clr_all,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(cnt_clr_lkp,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(sc_start,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(sc_thresh,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(sc_clear_before,	S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(sc_lkp,		S_IWUSR, NULL, mv_cls3_signed_store);
+static DEVICE_ATTR(sc_start_idx,	S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(sc_delay,		S_IWUSR, NULL, mv_cls3_store);
+static DEVICE_ATTR(sc_res_read,		S_IWUSR, NULL, mv_cls3_store);
+
+
+
+static struct attribute *cls3_attrs[] = {
+	&dev_attr_hw_dump.attr,
+	&dev_attr_hw_ms_dump.attr,
+	&dev_attr_hw_ext_dump.attr,
+	&dev_attr_sw_dump.attr,
+	&dev_attr_sc_res_dump.attr,
+	&dev_attr_sc_regs.attr,
+	&dev_attr_hw_query.attr,
+	&dev_attr_cnt_read_all.attr,
+	&dev_attr_help.attr,
+	&dev_attr_hw_query_add.attr,
+	&dev_attr_hw_read.attr,
+	&dev_attr_hw_add.attr,
+	&dev_attr_hw_ms_add.attr,
+	&dev_attr_hw_del.attr,
+	&dev_attr_hw_del_all.attr,
+	&dev_attr_sw_clear.attr,
+	&dev_attr_sw_init_cnt.attr,
+	&dev_attr_key_sw_l4.attr,
+	&dev_attr_key_sw_lkp_type.attr,
+	&dev_attr_key_sw_port.attr,
+	&dev_attr_key_sw_size.attr,
+	&dev_attr_key_sw_byte.attr,
+	&dev_attr_key_sw_word.attr,
+	&dev_attr_act_sw_color.attr,
+	&dev_attr_act_sw_qh.attr,
+	&dev_attr_act_sw_ql.attr,
+	&dev_attr_act_sw_queue.attr,
+	&dev_attr_act_sw_fwd.attr,
+	&dev_attr_act_sw_pol.attr,
+	&dev_attr_act_sw_mdf.attr,
+	&dev_attr_act_sw_mtu.attr,
+	&dev_attr_act_sw_dup.attr,
+	&dev_attr_act_sw_sq.attr,
+	&dev_attr_cnt_read.attr,
+	&dev_attr_cnt_ms_read.attr,
+	&dev_attr_cnt_clr_all.attr,
+	&dev_attr_cnt_clr_lkp.attr,
+	&dev_attr_act_sw_flowid.attr,
+	&dev_attr_sc_start.attr,
+	&dev_attr_sc_thresh.attr,
+	&dev_attr_sc_clear_before.attr,
+	&dev_attr_sc_lkp.attr,
+	&dev_attr_sc_start_idx.attr,
+	&dev_attr_sc_delay.attr,
+	&dev_attr_sc_res_read.attr,
+	NULL
+};
+
+static struct attribute_group cls3_group = {
+	.name = "cls3",
+	.attrs = cls3_attrs,
+};
+
+int mv_pp2_cls3_sysfs_init(struct kobject *pp2_kobj)
+{
+	int err = 0;
+
+	err = sysfs_create_group(pp2_kobj, &cls3_group);
+	if (err)
+		printk(KERN_INFO "sysfs group %s failed %d\n", cls3_group.name, err);
+
+	return err;
+}
+
+int mv_pp2_cls3_sysfs_exit(struct kobject *pp2_kobj)
+{
+	sysfs_remove_group(pp2_kobj, &cls3_group);
+
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cls/cls4_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/cls/cls4_sysfs.c
new file mode 100644
index 000000000000..6dfdabc945c1
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cls/cls4_sysfs.c
@@ -0,0 +1,286 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include "mvOs.h"
+#include "mvCommon.h"
+#include "cls/mvPp2Cls4Hw.h"
+
+
+static MV_PP2_CLS_C4_ENTRY		C4;
+
+
+
+static ssize_t mv_cls_help(char *buf)
+{
+	int off = 0;
+	off += scnprintf(buf + off, PAGE_SIZE, "cat               sw_dump               - Dump software entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "cat               hw_regs               - Dump hardware registers.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "cat               hw_dump               - Dump all hardware entries.\n");
+#ifdef CONFIG_MV_ETH_PP2_1
+	off += scnprintf(buf + off, PAGE_SIZE, "cat               hw_hits               - Dump non zeroed hit counters and the associated HW entries\n");
+#endif
+	off += scnprintf(buf + off, PAGE_SIZE, "\n");
+
+	off += scnprintf(buf + off, PAGE_SIZE, "echo p s r      > hw_port_rules         - Set physical port number <p> for rules set <s>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                          <rules> - number of rules.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo p s r      > hw_uni_rules          - Set uni port number <p> for rules set <s>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                          <rules> - number of rules.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "\n");
+
+	off += scnprintf(buf + off, PAGE_SIZE, "echo s r        > hw_write              - Write software entry into hardware <set=s,rule=r>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo s r        > hw_read               - Read entry <set=s,rule=r> from hardware into software entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo 1          > sw_clear              - Clear software entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo 1          > hw_clear_all          - Clear all C4 rules in hardware.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo f o d      > rule_two_b            - Set two bytes of data <d> in field <f> with offset <o> to\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                          software entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo f id op    > rule_params           - Set ID <id> and OpCode <op> to filed <f> in software entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo mode       > rule_sw_pppoe         - Set PPPOE mode to software entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo mode       > rule_sw_vlan          - Set VLAN mode to software entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo mode       > rule_sw_mac           - Set mac to me mode to software entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo mode       > rule_sw_l4            - Set L4 info mode to software entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo mode       > rule_sw_l3            - Set L3 info mode to software entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd        > act_sw_color          - Set Color command <cmd> to action table software entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd prio   > act_sw_prio           - Set priority command <cmd> and value <prio> to action\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                          table software entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd dscp   > act_sw_dscp           - Set DSCP command <cmd> and value <dscp> to action\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                          table software entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd gpid   > act_sw_gpid           - Set GemPortID command <cmd> and value <gpid> to action\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                          table software entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd q      > act_sw_qh             - Set queue high command <cmd> and value <q> to action\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                          table software entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd q      > act_sw_ql             - Set queue low command <cmd> and value <q> to action\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                          table software entry.\n");
+#ifdef CONFIG_MV_ETH_PP2_1
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd        > act_sw_fwd            - Set Forwarding command <cmd> to action table software entry\n");
+#endif
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd q      > act_sw_queue          - Set full queue command <cmd> and value <q> to action\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                          table software entry.\n");
+#ifdef CONFIG_MV_ETH_PP2_1
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd id bnk > act_sw_pol            - Set PolicerId command <cmd> bank <bnk> and numver <id> to action\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                          table software entry.\n");
+#else
+	off += scnprintf(buf + off, PAGE_SIZE, "echo cmd id     > act_sw_pol            - Set PolicerId command <cmd> and numver <id> to action\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "                                          table software entry.\n");
+#endif
+	off += scnprintf(buf + off, PAGE_SIZE, "\n");
+
+	return off;
+}
+
+
+static ssize_t mv_cls_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	const char      *name = attr->attr.name;
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "sw_dump"))
+		mvPp2ClsC4SwDump(&C4);
+	else if (!strcmp(name, "hw_regs"))
+		mvPp2ClsC4RegsDump();
+	else if (!strcmp(name, "hw_dump"))
+		mvPp2ClsC4HwDumpAll();
+	else if (!strcmp(name, "hw_hits"))
+		mvPp2V1ClsC4HwHitsDump();
+
+	else
+		off += mv_cls_help(buf);
+
+	return off;
+}
+
+
+static ssize_t mv_cls_store(struct device *dev,
+				struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0, a = 0, b = 0, c = 0, d = 0, e = 0;
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%x %x %x %x %x", &a, &b, &c, &d, &e);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "hw_port_rules"))
+		mvPp2ClsC4HwPortToRulesSet(a, b, c);
+	else if (!strcmp(name, "hw_uni_rules"))
+		mvPp2ClsC4HwUniToRulesSet(a, b, c);
+	else if (!strcmp(name, "hw_read"))
+		mvPp2ClsC4HwRead(&C4, b, a);
+	else if (!strcmp(name, "hw_write"))
+		mvPp2ClsC4HwWrite(&C4, b, a);
+	else if (!strcmp(name, "sw_clear"))
+		mvPp2ClsC4SwClear(&C4);
+	else if (!strcmp(name, "hw_clear_all"))
+		mvPp2ClsC4HwClearAll();
+	else if (!strcmp(name, "rule_two_b"))
+		mvPp2ClsC4FieldsShortSet(&C4, a, b, (unsigned short) c);
+	else if (!strcmp(name, "rule_params"))
+		mvPp2ClsC4FieldsParamsSet(&C4, a, b, c);
+	else if (!strcmp(name, "rule_sw_vlan"))
+		mvPp2ClsC4SwVlanSet(&C4, a);
+	else if (!strcmp(name, "rule_sw_pppoe"))
+		mvPp2ClsC4SwPppoeSet(&C4, a);
+	else if (!strcmp(name, "rule_sw_mac"))
+		mvPp2ClsC4SwMacMeSet(&C4, a);
+	else if (!strcmp(name, "rule_sw_l4"))
+		mvPp2ClsC4SwL4InfoSet(&C4, a);
+	else if (!strcmp(name, "rule_sw_l3"))
+		mvPp2ClsC4SwL3InfoSet(&C4, a);
+	else if (!strcmp(name, "act_sw_color"))
+		mvPp2ClsC4ColorSet(&C4, a);
+	else if (!strcmp(name, "act_sw_prio"))
+		mvPp2ClsC4PrioSet(&C4, a, b);
+	else if (!strcmp(name, "act_sw_dscp"))
+		mvPp2ClsC4DscpSet(&C4, a, b);
+	else if (!strcmp(name, "act_sw_gpid"))
+		mvPp2ClsC4GpidSet(&C4, a, b);
+	else if (!strcmp(name, "act_sw_qh"))
+		mvPp2ClsC4QueueHighSet(&C4, a, b);
+	else if (!strcmp(name, "act_sw_ql"))
+		mvPp2ClsC4QueueLowSet(&C4, a, b);
+	else if (!strcmp(name, "act_sw_fwd"))
+		mvPp2ClsC4ForwardSet(&C4, a);
+	else if (!strcmp(name, "act_sw_queue"))
+		mvPp2ClsC4QueueSet(&C4, a, b);
+	else if (!strcmp(name, "act_sw_pol"))
+#ifdef CONFIG_MV_ETH_PP2_1
+		mvPp2ClsC4PolicerSet(&C4, a, b, c);
+#else
+		mvPp2ClsC4PolicerSet(&C4, a, b);
+#endif
+	else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(hw_dump,			S_IRUSR, mv_cls_show, NULL);
+static DEVICE_ATTR(sw_dump,			S_IRUSR, mv_cls_show, NULL);
+static DEVICE_ATTR(hw_regs,			S_IRUSR, mv_cls_show, NULL);
+static DEVICE_ATTR(hw_hits,			S_IRUSR, mv_cls_show, NULL);
+static DEVICE_ATTR(help,			S_IRUSR, mv_cls_show, NULL);
+
+static DEVICE_ATTR(hw_port_rules,		S_IWUSR, NULL, mv_cls_store);
+static DEVICE_ATTR(hw_uni_rules,		S_IWUSR, NULL, mv_cls_store);
+static DEVICE_ATTR(hw_read,			S_IWUSR, NULL, mv_cls_store);
+static DEVICE_ATTR(hw_write,			S_IWUSR, NULL, mv_cls_store);
+static DEVICE_ATTR(sw_clear,			S_IWUSR, NULL, mv_cls_store);
+static DEVICE_ATTR(hw_clear_all,		S_IWUSR, NULL, mv_cls_store);
+static DEVICE_ATTR(rule_two_b,			S_IWUSR, NULL, mv_cls_store);
+static DEVICE_ATTR(rule_params,			S_IWUSR, NULL, mv_cls_store);
+static DEVICE_ATTR(rule_sw_vlan,		S_IWUSR, NULL, mv_cls_store);
+static DEVICE_ATTR(rule_sw_pppoe,		S_IWUSR, NULL, mv_cls_store);
+static DEVICE_ATTR(rule_sw_mac,			S_IWUSR, NULL, mv_cls_store);
+static DEVICE_ATTR(rule_sw_l4,			S_IWUSR, NULL, mv_cls_store);
+static DEVICE_ATTR(rule_sw_l3,			S_IWUSR, NULL, mv_cls_store);
+static DEVICE_ATTR(act_sw_color,		S_IWUSR, NULL, mv_cls_store);
+static DEVICE_ATTR(act_sw_prio,			S_IWUSR, NULL, mv_cls_store);
+static DEVICE_ATTR(act_sw_dscp,			S_IWUSR, NULL, mv_cls_store);
+static DEVICE_ATTR(act_sw_gpid,			S_IWUSR, NULL, mv_cls_store);
+static DEVICE_ATTR(act_sw_qh,			S_IWUSR, NULL, mv_cls_store);
+static DEVICE_ATTR(act_sw_ql,			S_IWUSR, NULL, mv_cls_store);
+static DEVICE_ATTR(act_sw_fwd,			S_IWUSR, NULL, mv_cls_store);
+static DEVICE_ATTR(act_sw_queue,		S_IWUSR, NULL, mv_cls_store);
+static DEVICE_ATTR(act_sw_pol,			S_IWUSR, NULL, mv_cls_store);
+
+
+
+
+static struct attribute *cls4_attrs[] = {
+	&dev_attr_sw_dump.attr,
+	&dev_attr_hw_dump.attr,
+	&dev_attr_hw_regs.attr,
+	&dev_attr_hw_hits.attr,
+	&dev_attr_help.attr,
+	&dev_attr_hw_port_rules.attr,
+	&dev_attr_hw_uni_rules.attr,
+	&dev_attr_hw_read.attr,
+	&dev_attr_hw_write.attr,
+	&dev_attr_sw_clear.attr,
+	&dev_attr_hw_clear_all.attr,
+	&dev_attr_rule_two_b.attr,
+	&dev_attr_rule_params.attr,
+	&dev_attr_rule_sw_vlan.attr,
+	&dev_attr_rule_sw_pppoe.attr,
+	&dev_attr_rule_sw_mac.attr,
+	&dev_attr_rule_sw_l4.attr,
+	&dev_attr_rule_sw_l3.attr,
+	&dev_attr_act_sw_color.attr,
+	&dev_attr_act_sw_prio.attr,
+	&dev_attr_act_sw_dscp.attr,
+	&dev_attr_act_sw_gpid.attr,
+	&dev_attr_act_sw_qh.attr,
+	&dev_attr_act_sw_ql.attr,
+	&dev_attr_act_sw_fwd.attr,/*ppv2.1 new feature MAS 3.9*/
+	&dev_attr_act_sw_queue.attr,
+	&dev_attr_act_sw_pol.attr,
+	NULL
+};
+
+static struct attribute_group cls4_group = {
+	.name = "cls4",
+	.attrs = cls4_attrs,
+};
+
+int mv_pp2_cls4_sysfs_init(struct kobject *pp2_kobj)
+{
+	int err = 0;
+
+	err = sysfs_create_group(pp2_kobj, &cls4_group);
+	if (err)
+		printk(KERN_INFO "sysfs group %s failed %d\n", cls4_group.name, err);
+
+	return err;
+}
+
+int mv_pp2_cls4_sysfs_exit(struct kobject *pp2_kobj)
+{
+	sysfs_remove_group(pp2_kobj, &cls4_group);
+
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cls/cls_mc_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/cls/cls_mc_sysfs.c
new file mode 100644
index 000000000000..dac03343a5a6
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cls/cls_mc_sysfs.c
@@ -0,0 +1,191 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include "mvOs.h"
+#include "mvCommon.h"
+#include "cls/mvPp2ClsMcHw.h"
+
+
+static MV_PP2_MC_ENTRY		mc;
+
+static ssize_t mv_mc_help(char *buf)
+{
+	int off = 0;
+	off += scnprintf(buf + off, PAGE_SIZE, "cat             sw_dump      - Dump software entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "cat             hw_dump      - Dump all hardware entries.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "\n");
+
+	off += scnprintf(buf + off, PAGE_SIZE, "echo i        > hw_write     - Write software entry into hardware <i>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo i        > hw_read      - Read entry <i> from hardware into software entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo 1        > sw_clear     - Clear software entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo 1        > hw_clear_all - Clear all multicast table entries in hardware.\n");
+
+	off += scnprintf(buf + off, PAGE_SIZE, "echo prio en  > mc_sw_prio   - Set priority enable <en> and value <prio> to sw entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo dscp en  > mc_sw_dscp   - Set DSCP enable <en> and value <dscp> to sw entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo gpid en  > mc_sw_gpid   - Set GemPortID enable <en> and value <gpid> to sw entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo d i      > mc_sw_modif  - Set modification data <d> and command <i> to sw entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo q        > mc_sw_queue  - Set Queue <q> value to sw entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo en       > mc_sw_hwf    - Set HWF enabled <en=1> or disable <en=0> to sw entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE, "echo next     > mc_sw_next   - Set next pointer <next> to sw entry.\n");
+
+	off += scnprintf(buf + off, PAGE_SIZE, "\n");
+
+	return off;
+}
+
+static ssize_t mv_mc_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	const char      *name = attr->attr.name;
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "sw_dump"))
+		mvPp2McSwDump(&mc);
+	else if (!strcmp(name, "hw_dump"))
+		mvPp2McHwDump();
+	else
+		off += mv_mc_help(buf);
+
+	return off;
+}
+
+
+static ssize_t mv_mc_store(struct device *dev,
+				struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0, a = 0, b = 0, c = 0, d = 0, e = 0;
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%x %x %x %x %x", &a, &b, &c, &d, &e);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "hw_read"))
+		mvPp2McHwRead(&mc, a);
+	else if (!strcmp(name, "hw_write"))
+		mvPp2McHwWrite(&mc, a);
+	else if (!strcmp(name, "sw_clear"))
+		mvPp2McSwClear(&mc);
+	else if (!strcmp(name, "hw_clear_all"))
+		mvPp2McHwClearAll();
+	else if (!strcmp(name, "mc_sw_prio"))
+		mvPp2McSwPrioSet(&mc, a, b);
+	else if (!strcmp(name, "mc_sw_dscp"))
+		mvPp2McSwDscpSet(&mc, a, b);
+	else if (!strcmp(name, "mc_sw_gpid"))
+		mvPp2McSwGpidSet(&mc, a, b);
+	else if (!strcmp(name, "mc_sw_modif"))
+		mvPp2McSwModSet(&mc, a, b);
+	else if (!strcmp(name, "mc_sw_queue"))
+		mvPp2McSwQueueSet(&mc, a);
+	else if (!strcmp(name, "mc_sw_hwf"))
+		mvPp2McSwForwardEn(&mc, a);
+	else if (!strcmp(name, "mc_sw_next"))
+		mvPp2McSwNext(&mc, a);
+	else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(hw_dump,			S_IRUSR, mv_mc_show, NULL);
+static DEVICE_ATTR(sw_dump,			S_IRUSR, mv_mc_show, NULL);
+static DEVICE_ATTR(help,			S_IRUSR, mv_mc_show, NULL);
+
+static DEVICE_ATTR(hw_read,			S_IWUSR, NULL, mv_mc_store);
+static DEVICE_ATTR(hw_write,			S_IWUSR, NULL, mv_mc_store);
+static DEVICE_ATTR(sw_clear,			S_IWUSR, NULL, mv_mc_store);
+static DEVICE_ATTR(hw_clear_all,		S_IWUSR, NULL, mv_mc_store);
+static DEVICE_ATTR(mc_sw_prio,			S_IWUSR, NULL, mv_mc_store);
+static DEVICE_ATTR(mc_sw_dscp,			S_IWUSR, NULL, mv_mc_store);
+static DEVICE_ATTR(mc_sw_gpid,			S_IWUSR, NULL, mv_mc_store);
+static DEVICE_ATTR(mc_sw_modif,			S_IWUSR, NULL, mv_mc_store);
+static DEVICE_ATTR(mc_sw_queue,			S_IWUSR, NULL, mv_mc_store);
+static DEVICE_ATTR(mc_sw_hwf,			S_IWUSR, NULL, mv_mc_store);
+static DEVICE_ATTR(mc_sw_next,			S_IWUSR, NULL, mv_mc_store);
+
+
+static struct attribute *mc_attrs[] = {
+	&dev_attr_sw_dump.attr,
+	&dev_attr_hw_dump.attr,
+	&dev_attr_help.attr,
+	&dev_attr_hw_read.attr,
+	&dev_attr_hw_write.attr,
+	&dev_attr_sw_clear.attr,
+	&dev_attr_hw_clear_all.attr,
+	&dev_attr_mc_sw_prio.attr,
+	&dev_attr_mc_sw_dscp.attr,
+	&dev_attr_mc_sw_gpid.attr,
+	&dev_attr_mc_sw_modif.attr,
+	&dev_attr_mc_sw_queue.attr,
+	&dev_attr_mc_sw_hwf.attr,
+	&dev_attr_mc_sw_next.attr,
+	NULL
+};
+
+static struct attribute_group mc_group = {
+	.name = "mc",
+	.attrs = mc_attrs,
+};
+
+int mv_pp2_mc_sysfs_init(struct kobject *pp2_kobj)
+{
+	int err = 0;
+
+	err = sysfs_create_group(pp2_kobj, &mc_group);
+	if (err)
+		pr_err("sysfs group %s failed %d\n", mc_group.name, err);
+
+	return err;
+}
+
+int mv_pp2_mc_sysfs_exit(struct kobject *pp2_kobj)
+{
+	sysfs_remove_group(pp2_kobj, &mc_group);
+
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cls/cls_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/cls/cls_sysfs.c
new file mode 100644
index 000000000000..16fea847f1cb
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cls/cls_sysfs.c
@@ -0,0 +1,404 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include "mvOs.h"
+#include "mvCommon.h"
+#include "cls/mvPp2ClsHw.h"
+
+static MV_PP2_CLS_LKP_ENTRY	lkp_entry;
+static MV_PP2_CLS_FLOW_ENTRY	flow_entry;
+
+
+static ssize_t mv_cls_help(char *buf)
+{
+	int off = 0;
+	off += scnprintf(buf + off, PAGE_SIZE,  "cat             lkp_sw_dump          - dump lookup ID table sw entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "cat             flow_sw_dump         - dump flow table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "cat             lkp_hw_dump          - dump lookup ID tabel from hardware.\n");
+#ifdef CONFIG_MV_ETH_PP2_1
+	off += scnprintf(buf + off, PAGE_SIZE,  "cat             flow_hw_hits         - dump non zeroed hit counters  and the associated flow tabel entries from hardware.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "cat             lkp_hw_hits          - dump non zeroed hit counters and the associated lookup ID entires from hardware.\n");
+#endif
+	off += scnprintf(buf + off, PAGE_SIZE,  "cat             flow_hw_dump         - dump flow table from hardware.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "cat             len_change_hw_dump   - lkp dump sw entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "cat             hw_regs              - dump classifier top registers.\n");
+
+	off += scnprintf(buf + off, PAGE_SIZE,  "\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo 1          >lkp_sw_clear        - clear lookup ID table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo 1          >flow_sw_clear       - clear flow table SW entry.\n");
+
+	off += scnprintf(buf + off, PAGE_SIZE,  "\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo en         >hw_enable           - classifier enable/disable <en = 1/0>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo p w        >hw_port_way         - set lookup way <w> for physical port <p>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo p mode     >hw_port_spid        - set SPID extraction mode <mode> for physical port <p>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo uni spid   >hw_uni_spid         - set port <uni> for spid <spid>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo virt gpid  >hw_virt_gpid        - set virtual port number <virt> for GemPortId <gpid>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo a b c d    >hw_udf              - set UDF field <a> as: base <b>, offset <c> bits, size<d> bits.\n");
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo p q        >hw_over_rxq_low     - set oversize rx low queue <q> for ingress port <p>.\n");
+#else
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo p q        >hw_over_rxq         - set oversize rxq <q> for ingress port <p>.\n");
+#endif
+#ifdef CONFIG_MV_ETH_PP2_1
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo p from q   >hw_qh               - set rx high queue source <from> and queue <q> for ingress port <p>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo idx m      >hw_mtu              - set MTU value <m> for index <idx>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo p v u mh   >hw_mh               - set port <p> enable/disable port Id generation for.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "                                       virtual <v=0,1> and uni <u=0,1> ports, set default Marvell header <mh>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo idx size   >hw_sq_size          - set sequence id number <idx> size to flow table software entry\n");
+#else
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo p txp m    >hw_mtu              - set MTU value <m> for egress port <p, txp>.\n");
+#endif
+	off += scnprintf(buf + off, PAGE_SIZE,  "\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo idx way    >lkp_hw_write        - write lookup ID table SW entry HW <idx,way>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo idx way    >lkp_hw_read         - read lookup ID table entry from HW <idx,way>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo rxq        >lkp_sw_rxq          - set default RXQ <rxq> to lookup ID table.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo f          >lkp_sw_flow         - set index of firs insruction <f> in flow table\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "                                       to lookup ID SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo m          >lkp_sw_mod          - set modification instruction offset <m> to lookup ID SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo e          >lkp_sw_en           - Enable <e=1> or disable <e=0> lookup ID table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo id         >flow_hw_write       - write flow table SW entry to HW <id>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo id         >flow_hw_read        - read flow table entry <id> from HW.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo t id       >flow_sw_port        - set port type <t> and id <p> to flow table SW entry\n");
+#ifdef CONFIG_MV_ETH_PP2_1
+	/*PPv2.1 new feature MAS 3.18*/
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo from       >flow_sw_portid      - set cls to recive portid via packet <from=1>\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "                                       or via user configurration <from=0>  to flow table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo mode       >flow_sw_pppoe       - Set PPPoE lookup skip mode <mode> to flow table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo mode       >flow_sw_vlan        - Set VLAN lookup skip mode <mode> to flow table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo mode       >flow_sw_macme       - Set MAC ME lookup skip mode <mode> to flow table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo mode       >flow_sw_udf7        - Set UDF7 lookup skip mode <mode> to flow table SW entry.\n");
+	/*PPv2.1 new feature MAS 3.14*/
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo mode       >flow_sw_sq          - Set sequence type <mode> to flow table SW entry.\n");
+#endif
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo e l        >flow_sw_engin       - set engine <e> nember to flow table SW entry.  <l> - last bit.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo l p        >flow_sw_extra       - set lookup type <l> and priority <p> to flow table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo idx id     >flow_sw_hek         - set HEK field <idx, id> flow table SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo n          >flow_sw_num_of_heks - set number of HEK fields <n> to flow table SW entry.\n");
+
+	off += scnprintf(buf + off, PAGE_SIZE,  "\n");
+	off += scnprintf(buf + off, PAGE_SIZE,  "echo i len      >len_change_hw_set   - set signed length <len> (in decimal) change for modification index <idx> (in hex).\n");
+
+	return off;
+}
+
+
+static ssize_t mv_cls_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	const char      *name = attr->attr.name;
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "lkp_sw_dump"))
+		mvPp2ClsSwLkpDump(&lkp_entry);
+	else if (!strcmp(name, "lkp_hw_hits"))
+		mvPp2V1ClsHwLkpHitsDump();
+	else if (!strcmp(name, "flow_hw_hits"))
+		mvPp2V1ClsHwFlowHitsDump();
+	else if (!strcmp(name, "flow_sw_dump"))
+		mvPp2ClsSwFlowDump(&flow_entry);
+	else if (!strcmp(name, "lkp_hw_dump"))
+		mvPp2ClsHwLkpDump();
+	else if (!strcmp(name, "flow_hw_dump"))
+		mvPp2ClsHwFlowDump();
+	else if (!strcmp(name, "len_change_hw_dump"))
+		mvPp2ClsPktLenChangeDump();
+	else if (!strcmp(name, "hw_regs"))
+		mvPp2ClsHwRegsDump();
+	else
+		off += mv_cls_help(buf);
+
+	return off;
+}
+
+
+
+static ssize_t mv_prs_store_unsigned(struct device *dev,
+				struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0, a = 0, b = 0, c = 0, d = 0;
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%x %x %x %x", &a, &b, &c, &d);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "lkp_sw_clear"))
+		mvPp2ClsSwLkpClear(&lkp_entry);
+	else if (!strcmp(name, "lkp_hw_write"))
+		mvPp2ClsHwLkpWrite(a, b, &lkp_entry);
+	else if (!strcmp(name, "lkp_hw_read"))
+		mvPp2ClsHwLkpRead(a, b, &lkp_entry);
+	else if (!strcmp(name, "lkp_sw_rxq"))
+		mvPp2ClsSwLkpRxqSet(&lkp_entry, a);
+	else if (!strcmp(name, "lkp_sw_flow"))
+		mvPp2ClsSwLkpFlowSet(&lkp_entry, a);
+	else if (!strcmp(name, "lkp_sw_mod"))
+		mvPp2ClsSwLkpModSet(&lkp_entry, a);
+	else if (!strcmp(name, "lkp_sw_en"))
+		mvPp2ClsSwLkpEnSet(&lkp_entry, a);
+	else if (!strcmp(name, "flow_sw_clear"))
+		mvPp2ClsSwFlowClear(&flow_entry);
+	else if (!strcmp(name, "flow_hw_write"))
+		mvPp2ClsHwFlowWrite(a, &flow_entry);
+	else if (!strcmp(name, "flow_hw_read"))
+		mvPp2ClsHwFlowRead(a, &flow_entry);
+	else if (!strcmp(name, "flow_sw_port"))
+		mvPp2ClsSwFlowPortSet(&flow_entry, a, b);
+	else if (!strcmp(name, "flow_sw_portid"))
+		mvPp2ClsSwPortIdSelect(&flow_entry, a);
+	else if (!strcmp(name, "flow_sw_pppoe"))
+		mvPp2ClsSwFlowPppoeSet(&flow_entry, a);
+	else if (!strcmp(name, "flow_sw_vlan"))
+		mvPp2ClsSwFlowVlanSet(&flow_entry, a);
+	else if (!strcmp(name, "flow_sw_macme"))
+		mvPp2ClsSwFlowMacMeSet(&flow_entry, a);
+	else if (!strcmp(name, "flow_sw_udf7"))
+		mvPp2ClsSwFlowUdf7Set(&flow_entry, a);
+	/*PPv2.1 feature changed MAS 3.14*/
+	else if (!strcmp(name, "flow_sw_sq"))
+		mvPp2ClsSwFlowSeqCtrlSet(&flow_entry, a);
+	else if (!strcmp(name, "flow_sw_engine"))
+		mvPp2ClsSwFlowEngineSet(&flow_entry, a, b);
+	else if (!strcmp(name, "flow_sw_extra"))
+		mvPp2ClsSwFlowExtraSet(&flow_entry, a, b);
+	else if (!strcmp(name, "flow_sw_hek"))
+		mvPp2ClsSwFlowHekSet(&flow_entry, a, b);
+	else if (!strcmp(name, "flow_sw_num_of_heks"))
+		mvPp2ClsSwFlowHekNumSet(&flow_entry, a);
+	else if (!strcmp(name, "hw_enable"))
+		mvPp2ClsHwEnable(a);
+	else if (!strcmp(name, "hw_port_way"))
+		mvPp2ClsHwPortWaySet(a, b);
+	else if (!strcmp(name, "hw_port_spid"))
+		mvPp2ClsHwPortSpidSet(a, b);
+	else if (!strcmp(name, "hw_uni_spid"))
+		mvPp2ClsHwUniPortSet(a, b);
+	else if (!strcmp(name, "hw_virt_gpid"))
+		mvPp2ClsHwVirtPortSet(a, b);
+	else if (!strcmp(name, "hw_udf"))
+		mvPp2ClsHwUdfSet(a, b, c, d);
+	/*PPv2.1 feature changed MAS 3.7*/
+	else if (!strcmp(name, "hw_mtu"))
+#ifdef CONFIG_MV_ETH_PP2_1
+		mvPp2V1ClsHwMtuSet(a, b);
+#else
+		mvPp2V0ClsHwMtuSet(a, b, c);
+#endif
+#ifdef CONFIG_MV_ETH_PP2_1
+	else if (!strcmp(name, "hw_over_rxq_low"))
+		mvPp2ClsHwOversizeRxqLowSet(a, b);
+#else
+	else if (!strcmp(name, "hw_over_rxq"))
+		mvPp2ClsHwOversizeRxqSet(a, b);
+#endif
+	/*PPv2.1 new feature MAS 3.5*/
+	else if (!strcmp(name, "hw_qh"))
+		mvPp2ClsHwRxQueueHighSet(a, b, c);
+	/*PPv2.1 new feature MAS 3.18*/
+	else if (!strcmp(name, "hw_mh"))
+		mvPp2ClsHwMhSet(a, b, c, d);
+	/*PPv2.1 new feature MAS 3.18*/
+	else if (!strcmp(name, "hw_sq_size"))
+		mvPp2ClsHwSeqInstrSizeSet(a, b);
+	else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+
+static ssize_t mv_prs_store_signed(struct device *dev,
+				struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0, a = 0;
+	unsigned long flags;
+	int b = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%x %d", &a, &b);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "len_change_hw_set"))
+		mvPp2ClsPktLenChangeSet(a, b);
+	else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(lkp_hw_dump,			S_IRUSR, mv_cls_show, NULL);
+static DEVICE_ATTR(lkp_hw_hits,			S_IRUSR, mv_cls_show, NULL);
+static DEVICE_ATTR(flow_hw_hits,		S_IRUSR, mv_cls_show, NULL);
+static DEVICE_ATTR(flow_hw_dump,		S_IRUSR, mv_cls_show, NULL);
+static DEVICE_ATTR(lkp_sw_dump,			S_IRUSR, mv_cls_show, NULL);
+static DEVICE_ATTR(flow_sw_dump,		S_IRUSR, mv_cls_show, NULL);
+static DEVICE_ATTR(len_change_hw_dump,		S_IRUSR, mv_cls_show, NULL);
+static DEVICE_ATTR(help,			S_IRUSR, mv_cls_show, NULL);
+static DEVICE_ATTR(hw_regs,			S_IRUSR, mv_cls_show, NULL);
+static DEVICE_ATTR(lkp_sw_clear,		S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+static DEVICE_ATTR(lkp_hw_write,		S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+static DEVICE_ATTR(lkp_hw_read,			S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+static DEVICE_ATTR(lkp_sw_rxq,			S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+static DEVICE_ATTR(lkp_sw_flow,			S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+static DEVICE_ATTR(lkp_sw_mod,			S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+static DEVICE_ATTR(lkp_sw_en,			S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+static DEVICE_ATTR(flow_sw_clear,		S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+static DEVICE_ATTR(flow_hw_write,		S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+static DEVICE_ATTR(flow_hw_read,		S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+static DEVICE_ATTR(flow_sw_port,		S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+static DEVICE_ATTR(flow_sw_portid,		S_IWUSR, mv_cls_show, mv_prs_store_unsigned);/*PPv2.1 new feature MAS 3.18*/
+static DEVICE_ATTR(flow_sw_pppoe,		S_IWUSR, mv_cls_show, mv_prs_store_unsigned);/*PPv2.1 new feature MAS 3.18*/
+static DEVICE_ATTR(flow_sw_macme,		S_IWUSR, mv_cls_show, mv_prs_store_unsigned);/*PPv2.1 new feature MAS 3.18*/
+static DEVICE_ATTR(flow_sw_vlan,		S_IWUSR, mv_cls_show, mv_prs_store_unsigned);/*PPv2.1 new feature MAS 3.18*/
+static DEVICE_ATTR(flow_sw_udf7,		S_IWUSR, mv_cls_show, mv_prs_store_unsigned);/*PPv2.1 new feature MAS 3.18*/
+static DEVICE_ATTR(flow_sw_sq,			S_IWUSR, mv_cls_show, mv_prs_store_unsigned);/*PPv2.1 new feature MAS 3.14*/
+static DEVICE_ATTR(flow_sw_engine,		S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+static DEVICE_ATTR(flow_sw_extra,		S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+static DEVICE_ATTR(flow_sw_hek,			S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+static DEVICE_ATTR(flow_sw_num_of_heks,		S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+static DEVICE_ATTR(len_change_hw_set,		S_IWUSR, mv_cls_show, mv_prs_store_signed);
+static DEVICE_ATTR(hw_enable,			S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+static DEVICE_ATTR(hw_port_way,			S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+static DEVICE_ATTR(hw_port_spid,		S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+static DEVICE_ATTR(hw_uni_spid,			S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+static DEVICE_ATTR(hw_virt_gpid,		S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+static DEVICE_ATTR(hw_udf,			S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+static DEVICE_ATTR(hw_mtu,			S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+#ifdef CONFIG_MV_ETH_PP2_1
+static DEVICE_ATTR(hw_over_rxq_low,		S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+#else
+static DEVICE_ATTR(hw_over_rxq,			S_IWUSR, mv_cls_show, mv_prs_store_unsigned);
+#endif
+static DEVICE_ATTR(hw_qh,			S_IWUSR, mv_cls_show, mv_prs_store_unsigned); /*PPv2.1 new feature MAS 3.5*/
+static DEVICE_ATTR(hw_mh,			S_IWUSR, mv_cls_show, mv_prs_store_unsigned); /*PPv2.1 new feature MAS 3.18*/
+static DEVICE_ATTR(hw_sq_size,			S_IWUSR, mv_cls_show, mv_prs_store_unsigned); /*PPv2.1 new feature MAS 3.14*/
+
+
+
+static struct attribute *cls_attrs[] = {
+	&dev_attr_lkp_sw_dump.attr,
+	&dev_attr_flow_sw_dump.attr,
+	&dev_attr_lkp_hw_hits.attr,
+	&dev_attr_flow_hw_hits.attr,
+	&dev_attr_lkp_hw_dump.attr,
+	&dev_attr_flow_hw_dump.attr,
+	&dev_attr_len_change_hw_dump.attr,
+	&dev_attr_hw_regs.attr,
+	&dev_attr_lkp_sw_clear.attr,
+	&dev_attr_lkp_hw_write.attr,
+	&dev_attr_lkp_hw_read.attr,
+	&dev_attr_lkp_sw_rxq.attr,
+	&dev_attr_lkp_sw_flow.attr,
+	&dev_attr_lkp_sw_mod.attr,
+	&dev_attr_lkp_sw_en.attr,
+	&dev_attr_flow_sw_clear.attr,
+	&dev_attr_flow_hw_write.attr,
+	&dev_attr_flow_hw_read.attr,
+	&dev_attr_flow_sw_port.attr,
+	&dev_attr_flow_sw_portid.attr,/*PPv2.1 new feature MAS 3.18*/
+	&dev_attr_flow_sw_engine.attr,/*PPv2.1 new feature MAS 3.18*/
+	&dev_attr_flow_sw_vlan.attr,/*PPv2.1 new feature MAS 3.18*/
+	&dev_attr_flow_sw_pppoe.attr,/*PPv2.1 new feature MAS 3.18*/
+	&dev_attr_flow_sw_macme.attr,/*PPv2.1 new feature MAS 3.18*/
+	&dev_attr_flow_sw_sq.attr,/*PPv2.1 new feature MAS 3.14*/
+	&dev_attr_flow_sw_udf7.attr,
+	&dev_attr_flow_sw_extra.attr,
+	&dev_attr_flow_sw_hek.attr,
+	&dev_attr_flow_sw_num_of_heks.attr,
+	&dev_attr_len_change_hw_set.attr,
+	&dev_attr_hw_enable.attr,
+	&dev_attr_hw_port_way.attr,
+	&dev_attr_hw_port_spid.attr,
+	&dev_attr_hw_uni_spid.attr,
+	&dev_attr_hw_virt_gpid.attr,
+	&dev_attr_hw_udf.attr,
+	&dev_attr_hw_mtu.attr,/*PPv2.1 feature changed MAS 3.7*/
+#ifdef CONFIG_MV_ETH_PP2_1
+	&dev_attr_hw_over_rxq_low.attr,/*PPv2.1 feature changed MAS 3.7*/
+#else
+	&dev_attr_hw_over_rxq.attr,
+#endif
+	&dev_attr_hw_qh.attr,/*PPv2.1 new feature MAS 3.5*/
+	&dev_attr_hw_mh.attr,/*PPv2.1 new feature MAS 3.18*/
+	&dev_attr_hw_sq_size.attr,/*PPv2.1 new feature MAS 3.14*/
+	&dev_attr_help.attr,
+	NULL
+};
+
+static struct attribute_group cls_group = {
+	.name = "cls",
+	.attrs = cls_attrs,
+};
+
+int mv_pp2_cls_sysfs_init(struct kobject *pp2_kobj)
+{
+	int err = 0;
+
+	err = sysfs_create_group(pp2_kobj, &cls_group);
+	if (err)
+		printk(KERN_INFO "sysfs group %s failed %d\n", cls_group.name, err);
+
+	return err;
+}
+
+int mv_pp2_cls_sysfs_exit(struct kobject *pp2_kobj)
+{
+	sysfs_remove_group(pp2_kobj, &cls_group);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_api.c b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_api.c
new file mode 100644
index 000000000000..ab7a4389da5c
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_api.c
@@ -0,0 +1,636 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	*   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+	*   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+	*   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+********************************************************************************
+* mv_cph_api.c
+*
+* DESCRIPTION: Marvell CPH(CPH Packet Handler) API definition
+*
+* DEPENDENCIES:
+*               None
+*
+* CREATED BY:   VictorGu
+*
+* DATE CREATED: 22Jan2013
+*
+* FILE REVISION NUMBER:
+*               Revision: 1.0
+*
+*
+*******************************************************************************/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/poll.h>
+#include <linux/clk.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include "mv_cph_header.h"
+
+
+/******************************************************************************
+* Variable Definition
+******************************************************************************/
+
+
+/******************************************************************************
+* Function Definition
+******************************************************************************/
+/******************************************************************************
+* cph_set_complex_profile()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Set TPM complex profile ID
+*
+* INPUTS:
+*       profile_id   - TPM complex profile ID
+*       active_port  - Active WAN port
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_set_complex_profile(enum tpm_eth_complex_profile_t profile_id, enum MV_APP_GMAC_PORT_E active_port)
+{
+	MV_STATUS rc = MV_OK;
+
+	rc = cph_app_set_complex_profile(profile_id, active_port);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "Fail to call cph_app_set_complex_profile");
+
+	return rc;
+}
+EXPORT_SYMBOL(cph_set_complex_profile);
+
+/******************************************************************************
+* cph_set_feature_flag()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Enable or disable feature support in CPH
+*
+* INPUTS:
+*       feature - CPH supported features
+*       state   - Enable or disable this feature in CPH
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_set_feature_flag(enum CPH_APP_FEATURE_E feature, bool state)
+{
+	MV_STATUS rc = MV_OK;
+
+	rc = cph_app_set_feature_flag(feature, state);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "fail to call cph_app_set_feature_flag");
+
+	return rc;
+}
+EXPORT_SYMBOL(cph_set_feature_flag);
+
+/******************************************************************************
+* cph_add_app_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Add CPH rule
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_add_app_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key,
+	enum CPH_APP_MOD_FIELD_E   mod_bm,
+	struct CPH_APP_MOD_T        *mod_value,
+	enum CPH_APP_FRWD_FIELD_E  frwd_bm,
+	struct CPH_APP_FRWD_T       *frwd_value)
+{
+	MV_STATUS rc = MV_OK;
+
+	rc = cph_app_add_rule(parse_bm, parse_key, mod_bm, mod_value, frwd_bm, frwd_value);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "Fail to call cph_app_add_rule");
+
+	return rc;
+}
+EXPORT_SYMBOL(cph_add_app_rule);
+
+/******************************************************************************
+* cph_del_app_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Delete CPH rule
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_del_app_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key)
+{
+	MV_STATUS rc = MV_OK;
+
+	rc = cph_app_del_rule(parse_bm, parse_key);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "Fail to call cph_app_del_rule");
+
+	return rc;
+}
+EXPORT_SYMBOL(cph_del_app_rule);
+
+/******************************************************************************
+* cph_update_app_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Update CPH rule
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_update_app_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key,
+	enum CPH_APP_MOD_FIELD_E   mod_bm,
+	struct CPH_APP_MOD_T        *mod_value,
+	enum CPH_APP_FRWD_FIELD_E  frwd_bm,
+	struct CPH_APP_FRWD_T       *frwd_value)
+{
+	MV_STATUS rc = MV_OK;
+
+	rc = cph_app_update_rule(parse_bm, parse_key, mod_bm, mod_value, frwd_bm, frwd_value);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "Fail to call cph_app_update_rule");
+
+	return rc;
+}
+EXPORT_SYMBOL(cph_update_app_rule);
+
+/******************************************************************************
+* cph_get_app_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get CPH rule
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*
+* OUTPUTS:
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_get_app_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key,
+	enum CPH_APP_MOD_FIELD_E  *mod_bm,
+	struct CPH_APP_MOD_T        *mod_value,
+	enum CPH_APP_FRWD_FIELD_E *frwd_bm,
+	struct CPH_APP_FRWD_T       *frwd_value)
+{
+	MV_STATUS rc = MV_OK;
+
+	rc = cph_app_get_rule(parse_bm, parse_key, mod_bm, mod_value, frwd_bm, frwd_value);
+	if (rc != MV_OK)
+		MV_CPH_PRINT(CPH_DEBUG_LEVEL, "fail to call cph_app_get_rule\n");
+
+	return rc;
+}
+EXPORT_SYMBOL(cph_get_app_rule);
+
+/******************************************************************************
+* cph_add_flow_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Sets flow mapping rule
+*
+* INPUTS:
+*       cph_flow - VLAN ID, 802.1p value, pkt_fwd information.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_add_flow_rule(struct CPH_FLOW_ENTRY_T *cph_flow)
+{
+	MV_STATUS rc = MV_OK;
+
+	rc = cph_flow_add_rule(cph_flow);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "Fail to call cph_flow_add_rule");
+
+	return rc;
+}
+EXPORT_SYMBOL(cph_add_flow_rule);
+
+/******************************************************************************
+* cph_del_flow_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Deletes flow mapping rule
+*
+* INPUTS:
+*       cph_flow - VLAN ID, 802.1p value, pkt_fwd information.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_del_flow_rule(struct CPH_FLOW_ENTRY_T *cph_flow)
+{
+	MV_STATUS rc = MV_OK;
+
+	rc = cph_flow_del_rule(cph_flow);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "Fail to call cph_flow_del_rule");
+
+	return rc;
+}
+EXPORT_SYMBOL(cph_del_flow_rule);
+
+/******************************************************************************
+* cph_get_flow_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Gets flow mapping rule for tagged frames.
+*
+* INPUTS:
+*       cph_flow - Input vid, pbits, dir
+*
+* OUTPUTS:
+*       cph_flow - output packet forwarding information, including GEM port,
+*                   T-CONT, queue and packet modification for VID, P-bits.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_get_flow_rule(struct CPH_FLOW_ENTRY_T *cph_flow)
+{
+	MV_STATUS rc = MV_OK;
+
+	rc = cph_flow_get_rule(cph_flow);
+	if (rc != MV_OK)
+		MV_CPH_PRINT(CPH_DEBUG_LEVEL, "fail to call cph_flow_get_rule\n");
+
+	return rc;
+}
+EXPORT_SYMBOL(cph_get_flow_rule);
+
+/******************************************************************************
+* cph_clear_flow_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Clears all flow mapping rules
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_clear_flow_rule(void)
+{
+	MV_STATUS rc = MV_OK;
+
+	rc = cph_flow_clear_rule();
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "Fail to call cph_flow_clear_rule");
+
+	return rc;
+}
+EXPORT_SYMBOL(cph_clear_flow_rule);
+
+/******************************************************************************
+* cph_clear_flow_rule_by_mh()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Clears flow mapping rules by MH
+*
+* INPUTS:
+*       mh   -  Marvell header.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_clear_flow_rule_by_mh(unsigned short mh)
+{
+	MV_STATUS rc = MV_OK;
+
+	rc = cph_flow_clear_rule_by_mh(mh);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "Fail to call cph_flow_clear_rule_by_mh");
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_set_flow_dscp_map()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Sets DSCP to P-bits mapping rules
+*
+* INPUTS:
+*       dscp_map  - DSCP to P-bits mapping rules.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_set_flow_dscp_map(struct CPH_DSCP_PBITS_T *dscp_map)
+{
+	MV_STATUS rc = MV_OK;
+
+	rc = cph_flow_set_dscp_map(dscp_map);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "Fail to call cph_flow_set_dscp_map");
+
+	return rc;
+}
+EXPORT_SYMBOL(cph_set_flow_dscp_map);
+
+/******************************************************************************
+* cph_del_flow_dscp_map()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Deletes DSCP to P-bits mapping rules
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_del_flow_dscp_map(void)
+{
+	MV_STATUS rc = MV_OK;
+
+	rc = cph_flow_del_dscp_map();
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "Fail to call cph_flow_del_dscp_map");
+
+	return rc;
+}
+EXPORT_SYMBOL(cph_del_flow_dscp_map);
+
+/*******************************************************************************
+**
+** cph_get_tcont_state
+** ___________________________________________________________________________
+**
+** DESCRIPTION: The function get T-CONT state
+**
+** INPUTS:
+**   tcont - T-CONT
+**
+** OUTPUTS:
+**   None.
+**
+** RETURNS:
+**   state - State of T-CONT, enabled or disabled.
+**
+*******************************************************************************/
+bool cph_get_tcont_state(unsigned int tcont)
+{
+	return cph_db_get_tcont_state(tcont);
+}
+EXPORT_SYMBOL(cph_get_tcont_state);
+
+/*******************************************************************************
+**
+** cph_set_tcont_state
+** ___________________________________________________________________________
+**
+** DESCRIPTION: The function sets T-CONT state in mv_cust
+**
+** INPUTS:
+**   tcont - T-CONT
+**   state - State of T-CONT, enabled or disabled.
+**
+** OUTPUTS:
+**   None.
+**
+** RETURNS:
+**  On success, the function returns (MV_OK). On error different types are
+**  returned according to the case.
+**
+*******************************************************************************/
+int cph_set_tcont_state(unsigned int tcont, bool state)
+{
+	return cph_db_set_tcont_state(tcont, state);
+}
+EXPORT_SYMBOL(cph_set_tcont_state);
+
+/******************************************************************************
+* Function Definition
+******************************************************************************/
+/******************************************************************************
+* cph_set_port_func()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Set CPH port Rx/Tx func
+*
+* INPUTS:
+*       port          - physical port ID
+*       dir            - Rx(0)  Tx(1)
+*       enable      - disable(0)  enable(1)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_set_port_func(int port, enum CPH_RX_TX_E dir, bool enable)
+{
+	MV_STATUS rc = MV_OK;
+
+	if (CPH_DIR_RX == dir) {
+		if (enable)
+			mv_pp2_rx_special_proc_func(port, cph_rx_func);
+		else
+			mv_pp2_rx_special_proc_func(port, NULL);
+	} else if (CPH_DIR_TX == dir) {
+		if (enable)
+			mv_pp2_tx_special_check_func(port, cph_tx_func);
+		else
+			mv_pp2_tx_special_check_func(port, NULL);
+	} else {
+		if (enable) {
+			mv_pp2_rx_special_proc_func(port, cph_rx_func);
+			mv_pp2_tx_special_check_func(port, cph_tx_func);
+		} else {
+			mv_pp2_rx_special_proc_func(port, NULL);
+			mv_pp2_tx_special_check_func(port, NULL);
+		}
+	}
+
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "Fail to call cph_set_port_func");
+
+	return rc;
+}
+EXPORT_SYMBOL(cph_set_port_func);
+
+/******************************************************************************
+* Function Definition
+******************************************************************************/
+/******************************************************************************
+* cph_get_port_func()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Set CPH port Rx/Tx func
+*
+* INPUTS:
+*       port              - physical port ID
+*
+* OUTPUTS:
+*       rx_enable      - disable(0)  enable(1)
+*       tx_enable      - disable(0)  enable(1)
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_get_port_func(int port, bool *rx_enable, bool *tx_enable)
+{
+	MV_STATUS rc = MV_OK;
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (!pp)
+		return MV_BAD_PARAM;
+
+	if (pp->rx_special_proc)
+		*rx_enable = true;
+	else
+		*rx_enable = false;
+
+	if (pp->tx_special_check)
+		*tx_enable = true;
+	else
+		*tx_enable = false;
+
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "Fail to call cph_get_port_func");
+
+	return rc;
+}
+EXPORT_SYMBOL(cph_get_port_func);
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_api.h b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_api.h
new file mode 100644
index 000000000000..7b8bd6e4c099
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_api.h
@@ -0,0 +1,456 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	*   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+	*   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+	*   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+********************************************************************************
+* mv_cph_api.h
+*
+* DESCRIPTION: Marvell CPH(CPH Packet Handler) API definition
+*
+* DEPENDENCIES:
+*               None
+*
+* CREATED BY:   VictorGu
+*
+* DATE CREATED: 22Jan2013
+*
+* FILE REVISION NUMBER:
+*               Revision: 1.0
+*
+*
+*******************************************************************************/
+#ifndef _MV_CPH_API_H_
+#define _MV_CPH_API_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/******************************************************************************
+* Type Definition
+******************************************************************************/
+
+
+
+/******************************************************************************
+* Function Declaration
+******************************************************************************/
+/******************************************************************************
+* cph_set_complex_profile()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Set TPM complex profile ID
+*
+* INPUTS:
+*       profile_id   - TPM complex profile ID
+*       active_port  - Active WAN port
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_set_complex_profile(enum tpm_eth_complex_profile_t profile_id, enum MV_APP_GMAC_PORT_E active_port);
+
+/******************************************************************************
+* cph_set_feature_flag()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Enable or disable feature support in CPH
+*
+* INPUTS:
+*       feature - CPH supported features
+*       state   - Enable or disable this feature in CPH
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_set_feature_flag(enum CPH_APP_FEATURE_E feature, bool state);
+
+/******************************************************************************
+* cph_add_app_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Add CPH rule
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_add_app_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key,
+	enum CPH_APP_MOD_FIELD_E   mod_bm,
+	struct CPH_APP_MOD_T        *mod_value,
+	enum CPH_APP_FRWD_FIELD_E  frwd_bm,
+	struct CPH_APP_FRWD_T       *frwd_value);
+
+/******************************************************************************
+* cph_del_app_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Delete CPH rule
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_del_app_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key);
+
+/******************************************************************************
+* cph_update_app_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Update CPH rule
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_update_app_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key,
+	enum CPH_APP_MOD_FIELD_E   mod_bm,
+	struct CPH_APP_MOD_T        *mod_value,
+	enum CPH_APP_FRWD_FIELD_E  frwd_bm,
+	struct CPH_APP_FRWD_T       *frwd_value);
+
+/******************************************************************************
+* cph_get_app_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get CPH rule
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*
+* OUTPUTS:
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_get_app_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key,
+	enum CPH_APP_MOD_FIELD_E  *mod_bm,
+	struct CPH_APP_MOD_T        *mod_value,
+	enum CPH_APP_FRWD_FIELD_E *frwd_bm,
+	struct CPH_APP_FRWD_T       *frwd_value);
+
+/******************************************************************************
+* cph_add_flow_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Sets flow mapping rule
+*
+* INPUTS:
+*       cph_flow - VLAN ID, 802.1p value, pkt_fwd information.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_add_flow_rule(struct CPH_FLOW_ENTRY_T *cph_flow);
+
+/******************************************************************************
+* cph_del_flow_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Deletes flow mapping rule
+*
+* INPUTS:
+*       cph_flow - VLAN ID, 802.1p value, pkt_fwd information.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_del_flow_rule(struct CPH_FLOW_ENTRY_T *cph_flow);
+
+/******************************************************************************
+* cph_get_flow_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Gets flow mapping rule for tagged frames.
+*
+* INPUTS:
+*       cph_flow - Input vid, pbits, dir
+*
+* OUTPUTS:
+*       cph_flow - output packet forwarding information, including GEM port,
+*                   T-CONT, queue and packet modification for VID, P-bits.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_get_flow_rule(struct CPH_FLOW_ENTRY_T *cph_flow);
+
+/******************************************************************************
+* cph_clear_flow_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Clears all flow mapping rules
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_clear_flow_rule(void);
+
+/******************************************************************************
+* cph_clear_flow_rule_by_mh()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Clears flow mapping rules by MH
+*
+* INPUTS:
+*       mh   -  Marvell header.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_clear_flow_rule_by_mh(unsigned short mh);
+
+/******************************************************************************
+* cph_set_flow_dscp_map()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Sets DSCP to P-bits mapping rules
+*
+* INPUTS:
+*       dscp_map  - DSCP to P-bits mapping rules.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_set_flow_dscp_map(struct CPH_DSCP_PBITS_T *dscp_map);
+
+/******************************************************************************
+* cph_del_flow_dscp_map()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Deletes DSCP to P-bits mapping rules
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_del_flow_dscp_map(void);
+
+/*******************************************************************************
+**
+** cph_get_tcont_state
+** ___________________________________________________________________________
+**
+** DESCRIPTION: The function get T-CONT state
+**
+** INPUTS:
+**   tcont - T-CONT
+**
+** OUTPUTS:
+**   None.
+**
+** RETURNS:
+**   state - State of T-CONT, enabled or disabled.
+**
+*******************************************************************************/
+bool cph_get_tcont_state(unsigned int tcont);
+
+/*******************************************************************************
+**
+** cph_set_tcont_state
+** ___________________________________________________________________________
+**
+** DESCRIPTION: The function sets T-CONT state in mv_cust
+**
+** INPUTS:
+**   tcont - T-CONT
+**   state - State of T-CONT, enabled or disabled.
+**
+** OUTPUTS:
+**   None.
+**
+** RETURNS:
+**  On success, the function returns (MV_OK). On error different types are
+**  returned according to the case.
+**
+*******************************************************************************/
+int cph_set_tcont_state(unsigned int tcont, bool state);
+
+/******************************************************************************
+* Function Definition
+******************************************************************************/
+/******************************************************************************
+* cph_set_port_func()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Set CPH port Rx/Tx func
+*
+* INPUTS:
+*       port          - physical port ID
+*       dir            - Rx(0)  Tx(1)
+*       enable      - disable(0)  enable(1)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_set_port_func(int port, enum CPH_RX_TX_E dir, bool enable);
+
+/******************************************************************************
+* Function Definition
+******************************************************************************/
+/******************************************************************************
+* cph_get_port_func()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Set CPH port Rx/Tx func
+*
+* INPUTS:
+*       port              - physical port ID
+*
+* OUTPUTS:
+*       rx_enable      - disable(0)  enable(1)
+*       tx_enable      - disable(0)  enable(1)
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_get_port_func(int port, bool *rx_enable, bool *tx_enable);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _MV_CPH_API_H_ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_app.c b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_app.c
new file mode 100644
index 000000000000..507442386976
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_app.c
@@ -0,0 +1,1432 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	*   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+	*   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+	*   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+********************************************************************************
+* mv_cph_app.c
+*
+* DESCRIPTION: Marvell CPH(CPH Packet Handler) application module to implement
+*              CPH main logic and handle application packets such as OMCI, eOAM,
+*              IGMP packets.
+*
+* DEPENDENCIES:
+*               None
+*
+* CREATED BY:   VictorGu
+*
+* DATE CREATED: 22Jan2013
+*
+* FILE REVISION NUMBER:
+*               Revision: 1.0
+*
+*
+*******************************************************************************/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/poll.h>
+#include <linux/clk.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "mv_cph_header.h"
+
+
+/******************************************************************************
+* Variable Definition
+******************************************************************************/
+/* CPH global trace flag */
+unsigned int g_cph_global_trace = CPH_ERR_LEVEL|CPH_WARN_LEVEL|CPH_INFO_LEVEL;
+
+struct MV_ENUM_ENTRY_T g_enum_map_profile_id[] = {
+	{ TPM_PON_WAN_DUAL_MAC_INT_SWITCH,  "TPM_PON_WAN_DUAL_MAC_INT_SWITCH"},
+	{ TPM_PON_WAN_G0_INT_SWITCH,        "TPM_PON_WAN_G0_INT_SWITCH"},
+	{ TPM_PON_WAN_G1_LAN_G0_INT_SWITCH, "TPM_PON_WAN_G1_LAN_G0_INT_SWITCH"},
+	{ TPM_G0_WAN_G1_INT_SWITCH,         "TPM_G0_WAN_G1_INT_SWITCH"},
+	{ TPM_G1_WAN_G0_INT_SWITCH,         "TPM_G1_WAN_G0_INT_SWITCH"},
+	{ TPM_PON_G1_WAN_G0_INT_SWITCH,     "TPM_PON_G1_WAN_G0_INT_SWITCH"},
+	{ TPM_PON_G0_WAN_G1_INT_SWITCH,     "TPM_PON_G0_WAN_G1_INT_SWITCH"},
+	{ TPM_PON_WAN_DUAL_MAC_EXT_SWITCH,  "TPM_PON_WAN_DUAL_MAC_EXT_SWITCH"},
+	{ TPM_PON_WAN_G1_MNG_EXT_SWITCH,    "TPM_PON_WAN_G1_MNG_EXT_SWITCH"},
+	{ TPM_PON_WAN_G0_SINGLE_PORT,       "TPM_PON_WAN_G0_SINGLE_PORT"},
+	{ TPM_PON_WAN_G1_SINGLE_PORT,       "TPM_PON_WAN_G1_SINGLE_PORT"},
+	{ TPM_PON_G1_WAN_G0_SINGLE_PORT,    "TPM_PON_G1_WAN_G0_SINGLE_PORT"},
+	{ TPM_PON_G0_WAN_G1_SINGLE_PORT,    "TPM_PON_G0_WAN_G1_SINGLE_PORTg"},
+	{ TPM_PON_WAN_G0_G1_LPBK,           "TPM_PON_WAN_G0_G1_LPBK"},
+	{ TPM_PON_WAN_G0_G1_DUAL_LAN,       "TPM_PON_WAN_G0_G1_DUAL_LAN"},
+};
+
+static struct MV_ENUM_ARRAY_T g_enum_array_profile_id = {
+	sizeof(g_enum_map_profile_id)/sizeof(g_enum_map_profile_id[0]),
+	g_enum_map_profile_id
+};
+
+static struct MV_ENUM_ENTRY_T g_enum_map_pon_type[] = {
+	{ CPH_PON_TYPE_EPON, "EPON"},
+	{ CPH_PON_TYPE_GPON, "GPON"},
+	{ CPH_PON_TYPE_GBE,  "GBE"},
+	{ CPH_PON_TYPE_P2P,  "P2P"},
+};
+
+static struct MV_ENUM_ARRAY_T g_enum_array_pon_type = {
+	sizeof(g_enum_map_pon_type)/sizeof(g_enum_map_pon_type[0]),
+	g_enum_map_pon_type
+};
+
+static struct MV_ENUM_ENTRY_T g_enum_map_dir[] = {
+	{ CPH_DIR_US,       "US"},
+	{ CPH_DIR_DS,       "DS"},
+	{ CPH_DIR_NOT_CARE, "Not Care"},
+};
+
+static struct MV_ENUM_ARRAY_T g_enum_array_dir = {
+	sizeof(g_enum_map_dir)/sizeof(g_enum_map_dir[0]),
+	g_enum_map_dir
+};
+
+static struct MV_ENUM_ENTRY_T g_enum_map_rx_tx[] = {
+	{ CPH_DIR_RX,         "RX"},
+	{ CPH_DIR_TX,         "TX"},
+	{ CPH_RX_TX_NOT_CARE, "Not Care"},
+};
+
+static struct MV_ENUM_ARRAY_T g_enum_array_rx_tx = {
+	sizeof(g_enum_map_rx_tx)/sizeof(g_enum_map_rx_tx[0]),
+	g_enum_map_rx_tx
+};
+
+static struct MV_ENUM_ENTRY_T g_enum_map_gmac[] = {
+	{ MV_APP_GMAC_PORT_0,  "GMAC0"},
+	{ MV_APP_GMAC_PORT_1,  "GMAC1"},
+	{ MV_APP_PON_MAC_PORT, "PON MAC"},
+};
+
+static struct MV_ENUM_ARRAY_T g_enum_array_gmac = {
+	sizeof(g_enum_map_gmac)/sizeof(g_enum_map_gmac[0]),
+	g_enum_map_gmac
+};
+
+
+/******************************************************************************
+* External Declaration
+******************************************************************************/
+
+/******************************************************************************
+* Function Definition
+******************************************************************************/
+/******************************************************************************
+* cph_app_set_complex_profile()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Set TPM complex profile ID
+*
+* INPUTS:
+*       profile_id   - TPM complex profile ID
+*       active_port  - Active WAN port
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_set_complex_profile(enum tpm_eth_complex_profile_t profile_id, enum MV_APP_GMAC_PORT_E active_port)
+{
+	MV_STATUS rc = MV_OK;
+
+	/* Check the range of profile_id */
+	if (profile_id > TPM_PON_WAN_G0_G1_DUAL_LAN) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "profile_id[%d] is out of range[1~%d]\n",
+			profile_id, TPM_PON_WAN_G0_G1_DUAL_LAN);
+		return MV_OUT_OF_RANGE;
+	}
+
+	/* Check the range of active_port */
+	if (active_port > MV_APP_PON_MAC_PORT) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "active_port[%d] is out of range[0~%d]\n",
+			active_port, MV_APP_PON_MAC_PORT);
+		return MV_OUT_OF_RANGE;
+	}
+
+	rc = cph_db_set_param(CPH_DB_PARAM_PROFILE_ID, &profile_id);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "fail to call cph_db_set_param");
+
+	rc = cph_db_set_param(CPH_DB_PARAM_ACTIVE_PORT, &active_port);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "fail to call cph_db_set_param");
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_app_set_feature_flag()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Enable or disable feature support in CPH
+*
+* INPUTS:
+*       feature - CPH supported features
+*       state   - Enable or disable this feature in CPH
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_set_feature_flag(enum CPH_APP_FEATURE_E feature, bool state)
+{
+	MV_STATUS rc = MV_OK;
+
+	switch (feature) {
+	case CPH_APP_FEATURE_APP:
+		cph_db_set_param(CPH_DB_PARAM_APP_SUPPORT, &state);
+		break;
+	case CPH_APP_FEATURE_IGMP:
+		cph_db_set_param(CPH_DB_PARAM_IGMP_SUPPORT, &state);
+		break;
+	case CPH_APP_FEATURE_BC:
+		cph_db_set_param(CPH_DB_PARAM_BC_SUPPORT, &state);
+		break;
+	case CPH_APP_FEATURE_FLOW:
+		cph_db_set_param(CPH_DB_PARAM_FLOW_SUPPORT, &state);
+		break;
+	case CPH_APP_FEATURE_UDP:
+		cph_db_set_param(CPH_DB_PARAM_UDP_SUPPORT, &state);
+		break;
+	default:
+		break;
+	}
+	return rc;
+}
+
+/******************************************************************************
+* cph_app_validate_parse_field()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Validate the parsing filed of CPH rule
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_validate_parse_field(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key)
+{
+	MV_STATUS rc = MV_OK;
+
+	/* Check the range of parse_bm */
+	if (parse_bm >= CPH_APP_PARSE_FIELD_END) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "parse_bm[0x%x] is out of range[0x01~0x%x]\n",
+			parse_bm, CPH_APP_PARSE_FIELD_END);
+		return MV_OUT_OF_RANGE;
+	}
+
+	/* Validate direction */
+	if (parse_bm & CPH_APP_PARSE_FIELD_DIR) {
+		if (parse_key->dir > CPH_DIR_NOT_CARE) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "dir[%d] is out of range[0~%d]\n",
+				parse_key->dir, CPH_DIR_NOT_CARE);
+			return MV_OUT_OF_RANGE;
+		}
+	}
+
+	/* Validate RX/TX direction */
+	if (parse_bm & CPH_APP_PARSE_FIELD_RX_TX) {
+		if (parse_key->rx_tx > CPH_RX_TX_NOT_CARE) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "rx_tx[%d] is out of range[0~%d]\n",
+				parse_key->rx_tx,
+				CPH_RX_TX_NOT_CARE);
+			return MV_OUT_OF_RANGE;
+		}
+	}
+
+	/* Could not parse None IPv4 Eth type and IPv4 protocol type at the same ime */
+	if ((parse_bm & CPH_APP_PARSE_FIELD_ETH_TYPE) &&
+		(parse_bm & CPH_APP_PARSE_FIELD_IPV4_TYPE) &&
+		(parse_key->eth_type != MV_CPH_ETH_TYPE_IPV4)) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "parse_bm[0x%x], eth_type[0x%x], does not support parsing None" \
+			"IPv4 Eth type and IPv4 protocol type at the same time\n",
+				parse_bm, parse_key->eth_type);
+		return MV_BAD_VALUE;
+	}
+
+	/* Could not parse None IPv6 Eth type and IPv4 protocol type at the same ime */
+	if ((parse_bm & CPH_APP_PARSE_FIELD_ETH_TYPE) &&
+		((parse_bm & CPH_APP_PARSE_FIELD_IPV6_NH1) ||
+		(parse_bm & CPH_APP_PARSE_FIELD_IPV6_NH2) ||
+		(parse_bm & CPH_APP_PARSE_FIELD_ICMPV6_TYPE)) &&
+		(parse_key->eth_type != MV_CPH_ETH_TYPE_IPV6)) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL,
+			"parse_bm[0x%x], eth_type[0x%x], does not support parsing None IPv6 Eth type and " \
+			"IPv6 NH or ICMP type at the same time\n",
+				parse_bm, parse_key->eth_type);
+		return MV_BAD_VALUE;
+	}
+
+	/* Could not parse Eth subtype and IPv4 the same time */
+	if ((parse_bm & CPH_APP_PARSE_FIELD_ETH_SUBTYPE) &&
+		(parse_bm & CPH_APP_PARSE_FIELD_IPV4_TYPE)) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL,
+			"parse_bm[0x%x], does not support parsing Eth subtype and IPv4 type at the same time\n",
+			parse_bm);
+		return MV_BAD_VALUE;
+	}
+
+	/* Could not parse Eth subtype and IPv6 the same time */
+	if ((parse_bm & CPH_APP_PARSE_FIELD_ETH_SUBTYPE) &&
+		((parse_bm & CPH_APP_PARSE_FIELD_IPV6_NH1) ||
+		(parse_bm & CPH_APP_PARSE_FIELD_IPV6_NH2) ||
+		(parse_bm & CPH_APP_PARSE_FIELD_ICMPV6_TYPE))) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL,
+			"parse_bm[0x%x], does not support parsing Eth subtype and IPv6 type at the same time\n",
+			parse_bm);
+		return MV_BAD_VALUE;
+	}
+
+	/* Could not parse IPv4 and IPv6 at the same time */
+	if ((parse_bm & CPH_APP_PARSE_FIELD_IPV4_TYPE) &&
+		((parse_bm & CPH_APP_PARSE_FIELD_IPV6_NH1) ||
+		 (parse_bm & CPH_APP_PARSE_FIELD_IPV6_NH2) ||
+		 (parse_bm & CPH_APP_PARSE_FIELD_ICMPV6_TYPE))) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL,
+			"parse_bm[0x%x], does not support parsing IPv4 and IPv6 type at the same time\n",
+			parse_bm);
+		return MV_BAD_VALUE;
+	}
+
+	/* Validate IGMPv6 type */
+	if ((parse_bm & CPH_APP_PARSE_FIELD_ICMPV6_TYPE) &&
+		(parse_key->icmpv6_type != MV_ICMPV6_TYPE_MLD)) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL,
+			"parse_bm[0x%x], icmpv6_type[%d], currently only support ICMPv6 MLD type[%d]\n",
+			parse_bm, parse_key->icmpv6_type, MV_ICMPV6_TYPE_MLD);
+		return MV_BAD_VALUE;
+	}
+	return rc;
+}
+
+/******************************************************************************
+* cph_app_validate_mod_field()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Validate the modification filed of CPH rule
+*
+* INPUTS:
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_validate_mod_field(
+	enum CPH_APP_MOD_FIELD_E   mod_bm,
+	struct CPH_APP_MOD_T        *mod_value)
+{
+	MV_STATUS rc = MV_OK;
+
+	/* Check the range of mod_bm */
+	if (mod_bm >= CPH_APP_MOD_FIELD_END) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "mod_bm[0x%x] is out of range[0x01~0x%x]\n", mod_bm, CPH_APP_MOD_FIELD_END);
+		return MV_OUT_OF_RANGE;
+	}
+
+	/* Does not support adding GMAC information and strip MH at the same time */
+	if ((mod_bm & CPH_APP_RX_MOD_ADD_GMAC) &&
+	    (mod_bm & CPH_APP_RX_MOD_STRIP_MH)) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL,
+			"mod_bm[0x%x], does not support adding GMAC information and stripping MH at the same time\n",
+			mod_bm);
+		return MV_BAD_VALUE;
+	}
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_app_validate_frwd_field()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Validate the forwarding filed of CPH rule
+*
+* INPUTS:
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_validate_frwd_field(
+	enum CPH_APP_FRWD_FIELD_E  frwd_bm,
+	struct CPH_APP_FRWD_T       *frwd_value)
+{
+	MV_STATUS rc = MV_OK;
+
+	/* Check the range of frwd_bm */
+	if (frwd_bm >= CPH_APP_FRWD_FIELD_END) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "frwd_bm[0x%x] is out of range[0x01~0x%x]\n",
+			frwd_bm, CPH_APP_FRWD_FIELD_END);
+		return MV_OUT_OF_RANGE;
+	}
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_app_add_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Add CPH rule
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_add_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key,
+	enum CPH_APP_MOD_FIELD_E   mod_bm,
+	struct CPH_APP_MOD_T        *mod_value,
+	enum CPH_APP_FRWD_FIELD_E  frwd_bm,
+	struct CPH_APP_FRWD_T       *frwd_value)
+{
+	MV_STATUS rc = MV_OK;
+
+	rc = cph_app_validate_parse_field(parse_bm, parse_key);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "fail to valid parsing field");
+
+	rc = cph_app_validate_mod_field(mod_bm, mod_value);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "fail to valid modification field");
+
+	rc = cph_app_validate_frwd_field(frwd_bm, frwd_value);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "fail to valid forwarding field");
+
+	rc = cph_db_add_app_rule(parse_bm, parse_key, mod_bm, mod_value, frwd_bm, frwd_value);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "fail to call cph_db_add_app_rule");
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_app_del_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Delete CPH rule
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_del_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key)
+{
+	MV_STATUS rc = MV_OK;
+
+	rc = cph_app_validate_parse_field(parse_bm, parse_key);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "fail to valid parsing field");
+
+	rc = cph_db_del_app_rule(parse_bm, parse_key);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "fail to call cph_db_del_app_rule");
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_app_update_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Update CPH rule
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_update_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key,
+	enum CPH_APP_MOD_FIELD_E   mod_bm,
+	struct CPH_APP_MOD_T        *mod_value,
+	enum CPH_APP_FRWD_FIELD_E  frwd_bm,
+	struct CPH_APP_FRWD_T       *frwd_value)
+{
+	MV_STATUS rc = MV_OK;
+
+	rc = cph_app_validate_parse_field(parse_bm, parse_key);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "fail to valid parsing field");
+
+	rc = cph_app_validate_mod_field(mod_bm, mod_value);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "fail to valid modification field");
+
+	rc = cph_app_validate_frwd_field(frwd_bm, frwd_value);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "fail to valid forwarding field");
+
+	rc = cph_db_update_app_rule(parse_bm, parse_key, mod_bm, mod_value, frwd_bm, frwd_value);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "fail to call cph_db_update_app_rule");
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_app_get_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get CPH rule
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*
+* OUTPUTS:
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_get_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key,
+	enum CPH_APP_MOD_FIELD_E  *mod_bm,
+	struct CPH_APP_MOD_T        *mod_value,
+	enum CPH_APP_FRWD_FIELD_E *frwd_bm,
+	struct CPH_APP_FRWD_T       *frwd_value)
+{
+	MV_STATUS rc = MV_OK;
+
+	rc = cph_app_validate_parse_field(parse_bm, parse_key);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "fail to valid parsing field");
+
+	rc = cph_db_get_app_rule(parse_bm, parse_key, mod_bm, mod_value, frwd_bm, frwd_value);
+	if (rc != MV_OK)
+		MV_CPH_PRINT(CPH_DEBUG_LEVEL, "fail to call cph_app_get_rule\n");
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_app_get_rule_by_dir_proto()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get CPH rule according to protocol type
+*
+* INPUTS:
+*       dir        - Direction
+*       proto_type - SKB protocol type
+*
+* OUTPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_get_rule_by_dir_proto(
+	enum CPH_DIR_E              dir,
+	unsigned short                 proto_type,
+	enum CPH_APP_PARSE_FIELD_E *parse_bm,
+	struct CPH_APP_PARSE_T       *parse_key,
+	enum CPH_APP_MOD_FIELD_E   *mod_bm,
+	struct CPH_APP_MOD_T         *mod_value,
+	enum CPH_APP_FRWD_FIELD_E  *frwd_bm,
+	struct CPH_APP_FRWD_T        *frwd_value)
+{
+	MV_STATUS rc = MV_OK;
+
+	rc = cph_db_get_app_rule_by_dir_proto(dir, proto_type, parse_bm,
+		parse_key, mod_bm, mod_value, frwd_bm, frwd_value);
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_app_increase_counter()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Increase RX counter
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_increase_counter(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key)
+{
+	MV_STATUS rc = MV_OK;
+
+	rc = cph_app_validate_parse_field(parse_bm, parse_key);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "fail to valid parsing field");
+
+	rc = cph_db_increase_counter(parse_bm, parse_key);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "fail to call cph_db_increase_counter");
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_app_increase_counter_by_dir_proto()
+* _____________________________________________________________________________
+*
+* DESCRIPTION:  Increase RX counter according to protocol type
+*
+* INPUTS:
+*       dir        - Direction
+*       proto_type - SKB protocol type
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_increase_counter_by_dir_proto(
+	enum CPH_DIR_E dir,
+	unsigned short    proto_type)
+{
+	MV_STATUS rc = MV_OK;
+
+	rc = cph_db_increase_counter_by_dir_proto(dir, proto_type);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "fail to call cph_db_increase_counter_by_dir_proto");
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_app_parse_ge_port_type()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get GEMAC port type by profile ID
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       port_type   - Modification bitmap
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_parse_ge_port_type(struct CPH_PORT_STATE_T *port_type)
+{
+	enum tpm_eth_complex_profile_t  profile_id  = 0;
+	enum MV_APP_GMAC_PORT_E         active_port = 0;
+	MV_STATUS                  rc          = MV_OK;
+
+	/* Get Profile ID and active WAN port */
+	cph_db_get_param(CPH_DB_PARAM_PROFILE_ID,  &profile_id);
+	cph_db_get_param(CPH_DB_PARAM_ACTIVE_PORT, &active_port);
+
+	switch (profile_id) {
+	case TPM_PON_WAN_DUAL_MAC_INT_SWITCH:
+	case TPM_PON_WAN_G1_LAN_G0_INT_SWITCH:
+	case TPM_PON_WAN_DUAL_MAC_EXT_SWITCH:
+	case TPM_PON_WAN_G0_G1_DUAL_LAN:
+		port_type[MV_APP_GMAC_PORT_0].port_type   = MV_APP_PORT_LAN;
+		port_type[MV_APP_GMAC_PORT_0].port_state  = MV_GE_PORT_ACTIVE;
+		port_type[MV_APP_GMAC_PORT_1].port_type   = MV_APP_PORT_LAN;
+		port_type[MV_APP_GMAC_PORT_1].port_state  = MV_GE_PORT_ACTIVE;
+		port_type[MV_APP_PON_MAC_PORT].port_type  = MV_APP_PORT_WAN;
+		port_type[MV_APP_PON_MAC_PORT].port_state = MV_GE_PORT_ACTIVE;
+		break;
+	case TPM_PON_WAN_G0_INT_SWITCH:
+	case TPM_PON_WAN_G0_SINGLE_PORT:
+	case TPM_PON_WAN_G0_G1_LPBK:
+		port_type[MV_APP_GMAC_PORT_0].port_type   = MV_APP_PORT_LAN;
+		port_type[MV_APP_GMAC_PORT_0].port_state  = MV_GE_PORT_ACTIVE;
+		port_type[MV_APP_GMAC_PORT_1].port_state  = MV_GE_PORT_INVALID;
+		port_type[MV_APP_PON_MAC_PORT].port_type  = MV_APP_PORT_WAN;
+		port_type[MV_APP_PON_MAC_PORT].port_state = MV_GE_PORT_ACTIVE;
+		break;
+	case TPM_G0_WAN_G1_INT_SWITCH:
+		port_type[MV_APP_GMAC_PORT_0].port_type   = MV_APP_PORT_WAN;
+		port_type[MV_APP_GMAC_PORT_0].port_state  = MV_GE_PORT_ACTIVE;
+		port_type[MV_APP_GMAC_PORT_1].port_type   = MV_APP_PORT_LAN;
+		port_type[MV_APP_GMAC_PORT_1].port_state  = MV_GE_PORT_ACTIVE;
+		port_type[MV_APP_PON_MAC_PORT].port_state = MV_GE_PORT_INVALID;
+		break;
+	case TPM_G1_WAN_G0_INT_SWITCH:
+		port_type[MV_APP_GMAC_PORT_0].port_type   = MV_APP_PORT_LAN;
+		port_type[MV_APP_GMAC_PORT_0].port_state  = MV_GE_PORT_ACTIVE;
+		port_type[MV_APP_GMAC_PORT_1].port_type   = MV_APP_PORT_WAN;
+		port_type[MV_APP_GMAC_PORT_1].port_state  = MV_GE_PORT_ACTIVE;
+		port_type[MV_APP_PON_MAC_PORT].port_state = MV_GE_PORT_INVALID;
+		break;
+	case TPM_PON_G1_WAN_G0_INT_SWITCH:
+	case TPM_PON_G1_WAN_G0_SINGLE_PORT:
+		port_type[MV_APP_GMAC_PORT_0].port_type   = MV_APP_PORT_LAN;
+		port_type[MV_APP_GMAC_PORT_0].port_state  = MV_GE_PORT_ACTIVE;
+		port_type[MV_APP_GMAC_PORT_1].port_type   = MV_APP_PORT_WAN;
+		port_type[MV_APP_PON_MAC_PORT].port_type  = MV_APP_PORT_WAN;
+
+		if (active_port == MV_APP_GMAC_PORT_1) {
+			port_type[MV_APP_GMAC_PORT_1].port_state  = MV_GE_PORT_ACTIVE;
+			port_type[MV_APP_PON_MAC_PORT].port_state = MV_GE_PORT_INACTIVE;
+		} else {
+			port_type[MV_APP_GMAC_PORT_1].port_state  = MV_GE_PORT_INACTIVE;
+			port_type[MV_APP_PON_MAC_PORT].port_state = MV_GE_PORT_ACTIVE;
+		}
+		break;
+	case TPM_PON_G0_WAN_G1_INT_SWITCH:
+	case TPM_PON_G0_WAN_G1_SINGLE_PORT:
+		port_type[MV_APP_GMAC_PORT_0].port_type   = MV_APP_PORT_WAN;
+		port_type[MV_APP_GMAC_PORT_1].port_type   = MV_APP_PORT_LAN;
+		port_type[MV_APP_GMAC_PORT_1].port_state  = MV_GE_PORT_ACTIVE;
+		port_type[MV_APP_PON_MAC_PORT].port_type  = MV_APP_PORT_WAN;
+		if (active_port == MV_APP_GMAC_PORT_0) {
+			port_type[MV_APP_GMAC_PORT_0].port_state  = MV_GE_PORT_ACTIVE;
+			port_type[MV_APP_PON_MAC_PORT].port_state = MV_GE_PORT_INACTIVE;
+		} else {
+			port_type[MV_APP_GMAC_PORT_0].port_state  = MV_GE_PORT_INACTIVE;
+			port_type[MV_APP_PON_MAC_PORT].port_state = MV_GE_PORT_ACTIVE;
+		}
+		break;
+	case TPM_PON_WAN_G1_MNG_EXT_SWITCH:
+	case TPM_PON_WAN_G1_SINGLE_PORT:
+		port_type[MV_APP_GMAC_PORT_0].port_state  = MV_GE_PORT_INVALID;
+		port_type[MV_APP_GMAC_PORT_1].port_type   = MV_APP_PORT_LAN;
+		port_type[MV_APP_GMAC_PORT_1].port_state  = MV_GE_PORT_ACTIVE;
+		port_type[MV_APP_PON_MAC_PORT].port_type  = MV_APP_PORT_WAN;
+		port_type[MV_APP_PON_MAC_PORT].port_state = MV_GE_PORT_ACTIVE;
+		break;
+	default:
+		port_type[MV_APP_GMAC_PORT_0].port_state  = MV_GE_PORT_INVALID;
+		port_type[MV_APP_GMAC_PORT_1].port_state  = MV_GE_PORT_INVALID;
+		port_type[MV_APP_PON_MAC_PORT].port_state = MV_GE_PORT_INVALID;
+		break;
+	}
+	/* loopback port will be LAN side port by default */
+	port_type[MV_APP_LPBK_PORT].port_type  = MV_APP_PORT_LAN;
+	port_type[MV_APP_LPBK_PORT].port_state = MV_GE_PORT_ACTIVE;
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_app_parse_peer_port()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get peer GEMAC port
+*
+* INPUTS:
+*       port        - Original port
+*
+* OUTPUTS:
+*       peer_port   - Peer port
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_parse_peer_port(
+	int    port,
+	int   *peer_port)
+{
+	unsigned int            idx = 0;
+	struct CPH_PORT_STATE_T  port_type[MV_APP_GMAC_PORT_NUM];
+	MV_STATUS         rc  = MV_FAIL;
+
+	/* Verify port */
+	if (port > MV_APP_PON_MAC_PORT) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "port[%d] is out of range[0~%d]\n", port, MV_APP_PON_MAC_PORT);
+		return MV_OUT_OF_RANGE;
+	}
+
+	/* Get port type */
+	rc = cph_app_parse_ge_port_type(&port_type[0]);
+
+	/* Search for peer port */
+	if (port_type[port].port_type == MV_APP_PORT_LAN) {
+		for (idx = 0; idx < MV_APP_GMAC_PORT_NUM; idx++) {
+			if (idx == port)
+				continue;
+			if ((port_type[idx].port_type  == MV_APP_PORT_WAN) &&
+			    (port_type[idx].port_state == MV_GE_PORT_ACTIVE)) {
+				*peer_port = idx;
+				rc  = MV_OK;
+				break;
+			}
+		}
+	} else if (port_type[port].port_type == MV_APP_PORT_WAN) {
+		for (idx = 0; idx < MV_APP_GMAC_PORT_NUM; idx++) {
+			if (idx == port)
+				continue;
+			if (port_type[idx].port_type  == MV_APP_PORT_LAN) {
+				*peer_port = idx;
+				rc  = MV_OK;
+				break;
+			}
+		}
+	}
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_app_parse_dir()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Parse application packet to output parse bitmap and value
+*
+* INPUTS:
+*       port  - GE MAC port
+*       rx    - Whether RX path
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       Return direction.
+*******************************************************************************/
+enum CPH_DIR_E cph_app_parse_dir(
+	int    port,
+	bool     rx)
+{
+	struct CPH_PORT_STATE_T port_type[MV_APP_GMAC_PORT_NUM];
+	MV_STATUS        rc  = MV_OK;
+	enum CPH_DIR_E        dir = CPH_DIR_INVALID;
+
+	/* Parse port */
+	if (port > MV_APP_PON_MAC_PORT) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "port[%d] is out of range[0~%d]\n", port, MV_APP_PON_MAC_PORT);
+		return dir;
+	}
+
+	/* Get GMAC WAN/LAN type */
+	rc = cph_app_parse_ge_port_type(&port_type[0]);
+	if (rc != MV_OK) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "fail to call cph_app_parse_ge_port_type()\n");
+		return dir;
+	}
+
+	/* Rx dir */
+	if (rx == TRUE) {
+		if ((port_type[port].port_type  == MV_APP_PORT_WAN) &&
+		    (port_type[port].port_state == MV_GE_PORT_ACTIVE))
+			dir = CPH_DIR_DS;
+		else if ((port_type[port].port_type  == MV_APP_PORT_LAN) &&
+			(port_type[port].port_state == MV_GE_PORT_ACTIVE))
+			dir = CPH_DIR_US;
+		else {
+			dir = CPH_DIR_INVALID;
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "RX dir[%d] is invalid\n", dir);
+		}
+	} else {/* Tx dir */
+		if ((port_type[port].port_type  == MV_APP_PORT_WAN) &&
+		    (port_type[port].port_state == MV_GE_PORT_ACTIVE))
+			dir = CPH_DIR_US;
+		else if ((port_type[port].port_type  == MV_APP_PORT_LAN) &&
+			(port_type[port].port_state == MV_GE_PORT_ACTIVE))
+			dir = CPH_DIR_DS;
+		else {
+			dir = CPH_DIR_INVALID;
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "TX dir[%d] is invalid\n", dir);
+		}
+	}
+
+	return dir;
+}
+
+/******************************************************************************
+* cph_app_parse_packet()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Parse application packet to output parse bitmap and value
+*
+* INPUTS:
+*       port       - GE MAC port
+*       skb_data   - Pointer to SKB data holding application packet
+*
+* OUTPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_parse_packet(
+	int                  port,
+	unsigned char                 *skb_data,
+	enum CPH_APP_PARSE_FIELD_E *parse_bm,
+	struct CPH_APP_PARSE_T       *parse_key)
+{
+	unsigned short                  eth_type     = 0;
+	struct ipv6hdr         *p_ipv6_hdr   = NULL;
+	struct ipv6_hopopt_hdr *p_hopopt_hdr = NULL;
+	struct icmp6hdr        *p_icmp_hdr   = NULL;
+	unsigned char                  *p_field      = NULL;
+	MV_STATUS               rc           = MV_OK;
+
+	*parse_bm = 0;
+	memset(parse_key, 0, sizeof(struct CPH_APP_PARSE_T));
+
+	/* Parse dir */
+	parse_key->dir = cph_app_parse_dir(port, TRUE);
+	if (parse_key->dir == CPH_DIR_INVALID) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "dir[%d] is invalid\n", parse_key->dir);
+		return MV_BAD_VALUE;
+	}
+	*parse_bm |= CPH_APP_PARSE_FIELD_DIR;
+
+	/* Parse RX/TX */
+	parse_key->rx_tx = CPH_DIR_RX;
+	*parse_bm |= CPH_APP_PARSE_FIELD_RX_TX;
+
+	/* Parse Marvell header */
+	if (parse_key->dir == CPH_DIR_US)
+		parse_key->mh = (ntohs(*(unsigned short *)skb_data) & MV_VALID_MH_MASK);
+	else
+		parse_key->mh = (ntohs(*(unsigned short *)skb_data) & MV_VALID_GH_MASK);
+
+	*parse_bm |= CPH_APP_PARSE_FIELD_MH;
+
+	/* Parse Eth type */
+	p_field  = skb_data + MV_ETH_MH_SIZE + ETH_ALEN + ETH_ALEN;
+	eth_type = ntohs(*(unsigned short *)p_field);
+	while (eth_type == MV_TPID_8100 || eth_type == MV_TPID_88A8 || eth_type == MV_TPID_9100) {
+		p_field += VLAN_HLEN;
+		eth_type = ntohs(*(unsigned short *)p_field);
+	}
+	parse_key->eth_type = eth_type;
+	*parse_bm |= CPH_APP_PARSE_FIELD_ETH_TYPE;
+
+	/* Parse IPv4 type */
+	if (eth_type == ETH_P_IP) {
+		p_field += MV_CPH_ETH_TYPE_LEN;
+		p_field += MV_IPV4_PROTO_OFFSET;
+		parse_key->ipv4_type = *(unsigned char *)p_field;
+		*parse_bm |= CPH_APP_PARSE_FIELD_IPV4_TYPE;
+	} else if (eth_type == ETH_P_IPV6) {/* Parse IPv6 type */
+		p_ipv6_hdr = (struct ipv6hdr *)(p_field + MV_CPH_ETH_TYPE_LEN);
+		parse_key->ipv6_nh1 = p_ipv6_hdr->nexthdr;
+		*parse_bm |= CPH_APP_PARSE_FIELD_IPV6_NH1;
+
+		if (p_ipv6_hdr->nexthdr != NEXTHDR_HOP)
+			return rc;
+
+		p_hopopt_hdr = (struct ipv6_hopopt_hdr *)((unsigned char *)p_ipv6_hdr + sizeof(struct ipv6hdr));
+
+		parse_key->ipv6_nh2 = p_hopopt_hdr->nexthdr;
+		*parse_bm |= CPH_APP_PARSE_FIELD_IPV6_NH2;
+
+		if (p_hopopt_hdr->nexthdr != IPPROTO_ICMPV6)
+			return rc;
+
+		p_icmp_hdr =  (struct icmp6hdr *)((unsigned char *)p_hopopt_hdr + ipv6_optlen(p_hopopt_hdr));
+
+		switch (p_icmp_hdr->icmp6_type) {
+		case ICMPV6_MGM_QUERY:
+		case ICMPV6_MGM_REPORT:
+		case ICMPV6_MGM_REDUCTION:
+		case ICMPV6_MLD2_REPORT:
+			parse_key->icmpv6_type = MV_ICMPV6_TYPE_MLD;
+			*parse_bm |= CPH_APP_PARSE_FIELD_ICMPV6_TYPE;
+			break;
+		default:
+			break;
+		}
+	} else {/* Parse Ethenet subtype */
+		parse_key->eth_subtype = (*(unsigned char *)(p_field + MV_CPH_ETH_TYPE_LEN));
+		*parse_bm |= CPH_APP_PARSE_FIELD_ETH_SUBTYPE;
+	}
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_app_mod_rx_packet()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Modify RX application packet
+*
+* INPUTS:
+*       port      - Gmac port the packet from
+*       dev       - Net device
+*       skb       - SKB buffer to receive packet
+*       rx_desc   - RX descriptor
+*       mod_bm    - Modification bitmap
+*       mod_value - Modification value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_mod_rx_packet(
+	int                port,
+	struct net_device   *dev,
+	struct sk_buff      *skb,
+	struct pp2_rx_desc  *rx_desc,
+	enum CPH_APP_MOD_FIELD_E  mod_bm,
+	struct CPH_APP_MOD_T       *mod_value)
+{
+	unsigned char     *p_data = NULL;
+	MV_STATUS  rc     = MV_OK;
+
+	/* Save GMAC Information */
+	if (mod_bm & CPH_APP_RX_MOD_ADD_GMAC) {
+		p_data     = (unsigned char *)skb->data;
+		p_data[0] &= 0x0F;
+		p_data[0] |= ((port & 0x0F) << 4);
+	}
+
+	if (mod_bm & CPH_APP_RX_MOD_STRIP_MH) {
+		skb->data += MV_ETH_MH_SIZE;
+		skb->tail -= MV_ETH_MH_SIZE;
+		skb->len  -= MV_ETH_MH_SIZE;
+	}
+
+	skb->protocol = eth_type_trans(skb, dev);
+	if (mod_bm & CPH_APP_RX_MOD_REPLACE_PROTO_TYPE)
+		skb->protocol = mod_value->proto_type;
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_app_mod_tx_packet()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Modify TX application packet
+*
+* INPUTS:
+*       skb         - Pointer to SKB data hoding application packet
+*       tx_spec_out - TX descriptor
+*       mod_bm      - Modification bitmap
+*       mod_value   - Modification value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_mod_tx_packet(
+	struct sk_buff        *skb,
+	struct mv_pp2_tx_spec *tx_spec_out,
+	enum CPH_APP_MOD_FIELD_E    mod_bm,
+	struct CPH_APP_MOD_T         *mod_value)
+{
+	MV_STATUS rc = MV_OK;
+
+	if (mod_bm & CPH_APP_TX_MOD_ADD_MH_BY_DRIVER) {
+		tx_spec_out->flags |= MV_ETH_TX_F_MH;
+		tx_spec_out->tx_mh = MV_16BIT_BE(mod_value->mh);
+	}
+
+	if (mod_bm & CPH_APP_TX_MOD_NO_PAD)
+		tx_spec_out->flags |= MV_ETH_TX_F_NO_PAD;
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_app_set_frwd()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Set packet forwarding information
+*
+* INPUTS:
+*       skb         - Pointer to SKB data hoding application packet
+*       tx_spec_out - TX descriptor
+*       frwd_bm     - Forwarding bitmap
+*       frwd_value  - Forwarding value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_set_frwd(
+	struct sk_buff        *skb,
+	struct mv_pp2_tx_spec *tx_spec_out,
+	enum CPH_APP_FRWD_FIELD_E   frwd_bm,
+	struct CPH_APP_FRWD_T        *frwd_value)
+{
+	MV_STATUS rc = MV_OK;
+
+	if (frwd_bm & CPH_APP_FRWD_SET_TRG_PORT)
+		tx_spec_out->txp = frwd_value->trg_port;
+
+	if (frwd_bm & CPH_APP_FRWD_SET_TRG_QUEUE)
+		tx_spec_out->txq = frwd_value->trg_queue;
+
+	if (frwd_bm & CPH_APP_FRWD_SET_GEM_PORT)
+		tx_spec_out->hw_cmd[0] = ((frwd_value->gem_port << 8)|0x0010);
+
+	tx_spec_out->tx_func = NULL;
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_app_rx_bc()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: CPH function to handle the received broadcast packets
+*
+* INPUTS:
+*       port    - Gmac port the packet from
+*       dev     - Net device
+*       pkt     - Marvell packet information
+*       rx_desc - RX descriptor
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns 1.
+*       On error returns 0.
+*******************************************************************************/
+int cph_app_rx_bc(int port, struct net_device *dev, struct sk_buff *skb, struct pp2_rx_desc *rx_desc)
+{
+	struct CPH_FLOW_ENTRY_T      flow_rule;
+	int                 peer_port = 0;
+	int                 rx_size   = 0;
+	int                 offset    = 0;
+	bool                  state     = FALSE;
+	struct sk_buff       *skb_old   = NULL;
+	struct sk_buff       *skb_new   = NULL;
+	MV_STATUS             rc        = MV_OK;
+
+	/* Check whether need to handle broadcast packet */
+	cph_db_get_param(CPH_DB_PARAM_BC_SUPPORT, &state);
+	if (state == FALSE)
+		return 0;
+
+	/* Parse packets */
+	skb_old = skb;
+	skb_new = skb;
+	rc = cph_flow_parse_packet(port, skb_old->data, TRUE, TRUE, &flow_rule);
+	if (rc != MV_OK) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "fail to call cph_flow_parse_packet, rc<%d>\n", rc);
+		return 0;
+	}
+
+	/* U/S */
+	if (flow_rule.dir == CPH_DIR_US) {
+		/* Forward packet to peer port */
+		rc = cph_app_parse_peer_port(port, &peer_port);
+		if (rc != MV_OK) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "fail to call cph_app_parse_peer_port, rc<%d>\n", rc);
+			return 0;
+		}
+
+		/* Forward packet */
+		if (netif_running(mv_pp2_ports[peer_port]->dev)) {
+			/* Copy a new SKB */
+			skb_old->tail += rx_desc->dataSize;
+			skb_old->len   = rx_desc->dataSize;
+			skb_new = skb_copy(skb_old, GFP_ATOMIC);
+			if (skb_new == NULL) {
+				skb_new = skb_old;
+				goto out;
+			}
+			mv_pp2_ports[peer_port]->dev->netdev_ops->ndo_start_xmit(skb_old, mv_pp2_ports[peer_port]->dev);
+		}
+	}
+out:
+	/* Stripe VLAN tag, then send to Linux network stack */
+	offset         = cph_flow_strip_vlan(TRUE, skb_new->data);
+	skb_new->data += offset;
+	rx_size       -= offset;
+
+	/* Strip MH */
+	skb_new->data += MV_ETH_MH_SIZE;
+	offset        += MV_ETH_MH_SIZE;
+
+	skb_new->tail    -= offset;
+	skb_new->len     -= offset;
+	skb_new->protocol = eth_type_trans(skb_new, dev);
+
+	cph_rec_skb(port, skb_new);
+
+	return 1;
+}
+
+/******************************************************************************
+* cph_app_lookup_profile_id()
+* _____________________________________________________________________________
+*
+* DESCRIPTION:lookup profile ID string according to value
+*
+* INPUTS:
+*       enum_value - The enum value to be matched
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       Enum string
+*******************************************************************************/
+char *cph_app_lookup_profile_id(int enum_value)
+{
+	return mtype_lookup_enum_str(&g_enum_array_profile_id, enum_value);
+}
+
+/******************************************************************************
+* cph_app_lookup_pon_type()
+* _____________________________________________________________________________
+*
+* DESCRIPTION:lookup PON type string according to value
+*
+* INPUTS:
+*       enum_value - The enum value to be matched
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       Enum string
+*******************************************************************************/
+char *cph_app_lookup_pon_type(int enum_value)
+{
+	return mtype_lookup_enum_str(&g_enum_array_pon_type, enum_value);
+}
+
+/******************************************************************************
+* cph_app_lookup_dir()
+* _____________________________________________________________________________
+*
+* DESCRIPTION:lookup direction string according to value
+*
+* INPUTS:
+*       enum_value - The enum value to be matched
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       Enum string
+*******************************************************************************/
+char *cph_app_lookup_dir(int enum_value)
+{
+	return mtype_lookup_enum_str(&g_enum_array_dir, enum_value);
+}
+
+/******************************************************************************
+* cph_app_lookup_rx_tx()
+* _____________________________________________________________________________
+*
+* DESCRIPTION:lookup RX/TX direction string according to value
+*
+* INPUTS:
+*       enum_value - The enum value to be matched
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       Enum string
+*******************************************************************************/
+char *cph_app_lookup_rx_tx(int enum_value)
+{
+	return mtype_lookup_enum_str(&g_enum_array_rx_tx, enum_value);
+}
+
+/******************************************************************************
+* cph_app_lookup_gmac()
+* _____________________________________________________________________________
+*
+* DESCRIPTION:lookup GMAC string according to value
+*
+* INPUTS:
+*       enum_value - The enum value to be matched
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       Enum string
+*******************************************************************************/
+char *cph_app_lookup_gmac(int enum_value)
+{
+	return mtype_lookup_enum_str(&g_enum_array_gmac, enum_value);
+}
+
+
+/******************************************************************************
+* cph_app_init()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Initializes CPH application module.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int  cph_app_init(void)
+{
+
+	cph_db_init();
+
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_set_trace_flag()
+* _____________________________________________________________________________
+*
+* DESCRIPTION:sets cph trace flag.
+*
+* INPUTS:
+*       enum_value - The enum value to be matched
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_set_trace_flag(unsigned int flag)
+{
+	g_cph_global_trace = flag;
+
+	return MV_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_app.h b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_app.h
new file mode 100644
index 000000000000..d4dca2b7ecbb
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_app.h
@@ -0,0 +1,766 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	*   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+	*   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+	*   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+********************************************************************************
+* mv_cph_app.h
+*
+* DESCRIPTION: Marvell CPH(CPH Packet Handler) application module to implement
+*              CPH main logic and handle application packets such as OMCI, eOAM,
+*              IGMP packets.
+*
+* DEPENDENCIES:
+*               None
+*
+* CREATED BY:   VictorGu
+*
+* DATE CREATED: 22Jan2013
+*
+* FILE REVISION NUMBER:
+*               Revision: 1.0
+*
+*
+*******************************************************************************/
+#ifndef _MV_CPH_APP_H_
+#define _MV_CPH_APP_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/******************************************************************************
+* Type Definition
+******************************************************************************/
+#define MV_VALID_MH_MASK  0x007F  /* Valid MH filed in CPH */
+#define MV_VALID_GH_MASK  0x0FFF  /* Valid MH filed in CPH */
+
+
+/* CPH application packets parsing field definition
+------------------------------------------------------------------------------*/
+enum CPH_APP_PARSE_FIELD_E {
+	CPH_APP_PARSE_FIELD_DIR          = 0x1,
+	CPH_APP_PARSE_FIELD_RX_TX        = 0x2,
+	CPH_APP_PARSE_FIELD_MH           = 0x4,
+	CPH_APP_PARSE_FIELD_ETH_TYPE     = 0x8,
+	CPH_APP_PARSE_FIELD_ETH_SUBTYPE  = 0x10,
+	CPH_APP_PARSE_FIELD_IPV4_TYPE    = 0x20,
+	CPH_APP_PARSE_FIELD_IPV6_NH1     = 0x40,
+	CPH_APP_PARSE_FIELD_IPV6_NH2     = 0x80,
+	CPH_APP_PARSE_FIELD_ICMPV6_TYPE  = 0x100,
+	CPH_APP_PARSE_FIELD_END          = 0x200
+};
+
+enum CPH_DIR_E {
+	CPH_DIR_US       = 0,
+	CPH_DIR_DS       = 1,
+	CPH_DIR_NOT_CARE = 2,
+	CPH_DIR_NUM      = 2,
+	CPH_DIR_INVALID  = 3
+};
+
+enum CPH_RX_TX_E {
+	CPH_DIR_RX         = 0,
+	CPH_DIR_TX         = 1,
+	CPH_RX_TX_NOT_CARE = 2,
+};
+
+struct CPH_APP_PARSE_T {
+	enum CPH_DIR_E   dir;
+	enum CPH_RX_TX_E rx_tx;
+	unsigned short      mh;
+	unsigned short      eth_type;
+	unsigned char       eth_subtype;
+	unsigned char       ipv4_type;
+	unsigned char       ipv6_nh1;
+	unsigned char       ipv6_nh2;
+	unsigned int      icmpv6_type;
+};
+
+/* CPH application packets modification field definition
+------------------------------------------------------------------------------*/
+enum CPH_APP_MOD_FIELD_E {
+	CPH_APP_RX_MOD_ADD_GMAC           = 0x1,
+	CPH_APP_RX_MOD_REPLACE_PROTO_TYPE = 0x2,
+	CPH_APP_RX_MOD_STRIP_MH           = 0x4,
+	CPH_APP_TX_MOD_ADD_MH_BY_DRIVER   = 0x8,
+	CPH_APP_TX_MOD_NO_PAD             = 0x10,
+	CPH_APP_MOD_CHANGE_STATE          = 0x20,
+	CPH_APP_MOD_FIELD_END             = 0x40
+};
+
+struct CPH_APP_MOD_T {
+	unsigned short   proto_type;
+	unsigned short   mh;
+	bool     state;
+};
+
+/* CPH application packets forwarding field definition
+------------------------------------------------------------------------------*/
+enum CPH_APP_FRWD_FIELD_E {
+	CPH_APP_FRWD_SET_TRG_PORT     = 0x1,
+	CPH_APP_FRWD_SET_TRG_QUEUE    = 0x2,
+	CPH_APP_FRWD_SET_GEM_PORT     = 0x4,
+	CPH_APP_FRWD_FIELD_END        = 0x8
+};
+
+struct CPH_APP_FRWD_T {
+	unsigned char   trg_port;
+	unsigned char   trg_queue;
+	unsigned short  gem_port;
+};
+
+/* TPM connection profile definition from "tpm_types.h"
+------------------------------------------------------------------------------*/
+enum tpm_eth_complex_profile_t {
+	TPM_PON_WAN_DUAL_MAC_INT_SWITCH = 1,
+	TPM_PON_WAN_G0_INT_SWITCH,
+	TPM_PON_WAN_G1_LAN_G0_INT_SWITCH,
+	TPM_G0_WAN_G1_INT_SWITCH,
+	TPM_G1_WAN_G0_INT_SWITCH,
+	TPM_PON_G1_WAN_G0_INT_SWITCH,
+	TPM_PON_G0_WAN_G1_INT_SWITCH,
+	TPM_PON_WAN_DUAL_MAC_EXT_SWITCH,
+	TPM_PON_WAN_G1_MNG_EXT_SWITCH,
+	TPM_PON_WAN_G0_SINGLE_PORT,
+	TPM_PON_WAN_G1_SINGLE_PORT,
+	TPM_PON_G1_WAN_G0_SINGLE_PORT,
+	TPM_PON_G0_WAN_G1_SINGLE_PORT,
+	TPM_PON_WAN_G0_G1_LPBK,
+	TPM_PON_WAN_G0_G1_DUAL_LAN
+};
+
+/* PON type definition
+------------------------------------------------------------------------------*/
+enum CPH_PON_TYPE_E {
+	CPH_PON_TYPE_EPON = 0,
+	CPH_PON_TYPE_GPON,
+	CPH_PON_TYPE_GBE,
+	CPH_PON_TYPE_P2P,
+	CPH_PON_TYPE_MAX
+};
+
+
+/* CPH feature flag
+------------------------------------------------------------------------------*/
+enum CPH_APP_FEATURE_E {
+	CPH_APP_FEATURE_APP = 0,
+	CPH_APP_FEATURE_IGMP,
+	CPH_APP_FEATURE_BC,
+	CPH_APP_FEATURE_FLOW,
+	CPH_APP_FEATURE_UDP,
+};
+
+
+/* CPH parse/modification field definition for bc/igmp
+------------------------------------------------------------------------------*/
+struct CPH_PACKET_PARSE_T {
+	enum CPH_DIR_E      dir;
+	unsigned int         src_port;
+	unsigned short         vid;
+	unsigned char          pbits;
+	unsigned char          dscp;
+};
+
+
+struct CPH_PACKET_MOD_T {
+	enum CPH_DIR_E      dir;
+	unsigned int         src_port;
+	unsigned short         vid;
+	unsigned char          pbits;
+	unsigned char          dscp;
+};
+
+/* GMAC port state definition
+------------------------------------------------------------------------------*/
+struct CPH_PORT_STATE_T {
+	enum MV_APP_PORT_TYPE_E   port_type;
+	enum MV_GE_PORT_STATE_E   port_state;
+};
+
+/* Debug related definition
+------------------------------------------------------------------------------*/
+extern unsigned int g_cph_global_trace;
+
+#define CPH_GLOBAL_TRACE g_cph_global_trace
+#define CPH_DEBUG_LEVEL  0x00000001
+#define CPH_INFO_LEVEL   0x00000002
+#define CPH_WARN_LEVEL   0x00000004
+#define CPH_ERR_LEVEL    0x00000008
+
+#define MV_CPH_PRINT(level, format, ...) \
+	{ \
+		if (level & CPH_GLOBAL_TRACE) \
+			pr_info("%s(line:%d) "format, __func__, __LINE__, ##__VA_ARGS__); \
+	}
+
+#define MV_CPH_CLEAN_PRINT(level, format, ...) \
+	{ \
+		if (level & CPH_GLOBAL_TRACE) \
+			pr_info(format, ##__VA_ARGS__); \
+	}
+
+/******************************************************************************
+ * Function Declaration
+ ******************************************************************************/
+/******************************************************************************
+* cph_app_set_complex_profile()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Set TPM complex profile ID
+*
+* INPUTS:
+*       profile_id   - TPM complex profile ID
+*       active_port  - Active WAN port
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_set_complex_profile(enum tpm_eth_complex_profile_t profile_id, enum MV_APP_GMAC_PORT_E active_port);
+
+/******************************************************************************
+* cph_app_set_feature_flag()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Enable or disable feature support in CPH
+*
+* INPUTS:
+*       feature - CPH supported features
+*       state   - Enable or disable this feature in CPH
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_set_feature_flag(enum CPH_APP_FEATURE_E feature, bool state);
+
+/******************************************************************************
+* cph_app_add_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Add CPH rule
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_add_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key,
+	enum CPH_APP_MOD_FIELD_E   mod_bm,
+	struct CPH_APP_MOD_T        *mod_value,
+	enum CPH_APP_FRWD_FIELD_E  frwd_bm,
+	struct CPH_APP_FRWD_T       *frwd_value);
+
+/******************************************************************************
+* cph_app_del_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Del CPH rule
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_del_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key);
+
+/******************************************************************************
+* cph_app_update_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Update CPH rule
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_update_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key,
+	enum CPH_APP_MOD_FIELD_E   mod_bm,
+	struct CPH_APP_MOD_T        *mod_value,
+	enum CPH_APP_FRWD_FIELD_E  frwd_bm,
+	struct CPH_APP_FRWD_T       *frwd_value);
+
+/******************************************************************************
+* cph_app_get_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get CPH rule
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*
+* OUTPUTS:
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_get_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key,
+	enum CPH_APP_MOD_FIELD_E  *mod_bm,
+	struct CPH_APP_MOD_T        *mod_value,
+	enum CPH_APP_FRWD_FIELD_E *frwd_bm,
+	struct CPH_APP_FRWD_T       *frwd_value);
+
+/******************************************************************************
+* cph_app_get_rule_by_dir_proto()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get CPH rule according to protocol type
+*
+* INPUTS:
+*       dir        - Direction
+*       proto_type - SKB protocol type
+*
+* OUTPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_get_rule_by_dir_proto(
+	enum CPH_DIR_E              dir,
+	unsigned short                 proto_type,
+	enum CPH_APP_PARSE_FIELD_E *parse_bm,
+	struct CPH_APP_PARSE_T       *parse_key,
+	enum CPH_APP_MOD_FIELD_E   *mod_bm,
+	struct CPH_APP_MOD_T         *mod_value,
+	enum CPH_APP_FRWD_FIELD_E  *frwd_bm,
+	struct CPH_APP_FRWD_T        *frwd_value);
+
+/******************************************************************************
+* cph_app_increase_counter()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Increase RX counter
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_increase_counter(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key);
+
+/******************************************************************************
+* cph_app_increase_counter_by_dir_proto()
+* _____________________________________________________________________________
+*
+* DESCRIPTION:  Increase RX counter according to protocol type
+*
+* INPUTS:
+*       dir        - Direction
+*       proto_type - SKB protocol type
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_increase_counter_by_dir_proto(
+	enum CPH_DIR_E dir,
+	unsigned short    proto_type);
+
+/******************************************************************************
+* cph_app_parse_ge_port_type()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get GEMAC port type by profile ID
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       port_type   - Modification bitmap
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_parse_ge_port_type(struct CPH_PORT_STATE_T *port_type);
+
+/******************************************************************************
+* cph_app_parse_dir()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Parse application packet to output parse bitmap and value
+*
+* INPUTS:
+*       port  - GE MAC port
+*       rx    - Whether RX path
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       Return direction.
+*******************************************************************************/
+enum CPH_DIR_E cph_app_parse_dir(
+	int    port,
+	bool     rx);
+
+/******************************************************************************
+* cph_app_parse_packet()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Parse application packet to output parse bitmap and value
+*
+* INPUTS:
+*       port       - GE MAC port
+*       skb_data   - Pointer to SKB data holding application packet
+*
+* OUTPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_parse_packet(
+	int                  port,
+	unsigned char                 *skb_data,
+	enum CPH_APP_PARSE_FIELD_E *parse_bm,
+	struct CPH_APP_PARSE_T       *parse_key);
+
+/******************************************************************************
+* cph_app_mod_rx_packet()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Modify RX application packet
+*
+* INPUTS:
+*       port      - Gmac port the packet from
+*       dev       - Net device
+*       skb       - SKB buffer to receive packet
+*       rx_desc   - RX descriptor
+*       mod_bm    - Modification bitmap
+*       mod_value - Modification value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_mod_rx_packet(
+	int                port,
+	struct net_device   *dev,
+	struct sk_buff      *skb,
+	struct pp2_rx_desc  *rx_desc,
+	enum CPH_APP_MOD_FIELD_E  mod_bm,
+	struct CPH_APP_MOD_T       *mod_value);
+
+/******************************************************************************
+* cph_app_mod_tx_packet()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Modify TX application packet
+*
+* INPUTS:
+*       skb         - Pointer to SKB data hoding application packet
+*       tx_spec_out - TX descriptor
+*       mod_bm      - Modification bitmap
+*       mod_value   - Modification value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_mod_tx_packet(
+	struct sk_buff        *skb,
+	struct mv_pp2_tx_spec *tx_spec_out,
+	enum CPH_APP_MOD_FIELD_E    mod_bm,
+	struct CPH_APP_MOD_T         *mod_value);
+
+/******************************************************************************
+* cph_app_set_frwd()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Set packet forwarding information
+*
+* INPUTS:
+*       skb         - Pointer to SKB data hoding application packet
+*       tx_spec_out - TX descriptor
+*       frwd_bm     - Forwarding bitmap
+*       frwd_value  - Forwarding value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_app_set_frwd(
+	struct sk_buff        *skb,
+	struct mv_pp2_tx_spec *tx_spec_out,
+	enum CPH_APP_FRWD_FIELD_E   frwd_bm,
+	struct CPH_APP_FRWD_T        *frwd_value);
+
+/******************************************************************************
+* cph_app_rx_bc()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: CPH function to handle the received broadcast packets
+*
+* INPUTS:
+*       port    - Gmac port the packet from
+*       dev     - Net device
+*       skb     - Marvell packet information
+*       rx_desc - RX descriptor
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns 1.
+*       On error returns 0.
+*******************************************************************************/
+int cph_app_rx_bc(int port, struct net_device *dev, struct sk_buff *skb, struct pp2_rx_desc *rx_desc);
+
+/******************************************************************************
+* cph_app_lookup_profile_id()
+*
+* DESCRIPTION:lookup profile ID string according to value
+*
+* INPUTS:
+*       enum_value - The enum value to be matched
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       Enum string
+*******************************************************************************/
+char *cph_app_lookup_profile_id(int enum_value);
+
+/******************************************************************************
+* cph_app_lookup_pon_type()
+*
+* DESCRIPTION:lookup PON type string according to value
+*
+* INPUTS:
+*       enum_value - The enum value to be matched
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       Enum string
+*******************************************************************************/
+char *cph_app_lookup_pon_type(int enum_value);
+
+/******************************************************************************
+* cph_app_lookup_dir()
+*
+* DESCRIPTION:lookup direction string according to value
+*
+* INPUTS:
+*       enum_value - The enum value to be matched
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       Enum string
+*******************************************************************************/
+char *cph_app_lookup_dir(int enum_value);
+
+/******************************************************************************
+* cph_app_lookup_rx_tx()
+* _____________________________________________________________________________
+*
+* DESCRIPTION:lookup RX/TX direction string according to value
+*
+* INPUTS:
+*       enum_value - The enum value to be matched
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       Enum string
+*******************************************************************************/
+char *cph_app_lookup_rx_tx(int enum_value);
+
+/******************************************************************************
+* cph_app_lookup_gmac()
+* _____________________________________________________________________________
+*
+* DESCRIPTION:lookup GMAC string according to value
+*
+* INPUTS:
+*       enum_value - The enum value to be matched
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       Enum string
+*******************************************************************************/
+char *cph_app_lookup_gmac(int enum_value);
+
+/******************************************************************************
+* cph_app_init()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Initializes CPH application module.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int  cph_app_init(void);
+
+/******************************************************************************
+* cph_set_trace_flag()
+* _____________________________________________________________________________
+*
+* DESCRIPTION:sets cph trace flag.
+*
+* INPUTS:
+*       enum_value - The enum value to be matched
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_set_trace_flag(unsigned int flag);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _MV_CPH_APP_H_ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_db.c b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_db.c
new file mode 100644
index 000000000000..9e15e0735702
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_db.c
@@ -0,0 +1,1299 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	*   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+	*   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+	*   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+********************************************************************************
+* mv_cph_db.c
+*
+* DESCRIPTION: Marvell CPH(CPH Packet Handler) data base implementation
+*
+* DEPENDENCIES:
+*               None
+*
+* CREATED BY:   VictorGu
+*
+* DATE CREATED: 22Jan2013
+*
+* FILE REVISION NUMBER:
+*               Revision: 1.0
+*
+*
+*******************************************************************************/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/poll.h>
+#include <linux/clk.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include "mv_cph_header.h"
+/*#include "ezxml.h"*/
+
+
+/******************************************************************************
+* Variable Definition
+******************************************************************************/
+static struct CPH_APP_DB_T g_cph_app_db;
+char *g_cph_xml_cfg_file         = US_CPH_XML_CFG_FILE;
+char *g_onu_profile_xml_cfg_file = US_ONU_PROFILE_XML_CFG_FILE;
+
+/******************************************************************************
+* Function Definition
+******************************************************************************/
+/******************************************************************************
+* cph_db_set_param()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Set CPH DB parameter
+*
+* INPUTS:
+*       param   - The parameter type
+*       value   - Parameter value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_set_param(enum CPH_DB_PARAM_E param, void *value)
+{
+	MV_STATUS rc = MV_OK;
+
+	switch (param) {
+	case CPH_DB_PARAM_PROFILE_ID:
+		g_cph_app_db.profile_id   = *(enum tpm_eth_complex_profile_t *)value;
+		break;
+	case CPH_DB_PARAM_ACTIVE_PORT:
+		g_cph_app_db.active_port  = *(enum MV_APP_GMAC_PORT_E *)value;
+		break;
+	case CPH_DB_PARAM_APP_SUPPORT:
+		g_cph_app_db.app_support  = *(bool *)value;
+		break;
+	case CPH_DB_PARAM_IGMP_SUPPORT:
+		g_cph_app_db.igmp_support = *(bool *)value;
+		break;
+	case CPH_DB_PARAM_BC_SUPPORT:
+		g_cph_app_db.bc_support   = *(bool *)value;
+		break;
+	case CPH_DB_PARAM_FLOW_SUPPORT:
+		g_cph_app_db.flow_support = *(bool *)value;
+		break;
+	case CPH_DB_PARAM_UDP_SUPPORT:
+		g_cph_app_db.udp_support  = *(bool *)value;
+		break;
+	case CPH_DB_PARAM_BC_COUNTER:
+		g_cph_app_db.bc_count     = *(unsigned int *)value;
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_db_get_param()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get CPH DB parameter
+*
+* INPUTS:
+*       param   - The parameter type
+*
+* OUTPUTS:
+*       value   - Parameter value
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_get_param(enum CPH_DB_PARAM_E param, void *value)
+{
+	MV_STATUS rc = MV_OK;
+
+	switch (param) {
+	case CPH_DB_PARAM_PROFILE_ID:
+		*(enum tpm_eth_complex_profile_t *)value = g_cph_app_db.profile_id;
+		break;
+	case CPH_DB_PARAM_ACTIVE_PORT:
+		*(enum MV_APP_GMAC_PORT_E *)value = g_cph_app_db.active_port;
+		break;
+	case CPH_DB_PARAM_APP_SUPPORT:
+		*(bool *)value = g_cph_app_db.app_support;
+		break;
+	case CPH_DB_PARAM_IGMP_SUPPORT:
+		*(bool *)value = g_cph_app_db.igmp_support;
+		break;
+	case CPH_DB_PARAM_BC_SUPPORT:
+		*(bool *)value = g_cph_app_db.bc_support;
+		break;
+	case CPH_DB_PARAM_FLOW_SUPPORT:
+		*(bool *)value = g_cph_app_db.flow_support;
+		break;
+	case CPH_DB_PARAM_UDP_SUPPORT:
+		*(bool *)value = g_cph_app_db.udp_support;
+		break;
+	case CPH_DB_PARAM_BC_COUNTER:
+		*(unsigned int *)value = g_cph_app_db.bc_count;
+		break;
+	default:
+		break;
+	}
+	return rc;
+}
+
+/******************************************************************************
+* cph_db_compare_rules()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Compare the parse_bm and parse_key of two rules
+*
+* INPUTS:
+*       parse_bm_1   - Parsing bitmap of first CPH rule
+*       parse_key_1  - Parsing key of first CPH rule
+*       parse_bm_2   - Parsing bitmap of second CPH rule
+*       parse_key_2  - Parsing key of second CPH rule
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       In case same, return TRUE,
+*       In case different, return FALSE.
+*******************************************************************************/
+bool cph_db_compare_rules(
+	enum CPH_APP_PARSE_FIELD_E parse_bm_1,
+	struct CPH_APP_PARSE_T      *parse_key_1,
+	enum CPH_APP_PARSE_FIELD_E parse_bm_2,
+	struct CPH_APP_PARSE_T      *parse_key_2)
+{
+	if (parse_bm_1 == parse_bm_2) {
+		/* Compare direction */
+		if (parse_bm_1 & CPH_APP_PARSE_FIELD_DIR) {
+			if (parse_key_1->dir != parse_key_2->dir) {
+				MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Old dir[%d], new dir[%d]\n",
+						parse_key_1->dir, parse_key_2->dir);
+				return FALSE;
+			}
+		}
+
+		/* Compare RX direction */
+		if (parse_bm_1 & CPH_APP_PARSE_FIELD_RX_TX) {
+			if (parse_key_1->rx_tx != parse_key_2->rx_tx) {
+				MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Old rx_tx[%d], new rx_tx[%d]\n",
+						parse_key_1->rx_tx, parse_key_2->rx_tx);
+				return FALSE;
+			}
+		}
+
+		/* Compare Marvell header */
+		if (parse_bm_1 & CPH_APP_PARSE_FIELD_MH) {
+			if (parse_key_1->mh != parse_key_2->mh) {
+				MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Old mh[%d], new mh[%d]\n",
+						parse_key_1->mh, parse_key_2->mh);
+				return FALSE;
+			}
+		}
+
+		/* Compare Eth type */
+		if (parse_bm_1 & CPH_APP_PARSE_FIELD_ETH_TYPE) {
+			if (parse_key_1->eth_type != parse_key_2->eth_type) {
+				MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Old eth_type[%d], new eth_type[%d]\n",
+						parse_key_1->eth_type, parse_key_2->eth_type);
+				return FALSE;
+			}
+		}
+
+		/* Compare Eth subtype */
+		if (parse_bm_1 & CPH_APP_PARSE_FIELD_ETH_SUBTYPE) {
+			if (parse_key_1->eth_subtype != parse_key_2->eth_subtype) {
+				MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Old eth_subtype[%d], new eth_subtype[%d]\n",
+						parse_key_1->eth_subtype, parse_key_2->eth_subtype);
+				return FALSE;
+			}
+		}
+
+		/* Compare IPV4 type */
+		if (parse_bm_1 & CPH_APP_PARSE_FIELD_IPV4_TYPE) {
+			if (parse_key_1->ipv4_type != parse_key_2->ipv4_type) {
+				MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Old ipv4_type[%d], new ipv4_type[%d]\n",
+						parse_key_1->ipv4_type, parse_key_2->ipv4_type);
+				return FALSE;
+			}
+		}
+
+		/* Compare IPV6 type */
+		if (parse_bm_1 & CPH_APP_PARSE_FIELD_IPV6_NH1) {
+			if (parse_key_1->ipv6_nh1 != parse_key_2->ipv6_nh1) {
+				MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Old ipv6_nh1[%d], new ipv6_nh1[%d]\n",
+						parse_key_1->ipv6_nh1, parse_key_2->ipv6_nh1);
+				return FALSE;
+			}
+		}
+
+		/* Compare IPv6 NH */
+		if (parse_bm_1 & CPH_APP_PARSE_FIELD_IPV6_NH2) {
+			if (parse_key_1->ipv6_nh2 != parse_key_2->ipv6_nh2) {
+				MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Old ipv6_nh2[%d], new ipv6_nh2[%d]\n",
+						parse_key_1->ipv6_nh2, parse_key_2->ipv6_nh2);
+				return FALSE;
+			}
+		}
+
+		/* Compare ICMPv6 type */
+		if (parse_bm_1 & CPH_APP_PARSE_FIELD_ICMPV6_TYPE) {
+			if (parse_key_1->icmpv6_type != parse_key_2->icmpv6_type) {
+				MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Old icmpv6_type[%d], new icmpv6_type[%d]\n",
+						parse_key_1->icmpv6_type, parse_key_2->icmpv6_type);
+				return FALSE;
+			}
+		}
+		return TRUE;
+	} else {
+		return FALSE;
+	}
+}
+
+/******************************************************************************
+* cph_db_compare_rule_and_packet()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Compare the parse_bm and parse_key of CPH and the packet
+*
+* INPUTS:
+*       parse_bm_rule     - Parsing bitmap of CPH rule
+*       parse_key_rule    - Parsing key of CPH rule
+*       parse_bm_packet   - Parsing bitmap of packet
+*       parse_key_packet  - Parsing key of packet
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       In case same, return TRUE,
+*       In case different, return FALSE.
+*******************************************************************************/
+bool cph_db_compare_rule_and_packet(
+	enum CPH_APP_PARSE_FIELD_E parse_bm_rule,
+	struct CPH_APP_PARSE_T      *parse_key_rule,
+	enum CPH_APP_PARSE_FIELD_E parse_bm_packet,
+	struct CPH_APP_PARSE_T      *parse_key_packet)
+{
+	/* Check direction */
+	if (parse_bm_rule & CPH_APP_PARSE_FIELD_DIR) {
+		if (parse_key_rule->dir != CPH_DIR_NOT_CARE) {
+			if (!(parse_bm_packet & CPH_APP_PARSE_FIELD_DIR)) {
+				MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Packet has no dir field\n");
+				return FALSE;
+			}
+
+			if (parse_key_rule->dir != parse_key_packet->dir) {
+				MV_CPH_PRINT(CPH_DEBUG_LEVEL,
+						"Packet dir[%d] is different with rule dir[%d], mis-mathced!\n",
+						parse_key_packet->dir, parse_key_rule->dir);
+				return FALSE;
+			}
+		}
+	}
+
+	/* Check RX/TX direction */
+	if (parse_bm_rule & CPH_APP_PARSE_FIELD_RX_TX) {
+		if (parse_key_rule->rx_tx != CPH_RX_TX_NOT_CARE) {
+			if (!(parse_bm_packet & CPH_APP_PARSE_FIELD_RX_TX)) {
+				MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Packet has no dir RX/TX field\n");
+				return FALSE;
+			}
+
+			if (parse_key_rule->rx_tx != parse_key_packet->rx_tx) {
+				MV_CPH_PRINT(CPH_DEBUG_LEVEL,
+					"Packet rx_tx[%d] is different with rule rx_tx[%d], mis-mathced!\n",
+					parse_key_packet->rx_tx, parse_key_rule->rx_tx);
+				return FALSE;
+			}
+		}
+	}
+
+	/* Check Marvell header */
+	if (parse_bm_rule & CPH_APP_PARSE_FIELD_MH) {
+		if (!(parse_bm_packet & CPH_APP_PARSE_FIELD_MH)) {
+			MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Packet has no MH field\n");
+			return FALSE;
+		}
+
+		if (parse_key_rule->mh != parse_key_packet->mh) {
+			MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Packet mh[0x%x] is different with rule mh[0x%x], mis-mathced!\n",
+					parse_key_packet->mh, parse_key_rule->mh);
+			return FALSE;
+		}
+	}
+
+	/* Check Eth type */
+	if (parse_bm_rule & CPH_APP_PARSE_FIELD_ETH_TYPE) {
+		if (!(parse_bm_packet & CPH_APP_PARSE_FIELD_ETH_TYPE)) {
+			MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Packet has no eth_type field\n");
+			return FALSE;
+		}
+
+		if (parse_key_rule->eth_type != parse_key_packet->eth_type) {
+			MV_CPH_PRINT(CPH_DEBUG_LEVEL,
+				"Packet eth_type[0x%x] is different with rule eth_type[0x%x], mis-mathced!\n",
+				parse_key_packet->eth_type, parse_key_rule->eth_type);
+			return FALSE;
+		}
+	}
+
+	/* Check Eth subtype */
+	if (parse_bm_rule & CPH_APP_PARSE_FIELD_ETH_SUBTYPE) {
+		if (!(parse_bm_packet & CPH_APP_PARSE_FIELD_ETH_SUBTYPE)) {
+			MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Packet has no eth_subtype field\n");
+			return FALSE;
+		}
+
+		if (parse_key_rule->eth_subtype != parse_key_packet->eth_subtype) {
+			MV_CPH_PRINT(CPH_DEBUG_LEVEL,
+				"Packet eth_subtype[%d] is different with rule eth_subtype[%d], mis-mathced!\n",
+				parse_key_packet->eth_subtype, parse_key_rule->eth_subtype);
+			return FALSE;
+		}
+	}
+
+	/* Check IPV4 type */
+	if (parse_bm_rule & CPH_APP_PARSE_FIELD_IPV4_TYPE) {
+		if (!(parse_bm_packet & CPH_APP_PARSE_FIELD_IPV4_TYPE)) {
+			MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Packet has no ipv4_type field\n");
+			return FALSE;
+		}
+
+		if (parse_key_rule->ipv4_type != parse_key_packet->ipv4_type) {
+			MV_CPH_PRINT(CPH_DEBUG_LEVEL,
+				"Packet ipv4_type[%d] is different with rule ipv4_type[%d], mis-mathced!\n",
+				parse_key_packet->ipv4_type, parse_key_rule->ipv4_type);
+			return FALSE;
+		}
+	}
+
+	/* Check IPV6 NH1 */
+	if (parse_bm_rule & CPH_APP_PARSE_FIELD_IPV6_NH1) {
+		if (!(parse_bm_packet & CPH_APP_PARSE_FIELD_IPV6_NH1)) {
+			MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Packet has no ipv6_nh1 field\n");
+			return FALSE;
+		}
+
+		if (parse_key_rule->ipv6_nh1 != parse_key_packet->ipv6_nh1) {
+			MV_CPH_PRINT(CPH_DEBUG_LEVEL,
+				"Packet ipv6_nh1[%d] is different with rule ipv6_nh1[%d], mis-mathced!\n",
+				parse_key_packet->ipv6_nh1, parse_key_rule->ipv6_nh1);
+			return FALSE;
+		}
+	}
+
+	/* Check IPv6 NH2 */
+	if (parse_bm_rule & CPH_APP_PARSE_FIELD_IPV6_NH2) {
+		if (!(parse_bm_packet & CPH_APP_PARSE_FIELD_IPV6_NH2)) {
+			MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Packet has no ipv6_nh2 field\n");
+			return FALSE;
+		}
+
+		if (parse_key_rule->ipv6_nh2 != parse_key_packet->ipv6_nh2) {
+			MV_CPH_PRINT(CPH_DEBUG_LEVEL,
+				"Packet ipv6_nh2[%d] is different with rule ipv6_nh2[%d], mis-mathced!\n",
+				parse_key_packet->ipv6_nh2, parse_key_rule->ipv6_nh2);
+			return FALSE;
+		}
+	}
+
+	/* Check ICMPv6 type */
+	if (parse_bm_rule & CPH_APP_PARSE_FIELD_ICMPV6_TYPE) {
+		if (!(parse_bm_packet & CPH_APP_PARSE_FIELD_ICMPV6_TYPE)) {
+			MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Packet has no icmpv6_type field\n");
+			return FALSE;
+		}
+
+		if (parse_key_rule->icmpv6_type != parse_key_packet->icmpv6_type) {
+			MV_CPH_PRINT(CPH_DEBUG_LEVEL,
+				"Packet icmpv6_type[%d] is different with rule icmpv6_type[%d], mis-mathced!\n",
+				parse_key_packet->icmpv6_type, parse_key_rule->icmpv6_type);
+			return FALSE;
+		}
+	}
+
+	return TRUE;
+}
+
+/******************************************************************************
+* cph_db_check_duplicate_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Check whether there is duplicate CPH rule w/ same parse bitmap
+*              value
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       In case has duplicated rule, return TRUE,
+*       In case has not duplicated rule, return FALSE.
+*******************************************************************************/
+bool cph_db_check_duplicate_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key)
+{
+	unsigned int           idx         = 0;
+	unsigned int           rule_idx    = 0;
+	struct CPH_APP_RULE_T  *p_cph_rule  = NULL;
+	bool             rc          = FALSE;
+
+	for (idx = 0, rule_idx = 0; (idx < CPH_APP_MAX_RULE_NUM) && (rule_idx < g_cph_app_db.rule_num); idx++) {
+		p_cph_rule = &g_cph_app_db.cph_rule[idx];
+
+		/* Compare parse_bm and parse_key */
+		if (p_cph_rule->valid == TRUE) {
+			rule_idx++;
+
+			rc = cph_db_compare_rules(p_cph_rule->parse_bm, &p_cph_rule->parse_key, parse_bm, parse_key);
+			if (rc == TRUE)
+				return rc;
+		}
+	}
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_db_add_app_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Add application type CPH rule to data base
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_add_app_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key,
+	enum CPH_APP_MOD_FIELD_E   mod_bm,
+	struct CPH_APP_MOD_T        *mod_value,
+	enum CPH_APP_FRWD_FIELD_E  frwd_bm,
+	struct CPH_APP_FRWD_T       *frwd_value)
+{
+	unsigned int           idx        = 0;
+	struct CPH_APP_RULE_T  *p_cph_rule = NULL;
+	bool             rc         = TRUE;
+	unsigned long    flags;
+
+	spin_lock_irqsave(&g_cph_app_db.app_lock, flags);
+	/* Seach for an free entry */
+	for (idx = 0; idx < CPH_APP_MAX_RULE_NUM; idx++) {
+		if (g_cph_app_db.cph_rule[idx].valid == FALSE)
+			break;
+	}
+
+	/* No free entry */
+	if (idx == CPH_APP_MAX_RULE_NUM) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "No free CPH entry\n");
+		spin_unlock_irqrestore(&g_cph_app_db.app_lock, flags);
+		return MV_FULL;
+	}
+
+	/* Do not add new rule if there is already duplicated rule */
+	rc = cph_db_check_duplicate_rule(parse_bm, parse_key);
+	if (rc == TRUE) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "Already has duplicated rule, could not add new CPH rule\n");
+		spin_unlock_irqrestore(&g_cph_app_db.app_lock, flags);
+		return MV_ERROR;
+	}
+
+	/* Save CPH rule for application packet */
+	p_cph_rule = &g_cph_app_db.cph_rule[idx];
+	p_cph_rule->parse_bm = parse_bm;
+	memcpy(&p_cph_rule->parse_key,  parse_key, sizeof(struct CPH_APP_PARSE_T));
+	p_cph_rule->mod_bm   = mod_bm;
+	memcpy(&p_cph_rule->mod_value,  mod_value, sizeof(struct CPH_APP_MOD_T));
+	p_cph_rule->frwd_bm  = frwd_bm;
+	memcpy(&p_cph_rule->frwd_value, frwd_value, sizeof(struct CPH_APP_FRWD_T));
+	p_cph_rule->valid    = TRUE;
+	g_cph_app_db.rule_num++;
+
+	spin_unlock_irqrestore(&g_cph_app_db.app_lock, flags);
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_db_del_app_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Delete application type CPH rule from data base
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_del_app_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key)
+{
+	unsigned int           idx         = 0;
+	unsigned int           rule_idx    = 0;
+	struct CPH_APP_RULE_T  *p_cph_rule  = NULL;
+	bool             rc          = FALSE;
+	unsigned long    flags;
+
+	spin_lock_irqsave(&g_cph_app_db.app_lock, flags);
+	for (idx = 0, rule_idx = 0; (idx < CPH_APP_MAX_RULE_NUM) && (rule_idx < g_cph_app_db.rule_num); idx++) {
+		p_cph_rule = &g_cph_app_db.cph_rule[idx];
+
+		/* Compare parse_bm and parse_key */
+		if (p_cph_rule->valid == TRUE) {
+			rule_idx++;
+
+			rc = cph_db_compare_rules(p_cph_rule->parse_bm, &p_cph_rule->parse_key, parse_bm, parse_key);
+			if (rc == TRUE) {
+				memset(p_cph_rule, 0, sizeof(struct CPH_APP_RULE_T));
+				p_cph_rule->valid = FALSE;
+				g_cph_app_db.rule_num--;
+
+				spin_unlock_irqrestore(&g_cph_app_db.app_lock, flags);
+				return MV_OK;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&g_cph_app_db.app_lock, flags);
+
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_db_update_app_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Update application type CPH rule from data base
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_update_app_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key,
+	enum CPH_APP_MOD_FIELD_E   mod_bm,
+	struct CPH_APP_MOD_T        *mod_value,
+	enum CPH_APP_FRWD_FIELD_E  frwd_bm,
+	struct CPH_APP_FRWD_T       *frwd_value)
+{
+	unsigned int           idx         = 0;
+	unsigned int           rule_idx    = 0;
+	struct CPH_APP_RULE_T  *p_cph_rule  = NULL;
+	bool             rc          = FALSE;
+	unsigned long    flags;
+
+	spin_lock_irqsave(&g_cph_app_db.app_lock, flags);
+	for (idx = 0, rule_idx = 0; (idx < CPH_APP_MAX_RULE_NUM) && (rule_idx < g_cph_app_db.rule_num); idx++) {
+		p_cph_rule = &g_cph_app_db.cph_rule[idx];
+
+		/* Compare parse_bm and parse_key */
+		if (p_cph_rule->valid == TRUE) {
+			rule_idx++;
+
+			rc = cph_db_compare_rules(p_cph_rule->parse_bm, &p_cph_rule->parse_key, parse_bm, parse_key);
+			if (rc == TRUE) {
+				p_cph_rule->mod_bm   = mod_bm;
+				memcpy(&p_cph_rule->mod_value,  mod_value,  sizeof(struct CPH_APP_MOD_T));
+				p_cph_rule->frwd_bm  = frwd_bm;
+				memcpy(&p_cph_rule->frwd_value, frwd_value, sizeof(struct CPH_APP_FRWD_T));
+				spin_unlock_irqrestore(&g_cph_app_db.app_lock, flags);
+				return MV_OK;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&g_cph_app_db.app_lock, flags);
+
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_db_get_app_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get application type CPH rule from data base
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*
+* OUTPUTS:
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_get_app_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key,
+	enum CPH_APP_MOD_FIELD_E  *mod_bm,
+	struct CPH_APP_MOD_T        *mod_value,
+	enum CPH_APP_FRWD_FIELD_E *frwd_bm,
+	struct CPH_APP_FRWD_T       *frwd_value)
+{
+	unsigned int           idx         = 0;
+	unsigned int           rule_idx    = 0;
+	struct CPH_APP_RULE_T  *p_cph_rule  = NULL;
+	bool             rc          = FALSE;
+	unsigned long    flags;
+
+	spin_lock_irqsave(&g_cph_app_db.app_lock, flags);
+	for (idx = 0, rule_idx = 0; (idx < CPH_APP_MAX_RULE_NUM) && (rule_idx < g_cph_app_db.rule_num); idx++) {
+		p_cph_rule = &g_cph_app_db.cph_rule[idx];
+
+		/* Compare parse_bm and parse_key */
+		if (p_cph_rule->valid == TRUE) {
+			rule_idx++;
+
+			rc = cph_db_compare_rule_and_packet(p_cph_rule->parse_bm,
+							    &p_cph_rule->parse_key,
+							    parse_bm,
+							    parse_key);
+			if (rc == TRUE) {
+				if (p_cph_rule->mod_value.state == TRUE) {
+					*mod_bm  = p_cph_rule->mod_bm;
+					memcpy(mod_value, &p_cph_rule->mod_value, sizeof(struct CPH_APP_MOD_T));
+					*frwd_bm = p_cph_rule->frwd_bm;
+					memcpy(frwd_value, &p_cph_rule->frwd_value, sizeof(struct CPH_APP_FRWD_T));
+
+					spin_unlock_irqrestore(&g_cph_app_db.app_lock, flags);
+					return MV_OK;
+				}
+			}
+		}
+	}
+	spin_unlock_irqrestore(&g_cph_app_db.app_lock, flags);
+
+	return MV_FAIL;
+}
+
+/******************************************************************************
+* cph_db_get_app_rule_by_dir_proto()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get application type CPH rule from data base by protocol type
+*
+* INPUTS:
+*       dir        - Direction
+*       proto_type - SKB protocol type
+*
+* OUTPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_get_app_rule_by_dir_proto(
+enum CPH_DIR_E              dir,
+unsigned short                 proto_type,
+enum CPH_APP_PARSE_FIELD_E *parse_bm,
+struct CPH_APP_PARSE_T       *parse_key,
+enum CPH_APP_MOD_FIELD_E   *mod_bm,
+struct CPH_APP_MOD_T         *mod_value,
+enum CPH_APP_FRWD_FIELD_E  *frwd_bm,
+struct CPH_APP_FRWD_T        *frwd_value)
+{
+	unsigned int           idx         = 0;
+	unsigned int           rule_idx    = 0;
+	struct CPH_APP_RULE_T  *p_cph_rule  = NULL;
+	unsigned long    flags;
+
+	spin_lock_irqsave(&g_cph_app_db.app_lock, flags);
+	for (idx = 0, rule_idx = 0; (idx < CPH_APP_MAX_RULE_NUM) && (rule_idx < g_cph_app_db.rule_num); idx++) {
+		p_cph_rule = &g_cph_app_db.cph_rule[idx];
+
+		/* Compare parse_bm and parse_key */
+		if (p_cph_rule->valid == TRUE) {
+			rule_idx++;
+
+			if ((p_cph_rule->mod_bm & CPH_APP_RX_MOD_REPLACE_PROTO_TYPE) &&
+			   (p_cph_rule->mod_value.proto_type == proto_type) &&
+			   (p_cph_rule->mod_value.state      == TRUE)) {
+				if ((p_cph_rule->parse_bm & CPH_APP_PARSE_FIELD_DIR) &&
+				    ((p_cph_rule->parse_key.dir == CPH_DIR_NOT_CARE) ||
+				     (p_cph_rule->parse_key.dir == dir)) &&
+				     (p_cph_rule->parse_bm & CPH_APP_PARSE_FIELD_RX_TX) &&
+				     (p_cph_rule->parse_key.rx_tx == CPH_DIR_TX)) {
+					*parse_bm = p_cph_rule->parse_bm;
+					memcpy(parse_key, &p_cph_rule->parse_key, sizeof(struct CPH_APP_PARSE_T));
+					*mod_bm   = p_cph_rule->mod_bm;
+					memcpy(mod_value, &p_cph_rule->mod_value, sizeof(struct CPH_APP_MOD_T));
+					*frwd_bm  = p_cph_rule->frwd_bm;
+					memcpy(frwd_value, &p_cph_rule->frwd_value, sizeof(struct CPH_APP_FRWD_T));
+
+					spin_unlock_irqrestore(&g_cph_app_db.app_lock, flags);
+					return MV_OK;
+				}
+			}
+		}
+	}
+	spin_unlock_irqrestore(&g_cph_app_db.app_lock, flags);
+
+	return MV_FAIL;
+}
+
+/******************************************************************************
+* cph_db_increase_counter()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Increase RX counter
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_increase_counter(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key)
+{
+	unsigned int           idx         = 0;
+	unsigned int           rule_idx    = 0;
+	struct CPH_APP_RULE_T  *p_cph_rule  = NULL;
+	bool             rc          = MV_FAIL;
+	unsigned long    flags;
+
+	spin_lock_irqsave(&g_cph_app_db.app_lock, flags);
+	for (idx = 0, rule_idx = 0; (idx < CPH_APP_MAX_RULE_NUM) && (rule_idx < g_cph_app_db.rule_num); idx++) {
+		p_cph_rule = &g_cph_app_db.cph_rule[idx];
+
+		/* Compare parse_bm and parse_key */
+		if (p_cph_rule->valid == TRUE) {
+			rule_idx++;
+
+			rc = cph_db_compare_rule_and_packet(p_cph_rule->parse_bm,
+							    &p_cph_rule->parse_key,
+							    parse_bm, parse_key);
+			if (rc == TRUE) {
+				if (p_cph_rule->mod_value.state == TRUE) {
+					p_cph_rule->count++;
+
+					spin_unlock_irqrestore(&g_cph_app_db.app_lock, flags);
+					return MV_OK;
+				}
+			}
+		}
+	}
+	spin_unlock_irqrestore(&g_cph_app_db.app_lock, flags);
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_db_increase_counter_by_dir_proto()
+* _____________________________________________________________________________
+*
+* DESCRIPTION:  Increase RX counter according to protocol type
+*
+* INPUTS:
+*       dir        - Direction
+*       proto_type - SKB protocol type
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_increase_counter_by_dir_proto(enum CPH_DIR_E dir,
+	unsigned short    proto_type)
+{
+	unsigned int           idx         = 0;
+	unsigned int           rule_idx    = 0;
+	struct CPH_APP_RULE_T  *p_cph_rule  = NULL;
+	unsigned long    flags;
+
+	spin_lock_irqsave(&g_cph_app_db.app_lock, flags);
+	for (idx = 0, rule_idx = 0; (idx < CPH_APP_MAX_RULE_NUM) && (rule_idx < g_cph_app_db.rule_num); idx++) {
+		p_cph_rule = &g_cph_app_db.cph_rule[idx];
+
+	/* Compare dir and protocol type */
+	if (p_cph_rule->valid == TRUE) {
+		rule_idx++;
+
+		if ((p_cph_rule->mod_bm & CPH_APP_RX_MOD_REPLACE_PROTO_TYPE) &&
+		    (p_cph_rule->mod_value.proto_type == proto_type) &&
+		    (p_cph_rule->mod_value.state      == TRUE)) {
+			if ((p_cph_rule->parse_bm & CPH_APP_PARSE_FIELD_DIR) &&
+			    ((p_cph_rule->parse_key.dir == CPH_DIR_NOT_CARE) ||
+			     (p_cph_rule->parse_key.dir == dir)) &&
+			     (p_cph_rule->parse_bm & CPH_APP_PARSE_FIELD_RX_TX) &&
+			     (p_cph_rule->parse_key.rx_tx == CPH_DIR_TX)) {
+				p_cph_rule->count++;
+
+				spin_unlock_irqrestore(&g_cph_app_db.app_lock, flags);
+				return MV_OK;
+			}
+		}
+	}
+	}
+	spin_unlock_irqrestore(&g_cph_app_db.app_lock, flags);
+
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_db_get_xml_param()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get the XML parameter
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+* COMMENTS: The routine need to finish in A0, it used to parse input file.
+*******************************************************************************/
+MV_STATUS cph_db_get_xml_param(void)
+{
+	int       rc         = MV_OK;
+
+	return rc;
+}
+
+/*******************************************************************************
+**
+** cph_db_get_tcont_state
+** ___________________________________________________________________________
+**
+** DESCRIPTION: The function get T-CONT state
+**
+** INPUTS:
+**   tcont - T-CONT
+**
+** OUTPUTS:
+**   None.
+**
+** RETURNS:
+**   state - State of T-CONT, enabled or disabled.
+**
+*******************************************************************************/
+bool cph_db_get_tcont_state(unsigned int tcont)
+{
+	/* Check tcont */
+	if (tcont >= MV_TCONT_LLID_NUM) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "tcont[%d] is illegal, should be less than [%d]\n",
+				tcont, MV_TCONT_LLID_NUM);
+		return FALSE;
+	}
+
+	return g_cph_app_db.tcont_state[tcont];
+}
+
+/*******************************************************************************
+**
+** cph_db_set_tcont_state
+** ___________________________________________________________________________
+**
+** DESCRIPTION: The function sets T-CONT state in mv_cph
+**
+** INPUTS:
+**   tcont - T-CONT
+**   state - State of T-CONT, enabled or disabled.
+**
+** OUTPUTS:
+**   None.
+**
+** RETURNS:
+**  On success, the function returns (MV_OK). On error different types are
+**  returned according to the case.
+**
+*******************************************************************************/
+MV_STATUS cph_db_set_tcont_state(unsigned int tcont, bool state)
+{
+	/* Check tcont */
+	if (tcont >= MV_TCONT_LLID_NUM) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "tcont[%d] is illegal, should be less than [%d]\n",
+				tcont, MV_TCONT_LLID_NUM);
+		return MV_FAIL;
+	}
+
+	/* Apply t-cont state to mv_cph */
+	g_cph_app_db.tcont_state[tcont] = state;
+
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_db_display_parse_field()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Display CPH rule parsing field
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_display_parse_field(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key)
+{
+	pr_info("Parse field(0x%x): ", parse_bm);
+
+	/* Print direction */
+	if (parse_bm & CPH_APP_PARSE_FIELD_DIR)
+		pr_info("Dir(%s), ", cph_app_lookup_dir(parse_key->dir));
+
+	/* Print TX/RX direction */
+	if (parse_bm & CPH_APP_PARSE_FIELD_RX_TX)
+		pr_info("RX/TX(%s), ", cph_app_lookup_rx_tx(parse_key->rx_tx));
+
+	/* Print Marvell header */
+	if (parse_bm & CPH_APP_PARSE_FIELD_MH)
+		pr_info("MH(0x%x), ", parse_key->mh);
+
+	/* Print Eth type */
+	if (parse_bm & CPH_APP_PARSE_FIELD_ETH_TYPE)
+		pr_info("Eth type(0x%04x), ", parse_key->eth_type);
+
+	/* Print Eth subtype */
+	if (parse_bm & CPH_APP_PARSE_FIELD_ETH_SUBTYPE)
+		pr_info("Eth subtype(%d), ", parse_key->eth_subtype);
+
+	/* Print IPV4 type */
+	if (parse_bm & CPH_APP_PARSE_FIELD_IPV4_TYPE)
+		pr_info("IPv4 type(%d), ", parse_key->ipv4_type);
+
+	/* Print IPv6 NH1 */
+	if (parse_bm & CPH_APP_PARSE_FIELD_IPV6_NH1)
+		pr_info("IPv6 NH1(%d), ", parse_key->ipv6_nh1);
+
+	/* Print IPv6 NH2 */
+	if (parse_bm & CPH_APP_PARSE_FIELD_IPV6_NH2)
+		pr_info("IPv6 NH2(%d), ", parse_key->ipv6_nh2);
+
+	/* Print ICMPv6 type */
+	if (parse_bm & CPH_APP_PARSE_FIELD_ICMPV6_TYPE)
+		pr_info("ICMPv6 type(%d)", parse_key->icmpv6_type);
+
+	pr_info("\n");
+
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_db_display_mod_field()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Display CPH rule modification field
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_display_mod_field(
+	enum CPH_APP_MOD_FIELD_E   mod_bm,
+	struct CPH_APP_MOD_T        *mod_value)
+{
+	pr_info("Mod field(0x%x): ", mod_bm);
+
+	/* Print protocol type */
+	if (mod_bm & CPH_APP_RX_MOD_REPLACE_PROTO_TYPE)
+		pr_info("Proto type(0x%x), ", mod_value->proto_type);
+
+	pr_info("state(%s)", (mod_value->state == TRUE) ? "Enabled" : "Disabled");
+
+	pr_info("\n");
+
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_db_display_frwd_field()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Display CPH rule forwarding field
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_display_frwd_field(
+	enum CPH_APP_FRWD_FIELD_E  frwd_bm,
+	struct CPH_APP_FRWD_T       *frwd_value)
+{
+	pr_info("Forward field(0x%x): ", frwd_bm);
+
+	/* Print target port */
+	if (frwd_bm & CPH_APP_FRWD_SET_TRG_PORT)
+		pr_info("Target port(%d), ", frwd_value->trg_port);
+
+	/* Print target queue */
+	if (frwd_bm & CPH_APP_FRWD_SET_TRG_QUEUE)
+		pr_info("Target queue(%d), ", frwd_value->trg_queue);
+
+	/* Print GEM port */
+	if (frwd_bm & CPH_APP_FRWD_SET_GEM_PORT)
+		pr_info("Gem port(%d)", frwd_value->gem_port);
+
+	pr_info("\n");
+
+	return MV_OK;
+}
+
+
+/******************************************************************************
+* cph_db_display_all()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Display all CPH rules in data base
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_display_all(void)
+{
+	struct CPH_APP_RULE_T *p_cph_rule = NULL;
+	unsigned int          idx        = 0;
+	unsigned int          rule_idx   = 0;
+
+	pr_info("CPH Application Data Base, total rule number[%u]\n", g_cph_app_db.rule_num);
+	pr_info("-------------------------------------------------------\n");
+
+	pr_info("TPM complex profile: %s, active WAN: %s\n",
+		cph_app_lookup_profile_id(g_cph_app_db.profile_id),
+		cph_app_lookup_gmac(g_cph_app_db.active_port));
+
+	pr_info("Generic application handling suppport: %s\n",
+	   (g_cph_app_db.app_support == TRUE) ? "Enabled" : "Disabled");
+
+#ifdef CONFIG_MV_CPH_IGMP_HANDLE
+	pr_info("IGMP/MLD handling suppport: %s\n",
+		(g_cph_app_db.igmp_support == TRUE) ? "Enabled" : "Disabled");
+#endif
+
+#ifdef CONFIG_MV_CPH_BC_HANDLE
+	pr_info("Broadcast handling suppport: %s\n",
+		(g_cph_app_db.bc_support == TRUE) ? "Enabled" : "Disabled");
+#endif
+
+#ifdef CONFIG_MV_CPH_FLOW_MAP_HANDLE
+	pr_info("Data flow mapping/modification suppport: %s\n",
+		(g_cph_app_db.flow_support == TRUE) ? "Enabled" : "Disabled");
+#endif
+
+#ifdef CONFIG_MV_CPH_UDP_SAMPLE_HANDLE
+	pr_info("UDP port mapping suppport: %s\n",
+		(g_cph_app_db.udp_support == TRUE) ? "Enabled" : "Disabled");
+#endif
+
+	pr_info("-------------------------------------------------------\n");
+
+	pr_info("CPH total rule number: %d\n", g_cph_app_db.rule_num);
+
+	for (idx = 0, rule_idx = 0; (idx < CPH_APP_MAX_RULE_NUM) && (rule_idx < g_cph_app_db.rule_num); idx++) {
+		p_cph_rule = &g_cph_app_db.cph_rule[idx];
+		if (p_cph_rule->valid == TRUE) {
+			rule_idx++;
+
+			pr_info("CPH rule: #%d\n", rule_idx);
+			pr_info("-----------------------\n");
+			cph_db_display_parse_field(p_cph_rule->parse_bm, &p_cph_rule->parse_key);
+			cph_db_display_mod_field(p_cph_rule->mod_bm,     &p_cph_rule->mod_value);
+			cph_db_display_frwd_field(p_cph_rule->frwd_bm,   &p_cph_rule->frwd_value);
+			pr_info("Counter: %d\n\n", p_cph_rule->count);
+		}
+	}
+
+	pr_info("Mis-matched or broadcast counter: %d\n", g_cph_app_db.bc_count);
+
+	pr_info("T-CONT State\n");
+	for (idx = 0; idx < MV_TCONT_LLID_NUM; idx++)
+		pr_info("T-CONT[%d]: %s\n", idx, (g_cph_app_db.tcont_state[idx] == TRUE) ? "TRUE" : "FALSE");
+
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_db_init()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Initialize CPH data base
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_init(void)
+{
+	unsigned int    idx = 0;
+	MV_STATUS rc  = MV_OK;
+
+	memset(&g_cph_app_db, 0, sizeof(g_cph_app_db));
+	for (idx = 0; idx < CPH_APP_MAX_RULE_NUM; idx++)
+		g_cph_app_db.cph_rule[idx].valid = FALSE;
+
+	/* Set the default value */
+	g_cph_app_db.profile_id   = TPM_PON_WAN_DUAL_MAC_INT_SWITCH;
+	g_cph_app_db.active_port  = MV_APP_PON_MAC_PORT;
+	g_cph_app_db.app_support  = TRUE;
+	g_cph_app_db.igmp_support = FALSE;
+	g_cph_app_db.bc_support   = FALSE;
+	g_cph_app_db.flow_support = FALSE;
+	g_cph_app_db.udp_support  = FALSE;
+
+	for (idx = 0; idx < MV_TCONT_LLID_NUM; idx++)
+		g_cph_app_db.tcont_state[idx] = FALSE;
+
+	/* Init spin lock */
+	spin_lock_init(&g_cph_app_db.app_lock);
+
+	return rc;
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_db.h b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_db.h
new file mode 100644
index 000000000000..8068c33b5f88
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_db.h
@@ -0,0 +1,529 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	*   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+	*   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+	*   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+********************************************************************************
+* mv_cph_db.h
+*
+* DESCRIPTION: Marvell CPH(CPH Packet Handler) data base implementation
+*
+* DEPENDENCIES:
+*               None
+*
+* CREATED BY:   VictorGu
+*
+* DATE CREATED: 22Jan2013
+*
+* FILE REVISION NUMBER:
+*               Revision: 1.0
+*
+*
+*******************************************************************************/
+#ifndef _MV_CPH_DB_H_
+#define _MV_CPH_DB_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/******************************************************************************
+ * Data type Definition
+ ******************************************************************************/
+#define US_CPH_XML_CFG_FILE          "/etc/xml_params/cph_xml_cfg_file.xml"
+#define US_ONU_PROFILE_XML_CFG_FILE  "/etc/xml_params/onu_profile_xml_cfg_file.xml"
+
+#define XML_PROFILE_ELM_CAPABILITY   "Capability"
+#define XML_PROFILE_ELM_ATTRIB       "attrib"
+#define XML_PROFILE_ATTR_NAME        "name"
+#define XML_PROFILE_ATTR_VALUE       "value"
+#define XML_PROFILE_NAME_PROFILE     "Complex profile"
+#define XML_PROFILE_NAME_ACTIVE_PORT "Active wan"
+
+#define XML_CPH_ELM_APP_SUPPORT      "app_support"
+#define XML_CPH_ELM_IGMP_SUPPORT     "igmp_support"
+#define XML_CPH_ELM_BC_SUPPORT       "bc_support"
+#define XML_CPH_ELM_FLOW_SUPPORT     "flow_support"
+#define XML_CPH_ELM_UDP_SUPPORT      "udp_support"
+
+
+/* CPH rule definition for application packet handling
+------------------------------------------------------------------------------*/
+struct CPH_APP_RULE_T {
+	bool                  valid;
+	enum CPH_APP_PARSE_FIELD_E parse_bm;
+	struct CPH_APP_PARSE_T       parse_key;
+	enum CPH_APP_MOD_FIELD_E   mod_bm;
+	struct CPH_APP_MOD_T         mod_value;
+	enum CPH_APP_FRWD_FIELD_E  frwd_bm;
+	struct CPH_APP_FRWD_T        frwd_value;
+	unsigned int                count;
+};
+
+/* CPH data base for application packet handling
+------------------------------------------------------------------------------*/
+#define CPH_APP_MAX_RULE_NUM  (64)
+struct CPH_APP_DB_T {
+	enum tpm_eth_complex_profile_t  profile_id;       /* Complex profile ID, see enum tpm_eth_complex_profile_t  */
+	enum MV_APP_GMAC_PORT_E         active_port;      /* Current active WAN GE port, see enum MV_APP_GMAC_PORT_E */
+	bool                       app_support;      /* Whether support generic application handling       */
+	bool                       igmp_support;     /* Whether support IGMP/MLD packet handling           */
+	bool                       bc_support;       /* Whether support U/S broadcast packet handling      */
+	bool                       flow_support;     /* Whether support flow mapping handling in CPH       */
+	bool                       udp_support;      /* Whether support UDP port mapping in CPH            */
+	unsigned int                     rule_num;         /* Current application rule number                    */
+	struct CPH_APP_RULE_T             cph_rule[CPH_APP_MAX_RULE_NUM]; /* CPH application rules                */
+	spinlock_t                 app_lock;         /* Spin lock for application rule operation           */
+	unsigned int                     bc_count;         /* Counter for mis-matched packets, usually is bc     */
+	bool                       tcont_state[MV_TCONT_LLID_NUM];/* T-CONT state used to control SWF      */
+};
+
+/* CPH database parameter enum
+------------------------------------------------------------------------------*/
+enum CPH_DB_PARAM_E {
+	CPH_DB_PARAM_PROFILE_ID = 0,
+	CPH_DB_PARAM_ACTIVE_PORT,
+	CPH_DB_PARAM_APP_SUPPORT,
+	CPH_DB_PARAM_IGMP_SUPPORT,
+	CPH_DB_PARAM_BC_SUPPORT,
+	CPH_DB_PARAM_FLOW_SUPPORT,
+	CPH_DB_PARAM_UDP_SUPPORT,
+	CPH_DB_PARAM_BC_COUNTER
+};
+
+/******************************************************************************
+* Function Declaration
+******************************************************************************/
+/******************************************************************************
+* cph_db_set_param()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Set CPH DB parameter
+*
+* INPUTS:
+*       param   - The parameter type
+*       value   - Parameter value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_set_param(enum CPH_DB_PARAM_E param, void *value);
+
+/******************************************************************************
+* cph_db_get_param()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get CPH DB parameter
+*
+* INPUTS:
+*       param   - The parameter type
+*
+* OUTPUTS:
+*       value   - Parameter value
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_get_param(enum CPH_DB_PARAM_E param, void *value);
+
+/******************************************************************************
+* cph_db_add_app_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Add CPH rule
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_add_app_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key,
+	enum CPH_APP_MOD_FIELD_E   mod_bm,
+	struct CPH_APP_MOD_T        *mod_value,
+	enum CPH_APP_FRWD_FIELD_E  frwd_bm,
+	struct CPH_APP_FRWD_T       *frwd_value);
+
+/******************************************************************************
+* cph_db_del_app_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Deletes CPH rule
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_del_app_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key);
+
+/******************************************************************************
+* cph_db_update_app_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Update CPH rule
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_update_app_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key,
+	enum CPH_APP_MOD_FIELD_E   mod_bm,
+	struct CPH_APP_MOD_T        *mod_value,
+	enum CPH_APP_FRWD_FIELD_E  frwd_bm,
+	struct CPH_APP_FRWD_T       *frwd_value);
+
+/******************************************************************************
+* cph_db_get_app_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get CPH rule
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*
+* OUTPUTS:
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_get_app_rule(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key,
+	enum CPH_APP_MOD_FIELD_E  *mod_bm,
+	struct CPH_APP_MOD_T        *mod_value,
+	enum CPH_APP_FRWD_FIELD_E *frwd_bm,
+	struct CPH_APP_FRWD_T       *frwd_value);
+
+/******************************************************************************
+* cph_db_get_app_rule_by_dir_proto()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get application type CPH rule from data base by protocol type
+*
+* INPUTS:
+*       dir        - Direction
+*       proto_type - SKB protocol type
+*
+* OUTPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*       mod_bm     - Modification bitmap
+*       mod_value  - Modification value
+*       frwd_bm    - Forwarding bitmap
+*       frwd_value - Forwarding value
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_get_app_rule_by_dir_proto(
+	enum CPH_DIR_E              dir,
+	unsigned short                 proto_type,
+	enum CPH_APP_PARSE_FIELD_E *parse_bm,
+	struct CPH_APP_PARSE_T       *parse_key,
+	enum CPH_APP_MOD_FIELD_E   *mod_bm,
+	struct CPH_APP_MOD_T         *mod_value,
+	enum CPH_APP_FRWD_FIELD_E  *frwd_bm,
+	struct CPH_APP_FRWD_T        *frwd_value);
+
+/******************************************************************************
+* cph_db_increase_counter()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Increase RX counter
+*
+* INPUTS:
+*       parse_bm   - Parsing bitmap
+*       parse_key  - Parsing key
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_increase_counter(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key);
+
+/******************************************************************************
+* cph_db_increase_counter_by_dir_proto()
+* _____________________________________________________________________________
+*
+* DESCRIPTION:  Increase RX counter according to protocol type
+*
+* INPUTS:
+*       dir        - Direction
+*       proto_type - SKB protocol type
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_increase_counter_by_dir_proto(enum CPH_DIR_E dir,
+	unsigned short    proto_type);
+
+/******************************************************************************
+* cph_db_get_xml_param()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get the XML parameter
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_get_xml_param(void);
+
+/*******************************************************************************
+**
+** cph_db_get_tcont_state
+** ___________________________________________________________________________
+**
+** DESCRIPTION: The function get T-CONT state
+**
+** INPUTS:
+**   tcont - T-CONT
+**
+** OUTPUTS:
+**   None.
+**
+** RETURNS:
+**   state - State of T-CONT, enabled or disabled.
+**
+*******************************************************************************/
+bool cph_db_get_tcont_state(unsigned int tcont);
+
+/*******************************************************************************
+**
+** cph_db_set_tcont_state
+** ___________________________________________________________________________
+**
+** DESCRIPTION: The function sets T-CONT state in mv_cph
+**
+** INPUTS:
+**   tcont - T-CONT
+**   state - State of T-CONT, enabled or disabled.
+**
+** OUTPUTS:
+**   None.
+**
+** RETURNS:
+**  On success, the function returns (MV_OK). On error different types are
+**  returned according to the case.
+**
+*******************************************************************************/
+MV_STATUS cph_db_set_tcont_state(unsigned int tcont, bool state);
+
+/******************************************************************************
+* cph_db_display_parse_field()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Display CPH rule parsing field
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_display_parse_field(
+	enum CPH_APP_PARSE_FIELD_E parse_bm,
+	struct CPH_APP_PARSE_T      *parse_key);
+
+/******************************************************************************
+* cph_db_display_mod_field()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Display CPH rule modification field
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_display_mod_field(
+	enum CPH_APP_MOD_FIELD_E   mod_bm,
+	struct CPH_APP_MOD_T        *mod_value);
+
+/******************************************************************************
+* cph_db_display_frwd_field()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Display CPH rule forwarding field
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_display_frwd_field(
+	enum CPH_APP_FRWD_FIELD_E  frwd_bm,
+	struct CPH_APP_FRWD_T       *frwd_value);
+
+
+/******************************************************************************
+* cph_db_display_all()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Display CPH data base
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_display_all(void);
+
+/******************************************************************************
+* cph_db_init()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Initialize CPH data base
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_db_init(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _MV_CPH_DB_H_ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_dev.c b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_dev.c
new file mode 100644
index 000000000000..4aeedff9540e
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_dev.c
@@ -0,0 +1,446 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	*   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+	*   Redistributions in binary form must reproduce the above copyright
+		notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+	*   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+********************************************************************************
+* mv_cph_dev.c
+*
+* DESCRIPTION: Marvell CPH(CPH Packet Handler) char device definition
+*
+* DEPENDENCIES:
+*               None
+*
+* CREATED BY:   VictorGu
+*
+* DATE CREATED: 22Jan2013
+*
+* FILE REVISION NUMBER:
+*               Revision: 1.0
+*
+*
+*******************************************************************************/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/poll.h>
+#include <linux/clk.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include "mv_cph_header.h"
+
+/* Used to prevent multiple access to device */
+static int               g_cph_device_open;
+static struct miscdevice g_cph_misc_dev;
+
+/******************************************************************************
+* cph_dev_open()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: The function executes device open actions
+*
+* INPUTS:
+*       inode - Device inode pointer.
+*       file  - File handler.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+static int cph_dev_open(struct inode *inode, struct file *file)
+{
+	MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Enter\n");
+
+#if 0
+	if (g_cph_device_open > 0)
+		return -EBUSY;
+#endif
+
+	g_cph_device_open++;
+
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_dev_release()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: The function executes device release actions
+*
+* INPUTS:
+*       inode - Device inode pointer.
+*       file  - File handler.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+static int cph_dev_release(struct inode *inode, struct file *file)
+{
+	MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Enter\n");
+
+#if 0
+	if (cph_device_open > 0)
+		cph_device_open--;
+#endif
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_dev_ioctl()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: The function executes ioctl commands
+*
+* INPUTS:
+*       inode - Device inode pointer.
+*       file  - File handler.
+*       cmd   - Command.
+*       arg   - Ponter to arg.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+long cph_dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct CPH_IOCTL_APP_RULE_T    cph_app_rule;
+	struct CPH_IOCTL_FLOW_MAP_T    cph_flow_map;
+	struct CPH_IOCTL_DSCP_MAP_T    cph_dscp_map;
+	struct CPH_IOCTL_MISC_T        cph_misc;
+	struct CPH_IOCTL_TCONT_STATE_T cph_tcont;
+	int                   rc  = -EINVAL;
+
+	MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Enter\n");
+
+	switch (cmd) {
+	/* CPH application packet handling IOCTL
+	-------------------------------------------*/
+	case MV_CPH_IOCTL_SET_COMPLEX_PROFILE:
+		if (copy_from_user(&cph_misc, (struct CPH_IOCTL_MISC_T *)arg, sizeof(struct CPH_IOCTL_MISC_T))) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "copy_from_user failed\n");
+			goto ioctl_err;
+		}
+
+		rc = cph_set_complex_profile(cph_misc.profile_id, cph_misc.active_port);
+		break;
+
+	case MV_CPH_IOCTL_SET_FEATURE_FLAG:
+		if (copy_from_user(&cph_misc, (struct CPH_IOCTL_MISC_T *)arg, sizeof(struct CPH_IOCTL_MISC_T))) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "copy_from_user failed\n");
+			goto ioctl_err;
+		}
+
+		rc = cph_set_feature_flag(cph_misc.feature_type, cph_misc.feature_flag);
+		break;
+
+	case MV_CPH_IOCTL_APP_ADD_RULE:
+		if (copy_from_user(&cph_app_rule, (struct CPH_IOCTL_APP_RULE_T *)arg,
+			sizeof(struct CPH_IOCTL_APP_RULE_T))) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "copy_from_user failed\n");
+			goto ioctl_err;
+		}
+
+		rc = cph_add_app_rule(cph_app_rule.parse_bm, &cph_app_rule.parse_key, cph_app_rule.mod_bm,
+					&cph_app_rule.mod_value, cph_app_rule.frwd_bm, &cph_app_rule.frwd_value);
+		break;
+
+	case MV_CPH_IOCTL_APP_DEL_RULE:
+		if (copy_from_user(&cph_app_rule, (struct CPH_IOCTL_APP_RULE_T *)arg,
+			sizeof(struct CPH_IOCTL_APP_RULE_T))) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "copy_from_user failed\n");
+			goto ioctl_err;
+		}
+
+		rc = cph_del_app_rule(cph_app_rule.parse_bm, &cph_app_rule.parse_key);
+		break;
+
+	case MV_CPH_IOCTL_APP_UPDATE_RULE:
+		if (copy_from_user(&cph_app_rule, (struct CPH_IOCTL_APP_RULE_T *)arg,
+			sizeof(struct CPH_IOCTL_APP_RULE_T))) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "copy_from_user failed\n");
+			goto ioctl_err;
+		}
+
+		rc = cph_update_app_rule(cph_app_rule.parse_bm, &cph_app_rule.parse_key, cph_app_rule.mod_bm,
+					&cph_app_rule.mod_value, cph_app_rule.frwd_bm, &cph_app_rule.frwd_value);
+		break;
+
+	case MV_CPH_IOCTL_APP_GET_RULE:
+		if (copy_from_user(&cph_app_rule, (struct CPH_IOCTL_APP_RULE_T *)arg,
+			sizeof(struct CPH_IOCTL_APP_RULE_T))) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "copy_from_user failed\n");
+			goto ioctl_err;
+		}
+
+		rc = cph_get_app_rule(cph_app_rule.parse_bm, &cph_app_rule.parse_key, &cph_app_rule.mod_bm,
+					&cph_app_rule.mod_value, &cph_app_rule.frwd_bm, &cph_app_rule.frwd_value);
+
+		if (rc != MV_OK)
+			goto ioctl_err;
+
+		if (copy_to_user((struct CPH_IOCTL_APP_RULE_T *)arg, &cph_app_rule,
+			sizeof(struct CPH_IOCTL_APP_RULE_T))) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "copy_to_user failed\n");
+			goto ioctl_err;
+		}
+		break;
+
+	/* CPH flow mapping IOCTL
+	-------------------------------------------*/
+	case MV_CPH_IOCTL_FLOW_ADD_RULE:
+		if (copy_from_user(&cph_flow_map, (struct CPH_IOCTL_FLOW_MAP_T *)arg,
+			sizeof(struct CPH_IOCTL_FLOW_MAP_T))) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "copy_from_user failed\n");
+			goto ioctl_err;
+		}
+
+		rc = cph_add_flow_rule(&cph_flow_map.flow_map);
+		break;
+
+	case MV_CPH_IOCTL_FLOW_DEL_RULE:
+		if (copy_from_user(&cph_flow_map, (struct CPH_IOCTL_FLOW_MAP_T *)arg,
+			sizeof(struct CPH_IOCTL_FLOW_MAP_T))) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "copy_from_user failed\n");
+			goto ioctl_err;
+		}
+
+		rc = cph_del_flow_rule(&cph_flow_map.flow_map);
+		break;
+
+	case MV_CPH_IOCTL_FLOW_GET_RULE:
+		if (copy_from_user(&cph_flow_map, (struct CPH_IOCTL_FLOW_MAP_T *)arg,
+			sizeof(struct CPH_IOCTL_FLOW_MAP_T))) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "copy_from_user failed\n");
+			goto ioctl_err;
+		}
+
+		rc = cph_get_flow_rule(&cph_flow_map.flow_map);
+
+		if (rc != MV_OK)
+			goto ioctl_err;
+
+		if (copy_to_user((struct CPH_IOCTL_FLOW_MAP_T *)arg, &cph_flow_map,
+			sizeof(struct CPH_IOCTL_FLOW_MAP_T))) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "copy_to_user failed\n");
+			goto ioctl_err;
+		}
+		break;
+
+	case MV_CPH_IOCTL_FLOW_CLEAR_RULE:
+		rc = cph_clear_flow_rule();
+		break;
+
+	case MV_CPH_IOCTL_FLOW_CLEAR_RULE_BY_MH:
+		if (copy_from_user(&cph_flow_map, (struct CPH_IOCTL_FLOW_MAP_T *)arg,
+			sizeof(struct CPH_IOCTL_FLOW_MAP_T))) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "copy_from_user failed\n");
+			goto ioctl_err;
+		}
+
+		rc = cph_clear_flow_rule_by_mh(cph_flow_map.flow_map.mh);
+		break;
+
+	case MV_CPH_IOCTL_FLOW_SET_DSCP_MAP:
+		if (copy_from_user(&cph_dscp_map, (struct CPH_IOCTL_DSCP_MAP_T *)arg,
+			sizeof(struct CPH_IOCTL_DSCP_MAP_T))) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "copy_from_user failed\n");
+			goto ioctl_err;
+		}
+
+		rc = cph_set_flow_dscp_map(&cph_dscp_map.dscp_map);
+		break;
+
+	case MV_CPH_IOCTL_FLOW_DEL_DSCP_MAP:
+		rc = cph_del_flow_dscp_map();
+		break;
+
+	case MV_CPH_IOCTL_SET_TCONT_LLID_STATE:
+		if (copy_from_user(&cph_tcont, (unsigned int *)arg, sizeof(struct CPH_IOCTL_TCONT_STATE_T))) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "copy_from_user failed\n");
+			goto ioctl_err;
+		}
+
+		rc = cph_set_tcont_state(cph_tcont.tcont, cph_tcont.state);
+		break;
+
+	case MV_CPH_IOCTL_SETUP:
+		rc = cph_dev_setup();
+		break;
+
+	default:
+		rc = -EINVAL;
+	}
+
+ioctl_err:
+	return rc;
+}
+
+
+static const struct file_operations g_cph_dev_fops = {
+	.open			= cph_dev_open,
+	.release		= cph_dev_release,
+	.unlocked_ioctl	= cph_dev_ioctl,
+};
+
+/******************************************************************************
+* cph_dev_setup()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Setup device
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_dev_setup(void)
+{
+	MV_STATUS rc  = MV_OK;
+
+	/* Get parameter from XML file */
+	rc = cph_db_get_xml_param();
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "fail to call cph_db_get_xml_param");
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_dev_init()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Initialize CPH device
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_dev_init(void)
+{
+	MV_STATUS rc = MV_OK;
+	MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Enter\n");
+
+	g_cph_misc_dev.minor = MISC_DYNAMIC_MINOR;
+	g_cph_misc_dev.name  = MV_CPH_DEVICE_NAME;
+	g_cph_misc_dev.fops  = &g_cph_dev_fops;
+
+	rc = misc_register(&g_cph_misc_dev);
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "fail to call misc_register");
+
+	rc = cph_netdev_init();
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "fail to call cph_netdev_init");
+
+	rc = cph_sysfs_init();
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "fail to call cph_sysfs_init");
+
+	pr_info("CPH: misc device %s registered with minor: %d\n", MV_CPH_DEVICE_NAME, g_cph_misc_dev.minor);
+	return rc;
+}
+
+/******************************************************************************
+* cph_dev_shutdown()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Shutdown CPH device
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       None.
+*******************************************************************************/
+void cph_dev_shutdown(void)
+{
+	MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Enter\n");
+
+	cph_sysfs_exit();
+
+	misc_deregister(&g_cph_misc_dev);
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_dev.h b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_dev.h
new file mode 100644
index 000000000000..9b1371a2b08c
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_dev.h
@@ -0,0 +1,151 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	*   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+	*   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+	*   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+********************************************************************************
+* mv_cph_dev.h
+*
+* DESCRIPTION: Marvell CPH(CPH Packet Handler) char device definition
+*
+* DEPENDENCIES:
+*               None
+*
+* CREATED BY:   VictorGu
+*
+* DATE CREATED: 22Jan2013
+*
+* FILE REVISION NUMBER:
+*               Revision: 1.0
+*
+*
+*******************************************************************************/
+#ifndef _MV_CPH_DEV_H_
+#define _MV_CPH_DEV_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define MV_CPH_DEVICE_NAME  "cph"
+#define MV_CPH_IOCTL_MAGIC  ('C')
+
+/******************************************************************************
+* Function Declaration
+******************************************************************************/
+/******************************************************************************
+* cph_dev_setup()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Setup device
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_dev_setup(void);
+
+/******************************************************************************
+* cph_dev_shutdown()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Initialize CPH device
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int  cph_dev_init(void);
+
+/******************************************************************************
+* cph_dev_shutdown()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Shutdown CPH device
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       None.
+*******************************************************************************/
+void cph_dev_shutdown(void);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _MV_CPH_DEV_H_ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_flow.c b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_flow.c
new file mode 100644
index 000000000000..c54fd35e5dad
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_flow.c
@@ -0,0 +1,2911 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	*   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+	*   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+	*   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+********************************************************************************
+* mv_cph_flow.c
+*
+* DESCRIPTION: Marvell CPH(CPH Packet Handler) flow module to handle the
+*              flow mapping, VLAN modification of data traffic
+*
+* DEPENDENCIES:
+*               None
+*
+* CREATED BY:   VictorGu
+*
+* DATE CREATED: 12Dec2011
+*
+* FILE REVISION NUMBER:
+*               Revision: 1.1
+*
+*
+*******************************************************************************/
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "mv_cph_header.h"
+
+/******************************************************************************
+*                           Global Data Definitions
+******************************************************************************/
+static struct CPH_FLOW_DB_T gs_cph_flow_db;
+static struct CPH_FLOW_TABLE_T gs_mc_flow_tbl;
+
+static struct MV_ENUM_ENTRY_T g_enum_map_op_type[] = {
+	{ CPH_VLAN_OP_ASIS,                              "ASIS"},
+	{ CPH_VLAN_OP_DISCARD,                           "DISCARD"},
+	{ CPH_VLAN_OP_ADD,                               "ADD"},
+	{ CPH_VLAN_OP_ADD_COPY_DSCP,                     "ADD_COPY_DSCP"},
+	{ CPH_VLAN_OP_ADD_COPY_OUTER_PBIT,               "ADD_COPY_OUTER_PBIT"},
+	{ CPH_VLAN_OP_ADD_COPY_INNER_PBIT,               "ADD_COPY_INNER_PBIT"},
+	{ CPH_VLAN_OP_ADD_2_TAGS,                        "ADD_2_TAGS"},
+	{ CPH_VLAN_OP_ADD_2_TAGS_COPY_DSCP,              "ADD_2_TAGS_COPY_DSCP"},
+	{ CPH_VLAN_OP_ADD_2_TAGS_COPY_PBIT,              "ADD_2_TAGS_COPY_PBIT"},
+	{ CPH_VLAN_OP_REM,                               "REM"},
+	{ CPH_VLAN_OP_REM_2_TAGS,                        "REM_2_TAGS"},
+	{ CPH_VLAN_OP_REPLACE,                           "REPLACE"},
+	{ CPH_VLAN_OP_REPLACE_VID,                       "REPLACE_VID"},
+	{ CPH_VLAN_OP_REPLACE_PBIT,                      "REPLACE_PBIT"},
+	{ CPH_VLAN_OP_REPLACE_INNER_ADD_OUTER,           "REPLACE_INNER_ADD_OUTER"},
+	{ CPH_VLAN_OP_REPLACE_INNER_ADD_OUTER_COPY_PBIT, "REPLACE_INNER_ADD_OUTER_COPY_PBIT"},
+	{ CPH_VLAN_OP_REPLACE_INNER_REM_OUTER,           "REPLACE_INNER_REM_OUTER"},
+	{ CPH_VLAN_OP_REPLACE_2TAGS,                     "REPLACE_2TAGS"},
+	{ CPH_VLAN_OP_REPLACE_2TAGS_VID,                 "REPLACE_2TAGS_VID"},
+	{ CPH_VLAN_OP_SWAP,                              "SWAP"}
+};
+
+static struct MV_ENUM_ARRAY_T g_enum_array_op_type = {
+	sizeof(g_enum_map_op_type)/sizeof(g_enum_map_op_type[0]),
+	g_enum_map_op_type
+};
+
+/******************************************************************************
+*                           External Declarations
+******************************************************************************/
+
+
+
+/******************************************************************************
+*                           Function Definitions
+******************************************************************************/
+/******************************************************************************
+* cph_flow_db_get_mc_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get CPH flow mapping multicast rule.
+*
+* INPUTS:
+*       mc_flow    - MC flow parsing field values
+*       for_packet - Whether get rule for packet or for new CPH rule
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_flow_db_get_mc_rule(struct CPH_FLOW_ENTRY_T *mc_flow, bool for_packet)
+{
+	unsigned int            idx         = 0;
+	unsigned int            rule_idx    = 0;
+	struct CPH_FLOW_ENTRY_T *p_flow_rule = NULL;
+	struct CPH_FLOW_DB_T    *p_cph_db    = NULL;
+	struct CPH_FLOW_TABLE_T *p_mc_tbl    = NULL;
+	bool              rc          = FALSE;
+	unsigned long     flags;
+
+	CPH_IF_NULL(mc_flow);
+
+	p_cph_db = &gs_cph_flow_db;
+	p_mc_tbl = &gs_mc_flow_tbl;
+
+	spin_lock_irqsave(&p_cph_db->flow_lock, flags);
+	/* Traverse CPH flow rule table */
+	for (idx = 0, rule_idx = 0; (idx < CPH_FLOW_ENTRY_NUM) && (rule_idx < p_mc_tbl->rule_num); idx++) {
+		p_flow_rule = &p_mc_tbl->flow_rule[idx];
+
+		/* Compare packet or new rule rule data base rule */
+		if (p_flow_rule->valid == TRUE) {
+			rule_idx++;
+
+			if (for_packet == TRUE)
+				rc = cph_flow_compare_packet_and_rule(mc_flow, p_flow_rule);
+			else
+				rc = cph_flow_compare_rules(mc_flow, p_flow_rule);
+
+			if (rc == TRUE) {
+				mc_flow->op_type = p_flow_rule->op_type;
+				memcpy(&mc_flow->mod_outer_tci, &p_flow_rule->mod_outer_tci,
+					sizeof(struct CPH_FLOW_TCI_T));
+				memcpy(&mc_flow->mod_inner_tci, &p_flow_rule->mod_inner_tci,
+					sizeof(struct CPH_FLOW_TCI_T));
+				memcpy(&mc_flow->pkt_frwd,      &p_flow_rule->pkt_frwd,
+					sizeof(struct CPH_FLOW_FRWD_T));
+
+				/* Increase count */
+				if (for_packet == TRUE) {
+					if (p_flow_rule->count == 0xFFFFFFFF)
+						p_flow_rule->count = 0;
+					else
+						p_flow_rule->count++;
+				}
+
+				spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+				return MV_OK;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+
+	return MV_FAIL;
+}
+/******************************************************************************
+* cph_flow_db_add_mc_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Add flow rule to database
+*
+* INPUTS:
+*       mc_flow - VLAN ID, 802.1p value, pkt_fwd information.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int  cph_flow_db_add_mc_rule(struct CPH_FLOW_ENTRY_T *mc_flow)
+{
+	unsigned int            idx         = 0;
+	struct CPH_FLOW_ENTRY_T *p_flow_rule = NULL;
+	struct CPH_FLOW_DB_T    *p_cph_db    = NULL;
+	struct CPH_FLOW_TABLE_T *p_mc_tbl    = NULL;
+	bool              rc          = MV_OK;
+	unsigned long     flags;
+
+	CPH_IF_NULL(mc_flow);
+
+	p_cph_db = &gs_cph_flow_db;
+	p_mc_tbl = &gs_mc_flow_tbl;
+
+	/* If the flow table is full */
+	if (p_mc_tbl->rule_num >= CPH_FLOW_ENTRY_NUM) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "%s(), flow rule table is full<%d>\n", __func__, p_mc_tbl->rule_num);
+		return MV_FULL;
+	}
+
+	/* Check if there is already conflicted rules */
+	if (cph_flow_db_get_mc_rule(mc_flow, FALSE) == MV_OK) {
+		MV_CPH_PRINT(CPH_DEBUG_LEVEL, "%s(), already has conflict flow rule\n", __func__);
+		return MV_OK;
+	}
+
+	spin_lock_irqsave(&p_cph_db->flow_lock, flags);
+	/* Traverse CPH flow rule tale */
+	for (idx = 0; idx < CPH_FLOW_ENTRY_NUM; idx++) {
+		p_flow_rule = &p_mc_tbl->flow_rule[idx];
+
+		/* Compare parse_bm and parse_key */
+		if (p_flow_rule->valid == FALSE)
+			break;
+	}
+
+	if (idx == CPH_FLOW_ENTRY_NUM) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "%s(), flow rule table is full<%d>\n", __func__, p_mc_tbl->rule_num);
+		spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+		return MV_FULL;
+	}
+
+	/* Save to db */
+	memcpy(p_flow_rule, mc_flow, sizeof(struct CPH_FLOW_ENTRY_T));
+	p_flow_rule->valid = TRUE;
+	p_flow_rule->count = 0;
+	p_mc_tbl->rule_num++;
+	spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_flow_db_del_mc_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Delete CPH flow mapping rule.
+*
+* INPUTS:
+*       mc_flow - Flow parsing field values
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_flow_db_del_mc_rule(struct CPH_FLOW_ENTRY_T *mc_flow)
+{
+	unsigned int            idx         = 0;
+	unsigned int            rule_idx    = 0;
+	struct CPH_FLOW_ENTRY_T *p_flow_rule = NULL;
+	struct CPH_FLOW_DB_T    *p_cph_db    = NULL;
+	struct CPH_FLOW_TABLE_T *p_mc_tbl    = NULL;
+	bool              rc          = MV_OK;
+	unsigned long     flags;
+
+	CPH_IF_NULL(mc_flow);
+
+	p_cph_db = &gs_cph_flow_db;
+	p_mc_tbl = &gs_mc_flow_tbl;
+
+	spin_lock_irqsave(&p_cph_db->flow_lock, flags);
+	/* Traverse CPH flow rule tale */
+	for (idx = 0, rule_idx = 0; (idx < CPH_FLOW_ENTRY_NUM) && (rule_idx < p_mc_tbl->rule_num); idx++) {
+		p_flow_rule = &p_mc_tbl->flow_rule[idx];
+
+		/* Compare parse_bm and parse_key */
+		if (p_flow_rule->valid == TRUE) {
+			rule_idx++;
+
+			rc = cph_flow_compare_rules(mc_flow, p_flow_rule);
+			if (rc == TRUE) {
+				memset(p_flow_rule, 0, sizeof(struct CPH_FLOW_ENTRY_T));
+				p_flow_rule->valid = FALSE;
+				p_mc_tbl->rule_num--;
+
+				spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+				return MV_OK;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_flow_get_vid_pbit()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get external VID and P-bits.
+*
+* INPUTS:
+*       flow       - Flow parsing field values
+*
+* OUTPUTS:
+*       vid  - external VLAN ID.
+*       pbit - external P-bits
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_flow_get_vid_pbit(struct CPH_FLOW_ENTRY_T *flow, unsigned short *vid, unsigned char *pbit)
+{
+	MV_STATUS rc = MV_OK;
+
+	CPH_IF_NULL(flow);
+	CPH_IF_NULL(vid);
+	CPH_IF_NULL(pbit);
+
+	*vid = flow->parse_outer_tci.vid;
+	*pbit = flow->parse_outer_tci.pbits;
+	if (flow->is_default == FALSE) {
+		if (!(flow->parse_bm & (CPH_FLOW_PARSE_EXT_VLAN|CPH_FLOW_PARSE_TWO_VLAN))) {
+			*vid   = MV_CPH_DEFAULT_UNTAG_RULE;
+			*pbit  = MV_CPH_PBITS_NOT_CARE_VALUE;
+		}
+	} else {
+		if (flow->parse_bm & CPH_FLOW_PARSE_EXT_VLAN) {
+			if ((flow->parse_outer_tci.vid == MV_CPH_ZERO_VALUE) ||
+			    (flow->parse_outer_tci.vid == MV_CPH_VID_NOT_CARE_VALUE))
+				*vid = MV_CPH_DEFAULT_SINGLE_TAG_RULE;
+
+			if ((flow->parse_outer_tci.pbits == MV_CPH_ZERO_VALUE) ||
+			    (flow->parse_outer_tci.pbits == MV_CPH_PBITS_NOT_CARE_VALUE))
+				*pbit = MV_CPH_PBITS_NOT_CARE_VALUE;
+		} else if (flow->parse_bm & CPH_FLOW_PARSE_TWO_VLAN) {
+			if ((flow->parse_outer_tci.vid == MV_CPH_ZERO_VALUE) ||
+			    (flow->parse_outer_tci.vid == MV_CPH_VID_NOT_CARE_VALUE)) {
+				*vid = MV_CPH_DEFAULT_DOUBLE_TAG_RULE;
+			}
+			if ((flow->parse_outer_tci.pbits == MV_CPH_ZERO_VALUE) ||
+			    (flow->parse_outer_tci.pbits == MV_CPH_PBITS_NOT_CARE_VALUE)) {
+				*pbit = MV_CPH_PBITS_NOT_CARE_VALUE;
+			}
+		} else {
+			*vid = MV_CPH_DEFAULT_UNTAG_RULE;
+			*pbit  = MV_CPH_PBITS_NOT_CARE_VALUE;
+		}
+	}
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_flow_db_get_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get CPH flow mapping rule.
+*
+* INPUTS:
+*       cph_flow   - Flow parsing field values
+*       for_packet - Whether get rule for packet or for new CPH rule
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_flow_db_get_rule(struct CPH_FLOW_ENTRY_T *cph_flow, bool for_packet)
+{
+	unsigned short             vid;
+	unsigned char              pbit;
+	unsigned int             idx           = 0;
+	unsigned char             *p_vid_entry   = NULL;
+	struct CPH_PBITS_TABLE_T *p_pbit_tbl    = NULL;
+	struct CPH_PBITS_ENTRY_T *p_pbit_entry  = NULL;
+	struct CPH_FLOW_ENTRY_T  *p_flow_rule   = NULL;
+	struct CPH_FLOW_DB_T     *p_cph_db      = NULL;
+	bool               rc            = MV_OK;
+	unsigned long      flags;
+
+	CPH_IF_NULL(cph_flow);
+	if (cph_flow->parse_bm & CPH_FLOW_PARSE_MC_PROTO) {
+		rc = cph_flow_db_get_mc_rule(cph_flow, for_packet);
+		if (rc != MV_OK) {
+			MV_CPH_PRINT(CPH_DEBUG_LEVEL, "%s(%d), failed to get MC rule\n", __func__, __LINE__);
+			return rc;
+		}
+		return rc;
+	}
+
+	CPH_POS_RANGE_VALIDATE(cph_flow->dir, CPH_DIR_DS, "DIR not allowed");
+
+	/* get VLAN ID and P-bits from cph rule */
+	rc = cph_flow_get_vid_pbit(cph_flow, &vid, &pbit);
+	CPH_IF_ERROR(rc, "fail to get VID and P-bits\n");
+
+	CPH_POS_RANGE_VALIDATE(vid, MV_CPH_VID_INDEX_TABLE_MAX_SIZE - 1, "illegal VID");
+	CPH_POS_RANGE_VALIDATE(pbit, MV_CPH_PBITS_MAP_MAX_ENTRY_NUM - 1, "illegal pbits");
+
+	p_cph_db = &gs_cph_flow_db;
+
+	spin_lock_irqsave(&p_cph_db->flow_lock, flags);
+	/* Find VID index entry by VID */
+	p_vid_entry = &p_cph_db->vid_idx_tbl[cph_flow->dir].pbit_tbl_idx[vid];
+
+	/* Get P-bits mapping table */
+	if ((*p_vid_entry == MV_CPH_PBITS_TABLE_INVALID_INDEX) ||
+	    (*p_vid_entry >= MV_CPH_MAX_PBITS_MAP_TABLE_SIZE)) {
+		MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Pbit table index(%d) is invalid\n", *p_vid_entry);
+		spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+		return MV_NO_SUCH;
+	}
+
+	p_pbit_tbl = &p_cph_db->pbits_tbl[cph_flow->dir][*p_vid_entry];
+
+	/* Save forwarding information */
+	if (cph_flow->is_default == TRUE)
+		p_pbit_entry = &p_pbit_tbl->def_flow_rule[pbit];
+	else
+		p_pbit_entry = &p_pbit_tbl->flow_rule[pbit];
+
+	if (p_pbit_entry->num > MV_CPH_RULE_NUM_PER_ENTRY) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "invalid P-bit entry number(%d)\n", p_pbit_entry->num);
+		spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+		return MV_BAD_VALUE;
+	}
+
+	/* Traverse CPH flow rule table */
+	for (idx = 0; idx < p_pbit_entry->num; idx++) {
+		if ((p_pbit_entry->rule_idx[idx] >= CPH_FLOW_ENTRY_NUM) ||
+		    (p_pbit_entry->rule_idx[idx] < 1)) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "invalid rule index(%d)\n", p_pbit_entry->rule_idx[idx]);
+			spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+			return MV_BAD_VALUE;
+		}
+
+		p_flow_rule = &p_cph_db->flow_tbl.flow_rule[p_pbit_entry->rule_idx[idx]];
+		/* Compare parse_bm and parse_key */
+		if (p_flow_rule->valid == TRUE) {
+			if (for_packet == TRUE)
+				rc = cph_flow_compare_packet_and_rule(cph_flow, p_flow_rule);
+			else
+				rc = cph_flow_compare_rules(cph_flow, p_flow_rule);
+
+			if (rc == TRUE) {
+				cph_flow->op_type = p_flow_rule->op_type;
+				memcpy(&cph_flow->mod_outer_tci, &p_flow_rule->mod_outer_tci,
+					sizeof(struct CPH_FLOW_TCI_T));
+				memcpy(&cph_flow->mod_inner_tci, &p_flow_rule->mod_inner_tci,
+					sizeof(struct CPH_FLOW_TCI_T));
+				memcpy(&cph_flow->pkt_frwd,      &p_flow_rule->pkt_frwd,
+					sizeof(struct CPH_FLOW_FRWD_T));
+
+				/* Increase count */
+				if (for_packet == TRUE) {
+					if (p_flow_rule->count == 0xFFFFFFFF)
+						p_flow_rule->count = 0;
+					else
+						p_flow_rule->count++;
+				}
+
+				spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+				return MV_OK;
+			}
+		}
+	}
+
+	/* traverse CPH flow rule rules which does not care about P-bits */
+	if (MV_CPH_PBITS_NOT_CARE_VALUE != pbit) {
+		MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Search P-bits not care rules, vlan(%d), p-bits(%d), default(%d)\n",
+				vid, pbit, cph_flow->is_default);
+		if (cph_flow->is_default == TRUE)
+			p_pbit_entry = &p_pbit_tbl->def_flow_rule[MV_CPH_PBITS_NOT_CARE_VALUE];
+		else
+			p_pbit_entry = &p_pbit_tbl->flow_rule[MV_CPH_PBITS_NOT_CARE_VALUE];
+
+		if (p_pbit_entry->num > MV_CPH_RULE_NUM_PER_ENTRY) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "invalid P-bit entry number(%d)\n", p_pbit_entry->num);
+			spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+			return MV_BAD_VALUE;
+		}
+
+		/* Traverse CPH flow rule table */
+		for (idx = 0; idx < p_pbit_entry->num; idx++) {
+			if ((p_pbit_entry->rule_idx[idx] >= CPH_FLOW_ENTRY_NUM) ||
+			    (p_pbit_entry->rule_idx[idx] < 1)) {
+				MV_CPH_PRINT(CPH_ERR_LEVEL, "invalid rule index(%d)\n", p_pbit_entry->rule_idx[idx]);
+				spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+				return MV_BAD_VALUE;
+			}
+
+			p_flow_rule = &p_cph_db->flow_tbl.flow_rule[p_pbit_entry->rule_idx[idx]];
+			/* Compare parse_bm and parse_key */
+			if (p_flow_rule->valid != TRUE)
+				continue;
+			MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Search P-bits not care rules, for_packet(%d)\n", for_packet);
+
+			if (for_packet == TRUE)
+				rc = cph_flow_compare_packet_and_rule(cph_flow, p_flow_rule);
+			else
+				rc = cph_flow_compare_rules(cph_flow, p_flow_rule);
+			MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Search P-bits not care rules, rc(%d)\n", rc);
+
+			if (rc == TRUE) {
+				cph_flow->op_type = p_flow_rule->op_type;
+				memcpy(&cph_flow->mod_outer_tci, &p_flow_rule->mod_outer_tci,
+					sizeof(struct CPH_FLOW_TCI_T));
+				memcpy(&cph_flow->mod_inner_tci, &p_flow_rule->mod_inner_tci,
+					sizeof(struct CPH_FLOW_TCI_T));
+				memcpy(&cph_flow->pkt_frwd,      &p_flow_rule->pkt_frwd,
+					sizeof(struct CPH_FLOW_FRWD_T));
+
+				/* Increase count */
+				if (for_packet == TRUE) {
+					if (p_flow_rule->count == 0xFFFFFFFF)
+						p_flow_rule->count = 0;
+					else
+						p_flow_rule->count++;
+				}
+
+				spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+				return MV_OK;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+
+	return MV_FAIL;
+}
+
+/******************************************************************************
+* cph_flow_db_get_rule_by_vid()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get CPH flow mapping rule by VID, only used to compare packet and db rule.
+*
+* INPUTS:
+*       cph_flow   - Flow parsing field values
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_flow_db_get_rule_by_vid(struct CPH_FLOW_ENTRY_T *cph_flow)
+{
+	unsigned short             vid;
+	unsigned char              pbit;
+	unsigned int             idx           = 0;
+	unsigned char             *p_vid_entry   = NULL;
+	struct CPH_PBITS_TABLE_T *p_pbit_tbl    = NULL;
+	struct CPH_PBITS_ENTRY_T *p_pbit_entry  = NULL;
+	struct CPH_FLOW_ENTRY_T  *p_flow_rule   = NULL;
+	struct CPH_FLOW_DB_T     *p_cph_db      = NULL;
+	bool               rc            = MV_OK;
+	unsigned long      flags;
+
+	CPH_IF_NULL(cph_flow);
+	CPH_POS_RANGE_VALIDATE(cph_flow->dir, CPH_DIR_DS, "DIR not allowed");
+
+	/* get VLAN ID and P-bits from cph rule */
+	rc = cph_flow_get_vid_pbit(cph_flow, &vid, &pbit);
+	CPH_IF_ERROR(rc, "fail to get VID and P-bits\n");
+
+	CPH_POS_RANGE_VALIDATE(vid, MV_CPH_VID_INDEX_TABLE_MAX_SIZE - 1, "illegal VID");
+	CPH_POS_RANGE_VALIDATE(pbit, MV_CPH_PBITS_MAP_MAX_ENTRY_NUM - 1, "illegal pbits");
+
+	p_cph_db = &gs_cph_flow_db;
+
+	spin_lock_irqsave(&p_cph_db->flow_lock, flags);
+	/* Find VID index entry by VID */
+	p_vid_entry = &p_cph_db->vid_idx_tbl[cph_flow->dir].pbit_tbl_idx[vid];
+
+	/* Get P-bits mapping table */
+	if ((*p_vid_entry == MV_CPH_PBITS_TABLE_INVALID_INDEX) ||
+	    (*p_vid_entry >= MV_CPH_MAX_PBITS_MAP_TABLE_SIZE)) {
+		MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Pbit table index(%d) is invalid\n", *p_vid_entry);
+		spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+		return MV_NO_SUCH;
+	}
+
+	p_pbit_tbl = &p_cph_db->pbits_tbl[cph_flow->dir][*p_vid_entry];
+
+	/* Save forwarding information */
+	if (cph_flow->is_default == TRUE)
+		p_pbit_entry = &p_pbit_tbl->def_flow_rule[MV_CPH_PBITS_NOT_CARE_VALUE];
+	else
+		p_pbit_entry = &p_pbit_tbl->flow_rule[MV_CPH_PBITS_NOT_CARE_VALUE];
+
+	if (p_pbit_entry->num > MV_CPH_RULE_NUM_PER_ENTRY) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "invalid P-bit entry number(%d)\n", p_pbit_entry->num);
+		spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+		return MV_BAD_VALUE;
+	}
+
+	/* Traverse CPH flow rule tale */
+	for (idx = 0; idx < p_pbit_entry->num; idx++) {
+		if ((p_pbit_entry->rule_idx[idx] >= CPH_FLOW_ENTRY_NUM) ||
+		    (p_pbit_entry->rule_idx[idx] < 1)) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "invalid rule index(%d)\n", p_pbit_entry->rule_idx[idx]);
+			spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+			return MV_BAD_VALUE;
+		}
+		p_flow_rule = &p_cph_db->flow_tbl.flow_rule[p_pbit_entry->rule_idx[idx]];
+		/* Compare parse_bm and parse_key */
+		if (p_flow_rule->valid == TRUE) {
+			rc = cph_flow_compare_packet_and_rule_vid(cph_flow, p_flow_rule);
+
+			if (rc == TRUE) {
+				cph_flow->op_type = p_flow_rule->op_type;
+				memcpy(&cph_flow->mod_outer_tci, &p_flow_rule->mod_outer_tci,
+					sizeof(struct CPH_FLOW_TCI_T));
+				memcpy(&cph_flow->mod_inner_tci, &p_flow_rule->mod_inner_tci,
+					sizeof(struct CPH_FLOW_TCI_T));
+				memcpy(&cph_flow->pkt_frwd,      &p_flow_rule->pkt_frwd,
+					sizeof(struct CPH_FLOW_FRWD_T));
+
+				/* Increase count */
+				if (p_flow_rule->count == 0xFFFFFFFF)
+					p_flow_rule->count = 0;
+				else
+					p_flow_rule->count++;
+
+				spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+				return MV_OK;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+
+	return MV_FAIL;
+}
+
+/******************************************************************************
+* cph_flow_db_add_flow_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Add flow rule to database
+*
+* INPUTS:
+*       cph_flow - VLAN ID, 802.1p value, pkt_fwd information.
+*
+* OUTPUTS:
+*       idx      - index of the rule in flow rule table.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int  cph_flow_db_add_flow_rule(struct CPH_FLOW_ENTRY_T *cph_flow, unsigned int *idx)
+{
+	unsigned int            l_idx       = 0;
+	struct CPH_FLOW_ENTRY_T *p_flow_rule = NULL;
+	bool              rc          = MV_OK;
+	unsigned long     flags;
+	struct CPH_FLOW_DB_T    *p_cph_db    = NULL;
+
+	CPH_IF_NULL(cph_flow);
+	CPH_IF_NULL(idx);
+
+	p_cph_db = &gs_cph_flow_db;
+	/* If the flow table is full */
+	if (p_cph_db->flow_tbl.rule_num >= CPH_FLOW_ENTRY_NUM) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL,
+			"%s(), flow rule table is full<%d>\n", __func__, p_cph_db->flow_tbl.rule_num);
+		return MV_FULL;
+	}
+
+	/* Check if there is already conflicted rules */
+	if (cph_flow_db_get_rule(cph_flow, FALSE) == MV_OK) {
+		MV_CPH_PRINT(CPH_DEBUG_LEVEL, "%s(), already has conflict flow rule\n", __func__);
+		return MV_ALREADY_EXIST;
+	}
+
+	spin_lock_irqsave(&p_cph_db->flow_lock, flags);
+	/* Traverse CPH flow rule tale, entry 0 will be reserved */
+	for (l_idx = 1; l_idx < CPH_FLOW_ENTRY_NUM; l_idx++) {
+		p_flow_rule = &p_cph_db->flow_tbl.flow_rule[l_idx];
+
+		/* Compare parse_bm and parse_key */
+		if (p_flow_rule->valid == FALSE)
+			break;
+	}
+
+	if (l_idx == CPH_FLOW_ENTRY_NUM) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL,
+			"%s(), flow rule table is full<%d>\n", __func__, p_cph_db->flow_tbl.rule_num);
+		spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+		return MV_FULL;
+	}
+	*idx = l_idx;
+
+	/* Save to db */
+	memcpy(p_flow_rule, cph_flow, sizeof(struct CPH_FLOW_ENTRY_T));
+	p_flow_rule->valid = TRUE;
+	p_flow_rule->count = 0;
+	p_cph_db->flow_tbl.rule_num++;
+	spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+
+	return rc;
+}
+
+/*******************************************************************************
+**
+**    cph_flow_db_get_free_pbit_tbl
+**    ___________________________________________________________________________
+**
+**    DESCRIPTION: The function gets available P-bits mapping table.
+**
+**    INPUTS:
+**      None.
+**
+**    OUTPUTS:
+**      None.
+**
+**    RETURNS:
+**      Available P-bits mapping table index.
+**
+*******************************************************************************/
+unsigned int cph_flow_db_get_free_pbit_tbl(enum CPH_DIR_E dir)
+{
+	unsigned int table_idx = 0;
+
+	/* Table index MV_CUST_MAX_PBITS_MAP_TABLE_SIZE is reserved for tagged default packets */
+	for (table_idx = 0;
+		table_idx < (MV_CPH_MAX_PBITS_MAP_TABLE_SIZE - 1 - MV_CPH_RESERVED_PBITS_TABLE_NUM);
+		table_idx++) {
+		if (gs_cph_flow_db.pbits_tbl[dir][table_idx].in_use == FALSE)
+			return table_idx;
+	}
+
+	return MV_CPH_PBITS_TABLE_INVALID_INDEX;
+}
+
+/******************************************************************************
+* cph_flow_db_add_idx()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Add flow rule to database
+*
+* INPUTS:
+*       cph_flow - VLAN ID, 802.1p value, pkt_fwd information.
+*       idx      - the index in flow rule table
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int  cph_flow_db_add_idx(struct CPH_FLOW_ENTRY_T *cph_flow, unsigned int idx)
+{
+	unsigned short             vid;
+	unsigned char              pbit;
+	unsigned long      flags;
+	struct CPH_FLOW_DB_T     *p_cph_db = NULL;
+	unsigned char             *p_vid_entry = NULL;
+	struct CPH_PBITS_TABLE_T *p_pbit_tbl = NULL;
+	struct CPH_PBITS_ENTRY_T *p_pbit_entry = NULL;
+	unsigned int             pbit_tbl_idx;
+	MV_STATUS          rc = MV_OK;
+
+	CPH_IF_NULL(cph_flow);
+	CPH_POS_RANGE_VALIDATE(cph_flow->dir, CPH_DIR_DS, "DIR not allowed");
+	CPH_POS_RANGE_VALIDATE(idx, CPH_FLOW_ENTRY_NUM, "exceed max flow index");
+
+	/* get VLAN ID and P-bits from cph rule */
+	rc = cph_flow_get_vid_pbit(cph_flow, &vid, &pbit);
+	CPH_IF_ERROR(rc, "failed to get VID and P-bits\n");
+
+	p_cph_db = &gs_cph_flow_db;
+	spin_lock_irqsave(&p_cph_db->flow_lock, flags);
+
+	/* Find VID index entry by VID */
+	p_vid_entry = &p_cph_db->vid_idx_tbl[cph_flow->dir].pbit_tbl_idx[vid];
+
+	/* Get P-bits mapping table */
+	/* If this VID index entry does not point to any P-bits mapping table,
+	   need to search for an available P-bits mapping table             */
+	if ((*p_vid_entry == MV_CPH_PBITS_TABLE_INVALID_INDEX) ||
+	    (*p_vid_entry >= MV_CPH_MAX_PBITS_MAP_TABLE_SIZE)) {
+		/* Reserved for default tagged rule */
+		if (vid >= MV_CPH_DEFAULT_UNTAG_RULE)
+			pbit_tbl_idx = MV_CPH_MAX_PBITS_MAP_TABLE_SIZE - 1 - (vid - MV_CPH_DEFAULT_UNTAG_RULE);
+		else
+			pbit_tbl_idx = cph_flow_db_get_free_pbit_tbl(cph_flow->dir);
+
+		if (pbit_tbl_idx >= MV_CPH_MAX_PBITS_MAP_TABLE_SIZE) {
+			MV_CPH_PRINT(CPH_DEBUG_LEVEL, " %d P-bits mapping table has used out\n\r",
+				MV_CPH_MAX_PBITS_MAP_TABLE_SIZE);
+			spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+			return MV_FULL;
+		}
+	} else {
+		/* In case that the VID index already points to a P-bits mapping table,
+		   Need to replace the forwarding information of this P-bit mapping table */
+		pbit_tbl_idx = *p_vid_entry;
+	}
+	p_pbit_tbl = &p_cph_db->pbits_tbl[cph_flow->dir][pbit_tbl_idx];
+
+	/* If legal P-bits is configured */
+	if (pbit <= MV_CPH_PBITS_NOT_CARE_VALUE) {
+		/* Save forwarding information */
+		if (cph_flow->is_default == TRUE)
+			p_pbit_entry = &p_pbit_tbl->def_flow_rule[pbit];
+		else
+			p_pbit_entry = &p_pbit_tbl->flow_rule[pbit];
+
+		if (p_pbit_entry->num >= MV_CPH_RULE_NUM_PER_ENTRY) {
+			MV_CPH_PRINT(CPH_DEBUG_LEVEL, "%s(%d), p-bit table(%d) for vid(%d), p-bit(%d)is full\n",
+					__func__, __LINE__, pbit_tbl_idx, vid, pbit);
+			spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+			return MV_FULL;
+		}
+		p_pbit_entry->rule_idx[p_pbit_entry->num] = idx;
+		p_pbit_entry->num++;
+		p_pbit_tbl->in_use  = TRUE;
+
+		/* Save P-bit mapping table index in VID index table */
+		*p_vid_entry = pbit_tbl_idx;
+	}
+
+	spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_flow_db_add_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Add flow rule and index to database
+*
+* INPUTS:
+*       cph_flow - VLAN ID, 802.1p value, pkt_fwd information.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_flow_db_add_rule(struct CPH_FLOW_ENTRY_T *cph_flow)
+{
+	unsigned int    idx;
+	MV_STATUS rc = MV_OK;
+
+	CPH_IF_NULL(cph_flow);
+
+	if (cph_flow->parse_bm & CPH_FLOW_PARSE_MC_PROTO) {
+		rc = cph_flow_db_add_mc_rule(cph_flow);
+		CPH_IF_ERROR(rc, "failed to add CPH MC rule\n");
+	} else {
+		/* add flow to flow table */
+		rc = cph_flow_db_add_flow_rule(cph_flow, &idx);
+		if (rc == MV_ALREADY_EXIST) {
+			MV_CPH_PRINT(CPH_DEBUG_LEVEL, "already has conflicted rule\n");
+			return MV_OK;
+		} else if (rc != MV_OK) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "fail to add flow rule to data base\n");
+			return MV_FAIL;
+		}
+
+		/* save flow index to vid and p-bit index table */
+		rc = cph_flow_db_add_idx(cph_flow, idx);
+		CPH_IF_ERROR(rc, "failed to set CPH index\n");
+	}
+
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_flow_db_update_pbit_tbl_state()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: update the state(in_use or not) of P-bit table.
+*
+* INPUTS:
+*       cph_flow - Flow parsing field values
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_flow_db_update_pbit_tbl_state(struct CPH_PBITS_TABLE_T *pbit_tbl)
+{
+	unsigned int idx;
+	bool in_use = FALSE;
+
+	for (idx = 0; idx < MV_CPH_PBITS_MAP_MAX_ENTRY_NUM; idx++) {
+		if ((pbit_tbl->flow_rule[idx].num != 0) ||
+		    (pbit_tbl->def_flow_rule[idx].num != 0)) {
+			in_use = TRUE;
+			break;
+		}
+	}
+
+	pbit_tbl->in_use = in_use;
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_flow_db_del_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Delete CPH flow mapping rule.
+*
+* INPUTS:
+*       cph_flow - Flow parsing field values
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_flow_db_del_rule(struct CPH_FLOW_ENTRY_T *cph_flow)
+{
+	unsigned short             vid;
+	unsigned char              pbit;
+	unsigned int             idx           = 0;
+	unsigned char             *p_vid_entry   = NULL;
+	struct CPH_PBITS_TABLE_T *p_pbit_tbl    = NULL;
+	struct CPH_PBITS_ENTRY_T *p_pbit_entry  = NULL;
+	struct CPH_FLOW_ENTRY_T  *p_flow_rule   = NULL;
+	struct CPH_FLOW_DB_T     *p_cph_db      = NULL;
+	MV_STATUS          rc            = MV_OK;
+	unsigned long      flags;
+
+	CPH_IF_NULL(cph_flow);
+	if (cph_flow->parse_bm & CPH_FLOW_PARSE_MC_PROTO) {
+		rc = cph_flow_db_del_mc_rule(cph_flow);
+		CPH_IF_ERROR(rc, "failed to delete CPH MC rule\n");
+	} else {
+		CPH_POS_RANGE_VALIDATE(cph_flow->dir, CPH_DIR_DS, "DIR not allowed");
+
+		/* get VLAN ID and P-bits from cph rule */
+		rc = cph_flow_get_vid_pbit(cph_flow, &vid, &pbit);
+		CPH_IF_ERROR(rc, "failed to get VID and P-bits\n");
+
+		CPH_POS_RANGE_VALIDATE(vid, MV_CPH_VID_INDEX_TABLE_MAX_SIZE - 1, "illegal VID");
+		CPH_POS_RANGE_VALIDATE(pbit, MV_CPH_PBITS_MAP_MAX_ENTRY_NUM - 1, "illegal pbits");
+
+		p_cph_db = &gs_cph_flow_db;
+
+		spin_lock_irqsave(&p_cph_db->flow_lock, flags);
+		/* Find VID index entry by VID */
+		p_vid_entry = &p_cph_db->vid_idx_tbl[cph_flow->dir].pbit_tbl_idx[vid];
+
+		/* Get P-bits mapping table */
+		if ((*p_vid_entry == MV_CPH_PBITS_TABLE_INVALID_INDEX) ||
+		    (*p_vid_entry >= MV_CPH_MAX_PBITS_MAP_TABLE_SIZE)) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "Pbit tale index(%d) is invalid\n", *p_vid_entry);
+			spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+			return MV_NO_SUCH;
+		}
+
+		p_pbit_tbl = &p_cph_db->pbits_tbl[cph_flow->dir][*p_vid_entry];
+
+		/* Save forwarding information */
+		if (cph_flow->is_default == TRUE)
+			p_pbit_entry = &p_pbit_tbl->def_flow_rule[pbit];
+		else
+			p_pbit_entry = &p_pbit_tbl->flow_rule[pbit];
+
+		if (p_pbit_entry->num > MV_CPH_RULE_NUM_PER_ENTRY) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "invalid P-bit entry number(%d)\n", p_pbit_entry->num);
+			spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+			return MV_BAD_VALUE;
+		}
+
+		/* Traverse CPH flow rule tale */
+		for (idx = 0; idx < p_pbit_entry->num; idx++) {
+			if ((p_pbit_entry->rule_idx[idx] >= CPH_FLOW_ENTRY_NUM) ||
+			    (p_pbit_entry->rule_idx[idx] < 1)) {
+				MV_CPH_PRINT(CPH_ERR_LEVEL, "invalid rule index(%d)\n", p_pbit_entry->rule_idx[idx]);
+				spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+				return MV_BAD_VALUE;
+			}
+			p_flow_rule = &p_cph_db->flow_tbl.flow_rule[p_pbit_entry->rule_idx[idx]];
+			/* Compare parse_bm and parse_key */
+			if (p_flow_rule->valid == TRUE) {
+				rc = cph_flow_compare_rules(cph_flow, p_flow_rule);
+				if (rc == TRUE) {
+					/* clear flow rule in flow table */
+					memset(p_flow_rule, 0, sizeof(struct CPH_FLOW_ENTRY_T));
+					p_flow_rule->valid = FALSE;
+					p_cph_db->flow_tbl.rule_num--;
+
+					/* clear the rule index */
+					if (idx == (p_pbit_entry->num - 1)) {
+						p_pbit_entry->rule_idx[idx] = 0;
+					} else {
+						memmove(&p_pbit_entry->rule_idx[idx], &p_pbit_entry->rule_idx[idx+1],
+							sizeof(p_pbit_entry->rule_idx[idx])*
+							(p_pbit_entry->num - 1 - idx));
+						p_pbit_entry->rule_idx[p_pbit_entry->num - 1] = 0;
+					}
+					p_pbit_entry->num--;
+
+					rc = cph_flow_db_update_pbit_tbl_state(p_pbit_tbl);
+					CPH_IF_ERROR(rc, "failed to update P-bit table\n");
+
+					spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+					return MV_OK;
+				}
+			}
+		}
+		spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+	}
+	return rc;
+}
+
+/******************************************************************************
+* cph_flow_db_clear_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Clear CPH flow mapping rules.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_flow_db_clear_rule(void)
+{
+	unsigned int         idx         = 0;
+	unsigned long  flags;
+	struct CPH_FLOW_DB_T *p_cph_db    = NULL;
+	struct CPH_FLOW_TABLE_T *p_mc_tbl = NULL;
+
+	p_cph_db = &gs_cph_flow_db;
+	p_mc_tbl = &gs_mc_flow_tbl;
+	spin_lock_irqsave(&p_cph_db->flow_lock, flags);
+
+	/* reset VID index table */
+	for (idx = 0; idx < MV_CPH_VID_INDEX_TABLE_MAX_SIZE; idx++) {
+		p_cph_db->vid_idx_tbl[CPH_DIR_US].pbit_tbl_idx[idx] = MV_CPH_PBITS_TABLE_INVALID_INDEX;
+		p_cph_db->vid_idx_tbl[CPH_DIR_DS].pbit_tbl_idx[idx] = MV_CPH_PBITS_TABLE_INVALID_INDEX;
+	}
+
+	/* reset P-bit table */
+	memset(p_cph_db->pbits_tbl, 0, sizeof(p_cph_db->pbits_tbl));
+	for (idx = 0; idx < MV_CPH_MAX_PBITS_MAP_TABLE_SIZE; idx++) {
+		p_cph_db->pbits_tbl[CPH_DIR_US][idx].in_use = FALSE;
+		p_cph_db->pbits_tbl[CPH_DIR_DS][idx].in_use = FALSE;
+	}
+
+	/* reset flow rule table */
+	memset(&p_cph_db->flow_tbl, 0, sizeof(p_cph_db->flow_tbl));
+	for (idx = 0; idx < CPH_FLOW_ENTRY_NUM; idx++)
+		p_cph_db->flow_tbl.flow_rule[idx].valid = FALSE;
+	memset(p_mc_tbl, 0, sizeof(struct CPH_FLOW_TABLE_T));
+	for (idx = 0; idx < CPH_FLOW_ENTRY_NUM; idx++)
+		p_mc_tbl->flow_rule[idx].valid = FALSE;
+
+	spin_unlock_irqrestore(&p_cph_db->flow_lock, flags);
+
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_flow_db_clear_rule_by_mh()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Clear CPH flow mapping rules by MH.
+*
+* INPUTS:
+*       mh  -  Marvell header.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_flow_db_clear_rule_by_mh(unsigned short mh)
+{
+	struct CPH_FLOW_ENTRY_T   *p_flow_rule = NULL;
+	struct CPH_FLOW_DB_T      *p_cph_db    = NULL;
+	unsigned int              idx = 0;
+	MV_STATUS           rc          = MV_OK;
+
+	/* go through all flow rules */
+	p_cph_db = &gs_cph_flow_db;
+	for (idx = 0; idx < CPH_FLOW_ENTRY_NUM; idx++) {
+		p_flow_rule = &p_cph_db->flow_tbl.flow_rule[idx];
+
+		if ((p_flow_rule->valid == TRUE) &&
+		    (p_flow_rule->mh == mh) &&
+		    !(p_flow_rule->parse_bm & CPH_FLOW_PARSE_MC_PROTO)) {
+			rc = cph_flow_db_del_rule(p_flow_rule);
+			CPH_IF_ERROR(rc, "failed to delete flow rule\n");
+		}
+	}
+
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_flow_db_init()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Initialize CPH flow mapping database.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_flow_db_init(void)
+{
+	unsigned int            idx        = 0;
+	struct CPH_FLOW_DB_T    *p_cph_db   = NULL;
+	struct CPH_FLOW_TABLE_T *p_mc_table = NULL;
+
+	/* Init flow rule */
+	p_cph_db = &gs_cph_flow_db;
+	memset((unsigned char *)p_cph_db, 0, sizeof(struct CPH_FLOW_DB_T));
+	p_mc_table = &gs_mc_flow_tbl;
+	memset((unsigned char *)p_mc_table, 0, sizeof(struct CPH_FLOW_TABLE_T));
+
+	/* Init lock */
+	spin_lock_init(&p_cph_db->flow_lock);
+
+	/* Init VID index table */
+	for (idx = 0; idx < MV_CPH_VID_INDEX_TABLE_MAX_SIZE; idx++) {
+		p_cph_db->vid_idx_tbl[CPH_DIR_US].pbit_tbl_idx[idx] = MV_CPH_PBITS_TABLE_INVALID_INDEX;
+		p_cph_db->vid_idx_tbl[CPH_DIR_DS].pbit_tbl_idx[idx] = MV_CPH_PBITS_TABLE_INVALID_INDEX;
+	}
+
+	/* Init P-bit table */
+	for (idx = 0; idx < MV_CPH_MAX_PBITS_MAP_TABLE_SIZE; idx++) {
+		p_cph_db->pbits_tbl[CPH_DIR_US][idx].in_use = FALSE;
+		p_cph_db->pbits_tbl[CPH_DIR_DS][idx].in_use = FALSE;
+	}
+
+	/* Init flow rule table */
+	for (idx = 0; idx < CPH_FLOW_ENTRY_NUM; idx++)
+		p_cph_db->flow_tbl.flow_rule[idx].valid = FALSE;
+	for (idx = 0; idx < CPH_FLOW_ENTRY_NUM; idx++)
+		p_mc_table->flow_rule[idx].valid = FALSE;
+
+	/* Init DSCP to P-bits mapping table */
+	p_cph_db->dscp_tbl.in_use = FALSE;
+
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_flow_verify_tci()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Verify TCI field of flow mapping rule
+*
+* INPUTS:
+*       tci         - TPID, VLAN ID, P-bits information.
+*       parse_field - Whether the TCI is from parsing field.
+*       tci_field   - the TCI field need to be checked.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns TRUE.
+*       On error returns FALSE.
+*******************************************************************************/
+bool  cph_flow_verify_tci(struct CPH_FLOW_TCI_T *tci, bool parse_field, enum CPH_TCI_FIELD_E tci_field)
+{
+	unsigned short max_vid   = 0;
+	unsigned short max_pbits = 0;
+
+	if (TRUE == parse_field) {
+		max_vid   = MV_CPH_VID_NOT_CARE_VALUE;
+		max_pbits = MV_CPH_PBITS_NOT_CARE_VALUE;
+	} else {
+		max_vid   = MV_VLAN_ID_MAX;
+		max_pbits = MV_PBITS_MAX;
+	}
+
+	/* Check TPID */
+	if ((tci->tpid != MV_TPID_8100) &&
+	    (tci->tpid != MV_TPID_9100) &&
+	    (tci->tpid != MV_TPID_88A8) &&
+	    ((tci->tpid != MV_CPH_TPID_NOT_CARE_VALUE) && (parse_field == TRUE))) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "tpid[0x%x] is invalid\n", tci->tpid);
+		return FALSE;
+	}
+
+	/* Check VID */
+	if ((tci_field == CPH_TCI_FIELD_VID) ||
+	    (tci_field == CPH_TCI_FIELD_VID_PBIT) ||
+	    (tci_field == CPH_TCI_FIELD_ALL)) {
+		if (tci->vid > max_vid) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "vid[%d] exceeds maximum value[%d]\n", tci->vid, max_vid);
+			return FALSE;
+		}
+	}
+
+	/* Check P-bits */
+	if ((tci_field == CPH_TCI_FIELD_PBIT) ||
+	    (tci_field == CPH_TCI_FIELD_VID_PBIT) ||
+	    (tci_field == CPH_TCI_FIELD_ALL)) {
+		if (tci->pbits > max_pbits) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "pbits[%d] exceeds maximum value[%d]\n", tci->pbits, max_pbits);
+			return FALSE;
+		}
+	}
+
+	return TRUE;
+}
+
+/******************************************************************************
+* cph_flow_verify_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Verify flow mapping rule
+*
+* INPUTS:
+*       cph_flow - VLAN ID, 802.1p value, pkt_fwd information.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns TRUE.
+*       On error returns FALSE.
+*******************************************************************************/
+bool  cph_flow_verify_rule(struct CPH_FLOW_ENTRY_T *cph_flow, bool full)
+{
+	enum CPH_DIR_E          dir         = CPH_DIR_US;
+	enum CPH_FLOW_PARSE_E   parse_bm    = 0;
+	enum CPH_VLAN_OP_TYPE_E op_type     = 0;
+	struct CPH_FLOW_FRWD_T   *p_pkt_fwd   = NULL;
+	bool               rc          = TRUE;
+
+	/* Get input information: VID, P-bits... */
+	if (cph_flow == NULL) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "cph_flow is NULL\n");
+		return FALSE;
+	}
+
+	dir       =  cph_flow->dir;
+	parse_bm  =  cph_flow->parse_bm;
+	op_type   =  cph_flow->op_type;
+	p_pkt_fwd = &cph_flow->pkt_frwd;
+
+	/* Check dir */
+	if (dir >= CPH_DIR_NOT_CARE) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "dir[%d] exceeds maximum value[%d]\n", dir, CPH_DIR_NOT_CARE);
+		return FALSE;
+	}
+
+	/* Check parse_bm */
+	if ((parse_bm & CPH_FLOW_PARSE_EXT_VLAN) &&
+	    (parse_bm & CPH_FLOW_PARSE_TWO_VLAN)) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL,
+			"Cann't set parse_bm CPH_FLOW_PARSE_EXT_VLAN and CPH_FLOW_PARSE_EXT_VLAN at the same time\n");
+		return FALSE;
+	}
+
+	/* Check op_type */
+	if (op_type > CPH_VLAN_OP_SWAP) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "op_type[%d] exceeds maximum value[%d]\n", op_type, CPH_VLAN_OP_SWAP);
+		return FALSE;
+	}
+
+	/* Check TCI */
+	if ((parse_bm & CPH_FLOW_PARSE_EXT_VLAN) ||
+	    (parse_bm & CPH_FLOW_PARSE_TWO_VLAN)) {
+		rc = cph_flow_verify_tci(&cph_flow->parse_outer_tci, TRUE, CPH_TCI_FIELD_VID_PBIT);
+		if (rc == FALSE)
+			return FALSE;
+	}
+
+	if (parse_bm & CPH_FLOW_PARSE_TWO_VLAN) {
+		rc = cph_flow_verify_tci(&cph_flow->parse_inner_tci, TRUE, CPH_TCI_FIELD_VID_PBIT);
+		if (rc == FALSE)
+			return FALSE;
+	}
+
+	switch (cph_flow->op_type) {
+	case CPH_VLAN_OP_ASIS:
+	case CPH_VLAN_OP_DISCARD:
+	case CPH_VLAN_OP_REM:
+	case CPH_VLAN_OP_REM_2_TAGS:
+	case CPH_VLAN_OP_SWAP:
+		break;
+	case CPH_VLAN_OP_ADD:
+	case CPH_VLAN_OP_REPLACE:
+	case CPH_VLAN_OP_REPLACE_INNER_REM_OUTER:
+		rc = cph_flow_verify_tci(&cph_flow->mod_outer_tci, FALSE, CPH_TCI_FIELD_VID_PBIT);
+		if (rc == FALSE)
+			return FALSE;
+		break;
+	case CPH_VLAN_OP_ADD_COPY_DSCP:
+	case CPH_VLAN_OP_ADD_COPY_OUTER_PBIT:
+	case CPH_VLAN_OP_ADD_COPY_INNER_PBIT:
+	case CPH_VLAN_OP_REPLACE_VID:
+		rc = cph_flow_verify_tci(&cph_flow->mod_outer_tci, FALSE, CPH_TCI_FIELD_VID);
+		if (rc == FALSE)
+			return FALSE;
+		break;
+	case CPH_VLAN_OP_ADD_2_TAGS:
+	case CPH_VLAN_OP_REPLACE_2TAGS:
+	case CPH_VLAN_OP_REPLACE_INNER_ADD_OUTER:
+		rc = cph_flow_verify_tci(&cph_flow->mod_outer_tci, FALSE, CPH_TCI_FIELD_VID_PBIT);
+		if (rc == FALSE)
+			return FALSE;
+		rc = cph_flow_verify_tci(&cph_flow->mod_inner_tci, FALSE, CPH_TCI_FIELD_VID_PBIT);
+		if (rc == FALSE)
+			return FALSE;
+		break;
+	case CPH_VLAN_OP_ADD_2_TAGS_COPY_DSCP:
+	case CPH_VLAN_OP_ADD_2_TAGS_COPY_PBIT:
+	case CPH_VLAN_OP_REPLACE_2TAGS_VID:
+	case CPH_VLAN_OP_REPLACE_INNER_ADD_OUTER_COPY_PBIT:
+		rc = cph_flow_verify_tci(&cph_flow->mod_outer_tci, FALSE, CPH_TCI_FIELD_VID);
+		if (rc == FALSE)
+			return FALSE;
+		rc = cph_flow_verify_tci(&cph_flow->mod_inner_tci, FALSE, CPH_TCI_FIELD_VID);
+		if (rc == FALSE)
+			return FALSE;
+		break;
+	case CPH_VLAN_OP_REPLACE_PBIT:
+		rc = cph_flow_verify_tci(&cph_flow->mod_outer_tci, FALSE, CPH_TCI_FIELD_PBIT);
+		if (rc == FALSE)
+			return FALSE;
+		break;
+	default:
+		break;
+	}
+
+	/* Check target port/queue/GEM port */
+	if (p_pkt_fwd->trg_port > MV_TCONT_LLID_MAX) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "trg_port[%d] exceeds maximum value[%d]\n",
+			p_pkt_fwd->trg_port, MV_TCONT_LLID_MAX);
+		return FALSE;
+	}
+
+	if (p_pkt_fwd->trg_queue > MV_QUEUE_MAX) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "SWF trg_queue[%d] exceeds maximum value[%d]\n",
+			p_pkt_fwd->trg_queue, MV_QUEUE_MAX);
+		return FALSE;
+	}
+
+	if (p_pkt_fwd->trg_hwf_queue > MV_QUEUE_MAX) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "HWF trg_queue[%d] exceeds maximum value[%d]\n",
+			p_pkt_fwd->trg_hwf_queue, MV_QUEUE_MAX);
+		return FALSE;
+	}
+
+	if (p_pkt_fwd->gem_port > MV_GEM_PORT_MAX) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "gem_port[%d] exceeds maximum value[%d]\n",
+			p_pkt_fwd->gem_port, MV_GEM_PORT_MAX);
+		return FALSE;
+	}
+
+	return TRUE;
+}
+
+/******************************************************************************
+* cph_flow_display_tci()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Display TCI value
+*
+* INPUTS:
+*       tci         - TCI field
+*       trace_level - Trace and debug level
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+void  cph_flow_display_tci(struct CPH_FLOW_TCI_T *tci, unsigned int trace_level)
+{
+	MV_CPH_PRINT(trace_level,
+			"TPID[0x%x], vid[%d], p-bits[%d]\n",
+			((tci != NULL) ? tci->tpid : 0),
+			((tci != NULL) ? tci->vid : 0),
+			((tci != NULL) ? tci->pbits : 0));
+	return;
+}
+
+/******************************************************************************
+* cph_flow_add_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Sets flow mapping rule
+*
+* INPUTS:
+*       cph_flow - VLAN ID, 802.1p value, pkt_fwd information.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int  cph_flow_add_rule(struct CPH_FLOW_ENTRY_T *cph_flow)
+{
+	bool rc = TRUE;
+
+	/* Display input CPH flow */
+	MV_CPH_PRINT(CPH_DEBUG_LEVEL,
+		"---->\n default rule[%s], dir[%d], parse bm[%x], mh[%d], eth type[0x%x], op type[%d], trg port[%d]," \
+		"SWF queue[%d], HWF queue[%d], GEM port[%d]\n",
+		(cph_flow->is_default == TRUE) ? "Yes" : "No",
+		cph_flow->dir, cph_flow->parse_bm, cph_flow->mh, cph_flow->eth_type, cph_flow->op_type,
+		cph_flow->pkt_frwd.trg_port, cph_flow->pkt_frwd.trg_queue, cph_flow->pkt_frwd.trg_hwf_queue,
+		cph_flow->pkt_frwd.gem_port);
+	cph_flow_display_tci(&cph_flow->parse_outer_tci, CPH_DEBUG_LEVEL);
+	cph_flow_display_tci(&cph_flow->parse_inner_tci, CPH_DEBUG_LEVEL);
+	cph_flow_display_tci(&cph_flow->mod_outer_tci, CPH_DEBUG_LEVEL);
+	cph_flow_display_tci(&cph_flow->mod_inner_tci, CPH_DEBUG_LEVEL);
+
+	/* Verify CPH flow rule */
+	rc = cph_flow_verify_rule(cph_flow, TRUE);
+	if (rc == FALSE) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "%s(), verify rule failed\n", __func__);
+		return MV_FAIL;
+	}
+
+	/* Add flow rule to data base */
+	if (cph_flow_db_add_rule(cph_flow) != MV_OK) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "%s(), fail to call cph_flow_db_add_rule\n", __func__);
+		return MV_FAIL;
+	}
+
+	MV_CPH_PRINT(CPH_DEBUG_LEVEL, "<----\n");
+
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_flow_del_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Deletes flow mapping rule
+*
+* INPUTS:
+*       cph_flow - VLAN ID, 802.1p value, pkt_fwd information.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int  cph_flow_del_rule(struct CPH_FLOW_ENTRY_T *cph_flow)
+{
+	/* Display input CPH flow */
+	MV_CPH_PRINT(CPH_DEBUG_LEVEL,
+		"---->\n default rule[%s], dir[%d], parse bm[%x], mh[%d], eth type[0x%x], op type[%d]\n",
+		(cph_flow->is_default == TRUE) ? "Yes" : "No",
+		cph_flow->dir, cph_flow->parse_bm, cph_flow->mh, cph_flow->eth_type, cph_flow->op_type);
+	cph_flow_display_tci(&cph_flow->parse_outer_tci, CPH_DEBUG_LEVEL);
+	cph_flow_display_tci(&cph_flow->parse_inner_tci, CPH_DEBUG_LEVEL);
+
+	/* Delete flow rule from data base */
+	if (cph_flow_db_del_rule(cph_flow) != MV_OK) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "%s(), fail to call cph_flow_db_del_rule\n", __func__);
+		return MV_FAIL;
+	}
+
+	MV_CPH_PRINT(CPH_DEBUG_LEVEL, "<----\n");
+
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_flow_get_tag_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Gets flow mapping rule for tagged frames.
+*
+* INPUTS:
+*       cph_flow - Input vid, pbits, dir
+*
+* OUTPUTS:
+*       cph_flow - output packet forwarding information, including GEM port,
+*                  T-CONT, queue and packet modification for VID, P-bits.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int  cph_flow_get_rule(struct CPH_FLOW_ENTRY_T *cph_flow)
+{
+	/* Display input CPH flow */
+	MV_CPH_PRINT(CPH_DEBUG_LEVEL,
+		"---->\nPacket flow rule: default rule[%s], dir[%d], parse bm[%x], mh[%d], eth type[0x%x]\n",
+		(cph_flow->is_default == TRUE) ? "Yes" : "No",
+		cph_flow->dir, cph_flow->parse_bm, cph_flow->mh, cph_flow->eth_type);
+	cph_flow_display_tci(&cph_flow->parse_outer_tci, CPH_DEBUG_LEVEL);
+	cph_flow_display_tci(&cph_flow->parse_inner_tci, CPH_DEBUG_LEVEL);
+
+	/* Get flow rule from data base */
+	if (cph_flow_db_get_rule(cph_flow, TRUE) != MV_OK) {
+		MV_CPH_PRINT(CPH_DEBUG_LEVEL, "%s(), fail to call cph_flow_db_get_rule\n", __func__);
+		return MV_FAIL;
+	}
+
+	MV_CPH_PRINT(CPH_DEBUG_LEVEL, "<----\n");
+
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_flow_clear_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Clears all flow mapping rules
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_flow_clear_rule(void)
+{
+	/* Display input CPH flow */
+	MV_CPH_PRINT(CPH_DEBUG_LEVEL, "---->\n");
+
+	/* Clear flow rule from data base */
+	if (cph_flow_db_clear_rule() != MV_OK) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "%s(), fail to call cph_flow_db_clear_rule\n", __func__);
+		return MV_FAIL;
+	}
+
+	MV_CPH_PRINT(CPH_DEBUG_LEVEL, "<----\n");
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_flow_clear_rule_by_mh()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Clears flow mapping rules by MH
+*
+* INPUTS:
+*       mh   -  Marvell header.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_flow_clear_rule_by_mh(unsigned short mh)
+{
+	/* Display input CPH flow */
+	MV_CPH_PRINT(CPH_DEBUG_LEVEL, "---->  mh(%d)\n", mh);
+
+	/* Clear flow rule from data base */
+	if (cph_flow_db_clear_rule_by_mh(mh) != MV_OK) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "%s(), fail to call cph_flow_db_clear_rule_by_mh\n", __func__);
+		return MV_FAIL;
+	}
+
+	MV_CPH_PRINT(CPH_DEBUG_LEVEL, "<----\n");
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_flow_set_dscp_map()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Sets DSCP to P-bits mapping rules
+*
+* INPUTS:
+*       dscp_map  - DSCP to P-bits mapping rules.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int  cph_flow_set_dscp_map(struct CPH_DSCP_PBITS_T *dscp_map)
+{
+	struct CPH_FLOW_DB_T *p_cph_db = NULL;
+
+	CPH_IF_NULL(dscp_map);
+
+	MV_CPH_PRINT(CPH_DEBUG_LEVEL,
+		"----> in_use[%d]\n",
+		((dscp_map != NULL) ? dscp_map->in_use : 0));
+
+	p_cph_db = &gs_cph_flow_db;
+	/* Case 1: to enable DSCP to P-bits mapping */
+	if (dscp_map->in_use == TRUE) {
+		memcpy(&p_cph_db->dscp_tbl.pbits[0], &dscp_map->pbits[0], sizeof(p_cph_db->dscp_tbl.pbits));
+		p_cph_db->dscp_tbl.in_use = TRUE;
+	} else {/* Case 2: to disable DSCP to P-bits mapping */
+		memset((unsigned char *)&p_cph_db->dscp_tbl, 0, sizeof(p_cph_db->dscp_tbl));
+		p_cph_db->dscp_tbl.in_use = FALSE;
+	}
+
+	MV_CPH_PRINT(CPH_DEBUG_LEVEL, "<----\n");
+
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_flow_del_dscp_map()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Deletes DSCP to P-bits mapping rules
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int  cph_flow_del_dscp_map(void)
+{
+	struct CPH_FLOW_DB_T *p_cph_db = NULL;
+	MV_CPH_PRINT(CPH_DEBUG_LEVEL, "---->\n");
+
+	/* Clear DSCP to P-bits mapping */
+	p_cph_db = &gs_cph_flow_db;
+	memset((unsigned char *)&p_cph_db->dscp_tbl, 0, sizeof(p_cph_db->dscp_tbl));
+	p_cph_db->dscp_tbl.in_use = FALSE;
+
+	MV_CPH_PRINT(CPH_DEBUG_LEVEL, "<----\n");
+
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_flow_add_vlan()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Add one VLAN tag behind of source MAC address.
+*
+* INPUTS:
+*       mh     - Whether has MH or not
+*       p_data - Pointer to packet
+*       tpid   - Type of VLAN ID
+*       vid    - VLAN to be added
+*       pbits  - P-bits value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       The shift of SKB data.
+*******************************************************************************/
+INLINE int cph_flow_add_vlan(bool mh, unsigned char *p_data, unsigned short tpid,
+	unsigned short vid, unsigned char pbits)
+{
+	unsigned char  *p_new  = NULL;
+	unsigned short *p_vlan = NULL;
+	unsigned int  len    = 0;
+
+	p_new = p_data - MV_VLAN_HLEN;
+
+	if (TRUE == mh)
+		len = MV_ETH_MH_SIZE + MV_MAC_ADDR_SIZE + MV_MAC_ADDR_SIZE;
+	else
+		len = MV_MAC_ADDR_SIZE + MV_MAC_ADDR_SIZE;
+
+	memmove(p_new, p_data, len);
+
+	p_vlan = (unsigned short *)(p_new + len);
+
+	/* Set VLAN Type */
+	*p_vlan = htons(tpid);
+	p_vlan++;
+
+	/* Set VID + priority */
+	*p_vlan = htons((vid & MV_VLAN_ID_MASK) | ((pbits & MV_PBITS_MASK) << MV_PBITS_SHIFT));
+
+	return -MV_VLAN_HLEN;
+}
+
+/******************************************************************************
+* cph_flow_del_vlan()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Delete one VLAN tag behind of source MAC address.
+*
+* INPUTS:
+*       mh     - Whether has MH or not
+*       p_data - Pointer to packet.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       The shift of SKB data.
+*******************************************************************************/
+INLINE int cph_flow_del_vlan(bool mh, unsigned char *p_data)
+{
+	unsigned char  *p_new  = NULL;
+	unsigned int  len    = 0;
+
+	p_new = p_data + MV_VLAN_HLEN;
+
+	if (TRUE == mh)
+		len = MV_ETH_MH_SIZE + MV_MAC_ADDR_SIZE + MV_MAC_ADDR_SIZE;
+	else
+		len = MV_MAC_ADDR_SIZE + MV_MAC_ADDR_SIZE;
+
+	memmove(p_new, p_data, len);
+
+	return MV_VLAN_HLEN;
+}
+
+/******************************************************************************
+* cph_flow_strip_vlan()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Delete all VLAN tags behind of source MAC address.
+*
+* INPUTS:
+*       mh     - Whether has MH or not
+*       p_data - Pointer to packet.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       The shift of SKB data.
+*******************************************************************************/
+INLINE int cph_flow_strip_vlan(bool mh, unsigned char *p_data)
+{
+	int  offset       = 0;
+	int  total_offset = 0;
+	unsigned short eth_type     = 0;
+	unsigned char *p_field      = NULL;
+	unsigned int len          = 0;
+
+	if (TRUE == mh)
+		len = MV_ETH_MH_SIZE + MV_MAC_ADDR_SIZE + MV_MAC_ADDR_SIZE;
+	else
+		len = MV_MAC_ADDR_SIZE + MV_MAC_ADDR_SIZE;
+
+	p_field  = p_data + len;
+	eth_type = ntohs(*(unsigned short *)p_field);
+
+	while (eth_type == MV_TPID_8100 || eth_type == MV_TPID_88A8 || eth_type == MV_TPID_9100) {
+		offset   = cph_flow_del_vlan(mh, p_data+offset);
+
+		p_field += offset;
+		eth_type = ntohs(*(unsigned short *)p_field);
+		total_offset += offset;
+	}
+
+	return total_offset;
+}
+
+/******************************************************************************
+* cph_flow_replace_vlan()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Replace one VLAN tag behind of source MAC address.
+*
+* INPUTS:
+*       mh     - Whether has MH or not
+*       p_data - Pointer to packet
+*       tpid   - Type of VLAN ID
+*       vid    - VLAN to be added
+*       pbits  - P-bits value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       The shift of SKB data.
+*******************************************************************************/
+INLINE int cph_flow_replace_vlan(bool mh, unsigned char *p_data, unsigned short tpid,
+	unsigned short vid, unsigned char pbits)
+{
+	unsigned short *p_vlan = NULL;
+	unsigned int  len    = 0;
+
+	if (TRUE == mh)
+		len = MV_ETH_MH_SIZE + MV_MAC_ADDR_SIZE + MV_MAC_ADDR_SIZE;
+	else
+		len = MV_MAC_ADDR_SIZE + MV_MAC_ADDR_SIZE;
+
+	p_vlan = (unsigned short *)(p_data + len);
+
+	/* Set VLAN Type */
+	*p_vlan = htons(tpid);
+	p_vlan++;
+
+	/* Set VID + priority */
+	*p_vlan = htons((vid & MV_VLAN_ID_MASK) | ((pbits & MV_PBITS_MASK) << MV_PBITS_SHIFT));
+
+	return 0;
+}
+
+/******************************************************************************
+* cph_flow_swap_vlan()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Swap between two VLAN tag.
+*
+* INPUTS:
+*       mh     - Whether has MH or not
+*       p_data - Pointer to packet
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       The shift of SKB data.
+*******************************************************************************/
+INLINE int cph_flow_swap_vlan(bool mh, unsigned char *p_data)
+{
+	unsigned int *p_tci = NULL;
+	unsigned int  tci1  = 0;
+	unsigned int  tci2  = 0;
+	unsigned int  len    = 0;
+
+	if (TRUE == mh)
+		len = MV_ETH_MH_SIZE + MV_MAC_ADDR_SIZE + MV_MAC_ADDR_SIZE;
+	else
+		len = MV_MAC_ADDR_SIZE + MV_MAC_ADDR_SIZE;
+
+	p_tci = (unsigned int *)(p_data + len);
+
+	/* Save first TCI */
+	tci1 = ntohl(*p_tci);
+	p_tci++;
+
+	/* Save second TCI and replace it w/ first TCI */
+	tci2   = ntohl(*p_tci);
+	*p_tci = htonl(tci1);
+
+	/* Go back to replce first TCI */
+	p_tci--;
+	*p_tci = htonl(tci2);
+
+	return 0;
+}
+
+/******************************************************************************
+* cph_flow_parse_packet()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Parse packet and output flow information.
+*
+* INPUTS:
+*       port - Source GMAC port
+*       data - Pointer to packet
+*       rx   - Whether in RX dir
+*       mh   - Whether has Marvell header
+*
+* OUTPUTS:
+*       flow - Flow parsing field values
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_flow_parse_packet(int port, unsigned char *data, bool rx, bool mh, struct CPH_FLOW_ENTRY_T *flow)
+{
+	unsigned short      eth_type = 0;
+	unsigned char      *p_field  = NULL;
+	unsigned char       proto    = 0;
+	MV_STATUS   rc       = MV_OK;
+	struct ipv6hdr         *p_ipv6_hdr   = NULL;
+	struct ipv6_hopopt_hdr *p_hopopt_hdr = NULL;
+	struct icmp6hdr        *p_icmp_hdr   = NULL;
+
+	memset(flow, 0, sizeof(struct CPH_FLOW_ENTRY_T));
+
+	/* Parse Direction */
+	flow->dir = cph_app_parse_dir(port, rx);
+	if (flow->dir == CPH_DIR_INVALID) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "dir[%d] is invalid\n", flow->dir);
+		return MV_BAD_VALUE;
+	}
+
+	if (TRUE == mh) {
+		/* Parse Marvell header */
+		if (flow->dir == CPH_DIR_US)
+			flow->mh = (ntohs(*(unsigned short *)data) & MV_VALID_MH_MASK);
+		else
+			flow->mh = (ntohs(*(unsigned short *)data) & MV_VALID_GH_MASK);
+
+		flow->parse_bm |= CPH_FLOW_PARSE_MH;
+		p_field  = data + MV_ETH_MH_SIZE + ETH_ALEN + ETH_ALEN;
+	} else {
+		p_field  = data + ETH_ALEN + ETH_ALEN;
+	}
+
+	/* Parse VLAN tag */
+	eth_type = ntohs(*(unsigned short *)p_field);
+	if (eth_type == MV_TPID_8100 || eth_type == MV_TPID_88A8 || eth_type == MV_TPID_9100) {
+		flow->parse_bm |= CPH_FLOW_PARSE_EXT_VLAN;
+
+		flow->parse_outer_tci.tpid = ntohs(*(unsigned short *)p_field);
+		p_field += MV_CPH_TPID_LEN;
+
+		flow->parse_outer_tci.vid   = (ntohs(*(unsigned short *)p_field) & MV_VLAN_ID_MASK);
+		flow->parse_outer_tci.pbits = ((ntohs(*(unsigned short *)p_field) >> MV_PBITS_SHIFT) & MV_PBITS_MASK);
+
+		p_field += MV_CPH_VLAN_TAG_LEN;
+
+		eth_type = ntohs(*(unsigned short *)p_field);
+		if (eth_type == MV_TPID_8100 || eth_type == MV_TPID_88A8 || eth_type == MV_TPID_9100) {
+			flow->parse_bm &= ~CPH_FLOW_PARSE_EXT_VLAN;
+			flow->parse_bm |= CPH_FLOW_PARSE_TWO_VLAN;
+
+			flow->parse_inner_tci.tpid = ntohs(*(unsigned short *)p_field);
+			p_field += MV_CPH_TPID_LEN;
+
+			flow->parse_inner_tci.vid   = (ntohs(*(unsigned short *)p_field) & MV_VLAN_ID_MASK);
+			flow->parse_inner_tci.pbits = ((ntohs(*(unsigned short *)p_field) >> MV_PBITS_SHIFT)
+							& MV_PBITS_MASK);
+
+			p_field += MV_CPH_VLAN_TAG_LEN;
+
+			eth_type = ntohs(*(unsigned short *)p_field);
+		}
+	}
+	while (eth_type == MV_TPID_8100 || eth_type == MV_TPID_88A8 || eth_type == MV_TPID_9100) {
+		p_field += VLAN_HLEN;
+		eth_type = ntohs(*(unsigned short *)p_field);
+	}
+	/* Parse Eth type */
+	flow->eth_type = eth_type;
+	flow->parse_bm |= CPH_FLOW_PARSE_ETH_TYPE;
+
+	/* Parse Multicast protocol */
+	if (MV_CPH_ETH_TYPE_IPV4 == flow->eth_type) {
+		p_field += MV_CPH_ETH_TYPE_LEN;
+		p_field += MV_IPV4_PROTO_OFFSET;
+		proto = *(unsigned char *)p_field;
+
+		if (IPPROTO_IGMP == proto)
+			flow->parse_bm |= CPH_FLOW_PARSE_MC_PROTO;
+	} else if (MV_CPH_ETH_TYPE_IPV6 == flow->eth_type) {
+		p_ipv6_hdr = (struct ipv6hdr *)(p_field + MV_CPH_ETH_TYPE_LEN);
+
+		if (NEXTHDR_HOP == p_ipv6_hdr->nexthdr) {
+			p_hopopt_hdr = (struct ipv6_hopopt_hdr *)((unsigned char *)p_ipv6_hdr + sizeof(struct ipv6hdr));
+
+			if (IPPROTO_ICMPV6 == p_hopopt_hdr->nexthdr) {
+				p_icmp_hdr = (struct icmp6hdr *)((unsigned char *)p_hopopt_hdr +
+					ipv6_optlen(p_hopopt_hdr));
+
+				switch (p_icmp_hdr->icmp6_type) {
+				case ICMPV6_MGM_QUERY:
+				case ICMPV6_MGM_REPORT:
+				case ICMPV6_MGM_REDUCTION:
+				case ICMPV6_MLD2_REPORT:
+					flow->parse_bm |= CPH_FLOW_PARSE_MC_PROTO;
+					break;
+				default:
+					break;
+				}
+			}
+		}
+	}
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_flow_compare_rules()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Compare two flow rules.
+*
+* INPUTS:
+*       parse_rule  - The parsing field values come from the packets
+*       db_rule     - The flow rule stored in flow database
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       In case same, return TRUE,
+*       In case different, return FALSE.
+*******************************************************************************/
+bool cph_flow_compare_rules(struct CPH_FLOW_ENTRY_T *parse_rule, struct CPH_FLOW_ENTRY_T *db_rule)
+{
+	/* Check direction */
+	if (parse_rule->dir != db_rule->dir)
+		return FALSE;
+
+	/* Check parse_bm */
+	if (parse_rule->parse_bm != db_rule->parse_bm)
+		return FALSE;
+
+	/* Check MH if needed */
+	if (db_rule->parse_bm & CPH_FLOW_PARSE_MH) {
+		if (parse_rule->mh != db_rule->mh)
+			return FALSE;
+	}
+
+	/* Check if it is default rule */
+	if (parse_rule->is_default != db_rule->is_default)
+		return FALSE;
+
+	/* Check VLAN ID */
+	if (parse_rule->is_default == FALSE) {
+		if ((db_rule->parse_bm & CPH_FLOW_PARSE_EXT_VLAN) ||
+		    (db_rule->parse_bm & CPH_FLOW_PARSE_TWO_VLAN)) {
+			if (parse_rule->parse_outer_tci.tpid != db_rule->parse_outer_tci.tpid)
+				return FALSE;
+
+			if (parse_rule->parse_outer_tci.vid != db_rule->parse_outer_tci.vid)
+				return FALSE;
+
+			if (parse_rule->parse_outer_tci.pbits != db_rule->parse_outer_tci.pbits)
+				return FALSE;
+		}
+		if (db_rule->parse_bm & CPH_FLOW_PARSE_TWO_VLAN) {
+			if (parse_rule->parse_inner_tci.tpid != db_rule->parse_inner_tci.tpid)
+				return FALSE;
+
+			if (parse_rule->parse_inner_tci.vid != db_rule->parse_inner_tci.vid)
+				return FALSE;
+
+			if (parse_rule->parse_inner_tci.pbits != db_rule->parse_inner_tci.pbits)
+				return FALSE;
+		}
+	}
+	/* Check Ethernet type if needed */
+	if (db_rule->parse_bm & CPH_FLOW_PARSE_ETH_TYPE) {
+		if (parse_rule->eth_type != db_rule->eth_type)
+			return FALSE;
+	}
+
+	return TRUE;
+}
+
+/******************************************************************************
+* cph_flow_compare_packet_and_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Compare flow packet and rule.
+*
+* INPUTS:
+*       packet_rule - The parsing field values come from the packets
+*       db_rule     - The flow rule stored in flow database
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       In case same, return TRUE,
+*       In case different, return FALSE.
+*******************************************************************************/
+bool cph_flow_compare_packet_and_rule(struct CPH_FLOW_ENTRY_T *packet_rule, struct CPH_FLOW_ENTRY_T *db_rule)
+{
+	/* Check direction */
+	if ((packet_rule->dir != db_rule->dir) &&
+	    (db_rule->dir != CPH_DIR_NOT_CARE))
+		return FALSE;
+
+	/* Check Multicast protocol */
+	if ((db_rule->parse_bm & CPH_FLOW_PARSE_MC_PROTO) != (packet_rule->parse_bm & CPH_FLOW_PARSE_MC_PROTO))
+		return FALSE;
+
+	/* Check MH if needed */
+	if ((db_rule->parse_bm & CPH_FLOW_PARSE_MH) &&
+	    (packet_rule->parse_bm & CPH_FLOW_PARSE_MH)) {
+		if (packet_rule->mh != db_rule->mh)
+			return FALSE;
+	}
+
+	/* Check if it is default rule */
+	if (packet_rule->is_default != db_rule->is_default)
+		return FALSE;
+
+	/* Check VLAN ID */
+	if ((packet_rule->parse_bm & (CPH_FLOW_PARSE_EXT_VLAN | CPH_FLOW_PARSE_TWO_VLAN))
+	     != (db_rule->parse_bm & (CPH_FLOW_PARSE_EXT_VLAN | CPH_FLOW_PARSE_TWO_VLAN)))
+		return FALSE;
+	if (packet_rule->is_default == FALSE) {
+		if ((db_rule->parse_bm & CPH_FLOW_PARSE_EXT_VLAN) ||
+		    (db_rule->parse_bm & CPH_FLOW_PARSE_TWO_VLAN)) {
+			if ((packet_rule->parse_outer_tci.tpid != db_rule->parse_outer_tci.tpid) &&
+			    (db_rule->parse_outer_tci.tpid != MV_CPH_TPID_NOT_CARE_VALUE))
+				return FALSE;
+
+			if ((packet_rule->parse_outer_tci.vid != db_rule->parse_outer_tci.vid) &&
+			    (db_rule->parse_outer_tci.vid != MV_CPH_VID_NOT_CARE_VALUE))
+				return FALSE;
+
+			if ((packet_rule->parse_outer_tci.pbits != db_rule->parse_outer_tci.pbits) &&
+			    (db_rule->parse_outer_tci.pbits != MV_CPH_PBITS_NOT_CARE_VALUE))
+				return FALSE;
+		}
+		if (db_rule->parse_bm & CPH_FLOW_PARSE_TWO_VLAN) {
+			if ((packet_rule->parse_inner_tci.tpid != db_rule->parse_inner_tci.tpid) &&
+			    (db_rule->parse_inner_tci.tpid != MV_CPH_TPID_NOT_CARE_VALUE))
+				return FALSE;
+
+			if ((packet_rule->parse_inner_tci.vid != db_rule->parse_inner_tci.vid) &&
+			    (db_rule->parse_inner_tci.vid != MV_CPH_VID_NOT_CARE_VALUE))
+				return FALSE;
+
+			if ((packet_rule->parse_inner_tci.pbits != db_rule->parse_inner_tci.pbits) &&
+			    (db_rule->parse_inner_tci.pbits != MV_CPH_PBITS_NOT_CARE_VALUE))
+				return FALSE;
+		}
+	}
+	/* Check Ethernet type if needed */
+	if (db_rule->parse_bm & CPH_FLOW_PARSE_ETH_TYPE) {
+		if (packet_rule->eth_type != db_rule->eth_type)
+			return FALSE;
+	}
+
+	return TRUE;
+}
+
+/******************************************************************************
+* cph_flow_compare_packet_and_rule_vid()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Compare flow packet and rule w/ only VID.
+*
+* INPUTS:
+*       packet_rule - The parsing field values come from the packets
+*       db_rule     - The flow rule stored in flow database
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       In case same, return TRUE,
+*       In case different, return FALSE.
+*******************************************************************************/
+bool cph_flow_compare_packet_and_rule_vid(struct CPH_FLOW_ENTRY_T *packet_rule,
+					struct CPH_FLOW_ENTRY_T *db_rule)
+{
+	/* Check direction */
+	if ((packet_rule->dir != db_rule->dir) &&
+	    (db_rule->dir != CPH_DIR_NOT_CARE))
+		return FALSE;
+
+	/* Check Multicast protocol */
+	if ((db_rule->parse_bm & CPH_FLOW_PARSE_MC_PROTO) != (packet_rule->parse_bm & CPH_FLOW_PARSE_MC_PROTO))
+		return FALSE;
+
+	/* Check MH if needed */
+	if ((db_rule->parse_bm & CPH_FLOW_PARSE_MH) &&
+	    (packet_rule->parse_bm & CPH_FLOW_PARSE_MH)) {
+		if (packet_rule->mh != db_rule->mh)
+			return FALSE;
+	}
+
+	/* Check if it is default rule */
+	if (packet_rule->is_default != db_rule->is_default)
+		return FALSE;
+
+	/* Check VLAN ID */
+	if ((packet_rule->parse_bm & (CPH_FLOW_PARSE_EXT_VLAN | CPH_FLOW_PARSE_TWO_VLAN))
+	     != (db_rule->parse_bm & (CPH_FLOW_PARSE_EXT_VLAN | CPH_FLOW_PARSE_TWO_VLAN)))
+		return FALSE;
+	if (packet_rule->is_default == FALSE) {
+		if ((db_rule->parse_bm & CPH_FLOW_PARSE_EXT_VLAN) ||
+		    (db_rule->parse_bm & CPH_FLOW_PARSE_TWO_VLAN)) {
+			if (db_rule->parse_outer_tci.tpid != MV_CPH_TPID_NOT_CARE_VALUE)
+				return FALSE;
+
+			if (db_rule->parse_outer_tci.pbits != MV_CPH_PBITS_NOT_CARE_VALUE)
+				return FALSE;
+
+			if ((packet_rule->parse_outer_tci.vid != db_rule->parse_outer_tci.vid) &&
+			    (db_rule->parse_outer_tci.vid != MV_CPH_VID_NOT_CARE_VALUE))
+				return FALSE;
+		}
+		if (db_rule->parse_bm & CPH_FLOW_PARSE_TWO_VLAN) {
+			if (db_rule->parse_inner_tci.tpid != MV_CPH_TPID_NOT_CARE_VALUE)
+				return FALSE;
+
+			if (db_rule->parse_inner_tci.pbits != MV_CPH_PBITS_NOT_CARE_VALUE)
+				return FALSE;
+
+			if ((packet_rule->parse_inner_tci.vid != db_rule->parse_inner_tci.vid) &&
+			    (db_rule->parse_inner_tci.vid != MV_CPH_VID_NOT_CARE_VALUE))
+				return FALSE;
+		}
+	}
+	/* Check Ethernet type if needed */
+	if (db_rule->parse_bm & CPH_FLOW_PARSE_ETH_TYPE) {
+		if (packet_rule->eth_type != db_rule->eth_type)
+			return FALSE;
+	}
+
+	return TRUE;
+}
+
+/******************************************************************************
+* cph_flow_mod_packet()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Modify packet according to flow rule
+*
+* INPUTS:
+*       skb        - Pointer to packet
+*       mh         - Whether has MH or not
+*       flow       - Flow parsing field values
+*       out_offset - Offset of packet
+*       rx         - Whether RX or TX
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_flow_mod_packet(struct sk_buff *skb,  bool mh, struct CPH_FLOW_ENTRY_T *flow, int *out_offset)
+{
+	int  offset = 0;
+	unsigned short tpid   = 0;
+	unsigned short vid    = 0;
+	unsigned char  pbits  = 0;
+	bool   rc     = MV_OK;
+
+	switch (flow->op_type) {
+	case CPH_VLAN_OP_ASIS:
+		break;
+	case CPH_VLAN_OP_DISCARD:
+		break;
+	case CPH_VLAN_OP_ADD:
+		tpid   = flow->mod_outer_tci.tpid ? flow->mod_outer_tci.tpid : MV_TPID_8100;
+		offset = cph_flow_add_vlan(mh, skb->data, tpid,
+					flow->mod_outer_tci.vid, flow->mod_outer_tci.pbits);
+		break;
+	case CPH_VLAN_OP_ADD_COPY_DSCP:
+		tpid   = flow->mod_outer_tci.tpid ? flow->mod_outer_tci.tpid : MV_TPID_8100;
+		offset = cph_flow_add_vlan(mh, skb->data, tpid,
+					flow->mod_outer_tci.vid, flow->mod_outer_tci.pbits);
+		break;
+	case CPH_VLAN_OP_ADD_COPY_OUTER_PBIT:
+		tpid   = flow->mod_outer_tci.tpid ? flow->mod_outer_tci.tpid : MV_TPID_8100;
+		pbits  = (flow->parse_outer_tci.pbits == MV_CPH_PBITS_NOT_CARE_VALUE) ? 0 : flow->parse_outer_tci.pbits;
+		offset = cph_flow_add_vlan(mh, skb->data, tpid,
+					flow->mod_outer_tci.vid, pbits);
+		break;
+	case CPH_VLAN_OP_ADD_COPY_INNER_PBIT:
+		tpid   = flow->mod_outer_tci.tpid ? flow->mod_outer_tci.tpid : MV_TPID_8100;
+		pbits  = (flow->parse_inner_tci.pbits == MV_CPH_PBITS_NOT_CARE_VALUE) ? 0 : flow->parse_inner_tci.pbits;
+		offset = cph_flow_add_vlan(mh, skb->data, tpid,
+					flow->mod_outer_tci.vid, pbits);
+		break;
+	case CPH_VLAN_OP_ADD_2_TAGS:
+		tpid    = flow->mod_outer_tci.tpid ? flow->mod_outer_tci.tpid : MV_TPID_8100;
+		offset  = cph_flow_add_vlan(mh, skb->data, tpid,
+					flow->mod_outer_tci.vid, flow->mod_outer_tci.pbits);
+		tpid    = flow->mod_inner_tci.tpid ? flow->mod_inner_tci.tpid : MV_TPID_8100;
+		offset += cph_flow_add_vlan(mh, skb->data, tpid,
+					flow->mod_inner_tci.vid, flow->mod_inner_tci.pbits);
+		break;
+	case CPH_VLAN_OP_ADD_2_TAGS_COPY_DSCP:
+		tpid    = flow->mod_outer_tci.tpid ? flow->mod_outer_tci.tpid : MV_TPID_8100;
+		offset  = cph_flow_add_vlan(mh, skb->data, tpid,
+					flow->mod_outer_tci.vid, flow->mod_outer_tci.pbits);
+		tpid    = flow->mod_inner_tci.tpid ? flow->mod_inner_tci.tpid : MV_TPID_8100;
+		offset += cph_flow_add_vlan(mh, skb->data, tpid,
+					flow->mod_inner_tci.vid, flow->mod_inner_tci.pbits);
+		break;
+	case CPH_VLAN_OP_ADD_2_TAGS_COPY_PBIT:
+		tpid   = flow->mod_outer_tci.tpid ? flow->mod_outer_tci.tpid : MV_TPID_8100;
+		pbits  = (flow->parse_outer_tci.pbits == MV_CPH_PBITS_NOT_CARE_VALUE) ? 0 : flow->parse_outer_tci.pbits;
+		offset = cph_flow_add_vlan(mh, skb->data, tpid,
+					flow->mod_inner_tci.vid, pbits);
+		tpid   = flow->mod_inner_tci.tpid ? flow->mod_inner_tci.tpid : MV_TPID_8100;
+		pbits  = (flow->parse_inner_tci.pbits == MV_CPH_PBITS_NOT_CARE_VALUE) ? 0 : flow->parse_inner_tci.pbits;
+		offset += cph_flow_add_vlan(mh, skb->data, tpid,
+					flow->mod_inner_tci.vid, pbits);
+		break;
+	case CPH_VLAN_OP_REM:
+		offset = cph_flow_del_vlan(mh, skb->data);
+		break;
+	case CPH_VLAN_OP_REM_2_TAGS:
+		offset  = cph_flow_del_vlan(mh, skb->data);
+		offset += cph_flow_del_vlan(mh, skb->data);
+		break;
+	case CPH_VLAN_OP_REPLACE:
+		tpid   = flow->mod_outer_tci.tpid ? flow->mod_outer_tci.tpid : MV_TPID_8100;
+		offset = cph_flow_replace_vlan(mh, skb->data, tpid,
+					flow->mod_outer_tci.vid, flow->mod_outer_tci.pbits);
+		break;
+	case CPH_VLAN_OP_REPLACE_VID:
+		tpid = flow->mod_outer_tci.tpid ? flow->mod_outer_tci.tpid : MV_TPID_8100;
+
+		pbits  = (flow->parse_outer_tci.pbits == MV_CPH_PBITS_NOT_CARE_VALUE) ? 0 : flow->parse_outer_tci.pbits;
+		offset = cph_flow_replace_vlan(mh, skb->data, tpid,
+					flow->mod_outer_tci.vid, pbits);
+		break;
+	case CPH_VLAN_OP_REPLACE_PBIT:
+		tpid = flow->mod_outer_tci.tpid ? flow->mod_outer_tci.tpid : MV_TPID_8100;
+
+		vid    = (flow->parse_outer_tci.vid == MV_CPH_VID_NOT_CARE_VALUE) ? 0 : flow->parse_outer_tci.vid;
+		offset = cph_flow_replace_vlan(mh, skb->data, tpid,
+					vid, flow->mod_outer_tci.pbits);
+		break;
+	case CPH_VLAN_OP_REPLACE_INNER_ADD_OUTER:
+		tpid = flow->mod_inner_tci.tpid ? flow->mod_inner_tci.tpid : MV_TPID_8100;
+		offset = cph_flow_replace_vlan(mh, skb->data, tpid,
+					flow->mod_inner_tci.vid, flow->mod_inner_tci.pbits);
+		tpid = flow->mod_outer_tci.tpid ? flow->mod_outer_tci.tpid : MV_TPID_8100;
+		offset += cph_flow_add_vlan(mh, skb->data, tpid,
+					flow->mod_outer_tci.vid, flow->mod_outer_tci.pbits);
+		break;
+	case CPH_VLAN_OP_REPLACE_INNER_ADD_OUTER_COPY_PBIT:
+		tpid = flow->mod_inner_tci.tpid ? flow->mod_inner_tci.tpid : MV_TPID_8100;
+		pbits  = (flow->parse_inner_tci.pbits == MV_CPH_PBITS_NOT_CARE_VALUE) ? 0 : flow->parse_inner_tci.pbits;
+		offset = cph_flow_replace_vlan(mh, skb->data, tpid,
+					flow->mod_inner_tci.vid, pbits);
+		tpid = flow->mod_outer_tci.tpid ? flow->mod_outer_tci.tpid : MV_TPID_8100;
+		pbits  = (flow->parse_outer_tci.pbits == MV_CPH_PBITS_NOT_CARE_VALUE) ? 0 : flow->parse_outer_tci.pbits;
+		offset += cph_flow_add_vlan(mh, skb->data, tpid,
+					flow->mod_outer_tci.vid, pbits);
+		break;
+	case CPH_VLAN_OP_REPLACE_INNER_REM_OUTER:
+		offset  = cph_flow_del_vlan(mh, skb->data);
+		tpid    = flow->mod_outer_tci.tpid ? flow->mod_outer_tci.tpid : MV_TPID_8100;
+		offset += cph_flow_replace_vlan(mh, skb->data, tpid,
+					flow->mod_outer_tci.vid, flow->mod_outer_tci.pbits);
+		break;
+	case CPH_VLAN_OP_REPLACE_2TAGS:
+		tpid    = flow->mod_inner_tci.tpid ? flow->mod_inner_tci.tpid : MV_TPID_8100;
+		offset  = cph_flow_replace_vlan(mh, skb->data, tpid,
+					flow->mod_inner_tci.vid, flow->mod_inner_tci.pbits);
+		offset += cph_flow_swap_vlan(mh, skb->data);
+		tpid    = flow->mod_outer_tci.tpid ? flow->mod_outer_tci.tpid : MV_TPID_8100;
+		offset += cph_flow_replace_vlan(mh, skb->data, tpid,
+					flow->mod_outer_tci.vid, flow->mod_outer_tci.pbits);
+		break;
+	case CPH_VLAN_OP_REPLACE_2TAGS_VID:
+		tpid    = flow->mod_inner_tci.tpid ? flow->mod_inner_tci.tpid : MV_TPID_8100;
+		pbits  = (flow->parse_inner_tci.pbits == MV_CPH_PBITS_NOT_CARE_VALUE) ? 0 : flow->parse_inner_tci.pbits;
+		offset  = cph_flow_replace_vlan(mh, skb->data, tpid,
+					flow->mod_inner_tci.vid, pbits);
+		offset += cph_flow_swap_vlan(mh, skb->data);
+		tpid    = flow->mod_outer_tci.tpid ? flow->mod_outer_tci.tpid : MV_TPID_8100;
+		pbits  = (flow->parse_outer_tci.pbits == MV_CPH_PBITS_NOT_CARE_VALUE) ? 0 : flow->parse_outer_tci.pbits;
+		offset += cph_flow_replace_vlan(mh, skb->data, tpid,
+					flow->mod_outer_tci.vid, pbits);
+		break;
+	case CPH_VLAN_OP_SWAP:
+		offset  = cph_flow_swap_vlan(mh, skb->data);
+		break;
+	default:
+		break;
+	}
+
+	/* Save SKB data offset */
+	skb->data  += offset;
+	skb->len   -= offset;
+	*out_offset = offset;
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_flow_mod_frwd()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Modify forwarding parameter of transmiting packet according to flow rule
+*
+* INPUTS:
+*       flow        - Flow parsing field values
+*       tx_spec_out - TX descriptor
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_flow_mod_frwd(struct CPH_FLOW_ENTRY_T *flow, struct mv_pp2_tx_spec *tx_spec_out)
+{
+	MV_STATUS rc = MV_OK;
+
+	tx_spec_out->txp     = flow->pkt_frwd.trg_port;
+	tx_spec_out->txq     = flow->pkt_frwd.trg_queue;
+	if ((FALSE == cph_db_get_tcont_state(tx_spec_out->txp)) ||
+	    (flow->op_type == CPH_VLAN_OP_DISCARD))
+		tx_spec_out->txq = MV_ETH_TXQ_INVALID;
+	tx_spec_out->hw_cmd[0]  = ((flow->pkt_frwd.gem_port << 8)|0x0010);
+	tx_spec_out->tx_func = NULL;
+	tx_spec_out->flags   = MV_ETH_TX_F_MH;
+	if (!flow->pkt_frwd.gem_port)
+		tx_spec_out->tx_mh = htons(flow->pkt_frwd.trg_port + 0x01);/* EPON LLID*/
+	else
+		tx_spec_out->tx_mh = htons(flow->pkt_frwd.gem_port);/* GPON GEM port */
+
+	return rc;
+}
+
+/******************************************************************************
+* cph_flow_send_packet()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: CPH function to handle the received application packets
+*
+* INPUTS:
+*       dev_out     - Net device
+*       pkt         - Marvell packet information
+*       tx_spec_out - TX descriptor
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns 1.
+*       On error returns 0.
+*******************************************************************************/
+MV_STATUS cph_flow_send_packet(struct net_device *dev_out, struct eth_pbuf *pkt,
+	struct mv_pp2_tx_spec *tx_spec_out)
+{
+#if 0
+	struct eth_port *pp = MV_ETH_PRIV(dev_out);
+	int frags = 0;
+	bool tx_spec_ready = false;
+	struct mv_pp2_tx_spec tx_spec;
+	u32 tx_cmd;
+	struct tx_queue *txq_ctrl = NULL;
+	struct pp2_tx_desc *tx_desc;
+	struct sk_buff  *skb;
+
+
+	skb = (struct sk_buff *)(pkt->osInfo);
+
+	read_lock(&pp->rwlock);
+
+	if (!(netif_running(dev_out))) {
+		pr_err("!netif_running() in %s\n", __func__);
+		goto out;
+	}
+
+	/* Get TXQ (without BM) to send packet generated by Linux */
+	if (tx_spec_ready == false) {
+		tx_spec.txp    = tx_spec_out->txp;
+		tx_spec.txq    = tx_spec_out->txq;
+		tx_spec.hw_cmd[0] = tx_spec_out->hw_cmd[0];
+		tx_spec.flags  = tx_spec_out->flags;
+	}
+
+	txq_ctrl = &pp->txq_ctrl[tx_spec.txp * CONFIG_MV_PP2_TXQ + tx_spec.txq];
+	if (txq_ctrl == NULL) {
+		pr_err("%s: invalidate txp/txq (%d/%d)\n", __func__, tx_spec.txp, tx_spec.txq);
+		goto out;
+	}
+	spin_lock_irqsave(&txq_ctrl->queue_lock);
+
+#if 0
+#ifdef CONFIG_MV_PP2_TSO
+	/* GSO/TSO */
+	if (skb_is_gso(skb)) {
+		frags = mv_pp2_tx_tso(skb, dev_out, &tx_spec, txq_ctrl);
+		goto out;
+	}
+#endif /* CONFIG_MV_PP2_TSO */
+#endif
+
+	frags = 1;
+
+#if 0
+	if (tx_spec.flags & MV_ETH_TX_F_MH) {
+		if (tx_spec.flags & MV_ETH_F_SWITCH)
+			mh = dev_priv->tx_vlan_mh;
+		else
+			mh = pp->tx_mh;
+
+		if (mv_pp2_skb_mh_add(skb, mh)) {
+			frags = 0;
+			goto out;
+		}
+	}
+#endif
+	tx_desc = mv_pp2_tx_desc_get(txq_ctrl, frags);
+	if (tx_desc == NULL) {
+		frags = 0;
+		goto out;
+	}
+
+	tx_cmd = PP2_TX_L4_CSUM_NOT;
+
+#ifdef CONFIG_MV_PP2_PON
+	tx_desc->hw_cmd[0] = tx_spec.hw_cmd[0];
+#endif
+
+	/* FIXME: beware of nonlinear --BK */
+	tx_desc->dataSize = skb_headlen(skb);
+
+	tx_desc->bufPhysAddr = mvOsCacheFlush(NULL, skb->data, tx_desc->dataSize);
+
+	if (frags == 1) {
+		/*
+		 * First and Last descriptor
+		 */
+		if (tx_spec.flags & MV_ETH_TX_F_NO_PAD)
+			tx_cmd |= PP2_TX_F_DESC_MASK | PP2_TX_L_DESC_MASK;
+		else
+			tx_cmd |= PP2_TX_FLZ_DESC_MASK;
+
+		tx_desc->command = tx_cmd;
+		mv_pp2_tx_desc_flush(tx_desc);
+
+		txq_ctrl->shadow_txq[txq_ctrl->shadow_txq_put_i] = ((MV_ULONG) skb | MV_ETH_SHADOW_SKB);
+		mv_pp2_shadow_inc_put(txq_ctrl);
+	}
+
+	txq_ctrl->txq_count += frags;
+
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+	if (pp->flags & MV_ETH_F_DBG_TX) {
+		pr_err("\n");
+		pr_err("%s - eth_tx_%lu: port=%d, txp=%d, txq=%d, skb=%p, head=%p, data=%p, size=%d\n",
+			dev_out->name, dev_out->stats.tx_packets, pp->port, tx_spec.txp, tx_spec.txq, skb,
+			skb->head, skb->data, skb->len);
+		mv_pp2_tx_desc_print(tx_desc);
+	}
+#endif /* CONFIG_MV_PP2_DEBUG_CODE */
+
+#ifdef CONFIG_MV_PP2_PON
+	if (MV_PP2_IS_PON_PORT(pp->port))
+		mvNetaPonTxqBytesAdd(pp->port, tx_spec.txp, tx_spec.txq, skb->len);
+#endif /* CONFIG_MV_PP2_PON */
+
+	/* Enable transmit */
+	mvPp2AggrTxqPendDescAdd(pp->port, tx_spec.txp, tx_spec.txq, frags);
+
+	STAT_DBG(txq_ctrl->stats.txq_tx += frags);
+
+out:
+	if (frags > 0) {
+		dev_out->stats.tx_packets++;
+		dev_out->stats.tx_bytes += skb->len;
+	} else {
+		dev_out->stats.tx_dropped++;
+		dev_kfree_skb_any(skb);
+	}
+
+#ifndef CONFIG_MV_PP2_TXDONE_ISR
+	if (txq_ctrl) {
+		if (txq_ctrl->txq_count >= mv_ctrl_pp2_txdone) {
+			u32 tx_done = mv_pp2_txq_done(pp, txq_ctrl);
+			STAT_DIST((tx_done < pp->dist_stats.tx_done_dist_size) ?
+				pp->dist_stats.tx_done_dist[tx_done]++ : 0);
+		}
+		/* If after calling mv_pp2_txq_done, txq_ctrl->txq_count equals frags, we need to set the timer */
+		if ((txq_ctrl->txq_count == frags) && (frags > 0))
+			mv_pp2_add_tx_done_timer(pp);
+	}
+#endif /* CONFIG_MV_PP2_TXDONE_ISR */
+
+	if (txq_ctrl)
+		spin_unlock_irqrestore(&txq_ctrl->queue_lock);
+
+	read_unlock(&pp->rwlock);
+
+#endif
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_flow_lookup_op_type()
+* _____________________________________________________________________________
+*
+* DESCRIPTION:lookup operation type string according to value
+*
+* INPUTS:
+*       enum_value - The enum value to be matched
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       Enum string
+*******************************************************************************/
+char *cph_flow_lookup_op_type(int enum_value)
+{
+	return mtype_lookup_enum_str(&g_enum_array_op_type, enum_value);
+}
+
+/******************************************************************************
+* cph_flow_display_all()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: The function displays valid flow mapping tables and DSCP
+*              to P-bits mapping tablefor untagged frames.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int  cph_flow_display_all(void)
+{
+	unsigned int             idx         = 0;
+	unsigned int             pbit_idx    = 0;
+	unsigned int             rule_idx    = 0;
+	struct CPH_FLOW_ENTRY_T  *p_flow_rule = NULL;
+	int              offset      = 0;
+	struct CPH_FLOW_DB_T     *p_cph_db    = NULL;
+	struct CPH_PBITS_TABLE_T *p_pbits_tbl = NULL;
+	struct CPH_FLOW_TABLE_T  *p_mc_tbl    = NULL;
+	unsigned char              pbit_tbl_idx;
+	unsigned char              buff[512];
+
+	p_cph_db = &gs_cph_flow_db;
+	p_mc_tbl = &gs_mc_flow_tbl;
+	/* Print flow rule entries */
+	pr_info("MV_CPH Flow Rule Table\n----------------------------------\n");
+	pr_info("Total rule number:%d, Max rule number:%d\n", p_cph_db->flow_tbl.rule_num, CPH_FLOW_ENTRY_NUM);
+
+	pr_info("----------------------------------------------------------------------------------------------------------------------------------------------------------------\n");
+	pr_info("                                         |Parse outer       |Parse inner       |Mod outer         |Mod Inner         |Forward\n");
+	pr_info("rule_idx dir default parse_bm mh   ety    tpid   vid  pbits  tpid   vid  pbits  tpid   vid  pbits  tpid   vid  pbits  port queue hwf_queue gem  count    op_type\n");
+	/* Traverse CPH flow rule table */
+	for (idx = 0, rule_idx = 0; (idx < CPH_FLOW_ENTRY_NUM) && (rule_idx < p_cph_db->flow_tbl.rule_num); idx++) {
+		p_flow_rule = &p_cph_db->flow_tbl.flow_rule[idx];
+
+		/* Compare parse_bm and parse_key */
+		if (p_flow_rule->valid == TRUE) {
+			rule_idx++;
+
+			pr_info(
+			       "%-8d %2.2s  %3.3s     0x%04x   %-4d 0x%04x 0x%04x %-4d %1d      0x%04x %-4d %1d      " \
+			       "0x%04x %-4d %1d      0x%04x %-4d %1d      %1d    %1d     %1d         %-4d %-8d %s\n",
+			       idx,
+			       cph_app_lookup_dir(p_flow_rule->dir), (p_flow_rule->is_default == TRUE) ? "Yes" : "No ",
+			       p_flow_rule->parse_bm,             p_flow_rule->mh,
+			       p_flow_rule->eth_type,
+			       p_flow_rule->parse_outer_tci.tpid, p_flow_rule->parse_outer_tci.vid,
+			       p_flow_rule->parse_outer_tci.pbits,
+			       p_flow_rule->parse_inner_tci.tpid, p_flow_rule->parse_inner_tci.vid,
+			       p_flow_rule->parse_inner_tci.pbits,
+			       p_flow_rule->mod_outer_tci.tpid,   p_flow_rule->mod_outer_tci.vid,
+			       p_flow_rule->mod_outer_tci.pbits,
+			       p_flow_rule->mod_inner_tci.tpid,   p_flow_rule->mod_inner_tci.vid,
+			       p_flow_rule->mod_inner_tci.pbits,
+			       p_flow_rule->pkt_frwd.trg_port,    p_flow_rule->pkt_frwd.trg_queue,
+			       p_flow_rule->pkt_frwd.trg_hwf_queue, p_flow_rule->pkt_frwd.gem_port,
+			       p_flow_rule->count,                cph_flow_lookup_op_type(p_flow_rule->op_type));
+		}
+	}
+
+	pr_info("\nVID Index Table U/S\n----------------------------------\n");
+	pr_info("Index   pbit_tbl_idx\n");
+	rule_idx = 0;
+	/* Traverse CPH U/S VID index table */
+	for (idx = 0; idx < MV_CPH_VID_INDEX_TABLE_MAX_SIZE; idx++) {
+		pbit_tbl_idx = p_cph_db->vid_idx_tbl[CPH_DIR_US].pbit_tbl_idx[idx];
+
+		if (pbit_tbl_idx < MV_CPH_MAX_PBITS_MAP_TABLE_SIZE) {
+			rule_idx++;
+			pr_info("%4.4d       %4.4d\n", idx, pbit_tbl_idx);
+		}
+	}
+	pr_info("Total valid P-bits table index:%d\n\n", rule_idx);
+
+	pr_info("VID Index Table D/S\n----------------------------------\n");
+	pr_info("Index   pbit_tbl_idx\n");
+	rule_idx = 0;
+	/* Traverse CPH D/S VID index table */
+	for (idx = 0; idx < MV_CPH_VID_INDEX_TABLE_MAX_SIZE; idx++) {
+		pbit_tbl_idx = p_cph_db->vid_idx_tbl[CPH_DIR_DS].pbit_tbl_idx[idx];
+
+		if (pbit_tbl_idx < MV_CPH_MAX_PBITS_MAP_TABLE_SIZE) {
+			rule_idx++;
+			pr_info("%4.4d       %4.4d\n", idx, pbit_tbl_idx);
+		}
+	}
+	pr_info("Total valid P-bits table index:%d\n\n", rule_idx);
+
+	pr_info("P-bits table U/S\n----------------------------------\n");
+	/* Traverse CPH U/S P-bits table */
+	for (idx = 0; idx < MV_CPH_MAX_PBITS_MAP_TABLE_SIZE; idx++) {
+		p_pbits_tbl = &p_cph_db->pbits_tbl[CPH_DIR_US][idx];
+
+		if (p_pbits_tbl->in_use == TRUE) {
+			pr_info("\nP-bits table:%d\nflow rule:\n", idx);
+			for (pbit_idx = 0; pbit_idx < MV_CPH_PBITS_MAP_MAX_ENTRY_NUM; pbit_idx++) {
+				if ((p_pbits_tbl->flow_rule[pbit_idx].num > 0) &&
+				    (p_pbits_tbl->flow_rule[pbit_idx].num < MV_CPH_RULE_NUM_PER_ENTRY)) {
+					memset(buff, 0, sizeof(buff));
+					offset = 0;
+					offset += sprintf(buff+offset, "P-bit:%d, number:%d rule_idx:",
+						pbit_idx, p_pbits_tbl->flow_rule[pbit_idx].num);
+					for (rule_idx = 0; rule_idx < p_pbits_tbl->flow_rule[pbit_idx].num; rule_idx++)
+						offset += sprintf(buff+offset, "[%d]%d ", rule_idx,
+							p_pbits_tbl->flow_rule[pbit_idx].rule_idx[rule_idx]);
+					pr_info("%s\n", buff);
+				}
+			}
+			pr_info("default flow rule:\n");
+			for (pbit_idx = 0; pbit_idx < MV_CPH_PBITS_MAP_MAX_ENTRY_NUM; pbit_idx++) {
+				if ((p_pbits_tbl->def_flow_rule[pbit_idx].num > 0) &&
+				    (p_pbits_tbl->def_flow_rule[pbit_idx].num < MV_CPH_RULE_NUM_PER_ENTRY)) {
+					memset(buff, 0, sizeof(buff));
+					offset = 0;
+					offset += sprintf(buff+offset, "P-bit:%d, number:%d rule_idx:",
+						pbit_idx, p_pbits_tbl->def_flow_rule[pbit_idx].num);
+					for (rule_idx = 0;
+						rule_idx < p_pbits_tbl->def_flow_rule[pbit_idx].num;
+						rule_idx++)
+						offset += sprintf(buff+offset, "[%d]%d ",
+						rule_idx, p_pbits_tbl->def_flow_rule[pbit_idx].rule_idx[rule_idx]);
+					pr_info("%s\n", buff);
+				}
+			}
+		}
+	}
+
+	pr_info("\nP-bits table D/S\n----------------------------------\n");
+	/* Traverse CPH D/S P-bits table */
+	for (idx = 0; idx < MV_CPH_MAX_PBITS_MAP_TABLE_SIZE; idx++) {
+		p_pbits_tbl = &p_cph_db->pbits_tbl[CPH_DIR_DS][idx];
+
+		if (p_pbits_tbl->in_use == TRUE) {
+			pr_info("\nP-bits table:%d\nflow rule:\n", idx);
+			for (pbit_idx = 0; pbit_idx < MV_CPH_PBITS_MAP_MAX_ENTRY_NUM; pbit_idx++) {
+				if ((p_pbits_tbl->flow_rule[pbit_idx].num > 0) &&
+				    (p_pbits_tbl->flow_rule[pbit_idx].num < MV_CPH_RULE_NUM_PER_ENTRY)) {
+					memset(buff, 0, sizeof(buff));
+					offset = 0;
+					offset += sprintf(buff+offset, "P-bit:%d, number:%d rule_idx:",
+						pbit_idx, p_pbits_tbl->flow_rule[pbit_idx].num);
+					for (rule_idx = 0; rule_idx < p_pbits_tbl->flow_rule[pbit_idx].num; rule_idx++)
+						offset += sprintf(buff+offset, "[%d]%d ",
+						rule_idx, p_pbits_tbl->flow_rule[pbit_idx].rule_idx[rule_idx]);
+					pr_info("%s\n\n", buff);
+				}
+			}
+			pr_info("default flow rule:\n");
+			for (pbit_idx = 0; pbit_idx < MV_CPH_PBITS_MAP_MAX_ENTRY_NUM; pbit_idx++) {
+				if ((p_pbits_tbl->def_flow_rule[pbit_idx].num > 0) &&
+				    (p_pbits_tbl->def_flow_rule[pbit_idx].num < MV_CPH_RULE_NUM_PER_ENTRY)) {
+					memset(buff, 0, sizeof(buff));
+					offset = 0;
+					offset += sprintf(buff+offset, "P-bit:%d, number:%d rule_idx:",
+						pbit_idx, p_pbits_tbl->def_flow_rule[pbit_idx].num);
+					for (rule_idx = 0;
+						rule_idx < p_pbits_tbl->def_flow_rule[pbit_idx].num;
+						rule_idx++)
+						offset += sprintf(buff+offset, "[%d]%d ", rule_idx,
+						p_pbits_tbl->def_flow_rule[pbit_idx].rule_idx[rule_idx]);
+					pr_info("%s\n\n", buff);
+				}
+			}
+		}
+	}
+
+	/* Print MC flow rule entries */
+	pr_info("MV_CPH MC Flow Rule Table\n----------------------------------\n");
+	pr_info("Total rule number:%d, Max rule number:%d\n", p_mc_tbl->rule_num, CPH_FLOW_ENTRY_NUM);
+
+	pr_info("----------------------------------------------------------------------------------------------------------------------------------------------------------------\n");
+	pr_info("                                         |Parse outer       |Parse inner       |Mod outer         |Mod Inner         |Forward\n");
+	pr_info("rule_idx dir default parse_bm mh   ety    tpid   vid  pbits  tpid   vid  pbits  tpid   vid  pbits  tpid   vid  pbits  port queue hwf_queue gem  count    op_type\n");
+	/* Traverse CPH flow rule table */
+	for (idx = 0, rule_idx = 0; (idx < CPH_FLOW_ENTRY_NUM) && (rule_idx < p_mc_tbl->rule_num); idx++) {
+		p_flow_rule = &p_mc_tbl->flow_rule[idx];
+		/* Compare parse_bm and parse_key */
+		if (p_flow_rule->valid == TRUE) {
+			rule_idx++;
+
+			pr_info(
+			       "%-8d %2.2s  %3.3s     0x%04x   %-4d 0x%04x 0x%04x %-4d %1d      0x%04x %-4d %1d      " \
+			       "0x%04x %-4d %1d      0x%04x %-4d %1d      %1d    %1d     %1d         %-4d %-8d %s\n",
+			       idx,
+			       cph_app_lookup_dir(p_flow_rule->dir), (p_flow_rule->is_default == TRUE) ? "Yes" : "No ",
+			       p_flow_rule->parse_bm, p_flow_rule->mh, p_flow_rule->eth_type,
+			       p_flow_rule->parse_outer_tci.tpid, p_flow_rule->parse_outer_tci.vid,
+			       p_flow_rule->parse_outer_tci.pbits,
+			       p_flow_rule->parse_inner_tci.tpid, p_flow_rule->parse_inner_tci.vid,
+			       p_flow_rule->parse_inner_tci.pbits,
+			       p_flow_rule->mod_outer_tci.tpid,   p_flow_rule->mod_outer_tci.vid,
+			       p_flow_rule->mod_outer_tci.pbits,
+			       p_flow_rule->mod_inner_tci.tpid,   p_flow_rule->mod_inner_tci.vid,
+			       p_flow_rule->mod_inner_tci.pbits,
+			       p_flow_rule->pkt_frwd.trg_port,    p_flow_rule->pkt_frwd.trg_queue,
+			       p_flow_rule->pkt_frwd.trg_hwf_queue, p_flow_rule->pkt_frwd.gem_port,
+			       p_flow_rule->count,                cph_flow_lookup_op_type(p_flow_rule->op_type));
+		}
+	}
+
+	/* Print  DSCP to P-bits mapping table */
+	offset = 0;
+	pr_info("\nMV_CPH DSCP to P-bits Mapping Table\n----------------------------------\n");
+	if (p_cph_db->dscp_tbl.in_use == FALSE)
+		pr_info("No DSCP to P-bits mapping\n");
+	else {
+		pr_info("DSCP[Pbits]\n");
+		memset(buff, 0, sizeof(buff));
+		for (idx = 0; idx < MV_CPH_DSCP_PBITS_TABLE_MAX_SIZE; idx++) {
+			offset += sprintf(buff+offset, "%2.2d[%2.2d] ", idx, p_cph_db->dscp_tbl.pbits[idx]);
+			if (((idx+1) % 16) == 0)
+				offset += sprintf(buff+offset, "\n");
+		}
+		pr_info("%s\n", buff);
+	}
+	pr_info("\n");
+
+	return MV_OK;
+}
+
+/******************************************************************************
+* cph_flow_init()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Initializes CPH flow mapping data structure.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int  cph_flow_init(void)
+{
+	MV_STATUS rc = MV_OK;
+
+	rc = cph_flow_db_init();
+	CHECK_API_RETURN_AND_LOG_ERROR(rc, "Fail to call cph_flow_db_init");
+
+	return rc;
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_flow.h b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_flow.h
new file mode 100644
index 000000000000..c14ff07cfbed
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_flow.h
@@ -0,0 +1,687 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	*   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+	*   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+	*   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+********************************************************************************
+* mv_cph_flow.h
+*
+* DESCRIPTION: Marvell CPH(CPH Packet Handler) flow module to handle the
+*              flow mapping, VLAN modification of data traffic
+*
+* DEPENDENCIES:
+*               None
+*
+* CREATED BY:   VictorGu
+*
+* DATE CREATED: 12Dec2011
+*
+* FILE REVISION NUMBER:
+*               Revision: 1.1
+*
+*
+*******************************************************************************/
+#ifndef _MV_CPH_FLOW_H_
+#define _MV_CPH_FLOW_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/******************************************************************************
+*                        Data Enum and Structure
+******************************************************************************/
+#define MV_CPH_TPID_NOT_CARE_VALUE       (0)     /* Does not care for TPID     */
+#define MV_CPH_ZERO_VALUE                (0)     /* zero value                 */
+#define MV_CPH_VID_NOT_CARE_VALUE        (4096)  /* Does not care for VID      */
+#define MV_CPH_PBITS_NOT_CARE_VALUE      (8)     /* Does not care for P-bits   */
+#define MV_CPH_DSCP_NOT_CARE_VALUE       (64)    /* Does not care for DSCP     */
+
+#define MV_CPH_TPID_INVALID_VALUE        (0xFFFF)/* No valid TPID              */
+#define MV_CPH_VID_INVALID_VALUE         (0xFFFF)/* No valid VID               */
+#define MV_CPH_PBITS_INVALID_VALUE       (0xFF)  /* No valid P-bits            */
+#define MV_CPH_DSCP_INVALID_VALUE        (0xFF)  /* No valid DSCP              */
+
+#define MV_CPH_DEFAULT_UNTAG_RULE        (4096+1)/* Default untagged  rule     */
+#define MV_CPH_DEFAULT_SINGLE_TAG_RULE   (4096+2)/* Default sinlge tagged  rule*/
+#define MV_CPH_DEFAULT_DOUBLE_TAG_RULE   (4096+3)/* Default double tagged  rule*/
+
+#define MV_CPH_PBITS_TABLE_INVALID_INDEX (0xFF)     /* Invalid Pbits table index value in VID index table*/
+
+/* VLAN ID index table definition for flow mapping */
+#define MV_CPH_VID_INDEX_TABLE_MAX_SIZE  (4096+4)
+struct CPH_VID_IDX_TBL_T {
+	unsigned char pbit_tbl_idx[MV_CPH_VID_INDEX_TABLE_MAX_SIZE];
+};
+
+/* P-bits flow mapping table definition */
+#define MV_CPH_RULE_NUM_PER_ENTRY   (16)
+struct CPH_PBITS_ENTRY_T {
+	unsigned short    num;        /* total valid cph flow rule number */
+	unsigned short    rule_idx[MV_CPH_RULE_NUM_PER_ENTRY]; /* index to flow rule */
+};
+
+#define MV_CPH_PBITS_MAP_MAX_ENTRY_NUM   (8+1)
+#define MV_CPH_MAX_PBITS_MAP_TABLE_SIZE  (64)
+#define MV_CPH_RESERVED_PBITS_TABLE_NUM  (4)
+
+struct CPH_PBITS_TABLE_T {
+	bool               in_use;
+	struct CPH_PBITS_ENTRY_T  flow_rule[MV_CPH_PBITS_MAP_MAX_ENTRY_NUM];
+	struct CPH_PBITS_ENTRY_T  def_flow_rule[MV_CPH_PBITS_MAP_MAX_ENTRY_NUM];
+};
+
+/* CPH flow mapping rule definition
+------------------------------------------------------------------------------*/
+enum CPH_VLAN_OP_TYPE_E {
+	CPH_VLAN_OP_ASIS                               = 0,
+	CPH_VLAN_OP_DISCARD                            = 1,
+	CPH_VLAN_OP_ADD                                = 2,
+	CPH_VLAN_OP_ADD_COPY_DSCP                      = 3,
+	CPH_VLAN_OP_ADD_COPY_OUTER_PBIT                = 4,
+	CPH_VLAN_OP_ADD_COPY_INNER_PBIT                = 5,
+	CPH_VLAN_OP_ADD_2_TAGS                         = 6,
+	CPH_VLAN_OP_ADD_2_TAGS_COPY_DSCP               = 7,
+	CPH_VLAN_OP_ADD_2_TAGS_COPY_PBIT               = 8,
+	CPH_VLAN_OP_REM                                = 9,
+	CPH_VLAN_OP_REM_2_TAGS                         = 10,
+	CPH_VLAN_OP_REPLACE                            = 11,
+	CPH_VLAN_OP_REPLACE_VID                        = 12,
+	CPH_VLAN_OP_REPLACE_PBIT                       = 13,
+	CPH_VLAN_OP_REPLACE_INNER_ADD_OUTER            = 14,
+	CPH_VLAN_OP_REPLACE_INNER_ADD_OUTER_COPY_PBIT  = 15,
+	CPH_VLAN_OP_REPLACE_INNER_REM_OUTER            = 16,
+	CPH_VLAN_OP_REPLACE_2TAGS                      = 17,
+	CPH_VLAN_OP_REPLACE_2TAGS_VID                  = 18,
+	CPH_VLAN_OP_SWAP                               = 19
+};
+
+struct CPH_FLOW_FRWD_T {
+	unsigned char  trg_port;
+	unsigned char  trg_queue;
+	unsigned char  trg_hwf_queue;
+	unsigned short gem_port;
+};
+
+struct CPH_FLOW_TCI_T {
+	unsigned short  tpid;
+	unsigned short  vid;
+	unsigned char   pbits;
+};
+
+enum CPH_FLOW_PARSE_E {
+	CPH_FLOW_PARSE_MH        = 0x01,  /* parsing Marvell header                          */
+	CPH_FLOW_PARSE_EXT_VLAN  = 0x02,  /* parsing external VLAN tag                       */
+	CPH_FLOW_PARSE_TWO_VLAN  = 0x04,  /* parsing both of external and internal VLAN tags */
+	CPH_FLOW_PARSE_ETH_TYPE  = 0x08,  /* parsing Ethernet type                           */
+	CPH_FLOW_PARSE_MC_PROTO  = 0x10,  /* parsing multicast protocol                      */
+};
+
+enum CPH_TCI_FIELD_E {
+	CPH_TCI_FIELD_VID,
+	CPH_TCI_FIELD_CFI,
+	CPH_TCI_FIELD_PBIT,
+	CPH_TCI_FIELD_VID_PBIT,
+	CPH_TCI_FIELD_ALL,
+};
+
+struct CPH_FLOW_ENTRY_T {
+	bool               valid;
+	enum CPH_DIR_E          dir;
+	enum CPH_FLOW_PARSE_E   parse_bm;
+	bool               is_default;
+	unsigned short             mh;
+	struct CPH_FLOW_TCI_T     parse_outer_tci;
+	struct CPH_FLOW_TCI_T     parse_inner_tci;
+	unsigned short             eth_type;
+	enum CPH_VLAN_OP_TYPE_E op_type;
+	struct CPH_FLOW_TCI_T     mod_outer_tci;
+	struct CPH_FLOW_TCI_T     mod_inner_tci;
+	struct CPH_FLOW_FRWD_T    pkt_frwd;
+	unsigned int             count;
+};
+
+#define CPH_FLOW_ENTRY_NUM   (512)
+
+struct CPH_FLOW_TABLE_T {
+	unsigned int             rule_num;
+	struct CPH_FLOW_ENTRY_T   flow_rule[CPH_FLOW_ENTRY_NUM];
+};
+
+/* DSCP to P-bits mapping table definition
+------------------------------------------------------------------------------*/
+#define MV_CPH_DSCP_PBITS_TABLE_MAX_SIZE  (64)
+struct CPH_DSCP_PBITS_T {
+	unsigned int in_use;
+	unsigned char  pbits[MV_CPH_DSCP_PBITS_TABLE_MAX_SIZE];
+};
+
+/* CPH flow database
+------------------------------------------------------------------------------*/
+struct CPH_FLOW_DB_T {
+	spinlock_t         flow_lock;
+	struct CPH_VID_IDX_TBL_T  vid_idx_tbl[CPH_DIR_NUM];
+	struct CPH_PBITS_TABLE_T  pbits_tbl[CPH_DIR_NUM][MV_CPH_MAX_PBITS_MAP_TABLE_SIZE];
+	struct CPH_FLOW_TABLE_T   flow_tbl;
+	struct CPH_DSCP_PBITS_T   dscp_tbl;
+};
+
+/******************************************************************************
+ *                        Function Declaration
+ ******************************************************************************/
+/******************************************************************************
+* cph_flow_add_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Sets flow mapping rule
+*
+* INPUTS:
+*       cph_flow - VLAN ID, 802.1p value, pkt_fwd information.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_flow_add_rule(struct CPH_FLOW_ENTRY_T *cph_flow);
+
+/******************************************************************************
+* cph_flow_del_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Deletes flow mapping rule
+*
+* INPUTS:
+*       cph_flow - VLAN ID, 802.1p value, pkt_fwd information.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int  cph_flow_del_rule(struct CPH_FLOW_ENTRY_T *cph_flow);
+
+/******************************************************************************
+* cph_flow_clear_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Clears all flow mapping rules
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_flow_clear_rule(void);
+
+/******************************************************************************
+* cph_flow_clear_rule_by_mh()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Clears flow mapping rules by MH
+*
+* INPUTS:
+*       mh   -  Marvell header.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_flow_clear_rule_by_mh(unsigned short mh);
+
+/******************************************************************************
+* cph_flow_get_tag_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Gets flow mapping rule for tagged frames.
+*
+* INPUTS:
+*       cph_flow - Input vid, pbits, dir
+*
+* OUTPUTS:
+*       cph_flow - output packet forwarding information, including GEM port,
+*                  T-CONT, queue and packet modification for VID, P-bits.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int  cph_flow_get_rule(struct CPH_FLOW_ENTRY_T *cph_flow);
+
+/******************************************************************************
+* cph_flow_db_get_rule_by_vid()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get CPH flow mapping rule by VID, only used to compare packet and db rule.
+*
+* INPUTS:
+*       cph_flow   - Flow parsing field values
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_flow_db_get_rule_by_vid(struct CPH_FLOW_ENTRY_T *cph_flow);
+
+/******************************************************************************
+* cph_flow_set_dscp_map()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Sets DSCP to P-bits mapping rules
+*
+* INPUTS:
+*       dscp_map  - DSCP to P-bits mapping rules.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_flow_set_dscp_map(struct CPH_DSCP_PBITS_T *dscp_map);
+
+/******************************************************************************
+* cph_flow_del_dscp_map()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Deletes DSCP to P-bits mapping rules
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_flow_del_dscp_map(void);
+
+/******************************************************************************
+* cph_flow_add_vlan()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Add one VLAN tag behind of source MAC address.
+*
+* INPUTS:
+*       mh     - Whether has MH or not
+*       p_data - Pointer to packet
+*       tpid   - Type of VLAN ID
+*       vid    - VLAN to be added
+*       pbits  - P-bits value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       The shift of SKB data.
+*******************************************************************************/
+INLINE int cph_flow_add_vlan(bool mh, unsigned char *p_data, unsigned short tpid,
+	unsigned short vid, unsigned char pbits);
+
+/******************************************************************************
+* cph_flow_del_vlan()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Delete one VLAN tag behind of source MAC address.
+*
+* INPUTS:
+*       mh     - Whether has MH or not
+*       p_data - Pointer to packet.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       The shift of SKB data.
+*******************************************************************************/
+INLINE int cph_flow_del_vlan(bool mh, unsigned char *p_data);
+
+/******************************************************************************
+* cph_flow_replace_vlan()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Replace one VLAN tag behind of source MAC address.
+*
+* INPUTS:
+*       mh     - Whether has MH or not
+*       p_data - Pointer to packet
+*       tpid   - Type of VLAN ID
+*       vid    - VLAN to be added
+*       pbits  - P-bits value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       The shift of SKB data.
+*******************************************************************************/
+INLINE int cph_flow_replace_vlan(bool mh, unsigned char *p_data, unsigned short tpid,
+	unsigned short vid, unsigned char pbits);
+
+/******************************************************************************
+* cph_flow_swap_vlan()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Swap between two VLAN tag.
+*
+* INPUTS:
+*       mh     - Whether has MH or not
+*       p_data - Pointer to packet
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       The shift of SKB data.
+*******************************************************************************/
+INLINE int cph_flow_swap_vlan(bool mh, unsigned char *p_data);
+
+/******************************************************************************
+* cph_flow_strip_vlan()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Delete all VLAN tags behind of source MAC address.
+*
+* INPUTS:
+*       mh     - Whether has MH or not
+*       p_data - Pointer to packet.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       The shift of SKB data.
+*******************************************************************************/
+INLINE int cph_flow_strip_vlan(bool mh, unsigned char *p_data);
+
+/******************************************************************************
+* cph_flow_compare_rules()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Comparse two flow rules.
+*
+* INPUTS:
+*       parse_rule  - The parsing field values come from the packets
+*       db_rule     - The flow rule stored in flow database
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       In case same, return TRUE,
+*       In case different, return FALSE.
+*******************************************************************************/
+bool cph_flow_compare_rules(struct CPH_FLOW_ENTRY_T *parse_rule, struct CPH_FLOW_ENTRY_T *db_rule);
+
+/******************************************************************************
+* cph_flow_compare_packet_and_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Compare flow packet and rule.
+*
+* INPUTS:
+*       packet_rule - The parsing field values come from the packets
+*       db_rule     - The flow rule stored in flow database
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       In case same, return TRUE,
+*       In case different, return FALSE.
+*******************************************************************************/
+bool cph_flow_compare_packet_and_rule(struct CPH_FLOW_ENTRY_T *packet_rule, struct CPH_FLOW_ENTRY_T *db_rule);
+
+/******************************************************************************
+* cph_flow_compare_packet_and_rule_vid()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Compare flow packet and rule w/ only VID.
+*
+* INPUTS:
+*       packet_rule - The parsing field values come from the packets
+*       db_rule     - The flow rule stored in flow database
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       In case same, return TRUE,
+*       In case different, return FALSE.
+*******************************************************************************/
+bool cph_flow_compare_packet_and_rule_vid(struct CPH_FLOW_ENTRY_T *packet_rule, struct CPH_FLOW_ENTRY_T *db_rule);
+
+/******************************************************************************
+* cph_flow_parse_packet()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Parse packet and output flow information.
+*
+* INPUTS:
+*       port - Source GMAC port
+*       data - Pointer to packet
+*       rx   - Whether in RX dir
+*
+* OUTPUTS:
+*       flow - Flow parsing field values
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_flow_parse_packet(int port, unsigned char *data, bool rx, bool mh, struct CPH_FLOW_ENTRY_T *flow);
+
+/******************************************************************************
+* cph_flow_mod_packet()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Modify packet according to flow rule
+*
+* INPUTS:
+*       skb        - Pointer to packet
+*       mh         - Whether has MH or not
+*       flow       - Flow parsing field values
+*       out_offset - Offset of packet
+*       rx         - Whether RX or TX
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_flow_mod_packet(struct sk_buff *skb,  bool mh, struct CPH_FLOW_ENTRY_T *flow, int *out_offset);
+
+/******************************************************************************
+* cph_flow_mod_frwd()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Modify forwarding parameter of transmiting packet according to flow rule
+*
+* INPUTS:
+*       flow        - Flow parsing field values
+*       tx_spec_out - TX descriptor
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_flow_mod_frwd(struct CPH_FLOW_ENTRY_T *flow, struct mv_pp2_tx_spec *tx_spec_out);
+
+/******************************************************************************
+* cph_flow_send_packet()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: CPH function to handle the received application packets
+*
+* INPUTS:
+*       dev_out     - Net device
+*       pkt         - Marvell packet information
+*       tx_spec_out - TX descriptor
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns 1.
+*       On error returns 0.
+*******************************************************************************/
+MV_STATUS cph_flow_send_packet(struct net_device *dev_out,  struct eth_pbuf *pkt,
+	struct mv_pp2_tx_spec *tx_spec_out);
+
+/******************************************************************************
+* cph_flow_db_get_rule()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Get CPH flow mapping rule.
+*
+* INPUTS:
+*       flow       - Flow parsing field values
+*       for_packet - Whether get rule for packet or for new CPH rule
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+MV_STATUS cph_flow_db_get_rule(struct CPH_FLOW_ENTRY_T *cph_flow, bool for_packet);
+
+/******************************************************************************
+* cph_flow_lookup_op_type()
+* _____________________________________________________________________________
+*
+* DESCRIPTION:lookup operation type string according to value
+*
+* INPUTS:
+*       enum_value - The enum value to be matched
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       Enum string
+*******************************************************************************/
+char *cph_flow_lookup_op_type(int enum_value);
+
+/******************************************************************************
+* cph_flow_display_all()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: The function displays valid flow mapping tables and DSCP
+*              to P-bits mapping tablefor untagged frames.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_flow_display_all(void);
+
+/******************************************************************************
+* cph_flow_init()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Initializes CPH flow mapping data structure.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_flow_init(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _MV_CPH_FLOW_MAP_H_ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_header.h b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_header.h
new file mode 100644
index 000000000000..84142133f276
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_header.h
@@ -0,0 +1,97 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	*   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+	*   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+	*   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+********************************************************************************
+* mv_cph_header.h
+*
+* DESCRIPTION: Marvell CPH(CPH Packet Handler) header file
+*
+* DEPENDENCIES:
+*               None
+*
+* CREATED BY:   VictorGu
+*
+* DATE CREATED: 22Jan2013
+*
+* FILE REVISION NUMBER:
+*               Revision: 1.0
+*
+*
+*******************************************************************************/
+#ifndef _MV_CPH_HEADER_H_
+#define _MV_CPH_HEADER_H_
+
+/* Include Files
+------------------------------------------------------------------------------*/
+#include <mvCommon.h>
+#include <../net_dev/mv_netdev.h>
+
+#include "mv_cph_infra.h"
+#include "mv_cph_app.h"
+#include "mv_cph_flow.h"
+#include "mv_cph_db.h"
+#include "mv_cph_api.h"
+#include "mv_cph_mng_if.h"
+#include "mv_cph_dev.h"
+#include "mv_cph_netdev.h"
+#include "mv_cph_sysfs.h"
+
+#endif /* _MV_CPH_HEADER_H_ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_infra.c b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_infra.c
new file mode 100644
index 000000000000..8761b4d1ed2b
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_infra.c
@@ -0,0 +1,238 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	*   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+	*   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+	*   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+********************************************************************************
+* mv_cph_infra.c
+*
+* DESCRIPTION: Include user space infrastructure modules definitions
+*
+* DEPENDENCIES:
+*               None
+*
+* CREATED BY:   VictorGu
+*
+* DATE CREATED: 22Jan2013
+*
+* FILE REVISION NUMBER:
+*               Revision: 1.0
+*
+*
+*******************************************************************************/
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "mv_cph_header.h"
+
+/******************************************************************************
+* Variable Definition
+******************************************************************************/
+char g_cph_unknown_str[] = "<unknown>";
+
+/******************************************************************************
+* Function Definition
+******************************************************************************/
+/******************************************************************************
+* mindex_tpm_src_to_app_port()
+*
+* DESCRIPTION:Convert TPM source port to application UNI port
+*
+* INPUTS:
+*       src_port    - TPM source port
+*
+* OUTPUTS:
+*       Application UNI port index
+*
+* RETURNS:
+*       On success, the function returns application UNI port index.
+*       On error return invalid application UNI port index.
+*******************************************************************************/
+enum MV_APP_ETH_PORT_UNI_E mindex_tpm_src_to_app_port(enum tpm_src_port_type_t src_port)
+{
+	enum MV_APP_ETH_PORT_UNI_E app_port = MV_APP_ETH_PORT_INVALID;
+
+	/* Should modify below code in case support more than four UNI ports */
+	if (src_port <= TPM_SRC_PORT_UNI_3)
+		app_port = MV_APP_ETH_PORT_INDEX_MIN + (src_port - TPM_SRC_PORT_UNI_0);
+
+	return app_port;
+}
+
+/******************************************************************************
+* mindex_mh_to_app_llid()
+*
+* DESCRIPTION:Convert Marvell header to application LLID
+*
+* INPUTS:
+*       mh  - Marvell header
+*
+* OUTPUTS:
+*       Application LLID
+*
+* RETURNS:
+*       On success, the function returns application LLID.
+*       On error return invalid application LLID.
+*******************************************************************************/
+enum MV_TCONT_LLID_E mindex_mh_to_app_llid(unsigned short mh)
+{
+	enum MV_TCONT_LLID_E llid       = MV_TCONT_LLID_INVALID;
+	unsigned char           llid_index = 0;
+
+	llid_index = (mh >> 8) & 0x0f;
+
+	if (llid_index > 0) {
+		if (0x0f == llid_index) {
+			llid = MV_TCONT_LLID_BROADCAST;
+		} else {
+			llid = llid_index - 1;
+			if (llid > MV_TCONT_LLID_7)
+				llid = MV_TCONT_LLID_INVALID;
+		}
+	}
+
+	return llid;
+}
+
+/******************************************************************************
+* mtype_get_digit_num()
+*
+* DESCRIPTION:Convert character string to digital number
+*
+* INPUTS:
+*       str   - Character string
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       Digital numbe
+*******************************************************************************/
+unsigned int mtype_get_digit_num(const char  *str)
+{
+	unsigned int  val = 0;
+
+	if ((str[1] == 'x') || (str[1] == 'X'))
+		sscanf(&str[2], "%x", &val);
+	else
+		val = simple_strtoul(str, NULL, 10);
+
+	return val;
+}
+
+/******************************************************************************
+* mtype_lookup_enum_str()
+* _____________________________________________________________________________
+*
+* DESCRIPTION:lookup enum string according to enum value
+*
+* INPUTS:
+*       p_enum_array   - Pointer to enum array
+*       enum_value     - The enum value to be matched
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       Enum string
+*******************************************************************************/
+char *mtype_lookup_enum_str(struct MV_ENUM_ARRAY_T *p_enum_array, int enum_value)
+{
+	int idx;
+
+	for (idx = 0; idx < p_enum_array->enum_num; idx++) {
+		if (enum_value == p_enum_array->enum_array[idx].enum_value)
+			return p_enum_array->enum_array[idx].enum_str;
+	}
+	return g_cph_unknown_str;
+}
+
+/******************************************************************************
+* mutils_is_frwd_broadcast_packet()
+* _____________________________________________________________________________
+*
+* DESCRIPTION:Check whether packet is directly forwarded broadcast one
+*
+* INPUTS:
+*       data   - packet data
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       TRUE: broadcast packet, FALSE:none broadcast packet
+*******************************************************************************/
+bool mutils_is_frwd_broadcast_packet(char *data)
+{
+	char bc_mac[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+	char *p_data;
+
+	p_data = data + MV_ETH_MH_SIZE;
+
+	if (!memcmp(p_data, &bc_mac[0], sizeof(bc_mac)))
+		return TRUE;
+	else
+		return FALSE;
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_infra.h b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_infra.h
new file mode 100644
index 000000000000..0faec56015a9
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_infra.h
@@ -0,0 +1,499 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	*   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+	*   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+	*   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+********************************************************************************
+* mv_cph_infra.h
+*
+* DESCRIPTION: Include user space infrastructure modules definitions
+*
+* DEPENDENCIES:
+*               None
+*
+* CREATED BY:   VictorGu
+*
+* DATE CREATED: 22Jan2013
+*
+* FILE REVISION NUMBER:
+*               Revision: 1.0
+*
+*
+*******************************************************************************/
+#ifndef _MV_CPH_INFRA_H_
+#define _MV_CPH_INFRA_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/******************************************************************************
+* Data Type Definition
+******************************************************************************/
+
+#define TRUE               true
+#define FALSE              false
+
+
+/******************************************************************************
+* Port Index Definition
+******************************************************************************/
+/* Ethernet port index */
+#define MV_APP_ETH_PORT_INDEX_MIN 1
+#define MV_APP_ETH_PORT_INDEX_MAX 4
+#define MV_APP_ETH_PORT_NUM       (MV_APP_ETH_PORT_INDEX_MAX-MV_APP_ETH_PORT_INDEX_MIN+1)
+#define MV_APP_ETH_PORTS_NUM      4
+enum MV_APP_ETH_PORT_UNI_E {
+	MV_APP_ETH_PORT_UNI_0   = 1,
+	MV_APP_ETH_PORT_UNI_1,
+	MV_APP_ETH_PORT_UNI_2,
+	MV_APP_ETH_PORT_UNI_3,
+	MV_APP_ETH_PORT_UNI_4,
+	MV_APP_ETH_PORT_UNI_5,
+	MV_APP_ETH_PORT_UNI_6,
+	MV_APP_ETH_PORT_UNI_7,
+	MV_APP_ETH_PORT_INVALID
+};
+
+/* VoIP port index */
+#define MV_APP_VOIP_PORT_INDEX_MIN 1
+#define MV_APP_VOIP_PORT_INDEX_MAX 2
+#define MV_APP_VOIP_PORT_NUM       (MV_APP_VOIP_PORT_INDEX_MAX-MV_APP_VOIP_PORT_INDEX_MIN+1)
+
+enum MV_APP_VOIP_PORT_E {
+	MV_APP_VOIP_PORT_0 = 1,
+	MV_APP_VOIP_PORT_1
+};
+
+/* WIFI SSID port index */
+#define MV_APP_SSID_INDEX_MIN 1
+#define MV_APP_SSID_INDEX_MAX 4
+enum MV_APP_SSID_E {
+	MV_APP_SSID_0 = 1,
+	MV_APP_SSID_1,
+	MV_APP_SSID_2,
+	MV_APP_SSID_3,
+	MV_APP_SSID_4,
+	MV_APP_SSID_5,
+	MV_APP_SSID_6,
+	MV_APP_SSID_7
+};
+
+/* USB port index */
+#define MV_APP_USB_PORT_INDEX_MIN 1
+#define MV_APP_USB_PORT_INDEX_MAX 2
+#define MV_APP_USB_PORT_NUM       (MV_APP_USB_PORT_INDEX_MAX-MV_APP_USB_PORT_INDEX_MIN+1)
+
+enum MV_APP_USB_PORT_E {
+	MV_APP_USB_PORT_0 = 1,
+	MV_APP_USB_PORT_1
+};
+
+/******************************************************************************
+ * GE MAC Port Index Definition
+ ******************************************************************************/
+enum MV_APP_GMAC_PORT_E {
+	MV_APP_GMAC_PORT_0       = 0,
+	MV_APP_GMAC_PORT_1,
+	MV_APP_LPBK_PORT,
+	MV_APP_PON_MAC_PORT,
+	MV_APP_GMAC_PORT_NUM,
+	MV_APP_GMAC_PORT_INVALID = 0xff,
+};
+
+/******************************************************************************
+ * T-CONT/LLID Index Definition
+ ******************************************************************************/
+#define MV_TCONT_LLID_MIN 0
+#define MV_TCONT_LLID_MAX 7
+#define MV_TCONT_LLID_NUM (MV_TCONT_LLID_MAX-MV_TCONT_LLID_MIN+1)
+
+enum MV_TCONT_LLID_E {
+	MV_TCONT_LLID_0 = 0,
+	MV_TCONT_LLID_1,
+	MV_TCONT_LLID_2,
+	MV_TCONT_LLID_3,
+	MV_TCONT_LLID_4,
+	MV_TCONT_LLID_5,
+	MV_TCONT_LLID_6,
+	MV_TCONT_LLID_7,
+	MV_TCONT_LLID_8,
+	MV_TCONT_LLID_9,
+	MV_TCONT_LLID_10,
+	MV_TCONT_LLID_11,
+	MV_TCONT_LLID_12,
+	MV_TCONT_LLID_13,
+	MV_TCONT_LLID_14,
+	MV_TCONT_LLID_15,
+	MV_TCONT_LLID_BROADCAST,
+	MV_TCONT_LLID_INVALID
+};
+
+/******************************************************************************
+* WAN and LAN Index Definition
+******************************************************************************/
+enum MV_APP_PORT_TYPE_E {
+	MV_APP_PORT_WAN = 0,
+	MV_APP_PORT_LAN,
+	MV_APP_PORT_INVALID,
+};
+
+/******************************************************************************
+* GEM Port Index Definition
+******************************************************************************/
+#define MV_GEM_PORT_MIN  0
+#define MV_GEM_PORT_MAX  4095
+#define MV_GEM_PORT_NUM  (MV_GEM_PORT_MAX-MV_GEM_PORT_MIN+1)
+#define MV_GEM_PORT_MASK 0x0FFF
+
+/******************************************************************************
+ * Queue Index Definition
+ ******************************************************************************/
+#define MV_QUEUE_MIN  0
+#define MV_QUEUE_MAX  7
+#define MV_QUEUE_NUM  (MV_QUEUE_MAX-MV_QUEUE_MIN+1)
+
+enum MV_QUEUE_E {
+	MV_QUEUE_0 = 0,
+	MV_QUEUE_1,
+	MV_QUEUE_2,
+	MV_QUEUE_3,
+	MV_QUEUE_4,
+	MV_QUEUE_5,
+	MV_QUEUE_6,
+	MV_QUEUE_7
+};
+
+/******************************************************************************
+* VLAN ID/P-bits Index Definition
+******************************************************************************/
+#define MV_VLAN_ID_MIN  0
+#define MV_VLAN_ID_MAX 4095
+#define MV_VLAN_ID_NUM (MV_VLAN_ID_MAX-MV_VLAN_ID_MIN+1)
+#define MV_VLAN_ID_MASK 0x0FFF
+#define MV_VLAN_ID_INVALID_VALUE 0xFFFF
+
+#define MV_PBITS_MIN   0
+#define MV_PBITS_MAX   7
+#define MV_PBITS_NUM  (MV_PBITS_MAX-MV_PBITS_MIN+1)
+#define MV_PBITS_SHIFT 13
+#define MV_PBITS_MASK 0x07
+#define MV_PBITS_INVALID_VALUE 0xFF
+
+
+/******************************************************************************
+* WAN Port State Definition
+******************************************************************************/
+enum MV_GE_PORT_STATE_E {
+	MV_GE_PORT_INACTIVE = 0,
+	MV_GE_PORT_ACTIVE,
+	MV_GE_PORT_INVALID,
+};
+
+/* Enum for well known TPID
+------------------------------------------------------------------------------*/
+#define MV_TPID_8100 0x8100
+#define MV_TPID_88A8 0x88A8
+#define MV_TPID_9100 0x9100
+#define MV_CPH_TPID_LEN      (2)
+#define MV_CPH_VLAN_TAG_LEN  (2)
+#define MV_CPH_ETH_TYPE_LEN  (2)
+#define MV_IPV4_PROTO_OFFSET (9)
+#define MV_ICMPV6_TYPE_MLD   (1)
+#define MV_CPH_ETH_TYPE_IPV4 (0x0800)
+#define MV_CPH_ETH_TYPE_IPV6 (0x86DD)
+
+
+/******************************************************************************
+* TPM Source port - used to specify through which port the packet entered the processor
+******************************************************************************/
+enum tpm_src_port_type_t {
+	TPM_SRC_PORT_UNI_0,        /* upstream */
+	TPM_SRC_PORT_UNI_1,        /* upstream */
+	TPM_SRC_PORT_UNI_2,        /* upstream */
+	TPM_SRC_PORT_UNI_3,        /* upstream */
+	TPM_SRC_PORT_UNI_4,        /* upstream */
+	TPM_SRC_PORT_UNI_5,        /* upstream */
+	TPM_SRC_PORT_UNI_6,        /* upstream */
+	TPM_SRC_PORT_UNI_7,        /* upstream */
+	TPM_SRC_PORT_UNI_VIRT,     /* upstream */
+	TPM_SRC_PORT_WAN,          /* downstram */
+	TPM_SRC_PORT_UNI_ANY,      /* upstream - all UNI ports */
+	TPM_SRC_PORT_WAN_OR_LAN,   /* Any Port, currently not supported */
+	TPM_SRC_PORT_ILLEGAL = 0xFF
+};
+
+/******************************************************************************
+* Check API return value
+******************************************************************************/
+
+#define INT_TO_MV_BOOL(var)    ((var) ? TRUE : FALSE)
+#define MV_BOOL_TO_INT(var)    (((var) == TRUE) ? 1 : 0)
+
+#define CHECK_API_RETURN_AND_LOG_ERROR(ret, log)\
+{\
+	if (ret != 0) {\
+		pr_err("%s(%d) error:%s , ret(%d)\n", __func__, __LINE__, (log), ret);\
+		return ret;\
+	} \
+} \
+
+#define CPH_IF_NULL(ptr)\
+{\
+	if (ptr == NULL) {\
+		pr_err("%s(%d) NULL pointer\n", __func__, __LINE__);\
+		return MV_BAD_VALUE;\
+	} \
+}
+
+#define CPH_IF_ERROR(rc, format, ...)\
+{\
+	if (rc) {\
+		pr_err("(error) %s(%d)(rc=%d):  "format , __func__, __LINE__, rc, ##__VA_ARGS__);\
+	return rc;\
+	} \
+}
+
+#define CPH_IF_EQUAL(var, expect, format, ...)\
+{\
+	if (var == expect) {\
+		pr_err("%s(%d) (var=%d): "format , __func__, __LINE__, var, ##__VA_ARGS__);\
+		return MV_BAD_VALUE;\
+	} \
+}
+
+#define CPH_IF_NOT_EQUAL(var, expect, format, ...)\
+{\
+	if (var != expect) {\
+		pr_err("%s(%d) (var!=%d): "format , __func__, __LINE__, var, ##__VA_ARGS__);\
+		return MV_BAD_VALUE;\
+	} \
+}
+
+#define CPH_IF_NULL_NO_RET(ptr)\
+{\
+	if (ptr == NULL) {\
+		pr_err("%s(%d) NULL pointer\n", __func__, __LINE__);\
+		return;\
+	} \
+}
+
+#define CPH_DECIMAL_RANGE_VALIDATE(value, min, max) {\
+	if (((value) > (max)) || ((value) < (min))) {\
+		pr_err("(error) %s(%d) value (%d/0x%x) is out of range[%d, %d]\n",\
+		    __func__, __LINE__, (value), (value), (min), (max));\
+		return MV_BAD_VALUE;\
+	} \
+}
+
+#define CPH_HEX_RANGE_VALIDATE(value, min, max) {\
+	if (((value) > (max)) || ((value) < (min))) {\
+		pr_err("(error) %s(%d) value(%d/0x%x) is out of range[0x%X, 0x%X]\n",\
+		    __func__, __LINE__, (value), (value), (min), (max));\
+		return MV_BAD_VALUE;\
+	} \
+}
+
+#define CPH_DECIMAL_RANGE_VALIDATE_STR(value, min, max, str) {\
+	if (((value) > (max)) || ((value) < (min))) {\
+		pr_err("(error) %s(%d) %s(%d/0x%x), out of range[%d, %d]\n",\
+		    __func__, __LINE__, (str), (value), (value), (min), (max));\
+		return MV_BAD_VALUE;\
+	} \
+}
+
+#define CPH_HEX_RANGE_VALIDATE_STR(value, min, max, str) {\
+	if (((value) > (max)) || ((value) < (min))) {\
+		pr_err("(error) %s(%d) %s(%d/0x%x), out of range[0x%X, 0x%X]\n",\
+		    __func__ , __LINE__, (str), (value), (value), (min), (max));\
+		return MV_BAD_VALUE;\
+	} \
+}
+
+#define CPH_BIT_RANGE_VALIDATE(value, str) {\
+	if (((value) != (0)) || ((value) != (1))) {\
+		pr_err("(error) %s(%d) %s(%d/0x%x), out of range[%d, %d]\n",\
+		    __func__, __LINE__, (str), (value), (value), (0), (1));\
+		return MV_BAD_VALUE;\
+	} \
+}
+
+#define CPH_POS_RANGE_VALIDATE(value, max, str) {\
+	if ((value) > (max)) {\
+		pr_err("(error) %s(%d) %s(%d/0x%x), out of range[%d, %d]\n",\
+		    __func__, __LINE__, (str), (value), (value), (0), (max));\
+		return MV_BAD_VALUE;\
+	} \
+}
+
+#define CPH_POS_HEX_RANGE_VALIDATE(value, max, str) {\
+	if ((value) > (max)) {\
+		pr_err("(error) %s(%d) %s(%d/0x%x), out of range[0x%X, 0x%X]\n",\
+		    __func__, __LINE__, (str), (value), (value), (0), (max));\
+		return MV_BAD_VALUE;\
+	} \
+}
+#define CPH_TBL_ENTRY_NUM(a) (sizeof(a)/sizeof(a[0]))
+
+/******************************************************************************
+* Enum for display
+******************************************************************************/
+struct MV_ENUM_ENTRY_T {
+	int  enum_value;
+	char  *enum_str;
+};
+
+struct MV_ENUM_ARRAY_T {
+	int            enum_num;
+	struct MV_ENUM_ENTRY_T *enum_array;
+};
+
+/******************************************************************************
+* Function Declaration
+******************************************************************************/
+/******************************************************************************
+* mindex_tpm_src_to_app_port()
+*
+* DESCRIPTION:Convert TPM source port to application UNI port
+*
+* INPUTS:
+*       src_port    - TPM source port
+*
+* OUTPUTS:
+*       Application UNI port index
+*
+* RETURNS:
+*       On success, the function returns application UNI port index.
+*       On error return invalid application UNI port index.
+*******************************************************************************/
+enum MV_APP_ETH_PORT_UNI_E mindex_tpm_src_to_app_port(enum tpm_src_port_type_t src_port);
+
+/******************************************************************************
+* mindex_mh_to_app_llid()
+*
+* DESCRIPTION:Convert Marvell header to application LLID
+*
+* INPUTS:
+*       mh  - Marvell header
+*
+* OUTPUTS:
+*       Application LLID
+*
+* RETURNS:
+*       On success, the function returns application LLID.
+*       On error return invalid application LLID.
+*******************************************************************************/
+enum MV_TCONT_LLID_E mindex_mh_to_app_llid(unsigned short mh);
+
+/******************************************************************************
+* mtype_get_digit_num()
+*
+* DESCRIPTION:Convert character string to digital number
+*
+* INPUTS:
+*       str   - Character string
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       Digital numbe
+*******************************************************************************/
+unsigned int mtype_get_digit_num(const char  *str);
+
+/******************************************************************************
+* mtype_lookup_enum_str()
+* _____________________________________________________________________________
+*
+* DESCRIPTION:lookup enum string according to enum value
+*
+* INPUTS:
+*       p_enum_array   - Pointer to enum array
+*       enum_value     - The enum value to be matched
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       Enum string
+*******************************************************************************/
+char *mtype_lookup_enum_str(struct MV_ENUM_ARRAY_T *p_enum_array, int enum_value);
+
+/******************************************************************************
+* mutils_is_frwd_broadcast_packet()
+* _____________________________________________________________________________
+*
+* DESCRIPTION:Check whether packet is directly forwarded broadcast one
+*
+* INPUTS:
+*       data   - packet data
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       TRUE: broadcast packet, FALSE:none broadcast packet
+*******************************************************************************/
+bool mutils_is_frwd_broadcast_packet(char *data);
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _MV_CPH_INFRA_H_ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_mng_if.h b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_mng_if.h
new file mode 100644
index 000000000000..157dcd537c12
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_mng_if.h
@@ -0,0 +1,162 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	*   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+	*   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+	*   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+********************************************************************************
+* mv_cph_mng_if.h
+*
+* DESCRIPTION: Marvell CPH(CPH Packet Handler) management interface definition
+*              for ioctl
+*
+* DEPENDENCIES:
+*               None
+*
+* CREATED BY:   VictorGu
+*
+* DATE CREATED: 11Dec2011
+*
+* FILE REVISION NUMBER:
+*               Revision: 1.1
+*
+*
+*******************************************************************************/
+#ifndef _MV_CPH_MNG_IF_H_
+#define _MV_CPH_MNG_IF_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* Include Files
+------------------------------------------------------------------------------*/
+#include <linux/cdev.h>
+
+/* Definitions
+------------------------------------------------------------------------------*/
+#define MV_CPH_IOCTL_SET_COMPLEX_PROFILE      _IOW(MV_CPH_IOCTL_MAGIC,  1,  unsigned int)
+#define MV_CPH_IOCTL_SET_FEATURE_FLAG         _IOW(MV_CPH_IOCTL_MAGIC,  2,  unsigned int)
+#define MV_CPH_IOCTL_APP_ADD_RULE             _IOW(MV_CPH_IOCTL_MAGIC,  3,  unsigned int)
+#define MV_CPH_IOCTL_APP_DEL_RULE             _IOW(MV_CPH_IOCTL_MAGIC,  4,  unsigned int)
+#define MV_CPH_IOCTL_APP_UPDATE_RULE          _IOW(MV_CPH_IOCTL_MAGIC,  5,  unsigned int)
+#define MV_CPH_IOCTL_APP_GET_RULE             _IOR(MV_CPH_IOCTL_MAGIC,  6,  unsigned int)
+#define MV_CPH_IOCTL_FLOW_ADD_RULE            _IOW(MV_CPH_IOCTL_MAGIC,  7,  unsigned int)
+#define MV_CPH_IOCTL_FLOW_DEL_RULE            _IOW(MV_CPH_IOCTL_MAGIC,  8,  unsigned int)
+#define MV_CPH_IOCTL_FLOW_GET_RULE            _IOR(MV_CPH_IOCTL_MAGIC,  9,  unsigned int)
+#define MV_CPH_IOCTL_FLOW_CLEAR_RULE          _IOW(MV_CPH_IOCTL_MAGIC,  10, unsigned int)
+#define MV_CPH_IOCTL_FLOW_CLEAR_RULE_BY_MH    _IOW(MV_CPH_IOCTL_MAGIC,  11, unsigned int)
+#define MV_CPH_IOCTL_FLOW_SET_DSCP_MAP        _IOW(MV_CPH_IOCTL_MAGIC,  12, unsigned int)
+#define MV_CPH_IOCTL_FLOW_DEL_DSCP_MAP        _IOW(MV_CPH_IOCTL_MAGIC,  13, unsigned int)
+#define MV_CPH_IOCTL_SET_TCONT_LLID_STATE     _IOW(MV_CPH_IOCTL_MAGIC,  14, unsigned int)
+#define MV_CPH_IOCTL_SETUP                    _IOW(MV_CPH_IOCTL_MAGIC,  15, unsigned int)
+
+/* Typedefs
+------------------------------------------------------------------------------*/
+struct CPH_IOCTL_APP_RULE_T {
+	enum CPH_APP_PARSE_FIELD_E parse_bm;
+	struct CPH_APP_PARSE_T       parse_key;
+	enum CPH_APP_MOD_FIELD_E   mod_bm;
+	struct CPH_APP_MOD_T         mod_value;
+	enum CPH_APP_FRWD_FIELD_E  frwd_bm;
+	struct CPH_APP_FRWD_T        frwd_value;
+};
+
+struct CPH_IOCTL_FLOW_MAP_T {
+	struct CPH_FLOW_ENTRY_T flow_map;
+};
+
+struct CPH_IOCTL_DSCP_MAP_T {
+	struct CPH_DSCP_PBITS_T dscp_map;
+};
+
+struct CPH_IOCTL_MISC_T {
+	enum tpm_eth_complex_profile_t profile_id;
+	enum MV_APP_GMAC_PORT_E        active_port;
+	enum CPH_APP_FEATURE_E         feature_type;
+	bool                      feature_flag;
+};
+
+struct CPH_IOCTL_TCONT_STATE_T {
+	unsigned int  tcont;
+	bool    state;
+};
+
+/* MV CPH Char Device Structure */
+struct CPH_CDEV_T {
+	struct CPH_IOCTL_APP_RULE_T    cph_ioctl_app_rule;
+	struct CPH_IOCTL_FLOW_MAP_T    cph_ioctl_flow_map;
+	struct CPH_IOCTL_DSCP_MAP_T    cph_ioctl_dscp_map;
+	struct CPH_IOCTL_MISC_T        cph_ioctl_misc;
+	struct CPH_IOCTL_TCONT_STATE_T cph_ioctl_tcont;
+
+	struct cdev             cdev;
+};
+
+/* Global variables
+------------------------------------------------------------------------------*/
+
+/* Global functions
+------------------------------------------------------------------------------*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _MV_CPH_MNG_IF_H_ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_mod.c b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_mod.c
new file mode 100644
index 000000000000..6150da7ca006
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_mod.c
@@ -0,0 +1,142 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	*   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+	*   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+	*   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+********************************************************************************
+* mv_cph_mod.c
+*
+* DESCRIPTION: Marvell CPH(CPH Packet Handler) module definition
+*
+* DEPENDENCIES:
+*               None
+*
+* CREATED BY:   VictorGu
+*
+* DATE CREATED: 22Jan2013
+*
+* FILE REVISION NUMBER:
+*               Revision: 1.0
+*
+*
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include "mv_cph_header.h"
+
+#define CPH_MODULE_VERSION  "22-Jan-2013"
+#define CPH_MODULE_DESC     "Marvell CPU Packet Handler Module"
+
+/******************************************************************************
+* cph_mod_exit()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Exit from CPH module
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       None.
+*******************************************************************************/
+static void __exit cph_mod_exit(void)
+{
+	cph_dev_shutdown();
+}
+
+/******************************************************************************
+* cph_mod_init()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Initialize CPH module
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+static int __init cph_mod_init(void)
+{
+	if (cph_dev_init() != 0) {
+		pr_err("\nCPH module initialization failed\n\n");
+		return MV_ERROR;
+	}
+
+	/* pr_info("\nCPH module inserted - %s\n\n", CPH_MODULE_VERSION); */
+
+	return MV_OK;
+}
+
+device_initcall_sync(cph_mod_init);
+
+module_exit(cph_mod_exit);
+
+MODULE_AUTHOR("Victor Gu");
+MODULE_VERSION(CPH_MODULE_VERSION);
+MODULE_DESCRIPTION(CPH_MODULE_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_netdev.c b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_netdev.c
new file mode 100644
index 000000000000..7e644b0bb515
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_netdev.c
@@ -0,0 +1,865 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	*   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+	*   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+	*   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+********************************************************************************
+* mv_cph_netdev.c
+*
+* DESCRIPTION: Marvell CPH(CPH Packet Handler) network device part definition
+*
+* DEPENDENCIES:
+*               None
+*
+* CREATED BY:   VictorGu
+*
+* DATE CREATED: 22Jan2013
+*
+* FILE REVISION NUMBER:
+*               Revision: 1.1
+*
+*
+*******************************************************************************/
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <linux/icmpv6.h>
+
+#include "mvDebug.h"
+#include "mv_cph_header.h"
+
+
+/******************************************************************************
+*                           Global Definition
+******************************************************************************/
+#define MV_MUX_SKB_TAG_VAL		(0xabcd)
+
+/* Total Eth port number */
+static int gs_mv_eth_port_num;
+
+
+#ifdef CONFIG_MV_CPH_UDP_SAMPLE_HANDLE
+static struct mv_port_tx_spec    udp_port_spec_cfg[MV_APP_ETH_PORTS_NUM];
+#define PORT_ENTRIES        CPH_TBL_ENTRY_NUM(udp_port_spec_cfg)
+#endif
+
+/******************************************************************************
+*                           External Declarations
+******************************************************************************/
+
+/******************************************************************************
+*                           Function Definitions
+******************************************************************************/
+/******************************************************************************
+* cph_rec_skb()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Send SKB packet to linux network and increse counter
+*
+* INPUTS:
+*       port    - Gmac port the packet from
+*       skb     - SKB buffer to receive packet
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       None.
+*******************************************************************************/
+void cph_rec_skb(int port, struct sk_buff *skb)
+{
+	unsigned int rx_status    = 0;
+	struct eth_port *pp = NULL;
+
+	rx_status = netif_receive_skb(skb);
+	pp        = mv_pp2_port_by_id(port);
+	if (rx_status)
+		STAT_DBG(pp->stats.rx_drop_sw++);
+}
+
+#ifdef CONFIG_MV_CPH_UDP_SAMPLE_HANDLE
+static inline void cph_copy_tx_spec(struct mv_pp2_tx_spec *tx_spec,
+					uint8_t txp, uint8_t txq,
+					uint16_t flags, uint32_t hw_cmd)
+{
+	tx_spec->txp = txp;
+	tx_spec->txq = txq;
+	tx_spec->hw_cmd[0] = hw_cmd;
+	tx_spec->flags = flags;
+}
+
+int cph_udp_spec_print(int port)
+{
+	int i;
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+	struct mv_udp_port_tx_spec *udp_spec;
+
+	if (!pp)
+		return -ENODEV;
+
+	udp_spec = &(udp_port_spec_cfg[port].udp_dst[0]);
+
+	pr_err("\n**** port #%d - TX UDP Dest Port configuration *****\n", port);
+	pr_err("----------------------------------------------------\n");
+	pr_err("ID udp_dst   txp    txq    flags    hw_cmd     func_add\n");
+	for (i = 0; i < sizeof(udp_port_spec_cfg[port].udp_dst)/sizeof(udp_port_spec_cfg[port].udp_dst[0]); i++) {
+		if (udp_spec[i].tx_spec.txq != MV_ETH_TXQ_INVALID)
+			pr_err("%2d   %04d      %d      %d     0x%04lx   0x%08x   0x%p\n",
+			       i, ntohs(udp_spec[i].udp_port),
+			       udp_spec[i].tx_spec.txp, udp_spec[i].tx_spec.txq,
+			       udp_spec[i].tx_spec.flags, udp_spec[i].tx_spec.hw_cmd[0],
+			       udp_spec[i].tx_spec.tx_func);
+	}
+	pr_err("-----------------------------------------------------\n");
+
+	udp_spec = &(udp_port_spec_cfg[port].udp_src[0]);
+
+	pr_err("**** port #%d - TX UDP Source Port configuration *****\n", port);
+	pr_err("-----------------------------------------------------\n");
+	pr_err("ID udp_src   txp    txq     flags    hw_cmd     func_add\n");
+	for (i = 0; i < sizeof(udp_port_spec_cfg[port].udp_src)/sizeof(udp_port_spec_cfg[port].udp_src[0]); i++) {
+		if (udp_spec[i].tx_spec.txq != MV_ETH_TXQ_INVALID)
+			pr_err("%2d   %04d      %d      %d     0x%04lx   0x%08x   0x%p\n",
+			       i, ntohs(udp_spec[i].udp_port),
+			       udp_spec[i].tx_spec.txp, udp_spec[i].tx_spec.txq,
+			       udp_spec[i].tx_spec.flags, udp_spec[i].tx_spec.hw_cmd[0],
+			       udp_spec[i].tx_spec.tx_func);
+	}
+	pr_err("**************************************************************\n");
+
+	return 0;
+}
+
+
+void cph_udp_spec_print_all(void)
+{
+	int port;
+
+	for (port = 0; port < MV_APP_ETH_PORTS_NUM; port++)
+		cph_udp_spec_print(port);
+}
+
+MV_STATUS  cph_udp_int_spec_set(struct mv_udp_port_tx_spec *udp_spec, uint16_t udp_port, int table_size,
+				uint8_t txp, uint8_t txq, uint16_t flags, uint32_t hw_cmd)
+{
+	int i;
+
+	/* Check if already exists */
+	for (i = 0; i < table_size; i++) {
+		if (udp_spec[i].udp_port == htons(udp_port) &&
+		    udp_spec[i].tx_spec.txq != MV_ETH_TXQ_INVALID) {
+			cph_copy_tx_spec(&(udp_spec[i].tx_spec), txp, txq, flags, hw_cmd);
+			return MV_OK;
+		}
+	}
+	/* Check empty */
+	for (i = 0; i < table_size; i++) {
+		if (udp_spec[i].tx_spec.txq == MV_ETH_TXQ_INVALID) {
+			udp_spec[i].udp_port = htons(udp_port);
+			cph_copy_tx_spec(&(udp_spec[i].tx_spec), txp, txq, flags, hw_cmd);
+			return MV_OK;
+		}
+	}
+
+	return MV_FULL;
+}
+
+
+MV_STATUS  cph_udp_src_spec_set(int tx_port, uint16_t udp_src_port, uint8_t txp,
+	uint8_t txq, uint16_t flags, uint32_t hw_cmd)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(tx_port);
+	struct mv_udp_port_tx_spec *udp_src_spec = udp_port_spec_cfg[tx_port].udp_src;
+	MV_STATUS mv_status;
+
+	if (!pp)
+		return -ENODEV;
+
+	mv_status = cph_udp_int_spec_set(udp_src_spec, udp_src_port,
+					MV_CPH_MAS_UDP_SRC_PORT,
+					txp, txq, flags, hw_cmd);
+
+	if (mv_status != MV_OK)
+		pr_err("%s: UDP Special Source Port Table is full\n", __func__);
+
+	return mv_status;
+}
+EXPORT_SYMBOL(cph_udp_src_spec_set);
+
+
+MV_STATUS  cph_udp_dest_spec_set(int tx_port, uint16_t udp_dest_port, uint8_t txp,
+	uint8_t txq, uint16_t flags, uint32_t hw_cmd)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(tx_port);
+	struct mv_udp_port_tx_spec *udp_dst_spec = udp_port_spec_cfg[tx_port].udp_dst;
+	MV_STATUS mv_status;
+
+	if (!pp)
+		return -ENODEV;
+
+	mv_status = cph_udp_int_spec_set(udp_dst_spec, udp_dest_port,
+					MV_CPH_MAS_UDP_DST_PORT,
+					txp, txq, flags, hw_cmd);
+
+	if (mv_status != MV_OK)
+		pr_err("%s: UDP Special Dest. Port Table is full\n", __func__);
+
+	return mv_status;
+}
+EXPORT_SYMBOL(cph_udp_dest_spec_set);
+
+
+void cph_udp_table_init(void)
+{
+	int num_ports = PORT_ENTRIES;
+	int tx_port, i;
+
+	if (num_ports > gs_mv_eth_port_num)
+		num_ports = gs_mv_eth_port_num;
+
+	for (tx_port = 0; tx_port < num_ports; tx_port++) {
+		/* Invalidate UDP Dest ports, set txq=invalid  */
+		for (i = 0; i < MV_CPH_MAS_UDP_DST_PORT; i++) {
+			memset(&(udp_port_spec_cfg[tx_port].udp_dst[i]), 0, sizeof(struct mv_udp_port_tx_spec));
+			udp_port_spec_cfg[tx_port].udp_dst[i].tx_spec.txq = MV_ETH_TXQ_INVALID;
+		}
+
+		/* Invalidate UDP Source ports, , set txq=invalid */
+		for (i = 0; i < MV_CPH_MAS_UDP_SRC_PORT; i++) {
+			memset(&(udp_port_spec_cfg[tx_port].udp_src[i]), 0, sizeof(struct mv_udp_port_tx_spec));
+			udp_port_spec_cfg[tx_port].udp_src[i].tx_spec.txq = MV_ETH_TXQ_INVALID;
+		}
+
+	}
+	return;
+}
+
+int cph_udp_port_tx(int port, struct net_device *dev, struct sk_buff *skb,
+		struct mv_pp2_tx_spec *tx_spec_out)
+{
+	struct iphdr  *iphdrp   = NULL;
+	struct udphdr *udphdrp  = NULL;
+	int i;
+
+	if (port >= MV_APP_ETH_PORTS_NUM) {
+		pr_err("Port Error\n");
+		return 0;
+	}
+
+	if (skb->protocol == MV_CPH_ETH_TYPE_IPV4) {
+		/* Get UDP Port */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22)
+		iphdrp = skb->nh.iph;
+#else
+		iphdrp = ip_hdr(skb);
+#endif
+
+	if ((iphdrp) && (iphdrp->protocol == IPPROTO_UDP)) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22)
+		udphdrp = skb->h.uh;
+#else
+		udphdrp = udp_hdr(skb);
+#endif
+			if (udphdrp) {
+				if (udphdrp == (struct udphdr *)iphdrp)
+					udphdrp = (struct udphdr *)((char *)udphdrp + (4*(iphdrp->ihl)));
+				/* Find configured UDP Source Port*/
+				for (i = 0; i < MV_CPH_MAS_UDP_SRC_PORT; i++) {
+					if ((udphdrp->source == udp_port_spec_cfg[port].udp_src[i].udp_port) &&
+					(udp_port_spec_cfg[port].udp_src[i].tx_spec.txq != MV_ETH_TXQ_INVALID)) {
+						memcpy(tx_spec_out, &(udp_port_spec_cfg[port].udp_src[i].tx_spec),
+							sizeof(struct mv_pp2_tx_spec));
+						MV_CPH_PRINT(CPH_DEBUG_LEVEL, "found udp_src 0x(%04x)\n",
+							ntohs(udphdrp->source));
+						return 1;
+					}
+				}
+				/* Find configured UDP Dest. Port*/
+				for (i = 0; i < MV_CPH_MAS_UDP_DST_PORT; i++) {
+					if ((udphdrp->dest == udp_port_spec_cfg[port].udp_dst[i].udp_port) &&
+					(udp_port_spec_cfg[port].udp_src[i].tx_spec.txq != MV_ETH_TXQ_INVALID)) {
+						memcpy(tx_spec_out, &(udp_port_spec_cfg[port].udp_dst[i].tx_spec),
+							sizeof(struct mv_pp2_tx_spec));
+						MV_CPH_PRINT(CPH_DEBUG_LEVEL, "found udp_dst 0x(%04x)\n",
+							ntohs(udphdrp->dest));
+						return 1;
+					}
+				}
+
+				if (port == MV_CPH_PON_PORT_IDX) {
+					MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Packet UDP, udp source or dest port not found");
+					MV_CPH_PRINT(CPH_DEBUG_LEVEL, " udp_src(%x)x udp_dst(%x)x\n",
+						ntohs(udphdrp->source), ntohs(udphdrp->dest));
+				}
+			}
+		} else if (port == MV_CPH_PON_PORT_IDX)
+			MV_CPH_PRINT(CPH_DEBUG_LEVEL, "NOT UDP, ip_proto(%d)\n", iphdrp->protocol);
+	} else if (port == MV_CPH_PON_PORT_IDX)
+		MV_CPH_PRINT(CPH_DEBUG_LEVEL, "NOT IP, proto(%d)\n", skb->protocol);
+
+	return 0;
+}
+#endif
+
+
+#ifdef CONFIG_MV_CPH_FLOW_MAP_HANDLE
+
+/******************************************************************************
+* cph_data_flow_rx()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: CPH function to handle the received application packets
+*
+* INPUTS:
+*       port    - Gmac port the packet from
+*       dev     - Net device
+*       skb     - SKB buffer to receive packet
+*       rx_desc - RX descriptor
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns 1.
+*       On error returns 0.
+*******************************************************************************/
+static int cph_data_flow_rx(int port, struct net_device *dev, struct sk_buff *skb, struct pp2_rx_desc *rx_desc)
+{
+	struct CPH_FLOW_ENTRY_T flow_rule;
+	int            offset = 0;
+	bool             state  = FALSE;
+	MV_STATUS        rc     = MV_OK;
+
+	if (MV_CPH_PON_PORT_IDX != port)
+		return 0;
+
+	cph_db_get_param(CPH_DB_PARAM_FLOW_SUPPORT, &state);
+
+	if (state == TRUE) {
+		/* Parse packets */
+		rc = cph_flow_parse_packet(port, skb->data, TRUE, TRUE, &flow_rule);
+		if (rc != MV_OK) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "%s():fail to call cph_flow_parse_packet, rc(%d)\n", __func__, rc);
+			return 0;
+		}
+
+		/* Get None default CPH data flow rule at first */
+		flow_rule.is_default = FALSE;
+		rc = cph_flow_db_get_rule(&flow_rule, MV_TRUE);
+		if (rc != MV_OK) {
+			/* Get default CPH data flow rule secondly */
+			flow_rule.is_default = TRUE;
+			rc = cph_flow_db_get_rule(&flow_rule, MV_TRUE);
+			/* Do nothing */
+			if (rc != MV_OK)
+				return 0;
+		}
+
+		/* modify packet */
+		rc = cph_flow_mod_packet(skb, TRUE, &flow_rule, &offset);
+		if (rc != MV_OK) {
+			MV_CPH_PRINT(CPH_ERR_LEVEL, "%s():fail to call cph_flow_mod_rx_packet, rc(%d)\n", __func__, rc);
+			return 0;
+		}
+
+		return 1;
+	}
+
+	return 0;
+}
+#endif
+
+/******************************************************************************
+* cph_data_flow_tx()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: CPH function to handle the trasmiting application packets
+*
+* INPUTS:
+*       port        - Gmac port the packet from
+*       dev         - Net device
+*       skb         - SKB buffer to receive packet
+*       tx_spec_out - TX descriptor
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns 1.
+*       On error returns 0.
+*******************************************************************************/
+int cph_data_flow_tx(int port, struct net_device *dev, struct sk_buff *skb,
+			bool mh, struct mv_pp2_tx_spec *tx_spec_out)
+{
+	struct CPH_FLOW_ENTRY_T flow_rule;
+	int            offset = 0;
+	bool             l_mh;
+	bool             state  = FALSE;
+	MV_STATUS        rc     = MV_OK;
+
+	if (MV_CPH_PON_PORT_IDX != port)
+		return 0;
+
+	cph_db_get_param(CPH_DB_PARAM_FLOW_SUPPORT, &state);
+
+	if (state != TRUE)
+		return 0;
+
+	/* Decide whether need to handle Marvell header */
+	l_mh = mh;
+	if (TRUE == mutils_is_frwd_broadcast_packet(skb->data))
+		l_mh = TRUE;
+
+	/* Parse packets */
+	rc = cph_flow_parse_packet(port, skb->data, FALSE, l_mh, &flow_rule);
+	if (rc != MV_OK) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "%s():fail to call cph_flow_parse_packet, rc<%d>\n", __func__, rc);
+		return 0;
+	}
+
+	/* Get CPH data flow rule */
+	flow_rule.is_default = FALSE;
+	rc = cph_flow_db_get_rule(&flow_rule, MV_TRUE);
+	if (rc != MV_OK) {
+		/* Handle multicat packets as unicast one */
+		if (flow_rule.parse_bm & CPH_FLOW_PARSE_MC_PROTO) {
+			flow_rule.parse_bm &= ~CPH_FLOW_PARSE_MC_PROTO;
+			rc = cph_flow_db_get_rule_by_vid(&flow_rule);
+			if (rc != MV_OK) {
+				rc = cph_flow_db_get_rule(&flow_rule, MV_TRUE);
+
+				if (rc != MV_OK) {
+					flow_rule.is_default = TRUE;
+					flow_rule.parse_outer_tci.vid   = MV_CPH_VID_NOT_CARE_VALUE;
+					flow_rule.parse_outer_tci.pbits = MV_CPH_PBITS_NOT_CARE_VALUE;
+					rc = cph_flow_db_get_rule(&flow_rule, MV_TRUE);
+					if (rc != MV_OK) {
+						MV_CPH_PRINT(CPH_DEBUG_LEVEL,
+							"%s():fail to call cph_flow_get_rule, rc<%d>\n",
+							__func__, rc);
+						return 0;
+					}
+				}
+			}
+		} else {
+			flow_rule.is_default = TRUE;
+			flow_rule.parse_outer_tci.vid   = MV_CPH_VID_NOT_CARE_VALUE;
+			flow_rule.parse_outer_tci.pbits = MV_CPH_PBITS_NOT_CARE_VALUE;
+			rc = cph_flow_db_get_rule(&flow_rule, MV_TRUE);
+			if (rc != MV_OK) {
+				MV_CPH_PRINT(CPH_DEBUG_LEVEL, "%s():fail to call cph_flow_get_rule, rc<%d>\n",
+					__func__, rc);
+				return 0;
+			}
+		}
+	}
+
+	/* modify packet */
+	rc = cph_flow_mod_packet(skb, l_mh, &flow_rule, &offset);
+	if (rc != MV_OK) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "%s():fail to call cph_flow_mod_rx_packet, rc<%d>\n", __func__, rc);
+		return 0;
+	}
+
+	/* Strip TX MH */
+	if (TRUE == l_mh) {
+		skb->data  += MV_ETH_MH_SIZE;
+		skb->len   -= MV_ETH_MH_SIZE;
+	}
+
+	/* modify packet */
+	rc = cph_flow_mod_frwd(&flow_rule, tx_spec_out);
+	if (rc != MV_OK) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "%s():fail to call cph_flow_mod_frwd, rc<%d>\n", __func__, rc);
+		return 0;
+	}
+	return 1;
+}
+
+/******************************************************************************
+* cph_app_packet_rx()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: CPH function to handle the received application packets
+*
+* INPUTS:
+*       port    - Gmac port the packet from
+*       dev     - Net device
+*       skb     - SKB buffer to receive packet
+*       rx_desc - RX descriptor
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns 1.
+*       On error returns 0.
+*******************************************************************************/
+static int cph_app_packet_rx(int port, struct net_device *dev, struct sk_buff *skb, struct pp2_rx_desc *rx_desc)
+{
+	enum CPH_APP_PARSE_FIELD_E parse_bm = 0;
+	struct CPH_APP_PARSE_T       parse_key;
+	int                 rc       = MV_OK;
+	enum CPH_APP_MOD_FIELD_E   mod_bm   = 0;
+	struct CPH_APP_MOD_T         mod_value;
+	enum CPH_APP_FRWD_FIELD_E  frwd_bm  = 0;
+	struct CPH_APP_FRWD_T        frwd_value;
+	bool                  state    = FALSE;
+
+	/* Check whether need to handle generic application packet */
+	cph_db_get_param(CPH_DB_PARAM_APP_SUPPORT, &state);
+	if (state == FALSE)
+		return 0;
+
+	memset(&parse_key,  0, sizeof(parse_key));
+	memset(&mod_value,  0, sizeof(mod_value));
+	memset(&frwd_value, 0, sizeof(frwd_value));
+
+	/* Parse application packet */
+	rc = cph_app_parse_packet(port, skb->data, &parse_bm, &parse_key);
+	if (rc != MV_OK) {
+		pr_err("Fail to call cph_app_parse_packet, rc(%d)\n", rc);
+		return 0;
+	}
+
+	/* Get CPH application rule */
+	rc = cph_app_get_rule(parse_bm, &parse_key, &mod_bm, &mod_value, &frwd_bm, &frwd_value);
+	if (rc != MV_OK) {
+		MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Fail to call cph_app_get_rule, rc(%d)\n", rc);
+		return 0;
+	}
+
+	/* Increase counter */
+	rc = cph_app_increase_counter(parse_bm, &parse_key);
+	if (rc != MV_OK) {
+		pr_err("Fail to call cph_app_increase_counter, rc(%d)\n", rc);
+		return 0;
+	}
+
+	/* Apply modification */
+	rc = cph_app_mod_rx_packet(port, dev, skb, rx_desc, mod_bm, &mod_value);
+	if (rc != MV_OK) {
+		pr_err("Fail to call cph_app_mod_rx_packet, rc(%d)\n", rc);
+		return 0;
+	}
+
+	/* Send to Linux Network Stack */
+	cph_rec_skb(port, skb);
+
+	return 1;
+}
+
+/******************************************************************************
+* cph_app_packet_tx()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: CPH function to handle the trasmiting application packets
+*
+* INPUTS:
+*       port        - Gmac port the packet from
+*       dev         - Net device
+*       skb         - SKB buffer to receive packet
+*       tx_spec_out - TX descriptor
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns 1.
+*       On error returns 0.
+*******************************************************************************/
+int cph_app_packet_tx(int port, struct net_device *dev, struct sk_buff *skb,
+			struct mv_pp2_tx_spec *tx_spec_out)
+{
+	enum CPH_DIR_E             dir;
+	unsigned short                proto_type = 0;
+	enum CPH_APP_PARSE_FIELD_E parse_bm   = 0;
+	struct CPH_APP_PARSE_T       parse_key;
+	enum CPH_APP_MOD_FIELD_E   mod_bm     = 0;
+	struct CPH_APP_MOD_T         mod_value;
+	enum CPH_APP_FRWD_FIELD_E  frwd_bm    = 0;
+	struct CPH_APP_FRWD_T        frwd_value;
+	int                 rc         = MV_OK;
+	bool                  state      = FALSE;
+
+	/* Check whether need to handle generic application packet */
+	cph_db_get_param(CPH_DB_PARAM_APP_SUPPORT, &state);
+	if (state == FALSE)
+		return 0;
+
+	memset(&parse_key,  0, sizeof(parse_key));
+	memset(&mod_value,  0, sizeof(mod_value));
+	memset(&frwd_value, 0, sizeof(frwd_value));
+	tx_spec_out->tx_func = NULL;
+	tx_spec_out->flags   = 0;
+
+	/* Get direction of packet */
+	dir = cph_app_parse_dir(port, FALSE);
+	if (dir == CPH_DIR_INVALID) {
+		MV_CPH_PRINT(CPH_ERR_LEVEL, "dir[%d] is invalid\n", dir);
+		return 0;
+	}
+
+	/* Get the protocol type application packet */
+	proto_type = skb->protocol;
+
+	/* Get CPH application rule by protocol type */
+	rc = cph_app_get_rule_by_dir_proto(dir, proto_type, &parse_bm,
+					&parse_key, &mod_bm, &mod_value, &frwd_bm, &frwd_value);
+	if (rc != MV_OK)
+		return 0;
+
+	/* Increase counter */
+	rc = cph_app_increase_counter_by_dir_proto(dir, proto_type);
+	if (rc != MV_OK) {
+		pr_err("Fail to call cph_app_increase_counter_by_dir_proto, rc(%d)\n", rc);
+		return 0;
+	}
+
+	/* Sepcial handling for IGMP and MLD */
+	if (((parse_bm & CPH_APP_PARSE_FIELD_IPV4_TYPE) &&
+		(parse_key.ipv4_type == MV_IP_PROTO_IGMP))  ||
+		((parse_bm & CPH_APP_PARSE_FIELD_ICMPV6_TYPE) &&
+		(parse_key.icmpv6_type == MV_ICMPV6_TYPE_MLD))) {
+		/* only handle U/S multicast protocol packet in U/S */
+		if (dir == CPH_DIR_US) {
+			if (cph_data_flow_tx(port, dev, skb, TRUE, tx_spec_out))
+				return 1;
+		}
+	}
+
+	/* Apply modification and set forwarding information */
+	rc = cph_app_mod_tx_packet(skb, tx_spec_out, mod_bm, &mod_value);
+	if (rc != MV_OK) {
+		pr_err("Fail to call cph_app_mod_tx_packet, rc(%d)\n", rc);
+		return 0;
+	}
+
+	/* Set forwarding information */
+	rc = cph_app_set_frwd(skb, tx_spec_out, frwd_bm, &frwd_value);
+	if (rc != MV_OK) {
+		pr_err("Fail to call cph_app_set_frwd, rc(%d)\n", rc);
+		return 0;
+	}
+
+	/* mark skb as tagged skb */
+	if (dir == CPH_DIR_DS)
+		skb->skb_iif = MV_MUX_SKB_TAG_VAL;
+	return 1;
+}
+
+/******************************************************************************
+* cph_rx_func()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: CPH function to handle the received special packets
+*              from network driver
+*
+* INPUTS:
+*       port    - Gmac port the packet from
+*       rxq     - CPU received queue
+*       dev     - Net device
+*       skb     - Marvell packet information
+*       rx_desc - RX descriptor
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       1: the packet will be handled and forwarded to linux stack in CPH
+*       0: the packet will not be forwarded to linux stack and mv_pp2_rx() needs to continue to handle it
+*******************************************************************************/
+int cph_rx_func(int port, int rxq, struct net_device *dev,
+		struct sk_buff *skb, struct pp2_rx_desc *rx_desc)
+{
+	MV_CPH_CLEAN_PRINT(CPH_DEBUG_LEVEL, "\n");
+	MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Enter\n");
+
+	if (dev == NULL || skb == NULL || rx_desc == NULL) {
+		pr_err("%s: NULL Pointer dev(%p) skb(%p) rx_desc(%p)\n",
+			__func__, dev, skb, rx_desc);
+	}
+
+	if (mvPp2IsRxSpecial(rx_desc->parserInfo)) {
+		/* Receive application packets */
+		if (cph_app_packet_rx(port, dev, skb, rx_desc))
+			return 1;
+		MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Failed to receive application packet\n");
+
+		/* Handle the broadcast packet in case it is enabled */
+#ifdef CONFIG_MV_CPH_BC_HANDLE
+		if (cph_app_rx_bc(port, dev, skb, rx_desc))
+			return 1;
+		MV_CPH_PRINT(CPH_DEBUG_LEVEL, "BC packet failure\n");
+#endif
+		/* deliver to upper layer */
+		MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Deliver to upper layer\n");
+	} else {/* Handle received data flow packets */
+#ifdef CONFIG_MV_CPH_FLOW_MAP_HANDLE
+		if (cph_data_flow_rx(port, dev, skb, rx_desc)) {
+			MV_CPH_PRINT(CPH_DEBUG_LEVEL, "Flow mapping\n");
+			return 0;
+		}
+#endif
+	}
+
+	return 0;
+}
+
+/******************************************************************************
+* cph_tx_func()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: CPH function to handle tranmitting special packets
+*              to network driver
+*
+* INPUTS:
+*       port        - Gmac port the packet from
+*       dev         - Net device
+*       skb         - SKB buffer to receive packet
+*       tx_spec_out - TX descriptor
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       None.
+*******************************************************************************/
+int cph_tx_func(int port, struct net_device *dev, struct sk_buff *skb,
+		struct mv_pp2_tx_spec *tx_spec_out)
+{
+	/* Transmit application packets */
+	if (cph_app_packet_tx(port, dev, skb, tx_spec_out))
+		return 1;
+
+	/* Transmit data flow packets */
+#ifdef CONFIG_MV_CPH_FLOW_MAP_HANDLE
+	if (cph_data_flow_tx(port, dev, skb, FALSE, tx_spec_out))
+		return 1;
+#endif
+
+	/* Transmit data flow packets by UDP Source Port or Dest Port */
+#ifdef CONFIG_MV_CPH_UDP_SAMPLE_HANDLE
+	if (cph_udp_port_tx(port, dev, skb, tx_spec_out))
+		return 1;
+#endif
+
+	return 0;
+}
+
+/******************************************************************************
+* cph_netdev_init()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Initialize CPH network device
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_netdev_init(void)
+{
+	unsigned int idx;
+
+	/* Retrieve Eth port number, TODO list */
+	gs_mv_eth_port_num = MV_ETH_MAX_PORTS;
+
+	if (gs_mv_eth_port_num > MV_ETH_MAX_PORTS)
+		gs_mv_eth_port_num = MV_ETH_MAX_PORTS;
+
+	/* Initialize application packet handling */
+	cph_app_init();
+
+	/* Initialize UDP port mapping feature */
+#ifdef CONFIG_MV_CPH_UDP_SAMPLE_HANDLE
+	cph_udp_table_init();
+#endif
+
+	/* Initialize data flow mapping feature */
+#ifdef CONFIG_MV_CPH_FLOW_MAP_HANDLE
+	cph_flow_init();
+#endif
+
+	/* Register special receive check function */
+#ifdef CONFIG_MV_PP2_RX_SPECIAL
+	for (idx = 0; idx < gs_mv_eth_port_num; idx++)
+		mv_pp2_rx_special_proc_func(idx, cph_rx_func);
+#endif /* CONFIG_MV_PP2_RX_SPECIAL */
+
+	/* Register special transmit check function */
+#ifdef CONFIG_MV_PP2_TX_SPECIAL
+	mv_pp2_tx_special_check_func(MV_PON_LOGIC_PORT_GET(), cph_tx_func);
+#endif /* CONFIG_MV_PP2_TX_SPECIAL */
+
+	/* enable all T-CONT by default, whill remove it once callback implmented*/
+	for (idx = 0; idx < MV_TCONT_LLID_NUM; idx++)
+		cph_set_tcont_state(idx, true);
+
+	return MV_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_netdev.h b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_netdev.h
new file mode 100644
index 000000000000..b503fae9102d
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_netdev.h
@@ -0,0 +1,205 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	*   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+	*   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+	*   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+********************************************************************************
+* mv_cph_netdev.h
+*
+* DESCRIPTION: Marvell CPH(CPH Packet Handler) network device part definition
+*
+* DEPENDENCIES:
+*               None
+*
+* CREATED BY:   VictorGu
+*
+* DATE CREATED: 11Dec2011
+*
+* FILE REVISION NUMBER:
+*               Revision: 1.0
+*
+*
+*******************************************************************************/
+#ifndef _MV_CPH_NETDEV_H_
+#define _MV_CPH_NETDEV_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <net/ip.h>
+
+#include <mvCommon.h>
+#include <mvOs.h>
+#include <../net_dev/mv_netdev.h>
+
+
+#define MV_CPH_MAS_UDP_SRC_PORT          8
+#define MV_CPH_MAS_UDP_DST_PORT          8
+#define MV_CPH_NUM_LLID                  8
+#define MV_CPH_PON_PORT_IDX              3
+
+#ifdef CONFIG_MV_CPH_UDP_SAMPLE_HANDLE
+struct mv_udp_port_tx_spec {
+	__be16    udp_port;
+	struct mv_pp2_tx_spec tx_spec;
+};
+
+struct mv_port_tx_spec {
+	struct mv_udp_port_tx_spec udp_src[MV_CPH_MAS_UDP_SRC_PORT];
+	struct mv_udp_port_tx_spec udp_dst[MV_CPH_MAS_UDP_DST_PORT];
+};
+
+void cph_udp_spec_print_all(void);
+MV_STATUS  cph_udp_src_spec_set(int tx_port, uint16_t udp_src_port,
+	uint8_t txp, uint8_t txq, uint16_t flags, uint32_t hw_cmd);
+MV_STATUS  cph_udp_dest_spec_set(int tx_port, uint16_t udp_dest_port,
+	uint8_t txp, uint8_t txq, uint16_t flags, uint32_t hw_cmd);
+#endif
+
+/******************************************************************************
+* cph_rec_skb()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Send SKB packet to linux network and increse counter
+*
+* INPUTS:
+*       port    - Gmac port the packet from
+*       skb     - SKB buffer to receive packet
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       None.
+*******************************************************************************/
+void cph_rec_skb(int port, struct sk_buff *skb);
+
+/******************************************************************************
+* cph_netdev_init()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: Initialize CPH network device
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       On success, the function returns MV_OK.
+*       On error returns error code accordingly.
+*******************************************************************************/
+int cph_netdev_init(void);
+
+/******************************************************************************
+* cph_rx_func()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: CPH function to handle the received special packets
+*              from network driver
+*
+* INPUTS:
+*       port    - Gmac port the packet from
+*       rxq     - CPU received queue
+*       dev     - Net device
+*       skb     - Marvell packet information
+*       rx_desc - RX descriptor
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       1: the packet will be handled and forwarded to linux stack in CPH
+*       0: the packet will not be forwarded to linux stack and mv_pp2_rx() needs to continue to handle it
+*******************************************************************************/
+int cph_rx_func(int port, int rxq, struct net_device *dev,
+		struct sk_buff *skb, struct pp2_rx_desc *rx_desc);
+
+/******************************************************************************
+* cph_tx_func()
+* _____________________________________________________________________________
+*
+* DESCRIPTION: CPH function to handle tranmitting special packets
+*              to network driver
+*
+* INPUTS:
+*       port        - Gmac port the packet from
+*       dev         - Net device
+*       skb         - SKB buffer to receive packet
+*       tx_spec_out - TX descriptor
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       None.
+*******************************************************************************/
+int cph_tx_func(int port, struct net_device *dev, struct sk_buff *skb,
+		struct mv_pp2_tx_spec *tx_spec_out);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _MV_CPH_NETDEV_H_ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_sysfs.c
new file mode 100644
index 000000000000..d30bd0d0dd4a
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_sysfs.c
@@ -0,0 +1,1194 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	*   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+	*   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+	*   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+********************************************************************************
+* mv_cph_sysfs.c
+*
+* DESCRIPTION: Marvell CPH(CPH Packet Handler) sysfs command definition
+*
+* DEPENDENCIES:
+*               None
+*
+* CREATED BY:   VictorGu
+*
+* DATE CREATED: 22Jan2013
+*
+* FILE REVISION NUMBER:
+*               Revision: 1.1
+*
+*
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "mv_cph_header.h"
+
+static ssize_t cph_spec_proc_help(char *b)
+{
+	int o = 0; /* buffer offset */
+	int s = PAGE_SIZE; /* buffer size */
+
+	o += scnprintf(b+o, s-o, "cat  help                                 - show this help\n");
+	o += scnprintf(b+o, s-o, "cat  help_add                             - show additional help for parameters\n");
+	o += scnprintf(b+o, s-o, "cat  show_app_db                          - show all information in application rule data base\n");
+	o += scnprintf(b+o, s-o, "cat  show_parse_name                      - show sysfs parsing rule data base\n");
+	o += scnprintf(b+o, s-o, "cat  show_mod_name                        - show sysfs modification rule data base\n");
+	o += scnprintf(b+o, s-o, "cat  show_frwd_name                       - show sysfs modification rule data base\n");
+#ifdef CONFIG_MV_CPH_UDP_SAMPLE_HANDLE
+	o += scnprintf(b+o, s-o, "cat  udp_ports                            - show special udp source and destination port configuration\n");
+#endif
+#ifdef CONFIG_MV_CPH_FLOW_MAP_HANDLE
+	o += scnprintf(b+o, s-o, "cat  show_flow_rule                       - show flow mapping rules\n");
+	o += scnprintf(b+o, s-o, "cat  clear_flow_rule                      - clear all flow mapping rules\n");
+	o += scnprintf(b+o, s-o, "cat  del_dscp_map                         - delete DSCP to P-bits mapping rules\n");
+#endif
+	o += scnprintf(b+o, s-o, "echo p dir en                             > set_port_func   - enable or disable cph function on physical port\n");
+	o += scnprintf(b+o, s-o, "echo p                                    > get_port_func   - show cph function enabled status on physical port\n");
+	o += scnprintf(b+o, s-o, "	p(dec): physical port | dir(dec): 0:Rx, 1:Tx, 2:both of dir | en(dec): 0:disable, 1:enable\n");
+	o += scnprintf(b+o, s-o, "echo profile_id active_port               > set_complex     - Set TPM complex profile ID and active GMAC port, 0:GMAC0, 1:GMAC1, 2:PON MAC\n");
+	o += scnprintf(b+o, s-o, "echo feature state                        > set_flag        - Set the support of CPH feature: refer to below additional info, state 0:disable, 1:enable\n");
+	o += scnprintf(b+o, s-o, "echo tcont state                          > set_tcont       - Set T-CONT state in CPH, T-CONT 0~7, state, 1:enable, 0:disable\n");
+	o += scnprintf(b+o, s-o, "echo hex                                  > trace_level     - Set cph trace level bitmap. 0x01:debug, 0x02:info, 0x04:warn, 0x08:error\n");
+	o += scnprintf(b+o, s-o, "echo name bm(hex) dir rx(hex) mh(hex) ety(hex) esty ipv4ty nh1 nh2 icmpty > add_parse   - add parsing field, dir 0:U/S, 1:D/S, 2:Not care\n");
+	o += scnprintf(b+o, s-o, "echo name                                 > del_parse       - delete parsing field\n");
+	o += scnprintf(b+o, s-o, "echo name bm(hex) proto_type(hex) state   > add_mod         - add modification field, state 0:diable, 1:enable\n");
+	o += scnprintf(b+o, s-o, "echo name                                 > del_mod         - delete modification field\n");
+	o += scnprintf(b+o, s-o, "echo name bm(hex) trg_port trg_queue gem  > add_frwd        - add forwarding field\n");
+	o += scnprintf(b+o, s-o, "echo name                                 > del_frwd        - delete forwarding field\n");
+	o += scnprintf(b+o, s-o, "echo parse_name mod_name frwd_name        > add_app_rule    - add application rule\n");
+	o += scnprintf(b+o, s-o, "echo parse_name                           > del_app_rule    - delete application rule\n");
+	o += scnprintf(b+o, s-o, "echo parse_name mod_name frwd_name        > update_app_rule - update application rule\n");
+	o += scnprintf(b+o, s-o, "echo parse_name                           > get_app_rule    - get application rule\n");
+#ifdef CONFIG_MV_CPH_UDP_SAMPLE_HANDLE
+	o += scnprintf(b+o, s-o, "echo p udp_src(dec) txp txq flags hw_cmd  > udp_src         - set udp source port special Tx behavior\n");
+	o += scnprintf(b+o, s-o, "echo p udp_dst(dec) txp txq flags hw_cmd  > udp_dst         - set udp destination port special Tx behavior\n");
+#endif
+#ifdef CONFIG_MV_CPH_FLOW_MAP_HANDLE
+	o += scnprintf(b+o, s-o, "---------------------------------------------------------------------------------------------------------------------------------------\n");
+	o += scnprintf(b+o, s-o, "                         |Parse outer    |Parse inner             |Mod outer      |Mod Inner      |Forward\n");
+	o += scnprintf(b+o, s-o, "echo dir default parse_bm mh ety  tpid vid pbits  tpid vid pbits  op_type  tpid vid pbits  tpid vid pbits  port queue hwf_queue gem > add_flow_rule - Add flow rule\n");
+	o += scnprintf(b+o, s-o, "echo dir default parse_bm mh ety  tpid vid pbits  tpid vid pbits  > del_flow_rule   - delete flow mapping rule\n");
+	o += scnprintf(b+o, s-o, "echo dir default parse_bm mh ety  tpid vid pbits  tpid vid pbits  > get_flow_rule   - get flow mapping rule\n");
+	o += scnprintf(b+o, s-o, "echo pbits0 pbits1 ... pbits62 pbits63                    > set_dscp_map    - set DSCP to P-bits mapping rules\n");
+#endif
+	return o;
+}
+
+static ssize_t cph_spec_proc_help_add(char *b)
+{
+	int o = 0; /* buffer offset */
+	int s = PAGE_SIZE; /* buffer size */
+
+	o += scnprintf(b+o, s-o, "CPH additional help for parameters\n");
+	o += scnprintf(b+o, s-o, "---------------------------------------------------------------------------------------------------------------------------------------\n");
+	o += scnprintf(b+o, s-o, "[Generic Parameters]\n");
+	o += scnprintf(b+o, s-o, "feature:\n");
+	o += scnprintf(b+o, s-o, "   0:Generic application, 1:IGMP/MLD support, 2:Broadcast support, 3:Data flow mapping support, 4: UDP port mapping support\n");
+	o += scnprintf(b+o, s-o, "[App Parameters]\n");
+	o += scnprintf(b+o, s-o, "parse bm:\n");
+	o += scnprintf(b+o, s-o, "   0x01:PARSE_FIELD_DIR              0x02:PARSE_FIELD_MH               0x04:PARSE_FIELD_ETH_TYPE         0x08:PARSE_FIELD_ETH_SUBTYPE\n");
+	o += scnprintf(b+o, s-o, "   0x10:PARSE_FIELD_IPV4_TYPE        0x20:PARSE_FIELD_IPV6_NH1         0x40:PARSE_FIELD_IPV6_NH2         0x80:PARSE_FIELD_ICMPV6_TYPE\n");
+	o += scnprintf(b+o, s-o, "dir: 0: U/S, 1:D/S, 2: Not care\n");
+	o += scnprintf(b+o, s-o, "rx: 0: RX, 1:TX\n");
+	o += scnprintf(b+o, s-o, "mod bm:\n");
+	o += scnprintf(b+o, s-o, "   0x01:RX_MOD_ADD_GMAC              0x02:RX_MOD_REPLACE_PROTO_TYPE    0x04:RX_MOD_STRIP_MH              0x08:TX_MOD_ADD_MH_BY_DRIVER\n");
+	o += scnprintf(b+o, s-o, "   0x10:CPH_APP_TX_MOD_NO_PAD        0x20:MOD_SET_STATE\n");
+	o += scnprintf(b+o, s-o, "frwd bm:\n");
+	o += scnprintf(b+o, s-o, "   0x01:FRWD_SET_TRG_PORT            0x02:FRWD_SET_TRG_QUEUE           0x04:FRWD_SET_GEM_PORT\n");
+	o += scnprintf(b+o, s-o, "[Flow Parameters]\n");
+	o += scnprintf(b+o, s-o, "dir: 0: U/S, 1:D/S, 2: Not care\n");
+	o += scnprintf(b+o, s-o, "default: 0: not default, 1:default\n");
+	o += scnprintf(b+o, s-o, "bm:\n");
+	o += scnprintf(b+o, s-o, "   0x01:PARSE_MH                     0x02:PARSE_EXT_VLAN               0x04:PARSE_TWO_VLAN               0x08:PARSE_ETH_TYPE\n");
+	o += scnprintf(b+o, s-o, "mh(hex), ety(hex), tpid(hex), vid(dec), pbits(dec)\n");
+	o += scnprintf(b+o, s-o, "op_type:\n");
+	o += scnprintf(b+o, s-o, "   00:ASIS                           01:DISCARD                        02:ADD                            03:ADD_COPY_DSCP\n");
+	o += scnprintf(b+o, s-o, "   04:ADD_COPY_OUTER_PBIT            05:ADD_COPY_INNER_PBIT            06:ADD_2_TAGS                     07:ADD_2_TAGS_COPY_DSCP\n");
+	o += scnprintf(b+o, s-o, "   08:ADD_2_TAGS_COPY_PBIT           09:REM                            10:REM_2_TAGS                     11:REPLACE\n");
+	o += scnprintf(b+o, s-o, "   12:REPLACE_VID                    13:REPLACE_PBIT                   14:REPLACE_INNER_ADD_OUTER        15:REPLACE_INNER_ADD_OUTER_COPY_PBIT\n");
+	o += scnprintf(b+o, s-o, "   16:REPLACE_INNER_REM_OUTER        17:REPLACE_2TAGS                  18:REPLACE_2TAGS_VID              19:SWAP\n");
+
+	return o;
+}
+
+
+/********************************************************************************/
+/*                          Parsing field table                                 */
+/********************************************************************************/
+static struct CPH_SYSFS_PARSE_T cph_sysfs_parse_table[CPH_SYSFS_FIELD_MAX_ENTRY];
+
+static struct CPH_SYSFS_RULE_T cph_parse_rule_db = {
+	.max_entry_num    = CPH_SYSFS_FIELD_MAX_ENTRY,
+	.entry_num        = 0,
+	.entry_size       = sizeof(struct CPH_SYSFS_PARSE_T),
+	.entry_ara        = cph_sysfs_parse_table
+};
+
+static void cph_sysfs_init_parse_db(void)
+{
+	struct CPH_SYSFS_PARSE_T  *p_entry = (struct CPH_SYSFS_PARSE_T *)cph_parse_rule_db.entry_ara;
+	int               idx     = 0;
+
+	for (idx = 0; idx < cph_parse_rule_db.max_entry_num; idx++, p_entry++)
+		p_entry->name[0] = 0;
+}
+
+struct CPH_SYSFS_PARSE_T *cph_sysfs_find_parse_entry_by_name(char *name)
+{
+	struct CPH_SYSFS_PARSE_T *p_entry = (struct CPH_SYSFS_PARSE_T *)cph_parse_rule_db.entry_ara;
+	int              idx     = 0;
+
+	for (idx = 0; idx < cph_parse_rule_db.max_entry_num; idx++, p_entry++) {
+		if (strcmp(p_entry->name, name) == 0)
+			return p_entry;
+	}
+	return 0;
+}
+
+struct CPH_SYSFS_PARSE_T *cph_sysfs_find_free_parse_entry(void)
+{
+	struct CPH_SYSFS_PARSE_T *p_entry = (struct CPH_SYSFS_PARSE_T *)cph_parse_rule_db.entry_ara;
+	int              idx     = 0;
+
+	for (idx = 0; idx < cph_parse_rule_db.max_entry_num; idx++, p_entry++) {
+		if (p_entry->name[0] == 0)
+			return p_entry;
+	}
+	return 0;
+}
+
+bool cph_sysfs_del_parse_entry_by_name(char *name)
+{
+	struct CPH_SYSFS_PARSE_T *p_entry = (struct CPH_SYSFS_PARSE_T *)cph_parse_rule_db.entry_ara;
+	int              idx     = 0;
+
+	for (idx = 0; idx < cph_parse_rule_db.max_entry_num; idx++, p_entry++) {
+		if (strcmp(p_entry->name, name) == 0) {
+			p_entry->name[0]  = 0;
+			p_entry->parse_bm = 0;
+			memset(&p_entry->parse_key, 0, sizeof(p_entry->parse_key));
+			return TRUE;
+		}
+	}
+	return FALSE;
+}
+
+void cph_sysfs_show_parse_db(void)
+{
+	struct CPH_SYSFS_PARSE_T *p_entry = (struct CPH_SYSFS_PARSE_T *)cph_parse_rule_db.entry_ara;
+	int              idx     = 0;
+
+	for (idx = 0; idx < cph_parse_rule_db.max_entry_num; idx++, p_entry++) {
+		if (p_entry->name[0] != 0) {
+			pr_info("Parse entry(%d) name(%s)\n", idx, p_entry->name);
+			cph_db_display_parse_field(p_entry->parse_bm, &p_entry->parse_key);
+		}
+	}
+}
+
+/********************************************************************************/
+/*                          Modification field table                            */
+/********************************************************************************/
+static struct CPH_SYSFS_MOD_T CPH_SYSFS_MOD_Table[CPH_SYSFS_FIELD_MAX_ENTRY];
+
+static struct CPH_SYSFS_RULE_T cph_mod_rule_db = {
+	.max_entry_num    = CPH_SYSFS_FIELD_MAX_ENTRY,
+	.entry_num        = 0,
+	.entry_size       = sizeof(struct CPH_SYSFS_MOD_T),
+	.entry_ara        = CPH_SYSFS_MOD_Table
+};
+
+static void cph_sysfs_init_mod_db(void)
+{
+	struct CPH_SYSFS_MOD_T  *p_entry = (struct CPH_SYSFS_MOD_T *)cph_mod_rule_db.entry_ara;
+	int             idx     = 0;
+
+	for (idx = 0; idx < cph_mod_rule_db.max_entry_num; idx++, p_entry++)
+		p_entry->name[0] = 0;
+}
+
+struct CPH_SYSFS_MOD_T *cph_sysfs_find_mod_entry_by_name(char *name)
+{
+	struct CPH_SYSFS_MOD_T  *p_entry = (struct CPH_SYSFS_MOD_T *)cph_mod_rule_db.entry_ara;
+	int             idx     = 0;
+
+	for (idx = 0; idx < cph_mod_rule_db.max_entry_num; idx++, p_entry++) {
+		if (strcmp(p_entry->name, name) == 0)
+			return p_entry;
+	}
+	return 0;
+}
+
+struct CPH_SYSFS_MOD_T *cph_sysfs_find_free_mod_entry(void)
+{
+	struct CPH_SYSFS_MOD_T  *p_entry = (struct CPH_SYSFS_MOD_T *)cph_mod_rule_db.entry_ara;
+	int             idx     = 0;
+
+	for (idx = 0; idx < cph_mod_rule_db.max_entry_num; idx++, p_entry++) {
+		if (p_entry->name[0] == 0)
+			return p_entry;
+	}
+	return 0;
+}
+
+bool cph_sysfs_del_mod_entry_by_name(char *name)
+{
+	struct CPH_SYSFS_MOD_T  *p_entry = (struct CPH_SYSFS_MOD_T *)cph_mod_rule_db.entry_ara;
+	int             idx     = 0;
+
+	for (idx = 0; idx < cph_mod_rule_db.max_entry_num; idx++, p_entry++) {
+		if (strcmp(p_entry->name, name) == 0) {
+			p_entry->name[0] = 0;
+			p_entry->mod_bm  = 0;
+			memset(&p_entry->mod_value, 0, sizeof(p_entry->mod_value));
+			return TRUE;
+		}
+	}
+	return FALSE;
+}
+
+void cph_sysfs_show_mod_db(void)
+{
+	struct CPH_SYSFS_MOD_T  *p_entry = (struct CPH_SYSFS_MOD_T *)cph_mod_rule_db.entry_ara;
+	int             idx     = 0;
+
+	for (idx = 0; idx < cph_mod_rule_db.max_entry_num; idx++, p_entry++) {
+		if (p_entry->name[0] != 0) {
+			pr_info("Mod entry(%d) name(%s)\n", idx, p_entry->name);
+			cph_db_display_mod_field(p_entry->mod_bm, &p_entry->mod_value);
+		}
+	}
+}
+
+/********************************************************************************/
+/*                          Forwarding field table                              */
+/********************************************************************************/
+static struct CPH_SYSFS_FRWD_T cph_sysfs_frwd_table[CPH_SYSFS_FIELD_MAX_ENTRY];
+
+static struct CPH_SYSFS_RULE_T cph_frwd_rule_db = {
+	.max_entry_num    = CPH_SYSFS_FIELD_MAX_ENTRY,
+	.entry_num        = 0,
+	.entry_size       = sizeof(struct CPH_SYSFS_FRWD_T),
+	.entry_ara        = cph_sysfs_frwd_table
+};
+
+static void cph_sysfs_init_frwd_db(void)
+{
+	struct CPH_SYSFS_FRWD_T *p_entry = (struct CPH_SYSFS_FRWD_T *)cph_frwd_rule_db.entry_ara;
+	int             idx     = 0;
+
+	for (idx = 0; idx < cph_frwd_rule_db.max_entry_num; idx++, p_entry++)
+		p_entry->name[0] = 0;
+}
+
+struct CPH_SYSFS_FRWD_T *cph_sysfs_find_frwd_entry_by_name(char *name)
+{
+	struct CPH_SYSFS_FRWD_T *p_entry = (struct CPH_SYSFS_FRWD_T *)cph_frwd_rule_db.entry_ara;
+	int             idx     = 0;
+
+	for (idx = 0; idx < cph_frwd_rule_db.max_entry_num; idx++, p_entry++) {
+		if (strcmp(p_entry->name, name) == 0)
+			return p_entry;
+	}
+	return 0;
+}
+
+struct CPH_SYSFS_FRWD_T *cph_sysfs_find_free_frwd_entry(void)
+{
+	struct CPH_SYSFS_FRWD_T *p_entry = (struct CPH_SYSFS_FRWD_T *)cph_frwd_rule_db.entry_ara;
+	int             idx     = 0;
+
+	for (idx = 0; idx < cph_frwd_rule_db.max_entry_num; idx++, p_entry++) {
+		if (p_entry->name[0] == 0)
+			return p_entry;
+	}
+	return 0;
+}
+
+bool cph_sysfs_del_frwd_entry_by_name(char *name)
+{
+	struct CPH_SYSFS_FRWD_T *p_entry = (struct CPH_SYSFS_FRWD_T *)cph_frwd_rule_db.entry_ara;
+	int             idx     = 0;
+
+	for (idx = 0; idx < cph_frwd_rule_db.max_entry_num; idx++, p_entry++) {
+		if (strcmp(p_entry->name, name) == 0) {
+			p_entry->name[0] = 0;
+			p_entry->frwd_bm = 0;
+			memset(&p_entry->frwd_value, 0, sizeof(p_entry->frwd_value));
+			return TRUE;
+		}
+	}
+	return FALSE;
+}
+
+void cph_sysfs_show_frwd_db(void)
+{
+	struct CPH_SYSFS_FRWD_T *p_entry = (struct CPH_SYSFS_FRWD_T *)cph_frwd_rule_db.entry_ara;
+	int             idx     = 0;
+
+	for (idx = 0; idx < cph_frwd_rule_db.max_entry_num; idx++, p_entry++) {
+		if (p_entry->name[0] != 0) {
+			pr_info("Frwd entry(%d) name(%s)\n", idx, p_entry->name);
+			cph_db_display_frwd_field(p_entry->frwd_bm, &p_entry->frwd_value);
+		}
+	}
+}
+
+/********************************************************************************/
+/*                          SYS FS Parsing Functions                            */
+/********************************************************************************/
+static ssize_t cph_spec_proc_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	int       off  = 0;
+	const char *name = attr->attr.name;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "help"))
+		off = cph_spec_proc_help(buf);
+
+	if (!strcmp(name, "help_add"))
+		off = cph_spec_proc_help_add(buf);
+	else if (!strcmp(name, "show_app_db"))
+		cph_db_display_all();
+
+	else if (!strcmp(name, "show_parse_name"))
+		cph_sysfs_show_parse_db();
+
+	else if (!strcmp(name, "show_mod_name"))
+		cph_sysfs_show_mod_db();
+
+	else if (!strcmp(name, "show_frwd_name"))
+		cph_sysfs_show_frwd_db();
+
+#ifdef CONFIG_MV_CPH_UDP_SAMPLE_HANDLE
+	else if (!strcmp(name, "udp_ports"))
+		cph_udp_spec_print_all();
+#endif
+#ifdef CONFIG_MV_CPH_FLOW_MAP_HANDLE
+	else if (!strcmp(name, "show_flow_rule"))
+		cph_flow_display_all();
+
+	else if (!strcmp(name, "clear_flow_rule"))
+		cph_flow_clear_rule();
+
+	else if (!strcmp(name, "del_dscp_map"))
+		cph_flow_del_dscp_map();
+#endif
+	else
+		off = cph_spec_proc_help(buf);
+
+	return off;
+}
+
+static ssize_t cph_spec_proc_1_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	const char *name  = attr->attr.name;
+	unsigned int      v1    = 0;
+	unsigned long       flags = 0;
+	MV_STATUS   rc    =  MV_OK;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read input */
+	sscanf(buf, "%x", &v1);
+
+	raw_local_irq_save(flags);
+
+	if (!strcmp(name, "trace_level")) {
+		rc = cph_set_trace_flag(v1);
+		if (rc == MV_OK)
+			pr_err("Succeed to set trace level<0x%x>\n", v1);
+		else
+			pr_err("Fail to set trace level<0x%x>\n", v1);
+	} else
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+
+	raw_local_irq_restore(flags);
+
+	return len;
+}
+
+static ssize_t cph_spec_proc_2_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	const char *name  = attr->attr.name;
+	unsigned int      v1    = 0;
+	unsigned int      v2    = 0;
+	unsigned long       flags = 0;
+	MV_STATUS   rc    =  MV_OK;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read input */
+	sscanf(buf, "%d %d", &v1, &v2);
+
+	raw_local_irq_save(flags);
+
+	if (!strcmp(name, "set_complex")) {
+		rc = cph_set_complex_profile(v1, v2);
+		if (rc == MV_OK)
+			pr_err("Succeed to set complex profile<%d> active port<%d>\n", v1, v2);
+		else
+			pr_err("Fail to set complex profile<%d> active port<%d>\n", v1, v2);
+	} else if (!strcmp(name, "set_flag")) {
+		rc = cph_set_feature_flag(v1, v2);
+		if (rc == MV_OK)
+			pr_err("Succeed to set feature<%d> to <%d>\n", v1, v2);
+		else
+			pr_err("Fail to set feature<%d> to<%d>\n", v1, v2);
+	} else if (!strcmp(name, "set_tcont")) {
+		rc = cph_set_tcont_state(v1, v2);
+		if (rc == MV_OK)
+			pr_err("Succeed to set tcont<%d> to <%d>\n", v1, v2);
+		else
+			pr_err("Fail to set tcont<%d> to<%d>\n", v1, v2);
+	} else
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+
+	raw_local_irq_restore(flags);
+
+	return len;
+}
+
+static ssize_t cph_spec_proc_name_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	const char *name  = attr->attr.name;
+	char        name1[CPH_SYSFS_FIELD_MAX_LEN+1];
+	unsigned int      v1    = 0;
+	unsigned int      v2    = 0;
+	unsigned int      v3    = 0;
+	unsigned int      v4    = 0;
+	unsigned int      v5    = 0;
+	unsigned int      v6    = 0;
+	unsigned int      v7    = 0;
+	unsigned int      v8    = 0;
+	unsigned int      v9    = 0;
+	unsigned int      v10   = 0;
+	unsigned long       flags = 0;
+	struct CPH_SYSFS_PARSE_T *p_parse_entry = NULL;
+	struct CPH_SYSFS_MOD_T   *p_mod_entry   = NULL;
+	struct CPH_SYSFS_FRWD_T  *p_frwd_entry  = NULL;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "add_parse")) {
+		/* Read input */
+		sscanf(buf, "%s %x %d %d %d %x %d %d %d %d %d", name1, &v1, &v2, &v3, &v4, &v5, &v6, &v7, &v8, &v9, &v10);
+
+		raw_local_irq_save(flags);
+
+		p_parse_entry = cph_sysfs_find_parse_entry_by_name(name1);
+		if (p_parse_entry) {
+			pr_err("Already has the parse field by name <%s>\n", name1);
+			return -EPERM;
+		}
+
+		p_parse_entry = cph_sysfs_find_free_parse_entry();
+		if (!p_parse_entry) {
+			pr_err("No free parse entry\n");
+			return -EPERM;
+		}
+
+		strcpy(p_parse_entry->name, name1);
+		p_parse_entry->parse_bm              = v1;
+		p_parse_entry->parse_key.dir         = v2;
+		p_parse_entry->parse_key.rx_tx       = v3;
+		p_parse_entry->parse_key.mh          = v4;
+		p_parse_entry->parse_key.eth_type    = v5;
+		p_parse_entry->parse_key.eth_subtype = v6;
+		p_parse_entry->parse_key.ipv4_type   = v7;
+		p_parse_entry->parse_key.ipv6_nh1    = v8;
+		p_parse_entry->parse_key.ipv6_nh2    = v9;
+		p_parse_entry->parse_key.icmpv6_type = v10;
+
+		pr_err("Succeed to add parse field by name <%s>\n", name1);
+
+		raw_local_irq_restore(flags);
+	} else if (!strcmp(name, "add_mod")) {
+		/* Read input */
+		sscanf(buf, "%s %x %x %d", name1, &v1, &v2, &v3);
+
+		raw_local_irq_save(flags);
+
+		p_mod_entry = cph_sysfs_find_mod_entry_by_name(name1);
+		if (p_mod_entry) {
+			pr_err("Already has the mod field by name <%s>\n", name1);
+			return -EPERM;
+		}
+
+		p_mod_entry = cph_sysfs_find_free_mod_entry();
+		if (!p_mod_entry) {
+			pr_err("No free mod entry\n");
+			return -EPERM;
+		}
+
+		strcpy(p_mod_entry->name, name1);
+		p_mod_entry->mod_bm                = v1;
+		p_mod_entry->mod_value.proto_type  = v2;
+		if (v3)
+			p_mod_entry->mod_value.state   = TRUE;
+		else
+			p_mod_entry->mod_value.state   = FALSE;
+
+		pr_err("Succeed to add mod field by name <%s>\n", name1);
+
+		raw_local_irq_restore(flags);
+	} else if (!strcmp(name, "add_frwd")) {
+		/* Read input */
+		sscanf(buf, "%s %x %d %d %d", name1, &v1, &v2, &v3, &v4);
+
+		raw_local_irq_save(flags);
+
+		p_frwd_entry = cph_sysfs_find_frwd_entry_by_name(name1);
+		if (p_frwd_entry) {
+			pr_err("Already has the frwd field by name <%s>\n", name1);
+			return -EPERM;
+		}
+
+		p_frwd_entry = cph_sysfs_find_free_frwd_entry();
+		if (!p_frwd_entry) {
+			pr_err("No free frwd entry\n");
+			return -EPERM;
+		}
+
+		strcpy(p_frwd_entry->name, name1);
+		p_frwd_entry->frwd_bm              = v1;
+		p_frwd_entry->frwd_value.trg_port  = v2;
+		p_frwd_entry->frwd_value.trg_queue = v3;
+		p_frwd_entry->frwd_value.gem_port  = v4;
+
+		pr_err("Succeed to add frwd field by name <%s>\n", name1);
+
+		raw_local_irq_restore(flags);
+	} else
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+
+	return len;
+}
+
+static ssize_t cph_spec_proc_app_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	const char *name  = attr->attr.name;
+	char        name1[CPH_SYSFS_FIELD_MAX_LEN+1];
+	char        name2[CPH_SYSFS_FIELD_MAX_LEN+1];
+	char        name3[CPH_SYSFS_FIELD_MAX_LEN+1];
+	unsigned long       flags = 0;
+	MV_STATUS   rc    =  MV_OK;
+	struct CPH_SYSFS_PARSE_T *p_parse_entry = NULL;
+	struct CPH_SYSFS_MOD_T   *p_mod_entry   = NULL;
+	struct CPH_SYSFS_FRWD_T  *p_frwd_entry  = NULL;
+	struct CPH_SYSFS_MOD_T    mod_entry;
+	struct CPH_SYSFS_FRWD_T   frwd_entry;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read input */
+	sscanf(buf, "%s %s %s", name1, name2, name3);
+
+	raw_local_irq_save(flags);
+
+	if (!strcmp(name, "add_app_rule")) {
+		p_parse_entry = cph_sysfs_find_parse_entry_by_name(name1);
+		if (!p_parse_entry) {
+			pr_err("add_app_rule: invalid parse name <%s>\n", name1);
+			return -EPERM;
+		}
+		p_mod_entry = cph_sysfs_find_mod_entry_by_name(name2);
+		if (!p_mod_entry) {
+			pr_err("add_app_rule: invalid mod name <%s>\n", name2);
+			return -EPERM;
+		}
+		p_frwd_entry = cph_sysfs_find_frwd_entry_by_name(name3);
+		if (!p_frwd_entry) {
+			pr_err("add_app_rule: invalid frwd name <%s>\n", name3);
+			return -EPERM;
+		}
+
+		rc = cph_add_app_rule(p_parse_entry->parse_bm, &p_parse_entry->parse_key,
+					p_mod_entry->mod_bm, &p_mod_entry->mod_value,
+					p_frwd_entry->frwd_bm, &p_frwd_entry->frwd_value);
+		if (rc == MV_OK)
+			pr_err("Succeed to add app rule\n");
+		else
+			pr_err("Fail to add app rule\n");
+	} else if (!strcmp(name, "del_app_rule")) {
+		p_parse_entry = cph_sysfs_find_parse_entry_by_name(name1);
+		if (!p_parse_entry) {
+			pr_err("add_app_rule: invalid parse name <%s>\n", name1);
+			return -EPERM;
+		}
+
+		rc = cph_del_app_rule(p_parse_entry->parse_bm, &p_parse_entry->parse_key);
+		if (rc == MV_OK)
+			pr_err("Succeed to delete app rule\n");
+		else
+			pr_err("Fail to delete app rule\n");
+	} else if (!strcmp(name, "update_app_rule")) {
+		p_parse_entry = cph_sysfs_find_parse_entry_by_name(name1);
+		if (!p_parse_entry) {
+			pr_err("add_app_rule: invalid parse name <%s>\n", name1);
+			return -EPERM;
+		}
+		p_mod_entry = cph_sysfs_find_mod_entry_by_name(name2);
+		if (!p_mod_entry) {
+			pr_err("add_app_rule: invalid mod name <%s>\n", name2);
+			return -EPERM;
+		}
+		p_frwd_entry = cph_sysfs_find_frwd_entry_by_name(name3);
+		if (!p_frwd_entry) {
+			pr_err("add_app_rule: invalid frwd name <%s>\n", name3);
+			return -EPERM;
+		}
+
+		rc = cph_update_app_rule(p_parse_entry->parse_bm, &p_parse_entry->parse_key,
+					p_mod_entry->mod_bm, &p_mod_entry->mod_value,
+					p_frwd_entry->frwd_bm, &p_frwd_entry->frwd_value);
+		if (rc == MV_OK)
+			pr_err("Succeed to update app rule\n");
+		else
+			pr_err("Fail to update app rule\n");
+	} else if (!strcmp(name, "get_app_rule")) {
+		p_parse_entry = cph_sysfs_find_parse_entry_by_name(name1);
+		if (!p_parse_entry) {
+			pr_err("add_app_rule: invalid parse name <%s>\n", name1);
+			return -EPERM;
+		}
+
+		rc = cph_get_app_rule(p_parse_entry->parse_bm, &p_parse_entry->parse_key,
+					&mod_entry.mod_bm, &mod_entry.mod_value,
+					&frwd_entry.frwd_bm, &frwd_entry.frwd_value);
+		if (rc == MV_OK) {
+			cph_db_display_parse_field(p_parse_entry->parse_bm, &p_parse_entry->parse_key);
+			cph_db_display_mod_field(mod_entry.mod_bm, &mod_entry.mod_value);
+			cph_db_display_frwd_field(frwd_entry.frwd_bm, &frwd_entry.frwd_value);
+		} else {
+			pr_err("No valid CPH app rule\n");
+		}
+
+	} else if (!strcmp(name, "del_parse")) {
+		rc = cph_sysfs_del_parse_entry_by_name(name1);
+		if (rc == TRUE)
+			pr_err("Succeed to delete parse field by name <%s>\n", name1);
+		else
+			pr_err("Fail to delete parse field by name <%s>\n", name1);
+	} else if (!strcmp(name, "del_mod")) {
+		rc = cph_sysfs_del_mod_entry_by_name(name1);
+		if (rc == TRUE)
+			pr_err("Succeed to delete mod field by name <%s>\n", name1);
+		else
+			pr_err("Fail to delete mod field by name <%s>\n", name1);
+	} else if (!strcmp(name, "del_frwd")) {
+		rc = cph_sysfs_del_frwd_entry_by_name(name1);
+		if (rc == TRUE)
+			pr_err("Succeed to delete frwd field by name <%s>\n", name1);
+		else
+			pr_err("Fail to delete frwd field by name <%s>\n", name1);
+	} else
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+
+	raw_local_irq_restore(flags);
+
+	return len;
+}
+
+#ifdef CONFIG_MV_CPH_UDP_SAMPLE_HANDLE
+static ssize_t cph_spec_proc_udp_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	const char *name  = attr->attr.name;
+	unsigned int      v1    = 0;
+	unsigned int      v2    = 0;
+	unsigned int      v3    = 0;
+	unsigned int      v4    = 0;
+	unsigned int      v5    = 0;
+	unsigned int      v6    = 0;
+	unsigned long       flags = 0;
+	MV_STATUS   rc    =  MV_OK;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read input */
+	sscanf(buf, "%d %d %x %x %x %x", &v1, &v2, &v3, &v4, &v5, &v6);
+
+	raw_local_irq_save(flags);
+
+	if (!strcmp(name, "udp_src")) {
+		rc = cph_udp_src_spec_set(v1, v2, v3, v4, v5, v6);
+		if (rc == MV_OK)
+			pr_err("Succeed to add UDP src rule\n");
+		else
+			pr_err("Fail to add UDP src rule\n");
+	} else if (!strcmp(name, "udp_dst")) {
+		rc = cph_udp_dest_spec_set(v1, v2, v3, v4, v5, v6);
+		if (rc == MV_OK)
+			pr_err("Succeed to add UDP dest rule\n");
+		else
+			pr_err("Fail to add UDP dest rule\n");
+	} else
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+
+	raw_local_irq_restore(flags);
+
+	return len;
+}
+#endif
+
+#ifdef CONFIG_MV_CPH_FLOW_MAP_HANDLE
+static ssize_t cph_spec_proc_flow_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	const char    *name  = attr->attr.name;
+	unsigned int         v0    = 0;
+	unsigned int         v1    = 0;
+	unsigned int         v2    = 0;
+	unsigned int         v3    = 0;
+	unsigned int         v4    = 0;
+	unsigned int         v5    = 0;
+	unsigned int         v6    = 0;
+	unsigned int         v7    = 0;
+	unsigned int         v8    = 0;
+	unsigned int         v9    = 0;
+	unsigned int         v10   = 0;
+	unsigned int         v11   = 0;
+	unsigned int         v12   = 0;
+	unsigned int         v13   = 0;
+	unsigned int         v14   = 0;
+	unsigned int         v15   = 0;
+	unsigned int         v16   = 0;
+	unsigned int         v17   = 0;
+	unsigned int         v18   = 0;
+	unsigned int         v19   = 0;
+	unsigned int         v20   = 0;
+	unsigned int         v21   = 0;
+	unsigned long          flags = 0;
+	MV_STATUS      rc    =  MV_OK;
+	struct CPH_FLOW_ENTRY_T cph_flow;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read input */
+	sscanf(buf, "%d %d %x %x %x %x %d %d %x %d %d %d %x %d %d %x %d %d %d %d %d %d", &v0, &v1, &v2, &v3, &v4, &v5, &v6, &v7, &v8, &v9, &v10, &v11, &v12, &v13, &v14, &v15, &v16, &v17, &v18, &v19, &v20, &v21);
+
+	raw_local_irq_save(flags);
+
+#ifdef CONFIG_MV_CPH_FLOW_MAP_HANDLE
+	if (!strcmp(name, "add_flow_rule")) {
+		memset(&cph_flow, 0, sizeof(cph_flow));
+		cph_flow.dir        = (enum CPH_DIR_E)v0;
+		cph_flow.is_default = v1 ? TRUE : FALSE;
+		cph_flow.parse_bm   = (enum CPH_FLOW_PARSE_E)v2;
+		cph_flow.mh         = (unsigned short)v3;
+		cph_flow.eth_type   = (unsigned short)v4;
+		cph_flow.parse_outer_tci.tpid   = (unsigned short)v5;
+		cph_flow.parse_outer_tci.vid    = (unsigned short)v6;
+		cph_flow.parse_outer_tci.pbits  = (unsigned char)v7;
+		cph_flow.parse_inner_tci.tpid   = (unsigned short)v8;
+		cph_flow.parse_inner_tci.vid    = (unsigned short)v9;
+		cph_flow.parse_inner_tci.pbits  = (unsigned char)v10;
+		cph_flow.op_type                = (enum CPH_VLAN_OP_TYPE_E)v11;
+		cph_flow.mod_outer_tci.tpid     = (unsigned short)v12;
+		cph_flow.mod_outer_tci.vid      = (unsigned short)v13;
+		cph_flow.mod_outer_tci.pbits    = (unsigned char)v14;
+		cph_flow.mod_inner_tci.tpid     = (unsigned short)v15;
+		cph_flow.mod_inner_tci.vid      = (unsigned short)v16;
+		cph_flow.mod_inner_tci.pbits    = (unsigned char)v17;
+		cph_flow.pkt_frwd.trg_port      = (unsigned char)v18;
+		cph_flow.pkt_frwd.trg_queue     = (unsigned char)v19;
+		cph_flow.pkt_frwd.trg_hwf_queue = (unsigned char)v20;
+		cph_flow.pkt_frwd.gem_port      = (unsigned short)v21;
+
+		rc = cph_flow_add_rule(&cph_flow);
+		if (rc == MV_OK)
+			pr_err("Succeed to add flow mapping rule\n");
+		else
+			pr_err("Fail to add flow mapping rule\n");
+	} else if (!strcmp(name, "del_flow_rule")) {
+		memset(&cph_flow, 0, sizeof(cph_flow));
+		cph_flow.dir        = (enum CPH_DIR_E)v0;
+		cph_flow.is_default = v1 ? TRUE : FALSE;
+		cph_flow.parse_bm   = (enum CPH_FLOW_PARSE_E)v2;
+		cph_flow.mh         = (unsigned short)v3;
+		cph_flow.eth_type   = (unsigned short)v4;
+		cph_flow.parse_outer_tci.tpid   = (unsigned short)v5;
+		cph_flow.parse_outer_tci.vid    = (unsigned short)v6;
+		cph_flow.parse_outer_tci.pbits  = (unsigned char)v7;
+		cph_flow.parse_inner_tci.tpid   = (unsigned short)v8;
+		cph_flow.parse_inner_tci.vid    = (unsigned short)v9;
+		cph_flow.parse_inner_tci.pbits  = (unsigned char)v10;
+
+		rc = cph_flow_del_rule(&cph_flow);
+		if (rc == MV_OK)
+			pr_err("Succeed to delete flow mapping rule\n");
+		else
+			pr_err("Fail to delete flow mapping rule\n");
+	} else if (!strcmp(name, "get_flow_rule")) {
+		memset(&cph_flow, 0, sizeof(cph_flow));
+		cph_flow.dir        = (enum CPH_DIR_E)v0;
+		cph_flow.is_default = v1 ? TRUE : FALSE;
+		cph_flow.parse_bm   = (enum CPH_FLOW_PARSE_E)v2;
+		cph_flow.mh         = (unsigned short)v3;
+		cph_flow.eth_type   = (unsigned short)v4;
+		cph_flow.parse_outer_tci.tpid   = (unsigned short)v5;
+		cph_flow.parse_outer_tci.vid    = (unsigned short)v6;
+		cph_flow.parse_outer_tci.pbits  = (unsigned char)v7;
+		cph_flow.parse_inner_tci.tpid   = (unsigned short)v8;
+		cph_flow.parse_inner_tci.vid    = (unsigned short)v9;
+		cph_flow.parse_inner_tci.pbits  = (unsigned char)v10;
+
+		rc = cph_flow_get_rule(&cph_flow);
+		if (rc == MV_OK) {
+			pr_err("Succeed to get flow rule\n");
+			pr_info("                        |Parse outer       |Parse inner       |Mod outer         |Mod Inner         |Forward\n");
+			pr_info("dir default tparse_bm mh   ety    tpid   vid  pbits  tpid   vid  pbits  tpid   vid  pbits  tpid   vid  pbits  port queue hwf_queue gem  op_type\n");
+			pr_info(
+			"%2.2s  %4.4s    0x%04x   %-4d 0x%04x 0x%04x %4d %1d      0x%04x %4d %1d      0x%04x %4d %1d      0x%04x %4d %1d      %1d    %1d     %1d         %4d %s\n",
+			cph_app_lookup_dir(cph_flow.dir), (cph_flow.is_default == TRUE) ? "Yes" : "No",
+			cph_flow.parse_bm, cph_flow.mh, cph_flow.eth_type,
+			cph_flow.parse_outer_tci.tpid, cph_flow.parse_outer_tci.vid, cph_flow.parse_outer_tci.pbits,
+			cph_flow.parse_inner_tci.tpid, cph_flow.parse_inner_tci.vid, cph_flow.parse_inner_tci.pbits,
+			cph_flow.mod_outer_tci.tpid,   cph_flow.mod_outer_tci.vid,   cph_flow.mod_outer_tci.pbits,
+			cph_flow.mod_inner_tci.tpid,   cph_flow.mod_inner_tci.vid,   cph_flow.mod_inner_tci.pbits,
+			cph_flow.pkt_frwd.trg_port,    cph_flow.pkt_frwd.trg_queue,  cph_flow.pkt_frwd.trg_hwf_queue,
+			cph_flow.pkt_frwd.gem_port, cph_flow_lookup_op_type(cph_flow.op_type));
+		} else {
+			pr_err("Fail to get flow\n");
+		}
+	} else
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+#endif
+
+	raw_local_irq_restore(flags);
+
+	return len;
+}
+
+static ssize_t cph_spec_proc_dscp_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	unsigned int           v[64];
+	unsigned int           index = 0;
+	unsigned long            flags = 0;
+	MV_STATUS        rc    =  MV_OK;
+	struct CPH_DSCP_PBITS_T dscp_map;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read input */
+	sscanf(buf, "%d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d ",
+	       &v[0],  &v[1],  &v[2],  &v[3],  &v[4],  &v[5],  &v[6],  &v[7],
+	       &v[8],  &v[9],  &v[10], &v[11], &v[12], &v[13], &v[14], &v[15],
+	       &v[16], &v[17], &v[18], &v[19], &v[20], &v[21], &v[22], &v[23],
+	       &v[24], &v[25], &v[26], &v[27], &v[28], &v[29], &v[30], &v[31],
+	       &v[32], &v[33], &v[34], &v[35], &v[36], &v[37], &v[38], &v[39],
+	       &v[40], &v[41], &v[42], &v[43], &v[44], &v[45], &v[46], &v[47],
+	       &v[48], &v[49], &v[50], &v[51], &v[52], &v[53], &v[54], &v[55],
+	       &v[56], &v[57], &v[58], &v[59], &v[60], &v[61], &v[62], &v[63]);
+	for (index = 0; index < 64; index++)
+		dscp_map.pbits[index] = (unsigned char)v[index];
+
+	dscp_map.in_use = TRUE;
+
+	raw_local_irq_save(flags);
+
+	if (!strcmp(name, "set_dscp_map")) {
+		rc = cph_flow_set_dscp_map(&dscp_map);
+		if (rc == MV_OK)
+			pr_err("Succeed to set DSCP to P-bits mapping\n");
+		else
+			pr_err("Fail to set DSCP to P-bits mapping\n");
+	} else
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+
+	raw_local_irq_restore(flags);
+
+	return len;
+}
+#endif
+
+static ssize_t cph_port_func_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	const char *name  = attr->attr.name;
+	unsigned int      v1    = 0;
+	unsigned int      v2    = 0;
+	unsigned int      v3    = 0;
+	unsigned long       flags = 0;
+	MV_STATUS   rc    =  MV_OK;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read input */
+	sscanf(buf, "%d %d %d", &v1, &v2, &v3);
+
+	raw_local_irq_save(flags);
+
+	if (!strcmp(name, "set_port_func")) {
+		rc = cph_set_port_func(v1, v2, v3);
+		if (rc == MV_OK)
+			pr_err("Succeed to set cph port<%d> func\n", v1);
+		else
+			pr_err("Fail to set cph port port<%d> func\n", v1);
+	} else
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+
+	raw_local_irq_restore(flags);
+
+	return len;
+}
+
+static ssize_t cph_port_func_get(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	const char *name        = attr->attr.name;
+	unsigned int v1         = 0;
+	unsigned long flags     = 0;
+	bool rx_enable, tx_enable;
+	MV_STATUS   rc    =  MV_OK;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read input */
+	sscanf(buf, "%d", &v1);
+
+	raw_local_irq_save(flags);
+
+	if (!strcmp(name, "get_port_func")) {
+		rc = cph_get_port_func(v1, &rx_enable, &tx_enable);
+		if (rc != MV_OK)
+			pr_err("input port<%d> is error!\n", v1);
+		else {
+			pr_err("port port<%d> cph func rx <%s>, tx<%s>.\n", v1,
+								rx_enable ? "enabled" : "disbaled",
+								tx_enable ? "enabled" : "disbaled");
+		}
+	} else
+		pr_err("%s: illegal operation <%s>\n", __func__, attr->attr.name);
+
+	raw_local_irq_restore(flags);
+
+	return len;
+}
+
+
+static DEVICE_ATTR(help,            S_IRUSR, cph_spec_proc_show, NULL);
+static DEVICE_ATTR(help_add,        S_IRUSR, cph_spec_proc_show, NULL);
+static DEVICE_ATTR(show_app_db,     S_IRUSR, cph_spec_proc_show, NULL);
+static DEVICE_ATTR(show_parse_name, S_IRUSR, cph_spec_proc_show, NULL);
+static DEVICE_ATTR(show_mod_name,   S_IRUSR, cph_spec_proc_show, NULL);
+static DEVICE_ATTR(show_frwd_name,  S_IRUSR, cph_spec_proc_show, NULL);
+#ifdef CONFIG_MV_CPH_UDP_SAMPLE_HANDLE
+static DEVICE_ATTR(udp_ports,       S_IRUSR, cph_spec_proc_show, NULL);
+#endif
+#ifdef CONFIG_MV_CPH_FLOW_MAP_HANDLE
+static DEVICE_ATTR(show_flow_rule,  S_IRUSR, cph_spec_proc_show, NULL);
+static DEVICE_ATTR(clear_flow_rule, S_IRUSR, cph_spec_proc_show, NULL);
+static DEVICE_ATTR(del_dscp_map,    S_IRUSR, cph_spec_proc_show, NULL);
+#endif
+static DEVICE_ATTR(set_port_func,   S_IWUSR, cph_spec_proc_show, cph_port_func_store);
+static DEVICE_ATTR(get_port_func,   S_IWUSR, cph_spec_proc_show, cph_port_func_get);
+static DEVICE_ATTR(set_complex,     S_IWUSR, cph_spec_proc_show, cph_spec_proc_2_store);
+static DEVICE_ATTR(set_flag,        S_IWUSR, cph_spec_proc_show, cph_spec_proc_2_store);
+static DEVICE_ATTR(add_parse,       S_IWUSR, cph_spec_proc_show, cph_spec_proc_name_store);
+static DEVICE_ATTR(del_parse,       S_IWUSR, cph_spec_proc_show, cph_spec_proc_app_store);
+static DEVICE_ATTR(add_mod,         S_IWUSR, cph_spec_proc_show, cph_spec_proc_name_store);
+static DEVICE_ATTR(del_mod,         S_IWUSR, cph_spec_proc_show, cph_spec_proc_app_store);
+static DEVICE_ATTR(add_frwd,        S_IWUSR, cph_spec_proc_show, cph_spec_proc_name_store);
+static DEVICE_ATTR(del_frwd,        S_IWUSR, cph_spec_proc_show, cph_spec_proc_app_store);
+static DEVICE_ATTR(add_app_rule,    S_IWUSR, cph_spec_proc_show, cph_spec_proc_app_store);
+static DEVICE_ATTR(del_app_rule,    S_IWUSR, cph_spec_proc_show, cph_spec_proc_app_store);
+static DEVICE_ATTR(update_app_rule, S_IWUSR, cph_spec_proc_show, cph_spec_proc_app_store);
+static DEVICE_ATTR(get_app_rule,    S_IWUSR, cph_spec_proc_show, cph_spec_proc_app_store);
+#ifdef CONFIG_MV_CPH_UDP_SAMPLE_HANDLE
+static DEVICE_ATTR(udp_src,         S_IWUSR, cph_spec_proc_show, cph_spec_proc_udp_store);
+static DEVICE_ATTR(udp_dst,         S_IWUSR, cph_spec_proc_show, cph_spec_proc_udp_store);
+#endif
+#ifdef CONFIG_MV_CPH_FLOW_MAP_HANDLE
+static DEVICE_ATTR(add_flow_rule,   S_IWUSR, cph_spec_proc_show, cph_spec_proc_flow_store);
+static DEVICE_ATTR(del_flow_rule,   S_IWUSR, cph_spec_proc_show, cph_spec_proc_flow_store);
+static DEVICE_ATTR(get_flow_rule,   S_IWUSR, cph_spec_proc_show, cph_spec_proc_flow_store);
+static DEVICE_ATTR(set_dscp_map,    S_IWUSR, cph_spec_proc_show, cph_spec_proc_dscp_store);
+#endif
+static DEVICE_ATTR(set_tcont,       S_IWUSR, cph_spec_proc_show, cph_spec_proc_2_store);
+static DEVICE_ATTR(trace_level,     S_IWUSR, cph_spec_proc_show, cph_spec_proc_1_store);
+
+
+static struct attribute *cph_spec_proc_attrs[] = {
+	&dev_attr_help.attr,
+	&dev_attr_help_add.attr,
+	&dev_attr_show_app_db.attr,
+	&dev_attr_show_parse_name.attr,
+	&dev_attr_show_mod_name.attr,
+	&dev_attr_show_frwd_name.attr,
+#ifdef CONFIG_MV_CPH_UDP_SAMPLE_HANDLE
+	&dev_attr_udp_ports.attr,
+#endif
+#ifdef CONFIG_MV_CPH_FLOW_MAP_HANDLE
+	&dev_attr_show_flow_rule.attr,
+	&dev_attr_clear_flow_rule.attr,
+	&dev_attr_del_dscp_map.attr,
+#endif
+	&dev_attr_set_port_func.attr,
+	&dev_attr_get_port_func.attr,
+	&dev_attr_set_complex.attr,
+	&dev_attr_set_flag.attr,
+	&dev_attr_add_parse.attr,
+	&dev_attr_del_parse.attr,
+	&dev_attr_add_mod.attr,
+	&dev_attr_del_mod.attr,
+	&dev_attr_add_frwd.attr,
+	&dev_attr_del_frwd.attr,
+	&dev_attr_add_app_rule.attr,
+	&dev_attr_del_app_rule.attr,
+	&dev_attr_update_app_rule.attr,
+	&dev_attr_get_app_rule.attr,
+#ifdef CONFIG_MV_CPH_UDP_SAMPLE_HANDLE
+	&dev_attr_udp_src.attr,
+	&dev_attr_udp_dst.attr,
+#endif
+#ifdef CONFIG_MV_CPH_FLOW_MAP_HANDLE
+	&dev_attr_add_flow_rule.attr,
+	&dev_attr_del_flow_rule.attr,
+	&dev_attr_get_flow_rule.attr,
+	&dev_attr_set_dscp_map.attr,
+#endif
+	&dev_attr_set_tcont.attr,
+	&dev_attr_trace_level.attr,
+
+	NULL
+};
+
+static struct attribute_group cph_spec_proc_group = {
+	.name = "proto",
+	.attrs = cph_spec_proc_attrs,
+};
+
+int cph_sysfs_init(void)
+{
+	int          err = 0;
+	struct device *pd  = NULL;
+
+	pd = bus_find_device_by_name(&platform_bus_type, NULL, "cph");
+	if (!pd) {
+		platform_device_register_simple("cph", -1, NULL, 0);
+		pd = bus_find_device_by_name(&platform_bus_type, NULL, "cph");
+	}
+
+	if (!pd) {
+		pr_err("%s: cannot find cph device\n", __func__);
+		pd = &platform_bus;
+	}
+
+	err = sysfs_create_group(&pd->kobj, &cph_spec_proc_group);
+	if (err) {
+		pr_info("sysfs group failed %d\n", err);
+		goto out;
+	}
+
+	/* Init CPH SYS FS data base to hold parse/mod/frwd values */
+	cph_sysfs_init_parse_db();
+	cph_sysfs_init_mod_db();
+	cph_sysfs_init_frwd_db();
+
+out:
+	return err;
+}
+
+void cph_sysfs_exit(void)
+{
+	struct device *pd = NULL;
+
+	pd = bus_find_device_by_name(&platform_bus_type, NULL, "cph");
+	if (!pd) {
+		pr_err("%s: cannot find CPH device\n", __func__);
+		return;
+	}
+
+	sysfs_remove_group(&pd->kobj, &cph_spec_proc_group);
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_sysfs.h b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_sysfs.h
new file mode 100644
index 000000000000..1f753b667155
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/cph/mv_cph_sysfs.h
@@ -0,0 +1,131 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	*   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+	*   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+	*   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+********************************************************************************
+* mv_cph_sysfs.h
+*
+* DESCRIPTION: Marvell CPH(CPH Packet Handler) sysfs command definition
+*
+* DEPENDENCIES:
+*               None
+*
+* CREATED BY:   VictorGu
+*
+* DATE CREATED: 11Dec2011
+*
+* FILE REVISION NUMBER:
+*               Revision: 1.1
+*
+*
+*******************************************************************************/
+#ifndef _MV_CPH_SYSFS_H_
+#define _MV_CPH_SYSFS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CPH_SYSFS_FIELD_MAX_LEN   (32)
+#define CPH_SYSFS_FIELD_MAX_ENTRY (64)
+
+
+/* Common DB structure for entries
+------------------------------------------------------------------------------*/
+struct CPH_SYSFS_RULE_T {
+	int  max_entry_num;
+	int  entry_num;
+	int  entry_size;
+	void  *entry_ara;
+} ;
+
+/* Parsing filed entry
+------------------------------------------------------------------------------*/
+struct CPH_SYSFS_PARSE_T {
+	char                  name[CPH_SYSFS_FIELD_MAX_LEN+1];
+	enum CPH_APP_PARSE_FIELD_E parse_bm;
+	struct CPH_APP_PARSE_T       parse_key;
+};
+
+/* Modification filed entry
+------------------------------------------------------------------------------*/
+struct CPH_SYSFS_MOD_T {
+	char                  name[CPH_SYSFS_FIELD_MAX_LEN+1];
+	enum CPH_APP_MOD_FIELD_E   mod_bm;
+	struct CPH_APP_MOD_T         mod_value;
+};
+
+/* Forwarding filed entry
+------------------------------------------------------------------------------*/
+struct CPH_SYSFS_FRWD_T {
+	char                  name[CPH_SYSFS_FIELD_MAX_LEN+1];
+	enum CPH_APP_FRWD_FIELD_E  frwd_bm;
+	struct CPH_APP_FRWD_T        frwd_value;
+};
+
+
+int cph_sysfs_init(void);
+void cph_sysfs_exit(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _MV_CPH_SYSFS_H_ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/dpi/dpi_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/dpi/dpi_sysfs.c
new file mode 100644
index 000000000000..f62f39d73a82
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/dpi/dpi_sysfs.c
@@ -0,0 +1,297 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include "mvCommon.h"
+#include "mvTypes.h"
+#include "dpi/mvPp2DpiHw.h"
+
+
+static ssize_t dpi_help(char *b)
+{
+	int o = 0;
+
+	o += scnprintf(b + o, PAGE_SIZE - o, "arguments c, d, o, s, n: decimal numbers\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "arguments b, m         : hexadecimal numbers\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "[c] counter valid range [0..%d]\n", MV_PP2_DPI_CNTRS - 1);
+	o += scnprintf(b + o, PAGE_SIZE - o, "[o] window offset valid range [0..%d] bytes\n", MV_PP2_DPI_WIN_OFFSET_MAX);
+	o += scnprintf(b + o, PAGE_SIZE - o, "[s] window size valid range [0..%d] bytes\n", MV_PP2_DPI_WIN_SIZE_MAX);
+	o += scnprintf(b + o, PAGE_SIZE - o, "[n] number of descriptors valid range [0..%d]\n", MV_PP2_DPI_Q_SIZE_MAX);
+	o += scnprintf(b + o, PAGE_SIZE - o, "\n");
+
+	o += scnprintf(b + o, PAGE_SIZE - o, "cat            help    - Show this help\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "cat            regs    - Show DPI hardware registers\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "echo d         queues  - Show DPI request and result queues. 0-brief, 1-full\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "echo c o s   > win     - Set window offset [o] and size [s] for DPI counter [c]\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "echo b m     > cntrs   - Set map of counters [m] to be incremented for byte [b]\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "echo c b 0|1 > cntr_en - On/Off incrementing of DPI counter [c] for byte [b]\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "echo c       > disable - Disable incrementing of DPI counter [c] for all bytes\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "echo n       > q_size  - Set number of descriptors [n] for DPI queues\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "echo data    > do_req  - Put DPI request for [data=xxxxxx...] and print results\n");
+
+	return o;
+}
+
+static ssize_t dpi_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	const char  *name = attr->attr.name;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "help"))
+		return dpi_help(buf);
+
+	if (!strcmp(name, "regs")) {
+		mvPp2DpiRegs();
+	} else if (!strcmp(name, "queues")) {
+		mvPp2DpiQueueShow(0);
+	} else {
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static ssize_t dpi_dec_store(struct device *dev,
+			struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0, a = 0, b = 0, c = 0;
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%d %d %d", &a, &b, &c);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "queues")) {
+		mvPp2DpiQueueShow(a);
+	} else if (!strcmp(name, "win")) {
+		err = mvPp2DpiCntrWinSet(a, b, c);
+	} else if (!strcmp(name, "disable")) {
+		err = mvPp2DpiCntrDisable(a);
+	} else if (!strcmp(name, "q_size")) {
+		if (mvPp2DpiQueuesDelete())
+			pr_err("DPI: %s command error. Can't delete queues\n", name);
+		err = mvPp2DpiQueuesCreate(a);
+	} else
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, name);
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t dpi_hex_store(struct device *dev,
+			struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0, a = 0, b = 0;
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%x %x", &a, &b);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "cntrs"))
+		mvPp2DpiByteConfig(a, b);
+	else
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, name);
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t dpi_dec_hex_store(struct device *dev,
+			struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0, a = 0, b = 0, c = 0;
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%d %x %d", &a, &b, &c);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "cntr_en"))
+		mvPp2DpiCntrByteSet(a, b, c);
+	else
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, name);
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+static char		dpi_pkt_data[MV_PP2_DPI_MAX_PKT_SIZE];
+static unsigned char	dpi_counters[MV_PP2_DPI_CNTRS];
+#define DPI_REQUEST_TIMEOUT  100000
+
+static int mv_pp2_dpi_do_request(char *data, int size, unsigned char *counters)
+{
+	unsigned int timeout = DPI_REQUEST_TIMEOUT;
+	unsigned long phys_addr;
+	int ready_num;
+
+	phys_addr = mvOsCacheFlush(NULL, data, size);
+	if (mvPp2DpiRequestSet(phys_addr, size)) {
+		pr_err("%s: DPI request set failed\n", __func__);
+		return -EINVAL;
+	}
+	/* Start processing */
+	wmb();
+	mvPp2DpiReqPendAdd(1);
+
+	/* Wait for response is ready */
+	ready_num = 0;
+	while (ready_num == 0) {
+		timeout--;
+		if (timeout == 0) {
+			pr_err("%s: DPI result get timeout\n", __func__);
+			return -EINVAL;
+		}
+		ready_num = mvPp2DpiResOccupGet();
+	}
+	pr_info("DPI request is ready after %d\n", DPI_REQUEST_TIMEOUT - timeout);
+	if (ready_num != 1)
+		pr_warning("%s: %d requests became ready - only one processsed\n",
+			__func__, ready_num);
+
+	/* Process single response - copy counters */
+	mvOsCacheIoSync(NULL);
+	mvPp2DpiResultGet(dpi_counters, MV_PP2_DPI_CNTRS);
+
+	/* Enable HW to reuse Response descriptors */
+	wmb();
+	mvPp2DpiResOccupDec(ready_num);
+
+	return 0;
+}
+
+static ssize_t dpi_string_store(struct device *dev,
+			struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0, size = 0, i;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "do_req")) {
+		size = strlen(buf) / 2;
+		if (size > sizeof(dpi_pkt_data))
+			size = sizeof(dpi_pkt_data);
+		mvHexToBin(buf, dpi_pkt_data, size);
+		err = mv_pp2_dpi_do_request(dpi_pkt_data, size, dpi_counters);
+		if (!err) {
+			for (i = 0; i < MV_PP2_DPI_CNTRS; i++)
+				pr_info("#%2d  -  %d\n", i, dpi_counters[i]);
+
+			pr_info("\n");
+		}
+	} else
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, name);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(help,          S_IRUSR, dpi_show, NULL);
+static DEVICE_ATTR(regs,          S_IRUSR, dpi_show, NULL);
+static DEVICE_ATTR(queues,        S_IRUSR | S_IWUSR, dpi_show, dpi_dec_store);
+static DEVICE_ATTR(win,           S_IWUSR, NULL, dpi_dec_store);
+static DEVICE_ATTR(cntrs,         S_IWUSR, NULL, dpi_hex_store);
+static DEVICE_ATTR(cntr_en,       S_IWUSR, NULL, dpi_dec_hex_store);
+static DEVICE_ATTR(disable,       S_IWUSR, NULL, dpi_dec_store);
+static DEVICE_ATTR(q_size,        S_IWUSR, NULL, dpi_dec_store);
+static DEVICE_ATTR(do_req,        S_IWUSR, NULL, dpi_string_store);
+
+static struct attribute *dpi_attrs[] = {
+	&dev_attr_help.attr,
+	&dev_attr_regs.attr,
+	&dev_attr_queues.attr,
+	&dev_attr_win.attr,
+	&dev_attr_cntrs.attr,
+	&dev_attr_cntr_en.attr,
+	&dev_attr_disable.attr,
+	&dev_attr_q_size.attr,
+	&dev_attr_do_req.attr,
+	NULL
+};
+
+
+static struct attribute_group mv_dpi_group = {
+	.name = "dpi",
+	.attrs = dpi_attrs,
+};
+
+int mv_pp2_dpi_sysfs_init(struct kobject *pp2_kobj)
+{
+	int err = 0;
+
+	err = sysfs_create_group(pp2_kobj, &mv_dpi_group);
+	if (err)
+		printk(KERN_INFO "sysfs group %s failed %d\n", mv_dpi_group.name, err);
+
+	return err;
+}
+
+int mv_pp2_dpi_sysfs_exit(struct kobject *pp2_kobj)
+{
+	sysfs_remove_group(pp2_kobj, &mv_dpi_group);
+
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/.gitignore b/drivers/net/ethernet/mvebu_net/pp2/hal/.gitignore
new file mode 100644
index 000000000000..1d5b2ead4159
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/.gitignore
@@ -0,0 +1,97 @@
+#
+# NOTE! Don't add files that are generated in specific
+# subdirectories here. Add them in the ".gitignore" file
+# in that subdirectory instead.
+#
+# NOTE! Please use 'git ls-files -i --exclude-standard'
+# command after changing this file, to see if there are
+# any tracked files which get ignored after the change.
+#
+# Normal rules
+#
+.*
+*.o
+*.o.*
+*.a
+*.s
+*.ko
+*.so
+*.so.dbg
+*.mod.c
+*.i
+*.lst
+*.symtypes
+*.order
+modules.builtin
+*.elf
+*.bin
+*.gz
+*.bz2
+*.lzma
+*.xz
+*.lzo
+*.patch
+*.gcno
+
+#
+#
+#
+arch/arm/mach-armadaxp/armada_xp_family/
+arch/arm/mach-armada370/armada_370_family/
+arch/arm/mach-armada375/armada_375_family/
+arch/arm/mach-armada380/armada_380_family/
+arch/arm/plat-armada/common/
+arch/arm/plat-armada/mv_hal/
+arch/arm/plat-armada/mv_drivers_lsp/mv_pp2/
+arch/arm/plat-armada/mv_drivers_lsp/mv_neta/
+#
+# Top-level generic files
+#
+/tags
+/TAGS
+/linux
+/vmlinux
+/vmlinuz
+/System.map
+/Module.markers
+/Module.symvers
+
+#
+# Debian directory (make deb-pkg)
+#
+/debian/
+
+#
+# git files that we don't want to ignore even it they are dot-files
+#
+!.gitignore
+!.mailmap
+
+#
+# Generated include files
+#
+include/config
+include/linux/version.h
+include/generated
+arch/*/include/generated
+
+# stgit generated dirs
+patches-*
+
+# quilt's files
+patches
+series
+
+# cscope files
+cscope.*
+ncscope.*
+
+# gnu global files
+GPATH
+GRTAGS
+GSYMS
+GTAGS
+
+*.orig
+*~
+\#*#
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/bm/mvBm.c b/drivers/net/ethernet/mvebu_net/pp2/hal/bm/mvBm.c
new file mode 100644
index 000000000000..61a1a4c984ba
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/bm/mvBm.c
@@ -0,0 +1,935 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCommon.h"		/* Should be included before mvSysHwConfig */
+#include "mvTypes.h"
+#include "mv802_3.h"
+#include "mvDebug.h"
+#include "mvCommon.h"
+#include "mvOs.h"
+
+#include "mvBm.h"
+
+static MV_BM_POOL	mvBmPools[MV_BM_POOLS];
+
+static MV_BM_QSET	mvBmQsets[MV_BM_QSET_MAX];
+static int		mvBmRxqToQsetLong[MV_BM_QSET_PRIO_MAX];
+static int		mvBmRxqToQsetShort[MV_BM_QSET_PRIO_MAX];
+static int		mvBmTxqToQsetLong[MV_BM_QSET_PRIO_MAX];
+static int		mvBmTxqToQsetShort[MV_BM_QSET_PRIO_MAX];
+
+/* Initialize Hardware Buffer management unit */
+MV_STATUS mvBmInit()
+{
+	int i;
+
+	for (i = 0; i < MV_BM_POOLS; i++) {
+		/* Mask BM all interrupts */
+		mvPp2WrReg(MV_BM_INTR_MASK_REG(i), 0);
+		/* Clear BM cause register */
+		mvPp2WrReg(MV_BM_INTR_CAUSE_REG(i), 0);
+	}
+
+	memset(mvBmPools, 0, sizeof(mvBmPools));
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	/* Enable BM priority */
+	mvPp2WrReg(MV_BM_PRIO_CTRL_REG, 1);
+
+	/* Initialize Qsets */
+	for (i = 0; i < MV_BM_QSET_MAX; i++) {
+		mvBmQsets[i].id = i;
+		mvBmQsets[i].pool = -1;
+	}
+
+	for (i = 0; i < MV_BM_QSET_PRIO_MAX; i++) {
+		mvBmRxqToQsetLong[i] = -1;
+		mvBmRxqToQsetShort[i] = -1;
+		mvBmTxqToQsetLong[i] = -1;
+		mvBmTxqToQsetShort[i] = -1;
+	}
+#endif
+
+	return MV_OK;
+}
+
+MV_STATUS mvBmPoolControl(int pool, MV_COMMAND cmd)
+{
+	MV_U32 regVal = 0;
+	regVal = mvPp2RdReg(MV_BM_POOL_CTRL_REG(pool));
+
+	switch (cmd) {
+	case MV_START:
+		regVal |= MV_BM_START_MASK;
+		break;
+
+	case MV_STOP:
+		regVal |= MV_BM_STOP_MASK;
+		break;
+
+	default:
+		mvOsPrintf("bmControl: Unknown command %d\n", cmd);
+		return MV_FAIL;
+	}
+	mvPp2WrReg(MV_BM_POOL_CTRL_REG(pool), regVal);
+	return MV_OK;
+}
+
+MV_STATE mvBmPoolStateGet(int pool)
+{
+	MV_U32 regVal;
+	MV_STATE state;
+
+	regVal = mvPp2RdReg(MV_BM_POOL_CTRL_REG(pool));
+
+	if (regVal & MV_BM_STATE_MASK)
+		state = MV_ACTIVE;
+	else
+		state = MV_IDLE;
+
+	return state;
+}
+
+/* Configure BM specific pool of "capacity" size. */
+MV_STATUS mvBmPoolInit(int pool, MV_U32 *virtPoolBase, MV_ULONG physPoolBase, int capacity)
+{
+	MV_BM_POOL	*pBmPool;
+
+	/* validate poolId */
+	if ((pool < 0) || (pool >= MV_BM_POOLS)) {
+		mvOsPrintf("bmPoolId = %d is invalid \n", pool);
+		return MV_BAD_PARAM;
+	}
+	/* poolBase must be 4 byte aligned */
+	if (MV_IS_NOT_ALIGN(physPoolBase, MV_BM_POOL_PTR_ALIGN)) {
+		mvOsPrintf("bmPoolBase = 0x%lx is not aligned 4 bytes\n", physPoolBase);
+		return MV_NOT_ALIGNED;
+	}
+	if (MV_IS_NOT_ALIGN(capacity, 16)) {
+		mvOsPrintf("%s: Illegal pool capacity %d, ", __func__, capacity);
+		capacity = MV_ALIGN_UP(capacity, 16);
+		mvOsPrintf("round to: %d\n", capacity);
+	}
+	/* Minimum pool capacity is 128 entries */
+	if (capacity < MV_BM_POOL_CAP_MIN) {
+		mvOsPrintf("bmPool capacity = %d is smaller than minimum (%d)\n", capacity, MV_BM_POOL_CAP_MIN);
+		return MV_BAD_SIZE;
+	}
+	/* Maximum pool capacity is 16K entries (2^14) */
+	if (capacity > MV_BM_POOL_CAP_MAX) {
+		mvOsPrintf("bmPool capacity = %d is larger than maximum (%d)\n", capacity, MV_BM_POOL_CAP_MAX);
+		return MV_BAD_SIZE;
+	}
+	/* Update data structure */
+	pBmPool = &mvBmPools[pool];
+	if (pBmPool->physAddr) {
+		mvOsPrintf("bmPool = %d is already busy\n", pool);
+		return MV_BUSY;
+	}
+
+	pBmPool->pool = pool;
+	pBmPool->capacity = capacity;
+	pBmPool->physAddr = physPoolBase;
+	pBmPool->pVirt = virtPoolBase;
+
+	mvBmPoolControl(pool, MV_STOP);
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	/* Init Qsets list for this pool */
+	pBmPool->qsets = mvListCreate();
+	if (pBmPool->qsets == NULL) {
+		mvOsPrintf("%s: failed to create Qsets list\n", __func__);
+		return MV_FAIL;
+	}
+
+	/* Assign and init default Qset for this pool */
+	pBmPool->defQset = &mvBmQsets[MV_BM_POOL_QSET_BASE + pool];
+
+	mvBmQsetCreate(pBmPool->defQset->id, pool);
+	mvBmQsetBuffMaxSet(pBmPool->defQset->id, 0, 0);
+	mvBmQsetBuffCountersSet(pBmPool->defQset->id, 0, 0);
+
+	/* Assign and init MC Qset for this pool */
+	pBmPool->mcQset = &mvBmQsets[pool];
+
+	mvBmQsetCreate(pBmPool->mcQset->id, pool);
+	mvBmQsetBuffMaxSet(pBmPool->mcQset->id, 0, 0);
+	mvBmQsetBuffCountersSet(pBmPool->mcQset->id, 0, 0);
+
+	/* Init default priority counters for this pool */
+	mvBmPoolBuffNumSet(pool, 0);
+	mvBmPoolBuffCountersSet(pool, 0, 0);
+#endif
+
+	/* Set poolBase address */
+	mvPp2WrReg(MV_BM_POOL_BASE_REG(pool), physPoolBase);
+
+	/* Set Pool size */
+	mvPp2WrReg(MV_BM_POOL_SIZE_REG(pool), capacity);
+
+	return MV_OK;
+}
+
+MV_STATUS mvBmPoolBufSizeSet(int pool, int buf_size)
+{
+	MV_BM_POOL *pBmPool;
+
+	/* validate poolId */
+	if ((pool < 0) || (pool >= MV_BM_POOLS)) {
+		mvOsPrintf("bmPoolId = %d is invalid \n", pool);
+		return MV_BAD_PARAM;
+	}
+	pBmPool = &mvBmPools[pool];
+
+	pBmPool->bufSize = buf_size;
+
+	return MV_OK;
+}
+
+MV_STATUS mvBmPoolBufNumUpdate(int pool, int buf_num, int add)
+{
+	MV_BM_POOL *pBmPool;
+
+	/* validate poolId */
+	if ((pool < 0) || (pool >= MV_BM_POOLS)) {
+		mvOsPrintf("bmPoolId = %d is invalid \n", pool);
+		return MV_BAD_PARAM;
+	}
+
+	pBmPool = &mvBmPools[pool];
+
+	if (add)
+		pBmPool->bufNum += buf_num;
+	else
+		pBmPool->bufNum -= buf_num;
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	/* Update max buffers of default Qset, MC Qset and pool shared */
+	if (add) {
+		mvBmQsetBuffMaxSet(pBmPool->defQset->id,
+			pBmPool->defQset->maxGrntd, pBmPool->defQset->maxShared + buf_num);
+		mvBmQsetBuffMaxSet(pBmPool->mcQset->id,
+			pBmPool->mcQset->maxGrntd, pBmPool->mcQset->maxShared + buf_num);
+		mvBmPoolBuffNumSet(pool, pBmPool->maxShared + buf_num);
+	} else {
+		mvBmQsetBuffMaxSet(pBmPool->defQset->id,
+			pBmPool->defQset->maxGrntd, pBmPool->defQset->maxShared - buf_num);
+		mvBmQsetBuffMaxSet(pBmPool->mcQset->id,
+			pBmPool->mcQset->maxGrntd, pBmPool->mcQset->maxShared - buf_num);
+		mvBmPoolBuffNumSet(pool, pBmPool->maxShared - buf_num);
+	}
+#endif
+
+	return MV_OK;
+}
+
+/******************************************************************************/
+/* BM priority API */
+MV_STATUS mvBmQsetCreate(int qset, int pool)
+{
+	if ((pool < 0) || (pool >= MV_BM_POOLS)) {
+		mvOsPrintf("%s: bmPoolId = %d is invalid\n", __func__, pool);
+		return MV_BAD_PARAM;
+	}
+	if (qset < 0 || qset >= MV_BM_QSET_MAX) {
+		mvOsPrintf("%s: Bad qset number = %d\n", __func__, qset);
+		return MV_BAD_PARAM;
+	}
+	if (mvBmQsets[qset].pool != -1) {
+		mvOsPrintf("%s: qset %d is already attached to pool %d\n", __func__, qset, mvBmQsets[qset].pool);
+		return MV_FAIL;
+	}
+
+	mvBmQsets[qset].pool = pool;
+	mvBmQsets[qset].refCount = 0;
+	mvBmQsets[qset].maxShared = 0;
+	mvBmQsets[qset].maxGrntd = 0;
+
+	mvListAddHead(mvBmPools[pool].qsets, (MV_ULONG)qset);
+
+	return MV_OK;
+}
+
+MV_STATUS mvBmQsetDelete(int qset)
+{
+	int pool;
+
+	if (qset < 0 || qset >= MV_BM_QSET_MAX) {
+		mvOsPrintf("%s: Bad qset number = %d\n", __func__, qset);
+		return MV_BAD_PARAM;
+	}
+
+	if (mvBmQsets[qset].refCount > 0) {
+		mvOsPrintf("%s: qset number %d has RXQs/TXQs that use it\n", __func__, qset);
+		return MV_FAIL;
+	}
+
+	pool = mvBmQsets[qset].pool;
+	if (pool != -1) {
+		MV_LIST_ELEMENT *elm;
+
+		if (qset == mvBmDefaultQsetNumGet(pool)) {
+			mvOsPrintf("%s: Can't delete Bm pool's default Qset (%d)\n", __func__, qset);
+			return MV_BAD_PARAM;
+		}
+
+		elm = mvListFind(mvBmPools[pool].qsets, (MV_ULONG)qset);
+
+		mvListDel(elm);
+	}
+
+	mvBmQsets[qset].pool = -1;
+
+	return MV_OK;
+}
+
+int mvBmDefaultQsetNumGet(int pool)
+{
+	/* validate poolId */
+	if ((pool < 0) || (pool >= MV_BM_POOLS)) {
+		mvOsPrintf("%s: bmPoolId = %d is invalid\n", __func__, pool);
+		return MV_BAD_PARAM;
+	}
+
+	return mvBmPools[pool].defQset->id;
+}
+
+MV_STATUS mvBmRxqToQsetLongClean(int queue)
+{
+	int oldQset;
+
+	if (queue < 0 || queue > MV_BM_PRIO_IDX_MASK) {
+		mvOsPrintf("%s: Bad queue number = %d\n", __func__, queue);
+		return MV_BAD_PARAM;
+	}
+
+	oldQset = mvBmRxqToQsetLong[queue];
+	if (oldQset != -1)
+		mvBmQsets[oldQset].refCount--;
+
+	mvBmRxqToQsetLong[queue] = -1;
+
+	return MV_OK;
+}
+
+MV_STATUS mvBmRxqToQsetShortClean(int queue)
+{
+	int oldQset;
+
+	if (queue < 0 || queue > MV_BM_PRIO_IDX_MASK) {
+		mvOsPrintf("%s: Bad queue number = %d\n", __func__, queue);
+		return MV_BAD_PARAM;
+	}
+
+	oldQset = mvBmRxqToQsetShort[queue];
+	if (oldQset != -1)
+		mvBmQsets[oldQset].refCount--;
+
+	mvBmRxqToQsetShort[queue] = -1;
+
+	return MV_OK;
+}
+
+MV_STATUS mvBmTxqToQsetLongClean(int queue)
+{
+	int oldQset;
+
+	if (queue < 0 || queue > MV_BM_PRIO_IDX_MASK) {
+		mvOsPrintf("%s: Bad queue number = %d\n", __func__, queue);
+		return MV_BAD_PARAM;
+	}
+
+	oldQset = mvBmTxqToQsetLong[queue];
+	if (oldQset != -1)
+		mvBmQsets[oldQset].refCount--;
+
+	mvBmTxqToQsetLong[queue] = -1;
+
+	return MV_OK;
+}
+
+MV_STATUS mvBmTxqToQsetShortClean(int queue)
+{
+	int oldQset;
+
+	if (queue < 0 || queue > MV_BM_PRIO_IDX_MASK) {
+		mvOsPrintf("%s: Bad queue number = %d\n", __func__, queue);
+		return MV_BAD_PARAM;
+	}
+
+	oldQset = mvBmTxqToQsetShort[queue];
+	if (oldQset != -1)
+		mvBmQsets[oldQset].refCount--;
+
+	mvBmTxqToQsetShort[queue] = -1;
+
+	return MV_OK;
+}
+
+MV_STATUS mvBmRxqToQsetLongSet(int queue, int qset)
+{
+	MV_U32 regVal;
+
+	if (queue < 0 || queue > MV_BM_PRIO_IDX_MASK) {
+		mvOsPrintf("%s: Bad queue number = %d\n", __func__, queue);
+		return MV_BAD_PARAM;
+	}
+	if (qset < 0 || qset >= MV_BM_QSET_MAX) {
+		mvOsPrintf("%s: Bad qset number = %d\n", __func__, qset);
+		return MV_BAD_PARAM;
+	}
+	if (mvBmQsets[qset].pool == -1) {
+		mvOsPrintf("%s: qset %d is not attached to BM pool\n", __func__, qset);
+		return MV_FAIL;
+	}
+
+	/* Same Qset */
+	if (mvBmRxqToQsetLong[queue] == qset)
+		return MV_OK;
+
+	/* Remove old Qset */
+	if (mvBmRxqToQsetLong[queue] != -1) {
+		int oldQset = mvBmRxqToQsetLong[queue];
+
+		/* Check that queue is using the same BM pool */
+		if (mvBmQsets[qset].pool != mvBmQsets[oldQset].pool) {
+			mvOsPrintf("%s: queue %d is attached BM pool %d, but new qset %d is attached to BM pool %d\n",
+				__func__, queue, mvBmQsets[oldQset].pool, qset, mvBmQsets[qset].pool);
+			return MV_FAIL;
+		}
+		mvBmQsets[oldQset].refCount--;
+	}
+
+	mvPp2WrReg(MV_BM_PRIO_IDX_REG, queue);
+
+	regVal = mvPp2RdReg(MV_BM_CPU_QSET_REG);
+	regVal &= ~MV_BM_CPU_LONG_QSET_MASK;
+	regVal |= ((qset << MV_BM_CPU_LONG_QSET_OFFS) & MV_BM_CPU_LONG_QSET_MASK);
+
+	mvPp2WrReg(MV_BM_CPU_QSET_REG, regVal);
+
+	mvBmRxqToQsetLong[queue] = qset;
+	mvBmQsets[qset].refCount++;
+
+	return MV_OK;
+}
+
+MV_STATUS mvBmRxqToQsetShortSet(int queue, int qset)
+{
+	MV_U32 regVal;
+
+	if (queue < 0 || queue > MV_BM_PRIO_IDX_MASK) {
+		mvOsPrintf("%s: Bad queue number = %d\n", __func__, queue);
+		return MV_BAD_PARAM;
+	}
+	if (qset < 0 || qset >= MV_BM_QSET_MAX) {
+		mvOsPrintf("%s: Bad qset number = %d\n", __func__, qset);
+		return MV_BAD_PARAM;
+	}
+	if (mvBmQsets[qset].pool == -1) {
+		mvOsPrintf("%s: qset %d is not attached to BM pool\n", __func__, qset);
+		return MV_FAIL;
+	}
+
+	/* Same Qset */
+	if (mvBmRxqToQsetShort[queue] == qset)
+		return MV_OK;
+
+	/* Remove old Qset */
+	if (mvBmRxqToQsetShort[queue] != -1) {
+		int oldQset = mvBmRxqToQsetShort[queue];
+
+		/* Check that queue is using the same BM pool */
+		if (mvBmQsets[qset].pool != mvBmQsets[oldQset].pool) {
+			mvOsPrintf("%s: queue %d is attached BM pool %d, but new qset %d is attached to BM pool %d\n",
+				__func__, queue, mvBmQsets[oldQset].pool, qset, mvBmQsets[qset].pool);
+			return MV_FAIL;
+		}
+		mvBmQsets[oldQset].refCount--;
+	}
+
+	mvPp2WrReg(MV_BM_PRIO_IDX_REG, queue);
+
+	regVal = mvPp2RdReg(MV_BM_CPU_QSET_REG);
+	regVal &= ~MV_BM_CPU_SHORT_QSET_MASK;
+	regVal |= ((qset << MV_BM_CPU_SHORT_QSET_OFFS) & MV_BM_CPU_SHORT_QSET_MASK);
+
+	mvPp2WrReg(MV_BM_CPU_QSET_REG, regVal);
+
+	mvBmRxqToQsetShort[queue] = qset;
+	mvBmQsets[qset].refCount++;
+
+	return MV_OK;
+}
+
+MV_STATUS mvBmTxqToQsetLongSet(int queue, int qset)
+{
+	MV_U32 regVal;
+
+	if (queue < 0 || queue > MV_BM_PRIO_IDX_MASK) {
+		mvOsPrintf("%s: Bad queue number = %d\n", __func__, queue);
+		return MV_BAD_PARAM;
+	}
+	if (qset < 0 || qset >= MV_BM_QSET_MAX) {
+		mvOsPrintf("%s: Bad qset number = %d\n", __func__, qset);
+		return MV_BAD_PARAM;
+	}
+	if (mvBmQsets[qset].pool == -1) {
+		mvOsPrintf("%s: qset %d is not attached to BM pool\n", __func__, qset);
+		return MV_FAIL;
+	}
+
+	/* Same Qset */
+	if (mvBmTxqToQsetLong[queue] == qset)
+		return MV_OK;
+
+	/* Remove old Qset */
+	if (mvBmTxqToQsetLong[queue] != -1) {
+		int oldQset = mvBmTxqToQsetLong[queue];
+
+		/* Check that queue is using the same BM pool */
+		if (mvBmQsets[qset].pool != mvBmQsets[oldQset].pool) {
+			mvOsPrintf("%s: queue %d is attached BM pool %d, but new qset %d is attached to BM pool %d\n",
+				__func__, queue, mvBmQsets[oldQset].pool, qset, mvBmQsets[qset].pool);
+			return MV_FAIL;
+		}
+		mvBmQsets[oldQset].refCount--;
+	}
+
+	mvPp2WrReg(MV_BM_PRIO_IDX_REG, queue);
+
+	regVal = mvPp2RdReg(MV_BM_HWF_QSET_REG);
+	regVal &= ~MV_BM_HWF_LONG_QSET_MASK;
+	regVal |= ((qset << MV_BM_HWF_LONG_QSET_OFFS) & MV_BM_HWF_LONG_QSET_MASK);
+
+	mvPp2WrReg(MV_BM_HWF_QSET_REG, regVal);
+
+	mvBmTxqToQsetLong[queue] = qset;
+	mvBmQsets[qset].refCount++;
+
+	return MV_OK;
+}
+
+MV_STATUS mvBmTxqToQsetShortSet(int queue, int qset)
+{
+	MV_U32 regVal;
+
+	if (queue < 0 || queue > MV_BM_PRIO_IDX_MASK) {
+		mvOsPrintf("%s: Bad queue number = %d\n", __func__, queue);
+		return MV_BAD_PARAM;
+	}
+	if (qset < 0 || qset >= MV_BM_QSET_MAX) {
+		mvOsPrintf("%s: Bad qset number = %d\n", __func__, qset);
+		return MV_BAD_PARAM;
+	}
+	if (mvBmQsets[qset].pool == -1) {
+		mvOsPrintf("%s: qset %d is not attached to BM pool\n", __func__, qset);
+		return MV_FAIL;
+	}
+
+	/* Same Qset */
+	if (mvBmTxqToQsetShort[queue] == qset)
+		return MV_OK;
+
+	/* Remove old Qset */
+	if (mvBmTxqToQsetShort[queue] != -1) {
+		int oldQset = mvBmTxqToQsetShort[queue];
+
+		/* Check that queue is using the same BM pool */
+		if (mvBmQsets[qset].pool != mvBmQsets[oldQset].pool) {
+			mvOsPrintf("%s: queue %d is attached BM pool %d, but new qset %d is attached to BM pool %d\n",
+				__func__, queue, mvBmQsets[oldQset].pool, qset, mvBmQsets[qset].pool);
+			return MV_FAIL;
+		}
+		mvBmQsets[oldQset].refCount--;
+	}
+
+	mvPp2WrReg(MV_BM_PRIO_IDX_REG, queue);
+
+	regVal = mvPp2RdReg(MV_BM_HWF_QSET_REG);
+	regVal &= ~MV_BM_HWF_SHORT_QSET_MASK;
+	regVal |= ((qset << MV_BM_HWF_SHORT_QSET_OFFS) & MV_BM_HWF_SHORT_QSET_MASK);
+
+	mvPp2WrReg(MV_BM_HWF_QSET_REG, regVal);
+
+	mvBmTxqToQsetShort[queue] = qset;
+	mvBmQsets[qset].refCount++;
+
+	return MV_OK;
+}
+
+int mvBmRxqToQsetLongGet(int queue)
+{
+	if (queue < 0 || queue > MV_BM_PRIO_IDX_MASK) {
+		mvOsPrintf("%s: Bad queue number = %d\n", __func__, queue);
+		return -1;
+	}
+
+	return mvBmRxqToQsetLong[queue];
+}
+
+int mvBmRxqToQsetShortGet(int queue)
+{
+	if (queue < 0 || queue > MV_BM_PRIO_IDX_MASK) {
+		mvOsPrintf("%s: Bad queue number = %d\n", __func__, queue);
+		return -1;
+	}
+
+	return mvBmRxqToQsetShort[queue];
+}
+
+int mvBmTxqToQsetLongGet(int queue)
+{
+	if (queue < 0 || queue > MV_BM_PRIO_IDX_MASK) {
+		mvOsPrintf("%s: Bad queue number = %d\n", __func__, queue);
+		return -1;
+	}
+
+	return mvBmTxqToQsetLong[queue];
+}
+
+int mvBmTxqToQsetShortGet(int queue)
+{
+	if (queue < 0 || queue > MV_BM_PRIO_IDX_MASK) {
+		mvOsPrintf("%s: Bad queue number = %d\n", __func__, queue);
+		return -1;
+	}
+
+	return mvBmTxqToQsetShort[queue];
+}
+
+MV_STATUS mvBmQsetBuffMaxSet(int qset, int maxGrntd, int maxShared)
+{
+	MV_U32 regVal = 0;
+	int pool, delta;
+	MV_BM_POOL *pBmPool;
+	MV_BM_QSET *pQset;
+
+	if (qset < 0 || qset >= MV_BM_QSET_MAX) {
+		mvOsPrintf("%s: Bad qset number = %d\n", __func__, qset);
+		return MV_BAD_PARAM;
+	}
+
+	pQset = &mvBmQsets[qset];
+	pool = pQset->pool;
+	if (pool == -1) {
+		mvOsPrintf("%s: Qset (%d) is not attached to any BM pool\n", __func__, qset);
+		return MV_FAIL;
+	}
+
+	/* number of requested guaranteed buffers from BM pool (after Qset max update) */
+	delta = maxGrntd - pQset->maxGrntd;
+
+	pBmPool = &mvBmPools[pool];
+	if (pBmPool->maxShared < delta) {
+		mvOsPrintf("%s: Not enough buffers (%d) in BM pool %d to guarantee %d buffer for qset %d\n",
+				__func__, pBmPool->maxShared, pool, delta, qset);
+		return MV_FAIL;
+	}
+
+	/* Update BM pool shared buffers num */
+	mvBmPoolBuffNumSet(pool, pBmPool->maxShared - delta);
+
+	pQset->maxShared = maxShared;
+	pQset->maxGrntd = maxGrntd;
+
+	regVal |= maxShared << MV_BM_QSET_MAX_SHARED_OFFS;
+	regVal |= maxGrntd << MV_BM_QSET_MAX_GRNTD_OFFS;
+
+	mvPp2WrReg(MV_BM_PRIO_IDX_REG, qset);
+	mvPp2WrReg(MV_BM_QSET_SET_MAX_REG, regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS mvBmQsetBuffCountersSet(int qset, int cntrGrntd, int cntrShared)
+{
+	MV_U32 regVal = 0;
+
+	if (qset < 0 || qset >= MV_BM_QSET_MAX) {
+		mvOsPrintf("%s: Bad qset number = %d\n", __func__, qset);
+		return MV_BAD_PARAM;
+	}
+
+	regVal |= cntrShared << MV_BM_QSET_CNTR_SHARED_OFFS;
+	regVal |= cntrGrntd << MV_BM_QSET_CNTR_GRNTD_OFFS;
+
+	mvPp2WrReg(MV_BM_PRIO_IDX_REG, qset);
+	mvPp2WrReg(MV_BM_QSET_SET_CNTRS_REG, regVal);
+
+	return MV_OK;
+}
+
+/* Set number of SHARED buffers for this pool */
+MV_STATUS mvBmPoolBuffNumSet(int pool, int buffNum)
+{
+	if ((pool < 0) || (pool >= MV_BM_POOLS)) {
+		mvOsPrintf("bmPoolId = %d is invalid\n", pool);
+		return MV_BAD_PARAM;
+	}
+
+	mvBmPools[pool].maxShared = buffNum;
+
+	mvPp2WrReg(MV_BM_POOL_MAX_SHARED_REG(pool), buffNum);
+
+	return MV_OK;
+}
+
+MV_STATUS mvBmPoolBuffCountersSet(int pool, int cntrGrntd, int cntrShared)
+{
+	MV_U32 regVal = 0;
+
+	if ((pool < 0) || (pool >= MV_BM_POOLS)) {
+		mvOsPrintf("bmPoolId = %d is invalid\n", pool);
+		return MV_BAD_PARAM;
+	}
+
+
+	regVal |= cntrShared << MV_BM_POOL_CNTR_SHARED_OFFS;
+	regVal |= cntrGrntd << MV_BM_POOL_CNTR_GRNTD_OFFS;
+
+	mvPp2WrReg(MV_BM_POOL_SET_CNTRS_REG(pool), regVal);
+
+	return MV_OK;
+}
+/******************************************************************************/
+
+void mvBmPoolPrint(int pool)
+{
+	MV_BM_POOL *pBmPool;
+
+	/* validate poolId */
+	if ((pool < 0) || (pool >= MV_BM_POOLS)) {
+		mvOsPrintf("bmPoolId = %d is invalid \n", pool);
+		return;
+	}
+
+	pBmPool = &mvBmPools[pool];
+	if (pBmPool->pVirt == NULL) {
+		mvOsPrintf("bmPool = %d is not created yet\n", pool);
+		return;
+	}
+
+	mvOsPrintf("  %2d:     %4d       %4d       %4d      0x%08x\n",
+						pBmPool->pool, pBmPool->capacity, pBmPool->bufSize, pBmPool->bufNum,
+						(unsigned)pBmPool->physAddr);
+}
+
+void mvBmStatus(void)
+{
+	int i;
+
+	mvOsPrintf("BM Pools status\n");
+	mvOsPrintf("pool:    capacity    bufSize    bufNum       physAddr\n");
+	for (i = 0; i < MV_BM_POOLS; i++)
+		mvBmPoolPrint(i);
+}
+/* PPv2.1 MAS 3.20 new counters */
+void mvBmV1PoolDropCntDump(int pool)
+{
+	mvPp2PrintReg2(MV_BM_V1_PKT_DROP_REG(pool), "MV_BM_V1_PKT_DROP_REG", pool);
+	mvPp2PrintReg2(MV_BM_V1_PKT_MC_DROP_REG(pool), "MV_BM_V1_PKT_MC_DROP_REG", pool);
+}
+
+void mvBmPoolDump(int pool, int mode)
+{
+/*	MV_U32     regVal;
+	MV_ULONG   *pBufAddr;
+	MV_BM_POOL *pBmPool;
+	int setReadIdx, getReadIdx, setWriteIdx, getWriteIdx, freeBuffs, i;
+*/
+	/* validate poolId */
+	if ((pool < 0) || (pool >= MV_BM_POOLS)) {
+		mvOsPrintf("bmPoolId = %d is invalid \n", pool);
+		return;
+	}
+/*
+	pBmPool = &mvBmPools[pool];
+	if (pBmPool->pVirt == NULL) {
+		mvOsPrintf("bmPool = %d is not created yet\n", pool);
+		return;
+	}
+
+	mvOsPrintf("\n[NETA BM: pool=%d, mode=%d]\n", pool, mode);
+
+	mvOsPrintf("poolBase=%p (0x%x), capacity=%d, buf_num=%d, buf_size=%d\n",
+		   pBmPool->pVirt, (unsigned)pBmPool->physAddr, pBmPool->capacity, pBmPool->bufNum, pBmPool->bufSize);
+
+	regVal = mvPp2RdReg(MV_BM_POOL_READ_PTR_REG(pool));
+	setReadIdx = ((regVal & MV_BM_POOL_SET_READ_PTR_MASK) >> MV_BM_POOL_SET_READ_PTR_OFFS) / 4;
+	getReadIdx = ((regVal & MV_BM_POOL_GET_READ_PTR_MASK) >> MV_BM_POOL_GET_READ_PTR_OFFS) / 4;
+
+	regVal = mvPp2RdReg(MV_BM_POOL_WRITE_PTR_REG(pool));
+	setWriteIdx = ((regVal & MV_BM_POOL_SET_WRITE_PTR_MASK) >> MV_BM_POOL_SET_WRITE_PTR_OFFS) / 4;
+	getWriteIdx = ((regVal & MV_BM_POOL_GET_WRITE_PTR_MASK) >> MV_BM_POOL_GET_WRITE_PTR_OFFS) / 4;
+	if (getWriteIdx >= getReadIdx)
+		freeBuffs = getWriteIdx - getReadIdx;
+	else
+		freeBuffs = (pBmPool->capacity - getReadIdx) + getWriteIdx;
+
+	mvOsPrintf("nextToRead: set=%d, get=%d, nextToWrite: set=%d, get=%d, freeBuffs=%d\n",
+		setReadIdx, getReadIdx, setWriteIdx, getWriteIdx, freeBuffs);
+
+	if (mode > 0) {
+*/
+		/* Print the content of BM pool */
+/*		i = getReadIdx;
+		while (i != getWriteIdx) {
+			pBufAddr = (MV_ULONG *)pBmPool->pVirt + i;
+			mvOsPrintf("%3d. pBufAddr=%p, bufAddr=%08x\n",
+				   i, pBufAddr, (MV_U32)(*pBufAddr));
+			i++;
+			if (i == pBmPool->capacity)
+				i = 0;
+		}
+	}
+*/
+}
+
+/******************************************************************************/
+MV_STATUS mvBmQsetShow(int qset)
+{
+	MV_BM_QSET *pQset;
+	MV_U32 regVal;
+
+	if (qset < 0 || qset >= MV_BM_QSET_MAX) {
+		mvOsPrintf("%s: Bad qset number = %d\n", __func__, qset);
+		return MV_BAD_PARAM;
+	}
+
+	pQset = &mvBmQsets[qset];
+
+	mvPp2WrReg(MV_BM_PRIO_IDX_REG, qset);
+	regVal = mvPp2RdReg(MV_BM_QSET_SET_MAX_REG);
+
+	mvOsPrintf("Qset[%03d]: pool=%d,  refCount=%03d,  maxShared=%04d,  maxGrntd=%04d,  MaxBuff reg(0x%x)=0x%08x\n",
+			qset, pQset->pool, pQset->refCount, pQset->maxShared,
+			pQset->maxGrntd, MV_BM_QSET_SET_MAX_REG, regVal);
+
+	return MV_OK;
+}
+
+static MV_BOOL mvBmPriorityEn(void)
+{
+	return ((mvPp2RdReg(MV_BM_PRIO_CTRL_REG) == 0) ? MV_FALSE : MV_TRUE);
+}
+
+void mvBmQsetConfigDumpAll(void)
+{
+	int qset;
+
+	if (!mvBmPriorityEn())
+		mvOsPrintf("Note: The buffers priority algorithms is disabled.\n");
+
+	for (qset = 0; qset < MV_BM_QSET_MAX; qset++) {
+		/* skip qsets that not attached to any pool */
+		if (mvBmQsets[qset].pool == -1)
+			continue;
+
+		mvBmQsetShow(qset);
+	}
+}
+
+static void mvBmQueueMapDump(int queue)
+{
+	unsigned int regVal, shortQset, longQset;
+
+	mvOsPrintf("-------- queue #%d --------\n", queue);
+
+	mvPp2WrReg(MV_BM_PRIO_IDX_REG, queue);
+	regVal = mvPp2RdReg(MV_BM_CPU_QSET_REG);
+
+	shortQset = ((regVal & (MV_BM_CPU_SHORT_QSET_MASK)) >> MV_BM_CPU_SHORT_QSET_OFFS);
+	longQset = ((regVal & (MV_BM_CPU_LONG_QSET_MASK)) >> MV_BM_CPU_LONG_QSET_OFFS);
+	mvOsPrintf("CPU SHORT QSET = 0x%02x\n", shortQset);
+	mvOsPrintf("CPU LONG QSET  = 0x%02x\n", longQset);
+
+	regVal = mvPp2RdReg(MV_BM_HWF_QSET_REG);
+	shortQset = ((regVal & (MV_BM_HWF_SHORT_QSET_MASK)) >> MV_BM_HWF_SHORT_QSET_OFFS);
+	longQset = ((regVal & (MV_BM_HWF_LONG_QSET_MASK)) >> MV_BM_HWF_LONG_QSET_OFFS);
+	mvOsPrintf("HWF SHORT QSET = 0x%02x\n", shortQset);
+	mvOsPrintf("HWF LONG QSET  = 0x%02x\n", longQset);
+}
+
+void mvBmQueueMapDumpAll(void)
+{
+	int queue;
+
+	if (!mvBmPriorityEn())
+		mvOsPrintf("Note: The buffers priority algorithms is disabled.\n");
+
+	for (queue = 0; queue < 256 /* TODO MAX(RXQ_NUM, TXQ_NUM)*/; queue++)
+		mvBmQueueMapDump(queue);
+}
+
+/*
+void mvBmPoolConfigDumpAll(void)
+
+	regVal = mvPp2RdReg(MV_BM_POOL_MAX_SHARED_REG(pool));
+
+	maxSherd = ((regVal & MV_BM_POOL_MAX_SHARED_MASK) >> MV_BM_POOL_MAX_SHARED_OFFS);
+	mvOsPrintf("POOL MAX SHERD = 0x%04x\n", maxSherd);
+	mvOsPrintf("\n");
+*/
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/bm/mvBm.h b/drivers/net/ethernet/mvebu_net/pp2/hal/bm/mvBm.h
new file mode 100644
index 000000000000..92113e968dff
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/bm/mvBm.h
@@ -0,0 +1,225 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __mvBm_h__
+#define __mvBm_h__
+
+/* includes */
+#include "mvTypes.h"
+#include "mvCommon.h"
+#include "mvStack.h"
+#include "mvList.h"
+#include "mv802_3.h"
+#include "common/mvPp2Common.h"
+#include "mvBmRegs.h"
+
+typedef struct {
+	int valid;
+	int longPool;
+	int shortPool;
+	int longBufNum;
+	int shortBufNum;
+	int hwfLongPool;
+	int hwfShortPool;
+	int hwfLongBufNum;
+	int hwfShortBufNum;
+
+} MV_BM_CONFIG;
+
+typedef struct {
+	int id;
+	int pool;
+	int refCount;
+	int maxShared;
+	int maxGrntd;
+} MV_BM_QSET;
+
+typedef struct {
+	int		pool;
+	int		capacity;
+	int		bufNum;
+	int		bufSize;
+	MV_U32		*pVirt;
+	MV_ULONG	physAddr;
+	int		maxShared;
+	MV_BM_QSET	*defQset; /* default qset */
+	MV_BM_QSET	*mcQset;  /* MC qset */
+	MV_LIST_ELEMENT	*qsets;   /* list of qsets attached to this pool */
+} MV_BM_POOL;
+
+/* defines */
+
+/* bits[8-9] of address define pool 0-3 */
+#define BM_POOL_ACCESS_OFFS     8
+
+/* Qsets 0-7 reserved for MC */
+#define MV_BM_POOL_QSET_BASE		8
+#define MV_BM_QSET_MAX			128
+#define MV_BM_QSET_PRIO_MAX		255
+
+/* INLINE functions */
+static INLINE void mvBmPoolPut(int pool, MV_U32 bufPhysAddr, MV_U32 bufVirtAddr)
+{
+	mvPp2WrReg(MV_BM_VIRT_RLS_REG, bufVirtAddr);
+	mvPp2WrReg(MV_BM_PHY_RLS_REG(pool), bufPhysAddr);
+}
+
+static INLINE void mvBmPoolMcPut(int pool, MV_U32 bufPhysAddr, MV_U32 bufVirtAddr, int mcId, int isForce)
+{
+	MV_U32 regVal = 0;
+
+	regVal |= ((mcId << MV_BM_MC_ID_OFFS) & MV_BM_MC_ID_MASK);
+	if (isForce)
+		regVal |= MV_BM_FORCE_RELEASE_MASK;
+
+	mvPp2WrReg(MV_BM_MC_RLS_REG, regVal);
+	mvBmPoolPut(pool, bufPhysAddr | MV_BM_PHY_RLS_MC_BUFF_MASK, bufVirtAddr);
+}
+
+static INLINE void mvBmPoolQsetPut(int pool, MV_U32 bufPhysAddr, MV_U32 bufVirtAddr, int qset, int isGrntd)
+{
+	MV_U32 regVal, bufPhysAddrPrio;
+
+	regVal = (qset << MV_BM_RLS_QSET_NUM_OFFS) & MV_BM_RLS_QSET_NUM_MASK;
+	mvPp2WrReg(MV_BM_QSET_RLS_REG, regVal);
+
+	bufPhysAddrPrio = bufPhysAddr | MV_BM_PHY_RLS_PRIO_EN_MASK;
+	if (isGrntd)
+		bufPhysAddrPrio |= MV_BM_PHY_RLS_GRNTD_MASK;
+
+	mvBmPoolPut(pool, bufPhysAddrPrio, bufVirtAddr);
+}
+
+static INLINE void mvBmPoolQsetMcPut(int pool, MV_U32 bufPhysAddr, MV_U32 bufVirtAddr,
+					int qset, int isGrntd, int mcId, int isForce)
+{
+	MV_U32 regVal, bufPhysAddrPrio;
+
+	regVal = (qset << MV_BM_RLS_QSET_NUM_OFFS) & MV_BM_RLS_QSET_NUM_MASK;
+	mvPp2WrReg(MV_BM_QSET_RLS_REG, regVal);
+
+	bufPhysAddrPrio = bufPhysAddr | MV_BM_PHY_RLS_PRIO_EN_MASK;
+	if (isGrntd)
+		bufPhysAddrPrio |= MV_BM_PHY_RLS_GRNTD_MASK;
+
+	mvBmPoolMcPut(pool, bufPhysAddrPrio, bufVirtAddr, mcId, isForce);
+}
+
+static INLINE MV_U32 mvBmPoolGet(int pool, MV_U32 *bufPhysAddr)
+{
+	MV_U32 bufVirtAddr, physAddr;
+
+	physAddr = mvPp2RdReg(MV_BM_PHY_ALLOC_REG(pool)); /* read physical address */
+	bufVirtAddr = mvPp2RdReg(MV_BM_VIRT_ALLOC_REG); /* return virtual address */
+	if (bufPhysAddr)
+		*bufPhysAddr = physAddr;
+
+	return bufVirtAddr;
+}
+
+/* prototypes */
+MV_STATUS mvBmInit(void);
+MV_STATUS mvBmPoolControl(int pool, MV_COMMAND cmd);
+MV_STATE  mvBmPoolStateGet(int pool);
+void      mvBmPoolEnable(int pool);
+void      mvBmPoolDisable(int pool);
+MV_BOOL   mvBmPoolIsEnabled(int pool);
+MV_STATUS mvBmPoolInit(int pool, MV_U32 *virtPoolBase, MV_ULONG physPoolBase, int capacity);
+MV_STATUS mvBmPoolBufNumUpdate(int pool, int buf_num, int add);
+MV_STATUS mvBmPoolBufSizeSet(int pool, int buf_size);
+void      mvBmRegs(void);
+void      mvBmStatus(void);
+void      mvBmPoolDump(int pool, int mode);
+void      mvBmV1PoolDropCntDump(int pool);
+void      mvBmPoolPrint(int pool);
+
+/* BM priority API */
+MV_STATUS mvBmQsetCreate(int qset, int pool);
+MV_STATUS mvBmQsetDelete(int qset);
+void mvBmQsetConfigDumpAll(void);
+int mvBmDefaultQsetNumGet(int pool);
+void mvBmQueueMapDumpAll(void);
+MV_STATUS mvBmQsetShow(int qset);
+
+MV_STATUS mvBmRxqToQsetLongClean(int queue);
+MV_STATUS mvBmRxqToQsetShortClean(int queue);
+MV_STATUS mvBmTxqToQsetLongClean(int queue);
+MV_STATUS mvBmTxqToQsetShortClean(int queue);
+
+MV_STATUS mvBmRxqToQsetLongSet(int queue, int qset);
+MV_STATUS mvBmRxqToQsetShortSet(int queue, int qset);
+MV_STATUS mvBmTxqToQsetLongSet(int queue, int qset);
+MV_STATUS mvBmTxqToQsetShortSet(int queue, int qset);
+int mvBmRxqToQsetLongGet(int queue);
+int mvBmRxqToQsetShortGet(int queue);
+int mvBmTxqToQsetLongGet(int queue);
+int mvBmTxqToQsetShortGet(int queue);
+
+MV_STATUS mvBmQsetBuffMaxSet(int qset, int maxGrntd, int maxShared);
+MV_STATUS mvBmQsetBuffCountersSet(int qset, int cntrGrntd, int cntrShared);
+MV_STATUS mvBmPoolBuffNumSet(int pool, int buffNum);
+MV_STATUS mvBmPoolBuffCountersSet(int pool, int cntrGrntd, int cntrShared);
+
+#endif /* __mvBm_h__ */
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/bm/mvBmRegs.h b/drivers/net/ethernet/mvebu_net/pp2/hal/bm/mvBmRegs.h
new file mode 100644
index 000000000000..37f00bd60f9b
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/bm/mvBmRegs.h
@@ -0,0 +1,274 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __mvBmRegs_h__
+#define __mvBmRegs_h__
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#define MV_BM_POOLS                 8
+#define MV_BM_POOL_CAP_MAX          (16*1024 - MV_BM_POOL_PTR_ALIGN/4)
+#define MV_BM_POOL_CAP_MIN          128
+#define MV_BM_POOL_PTR_ALIGN        128
+
+/* Address of External Buffer Pointers Pool Register */
+#define MV_BM_POOL_BASE_REG(pool)       (MV_PP2_REG_BASE + 0x6000 + ((pool) * 4))
+
+#define MV_BM_POOL_BASE_ADDR_OFFS       7
+#define MV_BM_POOL_BASE_ADDR_MASK       (0x1FFFFF << MV_BM_POOL_BASE_ADDR_OFFS)
+/*-------------------------------------------------------------------------------*/
+
+/* External Buffer Pointers Pool Size Register */
+#define MV_BM_POOL_SIZE_REG(pool)       (MV_PP2_REG_BASE + 0x6040 + ((pool) * 4))
+
+#define MV_BM_POOL_SIZE_OFFS            4
+#define MV_BM_POOL_SIZE_MASK            (0xFFF << MV_BM_POOL_SIZE_OFFS)
+/*-------------------------------------------------------------------------------*/
+
+/* External Buffer Pointers Pool Read pointer Register */
+#define MV_BM_POOL_READ_PTR_REG(pool)   (MV_PP2_REG_BASE + 0x6080 + ((pool) * 4))
+
+#define MV_BM_POOL_GET_READ_PTR_OFFS    4
+#define MV_BM_POOL_GET_READ_PTR_MASK    (0xFFF << MV_BM_POOL_GET_READ_PTR_OFFS)
+/*-------------------------------------------------------------------------------*/
+
+/* External Buffer Pointers Pool Number of Pointers Register */
+#define MV_BM_POOL_PTRS_NUM_REG(pool)	(MV_PP2_REG_BASE + 0x60c0 + ((pool) * 4))
+
+#define MV_BM_POOL_PTRS_NUM_OFFS		4
+#define MV_BM_POOL_PTRS_NUM_MASK		(0xFFF << MV_BM_POOL_PTRS_NUM_OFFS)
+/*-------------------------------------------------------------------------------*/
+
+/* Internal Buffer Pointers Pool RD pointer Register */
+#define MV_BM_BPPI_READ_PTR_REG(pool)   (MV_PP2_REG_BASE + 0x6100 + ((pool) * 4))
+/*-------------------------------------------------------------------------------*/
+
+/* Internal Buffer Pointers Pool Num of pointers Register */
+#define MV_BM_BPPI_PTRS_NUM_REG(pool)   (MV_PP2_REG_BASE + 0x6140 + ((pool) * 4))
+
+#define MV_BM_BPPI_PTR_NUM_OFFS    	0
+#define MV_BM_BPPI_PTR_NUM_MASK	    	(0x7FF << MV_BM_BPPI_PTR_NUM_OFFS)
+
+#define MV_BM_BPPI_PREFETCH_FULL_BIT    16
+#define MV_BM_BPPI_PREFETCH_FULL_MASK	(0x1 << MV_BM_BPPI_PREFETCH_FULL_BIT)
+/*-------------------------------------------------------------------------------*/
+
+/* BM Activation Register */
+#define MV_BM_POOL_CTRL_REG(pool)       (MV_PP2_REG_BASE + 0x6200 + ((pool) * 4))
+
+#define MV_BM_START_BIT                 0
+#define MV_BM_START_MASK                (1 << MV_BM_START_BIT)
+
+#define MV_BM_STOP_BIT                  1
+#define MV_BM_STOP_MASK                 (1 << MV_BM_STOP_BIT)
+
+#define MV_BM_STATE_BIT                 4
+#define MV_BM_STATE_MASK                (1 << MV_BM_STATE_BIT)
+
+#define MV_BM_LOW_THRESH_OFFS           8
+#define MV_BM_LOW_THRESH_MASK           (0x7F << MV_BM_LOW_THRESH_OFFS)
+#define MV_BM_LOW_THRESH_VALUE(val)     ((val) << MV_BM_LOW_THRESH_OFFS)
+
+#define MV_BM_HIGH_THRESH_OFFS          16
+#define MV_BM_HIGH_THRESH_MASK          (0x7F << MV_BM_HIGH_THRESH_OFFS)
+#define MV_BM_HIGH_THRESH_VALUE(val)    ((val) << MV_BM_HIGH_THRESH_OFFS)
+/*-------------------------------------------------------------------------------*/
+
+/* BM Interrupt Cause Register */
+#define MV_BM_INTR_CAUSE_REG(pool)      (MV_PP2_REG_BASE + 0x6240 + ((pool) * 4))
+
+#define MV_BM_RELEASED_DELAY_BIT        0
+#define MV_BM_RELEASED_DELAY_MASK       (1 << MV_BM_RELEASED_DELAY_BIT)
+
+#define MV_BM_ALLOC_FAILED_BIT          1
+#define MV_BM_ALLOC_FAILED_MASK         (1 << MV_BM_ALLOC_FAILED_BIT)
+
+#define MV_BM_BPPE_EMPTY_BIT            2
+#define MV_BM_BPPE_EMPTY_MASK           (1 << MV_BM_BPPE_EMPTY_BIT)
+
+#define MV_BM_BPPE_FULL_BIT             3
+#define MV_BM_BPPE_FULL_MASK            (1 << MV_BM_BPPE_FULL_BIT)
+
+#define MV_BM_AVAILABLE_BP_LOW_BIT      4
+#define MV_BM_AVAILABLE_BP_LOW_MASK     (1 << MV_BM_AVAILABLE_BP_LOW_BIT)
+/*-------------------------------------------------------------------------------*/
+
+/* BM interrupt Mask Register */
+#define MV_BM_INTR_MASK_REG(pool)       (MV_PP2_REG_BASE + 0x6280 + ((pool) * 4))
+/*-------------------------------------------------------------------------------*/
+
+/* BM physical address allocate */
+#define MV_BM_PHY_ALLOC_REG(pool)	(MV_PP2_REG_BASE + 0x6400 + ((pool) * 4))
+
+#define MV_BM_PHY_ALLOC_GRNTD_MASK	(0x1)
+
+/* BM virtual address allocate */
+#define MV_BM_VIRT_ALLOC_REG		(MV_PP2_REG_BASE + 0x6440)
+
+/* BM physical address release */
+#define MV_BM_PHY_RLS_REG(pool)		(MV_PP2_REG_BASE + 0x6480 + ((pool) * 4))
+
+#define MV_BM_PHY_RLS_MC_BUFF_MASK	(0x1)
+#define MV_BM_PHY_RLS_PRIO_EN_MASK	(0x2)
+#define MV_BM_PHY_RLS_GRNTD_MASK	(0x4)
+
+/* BM virtual address release */
+#define MV_BM_VIRT_RLS_REG		(MV_PP2_REG_BASE + 0x64c0)
+
+/*-------------------------------------------------------------------------------*/
+
+/* BM MC release */
+#define MV_BM_MC_RLS_REG		(MV_PP2_REG_BASE + 0x64c4)
+
+#define MV_BM_MC_ID_OFFS		0
+#define MV_BM_MC_ID_MASK		(0xfff << MV_BM_MC_ID_OFFS)
+
+#define MV_BM_FORCE_RELEASE_OFFS	12
+#define MV_BM_FORCE_RELEASE_MASK	(0x1 << MV_BM_FORCE_RELEASE_OFFS)
+
+/*-------------------------------------------------------------------------------*/
+/* BM prio alloc/release */
+#define MV_BM_QSET_ALLOC_REG		(MV_PP2_REG_BASE + 0x63fc)
+
+#define MV_BM_ALLOC_QSET_NUM_OFFS	0
+#define MV_BM_ALLOC_QSET_NUM_MASK	(0x7f << MV_BM_ALLOC_QSET_NUM_OFFS)
+
+#define MV_BM_ALLOC_YELLOW_MASK		(0x1 << 8)
+
+#define MV_BM_ALLOC_PRIO_EN_MASK	(0x1 << 12)
+
+
+#define MV_BM_QSET_RLS_REG		(MV_PP2_REG_BASE + 0x64c8)
+
+#define MV_BM_RLS_QSET_NUM_OFFS		0
+#define MV_BM_RLS_QSET_NUM_MASK		(0x7f << MV_BM_RLS_QSET_NUM_OFFS)
+/*-------------------------------------------------------------------------------*/
+/* BM Priority Configuration Registers */
+
+#define MV_BM_PRIO_CTRL_REG		(MV_PP2_REG_BASE + 0x6800)
+
+
+#define MV_BM_PRIO_IDX_REG		(MV_PP2_REG_BASE + 0x6810)
+#define MV_BM_PRIO_IDX_MASK		0xff
+
+
+#define MV_BM_CPU_QSET_REG		(MV_PP2_REG_BASE + 0x6814)
+
+#define MV_BM_CPU_SHORT_QSET_OFFS	0
+#define MV_BM_CPU_SHORT_QSET_MASK	(0x7f << MV_BM_CPU_SHORT_QSET_OFFS)
+
+#define MV_BM_CPU_LONG_QSET_OFFS	8
+#define MV_BM_CPU_LONG_QSET_MASK	(0x7f << MV_BM_CPU_LONG_QSET_OFFS)
+
+
+#define MV_BM_HWF_QSET_REG		(MV_PP2_REG_BASE + 0x6818)
+
+#define MV_BM_HWF_SHORT_QSET_OFFS	0
+#define MV_BM_HWF_SHORT_QSET_MASK	(0x7f << MV_BM_HWF_SHORT_QSET_OFFS)
+
+#define MV_BM_HWF_LONG_QSET_OFFS	8
+#define MV_BM_HWF_LONG_QSET_MASK	(0x7f << MV_BM_HWF_LONG_QSET_OFFS)
+
+
+#define MV_BM_QSET_SET_MAX_REG		(MV_PP2_REG_BASE + 0x6820)
+
+#define MV_BM_QSET_MAX_SHARED_OFFS	0
+#define MV_BM_QSET_MAX_GRNTD_OFFS	16
+
+#define MV_BM_QSET_MAX_SHARED_MASK	(0xffff << MV_BM_QSET_MAX_SHARED_OFFS)
+#define MV_BM_QSET_MAX_GRNTD_MASK	(0xffff << MV_BM_QSET_MAX_GRNTD_OFFS)
+
+
+#define MV_BM_QSET_SET_CNTRS_REG	(MV_PP2_REG_BASE + 0x6824)
+
+#define MV_BM_QSET_CNTR_SHARED_OFFS	0
+#define MV_BM_QSET_CNTR_GRNTD_OFFS	16
+
+
+#define MV_BM_POOL_MAX_SHARED_REG(pool)	(MV_PP2_REG_BASE + 0x6840 + ((pool) * 4))
+#define MV_BM_POOL_MAX_SHARED_OFFS	0
+#define MV_BM_POOL_MAX_SHARED_MASK	(0xffff << MV_BM_POOL_MAX_SHARED_OFFS)
+
+#define MV_BM_POOL_SET_CNTRS_REG(pool)	(MV_PP2_REG_BASE + 0x6880 + ((pool) * 4))
+
+#define MV_BM_POOL_CNTR_SHARED_OFFS	0
+#define MV_BM_POOL_CNTR_GRNTD_OFFS	16
+
+#define MV_BM_V1_PKT_DROP_REG(pool)		(MV_PP2_REG_BASE + 0x7300 + 4 * (pool))
+#define MV_BM_V1_PKT_MC_DROP_REG(pool)		(MV_PP2_REG_BASE + 0x7340 + 4 * (pool))
+
+
+#define MV_BM_POOL_SHARED_STATUS(pool)		(MV_PP2_REG_BASE + 0x68c0 + ((pool) * 4))
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __mvBmRegs_h__ */
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Classifier.c b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Classifier.c
new file mode 100644
index 000000000000..8f93dc9094c5
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Classifier.c
@@ -0,0 +1,94 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#include "mvPp2Classifier.h"
+
+int mvPp2ClassifierDefInit()
+{
+
+	if (mvPp2ClsInit())
+		return MV_ERROR;
+
+#ifdef CONFIG_MV_ETH_PP2_CLS2
+	if (mvPp2ClsC2Init())
+		return MV_ERROR;
+#endif /* CONFIG_MV_ETH_PP2_CLS2 */
+
+#ifdef CONFIG_MV_ETH_PP2_CLS3
+	if (mvPp2ClsC3Init())
+		return MV_ERROR;
+#endif /* CONFIG_MV_ETH_PP2_CLS3 */
+
+#ifdef CONFIG_MV_ETH_PP2_CLS4
+	mvPp2ClsC4HwClearAll();
+#endif /* CONFIG_MV_ETH_PP2_CLS2 */
+
+#ifdef CONFIG_MV_ETH_PP2_CLS_MC
+	mvPp2McHwClearAll();
+#endif /* CONFIG_MV_ETH_PP2_CLS_MC */
+
+	return MV_OK;
+}
+
+
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Classifier.h b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Classifier.h
new file mode 100644
index 000000000000..16884bf4a3fc
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Classifier.h
@@ -0,0 +1,103 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __MV_PNC_HW_H__
+#define __MV_PNC_HW_H__
+
+#include "mvPp2ClsHw.h"
+#include "mvPp2ClsActHw.h"
+#include "../common/mvPp2ErrCode.h"
+#include "../common/mvPp2Common.h"
+
+#ifdef CONFIG_MV_ETH_PP2_CLS2
+#include "mvPp2Cls2Hw.h"
+#endif
+#ifdef CONFIG_MV_ETH_PP2_CLS3
+#include "mvPp2Cls3Hw.h"
+#endif
+#ifdef CONFIG_MV_ETH_PP2_CLS4
+#include "mvPp2Cls4Hw.h"
+#endif
+#ifdef CONFIG_MV_ETH_PP2_CLS_MC
+#include "mvPp2ClsMcHw.h"
+#endif
+
+/* call to defult init of cls, C2, C3, C4, MC, Clear all HW structure , clean all shadow arrays */
+int mvPp2ClassifierDefInit(void);
+
+/*
+Assign Rx queue to a protocol
+int mvPp2ClassifierProtoRxq(unsigned int proto, unsigned int rxq); rxq to arp
+Assign Rx queue to a vlan priority
+int mvPp2ClassifierVlanPrioRxq(int port, int prio, int rxq);
+int mvPp2Classifier2tupleIp4Rxq(unsigned int eth_port, unsigned int sip, unsigned int dip, int rxq);
+int mvPp2Classifier5tupleIp4Rxq(unsigned int eth_port, unsigned int sip, unsigned int dip, unsigned int ports,
+				unsigned int proto, int rxq);
+int  mvPp2ClassifierIp4DscpRxq(int port, unsigned char dscp, unsigned char mask, int rxq);
+change def rxq per port
+int  mvPp2ClassifierPortRxq(int port, int rxq);
+*/
+
+#endif /*__MV_PNC_HW_H__ */
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls2Hw.c b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls2Hw.c
new file mode 100644
index 000000000000..019146011e67
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls2Hw.c
@@ -0,0 +1,1213 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#include "mvPp2Cls2Hw.h"
+
+/*-------------------------------------------------------------------------------*/
+/*		Classifier C2 engine QoS table Public APIs			 */
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2QosHwRead(int tbl_id, int tbl_sel, int tbl_line, MV_PP2_CLS_C2_QOS_ENTRY *qos)
+{
+	unsigned int regVal = 0;
+
+	PTR_VALIDATE(qos);
+
+	POS_RANGE_VALIDATE(tbl_sel, 1); /* one bit */
+	if (tbl_sel == 1) {
+		/*dscp*/
+		/* TODO define 8=DSCP_TBL_NUM  64=DSCP_TBL_LINES */
+		POS_RANGE_VALIDATE(tbl_id, QOS_TBL_NUM_DSCP);
+		POS_RANGE_VALIDATE(tbl_line, QOS_TBL_LINE_NUM_DSCP);
+	} else {
+		/*pri*/
+		/* TODO define 64=PRI_TBL_NUM  8=PRI_TBL_LINES */
+		POS_RANGE_VALIDATE(tbl_id, QOS_TBL_NUM_PRI);
+		POS_RANGE_VALIDATE(tbl_line, QOS_TBL_LINE_NUM_PRI);
+	}
+
+	qos->tbl_id = tbl_id;
+	qos->tbl_sel = tbl_sel;
+	qos->tbl_line = tbl_line;
+
+	/* write index reg */
+	regVal |= (tbl_line << MV_PP2_CLS2_DSCP_PRI_INDEX_LINE_OFF);
+	regVal |= (tbl_sel << MV_PP2_CLS2_DSCP_PRI_INDEX_SEL_OFF);
+	regVal |= (tbl_id << MV_PP2_CLS2_DSCP_PRI_INDEX_TBL_ID_OFF);
+
+	mvPp2WrReg(MV_PP2_CLS2_DSCP_PRI_INDEX_REG, regVal);
+
+	/* read data reg*/
+	qos->data = mvPp2RdReg(MV_PP2_CLS2_QOS_TBL_REG);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2QosHwWrite(int tbl_id, int tbl_sel, int tbl_line, MV_PP2_CLS_C2_QOS_ENTRY *qos)
+{
+	unsigned int regVal = 0;
+
+	PTR_VALIDATE(qos);
+
+	POS_RANGE_VALIDATE(tbl_sel, 1); /* one bit */
+	if (tbl_sel == 1) {
+		/*dscp*/
+		/* TODO define 8=DSCP_TBL_NUM  64=DSCP_TBL_LINES */
+		POS_RANGE_VALIDATE(tbl_id, QOS_TBL_NUM_DSCP);
+		POS_RANGE_VALIDATE(tbl_line, QOS_TBL_LINE_NUM_DSCP);
+	} else {
+		/*pri*/
+		/* TODO define 64=PRI_TBL_NUM  8=PRI_TBL_LINES */
+		POS_RANGE_VALIDATE(tbl_id, QOS_TBL_NUM_PRI);
+		POS_RANGE_VALIDATE(tbl_line, QOS_TBL_LINE_NUM_PRI);
+	}
+	/* write index reg */
+	regVal |= (tbl_line << MV_PP2_CLS2_DSCP_PRI_INDEX_LINE_OFF);
+	regVal |= (tbl_sel << MV_PP2_CLS2_DSCP_PRI_INDEX_SEL_OFF);
+	regVal |= (tbl_id << MV_PP2_CLS2_DSCP_PRI_INDEX_TBL_ID_OFF);
+
+	mvPp2WrReg(MV_PP2_CLS2_DSCP_PRI_INDEX_REG, regVal);
+
+	/* write data reg*/
+	mvPp2WrReg(MV_PP2_CLS2_QOS_TBL_REG, qos->data);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2QosSwDump(MV_PP2_CLS_C2_QOS_ENTRY *qos)
+{
+	int int32bit;
+	int status = 0;
+
+	PTR_VALIDATE(qos);
+
+	mvOsPrintf("TABLE	SEL	LINE	PRI	DSCP	COLOR	GEM_ID	QUEUE\n");
+
+	/* table id */
+	mvOsPrintf("0x%2.2x\t", qos->tbl_id);
+
+	/* table sel */
+	mvOsPrintf("0x%1.1x\t", qos->tbl_sel);
+
+	/* table line */
+	mvOsPrintf("0x%2.2x\t", qos->tbl_line);
+
+	/* priority */
+	status |= mvPp2ClsC2QosPrioGet(qos, &int32bit);
+	mvOsPrintf("0x%1.1x\t", int32bit);
+
+	/* dscp */
+	status |= mvPp2ClsC2QosDscpGet(qos, &int32bit);
+	mvOsPrintf("0x%2.2x\t", int32bit);
+
+	/* color */
+	status |= mvPp2ClsC2QosColorGet(qos, &int32bit);
+	mvOsPrintf("0x%1.1x\t", int32bit);
+
+	/* gem port id */
+	status |= mvPp2ClsC2QosGpidGet(qos, &int32bit);
+	mvOsPrintf("0x%3.3x\t", int32bit);
+
+	/* queue */
+	status |= mvPp2ClsC2QosQueueGet(qos, &int32bit);
+	mvOsPrintf("0x%2.2x", int32bit);
+
+	mvOsPrintf("\n");
+
+	return status;
+}
+/*-------------------------------------------------------------------------------*/
+void 	mvPp2ClsC2QosSwClear(MV_PP2_CLS_C2_QOS_ENTRY *qos)
+{
+
+	memset(qos, 0, sizeof(MV_PP2_CLS_C2_QOS_ENTRY));
+}
+/*-------------------------------------------------------------------------------*/
+void 	mvPp2ClsC2QosHwClearAll()
+{
+	int tbl_id, tbl_line;
+
+	MV_PP2_CLS_C2_QOS_ENTRY c2;
+
+	mvPp2ClsC2QosSwClear(&c2);
+
+	/* clear DSCP tables */
+	for (tbl_id = 0; tbl_id < MV_PP2_CLS_C2_QOS_DSCP_TBL_NUM; tbl_id++)
+		for (tbl_line = 0; tbl_line < MV_PP2_CLS_C2_QOS_DSCP_TBL_SIZE; tbl_line++)
+			mvPp2ClsC2QosHwWrite(tbl_id, 1/*DSCP*/, tbl_line, &c2);
+
+	/* clear PRIO tables */
+	for (tbl_id = 0; tbl_id < MV_PP2_CLS_C2_QOS_PRIO_TBL_NUM; tbl_id++)
+		for (tbl_line = 0; tbl_line < MV_PP2_CLS_C2_QOS_PRIO_TBL_SIZE; tbl_line++)
+			mvPp2ClsC2QosHwWrite(tbl_id, 0/*PRIO*/, tbl_line, &c2);
+
+
+
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2QosDscpHwDump(void)
+{
+	int tbl_id, tbl_line, int32bit;
+	MV_PP2_CLS_C2_QOS_ENTRY qos;
+
+	for (tbl_id = 0; tbl_id < MV_PP2_CLS_C2_QOS_DSCP_TBL_NUM; tbl_id++) {
+
+		mvOsPrintf("\n------------ DSCP TABLE %d ------------\n", tbl_id);
+		mvOsPrintf("LINE	DSCP	COLOR	GEM_ID	QUEUE\n");
+		for (tbl_line = 0; tbl_line < MV_PP2_CLS_C2_QOS_DSCP_TBL_SIZE; tbl_line++) {
+			mvPp2ClsC2QosHwRead(tbl_id, 1/*DSCP*/, tbl_line, &qos);
+			mvOsPrintf("0x%2.2x\t", qos.tbl_line);
+			mvPp2ClsC2QosDscpGet(&qos, &int32bit);
+			mvOsPrintf("0x%2.2x\t", int32bit);
+			mvPp2ClsC2QosColorGet(&qos, &int32bit);
+			mvOsPrintf("0x%1.1x\t", int32bit);
+			mvPp2ClsC2QosGpidGet(&qos, &int32bit);
+			mvOsPrintf("0x%3.3x\t", int32bit);
+			mvPp2ClsC2QosQueueGet(&qos, &int32bit);
+			mvOsPrintf("0x%2.2x", int32bit);
+			mvOsPrintf("\n");
+		}
+	}
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2QosPrioHwDump(void)
+{
+	int tbl_id, tbl_line, int32bit;
+
+	MV_PP2_CLS_C2_QOS_ENTRY qos;
+
+	for (tbl_id = 0; tbl_id < MV_PP2_CLS_C2_QOS_PRIO_TBL_NUM; tbl_id++) {
+
+		mvOsPrintf("\n-------- PRIORITY TABLE %d -----------\n", tbl_id);
+		mvOsPrintf("LINE	PRIO	COLOR	GEM_ID	QUEUE\n");
+
+		for (tbl_line = 0; tbl_line < MV_PP2_CLS_C2_QOS_PRIO_TBL_SIZE; tbl_line++) {
+			mvPp2ClsC2QosHwRead(tbl_id, 0/*PRIO*/, tbl_line, &qos);
+			mvOsPrintf("0x%2.2x\t", qos.tbl_line);
+			mvPp2ClsC2QosPrioGet(&qos, &int32bit);
+			mvOsPrintf("0x%1.1x\t", int32bit);
+			mvPp2ClsC2QosColorGet(&qos, &int32bit);
+			mvOsPrintf("0x%1.1x\t", int32bit);
+			mvPp2ClsC2QosGpidGet(&qos, &int32bit);
+			mvOsPrintf("0x%3.3x\t", int32bit);
+			mvPp2ClsC2QosQueueGet(&qos, &int32bit);
+			mvOsPrintf("0x%2.2x", int32bit);
+			mvOsPrintf("\n");
+		}
+	}
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2QosPrioSet(MV_PP2_CLS_C2_QOS_ENTRY *qos, int prio)
+
+{
+	PTR_VALIDATE(qos);
+	POS_RANGE_VALIDATE(prio, (QOS_TBL_LINE_NUM_PRI-1));
+
+	qos->data &= ~QOS_TBL_PRI_MASK;
+	qos->data |= (prio << QOS_TBL_PRI);
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2QosDscpSet(MV_PP2_CLS_C2_QOS_ENTRY *qos, int dscp)
+{
+	PTR_VALIDATE(qos);
+	POS_RANGE_VALIDATE(dscp, (QOS_TBL_LINE_NUM_DSCP-1));
+
+	qos->data &= ~QOS_TBL_DSCP_MASK;
+	qos->data |= (dscp << QOS_TBL_DSCP);
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2QosColorSet(MV_PP2_CLS_C2_QOS_ENTRY *qos, int color)
+{
+	PTR_VALIDATE(qos);
+	POS_RANGE_VALIDATE(color, COLOR_RED_AND_LOCK);
+
+	qos->data &= ~QOS_TBL_COLOR_MASK;
+	qos->data |= (color << QOS_TBL_COLOR);
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2QosGpidSet(MV_PP2_CLS_C2_QOS_ENTRY *qos, int gpid)
+{
+	PTR_VALIDATE(qos);
+	POS_RANGE_VALIDATE(gpid, ACT_QOS_ATTR_GEM_ID_MAX);
+
+	qos->data &= ~QOS_TBL_GEM_ID_MASK;
+	qos->data |= (gpid << QOS_TBL_GEM_ID);
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2QosQueueSet(MV_PP2_CLS_C2_QOS_ENTRY *qos, int queue)
+{
+	PTR_VALIDATE(qos);
+	POS_RANGE_VALIDATE(queue, QOS_TBL_Q_NUM_MAX);
+
+	qos->data &= ~QOS_TBL_Q_NUM_MASK;
+	qos->data |= (queue << QOS_TBL_Q_NUM);
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2QosPrioGet(MV_PP2_CLS_C2_QOS_ENTRY *qos, int *prio)
+{
+	PTR_VALIDATE(qos);
+	PTR_VALIDATE(prio);
+
+	*prio = (qos->data & QOS_TBL_PRI_MASK) >> QOS_TBL_PRI ;
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2QosDscpGet(MV_PP2_CLS_C2_QOS_ENTRY *qos, int *dscp)
+{
+	PTR_VALIDATE(qos);
+	PTR_VALIDATE(dscp);
+
+	*dscp = (qos->data & QOS_TBL_DSCP_MASK) >> QOS_TBL_DSCP;
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2QosColorGet(MV_PP2_CLS_C2_QOS_ENTRY *qos, int *color)
+{
+	PTR_VALIDATE(qos);
+	PTR_VALIDATE(color);
+
+	*color = (qos->data & QOS_TBL_COLOR_MASK) >> QOS_TBL_COLOR;
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2QosGpidGet(MV_PP2_CLS_C2_QOS_ENTRY *qos, int *gpid)
+{
+	PTR_VALIDATE(qos);
+	PTR_VALIDATE(gpid);
+
+	*gpid = (qos->data & QOS_TBL_GEM_ID_MASK) >> QOS_TBL_GEM_ID;
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2QosQueueGet(MV_PP2_CLS_C2_QOS_ENTRY *qos, int *queue)
+{
+	PTR_VALIDATE(qos);
+	PTR_VALIDATE(queue);
+
+	*queue = (qos->data & QOS_TBL_Q_NUM_MASK) >> QOS_TBL_Q_NUM;
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+/*		Classifier C2 engine TCAM table Public APIs	    		 */
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2HwWrite(int index, MV_PP2_CLS_C2_ENTRY *c2)
+{
+	int TcmIdx;
+
+	PTR_VALIDATE(c2);
+
+	POS_RANGE_VALIDATE(index, (MV_PP2_CLS_C2_TCAM_SIZE-1));
+
+	c2->index = index;
+
+	/* write index reg */
+	mvPp2WrReg(MV_PP2_CLS2_TCAM_IDX_REG, index);
+
+	/* write valid bit*/
+	c2->inv = 0;
+	mvPp2WrReg(MV_PP2_CLS2_TCAM_INV_REG, ((c2->inv) << MV_PP2_CLS2_TCAM_INV_INVALID));
+
+	for (TcmIdx = 0; TcmIdx < MV_PP2_CLS_C2_TCAM_WORDS; TcmIdx++)
+		mvPp2WrReg(MV_PP2_CLS2_TCAM_DATA_REG(TcmIdx), c2->tcam.words[TcmIdx]);
+
+	/* write action_tbl 0x1B30 */
+	mvPp2WrReg(MV_PP2_CLS2_ACT_DATA_REG, c2->sram.regs.action_tbl);
+
+	/* write actions 0x1B60 */
+	mvPp2WrReg(MV_PP2_CLS2_ACT_REG, c2->sram.regs.actions);
+
+	/* write qos_attr 0x1B64 */
+	mvPp2WrReg(MV_PP2_CLS2_ACT_QOS_ATTR_REG, c2->sram.regs.qos_attr);
+
+	/* write hwf_attr 0x1B68 */
+	mvPp2WrReg(MV_PP2_CLS2_ACT_HWF_ATTR_REG, c2->sram.regs.hwf_attr);
+
+	/* write dup_attr 0x1B6C */
+	mvPp2WrReg(MV_PP2_CLS2_ACT_DUP_ATTR_REG, c2->sram.regs.dup_attr);
+#ifdef CONFIG_MV_ETH_PP2_1
+	/* write seq_attr 0x1B70 */
+	mvPp2WrReg(MV_PP2_CLS2_ACT_SEQ_ATTR_REG, c2->sram.regs.seq_attr);
+#endif
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+/*
+ note: error is not returned if entry is invalid
+ user should check c2->valid afer returned from this func
+*/
+int mvPp2ClsC2HwRead(int index, MV_PP2_CLS_C2_ENTRY *c2)
+{
+	unsigned int regVal;
+	int	TcmIdx;
+
+	PTR_VALIDATE(c2);
+
+	c2->index = index;
+
+	/* write index reg */
+	mvPp2WrReg(MV_PP2_CLS2_TCAM_IDX_REG, index);
+
+	/* read inValid bit*/
+	regVal = mvPp2RdReg(MV_PP2_CLS2_TCAM_INV_REG);
+	c2->inv = (regVal & MV_PP2_CLS2_TCAM_INV_INVALID_MASK) >> MV_PP2_CLS2_TCAM_INV_INVALID;
+
+	if (c2->inv)
+		return MV_OK;
+
+	for (TcmIdx = 0; TcmIdx < MV_PP2_CLS_C2_TCAM_WORDS; TcmIdx++)
+		c2->tcam.words[TcmIdx] = mvPp2RdReg(MV_PP2_CLS2_TCAM_DATA_REG(TcmIdx));
+
+	/* read action_tbl 0x1B30 */
+	c2->sram.regs.action_tbl = mvPp2RdReg(MV_PP2_CLS2_ACT_DATA_REG);
+
+	/* read actions 0x1B60 */
+	c2->sram.regs.actions = mvPp2RdReg(MV_PP2_CLS2_ACT_REG);
+
+	/* read qos_attr 0x1B64 */
+	c2->sram.regs.qos_attr = mvPp2RdReg(MV_PP2_CLS2_ACT_QOS_ATTR_REG);
+
+	/* read hwf_attr 0x1B68 */
+	c2->sram.regs.hwf_attr = mvPp2RdReg(MV_PP2_CLS2_ACT_HWF_ATTR_REG);
+
+	/* read dup_attr 0x1B6C */
+	c2->sram.regs.dup_attr = mvPp2RdReg(MV_PP2_CLS2_ACT_DUP_ATTR_REG);
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	/* read seq_attr 0x1B70 */
+	c2->sram.regs.seq_attr = mvPp2RdReg(MV_PP2_CLS2_ACT_SEQ_ATTR_REG);
+#endif
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2SwWordsDump(MV_PP2_CLS_C2_ENTRY *c2)
+{
+	int i;
+
+	PTR_VALIDATE(c2);
+
+	/* TODO check size */
+	/* hw entry id */
+	mvOsPrintf("[0x%3.3x] ", c2->index);
+
+	i = MV_PP2_CLS_C2_TCAM_WORDS - 1 ;
+
+	while (i >= 0)
+		mvOsPrintf("%4.4x ", (c2->tcam.words[i--]) & 0xFFFF);
+
+	mvOsPrintf("| ");
+
+	mvOsPrintf(C2_SRAM_FMT, C2_SRAM_VAL(c2->sram.words));
+
+	/*tcam inValid bit*/
+	mvOsPrintf(" %s", (c2->inv == 1) ? "[inv]" : "[valid]");
+
+	mvOsPrintf("\n        ");
+
+	i = MV_PP2_CLS_C2_TCAM_WORDS - 1;
+
+	while (i >= 0)
+		mvOsPrintf("%4.4x ", ((c2->tcam.words[i--] >> 16)  & 0xFFFF));
+
+	mvOsPrintf("\n");
+
+	return MV_OK;
+}
+
+
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2SwDump(MV_PP2_CLS_C2_ENTRY *c2)
+{
+	int id, sel, type, gemid, low_q, high_q, color, int32bit;
+
+	PTR_VALIDATE(c2);
+
+	mvPp2ClsC2SwWordsDump(c2);
+	mvOsPrintf("\n");
+
+	/*------------------------------*/
+	/*	action_tbl 0x1B30	*/
+	/*------------------------------*/
+
+	id =  ((c2->sram.regs.action_tbl & (ACT_TBL_ID_MASK)) >> ACT_TBL_ID);
+	sel =  ((c2->sram.regs.action_tbl & (ACT_TBL_SEL_MASK)) >> ACT_TBL_SEL);
+	type =	((c2->sram.regs.action_tbl & (ACT_TBL_PRI_DSCP_MASK)) >> ACT_TBL_PRI_DSCP);
+	gemid = ((c2->sram.regs.action_tbl & (ACT_TBL_GEM_ID_MASK)) >> ACT_TBL_GEM_ID);
+	low_q = ((c2->sram.regs.action_tbl & (ACT_TBL_LOW_Q_MASK)) >> ACT_TBL_LOW_Q);
+	high_q = ((c2->sram.regs.action_tbl & (ACT_TBL_HIGH_Q_MASK)) >> ACT_TBL_HIGH_Q);
+	color =  ((c2->sram.regs.action_tbl & (ACT_TBL_COLOR_MASK)) >> ACT_TBL_COLOR);
+
+	mvOsPrintf("FROM_QOS_%s_TBL[%2.2d]:  ", sel ? "DSCP" : "PRI", id);
+	type ? mvOsPrintf("%s	", sel ? "DSCP" : "PRIO") : 0;
+	color ? mvOsPrintf("COLOR	") : 0;
+	gemid ? mvOsPrintf("GEMID	") : 0;
+	low_q ? mvOsPrintf("LOW_Q	") : 0;
+	high_q ? mvOsPrintf("HIGH_Q	") : 0;
+	mvOsPrintf("\n");
+
+	mvOsPrintf("FROM_ACT_TBL:		");
+	(type == 0) ? mvOsPrintf("%s 	", sel ? "DSCP" : "PRI") : 0;
+	(gemid == 0) ? mvOsPrintf("GEMID	") : 0;
+	(low_q == 0) ? mvOsPrintf("LOW_Q	") : 0;
+	(high_q == 0) ? mvOsPrintf("HIGH_Q	") : 0;
+	(color == 0) ? mvOsPrintf("COLOR	") : 0;
+	mvOsPrintf("\n\n");
+
+	/*------------------------------*/
+	/*	actions 0x1B60		*/
+	/*------------------------------*/
+
+	mvOsPrintf("ACT_CMD:		COLOR	PRIO	DSCP	GEMID	LOW_Q	HIGH_Q	FWD	POLICER	FID\n");
+	mvOsPrintf("			");
+
+	mvOsPrintf("%1.1d\t%1.1d\t%1.1d\t%1.1d\t%1.1d\t%1.1d\t%1.1d\t%1.1d\t%1.1d\t",
+			((c2->sram.regs.actions & ACT_COLOR_MASK) >> ACT_COLOR),
+			((c2->sram.regs.actions & ACT_PRI_MASK) >> ACT_PRI),
+			((c2->sram.regs.actions & ACT_DSCP_MASK) >> ACT_DSCP),
+			((c2->sram.regs.actions & ACT_GEM_ID_MASK) >> ACT_GEM_ID),
+			((c2->sram.regs.actions & ACT_LOW_Q_MASK) >> ACT_LOW_Q),
+			((c2->sram.regs.actions & ACT_HIGH_Q_MASK) >> ACT_HIGH_Q),
+			((c2->sram.regs.actions & ACT_FWD_MASK) >> ACT_FWD),
+			((c2->sram.regs.actions & ACT_POLICER_SELECT_MASK) >> ACT_POLICER_SELECT),
+			((c2->sram.regs.actions & ACT_FLOW_ID_EN_MASK) >> ACT_FLOW_ID_EN));
+	mvOsPrintf("\n\n");
+
+
+	/*------------------------------*/
+	/*	qos_attr 0x1B64		*/
+	/*------------------------------*/
+	mvOsPrintf("ACT_ATTR:		PRIO	DSCP	GEMID	LOW_Q	HIGH_Q	QUEUE\n");
+	mvOsPrintf("		");
+	/* modify priority */
+	int32bit =  ((c2->sram.regs.qos_attr & ACT_QOS_ATTR_MDF_PRI_MASK) >> ACT_QOS_ATTR_MDF_PRI);
+	mvOsPrintf("	%1.1d\t", int32bit);
+
+	/* modify dscp */
+	int32bit =  ((c2->sram.regs.qos_attr & ACT_QOS_ATTR_MDF_DSCP_MASK) >> ACT_QOS_ATTR_MDF_DSCP);
+	mvOsPrintf("0x%2.2d\t", int32bit);
+
+	/* modify gemportid */
+	int32bit =  ((c2->sram.regs.qos_attr & ACT_QOS_ATTR_MDF_GEM_ID_MASK) >> ACT_QOS_ATTR_MDF_GEM_ID);
+	mvOsPrintf("0x%4.4x\t", int32bit);
+
+	/* modify low Q */
+	int32bit =  ((c2->sram.regs.qos_attr & ACT_QOS_ATTR_MDF_LOW_Q_MASK) >> ACT_QOS_ATTR_MDF_LOW_Q);
+	mvOsPrintf("0x%1.1d\t", int32bit);
+
+	/* modify high Q */
+	int32bit =  ((c2->sram.regs.qos_attr & ACT_QOS_ATTR_MDF_HIGH_Q_MASK) >> ACT_QOS_ATTR_MDF_HIGH_Q);
+	mvOsPrintf("0x%2.2x\t", int32bit);
+
+	/*modify queue*/
+	int32bit = ((c2->sram.regs.qos_attr & (ACT_QOS_ATTR_MDF_LOW_Q_MASK | ACT_QOS_ATTR_MDF_HIGH_Q_MASK)));
+	int32bit >>= ACT_QOS_ATTR_MDF_LOW_Q;
+
+	mvOsPrintf("0x%2.2x\t", int32bit);
+	mvOsPrintf("\n\n");
+
+
+
+	/*------------------------------*/
+	/*	hwf_attr 0x1B68		*/
+	/*------------------------------*/
+#ifdef CONFIG_MV_ETH_PP2_1
+	mvOsPrintf("HWF_ATTR:		IPTR	DPTR	CHKSM   MTU_IDX\n");
+#else
+	mvOsPrintf("HWF_ATTR:		IPTR	DPTR	CHKSM\n");
+#endif
+	mvOsPrintf("			");
+
+	/* HWF modification instraction pointer */
+	int32bit =  ((c2->sram.regs.hwf_attr & ACT_HWF_ATTR_IPTR_MASK) >> ACT_HWF_ATTR_IPTR);
+	mvOsPrintf("0x%1.1x\t", int32bit);
+
+	/* HWF modification data pointer */
+	int32bit =  ((c2->sram.regs.hwf_attr & ACT_HWF_ATTR_DPTR_MASK) >> ACT_HWF_ATTR_DPTR);
+	mvOsPrintf("0x%4.4x\t", int32bit);
+
+	/* HWF modification instraction pointer */
+	int32bit =  ((c2->sram.regs.hwf_attr & ACT_HWF_ATTR_CHKSM_EN_MASK) >> ACT_HWF_ATTR_CHKSM_EN);
+	mvOsPrintf("%s\t", int32bit ? "ENABLE " : "DISABLE");
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	/* mtu index */
+	int32bit =  ((c2->sram.regs.hwf_attr & ACT_HWF_ATTR_MTU_INX_MASK) >> ACT_HWF_ATTR_MTU_INX);
+	mvOsPrintf("0x%1.1x\t", int32bit);
+#endif
+	mvOsPrintf("\n\n");
+
+	/*------------------------------*/
+	/*	dup_attr 0x1B6C		*/
+	/*------------------------------*/
+#ifdef CONFIG_MV_ETH_PP2_1
+	mvOsPrintf("DUP_ATTR:		FID	COUNT	POLICER [id    bank]\n");
+	mvOsPrintf("			0x%2.2x\t0x%1.1x\t\t[0x%2.2x   0x%1.1x]\n",
+		((c2->sram.regs.dup_attr & ACT_DUP_FID_MASK) >> ACT_DUP_FID),
+		((c2->sram.regs.dup_attr & ACT_DUP_COUNT_MASK) >> ACT_DUP_COUNT),
+		((c2->sram.regs.dup_attr & ACT_DUP_POLICER_MASK) >> ACT_DUP_POLICER_ID),
+		((c2->sram.regs.dup_attr & ACT_DUP_POLICER_BANK_MASK) >> ACT_DUP_POLICER_BANK_BIT));
+	mvOsPrintf("\n");
+	/*------------------------------*/
+	/*	seq_attr 0x1B70		*/
+	/*------------------------------*/
+	/*PPv2.1 new feature MAS 3.14*/
+	mvOsPrintf("SEQ_ATTR:		ID	MISS\n");
+	mvOsPrintf("			0x%2.2x    0x%2.2x\n",
+			((c2->sram.regs.seq_attr & ACT_SEQ_ATTR_ID_MASK) >> ACT_SEQ_ATTR_ID),
+			((c2->sram.regs.seq_attr & ACT_SEQ_ATTR_MISS_MASK) >> ACT_SEQ_ATTR_MISS));
+
+	mvOsPrintf("\n\n");
+
+#else
+	mvOsPrintf("DUP_ATTR:		FID	COUNT	POLICER\n");
+	mvOsPrintf("	0x%2.2x\t0x%1.1x\t0x%2.2x",
+		((c2->sram.regs.dup_attr & ACT_DUP_FID_MASK) >> ACT_DUP_FID),
+		((c2->sram.regs.dup_attr & ACT_DUP_COUNT_MASK) >> ACT_DUP_COUNT),
+		((c2->sram.regs.dup_attr & ACT_DUP_POLICER_MASK) >> ACT_DUP_POLICER_ID));
+
+	mvOsPrintf("\n\n");
+#endif
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+void 	mvPp2ClsC2SwClear(MV_PP2_CLS_C2_ENTRY *c2)
+{
+
+	memset(c2, 0, sizeof(MV_PP2_CLS_C2_ENTRY));
+}
+/*-------------------------------------------------------------------------------*/
+void 	mvPp2ClsC2HwClearAll()
+{
+	int index;
+
+	MV_PP2_CLS_C2_ENTRY c2;
+
+	mvPp2ClsC2SwClear(&c2);
+
+	for (index = 0; index < MV_PP2_CLS_C2_TCAM_SIZE; index++) {
+		mvPp2ClsC2HwWrite(index, &c2);
+		mvPp2ClsC2HwInv(index);
+	}
+}
+/*-------------------------------------------------------------------------------*/
+int 	mvPp2ClsC2HwDump()
+{
+	int index;
+	unsigned cnt;
+
+	MV_PP2_CLS_C2_ENTRY c2;
+
+	mvPp2ClsC2SwClear(&c2);
+
+	for (index = 0; index < MV_PP2_CLS_C2_TCAM_SIZE; index++) {
+		mvPp2ClsC2HwRead(index, &c2);
+		if (c2.inv == 0) {
+			mvPp2ClsC2SwDump(&c2);
+			mvPp2ClsC2HitCntrRead(index, &cnt);
+			mvOsPrintf("HITS: %d\n", cnt);
+			mvOsPrintf("-----------------------------------------------------------------\n");
+		}
+	}
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2SwTcam(int enable)
+{
+	POS_RANGE_VALIDATE(enable, 1);
+
+	mvPp2WrReg(MV_PP2_CLS2_TCAM_CTRL_REG, enable);
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC2TcamByteSet(MV_PP2_CLS_C2_ENTRY *c2, unsigned int offs, unsigned char byte, unsigned char enable)
+{
+	PTR_VALIDATE(c2);
+
+	POS_RANGE_VALIDATE(offs, MV_PP2_CLS_C2_TCAM_DATA_BYTES);
+
+	c2->tcam.bytes[TCAM_DATA_BYTE(offs)] = byte;
+	c2->tcam.bytes[TCAM_DATA_MASK(offs)] = enable;
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2TcamByteGet(MV_PP2_CLS_C2_ENTRY *c2, unsigned int offs, unsigned char *byte, unsigned char *enable)
+{
+	PTR_VALIDATE(c2);
+	PTR_VALIDATE(byte);
+	PTR_VALIDATE(enable);
+
+	POS_RANGE_VALIDATE(offs, 8);
+
+	*byte = c2->tcam.bytes[TCAM_DATA_BYTE(offs)];
+	*enable = c2->tcam.bytes[TCAM_DATA_MASK(offs)];
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+/*
+return EQUALS if tcam_data[off]&tcam_mask[off] = byte
+*/
+int mvPp2ClsC2TcamByteCmp(MV_PP2_CLS_C2_ENTRY *c2, unsigned int offs, unsigned char byte)
+	{
+	unsigned char tcamByte, tcamMask;
+
+	PTR_VALIDATE(c2);
+
+	if (mvPp2ClsC2TcamByteGet(c2, offs, &tcamByte, &tcamMask) != MV_OK)
+		return MV_CLS2_ERR;
+
+	if ((tcamByte & tcamMask) == (byte & tcamMask))
+		return EQUALS;
+
+	return NOT_EQUALS;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2TcamBytesCmp(MV_PP2_CLS_C2_ENTRY *c2, unsigned int offset, unsigned int size, unsigned char *bytes)
+{
+	int status, index;
+
+	PTR_VALIDATE(c2);
+
+	if ((sizeof(bytes) < size) || ((offset + size) > (MV_PP2_CLS_C2_TCAM_WORDS * 4))) {
+		mvOsPrintf("mvCls2Hw %s: value is out of range.\n", __func__);
+		return MV_CLS2_ERR;
+	}
+
+	for (index = 0; index < size; index++) {
+		status = mvPp2ClsC2TcamByteCmp(c2, offset, bytes[index]);
+		if (status != EQUALS)
+			return status;
+	}
+	return EQUALS;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2QosTblSet(MV_PP2_CLS_C2_ENTRY *c2, int tbl_id, int tbl_sel)
+{
+
+	PTR_VALIDATE(c2);
+	POS_RANGE_VALIDATE(tbl_sel, 1);
+
+	if (tbl_sel == 1) {
+		/*dscp*/
+		POS_RANGE_VALIDATE(tbl_id, QOS_TBL_NUM_DSCP);
+	} else {
+		/*pri*/
+		POS_RANGE_VALIDATE(tbl_id, QOS_TBL_NUM_PRI);
+	}
+	c2->sram.regs.action_tbl = (tbl_id << ACT_TBL_ID) | (tbl_sel << ACT_TBL_SEL);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2ColorSet(MV_PP2_CLS_C2_ENTRY *c2, int cmd, int from)
+{
+	PTR_VALIDATE(c2);
+	POS_RANGE_VALIDATE(cmd, COLOR_RED_AND_LOCK);
+
+	c2->sram.regs.actions &= ~ACT_COLOR_MASK;
+	c2->sram.regs.actions |= (cmd << ACT_COLOR);
+
+	if (from == 1)
+		c2->sram.regs.action_tbl |= (1 << ACT_TBL_COLOR);
+	else
+		c2->sram.regs.action_tbl &= ~(1 << ACT_TBL_COLOR);
+
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2PrioSet(MV_PP2_CLS_C2_ENTRY *c2, int cmd, int prio, int from)
+{
+	PTR_VALIDATE(c2);
+	POS_RANGE_VALIDATE(cmd, UPDATE_AND_LOCK);
+	POS_RANGE_VALIDATE(prio, (QOS_TBL_LINE_NUM_PRI-1));
+
+	/*set command*/
+	c2->sram.regs.actions &= ~ACT_PRI_MASK;
+	c2->sram.regs.actions |= (cmd << ACT_PRI);
+
+	/*set modify priority value*/
+	c2->sram.regs.qos_attr &= ~ACT_QOS_ATTR_MDF_PRI_MASK;
+	c2->sram.regs.qos_attr |= (prio << ACT_QOS_ATTR_MDF_PRI);
+
+	if (from == 1)
+		c2->sram.regs.action_tbl |= (1 << ACT_TBL_PRI_DSCP);
+	else
+		c2->sram.regs.action_tbl &= ~(1 << ACT_TBL_PRI_DSCP);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2DscpSet(MV_PP2_CLS_C2_ENTRY *c2, int cmd, int dscp, int from)
+{
+	PTR_VALIDATE(c2);
+	POS_RANGE_VALIDATE(cmd, UPDATE_AND_LOCK);
+	POS_RANGE_VALIDATE(dscp, (QOS_TBL_LINE_NUM_DSCP-1));
+
+	/*set command*/
+	c2->sram.regs.actions &= ~ACT_DSCP_MASK;
+	c2->sram.regs.actions |= (cmd << ACT_DSCP);
+
+	/*set modify DSCP value*/
+	c2->sram.regs.qos_attr &= ~ACT_QOS_ATTR_MDF_DSCP_MASK;
+	c2->sram.regs.qos_attr |= (dscp << ACT_QOS_ATTR_MDF_DSCP);
+
+	if (from == 1)
+		c2->sram.regs.action_tbl |= (1 << ACT_TBL_PRI_DSCP);
+	else
+		c2->sram.regs.action_tbl &= ~(1 << ACT_TBL_PRI_DSCP);
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2GpidSet(MV_PP2_CLS_C2_ENTRY *c2, int cmd, int gpid, int from)
+{
+	PTR_VALIDATE(c2);
+	POS_RANGE_VALIDATE(cmd, UPDATE_AND_LOCK);
+	POS_RANGE_VALIDATE(gpid, ACT_QOS_ATTR_GEM_ID_MAX);
+
+	/*set command*/
+	c2->sram.regs.actions &= ~ACT_GEM_ID_MASK;
+	c2->sram.regs.actions |= (cmd << ACT_GEM_ID);
+
+	/*set modify DSCP value*/
+	c2->sram.regs.qos_attr &= ~ACT_QOS_ATTR_MDF_GEM_ID_MASK;
+	c2->sram.regs.qos_attr |= (gpid << ACT_QOS_ATTR_MDF_GEM_ID);
+
+	if (from == 1)
+		c2->sram.regs.action_tbl |= (1 << ACT_TBL_GEM_ID);
+	else
+		c2->sram.regs.action_tbl &= ~(1 << ACT_TBL_GEM_ID);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC2QueueHighSet(MV_PP2_CLS_C2_ENTRY *c2, int cmd, int queue, int from)
+{
+	PTR_VALIDATE(c2);
+
+
+	POS_RANGE_VALIDATE(cmd, UPDATE_AND_LOCK);
+	POS_RANGE_VALIDATE(queue, ACT_QOS_ATTR_MDF_HIGH_Q_MAX);
+
+	/*set command*/
+	c2->sram.regs.actions &= ~ACT_HIGH_Q_MASK;
+	c2->sram.regs.actions |= (cmd << ACT_HIGH_Q);
+
+	/*set modify High queue value*/
+	c2->sram.regs.qos_attr &= ~ACT_QOS_ATTR_MDF_HIGH_Q_MASK;
+	c2->sram.regs.qos_attr |= (queue << ACT_QOS_ATTR_MDF_HIGH_Q);
+
+	if (from == 1)
+		c2->sram.regs.action_tbl |= (1 << ACT_TBL_HIGH_Q);
+	else
+		c2->sram.regs.action_tbl &= ~(1 << ACT_TBL_HIGH_Q);
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC2QueueLowSet(MV_PP2_CLS_C2_ENTRY *c2, int cmd, int queue, int from)
+{
+	PTR_VALIDATE(c2);
+
+	POS_RANGE_VALIDATE(cmd, UPDATE_AND_LOCK);
+	POS_RANGE_VALIDATE(queue, ACT_QOS_ATTR_MDF_LOW_Q_MAX);
+
+	/*set command*/
+	c2->sram.regs.actions &= ~ACT_LOW_Q_MASK;
+	c2->sram.regs.actions |= (cmd << ACT_LOW_Q);
+
+	/*set modify High queue value*/
+	c2->sram.regs.qos_attr &= ~ACT_QOS_ATTR_MDF_LOW_Q_MASK;
+	c2->sram.regs.qos_attr |= (queue << ACT_QOS_ATTR_MDF_LOW_Q);
+
+	if (from == 1)
+		c2->sram.regs.action_tbl |= (1 << ACT_TBL_LOW_Q);
+	else
+		c2->sram.regs.action_tbl &= ~(1 << ACT_TBL_LOW_Q);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2QueueSet(MV_PP2_CLS_C2_ENTRY *c2, int cmd, int queue, int from)
+{
+	int status = MV_OK;
+	int qHigh, qLow;
+
+	PTR_VALIDATE(c2);
+	POS_RANGE_VALIDATE(queue, ACT_QOS_ATTR_Q_MAX);
+
+	/* cmd validation in set functions */
+
+	qHigh = (queue & ACT_QOS_ATTR_MDF_HIGH_Q_MASK) >> ACT_QOS_ATTR_MDF_HIGH_Q;
+	qLow = (queue & ACT_QOS_ATTR_MDF_LOW_Q_MASK) >> ACT_QOS_ATTR_MDF_LOW_Q;
+
+	status |= mvPp2ClsC2QueueLowSet(c2, cmd, qLow, from);
+	status |= mvPp2ClsC2QueueHighSet(c2, cmd, qHigh, from);
+
+	return status;
+
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2ForwardSet(MV_PP2_CLS_C2_ENTRY *c2, int cmd)
+{
+	PTR_VALIDATE(c2);
+	POS_RANGE_VALIDATE(cmd, HWF_AND_LOW_LATENCY_AND_LOCK);
+
+	c2->sram.regs.actions &= ~ACT_FWD_MASK;
+	c2->sram.regs.actions |= (cmd << ACT_FWD);
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+#ifdef CONFIG_MV_ETH_PP2_1
+int mvPp2ClsC2PolicerSet(MV_PP2_CLS_C2_ENTRY *c2, int cmd, int policerId, int bank)
+{
+	PTR_VALIDATE(c2);
+	POS_RANGE_VALIDATE(cmd, UPDATE_AND_LOCK);
+	POS_RANGE_VALIDATE(policerId, ACT_DUP_POLICER_MAX);
+	BIT_RANGE_VALIDATE(bank);
+
+	c2->sram.regs.actions &= ~ACT_POLICER_SELECT_MASK;
+	c2->sram.regs.actions |= (cmd << ACT_POLICER_SELECT);
+
+	c2->sram.regs.dup_attr &= ~ACT_DUP_POLICER_MASK;
+	c2->sram.regs.dup_attr |= (policerId << ACT_DUP_POLICER_ID);
+
+	if (bank)
+		c2->sram.regs.dup_attr |= ACT_DUP_POLICER_BANK_MASK;
+	else
+		c2->sram.regs.dup_attr &= ~ACT_DUP_POLICER_BANK_MASK;
+
+	return MV_OK;
+
+}
+
+#else
+int mvPp2ClsC2PolicerSet(MV_PP2_CLS_C2_ENTRY *c2, int cmd, int policerId)
+{
+	PTR_VALIDATE(c2);
+	POS_RANGE_VALIDATE(cmd, UPDATE_AND_LOCK);
+	POS_RANGE_VALIDATE(policerId, ACT_DUP_POLICER_MAX);
+
+	c2->sram.regs.actions &= ~ACT_POLICER_SELECT_MASK;
+	c2->sram.regs.actions |= (cmd << ACT_POLICER_SELECT);
+
+	c2->sram.regs.dup_attr &= ~ACT_DUP_POLICER_MASK;
+	c2->sram.regs.dup_attr |= (policerId << ACT_DUP_POLICER_ID);
+	return MV_OK;
+}
+#endif /*CONFIG_MV_ETH_PP2_1*/
+ /*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2FlowIdEn(MV_PP2_CLS_C2_ENTRY *c2, int flowid_en)
+{
+	PTR_VALIDATE(c2);
+
+	/*set Flow ID enable or disable*/
+	if (flowid_en)
+		c2->sram.regs.actions |= (1 << ACT_FLOW_ID_EN);
+	else
+		c2->sram.regs.actions &= ~(1 << ACT_FLOW_ID_EN);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2ModSet(MV_PP2_CLS_C2_ENTRY *c2, int data_ptr, int instr_offs, int l4_csum)
+{
+	PTR_VALIDATE(c2);
+	POS_RANGE_VALIDATE(data_ptr, ACT_HWF_ATTR_DPTR_MAX);
+	POS_RANGE_VALIDATE(instr_offs, ACT_HWF_ATTR_IPTR_MAX);
+	POS_RANGE_VALIDATE(l4_csum, 1);
+
+	c2->sram.regs.hwf_attr &= ~ACT_HWF_ATTR_DPTR_MASK;
+	c2->sram.regs.hwf_attr &= ~ACT_HWF_ATTR_IPTR_MASK;
+	c2->sram.regs.hwf_attr &= ~ACT_HWF_ATTR_CHKSM_EN_MASK;
+
+	c2->sram.regs.hwf_attr |= (data_ptr << ACT_HWF_ATTR_DPTR);
+	c2->sram.regs.hwf_attr |= (instr_offs << ACT_HWF_ATTR_IPTR);
+	c2->sram.regs.hwf_attr |= (l4_csum << ACT_HWF_ATTR_CHKSM_EN);
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+
+/*  PPv2.1 (feature MAS 3.7) new feature - set mtu index */
+
+int mvPp2ClsC2MtuSet(MV_PP2_CLS_C2_ENTRY *c2, int mtu_inx)
+{
+	PTR_VALIDATE(c2);
+	POS_RANGE_VALIDATE(mtu_inx, ACT_HWF_ATTR_MTU_INX_MAX);
+
+	c2->sram.regs.hwf_attr &= ~ACT_HWF_ATTR_MTU_INX_MASK;
+	c2->sram.regs.hwf_attr |= (mtu_inx << ACT_HWF_ATTR_MTU_INX);
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2DupSet(MV_PP2_CLS_C2_ENTRY *c2, int dupid, int count)
+{
+	PTR_VALIDATE(c2);
+	POS_RANGE_VALIDATE(count, ACT_DUP_COUNT_MAX);
+	POS_RANGE_VALIDATE(dupid, ACT_DUP_FID_MAX);
+
+	/*set flowid and count*/
+	c2->sram.regs.dup_attr &= ~(ACT_DUP_FID_MASK | ACT_DUP_COUNT_MASK);
+	c2->sram.regs.dup_attr |= (dupid << ACT_DUP_FID);
+	c2->sram.regs.dup_attr |= (count << ACT_DUP_COUNT);
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+/*
+  PPv2.1 (feature MAS 3.14) SEQ_ATTR new register in action table
+ */
+int mvPp2ClsC2SeqSet(MV_PP2_CLS_C2_ENTRY *c2, int miss, int id)
+{
+	PTR_VALIDATE(c2);
+	POS_RANGE_VALIDATE(miss, 1);
+	POS_RANGE_VALIDATE(id, ACT_SEQ_ATTR_ID_MAX);
+
+	c2->sram.regs.seq_attr = 0;
+	c2->sram.regs.seq_attr = ((id << ACT_SEQ_ATTR_ID) | (miss << ACT_SEQ_ATTR_MISS));
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+/*		Classifier C2 engine Hit counters Public APIs		    	 */
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2HitCntrsClearAll(void)
+{
+	int iter = 0;
+
+	/* wrirte clear bit*/
+	mvPp2WrReg(MV_PP2_CLS2_HIT_CTR_CLR_REG, (1 << MV_PP2_CLS2_HIT_CTR_CLR_CLR));
+
+	while (mvPp2ClsC2HitCntrsIsBusy())
+		if (iter++ >= RETRIES_EXCEEDED) {
+			mvOsPrintf("%s:Error - retries exceeded.\n", __func__);
+			return MV_CLS2_RETRIES_EXCEEDED;
+		}
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2HitCntrsIsBusy(void)
+{
+	unsigned int regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_CLS2_HIT_CTR_REG);
+	regVal &= MV_PP2_CLS2_HIT_CTR_CLR_DONE_MASK;
+	regVal >>= MV_PP2_CLS2_HIT_CTR_CLR_DONE;
+
+	return (1 - (int)regVal);
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2HitCntrRead(int index, MV_U32 *cntr)
+{
+	unsigned int value = 0;
+
+	/* write index reg */
+	mvPp2WrReg(MV_PP2_CLS2_TCAM_IDX_REG, index);
+
+	value = mvPp2RdReg(MV_PP2_CLS2_HIT_CTR_REG);
+
+	if (cntr)
+		*cntr = value;
+	else
+		mvOsPrintf("INDEX: 0x%8.8X	VAL: 0x%8.8X\n", index, value);
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC2HitCntrsDump()
+{
+	int i;
+	unsigned int cnt;
+
+	for (i = 0; i < MV_PP2_CLS_C2_TCAM_SIZE; i++) {
+		mvPp2ClsC2HitCntrRead(i, &cnt);
+		if (cnt != 0)
+			mvOsPrintf("INDEX: 0x%8.8X	VAL: 0x%8.8X\n", i, cnt);
+	}
+
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC2RegsDump()
+{
+	int i;
+	char reg_name[100];
+
+	mvPp2PrintReg(MV_PP2_CLS2_TCAM_IDX_REG, "MV_PP2_CLS2_TCAM_IDX_REG");
+
+	for (i = 0; i < MV_PP2_CLS_C2_TCAM_WORDS; i++) {
+		mvOsSPrintf(reg_name, "MV_PP2_CLS2_TCAM_DATA_%d_REG", i);
+		mvPp2PrintReg(MV_PP2_CLS2_TCAM_DATA_REG(i), reg_name);
+	}
+
+	mvPp2PrintReg(MV_PP2_CLS2_TCAM_INV_REG, "MV_PP2_CLS2_TCAM_INV_REG");
+	mvPp2PrintReg(MV_PP2_CLS2_ACT_DATA_REG, "MV_PP2_CLS2_ACT_DATA_REG");
+	mvPp2PrintReg(MV_PP2_CLS2_DSCP_PRI_INDEX_REG, "MV_PP2_CLS2_DSCP_PRI_INDEX_REG");
+	mvPp2PrintReg(MV_PP2_CLS2_QOS_TBL_REG, "MV_PP2_CLS2_QOS_TBL_REG");
+	mvPp2PrintReg(MV_PP2_CLS2_ACT_REG, "MV_PP2_CLS2_ACT_REG");
+	mvPp2PrintReg(MV_PP2_CLS2_ACT_QOS_ATTR_REG, "MV_PP2_CLS2_ACT_QOS_ATTR_REG");
+	mvPp2PrintReg(MV_PP2_CLS2_ACT_HWF_ATTR_REG, "MV_PP2_CLS2_ACT_HWF_ATTR_REG");
+	mvPp2PrintReg(MV_PP2_CLS2_ACT_DUP_ATTR_REG, "MV_PP2_CLS2_ACT_DUP_ATTR_REG");
+#ifdef CONFIG_MV_ETH_PP2_1
+	mvPp2PrintReg(MV_PP2_CLS2_ACT_SEQ_ATTR_REG, "MV_PP2_CLS2_ACT_SEQ_ATTR_REG");
+#endif
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int	mvPp2ClsC2HwInv(int index)
+{	/* write index reg */
+	mvPp2WrReg(MV_PP2_CLS2_TCAM_IDX_REG, index);
+
+	/* set invalid bit*/
+	mvPp2WrReg(MV_PP2_CLS2_TCAM_INV_REG, (1 << MV_PP2_CLS2_TCAM_INV_INVALID));
+
+	/* trigger */
+	mvPp2WrReg(MV_PP2_CLS2_TCAM_DATA_REG(4), 0);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int	mvPp2ClsC2HwInvAll(void)
+{
+	int index;
+
+	for (index = 0; index < MV_PP2_CLS_C2_TCAM_SIZE; index++) {
+		/* write index reg */
+		mvPp2WrReg(MV_PP2_CLS2_TCAM_IDX_REG, index);
+
+		/* set invalid bit*/
+		mvPp2WrReg(MV_PP2_CLS2_TCAM_INV_REG, (1 << MV_PP2_CLS2_TCAM_INV_INVALID));
+
+		/* trigger */
+		mvPp2WrReg(MV_PP2_CLS2_TCAM_DATA_REG(4), 0);
+	}
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int	mvPp2ClsC2Init(void)
+{
+	mvPp2ClsC2QosHwClearAll();
+	mvPp2ClsC2HwClearAll();
+	mvPp2ClsC2SwTcam(1);
+	return MV_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls2Hw.h b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls2Hw.h
new file mode 100644
index 000000000000..7de7f525cb6e
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls2Hw.h
@@ -0,0 +1,307 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __MV_CLS2_HW_H__
+#define __MV_CLS2_HW_H__
+
+#include "mvPp2ClsActHw.h"
+#include "../common/mvPp2ErrCode.h"
+#include "../common/mvPp2Common.h"
+#include "../gbe/mvPp2GbeRegs.h"
+
+/*-------------------------------------------------------------------------------*/
+/*			Classifier C2 Top Registers	    			 */
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS2_TCAM_IDX_REG			(MV_PP2_REG_BASE + 0x1B00)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS2_TCAM_DATA_REG(idx)			(MV_PP2_REG_BASE + 0x1B10 + (idx) * 4)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS2_TCAM_INV_REG			(MV_PP2_REG_BASE + 0x1B24)
+#define MV_PP2_CLS2_TCAM_INV_INVALID			31
+#define MV_PP2_CLS2_TCAM_INV_INVALID_MASK		(1 << MV_PP2_CLS2_TCAM_INV_INVALID)
+
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS2_ACT_DATA_REG			(MV_PP2_REG_BASE + 0x1B30)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS2_DSCP_PRI_INDEX_REG			(MV_PP2_REG_BASE + 0x1B40)
+
+#define MV_PP2_CLS2_DSCP_PRI_INDEX_LINE_OFF		0
+#define MV_PP2_CLS2_DSCP_PRI_INDEX_LINE_BITS		6
+#define MV_PP2_CLS2_DSCP_PRI_INDEX_LINE_MASK		((1 << MV_PP2_CLS2_DSCP_PRI_INDEX_LINE_BITS) - 1)
+
+#define MV_PP2_CLS2_DSCP_PRI_INDEX_SEL_OFF		6
+#define MV_PP2_CLS2_DSCP_PRI_INDEX_SEL_MASK		(1 << MV_PP2_CLS2_DSCP_PRI_INDEX_SEL_OFF)
+
+#define MV_PP2_CLS2_DSCP_PRI_INDEX_TBL_ID_OFF		8
+#define MV_PP2_CLS2_DSCP_PRI_INDEX_TBL_ID_BITS		6
+#define MV_PP2_CLS2_DSCP_PRI_INDEX_TBL_ID_MASK		((1 << MV_PP2_CLS2_DSCP_PRI_INDEX_TBL_ID_BITS) - 1)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS2_QOS_TBL_REG				(MV_PP2_REG_BASE + 0x1B44)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS2_HIT_CTR_REG				(MV_PP2_REG_BASE + 0x1B50)
+#define MV_PP2_CLS2_HIT_CTR_OFF				0
+
+#ifdef CONFIG_MV_ETH_PP2_1
+#define MV_PP2_CLS2_HIT_CTR_BITS			32
+#else
+#define MV_PP2_CLS2_HIT_CTR_BITS			24
+#endif
+#define MV_PP2_CLS2_HIT_CTR_MASK			((1  << MV_PP2_CLS2_HIT_CTR_BITS) - 1)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS2_HIT_CTR_CLR_REG			(MV_PP2_REG_BASE + 0x1B54)
+
+#define MV_PP2_CLS2_HIT_CTR_CLR_CLR			0
+#define MV_PP2_CLS2_HIT_CTR_CLR_CLR_MASK		(1 << MV_PP2_CLS2_HIT_CTR_CLR_CLR)
+
+#define MV_PP2_CLS2_HIT_CTR_CLR_DONE			1
+#define MV_PP2_CLS2_HIT_CTR_CLR_DONE_MASK		(1 << MV_PP2_CLS2_HIT_CTR_CLR_DONE)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS2_ACT_REG				(MV_PP2_REG_BASE + 0x1B60)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS2_ACT_QOS_ATTR_REG			(MV_PP2_REG_BASE + 0x1B64)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS2_ACT_HWF_ATTR_REG			(MV_PP2_REG_BASE + 0x1B68)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS2_ACT_DUP_ATTR_REG			(MV_PP2_REG_BASE + 0x1B6C)
+
+/*-------------------------------------------------------------------------------*/
+/*
+  PPv2.1 (feature MAS 3.14) SEQ_ATTR new register in action table
+ */
+#define MV_PP2_CLS2_ACT_SEQ_ATTR_REG			(MV_PP2_REG_BASE + 0x1B70)
+
+#define ACT_SEQ_ATTR_ID					0
+#define ACT_SEQ_ATTR_ID_BITS				8
+#define ACT_SEQ_ATTR_ID_MASK				(((1 << ACT_SEQ_ATTR_ID_BITS) - 1) << ACT_SEQ_ATTR_ID)
+#define ACT_SEQ_ATTR_ID_MAX				((1 << ACT_SEQ_ATTR_ID_BITS) - 1)
+
+#define ACT_SEQ_ATTR_MISS				8
+#define ACT_SEQ_ATTR_MISS_MASK				(1 << ACT_SEQ_ATTR_MISS)
+
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS2_TCAM_CTRL_REG			(MV_PP2_REG_BASE + 0x1B90)
+#define MV_PP2_CLS2_TCAM_CTRL_EN			0
+/*-------------------------------------------------------------------------------*/
+/*		Classifier C2 QOS Table	(DSCP/PRI Table)			 */
+/*-------------------------------------------------------------------------------*/
+#define QOS_TBL_LINE_NUM_PRI				(8)
+#define QOS_TBL_NUM_PRI					(64)
+
+#define QOS_TBL_LINE_NUM_DSCP				(64)
+#define QOS_TBL_NUM_DSCP				(8)
+
+#define QOS_TBL_PRI					0
+#define QOS_TBL_PRI_MASK				(((1 << ACT_QOS_ATTR_PRI_BITS) - 1) << QOS_TBL_PRI)
+
+
+#define QOS_TBL_DSCP					3
+#define QOS_TBL_DSCP_MASK				(((1 << ACT_QOS_ATTR_DSCP_BITS) - 1) << QOS_TBL_DSCP)
+
+#define QOS_TBL_COLOR					9
+#define QOS_TBL_COLOR_BITS				3
+#define QOS_TBL_COLOR_MASK				(((1 << QOS_TBL_COLOR_BITS) - 1) << QOS_TBL_COLOR)
+
+#define QOS_TBL_GEM_ID					12
+#define QOS_TBL_GEM_ID_MASK				(((1 << ACT_QOS_ATTR_GEM_ID_BITS) - 1) << QOS_TBL_GEM_ID)
+
+#define QOS_TBL_Q_NUM					24
+#define QOS_TBL_Q_NUM_BITS				8
+#define QOS_TBL_Q_NUM_MAX				((1 << QOS_TBL_Q_NUM_BITS) - 1)
+#define QOS_TBL_Q_NUM_MASK				(((1 << QOS_TBL_Q_NUM_BITS) - 1) << QOS_TBL_Q_NUM)
+/*-------------------------------------------------------------------------------*/
+/*			Classifier C2 engine Public APIs			 */
+/*-------------------------------------------------------------------------------*/
+int	mvPp2ClsC2Init(void);
+
+/*-------------------------------------------------------------------------------*/
+/*		Classifier C2 engine QoS table Public APIs			 */
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_CLS_C2_QOS_DSCP_TBL_SIZE				(64)
+#define MV_PP2_CLS_C2_QOS_PRIO_TBL_SIZE				(8)
+#define MV_PP2_CLS_C2_QOS_DSCP_TBL_NUM				(8)
+#define MV_PP2_CLS_C2_QOS_PRIO_TBL_NUM				(64)
+
+typedef struct mvPp2ClsC2Qosentry {
+	unsigned int tbl_id;
+	unsigned int tbl_sel;
+	unsigned int tbl_line;
+	unsigned int data;
+} MV_PP2_CLS_C2_QOS_ENTRY;
+
+int	mvPp2ClsC2QosPrioHwDump(void);
+int	mvPp2ClsC2QosDscpHwDump(void);
+int	mvPp2ClsC2QosHwRead(int tbl_id, int tbl_sel, int tbl_line, MV_PP2_CLS_C2_QOS_ENTRY *qos);
+int	mvPp2ClsC2QosHwWrite(int id, int sel, int line, MV_PP2_CLS_C2_QOS_ENTRY *qos);
+int	mvPp2ClsC2QosSwDump(MV_PP2_CLS_C2_QOS_ENTRY *qos);
+void 	mvPp2ClsC2QosSwClear(MV_PP2_CLS_C2_QOS_ENTRY *qos);
+void	mvPp2ClsC2QosHwClearAll(void);
+
+int	mvPp2ClsC2QosPrioSet(MV_PP2_CLS_C2_QOS_ENTRY *qos, int prio);
+int	mvPp2ClsC2QosDscpSet(MV_PP2_CLS_C2_QOS_ENTRY *qos, int dscp);
+int	mvPp2ClsC2QosColorSet(MV_PP2_CLS_C2_QOS_ENTRY *qos, int color);
+int	mvPp2ClsC2QosGpidSet(MV_PP2_CLS_C2_QOS_ENTRY *qos, int gpid);
+int	mvPp2ClsC2QosQueueSet(MV_PP2_CLS_C2_QOS_ENTRY *qos, int queue);
+int	mvPp2ClsC2QosPrioGet(MV_PP2_CLS_C2_QOS_ENTRY *qos, int *prio);
+int	mvPp2ClsC2QosDscpGet(MV_PP2_CLS_C2_QOS_ENTRY *qos, int *dscp);
+int	mvPp2ClsC2QosColorGet(MV_PP2_CLS_C2_QOS_ENTRY *qos, int *color);
+int	mvPp2ClsC2QosGpidGet(MV_PP2_CLS_C2_QOS_ENTRY *qos, int *gpid);
+int	mvPp2ClsC2QosQueueGet(MV_PP2_CLS_C2_QOS_ENTRY *qos, int *queue);
+
+/*-------------------------------------------------------------------------------*/
+/*		Classifier C2 engine action table Public APIs	 		 */
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_CLS_C2_TCAM_SIZE				(256)
+#define MV_PP2_CLS_C2_TCAM_WORDS			(5)
+#define MV_PP2_CLS_C2_TCAM_DATA_BYTES			(10)
+#define MV_PP2_CLS_C2_SRAM_WORDS			(5)
+
+#define C2_SRAM_FMT					"%8.8x %8.8x %8.8x %8.8x %8.8x"
+#define C2_SRAM_VAL(p)					p[4], p[3], p[2], p[1], p[0]
+
+typedef struct mvPp2ClsC2Entry {
+	unsigned int index;
+	bool         inv;
+	union {
+		MV_U32	words[MV_PP2_CLS_C2_TCAM_WORDS];
+		MV_U8	bytes[MV_PP2_CLS_C2_TCAM_WORDS * 4];
+	} tcam;
+	union {
+		MV_U32 words[MV_PP2_CLS_C2_SRAM_WORDS];
+		struct {
+			MV_U32 action_tbl; /* 0x1B30 */
+			MV_U32 actions;    /* 0x1B60 */
+			MV_U32 qos_attr;   /* 0x1B64*/
+			MV_U32 hwf_attr;   /* 0x1B68 */
+			MV_U32 dup_attr;   /* 0x1B6C */
+			/* PPv2.1 (feature MAS 3.14) SEQ_ATTR new register in action table */
+			MV_U32 seq_attr;   /* 0x1B70 */
+		} regs;
+	} sram;
+} MV_PP2_CLS_C2_ENTRY;
+
+int 	mvPp2ClsC2SwTcam(int enable);
+int 	mvPp2ClsC2HwWrite(int index, MV_PP2_CLS_C2_ENTRY *c2);
+int 	mvPp2ClsC2HwRead(int index, MV_PP2_CLS_C2_ENTRY *c2);
+int 	mvPp2ClsC2SwDump(MV_PP2_CLS_C2_ENTRY *c2);
+int 	mvPp2ClsC2HwDump(void);
+void 	mvPp2ClsC2SwClear(MV_PP2_CLS_C2_ENTRY *c2);
+void	mvPp2ClsC2HwClearAll(void);
+int	mvPp2ClsC2HwInv(int index);
+int	mvPp2ClsC2HwInvAll(void);
+
+int	mvPp2ClsC2TcamByteSet(MV_PP2_CLS_C2_ENTRY *c2, unsigned int offs, unsigned char byte, unsigned char enable);
+int	mvPp2ClsC2TcamByteGet(MV_PP2_CLS_C2_ENTRY *c2, unsigned int offs, unsigned char *byte, unsigned char *enable);
+int	mvPp2ClsC2TcamByteCmp(MV_PP2_CLS_C2_ENTRY *c2, unsigned int offs, unsigned char byte);
+int 	mvPp2ClsC2TcamBytesCmp(MV_PP2_CLS_C2_ENTRY *c2, unsigned int offset, unsigned int size, unsigned char *bytes);
+
+int	mvPp2ClsC2QosTblSet(MV_PP2_CLS_C2_ENTRY *c2, int id, int sel);
+int	mvPp2ClsC2ColorSet(MV_PP2_CLS_C2_ENTRY *c2, int cmd, int from);
+int	mvPp2ClsC2PrioSet(MV_PP2_CLS_C2_ENTRY *c2, int cmd, int prio, int form);
+int	mvPp2ClsC2DscpSet(MV_PP2_CLS_C2_ENTRY *c2, int cmd, int dscp, int from);
+int	mvPp2ClsC2GpidSet(MV_PP2_CLS_C2_ENTRY *c2, int cmd, int gpid, int from);
+int	mvPp2ClsC2QueueHighSet(MV_PP2_CLS_C2_ENTRY *c2, int cmd, int queue, int from);
+int	mvPp2ClsC2QueueLowSet(MV_PP2_CLS_C2_ENTRY *c2, int cmd, int queue, int from);
+int	mvPp2ClsC2QueueSet(MV_PP2_CLS_C2_ENTRY *c2, int cmd, int queue, int from);
+int	mvPp2ClsC2ForwardSet(MV_PP2_CLS_C2_ENTRY *c2, int cmd);
+
+#ifdef CONFIG_MV_ETH_PP2_1
+int	mvPp2ClsC2PolicerSet(MV_PP2_CLS_C2_ENTRY *c2, int cmd, int policerId, int bank);
+#else
+int	mvPp2ClsC2PolicerSet(MV_PP2_CLS_C2_ENTRY *c2, int cmd, int policerId);
+#endif
+
+int     mvPp2ClsC2FlowIdEn(MV_PP2_CLS_C2_ENTRY *c2, int flowid_en);
+int	mvPp2ClsC2ModSet(MV_PP2_CLS_C2_ENTRY *c2, int data_ptr, int instr_offs, int l4_csum);
+int	mvPp2ClsC2MtuSet(MV_PP2_CLS_C2_ENTRY *c2, int mtu_inx);
+int	mvPp2ClsC2DupSet(MV_PP2_CLS_C2_ENTRY *c2, int dupid, int count);
+int	mvPp2ClsC2SeqSet(MV_PP2_CLS_C2_ENTRY *c2, int miss, int id);
+
+
+/*-------------------------------------------------------------------------------*/
+/*		Classifier C2 engine Hit counters Public APIs		    	 */
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC2HitCntrsIsBusy(void);
+int mvPp2ClsC2HitCntrsClearAll(void);
+int mvPp2ClsC2HitCntrRead(int index, MV_U32 *cntr);
+int mvPp2ClsC2HitCntrsDump(void);
+
+/*-------------------------------------------------------------------------------*/
+/*		Classifier C2 engine debug Public APIs			    	 */
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC2RegsDump(void);
+
+#endif /* MV_CLS2_HW */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls3Hw.c b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls3Hw.c
new file mode 100644
index 000000000000..11ac1ce2a86f
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls3Hw.c
@@ -0,0 +1,1670 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#include "mvPp2Cls3Hw.h"
+
+CLS3_SHADOW_HASH_ENTRY mvCls3ShadowTbl[MV_PP2_CLS_C3_HASH_TBL_SIZE];
+int mvCls3ShadowExtTbl[MV_PP2_CLS_C3_EXT_TBL_SIZE];
+static int mvPp2ClsC3SwActDump(MV_PP2_CLS_C3_ENTRY *c3);
+
+static int SwInitCntSet;
+
+/******************************************************************************
+ * 			Common utilities
+
+******************************************************************************/
+static void mvPp2ClsC3ShadowSet(int hekSize, int index, int ext_index)
+{
+	mvCls3ShadowTbl[index].size = hekSize;
+
+	if (hekSize > MV_PP2_CLS_C3_HEK_BYTES) {
+		mvCls3ShadowTbl[index].ext_ptr = ext_index;
+		mvCls3ShadowExtTbl[ext_index] = IN_USE;
+	} else
+		mvCls3ShadowTbl[index].ext_ptr = NOT_IN_USE;
+}
+
+/*-----------------------------------------------------------------------------*/
+void mvPp2ClsC3ShadowInit(void)
+{
+	/* clear hash shadow and extension shadow */
+	int index;
+
+	for (index = 0; index < MV_PP2_CLS_C3_HASH_TBL_SIZE; index++) {
+		mvCls3ShadowTbl[index].size = 0;
+		mvCls3ShadowTbl[index].ext_ptr = NOT_IN_USE;
+	}
+
+	for (index = 0; index < MV_PP2_CLS_C3_EXT_TBL_SIZE; index++)
+		mvCls3ShadowExtTbl[index] = NOT_IN_USE;
+}
+
+/*-----------------------------------------------------------------------------*/
+int mvPp2ClsC3ShadowFreeGet(void)
+{
+	int index;
+
+	/* Go through the all entires from first to last */
+	for (index = 0; index < MV_PP2_CLS_C3_HASH_TBL_SIZE; index++) {
+		if (!mvCls3ShadowTbl[index].size)
+			break;
+	}
+	return index;
+}
+/*-----------------------------------------------------------------------------*/
+int mvPp2ClsC3ShadowExtFreeGet(void)
+{
+	int index;
+
+	/* Go through the all entires from first to last */
+	for (index = 0; index < MV_PP2_CLS_C3_EXT_TBL_SIZE; index++) {
+		if (mvCls3ShadowExtTbl[index] == NOT_IN_USE)
+			break;
+	}
+	return index;
+}
+/*-----------------------------------------------------------------------------*/
+void mvPp2C3ShadowClear(int index)
+{
+	int ext_ptr;
+
+	mvCls3ShadowTbl[index].size = 0;
+	ext_ptr = mvCls3ShadowTbl[index].ext_ptr;
+
+	if (ext_ptr != NOT_IN_USE)
+		mvCls3ShadowExtTbl[ext_ptr] = NOT_IN_USE;
+
+	mvCls3ShadowTbl[index].ext_ptr = NOT_IN_USE;
+}
+/*-------------------------------------------------------------------------------
+retun 1 scan procedure completed
+-------------------------------------------------------------------------------*/
+static int mvPp2ClsC3ScanIsComplete(void)
+{
+	unsigned int regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_CLS3_STATE_REG);
+	regVal &= MV_PP2_CLS3_STATE_SC_DONE_MASK;
+	regVal >>= MV_PP2_CLS3_STATE_SC_DONE;
+
+	return regVal;
+}
+/*-------------------------------------------------------------------------------
+return 1 if that the last CPU access (Query,Add or Delete) was completed
+-------------------------------------------------------------------------------*/
+static int mvPp2ClsC3CpuIsDone(void)
+{
+	unsigned int regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_CLS3_STATE_REG);
+	regVal &= MV_PP2_CLS3_STATE_CPU_DONE_MASK;
+	regVal >>= MV_PP2_CLS3_STATE_CPU_DONE;
+	return regVal;
+}
+
+/*-------------------------------------------------------------------------------
+0x0  "ScanCompleted"  scan completed and the scan results are ready in hardware
+0x1  "HitCountersClear"  The engine is clearing the Hit Counters
+0x2  "ScanWait"  The engine waits for the scan delay timer
+0x3  "ScanInProgress"  The scan process is in progress
+-------------------------------------------------------------------------------*/
+static int mvPp2ClsC3ScanStateGet(int *state)
+{
+	unsigned int regVal;
+
+	PTR_VALIDATE(state);
+
+	regVal = mvPp2RdReg(MV_PP2_CLS3_STATE_REG);
+	regVal &= MV_PP2_CLS3_STATE_SC_STATE_MASK;
+	regVal >>= MV_PP2_CLS3_STATE_SC_STATE;
+	*state = regVal;
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------
+return 1 if counters clearing is completed
+--------------------------------------------------------------------------------*/
+static int mvPp2ClsC3HitCntrClearDone(void)
+{
+	unsigned int regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_CLS3_STATE_REG);
+	regVal &= MV_PP2_CLS3_STATE_CLEAR_CTR_DONE_MASK;
+	regVal >>= MV_PP2_CLS3_STATE_CLEAR_CTR_DONE;
+	return regVal;
+}
+/*-------------------------------------------------------------------------------*/
+
+void mvPp2ClsC3SwClear(MV_PP2_CLS_C3_ENTRY *c3)
+{
+	memset(c3, 0, sizeof(MV_PP2_CLS_C3_ENTRY));
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC3Init()
+{
+	int rc;
+
+	mvPp2ClsC3ShadowInit();
+	rc = mvPp2ClsC3HitCntrsClearAll();
+	return rc;
+}
+
+/*-------------------------------------------------------------------------------
+  bank first 2 entries are reserved for miss actions
+--------------------------------------------------------------------------------*/
+static int mvPp2ClsC3IsReservedIndex(int index)
+{
+#ifdef CONFIG_MV_ETH_PP2_1
+	return MV_FALSE;
+#endif
+	if ((index % MV_PP2_CLS_C3_BANK_SIZE) > 1)
+		/* not reserved */
+		return MV_FALSE;
+
+	return MV_TRUE;
+}
+
+/*-------------------------------------------------------------------------------
+Add entry to hash table
+ext_index used only if hek size < 12
+-------------------------------------------------------------------------------*/
+int mvPp2ClsC3HwAdd(MV_PP2_CLS_C3_ENTRY *c3, int index, int ext_index)
+{
+	int regStartInd, hekSize, iter = 0;
+	unsigned int regVal = 0;
+
+
+	PTR_VALIDATE(c3);
+	POS_RANGE_VALIDATE(index, MV_PP2_CLS3_HASH_OP_TBL_ADDR_MAX);
+
+	c3->index = index;
+
+	/* write key control */
+	mvPp2WrReg(MV_PP2_CLS3_KEY_CTRL_REG, c3->key.key_ctrl);
+
+	hekSize = ((c3->key.key_ctrl & KEY_CTRL_HEK_SIZE_MASK) >> KEY_CTRL_HEK_SIZE);
+
+	if (hekSize > MV_PP2_CLS_C3_HEK_BYTES) {
+		/* Extension */
+		POS_RANGE_VALIDATE(ext_index, MV_PP2_CLS3_HASH_OP_EXT_TBL_ADDR_MAX)
+		c3->ext_index = ext_index;
+		regVal |= (ext_index << MV_PP2_CLS3_HASH_OP_EXT_TBL_ADDR);
+
+		/* write 9 hek refisters */
+		regStartInd = 0;
+	} else
+		/* write 3 hek refisters */
+		regStartInd = 6;
+
+	for (; regStartInd < MV_PP2_CLS_C3_EXT_HEK_WORDS; regStartInd++)
+		mvPp2WrReg(MV_PP2_CLS3_KEY_HEK_REG(regStartInd), c3->key.hek.words[regStartInd]);
+
+
+	regVal |= (index << MV_PP2_CLS3_HASH_OP_TBL_ADDR);
+	regVal &= ~MV_PP2_CLS3_MISS_PTR_MASK; /*set miss bit to 0, ppv2.1 mas 3.16*/
+	regVal |= (1 << MV_PP2_CLS3_HASH_OP_ADD);
+
+	/* set hit counter init value */
+#ifdef CONFIG_MV_ETH_PP2_1
+	mvPp2WrReg(MV_PP2_CLS3_INIT_HIT_CNT_REG, SwInitCntSet << MV_PP2_CLS3_INIT_HIT_CNT_OFFS);
+#else
+	regVal |= (SwInitCntSet << MV_PP2_CLS3_HASH_OP_INIT_CTR_VAL);
+#endif
+	/*trigger ADD operation*/
+	mvPp2WrReg(MV_PP2_CLS3_HASH_OP_REG, regVal);
+
+	/* wait to cpu access done bit */
+	while (!mvPp2ClsC3CpuIsDone())
+		if (++iter >= RETRIES_EXCEEDED) {
+			mvOsPrintf("%s:Error - retries exceeded.\n", __func__);
+			return MV_CLS3_RETRIES_EXCEEDED;
+		}
+
+	/* write action table registers */
+	mvPp2WrReg(MV_PP2_CLS3_ACT_REG, c3->sram.regs.actions);
+	mvPp2WrReg(MV_PP2_CLS3_ACT_QOS_ATTR_REG, c3->sram.regs.qos_attr);
+	mvPp2WrReg(MV_PP2_CLS3_ACT_HWF_ATTR_REG, c3->sram.regs.hwf_attr);
+	mvPp2WrReg(MV_PP2_CLS3_ACT_DUP_ATTR_REG, c3->sram.regs.dup_attr);
+#ifdef CONFIG_MV_ETH_PP2_1
+	mvPp2WrReg(MV_PP2_CLS3_ACT_SEQ_L_ATTR_REG, c3->sram.regs.seq_l_attr);
+	mvPp2WrReg(MV_PP2_CLS3_ACT_SEQ_H_ATTR_REG, c3->sram.regs.seq_h_attr);
+#endif
+	/* set entry as valid, extesion pointer in use only if size > 12*/
+	mvPp2ClsC3ShadowSet(hekSize, index, ext_index);
+
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------
+Add entry to miss hash table
+ppv2.1 mas 3.16 relevant only for ppv2.1
+-------------------------------------------------------------------------------*/
+int mvPp2ClsC3HwMissAdd(MV_PP2_CLS_C3_ENTRY *c3, int lkp_type)
+{
+	unsigned int regVal = 0;
+
+	PTR_VALIDATE(c3);
+	POS_RANGE_VALIDATE(lkp_type, MV_PP2_CLS_C3_MISS_TBL_SIZE - 1);
+
+	c3->index = lkp_type;
+
+	regVal |= (lkp_type << MV_PP2_CLS3_HASH_OP_TBL_ADDR);
+	regVal |= (1 << MV_PP2_CLS3_HASH_OP_ADD);
+	regVal |= MV_PP2_CLS3_MISS_PTR_MASK;/*set miss bit to 1, ppv2.1 mas 3.16*/
+
+	/*index to miss table */
+	mvPp2WrReg(MV_PP2_CLS3_HASH_OP_REG, regVal);
+
+	/* write action table registers */
+	mvPp2WrReg(MV_PP2_CLS3_ACT_REG, c3->sram.regs.actions);
+	mvPp2WrReg(MV_PP2_CLS3_ACT_QOS_ATTR_REG, c3->sram.regs.qos_attr);
+	mvPp2WrReg(MV_PP2_CLS3_ACT_HWF_ATTR_REG, c3->sram.regs.hwf_attr);
+	mvPp2WrReg(MV_PP2_CLS3_ACT_DUP_ATTR_REG, c3->sram.regs.dup_attr);
+	mvPp2WrReg(MV_PP2_CLS3_ACT_SEQ_L_ATTR_REG, c3->sram.regs.seq_l_attr);
+	mvPp2WrReg(MV_PP2_CLS3_ACT_SEQ_H_ATTR_REG, c3->sram.regs.seq_h_attr);
+	/*clear hit counter, clear on read */
+	mvPp2ClsC3HitCntrsMissRead(lkp_type, &regVal);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC3HwDel(int index)
+{
+
+	unsigned int regVal = 0;
+	int iter = 0;
+
+	POS_RANGE_VALIDATE(index, MV_PP2_CLS3_HASH_OP_TBL_ADDR_MAX);
+
+	regVal |= (index << MV_PP2_CLS3_HASH_OP_TBL_ADDR);
+	regVal |= (1 << MV_PP2_CLS3_HASH_OP_DEL);
+	regVal &= ~MV_PP2_CLS3_MISS_PTR_MASK;/*set miss bit to 1, ppv2.1 mas 3.16*/
+
+
+	/*trigger del operation*/
+	mvPp2WrReg(MV_PP2_CLS3_HASH_OP_REG, regVal);
+
+	/* wait to cpu access done bit */
+	while (!mvPp2ClsC3CpuIsDone())
+		if (++iter >= RETRIES_EXCEEDED) {
+			mvOsPrintf("%s:Error - retries exceeded.\n", __func__);
+			return MV_CLS3_RETRIES_EXCEEDED;
+		}
+
+	/* delete form shadow and extension shadow if exist */
+	mvPp2C3ShadowClear(index);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC3HwDelAll()
+{
+	int index, status;
+
+	for (index = 0; index < MV_PP2_CLS_C3_HASH_TBL_SIZE; index++) {
+		status = mvPp2ClsC3HwDel(index);
+		if (status != MV_OK)
+			return status;
+	}
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+void mvPp2ClsC3HwInitCtrSet(int cntVal)
+{
+	SwInitCntSet = cntVal;
+}
+
+/*-------------------------------------------------------------------------------*/
+
+static int mvPp2ClsC3HwQueryAddRelocate(int new_idx, int max_depth, int cur_depth, MV_PP2_CLS3_HASH_PAIR *hash_pair_arr)
+{
+	int status, index_free, idx = 0;
+	unsigned char occupied_bmp;
+	MV_PP2_CLS_C3_ENTRY local_c3;
+	int usedIndex[MV_PP2_CLS3_HASH_BANKS_NUM] = {0};
+
+	if (cur_depth >= max_depth)
+		return MV_CLS3_RETRIES_EXCEEDED;
+
+
+	mvPp2ClsC3SwClear(&local_c3);
+
+	if (mvPp2ClsC3HwRead(&local_c3, new_idx)) {
+		mvOsPrintf("%s could not get key for index [0x%x]\n", __func__, new_idx);
+		return MV_CLS3_RETRIES_EXCEEDED;
+	}
+
+	if (mvPp2ClsC3HwQuery(&local_c3, &occupied_bmp, usedIndex)) {
+		mvOsPrintf("%s: mvPp2ClsC3HwQuery failed, depth = %d\n", __func__, cur_depth);
+		return MV_CLS3_ERR;
+	}
+
+	/* fill in indices for this key */
+	for (idx = 0; idx < MV_PP2_CLS3_HASH_BANKS_NUM; idx++) {
+		/* if new index is in the bank index, skip it */
+		if (new_idx == usedIndex[idx] || mvPp2ClsC3IsReservedIndex(usedIndex[idx])) {
+			usedIndex[idx] = 0;
+			continue;
+		}
+
+		/* found a vacant index */
+		if (!(occupied_bmp & (1 << idx))) {
+			index_free = usedIndex[idx];
+			break;
+		}
+	}
+
+	/* no free index, recurse and relocate another key */
+	if (idx == MV_PP2_CLS3_HASH_BANKS_NUM) {
+#ifdef MV_DEBUG
+		mvOsPrintf("new[0x%.3x]:%.1d ", new_idx, cur_depth);
+		for (idx = 0; idx < MV_PP2_CLS3_HASH_BANKS_NUM; idx++)
+			mvOsPrintf("0x%.3x ", usedIndex[idx]);
+		mvOsPrintf("\n");
+#endif
+
+		/* recurse over all valid indices */
+		for (idx = 0; idx < MV_PP2_CLS3_HASH_BANKS_NUM; idx++) {
+			if (usedIndex[idx] == 0)
+				continue;
+
+			if (mvPp2ClsC3HwQueryAddRelocate(usedIndex[idx], max_depth,
+							cur_depth+1, hash_pair_arr) == MV_OK)
+				break;
+		}
+
+		/* tried relocate, no valid entries found */
+		if (idx == MV_PP2_CLS3_HASH_BANKS_NUM)
+			return MV_CLS3_RETRIES_EXCEEDED;
+
+	}
+
+	/* if we reached here, we found a valid free index */
+	index_free = usedIndex[idx];
+
+	/* new_idx del is not necessary */
+
+	/*We do not chage extension tabe*/
+	status = mvPp2ClsC3HwAdd(&local_c3, index_free, local_c3.ext_index);
+
+	/* update the hash pair */
+	if (hash_pair_arr != NULL) {
+		hash_pair_arr->old_idx[hash_pair_arr->pair_num] = new_idx;
+		hash_pair_arr->new_idx[hash_pair_arr->pair_num] = index_free;
+		hash_pair_arr->pair_num++;
+	}
+
+	if (status != MV_OK) {
+		mvOsPrintf("%s:Error - mvPp2ClsC3HwAdd failed, depth = %d\\n", __func__, cur_depth);
+		return status;
+	}
+
+	mvOsPrintf("key relocated  0x%.3x->0x%.3x\n", new_idx, index_free);
+
+	return MV_OK;
+}
+
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3HwQueryAdd(MV_PP2_CLS_C3_ENTRY *c3, int max_search_depth, MV_PP2_CLS3_HASH_PAIR *hash_pair_arr)
+{
+	int usedIndex[MV_PP2_CLS3_HASH_BANKS_NUM] = {0};
+	unsigned char occupied_bmp;
+	int idx, index_free, hekSize, status, ext_index = 0;
+
+	status = mvPp2ClsC3HwQuery(c3, &occupied_bmp, usedIndex);
+
+	if (status != MV_OK) {
+		mvOsPrintf("%s:Error - mvPp2ClsC3HwQuery failed\n", __func__);
+		return status;
+	}
+
+	/* Select avaliable entry index */
+	for (idx = 0; idx < MV_PP2_CLS3_HASH_BANKS_NUM; idx++) {
+		if (!(occupied_bmp & (1 << idx)))
+			if (!mvPp2ClsC3IsReservedIndex(usedIndex[idx]))
+				break;
+	}
+
+	/* Avaliable index did not found, try to relocate another key */
+
+	if (idx == MV_PP2_CLS3_HASH_BANKS_NUM) {
+
+		/* save all valid bank indices */
+		for (idx = 0; idx < MV_PP2_CLS3_HASH_BANKS_NUM; idx++) {
+			if (mvPp2ClsC3IsReservedIndex(usedIndex[idx]))
+				usedIndex[idx] = 0;
+		}
+
+		for (idx = 0; idx < MV_PP2_CLS3_HASH_BANKS_NUM; idx++) {
+			if (mvPp2ClsC3IsReservedIndex(usedIndex[idx]))
+				continue;
+
+			if (mvPp2ClsC3HwQueryAddRelocate(usedIndex[idx], max_search_depth,
+							0 /*curren depth*/, hash_pair_arr) == MV_OK)
+				break;
+		}
+
+		if (idx == MV_PP2_CLS3_HASH_BANKS_NUM) {
+			/* Avaliable index did not found*/
+			mvOsPrintf("%s:Error - HASH table is full.\n", __func__);
+			return MV_CLS3_ERR;
+		}
+	}
+
+	index_free = usedIndex[idx];
+
+	hekSize = ((c3->key.key_ctrl & KEY_CTRL_HEK_SIZE_MASK) >> KEY_CTRL_HEK_SIZE);
+
+	if (hekSize > MV_PP2_CLS_C3_HEK_BYTES) {
+		/* Get Free Extension Index */
+		ext_index = mvPp2ClsC3ShadowExtFreeGet();
+
+		if (ext_index == MV_PP2_CLS_C3_EXT_TBL_SIZE) {
+			mvOsPrintf("%s:Error - Extension table is full.\n", __func__);
+			return MV_CLS3_ERR;
+		}
+	}
+
+	status = mvPp2ClsC3HwAdd(c3, index_free, ext_index);
+
+	if (status != MV_OK) {
+		mvOsPrintf("%s:Error - mvPp2ClsC3HwAdd failed\n", __func__);
+		return status;
+	}
+
+	if (hekSize > MV_PP2_CLS_C3_HEK_BYTES)
+		mvOsPrintf("Added C3 entry @ index=0x%.3x ext=0x%.3x\n", index_free, ext_index);
+	else
+		mvOsPrintf("Added C3 entry @ index=0x%.3x\n", index_free);
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+/*if index or occupied_bmp is NULL dump the data 				 */
+/* index[] size must be 8							 */
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC3HwQuery(MV_PP2_CLS_C3_ENTRY *c3, unsigned char *occupied_bmp, int index[])
+{
+	int idx = 0;
+	unsigned int regVal = 0;
+
+	PTR_VALIDATE(c3);
+
+	/* write key control */
+	mvPp2WrReg(MV_PP2_CLS3_KEY_CTRL_REG, c3->key.key_ctrl);
+
+	/* write hek */
+	for (idx = 0; idx < MV_PP2_CLS_C3_EXT_HEK_WORDS; idx++)
+		mvPp2WrReg(MV_PP2_CLS3_KEY_HEK_REG(idx), c3->key.hek.words[idx]);
+
+	/*trigger query operation*/
+	mvPp2WrReg(MV_PP2_CLS3_QRY_ACT_REG, (1 << MV_PP2_CLS3_QRY_ACT));
+
+	idx = 0;
+	while (!mvPp2ClsC3CpuIsDone())
+		if (++idx >= RETRIES_EXCEEDED) {
+			mvOsPrintf("%s:Error - retries exceeded.\n", __func__);
+			return MV_CLS3_RETRIES_EXCEEDED;
+		}
+
+	regVal = mvPp2RdReg(MV_PP2_CLS3_STATE_REG) & MV_PP2_CLS3_STATE_OCCIPIED_MASK;
+	regVal = regVal >> MV_PP2_CLS3_STATE_OCCIPIED;
+
+	if ((!occupied_bmp) || (!index)) {
+		/* print to screen - call from sysfs*/
+		for (idx = 0; idx < MV_PP2_CLS3_HASH_BANKS_NUM; idx++)
+			mvOsPrintf("0x%8.8x	%s\n",
+				mvPp2RdReg(MV_PP2_CLS3_QRY_RES_HASH_REG(idx)),
+				(regVal & (1 << idx)) ? "OCCUPIED" : "FREE");
+		return MV_OK;
+	}
+
+	*occupied_bmp = regVal;
+	for (idx = 0; idx < MV_PP2_CLS3_HASH_BANKS_NUM; idx++)
+		index[idx] = mvPp2RdReg(MV_PP2_CLS3_QRY_RES_HASH_REG(idx));
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3HwRead(MV_PP2_CLS_C3_ENTRY *c3, int index)
+{
+	int i, isExt;
+	MV_U32 regVal = 0;
+
+	unsigned int hashData[MV_PP2_CLS3_HASH_DATA_REG_NUM];
+	unsigned int hashExtData[MV_PP2_CLS3_HASH_EXT_DATA_REG_NUM];
+
+	PTR_VALIDATE(c3);
+	POS_RANGE_VALIDATE(index, MV_PP2_CLS3_HASH_OP_TBL_ADDR_MAX);
+
+	mvPp2ClsC3SwClear(c3);
+
+	c3->index = index;
+	c3->ext_index = NOT_IN_USE;
+
+	/* write index */
+	mvPp2WrReg(MV_PP2_CLS3_DB_INDEX_REG, index);
+
+	regVal |= (index << MV_PP2_CLS3_HASH_OP_TBL_ADDR);
+	mvPp2WrReg(MV_PP2_CLS3_HASH_OP_REG, regVal);
+
+	/* read action table */
+	c3->sram.regs.actions = mvPp2RdReg(MV_PP2_CLS3_ACT_REG);
+	c3->sram.regs.qos_attr = mvPp2RdReg(MV_PP2_CLS3_ACT_QOS_ATTR_REG);
+	c3->sram.regs.hwf_attr = mvPp2RdReg(MV_PP2_CLS3_ACT_HWF_ATTR_REG);
+	c3->sram.regs.dup_attr = mvPp2RdReg(MV_PP2_CLS3_ACT_DUP_ATTR_REG);
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	c3->sram.regs.seq_l_attr = mvPp2RdReg(MV_PP2_CLS3_ACT_SEQ_L_ATTR_REG);
+	c3->sram.regs.seq_h_attr = mvPp2RdReg(MV_PP2_CLS3_ACT_SEQ_H_ATTR_REG);
+#endif
+
+	/* read hash data*/
+	for (i = 0; i < MV_PP2_CLS3_HASH_DATA_REG_NUM; i++)
+		hashData[i] = mvPp2RdReg(MV_PP2_CLS3_HASH_DATA_REG(i));
+
+	if (mvCls3ShadowTbl[index].size == 0) {
+		/* entry not in use */
+		return MV_OK;
+	}
+
+	c3->key.key_ctrl = 0;
+
+	if (mvCls3ShadowTbl[index].ext_ptr == NOT_IN_USE) {
+		isExt = 0;
+		/* TODO REMOVE NEXT LINES- ONLY FOR INTERNAL VALIDATION */
+		if ((mvCls3ShadowTbl[index].size == 0) ||
+			 (mvCls3ShadowTbl[index].ext_ptr != NOT_IN_USE)) {
+				mvOsPrintf("%s: SW internal error.\n", __func__);
+				return MV_CLS3_SW_INTERNAL;
+		}
+
+		/*read Multihash entry data*/
+		c3->key.hek.words[6] = hashData[0]; /* hek 0*/
+		c3->key.hek.words[7] = hashData[1]; /* hek 1*/
+		c3->key.hek.words[8] = hashData[2]; /* hek 2*/
+
+		/* write key control data to SW */
+		c3->key.key_ctrl |= (((hashData[3] & KEY_PRT_ID_MASK(isExt)) >>
+					(KEY_PRT_ID(isExt) % DWORD_BITS_LEN)) << KEY_CTRL_PRT_ID);
+
+		c3->key.key_ctrl |= (((hashData[3] & KEY_PRT_ID_TYPE_MASK(isExt)) >>
+					(KEY_PRT_ID_TYPE(isExt) % DWORD_BITS_LEN)) << KEY_CTRL_PRT_ID_TYPE);
+
+		c3->key.key_ctrl |= (((hashData[3] & KEY_LKP_TYPE_MASK(isExt)) >>
+					(KEY_LKP_TYPE(isExt) % DWORD_BITS_LEN)) << KEY_CTRL_LKP_TYPE);
+
+		c3->key.key_ctrl |= (((hashData[3] & KEY_L4_INFO_MASK(isExt)) >>
+					(KEY_L4_INFO(isExt) % DWORD_BITS_LEN)) << KEY_CTRL_L4);
+
+	} else {
+		isExt = 1;
+		/* TODO REMOVE NEXT LINES- ONLY FOR INTERNAL VALIDATION */
+		if ((mvCls3ShadowTbl[index].size == 0) ||
+			 (mvCls3ShadowTbl[index].ext_ptr == NOT_IN_USE)) {
+				mvOsPrintf("%s: SW internal error.\n", __func__);
+				return MV_CLS3_SW_INTERNAL;
+		}
+		c3->ext_index = mvCls3ShadowTbl[index].ext_ptr;
+
+		/* write extension index */
+		mvPp2WrReg(MV_PP2_CLS3_DB_INDEX_REG, mvCls3ShadowTbl[index].ext_ptr);
+
+		/* read hash extesion data*/
+		for (i = 0; i < MV_PP2_CLS3_HASH_EXT_DATA_REG_NUM; i++)
+			hashExtData[i] = mvPp2RdReg(MV_PP2_CLS3_HASH_EXT_DATA_REG(i));
+
+
+		/* heks bytes 35 - 32 */
+		c3->key.hek.words[8] = ((hashData[2] & 0x00FFFFFF) << 8) | ((hashData[1] & 0xFF000000) >> 24);
+
+		/* heks bytes 31 - 28 */
+		c3->key.hek.words[7] = ((hashData[1] & 0x00FFFFFF) << 8) | ((hashData[0] & 0xFF000000) >> 24);
+
+		/* heks bytes 27 - 24 */
+		c3->key.hek.words[6] = ((hashData[0] & 0x00FFFFFF) << 8) | (hashExtData[6] & 0x000000FF);
+
+		c3->key.hek.words[5] = hashExtData[5]; /* heks bytes 23 - 20 */
+		c3->key.hek.words[4] = hashExtData[4]; /* heks bytes 19 - 16 */
+		c3->key.hek.words[3] = hashExtData[3]; /* heks bytes 15 - 12 */
+		c3->key.hek.words[2] = hashExtData[2]; /* heks bytes 11 - 8  */
+		c3->key.hek.words[1] = hashExtData[1]; /* heks bytes 7 - 4   */
+		c3->key.hek.words[0] = hashExtData[0]; /* heks bytes 3 - 0   */
+
+		/* write key control data to SW*/
+
+		c3->key.key_ctrl |= (((hashData[3] & KEY_PRT_ID_MASK(isExt)) >>
+					(KEY_PRT_ID(isExt) % DWORD_BITS_LEN)) << KEY_CTRL_PRT_ID);
+
+#ifdef CONFIG_MV_ETH_PP2_1
+		/* PPv2.1 (feature MAS 3.16) LKP_TYPE size and offset changed */
+
+		c3->key.key_ctrl |= (((hashData[3] & KEY_PRT_ID_TYPE_MASK(isExt)) >>
+					(KEY_PRT_ID_TYPE(isExt) % DWORD_BITS_LEN)) << KEY_CTRL_PRT_ID_TYPE);
+
+		c3->key.key_ctrl |= ((((hashData[2] & 0xf8000000) >> 27) |
+					((hashData[3] & 0x1) << 5)) << KEY_CTRL_LKP_TYPE);
+
+#else
+		c3->key.key_ctrl |= ((((hashData[2] & 0x80000000) >> 31) |
+					((hashData[3] & 0x1) << 1)) << KEY_CTRL_PRT_ID_TYPE);
+
+		c3->key.key_ctrl |= (((hashData[2] & KEY_LKP_TYPE_MASK(isExt)) >>
+					(KEY_LKP_TYPE(isExt) % DWORD_BITS_LEN)) << KEY_CTRL_LKP_TYPE);
+
+#endif /* CONFIG_MV_ETH_PP2_1 */
+
+		c3->key.key_ctrl |= (((hashData[2] & KEY_L4_INFO_MASK(isExt)) >>
+					(KEY_L4_INFO(isExt) % DWORD_BITS_LEN)) << KEY_CTRL_L4);
+	}
+
+	/* update hek size */
+	c3->key.key_ctrl |= ((mvCls3ShadowTbl[index].size << KEY_CTRL_HEK_SIZE) & KEY_CTRL_HEK_SIZE_MASK);
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+/* ppv2.1 MAS 3.12								*/
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC3HwMissRead(MV_PP2_CLS_C3_ENTRY *c3, int lkp_type)
+{
+	unsigned int regVal = 0;
+
+	PTR_VALIDATE(c3);
+	POS_RANGE_VALIDATE(lkp_type, MV_PP2_CLS_C3_MISS_TBL_SIZE - 1);
+
+	mvPp2ClsC3SwClear(c3);
+
+	c3->index = lkp_type;
+	c3->ext_index = NOT_IN_USE;
+
+	regVal = (lkp_type << MV_PP2_CLS3_HASH_OP_TBL_ADDR) | MV_PP2_CLS3_MISS_PTR_MASK;
+	mvPp2WrReg(MV_PP2_CLS3_HASH_OP_REG, regVal);
+
+	/* read action table */
+	c3->sram.regs.actions = mvPp2RdReg(MV_PP2_CLS3_ACT_REG);
+	c3->sram.regs.qos_attr = mvPp2RdReg(MV_PP2_CLS3_ACT_QOS_ATTR_REG);
+	c3->sram.regs.hwf_attr = mvPp2RdReg(MV_PP2_CLS3_ACT_HWF_ATTR_REG);
+	c3->sram.regs.dup_attr = mvPp2RdReg(MV_PP2_CLS3_ACT_DUP_ATTR_REG);
+	c3->sram.regs.seq_l_attr = mvPp2RdReg(MV_PP2_CLS3_ACT_SEQ_L_ATTR_REG);
+	c3->sram.regs.seq_h_attr = mvPp2RdReg(MV_PP2_CLS3_ACT_SEQ_H_ATTR_REG);
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3SwDump(MV_PP2_CLS_C3_ENTRY *c3)
+{
+	int hekSize;
+
+	PTR_VALIDATE(c3);
+
+	mvOsPrintf("\n");
+	mvOsPrintf("INDEX[0x%3.3x] ", c3->index);
+
+	hekSize = ((c3->key.key_ctrl & KEY_CTRL_HEK_SIZE_MASK) >> KEY_CTRL_HEK_SIZE);
+
+	/* print extension index if exist*/
+	if (hekSize > MV_PP2_CLS_C3_HEK_BYTES)
+		/* extension */
+		mvOsPrintf("EXT_INDEX[0x%2.2x] ", c3->ext_index);
+	else
+		/* without extension */
+		mvOsPrintf("EXT_INDEX[ NA ] ");
+
+	mvOsPrintf("SIZE[0x%2.2x] ", hekSize);
+	mvOsPrintf("PRT[ID = 0x%2.2x,TYPE = 0x%1.1x] ",
+			((c3->key.key_ctrl & KEY_CTRL_PRT_ID_MASK) >> KEY_CTRL_PRT_ID),
+			((c3->key.key_ctrl & KEY_CTRL_PRT_ID_TYPE_MASK) >> KEY_CTRL_PRT_ID_TYPE));
+
+	mvOsPrintf("LKP_TYPE[0x%1.1x] ",
+			((c3->key.key_ctrl & KEY_CTRL_LKP_TYPE_MASK) >> KEY_CTRL_LKP_TYPE));
+
+	mvOsPrintf("L4INFO[0x%1.1x] ",
+			((c3->key.key_ctrl & KEY_CTRL_L4_MASK) >> KEY_CTRL_L4));
+
+	mvOsPrintf("\n\n");
+	mvOsPrintf("HEK	");
+	if (hekSize > MV_PP2_CLS_C3_HEK_BYTES)
+		/* extension */
+		mvOsPrintf(HEK_EXT_FMT, HEK_EXT_VAL(c3->key.hek.words));
+	else
+		/* without extension */
+		mvOsPrintf(HEK_FMT, HEK_VAL(c3->key.hek.words));
+	mvOsPrintf("\n");
+	return mvPp2ClsC3SwActDump(c3);
+}
+
+/*-------------------------------------------------------------------------------*/
+
+static int mvPp2ClsC3SwActDump(MV_PP2_CLS_C3_ENTRY *c3)
+{
+	PTR_VALIDATE(c3);
+	mvOsPrintf("\n");
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	/*------------------------------*/
+	/*	actions 0x1D40		*/
+	/*------------------------------*/
+
+	mvOsPrintf("ACT_TBL: COLOR   LOW_Q   HIGH_Q     FWD   POLICER  FID\n");
+	mvOsPrintf("CMD:     [%1d]      [%1d]    [%1d]        [%1d]   [%1d]      [%1d]\n",
+			((c3->sram.regs.actions & (ACT_COLOR_MASK)) >> ACT_COLOR),
+			((c3->sram.regs.actions & (ACT_LOW_Q_MASK)) >> ACT_LOW_Q),
+			((c3->sram.regs.actions & (ACT_HIGH_Q_MASK)) >> ACT_HIGH_Q),
+			((c3->sram.regs.actions & ACT_FWD_MASK) >> ACT_FWD),
+			((c3->sram.regs.actions & (ACT_POLICER_SELECT_MASK)) >> ACT_POLICER_SELECT),
+			((c3->sram.regs.actions & ACT_FLOW_ID_EN_MASK) >> ACT_FLOW_ID_EN));
+
+	mvOsPrintf("VAL:              [%1d]    [0x%x]\n",
+			((c3->sram.regs.qos_attr & (ACT_QOS_ATTR_MDF_LOW_Q_MASK)) >> ACT_QOS_ATTR_MDF_LOW_Q),
+			((c3->sram.regs.qos_attr & (ACT_QOS_ATTR_MDF_HIGH_Q_MASK)) >> ACT_QOS_ATTR_MDF_HIGH_Q));
+
+	mvOsPrintf("\n");
+	/*------------------------------*/
+	/*	hwf_attr 0x1D48		*/
+	/*------------------------------*/
+
+	mvOsPrintf("HWF_ATTR: IPTR	DPTR	 CHKSM     MTU_IDX\n");
+	mvOsPrintf("          0x%1.1x   0x%4.4x   %s   0x%1.1x\n",
+
+			((c3->sram.regs.hwf_attr & ACT_HWF_ATTR_IPTR_MASK) >> ACT_HWF_ATTR_IPTR),
+			((c3->sram.regs.hwf_attr & ACT_HWF_ATTR_DPTR_MASK) >> ACT_HWF_ATTR_DPTR),
+			(((c3->sram.regs.hwf_attr &
+				ACT_HWF_ATTR_CHKSM_EN_MASK) >> ACT_HWF_ATTR_CHKSM_EN) ? "ENABLE" : "DISABLE"),
+			((c3->sram.regs.hwf_attr & ACT_HWF_ATTR_MTU_INX_MASK) >> ACT_HWF_ATTR_MTU_INX));
+	mvOsPrintf("\n");
+	/*------------------------------*/
+	/*	dup_attr 0x1D4C		*/
+	/*------------------------------*/
+	mvOsPrintf("DUP_ATTR:FID	COUNT	POLICER [id    bank]\n");
+	mvOsPrintf("         0x%2.2x\t0x%1.1x\t\t[0x%2.2x   0x%1.1x]\n",
+		((c3->sram.regs.dup_attr & ACT_DUP_FID_MASK) >> ACT_DUP_FID),
+		((c3->sram.regs.dup_attr & ACT_DUP_COUNT_MASK) >> ACT_DUP_COUNT),
+		((c3->sram.regs.dup_attr & ACT_DUP_POLICER_MASK) >> ACT_DUP_POLICER_ID),
+		((c3->sram.regs.dup_attr & ACT_DUP_POLICER_BANK_MASK) >> ACT_DUP_POLICER_BANK_BIT));
+	mvOsPrintf("\n");
+	mvOsPrintf("SEQ_ATTR: HIGH[32:37] LOW[0:31]\n");
+	mvOsPrintf("          0x%2.2x        0x%8.8x", c3->sram.regs.seq_h_attr, c3->sram.regs.seq_l_attr);
+
+#else
+	/*------------------------------*/
+	/*	actions 0x1D40		*/
+	/*------------------------------*/
+
+	mvOsPrintf("ACT_TBL: COLOR   LOW_Q   HIGH_Q     FWD   POLICER  FID\n");
+	mvOsPrintf("CMD:     [%1d]      [%1d]    [%1d]        [%1d]   [%1d]      [%1d]\n",
+			((c3->sram.regs.actions & (ACT_COLOR_MASK)) >> ACT_COLOR),
+			((c3->sram.regs.actions & (ACT_LOW_Q_MASK)) >> ACT_LOW_Q),
+			((c3->sram.regs.actions & (ACT_HIGH_Q_MASK)) >> ACT_HIGH_Q),
+			((c3->sram.regs.actions & ACT_FWD_MASK) >> ACT_FWD),
+			((c3->sram.regs.actions & (ACT_POLICER_SELECT_MASK)) >> ACT_POLICER_SELECT),
+			((c3->sram.regs.actions & ACT_FLOW_ID_EN_MASK) >> ACT_FLOW_ID_EN));
+
+	mvOsPrintf("VAL:              [%1d]    [0x%x]            [0x%x]\n",
+			((c3->sram.regs.qos_attr & (ACT_QOS_ATTR_MDF_LOW_Q_MASK)) >> ACT_QOS_ATTR_MDF_LOW_Q),
+			((c3->sram.regs.qos_attr & (ACT_QOS_ATTR_MDF_HIGH_Q_MASK)) >> ACT_QOS_ATTR_MDF_HIGH_Q),
+			((c3->sram.regs.dup_attr & (ACT_DUP_POLICER_MASK)) >> ACT_DUP_POLICER_ID));
+	mvOsPrintf("\n");
+
+	/*------------------------------*/
+	/*	hwf_attr 0x1D48		*/
+	/*------------------------------*/
+
+	mvOsPrintf("HWF_ATTR: IPTR    DPTR   CHKSM\n");
+	mvOsPrintf("          0x%1.1x     0x%4.4x %s\t",
+			((c3->sram.regs.hwf_attr & ACT_HWF_ATTR_IPTR_MASK) >> ACT_HWF_ATTR_IPTR),
+			((c3->sram.regs.hwf_attr & ACT_HWF_ATTR_DPTR_MASK) >> ACT_HWF_ATTR_DPTR),
+			(((c3->sram.regs.hwf_attr & ACT_HWF_ATTR_CHKSM_EN_MASK) >> ACT_HWF_ATTR_CHKSM_EN) ? "ENABLE" : "DISABLE"));
+
+	mvOsPrintf("\n");
+
+	/*------------------------------*/
+	/*	dup_attr 0x1D4C		*/
+	/*------------------------------*/
+
+	mvOsPrintf("DUP_ATTR: FID   COUNT\n");
+	mvOsPrintf("          0x%2.2x  0x%1.1x\n",
+			((c3->sram.regs.dup_attr & ACT_DUP_FID_MASK) >> ACT_DUP_FID),
+			((c3->sram.regs.dup_attr & ACT_DUP_COUNT_MASK) >> ACT_DUP_COUNT));
+
+
+#endif /* CONFIG_MV_ETH_PP2_1 */
+
+	mvOsPrintf("\n\n");
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3HwDump()
+{
+	int index;
+	MV_PP2_CLS_C3_ENTRY c3;
+
+	mvPp2ClsC3SwClear(&c3);
+
+	for (index = 0; index < MV_PP2_CLS_C3_HASH_TBL_SIZE; index++) {
+		if (mvCls3ShadowTbl[index].size > 0) {
+			mvPp2ClsC3HwRead(&c3, index);
+			mvPp2ClsC3SwDump(&c3);
+			mvOsPrintf("----------------------------------------------------------------------\n");
+		}
+	}
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+/*
+All miss entries are valid,
+the key+heks in miss entries are hot in use and this is the
+reason that we dump onlt action table fields
+*/
+int mvPp2ClsC3HwMissDump()
+{
+	int index;
+	MV_PP2_CLS_C3_ENTRY c3;
+
+	mvPp2ClsC3SwClear(&c3);
+
+	for (index = 0; index < MV_PP2_CLS_C3_MISS_TBL_SIZE; index++) {
+		mvPp2ClsC3HwMissRead(&c3, index);
+		mvOsPrintf("INDEX[0x%3.3X]\n", index);
+		mvPp2ClsC3SwActDump(&c3);
+		mvOsPrintf("----------------------------------------------------------------------\n");
+	}
+
+	return MV_OK;
+}
+
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3HwExtDump()
+{
+	int index, i;
+	unsigned int hashExtData[MV_PP2_CLS3_HASH_EXT_DATA_REG_NUM];
+
+	mvOsPrintf("INDEX    DATA\n");
+
+	for (index = 0; index <  MV_PP2_CLS_C3_EXT_TBL_SIZE; index++)
+		if (mvCls3ShadowExtTbl[index] == IN_USE) {
+			/* write extension index */
+			mvPp2WrReg(MV_PP2_CLS3_DB_INDEX_REG, index);
+
+			/* read hash extesion data*/
+			for (i = 0; i < MV_PP2_CLS3_HASH_EXT_DATA_REG_NUM; i++)
+				hashExtData[i] = mvPp2RdReg(MV_PP2_CLS3_HASH_EXT_DATA_REG(i));
+
+			mvOsPrintf("[0x%2.2x] %8.8x %8.8x %8.8x %8.8x %8.8x %8.8x %8.8x\n",
+					index, hashExtData[6], hashExtData[5], hashExtData[4],
+					hashExtData[3], hashExtData[2], hashExtData[1], hashExtData[0]);
+		} /* if */
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+/*		APIs for Classification C3 key fields			   	 */
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC3SwL4infoSet(MV_PP2_CLS_C3_ENTRY *c3, int l4info)
+{
+	PTR_VALIDATE(c3);
+	POS_RANGE_VALIDATE(l4info, KEY_CTRL_L4_MAX);
+
+	c3->key.key_ctrl &= ~KEY_CTRL_L4_MASK;
+	c3->key.key_ctrl |= (l4info << KEY_CTRL_L4);
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3SwLkpTypeSet(MV_PP2_CLS_C3_ENTRY *c3, int lkp_type)
+{
+	PTR_VALIDATE(c3);
+	POS_RANGE_VALIDATE(lkp_type, KEY_CTRL_LKP_TYPE_MAX);
+
+	c3->key.key_ctrl &= ~KEY_CTRL_LKP_TYPE_MASK;
+	c3->key.key_ctrl |= (lkp_type << KEY_CTRL_LKP_TYPE);
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3SwPortIDSet(MV_PP2_CLS_C3_ENTRY *c3, int type, int portid)
+{
+	PTR_VALIDATE(c3);
+	POS_RANGE_VALIDATE(portid, KEY_CTRL_PRT_ID_MAX);
+	POS_RANGE_VALIDATE(type, KEY_CTRL_PRT_ID_TYPE_MAX);
+
+	c3->key.key_ctrl &= ~(KEY_CTRL_PRT_ID_MASK | KEY_CTRL_PRT_ID_TYPE_MASK);
+	c3->key.key_ctrl |= ((portid << KEY_CTRL_PRT_ID) | (type << KEY_CTRL_PRT_ID_TYPE));
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3SwHekSizeSet(MV_PP2_CLS_C3_ENTRY *c3, int hekSize)
+{
+	PTR_VALIDATE(c3);
+	POS_RANGE_VALIDATE(hekSize, KEY_CTRL_HEK_SIZE_MAX);
+
+	c3->key.key_ctrl &= ~KEY_CTRL_HEK_SIZE_MASK;
+	c3->key.key_ctrl |= (hekSize << KEY_CTRL_HEK_SIZE);
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3SwHekByteSet(MV_PP2_CLS_C3_ENTRY *c3, unsigned int offs, unsigned char byte)
+{
+	PTR_VALIDATE(c3);
+	POS_RANGE_VALIDATE(offs, ((MV_PP2_CLS_C3_EXT_HEK_WORDS*4) - 1));
+
+	c3->key.hek.bytes[HW_BYTE_OFFS(offs)] = byte;
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3SwHekWordSet(MV_PP2_CLS_C3_ENTRY *c3, unsigned int offs, unsigned int word)
+{
+	PTR_VALIDATE(c3);
+	POS_RANGE_VALIDATE(offs, ((MV_PP2_CLS_C3_EXT_HEK_WORDS) - 1));
+
+	c3->key.hek.words[offs] = word;
+
+	return MV_OK;
+}
+
+
+/*-------------------------------------------------------------------------------*/
+/*		APIs for Classification C3 action table fields		   	 */
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC3ColorSet(MV_PP2_CLS_C3_ENTRY *c3, int cmd)
+{
+	PTR_VALIDATE(c3);
+	POS_RANGE_VALIDATE(cmd, COLOR_RED_AND_LOCK);
+
+	c3->sram.regs.actions &= ~ACT_COLOR_MASK;
+	c3->sram.regs.actions |= (cmd << ACT_COLOR);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3QueueHighSet(MV_PP2_CLS_C3_ENTRY *c3, int cmd, int queue)
+{
+	PTR_VALIDATE(c3);
+
+
+	POS_RANGE_VALIDATE(cmd, UPDATE_AND_LOCK);
+	POS_RANGE_VALIDATE(queue, ACT_QOS_ATTR_MDF_HIGH_Q_MAX);
+
+	/*set command*/
+	c3->sram.regs.actions &= ~ACT_HIGH_Q_MASK;
+	c3->sram.regs.actions |= (cmd << ACT_HIGH_Q);
+
+	/*set modify High queue value*/
+	c3->sram.regs.qos_attr &= ~ACT_QOS_ATTR_MDF_HIGH_Q_MASK;
+	c3->sram.regs.qos_attr |= (queue << ACT_QOS_ATTR_MDF_HIGH_Q);
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3QueueLowSet(MV_PP2_CLS_C3_ENTRY *c3, int cmd, int queue)
+{
+	PTR_VALIDATE(c3);
+
+	POS_RANGE_VALIDATE(cmd, UPDATE_AND_LOCK);
+	POS_RANGE_VALIDATE(queue, ACT_QOS_ATTR_MDF_LOW_Q_MAX);
+
+	/*set command*/
+	c3->sram.regs.actions &= ~ACT_LOW_Q_MASK;
+	c3->sram.regs.actions |= (cmd << ACT_LOW_Q);
+
+	/*set modify High queue value*/
+	c3->sram.regs.qos_attr &= ~ACT_QOS_ATTR_MDF_LOW_Q_MASK;
+	c3->sram.regs.qos_attr |= (queue << ACT_QOS_ATTR_MDF_LOW_Q);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC3QueueSet(MV_PP2_CLS_C3_ENTRY *c3, int cmd, int queue)
+{
+	int status = MV_OK;
+	int qHigh, qLow;
+
+	PTR_VALIDATE(c3);
+	POS_RANGE_VALIDATE(queue, ACT_QOS_ATTR_Q_MAX);
+
+	/* cmd validation in set functions */
+
+	qHigh = (queue & ACT_QOS_ATTR_MDF_HIGH_Q_MASK) >> ACT_QOS_ATTR_MDF_HIGH_Q;
+	qLow = (queue & ACT_QOS_ATTR_MDF_LOW_Q_MASK) >> ACT_QOS_ATTR_MDF_LOW_Q;
+
+	status |= mvPp2ClsC3QueueLowSet(c3, cmd, qLow);
+	status |= mvPp2ClsC3QueueHighSet(c3, cmd, qHigh);
+
+	return status;
+}
+
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC3ForwardSet(MV_PP2_CLS_C3_ENTRY *c3, int cmd)
+{
+	PTR_VALIDATE(c3);
+	POS_RANGE_VALIDATE(cmd, HWF_AND_LOW_LATENCY_AND_LOCK);
+
+	c3->sram.regs.actions &= ~ACT_FWD_MASK;
+	c3->sram.regs.actions |= (cmd << ACT_FWD);
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+#ifdef CONFIG_MV_ETH_PP2_1
+int mvPp2ClsC3PolicerSet(MV_PP2_CLS_C3_ENTRY *c3, int cmd, int policerId, int bank)
+{
+	PTR_VALIDATE(c3);
+	POS_RANGE_VALIDATE(cmd, UPDATE_AND_LOCK);
+	POS_RANGE_VALIDATE(policerId, ACT_DUP_POLICER_MAX);
+	BIT_RANGE_VALIDATE(bank);
+
+	c3->sram.regs.actions &= ~ACT_POLICER_SELECT_MASK;
+	c3->sram.regs.actions |= (cmd << ACT_POLICER_SELECT);
+
+	c3->sram.regs.dup_attr &= ~ACT_DUP_POLICER_MASK;
+	c3->sram.regs.dup_attr |= (policerId << ACT_DUP_POLICER_ID);
+
+	if (bank)
+		c3->sram.regs.dup_attr |= ACT_DUP_POLICER_BANK_MASK;
+	else
+		c3->sram.regs.dup_attr &= ~ACT_DUP_POLICER_BANK_MASK;
+
+	return MV_OK;
+}
+#else
+int mvPp2ClsC3PolicerSet(MV_PP2_CLS_C3_ENTRY *c3, int cmd, int policerId)
+{
+	PTR_VALIDATE(c3);
+	POS_RANGE_VALIDATE(cmd, UPDATE_AND_LOCK);
+	POS_RANGE_VALIDATE(policerId, ACT_DUP_POLICER_MAX);
+
+	c3->sram.regs.actions &= ~ACT_POLICER_SELECT_MASK;
+	c3->sram.regs.actions |= (cmd << ACT_POLICER_SELECT);
+
+	c3->sram.regs.dup_attr &= ~ACT_DUP_POLICER_MASK;
+	c3->sram.regs.dup_attr |= (policerId << ACT_DUP_POLICER_ID);
+	return MV_OK;
+}
+#endif /*CONFIG_MV_ETH_PP2_1*/
+ /*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3FlowIdEn(MV_PP2_CLS_C3_ENTRY *c3, int flowid_en)
+{
+	PTR_VALIDATE(c3);
+
+	/*set Flow ID enable or disable*/
+	if (flowid_en)
+		c3->sram.regs.actions |= (1 << ACT_FLOW_ID_EN);
+	else
+		c3->sram.regs.actions &= ~(1 << ACT_FLOW_ID_EN);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+/*
+  PPv2.1 (feature MAS 3.7) function changed , get also MTU index as parameter
+ */
+int mvPp2ClsC3ModSet(MV_PP2_CLS_C3_ENTRY *c3, int data_ptr, int instr_offs, int l4_csum)
+{
+	PTR_VALIDATE(c3);
+	POS_RANGE_VALIDATE(data_ptr, ACT_HWF_ATTR_DPTR_MAX);
+	POS_RANGE_VALIDATE(instr_offs, ACT_HWF_ATTR_IPTR_MAX);
+	POS_RANGE_VALIDATE(l4_csum, 1);
+
+	c3->sram.regs.hwf_attr &= ~ACT_HWF_ATTR_DPTR_MASK;
+	c3->sram.regs.hwf_attr &= ~ACT_HWF_ATTR_IPTR_MASK;
+	c3->sram.regs.hwf_attr &= ~ACT_HWF_ATTR_CHKSM_EN_MASK;
+
+	c3->sram.regs.hwf_attr |= (data_ptr << ACT_HWF_ATTR_DPTR);
+	c3->sram.regs.hwf_attr |= (instr_offs << ACT_HWF_ATTR_IPTR);
+	c3->sram.regs.hwf_attr |= (l4_csum << ACT_HWF_ATTR_CHKSM_EN);
+
+	return MV_OK;
+}
+
+
+/*-------------------------------------------------------------------------------*/
+/*
+  PPv2.1 (feature MAS 3.7) mtu - new field at action table
+*/
+
+int mvPp2ClsC3MtuSet(MV_PP2_CLS_C3_ENTRY *c3, int mtu_inx)
+{
+	PTR_VALIDATE(c3);
+	POS_RANGE_VALIDATE(mtu_inx, ACT_HWF_ATTR_MTU_INX_MAX);
+
+	c3->sram.regs.hwf_attr &= ~ACT_HWF_ATTR_MTU_INX_MASK;
+	c3->sram.regs.hwf_attr |= (mtu_inx << ACT_HWF_ATTR_MTU_INX);
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC3DupSet(MV_PP2_CLS_C3_ENTRY *c3, int dupid, int count)
+{
+	PTR_VALIDATE(c3);
+	POS_RANGE_VALIDATE(count, ACT_DUP_COUNT_MAX);
+	POS_RANGE_VALIDATE(dupid, ACT_DUP_FID_MAX);
+
+	/*set flowid and count*/
+	c3->sram.regs.dup_attr &= ~(ACT_DUP_FID_MASK | ACT_DUP_COUNT_MASK);
+	c3->sram.regs.dup_attr |= (dupid << ACT_DUP_FID);
+	c3->sram.regs.dup_attr |= (count << ACT_DUP_COUNT);
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+/* PPv2.1 (feature MAS 3.14) cls sequence */
+int mvPp2ClsC3SeqSet(MV_PP2_CLS_C3_ENTRY *c3, int id,  int bits_offs,  int bits)
+{
+	unsigned int low_bits, high_bits = 0;
+
+	PTR_VALIDATE(c3);
+	POS_RANGE_VALIDATE(bits, MV_PP2_CLS_SEQ_SIZE_MAX);
+	POS_RANGE_VALIDATE(id, (1 << bits) - 1);
+	POS_RANGE_VALIDATE(bits_offs + bits, MV_PP2_CLS3_ACT_SEQ_SIZE);
+
+	if (bits_offs >= DWORD_BITS_LEN)
+		high_bits = bits;
+
+	else if (bits_offs + bits > DWORD_BITS_LEN)
+		high_bits = (bits_offs + bits) % DWORD_BITS_LEN;
+
+	low_bits = bits - high_bits;
+
+	/*
+	high_bits hold the num of bits that we need to write in seq_h_attr
+	low_bits hold the num of bits that we need to write in seq_l_attr
+	*/
+
+	if (low_bits) {
+		/* mask and set new value in seq_l_attr*/
+		c3->sram.regs.seq_l_attr &= ~(((1 << low_bits) - 1)  << bits_offs);
+		c3->sram.regs.seq_l_attr |= (id  << bits_offs);
+	}
+
+	if (high_bits) {
+		int high_id = id >> low_bits;
+		int high_offs = (low_bits == 0) ? (bits_offs % DWORD_BITS_LEN) : 0;
+
+		/* mask and set new value in seq_h_attr*/
+		c3->sram.regs.seq_h_attr &= ~(((1 << high_bits) - 1)  << high_offs);
+		c3->sram.regs.seq_h_attr |= (high_id << high_offs);
+	}
+
+	return MV_OK;
+
+}
+
+/*-------------------------------------------------------------------------------*/
+/*		APIs for Classification C3 Hit counters management	   	 */
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC3HitCntrsClear(int lkpType)
+{
+	/* clear all counters that entry lookup type corresponding to lkpType */
+	int iter = 0;
+
+	POS_RANGE_VALIDATE(lkpType, KEY_CTRL_LKP_TYPE_MAX);
+
+	mvPp2WrReg(MV_PP2_CLS3_CLEAR_COUNTERS_REG, lkpType);
+
+	/* wait to clear het counters done bit */
+	while (!mvPp2ClsC3HitCntrClearDone())
+		if (++iter >= RETRIES_EXCEEDED) {
+			mvOsPrintf("%s:Error - retries exceeded.\n", __func__);
+			return MV_CLS3_RETRIES_EXCEEDED;
+		}
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC3HitCntrsClearAll(void)
+{
+	int iter = 0;
+/*
+  PPv2.1 (feature MAS 3.16)  CLEAR_COUNTERS size changed, clear all code changed from 0x1f to 0x3f
+*/
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	mvPp2WrReg(MV_PP2_CLS3_CLEAR_COUNTERS_REG, MV_PP2_V1_CLS3_CLEAR_ALL);
+#else
+	mvPp2WrReg(MV_PP2_CLS3_CLEAR_COUNTERS_REG, MV_PP2_V0_CLS3_CLEAR_ALL);
+#endif
+	/* wait to clear het counters done bit */
+	while (!mvPp2ClsC3HitCntrClearDone())
+		if (++iter >= RETRIES_EXCEEDED) {
+			mvOsPrintf("%s:Error - retries exceeded.\n", __func__);
+			return MV_CLS3_RETRIES_EXCEEDED;
+		}
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC3HitCntrsRead(int index, MV_U32 *cntr)
+{
+	unsigned int counter;
+
+	POS_RANGE_VALIDATE(index, MV_PP2_CLS3_HASH_OP_TBL_ADDR_MAX);
+
+	/*write entry index*/
+	mvPp2WrReg(MV_PP2_CLS3_DB_INDEX_REG, index);
+
+	/*counter read*/
+#ifdef CONFIG_MV_ETH_PP2_1
+	counter = mvPp2RdReg(MV_PP2_CLS3_HIT_COUNTER_REG) & MV_PP2_V1_CLS3_HIT_COUNTER_MASK;
+#else
+	counter = mvPp2RdReg(MV_PP2_CLS3_HIT_COUNTER_REG) & MV_PP2_V0_CLS3_HIT_COUNTER_MASK;
+#endif
+
+	if (!cntr)
+		mvOsPrintf("ADDR:0x%3.3x	COUNTER VAL:0x%6.6x\n", index, counter);
+	else
+		*cntr = counter;
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC3HitCntrsMissRead(int lkp_type, MV_U32 *cntr)
+{
+	unsigned int counter;
+	int index;
+
+	POS_RANGE_VALIDATE(lkp_type, MV_PP2_CLS_C3_MISS_TBL_SIZE - 1);
+
+
+	/*set miss bit to 1, ppv2.1 mas 3.16*/
+	index = (lkp_type | MV_PP2_CLS3_DB_MISS_MASK);
+
+	/*write entry index*/
+	mvPp2WrReg(MV_PP2_CLS3_DB_INDEX_REG, index);
+
+	/*counter read*/
+	counter = mvPp2RdReg(MV_PP2_CLS3_HIT_COUNTER_REG) & MV_PP2_V1_CLS3_HIT_COUNTER_MASK;
+
+	if (!cntr)
+		mvOsPrintf("LKPT:0x%3.3x	COUNTER VAL:0x%6.6x\n", lkp_type, counter);
+	else
+		*cntr = counter;
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC3HitCntrsReadAll(void)
+{
+	unsigned int counter, index;
+
+	for (index = 0; index < MV_PP2_CLS_C3_HASH_TBL_SIZE; index++) {
+		mvPp2ClsC3HitCntrsRead(index, &counter);
+
+		/* skip initial counter value */
+		if (counter == 0)
+			continue;
+
+		mvOsPrintf("ADDR:0x%3.3x	COUNTER VAL:0x%6.6x\n", index, counter);
+	}
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	for (index = 0; index < MV_PP2_CLS_C3_MISS_TBL_SIZE; index++) {
+		mvPp2ClsC3HitCntrsMissRead(index, &counter);
+
+		/* skip initial counter value */
+		if (counter == 0)
+			continue;
+
+		mvOsPrintf("LKPT:0x%3.3x	COUNTER VAL:0x%6.6x\n", index, counter);
+	}
+#endif
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+/*	 APIs for Classification C3 hit counters scan fields operation 		 */
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3ScanStart()
+{
+	int complete, iter = 0;
+
+	/* trigger scan operation */
+	mvPp2WrReg(MV_PP2_CLS3_SC_ACT_REG, (1 << MV_PP2_CLS3_SC_ACT));
+
+	do {
+		complete = mvPp2ClsC3ScanIsComplete();
+
+	} while ((!complete) && ((iter++) < RETRIES_EXCEEDED));/*scan compleated*/
+
+	if (iter >= RETRIES_EXCEEDED) {
+		return MV_CLS3_RETRIES_EXCEEDED;
+	}
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3ScanRegs()
+{
+	unsigned int prop, propVal;
+#ifdef CONFIG_MV_ETH_PP2_1
+	unsigned int treshHold;
+	treshHold = mvPp2RdReg(MV_PP2_CLS3_SC_TH_REG);
+#endif
+	prop = mvPp2RdReg(MV_PP2_CLS3_SC_PROP_REG);
+	propVal = mvPp2RdReg(MV_PP2_CLS3_SC_PROP_VAL_REG);
+
+
+	mvOsPrintf("%-32s: 0x%x = 0x%08x\n", "MV_PP2_CLS3_SC_PROP_REG", MV_PP2_CLS3_SC_PROP_REG, prop);
+	mvOsPrintf("%-32s: 0x%x = 0x%08x\n", "MV_PP2_CLS3_SC_PROP_VAL_REG", MV_PP2_CLS3_SC_PROP_VAL_REG, propVal);
+	mvOsPrintf("\n");
+
+	mvOsPrintf("MODE      = %s\n", ((MV_PP2_CLS3_SC_PROP_TH_MODE_MASK & prop) == 0) ? "Below" : "Above");
+	mvOsPrintf("CLEAR     = %s\n", ((MV_PP2_CLS3_SC_PROP_CLEAR_MASK & prop) == 0) ? "NoClear" : "Clear  ");
+
+	/* lookup type */
+	((MV_PP2_CLS3_SC_PROP_LKP_TYPE_EN_MASK & prop) == 0) ?
+		mvOsPrintf("LKP_TYPE  = NA\n") :
+		mvOsPrintf("LKP_TYPE  = 0x%x\n", ((MV_PP2_CLS3_SC_PROP_LKP_TYPE_MASK & prop) >> MV_PP2_CLS3_SC_PROP_LKP_TYPE));
+
+	/* start index */
+	mvOsPrintf("START     = 0x%x\n", (MV_PP2_CLS3_SC_PROP_START_ENTRY_MASK & prop) >> MV_PP2_CLS3_SC_PROP_START_ENTRY);
+#ifdef CONFIG_MV_ETH_PP2_1
+	/* threshold */
+	mvOsPrintf("THRESHOLD = 0x%x\n", (MV_PP2_CLS3_SC_TH_MASK & treshHold) >> MV_PP2_CLS3_SC_TH);
+
+	/* delay value */
+	mvOsPrintf("DELAY     = 0x%x\n\n",
+			(MV_PP2_V1_CLS3_SC_PROP_VAL_DELAY_MASK & propVal) >> MV_PP2_V1_CLS3_SC_PROP_VAL_DELAY);
+
+#else
+	/* threshold */
+	mvOsPrintf("THRESHOLD = 0x%x\n",
+			(MV_PP2_V0_CLS3_SC_PROP_VAL_TH_MASK & propVal) >> MV_PP2_V0_CLS3_SC_PROP_VAL_TH);
+
+	/* delay value */
+	mvOsPrintf("DELAY     = 0x%x\n\n",
+			(MV_PP2_V0_CLS3_SC_PROP_VAL_DELAY_MASK & propVal) >> MV_PP2_V0_CLS3_SC_PROP_VAL_DELAY);
+#endif
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+/*mod = 0 below th . mode = 1 above threshold*/
+int mvPp2ClsC3ScanThreshSet(int mode, int thresh)
+{
+	unsigned int regVal;
+
+	POS_RANGE_VALIDATE(mode, 1); /* one bit */
+#ifdef CONFIG_MV_ETH_PP2_1
+	POS_RANGE_VALIDATE(thresh, MV_PP2_CLS3_SC_TH_MAX);
+#else
+	POS_RANGE_VALIDATE(thresh, MV_PP2_V0_CLS3_SC_PROP_VAL_TH_MAX);
+#endif
+
+	regVal = mvPp2RdReg(MV_PP2_CLS3_SC_PROP_REG);
+	regVal &= ~MV_PP2_CLS3_SC_PROP_TH_MODE_MASK;
+	regVal |= (mode << MV_PP2_CLS3_SC_PROP_TH_MODE);
+	mvPp2WrReg(MV_PP2_CLS3_SC_PROP_REG, regVal);
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	regVal = mvPp2RdReg(MV_PP2_CLS3_SC_TH_REG);
+	regVal &= ~MV_PP2_CLS3_SC_TH_MASK;
+	regVal |= (thresh << MV_PP2_CLS3_SC_TH);
+	mvPp2WrReg(MV_PP2_CLS3_SC_TH_REG, regVal);
+#else
+	regVal = mvPp2RdReg(MV_PP2_CLS3_SC_PROP_VAL_REG);
+	regVal &= ~MV_PP2_V0_CLS3_SC_PROP_VAL_TH_MASK;
+	regVal |= (thresh << MV_PP2_V0_CLS3_SC_PROP_VAL_TH);
+	mvPp2WrReg(MV_PP2_CLS3_SC_PROP_VAL_REG, regVal);
+#endif
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3ScanLkpTypeSet(int type)
+{
+	unsigned int prop;
+
+	RANGE_VALIDATE(type, -1, MV_PP2_CLS3_SC_PROP_LKP_TYPE_MAX);
+	prop = mvPp2RdReg(MV_PP2_CLS3_SC_PROP_REG);
+
+	if (type == -1)
+		/* scan all entries */
+		prop &= ~(1 << MV_PP2_CLS3_SC_PROP_LKP_TYPE_EN);
+	else {
+		/* scan according to lookup type */
+		prop |= (1 << MV_PP2_CLS3_SC_PROP_LKP_TYPE_EN);
+		prop &= ~MV_PP2_CLS3_SC_PROP_LKP_TYPE_MASK;
+		prop |= (type << MV_PP2_CLS3_SC_PROP_LKP_TYPE);
+	}
+
+	mvPp2WrReg(MV_PP2_CLS3_SC_PROP_REG, prop);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3ScanClearBeforeEnSet(int en)
+{
+	unsigned int prop;
+
+	POS_RANGE_VALIDATE(en, 1); /* one bit */
+
+	prop = mvPp2RdReg(MV_PP2_CLS3_SC_PROP_REG);
+
+	prop &= ~MV_PP2_CLS3_SC_PROP_CLEAR_MASK;
+	prop |= (en << MV_PP2_CLS3_SC_PROP_CLEAR);
+
+	mvPp2WrReg(MV_PP2_CLS3_SC_PROP_REG, prop);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC3ScanStartIndexSet(int idx)
+{
+	unsigned int prop;
+
+	POS_RANGE_VALIDATE(idx, MV_PP2_CLS3_HASH_OP_TBL_ADDR_MAX); /* one bit */
+
+	prop = mvPp2RdReg(MV_PP2_CLS3_SC_PROP_REG);
+
+	prop &= ~MV_PP2_CLS3_SC_PROP_START_ENTRY_MASK;
+	prop |= (idx << MV_PP2_CLS3_SC_PROP_START_ENTRY);
+
+	mvPp2WrReg(MV_PP2_CLS3_SC_PROP_REG, prop);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC3ScanDelaySet(int time)
+{
+	unsigned int propVal;
+
+	POS_RANGE_VALIDATE(time, MV_PP2_CLS3_SC_PROP_VAL_DELAY_MAX);
+
+	propVal = mvPp2RdReg(MV_PP2_CLS3_SC_PROP_VAL_REG);
+#ifdef CONFIG_MV_ETH_PP2_1
+	propVal &= ~MV_PP2_V1_CLS3_SC_PROP_VAL_DELAY_MASK;
+	propVal |= (time << MV_PP2_V1_CLS3_SC_PROP_VAL_DELAY);
+#else
+	propVal &= ~MV_PP2_V0_CLS3_SC_PROP_VAL_DELAY_MASK;
+	propVal |= (time << MV_PP2_V0_CLS3_SC_PROP_VAL_DELAY);
+#endif
+	mvPp2WrReg(MV_PP2_CLS3_SC_PROP_VAL_REG, propVal);
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3ScanResRead(int index, int *addr, int *cnt)
+{
+	unsigned int regVal, scState, addres, counter;
+	int iter = 0;
+
+	POS_RANGE_VALIDATE(index, MV_PP2_CLS_C3_SC_RES_TBL_SIZE-1);
+
+	do {
+		mvPp2ClsC3ScanStateGet(&scState);
+	} while (scState != 0 && ((iter++) < RETRIES_EXCEEDED));/*scan compleated*/
+
+	if (iter >= RETRIES_EXCEEDED) {
+		mvOsPrintf("%s:Error - retries exceeded.\n", __func__);
+		return MV_CLS3_RETRIES_EXCEEDED;
+	}
+
+	/*write index*/
+	mvPp2WrReg(MV_PP2_CLS3_SC_INDEX_REG, index);
+
+	/*read date*/
+	regVal = mvPp2RdReg(MV_PP2_CLS3_SC_RES_REG);
+	addres = (regVal & MV_PP2_CLS3_SC_RES_ENTRY_MASK) >> MV_PP2_CLS3_SC_RES_ENTRY;
+#ifdef CONFIG_MV_ETH_PP2_1
+	counter = (regVal & MV_PP2_V1_CLS3_SC_RES_CTR_MASK) >> MV_PP2_V1_CLS3_SC_RES_CTR;
+#else
+	counter = (regVal & MV_PP2_V0_CLS3_SC_RES_CTR_MASK) >> MV_PP2_V0_CLS3_SC_RES_CTR;
+#endif
+	/* if one of parameters is null - func call from sysfs*/
+	if ((!addr) | (!cnt))
+		mvOsPrintf("INDEX:0x%2.2x	ADDR:0x%3.3x	COUNTER VAL:0x%6.6x\n", index, addres, counter);
+	else {
+		*addr = addres;
+		*cnt = counter;
+	}
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3ScanResDump()
+{
+	int addr, cnt, resNum, index;
+
+	mvPp2ClsC3ScanNumOfResGet(&resNum);
+
+	mvOsPrintf("INDEX	ADDRESS		COUNTER\n");
+	for (index = 0; index < resNum; index++) {
+		mvPp2ClsC3ScanResRead(index, &addr, &cnt);
+		mvOsPrintf("[0x%2.2x]\t[0x%3.3x]\t[0x%6.6x]\n", index, addr, cnt);
+	}
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3ScanNumOfResGet(int *resNum)
+{
+	unsigned int regVal, scState;
+	int iter = 0;
+
+	do {
+		mvPp2ClsC3ScanStateGet(&scState);
+	} while (scState != 0 && ((iter++) < RETRIES_EXCEEDED));/*scan compleated*/
+
+	if (iter >= RETRIES_EXCEEDED) {
+		mvOsPrintf("%s:Error - retries exceeded.\n", __func__);
+		return MV_CLS3_RETRIES_EXCEEDED;
+	}
+
+	regVal = mvPp2RdReg(MV_PP2_CLS3_STATE_REG);
+	regVal &= MV_PP2_CLS3_STATE_NO_OF_SC_RES_MASK;
+	regVal >>= MV_PP2_CLS3_STATE_NO_OF_SC_RES;
+	*resNum = regVal;
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------
+
+int mvPp2ClsC3ScanTimerGet(int *timer)
+{
+	unsigned int regVal;
+
+	if (timer == NULL) {
+		mvOsPrintf("mvCls3Hw %s: null pointer.\n", __func__);
+		return MV_CLS3_ERR;
+	}
+
+	regVal = mvPp2RdReg(MV_PP2_CLS3_SC_TIMER_REG);
+	regVal &= MV_PP2_CLS3_SC_TIMER_MASK;
+	regVal >>= MV_PP2_CLS3_SC_TIMER;
+	*timer = regVal;
+	return MV_OK;
+}
+-------------------------------------------------------------------------------------*/
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls3Hw.h b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls3Hw.h
new file mode 100644
index 000000000000..332afd3ff2da
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls3Hw.h
@@ -0,0 +1,523 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __MV_CLS3_HW_H__
+#define __MV_CLS3_HW_H__
+
+#include "mvPp2ClsActHw.h"
+#include "mvPp2ClsHw.h"
+#include "../common/mvPp2ErrCode.h"
+#include "../common/mvPp2Common.h"
+#include "../gbe/mvPp2GbeRegs.h"
+
+/*-------------------------------------------------------------------------------*/
+/*			Classifier C3 Top Registers	    			 */
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS3_KEY_CTRL_REG		(MV_PP2_REG_BASE + 0x1C10)
+#define KEY_CTRL_L4				0
+#define KEY_CTRL_L4_BITS			3
+#define KEY_CTRL_L4_MAX				((1 << KEY_CTRL_L4_BITS) - 1)
+#define KEY_CTRL_L4_MASK			(((1 << KEY_CTRL_L4_BITS) - 1) << KEY_CTRL_L4)
+
+/*
+  PPv2.1 (feature MAS 3.16) LKP_TYPE size and offset changed
+*/
+#ifdef CONFIG_MV_ETH_PP2_1
+#define KEY_CTRL_LKP_TYPE			4
+#define KEY_CTRL_LKP_TYPE_BITS			6
+#else
+#define KEY_CTRL_LKP_TYPE			8
+#define KEY_CTRL_LKP_TYPE_BITS			4
+#endif
+
+#define KEY_CTRL_LKP_TYPE_MAX			((1 << KEY_CTRL_LKP_TYPE_BITS) - 1)
+#define KEY_CTRL_LKP_TYPE_MASK			(((1 << KEY_CTRL_LKP_TYPE_BITS) - 1) << KEY_CTRL_LKP_TYPE)
+
+
+#define KEY_CTRL_PRT_ID_TYPE			12
+#define KEY_CTRL_PRT_ID_TYPE_BITS		2
+#define KEY_CTRL_PRT_ID_TYPE_MAX		((1 << KEY_CTRL_PRT_ID_TYPE_BITS) - 1)
+#define KEY_CTRL_PRT_ID_TYPE_MASK		((KEY_CTRL_PRT_ID_TYPE_MAX) << KEY_CTRL_PRT_ID_TYPE)
+
+#define KEY_CTRL_PRT_ID				16
+#define KEY_CTRL_PRT_ID_BITS			8
+#define KEY_CTRL_PRT_ID_MAX			((1 << KEY_CTRL_PRT_ID_BITS) - 1)
+#define KEY_CTRL_PRT_ID_MASK			(((1 << KEY_CTRL_PRT_ID_BITS) - 1) << KEY_CTRL_PRT_ID)
+
+#define KEY_CTRL_HEK_SIZE			24
+#define KEY_CTRL_HEK_SIZE_BITS			6
+#define KEY_CTRL_HEK_SIZE_MAX			36
+#define KEY_CTRL_HEK_SIZE_MASK			(((1 << KEY_CTRL_HEK_SIZE_BITS) - 1) << KEY_CTRL_HEK_SIZE)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS3_KEY_HEK_REG(reg_num)	(MV_PP2_REG_BASE + 0x1C34 - 4*(reg_num))
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS3_QRY_ACT_REG			(MV_PP2_REG_BASE + 0x1C40)
+#define MV_PP2_CLS3_QRY_ACT			0
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS3_QRY_RES_HASH_REG(hash)	(MV_PP2_REG_BASE + 0x1C50 + 4*(hash))
+#define MV_PP2_CLS3_HASH_BANKS_NUM		8
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS3_INIT_HIT_CNT_REG	(MV_PP2_REG_BASE + 0x1C80)
+#define MV_PP2_CLS3_INIT_HIT_CNT_OFFS	6
+#define MV_PP2_CLS3_INIT_HIT_CNT_BITS	18
+#define MV_PP2_CLS3_INIT_HIT_CNT_MASK	(((1 << MV_PP2_CLS3_INIT_HIT_CNT_BITS) - 1) << MV_PP2_CLS3_INIT_HIT_CNT_OFFS)
+#define MV_PP2_CLS3_INIT_HIT_CNT_MAX	((1 << MV_PP2_CLS3_INIT_HIT_CNT_BITS) - 1)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS3_HASH_OP_REG			(MV_PP2_REG_BASE + 0x1C84)
+
+#define MV_PP2_CLS3_HASH_OP_TBL_ADDR		0
+#define MV_PP2_CLS3_HASH_OP_TBL_ADDR_BITS	12
+#define MV_PP2_CLS3_HASH_OP_TBL_ADDR_MAX	((1 << MV_PP2_CLS3_HASH_OP_TBL_ADDR_BITS) - 1)
+#define MV_PP2_CLS3_HASH_OP_TBL_ADDR_MASK	((MV_PP2_CLS3_HASH_OP_TBL_ADDR_MAX) << MV_PP2_CLS3_HASH_OP_TBL_ADDR)
+
+/*PPv2.1 (feature MAS 3.16) MISS_PTR is new field (one bit) at HASH_OP_REG */
+#define MV_PP2_CLS3_MISS_PTR			12
+#define MV_PP2_CLS3_MISS_PTR_MASK		(1 << MV_PP2_CLS3_MISS_PTR)
+
+#define MV_PP2_CLS3_HASH_OP_DEL			14
+#define MV_PP2_CLS3_HASH_OP_ADD			15
+
+#define MV_PP2_CLS3_HASH_OP_EXT_TBL_ADDR	16
+#define MV_PP2_CLS3_HASH_OP_EXT_TBL_ADDR_BITS	8
+#define MV_PP2_CLS3_HASH_OP_EXT_TBL_ADDR_MAX	((1 << MV_PP2_CLS3_HASH_OP_EXT_TBL_ADDR_BITS) - 1)
+#define MV_PP2_CLS3_HASH_OP_EXT_TBL_ADDR_MASK	\
+		((MV_PP2_CLS3_HASH_OP_EXT_TBL_ADDR_MAX) << MV_PP2_CLS3_HASH_OP_EXT_TBL_ADDR)
+
+/*PPv2.1 (feature MAS 3.16) INIT_CNT_VAL field removed*/
+#define MV_PP2_CLS3_HASH_OP_INIT_CTR_VAL	24
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS3_STATE_REG			(MV_PP2_REG_BASE + 0x1C8C)
+#define MV_PP2_CLS3_STATE_CPU_DONE		0
+#define MV_PP2_CLS3_STATE_CPU_DONE_MASK		(1 << MV_PP2_CLS3_STATE_CPU_DONE)
+
+#define MV_PP2_CLS3_STATE_CLEAR_CTR_DONE	1
+#define MV_PP2_CLS3_STATE_CLEAR_CTR_DONE_MASK	(1 << MV_PP2_CLS3_STATE_CLEAR_CTR_DONE)
+
+#define MV_PP2_CLS3_STATE_SC_DONE		2
+#define MV_PP2_CLS3_STATE_SC_DONE_MASK		(1 << MV_PP2_CLS3_STATE_SC_DONE)
+
+#define MV_PP2_CLS3_STATE_OCCIPIED		8
+#define MV_PP2_CLS3_STATE_OCCIPIED_BITS		8
+#define MV_PP2_CLS3_STATE_OCCIPIED_MASK		\
+		(((1 << MV_PP2_CLS3_STATE_OCCIPIED_BITS) - 1) << MV_PP2_CLS3_STATE_OCCIPIED)
+
+#define MV_PP2_CLS3_STATE_SC_STATE		16
+#define MV_PP2_CLS3_STATE_SC_STATE_BITS		2
+#define MV_PP2_CLS3_STATE_SC_STATE_MASK		\
+		(((1 << MV_PP2_CLS3_STATE_SC_STATE_BITS) - 1) << MV_PP2_CLS3_STATE_SC_STATE)
+/*
+SCAN STATUS
+0 - scan compleat
+1 -	hit counter clear
+3 - scan wait
+4 - scan in progress
+*/
+
+#define MV_PP2_CLS3_STATE_NO_OF_SC_RES		20
+#define MV_PP2_CLS3_STATE_NO_OF_SC_RES_BITS	9
+#define MV_PP2_CLS3_STATE_NO_OF_SC_RES_MASK	\
+		(((1 << MV_PP2_CLS3_STATE_NO_OF_SC_RES_BITS) - 1) << MV_PP2_CLS3_STATE_NO_OF_SC_RES)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS3_DB_INDEX_REG		(MV_PP2_REG_BASE + 0x1C90)
+#define MV_PP2_CLS3_DB_MISS_OFFS		12
+#define MV_PP2_CLS3_DB_MISS_MASK		(1 << MV_PP2_CLS3_DB_MISS_OFFS)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS3_HASH_DATA_REG(num)		(MV_PP2_REG_BASE + 0x1CA0 + 4*(num)) /* 0-3 valid val*/
+#define MV_PP2_CLS3_HASH_DATA_REG_NUM		4
+#define MV_PP2_CLS3_HASH_EXT_DATA_REG(num)	(MV_PP2_REG_BASE + 0x1CC0 + 4*(num))
+#define MV_PP2_CLS3_HASH_EXT_DATA_REG_NUM	7
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS3_CLEAR_COUNTERS_REG		(MV_PP2_REG_BASE + 0x1D00)
+#define MV_PP2_CLS3_CLEAR_COUNTERS		0
+/*
+  PPv2.1 (feature MAS 3.16)  CLEAR_COUNTERS size changed, clear all code changed from 0x1f to 0x3f
+*/
+#define MV_PP2_V1_CLS3_CLEAR_COUNTERS_BITS	7
+#define MV_PP2_V1_CLS3_CLEAR_ALL		0x3f
+#define MV_PP2_V1_CLS3_CLEAR_COUNTERS_MAX	0x3F
+#define MV_PP2_V1_CLS3_CLEAR_COUNTERS_MASK	((MV_PP2_V1_CLS3_CLEAR_COUNTERS_MAX) << MV_PP2_V1_CLS3_CLEAR_COUNTERS)
+
+#define MV_PP2_V0_CLS3_CLEAR_COUNTERS_BITS	5
+#define MV_PP2_V0_CLS3_CLEAR_ALL		0x1f
+#define MV_PP2_V0_CLS3_CLEAR_COUNTERS_MAX	0x1F
+#define MV_PP2_V0_CLS3_CLEAR_COUNTERS_MASK	((MV_PP2_V0_CLS3_CLEAR_COUNTERS_MAX)  << MV_PP2_V0_CLS3_CLEAR_COUNTERS)
+
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS3_HIT_COUNTER_REG		(MV_PP2_REG_BASE + 0x1D08)
+#define MV_PP2_CLS3_HIT_COUNTER			0
+/*ppv2.1 his counter field size changed from 14 bits to 24 bits*/
+#define MV_PP2_V0_CLS3_HIT_COUNTER_BITS		14
+#define MV_PP2_V0_CLS3_HIT_COUNTER_MAX		((1 << MV_PP2_V0_CLS3_HIT_COUNTER_BITS) - 1)
+#define MV_PP2_V0_CLS3_HIT_COUNTER_MASK		((MV_PP2_V0_CLS3_HIT_COUNTER_MAX) << MV_PP2_CLS3_HIT_COUNTER)
+
+#define MV_PP2_V1_CLS3_HIT_COUNTER_BITS		24
+#define MV_PP2_V1_CLS3_HIT_COUNTER_MAX		((1 << MV_PP2_V1_CLS3_HIT_COUNTER_BITS) - 1)
+#define MV_PP2_V1_CLS3_HIT_COUNTER_MASK		((MV_PP2_V1_CLS3_HIT_COUNTER_MAX) << MV_PP2_CLS3_HIT_COUNTER)
+
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS3_SC_PROP_REG			(MV_PP2_REG_BASE + 0x1D10)
+
+#define MV_PP2_CLS3_SC_PROP_TH_MODE		0
+#define MV_PP2_CLS3_SC_PROP_TH_MODE_MASK	(1 << MV_PP2_CLS3_SC_PROP_TH_MODE)
+
+#define MV_PP2_CLS3_SC_PROP_CLEAR		1
+#define MV_PP2_CLS3_SC_PROP_CLEAR_MASK		(1 << MV_PP2_CLS3_SC_PROP_CLEAR)
+
+#define MV_PP2_CLS3_SC_PROP_LKP_TYPE_EN		3
+#define MV_PP2_CLS3_SC_PROP_LKP_TYPE_EN_MASK	(1 << MV_PP2_CLS3_SC_PROP_LKP_TYPE_EN)
+
+#define MV_PP2_CLS3_SC_PROP_LKP_TYPE		4
+/*
+  PPv2.1 (feature MAS 3.16) LKP_TYPE size and offset changed
+*/
+
+#ifdef CONFIG_MV_ETH_PP2_1
+#define MV_PP2_CLS3_SC_PROP_LKP_TYPE_BITS	6
+#else
+#define MV_PP2_CLS3_SC_PROP_LKP_TYPE_BITS	4
+#endif
+
+#define MV_PP2_CLS3_SC_PROP_LKP_TYPE_MAX	((1 << MV_PP2_CLS3_SC_PROP_LKP_TYPE_BITS) - 1)
+#define MV_PP2_CLS3_SC_PROP_LKP_TYPE_MASK	((MV_PP2_CLS3_SC_PROP_LKP_TYPE_MAX) << MV_PP2_CLS3_SC_PROP_LKP_TYPE)
+
+#define MV_PP2_CLS3_SC_PROP_START_ENTRY		16
+#define MV_PP2_CLS3_SC_PROP_START_ENTRY_MASK	((MV_PP2_CLS3_HASH_OP_TBL_ADDR_MAX) << MV_PP2_CLS3_SC_PROP_START_ENTRY)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS3_SC_PROP_VAL_REG		(MV_PP2_REG_BASE + 0x1D14)
+
+/* ppv2.1 field removed from this reg */
+#define MV_PP2_V0_CLS3_SC_PROP_VAL_TH		0
+#define MV_PP2_V0_CLS3_SC_PROP_VAL_TH_BITS	13
+#define MV_PP2_V0_CLS3_SC_PROP_VAL_TH_MAX	((1 << MV_PP2_V0_CLS3_SC_PROP_VAL_TH_BITS) - 1)
+#define MV_PP2_V0_CLS3_SC_PROP_VAL_TH_MASK	((MV_PP2_V0_CLS3_SC_PROP_VAL_TH_MAX) << MV_PP2_V0_CLS3_SC_PROP_VAL_TH)
+
+/* ppv2.1 field offsett changed */
+#define MV_PP2_V0_CLS3_SC_PROP_VAL_DELAY	16
+#define MV_PP2_V1_CLS3_SC_PROP_VAL_DELAY	0
+#define MV_PP2_CLS3_SC_PROP_VAL_DELAY_BITS	16
+#define MV_PP2_CLS3_SC_PROP_VAL_DELAY_MAX	((1 << MV_PP2_CLS3_SC_PROP_VAL_DELAY_BITS) - 1)
+#define MV_PP2_V0_CLS3_SC_PROP_VAL_DELAY_MASK	(MV_PP2_CLS3_SC_PROP_VAL_DELAY_MAX << MV_PP2_V0_CLS3_SC_PROP_VAL_DELAY)
+#define MV_PP2_V1_CLS3_SC_PROP_VAL_DELAY_MASK	(MV_PP2_CLS3_SC_PROP_VAL_DELAY_MAX << MV_PP2_V1_CLS3_SC_PROP_VAL_DELAY)
+
+
+/*-------------------------------------------------------------------------------*/
+/* PPv2.1 new reg in cls3 */
+#define MV_PP2_CLS3_SC_TH_REG			(MV_PP2_REG_BASE + 0x1D18)
+#define MV_PP2_CLS3_SC_TH			4
+#define MV_PP2_CLS3_SC_TH_BITS			20
+#define MV_PP2_CLS3_SC_TH_MAX			((1 << MV_PP2_CLS3_SC_TH_BITS) - 1)
+#define MV_PP2_CLS3_SC_TH_MASK			(((1 << MV_PP2_CLS3_SC_TH_BITS) - 1) << MV_PP2_CLS3_SC_TH)
+
+
+
+/*-------------------------------------------------------------------------------*/
+/* ppv2.1 TIMER REG ADDRESS changed */
+#define MV_PP2_V0_CLS3_SC_TIMER_REG		(MV_PP2_REG_BASE + 0x1D18)
+#define MV_PP2_V1_CLS3_SC_TIMER_REG		(MV_PP2_REG_BASE + 0x1D1c)
+
+#define MV_PP2_CLS3_SC_TIMER			0
+#define MV_PP2_CLS3_SC_TIMER_BITS		16
+#define MV_PP2_CLS3_SC_TIMER_MASK		(((1 << MV_PP2_CLS3_SC_TIMER_BITS) - 1) << MV_PP2_CLS3_SC_TIMER)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS3_SC_ACT_REG			(MV_PP2_REG_BASE + 0x1D20)
+#define MV_PP2_CLS3_SC_ACT			0
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS3_SC_INDEX_REG		(MV_PP2_REG_BASE + 0x1D28)
+#define MV_PP2_CLS3_SC_INDEX			0
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS3_SC_RES_REG			(MV_PP2_REG_BASE + 0x1D2C)
+#define MV_PP2_CLS3_SC_RES_ENTRY		0
+#define MV_PP2_CLS3_SC_RES_ENTRY_MASK		((MV_PP2_CLS3_HASH_OP_TBL_ADDR_MAX) << MV_PP2_CLS3_SC_RES_ENTRY)
+
+/*ppv2.1 field offset and size changed */
+#define MV_PP2_V0_CLS3_SC_RES_CTR		16
+#define MV_PP2_V0_CLS3_SC_RES_CTR_MASK		((MV_PP2_V0_CLS3_HIT_COUNTER_MAX) << MV_PP2_V0_CLS3_SC_RES_CTR)
+#define MV_PP2_V1_CLS3_SC_RES_CTR		12
+#define MV_PP2_V1_CLS3_SC_RES_CTR_MASK		((MV_PP2_V1_CLS3_HIT_COUNTER_MAX) << MV_PP2_V1_CLS3_SC_RES_CTR)
+
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS3_ACT_REG			(MV_PP2_REG_BASE + 0x1D40)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS3_ACT_QOS_ATTR_REG		(MV_PP2_REG_BASE + 0x1D44)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS3_ACT_HWF_ATTR_REG		(MV_PP2_REG_BASE + 0x1D48)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS3_ACT_DUP_ATTR_REG		(MV_PP2_REG_BASE + 0x1D4C)
+/*-------------------------------------------------------------------------------*/
+/*ppv2.1: 0x1D50 0x1D54 are new registers, additional fields for action table*/
+#define MV_PP2_CLS3_ACT_SEQ_L_ATTR_REG		(MV_PP2_REG_BASE + 0x1D50)
+#define MV_PP2_CLS3_ACT_SEQ_H_ATTR_REG		(MV_PP2_REG_BASE + 0x1D54)
+#define MV_PP2_CLS3_ACT_SEQ_SIZE		38
+/*-------------------------------------------------------------------------------*/
+
+/*-------------------------------------------------------------------------------*/
+/*		Classifier C3 offsets in hash table		    		 */
+/*-------------------------------------------------------------------------------*/
+/* PPv2.1 (feature MAS 3.16) LKP_TYPE size and offset changed */
+#ifdef CONFIG_MV_ETH_PP2_1
+
+#define KEY_OCCUPIED				116
+#define KEY_FORMAT				115
+#define KEY_PTR_EXT				107
+
+#define KEY_PRT_ID(ext_mode)			((ext_mode == 1) ? (99) : (107))
+#define KEY_PRT_ID_MASK(ext_mode)		(((1 << KEY_CTRL_PRT_ID_BITS) - 1) << (KEY_PRT_ID(ext_mode) % 32))
+
+#define KEY_PRT_ID_TYPE(ext_mode)		((ext_mode == 1) ? (97) : (105))
+#define KEY_PRT_ID_TYPE_MASK(ext_mode)		((KEY_CTRL_PRT_ID_TYPE_MAX) << (KEY_PRT_ID_TYPE(ext_mode) % 32))
+
+#else
+
+#define KEY_OCCUPIED				114
+#define KEY_FORMAT				113
+#define KEY_PTR_EXT				105
+
+#define KEY_PRT_ID(ext_mode)			((ext_mode == 1) ? (97) : (105))
+#define KEY_PRT_ID_MASK(ext_mode)		(((1 << KEY_CTRL_PRT_ID_BITS) - 1) << (KEY_PRT_ID(ext_mode) % 32))
+
+#define KEY_PRT_ID_TYPE(ext_mode)		((ext_mode == 1) ? (95) : (103))
+#define KEY_PRT_ID_TYPE_MASK(ext_mode)		((KEY_CTRL_PRT_ID_TYPE_MAX) << (KEY_PRT_ID_TYPE(ext_mode) % 32))
+
+#endif /* CONFIG_MV_ETH_PP2_1 */
+
+#define KEY_LKP_TYPE(ext_mode)			((ext_mode == 1) ? (91) : (99))
+#define KEY_LKP_TYPE_MASK(ext_mode)		(((1 << KEY_CTRL_LKP_TYPE_BITS) - 1) << (KEY_LKP_TYPE(ext_mode) % 32))
+
+#define KEY_L4_INFO(ext_mode)			((ext_mode == 1) ? (88) : (96))
+#define KEY_L4_INFO_MASK(ext_mode)		(((1 << KEY_CTRL_L4_BITS) - 1) << (KEY_L4_INFO(ext_mode) % 32))
+
+
+/*-------------------------------------------------------------------------------*/
+/*		Classifier C3 engine Key public APIs		    		 */
+/*-------------------------------------------------------------------------------*/
+
+typedef struct {
+	/* valid if size > 0 */
+	/* size include the extension*/
+	int	ext_ptr;
+	int	size;
+} CLS3_SHADOW_HASH_ENTRY;
+
+#define HEK_EXT_FMT				"%8.8x %8.8x %8.8x | %8.8x %8.8x %8.8x %8.8x %8.8x %8.8x"
+#define HEK_EXT_VAL(p)				p[8], p[7], p[6], p[5], p[4], p[3], p[2], p[1], p[0]
+
+#define HEK_FMT					"%8.8x %8.8x %8.8x"
+#define HEK_VAL(p)				p[8], p[7], p[6]
+
+/*-------------------------------------------------------------------------------*/
+/*			Classifier C3 engine Public APIs	 		 */
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_CLS_C3_HASH_TBL_SIZE			(4096)
+#define MV_PP2_CLS_C3_MISS_TBL_SIZE			(64)
+#define MV_PP2_CLS_C3_EXT_HEK_WORDS			(9)
+#define MV_PP2_CLS_C3_SRAM_WORDS			(5)
+#define MV_PP2_CLS_C3_EXT_TBL_SIZE			(256)
+#define MV_PP2_CLS_C3_HEK_WORDS				(3)
+#define MV_PP2_CLS_C3_HEK_BYTES				12 /* size in bytes */
+#define MV_PP2_CLS_C3_BANK_SIZE				(512)
+#define MV_PP2_CLS_C3_MAX_SEARCH_DEPTH			(16)
+
+typedef struct mvPp2Cls3HashPair {
+	unsigned short	pair_num;
+	unsigned short	old_idx[MV_PP2_CLS_C3_MAX_SEARCH_DEPTH];
+	unsigned short	new_idx[MV_PP2_CLS_C3_MAX_SEARCH_DEPTH];
+} MV_PP2_CLS3_HASH_PAIR;
+
+typedef struct mvPp2ClsC3Entry {
+	unsigned int 	index;
+	unsigned int 	ext_index;
+
+	struct {
+		union {
+			MV_U32	words[MV_PP2_CLS_C3_EXT_HEK_WORDS];
+			MV_U8	bytes[MV_PP2_CLS_C3_EXT_HEK_WORDS * 4];
+		} hek;
+		MV_U32		key_ctrl;/*0x1C10*/
+	} key;
+	union {
+		MV_U32 words[MV_PP2_CLS_C3_SRAM_WORDS];
+		struct {
+			MV_U32 actions;/*0x1D40*/
+			MV_U32 qos_attr;/*0x1D44*/
+			MV_U32 hwf_attr;/*0x1D48*/
+			MV_U32 dup_attr;/*0x1D4C*/
+			/*ppv2.1: 0x1D50 0x1D54 are new registers, additional fields for action table*/
+			MV_U32 seq_l_attr;/*0x1D50*/
+			MV_U32 seq_h_attr;/*0x1D54*/
+		} regs;
+	} sram;
+} MV_PP2_CLS_C3_ENTRY;
+
+
+/*-------------------------------------------------------------------------------*/
+/*			Common utilities				   	 */
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3Init(void);
+void mvPp2ClsC3ShadowInit(void);
+int mvPp2ClsC3ShadowFreeGet(void);
+int mvPp2ClsC3ShadowExtFreeGet(void);
+void mvPp2C3ShadowClear(int index);
+
+/*-------------------------------------------------------------------------------*/
+/*			APIs for Classification C3 engine		   	 */
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC3HwRead(MV_PP2_CLS_C3_ENTRY *c3, int index);
+int mvPp2ClsC3HwAdd(MV_PP2_CLS_C3_ENTRY *c3, int index, int ext_index);
+int mvPp2ClsC3HwMissAdd(MV_PP2_CLS_C3_ENTRY *c3, int lkp_type);
+int mvPp2ClsC3HwDump(void);
+int mvPp2ClsC3HwMissDump(void);
+int mvPp2ClsC3HwExtDump(void);
+int mvPp2ClsC3HwDel(int index);
+int mvPp2ClsC3HwDelAll(void);
+int mvPp2ClsC3SwDump(MV_PP2_CLS_C3_ENTRY *c3);
+void mvPp2ClsC3SwClear(MV_PP2_CLS_C3_ENTRY *c3);
+void mvPp2ClsC3HwInitCtrSet(int cntVal);
+int mvPp2ClsC3HwQuery(MV_PP2_CLS_C3_ENTRY *c3, unsigned char *occupied_bmp, int index[]);
+int mvPp2ClsC3HwQueryAdd(MV_PP2_CLS_C3_ENTRY *c3, int max_search_depth, MV_PP2_CLS3_HASH_PAIR *hash_pair_arr);
+
+int mvPp2ClsC3HwMissRead(MV_PP2_CLS_C3_ENTRY *c3, int lkp_type);
+int mvPp2ClsC3HwMissDump(void);
+/*-------------------------------------------------------------------------------*/
+/*		APIs for Classification C3 key fields			   	 */
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3SwL4infoSet(MV_PP2_CLS_C3_ENTRY *c3, int l4info);
+int mvPp2ClsC3SwLkpTypeSet(MV_PP2_CLS_C3_ENTRY *c3, int lkp_type);
+int mvPp2ClsC3SwPortIDSet(MV_PP2_CLS_C3_ENTRY *c3, int type, int portid);
+int mvPp2ClsC3SwHekSizeSet(MV_PP2_CLS_C3_ENTRY *c3, int hek_size);
+int mvPp2ClsC3SwHekByteSet(MV_PP2_CLS_C3_ENTRY *c3, unsigned int offs, unsigned char byte);
+int mvPp2ClsC3SwHekWordSet(MV_PP2_CLS_C3_ENTRY *c3, unsigned int offs, unsigned int word);
+
+/*-------------------------------------------------------------------------------*/
+/*		APIs for Classification C3 action table fields		   	 */
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3ColorSet(MV_PP2_CLS_C3_ENTRY *c3, int cmd);
+int mvPp2ClsC3QueueHighSet(MV_PP2_CLS_C3_ENTRY *c3, int cmd, int q);
+int mvPp2ClsC3QueueLowSet(MV_PP2_CLS_C3_ENTRY *c3, int cmd, int q);
+int mvPp2ClsC3QueueSet(MV_PP2_CLS_C3_ENTRY *c3, int cmd, int queue);
+int mvPp2ClsC3ForwardSet(MV_PP2_CLS_C3_ENTRY *c3, int cmd);
+#ifdef CONFIG_MV_ETH_PP2_1
+int mvPp2ClsC3PolicerSet(MV_PP2_CLS_C3_ENTRY *c3, int cmd, int policerId, int bank);
+#else
+int mvPp2ClsC3PolicerSet(MV_PP2_CLS_C3_ENTRY *c3, int cmd, int policerId);
+#endif
+int mvPp2ClsC3FlowIdEn(MV_PP2_CLS_C3_ENTRY *c3, int flowid_en);
+
+/* PPv2.1 (feature MAS 3.7) mtu - new field at action table */
+int mvPp2ClsC3MtuSet(MV_PP2_CLS_C3_ENTRY *c3, int mtu_inx);
+int mvPp2ClsC3ModSet(MV_PP2_CLS_C3_ENTRY *c3, int data_ptr, int instr_offs, int l4_csum);
+int mvPp2ClsC3DupSet(MV_PP2_CLS_C3_ENTRY *c3, int dupid, int count);
+
+/* PPv2.1 (feature MAS 3.14) cls sequence */
+int mvPp2ClsC3SeqSet(MV_PP2_CLS_C3_ENTRY *c3, int id,  int bits_offs,  int bits);
+
+/*-------------------------------------------------------------------------------*/
+/*		APIs for Classification C3 Hit counters management	   	 */
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC3HitCntrsRead(int index, MV_U32 *cntr);
+int mvPp2ClsC3HitCntrsClearAll(void);
+int mvPp2ClsC3HitCntrsReadAll(void);
+int mvPp2ClsC3HitCntrsClear(int lkpType);
+int mvPp2ClsC3HitCntrsMissRead(int lkp_type, MV_U32 *cntr);
+
+
+/*-------------------------------------------------------------------------------*/
+/*	 APIs for Classification C3 hit counters scan fields operation 		 */
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_CLS_C3_SC_RES_TBL_SIZE			(256)
+
+int mvPp2ClsC3ScanStart(void);
+int mvPp2ClsC3ScanRegs(void);
+int mvPp2ClsC3ScanThreshSet(int mode, int thresh);
+int mvPp2ClsC3ScanClearBeforeEnSet(int en);
+int mvPp2ClsC3ScanLkpTypeSet(int type);
+int mvPp2ClsC3ScanStartIndexSet(int idx);
+int mvPp2ClsC3ScanDelaySet(int time);
+int mvPp2ClsC3ScanResRead(int index, int *addr, int *cnt);
+int mvPp2ClsC3ScanNumOfResGet(int *resNum);
+int mvPp2ClsC3ScanResDump(void);
+
+
+
+#endif /* __MV_CLS3_HW_H__ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls4Hw.c b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls4Hw.c
new file mode 100644
index 000000000000..c986de089dda
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls4Hw.c
@@ -0,0 +1,721 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#include "mvPp2Cls4Hw.h"
+
+/*-------------------------------------------------------------------------------*/
+/*			Classifier C4 engine Public APIs			 */
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC4HwPortToRulesSet(int port, int set, int rules)
+{
+	unsigned int regVal;
+
+	POS_RANGE_VALIDATE(port, MV_PP2_MAX_PORTS-1);
+	POS_RANGE_VALIDATE(set, MV_PP2_CLS_C4_GRPS_NUM-1);
+	RANGE_VALIDATE(rules, 1, MV_PP2_CLS_C4_GRP_SIZE);
+
+	regVal = (set << MV_PP2_CLS4_PHY_TO_RL_GRP) | (rules << MV_PP2_CLS4_PHY_TO_RL_RULE_NUM);
+	mvPp2WrReg(MV_PP2_CLS4_PHY_TO_RL_REG(port), regVal);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC4HwUniToRulesSet(int uni, int set, int rules)
+{
+	unsigned int regVal;
+
+	POS_RANGE_VALIDATE(uni, UNI_MAX);
+	POS_RANGE_VALIDATE(set, MV_PP2_CLS_C4_GRPS_NUM-1);
+	RANGE_VALIDATE(rules, 1, MV_PP2_CLS_C4_GRP_SIZE);
+
+	regVal = (set << MV_PP2_CLS4_PHY_TO_RL_GRP) | (rules << MV_PP2_CLS4_PHY_TO_RL_RULE_NUM);
+	mvPp2WrReg(MV_PP2_CLS4_UNI_TO_RL_REG(uni), regVal);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC4HwPortToRulesGet(int port, int *set, int *rules)
+{
+	unsigned int regVal;
+
+	PTR_VALIDATE(set);
+	PTR_VALIDATE(rules);
+
+	regVal = mvPp2RdReg(MV_PP2_CLS4_PHY_TO_RL_REG(port));
+
+	*rules = (regVal & MV_PP2_CLS4_PHY_TO_RL_RULE_NUM_MASK) >> MV_PP2_CLS4_PHY_TO_RL_RULE_NUM;
+	*set = (regVal & MV_PP2_CLS4_PHY_TO_RL_GRP_MASK) >> MV_PP2_CLS4_PHY_TO_RL_GRP;
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC4HwUniToRulesGet(int uni, int *set, int *rules)
+{
+	unsigned int regVal;
+
+	PTR_VALIDATE(set);
+	PTR_VALIDATE(rules);
+
+
+	regVal = mvPp2RdReg(MV_PP2_CLS4_UNI_TO_RL_REG(uni));
+
+	*rules = (regVal & MV_PP2_CLS4_PHY_TO_RL_RULE_NUM_MASK) >> MV_PP2_CLS4_PHY_TO_RL_RULE_NUM;
+	*set = (regVal & MV_PP2_CLS4_PHY_TO_RL_GRP_MASK) >> MV_PP2_CLS4_PHY_TO_RL_GRP;
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC4HwRead(MV_PP2_CLS_C4_ENTRY *C4, int rule, int set)
+{
+	unsigned int regVal = 0;
+	int regInd;
+
+	PTR_VALIDATE(C4);
+
+	POS_RANGE_VALIDATE(rule, (MV_PP2_CLS_C4_GRP_SIZE-1));
+	POS_RANGE_VALIDATE(set, (MV_PP2_CLS_C4_GRPS_NUM-1));
+
+	/* write index reg */
+	regVal = (set << MV_PP2_CLS4_RL_INDEX_GRP) | (rule << MV_PP2_CLS4_RL_INDEX_RULE);
+	mvPp2WrReg(MV_PP2_CLS4_RL_INDEX_REG, regVal);
+
+	C4->ruleIndex = rule;
+	C4->setIndex = set;
+	/* read entry rule fields*/
+	C4->rules.regs.attr[0] = mvPp2RdReg(MV_PP2_CLS4_FATTR1_REG);
+	C4->rules.regs.attr[1] = mvPp2RdReg(MV_PP2_CLS4_FATTR2_REG);
+
+	for (regInd = 0; regInd < MV_PP2_CLS_C4_TBL_DATA_WORDS; regInd++)
+		C4->rules.regs.fdataArr[regInd] = mvPp2RdReg(MV_PP2_CLS4_FDATA_REG(regInd));
+
+	/* read entry from action table */
+	C4->sram.regs.actions = mvPp2RdReg(MV_PP2_CLS4_ACT_REG);
+	C4->sram.regs.qos_attr = mvPp2RdReg(MV_PP2_CLS4_ACT_QOS_ATTR_REG);
+	C4->sram.regs.dup_attr = mvPp2RdReg(MV_PP2_CLS4_ACT_DUP_ATTR_REG);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC4HwWrite(MV_PP2_CLS_C4_ENTRY *C4, int rule, int set)
+{
+	unsigned int regVal = 0;
+	int regInd;
+
+	PTR_VALIDATE(C4);
+
+
+	POS_RANGE_VALIDATE(rule, (MV_PP2_CLS_C4_GRP_SIZE-1));
+	POS_RANGE_VALIDATE(set, (MV_PP2_CLS_C4_GRPS_NUM-1));
+
+	/* write index reg */
+	regVal = (set << MV_PP2_CLS4_RL_INDEX_GRP) | (rule << MV_PP2_CLS4_RL_INDEX_RULE);
+	mvPp2WrReg(MV_PP2_CLS4_RL_INDEX_REG, regVal);
+
+	mvPp2WrReg(MV_PP2_CLS4_FATTR1_REG, C4->rules.regs.attr[0]);
+	mvPp2WrReg(MV_PP2_CLS4_FATTR2_REG, C4->rules.regs.attr[1]);
+
+	for (regInd = 0; regInd < MV_PP2_CLS_C4_TBL_DATA_WORDS; regInd++)
+		mvPp2WrReg(MV_PP2_CLS4_FDATA_REG(regInd), C4->rules.regs.fdataArr[regInd]);
+
+	/* read entry from action table */
+	mvPp2WrReg(MV_PP2_CLS4_ACT_REG, C4->sram.regs.actions);
+	mvPp2WrReg(MV_PP2_CLS4_ACT_QOS_ATTR_REG, C4->sram.regs.qos_attr);
+	mvPp2WrReg(MV_PP2_CLS4_ACT_DUP_ATTR_REG, C4->sram.regs.dup_attr);
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC4SwDump(MV_PP2_CLS_C4_ENTRY *C4)
+{
+	int index;
+
+	PTR_VALIDATE(C4);
+
+	mvOsPrintf("SET: %d	RULE: %d\n", C4->setIndex, C4->ruleIndex);
+	mvOsPrintf("FIELD  ID  OP	DATA\n");
+
+	/*------------------------------*/
+	/*	   fields 0-2		*/
+	/*------------------------------*/
+
+	for (index = 0; index <  4; index++) {
+		mvOsPrintf("%d       %d  %d	",
+				index,
+				MV_PP2_CLS4_FATTR_ID_VAL(index, C4->rules.regs.attr[GET_FIELD_ATRR(index)]),
+				MV_PP2_CLS4_FATTR_OPCODE_VAL(index, C4->rules.regs.attr[GET_FIELD_ATRR(index)]));
+
+		mvOsPrintf(FLD_FMT, FLD_VAL(index, C4->rules.regs.fdataArr));
+		mvOsPrintf("\n");
+	}
+
+	/*------------------------------*/
+	/*	   field 4		*/
+	/*------------------------------*/
+
+	/* index = 4 after loop */
+	mvOsPrintf("%d       %d  %d	",
+			index,
+			MV_PP2_CLS4_FATTR_ID_VAL(index, C4->rules.regs.attr[GET_FIELD_ATRR(index)]),
+			MV_PP2_CLS4_FATTR_OPCODE_VAL(index, C4->rules.regs.attr[GET_FIELD_ATRR(index)]));
+	mvOsPrintf(FLD4_FMT, FLD4_VAL(C4->rules.regs.fdataArr));
+	mvOsPrintf("\n");
+
+	/*------------------------------*/
+	/*	   field 5		*/
+	/*------------------------------*/
+	index++;
+
+	mvOsPrintf("%d       %d  %d	",
+			index,
+			MV_PP2_CLS4_FATTR_ID_VAL(index, C4->rules.regs.attr[GET_FIELD_ATRR(index)]),
+			MV_PP2_CLS4_FATTR_OPCODE_VAL(index, C4->rules.regs.attr[GET_FIELD_ATRR(index)]));
+	mvOsPrintf(FLD5_FMT, FLD5_VAL(C4->rules.regs.fdataArr));
+	mvOsPrintf("\n");
+	mvOsPrintf("\n");
+	mvOsPrintf("VLAN: %d  PPPOE: %d  MACME: %d  L4INFO: %d  L3INFO: %d\n",
+			MV_PP2_CLS4_VLAN_VAL(C4->rules.regs.fdataArr[6]),
+			MV_PP2_CLS4_PPPOE_VAL(C4->rules.regs.fdataArr[6]),
+			MV_PP2_CLS4_MACME_VAL(C4->rules.regs.fdataArr[6]),
+			MV_PP2_CLS4_L4INFO_VAL(C4->rules.regs.fdataArr[6]),
+			MV_PP2_CLS4_L3INFO_VAL(C4->rules.regs.fdataArr[6]));
+	mvOsPrintf("\n");
+	/*------------------------------*/
+	/*	actions	0x1E80		*/
+	/*------------------------------*/
+/*
+  PPv2.1 (feature MAS 3.9) Add forwarding command to C4
+*/
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	mvOsPrintf("ACT_TBL:COLOR	PRIO	DSCP	GPID	LOW_Q	HIGH_Q	POLICER		FWD\n");
+	mvOsPrintf("CMD:    [%1d]	[%1d]	[%1d]	[%1d]	[%1d]	[%1d]	[%1d]		[%1d]\n",
+			((C4->sram.regs.actions & (ACT_COLOR_MASK)) >> ACT_COLOR),
+			((C4->sram.regs.actions & (ACT_PRI_MASK)) >> ACT_PRI),
+			((C4->sram.regs.actions & (ACT_DSCP_MASK)) >> ACT_DSCP),
+			((C4->sram.regs.actions & (ACT_GEM_ID_MASK)) >> ACT_GEM_ID),
+			((C4->sram.regs.actions & (ACT_LOW_Q_MASK)) >> ACT_LOW_Q),
+			((C4->sram.regs.actions & (ACT_HIGH_Q_MASK)) >> ACT_HIGH_Q),
+			((C4->sram.regs.actions & (ACT_POLICER_SELECT_MASK)) >> ACT_POLICER_SELECT),
+			((C4->sram.regs.actions & ACT_FWD_MASK) >> ACT_FWD));
+
+
+	/*------------------------------*/
+	/*	qos_attr 0x1E84		*/
+	/*------------------------------*/
+	/*mvOsPrintf("ACT_TBL:COLOR	PRIO	DSCP	GPID	LOW_Q	HIGH_Q	POLICER		FWD\n");*/
+
+	/*mvOsPrintf("VAL:		PRIO	DSCP	GPID	LOW_Q	HIGH_Q	 POLICER\n");*/
+
+	mvOsPrintf("VAL:		[%1d]	[%1d]	[%1d]	[%1d]	[0x%x]	[id 0x%2.2x bank %1.1x]\n",
+			((C4->sram.regs.qos_attr & (ACT_QOS_ATTR_MDF_PRI_MASK)) >> ACT_QOS_ATTR_MDF_PRI),
+			((C4->sram.regs.qos_attr & (ACT_QOS_ATTR_MDF_DSCP_MASK)) >> ACT_QOS_ATTR_MDF_DSCP),
+			((C4->sram.regs.qos_attr & (ACT_QOS_ATTR_MDF_GEM_ID_MASK)) >> ACT_QOS_ATTR_MDF_GEM_ID),
+			((C4->sram.regs.qos_attr & (ACT_QOS_ATTR_MDF_LOW_Q_MASK)) >> ACT_QOS_ATTR_MDF_LOW_Q),
+			((C4->sram.regs.qos_attr & (ACT_QOS_ATTR_MDF_HIGH_Q_MASK)) >> ACT_QOS_ATTR_MDF_HIGH_Q),
+			((C4->sram.regs.dup_attr & (ACT_DUP_POLICER_MASK)) >> ACT_DUP_POLICER_ID),
+			((C4->sram.regs.dup_attr & ACT_DUP_POLICER_BANK_MASK) >> ACT_DUP_POLICER_BANK_BIT));
+
+#else
+	mvOsPrintf("ACT_TBL:    COLOR   PRIO    DSCP    GPID    LOW_Q   HIGH_Q  POLICER\n");
+	mvOsPrintf("CMD:                [%1d]   [%1d]   [%1d]   [%1d]   [%1d]   [%1d]   [%1d]\n",
+			((C4->sram.regs.actions & (ACT_COLOR_MASK)) >> ACT_COLOR),
+			((C4->sram.regs.actions & (ACT_PRI_MASK)) >> ACT_PRI),
+			((C4->sram.regs.actions & (ACT_DSCP_MASK)) >> ACT_DSCP),
+			((C4->sram.regs.actions & (ACT_GEM_ID_MASK)) >> ACT_GEM_ID),
+			((C4->sram.regs.actions & (ACT_LOW_Q_MASK)) >> ACT_LOW_Q),
+			((C4->sram.regs.actions & (ACT_HIGH_Q_MASK)) >> ACT_HIGH_Q),
+			((C4->sram.regs.actions & (ACT_POLICER_SELECT_MASK)) >> ACT_POLICER_SELECT));
+
+
+	/*------------------------------*/
+	/*	qos_attr 0x1E84		*/
+	/*------------------------------*/
+	/*mvOsPrintf("VAL:		PRIO	DSCP	GPID	LOW_Q	HIGH_Q	 POLICER\n");*/
+
+	mvOsPrintf("VAL:                    [%1d]	[%1d]	[%1d]	[%1d]	[0x%x]	[%1d]\n",
+			((C4->sram.regs.qos_attr & (ACT_QOS_ATTR_MDF_PRI_MASK)) >> ACT_QOS_ATTR_MDF_PRI),
+			((C4->sram.regs.qos_attr & (ACT_QOS_ATTR_MDF_DSCP_MASK)) >> ACT_QOS_ATTR_MDF_DSCP),
+			((C4->sram.regs.qos_attr & (ACT_QOS_ATTR_MDF_GEM_ID_MASK)) >> ACT_QOS_ATTR_MDF_GEM_ID),
+			((C4->sram.regs.qos_attr & (ACT_QOS_ATTR_MDF_LOW_Q_MASK)) >> ACT_QOS_ATTR_MDF_LOW_Q),
+			((C4->sram.regs.qos_attr & (ACT_QOS_ATTR_MDF_HIGH_Q_MASK)) >> ACT_QOS_ATTR_MDF_HIGH_Q),
+			((C4->sram.regs.dup_attr & (ACT_DUP_POLICER_MASK)) >> ACT_DUP_POLICER_ID));
+
+#endif
+
+
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+/* PPv2.1 MASS 3.20 new feature */
+int mvPp2V1ClsC4HwCntDump(int rule, int set, unsigned int *cnt)
+{
+	unsigned int regVal;
+
+	POS_RANGE_VALIDATE(rule, (MV_PP2_CLS_C4_GRP_SIZE-1));
+	POS_RANGE_VALIDATE(set, (MV_PP2_CLS_C4_GRPS_NUM-1));
+
+	/* write index */
+	regVal =  MV_PP2_V1_CNT_IDX_RULE(rule, set);
+	mvPp2WrReg(MV_PP2_V1_CNT_IDX_REG, regVal);
+
+	/*read hit counter*/
+	regVal = mvPp2RdReg(MV_PP2_V1_CLS_C4_TBL_HIT_REG);
+
+	if (cnt)
+		*cnt = regVal;
+	else
+		mvOsPrintf("HIT COUNTER: %d\n", regVal);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC4HwDumpAll()
+{
+	int set, rule;
+	MV_PP2_CLS_C4_ENTRY C4;
+
+	for (set = 0; set < MV_PP2_CLS_C4_GRPS_NUM; set++)
+		for (rule = 0; rule <  MV_PP2_CLS_C4_GRP_SIZE; rule++) {
+			mvPp2ClsC4HwRead(&C4, rule, set);
+			mvPp2ClsC4SwDump(&C4);
+#ifdef CONFIG_MV_ETH_PP2_1
+			mvPp2V1ClsC4HwCntDump(rule, set, NULL);
+#endif
+			mvOsPrintf("--------------------------------------------------------------------\n");
+		}
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+/* mvPp2V1ClsC4HwHitsDump - dump all non zeroed hit counters and the associated  HWentries */
+/* PPv2.1 MASS 3.20 new feature */
+int mvPp2V1ClsC4HwHitsDump()
+{
+	int set, rule;
+	unsigned int cnt;
+	MV_PP2_CLS_C4_ENTRY C4;
+
+	for (set = 0; set < MV_PP2_CLS_C4_GRPS_NUM; set++)
+		for (rule = 0; rule <  MV_PP2_CLS_C4_GRP_SIZE; rule++) {
+			mvPp2V1ClsC4HwCntDump(rule, set, &cnt);
+			if (cnt == 0)
+				continue;
+
+			mvPp2ClsC4HwRead(&C4, rule, set);
+			mvPp2ClsC4SwDump(&C4);
+			mvOsPrintf("HITS: %d\n", cnt);
+			mvOsPrintf("--------------------------------------------------------------------\n");
+		}
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC4RegsDump()
+{
+	int i = 0;
+	char reg_name[100];
+
+
+	for (i = 0; i < MV_PP2_MAX_PORTS; i++) {
+		mvOsSPrintf(reg_name, "MV_PP2_CLS4_PHY_TO_RL_%d_REG", i);
+		mvPp2PrintReg(MV_PP2_CLS4_PHY_TO_RL_REG(i), reg_name);
+	}
+
+	for (i = 0; i < MV_PP2_MAX_PORTS; i++) {
+		mvOsSPrintf(reg_name, "MV_PP2_CLS4_UNI_TO_RL_%d_REG", i);
+		mvPp2PrintReg(MV_PP2_CLS4_UNI_TO_RL_REG(i), reg_name);
+	}
+
+	mvPp2PrintReg(MV_PP2_CLS4_FATTR1_REG, "MV_PP2_CLS4_FATTR1_REG");
+	mvPp2PrintReg(MV_PP2_CLS4_FATTR2_REG, "MV_PP2_CLS4_FATTR2_REG");
+
+	for (i = 0; i < MV_PP2_CLS4_FDATA_REGS_NUM; i++) {
+		mvOsSPrintf(reg_name, "MV_PP2_CLS4_FDATA_%d_REG", i);
+		mvPp2PrintReg(MV_PP2_CLS4_FDATA_REG(i), reg_name);
+	}
+
+	mvPp2PrintReg(MV_PP2_CLS4_RL_INDEX_REG, "MV_PP2_CLS4_RL_INDEX_REG");
+	mvPp2PrintReg(MV_PP2_CLS4_ACT_REG, "MV_PP2_CLS4_ACT_REG");
+	mvPp2PrintReg(MV_PP2_CLS4_ACT_QOS_ATTR_REG, "MV_PP2_CLS4_ACT_QOS_ATTR_REG");
+	mvPp2PrintReg(MV_PP2_CLS4_ACT_DUP_ATTR_REG, "MV_PP2_CLS4_ACT_DUP_ATTR_REG");
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+void mvPp2ClsC4SwClear(MV_PP2_CLS_C4_ENTRY *C4)
+{
+	memset(C4, 0, sizeof(MV_PP2_CLS_C4_ENTRY));
+}
+
+/*-------------------------------------------------------------------------------*/
+void mvPp2ClsC4HwClearAll()
+{
+	int set, rule;
+	MV_PP2_CLS_C4_ENTRY C4;
+
+	mvPp2ClsC4SwClear(&C4);
+
+	for (set = 0; set < MV_PP2_CLS_C4_GRPS_NUM; set++)
+		for (rule = 0; rule <  MV_PP2_CLS_C4_GRP_SIZE; rule++)
+			mvPp2ClsC4HwWrite(&C4, rule, set);
+}
+/*-------------------------------------------------------------------------------*/
+/*			Classifier C4 engine rules APIs	 			 */
+/*-------------------------------------------------------------------------------*/
+
+/*
+set two bytes of data in fields
+offs - offset in byte resolution
+*/
+int mvPp2ClsC4FieldsShortSet(MV_PP2_CLS_C4_ENTRY *C4, int field, unsigned int offs, unsigned short data)
+{
+	PTR_VALIDATE(C4);
+
+	POS_RANGE_VALIDATE(field, MV_PP2_CLS_C4_FIELDS_NUM-1);
+
+	if ((offs % 2) != 0) {
+		mvOsPrintf("mvCls4Hw %s: offset should be even , current func write two bytes of data.\n", __func__);
+		return MV_CLS4_ERR;
+	}
+
+	if (field < 4) {
+		/* fields 0,1,2,3 lenght is 2 bytes */
+		POS_RANGE_VALIDATE(offs, 0);
+		C4->rules.regs.fdataArr[field/2] &= ~(0xFFFF << (16 * (field % 2)));
+		C4->rules.regs.fdataArr[field/2] |= (data << (16 * (field % 2)));
+	}
+
+	else if (field == 4) {
+		/* field 4 lenght is 16 bytes */
+		POS_RANGE_VALIDATE(offs, 14);
+		C4->rules.regs.fdataArr[5 - offs/4] &= ~(0xFFFF << (16 * ((offs / 2) % 2)));
+		C4->rules.regs.fdataArr[5 - offs/4] |= (data << (16 * ((offs / 2) % 2)));
+	} else {
+		/* field 5 lenght is 6 bytes */
+		POS_RANGE_VALIDATE(offs, 4);
+		C4->rules.regs.fdataArr[7 - offs/4] &= ~(0xFFFF << (16 * ((offs / 2) % 2)));
+		C4->rules.regs.fdataArr[7 - offs/4] |= (data << (16 * ((offs / 2) % 2)));
+	}
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC4FieldsParamsSet(MV_PP2_CLS_C4_ENTRY *C4, int field, unsigned int id, unsigned int op)
+{
+	PTR_VALIDATE(C4);
+	POS_RANGE_VALIDATE(field, MV_PP2_CLS_C4_FIELDS_NUM-1);
+	POS_RANGE_VALIDATE(id, MV_PP2_CLS4_FATTR_ID_MAX);
+	POS_RANGE_VALIDATE(op, MV_PP2_CLS4_FATTR_OPCODE_MAX);
+
+	/* clear old ID and opcode*/
+	C4->rules.regs.attr[GET_FIELD_ATRR(field)] &= ~MV_PP2_CLS4_FATTR_ID_MASK(field);
+	C4->rules.regs.attr[GET_FIELD_ATRR(field)] &= ~MV_PP2_CLS4_FATTR_OPCODE_MASK(field);
+
+	/* write new values */
+	C4->rules.regs.attr[GET_FIELD_ATRR(field)] |=  (op << MV_PP2_CLS4_FATTR_OPCODE(field));
+	C4->rules.regs.attr[GET_FIELD_ATRR(field)] |= 	(id << MV_PP2_CLS4_FATTR_ID(field));
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC4SwVlanSet(MV_PP2_CLS_C4_ENTRY *C4, int vlan)
+{
+	PTR_VALIDATE(C4);
+	POS_RANGE_VALIDATE(vlan, MV_PP2_CLS4_VLAN_MAX);
+
+	C4->rules.regs.fdataArr[6] &= ~MV_PP2_CLS4_VLAN_MASK;
+	C4->rules.regs.fdataArr[6] |= (vlan << MV_PP2_CLS4_FDATA7_VLAN);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC4SwPppoeSet(MV_PP2_CLS_C4_ENTRY *C4, int pppoe)
+{
+	PTR_VALIDATE(C4);
+	POS_RANGE_VALIDATE(pppoe, MV_PP2_CLS4_PPPOE_MAX);
+
+	C4->rules.regs.fdataArr[6] &= ~MV_PP2_CLS4_PPPOE_MASK;
+	C4->rules.regs.fdataArr[6] |= (pppoe << MV_PP2_CLS4_FDATA7_PPPOE);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC4SwMacMeSet(MV_PP2_CLS_C4_ENTRY *C4, int mac)
+{
+	PTR_VALIDATE(C4);
+	POS_RANGE_VALIDATE(mac, MV_PP2_CLS4_MACME_MAX);
+
+	C4->rules.regs.fdataArr[6] &= ~MV_PP2_CLS4_MACME_MASK;
+	C4->rules.regs.fdataArr[6] |= (mac << MV_PP2_CLS4_FDATA7_MACME);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC4SwL4InfoSet(MV_PP2_CLS_C4_ENTRY *C4, int info)
+{
+	PTR_VALIDATE(C4);
+	POS_RANGE_VALIDATE(info, MV_PP2_CLS4_L4INFO_MAX);
+
+	C4->rules.regs.fdataArr[6] &= ~MV_PP2_CLS4_L4INFO_MASK;
+	C4->rules.regs.fdataArr[6] |= (info << MV_PP2_CLS4_FDATA7_L4INFO);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC4SwL3InfoSet(MV_PP2_CLS_C4_ENTRY *C4, int info)
+{
+	PTR_VALIDATE(C4);
+	POS_RANGE_VALIDATE(info, MV_PP2_CLS4_L3INFO_MAX);
+
+	C4->rules.regs.fdataArr[6] &= ~MV_PP2_CLS4_L3INFO_MASK;
+	C4->rules.regs.fdataArr[6] |= (info << MV_PP2_CLS4_FDATA7_L3INFO);
+
+	return MV_OK;
+}
+
+
+/*-------------------------------------------------------------------------------*/
+/*			Classifier C4 engine Public action table APIs 		 */
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC4ColorSet(MV_PP2_CLS_C4_ENTRY *C4, int cmd)
+{
+	PTR_VALIDATE(C4);
+	POS_RANGE_VALIDATE(cmd, COLOR_RED_AND_LOCK);
+
+	C4->sram.regs.actions &= ~ACT_COLOR_MASK;
+	C4->sram.regs.actions |= (cmd << ACT_COLOR);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC4PrioSet(MV_PP2_CLS_C4_ENTRY *C4, int cmd, int prio)
+{
+	PTR_VALIDATE(C4);
+	POS_RANGE_VALIDATE(cmd, UPDATE_AND_LOCK);
+	POS_RANGE_VALIDATE(prio, ACT_QOS_ATTR_PRI_MAX);
+
+	/*set command*/
+	C4->sram.regs.actions &= ~ACT_PRI_MASK;
+	C4->sram.regs.actions |= (cmd << ACT_PRI);
+
+	/*set modify priority value*/
+	C4->sram.regs.qos_attr &= ~ACT_QOS_ATTR_MDF_PRI_MASK;
+	C4->sram.regs.qos_attr |= (prio << ACT_QOS_ATTR_MDF_PRI);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC4DscpSet(MV_PP2_CLS_C4_ENTRY *C4, int cmd, int dscp)
+{
+	PTR_VALIDATE(C4);
+	POS_RANGE_VALIDATE(cmd, UPDATE_AND_LOCK);
+	POS_RANGE_VALIDATE(dscp, ACT_QOS_ATTR_DSCP_MAX);
+
+	/*set command*/
+	C4->sram.regs.actions &= ~ACT_DSCP_MASK;
+	C4->sram.regs.actions |= (cmd << ACT_DSCP);
+
+	/*set modify DSCP value*/
+	C4->sram.regs.qos_attr &= ~ACT_QOS_ATTR_MDF_DSCP_MASK;
+	C4->sram.regs.qos_attr |= (dscp << ACT_QOS_ATTR_MDF_DSCP);
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC4GpidSet(MV_PP2_CLS_C4_ENTRY *C4, int cmd, int gid)
+{
+	PTR_VALIDATE(C4);
+	POS_RANGE_VALIDATE(cmd, UPDATE_AND_LOCK);
+	POS_RANGE_VALIDATE(gid, ACT_QOS_ATTR_GEM_ID_MAX);
+
+	/*set command*/
+	C4->sram.regs.actions &= ~ACT_GEM_ID_MASK;
+	C4->sram.regs.actions |= (cmd << ACT_GEM_ID);
+
+	/*set modify DSCP value*/
+	C4->sram.regs.qos_attr &= ~ACT_QOS_ATTR_MDF_GEM_ID_MASK;
+	C4->sram.regs.qos_attr |= (gid << ACT_QOS_ATTR_MDF_GEM_ID);
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+#ifdef CONFIG_MV_ETH_PP2_1
+int mvPp2ClsC4PolicerSet(MV_PP2_CLS_C4_ENTRY *C4, int cmd, int policerId, int bank)
+{
+	PTR_VALIDATE(C4);
+	POS_RANGE_VALIDATE(cmd, UPDATE_AND_LOCK);
+	POS_RANGE_VALIDATE(policerId, ACT_DUP_POLICER_MAX);
+	BIT_RANGE_VALIDATE(bank);
+
+	C4->sram.regs.actions &= ~ACT_POLICER_SELECT_MASK;
+	C4->sram.regs.actions |= (cmd << ACT_POLICER_SELECT);
+
+	C4->sram.regs.dup_attr &= ~ACT_DUP_POLICER_MASK;
+	C4->sram.regs.dup_attr |= (policerId << ACT_DUP_POLICER_ID);
+
+	if (bank)
+		C4->sram.regs.dup_attr |= ACT_DUP_POLICER_BANK_MASK;
+	else
+		C4->sram.regs.dup_attr &= ~ACT_DUP_POLICER_BANK_MASK;
+
+	return MV_OK;
+}
+#else
+int mvPp2ClsC4PolicerSet(MV_PP2_CLS_C4_ENTRY *C4, int cmd, int policerId)
+{
+	PTR_VALIDATE(C4);
+	POS_RANGE_VALIDATE(cmd, UPDATE_AND_LOCK);
+	POS_RANGE_VALIDATE(policerId, ACT_DUP_POLICER_MAX);
+
+	C4->sram.regs.actions &= ~ACT_POLICER_SELECT_MASK;
+	C4->sram.regs.actions |= (cmd << ACT_POLICER_SELECT);
+
+	C4->sram.regs.dup_attr &= ~ACT_DUP_POLICER_MASK;
+	C4->sram.regs.dup_attr |= (policerId << ACT_DUP_POLICER_ID);
+	return MV_OK;
+}
+#endif /*CONFIG_MV_ETH_PP2_1*/
+
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC4QueueHighSet(MV_PP2_CLS_C4_ENTRY *C4, int cmd, int queue)
+{
+	PTR_VALIDATE(C4);
+
+
+	POS_RANGE_VALIDATE(cmd, UPDATE_AND_LOCK);
+	POS_RANGE_VALIDATE(queue, ACT_QOS_ATTR_MDF_HIGH_Q_MAX);
+
+	/*set command*/
+	C4->sram.regs.actions &= ~ACT_HIGH_Q_MASK;
+	C4->sram.regs.actions |= (cmd << ACT_HIGH_Q);
+
+	/*set modify High queue value*/
+	C4->sram.regs.qos_attr &= ~ACT_QOS_ATTR_MDF_HIGH_Q_MASK;
+	C4->sram.regs.qos_attr |= (queue << ACT_QOS_ATTR_MDF_HIGH_Q);
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsC4QueueLowSet(MV_PP2_CLS_C4_ENTRY *C4, int cmd, int queue)
+{
+	PTR_VALIDATE(C4);
+
+	POS_RANGE_VALIDATE(cmd, UPDATE_AND_LOCK);
+	POS_RANGE_VALIDATE(queue, ACT_QOS_ATTR_MDF_LOW_Q_MAX);
+
+	/*set command*/
+	C4->sram.regs.actions &= ~ACT_LOW_Q_MASK;
+	C4->sram.regs.actions |= (cmd << ACT_LOW_Q);
+
+	/*set modify High queue value*/
+	C4->sram.regs.qos_attr &= ~ACT_QOS_ATTR_MDF_LOW_Q_MASK;
+	C4->sram.regs.qos_attr |= (queue << ACT_QOS_ATTR_MDF_LOW_Q);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC4QueueSet(MV_PP2_CLS_C4_ENTRY *C4, int cmd, int queue)
+{
+	int status = MV_OK;
+	int qHigh, qLow;
+
+	PTR_VALIDATE(C4);
+	POS_RANGE_VALIDATE(queue, ACT_QOS_ATTR_Q_MAX);
+
+	/* cmd validation in set functions */
+
+	qHigh = (queue & ACT_QOS_ATTR_MDF_HIGH_Q_MASK) >> ACT_QOS_ATTR_MDF_HIGH_Q;
+	qLow = (queue & ACT_QOS_ATTR_MDF_LOW_Q_MASK) >> ACT_QOS_ATTR_MDF_LOW_Q;
+
+	status |= mvPp2ClsC4QueueLowSet(C4, cmd, qLow);
+	status |= mvPp2ClsC4QueueHighSet(C4, cmd, qHigh);
+
+	return status;
+
+}
+
+/*-------------------------------------------------------------------------------*/
+/*
+  PPv2.1 (feature MAS 3.9) Add forwarding command to C4
+*/
+int mvPp2ClsC4ForwardSet(MV_PP2_CLS_C4_ENTRY *C4, int cmd)
+{
+	PTR_VALIDATE(C4);
+	POS_RANGE_VALIDATE(cmd, SWF_AND_LOCK);
+
+	C4->sram.regs.actions &= ~ACT_FWD_MASK;
+	C4->sram.regs.actions |= (cmd << ACT_FWD);
+	return MV_OK;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls4Hw.h b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls4Hw.h
new file mode 100644
index 000000000000..01e0b131315f
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2Cls4Hw.h
@@ -0,0 +1,264 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __MV_CLS4_HW_H__
+#define __MV_CLS4_HW_H__
+
+#include "mvPp2ClsActHw.h"
+#include "../common/mvPp2ErrCode.h"
+#include "../common/mvPp2Common.h"
+#include "../gbe/mvPp2GbeRegs.h"
+
+
+/*-------------------------------------------------------------------------------*/
+/*			Classifier C4 Top Registers	    			 */
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_CLS4_PHY_TO_RL_REG(port)				(MV_PP2_REG_BASE + 0x1E00 + ((port)*4))
+
+#define MV_PP2_CLS4_PHY_TO_RL_GRP				0
+#define MV_PP2_CLS4_PHY_TO_RL_GRP_BITS				3
+#define MV_PP2_CLS4_PHY_TO_RL_GRP_MASK				(((1 << MV_PP2_CLS4_PHY_TO_RL_GRP_BITS) - 1) << MV_PP2_CLS4_PHY_TO_RL_GRP)
+
+#define MV_PP2_CLS4_PHY_TO_RL_RULE_NUM				4
+#define MV_PP2_CLS4_PHY_TO_RL_RULE_NUM_BITS			4
+#define MV_PP2_CLS4_PHY_TO_RL_RULE_NUM_MASK			(((1 << MV_PP2_CLS4_PHY_TO_RL_RULE_NUM_BITS) - 1) << MV_PP2_CLS4_PHY_TO_RL_RULE_NUM)
+
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_CLS4_UNI_TO_RL_REG(uni)				(MV_PP2_REG_BASE + 0x1E20 + ((uni)*4))
+
+#define MV_PP2_CLS4_UNI_TO_RL_GRP				0
+#define MV_PP2_CLS4_UNI_TO_RL_RULE_NUM				4
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_CLS4_RL_INDEX_REG				(MV_PP2_REG_BASE + 0x1E40)
+#define MV_PP2_CLS4_RL_INDEX_RULE				0
+#define MV_PP2_CLS4_RL_INDEX_GRP				3
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_CLS4_FATTR1_REG					(MV_PP2_REG_BASE + 0x1E50)
+#define MV_PP2_CLS4_FATTR2_REG					(MV_PP2_REG_BASE + 0x1E54)
+#define MV_PP2_CLS4_FATTR_REG_NUM				2
+
+#define MV_PP2_CLS4_FATTR_ID(field)				(((field) * 9) % 27)
+#define MV_PP2_CLS4_FATTR_ID_BITS				6
+#define MV_PP2_CLS4_FATTR_ID_MAX				((1 << MV_PP2_CLS4_FATTR_ID_BITS) - 1)
+#define MV_PP2_CLS4_FATTR_ID_MASK(field)			(MV_PP2_CLS4_FATTR_ID_MAX << MV_PP2_CLS4_FATTR_ID(field))
+#define MV_PP2_CLS4_FATTR_ID_VAL(field, reg_val)		((reg_val & MV_PP2_CLS4_FATTR_ID_MASK(field)) >> MV_PP2_CLS4_FATTR_ID(field))
+
+#define MV_PP2_CLS4_FATTR_OPCODE_BITS				3
+#define MV_PP2_CLS4_FATTR_OPCODE(field)				((((field) * 9) % 27) + MV_PP2_CLS4_FATTR_ID_BITS)
+#define MV_PP2_CLS4_FATTR_OPCODE_MAX				((1 << MV_PP2_CLS4_FATTR_OPCODE_BITS) - 1)
+#define MV_PP2_CLS4_FATTR_OPCODE_MASK(field)			(MV_PP2_CLS4_FATTR_OPCODE_MAX << MV_PP2_CLS4_FATTR_OPCODE(field))
+#define MV_PP2_CLS4_FATTR_OPCODE_VAL(field, reg_val)		((reg_val & MV_PP2_CLS4_FATTR_OPCODE_MASK(field)) >> MV_PP2_CLS4_FATTR_OPCODE(field))
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_CLS4_FDATA1_REG					(MV_PP2_REG_BASE + 0x1E58)
+#define MV_PP2_CLS4_FDATA2_REG					(MV_PP2_REG_BASE + 0x1E5C)
+#define MV_PP2_CLS4_FDATA3_REG					(MV_PP2_REG_BASE + 0x1E60)
+#define MV_PP2_CLS4_FDATA4_REG					(MV_PP2_REG_BASE + 0x1E64)
+#define MV_PP2_CLS4_FDATA5_REG					(MV_PP2_REG_BASE + 0x1E68)
+#define MV_PP2_CLS4_FDATA6_REG					(MV_PP2_REG_BASE + 0x1E6C)
+#define MV_PP2_CLS4_FDATA7_REG					(MV_PP2_REG_BASE + 0x1E70)
+#define MV_PP2_CLS4_FDATA8_REG					(MV_PP2_REG_BASE + 0x1E74)
+#define MV_PP2_CLS4_FDATA_REG(reg_num)				(MV_PP2_REG_BASE + 0x1E58 + (4*(reg_num)))
+#define MV_PP2_CLS4_FDATA_REGS_NUM				8
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS4_FDATA7_L3INFO				16
+#define MV_PP2_CLS4_FDATA7_L3INFO_BITS				4
+#define MV_PP2_CLS4_L3INFO_MAX					((1 << MV_PP2_CLS4_FDATA7_L3INFO_BITS) - 1)
+#define MV_PP2_CLS4_L3INFO_MASK					(MV_PP2_CLS4_L3INFO_MAX << MV_PP2_CLS4_FDATA7_L3INFO)
+#define MV_PP2_CLS4_L3INFO_VAL(reg_val)				(((reg_val) & MV_PP2_CLS4_L3INFO_MASK) >> MV_PP2_CLS4_FDATA7_L3INFO)
+
+#define MV_PP2_CLS4_FDATA7_L4INFO				20
+#define MV_PP2_CLS4_FDATA7_L4INFO_BITS				4
+#define MV_PP2_CLS4_L4INFO_MAX					((1 << MV_PP2_CLS4_FDATA7_L4INFO_BITS) - 1)
+#define MV_PP2_CLS4_L4INFO_MASK					(MV_PP2_CLS4_L4INFO_MAX << MV_PP2_CLS4_FDATA7_L4INFO)
+#define MV_PP2_CLS4_L4INFO_VAL(reg_val)				(((reg_val) & MV_PP2_CLS4_L4INFO_MASK) >> MV_PP2_CLS4_FDATA7_L4INFO)
+
+
+#define MV_PP2_CLS4_FDATA7_MACME				24
+#define MV_PP2_CLS4_FDATA7_MACME_BITS				2
+#define MV_PP2_CLS4_MACME_MAX					((1 << MV_PP2_CLS4_FDATA7_MACME_BITS) - 1)
+#define MV_PP2_CLS4_MACME_MASK					(MV_PP2_CLS4_MACME_MAX << MV_PP2_CLS4_FDATA7_MACME)
+#define MV_PP2_CLS4_MACME_VAL(reg_val)				(((reg_val) & MV_PP2_CLS4_MACME_MASK) >> MV_PP2_CLS4_FDATA7_MACME)
+
+#define MV_PP2_CLS4_FDATA7_PPPOE				26
+#define MV_PP2_CLS4_FDATA7_PPPOE_BITS				2
+#define MV_PP2_CLS4_PPPOE_MAX					((1 << MV_PP2_CLS4_FDATA7_PPPOE_BITS) - 1)
+#define MV_PP2_CLS4_PPPOE_MASK					(MV_PP2_CLS4_PPPOE_MAX << MV_PP2_CLS4_FDATA7_PPPOE)
+#define MV_PP2_CLS4_PPPOE_VAL(reg_val)				(((reg_val) & MV_PP2_CLS4_PPPOE_MASK) >> MV_PP2_CLS4_FDATA7_PPPOE)
+
+#define MV_PP2_CLS4_FDATA7_VLAN					28
+#define MV_PP2_CLS4_FDATA7_VLAN_BITS				3
+#define MV_PP2_CLS4_VLAN_MAX					((1 << MV_PP2_CLS4_FDATA7_VLAN_BITS) - 1)
+#define MV_PP2_CLS4_VLAN_MASK					(MV_PP2_CLS4_VLAN_MAX << MV_PP2_CLS4_FDATA7_VLAN)
+#define MV_PP2_CLS4_VLAN_VAL(reg_val)				(((reg_val) & MV_PP2_CLS4_VLAN_MASK) >> MV_PP2_CLS4_FDATA7_VLAN)
+
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_CLS4_ACT_REG					(MV_PP2_REG_BASE + 0x1E80)
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_CLS4_ACT_QOS_ATTR_REG				(MV_PP2_REG_BASE + 0x1E84)
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_CLS4_ACT_DUP_ATTR_REG				(MV_PP2_REG_BASE + 0x1E88)
+/*-------------------------------------------------------------------------------*/
+/*PPv2.1 new counters MAS 3.20*/
+#define MV_PP2_V1_CNT_IDX_REG				(MV_PP2_REG_BASE + 0x7040)
+#define MV_PP2_V1_CNT_IDX_RULE(rule, set)		((rule) << 3 | (set))
+
+#define MV_PP2_V1_CLS_C4_TBL_HIT_REG			(MV_PP2_REG_BASE + 0x7708)
+
+/*-------------------------------------------------------------------------------*/
+/*			Classifier C4 engine Public APIs			 */
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_CLS_C4_GRP_SIZE					(8)
+#define MV_PP2_CLS_C4_GRPS_NUM					(8)
+#define MV_PP2_CLS_C4_TBL_WORDS					(10)
+#define MV_PP2_CLS_C4_TBL_DATA_WORDS				(8)
+#define MV_PP2_CLS_C4_SRAM_WORDS				(3)
+
+/* Field 0- 3 */
+#define FLD_FMT							"%4.4x"
+#define FLD_VAL(field, p)					((p[field/2] >> (16 * (field % 2))) & 0xFFFF)
+
+/* field 4 */
+#define FLD4_FMT						"%8.8x %8.8x %8.8x %8.8x"
+#define FLD4_VAL(p)						p[2], p[3], p[4], p[5]
+/* field 5 */
+#define FLD5_FMT						"%4.4x %8.8x"
+#define FLD5_VAL(p)						p[6] & 0xFFFF, p[7]
+
+#define MV_PP2_CLS_C4_FIELDS_NUM				6
+#define GET_FIELD_ATRR(field)					((field) / 3)
+
+typedef struct mvPp2ClsC4RuleEntry {
+	unsigned int ruleIndex;
+	unsigned int setIndex;
+	union {
+		MV_U32	words[MV_PP2_CLS_C4_TBL_WORDS];
+		struct {
+			MV_U32 attr[MV_PP2_CLS4_FATTR_REG_NUM];
+			MV_U32 fdataArr[MV_PP2_CLS_C4_TBL_DATA_WORDS];
+		} regs;
+	} rules;
+	union {
+		MV_U32 words[MV_PP2_CLS_C4_SRAM_WORDS];
+		struct {
+			MV_U32 actions;/* 0x1E80 */
+			MV_U32 qos_attr;/* 0x1E84*/
+			MV_U32 dup_attr;/* 0x1E88 */
+		} regs;
+	} sram;
+} MV_PP2_CLS_C4_ENTRY;
+
+
+int mvPp2ClsC4HwPortToRulesSet(int port, int set, int rules);
+int mvPp2ClsC4HwUniToRulesSet(int uniPort, int set, int rules);
+int mvPp2ClsC4HwPortToRulesGet(int port, int *set, int *rules);
+int mvPp2ClsC4HwUniToRulesGet(int uni, int *set, int *rules);
+int mvPp2ClsC4HwRead(MV_PP2_CLS_C4_ENTRY *C4, int rule, int set);
+int mvPp2ClsC4HwWrite(MV_PP2_CLS_C4_ENTRY *C4, int rule, int set);
+int mvPp2ClsC4SwDump(MV_PP2_CLS_C4_ENTRY *C4);
+void mvPp2ClsC4SwClear(MV_PP2_CLS_C4_ENTRY *C4);
+void mvPp2ClsC4HwClearAll(void);
+int mvPp2ClsC4RegsDump(void);
+int mvPp2V1ClsC4HwHitsDump(void);
+int mvPp2ClsC4HwDumpAll(void);
+int mvPp2V1ClsC4HwCntDump(int rule, int set, unsigned int *cnt);
+
+
+/*-------------------------------------------------------------------------------*/
+/*			Classifier C4 engine rules APIs	 			 */
+/*-------------------------------------------------------------------------------*/
+
+
+int mvPp2ClsC4FieldsShortSet(MV_PP2_CLS_C4_ENTRY *C4, int field, unsigned int offs, unsigned short data);
+int mvPp2ClsC4FieldsParamsSet(MV_PP2_CLS_C4_ENTRY *C4, int field, unsigned int id, unsigned int op);
+int mvPp2ClsC4SwVlanSet(MV_PP2_CLS_C4_ENTRY *C4, int vlan);
+int mvPp2ClsC4SwPppoeSet(MV_PP2_CLS_C4_ENTRY *C4, int pppoe);
+int mvPp2ClsC4SwMacMeSet(MV_PP2_CLS_C4_ENTRY *C4, int mac);
+int mvPp2ClsC4SwL4InfoSet(MV_PP2_CLS_C4_ENTRY *C4, int info);
+int mvPp2ClsC4SwL3InfoSet(MV_PP2_CLS_C4_ENTRY *C4, int info);
+
+
+/*-------------------------------------------------------------------------------*/
+/*			Classifier C4 engine action table APIs 			 */
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsC4ColorSet(MV_PP2_CLS_C4_ENTRY *C4, int cmd);
+int mvPp2ClsC4PrioSet(MV_PP2_CLS_C4_ENTRY *C4, int cmd, int prio);
+int mvPp2ClsC4DscpSet(MV_PP2_CLS_C4_ENTRY *C4, int cmd, int dscp);
+int mvPp2ClsC4GpidSet(MV_PP2_CLS_C4_ENTRY *C4, int cmd, int gpid);
+int mvPp2ClsC4ForwardSet(MV_PP2_CLS_C4_ENTRY *c4, int cmd);
+#ifdef CONFIG_MV_ETH_PP2_1
+int mvPp2ClsC4PolicerSet(MV_PP2_CLS_C4_ENTRY *c2, int cmd, int policerId, int bank);
+#else
+int mvPp2ClsC4PolicerSet(MV_PP2_CLS_C4_ENTRY *c2, int cmd, int policerId);
+#endif
+int mvPp2ClsC4QueueHighSet(MV_PP2_CLS_C4_ENTRY *C4, int cmd, int queue);
+int mvPp2ClsC4QueueLowSet(MV_PP2_CLS_C4_ENTRY *C4, int cmd, int queue);
+int mvPp2ClsC4QueueSet(MV_PP2_CLS_C4_ENTRY *C4, int cmd, int queue);
+/*
+  PPv2.1 (feature MAS 3.9) Add forwarding command to C4
+*/
+int mvPp2ClsC4ForwardSet(MV_PP2_CLS_C4_ENTRY *c4, int cmd);
+
+#endif /* MV_CLS4_HW */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2ClsActHw.h b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2ClsActHw.h
new file mode 100644
index 000000000000..0e5b22bde806
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2ClsActHw.h
@@ -0,0 +1,254 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __MV_CLS_ACT_HW_H__
+#define __MV_CLS_ACT_HW_H__
+
+
+/*-------------------------------------------------------------------------------*/
+/*		Classifier engines Actions Table offsets	    		 */
+/*-------------------------------------------------------------------------------*/
+
+/*action_tbl*/
+#define ACT_TBL_ID			0
+#define ACT_TBL_ID_BITS			6
+#define ACT_TBL_ID_MASK			((1 << ACT_TBL_ID_BITS) - 1)
+
+#define ACT_TBL_SEL			6
+#define ACT_TBL_SEL_MASK		(1 << ACT_TBL_SEL)
+
+#define ACT_TBL_PRI_DSCP		7
+#define ACT_TBL_PRI_DSCP_MASK		(1 << ACT_TBL_PRI_DSCP)
+
+#define ACT_TBL_GEM_ID			8
+#define ACT_TBL_GEM_ID_MASK		(1 << ACT_TBL_GEM_ID)
+
+#define ACT_TBL_LOW_Q			9
+#define ACT_TBL_LOW_Q_MASK		(1 << ACT_TBL_LOW_Q)
+
+#define ACT_TBL_HIGH_Q			10
+#define ACT_TBL_HIGH_Q_MASK		(1 << ACT_TBL_HIGH_Q)
+
+#define ACT_TBL_COLOR			11
+#define ACT_TBL_COLOR_MASK		(1 << ACT_TBL_COLOR)
+
+/*actions*/
+#define ACT_COLOR			0
+#define ACT_COLOR_BITS			3
+#define ACT_COLOR_MASK			(((1 << ACT_COLOR_BITS) - 1) << ACT_COLOR)
+
+#define ACT_PRI				3
+#define ACT_PRI_BITS			2
+#define ACT_PRI_MASK			(((1 << ACT_PRI_BITS) - 1) << ACT_PRI)
+#define ACT_PRI_MAX			((1 << ACT_PRI_BITS) - 1)
+
+
+#define ACT_DSCP			5
+#define ACT_DSCP_BITS			2
+#define ACT_DSCP_MASK			(((1 << ACT_DSCP_BITS) - 1) << ACT_DSCP)
+
+#define ACT_GEM_ID			7
+#define ACT_GEM_ID_BITS			2
+#define ACT_GEM_ID_MASK			(((1 << ACT_GEM_ID_BITS) - 1) << ACT_GEM_ID)
+
+#define ACT_LOW_Q			9
+#define ACT_LOW_Q_BITS			2
+#define ACT_LOW_Q_MASK			(((1 << ACT_LOW_Q_BITS) - 1) << ACT_LOW_Q)
+
+
+#define ACT_HIGH_Q			11
+#define ACT_HIGH_Q_BITS			2
+#define ACT_HIGH_Q_MASK			(((1 << ACT_HIGH_Q_BITS) - 1) << ACT_HIGH_Q)
+
+#define ACT_FWD				13
+#define ACT_FWD_BITS			3
+#define ACT_FWD_MASK			(((1 << ACT_FWD_BITS) - 1) << ACT_FWD)
+
+#define ACT_POLICER_SELECT		16
+#define ACT_POLICER_SELECT_BITS		2
+#define ACT_POLICER_SELECT_MASK		(((1 << ACT_POLICER_SELECT_BITS) - 1) << ACT_POLICER_SELECT)
+
+#define ACT_FLOW_ID_EN			18
+#define ACT_FLOW_ID_EN_MASK		(1 << ACT_FLOW_ID_EN)
+
+/*qos_attr*/
+#define ACT_QOS_ATTR_MDF_PRI		0
+#define ACT_QOS_ATTR_PRI_BITS		3
+#define ACT_QOS_ATTR_MDF_PRI_MASK	(((1 << ACT_QOS_ATTR_PRI_BITS) - 1) << ACT_QOS_ATTR_MDF_PRI)
+#define ACT_QOS_ATTR_PRI_MAX		((1 << ACT_QOS_ATTR_PRI_BITS) - 1)
+
+#define ACT_QOS_ATTR_MDF_DSCP		3
+#define ACT_QOS_ATTR_DSCP_BITS		6
+#define ACT_QOS_ATTR_MDF_DSCP_MASK	(((1 << ACT_QOS_ATTR_DSCP_BITS) - 1) << ACT_QOS_ATTR_MDF_DSCP)
+#define ACT_QOS_ATTR_DSCP_MAX		((1 << ACT_QOS_ATTR_DSCP_BITS) - 1)
+
+#define ACT_QOS_ATTR_MDF_GEM_ID		9
+#define ACT_QOS_ATTR_GEM_ID_BITS	12
+#define ACT_QOS_ATTR_MDF_GEM_ID_MASK	(((1 << ACT_QOS_ATTR_GEM_ID_BITS) - 1) << ACT_QOS_ATTR_MDF_GEM_ID)
+#define ACT_QOS_ATTR_GEM_ID_MAX		((1 << ACT_QOS_ATTR_GEM_ID_BITS) - 1)
+
+
+#define ACT_QOS_ATTR_MDF_LOW_Q		21
+#define ACT_QOS_ATTR_MDF_LOW_Q_BITS	3
+#define ACT_QOS_ATTR_MDF_LOW_Q_MAX	((1 << ACT_QOS_ATTR_MDF_LOW_Q_BITS) - 1)
+#define ACT_QOS_ATTR_MDF_LOW_Q_MASK	(ACT_QOS_ATTR_MDF_LOW_Q_MAX << ACT_QOS_ATTR_MDF_LOW_Q)
+
+#define ACT_QOS_ATTR_MDF_HIGH_Q		24
+#define ACT_QOS_ATTR_MDF_HIGH_Q_BITS	5
+#define ACT_QOS_ATTR_MDF_HIGH_Q_MAX	((1 << ACT_QOS_ATTR_MDF_HIGH_Q_BITS) - 1)
+#define ACT_QOS_ATTR_MDF_HIGH_Q_MASK	(ACT_QOS_ATTR_MDF_HIGH_Q_MAX << ACT_QOS_ATTR_MDF_HIGH_Q)
+
+#define ACT_QOS_ATTR_Q_MAX		((1 << (ACT_QOS_ATTR_MDF_HIGH_Q_BITS + ACT_QOS_ATTR_MDF_LOW_Q_BITS)) - 1)
+/*hwf_attr*/
+
+#define	ACT_HWF_ATTR_DPTR		1
+#define	ACT_HWF_ATTR_DPTR_BITS		15
+#define	ACT_HWF_ATTR_DPTR_MASK		(((1 << ACT_HWF_ATTR_DPTR_BITS) - 1) << ACT_HWF_ATTR_DPTR)
+#define	ACT_HWF_ATTR_DPTR_MAX		((1 << ACT_HWF_ATTR_DPTR_BITS) - 1)
+
+#define	ACT_HWF_ATTR_IPTR		16
+#define	ACT_HWF_ATTR_IPTR_BITS		8
+#define	ACT_HWF_ATTR_IPTR_MASK		(((1 << ACT_HWF_ATTR_IPTR_BITS) - 1) << ACT_HWF_ATTR_IPTR)
+#define	ACT_HWF_ATTR_IPTR_MAX		((1 << ACT_HWF_ATTR_IPTR_BITS) - 1)
+
+#define	ACT_HWF_ATTR_CHKSM_EN		24
+#define	ACT_HWF_ATTR_CHKSM_EN_MASK	(1 << ACT_HWF_ATTR_CHKSM_EN)
+
+/*
+  PPv2.1 (feature MAS 3.7) new field in action table (c2, c3)
+ */
+#define ACT_HWF_ATTR_MTU_INX		25
+#define ACT_HWF_ATTR_MTU_INX_BITS	4
+#define ACT_HWF_ATTR_MTU_INX_MAX	((1 << ACT_HWF_ATTR_MTU_INX_BITS) - 1)
+#define	ACT_HWF_ATTR_MTU_INX_MASK	((ACT_HWF_ATTR_MTU_INX_MAX) << ACT_HWF_ATTR_MTU_INX)
+
+
+/*MV_U32 dup_attr*/
+#define ACT_DUP_FID			0
+#define ACT_DUP_FID_BITS		8
+#define ACT_DUP_FID_MASK		(((1 << ACT_DUP_FID_BITS) - 1) << ACT_DUP_FID)
+#define ACT_DUP_FID_MAX			((1 << ACT_DUP_FID_BITS) - 1)
+
+
+#define ACT_DUP_COUNT			8
+#define ACT_DUP_COUNT_BITS		4
+#define ACT_DUP_COUNT_MASK		(((1 << ACT_DUP_COUNT_BITS) - 1) << ACT_DUP_COUNT)
+#define ACT_DUP_COUNT_MAX		14
+
+#ifdef CONFIG_MV_ETH_PP2_1
+#define ACT_DUP_POLICER_ID		24
+#define ACT_DUP_POLICER_ID_BITS		5
+#else
+#define ACT_DUP_POLICER_ID		28
+#define ACT_DUP_POLICER_ID_BITS		4
+#endif
+
+#define ACT_DUP_POLICER_MASK		(((1 << ACT_DUP_POLICER_ID_BITS) - 1) << ACT_DUP_POLICER_ID)
+#define ACT_DUP_POLICER_MAX		((1 << ACT_DUP_POLICER_ID_BITS) - 1)
+
+/*only in ppv2.1*/
+#define ACT_DUP_POLICER_BANK_BIT	29
+#define ACT_DUP_POLICER_BANK_MASK	(1 << ACT_DUP_POLICER_BANK_BIT)
+
+
+/*-------------------------------------------------------------------------------*/
+/*		Classifier engines Actions Table offsets	    		 */
+/*-------------------------------------------------------------------------------*/
+
+typedef enum {
+	COLOR_NO_UPDATE = 0,
+	COLOR_NO_UPDATE_AND_LOCK = 1,
+	COLOR_GREEN = 2,
+	COLOR_GREEN_AND_LOCK = 3,
+	COLOR_YELLOW = 4,
+	COLOR_YELLOW_AND_LOCK = 5,
+	COLOR_RED = 6,
+	COLOR_RED_AND_LOCK = 7
+} MV_PP2_CLS_COLOR_CMD;
+
+typedef enum MV_PP2_CLS_HWF_CMD {
+	HWF_NO_UPDATE = 0,
+	HWF_NO_UPDATE_AND_LOCK,
+	SWF,
+	SWF_AND_LOCK,
+	HWF,
+	HWF_AND_LOCK,
+	HWF_AND_LOW_LATENCY,
+	HWF_AND_LOW_LATENCY_AND_LOCK
+} MV_PP2_CLS_HWF_CMD;
+
+typedef enum {
+	NO_UPDATE = 0,
+	NO_UPDATE_AND_LOCK,
+	UPDATE,
+	UPDATE_AND_LOCK
+} MV_PP2_CLS_CMD;
+
+typedef enum {
+	GREEN = 0,
+	YELLOW,
+	RED
+} MV_PP2_CLS_COLOR;
+
+#endif /*__MV_CLS_ACT_HW_H__*/
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2ClsHw.c b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2ClsHw.c
new file mode 100644
index 000000000000..ddd078d4cd15
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2ClsHw.c
@@ -0,0 +1,1269 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#include "mvPp2ClsHw.h"
+
+/* TODO: chage to char arrays */
+int mvClsLkpShadowTbl[2 * MV_PP2_CLS_LKP_TBL_SIZE];
+int mvClsFlowShadowTbl[MV_PP2_CLS_FLOWS_TBL_SIZE];
+
+
+/******************************************************************************/
+/**************** Classifier Top Public initialization APIs *******************/
+/******************************************************************************/
+int mvPp2ClsInit()
+{
+	int rc;
+	/*TODO - SET MTU */
+	/* Enabled Classifier */
+	rc = mvPp2ClsHwEnable(1);
+	if (rc)
+		return rc;
+	/* clear cls flow table and shadow */
+	mvPp2ClsHwFlowClearAll();
+
+	/* clear cls lookup table and shadow */
+	mvPp2ClsHwLkpClearAll();
+
+	return MV_OK;
+}
+void mvPp2ClsShadowInit()
+{
+	memset(mvClsLkpShadowTbl, NOT_IN_USE, 2 * MV_PP2_CLS_LKP_TBL_SIZE * sizeof(int));
+	memset(mvClsFlowShadowTbl, NOT_IN_USE, MV_PP2_CLS_FLOWS_TBL_SIZE * sizeof(int));
+}
+void mvPp2ClsHwLastBitWorkAround()
+{
+	/* workaround to hw bug - set last bit in flow entry */
+	mvPp2WrReg(MV_PP2_CLS_FLOW_INDEX_REG, 0);
+	mvPp2WrReg(MV_PP2_CLS_FLOW_TBL0_REG, 1);
+}
+
+int mvPp2ClsHwPortDefConfig(int port, int way, int lkpid, int rxq)
+{
+	MV_PP2_CLS_LKP_ENTRY  le;
+
+	mvPp2ClsHwPortWaySet(port, way);
+	/*
+	the entry to be accessed in lookupid decoding table
+	is acording to way and lkpid
+	*/
+	le.way = way;
+	le.lkpid = lkpid;
+	le.data = 0;
+
+	mvPp2ClsSwLkpRxqSet(&le, rxq);
+
+	/* do not use classification engines */
+	mvPp2ClsSwLkpEnSet(&le, 0);
+
+	/* write entry */
+	mvPp2ClsHwLkpWrite(lkpid, way, &le);
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsHwEnable(int enable)
+{
+	mvPp2WrReg(MV_PP2_CLS_MODE_REG, (unsigned int)(enable << MV_PP2_CLS_MODE_ACTIVE_BIT));
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsHwPortWaySet(int port, int way)
+{
+	POS_RANGE_VALIDATE(port, MV_PP2_MAX_PORTS-1);
+	POS_RANGE_VALIDATE(way, ONE_BIT_MAX);
+
+	if (way == 1)
+		MV_REG_BIT_SET(MV_PP2_CLS_PORT_WAY_REG, MV_PP2_CLS_PORT_WAY_MASK(port));
+	else
+		MV_REG_BIT_RESET(MV_PP2_CLS_PORT_WAY_REG, MV_PP2_CLS_PORT_WAY_MASK(port));
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsHwPortSpidSet(int port, int spid)
+{
+	unsigned int regVal;
+
+	POS_RANGE_VALIDATE(port, ETH_PORTS_NUM-1);
+	POS_RANGE_VALIDATE(spid, MV_PP2_CLS_PORT_SPID_MAX);
+
+	regVal = mvPp2RdReg(MV_PP2_CLS_PORT_SPID_REG);
+	regVal &= ~MV_PP2_CLS_PORT_SPID_MASK(port);
+	regVal |=  MV_PP2_CLS_PORT_SPID_VAL(port, spid);
+	mvPp2WrReg(MV_PP2_CLS_PORT_SPID_REG, regVal);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsHwUniPortSet(int uni_port, int spid)
+{
+	unsigned int regVal;
+
+	POS_RANGE_VALIDATE(uni_port, UNI_MAX);
+	POS_RANGE_VALIDATE(spid, MV_PP2_CLS_SPID_MAX);
+
+	regVal = mvPp2RdReg(MV_PP2_CLS_SPID_UNI_REG(spid));
+	regVal &= ~MV_PP2_CLS_SPID_UNI_MASK(spid);
+	regVal |=  MV_PP2_CLS_SPID_UNI_VAL(spid, uni_port);
+	mvPp2WrReg(MV_PP2_CLS_SPID_UNI_REG(spid), regVal);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsHwVirtPortSet(int index, int gem_portid)
+{
+	POS_RANGE_VALIDATE(index, MV_PP2_CLS_GEM_VIRT_REGS_NUM - 1);
+	POS_RANGE_VALIDATE(gem_portid, MV_PP2_CLS_GEM_VIRT_MAX);
+#ifdef CONFIG_MV_ETH_PP2_1
+	mvPp2WrReg(MV_PP2_CLS_GEM_VIRT_INDEX_REG, index);
+	mvPp2WrReg(MV_PP2_CLS_GEM_VIRT_REG, gem_portid);
+#else
+	mvPp2WrReg(MV_PP2_CLS_GEM_VIRT_REG(index), gem_portid);
+#endif /* CONFIG_MV_ETH_PP2_1 */
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsHwUdfSet(int udf_no, int offs_id, int offs_bits, int size_bits)
+{
+	unsigned int regVal;
+
+	POS_RANGE_VALIDATE(offs_id, MV_PP2_CLS_UDF_OFFSET_ID_MAX);
+	POS_RANGE_VALIDATE(offs_bits, MV_PP2_CLS_UDF_REL_OFFSET_MAX);
+	POS_RANGE_VALIDATE(size_bits, MV_PP2_CLS_UDF_SIZE_MASK);
+	POS_RANGE_VALIDATE(udf_no, MV_PP2_CLS_UDF_REGS_NUM - 1);
+
+	regVal = mvPp2RdReg(MV_PP2_CLS_UDF_REG(udf_no));
+	regVal &= ~MV_PP2_CLS_UDF_OFFSET_ID_MASK;
+	regVal &= ~MV_PP2_CLS_UDF_REL_OFFSET_MASK;
+	regVal &= ~MV_PP2_CLS_UDF_SIZE_MASK;
+
+	regVal |= (offs_id << MV_PP2_CLS_UDF_OFFSET_ID_OFFS);
+	regVal |= (offs_bits << MV_PP2_CLS_UDF_REL_OFFSET_OFFS);
+	regVal |= (size_bits << MV_PP2_CLS_UDF_SIZE_OFFS);
+
+	mvPp2WrReg(MV_PP2_CLS_UDF_REG(udf_no), regVal);
+
+	return MV_OK;
+}
+#ifdef CONFIG_MV_ETH_PP2_1
+/*-------------------------------------------------------------------------------*/
+/*
+PPv2.1 (feature MAS 3.7) feature update
+Note: this function only set oversize rxq_low value, rxq_high will be set by mvPp2ClsHwRxQueueHighSet
+*/
+int mvPp2ClsHwOversizeRxqLowSet(int port, int rxq)
+{
+	POS_RANGE_VALIDATE(rxq, MV_PP2_CLS_OVERSIZE_RXQ_LOW_MAX);
+
+	/* set oversize rxq */
+	mvPp2WrReg(MV_PP2_CLS_OVERSIZE_RXQ_LOW_REG(port), rxq);
+
+	return MV_OK;
+}
+#else
+/*-------------------------------------------------------------------------------*/
+/*
+Note: this function set oversize rxq including rxq_high and rxq_low for PPv2
+*/
+int mvPp2ClsHwOversizeRxqSet(int port, int rxq)
+{
+
+	unsigned int regVal;
+
+	POS_RANGE_VALIDATE(rxq, MV_PP2_CLS_OVERSIZE_RXQ_MAX);
+
+	/* set oversize rxq */
+	regVal = mvPp2RdReg(MV_PP2_CLS_OVERSIZE_RXQ_REG(port));
+	regVal &= ~MV_PP2_CLS_OVERSIZE_RX_MASK;
+	regVal |= (rxq << MV_PP2_CLS_OVERSIZE_RXQ_OFFS);
+	mvPp2WrReg(MV_PP2_CLS_OVERSIZE_RXQ_REG(port), regVal);
+
+	return MV_OK;
+}
+#endif
+/*-------------------------------------------------------------------------------*/
+/*PPv2.1 feature changed MAS 3.7*/
+int mvPp2V0ClsHwMtuSet(int port, int txp, int mtu)
+{
+	int eport;
+	unsigned int regVal;
+
+	POS_RANGE_VALIDATE(mtu, MV_PP2_CLS_MTU_MAX);
+
+	if (port == MV_PP2_PON_PORT_ID)  {/*pon*/
+		POS_RANGE_VALIDATE(txp, MV_PP2_MAX_TCONT - 1); /*txq num in pon*/
+		eport = txp; /* regs 0 - 15 for pon txq */
+	} else {
+		POS_RANGE_VALIDATE(port, ETH_PORTS_NUM - 1);
+		eport = 16 + port;/* regs 16 - 23 ethernet port */
+	}
+
+	/* set mtu */
+	regVal = mvPp2RdReg(MV_PP2_CLS_MTU_REG(eport));
+	regVal &= ~MV_PP2_CLS_MTU_MASK;
+	regVal |= (mtu << MV_PP2_CLS_MTU_OFFS);
+	mvPp2WrReg(MV_PP2_CLS_MTU_REG(eport), regVal);
+
+	return MV_OK;
+
+}
+
+int mvPp2V1ClsHwMtuSet(int index, int mtu)
+{
+	POS_RANGE_VALIDATE(mtu, MV_PP2_CLS_MTU_MAX);
+	POS_RANGE_VALIDATE(index, 15 /* define MAX value */);
+
+	/* set mtu */
+	mvPp2WrReg(MV_PP2_CLS_MTU_REG(index), mtu);
+	return MV_OK;
+
+}
+
+/*-------------------------------------------------------------------------------
+PPv2.1 new feature MAS 3.5
+set high queue -
+	from = 0 : The value of QueueHigh is as defined by the Classifier
+	from = 1 : The value of QueueHigh set to queue
+	None: this function overwite rxq value that set by mvPp2ClsHwOversizeRxSet
+-------------------------------------------------------------------------------*/
+
+int mvPp2ClsHwRxQueueHighSet(int port, int from, int queue)
+{
+	unsigned int regVal;
+
+	POS_RANGE_VALIDATE(port, MV_PP2_MAX_PORTS - 1);
+	POS_RANGE_VALIDATE(from, 1);
+
+	regVal = mvPp2RdReg(MV_PP2_CLS_SWFWD_PCTRL_REG);
+
+	if (from) {
+		POS_RANGE_VALIDATE(queue, MV_PP2_CLS_SWFWD_P2HQ_QUEUE_MASK);
+		mvPp2WrReg(MV_PP2_CLS_SWFWD_P2HQ_REG(port), queue);
+		regVal |= MV_PP2_CLS_SWFWD_PCTRL_MASK(port);
+	} else
+		regVal &= ~MV_PP2_CLS_SWFWD_PCTRL_MASK(port);
+
+	mvPp2WrReg(MV_PP2_CLS_SWFWD_PCTRL_REG, regVal);
+
+	return MV_OK;
+
+}
+
+/*-------------------------------------------------------------------------------
+PPv2.1 new feature MAS 3.18
+	virtEn: port support/not support generation of virtual portId, not relevant for PON port.
+	uniEn:  port support/not support generation of UNI portId
+	mh: default Marvell header value, used for For UNI and Virtual Port ID generation
+	    in case that ETH port do not support Marvell header, not relevant for PON port.
+-------------------------------------------------------------------------------*/
+int mvPp2ClsHwMhSet(int port, int virtEn, int uniEn, unsigned short mh)
+{
+	unsigned int regVal = 0;
+	int uniDisable = 1 - uniEn;
+	int VirtDisable = 1 - virtEn;
+
+	POS_RANGE_VALIDATE(port, MV_PP2_MAX_PORTS - 1);
+	BIT_RANGE_VALIDATE(uniEn);
+	BIT_RANGE_VALIDATE(virtEn);
+	POS_RANGE_VALIDATE(mh, MV_PP2_CLS_PCTRL_MH_MASK);
+
+	if (MV_PP2_IS_PON_PORT(port))
+		regVal = uniDisable << MV_PP2_CLS_PCTRL_UNI_EN_OFFS;
+	else
+		regVal = (uniDisable << MV_PP2_CLS_PCTRL_UNI_EN_OFFS) |
+			(VirtDisable << MV_PP2_CLS_PCTRL_VIRT_EN_OFFS) |
+			(mh << MV_PP2_CLS_PCTRL_MH_OFFS);
+
+	mvPp2WrReg(MV_PP2_CLS_PCTRL_REG(port), regVal);
+
+	return MV_OK;
+
+}
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsHwSeqInstrSizeSet(int index, int size)
+{
+	unsigned int regVal;
+
+	POS_RANGE_VALIDATE(index, MV_PP2_CLS_SEQ_INDEX_MAX);
+	POS_RANGE_VALIDATE(size, MV_PP2_CLS_SEQ_SIZE_MAX);
+
+	regVal = mvPp2RdReg(MV_PP2_CLS_SEQ_SIZE_REG);
+	regVal &= ~MV_PP2_CLS_SEQ_SIZE_MASK(index);
+	regVal |= MV_PP2_CLS_SEQ_SIZE_VAL(index, size);
+	mvPp2WrReg(MV_PP2_CLS_SEQ_SIZE_REG, regVal);
+
+	return MV_OK;
+}
+
+/******************************************************************************/
+/***************** Classifier Top Public lkpid table APIs ********************/
+/******************************************************************************/
+
+int mvPp2ClsHwLkpWrite(int lkpid, int way, MV_PP2_CLS_LKP_ENTRY *fe)
+{
+	unsigned int regVal = 0;
+
+	PTR_VALIDATE(fe);
+
+	BIT_RANGE_VALIDATE(way);
+	POS_RANGE_VALIDATE(lkpid, MV_PP2_CLS_LKP_TBL_SIZE);
+
+	/* write index reg */
+	regVal = (way << MV_PP2_CLS_LKP_INDEX_WAY_OFFS) | (lkpid << MV_PP2_CLS_LKP_INDEX_LKP_OFFS);
+	mvPp2WrReg(MV_PP2_CLS_LKP_INDEX_REG, regVal);
+
+	/* write flowId reg */
+	mvPp2WrReg(MV_PP2_CLS_LKP_TBL_REG, fe->data);
+
+	/* update shadow */
+	mvClsLkpShadowTbl[regVal] = IN_USE;
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsHwLkpRead(int lkpid, int way, MV_PP2_CLS_LKP_ENTRY *fe)
+{
+	unsigned int regVal = 0;
+
+	PTR_VALIDATE(fe);
+
+	POS_RANGE_VALIDATE(way, WAY_MAX);
+	POS_RANGE_VALIDATE(lkpid, MV_PP2_CLS_FLOWS_TBL_SIZE);
+
+	/* write index reg */
+	regVal = (way << MV_PP2_CLS_LKP_INDEX_WAY_OFFS) | (lkpid << MV_PP2_CLS_LKP_INDEX_LKP_OFFS);
+	mvPp2WrReg(MV_PP2_CLS_LKP_INDEX_REG, regVal);
+
+	fe->way = way;
+	fe->lkpid = lkpid;
+
+	fe->data = mvPp2RdReg(MV_PP2_CLS_LKP_TBL_REG);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsHwLkpClear(int lkpid, int way)
+{
+	unsigned int regVal = 0;
+	MV_PP2_CLS_LKP_ENTRY fe;
+
+	POS_RANGE_VALIDATE(lkpid, MV_PP2_CLS_LKP_TBL_SIZE);
+	BIT_RANGE_VALIDATE(way);
+
+	/* clear entry */
+	mvPp2ClsSwLkpClear(&fe);
+	mvPp2ClsHwLkpWrite(lkpid, way, &fe);
+
+	/* clear shadow */
+	regVal = (way << MV_PP2_CLS_LKP_INDEX_WAY_OFFS) | (lkpid << MV_PP2_CLS_LKP_INDEX_LKP_OFFS);
+	mvClsLkpShadowTbl[regVal] = NOT_IN_USE;
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsSwLkpDump(MV_PP2_CLS_LKP_ENTRY *fe)
+{
+	int int32bit;
+	int status = 0;
+
+	PTR_VALIDATE(fe);
+
+	mvOsPrintf("< ID  WAY >:	RXQ  	EN	FLOW	MODE_BASE\n");
+
+	/* id */
+	mvOsPrintf(" 0x%2.2x  %1.1d\t", fe->lkpid, fe->way);
+
+	/*rxq*/
+	status |= mvPp2ClsSwLkpRxqGet(fe, &int32bit);
+	mvOsPrintf("0x%2.2x\t", int32bit);
+
+	/*enabe bit*/
+	status |= mvPp2ClsSwLkpEnGet(fe, &int32bit);
+	mvOsPrintf("%1.1d\t", int32bit);
+
+	/*flow*/
+	status |= mvPp2ClsSwLkpFlowGet(fe, &int32bit);
+	mvOsPrintf("0x%3.3x\t", int32bit);
+
+	/*mode*/
+	status |= mvPp2ClsSwLkpModGet(fe, &int32bit);
+	mvOsPrintf(" 0x%2.2x\t", int32bit);
+
+	mvOsPrintf("\n");
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsSwLkpRxqSet(MV_PP2_CLS_LKP_ENTRY *fe, int rxq)
+{
+	PTR_VALIDATE(fe);
+
+	POS_RANGE_VALIDATE(rxq, MV_PP2_MAX_RXQS_TOTAL-1);
+
+	fe->data &= ~FLOWID_RXQ_MASK;
+	fe->data |= (rxq << FLOWID_RXQ);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsSwLkpEnSet(MV_PP2_CLS_LKP_ENTRY *fe, int en)
+{
+	PTR_VALIDATE(fe);
+
+	BIT_RANGE_VALIDATE(en);
+
+	fe->data &= ~FLOWID_EN_MASK;
+	fe->data |= (en << FLOWID_EN);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsSwLkpFlowSet(MV_PP2_CLS_LKP_ENTRY *fe, int flow_idx)
+{
+	PTR_VALIDATE(fe);
+
+	POS_RANGE_VALIDATE(flow_idx, MV_PP2_CLS_FLOWS_TBL_SIZE);
+
+	fe->data &= ~FLOWID_FLOW_MASK;
+	fe->data |= (flow_idx << FLOWID_FLOW);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsSwLkpModSet(MV_PP2_CLS_LKP_ENTRY *fe, int mod_base)
+{
+	PTR_VALIDATE(fe);
+	/* TODO: what is the max value of mode base */
+	POS_RANGE_VALIDATE(mod_base, FLOWID_MODE_MAX);
+
+	fe->data &= ~FLOWID_MODE_MASK;
+	fe->data |= (mod_base << FLOWID_MODE);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsSwLkpRxqGet(MV_PP2_CLS_LKP_ENTRY *fe, int *rxq)
+{
+
+	PTR_VALIDATE(fe);
+	PTR_VALIDATE(rxq);
+
+	*rxq =  (fe->data & FLOWID_RXQ_MASK) >> FLOWID_RXQ;
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsSwLkpEnGet(MV_PP2_CLS_LKP_ENTRY *fe, int *en)
+{
+	PTR_VALIDATE(fe);
+	PTR_VALIDATE(en);
+
+	*en = (fe->data & FLOWID_EN_MASK) >> FLOWID_EN;
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsSwLkpFlowGet(MV_PP2_CLS_LKP_ENTRY *fe, int *flow_idx)
+{
+	PTR_VALIDATE(fe);
+	PTR_VALIDATE(flow_idx);
+
+	*flow_idx = (fe->data & FLOWID_FLOW_MASK) >> FLOWID_FLOW;
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsSwLkpModGet(MV_PP2_CLS_LKP_ENTRY *fe, int *mod_base)
+{
+	PTR_VALIDATE(fe);
+	PTR_VALIDATE(mod_base);
+
+	*mod_base = (fe->data & FLOWID_MODE_MASK) >> FLOWID_MODE;
+	return MV_OK;
+}
+
+/******************************************************************************/
+/***************** Classifier Top Public flows table APIs  ********************/
+/******************************************************************************/
+
+
+int mvPp2ClsHwFlowWrite(int index, MV_PP2_CLS_FLOW_ENTRY *fe)
+{
+	PTR_VALIDATE(fe);
+
+	POS_RANGE_VALIDATE(index, MV_PP2_CLS_FLOWS_TBL_SIZE);
+
+	fe->index = index;
+
+	/*write index*/
+	mvPp2WrReg(MV_PP2_CLS_FLOW_INDEX_REG, index);
+
+	mvPp2WrReg(MV_PP2_CLS_FLOW_TBL0_REG, fe->data[0]);
+	mvPp2WrReg(MV_PP2_CLS_FLOW_TBL1_REG, fe->data[1]);
+	mvPp2WrReg(MV_PP2_CLS_FLOW_TBL2_REG, fe->data[2]);
+
+	/* update shadow */
+	mvClsFlowShadowTbl[index] = IN_USE;
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+
+int mvPp2ClsHwFlowRead(int index, MV_PP2_CLS_FLOW_ENTRY *fe)
+{
+	PTR_VALIDATE(fe);
+
+	POS_RANGE_VALIDATE(index, MV_PP2_CLS_FLOWS_TBL_SIZE);
+
+	fe->index = index;
+
+	/*write index*/
+	mvPp2WrReg(MV_PP2_CLS_FLOW_INDEX_REG, index);
+
+	fe->data[0] = mvPp2RdReg(MV_PP2_CLS_FLOW_TBL0_REG);
+	fe->data[1] = mvPp2RdReg(MV_PP2_CLS_FLOW_TBL1_REG);
+	fe->data[2] = mvPp2RdReg(MV_PP2_CLS_FLOW_TBL2_REG);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsHwFlowClear(int index)
+{
+	MV_PP2_CLS_FLOW_ENTRY fe;
+
+	POS_RANGE_VALIDATE(index, MV_PP2_CLS_FLOWS_TBL_SIZE);
+
+	/* Clear flow entry */
+	mvPp2ClsSwFlowClear(&fe);
+	mvPp2ClsHwFlowWrite(index, &fe);
+
+	/* clear shadow */
+	mvClsFlowShadowTbl[index] = NOT_IN_USE;
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsSwFlowDump(MV_PP2_CLS_FLOW_ENTRY *fe)
+{
+	int	int32bit_1, int32bit_2, i;
+	int	fieldsArr[MV_PP2_CLS_FLOWS_TBL_FIELDS_MAX];
+	int	status = MV_OK;
+
+	PTR_VALIDATE(fe);
+	mvOsPrintf("INDEX: F[0] F[1] F[2] F[3] PRT[T  ID] ENG LAST LKP_TYP  PRIO\n");
+
+	/*index*/
+	mvOsPrintf("0x%3.3x  ", fe->index);
+
+	/*filed[0] filed[1] filed[2] filed[3]*/
+	status |= mvPp2ClsSwFlowHekGet(fe, &int32bit_1, fieldsArr);
+
+	for (i = 0 ; i < MV_PP2_CLS_FLOWS_TBL_FIELDS_MAX; i++)
+		if (i < int32bit_1)
+			mvOsPrintf("0x%2.2x ", fieldsArr[i]);
+		else
+			mvOsPrintf(" NA  ");
+
+	/*port_type port_id*/
+	status |= mvPp2ClsSwFlowPortGet(fe, &int32bit_1, &int32bit_2);
+	mvOsPrintf("[%1d  0x%3.3x]  ", int32bit_1, int32bit_2);
+
+	/* engine_num last_bit*/
+	status |= mvPp2ClsSwFlowEngineGet(fe, &int32bit_1, &int32bit_2);
+	mvOsPrintf("%1d   %1d    ", int32bit_1, int32bit_2);
+
+	/* lookup_type priority*/
+	status |= mvPp2ClsSwFlowExtraGet(fe, &int32bit_1, &int32bit_2);
+	mvOsPrintf("0x%2.2x    0x%2.2x", int32bit_1, int32bit_2);
+
+	mvOsPrintf("\n");
+#ifdef CONFIG_MV_ETH_PP2_1
+	mvOsPrintf("\n");
+	mvOsPrintf("       PPPEO   VLAN   MACME   UDF7   SELECT SEQ_CTRL\n");
+	mvOsPrintf("         %1d      %1d      %1d       %1d      %1d      %1d\n",
+			(fe->data[0] & FLOW_PPPOE_MASK) >> FLOW_PPPOE,
+			(fe->data[0] & FLOW_VLAN_MASK) >> FLOW_VLAN,
+			(fe->data[0] & FLOW_MACME_MASK) >> FLOW_MACME,
+			(fe->data[0] & FLOW_UDF7_MASK) >> FLOW_UDF7,
+			(fe->data[0] & FLOW_PORT_ID_SEL_MASK) >> FLOW_PORT_ID_SEL,
+			(fe->data[1] & FLOW_SEQ_CTRL_MASK) >> FLOW_SEQ_CTRL);
+	mvOsPrintf("\n");
+
+#endif
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsSwFlowHekSet(MV_PP2_CLS_FLOW_ENTRY *fe, int field_index, int field_id)
+{
+	int num_of_fields;
+
+	PTR_VALIDATE(fe);
+	POS_RANGE_VALIDATE(field_index, MV_PP2_CLS_FLOWS_TBL_FIELDS_MAX);
+	POS_RANGE_VALIDATE(field_id, FLOW_FIELED_MAX);
+
+	/* get current num_of_fields */
+	num_of_fields = ((fe->data[1] & FLOW_FIELDS_NUM_MASK) >> FLOW_FIELDS_NUM) ;
+
+	if (num_of_fields < (field_index+1)) {
+		mvOsPrintf("%s: number of heks = %d , index (%d) is out of range.\n", __func__, num_of_fields, field_index);
+		return MV_CLS_OUT_OF_RAGE;
+	}
+
+	fe->data[2] &= ~FLOW_FIELED_MASK(field_index);
+	fe->data[2] |= (field_id <<  FLOW_FIELED_ID(field_index));
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsSwFlowHekNumSet(MV_PP2_CLS_FLOW_ENTRY *fe, int num_of_fields)
+{
+	PTR_VALIDATE(fe);
+	POS_RANGE_VALIDATE(num_of_fields, MV_PP2_CLS_FLOWS_TBL_FIELDS_MAX);
+
+	fe->data[1] &= ~FLOW_FIELDS_NUM_MASK;
+	fe->data[1] |= (num_of_fields << FLOW_FIELDS_NUM);
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------
+
+int mvPp2ClsSwFlowHekSet(MV_PP2_CLS_FLOW_ENTRY *fe, int num_of_fields, int field_ids[])
+{
+	int index;
+
+	PTR_VALIDATE(fe);
+	PTR_VALIDATE(field_ids);
+
+	POS_RANGE_VALIDATE(num_of_fields, MV_PP2_CLS_FLOWS_TBL_FIELDS_MAX);
+
+	fe->data[1] &= ~FLOW_FIELDS_NUM_MASK;
+	fe->data[1] |= (num_of_fields << FLOW_FIELDS_NUM);
+
+	for (index = 0; index < num_of_fields; index++) {
+		POS_RANGE_VALIDATE(field_ids[index], FLOW_FIELED_MAX);
+		fe->data[2] &= ~FLOW_FIELED_MASK(index);
+		fe->data[2] |= (field_ids[index] <<  FLOW_FIELED_ID(index));
+	}
+
+	return MV_OK;
+}
+-------------------------------------------------------------------------------*/
+
+int mvPp2ClsSwFlowPortSet(MV_PP2_CLS_FLOW_ENTRY *fe, int type, int portid)
+{
+	PTR_VALIDATE(fe);
+
+	POS_RANGE_VALIDATE(type, FLOW_PORT_TYPE_MAX);
+	POS_RANGE_VALIDATE(portid, FLOW_PORT_ID_MAX);
+
+	fe->data[0] &= ~FLOW_PORT_ID_MASK;
+	fe->data[0] &= ~FLOW_PORT_TYPE_MASK;
+
+	fe->data[0] |= (portid << FLOW_PORT_ID);
+	fe->data[0] |= (type << FLOW_PORT_TYPE);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+/*PPv2.1 new feature MAS 3.18*/
+int mvPp2ClsSwPortIdSelect(MV_PP2_CLS_FLOW_ENTRY *fe, int from)
+{
+	PTR_VALIDATE(fe);
+	BIT_RANGE_VALIDATE(from);
+
+	if (from)
+		fe->data[0] |= FLOW_PORT_ID_SEL_MASK;
+	else
+		fe->data[0] &= ~FLOW_PORT_ID_SEL_MASK;
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+/*PPv2.1 new feature MAS 3.18*/
+int mvPp2ClsSwFlowPppoeSet(MV_PP2_CLS_FLOW_ENTRY *fe,  int mode)
+{
+	PTR_VALIDATE(fe);
+	POS_RANGE_VALIDATE(mode, FLOW_PPPOE_MAX);
+
+	fe->data[0] &= ~FLOW_PPPOE_MASK;
+	fe->data[0] |= (mode << FLOW_PPPOE);
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+/*PPv2.1 new feature MAS 3.18*/
+int mvPp2ClsSwFlowVlanSet(MV_PP2_CLS_FLOW_ENTRY *fe,  int mode)
+{
+	PTR_VALIDATE(fe);
+	POS_RANGE_VALIDATE(mode, FLOW_VLAN_MAX);
+
+	fe->data[0] &= ~FLOW_VLAN_MASK;
+	fe->data[0] |= (mode << FLOW_VLAN);
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+/*PPv2.1 new feature MAS 3.18*/
+int mvPp2ClsSwFlowMacMeSet(MV_PP2_CLS_FLOW_ENTRY *fe,  int mode)
+{
+	PTR_VALIDATE(fe);
+	POS_RANGE_VALIDATE(mode, FLOW_MACME_MAX);
+
+	fe->data[0] &= ~FLOW_MACME_MASK;
+	fe->data[0] |= (mode << FLOW_MACME);
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+/*PPv2.1 new feature MAS 3.18*/
+int mvPp2ClsSwFlowUdf7Set(MV_PP2_CLS_FLOW_ENTRY *fe,  int mode)
+{
+	PTR_VALIDATE(fe);
+	POS_RANGE_VALIDATE(mode, FLOW_UDF7_MAX);
+
+	fe->data[0] &= ~FLOW_UDF7_MASK;
+	fe->data[0] |= (mode << FLOW_UDF7);
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsSwFlowEngineSet(MV_PP2_CLS_FLOW_ENTRY *fe, int engine, int is_last)
+{
+	PTR_VALIDATE(fe);
+
+	BIT_RANGE_VALIDATE(is_last);
+	POS_RANGE_VALIDATE(engine, FLOW_ENGINE_MAX);
+
+	fe->data[0] &= ~FLOW_LAST_MASK;
+	fe->data[0] &= ~FLOW_ENGINE_MASK;
+
+	fe->data[0] |= is_last;
+	fe->data[0] |= (engine << FLOW_ENGINE);
+
+	return MV_OK;
+
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsSwFlowSeqCtrlSet(MV_PP2_CLS_FLOW_ENTRY *fe, int mode)
+{
+	PTR_VALIDATE(fe);
+	POS_RANGE_VALIDATE(mode, FLOW_ENGINE_MAX);
+
+	fe->data[1] &= ~FLOW_SEQ_CTRL_MASK;
+	fe->data[1] |= (mode << FLOW_SEQ_CTRL);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsSwFlowExtraSet(MV_PP2_CLS_FLOW_ENTRY *fe, int type, int prio)
+{
+	PTR_VALIDATE(fe);
+	POS_RANGE_VALIDATE(type, FLOW_PORT_ID_MAX);
+	POS_RANGE_VALIDATE(prio, FLOW_FIELED_MAX);
+
+	fe->data[1] &= ~FLOW_LKP_TYPE_MASK;
+	fe->data[1] |= (type << FLOW_LKP_TYPE);
+
+	fe->data[1] &= ~FLOW_FIELED_PRIO_MASK;
+	fe->data[1] |= (prio << FLOW_FIELED_PRIO);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsSwFlowHekGet(MV_PP2_CLS_FLOW_ENTRY *fe, int *num_of_fields, int field_ids[])
+{
+	int index;
+
+	PTR_VALIDATE(fe);
+	PTR_VALIDATE(num_of_fields);
+	PTR_VALIDATE(field_ids);
+
+	*num_of_fields = (fe->data[1] & FLOW_FIELDS_NUM_MASK) >> FLOW_FIELDS_NUM;
+
+
+	for (index = 0; index < (*num_of_fields); index++)
+		field_ids[index] = ((fe->data[2] & FLOW_FIELED_MASK(index)) >>  FLOW_FIELED_ID(index));
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsSwFlowPortGet(MV_PP2_CLS_FLOW_ENTRY *fe, int *type, int *portid)
+{
+	PTR_VALIDATE(fe);
+	PTR_VALIDATE(type);
+	PTR_VALIDATE(portid);
+
+	*type = (fe->data[0] & FLOW_PORT_TYPE_MASK) >> FLOW_PORT_TYPE;
+	*portid = (fe->data[0] & FLOW_PORT_ID_MASK) >> FLOW_PORT_ID;
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsSwFlowEngineGet(MV_PP2_CLS_FLOW_ENTRY *fe, int *engine, int *is_last)
+{
+	PTR_VALIDATE(fe);
+	PTR_VALIDATE(engine);
+	PTR_VALIDATE(is_last);
+
+	*engine = (fe->data[0] & FLOW_ENGINE_MASK) >> FLOW_ENGINE;
+	*is_last = fe->data[0] & FLOW_LAST_MASK;
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsSwFlowExtraGet(MV_PP2_CLS_FLOW_ENTRY *fe, int *type, int *prio)
+{
+	PTR_VALIDATE(fe);
+	PTR_VALIDATE(type);
+	PTR_VALIDATE(prio);
+
+	*type = (fe->data[1] & FLOW_LKP_TYPE_MASK) >> FLOW_LKP_TYPE;
+	*prio = (fe->data[1] & FLOW_FIELED_PRIO_MASK) >> FLOW_FIELED_PRIO;
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+/*	Classifier Top Public length change table APIs   			 */
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsPktLenChangeWrite(int index, unsigned int data)
+{
+
+	POS_RANGE_VALIDATE(index, MV_PP2_CLS_LEN_CHANGE_TBL_SIZE);
+#ifdef CONFIG_MV_ETH_PP2_1
+	/*write index*/
+	mvPp2WrReg(MV_PP2_V1_CLS_LEN_CHANGE_INDEX_REG, index);
+
+	mvPp2WrReg(MV_PP2_V1_CLS_LEN_CHANGE_TBL_REG, data);
+#else
+	mvPp2WrReg(MV_PP2_V0_CLS_LEN_CHANGE_INDEX_REG, index);
+
+	mvPp2WrReg(MV_PP2_V0_CLS_LEN_CHANGE_TBL_REG, data);
+#endif
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsPktLenChangeRead(int index, unsigned int *data)
+{
+	PTR_VALIDATE(data);
+
+	POS_RANGE_VALIDATE(index, MV_PP2_CLS_LEN_CHANGE_TBL_SIZE);
+#ifdef CONFIG_MV_ETH_PP2_1
+	/*write index*/
+	mvPp2WrReg(MV_PP2_V1_CLS_LEN_CHANGE_INDEX_REG, index);
+
+	*data = mvPp2RdReg(MV_PP2_V1_CLS_LEN_CHANGE_TBL_REG);
+#else
+	/*write index*/
+	mvPp2WrReg(MV_PP2_V0_CLS_LEN_CHANGE_INDEX_REG, index);
+
+	*data = mvPp2RdReg(MV_PP2_V0_CLS_LEN_CHANGE_TBL_REG);
+#endif
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsPktLenChangeDump()
+{
+	/* print all non-zero length change entries */
+	int index, int32bit_1;
+
+	mvOsPrintf("INDEX:\tLENGHT\n");
+
+	for (index = 0 ; index <= LEN_CHANGE_LENGTH_MAX; index++) {
+		/* read entry */
+		mvPp2ClsPktLenChangeGet(index, &int32bit_1);
+
+		if (int32bit_1 != 0)
+			mvOsPrintf("0x%3.3x\t%d\n", index, int32bit_1);
+	}
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsPktLenChangeSet(int index, int length)
+{
+	unsigned int dec = 0, data = 0;
+
+	DECIMAL_RANGE_VALIDATE(length, (0 - LEN_CHANGE_LENGTH_MAX), LEN_CHANGE_LENGTH_MAX);
+
+	if (length < 0) {
+		dec =  1;
+		length = 0 - length;
+	}
+
+	data &= ~(LEN_CHANGE_DEC_MASK | LEN_CHANGE_LENGTH_MASK);
+	data |= ((dec << LEN_CHANGE_DEC) |  (length << LEN_CHANGE_LENGTH));
+
+	mvPp2ClsPktLenChangeWrite(index, data);
+
+	return MV_OK;
+}
+
+int mvPp2ClsPktLenChangeGet(int index, int *length)
+{
+	unsigned int data, dec;
+
+	PTR_VALIDATE(length);
+
+	/* read HW entry */
+	mvPp2ClsPktLenChangeRead(index, &data);
+
+	dec = ((data & LEN_CHANGE_DEC_MASK) >> LEN_CHANGE_DEC);
+	*length = ((data & LEN_CHANGE_LENGTH_MASK) >> LEN_CHANGE_LENGTH);
+
+	if (dec == 1)
+		*length = (*length) * (-1);
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+/*			additional cls debug APIs				 */
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2ClsHwRegsDump()
+{
+	int i = 0;
+	char reg_name[100];
+
+	mvPp2PrintReg(MV_PP2_CLS_MODE_REG, "MV_PP2_CLS_MODE_REG");
+	mvPp2PrintReg(MV_PP2_CLS_PORT_WAY_REG, "MV_PP2_CLS_PORT_WAY_REG");
+	mvPp2PrintReg(MV_PP2_CLS_LKP_INDEX_REG, "MV_PP2_CLS_LKP_INDEX_REG");
+	mvPp2PrintReg(MV_PP2_CLS_LKP_TBL_REG, "MV_PP2_CLS_LKP_TBL_REG");
+	mvPp2PrintReg(MV_PP2_CLS_FLOW_INDEX_REG, "MV_PP2_CLS_FLOW_INDEX_REG");
+
+	mvPp2PrintReg(MV_PP2_CLS_FLOW_TBL0_REG, "MV_PP2_CLS_FLOW_TBL0_REG");
+	mvPp2PrintReg(MV_PP2_CLS_FLOW_TBL1_REG, "MV_PP2_CLS_FLOW_TBL1_REG");
+	mvPp2PrintReg(MV_PP2_CLS_FLOW_TBL2_REG, "MV_PP2_CLS_FLOW_TBL2_REG");
+
+
+	mvPp2PrintReg(MV_PP2_CLS_PORT_SPID_REG, "MV_PP2_CLS_PORT_SPID_REG");
+
+	for (i = 0; i < MV_PP2_CLS_SPID_UNI_REGS; i++) {
+		mvOsSPrintf(reg_name, "MV_PP2_CLS_SPID_UNI_%d_REG", i);
+		mvPp2PrintReg((MV_PP2_CLS_SPID_UNI_BASE_REG + (4 * i)), reg_name);
+	}
+#ifdef CONFIG_MV_ETH_PP2_1
+	for (i = 0; i < MV_PP2_CLS_GEM_VIRT_REGS_NUM; i++) {
+		/* indirect access */
+		mvPp2WrReg(MV_PP2_CLS_GEM_VIRT_INDEX_REG, i);
+		mvOsSPrintf(reg_name, "MV_PP2_CLS_GEM_VIRT_%d_REG", i);
+		mvPp2PrintReg(MV_PP2_CLS_GEM_VIRT_REG, reg_name);
+	}
+#else
+	for (i = 0; i < MV_PP2_CLS_GEM_VIRT_REGS_NUM; i++) {
+		mvOsSPrintf(reg_name, "MV_PP2_CLS_GEM_VIRT_%d_REG", i);
+		mvPp2PrintReg(MV_PP2_CLS_GEM_VIRT_REG(i), reg_name);
+	}
+#endif
+	for (i = 0; i < MV_PP2_CLS_UDF_BASE_REGS; i++)	{
+		mvOsSPrintf(reg_name, "MV_PP2_CLS_UDF_REG_%d_REG", i);
+		mvPp2PrintReg(MV_PP2_CLS_UDF_REG(i), reg_name);
+	}
+#ifdef CONFIG_MV_ETH_PP2_1
+	for (i = 0; i < 16; i++) {
+		mvOsSPrintf(reg_name, "MV_PP2_CLS_MTU_%d_REG", i);
+		mvPp2PrintReg(MV_PP2_CLS_MTU_REG(i), reg_name);
+	}
+	for (i = 0; i < MV_PP2_MAX_PORTS; i++) {
+		mvOsSPrintf(reg_name, "MV_PP2_CLS_OVER_RXQ_LOW_%d_REG", i);
+		mvPp2PrintReg(MV_PP2_CLS_OVERSIZE_RXQ_LOW_REG(i), reg_name);
+	}
+	for (i = 0; i < MV_PP2_MAX_PORTS; i++) {
+		mvOsSPrintf(reg_name, "MV_PP2_CLS_SWFWD_P2HQ_%d_REG", i);
+		mvPp2PrintReg(MV_PP2_CLS_SWFWD_P2HQ_REG(i), reg_name);
+	}
+
+	mvPp2PrintReg(MV_PP2_CLS_SWFWD_PCTRL_REG, "MV_PP2_CLS_SWFWD_PCTRL_REG");
+	mvPp2PrintReg(MV_PP2_CLS_SEQ_SIZE_REG, "MV_PP2_CLS_SEQ_SIZE_REG");
+
+	for (i = 0; i < MV_PP2_MAX_PORTS; i++) {
+		mvOsSPrintf(reg_name, "MV_PP2_CLS_PCTRL_%d_REG", i);
+		mvPp2PrintReg(MV_PP2_CLS_PCTRL_REG(i), reg_name);
+	}
+#else
+	for (i = 0; i < (MV_PP2_MAX_TCONT + MV_PP2_MAX_PORTS - 1); i++) {
+		mvOsSPrintf(reg_name, "MV_PP2_CLS_MTU_%d_REG", i);
+		mvPp2PrintReg(MV_PP2_CLS_MTU_REG(i), reg_name);
+	}
+
+	for (i = 0; i < MV_PP2_MAX_PORTS; i++) {
+		mvOsSPrintf(reg_name, "MV_PP2_CLS_OVER_RXQ_%d_REG", i);
+		mvPp2PrintReg(MV_PP2_CLS_OVERSIZE_RXQ_REG(i), reg_name);
+	}
+#endif
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+void mvPp2ClsSwLkpClear(MV_PP2_CLS_LKP_ENTRY *fe)
+{
+	memset(fe, 0, sizeof(MV_PP2_CLS_LKP_ENTRY));
+}
+
+/*-------------------------------------------------------------------------------*/
+void mvPp2ClsSwFlowClear(MV_PP2_CLS_FLOW_ENTRY *fe)
+{
+	memset(fe, 0, sizeof(MV_PP2_CLS_FLOW_ENTRY));
+}
+/*-------------------------------------------------------------------------------*/
+void mvPp2ClsHwFlowClearAll()
+{
+	int index;
+
+	MV_PP2_CLS_FLOW_ENTRY fe;
+
+	mvPp2ClsSwFlowClear(&fe);
+
+	for (index = 0; index < MV_PP2_CLS_FLOWS_TBL_SIZE ; index++)
+		mvPp2ClsHwFlowWrite(index, &fe);
+
+	/* clear shadow */
+	memset(mvClsFlowShadowTbl, NOT_IN_USE, MV_PP2_CLS_FLOWS_TBL_SIZE * sizeof(int));
+
+}
+/*-------------------------------------------------------------------------------*/
+static int mvPp2V1ClsHwFlowHitGet(int index,  unsigned int *cnt)
+{
+
+	POS_RANGE_VALIDATE(index, MV_PP2_CLS_FLOWS_TBL_SIZE);
+
+	/*set index */
+	mvPp2WrReg(MV_PP2_V1_CNT_IDX_REG, MV_PP2_V1_CNT_IDX_FLOW(index));
+
+	if (cnt)
+		*cnt = mvPp2RdReg(MV_PP2_V1_CLS_FLOW_TBL_HIT_REG);
+	else
+		mvOsPrintf("HITS = %d\n", mvPp2RdReg(MV_PP2_V1_CLS_FLOW_TBL_HIT_REG));
+
+	return MV_OK;
+
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2V1ClsHwLkpHitGet(int lkpid, int way,  unsigned int *cnt)
+{
+
+	BIT_RANGE_VALIDATE(way);
+	POS_RANGE_VALIDATE(lkpid, MV_PP2_CLS_LKP_TBL_SIZE);
+
+	/*set index */
+	mvPp2WrReg(MV_PP2_V1_CNT_IDX_REG, MV_PP2_V1_CNT_IDX_LKP(lkpid, way));
+
+	if (cnt)
+		*cnt = mvPp2RdReg(MV_PP2_V1_CLS_LKP_TBL_HIT_REG);
+	else
+		mvOsPrintf("HITS: %d\n", mvPp2RdReg(MV_PP2_V1_CLS_LKP_TBL_HIT_REG));
+
+	return MV_OK;
+
+}
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsHwFlowDump()
+{
+	int index;
+
+	MV_PP2_CLS_FLOW_ENTRY fe;
+
+	for (index = 0; index < MV_PP2_CLS_FLOWS_TBL_SIZE ; index++) {
+		if (mvClsFlowShadowTbl[index] == IN_USE) {
+			mvPp2ClsHwFlowRead(index, &fe);
+			mvPp2ClsSwFlowDump(&fe);
+#ifdef CONFIG_MV_ETH_PP2_1
+			mvPp2V1ClsHwFlowHitGet(index, NULL);
+#endif
+			mvOsPrintf("------------------------------------------------------------------\n");
+		}
+	}
+	return MV_OK;
+
+}
+
+/*-------------------------------------------------------------------------------*/
+/*PPv2.1 new counters MAS 3.20*/
+int mvPp2V1ClsHwFlowHitsDump()
+{
+	int index;
+	unsigned int cnt;
+	MV_PP2_CLS_FLOW_ENTRY fe;
+
+	for (index = 0; index < MV_PP2_CLS_FLOWS_TBL_SIZE ; index++) {
+		if (mvClsFlowShadowTbl[index] == IN_USE) {
+			mvPp2V1ClsHwFlowHitGet(index, &cnt);
+			if (cnt != 0) {
+				mvPp2ClsHwFlowRead(index, &fe);
+				mvPp2ClsSwFlowDump(&fe);
+				mvOsPrintf("HITS = %d\n", cnt);
+				mvOsPrintf("\n");
+			}
+		}
+	}
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+void mvPp2ClsHwLkpClearAll()
+{
+	int index;
+
+	MV_PP2_CLS_LKP_ENTRY fe;
+
+	mvPp2ClsSwLkpClear(&fe);
+
+	for (index = 0; index < MV_PP2_CLS_LKP_TBL_SIZE ; index++) {
+		mvPp2ClsHwLkpWrite(index, 0, &fe);
+		mvPp2ClsHwLkpWrite(index, 1, &fe);
+	}
+	/* clear shadow */
+	memset(mvClsLkpShadowTbl, NOT_IN_USE, 2 * MV_PP2_CLS_LKP_TBL_SIZE * sizeof(int));
+
+}
+/*-------------------------------------------------------------------------------*/
+/*PPv2.1 new counters MAS 3.20*/
+int mvPp2V1ClsHwLkpHitsDump()
+{
+	int index, way, entryInd;
+	unsigned int cnt;
+
+	mvOsPrintf("< ID  WAY >:	HITS\n");
+	for (index = 0; index < MV_PP2_CLS_LKP_TBL_SIZE ; index++)
+		for (way = 0; way < 2 ; way++)	{
+			entryInd = (way << MV_PP2_CLS_LKP_INDEX_WAY_OFFS) | index;
+			if (mvClsLkpShadowTbl[entryInd] == IN_USE) {
+				mvPp2V1ClsHwLkpHitGet(index, way,  &cnt);
+				if (cnt != 0)
+					mvOsPrintf(" 0x%2.2x  %1.1d\t0x%8.8x\n", index, way, cnt);
+			}
+	}
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+/*PPv2.1 new counters MAS 3.20*/
+int mvPp2ClsHwLkpDump()
+{
+	int index, way, int32bit, ind;
+
+	MV_PP2_CLS_LKP_ENTRY fe;
+#ifdef CONFIG_MV_ETH_PP2_1
+	mvOsPrintf("< ID  WAY >:	RXQ	EN	FLOW	MODE_BASE  HITS\n");
+#else
+	mvOsPrintf("< ID  WAY >:	RXQ	EN	FLOW	MODE_BASE\n");
+#endif
+	for (index = 0; index < MV_PP2_CLS_LKP_TBL_SIZE ; index++)
+		for (way = 0; way < 2 ; way++)	{
+			ind = (way << MV_PP2_CLS_LKP_INDEX_WAY_OFFS) | index;
+			if (mvClsLkpShadowTbl[ind] == IN_USE) {
+				mvPp2ClsHwLkpRead(index, way, &fe);
+				mvOsPrintf(" 0x%2.2x  %1.1d\t", fe.lkpid, fe.way);
+				mvPp2ClsSwLkpRxqGet(&fe, &int32bit);
+				mvOsPrintf("0x%2.2x\t", int32bit);
+				mvPp2ClsSwLkpEnGet(&fe, &int32bit);
+				mvOsPrintf("%1.1d\t", int32bit);
+				mvPp2ClsSwLkpFlowGet(&fe, &int32bit);
+				mvOsPrintf("0x%3.3x\t", int32bit);
+				mvPp2ClsSwLkpModGet(&fe, &int32bit);
+				mvOsPrintf(" 0x%2.2x\t", int32bit);
+#ifdef CONFIG_MV_ETH_PP2_1
+				mvPp2V1ClsHwLkpHitGet(index, way, &int32bit);
+				mvOsPrintf(" 0x%8.8x\n", int32bit);
+#endif
+				mvOsPrintf("\n");
+
+			}
+		}
+	return MV_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2ClsHw.h b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2ClsHw.h
new file mode 100644
index 000000000000..5d5f40a90a8c
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2ClsHw.h
@@ -0,0 +1,520 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __MV_CLS_HW_H__
+#define __MV_CLS_HW_H__
+
+#include "../common/mvPp2ErrCode.h"
+#include "../common/mvPp2Common.h"
+#include "../gbe/mvPp2GbeRegs.h"
+#include "../gbe/mvPp2Gbe.h"
+/*-------------------------------------------------------------------------------*/
+/*			Classifier Top Registers	    			 */
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS_MODE_REG			(MV_PP2_REG_BASE + 0x1800)
+
+#define MV_PP2_CLS_MODE_ACTIVE_BIT		0
+#define MV_PP2_CLS_MODE_ACTIVE_MASK		(1 << MV_PP2_CLS_MODE_ACTIVE_BIT)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS_PORT_WAY_REG			(MV_PP2_REG_BASE + 0x1810)
+
+#define MV_PP2_CLS_PORT_WAY_OFFS		0
+#define MV_PP2_CLS_PORT_WAY_MASK(port)		(1 << ((port) + MV_PP2_CLS_PORT_WAY_OFFS))
+#define WAY_MAX					1
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS_LKP_INDEX_REG		(MV_PP2_REG_BASE + 0x1814)
+
+#define MV_PP2_CLS_LKP_INDEX_LKP_OFFS		0
+#define MV_PP2_CLS_LKP_INDEX_WAY_OFFS		6
+#define MV_PP2_CLS_LKP_INDEX_BITS		7
+#define MV_PP2_CLS_LKP_INDEX_MASK		((1 << MV_PP2_CLS_LKP_INDEX_BITS) - 1)
+#define MV_PP2_CLS_LKP_WAY_MASK			(1 << MV_PP2_CLS_LKP_INDEX_WAY_OFFS)
+
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS_LKP_TBL_REG			(MV_PP2_REG_BASE + 0x1818)
+
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS_FLOW_INDEX_REG		(MV_PP2_REG_BASE + 0x1820)
+
+#define MV_PP2_CLS_FLOW_INDEX_BITS		9
+#define MV_PP2_CLS_FLOW_INDEX_MASK		((1 << MV_PP2_CLS_FLOW_INDEX_BITS) - 1)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS_FLOW_TBL0_REG		(MV_PP2_REG_BASE + 0x1824)
+#define MV_PP2_CLS_FLOW_TBL1_REG		(MV_PP2_REG_BASE + 0x1828)
+#define MV_PP2_CLS_FLOW_TBL2_REG		(MV_PP2_REG_BASE + 0x182c)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS_PORT_SPID_REG		(MV_PP2_REG_BASE + 0x1830)
+
+#define MV_PP2_CLS_PORT_SPID_BITS		2
+#define MV_PP2_CLS_PORT_SPID_MAX		((1 << MV_PP2_CLS_PORT_SPID_BITS) - 1)
+#define MV_PP2_CLS_PORT_SPID_MASK(port)		((MV_PP2_CLS_PORT_SPID_MAX) << ((port) * MV_PP2_CLS_PORT_SPID_BITS))
+#define MV_PP2_CLS_PORT_SPID_VAL(port, val)	((val) << ((port) * MV_PP2_CLS_PORT_SPID_BITS));
+
+/* PORT - SPID types */
+#define PORT_SPID_MH				0
+#define PORT_SPID_EXT_SWITCH			1
+#define PORT_SPID_CAS_SWITCH			2
+#define PORT_SPID_PORT_TRUNK			3
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS_SPID_UNI_BASE_REG		(MV_PP2_REG_BASE + 0x1840)
+#define MV_PP2_CLS_SPID_UNI_REG(spid)		(MV_PP2_CLS_SPID_UNI_BASE_REG + (((spid) >> 3) * 4))
+
+#define MV_PP2_CLS_SPID_MAX			31
+#define MV_PP2_CLS_SPID_UNI_REGS		4
+#define MV_PP2_CLS_SPID_UNI_BITS		3
+#define MV_PP2_CLS_SPID_UNI_FIXED_BITS		4
+#define MV_PP2_CLS_SPID_UNI_MAX			((1 << MV_PP2_CLS_SPID_UNI_BITS) - 1)
+#define MV_PP2_CLS_SPID_UNI_OFFS(spid)		(((spid) % 8) * MV_PP2_CLS_SPID_UNI_FIXED_BITS)
+#define MV_PP2_CLS_SPID_UNI_MASK(spid)		((MV_PP2_CLS_SPID_UNI_MAX) << (MV_PP2_CLS_SPID_UNI_OFFS(spid)))
+#define MV_PP2_CLS_SPID_UNI_VAL(spid, val)	((val) << (MV_PP2_CLS_SPID_UNI_OFFS(spid)))
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_CLS_GEM_VIRT_INDEX_REG		(MV_PP2_REG_BASE + 0x1A00)
+#define MV_PP2_CLS_GEM_VIRT_INDEX_BITS		(7)
+#define MV_PP2_CLS_GEM_VIRT_INDEX_MAX		(((1 << MV_PP2_CLS_GEM_VIRT_INDEX_BITS) - 1) << 0)
+/*-------------------------------------------------------------------------------*/
+#ifdef CONFIG_MV_ETH_PP2_1
+/* indirect rd/wr via index GEM_VIRT_INDEX */
+#define MV_PP2_CLS_GEM_VIRT_REGS_NUM		128
+#define MV_PP2_CLS_GEM_VIRT_REG			(MV_PP2_REG_BASE + 0x1A04)
+#else
+/* direct rd/wr */
+#define MV_PP2_CLS_GEM_VIRT_REGS_NUM		64
+#define MV_PP2_CLS_GEM_VIRT_BASE_REG		(MV_PP2_REG_BASE + 0x1A00)
+#define MV_PP2_CLS_GEM_VIRT_REG(index)		(MV_PP2_CLS_GEM_VIRT_BASE_REG + ((index) * 4))
+#endif
+
+#define MV_PP2_CLS_GEM_VIRT_BITS		12
+#define MV_PP2_CLS_GEM_VIRT_MAX			((1 << MV_PP2_CLS_GEM_VIRT_BITS) - 1)
+#define MV_PP2_CLS_GEM_VIRT_MASK		(((1 << MV_PP2_CLS_GEM_VIRT_BITS) - 1) << 0)
+
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_CLS_UDF_BASE_REG			(MV_PP2_REG_BASE + 0x1860)
+#define MV_PP2_CLS_UDF_REG(index)		(MV_PP2_CLS_UDF_BASE_REG + ((index) * 4)) /*index <=63*/
+#define MV_PP2_CLS_UDF_REGS_NUM			64
+
+#define MV_PP2_CLS_UDF_BASE_REGS		8
+#define MV_PP2_CLS_UDF_OFFSET_ID_OFFS		0
+#define MV_PP2_CLS_UDF_OFFSET_ID_BITS		4
+#define MV_PP2_CLS_UDF_OFFSET_ID_MAX		((1 << MV_PP2_CLS_UDF_OFFSET_ID_BITS) - 1)
+#define MV_PP2_CLS_UDF_OFFSET_ID_MASK		((MV_PP2_CLS_UDF_OFFSET_ID_MAX) << MV_PP2_CLS_UDF_OFFSET_ID_OFFS)
+
+#define MV_PP2_CLS_UDF_REL_OFFSET_OFFS		4
+#define MV_PP2_CLS_UDF_REL_OFFSET_BITS		11
+#define MV_PP2_CLS_UDF_REL_OFFSET_MAX		((1 << MV_PP2_CLS_UDF_REL_OFFSET_BITS) - 1)
+#define MV_PP2_CLS_UDF_REL_OFFSET_MASK		((MV_PP2_CLS_UDF_REL_OFFSET_MAX) << MV_PP2_CLS_UDF_REL_OFFSET_OFFS)
+
+#define MV_PP2_CLS_UDF_SIZE_OFFS		16
+#define MV_PP2_CLS_UDF_SIZE_BITS		8
+#define MV_PP2_CLS_UDF_SIZE_MAX			((1 << MV_PP2_CLS_UDF_SIZE_BITS) - 1)
+#define MV_PP2_CLS_UDF_SIZE_MASK		(((1 << MV_PP2_CLS_UDF_SIZE_BITS) - 1) << MV_PP2_CLS_UDF_SIZE_OFFS)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_CLS_MTU_BASE_REG			(MV_PP2_REG_BASE + 0x1900)
+/*
+  in PPv2.1 (feature MAS 3.7) num indicate an mtu reg index
+  in PPv2.0 num (<=31) indicate eport number , 0-15 pon txq,  16-23 ethernet
+*/
+#define MV_PP2_CLS_MTU_REG(num)			(MV_PP2_CLS_MTU_BASE_REG + ((num) * 4))
+#define MV_PP2_CLS_MTU_OFFS			0
+#define MV_PP2_CLS_MTU_BITS			16
+#define MV_PP2_CLS_MTU_MAX			((1 << MV_PP2_CLS_MTU_BITS) - 1)
+#define MV_PP2_CLS_MTU_MASK			(((1 << MV_PP2_CLS_MTU_BITS) - 1) << MV_PP2_CLS_MTU_OFFS)
+/*-------------------------------------------------------------------------------*/
+/*
+  PPv2.1 (feature MAS 3.7) MV_PP2_V0_CLS_OVERSIZE_RXQ_BASE_REG removed
+*/
+#define MV_PP2_V0_CLS_OVERSIZE_RXQ_BASE_REG	(MV_PP2_REG_BASE + 0x1980)
+#define MV_PP2_CLS_OVERSIZE_RXQ_REG(eport)	(MV_PP2_V0_CLS_OVERSIZE_RXQ_BASE_REG + (4 * (eport))) /*eport <=23*/
+#define MV_PP2_CLS_OVERSIZE_RXQ_BITS		8
+#define MV_PP2_CLS_OVERSIZE_RXQ_MAX		((1 << MV_PP2_CLS_OVERSIZE_RXQ_BITS) - 1)
+#define MV_PP2_CLS_OVERSIZE_RXQ_OFFS		0
+#define MV_PP2_CLS_OVERSIZE_RX_MASK		((MV_PP2_CLS_OVERSIZE_RXQ_MAX) << MV_PP2_CLS_OVERSIZE_RXQ_OFFS)
+
+/*-------------------------------------------------------------------------------*/
+/*
+  PPv2.1 (feature MAS 3.7) new registers
+*/
+#define MV_PP2_CLS_OVERSIZE_RXQ_LOW_BASE_REG	(MV_PP2_REG_BASE + 0x1980)
+#define MV_PP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(MV_PP2_CLS_OVERSIZE_RXQ_LOW_BASE_REG + ((port) * 4))
+
+#define MV_PP2_CLS_OVERSIZE_RXQ_LOW_OFF		0
+#define MV_PP2_CLS_OVERSIZE_RXQ_LOW_BITS	3
+#define MV_PP2_CLS_OVERSIZE_RXQ_LOW_MAX		((1 << MV_PP2_CLS_OVERSIZE_RXQ_LOW_BITS) - 1)
+#define MV_PP2_CLS_OVERSIZE_RXQ_LOW_MASK	((MV_PP2_CLS_OVERSIZE_RXQ_LOW_MAX) << MV_PP2_CLS_OVERSIZE_RXQ_LOW_OFF)
+
+/*-------------------------------------------------------------------------------*/
+/*
+  PPv2.1 len changed table moved to tx general
+*/
+#define MV_PP2_V0_CLS_LEN_CHANGE_INDEX_REG	(MV_PP2_REG_BASE + 0x19A0)
+#define MV_PP2_V1_CLS_LEN_CHANGE_INDEX_REG	(MV_PP2_REG_BASE + 0x8808)
+
+/*-------------------------------------------------------------------------------*/
+/*
+  PPv2.1 len changed table moved to tx general
+*/
+#define MV_PP2_V0_CLS_LEN_CHANGE_TBL_REG	(MV_PP2_REG_BASE + 0x19A4)
+#define MV_PP2_V1_CLS_LEN_CHANGE_TBL_REG	(MV_PP2_REG_BASE + 0x880c)
+
+
+/*-------------------------------------------------------------------------------*/
+/*PPv2.1 new feature MAS 3.5*/
+#define MV_PP2_CLS_SWFWD_P2HQ_BASE_REG		(MV_PP2_REG_BASE + 0x19B0)
+#define MV_PP2_CLS_SWFWD_P2HQ_REG(eport)	(MV_PP2_CLS_SWFWD_P2HQ_BASE_REG + ((eport) * 4))
+
+#define MV_PP2_CLS_SWFWD_P2HQ_QUEUE_OFF		0
+#define MV_PP2_CLS_SWFWD_P2HQ_QUEUE_BITS	5
+#define MV_PP2_CLS_SWFWD_P2HQ_QUEUE_MAX		((1 << MV_PP2_CLS_SWFWD_P2HQ_QUEUE_BITS) - 1)
+#define MV_PP2_CLS_SWFWD_P2HQ_QUEUE_MASK	((MV_PP2_CLS_SWFWD_P2HQ_QUEUE_MAX) << MV_PP2_CLS_SWFWD_P2HQ_QUEUE_OFF)
+/*-------------------------------------------------------------------------------*/
+/*PPv2.1 new feature MAS 3.5*/
+#define MV_PP2_CLS_SWFWD_PCTRL_REG		(MV_PP2_REG_BASE + 0x19D0)
+#define MV_PP2_CLS_SWFWD_PCTRL_OFF		0
+#define MV_PP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << ((port) + MV_PP2_CLS_SWFWD_PCTRL_OFF))
+
+/*-------------------------------------------------------------------------------*/
+/*PPv2.1 new feature MAS 3.14*/
+#define MV_PP2_CLS_SEQ_SIZE_REG			(MV_PP2_REG_BASE + 0x19D4)
+#define MV_PP2_CLS_SEQ_SIZE_BITS		4
+#define MV_PP2_CLS_SEQ_INDEX_MAX		7
+#define MV_PP2_CLS_SEQ_SIZE_MAX			8
+#define MV_PP2_CLS_SEQ_SIZE_MASK(index)		\
+		(((1 << MV_PP2_CLS_SEQ_SIZE_BITS) - 1) << (MV_PP2_CLS_SEQ_SIZE_BITS * (index)))
+#define MV_PP2_CLS_SEQ_SIZE_VAL(index, val)	((val) << ((index) * MV_PP2_CLS_SEQ_SIZE_BITS))
+/*-------------------------------------------------------------------------------*/
+/*PPv2.1 new register MAS 3.18*/
+#define MV_PP2_CLS_PCTRL_BASE_REG		(MV_PP2_REG_BASE + 0x1880)
+#define MV_PP2_CLS_PCTRL_REG(port)		(MV_PP2_CLS_PCTRL_BASE_REG + 4 * (port))
+#define MV_PP2_CLS_PCTRL_MH_OFFS		0
+#define MV_PP2_CLS_PCTRL_MH_BITS		16
+#define MV_PP2_CLS_PCTRL_MH_MASK		(((1 << MV_PP2_CLS_PCTRL_MH_BITS) - 1) << MV_PP2_CLS_PCTRL_MH_OFFS)
+
+#define MV_PP2_CLS_PCTRL_VIRT_EN_OFFS		16
+#define MV_PP2_CLS_PCTRL_VIRT_EN_MASK		(1 << MV_PP2_CLS_PCTRL_VIRT_EN_OFFS)
+
+#define MV_PP2_CLS_PCTRL_UNI_EN_OFFS		17
+#define MV_PP2_CLS_PCTRL_UNI_EN_MASK		(1 << MV_PP2_CLS_PCTRL_UNI_EN_OFFS)
+
+/*-------------------------------------------------------------------------------*/
+/*PPv2.1 new counters MAS 3.20*/
+#define MV_PP2_V1_CNT_IDX_REG			(MV_PP2_REG_BASE + 0x7040)
+#define MV_PP2_V1_CNT_IDX_LKP(lkp, way)		((way) << 6 | (lkp))
+#define MV_PP2_V1_CNT_IDX_FLOW(index)		(index)
+
+#define MV_PP2_V1_CLS_LKP_TBL_HIT_REG		(MV_PP2_REG_BASE + 0x7700)
+#define MV_PP2_V1_CLS_FLOW_TBL_HIT_REG		(MV_PP2_REG_BASE + 0x7704)
+
+/*-------------------------------------------------------------------------------*/
+/*			 lkpid table structure					 */
+/*-------------------------------------------------------------------------------*/
+#define FLOWID_RXQ				0
+#define FLOWID_RXQ_BITS				8
+#define FLOWID_RXQ_MASK				(((1 << FLOWID_RXQ_BITS) - 1) << FLOWID_RXQ)
+
+#define FLOWID_MODE				8
+#define FLOWID_MODE_BITS			8
+#define FLOWID_MODE_MASK			(((1 << FLOWID_MODE_BITS) - 1) << FLOWID_MODE)
+#define FLOWID_MODE_MAX				((1 << FLOWID_MODE_BITS) - 1)
+
+#define FLOWID_FLOW				16
+#define FLOWID_FLOW_BITS			9
+#define FLOWID_FLOW_MASK			(((1 << FLOWID_FLOW_BITS) - 1) << FLOWID_FLOW)
+
+#define FLOWID_EN				25 /*one bit */
+#define FLOWID_EN_MASK				(1 << FLOWID_EN)
+
+
+/*-------------------------------------------------------------------------------*/
+/*			 flow table structure					 */
+/*-------------------------------------------------------------------------------*/
+
+/*-------------------------------  DWORD 0  ------------------------------------ */
+
+#define FLOW_LAST				0
+#define FLOW_LAST_MASK				1 /*one bit*/
+
+#define FLOW_ENGINE				1
+#define FLOW_ENGINE_BITS			3
+#define FLOW_ENGINE_MASK			(((1 << FLOW_ENGINE_BITS) - 1) << FLOW_ENGINE)
+#define FLOW_ENGINE_MAX				5 /* valid value 1 - 5 */
+
+#define FLOW_PORT_ID				4
+#define FLOW_PORT_ID_BITS			8
+#define FLOW_PORT_ID_MASK			(((1 << FLOW_PORT_ID_BITS) - 1) << FLOW_PORT_ID)
+#define FLOW_PORT_ID_MAX			((1 << FLOW_PORT_ID_BITS) - 1)
+
+#define FLOW_PORT_TYPE				12
+#define FLOW_PORT_TYPE_BITS			2
+#define FLOW_PORT_TYPE_MASK			(((1 << FLOW_PORT_TYPE_BITS) - 1) << FLOW_PORT_TYPE)
+#define FLOW_PORT_TYPE_MAX			2 /* valid value 0 - 2 */
+
+/*
+  PPv2.1  FLOW_PPPOE new fields in word 0
+*/
+
+#define FLOW_PPPOE				14
+#define FLOW_PPPOE_BITS				2
+#define FLOW_PPPOE_MASK				(((1 << FLOW_PPPOE_BITS) - 1) << FLOW_PPPOE)
+#define FLOW_PPPOE_MAX				2 /* valid value 0 - 2 */
+
+/*
+  PPv2.1  FLOW_VLAN new fields in word 0
+*/
+#define FLOW_VLAN				16
+#define FLOW_VLAN_BITS				3
+#define FLOW_VLAN_MASK				(((1 << FLOW_VLAN_BITS) - 1) << FLOW_VLAN)
+#define FLOW_VLAN_MAX				((1 << FLOW_VLAN_BITS) - 1)
+
+/*
+  PPv2.1  FLOW_MACME new fields in word 0
+*/
+#define FLOW_MACME				19
+#define FLOW_MACME_BITS				2
+#define FLOW_MACME_MASK				(((1 << FLOW_MACME_BITS) - 1) << FLOW_MACME)
+#define FLOW_MACME_MAX				2 /* valid value 0 - 2 */
+
+/*
+  PPv2.1  FLOW_UDF7 new fields in word 0
+*/
+#define FLOW_UDF7				21
+#define FLOW_UDF7_BITS				2
+#define FLOW_UDF7_MASK				(((1 << FLOW_UDF7_BITS) - 1) << FLOW_UDF7)
+#define FLOW_UDF7_MAX				((1 << FLOW_UDF7_BITS) - 1)
+
+/*
+  PPv2.1  FLOW_PORT_ID_SEL new bit in word 0
+*/
+#define FLOW_PORT_ID_SEL			23
+#define FLOW_PORT_ID_SEL_MASK			(1 << FLOW_PORT_ID_SEL)
+
+/*-------------------------------  DWORD 1  ------------------------------------ */
+
+#define FLOW_FIELDS_NUM				0
+#define FLOW_FIELDS_NUM_BITS			3
+#define FLOW_FIELDS_NUM_MASK			(((1 << FLOW_FIELDS_NUM_BITS) - 1) << FLOW_FIELDS_NUM)
+#define FLOW_FIELDS_NUM_MAX			4 /*valid vaue 0 - 4 */
+
+#define FLOW_LKP_TYPE				3
+#define FLOW_LKP_TYPE_BITS			6
+#define FLOW_LKP_TYPE_MASK			(((1 << FLOW_LKP_TYPE_BITS) - 1) << FLOW_LKP_TYPE)
+#define FLOW_LKP_TYPE_MAX			((1 << FLOW_LKP_TYPE_BITS) - 1)
+
+#define FLOW_FIELED_PRIO			9
+#define FLOW_FIELED_PRIO_BITS			6
+#define FLOW_FIELED_PRIO_MASK			(((1 << FLOW_FIELED_PRIO_BITS) - 1) << FLOW_FIELED_PRIO)
+#define FLOW_FIELED_PRIO_MAX			((1 << FLOW_FIELED_PRIO_BITS) - 1)
+
+/*
+  PPv2.1  FLOW_SEQ_CTRL new fields in word 1
+*/
+#define FLOW_SEQ_CTRL				15
+#define FLOW_SEQ_CTRL_BITS			3
+#define FLOW_SEQ_CTRL_MASK			(((1 << FLOW_SEQ_CTRL_BITS) - 1) << FLOW_SEQ_CTRL)
+#define FLOW_SEQ_CTRL_MAX			4
+
+
+
+/*----------------------------------  DWORD 2  ---------------------------------- */
+#define FLOW_FIELD0_ID				0
+#define FLOW_FIELD1_ID				6
+#define FLOW_FIELD2_ID				12
+#define FLOW_FIELD3_ID				18
+
+#define FLOW_FIELD_ID_BITS			6
+#define FLOW_FIELED_ID(num)			(FLOW_FIELD0_ID + (FLOW_FIELD_ID_BITS * (num)))
+#define FLOW_FIELED_MASK(num)			(((1 << FLOW_FIELD_ID_BITS) - 1) << (FLOW_FIELD_ID_BITS * (num)))
+#define FLOW_FIELED_MAX				((1 << FLOW_FIELD_ID_BITS) - 1)
+
+/*-------------------------------------------------------------------------------*/
+/*		  change length table structure					 */
+/*-------------------------------------------------------------------------------*/
+#define LEN_CHANGE_LENGTH			0
+#define LEN_CHANGE_LENGTH_BITS			7
+#define LEN_CHANGE_LENGTH_MAX			((1 << LEN_CHANGE_LENGTH_BITS) - 1)
+#define LEN_CHANGE_LENGTH_MASK			(((1 << LEN_CHANGE_LENGTH_BITS) - 1) << LEN_CHANGE_LENGTH)
+
+#define LEN_CHANGE_DEC				7 /*1 dec , 0 inc*/
+#define LEN_CHANGE_DEC_MASK			(1 << LEN_CHANGE_DEC)
+/*-------------------------------------------------------------------------------*/
+/*		Classifier Top Public initialization APIs    			 */
+/*-------------------------------------------------------------------------------*/
+/* workaround for HW bug - set las bit in flow entry 0*/
+void mvPp2ClsHwLastBitWorkAround(void);
+
+int mvPp2ClsInit(void);
+int mvPp2ClsHwPortDefConfig(int port, int way, int lkpid, int rxq);
+int mvPp2ClsHwEnable(int enable);
+int mvPp2ClsHwPortWaySet(int port, int way);
+int mvPp2ClsHwPortSpidSet(int port, int spid);
+int mvPp2ClsHwUniPortSet(int uni_port, int spid);
+int mvPp2ClsHwVirtPortSet(int virt_port, int gem_portid);
+int mvPp2ClsHwUdfSet(int udf_no, int offs_id, int offs_bits, int size_bits);
+int mvPp2V0ClsHwMtuSet(int port, int txp, int mtu);/*PPv2.1 feature changed MAS 3.7*/
+int mvPp2V1ClsHwMtuSet(int index, int mtu);/*PPv2.1 feature changed MAS 3.7*/
+#ifdef CONFIG_MV_ETH_PP2_1
+int mvPp2ClsHwOversizeRxqLowSet(int port, int rxq);/*PPv2.1 feature changed MAS 3.7*/
+#else
+int mvPp2ClsHwOversizeRxqSet(int port, int rxq);
+#endif
+int mvPp2ClsHwRxQueueHighSet(int port, int from, int queue);/*PPv2.1 new feature MAS 3.5*/
+int mvPp2ClsHwMhSet(int port, int virtEn, int uniEn, unsigned short mh);/*PPv2.1 new feature MAS 3.18*/
+int mvPp2ClsHwSeqInstrSizeSet(int index, int size);/*PPv2.1 new feature MAS 3.14*/
+void mvPp2ClsShadowInit(void);
+
+/*-------------------------------------------------------------------------------*/
+/*		Classifier Top Public lkpid table APIs     			 */
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_CLS_LKP_TBL_SIZE				(64)
+
+typedef struct mvPp2ClsLkpEntry {
+	unsigned int lkpid;
+	unsigned int way;
+	unsigned int data;
+} MV_PP2_CLS_LKP_ENTRY;
+
+int mvPp2ClsHwLkpWrite(int lkpid, int way, MV_PP2_CLS_LKP_ENTRY *fe);
+int mvPp2ClsHwLkpRead(int lkpid, int way, MV_PP2_CLS_LKP_ENTRY *fe);
+int mvPp2ClsHwLkpClear(int lkpid, int way);
+int mvPp2ClsSwLkpDump(MV_PP2_CLS_LKP_ENTRY *fe);
+int mvPp2ClsHwLkpDump(void);
+/*PPv2.1 new counters MAS 3.20*/
+int mvPp2V1ClsHwLkpHitsDump(void);
+void mvPp2ClsSwLkpClear(MV_PP2_CLS_LKP_ENTRY *fe);
+void mvPp2ClsHwLkpClearAll(void);
+
+int mvPp2ClsSwLkpRxqSet(MV_PP2_CLS_LKP_ENTRY *fe, int rxq);
+int mvPp2ClsSwLkpEnSet(MV_PP2_CLS_LKP_ENTRY *fe, int en);
+int mvPp2ClsSwLkpFlowSet(MV_PP2_CLS_LKP_ENTRY *fe, int flow_idx);
+int mvPp2ClsSwLkpModSet(MV_PP2_CLS_LKP_ENTRY *fe, int mod_base);
+int mvPp2ClsSwLkpRxqGet(MV_PP2_CLS_LKP_ENTRY *fe, int *rxq);
+int mvPp2ClsSwLkpEnGet(MV_PP2_CLS_LKP_ENTRY *fe, int *en);
+int mvPp2ClsSwLkpFlowGet(MV_PP2_CLS_LKP_ENTRY *fe, int *flow_idx);
+int mvPp2ClsSwLkpModGet(MV_PP2_CLS_LKP_ENTRY *fe, int *mod_base);
+
+/*-------------------------------------------------------------------------------*/
+/*		Classifier Top Public flows table APIs   			 */
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_CLS_FLOWS_TBL_SIZE			(512)
+#define MV_PP2_CLS_FLOWS_TBL_DATA_WORDS			(3)
+#define MV_PP2_CLS_FLOWS_TBL_FIELDS_MAX			(4)
+
+typedef struct mvPp2ClsFlowEntry {
+	unsigned int index;
+	unsigned int data[MV_PP2_CLS_FLOWS_TBL_DATA_WORDS];
+} MV_PP2_CLS_FLOW_ENTRY;
+
+int mvPp2ClsHwFlowWrite(int index, MV_PP2_CLS_FLOW_ENTRY *fe);
+int mvPp2ClsHwFlowRead(int index, MV_PP2_CLS_FLOW_ENTRY *fe);
+int mvPp2ClsHwFlowClear(int index);
+int mvPp2ClsSwFlowDump(MV_PP2_CLS_FLOW_ENTRY *fe);
+int mvPp2V1ClsHwLkpHitGet(int lkpid, int way,  unsigned int *cnt);
+int mvPp2ClsHwFlowDump(void);
+int mvPp2V1ClsHwFlowHitsDump(void);
+void mvPp2ClsSwFlowClear(MV_PP2_CLS_FLOW_ENTRY *fe);
+void mvPp2ClsHwFlowClearAll(void);
+
+/*
+int mvPp2ClsSwFlowHekSet(MV_PP2_CLS_FLOW_ENTRY *fe, int num_of_fields, int field_ids[]);
+*/
+int mvPp2ClsSwFlowHekSet(MV_PP2_CLS_FLOW_ENTRY *fe, int field_index, int field_id);
+int mvPp2ClsSwFlowHekNumSet(MV_PP2_CLS_FLOW_ENTRY *fe, int num_of_fields);
+int mvPp2ClsSwFlowPortSet(MV_PP2_CLS_FLOW_ENTRY *fe, int type, int portid);
+int mvPp2ClsSwFlowUdf7Set(MV_PP2_CLS_FLOW_ENTRY *fe,  int mode);/*PPv2.1 new feature MAS 3.18*/
+int mvPp2ClsSwFlowMacMeSet(MV_PP2_CLS_FLOW_ENTRY *fe,  int mode);/*PPv2.1 new feature MAS 3.18*/
+int mvPp2ClsSwFlowVlanSet(MV_PP2_CLS_FLOW_ENTRY *fe,  int mode);/*PPv2.1 new feature MAS 3.18*/
+int mvPp2ClsSwFlowPppoeSet(MV_PP2_CLS_FLOW_ENTRY *fe,  int mode);/*PPv2.1 new feature MAS 3.18*/
+int mvPp2ClsSwPortIdSelect(MV_PP2_CLS_FLOW_ENTRY *fe, int from);/*PPv2.1 new feature MAS 3.18*/
+int mvPp2ClsSwFlowEngineSet(MV_PP2_CLS_FLOW_ENTRY *fe, int engine, int is_last);
+int mvPp2ClsSwFlowSeqCtrlSet(MV_PP2_CLS_FLOW_ENTRY *fe, int mode);/*PPv2.1 new feature MAS 3.14*/
+int mvPp2ClsSwFlowExtraSet(MV_PP2_CLS_FLOW_ENTRY *fe, int type, int prio);
+int mvPp2ClsSwFlowHekGet(MV_PP2_CLS_FLOW_ENTRY *fe, int *num_of_fields, int field_ids[]);
+int mvPp2ClsSwFlowPortGet(MV_PP2_CLS_FLOW_ENTRY *fe, int *type, int *portid);
+int mvPp2ClsSwFlowEngineGet(MV_PP2_CLS_FLOW_ENTRY *fe, int *engine, int *is_last);
+int mvPp2ClsSwFlowExtraGet(MV_PP2_CLS_FLOW_ENTRY *fe, int *type, int *prio);
+
+
+/*-------------------------------------------------------------------------------*/
+/*		Classifier Top Public length change table APIs  		 */
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_CLS_LEN_CHANGE_TBL_SIZE				(256)
+
+int mvPp2ClsPktLenChangeDump(void);
+int mvPp2ClsPktLenChangeSet(int index, int length);
+int mvPp2ClsPktLenChangeGet(int index, int *length);
+
+
+/*-------------------------------------------------------------------------------*/
+/*			additional cls debug APIs				 */
+/*-------------------------------------------------------------------------------*/
+int mvPp2ClsHwRegsDump(void);
+
+#endif /* MV_CLS_HW */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2ClsMcHw.c b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2ClsMcHw.c
new file mode 100644
index 000000000000..9f0cdbbe74d1
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2ClsMcHw.c
@@ -0,0 +1,339 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvPp2ClsMcHw.h"
+
+MC_SHADOW_ENTRY  mvMcShadowTbl[MV_PP2_MC_TBL_SIZE];
+
+/******************************************************************************
+ * Common utilities
+ ******************************************************************************/
+/*
+static void mvPp2McShadowSet(int index, int next)
+{
+	mvMcShadowTbl[index].valid = 1;
+	mvMcShadowTbl[index].next = next;
+}
+*/
+/*-------------------------------------------------------------------------------*/
+/*
+static void mvPp2McShadowClear(int index)
+{
+	mvMcShadowTbl[index].valid = 0;
+}
+*/
+/*-------------------------------------------------------------------------------*/
+/*
+static void mvPp2McShadowClearAll(void)
+{
+	int index;
+
+	for (index = 0; index < MV_PP2_MC_TBL_SIZE; index++)
+		mvMcShadowTbl[index].valid = 0;
+}
+*/
+/*-------------------------------------------------------------------------------*/
+/*
+int mvPp2McFirstFreeGet(void)
+{
+	int index;
+
+	Go through the all entires from first to last
+	for (index = 0; index < MV_PP2_MC_TBL_SIZE; index++) {
+		if (!mvMcShadowTbl[index].valid)
+			break;
+	}
+	return index;
+}
+*/
+/*-------------------------------------------------------------------------------*/
+int	mvPp2McHwWrite(MV_PP2_MC_ENTRY *mc, int index)
+{
+	PTR_VALIDATE(mc);
+
+	POS_RANGE_VALIDATE(index, MV_PP2_MC_TBL_SIZE - 1);
+
+	mc->index = index;
+
+	/* write index */
+	mvPp2WrReg(MV_PP2_MC_INDEX_REG, mc->index);
+
+	/* write data */
+	mvPp2WrReg(MV_PP2_MC_DATA1_REG, mc->sram.regs.data1);
+	mvPp2WrReg(MV_PP2_MC_DATA2_REG, mc->sram.regs.data2);
+	mvPp2WrReg(MV_PP2_MC_DATA3_REG, mc->sram.regs.data3);
+
+	/*
+	update shadow
+	next = ((mc->sram.regs.data3 & MV_PP2_MC_DATA3_NEXT_MASK) >> MV_PP2_MC_DATA3_NEXT);
+	mvPp2McShadowSet(mc->index, next);
+	*/
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int	mvPp2McHwRead(MV_PP2_MC_ENTRY *mc, int index)
+{
+	PTR_VALIDATE(mc);
+
+	POS_RANGE_VALIDATE(index, MV_PP2_MC_TBL_SIZE - 1);
+
+	mc->index = index;
+
+	/* write index */
+	mvPp2WrReg(MV_PP2_MC_INDEX_REG, mc->index);
+
+	/* read data */
+	mc->sram.regs.data1 = mvPp2RdReg(MV_PP2_MC_DATA1_REG);
+	mc->sram.regs.data2 = mvPp2RdReg(MV_PP2_MC_DATA2_REG);
+	mc->sram.regs.data3 = mvPp2RdReg(MV_PP2_MC_DATA3_REG);
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int	mvPp2McSwDump(MV_PP2_MC_ENTRY *mc)
+{
+	mvOsPrintf("INDEX:0x%2x\t", mc->index);
+
+	mvOsPrintf("IPTR:0x%1x\t",
+			(mc->sram.regs.data1 >> MV_PP2_MC_DATA1_IPTR) & ACT_HWF_ATTR_IPTR_MAX);
+
+	mvOsPrintf("DPTR:0x%1x\t",
+			(mc->sram.regs.data1 >> MV_PP2_MC_DATA1_DPTR) & ACT_HWF_ATTR_DPTR_MAX);
+
+	if (mc->sram.regs.data2 &  MV_PP2_MC_DATA2_GEM_ID_EN)
+		mvOsPrintf("GPID:0x%3x\t", (mc->sram.regs.data2 >> MV_PP2_MC_DATA2_GEM_ID) & ACT_QOS_ATTR_GEM_ID_MAX);
+	else
+		mvOsPrintf("GPID:INV\t");
+
+	if (mc->sram.regs.data2 &  MV_PP2_MC_DATA2_DSCP_EN)
+		mvOsPrintf("DSCP:0x%1x\t", (mc->sram.regs.data2 >> MV_PP2_MC_DATA2_DSCP) & ACT_QOS_ATTR_DSCP_MAX);
+	else
+		mvOsPrintf("DSCP:INV\t");
+
+	if (mc->sram.regs.data2 &  MV_PP2_MC_DATA2_PRI_EN)
+		mvOsPrintf("PRI:0x%1x \t", (mc->sram.regs.data2 >> MV_PP2_MC_DATA2_PRI) & ACT_QOS_ATTR_PRI_MAX);
+	else
+		mvOsPrintf("DSCP:INV\t");
+
+	mvOsPrintf("QUEUE:0x%2x\t", (mc->sram.regs.data3 >> MV_PP2_MC_DATA3_QUEUE) & 0xFF);/*TODO use gbe define*/
+
+	if (mc->sram.regs.data3 & MV_PP2_MC_DATA3_HWF_EN)
+		mvOsPrintf("HW_FWD:ENABLE\t");
+
+	else
+		mvOsPrintf("HW_FWD:DISABLE\t");
+
+	mvOsPrintf("NEXT:0x%2x\t", (mc->sram.regs.data3 >> MV_PP2_MC_DATA3_NEXT) & MV_PP2_MC_INDEX_MAX);
+
+	mvOsPrintf("\n");
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+int	mvPp2McHwDump(void)
+{
+	int index;
+	MV_PP2_MC_ENTRY mc;
+
+	for (index = 0; index < MV_PP2_MC_TBL_SIZE; index++) {
+		mc.index = index;
+		mvPp2McHwRead(&mc, index);
+		mvPp2McSwDump(&mc);
+		mvOsPrintf("-------------------------------------------------------------------------");
+		mvOsPrintf("-------------------------------------------------------------------------\n");
+
+	}
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+void	mvPp2McSwClear(MV_PP2_MC_ENTRY *mc)
+{
+	memset(mc, 0, sizeof(MV_PP2_MC_ENTRY));
+}
+
+/*-------------------------------------------------------------------------------*/
+void	mvPp2McHwClearAll(void)
+{
+	int index;
+	MV_PP2_MC_ENTRY mc;
+
+	mvPp2McSwClear(&mc);
+
+	for (index = 0; index < MV_PP2_MC_TBL_SIZE; index++)
+		mvPp2McHwWrite(&mc, index);
+
+}
+/*-------------------------------------------------------------------------------*/
+
+int	mvPp2McSwModSet(MV_PP2_MC_ENTRY *mc, int data_ptr, int instr_offs)
+{
+	PTR_VALIDATE(mc);
+	POS_RANGE_VALIDATE(data_ptr, ACT_HWF_ATTR_DPTR_MAX);
+	POS_RANGE_VALIDATE(instr_offs, ACT_HWF_ATTR_IPTR_MAX);
+
+	mc->sram.regs.data1 &= ~(ACT_HWF_ATTR_DPTR_MAX << MV_PP2_MC_DATA1_DPTR);
+	mc->sram.regs.data1 |= (data_ptr << MV_PP2_MC_DATA1_DPTR);
+
+	mc->sram.regs.data1 &= ~(ACT_HWF_ATTR_IPTR_MAX << MV_PP2_MC_DATA1_IPTR);
+	mc->sram.regs.data1 |= (instr_offs << MV_PP2_MC_DATA1_IPTR);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int	mvPp2McSwGpidSet(MV_PP2_MC_ENTRY *mc, int gpid, int enable)
+{
+	PTR_VALIDATE(mc);
+	POS_RANGE_VALIDATE(gpid, ACT_QOS_ATTR_GEM_ID_MAX);
+	POS_RANGE_VALIDATE(enable, 1);
+	if (enable) {
+		mc->sram.regs.data2 &= ~(ACT_QOS_ATTR_GEM_ID_MAX << MV_PP2_MC_DATA2_GEM_ID);
+		mc->sram.regs.data2 |= (gpid << MV_PP2_MC_DATA2_GEM_ID);
+		mc->sram.regs.data2 |= MV_PP2_MC_DATA2_GEM_ID_EN;
+
+	} else
+		mc->sram.regs.data2 &= ~MV_PP2_MC_DATA2_GEM_ID_EN;
+
+	return MV_OK;
+
+}
+/*-------------------------------------------------------------------------------*/
+int	mvPp2McSwDscpSet(MV_PP2_MC_ENTRY *mc, int dscp, int enable)
+{
+	PTR_VALIDATE(mc);
+	POS_RANGE_VALIDATE(dscp, ACT_QOS_ATTR_DSCP_MAX);
+	POS_RANGE_VALIDATE(enable, 1);
+	if (enable) {
+		mc->sram.regs.data2 &= ~(ACT_QOS_ATTR_DSCP_MAX << MV_PP2_MC_DATA2_DSCP);
+		mc->sram.regs.data2 |= (dscp << MV_PP2_MC_DATA2_DSCP);
+		mc->sram.regs.data2 |= MV_PP2_MC_DATA2_DSCP_EN;
+
+	} else
+		mc->sram.regs.data2 &= MV_PP2_MC_DATA2_DSCP_EN;
+
+	return MV_OK;
+
+}
+/*-------------------------------------------------------------------------------*/
+int	mvPp2McSwPrioSet(MV_PP2_MC_ENTRY *mc, int prio, int enable)
+{
+	PTR_VALIDATE(mc);
+	POS_RANGE_VALIDATE(prio, ACT_QOS_ATTR_PRI_MAX);
+	POS_RANGE_VALIDATE(enable, 1);
+	if (enable) {
+		mc->sram.regs.data2 &= ~(ACT_QOS_ATTR_PRI_MAX << MV_PP2_MC_DATA2_PRI);
+		mc->sram.regs.data2 |= (prio << MV_PP2_MC_DATA2_PRI);
+		mc->sram.regs.data2 |= MV_PP2_MC_DATA2_PRI_EN;
+
+	} else
+		mc->sram.regs.data2 &= ~MV_PP2_MC_DATA2_PRI_EN;
+
+	return MV_OK;
+
+}
+/*-------------------------------------------------------------------------------*/
+int	mvPp2McSwQueueSet(MV_PP2_MC_ENTRY *mc, int q)
+{
+	PTR_VALIDATE(mc);
+	POS_RANGE_VALIDATE(q, 0xFF);/*TODO use gbe define*/
+
+	mc->sram.regs.data3 &= ~(0xFF << MV_PP2_MC_DATA3_QUEUE);/*TODO use gbe define*/
+	mc->sram.regs.data3 |= (q << MV_PP2_MC_DATA3_QUEUE);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+int	mvPp2McSwForwardEn(MV_PP2_MC_ENTRY *mc, int enable)
+{
+	PTR_VALIDATE(mc);
+	POS_RANGE_VALIDATE(enable, 1);
+
+	if (enable)
+		mc->sram.regs.data3 |= MV_PP2_MC_DATA3_HWF_EN;
+	else
+		mc->sram.regs.data3 &= ~MV_PP2_MC_DATA3_HWF_EN;
+
+	return MV_OK;
+
+}
+/*-------------------------------------------------------------------------------*/
+
+int	mvPp2McSwNext(MV_PP2_MC_ENTRY *mc, int next)
+{
+	PTR_VALIDATE(mc);
+	/* if next = -1 last link */
+	DECIMAL_RANGE_VALIDATE(next, -1, MV_PP2_MC_INDEX_MAX);
+
+	mc->sram.regs.data3 &= ~(MV_PP2_MC_INDEX_MAX << MV_PP2_MC_DATA3_NEXT);
+	mc->sram.regs.data3 |= (next << MV_PP2_MC_DATA3_NEXT);
+
+	return MV_OK;
+
+}
+/*-------------------------------------------------------------------------------*/
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2ClsMcHw.h b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2ClsMcHw.h
new file mode 100644
index 000000000000..c8a22faf28bb
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/cls/mvPp2ClsMcHw.h
@@ -0,0 +1,150 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __MV_CLS_MC_HW_H__
+#define __MV_CLS_MC_HW_H__
+
+#include "mvPp2ClsActHw.h"
+#include "../common/mvPp2ErrCode.h"
+#include "../common/mvPp2Common.h"
+#include "../gbe/mvPp2GbeRegs.h"
+
+/*-------------------------------------------------------------------------------*/
+/*			Multicast table Top Registers	    			 */
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_MC_INDEX_REG			(MV_PP2_REG_BASE + 0x160)
+#define MV_PP2_MC_INDEX_MAX			ACT_DUP_FID_MAX
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_MC_DATA1_REG			(MV_PP2_REG_BASE + 0x164)
+#define	MV_PP2_MC_DATA1_DPTR			1
+#define	MV_PP2_MC_DATA1_IPTR			16
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_MC_DATA2_REG			(MV_PP2_REG_BASE + 0x168)
+#define MV_PP2_MC_DATA2_GEM_ID			0
+#define MV_PP2_MC_DATA2_PRI			12
+#define MV_PP2_MC_DATA2_DSCP			15
+#define MV_PP2_MC_DATA2_GEM_ID_EN		(1 << 21)
+#define MV_PP2_MC_DATA2_PRI_EN			(1 << 22)
+#define MV_PP2_MC_DATA2_DSCP_EN			(1 << 23)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_MC_DATA3_REG			(MV_PP2_REG_BASE + 0x16C)
+
+#define MV_PP2_MC_DATA3_QUEUE			0
+
+#define MV_PP2_MC_DATA3_HWF_EN			(1 << 8)
+
+#define MV_PP2_MC_DATA3_NEXT			16
+#define MV_PP2_MC_DATA3_NEXT_MASK		(MV_PP2_MC_INDEX_MAX << MV_PP2_MC_DATA3_NEXT)
+
+
+typedef struct {
+	int             valid;
+	int		next;
+} MC_SHADOW_ENTRY;
+
+#define LAST 	(-1)
+/*-------------------------------------------------------------------------------*/
+/*			Multicast table Public APIs				 */
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_MC_TBL_SIZE		256
+#define MV_PP2_MC_WORDS			3
+
+
+typedef struct mvPp2McEntry {
+	unsigned int index;
+	union {
+		MV_U32 words[MV_PP2_MC_WORDS];
+		struct {
+			MV_U32 data1;/* 0x164 */
+			MV_U32 data2;/* 0x168 */
+			MV_U32 data3;/* 0x16c */
+		} regs;
+	} sram;
+} MV_PP2_MC_ENTRY;
+/*
+int	mvPp2McFirstFreeGet(void)
+*/
+
+int	mvPp2McHwWrite(MV_PP2_MC_ENTRY *mc, int index);
+int	mvPp2McHwRead(MV_PP2_MC_ENTRY *mc, int index);
+int	mvPp2McSwDump(MV_PP2_MC_ENTRY *mc);
+int	mvPp2McHwDump(void);
+void	mvPp2McSwClear(MV_PP2_MC_ENTRY *mc);
+void	mvPp2McHwClearAll(void);
+
+
+int	mvPp2McSwModSet(MV_PP2_MC_ENTRY *mc, int data_ptr, int instr_offs);
+int	mvPp2McSwGpidSet(MV_PP2_MC_ENTRY *mc, int gpid, int enable);
+int	mvPp2McSwDscpSet(MV_PP2_MC_ENTRY *mc, int dscp, int enable);
+int	mvPp2McSwPrioSet(MV_PP2_MC_ENTRY *mc, int prio, int enable);
+int	mvPp2McSwQueueSet(MV_PP2_MC_ENTRY *mc, int q);
+int	mvPp2McSwForwardEn(MV_PP2_MC_ENTRY *mc, int enable);
+int	mvPp2McSwNext(MV_PP2_MC_ENTRY *mc, int next);
+
+
+#endif /*__MV_CLS_MC_HW_H__ */
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/common/mvPp2Common.c b/drivers/net/ethernet/mvebu_net/pp2/hal/common/mvPp2Common.c
new file mode 100644
index 000000000000..92271bdd176c
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/common/mvPp2Common.c
@@ -0,0 +1,125 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#include "mvPp2Common.h"
+
+/*#define PP2_REG_WRITE_TRACE*/
+/*#define PP2_REG_READ_TRACE*/
+
+int mvPp2WrReg(unsigned int offset, unsigned int  val)
+{
+	MV_PP2_REG_WRITE(offset, val);
+
+#if defined(PP2_REG_WRITE_TRACE)
+	mvOsPrintf("REG:0x%08X	W:0x%08X\n", offset, val);
+#endif
+	return val;
+}
+
+int mvPp2RdReg(unsigned int offset)
+{
+	unsigned int val = MV_PP2_REG_READ(offset);
+
+#if defined(PP2_REG_READ_TRACE)
+	mvOsPrintf("REG:0x%08X	R:0x%08X\n", offset, val);
+#endif
+	return val;
+}
+
+int mvPp2SPrintReg(char *buf, unsigned int  reg_addr, char *reg_name)
+{
+	return mvOsSPrintf(buf, "  %-32s: 0x%x = 0x%08x\n", reg_name, reg_addr, mvPp2RdReg(reg_addr));
+}
+
+void mvPp2PrintReg(unsigned int reg_addr, char *reg_name)
+{
+	mvOsPrintf("  %-32s: 0x%x = 0x%08x\n", reg_name, reg_addr, mvPp2RdReg(reg_addr));
+}
+
+void mvPp2PrintReg2(MV_U32 reg_addr, char *reg_name, MV_U32 index)
+{
+	char buf[64];
+
+	mvOsSPrintf(buf, "%s[%d]", reg_name, index);
+	mvOsPrintf("  %-32s: 0x%x = 0x%08x\n", buf, reg_addr, mvPp2RdReg(reg_addr));
+}
+
+void mvPp2RegPrintNonZero(MV_U32 reg_addr, char *reg_name)
+{
+	unsigned int regVal = MV_REG_READ(reg_addr);
+
+	if (regVal)
+		mvOsPrintf("  %-32s: 0x%x = 0x%08x\n", reg_name, reg_addr, regVal);
+}
+
+void mvPp2RegPrintNonZero2(MV_U32 reg_addr, char *reg_name, MV_U32 index)
+{
+	char buf[64];
+	unsigned int regVal = MV_REG_READ(reg_addr);
+
+	if (regVal) {
+		mvOsSPrintf(buf, "%s[%d]", reg_name, index);
+		mvOsPrintf("  %-32s: 0x%x = 0x%08x\n", buf, reg_addr, regVal);
+	}
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/common/mvPp2Common.h b/drivers/net/ethernet/mvebu_net/pp2/hal/common/mvPp2Common.h
new file mode 100644
index 000000000000..24d11e04e972
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/common/mvPp2Common.h
@@ -0,0 +1,185 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#ifndef __MV_PP2_COMMON_H__
+#define __MV_PP2_COMMON_H__
+
+#include "mvTypes.h"
+#include "mvCommon.h"
+#include "mvOs.h"
+
+#ifdef CONFIG_ARCH_MVEBU
+#include "mvNetConfig.h"
+#else
+#include "mvSysEthConfig.h"
+#endif
+
+/*--------------------------------------------------------------------*/
+/*			PP2 COMMON MACROS			      */
+/*--------------------------------------------------------------------*/
+
+#define DECIMAL_RANGE_VALIDATE(_VALUE_ , _MIN_, _MAX_) {\
+	if (((_VALUE_) > (_MAX_)) || ((_VALUE_) < (_MIN_))) {\
+		mvOsPrintf("%s: value %d (0x%x) is out of range [%d , %d].\n",\
+				__func__, (_VALUE_), (_VALUE_), (_MIN_), (_MAX_));\
+		return MV_ERROR;\
+	} \
+}
+
+#define RANGE_VALIDATE(_VALUE_ , _MIN_, _MAX_) {\
+	if (((_VALUE_) > (_MAX_)) || ((_VALUE_) < (_MIN_))) {\
+		mvOsPrintf("%s: value 0x%X (%d) is out of range [0x%X , 0x%X].\n",\
+				__func__, (_VALUE_), (_VALUE_), (_MIN_), (_MAX_));\
+		return MV_ERROR;\
+	} \
+}
+
+#define BIT_RANGE_VALIDATE(_VALUE_)			RANGE_VALIDATE(_VALUE_ , 0, 1)
+
+#define POS_RANGE_VALIDATE(_VALUE_, _MAX_)		RANGE_VALIDATE(_VALUE_ , 0, _MAX_)
+
+#define PTR_VALIDATE(_ptr_) {\
+	if (_ptr_ == NULL) {\
+		mvOsPrintf("%s: null pointer.\n", __func__);\
+		return MV_ERROR;\
+	} \
+}
+
+#define RET_VALIDATE(_ret_) {\
+	if (_ret_ != MV_OK) {\
+		mvOsPrintf("%s: function call fail.\n", __func__);\
+		return MV_ERROR;\
+	} \
+}
+
+
+#define WARN_OOM(cond) if (cond) { mvOsPrintf("%s: out of memory\n", __func__); return NULL; }
+
+
+/*--------------------------------------------------------------------*/
+/*			PP2 COMMON FUNCTIONS			      */
+/*--------------------------------------------------------------------*/
+
+
+int mvPp2RdReg(unsigned int offset);
+
+int mvPp2WrReg(unsigned int offset, unsigned int  val);
+
+void mvPp2PrintReg(unsigned int  reg_addr, char *reg_name);
+void mvPp2PrintReg2(MV_U32 reg_addr, char *reg_name, MV_U32 index);
+
+int mvPp2SPrintReg(char *buf, unsigned int  reg_addr, char *reg_name);
+
+void mvPp2RegPrintNonZero(MV_U32 reg_addr, char *reg_name);
+void mvPp2RegPrintNonZero2(MV_U32 reg_addr, char *reg_name, MV_U32 index);
+
+/*--------------------------------------------------------------------*/
+/*			PP2 COMMON DEFINETIONS			      */
+/*--------------------------------------------------------------------*/
+#define NOT_IN_USE					(-1)
+#define IN_USE						(1)
+#define DWORD_BITS_LEN					32
+#define DWORD_BYTES_LEN                                 4
+#define RETRIES_EXCEEDED				15000
+#define ONE_BIT_MAX					1
+#define UNI_MAX						7
+#define ETH_PORTS_NUM					7
+
+/*--------------------------------------------------------------------*/
+/*			PNC COMMON DEFINETIONS			      */
+/*--------------------------------------------------------------------*/
+
+/*
+ HW_BYTE_OFFS
+ return HW byte offset in 4 bytes register
+ _offs_: native offset (LE)
+ LE example: HW_BYTE_OFFS(1) = 1
+ BE example: HW_BYTE_OFFS(1) = 2
+*/
+
+#if defined(MV_CPU_LE)
+	#define HW_BYTE_OFFS(_offs_)		(_offs_)
+#else
+	#define HW_BYTE_OFFS(_offs_)		((3 - ((_offs_) % 4)) + (((_offs_) / 4) * 4))
+#endif
+
+
+#define TCAM_DATA_BYTE_OFFS_LE(_offs_)		(((_offs_) - ((_offs_) % 2)) * 2 + ((_offs_) % 2))
+#define TCAM_DATA_MASK_OFFS_LE(_offs_)		(((_offs_) * 2) - ((_offs_) % 2)  + 2)
+
+/*
+ TCAM_DATA_BYTE/MASK
+ tcam data devide into 4 bytes registers
+ each register include 2 bytes of data and 2 bytes of mask
+ the next macros calc data/mask offset in 4 bytes register
+ _offs_: native offset (LE) in data bytes array
+ relevant only for TCAM data bytes
+ used by PRS and CLS2
+*/
+#define TCAM_DATA_BYTE(_offs_)			(HW_BYTE_OFFS(TCAM_DATA_BYTE_OFFS_LE(_offs_)))
+#define TCAM_DATA_MASK(_offs_)			(HW_BYTE_OFFS(TCAM_DATA_MASK_OFFS_LE(_offs_)))
+
+
+
+
+#endif /* __MV_PP2_ERR_CODE_H__ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/common/mvPp2ErrCode.h b/drivers/net/ethernet/mvebu_net/pp2/hal/common/mvPp2ErrCode.h
new file mode 100644
index 000000000000..6d19f14cc1cf
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/common/mvPp2ErrCode.h
@@ -0,0 +1,119 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __MV_PP2_ERR_CODE_H__
+#define __MV_PP2_ERR_CODE_H__
+
+#define  MV_ERR_CODE_BASE						0x80000000
+#define  MV_PP2_ERR_CODE_BASE					(MV_ERR_CODE_BASE | 0x00001000)
+
+
+#define  MV_PP2_PRS						(MV_PP2_ERR_CODE_BASE | 0x00000100)
+#define  MV_PP2_CLS						(MV_PP2_ERR_CODE_BASE | 0x00000200)
+#define  MV_PP2_CLS2						(MV_PP2_ERR_CODE_BASE | 0x00000400)
+#define  MV_PP2_CLS3						(MV_PP2_ERR_CODE_BASE | 0x00000800)
+#define  MV_PP2_CLS4						(MV_PP2_ERR_CODE_BASE | 0x00000800)
+
+
+/*****************************************************************************
+
+
+
+			    E R R O R   C O D E S
+
+
+*****************************************************************************/
+/* #define MV_OK 0  define in mvTypes*/
+#define EQUALS 0
+#define NOT_EQUALS 1
+
+/* PRS error codes */
+#define  MV_PRS_ERR						(MV_PP2_PRS | 0x00)
+#define  MV_PRS_OUT_OF_RAGE					(MV_PP2_PRS | 0x01)
+#define  MV_PRS_NULL_POINTER					(MV_PP2_PRS | 0x02)
+
+/* CLS error codes */
+#define  MV_CLS_ERR						(MV_PP2_CLS | 0x00)
+#define  MV_CLS_OUT_OF_RAGE					(MV_PP2_CLS | 0x01)
+
+/* CLS2 error codes */
+#define  MV_CLS2_ERR						(MV_PP2_CLS2 | 0x00)
+#define  MV_CLS2_OUT_OF_RAGE					(MV_PP2_CLS2 | 0x01)
+#define  MV_CLS2_NULL_POINTER					(MV_PP2_CLS2 | 0x02)
+#define  MV_CLS2_RETRIES_EXCEEDED				(MV_PP2_CLS2 | 0x03)
+
+/* CLS3 error codes */
+#define  MV_CLS3_ERR						(MV_PP2_CLS3 | 0x00)
+#define  MV_CLS3_OUT_OF_RAGE					(MV_PP2_CLS3 | 0x01)
+#define  MV_CLS3_NULL_POINTER					(MV_PP2_CLS3 | 0x02)
+#define  MV_CLS3_RETRIES_EXCEEDED				(MV_PP2_CLS3 | 0x03)
+#define  MV_CLS3_SW_INTERNAL					(MV_PP2_CLS3 | 0x04)
+
+/* CLS4 error codes */
+#define  MV_CLS4_ERR						(MV_PP2_CLS4 | 0x00)
+#define  MV_CLS4_OUT_OF_RAGE					(MV_PP2_CLS4 | 0x01)
+#define  MV_CLS4_NULL_POINTER					(MV_PP2_CLS4 | 0x02)
+#define  MV_CLS4_RETRIES_EXCEEDED				(MV_PP2_CLS4 | 0x03)
+
+#endif /* __MV_PP2_ERR_CODE_H__ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/dpi/mvPp2DpiHw.c b/drivers/net/ethernet/mvebu_net/pp2/hal/dpi/mvPp2DpiHw.c
new file mode 100644
index 000000000000..55a84bf72fba
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/dpi/mvPp2DpiHw.c
@@ -0,0 +1,371 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCommon.h"  /* Should be included before mvSysHwConfig */
+#include "mvTypes.h"
+#include "mvDebug.h"
+#include "mvOs.h"
+
+#include "common/mvPp2Common.h"
+#include "gbe/mvPp2Gbe.h"
+#include "mvPp2DpiHw.h"
+
+MV_PP2_QUEUE_CTRL *mvPp2DpiReqQ;
+MV_PP2_QUEUE_CTRL *mvPp2DpiResQ;
+
+
+void	mvPp2DpiInit(void)
+{
+	int i;
+
+	/* Reset all counters */
+	mvPp2WrReg(MV_PP2_DPI_INIT_REG, 1);
+
+	/* Clear all counters control  registers */
+	for (i = 0; i < MV_PP2_DPI_BYTE_VAL_MAX; i++)
+		mvPp2DpiByteConfig(i, 0);
+
+	/* Clear all counters window registers */
+	for (i = 0; i < MV_PP2_DPI_CNTRS; i++)
+		mvPp2DpiCntrWinSet(i, 0, 0);
+
+	/* Create Request and Result queues */
+	mvPp2DpiQueuesCreate(MV_PP2_DPI_Q_SIZE);
+}
+
+void	mvPp2DpiRegs(void)
+{
+	int    i;
+
+	mvOsPrintf("\n[DIP registers: %d counters]\n", MV_PP2_DPI_CNTRS);
+
+	mvPp2PrintReg(MV_PP2_DPI_INIT_REG,	 "MV_PP2_DPI_INIT_REG");
+	mvPp2PrintReg(MV_PP2_DPI_REQ_Q_ADDR_REG, "MV_PP2_DPI_REQ_Q_ADDR_REG");
+	mvPp2PrintReg(MV_PP2_DPI_RES_Q_ADDR_REG, "MV_PP2_DPI_RES_Q_ADDR_REG");
+	mvPp2PrintReg(MV_PP2_DPI_Q_SIZE_REG,     "MV_PP2_DPI_Q_SIZE_REG");
+	mvPp2PrintReg(MV_PP2_DPI_Q_STATUS_REG,   "MV_PP2_DPI_Q_STATUS_REG");
+	mvPp2PrintReg(MV_PP2_DPI_Q_INDEX_REG,    "MV_PP2_DPI_Q_INDEX_REG");
+	mvPp2PrintReg(MV_PP2_DPI_Q_PEND_REG,     "MV_PP2_DPI_Q_PEND_REG");
+	mvPp2PrintReg(MV_PP2_DPI_Q_THRESH_REG,   "MV_PP2_DPI_Q_THRESH_REG");
+
+	mvOsPrintf("\nDPI Bytes per counter configuration\n");
+	for (i = 0; i < MV_PP2_DPI_BYTE_VAL_MAX; i++) {
+		mvPp2WrReg(MV_PP2_DPI_BYTE_VAL_REG, i);
+		mvPp2RegPrintNonZero2(MV_PP2_DPI_CNTR_CTRL_REG, "MV_PP2_DPI_CNTR_CTRL_REG", i);
+	}
+
+	mvOsPrintf("\nDPI counters window offset and size confuration\n");
+	for (i = 0; i < MV_PP2_DPI_CNTRS; i++)
+		mvPp2PrintReg2(MV_PP2_DPI_CNTR_WIN_REG(i),   "MV_PP2_DPI_CNTR_WIN_REG", i);
+}
+
+
+MV_STATUS	mvPp2DpiCntrWinSet(int cntr, int offset, int size)
+{
+	MV_U32 regVal;
+
+	if (mvPp2MaxCheck(cntr, MV_PP2_DPI_CNTRS, "DPI counter"))
+		return MV_BAD_PARAM;
+
+	if (mvPp2MaxCheck(offset, (MV_PP2_DPI_WIN_OFFSET_MAX + 1), "DPI win offset"))
+		return MV_BAD_PARAM;
+
+	if (mvPp2MaxCheck(size, (MV_PP2_DPI_WIN_SIZE_MAX + 1), "DPI win size"))
+		return MV_BAD_PARAM;
+
+	regVal = MV_PP2_DPI_WIN_OFFSET_MASK(offset) | MV_PP2_DPI_WIN_SIZE_MASK(size);
+	mvPp2WrReg(MV_PP2_DPI_CNTR_WIN_REG(cntr), regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS	mvPp2DpiByteConfig(MV_U8 byte, MV_U16 cntrs_map)
+{
+	mvPp2WrReg(MV_PP2_DPI_BYTE_VAL_REG, byte);
+	mvPp2WrReg(MV_PP2_DPI_CNTR_CTRL_REG, cntrs_map);
+
+	return MV_OK;
+}
+
+MV_STATUS	mvPp2DpiCntrByteSet(int cntr, MV_U8 byte, int en)
+{
+	MV_U32 regVal;
+
+	if (mvPp2MaxCheck(cntr, MV_PP2_DPI_CNTRS, "DPI counter"))
+		return MV_BAD_PARAM;
+
+	mvPp2WrReg(MV_PP2_DPI_BYTE_VAL_REG, byte);
+	regVal = mvPp2RdReg(MV_PP2_DPI_CNTR_CTRL_REG);
+
+	if (en)
+		regVal |= (1 << cntr);
+	else
+		regVal &= ~(1 << cntr);
+
+	mvPp2WrReg(MV_PP2_DPI_CNTR_CTRL_REG, regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS	mvPp2DpiCntrDisable(int cntr)
+{
+	int i;
+
+	if (mvPp2MaxCheck(cntr, MV_PP2_DPI_CNTRS, "DPI counter"))
+		return MV_BAD_PARAM;
+
+	for (i = 0; i < MV_PP2_DPI_BYTE_VAL_MAX; i++)
+		mvPp2DpiCntrByteSet(cntr, i, 0);
+
+	return MV_OK;
+}
+
+void	mvPp2DpiQueueShow(int mode)
+{
+	MV_PP2_QUEUE_CTRL *pQueueCtrl;
+	int i;
+
+	pQueueCtrl = mvPp2DpiReqQ;
+	mvOsPrintf("\n[PPv2 DPI Requests Queue]\n");
+
+	if (pQueueCtrl) {
+		mvOsPrintf("nextToProc=%d (%p), PendingRequests=%d, NextRequestIndex=%d\n",
+			pQueueCtrl->nextToProc, MV_PP2_QUEUE_DESC_PTR(pQueueCtrl, pQueueCtrl->nextToProc),
+			mvPp2DpiReqPendGet(), mvPp2DpiReqNextIdx());
+
+		mvOsPrintf("pFirst=%p (0x%x), descSize=%d, numOfDescr=%d\n",
+			pQueueCtrl->pFirst, (MV_U32) pp2DescVirtToPhys(pQueueCtrl, (MV_U8 *) pQueueCtrl->pFirst),
+			pQueueCtrl->descSize, pQueueCtrl->lastDesc + 1);
+
+		if (mode > 0) {
+			for (i = 0; i <= pQueueCtrl->lastDesc; i++) {
+				PP2_DPI_REQ_DESC *pReqDesc = (PP2_DPI_REQ_DESC *) MV_PP2_QUEUE_DESC_PTR(pQueueCtrl, i);
+
+				mvOsPrintf("%3d. pReqDesc=%p, 0x%08x, %d\n",
+					i, pReqDesc, pReqDesc->bufPhysAddr, pReqDesc->dataSize);
+				mvOsCacheLineInv(NULL, pReqDesc);
+			}
+		}
+	}
+
+	pQueueCtrl = mvPp2DpiResQ;
+	mvOsPrintf("\n[PPv2 DPI Results Queue]\n");
+
+	if (pQueueCtrl) {
+		mvOsPrintf("nextToProc=%d (%p), PendingResults=%d, NextResultIndex=%d\n",
+			pQueueCtrl->nextToProc, MV_PP2_QUEUE_DESC_PTR(pQueueCtrl, pQueueCtrl->nextToProc),
+			mvPp2DpiResOccupGet(), mvPp2DpiResNextIdx());
+
+		mvOsPrintf("pFirst=%p (0x%x), descSize=%d, numOfDescr=%d\n",
+			pQueueCtrl->pFirst, (MV_U32) pp2DescVirtToPhys(pQueueCtrl, (MV_U8 *) pQueueCtrl->pFirst),
+			pQueueCtrl->descSize, pQueueCtrl->lastDesc + 1);
+
+		if (mode > 0) {
+			for (i = 0; i <= pQueueCtrl->lastDesc; i++) {
+				/* Result Queue */
+				PP2_DPI_RES_DESC *pResDesc = (PP2_DPI_RES_DESC *) MV_PP2_QUEUE_DESC_PTR(pQueueCtrl, i);
+				int j;
+
+				mvOsPrintf("%3d. pResDesc=%p, ", i, pResDesc);
+				for (j = 0; j < MV_PP2_DPI_CNTRS; j++)
+					mvOsPrintf("%-2d ", pResDesc->counter[j]);
+
+				mvOsPrintf("\n");
+				mvOsCacheLineInv(NULL, pResDesc);
+			}
+		}
+	}
+}
+
+MV_STATUS mvPp2DpiQueuesCreate(int num)
+{
+	MV_PP2_QUEUE_CTRL *pQueueCtrl;
+	int size;
+
+	mvPp2WrReg(MV_PP2_DPI_Q_SIZE_REG, num);
+
+	/* Allocate memory for DPI request queue */
+	pQueueCtrl = mvOsMalloc(sizeof(MV_PP2_QUEUE_CTRL));
+	if (pQueueCtrl == NULL) {
+		mvOsPrintf("%s: Can't allocate %d bytes for mvPp2DpiReqQ\n", __func__, sizeof(MV_PP2_QUEUE_CTRL));
+		return MV_OUT_OF_CPU_MEM;
+	}
+	mvOsMemset(pQueueCtrl, 0, sizeof(MV_PP2_QUEUE_CTRL));
+
+	size = (num * sizeof(PP2_DPI_REQ_DESC) + MV_PP2_DPI_Q_ALIGN);
+	pQueueCtrl->descBuf.bufVirtPtr =
+	    mvPp2DescrMemoryAlloc(size, &pQueueCtrl->descBuf.bufPhysAddr, &pQueueCtrl->descBuf.memHandle);
+	pQueueCtrl->descBuf.bufSize = size;
+	pQueueCtrl->descSize = sizeof(PP2_DPI_REQ_DESC);
+
+	if (pQueueCtrl->descBuf.bufVirtPtr == NULL) {
+		mvOsPrintf("%s: Can't allocate %d bytes for %d descr\n", __func__, size, num);
+		return MV_OUT_OF_CPU_MEM;
+	}
+
+	/* Make sure descriptor address is aligned */
+	pQueueCtrl->pFirst = (char *)MV_ALIGN_UP((MV_ULONG) pQueueCtrl->descBuf.bufVirtPtr, MV_PP2_DPI_Q_ALIGN);
+	pQueueCtrl->lastDesc = (num - 1);
+	mvPp2WrReg(MV_PP2_DPI_REQ_Q_ADDR_REG, pp2DescVirtToPhys(pQueueCtrl, (MV_U8 *)pQueueCtrl->pFirst));
+	mvPp2DpiReqQ = pQueueCtrl;
+
+	/* Allocate memory for DPI result queue */
+	pQueueCtrl = mvOsMalloc(sizeof(MV_PP2_QUEUE_CTRL));
+	if (pQueueCtrl == NULL) {
+		mvOsPrintf("%s: Can't allocate %d bytes for mvPp2DpiResQ\n", __func__, sizeof(MV_PP2_QUEUE_CTRL));
+		return MV_OUT_OF_CPU_MEM;
+	}
+	mvOsMemset(pQueueCtrl, 0, sizeof(MV_PP2_QUEUE_CTRL));
+
+	size = (num * sizeof(PP2_DPI_RES_DESC) + MV_PP2_DPI_Q_ALIGN);
+	pQueueCtrl->descBuf.bufVirtPtr =
+	    mvPp2DescrMemoryAlloc(size, &pQueueCtrl->descBuf.bufPhysAddr, &pQueueCtrl->descBuf.memHandle);
+	pQueueCtrl->descBuf.bufSize = size;
+	pQueueCtrl->descSize = sizeof(PP2_DPI_RES_DESC);
+
+	if (pQueueCtrl->descBuf.bufVirtPtr == NULL) {
+		mvOsPrintf("%s: Can't allocate %d bytes for %d descr\n", __func__, size, num);
+		return MV_OUT_OF_CPU_MEM;
+	}
+	/* Make sure descriptor address is aligned */
+	pQueueCtrl->pFirst = (char *)MV_ALIGN_UP((MV_ULONG) pQueueCtrl->descBuf.bufVirtPtr, MV_PP2_DPI_Q_ALIGN);
+	pQueueCtrl->lastDesc = (num - 1);
+	mvPp2WrReg(MV_PP2_DPI_RES_Q_ADDR_REG, pp2DescVirtToPhys(pQueueCtrl, (MV_U8 *)pQueueCtrl->pFirst));
+	mvPp2DpiResQ = pQueueCtrl;
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2DpiQueuesDelete(void)
+{
+	if (mvPp2DpiReqQ) {
+		mvPp2DescrMemoryFree(mvPp2DpiReqQ->descBuf.bufSize, (MV_ULONG *)mvPp2DpiReqQ->descBuf.bufPhysAddr,
+				mvPp2DpiReqQ->descBuf.bufVirtPtr, (MV_U32 *)mvPp2DpiReqQ->descBuf.memHandle);
+		mvOsFree(mvPp2DpiReqQ);
+		mvPp2DpiReqQ = NULL;
+	} else
+		mvOsPrintf("%s: DPI Request queue is not initialized\n", __func__);
+
+
+	if (mvPp2DpiResQ) {
+		mvPp2DescrMemoryFree(mvPp2DpiResQ->descBuf.bufSize, (MV_ULONG *)mvPp2DpiResQ->descBuf.bufPhysAddr,
+				mvPp2DpiResQ->descBuf.bufVirtPtr, (MV_U32 *)mvPp2DpiResQ->descBuf.memHandle);
+		mvOsFree(mvPp2DpiResQ);
+		mvPp2DpiResQ = NULL;
+	} else
+		mvOsPrintf("%s: DPI Result queue is not initialized\n", __func__);
+
+	/* Reset all counters */
+	mvPp2WrReg(MV_PP2_DPI_INIT_REG, 1);
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2DpiRequestSet(unsigned long paddr, int size)
+{
+	MV_PP2_QUEUE_CTRL *pQueueCtrl = mvPp2DpiReqQ;
+	PP2_DPI_REQ_DESC  *pReqDesc;
+	int reqDesc = pQueueCtrl->nextToProc;
+
+	if (mvPp2DpiReqQ == NULL) {
+		mvOsPrintf("%s: DPI Request queue is not initialized\n", __func__);
+		return MV_NOT_READY;
+	}
+	/* Check if request queue is not Full */
+	if (mvPp2DpiReqIsFull(pQueueCtrl))
+		return MV_FULL;
+
+	pReqDesc = (PP2_DPI_REQ_DESC *)MV_PP2_QUEUE_DESC_PTR(pQueueCtrl, reqDesc);
+	pReqDesc->bufPhysAddr = paddr;
+	pReqDesc->dataSize = size;
+
+	pQueueCtrl->nextToProc = MV_PP2_QUEUE_NEXT_DESC(pQueueCtrl, reqDesc);
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2DpiResultGet(MV_U8 *counters, int num)
+{
+	MV_PP2_QUEUE_CTRL *pQueueCtrl = mvPp2DpiResQ;
+	PP2_DPI_RES_DESC  *pResDesc;
+	int resDesc = pQueueCtrl->nextToProc;
+
+	if (mvPp2DpiResQ == NULL) {
+		mvOsPrintf("%s: DPI Result queue is not initialized\n", __func__);
+		return MV_NOT_READY;
+	}
+	if (num > MV_PP2_DPI_CNTRS) {
+		mvOsPrintf("%s: Number of DPI counters %d is out of maximium %d\n",
+				__func__, num, MV_PP2_DPI_CNTRS);
+		num = MV_PP2_DPI_CNTRS;
+	}
+
+	pResDesc = (PP2_DPI_RES_DESC *)MV_PP2_QUEUE_DESC_PTR(pQueueCtrl, resDesc);
+	pQueueCtrl->nextToProc = MV_PP2_QUEUE_NEXT_DESC(pQueueCtrl, resDesc);
+	if (counters)
+		memcpy(counters, pResDesc->counter, num);
+
+	return MV_OK;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/dpi/mvPp2DpiHw.h b/drivers/net/ethernet/mvebu_net/pp2/hal/dpi/mvPp2DpiHw.h
new file mode 100644
index 000000000000..279e7698db48
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/dpi/mvPp2DpiHw.h
@@ -0,0 +1,264 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __mvPp2DipHw_h__
+#define __mvPp2DipHw_h__
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "mvTypes.h"
+#include "mvCommon.h"
+#include "mvOs.h"
+
+#include "common/mvPp2Common.h"
+#include "gbe/mvPp2Gbe.h"
+
+#define MV_PP2_DPI_CNTRS		16
+#define MV_PP2_DPI_MAX_PKT_SIZE		1024
+
+#define MV_PP2_DPI_Q_SIZE		32
+
+/*********************************** DPI Counters Registers *******************/
+
+#define MV_PP2_DPI_INIT_REG		(MV_PP2_REG_BASE + 0x4800)
+/*---------------------------------------------------------------------------------------------*/
+
+#define MV_PP2_DPI_BYTE_VAL_REG		(MV_PP2_REG_BASE + 0x4810)
+
+#define MV_PP2_DPI_BYTE_VAL_OFFS	0
+#define MV_PP2_DPI_BYTE_VAL_MAX		256
+#define MV_PP2_DPI_BYTE_VAL_MASK	((MV_PP2_DPI_BYTE_VAL_MAX - 1) << MV_PP2_DPI_BYTE_VAL_OFFS)
+/*---------------------------------------------------------------------------------------------*/
+
+#define MV_PP2_DPI_CNTR_CTRL_REG	(MV_PP2_REG_BASE + 0x4814)
+/*---------------------------------------------------------------------------------------------*/
+
+#define MV_PP2_DPI_CNTR_WIN_REG(cntr)	(MV_PP2_REG_BASE + 0x4840 + (cntr) * 4)
+
+#define MV_PP2_DPI_WIN_OFFSET_OFFS	0
+#define MV_PP2_DPI_WIN_OFFSET_BITS	9
+#define MV_PP2_DPI_WIN_OFFSET_MAX	((1 << MV_PP2_DPI_WIN_OFFSET_BITS) - 1)
+#define MV_PP2_DPI_WIN_OFFSET_ALL_MASK	(MV_PP2_DPI_WIN_OFFSET_MAX << MV_PP2_DPI_WIN_OFFSET_OFFS)
+#define MV_PP2_DPI_WIN_OFFSET_MASK(v)   ((v << MV_PP2_DPI_WIN_OFFSET_OFFS) & MV_PP2_DPI_WIN_OFFSET_ALL_MASK)
+
+#define MV_PP2_DPI_WIN_SIZE_OFFS	16
+#define MV_PP2_DPI_WIN_SIZE_BITS	8
+#define MV_PP2_DPI_WIN_SIZE_MAX		((1 << MV_PP2_DPI_WIN_SIZE_BITS) - 1)
+#define MV_PP2_DPI_WIN_SIZE_ALL_MASK	(MV_PP2_DPI_WIN_SIZE_MAX << MV_PP2_DPI_WIN_SIZE_OFFS)
+#define MV_PP2_DPI_WIN_SIZE_MASK(v)	((v << MV_PP2_DPI_WIN_SIZE_OFFS) & MV_PP2_DPI_WIN_SIZE_ALL_MASK)
+/*---------------------------------------------------------------------------------------------*/
+
+/*********************************** DPI Request / Result Queues Registers *******************/
+#define MV_PP2_DPI_Q_SIZE_BITS		12
+#define MV_PP2_DPI_Q_SIZE_MAX		((1 < MV_PP2_DPI_Q_SIZE_BITS) - 1)
+
+#define MV_PP2_DPI_Q_ALIGN		(1 << 7)
+
+#define MV_PP2_DPI_REQ_Q_ADDR_REG	(MV_PP2_REG_BASE + 0x4880)
+#define MV_PP2_DPI_RES_Q_ADDR_REG	(MV_PP2_REG_BASE + 0x4884)
+#define MV_PP2_DPI_Q_SIZE_REG		(MV_PP2_REG_BASE + 0x4888)
+/*---------------------------------------------------------------------------------------------*/
+
+#define MV_PP2_DPI_Q_CNTR_BITS          12
+#define MV_PP2_DPI_Q_CNTR_MAX           ((1 << MV_PP2_DPI_Q_CNTR_BITS) - 1)
+
+#define MV_PP2_DPI_Q_UPDATE_REG		(MV_PP2_REG_BASE + 0x4890)
+
+#define MV_PP2_DPI_RES_DEC_OCCUP_OFFS	0
+#define MV_PP2_DPI_RES_DEC_OCCUP_MASK   (MV_PP2_DPI_Q_CNTR_MAX << MV_PP2_DPI_RES_DEC_OCCUP_OFFS)
+
+#define MV_PP2_DPI_REQ_ADD_PEND_OFFS	16
+#define MV_PP2_DPI_REQ_ADD_PEND_MASK   (MV_PP2_DPI_Q_CNTR_MAX << MV_PP2_DPI_REQ_ADD_PEND_OFFS)
+/*---------------------------------------------------------------------------------------------*/
+
+#define MV_PP2_DPI_Q_STATUS_REG		(MV_PP2_REG_BASE + 0x4894)
+
+#define MV_PP2_DPI_RES_Q_OCCUP_OFFS	0
+#define MV_PP2_DPI_RES_Q_OCCUP_MASK     (MV_PP2_DPI_Q_CNTR_MAX << MV_PP2_DPI_RES_Q_OCCUP_OFFS)
+
+#define MV_PP2_DPI_REQ_Q_PEND_OFFS	16
+#define MV_PP2_DPI_REQ_Q_PEND_MASK      (MV_PP2_DPI_Q_CNTR_MAX << MV_PP2_DPI_REQ_Q_PEND_OFFS)
+/*---------------------------------------------------------------------------------------------*/
+
+#define MV_PP2_DPI_Q_INDEX_REG		(MV_PP2_REG_BASE + 0x4898)
+
+#define MV_PP2_DPI_RES_Q_INDEX_OFFS	0
+#define MV_PP2_DPI_RES_Q_INDEX_MASK     (MV_PP2_DPI_Q_CNTR_MAX << MV_PP2_DPI_RES_Q_INDEX_OFFS)
+
+#define MV_PP2_DPI_REQ_Q_INDEX_OFFS	16
+#define MV_PP2_DPI_REQ_Q_INDEX_MASK     (MV_PP2_DPI_Q_CNTR_MAX << MV_PP2_DPI_REQ_Q_INDEX_OFFS)
+/*---------------------------------------------------------------------------------------------*/
+
+#define MV_PP2_DPI_Q_PEND_REG		(MV_PP2_REG_BASE + 0x489C)
+#define MV_PP2_DPI_Q_THRESH_REG		(MV_PP2_REG_BASE + 0x48A0)
+/*---------------------------------------------------------------------------------------------*/
+
+typedef struct pp2_dpi_req_desc {
+	MV_U32 bufPhysAddr;
+	MV_U32 dataSize;
+} PP2_DPI_REQ_DESC;
+
+typedef struct pp2_dpi_res_desc {
+	MV_U8 counter[MV_PP2_DPI_CNTRS];
+} PP2_DPI_RES_DESC;
+
+
+/* Update HW with number of DPI RequestQ descriptors to be processed */
+static INLINE void mvPp2DpiReqPendAdd(int pend)
+{
+	MV_U32 regVal;
+
+	regVal = (pend << MV_PP2_DPI_REQ_ADD_PEND_OFFS);
+	mvPp2WrReg(MV_PP2_DPI_Q_UPDATE_REG, regVal);
+}
+
+/* Get number of DPI requestQ  descriptors are waiting for processing */
+static INLINE int mvPp2DpiReqPendGet(void)
+{
+	MV_U32 regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_DPI_Q_STATUS_REG);
+	regVal = (regVal >> MV_PP2_DPI_REQ_Q_PEND_OFFS);
+
+	return regVal;
+}
+
+/* Update HW with number of DPI ResultQ descriptors to be reused */
+static INLINE void mvPp2DpiResOccupDec(int occup)
+{
+	MV_U32 regVal;
+
+	regVal = (occup << MV_PP2_DPI_RES_Q_OCCUP_OFFS);
+	mvPp2WrReg(MV_PP2_DPI_Q_UPDATE_REG, regVal);
+}
+
+/* Get number of RX descriptors occupied by received packets */
+static INLINE int mvPp2DpiResOccupGet(void)
+{
+	MV_U32 regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_DPI_Q_STATUS_REG);
+	regVal = ((regVal & MV_PP2_DPI_RES_Q_OCCUP_MASK) >> MV_PP2_DPI_RES_Q_OCCUP_OFFS);
+
+	return regVal;
+}
+
+static INLINE int mvPp2DpiReqNextIdx(void)
+{
+	MV_U32 regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_DPI_Q_INDEX_REG);
+	regVal = ((regVal & MV_PP2_DPI_REQ_Q_INDEX_MASK) >> MV_PP2_DPI_REQ_Q_INDEX_OFFS);
+
+	return regVal;
+}
+
+static INLINE int mvPp2DpiResNextIdx(void)
+{
+	MV_U32 regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_DPI_Q_INDEX_REG);
+	regVal = ((regVal & MV_PP2_DPI_RES_Q_INDEX_MASK) >> MV_PP2_DPI_RES_Q_INDEX_OFFS);
+
+	return regVal;
+}
+
+static INLINE MV_BOOL mvPp2DpiReqIsFull(MV_PP2_QUEUE_CTRL *pQueueCtrl)
+{
+	if ((pQueueCtrl->lastDesc + 1) - mvPp2DpiReqPendGet() > 0)
+		return MV_FALSE;
+
+	return MV_TRUE;
+}
+
+static INLINE MV_BOOL mvPp2DpiResIsEmpty(MV_PP2_QUEUE_CTRL *pQueueCtrl)
+{
+	if (mvPp2DpiResOccupGet() > 0)
+		return MV_FALSE;
+
+	return MV_TRUE;
+}
+
+/* Public function prototypes */
+void	  mvPp2DpiInit(void);
+void	  mvPp2DpiRegs(void);
+MV_STATUS mvPp2DpiCntrWinSet(int cntr, int offset, int size);
+MV_STATUS mvPp2DpiByteConfig(MV_U8 byte, MV_U16 cntrs_map);
+MV_STATUS mvPp2DpiCntrByteSet(int cntr, MV_U8 byte, int en);
+MV_STATUS mvPp2DpiCntrDisable(int cntr);
+
+void	  mvPp2DpiQueueShow(int mode);
+MV_STATUS mvPp2DpiQueuesCreate(int num);
+MV_STATUS mvPp2DpiQueuesDelete(void);
+
+MV_STATUS mvPp2DpiRequestSet(unsigned long paddr, int size);
+MV_STATUS mvPp2DpiResultGet(MV_U8 *counters, int num);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __mvPp2DipHw_h__ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/gbe/mvPp2AddrDec.c b/drivers/net/ethernet/mvebu_net/pp2/hal/gbe/mvPp2AddrDec.c
new file mode 100644
index 000000000000..d7c00a46c819
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/gbe/mvPp2AddrDec.c
@@ -0,0 +1,357 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "mvSysEthConfig.h"
+
+#include "mvPp2Gbe.h"
+
+MV_TARGET ethAddrDecPrioTab[] = {
+#if defined(MV_INCLUDE_SDRAM_CS0)
+	SDRAM_CS0,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS1)
+	SDRAM_CS1,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS2)
+	SDRAM_CS2,
+#endif
+#if defined(MV_INCLUDE_SDRAM_CS3)
+	SDRAM_CS3,
+#endif
+	TBL_TERM
+};
+
+static MV_STATUS ethWinOverlapDetect(MV_U32 winNum, MV_ADDR_WIN *pAddrWin);
+
+/*******************************************************************************
+* mvPp2WinInit
+*
+* DESCRIPTION:
+*	This function initialize ETH window decode unit. It set the default
+*	address decode windows of the unit.
+*
+* INPUT:
+*	addWinMap: An array holding the address decoding information for the
+*		    system.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_ERROR if setting fail.
+*******************************************************************************/
+MV_STATUS mvPp2WinInit(MV_U32 dummy/*backward compability*/, MV_UNIT_WIN_INFO *addrWinMap)
+{
+	MV_U32 winNum, winPrioIndex = 0;
+	MV_UNIT_WIN_INFO *addrDecWin;
+
+	/* Initiate Ethernet address decode */
+	/* First disable all address decode windows */
+	mvPp2WrReg(ETH_BASE_ADDR_ENABLE_REG, 0);
+
+	/* Go through all windows in user table until table terminator      */
+	for (winNum = 0; ((ethAddrDecPrioTab[winPrioIndex] != TBL_TERM) && (winNum < ETH_MAX_DECODE_WIN));) {
+		addrDecWin = &addrWinMap[ethAddrDecPrioTab[winPrioIndex]];
+
+		if (addrDecWin->enable == MV_TRUE) {
+			if (MV_OK != mvPp2WinWrite(0, winNum, addrDecWin)) {
+				mvOsPrintf("mvPp2WinInit failed: winNum=%d (%d, %d)\n",
+					   winNum, winPrioIndex, ethAddrDecPrioTab[winPrioIndex]);
+				return MV_ERROR;
+			}
+			winNum++;
+		}
+		winPrioIndex++;
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvPp2WinWrite
+*
+* DESCRIPTION:
+*	This function writes the address decoding registers according to the
+*	given window configuration.
+*
+* INPUT:
+*	unit	    - The Ethernet unit number to configure.
+*       winNum	    - ETH target address decode window number.
+*       pAddrDecWin - ETH target window data structure.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_OK on success,
+*	MV_BAD_PARAM if winNum is invalid.
+*	MV_ERROR otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvPp2WinWrite(MV_U32 dummy/*backward compability*/, MV_U32 winNum, MV_UNIT_WIN_INFO *pAddrDecWin)
+{
+	MV_U32 size, alignment;
+	MV_U32 baseReg, sizeReg;
+
+	/* Parameter checking   */
+	if (winNum >= ETH_MAX_DECODE_WIN) {
+		mvOsPrintf("mvPp2WinSet: ERR. Invalid win num %d\n", winNum);
+		return MV_BAD_PARAM;
+	}
+
+	/* Check if the requested window overlapps with current windows     */
+	if (MV_TRUE == ethWinOverlapDetect(winNum, &pAddrDecWin->addrWin)) {
+		mvOsPrintf("mvPp2WinWrite: ERR. Window %d overlap\n", winNum);
+		return MV_ERROR;
+	}
+
+	/* check if address is aligned to the size */
+	if (MV_IS_NOT_ALIGN(pAddrDecWin->addrWin.baseLow, pAddrDecWin->addrWin.size)) {
+		mvOsPrintf("mvPp2WinSet: Error setting Ethernet window %d.\n"
+			   "Address 0x%08x is unaligned to size 0x%x.\n",
+			   winNum, pAddrDecWin->addrWin.baseLow, (MV_U32)pAddrDecWin->addrWin.size);
+		return MV_ERROR;
+	}
+
+	size = pAddrDecWin->addrWin.size;
+	if (!MV_IS_POWER_OF_2(size)) {
+		mvOsPrintf("mvPp2WinWrite: Error setting AUDIO window %d. "
+			   "Window size is not a power to 2.", winNum);
+		return MV_BAD_PARAM;
+	}
+
+	baseReg = (pAddrDecWin->addrWin.baseLow & ETH_WIN_BASE_MASK);
+	sizeReg = mvPp2RdReg(ETH_WIN_SIZE_REG(winNum));
+
+	/* set size */
+	alignment = 1 << ETH_WIN_SIZE_OFFS;
+	sizeReg &= ~ETH_WIN_SIZE_MASK;
+	sizeReg |= (((size / alignment) - 1) << ETH_WIN_SIZE_OFFS);
+
+	/* set attributes */
+	baseReg &= ~ETH_WIN_ATTR_MASK;
+	baseReg |= pAddrDecWin->attrib << ETH_WIN_ATTR_OFFS;
+
+	/* set target ID */
+	baseReg &= ~ETH_WIN_TARGET_MASK;
+	baseReg |= pAddrDecWin->targetId << ETH_WIN_TARGET_OFFS;
+
+	/* for the safe side we disable the window before writing the new
+	   values */
+	mvPp2WinEnable(0, winNum, MV_FALSE);
+	mvPp2WrReg(ETH_WIN_BASE_REG(winNum), baseReg);
+
+	/* Write to address decode Size Register                            */
+	mvPp2WrReg(ETH_WIN_SIZE_REG(winNum), sizeReg);
+
+	/* Enable address decode target window                              */
+	if (pAddrDecWin->enable == MV_TRUE)
+		mvPp2WinEnable(0, winNum, MV_TRUE);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* ethWinOverlapDetect - Detect ETH address windows overlapping
+*
+* DESCRIPTION:
+*       An unpredicted behaviur is expected in case ETH address decode
+*       windows overlapps.
+*       This function detects ETH address decode windows overlapping of a
+*       specified window. The function does not check the window itself for
+*       overlapping. The function also skipps disabled address decode windows.
+*
+* INPUT:
+*       winNum      - address decode window number.
+*       pAddrDecWin - An address decode window struct.
+*
+* OUTPUT:
+*       None.
+*
+* RETURN:
+*       MV_TRUE if the given address window overlap current address
+*       decode map, MV_FALSE otherwise, MV_ERROR if reading invalid data
+*       from registers.
+*
+*******************************************************************************/
+static MV_STATUS ethWinOverlapDetect(MV_U32 winNum, MV_ADDR_WIN *pAddrWin)
+{
+	MV_U32 baseAddrEnableReg;
+	MV_U32 winNumIndex;
+	MV_UNIT_WIN_INFO addrDecWin;
+
+	/* Read base address enable register. Do not check disabled windows     */
+	baseAddrEnableReg = mvPp2RdReg(ETH_BASE_ADDR_ENABLE_REG);
+
+	for (winNumIndex = 0; winNumIndex < ETH_MAX_DECODE_WIN; winNumIndex++) {
+		/* Do not check window itself           */
+		if (winNumIndex == winNum)
+			continue;
+
+		/* Do not check disabled windows        */
+		if (baseAddrEnableReg & (1 << winNumIndex))
+			continue;
+
+		/* Get window parameters        */
+		if (MV_OK != mvPp2WinRead(0, winNumIndex, &addrDecWin)) {
+			mvOsPrintf("ethWinOverlapDetect: ERR. TargetWinGet failed\n");
+			return MV_ERROR;
+		}
+
+		if (MV_TRUE == mvWinOverlapTest(pAddrWin, &(addrDecWin.addrWin)))
+			return MV_TRUE;
+	}
+	return MV_FALSE;
+}
+
+/*******************************************************************************
+* mvPp2WinRead
+*
+* DESCRIPTION:
+*       Read Ethernet peripheral target address window.
+*
+* INPUT:
+*       winNum - ETH to target address decode window number.
+*
+* OUTPUT:
+*       pAddrDecWin - ETH target window data structure.
+*
+* RETURN:
+*	MV_BAD_PARAM if winNum is invalid.
+*	MV_ERROR otherwise.
+*
+*******************************************************************************/
+MV_STATUS mvPp2WinRead(MV_U32 dummy/*backward compability*/, MV_U32 winNum, MV_UNIT_WIN_INFO *pAddrDecWin)
+{
+	MV_U32 baseReg, sizeReg;
+	MV_U32 alignment, size;
+
+	/* Parameter checking   */
+	if (winNum >= ETH_MAX_DECODE_WIN) {
+		mvOsPrintf("mvPp2WinGet: ERR. Invalid winNum %d\n", winNum);
+		return MV_NOT_SUPPORTED;
+	}
+
+	baseReg = mvPp2RdReg(ETH_WIN_BASE_REG(winNum));
+	sizeReg = mvPp2RdReg(ETH_WIN_SIZE_REG(winNum));
+
+	alignment = 1 << ETH_WIN_SIZE_OFFS;
+	size = (sizeReg & ETH_WIN_SIZE_MASK) >> ETH_WIN_SIZE_OFFS;
+	pAddrDecWin->addrWin.size = (size + 1) * alignment;
+
+	/* Extract base address                                     */
+	pAddrDecWin->addrWin.baseLow = baseReg & ETH_WIN_BASE_MASK;
+	pAddrDecWin->addrWin.baseHigh = 0;
+
+	/* attrib and targetId */
+	pAddrDecWin->attrib = (baseReg & ETH_WIN_ATTR_MASK) >> ETH_WIN_ATTR_OFFS;
+	pAddrDecWin->targetId = (baseReg & ETH_WIN_TARGET_MASK) >> ETH_WIN_TARGET_OFFS;
+
+	/* Check if window is enabled   */
+	if ((mvPp2RdReg(ETH_BASE_ADDR_ENABLE_REG)) & (1 << winNum))
+		pAddrDecWin->enable = MV_TRUE;
+	else
+		pAddrDecWin->enable = MV_FALSE;
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvPp2WinEnable - Enable/disable a ETH to target address window
+*
+* DESCRIPTION:
+*       This function enable/disable a ETH to target address window.
+*       According to parameter 'enable' the routine will enable the
+*       window, thus enabling ETH accesses (before enabling the window it is
+*       tested for overlapping). Otherwise, the window will be disabled.
+*
+* INPUT:
+*       winNum - ETH to target address decode window number.
+*       enable - Enable/disable parameter.
+*
+* OUTPUT:
+*       N/A
+*
+* RETURN:
+*       MV_ERROR if decode window number was wrong or enabled window overlapps.
+*
+*******************************************************************************/
+MV_STATUS mvPp2WinEnable(MV_U32 dummy/*backward compability*/, MV_U32 winNum, MV_BOOL enable)
+{
+	/* Parameter checking   */
+	if (winNum >= ETH_MAX_DECODE_WIN) {
+		mvOsPrintf("mvPp2TargetWinEnable:ERR. Invalid winNum%d\n", winNum);
+		return MV_ERROR;
+	}
+
+	if (enable)
+		MV_REG_BIT_SET(ETH_BASE_ADDR_ENABLE_REG, (1 << winNum));
+	else
+		/* Disable address decode target window                             */
+		MV_REG_BIT_RESET(ETH_BASE_ADDR_ENABLE_REG, (1 << winNum));
+
+	return MV_OK;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/gbe/mvPp2Gbe.c b/drivers/net/ethernet/mvebu_net/pp2/hal/gbe/mvPp2Gbe.c
new file mode 100644
index 000000000000..ae6cd044ecb1
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/gbe/mvPp2Gbe.c
@@ -0,0 +1,1995 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCommon.h"		/* Should be included before mvSysHwConfig */
+#include "mvTypes.h"
+#include "mv802_3.h"
+#include "mvDebug.h"
+#include "mvOs.h"
+
+#include "mvPp2Gbe.h"
+#include "prs/mvPp2Prs.h"
+#include "bm/mvBm.h"
+
+#define MV_PP2_RXQ_FREE 	-1
+
+#define TX_DISABLE_TIMEOUT_MSEC     1000
+#define RX_DISABLE_TIMEOUT_MSEC     1000
+#define TX_FIFO_EMPTY_TIMEOUT_MSEC  10000
+#define PORT_DISABLE_WAIT_TCLOCKS   5000
+
+/* physical TXQs */
+MV_PP2_PHYS_TXQ_CTRL *mvPp2PhysTxqs;
+
+/* aggregated TXQs */
+MV_PP2_AGGR_TXQ_CTRL *mvPp2AggrTxqs;
+
+/* physical RXQs */
+MV_PP2_PHYS_RXQ_CTRL *mvPp2PhysRxqs;
+
+/* ports control */
+MV_PP2_PORT_CTRL **mvPp2PortCtrl;
+
+/* HW data */
+MV_PP2_HAL_DATA mvPp2HalData;
+
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2MaxCheck(int value, int limit, char *name)
+{
+	if ((value < 0) || (value >= limit)) {
+		mvOsPrintf("%s %d is out of range [0..%d]\n",
+			name ? name : "value", value, (limit - 1));
+		return 1;
+	}
+	return 0;
+}
+
+int mvPp2PortCheck(int port)
+{
+	return mvPp2MaxCheck(port, mvPp2HalData.maxPort, "port");
+}
+
+int mvPp2TxpCheck(int port, int txp)
+{
+	int txpMax = 1;
+
+	if (mvPp2PortCheck(port))
+		return 1;
+
+	if (MV_PP2_IS_PON_PORT(port))
+		txpMax = mvPp2HalData.maxTcont;
+
+	return mvPp2MaxCheck(txp, txpMax, "txp");
+}
+
+int mvPp2CpuCheck(int cpu)
+{
+	return mvPp2MaxCheck(cpu, mvPp2HalData.maxCPUs, "cpu");
+}
+
+int mvPp2EgressPort(int port, int txp)
+{
+	int egress_port;
+
+	if (!MV_PP2_IS_PON_PORT(port))
+		egress_port = MV_PP2_MAX_TCONT + port + txp;
+	else
+		egress_port = txp;
+
+	return egress_port;
+}
+/*-------------------------------------------------------------------------------*/
+MV_STATUS mvPp2HalInit(MV_PP2_HAL_DATA *halData)
+{
+	int bytes, i;
+	MV_STATUS status;
+
+	mvPp2HalData = *halData;
+	bytes = mvPp2HalData.maxPort * sizeof(MV_PP2_PORT_CTRL *);
+
+	/* Allocate port data structures */
+	mvPp2PortCtrl = mvOsMalloc(bytes);
+	if (mvPp2PortCtrl == NULL) {
+		mvOsPrintf("%s: Can't allocate %d bytes for %d ports\n", __func__,
+			   mvPp2HalData.maxPort * sizeof(MV_PP2_PORT_CTRL), mvPp2HalData.maxPort);
+		return MV_OUT_OF_CPU_MEM;
+	}
+
+	mvOsMemset(mvPp2PortCtrl, 0, bytes);
+
+	/* Allocate physical TXQs */
+	status = mvPp2PhysTxqsAlloc();
+	if (status != MV_OK) {
+		mvOsPrintf("%s: mvPp2PhysTxqsAlloc failed\n", __func__);
+		return status;
+	}
+
+	/* Allocate aggregated TXQs */
+	status = mvPp2AggrTxqsAlloc(mvPp2HalData.maxCPUs);
+	if (status != MV_OK) {
+		mvOsPrintf("%s: mvPp2AggrTxqsAlloc failed\n", __func__);
+		return status;
+	}
+
+	/* Allocate physical RXQs */
+	status = mvPp2PhysRxqsAlloc();
+	if (status != MV_OK) {
+		mvOsPrintf("%s: mvPp2PhysRxqsAlloc failed\n", __func__);
+		return status;
+	}
+
+	mvBmInit();
+
+	/* Rx Fifo Init */
+	mvPp2RxFifoInit(mvPp2HalData.maxPort);
+
+	/* Init all interrupt rxqs groups - each port has 0 rxqs */
+	for (i = 0; i <= MV_PP2_PON_PORT_ID; i++)
+		mvPp2GbeIsrRxqGroup(i, 0);
+
+	MV_REG_WRITE(ETH_MNG_EXTENDED_GLOBAL_CTRL_REG, 0x27);
+
+	/* Allow cache snoop when transmiting packets */
+	if (mvPp2HalData.iocc)
+		mvPp2WrReg(MV_PP2_TX_SNOOP_REG, 0x1);
+
+	/* Set TX FIFO Threshold to maximum */
+	MV_REG_WRITE(MV_PP2_TX_FIFO_THRESH_REG, MV_PP2_TX_CSUM_MAX_SIZE);
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+MV_VOID mvPp2HalDestroy(MV_VOID)
+{
+	mvPp2PhysTxqsDestroy();
+	mvPp2AggrTxqsDestroy();
+	mvPp2PhysRxqsDestroy();
+	mvOsFree(mvPp2PortCtrl);
+	memset(&mvPp2HalData, 0, sizeof(mvPp2HalData));
+}
+
+/*******************************************************************************
+* mvNetaDefaultsSet - Set defaults to the NETA port
+*
+* DESCRIPTION:
+*       This
+function sets default values to the NETA port.
+*       1) Clears interrupt Cause and Mask registers.
+*       2) Clears all MAC tables.
+*       3) Sets defaults to all registers.
+*       4) Resets RX and TX descriptor rings.
+*       5) Resets PHY.
+*
+* INPUT:
+*   int     portNo		- Port number.
+*
+* RETURN:   MV_STATUS
+*               MV_OK - Success, Others - Failure
+* NOTE:
+*   This function updates all the port configurations except those set
+*   initialy by the OsGlue by MV_NETA_PORT_INIT.
+*   This function can be called after portDown to return the port settings
+*   to defaults.
+*******************************************************************************/
+MV_STATUS mvPp2DefaultsSet(int port)
+{
+	MV_U32 regVal;
+	int txp, queue, txPortNum, i;
+	MV_PP2_PORT_CTRL *pPortCtrl = mvPp2PortHndlGet(port);
+
+	if (!MV_PP2_IS_PON_PORT(port))
+		mvGmacDefaultsSet(port);
+
+	/* avoid unused variable compilation warninig */
+	regVal = 0;
+
+	for (txp = 0; txp < pPortCtrl->txpNum; txp++) {
+		/* Disable Legacy WRR, Disable EJP, Release from reset */
+		txPortNum = mvPp2EgressPort(port, txp);
+
+		mvPp2WrReg(MV_PP2_TXP_SCHED_PORT_INDEX_REG, txPortNum);
+
+		mvPp2WrReg(MV_PP2_TXP_SCHED_CMD_1_REG, 0);
+		/* Close bandwidth for all queues */
+		for (queue = 0; queue < MV_PP2_MAX_TXQ; queue++)
+			mvPp2WrReg(MV_PP2_TXQ_SCHED_TOKEN_CNTR_REG(MV_PPV2_TXQ_PHYS(port, txp, queue)), 0);
+
+		/* Set refill period to 1 usec, refill tokens and bucket size to maximum */
+		mvPp2WrReg(MV_PP2_TXP_SCHED_PERIOD_REG, mvPp2HalData.tClk / 1000000);
+		mvPp2TxpMaxRateSet(port, txp);
+	}
+	/* Set MaximumLowLatencyPacketSize value to 256 */
+	mvPp2WrReg(MV_PP2_RX_CTRL_REG(port), MV_PP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
+						MV_PP2_RX_LOW_LATENCY_PKT_SIZE_MASK(256));
+
+	/* Enable Rx cache snoop */
+	if (mvPp2HalData.iocc) {
+		for (i = 0; i < pPortCtrl->rxqNum; i++) {
+			queue = mvPp2LogicRxqToPhysRxq(port, i);
+#ifdef CONFIG_MV_ETH_PP2_1
+			regVal = mvPp2RdReg(MV_PP2_RXQ_CONFIG_REG(queue));
+			regVal |= MV_PP2_SNOOP_PKT_SIZE_MASK | MV_PP2_SNOOP_BUF_HDR_MASK;
+			mvPp2WrReg(MV_PP2_RXQ_CONFIG_REG(queue), regVal);
+#else
+			regVal = MV_PP2_V0_SNOOP_PKT_SIZE_MASK | MV_PP2_V0_SNOOP_BUF_HDR_MASK;
+			mvPp2WrReg(MV_PP2_V0_RXQ_SNOOP_REG(queue), regVal);
+#endif
+		}
+	}
+
+	/* At default, mask all interrupts to all cpus */
+	mvPp2GbeCpuInterruptsDisable(port, (1 << mvPp2HalData.maxCPUs) - 1);
+
+	return MV_OK;
+
+}
+
+/*-------------------------------------------------------------------------------*/
+/* Mapping */
+/* Add a mapping prxq <-> (port, lrxq) */
+MV_STATUS mvPp2PhysRxqMapAdd(int prxq, int port, int lrxq)
+{
+	MV_PP2_PORT_CTRL *pCtrl;
+
+	if (mvPp2PortCheck(port)) {
+		mvOsPrintf("Bad port number: %d\n", port);
+		return MV_BAD_PARAM;
+	}
+	if (lrxq < 0 || lrxq > MV_PP2_MAX_RXQ) {
+		mvOsPrintf("Bad logical RXQ number: %d\n", lrxq);
+		return MV_BAD_PARAM;
+	}
+	if (mvPp2PhysRxqs == NULL)
+		return MV_ERROR;
+	if (prxq < 0 || prxq >= MV_PP2_RXQ_TOTAL_NUM)
+		return MV_BAD_PARAM;
+	if (mvPp2PhysRxqs[prxq].port != MV_PP2_RXQ_FREE || mvPp2PhysRxqs[prxq].logicRxq != MV_PP2_RXQ_FREE)
+		return MV_BAD_PARAM;
+
+	pCtrl = mvPp2PortCtrl[port];
+	/* map prxq <- (port, lrxq) */
+	if (pCtrl == NULL || pCtrl->pRxQueue == NULL)
+		return MV_BAD_PARAM;
+	if (lrxq < 0 || lrxq >= MV_PP2_MAX_RXQ)
+		return MV_BAD_PARAM;
+	if (pCtrl->rxqNum >= MV_PP2_MAX_RXQ)
+		return MV_FAIL;
+
+	pCtrl->pRxQueue[lrxq] = &mvPp2PhysRxqs[prxq];
+	pCtrl->rxqNum++;
+
+	/* map prxq -> (port, lrxq) */
+	mvPp2PhysRxqs[prxq].port = port;
+	mvPp2PhysRxqs[prxq].logicRxq = lrxq;
+
+	return MV_OK;
+}
+
+/* Free the relevant physical rxq */
+MV_STATUS mvPp2PhysRxqMapDel(int prxq)
+{
+	int port, lrxq;
+
+	if (mvPp2PhysRxqs == NULL)
+		return MV_ERROR;
+	if (prxq < 0 || prxq >= MV_PP2_RXQ_TOTAL_NUM)
+		return MV_BAD_PARAM;
+
+	port = mvPp2PhysRxqs[prxq].port;
+	lrxq = mvPp2PhysRxqs[prxq].logicRxq;
+	mvPp2PhysRxqs[prxq].port = MV_PP2_RXQ_FREE;
+	mvPp2PhysRxqs[prxq].logicRxq = MV_PP2_RXQ_FREE;
+
+	if (port != MV_PP2_RXQ_FREE && lrxq != MV_PP2_RXQ_FREE &&
+		mvPp2PortCtrl[port] && mvPp2PortCtrl[port]->pRxQueue[lrxq]) {
+		mvPp2PortCtrl[port]->pRxQueue[lrxq] = NULL;
+		mvPp2PortCtrl[port]->rxqNum--;
+	}
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2PortLogicRxqMapDel(int port, int lrxq)
+{
+	MV_PP2_PHYS_RXQ_CTRL *prxqCtrl;
+
+	if (mvPp2PortCheck(port)) {
+		mvOsPrintf("Bad port number: %d\n", port);
+		return MV_BAD_PARAM;
+	}
+	if (lrxq < 0 || lrxq > MV_PP2_MAX_RXQ) {
+		mvOsPrintf("Bad logical RXQ number: %d\n", lrxq);
+		return MV_BAD_PARAM;
+	}
+	if (mvPp2PhysRxqs == NULL)
+		return MV_ERROR;
+	if (mvPp2PortCtrl[port] == NULL || mvPp2PortCtrl[port]->pRxQueue == NULL)
+		return MV_BAD_PARAM;
+
+	prxqCtrl = mvPp2PortCtrl[port]->pRxQueue[lrxq];
+	mvPp2PortCtrl[port]->pRxQueue[lrxq] = NULL;
+	if (prxqCtrl) {
+		prxqCtrl->logicRxq = MV_PP2_RXQ_FREE;
+		prxqCtrl->port = MV_PP2_RXQ_FREE;
+		mvPp2PortCtrl[port]->rxqNum--;
+	}
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+/* General descriptor management */
+static void mvPp2DescRingReset(MV_PP2_QUEUE_CTRL *pQueueCtrl)
+{
+	int descrNum = (pQueueCtrl->lastDesc + 1);
+	char *pDesc  = pQueueCtrl->pFirst;
+
+	if (pDesc == NULL)
+		return;
+
+	/* reset ring of descriptors */
+	mvOsMemset(pDesc, 0, (descrNum * MV_PP2_DESC_ALIGNED_SIZE));
+	mvOsCacheFlush(NULL, pDesc, (descrNum * MV_PP2_DESC_ALIGNED_SIZE));
+	pQueueCtrl->nextToProc = 0;
+}
+
+/* allocate descriptors */
+MV_U8 *mvPp2DescrMemoryAlloc(int descSize, MV_ULONG *pPhysAddr, MV_U32 *memHandle)
+{
+	MV_U8 *pVirt;
+#ifdef ETH_DESCR_UNCACHED
+	pVirt = (MV_U8 *)mvOsIoUncachedMalloc(NULL, descSize, pPhysAddr, memHandle);
+#else
+	pVirt = (MV_U8 *)mvOsIoCachedMalloc(NULL, descSize, pPhysAddr, memHandle);
+#endif /* ETH_DESCR_UNCACHED */
+	if (pVirt)
+		mvOsMemset(pVirt, 0, descSize);
+
+	return pVirt;
+}
+
+void mvPp2DescrMemoryFree(int descSize, MV_ULONG *pPhysAddr, MV_U8 *pVirt, MV_U32 *memHandle)
+{
+#ifdef ETH_DESCR_UNCACHED
+	mvOsIoUncachedFree(NULL, descSize, (MV_ULONG)pPhysAddr, pVirt, (MV_U32)memHandle);
+#else
+	mvOsIoCachedFree(NULL, descSize, (MV_ULONG)pPhysAddr, pVirt, (MV_U32)memHandle);
+#endif /* ETH_DESCR_UNCACHED */
+}
+
+MV_STATUS mvPp2DescrCreate(MV_PP2_QUEUE_CTRL *qCtrl, int descNum)
+{
+	int descSize;
+
+	/* Allocate memory for descriptors */
+	descSize = ((descNum * MV_PP2_DESC_ALIGNED_SIZE) + MV_PP2_DESC_Q_ALIGN);
+	qCtrl->descBuf.bufVirtPtr =
+	    mvPp2DescrMemoryAlloc(descSize, &qCtrl->descBuf.bufPhysAddr, &qCtrl->descBuf.memHandle);
+
+	qCtrl->descBuf.bufSize = descSize;
+	qCtrl->descSize = MV_PP2_DESC_ALIGNED_SIZE;
+
+	if (qCtrl->descBuf.bufVirtPtr == NULL) {
+		mvOsPrintf("%s: Can't allocate %d bytes for %d descr\n", __func__, descSize, descNum);
+		return MV_OUT_OF_CPU_MEM;
+	}
+
+	/* Make sure descriptor address is aligned */
+	qCtrl->pFirst = (char *)MV_ALIGN_UP((MV_ULONG) qCtrl->descBuf.bufVirtPtr, MV_PP2_DESC_Q_ALIGN);
+
+	qCtrl->lastDesc = (descNum - 1);
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+/* RXQ */
+/* Allocate and initialize descriptors for RXQ */
+MV_PP2_PHYS_RXQ_CTRL *mvPp2RxqInit(int port, int rxq, int descNum)
+{
+	MV_STATUS status;
+	int prxq;
+	MV_PP2_PHYS_RXQ_CTRL *pRxq;
+	MV_PP2_QUEUE_CTRL *qCtrl;
+
+	prxq = mvPp2LogicRxqToPhysRxq(port, rxq);
+	if (prxq < 0) {
+		mvOsPrintf("bad (port,rxq): (%d, %d), no mapping to physical rxq\n", port, rxq);
+		return NULL;
+	}
+	pRxq = &mvPp2PhysRxqs[prxq];
+	qCtrl = &pRxq->queueCtrl;
+
+	/* Number of descriptors must be multiple of 16 */
+	if (descNum % 16 != 0) {
+		mvOsPrintf("Descriptor number %d, must be a multiple of 16\n", descNum);
+		return NULL;
+	}
+
+	status = mvPp2DescrCreate(qCtrl, descNum);
+	if (status != MV_OK)
+		return NULL;
+
+	mvPp2DescRingReset(qCtrl);
+
+	/* zero occupied and non-occupied counters - direct access */
+	mvPp2WrReg(MV_PP2_RXQ_STATUS_REG(prxq), 0);
+
+	/* Set Rx descriptors queue starting address */
+	/* indirect access */
+	mvPp2WrReg(MV_PP2_RXQ_NUM_REG, prxq);
+	mvPp2WrReg(MV_PP2_RXQ_DESC_ADDR_REG, pp2DescVirtToPhys(qCtrl, (MV_U8 *)qCtrl->pFirst));
+	mvPp2WrReg(MV_PP2_RXQ_DESC_SIZE_REG, descNum);
+	mvPp2WrReg(MV_PP2_RXQ_INDEX_REG, 0);
+
+	return pRxq;
+}
+
+void mvPp2RxqDelete(int port, int rxq)
+{
+	int prxq;
+	MV_PP2_PHYS_RXQ_CTRL *pRxq;
+	MV_PP2_QUEUE_CTRL *pQueueCtrl;
+	MV_BUF_INFO *pDescBuf;
+
+	prxq = mvPp2LogicRxqToPhysRxq(port, rxq);
+
+	if (prxq < 0) {
+		mvOsPrintf("bad (port,rxq): (%d, %d), no mapping to physical rxq\n", port, rxq);
+		return;
+	}
+	pRxq = &mvPp2PhysRxqs[prxq];
+	pQueueCtrl = &pRxq->queueCtrl;
+	pDescBuf = &pQueueCtrl->descBuf;
+
+	mvPp2DescrMemoryFree(pDescBuf->bufSize, (MV_ULONG *)pDescBuf->bufPhysAddr,
+				pDescBuf->bufVirtPtr, (MV_U32 *)pDescBuf->memHandle);
+	mvOsMemset(pQueueCtrl, 0, sizeof(*pQueueCtrl));
+
+	/* Clear Rx descriptors queue starting address, size and free descr number */
+	mvPp2WrReg(MV_PP2_RXQ_STATUS_REG(prxq), 0);
+	mvPp2WrReg(MV_PP2_RXQ_NUM_REG, prxq);
+	mvPp2WrReg(MV_PP2_RXQ_DESC_ADDR_REG, 0);
+	mvPp2WrReg(MV_PP2_RXQ_DESC_SIZE_REG, 0);
+}
+
+/* Allocate and initialize all physical RXQs.
+   This function must be called before any use of RXQ */
+MV_STATUS mvPp2PhysRxqsAlloc(MV_VOID)
+{
+	int i, bytes;
+
+	bytes = MV_PP2_RXQ_TOTAL_NUM * sizeof(MV_PP2_PHYS_RXQ_CTRL);
+	mvPp2PhysRxqs = mvOsMalloc(bytes);
+
+	if (!mvPp2PhysRxqs) {
+		mvOsPrintf("mvPp2 Can't allocate %d Bytes for %d RXQs controls\n",
+			   bytes, MV_PP2_RXQ_TOTAL_NUM);
+		return MV_OUT_OF_CPU_MEM;
+	}
+
+	memset(mvPp2PhysRxqs, 0, bytes);
+
+	for (i = 0; i < MV_PP2_RXQ_TOTAL_NUM; i++) {
+		mvPp2PhysRxqs[i].port = MV_PP2_RXQ_FREE;
+		mvPp2PhysRxqs[i].logicRxq = MV_PP2_RXQ_FREE;
+		mvPp2PhysRxqs[i].rxq = i;
+	}
+	return MV_OK;
+}
+
+/* Destroy all physical RXQs */
+MV_STATUS mvPp2PhysRxqsDestroy(MV_VOID)
+{
+	mvOsFree(mvPp2PhysRxqs);
+	return MV_OK;
+}
+
+/* Associate <num_rxqs> RXQs for Port number <port>, starting from RXQ number <firstRxq>
+   Port and physical RXQs must be initialized.
+   Opperation succeeds only if ALL RXQs can be added to this port - otherwise do nothing */
+MV_STATUS mvPp2PortRxqsInit(int port, int firstRxq, int numRxqs)
+{
+	int i;
+	MV_PP2_PORT_CTRL *pCtrl = mvPp2PortCtrl[port];
+
+	if (firstRxq < 0 || firstRxq + numRxqs > MV_PP2_RXQ_TOTAL_NUM) {
+		mvOsPrintf("%s: Bad RXQ parameters. first RXQ = %d,  num of RXQS = %d\n", __func__, firstRxq, numRxqs);
+		return MV_BAD_PARAM;
+	}
+	/* Check resources */
+	for (i = firstRxq; i < firstRxq + numRxqs; i++) {
+		if (mvPp2PhysRxqs[i].port != MV_PP2_RXQ_FREE || mvPp2PhysRxqs[i].logicRxq != MV_PP2_RXQ_FREE) {
+			mvOsPrintf("%s: Failed to init port#%d RXQ#%d: RXQ is already occupied\n", __func__, port, i);
+			return MV_FAIL;
+		}
+	}
+
+	/* Allocate logical RXQs */
+	if (!pCtrl->pRxQueue)
+		pCtrl->pRxQueue = mvOsMalloc(MV_PP2_MAX_RXQ * sizeof(MV_PP2_PHYS_RXQ_CTRL *));
+	if (!pCtrl->pRxQueue)
+		return MV_OUT_OF_CPU_MEM;
+
+	mvOsMemset(pCtrl->pRxQueue, 0, (MV_PP2_MAX_RXQ * sizeof(MV_PP2_PHYS_RXQ_CTRL *)));
+
+	/* Associate requested RXQs with port */
+	for (i = firstRxq; i < firstRxq + numRxqs; i++)
+		mvPp2PhysRxqMapAdd(i, port, i - firstRxq);
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2RxqPktsCoalSet(int port, int rxq, MV_U32 pkts)
+{
+	MV_U32 regVal;
+	int prxq = mvPp2LogicRxqToPhysRxq(port, rxq);
+
+	regVal = (pkts << MV_PP2_OCCUPIED_THRESH_OFFSET) & MV_PP2_OCCUPIED_THRESH_MASK;
+	mvPp2WrReg(MV_PP2_RXQ_NUM_REG, prxq);
+	mvPp2WrReg(MV_PP2_RXQ_THRESH_REG, regVal);
+
+	return MV_OK;
+}
+
+int mvPp2RxqPktsCoalGet(int port, int rxq)
+{
+	MV_U32 regVal;
+	int prxq = mvPp2LogicRxqToPhysRxq(port, rxq);
+
+	mvPp2WrReg(MV_PP2_RXQ_NUM_REG, prxq);
+	regVal = mvPp2RdReg(MV_PP2_RXQ_THRESH_REG);
+
+	return (regVal & MV_PP2_OCCUPIED_THRESH_MASK) >> MV_PP2_OCCUPIED_THRESH_OFFSET;
+}
+
+void mvPp2RxqReset(int port, int rxq)
+{
+	MV_PP2_PHYS_RXQ_CTRL *pRxq;
+	int prxq;
+
+	prxq = mvPp2LogicRxqToPhysRxq(port, rxq);
+	pRxq = &mvPp2PhysRxqs[prxq];
+
+	mvPp2DescRingReset(&pRxq->queueCtrl);
+	/* zero occupied and non-occupied counters - direct access */
+	mvPp2WrReg(MV_PP2_RXQ_STATUS_REG(prxq), 0);
+
+	/* zero next descriptor index - indirect access */
+	mvPp2WrReg(MV_PP2_RXQ_NUM_REG, prxq);
+	mvPp2WrReg(MV_PP2_RXQ_INDEX_REG, 0);
+}
+
+/* Reset all RXQs */
+void mvPp2RxReset(int port)
+{
+	MV_PP2_PORT_CTRL *pPortCtrl = mvPp2PortCtrl[port];
+	int rxq;
+
+	for (rxq = 0; rxq < pPortCtrl->rxqNum ; rxq++)
+		mvPp2RxqReset(port, rxq);
+}
+/*-------------------------------------------------------------------------------*/
+void mvPp2TxqHwfSizeSet(int port, int txp, int txq, int hwfNum)
+{
+	int ptxq = MV_PPV2_TXQ_PHYS(port, txp, txq);
+
+	mvPp2WrReg(MV_PP2_TXQ_NUM_REG, ptxq);
+	mvPp2WrReg(MV_PP2_TXQ_DESC_HWF_SIZE_REG, hwfNum & MV_PP2_TXQ_DESC_HWF_SIZE_MASK);
+}
+
+/* TXQ */
+/* Allocate and initialize descriptors for TXQ */
+MV_PP2_PHYS_TXQ_CTRL *mvPp2TxqInit(int port, int txp, int txq, int descNum, int hwfNum)
+{
+	MV_STATUS status;
+	MV_U32 regVal;
+	int desc, descPerTxq, ptxq = MV_PPV2_TXQ_PHYS(port, txp, txq);
+	MV_PP2_PHYS_TXQ_CTRL *pTxq = &mvPp2PhysTxqs[ptxq];
+	MV_PP2_QUEUE_CTRL *qCtrl = &pTxq->queueCtrl;
+
+	status = mvPp2DescrCreate(qCtrl, descNum);
+	if (status != MV_OK)
+		return NULL;
+
+	mvPp2DescRingReset(qCtrl);
+
+	mvPp2TxqFreeReservedDesc(port, txp, txq);
+
+	/* Set Tx descriptors queue starting address */
+	/* indirect access */
+	mvPp2WrReg(MV_PP2_TXQ_NUM_REG, ptxq);
+	mvPp2WrReg(MV_PP2_TXQ_DESC_ADDR_REG, pp2DescVirtToPhys(qCtrl, (MV_U8 *)qCtrl->pFirst));
+	mvPp2WrReg(MV_PP2_TXQ_DESC_SIZE_REG, descNum & MV_PP2_TXQ_DESC_SIZE_MASK);
+	mvPp2WrReg(MV_PP2_TXQ_DESC_HWF_SIZE_REG, hwfNum & MV_PP2_TXQ_DESC_HWF_SIZE_MASK);
+	mvPp2WrReg(MV_PP2_TXQ_INDEX_REG, 0);
+
+	/* Sanity check: Pending descriptors counter and sent descriptors counter must be 0 */
+	/* Pending counter read - indirect access */
+	regVal = mvPp2RdReg(MV_PP2_TXQ_PENDING_REG);
+	if (regVal != 0) {
+		mvOsPrintf("port=%d, txp=%d, txq=%d, ptxq=%d: pending=%u, reserved=%u\n",
+			port, txp, txq, ptxq,
+			((regVal & MV_PP2_TXQ_PENDING_MASK) >> MV_PP2_TXQ_PENDING_OFFSET),
+			((regVal & MV_PP2_TXQ_RESERVED_MASK) >> MV_PP2_TXQ_RESERVED_OFFSET));
+	}
+	/* Sent descriptors counter - direct access */
+	regVal = mvPp2RdReg(MV_PP2_TXQ_SENT_REG(ptxq));
+	if (regVal != 0) {
+		mvOsPrintf("port=%d, txp=%d, txq=%d, ptxq=%d, sent=0x%08x - Sent packets\n",
+			port, txp, txq, ptxq, regVal);
+	}
+
+	/* Calculate base address in prefetch buffer. We reserve 16 descriptors for each existing TXQ */
+	/* TCONTS for PON port must be continious from 0 to mvPp2HalData.maxTcont */
+	/* GBE ports assumed to be continious from 0 to (mvPp2HalData.maxPort - 1) */
+	descPerTxq = 16;
+	if (MV_PP2_IS_PON_PORT(port))
+		desc = ptxq * descPerTxq;
+	else
+		desc = (mvPp2HalData.maxTcont * MV_PP2_MAX_TXQ * descPerTxq) +
+			(port * MV_PP2_MAX_TXQ * descPerTxq) + (txq * descPerTxq);
+
+	mvPp2WrReg(MV_PP2_TXQ_PREF_BUF_REG, MV_PP2_PREF_BUF_PTR(desc) | MV_PP2_PREF_BUF_SIZE_16 |
+				MV_PP2_PREF_BUF_THRESH(descPerTxq/2));
+
+	mvPp2TxqMaxRateSet(port, txp, txq);
+
+	return pTxq;
+}
+
+MV_STATUS mvPp2TxqDelete(int port, int txp, int txq)
+{
+	int ptxq = MV_PPV2_TXQ_PHYS(port, txp, txq);
+	MV_PP2_QUEUE_CTRL *pQueueCtrl = &mvPp2PhysTxqs[ptxq].queueCtrl;
+	MV_BUF_INFO *pDescBuf = &pQueueCtrl->descBuf;
+
+	mvPp2DescrMemoryFree(pDescBuf->bufSize, (MV_ULONG *)pDescBuf->bufPhysAddr,
+				pDescBuf->bufVirtPtr, (MV_U32 *)pDescBuf->memHandle);
+
+	mvOsMemset(pQueueCtrl, 0, sizeof(*pQueueCtrl));
+
+	/* Set minimum bandwidth for disabled TXQs */
+	mvPp2WrReg(MV_PP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
+
+	/* Set Tx descriptors queue starting address and size */
+	mvPp2WrReg(MV_PP2_TXQ_NUM_REG, ptxq);
+
+	mvPp2WrReg(MV_PP2_TXQ_DESC_ADDR_REG, 0);
+	mvPp2WrReg(MV_PP2_TXQ_DESC_SIZE_REG, 0);
+	mvPp2WrReg(MV_PP2_TXQ_DESC_HWF_SIZE_REG, 0);
+
+	return MV_OK;
+}
+
+/* Allocate and initialize all physical TXQs.
+   This function must be called before any use of TXQ */
+MV_STATUS mvPp2PhysTxqsAlloc(void)
+{
+	int i, bytes;
+
+	/* Alloc one extra element for temporary TXQ */
+	bytes = (MV_PP2_TXQ_TOTAL_NUM + 1) * sizeof(MV_PP2_PHYS_TXQ_CTRL);
+
+	mvPp2PhysTxqs = mvOsMalloc(bytes);
+
+	if (!mvPp2PhysTxqs) {
+		mvOsPrintf("mvPp2 Can't allocate %d Bytes for %d TXQs control\n",
+			   bytes, MV_PP2_TXQ_TOTAL_NUM);
+		return MV_OUT_OF_CPU_MEM;
+	}
+
+	memset(mvPp2PhysTxqs, 0, bytes);
+
+	for (i = 0; i < (MV_PP2_TXQ_TOTAL_NUM + 1); i++)
+		mvPp2PhysTxqs[i].txq = i;
+
+	return MV_OK;
+}
+
+/* Destroy all physical TXQs */
+MV_VOID mvPp2PhysTxqsDestroy(MV_VOID)
+{
+	mvOsFree(mvPp2PhysTxqs);
+}
+
+/* Associate TXQs for this port
+   Physical TXQS must be initialized (by using mvPp2PhysTxqsAlloc)
+   Notice that TXQ mapping is predefined */
+MV_STATUS mvPp2PortTxqsInit(int port)
+{
+	int txp, txq, ptxq;
+	MV_PP2_PORT_CTRL *pCtrl = mvPp2PortCtrl[port];
+
+	if (!pCtrl->pTxQueue)
+		pCtrl->pTxQueue = mvOsMalloc(pCtrl->txqNum * pCtrl->txpNum * sizeof(MV_PP2_PHYS_TXQ_CTRL *));
+	if (!pCtrl->pTxQueue)
+		return MV_OUT_OF_CPU_MEM;
+
+	for (txp = 0; txp < pCtrl->txpNum; txp++) {
+		for (txq = 0; txq < pCtrl->txqNum; txq++) {
+			ptxq = MV_PPV2_TXQ_PHYS(port, txp, txq);
+			pCtrl->pTxQueue[txp * CONFIG_MV_PP2_TXQ + txq] = &mvPp2PhysTxqs[ptxq];
+		}
+	}
+
+	return MV_OK;
+}
+
+/* Allocate and initialize descriptors for Aggr TXQ */
+MV_STATUS mvPp2AggrTxqDescInit(MV_PP2_AGGR_TXQ_CTRL *txqCtrl, int descNum, int cpu)
+{
+	MV_STATUS status;
+	MV_PP2_QUEUE_CTRL *qCtrl = &txqCtrl->queueCtrl;
+
+	status = mvPp2DescrCreate(qCtrl, descNum);
+	if (status != MV_OK)
+		return status;
+
+	mvPp2DescRingReset(qCtrl);
+
+	/* Aggr TXQ no reset WA */
+	qCtrl->nextToProc = mvPp2RdReg(MV_PP2_AGGR_TXQ_INDEX_REG(cpu));
+
+	/* Set Tx descriptors queue starting address */
+	/* indirect access */
+	mvPp2WrReg(MV_PP2_AGGR_TXQ_DESC_ADDR_REG(cpu), pp2DescVirtToPhys(qCtrl, (MV_U8 *)qCtrl->pFirst));
+	mvPp2WrReg(MV_PP2_AGGR_TXQ_DESC_SIZE_REG(cpu), descNum & MV_PP2_AGGR_TXQ_DESC_SIZE_MASK);
+	/* RO - mvPp2WrReg(MV_PP2_AGGR_TXQ_INDEX_REG(cpu), 0); */
+
+	return MV_OK;
+}
+
+/* Allocate all aggregated TXQs.
+   This function must be called before any use of aggregated TXQ */
+MV_STATUS mvPp2AggrTxqsAlloc(int cpuNum)
+{
+	/* Alloc one extra element for temporary TXQ */
+	int bytes = cpuNum * sizeof(MV_PP2_PHYS_TXQ_CTRL);
+
+	mvPp2AggrTxqs = mvOsMalloc(bytes);
+
+	if (!mvPp2AggrTxqs) {
+		mvOsPrintf("mvPp2 Can't allocate %d Bytes for %d aggr TXQs control\n", bytes, cpuNum);
+		return MV_OUT_OF_CPU_MEM;
+	}
+
+	memset(mvPp2AggrTxqs, 0, bytes);
+
+	return MV_OK;
+}
+
+/* release all aggregated TXQs */
+MV_VOID mvPp2AggrTxqsDestroy(MV_VOID)
+{
+	mvOsFree(mvPp2AggrTxqs);
+}
+
+
+/* Destroy all aggregated TXQs */
+MV_VOID mvPp2AggrTxqDelete(int cpu)
+{
+	MV_PP2_AGGR_TXQ_CTRL *pTxqCtrl = &mvPp2AggrTxqs[cpu];
+	MV_PP2_QUEUE_CTRL *pQueuCtrl = &pTxqCtrl->queueCtrl;
+	MV_BUF_INFO *pDescBuf = &pQueuCtrl->descBuf;
+
+	mvPp2DescrMemoryFree(pDescBuf->bufSize, (MV_ULONG *)pDescBuf->bufPhysAddr,
+				pDescBuf->bufVirtPtr, (MV_U32 *)pDescBuf->memHandle);
+
+	mvOsMemset(pQueuCtrl, 0, sizeof(*pQueuCtrl));
+}
+
+/* Initialize aggregated TXQ */
+MV_PP2_AGGR_TXQ_CTRL *mvPp2AggrTxqInit(int cpu, int descNum)
+{
+	MV_STATUS status;
+
+	if (!mvPp2AggrTxqs)
+		return NULL;
+
+	/* Number of descriptors must be multiple of 16 */
+	if (descNum % 16 != 0) {
+		mvOsPrintf("Descriptor number %d, must be a multiple of 16\n", descNum);
+		return NULL;
+	}
+
+	mvPp2AggrTxqs[cpu].cpu = cpu;
+	status = mvPp2AggrTxqDescInit(&mvPp2AggrTxqs[cpu], descNum, cpu);
+	if (status != MV_OK) {
+		mvOsPrintf("mvPp2 failed to initialize descriptor ring for aggr TXQ %d\n", cpu);
+		return NULL;
+	}
+
+	return &mvPp2AggrTxqs[cpu];
+}
+
+MV_STATUS mvPp2TxDonePktsCoalSet(int port, int txp, int txq, MV_U32 pkts)
+{
+	MV_U32 regVal;
+	int ptxq = MV_PPV2_TXQ_PHYS(port, txp, txq);
+
+	regVal = (pkts << MV_PP2_TRANSMITTED_THRESH_OFFSET) & MV_PP2_TRANSMITTED_THRESH_MASK;
+	mvPp2WrReg(MV_PP2_TXQ_NUM_REG, ptxq);
+	mvPp2WrReg(MV_PP2_TXQ_THRESH_REG, regVal);
+
+	return MV_OK;
+}
+
+int mvPp2TxDonePktsCoalGet(int port, int txp, int txq)
+{
+	MV_U32 regVal;
+	int ptxq = MV_PPV2_TXQ_PHYS(port, txp, txq);
+
+	mvPp2WrReg(MV_PP2_TXQ_NUM_REG, ptxq);
+	regVal = mvPp2RdReg(MV_PP2_TXQ_THRESH_REG);
+
+	return (regVal & MV_PP2_TRANSMITTED_THRESH_MASK) >> MV_PP2_TRANSMITTED_THRESH_OFFSET;
+}
+
+void mvPp2TxqReset(int port, int txp, int txq)
+{
+	int ptxq;
+	MV_PP2_PHYS_TXQ_CTRL *pTxq;
+
+	ptxq = MV_PPV2_TXQ_PHYS(port, txp, txq);
+	pTxq = &mvPp2PhysTxqs[ptxq];
+
+	mvPp2DescRingReset(&pTxq->queueCtrl);
+	mvPp2WrReg(MV_PP2_TXQ_NUM_REG, ptxq);
+	mvPp2WrReg(MV_PP2_TXQ_INDEX_REG, 0);
+}
+
+/* Reset all TXQs */
+void mvPp2TxpReset(int port, int txp)
+{
+	int txq;
+	MV_PP2_PORT_CTRL *pPortCtrl = mvPp2PortCtrl[port];
+
+	for (txq = 0; txq < pPortCtrl->txqNum; txq++)
+		mvPp2TxqReset(port, txp, txq);
+}
+
+/* Allocate and initialize descriptors for temporary TXQ */
+MV_STATUS mvPp2TxqTempInit(int descNum, int hwfNum)
+{
+	MV_STATUS status;
+	int ptxq = MV_PP2_TXQ_TOTAL_NUM;
+	MV_PP2_PHYS_TXQ_CTRL *pTxq = &mvPp2PhysTxqs[ptxq];
+	MV_PP2_QUEUE_CTRL *qCtrl = &pTxq->queueCtrl;
+
+	status = mvPp2DescrCreate(qCtrl, descNum);
+	if (status != MV_OK)
+		return MV_FAIL;
+
+	mvPp2DescRingReset(qCtrl);
+
+	/* Set Tx descriptors queue starting address */
+	/* indirect access */
+	mvPp2WrReg(MV_PP2_TXQ_NUM_REG, ptxq);
+	mvPp2WrReg(MV_PP2_TXQ_DESC_ADDR_REG, pp2DescVirtToPhys(qCtrl, (MV_U8 *)qCtrl->pFirst));
+	mvPp2WrReg(MV_PP2_TXQ_DESC_SIZE_REG, descNum & MV_PP2_TXQ_DESC_SIZE_MASK);
+	mvPp2WrReg(MV_PP2_TXQ_DESC_HWF_SIZE_REG, hwfNum & MV_PP2_TXQ_DESC_HWF_SIZE_MASK);
+	mvPp2WrReg(MV_PP2_TXQ_INDEX_REG, 0);
+
+	mvPp2WrReg(MV_PP2_TXQ_PREF_BUF_REG, MV_PP2_PREF_BUF_PTR(ptxq * 4) | MV_PP2_PREF_BUF_SIZE_4 | MV_PP2_PREF_BUF_THRESH(2));
+
+	return MV_OK;
+}
+
+void mvPp2TxqTempDelete(void)
+{
+	int ptxq = MV_PP2_TXQ_TOTAL_NUM;
+
+	MV_PP2_PHYS_TXQ_CTRL *pTxq = &mvPp2PhysTxqs[ptxq];
+	MV_PP2_QUEUE_CTRL *qCtrl = &pTxq->queueCtrl;
+	MV_BUF_INFO *pDescBuf = &qCtrl->descBuf;
+	mvPp2DescrMemoryFree(pDescBuf->bufSize, (MV_ULONG *)pDescBuf->bufPhysAddr,
+				pDescBuf->bufVirtPtr, (MV_U32 *)pDescBuf->memHandle);
+
+	mvOsMemset(qCtrl, 0, sizeof(*qCtrl));
+}
+/*-------------------------------------------------------------------------------*/
+/* Port */
+/* Allocate and initialize port structure
+   Alocate an initialize TXQs for this port
+   Associate <numRxqs> RXQs for Port number <port>, starting from RXQ number <firstRxq>
+   Note: mvPp2PortCtrl must be initialized, i.e. must call mvPp2HalInit before this function */
+void *mvPp2PortInit(int port, int firstRxq, int numRxqs, void *osHandle)
+{
+	MV_STATUS status;
+	MV_PP2_PORT_CTRL *pCtrl;
+
+	if (mvPp2PortCheck(port)) {
+		mvOsPrintf("%s: Bad port number: %d\n", __func__, port);
+		return NULL;
+	}
+	if (!mvPp2PortCtrl) {
+		mvOsPrintf("%s: Port control is uninitialized\n", __func__);
+		return NULL;
+	}
+
+	if (!mvPp2PortCtrl[port])
+		mvPp2PortCtrl[port] = (MV_PP2_PORT_CTRL *)mvOsMalloc(sizeof(MV_PP2_PORT_CTRL));
+	if (!mvPp2PortCtrl[port]) {
+		mvOsPrintf("%s: Could not allocate %d bytes for port structure\n", __func__, sizeof(MV_PP2_PORT_CTRL));
+		return NULL;
+	}
+
+	mvOsMemset(mvPp2PortCtrl[port], 0, sizeof(MV_PP2_PORT_CTRL));
+
+	pCtrl = mvPp2PortCtrl[port];
+	pCtrl->portNo = port;
+	pCtrl->osHandle = osHandle;
+
+	/* associate TXQs to this port */
+#ifdef CONFIG_MV_INCLUDE_PON
+	pCtrl->txpNum = MV_PP2_IS_PON_PORT(port) ? mvPp2HalData.maxTcont : 1;
+#else
+	pCtrl->txpNum = 1;
+#endif
+	pCtrl->txqNum = CONFIG_MV_PP2_TXQ;
+	status = mvPp2PortTxqsInit(port);
+	if (status != MV_OK)
+		return NULL;
+
+	/* associate RXQs to this port */
+	pCtrl->rxqNum = 0;
+	status = mvPp2PortRxqsInit(port, firstRxq, numRxqs);
+	if (status != MV_OK)
+		return NULL;
+
+	/* associate interrupt from relevant rxqs group to this port */
+	status = mvPp2GbeIsrRxqGroup(port, numRxqs);
+	if (status != MV_OK)
+		return NULL;
+
+	/* Disable port */
+	mvPp2PortIngressEnable(port, MV_FALSE);
+	mvPp2PortEgressEnable(port, MV_FALSE);
+	mvPp2PortEnable(port, MV_FALSE);
+
+	mvPp2DefaultsSet(port);
+
+	return pCtrl;
+}
+
+void mvPp2PortDestroy(int portNo)
+{
+	MV_PP2_PORT_CTRL *pPortCtrl = mvPp2PortHndlGet(portNo);
+
+	if (pPortCtrl->pTxQueue)
+		mvOsFree(pPortCtrl->pTxQueue);
+
+	if (pPortCtrl->pRxQueue)
+		mvOsFree(pPortCtrl->pRxQueue);
+
+	if (pPortCtrl)
+		mvOsFree(pPortCtrl);
+
+	mvPp2PortCtrl[portNo] = NULL;
+}
+
+/*******************************************************************************
+* mvPp2PortEgressEnable
+*
+* DESCRIPTION:
+*	Disable fetch descriptors from initialized TXQs
+*
+*       Note: Effects TXQs initialized prior to calling this function.
+*
+* INPUT:
+*	int     portNo		- Port number.
+*
+* RETURN:   MV_STATUS
+*               MV_OK - Success, Others - Failure.
+*
+*******************************************************************************/
+MV_STATUS mvPp2PortEgressEnable(int port, MV_BOOL en)
+{
+	int	         txp;
+	MV_PP2_PORT_CTRL *pPortCtrl = mvPp2PortHndlGet(port);
+
+	/* Disable all physical TXQs */
+	for (txp = 0; txp < pPortCtrl->txpNum; txp++) {
+		if (en)
+			mvPp2TxpEnable(port, txp);
+		else
+			mvPp2TxpDisable(port, txp);
+	}
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+MV_STATUS mvPp2PortEnable(int port, MV_BOOL en)
+{
+	if (!MV_PP2_IS_PON_PORT(port)) {
+		/* Enable port */
+		if (en)
+			mvGmacPortEnable(port);
+		else
+			mvGmacPortDisable(port);
+	}
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+
+/* BM */
+MV_STATUS mvPp2BmPoolBufSizeSet(int pool, int bufsize)
+{
+	MV_U32 regVal;
+
+	mvBmPoolBufSizeSet(pool, bufsize);
+	regVal = MV_ALIGN_UP(bufsize, 1 << MV_PP2_POOL_BUF_SIZE_OFFSET);
+	mvPp2WrReg(MV_PP2_POOL_BUF_SIZE_REG(pool), regVal);
+
+	return MV_OK;
+}
+
+#ifdef CONFIG_MV_ETH_PP2_1
+
+/*******************************************************************************
+* mvPp2PortIngressEnable
+*
+* DESCRIPTION:
+*	Enable/Disable receive packets to RXQs for SWF and receive packets to TXQs for HWF.
+*
+*       Note: Effects only Rx and Tx queues initialized prior to calling this function.
+*
+* INPUT:
+*	int     portNo		- Port number.
+*
+* RETURN:   MV_STATUS
+*           MV_OK - Success, Others - Failure.
+*
+*******************************************************************************/
+MV_STATUS mvPp2PortIngressEnable(int port, MV_BOOL en)
+{
+	int txp, txq, rxq;
+	MV_PP2_PORT_CTRL *pPortCtrl = mvPp2PortHndlGet(port);
+
+	/* Enable all initialized RXQs */
+	for (rxq = 0; rxq < pPortCtrl->rxqNum ; rxq++) {
+		if (pPortCtrl->pRxQueue[rxq] != NULL)
+			mvPp2RxqEnable(port, rxq, en);
+	}
+
+	/* Enable HWF for all initialized TXQs. */
+	for (txp = 0; txp < pPortCtrl->txpNum; txp++) {
+		for (txq = 0; txq < pPortCtrl->txqNum; txq++) {
+			if (pPortCtrl->pTxQueue[txp * pPortCtrl->txqNum + txq] != NULL)
+				mvPp2HwfTxqEnable(port, txp, txq, en);
+		}
+	}
+	return MV_OK;
+}
+
+MV_STATUS mvPp2RxqOffsetSet(int port, int rxq, int offset)
+{
+	MV_U32 regVal;
+	int prxq = mvPp2LogicRxqToPhysRxq(port, rxq);
+
+	if (offset % 32 != 0) {
+		mvOsPrintf("%s: offset must be in units of 32\n", __func__);
+		return MV_BAD_PARAM;
+	}
+
+	/* convert offset from bytes to units of 32 bytes */
+	offset = offset >> 5;
+
+	regVal = mvPp2RdReg(MV_PP2_RXQ_CONFIG_REG(prxq));
+	regVal &= ~MV_PP2_RXQ_PACKET_OFFSET_MASK;
+
+	/* Offset is in */
+	regVal |= ((offset << MV_PP2_RXQ_PACKET_OFFSET_OFFS) & MV_PP2_RXQ_PACKET_OFFSET_MASK);
+
+	mvPp2WrReg(MV_PP2_RXQ_CONFIG_REG(prxq), regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2RxqBmLongPoolSet(int port, int rxq, int longPool)
+{
+	MV_U32 regVal = 0;
+	int prxq = mvPp2LogicRxqToPhysRxq(port, rxq);
+
+	regVal = mvPp2RdReg(MV_PP2_RXQ_CONFIG_REG(prxq));
+	regVal &= ~MV_PP2_RXQ_POOL_LONG_MASK;
+	regVal |= ((longPool << MV_PP2_RXQ_POOL_LONG_OFFS) & MV_PP2_RXQ_POOL_LONG_MASK);
+
+	mvPp2WrReg(MV_PP2_RXQ_CONFIG_REG(prxq), regVal);
+
+	/* Update default BM priority rule */
+	mvBmRxqToQsetLongClean(prxq);
+	mvBmRxqToQsetLongSet(prxq, mvBmDefaultQsetNumGet(longPool));
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2RxqBmShortPoolSet(int port, int rxq, int shortPool)
+{
+	MV_U32 regVal = 0;
+	int prxq = mvPp2LogicRxqToPhysRxq(port, rxq);
+
+	regVal = mvPp2RdReg(MV_PP2_RXQ_CONFIG_REG(prxq));
+	regVal &= ~MV_PP2_RXQ_POOL_SHORT_MASK;
+	regVal |= ((shortPool << MV_PP2_RXQ_POOL_SHORT_OFFS) & MV_PP2_RXQ_POOL_SHORT_MASK);
+
+	mvPp2WrReg(MV_PP2_RXQ_CONFIG_REG(prxq), regVal);
+
+	/* Update default BM priority rule */
+	mvBmRxqToQsetShortClean(prxq);
+	mvBmRxqToQsetShortSet(prxq, mvBmDefaultQsetNumGet(shortPool));
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2TxqBmShortPoolSet(int port, int txp, int txq, int shortPool)
+{
+	MV_U32 regVal = 0;
+	int ptxq = MV_PPV2_TXQ_PHYS(port, txp, txq);
+
+	regVal = mvPp2RdReg(MV_PP2_HWF_TXQ_CONFIG_REG(ptxq));
+	regVal &= ~MV_PP2_HWF_TXQ_POOL_SHORT_MASK;
+
+	regVal |= ((shortPool << MV_PP2_HWF_TXQ_POOL_SHORT_OFFS) & MV_PP2_HWF_TXQ_POOL_SHORT_MASK);
+
+	mvPp2WrReg(MV_PP2_HWF_TXQ_CONFIG_REG(ptxq), regVal);
+
+	mvBmTxqToQsetShortClean(ptxq);
+	mvBmTxqToQsetShortSet(ptxq, mvBmDefaultQsetNumGet(shortPool));
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2TxqBmLongPoolSet(int port, int txp, int txq, int longPool)
+{
+	MV_U32 regVal = 0;
+	int ptxq = MV_PPV2_TXQ_PHYS(port, txp, txq);
+
+	regVal = mvPp2RdReg(MV_PP2_HWF_TXQ_CONFIG_REG(ptxq));
+	regVal &= ~MV_PP2_HWF_TXQ_POOL_LONG_MASK;
+
+	regVal |= ((longPool << MV_PP2_HWF_TXQ_POOL_LONG_OFFS) & MV_PP2_HWF_TXQ_POOL_LONG_MASK);
+
+	mvPp2WrReg(MV_PP2_HWF_TXQ_CONFIG_REG(ptxq), regVal);
+
+	mvBmTxqToQsetLongClean(ptxq);
+	mvBmTxqToQsetLongSet(ptxq, mvBmDefaultQsetNumGet(longPool));
+
+	return MV_OK;
+}
+
+#else
+
+MV_STATUS mvPp2PortIngressEnable(int port, MV_BOOL en)
+{
+	if (en)
+		mvPrsMacDropAllSet(port, 0);
+	else
+		mvPrsMacDropAllSet(port, 1);
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2RxqOffsetSet(int port, int rxq, int offset)
+{
+	MV_U32 regVal;
+	int prxq = mvPp2LogicRxqToPhysRxq(port, rxq);
+
+	if (offset % 32 != 0) {
+		mvOsPrintf("%s: offset must be in units of 32\n", __func__);
+		return MV_BAD_PARAM;
+	}
+
+	/* convert offset from bytes to units of 32 bytes */
+	offset = offset >> 5;
+
+	regVal = mvPp2RdReg(MV_PP2_V0_RXQ_CONFIG_REG(prxq));
+
+	regVal &= ~MV_PP2_V0_RXQ_PACKET_OFFSET_MASK;
+	regVal |= ((offset << MV_PP2_V0_RXQ_PACKET_OFFSET_OFFS) & MV_PP2_V0_RXQ_PACKET_OFFSET_MASK);
+
+	mvPp2WrReg(MV_PP2_V0_RXQ_CONFIG_REG(prxq), regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2RxqBmLongPoolSet(int port, int rxq, int longPool)
+{
+	MV_U32 regVal = 0;
+	int prxq = mvPp2LogicRxqToPhysRxq(port, rxq);
+
+	regVal = mvPp2RdReg(MV_PP2_V0_RXQ_CONFIG_REG(prxq));
+	regVal &= ~MV_PP2_V0_RXQ_POOL_LONG_MASK;
+	regVal |= ((longPool << MV_PP2_V0_RXQ_POOL_LONG_OFFS) & MV_PP2_V0_RXQ_POOL_LONG_MASK);
+
+	mvPp2WrReg(MV_PP2_V0_RXQ_CONFIG_REG(prxq), regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2RxqBmShortPoolSet(int port, int rxq, int shortPool)
+{
+	MV_U32 regVal = 0;
+	int prxq = mvPp2LogicRxqToPhysRxq(port, rxq);
+
+	regVal = mvPp2RdReg(MV_PP2_V0_RXQ_CONFIG_REG(prxq));
+	regVal &= ~MV_PP2_V0_RXQ_POOL_SHORT_MASK;
+	regVal |= ((shortPool << MV_PP2_V0_RXQ_POOL_SHORT_OFFS) & MV_PP2_V0_RXQ_POOL_SHORT_MASK);
+
+	mvPp2WrReg(MV_PP2_V0_RXQ_CONFIG_REG(prxq), regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2PortHwfBmPoolSet(int port, int shortPool, int longPool)
+{
+	MV_U32 regVal = 0;
+
+	regVal |= ((shortPool << MV_PP2_V0_PORT_HWF_POOL_SHORT_OFFS) & MV_PP2_V0_PORT_HWF_POOL_SHORT_MASK);
+	regVal |= ((longPool << MV_PP2_V0_PORT_HWF_POOL_LONG_OFFS) & MV_PP2_V0_PORT_HWF_POOL_LONG_MASK);
+
+	mvPp2WrReg(MV_PP2_V0_PORT_HWF_CONFIG_REG(MV_PPV2_PORT_PHYS(port)), regVal);
+
+	return MV_OK;
+}
+#endif /* CONFIG_MV_ETH_PP2_1 */
+
+/*-------------------------------------------------------------------------------*/
+
+MV_STATUS mvPp2MhSet(int port, MV_TAG_TYPE mh)
+{
+	MV_U32 regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_MH_REG(MV_PPV2_PORT_PHYS(port)));
+	/* Clear relevant fields */
+	regVal &= ~(MV_PP2_DSA_EN_MASK | MV_PP2_MH_EN_MASK);
+	switch (mh) {
+	case MV_TAG_TYPE_NONE:
+		break;
+
+	case MV_TAG_TYPE_MH:
+		regVal |= MV_PP2_MH_EN_MASK;
+		break;
+
+	case MV_TAG_TYPE_DSA:
+		regVal |= MV_PP2_DSA_EN_MASK;
+		break;
+
+	case MV_TAG_TYPE_EDSA:
+		regVal |= MV_PP2_DSA_EXTENDED;
+
+	default:
+		mvOsPrintf("port=%d: Unexpected MH = %d value\n", port, mh);
+		return MV_BAD_PARAM;
+	}
+	mvPp2WrReg(MV_PP2_MH_REG(MV_PPV2_PORT_PHYS(port)), regVal);
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	if (mh == MV_TAG_TYPE_MH)
+		mvGmacPortMhSet(port, 1);
+	else
+		mvGmacPortMhSet(port, 0);
+#endif /* CONFIG_MV_ETH_PP2_1 */
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+MV_STATUS mvPp2RxFifoInit(int portNum)
+{
+	int i, port;
+
+	for (i = 0; i < portNum; i++) {
+		port = MV_PPV2_PORT_PHYS(i);
+		mvPp2WrReg(MV_PP2_RX_DATA_FIFO_SIZE_REG(port), MV_PP2_RX_FIFO_PORT_DATA_SIZE);
+		mvPp2WrReg(MV_PP2_RX_ATTR_FIFO_SIZE_REG(port), MV_PP2_RX_FIFO_PORT_ATTR_SIZE);
+	}
+
+	mvPp2WrReg(MV_PP2_RX_MIN_PKT_SIZE_REG, MV_PP2_RX_FIFO_PORT_MIN_PKT);
+	mvPp2WrReg(MV_PP2_RX_FIFO_INIT_REG, 0x1);
+
+	return MV_OK;
+}
+
+/*-------------------------------------------------------------------------------*/
+/*******************************/
+/*       Interrupts API        */
+/*******************************/
+MV_STATUS mvPp2RxqTimeCoalSet(int port, int rxq, MV_U32 uSec)
+{
+	MV_U32 regVal;
+	int prxq = mvPp2LogicRxqToPhysRxq(port, rxq);
+	unsigned int tClkUsec;
+
+	tClkUsec = mvPp2HalData.tClk / 1000000;
+
+	/* Register contains interrupt time in units of 16 core clock sycles, */
+	/* therefore shift the result value on 4 bits */
+	regVal = (((uSec * tClkUsec) >> 4) << MV_PP2_ISR_RX_THRESHOLD_OFFS) & MV_PP2_ISR_RX_THRESHOLD_MASK;
+
+	mvPp2WrReg(MV_PP2_ISR_RX_THRESHOLD_REG(prxq), regVal);
+
+	return MV_OK;
+}
+
+unsigned int mvPp2RxqTimeCoalGet(int port, int rxq)
+{
+	MV_U32 regVal;
+	int prxq = mvPp2LogicRxqToPhysRxq(port, rxq);
+	unsigned int res, tClkUsec, uSec;
+
+	tClkUsec = mvPp2HalData.tClk / 1000000;
+	regVal = mvPp2RdReg(MV_PP2_ISR_RX_THRESHOLD_REG(prxq));
+
+	/* Register contains interrupt time in units of 16 core clock sycles, */
+	/* therefore shift the result value on 4 bits */
+	res = (((regVal & MV_PP2_ISR_RX_THRESHOLD_MASK) >> MV_PP2_ISR_RX_THRESHOLD_OFFS) << 4);
+
+	if ((res % tClkUsec) != 0)
+		uSec = (res / tClkUsec) + 1;
+	else
+		uSec = res / tClkUsec;
+
+	return uSec;
+}
+
+/* unmask the current CPU's rx/tx interrupts                   *
+ *  - rxq_mask: support rxq to cpu granularity                 *
+ *  - isTxDoneIsr: if 0 then Tx Done interruptare not unmasked */
+MV_STATUS mvPp2GbeIsrRxTxUnmask(int port, MV_U16 rxq_mask, int isTxDoneIsr)
+{
+	if (MV_PP2_IS_PON_PORT(port)) {
+		mvPp2WrReg(MV_PP2_ISR_PON_RX_TX_MASK_REG,
+			(MV_PP2_PON_CAUSE_MISC_SUM_MASK |
+			((isTxDoneIsr) ? MV_PP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK : 0) |
+			(MV_PP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK & rxq_mask)));
+	} else {
+		mvPp2WrReg(MV_PP2_ISR_RX_TX_MASK_REG(MV_PPV2_PORT_PHYS(port)),
+			(MV_PP2_CAUSE_MISC_SUM_MASK |
+			((isTxDoneIsr) ? MV_PP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK : 0) |
+			(MV_PP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK & rxq_mask)));
+	}
+
+	return MV_OK;
+}
+
+/* mask the current CPU's rx/tx interrupts */
+MV_STATUS mvPp2GbeIsrRxTxMask(int port)
+{
+	if (MV_PP2_IS_PON_PORT(port))
+		mvPp2WrReg(MV_PP2_ISR_PON_RX_TX_MASK_REG, 0);
+	else
+		mvPp2WrReg(MV_PP2_ISR_RX_TX_MASK_REG(MV_PPV2_PORT_PHYS(port)), 0);
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2GbeIsrRxqGroup(int port, int rxqNum)
+{
+	if ((rxqNum % 4 != 0) || (rxqNum > MV_PP2_MAX_RXQ)) {
+		mvOsPrintf("%s: bad number of rxqs - %d.  Must be multiple of 4 and less than %d\n",
+			__func__, rxqNum, MV_PP2_MAX_RXQ);
+		return MV_BAD_PARAM;
+	}
+
+	mvPp2WrReg(MV_PP2_ISR_RXQ_GROUP_REG(port), rxqNum);
+
+	return MV_OK;
+}
+/*-------------------------------------------------------------------------------*/
+/* WRR / EJP configuration routines */
+
+MV_STATUS mvPp2TxpMaxRateSet(int port, int txp)
+{
+	MV_U32 regVal;
+	int eport;
+
+	eport = mvPp2EgressPort(port, txp);
+	mvPp2WrReg(MV_PP2_TXP_SCHED_PORT_INDEX_REG, eport);
+
+	regVal = mvPp2RdReg(MV_PP2_TXP_SCHED_REFILL_REG);
+	regVal &= ~MV_PP2_TXP_REFILL_PERIOD_ALL_MASK;
+	regVal |= MV_PP2_TXP_REFILL_PERIOD_MASK(1);
+	regVal |= MV_PP2_TXP_REFILL_TOKENS_ALL_MASK;
+	mvPp2WrReg(MV_PP2_TXP_SCHED_REFILL_REG, regVal);
+
+	regVal = MV_PP2_TXP_TOKEN_CNTR_MAX;
+	mvPp2WrReg(MV_PP2_TXP_SCHED_TOKEN_SIZE_REG, regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2TxqMaxRateSet(int port, int txp, int txq)
+{
+	MV_U32 regVal;
+	int eport;
+
+	eport = mvPp2EgressPort(port, txp);
+	mvPp2WrReg(MV_PP2_TXP_SCHED_PORT_INDEX_REG, eport);
+
+	regVal = mvPp2RdReg(MV_PP2_TXQ_SCHED_REFILL_REG(txq));
+	regVal &= ~MV_PP2_TXQ_REFILL_PERIOD_ALL_MASK;
+	regVal |= MV_PP2_TXQ_REFILL_PERIOD_MASK(1);
+	regVal |= MV_PP2_TXQ_REFILL_TOKENS_ALL_MASK;
+	mvPp2WrReg(MV_PP2_TXQ_SCHED_REFILL_REG(txq), regVal);
+
+	regVal = MV_PP2_TXQ_TOKEN_CNTR_MAX;
+	mvPp2WrReg(MV_PP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), regVal);
+
+	return MV_OK;
+}
+
+/* Calculate period and tokens accordingly with required rate and accuracy */
+MV_STATUS mvPp2RateCalc(int rate, unsigned int accuracy, unsigned int *pPeriod, unsigned int *pTokens)
+{
+	/* Calculate refill tokens and period - rate [Kbps] = tokens [bits] * 1000 / period [usec] */
+	/* Assume:  Tclock [MHz] / BasicRefillNoOfClocks = 1 */
+	unsigned int period, tokens, calc;
+
+	if (rate == 0) {
+		/* Disable traffic from the port: tokens = 0 */
+		if (pPeriod != NULL)
+			*pPeriod = 1000;
+
+		if (pTokens != NULL)
+			*pTokens = 0;
+
+		return MV_OK;
+	}
+
+	/* Find values of "period" and "tokens" match "rate" and "accuracy" when period is minimal */
+	for (period = 1; period <= 1000; period++) {
+		tokens = 1;
+		while (MV_TRUE)	{
+			calc = (tokens * 1000) / period;
+			if (((MV_ABS(calc - rate) * 100) / rate) <= accuracy) {
+				if (pPeriod != NULL)
+					*pPeriod = period;
+
+				if (pTokens != NULL)
+					*pTokens = tokens;
+
+				return MV_OK;
+			}
+			if (calc > rate)
+				break;
+
+			tokens++;
+		}
+	}
+	return MV_FAIL;
+}
+
+/* Set bandwidth limitation for TX port
+ *   rate [Kbps]    - steady state TX bandwidth limitation
+ */
+MV_STATUS   mvPp2TxpRateSet(int port, int txp, int rate)
+{
+	MV_U32		regVal;
+	unsigned int	tokens, period, txPortNum, accuracy = 0;
+	MV_STATUS	status;
+
+	if (mvPp2TxpCheck(port, txp))
+		return MV_BAD_PARAM;
+
+	txPortNum = mvPp2EgressPort(port, txp);
+	mvPp2WrReg(MV_PP2_TXP_SCHED_PORT_INDEX_REG, txPortNum);
+
+	regVal = mvPp2RdReg(MV_PP2_TXP_SCHED_PERIOD_REG);
+
+	status = mvPp2RateCalc(rate, accuracy, &period, &tokens);
+	if (status != MV_OK) {
+		mvOsPrintf("%s: Can't provide rate of %d [Kbps] with accuracy of %d [%%]\n",
+				__func__, rate, accuracy);
+		return status;
+	}
+	if (tokens > MV_PP2_TXP_REFILL_TOKENS_MAX)
+		tokens = MV_PP2_TXP_REFILL_TOKENS_MAX;
+
+	if (period > MV_PP2_TXP_REFILL_PERIOD_MAX)
+		period = MV_PP2_TXP_REFILL_PERIOD_MAX;
+
+	regVal = mvPp2RdReg(MV_PP2_TXP_SCHED_REFILL_REG);
+
+	regVal &= ~MV_PP2_TXP_REFILL_TOKENS_ALL_MASK ;
+	regVal |= MV_PP2_TXP_REFILL_TOKENS_MASK(tokens);
+
+	regVal &= ~MV_PP2_TXP_REFILL_PERIOD_ALL_MASK;
+	regVal |= MV_PP2_TXP_REFILL_PERIOD_MASK(period);
+
+	mvPp2WrReg(MV_PP2_TXP_SCHED_REFILL_REG, regVal);
+
+	return MV_OK;
+}
+
+/* Set maximum burst size for TX port
+ *   burst [bytes] - number of bytes to be sent with maximum possible TX rate,
+ *                    before TX rate limitation will take place.
+ */
+MV_STATUS mvPp2TxpBurstSet(int port, int txp, int burst)
+{
+	MV_U32  size, mtu;
+	int txPortNum;
+
+	if (mvPp2TxpCheck(port, txp))
+		return MV_BAD_PARAM;
+
+	txPortNum = mvPp2EgressPort(port, txp);
+	mvPp2WrReg(MV_PP2_TXP_SCHED_PORT_INDEX_REG, txPortNum);
+
+	/* Calulate Token Bucket Size */
+	size = 8 * burst;
+
+	if (size > MV_PP2_TXP_TOKEN_SIZE_MAX)
+		size = MV_PP2_TXP_TOKEN_SIZE_MAX;
+
+	/* Token bucket size must be larger then MTU */
+	mtu = mvPp2RdReg(MV_PP2_TXP_SCHED_MTU_REG);
+	if (mtu > size) {
+		mvOsPrintf("%s Error: Bucket size (%d bytes) < MTU (%d bytes)\n",
+					__func__, (size / 8), (mtu / 8));
+		return MV_BAD_PARAM;
+	}
+	mvPp2WrReg(MV_PP2_TXP_SCHED_TOKEN_SIZE_REG, size);
+
+	return MV_OK;
+}
+
+/* Set bandwidth limitation for TXQ
+ *   rate  [Kbps]  - steady state TX rate limitation
+ */
+MV_STATUS   mvPp2TxqRateSet(int port, int txp, int txq, int rate)
+{
+	MV_U32		regVal;
+	unsigned int	txPortNum, period, tokens, accuracy = 0;
+	MV_STATUS	status;
+
+	if (mvPp2TxpCheck(port, txp))
+		return MV_BAD_PARAM;
+
+	if (txq >= MV_PP2_MAX_TXQ)
+		return MV_BAD_PARAM;
+
+	status = mvPp2RateCalc(rate, accuracy, &period, &tokens);
+	if (status != MV_OK) {
+		mvOsPrintf("%s: Can't provide rate of %d [Kbps] with accuracy of %d [%%]\n",
+				__func__, rate, accuracy);
+		return status;
+	}
+
+	txPortNum = mvPp2EgressPort(port, txp);
+	mvPp2WrReg(MV_PP2_TXP_SCHED_PORT_INDEX_REG, txPortNum);
+
+	if (tokens > MV_PP2_TXQ_REFILL_TOKENS_MAX)
+		tokens = MV_PP2_TXQ_REFILL_TOKENS_MAX;
+
+	if (period > MV_PP2_TXQ_REFILL_PERIOD_MAX)
+		period = MV_PP2_TXQ_REFILL_PERIOD_MAX;
+
+	regVal = mvPp2RdReg(MV_PP2_TXQ_SCHED_REFILL_REG(txq));
+
+	regVal &= ~MV_PP2_TXQ_REFILL_TOKENS_ALL_MASK;
+	regVal |= MV_PP2_TXQ_REFILL_TOKENS_MASK(tokens);
+
+	regVal &= ~MV_PP2_TXQ_REFILL_PERIOD_ALL_MASK;
+	regVal |= MV_PP2_TXQ_REFILL_PERIOD_MASK(period);
+
+	mvPp2WrReg(MV_PP2_TXQ_SCHED_REFILL_REG(txq), regVal);
+
+	return MV_OK;
+}
+
+/* Set maximum burst size for TX port
+ *   burst [bytes] - number of bytes to be sent with maximum possible TX rate,
+ *                    before TX bandwidth limitation will take place.
+ */
+MV_STATUS mvPp2TxqBurstSet(int port, int txp, int txq, int burst)
+{
+	MV_U32  size, mtu;
+	int txPortNum;
+
+	if (mvPp2TxpCheck(port, txp))
+		return MV_BAD_PARAM;
+
+	if (txq >= MV_PP2_MAX_TXQ)
+		return MV_BAD_PARAM;
+
+	txPortNum = mvPp2EgressPort(port, txp);
+	mvPp2WrReg(MV_PP2_TXP_SCHED_PORT_INDEX_REG, txPortNum);
+
+	/* Calulate Tocket Bucket Size */
+	size = 8 * burst;
+
+	if (size > MV_PP2_TXQ_TOKEN_SIZE_MAX)
+		size = MV_PP2_TXQ_TOKEN_SIZE_MAX;
+
+	/* Tocken bucket size must be larger then MTU */
+	mtu = mvPp2RdReg(MV_PP2_TXP_SCHED_MTU_REG);
+	if (mtu > size) {
+		mvOsPrintf("%s Error: Bucket size (%d bytes) < MTU (%d bytes)\n",
+					__func__, (size / 8), (mtu / 8));
+		return MV_BAD_PARAM;
+	}
+
+	mvPp2WrReg(MV_PP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), size);
+
+	return MV_OK;
+}
+
+/* Set TXQ to work in FIX priority mode */
+MV_STATUS mvPp2TxqFixPrioSet(int port, int txp, int txq)
+{
+	MV_U32 regVal;
+	int txPortNum;
+
+	if (mvPp2TxpCheck(port, txp))
+		return MV_BAD_PARAM;
+
+	if (txq >= MV_PP2_MAX_TXQ)
+		return MV_BAD_PARAM;
+
+	txPortNum = mvPp2EgressPort(port, txp);
+	mvPp2WrReg(MV_PP2_TXP_SCHED_PORT_INDEX_REG, txPortNum);
+
+	regVal = mvPp2RdReg(MV_PP2_TXP_SCHED_FIXED_PRIO_REG);
+	regVal |= (1 << txq);
+	mvPp2WrReg(MV_PP2_TXP_SCHED_FIXED_PRIO_REG, regVal);
+
+	return MV_OK;
+}
+
+/* Set TXQ to work in WRR mode and set relative weight. */
+/*   Weight range [1..N] */
+MV_STATUS mvPp2TxqWrrPrioSet(int port, int txp, int txq, int weight)
+{
+	MV_U32 regVal, mtu, mtu_aligned, weight_min;
+	int txPortNum;
+
+	if (mvPp2TxpCheck(port, txp))
+		return MV_BAD_PARAM;
+
+	if (txq >= MV_PP2_MAX_TXQ)
+		return MV_BAD_PARAM;
+
+	txPortNum = mvPp2EgressPort(port, txp);
+	mvPp2WrReg(MV_PP2_TXP_SCHED_PORT_INDEX_REG, txPortNum);
+
+	/* Weight * 256 bytes * 8 bits must be larger then MTU [bits] */
+	mtu = mvPp2RdReg(MV_PP2_TXP_SCHED_MTU_REG);
+
+	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value, now get read MTU*/
+	mtu /= MV_AMPLIFY_FACTOR_MTU;
+	mtu /= MV_BIT_NUM_OF_BYTE; /* move to bytes */
+	mtu_aligned = MV_ALIGN_UP(mtu, MV_WRR_WEIGHT_UNIT);
+	weight_min = mtu_aligned / MV_WRR_WEIGHT_UNIT;
+
+	if ((weight < weight_min) || (weight > MV_PP2_TXQ_WRR_WEIGHT_MAX)) {
+		mvOsPrintf("%s Error: weight=%d is out of range %d...%d\n",
+				__func__, weight, weight_min, MV_PP2_TXQ_WRR_WEIGHT_MAX);
+		return MV_FAIL;
+	}
+
+
+	regVal = mvPp2RdReg(MV_PP2_TXQ_SCHED_WRR_REG(txq));
+
+	regVal &= ~MV_PP2_TXQ_WRR_WEIGHT_ALL_MASK;
+	regVal |= MV_PP2_TXQ_WRR_WEIGHT_MASK(weight);
+	mvPp2WrReg(MV_PP2_TXQ_SCHED_WRR_REG(txq), regVal);
+
+	regVal = mvPp2RdReg(MV_PP2_TXP_SCHED_FIXED_PRIO_REG);
+	regVal &= ~(1 << txq);
+	mvPp2WrReg(MV_PP2_TXP_SCHED_FIXED_PRIO_REG, regVal);
+
+	return MV_OK;
+}
+
+/* Set minimum number of tockens to start transmit for TX port
+ *   maxTxSize [bytes]    - maximum packet size can be sent via this TX port
+ */
+MV_STATUS   mvPp2TxpMaxTxSizeSet(int port, int txp, int maxTxSize)
+{
+	MV_U32	regVal, size, mtu;
+	int	txq, txPortNum;
+
+	if (mvPp2TxpCheck(port, txp))
+		return MV_BAD_PARAM;
+
+	mtu = maxTxSize * 8;
+	if (mtu > MV_PP2_TXP_MTU_MAX)
+		mtu = MV_PP2_TXP_MTU_MAX;
+
+	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
+	mtu = MV_AMPLIFY_FACTOR_MTU * mtu;
+
+	txPortNum = mvPp2EgressPort(port, txp);
+
+	mvPp2WrReg(MV_PP2_TXP_SCHED_PORT_INDEX_REG, txPortNum);
+	/* set MTU */
+	regVal = mvPp2RdReg(MV_PP2_TXP_SCHED_MTU_REG);
+	regVal &= ~MV_PP2_TXP_MTU_ALL_MASK;
+	regVal |= MV_PP2_TXP_MTU_MASK(mtu);
+
+	mvPp2WrReg(MV_PP2_TXP_SCHED_MTU_REG, regVal);
+
+	/* TXP token size and all TXQs token size must be larger that MTU */
+	regVal = mvPp2RdReg(MV_PP2_TXP_SCHED_TOKEN_SIZE_REG);
+	size = regVal & MV_PP2_TXP_TOKEN_SIZE_MAX;
+	if (size < mtu) {
+		size = mtu;
+		regVal &= ~MV_PP2_TXP_TOKEN_SIZE_MAX;
+		regVal |= size;
+		mvPp2WrReg(MV_PP2_TXP_SCHED_TOKEN_SIZE_REG, regVal);
+	}
+	for (txq = 0; txq < CONFIG_MV_PP2_TXQ; txq++) {
+		regVal = mvPp2RdReg(MV_PP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
+		size = regVal & MV_PP2_TXQ_TOKEN_SIZE_MAX;
+		if (size < mtu) {
+			size = mtu;
+			regVal &= ~MV_PP2_TXQ_TOKEN_SIZE_MAX;
+			regVal |= size;
+			mvPp2WrReg(MV_PP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), regVal);
+		}
+	}
+	return MV_OK;
+}
+
+/* Disable transmit via physical egress queue - HW doesn't take descriptors from DRAM */
+MV_STATUS mvPp2TxpDisable(int port, int txp)
+{
+	MV_U32 regData;
+	int    mDelay;
+	int    txPortNum = mvPp2EgressPort(port, txp);
+
+	/* Issue stop command for active channels only */
+	mvPp2WrReg(MV_PP2_TXP_SCHED_PORT_INDEX_REG, txPortNum);
+	regData = (mvPp2RdReg(MV_PP2_TXP_SCHED_Q_CMD_REG)) & MV_PP2_TXP_SCHED_ENQ_MASK;
+	if (regData != 0)
+		mvPp2WrReg(MV_PP2_TXP_SCHED_Q_CMD_REG, (regData << MV_PP2_TXP_SCHED_DISQ_OFFSET));
+
+	/* Wait for all Tx activity to terminate. */
+	/* for PON, do not wait for TXQ, since for Functional Erratum FE-8309479, PON TXQ could only be flushed
+	    for Ethernet port, not PON port, so TXQ will never be stopped */
+	if (MV_PP2_IS_PON_PORT(port))
+		return MV_OK;
+
+	mDelay = 0;
+	do {
+		if (mDelay >= TX_DISABLE_TIMEOUT_MSEC) {
+			mvOsPrintf("port=%d, txp=%d: TIMEOUT for TX stopped !!! txQueueCmd - 0x%08x\n",
+				   port, txp, regData);
+			return MV_TIMEOUT;
+		}
+		mvOsDelay(1);
+		mDelay++;
+
+		/* Check port TX Command register that all Tx queues are stopped */
+		regData = mvPp2RdReg(MV_PP2_TXP_SCHED_Q_CMD_REG);
+	} while (regData & MV_PP2_TXP_SCHED_ENQ_MASK);
+
+	return MV_OK;
+}
+
+/* Enable transmit via physical egress queue - HW starts take descriptors from DRAM */
+MV_STATUS mvPp2TxpEnable(int port, int txp)
+{
+	MV_PP2_PORT_CTRL *pPortCtrl = mvPp2PortHndlGet(port);
+	MV_U32 qMap;
+	int    txq, eport = mvPp2EgressPort(port, txp);
+
+	/* Enable all initialized TXs. */
+	qMap = 0;
+	for (txq = 0; txq < pPortCtrl->txqNum; txq++) {
+		if (pPortCtrl->pTxQueue[txp * CONFIG_MV_PP2_TXQ + txq] != NULL)
+			qMap |= (1 << txq);
+	}
+	/* Indirect access to register */
+	mvPp2WrReg(MV_PP2_TXP_SCHED_PORT_INDEX_REG, eport);
+	mvPp2WrReg(MV_PP2_TXP_SCHED_Q_CMD_REG, qMap);
+
+	return MV_OK;
+}
+
+#ifdef CONFIG_MV_ETH_PP2_1
+/* Functions implemented only for PPv2.1 version (A0 and later) */
+MV_STATUS mvPp2RxqEnable(int port, int rxq, MV_BOOL en)
+{
+	MV_U32 regVal;
+	int prxq = mvPp2LogicRxqToPhysRxq(port, rxq);
+
+	regVal = mvPp2RdReg(MV_PP2_RXQ_CONFIG_REG(prxq));
+	if (en)
+		regVal &= ~MV_PP2_RXQ_DISABLE_MASK;
+	else
+		regVal |= MV_PP2_RXQ_DISABLE_MASK;
+
+	mvPp2WrReg(MV_PP2_RXQ_CONFIG_REG(prxq), regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2HwfTxqEnable(int port, int txp, int txq, MV_BOOL en)
+{
+	MV_U32 regVal;
+	int ptxq = MV_PPV2_TXQ_PHYS(port, txp, txq);
+
+	regVal = mvPp2RdReg(MV_PP2_HWF_TXQ_CONFIG_REG(ptxq));
+
+	if (en)
+		regVal &= ~MV_PP2_HWF_TXQ_DISABLE_MASK;
+	else
+		regVal |= MV_PP2_HWF_TXQ_DISABLE_MASK;
+
+	mvPp2WrReg(MV_PP2_HWF_TXQ_CONFIG_REG(ptxq), regVal);
+
+	return MV_OK;
+}
+
+MV_BOOL mvPp2DisableCmdInProgress(void)
+{
+	MV_U32 regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_RX_STATUS);
+	regVal &= MV_PP2_DISABLE_IN_PROG_MASK;
+
+	return regVal;
+}
+
+
+MV_STATUS mvPp2TxqDrainSet(int port, int txp, int txq, MV_BOOL en)
+{
+	MV_U32 regVal;
+	int ptxq = MV_PPV2_TXQ_PHYS(port, txp, txq);
+
+	mvPp2WrReg(MV_PP2_TXQ_NUM_REG, ptxq);
+	regVal = mvPp2RdReg(MV_PP2_TXQ_PREF_BUF_REG);
+
+	if (en)
+		regVal |= MV_PP2_HWF_TXQ_DISABLE_MASK;
+	else
+		regVal &= ~MV_PP2_HWF_TXQ_DISABLE_MASK;
+
+	mvPp2WrReg(MV_PP2_TXQ_PREF_BUF_REG, regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2TxPortFifoFlush(int port, MV_BOOL en)
+{
+	MV_U32 regVal;
+
+	/* valid only for ethernet ports (not for xPON) */
+	if (MV_PP2_IS_PON_PORT(port))
+		return MV_NOT_SUPPORTED;
+
+	regVal = mvPp2RdReg(MV_PP2_TX_PORT_FLUSH_REG);
+
+	if (en)
+		regVal |= MV_PP2_TX_PORT_FLUSH_MASK(port);
+	else
+		regVal &= ~MV_PP2_TX_PORT_FLUSH_MASK(port);
+
+	mvPp2WrReg(MV_PP2_TX_PORT_FLUSH_REG, regVal);
+
+	return MV_OK;
+}
+
+#else /* Stabs for Z1 */
+
+MV_STATUS mvPp2TxqDrainSet(int port, int txp, int txq, MV_BOOL en)
+{
+	return MV_OK;
+}
+
+MV_STATUS mvPp2TxPortFifoFlush(int port, MV_BOOL en)
+{
+	return MV_OK;
+}
+#endif /* CONFIG_MV_ETH_PP2_1 */
+
+/* Function for swithcing SWF to HWF */
+/* txq is physical (global) txq in range 0..MV_PP2_TXQ_TOTAL_NUM */
+/* txq is physical (global) rxq in range 0..MV_PP2_RXQ_TOTAL_NUM */
+
+MV_STATUS mvPp2FwdSwitchCtrl(MV_U32 flowId, int txq, int rxq, int msec)
+{
+	MV_U32 regVal;
+	int timeout, max;
+
+	/* Check validity of parameters */
+	if (mvPp2MaxCheck(txq, MV_PP2_TXQ_TOTAL_NUM, "global txq"))
+		return MV_BAD_PARAM;
+
+	if (mvPp2MaxCheck(rxq, MV_PP2_RXQ_TOTAL_NUM, "global rxq"))
+		return MV_BAD_PARAM;
+
+	timeout = MV_PP2_FWD_SWITCH_TIMEOUT_MAX * 1024;
+	max = timeout / (mvPp2HalData.tClk / 1000);
+	if (mvPp2MaxCheck(msec, max + 1, "timeout msec"))
+		return MV_BAD_PARAM;
+
+	mvPp2WrReg(MV_PP2_FWD_SWITCH_FLOW_ID_REG, flowId);
+	timeout = ((mvPp2HalData.tClk / 1000) * msec) / 1024;
+	regVal = MV_PP2_FWD_SWITCH_TXQ_VAL(txq) | MV_PP2_FWD_SWITCH_RXQ_VAL(rxq) |
+		MV_PP2_FWD_SWITCH_TIMEOUT_VAL(timeout);
+	mvPp2WrReg(MV_PP2_FWD_SWITCH_CTRL_REG, regVal);
+
+	return MV_OK;
+}
+
+int       mvPp2FwdSwitchStatus(int *hwState, int *msec)
+{
+	MV_U32 regVal, cycles;
+
+	regVal = mvPp2RdReg(MV_PP2_FWD_SWITCH_STATUS_REG);
+	if (hwState)
+		*hwState = (regVal & MV_PP2_FWD_SWITCH_STATE_MASK) >> MV_PP2_FWD_SWITCH_STATE_OFFS;
+
+	cycles = (regVal & MV_PP2_FWD_SWITCH_TIMER_MASK) >> MV_PP2_FWD_SWITCH_TIMER_OFFS;
+	cycles *= 1024;
+	if (msec)
+		*msec = cycles / (mvPp2HalData.tClk / 1000);
+
+	return (regVal & MV_PP2_FWD_SWITCH_STATUS_MASK) >> MV_PP2_FWD_SWITCH_STATUS_OFFS;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/gbe/mvPp2Gbe.h b/drivers/net/ethernet/mvebu_net/pp2/hal/gbe/mvPp2Gbe.h
new file mode 100644
index 000000000000..d02ae95bcabf
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/gbe/mvPp2Gbe.h
@@ -0,0 +1,761 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __MV_PP2_GBE_H__
+#define __MV_PP2_GBE_H__
+
+#include "mvTypes.h"
+#include "mvCommon.h"
+#include "mv802_3.h"
+#include "mvOs.h"
+
+#include "mvPp2GbeRegs.h"
+#include "bm/mvBm.h"
+#include "gmac/mvEthGmacApi.h"
+#include "common/mvPp2Common.h"
+#include "prs/mvPp2PrsHw.h"
+
+#define PP2_CPU_CODE_IS_RX_SPECIAL(cpu_code)		((cpu_code) & RI_CPU_CODE_RX_SPEC_VAL)
+#define MV_AMPLIFY_FACTOR_MTU				(3)
+#define MV_BIT_NUM_OF_BYTE				(8)
+#define MV_WRR_WEIGHT_UNIT				(256)
+
+static inline int mvPp2IsRxSpecial(MV_U16 parser_info)
+{
+	MV_U16 cpu_code = (parser_info & PP2_RX_CPU_CODE_MASK) >> PP2_RX_CPU_CODE_OFFS;
+
+	return PP2_CPU_CODE_IS_RX_SPECIAL(cpu_code);
+}
+
+static inline int mvPp2RxBmPoolId(PP2_RX_DESC *rxDesc)
+{
+	return (rxDesc->status & PP2_RX_BM_POOL_ALL_MASK) >> PP2_RX_BM_POOL_ID_OFFS;
+}
+
+/************************** PPv2 HW Configuration ***********************/
+typedef struct eth_pbuf {
+	void	*osInfo;
+	MV_ULONG physAddr;
+	MV_U8	*pBuf;
+	MV_U16	bytes;
+	MV_U16	offset;
+	MV_U8	pool;
+	MV_U8	qset;
+	MV_U8	grntd;
+	MV_U8	reserved;
+	MV_U16	vlanId;
+} MV_ETH_PKT;
+
+/************************** Port + Queue Control Structures ******************************/
+typedef struct {
+	char *pFirst;
+	int lastDesc;
+	int nextToProc;
+	int descSize;
+	MV_BUF_INFO descBuf;
+} MV_PP2_QUEUE_CTRL;
+
+#define MV_PP2_QUEUE_DESC_PTR(pQueueCtrl, descIdx)                 \
+	((pQueueCtrl)->pFirst + ((descIdx) * (pQueueCtrl)->descSize))
+
+#define MV_PP2_QUEUE_NEXT_DESC(pQueueCtrl, descIdx)  \
+	(((descIdx) < (pQueueCtrl)->lastDesc) ? ((descIdx) + 1) : 0)
+
+#define MV_PP2_QUEUE_PREV_DESC(pQueueCtrl, descIdx)  \
+	(((descIdx) > 0) ? ((descIdx) - 1) : (pQueueCtrl)->lastDesc)
+
+/*-------------------------------------------------------------------------------*/
+/* TXQ */
+typedef struct {
+	MV_PP2_QUEUE_CTRL queueCtrl;
+	int txq;
+} MV_PP2_PHYS_TXQ_CTRL;
+
+typedef struct {
+	MV_PP2_QUEUE_CTRL queueCtrl;
+	int cpu;
+} MV_PP2_AGGR_TXQ_CTRL;
+
+/* physical TXQs */
+extern MV_PP2_PHYS_TXQ_CTRL *mvPp2PhysTxqs;
+
+/* aggregated TXQs */
+extern MV_PP2_AGGR_TXQ_CTRL *mvPp2AggrTxqs;
+
+/*-------------------------------------------------------------------------------*/
+/* RXQ */
+
+typedef struct {
+	MV_PP2_QUEUE_CTRL queueCtrl;
+	int rxq;
+	int port;
+	int logicRxq;
+} MV_PP2_PHYS_RXQ_CTRL;
+
+/* physical RXQs */
+extern MV_PP2_PHYS_RXQ_CTRL *mvPp2PhysRxqs;
+
+/*-------------------------------------------------------------------------------*/
+/* Port */
+typedef struct {
+	int portNo;
+	MV_PP2_PHYS_RXQ_CTRL **pRxQueue;
+	MV_PP2_PHYS_TXQ_CTRL **pTxQueue;
+	int rxqNum;
+	int txpNum;
+	int txqNum;
+	void *osHandle;
+} MV_PP2_PORT_CTRL;
+
+/* ports control */
+extern MV_PP2_PORT_CTRL **mvPp2PortCtrl;
+
+/*-------------------------------------------------------------------------------*/
+/* HW data */
+typedef struct {
+	MV_U32 maxPort;
+	MV_U32 maxTcont;
+	MV_U32 aggrTxqSize;
+	MV_U32 physTxqHwfSize;
+	MV_U32 maxCPUs;
+	MV_U32 portMask;
+	MV_U32 cpuMask;
+	MV_U32 pClk;
+	MV_U32 tClk;
+	MV_BOOL	iocc;
+	MV_U16 ctrlModel;       /* Controller Model     */
+	MV_U8  ctrlRev;         /* Controller Revision  */
+} MV_PP2_HAL_DATA;
+
+extern MV_PP2_HAL_DATA mvPp2HalData;
+
+/************************** RXQ: Physical - Logical Mapping ******************************/
+static INLINE int mvPp2PhysRxqToPort(int prxq)
+{
+	return mvPp2PhysRxqs[prxq].port;
+}
+
+static INLINE int mvPp2PhysRxqToLogicRxq(int prxq)
+{
+	return mvPp2PhysRxqs[prxq].logicRxq;
+}
+
+static INLINE int mvPp2LogicRxqToPhysRxq(int port, int rxq)
+{
+	if (mvPp2PortCtrl[port]->pRxQueue[rxq])
+		return mvPp2PortCtrl[port]->pRxQueue[rxq]->rxq;
+
+	return -1;
+}
+
+
+/************************** TXQ: Physical - Logical Mapping ******************************/
+#ifdef MV_PP2_PON_EXIST
+
+#define MV_PON_LOGIC_PORT_GET()			(mvPp2HalData.maxPort - 1)
+#define MV_PP2_IS_PON_PORT(p)			((p) == MV_PON_LOGIC_PORT_GET())
+#define MV_PON_PHYS_PORT_GET()			MV_PP2_PON_PORT_ID
+#define MV_PON_PHYS_PORT(p)			((p) == MV_PP2_PON_PORT_ID)
+#define MV_PP2_TOTAL_TXP_NUM			(MV_PP2_MAX_TCONT + MV_ETH_MAX_PORTS - 1)
+#define MV_PP2_TOTAL_PON_TXQ_NUM		(MV_PP2_MAX_TCONT * MV_PP2_MAX_TXQ)
+
+#define MV_PPV2_PORT_PHYS(port)			((MV_PP2_IS_PON_PORT(port)) ? MV_PON_PHYS_PORT_GET() : (port))
+#define MV_PPV2_TXP_PHYS(port, txp)		((MV_PP2_IS_PON_PORT(port)) ? txp : (MV_PP2_MAX_TCONT + port))
+#define MV_PPV2_TXQ_PHYS(port, txp, txq)	((MV_PP2_IS_PON_PORT(port)) ? txp * MV_PP2_MAX_TXQ + txq :\
+							MV_PP2_TOTAL_PON_TXQ_NUM + port * MV_PP2_MAX_TXQ + txq)
+
+#define MV_PPV2_TXQ_LOGICAL_PORT(physTxq)	((physTxq < MV_PP2_TOTAL_PON_TXQ_NUM) ? MV_PON_LOGIC_PORT_ID_GET() :\
+							(physTxq - MV_PP2_TOTAL_PON_TXQ_NUM) / MV_PP2_MAX_TXQ)
+
+#define MV_PPV2_TXQ_LOGICAL_TXP(physTxq)	((physTxq < MV_PP2_TOTAL_PON_TXQ_NUM) ? (physTxq / MV_PP2_MAX_TXQ) : 0)
+
+#else /* Without PON */
+
+#define MV_PP2_IS_PON_PORT(p)				MV_FALSE
+#define MV_PON_PHYS_PORT(p)			MV_FALSE
+#define MV_PP2_TOTAL_TXP_NUM			(MV_ETH_MAX_PORTS)
+
+#define MV_PPV2_PORT_PHYS(port)                 (port)
+#define MV_PPV2_TXP_PHYS(port, txp)		(port)
+#define MV_PPV2_TXQ_PHYS(port, txp, txq)	(port * MV_PP2_MAX_TXQ + txq)
+#define MV_PPV2_TXQ_LOGICAL_PORT(physTxq)	(physTxq / MV_PP2_MAX_TXQ)
+#define MV_PPV2_TXQ_LOGICAL_TXP(physTxq)	0
+
+#endif /* MV_PP2_PON_EXIST */
+
+#define MV_PPV2_TXQ_LOGICAL_TXQ(physTxq)	(physTxq % MV_PP2_MAX_TXQ)
+#define MV_PP2_TXQ_TOTAL_NUM			(MV_PP2_TOTAL_TXP_NUM * MV_PP2_MAX_TXQ)
+
+/************************** Data Path functions ******************************/
+/* Set TXQ descriptors fields relevant for CSUM calculation */
+static INLINE MV_U32 mvPp2TxqDescCsum(int l3_offs, int l3_proto, int ip_hdr_len, int l4_proto)
+{
+	MV_U32 command;
+
+	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, G_L4_chk, L4_type */
+	/* required only for checksum calculation */
+	command = (l3_offs << PP2_TX_L3_OFFSET_OFFS);
+	command |= (ip_hdr_len << PP2_TX_IP_HLEN_OFFS);
+	command |= PP2_TX_IP_CSUM_DISABLE_MASK;
+
+	if (l3_proto == MV_16BIT_BE(MV_IP_TYPE)) {
+		command &= ~PP2_TX_IP_CSUM_DISABLE_MASK; /* enable IP CSUM */
+		command |= PP2_TX_L3_IP4;
+	} else
+		command |= PP2_TX_L3_IP6;
+
+	if (l4_proto == MV_IP_PROTO_TCP)
+		command |= (PP2_TX_L4_TCP | PP2_TX_L4_CSUM);
+	else if (l4_proto == MV_IP_PROTO_UDP)
+		command |= (PP2_TX_L4_UDP | PP2_TX_L4_CSUM);
+	else
+		command |= PP2_TX_L4_CSUM_NOT;
+
+	return command;
+}
+
+static INLINE MV_VOID mvPp2GbeCpuInterruptsDisable(int port, int cpuMask)
+{
+	mvPp2WrReg(MV_PP2_ISR_ENABLE_REG(port), MV_PP2_ISR_DISABLE_INTERRUPT(cpuMask));
+}
+
+static INLINE MV_VOID mvPp2GbeCpuInterruptsEnable(int port, int cpuMask)
+{
+	mvPp2WrReg(MV_PP2_ISR_ENABLE_REG(port), MV_PP2_ISR_ENABLE_INTERRUPT(cpuMask));
+}
+
+/* Get Giga port handler */
+static INLINE MV_PP2_PORT_CTRL *mvPp2PortHndlGet(int port)
+{
+	return mvPp2PortCtrl[port];
+}
+
+/* Get physical RX queue handler */
+static INLINE MV_PP2_PHYS_RXQ_CTRL *mvPp2RxqHndlGet(int port, int rxq)
+{
+	return mvPp2PortCtrl[port]->pRxQueue[rxq];
+}
+
+/* Get physical TX queue handler */
+static INLINE MV_PP2_PHYS_TXQ_CTRL *mvPp2TxqHndlGet(int port, int txp, int txq)
+{
+	MV_PP2_PORT_CTRL *pPortCtrl = mvPp2PortCtrl[port];
+
+	return pPortCtrl->pTxQueue[txp * pPortCtrl->txqNum + txq];
+}
+
+/* Get Aggregated TX queue handler */
+static INLINE MV_PP2_AGGR_TXQ_CTRL *mvPp2AggrTxqHndlGet(int cpu)
+{
+	return &mvPp2AggrTxqs[cpu];
+}
+
+
+/* Get pointer to next RX descriptor to be processed by SW */
+static INLINE PP2_RX_DESC *mvPp2RxqNextDescGet(MV_PP2_PHYS_RXQ_CTRL *pRxq)
+{
+	PP2_RX_DESC	*pRxDesc;
+	int		rxDesc = pRxq->queueCtrl.nextToProc;
+
+	pRxq->queueCtrl.nextToProc = MV_PP2_QUEUE_NEXT_DESC(&(pRxq->queueCtrl), rxDesc);
+
+	pRxDesc = ((PP2_RX_DESC *)pRxq->queueCtrl.pFirst) + rxDesc;
+
+	return pRxDesc;
+}
+
+static INLINE PP2_RX_DESC *mvPp2RxqDescGet(MV_PP2_PHYS_RXQ_CTRL *pRxq)
+{
+	PP2_RX_DESC	*pRxDesc;
+
+	pRxDesc = ((PP2_RX_DESC *)pRxq->queueCtrl.pFirst) + pRxq->queueCtrl.nextToProc;
+
+	return pRxDesc;
+}
+
+#if defined(MV_CPU_BE)
+/* Swap RX descriptor to be BE */
+static INLINE void mvPPv2RxqDescSwap(PP2_RX_DESC *pRxDesc)
+{
+	pRxDesc->status = MV_BYTE_SWAP_32BIT(pRxDesc->status);
+	pRxDesc->parserInfo = MV_BYTE_SWAP_16BIT(pRxDesc->parserInfo);
+	pRxDesc->dataSize =  MV_BYTE_SWAP_16BIT(pRxDesc->dataSize);
+	pRxDesc->bufPhysAddr = MV_BYTE_SWAP_32BIT(pRxDesc->bufPhysAddr);
+	pRxDesc->bufCookie = MV_BYTE_SWAP_32BIT(pRxDesc->bufCookie);
+	pRxDesc->gemPortIdPktColor = MV_BYTE_SWAP_16BIT(pRxDesc->gemPortIdPktColor);
+	pRxDesc->csumL4 = MV_BYTE_SWAP_16BIT(pRxDesc->csumL4);
+	pRxDesc->classifyInfo = MV_BYTE_SWAP_16BIT(pRxDesc->classifyInfo);
+	pRxDesc->flowId = MV_BYTE_SWAP_32BIT(pRxDesc->flowId);
+}
+
+/* Swap TX descriptor to be BE */
+static INLINE void mvPPv2TxqDescSwap(PP2_TX_DESC *pTxDesc)
+{
+	pTxDesc->command = MV_BYTE_SWAP_32BIT(pTxDesc->command);
+	pTxDesc->dataSize = MV_BYTE_SWAP_16BIT(pTxDesc->dataSize);
+	pTxDesc->bufPhysAddr = MV_BYTE_SWAP_32BIT(pTxDesc->bufPhysAddr);
+	pTxDesc->bufCookie = MV_BYTE_SWAP_32BIT(pTxDesc->bufCookie);
+	pTxDesc->hwCmd[0] = MV_BYTE_SWAP_32BIT(pTxDesc->hwCmd[0]);
+	pTxDesc->hwCmd[1] = MV_BYTE_SWAP_32BIT(pTxDesc->hwCmd[1]);
+	pTxDesc->hwCmd[2] = MV_BYTE_SWAP_32BIT(pTxDesc->hwCmd[2]);
+}
+#endif
+/*-------------------------------------------------------------------------------*/
+/* Get number of RX descriptors occupied by received packets */
+static INLINE int mvPp2RxqBusyDescNumGet(int port, int rxq)
+{
+	MV_U32 regVal;
+	int prxq = mvPp2LogicRxqToPhysRxq(port, rxq);
+
+	if (prxq < 0)
+		return 0;
+
+	regVal = mvPp2RdReg(MV_PP2_RXQ_STATUS_REG(prxq));
+
+	return (regVal & MV_PP2_RXQ_OCCUPIED_MASK) >> MV_PP2_RXQ_OCCUPIED_OFFSET;
+}
+
+/* Get number of free RX descriptors ready to received new packets */
+static INLINE int mvPp2RxqFreeDescNumGet(int port, int rxq)
+{
+	MV_U32 regVal;
+	int prxq = mvPp2LogicRxqToPhysRxq(port, rxq);
+
+	if (prxq < 0)
+		return 0;
+
+	regVal = mvPp2RdReg(MV_PP2_RXQ_STATUS_REG(prxq));
+
+	return (regVal & MV_PP2_RXQ_NON_OCCUPIED_MASK) >> MV_PP2_RXQ_NON_OCCUPIED_OFFSET;
+}
+
+/* Update HW with number of RX descriptors processed by SW:
+ *    - decrement number of occupied descriptors
+ *    - increment number of Non-occupied descriptors
+ */
+static INLINE void mvPp2RxqDescNumUpdate(int port, int rxq, int rx_done, int rx_filled)
+{
+	MV_U32 regVal;
+	int prxq = mvPp2LogicRxqToPhysRxq(port, rxq);
+
+	regVal = (rx_done << MV_PP2_RXQ_NUM_PROCESSED_OFFSET) | (rx_filled << MV_PP2_RXQ_NUM_NEW_OFFSET);
+	mvPp2WrReg(MV_PP2_RXQ_STATUS_UPDATE_REG(prxq), regVal);
+}
+
+/* Add number of descriptors are ready to receive new packets */
+static INLINE void mvPp2RxqNonOccupDescAdd(int port, int rxq, int rx_desc)
+{
+	MV_U32	regVal;
+	int prxq = mvPp2LogicRxqToPhysRxq(port, rxq);
+
+	regVal = (rx_desc << MV_PP2_RXQ_NUM_NEW_OFFSET);
+	mvPp2WrReg(MV_PP2_RXQ_STATUS_UPDATE_REG(prxq), regVal);
+}
+
+/* Decrement number of processed descriptors */
+static INLINE void mvPp2RxqOccupDescDec(int port, int rxq, int rx_done)
+{
+	MV_U32 regVal;
+	int prxq = mvPp2LogicRxqToPhysRxq(port, rxq);
+
+	regVal = (rx_done << MV_PP2_RXQ_NUM_PROCESSED_OFFSET);
+	mvPp2WrReg(MV_PP2_RXQ_STATUS_UPDATE_REG(prxq), regVal);
+}
+
+/*-------------------------------------------------------------------------------*/
+/*
+   PPv2 new feature MAS 3.16
+   reserved TXQ descriptorts allocation request
+*/
+static INLINE int mvPp2TxqAllocReservedDesc(int port, int txp, int txq, int num)
+{
+	MV_U32 regVal, ptxq;
+
+	ptxq = MV_PPV2_TXQ_PHYS(port, txp, txq);
+	regVal = (ptxq << MV_PP2_TXQ_RSVD_REQ_Q_OFFSET) | (num << MV_PP2_TXQ_RSVD_REQ_DESC_OFFSET);
+	mvPp2WrReg(MV_PP2_TXQ_RSVD_REQ_REG, regVal);
+
+	regVal = mvPp2RdReg(MV_PP2_TXQ_RSVD_RSLT_REG);
+
+	return (regVal & MV_PP2_TXQ_RSVD_REQ_DESC_MASK) >> MV_PP2_TXQ_RSVD_RSLT_OFFSET;
+}
+
+/* Free all descriptors reserved */
+static INLINE void mvPp2TxqFreeReservedDesc(int port, int txp, int txq)
+{
+	MV_U32 regVal, ptxq;
+
+	ptxq = MV_PPV2_TXQ_PHYS(port, txp, txq);
+	regVal = (ptxq << MV_PP2_TXQ_RSVD_CLR_Q_OFFSET);
+	mvPp2WrReg(MV_PP2_TXQ_RSVD_CLR_REG, regVal);
+}
+
+/* Get number of TXQ descriptors waiting to be transmitted by HW */
+static INLINE int mvPp2TxqPendDescNumGet(int port, int txp, int txq)
+{
+	MV_U32 regVal, ptxq;
+
+	ptxq = MV_PPV2_TXQ_PHYS(port, txp, txq);
+	mvPp2WrReg(MV_PP2_TXQ_NUM_REG, ptxq);
+
+	regVal = mvPp2RdReg(MV_PP2_TXQ_PENDING_REG);
+
+	return (regVal & MV_PP2_TXQ_PENDING_MASK) >> MV_PP2_TXQ_PENDING_OFFSET;
+}
+
+/*
+   PPv2.1 new feature MAS 3.16
+   Get number of SWF reserved descriptors
+*/
+static INLINE int mvPp2TxqPendRsrvdDescNumGet(int port, int txp, int txq)
+{
+	MV_U32 regVal, ptxq;
+
+	ptxq = MV_PPV2_TXQ_PHYS(port, txp, txq);
+	mvPp2WrReg(MV_PP2_TXQ_NUM_REG, ptxq);
+
+	regVal = mvPp2RdReg(MV_PP2_TXQ_PENDING_REG);
+
+	return (regVal & MV_PP2_TXQ_RSVD_DESC_OFFSET) >> MV_PP2_TXQ_RSVD_DESC_OFFSET;
+}
+
+
+/*
+   PPv2.1 field removed, MAS 3.16
+   Relevant only for ppv2.0
+   Get number of TXQ HWF descriptors waiting to be transmitted by HW
+*/
+static INLINE int mvPp2TxqPendHwfDescNumGet(int port, int txp, int txq)
+{
+	MV_U32 regVal, ptxq;
+
+	ptxq = MV_PPV2_TXQ_PHYS(port, txp, txq);
+	mvPp2WrReg(MV_PP2_TXQ_NUM_REG, ptxq);
+
+	regVal = mvPp2RdReg(MV_PP2_TXQ_PENDING_REG);
+
+	return (regVal & MV_PP2_TXQ_HWF_PENDING_MASK) >> MV_PP2_TXQ_HWF_PENDING_OFFSET;
+}
+
+/* Get next aggregated TXQ descriptor */
+static INLINE PP2_TX_DESC *mvPp2AggrTxqNextDescGet(MV_PP2_AGGR_TXQ_CTRL *pTxq)
+{
+	PP2_TX_DESC	*pTxDesc;
+	int		txDesc = pTxq->queueCtrl.nextToProc;
+
+	pTxq->queueCtrl.nextToProc = MV_PP2_QUEUE_NEXT_DESC(&(pTxq->queueCtrl), txDesc);
+
+	pTxDesc = ((PP2_TX_DESC *)pTxq->queueCtrl.pFirst) + txDesc;
+
+	return pTxDesc;
+}
+
+/* Get pointer to previous aggregated TX descriptor for rollback when needed */
+static INLINE PP2_TX_DESC *mvPp2AggrTxqPrevDescGet(MV_PP2_AGGR_TXQ_CTRL *pTxq)
+{
+	int txDesc = pTxq->queueCtrl.nextToProc;
+
+	pTxq->queueCtrl.nextToProc = MV_PP2_QUEUE_PREV_DESC(&(pTxq->queueCtrl), txDesc);
+
+	return ((PP2_TX_DESC *) pTxq->queueCtrl.pFirst) + txDesc;
+}
+
+/* Get number of aggregated TXQ descriptors didn't send by HW to relevant physical TXQ */
+static INLINE int mvPp2AggrTxqPendDescNumGet(int cpu)
+{
+	MV_U32 regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_AGGR_TXQ_STATUS_REG(cpu));
+
+	return (regVal & MV_PP2_AGGR_TXQ_PENDING_MASK) >> MV_PP2_AGGR_TXQ_PENDING_OFFSET;
+}
+
+/* Update HW with number of Aggr-TX descriptors to be sent - user responsible for writing TXQ in TX descriptor */
+static INLINE void mvPp2AggrTxqPendDescAdd(int pending)
+{
+	/* aggregated access - relevant TXQ number is written in TX descriptor */
+	mvPp2WrReg(MV_PP2_AGGR_TXQ_UPDATE_REG, pending);
+}
+
+/* Get number of sent descriptors and descrement counter.
+   Clear sent descriptor counter.
+   Number of sent descriptors is returned. */
+static INLINE int mvPp2TxqSentDescProc(int port, int txp, int txq)
+{
+	MV_U32  regVal, ptxq;
+
+	ptxq = MV_PPV2_TXQ_PHYS(port, txp, txq);
+	/* reading status reg also cause to reset transmitted counter */
+	regVal = mvPp2RdReg(MV_PP2_TXQ_SENT_REG(ptxq));
+
+	return (regVal & MV_PP2_TRANSMITTED_COUNT_MASK) >> MV_PP2_TRANSMITTED_COUNT_OFFSET;
+}
+
+/*-------------------------------------------------------------------------------*/
+
+static INLINE MV_U32 mvPp2GbeIsrCauseRxTxGet(int port)
+{
+	MV_U32 val;
+
+	if (MV_PP2_IS_PON_PORT(port)) {
+		val = mvPp2RdReg(MV_PP2_ISR_PON_RX_TX_CAUSE_REG);
+		val &= (MV_PP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK |
+			MV_PP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK | MV_PP2_PON_CAUSE_MISC_SUM_MASK);
+	} else {
+		val = mvPp2RdReg(MV_PP2_ISR_RX_TX_CAUSE_REG(MV_PPV2_PORT_PHYS(port)));
+		val &= (MV_PP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK |
+			MV_PP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK | MV_PP2_CAUSE_MISC_SUM_MASK);
+	}
+
+	return val;
+}
+
+static INLINE MV_BOOL mvPp2GbeIsrCauseTxDoneIsSet(int port, MV_U32 causeRxTx)
+{
+	if (MV_PP2_IS_PON_PORT(port))
+		return (causeRxTx & MV_PP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK);
+
+	return (causeRxTx & MV_PP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK);
+}
+
+static INLINE MV_U32 mvPp2GbeIsrCauseTxDoneOffset(int port, MV_U32 causeRxTx)
+{
+	if (MV_PP2_IS_PON_PORT(port))
+		return (causeRxTx & MV_PP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK);
+
+	return (causeRxTx & MV_PP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK);
+}
+/************************** function declaration ******************************/
+MV_STATUS 	mvPp2WinInit(MV_U32 dummy/*backward compability*/,   MV_UNIT_WIN_INFO *addrWinMap);
+MV_STATUS 	mvPp2WinWrite(MV_U32 dummy/*backward compability*/,  MV_U32 winNum, MV_UNIT_WIN_INFO *pAddrDecWin);
+MV_STATUS 	mvPp2WinRead(MV_U32 dummy/*backward compability*/,   MV_U32 winNum, MV_UNIT_WIN_INFO *pAddrDecWin);
+MV_STATUS 	mvPp2WinEnable(MV_U32 dummy/*backward compability*/, MV_U32 winNum, MV_BOOL enable);
+
+int mvPp2MaxCheck(int value, int limit, char *name);
+int mvPp2PortCheck(int port);
+int mvPp2TxpCheck(int port, int txp);
+int mvPp2CpuCheck(int cpu);
+
+static INLINE MV_ULONG pp2DescVirtToPhys(MV_PP2_QUEUE_CTRL *pQueueCtrl, MV_U8 *pDesc)
+{
+	return (pQueueCtrl->descBuf.bufPhysAddr + (pDesc - pQueueCtrl->descBuf.bufVirtPtr));
+}
+
+MV_U8 *mvPp2DescrMemoryAlloc(int descSize, MV_ULONG *pPhysAddr, MV_U32 *memHandle);
+void mvPp2DescrMemoryFree(int descSize, MV_ULONG *pPhysAddr, MV_U8 *pVirt, MV_U32 *memHandle);
+
+MV_STATUS mvPp2HalInit(MV_PP2_HAL_DATA *halData);
+MV_VOID mvPp2HalDestroy(MV_VOID);
+
+/* Add a mapping prxq <-> (port, lrxq) */
+MV_STATUS mvPp2PhysRxqMapAdd(int prxq, int port, int lrxq);
+/* Free the relevant physical rxq */
+MV_STATUS mvPp2PhysRxqMapDel(int prxq);
+MV_STATUS mvPp2PortLogicRxqMapDel(int port, int lrxq);
+
+/* Allocate and initialize descriptors for RXQ */
+MV_PP2_PHYS_RXQ_CTRL *mvPp2RxqInit(int port, int rxq, int descNum);
+void mvPp2RxqDelete(int port, int queue);
+void mvPp2RxqReset(int port, int queue);
+
+/* Allocate and initialize all physical RXQs.
+   This function must be called before any use of RXQ */
+MV_STATUS mvPp2PhysRxqsAlloc(MV_VOID);
+
+/* Destroy all physical RXQs */
+MV_STATUS mvPp2PhysRxqsDestroy(MV_VOID);
+
+MV_STATUS mvPp2RxqOffsetSet(int port, int rxq, int offset);
+
+MV_STATUS mvPp2RxqPktsCoalSet(int port, int rxq, MV_U32 pkts);
+int mvPp2RxqPktsCoalGet(int port, int rxq);
+
+void mvPp2RxReset(int port);
+
+void mvPp2TxqHwfSizeSet(int port, int txp, int txq, int hwfNum);
+
+/* Allocate and initialize descriptors for TXQ */
+MV_PP2_PHYS_TXQ_CTRL *mvPp2TxqInit(int port, int txp, int txq, int descNum, int hwfNum);
+
+MV_STATUS mvPp2TxqDelete(int port, int txp, int txq);
+
+/* Allocate and initialize all physical TXQs.
+   This function must be called before any use of TXQ */
+MV_STATUS mvPp2PhysTxqsAlloc(MV_VOID);
+
+/* Destroy all physical TXQs */
+MV_VOID mvPp2PhysTxqsDestroy(MV_VOID);
+
+/* Allocate and initialize all aggregated TXQs.
+   This function must be called before any use of aggregated TXQ */
+MV_STATUS mvPp2AggrTxqsAlloc(int cpuNum);
+
+/* Destroy all aggregated TXQs */
+MV_VOID mvPp2AggrTxqsDestroy(MV_VOID);
+MV_VOID mvPp2AggrTxqDelete(int cpu);
+/* Initialize aggregated TXQ */
+MV_PP2_AGGR_TXQ_CTRL *mvPp2AggrTxqInit(int cpu, int descNum);
+
+MV_STATUS mvPp2TxDonePktsCoalSet(int port, int txp, int txq, MV_U32 pkts);
+int mvPp2TxDonePktsCoalGet(int port, int txp, int txq);
+
+void mvPp2TxpReset(int port, int txp);
+void mvPp2TxqReset(int port, int txp, int txq);
+
+MV_STATUS mvPp2TxqTempInit(int descNum, int hwfNum);
+MV_VOID mvPp2TxqTempDelete(MV_VOID);
+
+/* Allocate and initialize port structure
+   Associate relevant TXQs for this port (predefined)
+   Associate <numRxqs> RXQs for Port number <port>, starting from RXQ number <firstRxq>
+   Note: mvPp2PortCtrl must be initialized, i.e. must call mvPp2HalInit before this function */
+void *mvPp2PortInit(int port, int firstRxq, int numRxqs, void *osHandle);
+void mvPp2PortDestroy(int port);
+
+/* Low Level APIs */
+MV_STATUS mvPp2RxqEnable(int port, int rxq, MV_BOOL en);
+MV_STATUS mvPp2HwfTxqEnable(int port, int txp, int txq, MV_BOOL en);
+MV_BOOL   mvPp2DisableCmdInProgress(void);
+MV_STATUS mvPp2TxqDrainSet(int port, int txp, int txq, MV_BOOL en);
+MV_STATUS mvPp2TxPortFifoFlush(int port, MV_BOOL en);
+MV_STATUS mvPp2TxpEnable(int port, int txp);
+MV_STATUS mvPp2TxpDisable(int port, int txp);
+MV_STATUS mvPp2PortEnable(int port, MV_BOOL en);
+
+/* High Level APIs */
+MV_STATUS mvPp2PortIngressEnable(int port, MV_BOOL en);
+MV_STATUS mvPp2PortEgressEnable(int port, MV_BOOL en);
+
+MV_STATUS mvPp2BmPoolBufSizeSet(int pool, int bufsize);
+MV_STATUS mvPp2RxqBmShortPoolSet(int port, int rxq, int shortPool);
+MV_STATUS mvPp2RxqBmLongPoolSet(int port, int rxq, int longPool);
+MV_STATUS mvPp2TxqBmShortPoolSet(int port, int txp, int txq, int shortPool);
+MV_STATUS mvPp2TxqBmLongPoolSet(int port, int txp, int txq, int longPool);
+MV_STATUS mvPp2PortHwfBmPoolSet(int port, int shortPool, int longPool);
+
+MV_STATUS mvPp2MhSet(int port, MV_TAG_TYPE mh);
+
+MV_STATUS mvPp2RxFifoInit(int portNum);
+
+MV_STATUS mvPp2TxpMaxTxSizeSet(int port, int txp, int maxTxSize);
+MV_STATUS mvPp2TxpMaxRateSet(int port, int txp);
+MV_STATUS mvPp2TxqMaxRateSet(int port, int txp, int txq);
+MV_STATUS mvPp2TxpRateSet(int port, int txp, int rate);
+MV_STATUS mvPp2TxpBurstSet(int port, int txp, int burst);
+MV_STATUS mvPp2TxqRateSet(int port, int txp, int txq, int rate);
+MV_STATUS mvPp2TxqBurstSet(int port, int txp, int txq, int burst);
+MV_STATUS mvPp2TxqFixPrioSet(int port, int txp, int txq);
+MV_STATUS mvPp2TxqWrrPrioSet(int port, int txp, int txq, int weight);
+
+/* Function for swithcing SWF to HWF */
+MV_STATUS mvPp2FwdSwitchCtrl(MV_U32 flowId, int txq, int rxq, int usec);
+int       mvPp2FwdSwitchStatus(int *hwState, int *usec);
+void      mvPp2FwdSwitchRegs(void);
+
+/*****************************/
+/*      Interrupts API       */
+/*****************************/
+MV_VOID		mvPp2GbeCpuInterruptsDisable(int port, int cpuMask);
+MV_VOID		mvPp2GbeCpuInterruptsEnable(int port, int cpuMask);
+MV_STATUS	mvPp2RxqTimeCoalSet(int port, int rxq, MV_U32 uSec);
+unsigned int	mvPp2RxqTimeCoalGet(int port, int rxq);
+MV_STATUS	mvPp2GbeIsrRxqGroup(int port, int rxqNum);
+
+/* unmask the current CPU's rx/tx interrupts                   *
+ *  - rxq_mask: support rxq to cpu granularity                 *
+ *  - isTxDoneIsr: if 0 then Tx Done interruptare not unmasked */
+MV_STATUS mvPp2GbeIsrRxTxUnmask(int port, MV_U16 rxq_mask, int isTxDoneIsr);
+
+/* mask the current CPU's rx/tx interrupts */
+MV_STATUS mvPp2GbeIsrRxTxMask(int port);
+/*****************************/
+
+/*****************************/
+/*      Debug functions      */
+/*****************************/
+MV_VOID mvPp2RxDmaRegsPrint(MV_VOID);
+MV_VOID mvPp2DescMgrRegsRxPrint(MV_VOID);
+MV_VOID mvPp2DescMgrRegsTxPrint(MV_VOID);
+MV_VOID mvPp2AddressDecodeRegsPrint(MV_VOID);
+void mvPp2IsrRegs(int port);
+void mvPp2PhysRxqRegs(int rxq);
+void mvPp2PortRxqRegs(int port, int rxq);
+MV_VOID mvPp2RxqShow(int port, int rxq, int mode);
+MV_VOID mvPp2TxqShow(int port, int txp, int txq, int mode);
+MV_VOID mvPp2AggrTxqShow(int cpu, int mode);
+void mvPp2PhysTxqRegs(int txq);
+void mvPp2PortTxqRegs(int port, int txp, int txq);
+void mvPp2AggrTxqRegs(int cpu);
+void mvPp2TxRegs(void);
+void mvPp2AddrDecodeRegs(void);
+void mvPp2TxSchedRegs(int port, int txp);
+void mvPp2BmPoolRegs(int pool);
+void mvPp2V0DropCntrs(int port);
+/* PPv2.1 MAS 3.20 - counters change */
+void mvPp2V1DropCntrs(int port);
+void mvPp2V1TxqDbgCntrs(int port, int txp, int txq);
+void mvPp2V1RxqDbgCntrs(int port, int rxq);
+void mvPp2RxFifoRegs(int port);
+void mvPp2PortStatus(int port);
+#endif /* MV_PP2_GBE_H */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/gbe/mvPp2GbeDebug.c b/drivers/net/ethernet/mvebu_net/pp2/hal/gbe/mvPp2GbeDebug.c
new file mode 100644
index 000000000000..7dad7ff9db6e
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/gbe/mvPp2GbeDebug.c
@@ -0,0 +1,629 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCommon.h"		/* Should be included before mvSysHwConfig */
+#include "mvTypes.h"
+#include "mv802_3.h"
+#include "mvDebug.h"
+#include "mvOs.h"
+
+#include "mvPp2Gbe.h"
+#include "bm/mvBm.h"
+
+#include "mvPp2GbeRegs.h"
+
+MV_VOID mvPp2RxDmaRegsPrint(void)
+{
+	int i;
+
+	mvOsPrintf("\n[RX DMA regs]\n");
+
+	mvOsPrintf("\nRXQs [0..%d] registers\n", MV_PP2_RXQ_TOTAL_NUM);
+	for (i = 0; i < MV_PP2_RXQ_TOTAL_NUM; i++) {
+#ifdef CONFIG_MV_ETH_PP2_1
+		mvPp2PrintReg(MV_PP2_RX_STATUS, "MV_PP2_RX_STATUS");
+		mvPp2PrintReg2(MV_PP2_RXQ_CONFIG_REG(i), "MV_PP2_RXQ_CONFIG_REG", i);
+#else
+		mvPp2PrintReg2(MV_PP2_V0_RXQ_SNOOP_REG(i), "MV_PP2_RXQ_SNOOP_REG", i);
+		mvPp2PrintReg2(MV_PP2_V0_RXQ_CONFIG_REG(i), "MV_PP2_RXQ_CONFIG_REG", i);
+#endif
+	}
+	mvOsPrintf("\nBM pools [0..%d] registers\n", MV_BM_POOLS);
+	for (i = 0; i < MV_BM_POOLS; i++)
+		mvPp2PrintReg2(MV_PP2_POOL_BUF_SIZE_REG(i), "MV_PP2_POOL_BUF_SIZE_REG", i);
+
+	mvOsPrintf("\nIngress ports [0..%d] registers\n", MV_PP2_MAX_PORTS);
+	for (i = 0; i < MV_PP2_MAX_PORTS; i++) {
+#ifndef CONFIG_MV_ETH_PP2_1
+		mvPp2PrintReg2(MV_PP2_V0_PORT_HWF_CONFIG_REG(i), "MV_PP2_PORT_HWF_CONFIG_REG", i);
+#endif
+		mvPp2PrintReg2(MV_PP2_RX_CTRL_REG(i), "MV_PP2_RX_CTRL_REG", i);
+	}
+	mvOsPrintf("\n");
+}
+
+static void mvPp2QueueShow(MV_PP2_QUEUE_CTRL *pQueueCtrl, int mode, int isTxq)
+{
+	mvOsPrintf("pFirst=%p (0x%x), descSize=%d, numOfDescr=%d\n",
+			pQueueCtrl->pFirst,
+			(MV_U32) pp2DescVirtToPhys(pQueueCtrl, (MV_U8 *) pQueueCtrl->pFirst),
+			pQueueCtrl->descSize, pQueueCtrl->lastDesc + 1);
+
+	if (mode > 0) {
+		int i;
+
+		for (i = 0; i <= pQueueCtrl->lastDesc; i++) {
+			if (isTxq) {
+				PP2_TX_DESC *pTxDesc = (PP2_TX_DESC *) MV_PP2_QUEUE_DESC_PTR(pQueueCtrl, i);
+				mvOsPrintf("%3d. pTxDesc=%p, cmd=%08x, offs=%08x, txq=%4d, data=%4d, bufAddr=%08x, cookie=%x\n",
+					   i, pTxDesc, pTxDesc->command, pTxDesc->pktOffset, pTxDesc->physTxq, pTxDesc->dataSize,
+					   (MV_U32) pTxDesc->bufPhysAddr, (MV_U32) pTxDesc->bufCookie);
+
+				mvOsCacheLineInv(NULL, pTxDesc);
+			} else { /* RXQ */
+				PP2_RX_DESC *pRxDesc;
+
+				pRxDesc = (PP2_RX_DESC *) MV_PP2_QUEUE_DESC_PTR(pQueueCtrl, i);
+				mvOsPrintf("%3d. desc=%p, status=%08x, data=%4d, bufAddr=%08x, bufCookie=%08x, parserInfo=%03x\n",
+					   i, pRxDesc, pRxDesc->status,
+					   pRxDesc->dataSize, (MV_U32) pRxDesc->bufPhysAddr,
+					   (MV_U32) pRxDesc->bufCookie, (MV_U16) pRxDesc->parserInfo);
+				mvOsCacheLineInv(NULL, pRxDesc);
+			}
+		}
+	}
+}
+
+/* Show Port/Rxq descriptors ring */
+MV_VOID mvPp2RxqShow(int port, int rxq, int mode)
+{
+	int pRxq;
+	MV_PP2_PORT_CTRL *pCtrl;
+	MV_PP2_PHYS_RXQ_CTRL *pRxqCtrl;
+	MV_PP2_QUEUE_CTRL *pQueueCtrl;
+
+	if (mvPp2PortCheck(port))
+		return;
+
+	pCtrl = mvPp2PortCtrl[port];
+	if ((pCtrl == NULL) || (pCtrl->pRxQueue == NULL)) {
+		mvOsPrintf("port #%d is not initialized\n", port);
+		return;
+	}
+
+	if (mvPp2MaxCheck(rxq, MV_PP2_MAX_RXQ, "logical rxq"))
+		return;
+
+	if (pCtrl->pRxQueue[rxq] == NULL) {
+		mvOsPrintf("rxq #%d of port #%d is not initialized\n", rxq, port);
+		return;
+	}
+
+	pRxq = mvPp2LogicRxqToPhysRxq(port, rxq);
+	if (pRxq < 0) {
+		mvOsPrintf("Port/RXQ is not mapped to physical RXQ\n");
+		return;
+	}
+	mvOsPrintf("\n[PPv2 RxQ show: port=%d, logical rxq=%d -> physical rxq=%d]\n",
+			port, rxq, pRxq);
+
+	pRxqCtrl = &mvPp2PhysRxqs[pRxq];
+	pQueueCtrl = &pRxqCtrl->queueCtrl;
+	if (!pQueueCtrl->pFirst) {
+		mvOsPrintf("rxq %d wasn't created\n", rxq);
+		return;
+	}
+
+	mvOsPrintf("nextToProc=%d (%p), rxqOccupied=%d, rxqNonOccupied=%d\n",
+		   pQueueCtrl->nextToProc,
+		   MV_PP2_QUEUE_DESC_PTR(pQueueCtrl, pQueueCtrl->nextToProc),
+		   mvPp2RxqBusyDescNumGet(port, rxq), mvPp2RxqFreeDescNumGet(port, rxq));
+
+	mvPp2QueueShow(pQueueCtrl, mode, 0);
+}
+
+/* Show Port/TXQ descriptors ring */
+MV_VOID mvPp2TxqShow(int port, int txp, int txq, int mode)
+{
+	int pTxq;
+	MV_PP2_PHYS_TXQ_CTRL *pTxqCtrl;
+	MV_PP2_QUEUE_CTRL *pQueueCtrl;
+
+	mvOsPrintf("\n[PPv2 TxQ show: port=%d, txp=%d, txq=%d]\n", port, txp, txq);
+
+	if (mvPp2TxpCheck(port, txp))
+		return;
+
+	if (mvPp2MaxCheck(txq, MV_PP2_MAX_TXQ, "logical txq"))
+		return;
+
+	pTxq = MV_PPV2_TXQ_PHYS(port, txp, txq);
+
+	pTxqCtrl = &mvPp2PhysTxqs[pTxq];
+	pQueueCtrl = &pTxqCtrl->queueCtrl;
+	if (!pQueueCtrl->pFirst) {
+		mvOsPrintf("txq %d wasn't created\n", txq);
+		return;
+	}
+
+	mvOsPrintf("nextToProc=%d (%p), txqPending=%d\n",
+		   pQueueCtrl->nextToProc,
+		   MV_PP2_QUEUE_DESC_PTR(pQueueCtrl, pQueueCtrl->nextToProc),
+		   mvPp2TxqPendDescNumGet(port, txp, txq));
+
+	mvPp2QueueShow(pQueueCtrl, mode, 1);
+}
+
+/* Show CPU aggregation TXQ descriptors ring */
+MV_VOID mvPp2AggrTxqShow(int cpu, int mode)
+{
+	MV_PP2_AGGR_TXQ_CTRL *pTxqCtrl;
+	MV_PP2_QUEUE_CTRL *pQueueCtrl;
+
+	mvOsPrintf("\n[PPv2 AggrTxQ: cpu=%d]\n", cpu);
+
+	if (mvPp2CpuCheck(cpu))
+		return;
+
+	pTxqCtrl = &mvPp2AggrTxqs[cpu];
+	pQueueCtrl = &pTxqCtrl->queueCtrl;
+	if (!pQueueCtrl->pFirst) {
+		mvOsPrintf("aggr tx queue for cpu %d wasn't created\n", cpu);
+		return;
+	}
+	mvOsPrintf("nextToProc=%d (%p), txqPending=%d\n",
+		   pQueueCtrl->nextToProc,
+		   MV_PP2_QUEUE_DESC_PTR(pQueueCtrl, pQueueCtrl->nextToProc),
+		   mvPp2AggrTxqPendDescNumGet(cpu));
+
+	mvPp2QueueShow(pQueueCtrl, mode, 1);
+}
+
+void mvPp2IsrRegs(int port)
+{
+	int physPort;
+
+	if (mvPp2PortCheck(port))
+		return;
+
+	physPort = MV_PPV2_PORT_PHYS(port);
+
+	mvOsPrintf("\n[PPv2 ISR registers: port=%d - %s]\n", port, MV_PP2_IS_PON_PORT(port) ? "PON" : "GMAC");
+	mvPp2PrintReg(MV_PP2_ISR_RXQ_GROUP_REG(port), "MV_PP2_ISR_RXQ_GROUP_REG");
+	mvPp2PrintReg(MV_PP2_ISR_ENABLE_REG(port), "MV_PP2_ISR_ENABLE_REG");
+	mvPp2PrintReg(MV_PP2_ISR_RX_TX_CAUSE_REG(physPort), "MV_PP2_ISR_RX_TX_CAUSE_REG");
+	mvPp2PrintReg(MV_PP2_ISR_RX_TX_MASK_REG(physPort), "MV_PP2_ISR_RX_TX_MASK_REG");
+
+	mvPp2PrintReg(MV_PP2_ISR_RX_ERR_CAUSE_REG(physPort), "MV_PP2_ISR_RX_ERR_CAUSE_REG");
+	mvPp2PrintReg(MV_PP2_ISR_RX_ERR_MASK_REG(physPort), "MV_PP2_ISR_RX_ERR_MASK_REG");
+
+	if (MV_PP2_IS_PON_PORT(port)) {
+		mvPp2PrintReg(MV_PP2_ISR_PON_TX_UNDR_CAUSE_REG, "MV_PP2_ISR_PON_TX_UNDR_CAUSE_REG");
+		mvPp2PrintReg(MV_PP2_ISR_PON_TX_UNDR_MASK_REG, "MV_PP2_ISR_PON_TX_UNDR_MASK_REG");
+	} else {
+		mvPp2PrintReg(MV_PP2_ISR_TX_ERR_CAUSE_REG(physPort), "MV_PP2_ISR_TX_ERR_CAUSE_REG");
+		mvPp2PrintReg(MV_PP2_ISR_TX_ERR_MASK_REG(physPort), "MV_PP2_ISR_TX_ERR_MASK_REG");
+	}
+	mvPp2PrintReg(MV_PP2_ISR_MISC_CAUSE_REG, "MV_PP2_ISR_MISC_CAUSE_REG");
+	mvPp2PrintReg(MV_PP2_ISR_MISC_MASK_REG, "MV_PP2_ISR_MISC_MASK_REG");
+}
+
+void mvPp2PhysRxqRegs(int rxq)
+{
+	mvOsPrintf("\n[PPv2 RxQ registers: global rxq=%d]\n", rxq);
+
+	if (mvPp2MaxCheck(rxq, MV_PP2_RXQ_TOTAL_NUM, "global rxq"))
+		return;
+
+	mvPp2WrReg(MV_PP2_RXQ_NUM_REG, rxq);
+	mvPp2PrintReg(MV_PP2_RXQ_NUM_REG, "MV_PP2_RXQ_NUM_REG");
+	mvPp2PrintReg(MV_PP2_RXQ_DESC_ADDR_REG, "MV_PP2_RXQ_DESC_ADDR_REG");
+	mvPp2PrintReg(MV_PP2_RXQ_DESC_SIZE_REG, "MV_PP2_RXQ_DESC_SIZE_REG");
+	mvPp2PrintReg(MV_PP2_RXQ_STATUS_REG(rxq), "MV_PP2_RXQ_STATUS_REG");
+	mvPp2PrintReg(MV_PP2_RXQ_THRESH_REG, "MV_PP2_RXQ_THRESH_REG");
+	mvPp2PrintReg(MV_PP2_RXQ_INDEX_REG, "MV_PP2_RXQ_INDEX_REG");
+#ifdef CONFIG_MV_ETH_PP2_1
+	mvPp2PrintReg(MV_PP2_RXQ_CONFIG_REG(rxq), "MV_PP2_RXQ_CONFIG_REG");
+#else
+	mvPp2PrintReg(MV_PP2_V0_RXQ_CONFIG_REG(rxq), "MV_PP2_RXQ_CONFIG_REG");
+	mvPp2PrintReg(MV_PP2_V0_RXQ_SNOOP_REG(rxq), "MV_PP2_RXQ_SNOOP_REG");
+	mvPp2PrintReg(MV_PP2_V0_RX_EARLY_DROP_REG(rxq), "MV_PP2_V0_RX_EARLY_DROP_REG");
+	mvPp2PrintReg(MV_PP2_V0_RX_DESC_DROP_REG(rxq), "MV_PP2_V0_RX_DESC_DROP_REG");
+#endif
+
+}
+
+void mvPp2PortRxqRegs(int port, int rxq)
+{
+	mvOsPrintf("\n[PPv2 RxQ registers: port=%d, local rxq=%d]\n", port, rxq);
+
+	if (mvPp2PortCheck(port))
+		return;
+
+	if (mvPp2MaxCheck(rxq, MV_PP2_MAX_RXQ, "local rxq"))
+		return;
+
+	mvPp2PhysRxqRegs(mvPp2LogicRxqToPhysRxq(port, rxq));
+}
+
+void mvPp2PhysTxqRegs(int txq)
+{
+	mvOsPrintf("\n[PPv2 TxQ registers: global txq=%d]\n", txq);
+
+	if (mvPp2MaxCheck(txq, MV_PP2_TXQ_TOTAL_NUM, "global txq"))
+		return;
+
+	mvPp2WrReg(MV_PP2_TXQ_NUM_REG, txq);
+	mvPp2PrintReg(MV_PP2_TXQ_NUM_REG, "MV_PP2_TXQ_NUM_REG");
+	mvPp2PrintReg(MV_PP2_TXQ_DESC_ADDR_REG, "MV_PP2_TXQ_DESC_ADDR_REG");
+	mvPp2PrintReg(MV_PP2_TXQ_DESC_SIZE_REG, "MV_PP2_TXQ_DESC_SIZE_REG");
+	mvPp2PrintReg(MV_PP2_TXQ_DESC_HWF_SIZE_REG, "MV_PP2_TXQ_DESC_HWF_SIZE_REG");
+	mvPp2PrintReg(MV_PP2_TXQ_INDEX_REG, "MV_PP2_TXQ_INDEX_REG");
+	mvPp2PrintReg(MV_PP2_TXQ_PREF_BUF_REG, "MV_PP2_TXQ_PREF_BUF_REG");
+	mvPp2PrintReg(MV_PP2_TXQ_PENDING_REG, "MV_PP2_TXQ_PENDING_REG");
+	mvPp2PrintReg(MV_PP2_TXQ_SENT_REG(txq), "MV_PP2_TXQ_SENT_REG");
+	mvPp2PrintReg(MV_PP2_TXQ_INT_STATUS_REG, "MV_PP2_TXQ_INT_STATUS_REG");
+}
+
+void mvPp2PortTxqRegs(int port, int txp, int txq)
+{
+	mvOsPrintf("\n[PPv2 TxQ registers: port=%d, txp=%d, local txq=%d]\n", port, txp, txq);
+
+	if (mvPp2TxpCheck(port, txp))
+		return;
+
+	if (mvPp2MaxCheck(txq, MV_PP2_MAX_TXQ, "local txq"))
+		return;
+
+	mvPp2PhysTxqRegs(MV_PPV2_TXQ_PHYS(port, txp, txq));
+}
+
+void mvPp2AggrTxqRegs(int cpu)
+{
+	mvOsPrintf("\n[PP2 Aggr TXQ registers: cpu=%d]\n", cpu);
+
+	if (mvPp2CpuCheck(cpu))
+		return;
+
+	mvPp2PrintReg(MV_PP2_AGGR_TXQ_DESC_ADDR_REG(cpu), "MV_PP2_AGGR_TXQ_DESC_ADDR_REG");
+	mvPp2PrintReg(MV_PP2_AGGR_TXQ_DESC_SIZE_REG(cpu), "MV_PP2_AGGR_TXQ_DESC_SIZE_REG");
+	mvPp2PrintReg(MV_PP2_AGGR_TXQ_STATUS_REG(cpu), "MV_PP2_AGGR_TXQ_STATUS_REG");
+	mvPp2PrintReg(MV_PP2_AGGR_TXQ_INDEX_REG(cpu), "MV_PP2_AGGR_TXQ_INDEX_REG");
+}
+
+void mvPp2AddrDecodeRegs(void)
+{
+	MV_U32 regValue;
+	int win;
+
+	/* ToDo - print Misc interrupt Cause and Mask registers */
+
+	mvPp2PrintReg(ETH_BASE_ADDR_ENABLE_REG, "ETH_BASE_ADDR_ENABLE_REG");
+	mvPp2PrintReg(ETH_TARGET_DEF_ADDR_REG, "ETH_TARGET_DEF_ADDR_REG");
+	mvPp2PrintReg(ETH_TARGET_DEF_ID_REG, "ETH_TARGET_DEF_ID_REG");
+
+	regValue = mvPp2RdReg(ETH_BASE_ADDR_ENABLE_REG);
+	for (win = 0; win < ETH_MAX_DECODE_WIN; win++) {
+		if ((regValue & (1 << win)) == 0)
+			continue; /* window is disable */
+
+		mvOsPrintf("\t win[%d]\n", win);
+		mvPp2PrintReg(ETH_WIN_BASE_REG(win), "\t ETH_WIN_BASE_REG");
+		mvPp2PrintReg(ETH_WIN_SIZE_REG(win), "\t ETH_WIN_SIZE_REG");
+		if (win < ETH_MAX_HIGH_ADDR_REMAP_WIN)
+			mvPp2PrintReg(ETH_WIN_REMAP_REG(win), "\t ETH_WIN_REMAP_REG");
+	}
+}
+
+
+void mvPp2TxSchedRegs(int port, int txp)
+{
+	int physTxp, txq;
+
+	physTxp = MV_PPV2_TXP_PHYS(port, txp);
+
+	mvOsPrintf("\n[TXP Scheduler registers: port=%d, txp=%d, physPort=%d]\n", port, txp, physTxp);
+
+	mvPp2WrReg(MV_PP2_TXP_SCHED_PORT_INDEX_REG, physTxp);
+	mvPp2PrintReg(MV_PP2_TXP_SCHED_PORT_INDEX_REG, "MV_PP2_TXP_SCHED_PORT_INDEX_REG");
+	mvPp2PrintReg(MV_PP2_TXP_SCHED_Q_CMD_REG, "MV_PP2_TXP_SCHED_Q_CMD_REG");
+	mvPp2PrintReg(MV_PP2_TXP_SCHED_CMD_1_REG, "MV_PP2_TXP_SCHED_CMD_1_REG");
+	mvPp2PrintReg(MV_PP2_TXP_SCHED_FIXED_PRIO_REG, "MV_PP2_TXP_SCHED_FIXED_PRIO_REG");
+	mvPp2PrintReg(MV_PP2_TXP_SCHED_PERIOD_REG, "MV_PP2_TXP_SCHED_PERIOD_REG");
+	mvPp2PrintReg(MV_PP2_TXP_SCHED_MTU_REG, "MV_PP2_TXP_SCHED_MTU_REG");
+	mvPp2PrintReg(MV_PP2_TXP_SCHED_REFILL_REG, "MV_PP2_TXP_SCHED_REFILL_REG");
+	mvPp2PrintReg(MV_PP2_TXP_SCHED_TOKEN_SIZE_REG, "MV_PP2_TXP_SCHED_TOKEN_SIZE_REG");
+	mvPp2PrintReg(MV_PP2_TXP_SCHED_TOKEN_CNTR_REG, "MV_PP2_TXP_SCHED_TOKEN_CNTR_REG");
+
+	for (txq = 0; txq < MV_PP2_MAX_TXQ; txq++) {
+		mvOsPrintf("\n[TxQ Scheduler registers: port=%d, txp=%d, txq=%d]\n", port, txp, txq);
+		mvPp2PrintReg(MV_PP2_TXQ_SCHED_REFILL_REG(txq), "MV_PP2_TXQ_SCHED_REFILL_REG");
+		mvPp2PrintReg(MV_PP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), "MV_PP2_TXQ_SCHED_TOKEN_SIZE_REG");
+		mvPp2PrintReg(MV_PP2_TXQ_SCHED_TOKEN_CNTR_REG(txq), "MV_PP2_TXQ_SCHED_TOKEN_CNTR_REG");
+	}
+}
+
+void      mvPp2FwdSwitchRegs(void)
+{
+	mvOsPrintf("\n[FWD Switch registers]\n");
+
+	mvPp2PrintReg(MV_PP2_FWD_SWITCH_FLOW_ID_REG, "MV_PP2_FWD_SWITCH_FLOW_ID_REG");
+	mvPp2PrintReg(MV_PP2_FWD_SWITCH_CTRL_REG, "MV_PP2_FWD_SWITCH_CTRL_REG");
+	mvPp2PrintReg(MV_PP2_FWD_SWITCH_STATUS_REG, "MV_PP2_FWD_SWITCH_STATUS_REG");
+}
+
+void mvPp2BmPoolRegs(int pool)
+{
+	if (mvPp2MaxCheck(pool, MV_BM_POOLS, "bm_pool"))
+		return;
+
+	mvOsPrintf("\n[BM pool registers: pool=%d]\n", pool);
+	mvPp2PrintReg(MV_BM_POOL_BASE_REG(pool), "MV_BM_POOL_BASE_REG");
+	mvPp2PrintReg(MV_BM_POOL_SIZE_REG(pool), "MV_BM_POOL_SIZE_REG");
+	mvPp2PrintReg(MV_BM_POOL_READ_PTR_REG(pool), "MV_BM_POOL_READ_PTR_REG");
+	mvPp2PrintReg(MV_BM_POOL_PTRS_NUM_REG(pool), "MV_BM_POOL_PTRS_NUM_REG");
+	mvPp2PrintReg(MV_BM_BPPI_READ_PTR_REG(pool), "MV_BM_BPPI_READ_PTR_REG");
+	mvPp2PrintReg(MV_BM_BPPI_PTRS_NUM_REG(pool), "MV_BM_BPPI_PTRS_NUM_REG");
+	mvPp2PrintReg(MV_BM_POOL_CTRL_REG(pool), "MV_BM_POOL_CTRL_REG");
+	mvPp2PrintReg(MV_BM_INTR_CAUSE_REG(pool), "MV_BM_INTR_CAUSE_REG");
+	mvPp2PrintReg(MV_BM_INTR_MASK_REG(pool), "MV_BM_INTR_MASK_REG");
+}
+
+void mvPp2V0DropCntrs(int port)
+{
+	int i;
+
+	mvOsPrintf("\n[Port #%d Drop counters]\n", port);
+	mvPp2PrintReg(MV_PP2_OVERRUN_DROP_REG(MV_PPV2_PORT_PHYS(port)), "MV_PP2_OVERRUN_DROP_REG");
+	mvPp2PrintReg(MV_PP2_CLS_DROP_REG(MV_PPV2_PORT_PHYS(port)), "MV_PP2_CLS_DROP_REG");
+
+	if (MV_PP2_IS_PON_PORT(port)) {
+		for (i = 0; i < mvPp2HalData.maxTcont; i++) {
+			mvPp2PrintReg2(MV_PP2_V0_TX_EARLY_DROP_REG(i), "MV_PP2_TX_EARLY_DROP_REG", i);
+			mvPp2PrintReg2(MV_PP2_V0_TX_DESC_DROP_REG(i), "MV_PP2_TX_DESC_DROP_REG", i);
+		}
+	} else {
+		i = MV_PP2_MAX_TCONT + port;
+		mvPp2PrintReg2(MV_PP2_V0_TX_EARLY_DROP_REG(i), "MV_PP2_TX_EARLY_DROP_REG", i);
+		mvPp2PrintReg2(MV_PP2_V0_TX_DESC_DROP_REG(i), "MV_PP2_TX_DESC_DROP_REG", i);
+	}
+	for (i = port * CONFIG_MV_PP2_RXQ; i < (port * CONFIG_MV_PP2_RXQ + CONFIG_MV_PP2_RXQ); i++) {
+		mvPp2PrintReg2(MV_PP2_V0_RX_EARLY_DROP_REG(i), "MV_PP2_RX_EARLY_DROP_REG", i);
+		mvPp2PrintReg2(MV_PP2_V0_RX_DESC_DROP_REG(i), "MV_PP2_RX_DESC_DROP_REG", i);
+	}
+}
+
+void mvPp2V1DropCntrs(int port)
+{
+	int txp, phyRxq, q;
+	MV_PP2_PORT_CTRL *pPortCtrl = mvPp2PortHndlGet(port);
+	int physPort = MV_PPV2_PORT_PHYS(port);
+
+
+	mvOsPrintf("\n[global drop counters]\n");
+	mvPp2RegPrintNonZero(MV_PP2_V1_OVERFLOW_MC_DROP_REG, "MV_PP2_OVERRUN_DROP_REG");
+
+	mvOsPrintf("\n[Port #%d Drop counters]\n", port);
+	mvPp2RegPrintNonZero(MV_PP2_OVERRUN_DROP_REG(physPort), "MV_PP2_OVERRUN_DROP_REG");
+	mvPp2RegPrintNonZero(MV_PP2_CLS_DROP_REG(physPort), "MV_PP2_CLS_DROP_REG");
+
+	for (txp = 0; txp < pPortCtrl->txpNum; txp++) {
+		for (q = 0; q < MV_PP2_MAX_TXQ; q++) {
+			mvOsPrintf("\n------ [Port #%d txp #%d txq #%d counters] -----\n", port, txp, q);
+			mvPp2WrReg(MV_PP2_V1_CNT_IDX_REG, TX_CNT_IDX(port, txp, q));
+			mvPp2RegPrintNonZero(MV_PP2_V1_TX_PKT_FULLQ_DROP_REG, "MV_PP2_V1_TX_PKT_FULLQ_DROP_REG");
+			mvPp2RegPrintNonZero(MV_PP2_V1_TX_PKT_EARLY_DROP_REG, "MV_PP2_V1_TX_PKT_EARLY_DROP_REG");
+			mvPp2RegPrintNonZero(MV_PP2_V1_TX_PKT_BM_DROP_REG, "MV_PP2_V1_TX_PKT_BM_DROP_REG");
+			mvPp2RegPrintNonZero(MV_PP2_V1_TX_PKT_BM_MC_DROP_REG, "MV_PP2_V1_TX_PKT_BM_MC_DROP_REG");
+		}
+	}
+
+	for (q = 0; q < CONFIG_MV_PP2_RXQ; q++) {
+		mvOsPrintf("\n------ [Port #%d, rxq #%d counters] -----\n", port, q);
+		phyRxq = mvPp2LogicRxqToPhysRxq(port, q);
+		mvPp2WrReg(MV_PP2_V1_CNT_IDX_REG, phyRxq);
+		mvPp2RegPrintNonZero(MV_PP2_V1_RX_PKT_FULLQ_DROP_REG, "MV_PP2_V1_RX_PKT_FULLQ_DROP_REG");
+		mvPp2RegPrintNonZero(MV_PP2_V1_RX_PKT_EARLY_DROP_REG, "MV_PP2_V1_RX_PKT_EARLY_DROP_REG");
+		mvPp2RegPrintNonZero(MV_PP2_V1_RX_PKT_BM_DROP_REG, "MV_PP2_V1_RX_PKT_BM_DROP_REG");
+	}
+}
+
+void mvPp2V1TxqDbgCntrs(int port, int txp, int txq)
+{
+	mvOsPrintf("\n------ [Port #%d txp #%d txq #%d counters] -----\n", port, txp, txq);
+	mvPp2WrReg(MV_PP2_V1_CNT_IDX_REG, TX_CNT_IDX(port, txp, txq));
+	mvPp2PrintReg(MV_PP2_V1_TX_DESC_ENQ_REG, "MV_PP2_V1_TX_DESC_ENQ_REG");
+	mvPp2PrintReg(MV_PP2_V1_TX_DESC_ENQ_TO_DRAM_REG, "MV_PP2_V1_TX_DESC_ENQ_TO_DRAM_REG");
+	mvPp2PrintReg(MV_PP2_V1_TX_BUF_ENQ_TO_DRAM_REG, "MV_PP2_V1_TX_BUF_ENQ_TO_DRAM_REG");
+	mvPp2PrintReg(MV_PP2_V1_TX_DESC_HWF_ENQ_REG, "MV_PP2_V1_TX_DESC_HWF_ENQ_REG");
+	mvPp2PrintReg(MV_PP2_V1_TX_PKT_DQ_REG, "MV_PP2_V1_TX_PKT_DQ_REG");
+	mvPp2PrintReg(MV_PP2_V1_TX_PKT_FULLQ_DROP_REG, "MV_PP2_V1_TX_PKT_FULLQ_DROP_REG");
+	mvPp2PrintReg(MV_PP2_V1_TX_PKT_EARLY_DROP_REG, "MV_PP2_V1_TX_PKT_EARLY_DROP_REG");
+	mvPp2PrintReg(MV_PP2_V1_TX_PKT_BM_DROP_REG, "MV_PP2_V1_TX_PKT_BM_DROP_REG");
+	mvPp2PrintReg(MV_PP2_V1_TX_PKT_BM_MC_DROP_REG, "MV_PP2_V1_TX_PKT_BM_MC_DROP_REG");
+}
+
+void mvPp2V1RxqDbgCntrs(int port, int rxq)
+{
+	int phyRxq = mvPp2LogicRxqToPhysRxq(port, rxq);
+
+	mvOsPrintf("\n------ [Port #%d, rxq #%d counters] -----\n", port, rxq);
+	mvPp2WrReg(MV_PP2_V1_CNT_IDX_REG, phyRxq);
+	mvPp2PrintReg(MV_PP2_V1_RX_PKT_FULLQ_DROP_REG, "MV_PP2_V1_RX_PKT_FULLQ_DROP_REG");
+	mvPp2PrintReg(MV_PP2_V1_RX_PKT_EARLY_DROP_REG, "MV_PP2_V1_RX_PKT_EARLY_DROP_REG");
+	mvPp2PrintReg(MV_PP2_V1_RX_PKT_BM_DROP_REG, "MV_PP2_V1_RX_PKT_BM_DROP_REG");
+	mvPp2PrintReg(MV_PP2_V1_RX_DESC_ENQ_REG, "MV_PP2_V1_RX_DESC_ENQ_REG");
+}
+
+void mvPp2TxRegs(void)
+{
+	mvOsPrintf("\n[TX general registers]\n");
+
+	mvPp2PrintReg(MV_PP2_TX_SNOOP_REG, "MV_PP2_TX_SNOOP_REG");
+	mvPp2PrintReg(MV_PP2_TX_FIFO_THRESH_REG, "MV_PP2_TX_FIFO_THRESH_REG");
+	mvPp2PrintReg(MV_PP2_TX_PORT_FLUSH_REG, "MV_PP2_TX_PORT_FLUSH_REG");
+}
+
+void mvPp2RxFifoRegs(int port)
+{
+	int p = MV_PPV2_PORT_PHYS(port);
+
+	mvOsPrintf("\n[Port #%d RX Fifo]\n", p);
+	mvPp2PrintReg(MV_PP2_RX_DATA_FIFO_SIZE_REG(p), "MV_PP2_RX_DATA_FIFO_SIZE_REG");
+	mvPp2PrintReg(MV_PP2_RX_ATTR_FIFO_SIZE_REG(p), "MV_PP2_RX_ATTR_FIFO_SIZE_REG");
+	mvOsPrintf("\n[Global RX Fifo regs]\n");
+	mvPp2PrintReg(MV_PP2_RX_MIN_PKT_SIZE_REG, "MV_PP2_RX_MIN_PKT_SIZE_REG");
+}
+
+
+/* Print status of Ethernet port */
+void mvPp2PortStatus(int port)
+{
+	int i, txp, txq;
+	MV_ETH_PORT_STATUS	link;
+	MV_PP2_PORT_CTRL 	*pPortCtrl;
+
+	if (mvPp2PortCheck(port))
+		return;
+
+	pPortCtrl = mvPp2PortHndlGet(port);
+	if (!pPortCtrl)
+		return;
+
+	mvOsPrintf("\n[RXQ mapping: port=%d, ctrl=%p]\n", port, pPortCtrl);
+	if (pPortCtrl->pRxQueue) {
+		mvOsPrintf("         RXQ: ");
+		for (i = 0; i < pPortCtrl->rxqNum; i++)
+			mvOsPrintf(" %4d", i);
+
+		mvOsPrintf("\nphysical RXQ: ");
+		for (i = 0; i < pPortCtrl->rxqNum; i++) {
+			if (pPortCtrl->pRxQueue[i])
+				mvOsPrintf(" %4d", pPortCtrl->pRxQueue[i]->rxq);
+			else
+				mvOsPrintf(" NULL");
+		}
+		mvOsPrintf("\n");
+	}
+
+	mvOsPrintf("\n[BM queue to Qset mapping]\n");
+	if (pPortCtrl->pRxQueue) {
+		mvOsPrintf("       RXQ: ");
+		for (i = 0; i < pPortCtrl->rxqNum; i++)
+			mvOsPrintf(" %4d", i);
+
+		mvOsPrintf("\n long Qset: ");
+		for (i = 0; i < pPortCtrl->rxqNum; i++)
+			mvOsPrintf(" %4d", mvBmRxqToQsetLongGet(mvPp2LogicRxqToPhysRxq(port, i)));
+
+		mvOsPrintf("\nshort Qset: ");
+		for (i = 0; i < pPortCtrl->rxqNum; i++)
+			mvOsPrintf(" %4d", mvBmRxqToQsetShortGet(mvPp2LogicRxqToPhysRxq(port, i)));
+
+		mvOsPrintf("\n");
+	}
+	if (pPortCtrl->pTxQueue) {
+		for (txp = 0; txp < pPortCtrl->txpNum; txp++) {
+			mvOsPrintf("\nTXP %2d, TXQ:", txp);
+			for (txq = 0; txq < pPortCtrl->txqNum; txq++)
+				mvOsPrintf(" %4d", txq);
+
+			mvOsPrintf("\n long Qset: ");
+			for (txq = 0; txq < pPortCtrl->txqNum; txq++)
+				mvOsPrintf(" %4d", mvBmTxqToQsetLongGet(MV_PPV2_TXQ_PHYS(port, txp, txq)));
+
+			mvOsPrintf("\nshort Qset: ");
+			for (txq = 0; txq < pPortCtrl->txqNum; txq++)
+				mvOsPrintf(" %4d", mvBmTxqToQsetShortGet(MV_PPV2_TXQ_PHYS(port, txp, txq)));
+
+			mvOsPrintf("\n");
+		}
+	}
+
+	mvOsPrintf("\n[Link: port=%d, ctrl=%p]\n", port, pPortCtrl);
+
+	if (!MV_PP2_IS_PON_PORT(port)) {
+
+		mvGmacLinkStatus(port, &link);
+
+		if (link.linkup) {
+			mvOsPrintf("link up");
+			mvOsPrintf(", %s duplex", (link.duplex == MV_ETH_DUPLEX_FULL) ? "full" : "half");
+			mvOsPrintf(", speed ");
+
+			if (link.speed == MV_ETH_SPEED_1000)
+				mvOsPrintf("1 Gbps\n");
+			else if (link.speed == MV_ETH_SPEED_100)
+				mvOsPrintf("100 Mbps\n");
+			else
+				mvOsPrintf("10 Mbps\n");
+
+			mvOsPrintf("rxFC - %s, txFC - %s\n",
+				(link.rxFc == MV_ETH_FC_DISABLE) ? "disabled" : "enabled",
+				(link.txFc == MV_ETH_FC_DISABLE) ? "disabled" : "enabled");
+		} else
+			mvOsPrintf("link down\n");
+	}
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/gbe/mvPp2GbeRegs.h b/drivers/net/ethernet/mvebu_net/pp2/hal/gbe/mvPp2GbeRegs.h
new file mode 100644
index 000000000000..ea3a6238f481
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/gbe/mvPp2GbeRegs.h
@@ -0,0 +1,1027 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __MV_PP2_GBE_REGS_H__
+#define __MV_PP2_GBE_REGS_H__
+
+#ifdef CONFIG_ARCH_MVEBU
+#include "mvNetConfig.h"
+#else
+#include "mvSysEthConfig.h"
+#endif
+
+/************************** PPv2 HW Configuration ***********************/
+
+/************************** TX General Registers ******************************/
+#define MV_PP2_TX_SNOOP_REG			(MV_PP2_REG_BASE + 0x8800)
+#define MV_PP2_TX_FIFO_THRESH_REG		(MV_PP2_REG_BASE + 0x8804)
+
+/* Indirect access */
+#define MV_PP2_TX_PKT_LEN_IDX_REG		(MV_PP2_REG_BASE + 0x8808)
+#define MV_PP2_TX_PKT_LEN_CHANGE_REG		(MV_PP2_REG_BASE + 0x880C)
+
+#define MV_PP2_TX_PORT_FLUSH_REG		(MV_PP2_REG_BASE + 0x8810)
+
+#define MV_PP2_TX_PORT_FLUSH_OFFS		0
+#define MV_PP2_TX_PORT_FLUSH_BITS		7
+#define MV_PP2_TX_PORT_FLUSH_ALL_MASK		(((1 << MV_PP2_TX_PORT_FLUSH_BITS) - 1) << MV_PP2_TX_PORT_FLUSH_OFFS)
+#define MV_PP2_TX_PORT_FLUSH_MASK(p)		((1 << (p)) << MV_PP2_TX_PORT_FLUSH_OFFS)
+
+/* Registers per egress port */
+#define MV_PP2_TXP_BAD_CRC_CNTR_REG(txp)	(MV_PP2_REG_BASE + 0x8900)
+#define MV_PP2_TXP_DROP_CNTR_REG(txp)		(MV_PP2_REG_BASE + 0x8980)
+#define MV_PP2_TXP_DEQUEUE_THRESH_REG(txp)	(MV_PP2_REG_BASE + 0x88A0)
+
+/************************** RX Fifo Registers ******************************/
+#define MV_PP2_RX_DATA_FIFO_SIZE_REG(port)	(MV_PP2_REG_BASE + 0x00 + 4 * (port))
+#define MV_PP2_RX_ATTR_FIFO_SIZE_REG(port)	(MV_PP2_REG_BASE + 0x20 + 4 * (port))
+#define MV_PP2_RX_MIN_PKT_SIZE_REG		(MV_PP2_REG_BASE + 0x60)
+#define MV_PP2_RX_FIFO_INIT_REG			(MV_PP2_REG_BASE + 0x64)
+
+/************************** Top Reg file ******************************/
+#define MV_PP2_MH_REG(port)			(MV_PP2_REG_BASE + 0x5040 + 4 * (port))
+
+#define MV_PP2_MH_EN_OFFS			0
+#define MV_PP2_MH_EN_MASK			(1 << MV_PP2_MH_EN_OFFS)
+
+#define MV_PP2_DSA_EN_OFFS			0
+#define MV_PP2_DSA_EN_MASK			(0x3 << MV_PP2_DSA_EN_OFFS)
+#define MV_PP2_DSA_DISABLE			0
+#define MV_PP2_DSA_NON_EXTENDED			(0x1 << MV_PP2_DSA_EN_OFFS)
+#define MV_PP2_DSA_EXTENDED			(0x2 << MV_PP2_DSA_EN_OFFS)
+
+/************************** RX DMA Top Registers ******************************/
+#define MV_PP2_RX_CTRL_REG(port)		(MV_PP2_REG_BASE + 0x140 + 4 * (port))
+
+#define MV_PP2_POOL_BUF_SIZE_REG(pool)		(MV_PP2_REG_BASE + 0x180 + 4 * (pool))
+
+#define MV_PP2_POOL_BUF_SIZE_OFFSET		5
+#define MV_PP2_POOL_BUF_SIZE_MASK		(0xFFFE)
+/*-------------------------------------------------------------------------------*/
+
+#ifdef CONFIG_MV_ETH_PP2_1 /* PPv2.1 - A0 */
+
+#define MV_PP2_RX_STATUS			(MV_PP2_REG_BASE + 0x174)
+
+#define MV_PP2_DISABLE_IN_PROG_OFFS		0
+#define MV_PP2_DISABLE_IN_PROG_MASK		(0x1 << MV_PP2_DISABLE_IN_PROG_OFFS)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_RXQ_CONFIG_REG(rxq)		(MV_PP2_REG_BASE + 0x800 + 4 * (rxq))
+
+#define MV_PP2_SNOOP_PKT_SIZE_OFFS		0
+#define MV_PP2_SNOOP_PKT_SIZE_MASK		(0x1FF << MV_PP2_SNOOP_PKT_SIZE_OFFS)
+
+#define MV_PP2_SNOOP_BUF_HDR_OFFS		9
+#define MV_PP2_SNOOP_BUF_HDR_MASK		(0x1 << MV_PP2_SNOOP_BUF_HDR_OFFS)
+
+#define MV_PP2_L2_DEPOSIT_PKT_SIZE_OFFS		12
+#define MV_PP2_L2_DEPOSIT_PKT_SIZE_MASK		(0xF << MV_PP2_L2_DEPOSIT_PKT_SIZE_OFFS)
+
+#define MV_PP2_L2_DEPOSIT_BUF_HDR_OFFS		16
+#define MV_PP2_L2_DEPOSIT_BUF_HDR_MASK		(0x1 << MV_PP2_L2_DEPOSIT_BUF_HDR_OFFS)
+
+#define MV_PP2_RXQ_POOL_SHORT_OFFS		20
+#define MV_PP2_RXQ_POOL_SHORT_MASK		(0x7 << MV_PP2_RXQ_POOL_SHORT_OFFS)
+
+#define MV_PP2_RXQ_POOL_LONG_OFFS		24
+#define MV_PP2_RXQ_POOL_LONG_MASK		(0x7 << MV_PP2_RXQ_POOL_LONG_OFFS)
+
+#define MV_PP2_RXQ_PACKET_OFFSET_OFFS		28
+#define MV_PP2_RXQ_PACKET_OFFSET_MASK		(0x7 << MV_PP2_RXQ_PACKET_OFFSET_OFFS)
+
+#define MV_PP2_RXQ_DISABLE_BIT			31
+#define MV_PP2_RXQ_DISABLE_MASK			(0x1 << MV_PP2_RXQ_DISABLE_BIT)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_HWF_TXQ_CONFIG_REG(txq)		(MV_PP2_REG_BASE + 0xc00 + 4 * (txq))
+
+#define MV_PP2_HWF_TXQ_POOL_SHORT_OFFS		0
+#define MV_PP2_HWF_TXQ_POOL_SHORT_MASK		(0x7 << MV_PP2_HWF_TXQ_POOL_SHORT_OFFS)
+
+#define MV_PP2_HWF_TXQ_POOL_LONG_OFFS		4
+#define MV_PP2_HWF_TXQ_POOL_LONG_MASK		(0x7 << MV_PP2_HWF_TXQ_POOL_LONG_OFFS)
+
+#define MV_PP2_HWF_TXQ_DISABLE_BIT              31
+#define MV_PP2_HWF_TXQ_DISABLE_MASK             (0x1 << MV_PP2_HWF_TXQ_DISABLE_BIT)
+/*-------------------------------------------------------------------------------*/
+
+#else /* PPv2 - Z1 */
+
+#define MV_PP2_V0_RXQ_SNOOP_REG(rxq)		(MV_PP2_REG_BASE + 0x800 + 4 * (rxq))
+
+#define MV_PP2_V0_SNOOP_PKT_SIZE_OFFS		5
+#define MV_PP2_V0_SNOOP_PKT_SIZE_MASK		(0x1FF << MV_PP2_V0_SNOOP_PKT_SIZE_OFFS)
+
+#define MV_PP2_V0_SNOOP_BUF_HDR_OFFS		14
+#define MV_PP2_V0_SNOOP_BUF_HDR_MASK		(0x1 << MV_PP2_V0_SNOOP_BUF_HDR_OFFS)
+
+#define MV_PP2_V0_L2_DEPOSIT_PKT_SIZE_OFFS	21
+#define MV_PP2_V0_L2_DEPOSIT_PKT_SIZE_MASK	(0xF << MV_PP2_V0_L2_DEPOSIT_PKT_SIZE_OFFS)
+
+#define MV_PP2_V0_L2_DEPOSIT_BUF_HDR_OFFS	25
+#define MV_PP2_V0_L2_DEPOSIT_BUF_HDR_MASK	(0x1 << MV_PP2_V0_L2_DEPOSIT_BUF_HDR_OFFS)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_V0_RXQ_CONFIG_REG(rxq)		(MV_PP2_REG_BASE + 0xc00 + 4 * (rxq))
+
+#define MV_PP2_V0_RXQ_POOL_SHORT_OFFS		0
+#define MV_PP2_V0_RXQ_POOL_SHORT_MASK		(0x7 << MV_PP2_V0_RXQ_POOL_SHORT_OFFS)
+#define MV_PP2_V0_RXQ_POOL_LONG_OFFS		8
+#define MV_PP2_V0_RXQ_POOL_LONG_MASK		(0x7 << MV_PP2_V0_RXQ_POOL_LONG_OFFS)
+#define MV_PP2_V0_RXQ_PACKET_OFFSET_OFFS	17
+#define MV_PP2_V0_RXQ_PACKET_OFFSET_MASK	(0xFF << MV_PP2_V0_RXQ_PACKET_OFFSET_OFFS)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_V0_PORT_HWF_CONFIG_REG(port)	(MV_PP2_REG_BASE + 0x120 + 4 * (port))
+
+#define MV_PP2_V0_PORT_HWF_POOL_SHORT_OFFS	0
+#define MV_PP2_V0_PORT_HWF_POOL_SHORT_MASK	(0x7 << MV_PP2_V0_PORT_HWF_POOL_SHORT_OFFS)
+#define MV_PP2_V0_PORT_HWF_POOL_LONG_OFFS	8
+#define MV_PP2_V0_PORT_HWF_POOL_LONG_MASK	(0x7 << MV_PP2_V0_PORT_HWF_POOL_LONG_OFFS)
+/*-------------------------------------------------------------------------------*/
+
+#endif /* PPv2 - Z1 / PPv2.1 - A0 */
+
+#define MV_PP2_RX_GEMPID_SRC_OFFS		8
+#define MV_PP2_RX_GEMPID_SRC_MASK		(0x7 << MV_PP2_RX_GEMPID_SRC_OFFS)
+
+#define MV_PP2_RX_LOW_LATENCY_PKT_SIZE_OFFS	16
+#define MV_PP2_RX_LOW_LATENCY_PKT_SIZE_BITS	12
+#define MV_PP2_RX_LOW_LATENCY_PKT_SIZE_MAX	((1 << MV_PP2_RX_LOW_LATENCY_PKT_SIZE_BITS) - 1)
+#define MV_PP2_RX_LOW_LATENCY_PKT_SIZE_MASK(s)	(((s) & MV_PP2_RX_LOW_LATENCY_PKT_SIZE_MAX) << \
+							MV_PP2_RX_LOW_LATENCY_PKT_SIZE_OFFS)
+
+#define MV_PP2_RX_DROP_ON_CSUM_ERR_BIT		30
+#define MV_PP2_RX_DROP_ON_CSUM_ERR_MASK		(1 << MV_PP2_RX_DROP_ON_CSUM_ERR_BIT)
+
+#define MV_PP2_RX_USE_PSEUDO_FOR_CSUM_BIT	31
+#define MV_PP2_RX_USE_PSEUDO_FOR_CSUM_MASK      (1 << MV_PP2_RX_USE_PSEUDO_FOR_CSUM_BIT)
+/*-------------------------------------------------------------------------------*/
+
+/************************** Descriptor Manager Top Registers ******************************/
+
+#define MV_PP2_RXQ_NUM_REG			(MV_PP2_REG_BASE + 0x2040)
+
+#define MV_PP2_RXQ_NUM_OFFSET			0
+#define MV_PP2_RXQ_NUM_MASK			(0xFF << MV_PP2_RXQ_NUM_OFFSET)
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_RXQ_DESC_ADDR_REG		(MV_PP2_REG_BASE + 0x2044)
+
+#define MV_PP2_RXQ_DESC_SIZE_REG		(MV_PP2_REG_BASE + 0x2048)
+
+#define MV_PP2_RXQ_DESC_SIZE_OFFSET		4
+#define MV_PP2_RXQ_DESC_SIZE_MASK		(0x3FF << MV_PP2_RXQ_DESC_SIZE_OFFSET)
+
+#define MV_PP2_RXQ_L2_DEPOSIT_OFFSET		16
+#define MV_PP2_RXQ_L2_DEPOSIT_MASK		(0x1 << MV_PP2_RXQ_L2_DEPOSIT_OFFSET)
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_RXQ_STATUS_UPDATE_REG(rxq)	(MV_PP2_REG_BASE + 0x3000 + 4 * (rxq))
+
+#define MV_PP2_RXQ_NUM_PROCESSED_OFFSET	0
+#define MV_PP2_RXQ_NUM_PROCESSED_MASK		(0x3FFF << MV_PP2_RXQ_NUM_PROCESSED_OFFSET)
+#define MV_PP2_RXQ_NUM_NEW_OFFSET		16
+#define MV_PP2_RXQ_NUM_NEW_MASK			(0x3FFF << MV_PP2_RXQ_NUM_NEW_OFFSET)
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_RXQ_STATUS_REG(rxq)		(MV_PP2_REG_BASE + 0x3400 + 4 * (rxq))
+
+#define MV_PP2_RXQ_OCCUPIED_OFFSET		0
+#define MV_PP2_RXQ_OCCUPIED_MASK		(0x3FFF << MV_PP2_RXQ_OCCUPIED_OFFSET)
+#define MV_PP2_RXQ_NON_OCCUPIED_OFFSET		16
+#define MV_PP2_RXQ_NON_OCCUPIED_MASK		(0x3FFF << MV_PP2_RXQ_NON_OCCUPIED_OFFSET)
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_RXQ_THRESH_REG			(MV_PP2_REG_BASE + 0x204c)
+
+#define MV_PP2_OCCUPIED_THRESH_OFFSET		0
+#define MV_PP2_OCCUPIED_THRESH_MASK		(0x3FFF << MV_PP2_OCCUPIED_THRESH_OFFSET)
+
+#define MV_PP2_NON_OCCUPIED_THRESH_OFFSET	16
+#define MV_PP2_NON_OCCUPIED_THRESH_MASK		(0x3FFF << MV_PP2_NON_OCCUPIED_THRESH_OFFSET)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_RXQ_INDEX_REG			(MV_PP2_REG_BASE + 0x2050)
+/*-------------------------------------------------------------------------------*/
+
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_TXQ_NUM_REG			(MV_PP2_REG_BASE + 0x2080)
+
+#define MV_PP2_TXQ_NUM_OFFSET			0
+#define MV_PP2_TXQ_NUM_MASK			(0xFF << MV_PP2_RXQ_NUM_OFFSET)
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_TXQ_DESC_ADDR_REG		(MV_PP2_REG_BASE + 0x2084)
+
+#define MV_PP2_TXQ_DESC_SIZE_REG		(MV_PP2_REG_BASE + 0x2088)
+
+#define MV_PP2_TXQ_DESC_SIZE_OFFSET		4
+#define MV_PP2_TXQ_DESC_SIZE_MASK		(0x3FF << MV_PP2_TXQ_DESC_SIZE_OFFSET)
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_TXQ_DESC_HWF_SIZE_REG		(MV_PP2_REG_BASE + 0x208c)
+
+#define MV_PP2_TXQ_DESC_HWF_SIZE_OFFSET		4
+#define MV_PP2_TXQ_DESC_HWF_SIZE_MASK		(0x3FF << MV_PP2_TXQ_DESC_HWF_SIZE_OFFSET)
+/*-------------------------------------------------------------------------------*/
+
+/* Aggregated (per CPU) TXQ - WO */
+#define MV_PP2_AGGR_TXQ_UPDATE_REG		(MV_PP2_REG_BASE + 0x2090)
+/*-------------------------------------------------------------------------------*/
+
+/* Each CPU has own copy */
+#define MV_PP2_TXQ_THRESH_REG			(MV_PP2_REG_BASE + 0x2094)
+
+#define MV_PP2_TRANSMITTED_THRESH_OFFSET	16
+#define MV_PP2_TRANSMITTED_THRESH_MASK		(0x3FFF << MV_PP2_TRANSMITTED_THRESH_OFFSET)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_TXQ_INDEX_REG			(MV_PP2_REG_BASE + 0x2098)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_TXQ_PREF_BUF_REG			(MV_PP2_REG_BASE + 0x209c)
+
+#define MV_PP2_PREF_BUF_PTR_OFFS		0
+#define MV_PP2_PREF_BUF_PTR_MASK		(0xFFF << MV_PP2_PREF_BUF_PTR_OFFS)
+#define MV_PP2_PREF_BUF_PTR(desc)		(((desc) << MV_PP2_PREF_BUF_PTR_OFFS) & MV_PP2_PREF_BUF_PTR_MASK)
+
+#define MV_PP2_PREF_BUF_SIZE_OFFS		12
+#define MV_PP2_PREF_BUF_SIZE_MASK		(0x7 << MV_PP2_PREF_BUF_SIZE_OFFS)
+#define MV_PP2_PREF_BUF_SIZE_NONE		(0 << MV_PP2_PREF_BUF_SIZE_OFFS)
+#define MV_PP2_PREF_BUF_SIZE_1			(1 << MV_PP2_PREF_BUF_SIZE_OFFS)
+#define MV_PP2_PREF_BUF_SIZE_2			(2 << MV_PP2_PREF_BUF_SIZE_OFFS)
+#define MV_PP2_PREF_BUF_SIZE_4			(3 << MV_PP2_PREF_BUF_SIZE_OFFS)
+#define MV_PP2_PREF_BUF_SIZE_8			(4 << MV_PP2_PREF_BUF_SIZE_OFFS)
+#define MV_PP2_PREF_BUF_SIZE_16			(5 << MV_PP2_PREF_BUF_SIZE_OFFS)
+#define MV_PP2_PREF_BUF_SIZE_32			(6 << MV_PP2_PREF_BUF_SIZE_OFFS)
+#define MV_PP2_PREF_BUF_SIZE_64			(7 << MV_PP2_PREF_BUF_SIZE_OFFS)
+
+#define MV_PP2_PREF_BUF_THRESH_OFFS		17
+#define MV_PP2_PREF_BUF_THRESH_MASK		(0xF << MV_PP2_PREF_BUF_THRESH_OFFS)
+#define MV_PP2_PREF_BUF_THRESH(val)		((val) << MV_PP2_PREF_BUF_THRESH_OFFS)
+
+/* new field for PPV2.1 - A0 only */
+#define MV_PP2_TXQ_DRAIN_EN_BIT			31
+#define MV_PP2_TXQ_DRAIN_EN_MASK		(1 << MV_PP2_TXQ_DRAIN_EN_BIT)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_TXQ_PENDING_REG			(MV_PP2_REG_BASE + 0x20a0)
+
+#define MV_PP2_TXQ_PENDING_OFFSET		0
+#define MV_PP2_TXQ_PENDING_MASK			(0x3FFF << MV_PP2_TXQ_PENDING_OFFSET)
+
+#define MV_PP2_TXQ_RESERVED_OFFSET		16
+#define MV_PP2_TXQ_RESERVED_MASK		(0x3FFF << MV_PP2_TXQ_RESERVED_OFFSET)
+/*-------------------------------------------------------------------------------*/
+
+/*
+   ppv2.1 field MV_PP2_TXQ_HWF_PENDING_OFFSET changed to MV_PP2_TXQ_RESERVED_DESC_OFFSET
+   MAS 3.16
+*/
+#define MV_PP2_TXQ_HWF_PENDING_OFFSET		16
+#define MV_PP2_TXQ_HWF_PENDING_MASK		(0x3FFF << MV_PP2_TXQ_HWF_PENDING_OFFSET)
+
+#define MV_PP2_TXQ_RSVD_DESC_OFFSET		16
+#define MV_PP2_TXQ_RSVD_DESC_MASK		(0x3FFF << MV_PP2_TXQ_RSVD_DESC_MASK)
+
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_TXQ_INT_STATUS_REG		(MV_PP2_REG_BASE + 0x20a4)
+/*-------------------------------------------------------------------------------*/
+/*
+   ppv2.1- new register 0x20b0, not exist ip ppv2.0
+   MAS 3.16
+*/
+#define MV_PP2_TXQ_RSVD_REQ_REG			(MV_PP2_REG_BASE + 0x20b0)
+
+#define MV_PP2_TXQ_RSVD_REQ_DESC_OFFSET		0
+#define MV_PP2_TXQ_RSVD_REQ_DESC_MASK		(0x3FFF << MV_PP2_TXQ_RSVD_REQ_DESC_OFFSET)
+
+#define MV_PP2_TXQ_RSVD_REQ_Q_OFFSET		16
+#define MV_PP2_TXQ_RSVD_REQ_Q_MASK		(0xFF << MV_PP2_TXQ_RSVD_REQ_Q_OFFSET)
+/*-------------------------------------------------------------------------------*/
+/*
+   ppv2.1- new register 0x20b4, not exist ip ppv2.0
+   MAS 3.16
+*/
+#define MV_PP2_TXQ_RSVD_RSLT_REG		(MV_PP2_REG_BASE + 0x20b4)
+
+#define MV_PP2_TXQ_RSVD_RSLT_OFFSET		0
+#define MV_PP2_TXQ_RSVD_RSLT_MASK		(0x3FFF << MV_PP2_TXQ_RSVD_RSLT_OFFSET)
+
+/*-------------------------------------------------------------------------------*/
+/*
+   ppv2.1- new register 0x20b8, not exist ip ppv2.0
+   MAS 3.22
+*/
+#define MV_PP2_TXQ_RSVD_CLR_REG			(MV_PP2_REG_BASE + 0x20b8)
+
+#define MV_PP2_TXQ_RSVD_CLR_Q_OFFSET		16
+#define MV_PP2_TXQ_RSVD_CLR_Q_MASK		(0xFF << MV_PP2_TXQ_RSVD_CLR_Q_OFFSET)
+/*-------------------------------------------------------------------------------*/
+
+/* Direct access - per TXQ, per CPU */
+#define MV_PP2_TXQ_SENT_REG(txq)		(MV_PP2_REG_BASE + 0x3c00 + 4 * (txq))
+
+#define MV_PP2_TRANSMITTED_COUNT_OFFSET	16
+#define MV_PP2_TRANSMITTED_COUNT_MASK		(0x3FFF << MV_PP2_TRANSMITTED_COUNT_OFFSET)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(MV_PP2_REG_BASE + 0x2100 + 4 * (cpu))
+
+#define MV_PP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(MV_PP2_REG_BASE + 0x2140 + 4 * (cpu))
+#define MV_PP2_AGGR_TXQ_DESC_SIZE_OFFSET	4
+#define MV_PP2_AGGR_TXQ_DESC_SIZE_MASK		(0x3FF << MV_PP2_AGGR_TXQ_DESC_SIZE_OFFSET)
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_AGGR_TXQ_STATUS_REG(cpu)		(MV_PP2_REG_BASE + 0x2180 + 4 * (cpu))
+
+#define MV_PP2_AGGR_TXQ_PENDING_OFFSET		0
+#define MV_PP2_AGGR_TXQ_PENDING_MASK		(0x3FFF << MV_PP2_AGGR_TXQ_PENDING_OFFSET)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_AGGR_TXQ_INDEX_REG(cpu)		(MV_PP2_REG_BASE + 0x21c0 + 4 * (cpu))
+/*-------------------------------------------------------------------------------*/
+
+/* Registers for HWF to SWF switching */
+#define MV_PP2_FWD_SWITCH_FLOW_ID_REG		(MV_PP2_REG_BASE + 0x2200)
+
+#define MV_PP2_FWD_SWITCH_CTRL_REG		(MV_PP2_REG_BASE + 0x2204)
+
+#define MV_PP2_FWD_SWITCH_TXQ_OFFS		0
+#define MV_PP2_FWD_SWITCH_TXQ_MAX		255
+#define MV_PP2_FWD_SWITCH_TXQ_MASK              (255 << MV_PP2_FWD_SWITCH_TXQ_OFFS)
+#define MV_PP2_FWD_SWITCH_TXQ_VAL(txq)		(((txq) << MV_PP2_FWD_SWITCH_TXQ_OFFS) & \
+							MV_PP2_FWD_SWITCH_TXQ_MASK)
+
+#define MV_PP2_FWD_SWITCH_RXQ_OFFS		8
+#define MV_PP2_FWD_SWITCH_RXQ_MAX		255
+#define MV_PP2_FWD_SWITCH_RXQ_MASK              (255 << MV_PP2_FWD_SWITCH_RXQ_OFFS)
+#define MV_PP2_FWD_SWITCH_RXQ_VAL(rxq)		(((rxq) << MV_PP2_FWD_SWITCH_RXQ_OFFS) & \
+							MV_PP2_FWD_SWITCH_RXQ_MASK)
+
+#define MV_PP2_FWD_SWITCH_TIMEOUT_OFFS		16
+#define MV_PP2_FWD_SWITCH_TIMEOUT_BITS		10
+#define MV_PP2_FWD_SWITCH_TIMEOUT_MAX		((1 << MV_PP2_FWD_SWITCH_TIMEOUT_BITS) - 1)
+#define MV_PP2_FWD_SWITCH_TIMEOUT_MASK          (MV_PP2_FWD_SWITCH_TIMEOUT_MAX << MV_PP2_FWD_SWITCH_TIMEOUT_OFFS)
+#define MV_PP2_FWD_SWITCH_TIMEOUT_VAL(time)	(((time) << MV_PP2_FWD_SWITCH_TIMEOUT_OFFS) & \
+							MV_PP2_FWD_SWITCH_TIMEOUT_MASK)
+
+#define MV_PP2_FWD_SWITCH_STATUS_REG		(MV_PP2_REG_BASE + 0x2208)
+
+#define MV_PP2_FWD_SWITCH_STATE_OFFS		0
+#define MV_PP2_FWD_SWITCH_STATE_MASK		(0x7 << MV_PP2_FWD_SWITCH_STATE_OFFS)
+
+#define MV_PP2_FWD_SWITCH_STATUS_OFFS		4
+#define MV_PP2_FWD_SWITCH_STATUS_MASK		(0x3 << MV_PP2_FWD_SWITCH_STATUS_OFFS)
+
+#define MV_PP2_FWD_SWITCH_TIMER_OFFS		16
+#define MV_PP2_FWD_SWITCH_TIMER_BITS		10
+#define MV_PP2_FWD_SWITCH_TIMER_MAX		((1 << MV_PP2_FWD_SWITCH_TIMER_BITS) - 1)
+#define MV_PP2_FWD_SWITCH_TIMER_MASK		(MV_PP2_FWD_SWITCH_TIMER_MAX << MV_PP2_FWD_SWITCH_TIMER_OFFS)
+/*-------------------------------------------------------------------------------*/
+
+/* Unused registers */
+#define MV_PP2_INTERNAL_BUF_CTRL_REG		(MV_PP2_REG_BASE + 0x2220)
+
+/* No CPU access to Physcal TXQ descriptors, so Snoop doesn't needed */
+#define MV_PP2_TX_DESC_SNOOP_REG		(MV_PP2_REG_BASE + 0x2224)
+/*-------------------------------------------------------------------------------*/
+
+
+/************************** Interrupt Cause and Mask registers ******************/
+#define MV_PP2_ISR_RX_THRESHOLD_REG(port)	(MV_PP2_REG_BASE + 0x5200 + 4 * (port))
+
+#define MV_PP2_ISR_RX_THRESHOLD_OFFS	4
+#define MV_PP2_ISR_RX_THRESHOLD_MASK	0xFFFFF0
+
+#define MV_PP2_ISR_RXQ_GROUP_REG(port)		(MV_PP2_REG_BASE + 0x5400 + 4 * (port))
+
+#define MV_PP2_ISR_ENABLE_REG(port)		(MV_PP2_REG_BASE + 0x5420 + 4 * (port))
+
+#define MV_PP2_ISR_ENABLE_INTERRUPT_OFFS	0
+#define MV_PP2_ISR_ENABLE_INTERRUPT_MASK	0xFFFF
+#define MV_PP2_ISR_ENABLE_INTERRUPT(cpuMask)	(((cpuMask) << MV_PP2_ISR_ENABLE_INTERRUPT_OFFS)\
+							& MV_PP2_ISR_ENABLE_INTERRUPT_MASK)
+
+#define MV_PP2_ISR_DISABLE_INTERRUPT_OFFS	16
+#define MV_PP2_ISR_DISABLE_INTERRUPT_MASK	(0xFFFF << MV_PP2_ISR_DISABLE_INTERRUPT_OFFS)
+#define MV_PP2_ISR_DISABLE_INTERRUPT(cpuMask)	(((cpuMask) << MV_PP2_ISR_DISABLE_INTERRUPT_OFFS)\
+							& MV_PP2_ISR_DISABLE_INTERRUPT_MASK)
+
+
+#define MV_PP2_ISR_RX_TX_CAUSE_REG(port)	(MV_PP2_REG_BASE + 0x5480 + 4 * (port))
+#define MV_PP2_ISR_RX_TX_MASK_REG(port)		(MV_PP2_REG_BASE + 0x54a0 + 4 * (port))
+
+#define MV_PP2_CAUSE_RXQ_OCCUP_DESC_OFFS      	0
+#define MV_PP2_CAUSE_RXQ_OCCUP_DESC_BIT(q)    	(MV_PP2_CAUSE_RXQ_OCCUP_DESC_OFFS + (q))
+#define MV_PP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK  	(0xFFFF << MV_PP2_CAUSE_RXQ_OCCUP_DESC_OFFS)
+#define MV_PP2_CAUSE_RXQ_OCCUP_DESC_MASK(q)   	(1 << (MV_PP2_CAUSE_RXQ_OCCUP_DESC_BIT(q)))
+
+#define MV_PP2_CAUSE_TXQ_OCCUP_DESC_OFFS       	16
+#define MV_PP2_CAUSE_TXQ_OCCUP_DESC_BIT(q)     	(MV_PP2_CAUSE_TXQ_OCCUP_DESC_OFFS + (q))
+#define MV_PP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK   	(0xFF << MV_PP2_CAUSE_TXQ_OCCUP_DESC_OFFS)
+#define MV_PP2_CAUSE_TXQ_OCCUP_DESC_MASK(q)    	(1 << (MV_PP2_CAUSE_TXQ_SENT_DESC_BIT(q)))
+
+#define MV_PP2_CAUSE_RX_FIFO_OVERRUN_BIT        24
+#define MV_PP2_CAUSE_RX_FIFO_OVERRUN_MASK      	(1 << MV_PP2_CAUSE_RX_FIFO_OVERRUN_BIT)
+
+#define MV_PP2_CAUSE_FCS_ERR_BIT           	25
+#define MV_PP2_CAUSE_FCS_ERR_MASK          	(1 << MV_PP2_CAUSE_FCS_ERR_BIT)
+
+#define MV_PP2_CAUSE_TX_FIFO_UNDERRUN_BIT       26
+#define MV_PP2_CAUSE_TX_FIFO_UNDERRUN_MASK     	(1 << MV_PP2_CAUSE_TX_FIFO_UNDERRUN_BIT)
+
+#define MV_PP2_CAUSE_TX_EXCEPTION_SUM_BIT     	29
+#define MV_PP2_CAUSE_TX_EXCEPTION_SUM_MASK     	(1 << MV_PP2_CAUSE_TX_EXCEPTION_SUM_BIT)
+
+#define MV_PP2_CAUSE_RX_EXCEPTION_SUM_BIT     	30
+#define MV_PP2_CAUSE_RX_EXCEPTION_SUM_MASK     	(1 << MV_PP2_CAUSE_RX_EXCEPTION_SUM_BIT)
+
+#define MV_PP2_CAUSE_MISC_SUM_BIT     		31
+#define MV_PP2_CAUSE_MISC_SUM_MASK     		(1 << MV_PP2_CAUSE_MISC_SUM_BIT)
+
+#define MV_PP2_CAUSE_MISC_ERR_SUM_MASK		(0xE7000000)
+
+
+#define MV_PP2_ISR_PON_RX_TX_CAUSE_REG			(MV_PP2_REG_BASE + 0x549c)
+#define MV_PP2_ISR_PON_RX_TX_MASK_REG			(MV_PP2_REG_BASE + 0x54bc)
+
+#define MV_PP2_PON_CAUSE_RXQ_OCCUP_DESC_OFFS      	0
+#define MV_PP2_PON_CAUSE_RXQ_OCCUP_DESC_BIT(q)    	(MV_PP2_PON_CAUSE_RXQ_OCCUP_DESC_OFFS + (q))
+#define MV_PP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK  	(0xFFFF << MV_PP2_PON_CAUSE_RXQ_OCCUP_DESC_OFFS)
+#define MV_PP2_PON_CAUSE_RXQ_OCCUP_DESC_MASK(q)   	(1 << (MV_PP2_PON_CAUSE_RXQ_OCCUP_DESC_BIT(q)))
+
+#define MV_PP2_PON_CAUSE_RX_FIFO_OVERRUN_BIT        	16
+#define MV_PP2_PON_CAUSE_RX_FIFO_OVERRUN_MASK      	(1 << MV_PP2_CAUSE_RX_FIFO_OVERRUN_BIT)
+
+#define MV_PP2_PON_CAUSE_FCS_ERR_BIT           		17
+#define MV_PP2_PON_CAUSE_FCS_ERR_MASK          		(1 << MV_PP2_PON_CAUSE_FCS_ERR_BIT)
+
+#define MV_PP2_PON_BYTE_COUNT_ERR_BIT          		18
+#define MV_PP2_PON_BYTE_COUNT_ERR_MASK         		(1 << MV_PP2_PON_BYTE_COUNT_ERR_BIT)
+
+#define MV_PP2_PON_CAUSE_TX_FIFO_UNDERRUN_BIT       	21
+#define MV_PP2_PON_CAUSE_TX_FIFO_UNDERRUN_MASK     	(1 << MV_PP2_PON_CAUSE_TX_FIFO_UNDERRUN_BIT)
+
+#define MV_PP2_PON_CAUSE_TXP_OCCUP_DESC_OFFS       	22
+#define MV_PP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK   	(0xFF << MV_PP2_PON_CAUSE_TXP_OCCUP_DESC_OFFS)
+
+#define MV_PP2_PON_CAUSE_TX_EXCEPTION_SUM_BIT     	29
+#define MV_PP2_PON_CAUSE_TX_EXCEPTION_SUM_MASK     	(1 << MV_PP2_PON_CAUSE_TX_EXCEPTION_SUM_BIT)
+
+#define MV_PP2_PON_CAUSE_RX_EXCEPTION_SUM_BIT     	30
+#define MV_PP2_PON_CAUSE_RX_EXCEPTION_SUM_MASK     	(1 << MV_PP2_PON_CAUSE_RX_EXCEPTION_SUM_BIT)
+
+#define MV_PP2_PON_CAUSE_MISC_SUM_BIT     		31
+#define MV_PP2_PON_CAUSE_MISC_SUM_MASK     		(1 << MV_PP2_PON_CAUSE_MISC_SUM_BIT)
+
+#define MV_PP2_PON_CAUSE_MISC_ERR_SUM_MASK		(0xC0270000)
+
+
+/* TCONT Cause registers 54c0 - 54cc */
+/* TCONT Mask registers 54d0 - 54dc */
+#define MV_PP2_ISR_RX_ERR_CAUSE_REG(port)	(MV_PP2_REG_BASE + 0x5500 + 4 * (port))
+#define MV_PP2_ISR_RX_ERR_MASK_REG(port)	(MV_PP2_REG_BASE + 0x5520 + 4 * (port))
+#define MV_PP2_ISR_TX_ERR_CAUSE_REG(port)	(MV_PP2_REG_BASE + 0x5500 + 4 * (port))
+#define MV_PP2_ISR_TX_ERR_MASK_REG(port)	(MV_PP2_REG_BASE + 0x5520 + 4 * (port))
+/* TCONT TX exception Cause registers 5580 - 558c */
+/* TCONT TX exception Mask registers 5590 - 559c */
+#define MV_PP2_ISR_PON_TX_UNDR_CAUSE_REG	(MV_PP2_REG_BASE + 0x55a0)
+#define MV_PP2_ISR_PON_TX_UNDR_MASK_REG		(MV_PP2_REG_BASE + 0x55a4)
+
+#define MV_PP2_ISR_MISC_CAUSE_REG			(MV_PP2_REG_BASE + 0x55b0)
+#define MV_PP2_ISR_MISC_MASK_REG			(MV_PP2_REG_BASE + 0x55b4)
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_OVERRUN_DROP_REG(port)		(MV_PP2_REG_BASE + 0x7000 + 4 * (port))
+#define MV_PP2_CLS_DROP_REG(port)			(MV_PP2_REG_BASE + 0x7020 + 4 * (port))
+
+/******************************** Port Drop counters ppv2.0*****************************/
+
+#define MV_PP2_V0_POLICER_DROP_REG(plcr)		(MV_PP2_REG_BASE + 0x7040 + 4 * (plcr))
+#define MV_PP2_V0_TX_EARLY_DROP_REG(eport)		(MV_PP2_REG_BASE + 0x7080 + 4 * (eport))
+#define MV_PP2_V0_TX_DESC_DROP_REG(eport)		(MV_PP2_REG_BASE + 0x7100 + 4 * (eport))
+#define MV_PP2_V0_RX_EARLY_DROP_REG(rxq)		(MV_PP2_REG_BASE + 0x7200 + 4 * (rxq))
+#define MV_PP2_V0_RX_DESC_DROP_REG(rxq)			(MV_PP2_REG_BASE + 0x7400 + 4 * (rxq))
+
+/************************************ counters ppv2.1 **********************************/
+
+
+#define MV_PP2_V1_CNT_IDX_REG				(MV_PP2_REG_BASE + 0x7040)
+/* TX counters index */
+#define TX_CNT_IDX_TXP					3
+#define TX_CNT_IDX_TXQ					0
+
+#define TX_CNT_IDX(port, txp, txq)			((MV_PPV2_TXP_PHYS(port, txp) << 3) | (txq))
+
+#define MV_PP2_V1_TX_DESC_ENQ_REG			(MV_PP2_REG_BASE + 0x7100)
+#define MV_PP2_V1_TX_DESC_ENQ_TO_DRAM_REG		(MV_PP2_REG_BASE + 0x7104)
+#define MV_PP2_V1_TX_BUF_ENQ_TO_DRAM_REG		(MV_PP2_REG_BASE + 0x7108)
+#define MV_PP2_V1_TX_DESC_HWF_ENQ_REG			(MV_PP2_REG_BASE + 0x710c)
+#define MV_PP2_V1_TX_PKT_DQ_REG				(MV_PP2_REG_BASE + 0x7130)
+#define MV_PP2_V1_TX_PKT_FULLQ_DROP_REG			(MV_PP2_REG_BASE + 0x7200)
+#define MV_PP2_V1_TX_PKT_EARLY_DROP_REG			(MV_PP2_REG_BASE + 0x7204)
+#define MV_PP2_V1_TX_PKT_BM_DROP_REG			(MV_PP2_REG_BASE + 0x7208)
+#define MV_PP2_V1_TX_PKT_BM_MC_DROP_REG			(MV_PP2_REG_BASE + 0x720c)
+
+#define MV_PP2_V1_RX_PKT_FULLQ_DROP_REG			(MV_PP2_REG_BASE + 0x7220)
+#define MV_PP2_V1_RX_PKT_EARLY_DROP_REG			(MV_PP2_REG_BASE + 0x7224)
+#define MV_PP2_V1_RX_PKT_BM_DROP_REG			(MV_PP2_REG_BASE + 0x7228)
+#define MV_PP2_V1_RX_DESC_ENQ_REG			(MV_PP2_REG_BASE + 0x7120)
+
+#define MV_PP2_V1_OVERFLOW_MC_DROP_REG			(MV_PP2_REG_BASE + 0x770c)
+
+
+
+
+/*-------------------------------------------------------------------------------*/
+
+
+/************************** TX Scheduler Registers ******************************/
+/* Indirect access */
+#define MV_PP2_TXP_SCHED_PORT_INDEX_REG		(MV_PP2_REG_BASE + 0x8000)
+
+#define MV_PP2_TXP_SCHED_Q_CMD_REG		(MV_PP2_REG_BASE + 0x8004)
+
+#define MV_PP2_TXP_SCHED_ENQ_OFFSET		0
+#define MV_PP2_TXP_SCHED_ENQ_MASK		(0xFF << MV_PP2_TXP_SCHED_ENQ_OFFSET)
+#define MV_PP2_TXP_SCHED_DISQ_OFFSET		8
+#define MV_PP2_TXP_SCHED_DISQ_MASK		(0xFF << MV_PP2_TXP_SCHED_DISQ_OFFSET)
+/*-----------------------------------------------------------------------------------------------*/
+
+#define MV_PP2_TXP_SCHED_CMD_1_REG		(MV_PP2_REG_BASE + 0x8010)
+
+#define MV_PP2_TXP_SCHED_RESET_BIT		0
+#define MV_PP2_TXP_SCHED_RESET_MASK		(1 << MV_PP2_TXP_SCHED_RESET_BIT)
+
+#define MV_PP2_TXP_SCHED_PTP_SYNC_BIT		1
+#define MV_PP2_TXP_SCHED_PTP_SYNC_MASK		(1 << MV_PP2_TXP_SCHED_PTP_SYNC_BIT)
+
+#define MV_PP2_TXP_SCHED_EJP_ENABLE_BIT		2
+#define MV_PP2_TXP_SCHED_EJP_ENABLE_MASK	(1 << MV_PP2_TXP_SCHED_EJP_ENABLE_BIT)
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Transmit Queue Fixed Priority Configuration (TQFPC) */
+#define MV_PP2_TXP_SCHED_FIXED_PRIO_REG		(MV_PP2_REG_BASE + 0x8014)
+
+#define MV_PP2_TXP_FIXED_PRIO_OFFS          	0
+#define MV_PP2_TXP_FIXED_PRIO_MASK          	(0xFF << MV_PP2_TX_FIXED_PRIO_OFFS)
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Basic Refill No of Clocks (BRC) */
+#define MV_PP2_TXP_SCHED_PERIOD_REG		(MV_PP2_REG_BASE + 0x8018)
+
+#define MV_PP2_TXP_REFILL_CLOCKS_OFFS       	0
+#define MV_PP2_TXP_REFILL_CLOCKS_MIN        	16
+#define MV_PP2_TXP_REFILL_CLOCKS_MASK       	(0xFFFF << MV_PP2_TXP_REFILL_CLOCKS_OFFS)
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Port Maximum Transmit Unit (PMTU) */
+#define MV_PP2_TXP_SCHED_MTU_REG		(MV_PP2_REG_BASE + 0x801c)
+#define MV_PP2_TXP_MTU_OFFS			0
+#define MV_PP2_TXP_MTU_MAX			0x7FFFF
+#define MV_PP2_TXP_MTU_ALL_MASK			(MV_PP2_TXP_MTU_MAX << MV_PP2_TXP_MTU_OFFS)
+#define MV_PP2_TXP_MTU_MASK(mtu)		((mtu) << MV_PP2_TXP_MTU_OFFS)
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Port Bucket Refill (PRefill) */
+#define MV_PP2_TXP_SCHED_REFILL_REG		(MV_PP2_REG_BASE + 0x8020)
+#define MV_PP2_TXP_REFILL_TOKENS_OFFS		0
+#define MV_PP2_TXP_REFILL_TOKENS_MAX		0x7FFFF
+#define MV_PP2_TXP_REFILL_TOKENS_ALL_MASK	(MV_PP2_TXP_REFILL_TOKENS_MAX << MV_PP2_TXP_REFILL_TOKENS_OFFS)
+#define MV_PP2_TXP_REFILL_TOKENS_MASK(val)	((val) << MV_PP2_TXP_REFILL_TOKENS_OFFS)
+
+#define MV_PP2_TXP_REFILL_PERIOD_OFFS       	20
+#define MV_PP2_TXP_REFILL_PERIOD_MAX        	0x3FF
+#define MV_PP2_TXP_REFILL_PERIOD_ALL_MASK   	(MV_PP2_TXP_REFILL_PERIOD_MAX << MV_PP2_TXP_REFILL_PERIOD_OFFS)
+#define MV_PP2_TXP_REFILL_PERIOD_MASK(val)  	((val) << MV_PP2_TXP_REFILL_PERIOD_OFFS)
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Port Maximum Token Bucket Size (PMTBS) */
+#define MV_PP2_TXP_SCHED_TOKEN_SIZE_REG		(MV_PP2_REG_BASE + 0x8024)
+#define MV_PP2_TXP_TOKEN_SIZE_MAX           	0xFFFFFFFF
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Port Token Bucket Counter (PMTBS) */
+#define MV_PP2_TXP_SCHED_TOKEN_CNTR_REG		(MV_PP2_REG_BASE + 0x8028)
+#define MV_PP2_TXP_TOKEN_CNTR_MAX		0xFFFFFFFF
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Queue Bucket Refill (QRefill) */
+#define MV_PP2_TXQ_SCHED_REFILL_REG(q)		(MV_PP2_REG_BASE + 0x8040 + ((q) << 2))
+
+#define MV_PP2_TXQ_REFILL_TOKENS_OFFS		0
+#define MV_PP2_TXQ_REFILL_TOKENS_MAX		0x7FFFF
+#define MV_PP2_TXQ_REFILL_TOKENS_ALL_MASK	(MV_PP2_TXQ_REFILL_TOKENS_MAX << MV_PP2_TXQ_REFILL_TOKENS_OFFS)
+#define MV_PP2_TXQ_REFILL_TOKENS_MASK(val)	((val) << MV_PP2_TXQ_REFILL_TOKENS_OFFS)
+
+#define MV_PP2_TXQ_REFILL_PERIOD_OFFS		20
+#define MV_PP2_TXQ_REFILL_PERIOD_MAX		0x3FF
+#define MV_PP2_TXQ_REFILL_PERIOD_ALL_MASK	(MV_PP2_TXQ_REFILL_PERIOD_MAX << MV_PP2_TXQ_REFILL_PERIOD_OFFS)
+#define MV_PP2_TXQ_REFILL_PERIOD_MASK(val)	((val) << MV_PP2_TXQ_REFILL_PERIOD_OFFS)
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Queue Maximum Token Bucket Size (QMTBS) */
+#define MV_PP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(MV_PP2_REG_BASE + 0x8060 + ((q) << 2))
+#define MV_PP2_TXQ_TOKEN_SIZE_MAX		0x7FFFFFFF
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Queue Token Bucket Counter (PMTBS) */
+#define MV_PP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(MV_PP2_REG_BASE + 0x8080 + ((q) << 2))
+#define MV_PP2_TXQ_TOKEN_CNTR_MAX		0xFFFFFFFF
+/*-----------------------------------------------------------------------------------------------*/
+
+/* Transmit Queue Arbiter Configuration (TQxAC) */
+#define MV_PP2_TXQ_SCHED_WRR_REG(q)		(MV_PP2_REG_BASE + 0x80A0 + ((q) << 2))
+
+#define MV_PP2_TXQ_WRR_WEIGHT_OFFS		0
+#define MV_PP2_TXQ_WRR_WEIGHT_MAX		0xFF
+#define MV_PP2_TXQ_WRR_WEIGHT_ALL_MASK		(MV_PP2_TXQ_WRR_WEIGHT_MAX << MV_PP2_TXQ_WRR_WEIGHT_OFFS)
+#define MV_PP2_TXQ_WRR_WEIGHT_MASK(weigth)	((weigth) << MV_PP2_TXQ_WRR_WEIGHT_OFFS)
+
+#define MV_PP2_TXQ_WRR_BYTE_COUNT_OFFS		8
+#define MV_PP2_TXQ_WRR_BYTE_COUNT_MASK		(0x3FFFF << MV_PP2_TXQ_WRR_BYTE_COUNT_OFFS)
+
+/************************** PPv2 HW defines ******************************/
+#define MV_PP2_RX_FIFO_PORT_DATA_SIZE		0x2000
+#define MV_PP2_RX_FIFO_PORT_ATTR_SIZE		0x80
+#define MV_PP2_RX_FIFO_PORT_MIN_PKT		0x80
+
+#define MV_PP2_MAX_PORTS			8 	/* Maximum number of ports supported by PPv2 HW */
+#define MV_PP2_MAX_RXQS_TOTAL			256	/* Maximum number of RXQs supported by PPv2 HW for all ports */
+
+#define MV_PP2_DESC_ALIGNED_SIZE		32
+#define MV_PP2_DESC_Q_ALIGN			512
+/************************** RX/TX Descriptor defines and inlines ******************************/
+/* RXQ */
+typedef struct pp2_rx_desc {
+	MV_U32 status;
+	MV_U16 parserInfo;
+	MV_U16 dataSize;
+	MV_U32 bufPhysAddr;
+	MV_U32 bufCookie;
+	MV_U16 gemPortIdPktColor;
+	MV_U16 csumL4;
+	MV_U8  bmQset;
+	MV_U8  reserved;
+	MV_U16 classifyInfo;
+	MV_U32 flowId;
+	MV_U32 reserved2;
+} PP2_RX_DESC;
+
+/* Bits of "status" field */
+#define PP2_RX_L3_OFFSET_OFFS			0
+#define PP2_RX_L3_OFFSET_MASK			(0x7F << PP2_RX_L3_OFFSET_OFFS)
+
+#define PP2_RX_IP_HLEN_OFFS			8
+#define PP2_RX_IP_HLEN_MASK			(0x1F << PP2_RX_IP_HLEN_OFFS)
+
+#define PP2_RX_ERR_CODE_OFFS			13
+#define PP2_RX_ERR_CODE_MASK			(3 << PP2_RX_ERR_CODE_OFFS)
+#define PP2_RX_ERR_CRC				(0 << PP2_RX_ERR_CODE_OFFS)
+#define PP2_RX_ERR_OVERRUN			(1 << PP2_RX_ERR_CODE_OFFS)
+#define PP2_RX_RESERVED				(2 << PP2_RX_ERR_CODE_OFFS)
+#define PP2_RX_ERR_RESOURCE			(3 << PP2_RX_ERR_CODE_OFFS)
+
+#define PP2_RX_ES_BIT				15
+#define PP2_RX_ES_MASK				(1 << PP2_RX_ES_BIT)
+
+#define PP2_RX_BM_POOL_ID_OFFS			16
+#define PP2_RX_BM_POOL_ALL_MASK			(0x7 << PP2_RX_BM_POOL_ID_OFFS)
+#define PP2_RX_BM_POOL_ID_MASK(pool)		((pool) << PP2_RX_BM_POOL_ID_OFFS)
+
+#define PP2_RX_HWF_SYNC_BIT			21
+#define PP2_RX_HWF_SYNC_MASK			(1 << PP2_RX_HWF_SYNC_BIT)
+
+#define PP2_RX_L4_CHK_OK_BIT			22
+#define PP2_RX_L4_CHK_OK_MASK			(1 << PP2_RX_L4_CHK_OK_BIT)
+
+#define PP2_RX_IP_FRAG_BIT			23
+#define PP2_RX_IP_FRAG_MASK			(1 << PP2_RX_IP_FRAG_BIT)
+
+#define PP2_RX_IP4_HEADER_ERR_BIT		24
+#define PP2_RX_IP4_HEADER_ERR_MASK		(1 << PP2_RX_IP4_HEADER_ERR_BIT)
+
+#define PP2_RX_L4_OFFS				25
+#define PP2_RX_L4_MASK				(7 << PP2_RX_L4_OFFS)
+/* Value 0 - N/A, 3-7 - User Defined */
+#define PP2_RX_L4_TCP				(1 << PP2_RX_L4_OFFS)
+#define PP2_RX_L4_UDP				(2 << PP2_RX_L4_OFFS)
+
+#define PP2_RX_L3_OFFS				28
+#define PP2_RX_L3_MASK				(7 << PP2_RX_L3_OFFS)
+/* Value 0 - N/A, 6-7 - User Defined */
+#define PP2_RX_L3_IP4				(1 << PP2_RX_L3_OFFS)
+#define PP2_RX_L3_IP4_OPT			(2 << PP2_RX_L3_OFFS)
+#define PP2_RX_L3_IP4_OTHER			(3 << PP2_RX_L3_OFFS)
+#define PP2_RX_L3_IP6				(4 << PP2_RX_L3_OFFS)
+#define PP2_RX_L3_IP6_EXT			(5 << PP2_RX_L3_OFFS)
+
+#define PP2_RX_BUF_HDR_BIT			31
+#define PP2_RX_BUF_HDR_MASK			(1 << PP2_RX_BUF_HDR_BIT)
+
+/* status field MACROs */
+#define PP2_RX_L3_IS_IP4(status)		(((status) & PP2_RX_L3_MASK) == PP2_RX_L3_IP4)
+#define PP2_RX_L3_IS_IP4_OPT(status)		(((status) & PP2_RX_L3_MASK) == PP2_RX_L3_IP4_OPT)
+#define PP2_RX_L3_IS_IP4_OTHER(status)		(((status) & PP2_RX_L3_MASK) == PP2_RX_L3_IP4_OTHER)
+#define PP2_RX_L3_IS_IP6(status)		(((status) & PP2_RX_L3_MASK) == PP2_RX_L3_IP6)
+#define PP2_RX_L3_IS_IP6_EXT(status)		(((status) & PP2_RX_L3_MASK) == PP2_RX_L3_IP6_EXT)
+#define PP2_RX_L4_IS_UDP(status)		(((status) & PP2_RX_L4_MASK) == PP2_RX_L4_UDP)
+#define PP2_RX_L4_IS_TCP(status)		(((status) & PP2_RX_L4_MASK) == PP2_RX_L4_TCP)
+#define PP2_RX_IP4_HDR_ERR(status)		((status) & PP2_RX_IP4_HEADER_ERR_MASK)
+#define PP2_RX_IP4_FRG(status)			((status) & PP2_RX_IP_FRAG_MASK)
+#define PP2_RX_L4_CHK_OK(status)		((status) & PP2_RX_L4_CHK_OK_MASK)
+
+/* Sub fields of "parserInfo" field */
+#define PP2_RX_LKP_ID_OFFS			0
+#define PP2_RX_LKP_ID_BITS			6
+#define PP2_RX_LKP_ID_MASK			(((1 << PP2_RX_LKP_ID_BITS) - 1) << PP2_RX_LKP_ID_OFFS)
+
+#define PP2_RX_CPU_CODE_OFFS			6
+#define PP2_RX_CPU_CODE_BITS			3
+#define PP2_RX_CPU_CODE_MASK			(((1 << PP2_RX_CPU_CODE_BITS) - 1) << PP2_RX_CPU_CODE_OFFS)
+
+#define PP2_RX_PPPOE_BIT			9
+#define PP2_RX_PPPOE_MASK			(1 << PP2_RX_PPPOE_BIT)
+
+#define PP2_RX_L3_CAST_OFFS			10
+#define PP2_RX_L3_CAST_BITS			2
+#define PP2_RX_L3_CAST_MASK			(((1 << PP2_RX_L3_CAST_BITS) - 1) << PP2_RX_L3_CAST_OFFS)
+
+#define PP2_RX_L2_CAST_OFFS			12
+#define PP2_RX_L2_CAST_BITS			2
+#define PP2_RX_L2_CAST_MASK			(((1 << PP2_RX_L2_CAST_BITS) - 1) << PP2_RX_L2_CAST_OFFS)
+
+#define PP2_RX_VLAN_INFO_OFFS			14
+#define PP2_RX_VLAN_INFO_BITS			2
+#define PP2_RX_VLAN_INFO_MASK			(((1 << PP2_RX_VLAN_INFO_BITS) - 1) << PP2_RX_VLAN_INFO_OFFS)
+
+/* Bits of "bmQset" field */
+#define PP2_RX_BUFF_QSET_NUM_OFFS		0
+#define PP2_RX_BUFF_QSET_NUM_MASK		(0x7f << PP2_RX_BUFF_QSET_NUM_OFFS)
+
+#define PP2_RX_BUFF_TYPE_OFFS			7
+#define PP2_RX_BUFF_TYPE_MASK			(0x1 << PP2_RX_BUFF_TYPE_OFFS)
+/*-------------------------------------------------------------------------------*/
+
+/* TXQ */
+typedef struct pp2_tx_desc {
+	MV_U32 command;
+	MV_U8  pktOffset;
+	MV_U8  physTxq;
+	MV_U16 dataSize;
+	MV_U32 bufPhysAddr;
+	MV_U32 bufCookie;
+	MV_U32 hwCmd[3];
+	MV_U32 reserved;
+} PP2_TX_DESC;
+
+/* Bits of "command" field */
+#define PP2_TX_L3_OFFSET_OFFS			0
+#define PP2_TX_L3_OFFSET_MASK			(0x7F << PP2_TX_L3_OFFSET_OFFS)
+
+#define PP2_TX_BUF_RELEASE_MODE_BIT		7
+#define PP2_TX_BUF_RELEASE_MODE_MASK		(1 << PP2_TX_BUF_RELEASE_MODE_BIT)
+
+#define PP2_TX_IP_HLEN_OFFS			8
+#define PP2_TX_IP_HLEN_MASK			(0x1F << PP2_TX_IP_HLEN_OFFS)
+
+#define PP2_TX_L4_CSUM_OFFS			13
+#define PP2_TX_L4_CSUM_MASK			(3 << PP2_TX_L4_CSUM_OFFS)
+#define PP2_TX_L4_CSUM				(0 << PP2_TX_L4_CSUM_OFFS)
+#define PP2_TX_L4_CSUM_FRG			(1 << PP2_TX_L4_CSUM_OFFS)
+#define PP2_TX_L4_CSUM_NOT			(2 << PP2_TX_L4_CSUM_OFFS)
+
+#define PP2_TX_IP_CSUM_DISABLE_BIT		15
+#define PP2_TX_IP_CSUM_DISABLE_MASK		(1 << PP2_TX_IP_CSUM_DISABLE_BIT)
+
+/* 3 bits: 16..18 */
+#define PP2_TX_POOL_INDEX_OFFS			16
+#define PP2_TX_POOL_INDEX_MASK			(7 << PP2_TX_POOL_INDEX_OFFS)
+
+/* bit 19 - Reserved */
+
+#define PP2_TX_PKT_OFFS_9_BIT			20
+#define PP2_TX_PKT_OFFS_9_MASK			(1 << PP2_TX__PKT_OFFS_9_BIT)
+
+#define PP2_TX_HWF_SYNC_BIT			21
+#define PP2_TX_HWF_SYNC_MASK			(1 << PP2_TX_HWF_SYNC_BIT)
+
+#define PP2_TX_HWF_BIT				22
+#define PP2_TX_HWF_MASK				(1 << PP2_TX_HWF_BIT)
+
+#define PP2_TX_PADDING_DISABLE_BIT		23
+#define PP2_TX_PADDING_DISABLE_MASK		(1 << PP2_TX_PADDING_DISABLE_BIT)
+
+#define PP2_TX_L4_OFFS				24
+#define PP2_TX_L4_TCP				(0 << PP2_TX_L4_OFFS)
+#define PP2_TX_L4_UDP				(1 << PP2_TX_L4_OFFS)
+
+#define PP2_TX_L3_OFFS				26
+#define PP2_TX_L3_IP4				(0 << PP2_TX_L3_OFFS)
+#define PP2_TX_L3_IP6				(1 << PP2_TX_L3_OFFS)
+
+#define PP2_TX_L_DESC_BIT			28
+#define PP2_TX_L_DESC_MASK			(1 << PP2_TX_L_DESC_BIT)
+
+#define PP2_TX_F_DESC_BIT			29
+#define PP2_TX_F_DESC_MASK			(1 << PP2_TX_F_DESC_BIT)
+
+#define PP2_TX_DESC_FRMT_BIT			30
+#define PP2_TX_DESC_FRMT_MASK			(1 << PP2_TX_DESC_FRMT_BIT)
+#define PP2_TX_DESC_PER_BUF			(0 << PP2_TX_DESC_FRMT_BIT)
+#define PP2_TX_DESC_PER_PKT			(1 << PP2_TX_DESC_FRMT_BIT)
+
+#define PP2_TX_BUF_HDR_BIT			31
+#define PP2_TX_BUF_HDR_MASK			(1 << PP2_TX_BUF_HDR_BIT)
+
+/* Bits of "hwCmd[0]" field - offset 0x10 */
+#define PP2_TX_GEMPID_OFFS			0
+#define PP2_TX_GEMPID_BITS			12
+#define PP2_TX_GEMPID_ALL_MASK			(((1 << PP2_TX_GEMPID_BITS) - 1) << PP2_TX_GEMPID_OFFS)
+#define PP2_TX_GEMPID_MASK(gpid)		(((gpid) & PP2_TX_GEMPID_ALL_MASK) << PP2_TX_GEMPID_OFFS)
+
+#define PP2_TX_COLOR_OFFS			12
+#define PP2_TX_COLOR_ALL_MASK			(0x3 << PP2_TX_COLOR_OFFS)
+#define PP2_TX_COLOR_GREEN			0
+#define PP2_TX_COLOR_YELLOW			1
+#define PP2_TX_COLOR_MASK(col)			(((col) & PP2_TX_COLOR_ALL_MASK) << PP2_TX_COLOR_OFFS)
+
+#define PP2_TX_DSA_OFFS				14
+#define PP2_TX_DSA_ALL_MASK			(0x3 << PP2_TX_DSA_OFFS)
+#define PP2_TX_DSA_NONE				0
+#define PP2_TX_DSA_TAG				1
+#define PP2_TX_EDSA_TAG				2
+#define PP2_TX_DSA_MASK(dsa)			(((dsa) & PP2_TX_DSA_ALL_MASK) << PP2_TX_DSA_OFFS)
+
+#define PP2_TX_L4_CSUM_INIT_OFFS		16
+#define PP2_TX_L4_CSUM_INIT_MASK		(0xffff << PP2_TX_L4_CSUM_INIT_OFFS)
+
+/* Bits of "hwCmd[1]" field - offset 0x14 */
+
+#define PP2_TX_MOD_QSET_OFFS			0
+#define PP2_TX_MOD_QSET_BITS			7
+#define PP2_TX_MOD_QSET_MASK			(((1 << PP2_TX_MOD_QSET_BITS) - 1) << PP2_TX_MOD_QSET_OFFS)
+
+#define PP2_TX_MOD_GRNTD_BIT			7
+#define PP2_TX_MOD_GRNTD_MASK			(1 <<  PP2_TX_MOD_GRNTD_BIT)
+
+/* bits 8..15 are reserved */
+
+#define PP2_TX_MOD_DSCP_OFFS			16
+#define PP2_TX_MOD_DSCP_BITS			6
+#define PP2_TX_MOD_DSCP_MASK			(((1 << PP2_TX_MOD_DSCP_BITS) - 1) << PP2_TX_MOD_DSCP_OFFS)
+
+#define PP2_TX_MOD_PRIO_OFFS			22
+#define PP2_TX_MOD_PRIO_BITS			3
+#define PP2_TX_MOD_PRIO_MASK			(((1 << PP2_TX_MOD_PRIO_BITS) - 1) << PP2_TX_MOD_PRIO_OFFS)
+
+#define PP2_TX_MOD_DSCP_EN_BIT			25
+#define PP2_TX_MOD_DSCP_EN_MASK			(1 << PP2_TX_MOD_DSCP_EN_BIT)
+
+#define PP2_TX_MOD_PRIO_EN_BIT			26
+#define PP2_TX_MOD_PRIO_EN_MASK			(1 << PP2_TX_MOD_PRIO_EN_BIT)
+
+#define PP2_TX_MOD_GEMPID_EN_BIT		27
+#define PP2_TX_MOD_GEMPID_EN_MASK		(1 << PP2_TX_MOD_GEMPID_EN_BIT)
+
+/* Bits of "hwCmd[2]" field - offset 0x18 */
+#define PP2_TX_PME_DPTR_OFFS			0
+#define PP2_TX_PME_DPTR_ALL_MASK		(0xffff << PP2_TX_PME_DPTR_OFFS)
+#define PP2_TX_PME_DPTR_MASK(val)		(((val) & PP2_TX_PME_DPTR_ALL_MASK) << PP2_TX_PME_DPTR_OFFS)
+
+#define PP2_TX_PME_IPTR_OFFS			16
+#define PP2_TX_PME_IPTR_ALL_MASK		(0xff << PP2_TX_PME_IPTR_OFFS)
+#define PP2_TX_PME_IPTR_MASK(val)		(((val) & PP2_TX_PME_IPTR_ALL_MASK) << PP2_TX_PME_IPTR_OFFS)
+
+/* Bit 24 - HWF_IDB is for HWF usage only */
+
+#define PP2_TX_GEM_OEM_BIT			25
+#define PP2_TX_GEM_OEM_MASK			(1 << PP2_TX_GEM_OEM_BIT)
+
+/* Bit 26 - ERROR_SUM is for HWF usage only */
+
+#define PP2_TX_PON_FEC_BIT			27
+#define PP2_TX_PON_FEC_MASK			(1 << PP2_TX_PON_FEC_BIT)
+
+#define PP2_TX_CPU_MAP_OFFS			28
+#define PP2_TX_CPU_MAP_BITS			4
+#define PP2_TX_CPU_MAP_MASK			(((1 << PP2_TX_CPU_MAP_BITS) - 1) << PP2_TX_CPU_MAP_OFFS)
+
+
+/************************** Buffer Header defines ******************************/
+typedef struct pp2_buff_hdr {
+	MV_U32 nextBuffPhysAddr;
+	MV_U32 nextBuffVirtAddr;
+	MV_U16 byteCount;
+	MV_U16 info;
+	MV_U8  bmQset;
+} PP2_BUFF_HDR;
+
+
+
+/* info bits */
+#define PP2_BUFF_HDR_INFO_MC_ID_OFFS		0
+#define PP2_BUFF_HDR_INFO_MC_ID_MASK		(0xfff << PP2_BUFF_HDR_INFO_MC_ID_OFFS)
+#define PP2_BUFF_HDR_INFO_MC_ID(info)		((info & PP2_BUFF_HDR_INFO_MC_ID_MASK) >> PP2_BUFF_HDR_INFO_MC_ID_OFFS)
+
+#define PP2_BUFF_HDR_INFO_LAST_OFFS		12
+#define PP2_BUFF_HDR_INFO_LAST_MASK		(0x1 << PP2_BUFF_HDR_INFO_LAST_OFFS)
+#define PP2_BUFF_HDR_INFO_IS_LAST(info)		((info & PP2_BUFF_HDR_INFO_LAST_MASK) >> PP2_BUFF_HDR_INFO_LAST_OFFS)
+
+/* bmQset bits */
+#define PP2_BUFF_HDR_BM_QSET_NUM_OFFS		0
+#define PP2_BUFF_HDR_BM_QSET_NUM_MASK		(0x7f << PP2_BUFF_HDR_BM_QSET_NUM_OFFS)
+
+#define PP2_BUFF_HDR_BM_QSET_TYPE_OFFS		7
+#define PP2_BUFF_HDR_BM_QSET_TYPE_MASK		(0x1 << PP2_BUFF_HDR_BM_QSET_TYPE_OFFS)
+
+/************************** Ethernet misc ******************************/
+
+#define ETH_MAX_DECODE_WIN			6
+#define ETH_MAX_HIGH_ADDR_REMAP_WIN		4
+
+/**** Address decode registers ****/
+
+#define ETH_WIN_BASE_REG(win)			(MV_PP2_REG_BASE + 0x4000 + ((win) << 2))
+#define ETH_WIN_SIZE_REG(win)			(MV_PP2_REG_BASE + 0x4020 + ((win) << 2))
+#define ETH_WIN_REMAP_REG(win)			(MV_PP2_REG_BASE + 0x4040 + ((win) << 2))
+#define ETH_BASE_ADDR_ENABLE_REG		(MV_PP2_REG_BASE + 0x4060)
+
+/* The target associated with this window*/
+#define ETH_WIN_TARGET_OFFS			0
+#define ETH_WIN_TARGET_MASK			(0xf << ETH_WIN_TARGET_OFFS)
+/* The target attributes associated with window */
+#define ETH_WIN_ATTR_OFFS			8
+#define ETH_WIN_ATTR_MASK			(0xff << ETH_WIN_ATTR_OFFS)
+
+/* The Base address associated with window */
+#define ETH_WIN_BASE_OFFS			16
+#define ETH_WIN_BASE_MASK			(0xFFFF << ETH_WIN_BASE_OFFS)
+
+#define ETH_WIN_SIZE_OFFS			16
+#define ETH_WIN_SIZE_MASK			(0xFFFF << ETH_WIN_SIZE_OFFS)
+/*-----------------------------------------------------------------------------------------------*/
+
+#define ETH_TARGET_DEF_ADDR_REG			(MV_PP2_REG_BASE + 0x4064)
+#define ETH_TARGET_DEF_ID_REG			(MV_PP2_REG_BASE + 0x4068)
+/*-----------------------------------------------------------------------------------------------*/
+
+#endif /* __MV_PP2_GBE_REGS_H__ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/gmac/mvEthGmacApi.c b/drivers/net/ethernet/mvebu_net/pp2/hal/gmac/mvEthGmacApi.c
new file mode 100644
index 000000000000..25ce5e38e520
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/gmac/mvEthGmacApi.c
@@ -0,0 +1,831 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#include "mvEthGmacApi.h"
+#include "gbe/mvPp2Gbe.h"
+
+void mvGmacPortEnable(int port)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(ETH_GMAC_CTRL_0_REG(port));
+	regVal |= ETH_GMAC_PORT_EN_MASK;
+	regVal |= ETH_GMAC_MIB_CNTR_EN_MASK;
+
+	MV_REG_WRITE(ETH_GMAC_CTRL_0_REG(port), regVal);
+}
+
+void mvGmacPortDisable(int port)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(ETH_GMAC_CTRL_0_REG(port));
+	regVal &= ~(ETH_GMAC_PORT_EN_MASK);
+	MV_REG_WRITE(ETH_GMAC_CTRL_0_REG(port), regVal);
+}
+
+void mvGmacPortMhSet(int port, int enable)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(ETH_GMAC_CTRL_4_REG(port));
+
+	if (enable)
+		regVal |= ETH_GMAC_MH_ENABLE_MASK;
+	else
+		regVal &= ~ETH_GMAC_MH_ENABLE_MASK;
+
+	MV_REG_WRITE(ETH_GMAC_CTRL_4_REG(port), regVal);
+}
+
+static void mvGmacPortRgmiiSet(int port, int enable)
+{
+	MV_U32  regVal;
+
+	regVal = MV_REG_READ(ETH_GMAC_CTRL_2_REG(port));
+	if (enable)
+		regVal |= ETH_GMAC_PORT_RGMII_MASK;
+	else
+		regVal &= ~ETH_GMAC_PORT_RGMII_MASK;
+
+	MV_REG_WRITE(ETH_GMAC_CTRL_2_REG(port), regVal);
+}
+
+static void mvGmacPortSgmiiSet(int port, int enable)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(ETH_GMAC_CTRL_2_REG(port));
+
+	if (enable)
+		regVal |= (ETH_GMAC_PCS_ENABLE_MASK | ETH_GMAC_INBAND_AN_MASK);
+	else
+		regVal &= ~(ETH_GMAC_PCS_ENABLE_MASK | ETH_GMAC_INBAND_AN_MASK);
+
+	MV_REG_WRITE(ETH_GMAC_CTRL_2_REG(port), regVal);
+}
+
+void mvGmacPortPeriodicXonSet(int port, int enable)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(ETH_GMAC_CTRL_1_REG(port));
+
+	if (enable)
+		regVal |= ETH_GMAC_PERIODIC_XON_EN_MASK;
+	else
+		regVal &= ~ETH_GMAC_PERIODIC_XON_EN_MASK;
+
+	MV_REG_WRITE(ETH_GMAC_CTRL_1_REG(port), regVal);
+}
+
+void mvGmacPortLbSet(int port, int isGmii, int isPcsEn)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(ETH_GMAC_CTRL_1_REG(port));
+
+	if (isGmii)
+		regVal |= ETH_GMAC_GMII_LB_EN_MASK;
+	else
+		regVal &= ~ETH_GMAC_GMII_LB_EN_MASK;
+
+	if (isPcsEn)
+		regVal |= ETH_GMAC_PCS_LB_EN_MASK;
+	else
+		regVal &= ~ETH_GMAC_PCS_LB_EN_MASK;
+
+	MV_REG_WRITE(ETH_GMAC_CTRL_1_REG(port), regVal);
+}
+
+void mvGmacPortResetSet(int port, MV_BOOL setReset)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(ETH_GMAC_CTRL_2_REG(port));
+	regVal &= ~ETH_GMAC_PORT_RESET_MASK;
+
+	if (setReset == MV_TRUE)
+		regVal |= ETH_GMAC_PORT_RESET_MASK;
+	else
+		regVal &= ~ETH_GMAC_PORT_RESET_MASK;
+
+	MV_REG_WRITE(ETH_GMAC_CTRL_2_REG(port), regVal);
+
+	if (setReset == MV_FALSE) {
+		while (MV_REG_READ(ETH_GMAC_CTRL_2_REG(port)) & ETH_GMAC_PORT_RESET_MASK)
+			;
+	}
+}
+
+void mvGmacPortPowerUp(int port, MV_BOOL isSgmii, MV_BOOL isRgmii)
+{
+	mvGmacPortSgmiiSet(port, isSgmii);
+	mvGmacPortRgmiiSet(port, isRgmii);
+	mvGmacPortPeriodicXonSet(port, MV_FALSE);
+	mvGmacPortResetSet(port, MV_FALSE);
+}
+
+void mvGmacDefaultsSet(int port)
+{
+	MV_U32 regVal;
+
+	/* Update TX FIFO MIN Threshold */
+	regVal = MV_REG_READ(GMAC_PORT_FIFO_CFG_1_REG(port));
+	regVal &= ~GMAC_TX_FIFO_MIN_TH_ALL_MASK;
+	/* Minimal TX threshold must be less than minimal packet length */
+	regVal |= GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
+	MV_REG_WRITE(GMAC_PORT_FIFO_CFG_1_REG(port), regVal);
+}
+
+void mvGmacPortPowerDown(int port)
+{
+	mvGmacPortDisable(port);
+	mvGmacMibCountersClear(port);
+	mvGmacPortResetSet(port, MV_TRUE);
+}
+
+MV_BOOL mvGmacPortIsLinkUp(int port)
+{
+	return (MV_REG_READ(ETH_GMAC_STATUS_REG(port)) & ETH_GMAC_LINK_UP_MASK);
+}
+
+MV_STATUS mvGmacLinkStatus(int port, MV_ETH_PORT_STATUS *pStatus)
+{
+	MV_U32 regVal;
+
+	if (MV_PP2_IS_PON_PORT(port)) {
+		pStatus->linkup = MV_TRUE;
+		pStatus->speed = MV_ETH_SPEED_1000;
+		pStatus->duplex = MV_ETH_DUPLEX_FULL;
+		pStatus->rxFc = MV_ETH_FC_DISABLE;
+		pStatus->txFc = MV_ETH_FC_DISABLE;
+		return MV_OK;
+	}
+
+	regVal = MV_REG_READ(ETH_GMAC_STATUS_REG(port));
+
+	if (regVal & ETH_GMAC_SPEED_1000_MASK)
+		pStatus->speed = MV_ETH_SPEED_1000;
+	else if (regVal & ETH_GMAC_SPEED_100_MASK)
+		pStatus->speed = MV_ETH_SPEED_100;
+	else
+		pStatus->speed = MV_ETH_SPEED_10;
+
+	if (regVal & ETH_GMAC_LINK_UP_MASK)
+		pStatus->linkup = MV_TRUE;
+	else
+		pStatus->linkup = MV_FALSE;
+
+	if (regVal & ETH_GMAC_FULL_DUPLEX_MASK)
+		pStatus->duplex = MV_ETH_DUPLEX_FULL;
+	else
+		pStatus->duplex = MV_ETH_DUPLEX_HALF;
+
+	if (regVal & ETH_TX_FLOW_CTRL_ACTIVE_MASK)
+		pStatus->txFc = MV_ETH_FC_ACTIVE;
+	else if (regVal & ETH_TX_FLOW_CTRL_ENABLE_MASK)
+		pStatus->txFc = MV_ETH_FC_ENABLE;
+	else
+		pStatus->txFc = MV_ETH_FC_DISABLE;
+
+	if (regVal & ETH_RX_FLOW_CTRL_ACTIVE_MASK)
+		pStatus->rxFc = MV_ETH_FC_ACTIVE;
+	else if (regVal & ETH_RX_FLOW_CTRL_ENABLE_MASK)
+		pStatus->rxFc = MV_ETH_FC_ENABLE;
+	else
+		pStatus->rxFc = MV_ETH_FC_DISABLE;
+
+	return MV_OK;
+}
+
+char *mvGmacSpeedStrGet(MV_ETH_PORT_SPEED speed)
+{
+	char *str;
+
+	switch (speed) {
+	case MV_ETH_SPEED_10:
+		str = "10 Mbps";
+		break;
+	case MV_ETH_SPEED_100:
+		str = "100 Mbps";
+		break;
+	case MV_ETH_SPEED_1000:
+		str = "1 Gbps";
+		break;
+	case MV_ETH_SPEED_2000:
+		str = "2 Gbps";
+		break;
+	case MV_ETH_SPEED_AN:
+		str = "AutoNeg";
+		break;
+	default:
+		str = "Unknown";
+	}
+	return str;
+}
+
+/******************************************************************************/
+/*                          Port Configuration functions                      */
+/******************************************************************************/
+
+/*******************************************************************************
+* mvNetaMaxRxSizeSet -
+*
+* DESCRIPTION:
+*       Change maximum receive size of the port. This configuration will take place
+*       imidiately.
+*
+* INPUT:
+*
+* RETURN:
+*******************************************************************************/
+MV_STATUS mvGmacMaxRxSizeSet(int port, int maxRxSize)
+{
+	MV_U32		regVal;
+
+	if (MV_PP2_IS_PON_PORT(port))
+		return MV_ERROR;
+
+	regVal =  MV_REG_READ(ETH_GMAC_CTRL_0_REG(port));
+	regVal &= ~ETH_GMAC_MAX_RX_SIZE_MASK;
+	regVal |= (((maxRxSize - MV_ETH_MH_SIZE) / 2) << ETH_GMAC_MAX_RX_SIZE_OFFS);
+	MV_REG_WRITE(ETH_GMAC_CTRL_0_REG(port), regVal);
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvGmacForceLinkModeSet -
+*
+* DESCRIPTION:
+*       Sets "Force Link Pass" and "Do Not Force Link Fail" bits.
+* 	Note: This function should only be called when the port is disabled.
+*
+* INPUT:
+* 	int		portNo			- port number
+* 	MV_BOOL force_link_pass	- Force Link Pass
+* 	MV_BOOL force_link_fail - Force Link Failure
+*		0, 0 - normal state: detect link via PHY and connector
+*		1, 1 - prohibited state.
+*
+* RETURN:
+*******************************************************************************/
+MV_STATUS mvGmacForceLinkModeSet(int portNo, MV_BOOL force_link_up, MV_BOOL force_link_down)
+{
+	MV_U32 regVal;
+
+	/* Can't force link pass and link fail at the same time */
+	if ((force_link_up) && (force_link_down))
+		return MV_BAD_PARAM;
+
+	regVal = MV_REG_READ(ETH_GMAC_AN_CTRL_REG(portNo));
+
+	if (force_link_up)
+		regVal |= ETH_FORCE_LINK_PASS_MASK;
+	else
+		regVal &= ~ETH_FORCE_LINK_PASS_MASK;
+
+	if (force_link_down)
+		regVal |= ETH_FORCE_LINK_FAIL_MASK;
+	else
+		regVal &= ~ETH_FORCE_LINK_FAIL_MASK;
+
+	MV_REG_WRITE(ETH_GMAC_AN_CTRL_REG(portNo), regVal);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvGmacSpeedDuplexSet -
+*
+* DESCRIPTION:
+*       Sets port speed to Auto Negotiation / 1000 / 100 / 10 Mbps.
+*	Sets port duplex to Auto Negotiation / Full / Half Duplex.
+*
+* INPUT:
+* 	int portNo - port number
+* 	MV_ETH_PORT_SPEED speed - port speed
+*	MV_ETH_PORT_DUPLEX duplex - port duplex mode
+*
+* RETURN:
+*******************************************************************************/
+MV_STATUS mvGmacSpeedDuplexSet(int portNo, MV_ETH_PORT_SPEED speed, MV_ETH_PORT_DUPLEX duplex)
+{
+	MV_U32 regVal;
+
+	/* Check validity */
+	if ((speed == MV_ETH_SPEED_1000) && (duplex == MV_ETH_DUPLEX_HALF))
+		return MV_BAD_PARAM;
+
+	regVal = MV_REG_READ(ETH_GMAC_AN_CTRL_REG(portNo));
+
+	switch (speed) {
+	case MV_ETH_SPEED_AN:
+		regVal |= ETH_ENABLE_SPEED_AUTO_NEG_MASK;
+		/* the other bits don't matter in this case */
+		break;
+	case MV_ETH_SPEED_1000:
+		regVal &= ~ETH_ENABLE_SPEED_AUTO_NEG_MASK;
+		regVal |= ETH_SET_GMII_SPEED_1000_MASK;
+		/* the 100/10 bit doesn't matter in this case */
+		break;
+	case MV_ETH_SPEED_100:
+		regVal &= ~ETH_ENABLE_SPEED_AUTO_NEG_MASK;
+		regVal &= ~ETH_SET_GMII_SPEED_1000_MASK;
+		regVal |= ETH_SET_MII_SPEED_100_MASK;
+		break;
+	case MV_ETH_SPEED_10:
+		regVal &= ~ETH_ENABLE_SPEED_AUTO_NEG_MASK;
+		regVal &= ~ETH_SET_GMII_SPEED_1000_MASK;
+		regVal &= ~ETH_SET_MII_SPEED_100_MASK;
+		break;
+	default:
+		mvOsPrintf("Unexpected Speed value %d\n", speed);
+		return MV_BAD_PARAM;
+	}
+
+	switch (duplex) {
+	case MV_ETH_DUPLEX_AN:
+		regVal  |= ETH_ENABLE_DUPLEX_AUTO_NEG_MASK;
+		/* the other bits don't matter in this case */
+		break;
+	case MV_ETH_DUPLEX_HALF:
+		regVal &= ~ETH_ENABLE_DUPLEX_AUTO_NEG_MASK;
+		regVal &= ~ETH_SET_FULL_DUPLEX_MASK;
+		break;
+	case MV_ETH_DUPLEX_FULL:
+		regVal &= ~ETH_ENABLE_DUPLEX_AUTO_NEG_MASK;
+		regVal |= ETH_SET_FULL_DUPLEX_MASK;
+		break;
+	default:
+		mvOsPrintf("Unexpected Duplex value %d\n", duplex);
+		return MV_BAD_PARAM;
+	}
+
+	MV_REG_WRITE(ETH_GMAC_AN_CTRL_REG(portNo), regVal);
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvGmacSpeedDuplexGet -
+*
+* DESCRIPTION:
+*       Gets port speed
+*	Gets port duplex
+*
+* INPUT:
+* 	int portNo - port number
+* OUTPUT:
+* 	MV_ETH_PORT_SPEED *speed - port speed
+*	MV_ETH_PORT_DUPLEX *duplex - port duplex mode
+*
+* RETURN:
+*******************************************************************************/
+MV_STATUS mvGmacSpeedDuplexGet(int portNo, MV_ETH_PORT_SPEED *speed, MV_ETH_PORT_DUPLEX *duplex)
+{
+	MV_U32 regVal;
+
+	/* Check validity */
+	if (!speed || !duplex)
+		return MV_BAD_PARAM;
+
+	regVal = MV_REG_READ(ETH_GMAC_AN_CTRL_REG(portNo));
+	if (regVal & ETH_ENABLE_SPEED_AUTO_NEG_MASK)
+		*speed = MV_ETH_SPEED_AN;
+	else if (regVal & ETH_SET_GMII_SPEED_1000_MASK)
+		*speed = MV_ETH_SPEED_1000;
+	else if (regVal & ETH_SET_MII_SPEED_100_MASK)
+		*speed = MV_ETH_SPEED_100;
+	else
+		*speed = MV_ETH_SPEED_10;
+
+	if (regVal & ETH_ENABLE_DUPLEX_AUTO_NEG_MASK)
+		*duplex = MV_ETH_DUPLEX_AN;
+	else if (regVal & ETH_SET_FULL_DUPLEX_MASK)
+		*duplex = MV_ETH_DUPLEX_FULL;
+	else
+		*duplex = MV_ETH_DUPLEX_HALF;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvGmacFlowCtrlSet - Set Flow Control of the port.
+*
+* DESCRIPTION:
+*       This function configures the port's Flow Control properties.
+*
+* INPUT:
+*       int				port		- Port number
+*       MV_ETH_PORT_FC  flowControl - Flow control of the port.
+*
+* RETURN:   MV_STATUS
+*       MV_OK           - Success
+*       MV_OUT_OF_RANGE - Failed. Port is out of valid range
+*       MV_BAD_VALUE    - Value flowControl parameters is not valid
+*
+*******************************************************************************/
+MV_STATUS mvGmacFlowCtrlSet(int port, MV_ETH_PORT_FC flowControl)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(ETH_GMAC_AN_CTRL_REG(port));
+
+	switch (flowControl) {
+	case MV_ETH_FC_AN_NO:
+		regVal |= ETH_ENABLE_FLOW_CONTROL_AUTO_NEG_MASK;
+		regVal &= ~ETH_FLOW_CONTROL_ADVERTISE_MASK;
+		regVal &= ~ETH_FLOW_CONTROL_ASYMETRIC_MASK;
+		break;
+
+	case MV_ETH_FC_AN_SYM:
+		regVal |= ETH_ENABLE_FLOW_CONTROL_AUTO_NEG_MASK;
+		regVal |= ETH_FLOW_CONTROL_ADVERTISE_MASK;
+		regVal &= ~ETH_FLOW_CONTROL_ASYMETRIC_MASK;
+		break;
+
+	case MV_ETH_FC_AN_ASYM:
+		regVal |= ETH_ENABLE_FLOW_CONTROL_AUTO_NEG_MASK;
+		regVal |= ETH_FLOW_CONTROL_ADVERTISE_MASK;
+		regVal |= ETH_FLOW_CONTROL_ASYMETRIC_MASK;
+		break;
+
+	case MV_ETH_FC_DISABLE:
+		regVal &= ~ETH_ENABLE_FLOW_CONTROL_AUTO_NEG_MASK;
+		regVal &= ~ETH_SET_FLOW_CONTROL_MASK;
+		break;
+
+	case MV_ETH_FC_ENABLE:
+		regVal &= ~ETH_ENABLE_FLOW_CONTROL_AUTO_NEG_MASK;
+		regVal |= ETH_SET_FLOW_CONTROL_MASK;
+		break;
+
+	default:
+		mvOsPrintf("ethDrv: Unexpected FlowControl value %d\n", flowControl);
+		return MV_BAD_VALUE;
+	}
+
+	MV_REG_WRITE(ETH_GMAC_AN_CTRL_REG(port), regVal);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvGmacFlowCtrlGet - Get Flow Control configuration of the port.
+*
+* DESCRIPTION:
+*       This function returns the port's Flow Control properties.
+*
+* INPUT:
+*       int				port		- Port number
+*
+* OUTPUT:
+*       MV_ETH_PORT_FC  *flowCntrl	- Flow control of the port.
+*
+* RETURN:   MV_STATUS
+*       MV_OK           - Success
+*       MV_OUT_OF_RANGE - Failed. Port is out of valid range
+*
+*******************************************************************************/
+MV_STATUS mvGmacFlowCtrlGet(int port, MV_ETH_PORT_FC *pFlowCntrl)
+{
+	MV_U32 regVal;
+
+	regVal = MV_REG_READ(ETH_GMAC_AN_CTRL_REG(port));
+
+	if (regVal & ETH_ENABLE_FLOW_CONTROL_AUTO_NEG_MASK) {
+		/* Auto negotiation is enabled */
+		if (regVal & ETH_FLOW_CONTROL_ADVERTISE_MASK) {
+			if (regVal & ETH_FLOW_CONTROL_ASYMETRIC_MASK)
+				*pFlowCntrl = MV_ETH_FC_AN_ASYM;
+			else
+				*pFlowCntrl = MV_ETH_FC_AN_SYM;
+		} else
+			*pFlowCntrl = MV_ETH_FC_AN_NO;
+	} else {
+		/* Auto negotiation is disabled */
+		if (regVal & ETH_SET_FLOW_CONTROL_MASK)
+			*pFlowCntrl = MV_ETH_FC_ENABLE;
+		else
+			*pFlowCntrl = MV_ETH_FC_DISABLE;
+	}
+	return MV_OK;
+}
+
+MV_STATUS mvGmacPortLinkSpeedFlowCtrl(int port, MV_ETH_PORT_SPEED speed,
+				     int forceLinkUp)
+{
+	if (forceLinkUp) {
+		if (mvGmacSpeedDuplexSet(port, speed, MV_ETH_DUPLEX_FULL)) {
+			mvOsPrintf("mvGmacSpeedDuplexSet failed\n");
+			return MV_FAIL;
+		}
+		if (mvGmacFlowCtrlSet(port, MV_ETH_FC_ENABLE)) {
+			mvOsPrintf("mvGmacFlowCtrlSet failed\n");
+			return MV_FAIL;
+		}
+		if (mvGmacForceLinkModeSet(port, 1, 0)) {
+			mvOsPrintf("mvGmacForceLinkModeSet failed\n");
+			return MV_FAIL;
+		}
+	} else {
+		if (mvGmacForceLinkModeSet(port, 0, 0)) {
+			mvOsPrintf("mvGmacForceLinkModeSet failed\n");
+			return MV_FAIL;
+		}
+		if (mvGmacSpeedDuplexSet(port, MV_ETH_SPEED_AN, MV_ETH_DUPLEX_AN)) {
+			mvOsPrintf("mvGmacSpeedDuplexSet failed\n");
+			return MV_FAIL;
+		}
+		if (mvGmacFlowCtrlSet(port, MV_ETH_FC_AN_SYM)) {
+			mvOsPrintf("mvGmacFlowCtrlSet failed\n");
+			return MV_FAIL;
+		}
+	}
+
+	return MV_OK;
+}
+
+/******************************************************************************/
+/*                         PHY Control Functions                              */
+/******************************************************************************/
+void mvGmacPhyAddrSet(int port, int phyAddr)
+{
+	unsigned int regData;
+
+	regData = MV_REG_READ(ETH_PHY_ADDR_REG);
+
+	regData &= ~ETH_PHY_ADDR_MASK(port);
+	regData |= ((phyAddr << ETH_PHY_ADDR_OFFS(port)) & ETH_PHY_ADDR_MASK(port));
+
+	MV_REG_WRITE(ETH_PHY_ADDR_REG, regData);
+
+	return;
+}
+
+int mvGmacPhyAddrGet(int port)
+{
+	unsigned int 	regData;
+
+	regData = MV_REG_READ(ETH_PHY_ADDR_REG);
+
+	return ((regData & ETH_PHY_ADDR_MASK(port)) >> ETH_PHY_ADDR_OFFS(port));
+}
+
+void mvGmacPhyPollEnable(int enable)
+{
+	unsigned int regData;
+
+	regData = MV_REG_READ(ETH_PHY_AN_CFG0_REG);
+	if (enable)
+		regData &= ~ETH_PHY_AN_CFG0_STOP_AN_SMI0_MASK;
+	else
+		regData |= ETH_PHY_AN_CFG0_STOP_AN_SMI0_MASK;
+
+	MV_REG_WRITE(ETH_PHY_AN_CFG0_REG, regData);
+}
+
+void mvGmacPrintReg(unsigned int reg_addr, char *reg_name)
+{
+	mvOsPrintf("  %-32s: 0x%x = 0x%08x\n", reg_name, reg_addr, MV_REG_READ(reg_addr));
+}
+
+void mvGmacLmsRegs(void)
+{
+	mvOsPrintf("\n[GoP LMS registers]\n");
+
+	mvGmacPrintReg(ETH_PHY_ADDR_REG, "MV_GOP_LMS_PHY_ADDR_REG");
+	mvGmacPrintReg(ETH_PHY_AN_CFG0_REG, "MV_GOP_LMS_PHY_AN_CFG0_REG");
+
+	mvGmacPrintReg(ETH_PHY_AN_CFG0_REG, "MV_GOP_LMS_PHY_AN_CFG0_REG");
+	mvGmacPrintReg(ETH_PHY_AN_CFG0_REG, "MV_GOP_LMS_PHY_AN_CFG0_REG");
+	mvGmacPrintReg(ETH_ISR_SUM_CAUSE_REG, "ETH_ISR_SUM_CAUSE_REG");
+	mvGmacPrintReg(ETH_ISR_SUM_MASK_REG, "ETH_ISR_SUM_MASK_REG");
+}
+
+void mvGmacPortRegs(int port)
+{
+	if (mvPp2PortCheck(port))
+		return;
+
+	if (MV_PP2_IS_PON_PORT(port)) {
+		mvOsPrintf("Not supported for PON port\n");
+		return;
+	}
+
+	port = MV_PPV2_PORT_PHYS(port);
+
+	mvOsPrintf("\n[GoP MAC #%d registers]\n", port);
+
+	mvGmacPrintReg(ETH_GMAC_CTRL_0_REG(port), "MV_GMAC_CTRL_0_REG");
+	mvGmacPrintReg(ETH_GMAC_CTRL_1_REG(port), "MV_GMAC_CTRL_1_REG");
+	mvGmacPrintReg(ETH_GMAC_CTRL_2_REG(port), "MV_GMAC_CTRL_2_REG");
+	mvGmacPrintReg(ETH_GMAC_CTRL_3_REG(port), "MV_GMAC_CTRL_3_REG");
+	mvGmacPrintReg(ETH_GMAC_CTRL_4_REG(port), "MV_GMAC_CTRL_4_REG");
+
+	mvGmacPrintReg(ETH_GMAC_AN_CTRL_REG(port), "MV_GMAC_AN_CTRL_REG");
+	mvGmacPrintReg(ETH_GMAC_STATUS_REG(port), "MV_GMAC_STATUS_REG");
+
+	mvGmacPrintReg(GMAC_PORT_FIFO_CFG_0_REG(port), "MV_GMAC_PORT_FIFO_CFG_0_REG");
+	mvGmacPrintReg(GMAC_PORT_FIFO_CFG_1_REG(port), "MV_GMAC_PORT_FIFO_CFG_1_REG");
+
+	mvGmacPrintReg(ETH_PORT_ISR_CAUSE_REG(port), "MV_GMAC_ISR_CAUSE_REG");
+	mvGmacPrintReg(ETH_PORT_ISR_MASK_REG(port), "MV_GMAC_ISR_MASK_REG");
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	mvGmacPrintReg(ETH_PORT_ISR_SUM_CAUSE_REG(port), "MV_GMAC_ISR_SUM_CAUSE_REG");
+	mvGmacPrintReg(ETH_PORT_ISR_SUM_MASK_REG(port), "MV_GMAC_ISR_SUM_MASK_REG");
+#endif
+
+}
+
+
+/******************************************************************************/
+/*                      MIB Counters functions                                */
+/******************************************************************************/
+
+/*******************************************************************************
+* mvGmacMibCounterRead - Read a MIB counter
+*
+* DESCRIPTION:
+*       This function reads a MIB counter of a specific ethernet port.
+*       NOTE - Read from ETH_MIB_GOOD_OCTETS_RECEIVED_LOW or
+*              ETH_MIB_GOOD_OCTETS_SENT_LOW counters will return 64 bits value,
+*              so pHigh32 pointer should not be NULL in this case.
+*
+* INPUT:
+*       port        - Ethernet Port number.
+*       mibOffset   - MIB counter offset.
+*
+* OUTPUT:
+*       MV_U32*       pHigh32 - pointer to place where 32 most significant bits
+*                             of the counter will be stored.
+*
+* RETURN:
+*       32 low sgnificant bits of MIB counter value.
+*
+*******************************************************************************/
+MV_U32 mvGmacMibCounterRead(int port, unsigned int mibOffset, MV_U32 *pHigh32)
+{
+	MV_U32 valLow32, valHigh32;
+
+	valLow32 = MV_REG_READ(ETH_MIB_COUNTERS_BASE(port) + mibOffset);
+
+	/* Implement FEr ETH. Erroneous Value when Reading the Upper 32-bits    */
+	/* of a 64-bit MIB Counter.                                             */
+	if ((mibOffset == ETH_MIB_GOOD_OCTETS_RECEIVED_LOW) || (mibOffset == ETH_MIB_GOOD_OCTETS_SENT_LOW)) {
+		valHigh32 = MV_REG_READ(ETH_MIB_COUNTERS_BASE(port) + mibOffset + 4);
+		if (pHigh32 != NULL)
+			*pHigh32 = valHigh32;
+	}
+	return valLow32;
+}
+
+/*******************************************************************************
+* mvGmacMibCountersClear - Clear all MIB counters
+*
+* DESCRIPTION:
+*       This function clears all MIB counters
+*
+* INPUT:
+*       port      - Ethernet Port number.
+*
+* RETURN:   void
+*
+*******************************************************************************/
+void mvGmacMibCountersClear(int port)
+{
+	int i;
+
+	if (MV_PP2_IS_PON_PORT(port))
+		return;
+
+	/* Perform dummy reads from MIB counters */
+	/* Read of last counter clear all counter were read before */
+	for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i <= ETH_MIB_LATE_COLLISION; i += 4)
+		MV_REG_READ((ETH_MIB_COUNTERS_BASE(port) + i));
+}
+
+static void mvGmacMibPrint(int port, MV_U32 offset, char *mib_name)
+{
+	MV_U32 regVaLo, regValHi = 0;
+
+	regVaLo = mvGmacMibCounterRead(port, offset, &regValHi);
+
+	if (!regValHi)
+		mvOsPrintf("  %-32s: 0x%02x = %u\n", mib_name, offset, regVaLo);
+	else
+		mvOsPrintf("  %-32s: 0x%02x = 0x%08x%08x\n", mib_name, offset, regValHi, regVaLo);
+}
+
+/* Print MIB counters of the Ethernet port */
+void mvGmacMibCountersShow(int port)
+{
+	if (mvPp2PortCheck(port))
+		return;
+
+	if (MV_PP2_IS_PON_PORT(port)) {
+		mvOsPrintf("%s: not supported for PON port\n", __func__);
+		return;
+	}
+
+	mvOsPrintf("\nMIBs: port=%d, base=0x%x\n", port, ETH_MIB_COUNTERS_BASE(port));
+
+	mvOsPrintf("\n[Rx]\n");
+	mvGmacMibPrint(port, ETH_MIB_GOOD_OCTETS_RECEIVED_LOW, "GOOD_OCTETS_RECEIVED");
+	mvGmacMibPrint(port, ETH_MIB_BAD_OCTETS_RECEIVED, "BAD_OCTETS_RECEIVED");
+	mvGmacMibPrint(port, ETH_MIB_UNICAST_FRAMES_RECEIVED, "UNCAST_FRAMES_RECEIVED");
+	mvGmacMibPrint(port, ETH_MIB_BROADCAST_FRAMES_RECEIVED, "BROADCAST_FRAMES_RECEIVED");
+	mvGmacMibPrint(port, ETH_MIB_MULTICAST_FRAMES_RECEIVED, "MULTICAST_FRAMES_RECEIVED");
+
+	mvOsPrintf("\n[RMON]\n");
+	mvGmacMibPrint(port, ETH_MIB_FRAMES_64_OCTETS, "FRAMES_64_OCTETS");
+	mvGmacMibPrint(port, ETH_MIB_FRAMES_65_TO_127_OCTETS, "FRAMES_65_TO_127_OCTETS");
+	mvGmacMibPrint(port, ETH_MIB_FRAMES_128_TO_255_OCTETS, "FRAMES_128_TO_255_OCTETS");
+	mvGmacMibPrint(port, ETH_MIB_FRAMES_256_TO_511_OCTETS, "FRAMES_256_TO_511_OCTETS");
+	mvGmacMibPrint(port, ETH_MIB_FRAMES_512_TO_1023_OCTETS, "FRAMES_512_TO_1023_OCTETS");
+	mvGmacMibPrint(port, ETH_MIB_FRAMES_1024_TO_MAX_OCTETS, "FRAMES_1024_TO_MAX_OCTETS");
+
+	mvOsPrintf("\n[Tx]\n");
+	mvGmacMibPrint(port, ETH_MIB_GOOD_OCTETS_SENT_LOW, "GOOD_OCTETS_SENT");
+	mvGmacMibPrint(port, ETH_MIB_UNICAST_FRAMES_SENT, "UNICAST_FRAMES_SENT");
+	mvGmacMibPrint(port, ETH_MIB_MULTICAST_FRAMES_SENT, "MULTICAST_FRAMES_SENT");
+	mvGmacMibPrint(port, ETH_MIB_BROADCAST_FRAMES_SENT, "BROADCAST_FRAMES_SENT");
+	mvGmacMibPrint(port, ETH_MIB_CRC_ERRORS_SENT, "CRC_ERRORS_SENT");
+
+	mvOsPrintf("\n[FC control]\n");
+	mvGmacMibPrint(port, ETH_MIB_FC_RECEIVED, "FC_RECEIVED");
+	mvGmacMibPrint(port, ETH_MIB_FC_SENT, "FC_SENT");
+
+	mvOsPrintf("\n[Errors]\n");
+	mvGmacMibPrint(port, ETH_MIB_RX_FIFO_OVERRUN, "ETH_MIB_RX_FIFO_OVERRUN");
+	mvGmacMibPrint(port, ETH_MIB_UNDERSIZE_RECEIVED, "UNDERSIZE_RECEIVED");
+	mvGmacMibPrint(port, ETH_MIB_FRAGMENTS_RECEIVED, "FRAGMENTS_RECEIVED");
+	mvGmacMibPrint(port, ETH_MIB_OVERSIZE_RECEIVED, "OVERSIZE_RECEIVED");
+	mvGmacMibPrint(port, ETH_MIB_JABBER_RECEIVED, "JABBER_RECEIVED");
+	mvGmacMibPrint(port, ETH_MIB_MAC_RECEIVE_ERROR, "MAC_RECEIVE_ERROR");
+	mvGmacMibPrint(port, ETH_MIB_BAD_CRC_EVENT, "BAD_CRC_EVENT");
+	mvGmacMibPrint(port, ETH_MIB_COLLISION, "COLLISION");
+	/* This counter must be read last. Read it clear all the counters */
+	mvGmacMibPrint(port, ETH_MIB_LATE_COLLISION, "LATE_COLLISION");
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/gmac/mvEthGmacApi.h b/drivers/net/ethernet/mvebu_net/pp2/hal/gmac/mvEthGmacApi.h
new file mode 100644
index 000000000000..36638779cfa3
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/gmac/mvEthGmacApi.h
@@ -0,0 +1,196 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#ifndef __mvEthGmac_h__
+#define __mvEthGmac_h__
+
+#include "mvEthGmacRegs.h"
+
+#include "mvTypes.h"
+#include "mvCommon.h"
+#include "mvOs.h"
+
+typedef enum {
+	MV_ETH_SPEED_AN,
+	MV_ETH_SPEED_10,
+	MV_ETH_SPEED_100,
+	MV_ETH_SPEED_1000,
+	MV_ETH_SPEED_2000,
+} MV_ETH_PORT_SPEED;
+
+typedef enum {
+	MV_ETH_DUPLEX_AN,
+	MV_ETH_DUPLEX_HALF,
+	MV_ETH_DUPLEX_FULL
+} MV_ETH_PORT_DUPLEX;
+
+typedef enum {
+	MV_ETH_FC_AN_NO,
+	MV_ETH_FC_AN_SYM,
+	MV_ETH_FC_AN_ASYM,
+	MV_ETH_FC_DISABLE,
+	MV_ETH_FC_ENABLE,
+	MV_ETH_FC_ACTIVE
+
+} MV_ETH_PORT_FC;
+
+typedef struct eth_link_status {
+	MV_BOOL			linkup;
+	MV_ETH_PORT_SPEED	speed;
+	MV_ETH_PORT_DUPLEX	duplex;
+	MV_ETH_PORT_FC		rxFc;
+	MV_ETH_PORT_FC		txFc;
+
+} MV_ETH_PORT_STATUS;
+
+/***************************************************************************/
+/*                          Inline functions                               */
+/***************************************************************************/
+static INLINE void mvGmacIsrSummaryMask(MV_VOID)
+{
+	MV_REG_WRITE(ETH_ISR_SUM_MASK_REG, 0);
+}
+
+static INLINE void mvGmacIsrSummaryUnmask(MV_VOID)
+{
+	MV_REG_WRITE(ETH_ISR_SUM_MASK_REG, ETH_ISR_SUM_PORT0_MASK |
+		     ETH_ISR_SUM_PORT1_MASK | 0x20 /* magic bit */);
+}
+
+static INLINE MV_U32 mvGmacIsrSummaryCauseGet(MV_VOID)
+{
+	return MV_REG_READ(ETH_ISR_SUM_CAUSE_REG);
+}
+
+static INLINE MV_U32 mvGmacPortIsrCauseGet(int port)
+{
+	return MV_REG_READ(ETH_PORT_ISR_CAUSE_REG(port));
+}
+
+static INLINE MV_VOID mvGmacPortIsrMask(int port)
+{
+	MV_REG_WRITE(ETH_PORT_ISR_MASK_REG(port), 0);
+}
+
+static INLINE MV_VOID mvGmacPortIsrUnmask(int port)
+{
+	MV_REG_WRITE(ETH_PORT_ISR_MASK_REG(port), ETH_PORT_LINK_CHANGE_MASK);
+}
+
+static INLINE MV_VOID mvGmacPortSumIsrMask(int port)
+{
+#ifdef CONFIG_MV_ETH_PP2_1
+	MV_REG_WRITE(ETH_PORT_ISR_SUM_MASK_REG(port), 0);
+#endif
+}
+
+static INLINE MV_VOID mvGmacPortSumIsrUnmask(int port)
+{
+#ifdef CONFIG_MV_ETH_PP2_1
+	MV_REG_WRITE(ETH_PORT_ISR_SUM_MASK_REG(port), ETH_PORT_ISR_SUM_INTERN_MASK);
+#endif
+}
+
+
+void mvGmacPhyPollEnable(int enable);
+void mvGmacDefaultsSet(int port);
+void mvGmacPortEnable(int port);
+void mvGmacPortDisable(int port);
+void mvGmacPortMhSet(int port, int enable);
+void mvGmacPortPeriodicXonSet(int port, int enable);
+MV_BOOL mvGmacPortIsLinkUp(int port);
+MV_STATUS mvGmacLinkStatus(int port, MV_ETH_PORT_STATUS *pStatus);
+void mvGmacPortLbSet(int port, int isGmii, int isPcsEn);
+void mvGmacPortResetSet(int port, MV_BOOL setReset);
+void mvGmacPortPowerUp(int port, MV_BOOL isSgmii, MV_BOOL isRgmii);
+void mvGmacPortPowerDown(int port);
+char *mvGmacSpeedStrGet(MV_ETH_PORT_SPEED speed);
+
+/******************************************************************************/
+/*                          Port Configuration functions                      */
+/******************************************************************************/
+MV_STATUS mvGmacMaxRxSizeSet(int port, int maxRxSize);
+MV_STATUS mvGmacForceLinkModeSet(int portNo, MV_BOOL force_link_up, MV_BOOL force_link_down);
+MV_STATUS mvGmacSpeedDuplexSet(int portNo, MV_ETH_PORT_SPEED speed, MV_ETH_PORT_DUPLEX duplex);
+MV_STATUS mvGmacSpeedDuplexGet(int portNo, MV_ETH_PORT_SPEED *speed, MV_ETH_PORT_DUPLEX *duplex);
+MV_STATUS mvGmacFlowCtrlSet(int port, MV_ETH_PORT_FC flowControl);
+MV_STATUS mvGmacFlowCtrlGet(int port, MV_ETH_PORT_FC *pFlowCntrl);
+MV_STATUS mvGmacPortLinkSpeedFlowCtrl(int port, MV_ETH_PORT_SPEED speed,
+				     int forceLinkUp);
+
+/******************************************************************************/
+/*                         PHY Control Functions                              */
+/******************************************************************************/
+void mvGmacPhyAddrSet(int port, int phyAddr);
+int mvGmacPhyAddrGet(int port);
+
+/****************************************/
+/*        MIB counters		       	*/
+/****************************************/
+MV_U32 mvGmacMibCounterRead(int port, unsigned int mibOffset, MV_U32 *pHigh32);
+void mvGmacMibCountersClear(int port);
+void mvGmacMibCountersShow(int port);
+void mvGmacPortRegs(int port);
+void mvGmacLmsRegs(void);
+
+#endif /* __mvGmacGmac_h__ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/gmac/mvEthGmacRegs.h b/drivers/net/ethernet/mvebu_net/pp2/hal/gmac/mvEthGmacRegs.h
new file mode 100644
index 000000000000..c01ae5a735d8
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/gmac/mvEthGmacRegs.h
@@ -0,0 +1,347 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __mvEthGmacRegs_h__
+#define __mvEthGmacRegs_h__
+
+#ifdef CONFIG_ARCH_MVEBU
+#include "mvNetConfig.h"
+#else
+#include "mvSysEthConfig.h"
+#endif
+
+#define ETH_MNG_EXTENDED_GLOBAL_CTRL_REG   (GOP_MNG_REG_BASE + 0x5c)
+
+#define ETH_REG_BASE(port)                 GOP_REG_BASE(port)
+
+/****************************************/
+/*        MAC Unit Registers            */
+/****************************************/
+
+/**** Tri-Speed Ports MAC and CPU Port MAC Configuration Sub-Unit Registers ****/
+#define ETH_GMAC_CTRL_0_REG(p)             (ETH_REG_BASE(p) + 0x0)
+
+#define ETH_GMAC_PORT_EN_BIT               0
+#define ETH_GMAC_PORT_EN_MASK              (1 << ETH_GMAC_PORT_EN_BIT)
+
+#define ETH_GMAC_PORT_TYPE_BIT             1
+#define ETH_GMAC_PORT_TYPE_MASK            (1 << ETH_GMAC_PORT_TYPE_BIT)
+#define ETH_GMAC_PORT_TYPE_SGMII           (0 << ETH_GMAC_PORT_TYPE_BIT)
+#define ETH_GMAC_PORT_TYPE_1000X           (1 << ETH_GMAC_PORT_TYPE_BIT)
+
+#define ETH_GMAC_MAX_RX_SIZE_OFFS          2
+#define ETH_GMAC_MAX_RX_SIZE_MASK          (0x1FFF << ETH_GMAC_MAX_RX_SIZE_OFFS)
+
+#define ETH_GMAC_MIB_CNTR_EN_BIT           15
+#define ETH_GMAC_MIB_CNTR_EN_MASK          (1 << ETH_GMAC_MIB_CNTR_EN_BIT)
+/*-------------------------------------------------------------------------------*/
+
+#define ETH_GMAC_CTRL_1_REG(p)             (ETH_REG_BASE(p) + 0x4)
+
+#define ETH_GMAC_PERIODIC_XON_EN_BIT       1
+#define ETH_GMAC_PERIODIC_XON_EN_MASK      (0x1 << ETH_GMAC_PERIODIC_XON_EN_BIT)
+
+#define ETH_GMAC_GMII_LB_EN_BIT            5
+#define ETH_GMAC_GMII_LB_EN_MASK           (1 << ETH_GMAC_GMII_LB_EN_BIT)
+
+#define ETH_GMAC_PCS_LB_EN_BIT             6
+#define ETH_GMAC_PCS_LB_EN_MASK            (1 << ETH_GMAC_PCS_LB_EN_BIT)
+
+#define ETH_GMAC_SA_LOW_OFFS               7
+#define ETH_GMAC_SA_LOW_MASK               (0xFF << ETH_GMAC_SA_LOW_OFFS)
+/*-------------------------------------------------------------------------------*/
+
+#define ETH_GMAC_CTRL_2_REG(p)             (ETH_REG_BASE(p) + 0x8)
+
+#define ETH_GMAC_INBAND_AN_BIT            0
+#define ETH_GMAC_INBAND_AN_MASK           (1 << ETH_GMAC_INBAND_AN_BIT)
+
+#define ETH_GMAC_PCS_ENABLE_BIT            3
+#define ETH_GMAC_PCS_ENABLE_MASK           (1 << ETH_GMAC_PCS_ENABLE_BIT)
+
+#define ETH_GMAC_PORT_RGMII_BIT            4
+#define ETH_GMAC_PORT_RGMII_MASK           (1 << ETH_GMAC_PORT_RGMII_BIT)
+
+#define ETH_GMAC_PORT_RESET_BIT            6
+#define ETH_GMAC_PORT_RESET_MASK           (1 << ETH_GMAC_PORT_RESET_BIT)
+/*-------------------------------------------------------------------------------*/
+
+/**** Port Auto-Negotiation Configuration Sub-Unit Registers ****/
+#define ETH_GMAC_AN_CTRL_REG(p)                (ETH_REG_BASE(p) + 0xC)
+
+#define ETH_FORCE_LINK_FAIL_BIT                0
+#define ETH_FORCE_LINK_FAIL_MASK               (1 << ETH_FORCE_LINK_FAIL_BIT)
+
+#define ETH_FORCE_LINK_PASS_BIT                1
+#define ETH_FORCE_LINK_PASS_MASK               (1 << ETH_FORCE_LINK_PASS_BIT)
+
+#define ETH_SET_MII_SPEED_100_BIT              5
+#define ETH_SET_MII_SPEED_100_MASK             (1 << ETH_SET_MII_SPEED_100_BIT)
+
+#define ETH_SET_GMII_SPEED_1000_BIT            6
+#define ETH_SET_GMII_SPEED_1000_MASK           (1 << ETH_SET_GMII_SPEED_1000_BIT)
+
+#define ETH_ENABLE_SPEED_AUTO_NEG_BIT          7
+#define ETH_ENABLE_SPEED_AUTO_NEG_MASK         (1 << ETH_ENABLE_SPEED_AUTO_NEG_BIT)
+
+#define ETH_ENABLE_FLOW_CTRL_AUTO_NEG_BIT      11
+#define ETH_ENABLE_FLOW_CTRL_AUTO_NEG_MASK     (1 << ETH_ENABLE_FLOW_CTRL_AUTO_NEG_BIT)
+
+/* TODO: I keep this bit even though it's not listed in Cider */
+#define ETH_SET_FLOW_CONTROL_BIT               8
+#define ETH_SET_FLOW_CONTROL_MASK              (1 << ETH_SET_FLOW_CONTROL_BIT)
+
+#define ETH_FLOW_CONTROL_ADVERTISE_BIT         9
+#define ETH_FLOW_CONTROL_ADVERTISE_MASK        (1 << ETH_FLOW_CONTROL_ADVERTISE_BIT)
+
+#define ETH_FLOW_CONTROL_ASYMETRIC_BIT         10
+#define ETH_FLOW_CONTROL_ASYMETRIC_MASK        (1 << ETH_FLOW_CONTROL_ASYMETRIC_BIT)
+
+#define ETH_ENABLE_FLOW_CONTROL_AUTO_NEG_BIT   11
+#define ETH_ENABLE_FLOW_CONTROL_AUTO_NEG_MASK  (1 << ETH_ENABLE_FLOW_CONTROL_AUTO_NEG_BIT)
+
+#define ETH_SET_FULL_DUPLEX_BIT                12
+#define ETH_SET_FULL_DUPLEX_MASK               (1 << ETH_SET_FULL_DUPLEX_BIT)
+
+#define ETH_ENABLE_DUPLEX_AUTO_NEG_BIT         13
+#define ETH_ENABLE_DUPLEX_AUTO_NEG_MASK        (1 << ETH_ENABLE_DUPLEX_AUTO_NEG_BIT)
+/*-------------------------------------------------------------------------------*/
+
+/**** Port Status Sub-Unit Registers ****/
+#define ETH_GMAC_STATUS_REG(p)             (ETH_REG_BASE(p) + 0x10)
+
+#define ETH_GMAC_LINK_UP_BIT               0
+#define ETH_GMAC_LINK_UP_MASK              (1 << ETH_GMAC_LINK_UP_BIT)
+
+#define ETH_GMAC_SPEED_1000_BIT            1
+#define ETH_GMAC_SPEED_1000_MASK           (1 << ETH_GMAC_SPEED_1000_BIT)
+
+#define ETH_GMAC_SPEED_100_BIT             2
+#define ETH_GMAC_SPEED_100_MASK            (1 << ETH_GMAC_SPEED_100_BIT)
+
+#define ETH_GMAC_FULL_DUPLEX_BIT           3
+#define ETH_GMAC_FULL_DUPLEX_MASK          (1 << ETH_GMAC_FULL_DUPLEX_BIT)
+
+#define ETH_RX_FLOW_CTRL_ENABLE_BIT        4
+#define ETH_RX_FLOW_CTRL_ENABLE_MASK       (1 << ETH_RX_FLOW_CTRL_ENABLE_BIT)
+
+#define ETH_TX_FLOW_CTRL_ENABLE_BIT        5
+#define ETH_TX_FLOW_CTRL_ENABLE_MASK       (1 << ETH_TX_FLOW_CTRL_ENABLE_BIT)
+
+#define ETH_RX_FLOW_CTRL_ACTIVE_BIT        6
+#define ETH_RX_FLOW_CTRL_ACTIVE_MASK       (1 << ETH_RX_FLOW_CTRL_ACTIVE_BIT)
+
+#define ETH_TX_FLOW_CTRL_ACTIVE_BIT        7
+#define ETH_TX_FLOW_CTRL_ACTIVE_MASK       (1 << ETH_TX_FLOW_CTRL_ACTIVE_BIT)
+/*-------------------------------------------------------------------------------*/
+
+/**** Port Internal Sub-Unit Registers ****/
+#define GMAC_PORT_FIFO_CFG_0_REG(p)        (ETH_REG_BASE(p) + 0x18)
+
+#define GMAC_PORT_FIFO_CFG_1_REG(p)        (ETH_REG_BASE(p) + 0x1C)
+
+#define GMAC_RX_FIFO_MAX_TH_OFFS           0
+
+#define GMAC_TX_FIFO_MIN_TH_OFFS           6
+#define GMAC_TX_FIFO_MIN_TH_ALL_MASK       (0x7F << GMAC_TX_FIFO_MIN_TH_OFFS)
+#define GMAC_TX_FIFO_MIN_TH_MASK(val)      (((val) << GMAC_TX_FIFO_MIN_TH_OFFS) & GMAC_TX_FIFO_MIN_TH_ALL_MASK)
+/*-------------------------------------------------------------------------------*/
+
+/**** Port Interrupt Sub-Unit Registers ****/
+#define ETH_PORT_ISR_CAUSE_REG(p)		(ETH_REG_BASE(p) + 0x20)
+
+#define ETH_PORT_ISR_SUM_BIT			0
+#define ETH_PORT_ISR_SUM_MASK			(1 << ETH_PORT_ISR_SUM_BIT)
+
+#define ETH_PORT_LINK_CHANGE_BIT		1
+#define ETH_PORT_LINK_CHANGE_MASK		(1 << ETH_PORT_LINK_CHANGE_BIT)
+
+#define ETH_PORT_ISR_MASK_REG(p)		(ETH_REG_BASE(p) + 0x24)
+
+#define ETH_PORT_ISR_SUM_CAUSE_REG(p)		(ETH_REG_BASE(p) + 0xA0)
+#define ETH_PORT_ISR_SUM_MASK_REG(p)		(ETH_REG_BASE(p) + 0xA4)
+
+#define ETH_PORT_ISR_SUM_INTERN_BIT		0x1
+#define ETH_PORT_ISR_SUM_INTERN_MASK		(1 << ETH_PORT_ISR_SUM_INTERN_BIT)
+/*-------------------------------------------------------------------------------*/
+
+/**** Port MIB Counters Control register ****/
+#define ETH_GMAC_MIB_CTRL_REG(p)		(ETH_REG_BASE(p) + 0x44)
+/*-------------------------------------------------------------------------------*/
+
+/**** Port MAC Control register #3 ****/
+#define ETH_GMAC_CTRL_3_REG(p)			(ETH_REG_BASE(p) + 0x48)
+/*-------------------------------------------------------------------------------*/
+
+/**** CCFC Port Speed Timer register ****/
+#define ETH_GMAC_SPEED_TIMER_REG(p)		(ETH_REG_BASE(p) + 0x58)
+/*-------------------------------------------------------------------------------*/
+
+/**** Port MAC Control register #4 ****/
+#define ETH_GMAC_CTRL_4_REG(p)			(ETH_REG_BASE(p) + 0x90)
+
+#define ETH_GMAC_MH_ENABLE_BIT			9
+#define ETH_GMAC_MH_ENABLE_MASK			(1 << ETH_GMAC_MH_ENABLE_BIT)
+/*-------------------------------------------------------------------------------*/
+
+/****************************************/
+/*        LMS Unit Registers       	*/
+/****************************************/
+
+/*
+ * PHY Address Ports 0 through 5 Register
+ */
+#define ETH_PHY_ADDR_REG		(LMS_REG_BASE + 0x30)
+#define ETH_PHY_ADDR_OFFS(port)		(port * 5)
+#define ETH_PHY_ADDR_MASK(port)		(0x1F << ETH_PHY_ADDR_OFFS(port))
+
+/*------------------------------------------------------------------------------
+ * PHY Auto-Negotiation Configuration Register0
+ */
+#define ETH_PHY_AN_CFG0_REG			(LMS_REG_BASE + 0x34)
+#define ETH_PHY_AN_CFG0_STOP_AN_SMI0_BIT	7
+#define ETH_PHY_AN_CFG0_STOP_AN_SMI0_MASK	(1 << ETH_PHY_AN_CFG0_STOP_AN_SMI0_BIT)
+#define ETH_PHY_AN_EN_OFFS(port)		(port)
+#define ETH_PHY_AN_EN_MASK(port)		(1 << ETH_PHY_AN_EN_OFFS(port))
+
+/*------------------------------------------------------------------------------
+ * Interrupt Summary Cause Register
+ */
+#define ETH_ISR_SUM_CAUSE_REG		(LMS_REG_BASE + 0x10)
+#define ETH_ISR_SUM_LMS_BIT		0
+#define ETH_ISR_SUM_LMS_MASK		(1 << ETH_ISR_SUM_LMS_BIT)
+
+#define ETH_ISR_SUM_LMS0_BIT		1
+#define ETH_ISR_SUM_LMS0_MASK		(1 << ETH_ISR_SUM_LMS0_BIT)
+
+#define ETH_ISR_SUM_LMS1_BIT		2
+#define ETH_ISR_SUM_LMS1_MASK		(1 << ETH_ISR_SUM_LMS1_BIT)
+
+#define ETH_ISR_SUM_LMS2_BIT		3
+#define ETH_ISR_SUM_LMS2_MASK		(1 << ETH_ISR_SUM_LMS2_BIT)
+
+#define ETH_ISR_SUM_LMS3_BIT		4
+#define ETH_ISR_SUM_LMS3_MASK		(1 << ETH_ISR_SUM_LMS3_BIT)
+
+#define ETH_ISR_SUM_PORTS_BIT		16
+#define ETH_ISR_SUM_PORTS_MASK		(1 << ETH_ISR_SUM_PORTS_BIT)
+
+#define ETH_ISR_SUM_PORT0_BIT		17
+#define ETH_ISR_SUM_PORT0_MASK		(1 << ETH_ISR_SUM_PORT0_BIT)
+
+#define ETH_ISR_SUM_PORT1_BIT		18
+#define ETH_ISR_SUM_PORT1_MASK		(1 << ETH_ISR_SUM_PORT1_BIT)
+
+#define ETH_ISR_SUM_PORT2_BIT		19
+#define ETH_ISR_SUM_PORT2_MASK		(1 << ETH_ISR_SUM_PORT2_BIT)
+
+#define ETH_ISR_SUM_PORT_MASK(p)	(1 << (ETH_ISR_SUM_PORT0_BIT + p))
+
+#define ETH_ISR_SUM_MASK_REG		(LMS_REG_BASE + 0x220c)
+
+/*------------------------------------------------------------------------------
+ * SMI Management Register
+ */
+#define ETH_SMI_REG(port)		(LMS_REG_BASE + 0x54)
+
+/****************************************/
+/*        MIB counters		       	*/
+/****************************************/
+#define ETH_MIB_PORT_OFFSET(port)	    ((port >> 1) * 0x400 + (port) * 0x400)
+#define ETH_MIB_COUNTERS_BASE(port)    (MIB_COUNTERS_REG_BASE + ETH_MIB_PORT_OFFSET(port))
+
+/* MIB Counters register definitions */
+#define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW    0x0
+#define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH   0x4
+#define ETH_MIB_BAD_OCTETS_RECEIVED         0x8
+/* Reserved                                 0xc */
+#define ETH_MIB_UNICAST_FRAMES_RECEIVED     0x10
+#define ETH_MIB_CRC_ERRORS_SENT             0x14
+#define ETH_MIB_BROADCAST_FRAMES_RECEIVED   0x18
+#define ETH_MIB_MULTICAST_FRAMES_RECEIVED   0x1c
+#define ETH_MIB_FRAMES_64_OCTETS            0x20
+#define ETH_MIB_FRAMES_65_TO_127_OCTETS     0x24
+#define ETH_MIB_FRAMES_128_TO_255_OCTETS    0x28
+#define ETH_MIB_FRAMES_256_TO_511_OCTETS    0x2c
+#define ETH_MIB_FRAMES_512_TO_1023_OCTETS   0x30
+#define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS   0x34
+#define ETH_MIB_GOOD_OCTETS_SENT_LOW        0x38
+#define ETH_MIB_GOOD_OCTETS_SENT_HIGH       0x3c
+#define ETH_MIB_UNICAST_FRAMES_SENT         0x40
+/* Reserved                                 0x44 */
+#define ETH_MIB_MULTICAST_FRAMES_SENT       0x48
+#define ETH_MIB_BROADCAST_FRAMES_SENT       0x4c
+/* Reserved                                 0x50 */
+#define ETH_MIB_FC_SENT                     0x54
+#define ETH_MIB_FC_RECEIVED                 0x58
+#define ETH_MIB_RX_FIFO_OVERRUN             0x5c
+#define ETH_MIB_UNDERSIZE_RECEIVED          0x60
+#define ETH_MIB_FRAGMENTS_RECEIVED          0x64
+#define ETH_MIB_OVERSIZE_RECEIVED           0x68
+#define ETH_MIB_JABBER_RECEIVED             0x6c
+#define ETH_MIB_MAC_RECEIVE_ERROR           0x70
+#define ETH_MIB_BAD_CRC_EVENT               0x74
+#define ETH_MIB_COLLISION                   0x78
+#define ETH_MIB_LATE_COLLISION              0x7c
+#endif /* mvEthGmacRegs */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/plcr/mvPp2PlcrHw.c b/drivers/net/ethernet/mvebu_net/pp2/hal/plcr/mvPp2PlcrHw.c
new file mode 100644
index 000000000000..b7f89ee83df7
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/plcr/mvPp2PlcrHw.c
@@ -0,0 +1,407 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCommon.h"  /* Should be included before mvSysHwConfig */
+#include "mvTypes.h"
+#include "mvDebug.h"
+#include "mvOs.h"
+
+#include "common/mvPp2Common.h"
+#include "gbe/mvPp2Gbe.h"
+#include "mvPp2PlcrHw.h"
+
+
+void        mvPp2PlcrHwRegs(void)
+{
+	int    i;
+	MV_U32 regVal;
+
+	mvOsPrintf("\n[PLCR registers: %d policers]\n", MV_PP2_PLCR_NUM);
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	mvPp2PrintReg(MV_PP2_PLCR_MODE_REG,	"MV_PP2_PLCR_MODE_REG");
+#else
+	mvPp2PrintReg(MV_PP2_PLCR_ENABLE_REG,	"MV_PP2_PLCR_ENABLE_REG");
+#endif
+	mvPp2PrintReg(MV_PP2_PLCR_BASE_PERIOD_REG,	"MV_PP2_PLCR_BASE_PERIOD_REG");
+	mvPp2PrintReg(MV_PP2_PLCR_MIN_PKT_LEN_REG,	"MV_PP2_PLCR_MIN_PKT_LEN_REG");
+	mvPp2PrintReg(MV_PP2_PLCR_EDROP_EN_REG,		"MV_PP2_PLCR_EDROP_EN_REG");
+
+	for (i = 0; i < MV_PP2_PLCR_NUM; i++) {
+		mvOsPrintf("\n[Policer %d registers]\n", i);
+
+		mvPp2WrReg(MV_PP2_PLCR_TABLE_INDEX_REG, i);
+		mvPp2PrintReg(MV_PP2_PLCR_COMMIT_TOKENS_REG, "MV_PP2_PLCR_COMMIT_TOKENS_REG");
+		mvPp2PrintReg(MV_PP2_PLCR_EXCESS_TOKENS_REG, "MV_PP2_PLCR_EXCESS_TOKENS_REG");
+		mvPp2PrintReg(MV_PP2_PLCR_BUCKET_SIZE_REG,   "MV_PP2_PLCR_BUCKET_SIZE_REG");
+		mvPp2PrintReg(MV_PP2_PLCR_TOKEN_CFG_REG,     "MV_PP2_PLCR_TOKEN_CFG_REG");
+	}
+
+	mvOsPrintf("\nEarly Drop Thresholds for SW and HW forwarding\n");
+#ifdef CONFIG_MV_ETH_PP2_1
+	for (i = 0; i < MV_PP2_V1_PLCR_EDROP_THRESH_NUM; i++) {
+		mvPp2PrintReg2(MV_PP2_V1_PLCR_EDROP_CPU_TR_REG(i),   "MV_PP2_V1_PLCR_EDROP_CPU_TR_REG", i);
+		mvPp2PrintReg2(MV_PP2_V1_PLCR_EDROP_HWF_TR_REG(i),   "MV_PP2_V1_PLCR_EDROP_HWF_TR_REG", i);
+	}
+#else
+
+	for (i = 0; i < MV_PP2_V0_PLCR_EDROP_THRESH_NUM; i++) {
+		mvPp2PrintReg2(MV_PP2_V0_PLCR_EDROP_CPU_TR_REG(i),   "MV_PP2_V0_PLCR_EDROP_CPU_TR_REG", i);
+		mvPp2PrintReg2(MV_PP2_V0_PLCR_EDROP_HWF_TR_REG(i),   "MV_PP2_V0_PLCR_EDROP_HWF_TR_REG", i);
+	}
+#endif
+	mvOsPrintf("\nPer RXQ: Non zero early drop thresholds\n");
+	for (i = 0; i < MV_PP2_RXQ_TOTAL_NUM; i++) {
+		mvPp2WrReg(MV_PP2_PLCR_EDROP_RXQ_REG, i);
+		regVal = mvPp2RdReg(MV_PP2_PLCR_EDROP_RXQ_TR_REG);
+		if (regVal != 0)
+			mvOsPrintf("  %-32s: 0x%x = 0x%08x\n", "MV_PP2_PLCR_EDROP_RXQ_TR_REG", MV_PP2_PLCR_EDROP_RXQ_TR_REG, regVal);
+	}
+	mvOsPrintf("\nPer TXQ: Non zero Early Drop Thresholds\n");
+	for (i = 0; i < MV_PP2_TXQ_TOTAL_NUM; i++) {
+		mvPp2WrReg(MV_PP2_PLCR_EDROP_TXQ_REG, i);
+		regVal = mvPp2RdReg(MV_PP2_PLCR_EDROP_TXQ_TR_REG);
+		if (regVal != 0)
+			mvOsPrintf("  %-32s: 0x%x = 0x%08x\n", "MV_PP2_PLCR_EDROP_TXQ_TR_REG", MV_PP2_PLCR_EDROP_TXQ_TR_REG, regVal);
+	}
+}
+
+static void        mvPp2PlcrHwDumpTitle(void)
+{
+	MV_U32 regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_PLCR_BASE_PERIOD_REG);
+	mvOsPrintf("PLCR status: %d policers, period=%d (%s), ",
+				MV_PP2_PLCR_NUM, regVal & MV_PP2_PLCR_BASE_PERIOD_ALL_MASK,
+				regVal & MV_PP2_PLCR_ADD_TOKENS_EN_MASK ? "En" : "Dis");
+
+	regVal = mvPp2RdReg(MV_PP2_PLCR_EDROP_EN_REG);
+	mvOsPrintf("edrop=%s, ", regVal & MV_PP2_PLCR_EDROP_EN_MASK ? "En" : "Dis");
+
+	regVal = mvPp2RdReg(MV_PP2_PLCR_MIN_PKT_LEN_REG);
+	mvOsPrintf("min_pkt=%d bytes\n", (regVal & MV_PP2_PLCR_MIN_PKT_LEN_ALL_MASK) >> MV_PP2_PLCR_MIN_PKT_LEN_OFFS);
+
+	mvOsPrintf("PLCR: enable period  unit   type  tokens  color  c_size  e_size  c_tokens  e_tokens\n");
+}
+
+static void        mvPp2PlcrHwDump(int plcr)
+{
+	int units, type, tokens, color, enable;
+	MV_U32 regVal;
+
+	mvPp2WrReg(MV_PP2_PLCR_TABLE_INDEX_REG, plcr);
+	mvOsPrintf("%3d:  ", plcr);
+
+#ifndef CONFIG_MV_ETH_PP2_1
+	enable = mvPp2RdReg(MV_PP2_PLCR_ENABLE_REG);
+	mvOsPrintf("%4s", MV_BIT_CHECK(enable, plcr) ? "Yes" : "No");
+#endif
+
+	regVal = mvPp2RdReg(MV_PP2_PLCR_TOKEN_CFG_REG);
+	units = regVal & MV_PP2_PLCR_TOKEN_UNIT_MASK;
+	color = regVal & MV_PP2_PLCR_COLOR_MODE_MASK;
+	type = (regVal & MV_PP2_PLCR_TOKEN_TYPE_ALL_MASK) >> MV_PP2_PLCR_TOKEN_TYPE_OFFS;
+	tokens =  (regVal & MV_PP2_PLCR_TOKEN_VALUE_ALL_MASK) >> MV_PP2_PLCR_TOKEN_VALUE_OFFS;
+#ifdef CONFIG_MV_ETH_PP2_1
+	enable = regVal & MV_PP2_PLCR_ENABLE_MASK;
+	mvOsPrintf("%4s", enable ? "Yes" : "No");
+#endif
+	mvOsPrintf("   %-5s  %2d   %5d", units ? "pkts" : "bytes", type, tokens);
+	mvOsPrintf("  %-5s", color ? "aware" : "blind");
+
+	regVal = mvPp2RdReg(MV_PP2_PLCR_BASE_PERIOD_REG);
+	mvOsPrintf("  %6d", regVal & MV_PP2_PLCR_BASE_PERIOD_ALL_MASK);
+
+	regVal = mvPp2RdReg(MV_PP2_PLCR_BUCKET_SIZE_REG);
+	mvOsPrintf("    %04x    %04x",
+			(regVal & MV_PP2_PLCR_COMMIT_SIZE_ALL_MASK) >> MV_PP2_PLCR_COMMIT_SIZE_OFFS,
+			(regVal & MV_PP2_PLCR_EXCESS_SIZE_ALL_MASK) >> MV_PP2_PLCR_EXCESS_SIZE_OFFS);
+
+	regVal = mvPp2RdReg(MV_PP2_PLCR_COMMIT_TOKENS_REG);
+	mvOsPrintf("    %08x", regVal);
+
+	regVal = mvPp2RdReg(MV_PP2_PLCR_EXCESS_TOKENS_REG);
+	mvOsPrintf("  %08x", regVal);
+
+	mvOsPrintf("\n");
+}
+
+void        mvPp2PlcrHwDumpAll(void)
+{
+	int i;
+
+	mvPp2PlcrHwDumpTitle();
+	for (i = 0; i < MV_PP2_PLCR_NUM; i++)
+		mvPp2PlcrHwDump(i);
+}
+
+void        mvPp2PlcrHwDumpSingle(int plcr)
+{
+	mvPp2PlcrHwDumpTitle();
+	mvPp2PlcrHwDump(plcr);
+}
+
+MV_STATUS   mvPp2PlcrHwBaseRateGenEnable(int enable)
+{
+	MV_U32 regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_PLCR_BASE_PERIOD_REG);
+	if (enable)
+		regVal |= MV_PP2_PLCR_ADD_TOKENS_EN_MASK;
+	else
+		regVal &= ~MV_PP2_PLCR_ADD_TOKENS_EN_MASK;
+
+	mvPp2WrReg(MV_PP2_PLCR_BASE_PERIOD_REG, regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PlcrHwBasePeriodSet(int period)
+{
+	MV_U32 regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_PLCR_BASE_PERIOD_REG);
+	regVal &= ~MV_PP2_PLCR_BASE_PERIOD_ALL_MASK;
+	regVal |= MV_PP2_PLCR_BASE_PERIOD_MASK(period);
+	mvPp2WrReg(MV_PP2_PLCR_BASE_PERIOD_REG, regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PlcrHwMode(int mode)
+{
+	mvPp2WrReg(MV_PP2_PLCR_MODE_REG, mode);
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PlcrHwEnable(int plcr, int enable)
+{
+	MV_U32 regVal;
+
+	mvPp2WrReg(MV_PP2_PLCR_TABLE_INDEX_REG, plcr);
+
+	regVal = mvPp2RdReg(MV_PP2_PLCR_TOKEN_CFG_REG);
+	if (enable)
+		regVal |= MV_PP2_PLCR_ENABLE_MASK;
+	else
+		regVal &= ~MV_PP2_PLCR_ENABLE_MASK;
+
+	mvPp2WrReg(MV_PP2_PLCR_TOKEN_CFG_REG, regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PlcrHwMinPktLen(int bytes)
+{
+	MV_U32 regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_PLCR_MIN_PKT_LEN_REG);
+	regVal &= ~MV_PP2_PLCR_MIN_PKT_LEN_ALL_MASK;
+	regVal |= MV_PP2_PLCR_MIN_PKT_LEN_MASK(bytes);
+	mvPp2WrReg(MV_PP2_PLCR_MIN_PKT_LEN_REG, regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PlcrHwEarlyDropSet(int enable)
+{
+	MV_U32 regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_PLCR_EDROP_EN_REG);
+	if (enable)
+		regVal |= MV_PP2_PLCR_EDROP_EN_MASK;
+	else
+		regVal &= ~MV_PP2_PLCR_EDROP_EN_MASK;
+
+	mvPp2WrReg(MV_PP2_PLCR_EDROP_EN_REG, regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PlcrHwTokenConfig(int plcr, int unit, int type)
+{
+	MV_U32 regVal;
+
+	mvPp2WrReg(MV_PP2_PLCR_TABLE_INDEX_REG, plcr);
+	regVal = mvPp2RdReg(MV_PP2_PLCR_TOKEN_CFG_REG);
+	if (unit)
+		regVal |= MV_PP2_PLCR_TOKEN_UNIT_MASK;
+	else
+		regVal &= ~MV_PP2_PLCR_TOKEN_UNIT_MASK;
+
+	regVal &= ~MV_PP2_PLCR_TOKEN_TYPE_ALL_MASK;
+	regVal |= MV_PP2_PLCR_TOKEN_TYPE_MASK(type);
+
+	mvPp2WrReg(MV_PP2_PLCR_TOKEN_CFG_REG, regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PlcrHwTokenValue(int plcr, int value)
+{
+	MV_U32 regVal;
+
+	mvPp2WrReg(MV_PP2_PLCR_TABLE_INDEX_REG, plcr);
+	regVal = mvPp2RdReg(MV_PP2_PLCR_TOKEN_CFG_REG);
+
+	regVal &= ~MV_PP2_PLCR_TOKEN_VALUE_ALL_MASK;
+	regVal |= MV_PP2_PLCR_TOKEN_VALUE_MASK(value);
+	mvPp2WrReg(MV_PP2_PLCR_TOKEN_CFG_REG, regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PlcrHwColorModeSet(int plcr, int enable)
+{
+	MV_U32 regVal;
+
+	mvPp2WrReg(MV_PP2_PLCR_TABLE_INDEX_REG, plcr);
+	regVal = mvPp2RdReg(MV_PP2_PLCR_TOKEN_CFG_REG);
+	if (enable)
+		regVal |= MV_PP2_PLCR_COLOR_MODE_MASK;
+	else
+		regVal &= ~MV_PP2_PLCR_COLOR_MODE_MASK;
+
+	mvPp2WrReg(MV_PP2_PLCR_TOKEN_CFG_REG, regVal);
+
+	return MV_OK;
+}
+
+
+MV_STATUS   mvPp2PlcrHwBucketSizeSet(int plcr, int commit, int excess)
+{
+	MV_U32 regVal;
+
+	mvPp2WrReg(MV_PP2_PLCR_TABLE_INDEX_REG, plcr);
+	regVal = MV_PP2_PLCR_EXCESS_SIZE_MASK(excess) | MV_PP2_PLCR_COMMIT_SIZE_MASK(commit);
+	mvPp2WrReg(MV_PP2_PLCR_BUCKET_SIZE_REG, regVal);
+
+	return MV_OK;
+}
+/*ppv2.1 policer early drop threshold mechanism changed*/
+MV_STATUS   mvPp2V0PlcrHwCpuThreshSet(int idx, int threshold)
+{
+	MV_U32 regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_V0_PLCR_EDROP_CPU_TR_REG(idx));
+	regVal &= ~MV_PP2_V0_PLCR_EDROP_TR_ALL_MASK(idx);
+	regVal |= MV_PP2_V0_PLCR_EDROP_TR_MASK(idx, threshold);
+	mvPp2WrReg(MV_PP2_V0_PLCR_EDROP_CPU_TR_REG(idx), regVal);
+
+	return MV_OK;
+}
+/*ppv2.1 policer early drop threshold mechanism changed*/
+MV_STATUS   mvPp2V1PlcrHwCpuThreshSet(int idx, int threshold)
+{
+	mvPp2WrReg(MV_PP2_V1_PLCR_EDROP_CPU_TR_REG(idx), threshold);
+
+	return MV_OK;
+}
+
+/*ppv2.1 policer early drop threshold mechanism changed*/
+MV_STATUS   mvPp2V0PlcrHwHwfThreshSet(int idx, int threshold)
+{
+	MV_U32 regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_V0_PLCR_EDROP_HWF_TR_REG(idx));
+	regVal &= ~MV_PP2_V0_PLCR_EDROP_TR_ALL_MASK(idx);
+	regVal |= MV_PP2_V0_PLCR_EDROP_TR_MASK(idx, threshold);
+	mvPp2WrReg(MV_PP2_V0_PLCR_EDROP_HWF_TR_REG(idx), regVal);
+
+	return MV_OK;
+}
+
+/*ppv2.1 policer early drop threshold mechanism changed*/
+MV_STATUS   mvPp2V1PlcrHwHwfThreshSet(int idx, int threshold)
+{
+	mvPp2WrReg(MV_PP2_V1_PLCR_EDROP_HWF_TR_REG(idx), threshold);
+
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PlcrHwRxqThreshSet(int rxq, int idx)
+{
+	mvPp2WrReg(MV_PP2_PLCR_EDROP_RXQ_REG, rxq);
+	mvPp2WrReg(MV_PP2_PLCR_EDROP_RXQ_TR_REG, idx);
+
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PlcrHwTxqThreshSet(int txq, int idx)
+{
+	mvPp2WrReg(MV_PP2_PLCR_EDROP_TXQ_REG, txq);
+	mvPp2WrReg(MV_PP2_PLCR_EDROP_TXQ_TR_REG, idx);
+
+	return MV_OK;
+}
+
+void mvPp2V1PlcrTbCntDump(int plcr)
+{
+	mvPp2PrintReg2(MV_PP2_V1_PLCR_PKT_GREEN_REG(plcr), "MV_PP2_V1_PLCR_PKT_GREEN_REG", plcr);
+	mvPp2PrintReg2(MV_PP2_V1_PLCR_PKT_YELLOW_REG(plcr), "MV_PP2_V1_PLCR_PKT_YELLOW_REG", plcr);
+	mvPp2PrintReg2(MV_PP2_V1_PLCR_PKT_RED_REG(plcr), "MV_PP2_V1_PLCR_PKT_RED_REG", plcr);
+
+}
+
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/plcr/mvPp2PlcrHw.h b/drivers/net/ethernet/mvebu_net/pp2/hal/plcr/mvPp2PlcrHw.h
new file mode 100644
index 000000000000..3bd653685785
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/plcr/mvPp2PlcrHw.h
@@ -0,0 +1,240 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __mvPp2PlcrHw_h__
+#define __mvPp2PlcrHw_h__
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+
+#ifdef CONFIG_MV_ETH_PP2_1
+#define MV_PP2_PLCR_NUM		48
+#else
+#define MV_PP2_PLCR_NUM		16
+#endif
+
+/*********************************** RX Policer Registers *******************/
+/* exist only in ppv2.0 */
+#define MV_PP2_PLCR_ENABLE_REG			(MV_PP2_REG_BASE + 0x1300)
+
+#define MV_PP2_PLCR_EN_OFFS			0
+#define MV_PP2_PLCR_EN_ALL_MASK			(((1 << MV_PP2_PLCR_NUM) - 1) << MV_PP2_PLCR_EN_OFFS)
+#define MV_PP2_PLCR_EN_MASK(plcr)		((1 << (plcr)) << MV_PP2_PLCR_EN_OFFS)
+/*---------------------------------------------------------------------------------------------*/
+
+#define MV_PP2_PLCR_BASE_PERIOD_REG		(MV_PP2_REG_BASE + 0x1304)
+
+#define MV_PP2_PLCR_BASE_PERIOD_OFFS		0
+#define MV_PP2_PLCR_BASE_PERIOD_BITS		16
+#define MV_PP2_PLCR_BASE_PERIOD_ALL_MASK	\
+		(((1 << MV_PP2_PLCR_BASE_PERIOD_BITS) - 1) << MV_PP2_PLCR_BASE_PERIOD_OFFS)
+#define MV_PP2_PLCR_BASE_PERIOD_MASK(p)		\
+		(((p) << MV_PP2_PLCR_BASE_PERIOD_OFFS) & MV_PP2_PLCR_BASE_PERIOD_ALL_MASK)
+
+#define MV_PP2_PLCR_ADD_TOKENS_EN_BIT		16
+#define MV_PP2_PLCR_ADD_TOKENS_EN_MASK		(1 << MV_PP2_PLCR_ADD_TOKENS_EN_BIT)
+/*---------------------------------------------------------------------------------------------*/
+#define MV_PP2_PLCR_MODE_REG			(MV_PP2_REG_BASE + 0x1308)
+#define MV_PP2_PLCR_MODE_BITS			(3)
+#define MV_PP2_PLCR_MODE_MASK			(((1 << MV_PP2_PLCR_MODE_BITS) - 1) << 0)
+
+/*---------------------------------------------------------------------------------------------*/
+/* exist only in ppv2.1*/
+#define MV_PP2_PLCR_TABLE_INDEX_REG		(MV_PP2_REG_BASE + 0x130c)
+#define MV_PP2_PLCR_COMMIT_TOKENS_REG		(MV_PP2_REG_BASE + 0x1310)
+#define MV_PP2_PLCR_EXCESS_TOKENS_REG		(MV_PP2_REG_BASE + 0x1314)
+/*---------------------------------------------------------------------------------------------*/
+
+#define MV_PP2_PLCR_BUCKET_SIZE_REG		(MV_PP2_REG_BASE + 0x1318)
+
+#define MV_PP2_PLCR_COMMIT_SIZE_OFFS		0
+#define MV_PP2_PLCR_COMMIT_SIZE_BITS		16
+#define MV_PP2_PLCR_COMMIT_SIZE_ALL_MASK	\
+		(((1 << MV_PP2_PLCR_COMMIT_SIZE_BITS) - 1) << MV_PP2_PLCR_COMMIT_SIZE_OFFS)
+#define MV_PP2_PLCR_COMMIT_SIZE_MASK(size)	\
+		(((size) << MV_PP2_PLCR_COMMIT_SIZE_OFFS) & MV_PP2_PLCR_COMMIT_SIZE_ALL_MASK)
+
+#define MV_PP2_PLCR_EXCESS_SIZE_OFFS		16
+#define MV_PP2_PLCR_EXCESS_SIZE_BITS		16
+#define MV_PP2_PLCR_EXCESS_SIZE_ALL_MASK	\
+		(((1 << MV_PP2_PLCR_EXCESS_SIZE_BITS) - 1) << MV_PP2_PLCR_EXCESS_SIZE_OFFS)
+#define MV_PP2_PLCR_EXCESS_SIZE_MASK(size)	\
+		(((size) << MV_PP2_PLCR_EXCESS_SIZE_OFFS) & MV_PP2_PLCR_EXCESS_SIZE_ALL_MASK)
+/*---------------------------------------------------------------------------------------------*/
+
+#define MV_PP2_PLCR_TOKEN_CFG_REG		(MV_PP2_REG_BASE + 0x131c)
+
+#define MV_PP2_PLCR_TOKEN_VALUE_OFFS		0
+#define MV_PP2_PLCR_TOKEN_VALUE_BITS		10
+#define MV_PP2_PLCR_TOKEN_VALUE_ALL_MASK	\
+		(((1 << MV_PP2_PLCR_TOKEN_VALUE_BITS) - 1) << MV_PP2_PLCR_TOKEN_VALUE_OFFS)
+#define MV_PP2_PLCR_TOKEN_VALUE_MASK(val)	\
+		(((val) << MV_PP2_PLCR_TOKEN_VALUE_OFFS) & MV_PP2_PLCR_TOKEN_VALUE_ALL_MASK)
+
+#define MV_PP2_PLCR_TOKEN_TYPE_OFFS		12
+#define MV_PP2_PLCR_TOKEN_TYPE_BITS		3
+#define MV_PP2_PLCR_TOKEN_TYPE_ALL_MASK		\
+		(((1 << MV_PP2_PLCR_TOKEN_TYPE_BITS) - 1) << MV_PP2_PLCR_TOKEN_TYPE_OFFS)
+#define MV_PP2_PLCR_TOKEN_TYPE_MASK(type)	\
+		(((type) << MV_PP2_PLCR_TOKEN_TYPE_OFFS) & MV_PP2_PLCR_TOKEN_TYPE_ALL_MASK)
+
+#define MV_PP2_PLCR_TOKEN_UNIT_BIT		31
+#define MV_PP2_PLCR_TOKEN_UNIT_MASK		(1 << MV_PP2_PLCR_TOKEN_UNIT_BIT)
+#define MV_PP2_PLCR_TOKEN_UNIT_BYTES		(0 << MV_PP2_PLCR_TOKEN_UNIT_BIT)
+#define MV_PP2_PLCR_TOKEN_UNIT_PKTS		(1 << MV_PP2_PLCR_TOKEN_UNIT_BIT)
+
+#define MV_PP2_PLCR_COLOR_MODE_BIT		30
+#define MV_PP2_PLCR_COLOR_MODE_MASK		(1 << MV_PP2_PLCR_COLOR_MODE_BIT)
+#define MV_PP2_PLCR_COLOR_MODE_BLIND		(0 << MV_PP2_PLCR_COLOR_MODE_BIT)
+#define MV_PP2_PLCR_COLOR_MODE_AWARE		(1 << MV_PP2_PLCR_COLOR_MODE_BIT)
+
+#define MV_PP2_PLCR_ENABLE_BIT			29
+#define MV_PP2_PLCR_ENABLE_MASK			(1 << MV_PP2_PLCR_ENABLE_BIT)
+/*---------------------------------------------------------------------------------------------*/
+
+#define MV_PP2_PLCR_MIN_PKT_LEN_REG		(MV_PP2_REG_BASE + 0x1320)
+
+#define MV_PP2_PLCR_MIN_PKT_LEN_OFFS		0
+#define MV_PP2_PLCR_MIN_PKT_LEN_BITS		8
+#define MV_PP2_PLCR_MIN_PKT_LEN_ALL_MASK	\
+		(((1 << MV_PP2_PLCR_MIN_PKT_LEN_BITS) - 1) << MV_PP2_PLCR_MIN_PKT_LEN_OFFS)
+#define MV_PP2_PLCR_MIN_PKT_LEN_MASK(len)	\
+		(((len) << MV_PP2_PLCR_MIN_PKT_LEN_OFFS) & MV_PP2_PLCR_MIN_PKT_LEN_ALL_MASK)
+/*---------------------------------------------------------------------------------------------*/
+
+#define MV_PP2_PLCR_EDROP_EN_REG		(MV_PP2_REG_BASE + 0x1330)
+
+#define MV_PP2_PLCR_EDROP_EN_BIT		0
+#define MV_PP2_PLCR_EDROP_EN_MASK		(1 << MV_PP2_PLCR_EDROP_EN_BIT)
+/*---------------------------------------------------------------------------------------------*/
+/*ppv2.1 policer early drop threshold mechanism changed*/
+#define MV_PP2_V0_PLCR_EDROP_THRESH_NUM		4
+
+#define MV_PP2_V0_PLCR_EDROP_TR_OFFS(i)		((i % 2) ? 16 : 0)
+#define MV_PP2_V0_PLCR_EDROP_TR_BITS		14
+#define MV_PP2_V0_PLCR_EDROP_TR_ALL_MASK(i)	\
+		(((1 << MV_PP2_V0_PLCR_EDROP_TR_BITS) - 1) << MV_PP2_V0_PLCR_EDROP_TR_OFFS(i))
+#define MV_PP2_V0_PLCR_EDROP_TR_MASK(i, tr)	\
+		(((tr) << MV_PP2_V0_PLCR_EDROP_TR_OFFS(i)) & MV_PP2_V0_PLCR_EDROP_TR_ALL_MASK(i))
+
+#define MV_PP2_V0_PLCR_EDROP_CPU_TR_REG(i)	(MV_PP2_REG_BASE + 0x1340 + (((i) / 2) << 2))
+#define MV_PP2_V0_PLCR_EDROP_HWF_TR_REG(i)	(MV_PP2_REG_BASE + 0x1350 + (((i) / 2) << 2))
+/*---------------------------------------------------------------------------------------------*/
+/*ppv2.1 policer early drop threshold new mechanism*/
+#define MV_PP2_V1_PLCR_EDROP_THRESH_NUM		16
+
+#define MV_PP2_V1_PLCR_EDROP_TR_OFFS		0
+#define MV_PP2_V1_PLCR_EDROP_TR_BITS		14
+
+#define MV_PP2_V1_PLCR_EDROP_TR_MASK(i)		\
+		(((1 << MV_PP2_V1_PLCR_EDROP_TR_BITS) - 1) << MV_PP2_V1_PLCR_EDROP_TR_OFFS)
+
+#define MV_PP2_V1_PLCR_EDROP_CPU_TR_REG(i)	(MV_PP2_REG_BASE + 0x1380 + ((i) * 4))
+#define MV_PP2_V1_PLCR_EDROP_HWF_TR_REG(i)	(MV_PP2_REG_BASE + 0x13c0 + ((i) * 4))
+
+/*---------------------------------------------------------------------------------------------*/
+
+#define MV_PP2_PLCR_EDROP_RXQ_REG		(MV_PP2_REG_BASE + 0x1348)
+#define MV_PP2_PLCR_EDROP_RXQ_TR_REG		(MV_PP2_REG_BASE + 0x134c)
+/*---------------------------------------------------------------------------------------------*/
+
+#define MV_PP2_PLCR_EDROP_TXQ_REG		(MV_PP2_REG_BASE + 0x1358)
+#define MV_PP2_PLCR_EDROP_TXQ_TR_REG		(MV_PP2_REG_BASE + 0x135c)
+/*---------------------------------------------------------------------------------------------*/
+#define MV_PP2_V1_PLCR_PKT_GREEN_REG(pol)	(MV_PP2_REG_BASE + 0x7400 + 4 * (pol))
+#define MV_PP2_V1_PLCR_PKT_YELLOW_REG(pol)	(MV_PP2_REG_BASE + 0x7500 + 4 * (pol))
+#define MV_PP2_V1_PLCR_PKT_RED_REG(pol)		(MV_PP2_REG_BASE + 0x7600 + 4 * (pol))
+/*---------------------------------------------------------------------------------------------*/
+
+/* Policer APIs */
+void        mvPp2PlcrHwRegs(void);
+void        mvPp2PlcrHwDumpAll(void);
+void        mvPp2PlcrHwDumpSingle(int plcr);
+void        mvPp2V1PlcrTbCntDump(int plcr);
+MV_STATUS   mvPp2PlcrHwBasePeriodSet(int period);
+MV_STATUS   mvPp2PlcrHwBaseRateGenEnable(int enable);
+MV_STATUS   mvPp2PlcrHwEnable(int plcr, int enable);
+MV_STATUS   mvPp2PlcrHwMode(int mode);
+MV_STATUS   mvPp2PlcrHwMinPktLen(int bytes);
+MV_STATUS   mvPp2PlcrHwEarlyDropSet(int enable);
+MV_STATUS   mvPp2PlcrHwTokenConfig(int plcr, int unit, int type);
+MV_STATUS   mvPp2PlcrHwTokenValue(int plcr, int value);
+MV_STATUS   mvPp2PlcrHwColorModeSet(int plcr, int enable);
+MV_STATUS   mvPp2PlcrHwBucketSizeSet(int plcr, int commit, int excess);
+
+/*ppv2.1 policer early drop threshold mechanism changed*/
+MV_STATUS   mvPp2V0PlcrHwCpuThreshSet(int idx, int threshold);
+MV_STATUS   mvPp2V0PlcrHwHwfThreshSet(int idx, int threshold);
+MV_STATUS   mvPp2V1PlcrHwCpuThreshSet(int idx, int threshold);
+MV_STATUS   mvPp2V1PlcrHwHwfThreshSet(int idx, int threshold);
+MV_STATUS   mvPp2PlcrHwRxqThreshSet(int rxq, int idx);
+MV_STATUS   mvPp2PlcrHwTxqThreshSet(int txq, int idx);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __mvPp2PlcrHw_h__ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/pme/mvPp2PmeHw.c b/drivers/net/ethernet/mvebu_net/pp2/hal/pme/mvPp2PmeHw.c
new file mode 100644
index 000000000000..386a6ab32c99
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/pme/mvPp2PmeHw.c
@@ -0,0 +1,673 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCommon.h"  /* Should be included before mvSysHwConfig */
+#include "mvTypes.h"
+#include "mvDebug.h"
+#include "mvOs.h"
+
+#include "common/mvPp2Common.h"
+#include "gbe/mvPp2Gbe.h"
+#include "mvPp2PmeHw.h"
+
+
+/* #define PME_DBG mvOsPrintf */
+#define PME_DBG(X...)
+
+static char *mvPp2PmeCmdName(enum MV_PP2_PME_CMD_E cmd)
+{
+	switch (cmd) {
+	case MV_PP2_PME_CMD_NONE:		return "NO_MOD";
+	case MV_PP2_PME_CMD_ADD_2B:		return "ADD_2B";
+	case MV_PP2_PME_CMD_CFG_VLAN:	return "CFG_VLAN";
+	case MV_PP2_PME_CMD_ADD_VLAN:	return "ADD_VLAN";
+	case MV_PP2_PME_CMD_CFG_DSA_1:	return "CFG_DSA_1";
+	case MV_PP2_PME_CMD_CFG_DSA_2:	return "CFG_DSA_2";
+	case MV_PP2_PME_CMD_ADD_DSA:	return "ADD_DSA";
+	case MV_PP2_PME_CMD_DEL_BYTES:	return "DEL_BYTES";
+	case MV_PP2_PME_CMD_REPLACE_2B: return "REPLACE_2B";
+	case MV_PP2_PME_CMD_REPLACE_LSB: return "REPLACE_LSB";
+	case MV_PP2_PME_CMD_REPLACE_MSB: return "REPLACE_MSB";
+	case MV_PP2_PME_CMD_REPLACE_VLAN: return "REPLACE_VLAN";
+	case MV_PP2_PME_CMD_DEC_LSB:	return "DEC_LSB";
+	case MV_PP2_PME_CMD_DEC_MSB:	return "DEC_MSB";
+	case MV_PP2_PME_CMD_ADD_CALC_LEN: return "ADD_CALC_LEN";
+	case MV_PP2_PME_CMD_REPLACE_LEN: return "REPLACE_LEN";
+	case MV_PP2_PME_CMD_IPV4_CSUM:	return "IPV4_CSUM";
+	case MV_PP2_PME_CMD_L4_CSUM:	return "L4_CSUM";
+	case MV_PP2_PME_CMD_SKIP:		return "SKIP";
+	case MV_PP2_PME_CMD_JUMP:		return "JUMP";
+	case MV_PP2_PME_CMD_JUMP_SKIP:	return "JUMP_SKIP";
+	case MV_PP2_PME_CMD_JUMP_SUB:	return "JUMP_SUB";
+	case MV_PP2_PME_CMD_PPPOE:		return "PPPOE";
+	case MV_PP2_PME_CMD_STORE:		return "STORE";
+	case MV_PP2_PME_CMD_ADD_IP4_CSUM: return "ADD_L4";
+	case MV_PP2_PME_CMD_PPPOE_2:	return "PPPOE_2";
+	case MV_PP2_PME_CMD_REPLACE_MID: return "REPLACE_MID";
+	case MV_PP2_PME_CMD_ADD_MULT:	return "ADD_MULT";
+	case MV_PP2_PME_CMD_REPLACE_MULT: return "REPLACE_MULT";
+	case MV_PP2_PME_CMD_REPLACE_REM_2B: return "REPLACE_REM_2B"; /* For PPv2.1 - A0 only, MAS 3.3 */
+	case MV_PP2_PME_CMD_ADD_IP6_HDR: return "ADD_IP6_HDR";       /* For PPv2.1 - A0 only, MAS 3.15 */
+	case MV_PP2_PME_CMD_DROP_PKT:	return "DROP";
+	default:
+		return "UNKNOWN";
+	}
+	return NULL;
+};
+
+static 	int mvPp2PmeDataTblSize(int tbl)
+{
+	int max;
+
+	switch (tbl) {
+	case 0:
+		max = MV_PP2_PME_DATA1_SIZE;
+		break;
+	case 1:
+		max = MV_PP2_PME_DATA2_SIZE;
+		break;
+	default:
+		max = 0;
+		mvOsPrintf("%s: tbl %d is out of range [0..1]\n", __func__, tbl);
+	}
+	return max;
+}
+
+static inline MV_U32	mvPp2PmeDataTblRegAddr(int tbl)
+{
+	MV_U32 regAddr;
+
+	switch (tbl) {
+	case 0:
+		regAddr = MV_PP2_PME_TBL_DATA1_REG;
+		break;
+	case 1:
+		regAddr = MV_PP2_PME_TBL_DATA2_REG;
+		break;
+	default:
+		regAddr = 0;
+		mvOsPrintf("%s: tbl %d is out of range [0..1]\n", __func__, tbl);
+	}
+	return regAddr;
+}
+
+/*******************************************************************************
+* mvPp2PmeHwRegs - Print PME hardware registers
+*
+*******************************************************************************/
+void        mvPp2PmeHwRegs(void)
+{
+	int    i;
+	MV_U32 regVal;
+
+	mvOsPrintf("\n[PME registers]\n");
+
+	mvPp2PrintReg(MV_PP2_PME_TBL_IDX_REG, "MV_PP2_PME_TBL_IDX_REG");
+	mvPp2PrintReg(MV_PP2_PME_TCONT_THRESH_REG, "MV_PP2_PME_TCONT_THRESH_REG");
+	mvPp2PrintReg(MV_PP2_PME_MTU_REG, "MV_PP2_PME_MTU_REG");
+
+	for (i = 0; i < MV_PP2_PME_MAX_VLAN_ETH_TYPES; i++)
+		mvPp2PrintReg2(MV_PP2_PME_VLAN_ETH_TYPE_REG(i), "MV_PP2_PME_VLAN_ETH_TYPE_REG", i);
+
+	mvPp2PrintReg(MV_PP2_PME_DEF_VLAN_CFG_REG, "MV_PP2_PME_DEF_VLAN_CFG_REG");
+	for (i = 0; i < MV_PP2_PME_MAX_DSA_ETH_TYPES; i++)
+		mvPp2PrintReg2(MV_PP2_PME_DEF_DSA_CFG_REG(i), "MV_PP2_PME_DEF_DSA_CFG_REG", i);
+
+	mvPp2PrintReg(MV_PP2_PME_DEF_DSA_SRC_DEV_REG, "MV_PP2_PME_DEF_DSA_SRC_DEV_REG");
+	mvPp2PrintReg(MV_PP2_PME_TTL_ZERO_FRWD_REG, "MV_PP2_PME_TTL_ZERO_FRWD_REG");
+	mvPp2PrintReg(MV_PP2_PME_PPPOE_ETYPE_REG, "MV_PP2_PME_PPPOE_ETYPE_REG");
+	mvPp2PrintReg(MV_PP2_PME_PPPOE_DATA_REG, "MV_PP2_PME_PPPOE_DATA_REG");
+	mvPp2PrintReg(MV_PP2_PME_PPPOE_LEN_REG, "MV_PP2_PME_PPPOE_LEN_REG");
+	mvPp2PrintReg(MV_PP2_PME_PPPOE_PROTO_REG, "MV_PP2_PME_PPPOE_PROTO_REG");
+	mvPp2PrintReg(MV_PP2_PME_CONFIG_REG, "MV_PP2_PME_CONFIG_REG");
+	mvPp2PrintReg(MV_PP2_PME_STATUS_1_REG, "MV_PP2_PME_STATUS_1_REG");
+
+	mvOsPrintf("\nMV_PP2_PME_STATUS_2_REG[txp] registers that are not zero\n");
+	for (i = 0; i < MV_PP2_TOTAL_TXP_NUM; i++) {
+		regVal = mvPp2RdReg(MV_PP2_PME_STATUS_2_REG(i));
+		if (regVal != 0)
+			mvOsPrintf("%-32s[%2d]: 0x%x = 0x%08x\n",
+				"MV_PP2_PME_STATUS_2_REG", i, MV_PP2_PME_STATUS_2_REG(i), regVal);
+	}
+
+	mvOsPrintf("\nMV_PP2_PME_STATUS_3_REG[txp] registers that are not zero\n");
+	for (i = 0; i < MV_PP2_TOTAL_TXP_NUM; i++) {
+		regVal = mvPp2RdReg(MV_PP2_PME_STATUS_3_REG(i));
+		if (regVal != 0)
+			mvOsPrintf("%-32s[%2d]: 0x%x = 0x%08x\n",
+				"MV_PP2_PME_STATUS_3_REG", i, MV_PP2_PME_STATUS_3_REG(i), regVal);
+	}
+}
+
+/*******************************************************************************
+* mvPp2PmeHwWrite - Write PME entry to the hardware
+*
+* INPUT:
+*       int			idx	- PME entry index to write to
+*       MV_PP2_PME_ENTRY	*pEntry - PME software entry to be written
+*
+* RETURN:   MV_STATUS
+*               MV_OK - Success, Others - Failure
+*******************************************************************************/
+MV_STATUS   mvPp2PmeHwWrite(int idx, MV_PP2_PME_ENTRY *pEntry)
+{
+	if ((idx < 0) || (idx >= MV_PP2_PME_INSTR_SIZE)) {
+		mvOsPrintf("%s: entry %d is out of range [0..%d]\n", __func__, idx, MV_PP2_PME_INSTR_SIZE);
+		return MV_OUT_OF_RANGE;
+	}
+	pEntry->index = idx;
+	mvPp2WrReg(MV_PP2_PME_TBL_IDX_REG, idx);
+	mvPp2WrReg(MV_PP2_PME_TBL_INSTR_REG, pEntry->word);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvPp2PmeHwRead - Read PME entry from the hardware
+*
+* INPUT:
+*       int	idx	- PME entry index to write to
+* OUTPUT:
+*       MV_PP2_PME_ENTRY	*pEntry - PME software entry to be read into
+*
+* RETURN:   MV_STATUS
+*               MV_OK - Success, Others - Failure
+*******************************************************************************/
+MV_STATUS mvPp2PmeHwRead(int idx, MV_PP2_PME_ENTRY *pEntry)
+{
+	if ((idx < 0) || (idx >= MV_PP2_PME_INSTR_SIZE)) {
+		mvOsPrintf("%s: entry %d is out of range [0..%d]\n", __func__, idx, MV_PP2_PME_INSTR_SIZE);
+		return MV_OUT_OF_RANGE;
+	}
+
+	pEntry->index = idx;
+	mvPp2WrReg(MV_PP2_PME_TBL_IDX_REG, idx);
+	pEntry->word = mvPp2RdReg(MV_PP2_PME_TBL_INSTR_REG);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvPp2PmeHwInv - Invalidate single PME entry in the hardware
+*
+* INPUT:
+*       int	idx	- PME entry index to be invaliadted
+*
+* RETURN:   MV_STATUS
+*               MV_OK - Success, Others - Failure
+*******************************************************************************/
+MV_STATUS   mvPp2PmeHwInv(int idx)
+{
+	MV_PP2_PME_ENTRY	entry;
+
+	if ((idx < 0) || (idx >= MV_PP2_PME_INSTR_SIZE)) {
+		mvOsPrintf("%s: entry %d is out of range [0..%d]\n", __func__, idx, MV_PP2_PME_INSTR_SIZE);
+		return MV_OUT_OF_RANGE;
+	}
+	mvPp2PmeSwClear(&entry);
+
+	return mvPp2PmeHwWrite(idx, &entry);
+}
+
+/*******************************************************************************
+* mvPp2PmeHwInvAll - Invalidate all PME entries in the hardware
+*
+* INPUT:
+*
+* RETURN:   MV_STATUS
+*               MV_OK - Success, Others - Failure
+*******************************************************************************/
+MV_STATUS   mvPp2PmeHwInvAll(void)
+{
+	int	idx;
+
+	for (idx = 0; idx < MV_PP2_PME_INSTR_SIZE; idx++)
+		mvPp2PmeHwInv(idx);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvPp2PmeHwInit - Init TX Packet Modification driver
+*
+* INPUT:
+*
+* RETURN:   MV_STATUS
+*               MV_OK - Success, Others - Failure
+*******************************************************************************/
+MV_STATUS   mvPp2PmeHwInit(void)
+{
+	mvPp2PmeHwInvAll();
+	mvPp2PmeHwDataTblClear(0);
+	mvPp2PmeHwDataTblClear(1);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvPp2PmeSwDump - Print PME software entry
+*
+* INPUT:
+*       MV_PP2_PME_ENTRY*    pEntry - PME software entry to be printed
+*
+* RETURN:   void
+*******************************************************************************/
+MV_STATUS	mvPp2PmeSwDump(MV_PP2_PME_ENTRY *pEntry)
+{
+	mvOsPrintf("%04x %04x: ",
+		MV_PP2_PME_CTRL_GET(pEntry), MV_PP2_PME_DATA_GET(pEntry));
+
+	mvOsPrintf("%s ", mvPp2PmeCmdName(MV_PP2_PME_CMD_GET(pEntry)));
+
+	if (pEntry->word & MV_PP2_PME_IP4_CSUM_MASK)
+		mvOsPrintf(", IPv4 csum");
+
+	if (pEntry->word & MV_PP2_PME_L4_CSUM_MASK)
+		mvOsPrintf(", L4 csum");
+
+	if (pEntry->word & MV_PP2_PME_LAST_MASK)
+		mvOsPrintf(", Last");
+
+	mvOsPrintf("\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvPp2PmeHwDump - Dump PME hardware entries
+*
+* INPUT:
+*       int	mode   -
+*
+* RETURN:   void
+*******************************************************************************/
+MV_STATUS	mvPp2PmeHwDump(int mode)
+{
+	int					idx, count = 0;
+	MV_PP2_PME_ENTRY 	entry;
+	MV_STATUS			status;
+
+	mvOsPrintf("PME instraction table: #%d entries\n", MV_PP2_PME_INSTR_SIZE);
+	for (idx = 0; idx < MV_PP2_PME_INSTR_SIZE; idx++) {
+		status = mvPp2PmeHwRead(idx, &entry);
+		if (status != MV_OK) {
+			mvOsPrintf("%s failed: idx=%d, status=%d\n",
+					__func__, idx, status);
+			return status;
+		}
+		if (mode == 0) {
+			if (!MV_PP2_PME_IS_VALID(&entry))
+				continue;
+		}
+
+		count++;
+		mvOsPrintf("[%4d]: ", idx);
+		mvPp2PmeSwDump(&entry);
+	}
+
+	if (!count)
+		mvOsPrintf("Table is Empty\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvPp2PmeEntryPrint - Print PME entry
+*
+* INPUT:
+*       MV_PP2_PME_ENTRY	*pEntry - PME entry to be printed
+*
+* RETURN:   MV_STATUS
+*******************************************************************************/
+MV_STATUS   mvPp2PmeSwClear(MV_PP2_PME_ENTRY *pEntry)
+{
+	pEntry->word = 0;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvPp2PmeSwWordSet - Set 4 bytes data to software PME entry
+*
+* INPUT:
+*       MV_PP2_PME_ENTRY*    pEntry - PME entry to be set
+*		MV_U32               word   - 4 bytes of data to be set
+*
+* RETURN:   MV_STATUS
+*******************************************************************************/
+MV_STATUS   mvPp2PmeSwWordSet(MV_PP2_PME_ENTRY *pEntry, MV_U32 word)
+{
+	pEntry->word = word;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvPp2PmeSwCmdSet - Set modification command to software PME instruction entry
+*
+* INPUT:
+*       MV_PP2_PME_ENTRY*     pEntry - PME entry to be set
+*		enum MV_PP2_PME_CMD_E cmd    - modification command to be set
+*
+* RETURN:   MV_STATUS
+*******************************************************************************/
+MV_STATUS   mvPp2PmeSwCmdSet(MV_PP2_PME_ENTRY *pEntry, enum MV_PP2_PME_CMD_E cmd)
+{
+	pEntry->word &= ~MV_PP2_PME_CMD_ALL_MASK;
+	pEntry->word |= MV_PP2_PME_CMD_MASK(cmd);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mvPp2PmeSwCmdTypeSet - Set modification command type to software PME instruction entry
+*
+* INPUT:
+*       MV_PP2_PME_ENTRY*     pEntry - PME entry to be set
+*		int                   type    - modification command type to be set
+*
+* RETURN:   MV_STATUS
+*******************************************************************************/
+MV_STATUS   mvPp2PmeSwCmdTypeSet(MV_PP2_PME_ENTRY *pEntry, int type)
+{
+	pEntry->word &= ~MV_PP2_PME_CMD_TYPE_ALL_MASK;
+	pEntry->word |= MV_PP2_PME_CMD_TYPE_MASK(type);
+
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PmeSwCmdLastSet(MV_PP2_PME_ENTRY *pEntry, int last)
+{
+	if (last)
+		pEntry->word |= MV_PP2_PME_LAST_MASK;
+	else
+		pEntry->word &= ~MV_PP2_PME_LAST_MASK;
+
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PmeSwCmdFlagsSet(MV_PP2_PME_ENTRY *pEntry, int last, int ipv4, int l4)
+{
+	if (last)
+		pEntry->word |= MV_PP2_PME_LAST_MASK;
+	else
+		pEntry->word &= ~MV_PP2_PME_LAST_MASK;
+
+	if (ipv4)
+		pEntry->word |= MV_PP2_PME_IP4_CSUM_MASK;
+	else
+		pEntry->word &= ~MV_PP2_PME_IP4_CSUM_MASK;
+
+	if (l4)
+		pEntry->word |= MV_PP2_PME_L4_CSUM_MASK;
+	else
+		pEntry->word &= ~MV_PP2_PME_L4_CSUM_MASK;
+
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PmeSwCmdDataSet(MV_PP2_PME_ENTRY *pEntry, MV_U16 data)
+{
+	MV_PP2_PME_DATA_SET(pEntry, data);
+	return MV_OK;
+}
+
+/* Functions to access PME data1 and data2 tables */
+MV_STATUS   mvPp2PmeHwDataTblWrite(int tbl, int idx, MV_U16 data)
+{
+	MV_U32  regVal;
+
+	if ((tbl < 0) || (tbl > 1)) {
+		mvOsPrintf("%s: data table %d is out of range [0..1]\n", __func__, tbl);
+		return MV_BAD_PARAM;
+	}
+	if ((idx < 0) || (idx >= mvPp2PmeDataTblSize(tbl))) {
+		mvOsPrintf("%s: entry index #%d is out of range [0..%d] for data table #%d\n",
+					__func__, idx, tbl, mvPp2PmeDataTblSize(tbl));
+		return MV_FALSE;
+	}
+
+	mvPp2WrReg(MV_PP2_PME_TBL_IDX_REG, idx / 2);
+
+	regVal = mvPp2RdReg(mvPp2PmeDataTblRegAddr(tbl));
+	regVal &= ~MV_PP2_PME_TBL_DATA_MASK(idx % 2);
+	regVal |= (data << MV_PP2_PME_TBL_DATA_OFFS(idx % 2));
+
+	mvPp2WrReg(MV_PP2_PME_TBL_IDX_REG, idx / 2);
+	mvPp2WrReg(mvPp2PmeDataTblRegAddr(tbl), regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PmeHwDataTblRead(int tbl, int idx, MV_U16 *data)
+{
+	MV_U32  regVal;
+
+	if ((tbl < 0) || (tbl > 1)) {
+		mvOsPrintf("%s: data table %d is out of range [0..1]\n", __func__, tbl);
+		return MV_BAD_PARAM;
+	}
+	if ((idx < 0) || (idx >= mvPp2PmeDataTblSize(tbl))) {
+		mvOsPrintf("%s: entry index #%d is out of range [0..%d] for data table #%d\n",
+					__func__, idx, tbl, mvPp2PmeDataTblSize(tbl));
+		return MV_FALSE;
+	}
+
+	mvPp2WrReg(MV_PP2_PME_TBL_IDX_REG, idx / 2);
+
+	regVal = mvPp2RdReg(mvPp2PmeDataTblRegAddr(tbl));
+
+	if (data)
+		*data = (regVal & MV_PP2_PME_TBL_DATA_MASK(idx % 2)) >> MV_PP2_PME_TBL_DATA_OFFS(idx % 2);
+
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PmeHwDataTblDump(int tbl)
+{
+	int idx, max, count = 0;
+	MV_U16 data;
+
+	if ((tbl < 0) || (tbl > 1)) {
+		mvOsPrintf("%s: data table %d is out of range [0..1]\n", __func__, tbl);
+		return MV_BAD_PARAM;
+	}
+	max = mvPp2PmeDataTblSize(tbl);
+
+	mvOsPrintf("PME Data%d table: #%d entries\n", tbl + 1, max);
+	for (idx = 0; idx < max; idx++) {
+		mvPp2PmeHwDataTblRead(tbl, idx, &data);
+		if (data != 0) {
+			mvOsPrintf("[%4d]: 0x%04x\n", idx, data);
+			count++;
+		}
+	}
+	if (count == 0)
+		mvOsPrintf("Table is Empty\n");
+
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PmeHwDataTblClear(int tbl)
+{
+	int max, idx;
+
+	if ((tbl < 0) || (tbl > 1)) {
+		mvOsPrintf("%s: data table %d is out of range [0..1]\n", __func__, tbl);
+		return MV_BAD_PARAM;
+	}
+
+	max = mvPp2PmeDataTblSize(tbl);
+	for (idx = 0; idx < max; idx++)
+		mvPp2PmeHwDataTblWrite(tbl, idx, 0);
+
+	return MV_OK;
+}
+
+/* Functions to set other PME register fields */
+MV_STATUS   mvPp2PmeVlanEtherTypeSet(int idx, MV_U16 ethertype)
+{
+	MV_U32 regVal = (MV_U32)ethertype;
+
+	if ((idx < 0) || (idx > MV_PP2_PME_MAX_VLAN_ETH_TYPES)) {
+		mvOsPrintf("%s: idx %d is out of range [0..%d]\n", __func__, idx, MV_PP2_PME_MAX_VLAN_ETH_TYPES);
+		return MV_BAD_PARAM;
+	}
+	mvPp2WrReg(MV_PP2_PME_VLAN_ETH_TYPE_REG(idx), regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PmeVlanDefaultSet(MV_U16 ethertype)
+{
+	mvPp2WrReg(MV_PP2_PME_DEF_VLAN_CFG_REG, (MV_U32)ethertype);
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PmeDsaDefaultSet(int idx, MV_U16 ethertype)
+{
+	MV_U32 regVal = (MV_U32)ethertype;
+
+	if ((idx < 0) || (idx > MV_PP2_PME_MAX_DSA_ETH_TYPES)) {
+		mvOsPrintf("%s: idx %d is out of range [0..%d]\n", __func__, idx, MV_PP2_PME_MAX_DSA_ETH_TYPES);
+		return MV_BAD_PARAM;
+	}
+	mvPp2WrReg(MV_PP2_PME_DEF_DSA_CFG_REG(idx), regVal);
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PmeDsaSrcDevSet(MV_U8 src)
+{
+	MV_U32 regVal = 0;
+
+	regVal &= ~MV_PP2_PME_DSA_SRC_DEV_ALL_MASK;
+	regVal |= MV_PP2_PME_DSA_SRC_DEV_MASK(src);
+	mvPp2WrReg(MV_PP2_PME_DEF_DSA_SRC_DEV_REG, regVal);
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PmeTtlZeroSet(int forward)
+{
+	MV_U32 regVal = 0;
+
+	regVal |= MV_PP2_PME_TTL_ZERO_FRWD_MASK;
+	mvPp2WrReg(MV_PP2_PME_TTL_ZERO_FRWD_REG, regVal);
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PmePppoeEtypeSet(MV_U16 ethertype)
+{
+	mvPp2WrReg(MV_PP2_PME_PPPOE_ETYPE_REG, (MV_U32)ethertype);
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PmePppoeLengthSet(MV_U16 length)
+{
+	mvPp2WrReg(MV_PP2_PME_PPPOE_LEN_REG, (MV_U32)length);
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PmePppoeConfig(MV_U8 version, MV_U8 type, MV_U8 code)
+{
+	MV_U32 regVal = 0;
+
+	regVal |= MV_PP2_PME_PPPOE_VER_MASK(version);
+	regVal |= MV_PP2_PME_PPPOE_TYPE_MASK(type);
+	regVal |= MV_PP2_PME_PPPOE_CODE_MASK(code);
+
+	mvPp2WrReg(MV_PP2_PME_PPPOE_DATA_REG, regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PmePppoeProtoSet(int idx, MV_U16 protocol)
+{
+	MV_U32 regVal = 0;
+
+	if ((idx < 0) || (idx > 1)) {
+		mvOsPrintf("%s: idx %d is out of range [0..1]\n", __func__, idx);
+		return MV_BAD_PARAM;
+	}
+	regVal = mvPp2RdReg(MV_PP2_PME_PPPOE_PROTO_REG);
+
+	regVal &= ~MV_PP2_PME_PPPOE_PROTO_ALL_MASK(idx);
+	regVal |= MV_PP2_PME_PPPOE_PROTO_MASK(idx, protocol);
+	mvPp2WrReg(MV_PP2_PME_PPPOE_PROTO_REG, regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS   mvPp2PmeMaxConfig(int maxsize, int maxinstr, int errdrop)
+{
+	MV_U32 regVal = 0;
+
+	regVal |= MV_PP2_PME_MAX_INSTR_NUM_MASK(maxinstr);
+	regVal |= MV_PP2_PME_MAX_HDR_SIZE_MASK(maxsize);
+	if (errdrop)
+		regVal |= MV_PP2_PME_DROP_ON_ERR_MASK;
+
+	mvPp2WrReg(MV_PP2_PME_CONFIG_REG, regVal);
+
+	return MV_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/pme/mvPp2PmeHw.h b/drivers/net/ethernet/mvebu_net/pp2/hal/pme/mvPp2PmeHw.h
new file mode 100644
index 000000000000..4074cce2ccf1
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/pme/mvPp2PmeHw.h
@@ -0,0 +1,303 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+
+#ifndef __mvPp2PmeHw_h__
+#define __mvPp2PmeHw_h__
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+/* FIXME: move to ctrlSpec.h */
+#define MV_PP2_PME_INSTR_SIZE	2600
+#define MV_PP2_PME_DATA1_SIZE   (46 * 1024 / 2) /* 46KBytes = 23K data of 2 bytes */
+#define MV_PP2_PME_DATA2_SIZE   (4 * 1024 / 2) /* 4KBytes = 2K data of 2 bytes */
+
+/*************** TX Packet Modification Registers *******************/
+#define MV_PP2_PME_TBL_IDX_REG				(MV_PP2_REG_BASE + 0x8400)
+#define MV_PP2_PME_TBL_INSTR_REG			(MV_PP2_REG_BASE + 0x8480)
+#define MV_PP2_PME_TBL_DATA1_REG			(MV_PP2_REG_BASE + 0x8500)
+#define MV_PP2_PME_TBL_DATA2_REG			(MV_PP2_REG_BASE + 0x8580)
+#define MV_PP2_PME_TBL_STATUS_REG			(MV_PP2_REG_BASE + 0x8600)
+#define MV_PP2_PME_TCONT_THRESH_REG			(MV_PP2_REG_BASE + 0x8604)
+#define MV_PP2_PME_MTU_REG					(MV_PP2_REG_BASE + 0x8608)
+
+#define MV_PP2_PME_MAX_VLAN_ETH_TYPES		4
+#define MV_PP2_PME_VLAN_ETH_TYPE_REG(i)		(MV_PP2_REG_BASE + 0x8610 + ((i) << 2))
+/*---------------------------------------------------------------------------*/
+
+#define MV_PP2_PME_DEF_VLAN_CFG_REG			(MV_PP2_REG_BASE + 0x8620)
+/*---------------------------------------------------------------------------*/
+
+#define MV_PP2_PME_MAX_DSA_ETH_TYPES		2
+#define MV_PP2_PME_DEF_DSA_CFG_REG(i)		(MV_PP2_REG_BASE + 0x8624 + ((i) << 2))
+/*---------------------------------------------------------------------------*/
+
+#define MV_PP2_PME_DEF_DSA_SRC_DEV_REG		(MV_PP2_REG_BASE + 0x8630)
+#define MV_PP2_PME_DSA_SRC_DEV_OFFS			1
+#define MV_PP2_PME_DSA_SRC_DEV_BITS			4
+#define MV_PP2_PME_DSA_SRC_DEV_ALL_MASK		(((1 << MV_PP2_PME_DSA_SRC_DEV_BITS) - 1) << MV_PP2_PME_DSA_SRC_DEV_OFFS)
+#define MV_PP2_PME_DSA_SRC_DEV_MASK(dev)	((dev) << MV_PP2_PME_DSA_SRC_DEV_OFFS)
+/*---------------------------------------------------------------------------*/
+
+#define MV_PP2_PME_TTL_ZERO_FRWD_REG		(MV_PP2_REG_BASE + 0x8640)
+#define MV_PP2_PME_TTL_ZERO_FRWD_BIT		0
+#define MV_PP2_PME_TTL_ZERO_FRWD_MASK		(1 << MV_PP2_PME_TTL_ZERO_FRWD_BIT);
+/*---------------------------------------------------------------------------*/
+
+#define MV_PP2_PME_PPPOE_ETYPE_REG			(MV_PP2_REG_BASE + 0x8650)
+
+#define MV_PP2_PME_PPPOE_DATA_REG			(MV_PP2_REG_BASE + 0x8654)
+
+#define MV_PP2_PME_PPPOE_CODE_OFFS			0
+#define MV_PP2_PME_PPPOE_CODE_BITS			8
+#define MV_PP2_PME_PPPOE_CODE_ALL_MASK		(((1 << MV_PP2_PME_PPPOE_CODE_BITS) - 1) << MV_PP2_PME_PPPOE_CODE_OFFS)
+#define MV_PP2_PME_PPPOE_CODE_MASK(code)	(((code) << MV_PP2_PME_PPPOE_CODE_OFFS) & MV_PP2_PME_PPPOE_CODE_ALL_MASK)
+
+#define MV_PP2_PME_PPPOE_TYPE_OFFS			8
+#define MV_PP2_PME_PPPOE_TYPE_BITS			4
+#define MV_PP2_PME_PPPOE_TYPE_ALL_MASK		(((1 << MV_PP2_PME_PPPOE_TYPE_BITS) - 1) << MV_PP2_PME_PPPOE_TYPE_OFFS)
+#define MV_PP2_PME_PPPOE_TYPE_MASK(type)	(((type) << MV_PP2_PME_PPPOE_TYPE_OFFS) & MV_PP2_PME_PPPOE_TYPE_ALL_MASK)
+
+#define MV_PP2_PME_PPPOE_VER_OFFS			12
+#define MV_PP2_PME_PPPOE_VER_BITS			4
+#define MV_PP2_PME_PPPOE_VER_ALL_MASK		(((1 << MV_PP2_PME_PPPOE_VER_BITS) - 1) << MV_PP2_PME_PPPOE_VER_OFFS)
+#define MV_PP2_PME_PPPOE_VER_MASK(ver)		(((ver) << MV_PP2_PME_PPPOE_VER_OFFS) & MV_PP2_PME_PPPOE_VER_ALL_MASK)
+/*---------------------------------------------------------------------------*/
+
+#define MV_PP2_PME_PPPOE_LEN_REG			(MV_PP2_REG_BASE + 0x8658)
+/*---------------------------------------------------------------------------*/
+
+#define MV_PP2_PME_PPPOE_PROTO_REG			(MV_PP2_REG_BASE + 0x865c)
+
+#define MV_PP2_PME_PPPOE_PROTO_OFFS(i)		((i == 0) ? 0 : 16)
+#define MV_PP2_PME_PPPOE_PROTO_BITS			16
+#define MV_PP2_PME_PPPOE_PROTO_ALL_MASK(i)	(((1 << MV_PP2_PME_PPPOE_PROTO_BITS) - 1) << MV_PP2_PME_PPPOE_PROTO_OFFS(i))
+#define MV_PP2_PME_PPPOE_PROTO_MASK(i, p)	(((p) << MV_PP2_PME_PPPOE_PROTO_OFFS(i)) & MV_PP2_PME_PPPOE_PROTO_ALL_MASK(i))
+/*---------------------------------------------------------------------------*/
+
+#define MV_PP2_PME_CONFIG_REG				(MV_PP2_REG_BASE + 0x8660)
+
+#define MV_PP2_PME_MAX_HDR_SIZE_OFFS		0
+#define MV_PP2_PME_MAX_HDR_SIZE_BITS		8
+#define MV_PP2_PME_MAX_HDR_SIZE_ALL_MASK	(((1 << MV_PP2_PME_MAX_HDR_SIZE_BITS) - 1) << MV_PP2_PME_MAX_HDR_SIZE_OFFS)
+#define MV_PP2_PME_MAX_HDR_SIZE_MASK(size)	(((size) << MV_PP2_PME_MAX_HDR_SIZE_OFFS) & MV_PP2_PME_MAX_HDR_SIZE_ALL_MASK)
+
+#define MV_PP2_PME_MAX_INSTR_NUM_OFFS		16
+#define MV_PP2_PME_MAX_INSTR_NUM_BITS		8
+#define MV_PP2_PME_MAX_INSTR_NUM_ALL_MASK	(((1 << MV_PP2_PME_MAX_INSTR_NUM_BITS) - 1) << MV_PP2_PME_MAX_INSTR_NUM_OFFS)
+#define MV_PP2_PME_MAX_INSTR_NUM_MASK(num)	(((num) << MV_PP2_PME_MAX_INSTR_NUM_OFFS) & MV_PP2_PME_MAX_INSTR_NUM_ALL_MASK)
+
+#define MV_PP2_PME_DROP_ON_ERR_BIT			24
+#define MV_PP2_PME_DROP_ON_ERR_MASK			(1 << MV_PP2_PME_DROP_ON_ERR_BIT)
+/*---------------------------------------------------------------------------*/
+
+#define MV_PP2_PME_STATUS_1_REG				(MV_PP2_REG_BASE + 0x8664)
+#define MV_PP2_PME_STATUS_2_REG(txp)		(MV_PP2_REG_BASE + 0x8700 + 4 * (txp))
+#define MV_PP2_PME_STATUS_3_REG(txp)		(MV_PP2_REG_BASE + 0x8780 + 4 * (txp))
+
+/* PME instructions table (MV_PP2_PME_TBL_INSTR_REG) fields definition */
+#define MV_PP2_PME_DATA_OFFS				0
+#define MV_PP2_PME_DATA_BITS				16
+#define MV_PP2_PME_DATA_MASK				(((1 << MV_PP2_PME_DATA_BITS) - 1) << MV_PP2_PME_DATA_OFFS)
+
+#define MV_PP2_PME_CTRL_OFFS				16
+#define MV_PP2_PME_CTRL_BITS				16
+#define MV_PP2_PME_CTRL_MASK				(((1 << MV_PP2_PME_CTRL_BITS) - 1) << MV_PP2_PME_CTRL_OFFS)
+
+#define MV_PP2_PME_CMD_OFFS					16
+#define MV_PP2_PME_CMD_BITS					5
+#define MV_PP2_PME_CMD_ALL_MASK				(((1 << MV_PP2_PME_CMD_BITS) - 1) << MV_PP2_PME_CMD_OFFS)
+#define MV_PP2_PME_CMD_MASK(cmd)			((cmd) << MV_PP2_PME_CMD_OFFS)
+
+#define MV_PP2_PME_IP4_CSUM_BIT				21
+#define MV_PP2_PME_IP4_CSUM_MASK			(1 << MV_PP2_PME_IP4_CSUM_BIT)
+
+#define MV_PP2_PME_L4_CSUM_BIT				22
+#define MV_PP2_PME_L4_CSUM_MASK				(1 << MV_PP2_PME_L4_CSUM_BIT)
+
+#define MV_PP2_PME_LAST_BIT					23
+#define MV_PP2_PME_LAST_MASK				(1 << MV_PP2_PME_LAST_BIT)
+
+#define MV_PP2_PME_CMD_TYPE_OFFS			24
+#define MV_PP2_PME_CMD_TYPE_BITS			3
+#define MV_PP2_PME_CMD_TYPE_ALL_MASK		(((1 << MV_PP2_PME_CMD_TYPE_BITS) - 1) << MV_PP2_PME_CMD_TYPE_OFFS)
+#define MV_PP2_PME_CMD_TYPE_MASK(type)		((type) << MV_PP2_PME_CMD_TYPE_OFFS)
+
+enum MV_PP2_PME_CMD_E {
+	MV_PP2_PME_CMD_NONE        = 0,
+	MV_PP2_PME_CMD_ADD_2B,
+	MV_PP2_PME_CMD_CFG_VLAN,
+	MV_PP2_PME_CMD_ADD_VLAN,
+	MV_PP2_PME_CMD_CFG_DSA_1,
+	MV_PP2_PME_CMD_CFG_DSA_2,
+	MV_PP2_PME_CMD_ADD_DSA,
+	MV_PP2_PME_CMD_DEL_BYTES,
+	MV_PP2_PME_CMD_REPLACE_2B,
+	MV_PP2_PME_CMD_REPLACE_LSB,
+	MV_PP2_PME_CMD_REPLACE_MSB,
+	MV_PP2_PME_CMD_REPLACE_VLAN,
+	MV_PP2_PME_CMD_DEC_LSB,
+	MV_PP2_PME_CMD_DEC_MSB,
+	MV_PP2_PME_CMD_ADD_CALC_LEN,
+	MV_PP2_PME_CMD_REPLACE_LEN,
+	MV_PP2_PME_CMD_IPV4_CSUM,
+	MV_PP2_PME_CMD_L4_CSUM,
+	MV_PP2_PME_CMD_SKIP,
+	MV_PP2_PME_CMD_JUMP,
+	MV_PP2_PME_CMD_JUMP_SKIP,
+	MV_PP2_PME_CMD_JUMP_SUB,
+	MV_PP2_PME_CMD_PPPOE,
+	MV_PP2_PME_CMD_STORE,
+	MV_PP2_PME_CMD_ADD_IP4_CSUM,
+	MV_PP2_PME_CMD_PPPOE_2,
+	MV_PP2_PME_CMD_REPLACE_MID,
+	MV_PP2_PME_CMD_ADD_MULT,
+	MV_PP2_PME_CMD_REPLACE_MULT,
+	MV_PP2_PME_CMD_REPLACE_REM_2B, /* 0x1d - added on PPv2.1 (A0), MAS 3.3 */
+	MV_PP2_PME_CMD_ADD_IP6_HDR,    /* 0x1e - added on PPv2.1 (A0), MAS 3.15 */
+	MV_PP2_PME_CMD_DROP_PKT = 0x1f,
+	MV_PP2_TMP_CMD_LAST
+};
+
+/* PME data1 and data2 fields MV_PP2_PME_TBL_DATA1_REG and MV_PP2_PME_TBL_DATA2_REG */
+#define MV_PP2_PME_TBL_DATA_BITS		16
+#define MV_PP2_PME_TBL_DATA_OFFS(idx)	((idx == 0) ? MV_PP2_PME_TBL_DATA_BITS : 0)
+#define MV_PP2_PME_TBL_DATA_MASK(idx)	(((1 << MV_PP2_PME_TBL_DATA_BITS) - 1) << MV_PP2_PME_TBL_DATA_OFFS(idx))
+
+/* Macros for internal usage */
+#define MV_PP2_PME_IS_VALID(pme)        \
+		((((pme)->word & MV_PP2_PME_CMD_ALL_MASK) >> MV_PP2_PME_CMD_OFFS) != MV_PP2_PME_CMD_NONE)
+
+#define MV_PP2_PME_INVALID_SET(pme)        \
+		((pme)->word = MV_PP2_PME_CMD_MASK(MV_NETA_CMD_NONE) | MV_PP2_PME_LAST_MASK);
+
+#define MV_PP2_PME_CTRL_GET(pme)           \
+		(MV_U16)(((pme)->word & MV_PP2_PME_CTRL_MASK) >> MV_PP2_PME_CTRL_OFFS)
+
+#define MV_PP2_PME_CMD_GET(pme)           \
+		(((pme)->word & MV_PP2_PME_CMD_ALL_MASK) >> MV_PP2_PME_CMD_OFFS)
+
+#define MV_PP2_PME_DATA_GET(pme)           \
+		(MV_U16)(((pme)->word & MV_PP2_PME_DATA_MASK) >> MV_PP2_PME_DATA_OFFS)
+
+#define MV_PP2_PME_CMD_SET(pme, cmd)                       \
+		(pme)->word &= ~MV_PP2_PME_CMD_ALL_MASK;       \
+		(pme)->word |= MV_PP2_PME_CMD_MASK(cmd);
+
+#define MV_PP2_PME_DATA_SET(pme, data)                         \
+		(pme)->word &= ~MV_PP2_PME_DATA_MASK;              \
+		(pme)->word |= ((data) << MV_PP2_PME_DATA_OFFS);
+
+/* TX packet modification table entry */
+typedef struct mv_pp2_pme {
+	int     index;
+	MV_U32	word;
+
+} MV_PP2_PME_ENTRY;
+/*------------------------------------------------------------*/
+
+
+/* TX packet modification APIs */
+void        mvPp2PmeHwRegs(void);
+void        mvPp2PmeHwCntrs(void);
+MV_STATUS   mvPp2PmeHwDump(int mode);
+MV_STATUS   mvPp2PmeHwInvAll(void);
+
+MV_STATUS   mvPp2PmeHwInv(int idx);
+MV_STATUS   mvPp2PmeHwWrite(int idx, MV_PP2_PME_ENTRY *pEntry);
+MV_STATUS   mvPp2PmeHwRead(int idx, MV_PP2_PME_ENTRY *pEntry);
+
+MV_STATUS   mvPp2PmeSwDump(MV_PP2_PME_ENTRY *pEntry);
+MV_STATUS   mvPp2PmeSwClear(MV_PP2_PME_ENTRY *pEntry);
+MV_STATUS   mvPp2PmeSwWordSet(MV_PP2_PME_ENTRY *pEntry, MV_U32 word);
+MV_STATUS   mvPp2PmeSwCmdSet(MV_PP2_PME_ENTRY *pEntry, enum MV_PP2_PME_CMD_E cmd);
+MV_STATUS   mvPp2PmeSwCmdTypeSet(MV_PP2_PME_ENTRY *pEntry, int type);
+MV_STATUS   mvPp2PmeSwCmdFlagsSet(MV_PP2_PME_ENTRY *pEntry, int last, int ipv4, int l4);
+MV_STATUS   mvPp2PmeSwCmdLastSet(MV_PP2_PME_ENTRY *pEntry, int last);
+MV_STATUS   mvPp2PmeSwCmdDataSet(MV_PP2_PME_ENTRY *pEntry, MV_U16 data);
+
+MV_STATUS   mvPp2PmeHwDataTblDump(int tbl);
+MV_STATUS   mvPp2PmeHwDataTblClear(int tbl);
+MV_STATUS   mvPp2PmeHwDataTblWrite(int tbl, int idx, MV_U16 data);
+MV_STATUS   mvPp2PmeHwDataTblRead(int tbl, int idx, MV_U16 *data);
+
+MV_STATUS   mvPp2PmeVlanEtherTypeSet(int idx, MV_U16 ethertype);
+MV_STATUS   mvPp2PmeVlanDefaultSet(MV_U16 ethertype);
+MV_STATUS   mvPp2PmeDsaDefaultSet(int idx, MV_U16 ethertype);
+MV_STATUS   mvPp2PmeDsaSrcDevSet(MV_U8 src);
+MV_STATUS   mvPp2PmeTtlZeroSet(int forward);
+MV_STATUS   mvPp2PmePppoeConfig(MV_U8 version, MV_U8 type, MV_U8 code);
+MV_STATUS   mvPp2PmePppoeProtoSet(int idx, MV_U16 protocol);
+MV_STATUS   mvPp2PmePppoeEtypeSet(MV_U16 ethertype);
+MV_STATUS   mvPp2PmePppoeLengthSet(MV_U16 length);
+MV_STATUS   mvPp2PmeMaxConfig(int maxsize, int maxinstr, int errdrop);
+
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __mvPp2PmeHw_h__ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/prs/mvPp2Prs.c b/drivers/net/ethernet/mvebu_net/pp2/hal/prs/mvPp2Prs.c
new file mode 100644
index 000000000000..eaa1a52ecdd1
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/prs/mvPp2Prs.c
@@ -0,0 +1,3775 @@
+/*******************************************************************************
+   Copyright (C) Marvell International Ltd. and its affiliates
+
+   This software file (the "File") is owned and distributed by Marvell
+   International Ltd. and/or its affiliates ("Marvell") under the following
+   alternative licensing terms.  Once you have made an election to distribute the
+   File under one of the following license alternatives, please (i) delete this
+   introductory statement regarding license alternatives, (ii) delete the two
+   license alternatives that you have not elected to use and (iii) preserve the
+   Marvell copyright notice above.
+
+********************************************************************************
+   Marvell Commercial License Option
+
+   If you received this File from Marvell and you have entered into a commercial
+   license agreement (a "Commercial License") with Marvell, the File is licensed
+   to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+   Marvell GPL License Option
+
+   If you received this File from Marvell, you may opt to use, redistribute and/or
+   modify this File in accordance with the terms and conditions of the General
+   Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+   available along with the File in the license.txt file or by writing to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+   on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+   THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+   WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+   DISCLAIMED.  The GPL License provides additional details about this warranty
+   disclaimer.
+********************************************************************************
+   Marvell BSD License Option
+
+   If you received this File from Marvell, you may opt to use, redistribute and/or
+   modify this File under the following licensing terms.
+   Redistribution and use in source and binary forms, with or without modification,
+   are permitted provided that the following conditions are met:
+
+*   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+*   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+*   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+   ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+   WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+   DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+   ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+   (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+   LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+   ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+#include "mvOs.h"
+#include "mvCommon.h"
+#include "mv802_3.h"
+#include "mvPp2Prs.h"
+#include "gbe/mvPp2Gbe.h"
+#include "mvPp2PrsHw.h"
+#include "mvPp2Prs.h"
+
+#define PRS_DBG(X...)
+
+
+/*-------------------------------------------------------------------------------*/
+/*		Static varialbes for internal use				 */
+/*-------------------------------------------------------------------------------*/
+
+int etherTypeDsa;
+
+int mvPrsDblVlanAiShadow[DBL_VLAN_SHADOW_SIZE];
+
+static int mvPrsDblVlanAiShadowSet(int index)
+{
+	RANGE_VALIDATE(index, 1, DBL_VLAN_SHADOW_SIZE - 1);
+	mvPrsDblVlanAiShadow[index] = 1;
+	return MV_OK;
+}
+
+
+static int mvPrsDblVlanAiShadowClear(int index)
+{
+	RANGE_VALIDATE(index, 1, DBL_VLAN_SHADOW_SIZE - 1);
+	mvPrsDblVlanAiShadow[index] = 0;
+	return MV_OK;
+}
+
+static int mvPrsDblVlanAiShadowClearAll(void)
+{
+	int i;
+
+	for (i = 1; i < DBL_VLAN_SHADOW_SIZE; i++)
+		mvPrsDblVlanAiShadowClear(i);
+
+	return MV_OK;
+}
+static int mvPrsDblVlanAiFreeGet(void)
+{
+	int i;
+	/* start from 1, 0 not in used */
+	for (i = 1; i < DBL_VLAN_SHADOW_SIZE; i++)
+		if (mvPrsDblVlanAiShadow[i] == 0)
+			return i;
+
+	return MV_PRS_OUT_OF_RAGE;
+}
+
+
+/******************************************************************************
+* Common utilities
+******************************************************************************/
+static MV_BOOL mvPp2PrsEtypeEquals(MV_PP2_PRS_ENTRY *pe, int offset, unsigned short ethertype)
+{
+	unsigned char etype[MV_ETH_TYPE_LEN];
+
+	PRS_DBG("%s\n", __func__);
+	etype[0] =  (ethertype >> 8) & 0xFF;
+	etype[1] =  ethertype & 0xFF;
+
+	if (mvPp2PrsSwTcamBytesIgnorMaskCmp(pe, offset, MV_ETH_TYPE_LEN, etype) == NOT_EQUALS)
+		return MV_FALSE;
+
+	return MV_TRUE;
+}
+
+static void mvPp2PrsMatchEtype(MV_PP2_PRS_ENTRY *pe, int offset, unsigned short ethertype)
+{
+	PRS_DBG("%s\n", __func__);
+
+	mvPp2PrsSwTcamByteSet(pe, offset + 0, ethertype >> 8, 0xff);
+	mvPp2PrsSwTcamByteSet(pe, offset + 1, ethertype & 0xFF, 0xff);
+}
+#if 0
+static void mvPp2PrsMatchMh(MV_PP2_PRS_ENTRY *pe, unsigned short mh)
+{
+	PRS_DBG("%s\n", __func__);
+
+	mvPp2PrsSwTcamByteSet(pe, 0, mh >> 8, 0xff);
+	mvPp2PrsSwTcamByteSet(pe, 1, mh & 0xFF, 0xff);
+}
+#endif
+
+/******************************************************************************
+ *
+ * Marvell header Section
+ *
+ ******************************************************************************
+ */
+static MV_BOOL mvPrsMhRangeEquals(MV_PP2_PRS_ENTRY *pe, MV_U8 *mh, MV_U8 *mask)
+{
+	int index;
+	unsigned char tcamByte, tcamMask;
+
+	for (index = 0; index < MV_ETH_MH_SIZE; index++) {
+		mvPp2PrsSwTcamByteGet(pe, index, &tcamByte, &tcamMask);
+		if (tcamMask != mask[index])
+			return MV_FALSE;
+
+		if ((tcamMask & tcamByte) != (mh[index] & mask[index]))
+			return MV_FALSE;
+	}
+
+	return MV_TRUE;
+}
+
+static MV_BOOL mvPrsMhRangeIntersec(MV_PP2_PRS_ENTRY *pe, MV_U8 *mh, MV_U8 *mask)
+{
+	int index;
+	unsigned char tcamByte, tcamMask, commonMask;
+
+	for (index = 0; index < MV_ETH_MH_SIZE; index++) {
+		mvPp2PrsSwTcamByteGet(pe, index, &tcamByte, &tcamMask);
+
+		commonMask = mask[index] & tcamMask;
+
+		if ((commonMask & tcamByte) != (commonMask & mh[index]))
+			return MV_FALSE;
+	}
+	return MV_TRUE;
+}
+
+static MV_BOOL mvPrsMhInRange(MV_PP2_PRS_ENTRY *pe, MV_U8 *mh, MV_U8 *mask)
+{
+	int index;
+	unsigned char tcamByte, tcamMask;
+
+	for (index = 0; index < MV_ETH_MH_SIZE; index++) {
+		mvPp2PrsSwTcamByteGet(pe, index, &tcamByte, &tcamMask);
+		if ((tcamByte & mask[index]) != (mh[index] & mask[index]))
+			return MV_FALSE;
+	}
+
+	return MV_TRUE;
+}
+
+static MV_PP2_PRS_ENTRY *mvPrsMhRangeFind(int portMap, unsigned char *mh, unsigned char *mask)
+{
+	MV_PP2_PRS_ENTRY *pe;
+	int tid;
+	unsigned int entryPmap;
+
+	pe = mvPp2PrsSwAlloc(PRS_LU_MH);
+
+	/* Go through the all entires with PRS_LU_MAC */
+	for (tid = PE_FIRST_FREE_TID; tid <= PE_LAST_FREE_TID; tid++) {
+		if ((!mvPp2PrsShadowIsValid(tid)) || (mvPp2PrsShadowLu(tid) != PRS_LU_MH))
+			continue;
+
+		pe->index = tid;
+		mvPp2PrsHwRead(pe);
+
+		mvPp2PrsSwTcamPortMapGet(pe, &entryPmap);
+
+		if (mvPrsMhRangeEquals(pe, mh, mask) && (entryPmap == (unsigned int)portMap))
+			return pe;
+	}
+	mvPp2PrsSwFree(pe);
+	return NULL;
+}
+
+static int mvPrsMhRangeAccept(int portMap, MV_U8 *mh, MV_U8 *mask, unsigned int ri, unsigned int riMask, MV_BOOL finish)
+{
+	int tid, len;
+	MV_PP2_PRS_ENTRY *pe = NULL;
+
+	/* Scan TCAM and see if entry with this <MH, port> already exist */
+	pe = mvPrsMhRangeFind(portMap, mh, mask);
+
+	if (pe == NULL) {
+		/* entry not exist */
+		/* Go through the all entires from first to last */
+		tid = mvPp2PrsTcamFirstFree(PE_FIRST_FREE_TID, PE_LAST_FREE_TID);
+
+		/* Can't add - No free TCAM entries */
+		if (tid == MV_PRS_OUT_OF_RAGE) {
+			mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+			mvPp2PrsSwFree(pe);
+			return MV_ERROR;
+		}
+
+		pe = mvPp2PrsSwAlloc(PRS_LU_MH);
+		pe->index = tid;
+		mvPp2PrsSwTcamPortMapSet(pe, portMap);
+		/* shift to MAC */
+		mvPp2PrsSwSramShiftSet(pe, MV_ETH_MH_SIZE, SRAM_OP_SEL_SHIFT_ADD);
+
+		mvPp2PrsSwSramNextLuSet(pe, PRS_LU_FLOWS);
+		mvPp2PrsSwSramFlowidGenSet(pe);
+
+		/* set MH range */
+		len = MV_ETH_MH_SIZE;
+
+		while (len--)
+			mvPp2PrsSwTcamByteSet(pe, len, mh[len], mask[len]);
+
+		/* Update mvPrsShadowTbl */
+		mvPp2PrsShadowSet(pe->index, PRS_LU_MH, "mh-range");
+
+	}
+
+	/* Set result info bits */
+	mvPp2PrsSwSramRiUpdate(pe, ri, riMask);
+	finish ? mvPp2PrsSwSramFlowidGenSet(pe) : mvPp2PrsSwSramFlowidGenClear(pe);
+
+	/* Write entry to TCAM */
+	mvPp2PrsHwWrite(pe);
+
+	mvPp2PrsSwFree(pe);
+	return MV_OK;
+}
+
+static int mvPrsMhRangeValid(unsigned int portMap, MV_U8 *mh, MV_U8 *mask)
+{
+	MV_PP2_PRS_ENTRY pe;
+	unsigned int entryPmap;
+	int tid;
+
+	for (tid = PE_LAST_FREE_TID; tid >= PE_FIRST_FREE_TID; tid--) {
+		if (!mvPp2PrsShadowIsValid(tid) || (mvPp2PrsShadowLu(tid) != PRS_LU_MH))
+			continue;
+
+		pe.index = tid;
+		mvPp2PrsHwRead(&pe);
+
+		mvPp2PrsSwTcamPortMapGet(&pe, &entryPmap);
+
+		if ((mvPrsMhRangeIntersec(&pe, mh, mask)) & !mvPrsMhRangeEquals(&pe, mh, mask)) {
+			if (entryPmap & portMap) {
+				mvOsPrintf("%s: operation not supported, range intersection\n", __func__);
+				mvOsPrintf("%s: user must delete portMap 0x%x from entry %d.\n",
+					   __func__, entryPmap & portMap, tid);
+				return MV_ERROR;
+			}
+
+		} else if (mvPrsMhRangeEquals(&pe, mh, mask) && (entryPmap != portMap) && (entryPmap & portMap)) {
+			mvOsPrintf("%s: operation not supported, range intersection\n", __func__);
+			mvOsPrintf("%s: user must delete portMap 0x%x from entry %d.\n",
+				   __func__, entryPmap & portMap, tid);
+
+			return MV_ERROR;
+		}
+	}
+	return MV_OK;
+}
+
+int mvPrsMhSet(unsigned int portMap, unsigned short mh, unsigned short mh_mask, unsigned int ri, unsigned int riMask, MV_BOOL finish)
+{
+	MV_PP2_PRS_ENTRY pe;
+	int tid;
+	unsigned int entryPmap;
+	MV_BOOL done = MV_FALSE;
+	unsigned short n_mh;
+	unsigned short n_mh_mask;
+
+	/* step 1 - validation, ranges intersections are forbidden*/
+	n_mh = htons(mh);
+	n_mh_mask = htons(mh_mask);
+	if (mvPrsMhRangeValid(portMap, (unsigned char *)&n_mh, (unsigned char *)&n_mh_mask))
+		return MV_ERROR;
+
+	/* step 2 - update TCAM */
+	for (tid = PE_LAST_FREE_TID; tid >= PE_FIRST_FREE_TID; tid--) {
+		if (!mvPp2PrsShadowIsValid(tid) || !(mvPp2PrsShadowLu(tid) == PRS_LU_MH))
+			continue;
+
+		pe.index = tid;
+		mvPp2PrsHwRead(&pe);
+		mvPp2PrsSwTcamPortMapGet(&pe, &entryPmap);
+
+		if (mvPrsMhRangeEquals(&pe, (unsigned char *)&n_mh, (unsigned char *)&n_mh_mask) &&
+				       (entryPmap == portMap)) {
+			/* portMap and range are equals to TCAM entry*/
+			done = MV_TRUE;
+			mvPp2PrsSwSramRiUpdate(&pe, ri, riMask);
+			finish ? mvPp2PrsSwSramFlowidGenSet(&pe) : mvPp2PrsSwSramFlowidGenClear(&pe);
+			mvPp2PrsHwWrite(&pe);
+			continue;
+		}
+
+		/* PRS_UDF_MAC_DEF */
+		if (mvPrsMhInRange(&pe, (unsigned char *)&n_mh, (unsigned char *)&n_mh_mask) && (entryPmap & portMap)) {
+			mvPp2PrsSwSramRiUpdate(&pe, ri, riMask);
+			finish ? mvPp2PrsSwSramFlowidGenSet(&pe) : mvPp2PrsSwSramFlowidGenClear(&pe);
+			mvPp2PrsHwWrite(&pe);
+		}
+	}
+	/* step 3 - Add new range entry */
+	if (!done)
+		return mvPrsMhRangeAccept(portMap, (unsigned char *)&n_mh,
+					 (unsigned char *)&n_mh_mask, ri, riMask, finish);
+
+	return MV_OK;
+}
+
+int mvPrsMhDel(unsigned int portMap, unsigned short mh, unsigned short mh_mask)
+{
+	MV_PP2_PRS_ENTRY pe;
+	int tid;
+	unsigned int entryPmap;
+	MV_BOOL found = MV_FALSE;
+	unsigned short n_mh;
+	unsigned short n_mh_mask;
+
+	n_mh = htons(mh);
+	n_mh_mask = htons(mh_mask);
+
+	for (tid = PE_LAST_FREE_TID; tid >= PE_FIRST_FREE_TID; tid--) {
+		if (!mvPp2PrsShadowIsValid(tid) || !(mvPp2PrsShadowLu(tid) == PRS_LU_MH))
+			continue;
+
+		pe.index = tid;
+		mvPp2PrsHwRead(&pe);
+		mvPp2PrsSwTcamPortMapGet(&pe, &entryPmap);
+
+		/* differents ports */
+		if (!(entryPmap & portMap))
+			continue;
+
+		if (mvPrsMhRangeEquals(&pe, (unsigned char *)&n_mh, (unsigned char *)&n_mh_mask)) {
+			found = MV_TRUE;
+			entryPmap &= ~portMap;
+
+			if (!entryPmap) {
+				/* delete entry */
+				mvPp2PrsHwInv(pe.index);
+				mvPp2PrsShadowClear(pe.index);
+				continue;
+			}
+
+			/* update port map */
+			mvPp2PrsSwTcamPortMapSet(&pe, entryPmap);
+			mvPp2PrsHwWrite(&pe);
+			continue;
+		}
+
+		/* PRS_UDF_MH_RANGE */
+		if (!found) {
+			/* range entry not exist */
+			mvOsPrintf("%s: Error, entry not found\n", __func__);
+			return MV_ERROR;
+		}
+
+		/* range entry allready found, now fix all relevant default entries*/
+		if (mvPrsMhInRange(&pe, (unsigned char *)&n_mh, (unsigned char *)&n_mh_mask)) {
+			mvPp2PrsSwSramFlowidGenClear(&pe);
+			mvPp2PrsSwSramRiSet(&pe, mvPp2PrsShadowRi(tid), mvPp2PrsShadowRiMask(tid));
+			mvPp2PrsHwWrite(&pe);
+		}
+	}
+	return MV_OK;
+}
+
+/* Set default entry for Marvell header field */
+static int mvPp2PrsMhInit(void)
+{
+	MV_PP2_PRS_ENTRY pe;
+
+	mvPp2PrsSwClear(&pe);
+
+	pe.index = PE_MH_DEFAULT;
+	mvPp2PrsSwTcamLuSet(&pe, PRS_LU_MH);
+	mvPp2PrsSwSramShiftSet(&pe, MV_ETH_MH_SIZE, SRAM_OP_SEL_SHIFT_ADD);
+	mvPp2PrsSwSramNextLuSet(&pe, PRS_LU_MAC);
+
+	/* Update mvPrsShadowTbl */
+	mvPp2PrsShadowSet(pe.index, PRS_LU_MH, "mh-default");
+
+	mvPp2PrsSwTcamPortMapSet(&pe, PORT_MASK);
+	mvPp2PrsHwWrite(&pe);
+
+	return MV_OK;
+}
+
+/******************************************************************************
+ *
+ * MAC Address Section
+ *
+ ******************************************************************************
+ */
+
+char *mvPrsL2InfoStr(unsigned int l2_info)
+{
+	switch (l2_info << RI_L2_CAST_OFFS) {
+	case RI_L2_UCAST:
+		return "Ucast";
+	case RI_L2_MCAST:
+		return "Mcast";
+	case RI_L2_BCAST:
+		return "Bcast";
+	default:
+		return "Unknown";
+	}
+	return NULL;
+}
+
+static MV_BOOL mvPrsMacRangeEquals(MV_PP2_PRS_ENTRY *pe, MV_U8 *da, MV_U8 *mask)
+{
+	int index;
+	unsigned char tcamByte, tcamMask;
+
+	for (index = 0; index < MV_MAC_ADDR_SIZE; index++) {
+		mvPp2PrsSwTcamByteGet(pe, index, &tcamByte, &tcamMask);
+		if (tcamMask != mask[index])
+			return MV_FALSE;
+
+		if ((tcamMask & tcamByte) != (da[index] & mask[index]))
+			return MV_FALSE;
+	}
+
+	return MV_TRUE;
+}
+
+static MV_BOOL mvPrsMacRangeIntersec(MV_PP2_PRS_ENTRY *pe, MV_U8 *da, MV_U8 *mask)
+{
+	int index;
+	unsigned char tcamByte, tcamMask, commonMask;
+
+	for (index = 0; index < MV_MAC_ADDR_SIZE; index++) {
+		mvPp2PrsSwTcamByteGet(pe, index, &tcamByte, &tcamMask);
+
+		commonMask = mask[index] & tcamMask;
+
+
+		if ((commonMask & tcamByte) != (commonMask & da[index]))
+			return MV_FALSE;
+	}
+	return MV_TRUE;
+}
+
+static MV_BOOL mvPrsMacInRange(MV_PP2_PRS_ENTRY *pe, MV_U8* da, MV_U8* mask)
+{
+	int index;
+	unsigned char tcamByte, tcamMask;
+
+	for (index = 0; index < MV_MAC_ADDR_SIZE; index++) {
+		mvPp2PrsSwTcamByteGet(pe, index, &tcamByte, &tcamMask);
+		if ((tcamByte & mask[index]) != (da[index] & mask[index]))
+			return MV_FALSE;
+	}
+
+	return MV_TRUE;
+}
+
+static MV_PP2_PRS_ENTRY *mvPrsMacDaRangeFind(int portMap, unsigned char *da, unsigned char *mask, int udfType)
+{
+	MV_PP2_PRS_ENTRY *pe;
+	int tid;
+	unsigned int entryPmap;
+
+	pe = mvPp2PrsSwAlloc(PRS_LU_MAC);
+
+	/* Go through the all entires with PRS_LU_MAC */
+	for (tid = PE_FIRST_FREE_TID ; tid <= PE_LAST_FREE_TID; tid++) {
+		if ((!mvPp2PrsShadowIsValid(tid)) || (mvPp2PrsShadowLu(tid) != PRS_LU_MAC))
+			continue;
+
+		if (mvPp2PrsShadowUdf(tid) != udfType)
+			continue;
+
+		pe->index = tid;
+		mvPp2PrsHwRead(pe);
+
+		mvPp2PrsSwTcamPortMapGet(pe, &entryPmap);
+
+		if (mvPrsMacRangeEquals(pe, da, mask) && (entryPmap == portMap))
+			return pe;
+	}
+	mvPp2PrsSwFree(pe);
+	return NULL;
+
+}
+
+static MV_PP2_PRS_ENTRY *mvPrsMacDaFind(int port, unsigned char *da)
+{
+	unsigned char mask[MV_MAC_ADDR_SIZE];
+
+	mask[0] = mask[1] = mask[2] = mask[3] = mask[4] = mask[5] = 0xff;
+
+	/* Scan TCAM and see if entry with this <MAC DA, port> already exist */
+	return mvPrsMacDaRangeFind((1 << port), da, mask, PRS_UDF_MAC_DEF);
+}
+
+static int mvPrsMacDaRangeAccept(int portMap, MV_U8 *da, MV_U8 *mask, unsigned int ri, unsigned int riMask, MV_BOOL finish)
+{
+	int tid, len;
+	MV_PP2_PRS_ENTRY *pe = NULL;
+
+	/* Scan TCAM and see if entry with this <MAC DA, port> already exist */
+	pe = mvPrsMacDaRangeFind(portMap, da, mask, PRS_UDF_MAC_RANGE);
+
+	if (pe == NULL) {
+		/* entry not exist */
+		/* find last simple mac entry*/
+		for (tid = PE_LAST_FREE_TID ; tid >= PE_FIRST_FREE_TID; tid--)
+			if (mvPp2PrsShadowIsValid(tid) && (mvPp2PrsShadowLu(tid) == PRS_LU_MAC) &&
+			    (mvPp2PrsShadowUdf(tid) == PRS_UDF_MAC_DEF))
+				break;
+
+		/* Go through the all entires from first to last */
+		tid = mvPp2PrsTcamFirstFree(tid + 1, PE_LAST_FREE_TID);
+
+		/* Can't add - No free TCAM entries */
+		if (tid == MV_PRS_OUT_OF_RAGE) {
+			mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+			mvPp2PrsSwFree(pe);
+			return MV_ERROR;
+		}
+
+		pe = mvPp2PrsSwAlloc(PRS_LU_MAC);
+		pe->index = tid;
+		mvPp2PrsSwTcamPortMapSet(pe, portMap);
+		/* shift to ethertype */
+		mvPp2PrsSwSramShiftSet(pe, 2 * MV_MAC_ADDR_SIZE, SRAM_OP_SEL_SHIFT_ADD);
+
+		/* set DA range */
+		len = MV_MAC_ADDR_SIZE;
+
+		while (len--)
+			mvPp2PrsSwTcamByteSet(pe, len, da[len], mask[len]);
+
+		/* Update mvPrsShadowTbl */
+		mvPp2PrsShadowSet(pe->index, PRS_LU_MAC, "mac-range");
+		mvPp2PrsShadowUdfSet(pe->index, PRS_UDF_MAC_RANGE);
+
+	}
+
+	/* Set result info bits */
+	mvPp2PrsSwSramRiUpdate(pe, ri, riMask);
+	finish ? mvPp2PrsSwSramFlowidGenSet(pe) : mvPp2PrsSwSramFlowidGenClear(pe);
+
+	/* Write entry to TCAM */
+	mvPp2PrsHwWrite(pe);
+
+	mvPp2PrsSwFree(pe);
+	return MV_OK;
+}
+
+
+/* delete all port's simple (not range) multicast entries */
+int mvPrsMcastDelAll(int port)
+{
+	MV_PP2_PRS_ENTRY pe;
+	int tid, index;
+	unsigned char da[MV_MAC_ADDR_SIZE], daMask[MV_MAC_ADDR_SIZE];
+
+	for (tid = PE_FIRST_FREE_TID ; tid <= PE_LAST_FREE_TID; tid++) {
+
+		if (!mvPp2PrsShadowIsValid(tid))
+			continue;
+
+		if (mvPp2PrsShadowLu(tid) != PRS_LU_MAC)
+			continue;
+
+		if (mvPp2PrsShadowUdf(tid) != PRS_UDF_MAC_DEF)
+			continue;
+
+		/* only simple mac entries */
+		pe.index = tid;
+		mvPp2PrsHwRead(&pe);
+
+		/* read mac addr from entry */
+		for (index = 0; index < MV_MAC_ADDR_SIZE; index++)
+			mvPp2PrsSwTcamByteGet(&pe, index, &da[index], &daMask[index]);
+
+		if (MV_IS_BROADCAST_MAC(da))
+			continue;
+
+		if (MV_IS_MULTICAST_MAC(da))
+			/* delete mcast entry */
+			mvPrsMacDaAccept(port, da, 0);
+	}
+
+	return MV_OK;
+}
+
+
+/* TODO: use mvPrsMacDaRangeAccept */
+int mvPrsMacDaAccept(int port, unsigned char *da, int add)
+{
+	MV_PP2_PRS_ENTRY *pe = NULL;
+	unsigned int len, ports, ri;
+	int tid;
+	char name[PRS_TEXT_SIZE];
+
+	/* Scan TCAM and see if entry with this <MAC DA, port> already exist */
+	pe = mvPrsMacDaFind(port, da);
+
+	if (pe == NULL) {
+		/* No such entry */
+		if (!add) {
+			/* Can't remove - No such entry */
+			return MV_ERROR;
+		}
+		 /* Create new TCAM entry */
+
+		/* find last range mac entry*/
+		for (tid = PE_FIRST_FREE_TID ; tid <= PE_LAST_FREE_TID; tid++)
+			if (mvPp2PrsShadowIsValid(tid) && (mvPp2PrsShadowLu(tid) == PRS_LU_MAC) &&
+			    (mvPp2PrsShadowUdf(tid) == PRS_UDF_MAC_RANGE))
+				break;
+
+		/* Go through the all entires from first to last */
+		tid = mvPp2PrsTcamFirstFree(0, tid - 1);
+
+		/* Can't add - No free TCAM entries */
+		if (tid == MV_PRS_OUT_OF_RAGE) {
+			mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+			mvPp2PrsSwFree(pe);
+			return MV_ERROR;
+		}
+
+		pe = mvPp2PrsSwAlloc(PRS_LU_MAC);
+		pe->index = tid;
+
+		mvPp2PrsSwTcamPortMapSet(pe, 0);
+
+	}
+	/* Update port mask */
+	mvPp2PrsSwTcamPortSet(pe, port, add);
+	mvPp2PrsSwTcamPortMapGet(pe, &ports);
+
+	if (ports == 0) {
+		if (add) {
+			mvPp2PrsSwFree(pe);
+			/* Internal error, port should be set in ports bitmap */
+			return MV_ERROR;
+		}
+		/* No ports - invalidate the entry */
+		mvPp2PrsHwInv(pe->index);
+		mvPp2PrsShadowClear(pe->index);
+		mvPp2PrsSwFree(pe);
+		return MV_OK;
+
+	}
+
+	/* Continue - set next lookup */
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_DSA);
+
+	/* set match on DA */
+	len = MV_MAC_ADDR_SIZE;
+	while (len--)
+		mvPp2PrsSwTcamByteSet(pe, len, da[len], 0xff);
+
+	/* Set result info bits */
+	if (MV_IS_BROADCAST_MAC(da)) {
+		ri = RI_L2_BCAST;
+		mvOsSPrintf(name, "bcast-port-%d", port);
+
+	} else if (MV_IS_MULTICAST_MAC(da)) {
+		ri = RI_L2_MCAST;
+		mvOsSPrintf(name, "mcast-port-%d", port);
+	} else {
+		ri = RI_L2_UCAST | RI_MAC_ME_MASK;
+		mvOsSPrintf(name, "ucast-port-%d", port);
+	}
+
+	/*mvPp2PrsSwSramRiSetBit(pe, RI_MAC_ME_BIT);*/
+	mvPp2PrsSwSramRiUpdate(pe, ri, RI_L2_CAST_MASK | RI_MAC_ME_MASK);
+	mvPp2PrsShadowRiSet(pe->index, ri, RI_L2_CAST_MASK | RI_MAC_ME_MASK);
+
+	/* shift to ethertype */
+	mvPp2PrsSwSramShiftSet(pe, 2 * MV_MAC_ADDR_SIZE, SRAM_OP_SEL_SHIFT_ADD);
+
+	/* Write entry to TCAM */
+	mvPp2PrsHwWrite(pe);
+
+	/* Update mvPrsShadowTbl */
+	mvPp2PrsShadowSet(pe->index, PRS_LU_MAC, name);
+	mvPp2PrsShadowUdfSet(pe->index, PRS_UDF_MAC_DEF);
+
+	mvPp2PrsSwFree(pe);
+	return MV_OK;
+}
+
+
+static int mvPrsMacDaRangeValid(unsigned int portMap, MV_U8 *da, MV_U8 *mask)
+{
+	MV_PP2_PRS_ENTRY pe;
+	unsigned int entryPmap;
+	int tid;
+
+	for (tid = PE_LAST_FREE_TID ; tid >= PE_FIRST_FREE_TID; tid--) {
+		if (!mvPp2PrsShadowIsValid(tid) || (mvPp2PrsShadowLu(tid) != PRS_LU_MAC) ||
+		    (mvPp2PrsShadowUdf(tid) != PRS_UDF_MAC_RANGE))
+			continue;
+
+		pe.index = tid;
+		mvPp2PrsHwRead(&pe);
+
+		mvPp2PrsSwTcamPortMapGet(&pe, &entryPmap);
+
+		if ((mvPrsMacRangeIntersec(&pe, da, mask)) && !mvPrsMacRangeEquals(&pe, da, mask)) {
+			if (entryPmap & portMap) {
+				mvOsPrintf("%s: operation not supported, range intersection\n", __func__);
+				mvOsPrintf("%s: user must delete portMap 0x%x from entry %d.\n",
+					   __func__, entryPmap & portMap, tid);
+				return MV_ERROR;
+			}
+
+		} else if (mvPrsMacRangeEquals(&pe, da, mask) && (entryPmap != portMap) && (entryPmap & portMap)) {
+			mvOsPrintf("%s: operation not supported, range intersection\n", __func__);
+			mvOsPrintf("%s: user must delete portMap 0x%x from entry %d.\n",
+				   __func__, entryPmap & portMap, tid);
+			return MV_ERROR;
+		}
+	}
+	return MV_OK;
+}
+
+int mvPrsMacDaRangeSet(unsigned int portMap, MV_U8 *da, MV_U8 *mask, unsigned int ri, unsigned int riMask, MV_BOOL finish)
+{
+	MV_PP2_PRS_ENTRY pe;
+	int tid;
+	unsigned int entryPmap;
+	MV_BOOL done = MV_FALSE;
+
+	/* step 1 - validation, ranges intersections are forbidden*/
+	if (mvPrsMacDaRangeValid(portMap, da, mask))
+		return MV_ERROR;
+
+	/* step 2 - update TCAM */
+	for (tid = PE_LAST_FREE_TID ; tid >= PE_FIRST_FREE_TID; tid--) {
+		if (!mvPp2PrsShadowIsValid(tid) || !(mvPp2PrsShadowLu(tid) == PRS_LU_MAC))
+			continue;
+
+		pe.index = tid;
+		mvPp2PrsHwRead(&pe);
+		mvPp2PrsSwTcamPortMapGet(&pe, &entryPmap);
+
+		if ((mvPp2PrsShadowUdf(tid) == PRS_UDF_MAC_RANGE) &&
+		    mvPrsMacRangeEquals(&pe, da, mask) && (entryPmap == portMap)) {
+			/* portMap and range are equals to TCAM entry*/
+			done = MV_TRUE;
+			mvPp2PrsSwSramRiUpdate(&pe, ri, riMask);
+			finish ? mvPp2PrsSwSramFlowidGenSet(&pe) : mvPp2PrsSwSramFlowidGenClear(&pe);
+			mvPp2PrsHwWrite(&pe);
+			continue;
+		}
+
+		/* PRS_UDF_MAC_DEF */
+		if (mvPrsMacInRange(&pe, da, mask) && (entryPmap & portMap)) {
+			mvPp2PrsSwSramRiUpdate(&pe, ri, riMask);
+			finish ? mvPp2PrsSwSramFlowidGenSet(&pe) : mvPp2PrsSwSramFlowidGenClear(&pe);
+			mvPp2PrsHwWrite(&pe);
+		}
+	}
+	/* step 3 - Add new range entry */
+	if (!done)
+		return mvPrsMacDaRangeAccept(portMap, da, mask, ri, riMask, finish);
+
+	return MV_OK;
+
+}
+
+int mvPrsMacDaRangeDel(unsigned int portMap, MV_U8 *da, MV_U8 *mask)
+{
+	MV_PP2_PRS_ENTRY pe;
+	int tid;
+	unsigned int entryPmap;
+	MV_BOOL found = MV_FALSE;
+
+	for (tid = PE_LAST_FREE_TID ; tid >= PE_FIRST_FREE_TID; tid--) {
+		if (!mvPp2PrsShadowIsValid(tid) || !(mvPp2PrsShadowLu(tid) == PRS_LU_MAC))
+			continue;
+
+		pe.index = tid;
+		mvPp2PrsHwRead(&pe);
+		mvPp2PrsSwTcamPortMapGet(&pe, &entryPmap);
+
+		/* differents ports */
+		if (!(entryPmap & portMap))
+			continue;
+
+		if ((mvPp2PrsShadowUdf(tid) == PRS_UDF_MAC_RANGE) && (mvPrsMacRangeEquals(&pe, da, mask))) {
+
+			found = MV_TRUE;
+			entryPmap &= ~portMap;
+
+			if (!entryPmap) {
+				/* delete entry */
+				mvPp2PrsHwInv(pe.index);
+				mvPp2PrsShadowClear(pe.index);
+				continue;
+			}
+
+			/* update port map */
+			mvPp2PrsSwTcamPortMapSet(&pe, entryPmap);
+			mvPp2PrsHwWrite(&pe);
+			continue;
+		}
+
+		/* PRS_UDF_MAC_RANGE */
+		if (!found) {
+			/* range entry not exist */
+			mvOsPrintf("%s: Error, entry not found\n", __func__);
+			return MV_ERROR;
+		}
+
+		/* range entry allready found, now fix all relevant default entries*/
+		if (mvPrsMacInRange(&pe, da, mask)) {
+			mvPp2PrsSwSramFlowidGenClear(&pe);
+			mvPp2PrsSwSramRiSet(&pe, mvPp2PrsShadowRi(tid), mvPp2PrsShadowRiMask(tid));
+			mvPp2PrsHwWrite(&pe);
+		}
+	}
+	return MV_OK;
+}
+
+/* Drop special MAC DA - 6 bytes */
+int mvPrsMacDaDrop(int port, unsigned char *da, int add)
+{
+	return MV_OK;
+}
+
+int mvPrsMacDropAllSet(int port, int add)
+{
+	MV_PP2_PRS_ENTRY pe;
+
+	if (mvPp2PrsShadowIsValid(PE_DROP_ALL)) {
+		/* Entry exist - update port only */
+		pe.index = PE_DROP_ALL;
+		mvPp2PrsHwRead(&pe);
+	} else {
+		/* Entry doesn't exist - create new */
+		mvPp2PrsSwClear(&pe);
+		mvPp2PrsSwTcamLuSet(&pe, PRS_LU_MAC);
+		pe.index = PE_DROP_ALL;
+
+		/* Non-promiscous mode for all ports - DROP unknown packets */
+		mvPp2PrsSwSramRiSetBit(&pe, RI_DROP_BIT);
+		/*	mvPp2PrsSwSramLuDoneSet(&pe);*/
+
+		mvPp2PrsSwSramFlowidGenSet(&pe);
+		mvPp2PrsSwSramNextLuSet(&pe, PRS_LU_FLOWS);
+
+		/* Update mvPrsShadowTbl */
+		mvPp2PrsShadowSet(pe.index, PRS_LU_MAC, "drop-all");
+
+		mvPp2PrsSwTcamPortMapSet(&pe, 0);
+	}
+
+	mvPp2PrsSwTcamPortSet(&pe, port, add);
+
+	mvPp2PrsHwWrite(&pe);
+
+	return MV_OK;
+}
+
+/* Set port to promiscous mode */
+int mvPrsMacPromiscousSet(int port, int add)
+{
+	MV_PP2_PRS_ENTRY pe;
+
+	/* Promiscous mode - Accept unknown packets */
+
+	if (mvPp2PrsShadowIsValid(PE_MAC_PROMISCOUS)) {
+		/* Entry exist - update port only */
+		pe.index = PE_MAC_PROMISCOUS;
+		mvPp2PrsHwRead(&pe);
+	} else {
+		/* Entry doesn't exist - create new */
+		mvPp2PrsSwClear(&pe);
+		mvPp2PrsSwTcamLuSet(&pe, PRS_LU_MAC);
+		pe.index = PE_MAC_PROMISCOUS;
+
+		/* Continue - set next lookup */
+		mvPp2PrsSwSramNextLuSet(&pe, PRS_LU_DSA);
+
+		/* Set result info bits */
+		mvPp2PrsSwSramRiUpdate(&pe, RI_L2_UCAST, RI_L2_CAST_MASK);
+
+		/* shift to ethertype */
+		mvPp2PrsSwSramShiftSet(&pe, 2 * MV_MAC_ADDR_SIZE, SRAM_OP_SEL_SHIFT_ADD);
+
+		/* mask all ports */
+		mvPp2PrsSwTcamPortMapSet(&pe, 0);
+
+		/* Update mvPrsShadowTbl */
+		mvPp2PrsShadowSet(pe.index, PRS_LU_MAC, "promisc");
+	}
+
+	mvPp2PrsSwTcamPortSet(&pe, port, add);
+
+	mvPp2PrsHwWrite(&pe);
+
+	return MV_OK;
+}
+
+/* 0 - reject, 1 - accept */
+int mvPrsMacAllMultiSet(int port, int add)
+{
+	MV_PP2_PRS_ENTRY pe;
+	unsigned int i, idx = 0;
+	char *rule_str[MAX_MAC_MC] = { "mcast-mac-ip4", "mcast-mac-ip6" };
+
+	/* Ethernet multicast address first byte is with 0x01 */
+	unsigned char da_mc[MAX_MAC_MC] = { 0x01, 0x33 };
+
+	for (i = IP4_MAC_MC; i < MAX_MAC_MC; i++) {
+		if (i == IP4_MAC_MC)
+			idx = PE_MAC_MC_ALL;
+		else
+			idx = PE_MAC_MC_IP6;
+		/* all multicast */
+
+		if (mvPp2PrsShadowIsValid(idx)) {
+			/* Entry exist - update port only */
+			pe.index = idx;
+			mvPp2PrsHwRead(&pe);
+		} else {
+			/* Entry doesn't exist - create new */
+			mvPp2PrsSwClear(&pe);
+
+			pe.index = idx;
+
+			mvPp2PrsSwTcamLuSet(&pe, PRS_LU_MAC);
+
+			/* Continue - set next lookup */
+			mvPp2PrsSwSramNextLuSet(&pe, PRS_LU_DSA);
+
+			/* Set result info bits */
+			mvPp2PrsSwSramRiUpdate(&pe, RI_L2_MCAST, RI_L2_CAST_MASK);
+
+			mvPp2PrsSwTcamByteSet(&pe, 0, da_mc[i], 0xff);
+
+			/* shift to ethertype */
+		mvPp2PrsSwSramShiftSet(&pe, 2 * MV_MAC_ADDR_SIZE, SRAM_OP_SEL_SHIFT_ADD);
+
+			/* no ports */
+			mvPp2PrsSwTcamPortMapSet(&pe, 0);
+
+			/* Update mvPrsShadowTbl */
+			mvPp2PrsShadowSet(pe.index, PRS_LU_MAC, rule_str[i]);
+		}
+
+		mvPp2PrsSwTcamPortSet(&pe, port, add);
+
+		mvPp2PrsHwWrite(&pe);
+	}
+
+	return MV_OK;
+}
+
+int mvPrsMhRxSpecialSet(int port, unsigned short mh, int add)
+{
+#if 0 /* this function to be removed in future */
+	MV_PP2_PRS_ENTRY pe;
+
+	if (mvPp2PrsShadowIsValid(PE_RX_SPECIAL)) {
+		/* Entry exist - update port only */
+		pe.index = PE_RX_SPECIAL;
+		mvPp2PrsHwRead(&pe);
+	} else {
+		/* Entry doesn't exist - create new */
+		mvPp2PrsSwClear(&pe);
+		mvPp2PrsSwTcamLuSet(&pe, PRS_LU_MAC);
+		pe.index = PE_RX_SPECIAL;
+
+		mvPp2PrsSwSramRiUpdate(&pe, RI_CPU_CODE_RX_SPEC, RI_CPU_CODE_MASK);
+		mvPp2PrsSwSramFlowidGenSet(&pe);
+		mvPp2PrsSwSramNextLuSet(&pe, PRS_LU_FLOWS);
+
+		/* Update mvPrsShadowTbl */
+		mvPp2PrsShadowSet(pe.index, PRS_LU_MAC, "RX special");
+
+		mvPp2PrsSwTcamPortMapSet(&pe, 0);
+	}
+
+	mvPp2PrsMatchMh(&pe, mh);
+	mvPp2PrsSwTcamPortSet(&pe, port, add);
+
+	mvPp2PrsHwWrite(&pe);
+#endif
+	return MV_OK;
+}
+
+/* Set default entires (place holder) for promiscous, non-promiscous and all-milticast MAC addresses */
+static int mvPp2PrsMacInit(void)
+{
+	MV_PP2_PRS_ENTRY pe;
+
+	mvPp2PrsSwClear(&pe);
+
+	/* Non-promiscous mode for all ports - DROP unknown packets */
+	pe.index = PE_MAC_NON_PROMISCOUS;
+	mvPp2PrsSwTcamLuSet(&pe, PRS_LU_MAC);
+	mvPp2PrsSwSramRiSetBit(&pe, RI_DROP_BIT);
+	/* mvPp2PrsSwSramLuDoneSet(&pe);*/
+
+	mvPp2PrsSwSramFlowidGenSet(&pe);
+	mvPp2PrsSwSramNextLuSet(&pe, PRS_LU_FLOWS);
+
+	/* Update mvPrsShadowTbl */
+	mvPp2PrsShadowSet(pe.index, PRS_LU_MAC, "non-promisc");
+
+	mvPp2PrsSwTcamPortMapSet(&pe, PORT_MASK);
+	mvPp2PrsHwWrite(&pe);
+
+	/* place holders only - no ports */
+	mvPrsMacDropAllSet(0, 0);
+	mvPrsMacPromiscousSet(0, 0);
+	mvPrsMacAllMultiSet(0, 0);
+
+	return MV_OK;
+}
+
+/******************************************************************************
+ *
+ * DSA Section
+ *
+ ******************************************************************************
+ */
+
+static int mvPp2PrsDsaTagEtherTypeSet(int port, int add, int tagged, int extend)
+{
+	MV_PP2_PRS_ENTRY pe;
+	char name[PRS_TEXT_SIZE];
+	int tid, shift, portMask;
+
+	/* if packet is tagged continue check vlans */
+
+	if (extend) {
+		if (tagged) {
+			tid = PE_ETYPE_EDSA_TAGGED;
+			mvOsSPrintf(name, "Etype-EDSA-tagged");
+		} else {
+			tid = PE_ETYPE_EDSA_UNTAGGED;
+			mvOsSPrintf(name, "Etype-EDSA-untagged");
+		}
+		portMask = 0;
+		shift = 8;
+	} else {
+
+		if (tagged) {
+			tid = PE_ETYPE_DSA_TAGGED;
+			mvOsSPrintf(name, "Etype-DSA-tagged");
+		} else {
+			tid = PE_ETYPE_DSA_UNTAGGED;
+			mvOsSPrintf(name, "Etype-DSA-untagged");
+		}
+		portMask = PORT_MASK;
+		shift = 4;
+	}
+
+	if (mvPp2PrsShadowIsValid(tid)) {
+		/* Entry exist - update port only */
+		pe.index = tid;
+		mvPp2PrsHwRead(&pe);
+	} else {
+		/* Entry doesn't exist - create new */
+		mvPp2PrsSwClear(&pe);
+		mvPp2PrsSwTcamLuSet(&pe, PRS_LU_DSA);
+		pe.index = tid;
+
+		/* set etherType*/
+		mvPp2PrsMatchEtype(&pe, 0, etherTypeDsa);
+		mvPp2PrsMatchEtype(&pe, 2, 0);
+
+		mvPp2PrsSwSramRiSetBit(&pe, RI_DSA_BIT);
+
+		/* shift etherType + 2 byte reserved + tag*/
+		mvPp2PrsSwSramShiftSet(&pe, 2 + MV_ETH_TYPE_LEN + shift, SRAM_OP_SEL_SHIFT_ADD);
+
+		mvPp2PrsShadowSet(pe.index, PRS_LU_DSA, name);
+
+		/* set tagged bit in DSA tag */
+		/* TODO use define */
+		if (tagged) {
+			/* set bit 29 in dsa tag */
+			mvPp2PrsSwTcamByteSet(&pe, MV_ETH_TYPE_LEN + 2 + 3, 0x20, 0x20);
+
+			/* Clear all AI bits for next iteration */
+			mvPp2PrsSwSramAiUpdate(&pe, 0, SRAM_AI_MASK);
+
+			/* mark vlan single RI */
+			mvPp2PrsSwSramRiUpdate(&pe, RI_VLAN_SINGLE, RI_VLAN_MASK);
+
+			mvPp2PrsSwSramNextLuSet(&pe, PRS_LU_VLAN);
+		} else {
+			/* Set result info bits - No valns ! */
+			mvPp2PrsSwSramRiUpdate(&pe, RI_VLAN_NONE, RI_VLAN_MASK);
+			mvPp2PrsSwSramNextLuSet(&pe, PRS_LU_L2);
+		}
+		/* all ports enabled */
+		mvPp2PrsSwTcamPortMapSet(&pe, portMask);
+	}
+
+	mvPp2PrsSwTcamPortSet(&pe, port, add);
+
+	mvPp2PrsHwWrite(&pe);
+
+	return MV_OK;
+}
+
+
+static int mvPp2PrsDsaTagSet(int port, int add, int tagged, int extend)
+{
+	MV_PP2_PRS_ENTRY pe;
+	char name[PRS_TEXT_SIZE];
+	int tid, shift;
+
+	/* if packet is tagged continue check vlans */
+
+	if (extend) {
+		if (tagged) {
+			tid = PE_EDSA_TAGGED;
+			mvOsSPrintf(name, "EDSA-tagged");
+		} else {
+			tid = PE_EDSA_UNTAGGED;
+			mvOsSPrintf(name, "EDSA-untagged");
+		}
+
+		shift = 8;
+	} else {
+
+		if (tagged) {
+			tid = PE_DSA_TAGGED;
+			mvOsSPrintf(name, "DSA-tagged");
+		} else {
+			tid = PE_DSA_UNTAGGED;
+			mvOsSPrintf(name, "DSA-untagged");
+		}
+
+		shift = 4;
+	}
+
+	if (mvPp2PrsShadowIsValid(tid)) {
+		/* Entry exist - update port only */
+		pe.index = tid;
+		mvPp2PrsHwRead(&pe);
+	} else {
+		/* Entry doesn't exist - create new */
+		mvPp2PrsSwClear(&pe);
+		mvPp2PrsSwTcamLuSet(&pe, PRS_LU_DSA);
+		pe.index = tid;
+
+
+		/* shift 4 bytes if DSA tag , Skip 8 bytes if extand DSA tag*/
+		mvPp2PrsSwSramShiftSet(&pe, shift, SRAM_OP_SEL_SHIFT_ADD);
+
+		mvPp2PrsShadowSet(pe.index, PRS_LU_DSA, name);
+
+		/* set tagged bit in DSA tag */
+		/* TODO use define */
+		if (tagged) {
+			mvPp2PrsSwTcamByteSet(&pe, 0, 0x20, 0x20);
+
+			/* Clear all AI bits for next iteration */
+			mvPp2PrsSwSramAiUpdate(&pe, 0, SRAM_AI_MASK);
+
+			/* mark vlan single RI */
+			mvPp2PrsSwSramRiUpdate(&pe, RI_VLAN_SINGLE, RI_VLAN_MASK);
+
+			mvPp2PrsSwSramNextLuSet(&pe, PRS_LU_VLAN);
+		} else {
+			/* Set result info bits - No valns ! */
+			mvPp2PrsSwSramRiUpdate(&pe, RI_VLAN_NONE, RI_VLAN_MASK);
+			mvPp2PrsSwSramNextLuSet(&pe, PRS_LU_L2);
+		}
+
+		mvPp2PrsSwTcamPortMapSet(&pe, 0);
+	}
+
+	mvPp2PrsSwTcamPortSet(&pe, port, add);
+
+	mvPp2PrsHwWrite(&pe);
+
+	return MV_OK;
+}
+
+
+
+static int mvPp2PrsDsaInit(void)
+{
+	MV_PP2_PRS_ENTRY pe;
+
+	etherTypeDsa = DSA_ETHER_TYPE;
+
+	/* none tagged EDSA entry -place holder */
+	mvPp2PrsDsaTagSet(0, 0, UNTAGGED, EDSA);
+
+	/* tagged EDSA entry -place holder */
+	mvPp2PrsDsaTagSet(0, 0, TAGGED, EDSA);
+
+	/* none tagged DSA entry -place holder */
+	mvPp2PrsDsaTagSet(0, 0, UNTAGGED, DSA);
+
+	/* tagged DSA entry -place holder */
+	mvPp2PrsDsaTagSet(0, 0, TAGGED, DSA);
+
+	/* none tagged EDSA EtherType entry - place holder*/
+	mvPp2PrsDsaTagEtherTypeSet(0, 0, UNTAGGED, EDSA);
+
+	/* tagged EDSA EtherType entry - place holder*/
+	mvPp2PrsDsaTagEtherTypeSet(0, 0, TAGGED, EDSA);
+
+	/* none tagged DSA EtherType entry */
+	mvPp2PrsDsaTagEtherTypeSet(0, 1, UNTAGGED, DSA);
+
+	/* tagged DSA EtherType entry */
+	mvPp2PrsDsaTagEtherTypeSet(0, 1, TAGGED, DSA);
+
+	/* default entry , if DSA or EDSA tag not found */
+	mvPp2PrsSwClear(&pe);
+	mvPp2PrsSwTcamLuSet(&pe, PRS_LU_DSA);
+	pe.index = PE_DSA_DEFAULT;
+	mvPp2PrsSwSramNextLuSet(&pe, PRS_LU_VLAN);
+
+	/* shift 0 bytes */
+	mvPp2PrsSwSramShiftSet(&pe, 0, SRAM_OP_SEL_SHIFT_ADD);
+	mvPp2PrsShadowSet(pe.index, PRS_LU_MAC, "default-DSA-tag");
+
+	/* Clear all AI bits for next iteration */
+	mvPp2PrsSwSramAiUpdate(&pe, 0, SRAM_AI_MASK);
+
+	/* match for all ports*/
+	mvPp2PrsSwTcamPortMapSet(&pe, PORT_MASK);
+	mvPp2PrsHwWrite(&pe);
+
+	return MV_OK;
+}
+
+int mvPp2PrsEtypeDsaSet(unsigned int eType)
+{
+	int tid;
+
+	MV_PP2_PRS_ENTRY pe;
+
+	etherTypeDsa = eType;
+
+	for (tid = PE_ETYPE_EDSA_TAGGED; tid <= PE_ETYPE_DSA_UNTAGGED; tid++) {
+
+		pe.index = tid;
+
+		mvPp2PrsHwRead(&pe);
+
+		/* overrwite old etherType*/
+		mvPp2PrsMatchEtype(&pe, 0, etherTypeDsa);
+
+		mvPp2PrsHwWrite(&pe);
+	}
+
+	return MV_OK;
+}
+
+int mvPp2PrsEtypeDsaModeSet(int port, int extand)
+{
+	POS_RANGE_VALIDATE(port, MV_PP2_MAX_PORTS - 1);
+	POS_RANGE_VALIDATE(extand, 1);
+
+	if (extand) {
+		mvPp2PrsDsaTagEtherTypeSet(port, 1, UNTAGGED, EDSA);
+		mvPp2PrsDsaTagEtherTypeSet(port, 1, TAGGED, EDSA);
+		mvPp2PrsDsaTagEtherTypeSet(port, 0, UNTAGGED, DSA);
+		mvPp2PrsDsaTagEtherTypeSet(port, 0, TAGGED, DSA);
+	} else {
+		mvPp2PrsDsaTagEtherTypeSet(port, 0, UNTAGGED, EDSA);
+		mvPp2PrsDsaTagEtherTypeSet(port, 0, TAGGED, EDSA);
+		mvPp2PrsDsaTagEtherTypeSet(port, 1, UNTAGGED, DSA);
+		mvPp2PrsDsaTagEtherTypeSet(port, 1, TAGGED, DSA);
+	}
+	return MV_OK;
+}
+
+int mvPp2PrsTagModeSet(int port, int type)
+{
+
+	POS_RANGE_VALIDATE(port, MV_PP2_MAX_PORTS - 1);
+
+	switch (type) {
+
+	case MV_TAG_TYPE_EDSA:
+		/* Add port to EDSA entries */
+		mvPp2PrsDsaTagSet(port, 1, TAGGED, EDSA);
+		mvPp2PrsDsaTagSet(port, 1, UNTAGGED, EDSA);
+		/* remove port from DSA entries */
+		mvPp2PrsDsaTagSet(port, 0, TAGGED, DSA);
+		mvPp2PrsDsaTagSet(port, 0, UNTAGGED, DSA);
+
+		break;
+
+	case MV_TAG_TYPE_DSA:
+		/* Add port to DSA entries */
+		mvPp2PrsDsaTagSet(port, 1, TAGGED, DSA);
+		mvPp2PrsDsaTagSet(port, 1, UNTAGGED, DSA);
+
+		/* remove port from EDSA entries */
+		mvPp2PrsDsaTagSet(port, 0, TAGGED, EDSA);
+		mvPp2PrsDsaTagSet(port, 0, UNTAGGED, EDSA);
+
+		break;
+
+	case MV_TAG_TYPE_MH:
+	case MV_TAG_TYPE_NONE:
+
+		/* remove port form EDSA and DSA entries */
+		mvPp2PrsDsaTagSet(port, 0, TAGGED, DSA);
+		mvPp2PrsDsaTagSet(port, 0, UNTAGGED, DSA);
+		mvPp2PrsDsaTagSet(port, 0, TAGGED, EDSA);
+		mvPp2PrsDsaTagSet(port, 0, UNTAGGED, EDSA);
+
+		break;
+
+	default:
+		POS_RANGE_VALIDATE(type, MV_TAG_TYPE_EDSA);
+	}
+
+	return MV_OK;
+
+}
+
+
+/******************************************************************************
+ *
+ * VLAN Section
+ *
+ ******************************************************************************
+ */
+
+char *mvPrsVlanInfoStr(unsigned int vlan_info)
+{
+	switch (vlan_info << RI_VLAN_OFFS) {
+	case RI_VLAN_NONE:
+		return "None";
+	case RI_VLAN_SINGLE:
+		return "Single";
+	case RI_VLAN_DOUBLE:
+		return "Double";
+	case RI_VLAN_TRIPLE:
+		return "Triple";
+	default:
+		return "Unknown";
+	}
+	return NULL;
+}
+
+static MV_PP2_PRS_ENTRY *mvPrsVlanFind(unsigned short tpid, int ai)
+{
+	MV_PP2_PRS_ENTRY *pe;
+	int tid;
+	unsigned int riBits, aiBits, enable;
+	unsigned char tpidArr[2];
+
+	pe = mvPp2PrsSwAlloc(PRS_LU_VLAN);
+
+#ifdef MV_CPU_LE
+	tpidArr[0] = ((unsigned char *)&tpid)[1];
+	tpidArr[1] = ((unsigned char *)&tpid)[0];
+#else
+	tpidArr[0] = ((unsigned char *)&tpid)[0];
+	tpidArr[1] = ((unsigned char *)&tpid)[1];
+#endif
+	/* Go through the all entires with PRS_LU_MAC */
+	for (tid = PE_FIRST_FREE_TID ; tid <= PE_LAST_FREE_TID; tid++) {
+		if ((!mvPp2PrsShadowIsValid(tid)) || (mvPp2PrsShadowLu(tid) != PRS_LU_VLAN))
+			continue;
+
+		pe->index = tid;
+
+		mvPp2PrsHwRead(pe);
+		if (mvPp2PrsSwTcamBytesIgnorMaskCmp(pe, 0, 2, tpidArr) == EQUALS) {
+			mvPp2PrsSwSramRiGet(pe, &riBits, &enable);
+
+			/* get Vlan type */
+			riBits = (riBits & RI_VLAN_MASK);
+
+			/* get current AI value Tcam */
+			mvPp2PrsSwTcamAiGet(pe, &aiBits, &enable);
+
+			/* clear double Vlan Bit */
+			aiBits &= ~(1 << DBL_VLAN_AI_BIT);
+
+			if (ai != aiBits)
+				continue;
+
+			if ((riBits == RI_VLAN_SINGLE) || (riBits == RI_VLAN_TRIPLE))
+				return pe;
+		}
+	}
+	mvPp2PrsSwFree(pe);
+	return NULL;
+}
+
+int mvPrsVlanExist(unsigned short tpid, int ai)
+{
+	if (NULL == mvPrsVlanFind(tpid, ai))
+		return 0;
+	else
+		return 1;
+}
+
+static MV_PP2_PRS_ENTRY *mvPrsDoubleVlanFind(unsigned short tpid1, unsigned short tpid2)
+{
+	MV_PP2_PRS_ENTRY *pe;
+	int tid;
+	unsigned int bits, enable;
+	unsigned char tpidArr1[2];
+	unsigned char tpidArr2[2];
+
+#ifdef MV_CPU_LE
+	tpidArr1[0] = ((unsigned char *)&tpid1)[1];
+	tpidArr1[1] = ((unsigned char *)&tpid1)[0];
+
+	tpidArr2[0] = ((unsigned char *)&tpid2)[1];
+	tpidArr2[1] = ((unsigned char *)&tpid2)[0];
+#else /* MV_CPU_LE */
+	tpidArr1[0] = ((unsigned char *)&tpid1)[0];
+	tpidArr1[1] = ((unsigned char *)&tpid1)[1];
+
+	tpidArr2[0] = ((unsigned char *)&tpid2)[0];
+	tpidArr2[1] = ((unsigned char *)&tpid2)[1];
+#endif
+
+	pe = mvPp2PrsSwAlloc(PRS_LU_VLAN);
+
+	/* Go through the all entires with PRS_LU_MAC */
+	for (tid = PE_FIRST_FREE_TID ; tid <= PE_LAST_FREE_TID; tid++) {
+		if ((!mvPp2PrsShadowIsValid(tid)) || (mvPp2PrsShadowLu(tid) != PRS_LU_VLAN))
+			continue;
+
+		pe->index = tid;
+		mvPp2PrsHwRead(pe);
+		if ((mvPp2PrsSwTcamBytesIgnorMaskCmp(pe, 0, 2, tpidArr1) == EQUALS) &&
+		    (mvPp2PrsSwTcamBytesIgnorMaskCmp(pe, 4, 2, tpidArr2) == EQUALS)) {
+
+			mvPp2PrsSwSramRiGet(pe, &bits, &enable);
+
+			if ((bits & RI_VLAN_MASK) == RI_VLAN_DOUBLE)
+				return pe;
+		}
+	}
+	mvPp2PrsSwFree(pe);
+	return NULL;
+}
+
+int mvPrsDoubleVlanExist(unsigned short tpid1, unsigned short tpid2)
+{
+	if (NULL == mvPrsDoubleVlanFind(tpid1, tpid2))
+		return 0;
+	else
+		return 1;
+}
+
+/* return last double vlan entry */
+static int mvPpPrsDoubleVlanLast(void)
+{
+	MV_PP2_PRS_ENTRY pe;
+	unsigned int bits, enable;
+	int tid;
+
+	for (tid = PE_LAST_FREE_TID; tid >= PE_FIRST_FREE_TID; tid--) {
+		if ((!mvPp2PrsShadowIsValid(tid)) || (mvPp2PrsShadowLu(tid) != PRS_LU_VLAN))
+			continue;
+
+		pe.index = tid;
+		mvPp2PrsHwRead(&pe);
+		mvPp2PrsSwSramRiGet(&pe, &bits, &enable);
+
+		if ((bits & RI_VLAN_MASK) == RI_VLAN_DOUBLE)
+			return tid;
+	}
+	return tid;
+}
+
+/* return first Single or Triple vlan entry */
+static int mvPpPrsVlanFirst(void)
+{
+	MV_PP2_PRS_ENTRY pe;
+	unsigned int bits, enable;
+	int tid;
+
+	for (tid = PE_FIRST_FREE_TID; tid <= PE_LAST_FREE_TID; tid++) {
+		if ((!mvPp2PrsShadowIsValid(tid)) || (mvPp2PrsShadowLu(tid) != PRS_LU_VLAN))
+			continue;
+
+		pe.index = tid;
+		mvPp2PrsHwRead(&pe);
+		mvPp2PrsSwSramRiGet(&pe, &bits, &enable);
+
+		bits &= RI_VLAN_MASK;
+
+		if ((bits == RI_VLAN_SINGLE) || (bits == RI_VLAN_TRIPLE))
+			return tid;
+	}
+	return tid;
+}
+
+static int mvPp2PrsVlanAdd(unsigned short tpid, int ai, unsigned int portBmp)
+{
+	int lastDouble, tid, status = 0;
+	MV_PP2_PRS_ENTRY *pe = NULL;
+	char name[PRS_TEXT_SIZE];
+
+	pe = mvPrsVlanFind(tpid, ai);
+
+	if (pe == NULL) {
+
+		/* Create new TCAM entry */
+		tid = mvPp2PrsTcamFirstFree(PE_LAST_FREE_TID, PE_FIRST_FREE_TID);
+
+		if (tid == MV_PRS_OUT_OF_RAGE) {
+			mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+			return MV_ERROR;
+		}
+
+		/* get last double vlan tid */
+		lastDouble = mvPpPrsDoubleVlanLast();
+
+		if (tid <= lastDouble) {
+			/* double vlan entries overlapping*/
+			mvOsPrintf("%s:Can't add entry, please remove unnecessary triple or single vlans entries.\n",
+				   __func__);
+			return MV_ERROR;
+		}
+
+		pe = mvPp2PrsSwAlloc(PRS_LU_VLAN);
+		pe->index = tid;
+
+		mvPp2PrsMatchEtype(pe, 0, tpid);
+
+		/* Clear all AI bits for next iteration */
+		status |= mvPp2PrsSwSramAiUpdate(pe, 0, SRAM_AI_MASK);
+
+		/* Continue - set next lookup */
+		status |= mvPp2PrsSwSramNextLuSet(pe, PRS_LU_L2);
+
+		/* shift 4 bytes - skip 1 VLAN tags */
+		status |= mvPp2PrsSwSramShiftSet(pe, MV_VLAN_HLEN, SRAM_OP_SEL_SHIFT_ADD);
+
+		if (ai == SINGLE_VLAN_AI) {
+			/* single vlan*/
+			mvOsSPrintf(name, "single-VLAN");
+			mvPp2PrsSwSramRiUpdate(pe, RI_VLAN_SINGLE, RI_VLAN_MASK);
+		} else {
+			/* triple vlan*/
+			mvOsSPrintf(name, "triple-VLAN-%d", ai);
+			ai |= (1 << DBL_VLAN_AI_BIT);
+			mvPp2PrsSwSramRiUpdate(pe, RI_VLAN_TRIPLE, RI_VLAN_MASK);
+		}
+
+		status |= mvPp2PrsSwTcamAiUpdate(pe, ai, SRAM_AI_MASK);
+
+		mvPp2PrsShadowSet(pe->index, PRS_LU_VLAN, name);
+	}
+
+	status |= mvPp2PrsSwTcamPortMapSet(pe, portBmp);
+
+	if (status == 0)
+		mvPp2PrsHwWrite(pe);
+
+	mvPp2PrsSwFree(pe);
+
+	return MV_OK;
+}
+
+static int mvPp2PrsVlanDel(unsigned short tpid, int ai)
+{
+	MV_PP2_PRS_ENTRY *pe = NULL;
+
+	pe = mvPrsVlanFind(tpid, ai);
+
+	if (pe == NULL) {
+		/* No such entry */
+		mvOsPrintf("Can't remove - No such entry\n");
+		return MV_ERROR;
+	}
+
+	/* remove entry */
+	mvPp2PrsHwInv(pe->index);
+	mvPp2PrsShadowClear(pe->index);
+	mvPp2PrsSwFree(pe);
+	return MV_OK;
+}
+
+int mvPp2PrsDoubleVlanAdd(unsigned short tpid1, unsigned short tpid2, unsigned int portBmp)
+{
+	int tid, ai, status = 0;
+	int firstVlan;
+	MV_PP2_PRS_ENTRY *pe = NULL;
+	char name[PRS_TEXT_SIZE];
+
+	pe = mvPrsDoubleVlanFind(tpid1, tpid2);
+
+	if (pe == NULL) {
+
+		/* Create new TCAM entry */
+
+		tid = mvPp2PrsTcamFirstFree(PE_FIRST_FREE_TID, PE_LAST_FREE_TID);
+
+		if (tid == MV_PRS_OUT_OF_RAGE) {
+			mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+			return MV_ERROR;
+		}
+
+		pe = mvPp2PrsSwAlloc(PRS_LU_VLAN);
+		pe->index = tid;
+
+		/* set AI value for new double vlan entry */
+		ai = mvPrsDblVlanAiFreeGet();
+
+		if (ai == MV_PRS_OUT_OF_RAGE) {
+			mvOsPrintf("%s: Can't add - number of Double vlan rules reached to maximum.\n", __func__);
+			return MV_ERROR;
+		}
+
+		/* get first single/triple vlan tid */
+		firstVlan = mvPpPrsVlanFirst();
+
+		if (tid >= firstVlan) {
+			/* double vlan entries overlapping*/
+			mvOsPrintf("%s:Can't add entry, please remove unnecessary double vlans entries.\n", __func__);
+			return MV_ERROR;
+		}
+
+		mvPrsDblVlanAiShadowSet(ai);
+
+		mvPp2PrsMatchEtype(pe, 0, tpid1);
+		mvPp2PrsMatchEtype(pe, 4, tpid2);
+
+		/* Set AI value in SRAM for double vlan */
+		status |= mvPp2PrsSwSramAiUpdate(pe, (ai | (1 << DBL_VLAN_AI_BIT)), SRAM_AI_MASK);
+
+		/* Continue - set next lookup, */
+		status |= mvPp2PrsSwSramNextLuSet(pe, PRS_LU_VLAN);
+
+		/* Set result info bits */
+		status |= mvPp2PrsSwSramRiUpdate(pe, RI_VLAN_DOUBLE, RI_VLAN_MASK);
+
+		/* shift 8 bytes - skip 2 VLAN tags */
+		status |= mvPp2PrsSwSramShiftSet(pe, 2 * MV_VLAN_HLEN, SRAM_OP_SEL_SHIFT_ADD);
+
+		/* Update mvPrsShadowTbl */
+		mvOsSPrintf(name, "double-VLAN-%d", ai);
+		mvPp2PrsShadowSet(pe->index, PRS_LU_VLAN, name);
+
+	}
+	/* set ports bitmap*/
+	status |= mvPp2PrsSwTcamPortMapSet(pe, portBmp);
+
+	if (status == 0)
+		mvPp2PrsHwWrite(pe);
+
+	mvPp2PrsSwFree(pe);
+
+	return status;
+}
+
+int mvPp2PrsDoubleVlanDel(unsigned short tpid1, unsigned short tpid2)
+{
+	MV_PP2_PRS_ENTRY *pe = NULL;
+	unsigned int ai, enable;
+
+
+	pe = mvPrsDoubleVlanFind(tpid1, tpid2);
+
+	if (pe == NULL) {
+		/* No such entry */
+		mvOsPrintf("Can't remove - No such entry\n");
+		return MV_ERROR;
+	}
+
+	/* TODO - remove all corresponding vlan triples  */
+
+	mvPp2PrsSwSramAiGet(pe, &ai, &enable);
+	/* remove double vlan AI sign*/
+	ai &= ~(1 << DBL_VLAN_AI_BIT);
+	mvPrsDblVlanAiShadowClear(ai);
+
+	/* remove entry */
+	mvPp2PrsHwInv(pe->index);
+	mvPp2PrsShadowClear(pe->index);
+	mvPp2PrsSwFree(pe);
+	return MV_OK;
+
+}
+
+
+int mvPp2PrsSingleVlan(unsigned short tpid, unsigned int portBmp, int add)
+{
+	if (add)
+		return mvPp2PrsVlanAdd(tpid, SINGLE_VLAN_AI, portBmp);
+	else
+		return mvPp2PrsVlanDel(tpid, SINGLE_VLAN_AI);
+}
+
+
+int mvPp2PrsDoubleVlan(unsigned short tpid1, unsigned short tpid2, unsigned int portBmp, int add)
+{
+	if (add)
+		return mvPp2PrsDoubleVlanAdd(tpid1, tpid2, portBmp);
+	else
+		return mvPp2PrsDoubleVlanDel(tpid1, tpid2);
+}
+
+int mvPp2PrsTripleVlan(unsigned short tpid1, unsigned short tpid2, unsigned short tpid3, unsigned int portBmp, int add)
+{
+	MV_PP2_PRS_ENTRY *pe;
+	unsigned int ai, aiEnable;
+	int status;
+
+	pe = mvPrsDoubleVlanFind(tpid1, tpid2);
+
+	if (!pe) {
+		if (add)
+			mvOsPrintf("User must enter first double vlan <0x%x,0x%x> before triple\n", tpid1, tpid2);
+		else
+			mvOsPrintf("Can't remove - No such entry\n");
+
+		return MV_ERROR;
+	}
+
+	/* get AI value form double VLAN entry */
+	mvPp2PrsSwSramAiGet(pe, &ai, &aiEnable);
+
+	ai &= ~(1 << DBL_VLAN_AI_BIT);
+
+	if (add)
+		status = mvPp2PrsVlanAdd(tpid3, ai, portBmp);
+	else
+		status = mvPp2PrsVlanDel(tpid3, ai);
+
+	mvPp2PrsSwFree(pe);
+
+	return status;
+}
+
+/* Detect up to 2 successive VLAN tags:
+ * Possible options:
+ * 0x8100, 0x88A8
+ * 0x8100, 0x8100
+ * 0x8100
+ * 0x88A8
+ */
+static int mvPp2PrsVlanInit(void)
+{
+	MV_PP2_PRS_ENTRY pe;
+
+	mvPrsDblVlanAiShadowClearAll();
+
+	/* double VLAN: 0x8100, 0x88A8 */
+	if (mvPp2PrsDoubleVlan(MV_VLAN_TYPE, MV_VLAN_1_TYPE, PORT_MASK, 1))
+		return MV_ERROR;
+
+	/* double VLAN: 0x8100, 0x8100 */
+	if (mvPp2PrsDoubleVlan(MV_VLAN_TYPE, MV_VLAN_TYPE, PORT_MASK, 1))
+		return MV_ERROR;
+
+	/* single VLAN: 0x88a8 */
+	if (mvPp2PrsSingleVlan(MV_VLAN_1_TYPE, PORT_MASK, 1))
+		return MV_ERROR;
+
+	/* single VLAN: 0x8100 */
+	if (mvPp2PrsSingleVlan(MV_VLAN_TYPE, PORT_MASK, 1))
+		return MV_ERROR;
+
+	/*---------------------------------*/
+	/*  Set default double vlan entry  */
+	/*---------------------------------*/
+	mvPp2PrsSwClear(&pe);
+	mvPp2PrsSwTcamLuSet(&pe, PRS_LU_VLAN);
+	pe.index = PE_VLAN_DBL;
+	/* Continue - set next lookup */
+	mvPp2PrsSwSramNextLuSet(&pe, PRS_LU_L2);
+
+	/* double vlan AI bit */
+	mvPp2PrsSwTcamAiUpdate(&pe, (1 << DBL_VLAN_AI_BIT), (1 << DBL_VLAN_AI_BIT));
+
+	/* clear AI for next iterations */
+	mvPp2PrsSwSramAiUpdate(&pe, 0, SRAM_AI_MASK);
+
+	/* Set result info bits */
+	mvPp2PrsSwSramRiUpdate(&pe, RI_VLAN_DOUBLE, RI_VLAN_MASK);
+
+	mvPp2PrsSwTcamPortMapSet(&pe, PORT_MASK);
+	mvPp2PrsHwWrite(&pe);
+
+	/* Update mvPrsShadowTbl */
+	mvPp2PrsShadowSet(pe.index, PRS_LU_VLAN, "double-VLAN-accept");
+
+	/*---------------------------------*/
+	/*   Set default vlan none entry   */
+	/*---------------------------------*/
+
+	mvPp2PrsSwClear(&pe);
+	mvPp2PrsSwTcamLuSet(&pe, PRS_LU_VLAN);
+	pe.index = PE_VLAN_NONE;
+	/* Continue - set next lookup */
+	mvPp2PrsSwSramNextLuSet(&pe, PRS_LU_L2);
+
+	/* Set result info bits */
+	mvPp2PrsSwSramRiUpdate(&pe, RI_VLAN_NONE, RI_VLAN_MASK);
+
+	mvPp2PrsSwTcamPortMapSet(&pe, PORT_MASK);
+	mvPp2PrsHwWrite(&pe);
+
+	/* Update mvPrsShadowTbl */
+	mvPp2PrsShadowSet(pe.index, PRS_LU_VLAN, "no-VLAN");
+
+	return MV_OK;
+}
+
+/* remove all vlan entries */
+int mvPp2PrsVlanAllDel(void)
+{
+	int tid;
+
+	/* clear doublr Vlan shadow */
+	mvPrsDblVlanAiShadowClearAll();
+
+	for (tid = PE_FIRST_FREE_TID ; tid <= PE_LAST_FREE_TID; tid++) {
+		if (mvPp2PrsShadowIsValid(tid) && (mvPp2PrsShadowLu(tid) == PRS_LU_VLAN)) {
+			mvPp2PrsHwInv(tid);
+			mvPp2PrsShadowClear(tid);
+		}
+	}
+
+	return MV_OK;
+}
+/******************************************************************************
+ *
+ * Ethertype Section
+ *
+ ******************************************************************************
+ */
+/*TODO USE this function for all def etypres creation */
+static int mvPrsEthTypeCreate(int portMap, unsigned short eth_type, unsigned int ri, unsigned int riMask)
+{
+	int tid;
+	MV_PP2_PRS_ENTRY *pe;
+	/* Go through the all entires from first to last */
+	tid = mvPp2PrsTcamFirstFree(PE_FIRST_FREE_TID, PE_LAST_FREE_TID);
+
+	/* Can't add - No free TCAM entries */
+	if (tid == MV_PRS_OUT_OF_RAGE) {
+		mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+		return MV_ERROR;
+	}
+
+	pe = mvPp2PrsSwAlloc(PRS_LU_L2);
+
+	pe->index = tid;
+
+	mvPp2PrsMatchEtype(pe, 0, eth_type);
+
+	mvPp2PrsSwSramRiSet(pe, ri, riMask);
+	mvPp2PrsSwTcamPortMapSet(pe, portMap);
+	/* Continue - set next lookup */
+
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_FLOWS);
+	mvPp2PrsSwSramFlowidGenSet(pe);
+
+	mvPp2PrsHwWrite(pe);
+
+	mvPp2PrsShadowSet(pe->index, PRS_LU_L2, "etype-user-define");
+	mvPp2PrsShadowUdfSet(pe->index, PRS_UDF_L2_USER);
+	mvPp2PrsShadowRiSet(pe->index, ri, riMask);
+
+	mvPp2PrsSwFree(pe);
+
+	return MV_OK;
+}
+
+static int mvPrsEthTypeValid(unsigned int portMap, unsigned short ethertype)
+{
+	MV_PP2_PRS_ENTRY pe;
+	unsigned int entryPmap;
+	int tid;
+
+	for (tid = PE_LAST_FREE_TID ; tid >= PE_FIRST_FREE_TID; tid--) {
+		if (!mvPp2PrsShadowIsValid(tid) || (mvPp2PrsShadowLu(tid) != PRS_LU_L2))
+			continue;
+
+		pe.index = tid;
+		mvPp2PrsHwRead(&pe);
+
+		if (!mvPp2PrsEtypeEquals(&pe, 0, ethertype))
+			continue;
+
+		/* in default entries portmask must be 0xff */
+		if ((mvPp2PrsShadowUdf(tid) == PRS_UDF_L2_DEF) && (portMap != PORT_MASK)) {
+			mvOsPrintf("%s: operation not supported.\n", __func__);
+			mvOsPrintf("%s: ports map must be 0xFF for default ether type\n", __func__);
+			return MV_ERROR;
+
+		} else {
+
+			/* port maps cannot intersection in User entries*/
+			/* PRS_UDF_L2_USER */
+			mvPp2PrsSwTcamPortMapGet(&pe, &entryPmap);
+			if ((portMap & entryPmap) && (portMap != entryPmap)) {
+				mvOsPrintf("%s: operation not supported\n", __func__);
+				mvOsPrintf("%s: user must delete portMap 0x%x from entry %d.\n",
+					__func__, entryPmap & portMap, tid);
+				return MV_ERROR;
+			}
+		}
+	}
+	return MV_OK;
+}
+
+int mvPrsEthTypeSet(int portMap, unsigned short ethertype, unsigned int ri, unsigned int riMask, MV_BOOL finish)
+{
+	MV_PP2_PRS_ENTRY pe;
+	int tid;
+	unsigned int entryPmap;
+	MV_BOOL done = MV_FALSE;
+
+	/* step 1 - validation */
+	if (mvPrsEthTypeValid(portMap, ethertype))
+		return MV_ERROR;
+
+
+	/* step 2 - update TCAM */
+	for (tid = PE_FIRST_FREE_TID ; tid <= PE_LAST_FREE_TID; tid++) {
+		if (!mvPp2PrsShadowIsValid(tid) || (mvPp2PrsShadowLu(tid) != PRS_LU_L2))
+			continue;
+
+		pe.index = tid;
+		mvPp2PrsHwRead(&pe);
+
+		if (!mvPp2PrsEtypeEquals(&pe, 0, ethertype))
+			continue;
+
+		mvPp2PrsSwTcamPortMapGet(&pe, &entryPmap);
+
+		if (entryPmap != portMap)
+			continue;
+
+		done = MV_TRUE;
+		mvPp2PrsSwSramRiUpdate(&pe, ri, riMask);
+
+		if ((mvPp2PrsShadowUdf(tid) == PRS_UDF_L2_USER) || finish)
+			mvPp2PrsSwSramFlowidGenSet(&pe);
+
+		mvPp2PrsHwWrite(&pe);
+	}
+	/* step 3 - Add new ethertype entry */
+	if (!done)
+		return mvPrsEthTypeCreate(portMap, ethertype, ri, riMask);
+
+	return MV_OK;
+}
+
+int mvPrsEthTypeDel(int portMap, unsigned short ethertype)
+{
+	MV_PP2_PRS_ENTRY pe;
+	unsigned int entryPmap;
+	int tid;
+
+	for (tid = PE_FIRST_FREE_TID; tid <= PE_LAST_FREE_TID; tid++) {
+
+		if (!mvPp2PrsShadowIsValid(tid) || (mvPp2PrsShadowLu(tid) != PRS_LU_L2))
+			continue;
+
+		/* EtherType entry */
+		pe.index = tid;
+		mvPp2PrsHwRead(&pe);
+
+		if (!mvPp2PrsEtypeEquals(&pe, 0, ethertype))
+			continue;
+
+		if (mvPp2PrsShadowUdf(tid) == PRS_UDF_L2_DEF) {
+			if (portMap != PORT_MASK) {
+				mvOsPrintf("%s: ports map must be 0xFF for default ether type\n", __func__);
+				return MV_ERROR;
+			}
+
+			mvPp2PrsSwSramRiSet(&pe, mvPp2PrsShadowRi(tid), mvPp2PrsShadowRiMask(tid));
+
+			if (!mvPp2PrsShadowFin(tid))
+				mvPp2PrsSwSramFlowidGenClear(&pe);
+
+			mvPp2PrsHwWrite(&pe);
+
+			continue;
+		}
+
+		/*PRS_UDF_L2_USER */
+
+		mvPp2PrsSwTcamPortMapGet(&pe, &entryPmap);
+		/*mask ports in user entry */
+		entryPmap &= ~portMap;
+
+		if (entryPmap == 0) {
+			mvPp2PrsHwInv(tid);
+			mvPp2PrsShadowClear(tid);
+			continue;
+		}
+
+		mvPp2PrsSwTcamPortMapSet(&pe, entryPmap);
+		mvPp2PrsHwWrite(&pe);
+	}
+	return MV_OK;
+}
+
+static int mvPp2PrsEtypePppoe(void)
+{
+	int tid;
+	MV_PP2_PRS_ENTRY *pe;
+
+	PRS_DBG("%s\n", __func__);
+
+	/* Go through the all entires from first to last */
+	tid = mvPp2PrsTcamFirstFree(0, MV_PP2_PRS_TCAM_SIZE - 1);
+
+	/* Can't add - No free TCAM entries */
+	if (tid == MV_PRS_OUT_OF_RAGE) {
+		mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+		return MV_ERROR;
+	}
+
+	pe = mvPp2PrsSwAlloc(PRS_LU_L2);
+
+	pe->index = tid;
+
+	mvPp2PrsMatchEtype(pe, 0, MV_PPPOE_TYPE);
+	mvPp2PrsSwSramShiftSet(pe, MV_PPPOE_HDR_SIZE, SRAM_OP_SEL_SHIFT_ADD);
+
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_PPPOE);
+	mvPp2PrsSwSramRiSetBit(pe, RI_PPPOE_BIT);
+	mvPp2PrsHwWrite(pe);
+
+	/* Update mvPrsShadowTbl */
+	mvPp2PrsShadowSet(pe->index, PRS_LU_L2, "etype-PPPoE");
+	mvPp2PrsShadowUdfSet(pe->index, PRS_UDF_L2_DEF);
+	mvPp2PrsShadowRiSet(pe->index, RI_PPPOE_MASK, RI_PPPOE_MASK);
+	mvPp2PrsShadowFinSet(pe->index, MV_FALSE);
+	mvPp2PrsSwFree(pe);
+
+	return MV_OK;
+}
+
+
+/* match ip4 and ihl == 5 */
+static int mvPp2PrsEtypeIp4(void)
+{
+	int tid;
+	MV_PP2_PRS_ENTRY *pe;
+
+	PRS_DBG("%s\n", __func__);
+
+	/* IPv4 without options */
+	/* Go through the all entires from first to last */
+	tid = mvPp2PrsTcamFirstFree(0, MV_PP2_PRS_TCAM_SIZE - 1);
+
+	/* Can't add - No free TCAM entries */
+	if (tid == MV_PRS_OUT_OF_RAGE) {
+		mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+		return MV_ERROR;
+	}
+
+	pe = mvPp2PrsSwAlloc(PRS_LU_L2);
+	pe->index = tid;
+
+	mvPp2PrsMatchEtype(pe, 0, MV_IP_TYPE);
+	mvPp2PrsSwTcamByteSet(pe, MV_ETH_TYPE_LEN + 0, 0x45, 0xff);
+
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_IP4);
+	mvPp2PrsSwSramRiUpdate(pe, RI_L3_IP4, RI_L3_PROTO_MASK);
+
+	/* Skip eth_type + 4 bytes of IP header */
+	mvPp2PrsSwSramShiftSet(pe, MV_ETH_TYPE_LEN + 4, SRAM_OP_SEL_SHIFT_ADD);
+
+	/* set L3 offset */
+	mvPp2PrsSwSramOffsetSet(pe, SRAM_OFFSET_TYPE_L3, MV_ETH_TYPE_LEN, SRAM_OP_SEL_OFFSET_ADD);
+
+	mvPp2PrsHwWrite(pe);
+
+	/* Update mvPrsShadowTbl */
+	mvPp2PrsShadowSet(pe->index, PRS_LU_L2, "etype-ipv4");
+	mvPp2PrsShadowFinSet(pe->index, MV_FALSE);
+	mvPp2PrsShadowUdfSet(pe->index, PRS_UDF_L2_DEF);
+	mvPp2PrsShadowRiSet(pe->index, RI_L3_IP4, RI_L3_PROTO_MASK);
+	mvPp2PrsSwFree(pe);
+
+	/* IPv4 with options */
+	/* Go through the all entires from first to last */
+	tid = mvPp2PrsTcamFirstFree(0, MV_PP2_PRS_TCAM_SIZE - 1);
+
+	/* Can't add - No free TCAM entries */
+	if (tid == MV_PRS_OUT_OF_RAGE) {
+		mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+		return MV_ERROR;
+	}
+
+	pe = mvPp2PrsSwAlloc(PRS_LU_L2);
+
+	pe->index = tid;
+	mvPp2PrsMatchEtype(pe, 0, MV_IP_TYPE);
+	mvPp2PrsSwTcamByteSet(pe, MV_ETH_TYPE_LEN + 0, 0x40, 0xf0);
+
+	/* Skip eth_type + 4 bytes of IP header */
+	mvPp2PrsSwSramShiftSet(pe, MV_ETH_TYPE_LEN + 4, SRAM_OP_SEL_SHIFT_ADD);
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_IP4);
+	mvPp2PrsSwSramRiUpdate(pe, RI_L3_IP4_OPT, RI_L3_PROTO_MASK);
+
+	/* set L3 offset */
+	mvPp2PrsSwSramOffsetSet(pe, SRAM_OFFSET_TYPE_L3, MV_ETH_TYPE_LEN, SRAM_OP_SEL_OFFSET_ADD);
+
+	mvPp2PrsHwWrite(pe);
+
+	/* Update mvPrsShadowTbl */
+	mvPp2PrsShadowSet(pe->index, PRS_LU_L2, "etype-ipv4-opt");
+	mvPp2PrsShadowUdfSet(pe->index, PRS_UDF_L2_DEF);
+	mvPp2PrsShadowFinSet(pe->index, MV_FALSE);
+	mvPp2PrsShadowRiSet(pe->index, RI_L3_IP4_OPT, RI_L3_PROTO_MASK);
+
+	mvPp2PrsSwFree(pe);
+
+	return MV_OK;
+}
+
+static int mvPp2PrsEtypeArp(void)
+{
+	int tid;
+	MV_PP2_PRS_ENTRY *pe;
+
+	/* Go through the all entires from first to last */
+	tid = mvPp2PrsTcamFirstFree(0, MV_PP2_PRS_TCAM_SIZE - 1);
+
+	/* Can't add - No free TCAM entries */
+	if (tid == MV_PRS_OUT_OF_RAGE) {
+		mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+		return MV_ERROR;
+	}
+
+
+	pe = mvPp2PrsSwAlloc(PRS_LU_L2);
+
+	pe->index = tid;
+
+	mvPp2PrsMatchEtype(pe, 0, MV_IP_ARP_TYPE);
+
+	/* generate flow in the next iteration*/
+	/*mvPp2PrsSwSramAiSetBit(pe, AI_DONE_BIT);*/
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_FLOWS);
+	mvPp2PrsSwSramFlowidGenSet(pe);
+	mvPp2PrsSwSramRiUpdate(pe, RI_L3_ARP, RI_L3_PROTO_MASK);
+
+	/* set L3 offset */
+	mvPp2PrsSwSramOffsetSet(pe, SRAM_OFFSET_TYPE_L3, MV_ETH_TYPE_LEN, SRAM_OP_SEL_OFFSET_ADD);
+
+	mvPp2PrsHwWrite(pe);
+
+	mvPp2PrsShadowSet(pe->index, PRS_LU_L2, "etype-arp");
+	mvPp2PrsShadowUdfSet(pe->index, PRS_UDF_L2_DEF);
+	mvPp2PrsShadowFinSet(pe->index, MV_TRUE);
+	mvPp2PrsShadowRiSet(pe->index, RI_L3_ARP, RI_L3_PROTO_MASK);
+
+	mvPp2PrsSwFree(pe);
+
+	return MV_OK;
+}
+
+static int mvPp2PrsEtypeLbdt(void)
+{
+	int tid;
+	MV_PP2_PRS_ENTRY *pe;
+
+	/* Go through the all entires from first to last */
+	tid = mvPp2PrsTcamFirstFree(0, MV_PP2_PRS_TCAM_SIZE - 1);
+
+	/* Can't add - No free TCAM entries */
+	if (tid == MV_PRS_OUT_OF_RAGE) {
+		mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+		return MV_ERROR;
+	}
+
+	pe = mvPp2PrsSwAlloc(PRS_LU_L2);
+
+	pe->index = tid;
+
+	mvPp2PrsMatchEtype(pe, 0, MV_IP_LBDT_TYPE);
+
+	/* generate flow in the next iteration*/
+	/*mvPp2PrsSwSramAiSetBit(pe, AI_DONE_BIT);*/
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_FLOWS);
+	mvPp2PrsSwSramFlowidGenSet(pe);
+	mvPp2PrsSwSramRiUpdate(pe, RI_CPU_CODE_RX_SPEC | RI_UDF3_RX_SPECIAL, RI_CPU_CODE_MASK | RI_UDF3_MASK);
+
+	/* set L3 offset */
+	mvPp2PrsSwSramOffsetSet(pe, SRAM_OFFSET_TYPE_L3, MV_ETH_TYPE_LEN, SRAM_OP_SEL_OFFSET_ADD);
+
+	mvPp2PrsHwWrite(pe);
+
+	mvPp2PrsShadowSet(pe->index, PRS_LU_L2, "etype-lbdt");
+	mvPp2PrsShadowUdfSet(pe->index, PRS_UDF_L2_DEF);
+	mvPp2PrsShadowFinSet(pe->index, MV_TRUE);
+	mvPp2PrsShadowRiSet(pe->index, RI_CPU_CODE_RX_SPEC | RI_UDF3_RX_SPECIAL, RI_CPU_CODE_MASK | RI_UDF3_MASK);
+
+	mvPp2PrsSwFree(pe);
+
+	return MV_OK;
+}
+
+/* match ip6 */
+static int mvPp2PrsEtypeIp6(void)
+{
+	int tid;
+	MV_PP2_PRS_ENTRY *pe;
+
+	/* Go through the all entires from first to last */
+	tid = mvPp2PrsTcamFirstFree(0, MV_PP2_PRS_TCAM_SIZE - 1);
+
+	/* Can't add - No free TCAM entries */
+	if (tid == MV_PRS_OUT_OF_RAGE) {
+		mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+		return MV_ERROR;
+	}
+
+	pe = mvPp2PrsSwAlloc(PRS_LU_L2);
+
+	pe->index = tid;
+
+	mvPp2PrsMatchEtype(pe, 0, MV_IP6_TYPE);
+
+	/* Skip DIP of IPV6 header */
+	mvPp2PrsSwSramShiftSet(pe, MV_ETH_TYPE_LEN + 8 + MV_MAX_L3_ADDR_SIZE, SRAM_OP_SEL_SHIFT_ADD);
+
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_IP6);
+/*
+	there is no support in extension yet
+ */
+	mvPp2PrsSwSramRiUpdate(pe, RI_L3_IP6, RI_L3_PROTO_MASK);
+
+	/* set L3 offset */
+	mvPp2PrsSwSramOffsetSet(pe, SRAM_OFFSET_TYPE_L3, MV_ETH_TYPE_LEN, SRAM_OP_SEL_OFFSET_ADD);
+
+	mvPp2PrsHwWrite(pe);
+
+	mvPp2PrsShadowSet(pe->index, PRS_LU_L2, "etype-ipv6");
+	mvPp2PrsShadowUdfSet(pe->index, PRS_UDF_L2_DEF);
+	mvPp2PrsShadowFinSet(pe->index, MV_FALSE);
+	mvPp2PrsShadowRiSet(pe->index, RI_L3_IP6, RI_L3_PROTO_MASK);
+
+	mvPp2PrsSwFree(pe);
+
+	return MV_OK;
+}
+
+
+/* unknown ethertype */
+static int mvPp2PrsEtypeUn(void)
+{
+	MV_PP2_PRS_ENTRY *pe;
+
+	/* Default entry for PRS_LU_L2 - Unknown ethtype */
+	pe = mvPp2PrsSwAlloc(PRS_LU_L2);
+	pe->index = PE_ETH_TYPE_UN;
+
+	/* generate flow in the next iteration*/
+	mvPp2PrsSwSramFlowidGenSet(pe);
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_FLOWS);
+
+	/* Set result info bits */
+	mvPp2PrsSwSramRiUpdate(pe, RI_L3_UN, RI_L3_PROTO_MASK);
+
+	mvPp2PrsSwTcamPortMapSet(pe, PORT_MASK);
+
+	/* set L3 offset even it's unknown L3 */
+	mvPp2PrsSwSramOffsetSet(pe, SRAM_OFFSET_TYPE_L3, MV_ETH_TYPE_LEN, SRAM_OP_SEL_OFFSET_ADD);
+
+	mvPp2PrsHwWrite(pe);
+
+	/* Update mvPrsShadowTbl */
+	mvPp2PrsShadowSet(pe->index, PRS_LU_L2, "etype-unknown");
+	mvPp2PrsShadowUdfSet(pe->index, PRS_UDF_L2_DEF);
+	mvPp2PrsShadowFinSet(pe->index, MV_TRUE);
+	mvPp2PrsShadowRiSet(pe->index, RI_L3_UN, RI_L3_PROTO_MASK);
+
+	mvPp2PrsSwFree(pe);
+
+	return MV_OK;
+}
+
+/*
+ * pnc_etype_init - match basic ethertypes
+ */
+static int mvPp2PrsEtypeInit(void)
+{
+	int rc;
+
+	PRS_DBG("%s\n", __func__);
+
+	rc = mvPp2PrsEtypePppoe();
+	if (rc)
+		return rc;
+
+	rc = mvPp2PrsEtypeArp();
+	if (rc)
+		return rc;
+
+	rc = mvPp2PrsEtypeLbdt();
+	if (rc)
+		return rc;
+
+	rc = mvPp2PrsEtypeIp4();
+	if (rc)
+		return rc;
+
+	rc = mvPp2PrsEtypeIp6();
+	if (rc)
+		return rc;
+
+	rc = mvPp2PrsEtypeUn();
+	if (rc)
+		return rc;
+
+	return MV_OK;
+}
+
+/******************************************************************************
+ *
+ * PPPoE Section
+ *
+ ******************************************************************************
+ */
+
+static int mvPp2PrsIpv6Pppoe(void)
+{
+	int tid;
+	MV_PP2_PRS_ENTRY *pe;
+
+	PRS_DBG("%s\n", __func__);
+
+	/* Go through the all entires from first to last */
+	tid = mvPp2PrsTcamFirstFree(0, MV_PP2_PRS_TCAM_SIZE - 1);
+
+	/* Can't add - No free TCAM entries */
+	if (tid == MV_PRS_OUT_OF_RAGE) {
+		mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+		return MV_ERROR;
+	}
+
+	pe = mvPp2PrsSwAlloc(PRS_LU_PPPOE);
+
+	pe->index = tid;
+
+	mvPp2PrsMatchEtype(pe, 0, MV_IP6_PPP);
+
+	/* there is no support in extension yet */
+	mvPp2PrsSwSramRiUpdate(pe, RI_L3_IP6, RI_L3_PROTO_MASK);
+
+	/* Skip DIP of IPV6 header */
+	mvPp2PrsSwSramShiftSet(pe, MV_ETH_TYPE_LEN + 8 + MV_MAX_L3_ADDR_SIZE, SRAM_OP_SEL_SHIFT_ADD);
+
+	/* set L3 offset */
+	mvPp2PrsSwSramOffsetSet(pe, SRAM_OFFSET_TYPE_L3, MV_ETH_TYPE_LEN, SRAM_OP_SEL_OFFSET_ADD);
+
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_IP6);
+	mvPp2PrsHwWrite(pe);
+
+	/* Update mvPrsShadowTbl */
+	mvPp2PrsShadowSet(pe->index, PRS_LU_PPPOE, "Ipv6-over-PPPoE");
+
+	mvPp2PrsSwFree(pe);
+
+	return MV_OK;
+}
+
+static int mvPp2PrsIpv4Pppoe(void)
+{
+	int tid;
+	MV_PP2_PRS_ENTRY *pe;
+
+	PRS_DBG("%s\n", __func__);
+
+	/** ipV4 over PPPoE without options **/
+
+	/* Go through the all entires from first to last */
+	tid = mvPp2PrsTcamFirstFree(0, MV_PP2_PRS_TCAM_SIZE - 1);
+
+	/* Can't add - No free TCAM entries */
+	if (tid == MV_PRS_OUT_OF_RAGE) {
+		mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+		return MV_ERROR;
+	}
+
+	pe = mvPp2PrsSwAlloc(PRS_LU_PPPOE);
+
+	pe->index = tid;
+
+	mvPp2PrsMatchEtype(pe, 0, MV_IP_PPP);
+	mvPp2PrsSwTcamByteSet(pe, MV_ETH_TYPE_LEN + 0, 0x45, 0xff);
+
+	mvPp2PrsSwSramRiUpdate(pe, RI_L3_IP4, RI_L3_PROTO_MASK);
+
+	/* Skip eth_type + 4 bytes of IP header */
+	mvPp2PrsSwSramShiftSet(pe, MV_ETH_TYPE_LEN + 4, SRAM_OP_SEL_SHIFT_ADD);
+
+	/* set L3 offset */
+	mvPp2PrsSwSramOffsetSet(pe, SRAM_OFFSET_TYPE_L3, MV_ETH_TYPE_LEN, SRAM_OP_SEL_OFFSET_ADD);
+
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_IP4);
+	mvPp2PrsHwWrite(pe);
+
+	/* Update mvPrsShadowTbl */
+	mvPp2PrsShadowSet(pe->index, PRS_LU_PPPOE, "Ipv4-over-PPPoE");
+
+	mvPp2PrsSwFree(pe);
+
+
+	/** ipV4 over PPPoE with options **/
+
+	/* Go through the all entires from first to last */
+	tid = mvPp2PrsTcamFirstFree(0, MV_PP2_PRS_TCAM_SIZE - 1);
+
+	/* Can't add - No free TCAM entries */
+	if (tid == MV_PRS_OUT_OF_RAGE) {
+		mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+		return MV_ERROR;
+	}
+
+	pe = mvPp2PrsSwAlloc(PRS_LU_PPPOE);
+
+	pe->index = tid;
+
+	mvPp2PrsMatchEtype(pe, 0, MV_IP_PPP);
+
+	mvPp2PrsSwSramRiUpdate(pe, RI_L3_IP4_OPT, RI_L3_PROTO_MASK);
+
+	/* Skip eth_type + 4 bytes of IP header */
+	mvPp2PrsSwSramShiftSet(pe, MV_ETH_TYPE_LEN + 4, SRAM_OP_SEL_SHIFT_ADD);
+
+	/* set L3 offset */
+	mvPp2PrsSwSramOffsetSet(pe, SRAM_OFFSET_TYPE_L3, MV_ETH_TYPE_LEN, SRAM_OP_SEL_OFFSET_ADD);
+
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_IP4);
+	mvPp2PrsHwWrite(pe);
+
+	/* Update mvPrsShadowTbl */
+	mvPp2PrsShadowSet(pe->index, PRS_LU_PPPOE, "Ipv4-over-PPPoE-opt");
+	mvPp2PrsSwFree(pe);
+
+	return MV_OK;
+}
+
+/* Create entry for non-ip over PPPoE (for PPP LCP, IPCPv4, IPCPv6, etc) */
+static int mvPp2PrsNonipPppoe(void)
+{
+	int ret;
+	int tid;
+	MV_PP2_PRS_ENTRY *pe;
+
+	PRS_DBG("%s\n", __func__);
+
+	/** non-Ip over PPPoE **/
+
+	/* Go through the all entires from first to last */
+	tid = mvPp2PrsTcamFirstFree(0, MV_PP2_PRS_TCAM_SIZE - 1);
+
+	/* Can't add - No free TCAM entries */
+	if (tid == MV_PRS_OUT_OF_RAGE) {
+		mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+		return MV_ERROR;
+	}
+
+	pe = mvPp2PrsSwAlloc(PRS_LU_PPPOE);
+	PTR_VALIDATE(pe);
+
+	pe->index = tid;
+
+	ret = mvPp2PrsSwSramRiUpdate(pe, RI_L3_UN, RI_L3_PROTO_MASK);
+	RET_VALIDATE(ret);
+
+	ret = mvPp2PrsSwSramFlowidGenSet(pe);
+	RET_VALIDATE(ret);
+
+	/* set L3 offset even it's unknown L3 */
+	ret = mvPp2PrsSwSramOffsetSet(pe, SRAM_OFFSET_TYPE_L3, MV_ETH_TYPE_LEN, SRAM_OP_SEL_OFFSET_ADD);
+	RET_VALIDATE(ret);
+
+	ret = mvPp2PrsSwSramNextLuSet(pe, PRS_LU_FLOWS);
+	RET_VALIDATE(ret);
+
+	ret = mvPp2PrsHwWrite(pe);
+	RET_VALIDATE(ret);
+
+	/* Update mvPrsShadowTbl */
+	mvPp2PrsShadowSet(pe->index, PRS_LU_PPPOE, "NonIP-over-PPPoE");
+	mvPp2PrsSwFree(pe);
+
+	return MV_OK;
+}
+
+/* match etype = PPPOE */
+static int mvPp2PrsPppeInit(void)
+{
+	int rc;
+
+	rc = mvPp2PrsIpv4Pppoe();
+	if (rc)
+		return rc;
+
+	rc = mvPp2PrsIpv6Pppoe();
+	if (rc)
+		return rc;
+
+	rc = mvPp2PrsNonipPppoe();
+	if (rc)
+		return rc;
+
+	return MV_OK;
+}
+
+
+
+/******************************************************************************
+ *
+ * IPv4 Section
+ *
+ ******************************************************************************
+ */
+
+/* IPv4/TCP header parsing for fragmentation and L4 offset.  */
+static int mvPp2PrsIp4Proto(unsigned short proto, unsigned int ri, unsigned int riMask)
+{
+	int tid;
+	MV_PP2_PRS_ENTRY *pe;
+
+	PRS_DBG("%s\n", __func__);
+
+	/* TCP, Not Fragmented */
+	tid = mvPp2PrsTcamFirstFree(0, MV_PP2_PRS_TCAM_SIZE - 1);
+	if (tid == MV_PRS_OUT_OF_RAGE) {
+		mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+		return MV_ERROR;
+	}
+	pe = mvPp2PrsSwAlloc(PRS_LU_IP4);
+	pe->index = tid;
+
+	mvPp2PrsSwTcamByteSet(pe, 2, 0x00, 0x3f);
+	mvPp2PrsSwTcamByteSet(pe, 3, 0x00, 0xff);
+
+	mvPp2PrsSwTcamPortMapSet(pe, PORT_MASK);
+	mvPp2PrsSwTcamByteSet(pe, 5, proto, 0xff);
+
+	if (proto == MV_IP_PROTO_TCP) {
+		mvPp2PrsSwSramRiUpdate(pe, ri, riMask);
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP4, "ipv4-tcp");
+	} else if (proto == MV_IP_PROTO_UDP) {
+		mvPp2PrsSwSramRiUpdate(pe, ri, riMask);
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP4, "ipv4-udp");
+	} else if (proto == MV_IP_PROTO_IGMP) {
+		mvPp2PrsSwSramRiUpdate(pe, ri, riMask);
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP4, "ipv4-igmp");
+	} else {
+		mvOsPrintf("%s: IPv4 unsupported protocol %d\n", __func__, proto);
+		mvPp2PrsSwFree(pe);
+		return MV_ERROR;
+	}
+
+	/* set L4 offset */
+	mvPp2PrsSwSramOffsetSet(pe, SRAM_OFFSET_TYPE_L4, sizeof(MV_IP_HEADER) - 4, SRAM_OP_SEL_OFFSET_ADD);
+	mvPp2PrsSwSramShiftSet(pe, 12, SRAM_OP_SEL_SHIFT_ADD);
+
+	/* Next: go to IPV4 */
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_IP4);
+	/* Set sram AIbits */
+	mvPp2PrsSwSramAiUpdate(pe, (1 << IPV4_DIP_AI_BIT), (1 << IPV4_DIP_AI_BIT));
+
+	mvPp2PrsSwTcamPortMapSet(pe, PORT_MASK);
+
+	/* AI bits check */
+	mvPp2PrsSwTcamAiUpdate(pe, 0, (1 << IPV4_DIP_AI_BIT));
+
+	mvPp2PrsHwWrite(pe);
+
+	mvPp2PrsSwFree(pe);
+
+	/* TCP, Fragmented */
+	tid = mvPp2PrsTcamFirstFree(0, MV_PP2_PRS_TCAM_SIZE - 1);
+	if (tid == MV_PRS_OUT_OF_RAGE) {
+		mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+		return MV_ERROR;
+	}
+	pe = mvPp2PrsSwAlloc(PRS_LU_IP4);
+	pe->index = tid;
+
+	mvPp2PrsSwTcamByteSet(pe, 5, proto, 0xff);
+	mvPp2PrsSwSramRiSetBit(pe, RI_IP_FRAG_BIT);
+
+	if (proto == MV_IP_PROTO_TCP) {
+		mvPp2PrsSwSramRiUpdate(pe, ri, riMask);
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP4, "ipv4-tcp-frag");
+	} else if (proto == MV_IP_PROTO_UDP) {
+		mvPp2PrsSwSramRiUpdate(pe, ri, riMask);
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP4, "ipv4-udp-frag");
+	} else if (proto == MV_IP_PROTO_IGMP) {
+		mvPp2PrsSwSramRiUpdate(pe, ri, riMask);
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP4, "ipv4-igmp-frag");
+	} else {
+		mvOsPrintf("%s: IPv4 unsupported protocol %d\n", __func__, proto);
+		mvPp2PrsSwFree(pe);
+		return MV_ERROR;
+	}
+
+	/* set L4 offset */
+	mvPp2PrsSwSramOffsetSet(pe, SRAM_OFFSET_TYPE_L4, sizeof(MV_IP_HEADER) - 4, SRAM_OP_SEL_OFFSET_ADD);
+	mvPp2PrsSwSramShiftSet(pe, 12, SRAM_OP_SEL_SHIFT_ADD);
+
+	/* Next: go to IPV4 */
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_IP4);
+	/* Set sram AIbits */
+	mvPp2PrsSwSramAiUpdate(pe, (1 << IPV4_DIP_AI_BIT), (1 << IPV4_DIP_AI_BIT));
+
+	mvPp2PrsSwTcamPortMapSet(pe, PORT_MASK);
+	/* AI bits check */
+	mvPp2PrsSwTcamAiUpdate(pe, 0, (1 << IPV4_DIP_AI_BIT));
+
+	mvPp2PrsHwWrite(pe);
+
+	mvPp2PrsSwFree(pe);
+
+	return MV_OK;
+}
+
+/* IPv4 L3 Multicast or borad cast.  1-MC, 2-BC */
+static int mvPp2PrsIp4Cast(unsigned short l3_cast)
+{
+	int tid;
+	MV_PP2_PRS_ENTRY *pe;
+
+	PRS_DBG("%s\n", __func__);
+
+	if (l3_cast != L3_MULTI_CAST &&
+	    l3_cast != L3_BROAD_CAST) {
+		mvOsPrintf("%s: Invalid Input\n", __func__);
+		return MV_ERROR;
+	}
+
+	/* Get free entry */
+	tid = mvPp2PrsTcamFirstFree(0, MV_PP2_PRS_TCAM_SIZE - 1);
+	if (tid == MV_PRS_OUT_OF_RAGE) {
+		mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+		return MV_ERROR;
+	}
+	pe = mvPp2PrsSwAlloc(PRS_LU_IP4);
+	pe->index = tid;
+
+	if (l3_cast == L3_MULTI_CAST) {
+		mvPp2PrsSwTcamByteSet(pe, 0, 0xE0, 0xE0);
+		mvPp2PrsSwSramRiUpdate(pe, RI_L3_MCAST, RI_L3_ADDR_MASK);
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP4, "ipv4-mc");
+	} else if (l3_cast == L3_BROAD_CAST) {
+		mvPp2PrsSwTcamByteSet(pe, 0, 0xFF, 0xFF);
+		mvPp2PrsSwTcamByteSet(pe, 1, 0xFF, 0xFF);
+		mvPp2PrsSwTcamByteSet(pe, 2, 0xFF, 0xFF);
+		mvPp2PrsSwTcamByteSet(pe, 3, 0xFF, 0xFF);
+		mvPp2PrsSwSramRiUpdate(pe, RI_L3_BCAST, RI_L3_ADDR_MASK);
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP4, "ipv4-bc");
+	}
+
+	mvPp2PrsSwTcamAiUpdate(pe, (1 << IPV4_DIP_AI_BIT), (1 << IPV4_DIP_AI_BIT));
+
+	mvPp2PrsSwTcamPortMapSet(pe, PORT_MASK);
+
+	/* Finished: go to flowid generation */
+	mvPp2PrsSwSramFlowidGenSet(pe);
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_FLOWS);
+
+	mvPp2PrsSwTcamPortMapSet(pe, PORT_MASK);
+	mvPp2PrsHwWrite(pe);
+
+	mvPp2PrsSwFree(pe);
+
+	return MV_OK;
+}
+
+
+static int mvPp2PrsIp4Init(void)
+{
+	int rc;
+	MV_PP2_PRS_ENTRY *pe;
+
+	PRS_DBG("%s\n", __func__);
+
+	/* Set entries for TCP, UDP and IGMP over IPv4 */
+	rc = mvPp2PrsIp4Proto(MV_IP_PROTO_TCP, RI_L4_TCP, RI_L4_PROTO_MASK);
+	if (rc)
+		return rc;
+
+	rc = mvPp2PrsIp4Proto(MV_IP_PROTO_UDP, RI_L4_UDP, RI_L4_PROTO_MASK);
+	if (rc)
+		return rc;
+
+	/* IPv4 Broadcast */
+	rc = mvPp2PrsIp4Cast(L3_BROAD_CAST);
+	if (rc)
+		return rc;
+
+	/* IPv4 Multicast */
+	rc = mvPp2PrsIp4Cast(L3_MULTI_CAST);
+	if (rc)
+		return rc;
+
+	rc = mvPp2PrsIp4Proto(MV_IP_PROTO_IGMP, RI_CPU_CODE_RX_SPEC | RI_UDF3_RX_SPECIAL, RI_CPU_CODE_MASK | RI_UDF3_MASK);
+	if (rc)
+		return rc;
+
+	/* Default IPv4 entry for unknown protocols */
+	pe = mvPp2PrsSwAlloc(PRS_LU_IP4);
+	pe->index = PE_IP4_PROTO_UN;
+
+	/* Next: go to IPV4 */
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_IP4);
+	/* Set sram AIbits */
+	mvPp2PrsSwSramAiUpdate(pe, (1 << IPV4_DIP_AI_BIT), (1 << IPV4_DIP_AI_BIT));
+
+	/* Set result info bits */
+	mvPp2PrsSwSramRiUpdate(pe, RI_L4_OTHER, RI_L4_PROTO_MASK);
+
+	mvPp2PrsSwSramOffsetSet(pe, SRAM_OFFSET_TYPE_L4, sizeof(MV_IP_HEADER) - 4, SRAM_OP_SEL_OFFSET_ADD);
+	mvPp2PrsSwSramShiftSet(pe, 12, SRAM_OP_SEL_SHIFT_ADD);
+
+	mvPp2PrsSwTcamPortMapSet(pe, PORT_MASK);
+
+	/* AI bits check */
+	mvPp2PrsSwTcamAiUpdate(pe, 0, (1 << IPV4_DIP_AI_BIT));
+
+	mvPp2PrsHwWrite(pe);
+
+	/* Update mvPrsShadowTbl */
+	mvPp2PrsShadowSet(pe->index, PRS_LU_IP4, "ipv4-l4-unknown");
+
+	mvPp2PrsSwFree(pe);
+
+	/* Default IPv4 entry for unit cast address */
+	pe = mvPp2PrsSwAlloc(PRS_LU_IP4);
+	pe->index = PE_IP4_ADDR_UN;
+
+	mvPp2PrsSwTcamAiUpdate(pe, (1 << IPV4_DIP_AI_BIT), (1 << IPV4_DIP_AI_BIT));
+
+	mvPp2PrsSwTcamPortMapSet(pe, PORT_MASK);
+
+	mvPp2PrsSwSramRiUpdate(pe, RI_L3_UCAST, RI_L3_ADDR_MASK);
+	mvPp2PrsShadowSet(pe->index, PRS_LU_IP4, "ipv4-uc");
+
+	/* Finished: go to flowid generation */
+	mvPp2PrsSwSramFlowidGenSet(pe);
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_FLOWS);
+
+	mvPp2PrsSwTcamPortMapSet(pe, PORT_MASK);
+	mvPp2PrsHwWrite(pe);
+
+	mvPp2PrsSwFree(pe);
+
+	return MV_OK;
+}
+
+/******************************************************************************
+ *
+ * IPv6 Section
+ *
+ *******************************************************************************/
+/* TODO continue from here */
+/* IPv6 - detect TCP */
+
+static int mvPp2PrsIp6Proto(unsigned short proto, unsigned int ri, unsigned int riMask, MV_BOOL ip6_ext)
+{
+	int tid;
+	MV_PP2_PRS_ENTRY *pe;
+
+	PRS_DBG("%s\n", __func__);
+
+	/* TCP, Not Fragmented */
+	tid = mvPp2PrsTcamFirstFree(0, MV_PP2_PRS_TCAM_SIZE - 1);
+	if (tid == MV_PRS_OUT_OF_RAGE) {
+		mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+		return MV_ERROR;
+	}
+	pe = mvPp2PrsSwAlloc(PRS_LU_IP6);
+	pe->index = tid;
+
+	/* Match Protocol */
+	mvPp2PrsSwTcamByteSet(pe, 0, proto, 0xff);
+
+	/* Set Rule Shadow */
+	switch (proto) {
+	/* TCP */
+	case MV_IP_PROTO_TCP:
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, (MV_FALSE == ip6_ext) ? "ipv6-tcp" : "ipv6-ext-tcp");
+		break;
+
+	/* UDP */
+	case MV_IP_PROTO_UDP:
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, (MV_FALSE == ip6_ext) ? "ipv6-udp" : "ipv6-ext-udp");
+		break;
+
+	/* ICMP */
+	case MV_IP_PROTO_ICMPV6:
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, (MV_FALSE == ip6_ext) ? "ipv6-icmp" : "ipv6-ext-icmp");
+		break;
+
+	/* IPv4, for IPv6 DS Lite */
+	case MV_IP_PROTO_IPIP:
+		if (ip6_ext != MV_FALSE) {
+			mvOsPrintf("%s: IPv4 header not a IP6 extension header\n", __func__);
+			mvPp2PrsSwFree(pe);
+			return MV_ERROR;
+		}
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, "ipv6-lite-ip4");
+		break;
+
+	/* IPV6 Extension Header */
+
+	/* Hop-by-Hop Options Header, Dummy protocol for TCP */
+	case MV_IP_PROTO_NULL:
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, (MV_FALSE == ip6_ext) ? "ipv6-nh-hh" : "ipv6-ext-nh-hh");
+		break;
+
+	/* Encapsulated IPv6 Header, IPv6-in-IPv4 tunnelling */
+	case MV_IP_PROTO_IPV6:
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, (MV_FALSE == ip6_ext) ? "ipv6-nh-eh" : "ipv6-ext-nh-eh");
+		break;
+
+	/* Route header */
+	case MV_IP_PROTO_RH:
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, (MV_FALSE == ip6_ext) ? "ipv6-nh-rh" : "ipv6-ext-nh-rh");
+		break;
+
+	/* Fragment Header */
+	case MV_IP_PROTO_FH:
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, (MV_FALSE == ip6_ext) ? "ipv6-nh-fh" : "ipv6-ext-nh-fh");
+		break;
+#if 0
+	/* Encapsulation Security Payload protocol */
+	case MV_IP_PROTO_ESP:
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, (MV_FALSE == ip6_ext) ? "ipv6-nh-esp" : "ipv6-ext-nh-esp");
+		break;
+#endif
+	/* Authentication Header */
+	case MV_IP_PROTO_AH:
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, (MV_FALSE == ip6_ext) ? "ipv6-nh-ah" : "ipv6-ext-nh-ah");
+		break;
+
+	/* Destination Options Header */
+	case MV_IP_PROTO_DH:
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, (MV_FALSE == ip6_ext) ? "ipv6-nh-dh" : "ipv6-ext-nh-dh");
+		break;
+
+	/* Mobility Header */
+	case MV_IP_PROTO_MH:
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, (MV_FALSE == ip6_ext) ? "ipv6-nh-mh" : "ipv6-ext-nh-mh");
+		break;
+
+	default:
+		mvOsPrintf("%s: IPv6 unsupported protocol %d\n", __func__, proto);
+		mvPp2PrsSwFree(pe);
+		return MV_ERROR;
+	}
+
+	/* Set TCAM and SRAM for TCP, UDP and IGMP */
+	if (proto == MV_IP_PROTO_TCP ||
+	    proto == MV_IP_PROTO_UDP ||
+	    proto == MV_IP_PROTO_ICMPV6 ||
+	    proto == MV_IP_PROTO_IPIP) {
+		/* Set TCAM AI */
+		if (MV_FALSE == ip6_ext)
+			mvPp2PrsSwTcamAiUpdate(pe, (1 << IPV6_NO_EXT_AI_BIT), (1 << IPV6_NO_EXT_AI_BIT));
+		else
+			mvPp2PrsSwTcamAiUpdate(pe, (1 << IPV6_EXT_AI_BIT), (1 << IPV6_EXT_AI_BIT));
+
+		/* Set result info */
+		mvPp2PrsSwSramRiUpdate(pe, ri, riMask);
+
+		/* set L4 offset relatively to our current place */
+		if (MV_FALSE == ip6_ext)
+			mvPp2PrsSwSramOffsetSet(pe, SRAM_OFFSET_TYPE_L4, sizeof(MV_IP6_HEADER) - 6, SRAM_OP_SEL_OFFSET_ADD);
+		else
+			mvPp2PrsSwSramOffsetSet(pe, SRAM_OFFSET_TYPE_L4, 0, SRAM_OP_SEL_OFFSET_IP6_ADD);
+
+		/* Finished: go to LU Generation */
+		mvPp2PrsSwSramFlowidGenSet(pe);
+		mvPp2PrsSwSramNextLuSet(pe, PRS_LU_FLOWS);
+	} else { /* Set TCAM and SRAM for IPV6 Extension Header */
+		if (MV_FALSE == ip6_ext) { /* Case 1: xx is first NH of IPv6 */
+			/* Skip to NH */
+			mvPp2PrsSwSramShiftSet(pe, 34, SRAM_OP_SEL_SHIFT_ADD);
+
+			/* Set AI bit */
+			mvPp2PrsSwTcamAiUpdate(pe, (1 << IPV6_NO_EXT_AI_BIT), (1 << IPV6_NO_EXT_AI_BIT));
+			/* update UDF2 */
+			mvPp2PrsSwSramOffsetSet(pe, SRAM_OFFSET_TYPE_IPV6_PROTO, 0, SRAM_OP_SEL_SHIFT_ADD);
+		} else { /* Case 2: xx is not first NH of IPv6 */
+			/* Skip to NH */
+			mvPp2PrsSwSramShiftSet(pe, 0, SRAM_OP_SEL_SHIFT_IP6_ADD);
+
+			/* Set AI bit */
+			mvPp2PrsSwTcamAiUpdate(pe, (1 << IPV6_EXT_AI_BIT), (1 << IPV6_EXT_AI_BIT));
+			/* update UDF2 */
+			mvPp2PrsSwSramOffsetSet(pe, SRAM_OFFSET_TYPE_IPV6_PROTO, 0, SRAM_OP_SEL_SHIFT_IP6_ADD);
+		}
+
+		/* Next LU */
+		mvPp2PrsSwSramNextLuSet(pe, PRS_LU_IP6);
+
+		/* Set sram AIbits */
+		if (proto == MV_IP_PROTO_AH) {
+			mvPp2PrsSwSramAiUpdate(pe, (1 << IPV6_EXT_AH_AI_BIT), (1 << IPV6_EXT_AH_AI_BIT));
+			mvPp2PrsSwSramAiUpdate(pe, 0, (1 << IPV6_EXT_AI_BIT));
+			mvPp2PrsSwSramAiUpdate(pe, 0, (1 << IPV6_NO_EXT_AI_BIT));
+		} else {
+			mvPp2PrsSwSramAiUpdate(pe, 0, (1 << IPV6_EXT_AH_AI_BIT));
+			mvPp2PrsSwSramAiUpdate(pe, (1 << IPV6_EXT_AI_BIT), (1 << IPV6_EXT_AI_BIT));
+			mvPp2PrsSwSramAiUpdate(pe, 0, (1 << IPV6_NO_EXT_AI_BIT));
+		}
+
+		/* Set RI, IPv6 Ext */
+		mvPp2PrsSwSramRiUpdate(pe, RI_L3_IP6_EXT, RI_L3_PROTO_MASK);
+	}
+
+	/* All ports */
+	mvPp2PrsSwTcamPortMapSet(pe, PORT_MASK);
+
+	/* Write HW */
+	mvPp2PrsHwWrite(pe);
+
+	/* Release SW entry */
+	mvPp2PrsSwFree(pe);
+
+	return MV_OK;
+}
+
+/* Parse the extension header on AH */
+static int mvPp2PrsIp6ProtoAh(unsigned short proto, unsigned int ri, unsigned int riMask)
+{
+	int tid;
+	MV_PP2_PRS_ENTRY *pe;
+
+	PRS_DBG("%s\n", __func__);
+
+	/* TCP, Not Fragmented */
+	tid = mvPp2PrsTcamFirstFree(0, MV_PP2_PRS_TCAM_SIZE - 1);
+	if (tid == MV_PRS_OUT_OF_RAGE) {
+		mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+		return MV_ERROR;
+	}
+	pe = mvPp2PrsSwAlloc(PRS_LU_IP6);
+	pe->index = tid;
+
+	/* Match Protocol */
+	mvPp2PrsSwTcamByteSet(pe, 0, proto, 0xff);
+
+	/* Set Rule Shadow */
+	switch (proto) {
+	/* TCP */
+	case MV_IP_PROTO_TCP:
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, "ipv6-ah-tcp");
+		break;
+
+	/* UDP */
+	case MV_IP_PROTO_UDP:
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, "ipv6-ah-udp");
+		break;
+
+	/* ICMP */
+	case MV_IP_PROTO_ICMPV6:
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, "ipv6-ah-icmp");
+		break;
+
+	/* IPV6 Extension Header */
+
+	/* Hop-by-Hop Options Header, Dummy protocol for TCP */
+	case MV_IP_PROTO_NULL:
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, "ipv6-ah-nh-hh");
+		break;
+
+	/* Encapsulated IPv6 Header, IPv6-in-IPv4 tunnelling */
+	case MV_IP_PROTO_IPV6:
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, "ipv6-ah-nh-eh");
+		break;
+
+	/* Route header */
+	case MV_IP_PROTO_RH:
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, "ipv6-ah-nh-rh");
+		break;
+
+	/* Fragment Header */
+	case MV_IP_PROTO_FH:
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, "ipv6-ah-nh-fh");
+		break;
+
+	/* Authentication Header */
+	case MV_IP_PROTO_AH:
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, "ipv6-ah-nh-ah");
+		break;
+
+	/* Destination Options Header */
+	case MV_IP_PROTO_DH:
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, "ipv6-ah-nh-dh");
+		break;
+
+	/* Mobility Header */
+	case MV_IP_PROTO_MH:
+		mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, "ipv6-ah-nh-mh");
+		break;
+
+	default:
+		mvOsPrintf("%s: IPv6 unsupported extension header %d\n", __func__, proto);
+		mvPp2PrsSwFree(pe);
+		return MV_ERROR;
+	}
+
+	/* Set TCAM and SRAM for TCP, UDP and IGMP */
+	if (proto == MV_IP_PROTO_TCP ||
+	    proto == MV_IP_PROTO_UDP ||
+	    proto == MV_IP_PROTO_ICMPV6) {
+		/* Set result info */
+		mvPp2PrsSwSramRiUpdate(pe, ri, riMask);
+		/* Set sram AIbits */
+		mvPp2PrsSwSramAiUpdate(pe, (1 << IPV6_EXT_AH_L4_AI_BIT), (1 << IPV6_EXT_AH_L4_AI_BIT));
+		mvPp2PrsSwSramAiUpdate(pe, 0, (1 << IPV6_EXT_AH_LEN_AI_BIT));
+		mvPp2PrsSwSramAiUpdate(pe, 0, (1 << IPV6_EXT_AH_AI_BIT));
+		mvPp2PrsSwSramAiUpdate(pe, 0, (1 << IPV6_EXT_AI_BIT));
+		mvPp2PrsSwSramAiUpdate(pe, 0, (1 << IPV6_NO_EXT_AI_BIT));
+	} else {
+		/* Set sram AIbits */
+		mvPp2PrsSwSramAiUpdate(pe, 0, (1 << IPV6_EXT_AH_L4_AI_BIT));
+		mvPp2PrsSwSramAiUpdate(pe, (1 << IPV6_EXT_AH_LEN_AI_BIT), (1 << IPV6_EXT_AH_LEN_AI_BIT));
+		mvPp2PrsSwSramAiUpdate(pe, 0, (1 << IPV6_EXT_AH_AI_BIT));
+		mvPp2PrsSwSramAiUpdate(pe, 0, (1 << IPV6_EXT_AI_BIT));
+		mvPp2PrsSwSramAiUpdate(pe, 0, (1 << IPV6_NO_EXT_AI_BIT));
+	}
+
+	/* Set AI bit */
+	mvPp2PrsSwTcamAiUpdate(pe, (1 << IPV6_EXT_AH_AI_BIT), (1 << IPV6_EXT_AH_AI_BIT));
+
+	/* Next LU */
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_IP6);
+
+	/* Set RI, IPv6 Ext */
+	mvPp2PrsSwSramRiUpdate(pe, RI_L3_IP6_EXT, RI_L3_PROTO_MASK);
+
+	/* All ports */
+	mvPp2PrsSwTcamPortMapSet(pe, PORT_MASK);
+
+	/* Write HW */
+	mvPp2PrsHwWrite(pe);
+
+	/* Release SW entry */
+	mvPp2PrsSwFree(pe);
+
+	return MV_OK;
+}
+
+/* Parse AH length field */
+static int mvPp2PrsIp6AhLen(unsigned char ah_len, MV_BOOL l4_off_set)
+{
+	int tid;
+	MV_PP2_PRS_ENTRY *pe;
+	char tmp_buf[15];
+
+	PRS_DBG("%s\n", __func__);
+
+	/* TCP, Not Fragmented */
+	tid = mvPp2PrsTcamFirstFree(0, MV_PP2_PRS_TCAM_SIZE - 1);
+	if (tid == MV_PRS_OUT_OF_RAGE) {
+		mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+		return MV_ERROR;
+	}
+	pe = mvPp2PrsSwAlloc(PRS_LU_IP6);
+	pe->index = tid;
+
+	/* Match AH Len */
+	mvPp2PrsSwTcamByteSet(pe, 1, ah_len, 0xff);
+
+	/* Set Rule Shadow */
+	sprintf(tmp_buf, "ipv6-ah-len%d", ah_len);
+	mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, tmp_buf);
+
+	/* Set AI bit */
+	if (l4_off_set) {
+		mvPp2PrsSwTcamAiUpdate(pe, (1 << IPV6_EXT_AH_L4_AI_BIT), (1 << IPV6_EXT_AH_L4_AI_BIT));
+		/* Set L4 offset */
+		mvPp2PrsSwSramOffsetSet(pe, SRAM_OFFSET_TYPE_L4,
+					(IPV6_EXT_EXCLUDE_BYTES + ah_len * IPV6_EXT_AH_UNIT_BYTES),
+					SRAM_OP_SEL_OFFSET_LKP_ADD);
+		/* Finished: go to LU Generation */
+		mvPp2PrsSwSramFlowidGenSet(pe);
+		mvPp2PrsSwSramNextLuSet(pe, PRS_LU_FLOWS);
+	} else {
+		mvPp2PrsSwTcamAiUpdate(pe, (1 << IPV6_EXT_AH_LEN_AI_BIT), (1 << IPV6_EXT_AH_LEN_AI_BIT));
+		/* Set sram AIbits */
+		mvPp2PrsSwSramAiUpdate(pe, 0, (1 << IPV6_EXT_AH_L4_AI_BIT));
+		mvPp2PrsSwSramAiUpdate(pe, 0, (1 << IPV6_EXT_AH_LEN_AI_BIT));
+		mvPp2PrsSwSramAiUpdate(pe, 0, (1 << IPV6_EXT_AH_AI_BIT));
+		mvPp2PrsSwSramAiUpdate(pe, (1 << IPV6_EXT_AI_BIT), (1 << IPV6_EXT_AI_BIT));
+		mvPp2PrsSwSramAiUpdate(pe, 0, (1 << IPV6_NO_EXT_AI_BIT));
+
+		/* Skip to NH */
+		mvPp2PrsSwSramShiftSet(pe, (IPV6_EXT_EXCLUDE_BYTES + ah_len * IPV6_EXT_AH_UNIT_BYTES), SRAM_OP_SEL_SHIFT_ADD);
+
+		/* Next LU */
+		mvPp2PrsSwSramNextLuSet(pe, PRS_LU_IP6);
+	}
+
+	/* All ports */
+	mvPp2PrsSwTcamPortMapSet(pe, PORT_MASK);
+
+	/* Write HW */
+	mvPp2PrsHwWrite(pe);
+
+	/* Release SW entry */
+	mvPp2PrsSwFree(pe);
+
+	return MV_OK;
+}
+
+/* IPv6 L3 Multicast or borad cast.  1-MC */
+static int mvPp2PrsIp6Cast(unsigned short l3_cast)
+{
+	int tid;
+	MV_PP2_PRS_ENTRY *pe;
+
+	PRS_DBG("%s\n", __func__);
+
+	if (l3_cast != L3_MULTI_CAST) {
+		mvOsPrintf("%s: Invalid Input\n", __func__);
+		return MV_ERROR;
+	}
+
+	/* Get free entry */
+	tid = mvPp2PrsTcamFirstFree(0, MV_PP2_PRS_TCAM_SIZE - 1);
+	if (tid == MV_PRS_OUT_OF_RAGE) {
+		mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+		return MV_ERROR;
+	}
+	pe = mvPp2PrsSwAlloc(PRS_LU_IP6);
+	pe->index = tid;
+
+	mvPp2PrsSwTcamByteSet(pe, 0, 0xFF, 0xFF);
+	mvPp2PrsSwSramRiUpdate(pe, RI_L3_MCAST, RI_L3_ADDR_MASK);
+	mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, "ipv6-mc");
+
+	mvPp2PrsSwTcamAiUpdate(pe, 0, (1 << IPV6_NO_EXT_AI_BIT));
+
+	mvPp2PrsSwTcamPortMapSet(pe, PORT_MASK);
+
+	/* Set sram AIbits */
+	mvPp2PrsSwSramAiUpdate(pe, (1 << IPV6_NO_EXT_AI_BIT), (1 << IPV6_NO_EXT_AI_BIT));
+
+	/* Shift back to IPV6 NH */
+	mvPp2PrsSwSramShiftSet(pe, -18, SRAM_OP_SEL_SHIFT_ADD);
+
+	/* Finished: go to flowid generation */
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_IP6);
+
+	mvPp2PrsSwTcamPortMapSet(pe, PORT_MASK);
+	mvPp2PrsHwWrite(pe);
+
+	mvPp2PrsSwFree(pe);
+
+	return MV_OK;
+}
+
+static int mvPp2PrsIp6Init(void)
+{
+	int tid, rc;
+	MV_PP2_PRS_ENTRY *pe;
+
+	PRS_DBG("%s\n", __func__);
+
+	pe = mvPp2PrsSwAlloc(PRS_LU_IP6);
+
+	/* Check hop limit */
+	tid = mvPp2PrsTcamFirstFree(0, MV_PP2_PRS_TCAM_SIZE - 1);
+	if (tid == MV_PRS_OUT_OF_RAGE) {
+		mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+		mvPp2PrsSwFree(pe);
+		return MV_ERROR;
+	}
+	pe->index = tid;
+
+	mvPp2PrsSwTcamByteSet(pe, 1, 0x00, 0xff);
+	mvPp2PrsSwSramRiUpdate(pe, (RI_L3_UN | RI_DROP_BIT), (RI_L3_PROTO_MASK | RI_DROP_MASK));
+
+	mvPp2PrsSwSramFlowidGenSet(pe);
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_FLOWS);
+	/* Update TCAM AI */
+	mvPp2PrsSwTcamAiUpdate(pe, (1 << IPV6_NO_EXT_AI_BIT), (1 << IPV6_NO_EXT_AI_BIT));
+
+	mvPp2PrsHwWrite(pe);
+
+	/* Update mvPrsShadowTbl */
+	mvPp2PrsShadowSet(pe->index, PRS_LU_IP4, "ipv6-hop-zero");
+
+	mvPp2PrsSwFree(pe);
+
+	/* Set entries for TCP and UDP over IPv6 */
+	rc = mvPp2PrsIp6Proto(MV_IP_PROTO_TCP,
+			      RI_L4_TCP,
+			      RI_L4_PROTO_MASK,
+			      MV_FALSE);
+	if (rc)
+		return rc;
+
+	rc = mvPp2PrsIp6Proto(MV_IP_PROTO_UDP,
+			      RI_L4_UDP,
+			      RI_L4_PROTO_MASK,
+			      MV_FALSE);
+	if (rc)
+		return rc;
+
+	/* IPv6 Multicast */
+	rc = mvPp2PrsIp6Cast(L3_MULTI_CAST);
+	if (rc)
+		return rc;
+
+	rc = mvPp2PrsIp6Proto(MV_IP_PROTO_ICMPV6,
+			      RI_CPU_CODE_RX_SPEC | RI_UDF3_RX_SPECIAL, RI_CPU_CODE_MASK | RI_UDF3_MASK,
+			      MV_FALSE);
+	if (rc)
+		return rc;
+
+	/* IPv4 is the last header. This is similar case as 6-TCP or 17-UDP */
+	/* Result Info: UDF7=1, DS lite */
+	rc = mvPp2PrsIp6Proto(MV_IP_PROTO_IPIP, RI_UDF7_IP6_LITE, RI_UDF7_MASK, MV_FALSE);
+	if (rc)
+		return rc;
+
+	/* Default IPv6 entry for unknown protocols */
+	pe = mvPp2PrsSwAlloc(PRS_LU_IP6);
+	pe->index = PE_IP6_PROTO_UN;
+
+	/* generate flow in the next iteration*/
+	mvPp2PrsSwSramFlowidGenSet(pe);
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_FLOWS);
+
+	/* Set result info bits */
+	mvPp2PrsSwSramRiUpdate(pe, RI_L4_OTHER, RI_L4_PROTO_MASK);
+
+	/* set L4 offset relatively to our current place */
+	mvPp2PrsSwSramOffsetSet(pe, SRAM_OFFSET_TYPE_L4, sizeof(MV_IP6_HEADER) - 4, SRAM_OP_SEL_OFFSET_ADD);
+
+	mvPp2PrsSwTcamPortMapSet(pe, PORT_MASK);
+
+	/* AI bits check */
+	mvPp2PrsSwTcamAiUpdate(pe, (1 << IPV6_NO_EXT_AI_BIT), (1 << IPV6_NO_EXT_AI_BIT));
+
+	mvPp2PrsHwWrite(pe);
+
+	/* Update mvPrsShadowTbl */
+	mvPp2PrsShadowSet(pe->index, PRS_LU_IP4, "ipv6-l4-unknown");
+
+	mvPp2PrsSwFree(pe);
+
+	/* Default IPv6 entry for unknown Ext protocols */
+	pe = mvPp2PrsSwAlloc(PRS_LU_IP6);
+	pe->index = PE_IP6_EXT_PROTO_UN;
+
+	/* Finished: go to LU Generation */
+	mvPp2PrsSwSramFlowidGenSet(pe);
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_FLOWS);
+
+	/* Set result info bits */
+	mvPp2PrsSwSramRiUpdate(pe, RI_L4_OTHER, RI_L4_PROTO_MASK);
+
+	mvPp2PrsSwTcamPortMapSet(pe, PORT_MASK);
+
+	/* AI bits check */
+	mvPp2PrsSwTcamAiUpdate(pe, (1 << IPV6_EXT_AI_BIT), (1 << IPV6_EXT_AI_BIT));
+
+	mvPp2PrsHwWrite(pe);
+
+	/* Update mvPrsShadowTbl */
+	mvPp2PrsShadowSet(pe->index, PRS_LU_IP4, "ipv6-ext-l4-unknown");
+
+	mvPp2PrsSwFree(pe);
+
+	/* Default IPv6 entry for unit cast address */
+	pe = mvPp2PrsSwAlloc(PRS_LU_IP6);
+	pe->index = PE_IP6_ADDR_UN;
+
+	mvPp2PrsSwTcamAiUpdate(pe, 0, (1 << IPV6_NO_EXT_AI_BIT));
+
+	mvPp2PrsSwTcamPortMapSet(pe, PORT_MASK);
+
+	mvPp2PrsSwSramRiUpdate(pe, RI_L3_UCAST, RI_L3_ADDR_MASK);
+	mvPp2PrsShadowSet(pe->index, PRS_LU_IP6, "ipv6-uc");
+
+	/* Finished: go to IPv6 again */
+	mvPp2PrsSwSramNextLuSet(pe, PRS_LU_IP6);
+
+	/* Shift back to IPV6 NH */
+	mvPp2PrsSwSramShiftSet(pe, -18, SRAM_OP_SEL_SHIFT_ADD);
+
+	/* Set sram AIbits */
+	mvPp2PrsSwSramAiUpdate(pe, (1 << IPV6_NO_EXT_AI_BIT), (1 << IPV6_NO_EXT_AI_BIT));
+
+	mvPp2PrsSwTcamPortMapSet(pe, PORT_MASK);
+	mvPp2PrsHwWrite(pe);
+
+	mvPp2PrsSwFree(pe);
+
+	return MV_OK;
+}
+
+/* Add IPv6 Next Header parse rule set */
+int mvPrsIp6NhSet(void)
+{
+	int rc;
+	unsigned char ah_len = 0;
+
+	/* Hop-by-Hop Options Header, Dummy protocol for TCP */
+	rc = mvPp2PrsIp6Proto(MV_IP_PROTO_NULL, 0, 0, MV_FALSE);
+	if (rc)
+		return rc;
+	rc = mvPp2PrsIp6Proto(MV_IP_PROTO_NULL, 0, 0, MV_TRUE);
+	if (rc)
+		return rc;
+
+	/* Encapsulated IPv6 Header, IPv6-in-IPv4 tunnelling */
+	rc = mvPp2PrsIp6Proto(MV_IP_PROTO_IPV6, 0, 0, MV_FALSE);
+	if (rc)
+		return rc;
+	rc = mvPp2PrsIp6Proto(MV_IP_PROTO_IPV6, 0, 0, MV_TRUE);
+	if (rc)
+		return rc;
+
+	/* Route header */
+	rc = mvPp2PrsIp6Proto(MV_IP_PROTO_RH, 0, 0, MV_FALSE);
+	if (rc)
+		return rc;
+	rc = mvPp2PrsIp6Proto(MV_IP_PROTO_RH, 0, 0, MV_TRUE);
+	if (rc)
+		return rc;
+
+	/* Fragment Header */
+	rc = mvPp2PrsIp6Proto(MV_IP_PROTO_FH, 0, 0, MV_FALSE);
+	if (rc)
+		return rc;
+	rc = mvPp2PrsIp6Proto(MV_IP_PROTO_FH, 0, 0, MV_TRUE);
+	if (rc)
+		return rc;
+
+	/* Authentication Header */
+	rc = mvPp2PrsIp6Proto(MV_IP_PROTO_AH, 0, 0, MV_FALSE);
+	if (rc)
+		return rc;
+	rc = mvPp2PrsIp6Proto(MV_IP_PROTO_AH, 0, 0, MV_TRUE);
+	if (rc)
+		return rc;
+	/* Check NH on AH header */
+	rc = mvPp2PrsIp6ProtoAh(MV_IP_PROTO_NULL, 0, 0);
+	if (rc)
+		return rc;
+	rc = mvPp2PrsIp6ProtoAh(MV_IP_PROTO_IPV6, 0, 0);
+	if (rc)
+		return rc;
+	rc = mvPp2PrsIp6ProtoAh(MV_IP_PROTO_RH, 0, 0);
+	if (rc)
+		return rc;
+	rc = mvPp2PrsIp6ProtoAh(MV_IP_PROTO_FH, 0, 0);
+	if (rc)
+		return rc;
+	rc = mvPp2PrsIp6ProtoAh(MV_IP_PROTO_DH, 0, 0);
+	if (rc)
+		return rc;
+	rc = mvPp2PrsIp6ProtoAh(MV_IP_PROTO_MH, 0, 0);
+	if (rc)
+		return rc;
+	rc = mvPp2PrsIp6ProtoAh(MV_IP_PROTO_TCP, RI_L4_TCP, RI_L4_PROTO_MASK);
+	if (rc)
+		return rc;
+	rc = mvPp2PrsIp6ProtoAh(MV_IP_PROTO_UDP, RI_L4_UDP, RI_L4_PROTO_MASK);
+	if (rc)
+		return rc;
+	rc = mvPp2PrsIp6ProtoAh(MV_IP_PROTO_ICMPV6, RI_CPU_CODE_RX_SPEC | RI_UDF3_RX_SPECIAL, RI_CPU_CODE_MASK | RI_UDF3_MASK);
+	if (rc)
+		return rc;
+	/* Check AH length */
+	for (ah_len = IP6_AH_LEN_16B; ah_len < IP6_AH_LEN_MAX; ah_len++) {
+		rc = mvPp2PrsIp6AhLen(ah_len, MV_FALSE);
+		if (rc)
+			return rc;
+		/* Set L4 offset */
+		rc = mvPp2PrsIp6AhLen(ah_len, MV_TRUE);
+		if (rc)
+			return rc;
+	}
+
+	/* Destination Options Header */
+	rc = mvPp2PrsIp6Proto(MV_IP_PROTO_DH, 0, 0, MV_FALSE);
+	if (rc)
+		return rc;
+	rc = mvPp2PrsIp6Proto(MV_IP_PROTO_DH, 0, 0, MV_TRUE);
+	if (rc)
+		return rc;
+
+	/* Mobility Header */
+	rc = mvPp2PrsIp6Proto(MV_IP_PROTO_MH, 0, 0, MV_FALSE);
+	if (rc)
+		return rc;
+	rc = mvPp2PrsIp6Proto(MV_IP_PROTO_MH, 0, 0, MV_TRUE);
+	if (rc)
+		return rc;
+
+	/* L4 parse */
+	rc = mvPp2PrsIp6Proto(MV_IP_PROTO_TCP,
+			      RI_L4_TCP,
+			      RI_L4_PROTO_MASK,
+			      MV_TRUE);
+	if (rc)
+		return rc;
+
+	rc = mvPp2PrsIp6Proto(MV_IP_PROTO_UDP,
+			      RI_L4_UDP,
+			      RI_L4_PROTO_MASK,
+			      MV_TRUE);
+	if (rc)
+		return rc;
+
+	rc = mvPp2PrsIp6Proto(MV_IP_PROTO_ICMPV6,
+			      RI_CPU_CODE_RX_SPEC | RI_UDF3_RX_SPECIAL, RI_CPU_CODE_MASK | RI_UDF3_MASK,
+			      MV_TRUE);
+	if (rc)
+		return rc;
+
+	return MV_OK;
+}
+
+/*
+ ******************************************************************************
+ *
+ * flows
+ *
+ ******************************************************************************
+ */
+
+static MV_PP2_PRS_ENTRY *mvPrsFlowFind(int flow)
+{
+	MV_PP2_PRS_ENTRY *pe;
+	int tid;
+	unsigned int bits, enable;
+
+	pe = mvPp2PrsSwAlloc(PRS_LU_FLOWS);
+
+	/* Go through the all entires with PRS_LU_MAC */
+	for (tid = MV_PP2_PRS_TCAM_SIZE - 1; tid >= 0; tid--) {
+		if ((!mvPp2PrsShadowIsValid(tid)) || (mvPp2PrsShadowLu(tid) != PRS_LU_FLOWS))
+			continue;
+
+		pe->index = tid;
+		mvPp2PrsHwRead(pe);
+		mvPp2PrsSwSramAiGet(pe, &bits, &enable);
+
+		/* sram store classification lookup id in AI bits [5:0] */
+		if ((bits & FLOWID_MASK) == flow)
+			return pe;
+	}
+	mvPp2PrsSwFree(pe);
+	return NULL;
+}
+
+int mvPrsFlowIdGen(int tid, int flowId, unsigned int res, unsigned int resMask, int portBmp)
+{
+	MV_PP2_PRS_ENTRY *pe;
+	char name[PRS_TEXT_SIZE];
+
+	PRS_DBG("%s\n", __func__);
+
+	POS_RANGE_VALIDATE(flowId, FLOWID_MASK);
+	POS_RANGE_VALIDATE(tid, MV_PP2_PRS_TCAM_SIZE - 1);
+
+	/* Default configuration entry - overrwite is forbidden */
+	if (mvPp2PrsShadowIsValid(tid) && (mvPp2PrsShadowLu(tid) != PRS_LU_FLOWS)) {
+		mvOsPrintf("%s: Error, Tcam entry is in use\n", __func__);
+		return MV_ERROR;
+	}
+
+	pe = mvPp2PrsSwAlloc(PRS_LU_FLOWS);
+	pe->index = tid;
+
+	mvPp2PrsSwSramAiUpdate(pe, flowId, FLOWID_MASK);
+	mvPp2PrsSwSramLuDoneSet(pe);
+
+	mvOsSPrintf(name, "flowId-%d", flowId);
+
+	/* Update mvPrsShadowTbl */
+	mvPp2PrsShadowSet(pe->index, PRS_LU_FLOWS, name);
+
+	mvPp2PrsSwTcamPortMapSet(pe, portBmp);
+
+	/*update result data and mask*/
+	mvPp2PrsSwTcamWordSet(pe, TCAM_DATA_OFFS, res, resMask);
+
+	mvPp2PrsHwWrite(pe);
+
+	mvPp2PrsSwFree(pe);
+
+	return MV_OK;
+
+}
+
+int mvPrsFlowIdDel(int tid)
+{
+	PRS_DBG("%s\n", __func__);
+
+	POS_RANGE_VALIDATE(tid, MV_PP2_PRS_TCAM_SIZE - 1);
+
+	/* Only handle valid flow type rule */
+	if (!mvPp2PrsShadowIsValid(tid) || (mvPp2PrsShadowLu(tid) != PRS_LU_FLOWS)) {
+		mvOsPrintf("%s: Error, Tcam entry is not use or not flow type\n", __func__);
+		return MV_ERROR;
+	}
+
+	mvPp2PrsHwInv(tid);
+	mvPp2PrsShadowClear(tid);
+
+	return MV_OK;
+
+}
+
+int mvPrsFlowIdFirstFreeGet(void)
+{
+	int fid;
+
+	for (fid = MV_PP2_PRS_FIRST_FLOW_ID; fid <= MV_PP2_PRS_LAST_FLOW_ID; fid++)
+		if (mvPrsFlowIdGet(fid) == MV_FALSE)
+			break;
+
+	if (fid <= MV_PP2_PRS_LAST_FLOW_ID)
+		return fid;
+
+	return MV_PP2_PRS_INVALID_FLOW_ID;
+}
+
+int mvPrsFlowIdLastFreeGet(void)
+{
+	int fid;
+
+	for (fid = MV_PP2_PRS_LAST_FLOW_ID; fid >= MV_PP2_PRS_FIRST_FLOW_ID; fid--)
+		if (mvPrsFlowIdGet(fid) == MV_FALSE)
+			break;
+
+	if (fid >=  MV_PP2_PRS_FIRST_FLOW_ID)
+		return fid;
+
+	return MV_PP2_PRS_INVALID_FLOW_ID;
+}
+
+int mvPrsFlowIdRelease(int flowId)
+{
+	POS_RANGE_VALIDATE(flowId, FLOWID_MASK);
+
+	mvPrsFlowIdClear(flowId);
+
+	return MV_OK;
+}
+
+int mvPrsDefFlow(int port)
+{
+
+	MV_PP2_PRS_ENTRY *pe;
+	int tid, mallocFlag = 0;
+	char name[PRS_TEXT_SIZE];
+
+	PRS_DBG("%s\n", __func__);
+
+	POS_RANGE_VALIDATE(port, MV_PP2_MAX_PORTS - 1);
+
+	pe = mvPrsFlowFind(FLOWID_DEF(port));
+
+	/* Such entry not exist */
+	if (!pe) {
+		/* Go through the all entires from last to fires */
+		tid = mvPp2PrsTcamFirstFree(MV_PP2_PRS_TCAM_SIZE - 1, 0);
+
+		if (tid == MV_PRS_OUT_OF_RAGE) {
+			mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+			return MV_ERROR;
+		}
+
+		mallocFlag = 1;
+		pe = mvPp2PrsSwAlloc(PRS_LU_FLOWS);
+		pe->index = tid;
+
+		/* set flowID*/
+		mvPp2PrsSwSramAiUpdate(pe, FLOWID_DEF(port), FLOWID_MASK);
+		mvPrsFlowIdSet(FLOWID_DEF(port));
+		mvPp2PrsSwSramLuDoneSet(pe);
+
+		mvOsSPrintf(name, "def-flowId-port-%d", port);
+
+		/* Update mvPrsShadowTbl */
+		mvPp2PrsShadowSet(pe->index, PRS_LU_FLOWS, name);
+
+	}
+
+	mvPp2PrsSwTcamPortMapSet(pe, (1 << port));
+
+	mvPp2PrsHwWrite(pe);
+
+	if (mallocFlag)
+		mvPp2PrsSwFree(pe);
+
+	return MV_OK;
+}
+
+int mvPrsDefFlowInit(void)
+{
+	MV_PP2_PRS_ENTRY *pe;
+	int tid;
+	int port;
+	char name[PRS_TEXT_SIZE];
+
+	PRS_DBG("%s\n", __func__);
+
+	for (port = 0; port < MV_PP2_MAX_PORTS; port++) {
+		/* Go through the all entires from last to fires */
+		tid = PE_FIRST_DEFAULT_FLOW - port;
+
+		if (tid == MV_PRS_OUT_OF_RAGE) {
+			mvOsPrintf("%s: No free TCAM entiry\n", __func__);
+			return MV_ERROR;
+		}
+
+		pe = mvPp2PrsSwAlloc(PRS_LU_FLOWS);
+		pe->index = tid;
+
+		mvPp2PrsSwTcamPortMapSet(pe, 0);
+
+		/* set flowID*/
+		mvPp2PrsSwSramAiUpdate(pe, FLOWID_DEF(port), FLOWID_MASK);
+		mvPrsFlowIdSet(FLOWID_DEF(port));
+		mvPp2PrsSwSramLuDoneSet(pe);
+
+		mvOsSPrintf(name, "def-flowId-port-%d", port);
+
+		/* Update mvPrsShadowTbl */
+		mvPp2PrsShadowSet(pe->index, PRS_LU_FLOWS, name);
+
+		mvPp2PrsHwWrite(pe);
+		mvPp2PrsSwFree(pe);
+
+	}
+	return MV_OK;
+}
+
+/******************************************************************************
+ *
+ * Paeser Init
+ *
+ ******************************************************************************
+ */
+
+int mvPrsDefaultInit(void)
+{
+	int port, rc;
+
+	/*enable tcam table*/
+	mvPp2PrsSwTcam(1);
+
+	/*write zero to all the lines*/
+	mvPp2PrsHwClearAll();
+
+	mvPp2PrsHwInvAll();
+	mvPp2PrsShadowClearAll();
+	mvPrsFlowIdClearAll();
+
+	/* TODO: Mask & clear all interrupts */
+
+	/* Always start from lookup = 0 */
+	for (port = 0; port < MV_PP2_MAX_PORTS; port++)
+		mvPp2PrsHwPortInit(port, PRS_LU_MH, MV_PP2_PRS_PORT_LU_MAX, 0);
+
+	rc = mvPrsDefFlowInit();
+	if (rc)
+		return rc;
+
+	rc = mvPp2PrsMhInit();
+	if (rc)
+		return rc;
+
+	rc = mvPp2PrsMacInit();
+	if (rc)
+		return rc;
+
+	rc = mvPp2PrsDsaInit();
+	if (rc)
+		return rc;
+
+	rc = mvPp2PrsEtypeInit();
+	if (rc)
+		return rc;
+
+	rc = mvPp2PrsVlanInit();
+	if (rc)
+		return rc;
+
+	rc = mvPp2PrsPppeInit();
+	if (rc)
+		return rc;
+
+	rc = mvPp2PrsIp4Init();
+	if (rc)
+		return rc;
+
+	rc = mvPp2PrsIp6Init();
+	if (rc)
+		return rc;
+
+	return MV_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/prs/mvPp2Prs.h b/drivers/net/ethernet/mvebu_net/pp2/hal/prs/mvPp2Prs.h
new file mode 100644
index 000000000000..2a87b16f4746
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/prs/mvPp2Prs.h
@@ -0,0 +1,248 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __MV_PRS_H__
+#define __MV_PRS_H__
+
+/*
+#define PP2_PRS_DEBUG
+*/
+/*
+ * TCAM topology definition.
+ * The TCAM is divided into sections per protocol encapsulation.
+ * Usually each section is designed to be to a lookup.
+ * Change sizes of sections according to the target product.
+ */
+
+/* VLAN */
+#define SINGLE_VLAN_AI		0
+#define DBL_VLAN_AI_BIT		7
+#define DBL_VLAN_SHADOW_SIZE	0x64	/* max number of double vlan*/
+
+/* DSA/EDSA type */
+#define TAGGED			1
+#define UNTAGGED		0
+#define EDSA			1
+#define DSA			0
+
+#define	DSA_ETHER_TYPE		0xDADA/*TODO set to default DSA ether type*/
+
+#define MV_PP2_PRS_INVALID_FLOW_ID	(0xFF) /* Invalid Flow ID */
+
+/* MAC entries , shadow udf */
+enum prs_udf {
+	PRS_UDF_MAC_DEF,
+	PRS_UDF_MAC_RANGE,
+	PRS_UDF_L2_DEF,
+	PRS_UDF_L2_DEF_COPY,
+	PRS_UDF_L2_USER,
+};
+
+/* LOOKUP ID */
+enum prs_lookup {
+	PRS_LU_MH,
+	PRS_LU_MAC,
+	PRS_LU_DSA,
+	PRS_LU_VLAN,
+	PRS_LU_L2,
+	PRS_LU_PPPOE,
+	PRS_LU_IP4,
+	PRS_LU_IP6,
+	PRS_LU_FLOWS,
+	PRS_LU_LAST,
+};
+
+/* L3 cast enum */
+enum prs_l3_cast {
+	L3_UNIT_CAST,
+	L3_MULTI_CAST,
+	L3_BROAD_CAST
+};
+
+/* PP2 GMAC enum */
+enum prs_gmacs_enum {
+	INVALID_GMAC = -1,
+	ENUM_GMAC_0,
+	ENUM_GMAC_1,
+	ENUM_GMAC_LPK,
+	ENUM_PMAC = 7,
+	MAX_GMAC = ENUM_PMAC,
+	MAX_NUM_GMACS
+};
+
+/* Multicast MAC kinds */
+enum prs_mac_mc {
+	IP4_MAC_MC,
+	IP6_MAC_MC,
+	MAX_MAC_MC
+};
+
+/* IPV6 extension header length supported
+*  Calculate: Length = 8B + len * 4 */
+enum prs_ip6_ext_ah_len {
+	IP6_AH_LEN_16B = 2,
+	IP6_AH_LEN_20B = 3,
+	IP6_AH_LEN_24B = 4,
+	IP6_AH_LEN_28B = 5,
+	IP6_AH_LEN_32B = 6,
+	IP6_AH_LEN_36B = 7,
+	IP6_AH_LEN_MAX
+};
+
+/* Tcam entries ID */
+#define PE_DROP_ALL				0
+/* The TCAM rule for RX Special packets based on Marvell header is allocated dynamically */
+#define PE_FIRST_FREE_TID				1
+
+#define PE_LAST_FREE_TID	(MV_PP2_PRS_TCAM_SIZE - 31)
+#define PE_IP6_EXT_PROTO_UN	(MV_PP2_PRS_TCAM_SIZE - 30)
+#define PE_MAC_MC_IP6		(MV_PP2_PRS_TCAM_SIZE - 29) /* multicast for IPv6 */
+#define PE_IP6_ADDR_UN		(MV_PP2_PRS_TCAM_SIZE - 28)
+#define PE_IP4_ADDR_UN		(MV_PP2_PRS_TCAM_SIZE - 27)
+#define PE_LAST_DEFAULT_FLOW	(MV_PP2_PRS_TCAM_SIZE - 26)
+#define PE_FIRST_DEFAULT_FLOW	(MV_PP2_PRS_TCAM_SIZE - 19)
+/*#define PE_ETYPE_DSA		(MV_PP2_PRS_TCAM_SIZE - 19)*/
+#define PE_EDSA_TAGGED		(MV_PP2_PRS_TCAM_SIZE - 18)
+#define PE_EDSA_UNTAGGED	(MV_PP2_PRS_TCAM_SIZE - 17)
+#define PE_DSA_TAGGED		(MV_PP2_PRS_TCAM_SIZE - 16)
+#define PE_DSA_UNTAGGED		(MV_PP2_PRS_TCAM_SIZE - 15)
+
+#define PE_ETYPE_EDSA_TAGGED	(MV_PP2_PRS_TCAM_SIZE - 14)
+#define PE_ETYPE_EDSA_UNTAGGED	(MV_PP2_PRS_TCAM_SIZE - 13)
+#define PE_ETYPE_DSA_TAGGED	(MV_PP2_PRS_TCAM_SIZE - 12)
+#define PE_ETYPE_DSA_UNTAGGED	(MV_PP2_PRS_TCAM_SIZE - 11)
+
+#define PE_MH_DEFAULT		(MV_PP2_PRS_TCAM_SIZE - 10) /* Marvell header default rule */
+#define PE_DSA_DEFAULT		(MV_PP2_PRS_TCAM_SIZE - 9)
+#define PE_IP6_PROTO_UN		(MV_PP2_PRS_TCAM_SIZE - 8)
+#define PE_IP4_PROTO_UN		(MV_PP2_PRS_TCAM_SIZE - 7)
+#define PE_ETH_TYPE_UN		(MV_PP2_PRS_TCAM_SIZE - 6)
+#define PE_VLAN_DBL		(MV_PP2_PRS_TCAM_SIZE - 5) /* accept double vlan*/
+#define PE_VLAN_NONE		(MV_PP2_PRS_TCAM_SIZE - 4) /* vlan default*/
+#define PE_MAC_MC_ALL		(MV_PP2_PRS_TCAM_SIZE - 3) /* all multicast mode */
+#define PE_MAC_PROMISCOUS	(MV_PP2_PRS_TCAM_SIZE - 2) /* promiscous mode */
+#define PE_MAC_NON_PROMISCOUS	(MV_PP2_PRS_TCAM_SIZE - 1) /* non-promiscous mode */
+
+/*
+ * Pre-defined FlowId assigment
+*/
+
+#define FLOWID_DEF(_port_)	(_port_)
+#define FLOWID_MASK	 	0x3F
+
+/*
+ * AI bits assigment
+*/
+#define IPV4_DIP_AI_BIT		0
+#define IPV6_NO_EXT_AI_BIT	0
+#define IPV6_EXT_AI_BIT		1
+#define IPV6_EXT_AH_AI_BIT	2
+#define IPV6_EXT_AH_LEN_AI_BIT	3
+#define IPV6_EXT_AH_L4_AI_BIT	4
+
+/*
+ * IPv6 extension header related
+*/
+#define IPV6_EXT_EXCLUDE_BYTES	8	/* IP6 excluding bytes in extension header, 8 bytes */
+#define IPV6_EXT_AH_UNIT_BYTES	4	/* The AH length units, 4 bytes */
+
+/*
+ * Export API
+ */
+int mvPrsDefFlow(int port);
+int mvPrsDefaultInit(void);
+int mvPrsMacDaAccept(int port, unsigned char *da, int add);
+int mvPrsMacDaRangeSet(unsigned portBmp, MV_U8 *da, MV_U8 *mask, unsigned int ri, unsigned int riMask, MV_BOOL finish);
+int mvPrsMacDaRangeDel(unsigned portBmp, MV_U8 *da, MV_U8 *mask);
+int mvPrsMacDropAllSet(int port, int add);
+int mvPrsMhSet(unsigned int portMap, unsigned short mh, unsigned short mh_mask, unsigned int ri, unsigned int riMask, MV_BOOL finish);
+int mvPrsMhDel(unsigned int portMap, unsigned short mh, unsigned short mh_mask);
+int mvPrsMcastDelAll(int port);
+int mvPrsMhRxSpecialSet(int port, unsigned short mh, int add);
+int mvPrsMacPromiscousSet(int port, int add);
+int mvPrsMacAllMultiSet(int port, int add);
+int mvPrsDebugBasicInit(void);
+int mvPrsFlowIdGen(int tid, int flowId, unsigned int res, unsigned int resMask, int portBmp);
+int mvPrsFlowIdDel(int tid);
+int mvPrsFlowIdFirstFreeGet(void);
+int mvPrsFlowIdLastFreeGet(void);
+int mvPrsFlowIdRelease(int flowId);
+int mvPp2PrsTagModeSet(int port, int type);
+int mvPp2PrsEtypeDsaModeSet(int port, int extand);
+int mvPp2PrsEtypeDsaSet(unsigned int eType);
+int mvPrsEthTypeSet(int portMap, unsigned short ethertype, unsigned int ri, unsigned int riMask, MV_BOOL finish);
+int mvPrsEthTypeDel(int portMap, unsigned short eth_type);
+int mvPrsVlanExist(unsigned short tpid, int ai);
+int mvPrsDoubleVlanExist(unsigned short tpid1, unsigned short tpid2);
+int mvPp2PrsTripleVlan(unsigned short tpid1, unsigned short tpid2, unsigned short tpid3, unsigned int portBmp, int add);
+int mvPp2PrsDoubleVlan(unsigned short tpid1, unsigned short tpid2, unsigned int portBmp, int add);
+int mvPp2PrsSingleVlan(unsigned short tpid, unsigned int portBmp, int add);
+int mvPp2PrsVlanAllDel(void);
+char *mvPrsVlanInfoStr(unsigned int vlan_info);
+char *mvPrsL2InfoStr(unsigned int l2_info);
+int mvPrsIp6NhSet(void);
+/*
+int mvPrsMacDaDrop(int port, unsigned char *da, int add);
+*/
+#endif /*__MV_PRS_H__ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/prs/mvPp2PrsHw.c b/drivers/net/ethernet/mvebu_net/pp2/hal/prs/mvPp2PrsHw.c
new file mode 100644
index 000000000000..55a1ea6546a0
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/prs/mvPp2PrsHw.c
@@ -0,0 +1,1273 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvPp2PrsHw.h"
+
+
+/*-------------------------------------------------------------------------------*/
+/*	Static functions declaretion for internal use 				*/
+/*-------------------------------------------------------------------------------*/
+
+static int mvPp2PrsSwSramRiDump(MV_PP2_PRS_ENTRY *pe);
+static int mvPp2PrsSwSramAiDump(MV_PP2_PRS_ENTRY *pe);
+/*-------------------------------------------------------------------------------*/
+
+PRS_SHADOW_ENTRY  mvPrsShadowTbl[MV_PP2_PRS_TCAM_SIZE];
+int  mvPrsFlowIdTbl[MV_PP2_PRS_FLOW_ID_SIZE];
+/******************************************************************************
+ * Common utilities
+ ******************************************************************************/
+int mvPp2PrsShadowIsValid(int index)
+{
+	return mvPrsShadowTbl[index].valid;
+}
+
+int mvPp2PrsShadowLu(int index)
+{
+	return mvPrsShadowTbl[index].lu;
+}
+
+int mvPp2PrsShadowUdf(int index)
+{
+	return mvPrsShadowTbl[index].udf;
+}
+unsigned int mvPp2PrsShadowRi(int index)
+{
+	return mvPrsShadowTbl[index].ri;
+}
+
+unsigned int mvPp2PrsShadowRiMask(int index)
+{
+	return mvPrsShadowTbl[index].riMask;
+}
+
+void mvPp2PrsShadowUdfSet(int index, int udf)
+{
+	mvPrsShadowTbl[index].udf = udf;
+}
+
+void mvPp2PrsShadowSet(int index, int lu, char *text)
+{
+
+	strncpy((char *)mvPrsShadowTbl[index].text, text, PRS_TEXT_SIZE);
+	mvPrsShadowTbl[index].text[PRS_TEXT_SIZE - 1] = 0;
+	mvPrsShadowTbl[index].valid = MV_TRUE;
+	mvPrsShadowTbl[index].lu = lu;
+}
+void mvPp2PrsShadowLuSet(int index, int lu)
+{
+	mvPrsShadowTbl[index].lu = lu;
+}
+
+void mvPp2PrsShadowRiSet(int index, unsigned int ri, unsigned int riMask)
+{
+	mvPrsShadowTbl[index].riMask = riMask;
+	mvPrsShadowTbl[index].ri = ri;
+}
+
+void mvPp2PrsShadowFinSet(int index, MV_BOOL finish)
+{
+	mvPrsShadowTbl[index].finish = finish;
+}
+
+MV_BOOL mvPp2PrsShadowFin(int index)
+{
+	return mvPrsShadowTbl[index].finish;
+}
+
+void mvPp2PrsShadowClear(int index)
+{
+	mvPrsShadowTbl[index].valid = MV_FALSE;
+	mvPrsShadowTbl[index].text[0] = 0;
+}
+
+void mvPp2PrsShadowClearAll(void)
+{
+	int index;
+
+	for (index = 0; index < MV_PP2_PRS_TCAM_SIZE; index++) {
+		mvPrsShadowTbl[index].valid = MV_FALSE;
+		mvPrsShadowTbl[index].text[0] = 0;
+	}
+
+}
+
+int mvPrsFlowIdGet(int flowId)
+{
+	return mvPrsFlowIdTbl[flowId];
+}
+
+void mvPrsFlowIdSet(int flowId)
+{
+	mvPrsFlowIdTbl[flowId] = MV_TRUE;
+}
+
+void mvPrsFlowIdClear(int flowId)
+{
+	mvPrsFlowIdTbl[flowId] = MV_FALSE;
+}
+
+void mvPrsFlowIdClearAll(void)
+{
+	int index;
+
+	for (index = 0; index < MV_PP2_PRS_FLOW_ID_SIZE; index++)
+		mvPrsFlowIdTbl[index] = MV_FALSE;
+}
+
+int mvPrsFlowIdDump(void)
+{
+	int index;
+
+	for (index = 0; index < MV_PP2_PRS_FLOW_ID_SIZE; index++) {
+		if (mvPrsFlowIdGet(index) == MV_TRUE)
+			mvOsPrintf("Flow ID[%d]: In_USE\n", index);
+	}
+
+	return MV_OK;
+}
+
+static int mvPp2PrsFirstFreeGet(int from, int to)
+{
+	int tid;
+
+	for (tid = from; tid <= to; tid++) {
+		if (!mvPrsShadowTbl[tid].valid)
+			break;
+	}
+	return tid;
+}
+
+static int mvPp2PrsLastFreeGet(int from, int to)
+{
+	int tid;
+
+	/* Go through the all entires from last to fires */
+
+	for (tid = from; tid >= to; tid--) {
+		if (!mvPrsShadowTbl[tid].valid)
+			break;
+	}
+	return tid;
+}
+/*
+* mvPp2PrsTcamFirstFree - seek for free tcam entry
+* Return first free TCAM index, seeking from start to end
+* If start < end - seek up-->bottom
+* If start > end - seek bottom-->up
+*/
+int mvPp2PrsTcamFirstFree(int start, int end)
+{
+	int tid;
+
+	if (start < end)
+		tid = mvPp2PrsFirstFreeGet(start, end);
+	else
+		tid =  mvPp2PrsLastFreeGet(start, end);
+
+	if ((tid < MV_PP2_PRS_TCAM_SIZE) && (tid > -1))
+		return tid;
+	else
+		return MV_PRS_OUT_OF_RAGE;
+}
+/*
+ * mvPrsSwAlloc - allocate new prs entry
+ * @id: tcam lookup id
+ */
+MV_PP2_PRS_ENTRY  *mvPp2PrsSwAlloc(unsigned int lu)
+{
+	MV_PP2_PRS_ENTRY *pe = mvOsMalloc(sizeof(MV_PP2_PRS_ENTRY));
+
+	WARN_OOM(pe == NULL);
+	mvPp2PrsSwClear(pe);
+	mvPp2PrsSwTcamLuSet(pe, lu);
+
+	return pe;
+}
+
+/*-------------------------------------------------------------------------------*/
+/*
+ * mvPp2PrsSwFree mvPrsSwAlloc - free prs entry
+ * @id: tcam lookup id
+ */
+
+void mvPp2PrsSwFree(MV_PP2_PRS_ENTRY *pe)
+{
+	mvOsFree(pe);
+}
+/*-------------------------------------------------------------------------------*/
+
+int mvPp2PrsHwPortInit(int port, int lu_first, int lu_max, int offs)
+{
+	int status = MV_OK;
+
+	status = mvPrsHwLkpFirstSet(port, lu_first);
+	if (status < 0)
+		return status;
+
+	status = mvPrsHwLkpMaxSet(port, lu_max);
+	if (status < 0)
+		return status;
+
+	status = mvPrsHwLkpFirstOffsSet(port, offs);
+	if (status < 0)
+		return status;
+
+	return MV_OK;
+}
+
+
+int mvPp2PrsHwRegsDump()
+{
+	int i;
+	char reg_name[100];
+
+	mvPp2PrintReg(MV_PP2_PRS_INIT_LOOKUP_REG, "MV_PP2_PRS_INIT_LOOKUP_REG");
+	mvPp2PrintReg(MV_PP2_PRS_INIT_OFFS_0_REG, "MV_PP2_PRS_INIT_OFFS_0_REG");
+	mvPp2PrintReg(MV_PP2_PRS_INIT_OFFS_1_REG, "MV_PP2_PRS_INIT_OFFS_1_REG");
+	mvPp2PrintReg(MV_PP2_PRS_MAX_LOOP_0_REG, "MV_PP2_PRS_MAX_LOOP_0_REG");
+	mvPp2PrintReg(MV_PP2_PRS_MAX_LOOP_1_REG, "MV_PP2_PRS_MAX_LOOP_1_REG");
+
+	mvPp2PrintReg(MV_PP2_PRS_INTR_CAUSE_REG, "MV_PP2_PRS_INTR_CAUSE_REG");
+	mvPp2PrintReg(MV_PP2_PRS_INTR_MASK_REG, "MV_PP2_PRS_INTR_MASK_REG");
+	mvPp2PrintReg(MV_PP2_PRS_TCAM_IDX_REG, "MV_PP2_PRS_TCAM_IDX_REG");
+
+	for (i = 0; i < MV_PP2_PRC_TCAM_WORDS; i++) {
+		mvOsSPrintf(reg_name, "MV_PP2_PRS_TCAM_DATA_%d_REG", i);
+		mvPp2PrintReg(MV_PP2_PRS_TCAM_DATA_REG(i), reg_name);
+	}
+	mvPp2PrintReg(MV_PP2_PRS_SRAM_IDX_REG, "MV_PP2_PRS_SRAM_IDX_REG");
+
+	for (i = 0; i < MV_PP2_PRC_SRAM_WORDS; i++) {
+		mvOsSPrintf(reg_name, "MV_PP2_PRS_SRAM_DATA_%d_REG", i);
+		mvPp2PrintReg(MV_PP2_PRS_SRAM_DATA_REG(i), reg_name);
+	}
+
+	mvPp2PrintReg(MV_PP2_PRS_EXP_REG, "MV_PP2_PRS_EXP_REG");
+	mvPp2PrintReg(MV_PP2_PRS_TCAM_CTRL_REG, "MV_PP2_PRS_TCAM_CTRL_REG");
+
+	return MV_OK;
+}
+
+
+int mvPrsHwLkpFirstSet(int port, int lu_first)
+{
+	unsigned int regVal;
+
+	POS_RANGE_VALIDATE(lu_first, MV_PP2_PRS_PORT_LU_MAX);
+	regVal = mvPp2RdReg(MV_PP2_PRS_INIT_LOOKUP_REG);
+	regVal &= ~MV_PP2_PRS_PORT_LU_MASK(port);
+	regVal |=  MV_PP2_PRS_PORT_LU_VAL(port, lu_first);
+	mvPp2WrReg(MV_PP2_PRS_INIT_LOOKUP_REG, regVal);
+
+	return MV_OK;
+}
+
+int mvPrsHwLkpMaxSet(int port, int lu_max)
+{
+	unsigned int regVal;
+
+	RANGE_VALIDATE(lu_max, MV_PP2_PRS_MAX_LOOP_MIN, MV_PP2_PRS_PORT_LU_MAX);
+
+	regVal = mvPp2RdReg(MV_PP2_PRS_MAX_LOOP_REG(port));
+	regVal &= ~MV_PP2_PRS_MAX_LOOP_MASK(port);
+	regVal |= MV_PP2_PRS_MAX_LOOP_VAL(port, lu_max);
+	mvPp2WrReg(MV_PP2_PRS_MAX_LOOP_REG(port), regVal);
+
+	return MV_OK;
+}
+
+int mvPrsHwLkpFirstOffsSet(int port, int off)
+{
+	unsigned int regVal;
+	/* todo if port > 7 return error */
+
+	POS_RANGE_VALIDATE(off, MV_PP2_PRS_INIT_OFF_MAX);
+
+	regVal = mvPp2RdReg(MV_PP2_PRS_INIT_OFFS_REG(port));
+	regVal &= ~MV_PP2_PRS_INIT_OFF_MASK(port);
+	regVal |= MV_PP2_PRS_INIT_OFF_VAL(port, off);
+	mvPp2WrReg(MV_PP2_PRS_INIT_OFFS_REG(port), regVal);
+
+	return MV_OK;
+}
+
+/*
+	user responsibility to check valid bit after the call to this function
+	this function will return OK even if the entry is invalid
+*/
+int mvPp2PrsHwRead(MV_PP2_PRS_ENTRY *pe)
+{
+	int index, tid;
+
+	PTR_VALIDATE(pe);
+	POS_RANGE_VALIDATE(pe->index, MV_PP2_PRS_TCAM_SIZE - 1);
+
+	tid = pe->index;
+/*
+	mvOsPrintf("start read parser entry %d \n",tid);
+*/
+	/* write index */
+	mvPp2WrReg(MV_PP2_PRS_TCAM_IDX_REG, tid);
+	pe->tcam.word[TCAM_INV_WORD] = mvPp2RdReg(MV_PP2_PRS_TCAM_DATA5_REG);
+
+	if ((pe->tcam.word[TCAM_INV_WORD] & TCAM_INV_MASK) != TCAM_VALID)
+		/* Invalid entry */
+		return MV_OK;
+
+	for (index = 0; index < MV_PP2_PRC_TCAM_WORDS; index++)
+		pe->tcam.word[index] = mvPp2RdReg(MV_PP2_PRS_TCAM_DATA_REG(index));
+
+	/* write index */
+	mvPp2WrReg(MV_PP2_PRS_SRAM_IDX_REG, tid);
+
+	for (index = 0; index < MV_PP2_PRC_SRAM_WORDS; index++)
+		pe->sram.word[index] = mvPp2RdReg(MV_PP2_PRS_SRAM_DATA_REG(index));
+/*
+	mvOsPrintf("end read parser entry %d \n",tid);
+*/
+	return MV_OK;
+}
+
+/*
+write entry SRAM + TCAM,
+*/
+int mvPp2PrsHwWrite(MV_PP2_PRS_ENTRY *pe)
+{
+	int index, tid;
+
+	PTR_VALIDATE(pe);
+/*
+	mvOsPrintf("Write parser entry %d - start\n",pe->index);
+*/
+	POS_RANGE_VALIDATE(pe->index, MV_PP2_PRS_TCAM_SIZE - 1);
+	tid = pe->index;
+
+	/*clear invalid bit*/
+	pe->tcam.word[TCAM_INV_WORD] &= ~TCAM_INV_MASK;
+
+	/* write index */
+	mvPp2WrReg(MV_PP2_PRS_TCAM_IDX_REG, tid);
+
+	for (index = 0; index < MV_PP2_PRC_TCAM_WORDS; index++)
+		mvPp2WrReg(MV_PP2_PRS_TCAM_DATA_REG(index), pe->tcam.word[index]);
+
+	/* write index */
+	mvPp2WrReg(MV_PP2_PRS_SRAM_IDX_REG, tid);
+
+	for (index = 0; index < MV_PP2_PRC_SRAM_WORDS; index++)
+		mvPp2WrReg(MV_PP2_PRS_SRAM_DATA_REG(index), pe->sram.word[index]);
+/*
+	mvOsPrintf("Write parser entry %d - end\n",pe->index);
+*/
+	return MV_OK;
+}
+
+/* Read tcam hit counter*/
+/* PPv2.1 MASS 3.20 new feature */
+/* return tcam entry (tid) hits counter or error if tid is out of range */
+static int mvPp2V1PrsHwTcamCntDump(int tid, unsigned int *cnt)
+{
+	unsigned int regVal;
+
+	POS_RANGE_VALIDATE(tid, MV_PP2_PRS_TCAM_SIZE - 1);
+
+	/* write index */
+	mvPp2WrReg(MV_PP2_PRS_TCAM_HIT_IDX_REG, tid);
+
+	regVal = mvPp2RdReg(MV_PP2_PRS_TCAM_HIT_CNT_REG);
+	regVal &= MV_PP2_PRS_TCAM_HIT_CNT_MASK;
+
+	if (cnt)
+		*cnt = regVal;
+	else
+		mvOsPrintf("HIT COUNTER: %d\n", regVal);
+
+	return MV_OK;
+}
+/* mvPp2PrsHwHitsDump - dump all non zeroed hit counters and the associated TCAM entries */
+/* PPv2.1 MASS 3.20 new feature */
+int mvPp2V1PrsHwHitsDump()
+{
+	int index;
+	unsigned int cnt;
+	MV_PP2_PRS_ENTRY pe;
+
+	for (index = 0; index < MV_PP2_PRS_TCAM_SIZE; index++) {
+		pe.index = index;
+		mvPp2PrsHwRead(&pe);
+		if ((pe.tcam.word[TCAM_INV_WORD] & TCAM_INV_MASK) == TCAM_VALID) {
+			mvPp2V1PrsHwTcamCntDump(index, &cnt);
+
+			if (cnt == 0)
+				continue;
+
+			mvOsPrintf("%s\n", mvPrsShadowTbl[index].text);
+			mvPp2PrsSwDump(&pe);
+			mvOsPrintf("       HITS: %d\n", cnt);
+			mvOsPrintf("-------------------------------------------------------------------------\n");
+		}
+	}
+
+	return MV_OK;
+}
+
+/* delete hw entry (set as invalid) */
+int mvPp2PrsHwInv(int tid)
+{
+	POS_RANGE_VALIDATE(tid, MV_PP2_PRS_TCAM_SIZE - 1);
+
+	/* write index */
+	mvPp2WrReg(MV_PP2_PRS_TCAM_IDX_REG, tid);
+	/* write invalid */
+	mvPp2WrReg(MV_PP2_PRS_TCAM_DATA_REG(TCAM_INV_WORD), TCAM_INV_MASK);
+
+	return MV_OK;
+}
+
+/* delete all hw entry (set all as invalid) */
+int mvPp2PrsHwInvAll()
+{
+	int index;
+
+	for (index = 0; index < MV_PP2_PRS_TCAM_SIZE; index++)
+		mvPp2PrsHwInv(index);
+
+	return MV_OK;
+}
+
+int mvPp2PrsHwClearAll()
+{
+	int index, i;
+
+	for (index = 0; index < MV_PP2_PRS_TCAM_SIZE; index++) {
+		mvPp2WrReg(MV_PP2_PRS_TCAM_IDX_REG, index);
+
+		for (i = 0; i < MV_PP2_PRC_TCAM_WORDS; i++)
+			mvPp2WrReg(MV_PP2_PRS_TCAM_DATA_REG(i), 0);
+
+		mvPp2WrReg(MV_PP2_PRS_SRAM_IDX_REG, index);
+
+		for (i = 0; i < MV_PP2_PRC_SRAM_WORDS; i++)
+			mvPp2WrReg(MV_PP2_PRS_SRAM_DATA_REG(i), 0);
+	}
+
+	return MV_OK;
+}
+
+int mvPp2PrsHwDump()
+{
+	int index;
+	MV_PP2_PRS_ENTRY pe;
+
+	for (index = 0; index < MV_PP2_PRS_TCAM_SIZE; index++) {
+		pe.index = index;
+		mvPp2PrsHwRead(&pe);
+		if ((pe.tcam.word[TCAM_INV_WORD] & TCAM_INV_MASK) == TCAM_VALID) {
+			mvOsPrintf("%s\n", mvPrsShadowTbl[index].text);
+			mvPp2PrsSwDump(&pe);
+#ifdef MV_ETH_PPV2_1
+			mvPp2V1PrsHwTcamCntDump(index);
+#endif
+			mvOsPrintf("-------------------------------------------------------------------------\n");
+		}
+	}
+
+	return MV_OK;
+}
+
+int mvPp2PrsSwDump(MV_PP2_PRS_ENTRY *pe)
+{
+	MV_U32	op, type, lu, done, flowid;
+	int	shift, offset, i;
+
+	PTR_VALIDATE(pe);
+
+	/* hw entry id */
+	mvOsPrintf("[%4d] ", pe->index);
+
+	i = MV_PP2_PRC_TCAM_WORDS - 1;
+	mvOsPrintf("%1.1x ", pe->tcam.word[i--] & 0xF);
+
+	while (i >= 0)
+		mvOsPrintf("%4.4x ", (pe->tcam.word[i--]) & 0xFFFF);
+
+	mvOsPrintf("| ");
+
+	mvOsPrintf(PRS_SRAM_FMT, PRS_SRAM_VAL(pe->sram.word));
+
+	mvOsPrintf("\n       ");
+
+	i = MV_PP2_PRC_TCAM_WORDS - 1;
+	mvOsPrintf("%1.1x ", (pe->tcam.word[i--] >> 16) & 0xF);
+
+	while (i >= 0)
+		mvOsPrintf("%4.4x ", ((pe->tcam.word[i--]) >> 16)  & 0xFFFF);
+
+	mvOsPrintf("| ");
+
+	mvPp2PrsSwSramShiftGet(pe, &shift);
+	mvOsPrintf("SH=%d ", shift);
+
+	mvPp2PrsSwSramOffsetGet(pe, &type, &offset, &op);
+	if (offset != 0 || ((op >> SRAM_OP_SEL_SHIFT_BITS) != 0))
+		mvOsPrintf("UDFT=%u UDFO=%d ", type, offset);
+
+	mvOsPrintf("op=%u ", op);
+
+	mvPp2PrsSwSramNextLuGet(pe, &lu);
+	mvOsPrintf("LU=%u ", lu);
+
+	mvPp2PrsSwSramLuDoneGet(pe, &done);
+	mvOsPrintf("%s ", done ? "DONE" : "N_DONE");
+
+	/*flow id generation bit*/
+	mvPp2PrsSwSramFlowidGenGet(pe, &flowid);
+	mvOsPrintf("%s ", flowid ? "FIDG" : "N_FIDG");
+
+	(pe->tcam.word[TCAM_INV_WORD] & TCAM_INV_MASK) ? mvOsPrintf(" [inv]") : 0;
+
+	if (mvPp2PrsSwSramRiDump(pe))
+		return MV_ERROR;
+
+	if (mvPp2PrsSwSramAiDump(pe))
+		return MV_ERROR;
+
+	mvOsPrintf("\n");
+
+	return MV_OK;
+
+}
+
+void mvPp2PrsSwClear(MV_PP2_PRS_ENTRY *pe)
+{
+	memset(pe, 0, sizeof(MV_PP2_PRS_ENTRY));
+}
+
+/*
+	enable - Tcam Ebable/Disable
+*/
+
+int mvPp2PrsSwTcam(int enable)
+{
+	POS_RANGE_VALIDATE(enable, 1);
+
+	mvPp2WrReg(MV_PP2_PRS_TCAM_CTRL_REG, enable);
+
+	return MV_OK;
+}
+/*
+	byte - data to tcam entry
+	enable - data to tcam enable endtry
+*/
+int mvPp2PrsSwTcamByteSet(MV_PP2_PRS_ENTRY *pe, unsigned int offs, unsigned char byte, unsigned char enable)
+{
+	PTR_VALIDATE(pe);
+	POS_RANGE_VALIDATE(offs, TCAM_DATA_MAX);
+
+	pe->tcam.byte[TCAM_DATA_BYTE(offs)] = byte;
+	pe->tcam.byte[TCAM_DATA_MASK(offs)] = enable;
+
+	return MV_OK;
+}
+
+/*  get byte from entry structure MV_PP2_PRS_ENTRY (sw)
+	byte - data of tcam entry
+	enable - data of tcam enable entry
+*/
+int mvPp2PrsSwTcamByteGet(MV_PP2_PRS_ENTRY *pe, unsigned int offs, unsigned char *byte, unsigned char *enable)
+{
+
+	PTR_VALIDATE(pe);
+	PTR_VALIDATE(byte);
+	PTR_VALIDATE(enable);
+
+	POS_RANGE_VALIDATE(offs, TCAM_DATA_MAX);
+
+	*byte = pe->tcam.byte[TCAM_DATA_BYTE(offs)];
+	*enable = pe->tcam.byte[TCAM_DATA_MASK(offs)];
+
+	return MV_OK;
+}
+
+int mvPp2PrsSwTcamWordSet(MV_PP2_PRS_ENTRY *pe, unsigned int offs, unsigned int word, unsigned int mask)
+{
+	int index, offset;
+	unsigned char byte, byteMask;
+
+	PTR_VALIDATE(pe);
+	POS_RANGE_VALIDATE(offs, TCAM_DATA_WORD_MAX);
+
+#if defined(MV_CPU_BE)
+	word = MV_BYTE_SWAP_32BIT(word);
+	mask = MV_BYTE_SWAP_32BIT(mask);
+#endif
+	for (index = 0; index < DWORD_BYTES_LEN; index++) {
+
+		offset = (offs * DWORD_BYTES_LEN) + index;
+		byte = ((unsigned char *) &word)[index];
+		byteMask = ((unsigned char *) &mask)[index];
+
+		mvPp2PrsSwTcamByteSet(pe, offset, byte, byteMask);
+	}
+
+	return MV_OK;
+}
+
+int mvPp2PrsSwTcamWordGet(MV_PP2_PRS_ENTRY *pe, unsigned int offs, unsigned int *word, unsigned int *enable)
+{
+	int index, offset;
+	unsigned char byte, mask;
+
+	PTR_VALIDATE(pe);
+	PTR_VALIDATE(word);
+	PTR_VALIDATE(enable);
+
+	POS_RANGE_VALIDATE(offs, TCAM_DATA_WORD_MAX);
+
+	for (index = 0; index < DWORD_BYTES_LEN; index++) {
+		offset = (offs * DWORD_BYTES_LEN) + index;
+		mvPp2PrsSwTcamByteGet(pe, offset,  &byte, &mask);
+		((unsigned char *) word)[index] = byte;
+		((unsigned char *) enable)[index] = mask;
+	}
+
+	return MV_OK;
+}
+
+/* compare in sw.
+	return EQUALS if tcam_data[off]&tcam_mask[off] = byte
+
+	user should call hw_read before.!!!!!
+*/
+int mvPp2PrsSwTcamByteCmp(MV_PP2_PRS_ENTRY *pe, unsigned int offs, unsigned char byte)
+{
+	unsigned char tcamByte, tcamMask;
+
+	PTR_VALIDATE(pe);
+
+	if (mvPp2PrsSwTcamByteGet(pe, offs, &tcamByte, &tcamMask) != MV_OK)
+		return MV_PRS_ERR;
+
+	if ((tcamByte & tcamMask) == (byte & tcamMask))
+		return EQUALS;
+
+	return NOT_EQUALS;
+}
+
+/* compare in sw.
+	enable data according to corresponding enable bits in  MV_PP2_PRS_ENTRY
+	user should call hw_read before.!!!!!
+	byte - data of tcam entry
+	enable - data of tcam enable endtry
+	return 0 if equals ..else return 1
+	return MV_PRS_ERR if falied !
+*/
+
+int mvPp2PrsSwTcamBytesCmp(MV_PP2_PRS_ENTRY *pe, unsigned int offs, unsigned int size, unsigned char *bytes)
+{
+	int status, index;
+
+	PTR_VALIDATE(pe);
+
+	POS_RANGE_VALIDATE(offs + size, TCAM_DATA_SIZE);
+
+	for (index = 0; index < size; index++) {
+		status = mvPp2PrsSwTcamByteCmp(pe, offs + index, bytes[index]);
+		if (status != EQUALS)
+			return status;
+	}
+	return EQUALS;
+}
+
+int mvPp2PrsSwTcamBytesIgnorMaskCmp(MV_PP2_PRS_ENTRY *pe, unsigned int offs, unsigned int size, unsigned char *bytes)
+{
+	int		index;
+	unsigned char 	tcamByte, tcamMask;
+
+	PTR_VALIDATE(pe);
+	POS_RANGE_VALIDATE(offs + size, TCAM_DATA_SIZE);
+
+	for (index = 0; index < size; index++) {
+		mvPp2PrsSwTcamByteGet(pe, offs + index, &tcamByte, &tcamMask);
+
+		if (tcamByte != bytes[index])
+			return NOT_EQUALS;
+	}
+	return EQUALS;
+}
+
+
+
+int mvPp2PrsSwTcamAiUpdate(MV_PP2_PRS_ENTRY *pe, unsigned int bits, unsigned int enable)
+{
+	int i;
+
+	PTR_VALIDATE(pe);
+	POS_RANGE_VALIDATE(bits, AI_MASK);
+	POS_RANGE_VALIDATE(enable, AI_MASK);
+
+	for (i = 0; i < AI_BITS; i++)
+		if (enable & (1 << i)) {
+			if (bits & (1 << i))
+				pe->tcam.byte[TCAM_AI_BYTE] |= (1 << i);
+			else
+				pe->tcam.byte[TCAM_AI_BYTE] &= ~(1 << i);
+		}
+
+	pe->tcam.byte[TCAM_MASK_OFFS(TCAM_AI_BYTE)] |= enable;
+	return MV_OK;
+}
+
+int mvPp2PrsSwTcamAiGet(MV_PP2_PRS_ENTRY *pe, unsigned int *bits, unsigned int *enable)
+{
+	PTR_VALIDATE(pe);
+	PTR_VALIDATE(bits);
+	PTR_VALIDATE(enable);
+
+	*bits = pe->tcam.byte[TCAM_AI_BYTE];
+	*enable = pe->tcam.byte[TCAM_MASK_OFFS(TCAM_AI_BYTE)];
+
+	return MV_OK;
+}
+
+int mvPp2PrsSwTcamPortSet(MV_PP2_PRS_ENTRY *pe, unsigned int port, int add)
+{
+	PTR_VALIDATE(pe);
+	POS_RANGE_VALIDATE(port, 7);/*TODO define max port val*/
+	POS_RANGE_VALIDATE(add, 1);
+
+	if (add == 1)
+		pe->tcam.byte[TCAM_MASK_OFFS(TCAM_PORT_BYTE)] &= ~(1 << port);
+	else
+		pe->tcam.byte[TCAM_MASK_OFFS(TCAM_PORT_BYTE)] |= (1 << port);
+
+	return MV_OK;
+}
+
+int mvPp2PrsSwTcamPortGet(MV_PP2_PRS_ENTRY *pe, unsigned int port, MV_BOOL *status)
+{
+	PTR_VALIDATE(pe);
+	POS_RANGE_VALIDATE(port, 7);/*TODO define max port val*/
+
+	if (~(pe->tcam.byte[TCAM_MASK_OFFS(TCAM_PORT_BYTE)]) & (1 << port))
+		*status = MV_TRUE;
+	else
+		*status = MV_FALSE;
+
+	return MV_OK;
+}
+
+int mvPp2PrsSwTcamPortMapSet(MV_PP2_PRS_ENTRY *pe, unsigned int ports)
+{
+	PTR_VALIDATE(pe);
+	POS_RANGE_VALIDATE(ports, PORT_MASK);
+
+	pe->tcam.byte[TCAM_PORT_BYTE] = 0;
+	pe->tcam.byte[TCAM_MASK_OFFS(TCAM_PORT_BYTE)] &= (unsigned char)(~PORT_MASK);
+	pe->tcam.byte[TCAM_MASK_OFFS(TCAM_PORT_BYTE)] |= ((~ports) & PORT_MASK);
+
+	return MV_OK;
+}
+int mvPp2PrsSwTcamPortMapGet(MV_PP2_PRS_ENTRY *pe, unsigned int *ports)
+{
+	PTR_VALIDATE(pe);
+	PTR_VALIDATE(ports);
+
+	*ports = (~pe->tcam.byte[TCAM_MASK_OFFS(TCAM_PORT_BYTE)]) & PORT_MASK;
+
+	return MV_OK;
+}
+
+
+int mvPp2PrsSwTcamLuSet(MV_PP2_PRS_ENTRY *pe, unsigned int lu)
+{
+
+	PTR_VALIDATE(pe);
+	POS_RANGE_VALIDATE(lu, LU_MASK);
+
+	pe->tcam.byte[TCAM_LU_BYTE] = lu;
+	pe->tcam.byte[TCAM_MASK_OFFS(TCAM_LU_BYTE)] = LU_MASK;
+
+	return MV_OK;
+}
+
+int mvPp2PrsSwTcamLuGet(MV_PP2_PRS_ENTRY *pe, unsigned int *lu, unsigned int *enable)
+{
+	PTR_VALIDATE(pe);
+	PTR_VALIDATE(lu);
+	PTR_VALIDATE(enable);
+
+	*lu = (pe->tcam.byte[TCAM_LU_BYTE]) & LU_MASK;
+	*enable = (pe->tcam.byte[TCAM_MASK_OFFS(TCAM_LU_BYTE)]) & LU_MASK;
+
+	return MV_OK;
+}
+
+int mvPp2PrsSwSramRiSetBit(MV_PP2_PRS_ENTRY *pe, unsigned int bit)
+{
+	PTR_VALIDATE(pe);
+
+	POS_RANGE_VALIDATE(bit, (SRAM_RI_BITS - 1));
+
+	pe->sram.word[SRAM_RI_WORD] |= (1 << bit);
+	pe->sram.word[SRAM_RI_CTRL_WORD] |= (1 << bit);
+
+
+	return MV_OK;
+}
+
+int mvPp2PrsSwSramRiClearBit(MV_PP2_PRS_ENTRY *pe, unsigned int bit)
+{
+	PTR_VALIDATE(pe);
+
+	POS_RANGE_VALIDATE(bit, (SRAM_RI_BITS-1));
+
+	pe->sram.word[SRAM_RI_OFFS] &= ~(1 << bit);
+	pe->sram.word[SRAM_RI_CTRL_WORD] |= (1 << bit);
+
+	return MV_OK;
+}
+
+
+/* set RI and RI_UPDATE */
+int mvPp2PrsSwSramRiUpdate(MV_PP2_PRS_ENTRY *pe, unsigned int bits, unsigned int enable)
+{
+/* ALTERNATIVE WAY:
+   find the bist that set in defRiMask and clear in riMask
+   maskDiff = defRiMask & (defRiMask ^ riMask);
+   update 1's: ri |= (defRi & maskDiff);
+   update 0's: ri &= ~(maskDiff & (~defRi));
+   update mask: riMask |= defRiMask;
+*/
+
+	unsigned int i;
+
+	PTR_VALIDATE(pe);
+
+
+	for (i = 0; i < SRAM_RI_BITS; i++) {
+		if (enable & (1 << i)) {
+			if (bits & (1 << i))
+				mvPp2PrsSwSramRiSetBit(pe, i);
+			else
+				mvPp2PrsSwSramRiClearBit(pe, i);
+		}
+	}
+	return MV_OK;
+}
+
+/* return RI and RI_UPDATE */
+int mvPp2PrsSwSramRiGet(MV_PP2_PRS_ENTRY *pe, unsigned int *bits, unsigned int *enable)
+{
+	PTR_VALIDATE(pe);
+	PTR_VALIDATE(bits);
+	PTR_VALIDATE(enable);
+
+	*bits = pe->sram.word[SRAM_RI_OFFS/32];
+	*enable = pe->sram.word[SRAM_RI_CTRL_OFFS/32];
+	return MV_OK;
+}
+
+int mvPp2PrsSwSramRiSet(MV_PP2_PRS_ENTRY *pe, unsigned int bits, unsigned int enable)
+{
+	PTR_VALIDATE(pe);
+
+	pe->sram.word[SRAM_RI_OFFS/32] = bits;
+	pe->sram.word[SRAM_RI_CTRL_OFFS/32] = enable;
+	return MV_OK;
+}
+
+static int mvPp2PrsSwSramRiDump(MV_PP2_PRS_ENTRY *pe)
+{
+	unsigned int data, mask;
+	int i, off = 0, bitsOffs = 0;
+	char bits[100];
+
+	PTR_VALIDATE(pe);
+
+	mvPp2PrsSwSramRiGet(pe, &data, &mask);
+	if (mask == 0)
+		return off;
+
+	mvOsPrintf("\n       ");
+
+	mvOsPrintf("S_RI=");
+	for (i = (SRAM_RI_BITS-1); i > -1 ; i--)
+		if (mask & (1 << i)) {
+			mvOsPrintf("%d", ((data & (1 << i)) != 0));
+			bitsOffs += mvOsSPrintf(bits + bitsOffs, "%d:", i);
+		} else
+			mvOsPrintf("x");
+
+	bits[bitsOffs] = '\0';
+	mvOsPrintf(" %s", bits);
+
+	return MV_OK;
+}
+
+int mvPp2PrsSwSramAiSetBit(MV_PP2_PRS_ENTRY *pe, unsigned char bit)
+{
+	PTR_VALIDATE(pe);
+	POS_RANGE_VALIDATE(bit, (SRAM_AI_CTRL_BITS - 1));
+
+	pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_AI_OFFS + bit)] |= (1  << ((SRAM_AI_OFFS + bit) % 8));
+	pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_AI_CTRL_OFFS + bit)] |= (1  << ((SRAM_AI_CTRL_OFFS + bit) % 8));
+
+	return MV_OK;
+}
+
+int mvPp2PrsSwSramAiClearBit(MV_PP2_PRS_ENTRY *pe, unsigned char bit)
+{
+	PTR_VALIDATE(pe);
+	POS_RANGE_VALIDATE(bit, (SRAM_AI_CTRL_BITS - 1));
+
+	pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_AI_OFFS + bit)] &= ~(1  << ((SRAM_AI_OFFS + bit) % 8));
+	pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_AI_CTRL_OFFS + bit)] |= (1  << ((SRAM_AI_CTRL_OFFS + bit) % 8));
+
+	return MV_OK;
+}
+
+
+int mvPp2PrsSwSramAiUpdate(MV_PP2_PRS_ENTRY *pe, unsigned int bits, unsigned int mask)
+{
+	unsigned int i;
+
+	PTR_VALIDATE(pe);
+
+	POS_RANGE_VALIDATE(bits, AI_MASK);
+	POS_RANGE_VALIDATE(mask, AI_MASK);
+
+	for (i = 0; i < SRAM_AI_CTRL_BITS; i++)
+		if (mask & (1 << i)) {
+			if (bits & (1 << i))
+				mvPp2PrsSwSramAiSetBit(pe, i);
+			else
+				mvPp2PrsSwSramAiClearBit(pe, i);
+		}
+	return MV_OK;
+}
+
+
+/* return AI and AI_UPDATE */
+int mvPp2PrsSwSramAiGet(MV_PP2_PRS_ENTRY *pe, unsigned int *bits, unsigned int *enable)
+{
+
+	PTR_VALIDATE(pe);
+	PTR_VALIDATE(bits);
+	PTR_VALIDATE(enable);
+
+	*bits = (pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_AI_OFFS)] >> (SRAM_AI_OFFS % 8)) |
+		(pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_AI_OFFS+SRAM_AI_CTRL_BITS)] << (8 - (SRAM_AI_OFFS % 8)));
+
+	*enable = (pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_AI_CTRL_OFFS)] >> (SRAM_AI_CTRL_OFFS % 8)) |
+			(pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_AI_CTRL_OFFS+SRAM_AI_CTRL_BITS)] <<
+				(8 - (SRAM_AI_CTRL_OFFS % 8)));
+
+	*bits &= SRAM_AI_MASK;
+	*enable &= SRAM_AI_MASK;
+
+	return MV_OK;
+}
+
+static int mvPp2PrsSwSramAiDump(MV_PP2_PRS_ENTRY *pe)
+{
+	int i, bitsOffs = 0;
+	unsigned int data, mask;
+	char bits[30];
+
+	PTR_VALIDATE(pe);
+
+	mvPp2PrsSwSramAiGet(pe, &data, &mask);
+
+	if (mask == 0)
+		return MV_OK;
+
+	mvOsPrintf("\n       ");
+
+	mvOsPrintf("S_AI=");
+	for (i = (SRAM_AI_CTRL_BITS-1); i > -1 ; i--)
+		if (mask & (1 << i)) {
+			mvOsPrintf("%d", ((data & (1 << i)) != 0));
+			bitsOffs += mvOsSPrintf(bits + bitsOffs, "%d:", i);
+		} else
+			mvOsPrintf("x");
+	bits[bitsOffs] = '\0';
+	mvOsPrintf(" %s", bits);
+
+	return MV_OK;
+}
+
+int mvPp2PrsSwSramNextLuSet(MV_PP2_PRS_ENTRY *pe, unsigned int lu)
+{
+	PTR_VALIDATE(pe);
+
+	POS_RANGE_VALIDATE(lu, SRAM_NEXT_LU_MASK);
+
+	pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_NEXT_LU_OFFS)] &= ~(SRAM_NEXT_LU_MASK << (SRAM_NEXT_LU_OFFS % 8));
+	pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_NEXT_LU_OFFS)] |= (lu << (SRAM_NEXT_LU_OFFS % 8));
+	return MV_OK;
+}
+
+int mvPp2PrsSwSramNextLuGet(MV_PP2_PRS_ENTRY *pe, unsigned int *lu)
+{
+	PTR_VALIDATE(pe);
+	PTR_VALIDATE(lu);
+
+	*lu = pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_NEXT_LU_OFFS)];
+	*lu = ((*lu) >> SRAM_NEXT_LU_OFFS % 8);
+	*lu &= SRAM_NEXT_LU_MASK;
+	return MV_OK;
+}
+
+/* shift to (current offset + shift) */
+int mvPp2PrsSwSramShiftSet(MV_PP2_PRS_ENTRY *pe, int shift, unsigned int op)
+{
+	PTR_VALIDATE(pe);
+	RANGE_VALIDATE(shift, 0 - SRAM_SHIFT_MASK, SRAM_SHIFT_MASK);
+	POS_RANGE_VALIDATE(op, SRAM_OP_SEL_SHIFT_MASK);
+
+	/* Set sign */
+	if (shift < 0) {
+		pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_SHIFT_SIGN_BIT)] |= (1 << (SRAM_SHIFT_SIGN_BIT % 8));
+		shift = 0 - shift;
+	} else
+		pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_SHIFT_SIGN_BIT)] &= ~(1 << (SRAM_SHIFT_SIGN_BIT % 8));
+
+	/* Set offset */
+	pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_SHIFT_OFFS)] = (unsigned char)shift;
+
+	/* Reset and Set operation */
+	pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OP_SEL_SHIFT_OFFS)] &=
+		~(SRAM_OP_SEL_SHIFT_MASK << (SRAM_OP_SEL_SHIFT_OFFS % 8));
+
+	pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OP_SEL_SHIFT_OFFS)] |= (op << (SRAM_OP_SEL_SHIFT_OFFS % 8));
+
+	/* Set base offset as current */
+	pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OP_SEL_BASE_OFFS)] &= ~(1 << (SRAM_OP_SEL_BASE_OFFS % 8));
+
+	return MV_OK;
+}
+
+int mvPp2PrsSwSramShiftGet(MV_PP2_PRS_ENTRY *pe, int *shift)
+{
+	int sign;
+
+	PTR_VALIDATE(pe);
+	PTR_VALIDATE(shift);
+
+	sign = pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_SHIFT_SIGN_BIT)] & (1 << (SRAM_SHIFT_SIGN_BIT % 8));
+	*shift = ((int)(pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_SHIFT_OFFS)])) & SRAM_SHIFT_MASK;
+
+	if (sign == 1)
+		*shift *= -1;
+
+	return MV_OK;
+}
+/* shift to (InitOffs + shift) */
+int mvPp2PrsSwSramShiftAbsUpdate(MV_PP2_PRS_ENTRY *pe, int shift, unsigned int op)
+{
+	mvPp2PrsSwSramShiftSet(pe, shift, op);
+
+	/* Set base offset as initial */
+	pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OP_SEL_BASE_OFFS)] |= (1 << (SRAM_OP_SEL_BASE_OFFS % 8));
+
+	return MV_OK;
+}
+
+
+int mvPp2PrsSwSramOffsetSet(MV_PP2_PRS_ENTRY *pe, unsigned int type, int offset, unsigned int op)
+{
+	PTR_VALIDATE(pe);
+
+	RANGE_VALIDATE(offset, 0 - SRAM_OFFSET_MASK, SRAM_OFFSET_MASK);
+	POS_RANGE_VALIDATE(type, SRAM_OFFSET_TYPE_MASK);
+	POS_RANGE_VALIDATE(op, SRAM_OP_SEL_OFFSET_MASK);
+
+	/* Set offset sign */
+	if (offset < 0) {
+		offset = 0 - offset;
+		/* set sram offset sign bit */
+		pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OFFSET_SIGN_BIT)] |= (1 << (SRAM_OFFSET_SIGN_BIT % 8));
+	} else
+		pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OFFSET_SIGN_BIT)] &= ~(1 << (SRAM_OFFSET_SIGN_BIT % 8));
+
+	/* set offset value */
+	pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OFFSET_OFFS)] &= ~(SRAM_OFFSET_MASK << (SRAM_OFFSET_OFFS % 8));
+	pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OFFSET_OFFS)] |= (offset << (SRAM_OFFSET_OFFS % 8));
+	pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OFFSET_OFFS + SRAM_OFFSET_BITS)] &=
+		~(SRAM_OFFSET_MASK >> (8 - (SRAM_OFFSET_OFFS % 8)));
+
+	pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OFFSET_OFFS + SRAM_OFFSET_BITS)] |=
+		(offset >> (8 - (SRAM_OFFSET_OFFS % 8)));
+
+	/* set offset type */
+	pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OFFSET_TYPE_OFFS)] &=
+		~(SRAM_OFFSET_TYPE_MASK << (SRAM_OFFSET_TYPE_OFFS % 8));
+
+	pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OFFSET_TYPE_OFFS)] |= (type << (SRAM_OFFSET_TYPE_OFFS % 8));
+
+	/* Set offset operation */
+	pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OP_SEL_OFFSET_OFFS)] &=
+		~(SRAM_OP_SEL_OFFSET_MASK << (SRAM_OP_SEL_OFFSET_OFFS % 8));
+
+	pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OP_SEL_OFFSET_OFFS)] |= (op << (SRAM_OP_SEL_OFFSET_OFFS % 8));
+
+	pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OP_SEL_OFFSET_OFFS + SRAM_OP_SEL_OFFSET_BITS)] &=
+			 ~(SRAM_OP_SEL_OFFSET_MASK >> (8 - (SRAM_OP_SEL_OFFSET_OFFS % 8)));
+
+	pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OP_SEL_OFFSET_OFFS + SRAM_OP_SEL_OFFSET_BITS)] |=
+			  (op >> (8 - (SRAM_OP_SEL_OFFSET_OFFS % 8)));
+
+	/* Set base offset as current */
+	pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OP_SEL_BASE_OFFS)] &= ~(1 << (SRAM_OP_SEL_BASE_OFFS % 8));
+
+	return MV_OK;
+}
+
+int mvPp2PrsSwSramOffsetGet(MV_PP2_PRS_ENTRY *pe, unsigned int *type, int *offset, unsigned int *op)
+{
+	int sign;
+
+	PTR_VALIDATE(pe);
+	PTR_VALIDATE(offset);
+	PTR_VALIDATE(type);
+
+	*type = pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OFFSET_TYPE_OFFS)] >> (SRAM_OFFSET_TYPE_OFFS % 8);
+	*type &= SRAM_OFFSET_TYPE_MASK;
+
+
+	*offset = (pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OFFSET_OFFS)] >> (SRAM_OFFSET_OFFS % 8)) & 0x7f;
+	*offset |= (pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OFFSET_OFFS + SRAM_OFFSET_BITS)] <<
+			(8 - (SRAM_OFFSET_OFFS % 8))) & 0x80;
+
+	*op = (pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OP_SEL_OFFS)] >> (SRAM_OP_SEL_OFFS % 8)) & 0x7;
+	*op |= (pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OP_SEL_OFFS + SRAM_OP_SEL_BITS)] <<
+			(8 - (SRAM_OP_SEL_OFFS % 8))) & 0x18;
+
+	/* if signed bit is tes */
+	sign = pe->sram.byte[SRAM_BIT_TO_BYTE(SRAM_OFFSET_SIGN_BIT)] & (1 << (SRAM_OFFSET_SIGN_BIT % 8));
+	if (sign != 0)
+		*offset = 1-(*offset);
+
+	return MV_OK;
+}
+
+int mvPp2PrsSramBitSet(MV_PP2_PRS_ENTRY *pe, int bitNum)
+{
+
+	PTR_VALIDATE(pe);
+
+	pe->sram.byte[SRAM_BIT_TO_BYTE(bitNum)] |= (1 << (bitNum % 8));
+	return MV_OK;
+}
+
+int mvPp2PrsSramBitClear(MV_PP2_PRS_ENTRY *pe, int bitNum)
+{
+	PTR_VALIDATE(pe);
+
+	pe->sram.byte[SRAM_BIT_TO_BYTE(bitNum)] &= ~(1 << (bitNum % 8));
+	return MV_OK;
+}
+
+int mvPp2PrsSramBitGet(MV_PP2_PRS_ENTRY *pe, int bitNum, unsigned int *bit)
+{
+	PTR_VALIDATE(pe);
+
+	*bit = pe->sram.byte[SRAM_BIT_TO_BYTE(bitNum)]  & (1 << (bitNum % 8));
+	*bit = (*bit) >> (bitNum % 8);
+	return MV_OK;
+}
+
+int mvPp2PrsSwSramLuDoneSet(MV_PP2_PRS_ENTRY *pe)
+{
+	return mvPp2PrsSramBitSet(pe, SRAM_LU_DONE_BIT);
+}
+
+int mvPp2PrsSwSramLuDoneClear(MV_PP2_PRS_ENTRY *pe)
+{
+	return mvPp2PrsSramBitClear(pe, SRAM_LU_DONE_BIT);
+}
+
+int mvPp2PrsSwSramLuDoneGet(MV_PP2_PRS_ENTRY *pe, unsigned int *bit)
+{
+	return mvPp2PrsSramBitGet(pe, SRAM_LU_DONE_BIT, bit);
+}
+
+int mvPp2PrsSwSramFlowidGenSet(MV_PP2_PRS_ENTRY *pe)
+{
+	return mvPp2PrsSramBitSet(pe, SRAM_LU_GEN_BIT);
+}
+
+int mvPp2PrsSwSramFlowidGenClear(MV_PP2_PRS_ENTRY *pe)
+{
+	return mvPp2PrsSramBitClear(pe, SRAM_LU_GEN_BIT);
+}
+
+int mvPp2PrsSwSramFlowidGenGet(MV_PP2_PRS_ENTRY *pe, unsigned int *bit)
+{
+	return mvPp2PrsSramBitGet(pe, SRAM_LU_GEN_BIT, bit);
+
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/prs/mvPp2PrsHw.h b/drivers/net/ethernet/mvebu_net/pp2/hal/prs/mvPp2PrsHw.h
new file mode 100644
index 000000000000..e87af082b390
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/prs/mvPp2PrsHw.h
@@ -0,0 +1,928 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __MV_PRS_HW_H__
+#define __MV_PRS_HW_H__
+
+#include "mvTypes.h"
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "../common/mvPp2ErrCode.h"
+#include "../common/mvPp2Common.h"
+#include "../gbe/mvPp2GbeRegs.h"
+/************************** Parser HW Configuration ***********************/
+
+/************************** Parser Registers ******************************/
+
+#define MV_PP2_PRS_INIT_LOOKUP_REG              (MV_PP2_REG_BASE + 0x1000)
+
+#define MV_PP2_PRS_PORT_LU_BITS                 4
+#define MV_PP2_PRS_PORT_LU_MAX			((1 << MV_PP2_PRS_PORT_LU_BITS) - 1)
+#define MV_PP2_PRS_PORT_LU_MASK(port)           (MV_PP2_PRS_PORT_LU_MAX << ((port) * MV_PP2_PRS_PORT_LU_BITS))
+#define MV_PP2_PRS_PORT_LU_VAL(port, val)       ((val) << ((port) * MV_PP2_PRS_PORT_LU_BITS))
+
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_PRS_INIT_OFFS_0_REG              (MV_PP2_REG_BASE + 0x1004)
+#define MV_PP2_PRS_INIT_OFFS_1_REG              (MV_PP2_REG_BASE + 0x1008)
+
+#define MV_PP2_PRS_INIT_OFFS_REG(port)         	(MV_PP2_PRS_INIT_OFFS_0_REG + ((port) & 4))
+#define MV_PP2_PRS_INIT_OFF_BITS					6
+#define MV_PP2_PRS_INIT_OFF_FIXED_BITS		8 /*only for offsets calculations*/
+#define MV_PP2_PRS_INIT_OFF_MAX			((1 << MV_PP2_PRS_INIT_OFF_BITS) - 1)
+#define MV_PP2_PRS_INIT_OFF_MASK(port)		(MV_PP2_PRS_INIT_OFF_MAX << (((port) % 4) * MV_PP2_PRS_INIT_OFF_FIXED_BITS))
+#define MV_PP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * MV_PP2_PRS_INIT_OFF_FIXED_BITS))
+
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_PRS_MAX_LOOP_0_REG		(MV_PP2_REG_BASE + 0x100c)
+#define MV_PP2_PRS_MAX_LOOP_1_REG		(MV_PP2_REG_BASE + 0x1010)
+
+#define MV_PP2_PRS_MAX_LOOP_REG(port)		(MV_PP2_PRS_MAX_LOOP_0_REG + ((port) & 4))
+#define MV_PP2_PRS_MAX_LOOP_BITS                8
+#define MV_PP2_PRS_MAX_LOOP_MAX			((1 << MV_PP2_PRS_MAX_LOOP_BITS) - 1) /*MAX VALID VALUE*/
+#define MV_PP2_PRS_MAX_LOOP_MIN			1 /*MIN VALID VALUE*/
+#define MV_PP2_PRS_MAX_LOOP_MASK(port)		(MV_PP2_PRS_MAX_LOOP_MAX << (((port) % 4) * MV_PP2_PRS_MAX_LOOP_BITS))
+#define MV_PP2_PRS_MAX_LOOP_VAL(port, val)      ((val) << (((port) % 4) * MV_PP2_PRS_MAX_LOOP_BITS))
+
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_PRS_INTR_CAUSE_REG		(MV_PP2_REG_BASE + 0x1020)
+#define MV_PP2_PRS_INTR_MASK_REG		(MV_PP2_REG_BASE + 0x1024)
+
+#define PRS_INTR_MISS				0
+#define PRS_INTR_MAX_LOOPS			1
+#define PRS_INTR_INV_OFF			2
+#define PRS_INTR_PARITY				4
+#define PRS_INTR_SRAM_PARITY			5
+
+#define PRS_INTR_MISS_MASK			(1 << PRS_INTR_MISS)
+#define PRS_INTR_MAX_LOOP_MASK			(1 << PRS_INTR_MAX_LOOPS)
+#define PRS_INTR_INV_OFF_MASK			(1 << PRS_INTR_INV_OFF)
+#define PRS_INTR_PARITY_MASK			(1 << PRS_INTR_PARITY)
+#define PRS_INTR_SRAM_PARITY_MASK		(1 << PRS_INTR_SRAM_PARITY)
+
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_PRS_TCAM_IDX_REG			(MV_PP2_REG_BASE + 0x1100)
+#define MV_PP2_PRS_TCAM_DATA_REG(idx)		(MV_PP2_REG_BASE + 0x1104 + (idx) * 4)
+#define MV_PP2_PRS_TCAM_DATA0_REG		(MV_PP2_REG_BASE + 0x1104)
+#define MV_PP2_PRS_TCAM_DATA1_REG		(MV_PP2_REG_BASE + 0x1108)
+#define MV_PP2_PRS_TCAM_DATA2_REG		(MV_PP2_REG_BASE + 0x110c)
+#define MV_PP2_PRS_TCAM_DATA3_REG		(MV_PP2_REG_BASE + 0x1110)
+#define MV_PP2_PRS_TCAM_DATA4_REG		(MV_PP2_REG_BASE + 0x1114)
+#define MV_PP2_PRS_TCAM_DATA5_REG		(MV_PP2_REG_BASE + 0x1118)
+
+#define MV_PP2_PRS_TCAM_DATA_OFFS		0
+#define MV_PP2_PRS_TCAM_MASK_OFFS		16
+/*-------------------------------------------------------------------------------*/
+
+#define MV_PP2_PRS_SRAM_IDX_REG			(MV_PP2_REG_BASE + 0x1200)
+#define MV_PP2_PRS_SRAM_DATA_REG(idx)		(MV_PP2_REG_BASE + 0x1204 + (idx) * 4)
+#define MV_PP2_PRS_SRAM_DATA0_REG		(MV_PP2_REG_BASE + 0x1204)
+#define MV_PP2_PRS_SRAM_DATA1_REG		(MV_PP2_REG_BASE + 0x1208)
+#define MV_PP2_PRS_SRAM_DATA2_REG		(MV_PP2_REG_BASE + 0x120c)
+#define MV_PP2_PRS_SRAM_DATA3_REG		(MV_PP2_REG_BASE + 0x1210)
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_PRS_EXP_REG			(MV_PP2_REG_BASE + 0x1214)
+#define MV_PP2_PRS_EXP_MISS			0
+#define MV_PP2_PRS_EXP_EXEED			1
+#define MV_PP2_PRS_EXP_OF			2
+
+/*-------------------------------------------------------------------------------*/
+#define MV_PP2_PRS_TCAM_CTRL_REG		(MV_PP2_REG_BASE + 0x1230)
+#define MV_PP2_PRS_TCAM_CTRL_EN			0
+
+/*-------------------------------------------------------------------------------*/
+/*PPv2.1 MASS 3.20 new feature */
+#define MV_PP2_PRS_TCAM_HIT_IDX_REG		(MV_PP2_REG_BASE + 0x1240)
+/*-------------------------------------------------------------------------------*/
+/*PPv2.1 MASS 3.20 new feature */
+#define MV_PP2_PRS_TCAM_HIT_CNT_REG		(MV_PP2_REG_BASE + 0x1244)
+#define MV_PP2_PRS_TCAM_HIT_CNT_BITS		16
+#define MV_PP2_PRS_TCAM_HIT_CNT_OFFS		0
+#define MV_PP2_PRS_TCAM_HIT_CNT_MASK		\
+	(((1 << MV_PP2_PRS_TCAM_HIT_CNT_BITS) - 1) << MV_PP2_PRS_TCAM_HIT_CNT_OFFS)
+
+/*-------------------------------------------------------------------------------*/
+/*				TCAM 						*/
+/*-------------------------------------------------------------------------------*/
+#define AI_BITS  				8
+#define AI_MASK					((1 << AI_BITS) - 1)
+#define AI_DONE_BIT				7
+#define AI_DONE					(1 << 7)
+
+#define PORT_BITS  				8
+#define PORT_MASK				((1 << PORT_BITS) - 1)
+
+#define LU_BITS  				4
+#define LU_MASK					((1 << LU_BITS) - 1)
+
+/****************************************************************/
+/*			endians support				*/
+/****************************************************************/
+
+/*
+  TCAM_MASK_OFFS
+  return tcam fileds (AI/LKP_ID/PRT_ID) mask offset
+  not relevant for TCAM_DATA bytes
+  _offs_: native offset if LE
+	  swapped offset if BE
+*/
+
+#if defined(MV_CPU_LE)
+	#define TCAM_MASK_OFFS(_offs_)		((_offs_) + 2)
+#else
+	#define TCAM_MASK_OFFS(_offs_)		((_offs_) - 2)
+#endif
+
+/************************* TCAM structure **********************/
+/*
+ ____________________________________________
+|  LKP ID  | PORT ID |    AI  | HEADER DATA  |
+| 4 bits   | 1 byte  | 1 byte |   8 byte     |
+----------------------------------------------
+reg 5 --> reg 0
+*/
+
+#define TCAM_DATA_OFFS				0
+#define TCAM_DATA_SIZE				8 /*bytes*/
+#define TCAM_DATA_MAX				(TCAM_DATA_SIZE - 1) /*bytes*/
+#define TCAM_DATA_WORD_MAX			((TCAM_DATA_SIZE / 4) - 1) /*words*/
+#define TCAM_AI_BYTE				HW_BYTE_OFFS(16)
+
+#define TCAM_PORT_BYTE				HW_BYTE_OFFS(17)
+
+#define TCAM_LU_BYTE				HW_BYTE_OFFS(20)
+
+/* Special bit in the TCAM register */
+#define TCAM_INV_BIT				31
+#define TCAM_INV_MASK				(1 << TCAM_INV_BIT)
+#define TCAM_VALID				0
+#define TCAM_INVALID				1
+#define TCAM_INV_WORD				5
+
+/************************* SRAM structure **********************/
+/* convert bit offset to byte offset */
+#define SRAM_BIT_TO_BYTE(_bit_)			HW_BYTE_OFFS((_bit_) / 8)
+
+
+#define SRAM_RI_OFFS  					0
+#define SRAM_RI_BITS  					32
+#define SRAM_RI_MASK  					((1 << SRAM_RI_BITS) - 1)
+#define SRAM_RI_WORD  					(SRAM_RI_OFFS / DWORD_BITS_LEN)
+
+#define SRAM_RI_CTRL_OFFS  				32
+#define SRAM_RI_CTRL_BITS  				32
+#define SRAM_RI_CTRL_WORD  				(SRAM_RI_CTRL_OFFS / DWORD_BITS_LEN)
+
+#define SRAM_SHIFT_OFFS  				64 /*NEXT_LKP*/
+#define SRAM_SHIFT_BITS					8
+#define SRAM_SHIFT_MASK					((1 << SRAM_SHIFT_BITS) - 1)
+
+#define SRAM_SHIFT_SIGN_BIT  				72 /*NEXT_LKP_SIGN*/
+
+#define SRAM_OFFSET_OFFS				73 /*UDF_OFF*/
+#define SRAM_OFFSET_BITS				8
+#define SRAM_OFFSET_MASK				((1 << SRAM_OFFSET_BITS) - 1)
+
+#define SRAM_OFFSET_SIGN_BIT  				81 /*UDF_SIGN*/
+
+#define SRAM_OFFSET_TYPE_OFFS  				82 /*UDF_TYPE*/
+#define SRAM_OFFSET_TYPE_BITS  				3
+#define SRAM_OFFSET_TYPE_MASK  				((1 << SRAM_OFFSET_TYPE_BITS) - 1)
+#define SRAM_OFFSET_TYPE_PKT				0
+#define SRAM_OFFSET_TYPE_L3				1
+#define SRAM_OFFSET_TYPE_IPV6_PROTO		2
+#define SRAM_OFFSET_TYPE_L4				4
+
+#define SRAM_OP_SEL_OFFS  				85
+#define SRAM_OP_SEL_BITS  				5
+#define SRAM_OP_SEL_MASK  				((1 << SRAM_OP_SEL_BITS) - 1)
+
+#define SRAM_OP_SEL_SHIFT_OFFS				85
+#define SRAM_OP_SEL_SHIFT_BITS				2
+#define SRAM_OP_SEL_SHIFT_MASK  			((1 << SRAM_OP_SEL_SHIFT_BITS) - 1)
+#define SRAM_OP_SEL_SHIFT_ADD				1
+#define SRAM_OP_SEL_SHIFT_IP4_ADD			2
+#define SRAM_OP_SEL_SHIFT_IP6_ADD			3
+
+#define SRAM_OP_SEL_OFFSET_OFFS				87
+#define SRAM_OP_SEL_OFFSET_BITS				2
+#define SRAM_OP_SEL_OFFSET_MASK  			((1 << SRAM_OP_SEL_OFFSET_BITS) - 1)
+#define SRAM_OP_SEL_OFFSET_ADD				0
+#define SRAM_OP_SEL_OFFSET_LKP_ADD			1
+#define SRAM_OP_SEL_OFFSET_IP4_ADD			2
+#define SRAM_OP_SEL_OFFSET_IP6_ADD			3
+
+#define SRAM_OP_SEL_BASE_OFFS				89
+#define SRAM_OP_SEL_BASE_BITS				1
+#define SRAM_OP_SEL_BASE_MASK				((1 << SRAM_OP_SEL_BASE_BITS) - 1)
+#define SRAM_OP_SEL_BASE_CURRENT			0
+#define SRAM_OP_SEL_BASE_INIT				1
+
+#define SRAM_AI_OFFS					90
+#define SRAM_AI_BITS					8
+#define SRAM_AI_MASK					((1 << SRAM_AI_BITS) - 1)
+#define SRAM_AI_WORD					(SRAM_AI_OFFS / DWORD_BITS_LEN)
+
+#define SRAM_AI_CTRL_OFFS				98
+#define SRAM_AI_CTRL_BITS				8
+#define SRAM_AI_CTRL_MASK				((1 << SRAM_AI_CTRL_BITS) - 1)
+#define SRAM_AI_CTRL_WORD				(SRAM_AI_CTRL_OFFS / DWORD_BITS_LEN)
+
+#define SRAM_NEXT_LU_OFFS				106 /*LOOKUP ID*/
+#define SRAM_NEXT_LU_BITS				4
+#define SRAM_NEXT_LU_MASK				((1 << SRAM_NEXT_LU_BITS) - 1)
+
+#define SRAM_LU_DONE_BIT				110
+#define SRAM_LU_GEN_BIT					111
+/*-------------------------------------------------------------------------------*/
+
+/* Result info bits assigment */
+#define RI_MAC_ME_BIT					0
+#define RI_MAC_ME_MASK					(1 << RI_MAC_ME_BIT)
+
+#define RI_DSA_BIT                 			1
+#define RI_DSA_MASK        				(1 << RI_DSA_BIT)
+
+/* bits 2 - 3 */
+#define RI_VLAN_OFFS					2
+#define RI_VLAN_BITS					2
+#define RI_VLAN_MASK					(((1 << RI_VLAN_BITS) - 1) << RI_VLAN_OFFS)
+#define RI_VLAN_NONE          				(0 << RI_VLAN_OFFS)
+#define RI_VLAN_SINGLE          			(1 << RI_VLAN_OFFS)
+#define RI_VLAN_DOUBLE          			(2 << RI_VLAN_OFFS)
+#define RI_VLAN_TRIPLE          			(3 << RI_VLAN_OFFS)
+
+/* bits 4 - 6 */
+#define RI_CPU_CODE_OFFS           			4 /* bits 4 - 6 */
+#define RI_CPU_CODE_BITS				3
+#define RI_CPU_CODE_MASK				(((1 << RI_CPU_CODE_BITS) - 1) << RI_CPU_CODE_OFFS)
+#define RI_CPU_CODE_RX_SPEC_VAL				3 /* bit 0 for IGMP, bit 1 for OMCI/eOAM */
+#define RI_CPU_CODE_RX_SPEC				(RI_CPU_CODE_RX_SPEC_VAL << RI_CPU_CODE_OFFS)
+
+/* bits 7 - 8 */
+#define RI_L2_VER_OFFS					7
+#define RI_L2_VER_BITS					2
+#define RI_L2_VER_MASK					(((1 << RI_L2_VER_BITS) - 1) << RI_L2_VER_OFFS)
+#define RI_L2_LLC               			(0 << RI_L2_VER_OFFS)
+#define RI_L2_LLC_SNAP          			(1 << RI_L2_VER_OFFS)
+#define RI_L2_ETH2             				(2 << RI_L2_VER_OFFS)
+#define RI_L2_OTHER             			(3 << RI_L2_VER_OFFS)
+
+/* bits 9 - 10 */
+#define RI_L2_CAST_OFFS					9
+#define RI_L2_CAST_BITS					2
+#define RI_L2_CAST_MASK					(((1 << RI_L2_CAST_BITS) - 1) << RI_L2_CAST_OFFS)
+#define RI_L2_UCAST					(0 << RI_L2_CAST_OFFS)
+#define RI_L2_MCAST					(1 << RI_L2_CAST_OFFS)
+#define RI_L2_BCAST					(2 << RI_L2_CAST_OFFS)
+#define RI_L2_RESERVED					(3 << RI_L2_CAST_OFFS)
+
+/* bit 11 */
+#define RI_PPPOE_BIT					11
+#define RI_PPPOE_MASK					(1 << RI_PPPOE_BIT)
+
+/* bits 12 - 14 */
+#define RI_L3_PROTO_OFFS				12
+#define RI_L3_PROTO_BITS				3
+#define RI_L3_PROTO_MASK				(((1 << RI_L3_PROTO_BITS) - 1) << RI_L3_PROTO_OFFS)
+#define RI_L3_UN              				(0 << RI_L3_PROTO_OFFS)
+#define RI_L3_IP4            				(1 << RI_L3_PROTO_OFFS)
+#define RI_L3_IP4_OPT          				(2 << RI_L3_PROTO_OFFS)
+#define RI_L3_IP4_OTHER       				(3 << RI_L3_PROTO_OFFS)
+#define RI_L3_IP6         				(4 << RI_L3_PROTO_OFFS)
+#define RI_L3_IP6_EXT          				(5 << RI_L3_PROTO_OFFS)
+#define RI_L3_ARP					(6 << RI_L3_PROTO_OFFS)
+#define RI_L3_RESERVED					(7 << RI_L3_PROTO_OFFS)
+
+
+/* bits 15 - 16 */
+#define RI_L3_ADDR_OFFS       				15
+#define RI_L3_ADDR_BITS       				2
+#define RI_L3_ADDR_MASK					(((1 << RI_L3_ADDR_BITS) - 1) << RI_L3_ADDR_OFFS)
+#define RI_L3_UCAST            				(0 << RI_L3_ADDR_OFFS)
+#define RI_L3_MCAST            				(1 << RI_L3_ADDR_OFFS)
+#define RI_L3_ANYCAST					(2 << RI_L3_ADDR_OFFS)
+#define RI_L3_BCAST					(3 << RI_L3_ADDR_OFFS)
+
+/* bit 17 */
+#define RI_IP_FRAG_BIT					17
+#define RI_IP_FRAG_MASK					(1 << RI_IP_FRAG_BIT)
+
+/* Bits 18 - 19 */
+#define RI_UDF2_OFFS					18
+#define RI_UDF2_BITS					2
+#define RI_UDF2_MASK					(((1 << RI_UDF2_BITS) - 1) << RI_UDF2_OFFS)
+
+/* Bits 20 - 21 */
+#define RI_UDF3_OFFS					20
+#define RI_UDF3_BITS					2
+#define RI_UDF3_MASK					(((1 << RI_UDF3_BITS) - 1) << RI_UDF3_OFFS)
+#define RI_UDF3_RX_SPEC_VAL				2
+#define RI_UDF3_RX_SPECIAL				(RI_UDF3_RX_SPEC_VAL << RI_UDF3_OFFS)
+
+/* Bits 22 - 24 */
+#define RI_L4_PROTO_OFFS				22
+#define RI_L4_PROTO_BITS				3
+#define RI_L4_PROTO_MASK				(((1 << RI_L4_PROTO_BITS) - 1) << RI_L4_PROTO_OFFS)
+#define RI_L4_UN					(0 << RI_L4_PROTO_OFFS)
+#define RI_L4_TCP					(1 << RI_L4_PROTO_OFFS)
+#define RI_L4_UDP					(2 << RI_L4_PROTO_OFFS)
+#define RI_L4_OTHER					(3 << RI_L4_PROTO_OFFS)
+									/* 3-7 user defined */
+/* Bits 25 - 26 */
+#define RI_UDF5_OFFS					25
+#define RI_UDF5_BITS					2
+#define RI_UDF5_MASK					(((1 << RI_UDF5_BITS) - 1) << RI_UDF5_OFFS)
+
+/* Bits 27 - 28 */
+#define RI_UDF6_OFFS					27
+#define RI_UDF6_BITS					2
+#define RI_UDF6_MASK					(((1 << RI_UDF6_BITS) - 1) << RI_UDF6_OFFS)
+
+/* Bits 29 - 30 */
+#define RI_UDF7_OFFS					29
+#define RI_UDF7_BITS					2
+#define RI_UDF7_MASK					(((1 << RI_UDF7_BITS) - 1) << RI_UDF7_OFFS)
+#define RI_UDF7_IP6_LITE				(1 << RI_UDF7_OFFS)/* Indicates DS lite, in A0 version */
+
+/* bit 31 - drop */
+#define RI_DROP_BIT					31
+#define RI_DROP_MASK					(1 << RI_DROP_BIT)
+/*---------------------------------------------------------------------------*/
+
+/************* Offfset types *****************/
+#define MV_PP2_PKT_OFFSET				0
+#define MV_PP2_L3_OFFSET				1
+#define MV_PP2_IP6_OFFSET				2
+#define MV_PP2_UDF3_OFFSET				3
+#define MV_PP2_L4_OFFSET				4
+#define MV_PP2_UDF5_OFFSET				5
+#define MV_PP2_UDF6_OFFSET				6
+#define MV_PP2_UDF7_OFFSET				7
+
+
+/*-------------------------------------------------------------------------------*/
+/* 				Parser Shadow					 */
+/*-------------------------------------------------------------------------------*/
+
+#define PRS_TEXT_SIZE					20
+
+typedef struct {
+	int             valid;
+	int		lu;
+	unsigned char   text[PRS_TEXT_SIZE];
+	int		udf;
+	unsigned	ri;
+	unsigned	riMask;
+	MV_BOOL		finish;
+} PRS_SHADOW_ENTRY;
+
+
+void mvPp2PrsShadowSet(int index, int lu, char *text);
+void mvPp2PrsShadowLuSet(int index, int lu);
+int mvPp2PrsShadowUdf(int index);
+void mvPp2PrsShadowUdfSet(int index, int udf);
+unsigned int mvPp2PrsShadowRi(int index);
+unsigned int mvPp2PrsShadowRiMask(int index);
+void mvPp2PrsShadowRiSet(int index, unsigned int ri, unsigned int riMask);
+void mvPp2PrsShadowFinSet(int index, MV_BOOL finish); /* set bit 111 (GEN_BIT) in SRAM */
+MV_BOOL mvPp2PrsShadowFin(int index);
+void mvPp2PrsShadowClear(int index);
+void mvPp2PrsShadowClearAll(void);
+int mvPrsFlowIdGet(int flowId);
+void mvPrsFlowIdSet(int flowId);
+void mvPrsFlowIdClear(int flowId);
+void mvPrsFlowIdClearAll(void);
+int mvPrsFlowIdDump(void);
+int mvPp2PrsShadowLu(int index);
+int mvPp2PrsShadowIsValid(int index);
+int mvPp2PrsTcamFirstFree(int start, int end);
+
+
+/*-------------------------------------------------------------------------------*/
+/* 				Parser SW entry 				 */
+/*-------------------------------------------------------------------------------*/
+
+/* Parser Public TCAM APIs */
+#define MV_PP2_PRS_TCAM_SIZE				(256)
+
+#define MV_PP2_PRS_FLOW_ID_SIZE				(64)
+#define MV_PP2_PRS_FIRST_FLOW_ID			(8) /* Flow ID 0~7 are reserved by LSP */
+#define MV_PP2_PRS_LAST_FLOW_ID				(64 - 1 - 4) /* Flow ID 60~63 are reserved by HW */
+
+#define MV_PP2_PRC_TCAM_WORDS				6
+#define MV_PP2_PRC_SRAM_WORDS				4
+
+#define PRS_SRAM_FMT					"%4.4x %8.8x %8.8x %8.8x"
+#define PRS_SRAM_VAL(p)					p[3] & 0xFFFF, p[2], p[1], p[0]
+
+typedef union mvPp2TcamEntry {
+	MV_U32 word[MV_PP2_PRC_TCAM_WORDS];
+	MV_U8  byte[MV_PP2_PRC_TCAM_WORDS * 4];
+} MV_PP2_TCAM_ENTRY;
+
+typedef union mvPp2SramEntry {
+	MV_U32 word[MV_PP2_PRC_SRAM_WORDS];
+	MV_U8  byte[MV_PP2_PRC_SRAM_WORDS * 4];
+} MV_PP2_SRAM_ENTRY;
+
+
+typedef struct mvPp2PrsEntry {
+	unsigned int index;
+	MV_PP2_TCAM_ENTRY tcam;
+	MV_PP2_SRAM_ENTRY sram;
+} MV_PP2_PRS_ENTRY;
+
+
+/*-------------------------------------------------------------------------------*/
+/* 			Parser Public initialization APIs			 */
+/*-------------------------------------------------------------------------------*/
+
+/*
+ *mvPp2PrsHwInvAll - sign all tcam entries as invalid
+*/
+int mvPp2PrsHwInvAll(void);
+
+/*
+ *mvPp2PrsHwClearAll - clear all tcam and sram entries
+*/
+int mvPp2PrsHwClearAll(void);
+
+/*
+ * mvPp2PrsHwPortInit - set first lookup fileds per port
+ * @port: port number
+ * @lu_first: first lookup id
+ * @lu_max: max number of lookups
+ * @offs: initial offset in packet
+*/
+int mvPp2PrsHwPortInit(int port, int lu_first, int lu_max, int offs);
+
+/*
+ * mvPrsSwAlloc - allocate new prs entry
+ * @id: tcam lookup id
+ */
+MV_PP2_PRS_ENTRY *mvPp2PrsSwAlloc(unsigned int luId);
+
+/*
+ * mvPp2PrsSwFree
+ * @pe: entry to free
+*/
+void mvPp2PrsSwFree(MV_PP2_PRS_ENTRY *pe);
+
+/*-------------------------------------------------------------------------------*/
+/* 			Parser internal initialization functions		 */
+/*-------------------------------------------------------------------------------*/
+
+/*
+ * mvPrsHwLkpFirstSet - set first lookup id per port
+ * @port: port number
+ * @lu_first: first lookup id
+*/
+int mvPrsHwLkpFirstSet(int port, int lu_first);
+
+/*
+ * mvPrsHwLkpMaxSet - set max number of lookups per port
+ * @port: port number
+ * @lu_max: max number of lookups
+*/
+int mvPrsHwLkpMaxSet(int port, int lu_max);
+
+/*
+ * mvPrsHwLkpMaxSet - set first lookup initial packet offset
+ * @port: port number
+ * @offs: initial offset in packet
+*/
+int mvPrsHwLkpFirstOffsSet(int port, int off);
+
+/*-------------------------------------------------------------------------------*/
+/* 			Parser Public TCAM APIs 				*/
+/*-------------------------------------------------------------------------------*/
+
+/*
+ * mvPp2PrsHwRead - read prs entry
+*/
+int mvPp2PrsHwRead(MV_PP2_PRS_ENTRY *pe);
+
+/*
+ * mvPp2PrsHwWrite - write prs entry
+*/
+int mvPp2PrsHwWrite(MV_PP2_PRS_ENTRY *pe);
+
+/*
+ * mvPp2PrsHwInv - invalidate prs entry
+ * @tid: entry id
+*/
+int mvPp2PrsHwInv(int tid);
+
+/*
+ * mvPp2PrsHwRegsDump - dump all prs registers into buffer
+*/
+int mvPp2PrsHwRegsDump(void);
+
+/*
+ * mvPp2PrsSwDump - dump sw prs entry
+ * @pe: sw prs entry
+*/
+int mvPp2PrsSwDump(MV_PP2_PRS_ENTRY *pe);
+
+/*
+ * mvPp2PrsSwClear - clear prs sw entry
+ * @pe: sw prs entry
+*/
+void mvPp2PrsSwClear(MV_PP2_PRS_ENTRY *pe);
+/*
+ * mvPp2PrsHwDump - dump all valid hw entries
+*/
+
+int mvPp2PrsHwDump(void);
+
+/*
+	mvPp2V1PrsHwHitsDump - dump all entries with non zeroed hit counters
+*/
+int mvPp2V1PrsHwHitsDump(void);
+
+/*
+	enable - Tcam Ebable/Disable
+*/
+
+int mvPp2PrsSwTcam(int enable);
+
+/*
+ * mvPp2PrsSwTcamWordGet - get byte form tcam data and tcam mask
+ * @pe: sw prs entry
+ * @offs: offset in tcam data, valid value 0 - 7
+ * @woed: data from tcam
+ * @enablek: data from tcam mask
+*/
+int mvPp2PrsSwTcamWordGet(MV_PP2_PRS_ENTRY *pe, unsigned int offs, unsigned int *word, unsigned int *enable);
+/*
+ * mvPp2PrsSwTcamWordSet - set byte in tcam data and tcam mask
+ * @pe: sw prs entry
+ * @offs: offset in tcam data, valid value 0, 1
+ * @word: data to tcam
+ * enable: data to tcam mask
+*/
+int mvPp2PrsSwTcamWordSet(MV_PP2_PRS_ENTRY *pe, unsigned int offs, unsigned int word, unsigned int mask);
+
+/*
+ * mvPp2PrsSwTcamByteGet - get byte form tcam data and tcam mask
+ * @pe: sw prs entry
+ * @offs: offset in tcam data, valid value 0, 1
+ * @byte: data from tcam
+ * @mask: data from tcam mask
+*/
+int mvPp2PrsSwTcamByteGet(MV_PP2_PRS_ENTRY *pe, unsigned int offs, unsigned char *byte, unsigned char *enable);
+/*
+ * mvPp2PrsSwTcamByteSet - set byte in tcam data and tcam mask
+ * @pe: sw prs entry
+ * @offs: offset in tcam data, valid value 0 - 7
+ * @byte: data to tcam
+ * enable: data to tcam mask
+*/
+int mvPp2PrsSwTcamByteSet(MV_PP2_PRS_ENTRY *pe, unsigned int offs, unsigned char byte, unsigned char mask);
+
+/*
+ * mvPp2PrsSwTcamByteCmp - compare one byte in tcam data of prs sw entery
+ * @pe: sw prs entry
+ * @offs: offset in tcam data, valid value 0 - 7
+ * @byte: data to compare
+ * return value: tcam[off] & tcam_mask[off] == byte & tcam_mask[off]
+*/
+int mvPp2PrsSwTcamByteCmp(MV_PP2_PRS_ENTRY *pe, unsigned int offs, unsigned char byte);
+
+/*
+ * mvPp2PrsSwTcamBytesCmp - compare bytes sequence in tcam data of prs sw entery
+ * call to  mvPp2PrsSwTcamByteCmp fpr each byte in the sequence.
+ * @pe: sw prs entry
+ * @offs: bytes sequence start offset in tcam dat
+ * @size: number of bytes to compare
+ * return value: tcam[off] & tcam_mask[off] == byte & tcam_mask[off] for all bytes
+*/
+int mvPp2PrsSwTcamBytesCmp(MV_PP2_PRS_ENTRY *pe, unsigned int offset, unsigned int size, unsigned char *bytes);
+
+/*
+ * mvPp2PrsSwTcamBytesCmpIgnorMask - compare bytes sequence in tcam data of prs sw entery
+ * call to  mvPp2PrsSwTcamByteCmp fpr each byte in the sequence.
+ * @pe: sw prs entry
+ * @offs: bytes sequence start offset in tcam dat
+ * @size: number of bytes to compare
+ * return value: tcam[off] == byte for all bytes
+*/
+int mvPp2PrsSwTcamBytesIgnorMaskCmp(MV_PP2_PRS_ENTRY *pe, unsigned int offs, unsigned int size, unsigned char *bytes);
+/*
+ * mvPp2PrsSwTcamAiUpdate - update tcam ai bits in prs sw entry.
+ * @pe: sw prs entry
+ * @bits: bits to set
+ * @enable: bits mask
+ * tcam AI[i] <-- bits[i] only if  enable[i] is set.
+ * tcam_mask AI[i] <--1 only if enable[i] is set.
+*/
+int mvPp2PrsSwTcamAiUpdate(MV_PP2_PRS_ENTRY *pe, unsigned int bits, unsigned int enable);
+
+/*
+ * mvPp2PrsSwTcamAiGet - get tcam AI and tcam_mask AI from prs sw entry.
+ * @pe: sw prs entry
+ * @bits: get tcam AI val
+ * @enable: get tcam mask AI val
+*/
+int mvPp2PrsSwTcamAiGet(MV_PP2_PRS_ENTRY *pe, unsigned int *bits, unsigned int *enable);
+
+/*
+ * mvPp2PrsSwTcamAiSetBit - set tcam AI bit in prs sw entry.
+ * @pe: sw prs entry
+ * @bit: bit offset
+ * tcam AI[bit] = 1 , tcam mask AI[bit] = 1
+*/
+int mvPp2PrsSwTcamAiSetBit(MV_PP2_PRS_ENTRY *pe, unsigned char bit);
+
+/*
+ * mvPp2PrsSwTcamAiClearBit - clear tcam AI bit in prs sw entry.
+ * @pe: sw prs entry
+ * @bit: bit offset
+ * tcam AI[bit] = 0 , tcam mask AI[bit] = 1
+*/
+int mvPp2PrsSwTcamAiClearBit(MV_PP2_PRS_ENTRY *pe, unsigned char bit);
+/*
+ * mvPp2PrsSwTcamPortGet - return tcam port status in prs sw entry.
+ * @pe: sw prs entry
+ * @port: single port
+ * @status: 1 - port bit is set, 0 - port bit is not set
+*/
+
+int mvPp2PrsSwTcamPortGet(MV_PP2_PRS_ENTRY *pe, unsigned int port, MV_BOOL *status);
+/*
+ * mvPp2PrsSwTcamPortSet - set tcam port map in prs sw entry.
+ * @pe: sw prs entry
+ * @port: single port to be add or delete
+ * @add: 1 - add port, 0 - delete port
+*/
+int mvPp2PrsSwTcamPortSet(MV_PP2_PRS_ENTRY *pe, unsigned int port, int add);
+
+/*
+ * mvPp2PrsSwTcamPortMapSet - set tcam port map in prs sw entry.
+ * @pe: sw prs entry
+ * @ports: ports bitmap to be set
+*/
+int mvPp2PrsSwTcamPortMapSet(MV_PP2_PRS_ENTRY *pe, unsigned int ports);
+
+/*
+ * mvPp2PrsSwTcamPortMapGet - get tcam PORT bitmap from prs sw entry.
+ * @pe: sw prs entry
+ * @port: get tcam PORTS val
+*/
+int mvPp2PrsSwTcamPortMapGet(MV_PP2_PRS_ENTRY *pe, unsigned int *ports);
+
+/*
+ * mvPp2PrsSwTcamLuSet - set tcam lookup id in prs sw entry.
+ * @pe: sw prs entry
+ * @lu: lookup id
+ * set tcam mask LU to 0xff
+*/
+int mvPp2PrsSwTcamLuSet(MV_PP2_PRS_ENTRY *pe, unsigned int lu);
+
+/*
+ * mvPp2PrsSwTcamLuGet - get tcam lookup id from prs sw entry.
+ * @pe: sw prs entry
+ * @lu: get tcam lookup id
+ * @enable: get tcam mask lookup id
+*/
+int mvPp2PrsSwTcamLuGet(MV_PP2_PRS_ENTRY *pe, unsigned int *lu, unsigned int *enable);
+
+
+/*
+ * mvPp2PrsSwSramRiSetBit - set sram result info bit in prs sw entry.
+ * @pe: sw prs entry
+ * @bit: bit offset in result info
+ * set sram RI_EN[bit]
+ */
+int mvPp2PrsSwSramRiSetBit(MV_PP2_PRS_ENTRY *pe, unsigned int bit);
+
+/*
+ * mvPp2PrsSwSramRiClearBit - clear sram result info bit in prs sw entry.
+ * @pe: sw prs entry
+ * @bit: bit offset in result info
+ * set sram RI_EN[bit]
+ */
+int mvPp2PrsSwSramRiClearBit(MV_PP2_PRS_ENTRY *pe, unsigned int bit);
+
+/*
+ * mvPp2PrsSwSramRiUpdate - update sram result info bits in prs sw entry.
+ * @pe: sw prs entry
+ * @bits: bits to set
+ * @enable: bits mask
+ * sram RI[i] <-- bits[i] only if  sram RI_EN[i] is set.
+ * sram RI_EN[i] <--1 only if enable[i] is set.
+ */
+int mvPp2PrsSwSramRiUpdate(MV_PP2_PRS_ENTRY *pe, unsigned int bits, unsigned int enable);
+/*
+ * mvPp2PrsSwSramRiSet - set sram result info bits in prs sw entry.
+ * @pe: sw prs entry
+ * @bits: bits to set
+ * @enable: bits mask
+  */
+
+int mvPp2PrsSwSramRiSet(MV_PP2_PRS_ENTRY *pe, unsigned int bits, unsigned int enable);
+/*
+ * mvPp2PrsSwSramRiGet - get sram result info from prs sw entry.
+ * @pe: sw prs entry
+ * @bits: get result info bits
+ * @enable: get result info update bits
+*/
+int mvPp2PrsSwSramRiGet(MV_PP2_PRS_ENTRY *pe, unsigned int *bits, unsigned int *enable);
+
+
+/*
+ * mvPp2PrsSwSramAiSetBit - set sram AI bit in prs sw entry.
+ * @pe: sw prs entry
+ * @bit: bit offset
+ * sram AI[bit] = 1 , sram AI_EN[bit] = 1
+*/
+int mvPp2PrsSwSramAiSetBit(MV_PP2_PRS_ENTRY *pe, unsigned char bit);
+
+/*
+ * mvPp2PrsSwSramAiClearBit - clear sram AI bit in prs sw entry.
+ * @pe: sw prs entry
+ * @bit: bit offset
+ * sram AI[bit] = 0 , sram AI_EN[bit] = 1
+*/
+int mvPp2PrsSwSramAiClearBit(MV_PP2_PRS_ENTRY *pe, unsigned char bit);
+
+/*
+ * mvPp2PrsSwSramAiUpdate - update sram ai bits in prs sw entry.
+ * @pe: sw prs entry
+ * @bits: bits to set
+ * @enable: bits mask
+ * sram AI[i] <-- bits[i] only if  enable[i] is set.
+ * sram AI_EN[i] <--1 only if enable[i] is set.
+*/
+int mvPp2PrsSwSramAiUpdate(MV_PP2_PRS_ENTRY *pe, unsigned int bits, unsigned int enable);
+
+/*
+ * mvPp2PrsSwSramAiGet - get sram AI and AI_EN from prs sw entry.
+ * @pe: sw prs entry
+ * @bits: get sram AI val
+ * @enable: get sram AI_EN val
+*/
+int mvPp2PrsSwSramAiGet(MV_PP2_PRS_ENTRY *pe, unsigned int *bits, unsigned int *enable);
+
+
+/*
+ * mvPp2PrsSwSramNextLuSet - set prs sram next lookup id.
+ * @pe: sw prs entry
+ * @lu: next lookup id
+*/
+int mvPp2PrsSwSramNextLuSet(MV_PP2_PRS_ENTRY *pe, unsigned int lu);
+
+/*
+ * mvPp2PrsSwSramNextLuGet - got prs sram next lookup id.
+ * @pe: sw prs entry
+ * @lu: get next lookup id
+*/
+int mvPp2PrsSwSramNextLuGet(MV_PP2_PRS_ENTRY *pe, unsigned int *lu);
+
+
+/*
+ * mvPp2PrsSwSramShiftSet - set prs sram shift.
+ * @pe: sw prs entry
+ * @shift
+ * @op
+*/
+int mvPp2PrsSwSramShiftSet(MV_PP2_PRS_ENTRY *pe, int shift, unsigned int op);
+
+/*
+ * mvPp2PrsSwSramShiftGet - get prs sram shift.
+ * @pe: sw prs entry
+ * @shift: get shift val
+ * @op: get shift op
+*/
+int mvPp2PrsSwSramShiftGet(MV_PP2_PRS_ENTRY *pe, int *shift);
+
+/*
+ * mvPp2PrsSwSramShiftAbsUpdate - set sram shift value according to initial offset
+ * @pe: sw prs entry
+ * @shift: get shift val
+ * shift value = @shift + inital value (store in reg 0x1004, 0x1008)
+*/
+int mvPp2PrsSwSramShiftAbsUpdate(MV_PP2_PRS_ENTRY *pe, int shift, unsigned int op);
+
+/*
+ * mvPp2PrsSwSramOffsetSet - set prs sram offset.
+ * @pe: sw prs entry
+ * @type: offset type
+ * @offset: signed offset value
+ * @op: offset operation
+*/
+int mvPp2PrsSwSramOffsetSet(MV_PP2_PRS_ENTRY *pe, unsigned int type, int offset, unsigned int op);
+
+/*
+ * mvPp2PrsSwSramOffsetGet - get prs sram offset.
+ * @pe: sw prs entry
+ * @type: get offset type
+ * @offset: get offset
+ * @op: get offset operation
+*/
+int mvPp2PrsSwSramOffsetGet(MV_PP2_PRS_ENTRY *pe, unsigned int *type, int *offset, unsigned int *op);
+
+/*
+ * mvPp2PrsSwSramLuDoneSet - set prs sram lookup done bit.
+ * @pe: sw prs entry
+*/
+int mvPp2PrsSwSramLuDoneSet(MV_PP2_PRS_ENTRY *pe);
+
+/*
+ * mvPp2PrsSwSramLuDoneClear - clear prs sram lookup done bit.
+ * @pe: sw prs entry
+*/
+int mvPp2PrsSwSramLuDoneClear(MV_PP2_PRS_ENTRY *pe);
+
+/*
+ * mvPp2PrsSwSramLuDoneGet - get prs sram lookup done bit.
+ * @pe: sw prs entry
+ * bit: get lookup done bit
+*/
+int mvPp2PrsSwSramLuDoneGet(MV_PP2_PRS_ENTRY *pe, unsigned int *bit);
+
+/*
+ * mvPp2PrsSwSramFlowidGenSet - set prs sram flowid gen bit.
+ * @pe: sw prs entry
+*/
+int mvPp2PrsSwSramFlowidGenSet(MV_PP2_PRS_ENTRY *pe);
+
+/*
+ * mvPp2PrsSwSramFlowidGenClear - clear prs sram flowid gen bit.
+ * @pe: sw prs entry
+*/
+int mvPp2PrsSwSramFlowidGenClear(MV_PP2_PRS_ENTRY *pe);
+
+/*
+ * mvPp2PrsSwSramFlowidGenGet - get prs sram flowid gen bit.
+ * @pe: sw prs entry
+ * bit: get lowid gen bit.
+*/
+int mvPp2PrsSwSramFlowidGenGet(MV_PP2_PRS_ENTRY *pe, unsigned int *bit);
+
+#endif /* __MV_PRS_HW_H__ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/wol/mvPp2Wol.c b/drivers/net/ethernet/mvebu_net/pp2/hal/wol/mvPp2Wol.c
new file mode 100644
index 000000000000..12479bd82319
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/wol/mvPp2Wol.c
@@ -0,0 +1,322 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "mvCommon.h"  /* Should be included before mvSysHwConfig */
+#include "mvTypes.h"
+#include "mvDebug.h"
+#include "mvOs.h"
+
+#include "common/mvPp2Common.h"
+#include "gbe/mvPp2Gbe.h"
+#include "mvPp2Wol.h"
+
+
+void        mvPp2WolRegs(void)
+{
+	int    i, reg;
+
+	mvOsPrintf("\n[WoL registers]\n");
+
+	mvPp2PrintReg(MV_PP2_WOL_MODE_REG,       "MV_PP2_WOL_MODE_REG");
+	mvPp2PrintReg(MV_PP2_WOL_MAC_HIGH_REG,   "MV_PP2_WOL_MAC_HIGH_REG");
+	mvPp2PrintReg(MV_PP2_WOL_MAC_LOW_REG,    "MV_PP2_WOL_MAC_LOW_REG");
+	mvPp2PrintReg(MV_PP2_WOL_ARP_IP0_REG,    "MV_PP2_WOL_ARP_IP0_REG");
+	mvPp2PrintReg(MV_PP2_WOL_ARP_IP1_REG,    "MV_PP2_WOL_ARP_IP1_REG");
+	mvPp2PrintReg(MV_PP2_WOL_WAKEUP_EN_REG,  "MV_PP2_WOL_WAKEUP_EN_REG");
+	mvPp2PrintReg(MV_PP2_WOL_INTR_CAUSE_REG, "MV_PP2_WOL_INTR_CAUSE_REG");
+	mvPp2PrintReg(MV_PP2_WOL_INTR_MASK_REG,  "MV_PP2_WOL_INTR_MASK_REG");
+	mvPp2PrintReg(MV_PP2_WOL_PTRN_SIZE_REG,  "MV_PP2_WOL_PTRN_SIZE_REG");
+
+
+	for (i = 0; i < MV_PP2_WOL_PTRN_NUM; i++) {
+		mvOsPrintf("\nWoL Wakeup Frame pattern #%d\n", i);
+
+		mvPp2WrReg(MV_PP2_WOL_PTRN_IDX_REG, i);
+		for (reg = 0; reg < MV_PP2_WOL_PTRN_REGS; reg++) {
+			mvPp2RegPrintNonZero2(MV_PP2_WOL_PTRN_DATA_REG(reg), "MV_PP2_WOL_PTRN_DATA_REG", reg);
+			mvPp2RegPrintNonZero2(MV_PP2_WOL_PTRN_MASK_REG(reg), "MV_PP2_WOL_PTRN_MASK_REG", reg);
+		}
+	}
+}
+
+void      mvPp2WolStatus(void)
+{
+}
+
+MV_STATUS mvPp2WolSleep(int port)
+{
+	MV_U32 regVal;
+
+	if (mvPp2PortCheck(port))
+		return MV_BAD_PARAM;
+
+	/* Clear cause register and unmask enabled WoL events */
+	mvPp2WrReg(MV_PP2_WOL_INTR_CAUSE_REG, 0);
+	regVal = mvPp2RdReg(MV_PP2_WOL_WAKEUP_EN_REG);
+	mvPp2WrReg(MV_PP2_WOL_INTR_MASK_REG, regVal);
+
+	regVal = mvPp2RdReg(MV_PP2_WOL_MODE_REG);
+	if (regVal & MV_PP2_WOL_IS_SLEEP_MASK) {
+		mvOsPrintf("WoL is already activated on port #%d\n",
+			(regVal >> MV_PP2_WOL_SLEEP_PORT_OFFS) & MV_PP2_WOL_SLEEP_PORT_MAX);
+		return MV_BUSY;
+	}
+	regVal = MV_PP2_WOL_SLEEP_PORT_MASK(port) | MV_PP2_WOL_GO_SLEEP_MASK;
+	mvPp2WrReg(MV_PP2_WOL_MODE_REG, regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2WolWakeup(void)
+{
+	MV_U32 regVal;
+
+	/* Clear cause register and mask all WoL events */
+	mvPp2WrReg(MV_PP2_WOL_INTR_CAUSE_REG, 0);
+	mvPp2WrReg(MV_PP2_WOL_INTR_MASK_REG, 0);
+
+	regVal = mvPp2RdReg(MV_PP2_WOL_MODE_REG);
+	regVal &= ~MV_PP2_WOL_GO_SLEEP_MASK;
+	mvPp2WrReg(MV_PP2_WOL_MODE_REG, regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2WolMagicDaSet(MV_U8 *mac_da)
+{
+	MV_U32 regVal;
+
+	regVal = (mac_da[0] << 24) | (mac_da[1] << 16) | (mac_da[2] << 8) | (mac_da[3] << 0);
+	mvPp2WrReg(MV_PP2_WOL_MAC_HIGH_REG, regVal);
+
+	regVal = (mac_da[4] << 8) | (mac_da[5]);
+	mvPp2WrReg(MV_PP2_WOL_MAC_LOW_REG, regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2WolArpIpSet(int idx, MV_U32 ip)
+{
+	MV_U32 regVal;
+
+	if (mvPp2MaxCheck(idx, MV_PP2_WOL_ARP_IP_NUM, "ARP IP index"))
+		return MV_BAD_PARAM;
+
+	regVal = MV_32BIT_BE(ip);
+	mvPp2WrReg(MV_PP2_WOL_ARP_IP_REG(idx), regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2WolPtrnSet(int idx, int off, int size, MV_U8 *data, MV_U8 *mask)
+{
+	MV_U32 regVal, regData, regMask;
+	int i, j, reg, new_size;
+	MV_U8 *new_data;
+	MV_U8 *new_mask;
+	int aligned_size = 0, mh_off = 0;
+
+	/* Take Marvell Header offset into consideration  */
+	mh_off = off + MV_ETH_MH_SIZE;
+
+	if (mvPp2MaxCheck(idx, MV_PP2_WOL_PTRN_NUM, "PTRN index"))
+		return MV_BAD_PARAM;
+
+	if (mvPp2MaxCheck((mh_off + size), MV_PP2_WOL_PTRN_BYTES, "PTRN size"))
+		return MV_BAD_PARAM;
+
+	regVal = mvPp2RdReg(MV_PP2_WOL_PTRN_SIZE_REG);
+	regVal &= ~MV_PP2_WOL_PTRN_SIZE_MAX_MASK(idx);
+	regVal |= MV_PP2_WOL_PTRN_SIZE_MASK(idx, size + mh_off);
+
+	mvPp2WrReg(MV_PP2_WOL_PTRN_SIZE_REG, regVal);
+
+	mvPp2WrReg(MV_PP2_WOL_PTRN_IDX_REG, idx);
+	if (mh_off % 4) {
+		aligned_size = size + 4 - (mh_off % 4);
+		new_data = kmalloc(sizeof(MV_U8) * aligned_size, GFP_KERNEL);
+		if (!new_data) {
+			mvOsPrintf("CPU memory allocation fail\n");
+			return MV_OUT_OF_CPU_MEM;
+		}
+
+		new_mask = kmalloc(sizeof(MV_U8) * aligned_size, GFP_KERNEL);
+		if (!new_mask) {
+			kfree(new_data);
+			mvOsPrintf("CPU memory allocation fail\n");
+			return MV_OUT_OF_CPU_MEM;
+		}
+
+		memset(new_data, 0, sizeof(MV_U8) * aligned_size);
+		memset(new_mask, 0, sizeof(MV_U8) * aligned_size);
+
+		memcpy(&new_data[mh_off % 4], data, size);
+		memcpy(&new_mask[mh_off % 4], mask, size);
+	} else {
+		new_data = data;
+		new_mask = mask;
+	}
+	new_size = size + (mh_off % 4);
+	for (i = 0; i < new_size; i += 4) {
+		reg = (mh_off + i) / 4;
+		regData = mvPp2RdReg(MV_PP2_WOL_PTRN_DATA_REG(reg));
+		regMask = mvPp2RdReg(MV_PP2_WOL_PTRN_MASK_REG(reg));
+		for (j = 0; j < 4; j++) {
+
+			if ((i + j) >= new_size)
+				break;
+
+			regData &= ~MV_PP2_WOL_PTRN_DATA_BYTE_MASK(3 - j);
+			regData |= MV_PP2_WOL_PTRN_DATA_BYTE(3 - j, new_data[i + j]);
+			/* mask on byte level */
+			if (new_mask[i + j] == 0)
+				regMask &= ~MV_PP2_WOL_PTRN_MASK_BIT(3 - j);
+			else
+				regMask |= MV_PP2_WOL_PTRN_MASK_BIT(3 - j);
+		}
+		mvPp2WrReg(MV_PP2_WOL_PTRN_DATA_REG(reg), regData);
+		mvPp2WrReg(MV_PP2_WOL_PTRN_MASK_REG(reg), regMask);
+	}
+	if (mh_off % 4) {
+		kfree(new_data);
+		kfree(new_mask);
+	}
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2WolArpEventSet(int idx, int enable)
+{
+	MV_U32 regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_WOL_WAKEUP_EN_REG);
+	if (enable)
+		regVal |= MV_PP2_WOL_ARP_IP_MASK(idx);
+	else
+		regVal &= ~MV_PP2_WOL_ARP_IP_MASK(idx);
+
+	mvPp2WrReg(MV_PP2_WOL_WAKEUP_EN_REG, regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2WolMcastEventSet(int enable)
+{
+	MV_U32 regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_WOL_WAKEUP_EN_REG);
+	if (enable)
+		regVal |= MV_PP2_WOL_MCAST_MASK;
+	else
+		regVal &= ~MV_PP2_WOL_MCAST_MASK;
+
+	mvPp2WrReg(MV_PP2_WOL_WAKEUP_EN_REG, regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2WolUcastEventSet(int enable)
+{
+	MV_U32 regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_WOL_WAKEUP_EN_REG);
+	if (enable)
+		regVal |= MV_PP2_WOL_UCAST_MASK;
+	else
+		regVal &= ~MV_PP2_WOL_UCAST_MASK;
+
+	mvPp2WrReg(MV_PP2_WOL_WAKEUP_EN_REG, regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2WolMagicEventSet(int enable)
+{
+	MV_U32 regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_WOL_WAKEUP_EN_REG);
+	if (enable)
+		regVal |= MV_PP2_WOL_MAGIC_PTRN_MASK;
+	else
+		regVal &= ~MV_PP2_WOL_MAGIC_PTRN_MASK;
+
+	mvPp2WrReg(MV_PP2_WOL_WAKEUP_EN_REG, regVal);
+
+	return MV_OK;
+}
+
+MV_STATUS mvPp2WolPtrnEventSet(int idx, int enable)
+{
+	MV_U32 regVal;
+
+	regVal = mvPp2RdReg(MV_PP2_WOL_WAKEUP_EN_REG);
+	if (enable)
+		regVal |= MV_PP2_WOL_PTRN_IDX_MASK(idx);
+	else
+		regVal &= ~MV_PP2_WOL_PTRN_IDX_MASK(idx);
+
+	mvPp2WrReg(MV_PP2_WOL_WAKEUP_EN_REG, regVal);
+
+	return MV_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/hal/wol/mvPp2Wol.h b/drivers/net/ethernet/mvebu_net/pp2/hal/wol/mvPp2Wol.h
new file mode 100644
index 000000000000..b15fb270db90
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/hal/wol/mvPp2Wol.h
@@ -0,0 +1,171 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#ifndef __mvPp2Wol_h__
+#define __mvPp2Wol_h__
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+#include "gbe/mvPp2Gbe.h"
+
+/*********************************** RX Policer Registers *******************/
+
+#define MV_PP2_WOL_MODE_REG                 (MV_PP2_REG_BASE + 0x400)
+
+#define MV_PP2_WOL_GO_SLEEP_BIT             0
+#define MV_PP2_WOL_GO_SLEEP_MASK            (1 << MV_PP2_WOL_GO_SLEEP_BIT)
+
+#define MV_PP2_WOL_IS_SLEEP_BIT             1
+#define MV_PP2_WOL_IS_SLEEP_MASK            (1 << MV_PP2_WOL_IS_SLEEP_BIT)
+
+#define MV_PP2_WOL_SLEEP_PORT_OFFS          4
+#define MV_PP2_WOL_SLEEP_PORT_BITS          3
+#define MV_PP2_WOL_SLEEP_PORT_MAX           ((1 << MV_PP2_WOL_SLEEP_PORT_BITS) - 1)
+#define MV_PP2_WOL_SLEEP_PORT_ALL_MASK      (MV_PP2_WOL_SLEEP_PORT_MAX << MV_PP2_WOL_SLEEP_PORT_OFFS)
+#define MV_PP2_WOL_SLEEP_PORT_MASK(p)       (((p) & MV_PP2_WOL_SLEEP_PORT_MAX) << MV_PP2_WOL_SLEEP_PORT_OFFS)
+/*---------------------------------------------------------------------------------------------*/
+
+#define MV_PP2_WOL_MAC_HIGH_REG             (MV_PP2_REG_BASE + 0x410)
+#define MV_PP2_WOL_MAC_LOW_REG              (MV_PP2_REG_BASE + 0x414)
+/*---------------------------------------------------------------------------------------------*/
+
+#define MV_PP2_WOL_ARP_IP_NUM               2
+
+#define MV_PP2_WOL_ARP_IP0_REG              (MV_PP2_REG_BASE + 0x418)
+#define MV_PP2_WOL_ARP_IP1_REG              (MV_PP2_REG_BASE + 0x41C)
+#define MV_PP2_WOL_ARP_IP_REG(idx)          (MV_PP2_WOL_ARP_IP0_REG + ((idx) << 2))
+/*---------------------------------------------------------------------------------------------*/
+
+#define MV_PP2_WOL_PTRN_NUM                 4
+#define MV_PP2_WOL_PTRN_BYTES               128
+#define MV_PP2_WOL_PTRN_REGS                (MV_PP2_WOL_PTRN_BYTES / 4)
+
+#define MV_PP2_WOL_WAKEUP_EN_REG            (MV_PP2_REG_BASE + 0x420)
+#define MV_PP2_WOL_INTR_CAUSE_REG           (MV_PP2_REG_BASE + 0x424)
+#define MV_PP2_WOL_INTR_MASK_REG            (MV_PP2_REG_BASE + 0x428)
+
+/* Bits are the same for all three registers above */
+#define MV_PP2_WOL_PTRN_IDX_BIT(idx)        (0 + (idx))
+#define MV_PP2_WOL_PTRN_IDX_MASK(idx)       (1 << MV_PP2_WOL_PTRN_IDX_BIT(idx))
+
+#define MV_PP2_WOL_MAGIC_PTRN_BIT           4
+#define MV_PP2_WOL_MAGIC_PTRN_MASK          (1 << MV_PP2_WOL_MAGIC_PTRN_BIT)
+
+#define MV_PP2_WOL_ARP_IP0_BIT              5
+#define MV_PP2_WOL_ARP_IP1_BIT              6
+#define MV_PP2_WOL_ARP_IP_MASK(idx)         (1 << (MV_PP2_WOL_ARP_IP0_BIT + (idx)))
+
+#define MV_PP2_WOL_UCAST_BIT                7
+#define MV_PP2_WOL_UCAST_MASK               (1 << MV_PP2_WOL_UCAST_BIT)
+
+#define MV_PP2_WOL_MCAST_BIT                8
+#define MV_PP2_WOL_MCAST_MASK               (1 << MV_PP2_WOL_MCAST_BIT)
+/*---------------------------------------------------------------------------------------------*/
+
+#define MV_PP2_WOL_PTRN_SIZE_REG            (MV_PP2_REG_BASE + 0x430)
+
+#define MV_PP2_WOL_PTRN_SIZE_BITS           8
+#define MV_PP2_WOL_PTRN_SIZE_MAX            ((1 << MV_PP2_WOL_PTRN_SIZE_BITS) - 1)
+#define MV_PP2_WOL_PTRN_SIZE_MAX_MASK(i)    (MV_PP2_WOL_PTRN_SIZE_MAX << ((i) << MV_PP2_WOL_PTRN_SIZE_BITS))
+#define MV_PP2_WOL_PTRN_SIZE_MASK(i, s)     ((s) << ((i) * MV_PP2_WOL_PTRN_SIZE_BITS))
+/*---------------------------------------------------------------------------------------------*/
+
+#define MV_PP2_WOL_PTRN_IDX_REG             (MV_PP2_REG_BASE + 0x434)
+#define MV_PP2_WOL_PTRN_DATA_REG(i)         (MV_PP2_REG_BASE + 0x500 + ((i) << 2))
+#define MV_PP2_WOL_PTRN_MASK_REG(i)         (MV_PP2_REG_BASE + 0x580 + ((i) << 2))
+
+#define MV_PP2_WOL_PTRN_DATA_BYTE_MASK(i)   (0xFF << ((i) * 8))
+#define MV_PP2_WOL_PTRN_DATA_BYTE(i, b)     ((b)  << ((i) * 8))
+#define MV_PP2_WOL_PTRN_MASK_BIT(i)         (1    << ((i) * 8))
+/*---------------------------------------------------------------------------------------------*/
+
+/*********************************** ENUMERATIONS *******************/
+
+enum wol_event_enable_t {
+	WOL_EVENT_DIS = 0,
+	WOL_EVENT_EN,
+};
+
+/* WoL APIs */
+void      mvPp2WolRegs(void);
+void      mvPp2WolStatus(void);
+MV_STATUS mvPp2WolSleep(int port);
+MV_STATUS mvPp2WolWakeup(void);
+int       mvPp2WolIsSleep(int *port);
+MV_STATUS mvPp2WolMagicDaSet(MV_U8 *mac_da);
+MV_STATUS mvPp2WolArpIpSet(int idx, MV_U32 ip);
+MV_STATUS mvPp2WolPtrnSet(int idx, int off, int size, MV_U8 *data, MV_U8 *mask);
+MV_STATUS mvPp2WolArpEventSet(int idx, int enable);
+MV_STATUS mvPp2WolMcastEventSet(int enable);
+MV_STATUS mvPp2WolUcastEventSet(int enable);
+MV_STATUS mvPp2WolMagicEventSet(int enable);
+MV_STATUS mvPp2WolPtrnEventSet(int idx, int enable);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* __mvPp2Wol_h__ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/l2fw/l2fw_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/l2fw/l2fw_sysfs.c
new file mode 100644
index 000000000000..3711b5b4a344
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/l2fw/l2fw_sysfs.c
@@ -0,0 +1,258 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include "mvTypes.h"
+#include "mv_eth_l2fw.h"
+#ifdef CONFIG_MV_PP2_L2SEC
+#include "mv_eth_l2sec.h"
+#endif
+#include "linux/inet.h"
+
+
+static ssize_t mv_l2fw_help(char *buf)
+{
+	int off = 0;
+
+	off += sprintf(buf+off, "cat               rules_dump - Display L2FW rules DB\n");
+	off += sprintf(buf+off, "cat               ports_dump - Display L2FW ports DB\n");
+	off += sprintf(buf+off, "cat               stats      - Show debug information\n");
+	off += sprintf(buf+off, "\n");
+	off += sprintf(buf+off, "echo p [1|0]      > l2fw     - Enable/Disable L2FW for port <p>\n");
+	off += sprintf(buf+off, "echo rxp txp mode > bind     - Set <rxp-->txp>, mode: 0-as_is, 1-swap, 2-copy\n");
+	off += sprintf(buf+off, "echo rxp [1|0]    > lookup   - Enable/Disable L3 lookup for port <rxp>\n");
+	off += sprintf(buf+off, "echo 1            > flush    - Flush L2FW rules DB\n");
+	off += sprintf(buf+off, "echo sip dip txp  > add_ip   - Set L3 lookup rule, sip, dip in a.b.c.d format\n");
+#ifdef CONFIG_MV_L2FW_XOR
+	off += sprintf(buf+off, "echo rxp thresh   > xor      - Set XOR threshold for port <rxp>\n");
+#endif
+#ifdef CONFIG_MV_PP2_L2SEC
+	off += sprintf(buf+off, "echo p chan       > cesa     - Set cesa channel <chan> for port <p>.\n");
+#endif
+	return off;
+}
+
+static ssize_t mv_l2fw_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	int off = 0;
+	const char *name = attr->attr.name;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "help"))
+		off = mv_l2fw_help(buf);
+
+	else if (!strcmp(name, "rules_dump"))
+		mv_l2fw_rules_dump();
+
+	else if (!strcmp(name, "ports_dump"))
+		mv_l2fw_ports_dump();
+
+	else if (!strcmp(name, "stats"))
+		mv_l2fw_stats();
+
+	return off;
+}
+
+
+
+static ssize_t mv_l2fw_hex_store(struct device *dev, struct device_attribute *attr,
+				const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    addr1, addr2;
+	int port;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+	err = addr1 = addr2 = port = 0;
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "flush")) {
+		mv_l2fw_flush();
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_l2fw_ip_store(struct device *dev,
+			 struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char *name = attr->attr.name;
+
+	unsigned int err = 0;
+	unsigned int srcIp = 0, dstIp = 0;
+	unsigned char *sipArr = (unsigned char *)&srcIp;
+	unsigned char *dipArr = (unsigned char *)&dstIp;
+	int port;
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%hhu.%hhu.%hhu.%hhu %hhu.%hhu.%hhu.%hhu %d",
+		sipArr, sipArr+1, sipArr+2, sipArr+3,
+		dipArr, dipArr+1, dipArr+2, dipArr+3, &port);
+
+	printk(KERN_INFO "0x%x->0x%x in %s\n", srcIp, dstIp, __func__);
+	local_irq_save(flags);
+
+	if (!strcmp(name, "add_ip"))
+		mv_l2fw_add(srcIp, dstIp, port);
+	else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+
+
+static ssize_t mv_l2fw_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char	*name = attr->attr.name;
+	int             err;
+
+	unsigned int    a, b, c;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	err = a = b = c = 0;
+	sscanf(buf, "%d %d %d", &a, &b, &c);
+
+	local_irq_save(flags);
+	if (!strcmp(name, "lookup"))
+		mv_l2fw_lookupEn(a, b);
+#ifdef CONFIG_MV_L2FW_XOR
+	else if (!strcmp(name, "xor"))
+		mv_l2fw_xor(a, b);
+#endif
+	else if (!strcmp(name, "l2fw"))
+		err = mv_l2fw_set(a, b);
+
+	else if (!strcmp(name, "bind"))
+		err = mv_l2fw_port(a, b, c);
+
+#ifdef CONFIG_MV_PP2_L2SEC
+	else if (!strcmp(name, "cesa_chan"))
+		err = mv_l2sec_set_cesa_chan(a, b);
+#endif
+	local_irq_restore(flags);
+
+	if (err)
+		mvOsPrintf("%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+
+}
+
+
+static DEVICE_ATTR(l2fw,		S_IWUSR, mv_l2fw_show, mv_l2fw_store);
+static DEVICE_ATTR(bind,		S_IWUSR, mv_l2fw_show, mv_l2fw_store);
+static DEVICE_ATTR(lookup,		S_IWUSR, mv_l2fw_show, mv_l2fw_store);
+static DEVICE_ATTR(add_ip,		S_IWUSR, mv_l2fw_show, mv_l2fw_ip_store);
+static DEVICE_ATTR(help,		S_IRUSR, mv_l2fw_show, NULL);
+static DEVICE_ATTR(rules_dump,		S_IRUSR, mv_l2fw_show, NULL);
+static DEVICE_ATTR(ports_dump,		S_IRUSR, mv_l2fw_show, NULL);
+static DEVICE_ATTR(stats,		S_IRUSR, mv_l2fw_show, NULL);
+static DEVICE_ATTR(flush,		S_IWUSR, NULL,	mv_l2fw_hex_store);
+
+#ifdef CONFIG_MV_PP2_L2SEC
+static DEVICE_ATTR(cesa_chan,		S_IWUSR, NULL,  mv_l2fw_store);
+#endif
+#ifdef CONFIG_MV_L2FW_XOR
+static DEVICE_ATTR(xor,		S_IWUSR, mv_l2fw_show, mv_l2fw_store);
+#endif
+
+
+
+static struct attribute *mv_l2fw_attrs[] = {
+	&dev_attr_l2fw.attr,
+	&dev_attr_bind.attr,
+#ifdef CONFIG_MV_L2FW_XOR
+	&dev_attr_xor.attr,
+#endif
+	&dev_attr_lookup.attr,
+	&dev_attr_add_ip.attr,
+	&dev_attr_help.attr,
+	&dev_attr_rules_dump.attr,
+	&dev_attr_ports_dump.attr,
+	&dev_attr_flush.attr,
+	&dev_attr_stats.attr,
+#ifdef CONFIG_MV_PP2_L2SEC
+	&dev_attr_cesa_chan.attr,
+#endif
+	NULL
+};
+
+static struct attribute_group mv_l2fw_group = {
+	.name = "l2fw",
+	.attrs = mv_l2fw_attrs,
+};
+
+int mv_pp2_l2fw_sysfs_init(struct kobject *pp2_kobj)
+{
+	int err = 0;
+
+	err = sysfs_create_group(pp2_kobj, &mv_l2fw_group);
+	if (err)
+		printk(KERN_INFO "sysfs group %s failed %d\n", mv_l2fw_group.name, err);
+
+	return err;
+}
+
+int mv_pp2_l2fw_sysfs_exit(struct kobject *pp2_kobj)
+{
+	sysfs_remove_group(pp2_kobj, &mv_l2fw_group);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/l2fw/mv_eth_l2fw.c b/drivers/net/ethernet/mvebu_net/pp2/l2fw/mv_eth_l2fw.c
new file mode 100644
index 000000000000..7e487c905e1d
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/l2fw/mv_eth_l2fw.c
@@ -0,0 +1,1214 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+
+#include <linux/version.h>
+
+#ifdef CONFIG_MV_PP2_L2FW_XOR
+#include "xor/mvXor.h"
+#include "xor/mvXorRegs.h"
+#include "mv_hal_if/mvSysXorApi.h"
+#endif /* CONFIG_MV_PP2_L2FW_XOR */
+
+#include "mv_eth_l2fw.h"
+#include "../net_dev/mv_netdev.h"
+#include "gbe/mvPp2Gbe.h"
+#include "mvDebug.h"
+
+#ifdef CONFIG_MV_PP2_L2SEC
+#include "mv_eth_l2sec.h"
+#endif
+
+static int numHashEntries;
+static int shared;
+
+static struct l2fw_rule **l2fw_hash;
+static struct eth_port_l2fw **mv_pp2_ports_l2fw;
+static int eth_ports_l2fw_num;
+
+static MV_U32 l2fw_jhash_iv;
+
+#ifdef CONFIG_MV_PP2_L2FW_XOR
+static MV_XOR_DESC *eth_xor_desc;
+static MV_LONG      eth_xor_desc_phys_addr;
+#endif
+
+inline int mv_l2fw_rx(struct eth_port *pp, int rx_todo, int rxq);
+inline int mv_l2fw_tx(struct sk_buff *skb, struct eth_port *pp, struct pp2_rx_desc *rx_desc);
+inline int mv_l2fw_txq_done(struct eth_port *pp, struct tx_queue *txq_ctrl);
+static int mv_l2fw_port_init(int port);
+static void mv_l2fw_port_free(int port);
+
+static const struct net_device_ops mv_l2fw_netdev_ops;
+static const struct net_device_ops *mv_pp2_netdev_ops_ptr;
+
+static struct l2fw_rule *l2fw_lookup(MV_U32 srcIP, MV_U32 dstIP)
+{
+	MV_U32 hash;
+	struct l2fw_rule *rule;
+
+	hash = mv_jhash_3words(srcIP, dstIP, (MV_U32) 0, l2fw_jhash_iv);
+	hash &= L2FW_HASH_MASK;
+	rule = l2fw_hash[hash];
+
+	while (rule) {
+		if ((rule->srcIP == srcIP) && (rule->dstIP == dstIP)) {
+#ifdef CONFIG_MV_PP2_L2FW_DEBUG
+			printk(KERN_INFO "rule is not NULL in %s\n", __func__);
+#endif
+			return rule;
+		}
+
+		rule = rule->next;
+	}
+
+#ifdef CONFIG_MV_PP2_L2FW_DEBUG
+	printk(KERN_INFO "rule is NULL in %s\n", __func__);
+#endif
+
+	return NULL;
+}
+
+void l2fw_show_numHashEntries(void)
+{
+	mvOsPrintf("number of Hash Entries is %d\n", numHashEntries);
+
+}
+
+
+void mv_l2fw_flush(void)
+{
+	MV_U32 i = 0;
+	mvOsPrintf("\nFlushing L2fw Rule Database:\n");
+	mvOsPrintf("*******************************\n");
+	for (i = 0; i < L2FW_HASH_SIZE; i++)
+		if (l2fw_hash[i]) {
+			mvOsFree(l2fw_hash[i]);
+			l2fw_hash[i] = NULL;
+		}
+	numHashEntries = 0;
+}
+
+
+void mv_l2fw_rules_dump(void)
+{
+	MV_U32 i = 0;
+	struct l2fw_rule *currRule;
+	MV_U8	  *srcIP, *dstIP;
+
+	mvOsPrintf("\nPrinting L2fw Rule Database:\n");
+	mvOsPrintf("*******************************\n");
+
+	for (i = 0; i < L2FW_HASH_SIZE; i++) {
+		currRule = l2fw_hash[i];
+		srcIP = (MV_U8 *)&(currRule->srcIP);
+		dstIP = (MV_U8 *)&(currRule->dstIP);
+
+		while (currRule != NULL) {
+			mvOsPrintf("%u.%u.%u.%u->%u.%u.%u.%u     out port=%d (hash=%x)\n",
+				MV_IPQUAD(srcIP), MV_IPQUAD(dstIP),
+				currRule->port, i);
+			currRule = currRule->next;
+		}
+	}
+
+}
+
+void mv_l2fw_ports_dump(void)
+{
+	MV_U32 rx_port = 0;
+	struct eth_port_l2fw *ppl2fw;
+
+	mvOsPrintf("\nPrinting L2fw ports Database:\n");
+	mvOsPrintf("*******************************\n");
+
+	if (!mv_pp2_ports_l2fw)
+		return;
+
+	for (rx_port = 0; rx_port < eth_ports_l2fw_num; rx_port++) {
+		ppl2fw = mv_pp2_ports_l2fw[rx_port];
+		if (ppl2fw)
+			mvOsPrintf("rx_port=%d cmd = %d tx_port=%d lookup=%d xor_threshold = %d\n",
+					rx_port, ppl2fw->cmd, ppl2fw->txPort, ppl2fw->lookupEn, ppl2fw->xorThreshold);
+
+	}
+}
+
+
+int mv_l2fw_add(MV_U32 srcIP, MV_U32 dstIP, int port)
+{
+	struct l2fw_rule *rule;
+	MV_U8	  *srcIPchr, *dstIPchr;
+
+	MV_U32 hash = mv_jhash_3words(srcIP, dstIP, (MV_U32) 0, l2fw_jhash_iv);
+	hash &= L2FW_HASH_MASK;
+	if (numHashEntries == L2FW_HASH_SIZE) {
+		printk(KERN_INFO "cannot add entry, hash table is full, there are %d entires\n", L2FW_HASH_SIZE);
+		return MV_ERROR;
+	}
+
+	srcIPchr = (MV_U8 *)&(srcIP);
+	dstIPchr = (MV_U8 *)&(dstIP);
+
+#ifdef CONFIG_MV_PP2_L2FW_DEBUG
+	mvOsPrintf("srcIP=%x dstIP=%x in %s\n", srcIP, dstIP, __func__);
+	mvOsPrintf("srcIp = %u.%u.%u.%u in %s\n", MV_IPQUAD(srcIPchr), __func__);
+	mvOsPrintf("dstIp = %u.%u.%u.%u in %s\n", MV_IPQUAD(dstIPchr), __func__);
+#endif
+
+	rule = l2fw_lookup(srcIP, dstIP);
+	if (rule) {
+		/* overwite port */
+		rule->port = port;
+		return MV_OK;
+	}
+
+	rule = (struct l2fw_rule *)mvOsMalloc(sizeof(struct l2fw_rule));
+	if (!rule) {
+		mvOsPrintf("%s: OOM\n", __func__);
+		return MV_FAIL;
+	}
+#ifdef CONFIG_MV_PP2_L2FW_DEBUG
+	mvOsPrintf("adding a rule to l2fw hash in %s\n", __func__);
+#endif
+	rule->srcIP = srcIP;
+	rule->dstIP = dstIP;
+	rule->port = port;
+
+	rule->next = l2fw_hash[hash];
+	l2fw_hash[hash] = rule;
+	numHashEntries++;
+	return MV_OK;
+}
+
+static int mv_pp2_poll_l2fw(struct napi_struct *napi, int budget)
+{
+	int rx_done = 0;
+	MV_U32 causeRxTx;
+	struct napi_group_ctrl *napi_group;
+	struct eth_port *pp = MV_ETH_PRIV(napi->dev);
+	int cpu = smp_processor_id();
+
+	STAT_INFO(pp->stats.poll[cpu]++);
+
+	/* Read cause register */
+	causeRxTx = mvPp2GbeIsrCauseRxTxGet(pp->port);
+	if (causeRxTx & MV_PP2_CAUSE_MISC_SUM_MASK) {
+		if (causeRxTx & MV_PP2_CAUSE_FCS_ERR_MASK)
+			printk(KERN_ERR "%s: FCS error\n", __func__);
+
+		if (causeRxTx & MV_PP2_CAUSE_RX_FIFO_OVERRUN_MASK)
+			printk(KERN_ERR "%s: RX fifo overrun error\n", __func__);
+
+		if (causeRxTx & MV_PP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
+			printk(KERN_ERR "%s: TX fifo underrun error\n", __func__);
+
+		if (causeRxTx & MV_PP2_CAUSE_MISC_SUM_MASK) {
+			printk(KERN_ERR "%s: misc event\n", __func__);
+			MV_REG_WRITE(MV_PP2_ISR_MISC_CAUSE_REG, 0);
+		}
+
+		causeRxTx &= ~MV_PP2_CAUSE_MISC_SUM_MASK;
+		MV_REG_WRITE(MV_PP2_ISR_RX_TX_CAUSE_REG(MV_PPV2_PORT_PHYS(pp->port)), causeRxTx);
+	}
+	napi_group = pp->cpu_config[smp_processor_id()]->napi_group;
+	causeRxTx |= napi_group->cause_rx_tx;
+
+#ifdef CONFIG_MV_PP2_TXDONE_ISR
+
+	/* TODO check this mode */
+
+	if (mvPp2GbeIsrCauseTxDoneIsSet(pp->port, causeRxTx)) {
+		int tx_todo = 0, cause_tx_done;
+
+		/* TX_DONE process */
+		cause_tx_done = mvPp2GbeIsrCauseTxDoneOffset(pp->port, causeRxTx);
+		if (MV_PP2_IS_PON_PORT(pp->port)) {
+			mv_pp2_tx_done_pon(pp, &tx_todo);
+			mvOsPrintf("enter to mv_pp2_tx_done_pon\n");
+		} else
+			mv_pp2_tx_done_gbe(pp, cause_tx_done, &tx_todo);
+	}
+#endif /* CONFIG_MV_PP2_TXDONE_ISR */
+	if (MV_PP2_IS_PON_PORT(pp->port))
+		causeRxTx &= ~MV_PP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK;
+	else
+		causeRxTx &= ~MV_PP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+
+	while ((causeRxTx != 0) && (budget > 0)) {
+		int count, rx_queue;
+
+		rx_queue = mv_pp2_rx_policy(causeRxTx);
+		if (rx_queue == -1)
+			break;
+
+		count = mv_l2fw_rx(pp, budget, rx_queue);
+		rx_done += count;
+		budget -= count;
+		if (budget > 0)
+			causeRxTx &= ~((1 << rx_queue) << MV_PP2_CAUSE_RXQ_OCCUP_DESC_OFFS);
+	}
+
+	STAT_DIST((rx_done < pp->dist_stats.rx_dist_size) ? pp->dist_stats.rx_dist[rx_done]++ : 0);
+
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+	if (pp->dbg_flags & MV_ETH_F_DBG_POLL) {
+		printk(KERN_ERR "%s  EXIT: port=%d, cpu=%d, budget=%d, rx_done=%d\n",
+			__func__, pp->port, cpu, budget, rx_done);
+	}
+#endif /* CONFIG_MV_PP2_DEBUG_CODE */
+
+	if (budget > 0) {
+		unsigned long flags;
+
+		causeRxTx = 0;
+
+		napi_complete(napi);
+
+		STAT_INFO(pp->stats.poll_exit[smp_processor_id()]++);
+
+		local_irq_save(flags);
+		/* Enable interrupts for all cpus belong to this group */
+		mvPp2GbeCpuInterruptsEnable(pp->port, napi_group->cpu_mask);
+		local_irq_restore(flags);
+	}
+	napi_group->cause_rx_tx = causeRxTx;
+	return rx_done;
+}
+
+
+static int mv_l2fw_update_napi(struct eth_port *pp, bool l2fw)
+{
+	int group;
+	struct napi_group_ctrl *napi_group;
+
+
+	for (group = 0; group < 1/*MV_ETH_MAX_NAPI_GROUPS*/; group++) {
+		napi_group = pp->napi_group[group];
+/*
+		if (test_bit(MV_ETH_F_STARTED_BIT, &(pp->flags)))
+			napi_disable(napi_group->napi);
+*/
+		netif_napi_del(napi_group->napi);
+
+		if (l2fw)
+			netif_napi_add(pp->dev, napi_group->napi, mv_pp2_poll_l2fw, pp->weight);
+		else
+			netif_napi_add(pp->dev, napi_group->napi, mv_pp2_poll, pp->weight);
+/*
+		if (test_bit(MV_ETH_F_STARTED_BIT, &(pp->flags)))
+			napi_enable(napi_group->napi);
+*/
+	}
+	return MV_OK;
+}
+
+static int mv_l2fw_check(int port, bool l2fw)
+{
+	if (!l2fw) {
+		/* user try to exit form l2fw */
+		if (!mv_pp2_ports_l2fw) {
+			mvOsPrintf("port #%d l2fw already disabled\n", port);
+			return MV_ERROR;
+		}
+
+		if (!mv_pp2_ports_l2fw[port]) {
+			mvOsPrintf("port #%d l2fw already disabled\n", port);
+			return MV_ERROR;
+		}
+
+	/* user try to enter into l2fw */
+	} else if (mv_pp2_ports_l2fw && mv_pp2_ports_l2fw[port]) {
+			mvOsPrintf("port #%d l2fw already enabled\n", port);
+			return MV_ERROR;
+	}
+
+	return MV_OK;
+}
+
+int mv_l2fw_set(int port, bool l2fw)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+	int status = MV_OK;
+
+	if (mv_l2fw_check(port, l2fw))
+		return MV_ERROR;
+
+	if (!pp) {
+		mvOsPrintf("pp is NULL in setting L2FW (%s)\n", __func__);
+		return MV_ERROR;
+	}
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		pr_err("%s: port %d must be stopped before\n", __func__, port);
+		return -EINVAL;
+	}
+
+	/* for multiBuffer validation */
+	/*mvGmacMaxRxSizeSet(port, 9000);*/
+
+	if (!mv_pp2_netdev_ops_ptr) {
+		/* enter only once - save eth ops */
+		mv_pp2_netdev_ops_ptr = pp->dev->netdev_ops;
+		/* set maximum number of ports */
+		eth_ports_l2fw_num = pp->plat_data->max_port;
+	}
+
+	if (mv_l2fw_update_napi(pp, l2fw))
+		return MV_ERROR;
+
+	if (l2fw) {
+		status = mv_l2fw_port_init(port);
+		pp->dev->netdev_ops  = &mv_l2fw_netdev_ops;
+
+	} else {
+		pp->dev->netdev_ops = mv_pp2_netdev_ops_ptr;
+		mv_l2fw_port_free(port);
+	}
+
+	return status;
+}
+
+int mv_l2fw_port(int rx_port, int tx_port, int cmd)
+{
+	struct eth_port_l2fw *ppl2fw;
+
+	if (!mv_pp2_ports_l2fw) {
+		mvOsPrintf("%s: ports are not in l2fw mode\n", __func__);
+		return MV_ERROR;
+	}
+	if (mvPp2MaxCheck(rx_port, eth_ports_l2fw_num, "rx_port"))
+		return MV_ERROR;
+
+	if (mvPp2MaxCheck(tx_port, eth_ports_l2fw_num, "tx_port"))
+		return MV_ERROR;
+
+	if (!mv_pp2_ports_l2fw[rx_port]) {
+		mvOsPrintf("%s: port #%d is not in l2fw mode\n", __func__, rx_port);
+		return MV_ERROR;
+	}
+
+	if (!mv_pp2_ports_l2fw[tx_port]) {
+		mvOsPrintf("%s: port #%d is not in l2fw mode\n", __func__, tx_port);
+		return MV_ERROR;
+	}
+
+	if (cmd > CMD_L2FW_LAST) {
+		mvOsPrintf("Error: invalid command %d\n", cmd);
+		return MV_ERROR;
+	}
+
+	ppl2fw = mv_pp2_ports_l2fw[rx_port];
+	ppl2fw->cmd = cmd;
+	ppl2fw->txPort = tx_port;
+
+	return MV_OK;
+
+}
+
+inline unsigned char *l2fw_swap_mac(unsigned char *buff)
+{
+	MV_U16 *pSrc;
+	int i;
+	MV_U16 swap;
+	pSrc = (MV_U16 *)(buff + MV_ETH_MH_SIZE);
+
+	for (i = 0; i < 3; i++) {
+		swap = pSrc[i];
+		pSrc[i] = pSrc[i+3];
+		pSrc[i+3] = swap;
+		}
+
+	return  buff;
+}
+
+inline void l2fw_copy_mac(unsigned char *rx_buff, unsigned char *tx_buff)
+{
+	/* copy 30 bytes (start after MH header) */
+	/* 12 for SA + DA */
+	/* 18 for the rest */
+	MV_U16 *pSrc;
+	MV_U16 *pDst;
+	int i;
+	pSrc = (MV_U16 *)(rx_buff);
+	pDst = (MV_U16 *)(tx_buff);
+
+	/* swap mac SA and DA */
+	for (i = 0; i < 3; i++) {
+		pDst[i]   = pSrc[i+3];
+		pDst[i+3] = pSrc[i];
+		}
+	for (i = 6; i < 15; i++)
+		pDst[i] = pSrc[i];
+	}
+
+inline void l2fw_copy_and_swap_mac(unsigned char *rx_buff, unsigned char *tx_buff)
+{
+	MV_U16 *pSrc;
+	MV_U16 *pDst;
+	int i;
+
+	pSrc = (MV_U16 *)(rx_buff);
+	pDst = (MV_U16 *)(tx_buff);
+	for (i = 0; i < 3; i++) {
+		pDst[i]   = pSrc[i+3];
+		pDst[i+3] = pSrc[i];
+	}
+}
+
+inline struct sk_buff *eth_l2fw_copy_packet_withOutXor(struct sk_buff *skb, struct pp2_rx_desc *rx_desc)
+{
+	MV_U8 *pSrc;
+	MV_U8 *pDst;
+	int poolId;
+	struct sk_buff *skb_new;
+	int  bytes = rx_desc->dataSize - MV_ETH_MH_SIZE;
+	/* 12 for SA + DA */
+	int mac = 2 * MV_MAC_ADDR_SIZE;
+
+	mvOsCacheInvalidate(NULL, skb->data, bytes);
+
+	poolId = mvPp2RxBmPoolId(rx_desc);
+
+	skb_new = (struct sk_buff *)mv_pp2_pool_get(poolId);
+
+	if (!skb_new) {
+		mvOsPrintf("skb == NULL in %s\n", __func__);
+		return NULL;
+	}
+
+	pSrc = skb->data + MV_ETH_MH_SIZE;
+	pDst = skb_new->data + MV_ETH_MH_SIZE;
+
+	memcpy(pDst + mac, pSrc + mac, bytes - mac);
+	l2fw_copy_and_swap_mac(pSrc, pDst);
+	mvOsCacheFlush(NULL, skb_new->data, bytes);
+
+	return skb_new;
+}
+
+#ifdef CONFIG_MV_PP2_L2FW_XOR
+inline struct sk_buff *eth_l2fw_copy_packet_withXor(struct sk_buff *skb, struct pp2_rx_desc *rx_desc)
+{
+	struct sk_buff *skb_new = NULL;
+	MV_U8 *pSrc;
+	MV_U8 *pDst;
+	int poolId;
+	unsigned int bufPhysAddr;
+	int  bytes = rx_desc->dataSize - MV_ETH_MH_SIZE;
+
+	poolId = mvPp2RxBmPoolId(rx_desc);
+
+	skb_new = (struct sk_buff *)mv_pp2_pool_get(poolId);
+
+	if (!skb_new) {
+		mvOsPrintf("skb == NULL in %s\n", __func__);
+		return NULL;
+	}
+
+	/* sync between giga and XOR to avoid errors (like checksum errors in TX)
+	   when working with IOCC */
+
+	mvOsCacheIoSync(NULL);
+
+	bufPhysAddr =  mvOsCacheFlush(NULL, skb->data, bytes);
+	eth_xor_desc->srcAdd0    = bufPhysAddr + skb_headroom(skb) + MV_ETH_MH_SIZE + 30;
+
+	bufPhysAddr =  mvOsCacheFlush(NULL, skb_new->data, bytes);
+	eth_xor_desc->srcAdd0    = bufPhysAddr + skb_headroom(skb_new) + MV_ETH_MH_SIZE + 30;
+
+	eth_xor_desc->byteCnt    = bytes - 30;
+
+	eth_xor_desc->phyNextDescPtr = 0;
+	eth_xor_desc->status         = BIT31;
+	/* we had changed only the first part of eth_xor_desc, so flush only one
+	 line of cache */
+	mvOsCacheLineFlush(NULL, eth_xor_desc);
+	MV_REG_WRITE(XOR_NEXT_DESC_PTR_REG(1, XOR_CHAN(0)), eth_xor_desc_phys_addr);
+
+	MV_REG_WRITE(XOR_ACTIVATION_REG(1, XOR_CHAN(0)), XEXACTR_XESTART_MASK);
+
+	mvOsCacheLineInv(NULL, skb->data);
+
+	pSrc = skb->data + MV_ETH_MH_SIZE;
+	pDst = skb_new->data + MV_ETH_MH_SIZE;
+
+	l2fw_copy_mac(pSrc, pDst);
+	mvOsCacheLineFlush(NULL, skb_new->data);
+
+	return skb_new;
+}
+
+void setXorDesc(void)
+{
+	unsigned int mode;
+	eth_xor_desc = mvOsMalloc(sizeof(MV_XOR_DESC) + XEXDPR_DST_PTR_DMA_MASK + 32);
+	eth_xor_desc = (MV_XOR_DESC *)MV_ALIGN_UP((MV_U32)eth_xor_desc, XEXDPR_DST_PTR_DMA_MASK+1);
+	eth_xor_desc_phys_addr = mvOsIoVirtToPhys(NULL, eth_xor_desc);
+	mvSysXorInit();
+
+	mode = MV_REG_READ(XOR_CONFIG_REG(1, XOR_CHAN(0)));
+	mode &= ~XEXCR_OPERATION_MODE_MASK;
+	mode |= XEXCR_OPERATION_MODE_DMA;
+	MV_REG_WRITE(XOR_CONFIG_REG(1, XOR_CHAN(0)), mode);
+	MV_REG_WRITE(XOR_NEXT_DESC_PTR_REG(1, XOR_CHAN(0)), eth_xor_desc_phys_addr);
+	/* TODO mask xor intterupts*/
+}
+
+
+inline int xorReady(void)
+{
+	int timeout = 0;
+
+	while (!(MV_REG_READ(XOR_CAUSE_REG(1)) & XOR_CAUSE_DONE_MASK(XOR_CHAN(0)))) {
+		if (timeout > 0x100000) {
+			mvOsPrintf("XOR timeout\n");
+			return 0;
+			}
+		timeout++;
+	}
+
+	/* Clear int */
+	MV_REG_WRITE(XOR_CAUSE_REG(1), ~(XOR_CAUSE_DONE_MASK(XOR_CHAN(0))));
+
+	return 1;
+}
+
+void mv_l2fw_xor(int rx_port, int threshold)
+{
+	if (mvPp2MaxCheck(rx_port, eth_ports_l2fw_num, "rx_port"))
+		return;
+
+	mvOsPrintf("setting port %d threshold to %d in %s\n", rx_port, threshold, __func__);
+	mv_pp2_ports_l2fw[rx_port]->xorThreshold = threshold;
+}
+#endif /* CONFIG_MV_PP2_L2FW_XOR */
+
+void mv_l2fw_lookupEn(int rx_port, int enable)
+{
+	if (mvPp2MaxCheck(rx_port, eth_ports_l2fw_num, "rx_port"))
+		return;
+
+	mvOsPrintf("setting port %d lookup mode to %s\n", rx_port, (enable == 1) ? "enable" : "disable");
+	mv_pp2_ports_l2fw[rx_port]->lookupEn = enable;
+}
+
+void mv_l2fw_stats(void)
+{
+	int i;
+
+	if (!mv_pp2_ports_l2fw)
+		return;
+
+	for (i = 0; i < eth_ports_l2fw_num; i++) {
+		if (mv_pp2_ports_l2fw[i]) {
+			mvOsPrintf("number of errors in port[%d]=%d\n", i, mv_pp2_ports_l2fw[i]->statErr);
+			mvOsPrintf("number of drops  in port[%d]=%d\n", i, mv_pp2_ports_l2fw[i]->statDrop);
+		}
+	}
+
+#ifdef CONFIG_MV_PP2_L2SEC
+	mv_l2sec_stats();
+#endif
+
+}
+
+inline int mv_l2fw_tx(struct sk_buff *skb, struct eth_port *pp, struct pp2_rx_desc *rx_desc)
+{
+	struct pp2_tx_desc *tx_desc;
+	u32 tx_cmd = 0;
+	struct mv_pp2_tx_spec *tx_spec_ptr = NULL;
+	struct tx_queue *txq_ctrl;
+	struct aggr_tx_queue *aggr_txq_ctrl = NULL;
+	struct txq_cpu_ctrl *txq_cpu_ptr;
+	int qset, grntd;
+	int cpu = smp_processor_id(), poolId, frags = 1;
+	tx_spec_ptr = &pp->tx_spec;
+	tx_spec_ptr->txq = pp->cpu_config[cpu]->txq;
+	aggr_txq_ctrl = &aggr_txqs[cpu];
+
+	txq_ctrl = &pp->txq_ctrl[tx_spec_ptr->txp * CONFIG_MV_PP2_TXQ + tx_spec_ptr->txq];
+	txq_cpu_ptr = &(txq_ctrl->txq_cpu[cpu]);
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	if (mv_pp2_reserved_desc_num_proc(pp, tx_spec_ptr->txp, tx_spec_ptr->txq, frags) ||
+		mv_pp2_aggr_desc_num_check(aggr_txq_ctrl, frags)) {
+		frags = 0;
+		goto out;
+	}
+#else
+	if (mv_pp2_aggr_desc_num_check(aggr_txq_ctrl, frags))
+		goto out;
+#endif /*CONFIG_MV_ETH_PP2_1*/
+
+	/* Get next descriptor for tx, single buffer, so FIRST & LAST */
+	tx_desc = mvPp2AggrTxqNextDescGet(aggr_txq_ctrl->q);
+
+	if (tx_desc == NULL) {
+		pp->dev->stats.tx_dropped++;
+		return MV_DROPPED;
+		/* TODO wait until xor is ready */
+	}
+
+	/* check if buffer header is used */
+	if (rx_desc->status & PP2_RX_BUF_HDR_MASK)
+		tx_cmd |= PP2_TX_BUF_HDR_MASK | PP2_TX_DESC_PER_PKT;
+
+	if (tx_spec_ptr->flags & MV_ETH_TX_F_NO_PAD)
+		tx_cmd |= PP2_TX_PADDING_DISABLE_MASK;
+
+	poolId = mvPp2RxBmPoolId(rx_desc);
+
+	/* buffers released by HW */
+	tx_cmd |= (poolId << PP2_TX_POOL_INDEX_OFFS) | PP2_TX_BUF_RELEASE_MODE_MASK |
+			PP2_TX_F_DESC_MASK | PP2_TX_L_DESC_MASK |
+			PP2_TX_L4_CSUM_NOT | PP2_TX_IP_CSUM_DISABLE_MASK;
+
+	tx_desc->command = tx_cmd;
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	qset = (rx_desc->bmQset & PP2_RX_BUFF_QSET_NUM_MASK) >> PP2_RX_BUFF_QSET_NUM_OFFS;
+	grntd = (rx_desc->bmQset & PP2_RX_BUFF_TYPE_MASK) >> PP2_RX_BUFF_TYPE_OFFS;
+	tx_desc->hwCmd[1] = (qset << PP2_TX_MOD_QSET_OFFS) | (grntd << PP2_TX_MOD_GRNTD_BIT);
+#endif
+
+	tx_desc->physTxq = MV_PPV2_TXQ_PHYS(pp->port, tx_spec_ptr->txp, tx_spec_ptr->txq);
+
+	txq_ctrl = &pp->txq_ctrl[tx_spec_ptr->txp * CONFIG_MV_PP2_TXQ + tx_spec_ptr->txq];
+
+	if (txq_ctrl == NULL) {
+		printk(KERN_ERR "%s: invalidate txp/txq (%d/%d)\n",
+			__func__, tx_spec_ptr->txp, tx_spec_ptr->txq);
+		pp->dev->stats.tx_dropped++;
+		return MV_DROPPED;
+	}
+
+	txq_cpu_ptr = &txq_ctrl->txq_cpu[cpu];
+
+	if (txq_cpu_ptr->txq_count >= mv_ctrl_pp2_txdone)
+		mv_l2fw_txq_done(pp, txq_ctrl);
+
+	if (MV_PP2_IS_PON_PORT(pp->port)) {
+		tx_desc->dataSize  = rx_desc->dataSize;
+		tx_desc->pktOffset = skb_headroom(skb);
+	} else {
+		tx_desc->dataSize  = rx_desc->dataSize - MV_ETH_MH_SIZE;
+		tx_desc->pktOffset = skb_headroom(skb) + MV_ETH_MH_SIZE;
+	}
+
+	tx_desc->bufCookie = (MV_U32)skb;
+	tx_desc->bufPhysAddr = mvOsCacheFlush(NULL, skb->head, tx_desc->dataSize);
+	mv_pp2_tx_desc_flush(pp, tx_desc);
+
+	/* TODO - XOR ready check */
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	txq_cpu_ptr->reserved_num--;
+#endif
+	txq_cpu_ptr->txq_count++;
+	aggr_txq_ctrl->txq_count++;
+
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+	if (pp->dbg_flags & MV_ETH_F_DBG_TX) {
+		printk(KERN_ERR "\n");
+		printk(KERN_ERR "%s - eth_l2fw_tx_%lu: cpu=%d, in_intr=0x%lx, port=%d, txp=%d, txq=%d\n",
+			pp->dev->name, pp->dev->stats.tx_packets, smp_processor_id(), in_interrupt(),
+			pp->port, tx_spec_ptr->txp, tx_spec_ptr->txq);
+
+		mv_pp2_tx_desc_print(tx_desc);
+		mvDebugMemDump(skb->data, 64, 1);
+	}
+#endif /* CONFIG_MV_PP2_DEBUG_CODE */
+
+	/* Enable transmit */
+	wmb();
+	mvPp2AggrTxqPendDescAdd(frags);
+
+	STAT_DBG(aggr_txq_ctrl->stats.txq_tx++);
+	STAT_DBG(txq_ctrl->txq_cpu[cpu].stats.txq_tx++);
+
+	pp->dev->stats.tx_packets++;
+	pp->dev->stats.tx_bytes += rx_desc->dataSize - MV_ETH_MH_SIZE;
+
+out:
+#ifndef CONFIG_MV_PP2_TXDONE_ISR
+	if (txq_cpu_ptr->txq_count >= mv_ctrl_pp2_txdone)
+		mv_l2fw_txq_done(pp, txq_ctrl);
+#endif /* CONFIG_MV_PP2_STAT_DIST */
+
+	return NETDEV_TX_OK;
+}
+
+
+inline int mv_l2fw_txq_done(struct eth_port *pp, struct tx_queue *txq_ctrl)
+{
+	struct txq_cpu_ctrl *txq_cpu_ptr = &txq_ctrl->txq_cpu[smp_processor_id()];
+	int tx_done = mvPp2TxqSentDescProc(pp->port, txq_ctrl->txp, txq_ctrl->txq);
+
+	if (!tx_done)
+		return tx_done;
+
+	txq_cpu_ptr->txq_count -= tx_done;
+	STAT_DBG(txq_cpu_ptr->stats.txq_txdone += tx_done);
+	return tx_done;
+}
+
+static int mv_l2fw_txq_done_force(struct eth_port *pp, struct tx_queue *txq_ctrl)
+{
+	int cpu, tx_done = 0;
+	struct txq_cpu_ctrl *txq_cpu_ptr;
+
+	for_each_possible_cpu(cpu) {
+		txq_cpu_ptr = &txq_ctrl->txq_cpu[cpu];
+		tx_done += txq_cpu_ptr->txq_count;
+		txq_cpu_ptr->txq_count = 0;
+	}
+	return tx_done;
+}
+
+static int mv_l2fw_txq_clean(int port, int txp, int txq)
+{
+	struct eth_port *pp;
+	struct tx_queue *txq_ctrl;
+	int msec, pending, tx_done;
+
+	if (mvPp2TxpCheck(port, txp))
+		return -EINVAL;
+
+	pp = mv_pp2_port_by_id(port);
+	if ((pp == NULL) || (pp->txq_ctrl == NULL))
+		return -ENODEV;
+
+	if (mvPp2MaxCheck(txq, CONFIG_MV_PP2_TXQ, "txq"))
+		return -EINVAL;
+
+	txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_PP2_TXQ + txq];
+	if (txq_ctrl->q) {
+		/* Enable TXQ drain */
+		mvPp2TxqDrainSet(port, txp, txq, MV_TRUE);
+
+		/* Wait for all packets to be transmitted */
+		msec = 0;
+		do {
+			if (msec >= 1000 /*timeout*/) {
+				pr_err("port=%d, txp=%d txq=%d: timeout for transmit pending descriptors\n",
+					port, txp, txq);
+				break;
+			}
+			mdelay(1);
+			msec++;
+
+			pending = mvPp2TxqPendDescNumGet(port, txp, txq);
+		} while (pending);
+
+		/* Disable TXQ Drain */
+		mvPp2TxqDrainSet(port, txp, txq, MV_FALSE);
+
+		/* release all transmitted packets */
+		tx_done = mv_l2fw_txq_done(pp, txq_ctrl);
+		if (tx_done > 0)
+			mvOsPrintf(KERN_INFO "%s: port=%d, txp=%d txq=%d: Free %d transmitted descriptors\n",
+				__func__, port, txp, txq, tx_done);
+
+		/* release all untransmitted packets */
+		tx_done = mv_l2fw_txq_done_force(pp, txq_ctrl);
+		if (tx_done > 0)
+			mvOsPrintf(KERN_INFO "%s: port=%d, txp=%d txq=%d: Free %d untransmitted descriptors\n",
+				__func__, port, txp, txq, tx_done);
+	}
+	return 0;
+}
+
+static int mv_l2fw_txp_clean(int port, int txp)
+{
+	struct eth_port *pp;
+	int txq;
+
+	if (mvPp2TxpCheck(port, txp))
+		return -EINVAL;
+
+	pp = mv_pp2_port_by_id(port);
+	if ((pp == NULL) || (pp->txq_ctrl == NULL))
+		return -ENODEV;
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		printk(KERN_ERR "Port %d must be stopped before\n", port);
+		return -EINVAL;
+	}
+
+	/* Flush TX FIFO */
+	mvPp2TxPortFifoFlush(port, MV_TRUE);
+
+	/* free the skb's in the hal tx ring */
+	for (txq = 0; txq < CONFIG_MV_PP2_TXQ; txq++)
+		mv_l2fw_txq_clean(port, txp, txq);
+
+	mvPp2TxPortFifoFlush(port, MV_FALSE);
+
+	mvPp2TxpReset(port, txp);
+
+	return 0;
+}
+
+
+
+
+inline void mv_l2fw_pool_refill(struct eth_port *pp,
+				     struct bm_pool *pool, struct pp2_rx_desc *rx_desc)
+{
+	if ((rx_desc->status & PP2_RX_BUF_HDR_MASK) == MV_FALSE) {
+		__u32 bm = mv_pp2_bm_cookie_build(rx_desc);
+		mv_pp2_pool_refill(pool, bm, rx_desc->bufPhysAddr, rx_desc->bufCookie);
+	} else
+		/* multiBuffer mode */
+		mv_pp2_buff_hdr_rx(pp, rx_desc);
+}
+
+inline int mv_l2fw_rx(struct eth_port *pp, int rx_todo, int rxq)
+{
+	struct eth_port  *new_pp;
+	struct l2fw_rule *rule;
+	MV_PP2_PHYS_RXQ_CTRL *rx_ctrl = pp->rxq_ctrl[rxq].q;
+	int rx_done, rx_filled, poolId, bytes;
+	u32 rx_status;
+	struct pp2_rx_desc *rx_desc;
+	struct bm_pool *pool;
+	MV_STATUS status = MV_OK;
+	struct eth_port_l2fw *ppl2fw = mv_pp2_ports_l2fw[pp->port];
+	MV_IP_HEADER *pIph = NULL;
+	int ipOffset;
+	struct sk_buff *skb, *skb_new = NULL;
+	MV_U32 bufPhysAddr, bm;
+
+	rx_done = mvPp2RxqBusyDescNumGet(pp->port, rxq);
+	mvOsCacheIoSync(NULL);
+
+	if ((rx_todo > rx_done) || (rx_todo < 0))
+		rx_todo = rx_done;
+
+	if (rx_todo == 0)
+		return 0;
+
+	rx_done = 0;
+	rx_filled = 0;
+
+	/* Fairness NAPI loop */
+	while (rx_done < rx_todo) {
+#ifdef CONFIG_MV_PP2_RX_DESC_PREFETCH
+		rx_desc = mv_pp2_rx_prefetch(pp, rx_ctrl, rx_done, rx_todo);
+#else
+		rx_desc = mvPp2RxqNextDescGet(rx_ctrl);
+		mvOsCacheLineInv(NULL, rx_desc);
+		prefetch(rx_desc);
+#endif /* CONFIG_MV_PP2_RX_DESC_PREFETCH */
+
+		if (!rx_desc)
+			printk(KERN_INFO "rx_desc is NULL in %s\n", __func__);
+
+		rx_done++;
+		rx_filled++;
+
+		rx_status = rx_desc->status;
+
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+		/* check if buffer header is in used */
+		if (pp->dbg_flags & MV_ETH_F_DBG_BUFF_HDR)
+			if (rx_status & PP2_RX_BUF_HDR_MASK)
+				mv_pp2_buff_hdr_rx_dump(pp, rx_desc);
+
+		/* print RX descriptor */
+		if (pp->dbg_flags & MV_ETH_F_DBG_RX)
+			mv_pp2_rx_desc_print(rx_desc);
+#endif /* CONFIG_MV_PP2_DEBUG_CODE */
+
+		skb = (struct sk_buff *)rx_desc->bufCookie;
+
+		if (!skb) {
+			printk(KERN_INFO "%s: skb is NULL, rx_done=%d\n", __func__, rx_done);
+			return rx_done;
+		}
+
+		poolId = mvPp2RxBmPoolId(rx_desc);
+		pool = &mv_pp2_pool[poolId];
+
+		if (rx_status & PP2_RX_ES_MASK) {
+			printk(KERN_ERR "giga #%d: bad rx status 0x%08x\n", pp->port, rx_status);
+			mv_l2fw_pool_refill(pp, pool, rx_desc);
+			continue;
+		}
+
+		ipOffset = (rx_status & PP2_RX_L3_OFFSET_MASK) >> PP2_RX_L3_OFFSET_OFFS;
+
+		pIph = (MV_IP_HEADER *)(skb->data + ipOffset);
+
+		if (pIph == NULL) {
+			printk(KERN_INFO "pIph==NULL in %s\n", __func__);
+			continue;
+		}
+#ifdef CONFIG_MV_PP2_L2FW_DEBUG
+		if (pp->dbg_flags & MV_ETH_F_DBG_RX) {
+
+			mvDebugMemDump(skb->data, 64, 1);
+
+			if (pIph) {
+				MV_U8 *srcIP, *dstIP;
+				srcIP = (MV_U8 *)&(pIph->srcIP);
+				dstIP = (MV_U8 *)&(pIph->dstIP);
+				printk(KERN_INFO "%u.%u.%u.%u->%u.%u.%u.%u in %s\n",
+						MV_IPQUAD(srcIP), MV_IPQUAD(dstIP), __func__);
+				printk(KERN_INFO "0x%x->0x%x in %s\n", pIph->srcIP, pIph->dstIP, __func__);
+			} else
+				printk(KERN_INFO "pIph is NULL in %s\n", __func__);
+		}
+#endif
+		if (ppl2fw->lookupEn) {
+			rule = l2fw_lookup(pIph->srcIP, pIph->dstIP);
+
+			new_pp = rule ? mv_pp2_ports[rule->port] : mv_pp2_ports[ppl2fw->txPort];
+
+		} else
+			new_pp  = mv_pp2_ports[ppl2fw->txPort];
+
+		bytes = rx_desc->dataSize - MV_ETH_MH_SIZE;
+
+		switch (ppl2fw->cmd) {
+		case CMD_L2FW_AS_IS:
+			status = mv_l2fw_tx(skb, new_pp, rx_desc);
+			break;
+
+		case CMD_L2FW_SWAP_MAC:
+			mvOsCacheLineInv(NULL, skb->data);
+			l2fw_swap_mac(skb->data);
+			mvOsCacheLineFlush(NULL, skb->data);
+			status = mv_l2fw_tx(skb, new_pp, rx_desc);
+			break;
+
+		case CMD_L2FW_COPY_SWAP:
+			if (rx_status & PP2_RX_BUF_HDR_MASK) {
+				printk(KERN_INFO "%s: not support copy with multibuffer packets.\n", __func__);
+				status = MV_ERROR;
+				break;
+			}
+#ifdef CONFIG_MV_PP2_L2FW_XOR
+			if (bytes >= ppl2fw->xorThreshold) {
+				skb_new = eth_l2fw_copy_packet_withXor(skb, rx_desc);
+				pr_error("%s: xor is not supported\n", __func__);
+			}
+#endif /* CONFIG_MV_PP2_L2FW_XOR */
+
+			if (skb_new == NULL)
+				skb_new = eth_l2fw_copy_packet_withOutXor(skb, rx_desc);
+
+			if (skb_new) {
+				bufPhysAddr = rx_desc->bufPhysAddr;
+
+				bm = mv_pp2_bm_cookie_build(rx_desc);
+				status = mv_l2fw_tx(skb_new, new_pp, rx_desc);
+
+				mv_pp2_pool_refill(pool, bm, bufPhysAddr, (MV_ULONG)skb);
+
+				/* for refill function */
+				skb = skb_new;
+			} else
+				status = MV_ERROR;
+			break;
+#ifdef CONFIG_MV_PP2_L2SEC
+		case CMD_L2FW_CESA:
+			if (rx_status & PP2_RX_BUF_HDR_MASK) {
+				printk(KERN_INFO "%s: not support cesa with multibuffer packets.\n", __func__);
+				status = MV_ERROR;
+				break;
+			}
+				status = mv_l2sec_handle_esp(pkt, rx_desc, new_pp, pp->port);
+			break;
+#endif
+
+		default:
+			printk(KERN_INFO "WARNING:%s invalid mode %d, rx port %d\n", __func__, ppl2fw->cmd, pp->port);
+			status = MV_DROPPED;
+		} /*switch*/
+
+		if (status == MV_OK) {
+			/* BM - no refill */
+			mvOsCacheLineInv(NULL, rx_desc);
+			continue;
+		}
+
+		/* status is not OK */
+		mv_l2fw_pool_refill(pp, pool, rx_desc);
+
+		if (status == MV_DROPPED)
+			ppl2fw->statDrop++;
+
+		if (status == MV_ERROR)
+			ppl2fw->statErr++;
+
+	} /* of while */
+
+	/* Update RxQ management counters */
+	mvOsCacheIoSync(NULL);
+	mvPp2RxqDescNumUpdate(pp->port, rxq, rx_done, rx_filled);
+
+	return rx_done;
+}
+
+static void mv_l2fw_shared_cleanup(void)
+{
+	if (mv_pp2_ports_l2fw)
+		mvOsFree(mv_pp2_ports_l2fw);
+
+	if (l2fw_hash) {
+		mv_l2fw_flush();
+		mvOsFree(l2fw_hash);
+	}
+
+	mv_pp2_ports_l2fw = NULL;
+	l2fw_hash = NULL;
+}
+
+
+static int mv_l2fw_shared_init(void)
+{
+	int size, bytes;
+
+	size = eth_ports_l2fw_num * sizeof(struct eth_port_l2fw *);
+	mv_pp2_ports_l2fw = mvOsMalloc(size);
+
+	if (!mv_pp2_ports_l2fw)
+		goto oom;
+
+	memset(mv_pp2_ports_l2fw, 0, size);
+
+	bytes = sizeof(struct l2fw_rule *) * L2FW_HASH_SIZE;
+	get_random_bytes(&l2fw_jhash_iv, sizeof(l2fw_jhash_iv));
+	l2fw_hash = (struct l2fw_rule **)mvOsMalloc(bytes);
+
+	if (l2fw_hash == NULL) {
+		mvOsPrintf("l2fw hash: not enough memory\n");
+		goto oom;
+	}
+
+	mvOsMemset(l2fw_hash, 0, bytes);
+
+	mvOsPrintf("L2FW hash init %d entries, %d bytes\n", L2FW_HASH_SIZE, bytes);
+
+#ifdef CONFIG_MV_PP2_L2SEC
+	mv_l2sec_cesa_init();
+#endif
+
+#ifdef CONFIG_MV_PP2_L2FW_XOR
+	setXorDesc();
+#endif
+
+	return MV_OK;
+oom:
+	mv_l2fw_shared_cleanup();
+	mvOsPrintf("%s: out of memory in L2FW initialization\n", __func__);
+
+	return -ENOMEM;
+
+}
+
+static int mv_l2fw_port_init(int port)
+{
+	int status;
+
+	if (!shared) {
+		status = mv_l2fw_shared_init();
+		if (status)
+			return status;
+	}
+
+	mv_pp2_ports_l2fw[port] = mvOsMalloc(sizeof(struct eth_port_l2fw));
+	if (!mv_pp2_ports_l2fw[port])
+		goto oom;
+
+	mv_pp2_ports_l2fw[port]->cmd    = CMD_L2FW_AS_IS;
+	mv_pp2_ports_l2fw[port]->txPort = port;
+	mv_pp2_ports_l2fw[port]->lookupEn = 0;
+	mv_pp2_ports_l2fw[port]->xorThreshold = XOR_THRESHOLD_DEF;
+	mv_pp2_ports_l2fw[port]->statErr = 0;
+	mv_pp2_ports_l2fw[port]->statDrop = 0;
+
+	shared++;
+
+	return MV_OK;
+
+oom:
+	if (!shared)
+		mv_l2fw_shared_cleanup();
+
+	return -ENOMEM;
+}
+
+
+static void mv_l2fw_port_free(int port)
+{
+	if (!mv_pp2_ports_l2fw) {
+		mvOsPrintf("in %s: l2fw database is NULL\n", __func__);
+		return;
+	}
+
+	if (!mv_pp2_ports_l2fw[port]) {
+		mvOsPrintf("in %s: l2fw port #%d database is NULL\n", __func__, port);
+		return;
+	}
+
+	mvOsFree(mv_pp2_ports_l2fw[port]);
+	mv_pp2_ports_l2fw[port] = NULL;
+
+	shared--;
+
+	if (!shared)
+		mv_l2fw_shared_cleanup();
+}
+
+int mv_l2fw_stop(struct net_device *dev)
+{
+	int txp;
+	struct eth_port *pp = MV_ETH_PRIV(dev);
+
+	for (txp = 0; txp < pp->txp_num; txp++)
+		if (mv_l2fw_txp_clean(pp->port, txp))
+			return MV_ERROR;
+
+	return mv_pp2_eth_stop(dev);
+}
+
+static netdev_tx_t mv_l2fw_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	return NETDEV_TX_LOCKED;
+}
+
+static const struct net_device_ops mv_l2fw_netdev_ops = {
+	.ndo_open = mv_pp2_eth_open,
+	.ndo_stop = mv_l2fw_stop,
+	.ndo_start_xmit = mv_l2fw_xmit,
+	.ndo_set_rx_mode = mv_pp2_rx_set_rx_mode,
+	.ndo_set_mac_address = mv_pp2_eth_set_mac_addr,
+	.ndo_change_mtu = mv_pp2_eth_change_mtu,
+};
diff --git a/drivers/net/ethernet/mvebu_net/pp2/l2fw/mv_eth_l2fw.h b/drivers/net/ethernet/mvebu_net/pp2/l2fw/mv_eth_l2fw.h
new file mode 100644
index 000000000000..46075e38245c
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/l2fw/mv_eth_l2fw.h
@@ -0,0 +1,78 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+
+#ifndef L2FW_MV_ETH_L2FW_H
+#define L2FW_MV_ETH_L2FW_H
+
+#include "mvOs.h"
+#include "../net_dev/mv_netdev.h"
+
+#define	L2FW_HASH_SIZE   (1 << 17)
+#define	L2FW_HASH_MASK   (L2FW_HASH_SIZE - 1)
+
+extern struct aggr_tx_queue *aggr_txqs;
+
+/* L2fw defines */
+
+#define CMD_L2FW_AS_IS				0
+#define CMD_L2FW_SWAP_MAC			1
+#define CMD_L2FW_COPY_SWAP			2
+#define CMD_L2FW_CESA				3
+#define CMD_L2FW_LAST				4
+
+#define XOR_CAUSE_DONE_MASK(chan) ((BIT0|BIT1) << (chan * 16))
+#define XOR_THRESHOLD_DEF			2000;
+
+struct eth_port_l2fw {
+	int cmd;
+	int lookupEn;
+	int xorThreshold;
+	int txPort;
+	/* stats */
+	int statErr;
+	int statDrop;
+};
+
+struct l2fw_rule {
+	MV_U32 srcIP;
+	MV_U32 dstIP;
+	MV_U8 port;
+	struct l2fw_rule *next;
+};
+
+int mv_l2fw_add(MV_U32 srcIP, MV_U32 dstIP, int port);
+int mv_l2fw_set(int port, bool l2fw);
+int mv_l2fw_port(int rx_port, int tx_port, int cmd);
+void mv_l2fw_xor(int rx_port, int threshold);
+void mv_l2fw_lookupEn(int rx_port, int enable);
+void mv_l2fw_flush(void);
+void mv_l2fw_rules_dump(void);
+void mv_l2fw_ports_dump(void);
+void mv_l2fw_stats(void);
+
+#endif
diff --git a/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_bm_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_bm_sysfs.c
new file mode 100644
index 000000000000..a8676a32e3bb
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_bm_sysfs.c
@@ -0,0 +1,223 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "mv_eth_sysfs.h"
+#include "gbe/mvPp2Gbe.h"
+#include "mv_netdev.h"
+
+static ssize_t mv_pp2_help(char *buf)
+{
+	int off = 0;
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	off += sprintf(buf+off, "cat                            queueMappDump   - print BM all rxq/txq to qSet mapp\n");
+	off += sprintf(buf+off, "cat                            qsetConfigDump  - print BM all qSets configuration\n");
+	off += sprintf(buf+off, "echo [qset] [pool]           > qsetCreate      - create qset and attach it to BM [pool]\n");
+	off += sprintf(buf+off, "echo [qset]                  > qsetDelete      - delete an unused qset (not used by RXQ/TXQ)\n");
+	off += sprintf(buf+off, "echo [qset] [grntd] [shared] > qsetMaxSet      - set max buff parameters for [qset]\n");
+	off += sprintf(buf+off, "echo [rxq] [qset]            > rxqQsetLong     - map [rxq] long Qset to [qset]\n");
+	off += sprintf(buf+off, "echo [rxq] [qset]            > rxqQsetShort    - map [rxq] short Qset to [qset]\n");
+	off += sprintf(buf+off, "echo [txq] [qset]            > txqQsetLong     - map [txq] long Qset to [qset]\n");
+	off += sprintf(buf+off, "echo [txq] [qset]            > txqQsetShort    - map [txq] short Qset to [qset]\n");
+	off += sprintf(buf+off, "echo [qset]                  > qsetShow        - show info for Qset [qset]\n");
+
+	off += sprintf(buf+off, "echo [pool]                  > poolDropCnt     - print BM pool drop counters\n");
+#endif
+
+	off += sprintf(buf+off, "echo [pool]                  > poolRegs        - print BM pool registers\n");
+	off += sprintf(buf+off, "echo [pool]                  > poolStatus      - print BM pool status\n");
+	off += sprintf(buf+off, "echo [pool] [size]           > poolSize        - set packet size to BM pool\n");
+	off += sprintf(buf+off, "echo [port] [pool] [buf_num] > poolBufNum      - set buffers num for BM pool\n");
+	off += sprintf(buf+off, "                                                 [port] - any port use this pool");
+	off += sprintf(buf+off, "echo [port] [pool]           > longPool        - set port's long BM pool\n");
+	off += sprintf(buf+off, "echo [port] [pool]           > shortPool       - set port's short BM pool\n");
+	off += sprintf(buf+off, "echo [port] [pool]           > hwfLongPool     - set port's HWF long BM pool\n");
+	off += sprintf(buf+off, "echo [port] [pool]           > hwfShortPool    - set port's HWF short BM pool\n");
+
+	return off;
+}
+
+static ssize_t mv_pp2_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	const char	*name = attr->attr.name;
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "queueMappDump"))
+		mvBmQueueMapDumpAll();
+	else if  (!strcmp(name, "qsetConfigDump"))
+		mvBmQsetConfigDumpAll();
+
+	else
+		off = mv_pp2_help(buf);
+
+	return off;
+}
+
+static ssize_t mv_pp2_port_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    a, b, c;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read port and value */
+	err = a = b = c = 0;
+	sscanf(buf, "%d %d %d", &a, &b, &c);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "poolRegs")) {
+		mvPp2BmPoolRegs(a);
+	} else if (!strcmp(name, "poolDropCnt")) {
+		mvBmV1PoolDropCntDump(a);
+	} else if (!strcmp(name, "poolStatus")) {
+		mv_pp2_pool_status_print(a);
+	} else if (!strcmp(name, "poolSize")) {
+		err = mv_pp2_ctrl_pool_size_set(a, b);
+	} else if (!strcmp(name, "poolBufNum")) {
+		err = mv_pp2_ctrl_pool_buf_num_set(a, b, c);
+	} else if (!strcmp(name, "longPool")) {
+		err = mv_pp2_ctrl_long_pool_set(a, b);
+	} else if (!strcmp(name, "shortPool")) {
+		err = mv_pp2_ctrl_short_pool_set(a, b);
+	} else if (!strcmp(name, "hwfLongPool")) {
+		err = mv_pp2_ctrl_hwf_long_pool_set(a, b);
+	} else if (!strcmp(name, "hwfShortPool")) {
+		err = mv_pp2_ctrl_hwf_short_pool_set(a, b);
+	} else if (!strcmp(name, "qsetCreate")) {
+		mvBmQsetCreate(a, b);
+	} else if (!strcmp(name, "qsetDelete")) {
+		mvBmQsetDelete(a);
+	} else if (!strcmp(name, "rxqQsetLong")) {
+		mvBmRxqToQsetLongSet(a, b);
+	} else if (!strcmp(name, "rxqQsetShort")) {
+		mvBmRxqToQsetShortSet(a, b);
+	} else if (!strcmp(name, "txqQsetLong")) {
+		mvBmTxqToQsetLongSet(a, b);
+	} else if (!strcmp(name, "txqQsetShort")) {
+		mvBmTxqToQsetShortSet(a, b);
+	} else if (!strcmp(name, "qsetMaxSet")) {
+		mvBmQsetBuffMaxSet(a, b, c);
+	} else if (!strcmp(name, "qsetShow")) {
+		mvBmQsetShow(a);
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(help,		S_IRUSR, mv_pp2_show, NULL);
+static DEVICE_ATTR(queueMappDump,	S_IRUSR, mv_pp2_show, NULL);
+static DEVICE_ATTR(qsetConfigDump,	S_IRUSR, mv_pp2_show, NULL);
+static DEVICE_ATTR(poolRegs,		S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(poolDropCnt,		S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(poolStatus,		S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(poolSize,		S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(poolBufNum,		S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(longPool,		S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(shortPool,		S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(hwfLongPool,		S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(hwfShortPool,	S_IWUSR, NULL, mv_pp2_port_store);
+
+static DEVICE_ATTR(qsetCreate,		S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(qsetDelete,		S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(rxqQsetLong,	S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(rxqQsetShort,	S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(txqQsetLong,	S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(txqQsetShort,	S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(qsetMaxSet,		S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(qsetShow,		S_IWUSR, NULL, mv_pp2_port_store);
+
+static struct attribute *mv_pp2_attrs[] = {
+	&dev_attr_help.attr,
+	&dev_attr_queueMappDump.attr,
+	&dev_attr_qsetConfigDump.attr,
+	&dev_attr_poolRegs.attr,
+	&dev_attr_poolDropCnt.attr,
+	&dev_attr_poolStatus.attr,
+	&dev_attr_poolSize.attr,
+	&dev_attr_poolBufNum.attr,
+	&dev_attr_longPool.attr,
+	&dev_attr_shortPool.attr,
+	&dev_attr_hwfLongPool.attr,
+	&dev_attr_hwfShortPool.attr,
+	&dev_attr_qsetCreate.attr,
+	&dev_attr_qsetDelete.attr,
+	&dev_attr_rxqQsetLong.attr,
+	&dev_attr_rxqQsetShort.attr,
+	&dev_attr_txqQsetLong.attr,
+	&dev_attr_txqQsetShort.attr,
+	&dev_attr_qsetMaxSet.attr,
+	&dev_attr_qsetShow.attr,
+	NULL
+};
+
+static struct attribute_group mv_pp2_bm_group = {
+	.name = "bm",
+	.attrs = mv_pp2_attrs,
+};
+
+int mv_pp2_bm_sysfs_init(struct kobject *pp2_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(pp2_kobj, &mv_pp2_bm_group);
+	if (err)
+		printk(KERN_INFO "sysfs group failed %d\n", err);
+
+	return err;
+}
+
+int mv_pp2_bm_sysfs_exit(struct kobject *pp2_kobj)
+{
+	sysfs_remove_group(pp2_kobj, &mv_pp2_bm_group);
+
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_dbg_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_dbg_sysfs.c
new file mode 100644
index 000000000000..05a2e4bfbf07
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_dbg_sysfs.c
@@ -0,0 +1,146 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "mv_netdev.h"
+#include "mv_eth_sysfs.h"
+
+
+static ssize_t mv_pp2_dbg_help(char *buf)
+{
+	int off = 0;
+
+	off += sprintf(buf+off, "cat              clean     - Clean all ports\n");
+	off += sprintf(buf+off, "cat              init      - Clean and init all ports\n");
+	off += sprintf(buf+off, "echo offs      > regRead   - Read PPv2 register [ offs]\n");
+	off += sprintf(buf+off, "echo offs hex  > regWrite  - Write value [hex] to PPv2 register [offs]\n");
+
+	return off;
+}
+
+static ssize_t mv_pp2_dbg_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	const char      *name = attr->attr.name;
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "clean"))
+		mv_pp2_all_ports_cleanup();
+	else if (!strcmp(name, "init")) {
+		if (mv_pp2_all_ports_cleanup() == 0)
+			/* probe only if all ports are clean */
+			mv_pp2_all_ports_probe();
+	} else
+		off = mv_pp2_dbg_help(buf);
+
+	return off;
+}
+
+static ssize_t mv_pp2_dbg_reg_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    r, v;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read port and value */
+	err = r = v = 0;
+	sscanf(buf, "%x %x", &r, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "regRead")) {
+		v = mvPp2RdReg(r);
+		pr_info("regRead val: 0x%08x\n", v);
+	}  else if (!strcmp(name, "regWrite")) {
+		mvPp2WrReg(r, v);
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+
+static DEVICE_ATTR(help,          S_IRUSR, mv_pp2_dbg_show, NULL);
+static DEVICE_ATTR(clean,         S_IRUSR, mv_pp2_dbg_show, NULL);
+static DEVICE_ATTR(init,          S_IRUSR, mv_pp2_dbg_show, NULL);
+static DEVICE_ATTR(regRead,       S_IWUSR, NULL, mv_pp2_dbg_reg_store);
+static DEVICE_ATTR(regWrite,      S_IWUSR, NULL, mv_pp2_dbg_reg_store);
+
+
+static struct attribute *mv_pp2_dbg_attrs[] = {
+	&dev_attr_clean.attr,
+	&dev_attr_init.attr,
+	&dev_attr_help.attr,
+	&dev_attr_regRead.attr,
+	&dev_attr_regWrite.attr,
+	NULL
+};
+
+
+static struct attribute_group mv_pp2_dbg_group = {
+	.name = "dbg",
+	.attrs = mv_pp2_dbg_attrs,
+};
+
+int mv_pp2_dbg_sysfs_init(struct kobject *pp2_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(pp2_kobj, &mv_pp2_dbg_group);
+	if (err)
+		pr_err("sysfs group i%s failed %d\n", mv_pp2_dbg_group.name, err);
+
+	return err;
+}
+
+int mv_pp2_dbg_sysfs_exit(struct kobject *pp2_kobj)
+{
+	sysfs_remove_group(pp2_kobj, &mv_pp2_dbg_group);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_hwf_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_hwf_sysfs.c
new file mode 100644
index 000000000000..962df50c2eb4
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_hwf_sysfs.c
@@ -0,0 +1,195 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "mv_eth_sysfs.h"
+#include "gbe/mvPp2Gbe.h"
+#include "mv_netdev.h"
+
+static ssize_t mv_pp2_hwf_help(char *buf)
+{
+	int o = 0;
+
+	o += scnprintf(buf+o, PAGE_SIZE-o, "id     - is hex number\n");
+	o += scnprintf(buf+o, PAGE_SIZE-o, "others - are dec numbers\n\n");
+
+	o += scnprintf(buf+o, PAGE_SIZE-o, "cat                    regs    - show SWF to HWF switching registers\n");
+	o += scnprintf(buf+o, PAGE_SIZE-o, "cat                    status  - show SWF to HWF switching status\n");
+	o += scnprintf(buf+o, PAGE_SIZE-o, "echo msec            > timeout - set SWF to HWF switching timeout\n");
+	o += scnprintf(buf+o, PAGE_SIZE-o, "echo id txq rxq msec > switch  - start SWF to HWF switching process\n");
+#ifdef CONFIG_MV_PP2_SWF_HWF_CORRUPTION_WA
+	o += scnprintf(buf+o, PAGE_SIZE-o, "echo en              > c_inv   - on/off L1 and L2 cache invalidation\n");
+#endif
+	return o;
+}
+
+static ssize_t mv_pp2_hwf_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	int             off = 0;
+	const char      *name = attr->attr.name;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "regs"))
+		mvPp2FwdSwitchRegs();
+	else if (!strcmp(name, "status")) {
+		int state, status, msec;
+
+		status = mvPp2FwdSwitchStatus(&state, &msec);
+		pr_info("\n[FWD Switch status]\n");
+		pr_info("\t status=%d, hwState=%d, msec=%d\n", status, state, msec);
+	} else
+		off = mv_pp2_hwf_help(buf);
+
+	return off;
+}
+
+static unsigned int fwd_switch_msec = 3;
+
+static ssize_t mv_pp2_hwf_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             num, err;
+	unsigned int    id, rxq, txq, msec;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read port and value */
+	err = id = txq = rxq = msec = 0;
+	num = sscanf(buf, "%x %d %d %d", &id, &txq, &rxq, &msec);
+	if (num < 4)
+		msec = fwd_switch_msec;
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "switch")) {
+		err = mvPp2FwdSwitchCtrl(id, txq, rxq, msec);
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_pp2_hwf_dec_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err = 0;
+	unsigned int    val;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read port and value */
+	err = val = 0;
+	sscanf(buf, "%d ", &val);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "timeout")) {
+		fwd_switch_msec = val;
+#ifdef CONFIG_MV_PP2_SWF_HWF_CORRUPTION_WA
+	} else if (!strcmp(name, "c_inv")) {
+		mv_pp2_cache_inv_wa_ctrl(val);
+#endif
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(help,		S_IRUSR, mv_pp2_hwf_show, NULL);
+static DEVICE_ATTR(regs,		S_IRUSR, mv_pp2_hwf_show, NULL);
+static DEVICE_ATTR(status,		S_IRUSR, mv_pp2_hwf_show, NULL);
+static DEVICE_ATTR(switch,		S_IWUSR, NULL, mv_pp2_hwf_store);
+static DEVICE_ATTR(timeout,		S_IWUSR, NULL, mv_pp2_hwf_dec_store);
+#ifdef CONFIG_MV_PP2_SWF_HWF_CORRUPTION_WA
+static DEVICE_ATTR(c_inv,		S_IWUSR, NULL, mv_pp2_hwf_dec_store);
+#endif
+
+static struct attribute *mv_pp2_hwf_attrs[] = {
+	&dev_attr_help.attr,
+	&dev_attr_regs.attr,
+	&dev_attr_status.attr,
+	&dev_attr_switch.attr,
+	&dev_attr_timeout.attr,
+#ifdef CONFIG_MV_PP2_SWF_HWF_CORRUPTION_WA
+	&dev_attr_c_inv.attr,
+#endif
+	NULL
+};
+
+static struct attribute_group mv_pp2_hwf_group = {
+	.name = "hwf",
+	.attrs = mv_pp2_hwf_attrs,
+};
+
+
+int mv_pp2_gbe_hwf_sysfs_init(struct kobject *gbe_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(gbe_kobj, &mv_pp2_hwf_group);
+	if (err)
+		printk(KERN_INFO "sysfs group failed %d\n", err);
+
+	return err;
+}
+
+int mv_pp2_gbe_hwf_sysfs_exit(struct kobject *gbe_kobj)
+{
+	sysfs_remove_group(gbe_kobj, &mv_pp2_hwf_group);
+
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_napi_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_napi_sysfs.c
new file mode 100644
index 000000000000..93217e8200b8
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_napi_sysfs.c
@@ -0,0 +1,170 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "mv_eth_sysfs.h"
+#include "gbe/mvPp2Gbe.h"
+#include "mv_netdev.h"
+
+static ssize_t mv_eth_help(char *buf)
+{
+	int off = 0;
+
+	off += sprintf(buf+off, "echo [p]                     > napiShow      - show port's napi groups info\n");
+	off += sprintf(buf+off, "echo [p] [group]             > napiCreate    - create an empty napi group (cpu_mask = rxq_mask = 0)\n");
+	off += sprintf(buf+off, "echo [p] [group]             > napiDelete    - delete an existing empty napi group\n");
+	off += sprintf(buf+off, "echo [p] [group] [cpus]      > cpuGroup      - set <cpus mask> for <port/napi group>\n");
+	off += sprintf(buf+off, "echo [p] [group] [rxqs]      > rxqGroup      - set <rxqs mask> for <port/napi group>\n");
+
+	return off;
+}
+
+static ssize_t mv_eth_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	off = mv_eth_help(buf);
+
+	return off;
+}
+
+static ssize_t mv_eth_port_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, v;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read port and value */
+	err = p = v = 0;
+	sscanf(buf, "%d %d", &p, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "napiShow")) {
+		mv_eth_napi_groups_print(p);
+	} else if (!strcmp(name, "napiCreate")) {
+		mv_eth_port_napi_group_create(p, v);
+	} else if (!strcmp(name, "napiDelete")) {
+		mv_eth_port_napi_group_delete(p, v);
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_eth_3_hex_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, i, v;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	err = p = i = v = 0;
+	sscanf(buf, "%d %d %x", &p, &i, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "cpuGroup")) {
+		err = mv_eth_napi_set_cpu_affinity(p, i, v);
+	} else if (!strcmp(name, "rxqGroup")) {
+		err = mv_pp2_eth_napi_set_rxq_affinity(p, i, v);
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	return err ? -EINVAL : len;
+}
+
+
+static DEVICE_ATTR(help,         S_IRUSR, mv_eth_show, NULL);
+static DEVICE_ATTR(napiCreate,   S_IWUSR, NULL, mv_eth_port_store);
+static DEVICE_ATTR(napiDelete,   S_IWUSR, NULL, mv_eth_port_store);
+static DEVICE_ATTR(napiShow,     S_IWUSR, NULL, mv_eth_port_store);
+static DEVICE_ATTR(cpuGroup,     S_IWUSR, NULL, mv_eth_3_hex_store);
+static DEVICE_ATTR(rxqGroup,     S_IWUSR, NULL, mv_eth_3_hex_store);
+
+static struct attribute *mv_eth_attrs[] = {
+	&dev_attr_napiCreate.attr,
+	&dev_attr_napiDelete.attr,
+	&dev_attr_cpuGroup.attr,
+	&dev_attr_rxqGroup.attr,
+	&dev_attr_help.attr,
+	&dev_attr_napiShow.attr,
+	NULL
+};
+
+static struct attribute_group mv_eth_napi_group = {
+	.name = "napi",
+	.attrs = mv_eth_attrs,
+};
+
+int mv_pp2_napi_sysfs_init(struct kobject *gbe_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(gbe_kobj, &mv_eth_napi_group);
+	if (err)
+		pr_err("sysfs group i%s failed %d\n", mv_eth_napi_group.name, err);
+
+	return err;
+}
+
+int mv_pp2_napi_sysfs_exit(struct kobject *gbe_kobj)
+{
+	sysfs_remove_group(gbe_kobj, &mv_eth_napi_group);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_pme_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_pme_sysfs.c
new file mode 100644
index 000000000000..aac36d69e7f2
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_pme_sysfs.c
@@ -0,0 +1,133 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "mv_eth_sysfs.h"
+#include "gbe/mvPp2Gbe.h"
+#include "mv_netdev.h"
+
+static ssize_t mv_pp2_help(char *buf)
+{
+	int off = 0;
+
+	off += sprintf(buf+off, "echo [p] [hex]       > modCmd    - set 2 bytes of PME_ATTR filed in TX descriptor (offset: 0x16)\n");
+	off += sprintf(buf+off, "echo [p] [hex]       > pmeDptr   - set 2 bytes of PME_DPTR field in TX descriptor (offset: 0x18)\n");
+	off += sprintf(buf+off, "echo [p] [hex]       > pmeProg   - set 1 bytes of PME_PROG field in TX descriptor (offset: 0x20)\n");
+
+	return off;
+}
+
+static ssize_t mv_pp2_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	off = mv_pp2_help(buf);
+
+	return off;
+}
+
+static ssize_t mv_pp2_port_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, v;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read port and value */
+	err = p = v = 0;
+	sscanf(buf, "%d %x", &p, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "modCmd")) {
+		err = mv_pp2_ctrl_tx_cmd_mod(p, v);
+	} else if (!strcmp(name, "pmeDptr")) {
+		err = mv_pp2_ctrl_tx_cmd_pme_dptr(p, v);
+	} else if (!strcmp(name, "pmeProgram")) {
+		err = mv_pp2_ctrl_tx_cmd_pme_prog(p, v);
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(help,        S_IRUSR, mv_pp2_show, NULL);
+static DEVICE_ATTR(modCmd,	S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(pmeDptr,	S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(pmeProgram,	S_IWUSR, NULL, mv_pp2_port_store);
+
+static struct attribute *mv_pp2_attrs[] = {
+	&dev_attr_help.attr,
+	&dev_attr_modCmd.attr,
+	&dev_attr_pmeDptr.attr,
+	&dev_attr_pmeProgram.attr,
+	NULL
+};
+
+static struct attribute_group gbe_pme_group = {
+	.name = "pme",
+	.attrs = mv_pp2_attrs,
+};
+
+int mv_pp2_gbe_pme_sysfs_init(struct kobject *gbe_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(gbe_kobj, &gbe_pme_group);
+	if (err)
+		printk(KERN_INFO "sysfs group %s failed %d\n", gbe_pme_group.name, err);
+
+	return err;
+}
+
+int mv_pp2_gbe_pme_sysfs_exit(struct kobject *gbe_kobj)
+{
+	sysfs_remove_group(gbe_kobj, &gbe_pme_group);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_pon_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_pon_sysfs.c
new file mode 100644
index 000000000000..cfc55170b600
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_pon_sysfs.c
@@ -0,0 +1,142 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "mv_eth_sysfs.h"
+#include "gbe/mvPp2Gbe.h"
+#include "mv_netdev.h"
+
+static ssize_t mv_pp2_help(char *buf)
+{
+	int off = 0;
+
+	off += sprintf(buf+off, "echo [p] [hex]       > dsaTag       - set 2 bits of DSA tag in tx descriptor\n");
+	off += sprintf(buf+off, "echo [p] [hex]       > pktColor     - set 2 bits of packet color in tx descriptor\n");
+	off += sprintf(buf+off, "echo [p] [hex]       > gemPortId    - set 12 bits of GEM port id in tx descriptor\n");
+	off += sprintf(buf+off, "echo [p] [hex]       > ponFec       - set 1 bit of PON fec in tx descriptor\n");
+	off += sprintf(buf+off, "echo [p] [hex]       > gemOem       - set 1 bit of GEM OEM in tx descriptor\n");
+
+	return off;
+}
+
+static ssize_t mv_pp2_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	off = mv_pp2_help(buf);
+
+	return off;
+}
+
+static ssize_t mv_pp2_port_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, v, a;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read port and value */
+	err = p = v = a = 0;
+	sscanf(buf, "%d %x %x", &p, &v, &a);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "dsaTag")) {
+		err = mv_pp2_ctrl_tx_cmd_dsa(p, v);
+	} else if (!strcmp(name, "pktColor")) {
+		err = mv_pp2_ctrl_tx_cmd_color(p, v);
+	} else if (!strcmp(name, "gemPortId")) {
+		err = mv_pp2_ctrl_tx_cmd_gem_id(p, v);
+	} else if (!strcmp(name, "ponFec")) {
+		err = mv_pp2_ctrl_tx_cmd_pon_fec(p, v);
+	} else if (!strcmp(name, "gemOem")) {
+		err = mv_eth_ctrl_tx_cmd_gem_oem(p, v);
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(help,        S_IRUSR, mv_pp2_show, NULL);
+static DEVICE_ATTR(dsaTag,	S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(pktColor,	S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(gemPortId,	S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(ponFec,	S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(gemOem,	S_IWUSR, NULL, mv_pp2_port_store);
+
+static struct attribute *mv_pp2_attrs[] = {
+	&dev_attr_help.attr,
+	&dev_attr_dsaTag.attr,
+	&dev_attr_pktColor.attr,
+	&dev_attr_gemPortId.attr,
+	&dev_attr_ponFec.attr,
+	&dev_attr_gemOem.attr,
+	NULL
+};
+
+static struct attribute_group mv_pp2_pon_group = {
+	.name = "pon",
+	.attrs = mv_pp2_attrs,
+};
+
+int mv_pp2_pon_sysfs_init(struct kobject *gbe_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(gbe_kobj, &mv_pp2_pon_group);
+	if (err)
+		pr_err("sysfs group %s failed %d\n", mv_pp2_pon_group.name, err);
+
+	return err;
+}
+
+int mv_pp2_pon_sysfs_exit(struct kobject *gbe_kobj)
+{
+	sysfs_remove_group(gbe_kobj, &mv_pp2_pon_group);
+	return 0;
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_qos_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_qos_sysfs.c
new file mode 100644
index 000000000000..9a07b64e0866
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_qos_sysfs.c
@@ -0,0 +1,155 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "mv_eth_sysfs.h"
+#include "gbe/mvPp2Gbe.h"
+#include "mv_netdev.h"
+
+static ssize_t mv_pp2_help(char *buf)
+{
+	int off = 0;
+
+	off += sprintf(buf+off, "echo [p]                     > dscp          - show RX and TX DSCP map for port <p>\n");
+	off += sprintf(buf+off, "echo [p] [txq] [dscp]        > txqDscp       - set <txq> for outgoing IP packets with <dscp>\n");
+
+	return off;
+}
+
+static ssize_t mv_pp2_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	off = mv_pp2_help(buf);
+
+	return off;
+}
+
+static ssize_t mv_pp2_port_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read port and value */
+	err = p = 0;
+	sscanf(buf, "%d", &p);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "dscp")) {
+		mv_pp2_dscp_map_show(p);
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_pp2_3_hex_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, i, v;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	err = p = i = v = 0;
+	sscanf(buf, "%d %d %x", &p, &i, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "txqDscp")) {
+		err = mv_pp2_txq_dscp_map_set(p, i, v);
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	return err ? -EINVAL : len;
+}
+
+
+static DEVICE_ATTR(help,         S_IRUSR, mv_pp2_show, NULL);
+static DEVICE_ATTR(dscp,         S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(txqDscp,      S_IWUSR, NULL, mv_pp2_3_hex_store);
+
+
+static struct attribute *mv_pp2_attrs[] = {
+	&dev_attr_dscp.attr,
+	&dev_attr_txqDscp.attr,
+	&dev_attr_help.attr,
+	NULL
+};
+
+static struct attribute_group mv_pp2_qos_group = {
+	.name = "qos",
+	.attrs = mv_pp2_attrs,
+};
+
+int mv_pp2_qos_sysfs_init(struct kobject *gbe_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(gbe_kobj, &mv_pp2_qos_group);
+	if (err)
+		pr_err("sysfs group %s failed %d\n", mv_pp2_qos_group.name, err);
+
+	return err;
+}
+
+int mv_pp2_qos_sysfs_exit(struct kobject *gbe_kobj)
+{
+	sysfs_remove_group(gbe_kobj, &mv_pp2_qos_group);
+	return 0;
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_rx_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_rx_sysfs.c
new file mode 100644
index 000000000000..b077b66a94ff
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_rx_sysfs.c
@@ -0,0 +1,205 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "mv_eth_sysfs.h"
+#include "gbe/mvPp2Gbe.h"
+#include "prs/mvPp2Prs.h"
+#include "mv_netdev.h"
+
+static ssize_t mv_pp2_help(char *b)
+{
+	int o = 0;
+
+	o += sprintf(b+o, "cat                    rxDmaRegs   - show RX DMA registers\n");
+	o += sprintf(b+o, "echo [p]             > rxFifoRegs  - show RX FIFO registers for port <p>\n");
+	o += sprintf(b+o, "echo [p] v           > rxWeight    - set weight for poll function, <v> - weight [0..255]\n");
+	o += sprintf(b+o, "echo [rxq]           > gRxqRegs    - show RXQ registers for global <rxq>\n");
+#ifdef CONFIG_MV_ETH_PP2_1
+	o += sprintf(b+o, "echo [p] [rxq]       > rxqCounters - show RXQ counters for <p/rxq>.\n");
+#endif
+	o += sprintf(b+o, "echo [p] [rxq]       > pRxqRegs    - show RXQ registers for global <rxq>\n");
+	o += sprintf(b+o, "echo [p] [rxq] [0|1] > rxqShow     - show RXQ descriptors ring for <p/rxq>\n");
+	o += sprintf(b+o, "echo [p] [rxq] [v]   > rxqSize     - set number of descriptors <v> for <port/rxq>.\n");
+	o += sprintf(b+o, "echo [p] [hex] [0|1] > mhRxSpec    - set MH value [hex] for RX special packets\n");
+	o += sprintf(b+o, "echo [p] [m]         > prefetch    - set RX prefetch mode for port [p]\n");
+	o += sprintf(b+o, "                                   [m]: 0-disable, 1-descriptor, 2-packet header, 3-both\n");
+
+	return o;
+}
+
+static ssize_t mv_pp2_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	const char      *name = attr->attr.name;
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "rxDmaRegs"))
+		mvPp2RxDmaRegsPrint();
+	else
+		off = mv_pp2_help(buf);
+
+	return off;
+}
+
+static ssize_t mv_pp2_port_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, v, a;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read port and value */
+	err = p = v = a = 0;
+	sscanf(buf, "%d %d %d", &p, &v, &a);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "rxqShow")) {
+		mvPp2RxqShow(p, v, a);
+	} else if (!strcmp(name, "gRxqRegs")) {
+		mvPp2PhysRxqRegs(p);
+	} else if (!strcmp(name, "pRxqRegs")) {
+		mvPp2PortRxqRegs(p, v);
+	} else if (!strcmp(name, "rxFifoRegs")) {
+		mvPp2RxFifoRegs(p);
+	} else if (!strcmp(name, "rxWeight")) {
+		mv_pp2_ctrl_set_poll_rx_weight(p, v);
+	} else if (!strcmp(name, "rxqSize")) {
+		mv_pp2_ctrl_rxq_size_set(p, v, a);
+	} else if (!strcmp(name, "rxqCounters")) {
+		mvPp2V1RxqDbgCntrs(p, v);
+	} else if (!strcmp(name, "prefetch")) {
+		err |= mv_pp2_ctrl_flag(p, MV_ETH_F_RX_DESC_PREFETCH, v & 0x1);
+		err |= mv_pp2_ctrl_flag(p, MV_ETH_F_RX_PKT_PREFETCH, v & 0x2);
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_pp2_rx_hex_store(struct device *dev,
+				struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, v, a;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read port and value */
+	err = p = v = a = 0;
+	sscanf(buf, "%d %x %x", &p, &v, &a);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "mhRxSpec")) {
+		mvPrsMhRxSpecialSet(MV_PPV2_PORT_PHYS(p), v, a);
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(help,        S_IRUSR, mv_pp2_show, NULL);
+static DEVICE_ATTR(rxDmaRegs,	S_IRUSR, mv_pp2_show, NULL);
+static DEVICE_ATTR(rxqCounters, S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(rxqShow,     S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(gRxqRegs,    S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(pRxqRegs,    S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(rxFifoRegs,  S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(rxWeight,	S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(rxqSize,	S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(mhRxSpec,	S_IWUSR, NULL, mv_pp2_rx_hex_store);
+static DEVICE_ATTR(prefetch,	S_IWUSR, NULL, mv_pp2_port_store);
+
+static struct attribute *mv_pp2_attrs[] = {
+	&dev_attr_help.attr,
+	&dev_attr_rxDmaRegs.attr,
+	&dev_attr_rxqShow.attr,
+	&dev_attr_rxqCounters.attr,
+	&dev_attr_gRxqRegs.attr,
+	&dev_attr_pRxqRegs.attr,
+	&dev_attr_rxFifoRegs.attr,
+	&dev_attr_rxWeight.attr,
+	&dev_attr_rxqSize.attr,
+	&dev_attr_mhRxSpec.attr,
+	&dev_attr_prefetch.attr,
+	NULL
+};
+
+static struct attribute_group mv_pp2_rx_group = {
+	.name = "rx",
+	.attrs = mv_pp2_attrs,
+};
+
+int mv_pp2_rx_sysfs_init(struct kobject *gbe_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(gbe_kobj, &mv_pp2_rx_group);
+	if (err)
+		pr_err("sysfs group %s failed %d\n", mv_pp2_rx_group.name, err);
+
+	return err;
+}
+
+int mv_pp2_rx_sysfs_exit(struct kobject *gbe_kobj)
+{
+	sysfs_remove_group(gbe_kobj, &mv_pp2_rx_group);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_sysfs.c
new file mode 100644
index 000000000000..92816a35721e
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_sysfs.c
@@ -0,0 +1,353 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "gbe/mvPp2Gbe.h"
+#include "gmac/mvEthGmacApi.h"
+#include "prs/mvPp2Prs.h"
+#include "mv_netdev.h"
+#include "mv_eth_sysfs.h"
+
+
+static ssize_t mv_pp2_help(char *buf)
+{
+	int off = 0;
+
+	off += sprintf(buf+off, "cd                 bm          - move to BM sysfs directory\n");
+	off += sprintf(buf+off, "cd                 rx          - move to RX sysfs directory\n");
+	off += sprintf(buf+off, "cd                 tx          - move to TX sysfs directory\n");
+	off += sprintf(buf+off, "cd                 tx_sched    - move to TX Scheduler sysfs directory\n");
+	off += sprintf(buf+off, "cd                 pon         - move to PON sysfs directory\n");
+	off += sprintf(buf+off, "cd                 pme         - move to PME sysfs directory\n");
+	off += sprintf(buf+off, "cd                 qos         - move to QoS sysfs directory\n\n");
+
+#ifdef CONFIG_MV_PP2_HWF
+	off += sprintf(buf+off, "cd                 qos         - move to QoS sysfs directory\n\n");
+#endif
+	off += sprintf(buf+off, "cat                addrDec     - show address decode registers\n");
+	off += sprintf(buf+off, "echo [p]         > port        - show port [p] status\n");
+	off += sprintf(buf+off, "echo [if_name]   > netdev      - show [if_name] net_device status\n");
+	off += sprintf(buf+off, "echo [p]         > cntrs       - show port [p] MIB counters\n");
+	off += sprintf(buf+off, "echo [p]         > stats       - show port [p] statistics\n");
+	off += sprintf(buf+off, "echo [p]         > gmacRegs    - show GMAC registers for port [p]\n");
+	off += sprintf(buf+off, "echo [p]         > isrRegs     - show ISR registers for port [p]\n");
+	off += sprintf(buf+off, "echo [p]         > dropCntrs   - show drop counters for port [p]\n");
+
+	off += sprintf(buf+off, "echo [0|1]       > pnc         - enable / disable Parser and Classifier access\n");
+	off += sprintf(buf+off, "echo [0|1]       > skb         - enable / disable skb recycle\n");
+
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+	off += sprintf(buf+off, "echo [p] [hex]   > debug       - b0:rx, b1:tx, b2:isr, b3:poll, b4:dump, b5:b_hdr\n");
+#endif
+#ifdef CONFIG_PM
+	off += sprintf(buf+off, "echo [p] [hex]   > pm_mode     - set port <p> pm mode. 0 wol, 1 suspend.\n");
+#endif
+
+	return off;
+}
+
+static ssize_t mv_pp2_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	const char      *name = attr->attr.name;
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "addrDec"))
+		/*mvPp2AddressDecodeRegsPrint();*/
+		mvPp2AddrDecodeRegs();
+	else
+		off = mv_pp2_help(buf);
+
+	return off;
+}
+
+static ssize_t mv_pp2_port_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, v;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read port and value */
+	err = p = v = 0;
+	sscanf(buf, "%d %d", &p, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "port")) {
+		mv_pp2_status_print();
+		mv_pp2_eth_port_status_print(p);
+		mvPp2PortStatus(p);
+	} else if (!strcmp(name, "cntrs")) {
+		if (!MV_PP2_IS_PON_PORT(p))
+			mvGmacMibCountersShow(p);
+		else
+			printk(KERN_ERR "sysfs command %s is not supported for xPON port %d\n",
+				name, p);
+	} else if (!strcmp(name, "isrRegs")) {
+		mvPp2IsrRegs(p);
+	} else if (!strcmp(name, "gmacRegs")) {
+		mvGmacLmsRegs();
+		mvGmacPortRegs(p);
+	} else if (!strcmp(name, "dropCntrs")) {
+#ifdef CONFIG_MV_ETH_PP2_1
+		mvPp2V1DropCntrs(p);
+#else
+		mvPp2V0DropCntrs(p);
+#endif
+	} else if (!strcmp(name, "stats")) {
+		mv_pp2_port_stats_print(p);
+	} else if (!strcmp(name, "mac")) {
+		mv_pp2_mac_show(p);
+	} else if (!strcmp(name, "pnc")) {
+		mv_pp2_ctrl_pnc(p);
+	} else if (!strcmp(name, "skb")) {
+		mv_pp2_eth_ctrl_recycle(p);
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_pp2_2_hex_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, v;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read port and value */
+	err = p = v = 0;
+	sscanf(buf, "%d %x", &p, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "debug")) {
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+		err = mv_pp2_ctrl_dbg_flag(p, MV_ETH_F_DBG_RX,   v & 0x1);
+		err = mv_pp2_ctrl_dbg_flag(p, MV_ETH_F_DBG_TX,   v & 0x2);
+		err = mv_pp2_ctrl_dbg_flag(p, MV_ETH_F_DBG_ISR,  v & 0x4);
+		err = mv_pp2_ctrl_dbg_flag(p, MV_ETH_F_DBG_POLL, v & 0x8);
+		err = mv_pp2_ctrl_dbg_flag(p, MV_ETH_F_DBG_DUMP, v & 0x10);
+		err = mv_pp2_ctrl_dbg_flag(p, MV_ETH_F_DBG_BUFF_HDR, v & 0x20);
+#endif
+	} else if (!strcmp(name, "pm_mode")) {
+#ifdef CONFIG_PM
+		err = mv_pp2_pm_mode_set(p, v);
+#endif
+	}
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_pp2_netdev_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char *name = attr->attr.name;
+	int err = 0;
+	char dev_name[IFNAMSIZ];
+	struct net_device *netdev;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%s", dev_name);
+	netdev = dev_get_by_name(&init_net, dev_name);
+	if (netdev == NULL) {
+		printk(KERN_ERR "%s: network interface <%s> doesn't exist\n", __func__, dev_name);
+		err = 1;
+	} else {
+		if (!strcmp(name, "netdev"))
+			mv_pp2_eth_netdev_print(netdev);
+		else {
+			err = 1;
+			printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+		}
+		dev_put(netdev);
+	}
+	if (err)
+		printk(KERN_ERR "%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_pp2_reg_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    r, v;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read port and value */
+	err = r = v = 0;
+	sscanf(buf, "%x %x", &r, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "regRead")) {
+		v = mvPp2RdReg(r);
+		printk(KERN_INFO "regRead val: 0x%08x\n", v);
+	}  else if (!strcmp(name, "regWrite")) {
+		mvPp2WrReg(r, v);
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(addrDec,	S_IRUSR, mv_pp2_show, NULL);
+static DEVICE_ATTR(help,	S_IRUSR, mv_pp2_show, NULL);
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+static DEVICE_ATTR(debug,	S_IWUSR, NULL, mv_pp2_2_hex_store);
+#endif
+static DEVICE_ATTR(isrRegs,	S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(gmacRegs,	S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(dropCntrs,	S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(stats,       S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(mac,	S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(pnc,		S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(skb,         S_IWUSR, NULL, mv_pp2_port_store);
+
+static DEVICE_ATTR(port,	S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(cntrs,	S_IWUSR, NULL, mv_pp2_port_store);
+static DEVICE_ATTR(netdev,	S_IWUSR, NULL, mv_pp2_netdev_store);
+
+static DEVICE_ATTR(regRead,       S_IWUSR, NULL, mv_pp2_reg_store);
+static DEVICE_ATTR(regWrite,      S_IWUSR, NULL, mv_pp2_reg_store);
+#ifdef CONFIG_PM
+static DEVICE_ATTR(pm_mode,	S_IWUSR, NULL, mv_pp2_2_hex_store);
+#endif
+
+static struct attribute *mv_pp2_attrs[] = {
+	&dev_attr_addrDec.attr,
+	&dev_attr_help.attr,
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+	&dev_attr_debug.attr,
+#endif
+	&dev_attr_port.attr,
+	&dev_attr_cntrs.attr,
+	&dev_attr_netdev.attr,
+	&dev_attr_isrRegs.attr,
+	&dev_attr_gmacRegs.attr,
+	&dev_attr_dropCntrs.attr,
+	&dev_attr_stats.attr,
+	&dev_attr_pnc.attr,
+	&dev_attr_skb.attr,
+	&dev_attr_regRead.attr,
+	&dev_attr_regWrite.attr,
+#ifdef CONFIG_PM
+	&dev_attr_pm_mode.attr,
+#endif
+	NULL
+};
+
+static struct attribute_group mv_pp2_group = {
+	.attrs = mv_pp2_attrs,
+};
+
+static struct kobject *gbe_kobj;
+
+int mv_pp2_gbe_sysfs_init(struct kobject *pp2_kobj)
+{
+	int err;
+
+	gbe_kobj = kobject_create_and_add("gbe", pp2_kobj);
+	if (!gbe_kobj) {
+		printk(KERN_ERR"%s: cannot create gbe kobject\n", __func__);
+		return -ENOMEM;
+	}
+
+	err = sysfs_create_group(gbe_kobj, &mv_pp2_group);
+	if (err) {
+		printk(KERN_INFO "sysfs group failed %d\n", err);
+		return err;
+	}
+
+	mv_pp2_bm_sysfs_init(gbe_kobj);
+	mv_pp2_rx_sysfs_init(gbe_kobj);
+	mv_pp2_tx_sysfs_init(gbe_kobj);
+	mv_pp2_tx_sched_sysfs_init(gbe_kobj);
+	mv_pp2_qos_sysfs_init(gbe_kobj);
+	mv_pp2_pon_sysfs_init(gbe_kobj);
+	mv_pp2_gbe_pme_sysfs_init(gbe_kobj);
+#ifdef CONFIG_MV_PP2_HWF
+	mv_pp2_gbe_hwf_sysfs_init(gbe_kobj);
+#endif
+	return err;
+}
+
+int mv_pp2_gbe_sysfs_exit(struct kobject *pp2_kobj)
+{
+	mv_pp2_gbe_pme_sysfs_exit(gbe_kobj);
+	mv_pp2_pon_sysfs_exit(gbe_kobj);
+	mv_pp2_qos_sysfs_exit(gbe_kobj);
+	mv_pp2_tx_sched_sysfs_exit(gbe_kobj);
+	mv_pp2_tx_sysfs_exit(gbe_kobj);
+	mv_pp2_rx_sysfs_exit(gbe_kobj);
+	mv_pp2_bm_sysfs_exit(gbe_kobj);
+#ifdef CONFIG_MV_PP2_HWF
+	mv_pp2_gbe_hwf_sysfs_exit(gbe_kobj);
+#endif
+	sysfs_remove_group(pp2_kobj, &mv_pp2_group);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_sysfs.h b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_sysfs.h
new file mode 100644
index 000000000000..b2e121c2c37b
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_sysfs.h
@@ -0,0 +1,108 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#ifndef __mv_eth_sysfs_h__
+#define __mv_eth_sysfs_h__
+
+int mv_mux_sysfs_init(struct kobject *pp2_kobj);
+
+/* Subdirectories of pp2 menu */
+int mv_pp2_prs_low_sysfs_init(struct kobject *pp2_kobj);
+int mv_pp2_prs_low_sysfs_exit(struct kobject *pp2_kobj);
+
+int mv_pp2_prs_high_sysfs_init(struct kobject *pp2_kobj);
+int mv_pp2_prs_high_sysfs_exit(struct kobject *pp2_kobj);
+
+int mv_pp2_cls_sysfs_init(struct kobject *pp2_kobj);
+int mv_pp2_cls_sysfs_exit(struct kobject *pp2_kobj);
+
+int mv_pp2_cls2_sysfs_init(struct kobject *pp2_kobj);
+int mv_pp2_cls2_sysfs_exit(struct kobject *pp2_kobj);
+
+int mv_pp2_cls3_sysfs_init(struct kobject *pp2_kobj);
+int mv_pp2_cls3_sysfs_exit(struct kobject *pp2_kobj);
+
+int mv_pp2_cls4_sysfs_init(struct kobject *pp2_kobj);
+int mv_pp2_cls4_sysfs_exit(struct kobject *pp2_kobj);
+
+int mv_pp2_mc_sysfs_init(struct kobject *pp2_kobj);
+int mv_pp2_mc_sysfs_exit(struct kobject *pp2_kobj);
+
+int mv_pp2_pme_sysfs_init(struct kobject *pp2_kobj);
+int mv_pp2_pme_sysfs_exit(struct kobject *pp2_kobj);
+
+int mv_pp2_plcr_sysfs_init(struct kobject *pp2_kobj);
+int mv_pp2_plcr_sysfs_exit(struct kobject *pp2_kobj);
+
+int mv_pp2_gbe_sysfs_init(struct kobject *pp2_kobj);
+int mv_pp2_gbe_sysfs_exit(struct kobject *pp2_kobj);
+
+/* Subdirectories of gbe menu */
+int mv_pp2_bm_sysfs_init(struct kobject *gbe_kobj);
+int mv_pp2_bm_sysfs_exit(struct kobject *gbe_kobj);
+
+int mv_pp2_napi_sysfs_init(struct kobject *pp2_kobj);
+int mv_pp2_napi_sysfs_exit(struct kobject *pp2_kobj);
+
+int mv_pp2_rx_sysfs_init(struct kobject *pp2_kobj);
+int mv_pp2_rx_sysfs_exit(struct kobject *pp2_kobj);
+
+int mv_pp2_tx_sysfs_init(struct kobject *pp2_kobj);
+int mv_pp2_tx_sysfs_exit(struct kobject *pp2_kobj);
+
+int mv_pp2_tx_sched_sysfs_init(struct kobject *pp2_kobj);
+int mv_pp2_tx_sched_sysfs_exit(struct kobject *pp2_kobj);
+
+int mv_pp2_qos_sysfs_init(struct kobject *pp2_kobj);
+int mv_pp2_qos_sysfs_exit(struct kobject *pp2_kobj);
+
+int mv_pp2_pon_sysfs_init(struct kobject *pp2_kobj);
+int mv_pp2_pon_sysfs_exit(struct kobject *pp2_kobj);
+
+int mv_pp2_gbe_pme_sysfs_init(struct kobject *gbe_kobj);
+int mv_pp2_gbe_pme_sysfs_exit(struct kobject *gbe_kobj);
+
+#ifdef CONFIG_MV_PP2_HWF
+int mv_pp2_gbe_hwf_sysfs_init(struct kobject *gbe_kobj);
+int mv_pp2_gbe_hwf_sysfs_exit(struct kobject *gbe_kobj);
+#endif /* CONFIG_MV_PP2_HWF */
+
+int mv_pp2_dbg_sysfs_init(struct kobject *pp2_kobj);
+int mv_pp2_dbg_sysfs_exit(struct kobject *pp2_kobj);
+
+int mv_pp2_wol_sysfs_init(struct kobject *pp2_kobj);
+int mv_pp2_wol_sysfs_exit(struct kobject *pp2_kobj);
+
+int mv_pp2_dpi_sysfs_init(struct kobject *pp2_kobj);
+int mv_pp2_dpi_sysfs_exit(struct kobject *pp2_kobj);
+
+#ifdef CONFIG_MV_PP2_L2FW
+int mv_pp2_l2fw_sysfs_init(struct kobject *pp2_kobj);
+int mv_pp2_l2fw_sysfs_exit(struct kobject *pp2_kobj);
+#endif
+
+#endif /* __mv_eth_sysfs_h__ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_tool.c b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_tool.c
new file mode 100644
index 000000000000..8682331f978e
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_tool.c
@@ -0,0 +1,1261 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <linux/mii.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
+#include <asm/arch/system.h>
+#else
+#include <asm/system.h>
+#endif
+
+#include "mvOs.h"
+#include "mvDebug.h"
+#include "mvCommon.h"
+#include "mvEthPhy.h"
+#include "gmac/mvEthGmacApi.h"
+
+#include "gbe/mvPp2Gbe.h"
+#include "bm/mvBm.h"
+
+#include "mv_netdev.h"
+
+#include "prs/mvPp2Prs.h"
+
+#include "wol/mvPp2Wol.h"
+
+
+#define MV_ETH_TOOL_AN_TIMEOUT	5000
+
+struct mv_pp2_tool_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int stat_offset;
+};
+
+#define MV_ETH_TOOL_STAT(m)	offsetof(struct eth_port, m)
+
+static const struct mv_pp2_tool_stats mv_pp2_tool_global_strings_stats[] = {
+#ifdef CONFIG_MV_PP2_STAT_ERR
+	{"rx_error", MV_ETH_TOOL_STAT(stats.rx_error)},
+	{"tx_timeout", MV_ETH_TOOL_STAT(stats.tx_timeout)},
+	{"state_err", MV_ETH_TOOL_STAT(stats.state_err)},
+#endif
+#ifdef CONFIG_MV_PP2_STAT_INF
+	{"tx_done", MV_ETH_TOOL_STAT(stats.tx_done)},
+	{"link", MV_ETH_TOOL_STAT(stats.link)},
+	{"netdev_stop", MV_ETH_TOOL_STAT(stats.netdev_stop)},
+	{"rx_buf_hdr", MV_ETH_TOOL_STAT(stats.rx_buf_hdr)},
+#ifdef CONFIG_MV_PP2_RX_SPECIAL
+	{"rx_special", MV_ETH_TOOL_STAT(stats.rx_special)},
+#endif
+#ifdef CONFIG_MV_PP2_TX_SPECIAL
+	{"tx_special", MV_ETH_TOOL_STAT(stats.tx_special)},
+#endif
+#endif
+#ifdef CONFIG_MV_PP2_STAT_DBG
+	{"rx_tagged", MV_ETH_TOOL_STAT(stats.rx_tagged)},
+	{"rx_netif", MV_ETH_TOOL_STAT(stats.rx_netif)},
+	{"rx_gro", MV_ETH_TOOL_STAT(stats.rx_gro)},
+	{"rx_gro_bytes", MV_ETH_TOOL_STAT(stats.rx_gro_bytes)},
+	{"rx_drop_sw", MV_ETH_TOOL_STAT(stats.rx_drop_sw)},
+	{"rx_csum_hw", MV_ETH_TOOL_STAT(stats.rx_csum_hw)},
+	{"rx_csum_sw", MV_ETH_TOOL_STAT(stats.rx_csum_sw)},
+	{"tx_csum_hw", MV_ETH_TOOL_STAT(stats.tx_csum_hw)},
+	{"tx_csum_sw", MV_ETH_TOOL_STAT(stats.tx_csum_sw)},
+	{"tx_skb_free", MV_ETH_TOOL_STAT(stats.tx_skb_free)},
+	{"tx_sg", MV_ETH_TOOL_STAT(stats.tx_sg)},
+	{"tx_tso", MV_ETH_TOOL_STAT(stats.tx_tso)},
+	{"tx_tso_no_resource", MV_ETH_TOOL_STAT(stats.tx_tso_no_resource)},
+	{"tx_tso_bytes", MV_ETH_TOOL_STAT(stats.tx_tso_bytes)},
+#endif
+	{"rate_current", MV_ETH_TOOL_STAT(rate_current)},
+};
+
+static const struct mv_pp2_tool_stats mv_pp2_tool_cpu_strings_stats[] = {
+#ifdef CONFIG_MV_ETH_STATS_DEBUG
+	{"irq", MV_ETH_TOOL_STAT(stats.irq)},
+	{"irq_err", MV_ETH_TOOL_STAT(stats.irq_err)},
+	{"poll", MV_ETH_TOOL_STAT(stats.poll)},
+	{"poll_exit", MV_ETH_TOOL_STAT(stats.poll_exit)},
+	{"tx_done_timer_event", MV_ETH_TOOL_STAT(stats.tx_done_timer_event)},
+	{"tx_done_timer_add", MV_ETH_TOOL_STAT(stats.tx_done_timer_add)},
+#endif /* CONFIG_MV_ETH_STATS_DEBUG */
+};
+
+static const struct mv_pp2_tool_stats mv_pp2_tool_rx_queue_strings_stats[] = {
+#ifdef CONFIG_MV_PP2_STAT_DBG
+	{"rxq", MV_ETH_TOOL_STAT(stats.rxq)},
+#endif /* CONFIG_MV_PP2_STAT_DBG */
+};
+
+static const struct mv_pp2_tool_stats mv_pp2_tool_tx_queue_strings_stats[] = {
+};
+
+#define MV_ETH_TOOL_CPU_STATS_LEN	\
+	(sizeof(mv_pp2_tool_cpu_strings_stats) / sizeof(struct mv_pp2_tool_stats))
+
+#define MV_ETH_TOOL_RX_QUEUE_STATS_LEN	\
+	(sizeof(mv_pp2_tool_rx_queue_strings_stats) / sizeof(struct mv_pp2_tool_stats))
+
+#define MV_ETH_TOOL_TX_QUEUE_STATS_LEN	\
+	(sizeof(mv_pp2_tool_tx_queue_strings_stats) / sizeof(struct mv_pp2_tool_stats))
+
+#define MV_ETH_TOOL_QUEUE_STATS_LEN	\
+	((MV_ETH_TOOL_RX_QUEUE_STATS_LEN * CONFIG_MV_PP2_RXQ) + \
+	(MV_ETH_TOOL_TX_QUEUE_STATS_LEN * CONFIG_MV_PP2_TXQ))
+
+#define MV_ETH_TOOL_GLOBAL_STATS_LEN	\
+	(sizeof(mv_pp2_tool_global_strings_stats) / sizeof(struct mv_pp2_tool_stats))
+
+#define MV_ETH_TOOL_STATS_LEN		\
+	(MV_ETH_TOOL_GLOBAL_STATS_LEN + MV_ETH_TOOL_CPU_STATS_LEN + MV_ETH_TOOL_QUEUE_STATS_LEN)
+
+/******************************************************************************
+* mv_pp2_eth_tool_get_settings
+* Description:
+*	ethtool get standard port settings
+* INPUT:
+*	netdev		Network device structure pointer
+* OUTPUT
+*	cmd		command (settings)
+* RETURN:
+*	0 for success
+*
+*******************************************************************************/
+int mv_pp2_eth_tool_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+	struct eth_port 	*priv = MV_ETH_PRIV(netdev);
+	u16			lp_ad, stat1000;
+	MV_U32			phy_addr;
+	MV_ETH_PORT_SPEED 	speed;
+	MV_ETH_PORT_DUPLEX 	duplex;
+	MV_ETH_PORT_STATUS      status;
+
+	if ((priv == NULL) || (MV_PP2_IS_PON_PORT(priv->port))) {
+		printk(KERN_ERR "%s is not supported on %s\n", __func__, netdev->name);
+		return -EOPNOTSUPP;
+	}
+
+	cmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half
+			| SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII
+			| SUPPORTED_1000baseT_Full);
+
+	phy_addr = priv->plat_data->phy_addr;
+
+	mvGmacLinkStatus(priv->port, &status);
+
+	if (status.linkup != MV_TRUE) {
+		/* set to Unknown */
+		cmd->speed  = priv->speed_cfg;
+		cmd->duplex = priv->duplex_cfg;
+	} else {
+		switch (status.speed) {
+		case MV_ETH_SPEED_1000:
+			cmd->speed = SPEED_1000;
+			break;
+		case MV_ETH_SPEED_100:
+			cmd->speed = SPEED_100;
+			break;
+		case MV_ETH_SPEED_10:
+			cmd->speed = SPEED_10;
+			break;
+		default:
+			return -EINVAL;
+		}
+		if (status.duplex == MV_ETH_DUPLEX_FULL)
+			cmd->duplex = 1;
+		else
+			cmd->duplex = 0;
+	}
+
+	cmd->port = PORT_MII;
+	cmd->phy_address = phy_addr;
+	cmd->transceiver = XCVR_INTERNAL;
+	/* check if speed and duplex are AN */
+	mvGmacSpeedDuplexGet(priv->port, &speed, &duplex);
+	if (speed == MV_ETH_SPEED_AN && duplex == MV_ETH_DUPLEX_AN) {
+		cmd->lp_advertising = cmd->advertising = 0;
+		cmd->autoneg = AUTONEG_ENABLE;
+		mvEthPhyAdvertiseGet(phy_addr, (MV_U16 *)&(cmd->advertising));
+
+		mvEthPhyRegRead(phy_addr, MII_LPA, &lp_ad);
+		if (lp_ad & LPA_LPACK)
+			cmd->lp_advertising |= ADVERTISED_Autoneg;
+		if (lp_ad & ADVERTISE_10HALF)
+			cmd->lp_advertising |= ADVERTISED_10baseT_Half;
+		if (lp_ad & ADVERTISE_10FULL)
+			cmd->lp_advertising |= ADVERTISED_10baseT_Full;
+		if (lp_ad & ADVERTISE_100HALF)
+			cmd->lp_advertising |= ADVERTISED_100baseT_Half;
+		if (lp_ad & ADVERTISE_100FULL)
+			cmd->lp_advertising |= ADVERTISED_100baseT_Full;
+
+		mvEthPhyRegRead(phy_addr, MII_STAT1000, &stat1000);
+		if (stat1000 & LPA_1000HALF)
+			cmd->lp_advertising |= ADVERTISED_1000baseT_Half;
+		if (stat1000 & LPA_1000FULL)
+			cmd->lp_advertising |= ADVERTISED_1000baseT_Full;
+	} else
+		cmd->autoneg = AUTONEG_DISABLE;
+
+	return 0;
+}
+
+
+/******************************************************************************
+* mv_pp2_eth_tool_restore_settings
+* Description:
+*	restore saved speed/dublex/an settings
+* INPUT:
+*	netdev		Network device structure pointer
+* OUTPUT
+*	None
+* RETURN:
+*	0 for success
+*
+*******************************************************************************/
+int mv_pp2_eth_tool_restore_settings(struct net_device *netdev)
+{
+	struct eth_port 	*priv = MV_ETH_PRIV(netdev);
+	int			phy_speed, phy_duplex;
+	MV_U32			phy_addr = priv->plat_data->phy_addr;
+	MV_ETH_PORT_SPEED	mac_speed;
+	MV_ETH_PORT_DUPLEX	mac_duplex;
+	int			err = -EINVAL;
+
+	 if (priv == NULL)
+		 return -EOPNOTSUPP;
+
+	switch (priv->speed_cfg) {
+	case SPEED_10:
+		phy_speed  = 0;
+		mac_speed = MV_ETH_SPEED_10;
+		break;
+	case SPEED_100:
+		phy_speed  = 1;
+		mac_speed = MV_ETH_SPEED_100;
+		break;
+	case SPEED_1000:
+		phy_speed  = 2;
+		mac_speed = MV_ETH_SPEED_1000;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (priv->duplex_cfg) {
+	case DUPLEX_HALF:
+		phy_duplex = 0;
+		mac_duplex = MV_ETH_DUPLEX_HALF;
+		break;
+	case DUPLEX_FULL:
+		phy_duplex = 1;
+		mac_duplex = MV_ETH_DUPLEX_FULL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (priv->autoneg_cfg == AUTONEG_ENABLE) {
+		err = mvGmacSpeedDuplexSet(priv->port, MV_ETH_SPEED_AN, MV_ETH_DUPLEX_AN);
+		if (!err)
+			err = mvEthPhyAdvertiseSet(phy_addr, priv->advertise_cfg);
+		/* Restart AN on PHY enables it */
+		if (!err) {
+			err = mvEthPhyRestartAN(phy_addr, MV_ETH_TOOL_AN_TIMEOUT);
+			if (err == MV_TIMEOUT) {
+				MV_ETH_PORT_STATUS ps;
+
+				mvGmacLinkStatus(priv->port, &ps);
+
+				if (!ps.linkup)
+					err = 0;
+			}
+		}
+	} else if (priv->autoneg_cfg == AUTONEG_DISABLE) {
+		err = mvEthPhyDisableAN(phy_addr, phy_speed, phy_duplex);
+		if (!err)
+			err = mvGmacSpeedDuplexSet(priv->port, mac_speed, mac_duplex);
+	} else {
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+
+
+/******************************************************************************
+* mv_pp2_eth_tool_set_settings
+* Description:
+*	ethtool set standard port settings
+* INPUT:
+*	netdev		Network device structure pointer
+*	cmd		command (settings)
+* OUTPUT
+*	None
+* RETURN:
+*	0 for success
+*
+*******************************************************************************/
+int mv_pp2_eth_tool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct eth_port *priv = MV_ETH_PRIV(dev);
+	int _speed, _duplex, _autoneg, _advertise, err;
+
+	if ((priv == NULL) || (MV_PP2_IS_PON_PORT(priv->port))) {
+		printk(KERN_ERR "%s is not supported on %s\n", __func__, dev->name);
+		return -EOPNOTSUPP;
+	}
+
+	_duplex  = priv->duplex_cfg;
+	_speed   = priv->speed_cfg;
+	_autoneg = priv->autoneg_cfg;
+	_advertise = priv->advertise_cfg;
+
+	priv->duplex_cfg = cmd->duplex;
+	priv->speed_cfg = cmd->speed;
+	priv->autoneg_cfg = cmd->autoneg;
+	priv->advertise_cfg = cmd->advertising;
+	err = mv_pp2_eth_tool_restore_settings(dev);
+
+	if (err) {
+		priv->duplex_cfg = _duplex;
+		priv->speed_cfg = _speed;
+		priv->autoneg_cfg = _autoneg;
+		priv->advertise_cfg = _advertise;
+	}
+	return err;
+}
+
+/******************************************************************************
+* mv_pp2_eth_tool_get_regs_len
+* Description:
+*	ethtool get registers array length
+* INPUT:
+*	netdev		Network device structure pointer
+* OUTPUT
+*	None
+* RETURN:
+*	registers array length
+*
+*******************************************************************************/
+int mv_pp2_eth_tool_get_regs_len(struct net_device *netdev)
+{
+#define MV_ETH_TOOL_REGS_LEN 42
+
+	return (MV_ETH_TOOL_REGS_LEN * sizeof(uint32_t));
+}
+
+/******************************************************************************
+* mv_pp2_eth_tool_get_wol
+* Description:
+*	ethtool get WOL information
+* INPUT:
+*	netdev		Network device structure pointer
+* OUTPUT
+*	wolinfo		WOL info
+* RETURN:
+*	0 on success
+*
+*******************************************************************************/
+void mv_pp2_eth_tool_get_wol(struct net_device *netdev,
+			 struct ethtool_wolinfo *wolinfo)
+{
+	struct eth_port	*priv = MV_ETH_PRIV(netdev);
+
+	if (priv == NULL) {
+		pr_err("%s is not supported on %s\n", __func__, netdev->name);
+		return;
+	}
+
+	wolinfo->supported = WAKE_ARP | WAKE_UCAST | WAKE_MAGIC;
+	wolinfo->wolopts = 0;
+
+	if (priv->wol & (MV_PP2_WOL_ARP_IP_MASK(0) | MV_PP2_WOL_ARP_IP_MASK(1)))
+		wolinfo->wolopts |= WAKE_ARP;
+
+	if (priv->wol & MV_PP2_WOL_UCAST_MASK)
+		wolinfo->wolopts |= WAKE_UCAST;
+	if (priv->wol & MV_PP2_WOL_MAGIC_PTRN_MASK)
+		wolinfo->wolopts |= WAKE_MAGIC;
+}
+
+/******************************************************************************
+* mv_pp2_eth_tool_set_wol
+* Description:
+*	ethtool set WOL
+* INPUT:
+*	netdev		Network device structure pointer
+*	wolinfo		WOL settings
+* OUTPUT
+*	None
+* RETURN:
+*	None
+*
+*******************************************************************************/
+int mv_pp2_eth_tool_set_wol(struct net_device *netdev,
+			 struct ethtool_wolinfo *wolinfo)
+{
+	int ret;
+	struct eth_port	*priv = MV_ETH_PRIV(netdev);
+
+	if (priv == NULL) {
+		pr_err("%s is not supported on %s\n", __func__, netdev->name);
+		return -EOPNOTSUPP;
+	}
+
+	if (wolinfo->wolopts & (WAKE_PHY | WAKE_MCAST | WAKE_BCAST | WAKE_MAGICSECURE))
+		return -EOPNOTSUPP;
+
+	/* these settings will always override what we currently have */
+	priv->wol = 0;
+	/* Clearn all settings before if have */
+	ret = mvPp2WolWakeup();
+	if (ret)
+		return ret;
+
+	if (wolinfo->wolopts & WAKE_UCAST) {
+		priv->wol |= MV_PP2_WOL_UCAST_MASK;
+		/* Enable WoL Ucast event */
+		ret = mvPp2WolUcastEventSet(WOL_EVENT_EN);
+		if (ret)
+			return ret;
+	}
+
+	if (wolinfo->wolopts & WAKE_ARP) {
+		/* Even port num use ARP0; Odd port num use ARP1 */
+		priv->wol |= MV_PP2_WOL_ARP_IP_MASK((priv->port) % MV_PP2_WOL_ARP_IP_NUM);
+		/* Set WoL ARP Address; TODO */
+		/* Enable WoL ARP event */
+		ret = mvPp2WolArpEventSet((priv->port) % MV_PP2_WOL_ARP_IP_NUM, WOL_EVENT_EN);
+		if (ret)
+			return ret;
+	}
+
+	if (wolinfo->wolopts & WAKE_MAGIC) {
+		priv->wol |= MV_PP2_WOL_MAGIC_PTRN_MASK;
+		/* Set Magic MAC, the MAC of the last port configured by ethtool will be the Magic MAC */
+		ret = mvPp2WolMagicDaSet(netdev->dev_addr);
+		if (ret)
+			return ret;
+		/* Enable WoL Magic event */
+		ret = mvPp2WolMagicEventSet(WOL_EVENT_EN);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/******************************************************************************
+* mv_pp2_eth_tool_get_drvinfo
+* Description:
+*	ethtool get driver information
+* INPUT:
+*	netdev		Network device structure pointer
+*	info		driver information
+* OUTPUT
+*	info		driver information
+* RETURN:
+*	None
+*
+*******************************************************************************/
+void mv_pp2_eth_tool_get_drvinfo(struct net_device *netdev,
+			     struct ethtool_drvinfo *info)
+{
+	strcpy(info->driver, "mv_eth");
+	strcpy(info->fw_version, "N/A");
+	strcpy(info->bus_info, "Mbus");
+	info->n_stats = MV_ETH_TOOL_STATS_LEN;
+	info->testinfo_len = 0;
+	info->regdump_len = mv_pp2_eth_tool_get_regs_len(netdev);
+	info->eedump_len = 0;
+}
+
+
+/******************************************************************************
+* mv_pp2_eth_tool_get_regs
+* Description:
+*	ethtool get registers array
+* INPUT:
+*	netdev		Network device structure pointer
+*	regs		registers information
+* OUTPUT
+*	p		registers array
+* RETURN:
+*	None
+*
+*******************************************************************************/
+void mv_pp2_eth_tool_get_regs(struct net_device *netdev,
+			  struct ethtool_regs *regs, void *p)
+{
+	struct eth_port *priv = MV_ETH_PRIV(netdev);
+	uint32_t	*regs_buff = p;
+
+	if ((priv == NULL) || MV_PP2_IS_PON_PORT(priv->port)) {
+		printk(KERN_ERR "%s is not supported on %s\n", __func__, netdev->name);
+		return;
+	}
+
+	memset(p, 0, MV_ETH_TOOL_REGS_LEN * sizeof(uint32_t));
+
+	regs->version = priv->plat_data->ctrl_rev;
+
+	/* ETH port registers */
+	regs_buff[0]  = MV_32BIT_BE(MV_REG_READ(ETH_GMAC_CTRL_0_REG(priv->port)));
+	regs_buff[1]  = MV_32BIT_BE(MV_REG_READ(ETH_GMAC_CTRL_1_REG(priv->port)));
+	regs_buff[2]  = MV_32BIT_BE(MV_REG_READ(ETH_GMAC_CTRL_2_REG(priv->port)));
+	regs_buff[3]  = MV_32BIT_BE(MV_REG_READ(ETH_GMAC_AN_CTRL_REG(priv->port)));
+	regs_buff[4]  = MV_32BIT_BE(MV_REG_READ(ETH_GMAC_STATUS_REG(priv->port)));
+	regs_buff[6]  = MV_32BIT_BE(MV_REG_READ(GMAC_PORT_FIFO_CFG_0_REG(priv->port)));
+	regs_buff[7]  = MV_32BIT_BE(MV_REG_READ(GMAC_PORT_FIFO_CFG_1_REG(priv->port)));
+	regs_buff[8]  = MV_32BIT_BE(MV_REG_READ(ETH_PORT_ISR_CAUSE_REG(priv->port)));
+	regs_buff[9]  = MV_32BIT_BE(MV_REG_READ(ETH_PORT_ISR_MASK_REG(priv->port)));
+	regs_buff[17] = MV_32BIT_BE(MV_REG_READ(ETH_GMAC_MIB_CTRL_REG(priv->port)));
+	regs_buff[18] = MV_32BIT_BE(MV_REG_READ(ETH_GMAC_CTRL_3_REG(priv->port)));
+	regs_buff[22] = MV_32BIT_BE(MV_REG_READ(ETH_GMAC_SPEED_TIMER_REG(priv->port)));
+	regs_buff[36] = MV_32BIT_BE(MV_REG_READ(ETH_GMAC_CTRL_4_REG(priv->port)));
+	regs_buff[40] = MV_32BIT_BE(MV_REG_READ(ETH_PORT_ISR_SUM_CAUSE_REG(priv->port)));
+	regs_buff[41] = MV_32BIT_BE(MV_REG_READ(ETH_PORT_ISR_SUM_MASK_REG(priv->port)));
+}
+
+
+
+/******************************************************************************
+* mv_pp2_eth_tool_nway_reset
+* Description:
+*	ethtool restart auto negotiation
+* INPUT:
+*	netdev		Network device structure pointer
+* OUTPUT
+*	None
+* RETURN:
+*	0 on success
+*
+*******************************************************************************/
+int mv_pp2_eth_tool_nway_reset(struct net_device *netdev)
+{
+	struct eth_port *priv = MV_ETH_PRIV(netdev);
+	MV_U32	        phy_addr;
+
+	if ((priv == NULL) || (MV_PP2_IS_PON_PORT(priv->port))) {
+		printk(KERN_ERR "interface %s is not supported\n", netdev->name);
+		return -EOPNOTSUPP;
+	}
+
+	phy_addr = priv->plat_data->phy_addr;
+	if (mvEthPhyRestartAN(phy_addr, MV_ETH_TOOL_AN_TIMEOUT) != MV_OK)
+		return -EINVAL;
+
+	return 0;
+}
+
+
+/******************************************************************************
+* mv_pp2_eth_tool_get_link
+* Description:
+*	ethtool get link status
+* INPUT:
+*	netdev		Network device structure pointer
+* OUTPUT
+*	None
+* RETURN:
+*	0 if link is down, 1 if link is up
+*
+*******************************************************************************/
+u32 mv_pp2_eth_tool_get_link(struct net_device *netdev)
+{
+	struct eth_port     *pp = MV_ETH_PRIV(netdev);
+
+	if (pp == NULL) {
+		printk(KERN_ERR "interface %s is not supported\n", netdev->name);
+		return -EOPNOTSUPP;
+	}
+
+#ifdef CONFIG_MV_INCLUDE_PON
+	if (MV_PP2_IS_PON_PORT(pp->port))
+		return mv_pon_link_status(NULL);
+#endif /* CONFIG_MV_PON */
+
+	return mvGmacPortIsLinkUp(pp->port);
+}
+
+
+/******************************************************************************
+* mv_pp2_eth_tool_get_coalesce
+* Description:
+*	ethtool get RX/TX coalesce parameters
+* INPUT:
+*	netdev		Network device structure pointer
+* OUTPUT
+*	cmd		Coalesce parameters
+* RETURN:
+*	0 on success
+*
+*******************************************************************************/
+int mv_pp2_eth_tool_get_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *cmd)
+{
+	struct eth_port *pp = MV_ETH_PRIV(netdev);
+	/* get coal parameters only for rxq=0, txp=txq=0 !!!
+	   notice that if you use ethtool to set coal, then all queues have the same value */
+	cmd->rx_coalesce_usecs = pp->rx_time_coal_cfg;
+	cmd->rx_max_coalesced_frames = pp->rx_pkts_coal_cfg;
+#ifdef CONFIG_MV_PP2_TXDONE_ISR
+	cmd->tx_max_coalesced_frames = pp->tx_pkts_coal_cfg;
+#endif
+
+	/* Adaptive RX coalescing parameters */
+	cmd->rx_coalesce_usecs_low = pp->rx_time_low_coal_cfg;
+	cmd->rx_coalesce_usecs_high = pp->rx_time_high_coal_cfg;
+	cmd->pkt_rate_low = pp->pkt_rate_low_cfg;
+	cmd->pkt_rate_high = pp->pkt_rate_high_cfg;
+	cmd->rate_sample_interval = pp->rate_sample_cfg;
+	cmd->use_adaptive_rx_coalesce = pp->rx_adaptive_coal_cfg;
+	cmd->rx_max_coalesced_frames_low = pp->rx_pkts_low_coal_cfg;
+	cmd->rx_max_coalesced_frames_high = pp->rx_pkts_high_coal_cfg;
+
+	return 0;
+}
+
+/******************************************************************************
+* mv_pp2_eth_tool_set_coalesce
+* Description:
+*	ethtool set RX/TX coalesce parameters
+* INPUT:
+*	netdev		Network device structure pointer
+*	cmd		Coalesce parameters
+* OUTPUT
+*	None
+* RETURN:
+*	0 on success
+*
+*******************************************************************************/
+int mv_pp2_eth_tool_set_coalesce(struct net_device *netdev,
+			     struct ethtool_coalesce *cmd)
+{
+	struct eth_port *pp = MV_ETH_PRIV(netdev);
+	int rxq;
+
+	/* can't set rx coalesce with both 0 pkts and 0 usecs,  tx coalesce supports only pkts */
+	if (!cmd->rx_coalesce_usecs && !cmd->rx_max_coalesced_frames)
+		return -EPERM;
+#ifdef CONFIG_MV_PP2_TXDONE_ISR
+	if (!cmd->tx_max_coalesced_frames)
+		return -EPERM;
+#endif
+
+	if (!cmd->use_adaptive_rx_coalesce)
+		for (rxq = 0; rxq < CONFIG_MV_PP2_RXQ; rxq++) {
+			mv_pp2_rx_ptks_coal_set(pp->port, rxq, cmd->rx_max_coalesced_frames);
+			mv_pp2_rx_time_coal_set(pp->port, rxq, cmd->rx_coalesce_usecs);
+		}
+
+	pp->rx_time_coal_cfg = cmd->rx_coalesce_usecs;
+	pp->rx_pkts_coal_cfg = cmd->rx_max_coalesced_frames;
+#ifdef CONFIG_MV_PP2_TXDONE_ISR
+	{
+		int txp, txq;
+
+		for (txp = 0; txp < pp->txp_num; txp++)
+			for (txq = 0; txq < CONFIG_MV_PP2_TXQ; txq++)
+				mv_pp2_tx_done_ptks_coal_set(pp->port, txp, txq, cmd->tx_max_coalesced_frames);
+	}
+#endif
+	pp->tx_pkts_coal_cfg = cmd->tx_max_coalesced_frames;
+
+	/* Adaptive RX coalescing parameters */
+	pp->rx_time_low_coal_cfg = cmd->rx_coalesce_usecs_low;
+	pp->rx_time_high_coal_cfg = cmd->rx_coalesce_usecs_high;
+	pp->rx_pkts_low_coal_cfg = cmd->rx_max_coalesced_frames_low;
+	pp->rx_pkts_high_coal_cfg = cmd->rx_max_coalesced_frames_high;
+	pp->pkt_rate_low_cfg = cmd->pkt_rate_low;
+	pp->pkt_rate_high_cfg = cmd->pkt_rate_high;
+
+	if (cmd->rate_sample_interval > 0)
+		pp->rate_sample_cfg = cmd->rate_sample_interval;
+
+	/* check if adaptive rx is on - reset rate calculation parameters */
+	if (!pp->rx_adaptive_coal_cfg && cmd->use_adaptive_rx_coalesce) {
+		pp->rx_timestamp = jiffies;
+		pp->rx_rate_pkts = 0;
+	}
+	pp->rx_adaptive_coal_cfg = cmd->use_adaptive_rx_coalesce;
+
+	return 0;
+}
+
+
+/******************************************************************************
+* mv_pp2_eth_tool_get_ringparam
+* Description:
+*	ethtool get ring parameters
+* INPUT:
+*	netdev		Network device structure pointer
+* OUTPUT
+*	ring		Ring paranmeters
+* RETURN:
+*	None
+*
+*******************************************************************************/
+void mv_pp2_eth_tool_get_ringparam(struct net_device *netdev,
+				struct ethtool_ringparam *ring)
+{
+	struct eth_port *priv = MV_ETH_PRIV(netdev);
+
+	memset(ring, 0, sizeof(struct ethtool_ringparam));
+	ring->rx_pending = priv->rxq_ctrl[0].rxq_size;
+	ring->tx_pending = priv->txq_ctrl[0].txq_size;
+}
+
+/******************************************************************************
+* mv_pp2_eth_tool_set_ringparam
+* Description:
+*	ethtool set ring parameters
+* INPUT:
+*	netdev		Network device structure pointer
+* OUTPUT
+*	ring		Ring paranmeters
+* RETURN:
+*	None
+*
+*******************************************************************************/
+int mv_pp2_eth_tool_set_ringparam(struct net_device *netdev,
+				 struct ethtool_ringparam *ring)
+{
+	struct eth_port *priv = MV_ETH_PRIV(netdev);
+	int rxq, txp, txq, rxq_size, txq_size, swf_size, hwf_size, netdev_running = 0;
+
+	if (ring->rx_jumbo_pending || ring->rx_mini_pending)
+		return -EINVAL;
+
+	rxq_size = MV_ALIGN_UP(ring->rx_pending, 16);
+
+	/* Set minimum of 32, to save space for HWF as well */
+	txq_size = MV_ALIGN_UP(ring->tx_pending, 32);
+	/* Set HWF size to half of total TXQ size */
+
+	if (netif_running(netdev))
+		netdev_running = 1;
+
+	if (netdev_running)
+		mv_pp2_eth_stop(netdev);
+
+	if (rxq_size != priv->rxq_ctrl[0].rxq_size)
+		for (rxq = 0; rxq < priv->rxq_num; rxq++)
+			mv_pp2_ctrl_rxq_size_set(priv->port, rxq, rxq_size);
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	hwf_size = txq_size - (nr_cpu_ids * priv->txq_ctrl[0].rsvd_chunk);
+#else
+	hwf_size = txq_size/2;
+#endif
+	/* relevant only for ppv2.1 */
+	swf_size = hwf_size - (nr_cpu_ids * priv->txq_ctrl[0].rsvd_chunk);
+
+	if (txq_size != priv->txq_ctrl[0].txq_size)
+		for (txp = 0; txp < priv->txp_num; txp++)
+			for (txq = 0; txq < CONFIG_MV_PP2_TXQ; txq++) {
+				mv_pp2_ctrl_txq_size_set(priv->port, txp, txq, txq_size);
+				/* swf_size is ignored if ppv2.0 */
+				mv_pp2_ctrl_txq_limits_set(priv->port, txp, txq, hwf_size, swf_size);
+			}
+
+	if (netdev_running)
+		mv_pp2_eth_open(netdev);
+
+	return 0;
+}
+
+/******************************************************************************
+* mv_pp2_eth_tool_get_pauseparam
+* Description:
+*	ethtool get pause parameters
+* INPUT:
+*	netdev		Network device structure pointer
+* OUTPUT
+*	pause		Pause paranmeters
+* RETURN:
+*	None
+*
+*******************************************************************************/
+void mv_pp2_eth_tool_get_pauseparam(struct net_device *netdev,
+				struct ethtool_pauseparam *pause)
+{
+	struct eth_port      *priv = MV_ETH_PRIV(netdev);
+	int                  port = priv->port;
+	MV_ETH_PORT_STATUS   portStatus;
+	MV_ETH_PORT_FC       flowCtrl;
+
+	if ((priv == NULL) || (MV_PP2_IS_PON_PORT(priv->port))) {
+		printk(KERN_ERR "%s is not supported on %s\n", __func__, netdev->name);
+		return;
+	}
+
+	mvGmacFlowCtrlGet(port, &flowCtrl);
+	if ((flowCtrl == MV_ETH_FC_AN_NO) || (flowCtrl == MV_ETH_FC_AN_SYM) || (flowCtrl == MV_ETH_FC_AN_ASYM))
+		pause->autoneg = AUTONEG_ENABLE;
+	else
+		pause->autoneg = AUTONEG_DISABLE;
+
+	mvGmacLinkStatus(port, &portStatus);
+	if (portStatus.rxFc == MV_ETH_FC_DISABLE)
+		pause->rx_pause = 0;
+	else
+		pause->rx_pause = 1;
+
+	if (portStatus.txFc == MV_ETH_FC_DISABLE)
+		pause->tx_pause = 0;
+	else
+		pause->tx_pause = 1;
+}
+
+
+
+
+/******************************************************************************
+* mv_pp2_eth_tool_set_pauseparam
+* Description:
+*	ethtool configure pause parameters
+* INPUT:
+*	netdev		Network device structure pointer
+*	pause		Pause paranmeters
+* OUTPUT
+*	None
+* RETURN:
+*	0 on success
+*
+*******************************************************************************/
+int mv_pp2_eth_tool_set_pauseparam(struct net_device *netdev,
+				struct ethtool_pauseparam *pause)
+{
+	struct eth_port *priv = MV_ETH_PRIV(netdev);
+	int				port = priv->port;
+	MV_U32			phy_addr;
+	MV_STATUS		status = MV_FAIL;
+
+	if ((priv == NULL) || (MV_PP2_IS_PON_PORT(priv->port))) {
+		printk(KERN_ERR "%s is not supported on %s\n", __func__, netdev->name);
+		return -EOPNOTSUPP;
+	}
+
+	if (pause->rx_pause && pause->tx_pause) { /* Enable FC */
+		if (pause->autoneg) { /* autoneg enable */
+			status = mvGmacFlowCtrlSet(port, MV_ETH_FC_AN_SYM);
+		} else { /* autoneg disable */
+			status = mvGmacFlowCtrlSet(port, MV_ETH_FC_ENABLE);
+		}
+	} else if (!pause->rx_pause && !pause->tx_pause) { /* Disable FC */
+		if (pause->autoneg) { /* autoneg enable */
+			status = mvGmacFlowCtrlSet(port, MV_ETH_FC_AN_NO);
+		} else { /* autoneg disable */
+			status = mvGmacFlowCtrlSet(port, MV_ETH_FC_DISABLE);
+		}
+	}
+	/* Only symmetric change for RX and TX flow control is allowed */
+	if (status == MV_OK) {
+		phy_addr = priv->plat_data->phy_addr;
+		status = mvEthPhyRestartAN(phy_addr, MV_ETH_TOOL_AN_TIMEOUT);
+	}
+	if (status != MV_OK)
+		return -EINVAL;
+
+	return 0;
+}
+
+/******************************************************************************
+* mv_pp2_eth_tool_get_strings
+* Description:
+*	ethtool get strings (used for statistics and self-test descriptions)
+* INPUT:
+*	netdev		Network device structure pointer
+*	stringset	strings parameters
+* OUTPUT
+*	data		output data
+* RETURN:
+*	None
+*
+*******************************************************************************/
+void mv_pp2_eth_tool_get_strings(struct net_device *netdev,
+			     uint32_t stringset, uint8_t *data)
+{
+	uint8_t *p = data;
+	int i, q;
+	char qnum[8][4] = {" Q0", " Q1", " Q2", " Q3", " Q4", " Q5", " Q6", " Q7"};
+
+	switch (stringset) {
+	case ETH_SS_TEST:
+		/*
+		memcpy(data, *mv_pp2_tool_gstrings_test,
+		       MV_ETH_TOOL_TEST_LEN*ETH_GSTRING_LEN); */
+		break;
+	case ETH_SS_STATS:
+		for (i = 0; i < MV_ETH_TOOL_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, mv_pp2_tool_global_strings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+		for (q = 0; q < CONFIG_MV_PP2_RXQ; q++) {
+			for (i = 0; i < MV_ETH_TOOL_RX_QUEUE_STATS_LEN; i++) {
+				const char *str = mv_pp2_tool_rx_queue_strings_stats[i].stat_string;
+				memcpy(p, str, ETH_GSTRING_LEN);
+				strcat(p, qnum[q]);
+				p += ETH_GSTRING_LEN;
+			}
+		}
+		for (q = 0; q < CONFIG_MV_PP2_TXQ; q++) {
+			for (i = 0; i < MV_ETH_TOOL_TX_QUEUE_STATS_LEN; i++) {
+				const char *str = mv_pp2_tool_tx_queue_strings_stats[i].stat_string;
+				memcpy(p, str, ETH_GSTRING_LEN);
+				strcat(p, qnum[q]);
+				p += ETH_GSTRING_LEN;
+			}
+		}
+		break;
+	}
+}
+
+
+/******************************************************************************
+* mv_pp2_eth_tool_get_stats_count
+* Description:
+*	ethtool get statistics count (number of stat. array entries)
+* INPUT:
+*	netdev		Network device structure pointer
+* OUTPUT
+*	None
+* RETURN:
+*	statistics count
+*
+*******************************************************************************/
+int mv_pp2_eth_tool_get_stats_count(struct net_device *netdev)
+{
+	return 0;
+}
+
+static int mv_pp2_eth_tool_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+									 u32 *rules)
+{
+	if (info->cmd == ETHTOOL_GRXRINGS) {
+		struct eth_port *pp = MV_ETH_PRIV(dev);
+		if (pp)
+			info->data = ARRAY_SIZE(pp->rx_indir_table);
+	}
+	return 0;
+}
+
+/******************************************************************************
+* mv_pp2_eth_tool_get_ethtool_stats
+* Description:
+*	ethtool get statistics
+* INPUT:
+*	netdev		Network device structure pointer
+*	stats		stats parameters
+* OUTPUT
+*	data		output data
+* RETURN:
+*	None
+*
+*******************************************************************************/
+void mv_pp2_eth_tool_get_ethtool_stats(struct net_device *netdev,
+				   struct ethtool_stats *stats, uint64_t *data)
+{
+	struct eth_port	*priv = MV_ETH_PRIV(netdev);
+	uint64_t	*pdest = data;
+	int		i, q;
+	int		cpu = smp_processor_id();
+
+	for (i = 0; i < MV_ETH_TOOL_GLOBAL_STATS_LEN; i++) {
+		char *p = (char *)priv +
+			mv_pp2_tool_global_strings_stats[i].stat_offset;
+		pdest[i] =  *(uint32_t *)p;
+	}
+	pdest += MV_ETH_TOOL_GLOBAL_STATS_LEN;
+
+	for (i = 0; i < MV_ETH_TOOL_CPU_STATS_LEN; i++) {
+		char *p = (char *)priv +
+			mv_pp2_tool_cpu_strings_stats[i].stat_offset;
+		pdest[i] =  *((uint32_t *)p + cpu);
+	}
+	pdest += MV_ETH_TOOL_CPU_STATS_LEN;
+
+	for (q = 0; q < CONFIG_MV_PP2_RXQ; q++) {
+		for (i = 0; i < MV_ETH_TOOL_RX_QUEUE_STATS_LEN; i++) {
+			char *p = (char *)priv +
+				mv_pp2_tool_rx_queue_strings_stats[i].stat_offset;
+			pdest[i] =  *((uint32_t *)p + q);
+		}
+		pdest += MV_ETH_TOOL_RX_QUEUE_STATS_LEN;
+	}
+
+	for (q = 0; q < CONFIG_MV_PP2_TXQ; q++) {
+		for (i = 0; i < MV_ETH_TOOL_TX_QUEUE_STATS_LEN; i++) {
+			char *p = (char *)priv +
+				mv_pp2_tool_tx_queue_strings_stats[i].stat_offset;
+			pdest[i] =  *((uint32_t *)p + q);
+		}
+		pdest += MV_ETH_TOOL_TX_QUEUE_STATS_LEN;
+	}
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
+/******************************************************************************
+* mv_pp2_eth_tool_set_phys_id
+* Description:
+*	ethtool set indicator state for physical identification
+* INPUT:
+*	netdev		Network device structure pointer
+*	state		indicator state for physical identification
+* OUTPUT
+*	None
+* RETURN:
+*	Set results
+*
+*******************************************************************************/
+static int mv_pp2_eth_tool_set_phys_id(struct net_device *netdev,
+			     enum ethtool_phys_id_state state)
+{
+	/* we can only set Blink Duty Cycle and Blink Duration for Blink1 and Blink0
+	we can not set LED blink stae
+	0 = Blink Duty Cycle_0: 25% ON, 75% OFF.
+	1 = Blink Duty Cycle_1: 50% ON, 50% OFF.
+	2 = Blink Duty Cycle_2: 50% ON, 50% OFF.
+	3 = Blink Duty Cycle_3: 75% ON, 25% OFF.
+
+	0 = 1 x Core Clock: (Core_clock_period*2200*1)*1,000,000
+	1 = 2 x Core Clock: (Core_clock_period*2200*2)*1,000,000
+	2 = 4 x Core Clock: (Core_clock_period*2200*4)*1,000,000
+	3 = 8 x Core Clock: (Core_clock_period*2200*8)*1,000,000
+	4 = 16 x Core Clock: (Core_clock_period*2200*16)*1,000,000
+	5 = 32 x Core Clock: (Core_clock_period*2200*32)*1,000,000
+	6 = 64 x Core Clock: (Core_clock_period*2200*64)*1,000,000
+	*/
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		return 2;
+
+	case ETHTOOL_ID_ON:
+		break;
+
+	case ETHTOOL_ID_OFF:
+		return -EOPNOTSUPP;
+
+	case ETHTOOL_ID_INACTIVE:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+#else
+/******************************************************************************
+* mv_pp2_eth_tool_phys_id
+* Description:
+*	ethtool set indicator state for physical identification
+* INPUT:
+*	netdev		Network device structure pointer
+*	state		indicator state for physical identification
+* OUTPUT
+*	None
+* RETURN:
+*	Set results
+*
+*******************************************************************************/
+static int mv_pp2_eth_tool_phys_id(struct net_device *netdev,
+			     uint32_t data)
+{
+	/* we can only set Blink Duty Cycle and Blink Duration for Blink1 and Blink0
+	we can not set LED blink stae
+	0 = Blink Duty Cycle_0: 25% ON, 75% OFF.
+	1 = Blink Duty Cycle_1: 50% ON, 50% OFF.
+	2 = Blink Duty Cycle_2: 50% ON, 50% OFF.
+	3 = Blink Duty Cycle_3: 75% ON, 25% OFF.
+
+	0 = 1 x Core Clock: (Core_clock_period*2200*1)*1,000,000
+	1 = 2 x Core Clock: (Core_clock_period*2200*2)*1,000,000
+	2 = 4 x Core Clock: (Core_clock_period*2200*4)*1,000,000
+	3 = 8 x Core Clock: (Core_clock_period*2200*8)*1,000,000
+	4 = 16 x Core Clock: (Core_clock_period*2200*16)*1,000,000
+	5 = 32 x Core Clock: (Core_clock_period*2200*32)*1,000,000
+	6 = 64 x Core Clock: (Core_clock_period*2200*64)*1,000,000
+	*/
+	return -EOPNOTSUPP;
+}
+
+#endif
+
+/******************************************************************************
+* mv_pp2_eth_tool_get_sset_count
+* Description:
+*	ethtool get stringset count
+* INPUT:
+*	netdev		Network device structure pointer
+*	sset		stringset
+* OUTPUT
+*	None
+* RETURN:
+*	stringset length
+*
+*******************************************************************************/
+static int mv_pp2_eth_tool_get_sset_count(struct net_device *netdev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return MV_ETH_TOOL_STATS_LEN;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)
+/******************************************************************************
+* mv_eth_tool_set_tx_csum
+* Description:
+*	ethtool enable/disable TX checksum offloading
+* INPUT:
+*	netdev		Network device structure pointer
+*	data		Command data
+* OUTPUT
+*	None
+* RETURN:
+*	0 on success
+*
+*******************************************************************************/
+int mv_eth_tool_set_tx_csum(struct net_device *netdev, uint32_t data)
+{
+	if (data && (MV_MAX_PKT_SIZE(netdev->mtu) > MV_PP2_TX_CSUM_MAX_SIZE)) {
+		pr_err("%s: NETIF_F_IP_CSUM and NETIF_F_TSO not supported for mtu larger %d bytes\n",
+			netdev->name, MV_PP2_TX_CSUM_MAX_SIZE);
+		return -EINVAL;
+	}
+
+	ethtool_op_set_tx_csum(netdev, data);
+	return 0;
+}
+
+/******************************************************************************
+* mv_eth_tool_set_sg
+* Description:
+*	ethtool enable/disable scatter-gathering
+* INPUT:
+*	netdev		Network device structure pointer
+*	data		Command data
+* OUTPUT
+*	None
+* RETURN:
+*	0 on success
+*
+*******************************************************************************/
+int mv_eth_tool_set_sg(struct net_device *netdev, uint32_t data)
+{
+	if (data && (MV_MAX_PKT_SIZE(netdev->mtu) > MV_PP2_TX_CSUM_MAX_SIZE)) {
+		pr_err("%s: NETIF_F_IP_CSUM and NETIF_F_TSO not supported for mtu larger %d bytes\n",
+			netdev->name, MV_PP2_TX_CSUM_MAX_SIZE);
+		return -EINVAL;
+	}
+
+	ethtool_op_set_sg(netdev, data);
+	return 0;
+}
+
+/******************************************************************************
+* mv_eth_tool_set_tso
+* Description:
+*	ethtool enable/disable TCP segmentation offloading
+* INPUT:
+*	netdev		Network device structure pointer
+*	data		Command data
+* OUTPUT
+*	None
+* RETURN:
+*	0 on success
+*
+*******************************************************************************/
+int mv_eth_tool_set_tso(struct net_device *netdev, uint32_t data)
+{
+	if (data && (MV_MAX_PKT_SIZE(netdev->mtu) > MV_PP2_TX_CSUM_MAX_SIZE)) {
+		pr_err("%s: NETIF_F_IP_CSUM and NETIF_F_TSO not supported for mtu larger %d bytes\n",
+			netdev->name, MV_PP2_TX_CSUM_MAX_SIZE);
+		return -EINVAL;
+	}
+
+	ethtool_op_set_tso(netdev, data);
+	return 0;
+}
+#endif
+
+const struct ethtool_ops mv_pp2_eth_tool_ops = {
+	.get_settings				= mv_pp2_eth_tool_get_settings,
+	.set_settings				= mv_pp2_eth_tool_set_settings,
+	.get_drvinfo				= mv_pp2_eth_tool_get_drvinfo,
+	.get_regs_len				= mv_pp2_eth_tool_get_regs_len,
+	.get_regs				= mv_pp2_eth_tool_get_regs,
+	.get_wol				= mv_pp2_eth_tool_get_wol,
+	.set_wol				= mv_pp2_eth_tool_set_wol,
+	.nway_reset				= mv_pp2_eth_tool_nway_reset,
+	.get_link				= mv_pp2_eth_tool_get_link,
+	.get_coalesce				= mv_pp2_eth_tool_get_coalesce,
+	.set_coalesce				= mv_pp2_eth_tool_set_coalesce,
+	.get_ringparam				= mv_pp2_eth_tool_get_ringparam,
+	.set_ringparam				= mv_pp2_eth_tool_set_ringparam,
+	.get_pauseparam				= mv_pp2_eth_tool_get_pauseparam,
+	.set_pauseparam				= mv_pp2_eth_tool_set_pauseparam,
+	.get_strings				= mv_pp2_eth_tool_get_strings,
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 32)
+	.get_stats_count			= mv_pp2_eth_tool_get_stats_count,/*TODO: complete implementation */
+#endif
+	.get_ethtool_stats			= mv_pp2_eth_tool_get_ethtool_stats,/*TODO: complete implementation */
+	/*.get_rxfh_indir			= mv_pp2_eth_tool_get_rxfh_indir,
+	.set_rxfh_indir				= mv_pp2_eth_tool_set_rxfh_indir, */
+	.get_rxnfc				= mv_pp2_eth_tool_get_rxnfc,/*TODO new implementation*/
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
+	.set_phys_id				= mv_pp2_eth_tool_set_phys_id,
+#else
+	.phys_id				= mv_pp2_eth_tool_phys_id,
+#endif
+	.get_sset_count				= mv_pp2_eth_tool_get_sset_count,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
+	.get_ts_info				= ethtool_op_get_ts_info,
+#endif
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 39)
+	.get_rx_csum				= ethtool_op_get_rx_csum,
+	.get_tx_csum				= ethtool_op_get_tx_csum,
+	.set_tx_csum				= mv_eth_tool_set_tx_csum,
+	.get_sg					= ethtool_op_get_sg,
+	.set_sg					= mv_eth_tool_set_sg,
+	.get_tso				= ethtool_op_get_tso,
+	.set_tso				= mv_eth_tool_set_tso,
+#endif
+
+};
diff --git a/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_tool.h b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_tool.h
new file mode 100644
index 000000000000..61c6be0e2092
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_tool.h
@@ -0,0 +1,35 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#ifndef NET_DEV_MV_ETH_TOOL_H
+#define NET_DEV_MV_ETH_TOOL_H
+
+#include <linux/ethtool.h>
+
+extern const struct ethtool_ops mv_pp2_eth_tool_ops;
+
+#endif
diff --git a/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_tx_sched_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_tx_sched_sysfs.c
new file mode 100644
index 000000000000..1fe357016b2a
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_tx_sched_sysfs.c
@@ -0,0 +1,151 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "mv_eth_sysfs.h"
+#include "gbe/mvPp2Gbe.h"
+#include "mv_netdev.h"
+
+static ssize_t mv_eth_help(char *buf)
+{
+	int off = 0;
+
+	off += sprintf(buf+off, "echo [p] [txp]               > txSchedRegs   - show TXP Scheduler registers for egress port <p/txp>\n");
+	off += sprintf(buf+off, "echo [p] [txp] [v]           > txpRate       - set outgoing rate <v> in [kbps] for <port/txp>\n");
+	off += sprintf(buf+off, "echo [p] [txp] [v]           > txpBurst      - set maximum burst <v> in [Bytes] for <port/txp>\n");
+	off += sprintf(buf+off, "echo [p] [txp] [txq] [v]     > txqRate       - set outgoing rate <v> in [kbps] for <port/txp/txq>\n");
+	off += sprintf(buf+off, "echo [p] [txp] [txq] [v]     > txqBurst      - set maximum burst <v> in [Bytes] for <port/txp/txq>\n");
+	off += sprintf(buf+off, "echo [p] [txp] [txq] [v]     > txqWrr        - set outgoing WRR weight for <port/txp/txq>. <v=0> - fixed\n");
+
+	return off;
+}
+
+static ssize_t mv_eth_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	off = mv_eth_help(buf);
+
+	return off;
+}
+
+static ssize_t mv_eth_port_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, v, a, b;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read port and value */
+	err = p = v = a = b = 0;
+	sscanf(buf, "%d %d %d %d", &p, &v, &a, &b);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "txSchedRegs")) {
+		mvPp2TxSchedRegs(p, v);
+	} else if (!strcmp(name, "txpRate")) {
+		err = mvPp2TxpRateSet(p, v, a);
+	} else if (!strcmp(name, "txpBurst")) {
+		err = mvPp2TxpBurstSet(p, v, a);
+	} else if (!strcmp(name, "txqRate")) {
+		err = mvPp2TxqRateSet(p, v, a, b);
+	} else if (!strcmp(name, "txqBurst")) {
+		err = mvPp2TxqBurstSet(p, v, a, b);
+	} else if (!strcmp(name, "txqWrr")) {
+		if (b == 0)
+			err = mvPp2TxqFixPrioSet(p, v, a);
+		else
+			err = mvPp2TxqWrrPrioSet(p, v, a, b);
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+
+static DEVICE_ATTR(help,         S_IRUSR, mv_eth_show, NULL);
+static DEVICE_ATTR(txSchedRegs,  S_IWUSR, NULL, mv_eth_port_store);
+static DEVICE_ATTR(txpRate,      S_IWUSR, NULL, mv_eth_port_store);
+static DEVICE_ATTR(txpBurst,     S_IWUSR, NULL, mv_eth_port_store);
+static DEVICE_ATTR(txqRate,      S_IWUSR, NULL, mv_eth_port_store);
+static DEVICE_ATTR(txqBurst,     S_IWUSR, NULL, mv_eth_port_store);
+static DEVICE_ATTR(txqWrr,       S_IWUSR, NULL, mv_eth_port_store);
+
+static struct attribute *mv_eth_attrs[] = {
+	&dev_attr_help.attr,
+	&dev_attr_txSchedRegs.attr,
+	&dev_attr_txpRate.attr,
+	&dev_attr_txpBurst.attr,
+	&dev_attr_txqRate.attr,
+	&dev_attr_txqBurst.attr,
+	&dev_attr_txqWrr.attr,
+	NULL
+};
+
+static struct attribute_group mv_eth_tx_sched_group = {
+	.name = "tx_sched",
+	.attrs = mv_eth_attrs,
+};
+
+int mv_pp2_tx_sched_sysfs_init(struct kobject *gbe_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(gbe_kobj, &mv_eth_tx_sched_group);
+	if (err)
+		pr_err("sysfs group %s failed %d\n", mv_eth_tx_sched_group.name, err);
+
+	return err;
+}
+
+int mv_pp2_tx_sched_sysfs_exit(struct kobject *gbe_kobj)
+{
+	sysfs_remove_group(gbe_kobj, &mv_eth_tx_sched_group);
+	return 0;
+}
diff --git a/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_tx_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_tx_sysfs.c
new file mode 100644
index 000000000000..389c5ee9f707
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_eth_tx_sysfs.c
@@ -0,0 +1,241 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "mv_eth_sysfs.h"
+#include "gbe/mvPp2Gbe.h"
+#include "mv_netdev.h"
+
+
+static ssize_t mv_pp2_help(char *b)
+{
+	int o = 0; /* buffer offset */
+	int s = PAGE_SIZE; /* buffer size */
+
+	o += scnprintf(b+o, s-o, "cat                              txRegs          - show global TX registers\n");
+#ifdef CONFIG_MV_ETH_PP2_1
+	o += scnprintf(b+o, s-o, "echo [p] [txp] [txq]             > pTxqCounters  - show TXQ Counters for port <p/txp/txq> where <txq> range [0..7]\n");
+#endif
+	o += scnprintf(b+o, s-o, "echo [p] [txp] [txq]             > pTxqRegs      - show TXQ registers for port <p/txp/txq> where <txq> range [0..7]\n");
+	o += scnprintf(b+o, s-o, "echo [txq]                       > gTxqRegs      - show TXQ registers for global <txq> range [0..255]\n");
+	o += scnprintf(b+o, s-o, "echo [cpu]                       > aggrTxqRegs   - show Aggregation TXQ registers for <cpu> range [0..max]\n");
+	o += scnprintf(b+o, s-o, "echo [cpu] [v]                   > aggrTxqShow   - show aggregated TXQ descriptors ring for <cpu>.\n");
+	o += scnprintf(b+o, s-o, "echo [p] [txp] [txq] [v]         > txqShow       - show TXQ descriptors ring for <p/txp/txq>. v: 0-brief, 1-full\n");
+	o += scnprintf(b+o, s-o, "echo [p] [hex]                   > txFlags       - set TX flags. bits: 0-no_pad, 1-mh, 2-hw_cmd\n");
+	o += scnprintf(b+o, s-o, "echo [p] [hex]                   > txMH          - set 2 bytes of Marvell Header for transmit\n");
+	o += scnprintf(b+o, s-o, "echo [p] [txp] [txq] [cpu]       > txqDef        - set default <txp/txq> for packets sent to port <p> by <cpu>\n");
+	o += scnprintf(b+o, s-o, "echo [p] [txp] [txq] [v]         > txqSize       - set TXQ size <v> for <p/txp/txq>.\n");
+#ifdef CONFIG_MV_ETH_PP2_1
+	o += scnprintf(b+o, s-o, "echo [p] [txp] [txq] [hwf] [swf] > txqLimit      - set HWF <hwf> and SWF <swf> limits for <p/txp/txq>.\n");
+	o += scnprintf(b+o, s-o, "echo [p] [txp] [txq] [v]         > txqChunk      - set SWF request chunk [v] for <p/txp/txq>\n");
+
+#else
+	o += scnprintf(b+o, s-o, "echo [p] [txp] [txq] [hwf]       > txqLimit      - set HWF limit <hwf> for <p/txp/txq>.\n");
+#endif
+#ifdef CONFIG_MV_PP2_TXDONE_IN_HRTIMER
+	o += scnprintf(b+o, s-o, "echo [period]                    > txPeriod      - set Tx Done high resolution timer period\n");
+	o += scnprintf(b+o, s-o, "				     [period]: period range is [%lu, %lu], unit usec\n",
+		MV_PP2_HRTIMER_PERIOD_MIN, MV_PP2_HRTIMER_PERIOD_MAX);
+#endif
+	return o;
+}
+
+static ssize_t mv_pp2_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	const char      *name = attr->attr.name;
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "txRegs"))
+		mvPp2TxRegs();
+	else
+		off = mv_pp2_help(buf);
+
+	return off;
+}
+
+static ssize_t mv_pp2_tx_hex_store(struct device *dev,
+				struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, v;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read port and value */
+	err = p = v;
+	sscanf(buf, "%d %x", &p, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "txFlags")) {
+		err = mv_pp2_ctrl_tx_flag(p, MV_ETH_TX_F_NO_PAD, v & 0x1);
+		err = mv_pp2_ctrl_tx_flag(p, MV_ETH_TX_F_MH, v & 0x2);
+		err = mv_pp2_ctrl_tx_flag(p, MV_ETH_TX_F_HW_CMD, v & 0x4);
+	} else if (!strcmp(name, "txMH")) {
+		err = mv_pp2_eth_ctrl_tx_mh(p, MV_16BIT_BE((u16)v));
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_pp2_txq_store(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err;
+	unsigned int    p, v, a, b, c;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read port and value */
+	err = p = v = a = b = c = 0;
+	sscanf(buf, "%d %d %d %d %d", &p, &v, &a, &b, &c);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "txqDef")) {
+		err = mv_pp2_ctrl_txq_cpu_def(p, v, a, b);
+	} else if (!strcmp(name, "txqShow")) {
+		mvPp2TxqShow(p, v, a, b);
+	}  else if (!strcmp(name, "aggrTxqShow")) {
+		mvPp2AggrTxqShow(p, v);
+	} else if (!strcmp(name, "gTxqRegs")) {
+		mvPp2PhysTxqRegs(p);
+	} else if (!strcmp(name, "pTxqRegs")) {
+		mvPp2PortTxqRegs(p, v, a);
+	} else if (!strcmp(name, "pTxqCounters")) {
+		mvPp2V1TxqDbgCntrs(p, v, a);
+	} else if (!strcmp(name, "aggrTxqRegs")) {
+		mvPp2AggrTxqRegs(p);
+	} else if (!strcmp(name, "txqSize")) {
+		mv_pp2_ctrl_txq_size_set(p, v, a, b);
+	} else if (!strcmp(name, "txqLimit")) {
+		/* last param is ignored in ppv2.0 */
+		mv_pp2_ctrl_txq_limits_set(p, v, a, b, c);
+	} else if (!strcmp(name, "txqChunk")) {
+		mv_pp2_ctrl_txq_chunk_set(p, v, a, b);
+	} else if (!strcmp(name, "txPeriod")) {
+#ifdef CONFIG_MV_PP2_TXDONE_IN_HRTIMER
+		mv_pp2_tx_done_hrtimer_period_set(p);
+#endif
+	} else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: error %d\n", __func__, err);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(help,         S_IRUSR, mv_pp2_show, NULL);
+static DEVICE_ATTR(txRegs,       S_IRUSR, mv_pp2_show, NULL);
+static DEVICE_ATTR(aggrTxqRegs,  S_IWUSR, NULL, mv_pp2_txq_store);
+static DEVICE_ATTR(pTxqCounters, S_IWUSR, NULL, mv_pp2_txq_store);
+static DEVICE_ATTR(txqShow,      S_IWUSR, NULL, mv_pp2_txq_store);
+static DEVICE_ATTR(gTxqRegs,     S_IWUSR, NULL, mv_pp2_txq_store);
+static DEVICE_ATTR(pTxqRegs,     S_IWUSR, NULL, mv_pp2_txq_store);
+static DEVICE_ATTR(aggrTxqShow,  S_IWUSR, NULL, mv_pp2_txq_store);
+static DEVICE_ATTR(txqDef,       S_IWUSR, NULL, mv_pp2_txq_store);
+static DEVICE_ATTR(txqSize,      S_IWUSR, NULL, mv_pp2_txq_store);
+static DEVICE_ATTR(txqLimit,     S_IWUSR, NULL, mv_pp2_txq_store);
+static DEVICE_ATTR(txqChunk,     S_IWUSR, NULL, mv_pp2_txq_store);
+#ifdef CONFIG_MV_PP2_TXDONE_IN_HRTIMER
+static DEVICE_ATTR(txPeriod,     S_IWUSR, NULL, mv_pp2_txq_store);
+#endif
+static DEVICE_ATTR(txFlags,      S_IWUSR, NULL, mv_pp2_tx_hex_store);
+static DEVICE_ATTR(txMH,         S_IWUSR, NULL, mv_pp2_tx_hex_store);
+
+static struct attribute *mv_pp2_tx_attrs[] = {
+	&dev_attr_txqDef.attr,
+	&dev_attr_pTxqCounters.attr,
+	&dev_attr_aggrTxqRegs.attr,
+	&dev_attr_help.attr,
+	&dev_attr_txRegs.attr,
+	&dev_attr_txqShow.attr,
+	&dev_attr_gTxqRegs.attr,
+	&dev_attr_pTxqRegs.attr,
+	&dev_attr_aggrTxqShow.attr,
+	&dev_attr_txqSize.attr,
+	&dev_attr_txqLimit.attr,
+	&dev_attr_txqChunk.attr,
+#ifdef CONFIG_MV_PP2_TXDONE_IN_HRTIMER
+	&dev_attr_txPeriod.attr,
+#endif
+	&dev_attr_txFlags.attr,
+	&dev_attr_txMH.attr,
+	NULL
+};
+
+static struct attribute_group mv_pp2_tx_group = {
+	.name = "tx",
+	.attrs = mv_pp2_tx_attrs,
+};
+
+int mv_pp2_tx_sysfs_init(struct kobject *gbe_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(gbe_kobj, &mv_pp2_tx_group);
+	if (err)
+		pr_err("sysfs group %s failed %d\n", mv_pp2_tx_group.name, err);
+
+	return err;
+}
+
+int mv_pp2_tx_sysfs_exit(struct kobject *gbe_kobj)
+{
+	sysfs_remove_group(gbe_kobj, &mv_pp2_tx_group);
+
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_ethernet.c b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_ethernet.c
new file mode 100644
index 000000000000..ba1d1ba39012
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_ethernet.c
@@ -0,0 +1,379 @@
+
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+
+#include "mvCommon.h"
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/interrupt.h>
+
+#include "mvOs.h"
+
+#include "mvEthPhy.h"
+#include "gbe/mvPp2Gbe.h"
+#include "prs/mvPp2Prs.h"
+#include "cls/mvPp2ClsHw.h"
+
+#include "mv_netdev.h"
+
+static int mv_pp2_set_mac_addr_internals(struct net_device *dev, void *addr);
+
+/***********************************************************
+ * mv_pp2_start --                                         *
+ *   start a network device. connect and enable interrupts *
+ *   set hw defaults. fill rx buffers. restart phy link    *
+ *   auto neg. set device link flags. report status.       *
+ ***********************************************************/
+int mv_pp2_start(struct net_device *dev)
+{
+	struct eth_port *priv = MV_ETH_PRIV(dev);
+	int group;
+
+	/* in default link is down */
+	netif_carrier_off(dev);
+	/* Stop the TX queue - it will be enabled upon PHY status change after link-up interrupt/timer */
+
+	netif_tx_stop_all_queues(dev);
+
+	/* fill rx buffers, start rx/tx activity, set coalescing */
+	if (mv_pp2_start_internals(priv, dev->mtu) != 0) {
+		printk(KERN_ERR "%s: start internals failed\n", dev->name);
+		goto error;
+	}
+	/* enable polling on the port, must be used after netif_poll_disable */
+	if (priv->flags & MV_ETH_F_CONNECT_LINUX) {
+		for (group = 0; group < MV_PP2_MAX_RXQ; group++)
+			if (priv->napi_group[group] && priv->napi_group[group]->napi)
+				napi_enable(priv->napi_group[group]->napi);
+	}
+	if (priv->flags & MV_ETH_F_LINK_UP) {
+
+		if (mv_pp2_ctrl_is_tx_enabled(priv)) {
+			netif_carrier_on(dev);
+			netif_tx_wake_all_queues(dev);
+		}
+		printk(KERN_NOTICE "%s: link up\n", dev->name);
+	}
+
+	if (priv->flags & MV_ETH_F_CONNECT_LINUX) {
+		/* connect to port interrupt line */
+		if (request_irq(dev->irq, mv_pp2_isr, (IRQF_DISABLED), dev->name, priv)) {
+			printk(KERN_ERR "cannot request irq %d for %s port %d\n", dev->irq, dev->name, priv->port);
+			for (group = 0; group < MV_PP2_MAX_RXQ; group++)
+				if (priv->napi_group[group] && priv->napi_group[group]->napi)
+					napi_disable(priv->napi_group[group]->napi);
+			goto error;
+		}
+
+		/* unmask interrupts */
+		on_each_cpu(mv_pp2_interrupts_unmask, (void *)priv, 1);
+
+		/* Enable interrupts for all CPUs */
+		mvPp2GbeCpuInterruptsEnable(priv->port, priv->cpuMask);
+
+		/* Unmask Port link interrupt */
+		mvGmacPortIsrUnmask(priv->port);
+
+		printk(KERN_NOTICE "%s: started\n", dev->name);
+	}
+
+	/* Enable GMAC */
+	if (!MV_PP2_IS_PON_PORT(priv->port))
+		mvGmacPortEnable(priv->port);
+
+	mv_pp2_link_event(priv, 1);
+
+	return 0;
+
+error:
+	printk(KERN_ERR "%s: start failed\n", dev->name);
+	return -EINVAL;
+}
+
+/***********************************************************
+ * mv_pp2_eth_stop --                                          *
+ *   stop interface with linux core. stop port activity.   *
+ *   free skb's from rings.                                *
+ ***********************************************************/
+int mv_pp2_eth_stop(struct net_device *dev)
+{
+	struct eth_port *priv = MV_ETH_PRIV(dev);
+	struct cpu_ctrl *cpuCtrl;
+	int group, cpu;
+
+	/* stop new packets from arriving to RXQs */
+	mvPp2PortIngressEnable(priv->port, MV_FALSE);
+
+	mdelay(10);
+
+	/* Disable interrupts for all CPUs */
+	mvPp2GbeCpuInterruptsDisable(priv->port, priv->cpuMask);
+
+	on_each_cpu(mv_pp2_interrupts_mask, priv, 1);
+
+	/* make sure that the port finished its Rx polling */
+	for (group = 0; group < MV_PP2_MAX_RXQ; group++)
+		if (priv->napi_group[group] && priv->napi_group[group]->napi)
+			napi_disable(priv->napi_group[group]->napi);
+
+	/* stop upper layer */
+	netif_carrier_off(dev);
+	netif_tx_stop_all_queues(dev);
+
+	/* stop tx/rx activity, mask all interrupts, relese skb in rings,*/
+	mv_pp2_stop_internals(priv);
+	for_each_possible_cpu(cpu) {
+		cpuCtrl = priv->cpu_config[cpu];
+#if defined(CONFIG_MV_PP2_TXDONE_IN_HRTIMER)
+		hrtimer_cancel(&cpuCtrl->tx_done_timer);
+		clear_bit(MV_ETH_F_TX_DONE_TIMER_BIT, &(cpuCtrl->flags));
+#elif defined(CONFIG_MV_PP2_TXDONE_IN_TIMER)
+		del_timer(&cpuCtrl->tx_done_timer);
+		clear_bit(MV_ETH_F_TX_DONE_TIMER_BIT, &(cpuCtrl->flags));
+#endif
+	}
+	if (dev->irq != 0)
+		free_irq(dev->irq, priv);
+
+	mvPp2PortEgressEnable(priv->port, MV_FALSE);
+
+	if (!MV_PP2_IS_PON_PORT(priv->port))
+		mvGmacPortDisable(priv->port);
+
+	printk(KERN_NOTICE "%s: stopped\n", dev->name);
+
+	return 0;
+}
+
+
+int mv_pp2_eth_change_mtu(struct net_device *dev, int mtu)
+{
+	int old_mtu = dev->mtu;
+
+	mtu = mv_pp2_eth_check_mtu_valid(dev, mtu);
+	if (mtu < 0)
+		return -EINVAL;
+
+	if (!netif_running(dev)) {
+		if (mv_pp2_eth_change_mtu_internals(dev, mtu) == -1)
+			goto error;
+
+		pr_info("%s: change mtu %d (max-pkt-size %d) to %d (max-pkt-size %d)\n",
+				dev->name, old_mtu, MV_MAX_PKT_SIZE(old_mtu),
+				dev->mtu, MV_MAX_PKT_SIZE(dev->mtu));
+		return 0;
+	}
+
+	if (mv_pp2_check_mtu_internals(dev, mtu))
+		goto error;
+
+	if (dev->netdev_ops->ndo_stop(dev)) {
+		pr_err("%s: stop interface failed\n", dev->name);
+		goto error;
+	}
+
+	if (mv_pp2_eth_change_mtu_internals(dev, mtu) == -1) {
+		pr_err("%s change mtu internals failed\n", dev->name);
+		goto error;
+	}
+
+	if (dev->netdev_ops->ndo_open(dev)) {
+		pr_err("%s: start interface failed\n", dev->name);
+		goto error;
+	}
+	pr_info("%s: change mtu %d (max-pkt-size %d) to %d (max-pkt-size %d)\n",
+				dev->name, old_mtu, MV_MAX_PKT_SIZE(old_mtu), dev->mtu,
+				MV_MAX_PKT_SIZE(dev->mtu));
+	return 0;
+
+error:
+	pr_err("%s: change mtu failed\n", dev->name);
+	return -1;
+}
+
+/***********************************************************
+ * eth_set_mac_addr --                                   *
+ *   stop port activity. set new addr in device and hw.    *
+ *   restart port activity.                                *
+ ***********************************************************/
+static int mv_pp2_set_mac_addr_internals(struct net_device *dev, void *addr)
+{
+	u8              *mac = &(((u8 *)addr)[2]);  /* skip on first 2B (ether HW addr type) */
+	int             i;
+
+	struct eth_port *priv = MV_ETH_PRIV(dev);
+
+	if (!mv_pp2_pnc_ctrl_en) {
+		printk(KERN_ERR "%s Error: PARSER and CLASSIFIER control is disabled\n", __func__);
+
+		/* linux stop the port */
+		mv_pp2_eth_open(dev);
+		return -1;
+	}
+
+	/* remove old parser entry*/
+	mvPrsMacDaAccept(MV_PPV2_PORT_PHYS(priv->port), dev->dev_addr, 0);
+
+	/*add new parser entry*/
+	mvPrsMacDaAccept(MV_PPV2_PORT_PHYS(priv->port), mac, 1);
+
+	/* set addr in the device */
+	for (i = 0; i < 6; i++)
+		dev->dev_addr[i] = mac[i];
+
+#ifdef CONFIG_MV_INCLUDE_PON
+	/* Update PON module */
+	if (MV_PP2_IS_PON_PORT(priv->port))
+		mv_pon_set_mac_addr(addr);
+#endif
+
+	printk(KERN_NOTICE "%s: mac address changed\n", dev->name);
+
+	return 0;
+}
+
+void mv_pp2_rx_set_rx_mode(struct net_device *dev)
+{
+	struct eth_port     *priv = MV_ETH_PRIV(dev);
+	int                 phyPort = MV_PPV2_PORT_PHYS(priv->port);
+
+	if (!mv_pp2_pnc_ctrl_en) {
+		pr_err("%s Error: PARSER and CLASSIFIER control is disabled\n", __func__);
+		return;
+	}
+
+	if (dev->flags & IFF_PROMISC)
+		mvPrsMacPromiscousSet(phyPort, 1);
+	else
+		mvPrsMacPromiscousSet(phyPort, 0);
+
+	if (dev->flags & IFF_ALLMULTI)
+		mvPrsMacAllMultiSet(phyPort, 1);
+	else
+		mvPrsMacAllMultiSet(phyPort, 0);
+
+	/* remove all port's mcast enries */
+	mvPrsMcastDelAll(phyPort);
+
+	if (dev->flags & IFF_MULTICAST) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34)
+		if (!netdev_mc_empty(dev)) {
+			struct netdev_hw_addr *ha;
+
+			netdev_for_each_mc_addr(ha, dev) {
+				if (mvPrsMacDaAccept(phyPort, ha->addr, 1) != MV_OK) {
+					pr_err("%s: Mcast init failed\n", dev->name);
+					break;
+				}
+			}
+		}
+#else
+		struct dev_mc_list *curr_addr = dev->mc_list;
+		int                i;
+		for (i = 0; i < dev->mc_count; i++, curr_addr = curr_addr->next) {
+			if (!curr_addr)
+				break;
+			if (mvPrsMacDaAccept(priv->port, curr_addr->dmi_addr, 1)) {
+				printk(KERN_ERR "%s: Mcast init failed - %d of %d\n",
+						dev->name, i, dev->mc_count);
+				break;
+			}
+		}
+#endif
+	}
+}
+
+
+int     mv_pp2_eth_set_mac_addr(struct net_device *dev, void *addr)
+{
+	if (!netif_running(dev)) {
+		if (mv_pp2_set_mac_addr_internals(dev, addr) == -1)
+			goto error;
+		return 0;
+	}
+
+	if (dev->netdev_ops->ndo_stop(dev)) {
+		printk(KERN_ERR "%s: stop interface failed\n", dev->name);
+		goto error;
+	}
+
+	if (mv_pp2_set_mac_addr_internals(dev, addr) == -1)
+		goto error;
+
+	if (dev->netdev_ops->ndo_open(dev)) {
+		printk(KERN_ERR "%s: start interface failed\n", dev->name);
+		goto error;
+	}
+
+	return 0;
+
+error:
+	printk(KERN_ERR "%s: set mac addr failed\n", dev->name);
+	return -1;
+}
+
+
+/************************************************************
+ * mv_pp2_eth_open -- Restore MAC address and call to   *
+ *                    mv_pp2_start                      *
+ ************************************************************/
+int mv_pp2_eth_open(struct net_device *dev)
+{
+
+	struct	eth_port *priv = MV_ETH_PRIV(dev);
+	int	phyPort = MV_PPV2_PORT_PHYS(priv->port);
+	static  u8 mac_bcast[MV_MAC_ADDR_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+
+	if (mv_pp2_pnc_ctrl_en) {
+
+		if (mvPrsMacDaAccept(phyPort, mac_bcast, 1 /*add*/)) {
+			printk(KERN_ERR "%s:mvPrsMacDaAccept\n", dev->name);
+				return -1;
+		}
+		if (mvPrsMacDaAccept(phyPort, dev->dev_addr, 1 /*add*/)) {
+			printk(KERN_ERR "%s: mvPrsMacDaAccept failed\n", dev->name);
+				return -1;
+		}
+		if (mvPp2PrsTagModeSet(phyPort, MV_TAG_TYPE_MH)) {
+			printk(KERN_ERR "%s: mvPp2PrsTagModeSet failed\n", dev->name);
+				return -1;
+		}
+		if (mvPrsDefFlow(phyPort)) {
+			printk(KERN_ERR "%s: mvPp2PrsDefFlow failed\n", dev->name);
+				return -1;
+		}
+	}
+	if (mv_pp2_start(dev)) {
+		printk(KERN_ERR "%s: start interface failed\n", dev->name);
+		return -1;
+	}
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_netdev.c b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_netdev.c
new file mode 100644
index 000000000000..a249d214e770
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_netdev.c
@@ -0,0 +1,6424 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+
+#include "mvCommon.h"
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <linux/mbus.h>
+#include <linux/inetdevice.h>
+#include <linux/interrupt.h>
+#include <linux/mv_pp2.h>
+#include <asm/setup.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "mvOs.h"
+#include "mvDebug.h"
+#include "mvEthPhy.h"
+
+#include "gbe/mvPp2Gbe.h"
+#include "prs/mvPp2Prs.h"
+#include "prs/mvPp2PrsHw.h"
+#include "cls/mvPp2Classifier.h"
+#include "dpi/mvPp2DpiHw.h"
+#include "wol/mvPp2Wol.h"
+
+
+#include "mv_mux_netdev.h"
+#include "mv_netdev.h"
+#include "mv_eth_tool.h"
+#include "mv_eth_sysfs.h"
+
+#ifdef CONFIG_OF
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#endif /* CONFIG_OF */
+
+#ifdef CONFIG_MV_PP2_TXDONE_IN_HRTIMER
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#endif
+
+#ifdef CONFIG_OF
+/* Virtual address for PP2, ETH module and GMACs when FDT used */
+int pp2_vbase, eth_vbase, pp2_port_vbase[MV_ETH_MAX_PORTS];
+#endif /* CONFIG_OF */
+
+#define MV_ETH_MAX_NAPI_GROUPS	MV_PP2_MAX_RXQ
+#define MV_ETH_TX_PENDING_TIMEOUT_MSEC     1000
+
+#ifdef CONFIG_MV_PP2_SWF_HWF_CORRUPTION_WA
+static unsigned int mv_pp2_swf_hwf_wa_en;
+void mv_pp2_cache_inv_wa_ctrl(int en)
+{
+	mv_pp2_swf_hwf_wa_en = en;
+}
+void mv_pp2_iocc_l1_l2_cache_inv(unsigned char *v_start, int size)
+{
+	if (mv_pp2_swf_hwf_wa_en)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34)
+		___dma_single_dev_to_cpu(v_start, size, DMA_FROM_DEVICE);
+#else
+		dma_cache_maint(v_start, size, DMA_FROM_DEVICE);
+#endif
+}
+#endif /* CONFIG_MV_PP2_SWF_HWF_CORRUPTION_WA */
+
+static struct mv_mux_eth_ops mux_eth_ops;
+
+static struct  platform_device *pp2_sysfs;
+
+/*
+platform_device used in mv_pp2_all_ports_probe only for debug
+*/
+
+struct platform_device *plats[MV_ETH_MAX_PORTS];
+
+/* Temporary implementation for SWF to HWF transition */
+static void *sync_head;
+static u32   sync_rx_desc;
+
+/* Global device used for global cache operation */
+static struct device *global_dev;
+
+static inline int mv_pp2_tx_policy(struct eth_port *pp, struct sk_buff *skb);
+
+#ifdef CONFIG_MV_PP2_SKB_RECYCLE
+int mv_ctrl_pp2_recycle = CONFIG_MV_PP2_SKB_RECYCLE_DEF;
+EXPORT_SYMBOL(mv_ctrl_pp2_recycle);
+
+int mv_pp2_eth_ctrl_recycle(int en)
+{
+	mv_ctrl_pp2_recycle = en;
+	return 0;
+}
+#else
+int mv_pp2_eth_ctrl_recycle(int en)
+{
+	printk(KERN_ERR "SKB recycle is not supported\n");
+	return 1;
+}
+#endif /* CONFIG_MV_PP2_SKB_RECYCLE */
+
+struct bm_pool mv_pp2_pool[MV_ETH_BM_POOLS];
+struct eth_port **mv_pp2_ports;
+struct aggr_tx_queue *aggr_txqs;
+EXPORT_SYMBOL(aggr_txqs);
+
+int mv_ctrl_pp2_txdone = CONFIG_MV_PP2_TXDONE_COAL_PKTS;
+EXPORT_SYMBOL(mv_ctrl_pp2_txdone);
+
+unsigned int mv_pp2_pnc_ctrl_en = 1;
+
+/*
+ * Static declarations
+ */
+static int mv_pp2_ports_num;
+
+static int mv_pp2_initialized;
+
+static struct tasklet_struct link_tasklet;
+
+static int wol_ports_bmp;
+
+#ifdef CONFIG_MV_PP2_TXDONE_IN_HRTIMER
+static unsigned int mv_pp2_tx_done_hrtimer_period_us = CONFIG_MV_PP2_TX_DONE_HIGH_RES_TIMER_PERIOD;
+#endif
+
+/*
+ * Local functions
+ */
+static void mv_pp2_txq_delete(struct eth_port *pp, struct tx_queue *txq_ctrl);
+static void mv_pp2_tx_timeout(struct net_device *dev);
+static int  mv_pp2_tx(struct sk_buff *skb, struct net_device *dev);
+static void mv_pp2_tx_frag_process(struct eth_port *pp, struct sk_buff *skb, struct aggr_tx_queue *aggr_txq_ctrl,
+		struct tx_queue *txq_ctrl, struct mv_pp2_tx_spec *tx_spec);
+static void mv_pp2_config_show(void);
+static int  mv_pp2_priv_init(struct eth_port *pp, int port);
+static void mv_pp2_priv_cleanup(struct eth_port *pp);
+static int  mv_pp2_config_get(struct platform_device *pdev, u8 *mac);
+static int  mv_pp2_hal_init(struct eth_port *pp);
+struct net_device *mv_pp2_netdev_init(struct platform_device *pdev);
+static int mv_pp2_netdev_connect(struct eth_port *pp);
+static void mv_pp2_netdev_init_features(struct net_device *dev);
+static struct sk_buff *mv_pp2_skb_alloc(struct eth_port *pp, struct bm_pool *pool,
+					phys_addr_t *phys_addr, gfp_t gfp_mask);
+static MV_STATUS mv_pp2_pool_create(int pool, int capacity);
+static int mv_pp2_pool_add(struct eth_port *pp, int pool, int buf_num);
+static int mv_pp2_pool_free(int pool, int num);
+static int mv_pp2_pool_destroy(int pool);
+static struct bm_pool *mv_pp2_pool_use(struct eth_port *pp, int pool, enum mv_pp2_bm_type type, int pkt_size);
+#ifdef CONFIG_MV_PP2_TSO
+static int mv_pp2_tx_tso(struct sk_buff *skb, struct net_device *dev, struct mv_pp2_tx_spec *tx_spec,
+			 struct tx_queue *txq_ctrl, struct aggr_tx_queue *aggr_txq_ctrl);
+#endif
+
+#if defined(CONFIG_NETMAP) || defined(CONFIG_NETMAP_MODULE)
+#include <mv_pp2_netmap.h>
+#endif
+
+void mv_pp2_ctrl_pnc(int en)
+{
+	mv_pp2_pnc_ctrl_en = en;
+}
+
+#ifdef CONFIG_MV_PP2_TXDONE_IN_HRTIMER
+unsigned int mv_pp2_tx_done_hrtimer_period_get(void)
+{
+	return mv_pp2_tx_done_hrtimer_period_us;
+}
+
+int mv_pp2_tx_done_hrtimer_period_set(unsigned int period)
+{
+	if ((period < MV_PP2_HRTIMER_PERIOD_MIN) || (period > MV_PP2_HRTIMER_PERIOD_MAX)) {
+		pr_info("period should be in [%lu, %lu]\n", MV_PP2_HRTIMER_PERIOD_MIN, MV_PP2_HRTIMER_PERIOD_MAX);
+		return -EINVAL;
+	}
+
+	mv_pp2_tx_done_hrtimer_period_us = period;
+	return 0;
+}
+#endif
+
+/*****************************************
+ *          Adaptive coalescing          *
+ *****************************************/
+static void mv_pp2_adaptive_rx_update(struct eth_port *pp)
+{
+	unsigned long period = jiffies - pp->rx_timestamp;
+
+	if (period >= (pp->rate_sample_cfg * HZ)) {
+		int i;
+		unsigned long rate = pp->rx_rate_pkts * HZ / period;
+
+		if (rate < pp->pkt_rate_low_cfg) {
+			if (pp->rate_current != 1) {
+				pp->rate_current = 1;
+				for (i = 0; i < CONFIG_MV_PP2_RXQ; i++) {
+					mv_pp2_rx_time_coal_set(pp->port, i, pp->rx_time_low_coal_cfg);
+					mv_pp2_rx_ptks_coal_set(pp->port, i, pp->rx_pkts_low_coal_cfg);
+				}
+			}
+		} else if (rate > pp->pkt_rate_high_cfg) {
+			if (pp->rate_current != 3) {
+				pp->rate_current = 3;
+				for (i = 0; i < CONFIG_MV_PP2_RXQ; i++) {
+					mv_pp2_rx_time_coal_set(pp->port, i, pp->rx_time_high_coal_cfg);
+					mv_pp2_rx_ptks_coal_set(pp->port, i, pp->rx_pkts_high_coal_cfg);
+				}
+			}
+		} else {
+			if (pp->rate_current != 2) {
+				pp->rate_current = 2;
+				for (i = 0; i < CONFIG_MV_PP2_RXQ; i++) {
+					mv_pp2_rx_time_coal_set(pp->port, i, pp->rx_time_coal_cfg);
+					mv_pp2_rx_ptks_coal_set(pp->port, i, pp->rx_pkts_coal_cfg);
+				}
+			}
+		}
+
+		pp->rx_rate_pkts = 0;
+		pp->rx_timestamp = jiffies;
+	}
+}
+
+/*****************************************
+ *            MUX function                *
+ *****************************************/
+static int mv_pp2_tag_type_set(int port, int type)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if ((type == MV_TAG_TYPE_MH) || (type == MV_TAG_TYPE_DSA) || (type == MV_TAG_TYPE_EDSA))
+		mvPp2MhSet(port, type);
+
+	pp->tagged = (type == MV_TAG_TYPE_NONE) ? MV_FALSE : MV_TRUE;
+
+	return 0;
+}
+/*****************************************
+ *            NAPI Group API             *
+ *****************************************/
+/* Add/update a new empty napi_group */
+int mv_pp2_port_napi_group_create(int port, int group)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+	struct napi_group_ctrl *napi_group;
+
+	if ((group < 0) || (group >= MV_ETH_MAX_NAPI_GROUPS)) {
+		printk(KERN_ERR "%s: invalid napi group number - %d\n", __func__, group);
+		return 1;
+	}
+
+	napi_group = pp->napi_group[group];
+	if (napi_group) {
+		printk(KERN_ERR "%s: group already exist - %d\n", __func__, group);
+		return 1;
+	}
+
+	napi_group = mvOsMalloc(sizeof(struct napi_group_ctrl));
+	if (!napi_group)
+		return 1;
+
+	napi_group->napi = kmalloc(sizeof(struct napi_struct), GFP_KERNEL);
+	if (!napi_group->napi) {
+		mvOsFree(napi_group);
+		return 1;
+	}
+
+	memset(napi_group->napi, 0, sizeof(struct napi_struct));
+	netif_napi_add(pp->dev, napi_group->napi, mv_pp2_poll, pp->weight);
+	pp->napi_group[group] = napi_group;
+	napi_group->id = group;
+
+	return 0;
+}
+
+/* Delete napi_group */
+int mv_pp2_port_napi_group_delete(int port, int group)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+	struct napi_group_ctrl *napi_group;
+
+	if ((group < 0) || (group >= MV_ETH_MAX_NAPI_GROUPS)) {
+		printk(KERN_ERR "%s: invalid napi group number - %d\n", __func__, group);
+		return 1;
+	}
+
+	napi_group = pp->napi_group[group];
+	if (!napi_group)
+		return 1;
+
+	if ((napi_group->cpu_mask != 0) || (napi_group->rxq_mask != 0)) {
+		printk(KERN_ERR "%s: group %d still has cpus/rxqs - cpus=0x%02x  rxqs=0x%04x\n", __func__,
+			group, napi_group->cpu_mask, napi_group->rxq_mask);
+		return 1;
+	}
+
+	netif_napi_del(napi_group->napi);
+	mvOsFree(napi_group->napi);
+	mvOsFree(napi_group);
+	pp->napi_group[group] = NULL;
+
+	return 0;
+}
+
+int mv_pp2_napi_set_cpu_affinity(int port, int group, int cpu_mask)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+	struct napi_group_ctrl *napi_group;
+	int i, cpu;
+
+	if (pp == NULL) {
+		printk(KERN_ERR "%s: pp is null \n", __func__);
+		return MV_FAIL;
+	}
+	if ((group < 0) || (group >= MV_ETH_MAX_NAPI_GROUPS)) {
+		printk(KERN_ERR "%s: group number is higher than %d\n", __func__, MV_ETH_MAX_NAPI_GROUPS - 1);
+		return -1;
+	}
+	if (pp->flags & MV_ETH_F_STARTED) {
+		printk(KERN_ERR "%s: port %d must be stopped\n", __func__, port);
+		return -EINVAL;
+	}
+
+	/* check that cpu_mask doesn't have cpu that belong to other group */
+	for (i = 0; i < MV_ETH_MAX_NAPI_GROUPS; i++) {
+		napi_group = pp->napi_group[i];
+		if ((!napi_group) || (i == group))
+			continue;
+
+		if (napi_group->cpu_mask & cpu_mask) {
+			printk(KERN_ERR "%s: cpus mask contains cpu that is already in other group(%d) - %d\n",
+				__func__, i, cpu_mask);
+			return MV_FAIL;
+		}
+	}
+
+	napi_group = pp->napi_group[group];
+	if (napi_group == NULL) {
+		printk(KERN_ERR "%s: napi group #%d doesn't exist\n", __func__, group);
+		return MV_FAIL;
+	}
+
+	/* update group's CPU mask - remove old CPUs using this group */
+	for_each_possible_cpu(cpu)
+		if ((1 << cpu) & napi_group->cpu_mask)
+			pp->cpu_config[cpu]->napi_group = NULL;
+
+	napi_group->cpu_mask = cpu_mask;
+	napi_group->cause_rx_tx = 0;
+
+	for_each_possible_cpu(cpu)
+		if ((1 << cpu) & cpu_mask)
+			pp->cpu_config[cpu]->napi_group = napi_group;
+
+	return 0;
+}
+
+int mv_pp2_eth_napi_set_rxq_affinity(int port, int group, int rxq_mask)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+	struct napi_group_ctrl *napi_group;
+	int i;
+
+	if (pp == NULL) {
+		printk(KERN_ERR "%s: pp is null \n", __func__);
+		return MV_FAIL;
+	}
+	if ((group < 0) || (group >= MV_ETH_MAX_NAPI_GROUPS)) {
+		printk(KERN_ERR "%s: group number is higher than %d\n", __func__, MV_ETH_MAX_NAPI_GROUPS - 1);
+		return -1;
+	}
+	if (pp->flags & MV_ETH_F_STARTED) {
+		printk(KERN_ERR "%s: port %d must be stopped\n", __func__, port);
+		return -EINVAL;
+	}
+
+	/* check that rxq_mask doesn't have rxq that belong to other group */
+	for (i = 0; i < MV_ETH_MAX_NAPI_GROUPS; i++) {
+		napi_group = pp->napi_group[i];
+		if ((!napi_group) || (i == group))
+			continue;
+
+		if (napi_group->rxq_mask & rxq_mask) {
+			printk(KERN_ERR "%s: rxqs/cpus mask contains rxq that is already in other group(%d) - %d\n",
+				__func__, i, rxq_mask);
+			return MV_FAIL;
+		}
+	}
+
+	napi_group = pp->napi_group[group];
+	if (napi_group == NULL) {
+		printk(KERN_ERR "%s: napi group #%d doesn't exist\n", __func__, group);
+		return MV_FAIL;
+	}
+
+	napi_group->rxq_mask = rxq_mask;
+	napi_group->cause_rx_tx = 0;
+
+	return 0;
+}
+
+/**********************************************************/
+
+struct eth_port *mv_pp2_port_by_id(unsigned int port)
+{
+	if (mv_pp2_ports && (port < mv_pp2_ports_num))
+		return mv_pp2_ports[port];
+
+	return NULL;
+}
+
+/* return the first port in port_mask that is up, or -1 if all ports are down */
+static int mv_pp2_port_up_get(unsigned int port_mask)
+{
+	int port;
+	struct eth_port *pp;
+
+	for (port = 0; port < mv_pp2_ports_num; port++) {
+		if (!((1 << port) & port_mask))
+			continue;
+
+		pp = mv_pp2_port_by_id(port);
+		if (pp == NULL)
+			continue;
+
+		if (pp->flags & MV_ETH_F_STARTED)
+			return port;
+	}
+
+	return -1;
+}
+
+static inline int mv_pp2_skb_mh_add(struct sk_buff *skb, u16 mh)
+{
+       /* sanity: Check that there is place for MH in the buffer */
+       if (skb_headroom(skb) < MV_ETH_MH_SIZE) {
+		printk(KERN_ERR "%s: skb (%p) doesn't have place for MH, head=%p, data=%p\n",
+		__func__, skb, skb->head, skb->data);
+		return 1;
+	}
+
+	/* Prepare place for MH header */
+	skb->len += MV_ETH_MH_SIZE;
+	skb->data -= MV_ETH_MH_SIZE;
+	*((u16 *) skb->data) = mh;
+
+	return 0;
+}
+
+static inline int mv_pp2_mh_skb_skip(struct sk_buff *skb)
+{
+	__skb_pull(skb, MV_ETH_MH_SIZE);
+	return MV_ETH_MH_SIZE;
+}
+
+void mv_pp2_ctrl_txdone(int num)
+{
+	mv_ctrl_pp2_txdone = num;
+}
+
+int mv_pp2_ctrl_tx_flag(int port, u32 flag, u32 val)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+	u32 bit_flag = (fls(flag) - 1);
+
+	if (!pp)
+		return -ENODEV;
+
+	if (val)
+		set_bit(bit_flag, &(pp->tx_spec.flags));
+	else
+		clear_bit(bit_flag, &(pp->tx_spec.flags));
+
+	return 0;
+}
+
+int mv_pp2_ctrl_flag(int port, u32 flag, u32 val)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+	u32 bit_flag = (fls(flag) - 1);
+
+	if (!pp)
+		return -ENODEV;
+
+	if (val)
+		set_bit(bit_flag, &(pp->flags));
+	else
+		clear_bit(bit_flag, &(pp->flags));
+
+	return 0;
+}
+
+int mv_pp2_ctrl_dbg_flag(int port, u32 flag, u32 val)
+{
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+	u32 bit_flag = (fls(flag) - 1);
+
+	if (!pp)
+		return -ENODEV;
+
+	if (val)
+		pp->dbg_flags |= (1 << bit_flag);
+	else
+		pp->dbg_flags &= ~(1 << bit_flag);
+#endif /* CONFIG_MV_PP2_DEBUG_CODE */
+
+	return 0;
+}
+
+/* mv_pp2_ctrl_pool_port_map_get					*
+ *     - Return ports map use this BM pool			*/
+int mv_pp2_ctrl_pool_port_map_get(int pool)
+{
+	struct bm_pool *ppool;
+
+	if ((pool < 0) || (pool >= MV_ETH_BM_POOLS)) {
+		pr_err("%s: Invalid pool number (%d)\n", __func__, pool);
+		return -1;
+	}
+
+	ppool = &mv_pp2_pool[pool];
+	if (ppool == NULL) {
+		pr_err("%s: BM pool %d is not initialized\n", __func__, pool);
+		return -1;
+	}
+	return ppool->port_map;
+}
+
+/* mv_pp2_ctrl_pool_buf_num_set					*
+ *     - Set number of buffers for BM pool			*
+ *     - Add or remove buffers to this pool accordingly		*/
+int mv_pp2_ctrl_pool_buf_num_set(int port, int pool, int buf_num)
+{
+	unsigned long flags = 0;
+	struct bm_pool *ppool;
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (pp == NULL) {
+		pr_err("%s: port %d does not exist\n" , __func__, port);
+		return -EINVAL;
+	}
+
+	if ((pool < 0) || (pool >= MV_ETH_BM_POOLS)) {
+		pr_err("%s: Invalid pool number (%d)\n", __func__, pool);
+		return -1;
+	}
+
+	ppool = &mv_pp2_pool[pool];
+	if (ppool == NULL) {
+		pr_err("%s: BM pool %d is not initialized\n", __func__, pool);
+		return -1;
+	}
+
+	MV_ETH_LOCK(&ppool->lock, flags);
+	if (ppool->buf_num > buf_num)
+		mv_pp2_pool_free(pool, ppool->buf_num - buf_num);
+	else
+		mv_pp2_pool_add(pp, pool, buf_num - ppool->buf_num);
+
+	MV_ETH_UNLOCK(&ppool->lock, flags);
+
+	return 0;
+}
+
+/* mv_pp2_ctrl_pool_size_set				*
+ *     - Set buffer size for BM pool			*
+ *     - All ports using this pool must be stopped	*
+ *     - Re-allocate all buffers			*/
+int mv_pp2_ctrl_pool_size_set(int pool, int total_size)
+{
+	unsigned long flags = 0;
+	struct eth_port *pp;
+	struct bm_pool *ppool = &mv_pp2_pool[pool];
+	int port, pkt_size, buf_size, pkts_num;
+
+	port = mv_pp2_port_up_get(ppool->port_map);
+	if (port != -1) {
+		pr_err("%s: Can't change pool %d buffer size, while port %d is up\n",
+			__func__, pool, port);
+		return -EINVAL;
+	}
+
+	if (MV_ETH_BM_POOL_IS_HWF(ppool->type)) {
+		pkt_size = RX_HWF_MAX_PKT_SIZE(total_size);
+		buf_size = RX_HWF_BUF_SIZE(pkt_size);
+	} else {
+		pkt_size = RX_MAX_PKT_SIZE(total_size);
+		buf_size = RX_BUF_SIZE(pkt_size);
+	}
+
+
+	for (port = 0; port < mv_pp2_ports_num; port++) {
+		if (!((1 << port) & ppool->port_map))
+			continue;
+
+		pp = mv_pp2_port_by_id(port);
+		if (pp == NULL)
+			continue;
+
+		/* If this pool is used as long pool, then it is expected that MTU will be smaller than buffer size */
+		if (MV_ETH_BM_POOL_IS_LONG(ppool->type) && (RX_PKT_SIZE(pp->dev->mtu) > pkt_size))
+			pr_warn("%s: port %d MTU (%d) is larger than requested packet size (%d) [total size = %d]\n",
+				__func__, port, RX_PKT_SIZE(pp->dev->mtu), pkt_size, total_size);
+	}
+
+	MV_ETH_LOCK(&ppool->lock, flags);
+	pkts_num = ppool->buf_num;
+	mv_pp2_pool_free(pool, pkts_num);
+	ppool->pkt_size = pkt_size;
+	mv_pp2_pool_add(NULL, pool, pkts_num);
+	mvBmPoolBufSizeSet(pool, buf_size);
+	MV_ETH_UNLOCK(&ppool->lock, flags);
+
+	pr_info("%s: BM pool %d:\n", __func__, pool);
+	pr_info("       packet size = %d, buffer size = %d, total bytes per buffer = %d, true buffer size = %d\n",
+		pkt_size, buf_size, total_size, (int)RX_TRUE_SIZE(total_size));
+
+	return 0;
+}
+
+/* detach port from old pool */
+int mv_pp2_ctrl_pool_detach(int port, struct bm_pool *pool)
+{
+	unsigned long flags = 0;
+	/*TODO remove struct bm_pool *pool;*/
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (pp == NULL) {
+		pr_err("%s: port %d does not exist\n" , __func__, port);
+		return -EINVAL;
+	}
+
+	if (pool == NULL) {
+		pr_err("%s: pool is null\n" , __func__);
+		return -EINVAL;
+	}
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		pr_err("%s: port %d must be stopped before\n", __func__, port);
+		return -EINVAL;
+	}
+
+
+	pool->port_map &= ~(1 << port);
+
+	if (!pool->port_map) {
+		MV_ETH_LOCK(&pool->lock, flags);
+		mv_pp2_pool_free(pool->pool, pool->buf_num);
+
+		pool->type = MV_ETH_BM_FREE;
+		pool->pkt_size = 0;
+
+		mvPp2BmPoolBufSizeSet(pool->pool, 0);
+		MV_ETH_UNLOCK(&pool->lock, flags);
+	}
+
+	return MV_OK;
+}
+
+#ifdef CONFIG_MV_ETH_PP2_1
+static int mv_pp2_hwf_long_pool_attach(int port, int pool)
+{
+	int txp, txq;
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (pp == NULL) {
+		pr_err("%s: port %d does not exist\n" , __func__, port);
+		return -EINVAL;
+	}
+
+	for (txp = 0; txp < pp->txp_num; txp++)
+		for (txq = 0; txq < CONFIG_MV_PP2_TXQ; txq++)
+			mvPp2TxqBmLongPoolSet(port, txp, txq, pool);
+
+	return MV_OK;
+}
+
+static int mv_pp2_hwf_short_pool_attach(int port, int pool)
+{
+	int txp, txq;
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (pp == NULL) {
+		pr_err("%s: port %d does not exist\n" , __func__, port);
+		return -EINVAL;
+	}
+
+	for (txp = 0; txp < pp->txp_num; txp++)
+		for (txq = 0; txq < CONFIG_MV_PP2_TXQ; txq++)
+			mvPp2TxqBmShortPoolSet(port, txp, txq, pool);
+
+	return MV_OK;
+}
+
+#else
+
+/* Init classifer MTU */
+/* the same MTU for all Ports/Queues */
+static int mv_pp2_tx_mtu_set(int port, int mtu)
+{
+	int txp;
+
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (pp == NULL) {
+		pr_err("%s: port %d does not exist\n" , __func__, port);
+		return -EINVAL;
+	}
+
+	for (txp = 0; txp < pp->txp_num; txp++)
+		mvPp2V0ClsHwMtuSet(MV_PPV2_PORT_PHYS(port), txp, RX_PKT_SIZE(mtu));
+
+	return MV_OK;
+
+}
+
+#endif /* CONFIG_MV_ETH_PP2_1 */
+
+int mv_pp2_ctrl_long_pool_set(int port, int pool)
+{
+	unsigned long flags = 0;
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+	struct bm_pool *old_pool;
+	int rxq, pkt_size = RX_PKT_SIZE(pp->dev->mtu);
+
+	if (pp == NULL) {
+		pr_err("%s: port %d does not exist\n" , __func__, port);
+		return -EINVAL;
+	}
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		pr_err("%s: port %d must be stopped before\n", __func__, port);
+		return -EINVAL;
+	}
+
+	old_pool = pp->pool_long;
+	if (old_pool) {
+		if (old_pool->pool == pool)
+			return 0;
+
+		if (pp->hwf_pool_long != pp->pool_long)
+			if (mv_pp2_ctrl_pool_detach(port, old_pool))
+				return -EINVAL;
+	}
+
+	pp->pool_long = mv_pp2_pool_use(pp, pool, MV_ETH_BM_SWF_LONG, pkt_size);
+	if (!pp->pool_long)
+		return -EINVAL;
+	MV_ETH_LOCK(&pp->pool_long->lock, flags);
+	pp->pool_long->port_map |= (1 << port);
+	MV_ETH_UNLOCK(&pp->pool_long->lock, flags);
+
+	for (rxq = 0; rxq < pp->rxq_num; rxq++)
+		mvPp2RxqBmLongPoolSet(port, rxq, pp->pool_long->pool);
+
+	return 0;
+}
+
+int mv_pp2_ctrl_short_pool_set(int port, int pool)
+{
+	unsigned long flags = 0;
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+	struct bm_pool *old_pool;
+	int rxq;
+
+	if (pp == NULL) {
+		pr_err("%s: port %d does not exist\n" , __func__, port);
+		return -EINVAL;
+	}
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		pr_err("%s: port %d must be stopped before\n", __func__, port);
+		return -EINVAL;
+	}
+
+	old_pool = pp->pool_short;
+	if (old_pool) {
+		if (old_pool->pool == pool)
+			return 0;
+
+		if (pp->hwf_pool_short != pp->pool_short)
+			if (mv_pp2_ctrl_pool_detach(port, old_pool))
+				return -EINVAL;
+	}
+
+	pp->pool_short = mv_pp2_pool_use(pp, pool, MV_ETH_BM_SWF_SHORT, MV_ETH_BM_SHORT_PKT_SIZE);
+	if (!pp->pool_short)
+		return -EINVAL;
+	MV_ETH_LOCK(&pp->pool_short->lock, flags);
+	pp->pool_short->port_map |= (1 << port);
+	MV_ETH_UNLOCK(&pp->pool_short->lock, flags);
+
+	for (rxq = 0; rxq < pp->rxq_num; rxq++)
+		mvPp2RxqBmShortPoolSet(port, rxq, pp->pool_short->pool);
+
+	return 0;
+}
+
+int mv_pp2_ctrl_hwf_long_pool_set(int port, int pool)
+{
+	unsigned long flags = 0;
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+	struct bm_pool *old_pool;
+	int pkt_size = RX_PKT_SIZE(pp->dev->mtu);
+
+	if (pp == NULL) {
+		pr_err("%s: port %d does not exist\n" , __func__, port);
+		return -EINVAL;
+	}
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		pr_err("%s: port %d must be stopped before\n", __func__, port);
+		return -EINVAL;
+	}
+
+	old_pool = pp->hwf_pool_long;
+	if (old_pool) {
+		if (old_pool->pool == pool)
+			return 0;
+
+		if (pp->hwf_pool_long != pp->pool_long)
+			if (mv_pp2_ctrl_pool_detach(port, old_pool))
+				return -EINVAL;
+	}
+
+	pp->hwf_pool_long = mv_pp2_pool_use(pp, pool, MV_ETH_BM_HWF_LONG, pkt_size);
+	if (!pp->hwf_pool_long)
+		return -EINVAL;
+	MV_ETH_LOCK(&pp->hwf_pool_long->lock, flags);
+	pp->hwf_pool_long->port_map |= (1 << port);
+	MV_ETH_UNLOCK(&pp->hwf_pool_long->lock, flags);
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	mv_pp2_hwf_long_pool_attach(pp->port, pp->hwf_pool_long->pool);
+#else
+	mvPp2PortHwfBmPoolSet(pp->port, pp->hwf_pool_short->pool, pp->hwf_pool_long->pool);
+#endif
+
+	return 0;
+}
+
+int mv_pp2_ctrl_hwf_short_pool_set(int port, int pool)
+{
+	unsigned long flags = 0;
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+	struct bm_pool *old_pool;
+
+	if (pp == NULL) {
+		pr_err("%s: port %d does not exist\n" , __func__, port);
+		return -EINVAL;
+	}
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		pr_err("%s: port %d must be stopped before\n", __func__, port);
+		return -EINVAL;
+	}
+
+	old_pool = pp->hwf_pool_short;
+	if (old_pool) {
+		if (old_pool->pool == pool)
+			return 0;
+
+		if (pp->hwf_pool_short != pp->pool_short)
+			if (mv_pp2_ctrl_pool_detach(port, old_pool))
+				return -EINVAL;
+	}
+	pp->hwf_pool_short = mv_pp2_pool_use(pp, pool, MV_ETH_BM_HWF_SHORT, MV_ETH_BM_SHORT_HWF_PKT_SIZE);
+	if (!pp->hwf_pool_short)
+		return -EINVAL;
+	MV_ETH_LOCK(&pp->hwf_pool_short->lock, flags);
+	pp->hwf_pool_short->port_map |= (1 << port);
+	MV_ETH_UNLOCK(&pp->hwf_pool_short->lock, flags);
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	mv_pp2_hwf_short_pool_attach(pp->port, pp->hwf_pool_short->pool);
+#else
+	mvPp2PortHwfBmPoolSet(pp->port, pp->hwf_pool_short->pool, pp->hwf_pool_long->pool);
+#endif
+
+	return 0;
+}
+
+int mv_pp2_ctrl_set_poll_rx_weight(int port, u32 weight)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+	int i;
+
+	if (pp == NULL) {
+		printk(KERN_INFO "port doens not exist (%d) in %s\n" , port, __func__);
+		return -EINVAL;
+	}
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		printk(KERN_ERR "Port %d must be stopped before\n", port);
+		return -EINVAL;
+	}
+
+	if (weight > 255)
+		weight = 255;
+	pp->weight = weight;
+
+	for (i = 0; i < MV_ETH_MAX_NAPI_GROUPS; i++) {
+		if (!pp->napi_group[i])
+			continue;
+		pp->napi_group[i]->napi->weight = pp->weight;
+	}
+
+	return 0;
+}
+
+int mv_pp2_ctrl_rxq_size_set(int port, int rxq, int value)
+{
+	struct eth_port *pp;
+	struct rx_queue	*rxq_ctrl;
+
+	if (mvPp2PortCheck(port))
+		return -EINVAL;
+
+	if (mvPp2MaxCheck(rxq, CONFIG_MV_PP2_RXQ, "rxq"))
+		return -EINVAL;
+
+	if ((value <= 0) || (value > 0x3FFF) || (value % 16)) {
+		pr_err("Invalid RXQ size %d\n", value);
+		return -EINVAL;
+	}
+
+	pp = mv_pp2_port_by_id(port);
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return -EINVAL;
+	}
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		pr_err("Port %d must be stopped before\n", port);
+		return -EINVAL;
+	}
+
+	if (value % 16 != 0) {
+		printk(KERN_ERR "invalid rxq size\n");
+		return -EINVAL;
+	}
+
+	rxq_ctrl = &pp->rxq_ctrl[rxq];
+	if ((rxq_ctrl->q) && (rxq_ctrl->rxq_size != value)) {
+		/* Reset is required when RXQ ring size is changed */
+		mvPp2RxqReset(pp->port, rxq);
+		mvPp2RxqDelete(pp->port, rxq);
+		rxq_ctrl->q = NULL;
+	}
+	pp->rxq_ctrl[rxq].rxq_size = value;
+
+	/* New RXQ will be created during mv_pp2_start_internals */
+	return 0;
+}
+
+static int mv_pp2_txq_size_validate(struct tx_queue *txq_ctrl, int txq_size)
+{
+	int txq_min_size, txq_max_size = MV_PP2_TXQ_DESC_SIZE_MASK;
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	txq_min_size = 3 * (nr_cpu_ids * txq_ctrl->rsvd_chunk);
+#else
+	/* At least 16 descriptors per CPU */
+	txq_min_size = txq_ctrl->hwf_size + 16 * nr_cpu_ids;
+#endif /* CONFIG_MV_ETH_PP2_1 */
+
+	if ((txq_size < txq_min_size) || (txq_size > txq_max_size)) {
+		pr_err("Invalid TXQ size %d. Valid range: %d .. %d\n",
+			txq_size, txq_min_size, txq_max_size);
+		return -EINVAL;
+	}
+	/* txq_size must be aligned to 16 bytes */
+	if (txq_size % (1 << MV_PP2_TXQ_DESC_SIZE_OFFSET)) {
+		pr_warn("txq_size %d is not aligned %d, rounded to %d\n",
+			txq_size, 1 << MV_PP2_TXQ_DESC_SIZE_OFFSET,
+			txq_size & MV_PP2_TXQ_DESC_SIZE_MASK);
+		txq_size = txq_size & MV_PP2_TXQ_DESC_SIZE_MASK;
+	}
+	return txq_size;
+}
+
+static void mv_pp2_txq_size_set(struct tx_queue *txq_ctrl, int txq_size)
+{
+	int cpu;
+	struct txq_cpu_ctrl *txq_cpu_ptr;
+
+	txq_ctrl->txq_size = txq_size;
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	txq_ctrl->hwf_size = txq_ctrl->txq_size - (nr_cpu_ids * txq_ctrl->rsvd_chunk);
+	txq_ctrl->swf_size = txq_ctrl->txq_size - 2 * (nr_cpu_ids * txq_ctrl->rsvd_chunk);
+
+	for_each_possible_cpu(cpu) {
+		txq_cpu_ptr = &txq_ctrl->txq_cpu[cpu];
+
+		txq_cpu_ptr->txq_size = txq_ctrl->txq_size;
+	}
+#else
+	txq_ctrl->hwf_size = CONFIG_MV_PP2_TXQ_HWF_DESC;
+
+	for_each_possible_cpu(cpu) {
+		txq_cpu_ptr = &txq_ctrl->txq_cpu[cpu];
+
+		txq_cpu_ptr->txq_size = (txq_ctrl->txq_size - txq_ctrl->hwf_size) / nr_cpu_ids;
+	}
+#endif /* CONFIG_MV_ETH_PP2_1 */
+}
+
+/* set <txp/txq> SWF request chunk size */
+int mv_pp2_ctrl_txq_chunk_set(int port, int txp, int txq, int chunk_size)
+{
+	struct tx_queue *txq_ctrl;
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (pp == NULL) {
+		printk(KERN_INFO "port does not exist (%d) in %s\n" , port, __func__);
+		return -EINVAL;
+	}
+
+	txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_PP2_TXQ + txq];
+	if (!txq_ctrl) {
+		printk(KERN_INFO "queue does not exist (%d) in %s\n" , port, __func__);
+		return -EINVAL;
+	}
+	/* chunk_size must be less than swf_size */
+	if (chunk_size > (txq_ctrl->swf_size)) {
+		pr_err("Chunk size %d must be less or equal than swf size %d\n",
+			chunk_size, txq_ctrl->swf_size);
+		return -EINVAL;
+	}
+	txq_ctrl->rsvd_chunk = chunk_size;
+
+	return MV_OK;
+}
+
+/* swf_size is in use only in ppv2.1, ignored in ppv2.0 */
+int mv_pp2_ctrl_txq_limits_set(int port, int txp, int txq, int hwf_size, int swf_size)
+{
+	int txq_size;
+	struct tx_queue *txq_ctrl;
+	struct eth_port *pp;
+
+	if (mvPp2TxpCheck(port, txp))
+		return -EINVAL;
+
+	if (mvPp2MaxCheck(txq, CONFIG_MV_PP2_TXQ, "txq"))
+		return -EINVAL;
+
+	pp = mv_pp2_port_by_id(port);
+	if (pp == NULL) {
+		pr_err("port does not exist (%d) in %s\n" , port, __func__);
+		return -EINVAL;
+	}
+
+	txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_PP2_TXQ + txq];
+
+	if (!txq_ctrl) {
+		pr_err("queue is null %s\n", __func__);
+		return -EINVAL;
+	}
+
+	txq_size = txq_ctrl->txq_size;
+
+	if (txq_size < hwf_size) {
+		pr_err("invalid hwf size, must be less or equal to txq size (%d)\n", txq_size);
+		return -EINVAL;
+	}
+
+	if (hwf_size % 16 != 0) {
+		pr_err("invalid hwf size, must be aligned to 16\n");
+		return -EINVAL;
+	}
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	if (hwf_size < swf_size) {
+		pr_err("Invalid size params, swf size must be less than hwf size\n");
+		return -EINVAL;
+	}
+	txq_ctrl->swf_size = swf_size;
+#endif /* CONFIG_MV_ETH_PP2_1 */
+
+	txq_ctrl->hwf_size = hwf_size;
+
+	mvPp2TxqHwfSizeSet(port, txp, txq, hwf_size);
+
+	return 0;
+}
+
+int mv_pp2_ctrl_txq_size_set(int port, int txp, int txq, int txq_size)
+{
+	struct tx_queue *txq_ctrl;
+	struct eth_port *pp;
+
+	if (mvPp2TxpCheck(port, txp))
+		return -EINVAL;
+
+	if (mvPp2MaxCheck(txq, CONFIG_MV_PP2_TXQ, "txq"))
+		return -EINVAL;
+
+	pp = mv_pp2_port_by_id(port);
+	if (pp == NULL) {
+		pr_err("Port %d does not exist\n", port);
+		return -EINVAL;
+	}
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		pr_err("Port %d must be stopped before\n", port);
+		return -EINVAL;
+	}
+
+	txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_PP2_TXQ + txq];
+	if (!txq_ctrl) {
+		pr_err("TXQ is not exist\n");
+		return -EINVAL;
+	}
+
+	txq_size = mv_pp2_txq_size_validate(txq_ctrl, txq_size);
+	if (txq_size < 0)
+		return -EINVAL;
+
+	if ((txq_ctrl->q) && (txq_ctrl->txq_size != txq_size)) {
+		/* Clean and Reset of txq is required when TXQ ring size is changed */
+		mv_pp2_txq_clean(port, txp, txq);
+
+		/* TBD: If needed to send dummy packets to reset number of descriptors reserved by all CPUs */
+
+		mvPp2TxqReset(port, txp, txq);
+		mv_pp2_txq_delete(pp, txq_ctrl);
+	}
+	mv_pp2_txq_size_set(txq_ctrl, txq_size);
+
+	/* New TXQ will be created during mv_eth_start_internals */
+	return 0;
+}
+
+
+/* Set TXQ for CPU originated packets */
+int mv_pp2_ctrl_txq_cpu_def(int port, int txp, int txq, int cpu)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if ((cpu >= nr_cpu_ids) || (cpu < 0)) {
+		printk(KERN_ERR "cpu #%d is out of range: from 0 to %d\n",
+			cpu, nr_cpu_ids - 1);
+		return -EINVAL;
+	}
+
+	if (txq >= CONFIG_MV_PP2_TXQ) {
+		pr_err("txq #%d is out of range: from 0 to %d\n", txq, CONFIG_MV_PP2_TXQ - 1);
+		return -EINVAL;
+	}
+
+	if (mvPp2TxpCheck(port, txp))
+		return -EINVAL;
+
+	if ((pp == NULL) || (pp->txq_ctrl == NULL))
+		return -ENODEV;
+
+	pp->tx_spec.txp = txp;
+	pp->cpu_config[cpu]->txq = txq;
+
+	return 0;
+}
+
+int mv_pp2_eth_ctrl_tx_mh(int port, u16 mh)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (!pp)
+		return -ENODEV;
+
+	pp->tx_spec.tx_mh = mh;
+
+	return 0;
+}
+
+int mv_pp2_ctrl_tx_cmd_dsa(int port, u16 dsa_tag)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (!pp)
+		return -ENODEV;
+
+	pp->tx_spec.hw_cmd[0] &= ~PP2_TX_DSA_ALL_MASK;
+	pp->tx_spec.hw_cmd[0] |= PP2_TX_DSA_MASK(dsa_tag);
+
+	return 0;
+}
+
+int mv_pp2_ctrl_tx_cmd_color(int port, u16 color)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (!pp)
+		return -ENODEV;
+
+	pp->tx_spec.hw_cmd[0] &= ~PP2_TX_COLOR_ALL_MASK;
+	pp->tx_spec.hw_cmd[0] |= PP2_TX_COLOR_MASK(color);
+
+	return 0;
+}
+
+int mv_pp2_ctrl_tx_cmd_gem_id(int port, u16 gem_port_id)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (!pp)
+		return -ENODEV;
+
+	pp->tx_spec.hw_cmd[0] &= ~PP2_TX_GEMPID_ALL_MASK;
+	pp->tx_spec.hw_cmd[0] |= PP2_TX_GEMPID_MASK(gem_port_id);
+
+	return 0;
+}
+
+int mv_pp2_ctrl_tx_cmd_pon_fec(int port, u16 pon_fec)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (!pp)
+		return -ENODEV;
+
+	if (pon_fec == 0)
+		pp->tx_spec.hw_cmd[2] &= ~PP2_TX_PON_FEC_MASK;
+	else
+		pp->tx_spec.hw_cmd[2] |= PP2_TX_PON_FEC_MASK;
+
+	return 0;
+}
+
+int mv_eth_ctrl_tx_cmd_gem_oem(int port, u16 gem_oem)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (!pp)
+		return -ENODEV;
+
+	if (gem_oem == 0)
+		pp->tx_spec.hw_cmd[2] &= ~PP2_TX_GEM_OEM_MASK;
+	else
+		pp->tx_spec.hw_cmd[2] |= PP2_TX_GEM_OEM_MASK;
+
+	return 0;
+}
+
+int mv_pp2_ctrl_tx_cmd_mod(int port, u16 mod)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+	u32 mask = (PP2_TX_MOD_DSCP_MASK | PP2_TX_MOD_PRIO_MASK | PP2_TX_MOD_DSCP_EN_MASK
+			| PP2_TX_MOD_PRIO_EN_MASK | PP2_TX_MOD_GEMPID_EN_MASK);
+
+	if (!pp)
+		return -ENODEV;
+
+	/* This command update all fields in the TX descriptor offset 0x14 */
+	/* MOD_DSCP - 6 bits, MOD_PRIO - 3bits, MOD_DSCP_EN - 1b, MOD_PRIO_EN - 1b, MOD_GEMPID_EN - 1b */
+	pp->tx_spec.hw_cmd[1] &= ~mask;
+	pp->tx_spec.hw_cmd[1] |= ((mod << PP2_TX_MOD_DSCP_OFFS) & mask);
+
+	return 0;
+}
+
+int mv_pp2_ctrl_tx_cmd_pme_dptr(int port, u16 pme_dptr)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (!pp)
+		return -ENODEV;
+
+	pp->tx_spec.hw_cmd[2] &= ~PP2_TX_PME_DPTR_ALL_MASK;
+	pp->tx_spec.hw_cmd[2] |= PP2_TX_PME_DPTR_MASK(pme_dptr);
+
+	return 0;
+}
+
+int mv_pp2_ctrl_tx_cmd_pme_prog(int port, u16 pme_prog)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (!pp)
+		return -ENODEV;
+
+	pp->tx_spec.hw_cmd[2] &= ~PP2_TX_PME_IPTR_ALL_MASK;
+	pp->tx_spec.hw_cmd[2] |= PP2_TX_PME_IPTR_MASK(pme_prog);
+
+	return 0;
+}
+
+
+#ifdef CONFIG_MV_PP2_TX_SPECIAL
+/* Register special transmit check function */
+void mv_pp2_tx_special_check_func(int port,
+					int (*func)(int port, struct net_device *dev, struct sk_buff *skb,
+								struct mv_pp2_tx_spec *tx_spec_out))
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (pp)
+		pp->tx_special_check = func;
+}
+#endif /* CONFIG_MV_PP2_TX_SPECIAL */
+
+#ifdef CONFIG_MV_PP2_RX_SPECIAL
+/* Register special transmit check function */
+void mv_pp2_rx_special_proc_func(int port, int (*func)(int port, int rxq, struct net_device *dev,
+							struct sk_buff *skb, struct pp2_rx_desc *rx_desc))
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (pp)
+		pp->rx_special_proc = func;
+}
+#endif /* CONFIG_MV_PP2_RX_SPECIAL */
+
+static inline u16 mv_pp2_select_txq(struct net_device *dev, struct sk_buff *skb)
+{
+	return smp_processor_id();
+}
+
+void mv_pp2_eth_link_status_print(int port)
+{
+	MV_ETH_PORT_STATUS link;
+
+#ifdef CONFIG_MV_PP2_PON
+	if (MV_PP2_IS_PON_PORT(port))
+		mv_pon_link_status(&link);
+	else
+#endif /* CONFIG_MV_PP2_PON */
+		mvGmacLinkStatus(port, &link);
+
+	if (link.linkup) {
+		printk(KERN_CONT "link up");
+		printk(KERN_CONT ", %s duplex", (link.duplex == MV_ETH_DUPLEX_FULL) ? "full" : "half");
+		printk(KERN_CONT ", speed ");
+
+		printk(KERN_CONT "%s\n", mvGmacSpeedStrGet(link.speed));
+	} else
+		printk(KERN_CONT "link down\n");
+
+}
+
+static void mv_pp2_rx_error(struct eth_port *pp, struct pp2_rx_desc *rx_desc)
+{
+	STAT_ERR(pp->stats.rx_error++);
+
+	if (pp->dev)
+		pp->dev->stats.rx_errors++;
+
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+	if ((pp->dbg_flags & MV_ETH_F_DBG_RX) == 0)
+		return;
+
+	if (!printk_ratelimit())
+		return;
+
+	switch (rx_desc->status & PP2_RX_ERR_CODE_MASK) {
+	case PP2_RX_ERR_CRC:
+		printk(KERN_ERR "giga #%d: bad rx status %08x (crc error), size=%d\n",
+				pp->port, rx_desc->status, rx_desc->dataSize);
+		break;
+	case PP2_RX_ERR_OVERRUN:
+		printk(KERN_ERR "giga #%d: bad rx status %08x (overrun error), size=%d\n",
+				pp->port, rx_desc->status, rx_desc->dataSize);
+		break;
+	/*case NETA_RX_ERR_LEN:*/
+	case PP2_RX_ERR_RESOURCE:
+		printk(KERN_ERR "giga #%d: bad rx status %08x (resource error), size=%d\n",
+				pp->port, rx_desc->status, rx_desc->dataSize);
+		break;
+	}
+	mv_pp2_rx_desc_print(rx_desc);
+#endif /* CONFIG_MV_PP2_DEBUG_CODE */
+}
+
+void mv_pp2_skb_print(struct sk_buff *skb)
+{
+	printk(KERN_ERR "skb=%p: head=%p, data=%p, tail=%p, end=%p\n", skb, skb->head, skb->data, skb->tail, skb->end);
+	printk(KERN_ERR "\t mac=%p, network=%p, transport=%p\n",
+			skb->mac_header, skb->network_header, skb->transport_header);
+	printk(KERN_ERR "\t truesize=%d, len=%d, data_len=%d, mac_len=%d\n",
+		skb->truesize, skb->len, skb->data_len, skb->mac_len);
+	printk(KERN_ERR "\t users=%d, dataref=%d, nr_frags=%d, gso_size=%d, gso_segs=%d\n",
+	       atomic_read(&skb->users), atomic_read(&skb_shinfo(skb)->dataref),
+	       skb_shinfo(skb)->nr_frags, skb_shinfo(skb)->gso_size, skb_shinfo(skb)->gso_segs);
+	printk(KERN_ERR "\t proto=%d, ip_summed=%d, priority=%d\n", ntohs(skb->protocol), skb->ip_summed, skb->priority);
+#ifdef CONFIG_MV_PP2_SKB_RECYCLE
+	printk(KERN_ERR "\t skb_recycle=%p, hw_cookie=0x%x\n", skb->skb_recycle, skb->hw_cookie);
+#endif /* CONFIG_MV_PP2_SKB_RECYCLE */
+}
+
+void mv_pp2_rx_desc_print(struct pp2_rx_desc *desc)
+{
+	int i;
+	u32 *words = (u32 *) desc;
+
+	printk(KERN_ERR "RX desc - %p: ", desc);
+	for (i = 0; i < 8; i++)
+		printk(KERN_CONT "%8.8x ", *words++);
+	printk(KERN_CONT "\n");
+
+	printk(KERN_CONT "pkt_size=%d, L3_offs=%d, IP_hlen=%d, ",
+	       desc->dataSize,
+	       (desc->status & PP2_RX_L3_OFFSET_MASK) >> PP2_RX_L3_OFFSET_OFFS,
+	       (desc->status & PP2_RX_IP_HLEN_MASK) >> PP2_RX_IP_HLEN_OFFS);
+
+	printk(KERN_CONT "L2=%s, ",
+		mvPrsL2InfoStr((desc->parserInfo & PP2_RX_L2_CAST_MASK) >> PP2_RX_L2_CAST_OFFS));
+
+	printk(KERN_CONT "VLAN=");
+	printk(KERN_CONT "%s, ",
+		mvPrsVlanInfoStr((desc->parserInfo & PP2_RX_VLAN_INFO_MASK) >> PP2_RX_VLAN_INFO_OFFS));
+
+	printk(KERN_CONT "L3=");
+	if (PP2_RX_L3_IS_IP4(desc->status))
+		printk(KERN_CONT "IPv4 (hdr=%s), ", PP2_RX_IP4_HDR_ERR(desc->status) ? "bad" : "ok");
+	else if (PP2_RX_L3_IS_IP4_OPT(desc->status))
+		printk(KERN_CONT "IPv4 Options (hdr=%s), ", PP2_RX_IP4_HDR_ERR(desc->status) ? "bad" : "ok");
+	else if (PP2_RX_L3_IS_IP4_OTHER(desc->status))
+		printk(KERN_CONT "IPv4 Other (hdr=%s), ", PP2_RX_IP4_HDR_ERR(desc->status) ? "bad" : "ok");
+	else if (PP2_RX_L3_IS_IP6(desc->status))
+		printk(KERN_CONT "IPv6, ");
+	else if (PP2_RX_L3_IS_IP6_EXT(desc->status))
+		printk(KERN_CONT "IPv6 Ext, ");
+	else
+		printk(KERN_CONT "Unknown, ");
+
+	if (desc->status & PP2_RX_IP_FRAG_MASK)
+		printk(KERN_CONT "Frag, ");
+
+	printk(KERN_CONT "L4=");
+	if (PP2_RX_L4_IS_TCP(desc->status))
+		printk(KERN_CONT "TCP (csum=%s)", (desc->status & PP2_RX_L4_CHK_OK_MASK) ? "Ok" : "Bad");
+	else if (PP2_RX_L4_IS_UDP(desc->status))
+		printk(KERN_CONT "UDP (csum=%s)", (desc->status & PP2_RX_L4_CHK_OK_MASK) ? "Ok" : "Bad");
+	else
+		printk(KERN_CONT "Unknown");
+
+	printk(KERN_CONT "\n");
+
+	printk(KERN_INFO "Lookup_ID=0x%x, cpu_code=0x%x\n",
+		(desc->parserInfo & PP2_RX_LKP_ID_MASK) >> PP2_RX_LKP_ID_OFFS,
+		(desc->parserInfo & PP2_RX_CPU_CODE_MASK) >> PP2_RX_CPU_CODE_OFFS);
+}
+EXPORT_SYMBOL(mv_pp2_rx_desc_print);
+
+void mv_pp2_tx_desc_print(struct pp2_tx_desc *desc)
+{
+	int i;
+	u32 *words = (u32 *) desc;
+
+	printk(KERN_ERR "TX desc - %p: ", desc);
+	for (i = 0; i < 8; i++)
+		printk(KERN_CONT "%8.8x ", *words++);
+	printk(KERN_CONT "\n");
+}
+EXPORT_SYMBOL(mv_pp2_tx_desc_print);
+
+void mv_pp2_pkt_print(struct eth_port *pp, struct eth_pbuf *pkt)
+{
+	printk(KERN_ERR "pkt: len=%d off=%d pool=%d "
+	       "skb=%p pa=%lx buf=%p\n",
+	       pkt->bytes, pkt->offset, pkt->pool,
+	       pkt->osInfo, pkt->physAddr, pkt->pBuf);
+
+	mvDebugMemDump(pkt->pBuf + pkt->offset, 64, 1);
+	mvOsCacheInvalidate(pp->dev->dev.parent, pkt->pBuf + pkt->offset, 64);
+}
+EXPORT_SYMBOL(mv_pp2_pkt_print);
+
+static inline int mv_pp2_tx_done_policy(u32 cause)
+{
+	return fls(cause) - 1;
+}
+
+inline int mv_pp2_rx_policy(u32 cause)
+{
+	return fls(cause) - 1;
+}
+
+static inline int mv_pp2_txq_dscp_map_get(struct eth_port *pp, MV_U8 dscp)
+{
+	MV_U8 q = pp->txq_dscp_map[dscp];
+
+	if (q == MV_ETH_TXQ_INVALID)
+		return pp->cpu_config[smp_processor_id()]->txq;
+
+	return q;
+}
+
+static inline int mv_pp2_tx_policy(struct eth_port *pp, struct sk_buff *skb)
+{
+	int txq = pp->cpu_config[smp_processor_id()]->txq;
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *iph = ip_hdr(skb);
+
+		txq = mv_pp2_txq_dscp_map_get(pp, TOS_TO_DSCP(iph->tos));
+	}
+	return txq;
+}
+
+#ifdef CONFIG_MV_PP2_SKB_RECYCLE
+int mv_pp2_skb_recycle(struct sk_buff *skb)
+{
+	int pool, cpu;
+	__u32 bm = skb->hw_cookie;
+	phys_addr_t phys_addr;
+	struct bm_pool *ppool;
+	bool is_recyclable;
+
+	skb->hw_cookie = 0;
+	skb->skb_recycle = NULL;
+
+	cpu = mv_pp2_bm_cookie_cpu_get(bm);
+
+	pool = mv_pp2_bm_cookie_pool_get(bm);
+	if (mvPp2MaxCheck(pool, MV_ETH_BM_POOLS, "bm_pool"))
+		return 1;
+
+	ppool = &mv_pp2_pool[pool];
+
+	/*
+	WA for Linux network stack issue that prevent skb recycle.
+	If dev_kfree_skb_any called from interrupt context or interrupts disabled context
+	skb->users will be zero when skb_recycle callback function is called.
+	In such case skb_recycle_check function returns error because skb->users != 1.
+	*/
+	if (atomic_read(&skb->users) == 0)
+		atomic_set(&skb->users, 1);
+
+	if (bm & MV_ETH_BM_COOKIE_F_INVALID) {
+		/* hw_cookie is not valid for recycle */
+		STAT_DBG(ppool->stats.bm_cookie_err++);
+		is_recyclable = false;
+	} else if (!skb_recycle_check(skb, ppool->pkt_size)) {
+		STAT_DBG(ppool->stats.skb_recycled_err++);
+		is_recyclable = false;
+	} else
+		is_recyclable = true;
+
+	if (is_recyclable) {
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+		/* Sanity check */
+		if (SKB_TRUESIZE(skb->end - skb->head) != skb->truesize) {
+			pr_err("%s: skb=%p, Wrong SKB_TRUESIZE(end - head)=%d\n",
+				__func__, skb, SKB_TRUESIZE(skb->end - skb->head));
+			mv_pp2_skb_print(skb);
+		}
+#endif /* CONFIG_MV_PP2_DEBUG_CODE */
+
+		STAT_DBG(ppool->stats.skb_recycled_ok++);
+
+		phys_addr = dma_map_single(global_dev->parent,
+					   skb->head,
+					   RX_BUF_SIZE(ppool->pkt_size),
+					   DMA_FROM_DEVICE);
+		/*phys_addr = virt_to_phys(skb->head);*/
+#ifdef CONFIG_MV_PP2_SWF_HWF_CORRUPTION_WA
+		/* Invalidate only part of the buffer used by CPU */
+		if ((ppool->type == MV_ETH_BM_MIXED_LONG) || (ppool->type == MV_ETH_BM_MIXED_SHORT))
+			mv_pp2_iocc_l1_l2_cache_inv(skb->head, skb->len + skb_headroom(skb));
+#endif /* CONFIG_MV_PP2_SWF_HWF_CORRUPTION_WA */
+	} else {
+/*
+		pr_err("%s: Failed - skb=%p, pool=%d, bm_cookie=0x%x\n",
+			__func__, skb, MV_ETH_BM_COOKIE_POOL(bm), bm.word);
+
+		mv_pp2_skb_print(skb);
+*/
+		skb = mv_pp2_skb_alloc(NULL, ppool, &phys_addr,  GFP_ATOMIC);
+		if (!skb) {
+			pr_err("Linux processing - Can't refill\n");
+			return 1;
+		}
+	}
+	mv_pp2_pool_refill(ppool, bm, phys_addr, (unsigned long) skb);
+	atomic_dec(&ppool->in_use);
+
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+/*
+	if (cpu != smp_processor_id()) {
+		pr_warning("%s on CPU=%d other than RX=%d\n", __func__,
+			smp_processor_id(), cpu);
+	}
+*/
+#endif /* CONFIG_MV_PP2_DEBUG_CODE */
+
+	return !is_recyclable;
+}
+EXPORT_SYMBOL(mv_pp2_skb_recycle);
+
+#endif /* CONFIG_MV_PP2_SKB_RECYCLE */
+
+static struct sk_buff *mv_pp2_skb_alloc(struct eth_port *pp, struct bm_pool *pool,
+					phys_addr_t *phys_addr, gfp_t gfp_mask)
+{
+	struct sk_buff *skb;
+	phys_addr_t pa;
+	struct device *dev;
+
+	if (pp == NULL)
+		dev = global_dev->parent;
+	else
+		dev = pp->dev->dev.parent;
+
+	skb = __dev_alloc_skb(pool->pkt_size, gfp_mask);
+	if (!skb) {
+		STAT_ERR(pool->stats.skb_alloc_oom++);
+		return NULL;
+	}
+	/* pa = virt_to_phys(skb->head); */
+	if (phys_addr) {
+		pa = dma_map_single(dev, skb->head, RX_BUF_SIZE(pool->pkt_size), DMA_FROM_DEVICE);
+		*phys_addr = pa;
+
+#ifdef CONFIG_MV_PP2_SWF_HWF_CORRUPTION_WA
+		if ((pool->type == MV_ETH_BM_MIXED_LONG) || (pool->type == MV_ETH_BM_MIXED_SHORT))
+			mv_pp2_iocc_l1_l2_cache_inv(skb->head, RX_BUF_SIZE(pool->pkt_size));
+#endif
+	}
+
+	STAT_DBG(pool->stats.skb_alloc_ok++);
+
+	return skb;
+}
+
+static unsigned char *mv_pp2_hwf_buff_alloc(struct bm_pool *pool, phys_addr_t *phys_addr)
+{
+	unsigned char *buff;
+	int size = RX_HWF_BUF_SIZE(pool->pkt_size);
+
+	buff = mvOsMalloc(size);
+	if (!buff)
+		return NULL;
+
+	if (phys_addr != NULL)
+		*phys_addr = mvOsCacheInvalidate(NULL, buff, size);
+
+	return buff;
+}
+
+static inline void mv_pp2_txq_buf_free(struct eth_port *pp, u32 shadow)
+{
+	if (!shadow)
+		return;
+
+	if (shadow & MV_ETH_SHADOW_SKB) {
+		shadow &= ~MV_ETH_SHADOW_SKB;
+		dev_kfree_skb_any((struct sk_buff *)shadow);
+		STAT_DBG(pp->stats.tx_skb_free++);
+	} else if (shadow & MV_ETH_SHADOW_EXT) {
+		shadow &= ~MV_ETH_SHADOW_EXT;
+		mv_pp2_extra_pool_put(pp, (void *)shadow);
+	} else {
+		/* TBD - return buffer back to BM */
+		printk(KERN_ERR "%s: unexpected buffer - not skb and not ext\n", __func__);
+	}
+}
+
+
+static inline void mv_pp2_txq_bufs_free(struct eth_port *pp, struct txq_cpu_ctrl *txq_cpu, int num)
+{
+	u32 shadow;
+	int i;
+
+	/* Free buffers that was not freed automatically by BM */
+	for (i = 0; i < num; i++) {
+		shadow = mv_pp2_shadow_get_pop(txq_cpu);
+		mv_pp2_txq_buf_free(pp, shadow);
+	}
+}
+
+inline u32 mv_pp2_txq_done(struct eth_port *pp, struct tx_queue *txq_ctrl)
+{
+	int tx_done;
+	struct txq_cpu_ctrl *txq_cpu_ptr = &txq_ctrl->txq_cpu[smp_processor_id()];
+
+	/* get number of transmitted TX descriptors by this CPU */
+	tx_done = mvPp2TxqSentDescProc(pp->port, txq_ctrl->txp, txq_ctrl->txq);
+	if (!tx_done)
+		return tx_done;
+/*
+	printk(KERN_ERR "tx_done: txq_count=%d, port=%d, txp=%d, txq=%d, tx_done=%d\n",
+			txq_ctrl->txq_count, pp->port, txq_ctrl->txp, txq_ctrl->txq, tx_done);
+*/
+	/* packet sent by outer tx function */
+	if (txq_cpu_ptr->txq_count < tx_done) {
+		pr_warn("%s: txq_count = %d < tx_done = %d\n", __func__, txq_cpu_ptr->txq_count, tx_done);
+		return tx_done;
+	}
+
+	mv_pp2_txq_bufs_free(pp, txq_cpu_ptr, tx_done);
+
+	txq_cpu_ptr->txq_count -= tx_done;
+	STAT_DBG(txq_cpu_ptr->stats.txq_txdone += tx_done);
+
+	return tx_done;
+}
+EXPORT_SYMBOL(mv_pp2_txq_done);
+
+/* Reuse skb if possible, allocate new skb and move to BM pool */
+inline int mv_pp2_refill(struct eth_port *pp, struct bm_pool *ppool, __u32 bm, int is_recycle)
+{
+	struct sk_buff *skb;
+	phys_addr_t phys_addr;
+
+	if (is_recycle && (mv_pp2_bm_in_use_read(ppool) < ppool->in_use_thresh))
+		return 0;
+
+	/* No recycle or too many buffers are in use - alloc new skb */
+	skb = mv_pp2_skb_alloc(pp, ppool, &phys_addr, GFP_ATOMIC);
+	if (!skb) {
+		pr_err("Linux processing - Can't refill\n");
+		return 1;
+	}
+	STAT_DBG(ppool->stats.no_recycle++);
+
+	mv_pp2_pool_refill(ppool, bm, phys_addr, (unsigned long) skb);
+	atomic_dec(&ppool->in_use);
+
+	return 0;
+}
+EXPORT_SYMBOL(mv_pp2_refill);
+
+static inline MV_U32 mv_pp2_skb_tx_csum(struct eth_port *pp, struct sk_buff *skb)
+{
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		int   ip_hdr_len = 0;
+		MV_U8 l4_proto;
+
+		if (skb->protocol == htons(ETH_P_IP)) {
+			struct iphdr *ip4h = ip_hdr(skb);
+
+			/* Calculate IPv4 checksum and L4 checksum */
+			ip_hdr_len = ip4h->ihl;
+			l4_proto = ip4h->protocol;
+		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+			/* If not IPv4 - must be ETH_P_IPV6 - Calculate only L4 checksum */
+			struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+			/* Read l4_protocol from one of IPv6 extra headers ?????? */
+			if (skb_network_header_len(skb) > 0)
+				ip_hdr_len = (skb_network_header_len(skb) >> 2);
+			l4_proto = ip6h->nexthdr;
+		} else {
+			STAT_DBG(pp->stats.tx_csum_sw++);
+			return PP2_TX_L4_CSUM_NOT;
+		}
+		STAT_DBG(pp->stats.tx_csum_hw++);
+
+		return mvPp2TxqDescCsum(skb_network_offset(skb), skb->protocol, ip_hdr_len, l4_proto);
+	}
+
+	STAT_DBG(pp->stats.tx_csum_sw++);
+	return PP2_TX_L4_CSUM_NOT | PP2_TX_IP_CSUM_DISABLE_MASK;
+}
+
+inline struct pp2_rx_desc *mv_pp2_rx_prefetch(struct eth_port *pp, MV_PP2_PHYS_RXQ_CTRL *rx_ctrl,
+									  int rx_done, int rx_todo)
+{
+	struct pp2_rx_desc	*rx_desc, *next_desc;
+
+	rx_desc = mvPp2RxqNextDescGet(rx_ctrl);
+	if (rx_done == 0) {
+		/* First descriptor in the NAPI loop */
+		mvOsCacheLineInv(pp->dev->dev.parent, rx_desc);
+		prefetch(rx_desc);
+	}
+	if ((rx_done + 1) == rx_todo) {
+		/* Last descriptor in the NAPI loop - prefetch are not needed */
+		return rx_desc;
+	}
+	/* Prefetch next descriptor */
+	next_desc = mvPp2RxqDescGet(rx_ctrl);
+	mvOsCacheLineInv(pp->dev->dev.parent, next_desc);
+	prefetch(next_desc);
+
+	return rx_desc;
+}
+
+void mv_pp2_buff_hdr_rx(struct eth_port *pp, struct pp2_rx_desc *rx_desc)
+{
+	u32 rx_status = rx_desc->status;
+	int mc_id, pool_id;
+	PP2_BUFF_HDR *buff_hdr;
+	MV_U32 buff_phys_addr, buff_virt_addr, buff_phys_addr_next, buff_virt_addr_next;
+	int count = 0;
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	int qset, is_grntd;
+
+	qset = (rx_desc->bmQset & PP2_RX_BUFF_QSET_NUM_MASK) >> PP2_RX_BUFF_QSET_NUM_OFFS;
+	is_grntd = (rx_desc->bmQset & PP2_RX_BUFF_TYPE_MASK) >> PP2_RX_BUFF_TYPE_OFFS;
+#endif
+
+	pool_id = (rx_status & PP2_RX_BM_POOL_ALL_MASK) >> PP2_RX_BM_POOL_ID_OFFS;
+	buff_phys_addr = rx_desc->bufPhysAddr;
+	buff_virt_addr = rx_desc->bufCookie;
+
+	do {
+		buff_hdr = (PP2_BUFF_HDR *)(((struct sk_buff *)buff_virt_addr)->head);
+		mc_id = PP2_BUFF_HDR_INFO_MC_ID(buff_hdr->info);
+
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+		if (pp->dbg_flags & MV_ETH_F_DBG_BUFF_HDR) {
+			printk(KERN_ERR "buff header #%d:\n", count);
+			mvDebugMemDump(buff_hdr, 32, 1);
+
+			printk(KERN_ERR "byte count = %d   MC ID = %d   last = %d\n",
+				buff_hdr->byteCount, mc_id,
+				PP2_BUFF_HDR_INFO_IS_LAST(buff_hdr->info));
+		}
+#endif
+
+		count++;
+		buff_phys_addr_next = buff_hdr->nextBuffPhysAddr;
+		buff_virt_addr_next = buff_hdr->nextBuffVirtAddr;
+
+		/* release buffer */
+#ifdef CONFIG_MV_ETH_PP2_1
+		mvBmPoolQsetMcPut(pool_id, buff_phys_addr, buff_virt_addr, qset, is_grntd, mc_id, 0);
+
+		/* Qset number and buffer type of next buffer */
+		qset = (buff_hdr->bmQset & PP2_BUFF_HDR_BM_QSET_NUM_MASK) >> PP2_BUFF_HDR_BM_QSET_NUM_OFFS;
+		is_grntd = (buff_hdr->bmQset & PP2_BUFF_HDR_BM_QSET_TYPE_MASK) >> PP2_BUFF_HDR_BM_QSET_TYPE_OFFS;
+#else
+		mvBmPoolMcPut(pool_id, buff_phys_addr, buff_virt_addr, mc_id, 0);
+#endif
+		buff_phys_addr = buff_phys_addr_next;
+		buff_virt_addr = buff_virt_addr_next;
+
+		STAT_DBG((&mv_pp2_pool[pool_id])->stats.bm_put++);
+
+	} while (!PP2_BUFF_HDR_INFO_IS_LAST(buff_hdr->info));
+
+	mvOsCacheLineInv(pp->dev->dev.parent, rx_desc);
+	STAT_INFO(pp->stats.rx_buf_hdr++);
+}
+EXPORT_SYMBOL(mv_pp2_buff_hdr_rx);
+
+
+void mv_pp2_buff_hdr_rx_dump(struct eth_port *pp, struct pp2_rx_desc *rx_desc)
+{
+	int mc_id;
+	PP2_BUFF_HDR *buff_hdr;
+	MV_U32 buff_phys_addr, buff_virt_addr;
+
+	int count = 0;
+
+	buff_phys_addr = rx_desc->bufPhysAddr;
+	buff_virt_addr = rx_desc->bufCookie;
+		printk(KERN_ERR "------------------------\n");
+	do {
+		printk(KERN_ERR "buff_virt_addr = %x\n", buff_virt_addr);
+		buff_hdr = (PP2_BUFF_HDR *)(((struct eth_pbuf *)buff_virt_addr)->pBuf);
+
+		printk(KERN_ERR "buff_hdr = %p\n", buff_hdr);
+		mc_id = PP2_BUFF_HDR_INFO_MC_ID(buff_hdr->info);
+
+		printk(KERN_ERR "buff header #%d:\n", ++count);
+		mvDebugMemDump(buff_hdr, buff_hdr->byteCount, 1);
+
+		printk(KERN_ERR "byte count = %d   MC ID = %d   last = %d\n",
+			buff_hdr->byteCount, mc_id,
+			PP2_BUFF_HDR_INFO_IS_LAST(buff_hdr->info));
+
+		buff_phys_addr = buff_hdr->nextBuffPhysAddr;
+		buff_virt_addr  = buff_hdr->nextBuffVirtAddr;
+
+	} while (!PP2_BUFF_HDR_INFO_IS_LAST(buff_hdr->info));
+
+}
+
+static inline int mv_pp2_rx(struct eth_port *pp, int rx_todo, int rxq, struct napi_struct *napi)
+{
+	struct net_device *dev = pp->dev;
+	MV_PP2_PHYS_RXQ_CTRL *rx_ctrl = pp->rxq_ctrl[rxq].q;
+	int rx_done, pool;
+	struct pp2_rx_desc *rx_desc;
+	u32 rx_status;
+	int rx_bytes;
+	struct sk_buff *skb;
+	__u32 bm;
+	struct bm_pool *ppool;
+#ifdef CONFIG_NETMAP
+	if (pp->flags & MV_ETH_F_IFCAP_NETMAP) {
+		int netmap_done;
+		if (netmap_rx_irq(pp->dev, 0, &netmap_done))
+			return 1; /* seems to be ignored */
+	}
+#endif /* CONFIG_NETMAP */
+	/* Get number of received packets */
+	rx_done = mvPp2RxqBusyDescNumGet(pp->port, rxq);
+	mvOsCacheIoSync(pp->dev->dev.parent);
+
+	if ((rx_todo > rx_done) || (rx_todo < 0))
+		rx_todo = rx_done;
+
+	if (rx_todo == 0)
+		return 0;
+
+	rx_done = 0;
+
+	/* Fairness NAPI loop */
+	while (rx_done < rx_todo) {
+
+		if (pp->flags & MV_ETH_F_RX_DESC_PREFETCH)
+			rx_desc = mv_pp2_rx_prefetch(pp, rx_ctrl, rx_done, rx_todo);
+		else {
+			rx_desc = mvPp2RxqNextDescGet(rx_ctrl);
+			mvOsCacheLineInv(pp->dev->dev.parent, rx_desc);
+			prefetch(rx_desc);
+		}
+		rx_done++;
+
+#if defined(MV_CPU_BE)
+		mvPPv2RxqDescSwap(rx_desc);
+#endif /* MV_CPU_BE */
+
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+		if (pp->dbg_flags & MV_ETH_F_DBG_RX) {
+			printk(KERN_ERR "\n%s: port=%d, cpu=%d\n", __func__, pp->port, smp_processor_id());
+			mv_pp2_rx_desc_print(rx_desc);
+		}
+#endif /* CONFIG_MV_PP2_DEBUG_CODE */
+
+		rx_status = rx_desc->status;
+		bm = mv_pp2_bm_cookie_build(rx_desc);
+		pool = mv_pp2_bm_cookie_pool_get(bm);
+		ppool = &mv_pp2_pool[pool];
+
+		/* check if buffer header is used */
+		if ((rx_status & (PP2_RX_HWF_SYNC_MASK | PP2_RX_BUF_HDR_MASK)) == PP2_RX_BUF_HDR_MASK) {
+			mv_pp2_buff_hdr_rx(pp, rx_desc);
+			continue;
+		}
+
+		if (rx_status & PP2_RX_ES_MASK) {
+			mv_pp2_rx_error(pp, rx_desc);
+			mv_pp2_pool_refill(ppool, bm, rx_desc->bufPhysAddr, rx_desc->bufCookie);
+			mvOsCacheLineInv(pp->dev->dev.parent, rx_desc);
+			continue;
+		}
+		skb = (struct sk_buff *)rx_desc->bufCookie;
+
+		if (rx_status & PP2_RX_HWF_SYNC_MASK) {
+			/* Remember sync bit for TX */
+			pr_info("\n%s: port=%d, rxq=%d, cpu=%d, skb=%p - Sync packet received\n",
+			__func__, pp->port, rxq, smp_processor_id(), skb);
+			mv_pp2_rx_desc_print(rx_desc);
+			sync_head = skb->head;
+			sync_rx_desc = rx_status;
+		}
+
+		/*dma_unmap_single(NULL, rx_desc->bufPhysAddr, RX_BUF_SIZE(ppool->pkt_size), DMA_FROM_DEVICE);*/
+
+		/* Prefetch two cache lines from beginning of packet */
+		if (pp->flags & MV_ETH_F_RX_PKT_PREFETCH) {
+			prefetch(skb->data);
+			prefetch(skb->data + CPU_D_CACHE_LINE_SIZE);
+		}
+
+		atomic_inc(&ppool->in_use);
+		STAT_DBG(pp->stats.rxq[rxq]++);
+		dev->stats.rx_packets++;
+
+		rx_bytes = rx_desc->dataSize;
+		dev->stats.rx_bytes += rx_bytes;
+
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+		if (pp->dbg_flags & MV_ETH_F_DBG_RX) {
+			printk(KERN_ERR "skb=%p, buf=%p, ksize=%d\n", skb, skb->head, ksize(skb->head));
+			mvDebugMemDump(skb->head + NET_SKB_PAD, 64, 1);
+		}
+#endif /* CONFIG_MV_PP2_DEBUG_CODE */
+
+		/* Linux processing */
+		__skb_put(skb, rx_bytes);
+
+#if defined(CONFIG_MV_PP2_RX_SPECIAL)
+		/* Special RX processing */
+		if (mvPp2IsRxSpecial(rx_desc->parserInfo)) {
+			if (pp->rx_special_proc) {
+				if (pp->rx_special_proc(pp->port, rxq, dev, skb, rx_desc)) {
+					STAT_INFO(pp->stats.rx_special++);
+
+					/* Refill processing */
+					mv_pp2_refill(pp, ppool, bm, 0);
+					mvOsCacheLineInv(pp->dev->dev.parent, rx_desc);
+					continue;
+				}
+			}
+		}
+#endif /* CONFIG_MV_PP2_RX_SPECIAL */
+
+#ifdef CONFIG_MV_PP2_SKB_RECYCLE
+		if (mv_pp2_is_recycle()) {
+			skb->skb_recycle = mv_pp2_skb_recycle;
+			skb->hw_cookie = bm;
+		}
+#endif /* CONFIG_MV_PP2_SKB_RECYCLE */
+
+		mv_pp2_rx_csum(pp, rx_desc, skb);
+
+		if (pp->tagged) {
+			mv_mux_rx(skb, pp->port, napi);
+			STAT_DBG(pp->stats.rx_tagged++);
+			skb = NULL;
+		} else {
+			dev->stats.rx_bytes -= mv_pp2_mh_skb_skip(skb);
+			skb->protocol = eth_type_trans(skb, dev);
+		}
+
+		if (skb && (dev->features & NETIF_F_GRO)) {
+			STAT_DBG(pp->stats.rx_gro++);
+			STAT_DBG(pp->stats.rx_gro_bytes += skb->len);
+
+			rx_status = napi_gro_receive(napi, skb);
+			skb = NULL;
+		}
+
+		if (skb) {
+			STAT_DBG(pp->stats.rx_netif++);
+			rx_status = netif_receive_skb(skb);
+			STAT_DBG((rx_status == 0) ? pp->stats.rx_drop_sw += 0 : pp->stats.rx_drop_sw++);
+		}
+
+		/* Refill processing: */
+		mv_pp2_refill(pp, ppool, bm, mv_pp2_is_recycle());
+		mvOsCacheLineInv(pp->dev->dev.parent, rx_desc);
+	}
+
+	/* Update RxQ management counters */
+	wmb();
+	mvPp2RxqDescNumUpdate(pp->port, rxq, rx_done, rx_done);
+
+	return rx_done;
+}
+
+static int mv_pp2_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct eth_port *pp = MV_ETH_PRIV(dev);
+	int frags = 0, cpu = smp_processor_id();
+	u32 tx_cmd, bufPhysAddr;
+	struct mv_pp2_tx_spec tx_spec, *tx_spec_ptr = NULL;
+	struct tx_queue *txq_ctrl = NULL;
+	struct txq_cpu_ctrl *txq_cpu_ptr = NULL;
+	struct aggr_tx_queue *aggr_txq_ctrl = NULL;
+	struct pp2_tx_desc *tx_desc;
+	unsigned long flags = 0;
+
+	if (!test_bit(MV_ETH_F_STARTED_BIT, &(pp->flags))) {
+		STAT_INFO(pp->stats.netdev_stop++);
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+		if (pp->dbg_flags & MV_ETH_F_DBG_TX)
+			printk(KERN_ERR "%s: STARTED_BIT = 0, packet is dropped.\n", __func__);
+#endif /* CONFIG_MV_PP2_DEBUG_CODE */
+		goto out;
+	}
+
+	if (!(netif_running(dev))) {
+		printk(KERN_ERR "!netif_running() in %s\n", __func__);
+		goto out;
+	}
+
+#if defined(CONFIG_MV_PP2_TX_SPECIAL)
+	if (pp->tx_special_check) {
+
+		if (pp->tx_special_check(pp->port, dev, skb, &tx_spec)) {
+			STAT_INFO(pp->stats.tx_special++);
+			if (tx_spec.tx_func) {
+				tx_spec.tx_func(skb->data, skb->len, &tx_spec);
+				goto out;
+			} else {
+				/* Check validity of tx_spec txp/txq must be CPU owned */
+				tx_spec_ptr = &tx_spec;
+
+				/* in routine cph_flow_mod_frwd,  if this packet should be discard,
+					txq will be assigned to value MV_ETH_TXQ_INVALID */
+				if (tx_spec_ptr->txq == MV_ETH_TXQ_INVALID)
+					goto out;
+			}
+		}
+	}
+#endif /* CONFIG_MV_PP2_TX_SPECIAL */
+
+	/* In case this port is tagged, check if SKB is tagged - i.e. SKB's source is MUX interface */
+	if (pp->tagged && (!MV_MUX_SKB_IS_TAGGED(skb))) {
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+		if (pp->dbg_flags & MV_ETH_F_DBG_TX)
+			pr_err("%s: port %d is tagged, skb not from MUX interface - packet is dropped.\n",
+				__func__, pp->port);
+#endif /* CONFIG_MV_PP2_DEBUG_CODE */
+
+		goto out;
+	}
+
+	/* Get TXQ (without BM) to send packet generated by Linux */
+	if (tx_spec_ptr == NULL) {
+		tx_spec_ptr = &pp->tx_spec;
+		tx_spec_ptr->txq = mv_pp2_tx_policy(pp, skb);
+	}
+
+	aggr_txq_ctrl = &aggr_txqs[smp_processor_id()];
+	txq_ctrl = &pp->txq_ctrl[tx_spec_ptr->txp * CONFIG_MV_PP2_TXQ + tx_spec_ptr->txq];
+	if (txq_ctrl == NULL) {
+		printk(KERN_ERR "%s: invalidate txp/txq (%d/%d)\n",
+			__func__, tx_spec_ptr->txp, tx_spec_ptr->txq);
+		goto out;
+	}
+	txq_cpu_ptr = &txq_ctrl->txq_cpu[cpu];
+
+	MV_ETH_LIGHT_LOCK(flags);
+
+#ifdef CONFIG_MV_PP2_TSO
+	/* GSO/TSO */
+	if (skb_is_gso(skb)) {
+		frags = mv_pp2_tx_tso(skb, dev, tx_spec_ptr, txq_ctrl, aggr_txq_ctrl);
+		goto out;
+	}
+#endif /* CONFIG_MV_PP2_TSO */
+
+	frags = skb_shinfo(skb)->nr_frags + 1;
+
+	if (tx_spec_ptr->flags & MV_ETH_TX_F_MH) {
+		if (mv_pp2_skb_mh_add(skb, tx_spec_ptr->tx_mh)) {
+			frags = 0;
+			goto out;
+		}
+	}
+
+	/* is enough descriptors? */
+#ifdef CONFIG_MV_ETH_PP2_1
+	if (mv_pp2_reserved_desc_num_proc(pp, tx_spec_ptr->txp, tx_spec_ptr->txq, frags) ||
+		mv_pp2_aggr_desc_num_check(aggr_txq_ctrl, frags)) {
+#else
+	if (mv_pp2_phys_desc_num_check(txq_cpu_ptr, frags) ||
+		mv_pp2_aggr_desc_num_check(aggr_txq_ctrl, frags)) {
+
+#endif
+
+		frags = 0;
+		goto out;
+	}
+
+	tx_desc = mvPp2AggrTxqNextDescGet(aggr_txq_ctrl->q);
+
+	tx_desc->physTxq = MV_PPV2_TXQ_PHYS(pp->port, tx_spec_ptr->txp, tx_spec_ptr->txq);
+
+	/* Don't use BM for Linux packets: NETA_TX_BM_ENABLE_MASK = 0 */
+	/* NETA_TX_PKT_OFFSET_MASK = 0 - for all descriptors */
+	tx_cmd = mv_pp2_skb_tx_csum(pp, skb);
+
+	if (tx_spec_ptr->flags & MV_ETH_TX_F_HW_CMD) {
+		tx_desc->hwCmd[0] = tx_spec_ptr->hw_cmd[0];
+		tx_desc->hwCmd[1] = tx_spec_ptr->hw_cmd[1];
+		tx_desc->hwCmd[2] = tx_spec_ptr->hw_cmd[2];
+	}
+
+	if (skb->head == sync_head)
+		tx_cmd |= PP2_TX_HWF_SYNC_MASK;
+
+	/* FIXME: beware of nonlinear --BK */
+	tx_desc->dataSize = skb_headlen(skb);
+	bufPhysAddr = mvOsCacheFlush(pp->dev->dev.parent, skb->data, tx_desc->dataSize);
+	tx_desc->pktOffset = bufPhysAddr & MV_ETH_TX_DESC_ALIGN;
+	tx_desc->bufPhysAddr = bufPhysAddr & (~MV_ETH_TX_DESC_ALIGN);
+
+	if (frags == 1) {
+		/*
+		 * First and Last descriptor
+		 */
+		if (tx_spec_ptr->flags & MV_ETH_TX_F_NO_PAD)
+			tx_cmd |= PP2_TX_F_DESC_MASK | PP2_TX_L_DESC_MASK | PP2_TX_PADDING_DISABLE_MASK;
+		else
+			tx_cmd |= PP2_TX_F_DESC_MASK | PP2_TX_L_DESC_MASK;
+
+		tx_desc->command = tx_cmd;
+		mv_pp2_tx_desc_flush(pp, tx_desc);
+		mv_pp2_shadow_push(txq_cpu_ptr, ((MV_ULONG) skb | MV_ETH_SHADOW_SKB));
+	} else {
+		/* First but not Last */
+		tx_cmd |= PP2_TX_F_DESC_MASK | PP2_TX_PADDING_DISABLE_MASK;
+
+		mv_pp2_shadow_push(txq_cpu_ptr, 0);
+
+		tx_desc->command = tx_cmd;
+		mv_pp2_tx_desc_flush(pp, tx_desc);
+
+		/* Continue with other skb fragments */
+		mv_pp2_tx_frag_process(pp, skb, aggr_txq_ctrl, txq_ctrl, tx_spec_ptr);
+		STAT_DBG(pp->stats.tx_sg++);
+	}
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	/* PPv2.1 - MAS 3.16, decrease number of reserved descriptors */
+	txq_cpu_ptr->reserved_num -= frags;
+#endif
+
+	txq_cpu_ptr->txq_count += frags;
+	aggr_txq_ctrl->txq_count += frags;
+
+	if (tx_cmd & PP2_TX_HWF_SYNC_MASK) {
+		pr_info("%s: port=%d, txp=%d, txq=%d, cpu=%d, skb=%p, rx_desc=0x%08x - Sync packet transmitted\n",
+			__func__, pp->port, tx_spec_ptr->txp, tx_spec_ptr->txq, smp_processor_id(),
+			skb, sync_rx_desc);
+		mv_pp2_tx_desc_print(tx_desc);
+		mvDebugMemDump(skb->data, 64, 1);
+		sync_head = NULL;
+		sync_rx_desc = 0;
+	}
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+	if (pp->dbg_flags & MV_ETH_F_DBG_TX) {
+		printk(KERN_ERR "\n");
+		printk(KERN_ERR "%s - eth_tx_%lu: cpu=%d, in_intr=0x%lx, port=%d, txp=%d, txq=%d\n",
+			dev->name, dev->stats.tx_packets, smp_processor_id(), in_interrupt(),
+			pp->port, tx_spec_ptr->txp, tx_spec_ptr->txq);
+		printk(KERN_ERR "\t skb=%p, head=%p, data=%p, size=%d\n", skb, skb->head, skb->data, skb->len);
+		pr_info("\t sync_head=%p, sync_rx_desc=0x%08x\n", sync_head, sync_rx_desc);
+		mv_pp2_tx_desc_print(tx_desc);
+		/*mv_pp2_skb_print(skb);*/
+		mvDebugMemDump(skb->data, 64, 1);
+	}
+#endif /* CONFIG_MV_PP2_DEBUG_CODE */
+	/* Enable transmit */
+	wmb();
+	mvPp2AggrTxqPendDescAdd(frags);
+
+	STAT_DBG(aggr_txq_ctrl->stats.txq_tx += frags);
+	STAT_DBG(txq_cpu_ptr->stats.txq_tx += frags);
+
+out:
+	if (frags > 0) {
+		dev->stats.tx_packets++;
+		dev->stats.tx_bytes += skb->len;
+	} else {
+		dev->stats.tx_dropped++;
+		dev_kfree_skb_any(skb);
+	}
+
+#ifndef CONFIG_MV_PP2_TXDONE_ISR
+	if (txq_ctrl) {
+		if (txq_cpu_ptr->txq_count >= mv_ctrl_pp2_txdone) {
+#ifdef CONFIG_MV_PP2_STAT_DIST
+			u32 tx_done = mv_pp2_txq_done(pp, txq_ctrl);
+
+			if (tx_done < pp->dist_stats.tx_done_dist_size)
+				pp->dist_stats.tx_done_dist[tx_done]++;
+#else
+			mv_pp2_txq_done(pp, txq_ctrl);
+#endif /* CONFIG_MV_PP2_STAT_DIST */
+		}
+		/* If after calling mv_pp2_txq_done, txq_ctrl->txq_count equals frags, we need to set the timer */
+		if ((txq_cpu_ptr->txq_count > 0)  && (txq_cpu_ptr->txq_count <= frags) && (frags > 0))
+			mv_pp2_add_tx_done_timer(pp->cpu_config[smp_processor_id()]);
+	}
+#endif /* CONFIG_MV_PP2_TXDONE_ISR */
+
+	if (txq_ctrl)
+		MV_ETH_LIGHT_UNLOCK(flags);
+
+	return NETDEV_TX_OK;
+}
+
+#ifdef CONFIG_MV_PP2_TSO
+/* Validate TSO */
+static inline int mv_pp2_tso_validate(struct sk_buff *skb, struct net_device *dev)
+{
+	if (!(dev->features & NETIF_F_TSO)) {
+		pr_err("error: (skb_is_gso(skb) returns true but features is not NETIF_F_TSO\n");
+		return 1;
+	}
+	if (skb_shinfo(skb)->frag_list != NULL) {
+		pr_err("***** ERROR: frag_list is not null\n");
+		return 1;
+	}
+	if (skb_shinfo(skb)->gso_segs == 1) {
+		pr_err("***** ERROR: only one TSO segment\n");
+		return 1;
+	}
+	if (skb->len <= skb_shinfo(skb)->gso_size) {
+		pr_err("***** ERROR: total_len (%d) less than gso_size (%d)\n", skb->len, skb_shinfo(skb)->gso_size);
+		return 1;
+	}
+	if ((htons(ETH_P_IP) != skb->protocol) || (ip_hdr(skb)->protocol != IPPROTO_TCP) || (tcp_hdr(skb) == NULL)) {
+		pr_err("***** ERROR: Protocol is not TCP over IP\n");
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline int mv_pp2_tso_build_hdr_desc(struct pp2_tx_desc *tx_desc, MV_U8 *data, struct eth_port *priv,
+					struct sk_buff *skb, struct txq_cpu_ctrl *txq_ctrl, u16 *mh,
+					int hdr_len, int size, MV_U32 tcp_seq, MV_U16 ip_id, int left_len)
+{
+	struct iphdr *iph;
+	struct tcphdr *tcph;
+	MV_U8 *mac;
+	MV_U32 bufPhysAddr;
+	int mac_hdr_len = skb_network_offset(skb);
+
+	mv_pp2_shadow_push(txq_ctrl, ((MV_ULONG)data | MV_ETH_SHADOW_EXT));
+
+	/* Reserve 2 bytes for IP header alignment */
+	mac = data + MV_ETH_MH_SIZE;
+	iph = (struct iphdr *)(mac + mac_hdr_len);
+
+	memcpy(mac, skb->data, hdr_len);
+
+	if (iph) {
+		iph->id = htons(ip_id);
+		iph->tot_len = htons(size + hdr_len - mac_hdr_len);
+	}
+
+	tcph = (struct tcphdr *)(mac + skb_transport_offset(skb));
+	tcph->seq = htonl(tcp_seq);
+
+	if (left_len) {
+		/* Clear all special flags for not last packet */
+		tcph->psh = 0;
+		tcph->fin = 0;
+		tcph->rst = 0;
+	}
+
+	if (mh) {
+		/* Start tarnsmit from MH - add 2 bytes to size */
+		*((MV_U16 *)data) = *mh;
+		/* increment ip_offset field in TX descriptor by 2 bytes */
+		mac_hdr_len += MV_ETH_MH_SIZE;
+		hdr_len += MV_ETH_MH_SIZE;
+	} else {
+		/* Start transmit from MAC */
+		data = mac;
+	}
+
+	tx_desc->dataSize = hdr_len;
+	tx_desc->command = mvPp2TxqDescCsum(mac_hdr_len, skb->protocol, ((u8 *)tcph - (u8 *)iph) >> 2, IPPROTO_TCP);
+	tx_desc->command |= PP2_TX_F_DESC_MASK;
+
+	bufPhysAddr = mvOsCacheFlush(priv->dev->dev.parent, data, tx_desc->dataSize);
+	tx_desc->pktOffset = bufPhysAddr & MV_ETH_TX_DESC_ALIGN;
+	tx_desc->bufPhysAddr = bufPhysAddr & (~MV_ETH_TX_DESC_ALIGN);
+
+	mv_pp2_tx_desc_flush(priv, tx_desc);
+
+	return hdr_len;
+}
+
+static inline int mv_pp2_tso_build_data_desc(struct eth_port *pp, struct pp2_tx_desc *tx_desc, struct sk_buff *skb,
+					     struct txq_cpu_ctrl *txq_ctrl, char *frag_ptr,
+					     int frag_size, int data_left, int total_left)
+{
+	MV_U32 bufPhysAddr;
+	int size, val = 0;
+
+	size = MV_MIN(frag_size, data_left);
+
+	tx_desc->dataSize = size;
+	bufPhysAddr = mvOsCacheFlush(pp->dev->dev.parent, frag_ptr, size);
+	tx_desc->pktOffset = bufPhysAddr & MV_ETH_TX_DESC_ALIGN;
+	tx_desc->bufPhysAddr = bufPhysAddr & (~MV_ETH_TX_DESC_ALIGN);
+
+	tx_desc->command = 0;
+
+	if (size == data_left) {
+		/* last descriptor in the TCP packet */
+		tx_desc->command = PP2_TX_L_DESC_MASK;
+
+		if (total_left == 0) {
+			/* last descriptor in SKB */
+			val = ((MV_ULONG) skb | MV_ETH_SHADOW_SKB);
+		}
+	}
+	mv_pp2_shadow_push(txq_ctrl, val);
+	mv_pp2_tx_desc_flush(pp, tx_desc);
+
+	return size;
+}
+
+/***********************************************************
+ * mv_pp2_tx_tso --                                        *
+ *   send a packet.                                        *
+ ***********************************************************/
+static int mv_pp2_tx_tso(struct sk_buff *skb, struct net_device *dev, struct mv_pp2_tx_spec *tx_spec,
+			 struct tx_queue *txq_ctrl, struct aggr_tx_queue *aggr_txq_ctrl)
+{
+	int ptxq, frag = 0;
+	int total_len, hdr_len, size, frag_size, data_left;
+	int total_desc_num, total_bytes = 0, max_desc_num = 0;
+	char *frag_ptr;
+	struct pp2_tx_desc *tx_desc;
+	struct txq_cpu_ctrl *txq_cpu_ptr = NULL;
+	MV_U16 ip_id, *mh = NULL;
+	MV_U32 tcp_seq = 0;
+	skb_frag_t *skb_frag_ptr;
+	const struct tcphdr *th = tcp_hdr(skb);
+	struct eth_port *priv = MV_ETH_PRIV(dev);
+	int i;
+
+	STAT_DBG(priv->stats.tx_tso++);
+
+	if (mv_pp2_tso_validate(skb, dev))
+		return 0;
+
+	/* Calculate expected number of TX descriptors */
+	max_desc_num = skb_shinfo(skb)->gso_segs * 2 + skb_shinfo(skb)->nr_frags;
+
+	if (mv_pp2_aggr_desc_num_check(aggr_txq_ctrl, max_desc_num)) {
+		STAT_DBG(priv->stats.tx_tso_no_resource++);
+		return 0;
+	}
+
+	txq_cpu_ptr = &txq_ctrl->txq_cpu[smp_processor_id()];
+
+	/* Check if there are enough descriptors in physical TXQ */
+#ifdef CONFIG_MV_ETH_PP2_1
+	if (mv_pp2_reserved_desc_num_proc(priv, tx_spec->txp, tx_spec->txq, max_desc_num)) {
+#else
+	if (mv_pp2_phys_desc_num_check(txq_cpu_ptr, max_desc_num)) {
+#endif
+		STAT_DBG(priv->stats.tx_tso_no_resource++);
+		return 0;
+	}
+
+	total_len = skb->len;
+	hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+
+	total_len -= hdr_len;
+	ip_id = ntohs(ip_hdr(skb)->id);
+	tcp_seq = ntohl(th->seq);
+
+	frag_size = skb_headlen(skb);
+	frag_ptr = skb->data;
+
+	if (frag_size < hdr_len) {
+		pr_err("***** ERROR: frag_size=%d, hdr_len=%d\n", frag_size, hdr_len);
+		return 0;
+	}
+
+	/* Skip header - we'll add header in another buffer (from extra pool) */
+	frag_size -= hdr_len;
+	frag_ptr += hdr_len;
+
+	/* A special case where the first skb's frag contains only the packet's header */
+	if (frag_size == 0) {
+		skb_frag_ptr = &skb_shinfo(skb)->frags[frag];
+
+		/* Move to next segment */
+		frag_size = skb_frag_ptr->size;
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3, 1, 10)
+		frag_ptr = page_address(skb_frag_ptr->page.p) + skb_frag_ptr->page_offset;
+#else
+		frag_ptr = page_address(skb_frag_ptr->page) + skb_frag_ptr->page_offset;
+#endif
+		frag++;
+	}
+	total_desc_num = 0;
+	ptxq = MV_PPV2_TXQ_PHYS(priv->port, tx_spec->txp, tx_spec->txq);
+
+	/* Each iteration - create new TCP segment */
+	while (total_len > 0) {
+		MV_U8 *data;
+
+		data_left = MV_MIN(skb_shinfo(skb)->gso_size, total_len);
+
+		/* Sanity check */
+		if (total_desc_num >= max_desc_num) {
+			pr_err("%s: Used TX descriptors number %d is larger than allocated %d\n",
+				__func__, total_desc_num, max_desc_num);
+			goto outNoTxDesc;
+		}
+
+		data = mv_pp2_extra_pool_get(priv);
+		if (!data) {
+			pr_err("Can't allocate extra buffer for TSO\n");
+			goto outNoTxDesc;
+		}
+
+		tx_desc = mvPp2AggrTxqNextDescGet(aggr_txq_ctrl->q);
+		total_desc_num++;
+
+		tx_desc->physTxq = ptxq;
+
+		total_len -= data_left;
+
+		if (tx_spec->flags & MV_ETH_TX_F_MH)
+			mh = &tx_spec->tx_mh;
+
+		/* prepare packet headers: MAC + IP + TCP */
+		size = mv_pp2_tso_build_hdr_desc(tx_desc, data, priv, skb, txq_cpu_ptr, mh,
+					hdr_len, data_left, tcp_seq, ip_id, total_len);
+
+		total_bytes += size;
+
+		/* Update packet's IP ID */
+		ip_id++;
+
+		while (data_left > 0) {
+
+			/* Sanity check */
+			if (total_desc_num >= max_desc_num) {
+				pr_err("%s: Used TX descriptors number %d is larger than allocated %d\n",
+					__func__, total_desc_num, max_desc_num);
+				goto outNoTxDesc;
+			}
+
+			tx_desc = mvPp2AggrTxqNextDescGet(aggr_txq_ctrl->q);
+			tx_desc->physTxq = ptxq;
+
+			total_desc_num++;
+
+			size = mv_pp2_tso_build_data_desc(priv, tx_desc, skb, txq_cpu_ptr,
+							  frag_ptr, frag_size, data_left, total_len);
+			total_bytes += size;
+			data_left -= size;
+
+			/* Update TCP sequence number */
+			tcp_seq += size;
+
+			/* Update frag size, and offset */
+			frag_size -= size;
+			frag_ptr += size;
+
+			if ((frag_size == 0) && (frag < skb_shinfo(skb)->nr_frags)) {
+				skb_frag_ptr = &skb_shinfo(skb)->frags[frag];
+
+				/* Move to next segment */
+				frag_size = skb_frag_ptr->size;
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3, 1, 10)
+				frag_ptr = page_address(skb_frag_ptr->page.p) + skb_frag_ptr->page_offset;
+#else
+				frag_ptr = page_address(skb_frag_ptr->page) + skb_frag_ptr->page_offset;
+#endif
+				frag++;
+			}
+		}
+	}
+
+	/* TCP segment is ready - transmit it */
+	wmb();
+	mvPp2AggrTxqPendDescAdd(total_desc_num);
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	/* PPv2.1 - MAS 3.16, decrease number of reserved descriptors */
+	txq_cpu_ptr->reserved_num -= total_desc_num;
+#endif
+
+	aggr_txq_ctrl->txq_count += total_desc_num;
+	txq_cpu_ptr->txq_count += total_desc_num;
+
+	STAT_DBG(priv->stats.tx_tso_bytes += total_bytes);
+	STAT_DBG(aggr_txq_ctrl->stats.txq_tx += total_desc_num);
+	STAT_DBG(txq_cpu_ptr->stats.txq_tx += total_desc_num);
+
+	return total_desc_num;
+
+outNoTxDesc:
+	/* No enough memory for packet header - rollback */
+	pr_err("%s: No TX descriptors - rollback %d, txq_count=%d, nr_frags=%d, skb=%p, len=%d, gso_segs=%d\n",
+			__func__, total_desc_num, aggr_txq_ctrl->txq_count, skb_shinfo(skb)->nr_frags,
+			skb, skb->len, skb_shinfo(skb)->gso_segs);
+	STAT_DBG(priv->stats.tx_tso_no_resource++);
+
+	for (i = 0; i < total_desc_num; i++) {
+		u32 shadow;
+
+		mv_pp2_shadow_dec_put(txq_cpu_ptr);
+		shadow = txq_cpu_ptr->shadow_txq[txq_cpu_ptr->shadow_txq_put_i];
+		mv_pp2_txq_buf_free(priv, shadow);
+		mvPp2AggrTxqPrevDescGet(aggr_txq_ctrl->q);
+	}
+
+	return 0;
+}
+
+#endif /* CONFIG_MV_PP2_TSO */
+
+/* Push packets received by the RXQ to BM pool */
+static void mv_pp2_rxq_drop_pkts(struct eth_port *pp, int rxq)
+{
+	struct pp2_rx_desc   *rx_desc;
+	int	                 rx_done, i;
+	MV_PP2_PHYS_RXQ_CTRL *rx_ctrl = pp->rxq_ctrl[rxq].q;
+
+	if (rx_ctrl == NULL)
+		return;
+
+	rx_done = mvPp2RxqBusyDescNumGet(pp->port, rxq);
+	mvOsCacheIoSync(pp->dev->dev.parent);
+
+	for (i = 0; i < rx_done; i++) {
+		__u32 bm;
+		int pool;
+		struct bm_pool *ppool;
+
+		rx_desc = mvPp2RxqNextDescGet(rx_ctrl);
+
+#if defined(MV_CPU_BE)
+		mvPPv2RxqDescSwap(rx_desc);
+#endif /* MV_CPU_BE */
+
+		bm = mv_pp2_bm_cookie_build(rx_desc);
+		pool = mv_pp2_bm_cookie_pool_get(bm);
+		ppool = &mv_pp2_pool[pool];
+
+		mv_pp2_pool_refill(ppool, bm, rx_desc->bufPhysAddr, rx_desc->bufCookie);
+		mvOsCacheLineInv(pp->dev->dev.parent, rx_desc);
+	}
+	if (rx_done) {
+		mvOsCacheIoSync(pp->dev->dev.parent);
+		mvPp2RxqDescNumUpdate(pp->port, rxq, rx_done, rx_done);
+	}
+}
+
+static int mv_pp2_txq_done_force(struct eth_port *pp, struct tx_queue *txq_ctrl)
+{
+	int cpu, tx_done = 0;
+	struct txq_cpu_ctrl *txq_cpu_ptr;
+
+	for_each_possible_cpu(cpu) {
+		txq_cpu_ptr = &txq_ctrl->txq_cpu[cpu];
+		tx_done = txq_cpu_ptr->txq_count;
+		mv_pp2_txq_bufs_free(pp, &txq_ctrl->txq_cpu[cpu], tx_done);
+		STAT_DBG(txq_cpu_ptr->stats.txq_txdone += tx_done);
+
+		/* reset txq */
+		txq_cpu_ptr->txq_count = 0;
+		txq_cpu_ptr->shadow_txq_put_i = 0;
+		txq_cpu_ptr->shadow_txq_get_i = 0;
+	}
+	return tx_done;
+}
+
+inline u32 mv_pp2_tx_done_pon(struct eth_port *pp, int *tx_todo)
+{
+	int txp, txq;
+	struct tx_queue *txq_ctrl;
+	struct txq_cpu_ctrl *txq_cpu_ptr;
+	u32 tx_done = 0;
+
+	*tx_todo = 0;
+
+	STAT_INFO(pp->stats.tx_done++);
+
+	/* simply go over all TX ports and TX queues */
+	txp = pp->txp_num;
+	while (txp--) {
+		txq = CONFIG_MV_PP2_TXQ;
+
+		while (txq--) {
+			txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_PP2_TXQ + txq];
+			txq_cpu_ptr = &txq_ctrl->txq_cpu[smp_processor_id()];
+			if ((txq_ctrl) && (txq_cpu_ptr->txq_count)) {
+				tx_done += mv_pp2_txq_done(pp, txq_ctrl);
+				*tx_todo += txq_cpu_ptr->txq_count;
+			}
+		}
+	}
+
+	STAT_DIST((tx_done < pp->dist_stats.tx_done_dist_size) ? pp->dist_stats.tx_done_dist[tx_done]++ : 0);
+
+	return tx_done;
+}
+
+
+inline u32 mv_pp2_tx_done_gbe(struct eth_port *pp, u32 cause_tx_done, int *tx_todo)
+{
+	int txq;
+	struct tx_queue *txq_ctrl;
+	struct txq_cpu_ctrl *txq_cpu_ptr;
+	u32 tx_done = 0;
+
+	*tx_todo = 0;
+
+	STAT_INFO(pp->stats.tx_done++);
+
+	while (cause_tx_done != 0) {
+
+		/* For GbE ports we get TX Buffers Threshold Cross per queue in bits [7:0] */
+		txq = mv_pp2_tx_done_policy(cause_tx_done);
+
+		if (txq == -1)
+			break;
+
+		txq_ctrl = &pp->txq_ctrl[txq];
+		txq_cpu_ptr = &txq_ctrl->txq_cpu[smp_processor_id()];
+
+		if (txq_ctrl == NULL) {
+			printk(KERN_ERR "%s: txq_ctrl = NULL, txq=%d\n", __func__, txq);
+			return -EINVAL;
+		}
+
+		if ((txq_ctrl) && (txq_cpu_ptr->txq_count)) {
+			tx_done += mv_pp2_txq_done(pp, txq_ctrl);
+			*tx_todo += txq_cpu_ptr->txq_count;
+		}
+
+		cause_tx_done &= ~(1 << txq);
+	}
+
+	STAT_DIST((tx_done < pp->dist_stats.tx_done_dist_size) ? pp->dist_stats.tx_done_dist[tx_done]++ : 0);
+
+	return tx_done;
+}
+
+
+static void mv_pp2_tx_frag_process(struct eth_port *pp, struct sk_buff *skb, struct aggr_tx_queue *aggr_txq_ctrl,
+					struct tx_queue *txq_ctrl, struct mv_pp2_tx_spec *tx_spec)
+{
+	int i, cpu = smp_processor_id();
+	struct pp2_tx_desc *tx_desc;
+	MV_U32 bufPhysAddr;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		tx_desc = mvPp2AggrTxqNextDescGet(aggr_txq_ctrl->q);
+		tx_desc->physTxq = MV_PPV2_TXQ_PHYS(pp->port, tx_spec->txp, tx_spec->txq);
+
+		/* NETA_TX_BM_ENABLE_MASK = 0 */
+		/* NETA_TX_PKT_OFFSET_MASK = 0 */
+		tx_desc->dataSize = frag->size;
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(3, 1, 10)
+		bufPhysAddr = mvOsCacheFlush(pp->dev->dev.parent, page_address(frag->page.p) + frag->page_offset,
+						      tx_desc->dataSize);
+#else
+		bufPhysAddr = mvOsCacheFlush(pp->dev->dev.parent, page_address(frag->page) + frag->page_offset,
+						      tx_desc->dataSize);
+#endif
+
+		tx_desc->pktOffset = bufPhysAddr & MV_ETH_TX_DESC_ALIGN;
+		tx_desc->bufPhysAddr = bufPhysAddr & (~MV_ETH_TX_DESC_ALIGN);
+
+		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
+			/* Last descriptor */
+			if (tx_spec->flags & MV_ETH_TX_F_NO_PAD)
+				tx_desc->command = (PP2_TX_L_DESC_MASK | PP2_TX_PADDING_DISABLE_MASK);
+			else
+				tx_desc->command = PP2_TX_L_DESC_MASK;
+
+			mv_pp2_shadow_push(&txq_ctrl->txq_cpu[cpu], ((MV_ULONG) skb | MV_ETH_SHADOW_SKB));
+		} else {
+			/* Descriptor in the middle: Not First, Not Last */
+			tx_desc->command = 0;
+
+			mv_pp2_shadow_push(&txq_ctrl->txq_cpu[cpu], 0);
+		}
+
+		mv_pp2_tx_desc_flush(pp, tx_desc);
+	}
+}
+
+
+/* Free "num" buffers from the pool */
+static int mv_pp2_pool_free(int pool, int num)
+{
+	int i = 0, buf_size, total_size;
+	u32 pa;
+	struct bm_pool *ppool = &mv_pp2_pool[pool];
+	bool free_all = false;
+
+	if (num >= ppool->buf_num) {
+		/* Free all buffers from the pool */
+		free_all = true;
+		num = ppool->buf_num;
+	}
+
+	if (MV_ETH_BM_POOL_IS_HWF(ppool->type)) {
+		buf_size = RX_HWF_BUF_SIZE(ppool->pkt_size);
+		total_size = RX_HWF_TOTAL_SIZE(buf_size);
+	} else {
+		buf_size = RX_BUF_SIZE(ppool->pkt_size);
+		total_size = RX_TOTAL_SIZE(buf_size);
+	}
+
+	while (i < num) {
+		MV_U32 *va;
+		va = (MV_U32 *)mvBmPoolGet(pool, &pa);
+		if (va == 0)
+			break;
+
+		/*pr_info("%4d: phys_addr=0x%x, virt_addr=%p\n", i, pa, va);*/
+
+		if (!MV_ETH_BM_POOL_IS_HWF(ppool->type)) {
+			mv_pp2_skb_free((struct sk_buff *)va);
+		} else { /* HWF pool */
+			mvOsFree((char *)va);
+		}
+		i++;
+	}
+	pr_info("bm pool #%d: pkt_size=%4d, buf_size=%4d, total buf_size=%4d - %d of %d buffers free\n",
+			pool, ppool->pkt_size, buf_size, total_size, i, num);
+
+	ppool->buf_num -= num;
+
+	/* Update BM driver with number of buffers removed from pool */
+	mvBmPoolBufNumUpdate(pool, num, 0);
+
+	return i;
+}
+
+
+static int mv_pp2_pool_destroy(int pool)
+{
+	int num, status = 0;
+	struct bm_pool *ppool = &mv_pp2_pool[pool];
+
+	num = mv_pp2_pool_free(pool, ppool->buf_num);
+	if (num != ppool->buf_num) {
+		printk(KERN_ERR "Warning: could not free all buffers in pool %d while destroying pool\n", pool);
+		return MV_ERROR;
+	}
+
+	mvBmPoolControl(pool, MV_STOP);
+
+	/* Note: we don't free the bm_pool here ! */
+	if (ppool->bm_pool)
+		mvOsIoUncachedFree(global_dev->parent,
+				   sizeof(MV_U32) * ppool->capacity,
+				   ppool->physAddr,
+				   ppool->bm_pool,
+				   0);
+
+	memset(ppool, 0, sizeof(struct bm_pool));
+
+	return status;
+}
+
+static int mv_pp2_pool_add(struct eth_port *pp, int pool, int buf_num)
+{
+	struct bm_pool *bm_pool;
+	struct sk_buff *skb;
+	unsigned char *hwf_buff;
+	int i, buf_size, total_size;
+	__u32 bm = 0;
+	phys_addr_t phys_addr;
+
+	if (mvPp2MaxCheck(pool, MV_ETH_BM_POOLS, "bm_pool"))
+		return 0;
+
+	bm_pool = &mv_pp2_pool[pool];
+
+	if (MV_ETH_BM_POOL_IS_HWF(bm_pool->type)) {
+		buf_size = RX_HWF_BUF_SIZE(bm_pool->pkt_size);
+		total_size = RX_HWF_TOTAL_SIZE(buf_size);
+	} else {
+		buf_size = RX_BUF_SIZE(bm_pool->pkt_size);
+		total_size = RX_TOTAL_SIZE(buf_size);
+	}
+
+	/* Check buffer size */
+	if (bm_pool->pkt_size == 0) {
+		printk(KERN_ERR "%s: invalid pool #%d state: pkt_size=%d, buf_size=%d, buf_num=%d\n",
+		       __func__, pool, bm_pool->pkt_size, RX_BUF_SIZE(bm_pool->pkt_size), bm_pool->buf_num);
+		return 0;
+	}
+
+	/* Insure buf_num is smaller than capacity */
+	if ((buf_num < 0) || ((buf_num + bm_pool->buf_num) > (bm_pool->capacity))) {
+		printk(KERN_ERR "%s: can't add %d buffers into bm_pool=%d: capacity=%d, buf_num=%d\n",
+		       __func__, buf_num, pool, bm_pool->capacity, bm_pool->buf_num);
+		return 0;
+	}
+
+	bm = mv_pp2_bm_cookie_pool_set(bm, pool);
+	for (i = 0; i < buf_num; i++) {
+		if (!MV_ETH_BM_POOL_IS_HWF(bm_pool->type)) {
+			/* Allocate skb for pool used for SWF */
+			skb = mv_pp2_skb_alloc(pp, bm_pool, &phys_addr, GFP_KERNEL);
+			if (!skb)
+				break;
+
+			mv_pp2_pool_refill(bm_pool, bm, phys_addr, (unsigned long) skb);
+		} else {
+			/* Allocate pkt + buffer for pool used for HWF */
+			hwf_buff = mv_pp2_hwf_buff_alloc(bm_pool, &phys_addr);
+			if (!hwf_buff)
+				break;
+
+			memset(hwf_buff, 0, buf_size);
+			mv_pp2_pool_refill(bm_pool, bm, phys_addr, (MV_ULONG) hwf_buff);
+		}
+	}
+
+	bm_pool->buf_num += i;
+	bm_pool->in_use_thresh = bm_pool->buf_num / 4;
+
+	/* Update BM driver with number of buffers added to pool */
+	mvBmPoolBufNumUpdate(pool, i, 1);
+
+	pr_info("%s %s pool #%d: pkt_size=%4d, buf_size=%4d, total_size=%4d - %d of %d buffers added\n",
+		MV_ETH_BM_POOL_IS_HWF(bm_pool->type) ? "HWF" : "SWF",
+		MV_ETH_BM_POOL_IS_SHORT(bm_pool->type) ? "short" : " long",
+		pool, bm_pool->pkt_size, buf_size, total_size, i, buf_num);
+
+	return i;
+}
+
+void	*mv_pp2_bm_pool_create(int pool, int capacity, MV_ULONG *pPhysAddr)
+{
+	MV_ULONG physAddr;
+	void *pVirt;
+	MV_STATUS status;
+	int size = 2 * sizeof(MV_U32) * capacity;
+
+	pVirt = mvOsIoUncachedMalloc(NULL, size, &physAddr, NULL);
+	if (pVirt == NULL) {
+		mvOsPrintf("%s: Can't allocate %d bytes for pool #%d\n",
+				__func__, size, pool);
+		return NULL;
+	}
+
+	/* Pool address must be MV_BM_POOL_PTR_ALIGN bytes aligned */
+	if (MV_IS_NOT_ALIGN((unsigned)pVirt, MV_BM_POOL_PTR_ALIGN)) {
+		mvOsPrintf("memory allocated for BM pool #%d is not %d bytes aligned\n",
+					pool, MV_BM_POOL_PTR_ALIGN);
+		mvOsIoCachedFree(NULL, size, physAddr, pVirt, 0);
+		return NULL;
+	}
+	status = mvBmPoolInit(pool, pVirt, physAddr, capacity);
+	if (status != MV_OK) {
+		mvOsPrintf("%s: Can't init #%d BM pool. status=%d\n", __func__, pool, status);
+		mvOsIoCachedFree(NULL, size, physAddr, pVirt, 0);
+		return NULL;
+	}
+
+	mvBmPoolControl(pool, MV_START);
+
+	if (pPhysAddr != NULL)
+		*pPhysAddr = physAddr;
+
+	return pVirt;
+}
+
+static MV_STATUS mv_pp2_pool_create(int pool, int capacity)
+{
+	struct bm_pool *bm_pool;
+	MV_ULONG    physAddr;
+
+	if ((pool < 0) || (pool >= MV_ETH_BM_POOLS)) {
+		printk(KERN_ERR "%s: pool=%d is out of range\n", __func__, pool);
+		return MV_BAD_VALUE;
+	}
+
+	bm_pool = &mv_pp2_pool[pool];
+	memset(bm_pool, 0, sizeof(struct bm_pool));
+
+	bm_pool->bm_pool = mv_pp2_bm_pool_create(pool, capacity, &physAddr);
+	if (bm_pool->bm_pool == NULL)
+		return MV_FAIL;
+
+	bm_pool->pool = pool;
+	bm_pool->type = MV_ETH_BM_FREE;
+	bm_pool->capacity = capacity;
+	bm_pool->pkt_size = 0;
+	bm_pool->buf_num = 0;
+	atomic_set(&bm_pool->in_use, 0);
+
+	spin_lock_init(&bm_pool->lock);
+
+	return MV_OK;
+}
+
+/* mv_pp2_pool_use:							*
+ *	- notify the driver that BM pool is being used as specific type	*
+ *	- Allocate / Free buffers if necessary				*
+ *	- Returns the used pool pointer in case of success		*
+ *	- Parameters:							*
+ *		- pool: BM pool that is being used			*
+ *		- type: type of usage (SWF/HWF/MIXED long/short)	*
+ *		- pkt_size: number of bytes per packet			*/
+static struct bm_pool *mv_pp2_pool_use(struct eth_port *pp, int pool, enum mv_pp2_bm_type type, int pkt_size)
+{
+	unsigned long flags = 0;
+	struct bm_pool *new_pool;
+	int num;
+
+	new_pool = &mv_pp2_pool[pool];
+
+	if ((MV_ETH_BM_POOL_IS_SHORT(new_pool->type) && MV_ETH_BM_POOL_IS_LONG(type))
+		|| (MV_ETH_BM_POOL_IS_SHORT(type) && MV_ETH_BM_POOL_IS_LONG(new_pool->type))) {
+		pr_err("%s FAILED: BM pool can't be used as short and long at the same time\n", __func__);
+		return NULL;
+	}
+
+	MV_ETH_LOCK(&new_pool->lock, flags);
+
+	if (new_pool->type == MV_ETH_BM_FREE)
+		new_pool->type = type;
+	else if (MV_ETH_BM_POOL_IS_SWF(new_pool->type) && MV_ETH_BM_POOL_IS_HWF(type))
+		new_pool->type = MV_ETH_BM_POOL_IS_LONG(type) ? MV_ETH_BM_MIXED_LONG : MV_ETH_BM_MIXED_SHORT;
+
+	/* Check if buffer allocation is needed, there are 3 cases:			*
+	 *	1. BM pool was used only by HWF, and will be used by SWF as well	*
+	 *	2. BM pool is used as long pool, but packet size doesn't match MTU	*
+	 *	3. BM pool hasn't being used yet					*/
+	if ((MV_ETH_BM_POOL_IS_HWF(new_pool->type) && MV_ETH_BM_POOL_IS_SWF(type))
+		|| (MV_ETH_BM_POOL_IS_LONG(type) && (pkt_size > new_pool->pkt_size))
+		|| (new_pool->pkt_size == 0)) {
+		int port, pkts_num;
+
+		/* If there are ports using this pool, they must be stopped before allocation */
+		port = mv_pp2_port_up_get(new_pool->port_map);
+		if (port != -1) {
+			pr_err("%s: port %d use pool %d and must be stopped before buffer re-allocation\n",
+				__func__, port, new_pool->pool);
+			MV_ETH_UNLOCK(&new_pool->lock, flags);
+			return NULL;
+		}
+
+		/* if pool is empty, then set default buffers number		*
+		 * if pool is not empty, then we must free all the buffers	*/
+		pkts_num = new_pool->buf_num;
+		if (pkts_num == 0)
+			pkts_num = (MV_ETH_BM_POOL_IS_LONG(type)) ?
+				CONFIG_MV_PP2_BM_LONG_BUF_NUM : CONFIG_MV_PP2_BM_SHORT_BUF_NUM;
+		else
+			mv_pp2_pool_free(new_pool->pool, pkts_num);
+
+		/* Check if pool has moved to SWF and HWF shared mode */
+		if ((MV_ETH_BM_POOL_IS_HWF(new_pool->type) && !MV_ETH_BM_POOL_IS_HWF(type))
+			|| (MV_ETH_BM_POOL_IS_HWF(type) && !MV_ETH_BM_POOL_IS_HWF(new_pool->type)))
+			new_pool->type = MV_ETH_BM_POOL_IS_LONG(type) ? MV_ETH_BM_MIXED_LONG : MV_ETH_BM_MIXED_SHORT;
+
+		/* Update packet size (in case of MTU larger than current ot new pool) */
+		if ((new_pool->pkt_size == 0)
+			|| (MV_ETH_BM_POOL_IS_LONG(type) && (pkt_size > new_pool->pkt_size)))
+			new_pool->pkt_size = pkt_size;
+
+		/* Allocate buffers for this pool */
+		num = mv_pp2_pool_add(pp, new_pool->pool, pkts_num);
+		if (num != pkts_num) {
+			pr_err("%s FAILED: pool=%d, pkt_size=%d, only %d of %d allocated\n",
+				__func__, new_pool->pool, new_pool->pkt_size, num, pkts_num);
+			MV_ETH_UNLOCK(&new_pool->lock, flags);
+			return NULL;
+		}
+
+	}
+
+	if (MV_ETH_BM_POOL_IS_HWF(new_pool->type))
+		mvPp2BmPoolBufSizeSet(new_pool->pool, RX_HWF_BUF_SIZE(new_pool->pkt_size));
+	else
+		mvPp2BmPoolBufSizeSet(new_pool->pool, RX_BUF_SIZE(new_pool->pkt_size));
+
+	MV_ETH_UNLOCK(&new_pool->lock, flags);
+
+	return new_pool;
+}
+
+/* Interrupt handling */
+irqreturn_t mv_pp2_isr(int irq, void *dev_id)
+{
+	struct eth_port *pp = (struct eth_port *)dev_id;
+	int cpu = smp_processor_id();
+	struct napi_group_ctrl *napi_group = pp->cpu_config[cpu]->napi_group;
+	struct napi_struct *napi = napi_group->napi;
+	u32 imr;
+
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+	if (pp->dbg_flags & MV_ETH_F_DBG_ISR) {
+		pr_info("%s: port=%d, cpu=%d, mask=0x%x, cause=0x%x\n",
+			__func__, pp->port, cpu,
+			mvPp2RdReg(MV_PP2_ISR_RX_TX_MASK_REG(MV_PPV2_PORT_PHYS(pp->port))),
+			mvPp2GbeIsrCauseRxTxGet(pp->port));
+	}
+#endif /* CONFIG_MV_PP2_DEBUG_CODE */
+
+	STAT_INFO(pp->stats.irq[cpu]++);
+
+	/* Mask all interrupts for cpus in this group */
+	mvPp2GbeCpuInterruptsDisable(pp->port, napi_group->cpu_mask);
+
+	/* Verify that the device not already on the polling list */
+	if (napi_schedule_prep(napi)) {
+		/* schedule the work (rx+txdone+link) out of interrupt contxet */
+		__napi_schedule(napi);
+	} else {
+		STAT_INFO(pp->stats.irq_err[cpu]++);
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+		pr_warning("%s: IRQ=%d, port=%d, cpu=%d, cpu_mask=0x%x - NAPI already scheduled\n",
+			__func__, irq, pp->port, cpu, napi_group->cpu_mask);
+#endif /* CONFIG_MV_PP2_DEBUG_CODE */
+	}
+
+	/*
+	 * Ensure mask register write is completed by issuing a read.
+	 * dsb() instruction cannot be used on registers since they are in
+	 * MBUS domain
+	 */
+	imr = mvPp2RdReg(MV_PP2_ISR_ENABLE_REG(pp->port));
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t mv_pp2_link_isr(int irq, void *dev_id)
+{
+	mvGmacIsrSummaryMask();
+
+	tasklet_schedule(&link_tasklet);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_PM
+/* wol_isr_register, guarantee the wol irq register once */
+static int wol_isr_register;
+
+irqreturn_t mv_wol_isr(int irq, void *dev_id)
+{
+	mvPp2WolWakeup();
+	machine_restart(NULL);
+
+	return IRQ_HANDLED;
+}
+#endif
+
+void mv_pp2_link_tasklet(unsigned long data)
+{
+	int port;
+	MV_U32 regVal, regVal1;
+	struct eth_port *pp;
+
+	regVal = mvGmacIsrSummaryCauseGet();
+
+	/* check only relevant interrupts - ports0 and 1 */
+	regVal &= (ETH_ISR_SUM_PORT0_MASK | ETH_ISR_SUM_PORT1_MASK);
+
+	for (port = 0; port < mv_pp2_ports_num; port++) {
+		/* check if interrupt was caused by this port */
+		if (!(ETH_ISR_SUM_PORT_MASK(port) & regVal))
+			continue;
+
+		regVal1 = mvGmacPortIsrCauseGet(port);
+
+		/* check for link change interrupt */
+		if (!(regVal1 & ETH_PORT_LINK_CHANGE_MASK)) {
+			mvGmacPortIsrUnmask(port);
+			continue;
+		}
+
+		pp = mv_pp2_port_by_id(port);
+		if (pp)
+			mv_pp2_link_event(pp, 1);
+	}
+
+	mvGmacIsrSummaryUnmask();
+}
+
+static bool mv_pp2_link_status(struct eth_port *pp)
+{
+#ifdef CONFIG_MV_INCLUDE_PON
+	if (MV_PP2_IS_PON_PORT(pp->port))
+		return mv_pon_link_status(NULL);
+	else
+#endif /* CONFIG_MV_PON */
+		return mvGmacPortIsLinkUp(pp->port);
+}
+
+void mv_pp2_link_event(struct eth_port *pp, int print)
+{
+	struct net_device *dev = pp->dev;
+	bool              link_is_up = false;
+
+	STAT_INFO(pp->stats.link++);
+
+	/* Check Link status on ethernet port */
+	link_is_up = mv_pp2_link_status(pp);
+
+	if (link_is_up) {
+		/* Link Up event */
+		mvPp2PortEgressEnable(pp->port, MV_TRUE);
+		set_bit(MV_ETH_F_LINK_UP_BIT, &(pp->flags));
+
+		if (mv_pp2_ctrl_is_tx_enabled(pp)) {
+			if (dev) {
+				netif_carrier_on(dev);
+				netif_tx_wake_all_queues(dev);
+			}
+		}
+		mvPp2PortIngressEnable(pp->port, MV_TRUE);
+	} else {
+		/* Link Down event */
+		/*
+		 * Do not allow disabling PON port since need to filter
+		 * some OAM packet from broadcast LLID even the port is down.
+		 */
+		if (!(MV_PP2_IS_PON_PORT(pp->port)))
+			mvPp2PortIngressEnable(pp->port, MV_FALSE);
+
+		if (dev) {
+			netif_carrier_off(dev);
+			netif_tx_stop_all_queues(dev);
+		}
+		mvPp2PortEgressEnable(pp->port, MV_FALSE);
+		clear_bit(MV_ETH_F_LINK_UP_BIT, &(pp->flags));
+	}
+
+	if (print) {
+		if (dev)
+			printk(KERN_ERR "%s: ", dev->name);
+		else
+			printk(KERN_ERR "%s: ", "none");
+
+		mv_pp2_eth_link_status_print(pp->port);
+	}
+}
+
+/***********************************************************************************************/
+int mv_pp2_poll(struct napi_struct *napi, int budget)
+{
+	int rx_done = 0;
+	struct napi_group_ctrl *napi_group;
+	MV_U32 causeRxTx;
+	struct eth_port *pp = MV_ETH_PRIV(napi->dev);
+
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+	if (pp->dbg_flags & MV_ETH_F_DBG_POLL) {
+		printk(KERN_ERR "%s ENTER: port=%d, cpu=%d, mask=0x%x, cause=0x%x\n",
+			__func__, pp->port, smp_processor_id(),
+			mvPp2RdReg(MV_PP2_ISR_RX_TX_MASK_REG(MV_PPV2_PORT_PHYS(pp->port))),
+			mvPp2GbeIsrCauseRxTxGet(pp->port));
+	}
+#endif /* CONFIG_MV_PP2_DEBUG_CODE */
+
+	if (!test_bit(MV_ETH_F_STARTED_BIT, &(pp->flags))) {
+		STAT_INFO(pp->stats.netdev_stop++);
+
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+		if (pp->dbg_flags & MV_ETH_F_DBG_RX)
+			printk(KERN_ERR "%s: STARTED_BIT = 0, poll completed.\n", __func__);
+#endif /* CONFIG_MV_PP2_DEBUG_CODE */
+
+		napi_complete(napi);
+		STAT_INFO(pp->stats.poll_exit[smp_processor_id()]++);
+		return rx_done;
+	}
+
+	STAT_INFO(pp->stats.poll[smp_processor_id()]++);
+
+	/* Read cause register */
+	causeRxTx = mvPp2GbeIsrCauseRxTxGet(pp->port);
+
+	if (causeRxTx & MV_PP2_CAUSE_MISC_SUM_MASK) {
+		if (causeRxTx & MV_PP2_CAUSE_FCS_ERR_MASK)
+			printk(KERN_ERR "%s: FCS error\n", __func__);
+
+		if (causeRxTx & MV_PP2_CAUSE_RX_FIFO_OVERRUN_MASK)
+			printk(KERN_ERR "%s: RX fifo overrun error\n", __func__);
+
+		if (causeRxTx & MV_PP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
+			printk(KERN_ERR "%s: TX fifo underrun error\n", __func__);
+
+		if (causeRxTx & MV_PP2_CAUSE_MISC_SUM_MASK) {
+			printk(KERN_ERR "%s: misc event\n", __func__);
+			mvPp2WrReg(MV_PP2_ISR_MISC_CAUSE_REG, 0);
+		}
+
+		causeRxTx &= ~MV_PP2_CAUSE_MISC_SUM_MASK;
+		mvPp2WrReg(MV_PP2_ISR_RX_TX_CAUSE_REG(MV_PPV2_PORT_PHYS(pp->port)), causeRxTx);
+	}
+	napi_group = pp->cpu_config[smp_processor_id()]->napi_group;
+	causeRxTx |= napi_group->cause_rx_tx;
+
+#ifdef CONFIG_MV_PP2_TXDONE_ISR
+	if (mvPp2GbeIsrCauseTxDoneIsSet(pp->port, causeRxTx)) {
+		int tx_todo = 0, cause_tx_done;
+		/* TX_DONE process */
+		cause_tx_done = mvPp2GbeIsrCauseTxDoneOffset(pp->port, causeRxTx);
+		if (MV_PP2_IS_PON_PORT(pp->port))
+			mv_pp2_tx_done_pon(pp, &tx_todo);
+		else
+			mv_pp2_tx_done_gbe(pp, cause_tx_done, &tx_todo);
+	}
+#endif /* CONFIG_MV_PP2_TXDONE_ISR */
+	if (MV_PP2_IS_PON_PORT(pp->port))
+		causeRxTx &= ~MV_PP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK;
+	else
+		causeRxTx &= ~MV_PP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+
+	while ((causeRxTx != 0) && (budget > 0)) {
+		int count, rx_queue;
+
+		rx_queue = mv_pp2_rx_policy(causeRxTx);
+		if (rx_queue == -1)
+			break;
+
+		count = mv_pp2_rx(pp, budget, rx_queue, napi);
+		rx_done += count;
+		budget -= count;
+		if (budget > 0)
+			causeRxTx &= ~((1 << rx_queue) << MV_PP2_CAUSE_RXQ_OCCUP_DESC_OFFS);
+	}
+
+	/* Maintain RX packets rate if adaptive RX coalescing is enabled */
+	if (pp->rx_adaptive_coal_cfg)
+		pp->rx_rate_pkts += rx_done;
+
+	STAT_DIST((rx_done < pp->dist_stats.rx_dist_size) ? pp->dist_stats.rx_dist[rx_done]++ : 0);
+
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+	if (pp->dbg_flags & MV_ETH_F_DBG_POLL) {
+		printk(KERN_ERR "%s  EXIT: port=%d, cpu=%d, budget=%d, rx_done=%d\n",
+			__func__, pp->port, smp_processor_id(), budget, rx_done);
+	}
+#endif /* CONFIG_MV_PP2_DEBUG_CODE */
+
+	if (budget > 0) {
+		causeRxTx = 0;
+
+		napi_complete(napi);
+
+		STAT_INFO(pp->stats.poll_exit[smp_processor_id()]++);
+
+		/* adapt RX coalescing according to packets rate */
+		if (pp->rx_adaptive_coal_cfg)
+			mv_pp2_adaptive_rx_update(pp);
+
+		/* Enable interrupts for all cpus belong to this group */
+		if (!(pp->flags & MV_ETH_F_IFCAP_NETMAP)) {
+			wmb();
+			mvPp2GbeCpuInterruptsEnable(pp->port, napi_group->cpu_mask);
+		}
+	}
+	napi_group->cause_rx_tx = causeRxTx;
+
+	return rx_done;
+}
+
+void mv_pp2_port_filtering_cleanup(int port)
+{
+	static bool is_first = true;
+
+	/* clean TCAM and SRAM only one, no need to do this per port. */
+	if (is_first) {
+		mvPp2PrsHwClearAll();
+		mvPp2PrsHwInvAll();
+		is_first = false;
+	}
+}
+
+
+static MV_STATUS mv_pp2_bm_pools_init(void)
+{
+	int i, j;
+	MV_STATUS status;
+
+	/* Create all pools with maximum capacity */
+	for (i = 0; i < MV_ETH_BM_POOLS; i++) {
+		status = mv_pp2_pool_create(i, MV_BM_POOL_CAP_MAX);
+		if (status != MV_OK) {
+			printk(KERN_ERR "%s: can't create bm_pool=%d - capacity=%d\n", __func__, i, MV_BM_POOL_CAP_MAX);
+			for (j = 0; j < i; j++)
+				mv_pp2_pool_destroy(j);
+			return status;
+		}
+
+		mv_pp2_pool[i].pkt_size = 0;
+		mv_pp2_pool[i].type = MV_ETH_BM_FREE;
+
+		mvPp2BmPoolBufSizeSet(i, 0);
+	}
+	return 0;
+}
+
+int mv_pp2_swf_bm_pool_init(struct eth_port *pp, int mtu)
+{
+	unsigned long flags = 0;
+	int rxq, pkt_size = RX_PKT_SIZE(mtu);
+
+	if (pp->pool_long == NULL) {
+		pp->pool_long = mv_pp2_pool_use(pp, MV_ETH_BM_SWF_LONG_POOL(pp->port),
+							MV_ETH_BM_SWF_LONG, pkt_size);
+		if (pp->pool_long == NULL)
+			return -1;
+
+		MV_ETH_LOCK(&pp->pool_long->lock, flags);
+		pp->pool_long->port_map |= (1 << pp->port);
+		MV_ETH_UNLOCK(&pp->pool_long->lock, flags);
+
+		for (rxq = 0; rxq < pp->rxq_num; rxq++)
+			mvPp2RxqBmLongPoolSet(pp->port, rxq, pp->pool_long->pool);
+	}
+
+	if (pp->pool_short == NULL) {
+		pp->pool_short = mv_pp2_pool_use(pp, MV_ETH_BM_SWF_SHORT_POOL(pp->port),
+							MV_ETH_BM_SWF_SHORT, MV_ETH_BM_SHORT_PKT_SIZE);
+		if (pp->pool_short == NULL)
+			return -1;
+
+		MV_ETH_LOCK(&pp->pool_short->lock, flags);
+		pp->pool_short->port_map |= (1 << pp->port);
+		MV_ETH_UNLOCK(&pp->pool_short->lock, flags);
+
+		for (rxq = 0; rxq < pp->rxq_num; rxq++)
+			mvPp2RxqBmShortPoolSet(pp->port, rxq, pp->pool_short->pool);
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_MV_PP2_HWF
+int mv_pp2_hwf_bm_pool_init(struct eth_port *pp, int mtu)
+{
+	unsigned long flags = 0;
+	int pkt_size = RX_PKT_SIZE(mtu);
+
+	if (pp->hwf_pool_long == NULL) {
+		pp->hwf_pool_long = mv_pp2_pool_use(pp, MV_ETH_BM_HWF_LONG_POOL(pp->port),
+							MV_ETH_BM_HWF_LONG, pkt_size);
+		if (pp->hwf_pool_long == NULL)
+			return -1;
+
+		MV_ETH_LOCK(&pp->hwf_pool_long->lock, flags);
+		pp->hwf_pool_long->port_map |= (1 << pp->port);
+		MV_ETH_UNLOCK(&pp->hwf_pool_long->lock, flags);
+
+#ifdef CONFIG_MV_ETH_PP2_1
+		mv_pp2_hwf_long_pool_attach(pp->port, pp->hwf_pool_long->pool);
+#endif
+	}
+
+	if (pp->hwf_pool_short == NULL) {
+		pp->hwf_pool_short = mv_pp2_pool_use(pp, MV_ETH_BM_HWF_SHORT_POOL(pp->port),
+							MV_ETH_BM_HWF_SHORT, MV_ETH_BM_SHORT_HWF_PKT_SIZE);
+		if (pp->hwf_pool_short == NULL)
+			return -1;
+
+		MV_ETH_LOCK(&pp->hwf_pool_short->lock, flags);
+		pp->hwf_pool_short->port_map |= (1 << pp->port);
+		MV_ETH_UNLOCK(&pp->hwf_pool_short->lock, flags);
+
+#ifdef CONFIG_MV_ETH_PP2_1
+		mv_pp2_hwf_short_pool_attach(pp->port, pp->hwf_pool_short->pool);
+#endif
+	}
+
+#ifndef CONFIG_MV_ETH_PP2_1
+	mvPp2PortHwfBmPoolSet(pp->port, pp->hwf_pool_short->pool, pp->hwf_pool_long->pool);
+#endif
+
+	return 0;
+}
+#endif /* CONFIG_MV_PP2_HWF */
+
+static int mv_pp2_port_link_speed_fc(int port, MV_ETH_PORT_SPEED port_speed, int en_force)
+{
+	if (en_force) {
+		if (mvGmacSpeedDuplexSet(port, port_speed, MV_ETH_DUPLEX_FULL)) {
+			printk(KERN_ERR "SpeedDuplexSet failed\n");
+			return -EIO;
+		}
+		if (mvGmacFlowCtrlSet(port, MV_ETH_FC_ENABLE)) {
+			printk(KERN_ERR "FlowCtrlSet failed\n");
+			return -EIO;
+		}
+		if (mvGmacForceLinkModeSet(port, 1, 0)) {
+			printk(KERN_ERR "ForceLinkModeSet failed\n");
+			return -EIO;
+		}
+	} else {
+		if (mvGmacForceLinkModeSet(port, 0, 0)) {
+			printk(KERN_ERR "ForceLinkModeSet failed\n");
+			return -EIO;
+		}
+		if (mvGmacSpeedDuplexSet(port, MV_ETH_SPEED_AN, MV_ETH_DUPLEX_AN)) {
+			printk(KERN_ERR "SpeedDuplexSet failed\n");
+			return -EIO;
+		}
+		if (mvGmacFlowCtrlSet(port, MV_ETH_FC_AN_SYM)) {
+			printk(KERN_ERR "FlowCtrlSet failed\n");
+			return -EIO;
+		}
+	}
+	return 0;
+}
+
+static int mv_pp2_load_network_interfaces(struct platform_device *pdev)
+{
+	u32 port;
+	struct eth_port *pp;
+	struct net_device *dev;
+	int mtu, err, phys_port, speed, force_link = 0;
+	struct mv_pp2_pdata *plat_data = (struct mv_pp2_pdata *)pdev->dev.platform_data;
+	u8 mac[MV_MAC_ADDR_SIZE];
+
+	port = pdev->id;
+	phys_port = MV_PPV2_PORT_PHYS(port);
+	pr_info("  o Loading network interface(s) for port #%d: cpu_mask=0x%x, mtu=%d\n",
+			port, plat_data->cpu_mask, plat_data->mtu);
+
+	mtu = mv_pp2_config_get(pdev, mac);
+
+	dev = mv_pp2_netdev_init(pdev);
+
+	if (dev == NULL) {
+		pr_err("\to %s: can't create netdevice\n", __func__);
+		return -EIO;
+	}
+
+	pp = (struct eth_port *)netdev_priv(dev);
+	pp->plat_data = plat_data;
+
+	mv_pp2_ports[port] = pp;
+
+	err = mv_pp2_priv_init(pp, port);
+	if (err) {
+		mv_pp2_priv_cleanup(pp);
+		return err;
+	}
+
+	if (plat_data->flags & MV_PP2_PDATA_F_LINUX_CONNECT) {
+		pr_info("\to Port %d is connected to Linux netdevice\n", port);
+		set_bit(MV_ETH_F_CONNECT_LINUX_BIT, &(pp->flags));
+	} else {
+		pr_info("\to Port %d is disconnected from Linux netdevice\n", pp->port);
+		clear_bit(MV_ETH_F_CONNECT_LINUX_BIT, &(pp->flags));
+	}
+
+
+	pp->cpuMask = plat_data->cpu_mask;
+
+	switch (plat_data->speed) {
+	case SPEED_10:
+		speed = MV_ETH_SPEED_10;
+		force_link = 1;
+		break;
+	case SPEED_100:
+		speed = MV_ETH_SPEED_100;
+		force_link = 1;
+		break;
+	case SPEED_1000:
+		speed = MV_ETH_SPEED_1000;
+		force_link = 1;
+		break;
+	case 0:
+		speed = MV_ETH_SPEED_AN;
+		force_link = 0;
+		break;
+	default:
+		pr_err("\to gbe #%d: unknown speed = %d\n", pp->port, plat_data->speed);
+		return -EIO;
+	}
+
+	/* set port's speed, duplex, fc */
+	if (!MV_PP2_IS_PON_PORT(pp->port)) {
+		/* force link, speed and duplex if necessary based on board information */
+		err = mv_pp2_port_link_speed_fc(pp->port, speed, force_link);
+		if (err) {
+			mv_pp2_priv_cleanup(pp);
+			return err;
+		}
+	}
+
+	pr_info("\to %s p=%d: phy=%d,  mtu=%d, mac="MV_MACQUAD_FMT", speed=%s %s\n",
+		MV_PP2_IS_PON_PORT(port) ? "pon" : "giga", port, plat_data->phy_addr, mtu,
+		MV_MACQUAD(mac), mvGmacSpeedStrGet(speed), force_link ? "(force)" : "(platform)");
+
+	if (mv_pp2_hal_init(pp)) {
+		pr_err("\to %s: can't init eth hal\n", __func__);
+		mv_pp2_priv_cleanup(pp);
+		return -EIO;
+	}
+
+	if (mv_pp2_netdev_connect(pp) < 0) {
+		pr_err("\to %s: can't connect to linux\n", __func__);
+		mv_pp2_priv_cleanup(pp);
+		return -EIO;
+	}
+
+	/* Default NAPI initialization */
+	/* Create one group for this port, that contains all RXQs and all CPUs - every cpu can process all RXQs */
+	if (pp->flags & MV_ETH_F_CONNECT_LINUX) {
+		if (mv_pp2_port_napi_group_create(pp->port, 0))
+			return -EIO;
+		if (mv_pp2_napi_set_cpu_affinity(pp->port, 0, (1 << nr_cpu_ids) - 1) ||
+				mv_pp2_eth_napi_set_rxq_affinity(pp->port, 0, (1 << MV_PP2_MAX_RXQ) - 1))
+			return -EIO;
+	}
+
+	if (mv_pp2_pnc_ctrl_en) {
+#ifndef CONFIG_MV_ETH_PP2_1
+		mv_pp2_tx_mtu_set(port, mtu);
+#endif /* CONFIG_MV_ETH_PP2_1 */
+
+#ifndef CONFIG_MV_ETH_PP2_1
+		mvPp2ClsHwOversizeRxqSet(MV_PPV2_PORT_PHYS(pp->port), pp->first_rxq);
+#else
+		mvPp2ClsHwOversizeRxqLowSet(MV_PPV2_PORT_PHYS(pp->port),
+			(pp->first_rxq) & MV_PP2_CLS_OVERSIZE_RXQ_LOW_MASK);
+		mvPp2ClsHwRxQueueHighSet(MV_PPV2_PORT_PHYS(pp->port),
+			1,
+			(pp->first_rxq) >> MV_PP2_CLS_OVERSIZE_RXQ_LOW_BITS);
+#endif
+
+		/* classifier port default config */
+		mvPp2ClsHwPortDefConfig(phys_port, 0, FLOWID_DEF(phys_port), pp->first_rxq);
+	}
+
+#ifdef CONFIG_NETMAP
+	mv_pp2_netmap_attach(pp);
+#endif /* CONFIG_NETMAP */
+
+	/* Call mv_pp2_open specifically for ports not connected to Linux netdevice */
+	if (!(pp->flags & MV_ETH_F_CONNECT_LINUX))
+		mv_pp2_eth_open(pp->dev);
+
+	mux_eth_ops.set_tag_type = mv_pp2_tag_type_set;
+	mv_mux_eth_attach(pp->port, pp->dev, &mux_eth_ops);
+
+	pr_info("\n");
+
+	return 0;
+}
+
+
+
+int mv_pp2_resume_network_interfaces(struct eth_port *pp)
+{
+/* TBD */
+	return 0;
+}
+
+/***********************************************************
+ * mv_pp2_port_resume                                      *
+ ***********************************************************/
+
+int mv_pp2_port_resume(int port)
+{
+	struct eth_port *pp;
+
+	pp = mv_pp2_port_by_id(port);
+
+	if (pp == NULL) {
+		pr_err("%s: pp == NULL, port=%d\n", __func__, port);
+		return  MV_ERROR;
+	}
+
+	if (!(pp->flags & MV_ETH_F_SUSPEND)) {
+		pr_err("%s: port %d is not suspend.\n", __func__, port);
+		return MV_ERROR;
+	}
+	if (pp->pm_mode == MV_ETH_PM_WOL) {
+		mv_pp2_start_internals(pp, pp->dev->mtu);
+		mvGmacPortIsrUnmask(port);
+		mvGmacPortSumIsrUnmask(port);
+	}
+
+	clear_bit(MV_ETH_F_SUSPEND_BIT, &(pp->flags));
+	set_bit(MV_ETH_F_STARTED_BIT, &(pp->flags));
+
+	pr_info("Exit suspend mode on port #%d\n", port);
+
+	return MV_OK;
+}
+
+void    mv_pp2_hal_shared_init(struct mv_pp2_pdata *plat_data)
+{
+	MV_PP2_HAL_DATA halData;
+
+	memset(&halData, 0, sizeof(halData));
+
+	halData.maxPort = plat_data->max_port;
+	halData.tClk = plat_data->tclk;
+	halData.maxCPUs = nr_cpu_ids;
+	halData.iocc = arch_is_coherent();
+	halData.ctrlModel = plat_data->ctrl_model;
+	halData.ctrlRev = plat_data->ctrl_rev;
+	halData.aggrTxqSize = CONFIG_MV_PP2_AGGR_TXQ_SIZE;
+
+#ifdef CONFIG_MV_INCLUDE_PON
+	halData.maxTcont = CONFIG_MV_PON_TCONTS;
+#endif
+
+	mvPp2HalInit(&halData);
+
+	return;
+}
+
+
+/***********************************************************
+ * mv_pp2_win_init --                                      *
+ *   Win initilization                                     *
+ ***********************************************************/
+void mv_pp2_win_init(void)
+{
+	const struct mbus_dram_target_info *dram;
+	int i;
+	u32 enable;
+
+	/* First disable all address decode windows */
+	enable = 0;
+	mvPp2WrReg(ETH_BASE_ADDR_ENABLE_REG, enable);
+
+	dram = mv_mbus_dram_info();
+	if (!dram) {
+		pr_err("%s: No DRAM information\n", __func__);
+		return;
+	}
+	for (i = 0; i < dram->num_cs; i++) {
+		const struct mbus_dram_window *cs = dram->cs + i;
+		u32 baseReg, base = cs->base;
+		u32 sizeReg, size = cs->size;
+		u32 alignment;
+		u8 attr = cs->mbus_attr;
+		u8 target = dram->mbus_dram_target_id;
+
+		/* check if address is aligned to the size */
+		if (MV_IS_NOT_ALIGN(base, size)) {
+			pr_err("%s: Error setting window for cs #%d.\n"
+			   "Address 0x%08x is not aligned to size 0x%x.\n",
+			   __func__, i, base, size);
+			return;
+		}
+
+		if (!MV_IS_POWER_OF_2(size)) {
+			pr_err("%s: Error setting window for cs #%d.\n"
+				"Window size %u is not a power to 2.\n",
+				__func__, i, size);
+			return;
+		}
+
+#ifdef CONFIG_MV_SUPPORT_L2_DEPOSIT
+		/* Setting DRAM windows attribute to :
+			0x3 - Shared transaction + L2 write allocate (L2 Deposit) */
+		attr &= ~(0x30);
+		attr |= 0x30;
+#endif
+
+		baseReg = (base & ETH_WIN_BASE_MASK);
+		sizeReg = mvPp2RdReg(ETH_WIN_SIZE_REG(i));
+
+		/* set size */
+		alignment = 1 << ETH_WIN_SIZE_OFFS;
+		sizeReg &= ~ETH_WIN_SIZE_MASK;
+		sizeReg |= (((size / alignment) - 1) << ETH_WIN_SIZE_OFFS);
+
+		/* set attributes */
+		baseReg &= ~ETH_WIN_ATTR_MASK;
+		baseReg |= attr << ETH_WIN_ATTR_OFFS;
+
+		/* set target ID */
+		baseReg &= ~ETH_WIN_TARGET_MASK;
+		baseReg |= target << ETH_WIN_TARGET_OFFS;
+
+		mvPp2WrReg(ETH_WIN_BASE_REG(i), baseReg);
+		mvPp2WrReg(ETH_WIN_SIZE_REG(i), sizeReg);
+
+		enable |= (1 << i);
+	}
+	/* Enable window */
+	mvPp2WrReg(ETH_BASE_ADDR_ENABLE_REG, enable);
+}
+
+
+/***********************************************************
+ * mv_pp2_eth_port_suspend                                 *
+ *   main driver initialization. loading the interfaces.   *
+ ***********************************************************/
+int mv_pp2_eth_port_suspend(int port)
+{
+	struct eth_port *pp;
+
+	pp = mv_pp2_port_by_id(port);
+	if (!pp)
+		return MV_OK;
+
+	if (pp->flags & MV_ETH_F_SUSPEND) {
+		pr_err("%s: port %d is allready suspend.\n", __func__, port);
+		return MV_ERROR;
+	}
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		if (pp->pm_mode == MV_ETH_PM_WOL) {
+			/* Clean up and disable all interrupts */
+			mv_pp2_stop_internals(pp);
+			mvGmacPortIsrMask(port);
+			mvGmacPortSumIsrMask(port);
+		}
+		clear_bit(MV_ETH_F_STARTED_BIT, &(pp->flags));
+	} else
+		clear_bit(MV_ETH_F_STARTED_OLD_BIT, &(pp->flags));
+
+	set_bit(MV_ETH_F_SUSPEND_BIT, &(pp->flags));
+
+	pr_info("Enter suspend mode on port #%d\n", port);
+	return MV_OK;
+}
+
+/***********************************************************
+ * mv_pp2_pm_mode_set --                                   *
+ *   set pm_mode. (power menegment mod)			   *
+ ***********************************************************/
+int	mv_pp2_pm_mode_set(int port, int mode)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (pp == NULL) {
+		pr_err("%s: pp == NULL, port=%d\n", __func__, port);
+		return -EINVAL;
+	}
+
+	if (pp->flags & MV_ETH_F_SUSPEND) {
+		pr_err("Port %d must resumed before\n", port);
+		return -EINVAL;
+	}
+
+	pp->pm_mode = mode;
+
+	/* Set wol_ports_bmp, because WoL HW is shared by all ports,
+	   so for WoL mode (mode == 0), only one port is set in bit map */
+	if (mode)
+		wol_ports_bmp &= ~(1 << port);
+	else
+		wol_ports_bmp = (1 << port);
+
+	return MV_OK;
+}
+
+static void mv_pp2_sysfs_exit(void)
+{
+	struct device *pd;
+
+	pd = bus_find_device_by_name(&platform_bus_type, NULL, "pp2");
+	if (!pd) {
+		printk(KERN_ERR"%s: cannot find pp2 device\n", __func__);
+		return;
+	}
+#ifdef CONFIG_MV_PP2_L2FW
+	mv_pp2_l2fw_sysfs_exit(&pd->kobj);
+#endif
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	mv_pp2_dpi_sysfs_exit(&pd->kobj);
+#endif
+
+	mv_pp2_wol_sysfs_exit(&pd->kobj);
+	mv_pp2_pme_sysfs_exit(&pd->kobj);
+	mv_pp2_plcr_sysfs_exit(&pd->kobj);
+	mv_pp2_mc_sysfs_exit(&pd->kobj);
+	mv_pp2_cls4_sysfs_exit(&pd->kobj);
+	mv_pp2_cls3_sysfs_exit(&pd->kobj);
+	mv_pp2_cls2_sysfs_exit(&pd->kobj);
+	mv_pp2_cls_sysfs_exit(&pd->kobj);
+	mv_pp2_prs_high_sysfs_exit(&pd->kobj);
+	mv_pp2_gbe_sysfs_exit(&pd->kobj);
+	/* can't delete, we call to init/clean function from this sysfs */
+	/* TODO: open this line when we delete clean/init sysfs commands*/
+	/*mv_pp2_dbg_sysfs_exit(&pd->kobj);*/
+	platform_device_unregister(pp2_sysfs);
+}
+
+static int mv_pp2_sysfs_init(void)
+{
+	struct device *pd;
+
+	pd = bus_find_device_by_name(&platform_bus_type, NULL, "pp2");
+	if (!pd) {
+		pp2_sysfs = platform_device_register_simple("pp2", -1, NULL, 0);
+		pd = bus_find_device_by_name(&platform_bus_type, NULL, "pp2");
+	}
+
+	if (!pd) {
+		printk(KERN_ERR"%s: cannot find pp2 device\n", __func__);
+		return -1;
+	}
+
+	mv_pp2_gbe_sysfs_init(&pd->kobj);
+	mv_pp2_prs_high_sysfs_init(&pd->kobj);
+	mv_pp2_cls_sysfs_init(&pd->kobj);
+	mv_pp2_cls2_sysfs_init(&pd->kobj);
+	mv_pp2_cls3_sysfs_init(&pd->kobj);
+	mv_pp2_cls4_sysfs_init(&pd->kobj);
+	mv_pp2_mc_sysfs_init(&pd->kobj);
+	mv_pp2_plcr_sysfs_init(&pd->kobj);
+	mv_pp2_pme_sysfs_init(&pd->kobj);
+	mv_pp2_dbg_sysfs_init(&pd->kobj);
+	mv_pp2_wol_sysfs_init(&pd->kobj);
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	mv_pp2_dpi_sysfs_init(&pd->kobj);
+#endif
+
+#ifdef CONFIG_MV_PP2_L2FW
+	mv_pp2_l2fw_sysfs_init(&pd->kobj);
+#endif
+
+
+	return 0;
+}
+static int	mv_pp2_shared_probe(struct mv_pp2_pdata *plat_data)
+{
+	int size, cpu;
+
+	mv_pp2_sysfs_init();
+
+	/* init MAC Unit */
+	mv_pp2_win_init();
+
+	/* init MAC Unit */
+	mv_pp2_hal_shared_init(plat_data);
+
+	mv_pp2_config_show();
+
+	size = mv_pp2_ports_num * sizeof(struct eth_port *);
+	mv_pp2_ports = mvOsMalloc(size);
+	if (!mv_pp2_ports)
+		goto oom;
+
+	memset(mv_pp2_ports, 0, size);
+
+	/* Allocate aggregated TXQs control */
+	size = nr_cpu_ids * sizeof(struct aggr_tx_queue);
+	aggr_txqs = mvOsMalloc(size);
+	if (!aggr_txqs)
+		goto oom;
+
+	memset(aggr_txqs, 0, size);
+	for_each_possible_cpu(cpu) {
+		aggr_txqs[cpu].txq_size = CONFIG_MV_PP2_AGGR_TXQ_SIZE;
+		aggr_txqs[cpu].q = mvPp2AggrTxqInit(cpu, CONFIG_MV_PP2_AGGR_TXQ_SIZE);
+		if (!aggr_txqs[cpu].q)
+			goto oom;
+	}
+
+#ifdef CONFIG_MV_PP2_HWF
+	/* Create temporary TXQ for switching between HWF and SWF */
+	if (mvPp2TxqTempInit(CONFIG_MV_PP2_TEMP_TXQ_SIZE, CONFIG_MV_PP2_TEMP_TXQ_HWF_SIZE) != MV_OK)
+		goto oom;
+#endif
+
+	if (mv_pp2_bm_pools_init())
+		goto oom;
+
+	/* Parser default initialization */
+	if (mv_pp2_pnc_ctrl_en) {
+		if (mvPrsDefaultInit())
+			printk(KERN_ERR "%s: Warning PARSER default init failed\n", __func__);
+
+		if (mvPp2ClassifierDefInit())
+			printk(KERN_ERR "%s: Warning Classifier defauld init failed\n", __func__);
+	}
+
+#ifdef CONFIG_MV_ETH_PP2_1
+	mvPp2DpiInit();
+#endif
+
+	/* Initialize tasklet for handle link events */
+	tasklet_init(&link_tasklet, mv_pp2_link_tasklet, 0);
+
+	/* request IRQ for link interrupts from GOP */
+	if (request_irq(IRQ_GLOBAL_GOP, mv_pp2_link_isr, (IRQF_DISABLED), "mv_pp2_link", NULL))
+		printk(KERN_ERR "%s: Could not request IRQ for GOP interrupts\n", __func__);
+
+	mvGmacIsrSummaryUnmask();
+
+	mv_pp2_initialized = 1;
+	return 0;
+
+oom:
+	if (mv_pp2_ports)
+		mvOsFree(mv_pp2_ports);
+
+	if (aggr_txqs)
+		mvOsFree(aggr_txqs);
+
+	printk(KERN_ERR "%s: out of memory\n", __func__);
+	return -ENOMEM;
+}
+
+static void mv_pp2_shared_cleanup(void)
+{
+	int pool, cpu;
+
+	/*
+	There is no memory allocation in prser & classifier
+	cleanup functions are not necessary
+	*/
+
+	for (pool = 0; pool < MV_ETH_BM_POOLS; pool++)
+		mv_pp2_pool_destroy(pool);
+
+#ifdef CONFIG_MV_PP2_HWF
+	/* Delete temporary TXQ (switching between HWF and SWF)*/
+	mvPp2TxqTempDelete();
+#endif
+	/* cleanup aggregated tx queues */
+	for_each_possible_cpu(cpu)
+		mvPp2AggrTxqDelete(cpu);
+
+	mvOsFree(aggr_txqs);
+
+	mvOsFree(mv_pp2_ports);
+
+	/* Hal init by mv_pp2_hal_shared_init*/
+	mvPp2HalDestroy();
+
+	/*mv_pp2_win_cleanup();*/
+
+	mv_pp2_sysfs_exit();
+
+	mv_pp2_initialized = 0;
+}
+
+#ifdef CONFIG_OF
+static int pp2_initialized;
+static struct of_device_id of_pp2_table[] = {
+		{ .compatible = "marvell,packet_processor_v2" },
+};
+static struct of_device_id of_eth_lms_table[] = {
+		{ .compatible = "marvell,eth_lms" },
+};
+
+static int mv_eth_pp2_init(void)
+{
+	struct device_node *pp2_np, *eth_np;
+	struct clk *clk;
+
+	/* Has been initialized  */
+	if (pp2_initialized > 0)
+		return MV_OK;
+
+	/* PP2 memory iomap */
+	pp2_np = of_find_matching_node(NULL, of_pp2_table);
+	if (pp2_np)
+		pp2_vbase = (int)of_iomap(pp2_np, 0);
+	else
+		return MV_ERROR;
+	/* Set PP2 gate lock */
+	clk = of_clk_get(pp2_np, 0);
+	clk_prepare_enable(clk);
+
+	/* LMS memory iomap */
+	eth_np = of_find_matching_node(NULL, of_eth_lms_table);
+	if (eth_np)
+		eth_vbase = (int)of_iomap(eth_np, 0);
+	else
+		return MV_ERROR;
+
+	pp2_initialized++;
+
+	return MV_OK;
+}
+
+static struct mv_pp2_pdata *mv_plat_data_get(struct platform_device *pdev)
+{
+	struct mv_pp2_pdata *plat_data;
+	struct device_node *np = pdev->dev.of_node;
+	struct device_node *phy_node;
+	struct resource *res;
+	struct clk *clk;
+	phy_interface_t phy_mode;
+	const char *mac_addr = NULL;
+	void __iomem *base_addr;
+
+	/* Initialize packet processor and eth lms */
+	if (mv_eth_pp2_init()) {
+		pr_err("packet processor initialized fail\n");
+		return NULL;
+	}
+
+	/* Get GBE MAC port number */
+	if (of_property_read_u32(np, "eth,port-num", &pdev->id)) {
+		pr_err("could not get port number\n");
+		return NULL;
+	}
+
+	plat_data = kmalloc(sizeof(struct mv_pp2_pdata), GFP_KERNEL);
+	if (plat_data == NULL) {
+		pr_err("could not allocate memory for plat_data\n");
+		return NULL;
+	}
+	memset(plat_data, 0, sizeof(struct mv_pp2_pdata));
+
+	/* Get GBE MAC register base */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		pr_err("could not get resource information\n");
+		return NULL;
+	}
+	base_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!base_addr) {
+		pr_err("could not map neta registers\n");
+		return NULL;
+	}
+	pp2_port_vbase[pdev->id] = (int)base_addr;
+
+	/* get IRQ number */
+	if (pdev->dev.of_node) {
+		plat_data->irq = irq_of_parse_and_map(np, 0);
+		if (plat_data->irq == 0) {
+			pr_err("could not get IRQ number\n");
+			return NULL;
+		}
+	} else {
+		res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+		if (res == NULL) {
+			pr_err("could not get IRQ number\n");
+			return NULL;
+		}
+		plat_data->irq = res->start;
+	}
+
+	/* get MAC address */
+	mac_addr = of_get_mac_address(np);
+	if (mac_addr != NULL)
+		memcpy(plat_data->mac_addr, mac_addr, MV_MAC_ADDR_SIZE);
+
+	/* get phy smi address */
+	phy_node = of_parse_phandle(np, "phy", 0);
+	if (!phy_node) {
+		pr_err("no associated PHY\n");
+		return NULL;
+	}
+	if (of_property_read_u32(phy_node, "reg", &plat_data->phy_addr)) {
+		pr_err("could not PHY SMI address\n");
+		return NULL;
+	}
+
+	/* Get port MTU */
+	if (of_property_read_u32(np, "eth,port-mtu", &plat_data->mtu)) {
+		pr_err("could not get MTU\n");
+		return NULL;
+	}
+
+	/* Get port PHY mode */
+	phy_mode = of_get_phy_mode(np);
+	if (phy_mode < 0) {
+		pr_err("unknown PHY mode\n");
+		return NULL;
+	}
+	switch (phy_mode) {
+	case PHY_INTERFACE_MODE_SGMII:
+		plat_data->is_sgmii = 1;
+		plat_data->is_rgmii = 0;
+	break;
+	case PHY_INTERFACE_MODE_RGMII:
+	case PHY_INTERFACE_MODE_RGMII_ID:
+		plat_data->is_sgmii = 0;
+		plat_data->is_rgmii = 1;
+	break;
+	case PHY_INTERFACE_MODE_MII:
+		plat_data->is_sgmii = 0;
+		plat_data->is_rgmii = 0;
+	break;
+	default:
+		pr_err("unsupported PHY mode (%d)\n", phy_mode);
+		return NULL;
+	}
+
+	/* Global Parameters */
+	plat_data->tclk = MV_ETH_TCLK;
+	plat_data->max_port = MV_ETH_MAX_PORTS;
+
+	/* Per port parameters */
+	plat_data->cpu_mask  = 0x3;
+	plat_data->duplex = DUPLEX_FULL;
+	plat_data->speed = MV_ETH_SPEED_AN;
+
+	/* Connect to Linux device */
+	plat_data->flags |= MV_PP2_PDATA_F_LINUX_CONNECT;
+
+	pdev->dev.platform_data = plat_data;
+
+	clk = devm_clk_get(&pdev->dev, 0);
+	clk_prepare_enable(clk);
+
+	return plat_data;
+}
+#endif /* CONFIG_OF */
+
+/***********************************************************
+ * mv_pp2_eth_probe --                                         *
+ *   main driver initialization. loading the interfaces.   *
+ ***********************************************************/
+static int mv_pp2_eth_probe(struct platform_device *pdev)
+{
+	int phyAddr, is_sgmii, is_rgmii, port;
+#ifdef CONFIG_OF
+	struct mv_pp2_pdata *plat_data = mv_plat_data_get(pdev);
+	pdev->dev.platform_data = plat_data;
+#else
+	struct mv_pp2_pdata *plat_data = (struct mv_pp2_pdata *)pdev->dev.platform_data;
+#endif /* CONFIG_OF */
+	port = pdev->id;
+
+	if (!mv_pp2_initialized) {
+		global_dev = &pdev->dev;
+		mv_pp2_ports_num = plat_data->max_port;
+
+		if (mv_pp2_shared_probe(plat_data))
+			return -ENODEV;
+	}
+
+#ifdef CONFIG_OF
+	/* init SMI register */
+	mvEthPhySmiAddrSet(ETH_SMI_REG(port));
+#endif
+
+	if (!MV_PP2_IS_PON_PORT(port)) {
+		/* First: Disable Gmac */
+		mvGmacPortDisable(port);
+
+		/* Set the board information regarding PHY address */
+		phyAddr = plat_data->phy_addr;
+		if (phyAddr != -1) {
+			mvGmacPhyAddrSet(port, phyAddr);
+			mvEthPhyReset(phyAddr, 1000);
+		}
+
+#ifdef CONFIG_OF
+		is_sgmii = plat_data->is_sgmii;
+		is_rgmii = plat_data->is_rgmii;
+#else
+		is_sgmii = (plat_data->flags & MV_PP2_PDATA_F_SGMII) ? 1 : 0;
+		is_rgmii = (plat_data->flags & MV_PP2_PDATA_F_RGMII) ? 1 : 0;
+#endif
+
+		if (plat_data->flags & MV_PP2_PDATA_F_LB)
+			mvGmacPortLbSet(port, (plat_data->speed == SPEED_1000), is_sgmii);
+
+		mvGmacPortPowerUp(port, is_sgmii, is_rgmii);
+
+		mvGmacPortSumIsrUnmask(port);
+	}
+
+	if (mv_pp2_load_network_interfaces(pdev))
+		return -ENODEV;
+
+#ifdef CONFIG_PM
+	/* Register WoL interrupt */
+	if (!wol_isr_register) {
+		if (request_irq(IRQ_GLOBAL_NET_WAKE_UP, mv_wol_isr, (IRQF_DISABLED), "wol", NULL))
+			pr_err("cannot request irq %d for Wake-on-Lan\n", IRQ_GLOBAL_NET_WAKE_UP);
+		else
+			wol_isr_register++;
+	}
+#endif
+	/* used in mv_pp2_all_ports_probe */
+	plats[port] = pdev;
+
+	return 0;
+}
+
+
+static int mv_pp2_config_get(struct platform_device *pdev, MV_U8 *mac_addr)
+{
+	struct mv_pp2_pdata *plat_data = (struct mv_pp2_pdata *)pdev->dev.platform_data;
+
+	if (mac_addr)
+		memcpy(mac_addr, plat_data->mac_addr, MV_MAC_ADDR_SIZE);
+
+	return plat_data->mtu;
+}
+
+/***********************************************************
+ * mv_pp2_tx_timeout --                                    *
+ *   nothing to be done (?)                                *
+ ***********************************************************/
+static void mv_pp2_tx_timeout(struct net_device *dev)
+{
+#ifdef CONFIG_MV_PP2_STAT_ERR
+	struct eth_port *pp = MV_ETH_PRIV(dev);
+
+	pp->stats.tx_timeout++;
+#endif /* #ifdef CONFIG_MV_PP2_STAT_ERR */
+
+	printk(KERN_INFO "%s: tx timeout\n", dev->name);
+}
+
+static u32 mv_pp2_netdev_fix_features_internal(struct net_device *dev, u32 features)
+{
+	if (MV_MAX_PKT_SIZE(dev->mtu) > MV_PP2_TX_CSUM_MAX_SIZE) {
+		if (features & (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO)) {
+			features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
+			pr_info("%s: NETIF_F_IP_CSUM, NETIF_F_SG and NETIF_F_TSO not supported when mtu > %d bytes\n",
+				dev->name, MV_PP2_TX_CSUM_MAX_SIZE);
+		}
+	}
+	return features;
+}
+
+static void mv_pp2_netdev_update_features(struct net_device *dev, int mtu)
+{
+	if ((MV_MAX_PKT_SIZE(mtu) > MV_PP2_TX_CSUM_MAX_SIZE)) {
+		if (dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO)) {
+			dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
+			pr_err("%s: NETIF_F_IP_CSUM, NETIF_F_SG and NETIF_F_TSO not supported for mtu > %d bytes\n",
+				dev->name, MV_PP2_TX_CSUM_MAX_SIZE);
+		}
+	} else {
+			dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
+	}
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 25)
+static u32 mv_pp2_netdev_fix_features(struct net_device *dev, u32 features)
+{
+	return mv_pp2_netdev_fix_features_internal(dev, features);
+}
+#else
+static netdev_features_t mv_pp2_netdev_fix_features(struct net_device *dev, netdev_features_t features)
+{
+	return (netdev_features_t)mv_pp2_netdev_fix_features_internal(dev, (u32)features);
+}
+#endif /* LINUX_VERSION_CODE < 3.4.25 */
+
+
+static const struct net_device_ops mv_pp2_netdev_ops = {
+	.ndo_open = mv_pp2_eth_open,
+	.ndo_stop = mv_pp2_eth_stop,
+	.ndo_start_xmit = mv_pp2_tx,
+	.ndo_set_rx_mode = mv_pp2_rx_set_rx_mode,
+	.ndo_set_mac_address = mv_pp2_eth_set_mac_addr,
+	.ndo_change_mtu = mv_pp2_eth_change_mtu,
+	.ndo_tx_timeout = mv_pp2_tx_timeout,
+	.ndo_select_queue = mv_pp2_select_txq,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+	.ndo_fix_features = mv_pp2_netdev_fix_features,
+#endif
+};
+
+/***************************************************************
+ * mv_eth_netdev_init -- Allocate and initialize net_device    *
+ *                   structure                                 *
+ ***************************************************************/
+struct net_device *mv_pp2_netdev_init(struct platform_device *pdev)
+{
+	struct net_device *dev;
+	struct eth_port *dev_priv;
+
+	struct mv_pp2_pdata *plat_data = (struct mv_pp2_pdata *)pdev->dev.platform_data;
+#ifndef CONFIG_OF
+	struct resource *res;
+#endif
+
+	/* Aggregated TXQs - for each CPU */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+	dev = alloc_etherdev_mqs(sizeof(struct eth_port), nr_cpu_ids, CONFIG_MV_PP2_RXQ);
+#else
+	dev = alloc_etherdev_mq(sizeof(struct eth_port), nr_cpu_ids);
+#endif
+	if (!dev)
+		return NULL;
+
+	dev_priv = (struct eth_port *)netdev_priv(dev);
+	if (!dev_priv)
+		return NULL;
+
+	memset(dev_priv, 0, sizeof(struct eth_port));
+
+	dev_priv->dev = dev;
+	dev_priv->port = pdev->id;
+
+#ifdef CONFIG_OF
+	dev->irq = plat_data->irq;
+#else
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	BUG_ON(!res);
+	dev->irq = res->start;
+#endif
+
+	if (!is_valid_ether_addr(plat_data->mac_addr))
+#ifdef CONFIG_OF
+		eth_hw_addr_random(dev);
+#else
+		memset(dev->dev_addr, 0, MV_MAC_ADDR_SIZE);
+#endif
+	else {
+		memcpy(dev->dev_addr, plat_data->mac_addr, MV_MAC_ADDR_SIZE);
+		memcpy(dev->perm_addr, plat_data->mac_addr, MV_MAC_ADDR_SIZE);
+	}
+
+	dev->mtu = plat_data->mtu;
+	dev->tx_queue_len = CONFIG_MV_PP2_TXQ_DESC;
+	dev->watchdog_timeo = 5 * HZ;
+
+	if (MV_PP2_IS_PON_PORT(dev_priv->port))
+		dev->hard_header_len += MV_ETH_MH_SIZE;
+
+	dev->netdev_ops = &mv_pp2_netdev_ops;
+
+	SET_ETHTOOL_OPS(dev, &mv_pp2_eth_tool_ops);
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	return dev;
+
+}
+
+/***************************************************************
+ * mv_pp2_netdev_connect -- Connect device to linux            *
+***************************************************************/
+static int mv_pp2_netdev_connect(struct eth_port *pp)
+{
+	struct net_device *dev;
+	struct cpu_ctrl	*cpuCtrl;
+	int cpu;
+
+	if (!pp) {
+		pr_err("\to failed to register, uninitialized port\n");
+		return -ENODEV;
+	}
+
+	dev = pp->dev;
+
+	for_each_possible_cpu(cpu) {
+		cpuCtrl = pp->cpu_config[cpu];
+		cpuCtrl->napi_group = NULL;
+	}
+
+	if (pp->flags & MV_ETH_F_CONNECT_LINUX) {
+		mv_pp2_netdev_init_features(pp->dev);
+		if (register_netdev(dev)) {
+			pr_err("\to failed to register %s\n", dev->name);
+			free_netdev(dev);
+			return -ENODEV;
+		} else
+			pr_info("\to %s, ifindex = %d, GbE port = %d", dev->name, dev->ifindex, pp->port);
+	}
+
+	return MV_OK;
+}
+
+bool mv_pp2_eth_netdev_find(unsigned int dev_idx)
+{
+	int port;
+
+	for (port = 0; port < mv_pp2_ports_num; port++) {
+		struct eth_port *pp = mv_pp2_port_by_id(port);
+
+		if (pp && pp->dev && (pp->dev->ifindex == dev_idx))
+			return true;
+	}
+	return false;
+}
+EXPORT_SYMBOL(mv_pp2_eth_netdev_find);
+
+
+int mv_pp2_hal_init(struct eth_port *pp)
+{
+	int rxq, txp, txq, size;
+	struct rx_queue *rxq_ctrl;
+
+	/* Init port */
+	pp->port_ctrl = mvPp2PortInit(pp->port, pp->first_rxq, pp->rxq_num, pp->dev->dev.parent);
+	if (!pp->port_ctrl) {
+		printk(KERN_ERR "%s: failed to load port=%d\n", __func__, pp->port);
+		return -ENODEV;
+	}
+
+	size = pp->txp_num * CONFIG_MV_PP2_TXQ * sizeof(struct tx_queue);
+	pp->txq_ctrl = mvOsMalloc(size);
+	if (!pp->txq_ctrl)
+		goto oom;
+
+	memset(pp->txq_ctrl, 0, size);
+
+	/* Create TX descriptor rings */
+	for (txp = 0; txp < pp->txp_num; txp++) {
+		for (txq = 0; txq < CONFIG_MV_PP2_TXQ; txq++) {
+			struct tx_queue *txq_ctrl;
+			int txq_size;
+
+			txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_PP2_TXQ + txq];
+
+			txq_ctrl->txp = txp;
+			txq_ctrl->txq = txq;
+
+			txq_ctrl->txq_done_pkts_coal = mv_ctrl_pp2_txdone;
+
+#ifdef CONFIG_MV_ETH_PP2_1
+			txq_ctrl->rsvd_chunk = CONFIG_MV_PP2_TXQ_CPU_CHUNK;
+#else
+			txq_ctrl->hwf_size = CONFIG_MV_PP2_TXQ_HWF_DESC;
+#endif
+			txq_size = mv_pp2_txq_size_validate(txq_ctrl, CONFIG_MV_PP2_TXQ_DESC);
+			if (txq_size < 0)
+				return -ENODEV;
+
+			mv_pp2_txq_size_set(txq_ctrl, txq_size);
+		}
+	}
+
+	pp->rxq_ctrl = mvOsMalloc(pp->rxq_num * sizeof(struct rx_queue));
+	if (!pp->rxq_ctrl)
+		goto oom;
+
+	memset(pp->rxq_ctrl, 0, pp->rxq_num * sizeof(struct rx_queue));
+
+	/* Create Rx descriptor rings */
+	for (rxq = 0; rxq < pp->rxq_num; rxq++) {
+		rxq_ctrl = &pp->rxq_ctrl[rxq];
+		rxq_ctrl->rxq_size = CONFIG_MV_PP2_RXQ_DESC;
+		rxq_ctrl->rxq_pkts_coal = CONFIG_MV_PP2_RX_COAL_PKTS;
+		rxq_ctrl->rxq_time_coal = CONFIG_MV_PP2_RX_COAL_USEC;
+	}
+
+	if (pp->tx_spec.flags & MV_ETH_TX_F_MH)
+		mvPp2MhSet(pp->port, MV_TAG_TYPE_MH);
+
+	/* Configure defaults */
+	pp->autoneg_cfg = AUTONEG_ENABLE;
+	pp->speed_cfg = SPEED_1000;
+	pp->duplex_cfg = DUPLEX_FULL;
+	pp->advertise_cfg = 0x2f;
+	pp->rx_time_coal_cfg = CONFIG_MV_PP2_RX_COAL_USEC;
+	pp->rx_pkts_coal_cfg = CONFIG_MV_PP2_RX_COAL_PKTS;
+	pp->tx_pkts_coal_cfg = mv_ctrl_pp2_txdone;
+	pp->rx_time_low_coal_cfg = CONFIG_MV_PP2_RX_COAL_USEC >> 2;
+	pp->rx_time_high_coal_cfg = CONFIG_MV_PP2_RX_COAL_USEC << 2;
+	pp->rx_pkts_low_coal_cfg = CONFIG_MV_PP2_RX_COAL_PKTS;
+	pp->rx_pkts_high_coal_cfg = CONFIG_MV_PP2_RX_COAL_PKTS;
+	pp->pkt_rate_low_cfg = 1000;
+	pp->pkt_rate_high_cfg = 50000;
+	pp->rate_sample_cfg = 5;
+	pp->rate_current = 0; /* Unknown */
+
+	return 0;
+oom:
+	printk(KERN_ERR "%s: port=%d: out of memory\n", __func__, pp->port);
+	return -ENODEV;
+}
+
+/* Show network driver configuration */
+void mv_pp2_config_show(void)
+{
+#ifdef CONFIG_MV_ETH_PP2_1
+	pr_info("  o	PPv2.1 Giga driver\n");
+#else
+	pr_info("  o	PPv2.0 Giga driver\n");
+#endif
+
+	pr_info("  o %d Giga ports supported\n", mv_pp2_ports_num);
+
+#ifdef CONFIG_MV_INCLUDE_PON
+	pr_info("  o xPON port is #%d: - %d of %d TCONTs supported\n",
+		MV_PON_LOGIC_PORT_GET(), CONFIG_MV_PON_TCONTS, MV_PP2_MAX_TCONT);
+#endif
+
+#ifdef CONFIG_MV_PP2_SKB_RECYCLE
+	pr_info("  o SKB recycle supported (%s)\n", mv_ctrl_pp2_recycle ? "Enabled" : "Disabled");
+#endif
+
+	pr_info("  o BM supported for CPU: %d BM pools\n", MV_ETH_BM_POOLS);
+
+#ifdef CONFIG_MV_PP2_HWF
+	pr_info("  o HWF supported\n");
+#endif
+
+#ifdef CONFIG_MV_ETH_PMT
+	pr_info("  o PME supported\n");
+#endif
+
+	pr_info("  o RX Queue support: %d Queues * %d Descriptors\n", CONFIG_MV_PP2_RXQ, CONFIG_MV_PP2_RXQ_DESC);
+
+	pr_info("  o TX Queue support: %d Queues * %d Descriptors\n", CONFIG_MV_PP2_TXQ, CONFIG_MV_PP2_TXQ_DESC);
+
+#if defined(CONFIG_MV_PP2_TSO)
+	pr_info("  o GSO supported\n");
+#endif /* CONFIG_MV_PP2_TSO */
+
+#if defined(CONFIG_MV_ETH_RX_CSUM_OFFLOAD)
+	pr_info("  o Receive checksum offload supported\n");
+#endif
+#if defined(CONFIG_MV_ETH_TX_CSUM_OFFLOAD)
+	pr_info("  o Transmit checksum offload supported\n");
+#endif
+
+#ifdef CONFIG_MV_PP2_STAT_ERR
+	pr_info("  o Driver ERROR statistics enabled\n");
+#endif
+
+#ifdef CONFIG_MV_PP2_STAT_INF
+	pr_info("  o Driver INFO statistics enabled\n");
+#endif
+
+#ifdef CONFIG_MV_PP2_STAT_DBG
+	pr_info("  o Driver DEBUG statistics enabled\n");
+#endif
+
+#ifdef CONFIG_MV_ETH_DEBUG_CODE
+	pr_info("  o Driver debug messages enabled\n");
+#endif
+
+#if defined(CONFIG_MV_INCLUDE_SWITCH)
+	pr_info("  o Switch support enabled\n");
+#endif /* CONFIG_MV_INCLUDE_SWITCH */
+
+	pr_info("\n");
+}
+
+/* Set network device features on initialization. Take into account default compile time configuration. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+void mv_pp2_netdev_init_features(struct net_device *dev)
+{
+	dev->features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_LLTX;
+	dev->hw_features = NETIF_F_GRO | NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG;
+#ifdef CONFIG_MV_PP2_TSO
+	dev->features |= NETIF_F_TSO;
+	dev->hw_features |= NETIF_F_TSO;
+#endif
+}
+#else
+void mv_pp2_netdev_init_features(struct net_device *dev)
+{
+	if ((MV_MAX_PKT_SIZE(dev->mtu) <= MV_PP2_TX_CSUM_MAX_SIZE))
+		dev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_LLTX;
+	else
+		dev->features = NETIF_F_LLTX;
+#ifdef CONFIG_MV_PP2_TSO
+	if ((MV_MAX_PKT_SIZE(dev->mtu) <= MV_PP2_TX_CSUM_MAX_SIZE))
+		dev->features |= NETIF_F_TSO;
+#endif
+}
+#endif
+
+static int mv_pp2_rxq_fill(struct eth_port *pp, int rxq, int num)
+{
+	mvPp2RxqNonOccupDescAdd(pp->port, rxq, num);
+	return num;
+}
+
+static int mv_pp2_txq_create(struct eth_port *pp, struct tx_queue *txq_ctrl)
+{
+	int cpu;
+	struct txq_cpu_ctrl *txq_cpu_ptr;
+
+	txq_ctrl->q = mvPp2TxqInit(pp->port, txq_ctrl->txp, txq_ctrl->txq, txq_ctrl->txq_size, txq_ctrl->hwf_size);
+	if (txq_ctrl->q == NULL) {
+#ifdef CONFIG_MV_ETH_PP2_1
+		printk(KERN_ERR "%s: can't create TxQ - port=%d, txp=%d, txq=%d, desc=%d, hwf desc=%d swf desc = %d\n",
+			__func__, pp->port, txq_ctrl->txp, txq_ctrl->txp,
+			txq_ctrl->txq_size, txq_ctrl->hwf_size, txq_ctrl->swf_size);
+#else
+		printk(KERN_ERR "%s: can't create TxQ - port=%d, txp=%d, txq=%d, desc=%d, hwf desc=%d\n",
+		       __func__, pp->port, txq_ctrl->txp, txq_ctrl->txp, txq_ctrl->txq_size, txq_ctrl->hwf_size);
+#endif
+		return -ENODEV;
+	}
+
+	for_each_possible_cpu(cpu) {
+		txq_cpu_ptr = &txq_ctrl->txq_cpu[cpu];
+		txq_cpu_ptr->shadow_txq = mvOsMalloc(txq_cpu_ptr->txq_size * sizeof(MV_U32));
+		if (txq_cpu_ptr->shadow_txq == NULL)
+			goto no_mem;
+		/* reset txq */
+		txq_cpu_ptr->txq_count = 0;
+		txq_cpu_ptr->shadow_txq_put_i = 0;
+		txq_cpu_ptr->shadow_txq_get_i = 0;
+	}
+	return 0;
+
+no_mem:
+	mv_pp2_txq_delete(pp, txq_ctrl);
+	return -ENOMEM;
+}
+
+
+static void mv_pp2_txq_delete(struct eth_port *pp, struct tx_queue *txq_ctrl)
+{
+	int cpu;
+	struct txq_cpu_ctrl *txq_cpu_ptr;
+
+	for_each_possible_cpu(cpu) {
+		txq_cpu_ptr = &txq_ctrl->txq_cpu[cpu];
+		if (txq_cpu_ptr->shadow_txq) {
+			mvOsFree(txq_cpu_ptr->shadow_txq);
+			txq_cpu_ptr->shadow_txq = NULL;
+		}
+	}
+
+	if (txq_ctrl->q) {
+		mvPp2TxqDelete(pp->port, txq_ctrl->txp, txq_ctrl->txq);
+		txq_ctrl->q = NULL;
+	}
+}
+
+int mv_pp2_txq_clean(int port, int txp, int txq)
+{
+	struct eth_port *pp;
+	struct tx_queue *txq_ctrl;
+	struct txq_cpu_ctrl *txq_cpu_ptr;
+	int msec, pending, tx_done, cpu;
+
+	if (mvPp2TxpCheck(port, txp))
+		return -EINVAL;
+
+	pp = mv_pp2_port_by_id(port);
+	if ((pp == NULL) || (pp->txq_ctrl == NULL))
+		return -ENODEV;
+
+	if (mvPp2MaxCheck(txq, CONFIG_MV_PP2_TXQ, "txq"))
+		return -EINVAL;
+
+	txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_PP2_TXQ + txq];
+	if (txq_ctrl->q) {
+		/* Enable TXQ drain */
+		mvPp2TxqDrainSet(port, txp, txq, MV_TRUE);
+
+		/* Wait for all packets to be transmitted */
+		msec = 0;
+		do {
+			if (msec >= MV_ETH_TX_PENDING_TIMEOUT_MSEC) {
+				pr_err("port=%d, txp=%d txq=%d: timeout for transmit pending descriptors\n",
+					port, txp, txq);
+				break;
+			}
+			mdelay(1);
+			msec++;
+
+			pending = mvPp2TxqPendDescNumGet(port, txp, txq);
+		} while (pending);
+
+		/* Disable TXQ Drain */
+		mvPp2TxqDrainSet(port, txp, txq, MV_FALSE);
+
+		/* release all transmitted packets */
+
+		tx_done = mv_pp2_txq_done(pp, txq_ctrl);
+		if (tx_done > 0)
+			mvOsPrintf(KERN_INFO "%s: port=%d, txp=%d txq=%d: Free %d transmitted descriptors\n",
+				__func__, port, txp, txq, tx_done);
+
+		/* release all untransmitted packets */
+		tx_done = mv_pp2_txq_done_force(pp, txq_ctrl);
+		if (tx_done > 0)
+			mvOsPrintf(KERN_INFO "%s: port=%d, txp=%d txq=%d: Free %d untransmitted descriptors\n",
+				__func__, port, txp, txq, tx_done);
+
+		/* release all reserved descriptors */
+		mvPp2TxqFreeReservedDesc(port, txp, txq);
+
+		for_each_possible_cpu(cpu) {
+			txq_cpu_ptr = &txq_ctrl->txq_cpu[cpu];
+			txq_cpu_ptr->reserved_num = 0;
+		}
+	}
+	return 0;
+}
+
+/* Free all packets pending transmit from all TXQs and reset TX port */
+int mv_pp2_txp_clean(int port, int txp)
+{
+	struct eth_port *pp;
+	int txq;
+
+	if (mvPp2TxpCheck(port, txp))
+		return -EINVAL;
+
+	pp = mv_pp2_port_by_id(port);
+	if ((pp == NULL) || (pp->txq_ctrl == NULL))
+		return -ENODEV;
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		printk(KERN_ERR "Port %d must be stopped before\n", port);
+		return -EINVAL;
+	}
+
+	/* Flush TX FIFO */
+	mvPp2TxPortFifoFlush(port, MV_TRUE);
+
+	/* free the skb's in the hal tx ring */
+	for (txq = 0; txq < CONFIG_MV_PP2_TXQ; txq++)
+		mv_pp2_txq_clean(port, txp, txq);
+
+	mvPp2TxPortFifoFlush(port, MV_FALSE);
+
+	return 0;
+}
+
+/* Free received packets from all RXQs and reset RX of the port */
+int mv_pp2_rx_reset(int port)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		printk(KERN_ERR "Port %d must be stopped before\n", port);
+		return -EINVAL;
+	}
+
+	mvPp2RxReset(port);
+	return 0;
+}
+
+/***********************************************************
+ * coal set functions		                           *
+ ***********************************************************/
+MV_STATUS mv_pp2_rx_ptks_coal_set(int port, int rxq, MV_U32 value)
+{
+	MV_STATUS status = mvPp2RxqPktsCoalSet(port, rxq, value);
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+	if (status == MV_OK)
+		pp->rxq_ctrl[rxq].rxq_pkts_coal = value;
+	return status;
+}
+
+MV_STATUS mv_pp2_rx_time_coal_set(int port, int rxq, MV_U32 value)
+{
+
+	MV_STATUS status = mvPp2RxqTimeCoalSet(port, rxq, value);
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+	if (status == MV_OK)
+		pp->rxq_ctrl[rxq].rxq_time_coal = value;
+	return status;
+
+	return  MV_OK;
+}
+
+MV_STATUS mv_pp2_tx_done_ptks_coal_set(int port, int txp, int txq, MV_U32 value)
+{
+	MV_STATUS status = mvPp2TxDonePktsCoalSet(port, txp, txq, value);
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+	if (status == MV_OK)
+		pp->txq_ctrl[txp * CONFIG_MV_PP2_TXQ + txq].txq_done_pkts_coal = value;
+	return status;
+}
+
+/***********************************************************
+* mv_pp2_start_internals --                               *
+*   fill rx buffers. start rx/tx activity. set coalesing. *
+*   clear and unmask interrupt bits                       *
+*   -   RX and TX init
+*   -   HW port enable
+*   -   HW enable port tx
+*   -   Enable NAPI
+*   -   Enable interrupts (RXQ still close .. interrupts will not received)
+*   -   SW start tx (wake_up _all_queues)
+*   -   HW start rx
+***********************************************************/
+int mv_pp2_start_internals(struct eth_port *pp, int mtu)
+{
+	int rxq, txp, txq, err = 0;
+
+	if (test_bit(MV_ETH_F_STARTED_BIT, &(pp->flags))) {
+		STAT_ERR(pp->stats.state_err++);
+		printk(KERN_ERR "%s: port %d, wrong state: STARTED_BIT = 1\n", __func__, pp->port);
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (!MV_PP2_IS_PON_PORT(pp->port))
+		mvGmacMaxRxSizeSet(pp->port, RX_PKT_SIZE(mtu));
+#ifdef CONFIG_MV_INCLUDE_PON
+	else
+		mv_pon_mtu_config(RX_PKT_SIZE(mtu));
+#endif
+
+	err = mv_pp2_swf_bm_pool_init(pp, mtu);
+	if (err)
+		goto out;
+#ifdef CONFIG_MV_PP2_HWF
+	err = mv_pp2_hwf_bm_pool_init(pp, mtu);
+	if (err)
+		goto out;
+#endif /* CONFIG_MV_PP2_HWF */
+
+
+	for (rxq = 0; rxq < pp->rxq_num; rxq++) {
+		if (pp->rxq_ctrl[rxq].q == NULL) {
+			/* allocate descriptors and initialize RXQ */
+			pp->rxq_ctrl[rxq].q = mvPp2RxqInit(pp->port, rxq, pp->rxq_ctrl[rxq].rxq_size);
+			if (!pp->rxq_ctrl[rxq].q) {
+				printk(KERN_ERR "%s: can't create RxQ port=%d, rxq=%d, desc=%d\n",
+				       __func__, pp->port, rxq, pp->rxq_ctrl[rxq].rxq_size);
+				err = -ENODEV;
+				goto out;
+			}
+			/* Set Offset  - at this point logical RXQs are already mappedto physical RXQs */
+			mvPp2RxqOffsetSet(pp->port, rxq, NET_SKB_PAD);
+		}
+
+		/* Set coalescing pkts and time */
+		mv_pp2_rx_ptks_coal_set(pp->port, rxq, pp->rxq_ctrl[rxq].rxq_pkts_coal);
+		mv_pp2_rx_time_coal_set(pp->port, rxq, pp->rxq_ctrl[rxq].rxq_time_coal);
+
+		if (!(pp->flags & MV_ETH_F_IFCAP_NETMAP)) {
+			if (mvPp2RxqFreeDescNumGet(pp->port, rxq) == 0)
+				mv_pp2_rxq_fill(pp, rxq, pp->rxq_ctrl[rxq].rxq_size);
+		} else {
+			/*printk(KERN_ERR "%s :: run with netmap enable", __func__);*/
+			mvPp2RxqNonOccupDescAdd(pp->port, rxq, pp->rxq_ctrl[rxq].rxq_size);
+#ifdef CONFIG_NETMAP
+			if (pp2_netmap_rxq_init_buffers(pp, rxq))
+				return MV_ERROR;
+#endif
+		}
+	}
+
+	for (txp = 0; txp < pp->txp_num; txp++) {
+		for (txq = 0; txq < CONFIG_MV_PP2_TXQ; txq++) {
+			struct tx_queue *txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_PP2_TXQ + txq];
+
+			if ((txq_ctrl->q == NULL) && (txq_ctrl->txq_size > 0)) {
+				err = mv_pp2_txq_create(pp, txq_ctrl);
+				if (err)
+					goto out;
+				spin_lock_init(&txq_ctrl->queue_lock);
+			}
+#ifdef CONFIG_MV_PP2_TXDONE_ISR
+			mv_pp2_tx_done_ptks_coal_set(pp->port, txp, txq, txq_ctrl->txq_done_pkts_coal);
+#endif /* CONFIG_MV_PP2_TXDONE_ISR */
+#ifdef CONFIG_NETMAP
+		if (pp->flags & MV_ETH_F_IFCAP_NETMAP) {
+			if (pp2_netmap_txq_init_buffers(pp, txp, txq))
+				return MV_ERROR;
+		}
+#endif /* CONFIG_NETMAP */
+		}
+		mvPp2TxpMaxTxSizeSet(pp->port, txp, RX_PKT_SIZE(mtu));
+	}
+	/* TODO: set speed, duplex, fc with ethtool parameres (speed_cfg, etc..) */
+
+	set_bit(MV_ETH_F_STARTED_BIT, &(pp->flags));
+ out:
+	return err;
+}
+
+
+
+int mv_pp2_eth_resume_internals(struct eth_port *pp, int mtu)
+{
+/* TBD */
+	return 0;
+}
+
+
+int mv_pp2_restore_registers(struct eth_port *pp, int mtu)
+{
+/* TBD */
+	return 0;
+}
+
+
+/***********************************************************
+ * mv_pp2_eth_suspend_internals --                                *
+ *   stop port rx/tx activity. free skb's from rx/tx rings.*
+ ***********************************************************/
+int mv_pp2_eth_suspend_internals(struct eth_port *pp)
+{
+/* TBD */
+	return 0;
+}
+
+
+/***********************************************************
+* mv_pp2_stop_internals --                                *
+*   -   HW stop rx
+*   -   SW stop tx (tx_stop_all_queues)
+*   -   Disable interrupts
+*   -   Disable NAPI
+*   -   HW  disable port tx
+*   -   HW disable port
+*   -   RX and TX cleanups
+***********************************************************/
+int mv_pp2_stop_internals(struct eth_port *pp)
+{
+	int queue, txp;
+
+	if (!test_and_clear_bit(MV_ETH_F_STARTED_BIT, &(pp->flags))) {
+		STAT_ERR(pp->stats.state_err++);
+		printk(KERN_ERR "%s: port %d, wrong state: STARTED_BIT = 0.\n", __func__, pp->port);
+		goto error;
+	}
+
+	mdelay(10);
+
+	/* Transmit and free all packets */
+	for (txp = 0; txp < pp->txp_num; txp++)
+		mv_pp2_txp_clean(pp->port, txp);
+
+
+	/* free the skb's in the hal rx ring */
+	for (queue = 0; queue < pp->rxq_num; queue++)
+		mv_pp2_rxq_drop_pkts(pp, queue);
+
+	return 0;
+
+error:
+	printk(KERN_ERR "GbE port %d: stop internals failed\n", pp->port);
+	return -1;
+}
+
+/* return positive if MTU is valid */
+int mv_pp2_eth_check_mtu_valid(struct net_device *dev, int mtu)
+{
+	if (mtu < 68) {
+		pr_info("MTU must be at least 68, change mtu failed\n");
+		return -EINVAL;
+	}
+	if (mtu > 9676 /* 9700 - 20 and rounding to 8 */) {
+		pr_info("%s: Illegal MTU value %d, ", dev->name, mtu);
+		mtu = 9676;
+		pr_info(" rounding MTU to: %d\n", mtu);
+	}
+	return mtu;
+}
+
+/* Check if MTU can be changed */
+int mv_pp2_check_mtu_internals(struct net_device *dev, int mtu)
+{
+	struct eth_port *pp = MV_ETH_PRIV(dev);
+	struct bm_pool *port_pool;
+
+	if (!pp)
+		return -EPERM;
+
+	port_pool = pp->pool_long;
+
+	if (!port_pool)
+		return 0;
+
+	/* long pool is not shared with other ports */
+	if ((port_pool) && (port_pool->port_map == (1 << pp->port)))
+		return 0;
+
+	return 1;
+}
+
+/***********************************************************
+ * mv_pp2_eth_change_mtu_internals --                      *
+ *   stop port activity. release skb from rings. set new   *
+ *   mtu in device and hw. restart port activity and       *
+ *   and fill rx-buiffers with size according to new mtu.  *
+ ***********************************************************/
+int mv_pp2_eth_change_mtu_internals(struct net_device *dev, int mtu)
+{
+	struct bm_pool *port_pool;
+	struct eth_port *pp = MV_ETH_PRIV(dev);
+	int pkt_size = RX_PKT_SIZE(mtu), pkts_num;
+	unsigned long flags = 0;
+
+	if (test_bit(MV_ETH_F_STARTED_BIT, &(pp->flags))) {
+		STAT_ERR(pp->stats.state_err++);
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+		if (pp->dbg_flags & MV_ETH_F_DBG_RX)
+			printk(KERN_ERR "%s: port %d, STARTED_BIT = 0, Invalid value.\n", __func__, pp->port);
+#endif
+		return -1;
+	}
+
+	if (mtu == dev->mtu)
+		goto mtu_out;
+
+	port_pool = pp->pool_long;
+
+	if (port_pool) {
+		MV_ETH_LOCK(&port_pool->lock, flags);
+		pkts_num = port_pool->buf_num;
+		/* for now, swf long pool must not be shared with other ports */
+		if (port_pool->port_map == (1 << pp->port)) {
+			/* refill pool with updated buffer size */
+			mv_pp2_pool_free(port_pool->pool, pkts_num);
+			port_pool->pkt_size = pkt_size;
+			mv_pp2_pool_add(pp, port_pool->pool, pkts_num);
+		} else {
+			printk(KERN_ERR "%s: port %d, SWF long pool is shared with other ports.\n", __func__, pp->port);
+			MV_ETH_UNLOCK(&port_pool->lock, flags);
+			return -1;
+		}
+		mvPp2BmPoolBufSizeSet(port_pool->pool, RX_BUF_SIZE(port_pool->pkt_size));
+		MV_ETH_UNLOCK(&port_pool->lock, flags);
+	}
+
+#ifdef CONFIG_MV_PP2_HWF
+	port_pool = pp->hwf_pool_long;
+
+	if (port_pool && (pp->hwf_pool_long != pp->pool_long)) {
+		MV_ETH_LOCK(&port_pool->lock, flags);
+		pkts_num = port_pool->buf_num;
+		/* for now, hwf long pool must not be shared with other ports */
+		if (port_pool->port_map == (1 << pp->port)) {
+			/* refill pool with updated buffer size */
+			mv_pp2_pool_free(port_pool->pool, pkts_num);
+			port_pool->pkt_size = pkt_size;
+			mv_pp2_pool_add(pp, port_pool->pool, pkts_num);
+		} else {
+			printk(KERN_ERR "%s: port %d, HWF long pool is shared with other ports.\n", __func__, pp->port);
+			MV_ETH_UNLOCK(&port_pool->lock, flags);
+			return -1;
+		}
+		mvPp2BmPoolBufSizeSet(port_pool->pool, RX_HWF_BUF_SIZE(port_pool->pkt_size));
+		MV_ETH_UNLOCK(&port_pool->lock, flags);
+	}
+#endif /* CONFIG_MV_PP2_HWF */
+
+	if (!MV_PP2_IS_PON_PORT(pp->port))
+		mvGmacMaxRxSizeSet(pp->port, pkt_size);
+#ifdef CONFIG_MV_INCLUDE_PON
+	else
+		mv_pon_mtu_config(pkt_size);
+#endif
+
+#ifndef CONFIG_MV_ETH_PP2_1
+	mv_pp2_tx_mtu_set(pp->port, pkt_size);
+#endif
+
+mtu_out:
+	dev->mtu = mtu;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
+	netdev_update_features(dev);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+	netdev_features_change(dev);
+#else
+	mv_pp2_netdev_update_features(dev, mtu);
+#endif
+	return 0;
+}
+
+#ifdef CONFIG_MV_PP2_TXDONE_IN_HRTIMER
+/***********************************************************
+ * mv_pp2_tx_done_hr_timer_callback --			   *
+ *   callback for tx_done hrtimer                          *
+ ***********************************************************/
+enum hrtimer_restart mv_pp2_tx_done_hr_timer_callback(struct hrtimer *timer)
+{
+	struct cpu_ctrl *cpuCtrl = container_of(timer, struct cpu_ctrl, tx_done_timer);
+
+	tasklet_schedule(&cpuCtrl->tx_done_tasklet);
+
+	return HRTIMER_NORESTART;
+}
+#endif
+
+#ifndef CONFIG_MV_PP2_TXDONE_ISR
+/***********************************************************
+ * mv_pp2_tx_done_timer_callback --			   *
+ *   N msec periodic callback for tx_done                  *
+ ***********************************************************/
+static void mv_pp2_tx_done_timer_callback(unsigned long data)
+{
+	struct cpu_ctrl *cpuCtrl = (struct cpu_ctrl *)data;
+	struct eth_port *pp = cpuCtrl->pp;
+	int tx_done = 0, tx_todo = 0;
+	unsigned int txq_mask;
+
+	STAT_INFO(pp->stats.tx_done_timer_event[smp_processor_id()]++);
+
+	clear_bit(MV_ETH_F_TX_DONE_TIMER_BIT, &(cpuCtrl->flags));
+
+	if (!test_bit(MV_ETH_F_STARTED_BIT, &(pp->flags))) {
+		STAT_INFO(pp->stats.netdev_stop++);
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+		if (pp->dbg_flags & MV_ETH_F_DBG_TX)
+			printk(KERN_ERR "%s: port #%d is stopped, STARTED_BIT = 0, exit timer.\n", __func__, pp->port);
+#endif /* CONFIG_MV_PP2_DEBUG_CODE */
+
+		return;
+	}
+
+	if (MV_PP2_IS_PON_PORT(pp->port))
+		tx_done = mv_pp2_tx_done_pon(pp, &tx_todo);
+	else {
+		/* check all possible queues, as there is no indication from interrupt */
+		txq_mask = (1 << CONFIG_MV_PP2_TXQ) - 1;
+		tx_done = mv_pp2_tx_done_gbe(pp, txq_mask, &tx_todo);
+	}
+
+	if (cpuCtrl->cpu != smp_processor_id()) {
+		pr_warning("%s: Called on other CPU - %d != %d\n", __func__, cpuCtrl->cpu, smp_processor_id());
+		cpuCtrl = pp->cpu_config[smp_processor_id()];
+	}
+	if (tx_todo > 0)
+		mv_pp2_add_tx_done_timer(cpuCtrl);
+}
+#endif
+
+void mv_pp2_mac_show(int port)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (pp == NULL) {
+		pr_err("%s: port %d entry is null\n", __func__, port);
+		return;
+	}
+
+	/* TODO - example in NETA */
+}
+
+/********************************************/
+/*		DSCP API		    */
+/********************************************/
+void mv_pp2_dscp_map_show(int port)
+{
+	int dscp, txq;
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (pp == NULL) {
+		pr_err("%s: port %d entry is null\n", __func__, port);
+		return;
+	}
+
+	/* TODO - call pnc_ipv4_dscp_show() example in NETA */
+
+	printk(KERN_ERR "\n");
+	printk(KERN_ERR " DSCP <=> TXQ map for port #%d\n\n", port);
+	for (dscp = 0; dscp < sizeof(pp->txq_dscp_map); dscp++) {
+		txq = pp->txq_dscp_map[dscp];
+		if (txq != MV_ETH_TXQ_INVALID)
+			printk(KERN_ERR "0x%02x <=> %d\n", dscp, txq);
+	}
+}
+
+int mv_pp2_rxq_dscp_map_set(int port, int rxq, unsigned char dscp)
+{
+	/* TBD */
+	printk(KERN_ERR "Not supported\n");
+
+	return MV_FAIL;
+}
+
+/* Set TXQ for special DSCP value. txq=-1 - use default TXQ for this port */
+int mv_pp2_txq_dscp_map_set(int port, int txq, unsigned char dscp)
+{
+	MV_U8 old_txq;
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (mvPp2PortCheck(port))
+		return -EINVAL;
+
+	if ((pp == NULL) || (pp->txq_ctrl == NULL))
+		return -ENODEV;
+
+	if ((dscp < 0) || (dscp >= 64))
+		return -EINVAL;
+
+	old_txq = pp->txq_dscp_map[dscp];
+
+	/* The same txq - do nothing */
+	if (old_txq == (MV_U8) txq)
+		return 0;
+
+	if (txq == -1) {
+		pp->txq_dscp_map[dscp] = MV_ETH_TXQ_INVALID;
+		return 0;
+	}
+
+	if ((txq < 0) || (txq >= CONFIG_MV_PP2_TXQ))
+		return -EINVAL;
+
+	pp->txq_dscp_map[dscp] = (MV_U8) txq;
+
+	return 0;
+}
+
+/********************************************/
+
+void mv_pp2_eth_vlan_prio_show(int port)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (pp == NULL) {
+		pr_err("%s: port %d entry is null\n", __func__, port);
+		return;
+	}
+
+	/* TODO - example in NETA */
+}
+
+int mv_pp2_eth_rxq_vlan_prio_set(int port, int rxq, unsigned char prio)
+{
+	int status = -1;
+	/*
+	TODO - example in NETA
+	status = pnc_vlan_prio_set(port, prio, rxq);
+	*/
+
+	if (status == 0)
+		printk(KERN_ERR "Succeeded\n");
+	else if (status == -1)
+		printk(KERN_ERR "Not supported\n");
+	else
+		printk(KERN_ERR "Failed\n");
+
+	return status;
+}
+
+
+static int mv_pp2_priv_init(struct eth_port *pp, int port)
+{
+	static int first_rxq = 0;
+	static int first_rx_q[MV_ETH_MAX_PORTS];
+	int cpu, i;
+	struct cpu_ctrl	*cpuCtrl;
+
+	/* Default field per cpu initialization */
+	for (i = 0; i < nr_cpu_ids; i++) {
+		pp->cpu_config[i] = kmalloc(sizeof(struct cpu_ctrl), GFP_KERNEL);
+		memset(pp->cpu_config[i], 0, sizeof(struct cpu_ctrl));
+	}
+	/* init only once */
+	if (first_rxq == 0)
+		for (i = 0; i < MV_ETH_MAX_PORTS; i++)
+			first_rx_q[i] = -1;
+
+	pp->port = port;
+	pp->rxq_num = CONFIG_MV_PP2_RXQ;
+	pp->txp_num = 1;
+	pp->tx_spec.flags = 0;
+	pp->tx_spec.txp = 0;
+
+	if (first_rx_q[port] == -1) {
+		first_rx_q[port] = first_rxq;
+		first_rxq += pp->rxq_num;
+	}
+
+	pp->first_rxq = first_rx_q[port];
+
+	for_each_possible_cpu(cpu) {
+		cpuCtrl = pp->cpu_config[cpu];
+		cpuCtrl->txq = CONFIG_MV_PP2_TXQ_DEF;
+		cpuCtrl->pp = pp;
+		cpuCtrl->cpu = cpu;
+	}
+
+	pp->flags = 0;
+
+#ifdef CONFIG_MV_PP2_RX_DESC_PREFETCH
+	pp->flags |= MV_ETH_F_RX_DESC_PREFETCH;
+#endif
+
+#ifdef CONFIG_MV_PP2_RX_PKT_PREFETCH
+	pp->flags |= MV_ETH_F_RX_PKT_PREFETCH;
+#endif
+
+	for (i = 0; i < 64; i++)
+		pp->txq_dscp_map[i] = MV_ETH_TXQ_INVALID;
+#ifdef CONFIG_MV_PP2_TX_SPECIAL
+	pp->tx_special_check = NULL;
+#endif /* CONFIG_MV_PP2_TX_SPECIAL */
+
+#ifdef CONFIG_MV_INCLUDE_PON
+	if (MV_PP2_IS_PON_PORT(port)) {
+		pp->tx_spec.flags |= MV_ETH_TX_F_MH;
+		pp->txp_num = CONFIG_MV_PON_TCONTS;
+		pp->tx_spec.txp = CONFIG_MV_PP2_PON_TXP_DEF;
+		for_each_possible_cpu(i)
+			pp->cpu_config[i]->txq = CONFIG_MV_PP2_PON_TXQ_DEF;
+	}
+#endif
+
+	for_each_possible_cpu(cpu) {
+		cpuCtrl = pp->cpu_config[cpu];
+
+#if defined(CONFIG_MV_PP2_TXDONE_IN_HRTIMER)
+		memset(&cpuCtrl->tx_done_timer, 0, sizeof(struct hrtimer));
+		hrtimer_init(&cpuCtrl->tx_done_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+		cpuCtrl->tx_done_timer.function = mv_pp2_tx_done_hr_timer_callback;
+
+		/* initialize tasklet for tx countevent notification */
+		tasklet_init(&cpuCtrl->tx_done_tasklet, mv_pp2_tx_done_timer_callback,
+			     (unsigned long) cpuCtrl);
+#elif defined(CONFIG_MV_PP2_TXDONE_IN_TIMER)
+		memset(&cpuCtrl->tx_done_timer, 0, sizeof(struct timer_list));
+		cpuCtrl->tx_done_timer.function = mv_pp2_tx_done_timer_callback;
+		cpuCtrl->tx_done_timer.data = (unsigned long)cpuCtrl;
+		init_timer(&cpuCtrl->tx_done_timer);
+#endif
+		clear_bit(MV_ETH_F_TX_DONE_TIMER_BIT, &(cpuCtrl->flags));
+
+		/* Init pool of external buffers for TSO, fragmentation, etc */
+		cpuCtrl->ext_buf_size = CONFIG_MV_PP2_EXTRA_BUF_SIZE;
+		cpuCtrl->ext_buf_stack = mvStackCreate(CONFIG_MV_PP2_EXTRA_BUF_NUM);
+		if (cpuCtrl->ext_buf_stack == NULL) {
+			pr_err("%s: Error: failed create  ext_buf_stack for port #%d\n", __func__, port);
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < CONFIG_MV_PP2_EXTRA_BUF_NUM; i++) {
+			u8 *ext_buf = mvOsMalloc(CONFIG_MV_PP2_EXTRA_BUF_SIZE);
+			if (ext_buf == NULL) {
+				pr_warn("\to %s Warning: %d of %d extra buffers allocated\n",
+					__func__, i, CONFIG_MV_PP2_EXTRA_BUF_NUM);
+				break;
+			}
+			mvStackPush(cpuCtrl->ext_buf_stack, (MV_U32)ext_buf);
+		}
+	}
+
+	pp->weight = CONFIG_MV_PP2_RX_POLL_WEIGHT;
+
+#ifdef CONFIG_MV_PP2_STAT_DIST
+	pp->dist_stats.rx_dist = mvOsMalloc(sizeof(u32) * (pp->rxq_num * CONFIG_MV_PP2_RXQ_DESC + 1));
+	if (pp->dist_stats.rx_dist != NULL) {
+		pp->dist_stats.rx_dist_size = pp->rxq_num * CONFIG_MV_PP2_RXQ_DESC + 1;
+		memset(pp->dist_stats.rx_dist, 0, sizeof(u32) * pp->dist_stats.rx_dist_size);
+	} else
+		pr_err("\to ethPort #%d: Can't allocate %d bytes for rx_dist\n",
+		       pp->port, sizeof(u32) * (pp->rxq_num * CONFIG_MV_PP2_RXQ_DESC + 1));
+
+	pp->dist_stats.tx_done_dist =
+	    mvOsMalloc(sizeof(u32) * (pp->txp_num * CONFIG_MV_PP2_TXQ * CONFIG_MV_PP2_TXQ_DESC + 1));
+	if (pp->dist_stats.tx_done_dist != NULL) {
+		pp->dist_stats.tx_done_dist_size = pp->txp_num * CONFIG_MV_PP2_TXQ * CONFIG_MV_PP2_TXQ_DESC + 1;
+		memset(pp->dist_stats.tx_done_dist, 0, sizeof(u32) * pp->dist_stats.tx_done_dist_size);
+	} else
+		pr_err("\to ethPort #%d: Can't allocate %d bytes for tx_done_dist\n",
+		       pp->port, sizeof(u32) * (pp->txp_num * CONFIG_MV_PP2_TXQ * CONFIG_MV_PP2_TXQ_DESC + 1));
+#endif /* CONFIG_MV_PP2_STAT_DIST */
+
+	return 0;
+}
+
+/*
+free the memory that allocate by
+mv_pp2_netdev_init
+mv_pp2_priv_init
+mv_pp2_hal_init
+*/
+static void mv_pp2_priv_cleanup(struct eth_port *pp)
+{
+	int cpu, port;
+
+	if (!pp)
+		return;
+
+	port = pp->port;
+
+	mvOsFree(pp->rxq_ctrl);
+	pp->rxq_ctrl = NULL;
+
+
+	mvOsFree(pp->txq_ctrl);
+	pp->txq_ctrl = NULL;
+
+	mvPp2PortDestroy(pp->port);
+
+	for_each_possible_cpu(cpu) {
+		/* delete pool of external buffers for TSO, fragmentation, etc */
+		if (mvStackDelete(pp->cpu_config[cpu]->ext_buf_stack))
+			pr_err("Error: failed delete ext_buf_stack for port #%d\n", port);
+
+		kfree(pp->cpu_config[cpu]);
+	}
+
+
+#ifdef CONFIG_MV_PP2_STAT_DIST
+	mvOsFree(pp->dist_stats.rx_dist);
+	mvOsFree(pp->dist_stats.tx_done_dist);
+	mvOsFree(pp->dist_stats.tx_tso_dist);
+#endif /* CONFIG_MV_PP2_STAT_DIST */
+
+	/* allocate by mv_pp2_netdev_init */
+	/* free dev and pp*/
+	synchronize_net();
+	unregister_netdev(pp->dev);
+	free_netdev(pp->dev);
+	mv_pp2_ports[port] = NULL;
+}
+
+/***********************************************************************************
+ ***  print RX bm_pool status
+ ***********************************************************************************/
+void mv_pp2_napi_groups_print(int port)
+{
+	int i;
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	printk(KERN_CONT "NAPI groups:   cpu_mask   rxq_mask   napi_state\n");
+	for (i = 0; i < MV_ETH_MAX_NAPI_GROUPS; i++) {
+		if (!pp->napi_group[i])
+			continue;
+		printk(KERN_ERR "          %d:      0x%02x     0x%04x             %d\n",
+			i, pp->napi_group[i]->cpu_mask, pp->napi_group[i]->rxq_mask,
+			test_bit(NAPI_STATE_SCHED, &pp->napi_group[i]->napi->state));
+	}
+
+	printk(KERN_CONT "\n");
+}
+
+/***********************************************************************************
+ ***  print RX bm_pool status
+ ***********************************************************************************/
+void mv_pp2_pool_status_print(int pool)
+{
+	const char *type;
+	struct bm_pool *bm_pool = &mv_pp2_pool[pool];
+	int buf_size, total_size, true_size;
+
+	if (MV_ETH_BM_POOL_IS_HWF(bm_pool->type)) {
+		buf_size = RX_HWF_BUF_SIZE(bm_pool->pkt_size);
+		total_size = RX_HWF_TOTAL_SIZE(buf_size);
+	} else {
+		buf_size = RX_BUF_SIZE(bm_pool->pkt_size);
+		total_size = RX_TOTAL_SIZE(buf_size);
+	}
+	true_size = RX_TRUE_SIZE(total_size);
+
+	switch (bm_pool->type) {
+	case MV_ETH_BM_FREE:
+		type = "MV_ETH_BM_FREE";
+		break;
+	case MV_ETH_BM_SWF_LONG:
+		type = "MV_ETH_BM_SWF_LONG";
+		break;
+	case MV_ETH_BM_SWF_SHORT:
+		type = "MV_ETH_BM_SWF_SHORT";
+		break;
+	case MV_ETH_BM_HWF_LONG:
+		type = "MV_ETH_BM_HWF_LONG";
+		break;
+	case MV_ETH_BM_HWF_SHORT:
+		type = "MV_ETH_BM_HWF_SHORT";
+		break;
+	case MV_ETH_BM_MIXED_LONG:
+		type = "MV_ETH_BM_MIXED_LONG";
+		break;
+	case MV_ETH_BM_MIXED_SHORT:
+		type = "MV_ETH_BM_MIXED_SHORT";
+		break;
+	default:
+		type = "Unknown";
+	}
+
+	pr_info("\nBM Pool #%d: pool type = %s, buffers num = %d\n", pool, type, bm_pool->buf_num);
+	pr_info("     packet size = %d, buffer size = %d, total size = %d, true size = %d\n",
+		bm_pool->pkt_size, buf_size, total_size, true_size);
+	pr_info("     capacity=%d, buf_num=%d, port_map=0x%x, in_use=%u, in_use_thresh=%u\n",
+		bm_pool->capacity, bm_pool->buf_num, bm_pool->port_map,
+		mv_pp2_bm_in_use_read(bm_pool), bm_pool->in_use_thresh);
+
+#ifdef CONFIG_MV_PP2_STAT_ERR
+	pr_cont("     skb_alloc_oom=%u", bm_pool->stats.skb_alloc_oom);
+#endif /* #ifdef CONFIG_MV_PP2_STAT_ERR */
+
+#ifdef CONFIG_MV_PP2_STAT_DBG
+	pr_cont(", skb_alloc_ok=%u, bm_put=%u\n",
+	       bm_pool->stats.skb_alloc_ok, bm_pool->stats.bm_put);
+
+	pr_info("     no_recycle=%u, skb_recycled_ok=%u, skb_recycled_err=%u, bm_cookie_err=%u\n",
+		bm_pool->stats.no_recycle, bm_pool->stats.skb_recycled_ok,
+		bm_pool->stats.skb_recycled_err, bm_pool->stats.bm_cookie_err);
+#endif /* CONFIG_MV_PP2_STAT_DBG */
+
+	memset(&bm_pool->stats, 0, sizeof(bm_pool->stats));
+}
+
+
+/***********************************************************************************
+ ***  print ext pool status
+ ***********************************************************************************/
+void mv_pp2_ext_pool_print(struct cpu_ctrl *cpu_ctrl)
+{
+	pr_info("\nExt Pool Stack: cpu = %d, bufSize = %u bytes\n",
+		cpu_ctrl->cpu, cpu_ctrl->ext_buf_size);
+	mvStackStatus(cpu_ctrl->ext_buf_stack, 0);
+}
+
+/***********************************************************************************
+ ***  print net device status
+ ***********************************************************************************/
+void mv_pp2_eth_netdev_print(struct net_device *dev)
+{
+	pr_info("%s net_device status:\n\n", dev->name);
+	pr_info("ifIdx=%d, mtu=%u, MAC=" MV_MACQUAD_FMT "\n",
+		dev->ifindex, dev->mtu,	MV_MACQUAD(dev->dev_addr));
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+	pr_info("features=0x%x, hw_features=0x%x, wanted_features=0x%x, vlan_features=0x%x\n",
+			(unsigned int)(dev->features), (unsigned int)(dev->hw_features),
+			(unsigned int)(dev->wanted_features), (unsigned int)(dev->vlan_features));
+#else
+	pr_info("features=0x%x, vlan_features=0x%x\n",
+		 (unsigned int)(dev->features), (unsigned int)(dev->vlan_features));
+#endif
+
+	pr_info("flags=0x%x, gflags=0x%x, priv_flags=0x%x: running=%d, oper_up=%d\n",
+		(unsigned int)(dev->flags), (unsigned int)(dev->gflags), (unsigned int)(dev->priv_flags),
+		netif_running(dev), netif_oper_up(dev));
+	pr_info("uc_promisc=%d, promiscuity=%d, allmulti=%d\n", dev->uc_promisc, dev->promiscuity, dev->allmulti);
+
+	if (mv_pp2_eth_netdev_find(dev->ifindex)) {
+		struct eth_port *pp = MV_ETH_PRIV(dev);
+		if (pp->tagged)
+			mv_mux_netdev_print_all(pp->port);
+	} else {
+		/* Check if this is mux netdevice */
+		if (mv_mux_netdev_find(dev->ifindex) != -1)
+			mv_mux_netdev_print(dev);
+	}
+}
+
+void mv_pp2_status_print(void)
+{
+	pr_info("totals: ports=%d\n", mv_pp2_ports_num);
+
+#ifdef CONFIG_MV_PP2_SKB_RECYCLE
+	pr_info("SKB recycle                  : %s\n", mv_ctrl_pp2_recycle ? "Enabled" : "Disabled");
+#endif /* CONFIG_MV_PP2_SKB_RECYCLE */
+
+#ifdef CONFIG_MV_PP2_SWF_HWF_CORRUPTION_WA
+	pr_info("HWF + SWF data corruption WA : %s\n", mv_pp2_swf_hwf_wa_en ? "Enabled" : "Disabled");
+#endif /* CONFIG_MV_ETH_SWF_HWF_CORRUPTION_WA */
+}
+
+/***********************************************************************************
+ ***  print Ethernet port status
+ ***********************************************************************************/
+void mv_pp2_eth_port_status_print(unsigned int port)
+{
+	int txp, q;
+
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+	struct tx_queue *txq_ctrl;
+	struct cpu_ctrl	*cpuCtrl;
+
+	if (!pp)
+		return;
+
+	pr_err("\n");
+	pr_err("port=%d, flags=0x%lx, rx_weight=%d\n", port, pp->flags, pp->weight);
+
+	pr_info("RX next descriptor prefetch  : %s\n",
+			pp->flags & MV_ETH_F_RX_DESC_PREFETCH ? "Enabled" : "Disabled");
+
+	pr_info("RX packet header prefetch    : %s\n\n",
+			pp->flags & MV_ETH_F_RX_PKT_PREFETCH ? "Enabled" : "Disabled");
+
+	if (pp->flags & MV_ETH_F_CONNECT_LINUX)
+		pr_info("%s: ", pp->dev->name);
+	else
+		pr_info("port %d: ", port);
+
+	mv_pp2_eth_link_status_print(port);
+
+	pr_cont("\n");
+	pr_info("rxq_coal(pkts)[ q]         = ");
+	for (q = 0; q < pp->rxq_num; q++)
+		pr_cont("%4d ", mvPp2RxqPktsCoalGet(port, q));
+
+	pr_cont("\n");
+	pr_info("rxq_coal(usec)[ q]         = ");
+	for (q = 0; q < pp->rxq_num; q++)
+		pr_cont("%4d ", mvPp2RxqTimeCoalGet(port, q));
+
+	pr_cont("\n");
+	pr_info("rxq_desc(num)[ q]          = ");
+	for (q = 0; q < pp->rxq_num; q++)
+		pr_cont("%4d ", pp->rxq_ctrl[q].rxq_size);
+
+	pr_cont("\n");
+	for (txp = 0; txp < pp->txp_num; txp++) {
+		pr_info("txq_coal(pkts)[%2d.q]       = ", txp);
+		for (q = 0; q < CONFIG_MV_PP2_TXQ; q++)
+			pr_cont("%4d ", mvPp2TxDonePktsCoalGet(port, txp, q));
+		pr_cont("\n");
+
+		pr_info("txq_desc(num) [%2d.q]       = ", txp);
+		for (q = 0; q < CONFIG_MV_PP2_TXQ; q++) {
+			txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_PP2_TXQ + q];
+			pr_cont("%4d ", txq_ctrl->txq_size);
+		}
+		pr_cont("\n");
+
+		pr_info("txq_hwf_desc(num) [%2d.q]   = ", txp);
+		for (q = 0; q < CONFIG_MV_PP2_TXQ; q++) {
+			txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_PP2_TXQ + q];
+			pr_cont("%4d ", txq_ctrl->hwf_size);
+		}
+		pr_cont("\n");
+
+#ifdef CONFIG_MV_ETH_PP2_1
+		pr_info("txq_swf_desc(num) [%2d.q]   = ", txp);
+		for (q = 0; q < CONFIG_MV_PP2_TXQ; q++) {
+			txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_PP2_TXQ + q];
+			pr_cont("%4d ", txq_ctrl->swf_size);
+		}
+		pr_info("txq_rsvd_chunk(num) [%2d.q] = ", txp);
+		for (q = 0; q < CONFIG_MV_PP2_TXQ; q++) {
+			txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_PP2_TXQ + q];
+			pr_cont("%4d ", txq_ctrl->rsvd_chunk);
+		}
+#else
+		pr_info("txq_swf_desc(num) [%2d.q]   = ", txp);
+		for (q = 0; q < CONFIG_MV_PP2_TXQ; q++) {
+			txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_PP2_TXQ + q];
+			pr_cont("%4d ", txq_ctrl->txq_cpu[0].txq_size);
+		}
+#endif /* CONFIG_MV_ETH_PP2_1 */
+
+		pr_cont("\n");
+	}
+	pr_info("\n");
+
+#if defined(CONFIG_MV_PP2_TXDONE_ISR)
+	printk(KERN_ERR "Do tx_done in NAPI context triggered by ISR\n");
+	for (txp = 0; txp < pp->txp_num; txp++) {
+		printk(KERN_ERR "txcoal(pkts)[%2d.q] = ", txp);
+		for (q = 0; q < CONFIG_MV_PP2_TXQ; q++)
+			printk(KERN_CONT "%3d ", mvPp2TxDonePktsCoalGet(port, txp, q));
+		printk(KERN_CONT "\n");
+	}
+	printk(KERN_ERR "\n");
+#elif defined(CONFIG_MV_PP2_TXDONE_IN_HRTIMER)
+	pr_err("Do tx_done in TX or high-resolution Timer's tasklet: tx_done_threshold=%d timer_interval=%d usec\n",
+	mv_ctrl_pp2_txdone, mv_pp2_tx_done_hrtimer_period_get());
+#elif defined(CONFIG_MV_PP2_TXDONE_IN_TIMER)
+	pr_err("Do tx_done in TX or regular Timer context: tx_done_threshold=%d timer_interval=%d msec\n",
+	mv_ctrl_pp2_txdone, CONFIG_MV_PP2_TX_DONE_TIMER_PERIOD);
+#endif /* CONFIG_MV_PP2_TXDONE_ISR */
+	pr_err("\n");
+
+	printk(KERN_ERR "txp=%d, zero_pad=%s, mh_en=%s (0x%04x), hw_cmd: 0x%08x 0x%08x 0x%08x\n",
+		pp->tx_spec.txp, (pp->tx_spec.flags & MV_ETH_TX_F_NO_PAD) ? "Disabled" : "Enabled",
+		(pp->tx_spec.flags & MV_ETH_TX_F_MH) ? "Enabled" : "Disabled",
+		MV_16BIT_BE(pp->tx_spec.tx_mh), pp->tx_spec.hw_cmd[0],
+		pp->tx_spec.hw_cmd[1], pp->tx_spec.hw_cmd[2]);
+
+	printk(KERN_CONT "\n");
+	printk(KERN_CONT "CPU:   txq_def   napi_group   group_id\n");
+	{
+		int cpu;
+		for_each_possible_cpu(cpu) {
+			cpuCtrl = pp->cpu_config[cpu];
+			if (MV_BIT_CHECK(pp->cpuMask, cpu))
+				printk(KERN_ERR "  %d:   %3d        %p    %3d\n",
+					cpu, cpuCtrl->txq, cpuCtrl->napi_group,
+					(cpuCtrl->napi_group != NULL) ? cpuCtrl->napi_group->id : -1);
+		}
+	}
+
+	printk(KERN_CONT "\n");
+
+	mv_pp2_napi_groups_print(port);
+
+	/* Print status of all mux_dev for this port */
+	if (pp->tagged) {
+		printk(KERN_CONT "TAGGED PORT\n\n");
+		mv_mux_netdev_print_all(port);
+	} else
+		printk(KERN_CONT "UNTAGGED PORT\n");
+}
+
+
+/***********************************************************************************
+ ***  print port statistics
+ ***********************************************************************************/
+
+void mv_pp2_port_stats_print(unsigned int port)
+{
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+	struct port_stats *stat = NULL;
+	struct tx_queue *txq_ctrl;
+	struct txq_cpu_ctrl *txq_cpu_ptr;
+	int txp, queue, cpu = smp_processor_id();
+
+	pr_info("\n====================================================\n");
+	pr_info("ethPort_%d: Statistics (running on cpu#%d)", port, cpu);
+	pr_info("----------------------------------------------------\n\n");
+
+	if (pp == NULL) {
+		printk(KERN_ERR "eth_stats_print: wrong port number %d\n", port);
+		return;
+	}
+	stat = &(pp->stats);
+
+#ifdef CONFIG_MV_PP2_STAT_ERR
+	pr_info("Errors:\n");
+	pr_info("rx_error................. %10u\n", stat->rx_error);
+	pr_info("tx_timeout............... %10u\n", stat->tx_timeout);
+	pr_info("state_err................ %10u\n", stat->state_err);
+	pr_info("\n");
+
+	pr_info("ext_stack_empty[cpu]    = ");
+	for_each_possible_cpu(cpu)
+		pr_cont("%10u ", stat->ext_stack_empty[cpu]);
+
+	pr_info("ext_stack_full[cpu]     = ");
+	for_each_possible_cpu(cpu)
+		pr_cont("%10u ", stat->ext_stack_full[cpu]);
+#endif /* CONFIG_MV_PP2_STAT_ERR */
+
+#ifdef CONFIG_MV_PP2_STAT_INF
+	pr_info("\nEvents:\n");
+
+	pr_info("irq[cpu]                = ");
+	for_each_possible_cpu(cpu)
+		pr_cont("%10u ", stat->irq[cpu]);
+
+	pr_info("irq_none[cpu]           = ");
+	for_each_possible_cpu(cpu)
+		pr_cont("%10u ", stat->irq_err[cpu]);
+
+	pr_info("poll[cpu]               = ");
+	for_each_possible_cpu(cpu)
+		pr_cont("%10u ", stat->poll[cpu]);
+
+	pr_info("poll_exit[cpu]          = ");
+	for_each_possible_cpu(cpu)
+		pr_cont("%10u ", stat->poll_exit[cpu]);
+
+	pr_info("tx_timer_event[cpu]     = ");
+	for_each_possible_cpu(cpu)
+		pr_cont("%10u ", stat->tx_done_timer_event[cpu]);
+
+	pr_info("tx_timer_add[cpu]       = ");
+	for_each_possible_cpu(cpu)
+		pr_cont("%10u ", stat->tx_done_timer_add[cpu]);
+
+	pr_info("\n");
+	pr_info("tx_done_event............ %10u\n", stat->tx_done);
+	pr_info("link..................... %10u\n", stat->link);
+	pr_info("netdev_stop.............. %10u\n", stat->netdev_stop);
+	pr_info("rx_buf_hdr............... %10u\n", stat->rx_buf_hdr);
+
+#ifdef CONFIG_MV_PP2_RX_SPECIAL
+	pr_info("rx_special............... %10u\n", stat->rx_special);
+#endif /* CONFIG_MV_PP2_RX_SPECIAL */
+
+#ifdef CONFIG_MV_PP2_TX_SPECIAL
+	pr_info("tx_special............... %10u\n", stat->tx_special);
+#endif /* CONFIG_MV_PP2_TX_SPECIAL */
+#endif /* CONFIG_MV_PP2_STAT_INF */
+
+#ifdef CONFIG_MV_PP2_STAT_DBG
+	{
+		__u32 total_rx_ok = 0;
+
+		pr_info("\nDebug statistics:\n");
+		pr_info("\n");
+
+		pr_info("ext_stack_get[cpu]      = ");
+		for_each_possible_cpu(cpu)
+			pr_cont("%10u ", stat->ext_stack_get[cpu]);
+
+		pr_info("ext_stack_put[cpu]      = ");
+		for_each_possible_cpu(cpu)
+			pr_cont("%10u ", stat->ext_stack_put[cpu]);
+
+		pr_info("\n");
+		pr_info("rx_gro................... %10u\n", stat->rx_gro);
+		pr_info("rx_gro_bytes ............ %10u\n", stat->rx_gro_bytes);
+
+		pr_info("rx_netif................. %10u\n", stat->rx_netif);
+		pr_info("rx_drop_sw............... %10u\n", stat->rx_drop_sw);
+		pr_info("rx_csum_hw............... %10u\n", stat->rx_csum_hw);
+		pr_info("rx_csum_sw............... %10u\n", stat->rx_csum_sw);
+
+		pr_info("tx_tso................... %10u\n", stat->tx_tso);
+		pr_info("tx_tso_bytes ............ %10u\n", stat->tx_tso_bytes);
+		pr_info("tx_tso_no_resource....... %10u\n", stat->tx_tso_no_resource);
+
+		pr_info("tx_skb_free.............. %10u\n", stat->tx_skb_free);
+		pr_info("tx_sg.................... %10u\n", stat->tx_sg);
+		pr_info("tx_csum_hw............... %10u\n", stat->tx_csum_hw);
+		pr_info("tx_csum_sw............... %10u\n", stat->tx_csum_sw);
+
+		pr_info("\n");
+
+		pr_info("RXQ:       rx_ok\n\n");
+		for (queue = 0; queue < pp->rxq_num; queue++) {
+			u32 rxq_ok = 0;
+
+			rxq_ok = stat->rxq[queue];
+
+			pr_info("%3d:  %10u\n",	queue, rxq_ok);
+			total_rx_ok += rxq_ok;
+		}
+		pr_info("SUM:  %10u\n", total_rx_ok);
+	}
+#endif /* CONFIG_MV_PP2_STAT_DBG */
+
+	pr_info("\nAggregated TXQs statistics\n");
+	pr_info("CPU:  count        send       no_resource\n\n");
+	for_each_possible_cpu(cpu) {
+		struct aggr_tx_queue *aggr_txq_ctrl = &aggr_txqs[cpu];
+		u32 txq_tx = 0, txq_err = 0;
+
+#ifdef CONFIG_MV_PP2_STAT_DBG
+		txq_tx = aggr_txq_ctrl->stats.txq_tx;
+#endif /* CONFIG_MV_PP2_STAT_DBG */
+#ifdef CONFIG_MV_PP2_STAT_ERR
+		txq_err = aggr_txq_ctrl->stats.txq_err;
+#endif /* CONFIG_MV_PP2_STAT_ERR */
+
+		pr_info(" %d:    %3d   %10u    %10u\n",
+		       cpu, aggr_txq_ctrl->txq_count, txq_tx, txq_err);
+
+		memset(&aggr_txq_ctrl->stats, 0, sizeof(aggr_txq_ctrl->stats));
+	}
+
+	pr_info("\n");
+	pr_info("TXP-TXQ:  count  res_num      send          done     no_resource      res_req      res_total\n\n");
+
+	for (txp = 0; txp < pp->txp_num; txp++) {
+		for (queue = 0; queue < CONFIG_MV_PP2_TXQ; queue++)
+			for_each_possible_cpu(cpu) {
+				u32 txq_tx = 0, txq_done = 0, txq_reserved_req = 0, txq_reserved_total = 0, txq_err = 0;
+
+				txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_PP2_TXQ + queue];
+				txq_cpu_ptr = &txq_ctrl->txq_cpu[cpu];
+#ifdef CONFIG_MV_PP2_STAT_DBG
+				txq_tx = txq_cpu_ptr->stats.txq_tx;
+				txq_done = txq_cpu_ptr->stats.txq_txdone;
+				txq_reserved_req = txq_cpu_ptr->stats.txq_reserved_req;
+				txq_reserved_total = txq_cpu_ptr->stats.txq_reserved_total;
+
+#endif /* CONFIG_MV_PP2_STAT_DBG */
+#ifdef CONFIG_MV_PP2_STAT_ERR
+				txq_err = txq_cpu_ptr->stats.txq_err;
+#endif /* CONFIG_MV_PP2_STAT_ERR */
+
+				pr_info("%d-%d-cpu#%d: %3d    %3d   %10u    %10u    %10u    %10u    %10u\n",
+				       txp, queue, cpu, txq_cpu_ptr->txq_count, txq_cpu_ptr->reserved_num,
+				       txq_tx, txq_done, txq_err, txq_reserved_req, txq_reserved_total);
+
+				memset(&txq_cpu_ptr->stats, 0, sizeof(txq_cpu_ptr->stats));
+			}
+	}
+	memset(stat, 0, sizeof(struct port_stats));
+
+	/* RX pool statistics */
+	if (pp->pool_short)
+		mv_pp2_pool_status_print(pp->pool_short->pool);
+
+	if (pp->pool_long)
+		mv_pp2_pool_status_print(pp->pool_long->pool);
+
+#ifdef CONFIG_MV_PP2_STAT_DIST
+	{
+		int i;
+		struct dist_stats *dist_stats = &(pp->dist_stats);
+
+		if (dist_stats->rx_dist) {
+			printk(KERN_ERR "\n      Linux Path RX distribution\n");
+			for (i = 0; i < dist_stats->rx_dist_size; i++) {
+				if (dist_stats->rx_dist[i] != 0) {
+					printk(KERN_ERR "%3d RxPkts - %u times\n", i, dist_stats->rx_dist[i]);
+					dist_stats->rx_dist[i] = 0;
+				}
+			}
+		}
+
+		if (dist_stats->tx_done_dist) {
+			printk(KERN_ERR "\n      tx-done distribution\n");
+			for (i = 0; i < dist_stats->tx_done_dist_size; i++) {
+				if (dist_stats->tx_done_dist[i] != 0) {
+					printk(KERN_ERR "%3d TxDoneDesc - %u times\n", i, dist_stats->tx_done_dist[i]);
+					dist_stats->tx_done_dist[i] = 0;
+				}
+			}
+		}
+#ifdef CONFIG_MV_PP2_TSO
+		if (dist_stats->tx_tso_dist) {
+			printk(KERN_ERR "\n      TSO stats\n");
+			for (i = 0; i < dist_stats->tx_tso_dist_size; i++) {
+				if (dist_stats->tx_tso_dist[i] != 0) {
+					printk(KERN_ERR "%3d KBytes - %u times\n", i, dist_stats->tx_tso_dist[i]);
+					dist_stats->tx_tso_dist[i] = 0;
+				}
+			}
+		}
+#endif /* CONFIG_MV_PP2_TSO */
+		memset(dist_stats, 0, struct dist_stats);
+	}
+#endif /* CONFIG_MV_PP2_STAT_DIST */
+
+	for_each_possible_cpu(cpu)
+		mv_pp2_ext_pool_print(pp->cpu_config[cpu]);
+}
+
+/* mv_pp2_tx_cleanup - reset and delete all tx queues */
+static void mv_pp2_tx_cleanup(struct eth_port *pp)
+{
+	int txp, txq;
+	struct tx_queue *txq_ctrl;
+
+	if (!pp)
+		return;
+
+	/* Reset Tx ports */
+	for (txp = 0; txp < pp->txp_num; txp++) {
+		if (mv_pp2_txp_clean(pp->port, txp))
+			printk(KERN_ERR "Warning: Port %d Tx port %d reset failed\n", pp->port, txp);
+	}
+
+	/* Delete Tx queues */
+	for (txp = 0; txp < pp->txp_num; txp++) {
+		for (txq = 0; txq < CONFIG_MV_PP2_TXQ; txq++) {
+			txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_PP2_TXQ + txq];
+			if (txq_ctrl->q)
+				mv_pp2_txq_delete(pp, txq_ctrl);
+		}
+	}
+}
+
+/* mv_pp2_rx_cleanup - reset and delete all rx queues */
+static void mv_pp2_rx_cleanup(struct eth_port *pp)
+{
+	int rxq, prxq;
+	struct rx_queue *rxq_ctrl;
+
+	if (!pp)
+		return;
+
+	/* Reset RX ports */
+	if (mv_pp2_rx_reset(pp->port))
+		printk(KERN_ERR "%s Warning: Rx port %d reset failed\n", __func__, pp->port);
+
+	/* Delete Rx queues */
+	/* TODO - delete rxq only if port was in up at least once */
+	for (rxq = 0; rxq < pp->rxq_num; rxq++) {
+		rxq_ctrl = &pp->rxq_ctrl[rxq];
+
+		/* port start called before*/
+		if (rxq_ctrl->q)
+			mvPp2RxqDelete(pp->port, rxq);
+
+		prxq = mvPp2LogicRxqToPhysRxq(pp->port, rxq);
+		mvPp2PhysRxqMapDel(prxq);
+		rxq_ctrl->q = NULL;
+	}
+
+}
+
+
+/* mv_pp2_pool_cleanup - delete all ports buffers from pool */
+static void mv_pp2_pool_cleanup(int port, struct bm_pool *ppool)
+{
+	if (!ppool)
+		return;
+
+	ppool->port_map &= ~(1 << port);
+
+	if (ppool->port_map == 0) {
+		mv_pp2_pool_free(ppool->pool, ppool->buf_num);
+		ppool->type = MV_ETH_BM_FREE;
+	}
+}
+
+static void mv_pp2_napi_cleanup(struct eth_port *pp)
+{
+	int i;
+	struct napi_group_ctrl *napi_group;
+
+	if (!pp)
+		return;
+
+	if (!(pp->flags & MV_ETH_F_CONNECT_LINUX))
+		return;
+
+	for (i = 0; i < MV_ETH_MAX_NAPI_GROUPS; i++) {
+		napi_group = pp->napi_group[i];
+		if (napi_group) {
+			netif_napi_del(napi_group->napi);
+			mvOsFree(napi_group->napi);
+			mvOsFree(napi_group);
+			pp->napi_group[i] = NULL;
+		}
+	}
+}
+
+static int mv_pp2_port_cleanup(int port)
+{
+	struct eth_port *pp;
+	pp = mv_pp2_port_by_id(port);
+
+	if (pp == NULL) {
+		printk(KERN_ERR "port %d already clean\n", port);
+		return 0;
+	}
+
+	if (pp->flags & MV_ETH_F_STARTED) {
+		printk(KERN_ERR "%s: port %d is started, cannot cleanup\n", __func__, port);
+		return -1;
+	}
+
+	mv_pp2_tx_cleanup(pp);
+	mv_pp2_rx_cleanup(pp);
+
+	/*pools cleanup*/
+	mv_pp2_pool_cleanup(port, pp->pool_long);
+	mv_pp2_pool_cleanup(port, pp->pool_short);
+	mv_pp2_pool_cleanup(port, pp->hwf_pool_long);
+	mv_pp2_pool_cleanup(port, pp->hwf_pool_short);
+
+	/* Clear Marvell Header related modes - will be set again if needed on re-init */
+	mvPp2MhSet(port, MV_TAG_TYPE_NONE);
+
+	/* Clear any forced link, speed and duplex */
+	mv_pp2_port_link_speed_fc(port, MV_ETH_SPEED_AN, 0);
+
+	mv_pp2_napi_cleanup(pp);
+
+	if (pp->tagged)
+		mv_mux_eth_detach(pp->port);
+
+	mv_pp2_priv_cleanup(pp);
+
+	printk(KERN_ERR "port %d cleanup done\n", port);
+
+	return 0;
+}
+
+int mv_pp2_all_ports_cleanup(void)
+{
+	int port, status;
+
+	for (port = 0; port < mv_pp2_ports_num; port++) {
+		status = mv_pp2_port_cleanup(port);
+		if (status != 0) {
+			printk(KERN_ERR "%s :port %d, cleanup failed, stopping all ports cleanup\n", __func__, port);
+			return status;
+		}
+	}
+
+	if (mv_pp2_initialized)
+		mv_pp2_shared_cleanup();
+
+	return MV_OK;
+
+}
+
+int mv_pp2_all_ports_probe(void)
+{
+	int port = 0;
+
+	for (port = 0; port < mv_pp2_ports_num; port++)
+		if (mv_pp2_eth_probe(plats[port]))
+			return 1;
+	return 0;
+}
+
+#ifdef CONFIG_MV_INCLUDE_PON
+/* Used by PON module */
+struct mv_netdev_notify_ops mv_netdev_callbacks;
+
+/* Used by netdev driver */
+struct mv_eth_ext_mac_ops *mv_pon_callbacks;
+
+void pon_link_status_notify(int port_id, MV_BOOL link_state)
+{
+	struct eth_port *pon_port = mv_pp2_port_by_id(MV_PON_LOGIC_PORT_GET());
+	mv_pp2_link_event(pon_port, 1);
+}
+
+/* called by PON module */
+void mv_eth_ext_mac_ops_register(int port_id,
+		struct mv_eth_ext_mac_ops **ext_mac_ops, struct mv_netdev_notify_ops **netdev_ops)
+{
+	if (*netdev_ops == NULL) {
+		pr_err("%s: netdev_ops is uninitialized\n", __func__);
+		return;
+	}
+
+	mv_netdev_callbacks.link_notify = pon_link_status_notify;
+	*netdev_ops = &mv_netdev_callbacks;
+
+	if (*ext_mac_ops == NULL) {
+		pr_err("%s: netdev_ops is uninitialized\n", __func__);
+		return;
+	}
+
+	mv_pon_callbacks = *ext_mac_ops;
+}
+
+MV_BOOL mv_pon_link_status(MV_ETH_PORT_STATUS *link)
+{
+	MV_BOOL linkup = MV_TRUE;
+
+	if (mv_pon_callbacks && mv_pon_callbacks->link_status_get)
+		linkup = mv_pon_callbacks->link_status_get(MV_PON_LOGIC_PORT_GET());
+
+	if (link) {
+		link->linkup = linkup;
+		link->speed = MV_ETH_SPEED_1000;
+		link->duplex = MV_ETH_DUPLEX_FULL;
+	}
+
+	return linkup;
+}
+
+MV_STATUS mv_pon_mtu_config(MV_U32 maxEth)
+{
+	if (mv_pon_callbacks && mv_pon_callbacks->max_pkt_size_set) {
+		if (mv_pon_callbacks->max_pkt_size_set(MV_PON_LOGIC_PORT_GET(), maxEth) != MV_OK) {
+			printk(KERN_ERR "pon max_pkt_size_set failed\n");
+			return MV_FAIL;
+		}
+	} else {
+		printk(KERN_ERR "pon max_pkt_size_set is uninitialized\n");
+		return MV_FAIL;
+	}
+
+	return MV_OK;
+}
+
+MV_STATUS mv_pon_set_mac_addr(void *addr)
+{
+	if (mv_pon_callbacks && mv_pon_callbacks->mac_addr_set) {
+		if (mv_pon_callbacks->mac_addr_set(MV_PON_LOGIC_PORT_GET(), addr) != MV_OK) {
+			printk(KERN_ERR "pon mac_addr_set failed\n");
+			return MV_FAIL;
+		}
+	} else {
+		printk(KERN_ERR "pon mac_addr_set is uninitialized\n");
+		return MV_FAIL;
+	}
+
+	return MV_OK;
+}
+
+MV_STATUS mv_pon_enable(void)
+{
+	if (mv_pon_callbacks && mv_pon_callbacks->port_enable) {
+		if (mv_pon_callbacks->port_enable(MV_PON_LOGIC_PORT_GET()) != MV_OK) {
+			printk(KERN_ERR "pon port_enable failed\n");
+			return MV_FAIL;
+		}
+	} else
+		printk(KERN_ERR "Warning: pon port_enable is uninitialized\n");
+
+	if (mv_pon_link_status(NULL) == MV_TRUE)
+		return mvPp2PortEgressEnable(MV_PON_LOGIC_PORT_GET(), MV_TRUE);
+
+	return MV_NOT_READY;
+}
+
+MV_STATUS mv_pon_disable(void)
+{
+	mvPp2PortEgressEnable(MV_PON_LOGIC_PORT_GET(), MV_FALSE);
+	if (mv_pon_callbacks && mv_pon_callbacks->port_disable) {
+		if (mv_pon_callbacks->port_disable(MV_PON_LOGIC_PORT_GET()) != MV_OK) {
+			printk(KERN_ERR "pon port_disable failed\n");
+			return MV_FAIL;
+		}
+	} else
+		printk(KERN_ERR "Warning: pon port_disable is uninitialized\n");	return MV_OK;
+
+	return MV_OK;
+}
+#endif /* CONFIG_MV_INCLUDE_PON */
+
+/* Support for platform driver */
+
+#ifdef CONFIG_PM
+
+int mv_pp2_suspend_clock(int port)
+{
+/* TBD */
+	return 0;
+}
+
+/* mv_pp2_suspend_common - common port suspend, can be called anyplace */
+int mv_pp2_suspend_common(int port)
+{
+	struct eth_port *pp;
+
+	pp = mv_pp2_port_by_id(port);
+	if (!pp)
+		return MV_OK;
+
+	if (mv_pp2_eth_port_suspend(port)) {
+		pr_err("%s: port #%d suspend failed.\n", __func__, port);
+		return MV_ERROR;
+	}
+
+	/* PM mode: WoL Mode*/
+	if (pp->pm_mode == 0 &&
+	    wol_ports_bmp & (1 << port)) {
+		/* Insert port to WoL mode */
+		if (mvPp2WolSleep(port)) {
+			pr_err("%s: port #%d suspend failed.\n", __func__, port);
+			return MV_ERROR;
+		}
+	}
+	/* PM mode: Suspend to RAM Mode, TODO list*/
+
+	return MV_OK;
+}
+
+int mv_pp2_eth_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	int port = pdev->id;
+
+	if (mv_pp2_suspend_common(port)) {
+		pr_err("%s: port #%d suspend failed.\n", __func__, port);
+		return MV_ERROR;
+	}
+
+	return MV_OK;
+}
+
+int mv_pp2_resume_clock(int port)
+{
+/* TBD */
+	return 0;
+}
+
+
+int mv_pp2_eth_resume(struct platform_device *pdev)
+{
+	struct eth_port *pp;
+	int port = pdev->id;
+
+	pp = mv_pp2_port_by_id(port);
+	if (!pp)
+		return MV_OK;
+
+	/* PM mode: WoL Mode*/
+	if (pp->pm_mode == 0) {
+		if (mv_pp2_port_resume(port)) {
+			pr_err("%s: port #%d resume failed.\n", __func__, port);
+			return MV_ERROR;
+		}
+	}
+
+	return MV_OK;
+}
+
+#endif	/*  CONFIG_PM */
+
+static int mv_pp2_eth_remove(struct platform_device *pdev)
+{
+#ifdef CONFIG_NETMAP
+	int port = pdev->id;
+	struct eth_port *pp = mv_pp2_port_by_id(port);
+#endif
+	printk(KERN_INFO "Removing Marvell Ethernet Driver\n");
+	mv_pp2_sysfs_exit();
+#ifdef CONFIG_NETMAP
+	if (pp->flags & MV_ETH_F_IFCAP_NETMAP)
+		netmap_detach(pp->dev);
+#endif /* CONFIG_NETMAP */
+	return 0;
+}
+
+static void mv_pp2_eth_shutdown(struct platform_device *pdev)
+{
+
+#ifdef CONFIG_PM
+	int port = pdev->id;
+		struct eth_port *pp = mv_pp2_port_by_id(port);
+
+	if (pp->flags & MV_ETH_F_STARTED)
+		mv_pp2_suspend_common(port);
+#endif
+
+	printk(KERN_INFO "Shutting Down Marvell Ethernet Driver\n");
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id pp2_match[] = {
+	{ .compatible = "marvell,pp2" },/* Support ALP and A375 */
+	{ }
+};
+MODULE_DEVICE_TABLE(of, pp2_match);
+
+#endif /* CONFIG_OF */
+
+static struct platform_driver mv_pp2_eth_driver = {
+	.probe = mv_pp2_eth_probe,
+	.remove = mv_pp2_eth_remove,
+	.shutdown = mv_pp2_eth_shutdown,
+#ifdef CONFIG_PM
+	.suspend = mv_pp2_eth_suspend,
+	.resume = mv_pp2_eth_resume,
+#endif /*  CONFIG_PM */
+	.driver = {
+		.name = MV_PP2_PORT_NAME,
+#ifdef CONFIG_OF
+		.of_match_table = pp2_match,
+#endif /* CONFIG_OF */
+	},
+};
+
+#ifdef CONFIG_OF
+module_platform_driver(mv_pp2_eth_driver);
+#else
+static int __init mv_pp2_init_module(void)
+{
+	return platform_driver_register(&mv_pp2_eth_driver);
+}
+module_init(mv_pp2_init_module);
+
+static void __exit mv_pp2_cleanup_module(void)
+{
+	platform_driver_unregister(&mv_pp2_eth_driver);
+}
+module_exit(mv_pp2_cleanup_module);
+#endif /* CONFIG_OF */
+
+MODULE_DESCRIPTION("Marvell Ethernet Driver - www.marvell.com");
+MODULE_AUTHOR("Dmitri Epshtein <dima@marvell.com>");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_netdev.h b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_netdev.h
new file mode 100644
index 000000000000..8e8c4a0bb175
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_netdev.h
@@ -0,0 +1,1073 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#ifndef __mv_netdev_h__
+#define __mv_netdev_h__
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+#include <linux/mv_pp2.h>
+#include <net/ip.h>
+#include <linux/interrupt.h>
+
+#include "mvCommon.h"
+#include "mvOs.h"
+#include "mv802_3.h"
+#include "mvStack.h"
+
+#include "gbe/mvPp2Gbe.h"
+#include "bm/mvBmRegs.h"
+#include "bm/mvBm.h"
+
+#ifndef CONFIG_MV_PON_TCONTS
+# define CONFIG_MV_PON_TCONTS 16
+#endif
+
+/******************************************************
+ * driver statistics control --                       *
+ ******************************************************/
+#ifdef CONFIG_MV_PP2_STAT_ERR
+#define STAT_ERR(c) c
+#else
+#define STAT_ERR(c)
+#endif
+
+#ifdef CONFIG_MV_PP2_STAT_INF
+#define STAT_INFO(c) c
+#else
+#define STAT_INFO(c)
+#endif
+
+#ifdef CONFIG_MV_PP2_STAT_DBG
+#define STAT_DBG(c) c
+#else
+#define STAT_DBG(c)
+#endif
+
+#ifdef CONFIG_MV_PP2_STAT_DIST
+#define STAT_DIST(c) c
+#else
+#define STAT_DIST(c)
+#endif
+
+extern int mv_ctrl_pp2_txdone;
+extern unsigned int mv_pp2_pnc_ctrl_en;
+
+
+/****************************************************************************
+ * Rx buffer size: MTU + 2(Marvell Header) + 4(VLAN) + 14(MAC hdr) + 4(CRC) *
+ ****************************************************************************/
+#define MV_ETH_SKB_SHINFO_SIZE		SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+
+/* MTU + EtherType + Double VLAN + MAC_SA + MAC_DA + Marvell header */
+#define MV_MAX_PKT_SIZE(mtu)		((mtu) + MV_ETH_MH_SIZE + 2 * VLAN_HLEN + ETH_HLEN)
+
+#define RX_PKT_SIZE(mtu) \
+		MV_ALIGN_UP(MV_MAX_PKT_SIZE(mtu) + ETH_FCS_LEN, CPU_D_CACHE_LINE_SIZE)
+
+#define RX_BUF_SIZE(pkt_size)		((pkt_size) + NET_SKB_PAD)
+#define RX_TOTAL_SIZE(buf_size)		((buf_size) + MV_ETH_SKB_SHINFO_SIZE)
+#define RX_MAX_PKT_SIZE(total_size)	((total_size) - NET_SKB_PAD - MV_ETH_SKB_SHINFO_SIZE)
+
+#define RX_HWF_PKT_OFFS			32
+#define RX_HWF_BUF_SIZE(pkt_size)	((pkt_size) + RX_HWF_PKT_OFFS)
+#define RX_HWF_TOTAL_SIZE(buf_size)	(buf_size)
+#define RX_HWF_MAX_PKT_SIZE(total_size)	((total_size) - RX_HWF_PKT_OFFS)
+
+#define RX_TRUE_SIZE(total_size)	roundup_pow_of_two(total_size)
+
+#ifdef CONFIG_MV_PP2_SKB_RECYCLE
+extern int mv_ctrl_recycle;
+
+#define mv_pp2_is_recycle()     (mv_ctrl_pp2_recycle)
+int mv_pp2_skb_recycle(struct sk_buff *skb);
+#else
+#define mv_pp2_is_recycle()     0
+#endif /* CONFIG_MV_PP2_SKB_RECYCLE */
+
+
+
+/******************************************************
+ * interrupt control --                               *
+ ******************************************************/
+#define MV_ETH_TRYLOCK(lock, flags)                           \
+	(in_interrupt() ? spin_trylock((lock)) :              \
+		spin_trylock_irqsave((lock), (flags)))
+
+#define MV_ETH_LOCK(lock, flags)                              \
+{                                                             \
+	if (in_interrupt())                                   \
+		spin_lock((lock));                            \
+	else                                                  \
+		spin_lock_irqsave((lock), (flags));           \
+}
+
+#define MV_ETH_UNLOCK(lock, flags)                            \
+{                                                             \
+	if (in_interrupt())                                   \
+		spin_unlock((lock));                          \
+	else                                                  \
+		spin_unlock_irqrestore((lock), (flags));      \
+}
+
+#define MV_ETH_LIGHT_LOCK(flags)                              \
+	if (!in_interrupt())                                  \
+		local_irq_save(flags);
+
+#define MV_ETH_LIGHT_UNLOCK(flags)	                      \
+	if (!in_interrupt())                                  \
+		local_irq_restore(flags);
+
+/******************************************************
+ * rx / tx queues --                                  *
+ ******************************************************/
+/*
+ * Debug statistics
+ */
+
+struct txq_stats {
+#ifdef CONFIG_MV_PP2_STAT_ERR
+	u32 txq_err;
+#endif /* CONFIG_MV_PP2_STAT_ERR */
+#ifdef CONFIG_MV_PP2_STAT_DBG
+	u32 txq_tx;
+	u32 txq_reserved_req;   /* Number of requests to reserve TX descriptors */
+	u32 txq_reserved_total; /* Accumulated number of reserved TX descriptors */
+	u32 txq_txdone;
+#endif /* CONFIG_MV_PP2_STAT_DBG */
+};
+
+struct port_stats {
+
+#ifdef CONFIG_MV_PP2_STAT_ERR
+	u32 rx_error;
+	u32 tx_timeout;
+	u32 ext_stack_empty[CONFIG_NR_CPUS];
+	u32 ext_stack_full[CONFIG_NR_CPUS];
+	u32 state_err;
+#endif /* CONFIG_MV_PP2_STAT_ERR */
+
+#ifdef CONFIG_MV_PP2_STAT_INF
+	u32 irq[CONFIG_NR_CPUS];
+	u32 irq_err[CONFIG_NR_CPUS];
+	u32 poll[CONFIG_NR_CPUS];
+	u32 poll_exit[CONFIG_NR_CPUS];
+	u32 tx_done_timer_event[CONFIG_NR_CPUS];
+	u32 tx_done_timer_add[CONFIG_NR_CPUS];
+	u32 tx_done;
+	u32 link;
+	u32 netdev_stop;
+	u32 rx_buf_hdr;
+
+#ifdef CONFIG_MV_PP2_RX_SPECIAL
+	u32 rx_special;
+#endif /* CONFIG_MV_PP2_RX_SPECIAL */
+
+#ifdef CONFIG_MV_PP2_TX_SPECIAL
+	u32 tx_special;
+#endif /* CONFIG_MV_PP2_TX_SPECIAL */
+
+#endif /* CONFIG_MV_PP2_STAT_INF */
+
+#ifdef CONFIG_MV_PP2_STAT_DBG
+	u32 rxq[CONFIG_MV_PP2_RXQ];
+	u32 rx_tagged;
+	u32 rx_netif;
+	u32 rx_gro;
+	u32 rx_gro_bytes;
+	u32 rx_drop_sw;
+	u32 rx_csum_hw;
+	u32 rx_csum_sw;
+	u32 tx_csum_hw;
+	u32 tx_csum_sw;
+	u32 tx_skb_free;
+	u32 tx_sg;
+	u32 tx_tso;
+	u32 tx_tso_no_resource;
+	u32 tx_tso_bytes;
+	u32 ext_stack_put[CONFIG_NR_CPUS];
+	u32 ext_stack_get[CONFIG_NR_CPUS];
+#endif /* CONFIG_MV_PP2_STAT_DBG */
+};
+
+#define MV_ETH_TX_DESC_ALIGN		0x1f
+
+/* Used for define type of data saved in shadow: SKB or extended buffer or nothing */
+#define MV_ETH_SHADOW_SKB		0x1
+#define MV_ETH_SHADOW_EXT		0x2
+
+/* Masks used for pp->flags */
+#define MV_ETH_F_STARTED_BIT            0
+#define MV_ETH_F_RX_DESC_PREFETCH_BIT   1
+#define MV_ETH_F_RX_PKT_PREFETCH_BIT    2
+#define MV_ETH_F_CONNECT_LINUX_BIT      5 /* port is connected to Linux netdevice */
+#define MV_ETH_F_LINK_UP_BIT            6
+#define MV_ETH_F_SUSPEND_BIT            12
+#define MV_ETH_F_STARTED_OLD_BIT        13 /*STARTED_BIT value before suspend */
+#define MV_ETH_F_IFCAP_NETMAP_BIT       15
+
+#define MV_ETH_F_STARTED                (1 << MV_ETH_F_STARTED_BIT)
+#define MV_ETH_F_RX_DESC_PREFETCH       (1 << MV_ETH_F_RX_DESC_PREFETCH_BIT)
+#define MV_ETH_F_RX_PKT_PREFETCH        (1 << MV_ETH_F_RX_PKT_PREFETCH_BIT)
+#define MV_ETH_F_CONNECT_LINUX          (1 << MV_ETH_F_CONNECT_LINUX_BIT)
+#define MV_ETH_F_LINK_UP                (1 << MV_ETH_F_LINK_UP_BIT)
+#define MV_ETH_F_SUSPEND                (1 << MV_ETH_F_SUSPEND_BIT)
+#define MV_ETH_F_STARTED_OLD            (1 << MV_ETH_F_STARTED_OLD_BIT)
+#define MV_ETH_F_IFCAP_NETMAP           (1 << MV_ETH_F_IFCAP_NETMAP_BIT)
+
+#ifdef CONFIG_MV_PP2_DEBUG_CODE
+/* Masks used for pp->dbg_flags */
+#define MV_ETH_F_DBG_RX_BIT         0
+#define MV_ETH_F_DBG_TX_BIT         1
+#define MV_ETH_F_DBG_DUMP_BIT       2
+#define MV_ETH_F_DBG_ISR_BIT        3
+#define MV_ETH_F_DBG_POLL_BIT       4
+#define MV_ETH_F_DBG_BUFF_HDR_BIT   5
+
+#define MV_ETH_F_DBG_RX            (1 << MV_ETH_F_DBG_RX_BIT)
+#define MV_ETH_F_DBG_TX            (1 << MV_ETH_F_DBG_TX_BIT)
+#define MV_ETH_F_DBG_DUMP          (1 << MV_ETH_F_DBG_DUMP_BIT)
+#define MV_ETH_F_DBG_ISR           (1 << MV_ETH_F_DBG_ISR_BIT)
+#define MV_ETH_F_DBG_POLL          (1 << MV_ETH_F_DBG_POLL_BIT)
+#define MV_ETH_F_DBG_BUFF_HDR      (1 << MV_ETH_F_DBG_BUFF_HDR_BIT)
+#endif /* CONFIG_MV_PP2_DEBUG_CODE */
+
+/* Masks used for cpu_ctrl->flags */
+#define MV_ETH_F_TX_DONE_TIMER_BIT  0
+
+#define MV_ETH_F_TX_DONE_TIMER		(1 << MV_ETH_F_TX_DONE_TIMER_BIT)	/* 0x01 */
+
+
+#define MV_ETH_TXQ_INVALID	0xFF
+
+#define TOS_TO_DSCP(tos)	((tos >> 2) & 0x3F)
+
+/* Masks used for tx_spec->flags */
+#define MV_ETH_TX_F_NO_PAD	0x0001
+#define MV_ETH_TX_F_MH		0x0002
+#define MV_ETH_TX_F_HW_CMD	0x0004
+
+struct mv_pp2_tx_spec {
+	unsigned long	flags;
+	u32		hw_cmd[3];     /* tx_desc offset = 0x10, 0x14, 0x18 */
+	u16		tx_mh;
+	u8		txp;
+	u8		txq;
+#ifdef CONFIG_MV_PP2_TX_SPECIAL
+	void		(*tx_func) (u8 *data, int size, struct mv_pp2_tx_spec *tx_spec);
+#endif
+};
+
+struct txq_cpu_ctrl {
+	int			txq_size;
+	int			txq_count;
+	int			reserved_num; /* PPv2.1 (MAS 3.16)- number of reserved descriptors for this CPU */
+	u32			*shadow_txq; /* can be MV_ETH_PKT* or struct skbuf* */
+	int			shadow_txq_put_i;
+	int			shadow_txq_get_i;
+	struct txq_stats	stats;
+};
+
+struct tx_queue {
+	MV_PP2_PHYS_TXQ_CTRL	*q;
+	u8			txp;
+	u8			txq;
+	int			txq_size;
+	int			hwf_size;
+	int			swf_size;
+	int			rsvd_chunk;
+	struct txq_cpu_ctrl	txq_cpu[CONFIG_NR_CPUS];
+	spinlock_t		queue_lock;
+	MV_U32			txq_done_pkts_coal;
+	unsigned long		flags;
+};
+
+struct aggr_tx_queue {
+	MV_PP2_AGGR_TXQ_CTRL	*q;
+	int			txq_size;
+	int			txq_count;
+	struct txq_stats	stats;
+};
+
+struct rx_queue {
+	MV_PP2_PHYS_RXQ_CTRL	*q;
+	int			rxq_size;
+	MV_U32			rxq_pkts_coal;
+	MV_U32			rxq_time_coal;
+};
+
+struct dist_stats {
+	u32	*rx_dist;
+	int	rx_dist_size;
+	u32	*tx_done_dist;
+	int	tx_done_dist_size;
+	u32	*tx_tso_dist;
+	int	tx_tso_dist_size;
+};
+
+struct napi_group_ctrl {
+	int			id;
+	MV_U8			cpu_mask;
+	MV_U16			rxq_mask;
+	MV_U32			cause_rx_tx;
+	struct napi_struct	*napi;
+};
+
+struct cpu_ctrl {
+	struct eth_port		*pp;
+	struct napi_group_ctrl	*napi_group;
+	int			txq;
+	int			cpu;
+#if defined(CONFIG_MV_PP2_TXDONE_IN_HRTIMER)
+	struct hrtimer		tx_done_timer;
+	struct tasklet_struct	tx_done_tasklet;
+#elif defined(CONFIG_MV_PP2_TXDONE_IN_TIMER)
+	struct timer_list       tx_done_timer;
+#endif
+	unsigned long		flags;
+	MV_STACK		*ext_buf_stack;
+	int			ext_buf_size;
+};
+
+struct eth_port {
+	int			port;
+	struct mv_pp2_pdata	*plat_data;
+	bool			tagged; /* NONE/MH/DSA/EDSA/VLAN */
+	MV_PP2_PORT_CTRL	*port_ctrl;
+	struct rx_queue		*rxq_ctrl;
+	struct tx_queue		*txq_ctrl;
+	int			txp_num;
+	int			first_rxq;
+	int			rxq_num;
+	struct net_device	*dev;
+	rwlock_t		rwlock;
+	struct bm_pool		*pool_long;
+	struct bm_pool		*pool_short;
+	struct bm_pool		*hwf_pool_long;
+	struct bm_pool		*hwf_pool_short;
+	struct napi_group_ctrl	*napi_group[MV_PP2_MAX_RXQ];
+	unsigned long		flags; /* MH, TIMER, etc. */
+	u8			dbg_flags;
+	struct mv_pp2_tx_spec	tx_spec;
+	struct port_stats	stats;
+	struct dist_stats	dist_stats;
+	int			weight;
+	MV_U8			txq_dscp_map[64];
+	/* Ethtool parameters */
+	__u16			speed_cfg;
+	__u8			duplex_cfg;
+	__u8			autoneg_cfg;
+	__u16			advertise_cfg;
+	__u32			rx_time_coal_cfg;
+	__u32			rx_pkts_coal_cfg;
+	__u32			tx_pkts_coal_cfg;
+	__u32			rx_time_low_coal_cfg;
+	__u32			rx_time_high_coal_cfg;
+	__u32			rx_pkts_low_coal_cfg;
+	__u32			rx_pkts_high_coal_cfg;
+	__u32			pkt_rate_low_cfg;
+	__u32			pkt_rate_high_cfg;
+	__u32			rate_current; /* unknown (0), low (1), normal (2), high (3) */
+	__u32			rate_sample_cfg;
+	__u32			rx_adaptive_coal_cfg;
+	__u32			wol;
+	/* Rate calculate */
+	unsigned long		rx_rate_pkts;
+	unsigned long		rx_timestamp;
+#ifdef CONFIG_MV_PP2_RX_SPECIAL
+	int			(*rx_special_proc)(int port, int rxq, struct net_device *dev,
+						struct sk_buff *skb, struct pp2_rx_desc *rx_desc);
+#endif /* CONFIG_MV_PP2_RX_SPECIAL */
+#ifdef CONFIG_MV_PP2_TX_SPECIAL
+	int			(*tx_special_check)(int port, struct net_device *dev, struct sk_buff *skb,
+						struct mv_pp2_tx_spec *tx_spec_out);
+#endif /* CONFIG_MV_PP2_TX_SPECIAL */
+	MV_U32			cpuMask;
+	MV_U32			rx_indir_table[256];
+	struct cpu_ctrl		*cpu_config[CONFIG_NR_CPUS];
+	MV_U32			sgmii_serdes;
+	int			pm_mode;
+};
+
+enum eth_pm_mode {
+	MV_ETH_PM_WOL = 0,
+	MV_ETH_PM_SUSPEND,
+	MV_ETH_PM_CLOCK,
+	MV_ETH_PM_DISABLE,
+	MV_ETH_PM_LAST
+};
+
+#define MV_ETH_PRIV(dev)	((struct eth_port *)(netdev_priv(dev)))
+#define MV_DEV_STAT(dev)	(&((dev)->stats))
+
+/* BM specific defines */
+struct pool_stats {
+#ifdef CONFIG_MV_PP2_STAT_ERR
+	u32 skb_alloc_oom;
+	u32 stack_empty;
+	u32 stack_full;
+#endif /* CONFIG_MV_PP2_STAT_ERR */
+
+#ifdef CONFIG_MV_PP2_STAT_DBG
+	u32 no_recycle;
+	u32 bm_put;
+	u32 stack_put;
+	u32 stack_get;
+	u32 skb_alloc_ok;
+	u32 skb_recycled_ok;
+	u32 skb_recycled_err;
+	u32 bm_cookie_err;
+#endif /* CONFIG_MV_PP2_STAT_DBG */
+};
+
+/* BM pool assignment */
+#ifdef CONFIG_MV_PP2_BM_PER_PORT_MODE
+/* #port   SWF long   SWF short   HWF long   HWF short *
+ *   0         0          1           0           1    *
+ *   1         2          3           2           3    *
+ *   2         4          5           4           5    *
+ *   3         6          7           6           7    */
+#define MV_ETH_BM_SWF_LONG_POOL(port)		(port << 1)
+#define MV_ETH_BM_SWF_SHORT_POOL(port)		((port << 1) + 1)
+#define MV_ETH_BM_HWF_LONG_POOL(port)		(MV_ETH_BM_SWF_LONG_POOL(port))
+#define MV_ETH_BM_HWF_SHORT_POOL(port)		(MV_ETH_BM_SWF_SHORT_POOL(port))
+#else /* CONFIG_MV_PP2_BM_SWF_HWF_MODE */
+/* #port   SWF long   SWF short   HWF long   HWF short *
+ *   0         0          3           4           7    *
+ *   1         1          3           5           7    *
+ *   2         2          3           6           7    *
+ *   3         2          3           6           7    */
+#define MV_ETH_BM_SWF_LONG_POOL(port)		((port > 2) ? 2 : port)
+#define MV_ETH_BM_SWF_SHORT_POOL(port)		(3)
+#define MV_ETH_BM_HWF_LONG_POOL(port)		((port > 2) ? 6 : (port + 4))
+#define MV_ETH_BM_HWF_SHORT_POOL(port)		(7)
+#endif
+
+#define MV_ETH_BM_POOLS		MV_BM_POOLS
+#define mv_pp2_pool_bm(p)	(p->bm_pool)
+
+enum mv_pp2_bm_type {
+	MV_ETH_BM_FREE,		/* BM pool is not being used by any port		   */
+	MV_ETH_BM_SWF_LONG,	/* BM pool is being used by SWF as long pool		   */
+	MV_ETH_BM_SWF_SHORT,	/* BM pool is being used by SWF as short pool		   */
+	MV_ETH_BM_HWF_LONG,	/* BM pool is being used by HWF as long pool		   */
+	MV_ETH_BM_HWF_SHORT,	/* BM pool is being used by HWF as short pool		   */
+	MV_ETH_BM_MIXED_LONG,	/* BM pool is being used by both HWF and SWF as long pool  */
+	MV_ETH_BM_MIXED_SHORT	/* BM pool is being used by both HWF and SWF as short pool */
+};
+
+/* Macros for using mv_pp2_bm_type */
+#define MV_ETH_BM_POOL_IS_HWF(type)	((type == MV_ETH_BM_HWF_LONG) || (type == MV_ETH_BM_HWF_SHORT))
+#define MV_ETH_BM_POOL_IS_SWF(type)	((type == MV_ETH_BM_SWF_LONG) || (type == MV_ETH_BM_SWF_SHORT))
+#define MV_ETH_BM_POOL_IS_MIXED(type)	((type == MV_ETH_BM_MIXED_LONG) || (type == MV_ETH_BM_MIXED_SHORT))
+#define MV_ETH_BM_POOL_IS_SHORT(type)	((type == MV_ETH_BM_SWF_SHORT) || (type == MV_ETH_BM_HWF_SHORT)\
+									|| (type == MV_ETH_BM_MIXED_SHORT))
+#define MV_ETH_BM_POOL_IS_LONG(type)	((type == MV_ETH_BM_SWF_LONG) || (type == MV_ETH_BM_HWF_LONG)\
+									|| (type == MV_ETH_BM_MIXED_LONG))
+
+/* BM short pool packet size						*/
+/* These values assure that for both HWF and SWF,			*/
+/* the total number of bytes allocated for each buffer will be 512	*/
+#define MV_ETH_BM_SHORT_HWF_PKT_SIZE	RX_HWF_MAX_PKT_SIZE(512)
+#define MV_ETH_BM_SHORT_PKT_SIZE	RX_MAX_PKT_SIZE(512)
+
+struct bm_pool {
+	int			pool;
+	enum mv_pp2_bm_type	type;
+	int			capacity;
+	int			buf_num;
+	int			pkt_size;
+	u32			*bm_pool;
+	MV_ULONG		physAddr;
+	spinlock_t		lock;
+	u32			port_map;
+	atomic_t		in_use;
+	int			in_use_thresh;
+	struct			pool_stats stats;
+};
+
+/* BM cookie (32 bits) definition */
+/* bits[0-7]   - Flags  */
+/*      bit0 - bm_cookie is invalid for SKB recycle */
+#define MV_ETH_BM_COOKIE_F_INVALID_BIT		0
+#define MV_ETH_BM_COOKIE_F_INVALID		(1 << 0)
+
+/*      bit7 - buffer is guaranteed */
+#define MV_ETH_BM_COOKIE_F_GRNTD_BIT		7
+#define MV_ETH_BM_COOKIE_F_GRNTD		(1 << 7)
+
+/* bits[8-15]  - PoolId */
+#define MV_ETH_BM_COOKIE_POOL_OFFS		8
+/* bits[16-23] - Qset   */
+#define MV_ETH_BM_COOKIE_QSET_OFFS		16
+/* bits[24-31] - Cpu    */
+#define MV_ETH_BM_COOKIE_CPU_OFFS		24
+
+#ifdef CONFIG_MV_PP2_TXDONE_IN_HRTIMER
+#define MV_PP2_HRTIMER_PERIOD_MIN	(10UL)
+#define MV_PP2_HRTIMER_PERIOD_MAX	(10000UL)
+unsigned int mv_pp2_tx_done_hrtimer_period_get(void);
+int mv_pp2_tx_done_hrtimer_period_set(unsigned int period);
+#endif
+
+static inline int mv_pp2_bm_cookie_grntd_get(__u32 cookie)
+{
+	return (cookie & MV_ETH_BM_COOKIE_F_GRNTD) >> MV_ETH_BM_COOKIE_F_GRNTD_BIT;
+}
+
+static inline int mv_pp2_bm_cookie_qset_get(__u32 cookie)
+{
+	return (cookie >> 16) & 0xFF;
+}
+
+static inline int mv_pp2_bm_cookie_pool_get(__u32 cookie)
+{
+	return (cookie >> 8) & 0xFF;
+}
+
+static inline __u32 mv_pp2_bm_cookie_pool_set(__u32 cookie, int pool)
+{
+	__u32 bm;
+
+	bm = cookie & ~(0xFF << MV_ETH_BM_COOKIE_POOL_OFFS);
+	bm |= ((pool & 0xFF) << MV_ETH_BM_COOKIE_POOL_OFFS);
+
+	return bm;
+}
+static inline int mv_pp2_bm_cookie_cpu_get(__u32 cookie)
+{
+	return (cookie >> MV_ETH_BM_COOKIE_CPU_OFFS) & 0xFF;
+}
+
+/* Build bm cookie from rx_desc */
+/* Cookie includes information needed to return buffer to bm pool: poolid, qset, etc */
+static inline __u32 mv_pp2_bm_cookie_build(struct pp2_rx_desc *rx_desc)
+{
+	int pool = mvPp2RxBmPoolId(rx_desc);
+	int cpu = smp_processor_id();
+	int qset = (rx_desc->bmQset & PP2_RX_BUFF_QSET_NUM_MASK) >> PP2_RX_BUFF_QSET_NUM_OFFS;
+	int grntd = ((rx_desc->bmQset & PP2_RX_BUFF_TYPE_MASK) >> PP2_RX_BUFF_TYPE_OFFS);
+
+	return ((pool & 0xFF) << MV_ETH_BM_COOKIE_POOL_OFFS) |
+		((cpu & 0xFF) << MV_ETH_BM_COOKIE_CPU_OFFS) |
+		((qset & 0xFF) << MV_ETH_BM_COOKIE_QSET_OFFS) |
+		((grntd & 0x1) << MV_ETH_BM_COOKIE_F_GRNTD_BIT);
+
+}
+
+static inline int mv_pp2_bm_in_use_read(struct bm_pool *bm)
+{
+	return atomic_read(&bm->in_use);
+}
+
+extern struct bm_pool mv_pp2_pool[MV_ETH_BM_POOLS];
+extern struct eth_port **mv_pp2_ports;
+
+static inline void mv_pp2_rx_csum(struct eth_port *pp, struct pp2_rx_desc *rx_desc, struct sk_buff *skb)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 39)
+	if (pp->dev->features & NETIF_F_RXCSUM) {
+		if ((PP2_RX_L3_IS_IP4(rx_desc->status) && !PP2_RX_IP4_HDR_ERR(rx_desc->status)) ||
+			(PP2_RX_L3_IS_IP6(rx_desc->status))) {
+			if ((PP2_RX_L4_IS_UDP(rx_desc->status) || PP2_RX_L4_IS_TCP(rx_desc->status)) &&
+				(PP2_RX_L4_CHK_OK(rx_desc->status))) {
+				skb->csum = 0;
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				STAT_DBG(pp->stats.rx_csum_hw++);
+				return;
+			}
+		}
+	}
+#endif
+	skb->ip_summed = CHECKSUM_NONE;
+	STAT_DBG(pp->stats.rx_csum_sw++);
+}
+
+static inline void mv_pp2_interrupts_unmask(struct eth_port *pp)
+{
+	int cpu = smp_processor_id();
+	struct napi_group_ctrl *napi_group;
+
+	napi_group = pp->cpu_config[cpu]->napi_group;
+
+	if (napi_group == NULL)
+		return;
+
+
+	/* unmask interrupts - for RX unmask only RXQs that are in the same napi group */
+#ifdef CONFIG_MV_PP2_TXDONE_ISR
+	mvPp2GbeIsrRxTxUnmask(pp->port, napi_group->rxq_mask, 1 /* unmask TxDone interrupts */);
+#else
+	mvPp2GbeIsrRxTxUnmask(pp->port, napi_group->rxq_mask, 0 /* mask TxDone interrupts */);
+#endif /* CONFIG_MV_PP2_TXDONE_ISR */
+}
+
+static inline void mv_pp2_interrupts_mask(struct eth_port *pp)
+{
+	mvPp2GbeIsrRxTxMask(pp->port);
+}
+
+static inline int mv_pp2_ctrl_is_tx_enabled(struct eth_port *pp)
+{
+	if (!pp)
+		return -ENODEV;
+
+	if (pp->flags & MV_ETH_F_CONNECT_LINUX)
+		return 1;
+
+	return 0;
+}
+
+/*
+	Check if there are enough descriptors in physical TXQ.
+
+	return: 1 - not enough descriptors,  0 - enough descriptors
+*/
+static inline int mv_pp2_phys_desc_num_check(struct txq_cpu_ctrl *txq_ctrl, int num)
+{
+
+	if ((txq_ctrl->txq_count + num) > txq_ctrl->txq_size) {
+		/*
+		printk(KERN_ERR "eth_tx: txq_ctrl->txq=%d - no_resource: txq_count=%d, txq_size=%d, num=%d\n",
+			txq_ctrl->txq, txq_ctrl->txq_count, txq_ctrl->txq_size, num);
+		*/
+		STAT_ERR(txq_ctrl->stats.txq_err++);
+		return 1;
+	}
+	return 0;
+}
+
+/*
+	Check if there are enough reserved descriptors for SWF
+	If not enough, then try to reqest chunk of reserved descriptors and check again.
+
+	return: 1 - not enough descriptors,  0 - enough descriptors
+*/
+static inline int mv_pp2_reserved_desc_num_proc(struct eth_port *pp, int txp, int txq, int num)
+{
+	struct tx_queue *txq_ctrl = &pp->txq_ctrl[txp * CONFIG_MV_PP2_TXQ + txq];
+	struct txq_cpu_ctrl *txq_cpu_p;
+	struct txq_cpu_ctrl *txq_cpu_ptr =  &txq_ctrl->txq_cpu[smp_processor_id()];
+
+
+	if (txq_cpu_ptr->reserved_num < num) {
+		int req, new_reserved, cpu, txq_count = 0;
+
+		/* new chunk is necessary */
+
+		for_each_possible_cpu(cpu) {
+			/* compute total txq used descriptors */
+			txq_cpu_p = &txq_ctrl->txq_cpu[cpu];
+			txq_count += txq_cpu_p->txq_count;
+			txq_count += txq_cpu_p->reserved_num;
+		}
+
+		req = MV_MAX(txq_ctrl->rsvd_chunk, num - txq_cpu_ptr->reserved_num);
+		txq_count += req;
+
+		if (txq_count  > txq_ctrl->swf_size) {
+			STAT_ERR(txq_cpu_ptr->stats.txq_err++);
+			return 1;
+		}
+
+		new_reserved = mvPp2TxqAllocReservedDesc(pp->port, txp, txq, req);
+
+		STAT_DBG(txq_cpu_ptr->stats.txq_reserved_total += new_reserved);
+		txq_cpu_ptr->reserved_num += new_reserved;
+
+		if (txq_cpu_ptr->reserved_num < num) {
+			STAT_ERR(txq_cpu_ptr->stats.txq_err++);
+			return 1;
+		}
+
+		STAT_DBG(txq_cpu_ptr->stats.txq_reserved_req++);
+	}
+
+	return 0;
+}
+/*
+	Check if there are enough descriptors in aggregated TXQ.
+	If not enough, then try to update number of occupied aggr descriptors and check again.
+
+	return: 1 - not enough descriptors,  0 - enough descriptors
+*/
+static inline int mv_pp2_aggr_desc_num_check(struct aggr_tx_queue *aggr_txq_ctrl, int num)
+{
+	/* Is enough aggregated TX descriptors to send packet */
+	if ((aggr_txq_ctrl->txq_count + num) > aggr_txq_ctrl->txq_size) {
+		/* update number of available aggregated TX descriptors */
+		aggr_txq_ctrl->txq_count = mvPp2AggrTxqPendDescNumGet(smp_processor_id());
+	}
+	/* Is enough aggregated descriptors */
+	if ((aggr_txq_ctrl->txq_count + num) > aggr_txq_ctrl->txq_size) {
+		/*
+		printk(KERN_ERR "eth_tx: txq_ctrl->txq=%d - no_resource: txq_count=%d, txq_size=%d, num=%d\n",
+			txq_ctrl->txq, txq_ctrl->txq_count, txq_ctrl->txq_size, num);
+		*/
+		STAT_ERR(aggr_txq_ctrl->stats.txq_err++);
+		return 1;
+	}
+
+	return 0;
+}
+
+static inline void mv_pp2_tx_desc_flush(struct eth_port *pp, struct pp2_tx_desc *tx_desc)
+{
+#if defined(MV_CPU_BE)
+	mvPPv2TxqDescSwap(tx_desc);
+#endif /* MV_CPU_BE */
+
+	mvOsCacheLineFlush(pp->dev->dev.parent, tx_desc);
+}
+
+static inline void *mv_pp2_extra_pool_get(struct eth_port *pp)
+{
+	int cpu = smp_processor_id();
+	void *ext_buf;
+
+	if (mvStackIndex(pp->cpu_config[cpu]->ext_buf_stack) == 0) {
+		STAT_ERR(pp->stats.ext_stack_empty[cpu]++);
+		ext_buf = mvOsMalloc(CONFIG_MV_PP2_EXTRA_BUF_SIZE);
+	} else {
+		STAT_DBG(pp->stats.ext_stack_get[cpu]++);
+		ext_buf = (void *)mvStackPop(pp->cpu_config[cpu]->ext_buf_stack);
+	}
+	return ext_buf;
+}
+
+static inline int mv_pp2_extra_pool_put(struct eth_port *pp, void *ext_buf)
+{
+	int cpu = smp_processor_id();
+
+	if (mvStackIsFull(pp->cpu_config[cpu]->ext_buf_stack)) {
+		STAT_ERR(pp->stats.ext_stack_full[cpu]++);
+		mvOsFree(ext_buf);
+		return 1;
+	}
+	mvStackPush(pp->cpu_config[cpu]->ext_buf_stack, (MV_U32)ext_buf);
+	STAT_DBG(pp->stats.ext_stack_put[cpu]++);
+	return 0;
+}
+
+#if defined(CONFIG_MV_PP2_TXDONE_IN_HRTIMER)
+static inline void mv_pp2_add_tx_done_timer(struct cpu_ctrl *cpuCtrl)
+{
+	ktime_t interval;
+	unsigned long delay_in_ns = mv_pp2_tx_done_hrtimer_period_get()*1000; /*the func return value is in us unit*/
+
+	if (test_and_set_bit(MV_ETH_F_TX_DONE_TIMER_BIT, &(cpuCtrl->flags)) == 0) {
+		STAT_INFO(cpuCtrl->pp->stats.tx_done_timer_add[smp_processor_id()]++);
+		interval = ktime_set(0, delay_in_ns);
+		hrtimer_start(&cpuCtrl->tx_done_timer, interval, HRTIMER_MODE_REL_PINNED);
+	}
+}
+#elif defined(CONFIG_MV_PP2_TXDONE_IN_TIMER)
+static inline void mv_pp2_add_tx_done_timer(struct cpu_ctrl *cpuCtrl)
+{
+	if (test_and_set_bit(MV_ETH_F_TX_DONE_TIMER_BIT, &(cpuCtrl->flags)) == 0) {
+		cpuCtrl->tx_done_timer.expires = jiffies + ((HZ * CONFIG_MV_PP2_TX_DONE_TIMER_PERIOD) / 1000); /* ms */
+		STAT_INFO(cpuCtrl->pp->stats.tx_done_timer_add[smp_processor_id()]++);
+		add_timer_on(&cpuCtrl->tx_done_timer, smp_processor_id());
+	}
+}
+#endif
+
+static inline void mv_pp2_shadow_inc_get(struct txq_cpu_ctrl *txq_cpu)
+{
+	txq_cpu->shadow_txq_get_i++;
+	if (txq_cpu->shadow_txq_get_i == txq_cpu->txq_size)
+		txq_cpu->shadow_txq_get_i = 0;
+}
+
+static inline void mv_pp2_shadow_inc_put(struct txq_cpu_ctrl *txq_cpu)
+{
+	txq_cpu->shadow_txq_put_i++;
+	if (txq_cpu->shadow_txq_put_i == txq_cpu->txq_size)
+		txq_cpu->shadow_txq_put_i = 0;
+}
+
+static inline void mv_pp2_shadow_dec_put(struct txq_cpu_ctrl *txq_cpu)
+{
+	if (txq_cpu->shadow_txq_put_i == 0)
+		txq_cpu->shadow_txq_put_i = txq_cpu->txq_size - 1;
+	else
+		txq_cpu->shadow_txq_put_i--;
+}
+
+static inline u32 mv_pp2_shadow_get_pop(struct txq_cpu_ctrl *txq_cpu)
+{
+	u32 res = txq_cpu->shadow_txq[txq_cpu->shadow_txq_get_i];
+
+	txq_cpu->shadow_txq_get_i++;
+	if (txq_cpu->shadow_txq_get_i == txq_cpu->txq_size)
+		txq_cpu->shadow_txq_get_i = 0;
+	return res;
+}
+
+static inline void mv_pp2_shadow_push(struct txq_cpu_ctrl *txq_cpu, int val)
+{
+	txq_cpu->shadow_txq[txq_cpu->shadow_txq_put_i] = val;
+	txq_cpu->shadow_txq_put_i++;
+	if (txq_cpu->shadow_txq_put_i == txq_cpu->txq_size)
+		txq_cpu->shadow_txq_put_i = 0;
+}
+
+/* Free skb pair */
+static inline void mv_pp2_skb_free(struct sk_buff *skb)
+{
+#ifdef CONFIG_MV_PP2_SKB_RECYCLE
+	skb->skb_recycle = NULL;
+	skb->hw_cookie = 0;
+#endif /* CONFIG_MV_PP2_SKB_RECYCLE */
+
+	dev_kfree_skb_any(skb);
+}
+
+/* PPv2.1 new API - pass packet to Qset */
+/*
+static inline void mv_pp2_pool_qset_put(int pool, MV_ULONG phys_addr, MV_ULONG cookie, struct pp2_rx_desc *rx_desc)
+{
+	int qset, is_grntd;
+
+	qset = (rx_desc->bmQset & PP2_RX_BUFF_QSET_NUM_MASK) >> PP2_RX_BUFF_QSET_NUM_OFFS;
+	is_grntd = (rx_desc->bmQset & PP2_RX_BUFF_TYPE_MASK) >> PP2_RX_BUFF_TYPE_OFFS;
+
+	mvBmPoolQsetPut(pool, (MV_ULONG) phys_addr, (MV_ULONG) cookie, qset, is_grntd);
+}
+*/
+
+/* Pass pkt to BM Pool or RXQ ring */
+static inline void mv_pp2_pool_refill(struct bm_pool *ppool, __u32 bm,
+				MV_ULONG phys_addr, MV_ULONG cookie)
+{
+	int pool = mv_pp2_bm_cookie_pool_get(bm);
+	unsigned long flags = 0;
+	int grntd, qset;
+
+	/* Refill BM pool */
+	STAT_DBG(ppool->stats.bm_put++);
+	MV_ETH_LIGHT_LOCK(flags);
+
+	grntd =  mv_pp2_bm_cookie_grntd_get(bm);
+	qset = mv_pp2_bm_cookie_qset_get(bm);
+
+	/* if PPV2.0 HW ignore qset and grntd */
+	mvBmPoolQsetPut(pool, (MV_ULONG) phys_addr, (MV_ULONG) cookie, qset, grntd);
+
+	MV_ETH_LIGHT_UNLOCK(flags);
+}
+
+static inline MV_U32 mv_pp2_pool_get(int pool)
+{
+	MV_U32 bufCookie;
+	unsigned long flags = 0;
+
+	MV_ETH_LIGHT_LOCK(flags);
+	bufCookie = mvBmPoolGet(pool, NULL);
+	MV_ETH_LIGHT_UNLOCK(flags);
+	return bufCookie;
+}
+
+
+/******************************************************
+ * Function prototypes --                             *
+ ******************************************************/
+int         mv_pp2_start(struct net_device *dev);
+int         mv_pp2_eth_stop(struct net_device *dev);
+int         mv_pp2_eth_change_mtu(struct net_device *dev, int mtu);
+int         mv_pp2_check_mtu_internals(struct net_device *dev, int mtu);
+int         mv_pp2_eth_check_mtu_valid(struct net_device *dev, int mtu);
+
+int         mv_pp2_eth_set_mac_addr(struct net_device *dev, void *mac);
+void	    mv_pp2_rx_set_rx_mode(struct net_device *dev);
+int         mv_pp2_eth_open(struct net_device *dev);
+int         mv_pp2_eth_port_suspend(int port);
+int         mv_pp2_port_resume(int port);
+int         mv_pp2_resume_clock(int port);
+int         mv_pp2_suspend_clock(int port);
+int         mv_pp2_eth_suspend_internals(struct eth_port *pp);
+int         mv_pp2_eth_resume_internals(struct eth_port *pp, int mtu);
+int         mv_pp2_restore_registers(struct eth_port *pp, int mtu);
+
+void	    mv_pp2_port_promisc_set(int port);
+
+void        mv_pp2_win_init(void);
+int         mv_pp2_resume_network_interfaces(struct eth_port *pp);
+int         mv_pp2_pm_mode_set(int port, int mode);
+
+irqreturn_t mv_pp2_isr(int irq, void *dev_id);
+irqreturn_t mv_pp2_link_isr(int irq, void *dev_id);
+int         mv_pp2_start_internals(struct eth_port *pp, int mtu);
+int         mv_pp2_stop_internals(struct eth_port *pp);
+int         mv_pp2_eth_change_mtu_internals(struct net_device *netdev, int mtu);
+
+int         mv_pp2_rx_reset(int port);
+int         mv_pp2_txq_clean(int port, int txp, int txq);
+int         mv_pp2_txp_clean(int port, int txp);
+int         mv_pp2_all_ports_cleanup(void);
+int         mv_pp2_all_ports_probe(void);
+
+MV_STATUS   mv_pp2_rx_ptks_coal_set(int port, int rxq, MV_U32 value);
+MV_STATUS   mv_pp2_rx_time_coal_set(int port, int rxq, MV_U32 value);
+MV_STATUS   mv_pp2_tx_done_ptks_coal_set(int port, int txp, int txq, MV_U32 value);
+
+struct eth_port     *mv_pp2_port_by_id(unsigned int port);
+bool                 mv_pp2_eth_netdev_find(unsigned int if_index);
+
+void        mv_pp2_mac_show(int port);
+void        mv_pp2_dscp_map_show(int port);
+int         mv_pp2_rxq_dscp_map_set(int port, int rxq, unsigned char dscp);
+int         mv_pp2_txq_dscp_map_set(int port, int txq, unsigned char dscp);
+
+int         mv_pp2_eth_rxq_vlan_prio_set(int port, int rxq, unsigned char prio);
+void        mv_pp2_eth_vlan_prio_show(int port);
+
+void        mv_pp2_eth_netdev_print(struct net_device *netdev);
+void        mv_pp2_status_print(void);
+void        mv_pp2_eth_port_status_print(unsigned int port);
+void        mv_pp2_port_stats_print(unsigned int port);
+void        mv_pp2_pool_status_print(int pool);
+
+void        mv_pp2_set_noqueue(struct net_device *dev, int enable);
+void	    mv_pp2_ctrl_pnc(int en);
+void        mv_pp2_ctrl_hwf(int en);
+int         mv_pp2_eth_ctrl_recycle(int en);
+void        mv_pp2_ctrl_txdone(int num);
+int         mv_pp2_eth_ctrl_tx_mh(int port, u16 mh);
+
+int         mv_pp2_ctrl_tx_cmd_dsa(int port, u16 dsa);
+int         mv_pp2_ctrl_tx_cmd_color(int port, u16 color);
+int         mv_pp2_ctrl_tx_cmd_gem_id(int port, u16 gem_id);
+int         mv_pp2_ctrl_tx_cmd_pon_fec(int port, u16 pon_fec);
+int         mv_eth_ctrl_tx_cmd_gem_oem(int port, u16 gem_oem);
+int         mv_pp2_ctrl_tx_cmd_mod(int port, u16 mod);
+int         mv_pp2_ctrl_tx_cmd_pme_dptr(int port, u16 pme_dptr);
+int         mv_pp2_ctrl_tx_cmd_pme_prog(int port, u16 pme_prog);
+
+int         mv_pp2_ctrl_txq_cpu_def(int port, int txp, int txq, int cpu);
+int         mv_pp2_ctrl_flag(int port, u32 flag, u32 val);
+int         mv_pp2_ctrl_tx_flag(int port, u32 flag, u32 val);
+int	    mv_pp2_ctrl_dbg_flag(int port, u32 flag, u32 val);
+int	    mv_pp2_ctrl_txq_size_set(int port, int txp, int txq, int txq_size);
+int         mv_pp2_ctrl_txq_limits_set(int port, int txp, int txq, int hwf_size, int swf_size);
+int         mv_pp2_ctrl_txq_chunk_set(int port, int txp, int txq, int chunk_size);
+int         mv_pp2_ctrl_rxq_size_set(int port, int rxq, int value);
+int	    mv_pp2_ctrl_pool_buf_num_set(int port, int pool, int buf_num);
+int         mv_pp2_ctrl_pool_detach(int port, struct bm_pool *ppool);
+int         mv_pp2_ctrl_pool_size_set(int pool, int pkt_size);
+int	    mv_pp2_ctrl_long_pool_set(int port, int pool);
+int	    mv_pp2_ctrl_short_pool_set(int port, int pool);
+int	    mv_pp2_ctrl_hwf_long_pool_set(int port, int pool);
+int	    mv_pp2_ctrl_hwf_short_pool_set(int port, int pool);
+int     mv_pp2_ctrl_set_poll_rx_weight(int port, u32 weight);
+int     mv_pp2_ctrl_pool_port_map_get(int pool);
+void        mv_pp2_tx_desc_print(struct pp2_tx_desc *desc);
+void        mv_pp2_pkt_print(struct eth_port *pp, struct eth_pbuf *pkt);
+void        mv_pp2_rx_desc_print(struct pp2_rx_desc *desc);
+void        mv_pp2_skb_print(struct sk_buff *skb);
+void        mv_pp2_eth_link_status_print(int port);
+void        mv_pp2_buff_hdr_rx_dump(struct eth_port *pp, struct pp2_rx_desc *rx_desc);
+void        mv_pp2_buff_hdr_rx(struct eth_port *pp, struct pp2_rx_desc *rx_desc);
+
+/* External MAC support (i.e. PON) */
+/* callback functions to be called by netdev (implemented in external MAC module) */
+struct mv_eth_ext_mac_ops {
+	MV_BOOL		(*link_status_get)(int port_id);
+	MV_STATUS	(*max_pkt_size_set)(int port_id, MV_U32 maxEth);
+	MV_STATUS	(*mac_addr_set)(int port_id, void *addr);
+	MV_STATUS	(*port_enable)(int port_id);
+	MV_STATUS	(*port_disable)(int port_id);
+	MV_STATUS	(*mib_counters_show)(int port_id);
+};
+
+/* callback functions to be called by external MAC module (implemented in netdev) */
+struct mv_netdev_notify_ops {
+	void		(*link_notify)(int port_id, MV_BOOL state);
+};
+
+/* Called by external MAC module */
+void mv_eth_ext_mac_ops_register(int port_id,
+		struct mv_eth_ext_mac_ops **extern_mac_ops, struct mv_netdev_notify_ops **netdev_ops);
+
+#ifdef CONFIG_MV_INCLUDE_PON
+MV_BOOL mv_pon_link_status(MV_ETH_PORT_STATUS *link);
+MV_STATUS mv_pon_mtu_config(MV_U32 maxEth);
+MV_STATUS mv_pon_set_mac_addr(void *addr);
+MV_STATUS mv_pon_enable(void);
+MV_STATUS mv_pon_disable(void);
+#endif
+
+#ifdef CONFIG_MV_PP2_TX_SPECIAL
+void        mv_pp2_tx_special_check_func(int port, int (*func)(int port, struct net_device *dev,
+				  struct sk_buff *skb, struct mv_pp2_tx_spec *tx_spec_out));
+#endif /* CONFIG_MV_PP2_TX_SPECIAL */
+
+#ifdef CONFIG_MV_PP2_RX_SPECIAL
+void        mv_pp2_rx_special_proc_func(int port, int (*func)(int port, int rxq, struct net_device *dev,
+							struct sk_buff *skb, struct pp2_rx_desc *rx_desc));
+#endif /* CONFIG_MV_PP2_RX_SPECIAL */
+
+int  mv_pp2_poll(struct napi_struct *napi, int budget);
+void mv_pp2_link_event(struct eth_port *pp, int print);
+int mv_pp2_rx_policy(u32 cause);
+int mv_pp2_refill(struct eth_port *pp, struct bm_pool *ppool, __u32 bm, int is_recycle);
+u32 mv_pp2_txq_done(struct eth_port *pp, struct tx_queue *txq_ctrl);
+u32 mv_pp2_tx_done_gbe(struct eth_port *pp, u32 cause_tx_done, int *tx_todo);
+u32 mv_pp2_tx_done_pon(struct eth_port *pp, int *tx_todo);
+
+/*****************************************
+ *            NAPI Group API             *
+ *****************************************/
+int  mv_pp2_port_napi_group_create(int port, int group);
+int  mv_pp2_port_napi_group_delete(int port, int group);
+int  mv_pp2_napi_set_cpu_affinity(int port, int group, int cpu_mask);
+int  mv_pp2_eth_napi_set_rxq_affinity(int port, int group, int rxq_mask);
+void mv_pp2_napi_groups_print(int port);
+
+struct pp2_rx_desc *mv_pp2_rx_prefetch(struct eth_port *pp,
+					MV_PP2_PHYS_RXQ_CTRL *rx_ctrl, int rx_done, int rx_todo);
+
+void		*mv_pp2_bm_pool_create(int pool, int capacity, MV_ULONG *physAddr);
+
+#if defined(CONFIG_MV_PP2_HWF)
+MV_STATUS mv_pp2_hwf_bm_create(int port, int mtuPktSize);
+void      mv_hwf_bm_dump(void);
+#endif /* CONFIG_MV_PP2_HWF */
+
+#ifdef CONFIG_MV_PP2_SWF_HWF_CORRUPTION_WA
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 34)
+extern void ___dma_single_dev_to_cpu(const void *, size_t, enum dma_data_direction);
+#else
+extern void dma_cache_maint(const void *, size_t, int);
+#endif
+void mv_pp2_cache_inv_wa_ctrl(int en);
+#endif
+
+#endif /* __mv_netdev_h__ */
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_pp2_netmap.h b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_pp2_netmap.h
new file mode 100644
index 000000000000..5c3b9df1bb51
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/net_dev/mv_pp2_netmap.h
@@ -0,0 +1,473 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+/* mv_pp2_netmap.h */
+
+#ifndef __MV_PP2_NETMAP_H__
+#define __MV_PP2_NETMAP_H__
+
+#include <bsd_glue.h>
+#include <netmap.h>
+#include <netmap_kern.h>
+
+#define SOFTC_T	eth_port
+
+static int pool_buf_num[MV_BM_POOLS];
+static struct bm_pool *pool_short[MV_ETH_MAX_PORTS];
+/*
+ * Register/unregister
+ *	adapter is pointer to eth_port
+ */
+static int mv_pp2_netmap_reg(struct ifnet *ifp, int onoff)
+{
+	struct eth_port *adapter = MV_ETH_PRIV(ifp);
+	struct netmap_adapter *na = NA(ifp);
+	int error = 0, txq, rxq;
+
+	if (na == NULL)
+		return -EINVAL;
+
+	if (!(ifp->flags & IFF_UP)) {
+		/* mv_pp2_eth_open has not been called yet, so resources
+		 * are not allocated */
+		printk(KERN_ERR "Interface is down!");
+		return -EINVAL;
+	}
+
+	/* stop current interface */
+	if (mv_pp2_eth_stop(ifp)) {
+		printk(KERN_ERR "%s: stop interface failed\n", ifp->name);
+		return -EINVAL;
+	}
+
+	if (onoff) { /* enable netmap mode */
+		u32 port_map;
+
+		mv_pp2_rx_reset(adapter->port);
+		ifp->if_capenable |= IFCAP_NETMAP;
+		na->if_transmit = (void *)ifp->netdev_ops;
+		ifp->netdev_ops = &na->nm_ndo;
+
+		/* check that long pool is not shared with other ports */
+		port_map =  mv_pp2_ctrl_pool_port_map_get(adapter->pool_long->pool);
+		if (port_map != (1 << adapter->port)) {
+			printk(KERN_ERR "%s: BM pool %d not initialized or shared with other ports.\n",
+			__func__, adapter->pool_long->pool);
+			return -EINVAL;
+		}
+
+		/* Keep old number of long pool buffers */
+		pool_buf_num[adapter->pool_long->pool] = adapter->pool_long->buf_num;
+		mv_pp2_pool_free(adapter->pool_long->pool, adapter->pool_long->buf_num);
+
+		/* set same pool number for short and long packets */
+		for (rxq = 0; rxq < CONFIG_MV_PP2_RXQ; rxq++)
+			mvPp2RxqBmShortPoolSet(adapter->port, rxq, adapter->pool_long->pool);
+
+		/* update short pool in software */
+		pool_short[adapter->port] = adapter->pool_short;
+		adapter->pool_short = adapter->pool_long;
+
+		set_bit(MV_ETH_F_IFCAP_NETMAP_BIT, &(adapter->flags));
+
+	} else {
+		unsigned long flags = 0;
+		u_int pa, i;
+
+		ifp->if_capenable &= ~IFCAP_NETMAP;
+		ifp->netdev_ops = (void *)na->if_transmit;
+		mv_pp2_rx_reset(adapter->port);
+
+		/* TODO: handle SMP - each CPU must call this loop */
+		/*
+		for (txq = 0; txq < CONFIG_MV_PP2_TXQ; txq++)
+			mvPp2TxqSentDescProc(adapter->port, 0, txq);
+		*/
+
+		i = 0;
+		MV_ETH_LOCK(&adapter->pool_long->lock, flags);
+		do {
+			mvBmPoolGet(adapter->pool_long->pool, &pa);
+			i++;
+		} while (pa != 0);
+
+		MV_ETH_UNLOCK(&adapter->pool_long->lock, flags);
+		printk(KERN_ERR "NETMAP: free %d buffers from pool %d\n", i, adapter->pool_long->pool);
+		mv_pp2_pool_add(adapter, adapter->pool_long->pool, pool_buf_num[adapter->pool_long->pool]);
+
+		/* set port's short pool for Linux driver */
+		for (rxq = 0; rxq < CONFIG_MV_PP2_RXQ; rxq++)
+			mvPp2RxqBmShortPoolSet(adapter->port, rxq, adapter->pool_short->pool);
+
+		/* update short pool in software */
+		adapter->pool_short = pool_short[adapter->port];
+
+		clear_bit(MV_ETH_F_IFCAP_NETMAP_BIT, &(adapter->flags));
+	}
+
+	if (mv_pp2_start(ifp)) {
+		printk(KERN_ERR "%s: start interface failed\n", ifp->name);
+		return -EINVAL;
+	}
+	return error;
+}
+
+/*
+ * Reconcile kernel and user view of the transmit ring.
+ */
+static int
+mv_pp2_netmap_txsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
+{
+	struct SOFTC_T *adapter = MV_ETH_PRIV(ifp);
+	struct netmap_adapter *na = NA(ifp);
+	struct netmap_kring *kring = &na->tx_rings[ring_nr];
+	struct netmap_ring *ring = kring->ring;
+	u_int j, k, n = 0, lim = kring->nkr_num_slots - 1;
+	u_int sent_n, cpu = 0;
+
+	struct pp2_tx_desc *tx_desc;
+	struct aggr_tx_queue *aggr_txq_ctrl = NULL;
+
+	/* generate an interrupt approximately every half ring */
+	/*int report_frequency = kring->nkr_num_slots >> 1;*/
+	/* take a copy of ring->cur now, and never read it again */
+	k = ring->cur;
+	if (k > lim)
+		return netmap_ring_reinit(kring);
+
+	aggr_txq_ctrl = &aggr_txqs[cpu];
+
+	if (do_lock)
+		mtx_lock(&kring->q_lock);
+
+	rmb();
+	/*
+	 * Process new packets to send. j is the current index in the
+	 * netmap ring, l is the corresponding index in the NIC ring.
+	 */
+	j = kring->nr_hwcur;
+	if (j != k) {	/* we have new packets to send */
+		for (n = 0; j != k; n++) {
+			/* slot is the current slot in the netmap ring */
+			struct netmap_slot *slot = &ring->slot[j];
+
+			uint64_t paddr;
+			void *addr = PNMB(slot, &paddr);
+			u_int len = slot->len;
+
+			if (addr == netmap_buffer_base || len > NETMAP_BUF_SIZE) {
+				if (do_lock)
+					mtx_unlock(&kring->q_lock);
+				return netmap_ring_reinit(kring);
+			}
+
+			slot->flags &= ~NS_REPORT;
+
+			/* check aggregated TXQ resource */
+			if (mv_pp2_aggr_desc_num_check(aggr_txq_ctrl, 1)) {
+				if (do_lock)
+					mtx_unlock(&kring->q_lock);
+				return netmap_ring_reinit(kring);
+			}
+
+			tx_desc = mvPp2AggrTxqNextDescGet(aggr_txq_ctrl->q);
+			tx_desc->physTxq = MV_PPV2_TXQ_PHYS(adapter->port, 0, ring_nr);
+			tx_desc->bufPhysAddr = (uint32_t)(paddr);
+			tx_desc->dataSize = len;
+			tx_desc->pktOffset = slot->data_offs;
+			tx_desc->command = PP2_TX_L4_CSUM_NOT | PP2_TX_F_DESC_MASK | PP2_TX_L_DESC_MASK;
+			mv_pp2_tx_desc_flush(adapter, tx_desc);
+			aggr_txq_ctrl->txq_count++;
+
+			if (slot->flags & NS_BUF_CHANGED)
+				slot->flags &= ~NS_BUF_CHANGED;
+
+			j = (j == lim) ? 0 : j + 1;
+		}
+		kring->nr_hwcur = k; /* the saved ring->cur */
+		kring->nr_hwavail -= n;
+
+		wmb(); /* synchronize writes to the NIC ring */
+
+		/* Enable transmit */
+		sent_n = n;
+		while (sent_n > 0xFF) {
+			mvPp2AggrTxqPendDescAdd(0xFF);
+			sent_n -= 0xFF;
+		}
+		mvPp2AggrTxqPendDescAdd(sent_n);
+	}
+
+	if (n == 0 || kring->nr_hwavail < 1) {
+		int delta;
+
+		delta = mvPp2TxqSentDescProc(adapter->port, 0, ring_nr);
+		if (delta)
+			kring->nr_hwavail += delta;
+	}
+	/* update avail to what the kernel knows */
+	ring->avail = kring->nr_hwavail;
+
+	if (do_lock)
+		mtx_unlock(&kring->q_lock);
+
+	return 0;
+}
+
+
+/*
+ * Reconcile kernel and user view of the receive ring.
+ */
+static int
+mv_pp2_netmap_rxsync(struct ifnet *ifp, u_int ring_nr, int do_lock)
+{
+	struct SOFTC_T *adapter = MV_ETH_PRIV(ifp);
+	struct netmap_adapter *na = NA(ifp);
+
+	MV_PP2_PHYS_RXQ_CTRL *rxr = adapter->rxq_ctrl[ring_nr].q;
+
+	struct netmap_kring *kring = &na->rx_rings[ring_nr];
+	struct netmap_ring *ring = kring->ring;
+	u_int j, l, n;
+
+	int force_update = do_lock || kring->nr_kflags & NKR_PENDINTR;
+
+	uint16_t strip_crc = (1) ? 4 : 0; /* TBD :: remove CRC or not */
+
+	u_int lim   = kring->nkr_num_slots - 1;
+	u_int k     = ring->cur;
+	u_int resvd = ring->reserved;
+	u_int rx_done;
+
+	if (k > lim)
+		return netmap_ring_reinit(kring);
+
+	if (do_lock)
+		mtx_lock(&kring->q_lock);
+
+	/* hardware memory barrier that prevents any memory read access from being moved */
+	/* and executed on the other side of the barrier */
+	rmb();
+
+	/*
+	 * Import newly received packets into the netmap ring.
+	 * j is an index in the netmap ring, l in the NIC ring.
+	*/
+	l = rxr->queueCtrl.nextToProc;
+	j = netmap_idx_n2k(kring, l); /* map NIC ring index to netmap ring index */
+
+	if (netmap_no_pendintr || force_update) { /* netmap_no_pendintr = 1, see netmap.c */
+		/* Get number of received packets */
+		rx_done = mvPp2RxqBusyDescNumGet(adapter->port, ring_nr);
+		mvOsCacheIoSync();
+		rx_done = (rx_done >= lim) ? lim - 1 : rx_done;
+		for (n = 0; n < rx_done; n++) {
+			PP2_RX_DESC *curr = (PP2_RX_DESC *)MV_PP2_QUEUE_DESC_PTR(&rxr->queueCtrl, l);
+
+#if defined(MV_CPU_BE)
+			mvPPv2RxqDescSwap(curr);
+#endif /* MV_CPU_BE */
+
+			/* TBD : check for ERRORs */
+			ring->slot[j].len = (curr->dataSize) - strip_crc - MV_ETH_MH_SIZE;
+			ring->slot[j].data_offs = NET_SKB_PAD + MV_ETH_MH_SIZE;
+			ring->slot[j].buf_idx = curr->bufCookie;
+			ring->slot[j].flags |= NS_BUF_CHANGED;
+
+			j = (j == lim) ? 0 : j + 1;
+			l = (l == lim) ? 0 : l + 1;
+		}
+		if (n) { /* update the state variables */
+			struct napi_group_ctrl *napi_group;
+
+			rxr->queueCtrl.nextToProc = l;
+			kring->nr_hwavail += n;
+			mvPp2RxqOccupDescDec(adapter->port, ring_nr, n);
+
+			/* enable interrupts */
+			wmb();
+			napi_group = adapter->cpu_config[smp_processor_id()]->napi_group;
+			mvPp2GbeCpuInterruptsEnable(adapter->port, napi_group->cpu_mask);
+		}
+		kring->nr_kflags &= ~NKR_PENDINTR;
+	}
+
+	/* skip past packets that userspace has released */
+	j = kring->nr_hwcur; /* netmap ring index */
+	if (resvd > 0) {
+		if (resvd + ring->avail >= lim + 1) {
+			printk(KERN_ERR "XXX invalid reserve/avail %d %d", resvd, ring->avail);
+			ring->reserved = resvd = 0;
+		}
+		k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
+	}
+
+	if (j != k) { /* userspace has released some packets. */
+		l = netmap_idx_k2n(kring, j); /* NIC ring index */
+		for (n = 0; j != k; n++) {
+			struct netmap_slot *slot = &ring->slot[j];
+			PP2_RX_DESC *curr = (PP2_RX_DESC *)MV_PP2_QUEUE_DESC_PTR(&rxr->queueCtrl, l);
+			uint64_t paddr;
+			uint32_t *addr = PNMB(slot, &paddr);
+
+			/*
+			In big endian mode:
+			we do not need to swap descriptor here, allready swapped before
+			*/
+
+			slot->data_offs = NET_SKB_PAD + MV_ETH_MH_SIZE;
+			if (addr == (uint32_t *)netmap_buffer_base) { /* bad buf */
+				if (do_lock)
+					mtx_unlock(&kring->q_lock);
+
+				return netmap_ring_reinit(kring);
+			}
+			if (slot->flags & NS_BUF_CHANGED) {
+				slot->flags &= ~NS_BUF_CHANGED;
+
+				mvBmPoolPut(adapter->pool_long->pool, (uint32_t)paddr, curr->bufCookie);
+			}
+			curr->status = 0;
+			j = (j == lim) ? 0 : j + 1;
+			l = (l == lim) ? 0 : l + 1;
+		}
+		kring->nr_hwavail -= n;
+		kring->nr_hwcur = k;
+		/* hardware memory barrier that prevents any memory write access from being moved and */
+		/* executed on the other side of the barrier.*/
+		wmb();
+		/*
+		 * IMPORTANT: we must leave one free slot in the ring,
+		 * so move l back by one unit
+		 */
+		l = (l == 0) ? lim : l - 1;
+		mvPp2RxqNonOccupDescAdd(adapter->port, ring_nr, n);
+	}
+	/* tell userspace that there are new packets */
+	ring->avail = kring->nr_hwavail - resvd;
+
+	if (do_lock)
+		mtx_unlock(&kring->q_lock);
+
+	return 0;
+}
+
+
+/* diagnostic routine to catch errors */
+static void mv_pp2_no_rx_alloc(struct SOFTC_T *a, int n)
+{
+	printk("mv_pp2_skb_alloc should not be called");
+}
+
+/*
+ * Make the rx ring point to the netmap buffers.
+ */
+static int pp2_netmap_rxq_init_buffers(struct SOFTC_T *adapter, int rxq)
+{
+	struct ifnet *ifp = adapter->dev; /* struct net_devive */
+	struct netmap_adapter *na = NA(ifp);
+	struct netmap_slot *slot;
+	struct rx_queue   *rxr;
+
+	int i, si;
+	uint64_t paddr;
+	uint32_t *vaddr;
+
+	if (!(adapter->flags & MV_ETH_F_IFCAP_NETMAP))
+		return 0;
+
+	/* initialize the rx ring */
+	slot = netmap_reset(na, NR_RX, rxq, 0);
+	if (!slot) {
+		printk(KERN_ERR "%s: RX slot is null\n", __func__);
+		return 1;
+	}
+	rxr = &(adapter->rxq_ctrl[rxq]);
+
+	for (i = 0; i < rxr->rxq_size; i++) {
+		si = netmap_idx_n2k(&na->rx_rings[rxq], i);
+		vaddr = PNMB(slot + si, &paddr);
+		/* printk(KERN_ERR "paddr = 0x%x, virt = 0x%x\n",
+				(uint32_t)paddr,  (uint32_t)((slot+si)->buf_idx));*/
+
+		/* TODO: use mvBmPoolQsetPut in ppv2.1 */
+		mvBmPoolPut(adapter->pool_long->pool, (uint32_t)paddr, (uint32_t)((slot+si)->buf_idx));
+
+	}
+	rxr->q->queueCtrl.nextToProc = 0;
+	/* Force memory writes to complete */
+	wmb();
+	return 0;
+}
+
+
+/*
+ * Make the tx ring point to the netmap buffers.
+*/
+static int pp2_netmap_txq_init_buffers(struct SOFTC_T *adapter, int txp, int txq)
+{
+	struct ifnet *ifp = adapter->dev;
+	struct netmap_adapter *na = NA(ifp);
+	struct netmap_slot *slot;
+	int q;
+
+	if (!(adapter->flags & MV_ETH_F_IFCAP_NETMAP))
+		return 0;
+
+	q = txp * CONFIG_MV_PP2_TXQ + txq;
+
+	/* initialize the tx ring */
+	slot = netmap_reset(na, NR_TX, q, 0);
+
+	if (!slot) {
+		printk(KERN_ERR "%s: TX slot is null\n", __func__);
+		return 1;
+	}
+
+	return 0;
+}
+
+
+static void
+mv_pp2_netmap_attach(struct SOFTC_T *adapter)
+{
+	struct netmap_adapter na;
+
+	bzero(&na, sizeof(na));
+
+	na.ifp = adapter->dev; /* struct net_device */
+	na.separate_locks = 0;
+	na.num_tx_desc = 256;
+	na.num_rx_desc = adapter->rxq_ctrl->rxq_size;
+	na.nm_register = mv_pp2_netmap_reg;
+	na.nm_txsync = mv_pp2_netmap_txsync;
+	na.nm_rxsync = mv_pp2_netmap_rxsync;
+	na.num_tx_rings = CONFIG_MV_PP2_TXQ;
+	netmap_attach(&na, CONFIG_MV_PP2_RXQ);
+}
+/* end of file */
+
+#endif  /* __MV_PP2_NETMAP_H__ */
diff --git a/drivers/net/ethernet/mvebu_net/pp2/plcr/plcr_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/plcr/plcr_sysfs.c
new file mode 100644
index 000000000000..04a38e253db5
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/plcr/plcr_sysfs.c
@@ -0,0 +1,233 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include "mvCommon.h"
+#include "mvTypes.h"
+#include "plcr/mvPp2PlcrHw.h"
+
+
+static ssize_t plcr_help(char *buf)
+{
+	int off = 0;
+
+	off += scnprintf(buf + off, PAGE_SIZE - off, "all arguments are decimal numbers\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "\n");
+
+	off += scnprintf(buf + off, PAGE_SIZE - off, "cat             help      - Show this help\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "cat             regs      - Show PLCR hardware registers\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "cat             dump      - Dump all policers configuration and status\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo p        > dump      - Dump policer <p> configuration and status\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo p        > v1_tb_dump- Dump policer <p> token bucket counters\n");
+
+
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo period   > period    - Set token update base period\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo 0|1      > rate      - Enable <1> or Disable <0> addition of tokens to token buckets\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo bytes    > min_pkt   - Set minimal packet length\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo 0|1      > edrop     - Enable <1> or Disable <0> early packets drop\n");
+#ifdef CONFIG_MV_ETH_PP2_1
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo mode     > mode      - Set policer mode of operation 0-bank01 1-bank10 2-parallal\n");
+#endif
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo p 0|1    > enable    - Enable <1> or Disable <0> policer <p>\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo p 0|1    > color     - Set color mode for policer <p>: 0-blind, 1-aware\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo p u t    > config    - Set token units <u> and update type <t> for policer <p>\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo p num    > tokens    - Set number of tokens for each update for policer <p>\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo p c e    > bucket    - Set commit <c> and exceed <e> bucket sizes for policer <p>\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo i tr     > cpu_v0_tr - Set value <tr> to CPU (SWF) threshold <i>\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo i tr     > hwf_v0_tr - Set value <tr> to HWF threshold <i>\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo i tr     > cpu_v1_tr - Set value <tr> to CPU (SWF) threshold <i>\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo i tr     > hwf_v1_tr - Set value <tr> to HWF threshold <i>\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo rxq i    > rxq_tr    - Set thershold <i> to be used for RXQ <rxq>\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo txq i    > txq_tr    - Set thershold <i> to be used for TXQ <txq>\n");
+
+	return off;
+}
+
+static ssize_t plcr_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	const char  *name = attr->attr.name;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "help"))
+		return plcr_help(buf);
+
+	if (!strcmp(name, "regs")) {
+		mvPp2PlcrHwRegs();
+	} else	if (!strcmp(name, "dump")) {
+		mvPp2PlcrHwDumpAll();
+	} else {
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static ssize_t plcr_dec_store(struct device *dev,
+			struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0, p = 0, i = 0, v = 0;
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%d %d %d", &p, &i, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "dump")) {
+		mvPp2PlcrHwDumpSingle(p);
+	} else if (!strcmp(name, "v1_tb_dump")) {
+		mvPp2V1PlcrTbCntDump(p);
+	} else	if (!strcmp(name, "period")) {
+		mvPp2PlcrHwBasePeriodSet(p);
+	} else	if (!strcmp(name, "rate")) {
+		mvPp2PlcrHwBaseRateGenEnable(p);
+	} else	if (!strcmp(name, "min_pkt")) {
+		mvPp2PlcrHwMinPktLen(p);
+	} else	if (!strcmp(name, "edrop")) {
+		mvPp2PlcrHwEarlyDropSet(p);
+	} else	if (!strcmp(name, "enable")) {
+		mvPp2PlcrHwEnable(p, i);
+	/* only for ppv2.1 */
+	} else	if (!strcmp(name, "mode")) {
+		mvPp2PlcrHwMode(p);
+	} else  if (!strcmp(name, "color")) {
+		mvPp2PlcrHwColorModeSet(p, i);
+	} else	if (!strcmp(name, "config")) {
+		mvPp2PlcrHwTokenConfig(p, i, v);
+	} else	if (!strcmp(name, "tokens")) {
+		mvPp2PlcrHwTokenValue(p, i);
+	} else	if (!strcmp(name, "bucket")) {
+		mvPp2PlcrHwBucketSizeSet(p, i, v);
+	} else	if (!strcmp(name, "cpu_v0_tr")) {
+		mvPp2V0PlcrHwCpuThreshSet(p, i);
+	} else	if (!strcmp(name, "cpu_v1_tr")) {
+		mvPp2V1PlcrHwCpuThreshSet(p, i);
+	} else	if (!strcmp(name, "hwf_v0_tr")) {
+		mvPp2V0PlcrHwHwfThreshSet(p, i);
+	} else	if (!strcmp(name, "hwf_v1_tr")) {
+		mvPp2V1PlcrHwHwfThreshSet(p, i);
+	} else	if (!strcmp(name, "rxq_tr")) {
+		mvPp2PlcrHwRxqThreshSet(p, i);
+	} else	if (!strcmp(name, "txq_tr")) {
+		mvPp2PlcrHwTxqThreshSet(p, i);
+	} else
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, name);
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+
+static DEVICE_ATTR(help,          S_IRUSR, plcr_show, NULL);
+static DEVICE_ATTR(regs,          S_IRUSR, plcr_show, NULL);
+static DEVICE_ATTR(dump,          S_IRUSR | S_IWUSR, plcr_show, plcr_dec_store);
+
+static DEVICE_ATTR(v1_tb_dump,    S_IWUSR, NULL,     plcr_dec_store);
+static DEVICE_ATTR(period,        S_IWUSR, NULL,     plcr_dec_store);
+static DEVICE_ATTR(rate,          S_IWUSR, NULL,     plcr_dec_store);
+static DEVICE_ATTR(min_pkt,       S_IWUSR, NULL,     plcr_dec_store);
+static DEVICE_ATTR(edrop,         S_IWUSR, NULL,     plcr_dec_store);
+static DEVICE_ATTR(enable,        S_IWUSR, NULL,     plcr_dec_store);
+/*mode - only for ppv2.1 */
+static DEVICE_ATTR(mode  ,        S_IWUSR, NULL,     plcr_dec_store);
+static DEVICE_ATTR(color,         S_IWUSR, NULL,     plcr_dec_store);
+static DEVICE_ATTR(config,        S_IWUSR, NULL,     plcr_dec_store);
+static DEVICE_ATTR(tokens,        S_IWUSR, NULL,     plcr_dec_store);
+static DEVICE_ATTR(bucket,        S_IWUSR, NULL,     plcr_dec_store);
+static DEVICE_ATTR(cpu_v0_tr,     S_IWUSR, NULL,     plcr_dec_store);
+static DEVICE_ATTR(hwf_v0_tr,     S_IWUSR, NULL,     plcr_dec_store);
+static DEVICE_ATTR(cpu_v1_tr,     S_IWUSR, NULL,     plcr_dec_store);
+static DEVICE_ATTR(hwf_v1_tr,     S_IWUSR, NULL,     plcr_dec_store);
+static DEVICE_ATTR(rxq_tr,        S_IWUSR, NULL,     plcr_dec_store);
+static DEVICE_ATTR(txq_tr,        S_IWUSR, NULL,     plcr_dec_store);
+
+
+static struct attribute *plcr_attrs[] = {
+	&dev_attr_help.attr,
+	&dev_attr_regs.attr,
+	&dev_attr_dump.attr,
+	&dev_attr_v1_tb_dump.attr,
+	&dev_attr_period.attr,
+	&dev_attr_rate.attr,
+	&dev_attr_min_pkt.attr,
+	&dev_attr_edrop.attr,
+	&dev_attr_enable.attr,
+	/* mode - only for ppv2.1 */
+	&dev_attr_mode.attr,
+	&dev_attr_color.attr,
+	&dev_attr_config.attr,
+	&dev_attr_tokens.attr,
+	&dev_attr_bucket.attr,
+	&dev_attr_cpu_v0_tr.attr,
+	&dev_attr_hwf_v0_tr.attr,
+	&dev_attr_cpu_v1_tr.attr,
+	&dev_attr_hwf_v1_tr.attr,
+	&dev_attr_rxq_tr.attr,
+	&dev_attr_txq_tr.attr,
+
+	NULL
+};
+
+static struct attribute_group plcr_group = {
+	.name = "plcr",
+	.attrs = plcr_attrs,
+};
+
+int mv_pp2_plcr_sysfs_init(struct kobject *pp2_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(pp2_kobj, &plcr_group);
+	if (err)
+		printk(KERN_INFO "sysfs group %s failed %d\n", plcr_group.name, err);
+
+	return err;
+}
+
+int mv_pp2_plcr_sysfs_exit(struct kobject *pp2_kobj)
+{
+	sysfs_remove_group(pp2_kobj, &plcr_group);
+
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/pme/pme_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/pme/pme_sysfs.c
new file mode 100644
index 000000000000..1d4ff3088114
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/pme/pme_sysfs.c
@@ -0,0 +1,321 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include "mvCommon.h"
+#include "mvTypes.h"
+#include "pme/mvPp2PmeHw.h"
+
+static MV_PP2_PME_ENTRY  mv_pp2_pme_e;
+
+static ssize_t pme_help(char *buf)
+{
+	int off = 0;
+
+	off += scnprintf(buf + off, PAGE_SIZE - off, "t, i, a, b, c, l, s - are dec numbers\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "v, m, e             - are hex numbers\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "\n");
+
+	off += scnprintf(buf + off, PAGE_SIZE - off, "cat              help          - Show this help\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "cat              hw_regs       - Show PME hardware registers\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "cat              sw_dump       - Show PME sw etry\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "cat              hw_i_dump     - Dump valid PME hw entries of the instruction table\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "cat              hw_i_dump_all - Dump all PME hw entries of the instruction table\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "cat              hw_i_inv      - Invalidate all PME hw entries in the table <t>\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo 1         > sw_clear      - Clear PME sw etry\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo i         > hw_i_read     - Read PME hw entry <i> from instruction table into sw entry\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo i         > hw_i_write    - Write sw entry to PME hw entry <i> in the instruction table\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo v         > sw_word       - Set 4 bytes value <v> to sw entry\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo v         > sw_cmd        - Set modification command to instruction table sw entry\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo v         > sw_data       - Set modification data (2 bytes) to to instruction table sw entry\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo a         > sw_type       - Set type of modification command <a> to instruction table sw entry\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo 0|1       > sw_last       - Set/Clear last bit in instruction table sw entry\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo a b c     > sw_flags      - Set/Clear flags: <a>-last, <b>-ipv4csum, <c>-l4csum\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo t         > hw_d_dump     - Dump non zero PME hw entries from the data table <t>\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo t         > hw_d_clear    - Clear all PME hw entries in the data table <t>\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo t i v     > hw_d_write    - Write 2b modification data <v> to entry <i> of data table <t>\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo t i       > hw_d_read     - Read and print 2b modification data from entry <i> of data table <t>\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo i v       > vlan_etype    - Set 2 bytes value <v> of VLAN ethertype <i>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo v         > vlan_def      - Set 2 bytes value <v> of default VLAN ethertype.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo i v       > dsa_etype     - Set 2 bytes value <v> of DSA ethertype <i>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo v         > dsa_src_dev   - Set source device value to be set in DSA tag.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo 0|1       > ttl_zero      - Action for packet with zero TTL: 0-drop, 1-forward.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo v         > pppoe_etype   - Set 2 bytes value <v> of PPPoE ethertype\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo v         > pppoe_len     - Set 2 bytes value <v> of PPPoE length configuration\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo i v       > pppoe_proto   - Set 2 bytes value <v> of PPPoE protocol <i>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo a t v     > pppoe_set     - Set PPPoE header fields: version <a>, type <t> and code <v>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo s i       > max_config    - Set max header size <s bytes> and max instructions <i>.\n");
+
+	return off;
+}
+
+static ssize_t pme_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	const char  *name = attr->attr.name;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "help"))
+		return pme_help(buf);
+
+	if (!strcmp(name, "sw_dump")) {
+		mvPp2PmeSwDump(&mv_pp2_pme_e);
+	} else if (!strcmp(name, "hw_regs")) {
+		mvPp2PmeHwRegs();
+	} else	if (!strcmp(name, "hw_i_dump")) {
+		mvPp2PmeHwDump(0);
+	} else if (!strcmp(name, "hw_i_dump_all")) {
+		mvPp2PmeHwDump(1);
+	} else if (!strcmp(name, "hw_i_inv")) {
+		mvPp2PmeHwInvAll();
+	} else {
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static ssize_t pme_dec_store(struct device *dev,
+			struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0, t = 0, i = 0, v = 0;
+	unsigned long flags;
+	unsigned short data;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%d %d %x", &t, &i, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "hw_i_write")) {
+		err = mvPp2PmeHwWrite(t, &mv_pp2_pme_e);
+	} else if (!strcmp(name, "sw_clear")) {
+		err = mvPp2PmeSwClear(&mv_pp2_pme_e);
+	} else if (!strcmp(name, "hw_i_read")) {
+		err = mvPp2PmeHwRead(t, &mv_pp2_pme_e);
+	} else if (!strcmp(name, "sw_flags")) {
+		err = mvPp2PmeSwCmdFlagsSet(&mv_pp2_pme_e, t, i, v);
+	} else if (!strcmp(name, "sw_last")) {
+		err = mvPp2PmeSwCmdLastSet(&mv_pp2_pme_e, t);
+	} else if (!strcmp(name, "hw_d_dump")) {
+		err = mvPp2PmeHwDataTblDump(t);
+	} else if (!strcmp(name, "hw_d_clear")) {
+		err = mvPp2PmeHwDataTblClear(t);
+	} else if (!strcmp(name, "hw_d_read")) {
+		err = mvPp2PmeHwDataTblRead(t, i, &data);
+		printk(KERN_INFO "Data%d table entry #%d: 0x%04x\n", t, i, data);
+	} else if (!strcmp(name, "hw_d_write")) {
+		data = (unsigned short)v;
+		err = mvPp2PmeHwDataTblWrite(t, i, data);
+	} else if (!strcmp(name, "ttl_zero")) {
+		err = mvPp2PmeTtlZeroSet(t);
+	} else if (!strcmp(name, "max_config")) {
+		err = mvPp2PmeMaxConfig(t, i, v);
+	} else if (!strcmp(name, "pppoe_set")) {
+		err = mvPp2PmeMaxConfig(t, i, v);
+	} else
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, name);
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t pme_dec_hex_store(struct device *dev,
+				struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char	*name = attr->attr.name;
+	unsigned int    err = 0, i = 0, v = 0;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%d %x", &i, &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "vlan_etype"))
+		mvPp2PmeVlanEtherTypeSet(i, v);
+	else if (!strcmp(name, "dsa_etype"))
+		mvPp2PmeDsaDefaultSet(i, v);
+	else if (!strcmp(name, "pppoe_proto"))
+		mvPp2PmePppoeProtoSet(i, v);
+	else
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t pme_hex_store(struct device *dev,
+				struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char	*name = attr->attr.name;
+	unsigned int    err = 0, v = 0;
+	unsigned long   flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%x", &v);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "sw_word"))
+		mvPp2PmeSwWordSet(&mv_pp2_pme_e, v);
+	else if (!strcmp(name, "sw_cmd"))
+		mvPp2PmeSwCmdSet(&mv_pp2_pme_e, v);
+	else if (!strcmp(name, "sw_type"))
+		mvPp2PmeSwCmdTypeSet(&mv_pp2_pme_e, v);
+	else if (!strcmp(name, "sw_data"))
+		mvPp2PmeSwCmdDataSet(&mv_pp2_pme_e, v);
+	else if (!strcmp(name, "vlan_def"))
+		mvPp2PmeVlanDefaultSet(v);
+	else if (!strcmp(name, "dsa_src_dev"))
+		mvPp2PmeDsaSrcDevSet(v);
+	else if (!strcmp(name, "pppoe_etype"))
+		mvPp2PmePppoeEtypeSet(v);
+	else if (!strcmp(name, "pppoe_len"))
+		mvPp2PmePppoeLengthSet(v);
+	else
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(help,          S_IRUSR, pme_show, NULL);
+static DEVICE_ATTR(sw_dump,       S_IRUSR, pme_show, NULL);
+static DEVICE_ATTR(hw_regs,       S_IRUSR, pme_show, NULL);
+static DEVICE_ATTR(hw_i_dump,     S_IRUSR, pme_show, NULL);
+static DEVICE_ATTR(hw_i_dump_all, S_IRUSR, pme_show, NULL);
+static DEVICE_ATTR(hw_i_inv,      S_IRUSR, pme_show, NULL);
+static DEVICE_ATTR(sw_clear,      S_IWUSR, NULL,     pme_dec_store);
+static DEVICE_ATTR(hw_i_write,    S_IWUSR, NULL,     pme_dec_store);
+static DEVICE_ATTR(hw_i_read,     S_IWUSR, NULL,     pme_dec_store);
+static DEVICE_ATTR(sw_flags,      S_IWUSR, NULL,     pme_dec_store);
+static DEVICE_ATTR(sw_last,       S_IWUSR, NULL,     pme_dec_store);
+static DEVICE_ATTR(sw_word,       S_IWUSR, NULL,     pme_hex_store);
+static DEVICE_ATTR(sw_cmd,        S_IWUSR, NULL,     pme_hex_store);
+static DEVICE_ATTR(sw_type,       S_IWUSR, NULL,     pme_hex_store);
+static DEVICE_ATTR(sw_data,       S_IWUSR, NULL,     pme_hex_store);
+static DEVICE_ATTR(hw_d_dump,     S_IWUSR, NULL,     pme_dec_store);
+static DEVICE_ATTR(hw_d_clear,    S_IWUSR, NULL,     pme_dec_store);
+static DEVICE_ATTR(hw_d_write,    S_IWUSR, NULL,     pme_dec_store);
+static DEVICE_ATTR(hw_d_read,     S_IWUSR, NULL,     pme_dec_store);
+static DEVICE_ATTR(vlan_etype,    S_IWUSR, NULL,     pme_dec_hex_store);
+static DEVICE_ATTR(vlan_def,      S_IWUSR, NULL,     pme_hex_store);
+static DEVICE_ATTR(dsa_etype,     S_IWUSR, NULL,     pme_dec_hex_store);
+static DEVICE_ATTR(dsa_src_dev,   S_IWUSR, NULL,     pme_hex_store);
+static DEVICE_ATTR(ttl_zero,      S_IWUSR, NULL,     pme_dec_store);
+static DEVICE_ATTR(pppoe_set,     S_IWUSR, NULL,     pme_dec_store);
+static DEVICE_ATTR(pppoe_etype,   S_IWUSR, NULL,     pme_hex_store);
+static DEVICE_ATTR(pppoe_len,     S_IWUSR, NULL,     pme_hex_store);
+static DEVICE_ATTR(pppoe_proto,   S_IWUSR, NULL,     pme_dec_hex_store);
+static DEVICE_ATTR(max_config,    S_IWUSR, NULL,     pme_dec_store);
+
+
+static struct attribute *pme_attrs[] = {
+	&dev_attr_help.attr,
+	&dev_attr_sw_dump.attr,
+	&dev_attr_sw_clear.attr,
+	&dev_attr_hw_regs.attr,
+	&dev_attr_hw_i_write.attr,
+	&dev_attr_hw_i_read.attr,
+	&dev_attr_hw_i_dump.attr,
+	&dev_attr_hw_i_dump_all.attr,
+	&dev_attr_hw_i_inv.attr,
+	&dev_attr_sw_word.attr,
+	&dev_attr_sw_cmd.attr,
+	&dev_attr_sw_data.attr,
+	&dev_attr_sw_type.attr,
+	&dev_attr_sw_flags.attr,
+	&dev_attr_sw_last.attr,
+	&dev_attr_hw_d_read.attr,
+	&dev_attr_hw_d_write.attr,
+	&dev_attr_hw_d_dump.attr,
+	&dev_attr_hw_d_clear.attr,
+	&dev_attr_vlan_etype.attr,
+	&dev_attr_vlan_def.attr,
+	&dev_attr_dsa_etype.attr,
+	&dev_attr_dsa_src_dev.attr,
+	&dev_attr_ttl_zero.attr,
+	&dev_attr_pppoe_set.attr,
+	&dev_attr_pppoe_etype.attr,
+	&dev_attr_pppoe_len.attr,
+	&dev_attr_pppoe_proto.attr,
+	&dev_attr_max_config.attr,
+
+	NULL
+};
+
+static struct attribute_group pme_group = {
+	.name = "pme",
+	.attrs = pme_attrs,
+};
+
+int mv_pp2_pme_sysfs_init(struct kobject *pp2_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(pp2_kobj, &pme_group);
+	if (err)
+		printk(KERN_INFO "sysfs group %s failed %d\n", pme_group.name, err);
+
+	return err;
+}
+
+int mv_pp2_pme_sysfs_exit(struct kobject *pp2_kobj)
+{
+	sysfs_remove_group(pp2_kobj, &pme_group);
+
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/prs/prs_high_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/prs/prs_high_sysfs.c
new file mode 100644
index 000000000000..6e202052d6be
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/prs/prs_high_sysfs.c
@@ -0,0 +1,243 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include "mvOs.h"
+#include "mvCommon.h"
+#include "prs/mvPp2PrsHw.h"
+#include "prs/mvPp2Prs.h"
+#include "prs_sysfs.h"
+
+static struct kobject *prs_kobj;
+
+static ssize_t mv_prs_high_help(char *b)
+{
+	int o = 0;
+
+	o += scnprintf(b + o, PAGE_SIZE - o, "cd                 debug       - move to parser low level sysfs directory\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "cat                dump        - dump all valid HW entries\n");
+
+	o += scnprintf(b + o, PAGE_SIZE - o, "\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "echo a b c d e     > flow      - Add flow entry to HW\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "                                 flowId <a>, result <b>, result mask <c>, port <d>, tcam index <e>.\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "echo a b [1|0]   > vlan1     - Add/Delete single vlan: tpid1 <a>, port map <b>\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "echo a b c [1|0] > vlan2     - Add/Delete double vlan: tpid1 <a>, tpid2 <b>, port map <c>\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "echo a b c d e   > vlan3     - Triple vlan entry tpid1 <a>, tpid2 <b>, tpid3 <c>\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "                               ports bitmap <d>, add/del <e=1/0>\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "echo 1           > vlan_del  - Delete all vlan entries.\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "echo a b c d e f > mac_range - Add mac entry to HW\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "                               port map <a>, da <b> mask <c>, ri <d> mask <e>, end <f>\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "echo a b c       > mac_del   - Delete mac entry from HW\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "                               port map <a>, da <b>, da mask <c>\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "echo a b c d e   > etype_add - Add ethertype entry to HW\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "                               port map <a>, etype <b>, ri <c> mask <d>, end <e>\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "echo a b         > etype_del - Delete etype <b> entry from HW for ports in port map <a>\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "echo p [0|1|2|3] > tag       - None[0], Marvell Header[1], DSA tag[2], EDSA tag[3]\n");
+	/* etypeDsaMod and etypeDsa meaningless if  port in DSA/EDSA mode */
+	o += scnprintf(b + o, PAGE_SIZE - o, "echo p {0|1}     > etypeMod  - Expected EtherType DSA[0]/EDSA[1] if port tag is not DSA/EDSA\n");
+	/* typeDsa meaningless if  all ports in DSA/EDSA mode */
+	o += scnprintf(b + o, PAGE_SIZE - o, "echo [hex]       > etypeDsa  - Expected DSA/EDSA ethertype [hex]\n");
+	o += scnprintf(b + o, PAGE_SIZE - o, "\n");
+	return o;
+}
+
+
+static ssize_t mv_prs_high_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	const char      *name = attr->attr.name;
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "dump"))
+		mvPp2PrsHwDump();
+	else
+		off += mv_prs_high_help(buf);
+
+	return off;
+}
+
+static ssize_t mv_prs_high_store_unsigned(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0, a = 0, b = 0, c = 0, d = 0, e = 0;
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%x %x %x %x %x", &a, &b, &c, &d, &e);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "flow"))
+		mvPrsFlowIdGen(e, a, b, c, d);
+	else if (!strcmp(name, "vlan1"))
+		mvPp2PrsSingleVlan(a, b, c);
+	else if (!strcmp(name, "vlan2"))
+		mvPp2PrsDoubleVlan(a, b, c, d);
+	else if (!strcmp(name, "vlan3"))
+		mvPp2PrsTripleVlan(a, b, c, d, e);
+	else if (!strcmp(name, "vlan_del"))
+		mvPp2PrsVlanAllDel();
+	else if (!strcmp(name, "etype_add"))
+		mvPrsEthTypeSet(a, b, c, d, e);
+	else if (!strcmp(name, "etype_del"))
+		mvPrsEthTypeDel(a, b);
+	else if (!strcmp(name, "tag"))
+		mvPp2PrsTagModeSet(a, b);
+	else if (!strcmp(name, "etypeMod"))
+		mvPp2PrsEtypeDsaModeSet(a, b);
+	else if (!strcmp(name, "etypeDsa"))
+		mvPp2PrsEtypeDsaSet(a);
+	else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+
+static ssize_t mv_prs_high_store_str(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0, prt = 0, ri = 0, ri_mask = 0, fin = 0;
+	unsigned char da[MV_MAC_ADDR_SIZE];
+	unsigned char da_mask[MV_MAC_ADDR_SIZE];
+
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf,
+		"%x %2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx %2hhx:%2hhx:%2hhx:%2hhx:%2hhx:%2hhx %x %x %x",
+		&prt, da, da+1, da+2, da+3, da+4, da+5,
+		da_mask, da_mask+1, da_mask+2, da_mask+3, da_mask+4, da_mask+5, &ri, &ri_mask, &fin);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "mac_range"))
+		mvPrsMacDaRangeSet(prt, da, da_mask, ri, ri_mask, fin);
+	else if (!strcmp(name, "mac_del"))
+		mvPrsMacDaRangeDel(prt, da, da_mask);
+	else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(dump,		S_IRUSR, mv_prs_high_show, NULL);
+static DEVICE_ATTR(flow,		S_IWUSR, mv_prs_high_show, mv_prs_high_store_unsigned);
+static DEVICE_ATTR(vlan1,		S_IWUSR, mv_prs_high_show, mv_prs_high_store_unsigned);
+static DEVICE_ATTR(vlan2,		S_IWUSR, mv_prs_high_show, mv_prs_high_store_unsigned);
+static DEVICE_ATTR(vlan3,		S_IWUSR, mv_prs_high_show, mv_prs_high_store_unsigned);
+static DEVICE_ATTR(vlan_del,		S_IWUSR, mv_prs_high_show, mv_prs_high_store_unsigned);
+static DEVICE_ATTR(etype_add,		S_IWUSR, mv_prs_high_show, mv_prs_high_store_unsigned);
+static DEVICE_ATTR(etype_del,		S_IWUSR, mv_prs_high_show, mv_prs_high_store_unsigned);
+static DEVICE_ATTR(mac_range,		S_IWUSR, mv_prs_high_show, mv_prs_high_store_str);
+static DEVICE_ATTR(mac_del,		S_IWUSR, mv_prs_high_show, mv_prs_high_store_str);
+static DEVICE_ATTR(tag,			S_IWUSR, mv_prs_high_show, mv_prs_high_store_unsigned);
+static DEVICE_ATTR(etypeMod,		S_IWUSR, mv_prs_high_show, mv_prs_high_store_unsigned);
+static DEVICE_ATTR(etypeDsa,		S_IWUSR, mv_prs_high_show, mv_prs_high_store_unsigned);
+static DEVICE_ATTR(help,		S_IRUSR, mv_prs_high_show, NULL);
+
+
+static struct attribute *prs_high_attrs[] = {
+	&dev_attr_dump.attr,
+	&dev_attr_help.attr,
+	&dev_attr_flow.attr,
+	&dev_attr_vlan1.attr,
+	&dev_attr_vlan2.attr,
+	&dev_attr_vlan3.attr,
+	&dev_attr_vlan_del.attr,
+	&dev_attr_etype_add.attr,
+	&dev_attr_etype_del.attr,
+	&dev_attr_mac_range.attr,
+	&dev_attr_mac_del.attr,
+	&dev_attr_tag.attr,
+	&dev_attr_etypeMod.attr,
+	&dev_attr_etypeDsa.attr,
+    NULL
+};
+
+static struct attribute_group prs_high_group = {
+	.attrs = prs_high_attrs,
+};
+
+int mv_pp2_prs_high_sysfs_init(struct kobject *pp2_kobj)
+{
+	int err;
+
+	prs_kobj = kobject_create_and_add("prs", pp2_kobj);
+
+	if (!prs_kobj) {
+		pr_err("%s: cannot create gbe kobject\n", __func__);
+		return -ENOMEM;
+	}
+
+	err = sysfs_create_group(prs_kobj, &prs_high_group);
+	if (err)
+		pr_err("sysfs group failed %d\n", err);
+
+	mv_pp2_prs_low_sysfs_init(prs_kobj);
+
+	return err;
+
+}
+
+int mv_pp2_prs_high_sysfs_exit(struct kobject *pp2_kobj)
+{
+	mv_pp2_prs_low_sysfs_exit(prs_kobj);
+
+	sysfs_remove_group(pp2_kobj, &prs_high_group);
+
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/prs/prs_low_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/prs/prs_low_sysfs.c
new file mode 100644
index 000000000000..d1412c71570b
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/prs/prs_low_sysfs.c
@@ -0,0 +1,272 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include "mvOs.h"
+#include "mvCommon.h"
+#include "prs/mvPp2PrsHw.h"
+#include "prs/mvPp2Prs.h"
+
+
+static  MV_PP2_PRS_ENTRY pe;
+
+
+static ssize_t mv_prs_low_help(char *buf)
+{
+	int off = 0;
+
+	off += scnprintf(buf + off, PAGE_SIZE - off, "cat          sw_dump       - dump parser SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "cat          hw_dump       - dump all valid HW entries\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "cat          hw_regs       - dump parser registers.\n");
+#ifdef CONFIG_MV_ETH_PP2_1
+	off += scnprintf(buf + off, PAGE_SIZE - off, "cat          hw_hits       - dump non zeroed hit counters and the associated HW entries\n");
+#endif
+	off += scnprintf(buf + off, PAGE_SIZE - off, "\n");
+
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo id      > hw_write    - write parser SW entry into HW place <id>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo id      > hw_read     - read parser entry <id> into SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo 1       > sw_clear    - clear parser SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo id      > hw_inv      - invalidate parser entry <id> in hw.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo         > hw_inv_all  - invalidate all parser entries in HW.\n");
+
+	off += scnprintf(buf + off, PAGE_SIZE - off, "\n");
+
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo p m     > t_port      - add<m=1> or delete<m=0> port<p> in SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo pmap    > t_port_map  - set port map <pmap> to SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo v m     > t_ai        - update ainfo value <v> with mask <m> in SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo o d m   > t_byte      - set byte of data <d> with mask <m> and offset <o> to SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo v       > t_lu        - set lookup id <v> to SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo v m     > s_ri        - set result info value <v> with mask <m> to SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo v m     > s_ai        - set ainfo value <v> with mask <m> to sw entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo v       > s_next_lu   - set next lookup id value <v> to SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo v       > s_shift     - set packet shift value <v> for next lookup to SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo t v     > s_offs      - set offset value <v> for type <t> to SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo v       > s_lu_done   - set (v=1) or clear (v=0) lookup done bit to SW entry.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo v       > s_fid_gen   - set (v=1) or clear (v=0) flowid generate bit in SW entry.\n");
+
+	off += scnprintf(buf + off, PAGE_SIZE - off, "echo p l m o > hw_frst_itr - set values for first iteration port <p>, lookupid <l>, \n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "                             max loops <m>, init offs <o>.\n");
+	off += scnprintf(buf + off, PAGE_SIZE - off, "\n");
+
+	return off;
+}
+
+
+static ssize_t mv_prs_low_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	const char      *name = attr->attr.name;
+	int             off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+	if (!strcmp(name, "hw_dump"))
+		mvPp2PrsHwDump();
+	else if (!strcmp(name, "sw_dump"))
+		mvPp2PrsSwDump(&pe);
+	else if (!strcmp(name, "hw_regs"))
+		mvPp2PrsHwRegsDump();
+	else if (!strcmp(name, "hw_hits"))
+		mvPp2V1PrsHwHitsDump();
+	else
+		off += mv_prs_low_help(buf);
+
+	return off;
+}
+
+static ssize_t mv_prs_low_store_signed(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	int  err = 0, a = 0, b = 0, c = 0, d = 0;
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%d %d %d %d", &a, &b, &c, &d);
+	local_irq_save(flags);
+
+	if (!strcmp(name, "s_shift"))
+		mvPp2PrsSwSramShiftSet(&pe, a, SRAM_OP_SEL_SHIFT_ADD);
+	else if (!strcmp(name, "s_offs"))
+		mvPp2PrsSwSramOffsetSet(&pe, a, b, SRAM_OP_SEL_SHIFT_ADD);
+	else if (!strcmp(name, "hw_frst_itr"))
+		mvPp2PrsHwPortInit(a, b, c, d);
+	else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+
+static ssize_t mv_prs_low_store_unsigned(struct device *dev,
+				   struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0, a = 0, b = 0, c = 0;
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%x %x %x", &a, &b, &c);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "hw_write")) {
+		pe.index = a;
+		mvPp2PrsHwWrite(&pe);
+	} else if (!strcmp(name, "hw_read")) {
+		pe.index = a;
+		mvPp2PrsHwRead(&pe);
+	} else if (!strcmp(name, "sw_clear"))
+		mvPp2PrsSwClear(&pe);
+	else if (!strcmp(name, "hw_inv"))
+		mvPp2PrsHwInv(a);
+	else if (!strcmp(name, "hw_inv_all"))
+		mvPp2PrsHwInvAll();
+	else if (!strcmp(name, "t_port"))
+		mvPp2PrsSwTcamPortSet(&pe, a, b);
+	else if (!strcmp(name, "t_port_map"))
+		mvPp2PrsSwTcamPortMapSet(&pe, a);
+	else if (!strcmp(name, "t_lu"))
+		mvPp2PrsSwTcamLuSet(&pe, a);
+	else if (!strcmp(name, "t_ai"))
+		mvPp2PrsSwTcamAiUpdate(&pe, a, b);
+	else if (!strcmp(name, "t_byte"))
+		mvPp2PrsSwTcamByteSet(&pe, a, b, c);
+	else if (!strcmp(name, "s_ri"))
+		mvPp2PrsSwSramRiUpdate(&pe, a, b);
+	else if (!strcmp(name, "s_ai"))
+		mvPp2PrsSwSramAiUpdate(&pe, a, b);
+	else if (!strcmp(name, "s_next_lu"))
+		mvPp2PrsSwSramNextLuSet(&pe, a);
+	else if (!strcmp(name, "s_lu_done"))
+		(a == 1) ? mvPp2PrsSwSramLuDoneSet(&pe) : mvPp2PrsSwSramLuDoneClear(&pe);
+	else if (!strcmp(name, "s_fid_gen"))
+		(a == 1) ? mvPp2PrsSwSramFlowidGenSet(&pe) : mvPp2PrsSwSramFlowidGenClear(&pe);
+	else {
+		err = 1;
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+	}
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+
+static DEVICE_ATTR(hw_dump,		S_IRUSR, mv_prs_low_show, NULL);
+static DEVICE_ATTR(sw_dump,		S_IRUSR, mv_prs_low_show, NULL);
+static DEVICE_ATTR(help,		S_IRUSR, mv_prs_low_show, NULL);
+static DEVICE_ATTR(hw_regs,		S_IRUSR, mv_prs_low_show, NULL);
+static DEVICE_ATTR(hw_hits,		S_IRUSR, mv_prs_low_show, NULL);
+static DEVICE_ATTR(sw_clear,		S_IWUSR, mv_prs_low_show, mv_prs_low_store_unsigned);
+static DEVICE_ATTR(hw_write,		S_IWUSR, mv_prs_low_show, mv_prs_low_store_unsigned);
+static DEVICE_ATTR(hw_read,		S_IWUSR, mv_prs_low_show, mv_prs_low_store_unsigned);
+static DEVICE_ATTR(hw_inv,		S_IWUSR, mv_prs_low_show, mv_prs_low_store_unsigned);
+static DEVICE_ATTR(hw_inv_all,		S_IWUSR, mv_prs_low_show, mv_prs_low_store_unsigned);
+static DEVICE_ATTR(t_byte,		S_IWUSR, mv_prs_low_show, mv_prs_low_store_unsigned);
+static DEVICE_ATTR(t_port,		S_IWUSR, mv_prs_low_show, mv_prs_low_store_unsigned);
+static DEVICE_ATTR(t_port_map,		S_IWUSR, mv_prs_low_show, mv_prs_low_store_unsigned);
+static DEVICE_ATTR(t_ai,		S_IWUSR, mv_prs_low_show, mv_prs_low_store_unsigned);
+static DEVICE_ATTR(t_lu,		S_IWUSR, mv_prs_low_show, mv_prs_low_store_unsigned);
+static DEVICE_ATTR(s_ri,		S_IWUSR, mv_prs_low_show, mv_prs_low_store_unsigned);
+static DEVICE_ATTR(s_ai,		S_IWUSR, mv_prs_low_show, mv_prs_low_store_unsigned);
+static DEVICE_ATTR(s_next_lu,		S_IWUSR, mv_prs_low_show, mv_prs_low_store_unsigned);
+static DEVICE_ATTR(s_shift,		S_IWUSR, mv_prs_low_show, mv_prs_low_store_signed);
+static DEVICE_ATTR(s_offs,		S_IWUSR, mv_prs_low_show, mv_prs_low_store_signed);
+static DEVICE_ATTR(s_lu_done,		S_IWUSR, mv_prs_low_show, mv_prs_low_store_unsigned);
+static DEVICE_ATTR(s_fid_gen,		S_IWUSR, mv_prs_low_show, mv_prs_low_store_unsigned);
+static DEVICE_ATTR(hw_frst_itr,		S_IWUSR, mv_prs_low_show, mv_prs_low_store_signed);
+
+
+
+static struct attribute *prs_low_attrs[] = {
+	&dev_attr_hw_dump.attr,
+	&dev_attr_sw_dump.attr,
+	&dev_attr_hw_hits.attr,
+	&dev_attr_hw_regs.attr,
+	&dev_attr_hw_write.attr,
+	&dev_attr_hw_read.attr,
+	&dev_attr_hw_inv.attr,
+	&dev_attr_hw_inv_all.attr,
+	&dev_attr_sw_clear.attr,
+	&dev_attr_t_byte.attr,
+	&dev_attr_t_port.attr,
+	&dev_attr_t_port_map.attr,
+	&dev_attr_t_ai.attr,
+	&dev_attr_t_lu.attr,
+	&dev_attr_s_ri.attr,
+	&dev_attr_s_ai.attr,
+	&dev_attr_s_next_lu.attr,
+	&dev_attr_s_shift.attr,
+	&dev_attr_s_offs.attr,
+	&dev_attr_s_lu_done.attr,
+	&dev_attr_s_fid_gen.attr,
+	&dev_attr_hw_frst_itr.attr,
+	&dev_attr_help.attr,
+	NULL
+};
+
+static struct attribute_group prs_low_group = {
+	.name = "debug",
+	.attrs = prs_low_attrs,
+};
+
+int mv_pp2_prs_low_sysfs_init(struct kobject *pp2_kobj)
+{
+	int err;
+
+	err = sysfs_create_group(pp2_kobj, &prs_low_group);
+	if (err)
+		pr_err("sysfs group %s failed %d\n", prs_low_group.name, err);
+
+	return err;
+}
+
+int mv_pp2_prs_low_sysfs_exit(struct kobject *pp2_kobj)
+{
+	sysfs_remove_group(pp2_kobj, &prs_low_group);
+
+	return 0;
+}
+
diff --git a/drivers/net/ethernet/mvebu_net/pp2/prs/prs_sysfs.h b/drivers/net/ethernet/mvebu_net/pp2/prs/prs_sysfs.h
new file mode 100644
index 000000000000..503b62ebc8fc
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/prs/prs_sysfs.h
@@ -0,0 +1,34 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#ifndef __prs_sysfs_h__
+#define __prs_sysfs_h__
+
+int mv_pp2_prs_low_sysfs_init(struct kobject *kobj);
+int mv_pp2_prs_low_sysfs_exit(struct kobject *kobj);
+
+#endif /* __prs_sysfs_h__*/
diff --git a/drivers/net/ethernet/mvebu_net/pp2/wol/wol_sysfs.c b/drivers/net/ethernet/mvebu_net/pp2/wol/wol_sysfs.c
new file mode 100644
index 000000000000..abaf7be8dce9
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/pp2/wol/wol_sysfs.c
@@ -0,0 +1,327 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+
+#include "mvCommon.h"
+#include "mvTypes.h"
+#include "wol/mvPp2Wol.h"
+
+
+static ssize_t wol_help(char *buf)
+{
+	int of = 0;
+
+	of += scnprintf(buf + of, PAGE_SIZE - of, "t, i, a, b, c, l, s - are dec numbers\n");
+	of += scnprintf(buf + of, PAGE_SIZE - of, "v, m, e             - are hex numbers\n");
+	of += scnprintf(buf + of, PAGE_SIZE - of, "\n");
+
+	of += scnprintf(buf + of, PAGE_SIZE - of, "cat            help      - Show this help\n");
+	of += scnprintf(buf + of, PAGE_SIZE - of, "cat            regs      - Show WOL registers\n");
+	of += scnprintf(buf + of, PAGE_SIZE - of, "cat            status    - Show WOL status\n");
+	of += scnprintf(buf + of, PAGE_SIZE - of, "echo port    > sleep     - Enter sleep mode for [port]\n");
+	of += scnprintf(buf + of, PAGE_SIZE - of, "echo 1       > wakeup    - Force wakeup\n");
+	of += scnprintf(buf + of, PAGE_SIZE - of, "echo mac     > magic_mac - Set MAC [a:b:c:d:e:f] for magic pattern\n");
+	of += scnprintf(buf + of, PAGE_SIZE - of, "echo i ip    > arp_ip    - Set IP [a.b.c.d] for ARP IP[i] event\n");
+	of += scnprintf(buf + of, PAGE_SIZE - of, "echo i o d m > ptrn      - Set pattern [i] with data [d] and mask [m] from  header offset [o]\n");
+	of += scnprintf(buf + of, PAGE_SIZE - of, "                           [o] header offset: 0-127\n");
+	of += scnprintf(buf + of, PAGE_SIZE - of, "                           [d] str, in format: b0:b1::b3:::b6\n");
+	of += scnprintf(buf + of, PAGE_SIZE - of, "                           [m] str, in format: ff:ff::ff:::ff\n");
+	of += scnprintf(buf + of, PAGE_SIZE - of, "echo [0|1]   > magic_en  - On/Off wakeup by magic packet\n");
+	of += scnprintf(buf + of, PAGE_SIZE - of, "echo [0|1]   > ucast_en  - On/Off wakeup by Unicast packet\n");
+	of += scnprintf(buf + of, PAGE_SIZE - of, "echo [0|1]   > mcast_en  - On/Off wakeup by Multicast packet\n");
+	of += scnprintf(buf + of, PAGE_SIZE - of, "echo i [0|1] > arp_ip_en - On/Off wakeup by ARP IP [i] packet\n");
+	of += scnprintf(buf + of, PAGE_SIZE - of, "echo i [0|1] > ptrn_en   - On/Off wakeup by pattern [i] packet\n");
+
+	return of;
+}
+
+static ssize_t wol_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	const char  *name = attr->attr.name;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "help"))
+		return wol_help(buf);
+
+	if (!strcmp(name, "regs")) {
+		mvPp2WolRegs();
+	} else if (!strcmp(name, "status")) {
+		mvPp2WolStatus();
+	} else {
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, attr->attr.name);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static ssize_t wol_dec_store(struct device *dev,
+			struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0, t = 0, i = 0;
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%d %d", &i, &t);
+
+	local_irq_save(flags);
+	if (!strcmp(name, "sleep"))
+		err = mvPp2WolSleep(i);
+	else if (!strcmp(name, "wakeup")) {
+		if (i == 1)
+			err = mvPp2WolWakeup();
+	} else if (!strcmp(name, "magic_en")) {
+		mvPp2WolMagicEventSet(i);
+	} else if (!strcmp(name, "arp_ip_en")) {
+		mvPp2WolArpEventSet(i, t);
+	} else if (!strcmp(name, "ptrn_en")) {
+		mvPp2WolPtrnEventSet(i, t);
+	} else if (!strcmp(name, "mcast_en")) {
+		mvPp2WolMcastEventSet(i);
+	} else if (!strcmp(name, "ucast_en")) {
+		mvPp2WolUcastEventSet(i);
+	} else
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, name);
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t wol_mac_store(struct device *dev,
+			struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0;
+	char          macStr[MV_MAC_STR_SIZE];
+	MV_U8         mac[MV_MAC_ADDR_SIZE];
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%s", macStr);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "magic_mac")) {
+		mvMacStrToHex(macStr, mac);
+		err = mvPp2WolMagicDaSet(mac);
+	} else
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, name);
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t wol_ip_store(struct device *dev,
+			struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0;
+	int           i = 0;
+	unsigned char ip[4];
+	__be32        ipaddr;
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%d %hhu.%hhu.%hhu.%hhu", &i, ip, ip + 1, ip + 2, ip + 3);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "arp_ip")) {
+		ipaddr = *(__be32 *)ip;
+		err = mvPp2WolArpIpSet(i, ipaddr);
+	} else
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, name);
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+static int wol_ptrn_get(char *ptrnStr, char *maskStr, MV_U8 *data, MV_U8 *mask, int max_size)
+{
+	int i, j, size;
+	char tmp[3], mask_tmp[3];
+
+	size = strlen(ptrnStr);
+	i = 0;
+	j = 0;
+	while (i < size) {
+		if (j >= max_size) {
+			pr_err("pattern string is too long (max = %d): %s\n",
+				max_size, ptrnStr);
+			return j;
+		}
+
+		if (ptrnStr[i] == ':') {
+			data[j] = 0;
+			mask[j] = 0;
+			j++;
+			i++;
+			continue;
+		}
+		if ((mvCharToHex(ptrnStr[i]) == -1) ||
+		    (mvCharToHex(ptrnStr[i + 1]) == -1) ||
+		    (((i + 2) > size) && (ptrnStr[i + 2] != ':'))) {
+			pr_err("Wrong pattern string format size=%d, i=%d, j=%d: %s\n",
+				size, i, j, &ptrnStr[i]);
+			return -1;
+		}
+
+		tmp[0] = ptrnStr[i];
+		tmp[1] = ptrnStr[i + 1];
+		tmp[2] = '\0';
+		mask_tmp[0] = maskStr[i];
+		mask_tmp[1] = maskStr[i + 1];
+		mask_tmp[2] = '\0';
+		data[j] = (MV_U8) (strtol(tmp, NULL, 16));
+		mask[j] = (MV_U8) (strtol(mask_tmp, NULL, 16));
+		i += 3;
+		j++;
+	}
+	return j;
+}
+
+static ssize_t wol_ptrn_store(struct device *dev,
+			struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char    *name = attr->attr.name;
+	unsigned int  err = 0;
+	int           size, i = 0, off = 0;
+	char          ptrnStr[MV_PP2_WOL_PTRN_BYTES*3];
+	char          maskStr[MV_PP2_WOL_PTRN_BYTES*3];
+	char          data[MV_PP2_WOL_PTRN_BYTES];
+	char          mask[MV_PP2_WOL_PTRN_BYTES];
+	unsigned long flags;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	sscanf(buf, "%d %d %s %s", &i, &off, ptrnStr, maskStr);
+
+	local_irq_save(flags);
+
+	if (!strcmp(name, "ptrn")) {
+		size = wol_ptrn_get(ptrnStr, maskStr, data, mask, MV_PP2_WOL_PTRN_BYTES);
+		if (size != -1)
+			err = mvPp2WolPtrnSet(i, off, size, data, mask);
+		else
+			err = 1;
+	} else
+		printk(KERN_ERR "%s: illegal operation <%s>\n", __func__, name);
+
+	local_irq_restore(flags);
+
+	if (err)
+		printk(KERN_ERR "%s: <%s>, error %d\n", __func__, attr->attr.name, err);
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(help,      S_IRUSR, wol_show, NULL);
+static DEVICE_ATTR(regs,      S_IRUSR, wol_show, NULL);
+static DEVICE_ATTR(status,    S_IRUSR, wol_show, NULL);
+static DEVICE_ATTR(sleep,     S_IWUSR, NULL,     wol_dec_store);
+static DEVICE_ATTR(wakeup,    S_IWUSR, NULL,     wol_dec_store);
+static DEVICE_ATTR(magic_mac, S_IWUSR, NULL,     wol_mac_store);
+static DEVICE_ATTR(arp_ip,    S_IWUSR, NULL,     wol_ip_store);
+static DEVICE_ATTR(ptrn,      S_IWUSR, NULL,     wol_ptrn_store);
+static DEVICE_ATTR(magic_en,  S_IWUSR, NULL,     wol_dec_store);
+static DEVICE_ATTR(arp_ip_en, S_IWUSR, NULL,     wol_dec_store);
+static DEVICE_ATTR(ptrn_en,   S_IWUSR, NULL,     wol_dec_store);
+static DEVICE_ATTR(ucast_en,   S_IWUSR, NULL,    wol_dec_store);
+static DEVICE_ATTR(mcast_en,   S_IWUSR, NULL,    wol_dec_store);
+
+
+static struct attribute *wol_attrs[] = {
+	&dev_attr_help.attr,
+	&dev_attr_regs.attr,
+	&dev_attr_status.attr,
+	&dev_attr_sleep.attr,
+	&dev_attr_wakeup.attr,
+	&dev_attr_magic_mac.attr,
+	&dev_attr_arp_ip.attr,
+	&dev_attr_ptrn.attr,
+	&dev_attr_magic_en.attr,
+	&dev_attr_arp_ip_en.attr,
+	&dev_attr_ptrn_en.attr,
+	&dev_attr_ucast_en.attr,
+	&dev_attr_mcast_en.attr,
+
+	NULL
+};
+
+static struct attribute_group mv_wol_group = {
+	.name = "wol",
+	.attrs = wol_attrs,
+};
+
+#define MV_PP2_WOL_IRQ	110
+
+int mv_pp2_wol_sysfs_init(struct kobject *pp2_kobj)
+{
+	int err = 0;
+
+	err = sysfs_create_group(pp2_kobj, &mv_wol_group);
+	if (err)
+		printk(KERN_INFO "sysfs group %s failed %d\n", mv_wol_group.name, err);
+
+	return err;
+}
+
+int mv_pp2_wol_sysfs_exit(struct kobject *pp2_kobj)
+{
+	sysfs_remove_group(pp2_kobj, &mv_wol_group);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/.gitignore b/drivers/net/ethernet/mvebu_net/switch/.gitignore
new file mode 100644
index 000000000000..60319acce7d0
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/.gitignore
@@ -0,0 +1,96 @@
+
+#
+# NOTE! Don't add files that are generated in specific
+# subdirectories here. Add them in the ".gitignore" file
+# in that subdirectory instead.
+#
+# NOTE! Please use 'git ls-files -i --exclude-standard'
+# command after changing this file, to see if there are
+# any tracked files which get ignored after the change.
+#
+# Normal rules
+#
+.*
+*.o
+*.o.*
+*.a
+*.s
+*.ko
+*.so
+*.so.dbg
+*.mod.c
+*.i
+*.lst
+*.symtypes
+*.order
+modules.builtin
+*.elf
+*.bin
+*.gz
+*.bz2
+*.lzma
+*.xz
+*.lzo
+*.patch
+*.gcno
+
+#
+#
+#
+arch/arm/mach-armadaxp/armada_xp_family/
+arch/arm/mach-avantalp/avanta_lp_family/
+arch/arm/plat-armada/common/
+arch/arm/plat-armada/mv_hal/
+arch/arm/plat-armada/mv_drivers_lsp/mv_pp2/
+
+#
+# Top-level generic files
+#
+/tags
+/TAGS
+/linux
+/vmlinux
+/vmlinuz
+/System.map
+/Module.markers
+/Module.symvers
+
+#
+# Debian directory (make deb-pkg)
+#
+/debian/
+
+#
+# git files that we don't want to ignore even it they are dot-files
+#
+!.gitignore
+!.mailmap
+
+#
+# Generated include files
+#
+include/config
+include/linux/version.h
+include/generated
+arch/*/include/generated
+
+# stgit generated dirs
+patches-*
+
+# quilt's files
+patches
+series
+
+# cscope files
+cscope.*
+ncscope.*
+
+# gnu global files
+GPATH
+GRTAGS
+GSYMS
+GTAGS
+
+*.orig
+*~
+\#*#
diff --git a/drivers/net/ethernet/mvebu_net/switch/Makefile b/drivers/net/ethernet/mvebu_net/switch/Makefile
new file mode 100644
index 000000000000..16cd6fa5e6b7
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/Makefile
@@ -0,0 +1,28 @@
+#
+# Makefile for the Marvell Soho switches
+#
+
+ifeq ($(CONFIG_ARCH_MVEBU),y)
+
+ccflags-y       += $(MVEBU_NET_FLAGS)
+ccflags-y       += $(INCLUDE_DIRS)
+
+else
+
+ifneq ($(MACHINE),)
+include $(srctree)/$(MACHINE)/config/mvRules.mk
+endif
+
+ccflags-y       += -I$(PLAT_PATH_I)/$(LSP_MUX_DIR)
+
+endif # CONFIG_ARCH_MVEBU
+
+ifeq ($(CONFIG_ARCH_AVANTA_LP),y)
+
+obj-$(CONFIG_MV_INCLUDE_SWITCH)	+= mv_switch.o mv_switch_sysfs.o mv_phy.o
+
+else
+
+obj-$(CONFIG_MV_INCLUDE_SWITCH)	+= mv_switch.o mv_switch_sysfs.o
+
+endif # CONFIG_ARCH_AVANTA_LP
diff --git a/drivers/net/ethernet/mvebu_net/switch/mv_phy.c b/drivers/net/ethernet/mvebu_net/switch/mv_phy.c
new file mode 100644
index 000000000000..de41922e1bec
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/mv_phy.c
@@ -0,0 +1,544 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/mv_switch.h>
+#include <linux/module.h>
+
+#include "mvOs.h"
+#include "mvSysHwConfig.h"
+#include "eth-phy/mvEthPhy.h"
+#ifdef MV_INCLUDE_ETH_COMPLEX
+#include "ctrlEnv/mvCtrlEthCompLib.h"
+#endif /* MV_INCLUDE_ETH_COMPLEX */
+
+#include "msApi.h"
+#include "mv_switch.h"
+#include "mv_phy.h"
+#include "mv_mux/mv_mux_netdev.h"
+
+/*******************************************************************************
+* mv_phy_port_power_state_set
+*
+* DESCRIPTION:
+*	The API configures the PHY port state of given switch logical port.
+* INPUTS:
+*	lport  - logical switch PHY port ID.
+*	state  - PHY port power state to set.
+*			GT_TRUE: power on
+*			GT_FALSE: power down
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_phy_port_power_state_set(unsigned int lport, GT_BOOL state)
+{
+	GT_BOOL power_state;
+	GT_BOOL pre_power_state;
+	GT_STATUS rc = GT_OK;
+
+	if (state == GT_TRUE)
+		power_state = GT_FALSE;
+	else
+		power_state = GT_TRUE;
+
+	/* get the current link status */
+	rc = gprtGetPortPowerDown(mv_switch_qd_dev_get(), lport, &pre_power_state);
+	SW_IF_ERROR_STR(rc, "failed to call gprtGetPortPowerDown()\n");
+
+	rc = gprtPortPowerDown(mv_switch_qd_dev_get(), lport, power_state);
+	SW_IF_ERROR_STR(rc, "failed to call gprtPortPowerDown()\n");
+
+	/* since link change event from HW (via interrupt) does not happen
+	   for UP->DOWN link change, only for DOWN->UP after link negotiation,
+	   print a port Down state change for this use case */
+	if (pre_power_state == GT_FALSE && power_state == GT_TRUE)
+		pr_err("Port %d: Link-down\n", lport);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_phy_port_power_state_get
+*
+* DESCRIPTION:
+*	The API gets the PHY port state of given switch logical port.
+* INPUTS:
+*	lport  - logical switch PHY port ID.
+*
+* OUTPUTS:
+*	state  - PHY port power state to set.
+*			GT_TRUE: power on
+*			GT_FALSE: power down
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_phy_port_power_state_get(unsigned int lport, GT_BOOL *state)
+{
+	GT_BOOL power_state;
+	GT_STATUS rc = GT_OK;
+
+	rc = gprtGetPortPowerDown(mv_switch_qd_dev_get(), lport, &power_state);
+	SW_IF_ERROR_STR(rc, "failed to call gprtGetPortPowerDown()\n");
+
+	if (power_state == GT_TRUE)
+		*state = GT_FALSE;
+	else
+		*state = GT_TRUE;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_phy_port_autoneg_mode_set
+*
+* DESCRIPTION:
+*	The API configures the auto negotiation state of given switch logical port.
+* INPUTS:
+*	lport          - logical switch PHY port ID.
+*	autoneg_state  - autonegotiation state, enabled or disabled.
+*	autoneg_mode   - enum:
+*			SPEED_AUTO_DUPLEX_AUTO: Auto for both speed and duplex
+*			SPEED_1000_DUPLEX_AUTO: 1000Mbps and auto duplex
+*			SPEED_100_DUPLEX_AUTO:  100Mbps and auto duplex
+*			SPEED_10_DUPLEX_AUTO:   10Mbps and auto duplex
+*			SPEED_AUTO_DUPLEX_FULL: Auto for speed only and Full duplex
+*			SPEED_AUTO_DUPLEX_HALF: Auto for speed only and Half duplex. (1000Mbps is not supported)
+*			SPEED_1000_DUPLEX_FULL: 1000Mbps Full duplex.
+*			SPEED_1000_DUPLEX_HALF: 1000Mbps half duplex.
+*			SPEED_100_DUPLEX_FULL:  100Mbps Full duplex.
+*			SPEED_100_DUPLEX_HALF:  100Mbps half duplex.
+*			SPEED_10_DUPLEX_FULL:   10Mbps Full duplex.
+*			SPEED_10_DUPLEX_HALF:   10Mbps half duplex.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_phy_port_autoneg_mode_set(unsigned int lport, GT_BOOL autoneg_state, GT_PHY_AUTO_MODE autoneg_mode)
+{
+	GT_STATUS rc = GT_OK;
+
+	if (GT_FALSE == autoneg_state) {
+		rc = gprtPortAutoNegEnable(mv_switch_qd_dev_get(), lport, GT_FALSE);
+		SW_IF_ERROR_STR(rc, "failed to call gprtPortAutoNegEnable()\n");
+
+		return MV_OK;
+	} else	{
+		rc = gprtSetPortAutoMode(mv_switch_qd_dev_get(), lport, autoneg_mode);
+		SW_IF_ERROR_STR(rc, "failed to call gprtSetPortAutoMode()\n");
+
+		rc = gprtPortAutoNegEnable(mv_switch_qd_dev_get(), lport, GT_TRUE);
+		SW_IF_ERROR_STR(rc, "failed to call gprtPortAutoNegEnable()\n");
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_phy_port_autoneg_mode_get
+*
+* DESCRIPTION:
+*	The API gets the auto negotiation state of given switch logical port.
+* INPUTS:
+*	lport          - logical switch PHY port ID.
+*
+* OUTPUTS:
+*	autoneg_state  - autonegotiation state, enabled or disabled.
+*	autoneg_mode   - enum:
+*			SPEED_AUTO_DUPLEX_AUTO: Auto for both speed and duplex
+*			SPEED_1000_DUPLEX_AUTO: 1000Mbps and auto duplex
+*			SPEED_100_DUPLEX_AUTO:  100Mbps and auto duplex
+*			SPEED_10_DUPLEX_AUTO:   10Mbps and auto duplex
+*			SPEED_AUTO_DUPLEX_FULL: Auto for speed only and Full duplex
+*			SPEED_AUTO_DUPLEX_HALF: Auto for speed only and Half duplex. (1000Mbps is not supported)
+*			SPEED_1000_DUPLEX_FULL: 1000Mbps Full duplex.
+*			SPEED_1000_DUPLEX_HALF: 1000Mbps half duplex.
+*			SPEED_100_DUPLEX_FULL:  100Mbps Full duplex.
+*			SPEED_100_DUPLEX_HALF:  100Mbps half duplex.
+*			SPEED_10_DUPLEX_FULL:   10Mbps Full duplex.
+*			SPEED_10_DUPLEX_HALF:   10Mbps half duplex.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_phy_port_autoneg_mode_get(unsigned int lport, GT_BOOL *autoneg_state, GT_PHY_AUTO_MODE *autoneg_mode)
+{
+	GT_STATUS rc = GT_OK;
+
+	rc = gprtGetPortAutoNegState(mv_switch_qd_dev_get(), lport, autoneg_state);
+	SW_IF_ERROR_STR(rc, "failed to call gprtGetPortAutoNegState()\n");
+
+	rc = gprtGetPortAutoMode(mv_switch_qd_dev_get(), lport, autoneg_mode);
+	SW_IF_ERROR_STR(rc, "failed to call gprtGetPortAutoMode()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_phy_port_autoneg_restart
+*
+* DESCRIPTION:
+*	The API restarts the auto negotiation of given switch logical port.
+* INPUTS:
+*	lport - logical switch PHY port ID.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_phy_port_autoneg_restart(unsigned int lport)
+{
+	GT_STATUS rc = GT_OK;
+
+	rc = gprtPortRestartAutoNeg(mv_switch_qd_dev_get(), lport);
+	SW_IF_ERROR_STR(rc, "failed to call gprtPortRestartAutoNeg()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_phy_port_pause_mode_set
+*
+* DESCRIPTION:
+*	This routine will set the pause bit in Autonegotiation Advertisement
+*	Register. And restart the autonegotiation.
+*
+* INPUTS:
+*	lport - logical switch PHY port ID.	The physical address, if SERDES device is accessed
+*	state - GT_PHY_PAUSE_MODE enum value.
+*		GT_PHY_NO_PAUSE		- disable pause
+*		GT_PHY_PAUSE		- support pause
+*		GT_PHY_ASYMMETRIC_PAUSE	- support asymmetric pause
+*		GT_PHY_BOTH_PAUSE	- support both pause and asymmetric pause
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+* COMMENTS:
+*	Data sheet register 4.10 Autonegotiation Advertisement Register
+*******************************************************************************/
+int mv_phy_port_pause_mode_set(unsigned int lport, GT_PHY_PAUSE_MODE state)
+{
+	GT_STATUS rc = GT_OK;
+
+	rc = gprtSetPause(mv_switch_qd_dev_get(), lport, state);
+	SW_IF_ERROR_STR(rc, "failed to call gprtSetPause()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_phy_port_pause_mode_get
+*
+* DESCRIPTION:
+*	This routine will get the pause bit in Autonegotiation Advertisement
+*	Register.
+*
+* INPUTS:
+*	lport - logical switch PHY port ID.
+*
+* OUTPUTS:
+*	state - GT_PHY_PAUSE_MODE enum value.
+*		GT_PHY_NO_PAUSE		- disable pause
+*		GT_PHY_PAUSE		- support pause
+*		GT_PHY_ASYMMETRIC_PAUSE	- support asymmetric pause
+*		GT_PHY_BOTH_PAUSE	- support both pause and asymmetric pause
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+* COMMENTS:
+*	Data sheet register 4.10 Autonegotiation Advertisement Register
+*******************************************************************************/
+int mv_phy_port_pause_mode_get(unsigned int lport, GT_PHY_PAUSE_MODE *state)
+{
+	GT_STATUS rc = GT_OK;
+
+	rc = gprtGetPause(mv_switch_qd_dev_get(), lport, state);
+	SW_IF_ERROR_STR(rc, "failed to call gprtGetPause()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_phy_port_pause_state_get
+*
+* DESCRIPTION:
+*	This routine will get the current pause state.
+*	Register.
+*
+* INPUTS:
+*	lport - logical switch PHY port ID.
+*
+* OUTPUTS:
+*	state - pause state
+*		GT_FALSE: MAC pause is not implemented
+*		GT_TRUE: MAC pause is implemented
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_phy_port_pause_state_get(unsigned int lport, GT_BOOL *state)
+{
+	GT_BOOL force;
+	GT_BOOL pause;
+	GT_STATUS rc = GT_OK;
+
+	rc = gpcsGetForcedFC(mv_switch_qd_dev_get(), lport, &force);
+	SW_IF_ERROR_STR(rc, "failed to call gpcsGetForcedFC()\n");
+
+	if (force) {
+		rc = gpcsGetFCValue(mv_switch_qd_dev_get(), lport, &pause);
+		SW_IF_ERROR_STR(rc, "failed to call gpcsGetFCValue()\n");
+	} else {
+		rc = gprtGetPauseEn(mv_switch_qd_dev_get(), lport, &pause);
+		SW_IF_ERROR_STR(rc, "failed to call gprtGetPauseEn()\n");
+	}
+
+	*state = pause;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_phy_port_egr_loopback_set
+*
+* DESCRIPTION:
+*	Enable/Disable egress loopback of switch PHY port,
+*       and enable/disable force link port.
+*
+* INPUTS:
+*	lport  - logical switch PHY port ID.
+*	enable - enable/disable egree loopback.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*
+* COMMENTS:
+*	Data sheet register 0.14 - Loop_back
+*******************************************************************************/
+int mv_phy_port_egr_loopback_set(unsigned int lport, GT_BOOL enable)
+{
+	GT_STATUS rc = GT_OK;
+	GT_BOOL link_forced;
+
+	/*clear the PHY detect bit before enable loopback to preven the port from getting locked up due the PPU bug*/
+	if (enable == GT_TRUE) {
+		rc = gprtSetPHYDetect(mv_switch_qd_dev_get(), lport, GT_FALSE);
+		SW_IF_ERROR_STR(rc, "failed to call gprtSetPHYDetect()\n");
+	}
+
+	rc = gprtSetPortLoopback(mv_switch_qd_dev_get(), lport, enable);
+	SW_IF_ERROR_STR(rc, "failed to call gprtSetPortLoopback()\n");
+
+	/*restore the PHY detect bit after disable loopback*/
+	if (enable == GT_FALSE) {
+		rc = gprtSetPHYDetect(mv_switch_qd_dev_get(), lport, GT_TRUE);
+		SW_IF_ERROR_STR(rc, "failed to call gprtSetPHYDetect()\n");
+	}
+
+	/* Get port force link statue */
+	rc = gpcsGetForcedLink(mv_switch_qd_dev_get(), lport, &link_forced);
+	SW_IF_ERROR_STR(rc, "failed to call gpcsGetForcedLink()\n");
+	if (((enable == GT_TRUE) && (link_forced == GT_FALSE)) ||
+	    ((enable == GT_FALSE) && (link_forced == GT_TRUE))) {
+		/* Set force link */
+		rc = gpcsSetForcedLink(mv_switch_qd_dev_get(), lport, enable);
+		SW_IF_ERROR_STR(rc, "failed to call gpcsSetForcedLink()\n");
+		/* Set force link value */
+		rc = gpcsSetLinkValue(mv_switch_qd_dev_get(), lport, enable);
+		SW_IF_ERROR_STR(rc, "failed to call gpcsSetLinkValue()\n");
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_phy_port_egr_loopback_get
+*
+* DESCRIPTION:
+*	This API get enabled/disabled state of egress loopback of switch PHY port.
+*
+* INPUTS:
+*	lport  - logical switch PHY port ID.
+*
+* OUTPUTS:
+*	enable - enable/disable egress loopback.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*
+* COMMENTS:
+*	Data sheet register 0.14 - Loop_back
+*******************************************************************************/
+int mv_phy_port_egr_loopback_get(unsigned int lport, GT_BOOL *enable)
+{
+	GT_STATUS rc = GT_OK;
+
+	rc = gprtGetPortLoopback(mv_switch_qd_dev_get(), lport, enable);
+	SW_IF_ERROR_STR(rc, "failed to call gprtGetPortLoopback()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_phy_port_ingr_loopback_set
+*
+* DESCRIPTION:
+*	This API sets enabled/disabled state of ingress loopback of switch PHY port.
+*
+* INPUTS:
+*	lport  - logical switch PHY port ID.
+*	enable - enable/disable ingress loopback.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*
+* COMMENTS:
+*	Data sheet register FE:28.4, GE:21_2.14  - Loop_back
+********************************************************************************/
+int mv_phy_port_ingr_loopback_set(unsigned int lport, GT_BOOL enable)
+{
+	GT_STATUS rc = GT_OK;
+
+	rc = gprtSetPortLineLoopback(mv_switch_qd_dev_get(), lport, enable);
+	SW_IF_ERROR_STR(rc, "failed to call gprtSetPortLineLoopback()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_phy_port_ingr_loopback_get
+*
+* DESCRIPTION:
+*	This API gets enabled/disabled state of ingress loopback of switch PHY port.
+*
+* INPUTS:
+*	lport  - logical switch PHY port ID.
+*
+* OUTPUTS:
+*	enable - enable/disable ingress loopback.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*
+* COMMENTS:
+*	Data sheet register FE:28.4, GE:21_2.14  - Loop_back
+********************************************************************************/
+int mv_phy_port_ingr_loopback_get(unsigned int lport, GT_BOOL *enable)
+{
+	GT_STATUS rc = GT_OK;
+
+	rc = gprtGetPortLineLoopback(mv_switch_qd_dev_get(), lport, enable);
+	SW_IF_ERROR_STR(rc, "failed to call gprtGetPortLineLoopback()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_phy_port_speed_set
+*
+* DESCRIPTION:
+*	This API sets the speed of switch PHY port.
+*
+* INPUTS:
+*	lport  - logical switch PHY port ID.
+*	enable - enable/disable ingress loopback.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+********************************************************************************/
+int mv_phy_port_speed_set(unsigned int lport, GT_PHY_SPEED speed)
+{
+	GT_STATUS rc = GT_OK;
+
+	rc = gprtSetPortSpeed(mv_switch_qd_dev_get(), lport, speed);
+	SW_IF_ERROR_STR(rc, "failed to call gprtSetPortSpeed()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_phy_port_duplex_set
+*
+* DESCRIPTION:
+*	This API sets the duplex mode of switch PHY port.
+*
+* INPUTS:
+*	lport  - logical switch PHY port ID.
+*	mode   - enable or disable duplex mode.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+********************************************************************************/
+int mv_phy_port_duplex_set(unsigned int lport, GT_BOOL mode)
+{
+	GT_STATUS rc = GT_OK;
+
+	rc = gprtSetPortDuplexMode(mv_switch_qd_dev_get(), lport, mode);
+	SW_IF_ERROR_STR(rc, "failed to call gprtSetPortDuplexMode()\n");
+
+	return MV_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/mv_phy.h b/drivers/net/ethernet/mvebu_net/switch/mv_phy.h
new file mode 100644
index 000000000000..d24316d78d4f
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/mv_phy.h
@@ -0,0 +1,46 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#ifndef __mv_phy_h__
+#define __mv_phy_h__
+
+
+int mv_phy_port_power_state_set(unsigned int lport, GT_BOOL state);
+int mv_phy_port_power_state_get(unsigned int lport, GT_BOOL *state);
+int mv_phy_port_autoneg_mode_set(unsigned int lport, GT_BOOL autoneg_state, GT_PHY_AUTO_MODE autoneg_mode);
+int mv_phy_port_autoneg_mode_get(unsigned int lport, GT_BOOL *autoneg_state, GT_PHY_AUTO_MODE *autoneg_mode);
+int mv_phy_port_autoneg_restart(unsigned int lport);
+int mv_phy_port_pause_mode_set(unsigned int lport, GT_PHY_PAUSE_MODE state);
+int mv_phy_port_pause_mode_get(unsigned int lport, GT_PHY_PAUSE_MODE *state);
+int mv_phy_port_pause_state_get(unsigned int lport, GT_BOOL *state);
+int mv_phy_port_egr_loopback_set(unsigned int lport, GT_BOOL enable);
+int mv_phy_port_egr_loopback_get(unsigned int lport, GT_BOOL *enable);
+int mv_phy_port_ingr_loopback_set(unsigned int lport, GT_BOOL enable);
+int mv_phy_port_ingr_loopback_get(unsigned int lport, GT_BOOL *enable);
+int mv_phy_port_speed_set(unsigned int lport, GT_PHY_SPEED speed);
+int mv_phy_port_duplex_set(unsigned int lport, GT_BOOL mode);
+#endif /* __mv_phy_h__ */
diff --git a/drivers/net/ethernet/mvebu_net/switch/mv_switch.c b/drivers/net/ethernet/mvebu_net/switch/mv_switch.c
new file mode 100644
index 000000000000..697f0730be9a
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/mv_switch.c
@@ -0,0 +1,5556 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/mv_switch.h>
+#include <linux/module.h>
+
+#include "mvOs.h"
+#include "mvSysHwConfig.h"
+#include "eth-phy/mvEthPhy.h"
+#ifdef MV_INCLUDE_ETH_COMPLEX
+#include "ctrlEnv/mvCtrlEthCompLib.h"
+#endif /* MV_INCLUDE_ETH_COMPLEX */
+
+#include "msApi.h"
+#include "h/platform/gtMiiSmiIf.h"
+#include "h/driver/gtHwCntl.h"
+#include "mv_switch.h"
+#include "mv_phy.h"
+#include "mv_mux_netdev.h"
+
+/*void mv_eth_switch_interrupt_clear(void);
+void mv_eth_switch_interrupt_unmask(void);*/
+
+#define MV_SWITCH_DEF_INDEX     0
+#define MV_ETH_PORT_0           0
+#define MV_ETH_PORT_1           1
+
+static u16	db_port_mask[MV_SWITCH_DB_NUM];
+static u16	db_link_mask[MV_SWITCH_DB_NUM];
+static void	*db_cookies[MV_SWITCH_DB_NUM];
+
+static struct sw_port_info_t	sw_port_tbl[MV_SWITCH_MAX_PORT_NUM];
+static struct sw_vlan_info_t	sw_vlan_tbl[MV_SWITCH_MAX_VLAN_NUM];
+
+/* uncomment for debug prints */
+/* #define SWITCH_DEBUG */
+
+#define SWITCH_DBG_OFF      0x0000
+#define SWITCH_DBG_LOAD     0x0001
+#define SWITCH_DBG_MCAST    0x0002
+#define SWITCH_DBG_VLAN     0x0004
+#define SWITCH_DBG_ALL      0xffff
+
+#ifdef SWITCH_DEBUG
+static u32 switch_dbg = 0xffff;
+#define SWITCH_DBG(FLG, X) if ((switch_dbg & (FLG)) == (FLG)) printk X
+#else
+#define SWITCH_DBG(FLG, X)
+#endif /* SWITCH_DEBUG */
+
+static GT_QD_DEV qddev, *qd_dev = NULL;
+static GT_SYS_CONFIG qd_cfg;
+
+static int qd_cpu_port = -1;
+static int enabled_ports_mask;
+static int switch_ports_mask;
+static MV_TAG_TYPE tag_mode;
+static MV_SWITCH_PRESET_TYPE preset;
+static int default_vid;
+static int gbe_port;
+
+static const struct mv_switch_mux_ops *mux_ops;
+static const struct mv_mux_switch_ops switch_ops;
+
+static struct tasklet_struct link_tasklet;
+static int switch_irq = -1;
+int switch_link_poll = 0;
+static struct timer_list switch_link_timer;
+
+static spinlock_t switch_lock;
+
+static unsigned int mv_switch_link_detection_init(struct mv_switch_pdata *plat_data);
+
+
+#ifdef CONFIG_AVANTA_LP
+static GT_BOOL mv_switch_mii_read(GT_QD_DEV *dev, unsigned int phy, unsigned int reg, unsigned int *data)
+{
+	unsigned long flags;
+	MV_U32 result, offset = 0;
+
+	spin_lock_irqsave(&switch_lock, flags);
+
+	offset |= (0x3 << 16);
+	offset |= (phy << 7);
+	offset |= (reg << 2);
+
+	result = MV_REG_READ(offset);
+
+	*data = result;
+
+	spin_unlock_irqrestore(&switch_lock, flags);
+
+	return GT_TRUE;
+}
+#else
+static GT_BOOL mv_switch_mii_read(GT_QD_DEV *dev, unsigned int phy, unsigned int reg, unsigned int *data)
+{
+	unsigned long flags;
+	unsigned short tmp;
+	MV_STATUS status;
+
+	spin_lock_irqsave(&switch_lock, flags);
+	status = mvEthPhyRegRead(phy, reg, &tmp);
+	spin_unlock_irqrestore(&switch_lock, flags);
+	*data = tmp;
+
+	if (status == MV_OK)
+		return GT_TRUE;
+
+	return GT_FALSE;
+}
+#endif
+
+#ifdef CONFIG_AVANTA_LP
+static GT_BOOL mv_switch_mii_write(GT_QD_DEV *dev, unsigned int phy, unsigned int reg, unsigned int data)
+{
+	unsigned long flags;
+	MV_U32 offset = 0;
+
+	spin_lock_irqsave(&switch_lock, flags);
+
+	offset |= (0x3 << 16);
+	offset |= (phy << 7);
+	offset |= (reg << 2);
+
+	MV_REG_WRITE(offset, data);
+
+	spin_unlock_irqrestore(&switch_lock, flags);
+
+	return GT_TRUE;
+}
+#else
+static GT_BOOL mv_switch_mii_write(GT_QD_DEV *dev, unsigned int phy, unsigned int reg, unsigned int data)
+{
+	unsigned long flags;
+	unsigned short tmp;
+	MV_STATUS status;
+
+	spin_lock_irqsave(&switch_lock, flags);
+	tmp = (unsigned short)data;
+	status = mvEthPhyRegWrite(phy, reg, tmp);
+	spin_unlock_irqrestore(&switch_lock, flags);
+
+	if (status == MV_OK)
+		return GT_TRUE;
+
+	return GT_FALSE;
+}
+#endif
+
+static int mv_switch_port_db_get(int port)
+{
+	int db;
+
+	for (db = 0; db < MV_SWITCH_DB_NUM; db++) {
+		if (db_port_mask[db] & (1 << port))
+			return db;
+	}
+
+	return -1;
+}
+
+int mv_switch_default_config_get(MV_TAG_TYPE *tag_mode_val,
+			MV_SWITCH_PRESET_TYPE *preset_val, int *vid_val, int *gbe_port_val)
+{
+	*tag_mode_val = tag_mode;
+	*preset_val = preset;
+	*vid_val = default_vid;
+	*gbe_port_val = gbe_port;
+
+	return 0;
+}
+
+/* return true if db is exist, else return false */
+bool mv_switch_tag_get(int db, MV_TAG_TYPE tag_mode, MV_SWITCH_PRESET_TYPE preset, int vid, MV_MUX_TAG *tag)
+{
+	unsigned int p, port_mask = db_port_mask[db];
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FALSE, "switch dev qd_dev has not been init!\n");
+
+	if (db_port_mask[db] == 0)
+		return MV_FALSE;
+
+	tag->tag_type = tag_mode;
+
+	if (preset == MV_PRESET_SINGLE_VLAN) {
+		if (tag_mode == MV_TAG_TYPE_MH) {
+			tag->rx_tag_ptrn.mh = MV_16BIT_BE(vid << 12);
+			tag->rx_tag_mask.mh = MV_16BIT_BE(0xf000);
+			tag->tx_tag.mh = MV_16BIT_BE((vid << 12) | port_mask);
+		} else if (tag_mode == MV_TAG_TYPE_DSA) {
+			tag->rx_tag_ptrn.dsa = MV_32BIT_BE(0xc8000000 | MV_SWITCH_GROUP_VLAN_ID(vid));
+			tag->rx_tag_mask.dsa = MV_32BIT_BE(0xff000f00);
+			tag->tx_tag.dsa = MV_32BIT_BE(0xc8000000 | MV_SWITCH_GROUP_VLAN_ID(vid));
+		}
+	} else if (preset == MV_PRESET_PER_PORT_VLAN) {
+		for (p = 0; p < qd_dev->numOfPorts; p++)
+			if (MV_BIT_CHECK(port_mask, p) && (p != qd_cpu_port)) {
+				if (tag_mode == MV_TAG_TYPE_MH) {
+					tag->rx_tag_ptrn.mh = MV_16BIT_BE((vid + p) << 12);
+					tag->rx_tag_mask.mh = MV_16BIT_BE(0xf000);
+					tag->tx_tag.mh = MV_16BIT_BE(((vid + p) << 12) | (1 << p));
+				} else if (tag_mode == MV_TAG_TYPE_DSA) {
+					tag->rx_tag_ptrn.dsa =
+						MV_32BIT_BE(0xc8000000 | MV_SWITCH_GROUP_VLAN_ID(vid + p));
+					tag->rx_tag_mask.dsa = MV_32BIT_BE(0xff000f00);
+					tag->tx_tag.dsa = MV_32BIT_BE(0xc8000000 | MV_SWITCH_GROUP_VLAN_ID(vid + p));
+				}
+			}
+	} /* do nothing if Transparent mode */
+
+	return MV_TRUE;
+}
+
+unsigned int mv_switch_group_map_get(void)
+{
+	unsigned int res = 0, db;
+
+	for (db = 0; db < MV_SWITCH_DB_NUM; db++) {
+		if (db_port_mask[db] != 0)
+			res |= (1 << db);
+	}
+
+	return res;
+}
+
+static int mv_switch_group_state_set(int db, int en)
+{
+	unsigned int p, port_mask = db_port_mask[db];
+
+	MV_IF_NULL_RET_STR(qd_dev, -1, "switch dev qd_dev has not been init!\n");
+
+	/* enable/disable all ports in group */
+	for (p = 0; p < qd_dev->numOfPorts; p++) {
+		if (!MV_BIT_CHECK(port_mask, p))
+			continue;
+
+		if (en) {
+			if (gstpSetPortState(qd_dev, p, GT_PORT_FORWARDING) != GT_OK) {
+				printk(KERN_ERR "gstpSetPortState failed\n");
+				return -1;
+			}
+		} else {
+			if (gstpSetPortState(qd_dev, p, GT_PORT_DISABLE) != GT_OK) {
+				printk(KERN_ERR "gstpSetPortState failed\n");
+				return -1;
+			}
+		}
+	}
+
+	return 0;
+}
+
+int mv_switch_group_restart_autoneg(int db)
+{
+	unsigned int p, port_mask = db_port_mask[db];
+
+	MV_IF_NULL_RET_STR(qd_dev, -1, "switch dev qd_dev has not been init!\n");
+
+	/* enable/disable all ports in group */
+	for (p = 0; p < qd_dev->numOfPorts; p++) {
+		if (!MV_BIT_CHECK(port_mask, p))
+			continue;
+
+		if (gprtPortRestartAutoNeg(qd_dev, p) != GT_OK) {
+			printk(KERN_ERR "gprtPortRestartAutoNeg failed\n");
+			return -1;
+		}
+	}
+	return 0;
+}
+
+int mv_switch_group_enable(int db)
+{
+	return mv_switch_group_state_set(db, 1);
+}
+
+int mv_switch_group_disable(int db)
+{
+	return mv_switch_group_state_set(db, 0);
+}
+
+int mv_switch_link_status_get(int db)
+{
+	return (db_link_mask[db] > 0);
+}
+
+int mv_switch_mux_ops_set(const struct mv_switch_mux_ops *mux_ops_ptr)
+{
+	mux_ops = mux_ops_ptr;
+
+	return 0;
+}
+
+int mv_switch_group_cookie_set(int db, void *cookie)
+{
+	db_cookies[db] = cookie;
+
+	return 0;
+}
+
+int mv_switch_mac_update(int db, unsigned char *old_mac, unsigned char *new_mac)
+{
+	int err;
+
+	/* remove old mac */
+	err = mv_switch_mac_addr_set(db, old_mac, 0);
+	if (err)
+		return err;
+
+	/* add new mac */
+	err = mv_switch_mac_addr_set(db, new_mac, 1);
+
+	return err;
+}
+int mv_switch_mac_addr_set(int db, unsigned char *mac_addr, unsigned char op)
+{
+	GT_ATU_ENTRY mac_entry;
+	unsigned int ports_mask;
+
+	MV_IF_NULL_RET_STR(qd_dev, -1, "switch dev qd_dev has not been init!\n");
+
+	memset(&mac_entry, 0, sizeof(GT_ATU_ENTRY));
+
+	mac_entry.trunkMember = GT_FALSE;
+	mac_entry.prio = 0;
+	mac_entry.exPrio.useMacFPri = GT_FALSE;
+	mac_entry.exPrio.macFPri = 0;
+	mac_entry.exPrio.macQPri = 0;
+	mac_entry.DBNum = db;
+	memcpy(mac_entry.macAddr.arEther, mac_addr, 6);
+
+	if (is_multicast_ether_addr(mac_addr)) {
+		ports_mask = db_port_mask[db] | (1 << qd_cpu_port);
+		mac_entry.entryState.mcEntryState = GT_MC_STATIC;
+	} else {
+		ports_mask = (1 << qd_cpu_port);
+		mac_entry.entryState.ucEntryState = GT_UC_NO_PRI_STATIC;
+	}
+	mac_entry.portVec = ports_mask;
+
+	if ((op == 0) || (mac_entry.portVec == 0)) {
+		if (gfdbDelAtuEntry(qd_dev, &mac_entry) != GT_OK) {
+			printk(KERN_ERR "gfdbDelAtuEntry failed\n");
+			return -1;
+		}
+	} else {
+		if (gfdbAddMacEntry(qd_dev, &mac_entry) != GT_OK) {
+			printk(KERN_ERR "gfdbAddMacEntry failed\n");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+int mv_switch_port_based_vlan_set(unsigned int ports_mask, int set_cpu_port)
+{
+	unsigned int p, pl;
+	unsigned char cnt;
+	GT_LPORT port_list[MAX_SWITCH_PORTS];
+
+	MV_IF_NULL_RET_STR(qd_dev, -1, "switch dev qd_dev has not been init!\n");
+
+	for (p = 0; p < qd_dev->numOfPorts; p++) {
+		if (MV_BIT_CHECK(ports_mask, p) && (set_cpu_port || (p != qd_cpu_port))) {
+			SWITCH_DBG(SWITCH_DBG_LOAD | SWITCH_DBG_MCAST | SWITCH_DBG_VLAN,
+				   ("port based vlan, port %d: ", p));
+			for (pl = 0, cnt = 0; pl < qd_dev->numOfPorts; pl++) {
+				if (MV_BIT_CHECK(ports_mask, pl) && (pl != p)) {
+					SWITCH_DBG(SWITCH_DBG_LOAD | SWITCH_DBG_MCAST | SWITCH_DBG_VLAN, ("%d ", pl));
+					port_list[cnt] = pl;
+					cnt++;
+				}
+			}
+			if (gvlnSetPortVlanPorts(qd_dev, p, port_list, cnt) != GT_OK) {
+				printk(KERN_ERR "gvlnSetPortVlanPorts failed\n");
+				return -1;
+			}
+			SWITCH_DBG(SWITCH_DBG_LOAD | SWITCH_DBG_MCAST | SWITCH_DBG_VLAN, ("\n"));
+		}
+	}
+	return 0;
+}
+
+int mv_switch_vlan_in_vtu_set(unsigned short vlan_id, unsigned short db_num, unsigned int ports_mask)
+{
+	GT_VTU_ENTRY vtu_entry;
+	unsigned int p;
+
+	MV_IF_NULL_RET_STR(qd_dev, -1, "switch dev qd_dev has not been init!\n");
+
+	memset(&vtu_entry, 0, sizeof(GT_VTU_ENTRY));
+	vtu_entry.sid = 1;
+	vtu_entry.vid = vlan_id;
+	vtu_entry.DBNum = db_num;
+	vtu_entry.vidPriOverride = GT_FALSE;
+	vtu_entry.vidPriority = 0;
+	vtu_entry.vidExInfo.useVIDFPri = GT_FALSE;
+	vtu_entry.vidExInfo.vidFPri = 0;
+	vtu_entry.vidExInfo.useVIDQPri = GT_FALSE;
+	vtu_entry.vidExInfo.vidQPri = 0;
+	vtu_entry.vidExInfo.vidNRateLimit = GT_FALSE;
+	SWITCH_DBG(SWITCH_DBG_LOAD | SWITCH_DBG_MCAST | SWITCH_DBG_VLAN, ("vtu entry: vid=0x%x, port ", vtu_entry.vid));
+
+	for (p = 0; p < qd_dev->numOfPorts; p++) {
+		if (MV_BIT_CHECK(ports_mask, p)) {
+			SWITCH_DBG(SWITCH_DBG_LOAD | SWITCH_DBG_MCAST | SWITCH_DBG_VLAN, ("%d ", p));
+			vtu_entry.vtuData.memberTagP[p] = MEMBER_EGRESS_UNMODIFIED;
+		} else {
+			vtu_entry.vtuData.memberTagP[p] = NOT_A_MEMBER;
+		}
+		vtu_entry.vtuData.portStateP[p] = 0;
+	}
+
+	if (gvtuAddEntry(qd_dev, &vtu_entry) != GT_OK) {
+		printk(KERN_ERR "gvtuAddEntry failed\n");
+		return -1;
+	}
+
+	SWITCH_DBG(SWITCH_DBG_LOAD | SWITCH_DBG_MCAST | SWITCH_DBG_VLAN, ("\n"));
+	return 0;
+}
+
+int mv_switch_atu_db_flush(int db_num)
+{
+	MV_IF_NULL_RET_STR(qd_dev, -1, "switch dev qd_dev has not been init!\n");
+
+	if (gfdbFlushInDB(qd_dev, GT_FLUSH_ALL, db_num) != GT_OK) {
+		printk(KERN_ERR "gfdbFlushInDB failed\n");
+		return -1;
+	}
+	return 0;
+}
+
+int mv_switch_promisc_set(int db, u8 promisc_on)
+{
+	int i;
+	unsigned int ports_mask = db_port_mask[db];
+	int vlan_grp_id = MV_SWITCH_GROUP_VLAN_ID(db);
+
+	MV_IF_NULL_RET_STR(qd_dev, -1, "switch dev qd_dev has not been init!\n");
+
+	if (promisc_on)
+		ports_mask |= (1 << qd_cpu_port);
+	else
+		ports_mask &= ~(1 << qd_cpu_port);
+
+	mv_switch_port_based_vlan_set(ports_mask, 0);
+
+	for (i = 0; i < qd_dev->numOfPorts; i++) {
+		if (MV_BIT_CHECK(ports_mask, i) && (i != qd_cpu_port)) {
+			if (mv_switch_vlan_in_vtu_set(MV_SWITCH_PORT_VLAN_ID(vlan_grp_id, i),
+						      MV_SWITCH_VLAN_TO_GROUP(vlan_grp_id),
+							ports_mask) != 0) {
+				printk(KERN_ERR "mv_switch_vlan_in_vtu_set failed\n");
+				return -1;
+			}
+		}
+	}
+	db_port_mask[db] = ports_mask;
+
+	return 0;
+}
+
+int mv_switch_vlan_set(u16 vlan_grp_id, u16 port_map)
+{
+	int p;
+
+	MV_IF_NULL_RET_STR(qd_dev, -1, "switch dev qd_dev has not been init!\n");
+
+	/* set port's default private vlan id and database number (DB per group): */
+	for (p = 0; p < qd_dev->numOfPorts; p++) {
+		if (MV_BIT_CHECK(port_map, p) && (p != qd_cpu_port)) {
+			if (gvlnSetPortVid(qd_dev, p, MV_SWITCH_PORT_VLAN_ID(vlan_grp_id, p)) != GT_OK) {
+				printk(KERN_ERR "gvlnSetPortVid failed\n");
+				return -1;
+			}
+			if (gvlnSetPortVlanDBNum(qd_dev, p, MV_SWITCH_VLAN_TO_GROUP(vlan_grp_id)) != GT_OK) {
+				printk(KERN_ERR "gvlnSetPortVlanDBNum failed\n");
+				return -1;
+			}
+		}
+	}
+
+	/* set port's port-based vlan (CPU port is not part of VLAN) */
+	if (mv_switch_port_based_vlan_set((port_map & ~(1 << qd_cpu_port)), 0) != 0)
+		printk(KERN_ERR "mv_switch_port_based_vlan_set failed\n");
+
+	/* set vtu with group vlan id (used in tx) */
+	if (mv_switch_vlan_in_vtu_set(vlan_grp_id,
+					MV_SWITCH_VLAN_TO_GROUP(vlan_grp_id),
+					 port_map | (1 << qd_cpu_port)) != 0)
+		printk(KERN_ERR "mv_switch_vlan_in_vtu_set failed\n");
+
+	/* set vtu with each port private vlan id (used in rx) */
+	for (p = 0; p < qd_dev->numOfPorts; p++) {
+		if (MV_BIT_CHECK(port_map, p) && (p != qd_cpu_port)) {
+			if (mv_switch_vlan_in_vtu_set(MV_SWITCH_PORT_VLAN_ID(vlan_grp_id, p),
+						      MV_SWITCH_VLAN_TO_GROUP(vlan_grp_id),
+						      port_map & ~(1 << qd_cpu_port)) != 0) {
+				printk(KERN_ERR "mv_switch_vlan_in_vtu_set failed\n");
+			}
+		}
+	}
+
+	/* update SW vlan DB port mask */
+	db_port_mask[MV_SWITCH_VLAN_TO_GROUP(vlan_grp_id)] = port_map & ~(1 << qd_cpu_port);
+
+	return 0;
+}
+
+void mv_switch_link_update_event(MV_U32 port_mask, int force_link_check)
+{
+	int p, db;
+	unsigned short phy_cause = 0;
+
+	MV_IF_NULL_STR(qd_dev, "switch dev qd_dev has not been init!\n");
+
+	for (p = 0; p < qd_dev->numOfPorts; p++) {
+		if (MV_BIT_CHECK(port_mask, p)) {
+			/* this is needed to clear the PHY interrupt */
+			gprtGetPhyIntStatus(qd_dev, p, &phy_cause);
+
+			if (force_link_check)
+				phy_cause |= GT_LINK_STATUS_CHANGED;
+
+			if (phy_cause & GT_LINK_STATUS_CHANGED) {
+				char *link = NULL, *duplex = NULL, *speed = NULL;
+				GT_BOOL flag;
+				GT_PORT_SPEED_MODE speed_mode;
+
+				if (gprtGetLinkState(qd_dev, p, &flag) != GT_OK) {
+					printk(KERN_ERR "gprtGetLinkState failed (port %d)\n", p);
+					link = "ERR";
+				} else
+					link = (flag) ? "up" : "down";
+
+				if (flag) {
+					if (gprtGetDuplex(qd_dev, p, &flag) != GT_OK) {
+						printk(KERN_ERR "gprtGetDuplex failed (port %d)\n", p);
+						duplex = "ERR";
+					} else
+						duplex = (flag) ? "Full" : "Half";
+
+					if (gprtGetSpeedMode(qd_dev, p, &speed_mode) != GT_OK) {
+						printk(KERN_ERR "gprtGetSpeedMode failed (port %d)\n", p);
+						speed = "ERR";
+					} else {
+						if (speed_mode == PORT_SPEED_1000_MBPS)
+							speed = "1000Mbps";
+						else if (speed_mode == PORT_SPEED_100_MBPS)
+							speed = "100Mbps";
+						else
+							speed = "10Mbps";
+					}
+
+					db = mv_switch_port_db_get(p);
+					if (db != -1) {
+						/* link up event for group device (i.e. mux) */
+						if ((db_link_mask[db] == 0) && (mux_ops) && (mux_ops->update_link))
+							mux_ops->update_link(db_cookies[db], 1);
+						db_link_mask[db] |= (1 << p);
+					}
+
+					printk(KERN_ERR "Port %d: Link-%s, %s-duplex, Speed-%s.\n",
+					       p, link, duplex, speed);
+				} else {
+					db = mv_switch_port_db_get(p);
+					if (db != -1) {
+						db_link_mask[db] &= ~(1 << p);
+						/* link down event for group device (i.e. mux) */
+						if ((db_link_mask[db] == 0) && (mux_ops) && (mux_ops->update_link))
+							mux_ops->update_link(db_cookies[db], 0);
+					}
+
+					printk(KERN_ERR "Port %d: Link-down\n", p);
+				}
+			}
+		}
+	}
+}
+
+void mv_switch_link_timer_function(unsigned long data)
+{
+	/* GT_DEV_INT_STATUS devIntStatus; */
+	MV_U32 port_mask = (data & 0xFF);
+
+	mv_switch_link_update_event(port_mask, 0);
+
+	if (switch_link_poll) {
+		switch_link_timer.expires = jiffies + (HZ);	/* 1 second */
+		add_timer(&switch_link_timer);
+	}
+}
+
+static irqreturn_t mv_switch_isr(int irq, void *dev_id)
+{
+	GT_DEV_INT_STATUS devIntStatus;
+	MV_U32 port_mask = 0;
+	GT_U16 swIntStatus;
+	int status;
+
+	/* Check switch interrupt cause */
+	status = eventGetIntStatus(qd_dev, &swIntStatus);
+	if (status != GT_OK) {
+		pr_err("eventGetIntStatus failed: %d\n", status);
+		return IRQ_HANDLED;
+	}
+
+	/* Leave AVB Interrupt to PTP */
+	if (swIntStatus & GT_AVB_INT)
+		return IRQ_NONE;
+
+	if (geventGetDevIntStatus(qd_dev, &devIntStatus) != GT_OK)
+		pr_err("geventGetDevIntStatus failed\n");
+
+	if (devIntStatus.devIntCause & GT_DEV_INT_PHY)
+		port_mask = devIntStatus.phyInt & 0xFF;
+
+	mv_switch_interrupt_mask();
+	tasklet_schedule(&link_tasklet);
+
+	return IRQ_HANDLED;
+}
+
+void mv_switch_tasklet(unsigned long data)
+{
+	GT_DEV_INT_STATUS devIntStatus;
+	MV_U32 port_mask = 0;
+
+	MV_IF_NULL_STR(qd_dev, "switch dev qd_dev has not been init!\n");
+
+	/* TODO: verify that switch interrupt occured */
+
+	if (geventGetDevIntStatus(qd_dev, &devIntStatus) != GT_OK)
+		printk(KERN_ERR "geventGetDevIntStatus failed\n");
+
+	if (devIntStatus.devIntCause & GT_DEV_INT_PHY)
+		port_mask = devIntStatus.phyInt & 0xFF;
+
+	mv_switch_link_update_event(port_mask, 0);
+
+	mv_switch_interrupt_clear();
+
+	mv_switch_interrupt_unmask();
+}
+
+int mv_switch_jumbo_mode_set(int max_size)
+{
+	int i;
+	GT_JUMBO_MODE jumbo_mode;
+
+	MV_IF_NULL_RET_STR(qd_dev, -1, "switch dev qd_dev has not been init!\n");
+
+	/* Set jumbo frames mode */
+	if (max_size <= 1522)
+		jumbo_mode = GT_JUMBO_MODE_1522;
+	else if (max_size <= 2048)
+		jumbo_mode = GT_JUMBO_MODE_2048;
+	else
+		jumbo_mode = GT_JUMBO_MODE_10240;
+
+	for (i = 0; i < qd_dev->numOfPorts; i++) {
+		if (gsysSetJumboMode(qd_dev, i, jumbo_mode) != GT_OK) {
+			printk(KERN_ERR "gsysSetJumboMode %d failed\n", jumbo_mode);
+			return -1;
+		}
+	}
+	return 0;
+}
+
+int mv_switch_load(struct mv_switch_pdata *plat_data)
+{
+	int p;
+	GT_STU_ENTRY	stuEntry;
+
+	printk(KERN_ERR "  o Loading Switch QuarterDeck driver\n");
+
+	if (qd_dev) {
+		printk(KERN_ERR "    o %s: Already initialized\n", __func__);
+		return 0;
+	}
+
+	memset((char *)&qd_cfg, 0, sizeof(GT_SYS_CONFIG));
+	spin_lock_init(&switch_lock);
+
+	/* init config structure for qd package */
+	qd_cfg.BSPFunctions.readMii = mv_switch_mii_read;
+	qd_cfg.BSPFunctions.writeMii = mv_switch_mii_write;
+	qd_cfg.BSPFunctions.semCreate = NULL;
+	qd_cfg.BSPFunctions.semDelete = NULL;
+	qd_cfg.BSPFunctions.semTake = NULL;
+	qd_cfg.BSPFunctions.semGive = NULL;
+	qd_cfg.initPorts = GT_FALSE;
+	qd_cfg.cpuPortNum = plat_data->switch_cpu_port;
+	if (plat_data->smi_scan_mode == 1) {
+		qd_cfg.mode.baseAddr = 0;
+		qd_cfg.mode.scanMode = SMI_MANUAL_MODE;
+	} else if (plat_data->smi_scan_mode == 2) {
+		qd_cfg.mode.scanMode = SMI_MULTI_ADDR_MODE;
+		qd_cfg.mode.baseAddr = plat_data->phy_addr;
+	}
+
+	/* load switch sw package */
+	if (qdLoadDriver(&qd_cfg, &qddev) != GT_OK) {
+		printk(KERN_ERR "qdLoadDriver failed\n");
+		return -1;
+	}
+	qd_dev = &qddev;
+	qd_cpu_port = qd_cfg.cpuPortNum;
+
+	/* WA - Create dummy entry in STU table  */
+	memset(&stuEntry, 0, sizeof(GT_STU_ENTRY));
+	stuEntry.sid = 1; /* required: ((sid > 0) && (sid < 0x3F)) */
+	gstuAddEntry(qd_dev, &stuEntry);
+
+	printk(KERN_ERR "    o Device ID     : 0x%x\n", qd_dev->deviceId);
+	printk(KERN_ERR "    o No. of Ports  : %d\n", qd_dev->numOfPorts);
+	printk(KERN_ERR "    o CPU Port      : %ld\n", qd_dev->cpuPortNum);
+
+	/* disable all disconnected ports */
+	for (p = 0; p < qd_dev->numOfPorts; p++) {
+		/* Do nothing for ports that are not part of the given switch_port_mask */
+		if (!MV_BIT_CHECK(plat_data->port_mask, p))
+			continue;
+
+		/* Force link for ports that connected to GBE ports */
+		if (MV_BIT_CHECK(plat_data->forced_link_port_mask, p)) {
+			/* Switch port connected to GMAC - force link UP - 1000 Full with FC */
+			printk(KERN_ERR "    o Setting Switch Port #%d connected to GMAC port for 1000 Full with FC\n", p);
+			if (gpcsSetForceSpeed(qd_dev, p, PORT_FORCE_SPEED_1000_MBPS) != GT_OK) {
+				printk(KERN_ERR "Force speed 1000mbps - Failed\n");
+				return -1;
+			}
+
+			if ((gpcsSetDpxValue(qd_dev, p, GT_TRUE) != GT_OK) ||
+			    (gpcsSetForcedDpx(qd_dev, p, GT_TRUE) != GT_OK)) {
+				printk(KERN_ERR "Force duplex FULL - Failed\n");
+				return -1;
+			}
+
+			if ((gpcsSetFCValue(qd_dev, p, GT_TRUE) != GT_OK) ||
+			    (gpcsSetForcedFC(qd_dev, p, GT_TRUE) != GT_OK)) {
+				printk(KERN_ERR "Force Flow Control - Failed\n");
+				return -1;
+			}
+
+			if ((gpcsSetLinkValue(qd_dev, p, GT_TRUE) != GT_OK) ||
+			    (gpcsSetForcedLink(qd_dev, p, GT_TRUE) != GT_OK)) {
+				printk(KERN_ERR "Force Link UP - Failed\n");
+				return -1;
+			}
+
+			if (gprtSetPHYDetect(qd_dev, p, GT_FALSE) != GT_OK) {
+				printk(KERN_ERR "gprtSetPHYDetect failed\n");
+				return -1;
+			}
+
+			continue;
+		}
+
+		/* Switch port mapped to connector on the board */
+
+		if ((gpcsSetFCValue(qd_dev, p, GT_FALSE) != GT_OK) ||
+		    (gpcsSetForcedFC(qd_dev, p, GT_FALSE) != GT_OK)) {
+			printk(KERN_ERR "Force Flow Control - Failed\n");
+			return -1;
+		}
+
+		/* Disable switch ports that aren't connected to CPU/PHY */
+		if (!MV_BIT_CHECK(plat_data->connected_port_mask, p)) {
+			printk(KERN_ERR "    o Disable disconnected Switch Port #%d and force link down\n", p);
+			if (gstpSetPortState(qd_dev, p, GT_PORT_DISABLE) != GT_OK) {
+				printk(KERN_ERR "gstpSetPortState failed\n");
+				return -1;
+			}
+			if ((gpcsSetLinkValue(qd_dev, p, GT_FALSE) != GT_OK) ||
+			    (gpcsSetForcedLink(qd_dev, p, GT_TRUE) != GT_OK)) {
+				printk(KERN_ERR "Force Link DOWN - Failed\n");
+				return -1;
+			}
+
+			if (gprtSetPHYDetect(qd_dev, p, GT_FALSE) != GT_OK) {
+				printk(KERN_ERR "gprtSetPHYDetect failed\n");
+				return -1;
+			}
+		}
+	}
+	return 0;
+}
+
+int mv_switch_unload(unsigned int switch_ports_mask)
+{
+	int i;
+
+	printk(KERN_ERR "  o Unloading Switch QuarterDeck driver\n");
+
+	if (qd_dev == NULL) {
+		printk(KERN_ERR "    o %s: Already un-initialized\n", __func__);
+		return 0;
+	}
+
+	/* Flush all addresses from the MAC address table */
+	/* this also happens in mv_switch_init() but we call it here to clean-up nicely */
+	/* Note: per DB address flush (gfdbFlushInDB) happens when doing ifconfig down on a Switch interface */
+	if (gfdbFlush(qd_dev, GT_FLUSH_ALL) != GT_OK)
+		printk(KERN_ERR "gfdbFlush failed\n");
+
+	/* Reset VLAN tunnel mode */
+	for (i = 0; i < qd_dev->numOfPorts; i++) {
+		if (MV_BIT_CHECK(switch_ports_mask, i) && (i != qd_cpu_port))
+			if (gprtSetVlanTunnel(qd_dev, i, GT_FALSE) != GT_OK)
+				printk(KERN_ERR "gprtSetVlanTunnel failed (port %d)\n", i);
+	}
+
+	/* restore port's default private vlan id and database number to their default values after reset: */
+	for (i = 0; i < qd_dev->numOfPorts; i++) {
+		if (gvlnSetPortVid(qd_dev, i, 0x0001) != GT_OK) { /* that's the default according to the spec */
+			printk(KERN_ERR "gvlnSetPortVid failed\n");
+			return -1;
+		}
+		if (gvlnSetPortVlanDBNum(qd_dev, i, 0) != GT_OK) {
+			printk(KERN_ERR "gvlnSetPortVlanDBNum failed\n");
+			return -1;
+		}
+	}
+
+	/* Port based VLAN */
+	if (mv_switch_port_based_vlan_set(switch_ports_mask, 1))
+		printk(KERN_ERR "mv_switch_port_based_vlan_set failed\n");
+
+	/* Remove all entries from the VTU table */
+	if (gvtuFlush(qd_dev) != GT_OK)
+		printk(KERN_ERR "gvtuFlush failed\n");
+
+	/* unload switch sw package */
+	if (qdUnloadDriver(qd_dev) != GT_OK) {
+		printk(KERN_ERR "qdUnloadDriver failed\n");
+		return -1;
+	}
+	qd_dev = NULL;
+	qd_cpu_port = -1;
+
+	switch_irq = -1;
+	switch_link_poll = 0;
+	del_timer(&switch_link_timer);
+
+	return 0;
+}
+
+int mv_switch_init(struct mv_switch_pdata *plat_data)
+{
+	unsigned int p;
+
+	if (qd_dev == NULL) {
+		printk(KERN_ERR "%s: qd_dev not initialized, call mv_switch_load() first\n", __func__);
+		return -1;
+	}
+
+	/* general Switch initialization - relevant for all Switch devices */
+	memset(db_port_mask, 0, sizeof(u16) * MV_SWITCH_DB_NUM);
+	memset(db_link_mask, 0, sizeof(u16) * MV_SWITCH_DB_NUM);
+	memset(db_cookies, 0, sizeof(void *) * MV_SWITCH_DB_NUM);
+
+	/* disable all ports */
+	for (p = 0; p < qd_dev->numOfPorts; p++) {
+		if (MV_BIT_CHECK(plat_data->connected_port_mask, p))
+			if (gstpSetPortState(qd_dev, p, GT_PORT_DISABLE) != GT_OK) {
+				printk(KERN_ERR "gstpSetPortState failed\n");
+				return -1;
+			}
+	}
+
+	/* flush All counters for all ports */
+	if (gstatsFlushAll(qd_dev) != GT_OK)
+		printk(KERN_ERR "gstatsFlushAll failed\n");
+
+	/* set all ports not to unmodify the vlan tag on egress */
+	for (p = 0; p < qd_dev->numOfPorts; p++) {
+		if (MV_BIT_CHECK(plat_data->connected_port_mask, p)) {
+			if (gprtSetEgressMode(qd_dev, p, GT_UNMODIFY_EGRESS) != GT_OK) {
+				printk(KERN_ERR "gprtSetEgressMode GT_UNMODIFY_EGRESS failed\n");
+				return -1;
+			}
+		}
+	}
+
+	/* initializes the PVT Table (cross-chip port based VLAN) to all one's (initial state) */
+	if (gpvtInitialize(qd_dev) != GT_OK) {
+		printk(KERN_ERR "gpvtInitialize failed\n");
+		return -1;
+	}
+
+	/* set priorities rules */
+	for (p = 0; p < qd_dev->numOfPorts; p++) {
+		if (MV_BIT_CHECK(plat_data->connected_port_mask, p)) {
+			/* default port priority to queue zero */
+			if (gcosSetPortDefaultTc(qd_dev, p, 0) != GT_OK)
+				printk(KERN_ERR "gcosSetPortDefaultTc failed (port %d)\n", p);
+
+			/* enable IP TOS Prio */
+			if (gqosIpPrioMapEn(qd_dev, p, GT_TRUE) != GT_OK)
+				printk(KERN_ERR "gqosIpPrioMapEn failed (port %d)\n", p);
+
+			/* set IP QoS */
+			if (gqosSetPrioMapRule(qd_dev, p, GT_FALSE) != GT_OK)
+				printk(KERN_ERR "gqosSetPrioMapRule failed (port %d)\n", p);
+
+			/* disable Vlan QoS Prio */
+			if (gqosUserPrioMapEn(qd_dev, p, GT_FALSE) != GT_OK)
+				printk(KERN_ERR "gqosUserPrioMapEn failed (port %d)\n", p);
+		}
+	}
+
+	if (gfdbFlush(qd_dev, GT_FLUSH_ALL) != GT_OK)
+		printk(KERN_ERR "gfdbFlush failed\n");
+
+	mv_switch_link_detection_init(plat_data);
+
+	/* Enable Jumbo support by default */
+	mv_switch_jumbo_mode_set(9180);
+
+	/* Configure Ethernet related LEDs, currently according to Switch ID */
+	switch (qd_dev->deviceId) {
+	case GT_88E6161:
+	case GT_88E6165:
+	case GT_88E6171:
+	case GT_88E6172:
+	case GT_88E6176:
+	case GT_88E6351:
+	case GT_88E6352:
+		break;		/* do nothing */
+
+	default:
+		for (p = 0; p < qd_dev->numOfPorts; p++) {
+			if ((p != qd_cpu_port) &&
+				MV_BIT_CHECK(plat_data->connected_port_mask, p)) {
+				if (gprtSetPhyReg(qd_dev, p, 22, 0x1FFA)) {
+					/* Configure Register 22 LED0 to 0xA for Link/Act */
+					printk(KERN_ERR "gprtSetPhyReg failed (port=%d)\n", p);
+				}
+			}
+		}
+		break;
+	}
+
+	/* Configure speed between GBE port and CPU port */
+	gprtSet200Base(qd_dev, qd_cpu_port, plat_data->is_speed_2000);
+
+	switch_ports_mask = plat_data->connected_port_mask;
+	tag_mode = plat_data->tag_mode;
+	preset = plat_data->preset;
+	default_vid = plat_data->vid;
+	gbe_port = plat_data->gbe_port;
+
+	enabled_ports_mask = switch_ports_mask;
+
+	mv_mux_switch_ops_set(&switch_ops);
+	mv_mux_switch_attach(gbe_port, preset, default_vid, tag_mode, plat_data->switch_cpu_port);
+
+
+#ifdef SWITCH_DEBUG
+	/* for debug: */
+	mv_switch_status_print();
+#endif
+
+	return 0;
+}
+
+int mv_switch_preset_init(MV_TAG_TYPE tag_mode, MV_SWITCH_PRESET_TYPE preset, int vid)
+{
+	unsigned int p;
+	unsigned char cnt;
+	GT_LPORT port_list[MAX_SWITCH_PORTS];
+
+	MV_IF_NULL_RET_STR(qd_dev, -1, "switch dev qd_dev has not been init!\n");
+
+	printk(KERN_INFO "Switch driver init:\n");
+	switch (preset) {
+	case MV_PRESET_TRANSPARENT:
+		printk(KERN_INFO "    o preset mode = Transparent\n");
+		break;
+	case MV_PRESET_SINGLE_VLAN:
+		printk(KERN_INFO "    o preset mode = Single Vlan\n");
+		break;
+	case MV_PRESET_PER_PORT_VLAN:
+		printk(KERN_INFO "    o preset mode = Vlan Per Port\n");
+		break;
+	default:
+		printk(KERN_INFO "    o preset mode = Unknown\n");
+	}
+
+	switch (tag_mode) {
+	case MV_TAG_TYPE_MH:
+		printk(KERN_INFO "    o tag mode    = Marvell Header\n");
+		break;
+	case MV_TAG_TYPE_DSA:
+		printk(KERN_INFO "    o tag mode    = DSA Tag\n");
+		break;
+	case MV_TAG_TYPE_NONE:
+		printk(KERN_INFO "    o tag mode    = No Tag\n");
+		break;
+	default:
+		printk(KERN_INFO "Unknown\n");
+
+	}
+
+	/* set all ports to work in Normal mode */
+	for (p = 0; p < qd_dev->numOfPorts; p++) {
+		if (MV_BIT_CHECK(switch_ports_mask, p)) {
+			if (gprtSetFrameMode(qd_dev, p, GT_FRAME_MODE_NORMAL) != GT_OK) {
+				printk(KERN_ERR "gprtSetFrameMode GT_FRAME_MODE_NORMAL failed\n");
+				return -1;
+			}
+		}
+	}
+
+	/* set Header Mode in all ports to False */
+	for (p = 0; p < qd_dev->numOfPorts; p++) {
+		if (MV_BIT_CHECK(switch_ports_mask, p)) {
+			if (gprtSetHeaderMode(qd_dev, p, GT_FALSE) != GT_OK) {
+				printk(KERN_ERR "gprtSetHeaderMode GT_FALSE failed\n");
+				return -1;
+			}
+		}
+	}
+
+	/* cpu port specific intialization */
+	if (preset != MV_PRESET_TRANSPARENT) {
+		/* set tag mode for CPU port */
+		if ((tag_mode == MV_TAG_TYPE_MH) &&
+				(gprtSetHeaderMode(qd_dev, qd_cpu_port, GT_TRUE) != GT_OK)) {
+			printk(KERN_ERR "gprtSetHeaderMode GT_TRUE failed\n");
+			return -1;
+		} else if ((tag_mode == MV_TAG_TYPE_DSA) &&
+				(gprtSetFrameMode(qd_dev, qd_cpu_port, GT_FRAME_MODE_DSA) != GT_OK)) {
+			printk(KERN_ERR "gprtSetFrameMode GT_TRUE failed\n");
+			return -1;
+		}
+
+		/* set cpu-port with port-based vlan to all other ports */
+		SWITCH_DBG(SWITCH_DBG_LOAD, ("cpu port-based vlan:"));
+		for (p = 0, cnt = 0; p < qd_dev->numOfPorts; p++) {
+			if (p != qd_cpu_port) {
+				SWITCH_DBG(SWITCH_DBG_LOAD, ("%d ", p));
+				port_list[cnt] = p;
+				cnt++;
+			}
+		}
+		SWITCH_DBG(SWITCH_DBG_LOAD, ("\n"));
+		if (gvlnSetPortVlanPorts(qd_dev, qd_cpu_port, port_list, cnt) != GT_OK) {
+			printk(KERN_ERR "gvlnSetPortVlanPorts failed\n");
+			return -1;
+		}
+	}
+
+	/* The switch CPU port is not part of the VLAN, but rather connected by tunneling to each */
+	/* of the VLAN's ports. Our MAC addr will be added during start operation to the VLAN DB  */
+	/* at switch level to forward packets with this DA to CPU port.                           */
+	SWITCH_DBG(SWITCH_DBG_LOAD, ("Enabling Tunneling on ports: "));
+	for (p = 0; p < qd_dev->numOfPorts; p++) {
+		if (MV_BIT_CHECK(switch_ports_mask, p) &&
+				((p != qd_cpu_port) || (preset == MV_PRESET_TRANSPARENT))) {
+			if (gprtSetVlanTunnel(qd_dev, p, GT_TRUE) != GT_OK) {
+				printk(KERN_ERR "gprtSetVlanTunnel failed (port %d)\n", p);
+				return -1;
+			} else {
+				SWITCH_DBG(SWITCH_DBG_LOAD, ("%d ", p));
+			}
+		}
+	}
+	SWITCH_DBG(SWITCH_DBG_LOAD, ("\n"));
+
+	/* split ports to vlans according to preset */
+	if (preset == MV_PRESET_SINGLE_VLAN) {
+		mv_switch_vlan_set(MV_SWITCH_GROUP_VLAN_ID(vid), switch_ports_mask);
+	} else if (preset == MV_PRESET_PER_PORT_VLAN) {
+		for (p = 0; p < qd_dev->numOfPorts; p++)
+			if (MV_BIT_CHECK(switch_ports_mask, p) && (p != qd_cpu_port))
+				mv_switch_vlan_set(MV_SWITCH_GROUP_VLAN_ID(vid + p), (1 << p));
+	}
+
+	if (preset == MV_PRESET_TRANSPARENT) {
+		/* enable all relevant ports (ports connected to the MAC or external ports) */
+		for (p = 0; p < qd_dev->numOfPorts; p++) {
+			if (!MV_BIT_CHECK(switch_ports_mask, p))
+				continue;
+
+			if (gstpSetPortState(qd_dev, p, GT_PORT_FORWARDING) != GT_OK) {
+				printk(KERN_ERR "gstpSetPortState failed\n");
+				return -1;
+			}
+		}
+	} else {
+		/* enable cpu port */
+		if (gstpSetPortState(qd_dev, qd_cpu_port, GT_PORT_FORWARDING) != GT_OK) {
+			printk(KERN_ERR "gstpSetPortState failed\n");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+void mv_switch_interrupt_mask(void)
+{
+#ifdef CONFIG_AVANTA_LP
+	MV_U32 reg;
+
+	reg = MV_REG_READ(0x18954/*MV_ETHCOMP_INT_MAIN_MASK_REG*/);
+
+	reg &= ~0x1/*MV_ETHCOMP_SWITCH_INT_MASK*/;
+
+	MV_REG_WRITE(0x18954/*MV_ETHCOMP_INT_MAIN_MASK_REG*/, reg);
+#endif
+}
+
+void mv_switch_interrupt_unmask(void)
+{
+#ifdef CONFIG_AVANTA_LP
+	MV_U32 reg;
+
+	reg = MV_REG_READ(0x18954/*MV_ETHCOMP_INT_MAIN_MASK_REG*/);
+
+	reg |= 0x1/*MV_ETHCOMP_SWITCH_INT_MASK*/;
+
+	MV_REG_WRITE(0x18954/*MV_ETHCOMP_INT_MAIN_MASK_REG*/, reg);
+#endif
+}
+
+void mv_switch_interrupt_clear(void)
+{
+#ifdef CONFIG_AVANTA_LP
+	MV_U32 reg;
+
+	reg = MV_REG_READ(0x18950/*MV_ETHCOMP_INT_MAIN_CAUSE_REG*/);
+
+	reg &= ~0x1/*MV_ETHCOMP_SWITCH_INT_MASK*/;
+
+	MV_REG_WRITE(0x18950/*MV_ETHCOMP_INT_MAIN_CAUSE_REG*/, reg);
+#endif
+}
+
+static unsigned int mv_switch_link_detection_init(struct mv_switch_pdata *plat_data)
+{
+	unsigned int p;
+	static int link_init_done = 0;
+	unsigned int connected_phys_mask = 0;
+
+	if (qd_dev == NULL) {
+		printk(KERN_ERR "%s: qd_dev not initialized, call mv_switch_load() first\n", __func__);
+		return 0;
+	}
+
+	switch_irq = plat_data->switch_irq;
+
+	connected_phys_mask = plat_data->connected_port_mask & ~(1 << qd_cpu_port);
+
+	if (!link_init_done) {
+		/* Enable Phy Link Status Changed interrupt at Phy level for the all enabled ports */
+		for (p = 0; p < qd_dev->numOfPorts; p++) {
+			if (MV_BIT_CHECK(connected_phys_mask, p) &&
+					(!MV_BIT_CHECK(plat_data->forced_link_port_mask, p))) {
+				if (gprtPhyIntEnable(qd_dev, p, (GT_LINK_STATUS_CHANGED)) != GT_OK)
+					printk(KERN_ERR "gprtPhyIntEnable failed port %d\n", p);
+			}
+		}
+
+		if (switch_irq != -1) {
+			/* Interrupt supported */
+
+			if ((qd_dev->deviceId == GT_88E6161) || (qd_dev->deviceId == GT_88E6165) ||
+			    (qd_dev->deviceId == GT_88E6351) || (qd_dev->deviceId == GT_88E6171) ||
+			    (qd_dev->deviceId == GT_88E6352)) {
+				GT_DEV_EVENT gt_event = { GT_DEV_INT_PHY, 0, connected_phys_mask };
+
+				if (eventSetDevInt(qd_dev, &gt_event) != GT_OK)
+					printk(KERN_ERR "eventSetDevInt failed\n");
+
+				if (eventSetActive(qd_dev, GT_DEVICE_INT) != GT_OK)
+					printk(KERN_ERR "eventSetActive failed\n");
+			} else {
+				if (eventSetActive(qd_dev, GT_PHY_INTERRUPT) != GT_OK)
+					printk(KERN_ERR "eventSetActive failed\n");
+			}
+		}
+	}
+
+	if (!link_init_done) {
+		/* we want to use a timer for polling link status if no interrupt is available for all or some of the PHYs */
+		if (switch_irq == -1) {
+			/* Use timer for polling */
+			switch_link_poll = 1;
+			init_timer(&switch_link_timer);
+			switch_link_timer.function = mv_switch_link_timer_function;
+
+			if (switch_irq == -1)
+				switch_link_timer.data = connected_phys_mask;
+
+			switch_link_timer.expires = jiffies + (HZ);	/* 1 second */
+			add_timer(&switch_link_timer);
+		} else {
+			/* create tasklet for interrupt handling */
+			tasklet_init(&link_tasklet, mv_switch_tasklet, 0);
+
+			/* Interrupt supported */
+			if (request_irq(switch_irq, mv_switch_isr, IRQF_DISABLED | IRQF_SHARED,
+							"switch", plat_data))
+				printk(KERN_ERR "failed to assign irq%d\n", switch_irq);
+
+			/* interrupt unmasking will be done by GW manager */
+		}
+	}
+
+	link_init_done = 1;
+
+	return connected_phys_mask;
+
+}
+
+int mv_switch_tos_get(unsigned char tos)
+{
+	unsigned char queue;
+	int rc;
+
+	MV_IF_NULL_RET_STR(qd_dev, -1, "switch dev qd_dev has not been init!\n");
+
+	rc = gcosGetDscp2Tc(qd_dev, tos >> 2, &queue);
+	if (rc)
+		return -1;
+
+	return (int)queue;
+}
+
+int mv_switch_tos_set(unsigned char tos, int rxq)
+{
+	MV_IF_NULL_RET_STR(qd_dev, -1, "switch dev qd_dev has not been init!\n");
+
+	return gcosSetDscp2Tc(qd_dev, tos >> 2, (unsigned char)rxq);
+}
+
+int mv_switch_get_free_buffers_num(void)
+{
+	MV_U16 regVal;
+
+	MV_IF_NULL_RET_STR(qd_dev, -1, "switch dev qd_dev has not been init!\n");
+
+	if (gsysGetFreeQSize(qd_dev, &regVal) != GT_OK) {
+		printk(KERN_ERR "gsysGetFreeQSize - FAILED\n");
+		return -1;
+	}
+
+	return regVal;
+}
+
+#define QD_FMT "%10lu %10lu %10lu %10lu %10lu %10lu %10lu\n"
+#define QD_CNT_CORRECT(c, f, p) (GT_U32)(c[p]->f - history_counters[p].f)
+#define QD_STAT_FMT "%10u %10u %10u %10u %10u %10u %10u\n"
+#define QD_STAT_CORRECT(c, f, p) (GT_U16)(c[p]->f - history_stats[p].f)
+
+#define QD_MAX 7
+void mv_switch_stats_print(void)
+{
+	static GT_STATS_COUNTER_SET3 history_counters[QD_MAX] = {0};
+	static GT_PORT_STAT2 history_stats[QD_MAX] = {0};
+	GT_STATS_COUNTER_SET3 * counters[QD_MAX];
+	GT_PORT_STAT2 * port_stats[QD_MAX];
+
+	int p;
+
+	if (qd_dev == NULL) {
+		pr_err("Switch is not initialized\n");
+		return;
+	}
+
+	for (p = 0; p < QD_MAX; p++) {
+		counters[p] = (GT_STATS_COUNTER_SET3 *)mvOsMalloc(sizeof(GT_STATS_COUNTER_SET3));
+		port_stats[p] = (GT_PORT_STAT2 *)mvOsMalloc(sizeof(GT_PORT_STAT2));
+		mvOsMemset(counters[p], 0, sizeof(GT_STATS_COUNTER_SET3));
+		mvOsMemset(port_stats[p], 0, sizeof(GT_PORT_STAT2));
+	}
+
+	pr_err("Total free buffers:      %u\n\n", mv_switch_get_free_buffers_num());
+
+	for (p = 0; p < QD_MAX; p++) {
+		if (gstatsGetPortAllCounters3(qd_dev, p, counters[p]) != GT_OK)
+			pr_err("gstatsGetPortAllCounters3 for port #%d - FAILED\n", p);
+
+		if (gprtGetPortCtr2(qd_dev, p, port_stats[p]) != GT_OK)
+			pr_err("gprtGetPortCtr2 for port #%d - FAILED\n", p);
+	}
+
+	pr_err("PortNum         " QD_FMT, (GT_U32) 0, (GT_U32) 1, (GT_U32) 2, (GT_U32) 3, (GT_U32) 4, (GT_U32) 5,
+	       (GT_U32) 6);
+	pr_err("-----------------------------------------------------------------------------------------------\n");
+	pr_err("InGoodOctetsLo  " QD_FMT,
+		QD_CNT_CORRECT(counters, InGoodOctetsLo, 0), QD_CNT_CORRECT(counters, InGoodOctetsLo, 1),
+		QD_CNT_CORRECT(counters, InGoodOctetsLo, 2), QD_CNT_CORRECT(counters, InGoodOctetsLo, 3),
+		QD_CNT_CORRECT(counters, InGoodOctetsLo, 4), QD_CNT_CORRECT(counters, InGoodOctetsLo, 5),
+		QD_CNT_CORRECT(counters, InGoodOctetsLo, 6));
+	pr_err("InGoodOctetsHi  " QD_FMT,
+		QD_CNT_CORRECT(counters, InGoodOctetsHi, 0), QD_CNT_CORRECT(counters, InGoodOctetsHi, 1),
+		QD_CNT_CORRECT(counters, InGoodOctetsHi, 2), QD_CNT_CORRECT(counters, InGoodOctetsHi, 3),
+		QD_CNT_CORRECT(counters, InGoodOctetsHi, 4), QD_CNT_CORRECT(counters, InGoodOctetsHi, 5),
+		QD_CNT_CORRECT(counters, InGoodOctetsHi, 6));
+	pr_err("InBadOctets     " QD_FMT,
+		QD_CNT_CORRECT(counters, InBadOctets, 0), QD_CNT_CORRECT(counters, InBadOctets, 1),
+		QD_CNT_CORRECT(counters, InBadOctets, 2), QD_CNT_CORRECT(counters, InBadOctets, 3),
+		QD_CNT_CORRECT(counters, InBadOctets, 4), QD_CNT_CORRECT(counters, InBadOctets, 5),
+		QD_CNT_CORRECT(counters, InBadOctets, 6));
+	pr_err("InUnicasts      " QD_FMT,
+		QD_CNT_CORRECT(counters, InUnicasts, 0), QD_CNT_CORRECT(counters, InUnicasts, 1),
+		QD_CNT_CORRECT(counters, InUnicasts, 2), QD_CNT_CORRECT(counters, InUnicasts, 3),
+		QD_CNT_CORRECT(counters, InUnicasts, 4), QD_CNT_CORRECT(counters, InUnicasts, 5),
+		QD_CNT_CORRECT(counters, InUnicasts, 6));
+	pr_err("InBroadcasts    " QD_FMT,
+		QD_CNT_CORRECT(counters, InBroadcasts, 0), QD_CNT_CORRECT(counters, InBroadcasts, 1),
+		QD_CNT_CORRECT(counters, InBroadcasts, 2), QD_CNT_CORRECT(counters, InBroadcasts, 3),
+		QD_CNT_CORRECT(counters, InBroadcasts, 4), QD_CNT_CORRECT(counters, InBroadcasts, 5),
+		QD_CNT_CORRECT(counters, InBroadcasts, 6));
+	pr_err("InMulticasts    " QD_FMT,
+		QD_CNT_CORRECT(counters, InMulticasts, 0), QD_CNT_CORRECT(counters, InMulticasts, 1),
+		QD_CNT_CORRECT(counters, InMulticasts, 2), QD_CNT_CORRECT(counters, InMulticasts, 3),
+		QD_CNT_CORRECT(counters, InMulticasts, 4), QD_CNT_CORRECT(counters, InMulticasts, 5),
+		QD_CNT_CORRECT(counters, InMulticasts, 6));
+	pr_err("inDiscardLo     " QD_FMT,
+		QD_STAT_CORRECT(port_stats, inDiscardLo, 0), QD_STAT_CORRECT(port_stats, inDiscardLo, 1),
+		QD_STAT_CORRECT(port_stats, inDiscardLo, 2), QD_STAT_CORRECT(port_stats, inDiscardLo, 3),
+		QD_STAT_CORRECT(port_stats, inDiscardLo, 4), QD_STAT_CORRECT(port_stats, inDiscardLo, 5),
+		QD_STAT_CORRECT(port_stats, inDiscardLo, 6));
+	pr_err("inDiscardHi     " QD_FMT,
+		QD_STAT_CORRECT(port_stats, inDiscardHi, 0), QD_STAT_CORRECT(port_stats, inDiscardHi, 1),
+		QD_STAT_CORRECT(port_stats, inDiscardHi, 2), QD_STAT_CORRECT(port_stats, inDiscardHi, 3),
+		QD_STAT_CORRECT(port_stats, inDiscardHi, 4), QD_STAT_CORRECT(port_stats, inDiscardHi, 5),
+		QD_STAT_CORRECT(port_stats, inDiscardHi, 6));
+	pr_err("inFiltered      " QD_FMT,
+		QD_STAT_CORRECT(port_stats, inFiltered, 0), QD_STAT_CORRECT(port_stats, inFiltered, 1),
+		QD_STAT_CORRECT(port_stats, inFiltered, 2), QD_STAT_CORRECT(port_stats, inFiltered, 3),
+		QD_STAT_CORRECT(port_stats, inFiltered, 4), QD_STAT_CORRECT(port_stats, inFiltered, 5),
+		QD_STAT_CORRECT(port_stats, inFiltered, 6));
+	pr_err("OutOctetsLo     " QD_FMT,
+		QD_CNT_CORRECT(counters, OutOctetsLo, 0), QD_CNT_CORRECT(counters, OutOctetsLo, 1),
+		QD_CNT_CORRECT(counters, OutOctetsLo, 2), QD_CNT_CORRECT(counters, OutOctetsLo, 3),
+		QD_CNT_CORRECT(counters, OutOctetsLo, 4), QD_CNT_CORRECT(counters, OutOctetsLo, 5),
+		QD_CNT_CORRECT(counters, OutOctetsLo, 6));
+	pr_err("OutOctetsHi     " QD_FMT,
+		QD_CNT_CORRECT(counters, OutOctetsHi, 0), QD_CNT_CORRECT(counters, OutOctetsHi, 1),
+		QD_CNT_CORRECT(counters, OutOctetsHi, 2), QD_CNT_CORRECT(counters, OutOctetsHi, 3),
+		QD_CNT_CORRECT(counters, OutOctetsHi, 4), QD_CNT_CORRECT(counters, OutOctetsHi, 5),
+		QD_CNT_CORRECT(counters, OutOctetsHi, 6));
+	pr_err("OutUnicasts     " QD_FMT,
+		QD_CNT_CORRECT(counters, OutUnicasts, 0), QD_CNT_CORRECT(counters, OutUnicasts, 1),
+		QD_CNT_CORRECT(counters, OutUnicasts, 2), QD_CNT_CORRECT(counters, OutUnicasts, 3),
+		QD_CNT_CORRECT(counters, OutUnicasts, 4), QD_CNT_CORRECT(counters, OutUnicasts, 5),
+		QD_CNT_CORRECT(counters, OutUnicasts, 6));
+	pr_err("OutMulticasts   " QD_FMT,
+		QD_CNT_CORRECT(counters, OutMulticasts, 0), QD_CNT_CORRECT(counters, OutMulticasts, 1),
+		QD_CNT_CORRECT(counters, OutMulticasts, 2), QD_CNT_CORRECT(counters, OutMulticasts, 3),
+		QD_CNT_CORRECT(counters, OutMulticasts, 4), QD_CNT_CORRECT(counters, OutMulticasts, 5),
+		QD_CNT_CORRECT(counters, OutMulticasts, 6));
+	pr_err("OutBroadcasts   " QD_FMT,
+		QD_CNT_CORRECT(counters, OutBroadcasts, 0), QD_CNT_CORRECT(counters, OutBroadcasts, 1),
+		QD_CNT_CORRECT(counters, OutBroadcasts, 2), QD_CNT_CORRECT(counters, OutBroadcasts, 3),
+		QD_CNT_CORRECT(counters, OutBroadcasts, 4), QD_CNT_CORRECT(counters, OutBroadcasts, 5),
+		QD_CNT_CORRECT(counters, OutBroadcasts, 6));
+	pr_err("outFiltered     " QD_FMT,
+		QD_STAT_CORRECT(port_stats, outFiltered, 0), QD_STAT_CORRECT(port_stats, outFiltered, 1),
+		QD_STAT_CORRECT(port_stats, outFiltered, 2), QD_STAT_CORRECT(port_stats, outFiltered, 3),
+		QD_STAT_CORRECT(port_stats, outFiltered, 4), QD_STAT_CORRECT(port_stats, outFiltered, 5),
+		QD_STAT_CORRECT(port_stats, outFiltered, 6));
+
+	pr_err("OutPause        " QD_FMT,
+		QD_CNT_CORRECT(counters, OutPause, 0), QD_CNT_CORRECT(counters, OutPause, 1),
+		QD_CNT_CORRECT(counters, OutPause, 2), QD_CNT_CORRECT(counters, OutPause, 3),
+		QD_CNT_CORRECT(counters, OutPause, 4), QD_CNT_CORRECT(counters, OutPause, 5),
+		QD_CNT_CORRECT(counters, OutPause, 6));
+	pr_err("InPause         " QD_FMT,
+		QD_CNT_CORRECT(counters, InPause, 0), QD_CNT_CORRECT(counters, InPause, 1),
+		QD_CNT_CORRECT(counters, InPause, 2), QD_CNT_CORRECT(counters, InPause, 3),
+		QD_CNT_CORRECT(counters, InPause, 4), QD_CNT_CORRECT(counters, InPause, 5),
+		QD_CNT_CORRECT(counters, InPause, 6));
+
+	pr_err("Octets64        " QD_FMT,
+		QD_CNT_CORRECT(counters, Octets64, 0), QD_CNT_CORRECT(counters, Octets64, 1),
+		QD_CNT_CORRECT(counters, Octets64, 2), QD_CNT_CORRECT(counters, Octets64, 3),
+		QD_CNT_CORRECT(counters, Octets64, 4), QD_CNT_CORRECT(counters, Octets64, 5),
+		QD_CNT_CORRECT(counters, Octets64, 6));
+	pr_err("Octets127       " QD_FMT,
+		QD_CNT_CORRECT(counters, Octets127, 0), QD_CNT_CORRECT(counters, Octets127, 1),
+		QD_CNT_CORRECT(counters, Octets127, 2), QD_CNT_CORRECT(counters, Octets127, 3),
+		QD_CNT_CORRECT(counters, Octets127, 4), QD_CNT_CORRECT(counters, Octets127, 5),
+		QD_CNT_CORRECT(counters, Octets127, 6));
+	pr_err("Octets255       " QD_FMT,
+		QD_CNT_CORRECT(counters, Octets255, 0), QD_CNT_CORRECT(counters, Octets255, 1),
+		QD_CNT_CORRECT(counters, Octets255, 2), QD_CNT_CORRECT(counters, Octets255, 3),
+		QD_CNT_CORRECT(counters, Octets255, 4), QD_CNT_CORRECT(counters, Octets255, 5),
+		QD_CNT_CORRECT(counters, Octets255, 6));
+	pr_err("Octets511       " QD_FMT,
+		QD_CNT_CORRECT(counters, Octets511, 0), QD_CNT_CORRECT(counters, Octets511, 1),
+		QD_CNT_CORRECT(counters, Octets511, 2), QD_CNT_CORRECT(counters, Octets511, 3),
+		QD_CNT_CORRECT(counters, Octets511, 4), QD_CNT_CORRECT(counters, Octets511, 5),
+		QD_CNT_CORRECT(counters, Octets511, 6));
+	pr_err("Octets1023      " QD_FMT,
+		QD_CNT_CORRECT(counters, Octets1023, 0), QD_CNT_CORRECT(counters, Octets1023, 1),
+		QD_CNT_CORRECT(counters, Octets1023, 2), QD_CNT_CORRECT(counters, Octets1023, 3),
+		QD_CNT_CORRECT(counters, Octets1023, 4), QD_CNT_CORRECT(counters, Octets1023, 5),
+		QD_CNT_CORRECT(counters, Octets1023, 6));
+	pr_err("OctetsMax       " QD_FMT,
+		QD_CNT_CORRECT(counters, OctetsMax, 0), QD_CNT_CORRECT(counters, OctetsMax, 1),
+		QD_CNT_CORRECT(counters, OctetsMax, 2), QD_CNT_CORRECT(counters, OctetsMax, 3),
+		QD_CNT_CORRECT(counters, OctetsMax, 4), QD_CNT_CORRECT(counters, OctetsMax, 5),
+		QD_CNT_CORRECT(counters, OctetsMax, 6));
+	pr_err("Excessive       " QD_FMT,
+		QD_CNT_CORRECT(counters, Excessive, 0), QD_CNT_CORRECT(counters, Excessive, 1),
+		QD_CNT_CORRECT(counters, Excessive, 2), QD_CNT_CORRECT(counters, Excessive, 3),
+		QD_CNT_CORRECT(counters, Excessive, 4), QD_CNT_CORRECT(counters, Excessive, 5),
+		QD_CNT_CORRECT(counters, Excessive, 6));
+	pr_err("Single          " QD_FMT,
+		QD_CNT_CORRECT(counters, Single, 0), QD_CNT_CORRECT(counters, Single, 1),
+		QD_CNT_CORRECT(counters, Single, 2), QD_CNT_CORRECT(counters, Single, 3),
+		QD_CNT_CORRECT(counters, Single, 4), QD_CNT_CORRECT(counters, Single, 5),
+		QD_CNT_CORRECT(counters, Single, 6));
+	pr_err("Multiple        " QD_FMT,
+		QD_CNT_CORRECT(counters, Multiple, 0), QD_CNT_CORRECT(counters, Multiple, 1),
+		QD_CNT_CORRECT(counters, Multiple, 2), QD_CNT_CORRECT(counters, Multiple, 3),
+		QD_CNT_CORRECT(counters, Multiple, 4), QD_CNT_CORRECT(counters, Multiple, 5),
+		QD_CNT_CORRECT(counters, Multiple, 6));
+	pr_err("Undersize       " QD_FMT,
+		QD_CNT_CORRECT(counters, Undersize, 0), QD_CNT_CORRECT(counters, Undersize, 1),
+		QD_CNT_CORRECT(counters, Undersize, 2), QD_CNT_CORRECT(counters, Undersize, 3),
+		QD_CNT_CORRECT(counters, Undersize, 4), QD_CNT_CORRECT(counters, Undersize, 5),
+		QD_CNT_CORRECT(counters, Undersize, 6));
+	pr_err("Fragments       " QD_FMT,
+		QD_CNT_CORRECT(counters, Fragments, 0), QD_CNT_CORRECT(counters, Fragments, 1),
+		QD_CNT_CORRECT(counters, Fragments, 2), QD_CNT_CORRECT(counters, Fragments, 3),
+		QD_CNT_CORRECT(counters, Fragments, 4), QD_CNT_CORRECT(counters, Fragments, 5),
+		QD_CNT_CORRECT(counters, Fragments, 6));
+	pr_err("Oversize        " QD_FMT,
+		QD_CNT_CORRECT(counters, Oversize, 0), QD_CNT_CORRECT(counters, Oversize, 1),
+		QD_CNT_CORRECT(counters, Oversize, 2), QD_CNT_CORRECT(counters, Oversize, 3),
+		QD_CNT_CORRECT(counters, Oversize, 4), QD_CNT_CORRECT(counters, Oversize, 5),
+		QD_CNT_CORRECT(counters, Oversize, 6));
+	pr_err("Jabber          " QD_FMT,
+		QD_CNT_CORRECT(counters, Jabber, 0), QD_CNT_CORRECT(counters, Jabber, 1),
+		QD_CNT_CORRECT(counters, Jabber, 2), QD_CNT_CORRECT(counters, Jabber, 3),
+		QD_CNT_CORRECT(counters, Jabber, 4), QD_CNT_CORRECT(counters, Jabber, 5),
+		QD_CNT_CORRECT(counters, Jabber, 6));
+	pr_err("InMACRcvErr     " QD_FMT,
+		QD_CNT_CORRECT(counters, InMACRcvErr, 0), QD_CNT_CORRECT(counters, InMACRcvErr, 1),
+		QD_CNT_CORRECT(counters, InMACRcvErr, 2), QD_CNT_CORRECT(counters, InMACRcvErr, 3),
+		QD_CNT_CORRECT(counters, InMACRcvErr, 4), QD_CNT_CORRECT(counters, InMACRcvErr, 5),
+		QD_CNT_CORRECT(counters, InMACRcvErr, 6));
+	pr_err("InFCSErr        " QD_FMT,
+		QD_CNT_CORRECT(counters, InFCSErr, 0), QD_CNT_CORRECT(counters, InFCSErr, 1),
+		QD_CNT_CORRECT(counters, InFCSErr, 2), QD_CNT_CORRECT(counters, InFCSErr, 3),
+		QD_CNT_CORRECT(counters, InFCSErr, 4), QD_CNT_CORRECT(counters, InFCSErr, 5),
+		QD_CNT_CORRECT(counters, InFCSErr, 6));
+	pr_err("Collisions      " QD_FMT,
+		QD_CNT_CORRECT(counters, Collisions, 0), QD_CNT_CORRECT(counters, Collisions, 1),
+		QD_CNT_CORRECT(counters, Collisions, 2), QD_CNT_CORRECT(counters, Collisions, 3),
+		QD_CNT_CORRECT(counters, Collisions, 4), QD_CNT_CORRECT(counters, Collisions, 5),
+		QD_CNT_CORRECT(counters, Collisions, 6));
+	pr_err("Late            " QD_FMT,
+		QD_CNT_CORRECT(counters, Late, 0), QD_CNT_CORRECT(counters, Late, 1),
+		QD_CNT_CORRECT(counters, Late, 2), QD_CNT_CORRECT(counters, Late, 3),
+		QD_CNT_CORRECT(counters, Late, 4), QD_CNT_CORRECT(counters, Late, 5),
+		QD_CNT_CORRECT(counters, Late, 6));
+	pr_err("OutFCSErr       " QD_FMT,
+		QD_CNT_CORRECT(counters, OutFCSErr, 0), QD_CNT_CORRECT(counters, OutFCSErr, 1),
+		QD_CNT_CORRECT(counters, OutFCSErr, 2), QD_CNT_CORRECT(counters, OutFCSErr, 3),
+		QD_CNT_CORRECT(counters, OutFCSErr, 4), QD_CNT_CORRECT(counters, OutFCSErr, 5),
+		QD_CNT_CORRECT(counters, OutFCSErr, 6));
+	pr_err("Deferred        " QD_FMT,
+		QD_CNT_CORRECT(counters, Deferred, 0), QD_CNT_CORRECT(counters, Deferred, 1),
+		QD_CNT_CORRECT(counters, Deferred, 2), QD_CNT_CORRECT(counters, Deferred, 3),
+		QD_CNT_CORRECT(counters, Deferred, 4), QD_CNT_CORRECT(counters, Deferred, 5),
+		QD_CNT_CORRECT(counters, Deferred, 6));
+
+	/*gstatsFlushAll(qd_dev);*/	/*remove this line for FlushAll operation may cause StatusBusy bit stuck*/
+
+	for (p = 0; p < QD_MAX; p++) {
+		memmove(&history_counters[p], counters[p], sizeof(GT_STATS_COUNTER_SET3));
+		memmove(&history_stats[p], port_stats[p], sizeof(GT_PORT_STAT2));
+		mvOsFree(counters[p]);
+		mvOsFree(port_stats[p]);
+	}
+}
+
+static char *mv_str_port_state(GT_PORT_STP_STATE state)
+{
+	switch (state) {
+	case GT_PORT_DISABLE:
+		return "Disable";
+	case GT_PORT_BLOCKING:
+		return "Blocking";
+	case GT_PORT_LEARNING:
+		return "Learning";
+	case GT_PORT_FORWARDING:
+		return "Forwarding";
+	default:
+		return "Invalid";
+	}
+}
+
+static char *mv_str_speed_state(int port)
+{
+	GT_PORT_SPEED_MODE speed;
+	char *speed_str;
+
+	if (qd_dev == NULL) {
+		pr_err("Switch is not initialized\n");
+		return "ERR";
+	}
+	if (gprtGetSpeedMode(qd_dev, port, &speed) != GT_OK) {
+		printk(KERN_ERR "gprtGetSpeedMode failed (port %d)\n", port);
+		speed_str = "ERR";
+	} else {
+		if (speed == PORT_SPEED_1000_MBPS)
+			speed_str = "1 Gbps";
+		else if (speed == PORT_SPEED_100_MBPS)
+			speed_str = "100 Mbps";
+		else
+			speed_str = "10 Mbps";
+	}
+	return speed_str;
+}
+
+static char *mv_str_duplex_state(int port)
+{
+	GT_BOOL duplex;
+
+	if (qd_dev == NULL) {
+		pr_err("Switch is not initialized\n");
+		return "ERR";
+	}
+	if (gprtGetDuplex(qd_dev, port, &duplex) != GT_OK) {
+		printk(KERN_ERR "gprtGetDuplex failed (port %d)\n", port);
+		return "ERR";
+	} else
+		return (duplex) ? "Full" : "Half";
+}
+
+static char *mv_str_link_state(int port)
+{
+	GT_BOOL link;
+
+	if (qd_dev == NULL) {
+		pr_err("Switch is not initialized\n");
+		return "ERR";
+	}
+	if (gprtGetLinkState(qd_dev, port, &link) != GT_OK) {
+		printk(KERN_ERR "gprtGetLinkState failed (port %d)\n", port);
+		return "ERR";
+	} else
+		return (link) ? "Up" : "Down";
+}
+
+static char *mv_str_pause_state(int port)
+{
+	GT_BOOL force, pause;
+
+	if (qd_dev == NULL) {
+		pr_err("Switch is not initialized\n");
+		return "ERR";
+	}
+	if (gpcsGetForcedFC(qd_dev, port, &force) != GT_OK) {
+		printk(KERN_ERR "gpcsGetForcedFC failed (port %d)\n", port);
+		return "ERR";
+	}
+	if (force) {
+		if (gpcsGetFCValue(qd_dev, port, &pause) != GT_OK) {
+			printk(KERN_ERR "gpcsGetFCValue failed (port %d)\n", port);
+			return "ERR";
+		}
+	} else {
+		if (gprtGetPauseEn(qd_dev, port, &pause) != GT_OK) {
+			printk(KERN_ERR "gprtGetPauseEn failed (port %d)\n", port);
+			return "ERR";
+		}
+	}
+	return (pause) ? "Enable" : "Disable";
+}
+
+static char *mv_str_egress_mode(GT_EGRESS_MODE mode)
+{
+	switch (mode) {
+	case GT_UNMODIFY_EGRESS:
+		return "Unmodify";
+	case GT_UNTAGGED_EGRESS:
+		return "Untagged";
+	case GT_TAGGED_EGRESS:
+		return "Tagged";
+	case GT_ADD_TAG:
+		return "Add Tag";
+	default:
+		return "Invalid";
+	}
+}
+
+static char *mv_str_frame_mode(GT_FRAME_MODE mode)
+{
+	switch (mode) {
+	case GT_FRAME_MODE_NORMAL:
+		return "Normal";
+	case GT_FRAME_MODE_DSA:
+		return "DSA";
+	case GT_FRAME_MODE_PROVIDER:
+		return "Provider";
+	case GT_FRAME_MODE_ETHER_TYPE_DSA:
+		return "EtherType DSA";
+	default:
+		return "Invalid";
+	}
+}
+
+static char *mv_str_header_mode(GT_BOOL mode)
+{
+	switch (mode) {
+	case GT_FALSE:
+		return "False";
+	case GT_TRUE:
+		return "True";
+	default:
+		return "Invalid";
+	}
+}
+
+void mv_switch_status_print(void)
+{
+	int p, i;
+	GT_PORT_STP_STATE port_state = -1;
+	GT_EGRESS_MODE egress_mode = -1;
+	GT_FRAME_MODE frame_mode = -1;
+	GT_BOOL header_mode = -1;
+
+	if (qd_dev == NULL) {
+		printk(KERN_ERR "Switch is not initialized\n");
+		return;
+	}
+	printk(KERN_ERR "Printing Switch Status:\n");
+
+	for (i = 0; i < MV_SWITCH_DB_NUM; i++)
+		if (db_port_mask[i])
+			printk(KERN_ERR "%d: %d\n", i, db_port_mask[i]);
+
+	printk(KERN_ERR "Port   State     Link   Duplex   Speed    Pause     Egress     Frame    Header\n");
+	for (p = 0; p < qd_dev->numOfPorts; p++) {
+
+		if (gstpGetPortState(qd_dev, p, &port_state) != GT_OK)
+			printk(KERN_ERR "gstpGetPortState failed\n");
+
+		if (gprtGetEgressMode(qd_dev, p, &egress_mode) != GT_OK)
+			printk(KERN_ERR "gprtGetEgressMode failed\n");
+
+		if (gprtGetFrameMode(qd_dev, p, &frame_mode) != GT_OK)
+			printk(KERN_ERR "gprtGetFrameMode failed\n");
+
+		if (gprtGetHeaderMode(qd_dev, p, &header_mode) != GT_OK)
+			printk(KERN_ERR "gprtGetHeaderMode failed\n");
+
+		printk(KERN_ERR "%2d, %10s,  %4s,  %4s,  %8s,  %7s,  %s,  %s,  %s\n",
+		       p, mv_str_port_state(port_state), mv_str_link_state(p),
+		       mv_str_duplex_state(p), mv_str_speed_state(p), mv_str_pause_state(p),
+		       mv_str_egress_mode(egress_mode), mv_str_frame_mode(frame_mode), mv_str_header_mode(header_mode));
+	}
+}
+
+#ifdef CONFIG_MV_SW_PTP
+int mv_switch_ptp_reg_read(int port, int reg, MV_U16 *value)
+{
+	GT_STATUS status;
+	GT_U32 val;
+
+	if (qd_dev == NULL) {
+		pr_err("Switch is not initialized\n");
+		return 1;
+	}
+
+	status = gptpGetReg(qd_dev, port, reg, &val);
+	if (status != GT_OK) {
+		pr_err("Failed reading PTP register.\n");
+		return status;
+	}
+	*value = (MV_U16)val;
+	return 0;
+}
+
+int mv_switch_ptp_reg_write(int port, int reg, MV_U16 value)
+{
+	GT_STATUS status;
+
+	if (qd_dev == NULL) {
+		pr_err("Switch is not initialized\n");
+		return 1;
+	}
+
+	status = gptpSetReg(qd_dev, port, reg, (MV_U32)value);
+	if (status != GT_OK) {
+		pr_err("Failed reading PTP register.\n");
+		return status;
+	}
+	return 0;
+}
+#endif
+
+int mv_switch_reg_read(int port, int reg, int type, MV_U16 *value)
+{
+	GT_STATUS status;
+
+	if (qd_dev == NULL) {
+		printk(KERN_ERR "Switch is not initialized\n");
+		return 1;
+	}
+
+	switch (type) {
+	case MV_SWITCH_PHY_ACCESS:
+		status = gprtGetPhyReg(qd_dev, port, reg, value);
+		break;
+
+	case MV_SWITCH_PORT_ACCESS:
+		status = gprtGetSwitchReg(qd_dev, port, reg, value);
+		break;
+
+	case MV_SWITCH_GLOBAL_ACCESS:
+		status = gprtGetGlobalReg(qd_dev, reg, value);
+		break;
+
+	case MV_SWITCH_GLOBAL2_ACCESS:
+		status = gprtGetGlobal2Reg(qd_dev, reg, value);
+		break;
+
+	case MV_SWITCH_SMI_ACCESS:
+		/* port means phyAddr */
+		status = miiSmiIfReadRegister(qd_dev, port, reg, value);
+		break;
+
+	default:
+		printk(KERN_ERR "%s Failed: Unexpected access type %d\n", __func__, type);
+		return 1;
+	}
+	if (status != GT_OK) {
+		printk(KERN_ERR "%s Failed: status = %d\n", __func__, status);
+		return 2;
+	}
+	return 0;
+}
+
+int mv_switch_reg_write(int port, int reg, int type, MV_U16 value)
+{
+	GT_STATUS status;
+
+	if (qd_dev == NULL) {
+		printk(KERN_ERR "Switch is not initialized\n");
+		return 1;
+	}
+
+	switch (type) {
+	case MV_SWITCH_PHY_ACCESS:
+		status = gprtSetPhyReg(qd_dev, port, reg, value);
+		break;
+
+	case MV_SWITCH_PORT_ACCESS:
+		status = gprtSetSwitchReg(qd_dev, port, reg, value);
+		break;
+
+	case MV_SWITCH_GLOBAL_ACCESS:
+		status = gprtSetGlobalReg(qd_dev, reg, value);
+		break;
+
+	case MV_SWITCH_GLOBAL2_ACCESS:
+		status = gprtSetGlobal2Reg(qd_dev, reg, value);
+		break;
+
+	case MV_SWITCH_SMI_ACCESS:
+		/* port means phyAddr */
+		status = miiSmiIfWriteRegister(qd_dev, port, reg, value);
+		break;
+
+	default:
+		printk(KERN_ERR "%s Failed: Unexpected access type %d\n", __func__, type);
+		return 1;
+	}
+	if (status != GT_OK) {
+		printk(KERN_ERR "%s Failed: status = %d\n", __func__, status);
+		return 2;
+	}
+	return 0;
+}
+
+int mv_switch_all_multicasts_del(int db_num)
+{
+	GT_STATUS status = GT_OK;
+	GT_ATU_ENTRY atu_entry;
+	GT_U8 mc_mac[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
+	GT_U8 bc_mac[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+
+	memcpy(atu_entry.macAddr.arEther, &mc_mac, 6);
+	atu_entry.DBNum = db_num;
+
+	MV_IF_NULL_RET_STR(qd_dev, -1, "switch dev qd_dev has not been init!\n");
+
+	while ((status = gfdbGetAtuEntryNext(qd_dev, &atu_entry)) == GT_OK) {
+
+		/* Delete only Mcast addresses */
+		if (!is_multicast_ether_addr(atu_entry.macAddr.arEther))
+			continue;
+
+		/* we don't want to delete the broadcast entry which is the last one */
+		if (memcmp(atu_entry.macAddr.arEther, &bc_mac, 6) == 0)
+			break;
+
+		SWITCH_DBG(SWITCH_DBG_MCAST, ("Deleting ATU Entry: db = %d, MAC = %02X:%02X:%02X:%02X:%02X:%02X\n",
+					      atu_entry.DBNum, atu_entry.macAddr.arEther[0],
+					      atu_entry.macAddr.arEther[1], atu_entry.macAddr.arEther[2],
+					      atu_entry.macAddr.arEther[3], atu_entry.macAddr.arEther[4],
+					      atu_entry.macAddr.arEther[5]));
+
+		if (gfdbDelAtuEntry(qd_dev, &atu_entry) != GT_OK) {
+			printk(KERN_ERR "gfdbDelAtuEntry failed\n");
+			return -1;
+		}
+		memcpy(atu_entry.macAddr.arEther, &mc_mac, 6);
+		atu_entry.DBNum = db_num;
+	}
+
+	return 0;
+}
+
+int mv_switch_port_add(int switch_port, u16 grp_id)
+{
+	int p;
+	u16 port_map, vlan_grp_id;
+
+	MV_IF_NULL_RET_STR(qd_dev, -1, "switch dev qd_dev has not been init!\n");
+	if (!MV_BIT_CHECK(switch_ports_mask, switch_port)) {
+		printk(KERN_ERR "%s: switch port %d is not connected to PHY/CPU\n", __func__, switch_port);
+		return -1;
+	}
+
+	if (MV_BIT_CHECK(enabled_ports_mask, switch_port)) {
+		printk(KERN_ERR "%s: switch port %d is already enabled\n", __func__, switch_port);
+		return -1;
+	}
+
+	vlan_grp_id = MV_SWITCH_GROUP_VLAN_ID(grp_id);
+	/* Add port to port mask of VLAN group */
+	port_map = db_port_mask[grp_id] | (1 << switch_port);
+
+	/* Set default VLAN_ID for port */
+	if (gvlnSetPortVid(qd_dev, switch_port, MV_SWITCH_PORT_VLAN_ID(vlan_grp_id, switch_port)) != GT_OK) {
+		printk(KERN_ERR "gvlnSetPortVid failed\n");
+		return -1;
+	}
+	/* Map port to VLAN DB */
+	if (gvlnSetPortVlanDBNum(qd_dev, switch_port, grp_id) != GT_OK) {
+		printk(KERN_ERR "gvlnSetPortVlanDBNum failed\n");
+		return -1;
+	}
+
+	/* Add port to the VLAN (CPU port is not part of VLAN) */
+	if (mv_switch_port_based_vlan_set((port_map & ~(1 << qd_cpu_port)), 0) != 0)
+		printk(KERN_ERR "mv_switch_port_based_vlan_set failed\n");
+
+	/* Add port to vtu (used in tx) */
+	if (mv_switch_vlan_in_vtu_set(vlan_grp_id, grp_id, (port_map | (1 << qd_cpu_port))))
+		printk(KERN_ERR "mv_switch_vlan_in_vtu_set failed\n");
+
+	/* set vtu with each port private vlan id (used in rx) */
+	for (p = 0; p < qd_dev->numOfPorts; p++) {
+		if (MV_BIT_CHECK(port_map, p) && (p != qd_cpu_port)) {
+			if (mv_switch_vlan_in_vtu_set(MV_SWITCH_PORT_VLAN_ID(vlan_grp_id, p),
+						      grp_id, port_map & ~(1 << qd_cpu_port)) != 0) {
+				printk(KERN_ERR "mv_switch_vlan_in_vtu_set failed\n");
+			}
+		}
+	}
+
+	/* Enable port */
+	if (gstpSetPortState(qd_dev, switch_port, GT_PORT_FORWARDING) != GT_OK)
+		printk(KERN_ERR "gstpSetPortState failed\n");
+
+	/* Enable Phy Link Status Changed interrupt at Phy level for the port */
+	if (gprtPhyIntEnable(qd_dev, switch_port, (GT_LINK_STATUS_CHANGED)) != GT_OK)
+		printk(KERN_ERR "gprtPhyIntEnable failed port %d\n", switch_port);
+
+	db_port_mask[grp_id] = port_map;
+	enabled_ports_mask |= (1 << switch_port);
+	/* TODO if new mux */
+
+	return 0;
+}
+
+int mv_switch_port_del(int switch_port)
+{
+	int p;
+	u16 port_map, vlan_grp_id, grp_id;
+
+	MV_IF_NULL_RET_STR(qd_dev, -1, "switch dev qd_dev has not been init!\n");
+	if (!MV_BIT_CHECK(switch_ports_mask, switch_port)) {
+		printk(KERN_ERR "%s: switch port %d is not connected to PHY/CPU\n", __func__, switch_port);
+		return -1;
+	}
+
+	if (!MV_BIT_CHECK(enabled_ports_mask, switch_port)) {
+		printk(KERN_ERR "%s: switch port %d is already disabled\n", __func__, switch_port);
+		return -1;
+	}
+
+	/* Search for port's DB number */
+	for (grp_id = 0; grp_id < MV_SWITCH_DB_NUM; grp_id++)
+		if (db_port_mask[grp_id] & (1 << switch_port))
+			break;
+
+	if (grp_id == MV_SWITCH_DB_NUM) {
+		printk(KERN_ERR "%s: couldn't find port %d VLAN group\n", __func__, switch_port);
+		return -1;
+	}
+
+	vlan_grp_id = MV_SWITCH_GROUP_VLAN_ID(grp_id);
+	/* Add port to port mask of VLAN group */
+	port_map = db_port_mask[grp_id] & ~(1 << switch_port);
+
+	/* Disable link change interrupts on unmapped port */
+	if (gprtPhyIntEnable(qd_dev, switch_port, 0) != GT_OK)
+		printk(KERN_ERR "gprtPhyIntEnable failed on port #%d\n", switch_port);
+
+	/* Disable unmapped port */
+	if (gstpSetPortState(qd_dev, switch_port, GT_PORT_DISABLE) != GT_OK)
+		printk(KERN_ERR "gstpSetPortState failed on port #%d\n", switch_port);
+
+	/* Remove port from the VLAN (CPU port is not part of VLAN) */
+	if (mv_switch_port_based_vlan_set((port_map & ~(1 << qd_cpu_port)), 0) != 0)
+		printk(KERN_ERR "mv_gtw_set_port_based_vlan failed\n");
+
+	/* Remove port from vtu (used in tx) */
+	if (mv_switch_vlan_in_vtu_set(vlan_grp_id, MV_SWITCH_VLAN_TO_GROUP(vlan_grp_id),
+				      (port_map | (1 << qd_cpu_port))) != 0) {
+		printk(KERN_ERR "mv_gtw_set_vlan_in_vtu failed\n");
+	}
+
+	/* Remove port from vtu of each port private vlan id (used in rx) */
+	for (p = 0; p < qd_dev->numOfPorts; p++) {
+		if (MV_BIT_CHECK(port_map, p) && (p != qd_cpu_port)) {
+			if (mv_switch_vlan_in_vtu_set(MV_SWITCH_PORT_VLAN_ID(vlan_grp_id, p),
+						      MV_SWITCH_VLAN_TO_GROUP(vlan_grp_id),
+						      (port_map & ~(1 << qd_cpu_port))) != 0)
+				printk(KERN_ERR "mv_gtw_set_vlan_in_vtu failed\n");
+		}
+	}
+
+	db_port_mask[grp_id] = port_map;
+	enabled_ports_mask &= ~(1 << switch_port);
+
+	return 0;
+}
+
+/*******************************************************************************
+* mv_switch_port_discard_tag_set
+*
+* DESCRIPTION:
+*	The API allows or drops all tagged packets based on logical port.
+*
+* INPUTS:
+*	lport - logical port.
+*	mode  - discard tag mode.
+*		GT_TRUE = discard tagged packets per lport
+*		GT_FALSE = allow tagged packets per lport.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_discard_tag_set(unsigned int lport, GT_BOOL mode)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gprtSetDiscardTagged(qd_dev, lport, mode);
+	SW_IF_ERROR_STR(rc, "failed to call gprtSetDiscardTagged()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_discard_tag_get
+*
+* DESCRIPTION:
+*	This routine gets discard tagged bit for given lport.
+*
+* INPUTS:
+*	lport - logical port.
+*
+* OUTPUTS:
+*	mode  - discard tag mode.
+*		GT_TRUE = discard tagged packets per lport
+*		GT_FALSE = allow tagged packets per lport.
+*
+* RETURNS:
+*       On success -  TPM_RC_OK.
+*       On error different types are returned according to the case - see tpm_error_code_t.
+*******************************************************************************/
+int mv_switch_port_discard_tag_get(unsigned int lport, GT_BOOL *mode)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gprtGetDiscardTagged(qd_dev, lport, mode);
+	SW_IF_ERROR_STR(rc, "failed to call gprtGetDiscardTagged()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_discard_untag_set
+*
+* DESCRIPTION:
+*	The API allows or drops all untagged packets based on logical port.
+*
+* INPUTS:
+*	lport - logical port.
+*	mode  - discard untag mode.
+*		GT_TRUE = discard untagged packets per lport
+*		GT_FALSE = allow untagged packets per lport.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_discard_untag_set(unsigned int lport, GT_BOOL mode)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gprtSetDiscardUntagged(qd_dev, lport, mode);
+	SW_IF_ERROR_STR(rc, "failed to call gprtSetDiscardUntagged()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_discard_untag_get
+*
+* DESCRIPTION:
+*	This routine gets discard untagged bit for given lport.
+*
+* INPUTS:
+*	lport - logical port.
+*
+* OUTPUTS:
+*	mode  - discard untag mode.
+*		GT_TRUE = undiscard tagged packets per lport
+*		GT_FALSE = unallow tagged packets per lport.
+*
+* RETURNS:
+*       On success -  TPM_RC_OK.
+*       On error different types are returned according to the case - see tpm_error_code_t.
+*******************************************************************************/
+int mv_switch_port_discard_untag_get(unsigned int lport, GT_BOOL *mode)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gprtGetDiscardUntagged(qd_dev, lport, mode);
+	SW_IF_ERROR_STR(rc, "failed to call gprtGetDiscardUntagged()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_def_vid_set
+*
+* DESCRIPTION:
+*	The API sets port default vlan ID.
+*
+* INPUTS:
+*	lport - logical port.
+*	vid   - port default VLAN ID.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_def_vid_set(unsigned int lport, unsigned short vid)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gvlnSetPortVid(qd_dev, lport, vid);
+	SW_IF_ERROR_STR(rc, "failed to call gvlnSetPortVid()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_def_vid_get
+*
+* DESCRIPTION:
+*	The API gets port default vlan ID.
+*
+* INPUTS:
+*	lport - logical port.
+*
+* OUTPUTS:
+*	vid   - port default VLAN ID.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_def_vid_get(unsigned int lport, unsigned short *vid)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gvlnGetPortVid(qd_dev, lport, vid);
+	SW_IF_ERROR_STR(rc, "failed to call gvlnGetPortVid()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_def_pri_set
+*
+* DESCRIPTION:
+*	The API sets port default priority.
+*
+* INPUTS:
+*	lport - logical port.
+*	pri   - the port priority.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_def_pri_set(unsigned int lport, unsigned char pri)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gcosSetPortDefaultTc(qd_dev, lport, pri);
+	SW_IF_ERROR_STR(rc, "failed to call gcosSetPortDefaultTc()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_def_pri_get
+*
+* DESCRIPTION:
+*	The API gets port default priority.
+*
+* INPUTS:
+*	lport - logical port.
+*
+* OUTPUTS:
+*	pri   - the port priority.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_def_pri_get(unsigned int lport, unsigned char *pri)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gcosGetPortDefaultTc(qd_dev, lport, pri);
+	SW_IF_ERROR_STR(rc, "failed to call gcosGetPortDefaultTc()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_vtu_entry_find
+*
+* DESCRIPTION:
+*	The API find expected VTU entry in sw_vlan_tbl.
+*
+* INPUTS:
+*	vtu_entry - VTU entry, supply VID.
+*
+* OUTPUTS:
+*	found     - find expected entry or not.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_vtu_entry_find(GT_VTU_ENTRY *vtu_entry, GT_BOOL *found)
+{
+	SW_IF_NULL(vtu_entry);
+	SW_IF_NULL(found);
+
+	if (sw_vlan_tbl[vtu_entry->vid].port_bm) {/* if port members is not 0, this vid entry exist in HW andd SW */
+		memcpy(vtu_entry, &sw_vlan_tbl[vtu_entry->vid].vtu_entry, sizeof(GT_VTU_ENTRY));
+		*found = MV_TRUE;
+	} else {
+		*found = MV_FALSE;
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_vtu_entry_save
+*
+* DESCRIPTION:
+*	The API store expected VTU entry in sw_vlan_tbl.
+*
+* INPUTS:
+*	vtu_entry - VTU entry.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_vtu_entry_save(GT_VTU_ENTRY *vtu_entry)
+{
+	SW_IF_NULL(vtu_entry);
+
+	memcpy(&sw_vlan_tbl[vtu_entry->vid].vtu_entry, vtu_entry, sizeof(GT_VTU_ENTRY));
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_vid_add
+*
+* DESCRIPTION:
+*	The API adds a VID.
+*
+* INPUTS:
+*	lport     - logical switch port ID.
+*	vid       - VLAN ID.
+*	egr_mode  - egress mode.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_vid_add(unsigned int lport, unsigned short vid, unsigned char egr_mode)
+{
+	GT_VTU_ENTRY vtu_entry;
+	unsigned int found = GT_FALSE;
+	unsigned int port;
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	memset(&vtu_entry, 0, sizeof(GT_VTU_ENTRY));
+
+	/* Find existed VTU entry in SW cache */
+	vtu_entry.vid = vid;
+	rc = mv_switch_vtu_entry_find(&vtu_entry, &found);
+	SW_IF_ERROR_STR(rc, "failed to call mv_switch_vtu_entry_find()\n");
+
+	/* Add new VTU entry in case VTU entry does not exist */
+	if (found == GT_FALSE) {
+		vtu_entry.DBNum				= 0;
+		vtu_entry.vid				= vid;
+		vtu_entry.vidPriOverride		= GT_FALSE;
+		vtu_entry.vidPriority			= 0;
+		vtu_entry.vidExInfo.useVIDFPri		= GT_FALSE;
+		vtu_entry.vidExInfo.vidFPri		= 0;
+		vtu_entry.vidExInfo.useVIDQPri		= GT_FALSE;
+		vtu_entry.vidExInfo.vidQPri		= 0;
+		vtu_entry.vidExInfo.vidNRateLimit	= GT_FALSE;
+
+	}
+
+	/* Update VTU entry */
+	for (port = 0; port < qd_dev->numOfPorts; port++) {
+		if (sw_vlan_tbl[vid].port_bm & (1 << port)) {
+			if (port == lport)
+				vtu_entry.vtuData.memberTagP[port] = egr_mode;/* update egress mode only */
+			else
+				vtu_entry.vtuData.memberTagP[port] = sw_vlan_tbl[vid].egr_mode[port];
+		} else if (port == lport) {
+			vtu_entry.vtuData.memberTagP[port] = egr_mode;
+		} else if ((sw_port_tbl[port].port_mode == GT_FALLBACK) || (qd_dev->cpuPortNum == port)) {
+			/* add cpu_port to VLAN if cpu_port is valid */
+			vtu_entry.vtuData.memberTagP[port] = MEMBER_EGRESS_UNMODIFIED;
+		} else {
+			vtu_entry.vtuData.memberTagP[port] = NOT_A_MEMBER;
+		}
+	}
+
+	/* Add/Update HW VTU entry */
+	rc = gvtuAddEntry(qd_dev, &vtu_entry);
+	SW_IF_ERROR_STR(rc, "failed to call gvtuAddEntry()\n");
+
+	/* Record HW VTU entry info to sw_vlan_tbl */
+	rc = mv_switch_vtu_entry_save(&vtu_entry);
+	SW_IF_ERROR_STR(rc, "failed to call mv_switch_vtu_entry_save()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_vid_add
+*
+* DESCRIPTION:
+*	The API adds a VID per lport.
+*
+* INPUTS:
+*	lport     - logical switch port ID.
+*	vid       - VLAN ID.
+*	egr_mode  - egress mode.
+*	belong    - whether this port blong to the VLAN actually
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_vid_add(unsigned int lport, unsigned short vid, unsigned char egr_mode, bool belong)
+{
+	GT_STATUS rc = GT_OK;
+	unsigned int  port_bm;
+	unsigned int port_idx;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	/* Verify whether the port is already a member of this VID */
+	port_bm = (unsigned int)(1 << lport);
+	if (!(sw_vlan_tbl[vid].port_bm & port_bm) || (sw_vlan_tbl[vid].egr_mode[lport] != egr_mode)) {
+		rc = mv_switch_vid_add(lport, vid, egr_mode);
+		SW_IF_ERROR_STR(rc, "failed to call mv_switch_vid_add()\n");
+
+		/* add port to vid's member bit map and set port egress mode */
+		sw_vlan_tbl[vid].port_bm |= port_bm;
+		sw_vlan_tbl[vid].egr_mode[lport] = egr_mode;
+	}
+
+	/* add fallback port or CPU port to this VLAN in DB */
+	for (port_idx = 0; port_idx < qd_dev->numOfPorts; port_idx++) {
+		/* If a VLAN has been defined (there is a member in the VLAN) and
+		   the specified port is not a member */
+		if (!(sw_vlan_tbl[vid].port_bm & (1 << port_idx)) &&
+		     ((sw_port_tbl[port_idx].port_mode == GT_FALLBACK) ||
+		      (port_idx == qd_dev->cpuPortNum))) {
+			sw_vlan_tbl[vid].port_bm |= (1 << port_idx);
+			sw_vlan_tbl[vid].egr_mode[port_idx] = MEMBER_EGRESS_UNMODIFIED;
+		}
+	}
+
+	/* Add the specified port to the SW Port table */
+	if ((true == belong) && (sw_port_tbl[lport].vlan_blong[vid] == MV_SWITCH_PORT_NOT_BELONG))
+		sw_port_tbl[lport].vlan_blong[vid] = MV_SWITCH_PORT_BELONG;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_vid_del
+*
+* DESCRIPTION:
+*	The API delete existed VID per lport.
+*
+* INPUTS:
+*	lport     - logical switch port ID.
+*	vid       - VLAN ID.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_vid_del(unsigned int lport, unsigned short vid)
+{
+	unsigned int port_bm;
+	GT_VTU_ENTRY vtu_entry;
+	unsigned int found = GT_TRUE;
+	unsigned int port_idx;
+	unsigned int is_vlan_member = 0;
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	/* Do nothing if the port does not in this VLAN */
+	port_bm = (unsigned int)(1 << lport);
+	if (!(sw_vlan_tbl[vid].port_bm & port_bm)) {
+		pr_err("%s(%d) port(%d) is not in VLAN(%d)\n", __func__, __LINE__, lport, vid);
+		return MV_OK;
+	}
+
+	/* Find VTU entry in SW cache */
+	memset(&vtu_entry, 0, sizeof(GT_VTU_ENTRY));
+	vtu_entry.vid = vid;
+	rc = mv_switch_vtu_entry_find(&vtu_entry, &found);
+	SW_IF_ERROR_STR(rc, "failed to call mv_switch_vtu_entry_find()\n");
+
+	/* Step 1. Mark the lport as NOT_A_MEMBER. */
+	vtu_entry.vtuData.memberTagP[lport] = NOT_A_MEMBER;
+
+	/* 2. Search whether a secure port is a member of the VLAN */
+	for (port_idx = 0; port_idx < qd_dev->numOfPorts; port_idx++) {
+		if ((vtu_entry.vtuData.memberTagP[port_idx] != NOT_A_MEMBER) &&
+			((sw_port_tbl[port_idx].port_mode == GT_SECURE) ||
+			(sw_port_tbl[port_idx].vlan_blong[vid] == MV_SWITCH_PORT_BELONG))) {
+			is_vlan_member = 1;
+			break;
+	    }
+	}
+
+	/* Update the VTU entry */
+	if (is_vlan_member) {
+		/* Add HW VTU entry */
+		rc = gvtuAddEntry(qd_dev, &vtu_entry);
+		SW_IF_ERROR_STR(rc, "failed to call mv_switch_vtu_entry_find()\n");
+
+		/* Record HW VTU entry info to sw_vlan_tbl */
+		rc = mv_switch_vtu_entry_save(&vtu_entry);
+		SW_IF_ERROR_STR(rc, "failed to call mv_switch_vtu_entry_save()\n");
+
+		/* Delete port from VID DB */
+		sw_vlan_tbl[vid].port_bm &= ~port_bm;
+	} else {
+		/* Delete the VTU entry */
+		rc = gvtuDelEntry(qd_dev, &vtu_entry);
+		SW_IF_ERROR_STR(rc, "failed to call gvtuDelEntry()\n");
+
+		sw_vlan_tbl[vid].port_bm = 0;
+	}
+
+	if (sw_port_tbl[lport].vlan_blong[vid] == MV_SWITCH_PORT_BELONG) {
+		sw_port_tbl[lport].vlan_blong[vid] = MV_SWITCH_PORT_NOT_BELONG;
+		sw_vlan_tbl[vid].egr_mode[lport] = NOT_A_MEMBER;
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_vid_get
+*
+* DESCRIPTION:
+*	The API get VID information.
+*
+* INPUTS:
+*	vid       - VLAN ID.
+*
+* OUTPUTS:
+*	vtu_entry - VTU entry.
+*	found     - MV_TRUE, if the appropriate entry exists.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_vid_get(unsigned int vid, GT_VTU_ENTRY *vtu_entry, unsigned int *found)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	memset(vtu_entry, 0, sizeof(GT_VTU_ENTRY));
+	vtu_entry->vid = vid;
+	rc = gvtuFindVidEntry(qd_dev, vtu_entry, found);
+	SW_IF_ERROR_STR(rc, "failed to call gvtuFindVidEntry()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_vid_egress_mode_set
+*
+* DESCRIPTION:
+*	The API sets the egress mode for a member port of a vlan.
+*
+* INPUTS:
+*	lport    - logical switch port ID.
+*       vid      - vlan id.
+*       egr_mode - egress mode.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*
+* COMMENTS:
+*       MEMBER_EGRESS_UNMODIFIED - 0
+*       NOT_A_MEMBER             - 1
+*       MEMBER_EGRESS_UNTAGGED   - 2
+*       MEMBER_EGRESS_TAGGED     - 3
+*
+*******************************************************************************/
+int mv_switch_port_vid_egress_mode_set(unsigned int lport, unsigned short vid, unsigned char egr_mode)
+{
+	GT_STATUS    rc = GT_OK;
+	GT_VTU_ENTRY vtu_entry;
+	GT_BOOL      found = GT_FALSE;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	/* If the port is the member of vlan, set */
+	if (sw_vlan_tbl[vid].port_bm & (1 << lport)) {
+		sw_vlan_tbl[vid].egr_mode[lport] = egr_mode;
+
+		memset(&vtu_entry, 0, sizeof(GT_VTU_ENTRY));
+		vtu_entry.vid = vid;
+
+		rc = mv_switch_vtu_entry_find(&vtu_entry, &found);
+		if (rc != GT_OK && rc != GT_NO_SUCH) {
+			pr_err("%s(%d) rc(%d) failed to call mv_switch_vtu_entry_find()\n",
+			       __func__, __LINE__, rc);
+
+			return MV_FAIL;
+		}
+
+		vtu_entry.vtuData.memberTagP[lport] = egr_mode;
+
+		rc = gvtuAddEntry(qd_dev, &vtu_entry);
+		SW_IF_ERROR_STR(rc, "failed to call gvtuAddEntry()\n");
+
+		/* Record HW VT entry info to sw_vlan_tbl */
+		rc = mv_switch_vtu_entry_save(&vtu_entry);
+		SW_IF_ERROR_STR(rc, "failed to call mv_switch_vtu_entry_save()\n");
+	} else {
+		pr_err("%s(%d) port(%d) is not the member of vlan(%d)\n",
+		       __func__, __LINE__, lport, vid);
+		return MV_FAIL;
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_unknown_unicast_flood_set
+*
+* DESCRIPTION:
+*	This routine enable/disable unknown unicast frame egress on a specific port.
+*
+* INPUTS:
+*	lport   - logical switch port ID.
+*	enable  - Enable unknown unicast flooding.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_unknown_unicast_flood_set(unsigned char lport, GT_BOOL enable)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gprtSetForwardUnknown(qd_dev, lport, enable);
+	SW_IF_ERROR_STR(rc, "failed to call gprtSetForwardUnknown()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_unknown_unicast_flood_get
+*
+* DESCRIPTION:
+*       This routine gets unknown unicast frame egress mode of a specific port.
+*
+* INPUTS:
+*	lport   - logical switch port ID.
+*
+* OUTPUTS:
+*	enable  - Enable unknown unicast flooding.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_unknown_unicast_flood_get(unsigned char lport, GT_BOOL *enable)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gprtGetForwardUnknown(qd_dev, lport, enable);
+	SW_IF_ERROR_STR(rc, "failed to call gprtGetForwardUnknown()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_unknown_multicast_flood_set
+*
+* DESCRIPTION:
+*	This routine enable/disable unknown multicast frame egress on a specific port.
+*
+* INPUTS:
+*	lport   - logical switch port ID.
+*	enable  - Enable unknown multicast flooding.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_unknown_multicast_flood_set(unsigned char lport, GT_BOOL enable)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gprtSetDefaultForward(qd_dev, lport, enable);
+	SW_IF_ERROR_STR(rc, "failed to call gprtSetDefaultForward()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_unknown_multicast_flood_get
+*
+* DESCRIPTION:
+*	This routine gets unknown multicast frame egress mode of a specific port.
+*
+* INPUTS:
+*	lport   - logical switch port ID.
+*
+* OUTPUTS:
+*	enable  - Enable unknown multicast flooding.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_unknown_multicast_flood_get(unsigned char lport, GT_BOOL *enable)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gprtGetDefaultForward(qd_dev, lport, enable);
+	SW_IF_ERROR_STR(rc, "failed to call gprtGetDefaultForward()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_broadcast_flood_set
+*
+* DESCRIPTION:
+*	This routine decides whether the switch always floods the broadcast
+*	frames to all portsr or uses the multicast egress mode (per port).
+*
+* INPUTS:
+*	enable - enable broadcast flooding regardless the multicast egress mode.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_broadcast_flood_set(GT_BOOL enable)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gsysSetFloodBC(qd_dev, enable);
+	SW_IF_ERROR_STR(rc, "failed to call gsysSetFloodBC()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_broadcast_flood_get
+*
+* DESCRIPTION:
+*	This routine gets the global mode of broadcast flood.
+*
+* INPUTS:
+*	None.
+*
+* OUTPUTS:
+*	enable - always floods the broadcast regardless the multicast egress mode.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_broadcast_flood_get(GT_BOOL *enable)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gsysGetFloodBC(qd_dev, enable);
+	SW_IF_ERROR_STR(rc, "failed to call gsysGetFloodBC()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_count3_get
+*
+* DESCRIPTION:
+*	This function gets all counter 3 of the given port
+*
+* INPUTS:
+*	lport - logical switch port ID.
+*
+* OUTPUTS:
+*	count - all port counter 3.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*
+* COMMENTS:
+*	Clear on read.
+*******************************************************************************/
+int mv_switch_port_count3_get(unsigned int lport, GT_STATS_COUNTER_SET3 *count)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gstatsGetPortAllCounters3(qd_dev, lport, count);
+	SW_IF_ERROR_STR(rc, "failed to call gstatsGetPortAllCounters3()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_drop_count_get
+*
+* DESCRIPTION:
+*	This function gets the port InDiscards, InFiltered, and OutFiltered counters.
+*
+* INPUTS:
+*	lport - logical switch port ID.
+*
+* OUTPUTS:
+*	count - all port dropped counter.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*
+* COMMENTS:
+*	Clear on read.
+*******************************************************************************/
+int mv_switch_port_drop_count_get(unsigned int lport, GT_PORT_STAT2 *count)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gprtGetPortCtr2(qd_dev, lport, count);
+	SW_IF_ERROR_STR(rc, "failed to call gprtGetPortCtr2()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_count_clear
+*
+* DESCRIPTION:
+*	This function clean all counters of the given port
+*
+* INPUTS:
+*	lport - logical switch port ID.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_count_clear(unsigned int lport)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gstatsFlushPort(qd_dev, lport);
+	SW_IF_ERROR_STR(rc, "failed to call gstatsFlushPort()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_count_clear
+*
+* DESCRIPTION:
+*	This function gets all counters of the given port
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_count_clear(void)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gstatsFlushAll(qd_dev);
+	SW_IF_ERROR_STR(rc, "failed to call gstatsFlushAll()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_ingr_limit_mode_set
+*
+* DESCRIPTION:
+*	This routine sets the port's rate control ingress limit mode.
+*
+* INPUTS:
+*	lport - logical switch port ID.
+*	mode  - rate control ingress limit mode.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*
+* COMMENTS:
+*	GT_LIMT_ALL = 0,        limit and count all frames
+*	GT_LIMIT_FLOOD,         limit and count Broadcast, Multicast and flooded unicast frames
+*	GT_LIMIT_BRDCST_MLTCST, limit and count Broadcast and Multicast frames
+*	GT_LIMIT_BRDCST         limit and count Broadcast frames
+*
+*******************************************************************************/
+int mv_switch_ingr_limit_mode_set(unsigned int lport, GT_RATE_LIMIT_MODE mode)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = grcSetLimitMode(qd_dev, lport, mode);
+	SW_IF_ERROR_STR(rc, "failed to call grcSetLimitMode()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_ingr_limit_mode_get
+*
+* DESCRIPTION:
+*	This routine gets the port's rate control ingress limit mode.
+*
+* INPUTS:
+*	lport - logical switch port ID.
+*
+* OUTPUTS:
+*	mode  - rate control ingress limit mode.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*
+* COMMENTS:
+*	GT_LIMT_ALL = 0,        limit and count all frames
+*	GT_LIMIT_FLOOD,         limit and count Broadcast, Multicast and flooded unicast frames
+*	GT_LIMIT_BRDCST_MLTCST, limit and count Broadcast and Multicast frames
+*	GT_LIMIT_BRDCST         limit and count Broadcast frames
+*
+*******************************************************************************/
+int mv_switch_ingr_limit_mode_get(unsigned int lport, GT_RATE_LIMIT_MODE *mode)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = grcGetLimitMode(qd_dev, lport, mode);
+	SW_IF_ERROR_STR(rc, "failed to call grcSetLimitMode()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_ingr_police_rate_get
+*
+* DESCRIPTION:
+*	The API gets the ingress policing rate for given switch port.
+*
+* INPUTS:
+*	lport      - logical switch port ID.
+*
+* OUTPUTS:
+*	count_mode - policing rate count mode:
+*			GT_PIRL2_COUNT_FRAME = 0
+*			GT_PIRL2_COUNT_ALL_LAYER1
+*			GT_PIRL2_COUNT_ALL_LAYER2
+*			GT_PIRL2_COUNT_ALL_LAYER3
+*	cir        - committed infomation rate.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_ingr_police_rate_get(unsigned int		lport,
+				   GT_PIRL2_COUNT_MODE	*count_mode,
+				   unsigned int		*cir)
+{
+	GT_PIRL2_DATA	pirl_2_Data;
+	GT_U32		irl_unit;
+	GT_STATUS	rc = GT_OK;
+
+	/* IRL Unit 0 - bucket to be used (0 ~ 4) */
+	irl_unit =  0;
+	memset(&pirl_2_Data, 0, sizeof(GT_PIRL2_DATA));
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gpirl2ReadResource(qd_dev, lport, irl_unit, &pirl_2_Data);
+	SW_IF_ERROR_STR(rc, "failed to call gpirl2ReadResource()\n");
+
+	*count_mode	= pirl_2_Data.byteTobeCounted;
+	*cir		= pirl_2_Data.ingressRate;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_egr_rate_limit_set
+*
+* DESCRIPTION:
+*	The API Configures the egress frame rate limit of logical port.
+* INPUTS:
+*	lport - logical switch port ID.
+*	mode  - egress rate limit mode.
+*       rate  - egress rate limit value.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*
+* COMMENTS:
+*	GT_ERATE_TYPE used kbRate - frame rate valid values are:
+*	7600,..., 9600,
+*	10000, 20000, 30000, 40000, ..., 100000,
+*	110000, 120000, 130000, ..., 1000000.
+*******************************************************************************/
+int mv_switch_egr_rate_limit_set(unsigned int lport, GT_PIRL_ELIMIT_MODE mode, unsigned int rate)
+{
+	GT_ERATE_TYPE	fRate;
+	GT_STATUS	rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	fRate.fRate  = rate;
+	fRate.kbRate = rate;
+
+	rc = grcSetELimitMode(qd_dev, lport, mode);
+	SW_IF_ERROR_STR(rc, "failed to call grcSetELimitMode()\n");
+
+
+	rc = grcSetEgressRate(qd_dev, lport, &fRate);
+	SW_IF_ERROR_STR(rc, "failed to call grcSetEgressRate()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_egr_rate_limit_get
+*
+* DESCRIPTION:
+*	The API return the egress frame rate limit of logical port
+* INPUTS:
+*	lport - logical switch port ID.
+*
+* OUTPUTS:
+*	mode  - egress rate limit mode.
+*       rate  - egress rate limit value.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*
+* COMMENTS:
+*	GT_ERATE_TYPE used kbRate - frame rate valid values are:
+*	7600,..., 9600,
+*	10000, 20000, 30000, 40000, ..., 100000,
+*	110000, 120000, 130000, ..., 1000000.
+*******************************************************************************/
+int mv_switch_egr_rate_limit_get(unsigned int lport, GT_PIRL_ELIMIT_MODE *mode, unsigned int *rate)
+{
+	GT_ERATE_TYPE	fRate;
+	GT_STATUS		rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = grcGetELimitMode(qd_dev, lport, mode);
+	SW_IF_ERROR_STR(rc, "failed to call grcGetELimitMode()\n");
+
+	rc = grcGetEgressRate(qd_dev, lport, &fRate);
+	SW_IF_ERROR_STR(rc, "failed to call grcGetEgressRate()\n");
+
+	if (mode == GT_PIRL_ELIMIT_FRAME) {
+		/* frame based limit */
+		*rate = fRate.fRate;
+	} else {
+		/* rate based limit */
+		*rate = fRate.kbRate;
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_ingr_broadcast_rate_get
+*
+* DESCRIPTION:
+*	The API gets the ingress broacast rate for given switch port.
+*
+* INPUTS:
+*	lport      - logical switch port ID.
+*
+* OUTPUTS:
+*	count_mode - policing rate count mode:
+*			GT_PIRL2_COUNT_FRAME = 0
+*			GT_PIRL2_COUNT_ALL_LAYER1
+*			GT_PIRL2_COUNT_ALL_LAYER2
+*			GT_PIRL2_COUNT_ALL_LAYER3
+*	cir        - committed infomation rate.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_ingr_broadcast_rate_get(unsigned int		lport,
+				   GT_PIRL2_COUNT_MODE	*count_mode,
+				   unsigned int		*cir)
+{
+	GT_PIRL2_DATA	pirl_2_Data;
+	GT_U32		irl_unit;
+	GT_STATUS	rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	/* IRL Unit 0 - bucket to be used (0 ~ 4) */
+	irl_unit =  MV_SWITCH_PIRL_RESOURCE_BROADCAST;
+	memset(&pirl_2_Data, 0, sizeof(GT_PIRL2_DATA));
+
+	rc = gpirl2ReadResource(qd_dev, lport, irl_unit, &pirl_2_Data);
+	SW_IF_ERROR_STR(rc, "failed to call gpirl2ReadResource()\n");
+
+	*count_mode	= pirl_2_Data.byteTobeCounted;
+	*cir		= pirl_2_Data.ingressRate;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_ingr_multicast_rate_get
+*
+* DESCRIPTION:
+*	The API gets the ingress broacast rate for given switch port.
+*
+* INPUTS:
+*	lport      - logical switch port ID.
+*
+* OUTPUTS:
+*	count_mode - policing rate count mode:
+*			GT_PIRL2_COUNT_FRAME = 0
+*			GT_PIRL2_COUNT_ALL_LAYER1
+*			GT_PIRL2_COUNT_ALL_LAYER2
+*			GT_PIRL2_COUNT_ALL_LAYER3
+*	cir        - committed infomation rate.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_ingr_multicast_rate_get(unsigned int		lport,
+				   GT_PIRL2_COUNT_MODE	*count_mode,
+				   unsigned int		*cir)
+{
+	GT_PIRL2_DATA	pirl_2_Data;
+	GT_U32		irl_unit;
+	GT_STATUS	rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	/* IRL Unit 0 - bucket to be used (0 ~ 4) */
+	irl_unit =  MV_SWITCH_PIRL_RESOURCE_MULTICAST;
+	memset(&pirl_2_Data, 0, sizeof(GT_PIRL2_DATA));
+
+	rc = gpirl2ReadResource(qd_dev, lport, irl_unit, &pirl_2_Data);
+	SW_IF_ERROR_STR(rc, "failed to call gpirl2ReadResource()\n");
+
+	*count_mode	= pirl_2_Data.byteTobeCounted;
+	*cir		= pirl_2_Data.ingressRate;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_mirror_set
+*
+* DESCRIPTION:
+*	Set port mirror.
+*
+* INPUTS:
+*	sport  - Source port.
+*	mode   - mirror mode.
+*	enable - enable/disable mirror.
+*	dport  - Destination port.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_mirror_set(unsigned int sport, enum sw_mirror_mode_t mode, GT_BOOL enable, unsigned int dport)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	if (mode == MV_SWITCH_MIRROR_INGRESS) {
+		if (enable == GT_TRUE) {
+			/* set ingress monitor source */
+			rc = gprtSetIngressMonitorSource(qd_dev, sport, GT_TRUE);
+			SW_IF_ERROR_STR(rc, "failed to call gprtSetIngressMonitorSource()\n");
+
+			/* set ingress monitor destination */
+			rc = gsysSetIngressMonitorDest(qd_dev, dport);
+			SW_IF_ERROR_STR(rc, "failed to call gsysSetIngressMonitorDest()\n");
+		} else {
+			/*disable ingress monitor source */
+			rc = gprtSetIngressMonitorSource(qd_dev, sport, GT_FALSE);
+			SW_IF_ERROR_STR(rc, "failed to call gprtSetIngressMonitorSource()\n");
+		}
+	} else if (mode == MV_SWITCH_MIRROR_EGRESS) {
+		if (enable == GT_TRUE) {
+			/* enable egress monitor source */
+			rc = gprtSetEgressMonitorSource(qd_dev, sport, GT_TRUE);
+			SW_IF_ERROR_STR(rc, "failed to call gprtSetEgressMonitorSource()\n");
+
+			/* set egress monitor destination */
+			rc = gsysSetEgressMonitorDest(qd_dev, dport);
+			SW_IF_ERROR_STR(rc, "failed to call gsysSetEgressMonitorDest()\n");
+		} else {
+			/* disable egress monitor source */
+			rc = gprtSetEgressMonitorSource(qd_dev, sport, GT_FALSE);
+			SW_IF_ERROR_STR(rc, "failed to call gprtSetEgressMonitorSource()\n");
+		}
+	} else if (mode ==  MV_SWITCH_MIRROR_BOTH) {
+		if (enable == GT_TRUE) {
+			/* enable egress monitor source */
+			rc = gprtSetIngressMonitorSource(qd_dev, sport, GT_TRUE);
+			SW_IF_ERROR_STR(rc, "failed to call gprtSetIngressMonitorSource()\n");
+
+			/* set ingress monitor destination */
+			rc = gsysSetIngressMonitorDest(qd_dev, dport);
+			SW_IF_ERROR_STR(rc, "failed to call gsysSetIngressMonitorDest()\n");
+
+			/* enable egress monitor source */
+			rc = gprtSetEgressMonitorSource(qd_dev, sport, GT_TRUE);
+			SW_IF_ERROR_STR(rc, "failed to call gprtSetEgressMonitorSource()\n");
+
+			/* set egress monitor destination */
+			rc = gsysSetEgressMonitorDest(qd_dev, dport);
+			SW_IF_ERROR_STR(rc, "failed to call gsysSetEgressMonitorDest()\n");
+		} else {
+			/*disable ingress monitor source */
+			rc = gprtSetIngressMonitorSource(qd_dev, sport, GT_FALSE);
+			SW_IF_ERROR_STR(rc, "failed to call gprtSetIngressMonitorSource()\n");
+
+			/* disable egress monitor source */
+			rc = gprtSetEgressMonitorSource(qd_dev, sport, GT_FALSE);
+			SW_IF_ERROR_STR(rc, "failed to call gprtSetEgressMonitorSource()\n");
+		}
+	} else {
+		pr_err("illegal port mirror dir(%d)\n", mode);
+		return MV_FAIL;
+	}
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_mirror_get
+*
+* DESCRIPTION:
+*	Get port mirror status.
+*
+* INPUTS:
+*	sport  - Source port.
+*	mode   - mirror mode.
+*
+* OUTPUTS:
+*	enable - enable/disable mirror.
+*	dport  - Destination port.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_mirror_get(unsigned int sport, enum sw_mirror_mode_t mode, GT_BOOL *enable, unsigned int *dport)
+{
+	GT_LPORT port;
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	if (mode == MV_SWITCH_MIRROR_INGRESS) {
+		/* Get ingress monitor source status */
+		rc = gprtGetIngressMonitorSource(qd_dev, (GT_LPORT)sport, enable);
+		SW_IF_ERROR_STR(rc, "failed to call gprtGetIngressMonitorSource()\n");
+
+		/* Get ingress destination port */
+		rc = gsysGetIngressMonitorDest(qd_dev, &port);
+		SW_IF_ERROR_STR(rc, "failed to call gsysGetIngressMonitorDest()\n");
+		*dport = port;
+
+	} else if (mode == MV_SWITCH_MIRROR_EGRESS) {
+		/* Get egress monitor source status */
+		rc = gprtGetEgressMonitorSource(qd_dev, (GT_LPORT)sport, enable);
+		SW_IF_ERROR_STR(rc, "failed to call gprtGetEgressMonitorSource()\n");
+
+		/* Get egress destination port */
+		rc = gsysGetEgressMonitorDest(qd_dev, &port);
+		SW_IF_ERROR_STR(rc, "failed to call gsysGetEgressMonitorDest()\n");
+		*dport = port;
+
+	} else {
+		pr_err("illegal port mirror dir(%d)\n", mode);
+		return MV_FAIL;
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_age_time_set
+*
+* DESCRIPTION:
+*	This function sets the MAC address aging time.
+*
+* INPUTS:
+*	time - aging time value.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_age_time_set(unsigned int time)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gfdbSetAgingTimeout(qd_dev, time);
+	SW_IF_ERROR_STR(rc, "failed to call gfdbSetAgingTimeout()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_age_time_get
+*
+* DESCRIPTION:
+*	This function gets the MAC address aging time.
+*
+* INPUTS:
+*	None.
+*
+* OUTPUTS:
+*	time - MAC aging time.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_age_time_get(unsigned int *time)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gfdbGetAgingTimeout(qd_dev, (GT_U32 *)time);
+	SW_IF_ERROR_STR(rc, "failed to call gfdbGetAgingTimeout()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_mac_learn_disable_set
+*
+* DESCRIPTION:
+*	Enable/disable automatic learning of new source MAC addresses on port
+*	ingress direction.
+*
+* INPUTS:
+*	lport  - logical switch port ID.
+*	enable - enable/disable MAC learning.
+*		GT_TRUE: disable MAC learning
+*		GT_FALSE: enable MAC learning
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_mac_learn_disable_set(unsigned int lport, GT_BOOL enable)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gprtSetLearnDisable(qd_dev, lport, enable);
+	SW_IF_ERROR_STR(rc, "failed to call gprtSetLearnDisable()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_mac_learn_disable_get
+*
+* DESCRIPTION:
+*	Get automatic learning status of new source MAC addresses on port ingress.
+*
+* INPUTS:
+*	lport  - logical switch port ID.
+*
+* OUTPUTS:
+*	enable - enable/disable MAC learning.
+*		GT_TRUE: disable MAC learning
+*		GT_FALSE: enable MAC learning
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_mac_learn_disable_get(unsigned int lport, GT_BOOL *enable)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gprtGetLearnDisable(qd_dev, lport, enable);
+	SW_IF_ERROR_STR(rc, "failed to call gprtGetLearnDisable()\n");
+
+	return MV_OK;
+}
+#ifdef CONFIG_ARCH_AVANTA_LP
+
+/*******************************************************************************
+* mv_switch_ingr_police_rate_set
+*
+* DESCRIPTION:
+*	The API configures the ingress policing rate for given switch port.
+*
+* INPUTS:
+*	lport      - logical switch port ID.
+*	count_mode - policing rate count mode:
+*			GT_PIRL2_COUNT_FRAME = 0
+*			GT_PIRL2_COUNT_ALL_LAYER1
+*			GT_PIRL2_COUNT_ALL_LAYER2
+*			GT_PIRL2_COUNT_ALL_LAYER3
+*	cir        - committed infomation rate.
+*	bktTypeMask - ingress packet type mask
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_ingr_police_rate_set(unsigned int	lport,
+				   GT_PIRL2_COUNT_MODE	count_mode,
+				   unsigned int		cir,
+				   GT_U32		bktTypeMask)
+{
+	GT_U32		irlRes;
+	GT_PIRL2_DATA	pirl_2_Data;
+	GT_BOOL		pause_state;
+	GT_STATUS	rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	memset(&pirl_2_Data, 0, sizeof(pirl_2_Data));
+
+	irlRes = 0;
+
+	/* configure cir, count_mode */
+	pirl_2_Data.ingressRate		= cir;
+	pirl_2_Data.customSetup.isValid	= GT_FALSE;
+	pirl_2_Data.accountQConf	= GT_FALSE;
+	pirl_2_Data.accountFiltered	= GT_TRUE;
+	pirl_2_Data.mgmtNrlEn		= GT_TRUE;
+	pirl_2_Data.saNrlEn		= GT_FALSE;
+	pirl_2_Data.daNrlEn		= GT_FALSE;
+	pirl_2_Data.samplingMode	= GT_FALSE;
+	pirl_2_Data.actionMode		= PIRL_ACTION_USE_LIMIT_ACTION;
+
+	/* decide which mode to adopt when deal with overload traffic.
+	*  If pause state is ON, select FC mode, otherwize select drop mode.
+	*/
+	rc = mv_phy_port_pause_state_get(lport, &pause_state);
+	SW_IF_ERROR_STR(rc, "failed to call mv_phy_port_pause_state_get()\n");
+	if (pause_state == GT_TRUE)
+		pirl_2_Data.ebsLimitAction = ESB_LIMIT_ACTION_FC;
+	else
+		pirl_2_Data.ebsLimitAction = ESB_LIMIT_ACTION_DROP;
+
+	pirl_2_Data.fcDeassertMode	= GT_PIRL_FC_DEASSERT_EMPTY;
+	pirl_2_Data.bktRateType		= BUCKET_TYPE_TRAFFIC_BASED;
+	pirl_2_Data.priORpt		= GT_TRUE;
+	pirl_2_Data.priMask		= 0;
+	pirl_2_Data.bktTypeMask		= bktTypeMask;
+	pirl_2_Data.byteTobeCounted	= count_mode;
+
+	rc = gpirl2WriteResource(qd_dev, lport, irlRes, &pirl_2_Data);
+	SW_IF_ERROR_STR(rc, "failed to call gpirl2WriteResource()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_ingr_broadcast_rate_set
+*
+* DESCRIPTION:
+*	The API configures the ingress broadcast rate for given switch port.
+*
+* INPUTS:
+*	lport      - logical switch port ID.
+*	count_mode - policing rate count mode:
+*			GT_PIRL2_COUNT_FRAME = 0
+*			GT_PIRL2_COUNT_ALL_LAYER1
+*			GT_PIRL2_COUNT_ALL_LAYER2
+*			GT_PIRL2_COUNT_ALL_LAYER3
+*	cir        - committed infomation rate.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_ingr_broadcast_rate_set(unsigned int		lport,
+					GT_PIRL2_COUNT_MODE	count_mode,
+					unsigned int	cir)
+{
+	GT_U32		irlRes;
+	GT_PIRL2_DATA	pirl_2_Data;
+	GT_BOOL		pause_state;
+	GT_STATUS	rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	memset(&pirl_2_Data, 0, sizeof(pirl_2_Data));
+
+	irlRes = MV_SWITCH_PIRL_RESOURCE_BROADCAST;
+
+	/* configure cir, count_mode */
+	pirl_2_Data.ingressRate		= cir;
+	pirl_2_Data.customSetup.isValid	= GT_FALSE;
+	pirl_2_Data.accountQConf	= GT_FALSE;
+	pirl_2_Data.accountFiltered	= GT_TRUE;
+	pirl_2_Data.mgmtNrlEn		= GT_TRUE;
+	pirl_2_Data.saNrlEn		= GT_FALSE;
+	pirl_2_Data.daNrlEn		= GT_FALSE;
+	pirl_2_Data.samplingMode	= GT_FALSE;
+	pirl_2_Data.actionMode		= PIRL_ACTION_USE_LIMIT_ACTION;
+
+	/* decide which mode to adopt when deal with overload traffic.
+	*  If pause state is ON, select FC mode, otherwize select drop mode.
+	*/
+	rc = mv_phy_port_pause_state_get(lport, &pause_state);
+	SW_IF_ERROR_STR(rc, "failed to call mv_phy_port_pause_state_get()\n");
+	if (pause_state == GT_TRUE)
+		pirl_2_Data.ebsLimitAction = ESB_LIMIT_ACTION_FC;
+	else
+		pirl_2_Data.ebsLimitAction = ESB_LIMIT_ACTION_DROP;
+
+	pirl_2_Data.fcDeassertMode	= GT_PIRL_FC_DEASSERT_EMPTY;
+	pirl_2_Data.bktRateType		= BUCKET_TYPE_TRAFFIC_BASED;
+	pirl_2_Data.priORpt		= GT_TRUE;
+	pirl_2_Data.priMask		= 0;
+	pirl_2_Data.bktTypeMask		= (1 << MV_SWITCH_PIRL_BKTTYPR_BROADCAST_BIT);
+	pirl_2_Data.byteTobeCounted	= count_mode;
+
+	rc = gpirl2WriteResource(qd_dev, lport, irlRes, &pirl_2_Data);
+	SW_IF_ERROR_STR(rc, "failed to call gpirl2WriteResource()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_ingr_multicast_rate_set
+*
+* DESCRIPTION:
+*	The API configures the ingress broadcast rate for given switch port.
+*
+* INPUTS:
+*	lport      - logical switch port ID.
+*	count_mode - policing rate count mode:
+*			GT_PIRL2_COUNT_FRAME = 0
+*			GT_PIRL2_COUNT_ALL_LAYER1
+*			GT_PIRL2_COUNT_ALL_LAYER2
+*			GT_PIRL2_COUNT_ALL_LAYER3
+*	cir        - committed infomation rate.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_ingr_multicast_rate_set(unsigned int		lport,
+				   GT_PIRL2_COUNT_MODE	count_mode,
+				   unsigned int cir)
+{
+	GT_U32		irlRes;
+	GT_PIRL2_DATA	pirl_2_Data;
+	GT_BOOL		pause_state;
+	GT_STATUS	rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	memset(&pirl_2_Data, 0, sizeof(pirl_2_Data));
+
+	irlRes = MV_SWITCH_PIRL_RESOURCE_MULTICAST;
+
+	/* configure cir, count_mode */
+	pirl_2_Data.ingressRate		= cir;
+	pirl_2_Data.customSetup.isValid	= GT_FALSE;
+	pirl_2_Data.accountQConf	= GT_FALSE;
+	pirl_2_Data.accountFiltered	= GT_TRUE;
+	pirl_2_Data.mgmtNrlEn		= GT_TRUE;
+	pirl_2_Data.saNrlEn		= GT_FALSE;
+	pirl_2_Data.daNrlEn		= GT_FALSE;
+	pirl_2_Data.samplingMode	= GT_FALSE;
+	pirl_2_Data.actionMode		= PIRL_ACTION_USE_LIMIT_ACTION;
+
+	/* decide which mode to adopt when deal with overload traffic.
+	*  If pause state is ON, select FC mode, otherwize select drop mode.
+	*/
+	rc = mv_phy_port_pause_state_get(lport, &pause_state);
+	SW_IF_ERROR_STR(rc, "failed to call mv_phy_port_pause_state_get()\n");
+	if (pause_state == GT_TRUE)
+		pirl_2_Data.ebsLimitAction = ESB_LIMIT_ACTION_FC;
+	else
+		pirl_2_Data.ebsLimitAction = ESB_LIMIT_ACTION_DROP;
+
+	pirl_2_Data.fcDeassertMode	= GT_PIRL_FC_DEASSERT_EMPTY;
+	pirl_2_Data.bktRateType		= BUCKET_TYPE_TRAFFIC_BASED;
+	pirl_2_Data.priORpt		= GT_TRUE;
+	pirl_2_Data.priMask		= 0;
+	pirl_2_Data.bktTypeMask		= ((1 << MV_SWITCH_PIRL_BKTTYPR_MULTICAST_BIT)
+		| (1 << MV_SWITCH_PIRL_BKTTYPR_UNKNOWN_MULTICAST_BIT));
+	pirl_2_Data.byteTobeCounted	= count_mode;
+
+	rc = gpirl2WriteResource(qd_dev, lport, irlRes, &pirl_2_Data);
+	SW_IF_ERROR_STR(rc, "failed to call gpirl2WriteResource()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_queue_weight_set
+*
+* DESCRIPTION:
+*	The API configures the weight of a queues for all
+*	Ethernet UNI ports in the integrated switch.
+*
+* INPUTS:
+*	lport  - logical switch port ID.
+*	queue  - switch queue, ranging from 0 to 3.
+*	weight - weight value per queue (1-8).
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_queue_weight_set(unsigned int lport, unsigned char queue, unsigned char weight)
+{
+	unsigned int len = 0;
+	unsigned int offset = 0;
+	unsigned int idx;
+	GT_QoS_WEIGHT l_weight;
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	/* get weight information at first */
+	rc = gsysGetQoSWeight(qd_dev, &l_weight);
+	SW_IF_ERROR_STR(rc, "failed to call gsysGetQoSWeight()\n");
+
+	offset = MV_SWITCH_MAX_QUEUE_NUM*lport + queue;
+	if (offset >= MAX_QOS_WEIGHTS) {
+		pr_err("%s offset(%d) is out of range\n", __func__, offset);
+		return MV_FAIL;
+	}
+
+	/* Update queue weight */
+	if ((offset+1) <= l_weight.len) {
+		l_weight.queue[offset] = weight;
+		len = l_weight.len;
+	} else {
+		for (idx = l_weight.len; idx < (offset/MV_SWITCH_MAX_QUEUE_NUM+1)*MV_SWITCH_MAX_QUEUE_NUM; idx++) {
+			if (idx == offset)
+				l_weight.queue[idx] = weight;
+			else
+				l_weight.queue[idx] = MV_SWITCH_DEFAULT_WEIGHT;
+		}
+
+		len = (offset/MV_SWITCH_MAX_QUEUE_NUM+1)*MV_SWITCH_MAX_QUEUE_NUM;
+	}
+
+	l_weight.len = len;
+
+	rc = gsysSetQoSWeight(qd_dev, &l_weight);
+	SW_IF_ERROR_STR(rc, "failed to call gsysSetQoSWeight()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_queue_weight_get
+*
+* DESCRIPTION:
+*	The API configures the weight of a queues for all
+*	Ethernet UNI ports in the integrated switch.
+*
+* INPUTS:
+*	lport  - logical switch port ID.
+*	queue  - switch queue, ranging from 0 to 3.
+*
+* OUTPUTS:
+*	weight - weight value per queue (1-8).
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_queue_weight_get(unsigned int lport, unsigned char queue, unsigned char *weight)
+{
+	GT_QoS_WEIGHT l_weight;
+	unsigned int offset;
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	/* Get QoS queue information */
+	rc = gsysGetQoSWeight(qd_dev, &l_weight);
+	SW_IF_ERROR_STR(rc, "failed to call gsysSetQoSWeight()\n");
+
+	offset = MV_SWITCH_MAX_QUEUE_NUM*lport + queue;
+	if ((offset + 1) > l_weight.len)
+		*weight = 0;
+	else
+		*weight = l_weight.queue[offset];
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_learn2all_enable_set
+*
+* DESCRIPTION:
+*	Enable/disable learn to all devices
+*
+* INPUTS:
+*	enable - enable/disable learn to all devices.
+*		GT_TRUE: disable MAC learning
+*		GT_FALSE: enable MAC learning
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_learn2all_enable_set(GT_BOOL enable)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gsysSetLearn2All(qd_dev, enable);
+	SW_IF_ERROR_STR(rc, "failed to call gsysSetLearn2All(%d)\n", enable);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_learn2all_enable_get
+*
+* DESCRIPTION:
+*	returns if the learn2all bit status
+*
+* INPUTS:
+*	None.
+*
+* OUTPUTS:
+*	enabled - learn2all enabled/disabled
+*		GT_TRUE: learn2all enabled
+*		GT_FALSE: learn2all disabled
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_learn2all_enable_get(GT_BOOL *enabled)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gsysGetLearn2All(qd_dev, enabled);
+	SW_IF_ERROR_STR(rc, "failed to call gsysSetLearn2All()\n");
+
+	return MV_OK;
+}
+#endif
+/*******************************************************************************
+* mv_switch_mac_limit_set
+*
+* DESCRIPTION:
+*	This function limits the number of MAC addresses per lport.
+*
+* INPUTS:
+*	lport   - logical switch port ID.
+*	mac_num - maximum number of MAC addresses per port (0-255).
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*
+* COMMENTS:
+*	The following care is needed when enabling this feature:
+*		1) disable learning on the ports
+*		2) flush all non-static addresses in the ATU
+*		3) define the desired limit for the ports
+*		4) re-enable learing on the ports
+*******************************************************************************/
+int mv_switch_mac_limit_set(unsigned int lport, unsigned int mac_num)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	/* define the desired limit for the ports */
+	rc = gfdbSetPortAtuLearnLimit(qd_dev, lport, mac_num);
+	SW_IF_ERROR_STR(rc, "failed to call gfdbSetPortAtuLearnLimit()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_mac_limit_get
+*
+* DESCRIPTION
+*	Port's auto learning limit. When the limit is non-zero value, the number
+*	of MAC addresses that can be learned on this lport are limited to the value
+*	specified in this API. When the learn limit has been reached any frame
+*	that ingresses this lport with a source MAC address not already in the
+*	address database that is associated with this lport will be discarded.
+*	Normal auto-learning will resume on the lport as soon as the number of
+*	active unicast MAC addresses associated to this lport is less than the
+*	learn limit.
+*	CPU directed ATU Load, Purge, or Move will not have any effect on the
+*	learn limit.
+*	This feature is disabled when the limit is zero.
+*	The following care is needed when enabling this feature:
+*		1) dsable learning on the ports
+*		2) flush all non-static addresses in the ATU
+*		3) define the desired limit for the ports
+*		4) re-enable learing on the ports
+*
+* INPUTS:
+*	lport   - logical switch port ID.
+*
+* OUTPUTS:
+*	mac_num - maximum number of MAC addresses per port (0-255).
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_mac_limit_get(unsigned int lport, unsigned int *mac_num)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gfdbGetPortAtuLearnLimit(qd_dev, lport, (GT_U32 *)mac_num);
+	SW_IF_ERROR_STR(rc, "failed to call gfdbGetPortAtuLearnLimit()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_mac_addr_add
+*
+* DESCRIPTION:
+*	This function creates a MAC entry in the MAC address table for a
+*	specific lport in the integrated switch
+*
+* INPUTS:
+*	port_bm  - logical switch port bitmap, bit0: switch port 0, bit1: port 1.
+*	mac_addr - 6byte network order MAC source address.
+*	mode     - Static or dynamic mode.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_mac_addr_add(unsigned int port_bm, unsigned char mac_addr[6], unsigned int mode)
+{
+	GT_ATU_ENTRY mac_entry;
+	unsigned int l_port_bm = 0;
+	GT_STATUS    rc = GT_OK;
+	enum sw_mac_addr_type_t type = MV_SWITCH_UNICAST_MAC_ADDR;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	memset(&mac_entry, 0, sizeof(GT_ATU_ENTRY));
+
+	mac_entry.trunkMember			= GT_FALSE;
+	mac_entry.prio				= 0;
+	mac_entry.exPrio.useMacFPri		= 0;
+	mac_entry.exPrio.macFPri		= 0;
+	mac_entry.exPrio.macQPri		= 0;
+	mac_entry.DBNum				= 0;
+	l_port_bm				= port_bm;
+	memcpy(mac_entry.macAddr.arEther, mac_addr, GT_ETHERNET_HEADER_SIZE);
+
+	/* treat broadcast MAC address as multicast one */
+	if (((mac_addr[0] & 0x01) == 0x01) ||
+		((mac_addr[0] == 0x33) && (mac_addr[1] == 0x33))) {
+		type = MV_SWITCH_MULTICAST_MAC_ADDR;
+		l_port_bm |= (1 << qd_dev->cpuPortNum);
+	}
+	mac_entry.portVec = l_port_bm;
+
+	if (type == MV_SWITCH_UNICAST_MAC_ADDR) {
+		if (mode == MV_SWITCH_DYNAMIC_MAC_ADDR)
+			mac_entry.entryState.ucEntryState = GT_UC_DYNAMIC;
+		else
+			mac_entry.entryState.ucEntryState = GT_UC_STATIC;
+	} else {
+		mac_entry.entryState.mcEntryState = GT_MC_STATIC;
+	}
+
+	/* add ATU entry */
+	rc = gfdbAddMacEntry(qd_dev, &mac_entry);
+	SW_IF_ERROR_STR(rc, "failed to call gfdbAddMacEntry()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_mac_addr_del
+*
+* DESCRIPTION:
+*	This function removes an existed MAC entry from the MAC address
+*	table in the integrated switch.
+*
+* INPUTS:
+*	lport    - logical switch port ID.
+*       mac_addr - MAC address.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_mac_addr_del(unsigned int lport, unsigned char mac_addr[6])
+{
+	GT_ATU_ENTRY mac_entry;
+	GT_BOOL      found;
+	GT_BOOL      mc_addr = GT_FALSE;
+	GT_STATUS    rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	/* try to find VTU entry */
+	memset(&mac_entry, 0, sizeof(GT_ATU_ENTRY));
+	memcpy(mac_entry.macAddr.arEther, mac_addr, GT_ETHERNET_HEADER_SIZE);
+	rc = gfdbFindAtuMacEntry(qd_dev, &mac_entry, &found);
+	SW_IF_ERROR_STR(rc, "failed to call gfdbFindAtuMacEntry()\n");
+
+	/* return ok in case no ATU entry is found */
+	if (GT_FALSE == found)
+		return MV_OK;
+
+	/* delete ATU entry */
+	rc = gfdbDelMacEntry(qd_dev, &mac_entry.macAddr);
+	SW_IF_ERROR_STR(rc, "failed to call gfdbDelMacEntry()\n");
+
+	/* treat broadcast MAC address as multicast one */
+	if (((mac_addr[0] & 0x01) == 0x01) ||
+	    ((mac_addr[0] == 0x33) && (mac_addr[1] == 0x33))) {
+		mc_addr = GT_TRUE;
+	}
+
+	/* add ATU again in case there still have other ports */
+	if (((mac_entry.portVec & ~(1 << lport)) && (mc_addr == GT_FALSE)) ||
+	    ((mac_entry.portVec & ~((1 << lport) | (1 << qd_dev->cpuPortNum))) && (mc_addr == GT_TRUE))) {
+		mac_entry.portVec &= ~(1 << lport);
+		rc = gfdbAddMacEntry(qd_dev, &mac_entry);
+		SW_IF_ERROR_STR(rc, "failed to call gfdbAddMacEntry()\n");
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_qos_mode_set()
+*
+* DESCRIPTION:
+*	Configures the scheduling mode per logical port.
+*
+* INPUTS:
+*	lport - logical switch port ID.
+*	mode  - scheduler mode.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_qos_mode_set(unsigned int lport, GT_PORT_SCHED_MODE mode)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gprtSetPortSched(qd_dev, lport, mode);
+	SW_IF_ERROR_STR(rc, "failed to call gprtSetPortSched()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_qos_mode_get()
+*
+* DESCRIPTION:
+*	This API gets the scheduling mode per logical port.
+*
+* INPUTS:
+*	lport - logical switch port ID.
+*
+* OUTPUTS:
+*	mode  - scheduler mode.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_qos_mode_get(unsigned int lport, GT_PORT_SCHED_MODE *mode)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gprtGetPortSched(qd_dev, lport, mode);
+	SW_IF_ERROR_STR(rc, "failed to call gprtGetPortSched()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_mtu_set
+*
+* DESCRIPTION:
+*	Set switch MTU size.
+*
+* INPUTS:
+*	mtu - MTU size.
+*
+* OUTPUTS:
+*	None
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_mtu_set(unsigned int mtu)
+{
+	unsigned int idx;
+	GT_JUMBO_MODE jumbo_mode;
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	/* Set jumbo frames mode */
+	if (mtu < 1522) {
+		pr_err("MTU(%d) will be adjusted to jumbo mode(1522)\n", mtu);
+		jumbo_mode = GT_JUMBO_MODE_1522;
+	} else if (mtu == 1522) {
+		jumbo_mode = GT_JUMBO_MODE_1522;
+	} else if (mtu < 2048) {
+		pr_err("MTU(%d) will be adjusted to jumbo mode(2048)\n", mtu);
+		jumbo_mode = GT_JUMBO_MODE_2048;
+	} else if (mtu == 2048) {
+		jumbo_mode = GT_JUMBO_MODE_2048;
+	} else if (mtu != 10240) {
+		pr_err("MTU(%d) will be adjusted to jumbo mode(10240)\n", mtu);
+		jumbo_mode = GT_JUMBO_MODE_10240;
+	} else {
+		jumbo_mode = GT_JUMBO_MODE_10240;
+	}
+
+	for (idx = 0; idx < qd_dev->numOfPorts; idx++) {
+		/* Set switch MTU */
+		rc = gsysSetJumboMode(qd_dev, idx, jumbo_mode);
+		SW_IF_ERROR_STR(rc, "failed to call gsysSetJumboMode()\n");
+	}
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_mtu_get
+*
+* DESCRIPTION:
+*	Get switch MTU size.
+*
+* INPUTS:
+*	None.
+*
+* OUTPUTS:
+*	mtu - MTU size.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_mtu_get(unsigned int *mtu)
+{
+	GT_JUMBO_MODE jumbo_mode;
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	/* Get switch MTU */
+	rc = gsysGetJumboMode(qd_dev, MV_SWITCH_CPU_PORT_NUM, &jumbo_mode);
+	SW_IF_ERROR_STR(rc, "failed to call gsysGetJumboMode()\n");
+
+	/* Convert jumbo frames mode to MTU size */
+	if (jumbo_mode == GT_JUMBO_MODE_1522)
+		*mtu = 1522;
+	else if (jumbo_mode == GT_JUMBO_MODE_2048)
+		*mtu = 2048;
+	else
+		*mtu = 10240;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_link_state_get
+*
+* DESCRIPTION:
+*	The API return realtime port link state of switch logical port.
+* INPUTS:
+*	lport  - logical switch PHY port ID.
+*
+* OUTPUTS:
+*	state  - realtime port link state.
+*			GT_TRUE: link up
+*			GT_FALSE: link down down
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_link_state_get(unsigned int lport, GT_BOOL *state)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gprtGetLinkState(qd_dev, lport, state);
+	SW_IF_ERROR_STR(rc, "failed to call gprtGetLinkState()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_duplex_state_get
+*
+* DESCRIPTION:
+*	The API return realtime port duplex status of given switch logical port.
+* INPUTS:
+*	lport - logical switch PHY port ID.
+*
+* OUTPUTS:
+*	state - duplex state.
+*		GT_FALSE:half deplex mode
+*		GT_TRUE:full deplex mode					.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_duplex_state_get(unsigned int lport, GT_BOOL *state)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gprtGetDuplex(qd_dev, lport, state);
+	SW_IF_ERROR_STR(rc, "failed to call gprtGetDuplex()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_speed_state_get
+*
+* DESCRIPTION:
+*	The API return realtime port speed mode of given switch logical port.
+* INPUTS:
+*	lport - logical switch PHY port ID.
+*
+* OUTPUTS:
+*	state - speed mode state
+*		0:10M
+*		1:100M
+*		2:1000M
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_speed_state_get(unsigned int lport, GT_PORT_SPEED_MODE *speed)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gprtGetSpeedMode(qd_dev, lport, speed);
+	SW_IF_ERROR_STR(rc, "failed to call gprtGetSpeedMode()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_secure_mode_set
+*
+* DESCRIPTION:
+*	Change a port mode in the SW data base and remove it from all VLANs
+*
+* INPUTS:
+*	lport - logical switch port ID.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_secure_mode_set(unsigned int lport)
+{
+	unsigned int port_bm;
+	unsigned short vlan_idx;
+	GT_STATUS rc = GT_OK;
+
+	sw_port_tbl[lport].port_mode = GT_SECURE;
+
+	port_bm = 1 << lport;
+
+	for (vlan_idx = 0; vlan_idx < MV_SWITCH_MAX_VLAN_NUM; vlan_idx++) {
+		if ((sw_vlan_tbl[vlan_idx].port_bm & port_bm) &&
+		    (sw_port_tbl[lport].vlan_blong[vlan_idx] == MV_SWITCH_PORT_NOT_BELONG)) {
+			rc = mv_switch_port_vid_del(lport, vlan_idx);
+			SW_IF_ERROR_STR(rc, "failed to call mv_switch_port_vid_del()\n");
+
+			sw_vlan_tbl[vlan_idx].egr_mode[lport] = NOT_A_MEMBER;
+		}
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_fallback_mode_set
+*
+* DESCRIPTION:
+*	Change a port mode in the SW data base and add it to all VLANs
+*
+* INPUTS:
+*	lport - logical switch port ID.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_fallback_mode_set(unsigned int lport)
+{
+	unsigned int port_bm;
+	unsigned short vlan_idx;
+	GT_STATUS rc = GT_OK;
+
+	sw_port_tbl[lport].port_mode = GT_FALLBACK;
+
+	port_bm = 1 << lport;
+
+	for (vlan_idx = 0; vlan_idx < MV_SWITCH_MAX_VLAN_NUM; vlan_idx++) {
+		/* If a VLAN has been defined (there is a member in the VLAN) and
+		   the specified port is not a member */
+		if (sw_vlan_tbl[vlan_idx].port_bm &&
+		     !(sw_vlan_tbl[vlan_idx].port_bm & port_bm)) {
+			/* Update VTU table */
+			rc = mv_switch_port_vid_add(lport, vlan_idx, MEMBER_EGRESS_UNMODIFIED, false);
+			SW_IF_ERROR_STR(rc, "failed to call mv_switch_port_vid_add()\n");
+
+			sw_vlan_tbl[vlan_idx].port_bm |= port_bm;
+			sw_vlan_tbl[vlan_idx].egr_mode[lport] = MEMBER_EGRESS_UNMODIFIED;
+		}
+	}
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_vlan_filter_set
+*
+* DESCRIPTION:
+*	The API sets the filtering mode of a certain lport.
+*	If the lport is in filtering mode, only the VIDs added by the
+*	tpm_sw_port_add_vid API will be allowed to ingress and egress the lport.
+*
+* INPUTS:
+*	lport  - logical switch port ID.
+*       filter - set to 1 means the lport will drop all packets which are NOT in
+*		 the allowed VID list (built using API tpm_sw_port_add_vid).
+*		 set to 0 - means that the list of VIDs allowed
+*		 per lport has no significance (the list is not deleted).
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_vlan_filter_set(unsigned int lport, unsigned char filter)
+{
+	GT_DOT1Q_MODE mode;
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	/* Move port to secure mode and removed from all VLANs */
+	if (filter) {
+		/* The port is already in the secure mode - do noting */
+		if (sw_port_tbl[lport].port_mode == GT_SECURE)
+			return MV_OK;
+
+		rc = mv_switch_port_secure_mode_set(lport);
+		SW_IF_ERROR_STR(rc, "failed to call mv_switch_port_secure_mode_set()\n");
+		mode = GT_SECURE;
+	} else {
+		/* Port should be moved to the fallback mode and added to all VLANs */
+		if (sw_port_tbl[lport].port_mode == GT_FALLBACK)
+			return MV_OK;
+
+		rc = mv_switch_port_fallback_mode_set(lport);
+		SW_IF_ERROR_STR(rc, "failed to call mv_switch_port_fallback_mode_set()\n");
+		mode = GT_FALLBACK;
+	}
+
+	rc = gvlnSetPortVlanDot1qMode(qd_dev, lport, mode);
+	SW_IF_ERROR_STR(rc, "failed to call gvlnSetPortVlanDot1qMode()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_vlan_filter_get
+*
+* DESCRIPTION:
+*	The API gets the filtering mode of a certain lport.
+*
+* INPUTS:
+*	lport  - logical switch port ID.
+*
+* OUTPUTS:
+*       filter - set to 1 means the lport will drop all packets which are NOT in
+*		 the allowed VID list (built using API tpm_sw_port_add_vid).
+*		 set to 0 - means that the list of VIDs allowed
+*		 per lport has no significance (the list is not deleted).
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_vlan_filter_get(unsigned int lport, unsigned char *filter)
+{
+	GT_DOT1Q_MODE mode;
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gvlnGetPortVlanDot1qMode(qd_dev, lport, &mode);
+	SW_IF_ERROR_STR(rc, "failed to call gvlnGetPortVlanDot1qMode()\n");
+
+	if (GT_SECURE == mode)
+		*filter = 1;
+	else
+		*filter = 0;
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_vlan_mode_set
+*
+* DESCRIPTION:
+*	The API sets the VLAN 802.1q mode of a certain lport.
+*
+* INPUTS:
+*	lport  - logical switch port ID.
+*       mode   - VLAN 802.1q mode.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_vlan_mode_set(unsigned int lport, GT_DOT1Q_MODE mode)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gvlnSetPortVlanDot1qMode(qd_dev, lport, mode);
+	SW_IF_ERROR_STR(rc, "failed to call gvlnSetPortVlanDot1qMode()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_vlan_mode_get
+*
+* DESCRIPTION:
+*	The API gets the VLAN 802.1q mode of a certain lport.
+*
+* INPUTS:
+*	lport  - logical switch port ID.
+*
+* OUTPUTS:
+*       mode   - VLAN 802.1q mode.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_vlan_mode_get(unsigned int lport, GT_DOT1Q_MODE *mode)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gvlnGetPortVlanDot1qMode(qd_dev, lport, mode);
+	SW_IF_ERROR_STR(rc, "failed to call gvlnGetPortVlanDot1qMode()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_mac_filter_mode_set
+*
+* DESCRIPTION: The routine sets MAC filter mode
+*
+* INPUTS:
+*	lport   - switch port
+*	mode   - MAC filter mode
+*
+* OUTPUTS:
+*	None
+*
+* RETURNS:
+*	On success, the function returns MV_OK. On error different types are returned
+*	according to the case - see tpm_error_code_t.
+*
+* COMMENTS:
+*	None
+*******************************************************************************/
+int mv_switch_port_mac_filter_mode_set(unsigned int	lport,
+				    GT_SA_FILTERING	mode)
+
+{
+	int rc = MV_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	/* set filter mode */
+	rc = gprtSetSAFiltering(qd_dev, lport, mode);
+	SW_IF_ERROR_STR(rc, "fail to set filter port(%d) mode(%d)\n", lport, mode);
+
+	return rc;
+}
+
+/*******************************************************************************
+* mv_switch_port_mac_filter_mode_get
+*
+* DESCRIPTION: The routine adds MAC address filter entry
+*
+* INPUTS:
+*	lport   - switch lport
+*
+* OUTPUTS:
+*	mode   - MAC filter mode
+*
+* RETURNS:
+*	On success, the function returns MV_OK. On error different types are returned
+*	according to the case - see tpm_error_code_t.
+*
+* COMMENTS:
+*	None
+*******************************************************************************/
+int mv_switch_port_mac_filter_mode_get(unsigned int	lport,
+				    GT_SA_FILTERING	*mode)
+{
+	int rc = MV_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	/* set filter mode */
+	rc = gprtGetSAFiltering(qd_dev, lport, mode);
+	SW_IF_ERROR_STR(rc, "fail to get filtering mode of port(%d)\n", lport);
+
+	return rc;
+}
+
+/*******************************************************************************
+* mv_switch_port_mac_filter_entry_add
+*
+* DESCRIPTION: The routine adds MAC address filter entry
+*
+* INPUTS:
+*	lport    - switch lport
+*	mac     - MAC address
+*	vlan    - VLAN ID
+*	mode    - MAC filter mode
+*
+* OUTPUTS:
+*	None
+*
+* RETURNS:
+*	On success, the function returns MV_OK. On error different types are returned
+*	according to the case - see tpm_error_code_t.
+*
+* COMMENTS:
+*	None
+*******************************************************************************/
+int mv_switch_port_mac_filter_entry_add(unsigned int	lport,
+				     unsigned char		*mac,
+				     unsigned short		vlan,
+				     GT_SA_FILTERING		mode)
+{
+	int rc = MV_OK;
+	unsigned int port_bm;
+
+	if (GT_SA_DROP_ON_LOCK == mode)
+		port_bm = (1 << lport);
+	else if (GT_SA_DROP_ON_UNLOCK == mode || GT_SA_DROP_TO_CPU == mode)
+		port_bm = 0;
+	else
+		return MV_OK;
+
+	rc = mv_switch_mac_addr_add(port_bm, mac, MV_SWITCH_STATIC_MAC_ADDR);
+	SW_IF_ERROR_STR(rc, "fail to add filtering addr of port(%d)\n", lport);
+
+	return rc;
+}
+
+/*******************************************************************************
+* mv_switch_port_mac_filter_entry_del
+*
+* DESCRIPTION: The routine deletes MAC address filter entry
+*
+* INPUTS:
+*	lport    - switch port
+*	mac     - MAC address
+*	vlan    - VLAN ID
+*	mode    - MAC filter mode
+*
+* OUTPUTS:
+*	None
+*
+* RETURNS:
+*	On success, the function returns MV_OK. On error different types are returned
+*	according to the case - see tpm_error_code_t.
+*
+* COMMENTS:
+*	None
+*******************************************************************************/
+int mv_switch_port_mac_filter_entry_del(unsigned int	lport,
+				     unsigned char		*mac,
+				     unsigned short		vlan,
+				     GT_SA_FILTERING		mode)
+{
+	int rc = MV_OK;
+
+	rc = mv_switch_mac_addr_del(lport, mac);
+	SW_IF_ERROR_STR(rc, "fail to del filtering addr of port(%d)\n", lport);
+
+	return rc;
+}
+
+/*******************************************************************************
+* mv_switch_port_vlan_set
+*
+* DESCRIPTION:
+*	This routine sets the port VLAN group port membership list.
+*
+* INPUTS:
+*	lport    - logical switch port ID.
+*	mem_port - array of logical ports in the same vlan.
+*	mem_num  - number of members in memPorts array
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_vlan_set(unsigned int lport, GT_LPORT mem_port[], unsigned int mem_num)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gvlnSetPortVlanPorts(qd_dev, (GT_LPORT)lport, mem_port, (unsigned char)mem_num);
+	SW_IF_ERROR_STR(rc, "failed to call gvlnSetPortVlanPorts()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_vlan_get
+*
+* DESCRIPTION:
+*	This routine gets the port VLAN group port membership list.
+*
+* INPUTS:
+*	lport    - logical switch port ID.
+*
+* OUTPUTS:
+*	mem_port - array of logical ports in the same vlan.
+*	mem_num  - number of members in memPorts array
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_vlan_get(unsigned int lport, GT_LPORT mem_port[], unsigned int *mem_num)
+{
+	GT_U8 num;
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gvlnGetPortVlanPorts(qd_dev, lport, mem_port, &num);
+	SW_IF_ERROR_STR(rc, "failed to call gvlnGetPortVlanPorts()\n");
+	*mem_num = num;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_mh_mode_set
+*
+* DESCRIPTION:
+*	This routine enables/disables ingress and egress header mode of switch port.
+*
+* INPUTS:
+*	lport   - logical switch port ID.
+*	enable  - enable/disable marvell header.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_mh_mode_set(unsigned char lport, GT_BOOL enable)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gprtSetHeaderMode(qd_dev, lport, enable);
+	SW_IF_ERROR_STR(rc, "failed to call gprtSetHeaderMode()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_mh_mode_get
+*
+* DESCRIPTION:
+*	This routine gets ingress and egress header mode of switch port.
+*
+* INPUTS:
+*	lport   - logical switch port ID.
+*
+* OUTPUTS:
+*	enable  - enable/disable marvell header.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_mh_mode_get(unsigned char lport, GT_BOOL *enable)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gprtGetHeaderMode(qd_dev, lport, enable);
+	SW_IF_ERROR_STR(rc, "failed to call gprtGetHeaderMode()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_frame_mode_set
+*
+* DESCRIPTION:
+*	This routine sets the frame mode.
+*
+* INPUTS:
+*	lport - logical switch port ID.
+*	mode  - frame mode.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_frame_mode_set(unsigned char lport, GT_FRAME_MODE mode)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gprtSetFrameMode(qd_dev, lport, mode);
+	SW_IF_ERROR_STR(rc, "failed to call gprtSetFrameMode()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_frame_mode_get
+*
+* DESCRIPTION:
+*	This routine gets the frame mode.
+*
+* INPUTS:
+*	lport  - logical switch port ID.
+*
+* OUTPUTS:
+*	mode   - frame mode.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_frame_mode_get(unsigned char lport, GT_FRAME_MODE *mode)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gprtGetFrameMode(qd_dev, lport, mode);
+	SW_IF_ERROR_STR(rc, "failed to call gprtGetFrameMode()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_etype_set
+*
+* DESCRIPTION:
+*	This routine sets ethernet type.
+*
+* INPUTS:
+*	lport - logical switch port ID.
+*	etype - Ethernet type.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_etype_set(unsigned char lport, unsigned short etype)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gprtSetPortEType(qd_dev, lport, etype);
+	SW_IF_ERROR_STR(rc, "failed to call gprtSetPortEType()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_etype_get
+*
+* DESCRIPTION:
+*	This routine gets the frame mode.
+*
+* INPUTS:
+*	lport  - logical switch port ID.
+*
+* OUTPUTS:
+*	etype - Ethernet type.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_etype_get(unsigned char lport, unsigned short *etype)
+{
+	GT_ETYPE l_etype;
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gprtGetPortEType(qd_dev, lport, &l_etype);
+	SW_IF_ERROR_STR(rc, "failed to call gprtGetPortEType()\n");
+	*etype = (unsigned short)l_etype;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_preamble_set
+*
+* DESCRIPTION:
+*	This routine sets preamble of a switch port.
+*
+* INPUTS:
+*	lport   - logical switch port ID.
+*	preamble - preamble length.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_preamble_set(unsigned char lport, unsigned short preamble)
+{
+	unsigned short data;
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = mv_switch_mii_write(qd_dev, 3, 26, preamble);
+	SW_IF_ERROR_STR(rc, "failed to call mv_switch_set_port_reg()\n");
+
+	mvOsDelay(10);
+
+	data = 0xb002 | (lport << 8);
+	rc = mv_switch_mii_write(qd_dev, 2, 26, data);
+	SW_IF_ERROR_STR(rc, "failed to call mv_switch_set_port_reg()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_preamble_get
+*
+* DESCRIPTION:
+*	This routine gets preamble of a switch port.
+*
+* INPUTS:
+*	lport   - logical switch port ID.
+*
+* OUTPUTS:
+*	preamble - preamble length.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_preamble_get(unsigned char lport, unsigned short *preamble)
+{
+	unsigned int data;
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	data = 0xc002 | (lport << 8);
+	rc = mv_switch_mii_write(qd_dev, 2, 26, data);
+	SW_IF_ERROR_STR(rc, "failed to call mv_switch_mii_read()\n");
+
+	mvOsDelay(10);
+
+	rc = mv_switch_mii_read(qd_dev, 3, 26, &data);
+	SW_IF_ERROR_STR(rc, "failed to call mv_switch_mii_read()\n");
+
+	*preamble = data;
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_atu_next_entry_get
+*
+* DESCRIPTION:
+*	This function get next FDB entry.
+*
+* INPUTS:
+*	atu_entry - ATU entry
+*
+* OUTPUTS:
+*	atu_entry - ATU entry
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_atu_next_entry_get(GT_ATU_ENTRY *atu_entry)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gfdbGetAtuEntryNext(qd_dev, atu_entry);
+
+	if (rc == GT_OK)
+		return MV_OK;
+	else
+		return MV_FAIL;
+}
+
+/*******************************************************************************
+* mv_switch_vtu_flush
+*
+* DESCRIPTION:
+*	Flush VTU on the Switch
+*
+* INPUTS:
+*	None.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_vtu_flush(void)
+{
+	unsigned int lport;
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gvtuFlush(qd_dev);
+	SW_IF_ERROR_STR(rc, "failed to call gvtuFlush()\n");
+
+	memset(sw_vlan_tbl, 0, sizeof(sw_vlan_tbl));
+
+	for (lport = 0; lport < qd_dev->numOfPorts; lport++)
+		memset(&(sw_port_tbl[lport].vlan_blong), 0, sizeof(sw_port_tbl[lport].vlan_blong));
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_atu_flush
+*
+* DESCRIPTION:
+*	Flush ATU on the Switch
+*
+* INPUTS:
+*	flush_cmd - flush command
+*	db_num    - ATU DB Num, only 0 should be used, since there is only one ATU DB right now.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_atu_flush(GT_FLUSH_CMD flush_cmd, unsigned short db_num)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gfdbFlushInDB(qd_dev, flush_cmd, db_num);
+	SW_IF_ERROR_STR(rc, "failed to call gfdbFlushInDB()\n");
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_force_link_set
+*
+* DESCRIPTION:
+*       This routine will force given switch port to be linked.
+*
+* INPUTS:
+*	lport  - logical switch PHY port ID.
+*	enable - enable/disable port force link.
+*	value  - force link up or down
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_force_link_set(unsigned int lport, GT_BOOL enable, GT_BOOL value)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gpcsSetForcedLink(qd_dev, lport, enable);
+	SW_IF_ERROR_STR(rc, "failed to call gpcsSetForcedLink()\n");
+
+	rc = gpcsSetLinkValue(qd_dev, lport, value);
+	SW_IF_ERROR_STR(rc, "failed to call gpcsSetLinkValue()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_force_link_get
+*
+* DESCRIPTION:
+*       This routine gets the force link state of given switch port.
+*
+* INPUTS:
+*	lport  - logical switch PHY port ID.
+*
+* OUTPUTS:
+*	enable - enable/disable port force link.
+*	value  - force link up or down
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_force_link_get(unsigned int lport, GT_BOOL *enable, GT_BOOL *value)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gpcsGetForcedLink(qd_dev, lport, enable);
+	SW_IF_ERROR_STR(rc, "failed to call gpcsGetForcedLink()\n");
+
+	rc = gpcsGetLinkValue(qd_dev, lport, value);
+	SW_IF_ERROR_STR(rc, "failed to call gpcsGetLinkValue()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_force_fc_set
+*
+* DESCRIPTION:
+*	This routine will set forced flow control state and value.
+*
+* INPUTS:
+*	lport  - logical switch PHY port ID.
+*	enable - enable/disable forced flow control.
+*	value  - force flow control value, enable flow control or disable it.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_force_fc_set(unsigned int lport, GT_BOOL enable, GT_BOOL value)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gpcsSetForcedFC(qd_dev, lport, enable);
+	SW_IF_ERROR_STR(rc, "failed to call gpcsSetForcedFC()\n");
+
+	rc = gpcsSetFCValue(qd_dev, lport, value);
+	SW_IF_ERROR_STR(rc, "failed to call gpcsSetFCValue()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_force_fc_get
+*
+* DESCRIPTION:
+*	This routine will get forced flow control state and value.
+*
+* INPUTS:
+*	lport  - logical switch PHY port ID.
+*
+* OUTPUTS:
+*	enable - enable/disable forced flow control.
+*	value  - force flow control value, enable flow control or disable it.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_force_fc_get(unsigned int lport, GT_BOOL *enable, GT_BOOL *value)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gpcsGetForcedFC(qd_dev, lport, enable);
+	SW_IF_ERROR_STR(rc, "failed to call gpcsGetForcedFC()\n");
+
+	rc = gpcsGetFCValue(qd_dev, lport, value);
+	SW_IF_ERROR_STR(rc, "failed to call gpcsGetFCValue()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_force_speed_set
+*
+* DESCRIPTION:
+*       This routine will force given switch port to work at specific speed.
+*
+* INPUTS:
+*	lport  - logical switch PHY port ID.
+*	enable - enable/disable port force speed.
+*	mode   - speed mode.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_force_speed_set(unsigned int lport, GT_BOOL enable, unsigned int mode)
+{
+	GT_PORT_FORCED_SPEED_MODE l_mode;
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	if (GT_FALSE == enable)
+		l_mode = PORT_DO_NOT_FORCE_SPEED;
+	else
+		l_mode = (GT_PORT_FORCED_SPEED_MODE)mode;
+
+	rc = gpcsSetForceSpeed(qd_dev, lport, l_mode);
+	SW_IF_ERROR_STR(rc, "failed to call gpcsSetForceSpeed()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_force_speed_get
+*
+* DESCRIPTION:
+*       This routine gets the force speed state of given switch port.
+*
+* INPUTS:
+*	lport  - logical switch PHY port ID.
+*
+* OUTPUTS:
+*	enable - enable/disable port force speed.
+*	mode   - speed mode.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_force_speed_get(unsigned int lport, GT_BOOL *enable, unsigned int *mode)
+{
+	GT_PORT_FORCED_SPEED_MODE l_mode;
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gpcsGetForceSpeed(qd_dev, lport, &l_mode);
+	SW_IF_ERROR_STR(rc, "failed to call gpcsGetForceSpeed()\n");
+
+	if (PORT_DO_NOT_FORCE_SPEED == l_mode) {
+		*enable = GT_FALSE;
+		*mode   = PORT_FORCE_SPEED_1000_MBPS; /* do not have mean in case the force is disabled */
+	} else {
+		*enable = GT_TRUE;
+		*mode   = (unsigned int)l_mode;
+	}
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_force_duplex_set
+*
+* DESCRIPTION:
+*       This routine will force given switch port w/ duplex mode.
+*
+* INPUTS:
+*	lport  - logical switch PHY port ID.
+*	enable - enable/disable port force duplex.
+*	value  - half or full duplex mode
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_force_duplex_set(unsigned int lport, GT_BOOL enable, GT_BOOL value)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gpcsSetForcedDpx(qd_dev, lport, enable);
+	SW_IF_ERROR_STR(rc, "failed to call gpcsSetForcedDpx()\n");
+
+	rc = gpcsSetDpxValue(qd_dev, lport, value);
+	SW_IF_ERROR_STR(rc, "failed to call gpcsSetDpxValue()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_force_duplex_get
+*
+* DESCRIPTION:
+*       This routine gets the force duplex state of given switch port.
+*
+* INPUTS:
+*	lport  - logical switch PHY port ID.
+*
+* OUTPUTS:
+*	enable - enable/disable port force duplex.
+*	value  - half or full duplex mode
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_force_duplex_get(unsigned int lport, GT_BOOL *enable, GT_BOOL *value)
+{
+	GT_STATUS rc = GT_OK;
+
+	MV_IF_NULL_RET_STR(qd_dev, MV_FAIL, "switch dev qd_dev has not been init!\n");
+
+	rc = gpcsGetForcedDpx(qd_dev, lport, enable);
+	SW_IF_ERROR_STR(rc, "failed to call gpcsGetForcedDpx()\n");
+
+	rc = gpcsGetDpxValue(qd_dev, lport, value);
+	SW_IF_ERROR_STR(rc, "failed to call gpcsGetDpxValue()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_num_get
+*
+* DESCRIPTION:
+*	This routine will get total switch port number.
+*
+* INPUTS:
+*	None.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+unsigned int mv_switch_port_num_get(void)
+{
+	MV_IF_NULL_RET_STR(qd_dev, 0, "switch dev qd_dev has not been init!\n");
+
+	return qd_dev->numOfPorts;
+}
+
+/*******************************************************************************
+* mv_switch_qd_dev_get
+*
+* DESCRIPTION:
+*	This routine gets QA dev.
+*
+* INPUTS:
+*	None.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	QA dev.
+*******************************************************************************/
+GT_QD_DEV *mv_switch_qd_dev_get(void)
+{
+	return qd_dev;
+}
+
+/*******************************************************************************
+* mv_switch_vtu_shadow_dump
+*
+* DESCRIPTION:
+*	This routine dumps the VTU shadow.
+*
+* INPUTS:
+*	None.
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*
+*******************************************************************************/
+int mv_switch_vtu_shadow_dump(void)
+{
+	unsigned int vid;
+	unsigned int num = 0;
+	unsigned int port_idx;
+	GT_VTU_ENTRY *vtu_entry;
+	pr_err("switch VTU shadow\n\n");
+
+	for (vid = 0; vid < MV_SWITCH_MAX_VLAN_NUM; vid++) {
+		if (sw_vlan_tbl[vid].port_bm) {
+			vtu_entry = &sw_vlan_tbl[vid].vtu_entry;
+			pr_err("DBNum:%i, VID:%i port_bm:0x%02x,\n",
+				vtu_entry->DBNum, vtu_entry->vid, sw_vlan_tbl[vid].port_bm);
+			pr_err("Tag Mode: ");
+			for (port_idx = 0; port_idx < MV_SWITCH_MAX_PORT_NUM; port_idx++)
+				pr_err("port(%d):%d; ", port_idx, sw_vlan_tbl[vid].egr_mode[port_idx]);
+			pr_err("\n");
+
+			pr_err("vidPriOverride(%d), vidPriority(%d), sid(%d), vidPolicy(%d), useVIDFPri(%d), vidFPri(%d), useVIDQPri(%d), vidQPri(%d), vidNRateLimit(%d)\n",
+				vtu_entry->vidPriOverride,
+				vtu_entry->vidPriority,
+				vtu_entry->sid,
+				vtu_entry->vidPolicy,
+				vtu_entry->vidExInfo.useVIDFPri,
+				vtu_entry->vidExInfo.vidFPri,
+				vtu_entry->vidExInfo.useVIDQPri,
+				vtu_entry->vidExInfo.vidQPri,
+				vtu_entry->vidExInfo.vidNRateLimit);
+			num++;
+		}
+	}
+	pr_err("\nTag mode 0: egress unmodified, 1:port not in VLAN, 2:egress untagged, 3:egress tagged\n");
+	pr_err("Total switch VLAN number:%d\n", num);
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_vlan_tunnel_set
+*
+* DESCRIPTION:
+*	This routine set VLAN tunnel mode of switch port.
+*
+* INPUTS:
+*	lport  - switch port
+*       mode   - vlan tunnel mode, enable or disable
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*
+*******************************************************************************/
+int mv_switch_vlan_tunnel_set(unsigned int lport, GT_BOOL mode)
+{
+	GT_STATUS rc = GT_OK;
+
+	/* check qd_dev init or not */
+	if (qd_dev == NULL) {
+		rc = MV_ERROR;
+		SW_IF_ERROR_STR(rc, "qd_dev not initialized, call mv_switch_load() first\n");
+	}
+
+	rc = gprtSetVlanTunnel(qd_dev, lport, mode);
+	SW_IF_ERROR_STR(rc, "failed to call gpcsGetForcedDpx()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_state_get
+*
+* DESCRIPTION:
+*       This routine gets the forwarding state of given switch port.
+*
+* INPUTS:
+*	lport  - logical switch PHY port ID.
+*
+* OUTPUTS:
+*	state  -switch port forwarding state
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_state_get(unsigned int lport, enum sw_port_state_t *state)
+{
+	GT_STATUS rc = GT_OK;
+
+	rc = gstpGetPortState(qd_dev, lport, state);
+	SW_IF_ERROR_STR(rc, "failed to call gpcsGetForcedLink()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_port_state_set
+*
+* DESCRIPTION:
+*	This routine will set forwarding state.
+*
+* INPUTS:
+*	lport  - logical switch PHY port ID.
+*	state  -switch port forwarding state
+*
+* OUTPUTS:
+*	None.
+*
+* RETURNS:
+*	On success return MV_OK.
+*	On error different types are returned according to the case.
+*******************************************************************************/
+int mv_switch_port_state_set(unsigned int lport, enum sw_port_state_t state)
+{
+	GT_STATUS rc = GT_OK;
+
+	rc = gstpSetPortState(qd_dev, lport, state);
+	SW_IF_ERROR_STR(rc, "failed to call gpcsSetForcedFC()\n");
+
+	return MV_OK;
+}
+
+/*******************************************************************************
+* mv_switch_cpu_port_get
+*
+* DESCRIPTION:
+*	This routine get cpu port of swicth.
+*
+* INPUTS:
+*	None.
+*
+* OUTPUTS:
+*	cpu_port - swicth CPU port configured.
+*
+* RETURNS:
+*
+*******************************************************************************/
+int mv_switch_cpu_port_get(unsigned int *cpu_port)
+{
+	GT_STATUS rc = GT_OK;
+
+	if (cpu_port == NULL)
+		return MV_BAD_VALUE;
+
+	/* check qd_dev init or not */
+	if (qd_dev == NULL) {
+		rc = MV_ERROR;
+		SW_IF_ERROR_STR(rc, "qd_dev not initialized, call mv_switch_load() first\n");
+	}
+
+	*cpu_port = qd_dev->cpuPortNum;
+
+	return MV_OK;
+}
+
+static int mv_switch_probe(struct platform_device *pdev)
+{
+	struct mv_switch_pdata *plat_data = (struct mv_switch_pdata *)pdev->dev.platform_data;
+	/* load switch driver, force link on cpu port */
+	mv_switch_load(plat_data);
+
+	/* default switch init, disable all ports */
+	mv_switch_init(plat_data);
+
+	return MV_OK;
+}
+
+static int mv_switch_remove(struct platform_device *pdev)
+{
+	printk(KERN_INFO "Removing Marvell Switch Driver\n");
+	/* unload */
+
+	return MV_OK;
+}
+
+static void mv_switch_shutdown(struct platform_device *pdev)
+{
+	printk(KERN_INFO "Shutting Down Marvell Switch Driver\n");
+}
+static const struct mv_mux_switch_ops switch_ops =  {
+
+	/* update ops for mux */
+	.promisc_set = mv_switch_promisc_set,
+	.jumbo_mode_set = mv_switch_jumbo_mode_set,
+	.group_disable = mv_switch_group_disable,
+	.group_enable = mv_switch_group_enable,
+	.link_status_get = mv_switch_link_status_get,
+	.all_mcast_del = mv_switch_all_multicasts_del,
+	.mac_addr_set = mv_switch_mac_addr_set,
+	.group_cookie_set = mv_switch_group_cookie_set,
+	.tag_get = mv_switch_tag_get,
+	.preset_init = mv_switch_preset_init,
+	.interrupt_unmask = mv_switch_interrupt_unmask,
+};
+
+static struct platform_driver mv_switch_driver = {
+	.probe = mv_switch_probe,
+	.remove = mv_switch_remove,
+	.shutdown = mv_switch_shutdown,
+#ifdef CONFIG_CPU_IDLE
+	/* TBD */
+#endif /* CONFIG_CPU_IDLE */
+	.driver = {
+		.name = MV_SWITCH_SOHO_NAME,
+	},
+};
+
+static int __init mv_switch_init_module(void)
+{
+	return platform_driver_register(&mv_switch_driver);
+}
+module_init(mv_switch_init_module);
+
+static void __exit mv_switch_cleanup_module(void)
+{
+	platform_driver_unregister(&mv_switch_driver);
+}
+module_exit(mv_switch_cleanup_module);
+
+
+MODULE_DESCRIPTION("Marvell Internal Switch Driver - www.marvell.com");
+MODULE_AUTHOR("Dmitri Epshtein <dima@marvell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/mvebu_net/switch/mv_switch.h b/drivers/net/ethernet/mvebu_net/switch/mv_switch.h
new file mode 100644
index 000000000000..98bdf369d573
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/mv_switch.h
@@ -0,0 +1,272 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#ifndef __mv_switch_h__
+#define __mv_switch_h__
+
+#include "mv802_3.h"
+#ifdef CONFIG_MV_INCLUDE_SWITCH
+#include "msApiTypes.h"
+#include "msApiDefs.h"
+#endif
+#include "mv_mux_netdev.h"
+
+#define MV_SWITCH_DB_NUM			16
+
+/*TPM start*/
+#define MV_SWITCH_PORT_NOT_BELONG		(0)	/* VLAN does not belong to port		*/
+#define MV_SWITCH_PORT_BELONG			(1)	/* VLAN belong to port			*/
+#define MV_SWITCH_MAX_VLAN_NUM			(4096)	/* Maximum switch VLAN number		*/
+#define MV_SWITCH_MAX_PORT_NUM			(7)	/* Maximum switch port number		*/
+#define MV_SWITCH_MAX_QUEUE_NUM			(4)	/* Maximum switch queue number per port	*/
+#define MV_SWITCH_CPU_PORT_NUM			(6)	/* Switch port connected to CPU		*/
+#define MV_SWITCH_DEFAULT_WEIGHT		(2)	/* Switch default queue weight		*/
+/*TPM end*/
+
+#define MV_SWITCH_PHY_ACCESS			1
+#define MV_SWITCH_PORT_ACCESS			2
+#define MV_SWITCH_GLOBAL_ACCESS			3
+#define MV_SWITCH_GLOBAL2_ACCESS		4
+#define MV_SWITCH_SMI_ACCESS                	5
+
+#define MV_SWITCH_PORT_VLAN_ID(grp, port)  ((grp) + (port) + 1)
+#define MV_SWITCH_GROUP_VLAN_ID(grp)       (((grp) + 1) << 8)
+#define MV_SWITCH_VLAN_TO_GROUP(vid)       ((((vid) & 0xf00) >> 8) - 1)
+
+#define MV_SWITCH_PIRL_BKTTYPR_UNKNOWN_MULTICAST_BIT    1
+#define MV_SWITCH_PIRL_BKTTYPR_BROADCAST_BIT		2
+#define MV_SWITCH_PIRL_BKTTYPR_MULTICAST_BIT		3
+
+#define MV_SWITCH_PIRL_RESOURCE_BROADCAST    1
+#define MV_SWITCH_PIRL_RESOURCE_MULTICAST    2
+
+
+#ifdef CONFIG_MV_INCLUDE_SWITCH
+/*TPM start*/
+#define SW_IF_NULL(ptr) { \
+	if (ptr == NULL) {\
+		pr_err("(error) %s(%d) recvd NULL pointer\n", __func__, __LINE__);\
+		return MV_FAIL;\
+	} \
+}
+#define SW_IF_ERROR_STR(rc, format, ...) { \
+	if (rc) {\
+		pr_err("(error) %s(%d) (rc=%d): "format, __func__, __LINE__, rc, ##__VA_ARGS__);\
+		return MV_FAIL;\
+	} \
+}
+
+/* port mirror direction */
+enum sw_mirror_mode_t {
+	MV_SWITCH_MIRROR_INGRESS,
+	MV_SWITCH_MIRROR_EGRESS,
+	MV_SWITCH_MIRROR_BOTH
+};
+
+
+/* operations requested by switch device from mux device */
+struct mux_device_ops {
+	int	(*update_link)(void *cookie, int link_up);
+};
+
+/* port mirror direction */
+enum sw_mac_addr_type_t {
+	MV_SWITCH_ALL_MAC_ADDR,
+	MV_SWITCH_UNICAST_MAC_ADDR,
+	MV_SWITCH_MULTICAST_MAC_ADDR,
+	MV_SWITCH_DYNAMIC_MAC_ADDR,
+	MV_SWITCH_STATIC_MAC_ADDR
+};
+
+/* logical port VLAN information */
+struct sw_port_info_t {
+	GT_DOT1Q_MODE	port_mode;
+	unsigned int	vlan_blong[MV_SWITCH_MAX_VLAN_NUM];
+};
+
+/* VLAN information */
+struct sw_vlan_info_t {
+	unsigned int	port_bm;				/* bitmap of the ports in this VLAN */
+	unsigned char	egr_mode[MV_SWITCH_MAX_PORT_NUM];	/* egress mode of each port         */
+	GT_VTU_ENTRY	vtu_entry;				/* Add this member to record HW VT info to SW table */
+};
+
+enum sw_port_state_t {
+	MV_SWITCH_PORT_DISABLE = 0,
+	MV_SWITCH_BLOCKING,
+	MV_SWITCH_LEARNING,
+	MV_SWITCH_FORWARDING
+};
+
+/*TPM end*/
+#endif
+/*unsigned int	mv_switch_link_detection_init(struct mv_switch_pdata *plat_data);*/
+void mv_switch_interrupt_mask(void);
+void mv_switch_interrupt_unmask(void);
+void mv_switch_interrupt_clear(void);
+
+int     mv_switch_unload(unsigned int switch_ports_mask);
+void    mv_switch_link_update_event(MV_U32 port_mask, int force_link_check);
+int     mv_switch_jumbo_mode_set(int max_size);
+int     mv_switch_tos_get(unsigned char tos);
+int     mv_switch_tos_set(unsigned char tos, int queue);
+int     mv_switch_port_based_vlan_set(unsigned int ports_mask, int set_cpu_port);
+int     mv_switch_vlan_in_vtu_set(unsigned short vlan_id, unsigned short db_num, unsigned int ports_mask);
+int     mv_switch_atu_db_flush(int db_num);
+int     mv_switch_vlan_set(u16 vlan_grp_id, u16 port_map);
+int mv_switch_promisc_set(int db, u8 promisc_on);
+
+int     mv_switch_reg_read(int port, int reg, int type, MV_U16 *value);
+int     mv_switch_reg_write(int port, int reg, int type, MV_U16 value);
+
+#ifdef CONFIG_MV_SW_PTP
+int     mv_switch_ptp_reg_read(int port, int reg, MV_U16 *value);
+int     mv_switch_ptp_reg_write(int port, int reg, MV_U16 value);
+#endif
+
+void    mv_switch_stats_print(void);
+void    mv_switch_status_print(void);
+
+int     mv_switch_all_multicasts_del(int db_num);
+
+int     mv_switch_port_add(int switch_port, u16 vlan_grp_id);
+int     mv_switch_port_del(int switch_port);
+
+int		mv_switch_default_config_get(MV_TAG_TYPE *tag_mode,
+						MV_SWITCH_PRESET_TYPE *preset, int *vid, int *gbe_port);
+int		mv_switch_preset_init(MV_TAG_TYPE tag_mode, MV_SWITCH_PRESET_TYPE preset, int vid);
+bool		mv_switch_tag_get(int db, MV_TAG_TYPE tag_mode, MV_SWITCH_PRESET_TYPE preset, int vid, MV_MUX_TAG *tag);
+unsigned int	mv_switch_group_map_get(void);
+int		mv_switch_group_restart_autoneg(int db);
+int		mv_switch_group_enable(int db);
+int		mv_switch_group_disable(int db);
+int		mv_switch_link_status_get(int db);
+int		mv_switch_group_cookie_set(int db, void *cookie);
+int		mv_switch_mac_update(int db, unsigned char *old_mac, unsigned char *new_mac);
+int		mv_switch_mac_addr_set(int db, unsigned char *mac_addr, unsigned char op);
+int		mv_switch_mux_ops_set(const struct mv_switch_mux_ops *mux_ops_ptr);
+
+#ifdef CONFIG_MV_INCLUDE_SWITCH
+/*TPM start*/
+int mv_switch_mac_update(int db, unsigned char *old_mac, unsigned char *new_mac);
+int mv_switch_port_discard_tag_set(unsigned int lport, GT_BOOL mode);
+int mv_switch_port_discard_tag_get(unsigned int lport, GT_BOOL *mode);
+int mv_switch_port_discard_untag_set(unsigned int lport, GT_BOOL mode);
+int mv_switch_port_discard_untag_get(unsigned int lport, GT_BOOL *mode);
+int mv_switch_port_def_vid_set(unsigned int lport, unsigned short vid);
+int mv_switch_port_def_vid_get(unsigned int lport, unsigned short *vid);
+int mv_switch_port_def_pri_set(unsigned int lport, unsigned char pri);
+int mv_switch_port_def_pri_get(unsigned int lport, unsigned char *pri);
+int mv_switch_port_vid_add(unsigned int lport, unsigned short vid, unsigned char egr_mode, bool belong);
+int mv_switch_port_vid_del(unsigned int lport, unsigned short vid);
+int mv_switch_vid_get(unsigned int vid, GT_VTU_ENTRY *vtu_entry, unsigned int *found);
+int mv_switch_port_vid_egress_mode_set(unsigned int lport, unsigned short vid, unsigned char egr_mode);
+int mv_switch_unknown_unicast_flood_set(unsigned char lport, GT_BOOL enable);
+int mv_switch_unknown_unicast_flood_get(unsigned char lport, GT_BOOL *enable);
+int mv_switch_unknown_multicast_flood_set(unsigned char lport, GT_BOOL enable);
+int mv_switch_unknown_multicast_flood_get(unsigned char lport, GT_BOOL *enable);
+int mv_switch_broadcast_flood_set(GT_BOOL enable);
+int mv_switch_broadcast_flood_get(GT_BOOL *enable);
+int mv_switch_port_count3_get(unsigned int lport, GT_STATS_COUNTER_SET3 *count);
+int mv_switch_port_drop_count_get(unsigned int lport, GT_PORT_STAT2 *count);
+int mv_switch_port_count_clear(unsigned int lport);
+int mv_switch_count_clear(void);
+int mv_switch_ingr_limit_mode_set(unsigned int lport, GT_RATE_LIMIT_MODE mode);
+int mv_switch_ingr_limit_mode_get(unsigned int lport, GT_RATE_LIMIT_MODE *mode);
+int mv_switch_ingr_police_rate_set(unsigned int	lport,
+					GT_PIRL2_COUNT_MODE	count_mode,
+					unsigned int		cir,
+					GT_U32		bktTypeMask);
+int mv_switch_ingr_police_rate_get(unsigned int		lport,
+				   GT_PIRL2_COUNT_MODE	*count_mode,
+				   unsigned int		*cir);
+int mv_switch_egr_rate_limit_set(unsigned int lport, GT_PIRL_ELIMIT_MODE mode, unsigned int rate);
+int mv_switch_egr_rate_limit_get(unsigned int lport, GT_PIRL_ELIMIT_MODE *mode, unsigned int *rate);
+int mv_switch_ingr_broadcast_rate_set(unsigned int lport, GT_PIRL2_COUNT_MODE count_mode, unsigned int cir);
+int mv_switch_ingr_broadcast_rate_get(unsigned int lport, GT_PIRL2_COUNT_MODE *count_mode, unsigned int *cir);
+int mv_switch_ingr_multicast_rate_set(unsigned int lport, GT_PIRL2_COUNT_MODE count_mode, unsigned int cir);
+int mv_switch_ingr_multicast_rate_get(unsigned int lport, GT_PIRL2_COUNT_MODE *count_mode, unsigned int *cir);
+int mv_switch_port_mirror_set(unsigned int sport, enum sw_mirror_mode_t mode, GT_BOOL enable, unsigned int dport);
+int mv_switch_port_mirror_get(unsigned int sport, enum sw_mirror_mode_t mode, GT_BOOL *enable, unsigned int *dport);
+int mv_switch_age_time_set(unsigned int time);
+int mv_switch_age_time_get(unsigned int *time);
+int mv_switch_mac_learn_disable_set(unsigned int lport, GT_BOOL enable);
+int mv_switch_mac_learn_disable_get(unsigned int lport, GT_BOOL *enable);
+int mv_switch_learn2all_enable_set(GT_BOOL enable);
+int mv_switch_learn2all_enable_get(GT_BOOL *enable);
+int mv_switch_mac_limit_set(unsigned int lport, unsigned int mac_num);
+int mv_switch_mac_limit_get(unsigned int lport, unsigned int *mac_num);
+int mv_switch_mac_addr_add(unsigned int port_bm, unsigned char mac_addr[6], unsigned int mode);
+int mv_switch_mac_addr_del(unsigned int lport, unsigned char mac_addr[6]);
+int mv_switch_port_qos_mode_set(unsigned int lport, GT_PORT_SCHED_MODE mode);
+int mv_switch_port_qos_mode_get(unsigned int lport, GT_PORT_SCHED_MODE *mode);
+int mv_switch_queue_weight_set(unsigned int lport, unsigned char queue, unsigned char weight);
+int mv_switch_queue_weight_get(unsigned int lport, unsigned char queue, unsigned char *weight);
+int mv_switch_mtu_set(unsigned int mtu);
+int mv_switch_mtu_get(unsigned int *mtu);
+int mv_switch_link_state_get(unsigned int lport, GT_BOOL *state);
+int mv_switch_duplex_state_get(unsigned int lport, GT_BOOL *state);
+int mv_switch_speed_state_get(unsigned int lport, GT_PORT_SPEED_MODE *speed);
+int mv_switch_port_vlan_filter_set(unsigned int lport, unsigned char filter);
+int mv_switch_port_vlan_filter_get(unsigned int lport, unsigned char *filter);
+int mv_switch_port_vlan_mode_set(unsigned int lport, GT_DOT1Q_MODE mode);
+int mv_switch_port_vlan_mode_get(unsigned int lport, GT_DOT1Q_MODE *mode);
+int mv_switch_port_mac_filter_mode_set(unsigned int lport, GT_SA_FILTERING mode);
+int mv_switch_port_mac_filter_mode_get(unsigned int lport, GT_SA_FILTERING *mode);
+int mv_switch_port_mac_filter_entry_add(unsigned int lport, unsigned char *mac,
+	unsigned short vlan, GT_SA_FILTERING mode);
+int mv_switch_port_mac_filter_entry_del(unsigned int lport, unsigned char *mac,
+	unsigned short	vlan, GT_SA_FILTERING mode);
+int mv_switch_port_vlan_set(unsigned int lport, GT_LPORT mem_port[], unsigned int mem_num);
+int mv_switch_port_vlan_get(unsigned int lport, GT_LPORT mem_port[], unsigned int *mem_num);
+int mv_switch_mh_mode_set(unsigned char lport, GT_BOOL enable);
+int mv_switch_mh_mode_get(unsigned char lport, GT_BOOL *enable);
+int mv_switch_frame_mode_set(unsigned char lport, GT_FRAME_MODE mode);
+int mv_switch_frame_mode_get(unsigned char lport, GT_FRAME_MODE *mode);
+int mv_switch_etype_set(unsigned char lport, unsigned short etype);
+int mv_switch_etype_get(unsigned char lport, unsigned short *etype);
+int mv_switch_port_preamble_set(unsigned char lport, unsigned short preamble);
+int mv_switch_port_preamble_get(unsigned char lport, unsigned short *preamble);
+int mv_switch_port_force_speed_set(unsigned int lport, GT_BOOL enable, unsigned int mode);
+int mv_switch_port_force_speed_get(unsigned int lport, GT_BOOL *enable, unsigned int *mode);
+int mv_switch_port_force_duplex_set(unsigned int lport, GT_BOOL enable, GT_BOOL value);
+int mv_switch_port_force_duplex_get(unsigned int lport, GT_BOOL *enable, GT_BOOL *value);
+int mv_switch_atu_next_entry_get(GT_ATU_ENTRY *atu_entry);
+int mv_switch_vtu_flush(void);
+int mv_switch_atu_flush(GT_FLUSH_CMD flush_cmd, unsigned short db_num);
+unsigned int mv_switch_port_num_get(void);
+GT_QD_DEV *mv_switch_qd_dev_get(void);
+int mv_switch_vtu_shadow_dump(void);
+int mv_switch_vlan_tunnel_set(unsigned int lport, GT_BOOL mode);
+int mv_switch_port_force_link_set(unsigned int lport, GT_BOOL enable, GT_BOOL value);
+int mv_switch_port_force_link_get(unsigned int lport, GT_BOOL *enable, GT_BOOL *value);
+int mv_switch_port_state_set(unsigned int lport, enum sw_port_state_t state);
+int mv_switch_port_state_get(unsigned int lport, enum sw_port_state_t *state);
+int mv_switch_cpu_port_get(unsigned int *cpu_port);
+/*TPM end*/
+#endif
+#endif /* __mv_switch_h__ */
diff --git a/drivers/net/ethernet/mvebu_net/switch/mv_switch_sysfs.c b/drivers/net/ethernet/mvebu_net/switch/mv_switch_sysfs.c
new file mode 100644
index 000000000000..f6912ef63630
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/mv_switch_sysfs.c
@@ -0,0 +1,192 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+*******************************************************************************/
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+
+#include "mv802_3.h"
+#include "mv_switch.h"
+
+
+static ssize_t mv_switch_help(char *buf)
+{
+	int off = 0;
+
+	off += sprintf(buf+off, "cat help                            - show this help\n");
+	off += sprintf(buf+off, "cat stats                           - show statistics for switch all ports info\n");
+	off += sprintf(buf+off, "cat status                          - show switch status\n");
+	off += sprintf(buf+off, "echo p grp        > port_add        - map switch port to a network device\n");
+	off += sprintf(buf+off, "echo p            > port_del        - unmap switch port from a network device\n");
+	off += sprintf(buf+off, "echo p r t   > reg_r                - read switch register.  t: 1-phy, 2-port, 3-global, 4-global2, 5-smi\n");
+	off += sprintf(buf+off, "echo p r t v > reg_w                - write switch register. t: 1-phy, 2-port, 3-global, 4-global2, 5-smi\n");
+#ifdef CONFIG_MV_SW_PTP
+	off += sprintf(buf+off, "echo p r t   > ptp_reg_r            - read ptp register.  p: 15-PTP Global, 14-TAI Global, t: not used\n");
+	off += sprintf(buf+off, "echo p r t v > ptp_reg_w            - write ptp register. p: 15-PTP Global, 14-TAI Global, t: not used\n");
+#endif
+	return off;
+}
+
+static ssize_t mv_switch_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	const char *name = attr->attr.name;
+	int off = 0;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!strcmp(name, "stats"))
+		mv_switch_stats_print();
+	else if (!strcmp(name, "status"))
+		mv_switch_status_print();
+	else
+		off = mv_switch_help(buf);
+
+	return off;
+}
+
+static ssize_t mv_switch_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	unsigned long   flags;
+	int             err, port, reg, type;
+	unsigned int    v;
+	MV_U16          val;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read arguments */
+	err = port = reg = type = val = 0;
+	sscanf(buf, "%d %d %d %x", &port, &reg, &type, &v);
+
+	local_irq_save(flags);
+	if (!strcmp(name, "reg_r")) {
+		err = mv_switch_reg_read(port, reg, type, &val);
+	} else if (!strcmp(name, "reg_w")) {
+		val = (MV_U16)v;
+		err = mv_switch_reg_write(port, reg, type, v);
+#ifdef CONFIG_MV_SW_PTP
+	} else if (!strcmp(name, "ptp_reg_r")) {
+		err = mv_switch_ptp_reg_read(port, reg, &val);
+	} else if (!strcmp(name, "ptp_reg_w")) {
+		val = (MV_U16)v;
+		err = mv_switch_ptp_reg_write(port, reg, val);
+#endif
+	}
+	printk(KERN_ERR "switch register access: type=%d, port=%d, reg=%d", type, port, reg);
+
+	if (err)
+		printk(KERN_ERR " - FAILED, err=%d\n", err);
+	else
+		printk(KERN_ERR " - SUCCESS, val=0x%04x\n", val);
+
+	local_irq_restore(flags);
+
+	return err ? -EINVAL : len;
+}
+
+static ssize_t mv_switch_netdev_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len)
+{
+	const char      *name = attr->attr.name;
+	int             err = 0, port = 0, group;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	/* Read arguments */
+	sscanf(buf, "%d %d", &port, &group);
+
+
+	if (!strcmp(name, "port_add"))
+		err = mv_switch_port_add(port, group);
+	else if (!strcmp(name, "port_del"))
+		err = mv_switch_port_del(port);
+
+
+	if (err)
+		printk(KERN_ERR " - FAILED, err=%d\n", err);
+	else
+		printk(KERN_ERR " - SUCCESS\n");
+
+	return err ? -EINVAL : len;
+}
+
+static DEVICE_ATTR(reg_r,       S_IWUSR, mv_switch_show, mv_switch_store);
+static DEVICE_ATTR(reg_w,       S_IWUSR, mv_switch_show, mv_switch_store);
+#ifdef CONFIG_MV_SW_PTP
+static DEVICE_ATTR(ptp_reg_r,   S_IWUSR, mv_switch_show, mv_switch_store);
+static DEVICE_ATTR(ptp_reg_w,   S_IWUSR, mv_switch_show, mv_switch_store);
+#endif
+static DEVICE_ATTR(status,      S_IRUSR, mv_switch_show, mv_switch_store);
+static DEVICE_ATTR(stats,       S_IRUSR, mv_switch_show, mv_switch_store);
+static DEVICE_ATTR(help,        S_IRUSR, mv_switch_show, mv_switch_store);
+static DEVICE_ATTR(port_add,    S_IWUSR, mv_switch_show, mv_switch_netdev_store);
+static DEVICE_ATTR(port_del,    S_IWUSR, mv_switch_show, mv_switch_netdev_store);
+
+static struct attribute *mv_switch_attrs[] = {
+	&dev_attr_reg_r.attr,
+	&dev_attr_reg_w.attr,
+#ifdef CONFIG_MV_SW_PTP
+	&dev_attr_ptp_reg_r.attr,
+	&dev_attr_ptp_reg_w.attr,
+#endif
+	&dev_attr_status.attr,
+	&dev_attr_stats.attr,
+	&dev_attr_help.attr,
+	&dev_attr_port_add.attr,
+	&dev_attr_port_del.attr,
+	NULL
+};
+
+static struct attribute_group mv_switch_group = {
+	.name = "mv_switch",
+	.attrs = mv_switch_attrs,
+};
+
+int __devinit mv_switch_sysfs_init(void)
+{
+	int err;
+	struct device *pd;
+
+	pd = &platform_bus;
+	err = sysfs_create_group(&pd->kobj, &mv_switch_group);
+	if (err)
+		pr_err("Init sysfs group %s failed %d\n", mv_switch_group.name, err);
+
+	return err;
+}
+
+module_init(mv_switch_sysfs_init);
+
+MODULE_AUTHOR("Dima Epshtein");
+MODULE_DESCRIPTION("sysfs for Marvell switch");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/.gitignore b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/.gitignore
new file mode 100644
index 000000000000..1d5b2ead4159
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/.gitignore
@@ -0,0 +1,97 @@
+#
+# NOTE! Don't add files that are generated in specific
+# subdirectories here. Add them in the ".gitignore" file
+# in that subdirectory instead.
+#
+# NOTE! Please use 'git ls-files -i --exclude-standard'
+# command after changing this file, to see if there are
+# any tracked files which get ignored after the change.
+#
+# Normal rules
+#
+.*
+*.o
+*.o.*
+*.a
+*.s
+*.ko
+*.so
+*.so.dbg
+*.mod.c
+*.i
+*.lst
+*.symtypes
+*.order
+modules.builtin
+*.elf
+*.bin
+*.gz
+*.bz2
+*.lzma
+*.xz
+*.lzo
+*.patch
+*.gcno
+
+#
+#
+#
+arch/arm/mach-armadaxp/armada_xp_family/
+arch/arm/mach-armada370/armada_370_family/
+arch/arm/mach-armada375/armada_375_family/
+arch/arm/mach-armada380/armada_380_family/
+arch/arm/plat-armada/common/
+arch/arm/plat-armada/mv_hal/
+arch/arm/plat-armada/mv_drivers_lsp/mv_pp2/
+arch/arm/plat-armada/mv_drivers_lsp/mv_neta/
+#
+# Top-level generic files
+#
+/tags
+/TAGS
+/linux
+/vmlinux
+/vmlinuz
+/System.map
+/Module.markers
+/Module.symvers
+
+#
+# Debian directory (make deb-pkg)
+#
+/debian/
+
+#
+# git files that we don't want to ignore even it they are dot-files
+#
+!.gitignore
+!.mailmap
+
+#
+# Generated include files
+#
+include/config
+include/linux/version.h
+include/generated
+arch/*/include/generated
+
+# stgit generated dirs
+patches-*
+
+# quilt's files
+patches
+series
+
+# cscope files
+cscope.*
+ncscope.*
+
+# gnu global files
+GPATH
+GRTAGS
+GSYMS
+GTAGS
+
+*.orig
+*~
+\#*#
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/Copyright.h b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/Copyright.h
new file mode 100644
index 000000000000..7ee8732c3009
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/Copyright.h
@@ -0,0 +1,60 @@
+
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+********************************************************************************
+Marvell Commercial License Option
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+********************************************************************************
+Marvell GPL License Option
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED. The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+* Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+* Neither the name of Marvell nor the names of its contributors may be
+used to endorse or promote products derived from this software without
+specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*******************************************************************************/
+
+#ifndef _msCopyright_h
+#define _msCopyright_h
+
+#define MSAPI_COPYRIGHT "Copyright 2000~2013, Marvell International Ltd."
+#define MSAPI_VERSION "3.3"
+
+#endif /* _msCopyright_h */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/gtMad.h b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/gtMad.h
new file mode 100644
index 000000000000..042514c85b92
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/gtMad.h
@@ -0,0 +1,202 @@
+#include <Copyright.h>
+/*******************************************************************************
+* gtMad.h
+*
+* DESCRIPTION:
+*       MAD API header file.
+*
+* DEPENDENCIES:
+*       None.
+*
+* FILE REVISION NUMBER:
+*       $Revision: 1 $
+*******************************************************************************/
+GT_STATUS gvctGetAdvCableDiag_mad
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT        port,
+    IN  GT_ADV_VCT_MODE mode,
+    OUT GT_ADV_CABLE_STATUS *cableStatus
+);
+GT_STATUS gvctGetAdvExtendedStatus_mad
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT   port,
+    OUT GT_ADV_EXTENDED_STATUS *extendedStatus
+);
+#include <Copyright.h>
+
+
+GT_STATUS gprtPhyReset_mad
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port
+);
+GT_STATUS gprtSetPortLoopback_mad
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL   enable
+);
+
+GT_STATUS gprtSetPortSpeed_mad
+(
+IN GT_QD_DEV *dev,
+IN GT_LPORT  port,
+IN GT_PHY_SPEED speed
+);
+GT_STATUS gprtPortAutoNegEnable_mad
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL   state
+);
+GT_STATUS gprtPortPowerDown_mad
+(
+IN GT_QD_DEV *dev,
+IN GT_LPORT  port,
+IN GT_BOOL   state
+);
+GT_STATUS gprtPortRestartAutoNeg_mad
+(
+IN GT_QD_DEV *dev,
+IN GT_LPORT  port
+);
+GT_STATUS gprtSetPortDuplexMode_mad
+(
+IN GT_QD_DEV *dev,
+IN GT_LPORT  port,
+IN GT_BOOL   dMode
+);
+GT_STATUS gprtSetPortAutoMode_mad
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_PHY_AUTO_MODE mode
+);
+GT_STATUS gprtSetPause_mad
+(
+IN GT_QD_DEV *dev,
+IN GT_LPORT  port,
+IN GT_PHY_PAUSE_MODE state
+);
+GT_STATUS gprtSetDTEDetect_mad
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL   state
+);
+GT_STATUS gprtGetDTEDetectStatus_mad
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_BOOL   *state
+);
+GT_STATUS gprtSetDTEDetectDropWait_mad
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    IN  GT_U16    waitTime
+);
+GT_STATUS gprtGetDTEDetectDropWait_mad
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_U16    *waitTime
+);
+GT_STATUS gprtSetEnergyDetect_mad
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    IN  GT_EDETECT_MODE   mode
+);
+GT_STATUS gprtGetEnergyDetect_mad
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_EDETECT_MODE   *mode
+);
+GT_STATUS gprtSet1000TMasterMode_mad
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_LPORT     port,
+    IN  GT_1000T_MASTER_SLAVE   *mode
+);
+GT_STATUS gprtGet1000TMasterMode_mad
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_LPORT     port,
+    OUT GT_1000T_MASTER_SLAVE   *mode
+);
+GT_STATUS gprtGetPhyLinkStatus_mad
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL      *linkStatus
+);
+GT_STATUS gprtSetPktGenEnable_mad
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL   en,
+    IN GT_PG     *pktInfo
+);
+GT_STATUS gprtGetPhyReg_mad
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_U32         regAddr,
+    OUT GT_U16         *data
+);
+GT_STATUS gprtSetPhyReg_mad
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    IN  GT_U32            regAddr,
+    IN  GT_U16            inData
+);
+GT_STATUS gprtGetPagedPhyReg_mad
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U32  port,
+    IN    GT_U32  regAddr,
+    IN    GT_U32  page,
+    OUT GT_U16* data
+);
+GT_STATUS gprtSetPagedPhyReg_mad
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U32 port,
+    IN    GT_U32 regAddr,
+    IN    GT_U32 page,
+    IN  GT_U16 inData
+);
+GT_STATUS gprtPhyIntEnable_mad
+(
+IN GT_QD_DEV    *dev,
+IN GT_LPORT    port,
+IN GT_U16    intType
+);
+GT_STATUS gprtGetPhyIntStatus_mad
+(
+IN   GT_QD_DEV  *dev,
+IN   GT_LPORT   port,
+OUT  GT_U16*    intType
+);
+GT_STATUS gprtGetPhyIntPortSummary_mad
+(
+IN  GT_QD_DEV  *dev,
+OUT GT_U16     *intPortMask
+);
+GT_STATUS gvctGetCableDiag_mad
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT        port,
+    OUT GT_CABLE_STATUS *cableStatus
+);
+GT_STATUS gvctGet1000BTExtendedStatus_mad
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_LPORT        port,
+    OUT GT_1000BT_EXTENDED_STATUS *extendedStatus
+);
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/gtPTP.h b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/gtPTP.h
new file mode 100644
index 000000000000..09ca56c55cdc
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/gtPTP.h
@@ -0,0 +1,499 @@
+#ifdef CONFIG_AVB_FPGA
+
+/*******************************************************************************
+* gptpGetFPGAIntStatus
+*
+* DESCRIPTION:
+*       This routine gets interrupt status of PTP logic.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        ptpInt    - PTP Int Status
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetFPGAIntStatus
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U32        *ptpInt
+);
+
+/*******************************************************************************
+* gptpSetFPGAIntStatus
+*
+* DESCRIPTION:
+*       This routine sets interrupt status of PTP logic.
+*
+* INPUTS:
+*    ptpInt    - PTP Int Status
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetFPGAIntStatus
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U32    ptpInt
+);
+
+/*******************************************************************************
+* gptpSetFPGAIntEn
+*
+* DESCRIPTION:
+*       This routine enables PTP interrupt.
+*
+* INPUTS:
+*        ptpInt    - PTP Int Status (1 to enable, 0 to disable)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetFPGAIntEn
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        ptpInt
+);
+
+/*******************************************************************************
+* gptpGetClockSource
+*
+* DESCRIPTION:
+*       This routine gets PTP Clock source setup.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        clkSrc    - PTP clock source (A/D Device or FPGA)
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetClockSource
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_PTP_CLOCK_SRC     *clkSrc
+);
+
+/*******************************************************************************
+* gptpSetClockSource
+*
+* DESCRIPTION:
+*       This routine sets PTP Clock source setup.
+*
+* INPUTS:
+*        clkSrc    - PTP clock source (A/D Device or FPGA)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetClockSource
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PTP_CLOCK_SRC     clkSrc
+);
+
+/*******************************************************************************
+* gptpGetP9Mode
+*
+* DESCRIPTION:
+*       This routine gets Port 9 Mode.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        mode - Port 9 mode (GT_PTP_P9_MODE enum type)
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetP9Mode
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_PTP_P9_MODE     *mode
+);
+
+/*******************************************************************************
+* gptpSetP9Mode
+*
+* DESCRIPTION:
+*       This routine sets Port 9 Mode.
+*
+* INPUTS:
+*        mode - Port 9 mode (GT_PTP_P9_MODE enum type)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetP9Mode
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PTP_P9_MODE     mode
+);
+
+/*******************************************************************************
+* gptpReset
+*
+* DESCRIPTION:
+*       This routine performs software reset for PTP logic.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpReset
+(
+    IN  GT_QD_DEV     *dev
+);
+
+/*******************************************************************************
+* gptpGetCycleAdjustEn
+*
+* DESCRIPTION:
+*       This routine checks if PTP Duty Cycle Adjustment is enabled.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        adjEn    - GT_TRUE if enabled, GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetCycleAdjustEn
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *adjEn
+);
+
+/*******************************************************************************
+* gptpSetCycleAdjustEn
+*
+* DESCRIPTION:
+*       This routine enables/disables PTP Duty Cycle Adjustment.
+*
+* INPUTS:
+*        adjEn    - GT_TRUE to enable, GT_FALSE to disable
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetCycleAdjustEn
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL        adjEn
+);
+
+/*******************************************************************************
+* gptpGetCycleAdjust
+*
+* DESCRIPTION:
+*       This routine gets clock duty cycle adjustment value.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        adj    - adjustment value (GT_PTP_CLOCK_ADJUSTMENT structure)
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetCycleAdjust
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_PTP_CLOCK_ADJUSTMENT    *adj
+);
+
+/*******************************************************************************
+* gptpSetCycleAdjust
+*
+* DESCRIPTION:
+*       This routine sets clock duty cycle adjustment value.
+*
+* INPUTS:
+*        adj    - adjustment value (GT_PTP_CLOCK_ADJUSTMENT structure)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetCycleAdjust
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PTP_CLOCK_ADJUSTMENT    *adj
+);
+
+/*******************************************************************************
+* gptpGetPLLEn
+*
+* DESCRIPTION:
+*       This routine checks if PLL is enabled.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        en        - GT_TRUE if enabled, GT_FALSE otherwise
+*        freqSel    - PLL Frequency Selection (default 0x3 - 22.368MHz)
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       PLL Frequence selection is based on the Clock Recovery PLL device.
+*        IDT MK1575-01 is the default PLL device.
+*
+*******************************************************************************/
+GT_STATUS gptpGetPLLEn
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *en,
+    OUT GT_U32        *freqSel
+);
+
+/*******************************************************************************
+* gptpSetPLLEn
+*
+* DESCRIPTION:
+*       This routine enables/disables PLL device.
+*
+* INPUTS:
+*        en        - GT_TRUE to enable, GT_FALSE to disable
+*        freqSel    - PLL Frequency Selection (default 0x3 - 22.368MHz)
+*                  Meaningful only when enabling PLL device
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       PLL Frequence selection is based on the Clock Recovery PLL device.
+*        IDT MK1575-01 is the default PLL device.
+*
+*******************************************************************************/
+GT_STATUS gptpSetPLLEn
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL        en,
+    IN  GT_U32        freqSel
+);
+
+/*******************************************************************************
+* gptpGetDDSReg
+*
+* DESCRIPTION:
+*       This routine gets DDS register data.
+*
+* INPUTS:
+*    ddsReg    - DDS Register
+*
+* OUTPUTS:
+*    ddsData    - register data
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetDDSReg
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32    ddsReg,
+    OUT GT_U32    *ddsData
+);
+
+/*******************************************************************************
+* gptpSetDDSReg
+*
+* DESCRIPTION:
+*       This routine sets DDS register data.
+*    DDS register data written by this API are not affected until gptpUpdateDDSReg API is called.
+*
+* INPUTS:
+*    ddsReg    - DDS Register
+*    ddsData    - register data
+*
+* OUTPUTS:
+*    none
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetDDSReg
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32    ddsReg,
+    IN  GT_U32    ddsData
+);
+
+/*******************************************************************************
+* gptpUpdateDDSReg
+*
+* DESCRIPTION:
+*       This routine updates DDS register data.
+*    DDS register data written by gptpSetDDSReg are not affected until this API is called.
+*
+* INPUTS:
+*    none
+*
+* OUTPUTS:
+*    none
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpUpdateDDSReg
+(
+    IN  GT_QD_DEV     *dev
+);
+
+/*******************************************************************************
+* gptpSetADFReg
+*
+* DESCRIPTION:
+*       This routine sets ADF4156 register data.
+*
+* INPUTS:
+*    adfData    - register data
+*
+* OUTPUTS:
+*    none
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetADFReg
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32    adfData
+);
+
+#endif  /*  CONFIG_AVB_FPGA */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/driver/gtDrvConfig.h b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/driver/gtDrvConfig.h
new file mode 100644
index 000000000000..afbd47010a47
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/driver/gtDrvConfig.h
@@ -0,0 +1,249 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtDrvConfig.h
+*
+* DESCRIPTION:
+*       Includes driver level configuration and initialization function.
+*
+* DEPENDENCIES:
+*       None.
+*
+* FILE REVISION NUMBER:
+*       $Revision: 4 $
+*
+*******************************************************************************/
+
+#ifndef __gtDrvConfigh
+#define __gtDrvConfigh
+
+#include <msApi.h>
+#include <gtVct.h>
+#include <gtDrvSwRegs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*******************************************************************************
+* driverConfig
+*
+* DESCRIPTION:
+*       This function initializes the driver level of the quarterDeck software.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success, or
+*       GT_OUT_OF_CPU_MEM   - if failed to allocate CPU memory,
+*       GT_FAIL             - otherwise.
+*
+* COMMENTS:
+*       1.  This function should perform the following:
+*           -   Initialize the global switch configuration structure.
+*           -   Initialize Mii Interface
+*           -   Set the CPU port into trailer mode (Ingress and Egress).
+*
+*******************************************************************************/
+GT_STATUS driverConfig(IN GT_QD_DEV *dev);
+
+/*******************************************************************************
+* driverEnable
+*
+* DESCRIPTION:
+*       This function enables the switch for full operation, after the driver
+*       Config function was called.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL othrwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS driverEnable(IN GT_QD_DEV *dev);
+
+/*******************************************************************************
+* driverIsPhyAttached
+*
+* DESCRIPTION:
+*       This function reads and returns Phy ID (register 3) of Marvell Phy.
+*
+* INPUTS:
+*       hwPort    - port number where the Phy is connected
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       phyId - if Marvell Phy exists
+*        0      - otherwise
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_U32 driverIsPhyAttached
+(
+    IN  GT_QD_DEV    *dev,
+    IN    GT_U8         hwPort
+);
+
+/*******************************************************************************
+* driverGetPhyID
+*
+* DESCRIPTION:
+*       This function reads and returns Phy ID (register 3) of Marvell Phy.
+*
+* INPUTS:
+*       hwPort    - port number where the Phy is connected
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       phyId - if Marvell Phy exists
+*        GT_INVALID_PORT      - otherwise
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_U32 driverGetPhyID
+(
+    IN  GT_QD_DEV    *dev,
+    IN    GT_U8         hwPort
+);
+
+/*******************************************************************************
+* driverGetSerdesPort
+*
+* DESCRIPTION:
+*       This function converts port to Serdes port
+*
+* INPUTS:
+*       hwPort     - port number where the Phy is connected
+*
+* OUTPUTS:
+*       hwPort     - port number where the Phy is connected
+*
+* RETURNS:
+*       GT_OK     - if success
+*       GT_FAIL - othrwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS driverGetSerdesPort(GT_QD_DEV *dev, GT_U8* hwPort);
+
+
+/*******************************************************************************
+* driverPagedAccessStart
+*
+* DESCRIPTION:
+*       This function stores page register and Auto Reg Selection mode if needed.
+*
+* INPUTS:
+*       hwPort     - port number where the Phy is connected
+*        pageType - type of the page registers
+*
+* OUTPUTS:
+*       autoOn    - GT_TRUE if Auto Reg Selection enabled, GT_FALSE otherwise.
+*        pageReg - Page Register Data
+*
+* RETURNS:
+*       GT_OK     - if success
+*       GT_FAIL - othrwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS driverPagedAccessStart
+(
+    IN  GT_QD_DEV    *dev,
+    IN    GT_U8         hwPort,
+    IN    GT_U8         pageType,
+    OUT    GT_BOOL         *autoOn,
+    OUT    GT_U16         *pageReg
+);
+
+
+/*******************************************************************************
+* driverPagedAccessStop
+*
+* DESCRIPTION:
+*       This function restores page register and Auto Reg Selection mode if needed.
+*
+* INPUTS:
+*       hwPort     - port number where the Phy is connected
+*        pageType - type of the page registers
+*       autoOn     - GT_TRUE if Auto Reg Selection enabled, GT_FALSE otherwise.
+*        pageReg  - Page Register Data
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK     - if success
+*       GT_FAIL - othrwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS driverPagedAccessStop
+(
+    IN  GT_QD_DEV    *dev,
+    IN    GT_U8         hwPort,
+    IN    GT_U8         pageType,
+    IN    GT_BOOL         autoOn,
+    IN    GT_U16         pageReg
+);
+
+
+/*******************************************************************************
+* driverFindPhyInformation
+*
+* DESCRIPTION:
+*       This function gets information of Phy connected to the given port.
+*
+* INPUTS:
+*       hwPort    - port number where the Phy is connected
+*
+* OUTPUTS:
+*       phyId    - Phy ID
+*
+* RETURNS:
+*       GT_OK     - if found Marvell Phy,
+*       GT_FAIL - othrwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS driverFindPhyInformation
+(
+    IN  GT_QD_DEV    *dev,
+    IN    GT_U8         hwPort,
+    OUT    GT_PHY_INFO     *phyInfo
+);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __gtDrvConfigh */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/driver/gtDrvEvents.h b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/driver/gtDrvEvents.h
new file mode 100644
index 000000000000..72cb4269e2e3
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/driver/gtDrvEvents.h
@@ -0,0 +1,87 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtDrvEvents.h
+*
+* DESCRIPTION:
+*       This file includes function declarations for QuarterDeck interrupts
+*       configuration and handling.
+*
+* DEPENDENCIES:
+*       None.
+*
+* FILE REVISION NUMBER:
+*       $Revision: 1 $
+*
+*******************************************************************************/
+
+#ifndef __gtDrvEventsh
+#define __gtDrvEventsh
+
+#include <msApi.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*******************************************************************************
+* drvEventInit
+*
+* DESCRIPTION:
+*       This function initializes the driver's interrupt handling mechanism.
+*
+* INPUTS:
+*       intVecNum   - The interrupt vector the switch is connected to.
+*       isrFunc     - A pointer to the Interrupt Service Routine to be
+*                     connected to the given interrupt vector.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success,
+*       GT_FAIL - otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS drvEventsInit
+(
+    IN  GT_QD_DEV     *dev,
+    IN GT_U32         intVecNum,
+    IN GT_VOIDFUNCPTR isrFunc
+);
+
+
+
+/*******************************************************************************
+* eventQdSr
+*
+* DESCRIPTION:
+*       QuarterDeck interrupt service routine.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       None.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_BOOL eventQdSr
+(
+    IN  GT_QD_DEV  *dev,
+    OUT GT_U16*    intCause
+);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __gtDrvEventsh */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/driver/gtDrvSwRegs.h b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/driver/gtDrvSwRegs.h
new file mode 100644
index 000000000000..4f8c5867b1ed
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/driver/gtDrvSwRegs.h
@@ -0,0 +1,304 @@
+#include <Copyright.h>
+
+/********************************************************************************
+ * * gtDrvSwRegs.h
+ * *
+ * * DESCRIPTION:
+ * *       definitions of the register map of QuarterDeck Device
+ * *
+ * * DEPENDENCIES:
+ * *
+ * * FILE REVISION NUMBER:
+ * *
+ * *******************************************************************************/
+#ifndef __gtDrvSwRegsh
+#define __gtDrvSwRegsh
+
+/* QuarterDeck Per Port Registers */
+#define QD_REG_PORT_STATUS        0x0
+#define QD_REG_PCS_CONTROL        0x1        /* for Sapphire family */
+#define QD_REG_LIMIT_PAUSE_CONTROL        0x2        /* Jamming control register */
+#define QD_REG_SWITCH_ID        0x3
+#define QD_REG_PORT_CONTROL        0x4
+#define QD_REG_PORT_CONTROL1        0x5
+#define QD_REG_PORT_VLAN_MAP        0x6
+#define QD_REG_PVID            0x7
+#define QD_REG_PORT_CONTROL2        0x8    /* for Sapphire family */
+#define QD_REG_INGRESS_RATE_CTRL    0x9    /* for Sapphire family */
+#define QD_REG_EGRESS_RATE_CTRL        0xA    /* for Sapphire family */
+#define QD_REG_RATE_CTRL0        0x9
+#define QD_REG_RATE_CTRL        0xA
+#define QD_REG_PAV            0xB
+#define QD_REG_PORT_ATU_CONTROL        0xC
+#define QD_REG_PRI_OVERRIDE        0xD
+#define QD_REG_POLICY_CONTROL    0xE
+#define QD_REG_PORT_ETH_TYPE    0xF
+#define QD_REG_RX_COUNTER        0x10
+#define QD_REG_TX_COUNTER        0x11
+#define QD_REG_DROPPED_COUNTER    0x12
+
+#define QD_REG_INDISCARD_LO_COUNTER        0x10
+#define QD_REG_INDISCARD_HI_COUNTER        0x11
+#define QD_REG_INFILTERED_COUNTER        0x12
+#define QD_REG_OUTFILTERED_COUNTER        0x13
+
+#define QD_REG_LED_CONTROL        0x16
+
+#define QD_REG_Q_COUNTER        0x1B
+#define QD_REG_RATE_CONTROL        0x0A
+#define QD_REG_PORT_ASSOCIATION        0x0B
+#define QD_REG_IEEE_PRI_REMAP_3_0    0x18    /* for Sapphire family */
+#define QD_REG_IEEE_PRI_REMAP_7_4    0x19    /* for Sapphire family */
+
+#define QD_REG_PROVIDER_TAG        0x1A        /* for Schooner family */
+
+/* QuarterDeck Global Registers */
+#define QD_REG_GLOBAL_STATUS        0x0
+#define QD_REG_MACADDR_01        0x1
+#define QD_REG_MACADDR_23        0x2
+#define QD_REG_MACADDR_45        0x3
+#define QD_REG_GLOBAL_CONTROL        0x4
+#define QD_REG_AGETIME_LA_CONTROL    0xA
+#define QD_REG_GLOBAL_CONTROL2        0x1C    /* for Sapphire, Schooner family */
+#define QD_REG_CORETAG_TYPE        0x19        /* for Ruby family */
+#define QD_REG_IP_MAPPING_TABLE    0x19        /* for Amber family */
+#define QD_REG_MONITOR_CONTROL    0x1A        /* for Ruby family */
+#define QD_REG_MANGEMENT_CONTROL    0x1A    /* for Schooner family */
+#define QD_REG_TOTAL_FREE_COUNTER    0x1B    /* for Schooner family */
+
+/* QuarterDeck Global 2 Registers */
+#define QD_REG_PHYINT_SOURCE    0x0
+#define QD_REG_DEVINT_SOURCE    0x0
+#define QD_REG_DEVINT_MASK        0x1
+#define QD_REG_MGMT_ENABLE_2X    0x2
+#define QD_REG_MGMT_ENABLE        0x3
+#define QD_REG_FLOWCTRL_DELAY    0x4
+#define QD_REG_MANAGEMENT        0x5
+#define QD_REG_ROUTING_TBL        0x6
+#define QD_REG_TRUNK_MASK_TBL    0x7
+#define QD_REG_TRUNK_ROUTING    0x8
+#define QD_REG_INGRESS_RATE_COMMAND    0x9
+#define QD_REG_INGRESS_RATE_DATA    0xA
+#define QD_REG_PVT_ADDR            0xB
+#define QD_REG_PVT_DATA            0xC
+#define QD_REG_SWITCH_MAC        0xD
+#define QD_REG_ATU_STATS        0xE
+#define QD_REG_PRIORITY_OVERRIDE    0xF
+#define QD_REG_EEPROM_COMMAND    0x14
+#define QD_REG_EEPROM_DATA        0x15
+#define QD_REG_PTP_COMMAND        0x16
+#define QD_REG_PTP_DATA            0x17
+#define QD_REG_SMI_PHY_CMD        0x18
+#define QD_REG_SMI_PHY_DATA        0x19
+#define QD_REG_SCRATCH_MISC        0x1A
+#define QD_REG_WD_CONTROL        0x1B
+#define QD_REG_QOS_WEIGHT        0x1C
+#define QD_REG_SDET_POLARITY    0x1D
+
+/* QuarterDeck Global 3 Registers */
+#define QD_REG_TCAM_OPERATION         0x0
+#define QD_REG_TCAM_P0_KEYS_1         0x2
+#define QD_REG_TCAM_P0_KEYS_2         0x3
+#define QD_REG_TCAM_P0_KEYS_3         0x4
+#define QD_REG_TCAM_P0_KEYS_4         0x5
+#define QD_REG_TCAM_P0_MATCH_DATA_1   0x6
+#define QD_REG_TCAM_P0_MATCH_DATA_2   0x7
+#define QD_REG_TCAM_P0_MATCH_DATA_3   0x8
+#define QD_REG_TCAM_P0_MATCH_DATA_4   0x9
+#define QD_REG_TCAM_P0_MATCH_DATA_5   0xa
+#define QD_REG_TCAM_P0_MATCH_DATA_6   0xb
+#define QD_REG_TCAM_P0_MATCH_DATA_7   0xc
+#define QD_REG_TCAM_P0_MATCH_DATA_8   0xd
+#define QD_REG_TCAM_P0_MATCH_DATA_9   0xe
+#define QD_REG_TCAM_P0_MATCH_DATA_10  0xf
+#define QD_REG_TCAM_P0_MATCH_DATA_11  0x10
+#define QD_REG_TCAM_P0_MATCH_DATA_12  0x11
+#define QD_REG_TCAM_P0_MATCH_DATA_13  0x12
+#define QD_REG_TCAM_P0_MATCH_DATA_14  0x13
+#define QD_REG_TCAM_P0_MATCH_DATA_15  0x14
+#define QD_REG_TCAM_P0_MATCH_DATA_16  0x15
+#define QD_REG_TCAM_P0_MATCH_DATA_17  0x16
+#define QD_REG_TCAM_P0_MATCH_DATA_18  0x17
+#define QD_REG_TCAM_P0_MATCH_DATA_19  0x18
+#define QD_REG_TCAM_P0_MATCH_DATA_20  0x19
+#define QD_REG_TCAM_P0_MATCH_DATA_21  0x1a
+#define QD_REG_TCAM_P0_MATCH_DATA_22  0x1b
+
+#define QD_REG_TCAM_P1_MATCH_DATA_23   0x2
+#define QD_REG_TCAM_P1_MATCH_DATA_24   0x3
+#define QD_REG_TCAM_P1_MATCH_DATA_25   0x4
+#define QD_REG_TCAM_P1_MATCH_DATA_26   0x5
+#define QD_REG_TCAM_P1_MATCH_DATA_27   0x6
+#define QD_REG_TCAM_P1_MATCH_DATA_28   0x7
+#define QD_REG_TCAM_P1_MATCH_DATA_29   0x8
+#define QD_REG_TCAM_P1_MATCH_DATA_30   0x9
+#define QD_REG_TCAM_P1_MATCH_DATA_31   0xa
+#define QD_REG_TCAM_P1_MATCH_DATA_32   0xb
+#define QD_REG_TCAM_P1_MATCH_DATA_33   0xc
+#define QD_REG_TCAM_P1_MATCH_DATA_34   0xd
+#define QD_REG_TCAM_P1_MATCH_DATA_35   0xe
+#define QD_REG_TCAM_P1_MATCH_DATA_36   0xf
+#define QD_REG_TCAM_P1_MATCH_DATA_37   0x10
+#define QD_REG_TCAM_P1_MATCH_DATA_38   0x11
+#define QD_REG_TCAM_P1_MATCH_DATA_39   0x12
+#define QD_REG_TCAM_P1_MATCH_DATA_40   0x13
+#define QD_REG_TCAM_P1_MATCH_DATA_41   0x14
+#define QD_REG_TCAM_P1_MATCH_DATA_42   0x15
+#define QD_REG_TCAM_P1_MATCH_DATA_43   0x16
+#define QD_REG_TCAM_P1_MATCH_DATA_44   0x17
+#define QD_REG_TCAM_P1_MATCH_DATA_45   0x18
+#define QD_REG_TCAM_P1_MATCH_DATA_46   0x19
+#define QD_REG_TCAM_P1_MATCH_DATA_47   0x1a
+#define QD_REG_TCAM_P1_MATCH_DATA_48   0x1b
+
+#define QD_REG_TCAM_P2_ACTION_1        0x2
+#define QD_REG_TCAM_P2_ACTION_2        0x3
+#define QD_REG_TCAM_P2_ACTION_3        0x4
+#define QD_REG_TCAM_P2_ACTION_4        0x5
+#define QD_REG_TCAM_P2_DEBUG_PORT      0x1c
+#define QD_REG_TCAM_P2_ALL_HIT         0x1f
+
+
+
+/* Global 1 Registers Definition for STU,VTU,RMON,and ATU Registers */
+#define QD_REG_ATU_FID_REG        0x1
+#define QD_REG_VTU_FID_REG        0x2
+#define QD_REG_STU_SID_REG        0x3
+#define QD_REG_VTU_OPERATION        0x5
+#define QD_REG_VTU_VID_REG        0x6
+#define QD_REG_VTU_DATA1_REG        0x7
+#define QD_REG_VTU_DATA2_REG        0x8
+#define QD_REG_VTU_DATA3_REG        0x9
+#define QD_REG_STATS_OPERATION        0x1D
+#define QD_REG_STATS_COUNTER3_2        0x1E
+#define QD_REG_STATS_COUNTER1_0        0x1F
+
+#define QD_REG_ATU_CONTROL        0xA
+#define QD_REG_ATU_OPERATION        0xB
+#define QD_REG_ATU_DATA_REG        0xC
+#define QD_REG_ATU_MAC_BASE        0xD
+#define QD_REG_IP_PRI_BASE        0x10
+#define QD_REG_IEEE_PRI            0x18
+
+
+/* Definitions for MIB Counter */
+#define GT_STATS_NO_OP            0x0
+#define GT_STATS_FLUSH_ALL        0x1
+#define GT_STATS_FLUSH_PORT        0x2
+#define GT_STATS_READ_COUNTER        0x4
+#define GT_STATS_CAPTURE_PORT        0x5
+#define GT_STATS_CAPTURE_PORT_CLEAR  0x6
+
+#define QD_PHY_CONTROL_REG            0
+#define QD_PHY_AUTONEGO_AD_REG            4
+#define QD_PHY_NEXTPAGE_TX_REG            7
+#define QD_PHY_AUTONEGO_1000AD_REG        9
+#define QD_PHY_SPEC_CONTROL_REG            16
+#define QD_PHY_INT_ENABLE_REG            18
+#define QD_PHY_INT_STATUS_REG            19
+#define QD_PHY_INT_PORT_SUMMARY_REG        20
+
+/* Definitions for VCT registers */
+#define QD_REG_MDI0_VCT_STATUS     16
+#define QD_REG_MDI1_VCT_STATUS     17
+#define QD_REG_MDI2_VCT_STATUS     18
+#define QD_REG_MDI3_VCT_STATUS     19
+#define QD_REG_ADV_VCT_CONTROL_5    23
+#define QD_REG_ADV_VCT_CONTROL_8    20
+#define QD_REG_PAIR_SKEW_STATUS    20
+#define QD_REG_PAIR_SWAP_STATUS    21
+
+/* Bit Definition for QD_PHY_CONTROL_REG */
+#define QD_PHY_RESET            0x8000
+#define QD_PHY_LOOPBACK            0x4000
+#define QD_PHY_SPEED            0x2000
+#define QD_PHY_AUTONEGO            0x1000
+#define QD_PHY_POWER            0x800
+#define QD_PHY_ISOLATE            0x400
+#define QD_PHY_RESTART_AUTONEGO        0x200
+#define QD_PHY_DUPLEX            0x100
+#define QD_PHY_SPEED_MSB        0x40
+
+#define QD_PHY_POWER_BIT            11
+#define QD_PHY_RESTART_AUTONEGO_BIT        9
+
+/* Bit Definition for QD_PHY_AUTONEGO_AD_REG */
+#define QD_PHY_NEXTPAGE            0x8000
+#define QD_PHY_REMOTEFAULT        0x4000
+#define QD_PHY_PAUSE            0x400
+#define QD_PHY_100_FULL            0x100
+#define QD_PHY_100_HALF            0x80
+#define QD_PHY_10_FULL            0x40
+#define QD_PHY_10_HALF            0x20
+
+#define QD_PHY_MODE_AUTO_AUTO    (QD_PHY_100_FULL | QD_PHY_100_HALF | QD_PHY_10_FULL | QD_PHY_10_HALF)
+#define QD_PHY_MODE_100_AUTO    (QD_PHY_100_FULL | QD_PHY_100_HALF)
+#define QD_PHY_MODE_10_AUTO        (QD_PHY_10_FULL | QD_PHY_10_HALF)
+#define QD_PHY_MODE_AUTO_FULL    (QD_PHY_100_FULL | QD_PHY_10_FULL)
+#define QD_PHY_MODE_AUTO_HALF    (QD_PHY_100_HALF | QD_PHY_10_HALF)
+
+#define QD_PHY_MODE_100_FULL    QD_PHY_100_FULL
+#define QD_PHY_MODE_100_HALF    QD_PHY_100_HALF
+#define QD_PHY_MODE_10_FULL        QD_PHY_10_FULL
+#define QD_PHY_MODE_10_HALF        QD_PHY_10_HALF
+
+/* Gigabit Phy related definition */
+#define QD_GIGPHY_1000X_FULL_CAP    0x8
+#define QD_GIGPHY_1000X_HALF_CAP    0x4
+#define QD_GIGPHY_1000T_FULL_CAP    0x2
+#define QD_GIGPHY_1000T_HALF_CAP    0x1
+
+#define QD_GIGPHY_1000X_CAP        (QD_GIGPHY_1000X_FULL_CAP|QD_GIGPHY_1000X_HALF_CAP)
+#define QD_GIGPHY_1000T_CAP        (QD_GIGPHY_1000T_FULL_CAP|QD_GIGPHY_1000T_HALF_CAP)
+
+#define QD_GIGPHY_1000X_FULL        0x20
+#define QD_GIGPHY_1000X_HALF        0x40
+
+#define QD_GIGPHY_1000T_FULL        0x200
+#define QD_GIGPHY_1000T_HALF        0x100
+
+/* Bit definition for QD_PHY_INT_ENABLE_REG */
+#define QD_PHY_INT_SPEED_CHANGED        0x4000
+#define QD_PHY_INT_DUPLEX_CHANGED        0x2000
+#define QD_PHY_INT_PAGE_RECEIVED        0x1000
+#define QD_PHY_INT_AUTO_NEG_COMPLETED        0x800
+#define QD_PHY_INT_LINK_STATUS_CHANGED        0x400
+#define QD_PHY_INT_SYMBOL_ERROR            0x200
+#define QD_PHY_INT_FALSE_CARRIER        0x100
+#define QD_PHY_INT_FIFO_FLOW            0x80
+#define QD_PHY_INT_CROSSOVER_CHANGED        0x40
+#define QD_PHY_INT_POLARITY_CHANGED        0x2
+#define QD_PHY_INT_JABBER            0x1
+
+/*Line loopback register related definition*/
+#define QD_PHY_FE_LINE_LOOPBACK_REG    0x1c
+#define QD_PHY_GE_LINE_LOOPBACK_REG    0x15
+
+/* Bit definition for DEVICE Interrupt */
+#define QD_DEV_INT_WATCHDOG            0x8000
+#define QD_DEV_INT_JAMLIMIT            0x4000
+#define QD_DEV_INT_DUPLEX_MISMATCH    0x2000
+#define QD_DEV_INT_WAKE_EVENT         0x1000
+
+/* Definition for Multi Address Mode */
+#define QD_REG_SMI_COMMAND        0x0
+#define QD_REG_SMI_DATA            0x1
+
+/* Bit definition for QD_REG_SMI_COMMAND */
+#define QD_SMI_BUSY                0x8000
+#define QD_SMI_MODE                0x1000
+#define QD_SMI_MODE_BIT            12
+#define QD_SMI_OP_BIT            10
+#define QD_SMI_OP_SIZE            2
+#define QD_SMI_DEV_ADDR_BIT        5
+#define QD_SMI_DEV_ADDR_SIZE    5
+#define QD_SMI_REG_ADDR_BIT        0
+#define QD_SMI_REG_ADDR_SIZE    5
+
+#define QD_SMI_CLAUSE45            0
+#define QD_SMI_CLAUSE22            1
+
+#define QD_SMI_WRITE            0x01
+#define QD_SMI_READ                0x02
+
+#endif /* __gtDrvSwRegsh */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/driver/gtHwCntl.h b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/driver/gtHwCntl.h
new file mode 100644
index 000000000000..39b80f43cbb2
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/driver/gtHwCntl.h
@@ -0,0 +1,1083 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtHwCntl.h
+*
+* DESCRIPTION:
+*       Functions declarations for Hw accessing quarterDeck phy, internal and
+*       global registers.
+*
+* DEPENDENCIES:
+*       None.
+*
+* FILE REVISION NUMBER:
+*       $Revision: 5 $
+*
+*******************************************************************************/
+
+#ifndef __gtHwCntlh
+#define __gtHwCntlh
+
+#include <msApi.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* This macro is used to calculate the register's SMI   */
+/* device address, according to the baseAddr            */
+/* field in the Switch configuration struct.            */
+extern GT_U8 portToSmiMapping(GT_QD_DEV *dev, GT_U8 portNum, GT_U32 accessType);
+#define CALC_SMI_DEV_ADDR(_dev, _portNum, _accessType)        \
+            portToSmiMapping(_dev, _portNum, _accessType)
+
+/* This macro calculates the mask for partial read /    */
+/* write of register's data.                            */
+#define CALC_MASK(fieldOffset,fieldLen,mask)        \
+            if((fieldLen + fieldOffset) >= 16)      \
+                mask = (0 - (1 << fieldOffset));    \
+            else                                    \
+                mask = (((1 << (fieldLen + fieldOffset))) - (1 << fieldOffset))
+
+#define GT_GET_PAGE_ADDR(_regAddr) ((_regAddr<29)?22:29)
+
+/* Start address of phy related register.               */
+#define PHY_REGS_START_ADDR     0x0
+#define PHY_REGS_START_ADDR_8PORT    0x0
+
+/* Start address of ports related register.             */
+#define PORT_REGS_START_ADDR        0x8
+#define PORT_REGS_START_ADDR_8PORT    0x10
+
+/* Start address of global register.                    */
+#define GLOBAL_REGS_START_ADDR  0xF
+#define GLOBAL_REGS_START_ADDR_8PORT  0x1B
+
+#define PHY_ACCESS            1
+#define PORT_ACCESS           2
+#define GLOBAL_REG_ACCESS     3
+#define GLOBAL2_REG_ACCESS    4
+#define GLOBAL3_REG_ACCESS    5
+
+#define QD_SMI_ACCESS_LOOP        2000
+#define QD_SMI_TIMEOUT            2
+
+
+/****************************************************************************/
+/* Phy registers related functions.                                         */
+/****************************************************************************/
+
+/*******************************************************************************
+* hwReadPhyReg
+*
+* DESCRIPTION:
+*       This function reads a switch's port phy register.
+*
+* INPUTS:
+*       portNum - Port number to read the register for.
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwReadPhyReg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     portNum,
+    IN  GT_U8     regAddr,
+    OUT GT_U16    *data
+);
+
+
+/*******************************************************************************
+* hwWritePhyReg
+*
+* DESCRIPTION:
+*       This function writes to a switch's port phy register.
+*
+* INPUTS:
+*       portNum - Port number to write the register for.
+*       regAddr - The register's address.
+*       data    - The data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwWritePhyReg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     portNum,
+    IN  GT_U8     regAddr,
+    IN  GT_U16    data
+);
+
+
+/*******************************************************************************
+* hwGetPhyRegField
+*
+* DESCRIPTION:
+*       This function reads a specified field from a switch's port phy register.
+*
+* INPUTS:
+*       portNum     - Port number to read the register for.
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to read.
+*
+* OUTPUTS:
+*       data        - The read register field.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  The sum of fieldOffset & fieldLength parameters must be smaller-
+*           equal to 16.
+*
+*******************************************************************************/
+GT_STATUS hwGetPhyRegField
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     portNum,
+    IN  GT_U8     regAddr,
+    IN  GT_U8     fieldOffset,
+    IN  GT_U8     fieldLength,
+    OUT GT_U16    *data
+);
+
+
+/*******************************************************************************
+* hwSetPhyRegField
+*
+* DESCRIPTION:
+*       This function writes to specified field in a switch's port phy register.
+*
+* INPUTS:
+*       portNum     - Port number to write the register for.
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to write.
+*       data        - Data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  The sum of fieldOffset & fieldLength parameters must be smaller-
+*           equal to 16.
+*
+*******************************************************************************/
+GT_STATUS hwSetPhyRegField
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     portNum,
+    IN  GT_U8     regAddr,
+    IN  GT_U8     fieldOffset,
+    IN  GT_U8     fieldLength,
+    IN  GT_U16    data
+);
+
+/*******************************************************************************
+* hwPhyReset
+*
+* DESCRIPTION:
+*       This function performs softreset and waits until reset completion.
+*
+* INPUTS:
+*       portNum     - Port number to write the register for.
+*       u16Data     - data should be written into Phy control register.
+*                      if this value is 0xFF, normal operation occcurs (read,
+*                      update, and write back.)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS hwPhyReset
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U8     portNum,
+    IN    GT_U16        u16Data
+);
+
+
+/*******************************************************************************
+* hwReadPagedPhyReg
+*
+* DESCRIPTION:
+*       This function reads a switch's port phy register in page mode.
+*
+* INPUTS:
+*       portNum - Port number to read the register for.
+*       pageNum - Page number of the register to be read.
+*       regAddr - The register's address.
+*        anyPage - register list(vector) that are common to all pages
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwReadPagedPhyReg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    pageNum,
+    IN  GT_U8    regAddr,
+    IN  GT_U32     anyPage,
+    OUT GT_U16   *data
+);
+
+
+/*******************************************************************************
+* hwWritePagedPhyReg
+*
+* DESCRIPTION:
+*       This function writes to a switch's port phy register in page mode.
+*
+* INPUTS:
+*       portNum - Port number to write the register for.
+*       pageNum - Page number of the register to be written.
+*       regAddr - The register's address.
+*        anyPage - Register list(vector) that are common to all pages
+*       data    - The data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwWritePagedPhyReg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    pageNum,
+    IN  GT_U8    regAddr,
+    IN  GT_U32     anyPage,
+    IN  GT_U16   data
+);
+
+/*******************************************************************************
+* hwGetPagedPhyRegField
+*
+* DESCRIPTION:
+*       This function reads a specified field from a switch's port phy register
+*        in page mode.
+*
+* INPUTS:
+*       portNum     - Port number to read the register for.
+*       pageNum     - Page number of the register to be read.
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to read.
+*        anyPage - Register list(vector) that are common to all pages
+*
+* OUTPUTS:
+*       data        - The read register field.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  The sum of fieldOffset & fieldLength parameters must be smaller-
+*           equal to 16.
+*
+*******************************************************************************/
+GT_STATUS hwGetPagedPhyRegField
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    pageNum,
+    IN  GT_U8    regAddr,
+    IN  GT_U8    fieldOffset,
+    IN  GT_U8    fieldLength,
+    IN  GT_U32     anyPage,
+    OUT GT_U16   *data
+);
+
+/*******************************************************************************
+* hwSetPagedPhyRegField
+*
+* DESCRIPTION:
+*       This function writes to specified field in a switch's port phy register
+*        in page mode
+*
+* INPUTS:
+*       portNum     - Port number to write the register for.
+*       pageNum     - Page number of the register to be read.
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to write.
+*        anyPage     - Register list(vector) that are common to all pages
+*       data        - Data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  The sum of fieldOffset & fieldLength parameters must be smaller-
+*           equal to 16.
+*
+*******************************************************************************/
+GT_STATUS hwSetPagedPhyRegField
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    pageNum,
+    IN  GT_U8    regAddr,
+    IN  GT_U8    fieldOffset,
+    IN  GT_U8    fieldLength,
+    IN  GT_U32     anyPage,
+    IN  GT_U16   data
+);
+
+
+/****************************************************************************/
+/* Per port registers related functions.                                    */
+/****************************************************************************/
+
+/*******************************************************************************
+* hwReadPortReg
+*
+* DESCRIPTION:
+*       This function reads a switch's port register.
+*
+* INPUTS:
+*       portNum - Port number to read the register for.
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwReadPortReg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     portNum,
+    IN  GT_U8     regAddr,
+    OUT GT_U16    *data
+);
+
+
+/*******************************************************************************
+* hwWritePortReg
+*
+* DESCRIPTION:
+*       This function writes to a switch's port register.
+*
+* INPUTS:
+*       portNum - Port number to write the register for.
+*       regAddr - The register's address.
+*       data    - The data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwWritePortReg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     portNum,
+    IN  GT_U8     regAddr,
+    IN  GT_U16    data
+);
+
+
+/*******************************************************************************
+* hwGetPortRegField
+*
+* DESCRIPTION:
+*       This function reads a specified field from a switch's port register.
+*
+* INPUTS:
+*       portNum     - Port number to read the register for.
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to read.
+*
+* OUTPUTS:
+*       data        - The read register field.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  The sum of fieldOffset & fieldLength parameters must be smaller-
+*           equal to 16.
+*
+*******************************************************************************/
+GT_STATUS hwGetPortRegField
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     portNum,
+    IN  GT_U8     regAddr,
+    IN  GT_U8     fieldOffset,
+    IN  GT_U8     fieldLength,
+    OUT GT_U16    *data
+);
+
+
+/*******************************************************************************
+* hwSetPortRegField
+*
+* DESCRIPTION:
+*       This function writes to specified field in a switch's port register.
+*
+* INPUTS:
+*       portNum     - Port number to write the register for.
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to write.
+*       data        - Data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  The sum of fieldOffset & fieldLength parameters must be smaller-
+*           equal to 16.
+*
+*******************************************************************************/
+GT_STATUS hwSetPortRegField
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     portNum,
+    IN  GT_U8     regAddr,
+    IN  GT_U8     fieldOffset,
+    IN  GT_U8     fieldLength,
+    IN  GT_U16    data
+);
+
+/*******************************************************************************
+* hwSetPortRegBits
+*
+* DESCRIPTION:
+*       This function writes to specified bits in a switch's port register.
+*
+* INPUTS:
+*       portNum     - Port number to write the register for.
+*       regAddr     - The register's address.
+*       mask         - The bits to write.
+*       data        - Data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  When Data is 0x1002 and mask is 0xF00F, 0001b is written to bit[31:24]
+*            and 0010b is written to bit[3:0]
+*
+*******************************************************************************/
+GT_STATUS hwSetPortRegBits
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    regAddr,
+    IN  GT_U16   mask,
+    IN  GT_U16   data
+);
+
+/****************************************************************************/
+/* Global registers related functions.                                      */
+/****************************************************************************/
+
+/*******************************************************************************
+* hwReadGlobalReg
+*
+* DESCRIPTION:
+*       This function reads a switch's global register.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwReadGlobalReg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     regAddr,
+    OUT GT_U16    *data
+);
+
+
+/*******************************************************************************
+* hwWriteGlobalReg
+*
+* DESCRIPTION:
+*       This function writes to a switch's global register.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*       data    - The data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwWriteGlobalReg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     regAddr,
+    IN  GT_U16    data
+);
+
+
+/*******************************************************************************
+* hwGetGlobalRegField
+*
+* DESCRIPTION:
+*       This function reads a specified field from a switch's global register.
+*
+* INPUTS:
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to read.
+*
+* OUTPUTS:
+*       data        - The read register field.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  The sum of fieldOffset & fieldLength parameters must be smaller-
+*           equal to 16.
+*
+*******************************************************************************/
+GT_STATUS hwGetGlobalRegField
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     regAddr,
+    IN  GT_U8     fieldOffset,
+    IN  GT_U8     fieldLength,
+    OUT GT_U16    *data
+);
+
+
+/*******************************************************************************
+* hwSetGlobalRegField
+*
+* DESCRIPTION:
+*       This function writes to specified field in a switch's global register.
+*
+* INPUTS:
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to write.
+*       data        - Data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  The sum of fieldOffset & fieldLength parameters must be smaller-
+*           equal to 16.
+*
+*******************************************************************************/
+GT_STATUS hwSetGlobalRegField
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     regAddr,
+    IN  GT_U8     fieldOffset,
+    IN  GT_U8     fieldLength,
+    IN  GT_U16    data
+);
+
+
+/****************************************************************************/
+/* Global 2 registers related functions.                                      */
+/****************************************************************************/
+
+/*******************************************************************************
+* hwReadGlobal2Reg
+*
+* DESCRIPTION:
+*       This function reads a switch's global 2 register.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwReadGlobal2Reg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     regAddr,
+    OUT GT_U16    *data
+);
+
+
+/*******************************************************************************
+* hwWriteGlobal2Reg
+*
+* DESCRIPTION:
+*       This function writes to a switch's global 2 register.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*       data    - The data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwWriteGlobal2Reg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     regAddr,
+    IN  GT_U16    data
+);
+
+
+/*******************************************************************************
+* hwGetGlobal2RegField
+*
+* DESCRIPTION:
+*       This function reads a specified field from a switch's global 2 register.
+*
+* INPUTS:
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to read.
+*
+* OUTPUTS:
+*       data        - The read register field.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  The sum of fieldOffset & fieldLength parameters must be smaller-
+*           equal to 16.
+*
+*******************************************************************************/
+GT_STATUS hwGetGlobal2RegField
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     regAddr,
+    IN  GT_U8     fieldOffset,
+    IN  GT_U8     fieldLength,
+    OUT GT_U16    *data
+);
+
+
+/*******************************************************************************
+* hwSetGlobal2RegField
+*
+* DESCRIPTION:
+*       This function writes to specified field in a switch's global 2 register.
+*
+* INPUTS:
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to write.
+*       data        - Data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS hwSetGlobal2RegField
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     regAddr,
+    IN  GT_U8     fieldOffset,
+    IN  GT_U8     fieldLength,
+    IN  GT_U16    data
+);
+
+/*******************************************************************************
+* hwSetGlobal2RegBits
+*
+* DESCRIPTION:
+*       This function writes to specified bits in a switch's global 2 register.
+*
+* INPUTS:
+*       regAddr     - The register's address.
+*       mask         - The bits to write.
+*       data        - Data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  When Data is 0x1002 and mask is 0xF00F, 0001b is written to bit[31:24]
+*            and 0010b is written to bit[3:0]
+*
+*******************************************************************************/
+GT_STATUS hwSetGlobal2RegBits
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    regAddr,
+    IN  GT_U16   mask,
+    IN  GT_U16   data
+);
+
+/****************************************************************************/
+/* Global 3 registers related functions.                                      */
+/****************************************************************************/
+
+/*******************************************************************************
+* hwReadGlobal3Reg
+*
+* DESCRIPTION:
+*       This function reads a switch's global 3 register.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwReadGlobal3Reg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     regAddr,
+    OUT GT_U16    *data
+);
+
+
+/*******************************************************************************
+* hwWriteGlobal3Reg
+*
+* DESCRIPTION:
+*       This function writes to a switch's global 3 register.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*       data    - The data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwWriteGlobal3Reg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     regAddr,
+    IN  GT_U16    data
+);
+
+
+/*******************************************************************************
+* hwGetGlobal3RegField
+*
+* DESCRIPTION:
+*       This function reads a specified field from a switch's global 3 register.
+*
+* INPUTS:
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to read.
+*
+* OUTPUTS:
+*       data        - The read register field.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  The sum of fieldOffset & fieldLength parameters must be smaller-
+*           equal to 16.
+*
+*******************************************************************************/
+GT_STATUS hwGetGlobal3RegField
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     regAddr,
+    IN  GT_U8     fieldOffset,
+    IN  GT_U8     fieldLength,
+    OUT GT_U16    *data
+);
+
+
+/*******************************************************************************
+* hwSetGlobal3RegField
+*
+* DESCRIPTION:
+*       This function writes to specified field in a switch's global 3 register.
+*
+* INPUTS:
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to write.
+*       data        - Data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS hwSetGlobal3RegField
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     regAddr,
+    IN  GT_U8     fieldOffset,
+    IN  GT_U8     fieldLength,
+    IN  GT_U16    data
+);
+
+/*******************************************************************************
+* hwSetGlobal3RegBits
+*
+* DESCRIPTION:
+*       This function writes to specified bits in a switch's global 3 register.
+*
+* INPUTS:
+*       regAddr     - The register's address.
+*       mask         - The bits to write.
+*       data        - Data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  When Data is 0x1002 and mask is 0xF00F, 0001b is written to bit[31:24]
+*            and 0010b is written to bit[3:0]
+*
+*******************************************************************************/
+GT_STATUS hwSetGlobal3RegBits
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    regAddr,
+    IN  GT_U16   mask,
+    IN  GT_U16   data
+);
+
+/*******************************************************************************
+* hwReadMiiReg
+*
+* DESCRIPTION:
+*       This function reads a switch register.
+*
+* INPUTS:
+*       phyAddr - Phy Address to read the register for.( 0 ~ 0x1F )
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwReadMiiReg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     phyAddr,
+    IN  GT_U8     regAddr,
+    OUT GT_U16    *data
+);
+
+
+/*******************************************************************************
+* hwWriteMiiReg
+*
+* DESCRIPTION:
+*       This function writes a switch register.
+*
+* INPUTS:
+*       phyAddr - Phy Address to read the register for.( 0 ~ 0x1F )
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwWriteMiiReg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     phyAddr,
+    IN  GT_U8     regAddr,
+    IN  GT_U16    data
+);
+
+#ifdef GT_RMGMT_ACCESS
+
+/*******************************************************************************
+* hwAccessMultiRegs
+*
+* DESCRIPTION:
+*       This function accesses switch's registers.
+*
+* INPUTS:
+*   regList     - list of HW_DEV_RW_REG.
+*     HW_DEV_RW_REG:
+*     cmd - HW_REG_READ, HW_REG_WRITE, HW_REG_WAIT_TILL_0 or HW_REG_WAIT_TILL_1
+*     addr - SMI Address
+*     reg  - Register offset
+*     data - INPUT,OUTPUT:Value in the Register or Bit number
+*
+* OUTPUTS:
+*   regList
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwAccessMultiRegs
+(
+    IN GT_QD_DEV *dev,
+    INOUT HW_DEV_REG_ACCESS *regList
+);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* __gtHwCntlh */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/msApi/gtVct.h b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/msApi/gtVct.h
new file mode 100644
index 000000000000..ca5b8e7da418
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/msApi/gtVct.h
@@ -0,0 +1,172 @@
+#include <Copyright.h>
+
+/*******************************************************************************
+* gtPhy.h
+*
+* DESCRIPTION:
+*       API definitions for Marvell Phy functionality.
+*
+* DEPENDENCIES:
+*       None.
+*
+* FILE REVISION NUMBER:
+*       $Revision: 1 $
+*******************************************************************************/
+
+#ifndef __gtPhyh
+#define __gtPhyh
+
+#include "msApi.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MARVELL_OUI_MSb        0x0141
+#define MARVELL_OUI_LSb        0x0C00
+#define OUI_LSb_MASK        0xFC00
+#define PHY_MODEL_MASK        0x03F0
+#define PHY_REV_MASK        0x000F
+
+#define DEV_E3082        0x8 << 4
+#define DEV_E104X        0x2 << 4
+#define DEV_E1111        0xC << 4
+#define DEV_E1112        0x9 << 4
+#define DEV_E114X        0xD << 4
+#define DEV_E1149        0xA << 4
+#define DEV_E1181        0xE << 4
+#define DEV_EC010        0x3 << 4
+#define DEV_G15LV        0xB << 4    /* 88E6165 internal copper phy, 88E1240 */
+#define DEV_S15LV        0x0 << 4    /* 88E6165 internal SERDES */
+#define DEV_MELODY       0x26 << 4   /* 88EC000 internal copper phy, 88E3020-88E6250 */
+#define DEV_G65G         0x27 << 4    /* 88E6375 (Amber) internal copper phy, 88E1340 */
+#define DEV_E1540        0x2B << 4    /* 88E6352 (Agate) internal copper phy, 88E1540 */
+
+typedef struct _GT_PHY_INFO
+{
+    GT_U32    phyId;        /* Marvell PHY ID (register 3) */
+    GT_U32    anyPage;    /* each bit represents if the corresponding register is any page */
+    GT_U32    flag;        /* see below for definition */
+    GT_U8    vctType;    /* VCT Register Type */
+    GT_U8    exStatusType;    /* EX Status Register Type */
+    GT_U8    dteType;    /* DTE Register Type */
+    GT_U8    pktGenType;    /* Pkt Generator Reg. Type */
+    GT_U8    macIfLoopType;        /* MAC IF Loopback Reg. Type */
+    GT_U8    lineLoopType;        /* Line Loopback Reg. Type */
+    GT_U8    exLoopType;        /* External Loopback Reg. Type */
+    GT_U8    pageType;        /* Page Restriction Type */
+} GT_PHY_INFO;
+
+/* GT_PHY_INFO flag definition */
+#define GT_PHY_VCT_CAPABLE        0x0001
+#define GT_PHY_DTE_CAPABLE        0x0002
+#define GT_PHY_EX_CABLE_STATUS    0x0004
+#define GT_PHY_ADV_VCT_CAPABLE    0x0008
+#define GT_PHY_PKT_GENERATOR    0x0010
+#define GT_PHY_MAC_IF_LOOP        0x0100
+#define GT_PHY_LINE_LOOP        0x0200
+#define GT_PHY_EXTERNAL_LOOP    0x0400
+#define GT_PHY_RESTRICTED_PAGE    0x0800
+#define GT_PHY_GIGABIT            0x8000
+#define GT_PHY_COPPER             0x4000
+#define GT_PHY_FIBER              0x2000
+#define GT_PHY_SERDES_CORE        0x1000
+
+/* VCT Register Type */
+#define GT_PHY_VCT_TYPE1    1    /* 10/100 Fast Ethernet */
+#define GT_PHY_VCT_TYPE2    2    /* 1000M without page support */
+#define GT_PHY_VCT_TYPE3    3    /* 1000M without page but with work around */
+#define GT_PHY_VCT_TYPE4    4    /* 1000M with page support */
+
+/* ADV VCT Register Type */
+#define GT_PHY_ADV_VCT_TYPE1    5    /* 88E1181 type device, not supported */
+#define GT_PHY_ADV_VCT_TYPE2    6    /* 88E6165 family devies */
+
+/* Extended Status Type */
+#define GT_PHY_EX_STATUS_TYPE1    1    /* 88E1111, 88E1141, 88E1145 */
+#define GT_PHY_EX_STATUS_TYPE2    2    /* 88E1112 */
+#define GT_PHY_EX_STATUS_TYPE3    3    /* 88E1149 */
+#define GT_PHY_EX_STATUS_TYPE4    4    /* 88E1181 */
+#define GT_PHY_EX_STATUS_TYPE5    5    /* 88E1116 */
+#define GT_PHY_EX_STATUS_TYPE6    6    /* 88E6165 family devices */
+
+/* DTE Register Type */
+#define GT_PHY_DTE_TYPE1    1    /* 10/100 Fast Ethernet with workaround */
+#define GT_PHY_DTE_TYPE2    2    /* 1000M without page support */
+#define GT_PHY_DTE_TYPE3    3    /* 1000M without page but with work around */
+#define GT_PHY_DTE_TYPE4    4    /* 1000M with page support */
+#define GT_PHY_DTE_TYPE5    5    /* 10/100 Fast Ethernet */
+
+/* Pkt Generator Register Type */
+#define GT_PHY_PKTGEN_TYPE1    1    /* Uses Register 30 */
+#define GT_PHY_PKTGEN_TYPE2    2    /* Uses Register 16 */
+#define GT_PHY_PKTGEN_TYPE3    3    /* Uses Register 25 */
+
+/* MAC Interface Loopback Register Type */
+#define GT_PHY_LOOPBACK_TYPE0    0    /* Don't do anything */
+#define GT_PHY_LOOPBACK_TYPE1    1    /* 0.14 only */
+#define GT_PHY_LOOPBACK_TYPE2    2    /* For DEV_G15LV like device */
+#define GT_PHY_LOOPBACK_TYPE3    3    /* For DEV_S15LV like device */
+#define GT_PHY_LOOPBACK_TYPE4    4    /* For DEV_E1111 like device */
+
+/* Line Loopback Register Type */
+#define GT_PHY_LINE_LB_TYPE1    1    /* 0_2.14 */
+#define GT_PHY_LINE_LB_TYPE2    2    /* 21_2.14 */
+#define GT_PHY_LINE_LB_TYPE3    3    /* 20.14 */
+#define GT_PHY_LINE_LB_TYPE4    4    /* 16.12 */
+
+/* External Loopback Register Type */
+#define GT_PHY_EX_LB_TYPE0    0    /* Don't do anything */
+#define GT_PHY_EX_LB_TYPE1    1    /* For DEV_E1111 like dev */
+#define GT_PHY_EX_LB_TYPE2    2    /* For DEV_E1149 like dev */
+
+/* Restricted Page Access Type */
+#define GT_PHY_PAGE_WRITE_BACK    0    /* For every device */
+#define GT_PHY_PAGE_DIS_AUTO1    1    /* For 88E1111 type */
+#define GT_PHY_PAGE_DIS_AUTO2    2    /* For 88E1121 type */
+#define GT_PHY_NO_PAGE            3    /* No Pages */
+
+
+/* definition for formula to calculate actual distance */
+#ifdef FP_SUPPORT
+#define FORMULA_PHY100M(_data)    ((_data)*0.7861 - 18.862)
+#define FORMULA_PHY1000M(_data)    ((_data)*0.8018 - 28.751)
+#else
+#define FORMULA_PHY100M(_data)    (((long)(_data)*7861 - 188620)/10000 + (((((long)(_data)*7861 - 188620)%10000) >= 5000)?1:0))
+#define FORMULA_PHY1000M(_data)    (((long)(_data)*8018 - 287510)/10000 + (((((long)(_data)*8018 - 287510)%10000) >= 5000)?1:0))
+#endif
+
+#define GT_ADV_VCT_CALC(_data)        \
+        (((long)(_data)*8333 - 191667)/10000 + (((((long)(_data)*8333 - 191667)%10000) >= 5000)?1:0))
+
+#define GT_ADV_VCT_CALC_SHORT(_data)        \
+        (((long)(_data)*7143 - 71429)/10000 + (((((long)(_data)*7143 - 71429)%10000) >= 5000)?1:0))
+
+/* macro to check VCT Failure */
+#define IS_VCT_FAILED(_reg)        \
+        (((_reg) & 0xFF) == 0xFF)
+
+/* macro to find out if Amplitude is zero */
+#define IS_ZERO_AMPLITUDE(_reg)    \
+        (((_reg) & 0x7F00) == 0)
+
+/* macro to retrieve Amplitude */
+#define GET_AMPLITUDE(_reg)    \
+        (((_reg) & 0x7F00) >> 8)
+
+/* macro to find out if Amplitude is positive */
+#define IS_POSITIVE_AMPLITUDE(_reg)    \
+        (((_reg) & 0x8000) == 0x8000)
+
+typedef struct _VCT_REGISTER
+{
+    GT_U8    page;
+    GT_U8    regOffset;
+} VCT_REGISTER;
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __gtPhyh */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/msApi/msApiInternal.h b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/msApi/msApiInternal.h
new file mode 100644
index 000000000000..95b73269b7af
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/msApi/msApiInternal.h
@@ -0,0 +1,1820 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* msApiPrototype.h
+*
+* DESCRIPTION:
+*       API Prototypes for QuarterDeck Device
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+
+#ifndef __msApiInternal_h
+#define __msApiInternal_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef GT_USE_SIMPLE_PORT_MAPPING
+#define GT_LPORT_2_PORT(_lport)      (GT_U8)((_lport) & 0xff)
+#define GT_PORT_2_LPORT(_port)       (GT_LPORT)((_port) & 0xff)
+#define GT_LPORTVEC_2_PORTVEC(_lvec)      (GT_U32)((_lvec) & 0xffff)
+#define GT_PORTVEC_2_LPORTVEC(_pvec)       (GT_U32)((_pvec) & 0xffff)
+#define GT_LPORT_2_PHY(_lport)      (GT_U8)((_lport) & 0xff)
+#else
+ #define GT_LPORT_2_PHY(_lport)       lport2phy(dev,_lport)
+ #ifdef GT_PORT_MAP_IN_DEV
+  #define GT_LPORT_2_PORT(_lport)      dev->lport2port(dev->validPortVec, _lport)
+  #define GT_PORT_2_LPORT(_port)       dev->port2lport(dev->validPortVec, _port)
+  #define GT_LPORTVEC_2_PORTVEC(_lvec) dev->lportvec2portvec(dev->validPortVec, _lvec)
+  #define GT_PORTVEC_2_LPORTVEC(_pvec) dev->portvec2lportvec(dev->validPortVec, _pvec)
+ #else
+  #define GT_LPORT_2_PORT(_lport)      lport2port(dev->validPortVec, _lport)
+  #define GT_PORT_2_LPORT(_port)       port2lport(dev->validPortVec, _port)
+  #define GT_LPORTVEC_2_PORTVEC(_lvec) lportvec2portvec(dev->validPortVec, _lvec)
+  #define GT_PORTVEC_2_LPORTVEC(_pvec) portvec2lportvec(dev->validPortVec, _pvec)
+#endif
+
+#endif
+
+#define GT_IS_PORT_SET(_portVec, _port)    \
+            ((_portVec) & (0x1 << (_port)))
+
+#define GT_IS_IRLUNIT_VALID(_dev,_unit)        \
+        (((_dev)->deviceId == GT_88E6065)?(_unit < 12):    \
+        (((_dev)->deviceId == GT_88E6055)?(_unit < 12):    \
+        (((_dev)->deviceId == GT_88E6061)?(_unit < 6):    \
+        (((_dev)->deviceId == GT_88E6035)?(_unit < 6):    \
+                                         (_unit < 3)))))
+
+
+/* The following macro converts a binary    */
+/* value (of 1 bit) to a boolean one.       */
+/* 0 --> GT_FALSE                           */
+/* 1 --> GT_TRUE                            */
+#define BIT_2_BOOL(binVal,boolVal)                                  \
+            (boolVal) = (((binVal) == 0) ? GT_FALSE : GT_TRUE)
+
+/* The following macro converts a boolean   */
+/* value to a binary one (of 1 bit).        */
+/* GT_FALSE --> 0                           */
+/* GT_TRUE --> 1                            */
+#define BOOL_2_BIT(boolVal,binVal)                                  \
+            (binVal) = (((boolVal) == GT_TRUE) ? 1 : 0)
+
+/* The following macro converts a binary    */
+/* value (of 1 bit) to a boolean one.       */
+/* 0 --> GT_TRUE                            */
+/* 1 --> GT_FALSE                           */
+#define BIT_2_BOOL_R(binVal,boolVal)                                  \
+            (boolVal) = (((binVal) == 0) ? GT_TRUE : GT_FALSE)
+
+/* The following macro converts a boolean   */
+/* value to a binary one (of 1 bit).        */
+/* GT_FALSE --> 1                           */
+/* GT_TRUE --> 0                            */
+#define BOOL_2_BIT_R(boolVal,binVal)                                  \
+            (binVal) = (((boolVal) == GT_TRUE) ? 0 : 1)
+
+/* Bit definition for devStorage */
+/* Ingress/Egress Rate type (grcSetPri0Rate,    grcSetEgressRate) */
+#define GT_RATE_ENUM_NOT_USED        0x10000
+#define GT_RATE_ENUM_USED        0x00000
+
+/* device name - devName */
+#define DEV_88E6051              0x0001    /* quarterdeck 6051      */
+#define DEV_88E6052              0x0002    /* quarterdeck 6052      */
+#define DEV_88E6021              0x0004    /* fullsail              */
+#define DEV_88E6060              0x0008    /* Gondola               */
+#define DEV_88E6063              0x0010    /* clippership 6063      */
+#define DEV_FF_EG                0x0020    /* FireFox-EG            */
+#define DEV_FF_HG                0x0040    /* FireFox-HG            */
+#define DEV_FH_VPN               0x0080    /* FireHawk-VPN          */
+#define DEV_88E6083              0x0100    /* Octane 6083           */
+#define DEV_88E6181              0x0200    /* Sapphire 88E6181      */
+#define DEV_88E6183              0x0400    /* Sapphire 88E6153,88E6183 */
+#define DEV_88E6093                 0x0800   /* 88E6093                  */
+#define DEV_88E6092                 0x1000   /* 88E6092                  */
+#define DEV_88E6095              0x2000   /* 88E6095                  */
+#define DEV_88E6182              0x4000   /* Jade 88E6152, 88E6182 */
+#define DEV_88E6185              0x8000   /* Jade 88E6155, 88E6185 */
+#define DEV_88E6108              0x10000   /* 88E6108 */
+#define DEV_88E6061              0x20000   /* 88E6031, 88E6061 */
+#define DEV_88E6065              0x40000   /* 88E6035, 88E6055, 88E6065 */
+#define DEV_88E6096              0x80000   /* 88E6096, 88E6046 */
+#define DEV_88E6097              0x100000   /* 88E6097, 88E6047 */
+#define DEV_88E6161              0x200000   /* 88E6161 */
+#define DEV_88E6165              0x400000   /* 88E6165 */
+#define DEV_88E6351              0x800000   /* 88E6351 */
+#define DEV_88E6175              0x1000000  /* 88E6175 */
+#define DEV_88E6171              0x2000000  /* 88E6125 and 88E6171 */
+#define DEV_88E6371              0x4000000  /* 88E6321, 88E6350 */
+#define DEV_88E6172              0x8000000  /* 88E6172 */
+#define DEV_88E6176              0x10000000 /* 88E6176 */
+#define DEV_88E6240              0x20000000 /* 88E6240 */
+#define DEV_88E6352              0x40000000 /* 88E6352 */
+
+/* device name 1 - devName1. These name for extended device family */
+#define DEV_88EC000                      0x000000001  /* Melody 88EC0XX  */
+#define DEV_88E3020                      0x000000010  /* Spannaker 88E3020  */
+#define DEV_88E6020                      0x000000020  /* Spannaker 88E6020  */
+#define DEV_88E6070                      0x000000040  /* Spannaker 88E6070  */
+#define DEV_88E6071                      0x000000080  /* Spannaker 88E6071  */
+#define DEV_88E6220                      0x000000100  /* Spannaker 88E6220  */
+#define DEV_88E6250                      0x000000200  /* Spannaker 88E6250  */
+
+#define DEV_88E6125                      0x000000400  /* Pearl 88E6125  */
+#define DEV_88E6320                      0x000000800  /* Pearl 88E6320  */
+#define DEV_88E6115                      0x000001000  /* Pearl 88E6115  */
+#define DEV_88E6310                      0x000002000  /* Pearl 88E6310  */
+
+#define DEV_88E6097_FAMILY    ( DEV_88E6096 | DEV_88E6097 )
+#define G1_DEV_88E6097_FAMILY     0
+#define DEV_88E6165_FAMILY    ( DEV_88E6161 | DEV_88E6165 )
+#define G1_DEV_88E6165_FAMILY     0
+
+/* Amber */
+#define DEV_88E6351_AVB_FAMILY    ( DEV_88E6351 | DEV_88E6371 )
+#define G1_DEV_88E6351_AVB_FAMILY      0
+#define DEV_88E6351_NO_AVB_FAMILY    (DEV_88E6171 | DEV_88E6175)
+#define G1_DEV_88E6351_NO_AVB_FAMILY   0
+#define DEV_88E6351_FAMILY    ( DEV_88E6351_AVB_FAMILY | DEV_88E6351_NO_AVB_FAMILY)
+#define G1_DEV_88E6351_FAMILY      0
+
+/* Agate */
+#define DEV_88E6352_AVB_FAMILY    ( DEV_88E6352 | DEV_88E6240 )
+#define G1_DEV_88E6352_AVB_FAMILY      0
+#define DEV_88E6352_NO_AVB_FAMILY    (DEV_88E6172 | DEV_88E6176)
+#define G1_DEV_88E6352_NO_AVB_FAMILY   0
+#define DEV_88E6352_FAMILY    ( DEV_88E6352_AVB_FAMILY | DEV_88E6352_NO_AVB_FAMILY)
+#define G1_DEV_88E6352_FAMILY      0
+
+/* Pearl */
+#define DEV_88E6320_AVB_FAMILY      0
+#define G1_DEV_88E6320_AVB_FAMILY    (DEV_88E6320 | DEV_88E6310)
+#define DEV_88E6320_NO_AVB_FAMILY   0
+#define G1_DEV_88E6320_NO_AVB_FAMILY    (DEV_88E6125 | DEV_88E6115)
+#define DEV_88E6320_FAMILY      0
+#define G1_DEV_88E6320_FAMILY    (G1_DEV_88E6320_AVB_FAMILY | G1_DEV_88E6320_NO_AVB_FAMILY)
+
+/* AVB family includes all avb chips except Melody and SpannakAv */
+#define DEV_AVB_FAMILY    ( DEV_88E6351_AVB_FAMILY | DEV_88E6352_AVB_FAMILY)
+#define G1_DEV_AVB_FAMILY      (G1_DEV_88E6320_AVB_FAMILY)
+
+#define DEV_TCAM_FAMILY    (DEV_88E6352_AVB_FAMILY)
+#define G1_DEV_TCAM_FAMILY      (G1_DEV_88E6320_AVB_FAMILY)
+
+#define DEV_88EC000_FAMILY            0
+#define G1_DEV_88EC000_FAMILY     (DEV_88EC000)
+#define DEV_88ESPANNAK_FAMILY            0
+#define G1_DEV_88ESPANNAK_FAMILY  (DEV_88E3020 | DEV_88E6020 | DEV_88E6070 | DEV_88E6071 | DEV_88E6220  | DEV_88E6250 )
+#define DEV_FE_AVB_FAMILY            0
+#define G1_DEV_FE_AVB_FAMILY     ( G1_DEV_88EC000_FAMILY | G1_DEV_88ESPANNAK_FAMILY )
+
+#define G1_DEV_88E6171_FAMILY   0
+#define DEV_88E6095_FAMILY    ( DEV_88E6092 | DEV_88E6095 )
+#define G1_DEV_88E6095_FAMILY     0
+#define DEV_88E6185_FAMILY    ( DEV_88E6182 | DEV_88E6185 | DEV_88E6108)
+#define G1_DEV_88E6185_FAMILY     0
+
+#define DEV_88E6065_FAMILY    ( DEV_88E6061 | DEV_88E6065 )
+#define G1_DEV_88E6065_FAMILY     0
+
+#define DEV_NEW_FEATURE_IN_REV (DEV_88E6095_FAMILY | DEV_88E6182 | DEV_88E6185)
+#define G1_DEV_NEW_FEATURE_IN_REV  0
+
+#define DEV_BURST_RATE        ( DEV_88E6108 )
+#define G1_DEV_BURST_RATE     0
+#define DEV_DROP_BCAST        ( DEV_88E6108 )
+#define G1_DEV_DROP_BCAST     0
+#define DEV_ARP_PRI            ( DEV_88E6108 )
+#define G1_DEV_ARP_PRI             0
+#define DEV_SNOOP_PRI            ( DEV_88E6108 )
+#define G1_DEV_SNOOP_PRI      0
+#define DEV_SERDES_CORE    ( DEV_88E6108 | DEV_88E6165_FAMILY  | DEV_88E6352_FAMILY )
+#define G1_DEV_SERDES_CORE     (DEV_88E6320_FAMILY)
+
+#define DEV_AGE_INTERRUPT  ( DEV_88E6108 | DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_AGE_INTERRUPT       ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY)
+#define DEV_AGE_INT_GLOBAL2    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_AGE_INT_GLOBAL2     (G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY)
+#define DEV_AGE_OUT_INT        ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_AGE_OUT_INT     ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+#define DEV_AGE_HOLD        ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_AGE_HOLD         ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_DEVICE_INTERRUPT    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_DEVICE_INTERRUPT    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+#define DEV_DEVICE_INT_TYPE1    ( DEV_88E6097_FAMILY )    /* Serdes Int bit[10:8] */
+#define G1_DEV_DEVICE_INT_TYPE1    0    /* Serdes Int bit[10:8] */
+#define DEV_DEVICE_INT_TYPE2    ( DEV_88E6165_FAMILY)
+#define G1_DEV_DEVICE_INT_TYPE2    0
+#define DEV_DEVICE_INT_TYPE3    (  DEV_88E6352_FAMILY )
+#define G1_DEV_DEVICE_INT_TYPE3    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_WATCHDOG_EVENT    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_WATCHDOG_EVENT    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_PORT_BASED_AGE_INT    ( DEV_88E6065_FAMILY )
+#define G1_DEV_PORT_BASED_AGE_INT    0
+#define DEV_DEV_PHY_INTERRUPT    ( DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)    /* device interrupt includes phy int */
+#define G1_DEV_DEV_PHY_INTERRUPT    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )    /* device interrupt includes phy int */
+
+#define DEV_AVB_INTERRUPT      ( DEV_88E6165_FAMILY | DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_AVB_INTERRUPT      ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_AUTO_REFRESH_LOCKED    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_AUTO_REFRESH_LOCKED    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_PPU_PHY_ACCESS    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_PPU_PHY_ACCESS    0
+#define DEV_PPU_PHY_ACCESS_RES    ( DEV_88E6097_FAMILY | G1_DEV_88E6320_FAMILY )
+#define G1_DEV_PPU_PHY_ACCESS_RES    0
+
+#define DEV_PPU_READ_ONLY    ( DEV_88E6165_FAMILY | DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_PPU_READ_ONLY     (G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY)
+
+#define DEV_PPU_SERDES_ACCESS_RES    ( DEV_88E6165_FAMILY | DEV_88E6352_FAMILY )
+#define G1_DEV_PPU_SERDES_ACCESS_RES      0
+
+#define DEV_SERDES_ACCESS_CONFIG    ( DEV_88E6165_FAMILY | DEV_88E6352_FAMILY )
+#define G1_DEV_SERDES_ACCESS_CONFIG      0
+
+#define DEV_NO_EGRESS_POLICY    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_NO_EGRESS_POLICY    (G1_DEV_88E6320_FAMILY)
+
+/* DEV_8PORT_SWITCH is used to access the given device's Register Map */
+#define DEV_8PORT_SWITCH    ( DEV_88E6083 | DEV_88E6181 | DEV_88E6183 | DEV_88E6093 | \
+        DEV_88E6097_FAMILY | DEV_88E6095_FAMILY | DEV_88E6185_FAMILY | \
+        DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_8PORT_SWITCH (G1_DEV_88E6320_FAMILY)
+
+/* DEV_MELODY_SWITCH is used to access the given device's Register Map */
+#define DEV_MELODY_SWITCH    0
+#define G1_DEV_MELODY_SWITCH    ( DEV_88EC000 | G1_DEV_88ESPANNAK_FAMILY )
+
+#define DEV_PORT_SECURITY    ( DEV_88E6083 | DEV_88E6183 | DEV_88E6093 | DEV_88E6095_FAMILY | DEV_88E6185_FAMILY )
+#define G1_DEV_PORT_SECURITY    0
+
+#define DEV_BROADCAST_INVALID    ( DEV_88E6051 | DEV_88E6052 | DEV_FF_EG | DEV_FF_HG)
+#define G1_DEV_BROADCAST_INVALID 0
+
+/* Configurable ATU Size */
+#define DEV_ATU_256_2048     ( DEV_88E6021 | DEV_88E6060 | DEV_88E6065_FAMILY )
+#define G1_DEV_ATU_256_2048     0
+#define DEV_ATU_562_2048     ( DEV_88E6052 | DEV_88E6063 | DEV_FF_HG | DEV_FH_VPN | DEV_88E6083 )
+#define G1_DEV_ATU_562_2048     0
+
+#define DEV_ATU_SIZE_FIXED    DEV_GIGABIT_SWITCH
+#define G1_DEV_ATU_SIZE_FIXED    G1_DEV_GIGABIT_SWITCH
+
+#define DEV_ATU_1024    (DEV_88E6108)    /* Not used */
+#define G1_DEV_ATU_1024    0    /* Not used */
+#define DEV_ATU_8192    (DEV_88E6095_FAMILY | DEV_88E6182 | DEV_88E6185)    /* Not used */
+#define G1_DEV_ATU_8192    0    /* Not used */
+
+#define DEV_ATU_LIMIT        ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_ATU_LIMIT    (G1_DEV_88E6320_FAMILY)
+
+#define DEV_ATU_LIMIT_READ    ( DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_ATU_LIMIT_READ    (G1_DEV_88E6320_FAMILY)
+
+#define DEV_DBNUM_FULL     ( DEV_88E6021 | DEV_88E6060 | DEV_88E6063 |     \
+      DEV_FH_VPN |  DEV_88E6083 |  DEV_88E6183 | DEV_88E6093 | DEV_88E6061 )
+#define G1_DEV_DBNUM_FULL     0
+
+#define DEV_DBNUM_64     ( DEV_88E6065 )
+#define G1_DEV_DBNUM_64     ( G1_DEV_88EC000_FAMILY  | G1_DEV_88ESPANNAK_FAMILY)
+#define DEV_DBNUM_256     ( DEV_88E6095_FAMILY | DEV_88E6185_FAMILY )
+#define G1_DEV_DBNUM_256     0
+#define DEV_DBNUM_4096     ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_DBNUM_4096    (G1_DEV_88E6320_FAMILY)
+
+#define DEV_STATIC_ADDR    ( DEV_88E6060 | DEV_88E6021 | DEV_FF_EG | DEV_FF_HG | DEV_88E6052 | DEV_88E6063 | \
+            DEV_FH_VPN | DEV_88E6083 | DEV_88E6183 | DEV_88E6093 |     \
+      DEV_88E6095_FAMILY | DEV_88E6185_FAMILY | DEV_88E6065_FAMILY | DEV_88E6097_FAMILY | \
+      DEV_88E6165_FAMILY | DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_STATIC_ADDR    ( G1_DEV_FE_AVB_FAMILY )
+
+#define DEV_TRAILER     ( DEV_88E6021 | DEV_FF_HG | DEV_88E6052 |          \
+      DEV_88E6063 | DEV_FH_VPN | DEV_88E6083 )
+#define G1_DEV_TRAILER     0
+
+#define DEV_TRAILER_P5        ( DEV_FF_EG )
+#define G1_DEV_TRAILER_P5    0
+#define DEV_TRAILER_P4P5    (DEV_88E6060)
+#define G1_DEV_TRAILER_P4P5    0
+
+#define DEV_HEADER    ( DEV_FF_HG | DEV_88E6063 | DEV_FH_VPN | DEV_88E6083 |    DEV_88E6183 | \
+        DEV_88E6093 |  DEV_88E6095_FAMILY | DEV_88E6185_FAMILY |    \
+      DEV_88E6065_FAMILY | DEV_88E6097_FAMILY |  DEV_88E6165_FAMILY | DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_HEADER    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_HEADER_P5        ( DEV_FF_EG )
+#define G1_DEV_HEADER_P5    0
+#define DEV_HEADER_P4P5      (DEV_88E6060)
+#define G1_DEV_HEADER_P4P5      0
+
+/* DEV_QoS : Devices with multiple Queues for QoS Priority Support */
+#define DEV_QoS        ( DEV_88E6021 | DEV_FF_HG | DEV_88E6051 | DEV_88E6052 | DEV_88E6063 | \
+        DEV_FH_VPN | DEV_88E6083 | DEV_88E6181 | DEV_88E6183 | DEV_88E6093 | \
+        DEV_88E6095_FAMILY | DEV_88E6185_FAMILY | DEV_88E6065_FAMILY |     \
+      DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_QoS    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_QoS_FPRI_QPRI    ( DEV_88E6065_FAMILY )
+#define G1_DEV_QoS_FPRI_QPRI    0
+
+#define DEV_QoS_WEIGHT        ( DEV_88E6097 | DEV_88E6165 | DEV_AVB_FAMILY | DEV_88E6175 | DEV_88E6352_FAMILY)
+#define G1_DEV_QoS_WEIGHT    (G1_DEV_88E6320_AVB_FAMILY)
+
+#define DEV_TAGGING            (DEV_QoS)
+#define G1_DEV_TAGGING         (G1_DEV_QoS)
+
+#define DEV_EGRESS_DOUBLE_TAGGING ( DEV_QoS & ~(DEV_88E6051 | DEV_88E6092 | DEV_88E6182 | DEV_88E6061))
+#define G1_DEV_EGRESS_DOUBLE_TAGGING 0
+
+#define DEV_INGRESS_DOUBLE_TAGGING    ( DEV_88E6181 | DEV_88E6183 | DEV_88E6093 |     \
+      DEV_88E6095 | DEV_88E6185 | DEV_88E6108 )
+#define G1_DEV_INGRESS_DOUBLE_TAGGING    0
+
+#define DEV_PRIORITY_REMAPPING        ( DEV_88E6181 | DEV_88E6183 | DEV_88E6093 | DEV_88E6095 | \
+    DEV_88E6185 | DEV_88E6108 | DEV_88E6065 | DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_PRIORITY_REMAPPING  ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+
+#define DEV_802_1Q    ( DEV_88E6021 | DEV_88E6063 | DEV_FH_VPN |     \
+              DEV_88E6083 | DEV_88E6183 | DEV_88E6093 |     \
+              DEV_88E6095 | DEV_88E6092 | DEV_88E6185_FAMILY |\
+              DEV_88E6065_FAMILY | DEV_88E6097_FAMILY |    \
+              DEV_88E6165_FAMILY | DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_802_1Q    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_802_1S    ( DEV_88E6095 | DEV_88E6185 | DEV_88E6108 | DEV_88E6065 )
+#define G1_DEV_802_1S    ( G1_DEV_FE_AVB_FAMILY )
+#define DEV_802_1S_STU    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_802_1S_STU    0
+
+#define DEV_802_1W    ( DEV_88E6183 | DEV_88E6093 | DEV_88E6095 |     \
+              DEV_88E6185 | DEV_88E6108 | DEV_88E6065 |    \
+              DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_802_1W    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_ATU_15SEC_AGING    ( DEV_GIGABIT_SWITCH | DEV_88E6065_FAMILY )
+#define G1_DEV_ATU_15SEC_AGING    0
+#define DEV_ATU_RM_PORTS    ( DEV_88E6093_FAMILY | DEV_88E6065 )
+#define G1_DEV_ATU_RM_PORTS    0
+#define DEV_ATU_EXT_PRI        ( DEV_88E6065_FAMILY )
+#define G1_DEV_ATU_EXT_PRI     ( G1_DEV_FE_AVB_FAMILY )
+
+#define DEV_VTU_EXT_INFO    ( DEV_88E6065_FAMILY )
+#define G1_DEV_VTU_EXT_INFO    0
+#define DEV_RMON    ( DEV_88E6021 | DEV_88E6063 | DEV_FH_VPN |     \
+              DEV_88E6083 | DEV_88E6183 | DEV_88E6093 |     \
+              DEV_88E6092 | DEV_88E6095 | DEV_88E6185_FAMILY |\
+              DEV_88E6065 | DEV_88E6097_FAMILY |        \
+              DEV_88E6165_FAMILY | DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_RMON    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_RMON_TYPE_1    ( DEV_88E6021 | DEV_88E6063 | DEV_FH_VPN | DEV_88E6083 )
+#define G1_DEV_RMON_TYPE_1    0
+#define DEV_RMON_TYPE_2 ( DEV_88E6183 )
+#define G1_DEV_RMON_TYPE_2 0
+#define DEV_RMON_TYPE_3     \
+        ( DEV_88E6093 | DEV_88E6095 | DEV_88E6092 |     \
+          DEV_88E6185_FAMILY | DEV_88E6065 |             \
+          DEV_88E6097_FAMILY |        \
+          DEV_88E6165_FAMILY | DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_RMON_TYPE_3 ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+#define DEV_RMON_REALTIME_SUPPORT    \
+        ( DEV_88E6065 | DEV_88E6065_FAMILY |    \
+          DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |    \
+          DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_RMON_REALTIME_SUPPORT    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY)
+#define DEV_RMON_PORT_BITS    \
+        ( DEV_88E6065 | DEV_88E6065 | DEV_88E6352_FAMILY )
+#define G1_DEV_RMON_PORT_BITS    \
+   ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY)
+
+#define DEV_IGMP_SNOOPING    \
+    ( DEV_88E6021 | DEV_88E6063 | DEV_FH_VPN |            \
+      DEV_88E6083 | DEV_88E6183 | DEV_88E6093 |         \
+      DEV_88E6095_FAMILY | DEV_88E6185_FAMILY |            \
+      DEV_88E6065_FAMILY | DEV_88E6097_FAMILY |            \
+      DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_IGMP_SNOOPING    \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_PORT_MONITORING    \
+    ( DEV_88E6060 | DEV_88E6063 | DEV_FH_VPN |            \
+      DEV_88E6083 | DEV_88E6183 | DEV_88E6093 |         \
+      DEV_88E6095_FAMILY | DEV_88E6185_FAMILY |            \
+      DEV_88E6065_FAMILY | DEV_88E6097_FAMILY |            \
+      DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_PORT_MONITORING    \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_ENABLE_MONITORING    \
+    ( DEV_88E6060 | DEV_88E6063 | DEV_FH_VPN |            \
+      DEV_88E6083 | DEV_88E6183 | DEV_88E6093 |            \
+      DEV_88E6065_FAMILY )
+#define G1_DEV_ENABLE_MONITORING    0
+#define DEV_MC_RATE_PERCENT    ( DEV_88E6021 | DEV_88E6051 | DEV_88E6052 )
+#define G1_DEV_MC_RATE_PERCENT    0
+
+#define DEV_MC_RATE_KBPS    ( DEV_FF_HG | DEV_88E6063 | DEV_FH_VPN | DEV_88E6083 )
+#define G1_DEV_MC_RATE_KBPS    0
+
+#define DEV_INGRESS_RATE_KBPS    \
+    ( DEV_FF_HG | DEV_88E6063 | DEV_FH_VPN |            \
+      DEV_88E6083 | DEV_88E6181 | DEV_88E6183 |         \
+      DEV_88E6093 | DEV_88E6095_FAMILY | DEV_88E6185_FAMILY)
+#define G1_DEV_INGRESS_RATE_KBPS 0
+
+#define DEV_EGRESS_RATE_KBPS    \
+    ( DEV_FF_HG | DEV_88E6063 | DEV_FH_VPN |            \
+      DEV_88E6083 | DEV_88E6181 | DEV_88E6183 |         \
+      DEV_88E6093 | DEV_88E6095 | DEV_88E6185 |         \
+      DEV_88E6108 | DEV_88E6065_FAMILY |                 \
+      DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |         \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_EGRESS_RATE_KBPS    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY)
+
+#define DEV_ELIMIT_FRAME_BASED    \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_ELIMIT_FRAME_BASED    \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_PIRL_RESOURCE    \
+    ( DEV_88E6065_FAMILY )
+#define G1_DEV_PIRL_RESOURCE    0
+
+#define DEV_PIRL2_RESOURCE    \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |        \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_PIRL2_RESOURCE     \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_RESTRICTED_PIRL_RESOURCE    \
+    ( DEV_88E6061 )
+#define G1_DEV_RESTRICTED_PIRL_RESOURCE    0
+
+#define DEV_RESTRICTED_PIRL2_RESOURCE    \
+                        ( DEV_88E6096 | DEV_88E6161 | DEV_88E6171 )
+#define G1_DEV_RESTRICTED_PIRL2_RESOURCE    0
+
+#define DEV_NONE_RATE_LIMIT        \
+    ( DEV_88E6065 )
+#define G1_DEV_NONE_RATE_LIMIT        0
+
+#define DEV_MII_DUPLEX_CONFIG    \
+    ( DEV_88E6021 | DEV_88E6063 | DEV_FH_VPN |            \
+      DEV_88E6083 )
+#define G1_DEV_MII_DUPLEX_CONFIG    0
+
+#define DEV_QD_PLUS     \
+    ( DEV_88E6021 | DEV_FF_EG | DEV_FF_HG |            \
+      DEV_88E6060 | DEV_88E6063 | DEV_FH_VPN |        \
+      DEV_88E6083 | DEV_88E6181 | DEV_88E6183 |             \
+      DEV_88E6093 |                        \
+      DEV_88E6095_FAMILY | DEV_88E6185_FAMILY |        \
+      DEV_88E6097_FAMILY | DEV_88E6165_FAMILY  |            \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_QD_PLUS     \
+    ( G1_DEV_88E6320_FAMILY )
+
+#define DEV_FASTETH_SWITCH    \
+    ( DEV_88E6051 | DEV_88E6052 | DEV_88E6021 |         \
+      DEV_FF_EG | DEV_FF_HG | DEV_88E6060 |             \
+      DEV_88E6063 | DEV_FH_VPN | DEV_88E6083 |            \
+      DEV_88E6065_FAMILY )
+#define G1_DEV_FASTETH_SWITCH    0
+#define DEV_ENHANCED_FE_SWITCH        ( DEV_88E6065_FAMILY )
+#define G1_DEV_ENHANCED_FE_SWITCH    0
+
+#define DEV_EXTERNAL_PHY    \
+    ( DEV_88E6181 | DEV_88E6183 | DEV_88E6093 |            \
+      DEV_88E6095_FAMILY | DEV_88E6185_FAMILY |            \
+      DEV_88E6097_FAMILY )
+#define G1_DEV_EXTERNAL_PHY    0
+
+#define DEV_EXTERNAL_PHY_ONLY    ( DEV_88E6181 | DEV_88E6183 | DEV_88E6182 | DEV_88E6185 )
+#define G1_DEV_EXTERNAL_PHY_ONLY    0
+
+#define DEV_INTERNAL_GPHY   ( DEV_88E6108 )
+#define G1_DEV_INTERNAL_GPHY   0
+
+#define DEV_FC_WITH_VALUE            \
+    ( DEV_88E6093 | DEV_88E6095_FAMILY | DEV_88E6185_FAMILY |    \
+      DEV_88E6065_FAMILY | DEV_88E6097_FAMILY |            \
+      DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_FC_WITH_VALUE            \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+#define DEV_FC_STATUS                \
+    ( DEV_88E6181 | DEV_88E6183 | DEV_88E6093 |     \
+      DEV_88E6095_FAMILY | DEV_88E6185_FAMILY |     \
+      DEV_88E6065_FAMILY | DEV_88E6097_FAMILY |        \
+      DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_FC_STATUS                \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+#define DEV_FC_DIS_STATUS    ( DEV_88E6065_FAMILY )
+#define G1_DEV_FC_DIS_STATUS    0
+
+#define DEV_CORE_TAG    ( DEV_88E6093 | DEV_88E6095_FAMILY | DEV_88E6185_FAMILY )
+#define G1_DEV_CORE_TAG    0
+#define DEV_PCS_LINK    ( DEV_88E6093 | DEV_88E6095_FAMILY |     \
+              DEV_88E6185_FAMILY | DEV_88E6097_FAMILY |    \
+              DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_PCS_LINK    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_MGMII_STATUS    ( DEV_88E6093 | DEV_88E6095_FAMILY |     \
+                  DEV_88E6185_FAMILY | DEV_88E6097_FAMILY | \
+                  DEV_88E6165_FAMILY )
+#define G1_DEV_MGMII_STATUS    0
+
+#define DEV_MGMII_REVERSE_STATUS    ( DEV_88E6165_FAMILY )
+#define G1_DEV_MGMII_REVERSE_STATUS    0
+
+#define DEV_88E6183_FAMILY        ( DEV_88E6183 | DEV_88E6185_FAMILY )
+#define G1_DEV_88E6183_FAMILY        0
+
+/* New released switch chips After 6093 chip */
+#define DEV_88E6093_FAMILY    ( DEV_88E6093 | DEV_88E6095_FAMILY |\
+                  DEV_88E6185_FAMILY | DEV_88E6097_FAMILY |\
+                  DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_88E6093_FAMILY    \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_UNMANAGED_SWITCH    ( DEV_88E6181 )
+#define G1_DEV_UNMANAGED_SWITCH    0
+
+#define DEV_PCS                    \
+        ( DEV_88E6181 | DEV_88E6183 | DEV_88E6093 |     \
+          DEV_88E6095_FAMILY | DEV_88E6185_FAMILY |     \
+          DEV_88E6097_FAMILY )
+#define G1_DEV_PCS                    0
+
+#define DEV_GIGABIT_SWITCH        \
+        ( DEV_88E6181 | DEV_88E6183 | DEV_88E6093 |     \
+          DEV_88E6095_FAMILY | DEV_88E6185_FAMILY |     \
+          DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |     \
+          DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_GIGABIT_SWITCH        (G1_DEV_88E6320_AVB_FAMILY)
+
+#define DEV_GIGABIT_MANAGED_SWITCH    \
+        ( DEV_88E6183 | DEV_88E6093 | \
+          DEV_88E6095_FAMILY | DEV_88E6185_FAMILY |     \
+          DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |     \
+          DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_GIGABIT_MANAGED_SWITCH    (G1_DEV_88E6320_AVB_FAMILY)
+
+#define DEV_CASCADE_PORT    \
+        ( DEV_88E6183 | DEV_88E6093 | \
+          DEV_88E6095_FAMILY | DEV_88E6185_FAMILY )
+#define G1_DEV_CASCADE_PORT 0
+
+
+#define DEV_CROSS_CHIP_VLAN        \
+        ( DEV_88E6095_FAMILY | DEV_88E6185_FAMILY )
+#define G1_DEV_CROSS_CHIP_VLAN        0
+
+#define DEV_CROSS_CHIP_PORT_VLAN    \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |    \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_CROSS_CHIP_PORT_VLAN    (G1_DEV_88E6320_AVB_FAMILY)
+
+#define DEV_TRUNK    ( DEV_88E6095_FAMILY | DEV_88E6185_FAMILY |    \
+              DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |    \
+              DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_TRUNK    (G1_DEV_88E6320_AVB_FAMILY)
+
+#define DEV_8_TRUNKING    ( DEV_88E6092 | DEV_88E6182 )
+#define G1_DEV_8_TRUNKING    0
+
+#define DEV_TRUNK_NEW_ID_LOCATION    \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |     \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_TRUNK_NEW_ID_LOCATION     (G1_DEV_88E6320_AVB_FAMILY)
+
+#define DEV_REDUCED_TRUNK    ( DEV_88E6065_FAMILY )
+#define G1_DEV_REDUCED_TRUNK   \
+    ( G1_DEV_FE_AVB_FAMILY )
+
+#define DEV_STACKING    \
+    ( DEV_88E6095 | DEV_88E6185 | DEV_88E6108 |        \
+      DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |        \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_STACKING    (G1_DEV_88E6320_AVB_FAMILY)
+
+#define DEV_FRAME_SIZE_1632        \
+    ( DEV_88E6095_FAMILY | DEV_88E6185_FAMILY )        /* Not used */
+#define G1_DEV_FRAME_SIZE_1632        0        /* Not used */
+
+#define DEV_FLOW_CTRL_DELAY        \
+    ( DEV_88E6095_FAMILY | DEV_88E6185_FAMILY |        \
+      DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |        \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_FLOW_CTRL_DELAY        (G1_DEV_88E6320_AVB_FAMILY)
+
+/* port based CPU Port */
+#define DEV_ENHANCED_CPU_PORT    \
+    ( DEV_88E6095_FAMILY | DEV_88E6185_FAMILY |        \
+      DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |        \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_ENHANCED_CPU_PORT    \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY)
+
+#define DEV_CPU_DEST_PER_PORT    \
+    ( DEV_88E6183 | DEV_88E6093 | \
+      DEV_88E6095_FAMILY | DEV_88E6185_FAMILY )
+#define G1_DEV_CPU_DEST_PER_PORT   0
+
+#define DEV_CPU_PORT    ( DEV_88E6065_FAMILY )
+#define G1_DEV_CPU_PORT    0
+#define DEV_MULTICAST    ( DEV_88E6065_FAMILY )
+#define G1_DEV_MULTICAST    0
+
+#define DEV_CPU_DEST        \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |     \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_CPU_DEST        \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_AVB_FAMILY)
+#define DEV_MIRROR_DEST    \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |     \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_MIRROR_DEST    \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_AVB_FAMILY)
+
+#define DEV_QVLAN_ONLY        \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY )
+#define G1_DEV_QVLAN_ONLY        0
+#define DEV_5BIT_PORT        \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |     \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_5BIT_PORT        \
+    ( G1_DEV_FE_AVB_FAMILY|G1_DEV_88E6320_AVB_FAMILY)
+#define DEV_SDET_POLARITY    \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY )
+#define G1_DEV_SDET_POLARITY    0
+#define DEV_LIMITED_SDET    \
+        ( DEV_88E6165_FAMILY )     /* only port 4 & 5 support SDET Polarity */
+
+#define G1_DEV_LIMITED_SDET    0    /* only port 4 & 5 support SDET Polarity */
+
+/* supports Reserved Multicast, etc */
+#define DEV_ENHANCED_MULTICAST    \
+    ( DEV_88E6095_FAMILY | DEV_88E6185_FAMILY |        \
+      DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |        \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_ENHANCED_MULTICAST    \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_ENHANCED_MULTICAST_2X    \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_ENHANCED_MULTICAST_2X    \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+
+#define DEV_ARP_DEST_SUPPORT    ( DEV_88E6095_FAMILY | DEV_88E6185_FAMILY )
+#define G1_DEV_ARP_DEST_SUPPORT    ( G1_DEV_88E6095_FAMILY | G1_DEV_88E6185_FAMILY )
+#define DEV_MARVELL_TAG_FLOW_CTRL    \
+    ( DEV_88E6095_FAMILY | DEV_88E6185_FAMILY |        \
+      DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |        \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_MARVELL_TAG_FLOW_CTRL    \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_USE_DOUBLE_TAG_DATA        \
+    ( DEV_88E6095_FAMILY | DEV_88E6185_FAMILY )        /* Not Used */
+#define G1_DEV_USE_DOUBLE_TAG_DATA        0        /* Not Used */
+
+#define DEV_MARVELL_TAG_LOOP_BLOCK    ( DEV_88E6095_FAMILY | DEV_88E6185_FAMILY )
+#define G1_DEV_MARVELL_TAG_LOOP_BLOCK    0
+
+#define DEV_LOOPBACK_FILTER        \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |     \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_LOOPBACK_FILTER        \
+    (G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+#define DEV_FLOOD_BROADCAST        \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |     \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_FLOOD_BROADCAST        \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+#define DEV_TAG_FLOW_CONTROL        \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |     \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_TAG_FLOW_CONTROL        \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+#define DEV_ALWAYS_USE_VTU        ( DEV_88E6097_FAMILY )
+#define G1_DEV_ALWAYS_USE_VTU        0
+
+#define DEV_RM_ONE_PTAG            \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |     \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_RM_ONE_PTAG            \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+
+#define DEV_PRIORITY_OVERRIDE    \
+    ( DEV_88E6183 | DEV_88E6093 | DEV_88E6095 |     \
+      DEV_88E6185 | DEV_88E6108 | DEV_88E6097_FAMILY |    \
+      DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_PRIORITY_OVERRIDE    \
+      ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY)
+
+#define DEV_EXT_PRIORITY_OVERRIDE        \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |     \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_EXT_PRIORITY_OVERRIDE        \
+      ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY)
+
+#define DEV_PRIORITY_OVERRIDE_TABLE    \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |     \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_PRIORITY_OVERRIDE_TABLE    \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_FQPRI_IN_TABLE        ( DEV_88E6065 )
+#define G1_DEV_FQPRI_IN_TABLE     0
+
+#define DEV_MACPRI_IN_TABLE        0
+#define G1_DEV_MACPRI_IN_TABLE     ( G1_DEV_FE_AVB_FAMILY )
+
+#define DEV_FQPRI_OVERRIDE        ( DEV_88E6065 )
+#define G1_DEV_FQPRI_OVERRIDE        0
+
+#define DEV_Px_MODE     ( DEV_88E6065_FAMILY )
+#define G1_DEV_Px_MODE     0
+
+#define DEV_SA_FILTERING    \
+    ( DEV_88E6065 | DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_SA_FILTERING    \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_ARP_TO_CPU        \
+    ( DEV_88E6065_FAMILY | DEV_88E6097_FAMILY |     \
+      DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_ARP_TO_CPU        \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_EGRESS_FLOOD    \
+    ( DEV_88E6065_FAMILY | DEV_88E6097_FAMILY |     \
+      DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_EGRESS_FLOOD    \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_FORCE_MAP        ( DEV_88E6065_FAMILY )
+#define G1_DEV_FORCE_MAP    0
+
+#define DEV_PORT_SCHEDULE    ( DEV_88E6065 )
+#define G1_DEV_PORT_SCHEDULE    0
+
+#define DEV_PORT_MIXED_SCHEDULE        \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |     \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_PORT_MIXED_SCHEDULE        \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_OUT_Q_SIZE        \
+    ( DEV_88E6065_FAMILY | DEV_88E6097_FAMILY |     \
+      DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_OUT_Q_SIZE        \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+#define DEV_OUT_Q_512        \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |     \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_OUT_Q_512        \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+#define DEV_FULL_Q_COUNTER        \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |     \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_FULL_Q_COUNTER        \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_PROVIDER_TAG    ( DEV_88E6065_FAMILY )
+#define G1_DEV_PROVIDER_TAG    0
+
+#define DEV_OLD_HEADER        ( DEV_88E6065_FAMILY )
+#define G1_DEV_OLD_HEADER        0
+#define DEV_RECURSIVE_TAG_STRIP        ( DEV_88E6065_FAMILY )
+#define G1_DEV_RECURSIVE_TAG_STRIP        0
+
+#define DEV_FORCE_WITH_VALUE            \
+    ( DEV_88E6181 | DEV_88E6183 | DEV_88E6093 |     \
+      DEV_88E6095_FAMILY | DEV_88E6185_FAMILY |        \
+      DEV_88E6065_FAMILY | DEV_88E6097_FAMILY |        \
+      DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_FORCE_WITH_VALUE            \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+/* Grouping ATU Entry State for Unicast */
+
+#define DEV_UC_7_DYNAMIC        \
+    ( DEV_88E6065_FAMILY | DEV_88E6095_FAMILY |    DEV_88E6185_FAMILY |\
+      DEV_88E6183 | DEV_88E6093 | DEV_88E6097_FAMILY |        \
+      DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_UC_7_DYNAMIC        \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_UC_NO_PRI_TO_CPU_STATIC_NRL        \
+    ( DEV_88E6065_FAMILY | DEV_88E6097_FAMILY |     \
+      DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_UC_NO_PRI_TO_CPU_STATIC_NRL        \
+    (  G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+#define DEV_UC_TO_CPU_STATIC_NRL            \
+    ( DEV_88E6065_FAMILY | DEV_88E6097_FAMILY |     \
+      DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_UC_TO_CPU_STATIC_NRL            \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+#define DEV_UC_NO_PRI_STATIC_NRL            \
+    ( DEV_88E6065_FAMILY | DEV_88E6097_FAMILY |     \
+      DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_UC_NO_PRI_STATIC_NRL            \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+#define DEV_UC_STATIC_NRL                    \
+    ( DEV_88E6065_FAMILY | DEV_88E6097_FAMILY |     \
+      DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_UC_STATIC_NRL                    \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_UC_NO_PRI_TO_CPU_STATIC            \
+    ( DEV_88E6065_FAMILY | DEV_88E6095_FAMILY | DEV_88E6185_FAMILY |\
+      DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_UC_NO_PRI_TO_CPU_STATIC            \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+#define DEV_UC_TO_CPU_STATIC            \
+    ( DEV_88E6065_FAMILY | DEV_88E6095_FAMILY | DEV_88E6185_FAMILY |\
+      DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_UC_TO_CPU_STATIC            \
+    ( G1_DEV_FE_AVB_FAMILY  | G1_DEV_88E6320_FAMILY)
+
+#define DEV_UC_NO_PRI_STATIC            \
+    ( DEV_88E6065_FAMILY | DEV_88E6095_FAMILY |    DEV_88E6185_FAMILY |\
+      DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |\
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_UC_NO_PRI_STATIC            \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_UC_STATIC    ( DEV_STATIC_ADDR )
+#define G1_DEV_UC_STATIC    ( G1_DEV_STATIC_ADDR )
+
+
+/* Grouping ATU Entry State for Multicast */
+
+#define DEV_MC_MGM_STATIC_UNLIMITED_RATE        \
+    ( DEV_88E6065_FAMILY | DEV_88E6097_FAMILY |     \
+      DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_MC_MGM_STATIC_UNLIMITED_RATE        \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+#define DEV_MC_PRIO_MGM_STATIC_UNLIMITED_RATE    \
+    ( DEV_88E6065_FAMILY | DEV_88E6097_FAMILY |     \
+      DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_MC_PRIO_MGM_STATIC_UNLIMITED_RATE    \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_MC_STATIC_UNLIMITED_RATE    ( DEV_STATIC_ADDR & ~DEV_88E6052 )
+#define G1_DEV_MC_STATIC_UNLIMITED_RATE    ( G1_DEV_STATIC_ADDR )
+
+#define DEV_MC_MGM_STATIC    ( DEV_STATIC_ADDR )
+#define G1_DEV_MC_MGM_STATIC    ( G1_DEV_STATIC_ADDR )
+
+#define DEV_MC_STATIC            ( DEV_STATIC_ADDR )
+#define G1_DEV_MC_STATIC        ( G1_DEV_STATIC_ADDR )
+#define DEV_MC_PRIO_MGM_STATIC        ( DEV_STATIC_ADDR )
+#define G1_DEV_MC_PRIO_MGM_STATIC    ( G1_DEV_STATIC_ADDR )
+
+#define DEV_MC_PRIO_STATIC_UNLIMITED_RATE ( DEV_STATIC_ADDR & ~ (DEV_88E6083|DEV_88E6052) )
+#define G1_DEV_MC_PRIO_STATIC_UNLIMITED_RATE ( G1_DEV_STATIC_ADDR )
+
+#define DEV_MC_PRIO_STATIC        ( DEV_STATIC_ADDR & ~DEV_88E6083 )
+#define G1_DEV_MC_PRIO_STATIC        ( G1_DEV_STATIC_ADDR )
+
+
+/* Grouping Devices that support Pause Limit */
+#define DEV_PAUSE_LIMIT        \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |     \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_PAUSE_LIMIT        \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+/* Grouping Devices that support Frame Mode */
+#define DEV_FRAME_MODE            \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY |     \
+      DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_FRAME_MODE            \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_POLICY            \
+    ( DEV_88E6097 | DEV_88E6165 | DEV_AVB_FAMILY | DEV_88E6175)
+#define G1_DEV_POLICY    ( G1_DEV_88E6351_FAMILY | G1_DEV_FE_AVB_FAMILY )
+#define DEV_PORT_ETYPE        \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_PORT_ETYPE    \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_FID_REG        \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_FID_REG    (G1_DEV_88E6320_AVB_FAMILY)
+
+#define DEV_FRAME_TO_REGISTER    \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_FRAME_TO_REGISTER   (G1_DEV_88E6320_AVB_FAMILY)
+
+#define DEV_RMU_MODE    ( DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_RMU_MODE  (G1_DEV_88E6320_AVB_FAMILY)
+
+#define DEV_DA_CHECK    ( DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY )
+#define G1_DEV_DA_CHECK  (G1_DEV_88E6320_AVB_FAMILY)
+
+#define DEV_DA_CHECK_1    ( DEV_88E6352_FAMILY )
+#define G1_DEV_DA_CHECK_1  (G1_DEV_88E6320_AVB_FAMILY)
+
+#define DEV_HEADER_TYPE    (DEV_88E6352_FAMILY)
+#define G1_DEV_HEADER_TYPE  (G1_DEV_88E6320_AVB_FAMILY)
+
+#define DEV_COUNTER_MODE    (DEV_88E6352_FAMILY)
+#define G1_DEV_COUNTER_MODE    \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_SWITCH_MAC_REG        \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_SWITCH_MAC_REG    \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_SWITCH_WOL_WOF_REG       (DEV_88E6352_FAMILY)
+#define G1_DEV_SWITCH_WOL_WOF_REG    \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_ATU_STATS            \
+    ( DEV_88E6097_FAMILY | DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_ATU_STATS    \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_JUMBO_MODE        \
+    ( DEV_88E6165_FAMILY | DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_JUMBO_MODE  \
+    (G1_DEV_88E6320_FAMILY)
+#define DEV_PTP        ( DEV_88E6165_FAMILY | DEV_AVB_FAMILY )
+#define G1_DEV_PTP    \
+    ( G1_DEV_FE_AVB_FAMILY)
+#define DEV_PTP_2        ( DEV_AVB_FAMILY )
+#define G1_DEV_PTP_2        ( G1_DEV_FE_AVB_FAMILY )
+#define DEV_TAI        ( DEV_AVB_FAMILY )
+#define G1_DEV_TAI    \
+    ( G1_DEV_FE_AVB_FAMILY )
+#define DEV_TAI_MULTI_PTP_SYNC    ( DEV_AVB_FAMILY)
+#define G1_DEV_TAI_MULTI_PTP_SYNC    0
+#define DEV_TAI_EXT_CLK    ( DEV_88E6352_AVB_FAMILY)
+#define G1_DEV_TAI_EXT_CLK    (G1_DEV_88E6320_AVB_FAMILY)
+#define DEV_ARRV_TS_MODE    ( DEV_88E6352_AVB_FAMILY)
+#define G1_DEV_ARRV_TS_MODE    (G1_DEV_88E6320_AVB_FAMILY)
+#define DEV_TAI_TRIG_GEN   0
+#define G1_DEV_TAI_TRIG_GEN    (G1_DEV_88E6320_AVB_FAMILY)
+#define DEV_RMU_PAGE2   0
+#define G1_DEV_RMU_PAGE2    (G1_DEV_88E6320_AVB_FAMILY)
+#define DEV_QUEUE_CONTROL   0
+#define G1_DEV_QUEUE_CONTROL    (G1_DEV_88E6320_AVB_FAMILY)
+#define DEV_DEBUG_COUNTER   0
+#define G1_DEV_DEBUG_COUNTER    (G1_DEV_88E6320_AVB_FAMILY)
+#define DEV_CUT_THROUGH   0
+#define G1_DEV_CUT_THROUGH    (G1_DEV_88E6320_AVB_FAMILY)
+
+
+#define DEV_AVB_POLICY    ( DEV_AVB_FAMILY )
+#define G1_DEV_AVB_POLICY\
+    ( G1_DEV_FE_AVB_FAMILY )
+#define DEV_AVB_POLICY_RECOVER_CLK    ( DEV_88E6351_AVB_FAMILY )
+#define G1_DEV_AVB_POLICY_RECOVER_CLK 0
+
+#define DEV_QAV        ( DEV_AVB_FAMILY )
+#define G1_DEV_QAV    \
+    ( G1_DEV_FE_AVB_FAMILY )
+#define DEV_QAV_QPRI_QTS_TOKEN        ( DEV_88E6351_AVB_FAMILY | DEV_88E6352_AVB_FAMILY )
+#define G1_DEV_QAV_QPRI_QTS_TOKEN    \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+#define DEV_QAV_QPRI_RATE        0
+#define G1_DEV_QAV_QPRI_RATE    \
+    ( G1_DEV_FE_AVB_FAMILY )
+#define DEV_QAV_ISO_DELAY_LIMIT        ( DEV_88E6351_AVB_FAMILY )
+#define G1_DEV_QAV_ISO_DELAY_LIMIT    0
+
+#define DEV_TSM_RESOURCE    ( DEV_AVB_FAMILY )
+#define G1_DEV_TSM_RESOURCE    \
+    ( G1_DEV_FE_AVB_FAMILY )
+
+#define DEV_200BASE_CFG    ( DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_200BASE_CFG        ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+#define DEV_RGMII_TIMING    ( DEV_88E6165_FAMILY | DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_RGMII_TIMING        ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+#define DEV_Q_COUNTER_TABLE    ( DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_Q_COUNTER_TABLE        ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+#define DEV_LED_CFG        ( DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_LED_CFG            ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_MAC_AVB        ( DEV_AVB_FAMILY)
+#define G1_DEV_MAC_AVB            ( G1_DEV_FE_AVB_FAMILY )
+
+#define DEV_CLK_125        ( DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_CLK_125            ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_QAVB_PRIORITY_OVERRIDE_TABLE    \
+    ( DEV_AVB_FAMILY )
+#define G1_DEV_QAVB_PRIORITY_OVERRIDE_TABLE    \
+    ( G1_DEV_FE_AVB_FAMILY )
+#define DEV_FRAME_PRIORITY_OVERRIDE_TABLE    \
+    ( DEV_88E6351_FAMILY| DEV_88E6352_FAMILY )
+#define G1_DEV_FRAME_PRIORITY_OVERRIDE_TABLE    \
+    ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_SCRATCH_MISC_CTRL        ( DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_SCRATCH_MISC_CTRL        ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+#define DEV_IP_MAPPING_TABLE        ( DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_IP_MAPPING_TABLE            ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+#define DEV_EEPROM            ( DEV_88E6351_FAMILY | DEV_88E6352_FAMILY)
+#define G1_DEV_EEPROM            ( G1_DEV_88E6351_FAMILY | G1_DEV_88E6320_FAMILY)
+
+#define DEV_RMGMT        (DEV_88E6351_FAMILY | DEV_88E6352_FAMILY| DEV_88E6097_FAMILY |DEV_88E6165_FAMILY)
+#define G1_DEV_RMGMT        ( G1_DEV_FE_AVB_FAMILY | G1_DEV_88E6320_FAMILY )
+
+#define DEV_PHY_ACCESS_NO_DIRECTLY    0
+#define G1_DEV_PHY_ACCESS_NO_DIRECTLY    ( G1_DEV_88ESPANNAK_FAMILY )
+
+#define DEV_TCAM        ( DEV_88E6352_FAMILY )
+#define G1_DEV_TCAM        (G1_DEV_88E6320_AVB_FAMILY)
+
+/***************************************************************************/
+/* Added definitions, to improve DSDT */
+#define DEV_SWITCH_MODE     (DEV_QD_PLUS | DEV_ENHANCED_FE_SWITCH )
+#define G1_DEV_SWITCH_MODE  (G1_DEV_QD_PLUS | G1_DEV_ENHANCED_FE_SWITCH )
+
+#define DEV_INIT_READY    (DEV_QD_PLUS | DEV_ENHANCED_FE_SWITCH | DEV_FE_AVB_FAMILY )
+#define G1_DEV_INIT_READY (G1_DEV_QD_PLUS | G1_DEV_ENHANCED_FE_SWITCH | G1_DEV_FE_AVB_FAMILY )
+
+#define DEV_IGNORE_FCS    (DEV_GIGABIT_MANAGED_SWITCH | DEV_ENHANCED_FE_SWITCH | DEV_FE_AVB_FAMILY )
+#define G1_DEV_IGNORE_FCS (G1_DEV_GIGABIT_MANAGED_SWITCH | G1_DEV_ENHANCED_FE_SWITCH | G1_DEV_FE_AVB_FAMILY )
+
+#define DEV_LOCKED_PORT    (DEV_GIGABIT_MANAGED_SWITCH | DEV_ENHANCED_FE_SWITCH | DEV_FE_AVB_FAMILY )
+#define G1_DEV_LOCKED_PORT (G1_DEV_GIGABIT_MANAGED_SWITCH | G1_DEV_ENHANCED_FE_SWITCH | G1_DEV_FE_AVB_FAMILY )
+
+#define DEV_IGNORE_WRONG_DAT    (DEV_GIGABIT_MANAGED_SWITCH | DEV_ENHANCED_FE_SWITCH | DEV_FE_AVB_FAMILY )
+#define G1_DEV_IGNORE_WRONG_DAT (G1_DEV_GIGABIT_MANAGED_SWITCH | G1_DEV_ENHANCED_FE_SWITCH | G1_DEV_FE_AVB_FAMILY )
+
+#define DEV_DISCARD_TAGGED    (DEV_88E6093_FAMILY | DEV_ENHANCED_FE_SWITCH | DEV_FE_AVB_FAMILY )
+#define G1_DEV_DISCARD_TAGGED (G1_DEV_88E6093_FAMILY | G1_DEV_ENHANCED_FE_SWITCH | G1_DEV_FE_AVB_FAMILY )
+
+#define DEV_MAP_DA    (DEV_88E6093_FAMILY | DEV_ENHANCED_FE_SWITCH | DEV_FE_AVB_FAMILY )
+#define G1_DEV_MAP_DA (G1_DEV_88E6093_FAMILY | G1_DEV_ENHANCED_FE_SWITCH | G1_DEV_FE_AVB_FAMILY )
+
+/* Pearl features */
+
+/***************************************************************************/
+/*PIRL Alpha factor macro for internal switch*/
+#define PIRL_ALPHA 6250000
+
+/* Macros to utilize Device Group */
+
+#define IS_IN_DEV_GROUP(dev,_group) ((dev->devName & (_group))||(dev->devName1 & (G1_##_group)))
+
+/* need to check port number(_hwPort) later */
+#define IS_VALID_API_CALL(dev,_hwPort, _devName)            \
+    ((_hwPort == GT_INVALID_PORT) ? GT_BAD_PARAM :            \
+    (!((dev->devName & (_devName))||(dev->devName1 & (G1_##_devName))) ? GT_NOT_SUPPORTED : GT_OK))
+
+#define DOES_DEVPORT_SUPPORT_PCS(dev, _hwPort)            \
+    (!((dev->devName & DEV_GIGABIT_SWITCH)||(dev->devName1 & G1_DEV_GIGABIT_SWITCH)) || \
+     ((dev->devName & DEV_INTERNAL_GPHY)||(dev->devName1 & G1_DEV_INTERNAL_GPHY)) ||   \
+    (!((dev->devName & DEV_EXTERNAL_PHY_ONLY)||(dev->devName1 & G1_DEV_EXTERNAL_PHY_ONLY)) && (((_hwPort) < 8) || ((_hwPort) > 10)))    \
+    ? 0 : 1)
+
+#define IS_CONFIGURABLE_PHY(dev,_hwPort)    driverIsPhyAttached(dev,_hwPort)
+#define GT_GET_PHY_ID(dev,_hwPort)            driverGetPhyID(dev,_hwPort)
+
+#define GT_GET_SERDES_PORT(dev,_hwPort)        driverGetSerdesPort(dev,_hwPort)
+
+#define GT_GIG_PHY_INT_MASK(dev,_portVct)    ((_portVct) = (_portVct) & 0xF7)
+
+
+#define RECOMMENDED_ESB_LIMIT(dev, _bps)                    \
+	((IS_IN_DEV_GROUP(dev, DEV_PIRL_RESOURCE)) ? 16777200 : 0xFFFFF0)
+
+#define RECOMMENDED_CBS_LIMIT(dev, _bps)                    \
+	((IS_IN_DEV_GROUP(dev, DEV_PIRL_RESOURCE)) ? 393216 : 5000000)
+
+#define RECOMMENDED_BUCKET_INCREMENT(dev, _bps)                \
+        ((IS_IN_DEV_GROUP(dev,DEV_PIRL_RESOURCE))?174:        \
+	((_bps) < 1000) ? 3125 :                            \
+	((_bps) < 10000) ? 25 : 5)
+
+#define FACTOR_FROM_BUCKET_INCREMENT(dev, _bInc, _f)        \
+    {                                                        \
+        if(IS_IN_DEV_GROUP(dev,DEV_PIRL_RESOURCE))            \
+        {                                                    \
+            if((_bInc) == 174) {(_f) = 64;}                    \
+            else {(_f) = 0;}                                \
+        }                                                    \
+        else                                                \
+        {                                                    \
+		if ((_bInc) == 3125)                        \
+			(_f) = 32;                          \
+		else if ((_bInc) == 25)                     \
+			(_f) = 4;                           \
+		else if ((_bInc) == 50)                     \
+			(_f) = 8;                           \
+		else if ((_bInc) == 5)                      \
+			(_f) = 8;                           \
+		else                                        \
+			(_f) = 0;                           \
+	}                                                   \
+    }
+
+
+#define TSM_GET_CBS(_rate,_cts)    ((_rate)/((_cts)*8))
+#define TSM_GET_RATE(_cbs,_cts)    ((_cbs)*8*(_cts))
+
+
+#define GT_PTP_BUILD_TIME(_time1, _time2)       (((_time1) << 16) | (_time2))
+#define GT_PTP_L16_TIME(_time1) ((_time1) & 0xFFFF)
+#define GT_PTP_H16_TIME(_time1) (((_time1) >> 16) & 0xFFFF)
+
+/*
+ * typedef: enum GT_ATU_STATS_OP
+ *
+ * Description: Enumeration of the ATU Statistics operation
+ *
+ * Enumerations:
+ *   GT_ATU_STATS_ALL        - count all valid entry
+ *   GT_ATU_STATS_NON_STATIC - count all vaild non-static entry
+ *   GT_ATU_STATS_ALL_FID    - count all valid entry in the given DBNum(or FID)
+ *   GT_ATU_STATS_NON_STATIC_FID - count all valid non-static entry in the given DBNum(or FID)
+ */
+typedef enum
+{
+    GT_ATU_STATS_ALL = 0,
+    GT_ATU_STATS_NON_STATIC,
+    GT_ATU_STATS_ALL_FID,
+    GT_ATU_STATS_NON_STATIC_FID
+}GT_ATU_STATS_OP;
+
+
+/*
+ *  typedef: struct GT_ATU_STAT
+ *
+ *  Description:
+ *        This structure is used to count ATU entries.
+ *
+ *  Fields:
+ *      op       - counter type
+ *        DBNum - required only if op is either GT_ATU_STATS_FID or
+ *                GT_ATU_STATS_NON_STATIC_FID
+ */
+typedef struct
+{
+    GT_ATU_STATS_OP    op;
+    GT_U32             DBNum;
+} GT_ATU_STAT;
+
+
+typedef struct _EXTRA_OP_DATA
+{
+    GT_U32 moveFrom;
+    GT_U32 moveTo;
+    GT_U32 intCause;
+    GT_U32 reserved;
+} GT_EXTRA_OP_DATA;
+
+/*******************************************************************************
+* gvtuGetViolation
+*
+* DESCRIPTION:
+*       Get VTU Violation data
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       vtuIntStatus - interrupt cause, source portID, and vid.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NOT_SUPPORT  - if current device does not support this feature.
+*
+* COMMENTS:
+*        This is an internal function. No user should call this function.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvtuGetViolation
+(
+    IN GT_QD_DEV*       dev,
+    OUT GT_VTU_INT_STATUS *vtuIntStatus
+);
+
+/*******************************************************************************
+* gvtuGetViolation2
+*
+* DESCRIPTION:
+*       Get VTU Violation data (for Gigabit Device)
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       vtuIntStatus - interrupt cause, source portID, and vid.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NOT_SUPPORT  - if current device does not support this feature.
+*
+* COMMENTS:
+*        This is an internal function. No user should call this function.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvtuGetViolation2
+(
+    IN GT_QD_DEV*       dev,
+    OUT GT_VTU_INT_STATUS *vtuIntStatus
+);
+
+/*******************************************************************************
+* gvtuGetViolation3
+*
+* DESCRIPTION:
+*       Get VTU Violation data (for Spinnaker family Device)
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       vtuIntStatus - interrupt cause, source portID, and vid.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NOT_SUPPORT  - if current device does not support this feature.
+*
+* COMMENTS:
+*        This is an internal function. No user should call this function.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvtuGetViolation3
+(
+    IN GT_QD_DEV*       dev,
+    OUT GT_VTU_INT_STATUS *vtuIntStatus
+);
+
+/*******************************************************************************
+* gatuGetViolation
+*
+* DESCRIPTION:
+*       Get ATU Violation data
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       atuIntStatus - interrupt cause, source portID, and vid.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NOT_SUPPORT  - if current device does not support this feature.
+*
+* COMMENTS:
+*        This is an internal function. No user should call this function.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gatuGetViolation
+(
+    IN  GT_QD_DEV         *dev,
+    OUT GT_ATU_INT_STATUS *atuIntStatus
+);
+
+/*******************************************************************************
+* gsysSetRetransmitMode
+*
+* DESCRIPTION:
+*       This routine set the Retransmit Mode.
+*
+* INPUTS:
+*       en - GT_TRUE Retransimt Mode is enabled, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetRetransmitMode
+(
+    IN GT_QD_DEV*       dev,
+    IN GT_BOOL en
+);
+
+/*******************************************************************************
+* gsysGetRetransmitMode
+*
+* DESCRIPTION:
+*       This routine get the Retransmit Mode.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       en - GT_TRUE Retransmit Mode is enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetRetransmitMode
+(
+    IN GT_QD_DEV*       dev,
+    IN GT_BOOL *en
+);
+
+/*******************************************************************************
+* gsysSetLimitBackoff
+*
+* DESCRIPTION:
+*       This routine set the Limit Backoff bit.
+*
+* INPUTS:
+*       en - GT_TRUE:  uses QoS half duplex backoff operation
+*            GT_FALSE: uses normal half duplex backoff operation
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetLimitBackoff
+(
+    IN GT_QD_DEV*       dev,
+    IN GT_BOOL en
+);
+
+/*******************************************************************************
+* gsysGetLimitBackoff
+*
+* DESCRIPTION:
+*       This routine set the Limit Backoff bit.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       en - GT_TRUE:  uses QoS half duplex backoff operation
+*            GT_FALSE: uses normal half duplex backoff operation
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetLimitBackoff
+(
+    IN GT_QD_DEV*       dev,
+    IN GT_BOOL *en
+);
+
+/*******************************************************************************
+* gsysSetRsvRegPri
+*
+* DESCRIPTION:
+*       This routine set the Reserved Queue's Requesting Priority
+*
+* INPUTS:
+*       en - GT_TRUE: use the last received frome's priority
+*            GT_FALSE:use the last switched frame's priority
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetRsvReqPri
+(
+    IN GT_QD_DEV*       dev,
+    IN GT_BOOL en
+);
+
+/*******************************************************************************
+* gsysGetRsvReqPri
+*
+* DESCRIPTION:
+*       This routine get the Reserved Queue's Requesting Priority
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       en - GT_TRUE: use the last received frome's priority
+*            GT_FALSE:use the last switched frame's priority
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetRsvReqPri
+(
+    IN GT_QD_DEV*       dev,
+    IN GT_BOOL *en
+);
+
+/*******************************************************************************
+* gsysGetPtrCollision
+*
+* DESCRIPTION:
+*       This routine get the QC Pointer Collision.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       mode - GT_TRUE Discard is enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*         This feature is for both clippership and fullsail
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetPtrCollision
+(
+    IN GT_QD_DEV*       dev,
+    IN GT_BOOL *mode
+);
+
+/*******************************************************************************
+* gsysGetDpvCorrupt
+*
+* DESCRIPTION:
+*       This routine get the DpvCorrupt bit. This bit is set to a one when the
+*       QC detects a destination vector error
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       mode - GT_TRUE: destination vector corrupt, GT_FALSE otherwise.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*     This feature is on clippership, but not on fullsail
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetDpvCorrupt
+(
+    IN GT_BOOL *mode
+);
+
+/*******************************************************************************
+* gsysGetMissingPointers
+*
+* DESCRIPTION:
+*       This routine get the Missing Pointer bit. This bit is set to a one when
+*       the Register File detects less than 64 pointers in the Link List.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       mode - GT_TRUE: Missing Pointers error, GT_FALSE otherwise.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*     This feature is on clippership, but not on fullsail
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetMissingPointers
+(
+    IN GT_QD_DEV*       dev,
+    IN GT_BOOL *mode
+);
+
+/*******************************************************************************
+* gtDbgPrint
+*
+* DESCRIPTION:
+*       .
+*
+* INPUTS:
+*       None
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+void gtDbgPrint(char* format, ...);
+
+
+/*******************************************************************************
+* gtSemRegister
+*
+* DESCRIPTION:
+*       Assign QuarterDeck Semaphore functions to the given semaphore set.
+*        QuarterDeck maintains its own memory for the structure.
+*
+* INPUTS:
+*        semFunctions - point to the GT_SEM_ROUTINES
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       None
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtSemRegister
+(
+    IN GT_QD_DEV*       dev,
+    IN  GT_SEM_ROUTINES* semRoutines
+);
+
+
+/*******************************************************************************
+* gpirlInitialize
+*
+* DESCRIPTION:
+*       This routine initializes PIRL Resources.
+*
+* INPUTS:
+*       None
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       None
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpirlInitialize
+(
+    IN  GT_QD_DEV              *dev
+);
+
+/*******************************************************************************
+* gpirl2Initialize
+*
+* DESCRIPTION:
+*       This routine initializes PIRL2 Resources.
+*
+* INPUTS:
+*       None
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       None
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpirl2Initialize
+(
+    IN  GT_QD_DEV              *dev
+);
+
+#ifdef DEBUG_FEATURE /* this is a debug feature*/
+/*******************************************************************************
+* gprtGetPortQueueCtr
+*
+* DESCRIPTION:
+*       This routine gets the port queue counters.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       ctr - the counters value.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPortQueueCtr
+(
+    IN  GT_QD_DEV       *dev,
+    IN  GT_LPORT        port,
+    OUT GT_PORT_Q_STAT  *ctr
+);
+
+#endif
+
+
+/*******************************************************************************
+* lport2port
+*
+* DESCRIPTION:
+*       This function converts logical port number to physical port number
+*
+* INPUTS:
+*        portVec - physical port list in vector
+*        port    - logical port number
+* OUTPUTS:
+*        None.
+* RETURNS:
+*       physical port number
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_U8 lport2port
+(
+    IN GT_U16    portVec,
+    IN GT_LPORT     port
+);
+
+/*******************************************************************************
+* port2lport
+*
+* DESCRIPTION:
+*       This function converts physical port number to logical port number
+*
+* INPUTS:
+*        portVec - physical port list in vector
+*        port    - logical port number
+* OUTPUTS:
+*        None.
+* RETURNS:
+*       physical port number
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_LPORT port2lport
+(
+    IN GT_U16    portVec,
+    IN GT_U8     hwPort
+);
+
+/*******************************************************************************
+* lportvec2portvec
+*
+* DESCRIPTION:
+*       This function converts logical port vector to physical port vector
+*
+* INPUTS:
+*        portVec - physical port list in vector
+*        lVec     - logical port vector
+* OUTPUTS:
+*        None.
+* RETURNS:
+*       physical port vector
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_U32 lportvec2portvec
+(
+    IN GT_U16    portVec,
+    IN GT_U32     lVec
+);
+
+/*******************************************************************************
+* portvec2lportvec
+*
+* DESCRIPTION:
+*       This function converts physical port vector to logical port vector
+*
+* INPUTS:
+*        portVec - physical port list in vector
+*        pVec     - physical port vector
+* OUTPUTS:
+*        None.
+* RETURNS:
+*       logical port vector
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_U32 portvec2lportvec
+(
+    IN GT_U16    portVec,
+    IN GT_U32     pVec
+);
+
+/*******************************************************************************
+* lport2phy
+*
+* DESCRIPTION:
+*       This function converts logical port number to physical phy number.
+*
+* INPUTS:
+*        portVec - physical port list in vector
+*        port    - logical port number
+* OUTPUTS:
+*        None.
+* RETURNS:
+*       physical port number
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_U8 lport2phy
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port
+);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __msApiInternal_h */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/platform/gtMiiSmiIf.h b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/platform/gtMiiSmiIf.h
new file mode 100644
index 000000000000..388253bc0011
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/platform/gtMiiSmiIf.h
@@ -0,0 +1,238 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtMiiSmiIf.h
+*
+* DESCRIPTION:
+*       Includes functions prototypes for initializing and accessing the
+*       MII / SMI interface.
+*       This is the only file to be included from upper layers.
+*
+* DEPENDENCIES:
+*       None.
+*
+* FILE REVISION NUMBER:
+*       $Revision: 2 $
+*
+*******************************************************************************/
+
+#ifndef __gtMiiSmiIfh
+#define __gtMiiSmiIfh
+
+
+#include <msApi.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*******************************************************************************
+* miiSmiIfInit
+*
+* DESCRIPTION:
+*       This function initializes the MII / SMI interface.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       highSmiDevAddr - Indicates whether to use the high device register
+*                     addresses when accessing switch's registers (of all kinds)
+*                     i.e, the devices registers range is 0x10 to 0x1F, or to
+*                     use the low device register addresses (range 0x0 to 0xF).
+*                       GT_TRUE     - use high addresses (0x10 to 0x1F).
+*                       GT_FALSE    - use low addresses (0x0 to 0xF).
+*
+* RETURNS:
+*       DEVICE_ID       - on success
+*       0     - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_U16 miiSmiIfInit
+(
+    IN  GT_QD_DEV *dev,
+    OUT GT_BOOL   * highSmiDevAddr
+);
+
+/*******************************************************************************
+* miiSmiManualIfInit
+*
+* DESCRIPTION:
+*       This function returns Device ID from the given base address
+*
+* INPUTS:
+*       baseAddr - either 0x0 or 0x10. Indicates whether to use the low device
+*                    register address or high device register address.
+*                    The device register range is from 0x0 to 0xF or from 0x10
+*                    to 0x1F for 5 port switchs and from 0x0 to 0x1B for 8 port
+*                    switchs.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       DEVICE_ID       - on success
+*       0    - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_U16 miiSmiManualIfInit
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_U32        baseAddr
+);
+
+/*******************************************************************************
+* miiSmiIfReadRegister
+*
+* DESCRIPTION:
+*       This function reads a register throw the SMI / MII interface, to be used
+*       by upper layers.
+*
+* INPUTS:
+*       phyAddr     - The PHY address to be read.
+*       regAddr     - The register address to read.
+*
+* OUTPUTS:
+*       data        - The register's data.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS miiSmiIfReadRegister
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     phyAddr,
+    IN  GT_U8     regAddr,
+    OUT GT_U16    *data
+);
+
+
+/*******************************************************************************
+* miiSmiIfWriteRegister
+*
+* DESCRIPTION:
+*       This function writes to a register throw the SMI / MII interface, to be
+*       used by upper layers.
+*
+* INPUTS:
+*       phyAddr     - The PHY address to be read.
+*       regAddr     - The register address to read.
+*       data        - The data to be written to the register.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS miiSmiIfWriteRegister
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     phyAddr,
+    IN  GT_U8     regAddr,
+    IN  GT_U16    data
+);
+
+/*****************************************************************************
+* qdMultiAddrRead
+*
+* DESCRIPTION:
+*       This function reads data from a device in the secondary MII bus.
+*
+* INPUTS:
+*       phyAddr     - The PHY address to be read.
+*       regAddr     - The register address to read.
+*       value       - The storage where register date to be saved.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_TRUE   - on success
+*       GT_FALSE  - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_BOOL qdMultiAddrRead (GT_QD_DEV* dev, unsigned int phyAddr , unsigned int regAddr, unsigned int* value);
+
+/*****************************************************************************
+* qdMultiAddrWrite
+*
+* DESCRIPTION:
+*       This function writes data to the device in the secondary MII bus.
+*
+* INPUTS:
+*       phyAddr     - The PHY address to be read.
+*       regAddr     - The register address to read.
+*       value       - The data to be written into the register.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_TRUE   - on success
+*       GT_FALSE  - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_BOOL qdMultiAddrWrite (GT_QD_DEV* dev, unsigned int phyAddr , unsigned int regAddr, unsigned int value);
+
+#ifdef GT_RMGMT_ACCESS
+/*******************************************************************************
+* qdAccessRegs
+*
+* DESCRIPTION:
+*       This function access registers through device interface by user, to be
+*       used by upper layers.
+*
+* INPUTS:
+*   regList     - list of HW_DEV_RW_REG.
+*     HW_DEV_RW_REG:
+*     cmd - HW_REG_READ, HW_REG_WRITE, HW_REG_WAIT_TILL_0 or HW_REG_WAIT_TILL_1
+*     addr - SMI Address
+*     reg  - Register offset
+*     data - INPUT,OUTPUT:Value in the Register or Bit number
+*
+* OUTPUTS:
+*   regList     - list of HW_DEV_RW_REG.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS qdAccessRegs
+(
+    IN  GT_QD_DEV    *dev,
+    INOUT HW_DEV_REG_ACCESS *regList
+);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __gtMiiSmiIfh */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/platform/gtSem.h b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/platform/gtSem.h
new file mode 100644
index 000000000000..de5aa83a6278
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/platform/gtSem.h
@@ -0,0 +1,153 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtOs.h
+*
+* DESCRIPTION:
+*       Operating System wrapper
+*
+* DEPENDENCIES:
+*       None.
+*
+* FILE REVISION NUMBER:
+*       $Revision: 3 $
+*******************************************************************************/
+
+#ifndef __gtSemh
+#define __gtSemh
+
+#include <msApi.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/***** Defines  ********************************************************/
+
+#define OS_WAIT_FOREVER             0
+
+#define OS_MAX_TASKS                30
+#define OS_MAX_TASK_NAME_LENGTH     10
+
+#define OS_MAX_QUEUES               30
+#define OS_MAX_QUEUE_NAME_LENGTH    10
+
+#define OS_MAX_EVENTS               10
+
+#define OS_MAX_SEMAPHORES           50
+
+#define OS_EOF                      (-1)
+
+
+/*******************************************************************************
+* gtSemCreate
+*
+* DESCRIPTION:
+*       Create semaphore.
+*
+* INPUTS:
+*        state - beginning state of the semaphore, either SEM_EMPTY or SEM_FULL
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_SEM if success. Otherwise, NULL
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_SEM gtSemCreate
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_SEM_BEGIN_STATE state
+);
+
+/*******************************************************************************
+* gtSemDelete
+*
+* DESCRIPTION:
+*       Delete semaphore.
+*
+* INPUTS:
+*       smid - semaphore Id
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtSemDelete
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_SEM       smid
+);
+
+/*******************************************************************************
+* gtSemTake
+*
+* DESCRIPTION:
+*       Wait for semaphore.
+*
+* INPUTS:
+*       smid    - semaphore Id
+*       timeOut - time out in miliseconds or 0 to wait forever
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       OS_TIMEOUT - on time out
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtSemTake
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_SEM       smid,
+    IN GT_U32       timeOut
+);
+
+/*******************************************************************************
+* gtSemGive
+*
+* DESCRIPTION:
+*       release the semaphore which was taken previously.
+*
+* INPUTS:
+*       smid    - semaphore Id
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtSemGive
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_SEM       smid
+);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  /* __gtSemh */
+/* Do Not Add Anything Below This Line */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/platform/platformDeps.h b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/platform/platformDeps.h
new file mode 100644
index 000000000000..73da640801a0
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/h/platform/platformDeps.h
@@ -0,0 +1,30 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* platformDeps.h
+*
+* DESCRIPTION:
+*       platform dependent definitions
+*
+* DEPENDENCIES:   Platform.
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+
+#ifndef __platformDepsh
+#define __platformDepsh
+
+#include <msApi.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+GT_BOOL defaultMiiRead (unsigned int portNumber , unsigned int miiReg, unsigned int* value);
+GT_BOOL defaultMiiWrite (unsigned int portNumber , unsigned int miiReg, unsigned int value);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif   /* platformDepsh */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApi.h b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApi.h
new file mode 100644
index 000000000000..5ebeba14b3ae
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApi.h
@@ -0,0 +1,22 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* msApi.h
+*
+* DESCRIPTION:
+*       API definitions for QuarterDeck Device
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+
+#ifndef __msApi_h
+#define __msApi_h
+
+#include <msApiDefs.h>
+#include <msApiInternal.h>
+#include <msApiPrototype.h>
+
+#endif /* __msApi_h */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApiDefs.h b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApiDefs.h
new file mode 100644
index 000000000000..ac528a12b8ee
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApiDefs.h
@@ -0,0 +1,4529 @@
+ #include <Copyright.h>
+
+/********************************************************************************
+* msApiDefs.h
+*
+* DESCRIPTION:
+*       API definitions for QuarterDeck Device
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+
+#ifndef __msApiDefs_h
+#define __msApiDefs_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* Micro definitions */
+#undef GT_USE_MAD
+#undef GT_RMGMT_ACCESS
+#undef CONFIG_AVB_FPGA
+#undef CONFIG_AVB_FPGA_2
+#undef GT_PORT_MAP_IN_DEV
+#ifdef CHECK_API_SELECT
+#include "msApiSelect.h"
+#endif
+#include "msApiSelect.h"
+
+#include <msApiTypes.h>
+#ifdef GT_USE_MAD
+#include "madApiDefs.h"
+#endif
+
+#ifdef DEBUG_QD
+#define DBG_INFO(x) gtDbgPrint x
+#else
+#define DBG_INFO(x);
+#endif /* DEBUG_QD */
+
+typedef GT_U32 GT_SEM;
+
+#define ETHERNET_HEADER_SIZE    GT_ETHERNET_HEADER_SIZE
+#define IS_MULTICAST_MAC        GT_IS_MULTICAST_MAC
+#define IS_BROADCAST_MAC        GT_IS_BROADCAST_MAC
+
+#define GT_INVALID_PHY            0xFF
+#define GT_INVALID_PORT            0xFF
+#define GT_INVALID_PORT_VEC        0xFFFFFFFF
+
+#define GT_UNUSED_PARAM(_a)        (_a)=(_a)
+
+/*
+ *   Logical Port value based on a Port
+ *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *  |0|  reserved                                   |    port       |
+ *  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ *
+ *   The following macros should be used to extract specific info
+ *   from a Logical Port index
+ */
+typedef GT_U32 GT_LPORT;
+typedef GT_U32 GT_ETYPE;
+
+
+/* Define the different device type that may exist in system        */
+typedef enum
+{
+    GT_88E6021  = 0x021,
+    GT_88E6031  = 0x031,
+    GT_88E6035  = 0x035,
+    GT_88E6046  = 0x048,
+    GT_88E6047  = 0x049,
+    GT_88E6085  = 0x04A,
+    GT_88E6051  = 0x051,
+    GT_88E6052  = 0x052,
+    GT_88E6055  = 0x055,
+    GT_88E6060  = 0x060,
+    GT_88E6061  = 0x061,
+    GT_88E6065  = 0x065,
+    GT_88E6083  = 0x083,
+    GT_88E6093  = 0x093,
+    GT_88E6045  = 0x094,
+    GT_88E6095  = 0x095,
+    GT_88E6092  = 0x097,
+    GT_88E6096  = 0x098,
+    GT_88E6097  = 0x099,
+    GT_88E6063    = 0x153,
+    GT_88E6121    = 0x104,
+    GT_88E6122    = 0x105,
+    GT_88E6131    = 0x106,
+    GT_88E6108    = 0x107,
+    GT_88E6123    = 0x121,
+    GT_88E6124    = 0x124,    /* was 88E6124 */
+    GT_88E6140    = 0x141, /* Emerald */
+    GT_88E6161    = 0x161, /* Emerald */
+    GT_88E6165    = 0x165, /* Emerald */
+    GT_88E6171    = 0x171, /* Amber */
+    GT_88E6175    = 0x175, /* Amber */
+    GT_88E6181    = 0x1A0,
+    GT_88E6153    = 0x1A1,
+    GT_88E6183    = 0x1A3,
+    GT_88E6152    = 0x1A4,
+    GT_88E6155    = 0x1A5,
+    GT_88E6182    = 0x1A6,
+    GT_88E6185    = 0x1A7,
+    GT_88E6321    = 0x324,    /* was 88E6325 */ /* Amber */
+    GT_88E6350    = 0x371,    /* was 88E6371 */ /* Amber */
+    GT_88E6351    = 0x375,    /* Amber */
+    GT_88EC000    = 0xC00,    /* Melody 0xc00-0xc07 */
+    GT_88E3020    = 0x000,    /* Spinnaker 0x000 */
+    GT_88E6020    = 0x020,    /* Spinnaker 0x020 */
+    GT_88E6070    = 0x070,    /* Spinnaker 0x070 */
+    GT_88E6071    = 0x071,    /* Spinnaker 0x071 */
+    GT_88E6220    = 0x220,    /* Spinnaker 0x220 */
+    GT_88E6250    = 0x250,    /* Spinnaker 0x250 */
+    GT_FH_VPN   = 0xF53,
+    GT_FF_EG    = 0xF91,
+    GT_FF_HG    = 0xF93,
+    GT_88E6172    = 0x172, /* Agate */
+    GT_88E6176    = 0x176, /* Agate */
+    GT_88E6240    = 0x240, /* Agate */
+    GT_88E6352    = 0x352, /* Agate */
+    GT_88E6115    = 0x115, /* Pearl */
+    GT_88E6125    = 0x125, /* Pearl */
+    GT_88E6310    = 0x310, /* Pearl */
+    GT_88E6320    = 0x320, /* Pearl */
+
+}GT_DEVICE;
+
+
+/* Definition for the revision number of the device        */
+typedef enum
+{
+    GT_REV_0 = 0,
+    GT_REV_1,
+    GT_REV_2,
+    GT_REV_3
+}GT_DEVICE_REV;
+
+
+/* ToDo: No Used */
+typedef enum
+{
+    INTR_MODE_DISABLE =0,
+    INTR_MODE_ENABLE
+}INTERRUPT_MODE;
+
+/* Definition for the Port Speed */
+typedef enum
+{
+    PORT_SPEED_10_MBPS = 0,
+    PORT_SPEED_100_MBPS = 1,
+    PORT_SPEED_200_MBPS = 2,    /* valid only if device support */
+    PORT_SPEED_1000_MBPS = 2 ,    /* valid only if device support */
+    PORT_SPEED_UNKNOWN = 3
+} GT_PORT_SPEED_MODE;
+
+/* Definition for the forced Port Speed */
+typedef enum
+{
+    PORT_FORCE_SPEED_10_MBPS = 0,
+    PORT_FORCE_SPEED_100_MBPS = 1,
+    PORT_FORCE_SPEED_200_MBPS = 2,    /* valid only if device support */
+    PORT_FORCE_SPEED_1000_MBPS = 2,    /* valid only if device support */
+    PORT_DO_NOT_FORCE_SPEED =3
+} GT_PORT_FORCED_SPEED_MODE;
+
+/* Definition for the forced Port Duplex mode */
+typedef enum
+{
+    PORT_DO_NOT_FORCE_DUPLEX,
+    PORT_FORCE_FULL_DUPLEX,
+    PORT_FORCE_HALF_DUPLEX
+} GT_PORT_FORCED_DUPLEX_MODE;
+
+/* Definition for the forced Port Link */
+typedef enum
+{
+    PORT_DO_NOT_FORCE_LINK,
+    PORT_FORCE_LINK_UP,
+    PORT_FORCE_LINK_DOWN
+} GT_PORT_FORCED_LINK_MODE;
+
+/* Definition for the forced flow control mode */
+typedef enum
+{
+    PORT_DO_NOT_FORCE_FC,
+    PORT_FORCE_FC_ENABLED,
+    PORT_FORCE_FC_DISABLED
+} GT_PORT_FORCED_FC_MODE;
+
+/* Definition for the PPU state */
+typedef enum
+{
+    PPU_STATE_DISABLED_AT_RESET,
+    PPU_STATE_ACTIVE,
+    PPU_STATE_DISABLED_AFTER_RESET,
+    PPU_STATE_POLLING
+} GT_PPU_STATE;
+
+
+/*
+ * Typedef: enum GT_PORT_CONFIG_MODE
+ *
+ * Description: Defines port's interface type configuration mode determined at
+ *                reset. This definition may not represent the device under use.
+ *                Please refer to the device datasheet for detailed information.
+ *
+ */
+typedef enum
+{
+    PORTCFG_GMII_125MHZ = 0,        /* Px_GTXCLK = 125MHz, 1000BASE */
+    PORTCFG_FD_MII_0MHZ = 1,        /* Px_GTXCLK = 0 MHz, Power Save */
+    PORTCFG_FDHD_MII_25MHZ = 2,        /* Px_GTXCLK = 25MHz, 100BASE */
+    PORTCFG_FDHD_MII_2_5MHZ = 3,    /* Px_GTXCLK = 2.5MHz, 10BASE */
+    PORTCFG_FD_SERDES = 4,            /* Default value */
+    PORTCFG_FD_1000BASE_X = 5,
+    PORTCFG_MGMII = 6,                /* duplex, speed determined by the PPU */
+    PORTCFG_DISABLED = 7
+} GT_PORT_CONFIG_MODE;
+
+
+typedef enum
+{
+    GT_SA_FILTERING_DISABLE = 0,
+    GT_SA_DROP_ON_LOCK,
+    GT_SA_DROP_ON_UNLOCK,
+    GT_SA_DROP_TO_CPU
+} GT_SA_FILTERING;
+
+
+/* Definition for the Ingree/Egress Frame Mode */
+typedef enum
+{
+    GT_FRAME_MODE_NORMAL = 0,    /* Normal Network */
+    GT_FRAME_MODE_DSA,            /* Distributed Switch Architecture */
+    GT_FRAME_MODE_PROVIDER,        /* Provider */
+    GT_FRAME_MODE_ETHER_TYPE_DSA    /* Ether Type DSA */
+} GT_FRAME_MODE;
+
+/*
+ * Typedef: enum GT_JUMBO_MODE
+ *
+ * Description: Defines Jumbo Frame Size allowed to be tx and rx
+ *
+ * Fields:
+ *      GT_JUMBO_MODE_1522 - Rx and Tx frames with max byte of 1522.
+ *      GT_JUMBO_MODE_2048 - Rx and Tx frames with max byte of 2048.
+ *      GT_JUMBO_MODE_10240 - Rx and Tx frames with max byte of 10240.
+ *
+ */
+typedef enum
+{
+    GT_JUMBO_MODE_1522 = 0,
+    GT_JUMBO_MODE_2048,
+    GT_JUMBO_MODE_10240
+} GT_JUMBO_MODE;
+
+
+/*
+ * Typedef: enum GT_PRI_OVERRIDE
+ *
+ * Description: Defines the priority override
+ *
+ * Fields:
+ *      PRI_OVERRIDE_NONE - Normal frame priority processing occurs.
+ *        PRI_OVERRIDE_FRAME_QUEUE -
+ *            Both frame and queue overrides take place on the frame.
+ *      PRI_OVERRIDE_FRAME -
+ *            Overwite the frame's FPri (frame priority).
+ *            If the frame egresses tagged, the priority in the frame will be
+ *            the overwritten priority value.
+ *        PRI_OVERRIDE_QUEUE -
+ *            Overwite the frame's QPri (queue priority).
+ *            QPri is used internally to map the frame to one of the egress
+ *            queues inside the switch.
+ *
+ */
+typedef enum
+{
+    PRI_OVERRIDE_NONE = 0,
+    PRI_OVERRIDE_FRAME_QUEUE,
+    PRI_OVERRIDE_FRAME,
+    PRI_OVERRIDE_QUEUE
+} GT_PRI_OVERRIDE;
+
+
+/*
+ * Typedef: enum GT_FRAME_POLICY
+ *
+ * Description: Defines the policy of the frame
+ *
+ * Fields:
+ *      FRAME_POLICY_NONE - Normal frame switching
+ *      FRAME_POLICY_MIRROR - Mirror(copy) frame to the MirrorDest port
+ *      FRAME_POLICY_TRAP - Trap(re-direct) frame to the CPUDest port
+ *      FRAME_POLICY_DISCARD - Discard(filter) the frame
+ *
+ */
+typedef enum
+{
+    FRAME_POLICY_NONE = 0,
+    FRAME_POLICY_MIRROR,
+    FRAME_POLICY_TRAP,
+    FRAME_POLICY_DISCARD
+} GT_FRAME_POLICY;
+
+
+/*
+ * Typedef: enum GT_POLICY_TYPE
+ *
+ * Description: Defines the policy type
+ *
+ * Fields:
+ *      POLICY_TYPE_DA     - Policy based on Destination Address
+ *      POLICY_TYPE_SA     - Policy based on Source Address
+ *      POLICY_TYPE_VTU     - Policy based on VID
+ *        POLICY_TYPE_ETYPE    - based on Ether Type of a frame
+ *        POLICY_TYPE_PPPoE    - Policy for the frame with Ether Type of 0x8863
+ *        POLICY_TYPE_VBAS    - Policy for the frame with Ether Type of 0x8200
+ *        POLICY_TYPE_OPT82    - Policy for the frame with DHCP Option 82
+ *        POLICY_TYPE_UDP    - Policy for the frame with Broadcast IPv4 UDP or
+ *                        Multicast IPv6 UDP
+ */
+typedef enum
+{
+    POLICY_TYPE_DA,
+    POLICY_TYPE_SA,
+    POLICY_TYPE_VTU,
+    POLICY_TYPE_ETYPE,
+    POLICY_TYPE_PPPoE,
+    POLICY_TYPE_VBAS,
+    POLICY_TYPE_OPT82,
+    POLICY_TYPE_UDP
+} GT_POLICY_TYPE;
+
+
+/*
+ * Typedef: enum GT_PRI_OVERRIDE_FTYPE
+ *
+ * Description: Definition of the frame type for priority override
+ *
+ * Fields:
+ *        FTYPE_DSA_TO_CPU_BPDU -
+ *            Used on multicast DSA To_CPU frames with a Code of 0x0 (BPDU/MGMT).
+ *            Not used on non-DSA Control frames.
+ *        FTYPE_DSA_TO_CPU_F2R -
+ *            Used on DSA To_CPU frames with a Code of 0x1 (Frame to Register
+ *            Reply). Not used on non-DSA Control frames.
+ *        FTYPE_DSA_TO_CPU_IGMP -
+ *            Used on DSA To_CPU frames with a Code of 0x2 (IGMP/MLD Trap)
+ *            and on non-DSA Control frames that are IGMP or MLD trapped
+ *        FTYPE_DSA_TO_CPU_TRAP -
+ *            Used on DSA To_CPU frames with a Code of 0x3 (Policy Trap) and
+ *            on non-DSA Control frames that are Policy Trapped
+ *        FTYPE_DSA_TO_CPU_ARP -
+ *            Used on DSA To_CPU frames with a Code of 0x4 (ARP Mirror) and
+ *            on non-DSA Control frames that are ARP Mirrored (see gprtSetARPtoCPU API).
+ *        FTYPE_DSA_TO_CPU_MIRROR -
+ *            Used on DSA To_CPU frames with a Code of 0x5 (Policy Mirror) and
+ *            on non-DSA Control frames that are Policy Mirrored (see gprtSetPolicy API).
+ *        FTYPE_DSA_TO_CPU_RESERVED -
+ *            Used on DSA To_CPU frames with a Code of 0x6 (Reserved). Not
+ *            used on non-DSA Control frames.
+ *        FTYPE_DSA_TO_CPU_UCAST_MGMT -
+ *            Used on unicast DSA To_CPU frames with a Code of 0x0 (unicast
+ *            MGMT). Not used on non-DSA Control frames.
+ *        FTYPE_DSA_FROM_CPU -
+ *            Used on DSA From_CPU frames. Not used on non-DSA Control frame
+ *        FTYPE_DSA_CROSS_CHIP_FC -
+ *            Used on DSA Cross Chip Flow Control frames (To_Sniffer Flow
+ *            Control). Not used on non-DSA Control frames.
+ *        FTYPE_DSA_CROSS_CHIP_EGRESS_MON -
+ *            Used on DSA Cross Chip Egress Monitor frames (To_Sniffer Tx).
+ *            Not used on non-DSA Control frames.
+ *        FTYPE_DSA_CROSS_CHIP_INGRESS_MON -
+ *            Used on DSA Cross Chip Ingress Monitor frames (To_Sniffer Rx).
+ *            Not used on non-DSA Control frames.
+ *        FTYPE_PORT_ETYPE_MATCH -
+ *            Used on normal network ports (see gprtSetFrameMode API)
+ *            on frames whose Ethertype matches the port's PortEType register.
+ *            Not used on non-DSA Control frames.
+ *        FTYPE_BCAST_NON_DSA_CONTROL -
+ *            Used on Non-DSA Control frames that contain a Broadcast
+ *            destination address. Not used on DSA Control frames.
+ *        FTYPE_PPPoE_NON_DSA_CONTROL -
+ *            Used on Non-DSA Control frames that contain an Ether Type 0x8863
+ *            (i.e., PPPoE frames). Not used on DSA Control frames.
+ *        FTYPE_IP_NON_DSA_CONTROL -
+ *            Used on Non-DSA Control frames that contain an IPv4 or IPv6 Ether
+ *            Type. Not used on DSA Control frames.
+ */
+typedef enum
+{
+    FTYPE_DSA_TO_CPU_BPDU = 0,
+    FTYPE_DSA_TO_CPU_F2R,
+    FTYPE_DSA_TO_CPU_IGMP,
+    FTYPE_DSA_TO_CPU_TRAP,
+    FTYPE_DSA_TO_CPU_ARP,
+    FTYPE_DSA_TO_CPU_MIRROR,
+    FTYPE_DSA_TO_CPU_RESERVED,
+    FTYPE_DSA_TO_CPU_UCAST_MGMT,
+    FTYPE_DSA_FROM_CPU,
+    FTYPE_DSA_CROSS_CHIP_FC,
+    FTYPE_DSA_CROSS_CHIP_EGRESS_MON,
+    FTYPE_DSA_CROSS_CHIP_INGRESS_MON,
+    FTYPE_PORT_ETYPE_MATCH,
+    FTYPE_BCAST_NON_DSA_CONTROL,
+    FTYPE_PPPoE_NON_DSA_CONTROL,
+    FTYPE_IP_NON_DSA_CONTROL
+} GT_PRI_OVERRIDE_FTYPE;
+
+
+/*
+ * Typedef: struct GT_QPRI_TBL_ENTRY
+ *
+ * Description: This structure is used for the entry of Queue Priority Override
+ *                Table.
+ *
+ * Fields:
+ *        qPriEn    - GT_TRUE to enable Queue Priority, GT_FALSE otherwise
+ *        qPriority - priority to be overridden ( 0 ~ 3 ) only if qPriEn is GT_TRUE
+ *                    When qPriEn is GT_FALSE, qPriority should be ignored.
+ *
+ * Notes: If device does not support qPriAvbEn, qPriAvbEn and qAvbPriority fields
+ *        will be ignored.
+ */
+typedef struct
+{
+    GT_BOOL        qPriEn;
+    GT_U32        qPriority;
+}GT_QPRI_TBL_ENTRY;
+
+
+/*
+ * Typedef: struct GT_FPRI_TBL_ENTRY
+ *
+ * Description: This structure is used for the entry of Frame Priority Override
+ *                Table.
+ *
+ * Fields:
+ *        fPriEn    - GT_TRUE to enable Frame Priority, GT_FALSE otherwise
+ *        fPriority - priority to be overridden ( 0 ~ 7 ) only if fPriEn is GT_TRUE
+ *                    When fPriEn is GT_FALSE, fPriority should be ignored.
+ */
+typedef struct
+{
+    GT_BOOL        fPriEn;
+    GT_U32        fPriority;
+}GT_FPRI_TBL_ENTRY;
+
+
+/* Maximam number of ports a switch may have. */
+#define MAX_SWITCH_PORTS    11
+#define VERSION_MAX_LEN     30
+#define MAX_QOS_WEIGHTS        128
+
+/*
+ * Typedef: struct GT_QoS_WEIGHT
+ *
+ * Description: This structure is used for Programmable Round Robin Weights.
+ *
+ * Fields:
+ *      len    - length of the valid queue data
+ *        queue  - upto 128 queue data
+ */
+typedef struct
+{
+    GT_U32        len;
+    GT_U8        queue[MAX_QOS_WEIGHTS];
+}GT_QoS_WEIGHT;
+
+
+
+/*
+ * Typedef: struct GT_VERSION
+ *
+ * Description: This struct holds the package version.
+ *
+ * Fields:
+ *      version - string array holding the version.
+ *
+ */
+typedef struct
+{
+    GT_U8   version[VERSION_MAX_LEN];
+}GT_VERSION;
+
+
+/*
+ * typedef: struct GT_RMU
+ *
+ * Description: This struct holds the Remote Management Unit mode.
+ *
+ * Fields:
+ *        rmuEn    - enable or disable RMU
+ *        port    - logical port where RMU is enabled
+ */
+typedef struct
+{
+    GT_BOOL        rmuEn;
+    GT_LPORT    port;
+} GT_RMU;
+
+
+
+/*
+ * Typedef:
+ *
+ * Description: Defines the different sizes of the Mac address table.
+ *
+ * Fields:
+ *      ATU_SIZE_256    -   256 entries Mac address table.
+ *      ATU_SIZE_512    -   512 entries Mac address table.
+ *      ATU_SIZE_1024   -   1024 entries Mac address table.
+ *      ATU_SIZE_2048   -   2048 entries Mac address table.
+ *      ATU_SIZE_4096   -   4096 entries Mac address table.
+ *      ATU_SIZE_8192   -   8192 entries Mac address table.
+ *
+ */
+typedef enum
+{
+    ATU_SIZE_256,
+    ATU_SIZE_512,
+    ATU_SIZE_1024,
+    ATU_SIZE_2048,
+    ATU_SIZE_4096,
+    ATU_SIZE_8192
+}ATU_SIZE;
+
+
+/*
+ * typedef: enum GT_PORT_STP_STATE
+ *
+ * Description: Enumeration of the port spanning tree state.
+ *
+ * Enumerations:
+ *   GT_PORT_DISABLE    - port is disabled.
+ *   GT_PORT_BLOCKING   - port is in blocking/listening state.
+ *   GT_PORT_LEARNING   - port is in learning state.
+ *   GT_PORT_FORWARDING - port is in forwarding state.
+ */
+typedef enum
+{
+    GT_PORT_DISABLE = 0,
+    GT_PORT_BLOCKING,
+    GT_PORT_LEARNING,
+    GT_PORT_FORWARDING
+} GT_PORT_STP_STATE;
+
+
+/*
+ * typedef: enum GT_EGRESS_MODE
+ *
+ * Description: Enumeration of the port egress mode.
+ *
+ * Enumerations:
+ *   GT_UNMODIFY_EGRESS - frames are transmited unmodified.
+ *   GT_TAGGED_EGRESS   - all frames are transmited tagged.
+ *   GT_UNTAGGED_EGRESS - all frames are transmited untagged.
+ *   GT_ADD_TAG         - always add a tag. (or double tag)
+ */
+typedef enum
+{
+    GT_UNMODIFY_EGRESS = 0,
+    GT_UNTAGGED_EGRESS,
+    GT_TAGGED_EGRESS,
+    GT_ADD_TAG
+} GT_EGRESS_MODE;
+
+/*  typedef: enum GT_DOT1Q_MODE */
+
+typedef enum
+{
+    GT_DISABLE = 0,
+    GT_FALLBACK,
+    GT_CHECK,
+    GT_SECURE
+} GT_DOT1Q_MODE;
+
+
+/* typedef: enum GT_SW_MODE */
+
+typedef enum
+{
+    GT_CPU_ATTATCHED = 0, /* ports come up disabled */
+    GT_BACKOFF,           /* EEPROM attac mode with old half duplex backoff mode */
+    GT_STAND_ALONE,       /* ports come up enabled, ignore EEPROM */
+    GT_EEPROM_ATTATCHED   /* EEPROM defined prot states */
+} GT_SW_MODE;
+
+
+/*
+ * Typedef: enum GT_ATU_OPERARION
+ *
+ * Description: Defines the different ATU and VTU operations
+ *
+ * Fields:
+ *      FLUSH_ALL           - Flush all entries.
+ *      FLUSH_UNLOCKED      - Flush all unlocked entries in ATU.
+ *      LOAD_PURGE_ENTRY    - Load / Purge entry.
+ *      GET_NEXT_ENTRY      - Get next ATU or VTU  entry.
+ *      FLUSH_ALL_IN_DB     - Flush all entries in a particular DBNum.
+ *      FLUSH_UNLOCKED_IN_DB - Flush all unlocked entries in a particular DBNum.
+ *      SERVICE_VIOLATONS   - sevice violations of VTU
+ *
+ */
+typedef enum
+{
+    FLUSH_ALL = 1,        /* for both atu and vtu */
+    FLUSH_UNLOCKED,        /* for atu only */
+    LOAD_PURGE_ENTRY,    /* for both atu and vtu */
+    GET_NEXT_ENTRY,        /* for both atu and vtu */
+    FLUSH_ALL_IN_DB,    /* for atu only */
+    FLUSH_UNLOCKED_IN_DB,    /* for atu only */
+    SERVICE_VIOLATIONS     /* for vtu only */
+} GT_ATU_OPERATION, GT_VTU_OPERATION;
+
+
+/*
+ * typedef: enum GT_FLUSH_CMD
+ *
+ * Description: Enumeration of the address translation unit flush operation.
+ *
+ * Enumerations:
+ *   GT_FLUSH_ALL       - flush all entries.
+ *   GT_FLUSH_ALL_UNBLK - flush all unblocked (or unlocked).
+ *   GT_FLUSH_ALL_UNLOCKED - flush all unblocked (or unlocked).
+ */
+typedef enum
+{
+    GT_FLUSH_ALL       = 1,
+    GT_FLUSH_ALL_UNBLK = 2,
+    GT_FLUSH_ALL_UNLOCKED = 2
+}GT_FLUSH_CMD;
+
+/*
+ * typedef: enum GT_MOVE_CMD
+ *
+ * Description: Enumeration of the address translation unit move or remove operation.
+ *     When destination port is set to 0xF, Remove operation occurs.
+ *
+ * Enumerations:
+ *   GT_MOVE_ALL       - move all entries.
+ *   GT_MOVE_ALL_UNBLK - move all unblocked (or unlocked).
+ *   GT_MOVE_ALL_UNLOCKED - move all unblocked (or unlocked).
+ */
+typedef enum
+{
+    GT_MOVE_ALL       = 1,
+    GT_MOVE_ALL_UNBLK = 2,
+    GT_MOVE_ALL_UNLOCKED = 2
+}GT_MOVE_CMD;
+
+
+/*
+ * typedef: enum GT_ATU_UC_STATE
+ *
+ * Description:
+ *      Enumeration of the address translation unit entry state of unicast
+ *      entris.
+ *
+ * Enumerations:
+ *        GT_UC_INVALID   - invalid entry.
+ *        GT_UC_DYNAMIC   - unicast dynamic entry.
+ *        GT_UC_NO_PRI_TO_CPU_STATIC_NRL
+ *                - static unicast entry that will be forwarded to CPU without
+ *                    forcing priority and without rate limiting.
+ *        GT_UC_TO_CPU_STATIC_NRL
+ *                - static unicast entry that will be forwarded to CPU without
+ *                    rate limiting.
+ *        GT_UC_NO_PRI_STATIC_NRL
+ *                - static unicast entry without forcing priority and without.
+ *                    rate limiting.
+ *        GT_UC_NO_PRI_STATIC_AVB_ENTRY
+ *                - static unicast AVB entry without forcing priority if MacAvb is enabled.
+ *        GT_UC_STATIC_NRL    - static unicast entry without rate limiting.
+ *        GT_UC_STATIC_AVB_ENTRY - static unicast AVB entry if MacAvb is enabled .
+ *        GT_UC_NO_PRI_TO_CPU_STATIC
+ *                - static unicast entry that will be forwarded to CPU without
+ *                    forcing priority.
+ *        GT_UC_TO_CPU_STATIC - static unicast entry that will be forwarded to CPU.
+ *        GT_UC_NO_PRI_STATIC - static unicast entry without forcing priority.
+ *        GT_UC_STATIC    - static unicast entry.
+ *
+ * Note: Please refer to the device datasheet for detailed unicast entry states
+ *        that are supported by the device.
+ */
+typedef enum
+{
+    GT_UC_INVALID      = 0,
+    GT_UC_DYNAMIC      = 0x1,
+
+    GT_UC_NO_PRI_TO_CPU_STATIC_NRL    = 0x8,
+    GT_UC_TO_CPU_STATIC_NRL            = 0x9,
+    GT_UC_NO_PRI_STATIC_NRL            = 0xA,
+    GT_UC_NO_PRI_STATIC_AVB_ENTRY    = 0xA,
+    GT_UC_STATIC_NRL                 = 0xB,
+    GT_UC_STATIC_AVB_ENTRY            = 0xB,
+
+    GT_UC_NO_PRI_TO_CPU_STATIC    = 0xC,
+    GT_UC_TO_CPU_STATIC         = 0xD,
+    GT_UC_NO_PRI_STATIC         = 0xE,
+    GT_UC_STATIC                  = 0xF
+} GT_ATU_UC_STATE;
+
+
+/*
+ * typedef: enum GT_ATU_MC_STATE
+ *
+ * Description:
+ *      Enumeration of the address translation unit entry state of multicast
+ *      entris.
+ *
+ * Enumerations:
+ *      GT_MC_INVALID         - invalid entry.
+ *      GT_MC_MGM_STATIC      - static multicast management entries.
+ *      GT_MC_STATIC          - static multicast regular entris.
+ *      GT_MC_STATIC_AVB_ENTRY- static AVB entry if MacAvb is enalbed.
+ *      GT_MC_PRIO_MGM_STATIC - static multicast management entries with
+ *                              priority.
+ *      GT_MC_PRIO_STATIC     - static multicast regular entris with priority.
+ *      GT_MC_PRIO_STATIC_AVB_ENTRY      - static multicast AVB Entry with priority if MacAvb is enabled
+ *      GT_MC_PRIO_STATIC_UNLIMITED_RATE - static multicast regular entris with priority
+ *                                            and without rate limiting.
+ *      GT_MC_MGM_STATIC_UNLIMITED_RATE     - static multicast management entries without
+ *                                            rate limiting.
+ *      GT_MC_STATIC_UNLIMITED_RATE      - static multicast regular entris without
+ *                                            rate limiting.
+ *      GT_MC_PRIO_MGM_STATIC_UNLIMITED_RATE - static multicast management entries with
+ *                              priority and without rate limiting.
+ *
+ * Note: Please refer to the device datasheet for detailed multicast entry states
+ *        that are supported by the device.
+ */
+typedef enum
+{
+    GT_MC_INVALID         = 0,
+    GT_MC_MGM_STATIC_UNLIMITED_RATE = 0x4,
+    GT_MC_STATIC_UNLIMITED_RATE    = 0x5,
+    GT_MC_STATIC_AVB_ENTRY    = 0x5,
+    GT_MC_MGM_STATIC      = 0x6,
+    GT_MC_STATIC          = 0x7,
+    GT_MC_PRIO_MGM_STATIC_UNLIMITED_RATE = 0xC,
+    GT_MC_PRIO_STATIC_UNLIMITED_RATE    = 0xD,
+    GT_MC_PRIO_STATIC_AVB_ENTRY    = 0xD,
+    GT_MC_PRIO_MGM_STATIC = 0xE,
+    GT_MC_PRIO_STATIC     = 0xF
+} GT_ATU_MC_STATE;
+
+
+/*
+ *  typedef: struct GT_ATU_EXT_PRI
+ *
+ *  Description:
+ *        Extanded priority information for the address tarnslaton unit entry.
+ *
+ *        macFPri data is used to override the frame priority on any frame associated
+ *        with this MAC, if the useMacFPri is GT_TRUE and the port's SA and/or
+ *        DA FPriOverride are enabled. SA Frame Priority Overrides can only occur on
+ *        MAC addresses that are Static or where the Port is Locked, and where the port
+ *        is mapped source port for the MAC address.
+ *
+ *        macQPri data is used to override the queue priority on any frame associated
+ *        with this MAC, if the EntryState indicates Queue Priority Override and the
+ *        port's SA and/or DA QPriOverride are enabled.
+ *
+ *  Fields:
+ *      useMacFPri - Use MAC frame priority override. When this is GT_TRUE,
+ *                     MacFPri data can be used to override the frame priority on
+ *                     any frame associated with this MAC.
+ *      macFPri    - MAC frame priority data (0 ~ 7).
+ *      macQPri    - MAC queue priority data (0 ~ 3).
+ *
+ *  Comment:
+ *      Please refer to the device datasheet to find out if this feature is supported.
+ *        When this structure is implemented, the followings are the devices supporting
+ *        this feature:
+ *            88E6065, 88E6035, and 88E6055 support all extanded priority data.
+ *            88E6061 and 88E6031 support only macQPri data
+ *            88EC000 and 88E3020 family use only macFPri data
+ */
+typedef struct
+{
+    GT_BOOL            useMacFPri;
+    GT_U8             macFPri;
+    GT_U8             macQPri;
+} GT_ATU_EXT_PRI;
+
+
+/*
+ *  typedef: struct GT_ATU_ENTRY
+ *
+ *  Description: address tarnslaton unit Entry
+ *
+ *  Fields:
+ *      macAddr    - mac address
+ *      trunkMember- GT_TRUE if entry belongs to a Trunk. This field will be
+ *                     ignored if device does not support Trunk.
+ *      portVec    - port Vector.
+ *                     If trunkMember field is GT_TRUE, this value represents trunk ID.
+ *      prio       - entry priority.
+ *      entryState - the entry state.
+ *        DBNum       - ATU MAC Address Database number. If multiple address
+ *                     databases are not being used, DBNum should be zero.
+ *                     If multiple address databases are being used, this value
+ *                     should be set to the desired address database number.
+ *        exPrio     - extanded priority information. If device support extanded
+ *                     priority, prio field should be ignored.
+ *
+ *  Comment:
+ *      The entryState union Type is determine according to the Mac Type.
+ */
+typedef struct
+{
+    GT_ETHERADDR     macAddr;
+    GT_BOOL            trunkMember;
+    GT_U32            portVec;
+    GT_U8            prio;
+    GT_U8            reserved;
+    GT_U16            DBNum;
+    union
+    {
+        GT_ATU_UC_STATE ucEntryState;
+        GT_ATU_MC_STATE mcEntryState;
+    } entryState;
+    GT_ATU_EXT_PRI    exPrio;
+} GT_ATU_ENTRY;
+
+
+/*
+ *  typedef: struct GT_VTU_DATA
+ *
+ *  Description: VLAN  tarnslaton unit Data Register
+ *
+ *  Fields:
+ *      memberTagP - Membership and Egress Tagging
+ *                   memberTagP[0] is for Port 0, MemberTagP[1] is for port 1, and so on
+ *
+ *  Comment:
+ *     MAX_SWITCH_PORTS is 10 for Octane.
+ *     In the case of FullSail, there are 3 ports. So, the rest 7 is ignored in memeberTagP
+ */
+typedef struct
+{
+    GT_U8     memberTagP[MAX_SWITCH_PORTS];
+    GT_U8     portStateP[MAX_SWITCH_PORTS];
+} GT_VTU_DATA;
+
+/*
+ *  definition for MEMBER_TAG
+ */
+#define MEMBER_EGRESS_UNMODIFIED    0
+#define NOT_A_MEMBER                1
+#define MEMBER_EGRESS_UNTAGGED        2
+#define MEMBER_EGRESS_TAGGED        3
+
+/*
+ *  typedef: struct GT_VTU_EXT_INFO
+ *
+ *  Description:
+ *        Extanded VTU Entry information for Priority Override and Non Rate Limit.
+ *        Frame Priority is used to as the tag's PRI bits if the frame egresses
+ *        the switch tagged. The egresss queue the frame is switch into is not
+ *        modified by the Frame Priority Override.
+ *        Queue Priority is used to determine the egress queue the frame is
+ *        switched into. If the frame egresses tagged, the priority in the frame
+ *        will not be modified by a Queue Priority Override.
+ *        NonRateLimit for VID is used to indicate any frames associated with this
+ *        VID are to bypass ingress and egress rate limiting, if the ingress
+ *        port's VID NRateLimit is also enabled.
+ *
+ *  Fields:
+ *      useVIDFPri - Use VID frame priority override. When this is GT_TRUE and
+ *                     VIDFPriOverride of the ingress port of the frame is enabled,
+ *                     vidFPri data is used to override the frame priority on
+ *                     any frame associated with this VID.
+ *      vidFPri    - VID frame priority data (0 ~ 7).
+ *      useVIDQPri - Use VID queue priority override. When this is GT_TRUE and
+ *                     VIDQPriOverride of the ingress port of the frame is enabled,
+ *                     vidQPri data can be used to override the queue priority on
+ *                     any frame associated with this VID.
+ *      vidQPri    - VID queue priority data (0 ~ 3).
+ *      vidNRateLimit - bypass rate ingress and egress limiting
+ *
+ *  Comment:
+ *      Please refer to the device datasheet to find out if this feature is supported.
+ *        When this structure is implemented, the followings are the devices supporting
+ *        this feature:
+ *            88E6065, 88E6035, and 88E6055 support all data.
+ *            88E6061 and 88E6031 support only vidNRateLimit.
+ */
+typedef struct
+{
+    GT_BOOL            useVIDFPri;
+    GT_U8             vidFPri;
+    GT_BOOL            useVIDQPri;
+    GT_U8             vidQPri;
+    GT_BOOL            vidNRateLimit;
+} GT_VTU_EXT_INFO;
+
+
+/*
+ *  typedef: struct GT_VTU_ENTRY
+ *
+ *  Description: VLAN tarnslaton unit Entry
+ *        Each field in the structure is device specific, i.e., some fields may not
+ *        be supported by the used switch device. In such case, those fields are
+ *        ignored by the DSDT driver. Please refer to the datasheet for the list of
+ *        supported fields.
+ *
+ *  Fields:
+ *      DBNum      - database number or FID (forwarding information database)
+ *      vid        - VLAN ID
+ *      vtuData    - VTU data
+ *        vidPriOverride - override the priority on any frame associated with this VID
+ *        vidPriority - VID Priority bits (0 ~ 7)
+ *        sid           - 802.1s Port State Database ID
+ *        vidPolicy  - indicate that the frame with this VID uses VID Policy
+ *                     (see gprtSetPolicy API).
+ *        vidExInfo  - extanded information for VTU entry. If the device supports extanded
+ *                     information, vidPriorityOverride and vidPriority values are
+ *                     ignored.
+ */
+typedef struct
+{
+    GT_U16        DBNum;
+    GT_U16        vid;
+    GT_VTU_DATA   vtuData;
+    GT_BOOL          vidPriOverride;
+    GT_U8          vidPriority;
+    GT_U8          sid;
+    GT_BOOL          vidPolicy;
+    GT_VTU_EXT_INFO    vidExInfo;
+} GT_VTU_ENTRY;
+
+
+/*
+ * Typedef: enum GT_STU_OPERARION
+ *
+ * Description: Defines the STU operations
+ *
+ * Fields:
+ *      LOAD_PURGE_STU_ENTRY    - Load / Purge entry.
+ *      GET_NEXT_STU_ENTRY      - Get next STU  entry.
+ *
+ */
+typedef enum
+{
+    LOAD_PURGE_STU_ENTRY = 5,
+    GET_NEXT_STU_ENTRY =6
+} GT_STU_OPERATION;
+
+
+/*
+ *  typedef: struct GT_STU_ENTRY
+ *
+ *  Description: 802.1s Port State Information Database (STU) Entry
+ *
+ *  Fields:
+ *      sid       - STU ID
+ *        portState - Per VLAN Port States for each port.
+ */
+typedef struct
+{
+    GT_U16                sid;
+    GT_PORT_STP_STATE    portState[MAX_SWITCH_PORTS];
+} GT_STU_ENTRY;
+
+
+/*
+ *  typedef: struct GT_VTU_INT_STATUS
+ *
+ *  Description: VLAN tarnslaton unit interrupt status
+ *
+ *  Fields:
+ *      intCause  - VTU Interrupt Cause
+ *                    GT_VTU_FULL_VIOLATION,GT_MEMEBER_VIOLATION,or
+ *                    GT_MISS_VIOLATION
+ *      SPID      - source port number
+ *                     if intCause is GT_VTU_FULL_VIOLATION, it means nothing
+ *      vid       - VLAN ID
+ *                     if intCause is GT_VTU_FULL_VIOLATION, it means nothing
+ */
+typedef struct
+{
+    GT_U16   vtuIntCause;
+    GT_U8    spid;
+    GT_U16   vid;
+} GT_VTU_INT_STATUS;
+
+/*
+ *  typedef: struct GT_ATU_INT_STATUS
+ *
+ *  Description: VLAN tarnslaton unit interrupt status
+ *
+ *  Fields:
+ *      intCause  - ATU Interrupt Cause
+ *                    GT_FULL_VIOLATION,GT_MEMEBER_VIOLATION,
+ *                    GT_MISS_VIOLATION, GT_AGE_VIOLATION, or
+ *                    GT_AGE_OUT_VIOLATION
+ *      SPID      - source port number
+ *                     if intCause is GT_FULL_VIOLATION, it means nothing
+ *      DBNum     - DB Num (or FID)
+ *                     if intCause is GT_FULL_VIOLATION, it means nothing
+ *        macAddr      - MAC Address
+ */
+typedef struct
+{
+    GT_U16   atuIntCause;
+    GT_U8    spid;
+    GT_U8    dbNum;
+    GT_ETHERADDR  macAddr;
+} GT_ATU_INT_STATUS;
+
+/*
+* Definition for VTU interrupt
+*/
+#define GT_MEMBER_VIOLATION        0x4
+#define GT_MISS_VIOLATION        0x2
+#define GT_VTU_FULL_VIOLATION    0x1
+/*
+* Definitions for ATU interrupt in Gigabit switch are the same as
+* the ones for VTU interrupt. Here we just redefine the FULL_VIOLATION for
+* both VTU and ATU.
+*/
+#define GT_FULL_VIOLATION        0x1
+
+#define GT_AGE_VIOLATION        0x8
+#define GT_AGE_OUT_VIOLATION    0x10
+
+
+/*
+
+  * Typedef: enum GT_PVT_OPERATION
+ *
+ * Description: Defines the PVT (Cross Chip Port VLAN Table) Operation type
+ *
+ * Fields:
+ *      PVT_INITIALIZE - Initialize all resources to the inital state
+ *      PVT_WRITE      - Write to the selected PVT entry
+ *      PVT_READ       - Read from the selected PVT entry
+ */
+typedef enum
+{
+    PVT_INITIALIZE     = 0x1,
+    PVT_WRITE        = 0x3,
+    PVT_READ        = 0x4
+} GT_PVT_OPERATION;
+
+
+/*
+ *  typedef: struct GT_PVT_OP_DATA
+ *
+ *  Description: data required by PVT (Cross Chip Port VLAN Table) Operation
+ *
+ *  Fields:
+ *      pvtAddr - pointer to the desired entry of PVT
+ *      pvtData - Cross Chip Port VLAN data for the entry pointed by pvtAddr
+ */
+typedef struct
+{
+    GT_U32    pvtAddr;
+    GT_U32    pvtData;
+} GT_PVT_OP_DATA;
+
+
+/*
+ *  typedef: enum GT_PIRL_FC_DEASSERT
+ *
+ *  Description: Enumeration of the port flow control de-assertion mode on PIRL.
+ *
+ *  Enumerations:
+ *      GT_PIRL_FC_DEASSERT_EMPTY -
+ *                De-assert when the ingress rate resource has become empty
+ *        GT_PIRL_FC_DEASSERT_CBS_LIMIT -
+ *                De-assert when the ingress rate resource has enough room as
+ *                specified by the CBSLimit.
+ */
+typedef enum
+{
+    GT_PIRL_FC_DEASSERT_EMPTY = 0,
+    GT_PIRL_FC_DEASSERT_CBS_LIMIT
+} GT_PIRL_FC_DEASSERT;
+
+
+/*
+ *  typedef: enum GT_PIRL_ELIMIT_MODE
+ *
+ *  Description: Enumeration of the port egress rate limit counting mode.
+ *
+ *  Enumerations:
+ *      GT_PIRL_ELIMIT_FRAME -
+ *                Count the number of frames
+ *      GT_PIRL_ELIMIT_LAYER1 -
+ *                Count all Layer 1 bytes:
+ *                Preamble (8bytes) + Frame's DA to CRC + IFG (12bytes)
+ *      GT_PIRL_ELIMIT_LAYER2 -
+ *                Count all Layer 2 bytes: Frame's DA to CRC
+ *      GT_PIRL_ELIMIT_LAYER3 -
+ *                Count all Layer 3 bytes:
+ *                Frame's DA to CRC - 18 - 4 (if frame is tagged)
+ */
+typedef enum
+{
+    GT_PIRL_ELIMIT_FRAME = 0,
+    GT_PIRL_ELIMIT_LAYER1,
+    GT_PIRL_ELIMIT_LAYER2,
+    GT_PIRL_ELIMIT_LAYER3
+} GT_PIRL_ELIMIT_MODE;
+
+
+/* typedef: enum GT_RATE_LIMIT_MODE
+ * The ingress limit mode in the rate control register (0xA)
+ */
+
+typedef enum
+{
+    GT_LIMT_ALL = 0,         /* limit and count all frames */
+    GT_LIMIT_FLOOD,          /* limit and count Broadcast, Multicast and flooded unicast frames */
+    GT_LIMIT_BRDCST_MLTCST,    /* limit and count Broadcast and Multicast frames */
+    GT_LIMIT_BRDCST           /* limit and count Broadcast frames */
+} GT_RATE_LIMIT_MODE;
+
+/* typedef: enum GT_PRI0_RATE
+ * The ingress data rate limit for priority 0 frames
+ */
+
+typedef enum
+{
+    GT_NO_LIMIT = 0,     /* Not limited   */
+    GT_128K,              /* 128K bits/sec */
+    GT_256K,              /* 256K bits/sec */
+    GT_512K,              /* 512 bits/sec */
+    GT_1M,              /* 1M  bits/sec */
+    GT_2M,              /* 2M  bits/sec */
+    GT_4M,              /* 4M  bits/sec */
+    GT_8M,              /* 8M  bits/sec */
+    GT_16M,              /* 16M  bits/sec, Note: supported only by Gigabit Ethernet Switch */
+    GT_32M,              /* 32M  bits/sec, Note: supported only by Gigabit Ethernet Switch */
+    GT_64M,              /* 64M  bits/sec, Note: supported only by Gigabit Ethernet Switch */
+    GT_128M,              /* 128M  bits/sec, Note: supported only by Gigabit Ethernet Switch */
+    GT_256M              /* 256M  bits/sec, Note: supported only by Gigabit Ethernet Switch */
+} GT_PRI0_RATE,GT_EGRESS_RATE;
+
+
+/*
+ * Typedef: union GT_ERATE_TYPE
+ *
+ * Description: Egress Rate
+ *
+ * Fields:
+ *      definedRate - GT_EGRESS_RATE enum type should be used on the following devices:
+ *                        88E6218, 88E6318, 88E6063, 88E6083, 88E6181, 88E6183, 88E6093
+ *                        88E6095, 88E6185, 88E6108, 88E6065, 88E6061, and their variations.
+ *      kbRate      - rate in kbps that should be used on the following devices:
+ *                        88E6097, 88E6096 with count mode of non frame, such as
+ *                                    ALL_LAYER1, ALL_LAYER2, and ALL_LAYER3
+ *                        64kbps ~ 1Mbps    : increments of 64kbps,
+ *                        1Mbps ~ 100Mbps   : increments of 1Mbps, and
+ *                        100Mbps ~ 1000Mbps: increments of 10Mbps
+ *                        Therefore, the valid values are:
+ *                            64, 128, 192, 256, 320, 384,..., 960,
+ *                            1000, 2000, 3000, 4000, ..., 100000,
+ *                            110000, 120000, 130000, ..., 1000000.
+ *      fRate         - frame per second that should be used on the following devices:
+ *                        88E6097, 88E6096 with count mode of frame (GT_PIRL_COUNT_FRAME)
+ */
+typedef union
+{
+    GT_EGRESS_RATE    definedRate;
+    GT_U32            kbRate;
+    GT_U32            fRate;
+} GT_ERATE_TYPE;
+
+/*
+ * Formula for Rate Limit of Gigabit Switch family and Enhanced FastEthernet Switch
+ */
+#define GT_GET_RATE_LIMIT(_kbps)    \
+        ((_kbps)?(8000000 / (28 * (_kbps))):0)
+#define GT_GET_RATE_LIMIT2(_kbps)    \
+        ((_kbps)?(8000000 / (32 * (_kbps)) + (8000000 % (32 * (_kbps))?1:0)):0)
+#define GT_GET_RATE_LIMIT3(_kbps)    \
+        ((_kbps)?(8000000 / (40 * (_kbps)) + (8000000 % (40 * (_kbps))?1:0)):0)
+
+#define MAX_RATE_LIMIT        256000    /* unit of Kbps */
+#define MIN_RATE_LIMIT        65        /* unit of Kbps */
+
+
+#define GT_GET_RATE_LIMIT_PER_FRAME(_frames, _dec)    \
+        ((_frames)?(1000000000 / (32 * (_frames)) + (1000000000 % (32 * (_frames))?1:0)):0)
+
+#define GT_GET_RATE_LIMIT_PER_BYTE(_kbps, _dec)    \
+        ((_kbps)?((8000000*(_dec)) / (32 * (_kbps)) + ((8000000*(_dec)) % (32 * (_kbps))?1:0)):0)
+
+/*
+ * typedef: enum GT_BURST_SIZE
+ * The ingress data rate limit burst size windows selection
+ */
+
+typedef enum
+{
+    GT_BURST_SIZE_12K = 0,     /* 12K byte burst size */
+    GT_BURST_SIZE_24K,        /* 24K byte burst size */
+    GT_BURST_SIZE_48K,        /* 48K byte burst size */
+    GT_BURST_SIZE_96K          /* 96K byte burst size */
+} GT_BURST_SIZE;
+
+/*
+ * typedef: enum GT_BURST_RATE
+ * The ingress data rate limit based on burst size
+ */
+
+typedef enum
+{
+    GT_BURST_NO_LIMIT = 0,     /* Not limited   */
+    GT_BURST_64K,          /* 64K bits/sec */
+    GT_BURST_128K,      /* 128K bits/sec */
+    GT_BURST_256K,      /* 256K bits/sec */
+    GT_BURST_384K,      /* 384K bits/sec */
+    GT_BURST_512K,      /* 512 bits/sec */
+    GT_BURST_640K,      /* 640K bits/sec */
+    GT_BURST_768K,      /* 768K bits/sec */
+    GT_BURST_896K,      /* 896K bits/sec */
+    GT_BURST_1M,        /* 1M  bits/sec */
+    GT_BURST_1500K,      /* 1.5M bits/sec */
+    GT_BURST_2M,        /* 2M  bits/sec */
+    GT_BURST_4M,        /* 4M  bits/sec */
+    GT_BURST_8M,           /* 8M  bits/sec */
+    GT_BURST_16M,          /* 16M  bits/sec */
+    GT_BURST_32M,          /* 32M  bits/sec */
+    GT_BURST_64M,          /* 64M  bits/sec */
+    GT_BURST_128M,         /* 128M  bits/sec */
+    GT_BURST_256M          /* 256M  bits/sec */
+} GT_BURST_RATE;
+
+/*
+ * Formula for burst based Rate Limit
+ */
+#define GT_GET_BURST_RATE_LIMIT(_bsize,_kbps)    \
+        ((_kbps)?(((_bsize)+1)*8000000 / (32 * (_kbps)) +         \
+                (((_bsize)+1)*8000000 % (32 * (_kbps))?1:0))    \
+                :0)
+
+/*
+ * Typedef: enum GT_PIRL_OPERATION
+ *
+ * Description: Defines the PIRL (Port Ingress Rate Limit) Operation type
+ *
+ * Fields:
+ *      PIRL_INIT_ALL_RESOURCE - Initialize all resources to the inital state
+ *      PIRL_INIT_RESOURCE     - Initialize selected resources to the inital state
+ *      PIRL_WRITE_RESOURCE    - Write to the selected resource/register
+ *      PIRL_READ_RESOURCE     - Read from the selected resource/register
+ */
+typedef enum
+{
+    PIRL_INIT_ALL_RESOURCE     = 0x1,
+    PIRL_INIT_RESOURCE        = 0x2,
+    PIRL_WRITE_RESOURCE        = 0x3,
+    PIRL_READ_RESOURCE        = 0x4
+} GT_PIRL_OPERATION, GT_PIRL2_OPERATION;
+
+
+/*
+ *  typedef: struct GT_PIRL_OP_DATA
+ *
+ *  Description: data required by PIRL Operation
+ *
+ *  Fields:
+ *      irlUnit   - Ingress Rate Limit Unit that defines one of IRL resources.
+ *      irlReg    - Ingress Rate Limit Register.
+ *      irlData   - Ingress Rate Limit Data.
+ */
+typedef struct
+{
+    GT_U32    irlUnit;
+    GT_U32    irlReg;
+    GT_U32    irlData;
+} GT_PIRL_OP_DATA;
+
+/*
+ *  typedef: struct GT_PIRL2_OP_DATA
+ *
+ *  Description: data required by PIRL Operation
+ *
+ *  Fields:
+ *      irlPort   - Ingress Rate Limiting port (physical port number).
+ *      irlRes    - Ingress Rate Limit Resource.
+ *      irlReg    - Ingress Rate Limit Register.
+ *      irlData   - Ingress Rate Limit Data.
+ */
+typedef struct
+{
+    GT_U32    irlPort;
+    GT_U32    irlRes;
+    GT_U32    irlReg;
+    GT_U32    irlData;
+} GT_PIRL2_OP_DATA;
+
+/*
+ * Typedef: enum GT_PIRL_ACTION
+ *
+ * Description: Defines the Action that should be taken when
+ *        there there are not enough tokens to accept the entire incoming frame
+ *
+ * Fields:
+ *        PIRL_ACTION_ACCEPT - accept the frame
+ *        PIRL_ACTION_USE_LIMIT_ACTION - use ESB Limit Action
+ */
+typedef enum
+{
+    PIRL_ACTION_USE_LIMIT_ACTION = 0x0,
+    PIRL_ACTION_ACCEPT     = 0x1
+} GT_PIRL_ACTION;
+
+/*
+ * Typedef: enum GT_ESB_LIMIT_ACTION
+ *
+ * Description: Defines the ESB Limit Action that should be taken when
+ *        the incoming port information rate exceeds the EBS_Limit.
+ *
+ * Fields:
+ *        ESB_LIMIT_ACTION_DROP - drop packets
+ *        ESB_LIMIT_ACTION_FC   - send flow control packet
+ */
+typedef enum
+{
+    ESB_LIMIT_ACTION_DROP     = 0x0,
+    ESB_LIMIT_ACTION_FC        = 0x1
+} GT_ESB_LIMIT_ACTION;
+
+
+/*
+ * Typedef: enum GT_BUCKET_RATE_TYPE
+ *
+ * Description: Defines the Bucket Rate Type
+ *
+ * Fields:
+ *        BUCKET_TYPE_TRAFFIC_BASED    - bucket is traffic type based
+ *        BUCKET_TYPE_RATE_BASED        - bucket is rate based
+ */
+typedef enum
+{
+    BUCKET_TYPE_TRAFFIC_BASED    = 0x0,
+    BUCKET_TYPE_RATE_BASED        = 0x1
+} GT_BUCKET_RATE_TYPE;
+
+/*
+ * Definition for GT_BUCKET_TYPE_TRAFFIC_BASED
+ *
+ * Description: Defines the Traffic Type that is used when Bucket Rate Type
+ *        is traffic type based (BUCKET_TYPE_TRAFFIC_BASED).
+ *        Please refer to the device datasheet in order to check which traffic
+ *        types are supported.
+ *
+ * Definition:
+ *        BUCKET_TRAFFIC_UNKNOWN_UNICAST    - unknown unicast frame
+ *        BUCKET_TRAFFIC_UNKNOWN_MULTICAST- unknown multicast frame
+ *        BUCKET_TRAFFIC_BROADCAST        - broadcast frame
+ *        BUCKET_TRAFFIC_MULTICAST        - multicast frame
+ *        BUCKET_TRAFFIC_UNICAST            - unicast frame
+ *        BUCKET_TRAFFIC_MGMT_FRAME        - management frame
+ *        BUCKET_TRAFFIC_ARP                - arp frame
+ *        BUCKET_TRAFFIC_TCP_DATA            - TCP Data
+ *        BUCKET_TRAFFIC_TCP_CTRL            - TCP Ctrl (if any of the TCP Flags[5:0] are set)
+ *        BUCKET_TRAFFIC_UDP                - UDP
+ *        BUCKET_TRAFFIC_NON_TCPUDP        - covers IGMP,ICMP,GRE,IGRP,L2TP
+ *        BUCKET_TRAFFIC_IMS                - Ingress Monitor Source
+ *        BUCKET_TRAFFIC_POLICY_MIRROR    - Policy Mirror
+ *        BUCKET_TRAFFIC_PLICY_TRAP        - Policy Trap
+ */
+#define BUCKET_TRAFFIC_UNKNOWN_UNICAST      0x01
+#define BUCKET_TRAFFIC_UNKNOWN_MULTICAST    0x02
+#define BUCKET_TRAFFIC_BROADCAST            0x04
+#define BUCKET_TRAFFIC_MULTICAST            0x08
+#define BUCKET_TRAFFIC_UNICAST                0x10
+#define BUCKET_TRAFFIC_MGMT_FRAME            0x20
+#define BUCKET_TRAFFIC_ARP                    0x40
+#define BUCKET_TRAFFIC_TCP_DATA                0x100
+#define BUCKET_TRAFFIC_TCP_CTRL                0x200
+#define BUCKET_TRAFFIC_UDP                    0x400
+#define BUCKET_TRAFFIC_NON_TCPUDP            0x800
+#define BUCKET_TRAFFIC_IMS                    0x1000
+#define BUCKET_TRAFFIC_POLICY_MIRROR        0x2000
+#define BUCKET_TRAFFIC_PLICY_TRAP            0x4000
+
+/*
+ *  typedef: enum GT_PIRL_COUNT_MODE
+ *
+ *  Description: Enumeration of the port egress rate limit counting mode.
+ *
+ *  Enumerations:
+ *      GT_PIRL_COUNT_ALL_LAYER1 -
+ *                Count all Layer 1 bytes:
+ *                Preamble (8bytes) + Frame's DA to CRC + IFG (12bytes)
+ *      GT_PIRL_COUNT_ALL_LAYER2 -
+ *                Count all Layer 2 bytes: Frame's DA to CRC
+ *      GT_PIRL_COUNT_ALL_LAYER3 -
+ *                Count all Layer 3 bytes:
+ *                Frame's DA to CRC - 18 - 4 (if frame is tagged)
+ */
+typedef enum
+{
+    GT_PIRL_COUNT_ALL_LAYER1 = 0,
+    GT_PIRL_COUNT_ALL_LAYER2,
+    GT_PIRL_COUNT_ALL_LAYER3
+} GT_PIRL_COUNT_MODE;
+
+/*
+ *  typedef: enum GT_PIRL2_COUNT_MODE
+ *
+ *  Description: Enumeration of the port egress rate limit counting mode.
+ *
+ *  Enumerations:
+ *      GT_PIRL2_COUNT_FRAME -
+ *                Count the number of frames
+ *      GT_PIRL2_COUNT_ALL_LAYER1 -
+ *                Count all Layer 1 bytes:
+ *                Preamble (8bytes) + Frame's DA to CRC + IFG (12bytes)
+ *      GT_PIRL2_COUNT_ALL_LAYER2 -
+ *                Count all Layer 2 bytes: Frame's DA to CRC
+ *      GT_PIRL2_COUNT_ALL_LAYER3 -
+ *                Count all Layer 3 bytes:
+ *                Frame's DA to CRC - 18 - 4 (if frame is tagged)
+ */
+typedef enum
+{
+    GT_PIRL2_COUNT_FRAME = 0,
+    GT_PIRL2_COUNT_ALL_LAYER1,
+    GT_PIRL2_COUNT_ALL_LAYER2,
+    GT_PIRL2_COUNT_ALL_LAYER3
+} GT_PIRL2_COUNT_MODE;
+
+
+
+/*
+ *  typedef: struct GT_PIRL_RESOURCE
+ *
+ *  Description: data structure that represents a PIRL Resource
+ *
+ *  Fields:
+ *      accountQConf    - account discarded frames due to queue congestion
+ *      accountFiltered - account filtered frames
+ *        ebsLimitAction  - action should be taken when the incoming rate exceeds
+ *                          the ebsLimit.
+ *                                ESB_LIMIT_ACTION_DROP - drop packets
+ *                                ESB_LIMIT_ACTION_FC   - send flow control packet
+ *        ebsLimit        - Excess Burst Size limit ( 0 ~ 0xFFFFFF)
+ *        cbsLimit        - Committed BUrst Size limit (expected to be 2kBytes)
+ *        bktRateFactor   - bucket rate factor = bucketDecrement/updateInterval,
+ *                          where updateInterval indicates the rate at which the
+ *                          bucket needs to be updated with tokens, or 1/CIR,
+ *                          where CIR is the committed information rate in kbps.
+ *                          bucketDecrement indicates the amount of tokens that
+ *                          need to be removed per each bucket decrement.
+ *        bktIncrement    - the amount of tokens that need to be added for each
+ *                          byte of packet information.
+ *        bktRateType        - bucket is either rate based or traffic type based.
+ *                                BUCKET_TYPE_RATE_BASED, or
+ *                                BUCKET_TYPE_TRAFFIC_BASED
+ *        bktTypeMask        - used if bktRateType is BUCKET_TYPE_TRAFFIC_BASED.
+ *                          any combination of the following definitions:
+ *                                BUCKET_TRAFFIC_UNKNOWN_UNICAST,
+ *                                BUCKET_TRAFFIC_UNKNOWN_MULTICAST,
+ *                                BUCKET_TRAFFIC_BROADCAST,
+ *                                BUCKET_TRAFFIC_MULTICAST,
+ *                                BUCKET_TRAFFIC_UNICAST,
+ *                                BUCKET_TRAFFIC_MGMT_FRAME, and
+ *                                BUCKET_TRAFFIC_ARP
+ *        byteTobeCounted    - bytes to be counted for accounting
+ *                                GT_PIRL_COUNT_ALL_LAYER1,
+ *                                GT_PIRL_COUNT_ALL_LAYER2, or
+ *                                GT_PIRL_COUNT_ALL_LAYER3
+ *
+ */
+typedef struct
+{
+    GT_BOOL        accountQConf;
+    GT_BOOL        accountFiltered;
+    GT_ESB_LIMIT_ACTION ebsLimitAction;
+    GT_U32        ebsLimit;
+    GT_U32        cbsLimit;
+    GT_U32        bktRateFactor;
+    GT_U32        bktIncrement;
+    GT_BUCKET_RATE_TYPE    bktRateType;
+    GT_U32        bktTypeMask;
+    GT_PIRL_COUNT_MODE    byteTobeCounted;
+} GT_PIRL_RESOURCE;
+
+/*
+ *  typedef: struct GT_PIRL_CUSTOM_RATE_LIMIT
+ *
+ *  Description: The parameters that decides Ingress Rate Limit vary depending on
+ *                the application. Since DSDT driver cannot cover all the cases,
+ *                this structure is provided for the custom parameter setting.
+ *                However, in most cases, user may ingore this structure by setting
+ *                isValid to GT_FALSE. If Ingress Rate Limit is too much off from
+ *                the expected rate, please contact FAE and gets the correct ebsLimit,
+ *                cbsLimit,bktIncrement, and bktRateFactor value and use this structure
+ *                to do custom parameter setting.
+ *
+ *        isValid         - If GT_TRUE, the paramers in this structure are used
+ *                          to program PIRL Resource's Rate Limit. And ingressRate
+ *                          in GT_PIRL_BUCKET_DATA structure are ignored.
+ *                          If GT_FALSE, ingressRate in GT_PIRL_BUCKET_DATA structure
+ *                          is used for Resource's Rate Limit.
+ *        ebsLimit        - Excess Burst Size limit ( 0 ~ 0xFFFFFF)
+ *        cbsLimit        - Committed Burst Size limit (expected to be 2kBytes)
+ *        bktIncrement    - the amount of tokens that need to be added for each
+ *                          byte of packet information.
+ *        bktRateFactor   - bucket rate factor = bucketDecrement/updateInterval,
+ *                          where updateInterval indicates the rate at which the
+ *                          bucket needs to be updated with tokens, or 1/CIR,
+ *                          where CIR is the committed information rate in kbps.
+ *                          bucketDecrement indicates the amount of tokens that
+ *                          need to be removed per each bucket decrement.
+*/
+typedef struct
+{
+    GT_BOOL        isValid;
+    GT_U32        ebsLimit;
+    GT_U32        cbsLimit;
+    GT_U32        bktIncrement;
+    GT_U32        bktRateFactor;
+} GT_PIRL_CUSTOM_RATE_LIMIT;
+
+/*
+ *  typedef: struct GT_PIRL_BUCKET_DATA
+ *
+ *  Description: data structure for PIRL Bucket programing that is resource based
+ *
+ *  Fields:
+ *        ingressRate       - commited ingress rate in kbps.
+ *                          64kbps ~ 1Mbps    : increments of 64kbps,
+ *                          1Mbps ~ 100Mbps   : increments of 1Mbps, and
+ *                          100Mbps ~ 200Mbps : increments of 10Mbps
+ *                          Therefore, the valid values are:
+ *                                64, 128, 192, 256, 320, 384,..., 960,
+ *                                1000, 2000, 3000, 4000, ..., 100000,
+ *                                110000, 120000, 130000, ..., 200000.
+ *        customSetup       - custom ingress rate parameter setup. please refer to
+ *                          GT_PIRL_CUSTOM_RATE_LIMIT structure.
+ *      accountQConf    - account discarded frames due to queue congestion
+ *      accountFiltered - account filtered frames
+ *        esbLimitAction     - action should be taken when the incoming rate exceeds
+ *                          the limit.
+ *                                ESB_LIMIT_ACTION_DROP - drop packets
+ *                                ESB_LIMIT_ACTION_FC   - send flow control packet
+ *        fcDeassertMode    - port flow control de-assertion mode when limitAction is
+ *                          set to ESB_LIMIT_ACTION_FC.
+ *                          fcDeassertMode[0] for port 0, fcDeassertMode[1] for
+ *                          port 1, etc. If port x does not share the bucket,
+ *                          fcDeassertMode[x] data will be ignored.
+ *                                GT_PIRL_FC_DEASSERT_EMPTY -
+ *                                    De-assert when the ingress rate resource has
+ *                                    become empty.
+ *                                GT_PIRL_FC_DEASSERT_CBS_LIMIT -
+ *                                    De-assert when the ingress rate resource has
+ *                                    enough room as specified by the CBSLimit.
+ *        bktRateType        - bucket is either rate based or traffic type based.
+ *                                BUCKET_TYPE_RATE_BASED, or
+ *                                BUCKET_TYPE_TRAFFIC_BASED
+ *        bktTypeMask        - used if bktRateType is BUCKET_TYPE_TRAFFIC_BASED.
+ *                          any combination of the following definitions:
+ *                                BUCKET_TRAFFIC_UNKNOWN_UNICAST,
+ *                                BUCKET_TRAFFIC_UNKNOWN_MULTICAST,
+ *                                BUCKET_TRAFFIC_BROADCAST,
+ *                                BUCKET_TRAFFIC_MULTICAST,
+ *                                BUCKET_TRAFFIC_UNICAST,
+ *                                BUCKET_TRAFFIC_MGMT_FRAME, and
+ *                                BUCKET_TRAFFIC_ARP
+ *        byteTobeCounted    - bytes to be counted for accounting
+ *                                GT_PIRL_COUNT_ALL_LAYER1,
+ *                                GT_PIRL_COUNT_ALL_LAYER2, or
+ *                                GT_PIRL_COUNT_ALL_LAYER3
+ *
+ */
+typedef struct
+{
+    GT_U32        ingressRate;
+    GT_PIRL_CUSTOM_RATE_LIMIT customSetup;
+    GT_BOOL        accountQConf;
+    GT_BOOL        accountFiltered;
+    GT_ESB_LIMIT_ACTION ebsLimitAction;
+    GT_PIRL_FC_DEASSERT fcDeassertMode[MAX_SWITCH_PORTS];
+    GT_BUCKET_RATE_TYPE    bktRateType;
+    GT_U32        bktTypeMask;
+    GT_PIRL_COUNT_MODE    byteTobeCounted;
+} GT_PIRL_DATA;
+
+
+/*
+ *  typedef: struct GT_PIRL2_RESOURCE
+ *
+ *  Description: data structure that represents a PIRL Resource
+ *
+ *  Fields:
+ *      accountQConf    - account discarded frames due to queue congestion
+ *      accountFiltered - account filtered frames
+ *      mgmtNrlEn         - exclude management frame from ingress rate limiting calculation
+ *      saNrlEn         - exclude from ingress rate limiting calculation if the SA of the
+ *                          frame is in ATU with EntryState that indicates Non Rate Limited.
+ *      daNrlEn         - exclude from ingress rate limiting calculation if the DA of the
+ *                          frame is in ATU with EntryState that indicates Non Rate Limited.
+ *        samplingMode    - sample one out of so many frames/bytes for a stream of frames
+ *        actionMode        - action should be taken when there are not enough tokens
+ *                          to accept the entire incoming frame
+ *                                PIRL_ACTION_ACCEPT - accept the frame
+ *                                PIRL_ACTION_USE_LIMIT_ACTION - use limitAction
+ *        ebsLimitAction  - action should be taken when the incoming rate exceeds
+ *                          the ebsLimit.
+ *                                ESB_LIMIT_ACTION_DROP - drop packets
+ *                                ESB_LIMIT_ACTION_FC   - send flow control packet
+ *        ebsLimit        - Excess Burst Size limit ( 0 ~ 0xFFFFFF)
+ *        cbsLimit        - Committed BUrst Size limit (expected to be 2kBytes)
+ *        bktRateFactor   - bucket rate factor = bucketDecrement/updateInterval,
+ *                          where updateInterval indicates the rate at which the
+ *                          bucket needs to be updated with tokens, or 1/CIR,
+ *                          where CIR is the committed information rate in kbps.
+ *                          bucketDecrement indicates the amount of tokens that
+ *                          need to be removed per each bucket decrement.
+ *        bktIncrement    - the amount of tokens that need to be added for each
+ *                          byte of packet information.
+ *        fcDeassertMode    - flow control de-assertion mode when limitAction is
+ *                          set to ESB_LIMIT_ACTION_FC.
+ *                                GT_PIRL_FC_DEASSERT_EMPTY -
+ *                                    De-assert when the ingress rate resource has
+ *                                    become empty.
+ *                                GT_PIRL_FC_DEASSERT_CBS_LIMIT -
+ *                                    De-assert when the ingress rate resource has
+ *                                    enough room as specified by the CBSLimit.
+ *        bktRateType        - bucket is either rate based or traffic type based.
+ *                                BUCKET_TYPE_RATE_BASED, or
+ *                                BUCKET_TYPE_TRAFFIC_BASED
+ *      priORpt         - determine the incoming frames that get rate limited using
+ *                          this ingress rate resource.
+ *                                  GT_TRUE - typeMask OR priMask
+ *                                  GT_FALSE - typeMask AND priMask
+ *        priMask         - priority bit mask that each bit indicates one of the four
+ *                          queue priorities. Setting each one of these bits indicates
+ *                          that this particular rate resource is slated to account for
+ *                          incoming frames with the enabled bits' priority.
+ *        bktTypeMask        - used if bktRateType is BUCKET_TYPE_TRAFFIC_BASED.
+ *                          any combination of the following definitions:
+ *                                BUCKET_TRAFFIC_UNKNOWN_UNICAST,
+ *                                BUCKET_TRAFFIC_UNKNOWN_MULTICAST,
+ *                                BUCKET_TRAFFIC_BROADCAST,
+ *                                BUCKET_TRAFFIC_MULTICAST,
+ *                                BUCKET_TRAFFIC_UNICAST,
+ *                                BUCKET_TRAFFIC_MGMT_FRAME,
+ *                                BUCKET_TRAFFIC_ARP,
+ *                                BUCKET_TRAFFIC_TCP_DATA,
+ *                                BUCKET_TRAFFIC_TCP_CTRL,
+ *                                BUCKET_TRAFFIC_UDP,
+ *                                BUCKET_TRAFFIC_NON_TCPUDP,
+ *                                BUCKET_TRAFFIC_IMS,
+ *                                BUCKET_TRAFFIC_POLICY_MIRROR, and
+ *                                BUCKET_TRAFFIC_PLICY_TRAP
+ *        byteTobeCounted    - bytes to be counted for accounting
+ *                                GT_PIRL2_COUNT_FRAME,
+ *                                GT_PIRL2_COUNT_ALL_LAYER1,
+ *                                GT_PIRL2_COUNT_ALL_LAYER2, or
+ *                                GT_PIRL2_COUNT_ALL_LAYER3
+ *
+ */
+typedef struct
+{
+    GT_BOOL        accountQConf;
+    GT_BOOL        accountFiltered;
+    GT_BOOL        mgmtNrlEn;
+    GT_BOOL        saNrlEn;
+    GT_BOOL        daNrlEn;
+    GT_BOOL        samplingMode;
+    GT_PIRL_ACTION    actionMode;
+    GT_ESB_LIMIT_ACTION ebsLimitAction;
+    GT_U32        ebsLimit;
+    GT_U32        cbsLimit;
+    GT_U32        bktRateFactor;
+    GT_U32        bktIncrement;
+    GT_PIRL_FC_DEASSERT fcDeassertMode;
+    GT_BUCKET_RATE_TYPE    bktRateType;
+    GT_BOOL        priORpt;
+    GT_U32        priMask;
+    GT_U32        bktTypeMask;
+    GT_PIRL2_COUNT_MODE    byteTobeCounted;
+} GT_PIRL2_RESOURCE;
+
+
+/*
+ *  typedef: struct GT_PIRL2_BUCKET_DATA
+ *
+ *  Description: data structure for PIRL2 Bucket programing that is port based.
+ *
+ *  Fields:
+ *        ingressRate       - commited ingress rate in kbps.
+ *                          64kbps ~ 1Mbps    : increments of 64kbps,
+ *                          1Mbps ~ 100Mbps   : increments of 1Mbps, and
+ *                          100Mbps ~ 200Mbps : increments of 10Mbps
+ *                          Therefore, the valid values are:
+ *                                64, 128, 192, 256, 320, 384,..., 960,
+ *                                1000, 2000, 3000, 4000, ..., 100000,
+ *                                110000, 120000, 130000, ..., 200000.
+ *        customSetup       - custom ingress rate parameter setup. please refer to
+ *                          GT_PIRL_CUSTOM_RATE_LIMIT structure.
+ *      accountQConf    - account discarded frames due to queue congestion
+ *      accountFiltered - account filtered frames
+ *      mgmtNrlEn         - exclude management frame from ingress rate limiting calculation
+ *      saNrlEn         - exclude from ingress rate limiting calculation if the SA of the
+ *                          frame is in ATU with EntryState that indicates Non Rate Limited.
+ *      daNrlEn         - exclude from ingress rate limiting calculation if the DA of the
+ *                          frame is in ATU with EntryState that indicates Non Rate Limited.
+ *        samplingMode    - sample one out of so many frames/bytes for a stream of frames
+ *        actionMode        - action should be taken when there are not enough tokens
+ *                          to accept the entire incoming frame
+ *                                PIRL_ACTION_ACCEPT - accept the frame
+ *                                PIRL_ACTION_USE_LIMIT_ACTION - use limitAction
+ *        ebsLimitAction     - action should be taken when the incoming rate exceeds
+ *                          the limit.
+ *                                ESB_LIMIT_ACTION_DROP - drop packets
+ *                                ESB_LIMIT_ACTION_FC   - send flow control packet
+ *        fcDeassertMode    - flow control de-assertion mode when limitAction is
+ *                          set to ESB_LIMIT_ACTION_FC.
+ *                                GT_PIRL_FC_DEASSERT_EMPTY -
+ *                                    De-assert when the ingress rate resource has
+ *                                    become empty.
+ *                                GT_PIRL_FC_DEASSERT_CBS_LIMIT -
+ *                                    De-assert when the ingress rate resource has
+ *                                    enough room as specified by the CBSLimit.
+ *        bktRateType        - bucket is either rate based or traffic type based.
+ *                                BUCKET_TYPE_RATE_BASED, or
+ *                                BUCKET_TYPE_TRAFFIC_BASED
+ *      priORpt         - determine the incoming frames that get rate limited using
+ *                          this ingress rate resource.
+ *                                  GT_TRUE - typeMask OR priMask
+ *                                  GT_FALSE - typeMask AND priMask
+ *        priMask         - priority bit mask that each bit indicates one of the four
+ *                          queue priorities. Setting each one of these bits indicates
+ *                          that this particular rate resource is slated to account for
+ *                          incoming frames with the enabled bits' priority.
+ *        bktTypeMask        - used if bktRateType is BUCKET_TYPE_TRAFFIC_BASED.
+ *                          any combination of the following definitions:
+ *                                BUCKET_TRAFFIC_UNKNOWN_UNICAST,
+ *                                BUCKET_TRAFFIC_UNKNOWN_MULTICAST,
+ *                                BUCKET_TRAFFIC_BROADCAST,
+ *                                BUCKET_TRAFFIC_MULTICAST,
+ *                                BUCKET_TRAFFIC_UNICAST,
+ *                                BUCKET_TRAFFIC_MGMT_FRAME,
+ *                                BUCKET_TRAFFIC_ARP,
+ *                                BUCKET_TRAFFIC_TCP_DATA,
+ *                                BUCKET_TRAFFIC_TCP_CTRL,
+ *                                BUCKET_TRAFFIC_UDP,
+ *                                BUCKET_TRAFFIC_NON_TCPUDP,
+ *                                BUCKET_TRAFFIC_IMS,
+ *                                BUCKET_TRAFFIC_POLICY_MIRROR, and
+ *                                BUCKET_TRAFFIC_PLICY_TRAP
+ *        byteTobeCounted    - bytes to be counted for accounting
+ *                                GT_PIRL2_COUNT_FRAME,
+ *                                GT_PIRL2_COUNT_ALL_LAYER1,
+ *                                GT_PIRL2_COUNT_ALL_LAYER2, or
+ *                                GT_PIRL2_COUNT_ALL_LAYER3
+ *
+ */
+typedef struct
+{
+    GT_U32        ingressRate;
+    GT_PIRL_CUSTOM_RATE_LIMIT customSetup;
+    GT_BOOL        accountQConf;
+    GT_BOOL        accountFiltered;
+    GT_BOOL        mgmtNrlEn;
+    GT_BOOL        saNrlEn;
+    GT_BOOL        daNrlEn;
+    GT_BOOL        samplingMode;
+    GT_PIRL_ACTION    actionMode;
+    GT_ESB_LIMIT_ACTION ebsLimitAction;
+    GT_PIRL_FC_DEASSERT fcDeassertMode;
+    GT_BUCKET_RATE_TYPE    bktRateType;
+    GT_BOOL        priORpt;
+    GT_U32        priMask;
+    GT_U32        bktTypeMask;
+    GT_PIRL2_COUNT_MODE    byteTobeCounted;
+} GT_PIRL2_DATA;
+
+
+
+/*
+ *  typedef: struct GT_PIRL_CUSTOM_TSM_CFG
+ *
+ *  Description: The parameters that decides Ingress Rate Limit for AVB frames vary
+ *                 depending on the application. Since DSDT driver cannot cover all the cases,
+ *                this structure is provided for the custom parameter setting.
+ *                However, in most cases, user may ingore this structure by setting
+ *                isValid to GT_FALSE. If Ingress Rate Limit is too much off from
+ *                the expected rate, please contact FAE and gets the correct ebsLimit,
+ *                cbsLimit, CTS interval, and action mode value and use this structure
+ *                to do custom parameter setting.
+ *
+ *        isValid         - If GT_TRUE, the paramers in this structure are used
+ *                          to program PIRL Resource's Rate Limit. And ingressRate
+ *                          in GT_PIRL_TSM_DATA structure are ignored.
+ *                          If GT_FALSE, ingressRate in GT_PIRL_TSM_DATA structure
+ *                          is used for Resource's Rate Limit.
+ *        ebsLimit        - Excess Burst Size limit (0 ~ 0xFFFF)
+ *        cbsLimit        - Committed Burst Size limit (0 ~ 0xFFFF)
+ *        ctsIntv         - Class Time Slot Interval
+ *                          0 - interval is 62.5us
+ *                          1 - interval is 125us
+ *                          2 - interval is 250us
+ *                          3 - interval is 1000us
+ *        actionMode        - action should be taken when there are not enough tokens
+ *                          to accept the entire incoming frame
+ *                                PIRL_ACTION_ACCEPT - accept the frame
+ *                                PIRL_ACTION_USE_LIMIT_ACTION - use limitAction
+*/
+typedef struct
+{
+    GT_BOOL        isValid;
+    GT_U32        ebsLimit;
+    GT_U32        cbsLimit;
+    GT_U32        ctsIntv;
+    GT_PIRL_ACTION        actionMode;
+} GT_PIRL_CUSTOM_TSM_CFG;
+
+
+/*
+ *  typedef: struct GT_PIRL2_TSM_DATA
+ *
+ *  Description: data structure for PIRL2 TSM Ingress Rate Limit.
+ *
+ *  Fields:
+ *        ebsLimit        - Excess Burst Size limit (0 ~ 0xFFFF)
+ *        cbsLimit        - Committed Burst Size limit (0 ~ 0xFFFF)
+ *        ctsIntv         - Class Time Slot Interval
+ *                          0 - interval is 62.5us
+ *                          1 - interval is 125us
+ *                          2 - interval is 250us
+ *                          3 - interval is 1000us
+ *        actionMode        - action should be taken when there are not enough tokens
+ *                          to accept the entire incoming frame
+ *                                PIRL_ACTION_ACCEPT - accept the frame
+ *                                PIRL_ACTION_USE_LIMIT_ACTION - use limitAction
+ *         mgmtNrlEn         - exclude management frame from ingress rate limiting calculation
+ *        priMask         - priority bit mask that each bit indicates one of the four
+ *                          queue priorities. Setting each one of these bits indicates
+ *                          that this particular rate resource is slated to account for
+ *                          incoming frames with the enabled bits' priority.
+ *
+ */
+typedef struct
+{
+    GT_BOOL        tsmMode;
+    GT_U32        ebsLimit;
+    GT_U32        cbsLimit;
+    GT_U32        ctsIntv;
+    GT_PIRL_ACTION        actionMode;
+    GT_BOOL        mgmtNrlEn;
+    GT_U32        priMask;
+} GT_PIRL2_TSM_RESOURCE;
+
+
+
+/*
+ *  typedef: struct GT_PIRL2_TSM_DATA
+ *
+ *  Description: data structure for PIRL2 TSM Ingress Rate Limit.
+ *
+ *  Fields:
+ *        tsmMode            - enable/disable TSM mode.
+ *                          The following fields are ignored if diable
+ *        ingressRate       - commited ingress rate in kbps.(min 64 for 64kbps)
+ *        customSetup       - custom ingress rate parameter setup. please refer to
+ *                          GT_PIRL_CUSTOM_TSM_CFG structure.
+ *        mgmtNrlEn        - exclude management frame from ingress rate limiting calculation
+ *        priMask         - priority bit mask that each bit indicates one of the four
+ *                          queue priorities. Setting each one of these bits indicates
+ *                          that this particular rate resource is slated to account for
+ *                          incoming frames with the enabled bits' priority.
+ *
+ */
+typedef struct
+{
+    GT_BOOL        tsmMode;
+    GT_U32        ingressRate;
+    GT_PIRL_CUSTOM_TSM_CFG customSetup;
+    GT_BOOL        mgmtNrlEn;
+    GT_U32        priMask;
+} GT_PIRL2_TSM_DATA;
+
+
+
+#define MAX_PTP_CONSECUTIVE_READ    4
+
+
+/*
+ * Typedef: enum GT_PTP_OPERATION
+ *
+ * Description: Defines the PTP (Precise Time Protocol) Operation type
+ *
+ * Fields:
+ *      PTP_WRITE_DATA             - Write data to the PTP register
+ *      PTP_READ_DATA            - Read data from PTP register
+ *      PTP_READ_MULTIPLE_DATA    - Read multiple data from PTP register
+ *      PTP_READ_TIMESTAMP_DATA    - Read timestamp data from PTP register
+ *                    valid bit will be reset after read
+ */
+typedef enum
+{
+    PTP_WRITE_DATA             = 0x3,
+    PTP_READ_DATA              = 0x4,
+    PTP_READ_MULTIPLE_DATA    = 0x6,
+    PTP_READ_TIMESTAMP_DATA    = 0x8,
+} GT_PTP_OPERATION;
+
+
+/*
+ * Typedef: enum GT_PTP_SPEC
+ *
+ * Description: Defines the PTP (Precise Time Protocol) SPEC type
+ *
+ * Fields:
+ *      PTP_IEEE_1588         - IEEE 1588
+ *      PTP_IEEE_802_1AS    - IEEE 802.1as
+ */
+typedef enum
+{
+    PTP_IEEE_1588        = 0x0,
+    PTP_IEEE_802_1AS    = 0x1
+} GT_PTP_SPEC;
+
+
+/*
+ *  typedef: struct GT_PTP_OP_DATA
+ *
+ *  Description: data required by PTP Operation
+ *
+ *  Fields:
+ *      ptpPort        - physical port of the device
+ *      ptpAddr     - register address
+ *      ptpData     - data for ptp register.
+ *      ptpMultiData- used for multiple read operation.
+ *      nData         - number of data to be read on multiple read operation.
+ */
+typedef struct
+{
+    GT_U32    ptpPort;
+    GT_U32    ptpBlock;
+    GT_U32    ptpAddr;
+    GT_U32    ptpData;
+    GT_U32    ptpMultiData[MAX_PTP_CONSECUTIVE_READ];
+    GT_U32    nData;
+} GT_PTP_OP_DATA;
+
+
+
+/*
+ *  typedef: struct GT_PTP_GLOBAL_CONFIG
+ *
+ *  Description: PTP global configuration parameters
+ *
+ *  Fields:
+ *      ptpEType    - PTP Ether Type
+ *      msgIdTSEn     - Message IDs that needs time stamp
+ *      tsArrPtr     - Time stamp arrival time counter pointer (either Arr0Time or Arr1Time)
+ */
+typedef struct
+{
+    GT_U32    ptpEType;
+    GT_U32    msgIdTSEn;
+    GT_U32    tsArrPtr;
+} GT_PTP_GLOBAL_CONFIG;
+
+
+/*
+ * Typedef: enum GT_PTP_TS_MODE
+ *
+ * Description: Defines the PTP (Precise Time Protocol) Arr0 TS Mode
+ *
+ * Fields:
+ *      GT_PTP_TS_MODE_IN_REG         - Time stamp in TS regigister (original)
+ *      GT_PTP_TS_MODE_IN_RESERVED_2  - Time stamp in Frame resedved 2
+ *      GT_PTP_TS_MODE_IN_FRAME_END   - Time stamp in Frame End
+ */
+typedef enum
+{
+       GT_PTP_TS_MODE_IN_REG,
+       GT_PTP_TS_MODE_IN_RESERVED_2,
+       GT_PTP_TS_MODE_IN_FRAME_END
+} GT_PTP_TS_MODE;
+
+#define PTP_TS_LOC_RESERVED_2  0x10
+#define PTP_FRAME_SIZE 0xc0
+
+/*
+ *  typedef: struct GT_PTP_PORT_CONFIG
+ *
+ *  Description: PTP configuration parameters for each port
+ *
+ *  Fields:
+ *      transSpec    - This is to differentiate between various timing protocols.
+ *      disTSpec     - Disable Transport specific check
+ *      etJump         - offset to Ether type start address in bytes
+ *      ipJump         - offset to IP header start address counting from Ether type offset
+ *      ptpArrIntEn    - PTP port arrival interrupt enable
+ *      ptpDepIntEn    - PTP port departure interrupt enable
+ *      disTSOverwrite - disable time stamp counter overwriting until the corresponding
+ *                          timer valid bit is cleared.
+ *      arrTSMode      - PTP arrival TS mode.
+ */
+typedef struct
+{
+    GT_PTP_SPEC    transSpec;
+    GT_BOOL        disTSpec;
+    GT_U32         etJump;
+    GT_U32         ipJump;
+    GT_BOOL        ptpArrIntEn;
+    GT_BOOL        ptpDepIntEn;
+    GT_BOOL        disTSOverwrite;
+    GT_PTP_TS_MODE         arrTSMode;
+} GT_PTP_PORT_CONFIG;
+
+/*
+ *  typedef: struct GT_PTP_CONFIG
+ *
+ *  Description: PTP configuration parameters
+ *
+ *  Fields:
+ *      ptpEType    - PTP Ether Type
+ *      msgIdTSEn     - Message IDs that needs time stamp
+ *      tsArrPtr     - Time stamp arrival time counter pointer (either Arr0Time or Arr1Time)
+ *      ptpArrIntEn    - PTP port arrival interrupt enable
+ *      ptpDepIntEn    - PTP port departure interrupt enable
+ *      transSpec    - This is to differentiate between various timing protocols.
+ *      msgIdStartBit     - Message ID starting bit in the PTP common header
+ *      disTSOverwrite     - disable time stamp counter overwriting until the corresponding
+ *                          timer valid bit is cleared.
+ *      ptpPortConfig      - PTP port configuration array.
+ */
+typedef struct
+{
+    GT_U32    ptpEType;
+    GT_U32    msgIdTSEn;
+    GT_U32    tsArrPtr;
+
+    GT_U32    ptpArrIntEn;
+    GT_U32    ptpDepIntEn;
+    GT_PTP_SPEC    transSpec;
+    GT_U32    msgIdStartBit;
+    GT_BOOL    disTSOverwrite;
+    GT_PTP_PORT_CONFIG      ptpPortConfig[MAX_SWITCH_PORTS];
+} GT_PTP_CONFIG;
+
+
+/*
+ * Typedef: enum GT_PTP_TIME
+ *
+ * Description: Defines the PTP Time to be read
+ *
+ * Fields:
+ *      PTP_WRITE_DATA             - Write data to the PTP register
+ *      PTP_READ_DATA            - Read data from PTP register
+ *      PTP_READ_MULTIPLE_DATA    - Read multiple data from PTP register
+ */
+typedef enum
+{
+    PTP_ARR0_TIME = 0x0,
+    PTP_ARR1_TIME = 0x1,
+    PTP_DEP_TIME = 0x2
+} GT_PTP_TIME;
+
+
+/*
+ * Typedef: enum GT_PTP_INT_STATUS
+ *
+ * Description: Defines the PTP Port interrupt status for time stamp
+ *
+ * Fields:
+ *      PTP_INT_NORMAL        - No error condition occurred
+ *      PTP_INT_OVERWRITE     - PTP logic has to process more than one PTP frame
+ *                                  that needs time stamping before the current read.
+ *                                Only the latest one is saved.
+ *      PTP_INT_DROP          - PTP logic has to process more than one PTP frame
+ *                                  that needs time stamping before the current read.
+ *                                Only the oldest one is saved.
+ *
+ */
+typedef enum
+{
+    PTP_INT_NORMAL         = 0x0,
+    PTP_INT_OVERWRITE     = 0x1,
+    PTP_INT_DROP         = 0x2
+} GT_PTP_INT_STATUS;
+
+
+/*
+ *  typedef: struct GT_PTP_TS_STATUS
+ *
+ *  Description: PTP port status of time stamp
+ *
+ *  Fields:
+ *      isValid        - time stamp is valid
+ *      status        - time stamp error status
+ *      timeStamped    - time stamp value of a PTP frame that needs to be time stamped
+ *      ptpSeqId    - sequence ID of the frame whose time stamp information has been captured
+ */
+typedef struct
+{
+    GT_BOOL    isValid;
+    GT_U32    timeStamped;
+    GT_U32    ptpSeqId;
+    GT_PTP_INT_STATUS    status;
+} GT_PTP_TS_STATUS;
+
+
+/*
+ *  typedef: struct GT_PTP_PORT_DISCARD_STATS
+ *
+ *  Description: PTP port discard statistics. The counter (4 bit wide) wraps around after 15.
+ *
+ *  Fields:
+ *      tsDepDisCtr    - PTP departure frame discard counter for PTP frames that need time stamping.
+ *      ntsDepDisCtr    - PTP departure frame discard counter for PTP frames that do not need time stamping.
+ *      tsArrDisCtr    - PTP arrival frame discard counter for PTP frames that need time stamping.
+ *      ntsArrDisCtr    - PTP arrival frame discard counter for PTP frames that do not need time stamping.
+ */
+typedef struct
+{
+    GT_U32    tsDepDisCtr;
+    GT_U32    ntsDepDisCtr;
+    GT_U32    tsArrDisCtr;
+    GT_U32    ntsArrDisCtr;
+} GT_PTP_PORT_DISCARD_STATS;
+
+/* From Agate, to add arrival TS mode, to insert TS into frame */
+typedef enum
+{
+    PTP_ARR_TS_MODE_DIABLE  = 0,   /* PTP Arrival mode: disable TS mode modification */
+    PTP_ARR_TS_MODE_FRM_END = 1,   /* PTP Arrival mode: TS at end of frame */
+    PTP_ARR_TS_MODE_LOC_5   = 4,   /* PTP Arrival mode: TS at offset 5 in common header */
+    PTP_ARR_TS_MODE_LOC_17  = 16,  /* PTP Arrival mode: TS at offset 17 in common header */
+    PTP_ARR_TS_MODE_LOC_35  = 34,  /* PTP Arrival mode: TS at offset 35 in common header */
+    PTP_ARR_TS_MODE_LOC_ff         /* PTP Arrival mode: TS at the end of the frame */
+} GT_PTP_ARR_TS_MODE;
+
+
+
+#ifdef CONFIG_AVB_FPGA
+
+typedef enum
+{
+    PTP_CLOCK_SRC_AD_DEVICE = 0,    /* PTP Clock source is from A/D device */
+    PTP_CLOCK_SRC_FPGA                /* PTP Clock source is from Cesium FPGA */
+} GT_PTP_CLOCK_SRC;
+
+typedef enum
+{
+    PTP_P9_MODE_GMII = 0,     /* Port 9 uses GMII connect to 88E1111 */
+    PTP_P9_MODE_MII,        /* Port 9 uses MII connect to 88E1111 */
+    PTP_P9_MODE_MII_CONNECTOR,        /* Port 9 connect to MII connector */
+    PTP_P9_MODE_JUMPER        /* Use Jumper setup */
+} GT_PTP_P9_MODE;
+
+typedef enum
+{
+    GT_PTP_SIGN_NEGATIVE = 0,    /* apply Minus sign to the Duty Cycle */
+    GT_PTP_SIGN_PLUS            /* apply Plus sign to the Duty Cycle */
+} GT_PTP_SIGN;
+
+typedef struct
+{
+    GT_PTP_SIGN    adjSign;    /* determine the plus/minus sign of the duty cycle adj */
+    GT_U32    cycleStep;        /* number of steps which will be applied in adjusting the duty cycle high time
+                                of the 8KHz clock cycle.
+                                valid values are 0 ~ 7 */
+    GT_U32    cycleInterval;    /* define the interval of clock cycles for which a duty cycle adj will occur */
+    GT_U32    cycleAdjust;    /* define the number of 8KHz clock cycles for which duty cycle adj will occur
+                                within each PTP clock clycle interval.
+                                Note that (cycleAdjust <= cycleInterval) for proper operation */
+} GT_PTP_CLOCK_ADJUSTMENT;
+
+#endif
+
+/*
+ *  typedef: struct GT_TAI_EVENT_CONFIG
+ *
+ *  Description: TAI event capture configuration parameters
+ *
+ *  Fields:
+ *      eventOverwrite    - event capture overwrite
+ *      eventCtrStart     - event counter start
+ *      eventPhase        - event phase, When 0x1 the active phase of the PTP_EVREQ input
+ *        is inverted to be active low. When 0x0 the active phase of the PTP_EVREQ input
+ *        is normal activehigh
+ *      intEn             - event capture interrupt enable
+ *      captTrigEvent     - Catpture Trig. 1: from waveform generated by PTP_TRIG.
+ *                                         0: from PTP_EVREQ pin.
+ */
+typedef struct
+{
+    GT_BOOL    eventOverwrite;
+    GT_BOOL    eventCtrStart;
+    GT_BOOL    eventPhase;
+    GT_BOOL    intEn;
+    GT_BOOL    captTrigEvent;
+
+} GT_TAI_EVENT_CONFIG;
+
+
+/*
+ *  typedef: struct GT_TAI_EVENT_STATUS
+ *
+ *  Description: TAI event capture status
+ *
+ *  Fields:
+ *      isValid        - eventTime is valid
+ *      eventTime     - PTP global time when event is registered.
+ *      eventCtr    - event capture counter. increamented only if eventCtrStart is set.
+ *      eventErr    - isValid is already set when a new event is observed.
+ */
+typedef struct
+{
+    GT_BOOL    isValid;
+    GT_U32    eventTime;
+    GT_U32    eventCtr;
+    GT_BOOL    eventErr;
+} GT_TAI_EVENT_STATUS;
+
+
+typedef enum
+{
+    GT_TAI_TRIG_PERIODIC_PURSE = 0,    /* generate periodic purse */
+    GT_TAI_TRIG_ON_GIVEN_TIME        /* generate purse when
+                                    PTP global time matches with given time */
+} GT_TAI_TRIG_MODE;
+
+typedef enum
+{
+    GT_TAI_MULTI_PTP_SYNC_DISABLE = 0,  /* the EventRequest and TriggerGen interfaces operate normally. */
+    GT_TAI_MULTI_PTP_SYNC_ENABLE        /* the logic detects a low to high transition on
+                                        the EventRequest (GPIO) and transfers the value
+                                        in TrigGenAmt[31:0] (TAI Global Config 0x2, 0x3) into
+                                        the PTP Global Time register[31:0]. The EventCapTime[31:0]
+                                        (TAI global Status 0xA, 0xB) is also updated at that
+                                        instant. */
+} GT_TAI_MULTI_PTP_SYNC_MODE;
+
+
+/*
+ *  typedef: struct GT_TAI_CLOCK_SELECT
+ *
+ *  Description: TAI Clock select
+ *
+ *  Fields:
+ *      priRecClkSel      - Synchronous Ethernet Primary Recovered Clock Select.
+ *        This field indicates the internal PHY number whose recovered clock will be
+ *        presented on the SE_RCLK0 pin. The reset value of 0x7 selects no clock and
+ *        the pin is tri-stated.
+ *      syncRecClkSel     - Synchronous Ethernet Secondary Recovered Clock Select.
+ *      ptpExtClk         - PTP external Clock select
+ */
+typedef struct
+{
+    GT_U8    priRecClkSel;
+    GT_U8    syncRecClkSel;
+    GT_BOOL  ptpExtClk;
+} GT_TAI_CLOCK_SELECT;
+
+/*
+ *  typedef: struct GT_TAI_TRIGGER_CONFIG
+ *
+ *  Description: TAI trigger generator configuration parameters
+ *
+ *  Fields:
+ *      intEn         - trigger generator interrupt enable
+ *      trigPhase     - trigger phase, When 0x1 the active phase of the PTP_TRIG output
+ *        is inverted to be active low. When 0x0 the active phase of the PTP_TRIG output
+ *        is normal active high.
+ *      trigLock     - trigger Lock, When 0x1 the leading edge of PTP_TRIG will be adjudterd in following range.
+ *      trigLockRange - trigger Locking range.
+ *      lockCorrect   - Trig Lock Correction amount
+ *      lock2Correct  - Trig Lock 2 Correction amount
+ *      mode        - trigger mode, either GT_TAI_TRIG_PERIODIC_PURSE or
+ *                      GT_TAI_TRIG_ON_GIVEN_TIME
+ *      trigGenAmt     - if mode is GT_TAI_TRIG_PERIODIC_PURSE,
+ *                      this value is used as a clock period in TSClkPer increments
+ *                      If mode is GT_TAI_TRIG_ON_GIVEN_TIME,
+ *                      this value is used to compare with PTP global time.
+ *      pulseWidth        - pulse width in units of TSClkPer.
+ *                      this value should be 1 ~ 0xF. If it's 0, no changes made.
+ *                      this value is valid only in GT_TAI_TRIG_ON_GIVEN_TIME mode.
+ *      trigClkComp    - trigger mode clock compensation amount in pico sec.
+ *                      this value is valid only in GT_TAI_TRIG_PERIODIC_PURSE mode.
+ *      trigGenTime    - Trigger Generation Time.
+ *      trigGenDelay   - Trigger Generation Delay.
+ *      trigGen2Time   - Trigger Generation Time 2.
+ *      trigGen2Delay  - Trigger Generation Delay 2.
+ */
+typedef struct
+{
+    GT_BOOL   intEn;
+    GT_BOOL   trigPhase;
+    GT_BOOL   trigLock;
+    GT_U8     trigLockRange;
+    GT_U8     lockCorrect;
+    GT_U8     lockCorrect2;
+    GT_TAI_TRIG_MODE     mode;
+    GT_U32    trigGenAmt;
+    GT_U32    pulseWidth;
+    GT_U32    trigClkComp;
+    GT_U32    trigGenTime;
+    GT_U32    trigGenDelay;
+    GT_U32    trigGen2Time;
+    GT_U32    trigGen2Delay;
+} GT_TAI_TRIGGER_CONFIG;
+
+
+
+/* AVB functions */
+typedef enum
+{
+    GT_AVB_HI_FPRI,        /* AVB Hi Frame Priority */
+    GT_AVB_HI_QPRI,        /* AVB Hi Queue Priority */
+    GT_AVB_LO_FPRI,        /* AVB Lo Frame Priority */
+    GT_AVB_LO_QPRI,        /* AVB Lo Queue Priority */
+    GT_LEGACY_HI_FPRI,    /* Legacy Hi Frame Priority */
+    GT_LEGACY_HI_QPRI,    /* Legacy Hi Queue Priority */
+    GT_LEGACY_LO_FPRI,    /* Legacy Lo Frame Priority */
+    GT_LEGACY_LO_QPRI    /* Legacy Lo Queue Priority */
+} GT_AVB_PRI_TYPE;
+
+
+typedef enum
+{
+    GT_AVB_LEGACY_MODE, /* all frames entering the port are considered legacy */
+    GT_AVB_STANDARD_AVB_MODE, /*any tagged frame that ends up with an AVB frame priority is considered AVB */
+    GT_AVB_ENHANCED_AVB_MODE, /*any frame that ends up with an AVB frame priority whose DA is contained in the ATU with an AVB Entry state is considered AVB */
+    GT_AVB_SECURE_AVB_MODE   /*any frame that ends up with an AVB frame priority whose DA is contained in the ATU with an AVB entry state and whose DPV has this source port's bit set to a one is considered AVB. */
+} GT_AVB_MODE;
+
+
+/*
+ * Typedef: enum GT_AVB_FRAME_POLICY
+ *
+ * Description: Defines the policy of the frame
+ *
+ * Fields:
+ *      AVB_FRAME_POLICY_NONE - Normal frame switching
+ *      AVB_FRAME_POLICY_MIRROR - Mirror(copy) frame to the MirrorDest port
+ *      AVB_FRAME_POLICY_TRAP - Trap(re-direct) frame to the CPUDest port
+ *      AVB_FRAME_POLICY_RES - Reserved, but implemented as Discard the frame
+ *
+ */
+typedef enum
+{
+    AVB_FRAME_POLICY_NONE = 0,
+    AVB_FRAME_POLICY_MIRROR,
+    AVB_FRAME_POLICY_TRAP,
+    AVB_FRAME_POLICY_RES
+} GT_AVB_FRAME_POLICY;
+
+
+/*
+ * Typedef: enum GT_AVB_FRAME_TYPE
+ *
+ * Description:
+ *        Defines the AVB frame type.
+ *        AVB Hi Frame is one that DA of the frame is contained in the ATU with an
+ *        Entry State that indicates AVB with priority override where the overridden
+ *        priority equals the Hi AVB frame priority(refer to gavbGetPriority API) and
+ *        when the port's DA AvbOverride is enabled.
+ *        AVB Lo Frame is one that DA of the frame is contained in the ATU with an
+ *        Entry State that indicates AVB with priority override where the overridden
+ *        priority equals the Lo AVB frame priority(refer to gavbGetPriority API) and
+ *        when the port's DA AvbOverride is enabled.
+ *
+ * Fields:
+ *      AVB_HI_FRAME    - AVB Hi Frame
+ *      AVB_LO_FRAME    - AVB Lo Frame
+ */
+typedef enum
+{
+    AVB_HI_FRAME,
+    AVB_LO_FRAME
+} GT_AVB_FRAME_TYPE;
+
+
+/*
+ * Typedef: enum GT_TCAM_OPERATION
+ *
+ * Description: Defines the TCAM (Ternary Content Addressable Memory) Operation type
+ *
+ * Fields:
+ *   TCAM_FLUSH_ALL       - Flush all entries
+ *   TCAM_FLUSH_ENTRY     - Flush or invalidate a single TCAM entry
+ *   TCAM_LOAD_ENTRY      - Load qn entry's page - or Purge an entry
+ *   TCAM_GET_NEXT_ENTRY  - Get Next (read next valid entry - all pages)
+ *   TCAM_READ_ENTRY      - Read an entry's page (perform a direct read of an entry)
+ */
+typedef enum
+{
+    TCAM_FLUSH_ALL       = 0x1,
+    TCAM_FLUSH_ENTRY     = 0x2,
+    TCAM_LOAD_ENTRY      = 0x3,
+    TCAM_PURGE_ENTRY     = 0x6,
+    TCAM_GET_NEXT_ENTRY  = 0x4,
+    TCAM_READ_ENTRY      = 0x5
+} GT_TCAM_OPERATION;
+
+typedef enum
+{
+    FRAME_TYPE_NORMAL  = 0,
+    FRAME_TYPE_DSA     = 1,
+    FRAME_TYPE_PROVIDE = 2,
+    FRAME_TYPE_RES = 3,
+} GT_FRAME_TYPE;
+
+/* The P3 register 31 work as follows in the following example sequence:
+o 0x00FF = Reset state, no hits yet
+o 0x0000 = 48-byte TCAM hit on Entry 0x00
+o 0x0201 = 96-byte TCAM hit on Entries 0x01 (low) and 0x02 (high)
+o 0x0003 = 48-byte TCAM hit on Entry 0x03
+o 0x0809 = 96-byte TCAM hit on Entries 0x09 (low) and 0x08 (high)
+o 0x0009 = 48-byte TCAM hit on Entry 0x09 with a TCAM miss on the high 48-bytes
+o 0x00FF = Miss on 1st 48 byte lookup
+*/
+typedef enum
+{
+    TCAM_HIT_RESET  = 0xFF,
+    TCAM_HIT_48_E0  = 0x00,
+    TCAM_HIT_96_E1  = 0x201,
+    TCAM_HIT_48_E3  = 0x03,
+    TCAM_HIT_96_E9  = 0x809,
+    TCAM_HIT_48_MISS  = 0x0FF,
+} GT_TCAM_HIT_STATUS;
+
+
+
+/*
+ *  typedef: struct GT_TCAM_DATA
+ *
+ *  Description: TCAM Key Data and Frame Match Data.
+ *  The bytes are in the lower 8 bits of each 16-bit register. The upper 8 bits of
+ *  each register are the Mask bits for the lower 8 bits where bit 15 is the mask
+ *  for bit 7, bit 14 is the mask for bit 6, etc. The individual pairs of data bits
+ *  and mask bits work together as follows:
+ *    Mask Data Meaning
+ *      0 0 Don’t Care. The data bit can be a one or a zero for a TCAM hit to occur.
+ *      1 0 Hit on 0. The data bit must be a zero for a TCAM hit to occur.
+ *      1 1 Hit on 1. The data bit must be a one for a TCAM hit to occur.
+ *      0 1 Never Hit. Used to prevent a TCAM hit from occurring from this entry.
+ *    The Never Hit value is used to Flush the TCAM or Purge a TCAM entry.
+ *    On a TCAM Flush or Purge, this value it written to the 1st TCAM byte only
+ *    (offset 0x02 on TCAM page 1). On a TCAM Flush All or on a TCAM Flush
+ *    an entry, all other TCAM data and mask bytes are written to a value of 0x0000
+ *    and so are the Action bytes.
+ *
+ *  Fields:
+ *      frameType      - Frame Type. These bits are used to define the Frame type or mode
+ *      frameTypeMask  - Frame Type Mask.
+ *      spv      - Source Port Vector. These bits are used to define which switch ports
+ *                 can use this TCAM entry.
+ *      spvMask  - Source Port Vector Mask.
+ *      ppri      - When the TCAM entry’s FrameMode bits are Provider Tagged, these bits are
+ *                  Provider Priority bits.
+ *      ppriMask  - Provider Priority Mask.
+ *      pvid      - When the TCAM entry’s FrameMode bits are Provider Tagged, these bits are
+ *                  Provider VID bits..
+ *      pvidMask  - Provider VID Mask.
+ *      frameOctet      - Frame Octet 1-48 and 49-96. These are the match data for octet
+ *                        1-48 of the frame if the TCAM entry is for the first 48 bytes
+ *                        of a frame. If this TCAM entry is for the second 48 bytes of
+ *                        a frame this is the match data for octet 49 (or 97).
+ *      frameOctetMask  - Frame Octet Mask.
+ *      continu  - Continue this TCAM entry. This bit should only be a 1 on TCAM entries
+ *                that cover the first 48 bytes of a frame that needs to be extended to
+ *                also match bytes 49 to 96 of the frame or on any subsequent continuation
+ *                beyond byte 96 of the frame.
+ *      interrupt  - Interrupt on a TCAM hit. When this bit is set to a one on a TCAM entry
+ *                   (where the Continue bit is a zero), a TCAM hit interrupt will be
+ *                   generated whenever a match occurs to this entry.
+ *      IncTcamCtr   - Increment the port’s TCAM Counter on a TCAM hit.
+ *      vidOverride  - VID Override Enable.
+ *      vidData    - VID Override Data.
+ *      nextId     - Next Index or Flow ID.
+ *      qpriOverride  - QPRI Override Enable.
+ *      qpriData      - QPRI Override Data.
+ *      fpriOverride  - FPRI Override Enable.
+ *      fpriData      - FPRI Override Data.
+ *      qpriAvbOverride  - QPRI_AVB Override Enable.
+ *      qpriAvbData      - QPRI_AVB Override Data.
+ *      dpvOverride  - DPV Override Enable.
+ *      dpvData      - DPV Override Data.
+ *      factionOverride  - Frame Action Override Enable.
+ *      factionData      - Frame Action Override Data.
+ *      ldBalanceOverride  - Load Balance Override Enable.
+ *      ldBalanceData      - Load Balance Override Data.
+ *      debugPort    - Debug Port Number.
+ *      highHit      - TCAM Entry for High 48-byte Hit..
+ *      lowHit       - TCAM Entry for High 48-byte Hit..
+*/
+typedef union
+{
+  GT_U16  frame[18];
+  struct {
+    GT_U8   destAddr[6];
+    GT_U8   srcAddr[6];
+    GT_U16  tag;
+    GT_U16  priVid;
+    GT_U16  ethType;
+  } paraFrmHd;
+} GT_TCAM_FRM_HD;
+
+typedef union
+{
+  GT_U16  data;
+  struct {
+    GT_U16   oct:8;
+    GT_U16   mask:8;
+  } struc;
+} GT_TCAM_FRAME;
+
+typedef union
+{
+  GT_U16  frame[28];  /* first part is 0-48 bytes of frame */
+                       /* second part is 47-96 bytes of frame */
+  struct {
+  /* Pg0 registers */
+    GT_U16       pg0Op;
+
+    GT_U16       pg0res0;
+
+    GT_U16       spvRes:3;
+    GT_U16       type0Res:3;
+    GT_U16       frame0Type:2;
+    GT_U16       maskType:8;
+
+    GT_U16       spv:8;
+    GT_U16       spvMask:8;
+
+    GT_U16       pvid0Hi:4;
+    GT_U16       ppri0:4;
+    GT_U16       pvid0MaskHi:4;
+    GT_U16       ppri0Mask:4;
+
+    GT_U16       pvid0Low:8;
+    GT_U16       pvidMask0Low:8;
+
+    GT_TCAM_FRAME frame0[22];
+  }  paraFrm;
+} GT_TCAM_FRM0_DATA;
+typedef union
+{
+  GT_U16  frame[28];  /* first part is 0-48 bytes of frame */
+                       /* second part is 47-96 bytes of frame */
+  struct {
+  /* Pg1 registers */
+    GT_U16       pg1Op;
+    GT_U16       pg1res0;
+    GT_TCAM_FRAME frame1[26];
+
+  }  paraFrm;
+} GT_TCAM_FRM1_DATA;
+typedef union
+{
+  GT_U16  frame[28];  /* first part is 0-48 bytes of frame */
+                       /* second part is 47-96 bytes of frame */
+  struct {
+  /* Pg2 registers */
+    GT_U16       pg2Op;
+
+    GT_U16       pg2res0;
+
+    GT_U16       vidData:12;
+    GT_U16       pg2res1:1;
+    GT_U16       IncTcamCtr:1;
+    GT_U16       interrupt:1;
+    GT_U16       continu:1;
+
+    GT_U16        fpriData:3;
+    GT_U16        pg2res3:1;
+    GT_U16        qpriData:2;
+    GT_U16        pg2res2:2;
+    GT_U16        nextId:8;
+
+    GT_U16        dpvData:11;
+    GT_U16        pg2res5:1;
+    GT_U16        qpriAvbData:2;
+    GT_U16        pg2res4:2;
+
+    GT_U16        ldBalanceData:3;
+    GT_U16        pg2res7:1;
+    GT_U16       factionData:11;
+    GT_U16        pg2res6:1;
+  }  paraFrm;
+} GT_TCAM_FRM2_DATA;
+
+typedef struct
+{
+  /* Pg0 registers */
+  GT_TCAM_FRM0_DATA frame0;
+  /* Pg1 registers */
+  GT_TCAM_FRM1_DATA frame1;
+  /* Pg2 registers */
+  GT_TCAM_FRM2_DATA frame2;
+} GT_TCAM_FRM_DATA;
+
+typedef struct
+{
+  GT_TCAM_FRM_DATA  rawFrmData[2];  /* first part is 0-48 bytes of frame */
+                                    /* second part is 47-96 bytes of frame */
+    GT_U8        frameType;
+    GT_U8        frameTypeMask;
+    GT_U8        spv;
+    GT_U8        spvMask;
+    GT_U8        ppri;
+    GT_U8        ppriMask;
+    GT_U16       pvid;
+    GT_U16       pvidMask;
+    GT_U8        frameOctet[96];
+    GT_U8        frameOctetMask[96];
+    GT_U8        continu;
+    GT_U8        interrupt;
+    GT_U8        IncTcamCtr;
+    GT_U8        vidOverride;
+    GT_U16       vidData;
+    GT_U8        nextId;
+    GT_U8        qpriData;
+    GT_U8        fpriData;
+    GT_U8        qpriAvbData;
+    GT_U8        dpvData;
+    GT_U8        factionOverride;
+    GT_U16       factionData;
+    GT_U8        ldBalanceOverride;
+    GT_U8        ldBalanceData;
+    GT_U8        debugPort;
+    GT_U8        highHit;
+    GT_U8        lowHit;
+    GT_U8        is96Frame;
+} GT_TCAM_DATA;
+
+/*
+ *  typedef: struct GT_TCAM_OP_DATA
+ *
+ *  Description: data required by TCAM (Ternary Content Addressable Memory) Operation
+ *
+ *  Fields:
+ *      tcamPage - page  of TCAM
+ *      tcamEntry - pointer to the desired entry of TCAM
+ *      tcamData - TCAM data for the entry pointed by tcamEntry
+ */
+
+typedef struct
+{
+    GT_U32    tcamPage;
+    GT_U32    tcamEntry;
+    GT_TCAM_DATA    *tcamDataP;
+} GT_TCAM_OP_DATA;
+
+
+/*
+ * typedef: enum GT_EVENT_TYPE
+ *
+ * Description: Enumeration of the available hardware driven events.
+ *
+ * Enumerations:
+ *   GT_AVB_INT    - AVB Interrupt Enable
+ *   GT_DEVICE_INT - Device Interrupt (GT_DEVICE_INT_TYPE) Enable
+ *   GT_STATS_DONE - Statistics Operation Done interrrupt Enable
+ *   GT_VTU_PROB - VLAN Problem/Violation Interrupt Enable
+ *   GT_VTU_DONE - VALN Table Operation Done Interrupt Enable
+ *   GT_ATU_PROB - ATU Problem/Violation Interrupt Enable, for Gigabit Switch
+ *   GT_ATU_FULL - ATU full interrupt enable, for Fast Ethernet Switch
+ *   GT_ATU_DONE - ATU Done interrupt enable.
+ *   GT_PHY_INT  - PHY interrupt enable, for Fast Ethernet Switch
+ *   GT_EE_INT   - EEPROM Done interrupt enable.
+ */
+#define GT_AVB_INT               0x100
+#define GT_DEVICE_INT           0x80
+#define GT_STATS_DONE           0x40
+#define GT_VTU_PROB             0x20
+#define GT_VTU_DONE             0x10
+#define GT_ATU_PROB         0x8
+#define GT_ATU_FULL         0x8
+#define GT_ATU_DONE            0x4
+#define GT_PHY_INTERRUPT    0x2        /* Device may not support PHY Int. Please refer to datasheet. */
+#define GT_EE_INTERRUPT        0x1
+
+#define GT_INT_MASK            \
+        (GT_AVB_INT | GT_DEVICE_INT | GT_STATS_DONE | GT_VTU_PROB | GT_VTU_DONE | GT_ATU_FULL |     \
+        GT_ATU_DONE | GT_PHY_INTERRUPT | GT_EE_INTERRUPT)
+#define GT_NO_INTERNAL_PHY_INT_MASK        \
+        (GT_AVB_INT | GT_DEVICE_INT | GT_STATS_DONE | GT_VTU_PROB | GT_VTU_DONE | GT_ATU_PROB |     \
+        GT_ATU_DONE | GT_EE_INTERRUPT)
+
+
+/*
+ *  typedef: struct GT_DEV_EVENT
+ *
+ *  Description: Device interrupt status
+ *
+ *  Fields:
+ *      event     - Device Interrupts to be enabled
+ *                    GT_DEV_INT_WATCHDOG, GT_DEV_INT_JAMLIMIT,
+ *                    GT_DEV_INT_DUPLEX_MISMATCH, and/or GT_DEV_INT_SERDES_LINK
+ *      portList  - SERDES port list where GT_DEV_INT_SERDES_LINK interrupt needs
+ *                    to be asserted. It's in vector format, Bit 10 is for port 10,
+ *                    Bit 9 is for port 9, etc.
+ *                    valid only if GT_DEV_INT_SERDES_LINK bit is set.
+ *      phyList   - Phy list where GT_DEV_INT_PHY interrupt needs to be asserted.
+ *                    It's in vector format, Bit 0 is for port 0,
+ *                    Bit 1 is for port 1, etc.
+ *                    valid only if GT_DEV_INT_PHY bit is set.
+ */
+typedef struct
+{
+    GT_U32        event;
+    GT_U32        portList;
+    GT_U32        phyList;
+} GT_DEV_EVENT;
+
+
+/*
+ *  typedef: struct GT_DEV_INT_STATUS
+ *
+ *  Description: Device interrupt status
+ *
+ *  Fields:
+ *      intCause  - Device Interrupt Cause
+ *                    GT_DEV_INT_WATCHDOG, GT_DEV_INT_JAMLIMIT,
+ *                    GT_DEV_INT_DUPLEX_MISMATCH, and/or GT_DEV_INT_SERDES_LINK
+ *        port      - logical port where GT_DEV_INT_DUPLEX_MISMATCH occurred.
+ *                    valid only if GT_DEV_INT_DUPLEX_MISMATCH is set.
+ *      linkInt   - SERDES port list where GT_DEV_INT_SERDES_LINK interrupt is
+ *                    asserted. It's in vector format, Bit 10 is for port 10,
+ *                    Bit 9 is for port 9, etc.
+ *                    valid only if GT_DEV_INT_SERDES_LINK bit is set.
+ *                    These bits are only valid of the port that is in 1000Base-X mode.
+ */
+typedef struct
+{
+    GT_U32        devIntCause;
+    GT_LPORT    port;
+    GT_U32        linkInt;
+    GT_U32        phyInt;
+} GT_DEV_INT_STATUS;
+
+
+/*
+* GT_DEVICE_INT
+*
+* Description: Enumeration of Device interrupt
+*    GT_DEV_INT_WATCHDOG        - WatchDog event interrupt (WatchDog event can be
+*                              configured with gwdSetEvent API)
+*    GT_DEV_INT_JAMLIMIT        - any of the ports detect an Ingress Jam Limit violation
+*                              (gprtSetPauseLimitIn API)
+*    GT_DEV_INT_DUPLEX_MISMATCH    - any of the ports detect a duplex mismatch
+*                              (i.e., the local port is in half duplex mode while
+*                              the link partner is in full duplex mode)
+*    GT_DEV_INT_SERDES_LINK    - SERDES link chage interrupt.
+*                              An interrupt occurs when a SERDES port changes link
+*                              status (link up or link down)
+*/
+
+#define GT_DEV_INT_WATCHDOG            0x8
+#define GT_DEV_INT_JAMLIMIT            0x4
+#define GT_DEV_INT_DUPLEX_MISMATCH    0x2
+#define GT_DEV_INT_SERDES_LINK        0x1
+#define GT_DEV_INT_WAKE_EVENT         0x1
+#define GT_DEV_INT_PHY                0x10
+
+/*
+* GT_WATCHDOG_EVENT
+*
+* Description: Enumeration of WatchDog event
+*        GT_WD_QC  - Queue Controller Watch Dog enable.
+*                    When enabled, the QC's watch dog circuit checks for link
+*                    list errors and any errors found in the QC.
+*        GT_WD_EGRESS - Egress Watch Dog enable.
+*                    When enabled, each port's egress circuit checks for problems
+*                    between the port and the Queue Controller.
+*        GT_WD_FORCE - Force a Watch Dog event.
+*/
+
+#define GT_WD_QC        0x1
+#define GT_WD_EGRESS    0x2
+#define GT_WD_FORCE        0x4
+
+
+/*
+* typedef: struct GT_WD_EVENT_HISTORY
+*
+* Description: WatchDog Event History (cleared only by a hardware reset)
+*        wdEvent   - When it's set to GT_TRUE, some enabled Watch Dog event occurred.
+*                    The following events are possible:
+*                        QC WatchDog Event (GT_WD_QC)
+*                        Egress WatchDog Event (GT_WD_EGRESS)
+*                        Forced WatchDog Event (GT_WD_FORCE)
+*        egressEvent-If any port's egress logic detects an egress watch dog issue,
+*                    this field is set to GT_TRUE, regardless of the enabling GT_WD_EGRESS
+*                    event.
+*/
+typedef struct
+{
+    GT_BOOL    wdEvent;
+    GT_BOOL egressEvent;
+} GT_WD_EVENT_HISTORY;
+
+
+/*
+* typedef: enum GT_PHY_INT
+*
+* Description: Enumeration of PHY interrupt
+*/
+
+#define GT_SPEED_CHANGED         0x4000
+#define GT_DUPLEX_CHANGED        0x2000
+#define GT_PAGE_RECEIVED        0x1000
+#define GT_AUTO_NEG_COMPLETED    0x800
+#define GT_LINK_STATUS_CHANGED    0x400
+#define GT_SYMBOL_ERROR            0x200
+#define GT_FALSE_CARRIER        0x100
+#define GT_FIFO_FLOW            0x80
+#define GT_CROSSOVER_CHANGED    0x40
+#define GT_POLARITY_CHANGED        0x2
+#define GT_JABBER                0x1
+
+#define GT_AUTO_NEG_ERROR        0x8000
+#define GT_DOWNSHIFT_DETECT        0x20
+#define GT_ENERGY_DETECT        0x10
+
+/*
+* typedef: enum GT_PHY_AUTO_MODE
+*
+* Description: Enumeration of Autonegotiation mode.
+*    Auto for both speed and duplex.
+*    Auto for speed only and Full duplex.
+*    Auto for speed only and Half duplex. (1000Mbps is not supported)
+*    Auto for duplex only and speed 1000Mbps.
+*    Auto for duplex only and speed 100Mbps.
+*    Auto for duplex only and speed 10Mbps.
+*    1000Mbps Full duplex.
+*    100Mbps Full duplex.
+*    100Mbps Half duplex.
+*    10Mbps Full duplex.
+*    10Mbps Half duplex.
+*/
+
+typedef enum
+{
+    SPEED_AUTO_DUPLEX_AUTO,
+    SPEED_1000_DUPLEX_AUTO,
+    SPEED_100_DUPLEX_AUTO,
+    SPEED_10_DUPLEX_AUTO,
+    SPEED_AUTO_DUPLEX_FULL,
+    SPEED_AUTO_DUPLEX_HALF,
+    SPEED_1000_DUPLEX_FULL,
+    SPEED_1000_DUPLEX_HALF,
+    SPEED_100_DUPLEX_FULL,
+    SPEED_100_DUPLEX_HALF,
+    SPEED_10_DUPLEX_FULL,
+    SPEED_10_DUPLEX_HALF
+}GT_PHY_AUTO_MODE;
+
+
+/*
+* typedef: enum GT_PHY_PAUSE_MODE
+*
+* Description: Enumeration of Pause Mode in the Phy.
+*
+* Enumerations:
+*    GT_PHY_NO_PAUSE        - disable pause
+*    GT_PHY_PAUSE        - support pause
+*    GT_PHY_ASYMMETRIC_PAUSE    - support asymmetric pause
+*    GT_PHY_BOTH_PAUSE    - support both pause and asymmetric pause
+*/
+typedef enum
+{
+    GT_PHY_NO_PAUSE = 0,
+    GT_PHY_PAUSE,
+    GT_PHY_ASYMMETRIC_PAUSE,
+    GT_PHY_BOTH_PAUSE
+} GT_PHY_PAUSE_MODE;
+
+
+/*
+* typedef: enum GT_PHY_SPEED
+*
+* Description: Enumeration of Phy Speed
+*
+* Enumerations:
+*    PHY_SPEED_10_MBPS   - 10Mbps
+*    PHY_SPEED_100_MBPS    - 100Mbps
+*    PHY_SPEED_1000_MBPS - 1000Mbps
+*/
+typedef enum
+{
+    PHY_SPEED_10_MBPS,
+    PHY_SPEED_100_MBPS,
+    PHY_SPEED_1000_MBPS
+} GT_PHY_SPEED;
+
+
+/*
+* typedef: enum GT_SERDES_MODE
+*
+* Description: Enumeration of Serdes mode
+*
+* Enumerations:
+*    PHY_SERDES_100FX     - 100 FX
+*    PHY_SERDES_1000X     - 1000 X
+*    PHY_SERDES_SGMII_PHY - SGMII PHY
+*    PHY_SERDES_SGMII_MAC - SGMII MAC
+*/
+typedef enum
+{
+    PHY_SERDES_100FX = 0,
+    PHY_SERDES_1000X,
+    PHY_SERDES_SGMII_PHY,
+    PHY_SERDES_SGMII_MAC
+} GT_SERDES_MODE;
+
+
+/*
+* typedef: enum GT_EDETECT_MODE
+*
+* Description: Enumeration of Energy Detect mode
+*
+* Enumerations:
+*    GT_EDETECT_OFF        - Energy Detect disabled
+*    GT_EDETECT_SENSE_PULSE    - Energy Detect enabled with sense and pulse
+*    GT_EDETECT_SENSE    - Energy Detect enabled only with sense
+*/
+typedef enum
+{
+    GT_EDETECT_OFF = 0,
+    GT_EDETECT_SENSE_PULSE,
+    GT_EDETECT_SENSE
+} GT_EDETECT_MODE;
+
+/*
+ * typedef: enum GT_INGRESS_MODE
+ *
+ * Description: Enumeration of the port ingress mode.
+ *
+ * Enumerations:
+ *   GT_UNMODIFY_INGRESS - frames are receive unmodified.
+ *   GT_TRAILER_INGRESS  - all frames are received with trailer.
+ *   GT_UNTAGGED_INGRESS  - remove tag on receive (for double tagging).
+ *   GT_CPUPORT_INGRESS - no trailer. used to identify the CPU port for IGMP/MLD Snooping
+ */
+typedef enum
+{
+    GT_UNMODIFY_INGRESS = 0,  /* 0x00 */
+    GT_TRAILER_INGRESS,       /* 0x01 */
+    GT_UNTAGGED_INGRESS,      /* 0x10 */
+    GT_CPUPORT_INGRESS        /* 0x11 */
+} GT_INGRESS_MODE;
+
+
+/*
+ * typedef: enum GT_EGRESS_FLOOD
+ *
+ * Description: Enumeration of the port ingress mode.
+ *
+ * Enumerations:
+ *   GT_BLOCK_EGRESS_UNKNOWN - do not egress frame with unknown DA
+ *   GT_BLOCK_EGRESS_UNKNOWN_MULTICAST - do not egress frame with unknown multicast DA
+ *   GT_BLOCK_EGRESS_UNKNOWN_UNIICAST - do not egress frame with unknown unicast DA
+ *   GT_BLOCK_EGRESS_NONE - egress all frames with unknown DA
+ */
+typedef enum
+{
+    GT_BLOCK_EGRESS_UNKNOWN = 0,
+    GT_BLOCK_EGRESS_UNKNOWN_MULTICAST,
+    GT_BLOCK_EGRESS_UNKNOWN_UNICAST,
+    GT_BLOCK_EGRESS_NONE
+} GT_EGRESS_FLOOD;
+
+
+/*
+ *  typedef: enum GT_MC_RATE
+ *
+ *  Description: Enumeration of the port ingress mode.
+ *
+ *  Enumerations:
+ *      GT_MC_3_PERCENT_RL   - multicast rate is limited to 3 percent.
+ *      GT_MC_6_PERCENT_RL   - multicast rate is limited to 6 percent.
+ *      GT_MC_12_PERCENT_RL  - multicast rate is limited to 12 percent.
+ *      GT_MC_100_PERCENT_RL - unlimited multicast rate.
+ */
+typedef enum
+{
+    GT_MC_3_PERCENT_RL = 0,
+    GT_MC_6_PERCENT_RL,
+    GT_MC_12_PERCENT_RL,
+    GT_MC_100_PERCENT_RL,
+} GT_MC_RATE;
+
+
+/*
+ *  typedef: enum GT_INGRESS_RATE_MODE
+ *
+ *  Description: Enumeration of the port ingress rate limit mode.
+ *
+ *  Enumerations:
+ *      GT_RATE_PRI_BASE   - Priority based rate limiting
+ *        GT_RATE_BURST_BASE - Burst Size based rate limiting
+ */
+typedef enum
+{
+    GT_RATE_PRI_BASE = 0,
+    GT_RATE_BURST_BASE
+} GT_INGRESS_RATE_MODE;
+
+
+/*
+ *  typedef: enum GT_PORT_SCHED_MODE
+ *
+ *  Description: Enumeration of port scheduling mode
+ *
+ *  Fields:
+ *         GT_PORT_SCHED_WEIGHTED_RRB - use 8,4,2,1 weighted fair scheduling
+ *         GT_PORT_SCHED_STRICT_PRI3 - use a strict for priority 3 and weighted
+ *                                    round robin for the priority 2,1,and 0
+ *         GT_PORT_SCHED_STRICT_PRI2_3 - use a strict for priority 2,3 and weighted
+ *                                    round robin for the priority 1,and 0
+ *         GT_PORT_SCHED_STRICT_PRI - use a strict priority scheme
+ *
+ *  Comment:
+ */
+typedef enum
+{
+    GT_PORT_SCHED_WEIGHTED_RRB = 0,
+    GT_PORT_SCHED_STRICT_PRI3,
+    GT_PORT_SCHED_STRICT_PRI2_3,
+    GT_PORT_SCHED_STRICT_PRI
+} GT_PORT_SCHED_MODE;
+
+
+/*
+ *  typedef: struct GT_PORT_STAT
+ *
+ *  Description: port statistic struct.
+ *
+ *  Fields:
+ *      rxCtr   - port receive counter.
+ *      txCtr   - port transmit counter.
+ *      dropped - dropped frame counter.
+ *
+ *  Comment:
+ *        dropped frame counter is supported by only limited devices.
+ *        At this moment, 88E6061/88E6065 are the devices supporting
+ *        dropped frame counter.
+ */
+typedef struct
+{
+    GT_U16  rxCtr;
+    GT_U16  txCtr;
+    GT_U16  dropped;
+} GT_PORT_STAT;
+
+/*
+ *  typedef: struct GT_PORT_STAT2
+ *
+ *  Description: port statistic struct.
+ *
+ *  Fields:
+ *      inDiscardLo - InDiscards Low Frame Counter
+ *      inDiscardHi - InDiscards High Frame Counter
+ *      inFiltered  - InFiltered Frame Counter
+ *      outFiltered - OutFiltered Frame Counter
+ *
+ *  Comment:
+ */
+typedef struct
+{
+    GT_U16  inDiscardLo;
+    GT_U16  inDiscardHi;
+    GT_U16  inFiltered;
+    GT_U16  outFiltered;
+} GT_PORT_STAT2;
+
+
+/*
+ **  typedef: struct GT_PORT_Q_COUNTERS
+ **
+ **  Description: port queue statistic struct.
+ **
+ **  Fields:
+ **      OutQ_Size - port egress queue size coi
+ **      Rsv_Size  - ingress reserved e counter
+ **
+ **/
+typedef struct
+{
+    GT_U16  OutQ_Size;
+    GT_U16  Rsv_Size;
+} GT_PORT_Q_STAT;
+
+/*
+ * typedef: enum GT_CTR_MODE
+ *
+ * Description: Enumeration of the port counters mode.
+ *
+ * Enumerations:
+ *   GT_CTR_ALL    - In this mode the counters counts Rx receive and transmit
+ *                   frames.
+ *   GT_CTR_ERRORS - In this mode the counters counts Rx Errors and collisions.
+ */
+typedef enum
+{
+    GT_CTR_ALL = 0,
+    GT_CTR_ERRORS,
+} GT_CTR_MODE;
+
+typedef struct _GT_QD_DEV GT_QD_DEV;
+
+/*
+ * semaphore related definitions.
+ * User Applications may register Semaphore functions using following definitions
+ */
+typedef enum
+{
+    GT_SEM_EMPTY,
+    GT_SEM_FULL
+} GT_SEM_BEGIN_STATE;
+
+typedef GT_SEM (*FGT_SEM_CREATE)(
+                        GT_SEM_BEGIN_STATE state);
+typedef GT_STATUS (*FGT_SEM_DELETE)(
+                        GT_SEM semId);
+typedef GT_STATUS (*FGT_SEM_TAKE)(
+                        GT_SEM semId, GT_U32 timOut);
+typedef GT_STATUS (*FGT_SEM_GIVE)(
+                        GT_SEM semId);
+
+typedef struct
+{
+    FGT_SEM_CREATE    semCreate;     /* create semapore */
+    FGT_SEM_DELETE    semDelete;     /* delete the semapore */
+    FGT_SEM_TAKE    semTake;    /* try to get a semapore */
+    FGT_SEM_GIVE    semGive;    /* return semaphore */
+}GT_SEM_ROUTINES;
+
+/*
+ * definitions for registering Hardware access function.
+ *
+*/
+/*#ifdef GT_RMGMT_ACCESS */
+#if 1
+/*
+ *  * Definition for the direction in HW_DEV_RW_REG structure.
+ *  */
+#define HW_REG_READ                     0
+#define HW_REG_WRITE            1
+#define HW_REG_WAIT_TILL_0      2
+#define HW_REG_WAIT_TILL_1      3
+
+/* HW_ACCESS_READ_REG and HW_ACCESS_WRITE_REG */
+typedef struct _HW_DEV_RW_REG
+{
+  unsigned long cmd;  /*INPUT:HW_REG_READ, HW_REG_WRITE, HW_REG_WAIT_TILL_0 or HW_REG_WAIT_TILL_1 */
+  unsigned long addr; /*INPUT:SMI Address */
+  unsigned long reg;  /*INPUT:Register offset */
+  unsigned long data; /*INPUT,OUTPUT:Value in the Register or Bit number */
+} HW_DEV_RW_REG;
+
+#define  MAX_ACCESS_REG_NUM  12
+
+typedef struct _HW_DEV_REG_ACCESS
+{
+        unsigned long   entries;
+        HW_DEV_RW_REG   rw_reg_list[MAX_ACCESS_REG_NUM]; /* INPUT,OUTPUT: Reg Access information */
+} HW_DEV_REG_ACCESS;
+
+
+typedef GT_BOOL (*FGT_HW_ACCESS)(GT_QD_DEV* dev, HW_DEV_REG_ACCESS *regList);
+#endif
+
+/*
+ * definitions for registering MII access functions.
+ *
+*/
+typedef GT_BOOL (*FGT_READ_MII)(
+                        GT_QD_DEV*   dev,
+                        unsigned int phyAddr,
+                        unsigned int miiReg,
+                        unsigned int* value);
+typedef GT_BOOL (*FGT_WRITE_MII)(
+                        GT_QD_DEV*   dev,
+                        unsigned int phyAddr,
+                        unsigned int miiReg,
+                        unsigned int value);
+typedef GT_BOOL (*FGT_INT_HANDLER)(
+                        GT_QD_DEV*   dev,
+                        GT_U16*);
+
+typedef struct _BSP_FUNCTIONS
+{
+    GT_U32        hwAccessMod;    /* Hardware access mode */
+    FGT_READ_MII     readMii;    /* read MII Registers */
+    FGT_WRITE_MII     writeMii;    /* write MII Registers */
+#ifdef GT_RMGMT_ACCESS
+    FGT_HW_ACCESS   hwAccess;    /* Hardware access function */
+#endif
+    FGT_SEM_CREATE    semCreate;     /* create semapore */
+    FGT_SEM_DELETE    semDelete;     /* delete the semapore */
+    FGT_SEM_TAKE    semTake;    /* try to get a semapore */
+    FGT_SEM_GIVE    semGive;    /* return semaphore */
+}BSP_FUNCTIONS;
+
+
+/*
+ *    Type definition for MIB counter operation
+*/
+typedef enum
+{
+    STATS_FLUSH_ALL,            /* Flush all counters for all ports */
+    STATS_FLUSH_PORT,           /* Flush all counters for a port */
+    STATS_READ_COUNTER,         /* Read a specific counter from a port */
+    STATS_READ_REALTIME_COUNTER,    /* Read a realtime counter from a port */
+    STATS_READ_ALL,              /* Read all counters from a port */
+    STATS_READ_COUNTER_CLEAR,   /* For RMU page2,Read a specific counter from a port and clear*/
+    STATS_READ_ALL_CLEAR        /* For RMU page2,Read all counters from a port and clear*/
+
+} GT_STATS_OPERATION;
+
+typedef struct _GT_STATS_COUNTER_SET
+{
+    GT_U32    InUnicasts;
+    GT_U32    InBroadcasts;
+    GT_U32    InPause;
+    GT_U32    InMulticasts;
+    GT_U32    InFCSErr;
+    GT_U32    AlignErr;
+    GT_U32    InGoodOctets;
+    GT_U32    InBadOctets;
+    GT_U32    Undersize;
+    GT_U32    Fragments;
+    GT_U32    In64Octets;        /* 64 Octets */
+    GT_U32    In127Octets;    /* 65 to 127 Octets */
+    GT_U32    In255Octets;    /* 128 to 255 Octets */
+    GT_U32    In511Octets;    /* 256 to 511 Octets */
+    GT_U32    In1023Octets;    /* 512 to 1023 Octets */
+    GT_U32    InMaxOctets;    /* 1024 to Max Octets */
+    GT_U32    Jabber;
+    GT_U32    Oversize;
+    GT_U32    InDiscards;
+    GT_U32    Filtered;
+    GT_U32    OutUnicasts;
+    GT_U32    OutBroadcasts;
+    GT_U32    OutPause;
+    GT_U32    OutMulticasts;
+    GT_U32    OutFCSErr;
+    GT_U32    OutGoodOctets;
+    GT_U32    Out64Octets;    /* 64 Octets */
+    GT_U32    Out127Octets;    /* 65 to 127 Octets */
+    GT_U32    Out255Octets;    /* 128 to 255 Octets */
+    GT_U32    Out511Octets;    /* 256 to 511 Octets */
+    GT_U32    Out1023Octets;    /* 512 to 1023 Octets */
+    GT_U32    OutMaxOctets;    /* 1024 to Max Octets */
+    GT_U32    Collisions;
+    GT_U32    Late;
+    GT_U32    Excessive;
+    GT_U32    Multiple;
+    GT_U32    Single;
+    GT_U32    Deferred;
+    GT_U32    OutDiscards;
+
+} GT_STATS_COUNTER_SET;
+
+
+typedef enum
+{
+    STATS_InUnicasts = 0,
+    STATS_InBroadcasts,
+    STATS_InPause,
+    STATS_InMulticasts,
+    STATS_InFCSErr,
+    STATS_AlignErr,
+    STATS_InGoodOctets,
+    STATS_InBadOctets,
+    STATS_Undersize,
+    STATS_Fragments,
+    STATS_In64Octets,
+    STATS_In127Octets,
+    STATS_In255Octets,
+    STATS_In511Octets,
+    STATS_In1023Octets,
+    STATS_InMaxOctets,
+    STATS_Jabber,
+    STATS_Oversize,
+    STATS_InDiscards,
+    STATS_Filtered,
+    STATS_OutUnicasts,
+    STATS_OutBroadcasts,
+    STATS_OutPause,
+    STATS_OutMulticasts,
+    STATS_OutFCSErr,
+    STATS_OutGoodOctets,
+    STATS_Out64Octets,
+    STATS_Out127Octets,
+    STATS_Out255Octets,
+    STATS_Out511Octets,
+    STATS_Out1023Octets,
+    STATS_OutMaxOctets,
+    STATS_Collisions,
+    STATS_Late,
+    STATS_Excessive,
+    STATS_Multiple,
+    STATS_Single,
+    STATS_Deferred,
+    STATS_OutDiscards
+
+} GT_STATS_COUNTERS;
+/*
+ * typedef: enum GT_HISTOGRAM_MODE
+ *
+ * Description: Enumeration of the histogram counters mode.
+ *
+ * Enumerations:
+ *   GT_COUNT_RX_ONLY - In this mode, Rx Histogram Counters are counted.
+ *   GT_COUNT_TX_ONLY - In this mode, Tx Histogram Counters are counted.
+ *   GT_COUNT_RX_TX   - In this mode, Rx and Tx Histogram Counters are counted.
+ */
+typedef enum
+{
+    GT_COUNT_RX_ONLY = 0,
+    GT_COUNT_TX_ONLY,
+    GT_COUNT_RX_TX
+} GT_HISTOGRAM_MODE;
+
+/*
+    Counter set 2 is used by 88E6183
+*/
+typedef struct _GT_STATS_COUNTER_SET2
+{
+    GT_U32    InGoodOctetsHi;
+    GT_U32    InGoodOctetsLo;
+    GT_U32    InBadOctets;
+    GT_U32    OutDiscards;
+    GT_U32    InGoodFrames;
+    GT_U32    InBadFrames;
+    GT_U32    InBroadcasts;
+    GT_U32    InMulticasts;
+    /*
+        Histogram Counters : Rx Only, Tx Only, or both Rx and Tx
+        (refer to Histogram Mode)
+    */
+    GT_U32    Octets64;        /* 64 Octets */
+    GT_U32    Octets127;        /* 65 to 127 Octets */
+    GT_U32    Octets255;        /* 128 to 255 Octets */
+    GT_U32    Octets511;        /* 256 to 511 Octets */
+    GT_U32    Octets1023;        /* 512 to 1023 Octets */
+    GT_U32    OctetsMax;        /* 1024 to Max Octets */
+    GT_U32    OutOctetsHi;
+    GT_U32    OutOctetsLo;
+    GT_U32    OutFrames;
+    GT_U32    Excessive;
+    GT_U32    OutMulticasts;
+    GT_U32    OutBroadcasts;
+    GT_U32    InBadMACCtrl;
+
+    GT_U32    OutPause;
+    GT_U32    InPause;
+    GT_U32    InDiscards;
+    GT_U32    Undersize;
+    GT_U32    Fragments;
+    GT_U32    Oversize;
+    GT_U32    Jabber;
+    GT_U32    MACRcvErr;
+    GT_U32    InFCSErr;
+    GT_U32    Collisions;
+    GT_U32    Late;
+
+} GT_STATS_COUNTER_SET2;
+
+
+typedef enum
+{
+    STATS2_InGoodOctetsHi = 0,
+    STATS2_InGoodOctetsLo,
+    STATS2_InBadOctets,
+
+    STATS2_OutDiscards,
+    STATS2_InGoodFrames,
+    STATS2_InBadFrames,
+    STATS2_InBroadcasts,
+    STATS2_InMulticasts,
+    STATS2_64Octets,
+    STATS2_127Octets,
+    STATS2_255Octets,
+    STATS2_511Octets,
+    STATS2_1023Octets,
+    STATS2_MaxOctets,
+    STATS2_OutOctetsHi,
+    STATS2_OutOctetsLo,
+    STATS2_OutFrames,
+    STATS2_Excessive,
+    STATS2_OutMulticasts,
+    STATS2_OutBroadcasts,
+    STATS2_InBadMACCtrl,
+    STATS2_OutPause,
+    STATS2_InPause,
+    STATS2_InDiscards,
+    STATS2_Undersize,
+    STATS2_Fragments,
+    STATS2_Oversize,
+    STATS2_Jabber,
+    STATS2_MACRcvErr,
+    STATS2_InFCSErr,
+    STATS2_Collisions,
+    STATS2_Late
+
+} GT_STATS_COUNTERS2;
+
+/*
+    Counter set 3 is used by 88E6093 and 88E6065 and later
+*/
+typedef struct _GT_STATS_COUNTER_SET3
+{
+    GT_U32    InGoodOctetsLo;    /* offset 0 */
+    GT_U32    InGoodOctetsHi;    /* offset 1, not supported by 88E6065 */
+    GT_U32    InBadOctets;        /* offset 2 */
+    GT_U32    OutFCSErr;            /* offset 3 */
+    GT_U32    InUnicasts;            /* offset 4 */
+    GT_U32    Deferred;            /* offset 5 */
+    GT_U32    InBroadcasts;        /* offset 6 */
+    GT_U32    InMulticasts;        /* offset 7 */
+    /*
+        Histogram Counters : Rx Only, Tx Only, or both Rx and Tx
+        (refer to Histogram Mode)
+    */
+    GT_U32    Octets64;        /* 64 Octets, offset 8 */
+    GT_U32    Octets127;        /* 65 to 127 Octets, offset 9 */
+    GT_U32    Octets255;        /* 128 to 255 Octets, offset 10 */
+    GT_U32    Octets511;        /* 256 to 511 Octets, offset 11 */
+    GT_U32    Octets1023;        /* 512 to 1023 Octets, offset 12 */
+    GT_U32    OctetsMax;        /* 1024 to Max Octets, offset 13 */
+    GT_U32    OutOctetsLo;    /* offset 14 */
+    GT_U32    OutOctetsHi;    /* offset 15, not supported by 88E6065 */
+    GT_U32    OutUnicasts;    /* offset 16 */
+    GT_U32    Excessive;        /* offset 17 */
+    GT_U32    OutMulticasts;    /* offset 18 */
+    GT_U32    OutBroadcasts;    /* offset 19 */
+    GT_U32    Single;            /* offset 20 */
+
+    GT_U32    OutPause;        /* offset 21 */
+    GT_U32    InPause;            /* offset 22 */
+    GT_U32    Multiple;        /* offset 23 */
+    GT_U32    Undersize;        /* offset 24 */
+    GT_U32    Fragments;        /* offset 25 */
+    GT_U32    Oversize;        /* offset 26 */
+    GT_U32    Jabber;            /* offset 27 */
+    GT_U32    InMACRcvErr;    /* offset 28 */
+    GT_U32    InFCSErr;        /* offset 29 */
+    GT_U32    Collisions;        /* offset 30 */
+    GT_U32    Late;                /* offset 31 */
+
+} GT_STATS_COUNTER_SET3;
+
+
+typedef enum
+{
+    STATS3_InGoodOctetsLo = 0,
+    STATS3_InGoodOctetsHi,
+    STATS3_InBadOctets,
+
+    STATS3_OutFCSErr,
+    STATS3_InUnicasts,
+    STATS3_Deferred,            /* offset 5 */
+    STATS3_InBroadcasts,
+    STATS3_InMulticasts,
+    STATS3_64Octets,
+    STATS3_127Octets,
+    STATS3_255Octets,            /* offset 10 */
+    STATS3_511Octets,
+    STATS3_1023Octets,
+    STATS3_MaxOctets,
+    STATS3_OutOctetsLo,
+    STATS3_OutOctetsHi,
+    STATS3_OutUnicasts,        /* offset 16 */
+    STATS3_Excessive,
+    STATS3_OutMulticasts,
+    STATS3_OutBroadcasts,
+    STATS3_Single,
+    STATS3_OutPause,
+    STATS3_InPause,
+    STATS3_Multiple,
+    STATS3_Undersize,            /* offset 24 */
+    STATS3_Fragments,
+    STATS3_Oversize,
+    STATS3_Jabber,
+    STATS3_InMACRcvErr,
+    STATS3_InFCSErr,
+    STATS3_Collisions,
+    STATS3_Late                    /* offset 31 */
+
+} GT_STATS_COUNTERS3;
+
+/*
+    Counter set RMU page2 is used by 88E6320 and later
+*/
+typedef struct _GT_STATS_COUNTER_SET_PAGE2
+{
+	/* Bank 0 */
+    GT_U32    InGoodOctetsLo;     /* offset 0 */
+    GT_U32    InGoodOctetsHi;     /* offset 1 */
+    GT_U32    InBadOctets;        /* offset 2 */
+    GT_U32    OutFCSErr;          /* offset 3 */
+    GT_U32    InUnicasts;         /* offset 4 */
+    GT_U32    Deferred;           /* offset 5 */
+    GT_U32    InBroadcasts;       /* offset 6 */
+    GT_U32    InMulticasts;       /* offset 7 */
+    /*
+        Histogram Counters : Rx Only, Tx Only, or both Rx and Tx
+        (refer to Histogram Mode)
+    */
+    GT_U32    Octets64;         /* 64 Octets, offset 8 */
+    GT_U32    Octets127;        /* 65 to 127 Octets, offset 9 */
+    GT_U32    Octets255;        /* 128 to 255 Octets, offset 10 */
+    GT_U32    Octets511;        /* 256 to 511 Octets, offset 11 */
+    GT_U32    Octets1023;       /* 512 to 1023 Octets, offset 12 */
+    GT_U32    OctetsMax;        /* 1024 to Max Octets, offset 13 */
+    GT_U32    OutOctetsLo;      /* offset 14 */
+    GT_U32    OutOctetsHi;      /* offset 15 */
+    GT_U32    OutUnicasts;      /* offset 16 */
+    GT_U32    Excessive;        /* offset 17 */
+    GT_U32    OutMulticasts;    /* offset 18 */
+    GT_U32    OutBroadcasts;    /* offset 19 */
+    GT_U32    Single;           /* offset 20 */
+
+    GT_U32    OutPause;         /* offset 21 */
+    GT_U32    InPause;          /* offset 22 */
+    GT_U32    Multiple;         /* offset 23 */
+    GT_U32    Undersize;        /* offset 24 */
+    GT_U32    Fragments;        /* offset 25 */
+    GT_U32    Oversize;         /* offset 26 */
+    GT_U32    Jabber;           /* offset 27 */
+    GT_U32    InMACRcvErr;      /* offset 28 */
+    GT_U32    InFCSErr;         /* offset 29 */
+    GT_U32    Collisions;       /* offset 30 */
+    GT_U32    Late;             /* offset 31 */
+	/* Bank 1 */
+    GT_U32    InDiscards;       /* offset 0x00 */
+    GT_U32    InFiltered;       /* offset 0x01 */
+    GT_U32    InAccepted;       /* offset 0x02 */
+    GT_U32    InBadAccepted;    /* offset 0x03 */
+    GT_U32    InGoodAvbClassA;  /* offset 0x04 */
+    GT_U32    InGoodAvbClassB;  /* offset 0x05 */
+    GT_U32    InBadAvbClassA ;  /* offset 0x06 */
+    GT_U32    InBadAvbClassB ;  /* offset 0x07 */
+    GT_U32    TCAMCounter0;     /* offset 0x08 */
+    GT_U32    TCAMCounter1;     /* offset 0x09 */
+    GT_U32    TCAMCounter2;     /* offset 0x0a */
+    GT_U32    TCAMCounter3;     /* offset 0x0b */
+    GT_U32    reserved_c;     /* offset 0x0c */
+    GT_U32    reserved_d;     /* offset 0x0d */
+    GT_U32    InDaUnknown ;     /* offset 0x0e */
+    GT_U32    InMGMT;           /* offset 0x0f */
+    GT_U32    OutQueue0;        /* offset 0x10 */
+    GT_U32    OutQueue1;        /* offset 0x11 */
+    GT_U32    OutQueue2;        /* offset 0x12 */
+    GT_U32    OutQueue3;        /* offset 0x13 */
+    GT_U32    OutQueue4;        /* offset 0x14 */
+    GT_U32    OutQueue5;        /* offset 0x15 */
+    GT_U32    OutQueue6;        /* offset 0x16 */
+    GT_U32    OutQueue7;        /* offset 0x17 */
+    GT_U32    OutCutThrough;    /* offset 0x18 */
+    GT_U32    reserved_19 ;     /* offset 0x19 */
+    GT_U32    OutOctetsA;       /* offset 0x1a */
+    GT_U32    OutOctetsB;       /* offset 0x1b */
+    GT_U32    reserved_1c;      /* offset 0x1c */
+    GT_U32    reserved_1d;      /* offset 0x1d */
+    GT_U32    reserved_1e;      /* offset 0x1e */
+    GT_U32    OutMGMT;          /* offset 0x1f */
+
+} GT_STATS_COUNTER_SET_PAGE2;
+
+#define GT_PAGE2_BANK1 0x80
+typedef enum
+{
+	/* Bank 0 */
+    STATS_PG2_InGoodOctetsLo = 0,
+    STATS_PG2_InGoodOctetsHi,
+    STATS_PG2_InBadOctets,
+
+    STATS_PG2_OutFCSErr,
+    STATS_PG2_InUnicasts,
+    STATS_PG2_Deferred,            /* offset 5 */
+    STATS_PG2_InBroadcasts,
+    STATS_PG2_InMulticasts,
+    STATS_PG2_64Octets,
+    STATS_PG2_127Octets,
+    STATS_PG2_255Octets,            /* offset 10 */
+    STATS_PG2_511Octets,
+    STATS_PG2_1023Octets,
+    STATS_PG2_MaxOctets,
+    STATS_PG2_OutOctetsLo,
+    STATS_PG2_OutOctetsHi,
+    STATS_PG2_OutUnicasts,        /* offset 16 */
+    STATS_PG2_Excessive,
+    STATS_PG2_OutMulticasts,
+    STATS_PG2_OutBroadcasts,
+    STATS_PG2_Single,
+    STATS_PG2_OutPause,
+    STATS_PG2_InPause,
+    STATS_PG2_Multiple,
+    STATS_PG2_Undersize,            /* offset 24 */
+    STATS_PG2_Fragments,
+    STATS_PG2_Oversize,
+    STATS_PG2_Jabber,
+    STATS_PG2_InMACRcvErr,
+    STATS_PG2_InFCSErr,
+    STATS_PG2_Collisions,
+    STATS_PG2_Late,                    /* offset 31 */
+	/* Bank 1 */
+    STATS_PG2_InDiscards      = GT_PAGE2_BANK1+0x00,
+    STATS_PG2_InFiltered      = GT_PAGE2_BANK1+0x01,
+    STATS_PG2_InAccepted      = GT_PAGE2_BANK1+0x02,
+    STATS_PG2_InBadAccepted   = GT_PAGE2_BANK1+0x03,
+    STATS_PG2_InGoodAvbClassA = GT_PAGE2_BANK1+0x04,
+    STATS_PG2_InGoodAvbClassB = GT_PAGE2_BANK1+0x05,
+    STATS_PG2_InBadAvbClassA  = GT_PAGE2_BANK1+0x06,
+    STATS_PG2_InBadAvbClassB  = GT_PAGE2_BANK1+0x07,
+    STATS_PG2_TCAMCounter0    = GT_PAGE2_BANK1+0x08,
+    STATS_PG2_TCAMCounter1    = GT_PAGE2_BANK1+0x09,
+    STATS_PG2_TCAMCounter2    = GT_PAGE2_BANK1+0x0a,
+    STATS_PG2_TCAMCounter3    = GT_PAGE2_BANK1+0x0b,
+    STATS_PG2_InDaUnknown     = GT_PAGE2_BANK1+0x0e,
+    STATS_PG2_InMGMT          = GT_PAGE2_BANK1+0x0f,
+    STATS_PG2_OutQueue0       = GT_PAGE2_BANK1+0x10,
+    STATS_PG2_OutQueue1       = GT_PAGE2_BANK1+0x11,
+    STATS_PG2_OutQueue2       = GT_PAGE2_BANK1+0x12,
+    STATS_PG2_OutQueue3       = GT_PAGE2_BANK1+0x13,
+    STATS_PG2_OutQueue4       = GT_PAGE2_BANK1+0x14,
+    STATS_PG2_OutQueue5       = GT_PAGE2_BANK1+0x15,
+    STATS_PG2_OutQueue6       = GT_PAGE2_BANK1+0x16,
+    STATS_PG2_OutQueue7       = GT_PAGE2_BANK1+0x17,
+    STATS_PG2_OutCutThrough   = GT_PAGE2_BANK1+0x18,
+    STATS_PG2_OutOctetsA      = GT_PAGE2_BANK1+0x1a,
+    STATS_PG2_OutOctetsB      = GT_PAGE2_BANK1+0x1b,
+    STATS_PG2_OutMGMT         = GT_PAGE2_BANK1+0x1f
+
+} GT_STATS_COUNTERS_PAGE2;
+
+/* Switch Mac/Wol/Wof definitions */
+
+/*
+ * typedef: struct GT_1000T_MASTER_SLAVE
+ *
+ * Description: 1000Base-T Master/Slave Configuration
+ *
+ * Fields:
+ *      autoConfig   - GT_TRUE for auto-config, GT_FALSE for manual setup.
+ *      masterPrefer - GT_TRUE if Master configuration is preferred.
+ *
+ */
+typedef struct _GT_1000T_MASTER_SLAVE
+{
+    GT_BOOL    autoConfig;
+    GT_BOOL masterPrefer;
+} GT_1000T_MASTER_SLAVE;
+
+
+#define GT_MDI_PAIR_NUM         4    /* (1,2),(3,6),(4,5),(7,8) */
+#define GT_CHANNEL_PAIR_NUM     2    /* (channel A,B),(channel C,D) */
+
+
+/*
+ * typedef: enum GT_PHY_LINK_STATUS
+ *
+ * Description: Enumeration of Link Status
+ *
+ * Enumerations:
+ *        GT_PHY_LINK_OFF        - No Link
+ *        GT_PHY_LINK_COPPER    - Link on Copper
+ *        GT_PHY_LINK_FIBER    - Link on Fiber
+ */
+typedef enum
+{
+    GT_PHY_LINK_OFF = 0,
+    GT_PHY_LINK_COPPER = 1,
+    GT_PHY_LINK_FIBER = 2
+} GT_PHY_LINK_STATUS;
+
+
+/* Definition for packet generator */
+
+/* Payload */
+typedef enum
+{
+    GT_PG_PAYLOAD_RANDOM = 0,    /* Pseudo-random */
+    GT_PG_PAYLOAD_5AA5        /* 5A,A5,5A,A5,... */
+} GT_PG_PAYLOAD;
+
+/* Length */
+typedef enum
+{
+    GT_PG_LENGTH_64 = 0,        /* 64 bytes */
+    GT_PG_LENGTH_1514
+} GT_PG_LENGTH;
+
+/* Error */
+typedef enum
+{
+    GT_PG_TX_NORMAL = 0,        /* No Error */
+    GT_PG_TX_ERROR            /* Tx packets with CRC error and Symbol error */
+} GT_PG_TX;
+
+/* Structure for packet generator */
+typedef struct
+{
+    GT_PG_PAYLOAD  payload;
+    GT_PG_LENGTH   length;
+    GT_PG_TX       tx;
+} GT_PG;
+
+
+/*
+ * typedef: enum GT_TEST_STATUS
+ *
+ * Description: Enumeration of VCT test status
+ *
+ * Enumerations:
+ *      GT_TEST_FAIL    - virtual cable test failed.
+ *      GT_NORMAL_CABLE - normal cable.
+ *      GT_IMPEDANCE_MISMATCH - impedance mismatch.
+ *      GT_OPEN_CABLE   - open in cable.
+ *      GT_SHORT_CABLE  - short in cable.
+ *
+ */
+typedef enum
+{
+    GT_TEST_FAIL,
+    GT_NORMAL_CABLE,
+    GT_IMPEDANCE_MISMATCH,
+    GT_OPEN_CABLE,
+    GT_SHORT_CABLE,
+} GT_TEST_STATUS;
+
+
+/*
+ * typedef: enum GT_NORMAL_CABLE_LEN
+ *
+ * Description: Enumeration for normal cable length
+ *
+ * Enumerations:
+ *      GT_LESS_THAN_50M - cable length less than 50 meter.
+ *      GT_50M_80M       - cable length between 50 - 80 meter.
+ *      GT_80M_110M      - cable length between 80 - 110 meter.
+ *      GT_110M_140M     - cable length between 110 - 140 meter.
+ *      GT_MORE_THAN_140 - cable length more than 140 meter.
+ *      GT_UNKNOWN_LEN   - unknown length.
+ *
+ */
+typedef enum
+{
+    GT_LESS_THAN_50M,
+    GT_50M_80M,
+    GT_80M_110M,
+    GT_110M_140M,
+    GT_MORE_THAN_140,
+    GT_UNKNOWN_LEN,
+
+} GT_NORMAL_CABLE_LEN;
+
+
+/*
+ * typedef: enum GT_CABLE_LEN
+ *
+ * Description: Enumeration cable length
+ *
+ * Enumerations:
+ *      normCableLen - cable lenght for normal cable.
+ *      errCableLen  - for cable failure the estimate fault distance in meters.
+ *
+ */
+typedef union
+{
+    GT_NORMAL_CABLE_LEN normCableLen;
+    GT_U8               errCableLen;
+
+} GT_CABLE_LEN;
+
+/*
+ * typedef: struct GT_CABLE_STATUS
+ *
+ * Description: virtual cable diagnostic status per MDI pair.
+ *
+ * Fields:
+ *      cableStatus - VCT cable status.
+ *      cableLen    - VCT cable length.
+ *    phyType        - type of phy (100M phy or Gigabit phy)
+ */
+typedef struct
+{
+    GT_TEST_STATUS  cableStatus[GT_MDI_PAIR_NUM];
+    GT_CABLE_LEN    cableLen[GT_MDI_PAIR_NUM];
+    GT_U16        phyType;
+
+} GT_CABLE_STATUS;
+
+
+/*
+ * typedef: enum GT_CABLE_TYPE
+ *
+ * Description: Enumeration of Cable Type
+ *
+ * Enumerations:
+ *        GT_STRAIGHT_CABLE    _ straight cable
+ *      GT_CROSSOVER_CABLE     - crossover cable
+ */
+typedef enum
+{
+    GT_STRAIGHT_CABLE,
+    GT_CROSSOVER_CABLE
+
+} GT_CABLE_TYPE;
+
+
+/*
+ * typedef: enum GT_RX_CHANNEL
+ *
+ * Description: Enumeration of Receiver Channel Assignment
+ *
+ * Enumerations:
+ *        GT_CHANNEL_A   - Channel A
+ *        GT_CHANNEL_B   - Channel B
+ *        GT_CHANNEL_C   - Channel C
+ *        GT_CHANNEL_D   - Channel D
+ */
+typedef enum
+{
+    GT_CHANNEL_A,
+    GT_CHANNEL_B,
+    GT_CHANNEL_C,
+    GT_CHANNEL_D
+} GT_RX_CHANNEL;
+
+/*
+ * typedef: enum GT_POLARITY_STATUS
+ *
+ * Description: Enumeration of polarity status
+ *
+ * Enumerations:
+ *        GT_POSITIVE    - positive polarity
+ *      GT_NEGATIVE    - negative polarity
+ */
+typedef enum
+{
+    GT_POSITIVE,
+    GT_NEGATIVE
+
+} GT_POLARITY_STATUS;
+
+
+/*
+ * typedef: struct GT_1000BT_EXTENDED_STATUS
+ *
+ * Description: Currently the 1000Base-T PCS can determine the cable polarity
+ *         on pairs A,B,C,D; crossover on pairs A,B and C,D; and skew among
+ *        the pares. These status enhance the capability of the virtual cable tester
+ *
+ * Fields:
+ *      isValid        - GT_TRUE if this structure have valid information,
+ *                       GT_FALSE otherwise.
+ *                      It is valid only if 1000BASE-T Link is up.
+ *      pairSwap    - GT_CROSSOVER_CABLE, if the cable is crossover,
+ *                      GT_STRAIGHT_CABLE, otherwise
+ *        pairPolarity- GT_POSITIVE, if polarity is positive,
+ *                      GT_NEGATIVE, otherwise
+ *        pairSkew    - pair skew in units of ns
+ */
+typedef struct
+{
+    GT_BOOL                isValid;
+    GT_CABLE_TYPE        pairSwap[GT_CHANNEL_PAIR_NUM];
+    GT_POLARITY_STATUS    pairPolarity[GT_MDI_PAIR_NUM];
+    GT_U32                pairSkew[GT_MDI_PAIR_NUM];
+
+} GT_1000BT_EXTENDED_STATUS;
+
+/*
+ * typedef: struct GT_ADV_EXTENDED_STATUS
+ *
+ * Description: Currently the 1000Base-T PCS can determine the cable polarity
+ *         on pairs A,B,C,D; crossover on pairs A,B and C,D; and skew among
+ *        the pares. These status enhance the capability of the virtual cable tester
+ *
+ * Fields:
+ *      isValid        - GT_TRUE if this structure have valid information,
+ *                       GT_FALSE otherwise.
+ *                      It is valid only if 1000BASE-T Link is up.
+ *      pairSwap    - Receive channel assignement
+ *        pairPolarity- GT_POSITIVE, if polarity is positive,
+ *                      GT_NEGATIVE, otherwise
+ *        pairSkew    - pair skew in units of ns
+ *        cableLen    - cable length based on DSP
+ */
+typedef struct
+{
+    GT_BOOL            isValid;
+    GT_RX_CHANNEL      pairSwap[GT_MDI_PAIR_NUM];
+    GT_POLARITY_STATUS pairPolarity[GT_MDI_PAIR_NUM];
+    GT_U32             pairSkew[GT_MDI_PAIR_NUM];
+    GT_U32                cableLen[GT_MDI_PAIR_NUM];
+} GT_ADV_EXTENDED_STATUS;
+
+
+/*
+ * if isGigPhy in GT_CABLE_STATUS is not GT_TRUE, cableStatus and cableLen
+ * will have only 2 pairs available.
+ * One is RX Pair and the other is TX Pair.
+ */
+#define MDI_RX_PAIR        0    /* cableStatus[0] or cableLen[0] */
+#define MDI_TX_PAIR        1    /* cableStatus[1] or cableLen[1] */
+
+/* definition for Phy Type */
+#define PHY_100M        0 /* 10/100M phy, E3082 or E3083 */
+#define PHY_1000M        1 /* Gigabit phy, the rest phys */
+#define PHY_10000M        2 /* 10 Gigabit phy, unused */
+#define PHY_1000M_B        3 /* Gigabit phy which needs work-around */
+#define PHY_1000M_MP    4 /* Gigabit phy with multiple page mode */
+
+
+/* Definition for Advance Virtual Cable Test */
+
+/*
+ * typedef: enum GT_ADV_VCT_TRANS_CHAN_SEL
+ *
+ * Description: Enumeration of Advanced VCT Transmitter channel select
+ *
+ * Enumerations:
+ *        GT_ADV_VCT_NO_CROSSPAIR - Transmitter channel select is 000
+ *        GT_ADV_VCT_CROSSPAIR    - Transmitter channelselect is 100/101/110/111
+ */
+typedef enum
+{
+    /* Advanced VCT Mode */
+    GT_ADV_VCT_TCS_NO_CROSSPAIR        = 0,
+    GT_ADV_VCT_TCS_CROSSPAIR_0            = 0x4,
+    GT_ADV_VCT_TCS_CROSSPAIR_1            = 0x5,
+    GT_ADV_VCT_TCS_CROSSPAIR_2            = 0x6,
+    GT_ADV_VCT_TCS_CROSSPAIR_3            = 0x7
+} GT_ADV_VCT_TRANS_CHAN_SEL;
+
+
+typedef enum
+{
+    /* Advanced VCT Mode */
+    GT_ADV_VCT_SAVG_2        = 0,
+    GT_ADV_VCT_SAVG_4        = 1,
+    GT_ADV_VCT_SAVG_8        = 2,
+    GT_ADV_VCT_SAVG_16        = 3,
+    GT_ADV_VCT_SAVG_32        = 4,
+    GT_ADV_VCT_SAVG_64        = 5,
+    GT_ADV_VCT_SAVG_128    = 6,
+    GT_ADV_VCT_SAVG_256    = 7
+} GT_ADV_VCT_SAMPLE_AVG;
+
+typedef enum
+{
+    /* Advanced VCT Mode */
+    GT_ADV_VCT_MAX_PEAK        =0x00,
+    GT_ADV_VCT_FIRST_PEAK        =0x01,
+} GT_ADV_VCT_MOD;
+
+
+typedef unsigned int GT_ADV_VCT_PEAKDET_HYST;
+
+/*
+ * typedef: enum GT_ADV_VCT_MODE
+ *
+ * Description: Enumeration of Advanced VCT Mode and Transmitter channel select
+ *
+ * Enumerations:
+ *      GT_ADV_VCT_FIRST_PEAK   - first peak above a certain threshold is reported.
+ *      GT_ADV_VCT_MAX_PEAK     - maximum peak above a certain threshold is reported.
+ *        GT_ADV_VCT_OFFSE         - offset
+ *        GT_ADV_VCT_SAMPLE_POINT - sample point
+ *
+ *        GT_ADV_VCT_NO_CROSSPAIR - Transmitter channel select is 000
+ *        GT_ADV_VCT_CROSSPAIR    - Transmitter channelselect is 100/101/110/111
+ *   Example: mode = GT_ADV_VCT_FIRST_PEAK | GT_ADV_VCT_CROSSPAIR.
+ */
+typedef struct
+{
+    GT_ADV_VCT_MOD                    mode;
+    GT_ADV_VCT_TRANS_CHAN_SEL      transChanSel;
+    GT_ADV_VCT_SAMPLE_AVG            sampleAvg;
+    GT_ADV_VCT_PEAKDET_HYST        peakDetHyst;
+} GT_ADV_VCT_MODE;
+
+
+/*
+ * typedef: enum GT_ADV_VCT_STATUS
+ *
+ * Description: Enumeration of Advanced VCT status
+ *
+ * Enumerations:
+ *      GT_ADV_VCT_FAIL     - advanced virtual cable test failed.
+ *                             cable lengh cannot be determined.
+ *      GT_ADV_VCT_NORMAL   - normal cable.
+ *                             cable lengh may not be determined.
+ *      GT_ADV_VCT_IMP_GREATER_THAN_115 - impedance mismatch > 115 ohms
+ *                             cable lengh is valid.
+ *      GT_ADV_VCT_IMP_LESS_THAN_85 - impedance mismatch < 85 ohms
+ *                             cable lengh is valid.
+ *      GT_ADV_VCT_OPEN      - cable open
+ *                             cable lengh is valid.
+ *      GT_ADV_VCT_SHORT      - cable shorted
+ *                             cable lengh is valid.
+ *      GT_ADV_VCT_CROSS_PAIR_SHORT - cross pair short.
+ *                             cable lengh for each channel is valid.
+ */
+typedef enum
+{
+    GT_ADV_VCT_FAIL,
+    GT_ADV_VCT_NORMAL,
+    GT_ADV_VCT_IMP_GREATER_THAN_115,
+    GT_ADV_VCT_IMP_LESS_THAN_85,
+    GT_ADV_VCT_OPEN,
+    GT_ADV_VCT_SHORT,
+    GT_ADV_VCT_CROSS_PAIR_SHORT
+} GT_ADV_VCT_STATUS;
+
+
+/*
+ * typedef: struct GT_CROSS_PAIR_LIST
+ *
+ * Description: strucuture for cross pair short channels.
+ *
+ * Fields:
+ *      channel - cross pair short channel list
+ *                channel[i] is GT_TRUE if the channel[i] is cross pair short
+ *                with the current channel under test.
+ *      dist2fault - estimated distance to the shorted location.
+ *                   valid only if related channel (above) is GT_TRUE.
+ */
+typedef struct _GT_CROSS_SHORT_LIST
+{
+    GT_BOOL    channel[GT_MDI_PAIR_NUM];
+    GT_16     dist2fault[GT_MDI_PAIR_NUM];
+} GT_CROSS_SHORT_LIST;
+
+
+/*
+ * typedef: struct GT_ADV_CABLE_STATUS
+ *
+ * Description: strucuture for advanced cable status.
+ *
+ * Fields:
+ *      cableStatus - VCT cable status for each channel.
+ *      crossShort  - cross pair short list for each channel.
+ *                    Valid only if relative cableStatus is GT_ADV_VCT_CROSS_PAIR_SHORT.
+ *      dist2fault  - estimated distance to fault for each channel.
+ *                    Valid if relative cableStatus is one of the followings:
+ *                      GT_ADV_VCT_NORMAL
+ *                      GT_ADV_VCT_IMP_GREATER_THAN_115
+ *                      GT_ADV_VCT_IMP_LESS_THAN_85,
+ *                      GT_ADV_VCT_OPEN, or
+ *                        GT_ADV_VCT_SHORT
+  */
+typedef struct
+{
+    GT_ADV_VCT_STATUS   cableStatus[GT_MDI_PAIR_NUM];
+    union {
+        GT_CROSS_SHORT_LIST crossShort;
+        GT_16     dist2fault;
+    }u[GT_MDI_PAIR_NUM];
+} GT_ADV_CABLE_STATUS;
+
+
+/*
+ * Definition:
+ *        GT_LED_LINK_ACT_SPEED     - off = no link, on = link, blink = activity, blink speed = link speed
+ *        GT_LED_LINK_ACT             - off = no link, on = link, blink = activity
+ *        GT_LED_LINK                 - off = no link, on = link
+ *        GT_LED_10_LINK_ACT        - off = no link, on = 10, blink = activity
+ *        GT_LED_10_LINK            - off = no link, on = 10
+ *        GT_LED_100_LINK_ACT        - off = no link, on = 100 link, blink = activity
+ *        GT_LED_100_LINK            - off = no link, on = 100 link
+ *        GT_LED_1000_LINK_ACT    - off = no link, on = 1000 link, blink = activity
+ *        GT_LED_1000_LINK        - off = no link, on = 1000 link
+ *        GT_LED_10_100_LINK_ACT    - off = no link, on = 10 or 100 link, blink = activity
+ *        GT_LED_10_100_LINK        - off = no link, on = 10 or 100 link
+ *        GT_LED_10_1000_LINK_ACT    - off = no link, on = 10 or 1000 link, blink = activity
+ *        GT_LED_10_1000_LINK        - off = no link, on = 10 or 1000 link
+ *        GT_LED_100_1000_LINK_ACT- off = no link, on = 100 or 1000 link, blink = activity
+ *        GT_LED_100_1000_LINK    - off = no link, on = 100 or 1000 link
+ *        GT_LED_SPECIAL            - special leds
+ *        GT_LED_DUPLEX_COL        - off = half duplx, on = full duplex, blink = collision
+ *        GT_LED_ACTIVITY            - off = no link, blink on = activity
+ *        GT_LED_PTP_ACT            - blink on = PTP activity
+ *        GT_LED_FORCE_BLINK        - force blink
+ *        GT_LED_FORCE_OFF        - force off
+ *        GT_LED_FORCE_ON            - force on
+*/
+#define GT_LED_LINK_ACT_SPEED        1
+#define GT_LED_LINK_ACT            2
+#define GT_LED_LINK                3
+#define GT_LED_10_LINK_ACT            4
+#define GT_LED_10_LINK                5
+#define GT_LED_100_LINK_ACT        6
+#define GT_LED_100_LINK            7
+#define GT_LED_1000_LINK_ACT        8
+#define GT_LED_1000_LINK            9
+#define GT_LED_10_100_LINK_ACT        10
+#define GT_LED_10_100_LINK            11
+#define GT_LED_10_1000_LINK_ACT    12
+#define GT_LED_10_1000_LINK        13
+#define GT_LED_100_1000_LINK_ACT    14
+#define GT_LED_100_1000_LINK        15
+#define GT_LED_SPECIAL                16
+#define GT_LED_DUPLEX_COL            17
+#define GT_LED_ACTIVITY            18
+#define GT_LED_PTP_ACT                19
+#define GT_LED_FORCE_BLINK            20
+#define GT_LED_FORCE_OFF            21
+#define GT_LED_FORCE_ON            22
+#define GT_LED_RESERVE                23
+
+
+/*
+ * typedef: enum GT_LED_CFG
+ *
+ * Description: Enumeration for LED configuration type
+ *
+ * Enumerations:
+ *        GT_LED_CFG_LED0        - read/write led0 value (GT_LED_xxx definition)
+ *        GT_LED_CFG_LED1        - read/write led1 value
+ *        GT_LED_CFG_LED2        - read/write led2 value
+ *        GT_LED_CFG_LED3        - read/write led3 value
+ *        GT_LED_CFG_PULSE_STRETCH    - read/write pulse stretch (0 ~ 4)
+ *        GT_LED_CFG_BLINK_RATE        - read/write blink rate    (0 ~ 5)
+ *        GT_LED_CFG_SPECIAL_CONTROL    - read/write special control (port vector)
+ */
+typedef enum
+{
+    GT_LED_CFG_LED0,
+    GT_LED_CFG_LED1,
+    GT_LED_CFG_LED2,
+    GT_LED_CFG_LED3,
+    GT_LED_CFG_PULSE_STRETCH,
+    GT_LED_CFG_BLINK_RATE,
+    GT_LED_CFG_SPECIAL_CONTROL
+} GT_LED_CFG;
+
+
+/*
+ * typedef: enum GT_AVB_RECOVERED_CLOCK
+ *
+ * Description: Enumeration for recovered clock type
+ *
+ * Enumerations:
+ *        GT_PRIMARY_RECOVERED_CLOCK         - primary recovered clock
+ *        GT_SECONDARY_RECOVERED_CLOCK     - secondary recovered clock
+ */
+typedef enum
+{
+    GT_PRIMARY_RECOVERED_CLOCK,
+    GT_SECONDARY_RECOVERED_CLOCK
+} GT_AVB_RECOVERED_CLOCK;
+
+
+/* Define QAV interrupt bits */
+
+#define GT_QAV_INT_STATUS_ENQ_LMT_BIT            0x8000    /* EnQ Limit Interrupt Enable */
+#define GT_QAV_INT_STATUS_ISO_DEL_BIT            0x0400    /* Iso Delay Interrupt Enable */
+#define GT_QAV_INT_STATUS_ISO_DIS_BIT            0x0200  /* Iso Discard Interrupt Enable */
+#define GT_QAV_INT_STATUS_ISO_LIMIT_EX_BIT        0x0100  /* Iso Packet Memory Exceeded Interrupt Enable */
+
+#define GT_QAV_INT_ENABLE_ENQ_LMT_BIT            0x80  /* EnQ Limit Interrupt Enable */
+#define GT_QAV_INT_ENABLE_ISO_DEL_BIT            0x04  /* Iso Delay Interrupt Enable */
+#define GT_QAV_INT_ENABLE_ISO_DIS_BIT            0x02  /* Iso Discard Interrupt Enable */
+#define GT_QAV_INT_ENABLE_ISO_LIMIT_EX_BIT        0x01  /* Iso Packet Memory Exceeded Interrupt Enable */
+
+
+/*
+ * Typedef: enum GT_EEPROM_OPERATION
+ *
+ * Description: Defines the EEPROM Operation type
+ *
+ * Fields:
+ *      PTP_WRITE_DATA             - Write data to the EEPROM register
+ *      PTP_READ_DATA            - Read data from EEPROM register
+ *      PTP_RESTART                - Restart EEPROM oprition
+ */
+typedef enum
+{
+    GT_EEPROM_NO_OP                     = 0x0,
+    GT_EEPROM_WRITE_DATA             = 0x3,
+    GT_EEPROM_READ_DATA              = 0x4,
+    GT_EEPROM_RESTART                = 0x6,
+    GT_EEPROM_HALT                    = 0x7,
+} GT_EEPROM_OPERATION;
+
+
+/*
+ *  typedef: struct GT_EEPROM_OP_DATA
+ *
+ *  Description: data required by EEPROM Operation
+ *
+ *  Fields:
+ *      eepromPort        - physical port of the device
+ *      eepromAddr     - register address
+ *      eepromData     - data for ptp register.
+ */
+typedef struct
+{
+    GT_U32    eepromPort;
+    GT_U32    eepromBlock;
+    GT_U32    eepromAddr;
+    GT_U32    eepromData;
+} GT_EEPROM_OP_DATA;
+
+#define GT_EEPROM_OP_ST_RUNNING_MASK        0x800
+#define GT_EEPROM_OP_ST_WRITE_EN_MASK        0x400
+
+#define GT_SCRAT_MISC_REG_SCRAT_0    0x00 /* Scratch Byte 0 */
+#define GT_SCRAT_MISC_REG_SCRAT_1    0x01 /* Scratch Byte 1 */
+#define GT_SCRAT_MISC_REG_GPIO_CFG    0x60 /* GPIO Configuration */
+                                         /* 0x61 = Reserved for future use */
+#define GT_SCRAT_MISC_REG_GPIO_DIR    0x62 /* GPIO Direction */
+#define GT_SCRAT_MISC_REG_GPIO_DAT    0x63 /* GPIO Data */
+#define GT_SCRAT_MISC_REG_CFG_DAT0    0x70 /* CONFIG Data 0 */
+#define GT_SCRAT_MISC_REG_CFG_DAT1    0x71 /* CONFIG Data 1 */
+#define GT_SCRAT_MISC_REG_CFG_DAT2    0x72 /* CONFIG Data 2 */
+#define GT_SCRAT_MISC_REG_CFG_DAT3    0x73 /* CONFIG Data 3 */
+#define GT_SCRAT_MISC_REG_SYNCE        0x7C /* SyncE & TAICLK125’s Drive */
+#define GT_SCRAT_MISC_REG_P5_CLK    0x7D /* P5’s & CLK125’s Clock Drive */
+#define GT_SCRAT_MISC_REG_P6_CLK    0x7E /* P6’s Clock Drive */
+#define GT_SCRAT_MISC_REG_EEPROM    0x7F /* EEPROM Pad drive */
+#define GT_SCRAT_MISC_REG_MAX        0x80 /* Maximun register pointer */
+
+#define GT_GPIO_BIT_0    0x1
+#define GT_GPIO_BIT_1    0x2
+#define GT_GPIO_BIT_2    0x4
+#define GT_GPIO_BIT_3    0x8
+#define GT_GPIO_BIT_4    0x10
+#define GT_GPIO_BIT_5    0x20
+#define GT_GPIO_BIT_6    0x40
+
+typedef struct
+{
+    GT_U8         user : 3;
+    GT_U8         addr : 5;
+}GT_CONFIG_DATA_0;
+
+typedef struct
+{
+    GT_U8         led  : 2;
+    GT_U8         fourcol : 1;
+    GT_U8         normCx : 1;
+    GT_U8         jumbo : 1;
+    GT_U8         ee_we : 1;
+    GT_U8         fd_flow : 1;
+    GT_U8         hd_flow : 1;
+}GT_CONFIG_DATA_1;
+
+typedef struct
+{
+    GT_U8         p5_mod : 3;
+    GT_U8         bit4     : 1;
+    GT_U8         p6_mod : 3;
+}GT_CONFIG_DATA_2;
+
+typedef struct
+{
+    GT_U8         rmu_mod : 2;
+}GT_CONFIG_DATA_3;
+
+typedef struct
+{
+    union {
+        GT_U8                Byte;
+        GT_CONFIG_DATA_0    Data;
+    } cfgData0;
+    union {
+        GT_U8                Byte;
+        GT_CONFIG_DATA_0    Data;
+    } cfgData1;
+    union {
+        GT_U8                Byte;
+        GT_CONFIG_DATA_0    Data;
+    } cfgData2;
+    union {
+        GT_U8                Byte;
+        GT_CONFIG_DATA_0    Data;
+    } cfgData3;
+}GT_CONFIG_DATA;
+
+
+/* definition for Trunking */
+#define IS_TRUNK_ID_VALID(_dev, _id)    (((_id) < 16) ? 1 : 0)
+
+
+/* definition for device scan mode */
+#define SMI_AUTO_SCAN_MODE        0    /* Scan 0 or 0x10 base address to find the QD */
+#define SMI_MANUAL_MODE            1    /* Use QD located at manually defined base addr */
+#define SMI_MULTI_ADDR_MODE        2    /* Use QD at base addr and use indirect access */
+typedef struct
+{
+    GT_U32    scanMode;    /* check definition for device scan mode */
+    GT_U32    baseAddr;    /* meaningful if scanMode is not SMI_AUTO_SCAN_MODE */
+} GT_SCAN_MODE;
+
+/* definition for Cut Through */
+typedef struct
+{
+    GT_U8    enableSelect;    /* Port Enable Select. */
+    GT_U8    enable;          /* Cut Through enable. */
+    GT_U8    cutThruQueue;    /* Cut Through Queues.. */
+} GT_CUT_THROUGH;
+
+#define GT_SKIP_INIT_SETUP    0x736b6970
+
+/*
+ * Typedef: struct GT_SYS_CONFIG
+ *
+ * Description: System configuration Parameters struct.
+ *
+ * Fields:
+ *    devNum        - Switch Device Number
+ *  cpuPortNum  - The physical port used to connect the device to CPU.
+ *                This is the port to which packets destined to CPU are
+ *                forwarded.
+ *  initPorts   - Whether to initialize the ports state.
+ *                GT_FALSE    - leave in default state.
+ *                GT_TRUE     - Initialize to Forwarding state.
+ *  skipInitSetup - skip init setup, if value is GT_SKIP_INIT_SETUP
+ *                  perform init setup, otherwise
+ *                    Initializing port state is not affected by this variable.
+ *    BSPFunctions    - Group of BSP specific functions.
+ *                SMI Read/Write and Semaphore Related functions.
+ */
+typedef struct
+{
+    GT_U8         devNum;
+    GT_U8         cpuPortNum;
+    GT_BOOL       initPorts;
+    BSP_FUNCTIONS BSPFunctions;
+    GT_SCAN_MODE  mode;
+    GT_U32        skipInitSetup;
+}GT_SYS_CONFIG;
+
+#ifdef GT_RMGMT_ACCESS
+typedef enum
+{
+ HW_ACCESS_MODE_SMI = 0,    /* Use SMI */
+ HW_ACCESS_MODE_F2R = 1,    /* Use Marvell RMGMT(F2R) function */
+} FGT_HW_ACCESS_MOD;    /* Hardware access mode */
+#endif
+
+/*
+ * Typedef: struct GT_QD_DEV
+ *
+ * Description: Includes Tapi layer switch configuration data.
+ *
+ * Fields:
+ *   deviceId       - The device type identifier.
+ *   revision       - The device revision number.
+ *   baseRegAddr    - Switch Base Register address.
+ *   numOfPorts     - Number of active ports.
+ *   maxPorts       - max ports. This field is only for driver's use.
+ *   cpuPortNum     - Logical port number whose physical port is connected to the CPU.
+ *   maxPhyNum      - max configurable Phy address.
+ *   stpMode        - current switch STP mode (0 none, 1 en, 2 dis)
+ *   accessMode        - shows how to find and access the device.
+ *   phyAddr        - SMI address used to access Switch registers(only for SMI_MULTI_ADDR_MODE).
+ *   validPortVec   - valid port list in vector format
+ *   validPhyVec    - valid phy list in vector format
+ *   validSerdesVec    - valid serdes list in vector format
+ *   devGroup        - the device group
+ *   devName        - name of the device in group 0
+ *   devName1        - name of the device in group 1
+ *   devStorage        - driver internal use (hold various temp information)
+ *   multiAddrSem   - Semaphore for Accessing SMI Device
+ *   atuRegsSem     - Semaphore for ATU access
+ *   vtuRegsSem     - Semaphore for VTU access
+ *   statsRegsSem   - Semaphore for RMON counter access
+ *   pirlRegsSem    - Semaphore for PIRL Resource access
+ *   ptpRegsSem     - Semaphore for PTP Resource access
+ *   tblRegsSem     - Semaphore for various Table Resource access,
+ *                    such as Trunk Tables and Device Table
+ *   eepromRegsSem  - Semaphore for eeprom control access
+ *   phyRegsSem     - Semaphore for PHY Device access
+ *   hwAccessRegsSem - Semaphore for Remote management access
+ *   fgtHwAccessMod - Select register access mode: Read/Write or Hardware access function
+ *   fgtReadMii     - platform specific SMI register Read function
+ *   fgtWriteMii    - platform specific SMI register Write function
+ *   fgtHwAccess    - platform specific register access function
+ *   semCreate      - function to create semapore
+ *   semDelete      - function to delete the semapore
+ *   semTake        - function to get a semapore
+ *   semGive        - function to return semaphore
+ *   appData        - application data that user may use
+ */
+struct _GT_QD_DEV
+{
+    GT_DEVICE   deviceId;
+    GT_LPORT    cpuPortNum;
+    GT_U8       revision;
+    GT_U8        devNum;
+    GT_U8        devEnabled;
+    GT_U8       baseRegAddr;
+    GT_U8       numOfPorts;
+    GT_U8        maxPorts;
+    GT_U8       maxPhyNum;
+    GT_U8        stpMode;
+    GT_U8        accessMode;
+    GT_U8        phyAddr;
+    GT_U16        reserved;
+    GT_U16        validPortVec;
+    GT_U16        validPhyVec;
+    GT_U16        validSerdesVec;
+    GT_U16        devGroup;
+    GT_U32        devName;
+    GT_U32        devName1;
+    GT_U32        devStorage;
+    GT_SEM        multiAddrSem;
+    GT_SEM        atuRegsSem;
+    GT_SEM        vtuRegsSem;
+    GT_SEM        statsRegsSem;
+    GT_SEM        pirlRegsSem;
+    GT_SEM        ptpRegsSem;
+    GT_SEM        tblRegsSem;
+    GT_SEM        eepromRegsSem;
+    GT_SEM        phyRegsSem;
+    GT_SEM        hwAccessRegsSem;
+
+    FGT_READ_MII  fgtReadMii;
+    FGT_WRITE_MII fgtWriteMii;
+#ifdef GT_RMGMT_ACCESS
+    FGT_HW_ACCESS_MOD fgtHwAccessMod;    /* Hardware access mode */
+    FGT_HW_ACCESS fgtHwAccess;    /* Hardware access  */
+#endif
+
+    FGT_SEM_CREATE  semCreate;     /* create semaphore */
+    FGT_SEM_DELETE  semDelete;     /* delete the semaphore */
+    FGT_SEM_TAKE    semTake;    /* try to get a semaphore */
+    FGT_SEM_GIVE    semGive;    /* return semaphore */
+    void*           appData;
+
+    /* Modified to add port mapping functions into device ssystem configuration. */
+#ifdef GT_PORT_MAP_IN_DEV
+    GT_U8           (*lport2port)   (GT_U16 portVec, GT_LPORT port);
+    GT_LPORT        (*port2lport)   (GT_U16 portVec, GT_U8 hwPort);
+    GT_U32          (*lportvec2portvec) (GT_U16 portVec, GT_U32 lVec);
+    GT_U32          (*portvec2lportvec) (GT_U16 portVec, GT_U32 pVec);
+#endif
+
+    GT_BOOL       use_mad;     /* use MAD driver to process Phy */
+#ifdef GT_USE_MAD
+    MAD_DEV    mad_dev;
+#endif
+};
+
+/*
+ * typedef: struct PIRL_PARA_TBL_T
+ *
+ * Description: PIRL parameter table structure
+ *
+ * Fields:
+ *      BI     - bucket increment
+ *      BRF    - bucket rate factor
+ *      CBS    - Committed Burst Size
+ *      EBS    - Excess Burst Size
+ */
+struct PIRL_PARA_TBL_T {
+	GT_U32 BI;
+	GT_U32 BRF;
+	GT_U32 CBS;
+	GT_U32 EBS;
+};
+
+/*special rate which can not be calculated by formula*/
+enum PIRL_SPECIAL_RATE_ENUM_T {
+	PIRL_RATE_NO_LIMIT = 0,
+	PIRL_RATE_64K,
+	PIRL_RATE_128K,
+	PIRL_RATE_192K,
+	PIRL_RATE_256K,
+	PIRL_RATE_320K,
+	PIRL_RATE_384K,
+	PIRL_RATE_448K,
+	PIRL_RATE_512K,
+	PIRL_RATE_576K,
+	PIRL_RATE_640K,
+	PIRL_RATE_704K,
+	PIRL_RATE_768K,
+	PIRL_RATE_832K,
+	PIRL_RATE_896K,
+	PIRL_RATE_960K,
+	PIRL_RATE_1M,
+	PIRL_RATE_2M,
+	PIRL_RATE_3M,
+	PIRL_RATE_4M,
+	PIRL_RATE_5M,
+	PIRL_RATE_6M,
+	PIRL_RATE_7M,
+	PIRL_RATE_8M,
+	PIRL_RATE_9M,
+	PIRL_RATE_10M,
+	PIRL_RATE_11M,
+	PIRL_RATE_12M,
+	PIRL_RATE_13M,
+	PIRL_RATE_14M,
+	PIRL_RATE_15M,
+	PIRL_RATE_16M,
+	PIRL_RATE_17M,
+	PIRL_RATE_18M,
+	PIRL_RATE_19M,
+	PIRL_RATE_20M
+};
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __msApi_h */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApiPrototype.h b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApiPrototype.h
new file mode 100644
index 000000000000..88a1ee3c26d0
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApiPrototype.h
@@ -0,0 +1,23371 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* msApiPrototype.h
+*
+* DESCRIPTION:
+*       API Prototypes for QuarterDeck Device
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+
+#ifndef __msApiPrototype_h
+#define __msApiPrototype_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* gtBrgFdb.c */
+
+/*******************************************************************************
+* gfdbSetAtuSize
+*
+* DESCRIPTION:
+*       Sets the Mac address table size.
+*
+* INPUTS:
+*       size    - Mac address table size.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbSetAtuSize
+(
+    IN GT_QD_DEV *dev,
+    IN ATU_SIZE size
+);
+
+
+/*******************************************************************************
+* gfdbGetAgingTimeRange
+*
+* DESCRIPTION:
+*       Gets the maximal and minimum age times that the hardware can support.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       maxTimeout - max aging time in secounds.
+*       minTimeout - min aging time in secounds.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbGetAgingTimeRange
+(
+    IN GT_QD_DEV *dev,
+    OUT GT_U32 *maxTimeout,
+    OUT GT_U32 *minTimeout
+);
+
+/*******************************************************************************
+* gfdbGetAgingTimeout
+*
+* DESCRIPTION:
+*       Gets the timeout period in seconds for aging out dynamically learned
+*       forwarding information. The returned value may not be the same as the value
+*        programmed with <gfdbSetAgingTimeout>. Please refer to the description of
+*        <gfdbSetAgingTimeout>.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       timeout - aging time in seconds.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbGetAgingTimeout
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U32       *timeout
+);
+
+/*******************************************************************************
+* gfdbSetAgingTimeout
+*
+* DESCRIPTION:
+*       Sets the timeout period in seconds for aging out dynamically learned
+*       forwarding information. The standard recommends 300 sec.
+*        Supported aging timeout values are multiple of time-base, where time-base
+*        is either 15 or 16 seconds, depending on the Switch device. For example,
+*        88E6063 uses time-base 16, and so supported aging timeouts are 0,16,32,
+*        48,..., and 4080. If unsupported timeout value (bigger than 16) is used,
+*        the value will be rounded to the nearest supported value smaller than the
+*        given timeout. If the given timeout is less than 16, minimum timeout value
+*        16 will be used instead. E.g.) 35 becomes 32 and 5 becomes 16.
+*        <gfdbGetAgingTimeRange> function can be used to find the time-base.
+*
+* INPUTS:
+*       timeout - aging time in seconds.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbSetAgingTimeout
+(
+    IN GT_QD_DEV *dev,
+    IN GT_U32 timeout
+);
+
+
+
+/*******************************************************************************
+* gfdbGetAtuDynamicCount
+*
+* DESCRIPTION:
+*       Gets the current number of dynamic unicast entries in this
+*       Filtering Database.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       numDynEntries - number of dynamic entries.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NO_SUCH - vlan does not exist.
+*
+* COMMENTS:
+*       None
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbGetAtuDynamicCount
+(
+    IN GT_QD_DEV *dev,
+    OUT GT_U32 *numDynEntries
+);
+
+
+
+/*******************************************************************************
+* gfdbGetAtuEntryFirst
+*
+* DESCRIPTION:
+*       Gets first lexicographic MAC address entry from the ATU.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       atuEntry - match Address translate unit entry.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NO_SUCH - table is empty.
+*
+* COMMENTS:
+*       Search starts from Mac[00:00:00:00:00:00]
+*
+*        DBNum in atuEntry -
+*            ATU MAC Address Database number. If multiple address
+*            databases are not being used, DBNum should be zero.
+*            If multiple address databases are being used, this value
+*            should be set to the desired address database number.
+*
+*******************************************************************************/
+GT_STATUS gfdbGetAtuEntryFirst
+(
+    IN GT_QD_DEV *dev,
+    OUT GT_ATU_ENTRY    *atuEntry
+);
+
+
+
+/*******************************************************************************
+* gfdbGetAtuEntryNext
+*
+* DESCRIPTION:
+*       Gets next lexicographic MAC address from the specified Mac Addr.
+*
+* INPUTS:
+*       atuEntry - the Mac Address to start the search.
+*
+* OUTPUTS:
+*       atuEntry - match Address translate unit entry.
+*
+* RETURNS:
+*       GT_OK      - on success.
+*       GT_FAIL    - on error or entry does not exist.
+*       GT_NO_SUCH - no more entries.
+*
+* COMMENTS:
+*       Search starts from atu.macAddr[xx:xx:xx:xx:xx:xx] specified by the
+*       user.
+*
+*        DBNum in atuEntry -
+*            ATU MAC Address Database number. If multiple address
+*            databases are not being used, DBNum should be zero.
+*            If multiple address databases are being used, this value
+*            should be set to the desired address database number.
+*
+*******************************************************************************/
+GT_STATUS gfdbGetAtuEntryNext
+(
+    IN GT_QD_DEV *dev,
+    INOUT GT_ATU_ENTRY  *atuEntry
+);
+
+
+
+/*******************************************************************************
+* gfdbFindAtuMacEntry
+*
+* DESCRIPTION:
+*       Find FDB entry for specific MAC address from the ATU.
+*
+* INPUTS:
+*       atuEntry - the Mac address to search.
+*
+* OUTPUTS:
+*       found    - GT_TRUE, if the appropriate entry exists.
+*       atuEntry - the entry parameters.
+*
+* RETURNS:
+*       GT_OK      - on success.
+*       GT_FAIL    - on error or entry does not exist.
+*       GT_NO_SUCH - no more entries.
+*
+* COMMENTS:
+*        DBNum in atuEntry -
+*            ATU MAC Address Database number. If multiple address
+*            databases are not being used, DBNum should be zero.
+*            If multiple address databases are being used, this value
+*            should be set to the desired address database number.
+*
+*******************************************************************************/
+GT_STATUS gfdbFindAtuMacEntry
+(
+    IN GT_QD_DEV *dev,
+    INOUT GT_ATU_ENTRY  *atuEntry,
+    OUT GT_BOOL         *found
+);
+
+
+
+/*******************************************************************************
+* gfdbFlush
+*
+* DESCRIPTION:
+*       This routine flush all or unblocked addresses from the MAC Address
+*       Table.
+*
+* INPUTS:
+*       flushCmd - the flush operation type.
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NO_RESOURCE  - failed to allocate a t2c struct
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbFlush
+(
+    IN GT_QD_DEV *dev,
+    IN GT_FLUSH_CMD flushCmd
+);
+
+/*******************************************************************************
+* gfdbFlushInDB
+*
+* DESCRIPTION:
+*       This routine flush all or unblocked addresses from the particular
+*       ATU Database (DBNum). If multiple address databases are being used, this
+*        API can be used to flush entries in a particular DBNum database.
+*
+* INPUTS:
+*       flushCmd - the flush operation type.
+*        DBNum     - ATU MAC Address Database Number.
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NOT_SUPPORTED- if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbFlushInDB
+(
+    IN GT_QD_DEV *dev,
+    IN GT_FLUSH_CMD flushCmd,
+    IN GT_U32 DBNum
+);
+
+/*******************************************************************************
+* gfdbAddMacEntry
+*
+* DESCRIPTION:
+*       Creates the new entry in MAC address table.
+*
+* INPUTS:
+*       macEntry    - mac address entry to insert to the ATU.
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK             - on success
+*       GT_FAIL           - on error
+*       GT_NO_RESOURCE    - failed to allocate a t2c struct
+*       GT_OUT_OF_CPU_MEM - oaMalloc failed
+*
+* COMMENTS:
+*        DBNum in atuEntry -
+*            ATU MAC Address Database number. If multiple address
+*            databases are not being used, DBNum should be zero.
+*            If multiple address databases are being used, this value
+*            should be set to the desired address database number.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbAddMacEntry
+(
+    IN GT_QD_DEV *dev,
+    IN GT_ATU_ENTRY *macEntry
+);
+
+
+
+/*******************************************************************************
+* gfdbDelMacEntry
+*
+* DESCRIPTION:
+*       Deletes MAC address entry.
+*
+* INPUTS:
+*       macAddress - mac address.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NO_RESOURCE  - failed to allocate a t2c struct
+*       GT_NO_SUCH      - if specified address entry does not exist
+*
+* COMMENTS:
+*       For SVL mode vlan Id is ignored.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbDelMacEntry
+(
+    IN GT_QD_DEV *dev,
+    IN GT_ETHERADDR  *macAddress
+);
+
+/*******************************************************************************
+* gfdbDelAtuEntry
+*
+* DESCRIPTION:
+*       Deletes ATU entry.
+*
+* INPUTS:
+*       atuEntry - the ATU entry to be deleted.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NO_RESOURCE  - failed to allocate a t2c struct
+*       GT_NO_SUCH      - if specified address entry does not exist
+*
+* COMMENTS:
+*        DBNum in atuEntry -
+*            ATU MAC Address Database number. If multiple address
+*            databases are not being used, DBNum should be zero.
+*            If multiple address databases are being used, this value
+*            should be set to the desired address database number.
+*
+*******************************************************************************/
+GT_STATUS gfdbDelAtuEntry
+(
+    IN GT_QD_DEV *dev,
+    IN GT_ATU_ENTRY  *atuEntry
+);
+
+/*******************************************************************************
+* gfdbLearnEnable
+*
+* DESCRIPTION:
+*       Enable/disable automatic learning of new source MAC addresses on port
+*       ingress.
+*
+* INPUTS:
+*       en - GT_TRUE for enable  or GT_FALSE otherwise
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbLearnEnable
+(
+    IN GT_QD_DEV *dev,
+    IN GT_BOOL  en
+);
+
+
+/*******************************************************************************
+* gfdbGetLearnEnable
+*
+* DESCRIPTION:
+*       Get automatic learning status of new source MAC addresses on port ingress.
+*
+* INPUTS:
+*       None
+*
+* OUTPUTS:
+*       en - GT_TRUE if enabled  or GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbGetLearnEnable
+(
+    IN GT_QD_DEV    *dev,
+    OUT GT_BOOL  *en
+);
+
+/*******************************************************************************
+* gstpSetMode
+*
+* DESCRIPTION:
+*       This routine Enable the Spanning tree.
+*
+* INPUTS:
+*       en - GT_TRUE for enable, GT_FALSE for disable.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       when enabled, this function sets all port to blocking state, and inserts
+*       the BPDU MAC into the ATU to be captured to CPU, on disable all port are
+*       being modified to be in forwarding state.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstpSetMode
+(
+    IN GT_QD_DEV *dev,
+    IN GT_BOOL  en
+);
+
+
+
+/*******************************************************************************
+* gstpSetPortState
+*
+* DESCRIPTION:
+*       This routine set the port state.
+*
+* INPUTS:
+*       port  - the logical port number.
+*       state - the port state to set.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstpSetPortState
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT           port,
+    IN GT_PORT_STP_STATE  state
+);
+
+
+
+/*******************************************************************************
+* gstpGetPortState
+*
+* DESCRIPTION:
+*       This routine returns the port state.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       state - the current port state.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstpGetPortState
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT           port,
+    OUT GT_PORT_STP_STATE  *state
+);
+
+/*******************************************************************************
+* gprtSetEgressMode
+*
+* DESCRIPTION:
+*       This routine set the egress mode.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - the egress mode.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetEgressMode
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT        port,
+    IN GT_EGRESS_MODE  mode
+);
+
+
+
+/*******************************************************************************
+* gprtGetEgressMode
+*
+* DESCRIPTION:
+*       This routine get the egress mode.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       mode - the egress mode.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetEgressMode
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT        port,
+    OUT GT_EGRESS_MODE  *mode
+);
+
+
+
+/*******************************************************************************
+* gprtSetVlanTunnel
+*
+* DESCRIPTION:
+*       This routine sets the vlan tunnel mode.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - the vlan tunnel mode.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetVlanTunnel
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT port,
+    IN GT_BOOL  mode
+);
+
+
+
+/*******************************************************************************
+* gprtGetVlanTunnel
+*
+* DESCRIPTION:
+*       This routine get the vlan tunnel mode.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       mode - the vlan tunnel mode..
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetVlanTunnel
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *mode
+);
+
+
+/*******************************************************************************
+* gprtSetIGMPSnoop
+*
+* DESCRIPTION:
+*         This routine set the IGMP Snoop. When set to one and this port receives
+*        IGMP frame, the frame is switched to the CPU port, overriding all other
+*        switching decisions, with exception for CPU's Trailer.
+*        CPU port is determined by the Ingress Mode bits. A port is considered
+*        the CPU port if its Ingress Mode are either GT_TRAILER_INGRESS or
+*        GT_CPUPORT_INGRESS.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for IGMP Snoop or GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetIGMPSnoop
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT port,
+    IN GT_BOOL  mode
+);
+
+/*******************************************************************************
+* gprtGetIGMPSnoop
+*
+* DESCRIPTION:
+*        This routine get the IGMP Snoop mode.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE: IGMP Snoop enabled
+*              GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetIGMPSnoop
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *mode
+);
+
+/* the following two APIs are added to support clippership */
+
+/*******************************************************************************
+* gprtSetHeaderMode
+*
+* DESCRIPTION:
+*        This routine set ingress and egress header mode of a switch port.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for header mode  or GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetHeaderMode
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT port,
+    IN GT_BOOL  mode
+);
+
+/*******************************************************************************
+* gprtGetHeaderMode
+*
+* DESCRIPTION:
+*        This routine gets ingress and egress header mode of a switch port.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE: header mode enabled
+*              GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetHeaderMode
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *mode
+);
+
+
+/*******************************************************************************
+* gprtSetProtectedMode
+*
+* DESCRIPTION:
+*        This routine set protected mode of a switch port.
+*        When this mode is set to GT_TRUE, frames are allowed to egress port
+*        defined by the 802.1Q VLAN membership for the frame's VID 'AND'
+*        by the port's VLANTable if 802.1Q is enabled on the port. Both must
+*        allow the frame to Egress.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for protected mode or GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetProtectedMode
+(
+    IN  GT_QD_DEV   *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL      mode
+);
+
+/*******************************************************************************
+* gprtGetProtectedMode
+*
+* DESCRIPTION:
+*        This routine gets protected mode of a switch port.
+*        When this mode is set to GT_TRUE, frames are allowed to egress port
+*        defined by the 802.1Q VLAN membership for the frame's VID 'AND'
+*        by the port's VLANTable if 802.1Q is enabled on the port. Both must
+*        allow the frame to Egress.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE: header mode enabled
+*              GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetProtectedMode
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    OUT GT_BOOL        *mode
+);
+
+/*******************************************************************************
+* gprtSetForwardUnknown
+*
+* DESCRIPTION:
+*        This routine set Forward Unknown mode of a switch port.
+*        When this mode is set to GT_TRUE, normal switch operation occurs.
+*        When this mode is set to GT_FALSE, unicast frame with unknown DA addresses
+*        will not egress out this port.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for protected mode or GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetForwardUnknown
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT    port,
+    IN GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gprtGetForwardUnknown
+*
+* DESCRIPTION:
+*        This routine gets Forward Unknown mode of a switch port.
+*        When this mode is set to GT_TRUE, normal switch operation occurs.
+*        When this mode is set to GT_FALSE, unicast frame with unknown DA addresses
+*        will not egress out this port.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE: header mode enabled
+*                GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetForwardUnknown
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    OUT GT_BOOL        *mode
+);
+
+/*******************************************************************************
+* gprtGetSwitchReg
+*
+* DESCRIPTION:
+*       This routine reads Switch Port Registers.
+*
+* INPUTS:
+*       port    - logical port number
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetSwitchReg
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_U32         regAddr,
+    OUT GT_U16         *data
+);
+
+/*******************************************************************************
+* gprtSetSwitchReg
+*
+* DESCRIPTION:
+*       This routine writes Switch Port Registers.
+*
+* INPUTS:
+*       port    - logical port number
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetSwitchReg
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_U32         regAddr,
+    IN  GT_U16         data
+);
+
+
+/*******************************************************************************
+* gprtGetGlobalReg
+*
+* DESCRIPTION:
+*       This routine reads Switch Global Registers.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetGlobalReg
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32         regAddr,
+    OUT GT_U16         *data
+);
+
+/*******************************************************************************
+* gprtSetGlobalReg
+*
+* DESCRIPTION:
+*       This routine writes Switch Global Registers.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetGlobalReg
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32         regAddr,
+    IN  GT_U16         data
+);
+
+
+
+/*******************************************************************************
+* gvlnSetPortVlanPorts
+*
+* DESCRIPTION:
+*       This routine sets the port VLAN group port membership list.
+*
+* INPUTS:
+*       port        - logical port number to set.
+*       memPorts    - array of logical ports.
+*       memPortsLen - number of members in memPorts array
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnSetPortVlanPorts
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT port,
+    IN GT_LPORT memPorts[],
+    IN GT_U8    memPortsLen
+);
+
+
+
+/*******************************************************************************
+* gvlnGetPortVlanPorts
+*
+* DESCRIPTION:
+*       This routine gets the port VLAN group port membership list.
+*
+* INPUTS:
+*       port        - logical port number to set.
+*
+* OUTPUTS:
+*       memPorts    - array of logical ports.
+*       memPortsLen - number of members in memPorts array
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnGetPortVlanPorts
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_LPORT memPorts[],
+    OUT GT_U8    *memPortsLen
+);
+
+
+
+
+/*******************************************************************************
+* gvlnSetPortUserPriLsb
+*
+* DESCRIPTION:
+*       This routine Set the user priority (VPT) LSB bit, to be added to the
+*       user priority on the egress.
+*
+* INPUTS:
+*       port       - logical port number to set.
+*       userPriLsb - GT_TRUE for 1, GT_FALSE for 0.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnSetPortUserPriLsb
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT port,
+    IN GT_BOOL  userPriLsb
+);
+
+
+
+/*******************************************************************************
+* gvlnGetPortUserPriLsb
+*
+* DESCRIPTION:
+*       This routine gets the user priority (VPT) LSB bit.
+*
+* INPUTS:
+*       port       - logical port number to set.
+*
+* OUTPUTS:
+*       userPriLsb - GT_TRUE for 1, GT_FALSE for 0.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnGetPortUserPriLsb
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *userPriLsb
+);
+
+
+/*******************************************************************************
+* gvlnSetPortVid
+*
+* DESCRIPTION:
+*       This routine Set the port default vlan id.
+*
+* INPUTS:
+*       port - logical port number to set.
+*       vid  - the port vlan id.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnSetPortVid
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT port,
+    IN GT_U16   vid
+);
+
+
+/*******************************************************************************
+* gvlnGetPortVid
+*
+* DESCRIPTION:
+*       This routine Get the port default vlan id.
+*
+* INPUTS:
+*       port - logical port number to set.
+*
+* OUTPUTS:
+*       vid  - the port vlan id.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnGetPortVid
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_U16   *vid
+);
+
+/*******************************************************************************
+* gvlnSetPortVlanDBNum
+*
+* DESCRIPTION:
+*       This routine sets the port VLAN database number (DBNum).
+*
+* INPUTS:
+*       port    - logical port number to set.
+*       DBNum     - database number for this port
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:IN GT_INGRESS_MODE mode
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnSetPortVlanDBNum
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT port,
+    IN GT_U32   DBNum
+);
+
+
+/*******************************************************************************
+* gvlnGetPortVlanDBNum
+*
+* DESCRIPTION:IN GT_INGRESS_MODE mode
+*       This routine gets the port VLAN database number (DBNum).
+*
+* INPUTS:
+*       port     - logical port number to get.
+*
+* OUTPUTS:
+*       DBNum     - database number for this port
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnGetPortVlanDBNum
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_U32    *DBNum
+);
+
+/********************************************************************
+* gvlnSetPortVlanDot1qMode
+*
+* DESCRIPTION:
+*       This routine sets the port 802.1q mode (11:10)
+*
+* INPUTS:
+*       port    - logical port number to set.
+*       mode     - 802.1q mode for this port
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:IN GT_INGRESS_MODE mode
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnSetPortVlanDot1qMode
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT     port,
+    IN GT_DOT1Q_MODE    mode
+);
+
+/*******************************************************************************
+* gvlnGetPortVlanDot1qMode
+*
+* DESCRIPTION:
+*       This routine gets the port 802.1q mode (bit 11:10).
+*
+* INPUTS:
+*       port     - logical port number to get.
+*
+* OUTPUTS:
+*       mode     - 802.1q mode for this port
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnGetPortVlanDot1qMode
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_DOT1Q_MODE    *mode
+);
+
+
+/********************************************************************
+* gvlnSetPortVlanForceDefaultVID
+*
+* DESCRIPTION:
+*       This routine sets the port 802.1q mode (11:10)
+*
+* INPUTS:
+*       port    - logical port number to set.
+*       mode    - GT_TRUE, force to use default VID
+*                 GT_FAULSE, otherwise
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnSetPortVlanForceDefaultVID
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL      mode
+);
+
+/*******************************************************************************
+* gvlnGetPortVlanForceDefaultVID
+*
+* DESCRIPTION:
+*       This routine gets the port mode for ForceDefaultVID (bit 12).
+*
+* INPUTS:
+*       port     - logical port number to get.
+*
+* OUTPUTS:
+*       mode     - ForceDefaultVID mode for this port
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnGetPortVlanForceDefaultVID
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT      port,
+    OUT GT_BOOL        *mode
+);
+
+/*******************************************************************************
+* eventSetActive
+*
+* DESCRIPTION:
+*       This routine enables/disables the receive of an hardware driven event.
+*
+* INPUTS:
+*       eventType - the event type. any combination of the folowing:
+*           GT_STATS_DONE, GT_VTU_PROB, GT_VTU_DONE, GT_ATU_FULL,
+*           GT_ATU_DONE, GT_PHY_INTERRUPT, GT_EE_INTERRUPT, and GT_DEVICE_INT
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       Each switch device has its own set of event Types. Please refer to the
+*        device datasheet for the list of event types that the device supports.
+*
+*******************************************************************************/
+GT_STATUS eventSetActive
+(
+    IN GT_QD_DEV     *dev,
+    IN GT_U32         eventType
+);
+
+/*******************************************************************************
+* eventgetActive
+*
+* DESCRIPTION:
+*       This routine gets the enable/disable status of the receive of an hardware driven event.
+*
+* OUTPUTS:
+*       eventType - the event type. any combination of the folowing:
+*           GT_STATS_DONE, GT_VTU_PROB, GT_VTU_DONE, GT_ATU_FULL(or GT_ATU_PROB),
+*           GT_ATU_DONE, GT_PHY_INTERRUPT, GT_EE_INTERRUPT, GT_DEVICE_INT,
+*            and GT_AVB_INTERRUPT
+*
+* INPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       Each switch device has its own set of event Types. Please refer to the
+*        device datasheet for the list of event types that the device supports.
+*
+*******************************************************************************/
+GT_STATUS eventGetActive
+(
+	IN GT_QD_DEV * dev,
+	IN GT_U32    * eventType
+);
+
+/*******************************************************************************
+* eventGetIntStatus
+*
+* DESCRIPTION:
+*       This routine reads an hardware driven event status.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       intCause -  It provides the source of interrupt of the following:
+*                GT_STATS_DONE, GT_VTU_PROB, GT_VTU_DONE, GT_ATU_FULL,
+*                GT_ATU_DONE, GT_PHY_INTERRUPT, and GT_EE_INTERRUPT.
+*                For Gigabit Switch, GT_ATU_FULL is replaced with GT_ATU_FULL and
+*                GT_PHY_INTERRUPT is not supported.
+*                GT_DEVICE_INT may not be available on the devices, so please refer
+*                to the datasheet for details.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS eventGetIntStatus
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U16        *intCause
+);
+
+/*******************************************************************************
+* gvtuGetIntStatus
+*
+* DESCRIPTION:
+*         Check to see if a specific type of VTU interrupt occured
+*
+* INPUTS:
+*       intType - the type of interrupt which causes an interrupt.
+*            any combination of
+*            GT_MEMEBER_VIOLATION,
+*            GT_MISS_VIOLATION,
+*            GT_FULL_VIOLATION
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK   - on success
+*         GT_FAIL - on error
+*
+* COMMENTS:
+*         FULL_VIOLATION is only for Fast Ethernet Switch (not for Gigabit Switch).
+*
+*******************************************************************************/
+
+GT_STATUS gvtuGetIntStatus
+(
+    IN  GT_QD_DEV             *dev,
+    OUT GT_VTU_INT_STATUS     *vtuIntStatus
+);
+
+/*******************************************************************************
+* gvtuGetEntryCount
+*
+* DESCRIPTION:
+*       Gets the current number of entries in the VTU table
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       numEntries - number of VTU entries.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NO_SUCH - vlan does not exist.
+*
+* COMMENTS:
+*       None
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvtuGetEntryCount
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U32         *numEntries
+);
+
+/*******************************************************************************
+* gvtuGetEntryFirst
+*
+* DESCRIPTION:
+*       Gets first lexicographic entry from the VTU.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       vtuEntry - match VTU entry.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NO_SUCH - table is empty.
+*
+* COMMENTS:
+*       Search starts from vid of all one's
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvtuGetEntryFirst
+(
+    IN  GT_QD_DEV         *dev,
+    OUT GT_VTU_ENTRY    *vtuEntry
+);
+
+/*******************************************************************************
+* gvtuGetEntryNext
+*
+* DESCRIPTION:
+*       Gets next lexicographic VTU entry from the specified VID.
+*
+* INPUTS:
+*       vtuEntry - the VID to start the search.
+*
+* OUTPUTS:
+*       vtuEntry - match VTU  entry.
+*
+* RETURNS:
+*       GT_OK      - on success.
+*       GT_FAIL    - on error or entry does not exist.
+*       GT_NO_SUCH - no more entries.
+*
+* COMMENTS:
+*       Search starts from the VID specified by the user.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvtuGetEntryNext
+(
+    IN  GT_QD_DEV         *dev,
+    INOUT GT_VTU_ENTRY  *vtuEntry
+);
+
+/*******************************************************************************
+* gvtuFindVidEntry
+*
+* DESCRIPTION:
+*       Find VTU entry for a specific VID, it will return the entry, if found,
+*       along with its associated data
+*
+* INPUTS:
+*       vtuEntry - contains the VID to search for.
+*
+* OUTPUTS:
+*       found    - GT_TRUE, if the appropriate entry exists.
+*       vtuEntry - the entry parameters.
+*
+* RETURNS:
+*       GT_OK      - on success.
+*       GT_FAIL    - on error or entry does not exist.
+*       GT_NO_SUCH - no more entries.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvtuFindVidEntry
+(
+    IN GT_QD_DEV         *dev,
+    INOUT GT_VTU_ENTRY  *vtuEntry,
+    OUT GT_BOOL         *found
+);
+
+/*******************************************************************************
+* gvtuFlush
+*
+* DESCRIPTION:
+*       This routine removes all entries from VTU Table.
+*
+* INPUTS:
+*       None
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvtuFlush
+(
+    IN GT_QD_DEV *dev
+);
+
+/*******************************************************************************
+* gvtuAddEntry
+*
+* DESCRIPTION:
+*       Creates the new entry in VTU table based on user input.
+*
+* INPUTS:
+*       vtuEntry    - vtu entry to insert to the VTU.
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK             - on success
+*       GT_FAIL           - on error
+*       GT_FULL              - vtu table is full
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvtuAddEntry
+(
+    IN GT_QD_DEV     *dev,
+    IN GT_VTU_ENTRY *vtuEntry
+);
+
+/*******************************************************************************
+* gvtuDelEntry
+*
+* DESCRIPTION:
+*       Deletes VTU entry specified by user.
+*
+* INPUTS:
+*       vtuEntry - the VTU entry to be deleted
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NO_SUCH      - if specified address entry does not exist
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvtuDelEntry
+(
+    IN GT_QD_DEV     *dev,
+    IN GT_VTU_ENTRY *vtuEntry
+);
+
+/* gtPhyCtrl.c */
+
+/*******************************************************************************
+* gprtPhyReset
+*
+* DESCRIPTION:
+*        This routine preforms PHY reset.
+*        After reset, phy will be in Autonegotiation mode.
+*
+* INPUTS:
+*         port - The logical port number
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+* COMMENTS:
+*         data sheet register 0.15 - Reset
+*         data sheet register 0.13 - Speed
+*         data sheet register 0.12 - Autonegotiation
+*         data sheet register 0.8  - Duplex Mode
+*
+*******************************************************************************/
+
+GT_STATUS gprtPhyReset
+(
+    IN GT_QD_DEV     *dev,
+    IN GT_LPORT     port
+);
+
+
+/*******************************************************************************
+* gprtSetPortLoopback
+*
+* DESCRIPTION:
+* Enable/Disable Internal Port Loopback.
+* For 10/100 Fast Ethernet PHY, speed of Loopback is determined as follows:
+*   If Auto-Negotiation is enabled, this routine disables Auto-Negotiation and
+*   forces speed to be 10Mbps.
+*   If Auto-Negotiation is disabled, the forced speed is used.
+*   Disabling Loopback simply clears bit 14 of control register(0.14). Therefore,
+*   it is recommended to call gprtSetPortAutoMode for PHY configuration after
+*   Loopback test.
+* For 10/100/1000 Gigagbit Ethernet PHY, speed of Loopback is determined as follows:
+*   If Auto-Negotiation is enabled and Link is active, the current speed is used.
+*   If Auto-Negotiation is disabled, the forced speed is used.
+*   All other cases, default MAC Interface speed is used. Please refer to the data
+*   sheet for the information of the default MAC Interface speed.
+*
+* INPUTS:
+*         port - logical port number
+*         enable - If GT_TRUE, enable loopback mode
+*                     If GT_FALSE, disable loopback mode
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK     - on success
+*         GT_FAIL     - on error
+*
+* COMMENTS:
+*         data sheet register 0.14 - Loop_back
+*
+*******************************************************************************/
+
+GT_STATUS gprtSetPortLoopback
+(
+    IN GT_QD_DEV     *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL         enable
+);
+
+/*******************************************************************************
+* gprtGetPortLoopback
+*
+* DESCRIPTION:
+* Get Internal Port Loopback state.
+* For 10/100 Fast Ethernet PHY, speed of Loopback is determined as follows:
+*   If Auto-Negotiation is enabled, this routine disables Auto-Negotiation and
+*   forces speed to be 10Mbps.
+*   If Auto-Negotiation is disabled, the forced speed is used.
+*   Disabling Loopback simply clears bit 14 of control register(0.14). Therefore,
+*   it is recommended to call gprtSetPortAutoMode for PHY configuration after
+*   Loopback test.
+* For 10/100/1000 Gigagbit Ethernet PHY, speed of Loopback is determined as follows:
+*   If Auto-Negotiation is enabled and Link is active, the current speed is used.
+*   If Auto-Negotiation is disabled, the forced speed is used.
+*   All other cases, default MAC Interface speed is used. Please refer to the data
+*   sheet for the information of the default MAC Interface speed.
+*
+*
+* INPUTS:
+*       port - The logical port number, unless SERDES device is accessed
+*              The physical address, if SERDES device is accessed
+*
+* OUTPUTS:
+*       enable - If GT_TRUE,  loopback mode is enabled
+*       If GT_FALSE,  loopback mode is disabled
+*
+* RETURNS:
+*       GT_OK - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       data sheet register 0.14 - Loop_back
+*
+*******************************************************************************/
+GT_STATUS gprtGetPortLoopback
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    OUT GT_BOOL  *enable
+);
+
+/*******************************************************************************
+* gprtSetPortLineLoopback
+*
+* DESCRIPTION:
+*        Enable/Disable Port Line Loopback.
+*
+* INPUTS:
+*        port   - The logical port number, unless SERDES device is accessed
+*                 The physical address, if SERDES device is accessed
+*        enable - If GT_TRUE, enable loopback mode
+*                 If GT_FALSE, disable loopback mode
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*        data sheet register FE:28.4, GE:21_2.14  - Loop_back
+*
+*******************************************************************************/
+GT_STATUS gprtSetPortLineLoopback
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL   enable
+);
+
+/*******************************************************************************
+* gprtGetPortLineLoopback
+*
+* DESCRIPTION:
+*       Get Port Line Loopback status.
+*
+*
+* INPUTS:
+*       port - The logical port number, unless SERDES device is accessed
+*              The physical address, if SERDES device is accessed
+*
+* OUTPUTS:
+*       enable - If GT_TRUE, enable loopback mode
+*                If GT_FALSE, disable loopback mode* enable - If GT_TRUE, enable loopback mode
+*                If GT_FALSE, disable loopback mode
+*
+* RETURNS:
+*      GT_OK - on success
+*      GT_FAIL - on error
+*
+* COMMENTS:
+*      data sheet register FE:28.4, GE:21_2.14  - Loop_back
+*
+*******************************************************************************/
+GT_STATUS gprtGetPortLineLoopback
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    OUT GT_BOOL  *enable
+)
+;
+
+/*******************************************************************************
+* gprtSetPortSpeed
+*
+* DESCRIPTION:
+*         Sets speed for a specific logical port. This function will keep the duplex
+*        mode and loopback mode to the previous value, but disable others, such as
+*        Autonegotiation.
+*
+* INPUTS:
+*         port  - logical port number
+*         speed - port speed.
+*                PHY_SPEED_10_MBPS for 10Mbps
+*                PHY_SPEED_100_MBPS for 100Mbps
+*                PHY_SPEED_1000_MBPS for 1000Mbps
+*
+* OUTPUTS:
+* None.
+*
+* RETURNS:
+* GT_OK - on success
+* GT_FAIL - on error
+*
+* COMMENTS:
+* data sheet register 0.13 - Speed Selection (LSB)
+* data sheet register 0.6  - Speed Selection (MSB)
+*
+*******************************************************************************/
+
+GT_STATUS gprtSetPortSpeed
+(
+    IN GT_QD_DEV     *dev,
+    IN GT_LPORT     port,
+    IN GT_PHY_SPEED    speed
+);
+
+
+/*******************************************************************************
+* gprtPortAutoNegEnable
+*
+* DESCRIPTION:
+*         Enable/disable an Auto-Negotiation for duplex mode on specific
+*         logical port. When Autonegotiation is disabled, phy will be in 10Mbps Half
+*        Duplex mode. Enabling Autonegotiation will set 100BASE-TX Full Duplex,
+*        100BASE-TX Full Duplex, 100BASE-TX Full Duplex, and 100BASE-TX Full Duplex
+*        in AutoNegotiation Advertisement register.
+*
+* INPUTS:
+*         port - logical port number
+*         state - GT_TRUE for enable Auto-Negotiation for duplex mode,
+*                     GT_FALSE otherwise
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK     - on success
+*         GT_FAIL     - on error
+*
+* COMMENTS:
+*         data sheet register 0.12 - Auto-Negotiation Enable
+*         data sheet register 4.8, 4.7, 4.6, 4.5 - Auto-Negotiation Advertisement
+*
+*******************************************************************************/
+
+GT_STATUS gprtPortAutoNegEnable
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL         state
+);
+
+/*******************************************************************************
+* gprtGetPortAutoNegState
+*
+* DESCRIPTION:
+*         Read the auto negotiation state of specific logical port.
+*         This routine simply reads Auto Negotiation bit (bit 12) of Control
+*         Register.
+*
+* INPUTS:
+*         port    - The logical port number, unless SERDES device is accessed
+*                   The physical address, if SERDES device is accessed
+*
+* OUTPUTS:
+*         state   - GT_TRUE for enable Auto-Negotiation,
+*                   GT_FALSE otherwise
+*
+* RETURNS:
+*         GT_OK   - on success
+*         GT_FAIL - on error
+*
+* COMMENTS:
+*         data sheet register 0.12 - Auto-Negotiation Enable
+*         data sheet register 4.8, 4.7, 4.6, 4.5 - Auto-Negotiation Advertisement
+*******************************************************************************/
+GT_STATUS gprtGetPortAutoNegState
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_BOOL   *state
+);
+
+/*******************************************************************************
+* gprtPortPowerDown
+*
+* DESCRIPTION:
+*         Enable/disable (power down) on specific logical port. When this function
+*        is called with normal operation request, phy will set to Autonegotiation
+*        mode.
+*
+* INPUTS:
+*         port    - logical port number
+*         state    -  GT_TRUE: power down
+*                     GT_FALSE: normal operation
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK     - on success
+*         GT_FAIL     - on error
+*
+* COMMENTS:
+*         data sheet register 0.11 - Power Down
+*
+*******************************************************************************/
+
+GT_STATUS gprtPortPowerDown
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_BOOL        state
+);
+
+/*******************************************************************************
+* gprtGetPortPowerDown
+*
+* DESCRIPTION:
+*         Read Port state (power down/normal operation) on specific logical port.
+*
+* INPUTS:
+*         port    - The logical port number, unless SERDES device is accessed
+*                   The physical address, if SERDES device is accessed
+*
+* OUTPUTS:
+*         state   - GT_TRUE: power down
+*                   GT_FALSE: normal operation
+*
+* RETURNS:
+*         GT_OK   - on success
+*         GT_FAIL - on error
+*
+* COMMENTS:
+*         data sheet register 0.11 - Power Down
+*
+*******************************************************************************/
+GT_STATUS gprtGetPortPowerDown
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_BOOL   *state
+);
+
+/*******************************************************************************
+* gprtPortRestartAutoNeg
+*
+* DESCRIPTION:
+*         Restart AutoNegotiation. If AutoNegotiation is not enabled, it'll enable
+*        it. Loopback and Power Down will be disabled by this routine.
+*
+* INPUTS:
+*         port - logical port number
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK     - on success
+*         GT_FAIL     - on error
+*
+* COMMENTS:
+*         data sheet register 0.9 - Restart Auto-Negotiation
+*
+*******************************************************************************/
+
+GT_STATUS gprtPortRestartAutoNeg
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port
+);
+
+
+/*******************************************************************************
+* gprtSetPortDuplexMode
+*
+* DESCRIPTION:
+*         Sets duplex mode for a specific logical port. This function will keep
+*        the speed and loopback mode to the previous value, but disable others,
+*        such as Autonegotiation.
+*
+* INPUTS:
+*         port     - logical port number
+*         dMode    - dulpex mode
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK     - on success
+*         GT_FAIL     - on error
+*
+* COMMENTS:
+*         data sheet register 0.8 - Duplex Mode
+*
+*******************************************************************************/
+
+GT_STATUS gprtSetPortDuplexMode
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_BOOL        dMode
+);
+
+
+/*******************************************************************************
+* gprtSetPortAutoMode
+*
+* DESCRIPTION:
+*         This routine sets up the port with given Auto Mode.
+*        Supported mode is as follows:
+*        - Auto for both speed and duplex.
+*        - Auto for speed only and Full duplex.
+*        - Auto for speed only and Half duplex.
+*        - Auto for duplex only and speed 1000Mbps.
+*        - Auto for duplex only and speed 100Mbps.
+*        - Auto for duplex only and speed 10Mbps.
+*        - Speed 1000Mbps and Full duplex.
+*        - Speed 1000Mbps and Half duplex.
+*        - Speed 100Mbps and Full duplex.
+*        - Speed 100Mbps and Half duplex.
+*        - Speed 10Mbps and Full duplex.
+*        - Speed 10Mbps and Half duplex.
+*
+*
+* INPUTS:
+*         port - The logical port number
+*         mode - Auto Mode to be written
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+* COMMENTS:
+*         data sheet register 4.8, 4.7, 4.6, and 4.5 Autonegotiation Advertisement
+*         data sheet register 4.6, 4.5 Autonegotiation Advertisement for 1000BX
+*         data sheet register 9.9, 9.8 Autonegotiation Advertisement for 1000BT
+*******************************************************************************/
+
+GT_STATUS gprtSetPortAutoMode
+(
+    IN GT_QD_DEV     *dev,
+    IN GT_LPORT     port,
+    IN GT_PHY_AUTO_MODE mode
+);
+
+/*******************************************************************************
+* gprtGetPortAutoMode
+*
+* DESCRIPTION:
+*        This routine get Auto Mode of specific port.
+*        Supported mode is as follows:
+*        - Auto for both speed and duplex.
+*        - Auto for speed only and Full duplex.
+*        - Auto for speed only and Half duplex.
+*        - Auto for duplex only and speed 1000Mbps.
+*        - Auto for duplex only and speed 100Mbps.
+*        - Auto for duplex only and speed 10Mbps.
+*        - Speed 1000Mbps and Full duplex.
+*        - Speed 1000Mbps and Half duplex.
+*        - Speed 100Mbps and Full duplex.
+*        - Speed 100Mbps and Half duplex.
+*        - Speed 10Mbps and Full duplex.
+*        - Speed 10Mbps and Half duplex.
+*
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                  The physical address, if SERDES device is accessed
+*
+* OUTPUTS:
+*        mode -    Auto Mode to be written
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - on device without copper
+*
+* COMMENTS:
+*         data sheet register 4.8, 4.7, 4.6, and 4.5 Autonegotiation Advertisement
+*         data sheet register 4.6, 4.5 Autonegotiation Advertisement for 1000BX
+*         data sheet register 9.9, 9.8 Autonegotiation Advertisement for 1000BT
+*******************************************************************************/
+GT_STATUS gprtGetPortAutoMode
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT         port,
+    OUT GT_PHY_AUTO_MODE *mode
+);
+
+/*******************************************************************************
+* gprtSetPause
+*
+* DESCRIPTION:
+*       This routine will set the pause bit in Autonegotiation Advertisement
+*        Register. And restart the autonegotiation.
+*
+* INPUTS:
+* port - The logical port number
+* state - GT_PHY_PAUSE_MODE enum value.
+*            GT_PHY_NO_PAUSE        - disable pause
+*             GT_PHY_PAUSE        - support pause
+*            GT_PHY_ASYMMETRIC_PAUSE    - support asymmetric pause
+*            GT_PHY_BOTH_PAUSE    - support both pause and asymmetric pause
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+* COMMENTS:
+* data sheet register 4.10 Autonegotiation Advertisement Register
+*******************************************************************************/
+
+GT_STATUS gprtSetPause
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_PHY_PAUSE_MODE state
+);
+
+/*******************************************************************************
+* gprtGetPause
+*
+* DESCRIPTION:
+*       This routine will get the pause bit in Autonegotiation Advertisement
+*       Register.
+*
+* INPUTS:
+*        port -  The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*
+*
+* OUTPUTS:
+*        state - GT_PHY_PAUSE_MODE enum value.
+*                GT_PHY_NO_PAUSE         - disable pause
+*                GT_PHY_PAUSE            - support pause
+*                GT_PHY_ASYMMETRIC_PAUSE - support asymmetric pause
+*                GT_PHY_BOTH_PAUSE       - support both pause and asymmetric pause
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+* COMMENTS:
+*       data sheet register 4.10 Autonegotiation Advertisement Register
+*******************************************************************************/
+GT_STATUS gprtGetPause
+(
+    IN GT_QD_DEV           *dev,
+    IN GT_LPORT            port,
+    OUT  GT_PHY_PAUSE_MODE *state
+);
+
+/*******************************************************************************
+* gprtSetDTEDetect
+*
+* DESCRIPTION:
+*       This routine enables/disables DTE.
+*
+* INPUTS:
+*         port - The logical port number
+*         mode - either GT_TRUE(for enable) or GT_FALSE(for disable)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+GT_STATUS gprtSetDTEDetect
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL   state
+);
+
+/*******************************************************************************
+* gprtGetDTEDetectStatus
+*
+* DESCRIPTION:
+*       This routine gets DTE status.
+*
+* INPUTS:
+*         port - The logical port number
+*
+* OUTPUTS:
+*       status - GT_TRUE, if link partner needs DTE power.
+*                 GT_FALSE, otherwise.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+GT_STATUS gprtGetDTEDetectStatus
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_BOOL   *state
+);
+
+/*******************************************************************************
+* gprtSetDTEDetectDropWait
+*
+* DESCRIPTION:
+*       Once the PHY no longer detects that the link partner filter, the PHY
+*        will wait a period of time before clearing the power over Ethernet
+*        detection status bit. The wait time is 5 seconds multiplied by the
+*        given value.
+*
+* INPUTS:
+*         port - The logical port number
+*       waitTime - 0 ~ 15 (unit of 4 sec.)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+GT_STATUS gprtSetDTEDetectDropWait
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    IN  GT_U16    waitTime
+);
+
+/*******************************************************************************
+* gprtGetDTEDetectDropWait
+*
+* DESCRIPTION:
+*       Once the PHY no longer detects that the link partner filter, the PHY
+*        will wait a period of time before clearing the power over Ethernet
+*        detection status bit. The wait time is 5 seconds multiplied by the
+*        returned value.
+*
+* INPUTS:
+*         port - The logical port number
+*
+* OUTPUTS:
+*       waitTime - 0 ~ 15 (unit of 4 sec.)
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+GT_STATUS gprtGetDTEDetectDropWait
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_U16    *waitTime
+);
+
+/*******************************************************************************
+* gprtSetEnergyDetect
+*
+* DESCRIPTION:
+*       Energy Detect power down mode enables or disables the PHY to wake up on
+*        its own by detecting activity on the CAT 5 cable.
+*
+* INPUTS:
+*         port - The logical port number
+*       mode - GT_EDETECT_MODE type
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+GT_STATUS gprtSetEnergyDetect
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    IN  GT_EDETECT_MODE   mode
+);
+
+/*******************************************************************************
+* gprtGetEnergyDetect
+*
+* DESCRIPTION:
+*       Energy Detect power down mode enables or disables the PHY to wake up on
+*        its own by detecting activity on the CAT 5 cable.
+*
+* INPUTS:
+*         port - The logical port number
+*
+* OUTPUTS:
+*       mode - GT_EDETECT_MODE type
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+GT_STATUS gprtGetEnergyDetect
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_EDETECT_MODE   *mode
+);
+
+/*******************************************************************************
+* gprtSet1000TMasterMode
+*
+* DESCRIPTION:
+*       This routine sets the ports 1000Base-T Master mode and restart the Auto
+*        negotiation.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - GT_1000T_MASTER_SLAVE structure
+*                autoConfig   - GT_TRUE for auto, GT_FALSE for manual setup.
+*                masterPrefer - GT_TRUE if Master configuration is preferred.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSet1000TMasterMode
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_LPORT     port,
+    IN  GT_1000T_MASTER_SLAVE   *mode
+);
+
+/*******************************************************************************
+* gprtGet1000TMasterMode
+*
+* DESCRIPTION:
+*       This routine retrieves 1000Base-T Master Mode
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_1000T_MASTER_SLAVE structure
+*                autoConfig   - GT_TRUE for auto, GT_FALSE for manual setup.
+*                masterPrefer - GT_TRUE if Master configuration is preferred.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGet1000TMasterMode
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_LPORT     port,
+    OUT GT_1000T_MASTER_SLAVE   *mode
+);
+
+/*******************************************************************************
+* gprtGetPhyLinkStatus
+*
+* DESCRIPTION:
+*       This routine retrieves the Link status.
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*
+* OUTPUTS:
+*       linkStatus - GT_FALSE if link is not established,
+*                     GT_TRUE if link is established.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPhyLinkStatus
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL      *linkStatus
+);
+
+/*******************************************************************************
+* gprtSetPktGenEnable
+*
+* DESCRIPTION:
+*       This routine enables or disables Packet Generator.
+*       Link should be established first prior to enabling the packet generator,
+*       and generator will generate packets at the speed of the established link.
+*        When enables packet generator, the following information should be
+*       provided:
+*           Payload Type:  either Random or 5AA55AA5
+*           Packet Length: either 64 or 1514 bytes
+*           Error Packet:  either Error packet or normal packet
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*       en      - GT_TRUE to enable, GT_FALSE to disable
+*       pktInfo - packet information(GT_PG structure pointer), if en is GT_TRUE.
+*                 ignored, if en is GT_FALSE
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetPktGenEnable
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL   en,
+    IN GT_PG     *pktInfo
+);
+
+
+/*******************************************************************************
+* gprtGetPhyReg
+*
+* DESCRIPTION:
+*       This routine reads Phy Registers.
+*
+* INPUTS:
+*       port -    The logical port number,
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPhyReg
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_U32         regAddr,
+    OUT GT_U16         *data
+);
+
+/*******************************************************************************
+* gprtSetPhyReg
+*
+* DESCRIPTION:
+*       This routine writes Phy Registers.
+*
+* INPUTS:
+*       port -    The logical port number,
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetPhyReg
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT         port,
+    IN  GT_U32           regAddr,
+    IN  GT_U16           data
+);
+
+/*******************************************************************************
+* gprtGetPagedPhyReg
+*
+* DESCRIPTION:
+*       This routine reads phy register of the given page
+*
+* INPUTS:
+*        port     - logical port to be read
+*        regAddr    - register offset to be read
+*        page    - page number to be read
+*
+* OUTPUTS:
+*        data    - value of the read register
+*
+* RETURNS:
+*       GT_OK               - if read successed
+*       GT_FAIL               - if read failed
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gprtGetPagedPhyReg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U32  port,
+    IN    GT_U32  regAddr,
+    IN    GT_U32  page,
+    OUT GT_U16* data
+);
+
+/*******************************************************************************
+* gprtSetPagedPhyReg
+*
+* DESCRIPTION:
+*       This routine writes a value to phy register of the given page
+*
+* INPUTS:
+*        port     - logical port to be read
+*        regAddr    - register offset to be read
+*        page    - page number to be read
+*        data    - value of the read register
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*       GT_OK               - if read successed
+*       GT_FAIL               - if read failed
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gprtSetPagedPhyReg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U32 port,
+    IN    GT_U32 regAddr,
+    IN    GT_U32 page,
+    IN  GT_U16 data
+);
+
+/*******************************************************************************
+* gprtPhyIntEnable
+*
+* DESCRIPTION:
+* Enable/Disable one PHY Interrupt
+* This register determines whether the INT# pin is asserted when an interrupt
+* event occurs. When an interrupt occurs, the corresponding bit is set and
+* remains set until register 19 is read via the SMI. When interrupt enable
+* bits are not set in register 18, interrupt status bits in register 19 are
+* still set when the corresponding interrupt events occur. However, the INT#
+* is not asserted.
+*
+* INPUTS:
+* port    - logical port number
+* intType - the type of interrupt to enable/disable. any combination of
+*            GT_SPEED_CHANGED,
+*            GT_DUPLEX_CHANGED,
+*            GT_PAGE_RECEIVED,
+*            GT_AUTO_NEG_COMPLETED,
+*            GT_LINK_STATUS_CHANGED,
+*            GT_SYMBOL_ERROR,
+*            GT_FALSE_CARRIER,
+*            GT_FIFO_FLOW,
+*            GT_CROSSOVER_CHANGED,
+*            GT_POLARITY_CHANGED, and
+*            GT_JABBER
+*
+* OUTPUTS:
+* None.
+*
+* RETURNS:
+* GT_OK - on success
+* GT_FAIL - on error
+*
+* COMMENTS:
+* 88E3081 data sheet register 18
+*
+*******************************************************************************/
+
+GT_STATUS gprtPhyIntEnable
+(
+IN GT_QD_DEV *dev,
+IN GT_LPORT   port,
+IN GT_U16    intType
+);
+
+
+/*******************************************************************************
+* gprtGetPhyIntStatus
+*
+* DESCRIPTION:
+* Check to see if a specific type of  interrupt occured
+*
+* INPUTS:
+* port - logical port number
+* intType - the type of interrupt which causes an interrupt.
+*            any combination of
+*            GT_SPEED_CHANGED,
+*            GT_DUPLEX_CHANGED,
+*            GT_PAGE_RECEIVED,
+*            GT_AUTO_NEG_COMPLETED,
+*            GT_LINK_STATUS_CHANGED,
+*            GT_SYMBOL_ERROR,
+*            GT_FALSE_CARRIER,
+*            GT_FIFO_FLOW,
+*            GT_CROSSOVER_CHANGED,
+*            GT_POLARITY_CHANGED, and
+*            GT_JABBER
+*
+* OUTPUTS:
+* None.
+*
+* RETURNS:
+* GT_OK - on success
+* GT_FAIL - on error
+*
+* COMMENTS:
+* 88E3081 data sheet register 19
+*
+*******************************************************************************/
+
+GT_STATUS gprtGetPhyIntStatus
+(
+IN GT_QD_DEV *dev,
+IN  GT_LPORT port,
+OUT  GT_U16* intType
+);
+
+/*******************************************************************************
+* gprtGetPhyIntPortSummary
+*
+* DESCRIPTION:
+* Lists the ports that have active interrupts. It provides a quick way to
+* isolate the interrupt so that the MAC or switch does not have to poll the
+* interrupt status register (19) for all ports. Reading this register does not
+* de-assert the INT# pin
+*
+* INPUTS:
+* none
+*
+* OUTPUTS:
+* GT_U8 *intPortMask - bit Mask with the bits set for the corresponding
+* phys with active interrupt. E.g., the bit number 0 and 2 are set when
+* port number 0 and 2 have active interrupt
+*
+* RETURNS:
+* GT_OK - on success
+* GT_FAIL - on error
+*
+* COMMENTS:
+* 88E3081 data sheet register 20
+*
+*******************************************************************************/
+
+GT_STATUS gprtGetPhyIntPortSummary
+(
+IN GT_QD_DEV *dev,
+OUT GT_U16 *intPortMask
+);
+
+
+
+/*******************************************************************************
+* gprtSetForceFc
+*
+* DESCRIPTION:
+*       This routine set the force flow control state.
+*
+* INPUTS:
+*       port  - the logical port number.
+*       force - GT_TRUE for force flow control  or GT_FALSE otherwise
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetForceFc
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT port,
+    IN GT_BOOL  force
+);
+
+
+
+/*******************************************************************************
+* gprtGetForceFc
+*
+* DESCRIPTION:
+*       This routine get the force flow control state.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       force - GT_TRUE for force flow control  or GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetForceFc
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *force
+);
+
+
+
+/*******************************************************************************
+* gprtSetTrailerMode
+*
+* DESCRIPTION:
+*       This routine set the egress trailer mode.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - GT_TRUE for add trailer or GT_FALSE otherwise
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetTrailerMode
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT port,
+    IN GT_BOOL  mode
+);
+
+
+
+/*******************************************************************************
+* gprtGetTrailerMode
+*
+* DESCRIPTION:
+*       This routine get the egress trailer mode.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_TRUE for add trailer or GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetTrailerMode
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *mode
+);
+
+
+
+/*******************************************************************************
+* gprtSetIngressMode
+*
+* DESCRIPTION:
+*       This routine set the ingress mode.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - the ingress mode.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetIngressMode
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT        port,
+    IN GT_INGRESS_MODE mode
+);
+
+
+
+/*******************************************************************************
+* gprtGetIngressMode
+*
+* DESCRIPTION:
+*       This routine get the ingress mode.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       mode - the ingress mode.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetIngressMode
+(
+    IN GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    OUT GT_INGRESS_MODE *mode
+);
+
+
+
+/*******************************************************************************
+* gprtSetMcRateLimit
+*
+* DESCRIPTION:
+*       This routine set the port multicast rate limit.
+*
+* INPUTS:
+*       port - the logical port number.
+*       rate - GT_TRUE to Enable, GT_FALSE for otherwise.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetMcRateLimit
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT     port,
+    IN GT_MC_RATE   rate
+);
+
+
+
+/*******************************************************************************
+* gprtGetMcRateLimit
+*
+* DESCRIPTION:
+*       This routine Get the port multicast rate limit.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       rate - GT_TRUE to Enable, GT_FALSE for otherwise.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetMcRateLimit
+(
+    IN GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_MC_RATE  *rate
+);
+
+
+
+/*******************************************************************************
+* gprtSetCtrMode
+*
+* DESCRIPTION:
+*       This routine sets the port counters mode of operation.
+*
+* INPUTS:
+*       mode  - the counter mode.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetCtrMode
+(
+    IN GT_QD_DEV *dev,
+    IN GT_CTR_MODE  mode
+);
+
+
+
+/*******************************************************************************
+* gprtClearAllCtr
+*
+* DESCRIPTION:
+*       This routine clears all port counters.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtClearAllCtr
+(
+    IN GT_QD_DEV *dev
+);
+
+
+/*******************************************************************************
+* gprtGetPortCtr
+*
+* DESCRIPTION:
+*       This routine gets the port counters.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       ctr - the counters value.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPortCtr
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT        port,
+    OUT GT_PORT_STAT    *ctr
+);
+
+
+
+
+/*******************************************************************************
+* gprtGetPartnerLinkPause
+*
+* DESCRIPTION:
+*       This routine retrives the link partner pause state.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       state - GT_TRUE for enable  or GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPartnerLinkPause
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *state
+);
+
+
+
+/*******************************************************************************
+* gprtGetSelfLinkPause
+*
+* DESCRIPTION:
+*       This routine retrives the link pause state.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       state - GT_TRUE for enable  or GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetSelfLinkPause
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *state
+);
+
+
+
+/*******************************************************************************
+* gprtGetResolve
+*
+* DESCRIPTION:
+*       This routine retrives the resolve state.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       state - GT_TRUE for Done  or GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetResolve
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *state
+);
+
+
+
+/*******************************************************************************
+* gprtGetLinkState
+*
+* DESCRIPTION:
+*       This routine retrives the link state.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       state - GT_TRUE for Up  or GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetLinkState
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *state
+);
+
+
+
+/*******************************************************************************
+* gprtGetPortMode
+*
+* DESCRIPTION:
+*       This routine retrives the port mode.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_TRUE for MII  or GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPortMode
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *mode
+);
+
+
+
+/*******************************************************************************
+* gprtGetPhyMode
+*
+* DESCRIPTION:
+*       This routine retrives the PHY mode.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_TRUE for MII PHY  or GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPhyMode
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *mode
+);
+
+
+
+/*******************************************************************************
+* gprtGetDuplex
+*
+* DESCRIPTION:
+*       This routine retrives the port duplex mode.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_TRUE for Full  or GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetDuplex
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *mode
+);
+
+
+
+/*******************************************************************************
+* gprtGetSpeed
+*
+* DESCRIPTION:
+*       This routine retrives the port speed.
+*
+* INPUTS:
+*       speed - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_TRUE for 100Mb/s  or GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetSpeed
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *speed
+);
+
+/*******************************************************************************
+* gprtSetDuplex
+*
+* DESCRIPTION:
+*       This routine sets the duplex mode of MII/SNI/RMII ports.
+*
+* INPUTS:
+*       port -     the logical port number.
+*                (for FullSail, it will be port 2, and for ClipperShip,
+*                it could be either port 5 or port 6.)
+*       mode -  GT_TRUE for Full Duplex,
+*                GT_FALSE for Half Duplex.
+*
+* OUTPUTS: None
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetDuplex
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    IN  GT_BOOL  mode
+);
+
+
+/*******************************************************************************
+* gqosSetPortDefaultTc
+*
+* DESCRIPTION:
+*       Sets the default traffic class for a specific port.
+*
+* INPUTS:
+*       port      - logical port number
+*       trafClass - default traffic class of a port.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gcosSetPortDefaultTc
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT port,
+    IN GT_U8    trafClass
+);
+
+
+/*******************************************************************************
+* gcosGetPortDefaultTc
+*
+* DESCRIPTION:
+*       Gets the default traffic class for a specific port.
+*
+* INPUTS:
+*       port      - logical port number
+*
+* OUTPUTS:
+*       trafClass - default traffic class of a port.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gcosGetPortDefaultTc
+(
+    IN  GT_QD_DEV *dev,
+    IN GT_LPORT   port,
+    OUT GT_U8     *trafClass
+);
+
+
+/*******************************************************************************
+* gqosSetPrioMapRule
+*
+* DESCRIPTION:
+*       This routine sets priority mapping rule.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - GT_TRUE for user prio rule, GT_FALSE for otherwise.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosSetPrioMapRule
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT port,
+    IN GT_BOOL  mode
+);
+
+
+
+/*******************************************************************************
+* gqosGetPrioMapRule
+*
+* DESCRIPTION:
+*       This routine get the priority mapping rule.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_TRUE for user prio rule, GT_FALSE for otherwise.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosGetPrioMapRule
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *mode
+);
+
+
+
+/*******************************************************************************
+* gqosIpPrioMapEn
+*
+* DESCRIPTION:
+*       This routine enables the IP priority mapping.
+*
+* INPUTS:
+*       port - the logical port number.
+*       en   - GT_TRUE to Enable, GT_FALSE for otherwise.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosIpPrioMapEn
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT port,
+    IN GT_BOOL  en
+);
+
+
+
+/*******************************************************************************
+* gqosGetIpPrioMapEn
+*
+* DESCRIPTION:
+*       This routine return the IP priority mapping state.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       en    - GT_TRUE for user prio rule, GT_FALSE for otherwise.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosGetIpPrioMapEn
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *en
+);
+
+
+
+/*******************************************************************************
+* gqosUserPrioMapEn
+*
+* DESCRIPTION:
+*       This routine enables the user priority mapping.
+*
+* INPUTS:
+*       port - the logical port number.
+*       en   - GT_TRUE to Enable, GT_FALSE for otherwise.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosUserPrioMapEn
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT port,
+    IN GT_BOOL  en
+);
+
+
+
+/*******************************************************************************
+* gqosGetUserPrioMapEn
+*
+* DESCRIPTION:
+*       This routine return the user priority mapping state.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       en    - GT_TRUE for user prio rule, GT_FALSE for otherwise.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosGetUserPrioMapEn
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *en
+);
+
+
+
+/*******************************************************************************
+* gcosGetUserPrio2Tc
+*
+* DESCRIPTION:
+*       Gets the traffic class number for a specific 802.1p user priority.
+*
+* INPUTS:
+*       userPrior - user priority
+*
+* OUTPUTS:
+*       trClass - The Traffic Class the received frame is assigned.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*       Table - UserPrio2Tc
+*
+*******************************************************************************/
+GT_STATUS gcosGetUserPrio2Tc
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    userPrior,
+    OUT GT_U8   *trClass
+);
+
+
+/*******************************************************************************
+* gcosSetUserPrio2Tc
+*
+* DESCRIPTION:
+*       Sets the traffic class number for a specific 802.1p user priority.
+*
+* INPUTS:
+*       userPrior - user priority of a port.
+*       trClass   - the Traffic Class the received frame is assigned.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*       Table - UserPrio2Tc
+*
+*******************************************************************************/
+GT_STATUS gcosSetUserPrio2Tc
+(
+    IN GT_QD_DEV *dev,
+    IN GT_U8    userPrior,
+    IN GT_U8    trClass
+);
+
+
+/*******************************************************************************
+* gcosGetDscp2Tc
+*
+* DESCRIPTION:
+*       This routine retrieves the traffic class assigned for a specific
+*       IPv4 Dscp.
+*
+* INPUTS:
+*       dscp    - the IPv4 frame dscp to query.
+*
+* OUTPUTS:
+*       trClass - The Traffic Class the received frame is assigned.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*       Table - UserPrio2Tc
+*
+*******************************************************************************/
+GT_STATUS gcosGetDscp2Tc
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8   dscp,
+    OUT GT_U8   *trClass
+);
+
+
+/*******************************************************************************
+* gcosSetDscp2Tc
+*
+* DESCRIPTION:
+*       This routine sets the traffic class assigned for a specific
+*       IPv4 Dscp.
+*
+* INPUTS:
+*       dscp    - the IPv4 frame dscp to map.
+*       trClass - the Traffic Class the received frame is assigned.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*       Table - UserPrio2Tc
+*
+*******************************************************************************/
+GT_STATUS gcosSetDscp2Tc
+(
+    IN GT_QD_DEV *dev,
+    IN GT_U8    dscp,
+    IN GT_U8    trClass
+);
+
+
+/*******************************************************************************
+* qdLoadDriver
+*
+* DESCRIPTION:
+*       QuarterDeck Driver Initialization Routine.
+*       This is the first routine that needs be called by system software.
+*       It takes sysCfg from system software, and retures a pointer (*dev)
+*       to a data structure which includes infomation related to this QuarterDeck
+*       device. This pointer (*dev) is then used for all the API functions.
+*
+* INPUTS:
+*       sysCfg      - Holds system configuration parameters.
+*
+* OUTPUTS:
+*       dev         - Holds general system information.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_ALREADY_EXIST    - if device already started
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*     qdUnloadDriver is provided when the driver is not to be used anymore.
+*
+*******************************************************************************/
+GT_STATUS qdLoadDriver
+(
+    IN  GT_SYS_CONFIG   *sysCfg,
+    OUT GT_QD_DEV    *dev
+);
+
+
+/*******************************************************************************
+* qdUnloadDriver
+*
+* DESCRIPTION:
+*       This function unloads the QuaterDeck Driver.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       1.  This function should be called only after successful execution of
+*           qdLoadDriver().
+*
+*******************************************************************************/
+GT_STATUS qdUnloadDriver
+(
+    IN GT_QD_DEV* dev
+);
+
+
+/*******************************************************************************
+* sysEnable
+*
+* DESCRIPTION:
+*       This function enables the system for full operation.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS sysEnable
+(
+    IN GT_QD_DEV* dev
+);
+
+
+/*******************************************************************************
+* gsysSwReset
+*
+* DESCRIPTION:
+*       This routine preforms switch software reset.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSwReset
+(
+    IN GT_QD_DEV* dev
+);
+
+
+/*******************************************************************************
+* gsysSetDiscardExcessive
+*
+* DESCRIPTION:
+*       This routine set the Discard Excessive state.
+*
+* INPUTS:
+*       en - GT_TRUE Discard is enabled, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetDiscardExcessive
+(
+    IN GT_QD_DEV* dev,
+    IN GT_BOOL en
+);
+
+
+
+/*******************************************************************************
+* gsysGetDiscardExcessive
+*
+* DESCRIPTION:
+*       This routine get the Discard Excessive state.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       en - GT_TRUE Discard is enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetDiscardExcessive
+(
+    IN GT_QD_DEV* dev,
+    IN GT_BOOL *en
+);
+
+
+
+/*******************************************************************************
+* gsysSetSchedulingMode
+*
+* DESCRIPTION:
+*       This routine set the Scheduling Mode.
+*
+* INPUTS:
+*       mode - GT_TRUE wrr, GT_FALSE strict.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetSchedulingMode
+(
+    IN GT_QD_DEV* dev,
+    IN GT_BOOL mode
+);
+
+
+
+/*******************************************************************************
+* gsysGetSchedulingMode
+*
+* DESCRIPTION:
+*       This routine get the Scheduling Mode.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       mode - GT_TRUE wrr, GT_FALSE strict.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetSchedulingMode
+(
+    IN GT_QD_DEV *dev,
+    OUT GT_BOOL *mode
+);
+
+
+
+/*******************************************************************************
+* gsysSetMaxFrameSize
+*
+* DESCRIPTION:
+*       This routine Set the max frame size allowed.
+*
+* INPUTS:
+*       mode - GT_TRUE max size 1522, GT_FALSE max size 1535.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetMaxFrameSize
+(
+    IN GT_QD_DEV* dev,
+    IN GT_BOOL mode
+);
+
+
+
+/*******************************************************************************
+* gsysGetMaxFrameSize
+*
+* DESCRIPTION:
+*       This routine Get the max frame size allowed.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       mode - GT_TRUE max size 1522, GT_FALSE max size 1535.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetMaxFrameSize
+(
+    IN GT_QD_DEV *dev,
+    OUT GT_BOOL *mode
+);
+
+
+
+/*******************************************************************************
+* gsysReLoad
+*
+* DESCRIPTION:
+*       This routine cause to the switch to reload the EEPROM.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysReLoad
+(
+    IN GT_QD_DEV* dev
+);
+
+
+/*******************************************************************************
+* gsysSetWatchDog
+*
+* DESCRIPTION:
+*       This routine Set the the watch dog mode.
+*
+* INPUTS:
+*       en - GT_TRUE enables, GT_FALSE disable.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetWatchDog
+(
+    IN GT_QD_DEV* dev,
+    IN GT_BOOL en
+);
+
+
+
+/*******************************************************************************
+* gsysGetWatchDog
+*
+* DESCRIPTION:
+*       This routine Get the the watch dog mode.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       en - GT_TRUE enables, GT_FALSE disable.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetWatchDog
+(
+    IN GT_QD_DEV* dev,
+    OUT GT_BOOL *en
+);
+
+
+/*******************************************************************************
+* gsysSetDuplexPauseMac
+*
+* DESCRIPTION:
+*       This routine sets the full duplex pause src Mac Address.
+*
+* INPUTS:
+*       mac - The Mac address to be set.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetDuplexPauseMac
+(
+    IN GT_QD_DEV* dev,
+    IN GT_ETHERADDR *mac
+);
+
+
+/*******************************************************************************
+* gsysGetDuplexPauseMac
+*
+* DESCRIPTION:
+*       This routine Gets the full duplex pause src Mac Address.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       mac - the Mac address.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetDuplexPauseMac
+(
+    IN GT_QD_DEV* dev,
+    OUT GT_ETHERADDR *mac
+);
+
+
+
+/*******************************************************************************
+* gsysSetPerPortDuplexPauseMac
+*
+* DESCRIPTION:
+*       This routine sets whether the full duplex pause src Mac Address is per
+*       port or per device.
+*
+* INPUTS:
+*       en - GT_TURE per port mac, GT_FALSE global mac.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetPerPortDuplexPauseMac
+(
+    IN GT_QD_DEV* dev,
+    IN GT_BOOL en
+);
+
+
+
+/*******************************************************************************
+* gsysGetPerPortDuplexPauseMac
+*
+* DESCRIPTION:
+*       This routine Gets whether the full duplex pause src Mac Address is per
+*       port or per device.
+*
+* INPUTS:
+*       en - GT_TURE per port mac, GT_FALSE global mac.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetPerPortDuplexPauseMac
+(
+    IN GT_QD_DEV* dev,
+    IN GT_BOOL *en
+);
+
+/*******************************************************************************
+* gsysSetPortWakeonFrameEn
+*
+* DESCRIPTION:
+*       This routine sets port interrupt for wake on frame.
+*
+* INPUTS:
+*       portVec - combine port interrupt enable=1 disable=0:
+*                 port 0: bit0, port 1: bit1, port 2: bit2, ...
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetPortWakeonFrameEn
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U8        portVec
+);
+
+/*******************************************************************************
+* gsysGetPortWakeonFrameEnSt
+*
+* DESCRIPTION:
+*       This routine gets port interrupt status for wake on frame.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       portVec - combine port interrupt enable=1 disable=0:
+*                 port 0: bit0, port 1: bit1, port 2: bit2, ...
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetPortWakeonFrameEnSt
+(
+    IN GT_QD_DEV    *dev,
+    OUT GT_U8       *portVec
+);
+
+/*******************************************************************************
+* gsysGetPortWakeonFrameEn
+*
+* DESCRIPTION:
+*       This routine gets port interrupt status for wake on frame.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       portVec - combine port interrupt enable=1 disable=0:
+*                 port 0: bit0, port 1: bit1, port 2: bit2, ...
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetPortWakeonFrameEn
+(
+    IN GT_QD_DEV    *dev,
+    OUT GT_U8       *portVec
+);
+
+/*******************************************************************************
+* gsysSetWoLMac
+*
+* DESCRIPTION:
+*       This routine sets the Wake on Lan Mac Address.
+*        MAC address should be an Unicast address.
+*        For different MAC Addresses per port operation,
+*        use gsysSetPerPortDuplexPauseMac API.
+*
+* INPUTS:
+*       mac - The Mac address to be set.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetWoLMac
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_ETHERADDR *mac
+);
+
+/*******************************************************************************
+* gsysGetWoLMac
+*
+* DESCRIPTION:
+*       This routine Gets the Wake on Lan Mac Address.
+*        For different MAC Addresses per port operation,
+*        use gsysGetPerPortDuplexPauseMac API.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       mac - the Mac address.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetWoLMac
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_ETHERADDR *mac
+);
+
+/*******************************************************************************
+* gsysSetPerPortWoLMac
+*
+* DESCRIPTION:
+*       This routine sets whether the Wake on Lan Mac Address is per
+*       port or per device.
+*
+* INPUTS:
+*       en - GT_TURE per port mac, GT_FALSE global mac.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetPerPortWoLMac
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL      en
+);
+
+/*******************************************************************************
+* gsysGetPerPortWoLMac
+*
+* DESCRIPTION:
+*       This routine Gets whether the Wake on Lanc Mac Address is per
+*       port or per device.
+*
+* INPUTS:
+*       en - GT_TURE per port mac, GT_FALSE global mac.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetPerPortWoLMac
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+);
+
+/*******************************************************************************
+* gsysSetWoLPass
+*
+* DESCRIPTION:
+*       This routine sets the Wake on Lan Password Mac Address.
+*
+* INPUTS:
+*       mac - The Mac address to be set.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetWoLPass
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_ETHERADDR *mac
+);
+
+/*******************************************************************************
+* gsysGetWoLPass
+*
+* DESCRIPTION:
+*       This routine Gets the Wake on Lan password Mac Address.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       mac - the Mac address.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetWoLPass
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_ETHERADDR *mac
+);
+
+/*******************************************************************************
+* gsysReadMiiRegister
+*
+* DESCRIPTION:
+*       This routine reads QuarterDeck Registers. Since this routine is only for
+*        Diagnostic Purpose, no error checking will be performed.
+*        User has to know exactly which phy address(0 ~ 0x1F) will be read.
+*
+* INPUTS:
+*       phyAddr - Phy Address to read the register for.( 0 ~ 0x1F )
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysReadMiiReg
+(
+    IN GT_QD_DEV* dev,
+    IN  GT_U32    phyAddr,
+    IN  GT_U32    regAddr,
+    OUT GT_U32    *data
+);
+
+/*******************************************************************************
+* gsysWriteMiiRegister
+*
+* DESCRIPTION:
+*       This routine writes QuarterDeck Registers. Since this routine is only for
+*        Diagnostic Purpose, no error checking will be performed.
+*        User has to know exactly which phy address(0 ~ 0x1F) will be read.
+*
+* INPUTS:
+*       phyAddr - Phy Address to read the register for.( 0 ~ 0x1F )
+*       regAddr - The register's address.
+*       data    - data to be written.
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysWriteMiiReg
+(
+    IN GT_QD_DEV* dev,
+    IN  GT_U32    phyAddr,
+    IN  GT_U32    regAddr,
+    IN  GT_U16    data
+);
+
+#ifdef GT_RMGMT_ACCESS
+/*******************************************************************************
+* gsysAccessMultiRegs
+*
+* DESCRIPTION:
+*       This function accesses switch's registers.
+*
+* INPUTS:
+*   regList     - list of HW_DEV_RW_REG.
+*     HW_DEV_RW_REG:
+*     cmd - HW_REG_READ, HW_REG_WRITE, HW_REG_WAIT_TILL_0 or HW_REG_WAIT_TILL_1
+*     addr - SMI Address
+*     reg  - Register offset
+*     data - INPUT,OUTPUT:Value in the Register or Bit number
+*
+* OUTPUTS:
+*   regList
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysAccessMultiRegs
+(
+    IN  GT_QD_DEV    *dev,
+    INOUT HW_DEV_REG_ACCESS *regList
+);
+#endif
+
+/*******************************************************************************
+* gsysGetSW_Mode
+*
+* DESCRIPTION:
+*       This routine get the Switch mode. These two bits returen
+*       the current value of the SW_MODE[1:0] pins.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       mode - GT_TRUE Discard is enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*         This feature is for both clippership and fullsail
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetSW_Mode
+(
+    IN GT_QD_DEV* dev,
+    IN GT_SW_MODE *mode
+);
+
+/*******************************************************************************
+* gsysGetInitReady
+*
+* DESCRIPTION:
+*       This routine get the InitReady bit. This bit is set to a one when the ATU,
+*       the Queue Controller and the Statistics Controller are done with their
+*       initialization and are ready to accept frames.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       mode - GT_TRUE: switch is ready, GT_FALSE otherwise.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*         This feature is for both clippership and fullsail
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetInitReady
+(
+    IN GT_QD_DEV* dev,
+    IN GT_BOOL *mode
+);
+
+
+/*******************************************************************************
+* gstatsFlushAll
+*
+* DESCRIPTION:
+*       Flush All RMON counters for all ports.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*
+* COMMENTS:
+*       None
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstatsFlushAll
+(
+    IN GT_QD_DEV* dev
+);
+
+/*******************************************************************************
+* gstatsFlushPort
+*
+* DESCRIPTION:
+*       Flush All RMON counters for a given port.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*
+* COMMENTS:
+*
+*       None
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstatsFlushPort
+(
+    IN GT_QD_DEV* dev,
+    IN GT_LPORT    port
+);
+
+/*******************************************************************************
+* gstatsGetPortCounter
+*
+* DESCRIPTION:
+*        This routine gets a specific counter of the given port
+*
+* INPUTS:
+*        port - the logical port number.
+*        counter - the counter which will be read
+*
+* OUTPUTS:
+*        statsData - points to 32bit data storage for the MIB counter
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*
+* COMMENTS:
+*        None
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstatsGetPortCounter
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    IN  GT_STATS_COUNTERS    counter,
+    OUT GT_U32            *statsData
+);
+
+/*******************************************************************************
+* gstatsGetPortAllCounters
+*
+* DESCRIPTION:
+*       This routine gets all RMON counters of the given port
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       statsCounterSet - points to GT_STATS_COUNTER_SET for the MIB counters
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*
+* COMMENTS:
+*       None
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstatsGetPortAllCounters
+(
+    IN  GT_QD_DEV* dev,
+    IN  GT_LPORT        port,
+    OUT GT_STATS_COUNTER_SET    *statsCounterSet
+);
+
+
+/*******************************************************************************
+* grcSetLimitMode
+*
+* DESCRIPTION:
+*       This routine sets the port's rate control ingress limit mode.
+*
+* INPUTS:
+*       port    - logical port number.
+*       mode     - rate control ingress limit mode.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*******************************************************************************/
+GT_STATUS grcSetLimitMode
+(
+    IN GT_QD_DEV*            dev,
+    IN GT_LPORT          port,
+    IN GT_RATE_LIMIT_MODE    mode
+);
+
+/*******************************************************************************
+* grcGetLimitMode
+*
+* DESCRIPTION:
+*       This routine gets the port's rate control ingress limit mode.
+*
+* INPUTS:
+*       port    - logical port number.
+*
+* OUTPUTS:
+*       mode     - rate control ingress limit mode.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+*******************************************************************************/
+GT_STATUS grcGetLimitMode
+(
+    IN GT_QD_DEV* dev,
+    IN  GT_LPORT port,
+    OUT GT_RATE_LIMIT_MODE    *mode
+);
+
+/*******************************************************************************
+* grcSetPri3Rate
+*
+* DESCRIPTION:
+*       This routine sets the ingress data rate limit for priority 3 frames.
+*       Priority 3 frames will be discarded after the ingress rate selection
+*       is reached or exceeded.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - the priority 3 frame rate limit mode
+*              GT_FALSE: use the same rate as Pri2Rate
+*              GT_TRUE:  use twice the rate as Pri2Rate
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+*******************************************************************************/
+GT_STATUS grcSetPri3Rate
+(
+    IN GT_QD_DEV*            dev,
+    IN GT_LPORT port,
+    IN GT_BOOL  mode
+);
+
+/*******************************************************************************
+* grcGetPri3Rate
+*
+* DESCRIPTION:
+*       This routine gets the ingress data rate limit for priority 3 frames.
+*       Priority 3 frames will be discarded after the ingress rate selection
+*       is reached or exceeded.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - the priority 3 frame rate limit mode
+*              GT_FALSE: use the same rate as Pri2Rate
+*              GT_TRUE:  use twice the rate as Pri2Rate
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+*******************************************************************************/
+GT_STATUS grcGetPri3Rate
+(
+    IN GT_QD_DEV* dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *mode
+);
+
+/*******************************************************************************
+* grcSetPri2Rate
+*
+* DESCRIPTION:
+*       This routine sets the ingress data rate limit for priority 2 frames.
+*       Priority 2 frames will be discarded after the ingress rate selection
+*       is reached or exceeded.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - the priority 2 frame rate limit mode
+*              GT_FALSE: use the same rate as Pri1Rate
+*              GT_TRUE:  use twice the rate as Pri1Rate
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+*******************************************************************************/
+GT_STATUS grcSetPri2Rate
+(
+    IN GT_QD_DEV*            dev,
+    IN GT_LPORT port,
+    IN GT_BOOL  mode
+);
+
+/*******************************************************************************
+* grcGetPri2Rate
+*
+* DESCRIPTION:
+*       This routine gets the ingress data rate limit for priority 2 frames.
+*       Priority 2 frames will be discarded after the ingress rate selection
+*       is reached or exceeded.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - the priority 2 frame rate limit mode
+*              GT_FALSE: use the same rate as Pri1Rate
+*              GT_TRUE:  use twice the rate as Pri1Rate
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+*******************************************************************************/
+GT_STATUS grcGetPri2Rate
+(
+    IN GT_QD_DEV*            dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *mode
+);
+
+/*******************************************************************************
+* grcSetPri1Rate
+*
+* DESCRIPTION:
+*       This routine sets the ingress data rate limit for priority 1 frames.
+*       Priority 1 frames will be discarded after the ingress rate selection
+*       is reached or exceeded.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - the priority 1 frame rate limit mode
+*              GT_FALSE: use the same rate as Pri0Rate
+*              GT_TRUE:  use twice the rate as Pri0Rate
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+*******************************************************************************/
+GT_STATUS grcSetPri1Rate
+(
+    IN GT_QD_DEV*            dev,
+    IN GT_LPORT port,
+    IN GT_BOOL  mode
+);
+
+/*******************************************************************************
+* grcGetPri1Rate
+*
+* DESCRIPTION:
+*       This routine gets the ingress data rate limit for priority 1 frames.
+*       Priority 1 frames will be discarded after the ingress rate selection
+*       is reached or exceeded.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - the priority 1 frame rate limit mode
+*              GT_FALSE: use the same rate as Pri0Rate
+*              GT_TRUE:  use twice the rate as Pri0Rate
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+*******************************************************************************/
+GT_STATUS grcGetPri1Rate
+(
+    IN GT_QD_DEV*            dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *mode
+);
+
+/*******************************************************************************
+* grcSetPri0Rate
+*
+* DESCRIPTION:
+*       This routine sets the port's ingress data limit for priority 0 frames.
+*
+* INPUTS:
+*       port    - logical port number.
+*       rate    - ingress data rate limit for priority 0 frames. These frames
+*             will be discarded after the ingress rate selected is reached
+*             or exceeded.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+*******************************************************************************/
+GT_STATUS grcSetPri0Rate
+(
+    IN GT_QD_DEV*            dev,
+    IN GT_LPORT        port,
+    IN GT_PRI0_RATE    rate
+);
+
+/*******************************************************************************
+* grcGetPri0Rate
+*
+* DESCRIPTION:
+*       This routine gets the port's ingress data limit for priority 0 frames.
+*
+* INPUTS:
+*       port    - logical port number to set.
+*
+* OUTPUTS:
+*       rate    - ingress data rate limit for priority 0 frames. These frames
+*             will be discarded after the ingress rate selected is reached
+*             or exceeded.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+*******************************************************************************/
+GT_STATUS grcGetPri0Rate
+(
+    IN GT_QD_DEV*            dev,
+    IN  GT_LPORT port,
+    OUT GT_PRI0_RATE    *rate
+);
+
+/*******************************************************************************
+* grcSetBytesCount
+*
+* DESCRIPTION:
+*       This routine sets the byets to count for limiting needs to be determined
+*
+* INPUTS:
+*       port      - logical port number to set.
+*        limitMGMT - GT_TRUE: To limit and count MGMT frame bytes
+*                GT_FALSE: otherwise
+*        countIFG  - GT_TRUE: To count IFG bytes
+*                GT_FALSE: otherwise
+*        countPre  - GT_TRUE: To count Preamble bytes
+*                GT_FALSE: otherwise
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+*******************************************************************************/
+GT_STATUS grcSetBytesCount
+(
+    IN GT_QD_DEV*       dev,
+    IN GT_LPORT            port,
+    IN GT_BOOL         limitMGMT,
+    IN GT_BOOL         countIFG,
+    IN GT_BOOL         countPre
+);
+
+/*******************************************************************************
+* grcGetBytesCount
+*
+* DESCRIPTION:
+*       This routine gets the byets to count for limiting needs to be determined
+*
+* INPUTS:
+*       port    - logical port number
+*
+* OUTPUTS:
+*        limitMGMT - GT_TRUE: To limit and count MGMT frame bytes
+*                GT_FALSE: otherwise
+*        countIFG  - GT_TRUE: To count IFG bytes
+*                GT_FALSE: otherwise
+*        countPre  - GT_TRUE: To count Preamble bytes
+*                GT_FALSE: otherwise
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+*******************************************************************************/
+GT_STATUS grcGetBytesCount
+(
+    IN GT_QD_DEV*       dev,
+    IN GT_LPORT            port,
+    IN GT_BOOL         *limitMGMT,
+    IN GT_BOOL         *countIFG,
+    IN GT_BOOL         *countPre
+);
+
+/*******************************************************************************
+* grcSetEgressRate
+*
+* DESCRIPTION:
+*       This routine sets the port's egress data limit.
+*
+*
+* INPUTS:
+*       port      - logical port number.
+*       rateType  - egress data rate limit (GT_ERATE_TYPE union type).
+*                    union type is used to support multiple devices with the
+*                    different formats of egress rate.
+*                    GT_ERATE_TYPE has the following fields:
+*                        definedRate - GT_EGRESS_RATE enum type should used for the
+*                            following devices:
+*                            88E6218, 88E6318, 88E6063, 88E6083, 88E6181, 88E6183,
+*                            88E6093, 88E6095, 88E6185, 88E6108, 88E6065, 88E6061,
+*                            and their variations
+*                        kbRate - rate in kbps that should used for the following
+*                            devices:
+*                            88E6097, 88E6096 with the GT_PIRL_ELIMIT_MODE of
+*                                GT_PIRL_ELIMIT_LAYER1,
+*                                GT_PIRL_ELIMIT_LAYER2, or
+*                                GT_PIRL_ELIMIT_LAYER3 (see grcSetELimitMode)
+*                            64kbps ~ 1Mbps    : increments of 64kbps,
+*                            1Mbps ~ 100Mbps   : increments of 1Mbps, and
+*                            100Mbps ~ 1000Mbps: increments of 10Mbps
+*                            Therefore, the valid values are:
+*                                64, 128, 192, 256, 320, 384,..., 960,
+*                                1000, 2000, 3000, 4000, ..., 100000,
+*                                110000, 120000, 130000, ..., 1000000.
+*                        fRate - frame per second that should used for the following
+*                            devices:
+*                            88E6097, 88E6096 with GT_PIRL_ELIMIT_MODE of
+*                                GT_PIRL_ELIMIT_FRAME
+*                            Valid values are between 7600 and 1488000
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*            GT_16M, GT_32M, GT_64M, GT_128M, and GT_256M in GT_EGRESS_RATE enum
+*            are supported only by Gigabit Ethernet Switch.
+*
+*******************************************************************************/
+GT_STATUS grcSetEgressRate
+(
+    IN GT_QD_DEV       *dev,
+    IN GT_LPORT        port,
+    IN GT_ERATE_TYPE   *rateType
+);
+
+/*******************************************************************************
+* grcGetEgressRate
+*
+* DESCRIPTION:
+*       This routine gets the port's egress data limit.
+*
+* INPUTS:
+*       port    - logical port number.
+*
+* OUTPUTS:
+*       rateType  - egress data rate limit (GT_ERATE_TYPE union type).
+*                    union type is used to support multiple devices with the
+*                    different formats of egress rate.
+*                    GT_ERATE_TYPE has the following fields:
+*                        definedRate - GT_EGRESS_RATE enum type should used for the
+*                            following devices:
+*                            88E6218, 88E6318, 88E6063, 88E6083, 88E6181, 88E6183,
+*                            88E6093, 88E6095, 88E6185, 88E6108, 88E6065, 88E6061,
+*                            and their variations
+*                        kbRate - rate in kbps that should used for the following
+*                            devices:
+*                            88E6097, 88E6096 with the GT_PIRL_ELIMIT_MODE of
+*                                GT_PIRL_ELIMIT_LAYER1,
+*                                GT_PIRL_ELIMIT_LAYER2, or
+*                                GT_PIRL_ELIMIT_LAYER3 (see grcSetELimitMode)
+*                            64kbps ~ 1Mbps    : increments of 64kbps,
+*                            1Mbps ~ 100Mbps   : increments of 1Mbps, and
+*                            100Mbps ~ 1000Mbps: increments of 10Mbps
+*                            Therefore, the valid values are:
+*                                64, 128, 192, 256, 320, 384,..., 960,
+*                                1000, 2000, 3000, 4000, ..., 100000,
+*                                110000, 120000, 130000, ..., 1000000.
+*                        fRate - frame per second that should used for the following
+*                            devices:
+*                            88E6097, 88E6096 with GT_PIRL_ELIMIT_MODE of
+*                                GT_PIRL_ELIMIT_FRAME
+*                            Valid values are between 7600 and 1488000
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*            GT_16M, GT_32M, GT_64M, GT_128M, and GT_256M in GT_EGRESS_RATE enum
+*            are supported only by Gigabit Ethernet Switch.
+*
+*******************************************************************************/
+GT_STATUS grcGetEgressRate
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_ERATE_TYPE  *rateType
+);
+
+
+/*******************************************************************************
+* gpavSetPAV
+*
+* DESCRIPTION:
+*       This routine sets the Port Association Vector
+*
+* INPUTS:
+*       port    - logical port number.
+*       pav     - Port Association Vector
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+*******************************************************************************/
+GT_STATUS gpavSetPAV
+(
+    IN GT_QD_DEV*       dev,
+    IN GT_LPORT    port,
+    IN GT_U16    pav
+);
+
+/*******************************************************************************
+* gpavGetPAV
+*
+* DESCRIPTION:
+*       This routine gets the Port Association Vector
+*
+* INPUTS:
+*       port    - logical port number.
+*
+* OUTPUTS:
+*       pav     - Port Association Vector
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+*******************************************************************************/
+GT_STATUS gpavGetPAV
+(
+    IN GT_QD_DEV*       dev,
+    IN  GT_LPORT port,
+    OUT GT_U16    *pav
+);
+
+/*******************************************************************************
+* gpavSetIngressMonitor
+*
+* DESCRIPTION:
+*       This routine sets the Ingress Monitor bit in the PAV.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - the ingress monitor bit in the PAV
+*              GT_FALSE: Ingress Monitor enabled
+*              GT_TRUE:  Ingress Monitor disabled
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*******************************************************************************/
+GT_STATUS gpavSetIngressMonitor
+(
+    IN GT_QD_DEV*       dev,
+    IN GT_LPORT port,
+    IN GT_BOOL  mode
+);
+
+/*******************************************************************************
+* gpavGetIngressMonitor
+*
+* DESCRIPTION:
+*       This routine gets the Ingress Monitor bit in the PAV.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - the ingress monitor bit in the PAV
+*              GT_FALSE: Ingress Monitor enabled
+*              GT_TRUE:  Ingress Monitor disabled
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+*******************************************************************************/
+GT_STATUS gpavGetIngressMonitor
+(
+    IN GT_QD_DEV*       dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *mode
+);
+
+/*******************************************************************************
+* gvctGetCableStatus
+*
+* DESCRIPTION:
+*       This routine perform the virtual cable test for the requested port,
+*       and returns the the status per MDI pair.
+*
+* INPUTS:
+*       port - logical port number.
+*
+* OUTPUTS:
+*       cableStatus - the port copper cable status.
+*       cableLen    - the port copper cable length.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED- if current device does not support this feature.
+*
+* COMMENTS:
+*       Internal Gigabit Phys in 88E6165 family and 88E6351 family devices
+*        are not supported by this API. For those devices, gvctGetAdvCableDiag
+*        API can be used, instead.
+*
+
+*******************************************************************************/
+GT_STATUS gvctGetCableDiag
+(
+    IN GT_QD_DEV*       dev,
+    IN  GT_LPORT        port,
+    OUT GT_CABLE_STATUS *cableStatus
+);
+
+
+/*******************************************************************************
+* gvctGet1000BTExtendedStatus
+*
+* DESCRIPTION:
+*       This routine retrieves extended cable status, such as Pair Poloarity,
+*        Pair Swap, and Pair Skew. Note that this routine will be success only
+*        if 1000Base-T Link is up.
+*
+* INPUTS:
+*       port - logical port number.
+*
+* OUTPUTS:
+*       extendedStatus - the extended cable status.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED- if current device does not support this feature.
+* COMMENTS:
+*       Internal Gigabit Phys in 88E6165 family and 88E6351 family devices
+*        are not supported by this API. For those devices, gvctGetAdvExtendedStatus
+*        API can be used, instead.
+*
+*
+*******************************************************************************/
+GT_STATUS gvctGet1000BTExtendedStatus
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_LPORT        port,
+    OUT GT_1000BT_EXTENDED_STATUS *extendedStatus
+);
+
+
+/*******************************************************************************
+* gtMemSet
+*
+* DESCRIPTION:
+*       Set a block of memory
+*
+* INPUTS:
+*       start  - start address of memory block for setting
+*       simbol - character to store, converted to an unsigned char
+*       size   - size of block to be set
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       Pointer to set memory block
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+void * gtMemSet
+(
+    IN void * start,
+    IN int    symbol,
+    IN GT_U32 size
+);
+
+/*******************************************************************************
+* gtMemCpy
+*
+* DESCRIPTION:
+*       Copies 'size' characters from the object pointed to by 'source' into
+*       the object pointed to by 'destination'. If copying takes place between
+*       objects that overlap, the behavior is undefined.
+*
+* INPUTS:
+*       destination - destination of copy
+*       source      - source of copy
+*       size        - size of memory to copy
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       Pointer to destination
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+void * gtMemCpy
+(
+    IN void *       destination,
+    IN const void * source,
+    IN GT_U32       size
+);
+
+
+/*******************************************************************************
+* gtMemCmp
+*
+* DESCRIPTION:
+*       Compares given memories.
+*
+* INPUTS:
+*       src1 - source 1
+*       src2 - source 2
+*       size - size of memory to copy
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       0, if equal.
+*        negative number, if src1 < src2.
+*        positive number, if src1 > src2.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+int gtMemCmp
+(
+    IN char src1[],
+    IN char src2[],
+    IN GT_U32 size
+);
+
+/*******************************************************************************
+* gtStrlen
+*
+* DESCRIPTION:
+*       Determine the length of a string
+* INPUTS:
+*       source  - string
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       size    - number of characters in string, not including EOS.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_U32 gtStrlen
+(
+    IN const void * source
+);
+
+/*******************************************************************************
+* gtDelay
+*
+* DESCRIPTION:
+*       Wait for the given uSec and return.
+*        Current Switch devices with Gigabit Ethernet Support require 250 uSec
+*        of delay time for PPU to be disabled.
+*        Since this function is System and/or OS dependent, it should be provided
+*        by each DSDT user.
+*
+* INPUTS:
+*       delayTime - delay in uSec.
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       None
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+void gtDelay
+(
+    IN const unsigned int delayTime
+);
+
+
+/*******************************************************************************
+* gtVersion
+*
+* DESCRIPTION:
+*       This function returns the version of the QuarterDeck SW suite.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       version     - QuarterDeck software version.
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_BAD_PARAM on bad parameters,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gtVersion
+(
+    OUT GT_VERSION   *version
+);
+
+
+/* Prototypes added for Gigabit Ethernet Switch Support */
+
+
+/* gtBrgFdb.c */
+
+/*******************************************************************************
+* gfdbMove
+*
+* DESCRIPTION:
+*        This routine moves all or unblocked addresses from a port to another.
+*
+* INPUTS:
+*        moveCmd  - the move operation type.
+*        moveFrom - port where moving from
+*        moveTo   - port where moving to
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK           - on success
+*        GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbMove
+(
+    IN GT_QD_DEV     *dev,
+    IN GT_MOVE_CMD    moveCmd,
+    IN GT_U32        moveFrom,
+    IN GT_U32        moveTo
+);
+
+/*******************************************************************************
+* gfdbMoveInDB
+*
+* DESCRIPTION:
+*         This routine move all or unblocked addresses which are in the particular
+*         ATU Database (DBNum) from a port to another.
+*
+* INPUTS:
+*         moveCmd  - the move operation type.
+*        DBNum         - ATU MAC Address Database Number.
+*        moveFrom - port where moving from
+*        moveTo   - port where moving to
+*
+* OUTPUTS:
+*     None
+*
+* RETURNS:
+*         GT_OK           - on success
+*         GT_FAIL         - on error
+*         GT_NOT_SUPPORTED- if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbMoveInDB
+(
+    IN GT_QD_DEV   *dev,
+    IN GT_MOVE_CMD moveCmd,
+    IN GT_U32         DBNum,
+    IN GT_U32        moveFrom,
+    IN GT_U32        moveTo
+);
+
+/* gtBrgStp.c */
+
+/* gtBrgVlan.c */
+
+/* gtBrgVtu.c */
+
+/* gtEvents.c */
+
+/*******************************************************************************
+* gatuGetIntStatus
+*
+* DESCRIPTION:
+*        Check to see if a specific type of ATU interrupt occured
+*
+* INPUTS:
+*     intType - the type of interrupt which causes an interrupt.
+*                    GT_MEMEBER_VIOLATION, GT_MISS_VIOLATION, or GT_FULL_VIOLATION
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK     - on success
+*         GT_FAIL     - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gatuGetIntStatus
+(
+    IN  GT_QD_DEV                *dev,
+    OUT GT_ATU_INT_STATUS    *atuIntStatus
+);
+
+
+/* gtPhyCtrl.c */
+
+/*******************************************************************************
+* gprtSet1000TMasterMode
+*
+* DESCRIPTION:
+*        This routine set the port multicast rate limit.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_1000T_MASTER_SLAVE structure
+*                autoConfig   - GT_TRUE for auto, GT_FALSE for manual setup.
+*                masterPrefer - GT_TRUE if Master configuration is preferred.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSet1000TMasterMode
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_1000T_MASTER_SLAVE    *mode
+);
+
+/*******************************************************************************
+* gprtGet1000TMasterMode
+*
+* DESCRIPTION:
+*        This routine set the port multicast rate limit.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_1000T_MASTER_SLAVE structure
+*                autoConfig   - GT_TRUE for auto, GT_FALSE for manual setup.
+*                masterPrefer - GT_TRUE if Master configuration is preferred.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGet1000TMasterMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_1000T_MASTER_SLAVE    *mode
+);
+
+
+/* gtPortCtrl.c */
+
+/*******************************************************************************
+* gprtSetDropOnLock
+*
+* DESCRIPTION:
+*        This routine set the Drop on Lock. When set to one, Ingress frames will
+*        be discarded if their SA field is not in the ATU's address database.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for Unknown SA drop or GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetDropOnLock
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gprtGetDropOnLock
+*
+* DESCRIPTION:
+*        This routine gets DropOnLock mode.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE: DropOnLock enabled,
+*                 GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetDropOnLock
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+);
+
+/*******************************************************************************
+* gprtSetDoubleTag
+*
+* DESCRIPTION:
+*        This routine set the Ingress Double Tag Mode. When set to one,
+*        ingressing frames are examined to see if they contain an 802.3ac tag.
+*        If they do, the tag is removed and then the frame is processed from
+*        there (i.e., removed tag is ignored). Essentially, untagged frames
+*        remain untagged, single tagged frames become untagged and double tagged
+*        frames become single tagged.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for DoulbeTag mode or GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetDoubleTag
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gprtGetDoubleTag
+*
+* DESCRIPTION:
+*        This routine gets DoubleTag mode.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE: DoubleTag enabled,
+*                 GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetDoubleTag
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+);
+
+/*******************************************************************************
+* gprtSetInterswitchPort
+*
+* DESCRIPTION:
+*        This routine set Interswitch Port. When set to one,
+*        it indicates this port is a interswitch port used to communicated with
+*        CPU or to cascade with another switch device.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for Interswitch port or GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetInterswitchPort
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gprtGetInterswithPort
+*
+* DESCRIPTION:
+*        This routine gets InterswitchPort.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE: This port is interswitch port,
+*                 GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetInterswitchPort
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+);
+
+/*******************************************************************************
+* gprtSetLearnDisable
+*
+* DESCRIPTION:
+*        This routine enables/disables automatic learning of new source MAC
+*        addresses on the given port ingress
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for disable or GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetLearnDisable
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_BOOL        mode
+);
+
+
+/*******************************************************************************
+* gprtGetLearnDisable
+*
+* DESCRIPTION:
+*        This routine gets LearnDisable setup
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE: Learning disabled on the given port ingress frames,
+*                 GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetLearnDisable
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+);
+
+/*******************************************************************************
+* gprtSetIgnoreFCS
+*
+* DESCRIPTION:
+*        This routine sets FCS Ignore mode. When this bit is set to a one,
+*        the last four bytes of frames received on this port are overwritten with
+*        a good CRC and the frames will be accepted by the switch.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for ignore FCS or GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetIgnoreFCS
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_BOOL         mode
+);
+
+/*******************************************************************************
+* gprtGetIgnoreFCS
+*
+* DESCRIPTION:
+*        This routine gets Ignore FCS setup
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE: Ignore FCS on the given port's ingress frames,
+*                 GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetIgnoreFCS
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+);
+
+/*******************************************************************************
+* gprtSetVTUPriOverride
+*
+* DESCRIPTION:
+*        VTU Priority Override. The following modes are supported:
+*            PRI_OVERRIDE_NONE -
+*                Normal frame priority processing occurs.
+*            PRI_OVERRIDE_FRAME_QUEUE -
+*                Both frame and queue overrides take place on the frame.
+*            PRI_OVERRIDE_FRAME -
+*                Priority assigned to the frame's VID (in the VTU table) is used
+*                to overwite the frame's FPri (frame priority).
+*                If the frame egresses tagged, the priority in the frame will be
+*                this new priority value.
+*            PRI_OVERRIDE_QUEUE -
+*                Priority assigned to the frame's VID (in the VTU table) is used
+*                to overwite the frame's QPri (queue priority).
+*                QPri is used internally to map the frame to one of the egress
+*                queues inside the switch.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_PRI_OVERRIDE type
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if mode is invalid
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        PRI_OVERRIDE_FRAME and PRI_OVERRIDE_QUEUE modes are supported only on
+*        certain switch device. Please refer to the device datasheet.
+*
+*******************************************************************************/
+GT_STATUS gprtSetVTUPriOverride
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_PRI_OVERRIDE        mode
+);
+
+/*******************************************************************************
+* gprtGetVTUPriOverride
+*
+* DESCRIPTION:
+*        VTU Priority Override. The following modes are supported:
+*            PRI_OVERRIDE_NONE -
+*                Normal frame priority processing occurs.
+*            PRI_OVERRIDE_FRAME_QUEUE -
+*                Both frame and queue overrides take place on the frame.
+*            PRI_OVERRIDE_FRAME -
+*                Priority assigned to the frame's VID (in the VTU table) is used
+*                to overwite the frame's FPri (frame priority).
+*                If the frame egresses tagged, the priority in the frame will be
+*                this new priority value.
+*            PRI_OVERRIDE_QUEUE -
+*                Priority assigned to the frame's VID (in the VTU table) is used
+*                to overwite the frame's QPri (queue priority).
+*                QPri is used internally to map the frame to one of the egress
+*                queues inside the switch.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_PRI_OVERRIDE type
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        PRI_OVERRIDE_FRAME and PRI_OVERRIDE_QUEUE modes are supported only on
+*        certain switch device. Please refer to the device datasheet.
+*
+*******************************************************************************/
+GT_STATUS gprtGetVTUPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_PRI_OVERRIDE        *mode
+);
+
+/*******************************************************************************
+* gprtSetSAPriOverride
+*
+* DESCRIPTION:
+*        SA Priority Override. The following mode is supported:
+*            PRI_OVERRIDE_NONE -
+*                Normal frame priority processing occurs.
+*            PRI_OVERRIDE_FRAME_QUEUE -
+*                Both frame and queue overrides take place on the frame.
+*            PRI_OVERRIDE_FRAME -
+*                Priority assigned to the frame's SA (in the ATU table) is used
+*                to overwite the frame's FPri (frame priority).
+*                If the frame egresses tagged, the priority in the frame will be
+*                this new priority value.
+*            PRI_OVERRIDE_QUEUE -
+*                Priority assigned to the frame's SA (in the ATU table) is used
+*                to overwite the frame's QPri (queue priority).
+*                QPri is used internally to map the frame to one of the egress
+*                queues inside the switch.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_PRI_OVERRIDE type
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if mode is invalid
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        PRI_OVERRIDE_FRAME and PRI_OVERRIDE_QUEUE modes are supported only on
+*        certain switch device. Please refer to the device datasheet.
+*
+*******************************************************************************/
+GT_STATUS gprtSetSAPriOverride
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gprtGetSAPriOverride
+*
+* DESCRIPTION:
+*        SA Priority Override. The following mode is supported:
+*            PRI_OVERRIDE_NONE -
+*                Normal frame priority processing occurs.
+*            PRI_OVERRIDE_FRAME_QUEUE -
+*                Both frame and queue overrides take place on the frame.
+*            PRI_OVERRIDE_FRAME -
+*                Priority assigned to the frame's SA (in the ATU table) is used
+*                to overwite the frame's FPri (frame priority).
+*                If the frame egresses tagged, the priority in the frame will be
+*                this new priority value.
+*            PRI_OVERRIDE_QUEUE -
+*                Priority assigned to the frame's SA (in the ATU table) is used
+*                to overwite the frame's QPri (queue priority).
+*                QPri is used internally to map the frame to one of the egress
+*                queues inside the switch.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_PRI_OVERRIDE type
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        PRI_OVERRIDE_FRAME and PRI_OVERRIDE_QUEUE modes are supported only on
+*        certain switch device. Please refer to the device datasheet.
+*
+*******************************************************************************/
+GT_STATUS gprtGetSAPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_PRI_OVERRIDE        *mode
+);
+
+/*******************************************************************************
+* gprtSetDAPriOverride
+*
+* DESCRIPTION:
+*        DA Priority Override. The following mode is supported:
+*            PRI_OVERRIDE_NONE -
+*                Normal frame priority processing occurs.
+*            PRI_OVERRIDE_FRAME -
+*                Priority assigned to the frame's DA (in the ATU table) is used
+*                to overwite the frame's FPri (frame priority).
+*                If the frame egresses tagged, the priority in the frame will be
+*                this new priority value.
+*            PRI_OVERRIDE_QUEUE -
+*                Priority assigned to the frame's DA (in the ATU table) is used
+*                to overwite the frame's QPri (queue priority).
+*                QPri is used internally to map the frame to one of the egress
+*                queues inside the switch.
+*            PRI_OVERRIDE_FRAME_QUEUE -
+*                Both frame and queue overrides take place on the frame.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_PRI_OVERRIDE type
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if mode is invalid
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        PRI_OVERRIDE_FRAME and PRI_OVERRIDE_QUEUE modes are supported only on
+*        certain switch device. Please refer to the device datasheet.
+*
+*******************************************************************************/
+GT_STATUS gprtSetDAPriOverride
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_PRI_OVERRIDE        mode
+);
+
+/*******************************************************************************
+* gprtGetDAPriOverride
+*
+* DESCRIPTION:
+*        DA Priority Override. The following mode is supported:
+*            PRI_OVERRIDE_NONE -
+*                Normal frame priority processing occurs.
+*            PRI_OVERRIDE_FRAME -
+*                Priority assigned to the frame's DA (in the ATU table) is used
+*                to overwite the frame's FPri (frame priority).
+*                If the frame egresses tagged, the priority in the frame will be
+*                this new priority value.
+*            PRI_OVERRIDE_QUEUE -
+*                Priority assigned to the frame's DA (in the ATU table) is used
+*                to overwite the frame's QPri (queue priority).
+*                QPri is used internally to map the frame to one of the egress
+*                queues inside the switch.
+*            PRI_OVERRIDE_FRAME_QUEUE -
+*                Both frame and queue overrides take place on the frame.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_PRI_OVERRIDE type
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        PRI_OVERRIDE_FRAME and PRI_OVERRIDE_QUEUE modes are supported only on
+*        certain switch device. Please refer to the device datasheet.
+*
+*******************************************************************************/
+GT_STATUS gprtGetDAPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_PRI_OVERRIDE        *mode
+);
+
+/*******************************************************************************
+* gprtSetCPUPort
+*
+* DESCRIPTION:
+*        This routine sets CPU Port number. When Snooping is enabled on this port
+*        or when this port is configured as an Interswitch Port and it receives a
+*        To_CPU frame, the switch needs to know what port on this device the frame
+*        should egress.
+*
+* INPUTS:
+*        port - the logical port number.
+*        cpuPort - CPU Port number or interswitch port where CPU Port is connected
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetCPUPort
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_LPORT     cpuPort
+);
+
+/*******************************************************************************
+* gprtGetCPUPort
+*
+* DESCRIPTION:
+*        This routine gets CPU Logical Port
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        cpuPort - CPU Port's logical number
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetCPUPort
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_LPORT     *cpuLPort
+);
+
+/*******************************************************************************
+* gprtSetLockedPort
+*
+* DESCRIPTION:
+*        This routine sets LockedPort. When it's set to one, CPU directed
+*        learning for 802.1x MAC authentication is enabled on this port. In this
+*        mode, an ATU Miss Violation interrupt will occur when a new SA address
+*        is received in a frame on this port. Automatically SA learning and
+*        refreshing is disabled in this mode.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for Locked Port, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetLockedPort
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gprtGetLockedPort
+*
+* DESCRIPTION:
+*        This routine gets Locked Port mode for the given port
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode  - GT_TRUE if LockedPort, GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetLockedPort
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL      *mode
+);
+
+/*******************************************************************************
+* gprtSetIgnoreWrongData
+*
+* DESCRIPTION:
+*        This routine sets Ignore Wrong Data. If the frame's SA address is found
+*        in the database and if the entry is 'static' or if the port is 'locked'
+*        the source port's bit is checked to insure the SA has been assigned to
+*        this port. If the SA is NOT assigned to this port, it is considered an
+*        ATU Member Violation. If the IgnoreWrongData is set to GT_FALSE, an ATU
+*        Member Violation interrupt will be generated. If it's set to GT_TRUE,
+*        the ATU Member Violation error will be masked and ignored.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for IgnoreWrongData, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetIgnoreWrongData
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_BOOL        mode
+);
+
+
+/*******************************************************************************
+* gprtGetIgnoreWrongData
+*
+* DESCRIPTION:
+*        This routine gets Ignore Wrong Data mode for the given port
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode  - GT_TRUE if IgnoreWrongData, GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetIgnoreWrongData
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *mode
+);
+
+
+/* gtPortRateCtrl.c */
+
+/* gtPortRmon.c */
+
+/*******************************************************************************
+* gstatsGetPortCounter2
+*
+* DESCRIPTION:
+*        This routine gets a specific counter of the given port
+*
+* INPUTS:
+*        port - the logical port number.
+*        counter - the counter which will be read
+*
+* OUTPUTS:
+*        statsData - points to 32bit data storage for the MIB counter
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*
+* COMMENTS:
+*        This function supports Gigabit Switch only
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstatsGetPortCounter2
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    IN  GT_STATS_COUNTERS2    counter,
+    OUT GT_U32            *statsData
+);
+
+
+/*******************************************************************************
+* gstatsGetPortAllCounters2
+*
+* DESCRIPTION:
+*        This routine gets all counters of the given port
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        statsCounterSet - points to GT_STATS_COUNTER_SET for the MIB counters
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*
+* COMMENTS:
+*        This function supports Gigabit Switch only
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstatsGetPortAllCounters2
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    OUT GT_STATS_COUNTER_SET2    *statsCounterSet
+);
+
+/*******************************************************************************
+* gstatsGetHistogramMode
+*
+* DESCRIPTION:
+*        This routine gets the Histogram Counters Mode.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        mode - Histogram Mode (GT_COUNT_RX_ONLY, GT_COUNT_TX_ONLY,
+*                    and GT_COUNT_RX_TX)
+*
+* RETURNS:
+*        GT_OK           - on success
+*        GT_BAD_PARAM    - on bad parameter
+*        GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        This function supports Gigabit Switch only
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstatsGetHistogramMode
+(
+    IN  GT_QD_DEV                *dev,
+    OUT GT_HISTOGRAM_MODE    *mode
+);
+
+/*******************************************************************************
+* gstatsSetHistogramMode
+*
+* DESCRIPTION:
+*        This routine sets the Histogram Counters Mode.
+*
+* INPUTS:
+*        mode - Histogram Mode (GT_COUNT_RX_ONLY, GT_COUNT_TX_ONLY,
+*                    and GT_COUNT_RX_TX)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK           - on success
+*        GT_BAD_PARAM    - on bad parameter
+*        GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstatsSetHistogramMode
+(
+    IN GT_QD_DEV                 *dev,
+    IN GT_HISTOGRAM_MODE        mode
+);
+
+
+/* gtPortStatus.c */
+
+/*******************************************************************************
+* gprtGetPauseEn
+*
+* DESCRIPTION:
+*        This routine retrives the link pause state.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE for enable or GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        If set MAC Pause (for Full Duplex flow control) is implemented in the
+*        link partner and in MyPause
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPauseEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *state
+);
+
+/*******************************************************************************
+* gprtGetHdFlow
+*
+* DESCRIPTION:
+*        This routine retrives the half duplex flow control value.
+*        If set, Half Duplex back pressure will be used on this port if this port
+*        is in a half duplex mode.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE for enable or GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetHdFlow
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL     *state
+);
+
+/*******************************************************************************
+* gprtGetPHYDetect
+*
+* DESCRIPTION:
+*        This routine retrives the information regarding PHY detection.
+*        If set, An 802.3 PHY is attached to this port.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if connected or GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPHYDetect
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL     *state
+);
+
+/*******************************************************************************
+* gprtSetPHYDetect
+*
+* DESCRIPTION:
+*        This routine sets PHYDetect bit which make PPU change its polling.
+*        PPU's pool routine uses these bits to determine which port's to poll
+*        PHYs on for Link, Duplex, Speed, and Flow Control.
+*
+* INPUTS:
+*        port - the logical port number.
+*        state - GT_TRUE or GT_FALSE
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        This function should not be called if gsysGetPPUState returns
+*        PPU_STATE_ACTIVE.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetPHYDetect
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_BOOL      state
+);
+
+/*******************************************************************************
+* gprtGetSpeedMode
+*
+* DESCRIPTION:
+*       This routine retrives the port speed.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_PORT_SPEED_MODE type.
+*                    (PORT_SPEED_1000_MBPS,PORT_SPEED_100_MBPS, or PORT_SPEED_10_MBPS)
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetSpeedMode
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_PORT_SPEED_MODE   *speed
+);
+
+/*******************************************************************************
+* gprtGetHighErrorRate
+*
+* DESCRIPTION:
+*        This routine retrives the PCS High Error Rate.
+*        This routine returns GT_TRUE if the rate of invalid code groups seen by
+*        PCS has exceeded 10 to the power of -11.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE or GT_FALSE
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetHighErrorRate
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+);
+
+/*******************************************************************************
+* gprtGetTxPaused
+*
+* DESCRIPTION:
+*        This routine retrives Transmit Pause state.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if Rx MAC receives a PAUSE frame with none-zero Puase Time
+*                  GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetTxPaused
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+);
+
+
+/*******************************************************************************
+* gprtGetFlowCtrl
+*
+* DESCRIPTION:
+*        This routine retrives Flow control state.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if Rx MAC determines that no more data should be
+*                    entering this port.
+*                  GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetFlowCtrl
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+);
+
+/*******************************************************************************
+* gprtGetC_Duplex
+*
+* DESCRIPTION:
+*        This routine retrives Port 9's duplex configuration mode determined
+*        at reset.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if configured as Full duplex operation
+*                  GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        Return value is valid only if the given port is 9.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetC_Duplex
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+);
+
+/*******************************************************************************
+* gprtGetC_Mode
+*
+* DESCRIPTION:
+*        This routine retrives port's interface type configuration mode
+*        determined at reset.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - one of value in GT_PORT_CONFIG_MODE enum type
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        Return value is valid only if the given port is 9.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetC_Mode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_PORT_CONFIG_MODE   *state
+);
+
+
+/* gtSysCtrl.c */
+
+/*******************************************************************************
+* gsysSetPPUEn
+*
+* DESCRIPTION:
+*        This routine enables/disables Phy Polling Unit.
+*
+* INPUTS:
+*        en - GT_TRUE to enable PPU, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetPPUEn
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL         en
+);
+
+/*******************************************************************************
+* gsysGetPPUEn
+*
+* DESCRIPTION:
+*        This routine get the PPU state.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if PPU is enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK           - on success
+*        GT_BAD_PARAM    - on bad parameter
+*        GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetPPUEn
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+);
+
+/*******************************************************************************
+* gsysSetCascadePort
+*
+* DESCRIPTION:
+*        This routine sets Cascade Port number.
+*        In multichip systems frames coming from a CPU need to know when they
+*        have reached their destination chip.
+*
+* INPUTS:
+*        port - Cascade Port
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetCascadePort
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port
+);
+
+/*******************************************************************************
+* gsysGetCascadePort
+*
+* DESCRIPTION:
+*        This routine gets Cascade Port number.
+*        In multichip systems frames coming from a CPU need to know when they
+*        have reached their destination chip.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        port - Cascade Port
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetCascadePort
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_LPORT     *port
+);
+
+/*******************************************************************************
+* gsysSetDeviceNumber
+*
+* DESCRIPTION:
+*        This routine sets Device Number.
+*        In multichip systems frames coming from a CPU need to know when they
+*        have reached their destination chip. From CPU frames whose Dev_Num
+*        fieldmatches these bits have reachedtheir destination chip and are sent
+*        out this chip using the port number indicated in the frame's Trg_Port
+*        field.
+*
+* INPUTS:
+*        devNum - Device Number (0 ~ 31)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetDeviceNumber
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U32          devNum
+);
+
+/*******************************************************************************
+* gsysGetDeviceNumber
+*
+* DESCRIPTION:
+*        This routine gets Device Number.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        devNum - Device Number (0 ~ 31)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetDeviceNumber
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U32      *devNum
+);
+
+
+/* gtPCSCtrl.c */
+
+
+/*******************************************************************************
+* gpcsGetCommaDet
+*
+* DESCRIPTION:
+*        This routine retrieves Comma Detection status in PCS
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE for Comma Detected or GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetCommaDet
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+);
+
+/*******************************************************************************
+* gpcsGetSyncOK
+*
+* DESCRIPTION:
+*        This routine retrieves SynOK bit. It is set to a one when the PCS has
+*        detected a few comma patterns and is synchronized with its peer PCS
+*        layer.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if synchronized or GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetSyncOK
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+);
+
+/*******************************************************************************
+* gpcsGetSyncFail
+*
+* DESCRIPTION:
+*        This routine retrieves SynFail bit.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if synchronizaion failed or GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetSyncFail
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+);
+
+/*******************************************************************************
+* gpcsGetAnBypassed
+*
+* DESCRIPTION:
+*        This routine retrieves Inband Auto-Negotiation bypass status.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if AN is bypassed or GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetAnBypassed
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+);
+
+/*******************************************************************************
+* gpcsGetAnBypassMode
+*
+* DESCRIPTION:
+*        This routine retrieves Enable mode of Inband Auto-Negotiation bypass.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE if AN bypass is enabled or GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetAnBypassMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *mode
+);
+
+/*******************************************************************************
+* gpcsSetAnBypassMode
+*
+* DESCRIPTION:
+*        This routine retrieves Enable mode of Inband Auto-Negotiation bypass.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to enable AN bypass mode or GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsSetAnBypassMode
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL      mode
+);
+
+/*******************************************************************************
+* gpcsGetPCSAnEn
+*
+* DESCRIPTION:
+*        This routine retrieves Enable mode of PCS Inband Auto-Negotiation.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE if PCS AN is enabled or GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetPCSAnEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *mode
+);
+
+/*******************************************************************************
+* gpcsSetPCSAnEn
+*
+* DESCRIPTION:
+*        This routine sets Enable mode of PCS Inband Auto-Negotiation.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to enable PCS AN mode or GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsSetPCSAnEn
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL      mode
+);
+
+/*******************************************************************************
+* gpcsSetRestartPCSAn
+*
+* DESCRIPTION:
+*        This routine restarts PCS Inband Auto-Negotiation.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsSetRestartPCSAn
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port
+);
+
+/*******************************************************************************
+* gpcsGetPCSAnDone
+*
+* DESCRIPTION:
+*        This routine retrieves completion information of PCS Auto-Negotiation.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE if PCS AN is done or never done
+*                GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetPCSAnDone
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *mode
+);
+
+/*******************************************************************************
+* gpcsSetLinkValue
+*
+* DESCRIPTION:
+*        This routine sets Link's force value
+*
+* INPUTS:
+*        port - the logical port number.
+*        state - GT_TRUE to force link up, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsSetLinkValue
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN    GT_BOOL        state
+);
+
+/*******************************************************************************
+* gpcsGetLinkValue
+*
+* DESCRIPTION:
+*        This routine retrieves Link Value which will be used for Forcing Link
+*        up or down.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if Link Force value is one (link up)
+*                 GT_FALSE otherwise (link down)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetLinkValue
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+);
+
+/*******************************************************************************
+* gpcsSetForcedLink
+*
+* DESCRIPTION:
+*        This routine forces Link. If LinkValue is set to one, calling this
+*        routine with GT_TRUE will force Link to be up.
+*
+* INPUTS:
+*        port - the logical port number.
+*        state - GT_TRUE to force link (up or down), GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsSetForcedLink
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN    GT_BOOL        state
+);
+
+/*******************************************************************************
+* gpcsGetForcedLink
+*
+* DESCRIPTION:
+*        This routine retrieves Forced Link bit
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if ForcedLink bit is one,
+*                 GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetForcedLink
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+);
+
+/*******************************************************************************
+* gpcsSetDpxValue
+*
+* DESCRIPTION:
+*        This routine sets Duplex's Forced value. This function needs to be
+*        called prior to gpcsSetForcedDpx.
+*
+* INPUTS:
+*        port - the logical port number.
+*        state - GT_TRUE to force full duplex, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsSetDpxValue
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN    GT_BOOL        state
+);
+
+/*******************************************************************************
+* gpcsGetDpxValue
+*
+* DESCRIPTION:
+*        This routine retrieves Duplex's Forced value
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if Duplex's Forced value is set to Full duplex,
+*                 GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetDpxValue
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+);
+
+/*******************************************************************************
+* gpcsSetForcedDpx
+*
+* DESCRIPTION:
+*        This routine forces duplex mode. If DpxValue is set to one, calling this
+*        routine with GT_TRUE will force duplex mode to be full duplex.
+*
+* INPUTS:
+*        port - the logical port number.
+*        state - GT_TRUE to force duplex mode, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsSetForcedDpx
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN    GT_BOOL        state
+);
+
+/*******************************************************************************
+* gpcsGetForcedDpx
+*
+* DESCRIPTION:
+*        This routine retrieves Forced Duplex.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if ForcedDpx bit is one,
+*                 GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetForcedDpx
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+);
+
+/*******************************************************************************
+* gpcsSetForceSpeed
+*
+* DESCRIPTION:
+*        This routine forces speed.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_PORT_FORCED_SPEED_MODE (10, 100, 1000, or No Speed Force)
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsSetForceSpeed
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN    GT_PORT_FORCED_SPEED_MODE  mode
+);
+
+/*******************************************************************************
+* gpcsGetForceSpeed
+*
+* DESCRIPTION:
+*        This routine retrieves Force Speed value
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_PORT_FORCED_SPEED_MODE (10, 100, 1000, or no force speed)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetForceSpeed
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_PORT_FORCED_SPEED_MODE   *mode
+);
+
+
+
+/* gtQosMap.c */
+
+/*******************************************************************************
+* gqosGetTagRemap
+*
+* DESCRIPTION:
+*        Gets the remapped priority value for a specific 802.1p priority on a
+*        given port.
+*
+* INPUTS:
+*        port  - the logical port number.
+*        pri   - 802.1p priority
+*
+* OUTPUTS:
+*        remappedPri - remapped Priority
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosGetTagRemap
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_U8        pri,
+    OUT GT_U8       *remappedPri
+);
+
+/*******************************************************************************
+* gqosSetTagRemap
+*
+* DESCRIPTION:
+*        Sets the remapped priority value for a specific 802.1p priority on a
+*        given port.
+*
+* INPUTS:
+*        port  - the logical port number.
+*        pri   - 802.1p priority
+*        remappedPri - remapped Priority
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosSetTagRemap
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_U8        pri,
+    IN GT_U8        remappedPri
+);
+
+
+/* gtSysConfig.c */
+
+/* gtSysStatus.c */
+
+/*******************************************************************************
+* gsysGetPPUState
+*
+* DESCRIPTION:
+*        This routine get the PPU State. These two bits return
+*        the current value of the PPU.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        mode - GT_PPU_STATE
+*
+* RETURNS:
+*        GT_OK           - on success
+*        GT_BAD_PARAM    - on bad parameter
+*        GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetPPUState
+(
+    IN  GT_QD_DEV       *dev,
+    OUT GT_PPU_STATE    *mode
+);
+
+
+/* Prototypes added for 88E6093 */
+
+
+/* gtBrgFdb.c */
+
+/*******************************************************************************
+* gfdbGetLearn2All
+*
+* DESCRIPTION:
+*        When more than one Marvell device is used to form a single 'switch', it
+*        may be desirable for all devices in the 'switch' to learn any address this
+*        device learns. When this bit is set to a one all other devices in the
+*        'switch' learn the same addresses this device learns. When this bit is
+*        cleared to a zero, only the devices that actually receive frames will learn
+*        from those frames. This mode typically supports more active MAC addresses
+*        at one time as each device in the switch does not need to learn addresses
+*        it may nerver use.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        mode  - GT_TRUE if Learn2All is enabled, GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK           - on success
+*        GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbGetLearn2All
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL     *mode
+);
+
+/*******************************************************************************
+* gfdbSetLearn2All
+*
+* DESCRIPTION:
+*        Enable or disable Learn2All mode.
+*
+* INPUTS:
+*        mode - GT_TRUE to set Learn2All, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbSetLearn2All
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gfdbRemovePort
+*
+* DESCRIPTION:
+*       This routine deassociages all or unblocked addresses from a port.
+*
+* INPUTS:
+*       moveCmd - the move operation type.
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbRemovePort
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_MOVE_CMD     moveCmd,
+    IN GT_LPORT        port
+);
+
+/*******************************************************************************
+* gfdbRemovePortInDB
+*
+* DESCRIPTION:
+*       This routine deassociages all or unblocked addresses from a port in the
+*       particular ATU Database (DBNum).
+*
+* INPUTS:
+*       moveCmd  - the move operation type.
+*       port - the logical port number.
+*        DBNum     - ATU MAC Address Database Number.
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NOT_SUPPORTED- if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbRemovePortInDB
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_MOVE_CMD     moveCmd,
+    IN GT_LPORT        port,
+    IN GT_U32         DBNum
+);
+
+
+
+/* gtBrgStp.c */
+
+/* gtBrgVlan.c */
+
+/* gtBrgVtu.c */
+
+/* gtEvents.c */
+
+/* gtPCSCtrl.c */
+
+/*******************************************************************************
+* gpcsGetPCSLink
+*
+* DESCRIPTION:
+*        This routine retrieves Link up status in PCS
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE for Comma Detected or GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetPCSLink
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+);
+
+/*******************************************************************************
+* gpcsSetFCValue
+*
+* DESCRIPTION:
+*        This routine sets Flow Control's force value
+*
+* INPUTS:
+*        port - the logical port number.
+*        state - GT_TRUE to force flow control enabled, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsSetFCValue
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN    GT_BOOL        state
+);
+
+/*******************************************************************************
+* gpcsGetFCValue
+*
+* DESCRIPTION:
+*        This routine retrieves Flow Control Value which will be used for Forcing
+*        Flow Control enabled or disabled.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if FC Force value is one (flow control enabled)
+*                 GT_FALSE otherwise (flow control disabled)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetFCValue
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+);
+
+/*******************************************************************************
+* gpcsSetForcedFC
+*
+* DESCRIPTION:
+*        This routine forces Flow Control. If FCValue is set to one, calling this
+*        routine with GT_TRUE will force Flow Control to be enabled.
+*
+* INPUTS:
+*        port - the logical port number.
+*        state - GT_TRUE to force flow control (enable or disable), GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsSetForcedFC
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN    GT_BOOL        state
+);
+
+/*******************************************************************************
+* gpcsGetForcedFC
+*
+* DESCRIPTION:
+*        This routine retrieves Forced Flow Control bit
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if ForcedFC bit is one,
+*                 GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetForcedFC
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+);
+
+
+
+/* gtPhyCtrl.c */
+
+/*******************************************************************************
+* gprtGetPagedPhyReg
+*
+* DESCRIPTION:
+*       This routine reads phy register of the given page
+*
+* INPUTS:
+*        port     - port to be read
+*        regAddr    - register offset to be read
+*        page    - page number to be read
+*
+* OUTPUTS:
+*        data    - value of the read register
+*
+* RETURNS:
+*       GT_OK               - if read successed
+*       GT_FAIL               - if read failed
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gprtGetPagedPhyReg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U32  port,
+    IN    GT_U32  regAddr,
+    IN    GT_U32  page,
+    OUT GT_U16* data
+);
+
+/*******************************************************************************
+* gprtSetPagedPhyReg
+*
+* DESCRIPTION:
+*       This routine writes a value to phy register of the given page
+*
+* INPUTS:
+*        port     - port to be read
+*        regAddr    - register offset to be read
+*        page    - page number to be read
+*        data    - value of the read register
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*       GT_OK               - if read successed
+*       GT_FAIL               - if read failed
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gprtSetPagedPhyReg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U32 port,
+    IN    GT_U32 regAddr,
+    IN    GT_U32 page,
+    IN  GT_U16 data
+);
+
+
+/* gtPortCtrl.c */
+
+/*******************************************************************************
+* gprtSetUseCoreTag
+*
+* DESCRIPTION:
+*       This routine set the UseCoreTag bit in Port Control Register.
+*            When this bit is cleared to a zero, ingressing frames are considered
+*            Tagged if the 16-bits following the frame's Source Address is 0x8100.
+*            When this bit is set to a one, ingressing frames are considered Tagged
+*            if the 16-bits following the frame's Source Address is equal to the
+*            CoreTag register value.
+*
+* INPUTS:
+*       port  - the logical port number.
+*       force - GT_TRUE for force flow control  or GT_FALSE otherwise
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetUseCoreTag
+(
+    IN GT_QD_DEV  *dev,
+    IN GT_LPORT   port,
+    IN GT_BOOL    force
+);
+
+/*******************************************************************************
+* gprtGetUseCoreTag
+*
+* DESCRIPTION:
+*       This routine get the Use Core Tag state.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       force - GT_TRUE for using core tag register  or GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetUseCoreTag
+(
+    IN  GT_QD_DEV  *dev,
+    IN  GT_LPORT   port,
+    OUT GT_BOOL    *force
+);
+
+/*******************************************************************************
+* gprtSetDiscardTagged
+*
+* DESCRIPTION:
+*        When this bit is set to a one, all non-MGMT frames that are processed as
+*        Tagged will be discarded as they enter this switch port. Priority only
+*        tagged frames (with a VID of 0x000) are considered tagged.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to discard tagged frame, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetDiscardTagged
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gprtGetDiscardTagged
+*
+* DESCRIPTION:
+*        This routine gets DiscardTagged bit for the given port
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode  - GT_TRUE if DiscardTagged bit is set, GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetDiscardTagged
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *mode
+);
+
+/*******************************************************************************
+* gprtSetDiscardUntagged
+*
+* DESCRIPTION:
+*        When this bit is set to a one, all non-MGMT frames that are processed as
+*        Untagged will be discarded as they enter this switch port. Priority only
+*        tagged frames (with a VID of 0x000) are considered tagged.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to discard untagged frame, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetDiscardUntagged
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gprtGetDiscardUntagged
+*
+* DESCRIPTION:
+*        This routine gets DiscardUntagged bit for the given port
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode  - GT_TRUE if DiscardUntagged bit is set, GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetDiscardUntagged
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *mode
+);
+
+/*******************************************************************************
+* gprtSetMapDA
+*
+* DESCRIPTION:
+*        When this bit is set to a one, normal switch operation will occur where a
+*        frame's DA address is used to direct the frame out the correct port.
+*        When this be is cleared to a zero, the frame will be sent out the port(s)
+*        defined by ForwardUnknown bits or the DefaultForward bits even if the DA
+*        is ound in the address database.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to use MapDA, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetMapDA
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gprtGetMapDA
+*
+* DESCRIPTION:
+*        This routine gets MapDA bit for the given port
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode  - GT_TRUE if MapDA bit is set, GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetMapDA
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *mode
+);
+
+/*******************************************************************************
+* gprtSetDefaultForward
+*
+* DESCRIPTION:
+*        When this bit is set to a one, normal switch operation will occurs and
+*        multicast frames with unknown DA addresses are allowed to egress out this
+*        port (assuming the VLAN settings allow the frame to egress this port too).
+*        When this be is cleared to a zero, multicast frames with unknown DA
+*        addresses will not egress out this port.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to use DefaultForward, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetDefaultForward
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gprtGetDefaultForward
+*
+* DESCRIPTION:
+*        This routine gets DefaultForward bit for the given port
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode  - GT_TRUE if DefaultForward bit is set, GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetDefaultForward
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *mode
+);
+
+/*******************************************************************************
+* gprtSetEgressMonitorSource
+*
+* DESCRIPTION:
+*        When this be is cleared to a zero, normal network switching occurs.
+*        When this bit is set to a one, any frame that egresses out this port will
+*        also be sent to the EgressMonitorDest Port
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to set EgressMonitorSource, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetEgressMonitorSource
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gprtGetEgressMonitorSource
+*
+* DESCRIPTION:
+*        This routine gets EgressMonitorSource bit for the given port
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode  - GT_TRUE if EgressMonitorSource bit is set, GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetEgressMonitorSource
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *mode
+);
+
+/*******************************************************************************
+* gprtSetIngressMonitorSource
+*
+* DESCRIPTION:
+*        When this be is cleared to a zero, normal network switching occurs.
+*        When this bit is set to a one, any frame that egresses out this port will
+*        also be sent to the EgressMonitorDest Port
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to set EgressMonitorSource, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetIngressMonitorSource
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gprtGetIngressMonitorSource
+*
+* DESCRIPTION:
+*        This routine gets IngressMonitorSource bit for the given port
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode  - GT_TRUE if IngressMonitorSource bit is set, GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetIngressMonitorSource
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *mode
+);
+
+
+/* gtPortPAV.c */
+
+/* gtPortRateCtrl.c */
+
+/* gtPortRmon.c */
+
+/*******************************************************************************
+* gstatsGetPortCounter3
+*
+* DESCRIPTION:
+*        This routine gets a specific counter of the given port
+*
+* INPUTS:
+*        port - the logical port number.
+*        counter - the counter which will be read
+*
+* OUTPUTS:
+*        statsData - points to 32bit data storage for the MIB counter
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*
+* COMMENTS:
+*        This function supports Gigabit Switch only
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstatsGetPortCounter3
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    IN  GT_STATS_COUNTERS3    counter,
+    OUT GT_U32            *statsData
+);
+
+/*******************************************************************************
+* gstatsGetPortAllCounters3
+*
+* DESCRIPTION:
+*        This routine gets all counters of the given port
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        statsCounterSet - points to GT_STATS_COUNTER_SET for the MIB counters
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*
+* COMMENTS:
+*        This function supports Gigabit Switch only
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstatsGetPortAllCounters3
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    OUT GT_STATS_COUNTER_SET3    *statsCounterSet
+);
+
+
+/* gtPortStat.c */
+
+/*******************************************************************************
+* gprtGetPortCtr2
+*
+* DESCRIPTION:
+*       This routine gets the port InDiscards, InFiltered, and OutFiltered counters.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       ctr - the counters value.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPortCtr2
+(
+    IN  GT_QD_DEV       *dev,
+    IN  GT_LPORT        port,
+    OUT GT_PORT_STAT2   *ctr
+);
+
+/* gtPortStatus.c */
+
+/*******************************************************************************
+* gprtGetMGMII
+*
+* DESCRIPTION:
+*        SERDES Interface mode. When this bit is cleared to a zero and a PHY is
+*        detected connected to this port, the SERDES interface between this port
+*        and the PHY will be SGMII.  When this bit is set toa one and a PHY is
+*        detected connected to this port, the SERDES interface between this port
+*        and the PHY will be MGMII. When no PHY is detected on this port and the
+*        SERDES interface is being used, it will be configured in 1000Base-X mode.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE or GT_FALSE
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetMGMII
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+);
+
+/*******************************************************************************
+* gprtSetMGMII
+*
+* DESCRIPTION:
+*        SERDES Interface mode. When this bit is cleared to a zero and a PHY is
+*        detected connected to this port, the SERDES interface between this port
+*        and the PHY will be SGMII.  When this bit is set toa one and a PHY is
+*        detected connected to this port, the SERDES interface between this port
+*        and the PHY will be MGMII. When no PHY is detected on this port and the
+*        SERDES interface is being used, it will be configured in 1000Base-X mode.
+*
+* INPUTS:
+*        port - the logical port number.
+*        state - GT_TRUE or GT_FALSE
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetMGMII
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_BOOL      state
+);
+
+
+/* gtQosMap.c */
+
+/* gtSysCtrl.c */
+
+/*******************************************************************************
+* gsysSetCoreTagType
+*
+* DESCRIPTION:
+*        This routine sets Ether Core Tag Type.
+*        This Ether Type is added to frames that egress the switch as Double Tagged
+*        frames. It is also the Ether Type expected during Ingress to determine if
+*        a frame is Tagged or not on ports configured as UseCoreTag mode.
+*
+* INPUTS:
+*        etherType - Core Tag Type (2 bytes)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetCoreTagType
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U16          etherType
+);
+
+/*******************************************************************************
+* gsysGetCoreTagType
+*
+* DESCRIPTION:
+*        This routine gets CoreTagType
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        etherType - Core Tag Type (2 bytes)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetCoreTagType
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U16      *etherType
+);
+
+/*******************************************************************************
+* gsysSetIngressMonitorDest
+*
+* DESCRIPTION:
+*        This routine sets Ingress Monitor Destination Port. Frames that are
+*        targeted toward an Ingress Monitor Destination go out the port number
+*        indicated in these bits. This includes frames received on a Marvell Tag port
+*        with the Ingress Monitor type, and frames received on a Network port that
+*        is enabled to be the Ingress Monitor Source Port.
+*        If the Ingress Monitor Destination Port resides in this device these bits
+*        should point to the Network port where these frames are to egress. If the
+*        Ingress Monitor Destination Port resides in another device these bits
+*        should point to the Marvell Tag port in this device that is used to get
+*        to the device that contains the Ingress Monitor Destination Port.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetIngressMonitorDest
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port
+);
+
+/*******************************************************************************
+* gsysGetIngressMonitorDest
+*
+* DESCRIPTION:
+*        This routine gets Ingress Monitor Destination Port.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        port  - the logical port number.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetIngressMonitorDest
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_LPORT      *port
+);
+
+/*******************************************************************************
+* gsysSetEgressMonitorDest
+*
+* DESCRIPTION:
+*        This routine sets Egress Monitor Destination Port. Frames that are
+*        targeted toward an Egress Monitor Destination go out the port number
+*        indicated in these bits. This includes frames received on a Marvell Tag port
+*        with the Egress Monitor type, and frames transmitted on a Network port that
+*        is enabled to be the Egress Monitor Source Port.
+*        If the Egress Monitor Destination Port resides in this device these bits
+*        should point to the Network port where these frames are to egress. If the
+*        Egress Monitor Destination Port resides in another device these bits
+*        should point to the Marvell Tag port in this device that is used to get
+*        to the device that contains the Egress Monitor Destination Port.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetEgressMonitorDest
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port
+);
+
+/*******************************************************************************
+* gsysGetEgressMonitorDest
+*
+* DESCRIPTION:
+*        This routine gets Egress Monitor Destination Port.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        port  - the logical port number.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetEgressMonitorDest
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_LPORT      *port
+);
+
+
+/* gtSysConfig.c */
+
+/* gtSysStatus.c */
+
+
+/* functions added on rev 2.2 */
+
+/* gtPortCtrl.c */
+
+/*******************************************************************************
+* gprtSetMessagePort
+*
+* DESCRIPTION:
+*        When the Learn2All bit is set to one, learning message frames are
+*        generated. These frames will be sent out all ports whose Message Port is
+*        set to one.
+*         If this feature is used, it is recommended that all Marvell Tag ports,
+*        except for the CPU's port, have their MessagePort bit set to one.
+*        Ports that are not Marvell Tag ports should not have their Message Port
+*        bit set to one.
+*
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to make this port a Message Port. GT_FALSE, otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetMessagePort
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gprtGetMessagePort
+*
+* DESCRIPTION:
+*        When the Learn2All bit is set to one, learning message frames are
+*        generated. These frames will be sent out all ports whose Message Port is
+*        set to one.
+*         If this feature is used, it is recommended that all Marvell Tag ports,
+*        except for the CPU's port, have their MessagePort bit set to one.
+*        Ports that are not Marvell Tag ports should not have their Message Port
+*        bit set to one.
+*
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE to make this port a Message Port. GT_FALSE, otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetMessagePort
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *mode
+);
+
+
+/*******************************************************************************
+* gprtSetTrunkPort
+*
+* DESCRIPTION:
+*        This function enables/disables and sets the trunk ID.
+*
+* INPUTS:
+*        port - the logical port number.
+*        en - GT_TRUE to make the port be a member of a trunk with the given trunkId.
+*             GT_FALSE, otherwise.
+*        trunkId - valid ID is 0 ~ 15.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if trunkId is neither valid nor INVALID_TRUNK_ID
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetTrunkPort
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL         en,
+    IN GT_U32        trunkId
+);
+
+
+/*******************************************************************************
+* gprtGetTrunkPort
+*
+* DESCRIPTION:
+*        This function returns trunk state of the port.
+*        When trunk is disabled, trunkId field won't have valid value.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        en - GT_TRUE, if the port is a member of a trunk,
+*             GT_FALSE, otherwise.
+*        trunkId - 0 ~ 15, valid only if en is GT_TRUE
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetTrunkPort
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    OUT GT_BOOL     *en,
+    OUT GT_U32        *trunkId
+);
+
+/*******************************************************************************
+* gprtGetGlobal2Reg
+*
+* DESCRIPTION:
+*       This routine reads Switch Global 2 Registers.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetGlobal2Reg
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32         regAddr,
+    OUT GT_U16         *data
+);
+
+/*******************************************************************************
+* gprtSetGlobal2Reg
+*
+* DESCRIPTION:
+*       This routine writes Switch Global2 Registers.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetGlobal2Reg
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_U32            regAddr,
+    IN  GT_U16            data
+);
+
+/*******************************************************************************
+* gprtGetGlobal3Reg
+*
+* DESCRIPTION:
+*       This routine reads Switch Global 3 Registers.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetGlobal3Reg
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32         regAddr,
+    OUT GT_U16         *data
+);
+
+/*******************************************************************************
+* gprtSetGlobal3Reg
+*
+* DESCRIPTION:
+*       This routine writes Switch Global3 Registers.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetGlobal3Reg
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_U32            regAddr,
+    IN  GT_U16            data
+);
+
+/* gtSysCtrl.c */
+/*******************************************************************************
+* gsysSetARPDest
+*
+* DESCRIPTION:
+*        This routine sets ARP Monitor Destination Port. Tagged or untagged
+*        frames that ingress Network ports with the Broadcast Destination Address
+*        and with an Ethertype of 0x0806 are mirrored to this port. The ARPDest
+*        should point to the port that directs these frames to the switch's CPU
+*        that will process ARPs. This target port should be a Marvell Tag port so
+*        that frames will egress with a To_CPU Marvell Tag with a CPU Code of ARP.
+*        To_CPU Marvell Tag frames with a CPU Code off ARP that ingress a Marvell
+*        Tag port will be sent to the port number definded in ARPDest.
+*
+*        If ARPDest =  0xF, ARP Monitoring is disabled and ingressing To_CPU ARP
+*        frames will be discarded.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetARPDest
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port
+);
+
+/*******************************************************************************
+* gsysGetARPDest
+*
+* DESCRIPTION:
+*        This routine gets ARP Monitor Destination Port. Tagged or untagged
+*        frames that ingress Network ports with the Broadcast Destination Address
+*        and with an Ethertype of 0x0806 are mirrored to this port. The ARPDest
+*        should point to the port that directs these frames to the switch's CPU
+*        that will process ARPs. This target port should be a Marvell Tag port so
+*        that frames will egress with a To_CPU Marvell Tag with a CPU Code of ARP.
+*        To_CPU Marvell Tag frames with a CPU Code off ARP that ingress a Marvell
+*        Tag port will be sent to the port number definded in ARPDest.
+*
+*        If ARPDest =  0xF, ARP Monitoring is disabled and ingressing To_CPU ARP
+*        frames will be discarded.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        port  - the logical port number.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetARPDest
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_LPORT      *port
+);
+
+/*******************************************************************************
+* gsysSetRsvd2CpuEnables
+*
+* DESCRIPTION:
+*        Reserved DA Enables. When the function, gsysSetRsvd2Cpu, is called with
+*        en = GT_TRUE, the 16 reserved multicast DA addresses, whose bit in this
+*        enBits(or register) are also set to a one, are treated as MGMT frames.
+*        All the reserved DA's take the form 01:80:C2:00:00:0x. When x = 0x0,
+*        bit 0 of this register is tested. When x = 0x2, bit 2 of this field is
+*        tested and so on.
+*        If the tested bit in this register is cleared to a zero, the frame will
+*        be treated as a normal (non-MGMT) frame.
+*
+* INPUTS:
+*        enBits - bit vector of enabled Reserved Multicast.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetRsvd2CpuEnables
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U16        enBits
+);
+
+/*******************************************************************************
+* gsysGetRsvd2CpuEnables
+*
+* DESCRIPTION:
+*        Reserved DA Enables. When the function, gsysSetRsvd2Cpu, is called with
+*        en = GT_TRUE, the 16 reserved multicast DA addresses, whose bit in this
+*        enBits(or register) are also set to a one, are treated as MGMT frames.
+*        All the reserved DA's take the form 01:80:C2:00:00:0x. When x = 0x0,
+*        bit 0 of this register is tested. When x = 0x2, bit 2 of this field is
+*        tested and so on.
+*        If the tested bit in this register is cleared to a zero, the frame will
+*        be treated as a normal (non-MGMT) frame.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        enBits - bit vector of enabled Reserved Multicast.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetRsvd2CpuEnables
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U16      *enBits
+);
+
+/*******************************************************************************
+* gsysSetRsvd2Cpu
+*
+* DESCRIPTION:
+*        When the Rsvd2Cpu is set to a one(GT_TRUE), frames with a Destination
+*        Address in the range 01:80:C2:00:00:0x, regardless of their VLAN
+*        membership, will be considered MGMT frames and sent to the port's CPU
+*        Port as long as the associated Rsvd2CpuEnable bit (gsysSetRsvd2CpuEnable
+*        function) for the frames's DA is also set to a one.
+*
+* INPUTS:
+*        en - GT_TRUE if Rsvd2Cpu is set. GT_FALSE, otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetRsvd2Cpu
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+);
+
+/*******************************************************************************
+* gsysGetRsvd2Cpu
+*
+* DESCRIPTION:
+*        When the Rsvd2Cpu is set to a one(GT_TRUE), frames with a Destination
+*        Address in the range 01:80:C2:00:00:0x, regardless of their VLAN
+*        membership, will be considered MGMT frames and sent to the port's CPU
+*        Port as long as the associated Rsvd2CpuEnable bit (gsysSetRsvd2CpuEnable
+*        function) for the frames's DA is also set to a one.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if Rsvd2Cpu is set. GT_FALSE, otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetRsvd2Cpu
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+);
+/*******************************************************************************
+* gsysSetLearn2All
+*
+* DESCRIPTION:
+*	enable the Learn to All devices in a Switch, this must be enabled for
+*	hardware learn limiting is enabled on any port on any device
+*
+* INPUTS:
+*        en - GT_TRUE if Learn2All is set. GT_FALSE, otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetLearn2All
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+);
+/*******************************************************************************
+* gsysGetLearn2All
+*
+* DESCRIPTION:
+*	returns the state of Learn to All devices in a Switch flag
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if Learn2All is set. GT_FALSE, otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetLearn2All
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL       *en
+);
+/*******************************************************************************
+* gsysSetMGMTPri
+*
+* DESCRIPTION:
+*        These bits are used as the PRI[2:0] bits on Rsvd2CPU MGMT frames.
+*
+* INPUTS:
+*        pri - PRI[2:0] bits (should be less than 8)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - If pri is not less than 8.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetMGMTPri
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U16        pri
+);
+
+/*******************************************************************************
+* gsysGetMGMTPri
+*
+* DESCRIPTION:
+*        These bits are used as the PRI[2:0] bits on Rsvd2CPU MGMT frames.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        pri - PRI[2:0] bits (should be less than 8)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetMGMTPri
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U16      *pri
+);
+
+/*******************************************************************************
+* gsysSetUseDoubleTagData
+*
+* DESCRIPTION:
+*        This bit is used to determine if Double Tag data that is removed from a
+*        Double Tag frame is used or ignored when making switching decisions on
+*        the frame.
+*
+* INPUTS:
+*        en - GT_TRUE to use removed tag data, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetUseDoubleTagData
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+);
+
+/*******************************************************************************
+* gsysGetUseDoubleTagData
+*
+* DESCRIPTION:
+*        This bit is used to determine if Double Tag data that is removed from a
+*        Double Tag frame is used or ignored when making switching decisions on
+*        the frame.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if removed tag data is used, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetUseDoubleTagData
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+);
+
+/*******************************************************************************
+* gsysSetPreventLoops
+*
+* DESCRIPTION:
+*        When a Marvell Tag port receives a Forward Marvell Tag whose Src_Dev
+*        field equals this device's Device Number, the following action will be
+*        taken depending upon the value of this bit.
+*        GT_TRUE (1) - The frame will be discarded.
+*        GT_FALSE(0) - The frame will be prevented from going out its original
+*                        source port as defined by the frame's Src_Port field.
+*
+* INPUTS:
+*        en - GT_TRUE to discard the frame as described above, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetPreventLoops
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+);
+
+/*******************************************************************************
+* gsysGetPreventLoops
+*
+* DESCRIPTION:
+*        When a Marvell Tag port receives a Forward Marvell Tag whose Src_Dev
+*        field equals this device's Device Number, the following action will be
+*        taken depending upon the value of this bit.
+*        GT_TRUE (1) - The frame will be discarded.
+*        GT_FALSE(0) - The frame will be prevented from going out its original
+*                        source port as defined by the frame's Src_Port field.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE to discard the frame as described above, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetPreventLoops
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+);
+
+/*******************************************************************************
+* gsysSetFlowControlMessage
+*
+* DESCRIPTION:
+*        When this bit is set to one, Marvell Tag Flow Control messages will be
+*        generated when an output queue becomes congested and received Marvell Tag
+*        Flow Control messages will pause MACs inside this device. When this bit
+*        is cleared to a zero, Marvell Tag Flow Control messages will not be
+*        generated and any received will be ignored at the target MAC.
+*
+* INPUTS:
+*        en - GT_TRUE to use Marvell Tag Flow Control message, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetFlowControlMessage
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+);
+
+/*******************************************************************************
+* gsysGetFlowControlMessage
+*
+* DESCRIPTION:
+*        When this bit is set to one, Marvell Tag Flow Control messages will be
+*        generated when an output queue becomes congested and received Marvell Tag
+*        Flow Control messages will pause MACs inside this device. When this bit
+*        is cleared to a zero, Marvell Tag Flow Control messages will not be
+*        generated and any received will be ignored at the target MAC.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE to use Marvell Tag Flow Control message, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetFlowControlMessage
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+);
+
+/*******************************************************************************
+* gsysSetForceFlowControlPri
+*
+* DESCRIPTION:
+*        When this bit is set to a one the PRI[2:0] bits of generated Marvell Tag
+*        Flow Control frames will be set to the value of the FC Pri bits (set by
+*        gsysSetFCPri function call). When this bit is cleared to a zero, generated
+*        Marvell Tag Flow Control frames will retain the PRI[2:0] bits from the
+*        frames that caused the congestion. This bit will have no effect if the
+*        FlowControlMessage bit(gsysSetFlowControlMessage function call) is
+*        cleared to a zero.
+*
+* INPUTS:
+*        en - GT_TRUE to use defined PRI bits, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetForceFlowControlPri
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+);
+
+/*******************************************************************************
+* gsysGetForceFlowControlPri
+*
+* DESCRIPTION:
+*        When this bit is set to a one the PRI[2:0] bits of generated Marvell Tag
+*        Flow Control frames will be set to the value of the FC Pri bits (set by
+*        gsysSetFCPri function call). When this bit is cleared to a zero, generated
+*        Marvell Tag Flow Control frames will retain the PRI[2:0] bits from the
+*        frames that caused the congestion. This bit will have no effect if the
+*        FlowControlMessage bit(gsysSetFlowControlMessage function call) is
+*        cleared to a zero.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE to use defined PRI bits, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetForceFlowControlPri
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+);
+
+/*******************************************************************************
+* gsysSetFCPri
+*
+* DESCRIPTION:
+*        These bits are used as the PRI[2:0] bits on generated Marvell Tag Flow
+*        Control frames if the ForceFlowControlPri bit(gsysSetForceFlowControlPri)
+*        is set to a one.
+*
+* INPUTS:
+*        pri - PRI[2:0] bits (should be less than 8)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - If pri is not less than 8.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetFCPri
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U16        pri
+);
+
+/*******************************************************************************
+* gsysGetFCPri
+*
+* DESCRIPTION:
+*        These bits are used as the PRI[2:0] bits on generated Marvell Tag Flow
+*        Control frames if the ForceFlowControlPri bit(gsysSetForceFlowControlPri)
+*        is set to a one.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        pri - PRI[2:0] bits (should be less than 8)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetFCPri
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U16      *pri
+);
+
+/*******************************************************************************
+* gsysSetFlowCtrlDelay
+*
+* DESCRIPTION:
+*        This function sets Flow control delay time for 10Mbps, 100Mbps, and
+*        1000Mbps.
+*
+* INPUTS:
+*        sp - PORT_SPEED_10_MBPS, PORT_SPEED_100_MBPS, or PORT_SPEED_1000_MBPS
+*        delayTime - actual delay time will be (this value x 2.048uS).
+*                    the value cannot exceed 0x1FFF (or 8191 in decimal).
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if sp is not valid or delayTime is > 0x1FFF.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetFlowCtrlDelay
+(
+    IN GT_QD_DEV            *dev,
+    IN GT_PORT_SPEED_MODE    sp,
+    IN GT_U32                delayTime
+);
+
+/*******************************************************************************
+* gsysGetFlowCtrlDelay
+*
+* DESCRIPTION:
+*        This function retrieves Flow control delay time for 10Mbps, 100Mbps, and
+*        1000Mbps.
+*
+* INPUTS:
+*        sp - PORT_SPEED_10_MBPS, PORT_SPEED_100_MBPS, or PORT_SPEED_1000_MBPS
+*
+* OUTPUTS:
+*        delayTime - actual delay time will be (this value x 2.048uS).
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if sp is not valid or delayTime is > 0x1FFF.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetFlowCtrlDelay
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_PORT_SPEED_MODE    sp,
+    OUT GT_U32        *delayTime
+);
+
+/*******************************************************************************
+* gsysSetDevRoutingTable
+*
+* DESCRIPTION:
+*        This function sets Device to Port mapping (which device is connected to
+*        which port of this device).
+*
+* INPUTS:
+*        devNum - target device number.
+*        portNum - the logical port number.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if devNum >= 32 or port >= total number of ports.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetDevRoutingTable
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U32          devNum,
+    IN GT_LPORT     port
+);
+
+/*******************************************************************************
+* gsysGetDevRoutingTable
+*
+* DESCRIPTION:
+*        This function gets Device to Port mapping (which device is connected to
+*        which port of this device).
+*
+* INPUTS:
+*        devNum - target device number.
+*
+* OUTPUTS:
+*        portNum - the logical port number.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if devNum >= 32
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetDevRoutingTable
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32         devNum,
+    OUT GT_LPORT     *port
+);
+
+/*******************************************************************************
+* gsysSetTrunkMaskTable
+*
+* DESCRIPTION:
+*        This function sets Trunk mask vector table for load balancing.
+*        This vector will be AND'ed with where the frame was originally egressed to.
+*        To insure all trunks are load balanced correctly, the data in this table
+*        needs to be correctly configured.
+*
+* INPUTS:
+*        trunkNum - one of the eight Trunk mask vectors.
+*        trunkMask - Trunk Mask bits. Bit 0 controls trunk masking for port 0,
+*                    bit 1 for port 1 , etc.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if trunkNum > 0x7 or trunMask > 0x7FF (or port vector).
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetTrunkMaskTable
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U32          trunkNum,
+    IN GT_U32        trunkMask
+);
+
+/*******************************************************************************
+* gsysGetTrunkMaskTable
+*
+* DESCRIPTION:
+*        This function sets Trunk mask vector table for load balancing.
+*        This vector will be AND'ed with where the frame was originally egressed to.
+*        To insure all trunks are load balanced correctly, the data in this table
+*        needs to be correctly configured.
+*
+* INPUTS:
+*        trunkNum - one of the eight Trunk mask vectors.
+*
+* OUTPUTS:
+*        trunkMask - Trunk Mask bits. Bit 0 controls trunk masking for port 0,
+*                    bit 1 for port 1 , etc.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if trunkNum > 0x7.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetTrunkMaskTable
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32         trunkNum,
+    OUT GT_U32        *trunkMask
+);
+
+/*******************************************************************************
+* gsysSetHashTrunk
+*
+* DESCRIPTION:
+*        Hash DA & SA for TrunkMask selection. Trunk load balancing is accomplished
+*        by using the frame's DA and SA fields to access one of eight Trunk Masks.
+*        When this bit is set to a one, the hashed computed for address table
+*        lookups is used for the TrunkMask selection. When this bit is cleared to
+*        a zero the lower 3 bits of the frame's DA and SA are XOR'ed together to
+*        select the TrunkMask to use.
+*
+* INPUTS:
+*        en - GT_TRUE to use lookup table, GT_FALSE to use XOR.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetHashTrunk
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+);
+
+/*******************************************************************************
+* gsysGetHashTrunk
+*
+* DESCRIPTION:
+*        Hash DA & SA for TrunkMask selection. Trunk load balancing is accomplished
+*        by using the frame's DA and SA fields to access one of eight Trunk Masks.
+*        When this bit is set to a one, the hashed computed for address table
+*        lookups is used for the TrunkMask selection. When this bit is cleared to
+*        a zero the lower 3 bits of the frame's DA and SA are XOR'ed together to
+*        select the TrunkMask to use.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE to use lookup table, GT_FALSE to use XOR.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetHashTrunk
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+);
+
+/*******************************************************************************
+* gsysSetTrunkRouting
+*
+* DESCRIPTION:
+*        This function sets routing information for the given Trunk ID.
+*
+* INPUTS:
+*        trunkId - Trunk ID.
+*        trunkRoute - Trunk route bits. Bit 0 controls trunk routing for port 0,
+*                    bit 1 for port 1 , etc.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if trunkId > 0xF or trunkRoute > 0x7FF(or port vector).
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetTrunkRouting
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U32          trunkId,
+    IN GT_U32        trunkRoute
+);
+
+/*******************************************************************************
+* gsysGetTrunkRouting
+*
+* DESCRIPTION:
+*        This function retrieves routing information for the given Trunk ID.
+*
+* INPUTS:
+*        trunkId - Trunk ID.
+*
+* OUTPUTS:
+*        trunkRoute - Trunk route bits. Bit 0 controls trunk routing for port 0,
+*                    bit 1 for port 1 , etc.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if trunkId > 0xF.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetTrunkRouting
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32         trunkId,
+    OUT GT_U32        *trunkRoute
+);
+
+
+
+/* Prototype added for 88E6095 Rev 1 or Rev 2 */
+
+/* gtPortCtrl.c */
+/*******************************************************************************
+* gprtGetDiscardBCastMode
+*
+* DESCRIPTION:
+*       This routine gets the Discard Broadcast Mode. If the mode is enabled,
+*        all the broadcast frames to the given port will be discarded.
+*
+* INPUTS:
+*       port - logical port number
+*
+* OUTPUTS:
+*        en - GT_TRUE, if enabled,
+*             GT_FALSE, otherwise.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetDiscardBCastMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *en
+);
+
+/*******************************************************************************
+* gprtSetDiscardBCastMode
+*
+* DESCRIPTION:
+*       This routine sets the Discard Broadcast mode.
+*        If the mode is enabled, all the broadcast frames to the given port will
+*        be discarded.
+*
+* INPUTS:
+*       port - logical port number
+*        en - GT_TRUE, to enable the mode,
+*             GT_FALSE, otherwise.
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetDiscardBCastMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_BOOL      en
+);
+
+/*******************************************************************************
+* gprtGetFCOnRateLimitMode
+*
+* DESCRIPTION:
+*       This routine returns mode that tells if ingress rate limiting uses Flow
+*        Control. When this mode is enabled and the port receives frames over the
+*        limit, Ingress Rate Limiting will be performed by stalling the
+*        link partner using flow control, instead of discarding frames.
+*
+* INPUTS:
+*       port - logical port number
+*
+* OUTPUTS:
+*        en - GT_TRUE, if the mode is enabled,
+*             GT_FALSE, otherwise.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetFCOnRateLimitMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *en
+);
+
+/*******************************************************************************
+* gprtSetFCOnRateLimitMode
+*
+* DESCRIPTION:
+*       This routine sets the mode that tells if ingress rate limiting uses Flow
+*        Control. When this mode is enabled and the port receives frames over the
+*        limit, Ingress Rate Limiting will be performed by stalling the
+*        link partner using flow control, instead of discarding frames.
+*
+* INPUTS:
+*       port - logical port number
+*        en - GT_TRUE, to enable the mode,
+*             GT_FALSE, otherwise.
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetFCOnRateLimitMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_BOOL      en
+);
+
+
+/* gtPortRateCtrl.c */
+
+/*******************************************************************************
+* grcSetBurstRate
+*
+* DESCRIPTION:
+*       This routine sets the port's ingress data limit based on burst size.
+*
+* INPUTS:
+*       port    - logical port number.
+*       bsize    - burst size.
+*       rate    - ingress data rate limit. These frames will be discarded after
+*                the ingress rate selected is reached or exceeded.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*                                Minimum rate for Burst Size 24K byte is 128Kbps
+*                                Minimum rate for Burst Size 48K byte is 256Kbps
+*                                Minimum rate for Burst Size 96K byte is 512Kbps
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*        If the device supports both priority based Rate Limiting and burst size
+*        based Rate limiting, user has to manually change the mode to burst size
+*        based Rate limiting by calling gsysSetRateLimitMode.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS grcSetBurstRate
+(
+    IN GT_QD_DEV       *dev,
+    IN GT_LPORT        port,
+    IN GT_BURST_SIZE   bsize,
+    IN GT_BURST_RATE   rate
+);
+
+/*******************************************************************************
+* grcGetBurstRate
+*
+* DESCRIPTION:
+*       This routine retrieves the port's ingress data limit based on burst size.
+*
+* INPUTS:
+*       port    - logical port number.
+*
+* OUTPUTS:
+*       bsize    - burst size.
+*       rate    - ingress data rate limit. These frames will be discarded after
+*                the ingress rate selected is reached or exceeded.
+*
+* RETURNS:
+*       GT_OK            - on success
+*       GT_FAIL          - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS grcGetBurstRate
+(
+    IN  GT_QD_DEV       *dev,
+    IN  GT_LPORT        port,
+    OUT GT_BURST_SIZE   *bsize,
+    OUT GT_BURST_RATE   *rate
+);
+
+
+/*******************************************************************************
+* grcSetTCPBurstRate
+*
+* DESCRIPTION:
+*       This routine sets the port's TCP/IP ingress data limit based on burst size.
+*
+* INPUTS:
+*       port    - logical port number.
+*       rate    - ingress data rate limit for TCP/IP packets. These frames will
+*                be discarded after the ingress rate selected is reached or exceeded.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*                                Valid rate is GT_BURST_NO_LIMIT, or between
+*                                64Kbps and 1500Kbps.
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*        If the device supports both priority based Rate Limiting and burst size
+*        based Rate limiting, user has to manually change the mode to burst size
+*        based Rate limiting by calling gsysSetRateLimitMode.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS grcSetTCPBurstRate
+(
+    IN GT_QD_DEV       *dev,
+    IN GT_LPORT        port,
+    IN GT_BURST_RATE   rate
+);
+
+
+/*******************************************************************************
+* grcGetTCPBurstRate
+*
+* DESCRIPTION:
+*       This routine sets the port's TCP/IP ingress data limit based on burst size.
+*
+* INPUTS:
+*       port    - logical port number.
+*
+* OUTPUTS:
+*       rate    - ingress data rate limit for TCP/IP packets. These frames will
+*                be discarded after the ingress rate selected is reached or exceeded.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_VALUE        - register value is not known
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*        If the device supports both priority based Rate Limiting and burst size
+*        based Rate limiting, user has to manually change the mode to burst size
+*        based Rate limiting by calling gsysSetRateLimitMode.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS grcGetTCPBurstRate
+(
+    IN  GT_QD_DEV       *dev,
+    IN  GT_LPORT        port,
+    OUT GT_BURST_RATE   *rate
+);
+
+
+/* gtSysCtrl.c */
+/*******************************************************************************
+* gsysSetRateLimitMode
+*
+* DESCRIPTION:
+*        Ingress Rate Limiting can be either Priority based or Burst Size based.
+*        This routine sets which mode to use.
+*
+* INPUTS:
+*        mode - either GT_RATE_PRI_BASE or GT_RATE_BURST_BASE
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if invalid mode is used.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetRateLimitMode
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_INGRESS_RATE_MODE mode
+);
+
+/*******************************************************************************
+* gsysGetRateLimitMode
+*
+* DESCRIPTION:
+*        Ingress Rate Limiting can be either Priority based or Burst Size based.
+*        This routine gets which mode is being used.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        mode - either GT_RATE_PRI_BASE or GT_RATE_BURST_BASE
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetRateLimitMode
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_INGRESS_RATE_MODE *mode
+);
+
+/*******************************************************************************
+* gsysSetAgeInt
+*
+* DESCRIPTION:
+*        Enable/Disable Age Refresh Interrupt. If CPU Directed Learning is being
+*        used (gprtSetLockedPort), it may be desirable to know when an address is
+*        still being used before it totally ages out of the switch. This can be
+*        accomplished by enabling Age Refresh Interrupt (or ATU Age Violation Int).
+*        An ATU Age Violation looks identical to and reported the same as an ATU
+*        Miss Violation. The only difference is when this reported. Normal ATU Miss
+*        Violation only occur if a new SA arrives at a LockedPort. The Age version
+*        of the ATU Miss Violation occurs if an SA arrives at a LockedPort, where
+*        the address is contained in the ATU's database, but where its EntryState
+*        is less than 0x4 (i.e., it has aged more than 1/2 way).
+*        GT_ATU_PROB Interrupt should be enabled for this interrupt to occur.
+*        Refer to eventSetActive routine to enable GT_ATU_PROB.
+*
+*
+* INPUTS:
+*        en - GT_TRUE, to enable,
+*             GT_FALSE, otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetAgeInt
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+);
+
+/*******************************************************************************
+* gsysGetAgeInt
+*
+* DESCRIPTION:
+*        Get state of Age Refresh Interrupt mode. If CPU Directed Learning is being
+*        used (gprtSetLockedPort), it may be desirable to know when an address is
+*        still being used before it totally ages out of the switch. This can be
+*        accomplished by enabling Age Refresh Interrupt (or ATU Age Violation Int).
+*        An ATU Age Violation looks identical to and reported the same as an ATU
+*        Miss Violation. The only difference is when this reported. Normal ATU Miss
+*        Violation only occur if a new SA arrives at a LockedPort. The Age version
+*        of the ATU Miss Violation occurs if an SA arrives at a LockedPort, where
+*        the address is contained in the ATU's database, but where its EntryState
+*        is less than 0x4 (i.e., it has aged more than 1/2 way).
+*        GT_ATU_PROB Interrupt should be enabled for this interrupt to occur.
+*        Refer to eventSetActive routine to enable GT_ATU_PROB.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE, if enabled,
+*             GT_FALSE, otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetAgeInt
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL        *en
+);
+
+
+/* For Zephyr */
+
+/* gtPhyCtrl.c */
+/*******************************************************************************
+* gprtGetPhyLinkStatus
+*
+* DESCRIPTION:
+*       This routine retrieves the Link status.
+*
+* INPUTS:
+*         port     - The logical port number
+*
+* OUTPUTS:
+*       linkStatus - GT_FALSE if link is not established,
+*                     GT_TRUE if link is established.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPhyLinkStatus
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL      *linkStatus
+);
+
+
+/*******************************************************************************
+* gprtSetPktGenEnable
+*
+* DESCRIPTION:
+*       This routine enables or disables Packet Generator.
+*       Link should be established first prior to enabling the packet generator,
+*       and generator will generate packets at the speed of the established link.
+*        When enables packet generator, the following information should be
+*       provided:
+*           Payload Type:  either Random or 5AA55AA5
+*           Packet Length: either 64 or 1514 bytes
+*           Error Packet:  either Error packet or normal packet
+*
+* INPUTS:
+*         port     - The logical port number
+*       en      - GT_TRUE to enable, GT_FALSE to disable
+*       pktInfo - packet information(GT_PG structure pointer), if en is GT_TRUE.
+*                 ignored, if en is GT_FALSE
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetPktGenEnable
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL   en,
+    IN GT_PG     *pktInfo
+);
+
+
+
+/* gtSysCtrl.c */
+
+/*******************************************************************************
+* gsysSetForceSnoopPri
+*
+* DESCRIPTION:
+*        Force Snooping Priority. The priority on IGMP or MLD Snoop frames are
+*        set to the SnoopPri value (gsysSetSnoopPri API) when Force Snooping
+*       Priority is enabled. When it's disabled, the priority on these frames
+*        is not modified.
+*
+* INPUTS:
+*        en - GT_TRUE to use defined PRI bits, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetForceSnoopPri
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+);
+
+/*******************************************************************************
+* gsysGetForceSnoopPri
+*
+* DESCRIPTION:
+*        Force Snooping Priority. The priority on IGMP or MLD Snoop frames are
+*        set to the SnoopPri value (gsysSetSnoopPri API) when Force Snooping
+*       Priority is enabled. When it's disabled, the priority on these frames
+*        is not modified.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE to use defined PRI bits, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetForceSnoopPri
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+);
+
+
+/*******************************************************************************
+* gsysSetSnoopPri
+*
+* DESCRIPTION:
+*        Snoop Priority. When ForceSnoopPri (gsysSetForceSnoopPri API) is enabled,
+*       this priority is used as the egressing frame's PRI[2:0] bits on generated
+*       Marvell Tag To_CPU Snoop frames and higher 2 bits of the priority are
+*       used as the internal Queue Priority to use on IGMP/MLD snoop frames.
+*
+* INPUTS:
+*        pri - PRI[2:0] bits (should be less than 8)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - If pri is not less than 8.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetSnoopPri
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U16        pri
+);
+
+
+/*******************************************************************************
+* gsysGetSnoopPri
+*
+* DESCRIPTION:
+*        Snoop Priority. When ForceSnoopPri (gsysSetForceSnoopPri API) is enabled,
+*       this priority is used as the egressing frame's PRI[2:0] bits on generated
+*       Marvell Tag To_CPU Snoop frames and higher 2 bits of the priority are
+*       used as the internal Queue Priority to use on IGMP/MLD snoop frames.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        pri - PRI[2:0] bits (should be less than 8)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetSnoopPri
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U16      *pri
+);
+
+
+/*******************************************************************************
+* gsysSetForceARPPri
+*
+* DESCRIPTION:
+*        Force ARP Priority. The priority on ARP frames are set to the ARPPri
+*       value (gsysSetARPPri API) when Force ARP Priority is enabled. When it's
+*       disabled, the priority on these frames is not modified.
+*
+* INPUTS:
+*        en - GT_TRUE to use defined PRI bits, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetForceARPPri
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+);
+
+/*******************************************************************************
+* gsysGetForceARPPri
+*
+* DESCRIPTION:
+*        Force ARP Priority. The priority on ARP frames are set to the ARPPri
+*       value (gsysSetARPPri API) when Force ARP Priority is enabled. When it's
+*       disabled, the priority on these frames is not modified.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE to use defined PRI bits, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetForceARPPri
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+);
+
+
+/*******************************************************************************
+* gsysSetARPPri
+*
+* DESCRIPTION:
+*        ARP Priority. When ForceARPPri (gsysSetForceARPPri API) is enabled,
+*       this priority is used as the egressing frame's PRI[2:0] bits on generated
+*       Marvell Tag To_CPU ARP frames and higher 2 bits of the priority are
+*       used as the internal Queue Priority to use on ARP frames.
+*
+* INPUTS:
+*        pri - PRI[2:0] bits (should be less than 8)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - If pri is not less than 8.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetARPPri
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U16        pri
+);
+
+
+/*******************************************************************************
+* gsysGetARPPri
+*
+* DESCRIPTION:
+*        ARP Priority. When ForceARPPri (gsysSetForceARPPri API) is enabled,
+*       this priority is used as the egressing frame's PRI[2:0] bits on generated
+*       Marvell Tag To_CPU ARP frames and higher 2 bits of the priority are
+*       used as the internal Queue Priority to use on ARP frames.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        pri - PRI[2:0] bits (should be less than 8)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetARPPri
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U16      *pri
+);
+
+
+/* added for 88E6065 */
+
+/* gtBrgVlan.c */
+
+/********************************************************************
+* gvlnSetForceMap
+*
+* DESCRIPTION:
+*       This routine enables/disables Force Map feature.
+*        When Force Map feature is enabled, all received frames will be
+*        considered MGMT and they are mapped to the port or ports defined
+*        in the VLAN Table overriding the mapping from the address database.
+*
+* INPUTS:
+*       port    - logical port number to set.
+*       mode    - GT_TRUE, to enable force map feature
+*                 GT_FAULSE, otherwise
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gvlnSetForceMap
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_LPORT     port,
+    IN  GT_BOOL      mode
+);
+
+
+/********************************************************************
+* gvlnGetForceMap
+*
+* DESCRIPTION:
+*       This routine checks if Force Map feature is enabled.
+*        When Force Map feature is enabled, all received frames will be
+*        considered MGMT and they are mapped to the port or ports defined
+*        in the VLAN Table overriding the mapping from the address database.
+*
+* INPUTS:
+*       port    - logical port number to set.
+*
+* OUTPUTS:
+*       mode    - GT_TRUE, to enable force map feature
+*                 GT_FAULSE, otherwise
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gvlnGetForceMap
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *mode
+);
+
+/* gtEvents.c */
+
+/*******************************************************************************
+* geventSetAgeIntEn
+*
+* DESCRIPTION:
+*        This routine enables/disables Age Interrupt for a port.
+*        When it's enabled, ATU Age Violation interrupts from this port are enabled.
+*        An Age Violation will occur anytime a port is Locked(gprtSetLockedPort)
+*        and the ingressing frame's SA is contained in the ATU as a non-Static
+*        entry with a EntryState less than 0x4.
+*
+* INPUTS:
+*        port - the logical port number
+*        mode - GT_TRUE to enable Age Interrupt,
+*               GT_FALUSE to disable
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS geventSetAgeIntEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_BOOL        mode
+);
+
+/*******************************************************************************
+* geventGetAgeIntEn
+*
+* DESCRIPTION:
+*        This routine gets Age Interrupt Enable for the port.
+*        When it's enabled, ATU Age Violation interrupts from this port are enabled.
+*        An Age Violation will occur anytime a port is Locked(gprtSetLockedPort)
+*        and the ingressing frame's SA is contained in the ATU as a non-Static
+*        entry with a EntryState less than 0x4.
+*
+* INPUTS:
+*        port - the logical port number
+*        mode - GT_TRUE to enable Age Interrupt,
+*               GT_FALUSE to disable
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS geventGetAgeIntEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+);
+
+
+/* gtPIRL.c */
+
+/*******************************************************************************
+* gpirlActivate
+*
+* DESCRIPTION:
+*       This routine activates Ingress Rate Limiting for the given ports by
+*        initializing a resource bucket, assigning ports, and configuring
+*        Bucket Parameters.
+*
+* INPUTS:
+*        irlUnit  - bucket to be used (0 ~ 11).
+*       portVec  - the list of ports that share the bucket.
+*        pirlData - PIRL resource parameters.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpirlActivate
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        irlUnit,
+    IN  GT_U32        portVec,
+    IN  GT_PIRL_DATA    *pirlData
+);
+
+/*******************************************************************************
+* gpirlDeactivate
+*
+* DESCRIPTION:
+*       This routine deactivates Ingress Rate Limiting for the given bucket.
+*        It simply removes every ports from the Ingress Rate Resource.
+*        It is assumed that gpirlActivate has been successfully called with
+*        the irlUnit before this function is called.
+*
+* INPUTS:
+*        irlUnit  - bucket to be deactivated
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpirlDeactivate
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        irlUnit
+);
+
+/*******************************************************************************
+* gpirlUpdateParam
+*
+* DESCRIPTION:
+*       This routine updates IRL Parameter.
+*        It is assumed that gpirlActivate has been successfully called with
+*        the given irlUnit before this function is called.
+*
+* INPUTS:
+*        irlUnit  - bucket to be used (0 ~ 11)
+*        pirlData - PIRL resource parameters
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpirlUpdateParam
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        irlUnit,
+    IN  GT_PIRL_DATA    *pirlData
+);
+
+/*******************************************************************************
+* gpirlReadParam
+*
+* DESCRIPTION:
+*       This routine retrieves IRL Parameter.
+*        It is assumed that gpirlActivate has been successfully called with
+*        the given irlUnit before this function is called.
+*
+* INPUTS:
+*        irlUnit  - bucket to be used (0 ~ 11).
+*
+* OUTPUTS:
+*        pirlData - PIRL resource parameters.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpirlReadParam
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        irlUnit,
+    OUT GT_PIRL_DATA    *pirlData
+);
+
+/*******************************************************************************
+* gpirlUpdatePortVec
+*
+* DESCRIPTION:
+*       This routine updates port list that share the bucket.
+*        It is assumed that gpirlActivate has been successfully called with
+*        the given irlUnit before this function is called.
+*
+* INPUTS:
+*        irlUnit  - bucket to be used (0 ~ 11).
+*       portVec  - the list of ports that share the bucket.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpirlUpdatePortVec
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        irlUnit,
+    IN  GT_U32        portVec
+);
+
+/*******************************************************************************
+* gpirlReadPortVec
+*
+* DESCRIPTION:
+*       This routine retrieves port list that share the bucket.
+*        It is assumed that gpirlActivate has been successfully called with
+*        the given irlUnit before this function is called.
+*
+* INPUTS:
+*        irlUnit  - bucket to be used (0 ~ 11).
+*
+* OUTPUTS:
+*       portVec  - the list of ports that share the bucket.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpirlReadPortVec
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        irlUnit,
+    OUT GT_U32        *portVec
+);
+
+/*******************************************************************************
+* grcGetPirlFcMode
+*
+* DESCRIPTION:
+*       This routine gets Port Ingress Rate Limit Flow Control mode.
+*        When EBSLimitAction is programmed to generate a flow control message,
+*        the deassertion of flow control is controlled by this mode.
+*            GT_PIRL_FC_DEASSERT_EMPTY:
+*                De-assert when the ingress rate resource has become empty
+*            GT_PIRL_FC_DEASSERT_CBS_LIMIT
+*                De-assert when the ingress rate resource has enough room as
+*                specified by the CBSLimit.
+*        Please refer to GT_PIRL_RESOURCE structure for EBSLimitAction and
+*        CBSLimit.
+*
+* INPUTS:
+*       port - logical port number
+*
+* OUTPUTS:
+*        mode - GT_PIRL_FC_DEASSERT enum type
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS grcGetPirlFcMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_PIRL_FC_DEASSERT        *mode
+);
+
+/*******************************************************************************
+* gpirlGetIngressRateResource
+*
+* DESCRIPTION:
+*       This routine gets Ingress Rate Limiting Resources assigned to the port.
+*        This vector is used to attach specific counter resources to the physical
+*        port. And the same counter resource can be attached to more than one port.
+*
+* INPUTS:
+*       port   - logical port number
+*
+* OUTPUTS:
+*        resVec - resource vector (bit 0 for irl unit 0, bit 1 for irl unit 1, etc.)
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gpirlGetIngressRateResource
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_U32        *resVec
+);
+
+
+
+/* gtPortStatus.c */
+
+/*******************************************************************************
+* gprtGetPxMode
+*
+* DESCRIPTION:
+*        This routine retrives 4 bits of Px_MODE Configuration value.
+*        If speed and duplex modes are forced, the returned mode value would be
+*        different from the configuration pin values.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - Px_MODE configuration value
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPxMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_U32      *mode
+);
+
+/*******************************************************************************
+* gprtGetMiiInterface
+*
+* DESCRIPTION:
+*        This routine retrives Mii Interface Mode.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if Mii Interface is enabled,
+*                  GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetMiiInterface
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+);
+
+/*******************************************************************************
+* gprtGetFdFlowDis
+*
+* DESCRIPTION:
+*        This routine retrives the read time value of the Full Duplex Flow Disable.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if Full Duplex Flow Disable.
+*                   GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetFdFlowDis
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+);
+
+/*******************************************************************************
+* gprtGetHdFlowDis
+*
+* DESCRIPTION:
+*        This routine retrives the read time value of the Half Duplex Flow Disable.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if Half Duplex Flow Disable.
+*                   GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetHdFlowDis
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+);
+
+/*******************************************************************************
+* gprtGetOutQSize
+*
+* DESCRIPTION:
+*        This routine gets egress queue size counter value.
+*        This counter reflects the current number of Egress buffers switched to
+*        this port. This is the total number of buffers across all four priority
+*        queues.
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        count - egress queue size counter value
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetOutQSize
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_U16        *count
+);
+
+
+/* gtPortCtrl.c */
+
+/*******************************************************************************
+* gprtSetSAFiltering
+*
+* DESCRIPTION:
+*        This routine set the Source Address(SA) fitering method.
+*            GT_SA_FILTERING_DISABLE :
+*                no frame will be filtered.
+*            GT_SA_DROP_ON_LOCK :
+*                discard if SA field is not in the ATU's address database.
+*            GT_SA_DROP_ON_UNLOC :
+*                discard if SA field is in the ATU's address database as Static
+*                entry with a PortVec of all zeros.
+*            GT_SA_DROP_TO_CPU :
+*                Ingressing frames will be mapped to the CPU Port if their SA
+*                field is in the ATU's address database as Static entry with a
+*                PortVec of all zeros. Otherwise, the frames will be discarded
+*                if their SA field is not in the ATU's address database or if this
+*                port's bit is not set in the PortVec bits for the frame's SA.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - GT_SA_FILTERING structure
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+*******************************************************************************/
+GT_STATUS gprtSetSAFiltering
+(
+    IN GT_QD_DEV  *dev,
+    IN GT_LPORT   port,
+    IN GT_SA_FILTERING    mode
+);
+
+/*******************************************************************************
+* gprtGetSAFiltering
+*
+* DESCRIPTION:
+*        This routine gets the Source Address(SA) fitering method.
+*            GT_SA_FILTERING_DISABLE :
+*                no frame will be filtered.
+*            GT_SA_DROP_ON_LOCK :
+*                discard if SA field is not in the ATU's address database.
+*            GT_SA_DROP_ON_UNLOC :
+*                discard if SA field is in the ATU's address database as Static
+*                entry with a PortVec of all zeros.
+*            GT_SA_DROP_TO_CPU :
+*                Ingressing frames will be mapped to the CPU Port if their SA
+*                field is in the ATU's address database as Static entry with a
+*                PortVec of all zeros. Otherwise, the frames will be discarded
+*                if their SA field is not in the ATU's address database or if this
+*                port's bit is not set in the PortVec bits for the frame's SA.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_SA_FILTERING structure
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+*******************************************************************************/
+GT_STATUS gprtGetSAFiltering
+(
+    IN  GT_QD_DEV  *dev,
+    IN  GT_LPORT   port,
+    OUT GT_SA_FILTERING    *mode
+);
+
+
+/*******************************************************************************
+* gprtSetARPtoCPU
+*
+* DESCRIPTION:
+*        When ARPtoCPU is set to GT_TRUE, ARP frames are mapped to the CPU port.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - GT_TRUE, to map ARP frames to CPU Port,
+*               GT_FALSE, otherwise.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+*******************************************************************************/
+GT_STATUS gprtSetARPtoCPU
+(
+    IN GT_QD_DEV  *dev,
+    IN GT_LPORT   port,
+    IN GT_BOOL    mode
+);
+
+
+/*******************************************************************************
+* gprtGetARPtoCPU
+*
+* DESCRIPTION:
+*        When ARPtoCPU is set to GT_TRUE, ARP frames are mapped to the CPU port.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_TRUE, to map ARP frames to CPU Port,
+*               GT_FALSE, otherwise.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+*******************************************************************************/
+GT_STATUS gprtGetARPtoCPU
+(
+    IN  GT_QD_DEV  *dev,
+    IN  GT_LPORT   port,
+    OUT GT_BOOL    *mode
+);
+
+/*******************************************************************************
+* gprtSetEgressFlood
+*
+* DESCRIPTION:
+*       This routine set Egress Flooding Mode.
+*        Frames with unknown DA (Destination Address that is not in ATU database)
+*        generally flood out all the ports. This mode can be used to prevent
+*        those frames from egressing this port as follows:
+*            GT_BLOCK_EGRESS_UNKNOWN
+*                do not egress frame with unknown DA (both unicast and multicast)
+*            GT_BLOCK_EGRESS_UNKNOWN_MULTICAST
+*                do not egress frame with unknown multicast DA
+*            GT_BLOCK_EGRESS_UNKNOWN_UNICAST
+*                do not egress frame with unknown unicast DA
+*            GT_BLOCK_EGRESS_NONE
+*                egress all frames with unknown DA
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - GT_EGRESS_FLOOD structure
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetEgressFlood
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_LPORT     port,
+    IN  GT_EGRESS_FLOOD      mode
+);
+
+/*******************************************************************************
+* gprtGetEgressFlood
+*
+* DESCRIPTION:
+*       This routine gets Egress Flooding Mode.
+*        Frames with unknown DA (Destination Address that is not in ATU database)
+*        generally flood out all the ports. This mode can be used to prevent
+*        those frames from egressing this port as follows:
+*            GT_BLOCK_EGRESS_UNKNOWN
+*                do not egress frame with unknown DA (both unicast and multicast)
+*            GT_BLOCK_EGRESS_UNKNOWN_MULTICAST
+*                do not egress frame with unknown multicast DA
+*            GT_BLOCK_EGRESS_UNKNOWN_UNICAST
+*                do not egress frame with unknown unicast DA
+*            GT_BLOCK_EGRESS_NONE
+*                egress all frames with unknown DA
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_EGRESS_FLOOD structure
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetEgressFlood
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_LPORT     port,
+    OUT GT_EGRESS_FLOOD      *mode
+);
+
+/*******************************************************************************
+* gprtSetPortSched
+*
+* DESCRIPTION:
+*        This routine sets Port Scheduling Mode.
+*        When usePortSched is enablied, this mode is used to select the Queue
+*        controller's scheduling on the port as follows:
+*            GT_PORT_SCHED_WEIGHTED_RRB - use 8,4,2,1 weighted fair scheduling
+*            GT_PORT_SCHED_STRICT_PRI - use a strict priority scheme
+*
+* INPUTS:
+*        port - the logical port number
+*        mode - GT_PORT_SCHED_MODE enum type
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetPortSched
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_PORT_SCHED_MODE        mode
+);
+
+/*******************************************************************************
+* gprtGetPortSched
+*
+* DESCRIPTION:
+*        This routine gets Port Scheduling Mode.
+*        When usePortSched is enablied, this mode is used to select the Queue
+*        controller's scheduling on the port as follows:
+*            GT_PORT_SCHED_WEIGHTED_RRB - use 8,4,2,1 weighted fair scheduling
+*            GT_PORT_SCHED_STRICT_PRI - use a strict priority scheme
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        mode - GT_PORT_SCHED_MODE enum type
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPortSched
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_PORT_SCHED_MODE        *mode
+);
+
+
+/*******************************************************************************
+* gprtSetProviderTag
+*
+* DESCRIPTION:
+*        This routine sets Provider Tag which indicates the provider tag (Ether
+*        Type) value that needs to be matched to in ingress to determine if a
+*        frame is Provider tagged or not.
+*
+* INPUTS:
+*        port - the logical port number
+*        tag  - Provider Tag (Ether Type)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetProviderTag
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_U16        tag
+);
+
+/*******************************************************************************
+* gprtGetProviderTag
+*
+* DESCRIPTION:
+*        This routine gets Provider Tag which indicates the provider tag (Ether
+*        Type) value that needs to be matched to in ingress to determine if a
+*        frame is Provider tagged or not.
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        tag  - Provider Tag (Ether Type)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetProviderTag
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_U16        *tag
+);
+
+
+
+/* gtPortRateCtrl.c */
+
+/*******************************************************************************
+* grcSetVidNrlEn
+*
+* DESCRIPTION:
+*       This routine enables/disables VID None Rate Limit (NRL).
+*        When VID NRL is enabled and the determined VID of a frame results in a VID
+*        whose VIDNonRateLimit in the VTU Table is set to GT_TURE, then the frame
+*        will not be ingress nor egress rate limited.
+*
+* INPUTS:
+*       port - logical port number.
+*        mode - GT_TRUE to enable VID None Rate Limit
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS grcSetVidNrlEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_BOOL        mode
+);
+
+/*******************************************************************************
+* grcGetVidNrlEn
+*
+* DESCRIPTION:
+*       This routine gets VID None Rate Limit (NRL) mode.
+*        When VID NRL is enabled and the determined VID of a frame results in a VID
+*        whose VIDNonRateLimit in the VTU Table is set to GT_TURE, then the frame
+*        will not be ingress nor egress rate limited.
+*
+* INPUTS:
+*       port - logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE to enable VID None Rate Limit
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS grcGetVidNrlEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+);
+
+/*******************************************************************************
+* grcSetSaNrlEn
+*
+* DESCRIPTION:
+*       This routine enables/disables SA None Rate Limit (NRL).
+*        When SA NRL is enabled and the source address of a frame results in a ATU
+*        hit where the SA's MAC address returns an EntryState that indicates Non
+*        Rate Limited, then the frame will not be ingress nor egress rate limited.
+*
+* INPUTS:
+*       port - logical port number.
+*        mode - GT_TRUE to enable SA None Rate Limit
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS grcSetSaNrlEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_BOOL        mode
+);
+
+/*******************************************************************************
+* grcGetSaNrlEn
+*
+* DESCRIPTION:
+*       This routine gets SA None Rate Limit (NRL) mode.
+*        When SA NRL is enabled and the source address of a frame results in a ATU
+*        hit where the SA's MAC address returns an EntryState that indicates Non
+*        Rate Limited, then the frame will not be ingress nor egress rate limited.
+*
+* INPUTS:
+*       port - logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE to enable SA None Rate Limit
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS grcGetSaNrlEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+);
+
+/*******************************************************************************
+* grcSetDaNrlEn
+*
+* DESCRIPTION:
+*       This routine enables/disables DA None Rate Limit (NRL).
+*        When DA NRL is enabled and the destination address of a frame results in
+*        a ATU hit where the DA's MAC address returns an EntryState that indicates
+*        Non Rate Limited, then the frame will not be ingress nor egress rate
+*        limited.
+*
+* INPUTS:
+*       port - logical port number.
+*        mode - GT_TRUE to enable DA None Rate Limit
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS grcSetDaNrlEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_BOOL        mode
+);
+
+/*******************************************************************************
+* grcGetDaNrlEn
+*
+* DESCRIPTION:
+*       This routine gets SA None Rate Limit (NRL) mode.
+*        When DA NRL is enabled and the destination address of a frame results in
+*        a ATU hit where the DA's MAC address returns an EntryState that indicates
+*        Non Rate Limited, then the frame will not be ingress nor egress rate
+*        limited.
+*
+* INPUTS:
+*       port - logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE to enable DA None Rate Limit
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS grcGetDaNrlEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+);
+
+/*******************************************************************************
+* grcSetELimitMode
+*
+* DESCRIPTION:
+*       This routine sets Egress Rate Limit counting mode.
+*        The supported modes are as follows:
+*            GT_PIRL_ELIMIT_FRAME -
+*                Count the number of frames
+*            GT_PIRL_ELIMIT_LAYER1 -
+*                Count all Layer 1 bytes:
+*                Preamble (8bytes) + Frame's DA to CRC + IFG (12bytes)
+*            GT_PIRL_ELIMIT_LAYER2 -
+*                Count all Layer 2 bytes: Frame's DA to CRC
+*            GT_PIRL_ELIMIT_LAYER1 -
+*                Count all Layer 1 bytes:
+*                Frame's DA to CRC - 18 - 4 (if frame is tagged)
+*
+* INPUTS:
+*       port - logical port number
+*        mode - GT_PIRL_ELIMIT_MODE enum type
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*        GT_PIRL_ELIMIT_FRAME mode is supported by only a few devices.
+*        Please refer to the device datasheet for details.
+*
+*******************************************************************************/
+GT_STATUS grcSetELimitMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_PIRL_ELIMIT_MODE        mode
+);
+
+/*******************************************************************************
+* grcGetELimitMode
+*
+* DESCRIPTION:
+*       This routine gets Egress Rate Limit counting mode.
+*        The supported modes are as follows:
+*            GT_PIRL_ELIMIT_FRAME -
+*                Count the number of frames
+*            GT_PIRL_ELIMIT_LAYER1 -
+*                Count all Layer 1 bytes:
+*                Preamble (8bytes) + Frame's DA to CRC + IFG (12bytes)
+*            GT_PIRL_ELIMIT_LAYER2 -
+*                Count all Layer 2 bytes: Frame's DA to CRC
+*            GT_PIRL_ELIMIT_LAYER1 -
+*                Count all Layer 1 bytes:
+*                Frame's DA to CRC - 18 - 4 (if frame is tagged)
+*
+* INPUTS:
+*       port - logical port number
+*
+* OUTPUTS:
+*        mode - GT_PIRL_ELIMIT_MODE enum type
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*        GT_PIRL_ELIMIT_FRAME mode is supported by only a few devices.
+*        Please refer to the device datasheet for details.
+*
+*******************************************************************************/
+GT_STATUS grcGetELimitMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_PIRL_ELIMIT_MODE        *mode
+);
+
+/*******************************************************************************
+* grcSetRsvdNrlEn
+*
+* DESCRIPTION:
+*       This routine sets Reserved Non Rate Limit.
+*        When this feature is enabled, frames that match the requirements of the
+*        Rsvd2Cpu bit below will also be considered to be ingress and egress non
+*        rate limited.
+*
+* INPUTS:
+*       en - GT_TRUE to enable Reserved Non Rate Limit,
+*             GT_FALSE to disable
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS grcSetRsvdNrlEn
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_BOOL   en
+);
+
+/*******************************************************************************
+* grcGetRsvdNrlEn
+*
+* DESCRIPTION:
+*       This routine gets Reserved Non Rate Limit.
+*        When this feature is enabled, frames that match the requirements of the
+*        Rsvd2Cpu bit below will also be considered to be ingress and egress non
+*        rate limited.
+*
+* INPUTS:
+*       en - GT_TRUE to enable Reserved Non Rate Limit,
+*             GT_FALSE to disable
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS grcGetRsvdNrlEn
+(
+    IN  GT_QD_DEV *dev,
+    OUT GT_BOOL   *en
+);
+
+
+/* gtPortRmon.c */
+
+/*******************************************************************************
+* gstatsGetRealtimePortCounter
+*
+* DESCRIPTION:
+*        This routine gets a specific realtime counter of the given port
+*
+* INPUTS:
+*        port - the logical port number.
+*        counter - the counter which will be read
+*
+* OUTPUTS:
+*        statsData - points to 32bit data storage for the MIB counter
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gstatsGetRealtimePortCounter
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    IN  GT_STATS_COUNTERS3    counter,
+    OUT GT_U32            *statsData
+);
+
+
+/* gtQosMap.c */
+
+/*******************************************************************************
+* gqosSetVIDFPriOverride
+*
+* DESCRIPTION:
+*        This routine sets VID Frame Priority Override. When this feature is enabled,
+*        VID Frame priority overrides can occur on this port.
+*        VID Frame priority override occurs when the determined VID of a frame
+*        results in a VTU entry whose useVIDFPri override field is set to GT_TRUE.
+*        When this occurs the VIDFPri value assigned to the frame's VID (in the
+*        VTU Table) is used to overwrite the frame's previously determined frame
+*        priority. If the frame egresses tagged the priority in the frame will be
+*        this new VIDFPri value. This function does not affect the egress queue
+*        priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for VID Frame Priority Override,
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosSetVIDFPriOverride
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gqosGetVIDFPriOverride
+*
+* DESCRIPTION:
+*        This routine gets VID Frame Priority Override. When this feature is enabled,
+*        VID Frame priority overrides can occur on this port.
+*        VID Frame priority override occurs when the determined VID of a frame
+*        results in a VTU entry whose useVIDFPri override field is set to GT_TRUE.
+*        When this occurs the VIDFPri value assigned to the frame's VID (in the
+*        VTU Table) is used to overwrite the frame's previously determined frame
+*        priority. If the frame egresses tagged the priority in the frame will be
+*        this new VIDFPri value. This function does not affect the egress queue
+*        priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE for VID Frame Priority Override,
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosGetVIDFPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+);
+
+/*******************************************************************************
+* gqosSetSAFPriOverride
+*
+* DESCRIPTION:
+*        This routine sets Source Address(SA) Frame Priority Override.
+*        When this feature is enabled, SA Frame priority overrides can occur on
+*        this port.
+*        SA ATU Frame priority override occurs when the determined source address
+*        of a frame results in an ATU hit where the SA's MAC address entry contains
+*        the useATUFPri field set to GT_TRUE.
+*        When this occurs the ATUFPri value assigned to the frame's SA (in the
+*        ATU Table) is used to overwrite the frame's previously determined frame
+*        priority. If the frame egresses tagged the priority in the frame will be
+*        this new ATUFPri value. This function does not affect the egress queue
+*        priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for SA Frame Priority Override,
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosSetSAFPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gqosGetSAFPriOverride
+*
+* DESCRIPTION:
+*        This routine gets Source Address(SA) Frame Priority Override.
+*        When this feature is enabled, SA Frame priority overrides can occur on
+*        this port.
+*        SA ATU Frame priority override occurs when the determined source address
+*        of a frame results in an ATU hit where the SA's MAC address entry contains
+*        the useATUFPri field set to GT_TRUE.
+*        When this occurs the ATUFPri value assigned to the frame's SA (in the
+*        ATU Table) is used to overwrite the frame's previously determined frame
+*        priority. If the frame egresses tagged the priority in the frame will be
+*        this new ATUFPri value. This function does not affect the egress queue
+*        priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE for SA Frame Priority Override,
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosGetSAFPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+);
+
+/*******************************************************************************
+* gqosSetDAFPriOverride
+*
+* DESCRIPTION:
+*        This routine sets Destination Address(DA) Frame Priority Override.
+*        When this feature is enabled, DA Frame priority overrides can occur on
+*        this port.
+*        DA ATU Frame priority override occurs when the determined destination address
+*        of a frame results in an ATU hit where the DA's MAC address entry contains
+*        the useATUFPri field set to GT_TRUE.
+*        When this occurs the ATUFPri value assigned to the frame's DA (in the
+*        ATU Table) is used to overwrite the frame's previously determined frame
+*        priority. If the frame egresses tagged the priority in the frame will be
+*        this new ATUFPri value. This function does not affect the egress queue
+*        priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for DA Frame Priority Override,
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosSetDAFPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gqosGetDAFPriOverride
+*
+* DESCRIPTION:
+*        This routine gets Destination Address(DA) Frame Priority Override.
+*        When this feature is enabled, DA Frame priority overrides can occur on
+*        this port.
+*        DA ATU Frame priority override occurs when the determined destination address
+*        of a frame results in an ATU hit where the DA's MAC address entry contains
+*        the useATUFPri field set to GT_TRUE.
+*        When this occurs the ATUFPri value assigned to the frame's DA (in the
+*        ATU Table) is used to overwrite the frame's previously determined frame
+*        priority. If the frame egresses tagged the priority in the frame will be
+*        this new ATUFPri value. This function does not affect the egress queue
+*        priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE for DA Frame Priority Override,
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosGetDAFPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+);
+
+/*******************************************************************************
+* gqosSetVIDQPriOverride
+*
+* DESCRIPTION:
+*        This routine sets VID Queue Priority Override. When this feature is enabled,
+*        VID Queue priority overrides can occur on this port.
+*        VID Queue priority override occurs when the determined VID of a frame
+*        results in a VTU entry whose useVIDQPri override field is set to GT_TRUE.
+*        When this occurs the VIDQPri value assigned to the frame's VID (in the
+*        VTU Table) is used to overwrite the frame's previously determined queue
+*        priority. If the frame egresses tagged the priority in the frame will not
+*        be modified by this new VIDQPri value. This function affects the egress
+*        queue priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for VID Queue Priority Override,
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosSetVIDQPriOverride
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gqosGetVIDQPriOverride
+*
+* DESCRIPTION:
+*        This routine gets VID Queue Priority Override. When this feature is enabled,
+*        VID Queue priority overrides can occur on this port.
+*        VID Queue priority override occurs when the determined VID of a frame
+*        results in a VTU entry whose useVIDQPri override field is set to GT_TRUE.
+*        When this occurs the VIDQPri value assigned to the frame's VID (in the
+*        VTU Table) is used to overwrite the frame's previously determined queue
+*        priority. If the frame egresses tagged the priority in the frame will not
+*        be modified by this new VIDQPri value. This function affects the egress
+*        queue priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE for VID Queue Priority Override,
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosGetVIDQPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+);
+
+/*******************************************************************************
+* gqosSetSAQPriOverride
+*
+* DESCRIPTION:
+*        This routine sets Source Address(SA) Queue Priority Override.
+*        When this feature is enabled, SA Queue priority overrides can occur on
+*        this port.
+*        SA ATU Queue priority override occurs when the determined source address
+*        of a frame results in an ATU hit where the SA's MAC address entry contains
+*        the useATUQPri field set to GT_TRUE.
+*        When this occurs the ATUQPri value assigned to the frame's SA (in the
+*        ATU Table) is used to overwrite the frame's previously determined queue
+*        priority. If the frame egresses tagged the priority in the frame will not
+*        be modified by this new ATUQPri value. This function affects the egress
+*        queue priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for SA Queue Priority Override,
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosSetSAQPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gqosGetSAQPriOverride
+*
+* DESCRIPTION:
+*        This routine gets Source Address(SA) Queue Priority Override.
+*        When this feature is enabled, SA Queue priority overrides can occur on
+*        this port.
+*        SA ATU Queue priority override occurs when the determined source address
+*        of a frame results in an ATU hit where the SA's MAC address entry contains
+*        the useATUQPri field set to GT_TRUE.
+*        When this occurs the ATUQPri value assigned to the frame's SA (in the
+*        ATU Table) is used to overwrite the frame's previously determined queue
+*        priority. If the frame egresses tagged the priority in the frame will not
+*        be modified by this new ATUQPri value. This function affects the egress
+*        queue priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE for SA Queue Priority Override,
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosGetSAQPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+);
+
+/*******************************************************************************
+* gqosSetDAQPriOverride
+*
+* DESCRIPTION:
+*        This routine sets Destination Address(DA) Queue Priority Override.
+*        When this feature is enabled, DA Queue priority overrides can occur on
+*        this port.
+*        DA ATU Queue priority override occurs when the determined destination address
+*        of a frame results in an ATU hit where the DA's MAC address entry contains
+*        the useATUQPri field set to GT_TRUE.
+*        When this occurs the ATUQPri value assigned to the frame's DA (in the
+*        ATU Table) is used to overwrite the frame's previously determined queue
+*        priority. If the frame egresses tagged the priority in the frame will not
+*        be modified by this new ATUQPri value. This function affects the egress
+*        queue priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for DA Queue Priority Override,
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosSetDAQPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gqosGetDAQPriOverride
+*
+* DESCRIPTION:
+*        This routine sets Destination Address(DA) Queue Priority Override.
+*        When this feature is enabled, DA Queue priority overrides can occur on
+*        this port.
+*        DA ATU Queue priority override occurs when the determined destination address
+*        of a frame results in an ATU hit where the DA's MAC address entry contains
+*        the useATUQPri field set to GT_TRUE.
+*        When this occurs the ATUQPri value assigned to the frame's DA (in the
+*        ATU Table) is used to overwrite the frame's previously determined queue
+*        priority. If the frame egresses tagged the priority in the frame will not
+*        be modified by this new ATUQPri value. This function affects the egress
+*        queue priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE for DA Queue Priority Override,
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosGetDAQPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+);
+
+/*******************************************************************************
+* gqosSetARPQPriOverride
+*
+* DESCRIPTION:
+*        This routine sets ARP Queue Priority Override.
+*        When this feature is enabled, ARP Queue priority overrides can occur on
+*        this port.
+*        ARP Queue priority override occurs for all ARP frames.
+*        When this occurs, the frame's previously determined egress queue priority
+*        will be overwritten with ArpQPri.
+*        If the frame egresses tagged the priority in the frame will not
+*        be modified. When used, the two bits of the ArpQPri priority determine the
+*        egress queue the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for ARP Queue Priority Override,
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosSetARPQPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gqosGetARPQPriOverride
+*
+* DESCRIPTION:
+*        This routine sets ARP Queue Priority Override.
+*        When this feature is enabled, ARP Queue priority overrides can occur on
+*        this port.
+*        ARP Queue priority override occurs for all ARP frames.
+*        When this occurs, the frame's previously determined egress queue priority
+*        will be overwritten with ArpQPri.
+*        If the frame egresses tagged the priority in the frame will not
+*        be modified. When used, the two bits of the ArpQPri priority determine the
+*        egress queue the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE for ARP Queue Priority Override,
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosGetARPQPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+);
+
+
+/*******************************************************************************
+* gqosSetQPriValue
+*
+* DESCRIPTION:
+*       This routine sets Queue priority value to used when forced.
+*        When ForceQPri is enabled (gqosSetForceQPri), all frames entering this port
+*        are mapped to the priority queue defined in this value, unless a VTU, SA,
+*        DA or ARP priority override occurs. The Frame's priority (FPri) is not
+*        effected by this value.
+*
+* INPUTS:
+*       port - the logical port number.
+*       pri  - Queue priority value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_BAD_PARAM - if pri > 3
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gqosSetQPriValue
+(
+    IN  GT_QD_DEV  *dev,
+    IN  GT_LPORT   port,
+    IN  GT_U8      pri
+);
+
+/*******************************************************************************
+* gqosGetQPriValue
+*
+* DESCRIPTION:
+*       This routine gets Queue priority value to used when forced.
+*        When ForceQPri is enabled (gqosSetForceQPri), all frames entering this port
+*        are mapped to the priority queue defined in this value, unless a VTU, SA,
+*        DA or ARP priority override occurs. The Frame's priority (FPri) is not
+*        effected by this value.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       pri  - Queue priority value
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gqosGetQPriValue
+(
+    IN  GT_QD_DEV  *dev,
+    IN  GT_LPORT   port,
+    OUT GT_U8      *pri
+);
+
+/*******************************************************************************
+* gqosSetForceQPri
+*
+* DESCRIPTION:
+*       This routine enables/disables forcing Queue priority.
+*        When ForceQPri is disabled, normal priority queue mapping is used on all
+*        ingressing frames entering this port. When it's enabled, all frames
+*        entering this port are mapped to the QPriValue (gqosSetQPriValue), unless
+*        a VTU, SA, DA or ARP priority override occurs. The frame's priorty (FPri)
+*        is not effected by this feature.
+*
+* INPUTS:
+*       port - the logical port number.
+*       en   - GT_TRUE, to force Queue Priority,
+*               GT_FALSE, otherwise.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gqosSetForceQPri
+(
+    IN  GT_QD_DEV  *dev,
+    IN  GT_LPORT   port,
+    IN  GT_BOOL    en
+);
+
+/*******************************************************************************
+* gqosGetForceQPri
+*
+* DESCRIPTION:
+*       This routine checks if forcing Queue priority is enabled.
+*        When ForceQPri is disabled, normal priority queue mapping is used on all
+*        ingressing frames entering this port. When it's enabled, all frames
+*        entering this port are mapped to the QPriValue (gqosSetQPriValue), unless
+*        a VTU, SA, DA or ARP priority override occurs. The frame's priorty (FPri)
+*        is not effected by this feature.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       en   - GT_TRUE, to force Queue Priority,
+*               GT_FALSE, otherwise.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gqosGetForceQPri
+(
+    IN  GT_QD_DEV  *dev,
+    IN  GT_LPORT   port,
+    OUT GT_BOOL    *en
+);
+
+/*******************************************************************************
+* gqosSetDefFPri
+*
+* DESCRIPTION:
+*       This routine sets the default frame priority (0 ~ 7).
+*        This priority is used as the default frame priority (FPri) to use when
+*        no other priority information is available.
+*
+* INPUTS:
+*       port - the logical port number
+*       pri  - default frame priority
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_BAD_PARAM - if pri > 7
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gqosSetDefFPri
+(
+    IN  GT_QD_DEV  *dev,
+    IN  GT_LPORT   port,
+    IN  GT_U8      pri
+);
+
+/*******************************************************************************
+* gqosGetDefFPri
+*
+* DESCRIPTION:
+*       This routine gets the default frame priority (0 ~ 7).
+*        This priority is used as the default frame priority (FPri) to use when
+*        no other priority information is available.
+*
+* INPUTS:
+*       port - the logical port number
+*
+* OUTPUTS:
+*       pri  - default frame priority
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gqosGetDefFPri
+(
+    IN  GT_QD_DEV  *dev,
+    IN  GT_LPORT   port,
+    OUT GT_U8      *pri
+);
+
+
+/*******************************************************************************
+* gqosSetArpQPri
+*
+* DESCRIPTION:
+*       This routine sets ARP queue Priority to use for ARP QPri Overridden
+*        frames. When a ARP frame is received on a por tthat has its ARP
+*        QPriOVerride is enabled, the QPri assigned to the frame comes from
+*        this value
+*
+* INPUTS:
+*       pri - ARP Queue Priority (0 ~ 3)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_BAD_PARAM - if pri > 3
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gqosSetArpQPri
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     pri
+);
+
+
+/*******************************************************************************
+* gqosGetArpQPri
+*
+* DESCRIPTION:
+*       This routine gets ARP queue Priority to use for ARP QPri Overridden
+*        frames. When a ARP frame is received on a por tthat has its ARP
+*        QPriOVerride is enabled, the QPri assigned to the frame comes from
+*        this value
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       pri - ARP Queue Priority (0 ~ 3)
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gqosGetArpQPri
+(
+    IN  GT_QD_DEV *dev,
+    OUT GT_U8     *pri
+);
+
+
+/* gtSysCtrl.c */
+
+/*******************************************************************************
+* gsysSetUsePortSchedule
+*
+* DESCRIPTION:
+*       This routine sets per port scheduling mode
+*
+* INPUTS:
+*       en - GT_TRUE enables per port scheduling,
+*             GT_FALSE disable.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetUsePortSchedule
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_BOOL   en
+);
+
+/*******************************************************************************
+* gsysGetUsePortSchedule
+*
+* DESCRIPTION:
+*       This routine gets per port scheduling mode
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       en - GT_TRUE enables per port scheduling,
+*             GT_FALSE disable.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetUsePortSchedule
+(
+    IN  GT_QD_DEV *dev,
+    OUT GT_BOOL   *en
+);
+
+/*******************************************************************************
+* gsysSetOldHader
+*
+* DESCRIPTION:
+*       This routine sets Egress Old Header.
+*        When this feature is enabled and frames are egressed with a Marvell Header,
+*        the format of the Header is slightly modified to be backwards compatible
+*        with previous devices that used the original Header. Specifically, bit 3
+*        of the Header's 2nd octet is cleared to a zero such that only FPri[2:1]
+*        is available in the Header.
+*
+* INPUTS:
+*       en - GT_TRUE to enable Old Header Mode,
+*             GT_FALSE to disable
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetOldHader
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_BOOL   en
+);
+
+/*******************************************************************************
+* gsysGetOldHader
+*
+* DESCRIPTION:
+*       This routine gets Egress Old Header.
+*        When this feature is enabled and frames are egressed with a Marvell Header,
+*        the format of the Header is slightly modified to be backwards compatible
+*        with previous devices that used the original Header. Specifically, bit 3
+*        of the Header's 2nd octet is cleared to a zero such that only FPri[2:1]
+*        is available in the Header.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       en - GT_TRUE to enable Old Header Mode,
+*             GT_FALSE to disable
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetOldHader
+(
+    IN  GT_QD_DEV *dev,
+    OUT GT_BOOL   *en
+);
+
+/*******************************************************************************
+* gsysSetRecursiveStrippingDisable
+*
+* DESCRIPTION:
+*       This routine determines if recursive tag stripping feature needs to be
+*        disabled.
+*
+* INPUTS:
+*       en - GT_TRUE to disable Recursive Tag Stripping,
+*             GT_FALSE to enable
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetRecursiveStrippingDisable
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_BOOL   en
+);
+
+/*******************************************************************************
+* gsysGetRecursiveStrippingDisable
+*
+* DESCRIPTION:
+*       This routine checks if recursive tag stripping feature is disabled.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       en - GT_TRUE, if Recursive Tag Stripping is disabled,
+*             GT_FALSE, otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetRecursiveStrippingDisable
+(
+    IN  GT_QD_DEV *dev,
+    OUT GT_BOOL   *en
+);
+
+/*******************************************************************************
+* gsysSetCPUPort
+*
+* DESCRIPTION:
+*       This routine sets CPU Port where Rsvd2Cpu frames and IGMP/MLD Snooped
+*        frames are destined.
+*
+* INPUTS:
+*       cpuPort - CPU Port
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetCPUPort
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  cpuPort
+);
+
+/*******************************************************************************
+* gsysGetCPUPort
+*
+* DESCRIPTION:
+*       This routine gets CPU Port where Rsvd2Cpu frames and IGMP/MLD Snooped
+*        frames are destined.
+*
+* INPUTS:
+*       cpuPort - CPU Port
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetCPUPort
+(
+    IN  GT_QD_DEV *dev,
+    OUT GT_LPORT  *cpuPort
+);
+
+
+
+/* gtSysStatus.c */
+
+/*******************************************************************************
+* gsysGetFreeQSize
+*
+* DESCRIPTION:
+*       This routine gets Free Queue Counter. This counter reflects the
+*        current number of unalllocated buffers available for all the ports.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       count - Free Queue Counter
+*
+* RETURNS:
+*       GT_OK            - on success
+*       GT_FAIL          - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetFreeQSize
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U16         *count
+);
+
+/*******************************************************************************
+                New APIs in DSDT 2.6
+*******************************************************************************/
+
+/* gtBrgFdb.c */
+
+/*******************************************************************************
+* gfdbSetPortAtuLearnLimit
+*
+* DESCRIPTION:
+*       Port's auto learning limit. When the limit is non-zero value, the number
+*        of MAC addresses that can be learned on this port are limited to the value
+*        specified in this API. When the learn limit has been reached any frame
+*        that ingresses this port with a source MAC address not already in the
+*        address database that is associated with this port will be discarded.
+*        Normal auto-learning will resume on the port as soon as the number of
+*        active unicast MAC addresses associated to this port is less than the
+*        learn limit.
+*        CPU directed ATU Load, Purge, or Move will not have any effect on the
+*        learn limit.
+*        This feature is disabled when the limit is zero.
+*        The following care is needed when enabling this feature:
+*            1) disable learning on the ports
+*            2) flush all non-static addresses in the ATU
+*            3) define the desired limit for the ports
+*            4) re-enable learing on the ports
+*
+* INPUTS:
+*       port  - logical port number
+*       limit - auto learning limit ( 0 ~ 255 )
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_BAD_PARAM - if limit > 0xFF
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbSetPortAtuLearnLimit
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT      port,
+    IN  GT_U32       limit
+);
+
+
+/*******************************************************************************
+* gfdbGetPortAtuLearnCnt
+*
+* DESCRIPTION:
+*       Read the current number of active unicast MAC addresses associated with
+*        the given port. This counter (LearnCnt) is held at zero if learn limit
+*        (gfdbSetPortAtuLearnLimit API) is set to zero.
+*
+* INPUTS:
+*       port  - logical port number
+*
+* OUTPUTS:
+*       count - current auto learning count
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbGetPortAtuLearnCnt
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT      port,
+    IN  GT_U32       *count
+);
+
+/*******************************************************************************
+* gfdbGetAtuAllCount
+*
+* DESCRIPTION:
+*       Counts all entries in the Address Translation Unit.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       count - number of valid entries.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gfdbGetAtuAllCount
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U32         *count
+);
+
+/*******************************************************************************
+* gfdbGetAtuAllCountInDBNum
+*
+* DESCRIPTION:
+*       Counts all entries in the defined FID (or DBNum).
+*
+* INPUTS:
+*       dbNum - DBNum of FID
+*
+* OUTPUTS:
+*       count - number of valid entries in FID (or DBNum).
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gfdbGetAtuAllCountInDBNum
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32         dbNum,
+    OUT GT_U32         *count
+);
+
+/*******************************************************************************
+* gfdbGetAtuDynamicCountInDBNum
+*
+* DESCRIPTION:
+*       Counts all non-static entries in the defined FID (or DBNum).
+*
+* INPUTS:
+*       dbNum - DBNum or FID
+*
+* OUTPUTS:
+*       count - number of valid non-static entries in FID (or DBNum).
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gfdbGetAtuDynamicCountInDBNum
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32         dbNum,
+    OUT GT_U32         *count
+);
+
+
+/* gtBrgStu.c */
+
+/*******************************************************************************
+* gstuGetEntryCount
+*
+* DESCRIPTION:
+*       Gets the current number of valid entries in the STU table
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       numEntries - number of STU entries.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gstuGetEntryCount
+(
+    IN  GT_QD_DEV *dev,
+    OUT GT_U32    *numEntries
+);
+
+/*******************************************************************************
+* gstuGetEntryFirst
+*
+* DESCRIPTION:
+*       Gets first lexicographic entry from the STU.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       stuEntry - find the first valid STU entry.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NO_SUCH - table is empty.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gstuGetEntryFirst
+(
+    IN  GT_QD_DEV       *dev,
+    OUT GT_STU_ENTRY    *stuEntry
+);
+
+/*******************************************************************************
+* gstuGetEntryNext
+*
+* DESCRIPTION:
+*       Gets next lexicographic STU entry from the specified SID.
+*
+* INPUTS:
+*       stuEntry - the SID to start the search.
+*
+* OUTPUTS:
+*       stuEntry - next STU entry.
+*
+* RETURNS:
+*       GT_OK      - on success.
+*       GT_FAIL    - on error or entry does not exist.
+*       GT_NO_SUCH - no more entries.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gstuGetEntryNext
+(
+    IN  GT_QD_DEV       *dev,
+    INOUT GT_STU_ENTRY  *stuEntry
+);
+
+/*******************************************************************************
+* gstuFindSidEntry
+*
+* DESCRIPTION:
+*       Find STU entry for a specific SID, it will return the entry, if found,
+*       along with its associated data
+*
+* INPUTS:
+*       stuEntry - contains the SID to searche for
+*
+* OUTPUTS:
+*       found    - GT_TRUE, if the appropriate entry exists.
+*       stuEntry - the entry parameters.
+*
+* RETURNS:
+*       GT_OK      - on success.
+*       GT_FAIL    - on error or entry does not exist.
+*       GT_NO_SUCH - no such entry.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gstuFindSidEntry
+(
+    IN  GT_QD_DEV       *dev,
+    INOUT GT_STU_ENTRY  *stuEntry,
+    OUT GT_BOOL         *found
+);
+
+/*******************************************************************************
+* gstuAddEntry
+*
+* DESCRIPTION:
+*       Creates or update the entry in STU table based on user input.
+*
+* INPUTS:
+*       stuEntry    - stu entry to insert to the STU.
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK             - on success
+*       GT_FAIL           - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gstuAddEntry
+(
+    IN  GT_QD_DEV       *dev,
+    IN  GT_STU_ENTRY    *stuEntry
+);
+
+/*******************************************************************************
+* gstuDelEntry
+*
+* DESCRIPTION:
+*       Deletes STU entry specified by user.
+*
+* INPUTS:
+*       stuEntry - the STU entry to be deleted
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gstuDelEntry
+(
+    IN  GT_QD_DEV       *dev,
+    IN  GT_STU_ENTRY     *stuEntry
+);
+
+
+/* gtCCPVT.c */
+
+/*******************************************************************************
+* gpvtInitialize
+*
+* DESCRIPTION:
+*       This routine initializes the PVT Table to all one's (initial state)
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpvtInitialize
+(
+    IN  GT_QD_DEV     *dev
+);
+
+/*******************************************************************************
+* gpvtWritePVTData
+*
+* DESCRIPTION:
+*       This routine write Cross Chip Port Vlan Data.
+*        Cross chip Port VLAN Data used as a bit mask to limit where cross chip
+*        frames can egress (in chip Port VLANs are masked using gvlnSetPortVlanPorts
+*        API). Cross chip frames are Forward frames that ingress a DSA or Ether
+*        Type DSA port (see gprtSetFrameMode API). Bit 0 is a mask for port 0,
+*        bit 1 for port 1, etc. When a port's mask bit is one, frames are allowed
+*        to egress that port on this device. When a port's mask bit is zero,
+*        frames are not allowed to egress that port on this device.
+*
+*        The Cross Chip Port VLAN Table is accessed by ingressing frames based
+*        upon the original source port of the frame using the Forward frame's DSA tag
+*        fields Src_Dev, Src_Port/Src_Trunk and Src_Is_Trunk. The 1 entry of the 512
+*        that is accessed by the frame is:
+*            If 5 Bit Port (in Global 2, offset 0x1D) = 0:
+*                If Src_Is_Trunk = 0   Src_Dev[4:0], Src_Port[3:0]119
+*                If Src_Is_Trunk = 1   Device Number (global offset 0x1C), Src_Trunk[3:0]
+*            If 5 Bit Port (in Global 2, offset 0x1D) = 1:
+*                If Src_Is_Trunk = 0   Src_Dev[3:0], Src_Port[4:0]120
+*                If Src_Is_Trunk = 1   Device Number[3:0], Src_Trunk[4:0]
+*
+*        Cross chip port VLANs with Trunks are supported in the table where this
+*        device's entries would be stored (defined by this device's Device Number).
+*        This portion of the table is available for Trunk entries because this device's
+*        port VLAN mappings to ports inside this device are masked by the port's
+*        VLAN Table (see gvlnSetPortVlanPorts API).
+*
+*
+* INPUTS:
+*        pvtPointer - pointer to the desired entry of PVT (0 ~ 511)
+*        pvtData    - Cross Chip Port Vlan Data
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpvtWritePVTData
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        pvtPointer,
+    IN  GT_U32        pvtData
+);
+
+/*******************************************************************************
+* gpvtReadPVTData
+*
+* DESCRIPTION:
+*       This routine reads Cross Chip Port Vlan Data.
+*        Cross chip Port VLAN Data used as a bit mask to limit where cross chip
+*        frames can egress (in chip Port VLANs are masked using gvlnSetPortVlanPorts
+*        API). Cross chip frames are Forward frames that ingress a DSA or Ether
+*        Type DSA port (see gprtSetFrameMode API). Bit 0 is a mask for port 0,
+*        bit 1 for port 1, etc. When a port's mask bit is one, frames are allowed
+*        to egress that port on this device. When a port's mask bit is zero,
+*        frames are not allowed to egress that port on this device.
+*
+*        The Cross Chip Port VLAN Table is accessed by ingressing frames based
+*        upon the original source port of the frame using the Forward frame's DSA tag
+*        fields Src_Dev, Src_Port/Src_Trunk and Src_Is_Trunk. The 1 entry of the 512
+*        that is accessed by the frame is:
+*            If 5 Bit Port (in Global 2, offset 0x1D) = 0:
+*                If Src_Is_Trunk = 0   Src_Dev[4:0], Src_Port[3:0]119
+*                If Src_Is_Trunk = 1   Device Number (global offset 0x1C), Src_Trunk[3:0]
+*            If 5 Bit Port (in Global 2, offset 0x1D) = 1:
+*                If Src_Is_Trunk = 0   Src_Dev[3:0], Src_Port[4:0]120
+*                If Src_Is_Trunk = 1   Device Number[3:0], Src_Trunk[4:0]
+*
+*        Cross chip port VLANs with Trunks are supported in the table where this
+*        device's entries would be stored (defined by this device's Device Number).
+*        This portion of the table is available for Trunk entries because this device's
+*        port VLAN mappings to ports inside this device are masked by the port's
+*        VLAN Table (see gvlnSetPortVlanPorts API).
+*
+*
+* INPUTS:
+*        pvtPointer - pointer to the desired entry of PVT (0 ~ 511)
+*
+* OUTPUTS:
+*        pvtData    - Cross Chip Port Vlan Data
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpvtReadPVTData
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        pvtPointer,
+    OUT GT_U32        *pvtData
+);
+
+
+/* gtEvents.c */
+
+/*******************************************************************************
+* geventGetDevIntStatus
+*
+* DESCRIPTION:
+*         Check to see which device interrupts (WatchDog, JamLimit, Duplex Mismatch, and
+*        SERDES Link Int) have occurred.
+*
+* INPUTS:
+*       intType - the type of interrupt which causes an interrupt.
+*                  any combination of
+*                    GT_DEV_INT_WATCHDOG,
+*                    GT_DEV_INT_JAMLIMIT,
+*                    GT_DEV_INT_DUPLEX_MISMATCH,
+*                    GT_DEV_INT_SERDES_LINK
+*        port    - logical port where GT_DEV_INT_DUPLEX_MISMATCH occurred.
+*                  valid only if GT_DEV_INT_DUPLEX_MISMATCH is set in intType.
+*        linkInt - SERDES port list where GT_DEV_INT_SERDES_LINK interrupt is
+*                  asserted. It's in vector format, Bit 10 is for port 10,
+*                  Bit 9 is for port 9, etc.
+*                  valid only if GT_DEV_INT_SERDES_LINK bit is set in intType.
+*                  These bits are only valid of the port that is in 1000Base-X mode.
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK - on success
+*         GT_FAIL - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS geventGetDevIntStatus
+(
+    IN  GT_QD_DEV             *dev,
+    OUT GT_DEV_INT_STATUS    *devIntStatus
+);
+
+/*******************************************************************************
+* geventSetAgeOutIntEn
+*
+* DESCRIPTION:
+*        Interrupt on Age Out. When aging is enabled, all non-static address
+*        entries in the ATU's address database are periodically aged.
+*        When this feature is set to GT_TRUE and an entry associated with this
+*        port is aged out, an AgeOutViolation will be captured for that entry.
+*
+* INPUTS:
+*        port - the logical port number
+*        mode - GT_TRUE to enable Age Out Interrupt,
+*               GT_FALUSE to disable
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS geventSetAgeOutIntEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_BOOL        mode
+);
+
+/*******************************************************************************
+* geventGetAgeOutIntEn
+*
+* DESCRIPTION:
+*        Interrupt on Age Out. When aging is enabled, all non-static address
+*        entries in the ATU's address database are periodically aged.
+*        When this feature is set to GT_TRUE and an entry associated with this
+*        port is aged out, an AgeOutViolation will be captured for that entry.
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        mode - GT_TRUE, if Age Out Interrupt is enabled
+*               GT_FALUSE, otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS geventGetAgeOutIntEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+);
+
+/*******************************************************************************
+* geventSetOverLimitInt
+*
+* DESCRIPTION:
+*        This routine enables/disables Over Limit Interrupt for a port.
+*        If it's enabled, an ATU Miss violation will be generated when port auto
+*        learn reached the limit(refer to gfdbGetPortAtuLimitReached API).
+*
+* INPUTS:
+*        port - the logical port number
+*        mode - GT_TRUE to enable Over Limit Interrupt,
+*               GT_FALUSE to disable
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS geventSetOverLimitInt
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_BOOL        mode
+);
+
+/*******************************************************************************
+* geventGetOverLimitInt
+*
+* DESCRIPTION:
+*        This routine enables/disables Over Limit Interrupt for a port.
+*        If it's enabled, an ATU Miss violation will be generated when port auto
+*        learn reached the limit(refer to gfdbSetPortAtuLearnLimit API).
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        mode - GT_TRUE to enable Over Limit Interrupt,
+*               GT_FALUSE to disable
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS geventGetOverLimitInt
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+);
+
+/*******************************************************************************
+* geventGetPortAtuLimitReached
+*
+* DESCRIPTION:
+*       This routine checks if learn limit has been reached.
+*        When it reached, the port can no longer auto learn any more MAC addresses
+*        because the address learn limit set on this port has been reached.
+*
+* INPUTS:
+*       port  - logical port number
+*
+* OUTPUTS:
+*       limit - GT_TRUE, if limit has been reached
+*                GT_FALSE, otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*
+*******************************************************************************/
+GT_STATUS geventGetPortAtuLimitReached
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT      port,
+    IN  GT_BOOL       *limit
+);
+
+/*******************************************************************************
+* eventSetDevInt
+*
+* DESCRIPTION:
+*        Device Interrupt.
+*        The following device interrupts are supported:
+*            GT_DEV_INT_WATCHDOG    -
+*                WatchDog event interrupt (WatchDog event can be configured with
+*                gwdSetEvent API)
+*            GT_DEV_INT_JAMLIMIT    -
+*                any of the ports detect an Ingress Jam Limit violation
+*                (see gprtSetPauseLimitIn API)
+*            GT_DEV_INT_DUPLEX_MISMATCH -
+*                any of the ports detect a duplex mismatch (i.e., the local port is
+*                in half duplex mode while the link partner is in full duplex mode)
+*            GT_DEV_INT_SERDES_LINK -
+*                SERDES link chage interrupt.
+*                An interrupt occurs when a SERDES port changes link status
+*                (link up or link down)
+*
+*        If any of the above events is enabled, GT_DEVICE_INT interrupt will
+*        be asserted by the enabled event when GT_DEV_INT is enabled with
+*        eventSetActive API.
+*
+* INPUTS:
+*        devInt - GT_DEV_INT
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS eventSetDevInt
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_DEV_EVENT    *devInt
+);
+
+/*******************************************************************************
+* gwdSetEvent
+*
+* DESCRIPTION:
+*        Watch Dog Event.
+*        The following Watch Dog events are supported:
+*            GT_WD_QC  - Queue Controller Watch Dog enable.
+*                        When enabled, the QC's watch dog circuit checks for link
+*                        list errors and any errors found in the QC.
+*            GT_WD_EGRESS - Egress Watch Dog enable.
+*                        When enabled, each port's egress circuit checks for problems
+*                        between the port and the Queue Controller.
+*            GT_WD_FORCE - Force a Watch Dog event.
+*
+*        If any of the above events is enabled, GT_DEVICE_INT interrupt will
+*        be asserted by the enabled WatchDog event when GT_DEV_INT_WATCHDOG is
+*        enabled with eventSetDevActive API and GT_DEV_INT is enabled with
+*        eventSetActive API.
+*
+* INPUTS:
+*        wdEvent - Watch Dog Events
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gwdSetEvent
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32        wdEvent
+);
+
+/*******************************************************************************
+* gwdSetSWResetOnWD
+*
+* DESCRIPTION:
+*        SWReset on Watch Dog Event.
+*        When this feature is enabled, any enabled watch dog event (gwdSetEvent API)
+*        will automatically reset the switch core's datapath just as if gsysSwReset
+*        API is called.
+*
+*        The Watch Dog History (gwdGetHistory API) won't be cleared by this
+*        automatic SWReset. This allows the user to know if any watch dog event
+*        ever occurred even if the swich is configured to automatically recover
+*        from a watch dog.
+*
+*        When this feature is disabled, enabled watch dog events will not cause a
+*        SWReset.
+*
+* INPUTS:
+*        en   - GT_TRUE to enable SWReset on WD
+*               GT_FALUSE to disable
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gwdSetSWResetOnWD
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_BOOL        en
+);
+
+/*******************************************************************************
+* gwdGetSWResetOnWD
+*
+* DESCRIPTION:
+*        SWReset on Watch Dog Event.
+*        When this feature is enabled, any enabled watch dog event (gwdSetEvent API)
+*        will automatically reset the switch core's datapath just as if gsysSwReset
+*        API is called.
+*
+*        The Watch Dog History (gwdGetHistory API) won't be cleared by this
+*        automatic SWReset. This allows the user to know if any watch dog event
+*        ever occurred even if the swich is configured to automatically recover
+*        from a watch dog.
+*
+*        When this feature is disabled, enabled watch dog events will not cause a
+*        SWReset.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en   - GT_TRUE, if SWReset on WD is enabled
+*               GT_FALUSE, otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gwdGetSWResetOnWD
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL        *en
+);
+
+/*******************************************************************************
+* gwdGetHistory
+*
+* DESCRIPTION:
+*        This routine retrieves Watch Dog history. They are
+*
+*        wdEvent -
+*            When it's set to GT_TRUE, some enabled Watch Dog event occurred.
+*            The following events are possible:
+*                QC WatchDog Event (GT_WD_QC)
+*                Egress WatchDog Event (GT_WD_EGRESS)
+*                Forced WatchDog Event (GT_WD_FORCE)
+*        egressEvent -
+*            If any port's egress logic detects an egress watch dog issue,
+*            this field is set to GT_TRUE, regardless of the enabling GT_WD_EGRESS
+*            event.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        history - GT_WD_EVENT_HISTORY structure
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gwdGetHistory
+(
+    IN  GT_QD_DEV            *dev,
+    OUT GT_WD_EVENT_HISTORY    *history
+);
+
+
+/* gtPIRL2.c */
+
+/*******************************************************************************
+* gpirl2WriteResource
+*
+* DESCRIPTION:
+*       This routine writes resource bucket parameters to the given resource
+*        of the port.
+*
+* INPUTS:
+*       port     - logical port number.
+*        irlRes   - bucket to be used (0 ~ 4).
+*        pirlData - PIRL resource parameters.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpirl2WriteResource
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN  GT_U32        irlRes,
+    IN  GT_PIRL2_DATA    *pirlData
+);
+
+/*******************************************************************************
+* gpirl2ReadResource
+*
+* DESCRIPTION:
+*       This routine retrieves IRL Parameter.
+*
+* INPUTS:
+*       port     - logical port number.
+*        irlRes   - bucket to be used (0 ~ 4).
+*
+* OUTPUTS:
+*        pirlData - PIRL resource parameters.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpirl2ReadResource
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN  GT_U32        irlRes,
+    OUT GT_PIRL2_DATA    *pirlData
+);
+
+/*******************************************************************************
+* gpirl2DisableResource
+*
+* DESCRIPTION:
+*       This routine disables Ingress Rate Limiting for the given bucket.
+*
+* INPUTS:
+*       port     - logical port number.
+*        irlRes   - bucket to be used (0 ~ 4).
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpirl2DisableResource
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN  GT_U32        irlRes
+);
+
+
+/* gtPolicy.c */
+
+/*******************************************************************************
+* gprtSetPolicy
+*
+* DESCRIPTION:
+*       This routine sets the Policy for ports.
+*        Supported Policies are defined as GT_FRAME_POLICY as follows:
+*            FRAME_POLICY_NONE    - normal frame switching
+*            FRAME_POLICY_MIRROR  - mirror (copy) frame to MirrorDest port
+*            FRAME_POLICY_TRAP    - trap(re-direct) frame to the CPUDest port
+*            FRAME_POLICY_DISCARD - discard(filter) the frame
+*        Supported Policy types are defined as GT_POLICY_TYPE:
+*            POLICY_TYPE_DA - DA Policy Mapping
+*                DA Policy Mapping occurs when the DA of a frame is contained in
+*                the ATU address database with an Entry State that indicates Policy.
+*            POLICY_TYPE_SA - SA Policy Mapping
+*                SA Policy Mapping occurs when the SA of a frame is contained in
+*                the ATU address database with an Entry State that indicates Policy.
+*            POLICY_TYPE_VTU - VTU Policy Mapping
+*                VTU Policy Mapping occurs when the VID of a frame is contained in
+*                the VTU database with the VidPolicy is enabled.
+*            POLICY_TYPE_ETYPE - EtherType Policy Mapping
+*                EType Policy Mapping occurs when the EtherType of a frame matches
+*                the PortEType (see gprtSetPortEType API)
+*            POLICY_TYPE_PPPoE - PPPoE Policy Mapping
+*                PPPoE Policy Mapping occurs when the EtherType of a frame matches 0x8863
+*            POLICY_TYPE_VBAS - VBAS Policy Mapping
+*                VBAS Policy Mapping occurs when the EtherType of a frame matches 0x8200
+*            POLICY_TYPE_OPT82 - DHCP Option 82 Policy Mapping
+*                DHCP Option 82 Policy Mapping occurs when the ingressing frame is an
+*                IPv4 UDP with a UDP Destination port = 0x0043 or 0x0044, or an
+*                IPv6 UDP with a UDP Destination port = 0x0223 or 0x0222
+*            POLICY_TYPE_UDP - UDP Policy Mapping
+*                UDP Policy Mapping occurs when the ingressing frame is
+*                a Broadcast IPv4 UDP or a Multicast IPv6 UDP.
+*
+* INPUTS:
+*       port    - logical port number.
+*       type     - policy type (GT_POLICY_TYPE)
+*       policy     - policy (GT_FRAME_POLICY)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK            - on success
+*       GT_FAIL          - on error
+*       GT_BAD_PARAM     - on bad parameters
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetPolicy
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT     port,
+    IN  GT_POLICY_TYPE    type,
+    IN    GT_FRAME_POLICY    policy
+);
+
+/*******************************************************************************
+* gprtGetPolicy
+*
+* DESCRIPTION:
+*       This routine gets the Policy of the given policy type.
+*        Supported Policies are defined as GT_FRAME_POLICY as follows:
+*            FRAME_POLICY_NONE    - normal frame switching
+*            FRAME_POLICY_MIRROR  - mirror (copy) frame to MirrorDest port
+*            FRAME_POLICY_TRAP    - trap(re-direct) frame to the CPUDest port
+*            FRAME_POLICY_DISCARD - discard(filter) the frame
+*        Supported Policy types are defined as GT_POLICY_TYPE:
+*            POLICY_TYPE_DA - DA Policy Mapping
+*                DA Policy Mapping occurs when the DA of a frame is contained in
+*                the ATU address database with an Entry State that indicates Policy.
+*            POLICY_TYPE_SA - SA Policy Mapping
+*                SA Policy Mapping occurs when the SA of a frame is contained in
+*                the ATU address database with an Entry State that indicates Policy.
+*            POLICY_TYPE_VTU - VTU Policy Mapping
+*                VTU Policy Mapping occurs when the VID of a frame is contained in
+*                the VTU database with the VidPolicy is enabled.
+*            POLICY_TYPE_ETYPE - EtherType Policy Mapping
+*                EType Policy Mapping occurs when the EtherType of a frame matches
+*                the PortEType (see gprtSetPortEType API)
+*            POLICY_TYPE_PPPoE - PPPoE Policy Mapping
+*                PPPoE Policy Mapping occurs when the EtherType of a frame matches 0x8863
+*            POLICY_TYPE_VBAS - VBAS Policy Mapping
+*                VBAS Policy Mapping occurs when the EtherType of a frame matches 0x8200
+*            POLICY_TYPE_OPT82 - DHCP Option 82 Policy Mapping
+*                DHCP Option 82 Policy Mapping occurs when the ingressing frame is an
+*                IPv4 UDP with a UDP Destination port = 0x0043 or 0x0044, or an
+*                IPv6 UDP with a UDP Destination port = 0x0223 or 0x0222
+*            POLICY_TYPE_UDP - UDP Policy Mapping
+*                UDP Policy Mapping occurs when the ingressing frame is
+*                a Broadcast IPv4 UDP or a Multicast IPv6 UDP.
+*
+* INPUTS:
+*       port    - logical port number.
+*       type     - policy type (GT_POLICY_TYPE)
+*
+* OUTPUTS:
+*       policy     - policy (GT_FRAME_POLICY)
+*
+* RETURNS:
+*       GT_OK            - on success
+*       GT_FAIL          - on error
+*       GT_BAD_PARAM     - on bad parameters
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPolicy
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT     port,
+    IN  GT_POLICY_TYPE    type,
+    OUT GT_FRAME_POLICY    *policy
+);
+
+
+/* gtPortCtrl.c */
+
+/*******************************************************************************
+* gprtSetPauseLimitOut
+*
+* DESCRIPTION:
+*        Limit the number of continuous Pause refresh frames that can be transmitted
+*        from this port. When full duplex Flow Control is enabled on this port,
+*        these bits are used to limit the number of Pause refresh frames that can
+*        be generated from this port to keep this port's link partner from sending
+*        any data.
+*        Setting this value to 0 will allow continuous Pause frame refreshes to
+*        egress this port as long as this port remains congested.
+*        Setting this value to 1 will allow 1 Pause frame to egress from this port
+*        for each congestion situation.
+*        Setting this value to 2 will allow 2 Pause frames to egress from this port
+*        for each congestion situation, etc.
+*
+* INPUTS:
+*        port - the logical port number
+*        limit - the max number of Pause refresh frames for each congestion situation
+*                ( 0 ~ 0xFF)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if limit > 0xFF
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetPauseLimitOut
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_U16        limit
+);
+
+/*******************************************************************************
+* gprtGetPauseLimitOut
+*
+* DESCRIPTION:
+*        Limit the number of continuous Pause refresh frames that can be transmitted
+*        from this port. When full duplex Flow Control is enabled on this port,
+*        these bits are used to limit the number of Pause refresh frames that can
+*        be generated from this port to keep this port's link partner from sending
+*        any data.
+*        Setting this value to 0 will allow continuous Pause frame refreshes to
+*        egress this port as long as this port remains congested.
+*        Setting this value to 1 will allow 1 Pause frame to egress from this port
+*        for each congestion situation.
+*        Setting this value to 2 will allow 2 Pause frames to egress from this port
+*        for each congestion situation, etc.
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        limit - the max number of Pause refresh frames for each congestion situation
+*                ( 0 ~ 0xFF)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPauseLimitOut
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_U16        *limit
+);
+
+/*******************************************************************************
+* gprtSetPauseLimitIn
+*
+* DESCRIPTION:
+*        Limit the number of continuous Pause refresh frames that can be received
+*        on this port. When a port has flow Control enabled, this value can be
+*        used to limit how long this port can be Paused off to prevent a port stall
+*        through jamming.
+*        When this value is in the range of 0x01 to 0xFF, and a frame is ready to
+*        be transmitted out this port, but it cannot be transmitted due to the port
+*        being jammed, this limit mechanism starts. The limit mechanism starts
+*        counting new Pause refresh frames or counts of 16 consecutive collisions.
+*        If the counter reaches the value set through this API, the following event
+*        will occur:
+*            1) Port's ForceFC is enabled,
+*            2) Port's FCValue is cleared to a zero, and
+*            3) Jam Limit Interrupt is asserted.
+*        This effectively disables Flow Control on the port once the Pause timer
+*        expires. If a frame gets transmitted out this port before the counter
+*        reaches this limit, then this limit mechanism counter resets back to zero.
+*
+*        Setting this value to 0 will allow continuous jamming to be received on
+*        this port without the Port's ForceFC and FCValue getting modified.
+*
+*        The modification of Port's ForceFC and FCValue is the only indication that
+*        the limit was reached on this port.
+*
+* INPUTS:
+*        port - the logical port number
+*        limit - the max number of continuous Pause refresh frames for each trasmition
+*                ( 0 ~ 0xFF)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if limit > 0xFF
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetPauseLimitIn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_U16        limit
+);
+
+/*******************************************************************************
+* gprtGetPauseLimitIn
+*
+* DESCRIPTION:
+*        Limit the number of continuous Pause refresh frames that can be received
+*        on this port. When a port has flow Control enabled, this value can be
+*        used to limit how long this port can be Paused off to prevent a port stall
+*        through jamming.
+*        When this value is in the range of 0x01 to 0xFF, and a frame is ready to
+*        be transmitted out this port, but it cannot be transmitted due to the port
+*        being jammed, this limit mechanism starts. The limit mechanism starts
+*        counting new Pause refresh frames or counts of 16 consecutive collisions.
+*        If the counter reaches the value set through this API, the following event
+*        will occur:
+*            1) Port's ForceFC is enabled,
+*            2) Port's FCValue is cleared to a zero, and
+*            3) Jam Limit Interrupt is asserted.
+*        This effectively disables Flow Control on the port once the Pause timer
+*        expires. If a frame gets transmitted out this port before the counter
+*        reaches this limit, then this limit mechanism counter resets back to zero.
+*
+*        Setting this value to 0 will allow continuous jamming to be received on
+*        this port without the Port's ForceFC and FCValue getting modified.
+*
+*        The modification of Port's ForceFC and FCValue is the only indication that
+*        the limit was reached on this port.
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        limit - the max number of continuous Pause refresh frames for each trasmition
+*                ( 0 ~ 0xFF)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPauseLimitIn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_U16        *limit
+);
+
+/*******************************************************************************
+* gprtSetFrameMode
+*
+* DESCRIPTION:
+*        Frmae Mode is used to define the expected Ingress and the generated Egress
+*        tagging frame format for this port as follows:
+*            GT_FRAME_MODE_NORMAL -
+*                Normal Network mode uses industry standard IEEE 802.3ac Tagged or
+*                Untagged frames. Tagged frames use an Ether Type of 0x8100.
+*            GT_FRAME_MODE_DSA -
+*                DSA mode uses a Marvell defined tagged frame format for
+*                Chip-to-Chip and Chip-to-CPU connections.
+*            GT_FRAME_MODE_PROVIDER -
+*                Provider mode uses user definable Ether Types per port
+*                (see gprtSetPortEType/gprtGetPortEType API).
+*            GT_FRAME_MODE_ETHER_TYPE_DSA -
+*                Ether Type DSA mode uses standard Marvell DSA Tagged frame info
+*                flowing a user definable Ether Type. This mode allows the mixture
+*                of Normal Network frames with DSA Tagged frames and is useful to
+*                be used on ports that connect to a CPU.
+*
+* INPUTS:
+*        port - the logical port number
+*        mode - GT_FRAME_MODE type
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if mode is unknown
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetFrameMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_FRAME_MODE    mode
+);
+
+/*******************************************************************************
+* gprtGetFrameMode
+*
+* DESCRIPTION:
+*        Frmae Mode is used to define the expected Ingress and the generated Egress
+*        tagging frame format for this port as follows:
+*            GT_FRAME_MODE_NORMAL -
+*                Normal Network mode uses industry standard IEEE 802.3ac Tagged or
+*                Untagged frames. Tagged frames use an Ether Type of 0x8100.
+*            GT_FRAME_MODE_DSA -
+*                DSA mode uses a Marvell defined tagged frame format for
+*                Chip-to-Chip and Chip-to-CPU connections.
+*            GT_FRAME_MODE_PROVIDER -
+*                Provider mode uses user definable Ether Types per port
+*                (see gprtSetPortEType/gprtGetPortEType API).
+*            GT_FRAME_MODE_ETHER_TYPE_DSA -
+*                Ether Type DSA mode uses standard Marvell DSA Tagged frame info
+*                flowing a user definable Ether Type. This mode allows the mixture
+*                of Normal Network frames with DSA Tagged frames and is useful to
+*                be used on ports that connect to a CPU.
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        mode - GT_FRAME_MODE type
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetFrameMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_FRAME_MODE    *mode
+);
+
+/*******************************************************************************
+* gprtSetHoldAt1
+*
+* DESCRIPTION:
+*        Hold Aging ATU Entries at an Entry State value of 1. When this feature
+*        is set to GT_TRUE, ATU entries associated with this port will age down
+*        to an Entry State of 0x1, but will not go to 0x0 (0x0 would purge the
+*        entry)
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to hold aging ATU entry with Entry State of 1,
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetHoldAt1
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gprtGetHoldAt1
+*
+* DESCRIPTION:
+*        Hold Aging ATU Entries at an Entry State value of 1. When this feature
+*        is set to GT_TRUE, ATU entries associated with this port will age down
+*        to an Entry State of 0x1, but will not go to 0x0 (0x0 would purge the
+*        entry)
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE to hold aging ATU entry with Entry State of 1,
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetHoldAt1
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL      *mode
+);
+
+
+/*******************************************************************************
+* gprtSetIntOnAgeOut
+*
+* DESCRIPTION:
+*        Interrupt on Age Out. When aging is enabled, all non-static address
+*        entries in the ATU's address database are periodically aged.
+*        When this feature is set to GT_TRUE and an entry associated with this
+*        port is aged out, an AgeOutViolation will be captured for that entry.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to enable AgeOutViloation interrupt
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetIntOnAgeOut
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gprtGetIntOnAgeOut
+*
+* DESCRIPTION:
+*        Interrupt on Age Out. When aging is enabled, all non-static address
+*        entries in the ATU's address database are periodically aged.
+*        When this feature is set to GT_TRUE and an entry associated with this
+*        port is aged out, an AgeOutViolation will be captured for that entry.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE to enable AgeOutViloation interrupt
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetIntOnAgeOut
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL      *mode
+);
+
+/*******************************************************************************
+* gprtSetRefreshLocked
+*
+* DESCRIPTION:
+*        Auto Refresh known addresses when port is Locked. Already known addresses
+*        will be auto refreshed when this feature is enabled. When this feature
+*        is disabled, auto refreshing will not occur on Locked ports.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to enable Auto Refresh known addresses on locked port
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetRefreshLocked
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gprtGetRefreshLocked
+*
+* DESCRIPTION:
+*        Auto Refresh known addresses when port is Locked. Already known addresses
+*        will be auto refreshed when this feature is enabled. When this feature
+*        is disabled, auto refreshing will not occur on Locked ports.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE to enable Auto Refresh known addresses on locked port
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetRefreshLocked
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL      *mode
+);
+
+/*******************************************************************************
+* gprtSetPortEType
+*
+* DESCRIPTION:
+*        This routine sets the port's special Ether Type. This Ether Type is used
+*        for Policy (see gprtSetPolicy API) and FrameMode (see gprtSetFrameMode API).
+*
+* INPUTS:
+*        port  - the logical port number
+*        etype - port's special ether type
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetPortEType
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_ETYPE        etype
+);
+
+/*******************************************************************************
+* gprtGetPortEType
+*
+* DESCRIPTION:
+*        This routine retrieves the port's special Ether Type. This Ether Type is used
+*        for Policy (see gprtSetPolicy API) and FrameMode (see gprtSetFrameMode API).
+*
+* INPUTS:
+*        port  - the logical port number
+*
+* OUTPUTS:
+*        etype - port's special ether type
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPortEType
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_ETYPE    *etype
+);
+
+
+/* gtPortRateCtr.c */
+
+/*******************************************************************************
+* grcSetFrameOverhead
+*
+* DESCRIPTION:
+*       Egress rate frame overhead adjustment.
+*        This field is used to adjust the number of bytes that need to be added to a
+*        frame's IFG on a per frame basis.
+*
+*        The egress rate limiter multiplies the value programmed in this field by four
+*        for computing the frame byte offset adjustment value (i.e., the amount the
+*        IPG is increased for every frame). This adjustment, if enabled, is made to
+*        every egressing frame's IPG and it is made in addition to any other IPG
+*        adjustments due to other Egress Rate Control settings.
+*
+*        The egress overhead adjustment can add the following number of byte times
+*        to each frame's IPG: 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52,
+*        56 and 60.
+*
+*        Example:
+*        If FrameOverhead = 11, the egress rate limiter would increase the IPG
+*        between every frame by an additional 44 bytes.
+*
+*        Note: When the Count Mode (port offset 0x0A) is in Frame based egress rate
+*        shaping mode, these Frame Overhead bits must be 0x0.
+*
+* INPUTS:
+*       port     - logical port number.
+*       overhead - Frame overhead (0 ~ 15)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS grcSetFrameOverhead
+(
+    IN GT_QD_DEV        *dev,
+    IN GT_LPORT            port,
+    IN GT_32            overhead
+);
+
+/*******************************************************************************
+* grcGetFrameOverhead
+*
+* DESCRIPTION:
+*       Egress rate frame overhead adjustment.
+*        This field is used to adjust the number of bytes that need to be added to a
+*        frame's IFG on a per frame basis.
+*
+*        The egress rate limiter multiplies the value programmed in this field by four
+*        for computing the frame byte offset adjustment value (i.e., the amount the
+*        IPG is increased for every frame). This adjustment, if enabled, is made to
+*        every egressing frame's IPG and it is made in addition to any other IPG
+*        adjustments due to other Egress Rate Control settings.
+*
+*        The egress overhead adjustment can add the following number of byte times
+*        to each frame's IPG: 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52,
+*        56 and 60.
+*
+*        Example:
+*        If FrameOverhead = 11, the egress rate limiter would increase the IPG
+*        between every frame by an additional 44 bytes.
+*
+*        Note: When the Count Mode (port offset 0x0A) is in Frame based egress rate
+*        shaping mode, these Frame Overhead bits must be 0x0.
+*
+* INPUTS:
+*       port    - logical port number.
+*
+* OUTPUTS:
+*       overhead - Frame overhead (0 ~ 15)
+*
+* RETURNS:
+*       GT_OK            - on success
+*       GT_FAIL          - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+*******************************************************************************/
+GT_STATUS grcGetFrameOverhead
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_32    *overhead
+);
+
+
+/* gtPortStatus.c */
+
+/*******************************************************************************
+* gprtGetBufHigh
+*
+* DESCRIPTION:
+*        Output from QC telling the MAC that it should perform Flow Control.
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        bufHigh - GT_TRUE, if Flow control required
+*                  GT_FALSE, otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetBufHigh
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *bufHigh
+);
+
+/*******************************************************************************
+* gprtGetFcEn
+*
+* DESCRIPTION:
+*        Input into the QC telling it that Flow Control is enabled on this port.
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        fcEn - GT_TRUE, if Flow control is enabled
+*               GT_FALSE, otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetFcEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *fcEn
+);
+
+/*******************************************************************************
+* gprtGetRsvSize
+*
+* DESCRIPTION:
+*        This routine gets Ingress reserved queue size counter.
+*        This counter reflects the current number of reserved ingress buffers
+*        assigned to this port.
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        count - reserved ingress queue size counter value
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetRsvSize
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_U16        *count
+);
+
+
+/* gtPriTable.c */
+
+/*******************************************************************************
+* gsysSetQPriOverrideTable
+*
+* DESCRIPTION:
+*       Queue Priority Override.
+*        When a frame enters a port, its type is determined and the type is used
+*        to access the Queue Priority Table. If the type's qPriEn (in GT_QPRI_TBL_ENTRY
+*        structure) is enabled, then the frame's Queue Priority will be overridden
+*        with the value written in qPriority (in GT_QPRI_TBL_ENTRY structure).
+*        Frame Types supported are:
+*            FTYPE_DSA_TO_CPU_BPDU -
+*                Used on multicast DSA To_CPU frames with a Code of 0x0 (BPDU/MGMT).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_F2R -
+*                Used on DSA To_CPU frames with a Code of 0x1 (Frame to Register
+*                Reply). Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_IGMP -
+*                Used on DSA To_CPU frames with a Code of 0x2 (IGMP/MLD Trap)
+*                and on non-DSA Control frames that are IGMP or MLD trapped
+*            FTYPE_DSA_TO_CPU_TRAP -
+*                Used on DSA To_CPU frames with a Code of 0x3 (Policy Trap) and
+*                on non-DSA Control frames that are Policy Trapped
+*            FTYPE_DSA_TO_CPU_ARP -
+*                Used on DSA To_CPU frames with a Code of 0x4 (ARP Mirror) and
+*                on non-DSA Control frames that are ARP Mirrored (see gprtSetARPtoCPU API).
+*            FTYPE_DSA_TO_CPU_MIRROR -
+*                Used on DSA To_CPU frames with a Code of 0x5 (Policy Mirror) and
+*                on non-DSA Control frames that are Policy Mirrored (see gprtSetPolicy API).
+*            FTYPE_DSA_TO_CPU_RESERVED -
+*                Used on DSA To_CPU frames with a Code of 0x6 (Reserved). Not
+*                used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_UCAST_MGMT -
+*                Used on unicast DSA To_CPU frames with a Code of 0x0 (unicast
+*                MGMT). Not used on non-DSA Control frames.
+*            FTYPE_DSA_FROM_CPU -
+*                Used on DSA From_CPU frames. Not used on non-DSA Control frame
+*            FTYPE_DSA_CROSS_CHIP_FC -
+*                Used on DSA Cross Chip Flow Control frames (To_Sniffer Flow
+*                Control). Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_EGRESS_MON -
+*                Used on DSA Cross Chip Egress Monitor frames (To_Sniffer Tx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_INGRESS_MON -
+*                Used on DSA Cross Chip Ingress Monitor frames (To_Sniffer Rx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_PORT_ETYPE_MATCH -
+*                Used on normal network ports (see gprtSetFrameMode API)
+*                on frames whose Ethertype matches the port's PortEType register.
+*                Not used on non-DSA Control frames.
+*            FTYPE_BCAST_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain a Broadcast
+*                destination address. Not used on DSA Control frames.
+*
+* INPUTS:
+*       fType - frame type (GT_PRI_OVERRIDE_FTYPE)
+*       entry - Q Priority Override Table entry (GT_QPRI_TBL_ENTRY)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM     - on unknown frame type
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gsysSetQPriOverrideTable
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PRI_OVERRIDE_FTYPE    fType,
+    IN  GT_QPRI_TBL_ENTRY    *entry
+);
+
+/*******************************************************************************
+* gsysGetQPriOverrideTable
+*
+* DESCRIPTION:
+*       Queue Priority Override.
+*        When a frame enters a port, its type is determined and the type is used
+*        to access the Queue Priority Table. If the type's qPriEn (in GT_QPRI_TBL_ENTRY
+*        structure) is enabled, then the frame's Queue Priority will be overridden
+*        with the value written in qPriority (in GT_QPRI_TBL_ENTRY structure).
+*        Frame Types supported are:
+*            FTYPE_DSA_TO_CPU_BPDU -
+*                Used on multicast DSA To_CPU frames with a Code of 0x0 (BPDU/MGMT).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_F2R -
+*                Used on DSA To_CPU frames with a Code of 0x1 (Frame to Register
+*                Reply). Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_IGMP -
+*                Used on DSA To_CPU frames with a Code of 0x2 (IGMP/MLD Trap)
+*                and on non-DSA Control frames that are IGMP or MLD trapped
+*            FTYPE_DSA_TO_CPU_TRAP -
+*                Used on DSA To_CPU frames with a Code of 0x3 (Policy Trap) and
+*                on non-DSA Control frames that are Policy Trapped
+*            FTYPE_DSA_TO_CPU_ARP -
+*                Used on DSA To_CPU frames with a Code of 0x4 (ARP Mirror) and
+*                on non-DSA Control frames that are ARP Mirrored (see gprtSetARPtoCPU API).
+*            FTYPE_DSA_TO_CPU_MIRROR -
+*                Used on DSA To_CPU frames with a Code of 0x5 (Policy Mirror) and
+*                on non-DSA Control frames that are Policy Mirrored (see gprtSetPolicy API).
+*            FTYPE_DSA_TO_CPU_RESERVED -
+*                Used on DSA To_CPU frames with a Code of 0x6 (Reserved). Not
+*                used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_UCAST_MGMT -
+*                Used on unicast DSA To_CPU frames with a Code of 0x0 (unicast
+*                MGMT). Not used on non-DSA Control frames.
+*            FTYPE_DSA_FROM_CPU -
+*                Used on DSA From_CPU frames. Not used on non-DSA Control frame
+*            FTYPE_DSA_CROSS_CHIP_FC -
+*                Used on DSA Cross Chip Flow Control frames (To_Sniffer Flow
+*                Control). Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_EGRESS_MON -
+*                Used on DSA Cross Chip Egress Monitor frames (To_Sniffer Tx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_INGRESS_MON -
+*                Used on DSA Cross Chip Ingress Monitor frames (To_Sniffer Rx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_PORT_ETYPE_MATCH -
+*                Used on normal network ports (see gprtSetFrameMode API)
+*                on frames whose Ethertype matches the port's PortEType register.
+*                Not used on non-DSA Control frames.
+*            FTYPE_BCAST_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain a Broadcast
+*                destination address. Not used on DSA Control frames.
+*
+* INPUTS:
+*       fType - frame type (GT_PRI_OVERRIDE_FTYPE)
+*
+* OUTPUTS:
+*       entry - Q Priority Override Table entry (GT_QPRI_TBL_ENTRY)
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM     - on unknown frame type
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gsysGetQPriOverrideTable
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PRI_OVERRIDE_FTYPE    fType,
+    OUT GT_QPRI_TBL_ENTRY    *entry
+);
+
+
+/* gtSysCtrl.c */
+
+/*******************************************************************************
+* gsysSetCPUDest
+*
+* DESCRIPTION:
+*        This routine sets CPU Destination Port. CPU Destination port indicates the
+*        port number on this device where the CPU is connected (either directly or
+*        indirectly through another Marvell switch device).
+*
+*        Many modes of frame processing need to know where the CPU is located.
+*        These modes are:
+*        1. When IGMP/MLD frame is received and Snooping is enabled
+*        2. When the port is configured as a DSA port and it receives a To_CPU frame
+*        3. When a Rsvd2CPU frame enters the port
+*        4. When the port's SA Filtering mode is Drop to CPU
+*        5. When any of the port's Policy Options trap the frame to the CPU
+*        6. When the ingressing frame is an ARP and ARP mirroring is enabled in the
+*           device
+*
+*        In all cases, except for ARP, the frames that meet the enabled criteria
+*        are mapped to the CPU Destination port, overriding where the frame would
+*        normally go. In the case of ARP, the frame will be mapped normally and it
+*        will also get copied to this port.
+*        Frames that filtered or discarded will not be mapped to the CPU Destination
+*        port with the exception of the Rsvd2CPU and DSA Tag cases.
+*
+*        If CPUDest = 0xF, the remapped frames will be discarded, no ARP mirroring
+*        will occur and ingressing To_CPU frames will be discarded.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetCPUDest
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port
+);
+
+/*******************************************************************************
+* gsysGetCPUDest
+*
+* DESCRIPTION:
+*        This routine gets CPU Destination Port. CPU Destination port indicates the
+*        port number on this device where the CPU is connected (either directly or
+*        indirectly through another Marvell switch device).
+*
+*        Many modes of frame processing need to know where the CPU is located.
+*        These modes are:
+*        1. When IGMP/MLD frame is received and Snooping is enabled
+*        2. When the port is configured as a DSA port and it receives a To_CPU frame
+*        3. When a Rsvd2CPU frame enters the port
+*        4. When the port's SA Filtering mode is Drop to CPU
+*        5. When any of the port's Policy Options trap the frame to the CPU
+*        6. When the ingressing frame is an ARP and ARP mirroring is enabled in the
+*           device
+*
+*        In all cases, except for ARP, the frames that meet the enabled criteria
+*        are mapped to the CPU Destination port, overriding where the frame would
+*        normally go. In the case of ARP, the frame will be mapped normally and it
+*        will also get copied to this port.
+*        Frames that filtered or discarded will not be mapped to the CPU Destination
+*        port with the exception of the Rsvd2CPU and DSA Tag cases.
+*
+*        If CPUDest = 0xF, the remapped frames will be discarded, no ARP mirroring
+*        will occur and ingressing To_CPU frames will be discarded.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        port  - the logical port number.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetCPUDest
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_LPORT      *port
+);
+
+/*******************************************************************************
+* gsysSetMirrorDest
+*
+* DESCRIPTION:
+*        This routine sets Mirror Destination Port. Frames that ingress a port
+*        that trigger a policy mirror are mapped (copied) to this port as long as
+*        the frame is not filtered or discarded.
+*        The Mirror Destination port should point to the port that directs these
+*        frames to the CPU that will process these frames. This target port should
+*        be a DSA Tag port so the frames will egress with a To_CPU DSA Tag with a
+*        CPU Code of Policy Mirror.
+*        To_CPU DSA Tag frames with a CPU Code of Policy Mirror that ingress a DSA
+*        Tag port will be sent to the port number defined in MirrorDest.
+*
+*        If MirrorDest = 0xF, Policy Mirroring is disabled and ingressing To_CPU
+*        Policy Mirror frames will be discarded.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetMirrorDest
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port
+);
+
+/*******************************************************************************
+* gsysGetMirrorDest
+*
+* DESCRIPTION:
+*        This routine gets Mirror Destination Port. Frames that ingress a port
+*        that trigger a policy mirror are mapped (copied) to this port as long as
+*        the frame is not filtered or discarded.
+*        The Mirror Destination port should point to the port that directs these
+*        frames to the CPU that will process these frames. This target port should
+*        be a DSA Tag port so the frames will egress with a To_CPU DSA Tag with a
+*        CPU Code of Policy Mirror.
+*        To_CPU DSA Tag frames with a CPU Code of Policy Mirror that ingress a DSA
+*        Tag port will be sent to the port number defined in MirrorDest.
+*
+*        If MirrorDest = 0xF, Policy Mirroring is disabled and ingressing To_CPU
+*        Policy Mirror frames will be discarded.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        port  - the logical port number.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetMirrorDest
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_LPORT      *port
+);
+
+/*******************************************************************************
+* gsysSetRMPort
+*
+* DESCRIPTION:
+*        Remote Management feature is enabled only on one port. Since not all ports
+*        can be enabled for Remote Management feature, please refer to the device
+*        datasheet for detailed information.
+*        For example, 88E6097 device allows logical port 9 or 10, and 88E6047
+*        device allows logical port 4 and 5.
+*
+* INPUTS:
+*        port - Remote Management Port
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM     - on unallowable port
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        Obsolete. Please uses gsysSetRMUMode API, instead.
+*
+*******************************************************************************/
+GT_STATUS gsysSetRMPort
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port
+);
+
+/*******************************************************************************
+* gsysGetRMPort
+*
+* DESCRIPTION:
+*        Remote Management feature is enabled only on one port. Since not all ports
+*        can be enabled for Remote Management feature, please refer to the device
+*        datasheet for detailed information.
+*        For example, 88E6097 device allows logical port 9 or 10, and 88E6047
+*        device allows logical port 4 and 5.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        port - Remote Management Port
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        Obsolete. Please uses gsysGetRMUMode API, instead.
+*
+*******************************************************************************/
+GT_STATUS gsysGetRMPort
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_LPORT     *port
+);
+
+/*******************************************************************************
+* gsysSetRMDACheck
+*
+* DESCRIPTION:
+*        Check the DA on Remote Management frames.
+*        When DA Check is enabled, the DA of Remote Management frames must be
+*        contained in this device's address database (ATU) as a Static entry
+*        (either unicast or multicast). If the DA of the frame is not contained
+*        in this device's address database, the frame will be not be processed as
+*        a Frame-to-Regter frame.
+*        When DA Check is disabled, the DA of Remote Management frames is not
+*        validated before processing the frame.
+*
+* INPUTS:
+*        en - GT_TRUE to enable DA Check,
+*             GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetRMDACheck
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL         en
+);
+
+/*******************************************************************************
+* gsysGetRMDACheck
+*
+* DESCRIPTION:
+*        Check the DA on Remote Management frames.
+*        When DA Check is enabled, the DA of Remote Management frames must be
+*        contained in this device's address database (ATU) as a Static entry
+*        (either unicast or multicast). If the DA of the frame is not contained
+*        in this device's address database, the frame will be not be processed as
+*        a Frame-to-Regter frame.
+*        When DA Check is disabled, the DA of Remote Management frames is not
+*        validated before processing the frame.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if DA Check is enabled,
+*             GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetRMDACheck
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL     *en
+);
+
+/*******************************************************************************
+* gsysSetHeaderType
+*
+* DESCRIPTION:
+*   To set Header Type. These bits are used to configure the bits that are placed
+*   into the Egress Header when it is enabled on a port (Port offset 0x04)
+*   as follows:
+*     00 = Original Header ï¿½ for backwards compatibility to UniMACï¿½s that look at
+*          Header byte 1 bits[4:2] and byte 2 bits [3:0]
+*     01 = Single chip MGMT Header ï¿½ for compatibility to Marvell Fast Ethernet
+*          switches that support Spanning Tree without DSA Tags
+*     10 = Trunk Header ï¿½ used together with the DSA Tags to perform Remote Switching
+*     11 = Reserved for future use.
+*
+* INPUTS:
+*        hdType
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetHeaderType
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U16       hdType
+);
+
+/*******************************************************************************
+* gsysGetHeaderType
+*
+* DESCRIPTION:
+*   To get Header Type. These bits are used to configure the bits that are placed
+*   into the Egress Header when it is enabled on a port (Port offset 0x04)
+*   as follows:
+*     00 = Original Header ï¿½ for backwards compatibility to UniMACï¿½s that look at
+*          Header byte 1 bits[4:2] and byte 2 bits [3:0]
+*     01 = Single chip MGMT Header ï¿½ for compatibility to Marvell Fast Ethernet
+*          switches that support Spanning Tree without DSA Tags
+*     10 = Trunk Header ï¿½ used together with the DSA Tags to perform Remote Switching
+*     11 = Reserved for future use.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        hdType
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetHeaderType
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U16     *hdType
+);
+
+/*******************************************************************************
+* gsysSetRMEnable
+*
+* DESCRIPTION:
+*        Enable or disable Remote Management feature. This feature can be enabled
+*        only on one port (see gsysSetRMPort API).
+*
+* INPUTS:
+*        en - GT_TRUE to enable Remote Management feature,
+*             GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        Obsolete. Please uses gsysSetRMUMode API, instead.
+*
+*******************************************************************************/
+GT_STATUS gsysSetRMEnable
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL         en
+);
+
+/*******************************************************************************
+* gsysGetRMEnable
+*
+* DESCRIPTION:
+*        Enable or disable Remote Management feature. This feature can be enabled
+*        only on one port (see gsysSetRMPort API).
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if Remote Management feature is enabled,
+*             GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        Obsolete. Please uses gsysGetRMUMode API, instead.
+*
+*******************************************************************************/
+GT_STATUS gsysGetRMEnable
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL     *en
+);
+
+/*******************************************************************************
+* gsysSetCtrMode
+*
+* DESCRIPTION:
+*        Set Counter Modes. These bits control the operating modes of the two of
+*        the Portï¿½s MIB counters.
+*
+* INPUTS:
+*        ctrMode - Counter mode
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM     - on bad parameter
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetCtrMode
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U16       ctrMode
+);
+
+/*******************************************************************************
+* gsysGetCtrMode
+*
+* DESCRIPTION:
+*        Get Counter Modes. These bits control the operating modes of the two of
+*        the Portï¿½s MIB counters.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        ctrMode - Counter mode
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetCtrMode
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U16       *ctrMode
+);
+
+/*******************************************************************************
+* gsysSetRsvd2CpuEnables2X
+*
+* DESCRIPTION:
+*        Reserved DA Enables for the form of 01:80:C2:00:00:2x.
+*        When the Rsvd2Cpu(gsysSetRsvd2Cpu) is set to a one, the 16 reserved
+*        multicast DA addresses, whose bit in this register are also set to a one,
+*        are treadted as MGMT frames. All the reserved DA's take the form
+*        01:80:C2:00:00:2x. When x = 0x0, bit 0 of this register is tested.
+*        When x = 0x2, bit 2 of this field is tested and so on.
+*        If the tested bit in this register is cleared to a zero, the frame will
+*        be treated as a normal (non-MGMT) frame.
+*
+* INPUTS:
+*        enBits - bit vector of enabled Reserved Multicast.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetRsvd2CpuEnables2X
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U16        enBits
+);
+
+/*******************************************************************************
+* gsysGetRsvd2CpuEnables2X
+*
+* DESCRIPTION:
+*        Reserved DA Enables for the form of 01:80:C2:00:00:2x.
+*        When the Rsvd2Cpu(gsysSetRsvd2Cpu) is set to a one, the 16 reserved
+*        multicast DA addresses, whose bit in this register are also set to a one,
+*        are treadted as MGMT frames. All the reserved DA's take the form
+*        01:80:C2:00:00:2x. When x = 0x0, bit 0 of this register is tested.
+*        When x = 0x2, bit 2 of this field is tested and so on.
+*        If the tested bit in this register is cleared to a zero, the frame will
+*        be treated as a normal (non-MGMT) frame.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        enBits - bit vector of enabled Reserved Multicast.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetRsvd2CpuEnables2X
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U16      *enBits
+);
+
+/*******************************************************************************
+* gsysSetLoopbackFilter
+*
+* DESCRIPTION:
+*        Loopback Filter.
+*        When Loopback Filter is enabled,Forward DSA frames that ingress a DSA port
+*        that came from the same Src_Dev will be filtered to the same Src_Port,
+*        i.e., the frame will not be allowed to egress the source port on the
+*        source device as indicated in the DSA Forward's Tag.
+*
+* INPUTS:
+*        en - GT_TRUE to enable LoopbackFilter, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetLoopbackFilter
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+);
+
+/*******************************************************************************
+* gsysGetLoopbackFilter
+*
+* DESCRIPTION:
+*        Loopback Filter.
+*        When Loopback Filter is enabled,Forward DSA frames that ingress a DSA port
+*        that came from the same Src_Dev will be filtered to the same Src_Port,
+*        i.e., the frame will not be allowed to egress the source port on the
+*        source device as indicated in the DSA Forward's Tag.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if LoopbackFilter is enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetLoopbackFilter
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+);
+
+/*******************************************************************************
+* gsysSetFloodBC
+*
+* DESCRIPTION:
+*        Flood Broadcast.
+*        When Flood Broadcast is enabled, frames with the Broadcast destination
+*        address will flood out all the ports regardless of the setting of the
+*        port's Egress Floods mode (see gprtSetEgressFlood API). VLAN rules and
+*        other switch policy still applies to these Broadcast frames.
+*        When this feature is disabled, frames with the Broadcast destination
+*        address are considered Multicast frames and will be affected by port's
+*        Egress Floods mode.
+*
+* INPUTS:
+*        en - GT_TRUE to enable Flood Broadcast, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetFloodBC
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+);
+
+/*******************************************************************************
+* gsysGetFloodBC
+*
+* DESCRIPTION:
+*        Flood Broadcast.
+*        When Flood Broadcast is enabled, frames with the Broadcast destination
+*        address will flood out all the ports regardless of the setting of the
+*        port's Egress Floods mode (see gprtSetEgressFlood API). VLAN rules and
+*        other switch policy still applies to these Broadcast frames.
+*        When this feature is disabled, frames with the Broadcast destination
+*        address are considered Multicast frames and will be affected by port's
+*        Egress Floods mode.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if Flood Broadcast is enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetFloodBC
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+);
+
+/*******************************************************************************
+* gsysSetRemove1PTag
+*
+* DESCRIPTION:
+*        Remove One Provider Tag.
+*        When this feature is enabled and a port is configured as a Provider Port
+*        (see gprtSetFrameMode API), recursive Provider Tag stripping will NOT be
+*        performed. Only the first Provider Tag found on the frame will be
+*        extracted and removed. Its extracted data will be used for switching.
+*        When it's disabled and a port is configured as a Provider Port, recursive
+*        Provider Tag stripping will be performed. The first Provider Tag's data
+*        will be extracted and used for switching, and then all subsequent Provider
+*        Tags found in the frame will also be removed. This will only occur if the
+*        port's PortEType (see gprtSetPortEType API) is not 0x8100.
+*
+* INPUTS:
+*        en - GT_TRUE to enable Remove One Provider Tag, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetRemove1PTag
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+);
+
+/*******************************************************************************
+* gsysGetRemove1PTag
+*
+* DESCRIPTION:
+*        Remove One Provider Tag.
+*        When this feature is enabled and a port is configured as a Provider Port
+*        (see gprtSetFrameMode API), recursive Provider Tag stripping will NOT be
+*        performed. Only the first Provider Tag found on the frame will be
+*        extracted and removed. Its extracted data will be used for switching.
+*        When it's disabled and a port is configured as a Provider Port, recursive
+*        Provider Tag stripping will be performed. The first Provider Tag's data
+*        will be extracted and used for switching, and then all subsequent Provider
+*        Tags found in the frame will also be removed. This will only occur if the
+*        port's PortEType (see gprtSetPortEType API) is not 0x8100.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if Remove One Provider Tag is enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetRemove1PTag
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL        *en
+);
+
+/*******************************************************************************
+* gsysSetTagFlowControl
+*
+* DESCRIPTION:
+*        Use and generate source port Flow Control status for Cross-Chip Flow
+*        Control.
+*        When this feature is enabled, bit 17 of the DSA Tag Forward frames is
+*        defined to be Src_FC and it is added to these frames when generated and
+*        it is inspected on these frames when received. The QC will use the Src_FC
+*        bit on DSA ports instead of the DSA port's Flow Control mode bit for the
+*        QC Flow Control algorithm.
+*        When it is disabled, bit 17 of the DSA Tag Forward frames is defined to
+*        be Reserved and it will be zero on these frames when generated and it
+*        will not be used on these frames when received (this is a backwards
+*        compatibility mode).
+*
+* INPUTS:
+*        en - GT_TRUE to enable Tag Flow Control, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetTagFlowControl
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+);
+
+/*******************************************************************************
+* gsysGetTagFlowControl
+*
+* DESCRIPTION:
+*        Use and generate source port Flow Control status for Cross-Chip Flow
+*        Control.
+*        When this feature is enabled, bit 17 of the DSA Tag Forward frames is
+*        defined to be Src_FC and it is added to these frames when generated and
+*        it is inspected on these frames when received. The QC will use the Src_FC
+*        bit on DSA ports instead of the DSA port's Flow Control mode bit for the
+*        QC Flow Control algorithm.
+*        When it is disabled, bit 17 of the DSA Tag Forward frames is defined to
+*        be Reserved and it will be zero on these frames when generated and it
+*        will not be used on these frames when received (this is a backwards
+*        compatibility mode).
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if Tag Flow Control is enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetTagFlowControl
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+);
+
+/*******************************************************************************
+* gsysSetAlwaysUseVTU
+*
+* DESCRIPTION:
+*        Always use VTU.
+*        When this feature is enabled, VTU hit data will be used to map frames
+*        even if 802.1Q is Disabled on the port.
+*        When it's disabled, data will be ignored when mapping frames on ports
+*        where 802.1Q is Disabled.
+*
+* INPUTS:
+*        en - GT_TRUE to use VTU always, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetAlwaysUseVTU
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+);
+
+/*******************************************************************************
+* gsysGetAlwaysUseVTU
+*
+* DESCRIPTION:
+*        Always use VTU.
+*        When this feature is enabled, VTU hit data will be used to map frames
+*        even if 802.1Q is Disabled on the port.
+*        When it's disabled, data will be ignored when mapping frames on ports
+*        where 802.1Q is Disabled.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if VTU is always used, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetAlwaysUseVTU
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+);
+
+/*******************************************************************************
+* gsysSetQVlansOnly
+*
+* DESCRIPTION:
+*        802.1Q VLANs Only.
+*        When this feature is disabled, the egress mapping of the frame is
+*        limited by the frame's VID (using the MemberTag data found in the VTU)
+*        together with the port based VLANs (using the source port's PortVLANTable,
+*        gvlnSetPortVlanPorts API). The two methods are always used together in
+*        this mode.
+*        When this feature is enabled, the egress mapping of the frame is limitied
+*        by the frame's VID only, if the VID was found in the VTU. If the frame's
+*        VID was not found in the VTU the egress mapping of the frame is limited
+*        by the source port's PortVLANTable only. The two methods are never
+*        used together in this mode.
+*
+* INPUTS:
+*        en - GT_TRUE to use 802.1Q Vlan Only feature, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetQVlansOnly
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+);
+
+/*******************************************************************************
+* gsysGetQVlansOnly
+*
+* DESCRIPTION:
+*        802.1Q VLANs Only.
+*        When this feature is disabled, the egress mapping of the frame is
+*        limited by the frame's VID (using the MemberTag data found in the VTU)
+*        together with the port based VLANs (using the source port's PortVLANTable,
+*        gvlnSetPortVlanPorts API). The two methods are always used together in
+*        this mode.
+*        When this feature is enabled, the egress mapping of the frame is limitied
+*        by the frame's VID only, if the VID was found in the VTU. If the frame's
+*        VID was not found in the VTU the egress mapping of the frame is limited
+*        by the source port's PortVLANTable only. The two methods are never
+*        used together in this mode.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if 802.1Q Vlan Only feature is enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetQVlansOnly
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+);
+
+/*******************************************************************************
+* gsysSet5BitPort
+*
+* DESCRIPTION:
+*        Use 5 bits for Port data in the Port VLAN Table (PVT).
+*        When this feature is enabled, the 9 bits used to access the PVT memory is:
+*            Addr[8:5] = Source Device[3:0] or Device Number[3:0]
+*            Addr[4:0] = Source Port/Trunk[4:0]
+*        When it's disabled, the 9 bits used to access the PVT memory is:
+*            Addr[8:4] = Source Device[4:0] or Device Number[4:0]
+*            Addr[3:0] = Source Port/Trunk[3:0]
+*
+* INPUTS:
+*        en - GT_TRUE to use 5 bit as a Source port in PVT, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSet5BitPort
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+);
+
+/*******************************************************************************
+* gsysGet5BitPort
+*
+* DESCRIPTION:
+*        Use 5 bits for Port data in the Port VLAN Table (PVT).
+*        When this feature is enabled, the 9 bits used to access the PVT memory is:
+*            Addr[8:5] = Source Device[3:0] or Device Number[3:0]
+*            Addr[4:0] = Source Port/Trunk[4:0]
+*        When it's disabled, the 9 bits used to access the PVT memory is:
+*            Addr[8:4] = Source Device[4:0] or Device Number[4:0]
+*            Addr[3:0] = Source Port/Trunk[3:0]
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if 5 bit is used as a Source Port in PVT, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGet5BitPort
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+);
+
+/*******************************************************************************
+* gsysSetSDETPolarity
+*
+* DESCRIPTION:
+*        SDET (Signal Detect) Polarity select bits for each port.
+*        Bit 10 is for Port 10, bit 9 is for Port 9, etc. SDET is used to help
+*        determine link on fiber ports. This bit affects the active level of a
+*        port's SDET pins as follows:
+*            0 = SDET is active low. A low level on the port's SDET pin is
+*                required for link to occur.
+*            1 = SDET is active high. A high level on the portï¿½s SDET pin is
+*                required for link to occur.
+*        SDET is used when the port is configured as a fiber port. In all other
+*        port modes the SDET pins are ignored and these bits have no effect.
+*
+* INPUTS:
+*        sdetVec - SDET Polarity for each port in Vector format
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if sdetVec is invalid
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetSDETPolarity
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U32          sdetVec
+);
+
+/*******************************************************************************
+* gsysSetSDETPolarity
+*
+* DESCRIPTION:
+*        SDET (Signal Detect) Polarity select bits for each port.
+*        Bit 10 is for Port 10, bit 9 is for Port 9, etc. SDET is used to help
+*        determine link on fiber ports. This bit affects the active level of a
+*        port's SDET pins as follows:
+*            0 = SDET is active low. A low level on the port's SDET pin is
+*                required for link to occur.
+*            1 = SDET is active high. A high level on the portï¿½s SDET pin is
+*                required for link to occur.
+*        SDET is used when the port is configured as a fiber port. In all other
+*        port modes the SDET pins are ignored and these bits have no effect.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        sdetVec - SDET Polarity for each port in Vector format
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetSDETPolarity
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U32      *sdetVec
+);
+
+
+/* gtBrgVlan.c for 2.6 release */
+
+/*******************************************************************************
+* gvlnSetNoEgrPolicy
+*
+* DESCRIPTION:
+*        No Egress Policy. When this bit is set to a one Egress 802.1Q Secure and
+*        Check discards are not performed. This mode allowsa non-802.1Q enabled
+*        port to send a frame to an 802.1Q enabled port that is configured in the
+*        Secure or Check 802.1Q mode. In this situation the frames will egress
+*        even if the VID assigned to the frame is not found in the VTU.
+*
+* INPUTS:
+*        mode - no egress policy mode
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gvlnSetNoEgrPolicy
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        mode
+);
+
+
+/*******************************************************************************
+* gvlnGetNoEgrPolicy
+*
+* DESCRIPTION:
+*        No Egress Policy. When this bit is set to a one Egress 802.1Q Secure and
+*        Check discards are not performed. This mode allowsa non-802.1Q enabled
+*        port to send a frame to an 802.1Q enabled port that is configured in the
+*        Secure or Check 802.1Q mode. In this situation the frames will egress
+*        even if the VID assigned to the frame is not found in the VTU.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        mode - no egress policy mode
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gvlnGetNoEgrPolicy
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL        *mode
+);
+
+/*******************************************************************************
+* gwdSetRMUTimeOut
+*
+* DESCRIPTION:
+*        Remote Management Timeout. When this bit is set to a one the Remote
+*        Management Unit(RMU) will timeout on Wait on Bit commands. If the bit that
+*        is being tested has not gone to the specified value after 1 sec. has elapsed
+*        the Wait on Bit command will be terminated and the Response frame will be
+*        sent without any further processing.
+*
+*        When this bit is cleared to a zero the Wait on Bit command will wait
+*        until the bit that is being tested has changed to the specified value.
+*
+* INPUTS:
+*        en   - GT_TRUE to enable RMU Timeout
+*               GT_FALUSE to disable
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gwdSetRMUTimeOut
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_BOOL        en
+);
+
+/*******************************************************************************
+* gwdGetRMUTimeOut
+*
+* DESCRIPTION:
+*        Remote Management Timeout. When this bit is set to a one the Remote
+*        Management Unit(RMU) will timeout on Wait on Bit commands. If the bit that
+*        is being tested has not gone to the specified value after 1 sec. has elapsed
+*        the Wait on Bit command will be terminated and the Response frame will be
+*        sent without any further processing.
+*
+*        When this bit is cleared to a zero the Wait on Bit command will wait
+*        until the bit that is being tested has changed to the specified value.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en   - GT_TRUE to enable RMU Timeout
+*               GT_FALUSE, otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gwdGetRMUTimeOut
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL        *en
+);
+
+/*******************************************************************************
+* gwdGetEgressWDEvent
+*
+* DESCRIPTION:
+*        If any port's egress logic detects an egress watch dog issue, this bit
+*        will be set to a one, regardless of the setting of the GT_WD_EGRESS in
+*        gwdSetEvent function.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        event - GT_TRUE, if egress logic has detected any egress watch dog issue
+*                GT_FALUSE, otherwise
+*
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gwdGetEgressWDEvent
+(
+    IN  GT_QD_DEV        *dev,
+    OUT GT_BOOL            *event
+);
+
+
+/*******************************************************************************
+* gsysSetQoSWeight
+*
+* DESCRIPTION:
+*       Programmable Round Robin Weights.
+*        Each port has 4 output Queues. Queue 3 has the highest priority and
+*        Queue 0 has the lowest priority. When a scheduling mode of port is
+*        configured as Weighted Round Robin queuing mode, the access sequece of the
+*        Queue is 3,2,3,1,3,2,3,0,3,2,3,1,3,2,3 by default.
+*        This sequence can be configured with this API.
+*
+* INPUTS:
+*       weight - access sequence of the queue
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gsysSetQoSWeight
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_QoS_WEIGHT    *weight
+);
+
+/*******************************************************************************
+* gsysGetQoSWeight
+*
+* DESCRIPTION:
+*       Programmable Round Robin Weights.
+*        Each port has 4 output Queues. Queue 3 has the highest priority and
+*        Queue 0 has the lowest priority. When a scheduling mode of port is
+*        configured as Weighted Round Robin queuing mode, the access sequece of the
+*        Queue is 3,2,3,1,3,2,3,0,3,2,3,1,3,2,3 by default.
+*        This routine retrieves the access sequence of the Queue.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       weight - access sequence of the queue
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gsysGetQoSWeight
+(
+    IN  GT_QD_DEV         *dev,
+    OUT GT_QoS_WEIGHT    *weight
+);
+
+
+/*
+ *    gtPortCtrl.c
+*/
+
+/*******************************************************************************
+* gsysSetJumboMode
+*
+* DESCRIPTION:
+*       This routine Set the max frame size allowed to be received and transmitted
+*        from or to a given port.
+*
+* INPUTS:
+*        port - the logical port number
+*       mode - GT_JUMBO_MODE (1522, 2048, or 10240)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gsysSetJumboMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_JUMBO_MODE   mode
+);
+
+/*******************************************************************************
+* gsysGetJumboMode
+*
+* DESCRIPTION:
+*       This routine gets the max frame size allowed to be received and transmitted
+*        from or to a given port.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_JUMBO_MODE (1522, 2048, or 10240)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gsysGetJumboMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_JUMBO_MODE   *mode
+);
+
+/*
+ *  gtPhyCtrl.c
+*/
+/*******************************************************************************
+* gprtGetEnergyDetect
+*
+* DESCRIPTION:
+*       Energy Detect power down mode enables or disables the PHY to wake up on
+*        its own by detecting activity on the CAT 5 cable.
+*
+* INPUTS:
+*         port - The logical port number
+*
+* OUTPUTS:
+*       mode - GT_EDETECT_MODE type
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetEnergyDetect
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_EDETECT_MODE   *mode
+);
+
+/*******************************************************************************
+* gprtSetEnergyDetect
+*
+* DESCRIPTION:
+*       Energy Detect power down mode enables or disables the PHY to wake up on
+*        its own by detecting activity on the CAT 5 cable.
+*
+* INPUTS:
+*         port - The logical port number
+*       mode - GT_EDETECT_MODE type
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+GT_STATUS gprtSetEnergyDetect
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    IN  GT_EDETECT_MODE   mode
+);
+
+
+/*
+ *    gtSysCtrl.c
+*/
+
+/*******************************************************************************
+* gsysSetRMUMode
+*
+* DESCRIPTION:
+*        Set Rmote Management Unit Mode: disable, enable on port 4, 5 or 6, or enable
+*        on port 9 or 10. Devices, such as 88E6097, support RMU on port 9 and 10,
+*        while other devices, such as 88E6165, support RMU on port 4, 5 and 6. So,
+*        please refer to the device datasheet for detail.
+*        When RMU is enabled and this device receives a Remote Management Request
+*        frame directed to this device, the frame will be processed and a Remote
+*        Management Response frame will be generated and sent out.
+*
+*        Note: enabling RMU has no effect if the Remote Management port is in half
+*        duplex mode. The port's FrameMode must be DSA or EtherType DSA as well.
+*
+* INPUTS:
+*        rmu - GT_RMU structure
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM     - on bad parameter
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetRMUMode
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_RMU        *rmu
+);
+
+/*******************************************************************************
+* gsysGetRMUMode
+*
+* DESCRIPTION:
+*        Get Rmote Management Unit Mode: disable, enable on port 4, 5 or 6, or enable
+*        on port 9 or 10. Devices, such as 88E6097, support RMU on port 9 and 10,
+*        while other devices, such as 88E6165, support RMU on port 4, 5 and 6. So,
+*        please refer to the device datasheet for detail.
+*        When RMU is enabled and this device receives a Remote Management Request
+*        frame directed to this device, the frame will be processed and a Remote
+*        Management Response frame will be generated and sent out.
+*
+*        Note: enabling RMU has no effect if the Remote Management port is in half
+*        duplex mode. The port's FrameMode must be DSA or EtherType DSA as well.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        rmu - GT_RMU structure
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetRMUMode
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_RMU        *rmu
+);
+
+/*******************************************************************************
+* gsysPort2Lport
+*
+* DESCRIPTION:
+*        This routine converts physical port number to logical port number.
+*
+* INPUTS:
+*        port - physical port number
+*
+* OUTPUTS:
+*        lport - logical port number
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysPort2Lport
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32         port,
+    OUT GT_LPORT    *lport
+);
+
+/*******************************************************************************
+* gsysLport2Port
+*
+* DESCRIPTION:
+*        This routine converts logical port number to physical port number.
+*
+* INPUTS:
+*        lport - logical port number
+*
+* OUTPUTS:
+*        port - physical port number
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysLport2Port
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    lport,
+    OUT GT_U32         *port
+);
+
+/*******************************************************************************
+* gsysPortvec2Lportvec
+*
+* DESCRIPTION:
+*        This routine converts physical port vector to logical port vector.
+*
+* INPUTS:
+*        portvec - physical port vector
+*
+* OUTPUTS:
+*        lportvec - logical port vector
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysPortvec2Lportvec
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32        portvec,
+    OUT GT_U32         *lportvec
+);
+
+/*******************************************************************************
+* gsysLportvec2Portvec
+*
+* DESCRIPTION:
+*        This routine converts logical port vector to physical port vector.
+*
+* INPUTS:
+*        lportvec - logical port vector
+*
+* OUTPUTS:
+*        portvec - physical port vector
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysLportvec2Portvec
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32        lportvec,
+    OUT GT_U32         *portvec
+);
+
+
+/*
+ * gtPIRL.c
+ */
+
+/*******************************************************************************
+* gpirlSetCurTimeUpInt
+*
+* DESCRIPTION:
+*       This function sets the current time update interval.
+*        Please contact FAE for detailed information.
+*
+* INPUTS:
+*       upInt - updata interval (0 ~ 7)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gpirlSetCurTimeUpInt
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                upInt
+);
+
+
+/*
+ * gtPIRL2.c
+ */
+
+/*******************************************************************************
+* gpirl2SetCurTimeUpInt
+*
+* DESCRIPTION:
+*       This function sets the current time update interval.
+*        Please contact FAE for detailed information.
+*
+* INPUTS:
+*       upInt - updata interval (0 ~ 7)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gpirl2SetCurTimeUpInt
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                upInt
+);
+
+
+/*
+ * gtPTP.c
+ */
+
+/*******************************************************************************
+* gptpSetConfig
+*
+* DESCRIPTION:
+*       This routine writes PTP configuration parameters.
+*
+* INPUTS:
+*        ptpData  - PTP configuration parameters.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetConfig
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PTP_CONFIG    *ptpData
+);
+
+/*******************************************************************************
+* gptpGetConfig
+*
+* DESCRIPTION:
+*       This routine reads PTP configuration parameters.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        ptpData  - PTP configuration parameters.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetConfig
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_PTP_CONFIG    *ptpData
+);
+
+/*******************************************************************************
+* gptpSetGlobalConfig
+*
+* DESCRIPTION:
+*       This routine writes PTP global configuration parameters.
+*
+* INPUTS:
+*        ptpData  - PTP global configuration parameters.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetGlobalConfig
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PTP_GLOBAL_CONFIG    *ptpData
+);
+
+/*******************************************************************************
+* gptpGetGlobalConfig
+*
+* DESCRIPTION:
+*       This routine reads PTP global configuration parameters.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        ptpData  - PTP global configuration parameters.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetGlobalConfig
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_PTP_GLOBAL_CONFIG    *ptpData
+);
+
+/*******************************************************************************
+* gptpSetPortConfig
+*
+* DESCRIPTION:
+*       This routine writes PTP port configuration parameters.
+*
+* INPUTS:
+*        ptpData  - PTP port configuration parameters.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetPortConfig
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_PTP_PORT_CONFIG    *ptpData
+);
+
+/*******************************************************************************
+* gptpGetPortConfig
+*
+* DESCRIPTION:
+*       This routine reads PTP configuration parameters for a port.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        ptpData  - PTP port configuration parameters.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetPortConfig
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    OUT GT_PTP_PORT_CONFIG    *ptpData
+);
+
+/*******************************************************************************
+* gptpSetPTPEn
+*
+* DESCRIPTION:
+*       This routine enables or disables PTP.
+*
+* INPUTS:
+*        en - GT_TRUE to enable PTP, GT_FALSE to disable PTP
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetPTPEn
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL        en
+);
+
+/*******************************************************************************
+* gptpGetPTPEn
+*
+* DESCRIPTION:
+*       This routine checks if PTP is enabled.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if enabled, GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetPTPEn
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *en
+);
+
+/*******************************************************************************
+* gptpSetPortTsMode
+*
+* DESCRIPTION:
+*       This routine set PTP arrive 0 TS mode on a port.
+*
+* INPUTS:
+*        tsMod - GT_PTP_TS_MODE_IN_REG
+*                GT_PTP_TS_MODE_IN_RESERVED_2
+*                GT_PTP_TS_MODE_IN_FRAME_END
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetPortTsMode
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_PTP_TS_MODE  tsMode
+);
+
+/*******************************************************************************
+* gptpGetPortTsMode
+*
+* DESCRIPTION:
+*       This routine get PTP arrive 0 TS mode on a port.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        tsMod - GT_PTP_TS_MODE_IN_REG
+*                GT_PTP_TS_MODE_IN_RESERVED_2
+*                GT_PTP_TS_MODE_IN_FRAME_END
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetPortTsMode
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    OUT GT_PTP_TS_MODE  *tsMode
+);
+
+/*******************************************************************************
+* gptpSetPortPTPEn
+*
+* DESCRIPTION:
+*       This routine enables or disables PTP on a port.
+*
+* INPUTS:
+*        en - GT_TRUE to enable PTP, GT_FALSE to disable PTP
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetPortPTPEn
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_BOOL        en
+);
+
+/*******************************************************************************
+* gptpGetPortPTPEn
+*
+* DESCRIPTION:
+*       This routine checks if PTP is enabled on a port.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if enabled, GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetPortPTPEn
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    OUT GT_BOOL        *en
+);
+
+
+/*******************************************************************************
+* gptpGetPTPInt
+*
+* DESCRIPTION:
+*       This routine gets PTP interrupt status for each port.
+*        The PTP Interrupt bit gets set for a given port when an incoming PTP
+*        frame is time stamped and PTPArrIntEn for that port is set to 0x1.
+*        Similary PTP Interrupt bit gets set for a given port when an outgoing
+*        PTP frame is time stamped and PTPDepIntEn for that port is set to 0x1.
+*        This bit gets cleared upon software reading and clearing the corresponding
+*        time counter valid bits that are valid for that port.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        ptpInt     - interrupt status for each port (bit 0 for port 0, bit 1 for port 1, etc.)
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetPTPInt
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U32        *ptpInt
+);
+
+/*******************************************************************************
+* gptpGetPTPGlobalTime
+*
+* DESCRIPTION:
+*       This routine gets the global timer value that is running off of the free
+*        running switch core clock.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        ptpTime    - PTP global time
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetPTPGlobalTime
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U32        *ptpTime
+);
+
+/*******************************************************************************
+* gptpGetTimeStamped
+*
+* DESCRIPTION:
+*        This routine retrieves the PTP port status that includes time stamp value
+*        and sequce Id that are captured by PTP logic for a PTP frame that needs
+*        to be time stamped.
+*
+* INPUTS:
+*       port         - logical port number.
+*       timeToRead    - Arr0, Arr1, or Dep time (GT_PTP_TIME enum type)
+*
+* OUTPUTS:
+*        ptpStatus    - PTP port status
+*
+* RETURNS:
+*       GT_OK         - on success
+*       GT_FAIL     - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetTimeStamped
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN    GT_PTP_TIME    timeToRead,
+    OUT GT_PTP_TS_STATUS    *ptpStatus
+);
+
+/*******************************************************************************
+* gptpResetTimeStamp
+*
+* DESCRIPTION:
+*        This routine resets PTP Time valid bit so that PTP logic can time stamp
+*        a next PTP frame that needs to be time stamped.
+*
+* INPUTS:
+*       port         - logical port number.
+*       timeToReset    - Arr0, Arr1, or Dep time (GT_PTP_TIME enum type)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK         - on success
+*       GT_FAIL     - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpResetTimeStamp
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN    GT_PTP_TIME    timeToReset
+);
+
+/*******************************************************************************
+* gptpGetReg
+*
+* DESCRIPTION:
+*       This routine reads PTP register.
+*
+* INPUTS:
+*       port         - logical port number.
+*       regOffset    - register to read
+*
+* OUTPUTS:
+*        data        - register data
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetReg
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN  GT_U32        regOffset,
+    OUT GT_U32        *data
+);
+
+/*******************************************************************************
+* gptpSetReg
+*
+* DESCRIPTION:
+*       This routine writes data to PTP register.
+*
+* INPUTS:
+*       port         - logical port number
+*       regOffset    - register to be written
+*        data        - data to be written
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetReg
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN  GT_U32        regOffset,
+    IN  GT_U32        data
+);
+
+
+#ifdef CONFIG_AVB_FPGA
+
+/*******************************************************************************
+* gptpSetFPGAIntStatus
+*
+* DESCRIPTION:
+*       This routine sets interrupt status of PTP logic.
+*
+* INPUTS:
+*        intStatus    - PTP Int Status
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetFPGAIntStatus
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32    intStatus
+);
+
+
+/*******************************************************************************
+* gptpGetFPGAIntStatus
+*
+* DESCRIPTION:
+*       This routine gets interrupt status of PTP logic.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        intStatus    - PTP Int Status
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetFPGAIntStatus
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32    *intStatus
+);
+
+
+/*******************************************************************************
+* gptpSetFPGAIntEn
+*
+* DESCRIPTION:
+*       This routine enables PTP interrupt.
+*
+* INPUTS:
+*        intEn    - enable/disable PTP interrupt (1 to enable, 0 to disable)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetFPGAIntEn
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32    intEn
+);
+
+/*******************************************************************************
+* gptpGetClockSource
+*
+* DESCRIPTION:
+*       This routine gets PTP Clock source mode.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        clkSrc    - PTP clock source (A/D Device or FPGA)
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetClockSource
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_PTP_CLOCK_SRC     *clkSrc
+);
+
+/*******************************************************************************
+* gptpSetClockSource
+*
+* DESCRIPTION:
+*       This routine sets PTP Clock source mode.
+*
+* INPUTS:
+*        clkSrc    - PTP clock source (A/D Device or FPGA)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetClockSource
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PTP_CLOCK_SRC     clkSrc
+);
+
+/*******************************************************************************
+* gptpGetP9Mode
+*
+* DESCRIPTION:
+*       This routine gets Port 9 Mode.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        mode - Port 9 mode (GT_PTP_P9_MODE enum type)
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetP9Mode
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_PTP_P9_MODE     *mode
+);
+
+/*******************************************************************************
+* gptpSetP9Mode
+*
+* DESCRIPTION:
+*       This routine sets Port 9 Mode.
+*
+* INPUTS:
+*        mode - Port 9 mode (GT_PTP_P9_MODE enum type)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetP9Mode
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PTP_P9_MODE     mode
+);
+
+/*******************************************************************************
+* gptpReset
+*
+* DESCRIPTION:
+*       This routine performs software reset for PTP logic.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpReset
+(
+    IN  GT_QD_DEV     *dev
+);
+
+
+/*******************************************************************************
+* gptpGetCycleAdjustEn
+*
+* DESCRIPTION:
+*       This routine checks if PTP Duty Cycle Adjustment is enabled.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        adjEn    - GT_TRUE if enabled, GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetCycleAdjustEn
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *adjEn
+);
+
+
+/*******************************************************************************
+* gptpSetCycleAdjustEn
+*
+* DESCRIPTION:
+*       This routine enables/disables PTP Duty Cycle Adjustment.
+*
+* INPUTS:
+*        adjEn    - GT_TRUE to enable, GT_FALSE to disable
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetCycleAdjustEn
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL        adjEn
+);
+
+
+/*******************************************************************************
+* gptpGetCycleAdjust
+*
+* DESCRIPTION:
+*       This routine gets clock duty cycle adjustment value.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        adj    - adjustment value (GT_PTP_CLOCK_ADJUSTMENT structure)
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetCycleAdjust
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_PTP_CLOCK_ADJUSTMENT        *adj
+);
+
+/*******************************************************************************
+* gptpSetCycleAdjust
+*
+* DESCRIPTION:
+*       This routine sets clock duty cycle adjustment value.
+*
+* INPUTS:
+*        adj    - adjustment value (GT_PTP_CLOCK_ADJUSTMENT structure)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetCycleAdjust
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PTP_CLOCK_ADJUSTMENT        *adj
+);
+
+/*******************************************************************************
+* gptpGetPLLEn
+*
+* DESCRIPTION:
+*       This routine checks if PLL is enabled.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        en        - GT_TRUE if enabled, GT_FALSE otherwise
+*        freqSel    - PLL Frequency Selection (default 0x3 - 22.368MHz)
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       PLL Frequence selection is based on the Clock Recovery PLL device.
+*        IDT MK1575-01 is the default PLL device.
+*
+*******************************************************************************/
+GT_STATUS gptpGetPLLEn
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *en,
+    OUT GT_U32        *freqSel
+);
+
+/*******************************************************************************
+* gptpSetPLLEn
+*
+* DESCRIPTION:
+*       This routine enables/disables PLL device.
+*
+* INPUTS:
+*        en        - GT_TRUE to enable, GT_FALSE to disable
+*        freqSel    - PLL Frequency Selection (default 0x3 - 22.368MHz)
+*                  Meaningful only when enabling PLL device
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       PLL Frequence selection is based on the Clock Recovery PLL device.
+*        IDT MK1575-01 is the default PLL device.
+*
+*******************************************************************************/
+GT_STATUS gptpSetPLLEn
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL        en,
+    IN  GT_U32        freqSel
+);
+#endif
+
+/* Amber APIs */
+
+/* gtBrgFdb.c */
+
+/*******************************************************************************
+* gfdbGetMacAvb
+*
+* DESCRIPTION:
+*        ATU MAC entry in AVB mode.
+*        When enabled, ATU entries operate in AVB mode:
+*
+*        GT_ATU_UC_STATE - support
+*            GT_UC_NO_PRI_STATIC_AVB_ENTRY, and
+*            GT_UC_STATIC_AVB_ENTRY
+*
+*        GT_ATU_MC_STATE - support
+*            GT_MC_STATIC_AVB_ENTRY, and
+*            GT_MC_PRIO_STATIC_AVB_ENTRY
+*
+*        When disabled, ATU entries operate in non-AVB mode:
+*
+*        GT_ATU_UC_STATE - support
+*            GT_UC_NO_PRI_STATIC_NRL, and
+*            GT_UC_STATIC_NRL
+*
+*        GT_ATU_MC_STATE - support
+*            GT_MC_STATIC_UNLIMITED_RATE, and
+*            GT_MC_PRIO_STATIC_UNLIMITED_RATE
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        mode  - GT_TRUE if MacAvb is enabled, GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK           - on success
+*        GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*
+*******************************************************************************/
+GT_STATUS gfdbGetMacAvb
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL     *mode
+);
+
+/*******************************************************************************
+* gfdbSetMacAvb
+*
+* DESCRIPTION:
+*        ATU MAC entry in AVB mode.
+*        When enabled, ATU entries operate in AVB mode:
+*
+*        GT_ATU_UC_STATE - support
+*            GT_UC_NO_PRI_STATIC_AVB_ENTRY, and
+*            GT_UC_STATIC_AVB_ENTRY
+*
+*        GT_ATU_MC_STATE - support
+*            GT_MC_STATIC_AVB_ENTRY, and
+*            GT_MC_PRIO_STATIC_AVB_ENTRY
+*
+*        When disabled, ATU entries operate in non-AVB mode:
+*
+*        GT_ATU_UC_STATE - support
+*            GT_UC_NO_PRI_STATIC_NRL, and
+*            GT_UC_STATIC_NRL
+*
+*        GT_ATU_MC_STATE - support
+*            GT_MC_STATIC_UNLIMITED_RATE, and
+*            GT_MC_PRIO_STATIC_UNLIMITED_RATE
+*
+* INPUTS:
+*        mode - GT_TRUE to enable MacAvb, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gfdbSetMacAvb
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        mode
+);
+
+/*******************************************************************************
+* gfdbGetPortAtuLearnLimit
+*
+* DESCRIPTION:
+*      Port's auto learning limit. When the limit is non-zero value, the number
+*        of MAC addresses that can be learned on this port are limited to the value
+*        specified in this API. When the learn limit has been reached any frame
+*        that ingresses this port with a source MAC address not already in the
+*        address database that is associated with this port will be discarded.
+*        Normal auto-learning will resume on the port as soon as the number of
+*        active unicast MAC addresses associated to this port is less than the
+*        learn limit.
+*        CPU directed ATU Load, Purge, or Move will not have any effect on the
+*        learn limit.
+*        This feature is disabled when the limit is zero.
+*        The following care is needed when enabling this feature:
+*            1) dsable learning on the ports
+*            2) flush all non-static addresses in the ATU
+*            3) define the desired limit for the ports
+*            4) re-enable learing on the ports
+*
+* INPUTS:
+*        port  - logical port number
+*
+* OUTPUTS:
+*        limit - auto learning limit ( 0 ~ 255 )
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if limit > 0xFF
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gfdbGetPortAtuLearnLimit
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT      port,
+    OUT GT_U32       *limit
+);
+
+/* gtPCSCtrl.c */
+
+/*******************************************************************************
+* gpcsGetRGMIITimingDelay
+*
+* DESCRIPTION:
+*        RGMII receive/transmit Timing Control. This api adds delay to RXCLK for
+*        IND inputs and GTXCLK for OUTD outputs when port is in RGMII mode.
+*        Change to this bit are disruptive to normal operation. Hence any changes
+*        to this register must be done only while the port's link is down.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        rxmode - GT_FALSE for default setup, GT_TRUE for adding delay to rxclk
+*        txmode - GT_FALSE for default setup, GT_TRUE for adding delay to txclk
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetRGMIITimingDelay
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *rxmode,
+    OUT GT_BOOL      *txmode
+);
+
+/*******************************************************************************
+* gpcsSetRGMIITimingDelay
+*
+* DESCRIPTION:
+*        RGMII receive/transmit Timing Control. This api adds delay to RXCLK for
+*        IND inputs and GTXCLK for OUTD outputs when port is in RGMII mode.
+*        Change to this bit are disruptive to normal operation. Hence any changes
+*        to this register must be done only while the port's link is down.
+*
+* INPUTS:
+*        port - the logical port number.
+*        rxmode - GT_FALSE for default setup, GT_TRUE for adding delay to rxclk
+*        txmode - GT_FALSE for default setup, GT_TRUE for adding delay to txclk
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gpcsSetRGMIITimingDelay
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_BOOL      rxmode,
+    IN  GT_BOOL      txmode
+);
+
+
+/* gtPortLed.c */
+
+/*******************************************************************************
+* gprtSetLED
+*
+* DESCRIPTION:
+*        This API allows to configure 4 LED sections, Pulse stretch, Blink rate,
+*        and special controls.
+*
+* INPUTS:
+*        port    - the logical port number
+*        cfg     - GT_LED_CFG value
+*        value     - value to be configured
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gprtSetLED
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN  GT_LED_CFG    cfg,
+    IN  GT_U32        value
+);
+
+
+/*******************************************************************************
+* gprtGetLED
+*
+* DESCRIPTION:
+*        This API allows to retrieve 4 LED sections, Pulse stretch, Blink rate,
+*        and special controls.
+*
+* INPUTS:
+*        port    - the logical port number
+*        cfg     - GT_LED_CFG value
+*
+* OUTPUTS:
+*        value     - configured value
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gprtGetLED
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN  GT_LED_CFG    cfg,
+    OUT GT_U32        *value
+);
+
+
+
+
+
+/* gtPortStatus.c */
+
+/*******************************************************************************
+* gprtGetQSizePerQPri
+*
+* DESCRIPTION:
+*        This routine gets egress queue size for port's each QPri (0 ~ 3).
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        counts - egress queue size per QPri (should be 4 * 16bytes)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetQSizePerQPri
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_U16        *counts
+);
+
+
+/* gtSysCtrl.c */
+
+
+/*******************************************************************************
+* gsysGetARPwoBC
+*
+* DESCRIPTION:
+*       ARP detection without Broadcast checking. When enabled the switch core
+*       does not check for a Btoadcast MAC address as part of the ARP frame
+*       detection. It only checkes the Ether Type (0x0806).
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       en - GT_TRUE if enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetARPwoBC
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL        *en
+);
+
+/*******************************************************************************
+* gsysSetARPwoBC
+*
+* DESCRIPTION:
+*       ARP detection without Broadcast checking. When enabled the switch core
+*       does not check for a Btoadcast MAC address as part of the ARP frame
+*       detection. It only checkes the Ether Type (0x0806).
+*
+* INPUTS:
+*       en - GT_TRUE to enable, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetARPwoBC
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_BOOL     en
+);
+
+
+/*******************************************************************************
+* gsysGetCLK125En
+*
+* DESCRIPTION:
+*        Clock 125MHz Enable.
+*        When this feature is enabled, the CLK125 pin has a free running 125 MHz
+*        clock output.
+*        When it's disabled, the CLK125 pin will be in tri-state.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if 125MHz clock is enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetCLK125En
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+);
+
+
+/*******************************************************************************
+* gsysSetCLK125En
+*
+* DESCRIPTION:
+*        Clock 125MHz Enable.
+*        When this feature is enabled, the CLK125 pin has a free running 125 MHz
+*        clock output.
+*        When it's disabled, the CLK125 pin will be in tri-state.
+*
+* INPUTS:
+*        en - GT_TRUE to enable 125 MHz clock, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetCLK125En
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+);
+
+
+/* gtPriTable.c */
+
+/*******************************************************************************
+* gsysSetFPriOverrideTable
+*
+* DESCRIPTION:
+*         Frame Priority Override.
+*        When a frame enters a port, its type is determined and the type is used
+*        to access the Frame Priority Table. If the type's fPriEn (in GT_FPRI_TBL_ENTRY
+*        structure) is enabled, then the frame's Frame Priority will be overridden
+*        with the value written in fPriority (in GT_FPRI_TBL_ENTRY structure).
+*        Frame Types supported are:
+*            FTYPE_DSA_TO_CPU_BPDU -
+*                Used on multicast DSA To_CPU frames with a Code of 0x0 (BPDU/MGMT).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_F2R -
+*                Used on DSA To_CPU frames with a Code of 0x1 (Frame to Register
+*                Reply). Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_IGMP -
+*                Used on DSA To_CPU frames with a Code of 0x2 (IGMP/MLD Trap)
+*                and on non-DSA Control frames that are IGMP or MLD trapped
+*            FTYPE_DSA_TO_CPU_TRAP -
+*                Used on DSA To_CPU frames with a Code of 0x3 (Policy Trap) and
+*                on non-DSA Control frames that are Policy Trapped
+*            FTYPE_DSA_TO_CPU_ARP -
+*                Used on DSA To_CPU frames with a Code of 0x4 (ARP Mirror) and
+*                on non-DSA Control frames that are ARP Mirrored (see gprtSetARPtoCPU API).
+*            FTYPE_DSA_TO_CPU_MIRROR -
+*                Used on DSA To_CPU frames with a Code of 0x5 (Policy Mirror) and
+*                on non-DSA Control frames that are Policy Mirrored (see gprtSetPolicy API).
+*            FTYPE_DSA_TO_CPU_RESERVED -
+*                Used on DSA To_CPU frames with a Code of 0x6 (Reserved). Not
+*                used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_UCAST_MGMT -
+*                Used on unicast DSA To_CPU frames with a Code of 0x0 (unicast
+*                MGMT). Not used on non-DSA Control frames.
+*            FTYPE_DSA_FROM_CPU -
+*                Used on DSA From_CPU frames. Not used on non-DSA Control frame
+*            FTYPE_DSA_CROSS_CHIP_FC -
+*                Used on DSA Cross Chip Flow Control frames (To_Sniffer Flow
+*                Control). Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_EGRESS_MON -
+*                Used on DSA Cross Chip Egress Monitor frames (To_Sniffer Tx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_INGRESS_MON -
+*                Used on DSA Cross Chip Ingress Monitor frames (To_Sniffer Rx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_PORT_ETYPE_MATCH -
+*                Used on normal network ports (see gprtSetFrameMode API)
+*                on frames whose Ethertype matches the port's PortEType register.
+*                Not used on non-DSA Control frames.
+*            FTYPE_BCAST_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain a Broadcast
+*                destination address. Not used on DSA Control frames.
+*            FTYPE_PPPoE_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain an Ether Type 0x8863
+*                (i.e., PPPoE frames). Not used on DSA Control frames.
+*            FTYPE_IP_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain an IPv4 or IPv6 Ether
+*                Type. Not used on DSA Control frames.
+*
+* INPUTS:
+*       fType - frame type (GT_PRI_OVERRIDE_FTYPE)
+*       entry - Frame Priority Override Table entry (GT_FPRI_TBL_ENTRY)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM     - on unknown frame type
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gsysSetFPriOverrideTable
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PRI_OVERRIDE_FTYPE    fType,
+    IN  GT_FPRI_TBL_ENTRY    *entry
+);
+
+
+/*******************************************************************************
+* gsysGetFPriOverrideTable
+*
+* DESCRIPTION:
+*         Frame Priority Override.
+*        When a frame enters a port, its type is determined and the type is used
+*        to access the Frame Priority Table. If the type's fPriEn (in GT_FPRI_TBL_ENTRY
+*        structure) is enabled, then the frame's Frame Priority will be overridden
+*        with the value written in fPriority (in GT_FPRI_TBL_ENTRY structure).
+*        Frame Types supported are:
+*            FTYPE_DSA_TO_CPU_BPDU -
+*                Used on multicast DSA To_CPU frames with a Code of 0x0 (BPDU/MGMT).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_F2R -
+*                Used on DSA To_CPU frames with a Code of 0x1 (Frame to Register
+*                Reply). Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_IGMP -
+*                Used on DSA To_CPU frames with a Code of 0x2 (IGMP/MLD Trap)
+*                and on non-DSA Control frames that are IGMP or MLD trapped
+*            FTYPE_DSA_TO_CPU_TRAP -
+*                Used on DSA To_CPU frames with a Code of 0x3 (Policy Trap) and
+*                on non-DSA Control frames that are Policy Trapped
+*            FTYPE_DSA_TO_CPU_ARP -
+*                Used on DSA To_CPU frames with a Code of 0x4 (ARP Mirror) and
+*                on non-DSA Control frames that are ARP Mirrored (see gprtSetARPtoCPU API).
+*            FTYPE_DSA_TO_CPU_MIRROR -
+*                Used on DSA To_CPU frames with a Code of 0x5 (Policy Mirror) and
+*                on non-DSA Control frames that are Policy Mirrored (see gprtSetPolicy API).
+*            FTYPE_DSA_TO_CPU_RESERVED -
+*                Used on DSA To_CPU frames with a Code of 0x6 (Reserved). Not
+*                used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_UCAST_MGMT -
+*                Used on unicast DSA To_CPU frames with a Code of 0x0 (unicast
+*                MGMT). Not used on non-DSA Control frames.
+*            FTYPE_DSA_FROM_CPU -
+*                Used on DSA From_CPU frames. Not used on non-DSA Control frame
+*            FTYPE_DSA_CROSS_CHIP_FC -
+*                Used on DSA Cross Chip Flow Control frames (To_Sniffer Flow
+*                Control). Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_EGRESS_MON -
+*                Used on DSA Cross Chip Egress Monitor frames (To_Sniffer Tx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_INGRESS_MON -
+*                Used on DSA Cross Chip Ingress Monitor frames (To_Sniffer Rx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_PORT_ETYPE_MATCH -
+*                Used on normal network ports (see gprtSetFrameMode API)
+*                on frames whose Ethertype matches the port's PortEType register.
+*                Not used on non-DSA Control frames.
+*            FTYPE_BCAST_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain a Broadcast
+*                destination address. Not used on DSA Control frames.
+*            FTYPE_PPPoE_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain an Ether Type 0x8863
+*                (i.e., PPPoE frames). Not used on DSA Control frames.
+*            FTYPE_IP_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain an IPv4 or IPv6 Ether
+*                Type. Not used on DSA Control frames.
+*
+* INPUTS:
+*       fType - frame type (GT_PRI_OVERRIDE_FTYPE)
+*
+* OUTPUTS:
+*       entry - Frame Priority Override Table entry (GT_FPRI_TBL_ENTRY)
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_BAD_PARAM     - on unknown frame type
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gsysGetFPriOverrideTable
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PRI_OVERRIDE_FTYPE    fType,
+    OUT GT_FPRI_TBL_ENTRY    *entry
+);
+
+
+/*******************************************************************************
+* gsysSetQPriAvbOverrideTable
+*
+* DESCRIPTION:
+*         Queue Priority Override for AVB enabled ports or AvbOverride enabled ports.
+*        When a frame enters a AVB port, its type is determined and the type is used
+*        to access the Queue Priority Table. If the type's qPriEn (in GT_QPRI_TBL_ENTRY
+*        structure) is enabled, then the frame's Queue Priority will be overridden
+*        with the value written in qPriority (in GT_QPRI_TBL_ENTRY structure).
+*        Frame Types supported are:
+*            FTYPE_DSA_TO_CPU_BPDU -
+*                Used on multicast DSA To_CPU frames with a Code of 0x0 (BPDU/MGMT).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_F2R -
+*                Used on DSA To_CPU frames with a Code of 0x1 (Frame to Register
+*                Reply). Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_IGMP -
+*                Used on DSA To_CPU frames with a Code of 0x2 (IGMP/MLD Trap)
+*                and on non-DSA Control frames that are IGMP or MLD trapped
+*            FTYPE_DSA_TO_CPU_TRAP -
+*                Used on DSA To_CPU frames with a Code of 0x3 (Policy Trap) and
+*                on non-DSA Control frames that are Policy Trapped
+*            FTYPE_DSA_TO_CPU_ARP -
+*                Used on DSA To_CPU frames with a Code of 0x4 (ARP Mirror) and
+*                on non-DSA Control frames that are ARP Mirrored (see gprtSetARPtoCPU API).
+*            FTYPE_DSA_TO_CPU_MIRROR -
+*                Used on DSA To_CPU frames with a Code of 0x5 (Policy Mirror) and
+*                on non-DSA Control frames that are Policy Mirrored (see gprtSetPolicy API).
+*            FTYPE_DSA_TO_CPU_RESERVED -
+*                Used on DSA To_CPU frames with a Code of 0x6 (Reserved). Not
+*                used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_UCAST_MGMT -
+*                Used on unicast DSA To_CPU frames with a Code of 0x0 (unicast
+*                MGMT). Not used on non-DSA Control frames.
+*            FTYPE_DSA_FROM_CPU -
+*                Used on DSA From_CPU frames. Not used on non-DSA Control frame
+*            FTYPE_DSA_CROSS_CHIP_FC -
+*                Used on DSA Cross Chip Flow Control frames (To_Sniffer Flow
+*                Control). Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_EGRESS_MON -
+*                Used on DSA Cross Chip Egress Monitor frames (To_Sniffer Tx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_INGRESS_MON -
+*                Used on DSA Cross Chip Ingress Monitor frames (To_Sniffer Rx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_PORT_ETYPE_MATCH -
+*                Used on normal network ports (see gprtSetFrameMode API)
+*                on frames whose Ethertype matches the port's PortEType register.
+*                Not used on non-DSA Control frames.
+*            FTYPE_BCAST_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain a Broadcast
+*                destination address. Not used on DSA Control frames.
+*            FTYPE_PPPoE_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain an Ether Type 0x8863
+*                (i.e., PPPoE frames). Not used on DSA Control frames.
+*            FTYPE_IP_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain an IPv4 or IPv6 Ether
+*                Type. Not used on DSA Control frames.
+*
+* INPUTS:
+*       fType - frame type (GT_PRI_OVERRIDE_FTYPE)
+*       entry - Q Priority Override Table entry (GT_QPRI_TBL_ENTRY)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM     - on unknown frame type
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gsysSetQPriAvbOverrideTable
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PRI_OVERRIDE_FTYPE    fType,
+    IN  GT_QPRI_TBL_ENTRY    *entry
+);
+
+
+/*******************************************************************************
+* gsysGetQPriAvbOverrideTable
+*
+* DESCRIPTION:
+*         Queue Priority Override for AVB enabled ports or AvbOverride enabled ports.
+*        When a frame enters a AVB port, its type is determined and the type is used
+*        to access the Queue Priority Table. If the type's qPriEn (in GT_QPRI_TBL_ENTRY
+*        structure) is enabled, then the frame's Queue Priority will be overridden
+*        with the value written in qPriority (in GT_QPRI_TBL_ENTRY structure).
+*        Frame Types supported are:
+*            FTYPE_DSA_TO_CPU_BPDU -
+*                Used on multicast DSA To_CPU frames with a Code of 0x0 (BPDU/MGMT).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_F2R -
+*                Used on DSA To_CPU frames with a Code of 0x1 (Frame to Register
+*                Reply). Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_IGMP -
+*                Used on DSA To_CPU frames with a Code of 0x2 (IGMP/MLD Trap)
+*                and on non-DSA Control frames that are IGMP or MLD trapped
+*            FTYPE_DSA_TO_CPU_TRAP -
+*                Used on DSA To_CPU frames with a Code of 0x3 (Policy Trap) and
+*                on non-DSA Control frames that are Policy Trapped
+*            FTYPE_DSA_TO_CPU_ARP -
+*                Used on DSA To_CPU frames with a Code of 0x4 (ARP Mirror) and
+*                on non-DSA Control frames that are ARP Mirrored (see gprtSetARPtoCPU API).
+*            FTYPE_DSA_TO_CPU_MIRROR -
+*                Used on DSA To_CPU frames with a Code of 0x5 (Policy Mirror) and
+*                on non-DSA Control frames that are Policy Mirrored (see gprtSetPolicy API).
+*            FTYPE_DSA_TO_CPU_RESERVED -
+*                Used on DSA To_CPU frames with a Code of 0x6 (Reserved). Not
+*                used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_UCAST_MGMT -
+*                Used on unicast DSA To_CPU frames with a Code of 0x0 (unicast
+*                MGMT). Not used on non-DSA Control frames.
+*            FTYPE_DSA_FROM_CPU -
+*                Used on DSA From_CPU frames. Not used on non-DSA Control frame
+*            FTYPE_DSA_CROSS_CHIP_FC -
+*                Used on DSA Cross Chip Flow Control frames (To_Sniffer Flow
+*                Control). Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_EGRESS_MON -
+*                Used on DSA Cross Chip Egress Monitor frames (To_Sniffer Tx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_INGRESS_MON -
+*                Used on DSA Cross Chip Ingress Monitor frames (To_Sniffer Rx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_PORT_ETYPE_MATCH -
+*                Used on normal network ports (see gprtSetFrameMode API)
+*                on frames whose Ethertype matches the port's PortEType register.
+*                Not used on non-DSA Control frames.
+*            FTYPE_BCAST_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain a Broadcast
+*                destination address. Not used on DSA Control frames.
+*            FTYPE_PPPoE_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain an Ether Type 0x8863
+*                (i.e., PPPoE frames). Not used on DSA Control frames.
+*            FTYPE_IP_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain an IPv4 or IPv6 Ether
+*                Type. Not used on DSA Control frames.
+*
+* INPUTS:
+*       fType - frame type (GT_PRI_OVERRIDE_FTYPE)
+*
+* OUTPUTS:
+*       entry - Q Priority Override Table entry (GT_QPRI_TBL_ENTRY)
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM     - on unknown frame type
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gsysGetQPriAvbOverrideTable
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PRI_OVERRIDE_FTYPE    fType,
+    OUT GT_QPRI_TBL_ENTRY    *entry
+);
+
+
+/* gtPortCtrl.c */
+
+/*******************************************************************************
+* gprtGet200Base
+*
+* DESCRIPTION:
+*        200 Base mode. This bit can be used to change the port's Px_GTXCLK
+*        frequency to 50MHz to support 200 BASE mode as follows:
+*        0 = 25MHz Px_GTXCLK
+*        1 = 50MHz Px_GTXCLK
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - 0 for 100Mbps, 1 for 200Mbps
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        C_Mode should be set to 0x2 in order for this API to work
+*
+*******************************************************************************/
+GT_STATUS gprtGet200Base
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_U32      *mode
+);
+
+
+/*******************************************************************************
+* gprtSet200Base
+*
+* DESCRIPTION:
+*        200 Base mode. This bit can be used to change the port's Px_GTXCLK
+*        frequency to 50MHz to support 200 BASE mode as follows:
+*            0 = 25MHz Px_GTXCLK
+*            1 = 50MHz Px_GTXCLK
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - 0 for 100Mbps, 1 for 200Mbps
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        C_Mode should be set to 0x2 in order for this API to work
+*
+*******************************************************************************/
+GT_STATUS gprtSet200Base
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_U32      mode
+);
+
+
+/*******************************************************************************
+* gprtSetQueueCtrl
+*
+* DESCRIPTION:
+*        Set port queue control data to the Port Queue Control register.
+*        The registers of Port Queue control are.
+*         Hard Queue Limits register space
+*         Reserved for future Hard Queue Limits use
+*
+* INPUTS:
+*        port  - logical port number
+*        point - Pointer to the Port Queue Control register.
+*        data  - Port Queue Control data written to the register
+*                pointed to by the point above.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gprtSetQueueCtrl
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT         port,
+    IN  GT_U32           point,
+    IN  GT_U8            data
+);
+
+/*******************************************************************************
+* gprtGetQueueCtrl
+*
+* DESCRIPTION:
+*        Get port queue control data from the Port Queue Control register.
+*        The registers of Port Queue control are.
+*         Hard Queue Limits register space
+*         Reserved for future Hard Queue Limits use
+*
+* INPUTS:
+*        port  - logical port number
+*        point - Pointer to the Port Queue Control register.
+*
+* OUTPUTS:
+*        data  - Port Queue Control data written to the register
+*                pointed to by the point above.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gprtGetQueueCtrl
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT         port,
+    IN  GT_U32           point,
+    OUT GT_U8            *data
+);
+
+/*******************************************************************************
+* gprtGetDebugCounter
+*
+* DESCRIPTION:
+*        Get Port Debug Counter, bad counter and good counter.
+*
+* INPUTS:
+*        port  - logical port number
+*
+* OUTPUTS:
+*        badCounter  - When CtrMode is cleared to a zero (Global 1 offset 0x1C) this
+*  counter increments each time a frame enters this port that was an error on the
+*  wire. It does not matter if the frameï¿½s CRC is fixed by ForceGoodFCS (Port
+*  offset 0x08) being set to a one, this counter will still increment. A CRC error
+*  frame is one that is 64 bytes to MaxFrameSize (Global 1, offset 0x04) with a
+*  bad CRC (including alignment errors but not dribbles). Fragments and
+*  properly formed frames are not counted. The RxBadFrames counter counts
+*  frames that are counted in the MIB counters as InUndersize, InOversize,
+*  InJabber, InRxErr and InFCSErr.
+*  When CtrMode is set to a one this counter increments each time a transmit
+*  collision occurs on this port.
+*        goodCounter  - When CtrMode is cleared to a zero (Global 1 offset 0x1C) this
+*  counter increments each time a frame enters this port that was not an error
+*  frame on the wire. It does not matter if the frame was filtered or discarded,
+*  only that the frame was received as good on the wire (i.e., its wire size is in the
+*  range of 64 bytes to MaxFrameSize (Global 1, offset 0x04) and its CRC was
+*  good. The RxGoodFrames counter counts frames that are not counted
+*  above as long as they are not being counted in the MIB counters as
+*  InFragments.
+*  When CtrMode is set to a one this counter increments each time a frame is
+*  transmitted out this port.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gprtGetDebugCounter
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT         port,
+    OUT  GT_U8            *badCounter,
+    OUT  GT_U8            *goodCounter
+);
+
+/*******************************************************************************
+* gprtSetCutThrough
+*
+* DESCRIPTION:
+*        Set port Cut Through configuration.
+*
+* INPUTS:
+*        port  - logical port number
+*        cutThru - Cut through configuration.
+*                    enableSelect;     Port Enable Select.
+*                    enable;           Cut Through enable.
+*                    cutThruQueue;     Cut Through Queues.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gprtSetCutThrough
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT         port,
+    IN  GT_CUT_THROUGH   *cutThru
+);
+
+/*******************************************************************************
+* gprtGetCutThrough
+*
+* DESCRIPTION:
+*        Get port Cut Through configuration.
+*
+* INPUTS:
+*        port  - logical port number
+*
+* OUTPUTS:
+*        cutThru - Cut through configuration.
+*                    enableSelect;     Port Enable Select.
+*                    enable;           Cut Through enable.
+*                    cutThruQueue;     Cut Through Queues.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gprtGetCutThrough
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT         port,
+    OUT GT_CUT_THROUGH   *cutThru
+);
+
+/* gtPIRL2.c */
+
+/*******************************************************************************
+* gpirl2WriteTSMResource
+*
+* DESCRIPTION:
+*        This routine writes rate resource bucket parameters in Time Slot Metering
+*        mode to the given resource of the port.
+*
+* INPUTS:
+*        port     - logical port number.
+*        irlRes   - bucket to be used (0 ~ 1).
+*        pirlData - PIRL TSM resource parameters.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        Only Resource 0 and 1 can be supported for TSM Mode.
+*
+*******************************************************************************/
+GT_STATUS gpirl2WriteTSMResource
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN  GT_U32        irlRes,
+    IN  GT_PIRL2_TSM_DATA    *pirlData
+);
+
+
+/*******************************************************************************
+* gpirl2ReadTSMResource
+*
+* DESCRIPTION:
+*        This routine retrieves IRL Parameter.
+*        Returned ingressRate would be rough number. Instead, customSetup will
+*        have the exact configured value.
+*
+* INPUTS:
+*        port     - logical port number.
+*        irlRes   - bucket to be used (0 ~ 1).
+*
+* OUTPUTS:
+*        pirlData - PIRL resource parameters.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        Only Resource 0 and 1 can be supported for TSM Mode.
+*
+*******************************************************************************/
+GT_STATUS gpirl2ReadTSMResource
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN  GT_U32        irlRes,
+    OUT GT_PIRL2_TSM_DATA    *pirlData
+);
+
+
+/* gtPTP.c */
+
+/*******************************************************************************
+* gtaiSetEventConfig
+*
+* DESCRIPTION:
+*       This routine sets TAI Event Capture configuration parameters.
+*
+* INPUTS:
+*        eventData  - TAI Event Capture configuration parameters.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiSetEventConfig
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_TAI_EVENT_CONFIG    *eventData
+);
+
+
+/*******************************************************************************
+* gtaiGetEventConfig
+*
+* DESCRIPTION:
+*       This routine gets TAI Event Capture configuration parameters.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        eventData  - TAI Event Capture configuration parameters.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiGetEventConfig
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_TAI_EVENT_CONFIG    *eventData
+);
+
+/*******************************************************************************
+* gtaiGetEventStatus
+*
+* DESCRIPTION:
+*       This routine gets TAI Event Capture status.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        eventData  - TAI Event Capture configuration parameters.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiGetEventStatus
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_TAI_EVENT_STATUS    *status
+);
+
+/*******************************************************************************
+* gtaiGetEventInt
+*
+* DESCRIPTION:
+*       This routine gets TAI Event Capture Interrupt status.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        intStatus     - interrupt status for TAI Event capture
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiGetEventInt
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *intStatus
+);
+
+/*******************************************************************************
+* gtaiClearEventInt
+*
+* DESCRIPTION:
+*       This routine clear TAI Event Capture Interrupt status.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiClearEventInt
+(
+    IN  GT_QD_DEV     *dev
+);
+
+/*******************************************************************************
+* gtaiSetClockSelect
+*
+* DESCRIPTION:
+*       This routine sets several clock select in TAI.
+*
+* INPUTS:
+*        clkSelect  - TAI clock select configuration parameters.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiSetClockSelect
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_TAI_CLOCK_SELECT    *clkSelect
+);
+
+/*******************************************************************************
+* gtaiGetClockSelect
+*
+* DESCRIPTION:
+*       This routine gets several clock select in TAI.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       clkSelect  - TAI clock select configuration parameters.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiGetClockSelect
+(
+    IN  GT_QD_DEV     *dev,
+    OUT  GT_TAI_CLOCK_SELECT    *clkSelect
+);
+
+/*******************************************************************************
+* gtaiGetTrigInt
+*
+* DESCRIPTION:
+*       This routine gets TAI Trigger Interrupt status.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        intStatus     - interrupt status for TAI Trigger
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiGetTrigInt
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *intStatus
+);
+
+/*******************************************************************************
+* gtaiClearTrigInt
+*
+* DESCRIPTION:
+*       This routine clear TAI Trigger Interrupt status.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiClearTrigInt
+(
+    IN  GT_QD_DEV     *dev
+);
+
+/*******************************************************************************
+* gtaiSetTrigConfig
+*
+* DESCRIPTION:
+*       This routine sets TAI Trigger configuration parameters.
+*
+* INPUTS:
+*        trigEn    - enable/disable TAI Trigger.
+*        trigData  - TAI Trigger configuration parameters (valid only if trigEn is GT_TRUE).
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiSetTrigConfig
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL     trigEn,
+    IN  GT_TAI_TRIGGER_CONFIG    *trigData
+);
+
+/*******************************************************************************
+* gtaiGetTrigConfig
+*
+* DESCRIPTION:
+*       This routine gets TAI Trigger configuration parameters.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        trigEn    - enable/disable TAI Trigger.
+*        trigData  - TAI Trigger configuration parameters (valid only if trigEn is GT_TRUE).
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiGetTrigConfig
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL     *trigEn,
+    OUT GT_TAI_TRIGGER_CONFIG    *trigData
+);
+
+
+/*******************************************************************************
+* gtaiSetTrigLock
+*
+* DESCRIPTION:
+*       This routine sets TAI Trigger lock.
+*
+* INPUTS:
+*        trigLock       - trigger lock set.
+*        trigLockRange  - trigger lock range.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiSetTrigLock
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL    trigLock,
+    IN  GT_U8      trigLockRange
+);
+
+/*******************************************************************************
+* gtaiGetTrigLock
+*
+* DESCRIPTION:
+*       This routine gets TAI Trigger lock and trigger lock range.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        trigLock       - trigger lock set.
+*        trigLockRange  - trigger lock range.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiGetTrigLock
+(
+    IN  GT_QD_DEV     *dev,
+    OUT  GT_BOOL    *trigLock,
+    OUT  GT_U8      *trigLockRange
+);
+
+/*******************************************************************************
+* gtaiGetTSClkPer
+*
+* DESCRIPTION:
+*         Time Stamping Clock Period in pico seconds.
+*        This routine specifies the clock period for the time stamping clock supplied
+*        to the PTP hardware logic.
+*        This is the clock that is used by the hardware logic to update the PTP
+*        Global Time Counter.
+*
+* INPUTS:
+*         None.
+*
+* OUTPUTS:
+*        clk        - time stamping clock period
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gtaiGetTSClkPer
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U32        *clk
+);
+
+
+/*******************************************************************************
+* gtaiSetTSClkPer
+*
+* DESCRIPTION:
+*         Time Stamping Clock Period in pico seconds.
+*        This routine specifies the clock period for the time stamping clock supplied
+*        to the PTP hardware logic.
+*        This is the clock that is used by the hardware logic to update the PTP
+*        Global Time Counter.
+*
+* INPUTS:
+*        clk        - time stamping clock period
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gtaiSetTSClkPer
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        clk
+);
+
+/*******************************************************************************
+* gtaiSetMultiPTPSync
+*
+* DESCRIPTION:
+*         This routine sets Multiple PTP device sync mode and sync time (TrigGenAmt).
+*        When enabled, the hardware logic detects a low to high transition on the
+*        EventRequest(GPIO) and transfers the sync time into the PTP Global Time
+*        register. The EventCapTime is also updated at that instant.
+*
+* INPUTS:
+*        multiEn        - enable/disable Multiple PTP device sync mode
+*        syncTime    - sync time (valid only if multiEn is GT_TRUE)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         When enabled, gtaiSetTrigConfig, gtaiSetEventConfig, gtaiSetTimeInc,
+*        and gtaiSetTimeDec APIs are not operational.
+*
+*******************************************************************************/
+GT_STATUS gtaiSetMultiPTPSync
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL     multiEn,
+    IN  GT_32        syncTime
+);
+
+/*******************************************************************************
+* gtaiGetMultiPTPSync
+*
+* DESCRIPTION:
+*         This routine gets Multiple PTP device sync mode and sync time (TrigGenAmt).
+*        When enabled, the hardware logic detects a low to high transition on the
+*        EventRequest(GPIO) and transfers the sync time into the PTP Global Time
+*        register. The EventCapTime is also updated at that instant.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        multiEn        - enable/disable Multiple PTP device sync mode
+*        syncTime    - sync time (valid only if multiEn is GT_TRUE)
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         When enabled, gtaiSetTrigConfig, gtaiSetEventConfig, gtaiSetTimeInc,
+*        and gtaiSetTimeDec APIs are not operational.
+*
+*******************************************************************************/
+GT_STATUS gtaiGetMultiPTPSync
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL     *multiEn,
+    OUT GT_32        *syncTime
+);
+
+/*******************************************************************************
+* gtaiGetTimeIncDec
+*
+* DESCRIPTION:
+*         This routine retrieves Time increment/decrement setup.
+*        This amount specifies the number of units of PTP Global Time that need to be
+*        incremented or decremented. This is used for adjusting the PTP Global Time
+*        counter value by a certain amount.
+*
+* INPUTS:
+*         None.
+*
+* OUTPUTS:
+*        en        - GT_TRUE if enabled, GT_FALSE otherwise
+*        inc        - GT_TRUE if increment, GT_FALSE if decrement
+*        amount    - increment/decrement amount
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gtaiGetTimeIncDec
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *en,
+    OUT GT_BOOL        *inc,
+    OUT GT_U32        *amount
+);
+
+/*******************************************************************************
+* gtaiSetTimeInc
+*
+* DESCRIPTION:
+*         This routine enables time increment by the specifed time increment amount.
+*        The amount specifies the number of units of PTP Global Time that need to be
+*        incremented. This is used for adjusting the PTP Global Time counter value by
+*        a certain amount.
+*        Increment occurs just once.
+*
+* INPUTS:
+*        amount    - time increment amount (0 ~ 0xFF)
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gtaiSetTimeInc
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        amount
+);
+
+/*******************************************************************************
+* gtaiSetTimeDec
+*
+* DESCRIPTION:
+*         This routine enables time decrement by the specifed time decrement amount.
+*        The amount specifies the number of units of PTP Global Time that need to be
+*        decremented. This is used for adjusting the PTP Global Time counter value by
+*        a certain amount.
+*        Decrement occurs just once.
+*
+* INPUTS:
+*        amount    - time decrement amount (0 ~ 0x7FF)
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gtaiSetTimeDec
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        amount
+);
+
+/*******************************************************************************
+* gtaiSetTimeIncDecAmt
+*
+* DESCRIPTION:
+*         This routine sets time decrement or increment amount.
+*        The amount specifies the number of units of PTP Global Time that need to be
+*        decremented or increased. This is used for adjusting the PTP Global Time counter value by
+*        a certain amount.
+*
+* INPUTS:
+*        amount    - time decrement amount (0 ~ 0x7FF)
+*        dec       - 0: increase, 1: descrease
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gtaiSetTimeIncDecAmt
+(
+	IN  GT_QD_DEV     * dev,
+	IN  GT_BOOL         dec,
+	IN  GT_U32          amount
+);
+
+/*******************************************************************************
+* gtaiIncDecTimeEnable
+*
+* DESCRIPTION:
+*         This routine enables time decrement or increment by the specifed time decrement amount.
+*        The amount specifies the number of units of PTP Global Time that need to be
+*        decremented. This is used for adjusting the PTP Global Time counter value by
+*        a certain amount.
+*        Decrement occurs just once.
+*
+* INPUTS:
+*         None.
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gtaiIncDecTimeEnable
+(
+	IN  GT_QD_DEV     * dev
+);
+
+/*******************************************************************************
+* gtaiSetTrigGenAmt
+*
+* DESCRIPTION:
+*         This routine sets the TrigGenAmt
+*
+* INPUTS:
+*        amount    - Trigger Generation Time Amount (U32)
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gtaiSetTrigGenAmt
+(
+	IN  GT_QD_DEV     * dev,
+	IN  GT_U32          amount
+);
+
+/*******************************************************************************
+* gtaiGetTrigGenAmt
+*
+* DESCRIPTION:
+*         This routine gets the TrigGenAmt
+*
+* OUTPUTS:
+*        amount    - Trigger Generation Time Amount (U32)
+*
+* INPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gtaiGetTrigGenAmt
+(
+	IN  GT_QD_DEV     * dev,
+	IN  GT_U32        * amount
+);
+
+/*******************************************************************************
+ * * gtaiTrigGenRequest
+ * *
+ * * DESCRIPTION:
+ * *         This routine requests TrigGen
+ * *
+ * * OUTPUTS:
+ * *         None.
+ * *
+ * * INPUTS:
+ * *         enable: enable or disable.
+ * *
+ * * RETURNS:
+ * *         GT_OK      - on success
+ * *         GT_FAIL    - on error
+ * *         GT_NOT_SUPPORTED - if current device does not support this feature.
+ * *
+ * * COMMENTS:
+ * *         None
+ * *
+ * *******************************************************************************/
+GT_STATUS gtaiTrigGenRequest
+(
+	IN  GT_QD_DEV     * dev,
+	IN  GT_BOOL         enable
+);
+
+/*******************************************************************************
+* gavbGetPriority
+*
+* DESCRIPTION:
+*        Priority overwrite.
+*        Supported priority type is defined as GT_AVB_PRI_TYPE.
+*        Priority is either 3 bits or 2 bits depending on priority type.
+*            GT_AVB_HI_FPRI        - priority is 0 ~ 7
+*            GT_AVB_HI_QPRI        - priority is 0 ~ 3
+*            GT_AVB_LO_FPRI        - priority is 0 ~ 7
+*            GT_AVB_LO_QPRI        - priority is 0 ~ 3
+*            GT_LEGACY_HI_FPRI    - priority is 0 ~ 7
+*            GT_LEGACY_HI_QPRI    - priority is 0 ~ 3
+*            GT_LEGACY_LO_FPRI    - priority is 0 ~ 7
+*            GT_LEGACY_LO_QPRI    - priority is 0 ~ 3
+*
+* INPUTS:
+*         priType    - GT_AVB_PRI_TYPE
+*
+* OUTPUTS:
+*        pri    - priority
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbGetPriority
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_AVB_PRI_TYPE        priType,
+    OUT GT_U32        *pri
+);
+
+/*******************************************************************************
+* gavbSetPriority
+*
+* DESCRIPTION:
+*        Priority overwrite.
+*        Supported priority type is defined as GT_AVB_PRI_TYPE.
+*        Priority is either 3 bits or 2 bits depending on priority type.
+*            GT_AVB_HI_FPRI        - priority is 0 ~ 7
+*            GT_AVB_HI_QPRI        - priority is 0 ~ 3
+*            GT_AVB_LO_FPRI        - priority is 0 ~ 7
+*            GT_AVB_LO_QPRI        - priority is 0 ~ 3
+*            GT_LEGACY_HI_FPRI    - priority is 0 ~ 7
+*            GT_LEGACY_HI_QPRI    - priority is 0 ~ 3
+*            GT_LEGACY_LO_FPRI    - priority is 0 ~ 7
+*            GT_LEGACY_LO_QPRI    - priority is 0 ~ 3
+*
+* INPUTS:
+*         priType    - GT_AVB_PRI_TYPE
+*        pri    - priority
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbSetPriority
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_AVB_PRI_TYPE        priType,
+    IN  GT_U32        pri
+);
+
+/*******************************************************************************
+* gavbGetAVBHiLimit
+*
+* DESCRIPTION:
+*        AVB Hi Frame Limit.
+*        When these bits are zero, normal frame processing occurs.
+*        When it's non-zero, they are used to define the maximum frame size allowed
+*        for AVB frames that can be placed into the GT_AVB_HI_QPRI queue. Frames
+*        that are over this size limit are filtered. The only exception to this
+*        is non-AVB frames that get their QPriAvb assigned by the Priority Override
+*        Table
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        limit    - Hi Frame Limit
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbGetAVBHiLimit
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U32        *limit
+);
+
+/*******************************************************************************
+* gavbSetAVBHiLimit
+*
+* DESCRIPTION:
+*        AVB Hi Frame Limit.
+*        When these bits are zero, normal frame processing occurs.
+*        When it's non-zero, they are used to define the maximum frame size allowed
+*        for AVB frames that can be placed into the GT_AVB_HI_QPRI queue. Frames
+*        that are over this size limit are filtered. The only exception to this
+*        is non-AVB frames that get their QPriAvb assigned by the Priority Override
+*        Table
+*
+* INPUTS:
+*        limit    - Hi Frame Limit
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbSetAVBHiLimit
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        limit
+);
+
+/*******************************************************************************
+* gavbGetPtpExtClk
+*
+* DESCRIPTION:
+*        PTP external clock select.
+*        When this bit is cleared to a zero, the PTP core gets its clock from
+*        an internal 125MHz clock based on the device's XTAL_IN input.
+*        When this bit is set to a one, the PTP core gets its clock from the device's
+*        PTP_EXTCLK pin.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        extClk    - GT_TRUE if external clock is selected, GT_FALSE otherwise
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbGetPtpExtClk
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *extClk
+);
+
+/*******************************************************************************
+* gavbSetPtpExtClk
+*
+* DESCRIPTION:
+*        PTP external clock select.
+*        When this bit is cleared to a zero, the PTP core gets its clock from
+*        an internal 125MHz clock based on the device's XTAL_IN input.
+*        When this bit is set to a one, the PTP core gets its clock from the device's
+*        PTP_EXTCLK pin.
+*
+* INPUTS:
+*        extClk    - GT_TRUE if external clock is selected, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbSetPtpExtClk
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL        extClk
+);
+
+/*******************************************************************************
+* gavbGetRecClkSel
+*
+* DESCRIPTION:
+*        Synchronous Ethernet Recovered Clock Select.
+*        This field indicate the internal PHY number whose recovered clock will
+*        be presented on the SE_RCLK0 or SE_RCLK1 pin depending on the recClk selection.
+*
+* INPUTS:
+*        recClk    - GT_AVB_RECOVERED_CLOCK type
+*
+* OUTPUTS:
+*        clkSel    - recovered clock selection
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbGetRecClkSel
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_AVB_RECOVERED_CLOCK    recClk,
+    OUT GT_U32        *clkSel
+);
+
+
+/*******************************************************************************
+* gavbSetRecClkSel
+*
+* DESCRIPTION:
+*        Synchronous Ethernet Recovered Clock Select.
+*        This field indicate the internal PHY number whose recovered clock will
+*        be presented on the SE_RCLK0 or SE_RCLK1 pin depending on the recClk selection.
+*
+* INPUTS:
+*        recClk    - GT_AVB_RECOVERED_CLOCK type
+*        clkSel    - recovered clock selection (should be less than 8)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbSetRecClkSel
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_AVB_RECOVERED_CLOCK    recClk,
+    IN  GT_U32        clkSel
+);
+
+/*******************************************************************************
+* gavbGetAvbOuiBytes
+*
+* DESCRIPTION:
+*        AVB OUI Limit Filter bytes(0 ~ 2).
+*        When all three of the AvbOui Bytes are zero, normal frame processing occurs.
+*        When any of the three AvbOui Bytes are non-zero, all AVB frames must have a
+*        destination address whose 1st three bytes of the DA match these three
+*        AvbOui Bytes or the frame will be filtered.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        ouiBytes    - 3 bytes of OUI field in Ethernet address format
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbGetAvbOuiBytes
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U8        *obiBytes
+);
+
+/*******************************************************************************
+* gavbSetAvbOuiBytes
+*
+* DESCRIPTION:
+*        AVB OUI Limit Filter bytes(0 ~ 2).
+*        When all three of the AvbOui Bytes are zero, normal frame processing occurs.
+*        When any of the three AvbOui Bytes are non-zero, all AVB frames must have a
+*        destination address whose 1st three bytes of the DA match these three
+*        AvbOui Bytes or the frame will be filtered.
+*
+* INPUTS:
+*        ouiBytes    - 3 bytes of OUI field in Ethernet address format
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbSetAvbOuiBytes
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U8        *obiBytes
+);
+
+/*******************************************************************************
+* gavbGetAvbMode
+*
+* DESCRIPTION:
+*        Port's AVB Mode.
+*
+* INPUTS:
+*        port    - the logical port number
+*
+* OUTPUTS:
+*        mode    - GT_AVB_MODE type
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbGetAvbMode
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    OUT GT_AVB_MODE    *mode
+);
+
+/*******************************************************************************
+* gavbSetAvbMode
+*
+* DESCRIPTION:
+*        Port's AVB Mode.
+*
+* INPUTS:
+*        port    - the logical port number
+*        mode    - GT_AVB_MODE type
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbSetAvbMode
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_AVB_MODE    mode
+);
+
+/*******************************************************************************
+* gavbGetAvbOverride
+*
+* DESCRIPTION:
+*        AVB Override.
+*        When disabled, normal frame processing occurs.
+*        When enabled, the egress portion of this port is considered AVB even if
+*        the ingress portion is not.
+*
+* INPUTS:
+*        port    - the logical port number
+*
+* OUTPUTS:
+*        en        - GT_TRUE if enabled, GT_FALSE otherwise
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbGetAvbOverride
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    OUT GT_BOOL        *en
+);
+
+/*******************************************************************************
+* gavbSetAvbOverride
+*
+* DESCRIPTION:
+*        AVB Override.
+*        When disabled, normal frame processing occurs.
+*        When enabled, the egress portion of this port is considered AVB even if
+*        the ingress portion is not.
+*
+* INPUTS:
+*        port    - the logical port number
+*        en        - GT_TRUE to enable, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbSetAvbOverride
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_BOOL        en
+);
+
+
+/*******************************************************************************
+* gavbGetFilterBadAvb
+*
+* DESCRIPTION:
+*        Filter Bad AVB frames.
+*        When disabled, normal frame processing occurs.
+*        When enabled, frames that are considered Bad AVB frames are filtered.
+*
+* INPUTS:
+*        port    - the logical port number
+*
+* OUTPUTS:
+*        en        - GT_TRUE if enabled, GT_FALSE otherwise
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbGetFilterBadAvb
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    OUT GT_BOOL        *en
+);
+
+/*******************************************************************************
+* gavbSetFilterBadAvb
+*
+* DESCRIPTION:
+*        Filter Bad AVB frames.
+*        When disabled, normal frame processing occurs.
+*        When enabled, frames that are considered Bad AVB frames are filtered.
+*
+* INPUTS:
+*        port    - the logical port number
+*        en        - GT_TRUE to enable, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbSetFilterBadAvb
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_BOOL        en
+);
+
+
+/*******************************************************************************
+* gavbGetAvbTunnel
+*
+* DESCRIPTION:
+*        AVB Tunnel.
+*        When disabled, normal frame processing occurs.
+*        When enabled, the port based VLAN Table masking, 802.1Q VLAN membership
+*        masking and the Trunk Masking is bypassed for any frame entering this port
+*        that is considered AVB by DA. This includes unicast as well as multicast
+*        frame
+*
+* INPUTS:
+*        port    - the logical port number
+*
+* OUTPUTS:
+*        en        - GT_TRUE if enabled, GT_FALSE otherwise
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbGetAvbTunnel
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    OUT GT_BOOL        *en
+);
+
+
+/*******************************************************************************
+* gavbSetAvbTunnel
+*
+* DESCRIPTION:
+*        AVB Tunnel.
+*        When disabled, normal frame processing occurs.
+*        When enabled, the port based VLAN Table masking, 802.1Q VLAN membership
+*        masking and the Trunk Masking is bypassed for any frame entering this port
+*        that is considered AVB by DA. This includes unicast as well as multicast
+*        frame
+*
+* INPUTS:
+*        port    - the logical port number
+*        en        - GT_TRUE to enable, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbSetAvbTunnel
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_BOOL        en
+);
+
+
+/*******************************************************************************
+* gavbGetAvbFramePolicy
+*
+* DESCRIPTION:
+*        AVB Hi or Lo frame policy mapping.
+*        Supported policies are defined in GT_AVB_FRAME_POLICY.
+*
+* INPUTS:
+*        port    - the logical port number
+*        fType    - GT_AVB_FRAME_TYPE
+*
+* OUTPUTS:
+*        policy    - GT_AVB_FRAME_POLICY
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbGetAvbFramePolicy
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN    GT_AVB_FRAME_TYPE    fType,
+    OUT GT_AVB_FRAME_POLICY        *policy
+);
+
+
+/*******************************************************************************
+* gavbSetAvbFramePolicy
+*
+* DESCRIPTION:
+*        AVB Hi or Lo frame policy mapping.
+*        Supported policies are defined in GT_AVB_FRAME_POLICY.
+*
+* INPUTS:
+*        port    - the logical port number
+*        fType    - GT_AVB_FRAME_TYPE
+*        policy    - GT_AVB_FRAME_POLICY
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbSetAvbFramePolicy
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN    GT_AVB_FRAME_TYPE    fType,
+    IN  GT_AVB_FRAME_POLICY        policy
+);
+
+/* Amber QAV API */
+/*******************************************************************************/
+/* Amber QAV API */
+/*******************************************************************************
+* gqavSetPortQpriXQTSToken
+*
+* DESCRIPTION:
+*        This routine set Priority Queue 0-3 time slot tokens on a port.
+*        The setting value is number of tokens that need to be subtracted at each
+*        QTS interval boundary.
+*
+* INPUTS:
+*        port    - the logical port number
+*        queue     - 0 - 3
+*        qtsToken - number of tokens.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gqavSetPortQpriXQTSToken
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_U8        queue,
+    IN  GT_U16        qtsToken
+);
+
+/*******************************************************************************
+* gqavGetPortQpriXQTSToken
+*
+* DESCRIPTION:
+*        This routine get Priority Queue 0-3 time slot tokens on a port.
+*        The setting value is number of tokens that need to be subtracted at each
+*        QTS interval boundary.
+*
+* INPUTS:
+*        port    - the logical port number
+*        queue - 0 - 3
+*
+* OUTPUTS:
+*        qtsToken - number of tokens
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gqavGetPortQpriXQTSToken
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_U8        queue,
+    OUT GT_U16        *qtsToken
+);
+
+/*******************************************************************************
+* gqavSetPortQpriXBurstBytes
+*
+* DESCRIPTION:
+*        This routine set Priority Queue 0-3 Burst Bytes on a port.
+*        This value specifies the number of credits in bytes that can be
+*        accumulated when the queue is blocked from sending out a frame due to
+*        higher priority queue frames being sent out.
+*
+* INPUTS:
+*        port    - the logical port number
+*        queue - 0 - 3
+*        burst - number of credits in bytes .
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetPortQpriXBurstBytes
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_U8        queue,
+    IN  GT_U16        burst
+);
+
+/*******************************************************************************
+* gqavGetPortQpriXBurstBytes
+*
+* DESCRIPTION:
+*        This routine get Priority Queue 0-3 Burst Bytes on a port.
+*        This value specifies the number of credits in bytes that can be
+*        accumulated when the queue is blocked from sending out a frame due to
+*        higher priority queue frames being sent out.
+*
+* INPUTS:
+*        port    - the logical port number
+*        queue    - 0 - 3
+*
+* OUTPUTS:
+*        burst - number of credits in bytes .
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetPortQpriXBurstBytes
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_U8        queue,
+    OUT GT_U16        *burst
+);
+
+/*******************************************************************************
+* gqavSetPortQavEnable
+*
+* DESCRIPTION:
+*        This routine set QAV enable status on a port.
+*
+* INPUTS:
+*        port    - the logical port number
+*        en        - GT_TRUE: QAV enable, GT_FALSE: QAV disable
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetPortQavEnable
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_BOOL        en
+);
+
+/*******************************************************************************
+* gqavSetPortQpriXRate
+*
+* DESCRIPTION:
+*        This routine set Priority Queue 2-3 rate on a port.
+*
+* INPUTS:
+*        port    - the logical port number
+*        queue - 2 - 3
+*        rate - number of credits in bytes .
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetPortQpriXRate
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_U8        queue,
+    IN  GT_U16        rate
+);
+
+/*******************************************************************************
+* gqavGetPortQpriXRate
+*
+* DESCRIPTION:
+*        This routine get Priority Queue 2-3 rate Bytes on a port.
+*
+* INPUTS:
+*        port    - the logical port number
+*        queue    - 2 - 3
+*
+* OUTPUTS:
+*        rate - number of credits in bytes .
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetPortQpriXRate
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_U8        queue,
+    OUT GT_U16        *rate
+);
+
+/*******************************************************************************
+* gqavSetPortQpriXHiLimit
+*
+* DESCRIPTION:
+*        This routine set Priority Queue 2-3 HiLimit on a port.
+*
+* INPUTS:
+*        port    - the logical port number
+*        queue - 2 - 3
+*        hiLimit - number of credits in bytes .
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetPortQpriXHiLimit
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_U8        queue,
+    IN  GT_U16        hiLimit
+);
+
+/*******************************************************************************
+* gqavGetPortQpriXHiLimit
+*
+* DESCRIPTION:
+*        This routine get Priority Queue 2-3 HiLimit Bytes on a port.
+*
+* INPUTS:
+*        port    - the logical port number
+*        queue    - 2 - 3
+*
+* OUTPUTS:
+*        hiLimit - number of credits in bytes .
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetPortQpriXHiLimit
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_U8        queue,
+    OUT GT_U16        *hiLimit
+);
+
+/*******************************************************************************
+* gqavGetPortQavEnable
+*
+* DESCRIPTION:
+*        This routine get QAV enable status on a port.
+*
+* INPUTS:
+*        port    - the logical port number
+*
+* OUTPUTS:
+*        en        - GT_TRUE: QAV enable, GT_FALSE: QAV disable
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetPortQavEnable
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    OUT GT_BOOL        *en
+);
+
+/*******************************************************************************/
+/* QAV Global registers processing */
+/*******************************************************************************
+* gqavSetGlobalAdminMGMT
+*
+* DESCRIPTION:
+*        This routine set to accept Admit Management Frames always.
+*
+* INPUTS:
+*        en - GT_TRUE to set MGMT frame accepted always,
+*             GT_FALSE do not set MGMT frame accepted always
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetGlobalAdminMGMT
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL        en
+);
+
+/*******************************************************************************
+* gqavGetGlobalAdminMGMT
+*
+* DESCRIPTION:
+*        This routine get setting of Admit Management Frames always.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE to set MGMT frame accepted always,
+*             GT_FALSE do not set MGMT frame accepted always
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetGlobalAdminMGMT
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *en
+);
+
+/*******************************************************************************
+* gqavSetGlobalIsoPtrThreshold
+*
+* DESCRIPTION:
+*        This routine set Global Isochronous Queue Pointer Threshold.
+*        This field indicates the total number of isochronous pointers
+*        that are reserved for isochronous streams. The value is expected to be
+*        computed in SRP software and programmed into hardware based on the total
+*        aggregate isochronous streams configured to go through this device..
+*
+* INPUTS:
+*        isoPtrs -  total number of isochronous pointers
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetGlobalIsoPtrThreshold
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U16        isoPtrs
+);
+
+/*******************************************************************************
+* gqavGetGlobalIsoPtrThreshold
+*
+* DESCRIPTION:
+*        This routine get Global Isochronous Queue Pointer Threshold.
+*        This field indicates the total number of isochronous pointers
+*        that are reserved for isochronous streams. The value is expected to be
+*        computed in SRP software and programmed into hardware based on the total
+*        aggregate isochronous streams configured to go through this device..
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        isoPtrs -  total number of isochronous pointers
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetGlobalIsoPtrThreshold
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U16        *isoPtrs
+);
+
+/*******************************************************************************
+* gqavSetGlobalDisQSD4MGMT
+*
+* DESCRIPTION:
+*        This routine set Disable Queue Scheduler Delays for Management frames..
+*
+* INPUTS:
+*        en - GT_TRUE, it indicates to the Queue Controller to disable applying Queue
+*            Scheduler Delays and the corresponding rate regulator does not account
+*            for MGMT frames through this queue.
+*            GT_FALSE, the MGMT frames follow similar rate regulation and delay
+*            regulation envelope as specified for the isochronous queue that the
+*            MGMT frames are sharing with.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetGlobalDisQSD4MGMT
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL        en
+);
+
+/*******************************************************************************
+* gqavGetGlobalDisQSD4MGMT
+*
+* DESCRIPTION:
+*        This routine Get Disable Queue Scheduler Delays for Management frames..
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE, it indicates to the Queue Controller to disable applying Queue
+*            Scheduler Delays and the corresponding rate regulator does not account
+*            for MGMT frames through this queue.
+*            GT_FALSE, the MGMT frames follow similar rate regulation and delay
+*            regulation envelope as specified for the isochronous queue that the
+*            MGMT frames are sharing with.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetGlobalDisQSD4MGMT
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *en
+);
+
+/*******************************************************************************
+* gqavSetGlobalInterrupt
+*
+* DESCRIPTION:
+*        This routine set QAV interrupt enable,
+*        The QAV interrypts include:
+*        [GT_QAV_INT_ENABLE_ENQ_LMT_BIT]      # EnQ Limit Interrupt Enable
+*        [GT_QAV_INT_ENABLE_ISO_DEL_BIT]      # Iso Delay Interrupt Enable
+*        [GT_QAV_INT_ENABLE_ISO_DIS_BIT]      # Iso Discard Interrupt Enable
+*        [GT_QAV_INT_ENABLE_ISO_LIMIT_EX_BIT] # Iso Packet Memory Exceeded
+*                                              Interrupt Enable
+*
+* INPUTS:
+*        intEn - [GT_QAV_INT_ENABLE_ENQ_LMT_BIT] OR
+*                [GT_QAV_INT_ENABLE_ISO_DEL_BIT] OR
+*                [GT_QAV_INT_ENABLE_ISO_DIS_BIT] OR
+*                [GT_QAV_INT_ENABLE_ISO_LIMIT_EX_BIT]
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetGlobalInterrupt
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U16        intEn
+);
+
+/*******************************************************************************
+* gqavGetGlobalInterrupt
+*
+* DESCRIPTION:
+*       This routine get QAV interrupt status and enable status,
+*        The QAV interrypt status include:
+*         [GT_QAV_INT_STATUS_ENQ_LMT_BIT]      # Enqueue Delay Limit exceeded
+*         [GT_QAV_INT_STATUS_ISO_DEL_BIT]      # Iso Delay Interrupt Status
+*         [GT_QAV_INT_STATUS_ISO_DIS_BIT]      # Iso Discard Interrupt Status
+*         [GT_QAV_INT_STATUS_ISO_LIMIT_EX_BIT] # Iso Packet Memory Exceeded
+*                                                Interrupt Status
+*        The QAV interrypt enable status include:
+*         [GT_QAV_INT_ENABLE_ENQ_LMT_BIT]      # EnQ Limit Interrupt Enable
+*         [GT_QAV_INT_ENABLE_ISO_DEL_BIT]      # Iso Delay Interrupt Enable
+*         [GT_QAV_INT_ENABLE_ISO_DIS_BIT]      # Iso Discard Interrupt Enable
+*         [GT_QAV_INT_ENABLE_ISO_LIMIT_EX_BIT] # Iso Packet Memory Exceeded
+*                                                  Interrupt Enable
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        intEnSt - [GT_QAV_INT_STATUS_ENQ_LMT_BIT] OR
+*                [GT_QAV_INT_STATUS_ISO_DEL_BIT] OR
+*                [GT_QAV_INT_STATUS_ISO_DIS_BIT] OR
+*                [GT_QAV_INT_STATUS_ISO_LIMIT_EX_BIT] OR
+*                [GT_QAV_INT_ENABLE_ENQ_LMT_BIT] OR
+*                [GT_QAV_INT_ENABLE_ISO_DEL_BIT] OR
+*                [GT_QAV_INT_ENABLE_ISO_DIS_BIT] OR
+*                [GT_QAV_INT_ENABLE_ISO_LIMIT_EX_BIT]
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetGlobalInterrupt
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U16        *intEnSt
+);
+
+/*******************************************************************************
+* gqavGetGlobalIsoInterruptPort
+*
+* DESCRIPTION:
+*        This routine get Isochronous interrupt port.
+*        This field indicates the port number for IsoDisInt or IsoLimitExInt
+*        bits. Only one such interrupt condition can be detected by hardware at one
+*        time. Once an interrupt bit has been set along with the IsoIntPort, the
+*        software would have to come and clear the bits before hardware records
+*        another interrupt event.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        port - port number for IsoDisInt or IsoLimitExInt bits.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetGlobalIsoInterruptPort
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U8        *port
+);
+
+/*******************************************************************************
+* gqavSetGlobalIsoDelayLmt
+*
+* DESCRIPTION:
+*        This routine set Isochronous queue delay Limit
+*        This field represents a per-port isochronous delay limit that
+*        will be checked by the queue controller logic to ensure no isochronous
+*        packets suffer more than this delay w.r.t to their eligibility time slot.
+*        This represents the number of Queue Time Slots. The interval for the QTS
+*        can be configured using the register in Qav Global Configuration, Offset 0x2.
+*
+* INPUTS:
+*        limit - per-port isochronous delay limit.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetGlobalIsoDelayLmt
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U8        limit
+);
+
+/*******************************************************************************
+* gqavGetGlobalIsoDelayLmt
+*
+* DESCRIPTION:
+*        This routine get Isochronous queue delay Limit
+*        This field represents a per-port isochronous delay limit that
+*        will be checked by the queue controller logic to ensure no isochronous
+*        packets suffer more than this delay w.r.t to their eligibility time slot.
+*        This represents the number of Queue Time Slots. The interval for the QTS
+*        can be configured using the register in Qav Global Configuration, Offset 0x2.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        limit - per-port isochronous delay limit.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetGlobalIsoDelayLmt
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U8        *limit
+);
+
+/*******************************************************************************
+* gqavSetGlobalIsoMonEn
+*
+* DESCRIPTION:
+*       This routine set Isochronous monitor enable
+*        Set GT_TRUE: this bit enables the statistics gathering capabilities stated
+*        in PTP Global Status Registers Offset 0xD, 0xE and 0xF. Once enabled, the
+*        software is expected to program the IsoMonPort (PTP Global Status Offset
+*        0xD) indicating which port of the device does the software wants to monitor.
+*        Upon setting this bit, the hardware collects IsoHiDisCtr, IsoLoDisCtr and
+*        IsoSchMissCtr values for the port indicated by IsoMonPort till this bit is
+*        set to a zero.
+*        Set GT_FALSE: this bit disables the statistics gathering capabilities.
+*
+* INPUTS:
+*        en - GT_TRUE / GT_FALSE.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetGlobalIsoMonEn
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL        en
+);
+
+/*******************************************************************************
+* gqavGetGlobalIsoMonEn
+*
+* DESCRIPTION:
+*        This routine get Isochronous monitor enable
+*        Set GT_TRUE: this bit enables the statistics gathering capabilities stated
+*        in PTP Global Status Registers Offset 0xD, 0xE and 0xF. Once enabled, the
+*        software is expected to program the IsoMonPort (PTP Global Status Offset
+*        0xD) indicating which port of the device does the software wants to monitor.
+*        Upon setting this bit, the hardware collects IsoHiDisCtr, IsoLoDisCtr and
+*        IsoSchMissCtr values for the port indicated by IsoMonPort till this bit is
+*        set to a zero.
+*        Set GT_FALSE: this bit disables the statistics gathering capabilities.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE / GT_FALSE.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetGlobalIsoMonEn
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *en
+);
+
+/*******************************************************************************
+* gqavSetGlobalIsoMonPort
+*
+* DESCRIPTION:
+*        This routine set Isochronous monitoring port.
+*        This field is updated by software along with Iso Mon En bit
+*        (Qav Global Status, offset 0xD) and it indicates the port number that
+*        the software wants the hardware to start monitoring i.e., start updating
+*        IsoHiDisCtr, IsoLoDisCtr and IsoSchMissCtr. The queue controller clears
+*        the above stats when IsoMonPort is changed..
+*
+* INPUTS:
+*        port -  port number .
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetGlobalIsoMonPort
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U16        port
+);
+
+/*******************************************************************************
+* gqavGetGlobalIsoMonPort
+*
+* DESCRIPTION:
+*        This routine get Isochronous monitoring port.
+*        This field is updated by software along with Iso Mon En bit
+*        (Qav Global Status, offset 0xD) and it indicates the port number that
+*        the software wants the hardware to start monitoring i.e., start updating
+*        IsoHiDisCtr, IsoLoDisCtr and IsoSchMissCtr. The queue controller clears
+*        the above stats when IsoMonPort is changed..
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        port -  port number.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetGlobalIsoMonPort
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U16        *port
+);
+
+/*******************************************************************************
+* gqavSetGlobalIsoHiDisCtr
+*
+* DESCRIPTION:
+*        This routine set Isochronous hi queue discard counter.
+*        This field is updated by hardware when instructed to do so by
+*        enabling the IsoMonEn bit in Qav Global Status Register Offset 0xD.
+*        This is an upcounter of number of isochronous hi packets discarded
+*        by Queue Controller.
+*
+* INPUTS:
+*        disCtr - upcounter of number of isochronous hi packets discarded
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetGlobalIsoHiDisCtr
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U8        disCtr
+);
+
+/*******************************************************************************
+* gqavGetGlobalIsoHiDisCtr
+*
+* DESCRIPTION:
+*        This routine get Isochronous hi queue discard counter.
+*        This field is updated by hardware when instructed to do so by
+*        enabling the IsoMonEn bit in Qav Global Status Register Offset 0xD.
+*        This is an upcounter of number of isochronous hi packets discarded
+*        by Queue Controller.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        disCtr - upcounter of number of isochronous hi packets discarded
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetGlobalIsoHiDisCtr
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U8        *disCtr
+);
+
+/*******************************************************************************
+* gqavSetGlobalIsoLoDisCtr
+*
+* DESCRIPTION:
+*        This routine set Isochronous Lo queue discard counter.
+*        This field is updated by hardware when instructed to do so by
+*        enabling the IsoMonEn bit in Qav Global Status Register Offset 0xD.
+*        This is an upcounter of number of isochronous lo packets discarded
+*        by Queue Controller.
+*
+* INPUTS:
+*        disCtr - upcounter of number of isochronous lo packets discarded
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetGlobalIsoLoDisCtr
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U8        disCtr
+);
+
+/*******************************************************************************
+* gqavGetGlobalIsoLoDisCtr
+*
+* DESCRIPTION:
+*        This routine set Isochronous Lo queue discard counter.
+*        This field is updated by hardware when instructed to do so by
+*        enabling the IsoMonEn bit in Qav Global Status Register Offset 0xD.
+*        This is an upcounter of number of isochronous lo packets discarded
+*        by Queue Controller.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        disCtr - upcounter of number of isochronous lo packets discarded
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetGlobalIsoLoDisCtr
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U8        *disCtr
+);
+
+/*******************************************************************************
+* gsysSetUseIpMappingTable
+*
+* DESCRIPTION:
+*        This API set to use IP Frame Priorities from this table.
+*        Set GT_TRUE:  The IP_FPRI data in this table is used as the frameï¿½s
+*            initial IP_FPRI.
+*        Set GT_FALSE: The IP_FPRI data in this table is ignored. Instead the
+*            frameï¿½s initial IP_FPRI is generated by using the frameï¿½s IP_QPRI
+*            as the IP_FPRIï¿½s upper two bits, and the IP_FPRIï¿½s lowest bit comes
+*            from bit 0 of the frameï¿½s source portï¿½s Default PRI (Port offset 0x07).
+*
+* INPUTS:
+*        en    - [GT_TRUE] / [GT_FALSE]
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysSetUseIpMappingTable
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_BOOL            en
+);
+
+/*******************************************************************************
+* gsysGetUseIpMappingTable
+*
+* DESCRIPTION:
+*        This API get to use IP Frame Priorities from this table.
+*        Set GT_TRUE:  The IP_FPRI data in this table is used as the frameï¿½s
+*            initial IP_FPRI.
+*        Set GT_FALSE: The IP_FPRI data in this table is ignored. Instead the
+*            frameï¿½s initial IP_FPRI is generated by using the frameï¿½s IP_QPRI
+*            as the IP_FPRIï¿½s upper two bits, and the IP_FPRIï¿½s lowest bit comes
+*            from bit 0 of the frameï¿½s source portï¿½s Default PRI (Port offset 0x07).
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en    - [GT_TRUE] / [GT_FALSE]
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetUseIpMappingTable
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_BOOL            *en
+);
+
+/*******************************************************************************
+* gsysSetIpMappingPrio
+*
+* DESCRIPTION:
+*        Set IPv4 and IPv6 Frame Priority Mapping, and
+*        IPv4 and IPv6 Queue Priority Mapping.
+*       The ipFpri value is used as the frames initial FPRI when the frame is
+*        an IPv4 or an IPv6 frame, and the portï¿½s InitialPri (Port offset 0x04)
+*        is configured to use IP FPriï¿½s.
+*       The ipQpri value is used as the frameï¿½s initial QPRI when the frame is
+*        an IPv4 or an IPv6 frame, and the portï¿½s InitialPri and TagIfBoth
+*        registers (Port offset 0x04) are configured to use IP QPriï¿½s.
+*
+* INPUTS:
+*        point - Pointer to the Ip Mapping Table.
+*                0 - 0x3f;
+*        ipFpri -  The value is 0 - 7
+*        ipQpri -  The value is 0 - 3.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysSetIpMappingPrio
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            point,
+    IN  GT_U8            ipFpri,
+    IN  GT_U8            ipQpri
+);
+
+/*******************************************************************************
+* gsysGetIpMappingPrio
+*
+* DESCRIPTION:
+*        Get IPv4 and IPv6 Frame Priority Mapping, and
+*        IPv4 and IPv6 Queue Priority Mapping.
+*       The ipFpri value is used as the frames initial FPRI when the frame is
+*        an IPv4 or an IPv6 frame, and the portï¿½s InitialPri (Port offset 0x04)
+*        is configured to use IP FPriï¿½s.
+*        The ipQpri value is used as the frameï¿½s initial QPRI when the frame is
+*        an IPv4 or an IPv6 frame, and the portï¿½s InitialPri and TagIfBoth
+*        registers (Port offset 0x04) are configured to use IP QPriï¿½s.
+*
+* INPUTS:
+*        point - Pointer to the Ip Mapping Table.
+*                0 - 0x3f;
+*
+* OUTPUTS:
+*        ipFpri -  The value is 0 - 7
+*        ipQpri -  The value is 0 - 3.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetIpMappingPrio
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            point,
+    OUT  GT_U8            *ipFpri,
+    OUT  GT_U8            *ipQpri
+);
+
+/*******************************************************************************
+* gsysReadEeprom
+*
+* DESCRIPTION:
+*        Read EEPROM from EEPROMï¿½s address where the EEOp is performed.
+*
+* INPUTS:
+*        addr - EEPROM Address.
+*
+* OUTPUTS:
+*        data -  Data that was read back from the EEPROM.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysReadEeprom
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            addr,
+    OUT  GT_U8            *data
+);
+
+/*******************************************************************************
+* gsysWriteEeprom
+*
+* DESCRIPTION:
+*        Write EEPROM at the EEPROMï¿½s address where the EEOp is performed.
+*
+* INPUTS:
+*        addr - EEPROM Address.
+*        data - Data to be written to the EEPROM
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysWriteEeprom
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            addr,
+    IN  GT_U8            data
+);
+
+/*******************************************************************************
+* gsysRestartEeprom
+*
+* DESCRIPTION:
+*        Restart Register Loader execution at the EEPROMï¿½s address where the EEOp
+*        is performed
+*
+* INPUTS:
+*        addr - EEPROM Address. .
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysRestartEeprom
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            addr
+);
+
+/*******************************************************************************
+* gsysHaltEeprom
+*
+* DESCRIPTION:
+*        Halt (stop executing the EEPROM if its not already stopped)
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysHaltEeprom
+(
+    IN  GT_QD_DEV         *dev
+);
+
+/*******************************************************************************
+* gsysGetStEeprom
+*
+* DESCRIPTION:
+*        Get EEPROM status. They are Register Loader Running status and EEPROM
+*        Write Enable status
+*        runSt is GT_TRUE: Register Loader Running, whenever the register loader
+*            is busy executing the instructions contained in the EEPROM.
+*        writeEn is GT_TRUE: EEPROM Write Enable, that indicates that writing to
+*            the EEPROM is possible.
+*        writeEn is GT_FALSE: the Write EEPROM EEOp above will not do anything.
+*            This reflects the value of the EE_WE configuration pin after Reset.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        runSt   -   [GT_TRUE] / [GT_FALSE)
+*        writeEn -   [GT_TRUE] / [GT_FALSE)
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetStEeprom
+(
+    IN  GT_QD_DEV         *dev,
+    OUT GT_BOOL            *writeEn,
+    OUT GT_BOOL            *runSt
+);
+
+/*******************************************************************************
+* gsysSetScratchMiscCtrl
+*
+* DESCRIPTION:
+*        Set Scratch and Misc control data to the Scratch and Misc Control register.
+*        The register of Scratch and Misc control are.
+*                Scratch Byte 0
+*                Scratch Byte 1
+*                GPIO Configuration
+*                Reserved for future use
+*                GPIO Direction
+*                GPIO Data
+*                CONFIG Data 0
+*                CONFIG Data 1
+*                CONFIG Data 2
+*                CONFIG Data 3
+*                SyncE & TAICLK125ï¿½s Drive
+*                P5ï¿½s & CLK125ï¿½s Clock Drive
+*                P6ï¿½s Clock Drive
+*                EEPROM Pad drive
+*
+* INPUTS:
+*        point - Pointer to the Scratch and Misc. Control register.
+*        data  - Scratch and Misc. Control data written to the register
+*                pointed to by the point above.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysSetScratchMiscCtrl
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            point,
+    IN  GT_U8            data
+);
+
+/*******************************************************************************
+* gsysGetScratchMiscCtrl
+*
+* DESCRIPTION:
+*        Get Scratch and Misc control data from the Scratch and Misc Control register.
+*        The register of Scratch and Misc control are.
+*                Scratch Byte 0
+*                Scratch Byte 1
+*                GPIO Configuration
+*                Reserved for future use
+*                GPIO Direction
+*                GPIO Data
+*                CONFIG Data 0
+*                CONFIG Data 1
+*                CONFIG Data 2
+*                CONFIG Data 3
+*                SyncE & TAICLK125ï¿½s Drive
+*                P5ï¿½s & CLK125ï¿½s Clock Drive
+*                P6ï¿½s Clock Drive
+*                EEPROM Pad drive
+
+*
+* INPUTS:
+*        point - Pointer to the Scratch and Misc. Control register.
+*
+* OUTPUTS:
+*        data - Scratch and Misc. Control data read from the register
+*                pointed to by the point above.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetScratchMiscCtrl
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            point,
+    OUT  GT_U8            *data
+);
+
+/*******************************************************************************
+* gsysSetScratchBits
+*
+* DESCRIPTION:
+*        Set bits to the Scratch and Misc Control register <scratch byte 0 and 1>.
+*        These bits are 100% available to software for whatever purpose desired.
+*        These bits do not connect to any hardware function.
+*
+* INPUTS:
+*        scritch - written bits.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysSetScratchBits
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U16            scratch
+);
+
+/*******************************************************************************
+* gsysGetScratchBits
+*
+* DESCRIPTION:
+*        Get bits from the Scratch and Misc Control register <scratch byte 0 and 1>.
+*        These bits are 100% available to software for whatever purpose desired.
+*        These bits do not connect to any hardware function.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        scritch - read bits.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetScratchBits
+(
+    IN  GT_QD_DEV         *dev,
+    OUT  GT_U16            *scratch
+);
+
+/*******************************************************************************
+* gsysSetGpioConfigMod
+*
+* DESCRIPTION:
+*        Set bits to the Scratch and Misc Control register <GPIO Configuration>
+*        to configure GPIO mode.
+*        The bits are shared General Purpose Input Output mode Bits:
+*        Bit 6 - GT_GPIO_BIT_6:    1:GPIO[6]    0:SE_RCLK1
+*        Bit 5 - GT_GPIO_BIT_5:    1:GPIO[5]    0:SE_RCLK0
+*        Now, folloing bits are read only.
+*        Bit 4 - GT_GPIO_BIT_4:    1:GPIO[4]    0:
+*        Bit 3 - GT_GPIO_BIT_3:    1:GPIO[3]    0:
+*        Bit 2 - GT_GPIO_BIT_2:    1:GPIO[2]    0:
+*        Bit 1 - GT_GPIO_BIT_1:    1:GPIO[1]    0:P6_COL
+*        Bit 0 - GT_GPIO_BIT_0:    1:GPIO[0]    0:P6_CRS
+*
+* INPUTS:
+*        mode - OR [GT_GPIO_BIT_x]
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysSetGpioConfigMod
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            mode
+);
+
+/*******************************************************************************
+* gsysGetGpioConfigMod
+*
+* DESCRIPTION:
+*        Get mode from the Scratch and Misc Control register <GPIO Configuration>.
+*        The bits are shared General Purpose Input Output mode Bits:
+*        Bit 6 - GT_GPIO_BIT_6:    1:GPIO[6]    0:SE_RCLK1
+*        Bit 5 - GT_GPIO_BIT_5:    1:GPIO[5]    0:SE_RCLK0
+*        Now, folloing bits are read only.
+*        Bit 4 - GT_GPIO_BIT_4:    1:GPIO[4]    0:
+*        Bit 3 - GT_GPIO_BIT_3:    1:GPIO[3]    0:
+*        Bit 2 - GT_GPIO_BIT_2:    1:GPIO[2]    0:
+*        Bit 1 - GT_GPIO_BIT_1:    1:GPIO[1]    0:P6_COL
+*        Bit 0 - GT_GPIO_BIT_0:    1:GPIO[0]    0:P6_CRS
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        mode - OR [GT_GPIO_BIT_x]
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetGpioConfigMod
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            *mode
+);
+
+/*******************************************************************************
+* gsysSetGpioDirection
+*
+* DESCRIPTION:
+*        Set Gpio direction to the Scratch and Misc Control register <GPIO Direction>.
+*        The bits are used to control the direction of GPIO[6:0].
+*        When a GPIOï¿½s bit is set to a one that GPIO will become an input. When a
+*        GPIOï¿½s bit is cleared to a zero that GPIO will become an output
+*        General Purpose Input Output direction bits are:
+*        Bit 6 - GT_GPIO_BIT_6
+*        Bit 5 - GT_GPIO_BIT_5
+*        Bit 4 - GT_GPIO_BIT_4
+*        Bit 3 - GT_GPIO_BIT_3
+*        Bit 2 - GT_GPIO_BIT_2
+*        Bit 1 - GT_GPIO_BIT_1
+*        Bit 0 - GT_GPIO_BIT_0
+*
+* INPUTS:
+*        dir - OR [GT_GPIO_BIT_x]
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysSetGpioDirection
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            dir
+);
+
+/*******************************************************************************
+* gsysGetGpioDirection
+*
+* DESCRIPTION:
+*        get Gpio direction from the Scratch and Misc Control register <GPIO Direction>.
+*        The bits are used to control the direction of GPIO[6:0].
+*        When a GPIOï¿½s bit is set to a one that GPIO will become an input. When a
+*        GPIOï¿½s bit is cleared to a zero that GPIO will become an output
+*        General Purpose Input Output direction bits are:
+*        Bit 6 - GT_GPIO_BIT_6
+*        Bit 5 - GT_GPIO_BIT_5
+*        Bit 4 - GT_GPIO_BIT_4
+*        Bit 3 - GT_GPIO_BIT_3
+*        Bit 2 - GT_GPIO_BIT_2
+*        Bit 1 - GT_GPIO_BIT_1
+*        Bit 0 - GT_GPIO_BIT_0
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        dir - OR [GT_GPIO_BIT_x]
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetGpioDirection
+(
+    IN  GT_QD_DEV         *dev,
+    OUT  GT_U32            *dir
+);
+
+/*******************************************************************************
+* gsysSetGpioData
+*
+* DESCRIPTION:
+*        Set Gpio data to the Scratch and Misc Control register <GPIO data>.
+*        When a GPIOï¿½s bit is set to be an input, data written to this bit will go
+*        to a holding register but will not appear on the pin nor in this register.
+*        Reads of this register will return the actual, real-time, data that is
+*        appearing on the GPIOï¿½s pin.
+*        When a GPIOï¿½s bit is set to be an output, data written to this bit will go
+*        to a holding register and will appear on the GPIOï¿½s pin. Reads of this register
+*        will return the actual, real-time, data that is appearing on the GPIOï¿½s pin
+*        (which in this case should be the data written, but if its isnï¿½t that would
+*        be an indication of a conflict).
+*        When a pinï¿½s direction changes from input to output, the data last written
+*        to the holding register appears on the GPIOï¿½s pin
+*        General Purpose Input Output data bits are:
+*        Bit 6 - GT_GPIO_BIT_6
+*        Bit 5 - GT_GPIO_BIT_5
+*        Bit 4 - GT_GPIO_BIT_4
+*        Bit 3 - GT_GPIO_BIT_3
+*        Bit 2 - GT_GPIO_BIT_2
+*        Bit 1 - GT_GPIO_BIT_1
+*        Bit 0 - GT_GPIO_BIT_0
+*
+* INPUTS:
+*        data - OR [GT_GPIO_BIT_x]
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysSetGpioData
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            data
+);
+
+/*******************************************************************************
+* gsysGetGpioData
+*
+* DESCRIPTION:
+*        get Gpio data to the Scratch and Misc Control register <GPIO data>.
+*        When a GPIOï¿½s bit is set to be an input, data written to this bit will go
+*        to a holding register but will not appear on the pin nor in this register.
+*        Reads of this register will return the actual, real-time, data that is
+*        appearing on the GPIOï¿½s pin.
+*        When a GPIOï¿½s bit is set to be an output, data written to this bit will go
+*        to a holding register and will appear on the GPIOï¿½s pin. Reads of this register
+*        will return the actual, real-time, data that is appearing on the GPIOï¿½s pin
+*        (which in this case should be the data written, but if its isnï¿½t that would
+*        be an indication of a conflict).
+*        When a pinï¿½s direction changes from input to output, the data last written
+*        to the holding register appears on the GPIOï¿½s pin
+*        General Purpose Input Output data bits are:
+*        Bit 6 - GT_GPIO_BIT_6
+*        Bit 5 - GT_GPIO_BIT_5
+*        Bit 4 - GT_GPIO_BIT_4
+*        Bit 3 - GT_GPIO_BIT_3
+*        Bit 2 - GT_GPIO_BIT_2
+*        Bit 1 - GT_GPIO_BIT_1
+*        Bit 0 - GT_GPIO_BIT_0
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        data - OR [GT_GPIO_BIT_x]
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetGpioData
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            *data
+);
+
+/*******************************************************************************
+* gsysGetConfigData
+*
+* DESCRIPTION:
+*        Get Reset Configuration Pin Data 0-3.
+*        These registers return the values observed after a hardware Reset on the
+*        listed CONFIG data listed below.
+*            Config data 0:
+*              Bit    Config    Pin's Primary Name
+*                0    USER[0]        P6_OUTD[5]
+*                1    USER[1]        P6_OUTD[6]
+*                2    USER[2]        P6_OUTD[7]
+*                3    ADDR[0]        P5_OUTD[0]
+*                4    ADDR[1]        P5_OUTD[5]
+*                5    ADDR[2]        P5_OUTD[6]
+*                6    ADDR]3]        P5_OUTD[7]
+*                7    ADDR[4]        P5_OUTD[1]
+*            Config data 1:
+*                0    LED_SEL[0]    P1_LED
+*                1    LED_SEL[1]    P2_LED
+*                2    4COL P3_LED
+*                3    NormCx        P4_LED
+*                4    Jumbo        P0_LED
+*                5    EE_WE        EE_CS/C2_LED
+*                6    FD_FLOW        EE_CLK/C1_LED
+*                7    HD_FLOW        EE_DIN/C0_LED
+*            Config data 2:
+*                0    P5_MODE[0]    P5_OUTD[2]
+*                1    P5_MODE[1]    P5_OUTD[3]
+*                2    P5_MODE[2]    P5_OUTD[4]
+*                3    Reserved for future use
+*                4    P6_MODE[0]    P6_OUTD[2]
+*                5    P6_MODE[1]    P6_OUTD[3]
+*                6    P6_MODE[2]    P6_OUTD[4]
+*                7    Reserved for future use
+*            Config data 3:
+*                0    RMU_MODE[0] P6_OUTD[0]
+*                1    RMU_MODE[1] P6_OUTD[1]
+*                2    S_VDDOS[0]    PTP_TRIG
+*                3    CLK125EN    CLK125
+*                4    P5_VDDOS[0] P5_GTXCLK
+*                5    P5_VDDOS[1] P5_OUTEN
+*                6    P6_VDDOS[0] P5_GTXCLK
+*                7    P6_VDDOS[1] P6_OUTEN
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*       cfgDat - GT_CONFIG_DTTA
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetConfigData
+(
+    IN  GT_QD_DEV             *dev,
+    OUT  GT_CONFIG_DATA        *cfgData
+);
+
+/*******************************************************************************
+* gsysSetSyncETai
+*
+* DESCRIPTION:
+*        Set SyncE and Tai to the Scratch and Misc. Control register <SyncE and TAI pad>.
+*
+* INPUTS:
+*        zpr - ZPR for SyncE and TAI
+*        znr - ZNR for SyncE and TAI
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysSetSyncETai
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U8            zpr,
+    IN  GT_U8            znr
+);
+
+/*******************************************************************************
+* gsysGetSyncETai
+*
+* DESCRIPTION:
+*        Get SyncE and Tai from the Scratch and Misc Control register <SyncE and TAI pad>.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        zpr - ZPR for SyncE and TAI
+*        znr - ZNR for SyncE and TAI*
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetSyncETai
+(
+    IN  GT_QD_DEV         *dev,
+    OUT  GT_U8            *zpr,
+    OUT  GT_U8            *znr
+);
+
+/*******************************************************************************
+* gsysSetP6_Clock
+*
+* DESCRIPTION:
+*        Set P6_clock to the Scratch and Misc Control register <P6_Clock pad>.
+*
+* INPUTS:
+*        zpr - ZPR for P6_Clock
+*        znr - ZNR for P6_Clock
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysSetP6_Clock
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U8            zpr,
+    IN  GT_U8            znr
+);
+
+/*******************************************************************************
+* gsysGetP6_Clock
+*
+* DESCRIPTION:
+*       Get P6_clock from the Scratch and Misc Control register <P6_Clock pad>.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        zpr - ZPR for P6_Clock
+*        znr - ZNR for P6_Clock
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetP6_Clock
+(
+    IN  GT_QD_DEV         *dev,
+    OUT  GT_U8            *zpr,
+    OUT  GT_U8            *znr
+);
+
+/*******************************************************************************
+* gsysSetP5_Clock
+*
+* DESCRIPTION:
+*       Set P5_clock to the Scratch and Misc Control register <P5_Clock pad>.
+*
+* INPUTS:
+*        zpr - ZPR for P5_Clock
+*        znr - ZNR for P5_Clock
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysSetP5_Clock
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U8            zpr,
+    IN  GT_U8            znr
+);
+
+/*******************************************************************************
+* gsysGetP5_Clock
+*
+* DESCRIPTION:
+*       Get P5_clock from the Scratch and Misc Control register <P5_Clock pad>.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        zpr - ZPR for P5_Clock
+*        znr - ZNR for P5_Clock
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetP5_Clock
+(
+    IN  GT_QD_DEV         *dev,
+    OUT  GT_U8            *zpr,
+    OUT  GT_U8            *znr
+);
+
+/*******************************************************************************
+* gsysSetEEPROM
+*
+* DESCRIPTION:
+*       Set EEPROM cycle to the Scratch and Misc Control register <EEPROM pad>.
+*
+* INPUTS:
+*        dsm - DSM for EEPROM cycle
+*        zpr - ZPR for EEPROM cycle
+*        znr - ZNR for EEPROM cycle
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysSetEEPROM
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U8            dsm,
+    IN  GT_U8            zpr,
+    IN  GT_U8            znr
+);
+
+/*******************************************************************************
+* gsysGetEEPROM
+*
+* DESCRIPTION:
+*       Get EEPROM cycle to the Scratch and Misc Control register <EEPROM pad>.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        dsm - DSM for EEPROM cycle
+*        zpr - ZPR for EEPROM cycle
+*        znr - ZNR for EEPROM cycle
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetEEPROM
+(
+    IN  GT_QD_DEV         *dev,
+    OUT  GT_U8            *dsm,
+    OUT  GT_U8            *zpr,
+    OUT  GT_U8            *znr
+);
+
+
+/* gtAdvVct.c */
+
+/*******************************************************************************
+* gvctGetAdvCableDiag
+*
+* DESCRIPTION:
+*       This routine perform the advanced virtual cable test for the requested
+*       port and returns the the status per MDI pair.
+*
+* INPUTS:
+*       port - logical port number.
+*       mode - advance VCT mode (either First Peak or Maximum Peak)
+*
+* OUTPUTS:
+*       cableStatus - the port copper cable status.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       Internal Gigabit Phys in 88E6165 family and 88E6351 family devices
+*        are supporting this API.
+*
+*******************************************************************************/
+GT_STATUS gvctGetAdvCableDiag
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT        port,
+    IN  GT_ADV_VCT_MODE mode,
+    OUT GT_ADV_CABLE_STATUS *cableStatus
+);
+
+/*******************************************************************************
+* gvctGetAdvExtendedStatus
+*
+* DESCRIPTION:
+*        This routine retrieves extended cable status, such as Pair Poloarity,
+*        Pair Swap, and Pair Skew. Note that this routine will be success only
+*        if 1000Base-T Link is up.
+*        DSP based cable length is also provided.
+*
+* INPUTS:
+*       dev  - pointer to GT driver structure returned from mdLoadDriver
+*       port - logical port number.
+*
+* OUTPUTS:
+*       extendedStatus - the extended cable status.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*        Supporting Device list:
+*           88E1111, 88E1112, 88E1141~6, 88E1149, and Internal Gigabit Phys
+*            in 88E6165 family and 88E6351 family devices
+*
+*******************************************************************************/
+GT_STATUS gvctGetAdvExtendedStatus
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT   port,
+    OUT GT_ADV_EXTENDED_STATUS *extendedStatus
+);
+
+#ifdef CONFIG_AVB_FPGA
+
+/*******************************************************************************
+* gptpGetFPGAIntStatus
+*
+* DESCRIPTION:
+*       This routine gets interrupt status of PTP logic.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        ptpInt    - PTP Int Status
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetFPGAIntStatus
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U32        *ptpInt
+);
+
+/*******************************************************************************
+* gptpSetFPGAIntStatus
+*
+* DESCRIPTION:
+*       This routine sets interrupt status of PTP logic.
+*
+* INPUTS:
+*    ptpInt    - PTP Int Status
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetFPGAIntStatus
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U32    ptpInt
+);
+
+/*******************************************************************************
+* gptpSetFPGAIntEn
+*
+* DESCRIPTION:
+*       This routine enables PTP interrupt.
+*
+* INPUTS:
+*        ptpInt    - PTP Int Status (1 to enable, 0 to disable)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetFPGAIntEn
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        ptpInt
+);
+
+/*******************************************************************************
+* gptpGetClockSource
+*
+* DESCRIPTION:
+*       This routine gets PTP Clock source setup.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        clkSrc    - PTP clock source (A/D Device or FPGA)
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetClockSource
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_PTP_CLOCK_SRC     *clkSrc
+);
+
+/*******************************************************************************
+* gptpSetClockSource
+*
+* DESCRIPTION:
+*       This routine sets PTP Clock source setup.
+*
+* INPUTS:
+*        clkSrc    - PTP clock source (A/D Device or FPGA)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetClockSource
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PTP_CLOCK_SRC     clkSrc
+);
+
+/*******************************************************************************
+* gptpGetP9Mode
+*
+* DESCRIPTION:
+*       This routine gets Port 9 Mode.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        mode - Port 9 mode (GT_PTP_P9_MODE enum type)
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetP9Mode
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_PTP_P9_MODE     *mode
+);
+
+/*******************************************************************************
+* gptpSetP9Mode
+*
+* DESCRIPTION:
+*       This routine sets Port 9 Mode.
+*
+* INPUTS:
+*        mode - Port 9 mode (GT_PTP_P9_MODE enum type)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetP9Mode
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PTP_P9_MODE     mode
+);
+
+/*******************************************************************************
+* gptpReset
+*
+* DESCRIPTION:
+*       This routine performs software reset for PTP logic.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpReset
+(
+    IN  GT_QD_DEV     *dev
+);
+
+/*******************************************************************************
+* gptpGetCycleAdjustEn
+*
+* DESCRIPTION:
+*       This routine checks if PTP Duty Cycle Adjustment is enabled.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        adjEn    - GT_TRUE if enabled, GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetCycleAdjustEn
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *adjEn
+);
+
+/*******************************************************************************
+* gptpSetCycleAdjustEn
+*
+* DESCRIPTION:
+*       This routine enables/disables PTP Duty Cycle Adjustment.
+*
+* INPUTS:
+*        adjEn    - GT_TRUE to enable, GT_FALSE to disable
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetCycleAdjustEn
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL        adjEn
+);
+
+/*******************************************************************************
+* gptpGetCycleAdjust
+*
+* DESCRIPTION:
+*       This routine gets clock duty cycle adjustment value.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        adj    - adjustment value (GT_PTP_CLOCK_ADJUSTMENT structure)
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetCycleAdjust
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_PTP_CLOCK_ADJUSTMENT    *adj
+);
+
+/*******************************************************************************
+* gptpSetCycleAdjust
+*
+* DESCRIPTION:
+*       This routine sets clock duty cycle adjustment value.
+*
+* INPUTS:
+*        adj    - adjustment value (GT_PTP_CLOCK_ADJUSTMENT structure)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetCycleAdjust
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PTP_CLOCK_ADJUSTMENT    *adj
+);
+
+/*******************************************************************************
+* gptpGetPLLEn
+*
+* DESCRIPTION:
+*       This routine checks if PLL is enabled.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        en        - GT_TRUE if enabled, GT_FALSE otherwise
+*        freqSel    - PLL Frequency Selection (default 0x3 - 22.368MHz)
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       PLL Frequence selection is based on the Clock Recovery PLL device.
+*        IDT MK1575-01 is the default PLL device.
+*
+*******************************************************************************/
+GT_STATUS gptpGetPLLEn
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *en,
+    OUT GT_U32        *freqSel
+);
+
+/*******************************************************************************
+* gptpSetPLLEn
+*
+* DESCRIPTION:
+*       This routine enables/disables PLL device.
+*
+* INPUTS:
+*        en        - GT_TRUE to enable, GT_FALSE to disable
+*        freqSel    - PLL Frequency Selection (default 0x3 - 22.368MHz)
+*                  Meaningful only when enabling PLL device
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       PLL Frequence selection is based on the Clock Recovery PLL device.
+*        IDT MK1575-01 is the default PLL device.
+*
+*******************************************************************************/
+GT_STATUS gptpSetPLLEn
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL        en,
+    IN  GT_U32        freqSel
+);
+
+/*******************************************************************************
+* gptpGetDDSReg
+*
+* DESCRIPTION:
+*       This routine gets DDS register data.
+*
+* INPUTS:
+*    ddsReg    - DDS Register
+*
+* OUTPUTS:
+*    ddsData    - register data
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetDDSReg
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32    ddsReg,
+    OUT GT_U32    *ddsData
+);
+
+/*******************************************************************************
+* gptpSetDDSReg
+*
+* DESCRIPTION:
+*       This routine sets DDS register data.
+*    DDS register data written by this API are not affected until gptpUpdateDDSReg API is called.
+*
+* INPUTS:
+*    ddsReg    - DDS Register
+*    ddsData    - register data
+*
+* OUTPUTS:
+*    none
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetDDSReg
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32    ddsReg,
+    IN  GT_U32    ddsData
+);
+
+/*******************************************************************************
+* gptpUpdateDDSReg
+*
+* DESCRIPTION:
+*       This routine updates DDS register data.
+*    DDS register data written by gptpSetDDSReg are not affected until this API is called.
+*
+* INPUTS:
+*    none
+*
+* OUTPUTS:
+*    none
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpUpdateDDSReg
+(
+    IN  GT_QD_DEV     *dev
+);
+
+/*******************************************************************************
+* gptpSetADFReg
+*
+* DESCRIPTION:
+*       This routine sets ADF4156 register data.
+*
+* INPUTS:
+*    adfData    - register data
+*
+* OUTPUTS:
+*    none
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetADFReg
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32    adfData
+);
+#endif  /*  CONFIG_AVB_FPGA */
+
+/* gtTCAM.c */
+/*******************************************************************************
+* gtcamFlushAll
+*
+* DESCRIPTION:
+*       This routine is to flush all entries. A Flush All command will initialize
+*       TCAM Pages 0 and 1, offsets 0x02 to 0x1B to 0x0000, and TCAM Page 2 offset
+*       0x02 to 0x05 to 0x0000 for all TCAM entries with the exception that TCAM
+*       Page 0 offset 0x02 will be initialized to 0x00FF.
+*
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtcamFlushAll
+(
+    IN  GT_QD_DEV     *dev
+);
+
+/*******************************************************************************
+* gtcamFlushEntry
+*
+* DESCRIPTION:
+*       This routine is to flush a single entry. A Flush a single TCAM entry command
+*       will write the same values to a TCAM entry as a Flush All command, but it is
+*       done to the selected single TCAM entry only.
+*
+*
+* INPUTS:
+*        tcamPointer - pointer to the desired entry of TCAM (0 ~ 254)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtcamFlushEntry
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        tcamPointer
+);
+
+/*******************************************************************************
+* gtcamLoadEntry
+*
+* DESCRIPTION:
+*       This routine loads a TCAM entry.
+*        The load sequence of TCAM entry is critical. Each TCAM entry is made up of
+*       3 pages of data. All 3 pages need to loaded in a particular order for the TCAM
+*       to operate correctly while frames are flowing through the switch.
+*       If the entry is currently valid, it must first be flushed. Then page 2 needs
+*       to be loaded first, followed by page 1 and then finally page 0.
+*       Each page load requires its own write TCAMOp with these TCAM page bits set
+*       accordingly.
+*
+* INPUTS:
+*        tcamPointer - pointer to the desired entry of TCAM (0 ~ 254)
+*        tcamData    - Tcam entry Data
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtcamLoadEntry
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        tcamPointer,
+    IN  GT_TCAM_DATA        *tcamData
+);
+
+/*******************************************************************************
+* gtcamPurgyEntry
+*
+* DESCRIPTION:
+*       This routine Purgy a TCAM entry.
+*
+* INPUTS:
+*        tcamPointer - pointer to the desired entry of TCAM (0 ~ 254)
+*        tcamData    - Tcam entry Data
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtcamPurgyEntry
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        tcamPointer,
+    IN  GT_TCAM_DATA        *tcamData
+);
+
+/*******************************************************************************
+* gtcamReadTCAMData
+*
+* DESCRIPTION:
+*       This routine loads the global 3 offsets 0x02 to 0x1B registers with
+*       the data found in the TCAM entry and its TCAM page pointed to by the TCAM
+*       entry and TCAM page bits of this register (bits 7:0 and 11:10 respectively.
+*
+*
+* INPUTS:
+*        tcamPointer - pointer to the desired entry of TCAM (0 ~ 254)
+*
+* OUTPUTS:
+*        tcamData    - Tcam entry Data
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtcamReadTCAMData
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        tcamPointer,
+    OUT GT_TCAM_DATA        *tcamData
+);
+
+/*******************************************************************************
+* gtcamGetNextTCAMData
+*
+* DESCRIPTION:
+*       This routine  finds the next higher TCAM Entry number that is valid (i.e.,
+*       any entry whose Page 0 offset 0x02 is not equal to 0x00FF). The TCAM Entry
+*       register (bits 7:0) is used as the TCAM entry to start from. To find
+*       the lowest number TCAM Entry that is valid, start the Get Next operation
+*       with TCAM Entry set to 0xFF.
+*
+*
+* INPUTS:
+*        tcamPointer - pointer to the desired entry of TCAM (0 ~ 255)
+*
+* OUTPUTS:
+*        tcamPointer - next pointer entry of TCAM (0 ~ 255)
+*        tcamData    - Tcam entry Data
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtcamGetNextTCAMData
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        *tcamPointer,
+    OUT GT_TCAM_DATA        *tcamData
+);
+
+/*******************************************************************************
+* gprtGetSerdesMode
+*
+* DESCRIPTION:
+*       This routine reads Serdes Interface Mode.
+*
+* INPUTS:
+*        port -  The physical SERDES device address(4/5)
+*
+* OUTPUTS:
+*       mode    - Serdes Interface Mode
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       logical port number is supported only for the devices made production
+*       before 2009.
+*  (Serdes devices: 88E6131, 88E6122, 88E6108, 88E6161, 88E6165 and 88E352 family)
+*
+*******************************************************************************/
+GT_STATUS gprtGetSerdesMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_SERDES_MODE *mode
+);
+
+
+/*******************************************************************************
+* gprtSetSerdesMode
+*
+* DESCRIPTION:
+*       This routine sets Serdes Interface Mode.
+*
+* INPUTS:
+*       port -  The physical SERDES device address(4/5)
+*       mode    - Serdes Interface Mode
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       logical port number is supported only for the devices made production
+*       before 2009.
+*  (Serdes devices: 88E6131, 88E6122, 88E6108, 88E6161, 88E6165 and 88E352 family)
+*
+*******************************************************************************/
+GT_STATUS gprtSetSerdesMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_SERDES_MODE mode
+);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __msApi_h */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApiSelect.h b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApiSelect.h
new file mode 100644
index 000000000000..9171801ff343
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApiSelect.h
@@ -0,0 +1,55 @@
+ #include <Copyright.h>
+
+/********************************************************************************
+* msApiSelect.h
+*
+* DESCRIPTION:
+*       API selection for QuarterDeck Device
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+
+#ifndef __msApiSelect_h
+#define __msApiSelect_h
+
+/* Micro definitions */
+/* Customers set self selection for DSDT  in here */
+
+/* DSDT phy API use Mad driver(DSDT/phy) */
+#if 1
+#undef GT_USE_MAD
+#else
+#define GT_USE_MAD 1
+#endif
+
+/* DSDT uses RMGMT to replace SMI */
+#if 1
+#undef GT_RMGMT_ACCESS
+#else
+#define GT_RMGMT_ACCESS 1
+#endif
+
+/* Only for Keystone FPGA design of PTP */
+#if 1
+#undef CONFIG_AVB_FPGA
+#undef CONFIG_AVB_FPGA_2
+/*
+#else
+#define CONFIG_AVB_FPGA  1
+#define CONFIG_AVB_FPGA_2 1
+*/
+#endif
+
+/* To use port mapping functions in Dev configuration */
+#if 1
+#undef GT_PORT_MAP_IN_DEV
+#else
+#define GT_PORT_MAP_IN_DEV  1
+#endif
+
+
+
+#endif /* __msApiSelect_h */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApiTypes.h b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApiTypes.h
new file mode 100644
index 000000000000..ee1b3649f9aa
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApiTypes.h
@@ -0,0 +1,100 @@
+#include <Copyright.h>
+
+/*
+ * FILENAME:    $Workfile: mv_types.h $
+ * REVISION:    $Revision: 3 $
+ * LAST UPDATE: $Modtime: 12/24/02 5:37p $
+ *
+ * DESCRIPTION:
+ *     This file defines common data types used on Host and NetGX sides.
+ */
+
+
+#ifndef MV_TYPES_H
+#define MV_TYPES_H
+
+
+/* general */
+
+#undef IN
+#define IN
+#undef OUT
+#define OUT
+#undef INOUT
+#define INOUT
+
+
+#ifndef NULL
+#define NULL ((void*)0)
+#endif
+
+typedef void  GT_VOID;
+typedef char  GT_8;
+typedef short GT_16;
+typedef long  GT_32;
+
+typedef unsigned char  GT_U8;
+typedef unsigned short GT_U16;
+typedef unsigned long  GT_U32;
+typedef unsigned int   GT_UINT;
+
+typedef union {
+    GT_U8    c[8];
+    GT_U16    s[4];
+    GT_U32    l[2];
+} GT_U64;
+
+
+typedef enum {
+    GT_FALSE = 0,
+    GT_TRUE  = 1
+} GT_BOOL;
+
+typedef void          (*GT_VOIDFUNCPTR) (void); /* ptr to function returning void */
+typedef unsigned int  (*GT_INTFUNCPTR)  (void); /* ptr to function returning int  */
+
+
+/* module state */
+typedef enum {
+    GT_STATE_NONE = 0,    /* Uninitialized */
+    GT_STATE_IDLE,        /* Initialized, but not started (or stopped) */
+    GT_STATE_ACTIVE        /* Started */
+} GT_STATE;
+
+
+#define    GT_ETHERNET_HEADER_SIZE        (6)
+
+typedef struct
+{
+    GT_U8       arEther[GT_ETHERNET_HEADER_SIZE];
+}GT_ETHERADDR;
+
+/* This macro checks for a multicast mac address    */
+#define GT_IS_MULTICAST_MAC(mac)  ((mac.arEther[0] & 0x1) == 1)
+
+
+/* This macro checks for an broadcast mac address     */
+#define GT_IS_BROADCAST_MAC(mac) (((mac).arEther[0] == 0xFF) && ((mac).arEther[1] == 0xFF) && ((mac).arEther[2] == 0xFF) && ((mac).arEther[3] == 0xFF) && ((mac).arEther[4] == 0xFF) && ((mac).arEther[5] == 0xFF))
+
+
+/* status / error codes */
+typedef int GT_STATUS;
+
+#define GT_ERROR           (-1)
+#define GT_OK               (0x00)    /* Operation succeeded                   */
+#define GT_FAIL               (0x01)    /* Operation failed                      */
+#define GT_BAD_VALUE       (0x02)   /* Illegal value (general)               */
+#define GT_BAD_PARAM       (0x04)   /* Illegal parameter in function called  */
+#define GT_NOT_FOUND       (0x0B)   /* Item not found                        */
+#define GT_NO_MORE         (0x0C)   /* No more items found                   */
+#define GT_NO_SUCH         (0x0D)   /* No such item                          */
+#define GT_TIMEOUT         (0x0E)   /* Time Out                              */
+#define GT_NOT_SUPPORTED   (0x10)   /* This request is not support           */
+#define GT_ALREADY_EXIST   (0x1B)   /* Tried to create existing item         */
+#define GT_BAD_CPU_PORT    (0x20)   /* Input CPU Port is not valid physical port number */
+
+extern GT_U8 qdLong2Char(GT_U32 data);
+extern GT_U8 qdShort2Char(GT_U16 data);
+extern GT_U16 qdLong2Short(GT_U32 data);
+
+#endif /* MV_TYPES_H */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApiWince.h b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApiWince.h
new file mode 100644
index 000000000000..f5e8fe4e3b7f
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/Include/msApiWince.h
@@ -0,0 +1,556 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* msApiWince.h
+*
+* DESCRIPTION:
+*       Wince Application need to include only this header file.
+*
+* DEPENDENCIES:   None
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+
+#ifndef __msApiFunc_h
+#define __msApiFunc_h
+
+#include "msApiDefs.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef GT_STATUS (*FGT_PRT_ATUSIZE)(ATU_SIZE);
+typedef GT_STATUS (*FGT_PRT_U32_U32)(GT_U32*,GT_U32*);
+typedef GT_STATUS (*FGT_VALUE_U32)(GT_U32);
+typedef GT_STATUS (*FGT_PTR_U32)(GT_U32*);
+typedef GT_STATUS (*FGT_PTR_U16)(GT_U16*);
+typedef GT_STATUS (*FGT_PTR_U32_U32_U32)(GT_U32,GT_U32,GT_U32*);
+typedef GT_STATUS (*FGT_PTR_ATUENTRY)(GT_ATU_ENTRY*);
+typedef GT_STATUS (*FGT_PTR_ATUENTRY_BOOL)(GT_ATU_ENTRY*, GT_BOOL*);
+typedef GT_STATUS (*FGT_VALUE_FLUSHCMD)(GT_FLUSH_CMD);
+typedef GT_STATUS (*FGT_PTR_ETHERADDR)(GT_ETHERADDR*);
+typedef GT_STATUS (*FGT_PTR_BOOL)(GT_BOOL*);
+typedef GT_STATUS (*FGT_VALUE_BOOL)(GT_BOOL);
+typedef GT_STATUS (*FGT_VALUE_PORT_STPSTATE)(GT_LPORT,GT_PORT_STP_STATE);
+typedef GT_STATUS (*FGT_PTR_PORT_STPSTATE)(GT_LPORT,GT_PORT_STP_STATE*);
+typedef GT_STATUS (*FGT_VALUE_PORT_EGRESSMODE)(GT_LPORT,GT_EGRESS_MODE);
+typedef GT_STATUS (*FGT_PTR_PORT_EGRESSMODE)(GT_LPORT,GT_EGRESS_MODE*);
+typedef GT_STATUS (*FGT_VALUE_PORT_BOOL)(GT_LPORT,GT_BOOL);
+typedef GT_STATUS (*FGT_PTR_PORT_BOOL)(GT_LPORT,GT_BOOL*);
+typedef GT_STATUS (*FGT_VALUE_PORT_PORTS_U8)(GT_LPORT,GT_LPORT*,GT_U8);
+typedef GT_STATUS (*FGT_PTR_PORT_PORTS_U8)(GT_LPORT,GT_LPORT*,GT_U8*);
+typedef GT_STATUS (*FGT_VALUE_PORT_U16)(GT_LPORT,GT_U16);
+typedef GT_STATUS (*FGT_PTR_PORT_U16)(GT_LPORT,GT_U16*);
+typedef GT_STATUS (*FGT_VALUE_PORT_AUTOMODE)(GT_LPORT,GT_PHY_AUTO_MODE);
+
+typedef GT_STATUS (*FGT_VALUE_PORT)(GT_LPORT);
+typedef GT_STATUS (*FGT_VALUE_U8)(GT_U8);
+typedef GT_STATUS (*FGT_PTR_U8)(GT_U8*);
+typedef GT_STATUS (*FGT_VALUE_PORT_U8)(GT_LPORT,GT_U8);
+typedef GT_STATUS (*FGT_PTR_PORT_U8)(GT_LPORT,GT_U8*);
+typedef GT_STATUS (*FGT_VALUE_PORT_INGRESSMODE)(GT_LPORT,GT_INGRESS_MODE);
+typedef GT_STATUS (*FGT_PTR_PORT_INGRESSMODE)(GT_LPORT,GT_INGRESS_MODE*);
+typedef GT_STATUS (*FGT_VALUE_PORT_MCRATE)(GT_LPORT,GT_MC_RATE);
+typedef GT_STATUS (*FGT_PTR_PORT_MCRATE)(GT_LPORT,GT_MC_RATE*);
+typedef GT_STATUS (*FGT_VALUE_CTRMODE)(GT_CTR_MODE);
+typedef GT_STATUS (*FGT_PTR_CTRMODE)(GT_CTR_MODE*);
+typedef GT_STATUS (*FGT_VOID)(void);
+typedef GT_STATUS (*FGT_PTR_PORT_PORTSTAT)(GT_LPORT,GT_PORT_STAT*);
+typedef GT_STATUS (*FGT_VALUE_U8_U8)(GT_U8,GT_U8);
+typedef GT_STATUS (*FGT_PTR_U8_U8)(GT_U8,GT_U8*);
+typedef GT_STATUS (*FGT_PTR_CONFIG_INFO)(GT_SYS_CONFIG*,GT_SYS_INFO*);
+typedef GT_STATUS (*FGT_PTR_VERSION)(GT_VERSION*);
+typedef GT_STATUS (*FGT_PTR_REGISTER)(BSP_FUNCTIONS*);
+typedef GT_STATUS (*FGT_PTR_INT_HANDLER)(FGT_INT_HANDLER*);
+
+typedef GT_STATUS (*FGT_PTR_U32_U32_U16)(GT_U32,GT_U32,GT_U16);
+
+
+extern FGT_PRT_ATUSIZE             gfdbSetAtuSize;
+extern FGT_PRT_U32_U32             gfdbGetAgingTimeRange;
+extern FGT_VALUE_U32             gfdbSetAgingTimeout;
+extern FGT_PTR_U32                 gfdbGetAtuDynamicCount;
+extern FGT_PTR_ATUENTRY         gfdbGetAtuEntryFirst;
+extern FGT_PTR_ATUENTRY         gfdbGetAtuEntryNext;
+extern FGT_PTR_ATUENTRY_BOOL     gfdbFindAtuMacEntry;
+extern FGT_VALUE_FLUSHCMD         gfdbFlush;
+extern FGT_PTR_ATUENTRY         gfdbAddMacEntry; //liane
+extern FGT_PTR_ETHERADDR         gfdbDelMacEntry;
+extern FGT_VALUE_BOOL             gfdbLearnEnable;
+extern FGT_VALUE_BOOL                 gstpSetMode;
+extern FGT_VALUE_PORT_STPSTATE         gstpSetPortState;
+extern FGT_PTR_PORT_STPSTATE         gstpGetPortState;
+extern FGT_VALUE_PORT_EGRESSMODE     gprtSetEgressMode;
+extern FGT_PTR_PORT_EGRESSMODE         gprtGetEgressMode;
+extern FGT_VALUE_PORT_BOOL             gprtSetVlanTunnel;
+extern FGT_PTR_PORT_BOOL             gprtGetVlanTunnel;
+extern FGT_VALUE_PORT_PORTS_U8        gvlnSetPortVlanPorts;
+extern FGT_PTR_PORT_PORTS_U8        gvlnGetPortVlanPorts;
+extern FGT_VALUE_PORT_BOOL            gvlnSetPortUserPriLsb;
+extern FGT_PTR_PORT_BOOL            gvlnGetPortUserPriLsb;
+extern FGT_VALUE_PORT_U16            gvlnSetPortVid;
+extern FGT_PTR_PORT_U16                gvlnGetPortVid;
+extern FGT_VALUE_U32                eventSetActive;
+extern FGT_PTR_U16                    eventGetIntStatus;
+extern FGT_VALUE_PORT                gprtPhyReset;
+extern FGT_VALUE_PORT_BOOL            gprtSetPortLoopback;
+extern FGT_VALUE_PORT_BOOL            gprtSetPortSpeed;
+extern FGT_VALUE_PORT_BOOL            gprtPortAutoNegEnable;
+extern FGT_VALUE_PORT_BOOL            gprtPortPowerDown;
+extern FGT_VALUE_PORT                gprtPortRestartAutoNeg;
+extern FGT_VALUE_PORT_BOOL            gprtSetPortDuplexMode;
+extern FGT_VALUE_PORT_AUTOMODE        gprtSetPortAutoMode;
+extern FGT_VALUE_PORT_BOOL            gprtSetPause;
+extern FGT_VALUE_PORT_U16            gprtPhyIntEnable;
+extern FGT_PTR_PORT_U16                gprtGetPhyIntStatus;
+extern FGT_PTR_U16                    gprtGetPhyIntPortSummary;
+extern FGT_VALUE_PORT_BOOL            gprtSetForceFc;
+extern FGT_PTR_PORT_BOOL            gprtGetForceFc;
+extern FGT_VALUE_PORT_BOOL            gprtSetTrailerMode;
+extern FGT_PTR_PORT_BOOL            gprtGetTrailerMode;
+extern FGT_VALUE_PORT_INGRESSMODE    gprtSetIngressMode;
+extern FGT_PTR_PORT_INGRESSMODE        gprtGetIngressMode;
+extern FGT_VALUE_PORT_MCRATE        gprtSetMcRateLimit;
+extern FGT_PTR_PORT_MCRATE            gprtGetMcRateLimit;
+extern FGT_VALUE_CTRMODE            gprtSetCtrMode;
+extern FGT_VOID                    gprtClearAllCtr;
+extern FGT_PTR_PORT_PORTSTAT    gprtGetPortCtr;
+extern FGT_PTR_PORT_BOOL        gprtGetPartnerLinkPause;
+extern FGT_PTR_PORT_BOOL        gprtGetSelfLinkPause;
+extern FGT_PTR_PORT_BOOL        gprtGetResolve;
+extern FGT_PTR_PORT_BOOL        gprtGetLinkState;
+extern FGT_PTR_PORT_BOOL        gprtGetPortMode;
+extern FGT_PTR_PORT_BOOL        gprtGetPhyMode;
+extern FGT_PTR_PORT_BOOL        gprtGetDuplex;
+extern FGT_PTR_PORT_BOOL        gprtGetSpeed;
+extern FGT_VALUE_PORT_U8        gcosSetPortDefaultTc;
+extern FGT_VALUE_PORT_BOOL        gqosSetPrioMapRule;
+extern FGT_PTR_PORT_BOOL        gqosGetPrioMapRule;
+extern FGT_VALUE_PORT_BOOL        gqosIpPrioMapEn;
+extern FGT_PTR_PORT_BOOL        gqosGetIpPrioMapEn;
+extern FGT_VALUE_PORT_BOOL        gqosUserPrioMapEn;
+extern FGT_PTR_PORT_BOOL        gqosGetUserPrioMapEn;
+extern FGT_PTR_U8_U8            gcosGetUserPrio2Tc;
+extern FGT_VALUE_U8_U8            gcosSetUserPrio2Tc;
+extern FGT_PTR_U8_U8            gcosGetDscp2Tc;
+extern FGT_VALUE_U8_U8            gcosSetDscp2Tc;
+extern FGT_PTR_CONFIG_INFO        sysConfig;
+extern FGT_VOID                    sysEnable;
+extern FGT_VOID                    gsysSwReset;
+extern FGT_VALUE_BOOL            gsysSetDiscardExcessive;
+extern FGT_PTR_BOOL                gsysGetDiscardExcessive;
+extern FGT_VALUE_BOOL            gsysSetSchedulingMode;
+extern FGT_PTR_BOOL                gsysGetSchedulingMode;
+extern FGT_VALUE_BOOL            gsysSetMaxFrameSize;
+extern FGT_PTR_BOOL                gsysGetMaxFrameSize;
+extern FGT_VOID                    gsysReLoad;
+extern FGT_VALUE_BOOL            gsysSetWatchDog;
+extern FGT_PTR_BOOL                gsysGetWatchDog;
+extern FGT_PTR_ETHERADDR        gsysSetDuplexPauseMac;
+extern FGT_PTR_ETHERADDR        gsysGetDuplexPauseMac;
+extern FGT_VALUE_BOOL            gsysSetPerPortDuplexPauseMac;
+extern FGT_PTR_BOOL                gsysGetPerPortDuplexPauseMac;
+extern FGT_PTR_U32_U32_U32        gsysReadMiiReg;
+extern FGT_PTR_VERSION            gtVersion;
+extern FGT_PTR_REGISTER            gtRegister;
+
+extern FGT_PTR_U32_U32_U16        gsysWriteMiiReg;
+
+/*
+ * This function will get the all the MS APIs and assign to local function pointers.
+ */
+int qdGetMSApiFunc();
+
+GT_U32 gtStrlen
+(
+    IN const void * source
+);
+
+//*****************************************************************************
+//  I O C T L S
+//*****************************************************************************
+#include "windev.h"
+
+typedef struct _GT_IOCTL_PARAM
+{
+    union
+    {
+        GT_LPORT    portList[8];
+        GT_LPORT    port;
+        GT_U8          u8Data;
+        GT_U16      u16Data;
+        GT_U32      u32Data;
+        GT_BOOL     boolData;
+
+        GT_CTR_MODE    ctrMode;
+        GT_PORT_STP_STATE    stpState;
+        GT_EGRESS_MODE        egressMode;
+        GT_INGRESS_MODE        ingressMode;
+        GT_MC_RATE        mcRate;
+        GT_PORT_STAT    portStat;
+        ATU_SIZE         atuSize;
+        GT_FLUSH_CMD     flushCmd;
+
+        GT_ATU_ENTRY     atuEntry;
+        GT_ETHERADDR     etherAddr;
+        GT_SYS_CONFIG     sysConfig;
+        GT_SYS_INFO        sysInfo;
+
+    } FirstParam;
+
+    union
+    {
+        GT_LPORT    port;
+        GT_LPORT    portList[8];
+        GT_U8        u8Data;
+        GT_U16        u16Data;
+        GT_U32        u32Data;
+        GT_BOOL        boolData;
+        GT_PORT_STP_STATE    stpState;
+        GT_EGRESS_MODE        egressMode;
+        GT_INGRESS_MODE        ingressMode;
+        GT_MC_RATE        mcRate;
+
+        GT_PORT_STAT    portStat;
+
+        GT_PHY_AUTO_MODE    phyAutoMode;
+
+    } SecondParam;
+
+    union
+    {
+        GT_U8    u8Data;
+        GT_U16    u16Data;
+        GT_U32    u32Data;
+
+    } ThirdParam;
+
+} GT_IOCTL_PARAM, *PGT_IOCTL_PARAM;
+
+#define GET_FUNC_FROM_CTL_CODE(_ioctl) ((_ioctl>>2) & 0xFFF)
+
+/*
+    Microsoft allows for us to use 0x800 ~ 0xFFF
+    So, our program is using 6 bits for function group,
+    and 6 bits for each function.
+*/
+#define SUB_FUNC_MASK        0xFC0
+#define SYS_CFG_FUNC_MASK    (1 << 6) | 0x800
+#define SYS_CTRL_FUNC_MASK    (2 << 6) | 0x800
+#define FDB_FUNC_MASK        (3 << 6) | 0x800
+#define VLAN_FUNC_MASK        (4 << 6) | 0x800
+#define STP_FUNC_MASK        (5 << 6) | 0x800
+#define PORT_CTRL_FUNC_MASK        (6 << 6) | 0x800
+#define PORT_STATUS_FUNC_MASK    (7 << 6) | 0x800
+#define PORT_STATS_FUNC_MASK    (8 << 6) | 0x800
+#define QOS_FUNC_MASK            (9 << 6) | 0x800
+#define PHY_CTRL_FUNC_MASK        (10 << 6) | 0x800
+#define SYS_EVENT_FUNC_MASK        (11 << 6) | 0x800
+#define PHY_INT_FUNC_MASK        (12 << 6) | 0x800
+
+/*
+    Functions for SYS Configuration
+*/
+#define IOCTL_sysConfig    \
+    CTL_CODE(FILE_DEVICE_NETWORK , SYS_CFG_FUNC_MASK + 0, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gsysReadMiiReg    \
+    CTL_CODE(FILE_DEVICE_NETWORK , SYS_CFG_FUNC_MASK + 1, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gsysWriteMiiReg    \
+    CTL_CODE(FILE_DEVICE_NETWORK , SYS_CFG_FUNC_MASK + 2, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gtVersion    \
+    CTL_CODE(FILE_DEVICE_NETWORK , SYS_CFG_FUNC_MASK + 3, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+/*
+    Functions for ATU
+*/
+#define IOCTL_gfdbSetAtuSize    \
+    CTL_CODE(FILE_DEVICE_NETWORK , FDB_FUNC_MASK + 0, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gfdbGetAgingTimeRange    \
+    CTL_CODE(FILE_DEVICE_NETWORK , FDB_FUNC_MASK + 1, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gfdbSetAgingTimeout    \
+    CTL_CODE(FILE_DEVICE_NETWORK , FDB_FUNC_MASK + 2, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gfdbGetAtuDynamicCount    \
+    CTL_CODE(FILE_DEVICE_NETWORK , FDB_FUNC_MASK + 3, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gfdbGetAtuEntryFirst    \
+    CTL_CODE(FILE_DEVICE_NETWORK , FDB_FUNC_MASK + 4, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gfdbGetAtuEntryNext    \
+    CTL_CODE(FILE_DEVICE_NETWORK , FDB_FUNC_MASK + 5, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gfdbFindAtuMacEntry    \
+    CTL_CODE(FILE_DEVICE_NETWORK , FDB_FUNC_MASK + 6, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gfdbFlush    \
+    CTL_CODE(FILE_DEVICE_NETWORK , FDB_FUNC_MASK + 7, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gfdbAddMacEntry    \
+    CTL_CODE(FILE_DEVICE_NETWORK , FDB_FUNC_MASK + 8, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gfdbDelMacEntry    \
+    CTL_CODE(FILE_DEVICE_NETWORK , FDB_FUNC_MASK + 9, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gfdbLearnEnable    \
+    CTL_CODE(FILE_DEVICE_NETWORK , FDB_FUNC_MASK + 10, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+
+/*
+    Functions for STP
+*/
+#define IOCTL_gstpSetMode    \
+    CTL_CODE(FILE_DEVICE_NETWORK , STP_FUNC_MASK + 0, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gstpSetPortState    \
+    CTL_CODE(FILE_DEVICE_NETWORK , STP_FUNC_MASK + 1, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gstpGetPortState    \
+    CTL_CODE(FILE_DEVICE_NETWORK , STP_FUNC_MASK + 2, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+/*
+    Functions for VLAN
+*/
+#define IOCTL_gprtSetEgressMode    \
+    CTL_CODE(FILE_DEVICE_NETWORK , VLAN_FUNC_MASK + 0, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtGetEgressMode    \
+    CTL_CODE(FILE_DEVICE_NETWORK , VLAN_FUNC_MASK + 1, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtSetVlanTunnel    \
+    CTL_CODE(FILE_DEVICE_NETWORK , VLAN_FUNC_MASK + 2, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtGetVlanTunnel    \
+    CTL_CODE(FILE_DEVICE_NETWORK , VLAN_FUNC_MASK + 3, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gvlnSetPortVlanPorts    \
+    CTL_CODE(FILE_DEVICE_NETWORK , VLAN_FUNC_MASK + 4, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gvlnGetPortVlanPorts    \
+    CTL_CODE(FILE_DEVICE_NETWORK , VLAN_FUNC_MASK + 5, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gvlnSetPortUserPriLsb    \
+    CTL_CODE(FILE_DEVICE_NETWORK , VLAN_FUNC_MASK + 6, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gvlnGetPortUserPriLsb    \
+    CTL_CODE(FILE_DEVICE_NETWORK , VLAN_FUNC_MASK + 7, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gvlnSetPortVid    \
+    CTL_CODE(FILE_DEVICE_NETWORK , VLAN_FUNC_MASK + 8, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gvlnGetPortVid    \
+    CTL_CODE(FILE_DEVICE_NETWORK , VLAN_FUNC_MASK + 9, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+/*
+    Functions for System Event
+*/
+#define IOCTL_eventSetActive    \
+    CTL_CODE(FILE_DEVICE_NETWORK , SYS_EVENT_FUNC_MASK + 0, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_eventGetIntStatus    \
+    CTL_CODE(FILE_DEVICE_NETWORK , SYS_EVENT_FUNC_MASK + 1, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+/*
+    Functions for Phy Control
+*/
+#define IOCTL_gprtPhyReset    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PHY_CTRL_FUNC_MASK + 0, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtSetPortLoopback    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PHY_CTRL_FUNC_MASK + 1, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtSetPortSpeed    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PHY_CTRL_FUNC_MASK + 2, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtPortAutoNegEnable    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PHY_CTRL_FUNC_MASK + 3, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtPortPowerDown    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PHY_CTRL_FUNC_MASK + 4, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtPortRestartAutoNeg    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PHY_CTRL_FUNC_MASK + 5, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtSetPortDuplexMode    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PHY_CTRL_FUNC_MASK + 6, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtSetPortAutoMode    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PHY_CTRL_FUNC_MASK + 7, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtSetPause    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PHY_CTRL_FUNC_MASK + 8, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+
+/*
+    Functions for Phy Interrupt
+*/
+#define IOCTL_gprtPhyIntEnable    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PHY_INT_FUNC_MASK + 0, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtGetPhyIntStatus    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PHY_INT_FUNC_MASK + 1, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtGetPhyIntPortSummary    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PHY_INT_FUNC_MASK + 2, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+/*
+    Functions for Port Control
+*/
+#define IOCTL_gprtSetForceFc    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PORT_CTRL_FUNC_MASK + 0, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtGetForceFc    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PORT_CTRL_FUNC_MASK + 1, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtSetTrailerMode    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PORT_CTRL_FUNC_MASK + 2, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtGetTrailerMode    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PORT_CTRL_FUNC_MASK + 3, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtSetIngressMode    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PORT_CTRL_FUNC_MASK + 4, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtGetIngressMode    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PORT_CTRL_FUNC_MASK + 5, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtSetMcRateLimit    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PORT_CTRL_FUNC_MASK + 6, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtGetMcRateLimit    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PORT_CTRL_FUNC_MASK + 7, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+
+/*
+    Functions for Port Statistics
+*/
+#define IOCTL_gprtSetCtrMode    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PORT_STATS_FUNC_MASK + 0, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtClearAllCtr    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PORT_STATS_FUNC_MASK + 1, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtGetPortCtr    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PORT_STATS_FUNC_MASK + 2, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+/*
+    Functions for Port Status
+*/
+#define IOCTL_gprtGetPartnerLinkPause    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PORT_STATUS_FUNC_MASK + 0, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtGetSelfLinkPause    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PORT_STATUS_FUNC_MASK + 1, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtGetResolve    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PORT_STATUS_FUNC_MASK + 2, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtGetLinkState    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PORT_STATUS_FUNC_MASK + 3, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtGetPortMode    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PORT_STATUS_FUNC_MASK + 4, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtGetPhyMode    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PORT_STATUS_FUNC_MASK + 5, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtGetDuplex    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PORT_STATUS_FUNC_MASK + 6, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gprtGetSpeed    \
+    CTL_CODE(FILE_DEVICE_NETWORK , PORT_STATUS_FUNC_MASK + 7, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+/*
+    Functions for QoS Mapping
+*/
+#define IOCTL_gcosSetPortDefaultTc    \
+    CTL_CODE(FILE_DEVICE_NETWORK , QOS_FUNC_MASK + 0, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gqosSetPrioMapRule    \
+    CTL_CODE(FILE_DEVICE_NETWORK , QOS_FUNC_MASK + 1, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gqosGetPrioMapRule    \
+    CTL_CODE(FILE_DEVICE_NETWORK , QOS_FUNC_MASK + 2, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gqosIpPrioMapEn    \
+    CTL_CODE(FILE_DEVICE_NETWORK , QOS_FUNC_MASK + 3, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gqosGetIpPrioMapEn    \
+    CTL_CODE(FILE_DEVICE_NETWORK , QOS_FUNC_MASK + 4, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gqosUserPrioMapEn    \
+    CTL_CODE(FILE_DEVICE_NETWORK , QOS_FUNC_MASK + 5, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gqosGetUserPrioMapEn    \
+    CTL_CODE(FILE_DEVICE_NETWORK , QOS_FUNC_MASK + 6, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gcosGetUserPrio2Tc    \
+    CTL_CODE(FILE_DEVICE_NETWORK , QOS_FUNC_MASK + 7, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gcosSetUserPrio2Tc    \
+    CTL_CODE(FILE_DEVICE_NETWORK , QOS_FUNC_MASK + 8, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gcosGetDscp2Tc    \
+    CTL_CODE(FILE_DEVICE_NETWORK , QOS_FUNC_MASK + 9, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gcosSetDscp2Tc    \
+    CTL_CODE(FILE_DEVICE_NETWORK , QOS_FUNC_MASK + 10, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+
+/*
+    Functions for Sys Control
+*/
+#define IOCTL_gsysSwReset    \
+    CTL_CODE(FILE_DEVICE_NETWORK , SYS_CTRL_FUNC_MASK + 0, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gsysSetDiscardExcessive    \
+    CTL_CODE(FILE_DEVICE_NETWORK , SYS_CTRL_FUNC_MASK + 1, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gsysGetDiscardExcessive    \
+    CTL_CODE(FILE_DEVICE_NETWORK , SYS_CTRL_FUNC_MASK + 2, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gsysSetSchedulingMode    \
+    CTL_CODE(FILE_DEVICE_NETWORK , SYS_CTRL_FUNC_MASK + 3, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gsysGetSchedulingMode    \
+    CTL_CODE(FILE_DEVICE_NETWORK , SYS_CTRL_FUNC_MASK + 4, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gsysSetMaxFrameSize    \
+    CTL_CODE(FILE_DEVICE_NETWORK , SYS_CTRL_FUNC_MASK + 5, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gsysGetMaxFrameSize    \
+    CTL_CODE(FILE_DEVICE_NETWORK , SYS_CTRL_FUNC_MASK + 6, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gsysReLoad    \
+    CTL_CODE(FILE_DEVICE_NETWORK , SYS_CTRL_FUNC_MASK + 7, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gsysSetWatchDog    \
+    CTL_CODE(FILE_DEVICE_NETWORK , SYS_CTRL_FUNC_MASK + 8, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gsysGetWatchDog    \
+    CTL_CODE(FILE_DEVICE_NETWORK , SYS_CTRL_FUNC_MASK + 9, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gsysSetDuplexPauseMac    \
+    CTL_CODE(FILE_DEVICE_NETWORK , SYS_CTRL_FUNC_MASK + 10, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gsysGetDuplexPauseMac    \
+    CTL_CODE(FILE_DEVICE_NETWORK , SYS_CTRL_FUNC_MASK + 11, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gsysSetPerPortDuplexPauseMac    \
+    CTL_CODE(FILE_DEVICE_NETWORK , SYS_CTRL_FUNC_MASK + 12, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define IOCTL_gsysGetPerPortDuplexPauseMac    \
+    CTL_CODE(FILE_DEVICE_NETWORK , SYS_CTRL_FUNC_MASK + 13, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/README_SW.txt b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/README_SW.txt
new file mode 100644
index 000000000000..78a5f3f74e89
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/README_SW.txt
@@ -0,0 +1,382 @@
+
+                    Switch driver in DSDT Release version 3.3
+                   ============================================
+
+Table of Content:
+-----------------
+1) Release History
+2) Source Code Organization
+3) General Introduction
+4) HOW TO - Build qdDrv.o for vxWorks
+5) HOW TO - Build qdDrv.lib for WinCE
+6) HOW TO - Build qdDrv.o for Linux
+7) HOW TO - Use phy driver (madDrv) in qdDrv.o
+8) HOW TO - select features in msApiSelect.h
+9) HOW TO - Build kinds of driver
+10) Changes - DSDT 3.3 change list
+
+
+1) Release History
+------------------
+DSDT_3.3/switch - Feb.1. 2013.
+          0. Added and completed to support 88E6320 family.
+		  1. Fixed errors.
+
+DSDT_3.2/switch - Mar.1. 2012.
+          0. Added to support 88E6320 family.
+          1. Added to support full functions of 88E6352 family, and SpinnakerAv.
+          2. Added TCAM function of 88E6320.
+          3. Fixed page phy access problem of 88E6352.
+
+DSDT_3.1/switch - Mar.31. 2011.
+          0. Update phy.
+          1. Added new PTP and TAI feature of 88E6352.
+          2. Added msApiSelect.h to allow Customer to select DSDT functions.
+          3. Moved port mapping functions into Dev structure, which is selected by Customer.
+          4. Fixed bugs of Melody.
+          5. Added 88E6352 family.
+          6. Added semaphore for Remote management access.
+          7. Added port mapping functions into system config.
+          8. Fixed 6152/6155 port vector.
+          9. Fixed warning problems for long to short.
+         10. Fixed API gatuGetViolation DB Number error.
+DSDT_3.0/switch - May 26. 2010.
+          0. Based on DSDT2.8a.
+          1. Support Marvell F2R (Remote management) function.
+          2. Added device group 1 to extend device number.
+          3. Added to support 88EC0xx.
+          4. Added to use Marvell Phy driver (madDrv).
+          5. Changed name form DSDT* to DSDT*/switch. The name QD is kept to indicate DSDT switch driver.
+          6. Added to support 88EC0XX and 88E6250 group.
+DSDT2.8a.zip - Jan. 2009. Fixed problem.
+              1. Deleted to support 6095 family for Ingress Rate Limit withFlow control.
+              2. Deleted unused definition in GT_QPRI_TBL_ENTRY.
+DSDT2.8.zip - Nov. 2008. added support for 88E6351 family (88E6351, 88E6175, 88E6124)
+          1. New APIs are added to support new devices.
+
+DSDT2.7a.zip - March. 2008.
+          1. Fixed known bugs.
+          2. Enhanced some of the APIs.
+
+DSDT2.7.zip - May. 2007. added support for 88E6165 family (88E6123, 88E6125, 88E6140, 88E6161)
+          1. New APIs are added to support new devices.
+          2. Bug fix
+            GT_PIRL2_DATA structure includes GT_PIRL_COUNT_MODE enum type,
+            which should be GT_PIRL2_COUNT_MODE.
+            88E6083 support Static Management frame.
+            gprtSetForwardUnknown deals with wrong bit.
+          3. Removed Diag program that make user confused with missing files.
+
+DSDT2.6b.zip - Jan. 2007.
+          1. Bug Fixes
+          2. PIRL Rate Limiting Parameter update
+
+DSDT2.6a.zip - Nov. 2006. added support for 88E6045.
+
+DSDT2.6.zip - Jul. 2006. added support for 88E6097, 88E6096, 88E6046, 88E6047, and 88E6085.
+          1. New APIs are added to support new devices.
+          2. Bug fixes those were in 2.6 preliminary release.
+
+DSDT2.6pre.zip - Apr. 2006. added preliminary support for 88E6097.
+          1. New features are added.
+          2. Some parameters in the existing APIs are modified to support extended feature.
+
+DSDT2.5b.zip - Jan. 2006.
+          1. added gtDelay function after disabling PPU
+              Since delay function is system/OS dependent, it is required that DSDT user
+              fill up the gtDelay function based its platform parameters.
+              gtDelay function is located in src\msApi\gtUtils.c
+          2. Unused GT_STATUS definitions are removed.
+
+DSDT2.5a.zip - Jan. 2006, added support for 88E6122 and 88E6121 and new feature that bypasses
+          initial device setup, and bug fixes in the previous release.
+          1. Bypass initial configuration when loading driver.
+          2. Bug fixes:
+              1) synchronization issues.
+              2) port vector of 0xFF was treated as an invalid vector.
+
+DSDT2.5.zip - Nov. 2005, added support for 88E6065, 88E6035, 88E6055, 88E6061, and 88E6031,
+          and bug fixes in the previous release.
+          1. New APIs are added to support new devices.
+          2. Bug fixes:
+              1) gfdbGetAtuEntryNext API returns GT_NO_SUCH when Entry's MAC is Broadcast address.
+              2) entryState in GT_ATU_ENTRY follows the definition.
+              3) gsysSetTrunkMaskTable API does not overwrite HashTrunk value anymore.
+              4) 10/100 FastEthernet Phy Reset occurs along with Speed, Duplex modification.
+
+
+DSDT2.4a.zip - Oct. 2005, added support for 88E6131 and a bug fix.
+          1. gprtPortPowerDown(gtPhyCtrl.c) didn't work due to reset - reset is not called after PowerDown bit change.
+
+DSDT2.4.zip - Aug. 2005, bug fixes and modifications
+          1. gprtSetPktGenEnable(gtPhyCtrl.c) didn't work with Serdes Device - resolved.
+          2. gprtSetPortAutoMode(gtPhyCtrl.c) dropped 1000Mbps Half duplex mode - resolved.
+          3. gprtGetPhyLinkStatus(gtPhyCtrl.c) returned LinkOn when there is no phy connected - resolved.
+          4. gprtSetPortDuplexMode(gtPhyCtrl.c) reset 1000M Speed - resolved.
+          5. gfdbSetAtuSize(gtBrgFdb.c), now, returns GT_NOT_SUPPORT if ATU size of the device
+             is not configurable.
+          6. gprtSetPortLoopback(gtPhyCtrl.c) treats Fast Ethernet Phy and Gigabit Ethernet Phy
+               differently.
+          7. GT_GET_SERDES_PORT, now, does the error checking.
+          8. IS_CONFIGURABLE_PHY reads PHY ID and returns the ID
+
+DSDT2.4pre.zip - July. 2005, added support for 88E6108
+          1. New features are added.
+          2. Arguments in gprtSetPause and gprtSetPortSpeed are modified to support
+             1000M Phys.
+          3. Driver functions are added to support Marvell Alask Phys and to be
+             expanded easily for the future Phys.
+
+DSDT2.3c.zip - May. 2005,
+          1. New features in Rev1 or Rev2 of 88E6095 are added
+          2. gfdbGetAgingTimeout, and gfdbGetLearnEnable are added
+          3. Bug fixes in grcSetEgressRate and grcSetPri0Rate
+          4. Resetting TrunkID, when gprtSetTrunkPort is called to disable Trunk, is applied
+             only to Rev0 of 88E6095 and 88E6185
+
+DSDT2.3b.zip - Mar. 2005,
+          1. gstpSetMode function does not modify Port State any more, since STP module
+             sets the port state. gstpSetMode sets the switch so that it can receive
+              BPDU packets.
+          2. gtLoadDriver clears Rsvd2Cpu and Rsvd2CpuEn bits.
+          3. TrunkID will be reset when gprtSetTrunkPort is called to disable Trunk.
+          4. "Check PPU Status in order to verify PPU disabled" is applied to gtVct.c
+
+DSDT2.3a.zip - Jan. 2005, added support for 88E6152, 88E6155, 88E6182, and 88E6092
+          devices, removed non-existing devices, and bug fix in 2.3 release.
+          Fix :
+          Check PPU Status in order to verify PPU disabled.
+
+DSDT2.3.zip - Nov. 2004, support for 88E6185 and bug fixes in 2.3 preliminary release.
+          Fixes :
+          1) Provide some delay after disabling PPU.
+          2) VCT runs after disabling PPU.
+
+DSDT2.3pre.zip - Nov. 2004, added preliminary support for 88E6185.
+
+DSDT2.2a.zip - Nov. 2004, added semaphore support for MII Access with multi address mode.
+
+DSDT2.2.zip - Oct. 2004, support for 88E6095 and bug fixes in 2.2 preliminary release.
+
+DSDT2.2pre.zip - Sep. 2004, added preliminary support for 88E6095 and work-around for VCT
+          based on VCT Application Note.
+
+DSDT2.1a.zip - Apr. 2004, support 88E6093 and bug fixes.
+          Device Driver Package name has been changed from QDDriver to DSDT(Datacom
+          Same Driver Technology).
+          Bug Fixes :
+          1) DBNum was not correctly handled while getting entry from VTU Table.
+          2) Member Tag in VTU entry was not defined correctly for 88E6183 family.
+          3) Correction of 88E6183 RMON Counter Structure and Enum.
+          4) ATU Interrupt Handling routine
+
+qdDriver2.1-pre.zip - Apr. 2004, added preliminary support for 88E6093 and bug fixes.
+          Bug Fixes :
+          1) DBNum was not incorrectly handled while getting entry from
+          VTU Table.
+          2) Member Tag in VTU entry was not defined correctly for 88E6183 family.
+
+qdDriver2.0a.zip - Dec. 2003, provides functions, which can read/write
+          Switch Port Registers and Switch Global Registers:
+          gprtGetSwitchReg,
+          gprtSetSwitchReg,
+          gprtGetGlobalReg, and
+          gprtSetGlobalReg
+
+qdDriver2.0.zip - July. 2003, supports Multi Address Mode for upcoming device.
+          AUTO_SCAN_MODE, MANUAL_MODE, and MULTI_ADDR_MODE are added
+          to find a QD Family device.
+          Supports Sapphire (10 port Gigabit switch).
+
+qdDriver1.4a.zip - Apr. 2003, bug fixes.
+          Bug fixes on portVec in GT_ATU_ENTRY structure, which supported only
+          total of 8 ports (defined as GT_U8). It's now defined as GT_U32.
+          utils.c and testApi.c in Diag directory also modified to support
+          the switch with more than 8 ports.
+
+qdDriver1.4.zip - Apr. 2003, added support for Ocatne (6083).
+          Removed NO-OPs, which created when DBG_PRINT is undefined.
+          Bug fixes on gprtSetIGMPSnoop and gprtGetIGMPSnoop functions,
+          and GT_PRI0_RATE enum type.
+
+qdDriver1.3h.zip - Feb. 2003, added support for Extended Cable Status,
+          such as Impediance mismatch, Pair Skew, Pair Swap and Pair Polarity.
+          Bug fixes on FFox-EG and FFox-XP device ID.
+
+qdDriver1.3g.zip - Dec. 2002, added preliminary support for Octane (6083)
+
+qdDriver1.3.zip - Oct. 2002, added support for ClipperShip (6063)
+          This driver works with all released devices, including
+          6051, 6052, 6021, and 6063
+
+qdDriver1.2.zip - Aug. 2002, added support for FullSail (6021)
+
+qdDriver1.1.zip - June, 2002 OS independent QuarterDeck Driver Release
+          Based on 1.0 release, but removed OS dependency. The driver
+          is designed to work with any OS without code changes.
+
+qdDriver1.0.zip - Feb. 2002, Initial QuaterDeck Driver Release
+          Based on vxWorks OS, support 6051/6052
+
+
+2) Source Code Organization
+--------------------------
+    2.1) src
+        Switch Driver Suite Source Code.
+
+    2.2) Include directory
+        Switch Driver Suite Header files and Prototype files
+
+    2.3) Library
+        Object files for Switch driver Suite
+
+    2.4) Sample
+        Sample Code that shows how to use MSAPIs, e.g., init Switch driver, setup VLAN for Home Gateway, etc.
+
+    2.5) Tools
+	Building related files
+
+    2.6) Phy
+        The phy part of switch has alternative individual driver, Marvell Phy Driver (madDrv).
+        The code is in DSDT_X.Y/phy.
+        See DSDT_X.Y/phy/README.txt for more detail.
+
+
+    * The Switch Driver Suite Source Code is OS independent, and fully supported by Marvell.
+    * The Sample Codes are tested under vxworks and Linux, and is provided for reference only.
+
+
+3) General Introduction
+-----------------------
+
+The Switch driver suite is standalone program, which is independent of both OS and Platform.
+As such, applications of MSAPIs need to register platform specific functions.
+This is done by calling qdLoadDriver function. This function returns a pointer (*dev),
+which contains the device and platform information. It will then be used for each MSAPI call.
+
+msApiInit.c file in Diag directory and Sample\Initialization directory demonstrate
+how you can register those functions.
+
+msApiInit.c
+    qdStart is the main function to initialize Switch Driver and does the
+    followings:
+    a) register the platform specific functions.
+       1.1 and 1.2 below are required.
+       1.3 to 1.4 is for F2R(Remote management), it is selected
+          by flag GT_RMGMT_ACCESS in IncludemsApiDefs.h.
+       1.5 to 1.8 is optional.
+        1.1) readMii - BSP specific MII read function
+        1.2) writeMii - BSP specific MII write function
+        1.3) hwAccessMod - BSP supported function mode
+        1.4) hwAccess - BSP specific hardware access function
+        1.5) semCreate - OS specific semaphore create function.
+        1.6) semDelete - OS specific semaphore delete function.
+        1.7) semTake - OS specific semaphore take function.
+        1.8) semGive - OS specific semaphore give function.
+
+        Notes) The given example will use DB-88E6218 BSP as an example.
+
+    b) Initialize BSP provided routine (if required).
+
+    c) Calls qdLoadDriver routine.
+        1.1) Input (GT_SYS_CONFIG) - CPU Port Number (Board Specific) and Port Mode
+        (either 1 for Forwarding mode or 0 for Hardware default mode)
+        1.2) Output (GT_QD_DEV) - Contains all device (Switch) and platform specific info.
+             It will be used for all API calls.
+
+    d) Calls sysEnable (for future use.)
+
+
+4) HOW TO - Build qdDrv.o for vxWorks
+-------------------------------------
+
+1. Extract the given ZIP file into c:\DSDT_3.x\switch directory
+   You may change the directory name to your choice, and change the environment variable below accordingly.
+2. Edit setenv.bat file in c:\DSDT_3.x\switch\tools
+3. Modify the following variables according to your setup.
+set DSDT_USER_BASE=C:\DSDT_3.x\switch
+set DSDT_PROJ_NAME=qdDrv
+set DSDT_USE_MAD=FALSE or TRUE
+set WIND_BASE=C:\Tornado
+set TARGETCPU=MIPS        ;ARM for ARM Cpu
+set WIND_HOST_TYPE=x86-win32
+4. run "setenv"
+5. Change directory to c:\DSDT_3.x\switch\src
+6. run "make"
+7. qdDrv.o and qdDrv.map will be created in c:\DSDT_3.x\switch\Library.
+
+
+5) HOW TO - Build qdDrv.lib for WinCE
+-------------------------------------
+
+1. Extract the given ZIP file into c:\DSDT_3.x\switch directory(directory can be changed)
+2. Edit setenv.bat file in c:\DSDT_3.x\switch\tools
+3. Modify the following variables according to your setup.
+set DSDT_USER_BASE=C:\DSDT_3.x\switch
+set DSDT_PROJ_NAME=qdDrv
+set DSDT_USE_MAD=FALSE or TRUE
+set TARGETCPU=x86        ;MIPSIV for MIPS IV
+set WCEROOT=C:\WINCE400
+
+4. run "setenv WINCE"
+5. Change directory to c:\DSDT_3.x\switch\src
+6. run "make"
+7. qdDrv.lib will be created in c:\DSDT_3.x\switch\Library.
+
+
+6) HOW TO - Build qdDrv.o for Linux
+-----------------------------------
+
+1. Extract the given ZIP file into $HOME/DSDT_3.x/switch directory(directory can be changed)
+ `   in Linux system (verified with Fedore 8.x)
+2. Edit setenv file in $HOME/DSDT_3.x/switch/tools
+3. Modify the following variables according to your setup.
+    declare -x DSDT_USER_BASE=$HOME/DSDT_3.x/switch
+    declare -x DSDT_PROJ_NAME=qdDrv
+    declare -x DSDT_USE_MAD=FALSE or TRUE
+4. run "source setenv"
+5. Change directory to $HOME/DSDT_3.x/switch/src
+6. run "make"
+7. qdDrv.o and qdDrv.map will be created in $HOME/DSDT_3.x/switch/Library.
+
+7) HOW TO - Use Phy driver in qdDrv.o
+-------------------------------------
+
+ The Phy driver(DSDT/phy or MAD) supports Marvell Phy products. It includes the phys in switch chips.
+ From DSDT 3.0A, the Switch driver added functions to merge MAD driver
+ to replace original Phy regarded functions. This is an option.
+ There is no any difference from API point of view, if it uses QD over MAD APIs.
+ For old chips, it is selectable to use ether original Phy API function or call MAD functions. To use latest Phy functions, it should us QD over MAD. The selection is to define <GT_USE_MAD> in <msApiSelect.h>.
+
+8) HOW TO - select features in msApiSelect.h
+---------------------------------------------
+
+ Customer can ignore the selections. The default selections are <undef> for all.
+
+ From DSDT_3.0D, DSDT adds file <msApiSelect.h> in DSDT*.*/Include.
+ The file allows customer to select self features.
+ Now selections are:
+    GT_PORT_MAP_IN_DEV: To use port mapping functions in Dev configuration.
+    GT_RMGMT_ACCESS: DSDT uses RMGMT to replace SMI.
+    GT_USE_MAD: the phy APIs of switch use Mad driver(DSDT/phy).
+
+ If customer wants to use the selection,
+   1. Adds a Micro definition <CHECK_API_SELECT> in customers build environment.
+     For example, in <makefile>, adds <CFLAGS += -DCHECK_API_SELECT>.
+   2. In file <Include/msApiSelect>, set correct selections.
+   3. Re-build DSDT/switch.
+
+
+9) HOW TO - build driver
+-----------------------------------
+
+ Change directory to $HOME/DSDT_3.x/
+ <make switch>: to build switch driver image only.
+ <make phy>: to build phy driver image only.
+ <make>: to build switch and phy driver images, and switch driver does not use MAD APIs.
+ <make DSDT_USE_MAD=TRUE>: to build switch and phy driver images, and switch driver uses MAD APIs.
+
+10) CHANGES
+-----------
+1. Completed to supports Marvell 88E6320.
+2. Fixed Phy access problem.
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/makedefs b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/makedefs
new file mode 100644
index 000000000000..0bedc22d0825
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/makedefs
@@ -0,0 +1,14 @@
+VPATH = $(DSDT_PATH)/src/driver $(DSDT_PATH)/src/msapi $(DSDT_PATH)/src/platform
+
+DSDT_INCLUDE = -I$(DSDT_PATH)/Include -I$(DSDT_PATH)/Include/h/driver -I$(DSDT_PATH)/Include/h/msApi -I$(DSDT_PATH)/Include/h/platform  -I$(DSDT_PATH)/../include
+
+DSDT_OBJS = ./gtDrvConfig.o ./gtDrvEvents.o ./gtHwCntl.o             \
+./gtBrgStu.o ./gtBrgFdb.o ./gtBrgStp.o ./gtBrgVlan.o ./gtBrgVtu.o ./gtEvents.o    \
+        gtPCSCtrl.o gtPhyCtrl.o gtPhyInt.o gtPortCtrl.o gtPortPav.o    \
+        gtPortRateCtrl.o gtPortRmon.o gtPortStat.o gtPortStatus.o    \
+        gtQosMap.o gtSysConfig.o gtSysCtrl.o gtSysStatus.o gtUtils.o \
+        gtVct.o gtVersion.o gtPIRL.o gtPIRL2.o gtWeight.o       \
+            gtCCPVT.o gtPolicy.o \
+            gtPriTable.o gtWeight.o gtPTP.o gtPortLed.o gtMisc.o \
+            gtTCAM.o \
+        ./gtDebug.o ./gtMiiSmiIf.o ./gtSem.o ./platformDeps.o
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/802.1Q/802_1q.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/802.1Q/802_1q.c
new file mode 100644
index 000000000000..99ab347b4542
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/802.1Q/802_1q.c
@@ -0,0 +1,332 @@
+#include <Copyright.h>
+/*******************************************************************************
+* 802_1q.c
+*
+* DESCRIPTION:
+*        There are three 802.1Q modes (GT_SECURE, GT_CHECK, and GT_FALLBACK).
+*        In GT_SECURE mode, the VID for the given frame must be contained in
+*        the VTU, and the Ingress port must be a member of the VLAN or the
+*        frame will be discarded.
+*        In GT_CHECK mode, the VID for the given frame must be contained in
+*        the VTU or the frame will be discarded (the frame will not be
+*        discarded if the Ingress port is not a memeber of the VLAN).
+*        In GT_FALLBACK mode, Frames are not discarded if their VID's are not
+*        contained in the VTU. If the frame's VID is contained in the VTU, the
+*        frame is allowed to exit only those ports that are members of the
+*        frame's VLAN; otherwise the switch 'falls back' into Port Based VLAN
+*        mode for the frame (88E6021 Spec. section 3.5.2.1).
+*
+*        Egress Tagging for a member port of a Vlan has the following three
+*        choices:
+*        1) Unmodified,
+*        2) Untagged, and
+*        3) Tagged
+*
+*        This sample shows how to utilize 802.1Q feature in the device.
+*        For more information, please refer to 88E6021 Spec. section 3.5.2.3
+*
+* DEPENDENCIES:
+*        88E6021 and 88E6063 are supporting this feature.
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+
+#include "msSample.h"
+
+
+/*****************************************************************************
+* sample802_1qSetup
+*
+* DESCRIPTION:
+*        This routine will show how to configure the switch device so that it
+*        can be a Home Gateway. This example assumes that all the frames are not
+*        VLAN-Tagged.
+*        1) to clear VLAN ID Table,
+*         2) to enable 802.1Q in SECURE mode for each port except CPU port,
+*        3) to enable 802.1Q in FALL BACK mode for the CPU port.
+*        4) to add VLAN ID 1 with member port 0 and CPU port
+*        (untagged egress),
+*        5) to add VLAN ID 2 with member the rest of the ports and CPU port
+*        (untagged egress),
+*        6) to configure the default vid of each port:
+*        Port 0 have PVID 1, CPU port has PVID 3, and the rest ports have PVID 2.
+*        Note: CPU port's PVID should be unknown VID, so that QuarterDeck can use
+*        VlanTable (header info) for TX.
+*
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*
+* COMMENTS:
+*        WARNING!!
+*        If you create just two VLAN for this setup, Trailer mode or Header mode
+*        for the CPU port has to be enabled and Ethernet driver which connects to
+*        CPU port should understand VLAN-TAGGING, Trailer mode, or Header mode.
+*
+*******************************************************************************/
+GT_STATUS sample802_1qSetup(GT_QD_DEV *dev)
+{
+    GT_STATUS status;
+    GT_DOT1Q_MODE mode;
+        GT_VTU_ENTRY vtuEntry;
+    GT_U16 vid;
+    GT_LPORT port;
+    int i;
+
+    /*
+     *    1) Clear VLAN ID Table
+    */
+    if((status = gvtuFlush(dev)) != GT_OK)
+    {
+        MSG_PRINT(("gvtuFlush returned fail.\n"));
+        return status;
+    }
+
+    /*
+     *    2) Enable 802.1Q for each port as GT_SECURE mode except CPU port.
+    */
+    mode = GT_SECURE;
+    for(i=0; i<dev->numOfPorts; i++)
+    {
+        port = i;
+        if (port == dev->cpuPortNum)
+            continue;
+
+        if((status = gvlnSetPortVlanDot1qMode(dev,port, mode)) != GT_OK)
+        {
+            MSG_PRINT(("gvlnSetPortVlanDot1qMode return Failed\n"));
+            return status;
+        }
+    }
+
+    /*
+     *    3) Enable 802.1Q for CPU port as GT_FALLBACK mode
+    */
+    if((status = gvlnSetPortVlanDot1qMode(dev, dev->cpuPortNum, GT_FALLBACK)) != GT_OK)
+    {
+        MSG_PRINT(("gvlnSetPortVlanDot1qMode return Failed\n"));
+        return status;
+    }
+
+    /*
+     *    4) Add VLAN ID 1 with Port 0 and CPU Port as members of the Vlan.
+    */
+    gtMemSet(&vtuEntry,0,sizeof(GT_VTU_ENTRY));
+    vtuEntry.DBNum = 0;
+    vtuEntry.vid = 1;
+    for(i=0; i<dev->numOfPorts; i++)
+    {
+        port = i;
+        if((i==0) || (port == dev->cpuPortNum))
+            vtuEntry.vtuData.memberTagP[port] = MEMBER_EGRESS_UNTAGGED;
+        else
+            vtuEntry.vtuData.memberTagP[port] = NOT_A_MEMBER;
+    }
+
+    if((status = gvtuAddEntry(dev,&vtuEntry)) != GT_OK)
+    {
+        MSG_PRINT(("gvtuAddEntry returned fail.\n"));
+        return status;
+    }
+
+    /*
+     *    5) Add VLAN ID 2 with the rest of the Ports and CPU Port as members of
+     *    the Vlan.
+    */
+    gtMemSet(&vtuEntry,0,sizeof(GT_VTU_ENTRY));
+    vtuEntry.DBNum = 0;
+    vtuEntry.vid = 2;
+    for(i=0; i<dev->numOfPorts; i++)
+    {
+        port = i;
+        if(i == 0)
+            vtuEntry.vtuData.memberTagP[port] = NOT_A_MEMBER;
+        else
+            vtuEntry.vtuData.memberTagP[port] = MEMBER_EGRESS_UNTAGGED;
+    }
+
+    if((status = gvtuAddEntry(dev,&vtuEntry)) != GT_OK)
+    {
+        MSG_PRINT(("gvtuAddEntry returned fail.\n"));
+        return status;
+    }
+
+
+    /*
+     *    6) Configure the default vid for each port.
+     *    Port 0 has PVID 1, CPU port has PVID 3, and the rest ports have PVID 2.
+    */
+    for(i=0; i<dev->numOfPorts; i++)
+    {
+        port = i;
+        if(i==0)
+            vid = 1;
+        else if(port == dev->cpuPortNum)
+            vid = 3;
+        else
+            vid = 2;
+
+        if((status = gvlnSetPortVid(dev,port,vid)) != GT_OK)
+        {
+            MSG_PRINT(("gvlnSetPortVid returned fail.\n"));
+            return status;
+        }
+    }
+
+    return GT_OK;
+
+}
+
+
+/*****************************************************************************
+* sampleAdmitOnlyTaggedFrame
+*
+* DESCRIPTION:
+*        This routine will show how to configure a port to accept only vlan
+*        tagged frames.
+*        This routine assumes that 802.1Q has been enabled for the given port.
+*
+* INPUTS:
+*       port - logical port to be configured.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*
+* COMMENTS:
+*        Some device support Discard Untagged feature. If so, gprtSetDiscardUntagged
+*        function will do the work.
+*
+*******************************************************************************/
+GT_STATUS sampleAdmitOnlyTaggedFrame(GT_QD_DEV *dev,GT_LPORT port)
+{
+    GT_STATUS status;
+    GT_VTU_ENTRY vtuEntry;
+    int i;
+
+    /*
+     *    0) If device support gprtSetDiscardUntagged, call the function.
+    */
+    status = gprtSetDiscardUntagged(dev, port, GT_TRUE);
+    switch (status)
+    {
+        case GT_OK:
+            MSG_PRINT(("Done.\n"));
+            return status;
+        case GT_NOT_SUPPORTED:
+            MSG_PRINT(("Try other method.\n"));
+            break;
+        default:
+            MSG_PRINT(("Failure accessing device.\n"));
+            return status;
+    }
+
+
+    /*
+     *    1) Add VLAN ID 0xFFF with the given port as a member.
+    */
+    gtMemSet(&vtuEntry,0,sizeof(GT_VTU_ENTRY));
+    vtuEntry.DBNum = 0;
+    vtuEntry.vid = 0xFFF;
+    for(i=0; i<dev->numOfPorts; i++)
+    {
+        vtuEntry.vtuData.memberTagP[i] = NOT_A_MEMBER;
+    }
+    vtuEntry.vtuData.memberTagP[port] = MEMBER_EGRESS_TAGGED;
+
+    if((status = gvtuAddEntry(dev,&vtuEntry)) != GT_OK)
+    {
+        MSG_PRINT(("gvtuAddEntry returned fail.\n"));
+        return status;
+    }
+
+    /*
+     *    2) Configure the default vid for the given port with VID 0xFFF
+    */
+    if((status = gvlnSetPortVid(dev,port,0xFFF)) != GT_OK)
+    {
+        MSG_PRINT(("gvlnSetPortVid returned fail.\n"));
+        return status;
+    }
+
+    return GT_OK;
+
+}
+
+
+/*****************************************************************************
+* sampleDisplayVIDTable
+*
+* DESCRIPTION:
+*        This routine will show how to enumerate each vid entry in the VTU table
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS sampleDisplayVIDTable(GT_QD_DEV *dev)
+{
+    GT_STATUS status;
+        GT_VTU_ENTRY vtuEntry;
+    GT_LPORT port;
+    int portIndex;
+
+    gtMemSet(&vtuEntry,0,sizeof(GT_VTU_ENTRY));
+    vtuEntry.vid = 0xfff;
+    if((status = gvtuGetEntryFirst(dev,&vtuEntry)) != GT_OK)
+    {
+        MSG_PRINT(("gvtuGetEntryCount returned fail.\n"));
+        return status;
+    }
+
+    MSG_PRINT(("DBNum:%i, VID:%i \n",vtuEntry.DBNum,vtuEntry.vid));
+
+    for(portIndex=0; portIndex<dev->numOfPorts; portIndex++)
+    {
+        port = portIndex;
+
+        MSG_PRINT(("Tag%i:%#x  ",port,vtuEntry.vtuData.memberTagP[port]));
+    }
+
+    MSG_PRINT(("\n"));
+
+    while(1)
+    {
+        if((status = gvtuGetEntryNext(dev,&vtuEntry)) != GT_OK)
+        {
+            break;
+        }
+
+        MSG_PRINT(("DBNum:%i, VID:%i \n",vtuEntry.DBNum,vtuEntry.vid));
+
+        for(portIndex=0; portIndex<dev->numOfPorts; portIndex++)
+        {
+            port = portIndex;
+
+            MSG_PRINT(("Tag%i:%#x  ",port,vtuEntry.vtuData.memberTagP[port]));
+        }
+
+        MSG_PRINT(("\n"));
+
+    }
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/802.1Q/readme.txt b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/802.1Q/readme.txt
new file mode 100644
index 000000000000..fa749fa5274f
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/802.1Q/readme.txt
@@ -0,0 +1,46 @@
+========================================================================
+			802.1Q Feature
+========================================================================
+
+There are three 802.1Q modes (GT_SECURE, GT_CHECK, and GT_FALLBACK).
+In GT_SECURE mode, the VID for the given frame must be contained in
+the VTU, and the Ingress port must be a member of the VLAN or the
+frame will be discarded.
+In GT_CHECK mode, the VID for the given frame must be contained in
+the VTU or the frame will be discarded (the frame will not be
+discarded if the Ingress port is not a memeber of the VLAN).
+In GT_FALLBACK mode, Frames are not discarded if their VID's are not
+contained in the VTU. If the frame's VID is contained in the VTU, the
+frame is allowed to exit only those ports that are members of the
+frame's VLAN; otherwise the switch 'falls back' into Port Based VLAN
+mode for the frame (88E6021 Spec. section 3.5.2.1).
+
+Egress Tagging for a member port of a Vlan has the following three
+choices:
+1) Unmodified,
+2) Untagged, and
+3) Tagged
+
+This sample shows how to utilize 802.1Q feature in the device.
+For more information, please refer to 88E6021 Spec. section 3.5.2.3.
+
+802_1q.c
+	sample802_1qSetup
+		This routine will show
+		1) how to enable 802.1Q feature for each port,
+		2) how to clear VLAN ID (VTU) Table,
+		3) how to enable 802.1Q in SECURE mode for each port,
+		4) how to add VLAN ID 1 with member port 0 and CPU port
+		(unmodified egress),
+		5) how to add VLAN ID 2 with member the rest of the ports and CPU port
+		(untagged egress),
+		6) how to configure the default vid of each port:
+		Port 0 and CPU port have PVID 1 and the rest ports have PVID 2.
+
+	sampleAdmitOnlyTaggedFrame
+		This routine will show how to configure a port to accept only vlan
+		tagged frames.
+		This routine assumes that 802.1Q has been enabled for the given port.
+
+	sampleDisplayVIDTable
+		This routine will show how to enumerate each vid entry in the VTU table
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/CableTest/advCableTest.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/CableTest/advCableTest.c
new file mode 100644
index 000000000000..964f5b194509
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/CableTest/advCableTest.c
@@ -0,0 +1,205 @@
+#include <Copyright.h>
+/********************************************************************************
+* testApi.c
+*
+* DESCRIPTION:
+*       API test functions
+*
+* DEPENDENCIES:   Platform.
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+#include "msSample.h"
+
+GT_STATUS advVctTest(GT_QD_DEV *dev, GT_LPORT port);
+GT_STATUS getAdvExtendedStatus(GT_QD_DEV *dev, GT_LPORT port);
+
+void displayAdvVCTResult
+(
+    GT_ADV_CABLE_STATUS *cableStatus,
+    int    channel
+)
+{
+    int i;
+
+    switch(cableStatus->cableStatus[channel])
+    {
+        case GT_ADV_VCT_FAIL:
+            MSG_PRINT(("Advanced Cable Test Failed\n"));
+            break;
+        case GT_ADV_VCT_NORMAL:
+            MSG_PRINT(("Cable Test Passed. No problem found.\n"));
+            break;
+        case GT_ADV_VCT_IMP_GREATER_THAN_115:
+            MSG_PRINT(("Cable Test Passed. Impedance is greater than 115 Ohms.\n"));
+            MSG_PRINT(("Approximatly %i meters from the tested port.\n",cableStatus->u[channel].dist2fault));
+            break;
+        case GT_ADV_VCT_IMP_LESS_THAN_85:
+            MSG_PRINT(("Cable Test Passed. Impedance is less than 85 Ohms.\n"));
+            MSG_PRINT(("Approximatly %i meters from the tested port.\n",cableStatus->u[channel].dist2fault));
+            break;
+        case GT_ADV_VCT_OPEN:
+            MSG_PRINT(("Cable Test Passed. Open Cable.\n"));
+            MSG_PRINT(("Approximatly %i meters from the tested port.\n",cableStatus->u[channel].dist2fault));
+            break;
+        case GT_ADV_VCT_SHORT:
+            MSG_PRINT(("Cable Test Passed. Shorted Cable.\n"));
+            MSG_PRINT(("Approximatly %i meters from the tested port.\n",cableStatus->u[channel].dist2fault));
+            break;
+        case GT_ADV_VCT_CROSS_PAIR_SHORT:
+            MSG_PRINT(("Cable Test Passed.\n"));
+            for(i=0; i<GT_MDI_PAIR_NUM; i++)
+            {
+                if(cableStatus->u[channel].crossShort.channel[i] == GT_TRUE)
+                {
+                    MSG_PRINT(("\tCross pair short with channel %i.\n",i));
+                    MSG_PRINT(("\tApproximatly %i meters from the tested port.\n",
+                                    cableStatus->u[channel].crossShort.dist2fault[i]));
+                }
+            }
+            break;
+        default:
+            MSG_PRINT(("Unknown Test Result.\n"));
+            break;
+    }
+}
+
+/* Advanced VCT (TDR) */
+GT_STATUS advVctTest(GT_QD_DEV *dev, GT_LPORT port)
+{
+    GT_STATUS status;
+    int i, j;
+    GT_ADV_VCT_MODE mode;
+    GT_ADV_CABLE_STATUS advCableStatus;
+
+    GT_ADV_VCT_MOD mod[2] = {
+        GT_ADV_VCT_FIRST_PEAK,
+        GT_ADV_VCT_MAX_PEAK
+    };
+
+    char modeStr[2][32] = {
+        "(Adv VCT First PEAK)",
+        "(Adv VCT MAX PEAK)"
+    };
+printf("!!!! sample adv Cable Test Result for Port %i\n",port);
+
+    if (dev == 0)
+    {
+        MSG_PRINT(("GT driver is not initialized\n"));
+        return GT_FAIL;
+    }
+
+    for (j=0; j<2; j++)
+    {
+        mode.mode=mod[j];
+        mode.transChanSel=GT_ADV_VCT_TCS_NO_CROSSPAIR;
+        mode.sampleAvg = 0;
+        mode.peakDetHyst =0;
+
+        /*
+         *    Start and get Cable Test Result
+         */
+        status = GT_OK;
+printf("!!!! 1 sample adv Cable Test Result for Port %i\n",port);
+
+        if((status = gvctGetAdvCableDiag(dev,port,
+                                mode,&advCableStatus)) != GT_OK)
+        {
+            MSG_PRINT(("gvctGetAdvCableDiag return Failed\n"));
+            return status;
+        }
+
+        MSG_PRINT(("\nCable Test Result %s for Port %i\n", modeStr[j], (int)port));
+
+        for(i=0; i<GT_MDI_PAIR_NUM; i++)
+        {
+            MSG_PRINT(("MDI PAIR %i:\n",i));
+            displayAdvVCTResult(&advCableStatus, i);
+        }
+    }
+
+    return GT_OK;
+}
+
+/* Advanced DSP VCT */
+GT_STATUS getAdvExtendedStatus(GT_QD_DEV *dev, GT_LPORT port)
+{
+    GT_STATUS status;
+    GT_ADV_EXTENDED_STATUS extendedStatus;
+    int i;
+    char ch;
+
+    if (dev == 0)
+    {
+        MSG_PRINT(("GT driver is not initialized\n"));
+        return GT_FAIL;
+    }
+
+    /*
+     *     Start getting Extended Information.
+     */
+    if((status = gvctGetAdvExtendedStatus(dev,port, &extendedStatus)) != GT_OK)
+    {
+        MSG_PRINT(("gvctGetAdvExtendedStatus return Failed\n"));
+        return status;
+    }
+
+    if (!extendedStatus.isValid)
+    {
+        MSG_PRINT(("Not able to get Extended Status.\n"));
+        MSG_PRINT(("Please check if 1000B-T Link is established on Port %i.\n",(int)port));
+        return status;
+    }
+
+    /* Pair Polarity */
+    MSG_PRINT(("Pair Polarity:\n"));
+    for(i=0; i<GT_MDI_PAIR_NUM; i++)
+    {
+        MSG_PRINT(("MDI PAIR %i: %s\n",i,
+                    (extendedStatus.pairPolarity[i] == GT_POSITIVE)?"Positive":"Negative"));
+    }
+
+    /* Pair Swap */
+    MSG_PRINT(("Pair Swap:\n"));
+    for(i=0; i<GT_MDI_PAIR_NUM; i++)
+    {
+        switch(extendedStatus.pairSwap[i])
+        {
+            case GT_CHANNEL_A:
+                ch = 'A';
+                break;
+            case GT_CHANNEL_B:
+                ch = 'B';
+                break;
+            case GT_CHANNEL_C:
+                ch = 'C';
+                break;
+            case GT_CHANNEL_D:
+                ch = 'D';
+                break;
+            default:
+                MSG_PRINT(("Error: reported unknown Pair Swap %i\n",extendedStatus.pairSwap[i]));
+                ch = 'U';
+                break;
+        }
+
+        MSG_PRINT(("MDI PAIR %i: Channel %c\n",i,ch));
+    }
+
+    /* Pair Polarity */
+    MSG_PRINT(("Pair Skew:\n"));
+    for(i=0; i<GT_MDI_PAIR_NUM; i++)
+    {
+        MSG_PRINT(("MDI PAIR %i: %ins\n",i,(int)extendedStatus.pairSkew[i]));
+    }
+
+    /* Pair Polarity */
+    MSG_PRINT(("Cable Len:\n"));
+    for(i=0; i<GT_MDI_PAIR_NUM; i++)
+    {
+        MSG_PRINT(("MDI PAIR %i: approximately %im\n",i,(int)extendedStatus.cableLen[i]));
+    }
+
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/CableTest/cableTest.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/CableTest/cableTest.c
new file mode 100644
index 000000000000..8ed2efb0be2a
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/CableTest/cableTest.c
@@ -0,0 +1,109 @@
+#include <Copyright.h>
+/********************************************************************************
+* cableTest.c
+*
+* DESCRIPTION:
+*        This sample shows how to run Virtual Cable Test and how to use the
+*        test result.
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*
+* COMMENTS:
+*******************************************************************************/
+
+#include "msSample.h"
+
+void sampleDisplayCableTestResult
+(
+    GT_TEST_STATUS *cableStatus,
+    GT_CABLE_LEN *cableLen
+)
+{
+    switch(*cableStatus)
+    {
+        case GT_TEST_FAIL:
+            MSG_PRINT(("Cable Test Failed\n"));
+            break;
+        case GT_NORMAL_CABLE:
+            MSG_PRINT(("Cable Test Passed. No problem found.\n"));
+            switch(cableLen->normCableLen)
+            {
+                case GT_LESS_THAN_50M:
+                    MSG_PRINT(("Cable Length is less than 50M.\n"));
+                    break;
+                case GT_50M_80M:
+                    MSG_PRINT(("Cable Length is between 50M and 80M.\n"));
+                    break;
+                case GT_80M_110M:
+                    MSG_PRINT(("Cable Length is between 80M and 110M.\n"));
+                    break;
+                case GT_110M_140M:
+                    MSG_PRINT(("Cable Length is between 110M and 140M.\n"));
+                    break;
+                case GT_MORE_THAN_140:
+                    MSG_PRINT(("Cable Length is over 140M.\n"));
+                    break;
+                default:
+                    MSG_PRINT(("Cable Length is unknown.\n"));
+                    break;
+            }
+            break;
+        case GT_IMPEDANCE_MISMATCH:
+            MSG_PRINT(("Cable Test Passed. Cable has Impedance Mismatch .\n"));
+            MSG_PRINT(("Approximatly %i meters from the tested port.\n",cableLen->errCableLen));
+            break;
+        case GT_OPEN_CABLE:
+            MSG_PRINT(("Cable Test Passed. Cable is open.\n"));
+            MSG_PRINT(("Approximatly %i meters from the tested port.\n",cableLen->errCableLen));
+            break;
+        case GT_SHORT_CABLE:
+            MSG_PRINT(("Cable Test Passed. Cable is short.\n"));
+            MSG_PRINT(("Approximatly %i meters from the tested port.\n",cableLen->errCableLen));
+            break;
+        default:
+            MSG_PRINT(("Unknown Test Result.\n"));
+            break;
+    }
+}
+
+GT_STATUS sampleCableTest(GT_QD_DEV *dev,GT_LPORT port)
+{
+    GT_STATUS status;
+    GT_CABLE_STATUS cableStatus;
+    int i;
+
+    /*
+     *    Start and get Cable Test Result
+    */
+
+    if((status = gvctGetCableDiag(dev,port, &cableStatus)) != GT_OK)
+    {
+        MSG_PRINT(("gvctGetCableDiag return Failed\n"));
+        return status;
+    }
+
+    MSG_PRINT(("Cable Test Result for Port %i\n",port));
+
+    if(cableStatus.phyType == PHY_100M)
+    {
+        MSG_PRINT(("RX PAIR :\n"));
+        sampleDisplayCableTestResult(&cableStatus.cableStatus[MDI_RX_PAIR],
+                                    &cableStatus.cableLen[MDI_RX_PAIR]);
+        MSG_PRINT(("TX PAIR :\n"));
+        sampleDisplayCableTestResult(&cableStatus.cableStatus[MDI_TX_PAIR],
+                                    &cableStatus.cableLen[MDI_TX_PAIR]);
+    }
+    else /* phyType must be PHY_1000M */
+    {
+        for(i=0; i<GT_MDI_PAIR_NUM; i++)
+        {
+            MSG_PRINT(("MDI PAIR %i:\n",i));
+            sampleDisplayCableTestResult(&cableStatus.cableStatus[i],
+                                    &cableStatus.cableLen[i]);
+        }
+    }
+
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/CableTest/readme.txt b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/CableTest/readme.txt
new file mode 100644
index 000000000000..aad81548e580
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/CableTest/readme.txt
@@ -0,0 +1,13 @@
+========================================================================
+		Cable Test
+========================================================================
+
+This sample shows how to run VCT(Virtual Cable Tester).
+
+Notes:
+
+cableTest.c
+	The function, sampleCableTest, can be used to run and display the result.
+
+advCableTest.c
+	The functions, advVctTest and getAdvExtendedStatus, can be used to run advanced VCT and display the result.
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/CrossChipTrunk/crossChipTrunk.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/CrossChipTrunk/crossChipTrunk.c
new file mode 100644
index 000000000000..d01db5e2a9f8
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/CrossChipTrunk/crossChipTrunk.c
@@ -0,0 +1,347 @@
+#include <Copyright.h>
+/********************************************************************************
+* crossChipTrunk.c
+*
+* DESCRIPTION:
+*        This sample shows how to setup the Cross Chip TRUNK
+*
+* DEPENDENCIES:
+*        Please check the device's spec. if the device supports this feature.
+*        At the moment this sample was written, 88E6095 was the only device support
+*        this feature.
+*
+* FILE REVISION NUMBER:
+*
+* COMMENTS:
+*******************************************************************************/
+
+#include "msSample.h"
+
+#define N_OF_QD_DEVICES    2
+
+/* qdMultiDev is defined in sample/MultiDevice/msApiInit.c */
+extern GT_QD_DEV       *qdMultiDev[N_OF_QD_DEVICES];
+
+/*
+#ifndef TRUNK_MEMBER
+#define MAX_PORT_IN_TRUNK 4
+
+typedef struct _TRUNK_SET {
+    GT_U32    devIndex;
+    GT_U32    port;
+} TRUNK_SET;
+
+typedef struct _TRUNK_MEMBER {
+    GT_U32    trunkId;
+    GT_U32    nTrunkPort;
+    TRUNK_SET trunkSet[MAX_PORT_IN_TRUNK];
+} TRUNK_MEMBER;
+
+#endif
+*/
+
+GT_STATUS sampleCrossChipTrunk(GT_QD_DEV *dev[], TRUNK_MEMBER* tm);
+
+/*
+    Setup Trunk with the following member ports:
+        Port 0,1,2 of Device 0, and
+        Port 0 of Device 1,
+    where Device 0 is the first Switch Device Structure in qdMultiDev array
+    and Device 1 is the second Switch Device Structure in qdMultiDev array.
+*/
+GT_STATUS crossChipTrunkSetup()
+{
+    TRUNK_MEMBER tm;
+
+    tm.trunkId = 1;
+    tm.nTrunkPort = 4;
+    tm.trunkSet[0].devIndex = 0;
+    tm.trunkSet[0].port = 0;
+    tm.trunkSet[1].devIndex = 0;
+    tm.trunkSet[1].port = 1;
+    tm.trunkSet[2].devIndex = 0;
+    tm.trunkSet[2].port = 2;
+    tm.trunkSet[3].devIndex = 1;
+    tm.trunkSet[3].port = 0;
+
+    return sampleCrossChipTrunk(qdMultiDev, &tm);
+}
+
+GT_STATUS sampleCrossChipTrunk(GT_QD_DEV *dev[], TRUNK_MEMBER* tm)
+{
+    GT_STATUS status;
+    int i,j,index;
+    GT_U32 mask, trunkId;
+    TRUNK_SET* ts;
+    GT_U32 portVec[N_OF_QD_DEVICES];
+    GT_U32 casecadeVec = 0xC0;    /* Port 6 and 7. ToDo : get this value from user or device */
+
+    /*
+     *    Enable Trunk for each member of the Trunk and set the Trunk ID (1).
+    */
+
+    printf("Setting TRUNK\n");
+    printf("Trunk ID : %i\n",(unsigned int)tm->trunkId);
+    printf("N Ports  : %i\n",(unsigned int)tm->nTrunkPort);
+    printf("1st Port  : Dev %i, Port %i\n",
+            (unsigned int)tm->trunkSet[0].devIndex,(unsigned int)tm->trunkSet[0].port);
+    printf("2nd Port  : Dev %i, Port %i\n",
+            (unsigned int)tm->trunkSet[1].devIndex,(unsigned int)tm->trunkSet[1].port);
+    printf("3rd Port  : Dev %i, Port %i\n",
+            (unsigned int)tm->trunkSet[2].devIndex,(unsigned int)tm->trunkSet[2].port);
+    printf("4th Port  : Dev %i, Port %i\n",
+            (unsigned int)tm->trunkSet[3].devIndex,(unsigned int)tm->trunkSet[3].port);
+
+    trunkId = tm->trunkId;
+
+    for(i=0; i<N_OF_QD_DEVICES; i++)
+        portVec[i] = 0;
+
+    printf("Enabling TRUNK for each member port.\n");
+    for(i=0; i<tm->nTrunkPort; i++)
+    {
+        ts = &tm->trunkSet[i];
+
+        if(ts->devIndex >= N_OF_QD_DEVICES)
+        {
+            printf("Device %i is supported. Max Device Number is %i\n",(unsigned int)ts->devIndex,N_OF_QD_DEVICES-1);
+            return GT_FAIL;
+        }
+
+        if((dev[ts->devIndex] == NULL) || (!dev[ts->devIndex]->devEnabled))
+        {
+            printf("Device %i is not initialized\n",(unsigned int)ts->devIndex);
+            return GT_FAIL;
+        }
+
+        /* enabled trunk on the given port */
+        if((status = gprtSetTrunkPort(dev[ts->devIndex],ts->port,GT_TRUE,trunkId)) != GT_OK)
+        {
+            MSG_PRINT(("gprtSetTrunkPort return Failed\n"));
+            return status;
+        }
+
+        portVec[ts->devIndex] |= (1 << ts->port);
+    }
+
+    /*
+     *    Set Trunk Route Table for the given Trunk ID.
+    */
+    printf("Setting TRUNK Routing Table\n");
+    for(i=0; i<N_OF_QD_DEVICES; i++)
+    {
+        if((dev[i] == NULL) || (!dev[i]->devEnabled))
+        {
+            printf("Device %i is not initialized\n",i);
+            break;
+        }
+
+        if((status = gsysSetTrunkRouting(dev[i],trunkId,portVec[i]|casecadeVec)) != GT_OK)
+        {
+            MSG_PRINT(("gsysSetTrunkRouting return Failed\n"));
+            return status;
+        }
+    }
+
+    /*
+     *    Set Trunk Mask Table for load balancing.
+    */
+    printf("Setting TRUNK Mask for Load Balancing\n");
+    for(i=0; i<8; i++)
+    {
+        /* choose a port to be used for the given addr combo index */
+        index = i % tm->nTrunkPort;
+        ts = &tm->trunkSet[index];
+
+        for(j=0; j<N_OF_QD_DEVICES; j++)
+        {
+            if((dev[j] == NULL) || (!dev[j]->devEnabled))
+            {
+                printf("Device %i is not initialized\n",j);
+                continue;
+            }
+
+            if(portVec[j] == 0)
+                continue;
+
+            if((status = gsysGetTrunkMaskTable(dev[j],i,&mask)) != GT_OK)
+            {
+                MSG_PRINT(("gsysGetTrunkMaskTable return Failed\n"));
+                return status;
+            }
+
+            mask &= ~portVec[j];
+
+            if(ts->devIndex == j)
+                mask |= (1 << ts->port);
+
+            if((status = gsysSetTrunkMaskTable(dev[j],i,mask)) != GT_OK)
+            {
+                MSG_PRINT(("gsysSetTrunkMaskTable return Failed\n"));
+                return status;
+            }
+        }
+    }
+
+    return GT_OK;
+}
+
+
+
+/*
+    Assumption 1: Device ID, Cascading Port, CPU Port, and Interswitch Port are
+        already set properly. For more information, please refer to the
+        sample/MultiDevice/msApiInit.c
+
+    Assumption 2: Port 0,1,2 of Device 0 and Port 0 of Device 1 are member of a
+        trunk with Trunk ID 1.
+*/
+
+GT_STATUS sampleFixedCrossChipTrunk(GT_QD_DEV *dev[])
+{
+    GT_STATUS status;
+    int i;
+    GT_U32 mask, trunkBit, trunkId;
+
+    /*
+     *    Enable Trunk for each member of the Trunk and set the Trunk ID (1).
+    */
+
+    trunkId = 1;
+
+    if((dev[0] == NULL) || (!dev[0]->devEnabled))
+    {
+        printf("Device 0 is not initialized\n");
+        return GT_FAIL;
+    }
+    if((dev[1] == NULL) || (!dev[1]->devEnabled))
+    {
+        printf("Device 1 is not initialized\n");
+        return GT_FAIL;
+    }
+
+    /* setup for Device 0 port 0 */
+    if((status = gprtSetTrunkPort(dev[0],0,GT_TRUE,trunkId)) != GT_OK)
+    {
+        MSG_PRINT(("gprtSetTrunkPort return Failed\n"));
+        return status;
+    }
+
+    /* setup for Device 0 port 1 */
+    if((status = gprtSetTrunkPort(dev[0],1,GT_TRUE,trunkId)) != GT_OK)
+    {
+        MSG_PRINT(("gprtSetTrunkPort return Failed\n"));
+        return status;
+    }
+
+    /* setup for Device 0 port 2 */
+    if((status = gprtSetTrunkPort(dev[0],2,GT_TRUE,trunkId)) != GT_OK)
+    {
+        MSG_PRINT(("gprtSetTrunkPort return Failed\n"));
+        return status;
+    }
+
+    /* setup for Device 1 port 0 */
+    if((status = gprtSetTrunkPort(dev[1],0,GT_TRUE,trunkId)) != GT_OK)
+    {
+        MSG_PRINT(("gprtSetTrunkPort return Failed\n"));
+        return status;
+    }
+
+
+    /*
+     *    Set Trunk Route Table for the given Trunk ID.
+    */
+
+    /* setup for Device 0, trunk ID 1 : port 0,1,2, and 9 (cascading port, assumption1) */
+    if((status = gsysSetTrunkRouting(dev[0],trunkId,0x7|0x200)) != GT_OK)
+    {
+        MSG_PRINT(("gsysSetTrunkRouting return Failed\n"));
+        return status;
+    }
+
+    /* setup for Device 1, trunk ID 1 : port 0, and 8 (cascading port, assumption1) */
+    if((status = gsysSetTrunkRouting(dev[1],trunkId,0x1|0x100)) != GT_OK)
+    {
+        MSG_PRINT(("gsysSetTrunkRouting return Failed\n"));
+        return status;
+    }
+
+
+    /*
+     *    Set Trunk Mask Table for load balancing.
+    */
+
+    /*
+       Trunk Mask Table for Device 0:
+
+                        10    9    8    7    6    5    4    3    2    1    0
+       TrunkMask[0]        1    1    1    1    1    1    1    1    0    0    1
+       TrunkMask[1]        1    1    1    1    1    1    1    1    0    1    0
+       TrunkMask[2]        1    1    1    1    1    1    1    1    1    0    0
+       TrunkMask[3]        1    1    1    1    1    1    1    1    0    0    0
+       TrunkMask[4]        1    1    1    1    1    1    1    1    0    0    1
+       TrunkMask[5]        1    1    1    1    1    1    1    1    0    1    0
+       TrunkMask[6]        1    1    1    1    1    1    1    1    1    0    0
+       TrunkMask[7]        1    1    1    1    1    1    1    1    0    0    0
+
+
+       Trunk Mask Table for Device 1:
+
+                        10    9    8    7    6    5    4    3    2    1    0
+       TrunkMask[0]        1    1    1    1    1    1    1    1    1    1    0
+       TrunkMask[1]        1    1    1    1    1    1    1    1    1    1    0
+       TrunkMask[2]        1    1    1    1    1    1    1    1    1    1    0
+       TrunkMask[3]        1    1    1    1    1    1    1    1    1    1    1
+       TrunkMask[4]        1    1    1    1    1    1    1    1    1    1    0
+       TrunkMask[5]        1    1    1    1    1    1    1    1    1    1    0
+       TrunkMask[6]        1    1    1    1    1    1    1    1    1    1    0
+       TrunkMask[7]        1    1    1    1    1    1    1    1    1    1    1
+
+    */
+
+    /* setup for Device 0 */
+    for(i=0; i<8; i++)
+    {
+        if((i%4) == 3)
+        {
+            trunkBit = 0;
+        }
+        else
+        {
+            trunkBit = 1 << (i%4);
+        }
+
+        mask = 0x7F8 | trunkBit;
+
+        if((status = gsysSetTrunkMaskTable(dev[0],i,mask)) != GT_OK)
+        {
+            MSG_PRINT(("gsysSetTrunkMaskTable return Failed\n"));
+            return status;
+        }
+
+    }
+
+    /* setup for Device 1 */
+    for(i=0; i<8; i++)
+    {
+        if((i%4) == 3)
+        {
+            trunkBit = 1;
+        }
+        else
+        {
+            trunkBit = 0;
+        }
+
+        mask = 0x7FE | trunkBit;
+
+        if((status = gsysSetTrunkMaskTable(dev[1],i,mask)) != GT_OK)
+        {
+            MSG_PRINT(("gsysSetTrunkMaskTable return Failed\n"));
+            return status;
+        }
+
+    }
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/CrossChipTrunk/readme.txt b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/CrossChipTrunk/readme.txt
new file mode 100644
index 000000000000..97661f77df6c
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/CrossChipTrunk/readme.txt
@@ -0,0 +1,12 @@
+========================================================================
+		Setup for Cross Chip Trunk
+========================================================================
+
+This sample shows how to setup the Cross Chip Trunk with the following
+assumptions.
+	Assumption 1: Device ID, Cascading Port, CPU Port, and Interswitch Port are
+		already set properly. For more information, please refer to the
+		sample/MultiDevice/msApiInit.c
+
+	Assumption 2: Port 0,1,2 of Device 0 and Port 0 of Device 1 are member of a
+		trunk with Trunk ID, 1.
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/FlowControl/flowCtrl.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/FlowControl/flowCtrl.c
new file mode 100644
index 000000000000..53ed3ca3fca2
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/FlowControl/flowCtrl.c
@@ -0,0 +1,55 @@
+#include <Copyright.h>
+/********************************************************************************
+* flowCtrl.c
+*
+* DESCRIPTION:
+*       Sample program which will show how to Enable or Disable Flow Control of
+*        the given Port of the QuaterDeck.
+*
+*
+* DEPENDENCIES:   None.
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+
+#include "msSample.h"
+
+/*
+ *    Enable or Disable Flow Control of the given port.
+ *    Input - port : port to be programmed.
+ *            enalble : either Enable or Disable.
+*/
+GT_STATUS sampleSetFlowControl(GT_QD_DEV *dev, GT_LPORT port, GT_BOOL enable)
+{
+    GT_STATUS status;
+
+    /*
+     *    Program Phy's Pause bit in AutoNegotiation Advertisement Register.
+     */
+    if((status = gprtSetPause(dev,port,enable)) != GT_OK)
+    {
+        MSG_PRINT(("gprtSetForceFC return Failed\n"));
+        return status;
+    }
+
+    /*
+     *    Restart AutoNegotiation of the given Port's phy
+     */
+    if((status = gprtPortRestartAutoNeg(dev,port)) != GT_OK)
+    {
+        MSG_PRINT(("gprtSetForceFC return Failed\n"));
+        return status;
+    }
+
+    /*
+     *    Program Port's Flow Control.
+     */
+    if((status = gprtSetForceFc(dev,port,enable)) != GT_OK)
+    {
+        MSG_PRINT(("gprtSetForceFC return Failed\n"));
+        return status;
+    }
+
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/FlowControl/readme.txt b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/FlowControl/readme.txt
new file mode 100644
index 000000000000..1856793014a0
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/FlowControl/readme.txt
@@ -0,0 +1,10 @@
+========================================================================
+		Flow Contorl Enable or Disable
+========================================================================
+
+Flow Control Setup requires multiple of DSDT API calls which can be
+examined in this sample.
+
+flowCtrl.c
+	sampleSetFlowControl can be used to enable or disable flow control
+	of the given port
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Header/header.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Header/header.c
new file mode 100644
index 000000000000..cf1afb7bcd71
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Header/header.c
@@ -0,0 +1,44 @@
+#include <Copyright.h>
+/********************************************************************************
+* header.c
+*
+* DESCRIPTION:
+*        This sample shows how to enable/disable CPU port's ingress and egress
+*        Header mode. For more information about Header mode, please refer to
+*        88E6063 Data Book. Header mode sould be handled by Ethernet Device/Driver
+*        as well, since 88E6063, with header mode enabled, sends out a packet with
+*        header, which cannot be recognized by regular Ethernet device/driver,
+*        and expects header for every received packet.
+*
+* DEPENDENCIES:
+*        88E6051, 88E6052, and 88E6021 are not supporting this feature.
+*
+* FILE REVISION NUMBER:
+*
+* COMMENTS:
+*        WARNING!!
+*        When Header mode for the CPU port is enabled, Ethernet Device/Driver
+*        which connects to the CPU port should understand Header Format.
+*        If Ethernet Device does not know about Header mode, then user may set
+*        the device to Promiscuous mode in order to receive packets from QD's CPU
+*        port. After that, it is Ethernet Device Driver's responsibility to handle
+*        Header properly.
+*******************************************************************************/
+
+#include "msSample.h"
+
+GT_STATUS sampleHeaderEnable(GT_QD_DEV *dev,GT_BOOL en)
+{
+    GT_STATUS status;
+
+    /*
+     *    Enable/Disable Header mode
+    */
+    if((status = gprtSetHeaderMode(dev,dev->cpuPortNum, en)) != GT_OK)
+    {
+        MSG_PRINT(("gprtSetHeaderMode return Failed\n"));
+        return status;
+    }
+
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Header/readme.txt b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Header/readme.txt
new file mode 100644
index 000000000000..9dea472fab4e
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Header/readme.txt
@@ -0,0 +1,19 @@
+========================================================================
+		CPU Header Mode Enable or Disable
+========================================================================
+
+This sample shows how to enable/disable header mode for CPU port.
+For more information about header mode, please refer to 88E6063 Spec.
+section 3.5.10 and section 3.7.5.
+
+Notes:
+When Header mode for the CPU port is enabled, Ethernet Device/Driver
+which is directly connected to the CPU port should understand Header Format.
+If Ethernet Device does not know about Header mode, then user may set
+the device to Promiscuous mode in order to receive packets from switch's CPU
+port. After that, it is Ethernet Device Driver's responsibility to handle
+Header properly.
+
+header.c
+	sampleHeaderEnable can be used to enable or disable CPU port's
+	header mode
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Include/msSample.h b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Include/msSample.h
new file mode 100644
index 000000000000..a68c8f7d008b
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Include/msSample.h
@@ -0,0 +1,146 @@
+#include <Copyright.h>
+/********************************************************************************
+* msSample.h
+*
+* DESCRIPTION:
+*       Types definitions for Sample program
+*
+* DEPENDENCIES:   Platform.
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+
+#ifndef __pfTesth
+#define __pfTesth
+
+#ifdef _VXWORKS
+#include "vxWorks.h"
+#include "logLib.h"
+#endif
+#include "stdio.h"
+#include "stdarg.h"
+#include "stdlib.h"
+#include "time.h"
+#include "string.h"
+
+#include "msApi.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef GT_U32 (*GT_API_VOID) (GT_QD_DEV*);
+typedef int (*GT_CMP_FUNC) (void*, int, int);
+
+typedef GT_STATUS (*GT_API_SET_BOOL) (GT_QD_DEV*, GT_BOOL);
+typedef GT_STATUS (*GT_API_GET_BOOL) (GT_QD_DEV*, GT_BOOL*);
+
+typedef GT_STATUS (*GT_API_MAC_ADDR) (GT_QD_DEV*, GT_ETHERADDR*);
+
+typedef GT_STATUS (*GT_API_SET_PORT_BOOL) (GT_QD_DEV*, GT_LPORT,GT_BOOL);
+typedef GT_STATUS (*GT_API_GET_PORT_BOOL) (GT_QD_DEV*, GT_LPORT,GT_BOOL*);
+
+typedef GT_STATUS (*GT_API_SET_PORT_U16) (GT_QD_DEV*, GT_LPORT,GT_U16);
+typedef GT_STATUS (*GT_API_GET_PORT_U16) (GT_QD_DEV*, GT_LPORT,GT_U16*);
+
+typedef GT_STATUS (*GT_API_SET_PORT_U32) (GT_QD_DEV*, GT_LPORT,GT_U32);
+typedef GT_STATUS (*GT_API_GET_PORT_U32) (GT_QD_DEV*, GT_LPORT,GT_U32*);
+
+typedef GT_STATUS (*GT_API_SET_PORT_U8) (GT_QD_DEV*, GT_LPORT,GT_U8);
+typedef GT_STATUS (*GT_API_GET_PORT_U8) (GT_QD_DEV*, GT_LPORT,GT_U8*);
+
+typedef struct _TEST_API
+{
+    union
+    {
+        GT_API_SET_BOOL bool;
+        GT_API_MAC_ADDR mac;
+        GT_API_SET_PORT_BOOL port_bool;
+        GT_API_SET_PORT_U8 port_u8;
+        GT_API_SET_PORT_U16 port_u16;
+        GT_API_SET_PORT_U32 port_u32;
+    } setFunc;
+
+    union
+    {
+        GT_API_GET_BOOL bool;
+        GT_API_MAC_ADDR mac;
+        GT_API_GET_PORT_BOOL port_bool;
+        GT_API_GET_PORT_U8 port_u8;
+        GT_API_GET_PORT_U16 port_u16;
+        GT_API_GET_PORT_U32 port_u32;
+    } getFunc;
+
+}TEST_API;
+
+typedef struct _TEST_STRUCT
+{
+    char strTest[16];
+    GT_API_VOID testFunc;
+    GT_U32 testResults;
+} TEST_STRUCT;
+
+#define MSG_PRINT(x) testPrint x
+
+#define TEST_MAC_ENTRIES    32
+typedef struct _TEST_ATU_ENTRY
+{
+    GT_ATU_ENTRY atuEntry[TEST_MAC_ENTRIES];
+}TEST_ATU_ENTRY;
+
+typedef struct _ATU_ENTRY_INFO
+{
+    GT_ATU_ENTRY atuEntry;
+    GT_U16    hash;
+    GT_U16    bucket;
+} ATU_ENTRY_INFO;
+
+extern GT_SYS_CONFIG   pfTestSysCfg;
+extern ATU_ENTRY_INFO *gAtuEntry;
+extern GT_QD_DEV       *dev;
+
+GT_STATUS qdStart(int,int,int);
+GT_STATUS qdSimSetPhyInt(unsigned int portNumber, unsigned short u16Data);
+GT_STATUS qdSimSetGlobalInt(unsigned short u16Data);
+
+GT_STATUS testAll(GT_QD_DEV*);
+void testPrint(char* format, ...);
+
+extern FGT_INT_HANDLER qdIntHandler;
+
+int vtuEntryCmpFunc(void* buf, int a, int b);
+int atuEntryCmpFunc(void* buf, int a, int b);
+GT_STATUS gtSort(int list[], GT_CMP_FUNC cmpFunc, void* buf, GT_U32 len);
+GT_U16 createATUList(GT_QD_DEV *dev, TEST_ATU_ENTRY atuEntry[], GT_U16 entrySize, GT_U16 dbNumSize,
+                    GT_U16 sameMacsInEachDb, GT_U16 bSize);
+GT_STATUS testFillUpAtu(GT_QD_DEV *dev, ATU_ENTRY_INFO *atuEntry, GT_U8 atuSize,
+                    GT_U8 dbNum, GT_U16 first2Bytes, GT_ATU_UC_STATE state);
+GT_U16 runQDHash(GT_U8* eaddr, GT_U16 dbNum, int bSize, GT_U16* pHash,
+                    GT_U16* preBucket, GT_U16* posBucket);
+GT_STATUS testDisplayATUList();
+
+#undef USE_SEMAPHORE
+
+#ifdef USE_SEMAPHORE
+GT_SEM osSemCreate(GT_SEM_BEGIN_STATE state);
+GT_STATUS osSemDelete(GT_SEM smid);
+GT_STATUS osSemWait(GT_SEM smid, GT_U32 timeOut);
+GT_STATUS osSemSignal(GT_SEM smid);
+#endif
+
+GT_BOOL gtBspReadMii ( GT_QD_DEV* dev, unsigned int portNumber , unsigned int MIIReg,
+                      unsigned int* value);
+GT_BOOL gtBspWriteMii ( GT_QD_DEV* dev, unsigned int portNumber , unsigned int MIIReg,
+                       unsigned int value);
+void gtBspMiiInit();
+
+GT_BOOL qdSimRead (GT_QD_DEV* dev,unsigned int portNumber , unsigned int miiReg, unsigned int* value);
+GT_BOOL qdSimWrite (GT_QD_DEV* dev,unsigned int portNumber , unsigned int miiReg, unsigned int value);
+void qdSimInit();
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif   /* __pfTesth */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Include/qdSimRegs.h b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Include/qdSimRegs.h
new file mode 100644
index 000000000000..dda2475acf0a
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Include/qdSimRegs.h
@@ -0,0 +1,132 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtSimRegs.h
+*
+* DESCRIPTION:
+*       This file includes the declaration of the struct to hold the addresses
+*       of switch (global & per-port).
+*
+* DEPENDENCIES:
+*       QuarterDeck register MAP.
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+
+#ifndef __qdSimRegsh
+#define __qdSimRegsh
+
+/* QuarterDeck Per Port Registers */
+#define QD_REG_PORT_STATUS        0x0
+#define QD_REG_SWITCH_ID        0x3
+#define QD_REG_PORT_CONTROL        0x4
+#define QD_REG_PORT_VLAN_MAP    0x6
+#define QD_REG_PVID                0x7
+#define QD_REG_RATE_CTRL        0xA
+#define QD_REG_PAV                0xB
+#define QD_REG_RXCOUNTER        0x10
+#define QD_REG_TXCOUNTER        0x11
+#define QD_REG_Q_COUNTER        0x1B
+
+/* QuarterDeck Global Registers */
+#define QD_REG_GLOBAL_STATUS    0x0
+#define QD_REG_MACADDR_01        0x1
+#define QD_REG_MACADDR_23        0x2
+#define QD_REG_MACADDR_45        0x3
+#define QD_REG_GLOBAL_CONTROL    0x4
+
+/* the following VTU entries are added for Fullsail and Clippership */
+#define QD_REG_VTU_OPERATION        0x5
+#define QD_REG_VTU_VID_REG        0x6
+#define QD_REG_VTU_DATA1_REG        0x7
+#define QD_REG_VTU_DATA2_REG        0x8
+#define QD_REG_VTU_DATA3_REG        0x9
+#define QD_REG_STATS_OPERATION        0x1D
+#define QD_REG_STATS_COUNTER3_2        0x1E
+#define QD_REG_STATS_COUNTER1_0        0x1F
+
+#define QD_REG_ATU_CONTROL        0xA
+#define QD_REG_ATU_OPERATION    0xB
+#define QD_REG_ATU_DATA_REG        0xC
+#define QD_REG_ATU_MAC_BASE        0xD
+#define QD_REG_ATU_MAC_01        0xD
+#define QD_REG_ATU_MAC_23        0xE
+#define QD_REG_ATU_MAC_45        0xF
+#define QD_REG_IP_PRI_BASE        0x10
+#define QD_REG_IP_PRI_REG0        0x10
+#define QD_REG_IP_PRI_REG1        0x11
+#define QD_REG_IP_PRI_REG2        0x12
+#define QD_REG_IP_PRI_REG3        0x13
+#define QD_REG_IP_PRI_REG4        0x14
+#define QD_REG_IP_PRI_REG5        0x15
+#define QD_REG_IP_PRI_REG6        0x16
+#define QD_REG_IP_PRI_REG7        0x17
+#define QD_REG_IEEE_PRI            0x18
+
+/* Definition for QD_REG_PORT_STATUS */
+#define QD_PORT_STATUS_DUPLEX    0x200
+
+/* Definitions for MIB Counter */
+#define GT_STATS_NO_OP            0x0
+#define GT_STATS_FLUSH_ALL        0x1
+#define GT_STATS_FLUSH_PORT        0x2
+#define GT_STATS_READ_COUNTER        0x4
+#define GT_STATS_CAPTURE_PORT        0x5
+#define GT_STATS_CAPTURE_PORT_CLEAR  0x6  /* RMU page 2 */
+
+#define QD_PHY_CONTROL_REG                0
+#define QD_PHY_AUTONEGO_AD_REG            4
+#define QD_PHY_NEXTPAGE_TX_REG            7
+#define QD_PHY_SPEC_CONTROL_REG            16
+#define QD_PHY_INT_ENABLE_REG            18
+#define QD_PHY_INT_STATUS_REG            19
+#define QD_PHY_INT_PORT_SUMMARY_REG        20
+
+/* Bit Definition for QD_PHY_CONTROL_REG */
+#define QD_PHY_RESET            0x8000
+#define QD_PHY_LOOPBACK            0x4000
+#define QD_PHY_SPEED            0x2000
+#define QD_PHY_AUTONEGO            0x1000
+#define QD_PHY_POWER            0x800
+#define QD_PHY_ISOLATE            0x400
+#define QD_PHY_RESTART_AUTONEGO        0x200
+#define QD_PHY_DUPLEX            0x100
+
+#define QD_PHY_POWER_BIT                11
+#define QD_PHY_RESTART_AUTONEGO_BIT        9
+
+/* Bit Definition for QD_PHY_AUTONEGO_AD_REG */
+#define QD_PHY_NEXTPAGE            0x8000
+#define QD_PHY_REMOTEFAULT        0x4000
+#define QD_PHY_PAUSE            0x400
+#define QD_PHY_100_FULL            0x100
+#define QD_PHY_100_HALF            0x80
+#define QD_PHY_10_FULL            0x40
+#define QD_PHY_10_HALF            0x20
+
+#define QD_PHY_MODE_AUTO_AUTO    (QD_PHY_100_FULL | QD_PHY_100_HALF | QD_PHY_10_FULL | QD_PHY_10_HALF)
+#define QD_PHY_MODE_100_AUTO    (QD_PHY_100_FULL | QD_PHY_100_HALF)
+#define QD_PHY_MODE_10_AUTO        (QD_PHY_10_FULL | QD_PHY_10_HALF)
+#define QD_PHY_MODE_AUTO_FULL    (QD_PHY_100_FULL | QD_PHY_10_FULL)
+#define QD_PHY_MODE_AUTO_HALF    (QD_PHY_100_HALF | QD_PHY_10_HALF)
+
+#define QD_PHY_MODE_100_FULL    QD_PHY_100_FULL
+#define QD_PHY_MODE_100_HALF    QD_PHY_100_HALF
+#define QD_PHY_MODE_10_FULL        QD_PHY_10_FULL
+#define QD_PHY_MODE_10_HALF        QD_PHY_10_HALF
+
+/* Bit definition for QD_PHY_INT_ENABLE_REG */
+#define QD_PHY_INT_SPEED_CHANGED        0x4000
+#define QD_PHY_INT_DUPLEX_CHANGED        0x2000
+#define QD_PHY_INT_PAGE_RECEIVED        0x1000
+#define QD_PHY_INT_AUTO_NEG_COMPLETED        0x800
+#define QD_PHY_INT_LINK_STATUS_CHANGED        0x400
+#define QD_PHY_INT_SYMBOL_ERROR            0x200
+#define QD_PHY_INT_FALSE_CARRIER        0x100
+#define QD_PHY_INT_FIFO_FLOW            0x80
+#define QD_PHY_INT_CROSSOVER_CHANGED        0x40
+#define QD_PHY_INT_POLARITY_CHANGED        0x2
+#define QD_PHY_INT_JABBER            0x1
+
+#endif /* __qdSimRegsh */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/ev96122mii.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/ev96122mii.c
new file mode 100644
index 000000000000..e7c10be6fde0
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/ev96122mii.c
@@ -0,0 +1,213 @@
+#include <Copyright.h>
+/********************************************************************************
+* ev96122mii.c
+*
+* DESCRIPTION:
+*       SMI access routines for EV-96122 board
+*
+* DEPENDENCIES:   Platform.
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+
+#include <msSample.h>
+
+/*
+ * For each platform, all we need is
+ * 1) Assigning functions into
+ *         fgtReadMii : to read MII registers, and
+ *         fgtWriteMii : to write MII registers.
+ *
+ * 2) Register Interrupt (Not Defined Yet.)
+*/
+
+/*
+ *  EV-96122 Specific Definition
+*/
+
+#define SMI_OP_CODE_BIT_READ                    1
+#define SMI_OP_CODE_BIT_WRITE                   0
+#define SMI_BUSY                                1<<28
+#define READ_VALID                              1<<27
+
+#ifdef FIREFOX
+#define ETHER_SMI_REG                   0x10
+#define internalRegBaseAddr 0x80008000
+#define NONE_CACHEABLE        0x00000000
+#define CACHEABLE            0x00000000
+#define SMI_RX_TIMEOUT        1000
+#else
+#define ETHER_SMI_REG                   0x080810
+#define internalRegBaseAddr 0x14000000
+#define NONE_CACHEABLE        0xa0000000
+#define CACHEABLE            0x80000000
+#define SMI_RX_TIMEOUT        10000000
+#endif
+
+typedef unsigned int              SMI_REG;
+
+#ifdef LE /* Little Endian */
+#define SHORT_SWAP(X) (X)
+#define WORD_SWAP(X) (X)
+#define LONG_SWAP(X) ((l64)(X))
+
+#else    /* Big Endian */
+#define SHORT_SWAP(X) ((X <<8 ) | (X >> 8))
+
+#define WORD_SWAP(X) (((X)&0xff)<<24)+      \
+                    (((X)&0xff00)<<8)+      \
+                    (((X)&0xff0000)>>8)+    \
+                    (((X)&0xff000000)>>24)
+
+#define LONG_SWAP(X) ( (l64) (((X)&0xffULL)<<56)+               \
+                            (((X)&0xff00ULL)<<40)+              \
+                            (((X)&0xff0000ULL)<<24)+            \
+                            (((X)&0xff000000ULL)<<8)+           \
+                            (((X)&0xff00000000ULL)>>8)+         \
+                            (((X)&0xff0000000000ULL)>>24)+      \
+                            (((X)&0xff000000000000ULL)>>40)+    \
+                            (((X)&0xff00000000000000ULL)>>56))
+
+#endif
+
+#define GT_REG_READ(offset, pData)                                          \
+*pData = ( (volatile unsigned int)*((unsigned int *)                        \
+           (NONE_CACHEABLE | internalRegBaseAddr | (offset))) );            \
+*pData = WORD_SWAP(*pData)
+
+#define GT_REG_WRITE(offset, data)                                          \
+(volatile unsigned int)*((unsigned int *)(NONE_CACHEABLE |                  \
+          internalRegBaseAddr | (offset))) = WORD_SWAP(data)
+
+typedef enum _bool{false,true} bool;
+
+/*****************************************************************************
+*
+* bool etherReadMIIReg (unsigned int portNumber , unsigned int MIIReg,
+* unsigned int* value)
+*
+* Description
+* This function will access the MII registers and will read the value of
+* the MII register , and will retrieve the value in the pointer.
+* Inputs
+* portNumber - one of the 2 possiable Ethernet ports (0-1).
+* MIIReg - the MII register offset.
+* Outputs
+* value - pointer to unsigned int which will receive the value.
+* Returns Value
+* true if success.
+* false if fail to make the assignment.
+* Error types (and exceptions if exist)
+*/
+
+GT_BOOL gtBspReadMii (GT_QD_DEV* dev, unsigned int portNumber , unsigned int MIIReg,
+                        unsigned int* value)
+{
+SMI_REG smiReg;
+unsigned int phyAddr;
+unsigned int timeOut = 10; /* in 100MS units */
+int i;
+
+/* first check that it is not busy */
+    GT_REG_READ (ETHER_SMI_REG,(unsigned int*)&smiReg);
+    if(smiReg & SMI_BUSY)
+    {
+        for(i = 0 ; i < SMI_RX_TIMEOUT ; i++);
+        do {
+            GT_REG_READ (ETHER_SMI_REG,(unsigned int*)&smiReg);
+            if(timeOut-- < 1 ) {
+                return false;
+            }
+        } while (smiReg & SMI_BUSY);
+    }
+/* not busy */
+
+    phyAddr = portNumber;
+
+    smiReg =  (phyAddr << 16) | (SMI_OP_CODE_BIT_READ << 26) | (MIIReg << 21) |
+         SMI_OP_CODE_BIT_READ<<26;
+
+    GT_REG_WRITE (ETHER_SMI_REG,*((unsigned int*)&smiReg));
+    timeOut = 10; /* initialize the time out var again */
+    GT_REG_READ (ETHER_SMI_REG,(unsigned int*)&smiReg);
+    if(!(smiReg & READ_VALID))
+        {
+            i=0;
+            while(i < SMI_RX_TIMEOUT)
+            {
+                i++;
+            }
+        {
+        }
+        do {
+            GT_REG_READ (ETHER_SMI_REG,(unsigned int*)&smiReg);
+            if(timeOut-- < 1 ) {
+                return false;
+            }
+        } while (!(smiReg & READ_VALID));
+     }
+    *value = (unsigned int)(smiReg & 0xffff);
+
+    return true;
+
+
+}
+
+/*****************************************************************************
+*
+* bool etherWriteMIIReg (unsigned int portNumber , unsigned int MIIReg,
+* unsigned int value)
+*
+* Description
+* This function will access the MII registers and will write the value
+* to the MII register.
+* Inputs
+* portNumber - one of the 2 possiable Ethernet ports (0-1).
+* MIIReg - the MII register offset.
+* value -the value that will be written.
+* Outputs
+* Returns Value
+* true if success.
+* false if fail to make the assignment.
+* Error types (and exceptions if exist)
+*/
+
+GT_BOOL gtBspWriteMii (GT_QD_DEV* dev, unsigned int portNumber , unsigned int MIIReg,
+                       unsigned int value)
+{
+SMI_REG smiReg;
+unsigned int phyAddr;
+unsigned int timeOut = 10; /* in 100MS units */
+int i;
+
+/* first check that it is not busy */
+    GT_REG_READ (ETHER_SMI_REG,(unsigned int*)&smiReg);
+    if(smiReg & SMI_BUSY)
+    {
+        for(i = 0 ; i < SMI_RX_TIMEOUT ; i++);
+        do {
+            GT_REG_READ (ETHER_SMI_REG,(unsigned int*)&smiReg);
+            if(timeOut-- < 1 ) {
+                return false;
+            }
+        } while (smiReg & SMI_BUSY);
+    }
+/* not busy */
+
+    phyAddr = portNumber;
+
+    smiReg = 0; /* make sure no garbage value in reserved bits */
+    smiReg = smiReg | (phyAddr << 16) | (SMI_OP_CODE_BIT_WRITE << 26) |
+             (MIIReg << 21) | (value & 0xffff);
+
+    GT_REG_WRITE (ETHER_SMI_REG,*((unsigned int*)&smiReg));
+
+    return(true);
+}
+
+
+void gtBspMiiInit(GT_QD_DEV* dev)
+{
+    return;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/msApiInit.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/msApiInit.c
new file mode 100644
index 000000000000..fd3a0cc6574e
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/msApiInit.c
@@ -0,0 +1,117 @@
+#include <Copyright.h>
+/********************************************************************************
+* msApiInit.c
+*
+* DESCRIPTION:
+*       MS API initialization routine
+*
+* DEPENDENCIES:   Platform
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+#include "msSample.h"
+/*
+#define MULTI_ADDR_MODE
+#define MANUAL_MODE
+*/
+
+GT_SYS_CONFIG   cfg;
+GT_QD_DEV       diagDev;
+GT_QD_DEV       *dev=&diagDev;
+
+
+/*
+ *  Initialize the QuarterDeck. This should be done in BSP driver init routine.
+ *    Since BSP is not combined with QuarterDeck driver, we are doing here.
+*/
+
+GT_STATUS qdStart(int cpuPort, int useQdSim, int devId) /* devId is used for simulator only */
+{
+GT_STATUS status;
+
+    /*
+     *  Register all the required functions to QuarterDeck Driver.
+    */
+    memset((char*)&cfg,0,sizeof(GT_SYS_CONFIG));
+    memset((char*)&diagDev,0,sizeof(GT_QD_DEV));
+
+    if(useQdSim == 0) /* use EV-96122 */
+    {
+        cfg.BSPFunctions.readMii   = gtBspReadMii;
+        cfg.BSPFunctions.writeMii  = gtBspWriteMii;
+#ifdef GT_RMGMT_ACCESS
+    cfg.BSPFunctions.hwAccess  = gtBspHwAccess;
+#endif
+#ifdef USE_SEMAPHORE
+        cfg.BSPFunctions.semCreate = osSemCreate;
+        cfg.BSPFunctions.semDelete = osSemDelete;
+        cfg.BSPFunctions.semTake   = osSemWait;
+        cfg.BSPFunctions.semGive   = osSemSignal;
+#else
+        cfg.BSPFunctions.semCreate = NULL;
+        cfg.BSPFunctions.semDelete = NULL;
+        cfg.BSPFunctions.semTake   = NULL;
+        cfg.BSPFunctions.semGive   = NULL;
+#endif
+        gtBspMiiInit(dev);
+    }
+    else    /* use QuaterDeck Simulator (No QD Device Required.) */
+    {
+        cfg.BSPFunctions.readMii   = qdSimRead;
+        cfg.BSPFunctions.writeMii  = qdSimWrite;
+#ifdef USE_SEMAPHORE
+        cfg.BSPFunctions.semCreate = osSemCreate;
+        cfg.BSPFunctions.semDelete = osSemDelete;
+        cfg.BSPFunctions.semTake   = osSemWait;
+        cfg.BSPFunctions.semGive   = osSemSignal;
+#else
+        cfg.BSPFunctions.semCreate = NULL;
+        cfg.BSPFunctions.semDelete = NULL;
+        cfg.BSPFunctions.semTake   = NULL;
+        cfg.BSPFunctions.semGive   = NULL;
+#endif
+
+        qdSimInit(devId,0);
+    }
+
+    cfg.initPorts = GT_TRUE;    /* Set switch ports to Forwarding mode. If GT_FALSE, use Default Setting. */
+    cfg.cpuPortNum = cpuPort;
+#ifdef MANUAL_MODE    /* not defined. this is only for sample */
+    /* user may want to use this mode when there are two QD switchs on the same MII bus. */
+    cfg.mode.scanMode = SMI_MANUAL_MODE;    /* Use QD located at manually defined base addr */
+    cfg.mode.baseAddr = 0x10;    /* valid value in this case is either 0 or 0x10 */
+#else
+#ifdef MULTI_ADDR_MODE
+    cfg.mode.scanMode = SMI_MULTI_ADDR_MODE;    /* find a QD in indirect access mode */
+    cfg.mode.baseAddr = 1;        /* this is the phyAddr used by QD family device.
+                                Valid value are 1 ~ 31.*/
+#else
+    cfg.mode.scanMode = SMI_AUTO_SCAN_MODE;    /* Scan 0 or 0x10 base address to find the QD */
+    cfg.mode.baseAddr = 0;
+#endif
+#endif
+    if((status=qdLoadDriver(&cfg, dev)) != GT_OK)
+    {
+        MSG_PRINT(("qdLoadDriver return Failed\n"));
+        return status;
+    }
+
+    MSG_PRINT(("Device ID     : 0x%x\n",dev->deviceId));
+    MSG_PRINT(("Base Reg Addr : 0x%x\n",dev->baseRegAddr));
+    MSG_PRINT(("No of Ports   : %d\n",dev->numOfPorts));
+    MSG_PRINT(("CPU Ports     : %d\n",dev->cpuPortNum));
+
+    /*
+     *  start the QuarterDeck
+    */
+    if((status=sysEnable(dev)) != GT_OK)
+    {
+        MSG_PRINT(("sysConfig return Failed\n"));
+        return status;
+    }
+
+    MSG_PRINT(("QuarterDeck has been started.\n"));
+
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/msSample.h b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/msSample.h
new file mode 100644
index 000000000000..587c90f9a90e
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/msSample.h
@@ -0,0 +1,157 @@
+#include <Copyright.h>
+/********************************************************************************
+* msSample.h
+*
+* DESCRIPTION:
+*       Types definitions for Sample program
+*
+* DEPENDENCIES:   Platform.
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+
+#ifndef __pfTesth
+#define __pfTesth
+
+#ifdef _VXWORKS
+#include "vxWorks.h"
+#include "logLib.h"
+#endif
+
+#ifndef __KERNEL__
+#include "stdio.h"
+#include "stdarg.h"
+#include "stdlib.h"
+#include "time.h"
+#include "string.h"
+#endif
+
+#include "msApi.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef GT_U32 (*GT_API_VOID) (GT_QD_DEV*);
+typedef int (*GT_CMP_FUNC) (void*, int, int);
+
+typedef GT_STATUS (*GT_API_SET_BOOL) (GT_QD_DEV*, GT_BOOL);
+typedef GT_STATUS (*GT_API_GET_BOOL) (GT_QD_DEV*, GT_BOOL*);
+
+typedef GT_STATUS (*GT_API_MAC_ADDR) (GT_QD_DEV*, GT_ETHERADDR*);
+
+typedef GT_STATUS (*GT_API_SET_PORT_BOOL) (GT_QD_DEV*, GT_LPORT,GT_BOOL);
+typedef GT_STATUS (*GT_API_GET_PORT_BOOL) (GT_QD_DEV*, GT_LPORT,GT_BOOL*);
+
+typedef GT_STATUS (*GT_API_SET_PORT_U16) (GT_QD_DEV*, GT_LPORT,GT_U16);
+typedef GT_STATUS (*GT_API_GET_PORT_U16) (GT_QD_DEV*, GT_LPORT,GT_U16*);
+
+typedef GT_STATUS (*GT_API_SET_PORT_U32) (GT_QD_DEV*, GT_LPORT,GT_U32);
+typedef GT_STATUS (*GT_API_GET_PORT_U32) (GT_QD_DEV*, GT_LPORT,GT_U32*);
+
+typedef GT_STATUS (*GT_API_SET_PORT_U8) (GT_QD_DEV*, GT_LPORT,GT_U8);
+typedef GT_STATUS (*GT_API_GET_PORT_U8) (GT_QD_DEV*, GT_LPORT,GT_U8*);
+
+typedef struct _TEST_API
+{
+    union
+    {
+        GT_API_SET_BOOL bool;
+        GT_API_MAC_ADDR mac;
+        GT_API_SET_PORT_BOOL port_bool;
+        GT_API_SET_PORT_U8 port_u8;
+        GT_API_SET_PORT_U16 port_u16;
+        GT_API_SET_PORT_U32 port_u32;
+    } setFunc;
+
+    union
+    {
+        GT_API_GET_BOOL bool;
+        GT_API_MAC_ADDR mac;
+        GT_API_GET_PORT_BOOL port_bool;
+        GT_API_GET_PORT_U8 port_u8;
+        GT_API_GET_PORT_U16 port_u16;
+        GT_API_GET_PORT_U32 port_u32;
+    } getFunc;
+
+}TEST_API;
+
+typedef struct _TEST_STRUCT
+{
+    char strTest[16];
+    GT_API_VOID testFunc;
+    GT_U32 testResults;
+} TEST_STRUCT;
+
+#define MSG_PRINT(x) testPrint x
+
+#define TEST_MAC_ENTRIES    32
+typedef struct _TEST_ATU_ENTRY
+{
+    GT_ATU_ENTRY atuEntry[TEST_MAC_ENTRIES];
+}TEST_ATU_ENTRY;
+
+typedef struct _ATU_ENTRY_INFO
+{
+    GT_ATU_ENTRY atuEntry;
+    GT_U16    hash;
+    GT_U16    bucket;
+} ATU_ENTRY_INFO;
+
+extern GT_SYS_CONFIG   pfTestSysCfg;
+extern ATU_ENTRY_INFO *gAtuEntry;
+extern GT_QD_DEV       *dev;
+
+GT_STATUS qdStart(int,int,int);
+GT_STATUS qdSimSetPhyInt(unsigned int portNumber, unsigned short u16Data);
+GT_STATUS qdSimSetGlobalInt(unsigned short u16Data);
+
+GT_STATUS testAll(GT_QD_DEV*);
+
+#ifdef __KERNEL__
+#define testPrint  printk
+#define printf  printk
+#define puts  printk
+#define rand  random32
+#else
+#define  testPrint printf
+#endif
+
+extern FGT_INT_HANDLER qdIntHandler;
+
+int vtuEntryCmpFunc(void* buf, int a, int b);
+int atuEntryCmpFunc(void* buf, int a, int b);
+GT_STATUS gtSort(int list[], GT_CMP_FUNC cmpFunc, void* buf, GT_U32 len);
+GT_U16 createATUList(GT_QD_DEV *dev, TEST_ATU_ENTRY atuEntry[], GT_U16 entrySize, GT_U16 dbNumSize,
+                    GT_U16 sameMacsInEachDb, GT_U16 bSize);
+GT_STATUS testFillUpAtu(GT_QD_DEV *dev, ATU_ENTRY_INFO *atuEntry, GT_U8 atuSize,
+                    GT_U8 dbNum, GT_U16 first2Bytes, GT_ATU_UC_STATE state);
+GT_U16 runQDHash(GT_U8* eaddr, GT_U16 dbNum, int bSize, GT_U16* pHash,
+                    GT_U16* preBucket, GT_U16* posBucket);
+GT_STATUS testDisplayATUList();
+
+#undef USE_SEMAPHORE
+
+#ifdef USE_SEMAPHORE
+GT_SEM osSemCreate(GT_SEM_BEGIN_STATE state);
+GT_STATUS osSemDelete(GT_SEM smid);
+GT_STATUS osSemWait(GT_SEM smid, GT_U32 timeOut);
+GT_STATUS osSemSignal(GT_SEM smid);
+#endif
+
+GT_BOOL gtBspReadMii ( GT_QD_DEV* dev, unsigned int portNumber , unsigned int MIIReg,
+                      unsigned int* value);
+GT_BOOL gtBspWriteMii ( GT_QD_DEV* dev, unsigned int portNumber , unsigned int MIIReg,
+                       unsigned int value);
+void gtBspMiiInit();
+
+GT_BOOL qdSimRead (GT_QD_DEV* dev,unsigned int portNumber , unsigned int miiReg, unsigned int* value);
+GT_BOOL qdSimWrite (GT_QD_DEV* dev,unsigned int portNumber , unsigned int miiReg, unsigned int value);
+void qdSimInit();
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif   /* __pfTesth */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/osSem.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/osSem.c
new file mode 100644
index 000000000000..21f4c78b5567
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/osSem.c
@@ -0,0 +1,233 @@
+#include <Copyright.h>
+/********************************************************************************
+* osSem.c
+*
+* DESCRIPTION:
+*       Semaphore related routines
+*
+* DEPENDENCIES:
+*       OS Dependent.
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+
+#ifdef _VXWORKS
+#include "vxWorks.h"
+#include "semLib.h"
+#include "errnoLib.h"
+#include "objLib.h"
+
+int sysClkRateGet(void);
+
+#elif defined(WIN32)
+#include "windows.h"
+#include "wdm.h"
+#elif defined(LINUX)
+#include "/usr/include/semaphore.h"
+typedef    sem_t          semaphore ;
+#endif
+
+#include <msApi.h>
+
+GT_SEM osSemCreate( GT_SEM_BEGIN_STATE state);
+GT_STATUS osSemDelete(GT_SEM smid);
+GT_STATUS osSemWait(  GT_SEM smid, GT_U32 timeOut);
+GT_STATUS osSemSignal(GT_SEM smid);
+
+/*******************************************************************************
+* osSemCreate
+*
+* DESCRIPTION:
+*       Create semaphore.
+*
+* INPUTS:
+*       name   - semaphore Name
+*       init   - init value of semaphore counter
+*       count  - max counter (must be >= 1)
+*
+* OUTPUTS:
+*       smid - semaphore Id
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_SEM osSemCreate(GT_SEM_BEGIN_STATE state)
+{
+#ifdef _VXWORKS
+#if 0
+    return (GT_SEM)semBCreate(SEM_Q_FIFO, state);
+#else
+    GT_SEM semid;
+    semid =(GT_SEM)semBCreate(SEM_Q_FIFO, state);
+    return semid;
+#endif
+
+#elif defined(WIN32)
+    return (GT_SEM)CreateSemaphore(NULL, state, 1, NULL);
+#elif defined(LINUX)
+    semaphore lxSem;
+
+    sem_init(&lxSem, state, 1);
+    return lxSem;
+#else
+    return 1;
+#endif
+    return GT_OK;
+}
+
+/*******************************************************************************
+* osSemDelete
+*
+* DESCRIPTION:
+*       Delete semaphore.
+*
+* INPUTS:
+*       smid - semaphore Id
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS osSemDelete(GT_SEM smid)
+{
+#ifdef _VXWORKS
+    STATUS rc;
+
+    rc = semDelete((SEM_ID) smid);
+    if (rc != OK)
+        return GT_FAIL;
+
+#elif defined(WIN32)
+    if (CloseHandle((HANDLE)smid) == 0)
+        return GT_FAIL;
+
+#elif defined(LINUX)
+    sem_destroy((semaphore*) smid);
+#else
+    return GT_OK;
+#endif
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* osSemWait
+*
+* DESCRIPTION:
+*       Wait on semaphore.
+*
+* INPUTS:
+*       smid    - semaphore Id
+*       timeOut - time out in miliseconds or 0 to wait forever
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       OS_TIMEOUT - on time out
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS osSemWait(GT_SEM smid, GT_U32 timeOut)
+{
+#ifdef _VXWORKS
+    STATUS rc;
+
+    if (timeOut == 0)
+    rc = semTake ((SEM_ID) smid, WAIT_FOREVER);
+    else
+    {
+        int num, delay;
+
+        num = sysClkRateGet();
+        delay = (num * timeOut) / 1000;
+        if (delay < 1)
+            rc = semTake ((SEM_ID) smid, 1);
+        else
+            rc = semTake ((SEM_ID) smid, delay);
+    }
+
+    if (rc != OK)
+    {
+        if (errno == S_objLib_OBJ_TIMEOUT)
+            return GT_TIMEOUT;
+        else
+            return GT_FAIL;
+    }
+
+#elif defined(WIN32)
+    DWORD rc;
+
+    rc = WaitForSingleObject((HANDLE)smid, timeOut);
+
+    if (rc == WAIT_ABANDONED)
+        return GT_FAIL;
+    if (rc == WAIT_TIMEOUT)
+        return GT_TIMEOUT;
+
+#elif defined(LINUX)
+    sem_wait((semaphore*) smid) ;
+#else
+    return GT_OK;
+
+#endif
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* osSemSignal
+*
+* DESCRIPTION:
+*       Signal a semaphore.
+*
+* INPUTS:
+*       smid    - semaphore Id
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS osSemSignal(GT_SEM smid)
+{
+#ifdef _VXWORKS
+    STATUS rc;
+    rc = semGive ((SEM_ID) smid);
+    if (rc != OK)
+        return GT_FAIL;
+
+#elif defined(WIN32)
+    if(ReleaseSemaphore((HANDLE) smid, 1, NULL) == 0)
+        return GT_FAIL;
+
+#elif defined(LINUX)
+    sem_post((semaphore*) smid) ;
+#else
+    return GT_OK;
+#endif
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/qdSim.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/qdSim.c
new file mode 100644
index 000000000000..5d7bfb3e2f7b
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/qdSim.c
@@ -0,0 +1,2004 @@
+#include <Copyright.h>
+/********************************************************************************
+* qdSim.c
+*
+* DESCRIPTION:
+*       Simulate QuaterDeck Device(88E6052)'s register map. When QuareterDeck API
+*        try to read/write a bit or bits into QuaterDeck, the simulator will redirect to
+*         its own memory place and performing the function very close to QuaterDeck.
+*        For example,
+*        1) user can set/reset a certain bit of QuarterDeck registers(Phy,Port,and General registers).
+*        2) user can access ATU (flush, load, purge, etc. with max MAC addresses of 32)
+*        3) user can manually generate an Interrupt and test the Interrupt routine.
+*        4) when user read a register, it will clear a certain register if it's a Self Clear register.
+*        5) when user write a register, it will return ERROR if it's read only register.
+*
+*
+* DEPENDENCIES:   QuaterDeck (88E6052) Register MAP.
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+
+#include <msApi.h>
+#include <qdSimRegs.h>
+
+#define IS_BROADCAST_ADDR(_addr)                                \
+            (((_addr)[0] == 0xFF) && ((_addr)[1] == 0xFF) &&    \
+             ((_addr)[2] == 0xFF) && ((_addr)[3] == 0xFF) &&    \
+             ((_addr)[4] == 0xFF) && ((_addr)[5] == 0xFF))
+
+#define IS_GLOBAL_REG(_port)    ((int)(_port) == qdSimDev.qdSimGlobalRegBase)
+#define IS_PORT_REG(_port) (((int)(_port) >= qdSimDev.qdSimPortBase) && ((int)(_port) < qdSimDev.qdSimPortBase + qdSimDev.qdSimNumOfPorts))
+#define IS_PHY_REG(_port) (((int)(_port) >= qdSimDev.qdSimPhyBase) && ((int)(_port) < qdSimDev.qdSimPhyBase + qdSimDev.qdSimNumOfPhys))
+
+typedef struct _QD_SIM_DEV
+{
+    int qdSimUsed;
+    unsigned int qdSimDevId;
+    int qdSimNumOfPorts;
+    int qdSimPortBase;
+    int qdSimNumOfPhys;
+    int qdSimPhyBase;
+    int qdSimGlobalRegBase;
+    int qdSimPortStatsClear[10];
+    int qdSimStatsCapturedPort;
+    int vtuSize;
+    int atuSize;
+} QD_SIM_DEV;
+
+static QD_SIM_DEV qdSimDev = {0};
+
+void qdSimRegsInit();
+GT_BOOL qdSimRead (GT_QD_DEV *dev, unsigned int portNumber , unsigned int miiReg, unsigned int* value);
+GT_BOOL qdSimWrite(GT_QD_DEV *dev, unsigned int portNumber , unsigned int miiReg, unsigned int value);
+
+/*
+ *    This Array will simulate the QuarterDeck Registers.
+ *    To use it, qdSimRegs has to be initialized with its default values and
+ *    Call qdSimRead and qdSimWrite functions.
+*/
+#define MAX_SMI_ADDRESS        0x20
+#define MAX_REG_ADDRESS        0x20
+#define MAX_ATU_ADDRESS        0x800
+#define MAX_QD_VTU_ENTRIES    0x40
+
+GT_U16 qdSimRegs[MAX_SMI_ADDRESS][MAX_REG_ADDRESS];
+
+typedef struct _QDSIM_ATU_ENTRY
+{
+    GT_U16 atuData;
+    GT_U16 DBNum;
+    GT_U8 atuMac[6];
+} QDSIM_ATU_ENTRY;
+
+/*
+    Since QuarterDeck Simulator supports only fixed size of atu entry,
+    we are going with array list not dynamic linked list.
+*/
+typedef struct _QDSIM_ATU_NODE
+{
+    QDSIM_ATU_ENTRY atuEntry;
+    GT_U32 nextEntry;
+} QDSIM_ATU_NODE;
+
+typedef struct _QDSIM_ATU_LIST
+{
+    int atuSize;
+    GT_U32 head;
+} QDSIM_ATU_LIST;
+
+QDSIM_ATU_NODE ATUNode[MAX_ATU_ADDRESS];
+QDSIM_ATU_LIST ATUList;
+
+typedef struct _QDSIM_VTU_ENTRY
+{
+    GT_U16 DBNum;
+    GT_U16 memberTag[10];
+    GT_U16 vid;
+} QDSIM_VTU_ENTRY;
+
+/*
+    Since QuarterDeck Simulator supports only fixed size of atu entry,
+    we are going with array list not dynamic linked list.
+*/
+typedef struct _QDSIM_VTU_NODE
+{
+    QDSIM_VTU_ENTRY vtuEntry;
+    GT_U32 nextEntry;
+} QDSIM_VTU_NODE;
+
+typedef struct _QDSIM_VTU_LIST
+{
+    int vtuSize;
+    GT_U32 head;
+} QDSIM_VTU_LIST;
+
+QDSIM_VTU_NODE VTUNode[MAX_QD_VTU_ENTRIES];
+QDSIM_VTU_LIST VTUList;
+
+/*******************************************************************************
+* qdMemSet
+*
+* DESCRIPTION:
+*       Set a block of memory
+*
+* INPUTS:
+*       start  - start address of memory block for setting
+*       simbol - character to store, converted to an unsigned char
+*       size   - size of block to be set
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       Pointer to set memory block
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+void * qdMemSet
+(
+    IN void * start,
+    IN int    symbol,
+    IN GT_U32 size
+)
+{
+    GT_U32 i;
+    char* buf;
+
+    buf = (char*)start;
+
+    for(i=0; i<size; i++)
+    {
+        *buf++ = (char)symbol;
+    }
+
+    return start;
+}
+
+/*******************************************************************************
+* qdMemCpy
+*
+* DESCRIPTION:
+*       Copies 'size' characters from the object pointed to by 'source' into
+*       the object pointed to by 'destination'. If copying takes place between
+*       objects that overlap, the behavior is undefined.
+*
+* INPUTS:
+*       destination - destination of copy
+*       source      - source of copy
+*       size        - size of memory to copy
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       Pointer to destination
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+void * qdMemCpy
+(
+    IN void *       destination,
+    IN const void * source,
+    IN GT_U32       size
+)
+{
+    GT_U32 i;
+    char* buf;
+    char* src;
+
+    buf = (char*)destination;
+    src = (char*)source;
+
+    for(i=0; i<size; i++)
+    {
+        *buf++ = *src++;
+    }
+
+    return destination;
+}
+
+/*******************************************************************************
+* qdMemCmp
+*
+* DESCRIPTION:
+*       Compares given memories.
+*
+* INPUTS:
+*       src1 - source 1
+*       src2 - source 2
+*       size - size of memory to copy
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       0, if equal.
+*        negative number, if src1 < src2.
+*        positive number, if src1 > src2.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+int qdMemCmp
+(
+    IN char src1[],
+    IN char src2[],
+    IN GT_U32 size
+)
+{
+    GT_U32 i;
+    int value;
+
+    for(i=0; i<size; i++)
+    {
+        if((value = (int)(src1[i] - src2[i])) != 0)
+            return value;
+    }
+
+    return 0;
+}
+
+/*
+    Compare the given ethernet addresses.
+    0, if they are equal.
+    Negative int, if mac2 is bigger than mac1.
+    Positive int, if mac1 is bigger than mac2.
+*/
+int cmpEtherMac(unsigned char* mac1, unsigned char* mac2)
+{
+    int i, tmp;
+
+    for(i=0; i<6; i++)
+    {
+        if((tmp = mac1[i] - mac2[i]) != 0)
+            return tmp;
+    }
+    return 0;
+}
+
+/*
+    entry index, if found.
+    MAX_ATU_ADDRESS, otherwise.
+*/
+int qdSimATUFindNext(QDSIM_ATU_ENTRY* entry)
+{
+    int i;
+    int node = ATUList.head;
+
+    if (IS_BROADCAST_ADDR(entry->atuMac))
+    {
+        if(ATUList.atuSize != 0)
+        {
+            if (ATUNode[node].atuEntry.DBNum == entry->DBNum)
+                return node;
+            else
+            {
+                for(i=0; i<ATUList.atuSize; i++)
+                {
+                    if(ATUNode[node].atuEntry.DBNum == entry->DBNum)
+                        return node;
+                    node = ATUNode[node].nextEntry;
+                }
+            }
+
+        }
+        return MAX_ATU_ADDRESS;
+    }
+
+    for(i=0; i<ATUList.atuSize; i++)
+    {
+        if(cmpEtherMac(ATUNode[node].atuEntry.atuMac,entry->atuMac) > 0)
+        {
+            if(ATUNode[node].atuEntry.DBNum == entry->DBNum)
+                break;
+        }
+        node = ATUNode[node].nextEntry;
+    }
+
+    if (i == ATUList.atuSize)
+        return MAX_ATU_ADDRESS;
+
+    return node;
+}
+
+/*
+    Return 1, if added successfully.
+    Return 0, otherwise.
+*/
+GT_BOOL qdSimATUAdd(QDSIM_ATU_ENTRY* entry)
+{
+    int i, freeNode, preNode, node;
+
+    preNode = node = ATUList.head;
+
+    if (ATUList.atuSize >= MAX_ATU_ADDRESS)
+        return GT_FALSE;
+
+    /* find a free entry from our global memory. */
+    for(i=0; i<MAX_ATU_ADDRESS; i++)
+    {
+        if(ATUNode[i].nextEntry == MAX_ATU_ADDRESS)
+            break;
+    }
+
+    if (i==MAX_ATU_ADDRESS)
+    {
+        return GT_FALSE;
+    }
+
+    freeNode = i;
+
+    /* find the smallest entry which is bigger than the given entry */
+    for(i=0; i<ATUList.atuSize; i++)
+    {
+        if(cmpEtherMac(ATUNode[node].atuEntry.atuMac,entry->atuMac) >= 0)
+            break;
+        preNode = node;
+        node = ATUNode[node].nextEntry;
+    }
+
+    /* if the same Mac address is in the list and dbnum is identical, then just update and return. */
+    if (i != ATUList.atuSize)
+        if(cmpEtherMac(ATUNode[node].atuEntry.atuMac,entry->atuMac) == 0)
+        {
+            if(ATUNode[node].atuEntry.DBNum == entry->DBNum)
+            {
+                ATUNode[node].atuEntry.atuData = entry->atuData;
+                return GT_TRUE;
+            }
+        }
+
+    qdMemCpy(ATUNode[freeNode].atuEntry.atuMac, entry->atuMac, 6);
+    ATUNode[freeNode].atuEntry.atuData = entry->atuData;
+    ATUNode[freeNode].atuEntry.DBNum = entry->DBNum;
+
+    /* Add it to head */
+    if (i == 0)
+    {
+        ATUNode[freeNode].nextEntry = ATUList.head;
+        ATUList.head = freeNode;
+    }
+    /* Add it to tail */
+    else if (i == ATUList.atuSize)
+    {
+        ATUNode[preNode].nextEntry = freeNode;
+        ATUNode[freeNode].nextEntry = ATUList.head;
+    }
+    /* Add it in the middle of the list */
+    else
+    {
+        ATUNode[freeNode].nextEntry = ATUNode[preNode].nextEntry;
+        ATUNode[preNode].nextEntry = freeNode;
+    }
+    ATUList.atuSize++;
+    return GT_TRUE;
+}
+
+
+/*
+    Return 1, if added successfully.
+    Return 0, otherwise.
+*/
+GT_BOOL qdSimATUDel(QDSIM_ATU_ENTRY* entry)
+{
+    int i, preNode, node;
+
+    preNode = node = ATUList.head;
+
+    /* find the entry */
+    for(i=0; i<ATUList.atuSize; i++)
+    {
+        if(cmpEtherMac(ATUNode[node].atuEntry.atuMac,entry->atuMac) == 0)
+        {
+            if(ATUNode[node].atuEntry.DBNum == entry->DBNum)
+                break;
+        }
+        preNode = node;
+        node = ATUNode[node].nextEntry;
+    }
+
+    if (i == ATUList.atuSize)
+    {
+        /* cannot find the given entry to be deleted. */
+        return GT_FALSE;
+    }
+
+    /* Delete it from head */
+    if (i == 0)
+    {
+        ATUList.head = ATUNode[node].nextEntry;
+    }
+    /* Delete it in the middle of the list */
+    else if (i != ATUList.atuSize-1)
+    {
+        ATUNode[preNode].nextEntry = ATUNode[node].nextEntry;
+    }
+    ATUList.atuSize--;
+    ATUNode[node].nextEntry = MAX_ATU_ADDRESS;
+
+    return GT_TRUE;
+}
+
+
+GT_BOOL qdSimATUFlushUnlockedEntry()
+{
+    int i;
+
+    for (i=0; i<MAX_ATU_ADDRESS; i++)
+    {
+        if(((ATUNode[i].atuEntry.atuData & 0xF) != 0xF)    &&
+            (!(ATUNode[i].atuEntry.atuMac[0] & 1))         &&
+            (ATUNode[i].nextEntry != MAX_ATU_ADDRESS))
+        {
+            qdSimATUDel(&ATUNode[i].atuEntry);
+        }
+    }
+    return GT_TRUE;
+}
+
+GT_BOOL qdSimATUFlushInDB(int dbNum)
+{
+    int i;
+
+    for (i=0; i<MAX_ATU_ADDRESS; i++)
+    {
+        if(ATUNode[i].atuEntry.DBNum != dbNum)
+            continue;
+        qdSimATUDel(&ATUNode[i].atuEntry);
+    }
+    return GT_TRUE;
+}
+
+GT_BOOL qdSimATUFlushUnlockedInDB(int dbNum)
+{
+    int i;
+
+    for (i=0; i<MAX_ATU_ADDRESS; i++)
+    {
+        if(ATUNode[i].atuEntry.DBNum != dbNum)
+            continue;
+
+        if(((ATUNode[i].atuEntry.atuData & 0xF) != 0xF)    &&
+            (!(ATUNode[i].atuEntry.atuMac[0] & 1))         &&
+            (ATUNode[i].nextEntry != MAX_ATU_ADDRESS))
+        {
+            qdSimATUDel(&ATUNode[i].atuEntry);
+        }
+    }
+    return GT_TRUE;
+}
+
+
+void qdSimATUInit()
+{
+    int i;
+
+    qdMemSet((char*)ATUNode, 0, sizeof(ATUNode));
+
+    /* MAX_ATU_ADDRESS means entry i is free, otherwise, it's not free */
+    for (i=0; i<MAX_ATU_ADDRESS; i++)
+        ATUNode[i].nextEntry = MAX_ATU_ADDRESS;
+
+    ATUList.atuSize = 0;
+    ATUList.head = 0;
+}
+
+void qdSimGetATUInfo(QDSIM_ATU_ENTRY* entry)
+{
+    entry->atuData = qdSimRegs[qdSimDev.qdSimGlobalRegBase][12];
+    entry->atuMac[0] = (qdSimRegs[qdSimDev.qdSimGlobalRegBase][13] >> 8) & 0xFF;
+    entry->atuMac[1] = qdSimRegs[qdSimDev.qdSimGlobalRegBase][13] & 0xFF;
+    entry->atuMac[2] = (qdSimRegs[qdSimDev.qdSimGlobalRegBase][14] >> 8) & 0xFF;
+    entry->atuMac[3] = qdSimRegs[qdSimDev.qdSimGlobalRegBase][14] & 0xFF;
+    entry->atuMac[4] = (qdSimRegs[qdSimDev.qdSimGlobalRegBase][15] >> 8) & 0xFF;
+    entry->atuMac[5] = qdSimRegs[qdSimDev.qdSimGlobalRegBase][15] & 0xFF;
+    entry->DBNum = qdSimRegs[qdSimDev.qdSimGlobalRegBase][11] & 0xF;
+    return;
+}
+
+void qdSimSetATUInfo(QDSIM_ATU_ENTRY* entry)
+{
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][12] = entry->atuData;
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][13] = (entry->atuMac[0]<<8) | entry->atuMac[1];
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][14] = (entry->atuMac[2]<<8) | entry->atuMac[3];
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][15] = (entry->atuMac[4]<<8) | entry->atuMac[5];
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][11] &= ~0xF;
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][11] |= (entry->DBNum & 0xF);
+
+    return;
+}
+
+void qdSimReSetATUInfo()
+{
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][11] &= ~0xF;
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][12] = 0;
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][13] = 0xFFFF;
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][14] = 0xFFFF;
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][15] = 0xFFFF;
+
+    return;
+}
+
+GT_BOOL qdSimATUOperation(unsigned int value)
+{
+    QDSIM_ATU_ENTRY entry;
+    int    index;
+
+    switch((value & 0x7000) >> 12)
+    {
+        case 1:
+            /* Flush ALL */
+            qdSimATUInit();
+            break;
+        case 2:
+            /* Flush all unlocked entries */
+            return qdSimATUFlushUnlockedEntry();
+        case 3:
+            /* Load or Purge entry */
+            qdSimGetATUInfo(&entry);
+            if(entry.atuData & 0xF)
+                return qdSimATUAdd(&entry);
+            else
+                return qdSimATUDel(&entry);
+            break;
+        case 4:
+            /* Get Next Entry */
+            qdSimGetATUInfo(&entry);
+            index = qdSimATUFindNext(&entry);
+            if (index == MAX_ATU_ADDRESS)
+            {
+                qdSimReSetATUInfo();
+                return GT_TRUE;
+            }
+            else
+            {
+                qdSimSetATUInfo(&ATUNode[index].atuEntry);
+                return GT_TRUE;
+            }
+            break;
+        case 5:
+            /* Flush ALL in a DBNum */
+            return qdSimATUFlushInDB(value & 0xF);
+            break;
+        case 6:
+            /* Flush all unlocked entries */
+            return qdSimATUFlushUnlockedInDB(value & 0xF);
+        default:
+            break;
+    }
+    return GT_TRUE;
+}
+
+/*
+    VTU Related Routines
+*/
+
+/*
+    entry index, if found.
+    MAX_QD_VTU_ENTRIES, otherwise.
+*/
+int qdSimVTUFindNext(QDSIM_VTU_ENTRY* entry)
+{
+    int i;
+    int node = VTUList.head;
+
+    if (entry->vid == 0xFFF)
+    {
+        if(VTUList.vtuSize != 0)
+            return node;
+        else
+            return MAX_QD_VTU_ENTRIES;
+    }
+
+    for(i=0; i<VTUList.vtuSize; i++)
+    {
+        if(VTUNode[node].vtuEntry.vid > entry->vid)
+            break;
+        node = VTUNode[node].nextEntry;
+    }
+
+    if (i == VTUList.vtuSize)
+        return MAX_QD_VTU_ENTRIES;
+
+    return node;
+}
+
+/*
+    Return 1, if added successfully.
+    Return 0, otherwise.
+*/
+GT_BOOL qdSimVTUAdd(QDSIM_VTU_ENTRY* entry)
+{
+    int i, freeNode, preNode, node;
+
+    preNode = node = VTUList.head;
+
+    if (VTUList.vtuSize >= qdSimDev.vtuSize)
+        return GT_FALSE;
+
+    /* find a free entry from our global memory. */
+    for(i=0; i<MAX_QD_VTU_ENTRIES; i++)
+    {
+        if(VTUNode[i].nextEntry == MAX_QD_VTU_ENTRIES)
+            break;
+    }
+
+    if (i==MAX_QD_VTU_ENTRIES)
+    {
+        return GT_FALSE;
+    }
+
+    freeNode = i;
+
+    /* find the smallest entry which is bigger than the given entry */
+    for(i=0; i<VTUList.vtuSize; i++)
+    {
+        if(VTUNode[node].vtuEntry.vid >= entry->vid)
+            break;
+        preNode = node;
+        node = VTUNode[node].nextEntry;
+    }
+
+    /* if the same vid is in the list, then just update and return. */
+    if (i != VTUList.vtuSize)
+        if(VTUNode[node].vtuEntry.vid == entry->vid)
+        {
+            qdMemCpy(&VTUNode[node].vtuEntry, entry, sizeof(QDSIM_VTU_ENTRY));
+            return GT_TRUE;
+        }
+
+    qdMemCpy(&VTUNode[freeNode].vtuEntry, entry, sizeof(QDSIM_VTU_ENTRY));
+
+    /* Add it to head */
+    if (i == 0)
+    {
+        VTUNode[freeNode].nextEntry = VTUList.head;
+        VTUList.head = freeNode;
+    }
+    /* Add it to tail */
+    else if (i == VTUList.vtuSize)
+    {
+        VTUNode[preNode].nextEntry = freeNode;
+        VTUNode[freeNode].nextEntry = VTUList.head;
+    }
+    /* Add it in the middle of the list */
+    else
+    {
+        VTUNode[freeNode].nextEntry = VTUNode[preNode].nextEntry;
+        VTUNode[preNode].nextEntry = freeNode;
+    }
+    VTUList.vtuSize++;
+    return GT_TRUE;
+}
+
+
+/*
+    Return 1, if added successfully.
+    Return 0, otherwise.
+*/
+GT_BOOL qdSimVTUDel(QDSIM_VTU_ENTRY* entry)
+{
+    int i, preNode, node;
+
+    preNode = node = VTUList.head;
+
+    /* find the entry */
+    for(i=0; i<VTUList.vtuSize; i++)
+    {
+        if(VTUNode[node].vtuEntry.vid == entry->vid)
+            break;
+        preNode = node;
+        node = VTUNode[node].nextEntry;
+    }
+
+    if (i == VTUList.vtuSize)
+    {
+        /* cannot find the given entry to be deleted. */
+        return GT_FALSE;
+    }
+
+    /* Delete it from head */
+    if (i == 0)
+    {
+        VTUList.head = VTUNode[node].nextEntry;
+    }
+    /* Delete it in the middle of the list */
+    else if (i != VTUList.vtuSize-1)
+    {
+        VTUNode[preNode].nextEntry = VTUNode[node].nextEntry;
+    }
+    VTUList.vtuSize--;
+    VTUNode[node].nextEntry = MAX_QD_VTU_ENTRIES;
+
+    return GT_TRUE;
+}
+
+
+/*
+    Return 1, if added successfully.
+    Return 0, otherwise.
+*/
+GT_BOOL qdSimVTUUpdate(QDSIM_VTU_ENTRY* entry)
+{
+    int i;
+    int node = VTUList.head;
+
+    /* find the entry */
+    for(i=0; i<VTUList.vtuSize; i++)
+    {
+        if(VTUNode[node].vtuEntry.vid == entry->vid)
+            break;
+        node = VTUNode[node].nextEntry;
+    }
+
+    if (i == VTUList.vtuSize)
+    {
+        /* cannot find the given entry to be deleted. */
+        return GT_FALSE;
+    }
+
+    /* Update the found entry */
+    qdMemCpy(&VTUNode[node].vtuEntry, entry, sizeof(QDSIM_VTU_ENTRY));
+
+    return GT_TRUE;
+}
+
+void qdSimVTUInit()
+{
+    int i;
+
+    qdMemSet((char*)VTUNode, 0, sizeof(VTUNode));
+
+    /* MAX_ATU_ADDRESS means entry i is free, otherwise, it's not free */
+    for (i=0; i<MAX_QD_VTU_ENTRIES; i++)
+        VTUNode[i].nextEntry = MAX_QD_VTU_ENTRIES;
+
+    VTUList.vtuSize = 0;
+    VTUList.head = 0;
+}
+
+void qdSimGetVTUInfo(QDSIM_VTU_ENTRY* entry)
+{
+    entry->DBNum = qdSimRegs[qdSimDev.qdSimGlobalRegBase][5] & 0xF;
+    entry->vid = qdSimRegs[qdSimDev.qdSimGlobalRegBase][6] & 0x1FFF;
+    entry->memberTag[0] = qdSimRegs[qdSimDev.qdSimGlobalRegBase][7] & 0x3;
+    entry->memberTag[1] = (qdSimRegs[qdSimDev.qdSimGlobalRegBase][7] >> 4) & 0x3;
+    entry->memberTag[2] = (qdSimRegs[qdSimDev.qdSimGlobalRegBase][7] >> 8) & 0x3;
+    entry->memberTag[3] = (qdSimRegs[qdSimDev.qdSimGlobalRegBase][7] >> 12) & 0x3;
+    entry->memberTag[4] = qdSimRegs[qdSimDev.qdSimGlobalRegBase][8] & 0x3;
+    entry->memberTag[5] = (qdSimRegs[qdSimDev.qdSimGlobalRegBase][8] >> 4) & 0x3;
+    entry->memberTag[6] = (qdSimRegs[qdSimDev.qdSimGlobalRegBase][8] >> 8) & 0x3;
+    entry->memberTag[7] = (qdSimRegs[qdSimDev.qdSimGlobalRegBase][8] >> 12) & 0x3;
+    entry->memberTag[8] = qdSimRegs[qdSimDev.qdSimGlobalRegBase][9] & 0x3;
+    entry->memberTag[9] = (qdSimRegs[qdSimDev.qdSimGlobalRegBase][9] >> 4) & 0x3;
+
+    return;
+}
+
+void qdSimSetVTUInfo(QDSIM_VTU_ENTRY* entry)
+{
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][5] |= entry->DBNum;
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][6] = (entry->vid & 0xFFF) | 0x1000;
+
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][7] =     entry->memberTag[0] |
+                        (entry->memberTag[1] << 4) |
+                        (entry->memberTag[2] << 8) |
+                        (entry->memberTag[3] << 12);
+
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][8] =     entry->memberTag[4] |
+                        (entry->memberTag[5] << 4) |
+                        (entry->memberTag[6] << 8) |
+                        (entry->memberTag[7] << 12);
+
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][9] =     entry->memberTag[8] |
+                        (entry->memberTag[9] << 4);
+
+    return;
+}
+
+void qdSimReSetVTUInfo()
+{
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][6] = 0xFFF;
+
+    return;
+}
+
+void qdSimVTUGetViolation()
+{
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][5] &= ~0xFFF;
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][5] |= 1;    /* assume port 1 causes the violation */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][6] = 1;    /* assume vid 1 causes the violation */
+}
+
+void qdSimVTUResetBusy()
+{
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][5] &= ~0x8000;
+
+    return;
+}
+
+GT_BOOL qdSimVTUOperation(unsigned int value)
+{
+    QDSIM_VTU_ENTRY entry;
+    int    index;
+
+    if(!(value & 0x8000))
+        return GT_FALSE;
+
+    qdSimVTUResetBusy();
+
+    switch((value & 0x7000) >> 12)
+    {
+        case 1:
+            /* Flush ALL */
+            qdSimVTUInit();
+            break;
+        case 3:
+            /* Load or Purge entry */
+            qdSimGetVTUInfo(&entry);
+            if(entry.vid & 0x1000)
+            {
+                entry.vid &= ~0x1000;
+                return qdSimVTUAdd(&entry);
+            }
+            else
+                return qdSimVTUDel(&entry);
+            break;
+        case 4:
+            /* Get Next Entry */
+            qdSimGetVTUInfo(&entry);
+            entry.vid &= ~0x1000;
+            index = qdSimVTUFindNext(&entry);
+            if (index == MAX_QD_VTU_ENTRIES)
+            {
+                qdSimReSetVTUInfo();
+                return GT_TRUE;
+            }
+            else
+            {
+                qdSimSetVTUInfo(&VTUNode[index].vtuEntry);
+                return GT_TRUE;
+            }
+            break;
+        case 7:
+            qdSimVTUGetViolation();
+            break;
+        default:
+            break;
+    }
+    return GT_TRUE;
+}
+
+void qdSimStatsInit()
+{
+    int i;
+
+    for(i=0; i<qdSimDev.qdSimNumOfPorts; i++)
+        qdSimDev.qdSimPortStatsClear[i] = 0;
+
+}
+
+GT_BOOL qdSimStatsOperation(unsigned int value)
+{
+    int    i;
+
+    if(!(value & 0x8000))
+        return GT_FALSE;
+
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][29] &= ~0x8000;
+
+    switch((value & 0x7000) >> 12)
+    {
+        case 1:
+            /* Flush ALL */
+            for(i=0; i<qdSimDev.qdSimNumOfPorts; i++)
+                qdSimDev.qdSimPortStatsClear[i] = 1;
+            break;
+        case 2:
+            /* Flush a port */
+            if ((value & 0x3F) >= (unsigned int)qdSimDev.qdSimNumOfPorts)
+                return GT_FALSE;
+            qdSimDev.qdSimPortStatsClear[value & 0x3F] = 1;
+            break;
+        case 4:
+            /* Read a counter */
+            if(qdSimDev.qdSimPortStatsClear[qdSimDev.qdSimStatsCapturedPort] == 1)
+            {
+                qdSimRegs[qdSimDev.qdSimGlobalRegBase][30] = 0;
+                qdSimRegs[qdSimDev.qdSimGlobalRegBase][31] = 0;
+            }
+            else
+            {
+                qdSimRegs[qdSimDev.qdSimGlobalRegBase][30] = qdSimDev.qdSimStatsCapturedPort;
+                qdSimRegs[qdSimDev.qdSimGlobalRegBase][31] = value & 0x3F;
+            }
+            break;
+        case 5:
+            if ((value & 0x3F) >= (unsigned int)qdSimDev.qdSimNumOfPorts)
+                return GT_FALSE;
+            qdSimDev.qdSimStatsCapturedPort = value & 0x3F;
+            break;
+        default:
+            return GT_FALSE;
+    }
+    return GT_TRUE;
+}
+
+#define QD_PHY_CONTROL_RW (QD_PHY_RESET|QD_PHY_LOOPBACK|QD_PHY_SPEED|QD_PHY_AUTONEGO|QD_PHY_POWER|QD_PHY_RESTART_AUTONEGO|QD_PHY_DUPLEX)
+#define QD_PHY_CONTROL_RO (~QD_PHY_CONTROL_RW)
+
+GT_BOOL qdSimPhyControl(unsigned int portNumber , unsigned int miiReg, unsigned int value)
+{
+
+    /* reset all the Read Only bits. */
+    value &= QD_PHY_CONTROL_RW;
+
+    /* If powerDown is set, add Reset and Restart Auto bits. */
+    if(value & QD_PHY_POWER)
+    {
+        value |= (QD_PHY_RESET|QD_PHY_RESTART_AUTONEGO);
+        qdSimRegs[portNumber][miiReg] = (GT_U16)value;
+        return GT_TRUE;
+    }
+
+    /* If Power Down was set, clear Reset and Restart Auto bits. */
+    if(qdSimRegs[portNumber][miiReg] & QD_PHY_POWER)
+    {
+        value &= ~(QD_PHY_RESET|QD_PHY_RESTART_AUTONEGO);
+        qdSimRegs[portNumber][miiReg] = (GT_U16)value;
+        return GT_TRUE;
+    }
+
+    /* If Reset or Restart Auto set, replace with current value and clear Reset/Restart Auto. */
+    if (value & (QD_PHY_RESET|QD_PHY_RESTART_AUTONEGO))
+    {
+        value &= ~(QD_PHY_RESET|QD_PHY_RESTART_AUTONEGO);
+        qdSimRegs[portNumber][miiReg] = (GT_U16)value;
+        return GT_TRUE;
+    }
+    else
+    {
+        value &= ~(QD_PHY_SPEED|QD_PHY_AUTONEGO|QD_PHY_DUPLEX);
+        qdSimRegs[portNumber][miiReg] &= (QD_PHY_SPEED|QD_PHY_AUTONEGO|QD_PHY_DUPLEX);
+        qdSimRegs[portNumber][miiReg] |= (GT_U16)value;
+        return GT_TRUE;
+    }
+
+    return GT_TRUE;
+}
+
+void qdSimRegsInit_6021()
+{
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][5] = 0;    /* VTU Operation Register */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][6] = 0;    /* VTU VID Register */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][7] = 0;    /* VTU Data Register */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][29] = 0;    /* Stats Operation Register */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][30] = 0;    /* Stats Counter Register Bytes 3,2 */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][31] = 0;    /* Stats Counter Register Bytes 1,0 */
+}
+
+void qdSimRegsInit_6063()
+{
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][5] = 0;    /* VTU Operation Register */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][6] = 0;    /* VTU VID Register */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][7] = 0;    /* VTU Data Register */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][8] = 0;    /* VTU Data Register */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][29] = 0;    /* Stats Operation Register */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][30] = 0;    /* Stats Counter Register Bytes 3,2 */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][31] = 0;    /* Stats Counter Register Bytes 1,0 */
+}
+
+void qdSimRegsInit_6083()
+{
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][5] = 0;    /* VTU Operation Register */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][6] = 0;    /* VTU VID Register */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][7] = 0;    /* VTU Data Register */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][8] = 0;    /* VTU Data Register */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][9] = 0;    /* VTU Data Register */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][29] = 0;    /* Stats Operation Register */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][30] = 0;    /* Stats Counter Register Bytes 3,2 */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][31] = 0;    /* Stats Counter Register Bytes 1,0 */
+}
+
+void qdSimRegsInit()
+{
+    int i;
+
+    qdMemSet(qdSimRegs, 0xff, sizeof(qdSimRegs));
+
+    /*
+        PHY Registers Setup
+    */
+    for(i=0; i<qdSimDev.qdSimNumOfPhys; i++)
+    {
+        qdSimRegs[i][0] = 0x3100;    /* PHY Control */
+        qdSimRegs[i][1] = 0x7849;    /* PHY Status */
+        qdSimRegs[i][2] = 0x0141;    /* PHY Id 1 */
+        qdSimRegs[i][3] = 0x0c1f;    /* PHY Id 2 */
+        qdSimRegs[i][4] = 0x01e1;    /* AutoNego Ad */
+        qdSimRegs[i][5] = 0;        /* Partner Ability */
+        qdSimRegs[i][6] = 4;        /* AutoNego Expansion */
+        qdSimRegs[i][7] = 0x2001;    /* Next Page Transmit */
+        qdSimRegs[i][8] = 0;        /* Link Partner Next Page */
+        qdSimRegs[i][16] = 0x130;    /* Phy Specific Control */
+        qdSimRegs[i][17] = 0x40;    /* Phy Specific Status */
+        qdSimRegs[i][18] = 0;        /* Phy Interrupt Enable */
+        qdSimRegs[i][19] = 0x40;    /* Phy Interrupt Status */
+        qdSimRegs[i][20] = 0;        /* Interrupt Port Summary */
+        qdSimRegs[i][21] = 0;        /* Receive Error Counter */
+        qdSimRegs[i][22] = 0xa34;    /* LED Parallel Select */
+        qdSimRegs[i][23] = 0x3fc;    /* LED Stream Select */
+        qdSimRegs[i][24] = 0x42bf;    /* LED Control */
+    }
+
+    /*
+        Port Registers Setup
+    */
+    for(i=qdSimDev.qdSimPortBase; i<qdSimDev.qdSimNumOfPorts+qdSimDev.qdSimPortBase; i++)
+    {
+        qdSimRegs[i][0] = 0x800;    /* Port Status */
+        qdSimRegs[i][3] = (GT_U16)qdSimDev.qdSimDevId << 4;    /* Switch ID */
+        qdSimRegs[i][4] = 0x7f;    /* Port Control */
+        qdSimRegs[i][6] = 0x7f & (~(1 << (i-8)));    /* Port Based Vlan Map */
+        qdSimRegs[i][7] = 1;        /* Default Port Vlan ID & Priority */
+        qdSimRegs[i][16] = 0;        /* Rx Frame Counter */
+        qdSimRegs[i][17] = 0;        /* Tx Frame Counter */
+    }
+
+    /*
+        Global Registers Setup
+    */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][0] = 0x3c01;    /* Global Status */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][1] = 0;        /* Switch Mac Addr 0 ~ 1 byte */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][2] = 0;        /* Switch Mac Addr 2 ~ 3 byte */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][3] = 0;        /* Switch Mac Addr 4 ~ 5 byte */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][4] = 0x81;    /* Global Control */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][10] = 0x1130;        /* ATU Control */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][11] = 0;                /* ATU Operation */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][12] = 0;                /* ATU Data */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][13] = 0;                /* ATU Mac Addr 0 ~ 1 byte */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][14] = 0;                /* ATU Mac Addr 2 ~ 3 byte */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][15] = 0;                /* ATU Mac Addr 4 ~ 5 byte */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][16] = 0;            /* IP-PRI Mapping */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][17] = 0;            /* IP-PRI Mapping */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][18] = 0x5555;    /* IP-PRI Mapping */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][19] = 0x5555;    /* IP-PRI Mapping */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][20] = 0xaaaa;    /* IP-PRI Mapping */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][21] = 0xaaaa;    /* IP-PRI Mapping */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][22] = 0xffff;    /* IP-PRI Mapping */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][23] = 0xffff;    /* IP-PRI Mapping */
+    qdSimRegs[qdSimDev.qdSimGlobalRegBase][24] = 0xfa41;    /* IEEE-PRI Mapping */
+
+    switch(qdSimDev.qdSimDevId)
+    {
+        case GT_88E6021:
+            qdSimRegsInit_6021();
+            break;
+        case GT_88E6063:
+        case GT_FF_HG:
+        case GT_FF_EG:
+        case GT_FH_VPN:
+            qdSimRegsInit_6063();
+            break;
+        case GT_88E6083:
+            qdSimRegsInit_6083();
+            break;
+        default:
+            break;
+    }
+}
+
+GT_BOOL qdSimRead_6052(unsigned int portNumber , unsigned int miiReg, unsigned int* value)
+{
+    *value = (unsigned int) qdSimRegs[portNumber][miiReg];
+
+    if (IS_GLOBAL_REG(portNumber))    /* Global register */
+    {
+        switch(miiReg)
+        {
+            case QD_REG_GLOBAL_STATUS:
+                    qdSimRegs[portNumber][miiReg] &= ~0xF;
+                    if(qdSimRegs[0][QD_PHY_INT_PORT_SUMMARY_REG])
+                        qdSimRegs[portNumber][miiReg] |= 0x2;
+
+                    break;
+            case QD_REG_MACADDR_01:
+            case QD_REG_MACADDR_23:
+            case QD_REG_MACADDR_45:
+            case QD_REG_GLOBAL_CONTROL:
+            case QD_REG_ATU_CONTROL:
+            case QD_REG_ATU_OPERATION:
+            case QD_REG_ATU_DATA_REG:
+            case QD_REG_ATU_MAC_01:
+            case QD_REG_ATU_MAC_23:
+            case QD_REG_ATU_MAC_45:
+            case QD_REG_IP_PRI_REG0:
+            case QD_REG_IP_PRI_REG1:
+            case QD_REG_IP_PRI_REG2:
+            case QD_REG_IP_PRI_REG3:
+            case QD_REG_IP_PRI_REG4:
+            case QD_REG_IP_PRI_REG5:
+            case QD_REG_IP_PRI_REG6:
+            case QD_REG_IP_PRI_REG7:
+            case QD_REG_IEEE_PRI:
+                    break;
+        }
+    }
+    else if(IS_PORT_REG(portNumber))    /* Port registers */
+    {
+        switch(miiReg)
+        {
+            case QD_REG_PORT_STATUS:
+            case QD_REG_SWITCH_ID:
+            case QD_REG_PORT_CONTROL:
+            case QD_REG_PORT_VLAN_MAP:
+            case QD_REG_PVID:
+            case QD_REG_RXCOUNTER:
+            case QD_REG_TXCOUNTER:
+                    break;
+        }
+    }
+    else if(IS_PHY_REG(portNumber))    /* phy registers */
+    {
+        switch(miiReg)
+        {
+            case QD_PHY_CONTROL_REG:
+                    break;
+            case QD_PHY_INT_ENABLE_REG:
+                    break;
+            case QD_PHY_INT_STATUS_REG:
+                    qdSimRegs[portNumber][miiReg] = 0;
+                    qdSimRegs[0][QD_PHY_INT_PORT_SUMMARY_REG] &= ~(1<<portNumber);
+                    break;
+            case QD_PHY_INT_PORT_SUMMARY_REG:
+                    *value = (unsigned int) qdSimRegs[0][miiReg];
+                    break;
+        }
+    }
+
+    return GT_TRUE;
+}
+
+GT_BOOL qdSimRead_6021(unsigned int portNumber , unsigned int miiReg, unsigned int* value)
+{
+    *value = (unsigned int) qdSimRegs[portNumber][miiReg];
+
+    if (IS_GLOBAL_REG(portNumber))    /* Global register */
+    {
+        switch(miiReg)
+        {
+            case QD_REG_GLOBAL_STATUS:
+                    qdSimRegs[portNumber][miiReg] &= ~0x7F;
+                    if(qdSimRegs[0][QD_PHY_INT_PORT_SUMMARY_REG])
+                        qdSimRegs[portNumber][miiReg] |= 0x2;
+
+                    break;
+            case QD_REG_MACADDR_01:
+            case QD_REG_MACADDR_23:
+            case QD_REG_MACADDR_45:
+            case QD_REG_VTU_OPERATION:
+            case QD_REG_VTU_VID_REG:
+            case QD_REG_VTU_DATA1_REG:
+            case QD_REG_VTU_DATA2_REG:
+            case QD_REG_GLOBAL_CONTROL:
+            case QD_REG_ATU_CONTROL:
+            case QD_REG_ATU_OPERATION:
+            case QD_REG_ATU_DATA_REG:
+            case QD_REG_ATU_MAC_01:
+            case QD_REG_ATU_MAC_23:
+            case QD_REG_ATU_MAC_45:
+            case QD_REG_IP_PRI_REG0:
+            case QD_REG_IP_PRI_REG1:
+            case QD_REG_IP_PRI_REG2:
+            case QD_REG_IP_PRI_REG3:
+            case QD_REG_IP_PRI_REG4:
+            case QD_REG_IP_PRI_REG5:
+            case QD_REG_IP_PRI_REG6:
+            case QD_REG_IP_PRI_REG7:
+            case QD_REG_IEEE_PRI:
+            case QD_REG_STATS_OPERATION:
+            case QD_REG_STATS_COUNTER3_2:
+            case QD_REG_STATS_COUNTER1_0:
+                    break;
+        }
+    }
+    else if(IS_PORT_REG(portNumber))    /* Port registers */
+    {
+        switch(miiReg)
+        {
+            case QD_REG_PORT_STATUS:
+            case QD_REG_SWITCH_ID:
+            case QD_REG_PORT_CONTROL:
+            case QD_REG_PORT_VLAN_MAP:
+            case QD_REG_PVID:
+            case QD_REG_RATE_CTRL:
+            case QD_REG_PAV:
+            case QD_REG_RXCOUNTER:
+            case QD_REG_TXCOUNTER:
+            case QD_REG_Q_COUNTER:
+                    break;
+        }
+    }
+    else if(IS_PHY_REG(portNumber))    /* phy registers */
+    {
+        switch(miiReg)
+        {
+            case QD_PHY_CONTROL_REG:
+                    break;
+            case QD_PHY_INT_ENABLE_REG:
+                    break;
+            case QD_PHY_INT_STATUS_REG:
+                    qdSimRegs[portNumber][miiReg] = 0;
+                    qdSimRegs[0][QD_PHY_INT_PORT_SUMMARY_REG] &= ~(1<<portNumber);
+                    break;
+            case QD_PHY_INT_PORT_SUMMARY_REG:
+                    *value = (unsigned int) qdSimRegs[0][miiReg];
+                    break;
+        }
+    }
+
+    return GT_TRUE;
+}
+
+GT_BOOL qdSimRead_6063(unsigned int portNumber , unsigned int miiReg, unsigned int* value)
+{
+    *value = (unsigned int) qdSimRegs[portNumber][miiReg];
+
+    if (IS_GLOBAL_REG(portNumber))    /* Global register */
+    {
+        switch(miiReg)
+        {
+            case QD_REG_GLOBAL_STATUS:
+                    qdSimRegs[portNumber][miiReg] &= ~0x7F;
+                    if(qdSimRegs[0][QD_PHY_INT_PORT_SUMMARY_REG])
+                        qdSimRegs[portNumber][miiReg] |= 0x2;
+
+                    break;
+            case QD_REG_MACADDR_01:
+            case QD_REG_MACADDR_23:
+            case QD_REG_MACADDR_45:
+            case QD_REG_VTU_OPERATION:
+            case QD_REG_VTU_VID_REG:
+            case QD_REG_VTU_DATA1_REG:
+            case QD_REG_VTU_DATA2_REG:
+            case QD_REG_GLOBAL_CONTROL:
+            case QD_REG_ATU_CONTROL:
+            case QD_REG_ATU_OPERATION:
+            case QD_REG_ATU_DATA_REG:
+            case QD_REG_ATU_MAC_01:
+            case QD_REG_ATU_MAC_23:
+            case QD_REG_ATU_MAC_45:
+            case QD_REG_IP_PRI_REG0:
+            case QD_REG_IP_PRI_REG1:
+            case QD_REG_IP_PRI_REG2:
+            case QD_REG_IP_PRI_REG3:
+            case QD_REG_IP_PRI_REG4:
+            case QD_REG_IP_PRI_REG5:
+            case QD_REG_IP_PRI_REG6:
+            case QD_REG_IP_PRI_REG7:
+            case QD_REG_IEEE_PRI:
+            case QD_REG_STATS_OPERATION:
+            case QD_REG_STATS_COUNTER3_2:
+            case QD_REG_STATS_COUNTER1_0:
+                    break;
+        }
+    }
+    else if(IS_PORT_REG(portNumber))    /* Port registers */
+    {
+        switch(miiReg)
+        {
+            case QD_REG_PORT_STATUS:
+            case QD_REG_SWITCH_ID:
+            case QD_REG_PORT_CONTROL:
+            case QD_REG_PORT_VLAN_MAP:
+            case QD_REG_PVID:
+            case QD_REG_RATE_CTRL:
+            case QD_REG_PAV:
+            case QD_REG_RXCOUNTER:
+            case QD_REG_TXCOUNTER:
+            case QD_REG_Q_COUNTER:
+                    break;
+        }
+    }
+    else if(IS_PHY_REG(portNumber))    /* phy registers */
+    {
+        switch(miiReg)
+        {
+            case QD_PHY_CONTROL_REG:
+                    break;
+            case QD_PHY_INT_ENABLE_REG:
+                    break;
+            case QD_PHY_INT_STATUS_REG:
+                    qdSimRegs[portNumber][miiReg] = 0;
+                    qdSimRegs[0][QD_PHY_INT_PORT_SUMMARY_REG] &= ~(1<<portNumber);
+                    break;
+            case QD_PHY_INT_PORT_SUMMARY_REG:
+                    *value = (unsigned int) qdSimRegs[0][miiReg];
+                    break;
+        }
+    }
+
+    return GT_TRUE;
+}
+
+
+GT_BOOL qdSimRead_6083(unsigned int portNumber , unsigned int miiReg, unsigned int* value)
+{
+    *value = (unsigned int) qdSimRegs[portNumber][miiReg];
+
+    if (IS_GLOBAL_REG(portNumber))    /* Global register */
+    {
+        switch(miiReg)
+        {
+            case QD_REG_GLOBAL_STATUS:
+                    qdSimRegs[portNumber][miiReg] &= ~0x7F;
+                    if(qdSimRegs[0][QD_PHY_INT_PORT_SUMMARY_REG])
+                        qdSimRegs[portNumber][miiReg] |= 0x2;
+
+                    break;
+            case QD_REG_MACADDR_01:
+            case QD_REG_MACADDR_23:
+            case QD_REG_MACADDR_45:
+            case QD_REG_VTU_OPERATION:
+            case QD_REG_VTU_VID_REG:
+            case QD_REG_VTU_DATA1_REG:
+            case QD_REG_VTU_DATA2_REG:
+            case QD_REG_GLOBAL_CONTROL:
+            case QD_REG_ATU_CONTROL:
+            case QD_REG_ATU_OPERATION:
+            case QD_REG_ATU_DATA_REG:
+            case QD_REG_ATU_MAC_01:
+            case QD_REG_ATU_MAC_23:
+            case QD_REG_ATU_MAC_45:
+            case QD_REG_IP_PRI_REG0:
+            case QD_REG_IP_PRI_REG1:
+            case QD_REG_IP_PRI_REG2:
+            case QD_REG_IP_PRI_REG3:
+            case QD_REG_IP_PRI_REG4:
+            case QD_REG_IP_PRI_REG5:
+            case QD_REG_IP_PRI_REG6:
+            case QD_REG_IP_PRI_REG7:
+            case QD_REG_IEEE_PRI:
+            case QD_REG_STATS_OPERATION:
+            case QD_REG_STATS_COUNTER3_2:
+            case QD_REG_STATS_COUNTER1_0:
+                    break;
+        }
+    }
+    else if(IS_PORT_REG(portNumber))    /* Port registers */
+    {
+        switch(miiReg)
+        {
+            case QD_REG_PORT_STATUS:
+            case QD_REG_SWITCH_ID:
+            case QD_REG_PORT_CONTROL:
+            case QD_REG_PORT_VLAN_MAP:
+            case QD_REG_PVID:
+            case QD_REG_RATE_CTRL:
+            case QD_REG_PAV:
+            case QD_REG_RXCOUNTER:
+            case QD_REG_TXCOUNTER:
+            case QD_REG_Q_COUNTER:
+                    break;
+        }
+    }
+    else if(IS_PHY_REG(portNumber))    /* phy registers */
+    {
+        switch(miiReg)
+        {
+            case QD_PHY_CONTROL_REG:
+                    break;
+            case QD_PHY_INT_ENABLE_REG:
+                    break;
+            case QD_PHY_INT_STATUS_REG:
+                    qdSimRegs[portNumber][miiReg] = 0;
+                    qdSimRegs[0][QD_PHY_INT_PORT_SUMMARY_REG] &= ~(1<<portNumber);
+                    break;
+            case QD_PHY_INT_PORT_SUMMARY_REG:
+                    *value = (unsigned int) qdSimRegs[0][miiReg];
+                    break;
+        }
+    }
+
+    return GT_TRUE;
+}
+
+GT_BOOL qdSimRead (GT_QD_DEV *dev,unsigned int portNumber , unsigned int miiReg, unsigned int* value)
+{
+    if (portNumber >= MAX_SMI_ADDRESS)
+        portNumber -= MAX_SMI_ADDRESS;
+
+    if ((portNumber >= MAX_SMI_ADDRESS) || (miiReg >= MAX_REG_ADDRESS))
+        return GT_FALSE;
+
+    switch(qdSimDev.qdSimDevId)
+    {
+        case GT_88E6051:
+        case GT_88E6052:
+            return qdSimRead_6052(portNumber, miiReg, value);
+        case GT_88E6021:
+            return qdSimRead_6021(portNumber, miiReg, value);
+        case GT_88E6063:
+        case GT_FF_HG:
+        case GT_FF_EG:
+        case GT_FH_VPN:
+            return qdSimRead_6063(portNumber, miiReg, value);
+        case GT_88E6083:
+            return qdSimRead_6083(portNumber, miiReg, value);
+        default:
+            break;
+    }
+
+    return GT_TRUE;
+}
+
+GT_BOOL qdSimWrite_6052 (unsigned int portNumber , unsigned int miiReg, unsigned int value)
+{
+    GT_BOOL status;
+
+    if (IS_GLOBAL_REG(portNumber))    /* Global register */
+    {
+        switch(miiReg)
+        {
+            case QD_REG_GLOBAL_STATUS:
+                    /* readonly register */
+                    return GT_FALSE;
+            case QD_REG_MACADDR_01:
+            case QD_REG_MACADDR_23:
+            case QD_REG_MACADDR_45:
+                    break;
+            case QD_REG_GLOBAL_CONTROL:
+                    if(value & 0x200)
+                    {
+                        /* Reload EEPROM values */
+                        qdSimRegsInit();
+                        qdSimRegs[portNumber][QD_REG_GLOBAL_STATUS] |= 0x1;
+                        return GT_TRUE;
+                    }
+                    break;
+            case QD_REG_ATU_CONTROL:
+                    value &= ~0x8000;
+                    break;
+            case QD_REG_ATU_OPERATION:
+                    status = qdSimATUOperation(value);
+                    return status;
+            case QD_REG_ATU_DATA_REG:
+            case QD_REG_ATU_MAC_01:
+            case QD_REG_ATU_MAC_23:
+            case QD_REG_ATU_MAC_45:
+            case QD_REG_IP_PRI_REG0:
+            case QD_REG_IP_PRI_REG1:
+            case QD_REG_IP_PRI_REG2:
+            case QD_REG_IP_PRI_REG3:
+            case QD_REG_IP_PRI_REG4:
+            case QD_REG_IP_PRI_REG5:
+            case QD_REG_IP_PRI_REG6:
+            case QD_REG_IP_PRI_REG7:
+            case QD_REG_IEEE_PRI:
+                    break;
+            default:
+                    return GT_FALSE;
+        }
+    }
+    else if(IS_PORT_REG(portNumber))    /* Port registers */
+    {
+        switch(miiReg)
+        {
+            case QD_REG_PORT_STATUS:
+            case QD_REG_SWITCH_ID:
+                    /* readonly registers */
+                    return GT_FALSE;
+            case QD_REG_PORT_CONTROL:
+            case QD_REG_PORT_VLAN_MAP:
+            case QD_REG_PVID:
+                    break;
+            case QD_REG_RXCOUNTER:
+            case QD_REG_TXCOUNTER:
+                    /* readonly registers */
+                    return GT_FALSE;
+            default:
+                    return GT_FALSE;
+        }
+    }
+    else if(IS_PHY_REG(portNumber))    /* phy registers */
+    {
+        switch(miiReg)
+        {
+            case QD_PHY_CONTROL_REG:
+                    return qdSimPhyControl(portNumber,miiReg,value);
+            case QD_PHY_INT_ENABLE_REG:
+            case QD_PHY_AUTONEGO_AD_REG:
+            case QD_PHY_NEXTPAGE_TX_REG:
+            case QD_PHY_SPEC_CONTROL_REG:
+                    break;
+            case QD_PHY_INT_STATUS_REG:
+            case QD_PHY_INT_PORT_SUMMARY_REG:
+                    return GT_FALSE;
+            default:
+                    return GT_FALSE;
+        }
+    }
+    else
+        return GT_FALSE;
+
+    qdSimRegs[portNumber][miiReg] = (GT_U16)value;
+    return GT_TRUE;
+}
+
+GT_BOOL qdSimWrite_6021 (unsigned int portNumber , unsigned int miiReg, unsigned int value)
+{
+    GT_BOOL status;
+
+    if (IS_GLOBAL_REG(portNumber))    /* Global register */
+    {
+        switch(miiReg)
+        {
+            case QD_REG_GLOBAL_STATUS:
+                    /* readonly register */
+                    return GT_FALSE;
+            case QD_REG_MACADDR_01:
+            case QD_REG_MACADDR_23:
+            case QD_REG_MACADDR_45:
+                    break;
+            case QD_REG_VTU_OPERATION:
+                    qdSimRegs[qdSimDev.qdSimGlobalRegBase][5] &= ~0xF;
+                    qdSimRegs[qdSimDev.qdSimGlobalRegBase][5] |= (value & 0xF);
+                    status = qdSimVTUOperation(value);
+                    return status;
+            case QD_REG_VTU_VID_REG:
+            case QD_REG_VTU_DATA1_REG:
+            case QD_REG_VTU_DATA2_REG:
+                    break;
+            case QD_REG_GLOBAL_CONTROL:
+                    if(value & 0x200)
+                    {
+                        /* Reload EEPROM values */
+                        qdSimRegsInit();
+                        qdSimRegs[portNumber][QD_REG_GLOBAL_STATUS] |= 0x1;
+                        return GT_TRUE;
+                    }
+                    break;
+            case QD_REG_ATU_CONTROL:
+                    value &= ~0x8000;
+                    break;
+            case QD_REG_ATU_OPERATION:
+                    qdSimRegs[qdSimDev.qdSimGlobalRegBase][11] &= ~0xF;
+                    qdSimRegs[qdSimDev.qdSimGlobalRegBase][11] |= (value & 0xF);
+                    status = qdSimATUOperation(value);
+                    return status;
+            case QD_REG_ATU_DATA_REG:
+            case QD_REG_ATU_MAC_01:
+            case QD_REG_ATU_MAC_23:
+            case QD_REG_ATU_MAC_45:
+            case QD_REG_IP_PRI_REG0:
+            case QD_REG_IP_PRI_REG1:
+            case QD_REG_IP_PRI_REG2:
+            case QD_REG_IP_PRI_REG3:
+            case QD_REG_IP_PRI_REG4:
+            case QD_REG_IP_PRI_REG5:
+            case QD_REG_IP_PRI_REG6:
+            case QD_REG_IP_PRI_REG7:
+            case QD_REG_IEEE_PRI:
+                    break;
+            case QD_REG_STATS_OPERATION:
+                    status = qdSimStatsOperation(value);
+                    return status;
+            case QD_REG_STATS_COUNTER3_2:
+            case QD_REG_STATS_COUNTER1_0:
+                    return GT_FALSE;
+            default:
+                    return GT_FALSE;
+        }
+    }
+    else if(IS_PORT_REG(portNumber))    /* Port registers */
+    {
+        switch(miiReg)
+        {
+            case QD_REG_PORT_STATUS:
+                    if(portNumber > 9)
+                    {
+                        qdSimRegs[portNumber][miiReg] &= ~QD_PORT_STATUS_DUPLEX;
+                        qdSimRegs[portNumber][miiReg] |= (value & QD_PORT_STATUS_DUPLEX);
+                        return GT_TRUE;
+                    }
+            case QD_REG_SWITCH_ID:
+                    /* readonly registers */
+                    return GT_FALSE;
+            case QD_REG_PORT_CONTROL:
+            case QD_REG_PORT_VLAN_MAP:
+            case QD_REG_PVID:
+                    break;
+            case QD_REG_RATE_CTRL:
+            case QD_REG_PAV:
+            case QD_REG_RXCOUNTER:
+            case QD_REG_TXCOUNTER:
+                    /* readonly registers */
+                    return GT_FALSE;
+            case QD_REG_Q_COUNTER:
+                    return GT_FALSE;
+            default:
+                    return GT_FALSE;
+        }
+    }
+    else if(IS_PHY_REG(portNumber))    /* phy registers */
+    {
+        switch(miiReg)
+        {
+            case QD_PHY_CONTROL_REG:
+                    return qdSimPhyControl(portNumber,miiReg,value);
+            case QD_PHY_INT_ENABLE_REG:
+            case QD_PHY_AUTONEGO_AD_REG:
+            case QD_PHY_NEXTPAGE_TX_REG:
+            case QD_PHY_SPEC_CONTROL_REG:
+                    break;
+            case QD_PHY_INT_STATUS_REG:
+            case QD_PHY_INT_PORT_SUMMARY_REG:
+                    return GT_FALSE;
+            default:
+                    return GT_FALSE;
+        }
+    }
+    else
+        return GT_FALSE;
+
+    qdSimRegs[portNumber][miiReg] = (GT_U16)value;
+    return GT_TRUE;
+}
+
+GT_BOOL qdSimWrite_6063 (unsigned int portNumber , unsigned int miiReg, unsigned int value)
+{
+    GT_BOOL status;
+
+    if (IS_GLOBAL_REG(portNumber))    /* Global register */
+    {
+        switch(miiReg)
+        {
+            case QD_REG_GLOBAL_STATUS:
+                    /* readonly register */
+                    return GT_FALSE;
+            case QD_REG_MACADDR_01:
+            case QD_REG_MACADDR_23:
+            case QD_REG_MACADDR_45:
+                    break;
+            case QD_REG_VTU_OPERATION:
+                    qdSimRegs[qdSimDev.qdSimGlobalRegBase][5] &= ~0xF;
+                    qdSimRegs[qdSimDev.qdSimGlobalRegBase][5] |= (value & 0xF);
+                    status = qdSimVTUOperation(value);
+                    return status;
+            case QD_REG_VTU_VID_REG:
+            case QD_REG_VTU_DATA1_REG:
+            case QD_REG_VTU_DATA2_REG:
+                    break;
+            case QD_REG_GLOBAL_CONTROL:
+                    if(value & 0x200)
+                    {
+                        /* Reload EEPROM values */
+                        qdSimRegsInit();
+                        qdSimRegs[portNumber][QD_REG_GLOBAL_STATUS] |= 0x1;
+                        return GT_TRUE;
+                    }
+                    break;
+            case QD_REG_ATU_CONTROL:
+                    value &= ~0x8000;
+                    break;
+            case QD_REG_ATU_OPERATION:
+                    qdSimRegs[qdSimDev.qdSimGlobalRegBase][11] &= ~0xF;
+                    qdSimRegs[qdSimDev.qdSimGlobalRegBase][11] |= (value & 0xF);
+                    status = qdSimATUOperation(value);
+                    return status;
+            case QD_REG_ATU_DATA_REG:
+            case QD_REG_ATU_MAC_01:
+            case QD_REG_ATU_MAC_23:
+            case QD_REG_ATU_MAC_45:
+            case QD_REG_IP_PRI_REG0:
+            case QD_REG_IP_PRI_REG1:
+            case QD_REG_IP_PRI_REG2:
+            case QD_REG_IP_PRI_REG3:
+            case QD_REG_IP_PRI_REG4:
+            case QD_REG_IP_PRI_REG5:
+            case QD_REG_IP_PRI_REG6:
+            case QD_REG_IP_PRI_REG7:
+            case QD_REG_IEEE_PRI:
+                    break;
+            case QD_REG_STATS_OPERATION:
+                    status = qdSimStatsOperation(value);
+                    return status;
+            case QD_REG_STATS_COUNTER3_2:
+            case QD_REG_STATS_COUNTER1_0:
+                    return GT_FALSE;
+            default:
+                    return GT_FALSE;
+        }
+    }
+    else if(IS_PORT_REG(portNumber))    /* Port registers */
+    {
+        switch(miiReg)
+        {
+            case QD_REG_PORT_STATUS:
+                    if(portNumber > 12)
+                    {
+                        qdSimRegs[portNumber][miiReg] &= ~QD_PORT_STATUS_DUPLEX;
+                        qdSimRegs[portNumber][miiReg] |= (value & QD_PORT_STATUS_DUPLEX);
+                        return GT_TRUE;
+                    }
+            case QD_REG_SWITCH_ID:
+                    /* readonly registers */
+                    return GT_FALSE;
+            case QD_REG_PORT_CONTROL:
+            case QD_REG_PORT_VLAN_MAP:
+            case QD_REG_PVID:
+            case QD_REG_RATE_CTRL:
+            case QD_REG_PAV:
+                    break;
+            case QD_REG_RXCOUNTER:
+            case QD_REG_TXCOUNTER:
+                    /* readonly registers */
+                    return GT_FALSE;
+            case QD_REG_Q_COUNTER:
+                    return GT_FALSE;
+            default:
+                    return GT_FALSE;
+        }
+    }
+    else if(IS_PHY_REG(portNumber))    /* phy registers */
+    {
+        switch(miiReg)
+        {
+            case QD_PHY_CONTROL_REG:
+                    return qdSimPhyControl(portNumber,miiReg,value);
+            case QD_PHY_INT_ENABLE_REG:
+            case QD_PHY_AUTONEGO_AD_REG:
+            case QD_PHY_NEXTPAGE_TX_REG:
+            case QD_PHY_SPEC_CONTROL_REG:
+                    break;
+            case QD_PHY_INT_STATUS_REG:
+            case QD_PHY_INT_PORT_SUMMARY_REG:
+                    return GT_FALSE;
+            default:
+                    return GT_FALSE;
+        }
+    }
+    else
+        return GT_FALSE;
+
+    qdSimRegs[portNumber][miiReg] = (GT_U16)value;
+    return GT_TRUE;
+}
+
+GT_BOOL qdSimWrite_6083 (unsigned int portNumber , unsigned int miiReg, unsigned int value)
+{
+    GT_BOOL status;
+
+    if (IS_GLOBAL_REG(portNumber))    /* Global register */
+    {
+        switch(miiReg)
+        {
+            case QD_REG_GLOBAL_STATUS:
+                    /* readonly register */
+                    return GT_FALSE;
+            case QD_REG_MACADDR_01:
+            case QD_REG_MACADDR_23:
+            case QD_REG_MACADDR_45:
+                    break;
+            case QD_REG_VTU_OPERATION:
+                    qdSimRegs[qdSimDev.qdSimGlobalRegBase][5] &= ~0xF;
+                    qdSimRegs[qdSimDev.qdSimGlobalRegBase][5] |= (value & 0xF);
+                    status = qdSimVTUOperation(value);
+                    return status;
+            case QD_REG_VTU_VID_REG:
+            case QD_REG_VTU_DATA1_REG:
+            case QD_REG_VTU_DATA2_REG:
+            case QD_REG_VTU_DATA3_REG:
+                    break;
+            case QD_REG_GLOBAL_CONTROL:
+                    if(value & 0x200)
+                    {
+                        /* Reload EEPROM values */
+                        qdSimRegsInit();
+                        qdSimRegs[portNumber][QD_REG_GLOBAL_STATUS] |= 0x1;
+                        return GT_TRUE;
+                    }
+                    break;
+            case QD_REG_ATU_CONTROL:
+                    value &= ~0x8000;
+                    break;
+            case QD_REG_ATU_OPERATION:
+                    qdSimRegs[qdSimDev.qdSimGlobalRegBase][11] &= ~0xF;
+                    qdSimRegs[qdSimDev.qdSimGlobalRegBase][11] |= (value & 0xF);
+                    status = qdSimATUOperation(value);
+                    return status;
+            case QD_REG_ATU_DATA_REG:
+            case QD_REG_ATU_MAC_01:
+            case QD_REG_ATU_MAC_23:
+            case QD_REG_ATU_MAC_45:
+            case QD_REG_IP_PRI_REG0:
+            case QD_REG_IP_PRI_REG1:
+            case QD_REG_IP_PRI_REG2:
+            case QD_REG_IP_PRI_REG3:
+            case QD_REG_IP_PRI_REG4:
+            case QD_REG_IP_PRI_REG5:
+            case QD_REG_IP_PRI_REG6:
+            case QD_REG_IP_PRI_REG7:
+            case QD_REG_IEEE_PRI:
+                    break;
+            case QD_REG_STATS_OPERATION:
+                    status = qdSimStatsOperation(value);
+                    return status;
+            case QD_REG_STATS_COUNTER3_2:
+            case QD_REG_STATS_COUNTER1_0:
+                    return GT_FALSE;
+            default:
+                    return GT_FALSE;
+        }
+    }
+    else if(IS_PORT_REG(portNumber))    /* Port registers */
+    {
+        switch(miiReg)
+        {
+            case QD_REG_PORT_STATUS:
+                    if(portNumber > 12)
+                    {
+                        qdSimRegs[portNumber][miiReg] &= ~QD_PORT_STATUS_DUPLEX;
+                        qdSimRegs[portNumber][miiReg] |= (value & QD_PORT_STATUS_DUPLEX);
+                        return GT_TRUE;
+                    }
+            case QD_REG_SWITCH_ID:
+                    /* readonly registers */
+                    return GT_FALSE;
+            case QD_REG_PORT_CONTROL:
+            case QD_REG_PORT_VLAN_MAP:
+            case QD_REG_PVID:
+            case QD_REG_RATE_CTRL:
+            case QD_REG_PAV:
+                    break;
+            case QD_REG_RXCOUNTER:
+            case QD_REG_TXCOUNTER:
+                    /* readonly registers */
+                    return GT_FALSE;
+            case QD_REG_Q_COUNTER:
+                    return GT_FALSE;
+            default:
+                    return GT_FALSE;
+        }
+    }
+    else if(IS_PHY_REG(portNumber))    /* phy registers */
+    {
+        switch(miiReg)
+        {
+            case QD_PHY_CONTROL_REG:
+                    return qdSimPhyControl(portNumber,miiReg,value);
+            case QD_PHY_INT_ENABLE_REG:
+            case QD_PHY_AUTONEGO_AD_REG:
+            case QD_PHY_NEXTPAGE_TX_REG:
+            case QD_PHY_SPEC_CONTROL_REG:
+                    break;
+            case QD_PHY_INT_STATUS_REG:
+            case QD_PHY_INT_PORT_SUMMARY_REG:
+                    return GT_FALSE;
+            default:
+                    return GT_FALSE;
+        }
+    }
+    else
+        return GT_FALSE;
+
+    qdSimRegs[portNumber][miiReg] = (GT_U16)value;
+    return GT_TRUE;
+}
+
+
+GT_BOOL qdSimWrite (GT_QD_DEV *dev,unsigned int portNumber , unsigned int miiReg, unsigned int value)
+{
+    if (portNumber >= MAX_SMI_ADDRESS)
+        portNumber -= MAX_SMI_ADDRESS;
+
+    if ((portNumber >= MAX_SMI_ADDRESS) || (miiReg >= MAX_REG_ADDRESS))
+        return GT_FALSE;
+
+    switch(qdSimDev.qdSimDevId)
+    {
+        case GT_88E6051:
+        case GT_88E6052:
+            return qdSimWrite_6052(portNumber, miiReg, value);
+        case GT_88E6021:
+            return qdSimWrite_6021(portNumber, miiReg, value);
+        case GT_88E6063:
+        case GT_FF_HG:
+        case GT_FF_EG:
+        case GT_FH_VPN:
+            return qdSimWrite_6063(portNumber, miiReg, value);
+        case GT_88E6083:
+            return qdSimWrite_6083(portNumber, miiReg, value);
+
+        default:
+            break;
+    }
+
+    return GT_TRUE;
+}
+
+GT_STATUS qdSimSetPhyInt(unsigned int portNumber, unsigned short u16Data)
+{
+    if(!qdSimDev.qdSimUsed)
+        return GT_FAIL;
+
+    qdSimRegs[portNumber][QD_PHY_INT_STATUS_REG] = u16Data;
+    if(u16Data)
+        qdSimRegs[0][QD_PHY_INT_PORT_SUMMARY_REG] |= (1<<portNumber);
+    else
+        qdSimRegs[0][QD_PHY_INT_PORT_SUMMARY_REG] &= ~(1<<portNumber);
+
+    qdSimRegs[MAX_SMI_ADDRESS-1][QD_REG_GLOBAL_STATUS] |= 0x2;
+    return GT_OK;
+}
+
+GT_STATUS qdSimSetGlobalInt(unsigned short u16Data)
+{
+    if(!qdSimDev.qdSimUsed)
+        return GT_FAIL;
+
+    qdSimRegs[MAX_SMI_ADDRESS-1][QD_REG_GLOBAL_STATUS] |= (u16Data & 0xF);
+    return GT_OK;
+}
+
+
+void qdSimInit(GT_DEVICE devId, int baseAddr)
+{
+    qdSimDev.qdSimUsed = 1;
+
+    qdSimDev.qdSimDevId = devId;
+    qdSimDev.vtuSize = 0;
+
+    qdSimDev.qdSimPhyBase = baseAddr;
+    qdSimDev.qdSimPortBase = baseAddr + 0x8;
+    qdSimDev.qdSimGlobalRegBase = baseAddr + 0xF;
+
+    switch(devId)
+    {
+        case GT_88E6021:
+            qdSimDev.vtuSize = 16;
+            qdSimDev.qdSimNumOfPhys = 2;
+            qdSimDev.qdSimNumOfPorts = 3;
+            break;
+        case GT_88E6051:
+            qdSimDev.qdSimNumOfPhys = 5;
+            qdSimDev.qdSimNumOfPorts = 6;
+            break;
+        case GT_88E6063:
+        case GT_FH_VPN:
+            qdSimDev.vtuSize = 64;
+        case GT_88E6052:
+        case GT_FF_HG:
+        case GT_FF_EG:
+            qdSimDev.qdSimNumOfPhys = 5;
+            qdSimDev.qdSimNumOfPorts = 7;
+            break;
+        case GT_88E6083:
+            qdSimDev.vtuSize = 64;
+            qdSimDev.qdSimNumOfPhys = 8;
+            qdSimDev.qdSimNumOfPorts = 10;
+            qdSimDev.qdSimPhyBase = 0;
+            qdSimDev.qdSimPortBase = 0x10;
+            qdSimDev.qdSimGlobalRegBase = 0x1b;
+            break;
+        default:
+            qdSimDev.vtuSize = 64;
+            qdSimDev.qdSimDevId = GT_88E6063;
+            qdSimDev.qdSimNumOfPhys = 5;
+            qdSimDev.qdSimNumOfPorts = 7;
+            break;
+    }
+
+    qdSimATUInit();
+    qdSimVTUInit();
+    qdSimRegsInit();
+
+    return;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/readme.txt b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/readme.txt
new file mode 100644
index 000000000000..5b7323a45993
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Initialization/readme.txt
@@ -0,0 +1,85 @@
+========================================================================
+		QuaterDeck Initialization called by BSP
+========================================================================
+
+Initialization Sample Program will show how to initialize the QuaterDeck
+Driver/Device.
+
+This Sample includes the following files:
+	- msApiInit.c
+	- qdSim.c
+	- qdSimRegs.h
+	- ev96122mii.c
+	- osSem.c
+
+msApiInit.c
+	qdStart is the main function of this Sample and does the followings:
+	1) register the required functions.(gtRegister API)
+		1.1) readMii - BSP specific MII read function
+						(provided by BSP and required by all QuarterDeck API)
+		1.2) writeMii - BSP specific MII write function
+						(provided by BSP and required by all QuarterDeck API)
+		1.3) semCreate - OS specific semaphore create function.
+						(provided by BSP and recommanded by QuarterDeck MAC
+						address database API)
+		1.4) semDelete - OS specific semaphore delete function.
+						(provided by BSP and recommanded by QuarterDeck MAC
+						address database API)
+		1.5) semTake - OS specific semaphore take function.
+						(provided by BSP and recommanded by QuarterDeck MAC
+						address database API)
+		1.6) semGive - OS specific semaphore give function.
+						(provided by BSP and recommanded by QuarterDeck MAC
+						address database API)
+		Notes) The given example will use EV96122 BSP and QuarterDeck Simulator
+		as an example.
+
+	2) Initialize BSP provided routine (if required).
+		Notes) QuarterDeck Simulator needs to be initialized.(qdSimInit)
+
+	3) Calls sysConfig routine.
+		1.1) Input (GT_SYS_CONFIG) - CPU Port Number (Board Specific,
+		either port 5 or port 6) and Port state (either 1 for Forwarding mode
+		or 0 for Blocked mode)
+		1.2) Output (GT_SYS_INFO) - Device ID, Base MII Address (either 0 or
+		0x10), Number of Ports, and CPU port number.
+
+	4) Calls sysEnable (for future use.)
+
+qdSim.c (QuaterDeck Simulator)
+    Simulates QuaterDeck Device(88E6052)'s register map. When QuareterDeck API
+	try to read/write a bit or bits into QuaterDeck, the simulator will
+	redirect to its own memory place and performing the function very close to
+	QuaterDeck. For example,
+	1) user can set/reset a certain bit of QuarterDeck registers
+		(Phy,Port,and General registers).
+	2) user can access ATU (flush, load, purge, etc. with max MAC addresses
+		of 32)
+	3) user can manually generate an Interrupt and test the Interrupt routine.
+	4) when user read a register, it will clear a certain register if it's a
+		Self Clear register.
+	5) when user write a register, it will return ERROR if it's read only
+		register.
+	Notes) Simulator can be used when user has no QuarterDeck device connected Board.
+
+	Exported routines are :
+		qdSimRead 	for reading MII registers,
+		qdSimWrite 	for writing to MII registers, and
+		qdSimInit 	for initializing Simulator.
+
+ev96122mii.c
+	Provides EV-96122 Board specific MII access functions.
+
+	Exported routines are :
+		gtBspReadMii 	for reading MII registers,
+		gtBspWriteMii	for writing to MII registers, and
+		gtBspMiiInit 	for initializing EV-96122 and QuarterDeck connection.
+
+osSem.c
+	Provides OS specific Semapore Functions.
+
+	Exported routines are :
+		osSemCreate 	for semaphore creation
+		osSemDelete 	for semaphore deletion
+		osSemWait 		for taking semaphore
+		osSemSignal 	for releasing semaphore
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Interrupt/qdInt.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Interrupt/qdInt.c
new file mode 100644
index 000000000000..9d6c6d779c7f
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Interrupt/qdInt.c
@@ -0,0 +1,386 @@
+#include <Copyright.h>
+/********************************************************************************
+* qdInt.c
+*
+* DESCRIPTION:
+*        This sample shows how to call QuarterDeck Interrupt handler when QD INT
+*        raised, and how to take care each Interrupt Cause.
+*
+* DEPENDENCIES:   NONE.
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+
+#include "msSample.h"
+
+
+/*
+ *    To enable quarterDeck interrupt, you need to call eventSetActive() and
+ *    gprtPhyIntEnable(), as following sample routine.
+ *    sampleQDIntEnable will enable all interrupt causes.
+ *    For Port, GT_ATU_FULL, GT_ATU_DONE, GT_PHY_INTERRUPT, and GT_EE_INTERRUPT
+ *    are enabled.
+ *
+ *    In this sample, GT_SPEED_CHANGED, GT_DUPLEX_CHANGED, and
+ *  GT_LINK_STATUS_CHANGED are enabled for ports 0 ~ 2.
+*/
+GT_STATUS sampleQDIntEnable(GT_QD_DEV *dev)
+{
+    GT_STATUS status;
+    GT_LPORT port;
+    GT_U16 data;
+
+    /*
+     *    Enable QuarterDeck interrupt for ATUFull, ATUDone, PHYInt, and EEInt.
+     *    If writing 0 into eventSetActive(), all port interrupt will be disabled.
+    */
+    data = GT_STATS_DONE|GT_VTU_PROB|GT_VTU_DONE|
+           GT_ATU_FULL|GT_ATU_DONE|GT_EE_INTERRUPT;
+/*           GT_ATU_FULL|GT_ATU_DONE|GT_PHY_INTERRUPT|GT_EE_INTERRUPT; */
+    if((status = eventSetActive(dev,data)) != GT_OK)
+    {
+        MSG_PRINT(("eventSetActive returned fail.\n"));
+        return status;
+    }
+
+    /*
+     *    Enable Phy interrupt for every possible interrupt cause.
+     *    If writing 0 into gprtPhyIntEnable(), all port interrupt will be disabled.
+    */
+    data =     GT_SPEED_CHANGED|GT_DUPLEX_CHANGED|GT_LINK_STATUS_CHANGED;
+
+    for(port=0; port<3; port++)
+    {
+        if((status = gprtPhyIntEnable(dev,port,data)) != GT_OK)
+        {
+            MSG_PRINT(("gprtPhyIntEnable returned fail.\n"));
+            return status;
+        }
+    }
+
+    return GT_OK;
+}
+
+/*
+ *    Disable QuarterDeck Interrupt.
+*/
+GT_STATUS sampleQDIntDisable(GT_QD_DEV *dev)
+{
+    GT_STATUS status;
+    GT_LPORT port;
+
+    /*
+     *    Writing 0 into eventSetActive(), all port interrupt will be disabled.
+    */
+    if((status = eventSetActive(dev,0)) != GT_OK)
+    {
+        MSG_PRINT(("eventSetActive returned fail.\n"));
+        return status;
+    }
+
+    /*
+     *    Writing 0 into gprtPhyIntEnable(), all port interrupt will be disabled.
+    */
+    for(port=0; port<3; port++)
+    {
+        if((status = gprtPhyIntEnable(dev,port,0)) != GT_OK)
+        {
+            MSG_PRINT(("gprtPhyIntEnable returned fail.\n"));
+            return status;
+        }
+    }
+    return GT_OK;
+}
+
+
+/*
+ *    Assume that the following function, sampleQDIntVector(), is registered
+ *    when BSP calls intConnect for QD Interrupt.
+ *    This sample will show how to deal with QuarterDeck Interrupt.
+*/
+GT_STATUS sampleQDIntVector(GT_QD_DEV *dev)
+{
+    GT_U16 intCause, phyIntCause;
+    GT_U16 portVec;
+    GT_LPORT port;
+    GT_VTU_INT_STATUS vtuInt;
+    GT_ATU_INT_STATUS atuInt;
+
+    /*
+     *    Disable QuarterDeck Interrupt in System Level.
+     *    ToDo...
+    */
+
+    /*
+     *    Check if QD generated the interrupt.
+    */
+    if(eventGetIntStatus(dev,&intCause) != GT_OK)
+    {
+        /* QD didn't generate the interrupt. */
+        return GT_FAIL;
+    }
+
+    /*
+     *    QD generated interrupt with the reason in intCause.
+    */
+
+    if(intCause & GT_STATS_DONE)
+    {
+        /*
+         *    Statistics Done Interrupt
+         *    ToDo...
+        */
+
+    }
+    if(intCause & GT_VTU_DONE)
+    {
+        /*
+         *    VTU Done Interrupt
+         *    ToDo...
+        */
+
+    }
+
+    if(intCause & GT_VTU_PROB)
+    {
+        /*
+         *    Vlan Table Problem/Violation.
+         *    Need to read the cause.
+        */
+        do {
+            if(gvtuGetIntStatus(dev,&vtuInt) != GT_OK)
+            {
+                /* failed to retrieve VTU Interrupt cause */
+                break;
+            }
+
+            if(vtuInt.vtuIntCause & GT_VTU_FULL_VIOLATION)
+            {
+                /*
+                 *    Vlan Table is Full
+                 *    ToDo...
+                */
+            }
+
+            if(vtuInt.vtuIntCause & GT_MEMBER_VIOLATION)
+            {
+                /*
+                 *    Member Violation
+                 *    ToDo...
+                */
+            }
+
+            if(vtuInt.vtuIntCause & GT_MISS_VIOLATION)
+            {
+                /*
+                 *    Miss Violation
+                 *    ToDo...
+                */
+            }
+        } while(vtuInt.vtuIntCause != 0);
+    }
+
+    if(intCause & GT_ATU_PROB)
+    {
+        /*
+         *    ATU cannot load or learn a new mapping due to all the available
+         *    locations for an address being locked.
+         *    ToDo...
+        */
+        do {
+            if(gatuGetIntStatus(dev,&atuInt) != GT_OK)
+            {
+                /* failed to retrieve VTU Interrupt cause */
+                break;
+            }
+
+            if(atuInt.atuIntCause & GT_FULL_VIOLATION)
+            {
+                /*
+                 *    Table is Full
+                 *    ToDo...
+                */
+            }
+
+            if(atuInt.atuIntCause & GT_MEMBER_VIOLATION)
+            {
+                /*
+                 *    Member Violation
+                 *    ToDo...
+                */
+            }
+
+            if(atuInt.atuIntCause & GT_MISS_VIOLATION)
+            {
+                /*
+                 *    Miss Violation
+                 *    ToDo...
+                */
+            }
+        } while(atuInt.atuIntCause != 0);
+
+    }
+
+    if(intCause & GT_ATU_DONE)
+    {
+        /*
+         *    There is a transitions from a one to a zero on ATUBusy bit
+         *    (Refer to ATU Operation Register.)
+         *    ToDo...
+        */
+
+    }
+
+    if(intCause & GT_PHY_INTERRUPT)
+    {
+        /*
+         *    At least one of the Phy generated interrupt.
+         *    We need to read Phy Interrupt Summary and go through each phy
+         *    based on the summary.
+        */
+
+        if(gprtGetPhyIntPortSummary(dev,&portVec) != GT_OK)
+        {
+            return GT_FAIL;
+        }
+
+        port = 0;
+        while(portVec)
+        {
+            if(portVec & 0x01)
+            {
+                /*
+                 *    Call gprtGetPhyIntStatus to get intCause
+                */
+                if(gprtGetPhyIntStatus(dev,port,&phyIntCause) != GT_OK)
+                {
+                    /*
+                     *    Something wrong with the system. Need to do the
+                     *    necessary work.
+                     *    ToDo...
+                    */
+                }
+
+                if(phyIntCause & GT_SPEED_CHANGED)
+                {
+                    /*
+                     *    Speed has been changed.
+                     *    ToDo...
+                    */
+                }
+
+                if(phyIntCause & GT_DUPLEX_CHANGED)
+                {
+                    /*
+                     *    Duplex mode has been changed.
+                     *    ToDo...
+                    */
+                }
+
+                if(phyIntCause & GT_PAGE_RECEIVED)
+                {
+                    /*
+                     *    Page received.
+                     *    ToDo...
+                    */
+
+                }
+
+                if(phyIntCause & GT_AUTO_NEG_COMPLETED)
+                {
+                    /*
+                     *    AutoNegotiation completed.
+                     *    ToDo...
+                    */
+
+                }
+
+                if(phyIntCause & GT_LINK_STATUS_CHANGED)
+                {
+                    /*
+                     *    Link Status changed.
+                     *    ToDo...
+                    */
+
+                }
+
+                if(phyIntCause & GT_SYMBOL_ERROR)
+                {
+                    /*
+                     *    Symbol error
+                     *    ToDo...
+                    */
+
+                }
+
+                if(phyIntCause & GT_FALSE_CARRIER)
+                {
+                    /*
+                     *    False Carrier.
+                     *    ToDo...
+                    */
+
+                }
+
+                if(phyIntCause & GT_FIFO_FLOW)
+                {
+                    /*
+                     *    Fifo Overflow/underflow error
+                     *    ToDo...
+                    */
+
+                }
+
+                if(phyIntCause & GT_CROSSOVER_CHANGED)
+                {
+                    /*
+                     *    MDI/MDIX crossover changed.
+                     *    ToDo...
+                    */
+
+                }
+
+                if(phyIntCause & GT_POLARITY_CHANGED)
+                {
+                    /*
+                     *    Polarity changed.
+                     *    ToDo...
+                    */
+
+                }
+
+                if(phyIntCause & GT_JABBER)
+                {
+                    /*
+                     *    Jabber
+                     *    ToDo...
+                    */
+
+                }
+            }
+
+            portVec >>= 1;
+            port++;
+        }
+    }
+
+    if(intCause & GT_EE_INTERRUPT)
+    {
+        /*
+         *    EEPROM is done loading registers.
+         *    ToDo...
+        */
+
+    }
+
+
+    /*
+     *    Now, all the QuarterDeck related interrupt have been cleared,
+     *    so it's OK to enable QuarterDeck Interrupt in System Level.
+     *    ToDo...
+    */
+
+    return GT_OK;
+
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Interrupt/readme.txt b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Interrupt/readme.txt
new file mode 100644
index 000000000000..778aad138f19
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Interrupt/readme.txt
@@ -0,0 +1,13 @@
+========================================================================
+		QuaterDeck Interrupt Handler called by BSP Interrupt Vector
+========================================================================
+
+This sample shows how to enable/disable QuaterDeck Interrupt, and how to
+handle each Interrupt.
+
+qdInt.c
+	sampleQDIntEnable() will enable all the interrupt supported by
+	QuaterDeck.
+	sampleQDIntDisable() will disable QuarterDeck Interrupt.
+	sampleQDIntVector() will show how BSP interrupt service routine can
+	deal with QuarterDeck Interrupt.
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/LoadBalance/loadBalance.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/LoadBalance/loadBalance.c
new file mode 100644
index 000000000000..ef56559beab8
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/LoadBalance/loadBalance.c
@@ -0,0 +1,57 @@
+#include <Copyright.h>
+/********************************************************************************
+* loadBalance.c
+*
+* DESCRIPTION:
+*        This sample shows how to setup load balance among Trunk ports.
+*        In this sample, port 0,1,2, and 3 will be in the Trunk group.
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*
+* COMMENTS:
+*******************************************************************************/
+
+#include "msSample.h"
+
+
+/*
+   The following sample sets Trunk Mask Table as follows:
+
+                      10    9    8    7    6    5    4    3    2    1    0
+   TrunkMask[0]        1    1    1    1    1    1    1    0    0    0    1
+   TrunkMask[1]        1    1    1    1    1    1    1    0    0    1    0
+   TrunkMask[2]        1    1    1    1    1    1    1    0    1    0    0
+   TrunkMask[3]        1    1    1    1    1    1    1    1    0    0    0
+   TrunkMask[4]        1    1    1    1    1    1    1    0    0    0    1
+   TrunkMask[5]        1    1    1    1    1    1    1    0    0    1    0
+   TrunkMask[6]        1    1    1    1    1    1    1    0    1    0    0
+   TrunkMask[7]        1    1    1    1    1    1    1    1    0    0    0
+*/
+
+GT_STATUS sampleLoadBalance(GT_QD_DEV *dev)
+{
+    GT_STATUS status;
+    int i;
+    GT_U32 mask, baseMask;
+
+    baseMask = (1 << dev->numOfPorts) - 1;
+    baseMask &= 0xFFF0;    /* clear bits for port 0 ~ 3 */
+
+    /*
+     *    Set the trunk mask table for load balancing.
+    */
+    for(i=0; i<8; i++)
+    {
+        mask = baseMask | (1 << (i%4));
+
+        if((status = gsysSetTrunkMaskTable(dev,i,mask)) != GT_OK)
+        {
+            MSG_PRINT(("gsysSetTrunkMaskTable return Failed\n"));
+            return status;
+        }
+    }
+
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/LoadBalance/readme.txt b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/LoadBalance/readme.txt
new file mode 100644
index 000000000000..3238a88be93d
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/LoadBalance/readme.txt
@@ -0,0 +1,9 @@
+========================================================================
+		Setup for Load Balancing
+========================================================================
+
+This sample shows how to setup Trunk Mask Table for load balancing.
+For more information about load balancing, please refer to 88E6095 Spec.
+
+loadBalance.c
+	sampleLoadBalance demonstrates how to setup Trunk Mask Table.
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/MACAddress/macAddr.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/MACAddress/macAddr.c
new file mode 100644
index 000000000000..da20fc92e2a9
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/MACAddress/macAddr.c
@@ -0,0 +1,223 @@
+#include <Copyright.h>
+/********************************************************************************
+* macAddr.c
+*
+* DESCRIPTION:
+*    This sample will demonstrate how to add/delete a static MAC Address
+*    into/from the QuaterDeck MAC Address Data Base.
+*
+* DEPENDENCIES:   None.
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+#include "msSample.h"
+
+/*
+ *    Add the CPU MAC address into the QuaterDeck MAC Address database.
+ *    Input - None
+*/
+GT_STATUS sampleAddCPUMac(GT_QD_DEV *dev)
+{
+    GT_STATUS status;
+    GT_ATU_ENTRY macEntry;
+
+    /*
+     *    Assume that Ethernet address for the CPU MAC is
+     *    00-50-43-00-01-02.
+    */
+    macEntry.macAddr.arEther[0] = 0x00;
+    macEntry.macAddr.arEther[1] = 0x50;
+    macEntry.macAddr.arEther[2] = 0x43;
+    macEntry.macAddr.arEther[3] = 0x00;
+    macEntry.macAddr.arEther[4] = 0x01;
+    macEntry.macAddr.arEther[5] = 0x02;
+
+    macEntry.portVec = 1 << dev->cpuPortNum;     /* CPU Port number. 7bits are used for portVector. */
+
+    macEntry.prio = 0;            /* Priority (2bits). When these bits are used they override
+                                any other priority determined by the frame's data. This value is
+                                meaningful only if the device does not support extended priority
+                                information such as MAC Queue Priority and MAC Frame Priority */
+
+    macEntry.exPrio.macQPri = 0;    /* If device doesnot support MAC Queue Priority override,
+                                    this field is ignored. */
+    macEntry.exPrio.macFPri = 0;    /* If device doesnot support MAC Frame Priority override,
+                                    this field is ignored. */
+    macEntry.exPrio.useMacFPri = 0;    /* If device doesnot support MAC Frame Priority override,
+                                    this field is ignored. */
+
+    macEntry.entryState.ucEntryState = GT_UC_STATIC;
+                                /* This address is locked and will not be aged out.
+                                Refer to GT_ATU_UC_STATE in msApiDefs.h for other option. */
+
+    /*
+     *    Add the MAC Address.
+     */
+    if((status = gfdbAddMacEntry(dev,&macEntry)) != GT_OK)
+    {
+        MSG_PRINT(("gfdbAddMacEntry returned fail.\n"));
+        return status;
+    }
+
+    return GT_OK;
+}
+
+
+/*
+ *    Delete the CPU MAC address from the QuaterDeck MAC Address database.
+ *    Input - None
+*/
+GT_STATUS sampleDelCPUMac(GT_QD_DEV *dev)
+{
+    GT_STATUS status;
+    GT_ATU_ENTRY macEntry;
+
+    /*
+     *    Assume that Ethernet address for the CPU MAC is
+     *    00-50-43-00-01-02.
+    */
+    macEntry.macAddr.arEther[0] = 0x00;
+    macEntry.macAddr.arEther[1] = 0x50;
+    macEntry.macAddr.arEther[2] = 0x43;
+    macEntry.macAddr.arEther[3] = 0x00;
+    macEntry.macAddr.arEther[4] = 0x01;
+    macEntry.macAddr.arEther[5] = 0x02;
+
+    /*
+     *    Delete the CPU MAC Address.
+     */
+    if((status = gfdbDelMacEntry(dev,&macEntry.macAddr)) != GT_OK)
+    {
+        MSG_PRINT(("gfdbDelMacEntry returned fail.\n"));
+        return status;
+    }
+
+    return GT_OK;
+}
+
+
+/*
+ *    Add a multicast MAC address into the QuaterDeck MAC Address database,
+ *    where address is 01-00-18-1a-00-00 and frames with this destination has
+ *    to be forwarding to Port 1, Port 2 and Port 4 (port starts from Port 0)
+ *    Input - None
+*/
+GT_STATUS sampleAddMulticastAddr(GT_QD_DEV *dev)
+{
+    GT_STATUS status;
+    GT_ATU_ENTRY macEntry;
+
+    /*
+     *    Assume that we want to add the following multicast address
+     *    01-50-43-00-01-02.
+    */
+    macEntry.macAddr.arEther[0] = 0x01;
+    macEntry.macAddr.arEther[1] = 0x50;
+    macEntry.macAddr.arEther[2] = 0x43;
+    macEntry.macAddr.arEther[3] = 0x00;
+    macEntry.macAddr.arEther[4] = 0x01;
+    macEntry.macAddr.arEther[5] = 0x02;
+
+    /*
+     *     Assume that a packet needs to be forwarded to the second Port (port 1),
+     *    the third Port (port 2) and cpu Port, if the frame has destination of
+     *    01-00-18-1a-00-00.
+    */
+    macEntry.portVec =     (1<<1) | /* the second port */
+                (1<<2) | /* the third port */
+                (1<<dev->cpuPortNum);
+
+    macEntry.prio = 0;            /* Priority (2bits). When these bits are used they override
+                                any other priority determined by the frame's data. This value is
+                                meaningful only if the device does not support extended priority
+                                information such as MAC Queue Priority and MAC Frame Priority */
+
+    macEntry.exPrio.macQPri = 0;    /* If device doesnot support MAC Queue Priority override,
+                                    this field is ignored. */
+    macEntry.exPrio.macFPri = 0;    /* If device doesnot support MAC Frame Priority override,
+                                    this field is ignored. */
+    macEntry.exPrio.useMacFPri = 0;    /* If device doesnot support MAC Frame Priority override,
+                                    this field is ignored. */
+
+    macEntry.entryState.ucEntryState = GT_MC_STATIC;
+                                /* This address is locked and will not be aged out.
+                                Refer to GT_ATU_MC_STATE in msApiDefs.h for other option.*/
+
+    /*
+     *    Add the MAC Address.
+     */
+    if((status = gfdbAddMacEntry(dev,&macEntry)) != GT_OK)
+    {
+        MSG_PRINT(("gfdbAddMacEntry returned fail.\n"));
+        return status;
+    }
+
+    return GT_OK;
+}
+
+
+/*
+ *    Delete the Multicast MAC address of 01-00-18-1a-00-00.
+ *    Input - None
+*/
+GT_STATUS sampleDelMulticastAddr(GT_QD_DEV *dev)
+{
+    GT_STATUS status;
+    GT_ATU_ENTRY macEntry;
+
+    /*
+     *    Assume that Ethernet address for the CPU MAC is
+     *    01-50-43-00-01-02.
+    */
+    macEntry.macAddr.arEther[0] = 0x01;
+    macEntry.macAddr.arEther[1] = 0x50;
+    macEntry.macAddr.arEther[2] = 0x43;
+    macEntry.macAddr.arEther[3] = 0x00;
+    macEntry.macAddr.arEther[4] = 0x01;
+    macEntry.macAddr.arEther[5] = 0x02;
+
+    /*
+     *    Delete the given Multicast Address.
+     */
+    if((status = gfdbDelMacEntry(dev,&macEntry.macAddr)) != GT_OK)
+    {
+        MSG_PRINT(("gfdbDelMacEntry returned fail.\n"));
+        return status;
+    }
+
+    return GT_OK;
+}
+
+
+/*
+ *    This sample function will show how to display all the MAC address
+ *    in the ATU.
+*/
+GT_STATUS sampleShowMacEntry(GT_QD_DEV *dev)
+{
+    GT_STATUS status;
+    GT_ATU_ENTRY tmpMacEntry;
+
+    MSG_PRINT(("ATU List:\n"));
+    memset(&tmpMacEntry,0,sizeof(GT_ATU_ENTRY));
+
+    while(1)
+    {
+        /* Get the sorted list of MAC Table. */
+        if((status = gfdbGetAtuEntryNext(dev,&tmpMacEntry)) != GT_OK)
+        {
+            return status;
+        }
+
+        MSG_PRINT(("(%02x-%02x-%02x-%02x-%02x-%02x) PortVec %#x\n",
+                tmpMacEntry.macAddr.arEther[0],
+                tmpMacEntry.macAddr.arEther[1],
+                tmpMacEntry.macAddr.arEther[2],
+                tmpMacEntry.macAddr.arEther[3],
+                tmpMacEntry.macAddr.arEther[4],
+                tmpMacEntry.macAddr.arEther[5],
+                tmpMacEntry.portVec));
+    }
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/MACAddress/readme.txt b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/MACAddress/readme.txt
new file mode 100644
index 000000000000..59a50705d574
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/MACAddress/readme.txt
@@ -0,0 +1,16 @@
+========================================================================
+		MAC Address ADD/DELETE Sample
+========================================================================
+
+This sample will demonstrate how to add/delete a static MAC Address
+into/from the QuaterDeck MAC Address Data Base and how to enumerate
+all the MAC addresses in the Data Base.
+
+macAddr.c
+	sampleAddCPUMac : show how to add CPU Mac address into the QuterDeck
+		Database.
+	sampleDelCPUMac : show how to delete CPU Mac address from the database.
+	sampleAddMulticastAddr : show how to add a multicast address into
+		the QuaterDeck Database.
+	sampleDelMulticastAddr : show how to delete a multicast address.
+	sampleShowMacEntry : show how to get all the Mac addresses in the database.
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/MinimizeCPUTraffic/minimizeCPUTraffic.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/MinimizeCPUTraffic/minimizeCPUTraffic.c
new file mode 100644
index 000000000000..7a17a2af00d8
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/MinimizeCPUTraffic/minimizeCPUTraffic.c
@@ -0,0 +1,184 @@
+#include <Copyright.h>
+/********************************************************************************
+* minizeCPUTraffic.c
+*
+* DESCRIPTION:
+*        This sample shows how to setup the CPU port not to be a member of any
+*        VLAN, while it still be a manager of a switch.
+*
+* DEPENDENCIES:
+*        Please check the device's spec. if the device supports this feature.
+*        At the moment this sample was written, 88E6095 was the only device support
+*        this feature.
+*
+* FILE REVISION NUMBER:
+*
+* COMMENTS:
+*******************************************************************************/
+
+#include "msSample.h"
+
+/*
+    For the devices that support gsysSetARPDest API:
+
+    0) Remove CPU port from VLAN Member Table.
+       (this sample deals with Port Based Vlan only.)
+    1) Mirror ARPs to the CPU with To_CPU Marvell Tag
+    2) Convert unicast frames directed to the CPU into To_CPU Marvell Tag
+    Assumption : Device ID, Cascading Port, CPU Port, and Interswitch Port are
+        already set properly. For more information, please refer to the
+        sample/MultiDevice/msApiInit.c
+*/
+
+GT_STATUS sampleMinimizeCPUTraffic1(GT_QD_DEV *dev, GT_U8* macAddr)
+{
+    GT_STATUS status;
+    int i;
+    GT_LPORT memPorts[16], cpuPort;
+    GT_U8 memPortsLen, index;
+    GT_ATU_ENTRY macEntry;
+
+    cpuPort = (GT_LPORT)dev->cpuPortNum;
+
+    /*
+     *    Remove CPU port from VLAN Member Table.
+    */
+    for(i=0; i<dev->numOfPorts; i++)
+    {
+        if((status = gvlnGetPortVlanPorts(dev,(GT_LPORT)i,memPorts,&memPortsLen)) != GT_OK)
+        {
+            MSG_PRINT(("gvlnGetPortVlanPorts return Failed\n"));
+            return status;
+        }
+
+        for(index=0; index<memPortsLen; index++)
+        {
+            if (memPorts[index] == cpuPort)
+                break;
+        }
+
+        if(index != memPortsLen)
+        {
+            /* CPU Port is the member of the port vlan */
+            if((memPortsLen-1) != index)
+            {
+                memPorts[index] = memPorts[memPortsLen-1];
+            }
+            memPortsLen--;
+
+            if((status = gvlnSetPortVlanPorts(dev,(GT_LPORT)i,memPorts,memPortsLen)) != GT_OK)
+            {
+                MSG_PRINT(("gvlnSetPortVlanPorts return Failed\n"));
+                return status;
+            }
+        }
+    }
+
+    /*
+     *    Mirror ARPs to the CPU with To_CPU Marvell Tag.
+    */
+    if((status = gsysSetARPDest(dev,cpuPort)) != GT_OK)
+    {
+        MSG_PRINT(("gsysSetARPDest return Failed\n"));
+        return status;
+    }
+
+    /*
+     *    Convert unicast frames directed to the CPU into To_CPU Marvell Tag.
+     *  This sample assumes that DBNum is not used. If DBNum is used,
+     *  the macEntry has to be added for each DBNum used.
+    */
+    memset(&macEntry,0,sizeof(GT_ATU_ENTRY));
+    memcpy(macEntry.macAddr.arEther,macAddr,6);
+    macEntry.portVec = 1 << dev->cpuPortNum;
+    macEntry.prio = 0;            /* Priority (2bits). When these bits are used they override
+                                any other priority determined by the frame's data */
+    macEntry.entryState.ucEntryState = GT_UC_TO_CPU_STATIC;
+    macEntry.DBNum = 0;
+    macEntry.trunkMember = GT_FALSE;
+
+    if((status = gfdbAddMacEntry(dev,&macEntry)) != GT_OK)
+    {
+        MSG_PRINT(("gfdbAddMacEntry return Failed\n"));
+        return status;
+    }
+
+
+    return GT_OK;
+}
+
+
+/*
+    For the devices that support gprtSetARPtoCPU API:
+
+    1) Enable ARP to CPU feature fore each port.
+    2) Set Egress Flood Mode to be Block Unknown DA on CPU Port.
+    3) Add CPU Port's MAC into address table.
+    4) Remove Broadcast address from address table.
+*/
+
+GT_STATUS sampleMinimizeCPUTraffic2(GT_QD_DEV *dev, GT_U8* macAddr)
+{
+    GT_STATUS status;
+    int i;
+    GT_LPORT cpuPort;
+    GT_ATU_ENTRY macEntry;
+
+    cpuPort = (GT_LPORT)dev->cpuPortNum;
+
+    /*
+     *    Remove CPU port from VLAN Member Table.
+    */
+    for(i=0; i<dev->numOfPorts; i++)
+    {
+        if (i == cpuPort)
+            continue;
+
+        if((status = gprtSetARPtoCPU(dev,i,GT_TRUE)) != GT_OK)
+        {
+            MSG_PRINT(("gprtSetARPtoCPU return Failed\n"));
+            return status;
+        }
+    }
+
+    /*
+     * Set Egress Flood Mode to be Block Unknown DA on CPU Port.
+    */
+    if((status = gprtSetEgressFlood(dev,cpuPort,GT_BLOCK_EGRESS_UNKNOWN)) != GT_OK)
+    {
+        MSG_PRINT(("gprtSetEgressFlood return Failed\n"));
+        return status;
+    }
+
+
+    /*
+     *    Add CPU's MAC into address table.
+     *  This sample assumes that DBNum is not used. If DBNum is used,
+     *  the macEntry has to be added for each DBNum used.
+    */
+    memset(&macEntry,0,sizeof(GT_ATU_ENTRY));
+    memcpy(macEntry.macAddr.arEther,macAddr,6);
+    macEntry.portVec = 1 << dev->cpuPortNum;
+    macEntry.prio = 0;            /* Priority (2bits). When these bits are used they override
+                                any other priority determined by the frame's data */
+    macEntry.entryState.ucEntryState = GT_UC_STATIC;
+    macEntry.DBNum = 0;
+    macEntry.trunkMember = GT_FALSE;
+
+    if((status = gfdbAddMacEntry(dev,&macEntry)) != GT_OK)
+    {
+        MSG_PRINT(("gfdbAddMacEntry return Failed\n"));
+        return status;
+    }
+
+    /*
+     *  Delete BroadCast Entry from address table if exists.
+     *  This sample assumes that DBNum is not used. If DBNum is used,
+     *  the macEntry has to be added for each DBNum used.
+    */
+    memset(&macEntry,0,sizeof(GT_ATU_ENTRY));
+    memset(macEntry.macAddr.arEther,0xFF,6);
+    gfdbDelAtuEntry(dev,&macEntry);
+
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/MinimizeCPUTraffic/readme.txt b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/MinimizeCPUTraffic/readme.txt
new file mode 100644
index 000000000000..fd8f7d33353a
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/MinimizeCPUTraffic/readme.txt
@@ -0,0 +1,22 @@
+========================================================================
+		Setup for CPU Port which is not a member of any VLAN
+========================================================================
+
+Previous SOHO switch devices were low port count and/or used for Routers.
+In this environment, the CPU must be a member of all VLANs, so it can
+route the frames from one VLAN to another.
+In a high port count managed switch, the CPU is not a router but the
+manager of the switch. In this environment, the CPU doesn't want to be a
+member of any VLAN. If it is, it can get saturated with non-management
+frames preventing it from receiving the important management frames.
+
+In order to support the feature, the following has to be provided:
+
+1. For the devices that support gsysSetARPDest API:
+	0) Remove CPU port from VLAN Member Table.
+	1) Mirror ARPs to the CPU with To_CPU Marvell Tag.
+	2) Convert unicast frames directed to the CPU into To_CPU Marvell Tag.
+
+2. For the devices that support gprtSetARPtoCPU API:
+	0) Remove CPU port from VLAN Member Table.
+	1) Enable ARP to CPU for each port.
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/MultiDevice/msApiInit.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/MultiDevice/msApiInit.c
new file mode 100644
index 000000000000..cc01c6ff2fa7
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/MultiDevice/msApiInit.c
@@ -0,0 +1,271 @@
+#include <Copyright.h>
+/********************************************************************************
+* msApiInit.c
+*
+* DESCRIPTION:
+*        MS API initialization routine for devices supporting Multi Address Mode,
+*        such as 88E6183. Following setup will be used for this sample code.
+*
+*          ------------------
+*        |CPU Ethernet Dev|
+*        ------------------
+*         |
+*         |
+*         |   8--------------9     8--------------9      8--------------
+*         |----| QD Device 0|------| QD Device 1|-----| QD Device 2|
+*             --------------          --------------        --------------
+*               0 1 2 ... 7             0 1 2 ... 7          0 1 2 ... 7
+*
+*
+*        Ethernet port of CPU is connected to port 8 of Device 0,
+*        port 9 of Device 0 is connected to port 8 of Device 1, and
+*        port 9 of Device 1 is connected to port 8 of Device 2.
+*
+*        Device 0 uses Phy Address 1,
+*        Device 1 uses Phy Address 2, and
+*        Device 2 uses Phy Address 3
+*        Notes: Phy Address 0 cannot be used in a Multi Chip Address Mode.
+*
+*        Each Switch Device has to be configured to Multi Chip Address Mode.
+*        For detailed information for Multi Chip Address Mode configuration,
+*        please refer to your device's Datasheet.
+*
+* DEPENDENCIES:   Platform
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+#include "msSample.h"
+
+/*
+#define MULTI_ADDR_MODE
+#define MANUAL_MODE
+*/
+
+#define MULTI_ADDR_MODE
+#define N_OF_QD_DEVICES        3    /* number of 88E6183 devices connected */
+
+#define DEVICE0_ID        10
+#define DEVICE1_ID        DEVICE0_ID + 1
+#define DEVICE2_ID        DEVICE0_ID + 2
+
+#define DEVICE0_PHY_ADDR    1
+#define DEVICE1_PHY_ADDR    DEVICE0_PHY_ADDR + 1
+#define DEVICE2_PHY_ADDR    DEVICE0_PHY_ADDR + 2
+
+#define S_CPU_DEVICE        DEVICE0_ID
+
+#define DEVICE0_CPU_PORT        8
+#define DEVICE0_CASCADE_PORT    9
+#define DEVICE1_CPU_PORT        8
+#define DEVICE1_CASCADE_PORT    9
+#define DEVICE2_CPU_PORT        8
+#define DEVICE2_CASCADE_PORT    9
+
+GT_QD_DEV       *qdMultiDev[N_OF_QD_DEVICES] = {0,};
+
+/*
+ * Initialize each Switch Devices. This should be done in BSP driver init routine.
+ *    Since BSP is not combined with QuarterDeck driver, we are doing here.
+ * This routine will setup Switch Devices according to the above description.
+*/
+
+GT_STATUS qdMultiDevStart()
+{
+    GT_STATUS status = GT_FAIL;
+    GT_SYS_CONFIG   cfg;
+    int cpuPort;
+    int cascadePort;
+    int i,j;
+
+    memset((char*)&cfg,0,sizeof(GT_SYS_CONFIG));
+
+    /*
+     *    Create QD Device Structure for each device.
+     */
+    for(i=0; i<N_OF_QD_DEVICES; i++)
+    {
+        qdMultiDev[i] = (GT_QD_DEV*)malloc(sizeof(GT_QD_DEV));
+
+        if(qdMultiDev[i] == NULL)
+        {
+            while(i--)
+                free(qdMultiDev[i]);
+            return GT_FAIL;
+        }
+
+        memset((char*)qdMultiDev[i],0,sizeof(GT_QD_DEV));
+    }
+
+    /*
+     *  Register all the required functions to QuarterDeck Driver for each device.
+    */
+    for(i=0; i<N_OF_QD_DEVICES; i++)
+    {
+        cfg.BSPFunctions.readMii   = gtBspReadMii;
+        cfg.BSPFunctions.writeMii  = gtBspWriteMii;
+#ifdef USE_SEMAPHORE
+        cfg.BSPFunctions.semCreate = osSemCreate;
+        cfg.BSPFunctions.semDelete = osSemDelete;
+        cfg.BSPFunctions.semTake   = osSemWait;
+        cfg.BSPFunctions.semGive   = osSemSignal;
+#else
+        cfg.BSPFunctions.semCreate = NULL;
+        cfg.BSPFunctions.semDelete = NULL;
+        cfg.BSPFunctions.semTake   = NULL;
+        cfg.BSPFunctions.semGive   = NULL;
+#endif
+
+        cfg.initPorts = GT_TRUE;    /* Set switch ports to Forwarding mode. If GT_FALSE, use Default Setting. */
+        switch (i)
+        {
+            case 0: /* if we are registering device 0 */
+                cfg.cpuPortNum = DEVICE0_CPU_PORT;
+                break;
+            case 1: /* if we are registering device 1 */
+                cfg.cpuPortNum = DEVICE1_CPU_PORT;    /* where device 0 is connected */
+                break;
+            case 2: /* if we are registering device 2 */
+                cfg.cpuPortNum = DEVICE2_CPU_PORT;    /* where device 1 is connected */
+                break;
+            default: /* we don't have any more device. it shouldn't happen in our sample setup. */
+                goto errorExit;
+        }
+
+#ifdef MANUAL_MODE    /* not defined. this is only for sample */
+        /* user may want to use this mode when there are two QD switchs on the same MII bus. */
+        cfg.mode.scanMode = SMI_MANUAL_MODE;    /* Use QD located at manually defined base addr */
+        cfg.mode.baseAddr = 0x10;    /* valid value in this case is either 0 or 0x10 */
+#else
+#ifdef MULTI_ADDR_MODE    /* It should have been defined for this sample code */
+        cfg.mode.scanMode = SMI_MULTI_ADDR_MODE;    /* find a QD in indirect access mode */
+        cfg.mode.baseAddr = DEVICE0_PHY_ADDR + i;        /* this is the phyAddr used by QD family device.
+                                                                        Valid values are 1 ~ 31.*/
+#else
+        cfg.mode.scanMode = SMI_AUTO_SCAN_MODE;    /* Scan 0 or 0x10 base address to find the QD */
+        cfg.mode.baseAddr = 0;
+#endif
+#endif
+
+        if((status=qdLoadDriver(&cfg, qdMultiDev[i])) != GT_OK)
+        {
+            MSG_PRINT(("qdLoadDriver return Failed\n"));
+            goto errorExit;
+        }
+
+        MSG_PRINT(("Device ID     : 0x%x\n",qdMultiDev[i]->deviceId));
+        MSG_PRINT(("Base Reg Addr : 0x%x\n",qdMultiDev[i]->baseRegAddr));
+        MSG_PRINT(("No of Ports   : %d\n",qdMultiDev[i]->numOfPorts));
+        MSG_PRINT(("CPU Ports     : %d\n",qdMultiDev[i]->cpuPortNum));
+
+        /*
+         *  start the QuarterDeck
+        */
+        if((status=sysEnable(qdMultiDev[i])) != GT_OK)
+        {
+            MSG_PRINT(("sysConfig return Failed\n"));
+            goto errorExit;
+        }
+    }
+
+    /*
+        Now, we need to configure Cascading information for each devices.
+        1. Set Interswitch port mode for port 8 and 9 for device 0,1,and 2,
+            so that switch device can expect Marvell Tag from frames
+            ingressing/egressing this port.
+        2. Set CPU Port information (for To_CPU frame) for each port of device.
+        3. Set Cascading Port information (for From_CPU fram) for each device.
+        4. Set Device ID (if required)
+            Note: DeviceID is hardware configurable.
+    */
+    for(i=0; i<N_OF_QD_DEVICES; i++)
+    {
+        switch (i)
+        {
+            case 0: /* if we are registering device 0 */
+                cpuPort = DEVICE0_CPU_PORT;         /* where CPU Enet port is connected */
+                cascadePort = DEVICE0_CASCADE_PORT;    /* where device 1 is connected */
+                break;
+            case 1: /* if we are registering device 1 */
+                cpuPort = DEVICE1_CPU_PORT;         /* where device 0 is connected */
+                cascadePort = DEVICE1_CASCADE_PORT;    /* where device 2 is connected */
+                break;
+            case 2: /* if we are registering device 2 */
+                cpuPort = DEVICE2_CPU_PORT;         /* where device 1 is connected */
+                cascadePort = DEVICE2_CASCADE_PORT;    /* no need to setup for the given sample setup */
+                break;
+            default: /* we don't have any more device. it shouldn't happen in our sample setup. */
+                goto errorExit;
+        }
+
+        /*
+            1. Set Interswitch port mode for port 8 and 9 for device 0,1,and 2,
+                so that switch device can expect Marvell Tag from frames
+                ingressing/egressing this port.
+            2. Set CPU Port information (for To_CPU frame) for each port of device.
+        */
+        for(j=0; j<qdMultiDev[i]->numOfPorts; j++)
+        {
+            if((i == cpuPort) || (i == cascadePort))
+            {
+                if((status=gprtSetInterswitchPort(qdMultiDev[i],j,GT_TRUE)) != GT_OK)
+                {
+                    MSG_PRINT(("gprtSetInterswitchPort returned %i (port %i, mode TRUE)\n",status,j));
+                    goto errorExit;
+                }
+            }
+            else
+            {
+                if((status=gprtSetInterswitchPort(qdMultiDev[i],j,GT_FALSE)) != GT_OK)
+                {
+                    MSG_PRINT(("gprtSetInterswitchPort returned %i (port %i, mode FALSE)\n",status,j));
+                    goto errorExit;
+                }
+            }
+
+            if((status=gprtSetCPUPort(qdMultiDev[i],j,cpuPort)) != GT_OK)
+            {
+                MSG_PRINT(("gprtSetCPUPort returned %i\n",status));
+                goto errorExit;
+            }
+        }
+
+        /*
+            3. Set Cascading Port information (for From_CPU fram) for each device.
+        */
+        if((status=gsysSetCascadePort(qdMultiDev[i],cascadePort)) != GT_OK)
+        {
+            MSG_PRINT(("gsysSetCascadePort returned %i\n",status));
+            goto errorExit;
+        }
+
+        /*
+            4. Set Device ID (if required)
+        */
+        if((status=gsysSetDeviceNumber(qdMultiDev[i],DEVICE0_ID+i)) != GT_OK)
+        {
+            MSG_PRINT(("gsysSetDeviceNumber returned %i\n",status));
+            goto errorExit;
+        }
+
+    }
+
+    MSG_PRINT(("QuarterDeck has been started.\n"));
+
+    return GT_OK;
+
+errorExit:
+
+    for(i=0; i<N_OF_QD_DEVICES; i++)
+    {
+        if(qdMultiDev[i] != NULL)
+        {
+            qdUnloadDriver(qdMultiDev[i]);
+              free(qdMultiDev[i]);
+        }
+    }
+
+    MSG_PRINT(("QuarterDeck initialization failed.\n"));
+
+    return status;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PIRL/pirl.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PIRL/pirl.c
new file mode 100644
index 000000000000..5d3c85803d61
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PIRL/pirl.c
@@ -0,0 +1,92 @@
+#include <Copyright.h>
+/********************************************************************************
+* pirl.c
+*
+* DESCRIPTION:
+*       Setup PIRL buckets
+*
+* DEPENDENCIES:   None.
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+#include "msSample.h"
+
+
+/*
+ *  This setup function configures the Port 0 of Marvell SOHO Switch Device with
+ *    capability of PIRL to be :
+ *
+ *    1) Ingress Rate    : 128Kbps (128K bit per sec)
+ *    2) Discarded frame : Do not account discarded frame due to queue congestion
+ *    3) Filtered frame  : Account filtered frame
+ *    4) Limit action    : Drop packets when the incoming rate exceeds the limit
+ *    5) Rate type       : Rate is based on Traffic type
+ *    6) Traffic type    : ARP, MGMT, Multicast, Broadcast, and Unicast frames are
+ *                         tracked as part of the rate resource calculation.
+ *    7) Byte counted    : Account only Layer 3 bytes (IP header and payload)
+ *
+ *    Notes: This sample uses IRL Unit 0. The available number of IRL Units are
+ *    various depending on the device. Please refer to the datasheet for detailed
+ *    information.
+ *
+ *    Notes: Port 0 will be blocked while programming PIRL.
+*/
+
+GT_STATUS samplePIRLSetup(GT_QD_DEV *dev)
+{
+    GT_STATUS status;
+    GT_PIRL_DATA pirlData;
+    GT_U32        irlUnit;
+    GT_LPORT     port;
+
+    /* change Current Timer Update Interval */
+    status = gpirlSetCurTimeUpInt(dev,4);
+    switch (status)
+    {
+        case GT_OK:
+            break;
+        case GT_NOT_SUPPORTED:
+            MSG_PRINT(("Device is not supporting PIRL.\n"));
+            return status;
+        default:
+            MSG_PRINT(("Failure to configure device.\n"));
+            return status;
+    }
+
+    irlUnit = 0;
+    port = 0;
+
+    pirlData.ingressRate         = 128;
+    pirlData.accountQConf         = GT_FALSE;
+    pirlData.accountFiltered    = GT_TRUE;
+    pirlData.ebsLimitAction        = ESB_LIMIT_ACTION_DROP;
+    pirlData.bktRateType        = BUCKET_TYPE_TRAFFIC_BASED;
+    pirlData.bktTypeMask        = BUCKET_TRAFFIC_BROADCAST |
+                                  BUCKET_TRAFFIC_MULTICAST |
+                                  BUCKET_TRAFFIC_UNICAST   |
+                                  BUCKET_TRAFFIC_MGMT_FRAME|
+                                  BUCKET_TRAFFIC_ARP;
+
+    pirlData.byteTobeCounted    = GT_PIRL_COUNT_ALL_LAYER3;
+
+    status = gpirlActivate(dev,irlUnit,(1<<port),&pirlData);
+
+    switch (status)
+    {
+        case GT_OK:
+            MSG_PRINT(("IRL Unit 0 is activated.\n"));
+            break;
+        case GT_BAD_PARAM:
+            MSG_PRINT(("Invalid parameters are given.\n"));
+            break;
+        case GT_NOT_SUPPORTED:
+            MSG_PRINT(("Device is not supporting PIRL.\n"));
+            break;
+        default:
+            MSG_PRINT(("Failure to configure device.\n"));
+            break;
+    }
+
+    return status;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PIRL/pirl2.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PIRL/pirl2.c
new file mode 100644
index 000000000000..eddfde0a6920
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PIRL/pirl2.c
@@ -0,0 +1,199 @@
+#include <Copyright.h>
+/********************************************************************************
+* pirl2.c
+*
+* DESCRIPTION:
+*       Setup PIRL buckets for 88E6097 device family
+*
+* DEPENDENCIES:   None.
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+#include "msSample.h"
+
+
+/*
+ *  This setup function configures the resource 0 of Port 0 of Marvell SOHO
+ *    Switch Device with capability of PIRL to be :
+ *
+ *    1) Ingress Rate    : 128Kbps (128K bit per sec)
+ *    2) Custom setup for Ingress Rate : disabled
+ *    3) Discarded frame : Do not account discarded frame due to queue congestion
+ *    4) Filtered frame  : Account filtered frame
+ *    5) Mgmt frame      : Exclude management frame from rate limiting calculation
+ *    6) SA found in ATU : Exclude from ingress rate limiting calculation if the SA of the
+ *                         frame is in ATU with EntryState that indicates Non Rate Limited.
+ *    7) DA found in ATU : Include to ingress rate limiting calculation even though the DA of the
+ *                         frame is in ATU with EntryState that indicates Non Rate Limited.
+ *    8) Sampling Mode   : Disable the mode
+ *    9) Action Mode     : Follow Limit action when there are not enough tokens to accept the
+ *                         entire imcoming frame.
+ *    10) Limit action   : Drop packets when the incoming rate exceeds the limit
+ *    11) Rate type      : Rate is based on Traffic type
+ *    12) Traffic type   : ARP, MGMT, Multicast, Broadcast, and Unicast frames are
+ *                           tracked as part of the rate resource calculation.
+ *    13) Byte counted   : Account only Layer 3 bytes (IP header and payload)
+ *
+*/
+
+GT_STATUS samplePIRL2Setup(GT_QD_DEV *dev)
+{
+    GT_STATUS status;
+    GT_PIRL2_DATA pirlData;
+    GT_U32        irlRes;
+    GT_LPORT     port;
+
+    /* change Current Timer Update Interval */
+    status = gpirl2SetCurTimeUpInt(dev,4);
+    switch (status)
+    {
+        case GT_OK:
+            break;
+        case GT_NOT_SUPPORTED:
+            MSG_PRINT(("Device is not supporting PIRL.\n"));
+            return status;
+        default:
+            MSG_PRINT(("Failure to configure device.\n"));
+            return status;
+    }
+
+    port = 0;
+    irlRes = 0;
+
+    pirlData.ingressRate         = 128;
+
+    pirlData.customSetup.isValid = GT_FALSE;
+
+    pirlData.accountQConf         = GT_FALSE;
+    pirlData.accountFiltered    = GT_TRUE;
+
+    pirlData.mgmtNrlEn = GT_TRUE;
+    pirlData.saNrlEn   = GT_TRUE;
+    pirlData.daNrlEn   = GT_FALSE;
+    pirlData.samplingMode = GT_FALSE;
+    pirlData.actionMode = PIRL_ACTION_USE_LIMIT_ACTION;
+
+    pirlData.ebsLimitAction        = ESB_LIMIT_ACTION_DROP;
+    pirlData.bktRateType        = BUCKET_TYPE_TRAFFIC_BASED;
+    pirlData.bktTypeMask        = BUCKET_TRAFFIC_BROADCAST |
+                                  BUCKET_TRAFFIC_MULTICAST |
+                                  BUCKET_TRAFFIC_UNICAST   |
+                                  BUCKET_TRAFFIC_MGMT_FRAME|
+                                  BUCKET_TRAFFIC_ARP;
+
+    pirlData.priORpt = GT_TRUE;
+    pirlData.priMask = 0;
+
+    pirlData.byteTobeCounted    = GT_PIRL2_COUNT_ALL_LAYER3;
+
+    status = gpirl2WriteResource(dev,port,irlRes,&pirlData);
+
+    switch (status)
+    {
+        case GT_OK:
+            MSG_PRINT(("PIRL2 writing completed.\n"));
+            break;
+        case GT_BAD_PARAM:
+            MSG_PRINT(("Invalid parameters are given.\n"));
+            break;
+        case GT_NOT_SUPPORTED:
+            MSG_PRINT(("Device is not supporting PIRL2.\n"));
+            break;
+        default:
+            MSG_PRINT(("Failure to configure device.\n"));
+            break;
+    }
+
+    return status;
+}
+
+
+
+/*
+ *    This setup function shows how to configure Ingress Rate of 128Kbps with the
+ *    custom data information.
+ *  it configures the resource 0 of Port 0 of Marvell SOHO Switch Device with
+ *    capability of PIRL to be :
+ *
+ *    1) Custom setup for Ingress Rate : Enabled
+ *    2) Custom EBS Limit : 0xFFFFFF
+ *    3) Custom CBS Limit : 0x200000
+ *    4) Custom Bucket Increament  : 0x3D
+ *    5) Custom Bucket Rate Factor : 2
+ *    6) Discarded frame : Do not account discarded frame due to queue congestion
+ *    7) Filtered frame  : Account filtered frame
+ *    8) Mgmt frame      : Exclude management frame from rate limiting calculation
+ *    9) SA found in ATU : Exclude from ingress rate limiting calculation if the SA of the
+ *                         frame is in ATU with EntryState that indicates Non Rate Limited.
+ *    10) DA found in ATU : Include to ingress rate limiting calculation even though the DA of the
+ *                         frame is in ATU with EntryState that indicates Non Rate Limited.
+ *    11) Sampling Mode   : Disable the mode
+ *    12) Action Mode     : Follow Limit action when there are not enough tokens to accept the
+ *                         entire imcoming frame.
+ *    13) Limit action   : Drop packets when the incoming rate exceeds the limit
+ *    14) Rate type      : Rate is based on Traffic type
+ *    15) Traffic type   : ARP, MGMT, Multicast, Broadcast, and Unicast frames are
+ *                           tracked as part of the rate resource calculation.
+ *    16) Byte counted   : Account only Layer 3 bytes (IP header and payload)
+ *
+*/
+
+GT_STATUS samplePIRL2CustomSetup(GT_QD_DEV *dev)
+{
+    GT_STATUS status;
+    GT_PIRL2_DATA pirlData;
+    GT_U32        irlRes;
+    GT_LPORT     port;
+
+    port = 0;
+    irlRes = 0;
+
+    pirlData.customSetup.isValid = GT_TRUE;
+    pirlData.customSetup.ebsLimit = 0xFFFFFF;
+    pirlData.customSetup.cbsLimit = 0x200000;
+    pirlData.customSetup.bktIncrement = 0x3D;
+    pirlData.customSetup.bktRateFactor = 2;
+
+    pirlData.accountQConf         = GT_FALSE;
+    pirlData.accountFiltered    = GT_TRUE;
+
+    pirlData.mgmtNrlEn = GT_TRUE;
+    pirlData.saNrlEn   = GT_TRUE;
+    pirlData.daNrlEn   = GT_FALSE;
+    pirlData.samplingMode = GT_FALSE;
+    pirlData.actionMode = PIRL_ACTION_USE_LIMIT_ACTION;
+
+    pirlData.ebsLimitAction        = ESB_LIMIT_ACTION_DROP;
+    pirlData.bktRateType        = BUCKET_TYPE_TRAFFIC_BASED;
+    pirlData.bktTypeMask        = BUCKET_TRAFFIC_BROADCAST |
+                                  BUCKET_TRAFFIC_MULTICAST |
+                                  BUCKET_TRAFFIC_UNICAST   |
+                                  BUCKET_TRAFFIC_MGMT_FRAME|
+                                  BUCKET_TRAFFIC_ARP;
+
+    pirlData.priORpt = GT_TRUE;
+    pirlData.priMask = 0;
+
+    pirlData.byteTobeCounted    = GT_PIRL2_COUNT_ALL_LAYER3;
+
+    status = gpirl2WriteResource(dev,port,irlRes,&pirlData);
+
+    switch (status)
+    {
+        case GT_OK:
+            MSG_PRINT(("PIRL2 writing completed.\n"));
+            break;
+        case GT_BAD_PARAM:
+            MSG_PRINT(("Invalid parameters are given.\n"));
+            break;
+        case GT_NOT_SUPPORTED:
+            MSG_PRINT(("Device is not supporting PIRL2.\n"));
+            break;
+        default:
+            MSG_PRINT(("Failure to configure device.\n"));
+            break;
+    }
+
+    return status;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PIRL/readme.txt b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PIRL/readme.txt
new file mode 100644
index 000000000000..ccdc106d3b61
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PIRL/readme.txt
@@ -0,0 +1,16 @@
+==================================================================
+		PIRL (Port based Ingress Rate Limit) Setup
+==================================================================
+
+88E6065 device family, 88E6097 device family, and recent Marvell SOHO
+Switch Devices support 'Best-in-Class' per port TCP/IP ingress rate limiting
+(based on some kind of a bucket scheme to keep track of the bandwidth) along
+with independent Storm prevention.
+
+This sample shows how to use PIRL resources.
+
+pirl.c
+    this sample routines support 88E6065 device family.
+
+pirl2.c
+    this sample routines support 88E6097 device family.
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PTP/ptp.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PTP/ptp.c
new file mode 100644
index 000000000000..b0a4e7ed28c0
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PTP/ptp.c
@@ -0,0 +1,281 @@
+#include <Copyright.h>
+/********************************************************************************
+* ptp.c
+*
+* DESCRIPTION:
+*       Setup PTP for 88E6165 device family.
+*
+* DEPENDENCIES:   None.
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+#include "msSample.h"
+
+
+/*
+ *  PTP Init routine
+ *
+ *    1) Setup each port to forward PTP frame to CPU port
+ *    2) Enable PTP Interrupt (assumes that no other interrupt is used, but PTP)
+ *    3) Configure PTP
+ *    4) Enable PTP
+ *
+ *    Notes: This sample uses the following configuration
+ *    1) Enables only PTP interrupt
+ *    2) Assumes PTP Ethernet Type is 0x88F7
+ *    3) Time Stamp is enabled only for Message ID 0, 2, and 3
+ *    4) Message ID 0 and 2 use Arr0 pointer and ID 3 uses Arr1 pointer
+ *    5) PTP interrtups are enabled on Port 0 ~ 5
+ *
+ *    Notes: Forwarding PTP fram to CPU port is based on Ether Type DSA Tag (8 bytes).
+ *    Therefore, Ethernet device driver, that actually rx/tx the PTP frame,
+ *    should expect/insert Ether Type DSA Tag.
+*/
+
+GT_STATUS samplePTPInit(GT_QD_DEV *dev)
+{
+     GT_PTP_CONFIG ptpCfg;
+    GT_LPORT port;
+    GT_STATUS status;
+
+
+    /*
+     *    1) Setup each port to forward PTP frame to CPU port
+    */
+
+    /* setup EtypeType and Policy */
+    for(port=0; port<dev->numOfPorts; port++)
+    {
+        if ((status = gprtSetPortEType(dev, port, (GT_ETYPE)0x88F7)) != GT_OK)
+        {
+            MSG_PRINT(("gprtSetPortEType returned not OK\n"));
+            return status;
+        }
+
+        if (port == dev->cpuPortNum)
+            continue;
+
+        if ((status = gprtSetPolicy(dev, port, POLICY_TYPE_ETYPE, FRAME_POLICY_TRAP)) != GT_OK)
+        {
+            MSG_PRINT(("gprtSetPolicy returned not OK\n"));
+            return status;
+        }
+    }
+
+    /* setup Frame Mode for CPU port */
+    if ((status = gprtSetFrameMode(dev, dev->cpuPortNum, GT_FRAME_MODE_ETHER_TYPE_DSA)) != GT_OK)
+    {
+        MSG_PRINT(("gprtSetFrameMode return failed\n"));
+        return status;
+    }
+
+    /*
+     *    2) Enable PTP Interrupt
+    */
+    eventSetActive(dev, GT_AVB_INT);
+
+
+    /*
+     *    3) Configure PTP
+    */
+    ptpCfg.ptpEType = 0x88F7;
+    ptpCfg.msgIdTSEn = 0xd;        /* id 0, 2, and 3 */
+    ptpCfg.tsArrPtr = 0x8;        /* id 0 and 2 for ARR0, id 3 for ARR1 */
+
+    /* Transport specific bits present in PTP Common Header */
+    ptpCfg.transSpec = 1;
+
+    /* starting bit location for the Message ID field in the PTP Common Header */
+    ptpCfg.msgIdStartBit = 4;
+
+    ptpCfg.ptpArrIntEn = 0x3F;
+    ptpCfg.ptpDepIntEn = 0x3F;
+    ptpCfg.disTSOverwrite = 0;
+
+
+    if ((status = gptpSetConfig(dev, &ptpCfg)) != GT_OK)
+    {
+        MSG_PRINT(("gptpSetConfig return failed\n"));
+        return status;
+    }
+    if ((status = gptpSetPTPEn(dev, GT_TRUE)) != GT_OK)
+    {
+        MSG_PRINT(("gptpSetPTPEn return failed\n"));
+        return status;
+    }
+
+    return GT_OK;
+}
+
+
+/*
+ *  PTP Interrupt Handler
+ *
+ *    1) for each port that causes PTP interrup, do the followings
+ *    2) check Arrival 0 Time Stamp
+ *    3) check Arrival 1 Time Stamp
+ *    4) check Departure Time Stamp
+*/
+
+GT_STATUS samplePTPIntHandler(GT_QD_DEV *dev)
+{
+    GT_U32 int_ports, i, int_status;
+    GT_STATUS status;
+    GT_PTP_TS_STATUS    ptpStatus;
+
+    /* disable AVB Interrupt */
+    eventSetActive(dev, 0);
+
+    /* read interrupt cause */
+    if((status=eventGetIntStatus(dev,(GT_U16*)&int_status))!=GT_OK)
+    {
+        return GT_FAIL;
+    }
+
+
+    if ((int_status & GT_AVB_INT) == 0)
+    {
+        MSG_PRINT(("eventGetIntStatus return No AVB Interrupt\n"));
+        /* it's not PTP interrupt */
+        goto ret_int;
+    }
+
+    /* read AVB Int status */
+    if((status = gptpGetPTPInt(dev, &int_ports)) != GT_OK)
+    {
+        MSG_PRINT(("gptpGetPTPInt return failed\n"));
+        goto ret_int;
+    }
+
+    /* for each port, get the timestamp information if necessary */
+    i = 0;
+    while(int_ports)
+    {
+        if(!(int_ports & 0x1))
+        {
+            i++;
+            int_ports >>= 1;
+            continue;
+        }
+
+        /* check Arrival0 Time Stamp */
+        if((status = gptpGetTimeStamped(dev, i, PTP_ARR0_TIME, &ptpStatus)) != GT_OK)
+        {
+            MSG_PRINT(("gptpGetTimeStamped return failed\n"));
+            goto ret_int;
+        }
+
+        if (ptpStatus.isValid == GT_TRUE)
+        {
+            switch(ptpStatus.status)
+            {
+                case PTP_INT_NORMAL:
+                    /* To Do: No error condition occurred. So store the time stamp and seqId */
+                    break;
+
+                case PTP_INT_OVERWRITE:
+                    /* To Do: PTP Logic received several PTP frames and only the last one is valid */
+                    break;
+
+                case PTP_INT_DROP:
+                    /* To Do: PTP Logic received several PTP frames and only the first one is valid */
+                    break;
+
+                default:
+                    MSG_PRINT(("unknown ptp status %i\n", ptpStatus.status));
+                    status = GT_FAIL;
+                    goto ret_int;
+
+            }
+
+            if((status = gptpResetTimeStamp(dev, i, PTP_ARR0_TIME)) != GT_OK)
+            {
+                MSG_PRINT(("gptpResetTimeStamp return failed\n"));
+                goto ret_int;
+            }
+        }
+
+        /* check Arrival1 Time Stamp */
+        if((status = gptpGetTimeStamped(dev, i, PTP_ARR1_TIME, &ptpStatus)) != GT_OK)
+        {
+            MSG_PRINT(("gptpGetTimeStamped return failed\n"));
+            goto ret_int;
+        }
+
+        if (ptpStatus.isValid == GT_TRUE)
+        {
+            switch(ptpStatus.status)
+            {
+                case PTP_INT_NORMAL:
+                    /* To Do: No error condition occurred. So store the time stamp and seqId */
+                    break;
+
+                case PTP_INT_OVERWRITE:
+                    /* To Do: PTP Logic received several PTP frames and only the last one is valid */
+                    break;
+
+                case PTP_INT_DROP:
+                    /* To Do: PTP Logic received several PTP frames and only the first one is valid */
+                    break;
+
+                default:
+                    MSG_PRINT(("unknown ptp status %i\n", ptpStatus.status));
+                    status = GT_FAIL;
+                    goto ret_int;
+            }
+
+            if((status = gptpResetTimeStamp(dev, i, PTP_ARR1_TIME)) != GT_OK)
+            {
+                MSG_PRINT(("gptpResetTimeStamp return failed\n"));
+                goto ret_int;
+            }
+
+        }
+
+        /* check Departure Time Stamp */
+        if((status = gptpGetTimeStamped(dev, i, PTP_DEP_TIME, &ptpStatus)) != GT_OK)
+        {
+            MSG_PRINT(("gptpGetTimeStamped return failed\n"));
+            goto ret_int;
+        }
+
+        if (ptpStatus.isValid == GT_TRUE)
+        {
+            switch(ptpStatus.status)
+            {
+                case PTP_INT_NORMAL:
+                    /* To Do: No error condition occurred. So store the time stamp and seqId */
+                    break;
+
+                case PTP_INT_OVERWRITE:
+                    /* To Do: PTP Logic received several PTP frames and only the last one is valid */
+                    break;
+
+                case PTP_INT_DROP:
+                    /* To Do: PTP Logic received several PTP frames and only the first one is valid */
+                    break;
+
+                default:
+                    MSG_PRINT(("unknown ptp status %i\n", ptpStatus.status));
+                    status = GT_FAIL;
+                    goto ret_int;
+            }
+
+            if((status = gptpResetTimeStamp(dev, i, PTP_DEP_TIME)) != GT_OK)
+            {
+                MSG_PRINT(("gptpResetTimeStamp return failed\n"));
+                goto ret_int;
+            }
+
+        }
+
+        int_ports >>= 1;
+
+    }
+
+ret_int:
+    eventSetActive(dev, GT_AVB_INT);
+
+    return status;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PTP/readme.txt b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PTP/readme.txt
new file mode 100644
index 000000000000..0a4d5ad19dd1
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PTP/readme.txt
@@ -0,0 +1,7 @@
+==================================================================
+                PTP (Precise Time Protocol) Setup
+==================================================================
+
+88E6165 device family Switch Devices support PTP.
+
+This sample shows how to use PTP APIs.
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PktGen/phyPktGenSample.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PktGen/phyPktGenSample.c
new file mode 100644
index 000000000000..371caad07620
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PktGen/phyPktGenSample.c
@@ -0,0 +1,86 @@
+#include <Copyright.h>
+/********************************************************************************
+* phyPktGenSample.c
+*
+* DESCRIPTION:
+*       Packet Generator setup sample (startQdPktGenerator and stopQdPktGenerator).
+*
+* DEPENDENCIES:
+*        Please check the phy device's spec. if the device supports this feature.
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+#include "msSample.h"
+
+
+/*
+ * Start Packet Generator.
+ * Input:
+ *      pktload - enum GT_PG_PAYLOAD (GT_PG_PAYLOAD_RANDOM or GT_PG_PAYLOAD_5AA5)
+ *      length  - enum GT_PG_LENGTH  (GT_PG_LENGTH_64 or GT_PG_LENGTH_1514)
+ *      tx      - enum GT_PG_TX      (GT_PG_TX_NORMAL or GT_PG_TX_ERROR)
+*/
+GT_STATUS startQdPktGenerator
+(
+    GT_QD_DEV      *dev,
+    GT_LPORT       port,
+    GT_PG_PAYLOAD  payload,
+    GT_PG_LENGTH   length,
+    GT_PG_TX       tx
+)
+{
+    GT_STATUS status;
+    GT_PG     pktInfo;
+
+    if (dev == 0)
+    {
+        MSG_PRINT(("GT driver is not initialized\n"));
+        return GT_FAIL;
+    }
+
+    MSG_PRINT(("Start Packet Generator for port %i\n",(int)port));
+
+    pktInfo.payload = payload; /* Pseudo-random, 5AA55AA5... */
+    pktInfo.length = length;   /* 64 bytes, 1514 bytes */
+    pktInfo.tx = tx;           /* normal packet, error packet */
+
+    /*
+     *    Start Packet Generator
+    */
+    if((status = gprtSetPktGenEnable(dev,port,GT_TRUE,&pktInfo)) != GT_OK)
+    {
+        MSG_PRINT(("mdDiagSetPktGenEnable return Failed\n"));
+        return status;
+    }
+
+    return GT_OK;
+}
+
+
+/*
+ * Stop Packet Generator.
+ */
+GT_STATUS stopQdPktGenerator(GT_QD_DEV *dev,GT_LPORT port)
+{
+    GT_STATUS status;
+
+    if (dev == 0)
+    {
+        MSG_PRINT(("GT driver is not initialized\n"));
+        return GT_FAIL;
+    }
+
+    MSG_PRINT(("Stopping Packet Generator for port %i\n",(int)port));
+
+    /*
+     *    Start Packet Generator
+    */
+    if((status = gprtSetPktGenEnable(dev,port,GT_FALSE,NULL)) != GT_OK)
+    {
+        MSG_PRINT(("mdDiagSetPktGenEnable return Failed\n"));
+        return status;
+    }
+
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PktGen/readme.txt b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PktGen/readme.txt
new file mode 100644
index 000000000000..5a2f2160a6f5
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PktGen/readme.txt
@@ -0,0 +1,12 @@
+
+            Sample for Packet Generator
+=======================================================
+
+This sample shows how to start/stop Packet Generator.
+
+Main Functions:
+    startPktGenerator - start Packet Generator
+    stopPktGenerator  - stop Packet Generator
+
+Used APIs:
+    gprtSetPktGenEnable
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PortMonitor/portMonitor.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PortMonitor/portMonitor.c
new file mode 100644
index 000000000000..320d85ce32e2
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PortMonitor/portMonitor.c
@@ -0,0 +1,107 @@
+#include <Copyright.h>
+/********************************************************************************
+* portMonitor.c
+*
+* DESCRIPTION:
+*        This sample shows how to monitor a certain port. Port monitoring is
+*        supported by the ClipperShip device with Egress only monitoring or
+*        Egress and Ingress monitoring.
+*
+* DEPENDENCIES:
+*        Only ClipperShip Family supports this feature.
+*
+* FILE REVISION NUMBER:
+*
+* COMMENTS:
+*******************************************************************************/
+
+#include "msSample.h"
+
+/*
+ *    Enable EgressMonitoring for the monitoredPort.
+ *    With this setup, monitoringPort will receive every packet
+ *    which egressed from monitoredPort.
+*/
+GT_STATUS sampleEgressMonitor(GT_QD_DEV *dev,GT_LPORT monitoredPort, GT_LPORT monitoringPort)
+{
+    GT_STATUS status;
+    GT_U16 pav;
+
+    /*
+     *    Enable EgressMonitoring for the monitoredPort.
+    */
+    pav = (1<<monitoringPort) || (1<<monitoredPort);
+
+    if((status = gpavSetPAV(dev,monitoredPort, pav)) != GT_OK)
+    {
+        MSG_PRINT(("gpavSetPAV return Failed\n"));
+        return status;
+    }
+
+    return GT_OK;
+}
+
+/*
+ *    Enable Egress Monitoring and Ingress Monitoring for the monitoredPort.
+ *    With this setup, monitoringPort will receive every packet
+ *    which is both from monitoredPort and to monitoredPort.
+*/
+GT_STATUS samplePortMonitor(GT_QD_DEV *dev,GT_LPORT monitoredPort, GT_LPORT monitoringPort)
+{
+    GT_STATUS status;
+    GT_U16 pav;
+
+    /*
+     *    Enable Egress Monitoring for the monitoredPort.
+    */
+    pav = (1<<monitoringPort) || (1<<monitoredPort);
+
+    if((status = gpavSetPAV(dev,monitoredPort, pav)) != GT_OK)
+    {
+        MSG_PRINT(("gpavSetPAV return Failed\n"));
+        return status;
+    }
+
+    /*
+     *    Enable Ingress Monitoring for the monitoredPort.
+    */
+    if((status = gpavSetIngressMonitor(dev,monitoredPort, GT_TRUE)) != GT_OK)
+    {
+        MSG_PRINT(("gpavSetIngressMonitor return Failed\n"));
+        return status;
+    }
+
+    return GT_OK;
+}
+
+
+/*
+ *    Disable Egress Monitoring and Ingress Monitoring for the monitoredPort.
+*/
+GT_STATUS sampleDisablePortMonitor(GT_QD_DEV *dev,GT_LPORT monitoredPort)
+{
+    GT_STATUS status;
+    GT_U16 pav;
+
+    /*
+     *    Disable Egress Monitoring for the monitoredPort.
+    */
+    pav = (1<<monitoredPort);
+
+    if((status = gpavSetPAV(dev,monitoredPort, pav)) != GT_OK)
+    {
+        MSG_PRINT(("gpavSetPAV return Failed\n"));
+        return status;
+    }
+
+    /*
+     *    Disable Ingress Monitoring for the monitoredPort.
+    */
+    if((status = gpavSetIngressMonitor(dev,monitoredPort, GT_FALSE)) != GT_OK)
+    {
+        MSG_PRINT(("gpavSetIngressMonitor return Failed\n"));
+        return status;
+    }
+
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PortMonitor/readme.txt b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PortMonitor/readme.txt
new file mode 100644
index 000000000000..4848ff453415
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/PortMonitor/readme.txt
@@ -0,0 +1,21 @@
+========================================================================
+		Port Monitor Setup
+========================================================================
+
+88E6063 device supports Port Monitoring, which allows a user to monitor
+all the traffic of a certain port.
+This sample shows how to enable/disable Port Monitoring.
+For more information about Port Monitoring, please refer to 88E6063 Spec.
+
+Note :
+Port monitoring supported by ClipperShip has two modes:
+1. Egress only monitoring (monitor packets comming out of the
+monitored port, and
+2. Egress and Ingress monitoring (monitor packet comming in and out
+from the monitored port)
+
+portMonitor.c
+	sampleEgressMonitor can be used to enable Egress only port monitoring.
+	samplePortMonitor can be used to enable Egress and Ingress monitoring.
+	sampleDisablePortMonitor can be used to disable monitoring
+	(both Egress only mode and Egress and Ingress mode).
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/QoSSetup/qos.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/QoSSetup/qos.c
new file mode 100644
index 000000000000..8547c701020e
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/QoSSetup/qos.c
@@ -0,0 +1,195 @@
+#include <Copyright.h>
+/********************************************************************************
+* qos.c
+*
+* DESCRIPTION:
+*       Sample program which will show how to setup the Priority Queue for QoS
+*
+*
+* DEPENDENCIES:   None.
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+#include "msSample.h"
+
+/*
+ *    sampleQoS will enable using both IEEE 802.3ac Tag and IPv4/IPv6 Traffic
+ *    Class field and IEEE 802.3ac has a higher priority than IPv4/IPv6.
+ *    The following is the QoS mapping programmed by sampleQos:
+ *    1) IEEE 802.3ac Tag (Priority 0 ~ 7, 3 bits)
+ *        Priority 1~3 is using QuarterDeck Queue 0.
+ *        Priority 0,4 is using QuarterDeck Queue 1.
+ *        Priority 6,7 is using QuarterDeck Queue 2.
+ *        Priority 5 is using QuarterDeck Queue 3.
+ *    2) IPv4/IPv6 (Priority 0 ~ 63, 6 bits)
+ *        Priority 0~7 is using QuaterDeck Queue 0.
+ *        Priority 8~31 is using QuaterDeck Queue 1.
+ *        Priority 32~55 is using QuaterDeck Queue 2.
+ *        Priority 56~63 is using QuaterDeck Queue 3.
+ *    3) Each port's default priority is set to 1.
+*/
+GT_STATUS sampleQos(GT_QD_DEV *dev)
+{
+    GT_STATUS status;
+    GT_U8 priority;
+    GT_LPORT port;
+
+    for(port=0; port<dev->numOfPorts; port++)
+    {
+       /*
+         *  Use IEEE Tag
+         */
+        if((status = gqosUserPrioMapEn(dev,port,GT_TRUE)) != GT_OK)
+        {
+            MSG_PRINT(("gqosUserPrioMapEn return Failed\n"));
+            return status;
+        }
+
+        /*
+         *  Use IPv4/IPv6 priority fields (use IP)
+         */
+        if((status = gqosIpPrioMapEn(dev,port,GT_TRUE)) != GT_OK)
+        {
+            MSG_PRINT(("gqosIpPrioMapEn return Failed\n"));
+            return status;
+        }
+
+        /*
+         *  IEEE Tag has higher priority than IP priority fields
+         */
+        if((status = gqosSetPrioMapRule(dev,port,GT_TRUE)) != GT_OK)
+        {
+            MSG_PRINT(("gqosSetPrioMapRule return Failed\n"));
+            return status;
+        }
+        MSG_PRINT(("sampleQos port %d Ok\n", port));
+    }
+
+    /*
+     *    IEEE 802.3ac Tag (Priority 0 ~ 7, 3 bits)
+     *    Priority 1~3 is using QuarterDeck Queue 0.
+     *    Priority 0,4 is using QuarterDeck Queue 1.
+     *    Priority 6,7 is using QuarterDeck Queue 2.
+     *    Priority 5 is using QuarterDeck Queue 3.
+    */
+
+    /*    Priority 0 is using QuarterDeck Queue 1. */
+    if((status = gcosSetUserPrio2Tc(dev,0,1)) != GT_OK)
+    {
+        MSG_PRINT(("gcosSetUserPrio2Tc returned fail.\n"));
+        return status;
+    }
+
+    /*    Priority 1 is using QuarterDeck Queue 0. */
+    if((status = gcosSetUserPrio2Tc(dev,1,0)) != GT_OK)
+    {
+        MSG_PRINT(("gcosSetUserPrio2Tc returned fail.\n"));
+        return status;
+    }
+
+    /*    Priority 2 is using QuarterDeck Queue 0. */
+    if((status = gcosSetUserPrio2Tc(dev,2,0)) != GT_OK)
+    {
+        MSG_PRINT(("gcosSetUserPrio2Tc returned fail.\n"));
+        return status;
+    }
+
+    /*    Priority 3 is using QuarterDeck Queue 0. */
+    if((status = gcosSetUserPrio2Tc(dev,3,0)) != GT_OK)
+    {
+        MSG_PRINT(("gcosSetUserPrio2Tc returned fail.\n"));
+        return status;
+    }
+
+    /*    Priority 4 is using QuarterDeck Queue 1. */
+    if((status = gcosSetUserPrio2Tc(dev,4,1)) != GT_OK)
+    {
+        MSG_PRINT(("gcosSetUserPrio2Tc returned fail.\n"));
+        return status;
+    }
+
+    /*    Priority 5 is using QuarterDeck Queue 3. */
+    if((status = gcosSetUserPrio2Tc(dev,5,3)) != GT_OK)
+    {
+        MSG_PRINT(("gcosSetUserPrio2Tc returned fail.\n"));
+        return status;
+    }
+
+    /*    Priority 6 is using QuarterDeck Queue 2. */
+    if((status = gcosSetUserPrio2Tc(dev,6,2)) != GT_OK)
+    {
+        MSG_PRINT(("gcosSetUserPrio2Tc returned fail.\n"));
+        return status;
+    }
+
+    /*    Priority 7 is using QuarterDeck Queue 2. */
+    if((status = gcosSetUserPrio2Tc(dev,7,2)) != GT_OK)
+    {
+        MSG_PRINT(("gcosSetUserPrio2Tc returned fail.\n"));
+        return status;
+    }
+
+
+    /*
+     *    IPv4/IPv6 (Priority 0 ~ 63, 6 bits)
+     *    Priority 0~7 is using QuaterDeck Queue 0.
+     *    Priority 8~31 is using QuaterDeck Queue 1.
+     *    Priority 32~55 is using QuaterDeck Queue 2.
+     *    Priority 56~63 is using QuaterDeck Queue 3.
+    */
+
+    /*    Priority 0~7 is using QuaterDeck Queue 0. */
+    for(priority=0; priority<8; priority++)
+    {
+        if((status = gcosSetDscp2Tc(dev,priority,0)) != GT_OK)
+        {
+            MSG_PRINT(("gcosSetDscp2Tc returned fail.\n"));
+            return status;
+        }
+    }
+
+    /*    Priority 8~31 is using QuaterDeck Queue 1. */
+    for(priority=8; priority<32; priority++)
+    {
+        if((status = gcosSetDscp2Tc(dev,priority,1)) != GT_OK)
+        {
+            MSG_PRINT(("gcosSetDscp2Tc returned fail.\n"));
+            return status;
+        }
+    }
+
+    /*    Priority 32~55 is using QuaterDeck Queue 2. */
+    for(priority=32; priority<56; priority++)
+    {
+        if((status = gcosSetDscp2Tc(dev,priority,2)) != GT_OK)
+        {
+            MSG_PRINT(("gcosSetDscp2Tc returned fail.\n"));
+            return status;
+        }
+    }
+
+    /*    Priority 56~63 is using QuaterDeck Queue 3. */
+    for(priority=56; priority<64; priority++)
+    {
+        if((status = gcosSetDscp2Tc(dev,priority,3)) != GT_OK)
+        {
+            MSG_PRINT(("gcosSetDscp2Tc returned fail.\n"));
+            return status;
+        }
+    }
+
+    /*
+     * Each port's default priority is set to 1.
+    */
+    for(port=0; port<dev->numOfPorts; port++)
+    {
+        if((status = gcosSetPortDefaultTc(dev,port,1)) != GT_OK)
+        {
+            MSG_PRINT(("gcosSetDscp2Tc returned fail.\n"));
+            return status;
+        }
+    }
+
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/QoSSetup/readme.txt b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/QoSSetup/readme.txt
new file mode 100644
index 000000000000..37ddca3c2fd3
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/QoSSetup/readme.txt
@@ -0,0 +1,32 @@
+========================================================================
+		Priority Queue Setup for QoS
+========================================================================
+
+QuarterDeck Device has 4 Priority Queues to support QoS. The priority of
+a frame is determined by (in priority order):
+	1) The CPU's Trailer if enabled on the port.
+	2) The DA address in the frame if the frame's DA address is in the address
+		database with a priority defined.
+	3) The IEEE 802.3ac Tag containing IEEE 802.1p priority information
+		if enabled on the port.
+	4) The IPv4 Type of Service (TOS)/DiffServ field or IPv6 Traffic Class
+		field if enabled on the port.
+	5) The Port's default priority defined in DefPri.
+
+This sample program will deal with the above 3) ~ 5) cases.
+
+qos.c
+	sampleQoS will enable using both IEEE 802.3ac Tag and IPv4/IPv6 Traffic
+	Class field and IEEE 802.3ac has a higher priority than IPv4/IPv6.
+	The following is the QoS mapping programmed by sampleQos:
+	1) IEEE 802.3ac Tag (Priority 0 ~ 7, 3 bits)
+		Priority 1~3 is using QuarterDeck Queue 0.
+		Priority 0,4 is using QuarterDeck Queue 1.
+		Priority 6,7 is using QuarterDeck Queue 2.
+		Priority 5 is using QuarterDeck Queue 3.
+	2) IPv4/IPv6 (Priority 0 ~ 63, 6 bits)
+		Priority 0~7 is using QuaterDeck Queue 0.
+		Priority 8~31 is using QuaterDeck Queue 1.
+		Priority 32~55 is using QuaterDeck Queue 2.
+		Priority 56~63 is using QuaterDeck Queue 3.
+	3) Each port's default priority is set to 1.
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/README b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/README
new file mode 100644
index 000000000000..dc763903eccc
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/README
@@ -0,0 +1,2 @@
+This directory includes sample code that demonstrats how to use the API of
+the DSDT Suite. There is a readme.txt file in each of the subdirectories.
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/RMON/readme.txt b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/RMON/readme.txt
new file mode 100644
index 000000000000..018f268c7014
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/RMON/readme.txt
@@ -0,0 +1,28 @@
+========================================================================
+		RMON Counters
+========================================================================
+
+This sample shows how to read/clear RMON counter in the device.
+Please notes that there are three groups of RMON counters in Marvell SOHO Switchs.
+Each group has different set of counters. Therefore it is necessary to find out
+which group the switch device belongs to.
+
+Group for GT_STATS_COUNTERS : 88E6021, 88E6063, and 88E6083
+Group for GT_STATS_COUNTERS2 : 88E6183
+Group for GT_STATS_COUNTERS3 : 88E6093, 88E6095, 88E6185, and 88E6065
+
+rmon.c
+	sampleClearRMONCounter
+		shows how to reset RMON counter for the given port
+
+	sampleGetRMONCounter
+		shows how to read RMON counter for each port
+        this routine is for the devices that use GT_STATS_COUNTERS.
+
+	sampleGetRMONCounter2
+		shows how to read RMON counter for each port
+        this routine is for the devices that use GT_STATS_COUNTERS2.
+
+	sampleGetRMONCounter3
+		shows how to read RMON counter for each port
+        this routine is for the devices that use GT_STATS_COUNTERS3.
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/RMON/rmon.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/RMON/rmon.c
new file mode 100644
index 000000000000..ed9c8784d76d
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/RMON/rmon.c
@@ -0,0 +1,237 @@
+#include <Copyright.h>
+/********************************************************************************
+* rmon.c
+*
+* DESCRIPTION:
+*        This sample shows how to read/clear RMON counter in the device
+*
+* DEPENDENCIES:   NONE.
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+
+#include "msSample.h"
+
+
+/*
+ * This sample is for every device that support RMON counter.
+*/
+GT_STATUS sampleClearRMONCounter(GT_QD_DEV *dev,GT_LPORT port)
+{
+    GT_STATUS status;
+
+    if((status = gstatsFlushPort(dev,port)) != GT_OK)
+    {
+        MSG_PRINT(("gstatsFlushPort returned fail (%#x).\n",status));
+        return status;
+    }
+
+    return GT_OK;
+}
+
+/*
+ * This sample is for 88E6021, 88E6063, and 88E6083.
+*/
+void sampleDisplayCounter(GT_STATS_COUNTER_SET *statsCounter)
+{
+    MSG_PRINT(("InUnicasts    %08i    ", statsCounter->InUnicasts));
+    MSG_PRINT(("InBroadcasts  %08i   \n", statsCounter->InBroadcasts));
+    MSG_PRINT(("InPause       %08i    ", statsCounter->InPause));
+    MSG_PRINT(("InMulticasts  %08i   \n", statsCounter->InMulticasts));
+    MSG_PRINT(("InFCSErr      %08i    ", statsCounter->InFCSErr));
+    MSG_PRINT(("AlignErr      %08i   \n", statsCounter->AlignErr));
+    MSG_PRINT(("InGoodOctets  %08i    ", statsCounter->InGoodOctets));
+    MSG_PRINT(("InBadOctets   %08i   \n", statsCounter->InBadOctets));
+    MSG_PRINT(("Undersize     %08i    ", statsCounter->Undersize));
+    MSG_PRINT(("Fragments     %08i   \n", statsCounter->Fragments));
+    MSG_PRINT(("In64Octets    %08i    ", statsCounter->In64Octets));
+    MSG_PRINT(("In127Octets   %08i   \n", statsCounter->In127Octets));
+    MSG_PRINT(("In255Octets   %08i    ", statsCounter->In255Octets));
+    MSG_PRINT(("In511Octets   %08i   \n", statsCounter->In511Octets));
+    MSG_PRINT(("In1023Octets  %08i    ", statsCounter->In1023Octets));
+    MSG_PRINT(("InMaxOctets   %08i   \n", statsCounter->InMaxOctets));
+    MSG_PRINT(("Jabber        %08i    ", statsCounter->Jabber));
+    MSG_PRINT(("Oversize      %08i   \n", statsCounter->Oversize));
+    MSG_PRINT(("InDiscards    %08i    ", statsCounter->InDiscards));
+    MSG_PRINT(("Filtered      %08i   \n", statsCounter->Filtered));
+    MSG_PRINT(("OutUnicasts   %08i    ", statsCounter->OutUnicasts));
+    MSG_PRINT(("OutBroadcasts %08i   \n", statsCounter->OutBroadcasts));
+    MSG_PRINT(("OutPause      %08i    ", statsCounter->OutPause));
+    MSG_PRINT(("OutMulticasts %08i   \n", statsCounter->OutMulticasts));
+    MSG_PRINT(("OutFCSErr     %08i    ", statsCounter->OutFCSErr));
+    MSG_PRINT(("OutGoodOctets %08i   \n", statsCounter->OutGoodOctets));
+    MSG_PRINT(("Out64Octets   %08i    ", statsCounter->Out64Octets));
+    MSG_PRINT(("Out127Octets  %08i   \n", statsCounter->Out127Octets));
+    MSG_PRINT(("Out255Octets  %08i    ", statsCounter->Out255Octets));
+    MSG_PRINT(("Out511Octets  %08i   \n", statsCounter->Out511Octets));
+    MSG_PRINT(("Out1023Octets %08i    ", statsCounter->Out1023Octets));
+    MSG_PRINT(("OutMaxOctets  %08i   \n", statsCounter->OutMaxOctets));
+    MSG_PRINT(("Collisions    %08i    ", statsCounter->Collisions));
+    MSG_PRINT(("Late          %08i   \n", statsCounter->Late));
+    MSG_PRINT(("Excessive     %08i    ", statsCounter->Excessive));
+    MSG_PRINT(("Multiple      %08i   \n", statsCounter->Multiple));
+    MSG_PRINT(("Single        %08i    ", statsCounter->Single));
+    MSG_PRINT(("Deferred      %08i   \n", statsCounter->Deferred));
+    MSG_PRINT(("OutDiscards   %08i   \n", statsCounter->OutDiscards));
+}
+
+/*
+ * This sample is for 88E6021, 88E6063, and 88E6083.
+*/
+GT_STATUS sampleGetRMONCounter(GT_QD_DEV *dev)
+{
+    GT_STATUS status;
+    GT_LPORT port;
+    GT_STATS_COUNTER_SET    statsCounterSet;
+
+    for(port=0; port<dev->numOfPorts; port++)
+    {
+        MSG_PRINT(("Port %i :\n",port));
+
+        if((status = gstatsGetPortAllCounters(dev,port,&statsCounterSet)) != GT_OK)
+        {
+            MSG_PRINT(("gstatsGetPortAllCounters returned fail (%#x).\n",status));
+            return status;
+        }
+
+        sampleDisplayCounter(&statsCounterSet);
+
+    }
+
+    return GT_OK;
+}
+
+
+/*
+ * This sample is for 88E6183
+*/
+void sampleDisplayCounter2(GT_STATS_COUNTER_SET2 *statsCounter)
+{
+    MSG_PRINT(("InGoodOctetsHi  %08i    ", statsCounter->InGoodOctetsHi));
+    MSG_PRINT(("InGoodOctetsLo  %08i   \n", statsCounter->InGoodOctetsLo));
+    MSG_PRINT(("InBadOctets     %08i    ", statsCounter->InBadOctets));
+    MSG_PRINT(("OutDiscards     %08i   \n", statsCounter->OutDiscards));
+    MSG_PRINT(("InGoodFrames    %08i    ", statsCounter->InGoodFrames));
+    MSG_PRINT(("InBadFrames     %08i   \n", statsCounter->InBadFrames));
+    MSG_PRINT(("InBroadcasts    %08i    ", statsCounter->InBroadcasts));
+    MSG_PRINT(("InMulticasts    %08i   \n", statsCounter->InMulticasts));
+    MSG_PRINT(("64Octets        %08i    ", statsCounter->Octets64));
+    MSG_PRINT(("127Octets       %08i   \n", statsCounter->Octets127));
+    MSG_PRINT(("255Octets       %08i    ", statsCounter->Octets255));
+    MSG_PRINT(("511Octets       %08i   \n", statsCounter->Octets511));
+    MSG_PRINT(("1023Octets      %08i    ", statsCounter->Octets1023));
+    MSG_PRINT(("MaxOctets       %08i   \n", statsCounter->OctetsMax));
+    MSG_PRINT(("OutOctetsHi     %08i    ", statsCounter->OutOctetsHi));
+    MSG_PRINT(("OutOctetsLo     %08i   \n", statsCounter->OutOctetsLo));
+    MSG_PRINT(("OutFrames       %08i    ", statsCounter->OutFrames));
+    MSG_PRINT(("Excessive       %08i   \n", statsCounter->Excessive));
+    MSG_PRINT(("OutMulticasts   %08i    ", statsCounter->OutMulticasts));
+    MSG_PRINT(("OutBroadcasts   %08i    ", statsCounter->OutBroadcasts));
+    MSG_PRINT(("InBadMACCtrl    %08i    ", statsCounter->InBadMACCtrl));
+    MSG_PRINT(("OutPause        %08i   \n", statsCounter->OutPause));
+    MSG_PRINT(("InPause         %08i    ", statsCounter->InPause));
+    MSG_PRINT(("InDiscards      %08i   \n", statsCounter->InDiscards));
+    MSG_PRINT(("Undersize       %08i    ", statsCounter->Undersize));
+    MSG_PRINT(("Fragments       %08i   \n", statsCounter->Fragments));
+    MSG_PRINT(("Oversize        %08i    ", statsCounter->Oversize));
+    MSG_PRINT(("Jabber          %08i   \n", statsCounter->Jabber));
+    MSG_PRINT(("MACRcvErr       %08i    ", statsCounter->MACRcvErr));
+    MSG_PRINT(("InFCSErr        %08i   \n", statsCounter->InFCSErr));
+    MSG_PRINT(("Collisions      %08i    ", statsCounter->Collisions));
+    MSG_PRINT(("Late            %08i   \n", statsCounter->Late));
+}
+
+/*
+ * This sample is for 88E6183
+*/
+GT_STATUS sampleGetRMONCounter2(GT_QD_DEV *dev)
+{
+    GT_STATUS status;
+    GT_LPORT port;
+    GT_STATS_COUNTER_SET2 statsCounterSet;
+
+    for(port=0; port<dev->numOfPorts; port++)
+    {
+        MSG_PRINT(("Port %i :\n",port));
+
+        if((status = gstatsGetPortAllCounters2(dev,port,&statsCounterSet)) != GT_OK)
+        {
+            MSG_PRINT(("gstatsGetPortAllCounters2 returned fail (%#x).\n",status));
+            return status;
+        }
+
+        sampleDisplayCounter2(&statsCounterSet);
+
+    }
+
+    return GT_OK;
+}
+
+
+
+/*
+ * This sample is for 88E6093, 88E6095, 88E6185, and 88E6065
+*/
+void sampleDisplayCounter3(GT_STATS_COUNTER_SET3 *statsCounter)
+{
+    MSG_PRINT(("InGoodOctetsLo  %08i    ", statsCounter->InGoodOctetsLo));
+    MSG_PRINT(("InGoodOctetsHi  %08i   \n", statsCounter->InGoodOctetsHi));
+    MSG_PRINT(("InBadOctets     %08i    ", statsCounter->InBadOctets));
+    MSG_PRINT(("OutFCSErr       %08i   \n", statsCounter->OutFCSErr));
+    MSG_PRINT(("InUnicasts      %08i    ", statsCounter->InUnicasts));
+    MSG_PRINT(("Deferred        %08i   \n", statsCounter->Deferred));
+    MSG_PRINT(("InBroadcasts    %08i    ", statsCounter->InBroadcasts));
+    MSG_PRINT(("InMulticasts    %08i   \n", statsCounter->InMulticasts));
+    MSG_PRINT(("64Octets        %08i    ", statsCounter->Octets64));
+    MSG_PRINT(("127Octets       %08i   \n", statsCounter->Octets127));
+    MSG_PRINT(("255Octets       %08i    ", statsCounter->Octets255));
+    MSG_PRINT(("511Octets       %08i   \n", statsCounter->Octets511));
+    MSG_PRINT(("1023Octets      %08i    ", statsCounter->Octets1023));
+    MSG_PRINT(("MaxOctets       %08i   \n", statsCounter->OctetsMax));
+    MSG_PRINT(("OutOctetsLo     %08i    ", statsCounter->OutOctetsLo));
+    MSG_PRINT(("OutOctetsHi     %08i   \n", statsCounter->OutOctetsHi));
+    MSG_PRINT(("OutUnicasts     %08i    ", statsCounter->OutUnicasts));
+    MSG_PRINT(("Excessive       %08i   \n", statsCounter->Excessive));
+    MSG_PRINT(("OutMulticasts   %08i    ", statsCounter->OutMulticasts));
+    MSG_PRINT(("OutBroadcasts   %08i   \n", statsCounter->OutBroadcasts));
+    MSG_PRINT(("Single          %08i    ", statsCounter->Single));
+    MSG_PRINT(("OutPause        %08i   \n", statsCounter->OutPause));
+    MSG_PRINT(("InPause         %08i    ", statsCounter->InPause));
+    MSG_PRINT(("Multiple        %08i   \n", statsCounter->Multiple));
+    MSG_PRINT(("Undersize       %08i    ", statsCounter->Undersize));
+    MSG_PRINT(("Fragments       %08i   \n", statsCounter->Fragments));
+    MSG_PRINT(("Oversize        %08i    ", statsCounter->Oversize));
+    MSG_PRINT(("Jabber          %08i   \n", statsCounter->Jabber));
+    MSG_PRINT(("InMACRcvErr     %08i    ", statsCounter->InMACRcvErr));
+    MSG_PRINT(("InFCSErr        %08i   \n", statsCounter->InFCSErr));
+    MSG_PRINT(("Collisions      %08i    ", statsCounter->Collisions));
+    MSG_PRINT(("Late            %08i   \n", statsCounter->Late));
+}
+
+
+/*
+ * This sample is for 88E6093, 88E6095, 88E6185, and 88E6065
+*/
+GT_STATUS sampleGetRMONCounter3(GT_QD_DEV *dev)
+{
+    GT_STATUS status;
+    GT_LPORT port;
+    GT_STATS_COUNTER_SET3 statsCounterSet;
+
+    for(port=0; port<dev->numOfPorts; port++)
+    {
+        MSG_PRINT(("Port %i :\n",port));
+
+        if((status = gstatsGetPortAllCounters3(dev,port,&statsCounterSet)) != GT_OK)
+        {
+            MSG_PRINT(("gstatsGetPortAllCounters3 returned fail (%#x).\n",status));
+            return status;
+        }
+
+        sampleDisplayCounter3(&statsCounterSet);
+
+    }
+
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Trailer/readme.txt b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Trailer/readme.txt
new file mode 100644
index 000000000000..a1edb65e94b2
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Trailer/readme.txt
@@ -0,0 +1,12 @@
+========================================================================
+		CPU Trailer Mode Enable or Disable
+========================================================================
+
+This sample shows how to enable/disable CPU port's ingress trailer mode
+and engress trailer mode.
+For more information about trailer mode, please refer to 88E6052 Spec.
+section 3.5.5 and section 3.7.3
+
+trailer.c
+	sampleCPUTrailerEnable can be used to enable or disable CPU port's
+	trailer mode
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Trailer/trailer.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Trailer/trailer.c
new file mode 100644
index 000000000000..fd90411fe238
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/Trailer/trailer.c
@@ -0,0 +1,50 @@
+#include <Copyright.h>
+/********************************************************************************
+* trailer.c
+*
+* DESCRIPTION:
+*        This sample shows how to enable/disable CPU port's ingress and egress
+*        Trailer mode.
+*
+* DEPENDENCIES:   NONE.
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+
+#include "msSample.h"
+
+GT_STATUS sampleCPUTrailerEnable(GT_QD_DEV *dev, GT_BOOL en)
+{
+    GT_STATUS status;
+    GT_INGRESS_MODE    inMode;
+
+    if (en)    /* Enable Trailer Mode */
+    {
+        inMode = GT_TRAILER_INGRESS;
+    }
+    else
+    {
+        inMode = GT_UNMODIFY_INGRESS;
+    }
+
+    /*
+     *    Enable CPU's Ingress Trailer
+    */
+    if((status = gprtSetIngressMode(dev,dev->cpuPortNum, inMode)) != GT_OK)
+    {
+        MSG_PRINT(("gprtSetIngressMode return Failed\n"));
+        return status;
+    }
+
+    /*
+     *    Enable CPU's Egress Trailer
+    */
+    if((status = gprtSetTrailerMode(dev,dev->cpuPortNum, en)) != GT_OK)
+    {
+        MSG_PRINT(("gprtSetTrailerMode return Failed\n"));
+        return status;
+    }
+
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/VlanSetup/hgVlan.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/VlanSetup/hgVlan.c
new file mode 100644
index 000000000000..befec66ae6f0
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/VlanSetup/hgVlan.c
@@ -0,0 +1,135 @@
+#include <Copyright.h>
+/********************************************************************************
+* hgVlan.c
+*
+* DESCRIPTION:
+*       Setup the VLAN table of QuaterDeck so that it can be used as a Home
+*        Gateway.
+*
+*
+* DEPENDENCIES:   None.
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+#include "msSample.h"
+
+static GT_STATUS sampleHomeGatewayVlan(GT_QD_DEV *dev,
+                               GT_LPORT numOfPorts,
+                       GT_LPORT cpuPort);
+
+/*
+ *  Get the required parameter from QuarterDeck driver.
+ *    Notes: This routine should be called after QuarterDeck Driver has been initialized.
+ *        (Refer to Initialization Sample)
+*/
+
+GT_STATUS sampleVlanSetup(GT_QD_DEV *dev)
+{
+    sampleHomeGatewayVlan(dev,dev->numOfPorts, dev->cpuPortNum);
+
+    return GT_OK;
+}
+
+
+/*
+ *    WAN Port (Port 0) and CPU Port (Port 5) are in VLAN 1 and
+ *    all ports (including CPU Port) except WAN Port are in VLAN 2.
+ *    1) Set PVID for each port. (CPU port has PVID 2, which is the same as LAN)
+ *    2) Set Port Based VLAN Map for each port. (CPU port's VLAN Map is set for all LAN ports)
+ *  Notes:
+ *        1) Trailer Mode
+ *            When Ethernet Device, which is directly connected to CPU port, sends out a packet
+ *            to WAN, DPV in Trailer Tag should have WAN port bit set (bit 0 in this case), and
+ *            to LAN, Trailer Tag should be set to 0.
+ *            Restriction : Only one group of VLAN can have multiple ports.
+ *        2) Header Mode
+ *            When Ethernet Device, which is directly connected to CPU port, sends out a packet
+ *            to WAN, VlanTable in Header Tag should have WAN ports bits set (bit 0 in this case), and
+ *            to LAN, VlanTable in Header Tag should have LAN ports bits set (bit 1~4 and 6 in this case)
+*/
+static GT_STATUS sampleHomeGatewayVlan(GT_QD_DEV *dev,GT_LPORT numOfPorts, GT_LPORT cpuPort)
+{
+    GT_STATUS status;
+    GT_LPORT index,port,portToSet;
+    GT_LPORT portList[MAX_SWITCH_PORTS];
+
+    /*
+     *  set PVID for each port.
+     *    the first port(port 0, WAN) has default VID 2 and all others has 1.
+     */
+
+    if((status = gvlnSetPortVid(dev,0,2)) != GT_OK)
+    {
+        MSG_PRINT(("gprtSetPortVid returned fail.\n"));
+        return status;
+    }
+
+    for (port=1; port<numOfPorts; port++)
+    {
+        if((status = gvlnSetPortVid(dev,port,1)) != GT_OK)
+        {
+            MSG_PRINT(("gprtSetPortVid returned fail.\n"));
+            return status;
+        }
+    }
+
+    /*
+     *  set Port VLAN Mapping.
+     *    port 0 (WAN) and cpu port are in a vlan 2.
+     *    And all the rest ports (LAN) and cpu port are in a vlan 1.
+     */
+
+    /* port 0 : set cpuPort only */
+    portList[0] = cpuPort;
+    if((status = gvlnSetPortVlanPorts(dev,0,portList,1)) != GT_OK)
+    {
+        MSG_PRINT(("gvlnSetPortVlanPorts returned fail.\n"));
+        return status;
+    }
+
+    /* set all ports except port 0 and itself */
+    for (portToSet=1; portToSet<numOfPorts; portToSet++)
+    {
+        /* port 0 and cpuPort will be taken cared seperately. */
+        if (portToSet == cpuPort)
+        {
+            continue;
+        }
+
+        index = 0;
+        for (port=1; port<numOfPorts; port++)
+        {
+            if (port == portToSet)
+            {
+                continue;
+            }
+            portList[index++] = port;
+        }
+
+        if((status = gvlnSetPortVlanPorts(dev,(GT_U8)portToSet,portList,index)) != GT_OK)
+        {
+            MSG_PRINT(("gvlnSetPortVlanPorts returned fail.\n"));
+            return status;
+        }
+    }
+
+    /* cpuPort : set all port except cpuPort and WAN port */
+    index = 0;
+    for (port=1; port<numOfPorts; port++)
+    {
+        if (port == cpuPort)
+        {
+            continue;
+        }
+        portList[index++] = port;
+    }
+
+    if((status = gvlnSetPortVlanPorts(dev,cpuPort,portList,index)) != GT_OK)
+    {
+        MSG_PRINT(("gvlnSetPortVlanPorts returned fail.\n"));
+        return status;
+    }
+
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/VlanSetup/readme.txt b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/VlanSetup/readme.txt
new file mode 100644
index 000000000000..717729e3d603
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/VlanSetup/readme.txt
@@ -0,0 +1,31 @@
+========================================================================
+		VLAN Setup for Home Gateway Solution
+========================================================================
+
+Vlan Setup Program will show how to setup the QuaterDeck's vlan
+for Home Gateway.
+In the sample program, Port 0 (WAN port) and CPU Port (Port 5) are in
+a VLAN 2, and Port 1 ~ Port 6 (including CPU Port) are in a VLAN 1.
+
+VLAN MAP setting for the given sample program is:
+Port 0 (WAN) = 0x20,
+Port 1 (LAN) = 0x7C,
+Port 2 (LAN) = 0x7A,
+Port 3 (LAN) = 0x76,
+Port 4 (LAN) = 0x6E,
+Port 5 (CPU) = 0x5E, and
+Port 6 (LAN) = 0x3E
+
+Notes:
+	1) Trailer Mode is enabled:
+		When Ethernet Device, which is directly connected to CPU port, sends out a packet
+		to WAN, DPV in Trailer Tag should have WAN port bit set (bit 0 in this case), and
+		to LAN, Trailer Tag should be set to 0.
+		Restriction : Only one group of VLANs can have multiple ports.
+	2) Header Mode is enabled:
+		When Ethernet Device, which is directly connected to CPU port, sends out a packet
+		to WAN, VlanTable in Header Tag should have WAN ports bits set (bit 0 in this case), and
+		to LAN, VlanTable in Header Tag should have LAN ports bits set (bit 1~4 and 6 in this case)
+
+hgVlan.c
+	sampleHGVlanSetup is the main function for the Home Gateway setup.
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/makefile b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/makefile
new file mode 100644
index 000000000000..a463ef3305f5
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/makefile
@@ -0,0 +1,53 @@
+# makefile - build main object file
+#
+# modification history
+# --------------------
+# 04-15-02,mj	created
+#
+#######################################################################
+
+DEMONAME = sample
+WORK_TO_DO = $(DEMONAME).o
+
+exe : $(WORK_TO_DO)
+
+# Include common variable definitions
+include $(DSDT_TOOL_DIR)\make.defs
+
+.PHONY : exe
+
+QDLIB = $(LIB_DIR)/$(DSDT_PROJ_NAME).o
+
+OBJDIRS	= $(subst /,\,$(dir $(QDLIB)))
+
+CSOURCES = FlowControl/flowCtrl.c Initialization/osSem.c Initialization/ev96122mii.c \
+           Initialization/qdSim.c Initialization/msApiInit.c  \
+	   MACAddress/macAddr.c QoSSetup/qos.c VlanSetup/hgVlan.c Interrupt/qdInt.c \
+		   Trailer/trailer.c RMON/rmon.c 802.1Q/802_1q.c Header/header.c \
+		   CableTest/cableTest.c CableTest/advCableTest.c PortMonitor/portMonitor.c MultiDevice/msApiInit.c \
+			CrossChipTrunk/crossChipTrunk.c MinimizeCPUTraffic\minimizeCPUTraffic.c	\
+			PktGen/phyPktGenSample.c PIRL/pirl.c PIRL/pirl2.c ptp/ptp.c
+
+EXTRA_INCLUDE = -I./Include -I./Initialization
+COBJECTS  	= $(CSOURCES:.c=.o)
+AOBJECTS  	= $(ASOURCES:.s=.o)
+ifeq ($(OBJECTS),)
+OBJECTS  	= $(COBJECTS) $(AOBJECTS)
+endif
+
+OBJECTS_MS= $(subst /,\,$(OBJECTS))
+
+$(DEMONAME).o : $(OBJECTS) $(QDLIB)
+	$(LD) $(LDFLAGS) -Map $(DEMONAME).map -o $(DEMONAME).o $(OBJECTS) $(QDLIB)
+
+$(OBJECTS) : %.o : %.c
+.c.o :
+	$(CC) $(CFLAGS) $(EXTRA_DEFINE) $(EXTRA_INCLUDE) $(ADDED_CFLAGS) -c $< -o $@
+
+.PHONY : clean
+clean :
+	@for %x in ($(OBJECTS_MS)) do \
+		$(RM) %x
+	$(RM) $(DEMONAME).*
+
+#end of file
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/tcam/readme.txt b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/tcam/readme.txt
new file mode 100644
index 000000000000..983f28cf0296
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/tcam/readme.txt
@@ -0,0 +1,7 @@
+==================================================================
+                TCAM (Ternary Content Addressable Memory) Setup
+==================================================================
+
+88E3520 device family Switch Devices support TCAM.
+
+This sample shows how to use TCAM APIs.
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/tcam/tcam.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/tcam/tcam.c
new file mode 100644
index 000000000000..996d61595b8d
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/sample/tcam/tcam.c
@@ -0,0 +1,408 @@
+#include <Copyright.h>
+/********************************************************************************
+* tcam.c
+*
+* DESCRIPTION:
+*      iHow to use TCAM API functions
+*
+* DEPENDENCIES:   Platform.
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+#include "msSample.h"
+#include "gtHwCntl.h"
+
+static void testDisplayStatus(GT_STATUS status)
+{
+    switch(status)
+    {
+        case GT_OK :
+            MSG_PRINT(("Passed.\n"));
+            break;
+        case GT_FAIL :
+            MSG_PRINT(("Failed.\n"));
+            break;
+        case GT_BAD_PARAM :
+            MSG_PRINT(("Bad Parameter.\n"));
+            break;
+        case GT_NOT_SUPPORTED :
+            MSG_PRINT(("Not Supported.\n"));
+            break;
+        case GT_NOT_FOUND :
+            MSG_PRINT(("Not Found.\n"));
+            break;
+        case GT_NO_MORE :
+            MSG_PRINT(("No more Item.\n"));
+            break;
+        case GT_NO_SUCH :
+            MSG_PRINT(("No Such Item.\n"));
+            break;
+        default:
+            MSG_PRINT(("Failed.\n"));
+            break;
+    }
+}
+
+/* sample TCAM */
+void getTcamFrameHd(GT_U8 destAddr[], GT_U8 srcAddr[], GT_U16 *tag,
+        GT_U16 *pri, GT_U16 *vid, GT_U16 *ethType, GT_TCAM_FRM_HD *tcamHrData)
+{
+  MSG_PRINT(("Get TCAM Frame header.\n"));
+
+  memcpy(destAddr, tcamHrData->paraFrmHd.destAddr, 6);
+  memcpy(srcAddr, tcamHrData->paraFrmHd.srcAddr, 6);
+  *tag = tcamHrData->paraFrmHd.tag;
+  *pri = (tcamHrData->paraFrmHd.priVid>>12)&0xf;
+  *vid = tcamHrData->paraFrmHd.priVid&0xfff;
+  *ethType = tcamHrData->paraFrmHd.ethType;
+}
+
+void setTcamFrameHd(GT_U8 destAddr[], GT_U8 srcAddr[], GT_U16 tag,
+GT_U16 pri, GT_U16 vid, GT_U16 ethType, GT_TCAM_FRM_HD *tcamHrData)
+{
+  MSG_PRINT(("Set TCAM Frame header.\n"));
+
+  memcpy(tcamHrData->paraFrmHd.destAddr, destAddr, 6);
+  memcpy(tcamHrData->paraFrmHd.srcAddr, srcAddr, 6);
+  tcamHrData->paraFrmHd.tag = tag;
+  tcamHrData->paraFrmHd.priVid = (pri<<12)|(vid&0xfff);
+  tcamHrData->paraFrmHd.ethType = ethType;
+}
+
+static void displayTcamFrameHd(GT_TCAM_FRM_HD *tcamHrData)
+{
+  MSG_PRINT(("TCAM Frame header.\n"));
+
+  MSG_PRINT(("Dest address: %02x:%02x:%02x:%02x:%02x:%02x\n",
+  tcamHrData->paraFrmHd.destAddr[0], tcamHrData->paraFrmHd.destAddr[1],
+  tcamHrData->paraFrmHd.destAddr[2], tcamHrData->paraFrmHd.destAddr[3],
+  tcamHrData->paraFrmHd.destAddr[4], tcamHrData->paraFrmHd.destAddr[5]
+  ));
+  MSG_PRINT(("Src address: %02x:%02x:%02x:%02x:%02x:%02x\n",
+  tcamHrData->paraFrmHd.srcAddr[0], tcamHrData->paraFrmHd.srcAddr[1],
+  tcamHrData->paraFrmHd.srcAddr[2], tcamHrData->paraFrmHd.srcAddr[3],
+  tcamHrData->paraFrmHd.srcAddr[4], tcamHrData->paraFrmHd.srcAddr[5]
+  ));
+  MSG_PRINT(("Tag: %x\n", tcamHrData->paraFrmHd.tag));
+  MSG_PRINT(("PRI: %x\n", (tcamHrData->paraFrmHd.priVid&0xf000)>>12));
+  MSG_PRINT(("VID: %x\n", tcamHrData->paraFrmHd.priVid&0xfff));
+  MSG_PRINT(("ether type: %x\n", tcamHrData->paraFrmHd.ethType));
+
+}
+
+/* show=0: no display.
+   show=1: display basic parameters.
+   show=2: display basic and data parameters.
+   show=4: display frame raw data.
+   show=0xf: display all parameters. */
+void displayTcamData(GT_TCAM_DATA *tcamData, int show)
+{
+  int i;
+  if(!show)
+    return;
+  MSG_PRINT(("TCAM data.\n"));
+
+#if 1
+  if(show&4)
+  {
+    MSG_PRINT(("\nFirst part of TCAM Frame\n"));
+    for(i=0; i<28; i++)
+    {
+      if(i%14==0)
+      MSG_PRINT(("\nframeOctet[%02d]: %04x ", i, tcamData->rawFrmData[0].frame0.frame[i]));
+      else
+      MSG_PRINT(("%04x ", tcamData->rawFrmData[0].frame0.frame[i]));
+    }
+    for(i=0; i<28; i++)
+    {
+      if(i%14==0)
+      MSG_PRINT(("\nframeOctet[%02d]: %04x ", i, tcamData->rawFrmData[0].frame1.frame[i]));
+      else
+      MSG_PRINT(("%04x ", tcamData->rawFrmData[0].frame1.frame[i]));
+    }
+    for(i=0; i<28; i++)
+    {
+      if(i%14==0)
+      MSG_PRINT(("\nframeOctet[%02d]: %04x ", i, tcamData->rawFrmData[0].frame2.frame[i]));
+      else
+      MSG_PRINT(("%04x ", tcamData->rawFrmData[0].frame2.frame[i]));
+    }
+    MSG_PRINT(("\nSecond part of TCAM Frame\n"));
+    for(i=0; i<28; i++)
+    {
+      if(i%14==0)
+      MSG_PRINT(("\nframeOctet[%02d]: %04x ", i, tcamData->rawFrmData[1].frame0.frame[i]));
+      else
+      MSG_PRINT(("%04x ", tcamData->rawFrmData[1].frame0.frame[i]));
+    }
+    for(i=0; i<28; i++)
+    {
+      if(i%14==0)
+      MSG_PRINT(("\nframeOctet[%02d]: %04x ", i, tcamData->rawFrmData[1].frame1.frame[i]));
+      else
+      MSG_PRINT(("%04x ", tcamData->rawFrmData[1].frame1.frame[i]));
+    }
+    for(i=0; i<28; i++)
+    {
+      if(i%14==0)
+      MSG_PRINT(("\nframeOctet[%02d]: %04x ", i, tcamData->rawFrmData[1].frame2.frame[i]));
+      else
+      MSG_PRINT(("%04x ", tcamData->rawFrmData[1].frame2.frame[i]));
+    }
+  }
+#endif
+
+  if(show&3)
+  {
+  MSG_PRINT(("frameType: %x frameTypeMask: %x\n", tcamData->frameType, tcamData->frameTypeMask));
+  MSG_PRINT(("spv: %x spvMask: %x\n", tcamData->spv, tcamData->spvMask));
+  MSG_PRINT(("ppri: %x ppriMask: %x\n", tcamData->ppri, tcamData->ppriMask));
+  MSG_PRINT(("pvid: %x pvidMask: %x\n", tcamData->pvid, tcamData->pvidMask));
+
+  displayTcamFrameHd((GT_TCAM_FRM_HD *)tcamData->frameOctet);
+  }
+#if 1
+  if(show&2)
+  for(i=0; i<96; i++)
+    MSG_PRINT(("frameOctet[%d]: %x frameOctetMask[%d]: %x\n",  \
+             i, tcamData->frameOctet[i], i, tcamData->frameOctetMask[i]));
+#endif
+
+  if(show&3)
+  {
+  MSG_PRINT(("continu: %x\n", tcamData->continu));
+  MSG_PRINT(("interrupt: %x\n", tcamData->interrupt));
+  MSG_PRINT(("IncTcamCtr: %x\n", tcamData->IncTcamCtr));
+  MSG_PRINT(("vidData: %x\n", tcamData->vidData));
+  MSG_PRINT(("nextId: %x\n", tcamData->nextId));
+  MSG_PRINT(("qpriData: %x\n", tcamData->qpriData));
+  MSG_PRINT(("fpriData: %x\n", tcamData->fpriData));
+  MSG_PRINT(("qpriAvbData: %x\n", tcamData->qpriAvbData));
+  MSG_PRINT(("dpvData: %x\n", tcamData->dpvData));
+  MSG_PRINT(("factionData: %x\n", tcamData->factionData));
+  MSG_PRINT(("ldBalanceData: %x\n", tcamData->ldBalanceData));
+  MSG_PRINT(("debugPort: %x\n", tcamData->debugPort));
+  MSG_PRINT(("highHit: %x\n", tcamData->highHit));
+  MSG_PRINT(("lowHit: %x\n", tcamData->lowHit));
+  }
+}
+
+
+GT_U32 sampleTcam(GT_QD_DEV *dev)
+{
+  GT_STATUS status;
+  GT_U32 testResults = 0;
+  int i, j;
+
+  GT_TCAM_DATA     tcamData;
+  GT_U32        tcamPointer;
+#define NumberOfEntry 10
+#define  Is96Frame  1
+
+  MSG_PRINT(("TCAM API test \n"));
+
+  MSG_PRINT(("\n  TCAM API Flush all test \n"));
+  status = GT_OK;
+  if((status = gtcamFlushAll(dev)) != GT_OK)
+  {
+    MSG_PRINT(("gtcamFlushAll returned "));
+    testDisplayStatus(status);
+    return status;
+  }
+#if 1
+  for(i=0; i<NumberOfEntry; i+=2)
+  {
+    tcamPointer = i;
+    tcamData.is96Frame = Is96Frame;
+
+    MSG_PRINT(("gtcamReadTCAMData tcam entry: %d \n", (int)tcamPointer));
+    if((status = gtcamReadTCAMData(dev, tcamPointer, &tcamData)) != GT_OK)
+    {
+        MSG_PRINT(("gtcamReadTCAMData returned \n"));
+        testDisplayStatus(status);
+        return status;
+    }
+    displayTcamData(&tcamData, 4);
+  }
+#endif
+#if 0
+  MSG_PRINT(("\n  TCAM API Purge and read test \n"));
+//    displayTcamData(&tcamData, 1);
+//  for(i=0; i<NumberOfEntry; i++)
+  for(i=0; i<3; i++)
+  {
+    tcamPointer = i;
+//    memset((char *)&tcamData, 0x55+i, sizeof(GT_TCAM_DATA));
+    tcamData.is96Frame = Is96Frame;
+    MSG_PRINT(("gtcamPurgyEntry entry: %d \n", (int)tcamPointer));
+
+    if((status = gtcamPurgyEntry(dev, tcamPointer, &tcamData)) != GT_OK)
+    {
+        MSG_PRINT(("gtcamPurgyEntry returned "));
+        testDisplayStatus(status);
+        return status;
+    }
+
+    MSG_PRINT(("gtcamReadTCAMData tcam entry: %d \n", (int)tcamPointer));
+    if((status = gtcamReadTCAMData(dev, tcamPointer, &tcamData)) != GT_OK)
+    {
+        MSG_PRINT(("gtcamReadTCAMData returned \n"));
+        testDisplayStatus(status);
+        return status;
+    }
+    displayTcamData(&tcamData, 1);
+  }
+#endif
+#if 1
+    MSG_PRINT(("TCAM API Load and read test \n"));
+    /* fill Tcam data */
+  for(i=4; i<NumberOfEntry+4; i+=2)
+  {
+    tcamData.frameType = 0x5;
+    tcamData.frameTypeMask = 0x5;
+    tcamData.spv = 0x7;
+    tcamData.spvMask = 0x7;
+    tcamData.ppri = 0x9;
+    tcamData.ppriMask = 0x9;
+    tcamData. pvid = 0xb;
+    tcamData.pvidMask = 0xb;
+
+    for(j=0; j<96; j++)
+    {
+      tcamData.frameOctet[j] = 10+j;
+      tcamData.frameOctetMask[j] = 80+j;
+    }
+
+#if 1
+    {
+      GT_U8 destAddr[6] = {0x11, 0x22, 0x33, 0x44, 0x55, 0x66};
+      GT_U8 srcAddr[6] = {0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd};
+      GT_U16 tag = 0x3456;
+      GT_U16 pri = 0xa;
+      GT_U16 vid = 0x123;
+      GT_U16 ethType = 0x88f7;
+      GT_TCAM_FRM_HD *tcamHrData = (GT_TCAM_FRM_HD *)tcamData.frameOctet;
+
+      setTcamFrameHd(destAddr, srcAddr, tag, pri, vid, ethType, tcamHrData);
+    }
+#endif
+    tcamData.continu = 0x51;
+    tcamData.interrupt = 0x52;
+    tcamData.IncTcamCtr = 0x53;
+    tcamData.vidData = 0x55;
+    tcamData.nextId = 0x56;
+    tcamData.qpriData = 0x58;
+    tcamData.qpriAvbData = 0x5b;
+    tcamData.dpvData = 0x5d;
+    tcamData.factionData = 0x5f;
+    tcamData.ldBalanceData = 0x61;
+    tcamData.debugPort = 0x62;
+    tcamData.highHit = 0x63;
+    tcamData.lowHit = 0x64;
+
+    tcamPointer = i;
+    MSG_PRINT(("TCAM API Load test for entry %d\n", (int)tcamPointer));
+    if((status = gtcamLoadEntry(dev, tcamPointer, &tcamData)) != GT_OK)
+    {
+        MSG_PRINT(("gtcamLoadEntry returned "));
+        testDisplayStatus(status);
+        return status;
+    }
+  }
+  for(i=0; i<NumberOfEntry; i+=2)
+  {
+    tcamPointer = i;
+    MSG_PRINT(("TCAM API read test for entry %d\n", (int)tcamPointer));
+    if((status = gtcamReadTCAMData(dev, tcamPointer, &tcamData)) != GT_OK)
+    {
+        MSG_PRINT(("gtcamReadTCAMData returned "));
+        testDisplayStatus(status);
+        return status;
+    }
+    displayTcamData(&tcamData, 1);
+  }
+#endif
+
+#if 0
+    MSG_PRINT(("TCAM API Flush single and read test on entry %d \n", (int)tcamPointer));
+    if((status = gtcamFlushEntry(dev, tcamPointer)) != GT_OK)
+    {
+        MSG_PRINT(("gtcamFlushEntry returned "));
+        testDisplayStatus(status);
+        return status;
+    }
+    if((status = gtcamReadTCAMData(dev, tcamPointer, &tcamData)) != GT_OK)
+    {
+        MSG_PRINT(("gtcamReadTCAMData returned "));
+        testDisplayStatus(status);
+        return status;
+    }
+    displayTcamData(&tcamData, 7);
+
+#endif
+
+
+
+#if 0
+  MSG_PRINT(("\n  TCAM API Purge and read then next \n"));
+//    displayTcamData(&tcamData, 1);
+//  for(i=0; i<NumberOfEntry; i++)
+  for(i=0; i<3; i++)
+  {
+    tcamPointer = i;
+//    memset((char *)&tcamData, 0x55+i, sizeof(GT_TCAM_DATA));
+    tcamData.is96Frame = Is96Frame;
+    MSG_PRINT(("gtcamPurgyEntry entry: %d \n", (int)tcamPointer));
+
+    if((status = gtcamPurgyEntry(dev, tcamPointer, &tcamData)) != GT_OK)
+    {
+        MSG_PRINT(("gtcamPurgyEntry returned "));
+        testDisplayStatus(status);
+        return status;
+    }
+
+    MSG_PRINT(("gtcamReadTCAMData tcam entry: %d \n", (int)tcamPointer));
+    if((status = gtcamReadTCAMData(dev, tcamPointer, &tcamData)) != GT_OK)
+    {
+        MSG_PRINT(("gtcamReadTCAMData returned \n"));
+        testDisplayStatus(status);
+        return status;
+    }
+    displayTcamData(&tcamData, 1);
+  }
+#endif
+
+#if 1
+    tcamPointer = 0;
+    MSG_PRINT(("TCAM API Get next test start entry %d \n", (int)tcamPointer));
+
+    if((status = gtcamGetNextTCAMData(dev, &tcamPointer, &tcamData)) != GT_OK)
+    {
+        MSG_PRINT(("gtcamGetNextTCAMData returned "));
+        testDisplayStatus(status);
+        return status;
+    }
+
+    MSG_PRINT(("TCAM API Get next test next entry %d \n", (int)tcamPointer));
+    displayTcamData(&tcamData, 3);
+
+    tcamPointer = 4;
+    MSG_PRINT(("TCAM API Get next test start entry %d \n", (int)tcamPointer));
+
+    if((status = gtcamGetNextTCAMData(dev, &tcamPointer, &tcamData)) != GT_OK)
+    {
+        MSG_PRINT(("gtcamGetNextTCAMData returned "));
+        testDisplayStatus(status);
+        return status;
+    }
+
+    MSG_PRINT(("TCAM API Get next test next entry %d \n", (int)tcamPointer));
+    displayTcamData(&tcamData, 3);
+
+#endif
+    MSG_PRINT(("Tcam API test done "));
+    testDisplayStatus(status);
+
+    return testResults;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/driver/gtDrvConfig.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/driver/gtDrvConfig.c
new file mode 100644
index 000000000000..d5a9e308cfb4
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/driver/gtDrvConfig.c
@@ -0,0 +1,963 @@
+#include <Copyright.h>
+/********************************************************************************
+* gtDrvConfig.h
+*
+* DESCRIPTION:
+*       Includes driver level configuration and initialization function.
+*
+* DEPENDENCIES:
+*       None.
+*
+* FILE REVISION NUMBER:
+*       $Revision: 8 $
+*
+*******************************************************************************/
+
+#include <gtDrvSwRegs.h>
+#include <gtDrvConfig.h>
+#include <gtMiiSmiIf.h>
+#include <gtHwCntl.h>
+#include <gtVct.h>
+#include <msApiDefs.h>
+
+
+/*******************************************************************************
+* lport2port
+*
+* DESCRIPTION:
+*       This function converts logical port number to physical port number
+*
+* INPUTS:
+*        portVec - physical port list in vector
+*        port    - logical port number
+* OUTPUTS:
+*        None.
+* RETURNS:
+*       physical port number
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_U8 lport2port
+(
+    IN GT_U16    portVec,
+    IN GT_LPORT     port
+)
+{
+    GT_U8    hwPort, tmpPort;
+
+    tmpPort = hwPort = 0;
+
+    while (portVec)
+    {
+        if(portVec & 0x1)
+        {
+            if((GT_LPORT)tmpPort == port)
+                break;
+            tmpPort++;
+        }
+        hwPort++;
+        portVec >>= 1;
+    }
+
+    if (!portVec)
+        hwPort = GT_INVALID_PORT;
+
+    return hwPort;
+}
+
+/*******************************************************************************
+* port2lport
+*
+* DESCRIPTION:
+*       This function converts physical port number to logical port number
+*
+* INPUTS:
+*        portVec - physical port list in vector
+*        port    - logical port number
+* OUTPUTS:
+*        None.
+* RETURNS:
+*       physical port number
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_LPORT port2lport
+(
+    IN GT_U16    portVec,
+    IN GT_U8     hwPort
+)
+{
+    GT_U8        tmpPort,port;
+
+    port = 0;
+
+    if (hwPort == GT_INVALID_PORT)
+        return (GT_LPORT)hwPort;
+
+    if (!GT_IS_PORT_SET(portVec, hwPort))
+        return (GT_LPORT)GT_INVALID_PORT;
+
+    for (tmpPort = 0; tmpPort <= hwPort; tmpPort++)
+    {
+        if(portVec & 0x1)
+        {
+            port++;
+        }
+        portVec >>= 1;
+    }
+
+    return (GT_LPORT)port-1;
+}
+
+/*******************************************************************************
+* lportvec2portvec
+*
+* DESCRIPTION:
+*       This function converts logical port vector to physical port vector
+*
+* INPUTS:
+*        portVec - physical port list in vector
+*        lVec     - logical port vector
+* OUTPUTS:
+*        None.
+* RETURNS:
+*       physical port vector
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_U32 lportvec2portvec
+(
+    IN GT_U16    portVec,
+    IN GT_U32     lVec
+)
+{
+    GT_U32    pVec, vec;
+
+    pVec = 0;
+    vec = 1;
+
+    while (portVec)
+    {
+        if(portVec & 0x1)
+        {
+            if(lVec & 0x1)
+            {
+                pVec |= vec;
+            }
+            lVec >>= 1;
+        }
+
+        vec <<= 1;
+        portVec >>= 1;
+    }
+
+    if(lVec)
+        return GT_INVALID_PORT_VEC;
+
+    return pVec;
+}
+
+/*******************************************************************************
+* portvec2lportvec
+*
+* DESCRIPTION:
+*       This function converts physical port vector to logical port vector
+*
+* INPUTS:
+*        portVec - physical port list in vector
+*        pVec     - physical port vector
+* OUTPUTS:
+*        None.
+* RETURNS:
+*       logical port vector
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_U32 portvec2lportvec
+(
+    IN GT_U16    portVec,
+    IN GT_U32     pVec
+)
+{
+    GT_U32    lVec, vec;
+
+    lVec = 0;
+    vec = 1;
+
+    while (portVec)
+    {
+        if(portVec & 0x1)
+        {
+            if(pVec & 0x1)
+            {
+                lVec |= vec;
+            }
+            vec <<= 1;
+        }
+
+        pVec >>= 1;
+        portVec >>= 1;
+    }
+
+    return lVec;
+}
+
+/*******************************************************************************
+* lport2phy
+*
+* DESCRIPTION:
+*       This function converts logical port number to physical phy number.
+*
+* INPUTS:
+*        portVec - physical port list in vector
+*        port    - logical port number
+* OUTPUTS:
+*        None.
+* RETURNS:
+*       physical port number
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_U8 lport2phy
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port
+)
+{
+    GT_U8    hwPort;
+
+    /* check if it's for SERDES */
+    if(dev->validSerdesVec & (1<<port))
+    {
+        hwPort = (GT_U8)port;
+    }
+    else
+    {
+        hwPort = GT_LPORT_2_PORT(port);
+    }
+
+    return hwPort;
+}
+
+
+/*******************************************************************************
+* driverConfig
+*
+* DESCRIPTION:
+*       This function initializes the driver level of the quarterDeck software.
+*
+* INPUTS:
+*        None.
+* OUTPUTS:
+*        None.
+* RETURNS:
+*       GT_OK               - on success, or
+*       GT_OUT_OF_CPU_MEM   - if failed to allocate CPU memory,
+*       GT_FAIL             - otherwise.
+*
+* COMMENTS:
+*       1.  This function should perform the following:
+*           -   Initialize the global switch configuration structure.
+*           -   Initialize Mii Interface
+*
+*******************************************************************************/
+GT_STATUS driverConfig
+(
+    IN GT_QD_DEV    *dev
+)
+{
+    GT_U16          deviceId;
+    GT_BOOL         highSmiDevAddr;
+
+
+    if(dev->accessMode == SMI_AUTO_SCAN_MODE)
+    {
+        /* Initialize the MII / SMI interface, search for the device */
+        if((deviceId = miiSmiIfInit(dev,&highSmiDevAddr)) == 0)
+        {
+            return GT_FAIL;
+        }
+
+        dev->baseRegAddr = (highSmiDevAddr)?0x10:0;
+    }
+    else
+    {
+        if((deviceId = miiSmiManualIfInit(dev,(GT_U32)dev->baseRegAddr)) == 0)
+        {
+            return GT_FAIL;
+        }
+    }
+
+    /* Init the device's config struct.             */
+    dev->deviceId       = deviceId >> 4;
+    dev->revision       = (GT_U8)deviceId & 0xF;
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* driverEnable
+*
+* DESCRIPTION:
+*       This function enables the switch for full operation, after the driver
+*       Config function was called.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL othrwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS driverEnable
+(
+    IN GT_QD_DEV    *dev
+)
+{
+    GT_UNUSED_PARAM(dev);
+    return GT_OK;
+}
+
+
+GT_STATUS driverGetSerdesPort(GT_QD_DEV *dev, GT_U8* hwPort)
+{
+    switch(dev->deviceId)
+    {
+        case GT_88E6122:
+            if ((*hwPort<3) || (*hwPort>6))
+            {
+                *hwPort = GT_INVALID_PORT;
+            }
+            else
+            {
+                *hwPort += 9;
+            }
+            break;
+        case GT_88E6131:
+        case GT_88E6108:
+            if ((*hwPort<4) || (*hwPort>7))
+            {
+                *hwPort = GT_INVALID_PORT;
+            }
+            else
+            {
+                *hwPort += 8;
+            }
+            break;
+        case GT_88E6123:
+        case GT_88E6140:
+        case GT_88E6161:
+        case GT_88E6165:
+        case GT_88E6172:
+        case GT_88E6176:
+        case GT_88E6240:
+        case GT_88E6352:
+
+            if ((*hwPort<4) || (*hwPort>5))
+            {
+                *hwPort = GT_INVALID_PORT;
+            }
+            else
+            {
+              *hwPort += 8;
+              if(!(IS_IN_DEV_GROUP(dev,DEV_88E6165_FAMILY)))
+                *hwPort = 0xF;
+            }
+            break;
+        default:
+            *hwPort = GT_INVALID_PORT;
+            break;
+    }
+    return GT_OK;
+}
+
+/*******************************************************************************
+* driverFindPhyID
+*
+* DESCRIPTION:
+*       This function get Phy ID from Phy register 2 and 3.
+*
+* INPUTS:
+*       hwPort    - port number where the Phy is connected
+*
+* OUTPUTS:
+*       phyId    - Phy ID
+*
+* RETURNS:
+*       GT_OK     - if found Marvell Phy,
+*       GT_FAIL - othrwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+static GT_STATUS driverFindPhyID
+(
+    IN  GT_QD_DEV    *dev,
+    IN    GT_U8         hwPort,
+    OUT    GT_U32         *phyID
+)
+{
+    GT_U16 ouiMsb, ouiLsb;
+    GT_STATUS status;
+
+    if((status= hwReadPhyReg(dev,hwPort,2,&ouiMsb)) != GT_OK)
+    {
+        DBG_INFO(("Not able to read Phy Register.\n"));
+        return status;
+    }
+
+    if((status= hwReadPhyReg(dev,hwPort,3,&ouiLsb)) != GT_OK)
+    {
+        DBG_INFO(("Not able to read Phy Register.\n"));
+        return status;
+    }
+
+    if(ouiMsb != MARVELL_OUI_MSb)
+        return GT_FAIL;
+
+    *phyID = (GT_U32)ouiLsb;
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* driverIsPhyAttached
+*
+* DESCRIPTION:
+*       This function verifies Marvell Phy.
+*
+* INPUTS:
+*       hwPort    - port number where the Phy is connected
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       1 - if Marvell Phy exists
+*        0 - otherwise
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_U32 driverIsPhyAttached
+(
+    IN  GT_QD_DEV    *dev,
+    IN    GT_U8         hwPort
+)
+{
+    GT_U32         phyId;
+
+    if(hwPort > dev->maxPhyNum)
+        return 0;
+
+    if(driverFindPhyID(dev,hwPort,&phyId) != GT_OK)
+    {
+        DBG_INFO(("cannot find Marvell Phy.\n"));
+        return 0;
+    }
+
+    return 1;
+}
+
+/*******************************************************************************
+* driverGetPhyID
+*
+* DESCRIPTION:
+*       This function reads and returns Phy ID (register 3) of Marvell Phy.
+*
+* INPUTS:
+*       hwPort    - port number where the Phy is connected
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       phyId - if Marvell Phy exists
+*        GT_INVALID_PORT      - otherwise
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_U32 driverGetPhyID
+(
+    IN  GT_QD_DEV    *dev,
+    IN    GT_U8         hwPort
+)
+{
+    GT_U32         phyId;
+
+    if(hwPort > dev->maxPhyNum)
+        return GT_INVALID_PHY;
+
+    if(driverFindPhyID(dev,hwPort,&phyId) != GT_OK)
+    {
+        DBG_INFO(("cannot find Marvell Phy.\n"));
+        return GT_INVALID_PHY;
+    }
+
+    return phyId;
+}
+
+
+/*******************************************************************************
+* driverPagedAccessStart
+*
+* DESCRIPTION:
+*       This function stores page register and Auto Reg Selection mode if needed.
+*
+* INPUTS:
+*       hwPort     - port number where the Phy is connected
+*        pageType - type of the page registers
+*
+* OUTPUTS:
+*       autoOn    - GT_TRUE if Auto Reg Selection enabled, GT_FALSE otherwise.
+*        pageReg - Page Register Data
+*
+* RETURNS:
+*       GT_OK     - if success
+*       GT_FAIL - othrwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS driverPagedAccessStart
+(
+    IN  GT_QD_DEV    *dev,
+    IN    GT_U8         hwPort,
+    IN    GT_U8         pageType,
+    OUT    GT_BOOL         *autoOn,
+    OUT    GT_U16         *pageReg
+)
+{
+    GT_U16 data;
+    GT_STATUS status;
+
+    switch(pageType)
+    {
+        case GT_PHY_PAGE_WRITE_BACK:
+            break;
+        case GT_PHY_PAGE_DIS_AUTO1:    /* 88E1111 Type */
+            if((status= hwGetPhyRegField(dev,hwPort,27,9,1,&data)) != GT_OK)
+            {
+                DBG_INFO(("Not able to read Phy Register.\n"));
+                return status;
+            }
+
+            data ^= 0x1;    /* toggle bit 0 */
+            BIT_2_BOOL(data, *autoOn);
+
+            if (*autoOn) /* Auto On */
+            {
+                if((status= hwSetPhyRegField(dev,hwPort,27,9,1,data)) != GT_OK)
+                {
+                    DBG_INFO(("Not able to write Phy Register.\n"));
+                    return status;
+                }
+            }
+
+            break;
+        case GT_PHY_PAGE_DIS_AUTO2:    /* 88E1112 Type */
+            if((status= hwGetPhyRegField(dev,hwPort,22,15,1,&data)) != GT_OK)
+            {
+                DBG_INFO(("Not able to read Phy Register.\n"));
+                return status;
+            }
+
+            BIT_2_BOOL(data, *autoOn);
+            data ^= 0x1;    /* toggle bit 0 */
+
+            if (*autoOn) /* Auto On */
+            {
+                if((status= hwSetPhyRegField(dev,hwPort,22,15,1,data)) != GT_OK)
+                {
+                    DBG_INFO(("Not able to write Phy Register.\n"));
+                    return status;
+                }
+            }
+
+            break;
+
+        case GT_PHY_NO_PAGE:
+        default:
+            /* Nothing to do */
+            return GT_OK;
+    }
+
+
+    if((status= hwGetPhyRegField(dev,hwPort,22,0,8,pageReg)) != GT_OK)
+    {
+        DBG_INFO(("Not able to read Phy Register.\n"));
+        return status;
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* driverPagedAccessStop
+*
+* DESCRIPTION:
+*       This function restores page register and Auto Reg Selection mode if needed.
+*
+* INPUTS:
+*       hwPort     - port number where the Phy is connected
+*        pageType - type of the page registers
+*       autoOn     - GT_TRUE if Auto Reg Selection enabled, GT_FALSE otherwise.
+*        pageReg  - Page Register Data
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK     - if success
+*       GT_FAIL - othrwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS driverPagedAccessStop
+(
+    IN  GT_QD_DEV    *dev,
+    IN    GT_U8         hwPort,
+    IN    GT_U8         pageType,
+    IN    GT_BOOL         autoOn,
+    IN    GT_U16         pageReg
+)
+{
+    GT_U16 data;
+    GT_STATUS status;
+
+    switch(pageType)
+    {
+        case GT_PHY_PAGE_WRITE_BACK:
+            break;
+        case GT_PHY_PAGE_DIS_AUTO1:    /* 88E1111 Type */
+            if (autoOn) /* Auto On */
+            {
+                data = 0;
+                if((status= hwSetPhyRegField(dev,hwPort,27,9,1,data)) != GT_OK)
+                {
+                    DBG_INFO(("Not able to write Phy Register.\n"));
+                    return status;
+                }
+            }
+
+            break;
+        case GT_PHY_PAGE_DIS_AUTO2:    /* 88E1112 Type */
+            if (autoOn) /* Auto On */
+            {
+                data = 1;
+                if((status= hwSetPhyRegField(dev,hwPort,22,15,1,data)) != GT_OK)
+                {
+                    DBG_INFO(("Not able to write Phy Register.\n"));
+                    return status;
+                }
+            }
+
+            break;
+
+        case GT_PHY_NO_PAGE:
+        default:
+            /* Nothing to do */
+            return GT_OK;
+    }
+
+
+    if((status= hwSetPhyRegField(dev,hwPort,22,0,8,pageReg)) != GT_OK)
+    {
+        DBG_INFO(("Not able to write Phy Register.\n"));
+        return status;
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* driverFindPhyInformation
+*
+* DESCRIPTION:
+*       This function gets information of Phy connected to the given port.
+*        PhyInfo structure should have valid Phy ID.
+*
+* INPUTS:
+*       hwPort    - port number where the Phy is connected
+*
+* OUTPUTS:
+*       phyId    - Phy ID
+*
+* RETURNS:
+*       GT_OK     - if found Marvell Phy,
+*       GT_FAIL - othrwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS driverFindPhyInformation
+(
+    IN  GT_QD_DEV    *dev,
+    IN    GT_U8         hwPort,
+    OUT    GT_PHY_INFO     *phyInfo
+)
+{
+    GT_U32 phyId;
+    GT_U16 data;
+
+    phyId = phyInfo->phyId;
+
+    switch (phyId & PHY_MODEL_MASK)
+    {
+        case DEV_E3082:
+        case DEV_MELODY:
+                phyInfo->anyPage = 0xFFFFFFFF;
+                phyInfo->flag = GT_PHY_VCT_CAPABLE|GT_PHY_DTE_CAPABLE|
+                                GT_PHY_MAC_IF_LOOP|GT_PHY_EXTERNAL_LOOP|
+                                GT_PHY_COPPER;
+                phyInfo->vctType = GT_PHY_VCT_TYPE1;
+                phyInfo->exStatusType = 0;
+                if ((phyId & PHY_REV_MASK) < 9)
+                    phyInfo->dteType = GT_PHY_DTE_TYPE1;    /* need workaround */
+                else
+                    phyInfo->dteType = GT_PHY_DTE_TYPE5;
+
+                phyInfo->pktGenType = 0;
+                phyInfo->macIfLoopType = GT_PHY_LOOPBACK_TYPE1;
+                phyInfo->lineLoopType = 0;
+                phyInfo->exLoopType = GT_PHY_EX_LB_TYPE0;
+                phyInfo->pageType = GT_PHY_NO_PAGE;
+                break;
+
+        case DEV_E104X:
+                phyInfo->anyPage = 0xFFFFFFFF;
+                phyInfo->flag = GT_PHY_VCT_CAPABLE|GT_PHY_GIGABIT|
+                                GT_PHY_MAC_IF_LOOP|GT_PHY_EXTERNAL_LOOP;
+
+                phyInfo->dteType = 0;
+                if ((phyId & PHY_REV_MASK) < 3)
+                    phyInfo->flag &= ~GT_PHY_VCT_CAPABLE; /* VCT is not supported */
+                else if ((phyId & PHY_REV_MASK) == 3)
+                    phyInfo->vctType = GT_PHY_VCT_TYPE3;    /* Need workaround */
+                else
+                    phyInfo->vctType = GT_PHY_VCT_TYPE2;
+                phyInfo->exStatusType = 0;
+
+                phyInfo->pktGenType = 0;
+                phyInfo->macIfLoopType = GT_PHY_LOOPBACK_TYPE1;
+                phyInfo->lineLoopType = 0;
+                phyInfo->exLoopType = GT_PHY_EX_LB_TYPE0;
+                phyInfo->pageType = GT_PHY_NO_PAGE;
+
+                break;
+
+        case DEV_E1111:
+                phyInfo->anyPage = 0xFFF1FE0C;
+                phyInfo->flag = GT_PHY_VCT_CAPABLE|GT_PHY_DTE_CAPABLE|
+                                GT_PHY_EX_CABLE_STATUS|
+                                GT_PHY_MAC_IF_LOOP|GT_PHY_LINE_LOOP|GT_PHY_EXTERNAL_LOOP|
+                                GT_PHY_GIGABIT|GT_PHY_RESTRICTED_PAGE;
+
+                phyInfo->vctType = GT_PHY_VCT_TYPE2;
+                phyInfo->exStatusType = GT_PHY_EX_STATUS_TYPE1;
+                if ((phyId & PHY_REV_MASK) < 2)
+                    phyInfo->dteType = GT_PHY_DTE_TYPE3;    /* Need workaround */
+                else
+                    phyInfo->dteType = GT_PHY_DTE_TYPE2;
+
+                phyInfo->pktGenType = GT_PHY_PKTGEN_TYPE1;
+                phyInfo->macIfLoopType = GT_PHY_LOOPBACK_TYPE1;
+                phyInfo->lineLoopType = 0;
+                phyInfo->exLoopType = GT_PHY_EX_LB_TYPE0;
+                phyInfo->pageType = GT_PHY_PAGE_DIS_AUTO1;
+                break;
+
+        case DEV_E1112:
+                phyInfo->anyPage = 0x1BC0780C;
+                phyInfo->flag = GT_PHY_VCT_CAPABLE|GT_PHY_DTE_CAPABLE|
+                                GT_PHY_EX_CABLE_STATUS|
+                                GT_PHY_GIGABIT|GT_PHY_RESTRICTED_PAGE|
+                                GT_PHY_MAC_IF_LOOP|GT_PHY_LINE_LOOP|GT_PHY_EXTERNAL_LOOP|
+                                GT_PHY_PKT_GENERATOR;
+
+                phyInfo->vctType = GT_PHY_VCT_TYPE4;
+                phyInfo->exStatusType = GT_PHY_EX_STATUS_TYPE2;
+                phyInfo->dteType = GT_PHY_DTE_TYPE4;
+
+                phyInfo->pktGenType = GT_PHY_PKTGEN_TYPE2;
+                phyInfo->macIfLoopType = GT_PHY_LOOPBACK_TYPE1;
+                phyInfo->lineLoopType = 0;
+                phyInfo->exLoopType = GT_PHY_EX_LB_TYPE0;
+                phyInfo->pageType = GT_PHY_PAGE_DIS_AUTO2;
+                break;
+
+        case DEV_E114X:
+                phyInfo->anyPage = 0x2FF1FE0C;
+                phyInfo->flag = GT_PHY_VCT_CAPABLE|GT_PHY_DTE_CAPABLE|
+                                GT_PHY_EX_CABLE_STATUS|
+                                GT_PHY_MAC_IF_LOOP|GT_PHY_LINE_LOOP|GT_PHY_EXTERNAL_LOOP|
+                                GT_PHY_GIGABIT|GT_PHY_RESTRICTED_PAGE;
+
+                phyInfo->vctType = GT_PHY_VCT_TYPE2;
+                phyInfo->exStatusType = GT_PHY_EX_STATUS_TYPE1;
+                if ((phyId & PHY_REV_MASK) < 4)
+                    phyInfo->dteType = GT_PHY_DTE_TYPE3;    /* Need workaround */
+                else
+                    phyInfo->dteType = GT_PHY_DTE_TYPE2;
+
+                phyInfo->pktGenType = GT_PHY_PKTGEN_TYPE1;
+                phyInfo->macIfLoopType = GT_PHY_LOOPBACK_TYPE1;
+                phyInfo->lineLoopType = 0;
+                phyInfo->exLoopType = GT_PHY_EX_LB_TYPE0;
+                phyInfo->pageType = GT_PHY_PAGE_DIS_AUTO1;
+
+                break;
+
+        case DEV_E1149:
+                phyInfo->anyPage = 0x2040FFFF;
+                phyInfo->flag = GT_PHY_VCT_CAPABLE|GT_PHY_DTE_CAPABLE|
+                                GT_PHY_EX_CABLE_STATUS|
+                                GT_PHY_GIGABIT|
+                                GT_PHY_MAC_IF_LOOP|GT_PHY_LINE_LOOP|GT_PHY_EXTERNAL_LOOP|
+                                GT_PHY_PKT_GENERATOR;
+                phyInfo->vctType = GT_PHY_VCT_TYPE4;
+                phyInfo->exStatusType = GT_PHY_EX_STATUS_TYPE3;
+                phyInfo->dteType = GT_PHY_DTE_TYPE4;
+                phyInfo->pktGenType = GT_PHY_PKTGEN_TYPE2;
+                phyInfo->macIfLoopType = GT_PHY_LOOPBACK_TYPE1;
+                phyInfo->lineLoopType = 0;
+                phyInfo->exLoopType = GT_PHY_EX_LB_TYPE0;
+                phyInfo->pageType = GT_PHY_PAGE_WRITE_BACK;
+                break;
+
+        case DEV_G15LV:
+                if (dev->devName1 &DEV_88E6108)
+                {
+                    phyInfo->anyPage = 0x0000FFFF;
+                    phyInfo->flag = GT_PHY_VCT_CAPABLE|GT_PHY_DTE_CAPABLE|
+                                    GT_PHY_EX_CABLE_STATUS|
+                                    GT_PHY_GIGABIT|
+                                    GT_PHY_MAC_IF_LOOP|GT_PHY_LINE_LOOP|GT_PHY_EXTERNAL_LOOP|
+                                    GT_PHY_PKT_GENERATOR;
+                    phyInfo->vctType = GT_PHY_VCT_TYPE4;
+                    phyInfo->exStatusType = GT_PHY_EX_STATUS_TYPE3;
+                    phyInfo->dteType = GT_PHY_DTE_TYPE4;
+                    phyInfo->pktGenType = GT_PHY_PKTGEN_TYPE2;
+                    phyInfo->macIfLoopType = GT_PHY_LOOPBACK_TYPE1;
+                    phyInfo->lineLoopType = 0;
+                    phyInfo->exLoopType = GT_PHY_EX_LB_TYPE0;
+                    phyInfo->pageType = GT_PHY_PAGE_WRITE_BACK;
+                }
+                else /* 88E6165 family */
+                {
+                    phyInfo->anyPage = 0x0000FFFF;
+                    phyInfo->flag = GT_PHY_ADV_VCT_CAPABLE|GT_PHY_DTE_CAPABLE|
+                                    GT_PHY_EX_CABLE_STATUS|
+                                    GT_PHY_GIGABIT|
+                                    GT_PHY_MAC_IF_LOOP|GT_PHY_LINE_LOOP|GT_PHY_EXTERNAL_LOOP|
+                                    GT_PHY_PKT_GENERATOR;
+                    phyInfo->vctType = GT_PHY_ADV_VCT_TYPE2;
+                    phyInfo->exStatusType = GT_PHY_EX_STATUS_TYPE6;
+                    phyInfo->dteType = GT_PHY_DTE_TYPE4;
+                    phyInfo->pktGenType = GT_PHY_PKTGEN_TYPE2;
+                    phyInfo->macIfLoopType = GT_PHY_LOOPBACK_TYPE1;
+                    phyInfo->lineLoopType = 0;
+                    phyInfo->exLoopType = GT_PHY_EX_LB_TYPE0;
+                    phyInfo->pageType = GT_PHY_PAGE_WRITE_BACK;
+                }
+                break;
+
+        case DEV_EC010:
+                phyInfo->anyPage = 0x2040780C;
+                phyInfo->flag = GT_PHY_VCT_CAPABLE|GT_PHY_DTE_CAPABLE|
+                                GT_PHY_EX_CABLE_STATUS|
+                                GT_PHY_GIGABIT|GT_PHY_RESTRICTED_PAGE|
+                                GT_PHY_MAC_IF_LOOP|GT_PHY_LINE_LOOP|GT_PHY_EXTERNAL_LOOP;
+                phyInfo->vctType = GT_PHY_VCT_TYPE2;
+                phyInfo->exStatusType = 0;
+                phyInfo->dteType = GT_PHY_DTE_TYPE3;    /* Need workaround */
+                phyInfo->pktGenType = 0;
+                phyInfo->macIfLoopType = GT_PHY_LOOPBACK_TYPE1;
+                phyInfo->lineLoopType = 0;
+                phyInfo->exLoopType = GT_PHY_EX_LB_TYPE0;
+                phyInfo->pageType = GT_PHY_PAGE_WRITE_BACK;
+                break;
+
+        case DEV_S15LV:
+                phyInfo->anyPage = 0xFFFFFFFF;
+                phyInfo->flag = GT_PHY_SERDES_CORE|GT_PHY_GIGABIT|
+                                GT_PHY_MAC_IF_LOOP|GT_PHY_LINE_LOOP|GT_PHY_EXTERNAL_LOOP|
+                                GT_PHY_PKT_GENERATOR;
+                phyInfo->vctType = 0;
+                phyInfo->exStatusType = 0;
+                phyInfo->dteType = 0;
+                phyInfo->pktGenType = GT_PHY_PKTGEN_TYPE3;
+                phyInfo->macIfLoopType = GT_PHY_LOOPBACK_TYPE3;
+                phyInfo->lineLoopType = GT_PHY_LINE_LB_TYPE4;
+                phyInfo->exLoopType = 0;
+                phyInfo->pageType = GT_PHY_NO_PAGE;
+                break;
+
+        case DEV_G65G:
+        case DEV_E1540:
+                phyInfo->anyPage = 0x0000FFFF;
+                phyInfo->flag = GT_PHY_ADV_VCT_CAPABLE|GT_PHY_DTE_CAPABLE|
+                                GT_PHY_EX_CABLE_STATUS|
+                                GT_PHY_GIGABIT|
+                                GT_PHY_MAC_IF_LOOP|GT_PHY_LINE_LOOP|GT_PHY_EXTERNAL_LOOP|
+                                GT_PHY_PKT_GENERATOR;
+                phyInfo->vctType = GT_PHY_ADV_VCT_TYPE2;
+                phyInfo->exStatusType = GT_PHY_EX_STATUS_TYPE6;
+                phyInfo->dteType = GT_PHY_DTE_TYPE4;
+                phyInfo->pktGenType = GT_PHY_PKTGEN_TYPE2;
+                phyInfo->macIfLoopType = GT_PHY_LOOPBACK_TYPE1;
+                phyInfo->lineLoopType = 0;
+                phyInfo->exLoopType = GT_PHY_EX_LB_TYPE0;
+                phyInfo->pageType = GT_PHY_PAGE_WRITE_BACK;
+                break;
+        default:
+            return GT_FAIL;
+    }
+
+    if (phyInfo->flag & GT_PHY_GIGABIT)
+    {
+        if(hwGetPhyRegField(dev,hwPort,15,12,4,&data) != GT_OK)
+        {
+            DBG_INFO(("Not able to read Phy Reg(port:%d,offset:%d).\n",hwPort,15));
+               return GT_FAIL;
+        }
+
+        if(data & QD_GIGPHY_1000X_CAP)
+            phyInfo->flag |= GT_PHY_FIBER;
+
+        if(data & QD_GIGPHY_1000T_CAP)
+        {
+            phyInfo->flag |= GT_PHY_COPPER;
+        }
+        else
+        {
+            phyInfo->flag &= ~(GT_PHY_VCT_CAPABLE|GT_PHY_EX_CABLE_STATUS|GT_PHY_DTE_CAPABLE|GT_PHY_ADV_VCT_CAPABLE);
+        }
+    }
+
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/driver/gtDrvEvents.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/driver/gtDrvEvents.c
new file mode 100644
index 000000000000..762c55fb2c89
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/driver/gtDrvEvents.c
@@ -0,0 +1,94 @@
+#include <Copyright.h>
+/********************************************************************************
+* gtDrvEvents.c
+*
+* DESCRIPTION:
+*       This file includes function declarations for QuarterDeck interrupts
+*       configuration and handling.
+*
+* DEPENDENCIES:
+*       None.
+*
+* FILE REVISION NUMBER:
+*       $Revision: 1 $
+*
+*******************************************************************************/
+
+#include <gtDrvSwRegs.h>
+#include <gtHwCntl.h>
+#include <gtDrvEvents.h>
+
+/*******************************************************************************
+* drvEventsInit
+*
+* DESCRIPTION:
+*       This function initializes the driver's interrupt handling mechanism.
+*
+* INPUTS:
+*       intVecNum   - The interrupt vector the switch is connected to.
+*       isrFunc     - A pointer to the Interrupt Service Routine to be
+*                     connected to the given interrupt vector.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success,
+*       GT_FAIL - otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS drvEventsInit
+(
+    IN  GT_QD_DEV       *dev,
+    IN GT_U32           intVecNum,
+    IN GT_VOIDFUNCPTR   isrFunc
+)
+{
+    GT_UNUSED_PARAM(dev);
+    GT_UNUSED_PARAM(intVecNum);
+    GT_UNUSED_PARAM(isrFunc);
+#if 0
+    return osInterruptConnect(intVecNum,isrFunc,0);
+#endif
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* eventQdSr
+*
+* DESCRIPTION:
+*       QuarterDeck interrupt service routine.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       None.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_BOOL eventQdSr
+(
+    IN  GT_QD_DEV* dev,
+    OUT GT_U16* intCause
+)
+{
+    GT_STATUS       retVal;         /* Function calls return value.     */
+
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_STATUS,0,4,intCause);
+
+    if(retVal != GT_OK)
+        return GT_FALSE;
+
+    return (*intCause)?GT_TRUE:GT_FALSE;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/driver/gtHwCntl.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/driver/gtHwCntl.c
new file mode 100644
index 000000000000..18db3ebd47a4
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/driver/gtHwCntl.c
@@ -0,0 +1,2931 @@
+#include <Copyright.h>
+/********************************************************************************
+* gtHwCntl.c
+*
+* DESCRIPTION:
+*       Functions declarations for Hw accessing quarterDeck phy, internal and
+*       global registers.
+*
+* DEPENDENCIES:
+*       None.
+*
+* FILE REVISION NUMBER:
+*       $Revision: 5 $
+*
+*******************************************************************************/
+
+#include <gtDrvSwRegs.h>
+#include <gtHwCntl.h>
+#include <gtMiiSmiIf.h>
+#include <gtSem.h>
+
+
+static GT_STATUS hwReadPPU(GT_QD_DEV *dev, GT_U16 *data);
+static GT_STATUS hwWritePPU(GT_QD_DEV *dev, GT_U16 data);
+static GT_STATUS coreReadPhyReg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    regAddr,
+    OUT GT_U16   *data
+);
+static GT_STATUS coreWritePhyReg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    regAddr,
+    IN  GT_U16   data
+);
+static GT_STATUS coreReadPagedPhyReg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    pageNum,
+    IN  GT_U8    regAddr,
+    IN  GT_U32     anyPage,
+    OUT GT_U16   *data
+);
+static GT_STATUS coreWritePagedPhyReg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    pageNum,
+    IN  GT_U8    regAddr,
+    IN  GT_U32     anyPage,
+    IN  GT_U16   data
+);
+
+GT_STATUS phyRegReadPPUEn (GT_QD_DEV* dev, unsigned int phyAddr , unsigned int regAddr,
+                        GT_U16* value);
+GT_STATUS phyRegWritePPUEn (GT_QD_DEV* dev, unsigned int phyAddr , unsigned int regAddr,
+                       GT_U16 value);
+#ifndef GT_RMGMT_ACCESS
+static GT_STATUS phyReadGlobal2Reg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    regAddr,
+    OUT GT_U16   *data
+);
+static GT_STATUS phyWriteGlobal2Reg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8    regAddr,
+    IN  GT_U16   data
+);
+#endif
+
+
+/*******************************************************************************
+* portToSmiMapping
+*
+* DESCRIPTION:
+*       This function mapps port to smi address
+*
+* INPUTS:
+*        dev - device context
+*       portNum - Port number to read the register for.
+*        accessType - type of register (Phy, Port, or Global)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       smiAddr    - smi address.
+*
+*******************************************************************************/
+GT_U8 portToSmiMapping
+(
+    IN GT_QD_DEV *dev,
+    IN GT_U8    portNum,
+    IN GT_U32    accessType
+)
+{
+    GT_U8 smiAddr;
+
+    if(IS_IN_DEV_GROUP(dev,DEV_8PORT_SWITCH))
+    {
+        switch(accessType)
+        {
+            case PHY_ACCESS:
+              if (dev->validPhyVec & (1<<portNum))
+                smiAddr = PHY_REGS_START_ADDR_8PORT + portNum;
+              else
+			  {
+                smiAddr = 0xFF;
+                if(IS_IN_DEV_GROUP(dev,DEV_SERDES_CORE))
+				{
+                  if (dev->validSerdesVec & (1<<portNum))
+                    smiAddr = PHY_REGS_START_ADDR_8PORT + portNum;
+				}
+			  }
+              break;
+            case PORT_ACCESS:
+                    if (dev->validPortVec & (1<<portNum))
+                        smiAddr = PORT_REGS_START_ADDR_8PORT + portNum;
+                    else
+                        smiAddr = 0xFF;
+                    break;
+            case GLOBAL_REG_ACCESS:
+                    smiAddr = GLOBAL_REGS_START_ADDR_8PORT;
+                    break;
+            case GLOBAL3_REG_ACCESS:
+                    smiAddr = GLOBAL_REGS_START_ADDR_8PORT + 2;
+                    break;
+            default:
+                    smiAddr = GLOBAL_REGS_START_ADDR_8PORT + 1;
+                    break;
+        }
+    }
+    else
+    {
+        smiAddr = dev->baseRegAddr;
+        switch(accessType)
+        {
+            case PHY_ACCESS:
+              if (dev->validPhyVec & (1<<portNum))
+                smiAddr += PHY_REGS_START_ADDR + portNum;
+              else
+			  {
+                smiAddr = 0xFF;
+                if(IS_IN_DEV_GROUP(dev,DEV_SERDES_CORE))
+				{
+                  if (dev->validSerdesVec & (1<<portNum))
+                    smiAddr += PHY_REGS_START_ADDR + portNum;
+				}
+			  }
+              break;
+            case PORT_ACCESS:
+                    if (dev->validPortVec & (1<<portNum))
+                        smiAddr += PORT_REGS_START_ADDR + portNum;
+                    else
+                        smiAddr = 0xFF;
+                    break;
+            case GLOBAL_REG_ACCESS:
+                    smiAddr += GLOBAL_REGS_START_ADDR;
+                    break;
+            default:
+                    /*  88EC0XX uses PORT_REGS_START_ADDR -1 */
+                    if(IS_IN_DEV_GROUP(dev,DEV_MELODY_SWITCH))
+                      smiAddr += PORT_REGS_START_ADDR - 1;
+                    else
+                      smiAddr += GLOBAL_REGS_START_ADDR - 1;
+                    break;
+        }
+    }
+    return smiAddr;
+}
+
+
+/****************************************************************************/
+/* Phy registers related functions.                                         */
+/****************************************************************************/
+
+/*******************************************************************************
+* hwReadPhyReg
+*
+* DESCRIPTION:
+*       This function reads a switch's port phy register.
+*
+* INPUTS:
+*       portNum - Port number to read the register for.
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwReadPhyReg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    regAddr,
+    OUT GT_U16   *data
+)
+{
+    GT_STATUS   retVal;
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = coreReadPhyReg(dev, portNum, regAddr, data);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* hwWritePhyReg
+*
+* DESCRIPTION:
+*       This function writes to a switch's port phy register.
+*
+* INPUTS:
+*       portNum - Port number to write the register for.
+*       regAddr - The register's address.
+*       data    - The data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwWritePhyReg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    regAddr,
+    IN  GT_U16   data
+)
+{
+    GT_STATUS   retVal;
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = coreWritePhyReg(dev, portNum, regAddr, data);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* hwGetPhyRegField
+*
+* DESCRIPTION:
+*       This function reads a specified field from a switch's port phy register.
+*
+* INPUTS:
+*       portNum     - Port number to read the register for.
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to read.
+*
+* OUTPUTS:
+*       data        - The read register field.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  The sum of fieldOffset & fieldLength parameters must be smaller-
+*           equal to 16.
+*
+*******************************************************************************/
+GT_STATUS hwGetPhyRegField
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    regAddr,
+    IN  GT_U8    fieldOffset,
+    IN  GT_U8    fieldLength,
+    OUT GT_U16   *data
+)
+{
+    GT_U16 mask;            /* Bits mask to be read */
+    GT_U16 tmpData;
+    GT_STATUS   retVal;
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = coreReadPhyReg(dev, portNum, regAddr, &tmpData);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    if (retVal != GT_OK)
+        return retVal;
+
+    CALC_MASK(fieldOffset,fieldLength,mask);
+
+    tmpData = (tmpData & mask) >> fieldOffset;
+    *data = tmpData;
+
+    DBG_INFO(("Read from phy(%d) register: regAddr 0x%x, ",
+              portNum,regAddr));
+    DBG_INFO(("fOff %d, fLen %d, data 0x%x.\n",fieldOffset,fieldLength,*data));
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* hwSetPhyRegField
+*
+* DESCRIPTION:
+*       This function writes to specified field in a switch's port phy register.
+*
+* INPUTS:
+*       portNum     - Port number to write the register for.
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to write.
+*       data        - Data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  The sum of fieldOffset & fieldLength parameters must be smaller-
+*           equal to 16.
+*
+*******************************************************************************/
+GT_STATUS hwSetPhyRegField
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    regAddr,
+    IN  GT_U8    fieldOffset,
+    IN  GT_U8    fieldLength,
+    IN  GT_U16   data
+)
+{
+    GT_U16 mask;
+    GT_U16 tmpData;
+    GT_STATUS   retVal;
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = coreReadPhyReg(dev, portNum, regAddr, &tmpData);
+
+    if(retVal != GT_OK)
+    {
+        gtSemGive(dev,dev->multiAddrSem);
+        return retVal;
+    }
+
+    CALC_MASK(fieldOffset,fieldLength,mask);
+
+    /* Set the desired bits to 0.                       */
+    tmpData &= ~mask;
+    /* Set the given data into the above reset bits.    */
+    tmpData |= ((data << fieldOffset) & mask);
+
+    DBG_INFO(("Write to phy(%d) register: regAddr 0x%x, ",
+              portNum,regAddr));
+    DBG_INFO(("fieldOff %d, fieldLen %d, data 0x%x.\n",fieldOffset,
+              fieldLength,data));
+
+    retVal = coreWritePhyReg(dev, portNum, regAddr, tmpData);
+
+    gtSemGive(dev,dev->multiAddrSem);
+    return retVal;
+}
+
+
+/*******************************************************************************
+* hwReadPagedPhyReg
+*
+* DESCRIPTION:
+*       This function reads a switch's port phy register in page mode.
+*
+* INPUTS:
+*       portNum - Port number to read the register for.
+*       pageNum - Page number of the register to be read.
+*       regAddr - The register's address.
+*        anyPage - Any Page register vector
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwReadPagedPhyReg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    pageNum,
+    IN  GT_U8    regAddr,
+    IN  GT_U32     anyPage,
+    OUT GT_U16   *data
+)
+{
+    GT_STATUS   retVal;
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = coreReadPagedPhyReg(dev,portNum,pageNum,regAddr,anyPage,data);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* hwWritePagedPhyReg
+*
+* DESCRIPTION:
+*       This function writes to a switch's port phy register in page mode.
+*
+* INPUTS:
+*       portNum - Port number to write the register for.
+*       pageNum - Page number of the register to be written.
+*       regAddr - The register's address.
+*        anyPage - Any Page register vector
+*       data    - The data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwWritePagedPhyReg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    pageNum,
+    IN  GT_U8    regAddr,
+    IN  GT_U32     anyPage,
+    IN  GT_U16   data
+)
+{
+    GT_STATUS   retVal;
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = coreWritePagedPhyReg(dev,portNum,pageNum,regAddr,anyPage,data);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* hwGetPagedPhyRegField
+*
+* DESCRIPTION:
+*       This function reads a specified field from a switch's port phy register
+*        in page mode.
+*
+* INPUTS:
+*       portNum     - Port number to read the register for.
+*       pageNum     - Page number of the register to be read.
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to read.
+*        anyPage     - Any Page register vector
+*
+* OUTPUTS:
+*       data        - The read register field.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  The sum of fieldOffset & fieldLength parameters must be smaller-
+*           equal to 16.
+*
+*******************************************************************************/
+GT_STATUS hwGetPagedPhyRegField
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    pageNum,
+    IN  GT_U8    regAddr,
+    IN  GT_U8    fieldOffset,
+    IN  GT_U8    fieldLength,
+    IN  GT_U32     anyPage,
+    OUT GT_U16   *data
+)
+{
+    GT_U16 mask;            /* Bits mask to be read */
+    GT_U16 tmpData;
+    GT_STATUS   retVal;
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = coreReadPagedPhyReg(dev,portNum,pageNum,regAddr,anyPage,&tmpData);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    CALC_MASK(fieldOffset,fieldLength,mask);
+
+    tmpData = (tmpData & mask) >> fieldOffset;
+    *data = tmpData;
+
+    DBG_INFO(("Read from phy(%d) register: regAddr 0x%x, ",
+              portNum,regAddr));
+    DBG_INFO(("fOff %d, fLen %d, data 0x%x.\n",fieldOffset,fieldLength,*data));
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* hwSetPagedPhyRegField
+*
+* DESCRIPTION:
+*       This function writes to specified field in a switch's port phy register
+*        in page mode
+*
+* INPUTS:
+*       portNum     - Port number to write the register for.
+*       pageNum     - Page number of the register to be read.
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to write.
+*        anyPage     - Any Page register vector
+*       data        - Data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  The sum of fieldOffset & fieldLength parameters must be smaller-
+*           equal to 16.
+*
+*******************************************************************************/
+GT_STATUS hwSetPagedPhyRegField
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    pageNum,
+    IN  GT_U8    regAddr,
+    IN  GT_U8    fieldOffset,
+    IN  GT_U8    fieldLength,
+    IN  GT_U32     anyPage,
+    IN  GT_U16   data
+)
+{
+    GT_U16 mask;
+    GT_U16 tmpData;
+    GT_STATUS   retVal;
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    if((retVal=coreReadPagedPhyReg(dev,portNum,pageNum,regAddr,anyPage,&tmpData)) != GT_OK)
+    {
+        gtSemGive(dev,dev->multiAddrSem);
+        return retVal;
+    }
+
+    CALC_MASK(fieldOffset,fieldLength,mask);
+
+    /* Set the desired bits to 0.                       */
+    tmpData &= ~mask;
+    /* Set the given data into the above reset bits.    */
+    tmpData |= ((data << fieldOffset) & mask);
+
+    DBG_INFO(("Write to phy(%d) register: regAddr 0x%x, ",
+              portNum,regAddr));
+    DBG_INFO(("fieldOff %d, fieldLen %d, data 0x%x.\n",fieldOffset,
+              fieldLength,data));
+    retVal = coreWritePagedPhyReg(dev,portNum,pageNum,regAddr,anyPage,tmpData);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* hwPhyReset
+*
+* DESCRIPTION:
+*       This function performs softreset and waits until reset completion.
+*
+* INPUTS:
+*       portNum     - Port number to write the register for.
+*       u16Data     - data should be written into Phy control register.
+*                      if this value is 0xFF, normal operation occcurs (read,
+*                      update, and write back.)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS hwPhyReset
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U8        portNum,
+    IN    GT_U16        u16Data
+)
+{
+    GT_U16 tmpData;
+    GT_STATUS   retVal;
+    GT_U32 retryCount;
+    GT_BOOL    pd = GT_FALSE;
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    if((retVal=coreReadPhyReg(dev,portNum,0,&tmpData))
+           != GT_OK)
+    {
+           DBG_INFO(("Reading Register failed\n"));
+        gtSemGive(dev,dev->multiAddrSem);
+           return retVal;
+    }
+
+    if (tmpData & 0x800)
+    {
+        pd = GT_TRUE;
+    }
+
+    if (u16Data != 0xFF)
+    {
+        tmpData = u16Data;
+    }
+
+    /* Set the desired bits to 0. */
+    if (pd)
+    {
+        tmpData |= 0x800;
+    }
+    else
+    {
+        tmpData |= 0x8000;
+    }
+
+    if((retVal=coreWritePhyReg(dev,portNum,0,tmpData))
+        != GT_OK)
+    {
+        DBG_INFO(("Writing to register failed\n"));
+        gtSemGive(dev,dev->multiAddrSem);
+        return retVal;
+    }
+
+    if (pd)
+    {
+        gtSemGive(dev,dev->multiAddrSem);
+        return GT_OK;
+    }
+
+    for (retryCount = 0x1000; retryCount > 0; retryCount--)
+    {
+        if((retVal=coreReadPhyReg(dev,portNum,0,&tmpData)) != GT_OK)
+        {
+            DBG_INFO(("Reading register failed\n"));
+            gtSemGive(dev,dev->multiAddrSem);
+            return retVal;
+        }
+        if ((tmpData & 0x8000) == 0)
+            break;
+    }
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    if (retryCount == 0)
+    {
+        DBG_INFO(("Reset bit is not cleared\n"));
+        return GT_FAIL;
+    }
+
+    return GT_OK;
+}
+
+/****************************************************************************/
+/* Per port registers related functions.                                    */
+/****************************************************************************/
+
+/*******************************************************************************
+* hwReadPortReg
+*
+* DESCRIPTION:
+*       This function reads a switch's port register.
+*
+* INPUTS:
+*       portNum - Port number to read the register for.
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwReadPortReg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    regAddr,
+    OUT GT_U16   *data
+)
+{
+    GT_U8       phyAddr;
+    GT_STATUS   retVal;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, portNum, PORT_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal =  miiSmiIfReadRegister(dev,phyAddr,regAddr,data);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    DBG_INFO(("Read from port(%d) register: phyAddr 0x%x, regAddr 0x%x, ",
+              portNum,phyAddr,regAddr));
+    DBG_INFO(("data 0x%x.\n",*data));
+    return retVal;
+}
+
+
+/*******************************************************************************
+* hwWritePortReg
+*
+* DESCRIPTION:
+*       This function writes to a switch's port register.
+*
+* INPUTS:
+*       portNum - Port number to write the register for.
+*       regAddr - The register's address.
+*       data    - The data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwWritePortReg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    regAddr,
+    IN  GT_U16   data
+)
+{
+    GT_U8   phyAddr;
+    GT_STATUS   retVal;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, portNum, PORT_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    DBG_INFO(("Write to port(%d) register: phyAddr 0x%x, regAddr 0x%x, ",
+              portNum,phyAddr,regAddr));
+    DBG_INFO(("data 0x%x.\n",data));
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = miiSmiIfWriteRegister(dev,phyAddr,regAddr,data);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* hwGetPortRegField
+*
+* DESCRIPTION:
+*       This function reads a specified field from a switch's port register.
+*
+* INPUTS:
+*       portNum     - Port number to read the register for.
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to read.
+*
+* OUTPUTS:
+*       data        - The read register field.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  The sum of fieldOffset & fieldLength parameters must be smaller-
+*           equal to 16.
+*
+*******************************************************************************/
+GT_STATUS hwGetPortRegField
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    regAddr,
+    IN  GT_U8    fieldOffset,
+    IN  GT_U8    fieldLength,
+    OUT GT_U16   *data
+)
+{
+    GT_U16 mask;            /* Bits mask to be read */
+    GT_U16 tmpData;
+    GT_STATUS   retVal;
+    GT_U8       phyAddr;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, portNum, PORT_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal =  miiSmiIfReadRegister(dev,phyAddr,regAddr,&tmpData);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    if (retVal != GT_OK)
+        return retVal;
+
+    CALC_MASK(fieldOffset,fieldLength,mask);
+
+    tmpData = (tmpData & mask) >> fieldOffset;
+    *data = tmpData;
+    DBG_INFO(("Read from port(%d) register: regAddr 0x%x, ",
+              portNum,regAddr));
+    DBG_INFO(("fOff %d, fLen %d, data 0x%x.\n",fieldOffset,fieldLength,*data));
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* hwSetPortRegField
+*
+* DESCRIPTION:
+*       This function writes to specified field in a switch's port register.
+*
+* INPUTS:
+*       portNum     - Port number to write the register for.
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to write.
+*       data        - Data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  The sum of fieldOffset & fieldLength parameters must be smaller-
+*           equal to 16.
+*
+*******************************************************************************/
+GT_STATUS hwSetPortRegField
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    regAddr,
+    IN  GT_U8    fieldOffset,
+    IN  GT_U8    fieldLength,
+    IN  GT_U16   data
+)
+{
+    GT_U16 mask;
+    GT_U16 tmpData;
+    GT_STATUS   retVal;
+    GT_U8       phyAddr;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, portNum, PORT_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal =  miiSmiIfReadRegister(dev,phyAddr,regAddr,&tmpData);
+
+    if(retVal != GT_OK)
+    {
+        gtSemGive(dev,dev->multiAddrSem);
+        return retVal;
+    }
+
+    CALC_MASK(fieldOffset,fieldLength,mask);
+
+    /* Set the desired bits to 0.                       */
+    tmpData &= ~mask;
+    /* Set the given data into the above reset bits.    */
+    tmpData |= ((data << fieldOffset) & mask);
+    DBG_INFO(("Write to port(%d) register: regAddr 0x%x, ",
+              portNum,regAddr));
+    DBG_INFO(("fieldOff %d, fieldLen %d, data 0x%x.\n",fieldOffset,
+              fieldLength,data));
+
+    retVal = miiSmiIfWriteRegister(dev,phyAddr,regAddr,tmpData);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* hwSetPortRegBits
+*
+* DESCRIPTION:
+*       This function writes to specified bits in a switch's port register.
+*
+* INPUTS:
+*       portNum     - Port number to write the register for.
+*       regAddr     - The register's address.
+*       mask         - The bits to write.
+*       data        - Data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  When Data is 0x1002 and mask is 0xF00F, 0001b is written to bit[31:24]
+*            and 0010b is written to bit[3:0]
+*
+*******************************************************************************/
+GT_STATUS hwSetPortRegBits
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    regAddr,
+    IN  GT_U16   mask,
+    IN  GT_U16   data
+)
+{
+    GT_U16 tmpData;
+    GT_STATUS   retVal;
+    GT_U8       phyAddr;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, portNum, PORT_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal =  miiSmiIfReadRegister(dev,phyAddr,regAddr,&tmpData);
+
+    if(retVal != GT_OK)
+    {
+        gtSemGive(dev,dev->multiAddrSem);
+        return retVal;
+    }
+
+    /* Set the desired bits to 0.                       */
+    tmpData &= ~mask;
+    /* Set the given data into the above reset bits.    */
+    tmpData |= (data & mask);
+    DBG_INFO(("Write to port(%d) register: regAddr 0x%x, ",
+              portNum,regAddr));
+    DBG_INFO(("mask %d, data 0x%x.\n",mask,data));
+
+    retVal = miiSmiIfWriteRegister(dev,phyAddr,regAddr,tmpData);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    return retVal;
+}
+
+
+
+/****************************************************************************/
+/* Global registers related functions.                                      */
+/****************************************************************************/
+
+/*******************************************************************************
+* hwReadGlobalReg
+*
+* DESCRIPTION:
+*       This function reads a switch's global register.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwReadGlobalReg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    regAddr,
+    OUT GT_U16   *data
+)
+{
+    GT_U8       phyAddr;
+    GT_STATUS   retVal;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = miiSmiIfReadRegister(dev,phyAddr,regAddr,data);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    DBG_INFO(("read from global register: phyAddr 0x%x, regAddr 0x%x, ",
+              phyAddr,regAddr));
+    DBG_INFO(("data 0x%x.\n",*data));
+    return retVal;
+}
+
+
+/*******************************************************************************
+* hwWriteGlobalReg
+*
+* DESCRIPTION:
+*       This function writes to a switch's global register.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*       data    - The data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwWriteGlobalReg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    regAddr,
+    IN  GT_U16   data
+)
+{
+    GT_U8   phyAddr;
+    GT_STATUS   retVal;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+
+    DBG_INFO(("Write to global register: phyAddr 0x%x, regAddr 0x%x, ",
+              phyAddr,regAddr));
+    DBG_INFO(("data 0x%x.\n",data));
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = miiSmiIfWriteRegister(dev,phyAddr,regAddr,data);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* hwGetGlobalRegField
+*
+* DESCRIPTION:
+*       This function reads a specified field from a switch's global register.
+*
+* INPUTS:
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to read.
+*
+* OUTPUTS:
+*       data        - The read register field.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  The sum of fieldOffset & fieldLength parameters must be smaller-
+*           equal to 16.
+*
+*******************************************************************************/
+GT_STATUS hwGetGlobalRegField
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    regAddr,
+    IN  GT_U8    fieldOffset,
+    IN  GT_U8    fieldLength,
+    OUT GT_U16   *data
+)
+{
+    GT_U16 mask;            /* Bits mask to be read */
+    GT_U16 tmpData;
+    GT_STATUS   retVal;
+    GT_U8       phyAddr;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = miiSmiIfReadRegister(dev,phyAddr,regAddr,&tmpData);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    CALC_MASK(fieldOffset,fieldLength,mask);
+    tmpData = (tmpData & mask) >> fieldOffset;
+    *data = tmpData;
+    DBG_INFO(("Read from global register: regAddr 0x%x, ",
+              regAddr));
+    DBG_INFO(("fOff %d, fLen %d, data 0x%x.\n",fieldOffset,fieldLength,*data));
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* hwSetGlobalRegField
+*
+* DESCRIPTION:
+*       This function writes to specified field in a switch's global register.
+*
+* INPUTS:
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to write.
+*       data        - Data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  The sum of fieldOffset & fieldLength parameters must be smaller-
+*           equal to 16.
+*
+*******************************************************************************/
+GT_STATUS hwSetGlobalRegField
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    regAddr,
+    IN  GT_U8    fieldOffset,
+    IN  GT_U8    fieldLength,
+    IN  GT_U16   data
+)
+{
+    GT_U16 mask;
+    GT_U16 tmpData;
+    GT_STATUS   retVal;
+    GT_U8       phyAddr;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal =  miiSmiIfReadRegister(dev,phyAddr,regAddr,&tmpData);
+
+    if(retVal != GT_OK)
+    {
+        gtSemGive(dev,dev->multiAddrSem);
+        return retVal;
+    }
+
+    CALC_MASK(fieldOffset,fieldLength,mask);
+
+    /* Set the desired bits to 0.                       */
+    tmpData &= ~mask;
+    /* Set the given data into the above reset bits.    */
+    tmpData |= ((data << fieldOffset) & mask);
+
+    DBG_INFO(("Write to global register: regAddr 0x%x, ",
+              regAddr));
+    DBG_INFO(("fieldOff %d, fieldLen %d, data 0x%x.\n",fieldOffset,
+              fieldLength,data));
+
+    retVal = miiSmiIfWriteRegister(dev,phyAddr,regAddr,tmpData);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    return retVal;
+}
+
+/*******************************************************************************
+* hwReadGlobal2Reg
+*
+* DESCRIPTION:
+*       This function reads a switch's global 2 register.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwReadGlobal2Reg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    regAddr,
+    OUT GT_U16   *data
+)
+{
+    GT_U8       phyAddr;
+    GT_STATUS   retVal;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = miiSmiIfReadRegister(dev,phyAddr,regAddr,data);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    DBG_INFO(("read from global 2 register: phyAddr 0x%x, regAddr 0x%x, ", phyAddr,regAddr));
+    DBG_INFO(("data 0x%x.\n",*data));
+    return retVal;
+}
+
+
+/*******************************************************************************
+* hwWriteGlobal2Reg
+*
+* DESCRIPTION:
+*       This function writes to a switch's global 2 register.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*       data    - The data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwWriteGlobal2Reg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8    regAddr,
+    IN  GT_U16   data
+)
+{
+    GT_U8   phyAddr;
+    GT_STATUS   retVal;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    DBG_INFO(("Write to global 2 register: phyAddr 0x%x, regAddr 0x%x, ", phyAddr,regAddr));
+    DBG_INFO(("data 0x%x.\n",data));
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = miiSmiIfWriteRegister(dev,phyAddr,regAddr,data);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* hwGetGlobal2RegField
+*
+* DESCRIPTION:
+*       This function reads a specified field from a switch's global 2 register.
+*
+* INPUTS:
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to read.
+*
+* OUTPUTS:
+*       data        - The read register field.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  The sum of fieldOffset & fieldLength parameters must be smaller-
+*           equal to 16.
+*
+*******************************************************************************/
+GT_STATUS hwGetGlobal2RegField
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    regAddr,
+    IN  GT_U8    fieldOffset,
+    IN  GT_U8    fieldLength,
+    OUT GT_U16   *data
+)
+{
+    GT_U16 mask;            /* Bits mask to be read */
+    GT_U16 tmpData;
+    GT_STATUS   retVal;
+    GT_U8       phyAddr;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = miiSmiIfReadRegister(dev,phyAddr,regAddr,&tmpData);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    if(retVal != GT_OK)
+        return retVal;
+
+    CALC_MASK(fieldOffset,fieldLength,mask);
+    tmpData = (tmpData & mask) >> fieldOffset;
+    *data = tmpData;
+    DBG_INFO(("Read from global 2 register: regAddr 0x%x, ",
+              regAddr));
+    DBG_INFO(("fOff %d, fLen %d, data 0x%x.\n",fieldOffset,fieldLength,*data));
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* hwSetGlobal2RegField
+*
+* DESCRIPTION:
+*       This function writes to specified field in a switch's global 2 register.
+*
+* INPUTS:
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to write.
+*       data        - Data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  The sum of fieldOffset & fieldLength parameters must be smaller-
+*           equal to 16.
+*
+*******************************************************************************/
+GT_STATUS hwSetGlobal2RegField
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    regAddr,
+    IN  GT_U8    fieldOffset,
+    IN  GT_U8    fieldLength,
+    IN  GT_U16   data
+)
+{
+    GT_U16 mask;
+    GT_U16 tmpData;
+    GT_STATUS   retVal;
+    GT_U8       phyAddr;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = miiSmiIfReadRegister(dev,phyAddr,regAddr,&tmpData);
+
+    if(retVal != GT_OK)
+    {
+        gtSemGive(dev,dev->multiAddrSem);
+        return retVal;
+    }
+
+    CALC_MASK(fieldOffset,fieldLength,mask);
+
+    /* Set the desired bits to 0.                       */
+    tmpData &= ~mask;
+    /* Set the given data into the above reset bits.    */
+    tmpData |= ((data << fieldOffset) & mask);
+
+    DBG_INFO(("Write to global 2 register: regAddr 0x%x, ",
+              regAddr));
+    DBG_INFO(("fieldOff %d, fieldLen %d, data 0x%x.\n",fieldOffset,
+              fieldLength,data));
+
+    retVal = miiSmiIfWriteRegister(dev,phyAddr,regAddr,tmpData);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    return retVal;
+}
+
+/*******************************************************************************
+* hwSetGlobal2RegBits
+*
+* DESCRIPTION:
+*       This function writes to specified bits in a switch's global 2 register.
+*
+* INPUTS:
+*       regAddr     - The register's address.
+*       mask         - The bits to write.
+*       data        - Data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  When Data is 0x1002 and mask is 0xF00F, 0001b is written to bit[31:24]
+*            and 0010b is written to bit[3:0]
+*
+*******************************************************************************/
+GT_STATUS hwSetGlobal2RegBits
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    regAddr,
+    IN  GT_U16   mask,
+    IN  GT_U16   data
+)
+{
+    GT_U16 tmpData;
+    GT_STATUS   retVal;
+    GT_U8       phyAddr;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = miiSmiIfReadRegister(dev,phyAddr,regAddr,&tmpData);
+
+    if(retVal != GT_OK)
+    {
+        gtSemGive(dev,dev->multiAddrSem);
+        return retVal;
+    }
+
+    /* Set the desired bits to 0.                       */
+    tmpData &= ~mask;
+    /* Set the given data into the above reset bits.    */
+    tmpData |= (data & mask);
+
+    DBG_INFO(("Write to global 2 register: regAddr 0x%x, ",
+              regAddr));
+    DBG_INFO(("mask %d, data 0x%x.\n",mask,data));
+
+    retVal = miiSmiIfWriteRegister(dev,phyAddr,regAddr,tmpData);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* hwReadGlobal3Reg
+*
+* DESCRIPTION:
+*       This function reads a switch's global 3 register.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwReadGlobal3Reg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    regAddr,
+    OUT GT_U16   *data
+)
+{
+    GT_U8       phyAddr;
+    GT_STATUS   retVal;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL3_REG_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = miiSmiIfReadRegister(dev,phyAddr,regAddr,data);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    DBG_INFO(("read from global 3 register: phyAddr 0x%x, regAddr 0x%x, ", phyAddr,regAddr));
+    DBG_INFO(("data 0x%x.\n",*data));
+    return retVal;
+}
+
+
+/*******************************************************************************
+* hwWriteGlobal3Reg
+*
+* DESCRIPTION:
+*       This function writes to a switch's global 3 register.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*       data    - The data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwWriteGlobal3Reg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8    regAddr,
+    IN  GT_U16   data
+)
+{
+    GT_U8   phyAddr;
+    GT_STATUS   retVal;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL3_REG_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    DBG_INFO(("Write to global 3 register: phyAddr 0x%x, regAddr 0x%x, ", phyAddr,regAddr));
+    DBG_INFO(("data 0x%x.\n",data));
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = miiSmiIfWriteRegister(dev,phyAddr,regAddr,data);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* hwGetGlobal3RegField
+*
+* DESCRIPTION:
+*       This function reads a specified field from a switch's global 3 register.
+*
+* INPUTS:
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to read.
+*
+* OUTPUTS:
+*       data        - The read register field.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  The sum of fieldOffset & fieldLength parameters must be smaller-
+*           equal to 16.
+*
+*******************************************************************************/
+GT_STATUS hwGetGlobal3RegField
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    regAddr,
+    IN  GT_U8    fieldOffset,
+    IN  GT_U8    fieldLength,
+    OUT GT_U16   *data
+)
+{
+    GT_U16 mask;            /* Bits mask to be read */
+    GT_U16 tmpData;
+    GT_STATUS   retVal;
+    GT_U8       phyAddr;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL3_REG_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = miiSmiIfReadRegister(dev,phyAddr,regAddr,&tmpData);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    if(retVal != GT_OK)
+        return retVal;
+
+    CALC_MASK(fieldOffset,fieldLength,mask);
+    tmpData = (tmpData & mask) >> fieldOffset;
+    *data = tmpData;
+    DBG_INFO(("Read from global 3 register: regAddr 0x%x, ",
+              regAddr));
+    DBG_INFO(("fOff %d, fLen %d, data 0x%x.\n",fieldOffset,fieldLength,*data));
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* hwSetGlobal3RegField
+*
+* DESCRIPTION:
+*       This function writes to specified field in a switch's global 3 register.
+*
+* INPUTS:
+*       regAddr     - The register's address.
+*       fieldOffset - The field start bit index. (0 - 15)
+*       fieldLength - Number of bits to write.
+*       data        - Data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  The sum of fieldOffset & fieldLength parameters must be smaller-
+*           equal to 16.
+*
+*******************************************************************************/
+GT_STATUS hwSetGlobal3RegField
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    regAddr,
+    IN  GT_U8    fieldOffset,
+    IN  GT_U8    fieldLength,
+    IN  GT_U16   data
+)
+{
+    GT_U16 mask;
+    GT_U16 tmpData;
+    GT_STATUS   retVal;
+    GT_U8       phyAddr;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL3_REG_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = miiSmiIfReadRegister(dev,phyAddr,regAddr,&tmpData);
+
+    if(retVal != GT_OK)
+    {
+        gtSemGive(dev,dev->multiAddrSem);
+        return retVal;
+    }
+
+    CALC_MASK(fieldOffset,fieldLength,mask);
+
+    /* Set the desired bits to 0.                       */
+    tmpData &= ~mask;
+    /* Set the given data into the above reset bits.    */
+    tmpData |= ((data << fieldOffset) & mask);
+
+    DBG_INFO(("Write to global 3 register: regAddr 0x%x, ",
+              regAddr));
+    DBG_INFO(("fieldOff %d, fieldLen %d, data 0x%x.\n",fieldOffset,
+              fieldLength,data));
+
+    retVal = miiSmiIfWriteRegister(dev,phyAddr,regAddr,tmpData);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    return retVal;
+}
+
+/*******************************************************************************
+* hwSetGlobal3RegBits
+*
+* DESCRIPTION:
+*       This function writes to specified bits in a switch's global 3 register.
+*
+* INPUTS:
+*       regAddr     - The register's address.
+*       mask         - The bits to write.
+*       data        - Data to be written.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  When Data is 0x1002 and mask is 0xF00F, 0001b is written to bit[31:24]
+*            and 0010b is written to bit[3:0]
+*
+*******************************************************************************/
+GT_STATUS hwSetGlobal3RegBits
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    regAddr,
+    IN  GT_U16   mask,
+    IN  GT_U16   data
+)
+{
+    GT_U16 tmpData;
+    GT_STATUS   retVal;
+    GT_U8       phyAddr;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL3_REG_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = miiSmiIfReadRegister(dev,phyAddr,regAddr,&tmpData);
+
+    if(retVal != GT_OK)
+    {
+        gtSemGive(dev,dev->multiAddrSem);
+        return retVal;
+    }
+
+    /* Set the desired bits to 0.                       */
+    tmpData &= ~mask;
+    /* Set the given data into the above reset bits.    */
+    tmpData |= (data & mask);
+
+    DBG_INFO(("Write to global 3 register: regAddr 0x%x, ",
+              regAddr));
+    DBG_INFO(("mask %d, data 0x%x.\n",mask,data));
+
+    retVal = miiSmiIfWriteRegister(dev,phyAddr,regAddr,tmpData);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    return retVal;
+}
+
+
+/*********************************************************************************************/
+
+/*******************************************************************************
+* hwReadMiiReg
+*
+* DESCRIPTION:
+*       This function reads a switch register.
+*
+* INPUTS:
+*       phyAddr - Phy Address to read the register for.( 0 ~ 0x1F )
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwReadMiiReg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     phyAddr,
+    IN  GT_U8     regAddr,
+    OUT GT_U16    *data
+)
+{
+    GT_STATUS   retVal;
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = miiSmiIfReadRegister(dev,phyAddr,regAddr,data);
+
+     gtSemGive(dev,dev->multiAddrSem);
+
+    DBG_INFO(("Read from phy(0x%x) register: regAddr 0x%x, data 0x%x.\n",
+              phyAddr,regAddr,*data));
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* hwWriteMiiReg
+*
+* DESCRIPTION:
+*       This function writes a switch register.
+*
+* INPUTS:
+*       phyAddr - Phy Address to read the register for.( 0 ~ 0x1F )
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwWriteMiiReg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    phyAddr,
+    IN  GT_U8    regAddr,
+    IN  GT_U16   data
+)
+{
+    GT_STATUS   retVal;
+
+    gtSemTake(dev,dev->multiAddrSem,OS_WAIT_FOREVER);
+
+    retVal = miiSmiIfWriteRegister(dev,phyAddr,regAddr,data);
+
+    gtSemGive(dev,dev->multiAddrSem);
+
+    DBG_INFO(("Write to phy(0x%x) register: regAddr 0x%x, data 0x%x.\n",
+              phyAddr,regAddr,data));
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* hwReadPPU
+*
+* DESCRIPTION:
+*            This function reads PPU bit in Global Register
+*
+* INPUTS:
+*            None.
+*
+* OUTPUTS:
+*            data    - The read register's data.
+*
+* RETURNS:
+*            GT_OK on success, or
+*            GT_FAIL otherwise.
+*
+* COMMENTS:
+*            This function can be used to access PHY register connected to Gigabit
+*            Switch.
+*            Semaphore should be acquired before this function get called.
+*
+*******************************************************************************/
+static GT_STATUS hwReadPPU
+(
+    IN  GT_QD_DEV *dev,
+    OUT GT_U16    *data
+)
+{
+    GT_STATUS   retVal;
+    GT_U16        tmpData;
+    GT_U8       phyAddr;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    retVal =  miiSmiIfReadRegister(dev,phyAddr,4,&tmpData);
+
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    *data = (tmpData >> 14) & 0x1;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* hwWritePPU
+*
+* DESCRIPTION:
+*            This function writes PPU bit in Global Register
+*
+* INPUTS:
+*            data - The value to write into PPU bit
+*
+* OUTPUTS:
+*            None.
+*
+* RETURNS:
+*            GT_OK on success, or
+*            GT_FAIL otherwise.
+*
+* COMMENTS:
+*            This function can be used to access PHY register connected to Gigabit
+*            Switch.
+*            Semaphore should be acquired before this function get called.
+*
+*******************************************************************************/
+static GT_STATUS hwWritePPU
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U16    data
+)
+{
+    GT_STATUS   retVal;
+    GT_U16        tmpData;
+    GT_U8       phyAddr;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    retVal =  miiSmiIfReadRegister(dev,phyAddr,4,&tmpData);
+
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    if (data)
+        tmpData |= (0x1 << 14);
+    else
+        tmpData &= ~(0x1 << 14);
+
+    retVal = miiSmiIfWriteRegister(dev,phyAddr,4,tmpData);
+
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    /* busy wait - till PPU is actually disabled */
+    if (data == 0) /* disable PPU */
+    {
+        gtDelay(250);
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+static GT_STATUS coreReadPhyReg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    regAddr,
+    OUT GT_U16   *data
+)
+{
+    GT_U8       phyAddr;
+    GT_STATUS   retVal, retPPU = 0;
+    GT_U16        orgPPU;
+    GT_BOOL        usePPU = GT_FALSE;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, portNum, PHY_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    orgPPU = 0;
+
+    if(IS_IN_DEV_GROUP(dev, DEV_PPU_READ_ONLY))
+    {
+        if((IS_IN_DEV_GROUP(dev,DEV_PPU_SERDES_ACCESS_RES)) && (dev->validSerdesVec & (1<<phyAddr)))
+        {
+            if((retPPU=hwReadPPU(dev, &orgPPU)) != GT_OK)
+            {
+                return retPPU;
+            }
+
+            if(orgPPU)
+            {
+                /* Disable PPU so that External Phy can be accessible */
+                if((retPPU=hwWritePPU(dev, 0)) != GT_OK)
+                {
+                    return retPPU;
+                }
+            }
+        }
+        else
+        usePPU = GT_TRUE;
+    }
+    else if(IS_IN_DEV_GROUP(dev,DEV_EXTERNAL_PHY))
+    {
+        if((retPPU=hwReadPPU(dev, &orgPPU)) != GT_OK)
+        {
+            return retPPU;
+        }
+
+        if(orgPPU)
+        {
+            if(IS_IN_DEV_GROUP(dev,DEV_PPU_PHY_ACCESS))
+            {
+                if(IS_IN_DEV_GROUP(dev,DEV_PPU_PHY_ACCESS_RES))
+                {
+                    if(dev->revision != 0)
+                        usePPU = GT_TRUE;
+                }
+                else
+                {
+                     usePPU = GT_TRUE;
+                }
+            }
+
+            /* Disable PPU so that External Phy can be accessible */
+            if (!usePPU)
+            {
+                if((retPPU=hwWritePPU(dev, 0)) != GT_OK)
+                {
+                    return retPPU;
+                }
+            }
+        }
+    }
+    else if(IS_IN_DEV_GROUP(dev,DEV_PHY_ACCESS_NO_DIRECTLY))
+    {
+      usePPU = GT_TRUE;
+    }
+
+    if (usePPU)
+    {
+        retVal = phyRegReadPPUEn (dev,phyAddr,regAddr,data);
+    }
+    else
+    {
+        retVal = miiSmiIfReadRegister(dev,phyAddr,regAddr,data);
+    }
+
+    DBG_INFO(("Read from phy(%d) register: phyAddr 0x%x, regAddr 0x%x, ", portNum,phyAddr,regAddr));
+
+    if(orgPPU && (!usePPU))
+    {
+      if((retPPU=hwWritePPU(dev, orgPPU)) != GT_OK)
+      {
+        return retPPU;
+      }
+    }
+
+    return retVal;
+}
+
+
+static GT_STATUS coreWritePhyReg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    regAddr,
+    IN  GT_U16   data
+)
+{
+    GT_U8           phyAddr;
+    GT_STATUS   retVal, retPPU;
+    GT_U16        orgPPU = 0;
+    GT_BOOL        usePPU = GT_FALSE;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, portNum, PHY_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    if(IS_IN_DEV_GROUP(dev,DEV_PPU_READ_ONLY))
+    {
+        if((IS_IN_DEV_GROUP(dev,DEV_PPU_SERDES_ACCESS_RES)) && (dev->validSerdesVec & (1<<phyAddr)))
+        {
+            if((retPPU=hwReadPPU(dev, &orgPPU)) != GT_OK)
+            {
+                return retPPU;
+            }
+
+            if(orgPPU)
+            {
+                /* Disable PPU so that External Phy can be accessible */
+                if((retPPU=hwWritePPU(dev, 0)) != GT_OK)
+                {
+                        return retPPU;
+                }
+            }
+        }
+        else
+          usePPU = GT_TRUE;
+    }
+    else if(IS_IN_DEV_GROUP(dev,DEV_EXTERNAL_PHY))
+    {
+        if((retPPU=hwReadPPU(dev, &orgPPU)) != GT_OK)
+        {
+            return retPPU;
+        }
+
+        if(orgPPU)
+        {
+            if(IS_IN_DEV_GROUP(dev,DEV_PPU_PHY_ACCESS))
+            {
+                if(IS_IN_DEV_GROUP(dev,DEV_PPU_PHY_ACCESS_RES))
+                {
+                    if(dev->revision != 0)
+                        usePPU = GT_TRUE;
+                }
+                else
+                {
+                     usePPU = GT_TRUE;
+                }
+            }
+
+            /* Disable PPU so that External Phy can be accessible */
+            if (!usePPU)
+            {
+                if((retPPU=hwWritePPU(dev, 0)) != GT_OK)
+                {
+                    return retPPU;
+                }
+            }
+        }
+    }
+    else if(IS_IN_DEV_GROUP(dev,DEV_PHY_ACCESS_NO_DIRECTLY))
+    {
+      usePPU = GT_TRUE;
+    }
+
+    DBG_INFO(("Write to phy(%d) register: phyAddr 0x%x, regAddr 0x%x, ",
+                portNum,phyAddr,regAddr));
+    DBG_INFO(("data 0x%x.\n",data));
+
+    if (usePPU)
+    {
+        retVal = phyRegWritePPUEn (dev,phyAddr,regAddr,data);
+    }
+    else
+    {
+        retVal = miiSmiIfWriteRegister(dev,phyAddr,regAddr,data);
+    }
+
+        if(orgPPU && (!usePPU))
+        {
+            if((retPPU=hwWritePPU(dev, orgPPU)) != GT_OK)
+            {
+                return retPPU;
+            }
+        }
+
+    return retVal;
+}
+
+
+static GT_STATUS coreReadPagedPhyReg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    pageNum,
+    IN  GT_U8    regAddr,
+    IN  GT_U32     anyPage,
+    OUT GT_U16   *data
+)
+{
+    GT_U8       phyAddr,pageAddr;
+    GT_STATUS   retVal, retPPU;
+    GT_U16        orgPPU, tmpData, orgPage;
+    GT_BOOL        usePPU = GT_FALSE;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, portNum, PHY_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    orgPPU = 0;
+
+    if(IS_IN_DEV_GROUP(dev,DEV_PPU_READ_ONLY))
+    {
+        if((IS_IN_DEV_GROUP(dev,DEV_PPU_SERDES_ACCESS_RES)) && (dev->validSerdesVec & (1<<phyAddr)))
+        {
+            if((retPPU=hwReadPPU(dev, &orgPPU)) != GT_OK)
+            {
+                return retPPU;
+            }
+
+            if(orgPPU)
+            {
+                /* Disable PPU so that External Phy can be accessible */
+                if((retPPU=hwWritePPU(dev, 0)) != GT_OK)
+                {
+                    return retPPU;
+                }
+            }
+        }
+        else
+             usePPU = GT_TRUE;
+    }
+    else if(IS_IN_DEV_GROUP(dev,DEV_EXTERNAL_PHY))
+    {
+        if((retPPU=hwReadPPU(dev, &orgPPU)) != GT_OK)
+        {
+            return retPPU;
+        }
+
+        if(orgPPU)
+        {
+            if(IS_IN_DEV_GROUP(dev,DEV_PPU_PHY_ACCESS))
+            {
+                if(IS_IN_DEV_GROUP(dev,DEV_PPU_PHY_ACCESS_RES))
+                {
+                    if(dev->revision != 0)
+                        usePPU = GT_TRUE;
+                }
+                else
+                {
+                     usePPU = GT_TRUE;
+                }
+            }
+
+            /* Disable PPU so that External Phy can be accessible */
+            if (!usePPU)
+            {
+                if((retPPU=hwWritePPU(dev, 0)) != GT_OK)
+                {
+                    return retPPU;
+                }
+            }
+        }
+    }
+
+    if(anyPage & (1 << regAddr))
+    {
+        if (usePPU)
+        {
+            retVal = phyRegReadPPUEn (dev,phyAddr,regAddr,data);
+        }
+        else
+        {
+            retVal = miiSmiIfReadRegister(dev,phyAddr,regAddr,data);
+        }
+        DBG_INFO(("Read from phy(%d) register: smiAddr 0x%x, pageNum 0x%x, regAddr 0x%x\n",
+                    portNum,phyAddr,pageNum,regAddr));
+    }
+    else
+    {
+        pageAddr = GT_GET_PAGE_ADDR(regAddr);
+
+        if (usePPU)
+        {
+            retVal = phyRegReadPPUEn (dev,phyAddr,pageAddr,&orgPage);
+        }
+        else
+        {
+            retVal = miiSmiIfReadRegister(dev,phyAddr,pageAddr,&orgPage);
+        }
+
+        if (retVal != GT_OK)
+        {
+            DBG_INFO(("Reading page register failed\n"));
+            return retVal;
+        }
+
+        if(pageAddr == 22)
+            tmpData = orgPage & 0xFF00;
+        else
+            tmpData = orgPage & 0xFFC0;
+        tmpData |= pageNum;
+
+        if (usePPU)
+        {
+            if((retVal = phyRegWritePPUEn(dev,phyAddr,pageAddr,tmpData)) == GT_OK)
+            {
+                retVal = phyRegReadPPUEn (dev,phyAddr,regAddr,data);
+
+                DBG_INFO(("Read from phy(%d) register: smiAddr 0x%x, pageNum 0x%x, regAddr 0x%x\n",
+                            portNum,phyAddr,pageNum,regAddr));
+            }
+        }
+        else
+        {
+            if((retVal = miiSmiIfWriteRegister(dev,phyAddr,pageAddr,tmpData)) == GT_OK)
+            {
+                retVal = miiSmiIfReadRegister(dev,phyAddr,regAddr,data);
+
+                DBG_INFO(("Read from phy(%d) register: smiAddr 0x%x, pageNum 0x%x, regAddr 0x%x\n",
+                            portNum,phyAddr,pageNum,regAddr));
+            }
+        }
+    }
+
+        if(orgPPU && (!usePPU))
+        {
+            if((retPPU=hwWritePPU(dev, orgPPU)) != GT_OK)
+            {
+                return retPPU;
+            }
+        }
+
+    return retVal;
+
+}
+
+
+static GT_STATUS coreWritePagedPhyReg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    portNum,
+    IN  GT_U8    pageNum,
+    IN  GT_U8    regAddr,
+    IN  GT_U32     anyPage,
+    IN  GT_U16   data
+)
+{
+    GT_U8           phyAddr,pageAddr;
+    GT_STATUS   retVal, retPPU;
+    GT_U16        orgPPU, tmpData, orgPage;
+    GT_BOOL        usePPU = GT_FALSE;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, portNum, PHY_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    orgPPU = 0;
+
+    if(IS_IN_DEV_GROUP(dev,DEV_PPU_READ_ONLY))
+    {
+        if((IS_IN_DEV_GROUP(dev,DEV_PPU_SERDES_ACCESS_RES)) && (dev->validSerdesVec & (1<<phyAddr)))
+        {
+            if((retPPU=hwReadPPU(dev, &orgPPU)) != GT_OK)
+            {
+                return retPPU;
+            }
+
+            if(orgPPU)
+            {
+                /* Disable PPU so that External Phy can be accessible */
+                if((retPPU=hwWritePPU(dev, 0)) != GT_OK)
+                {
+                    return retPPU;
+                }
+            }
+        }
+        else
+             usePPU = GT_TRUE;
+    }
+    else if(IS_IN_DEV_GROUP(dev,DEV_EXTERNAL_PHY))
+    {
+        if((retPPU=hwReadPPU(dev, &orgPPU)) != GT_OK)
+        {
+            return retPPU;
+        }
+
+        if(orgPPU)
+        {
+            if(IS_IN_DEV_GROUP(dev,DEV_PPU_PHY_ACCESS))
+            {
+                if(IS_IN_DEV_GROUP(dev,DEV_PPU_PHY_ACCESS_RES))
+                {
+                    if(dev->revision != 0)
+                        usePPU = GT_TRUE;
+                }
+                else
+                {
+                     usePPU = GT_TRUE;
+                }
+            }
+
+            /* Disable PPU so that External Phy can be accessible */
+            if (!usePPU)
+            {
+                if((retPPU=hwWritePPU(dev, 0)) != GT_OK)
+                {
+                    return retPPU;
+                }
+            }
+        }
+    }
+
+    DBG_INFO(("Write to phy(%d) register: smiAddr 0x%x, pageNum 0x%x, regAddr 0x%x\n",
+                portNum,phyAddr,pageNum,regAddr));
+    DBG_INFO(("data 0x%x.\n",data));
+
+    if(anyPage & (1 << regAddr))
+    {
+        if (usePPU)
+        {
+            retVal = phyRegWritePPUEn (dev,phyAddr,regAddr,data);
+        }
+        else
+        {
+            retVal = miiSmiIfWriteRegister(dev,phyAddr,regAddr,data);
+        }
+    }
+    else
+    {
+        pageAddr = GT_GET_PAGE_ADDR(regAddr);
+
+        if (usePPU)
+        {
+            retVal = phyRegReadPPUEn (dev,phyAddr,pageAddr,&orgPage);
+        }
+        else
+        {
+            retVal = miiSmiIfReadRegister(dev,phyAddr,pageAddr,&orgPage);
+        }
+
+        if (retVal != GT_OK)
+        {
+            DBG_INFO(("Reading page register failed\n"));
+            return retVal;
+        }
+
+        if(pageAddr == 22)
+            tmpData = orgPage & 0xFF00;
+        else
+            tmpData = orgPage & 0xFFC0;
+        tmpData |= pageNum;
+
+        if (usePPU)
+        {
+            if((retVal = phyRegWritePPUEn(dev,phyAddr,pageAddr,tmpData)) == GT_OK)
+            {
+                retVal = phyRegWritePPUEn(dev,phyAddr,regAddr,data);
+            }
+        }
+        else
+        {
+            if((retVal = miiSmiIfWriteRegister(dev,phyAddr,pageAddr,tmpData)) == GT_OK)
+            {
+                retVal = miiSmiIfWriteRegister(dev,phyAddr,regAddr,data);
+            }
+        }
+    }
+
+        if(orgPPU && (!usePPU))
+        {
+            if((retPPU=hwWritePPU(dev, orgPPU)) != GT_OK)
+            {
+                return retPPU;
+            }
+        }
+
+    return retVal;
+}
+
+
+/*****************************************************************************
+* phyRegReadPPUEn
+*
+* DESCRIPTION:
+*       This function reads phy register data when PPU is enabled.
+*
+* INPUTS:
+*       phyAddr     - The PHY address to be read.
+*       regAddr     - The register address to read.
+*       value       - The storage where register date to be saved.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_TRUE   - on success
+*       GT_FALSE  - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS phyRegReadPPUEn (GT_QD_DEV* dev, unsigned int phyAddr , unsigned int regAddr,
+                        unsigned short* value)
+#ifdef GT_RMGMT_ACCESS
+{
+  GT_U16 smiReg;
+  GT_STATUS   retVal;
+
+  HW_DEV_REG_ACCESS regAccess;
+
+  DBG_INFO(("Read Phy register while PPU Enabled\n"));
+
+  regAccess.entries = 4;
+
+  regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+  regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+  regAccess.rw_reg_list[0].reg = QD_REG_SMI_PHY_CMD;
+  regAccess.rw_reg_list[0].data = 15;
+  smiReg =  QD_SMI_BUSY | (phyAddr << QD_SMI_DEV_ADDR_BIT) | (QD_SMI_READ << QD_SMI_OP_BIT) | (regAddr << QD_SMI_REG_ADDR_BIT) | (QD_SMI_CLAUSE22 << QD_SMI_MODE_BIT);
+
+  regAccess.rw_reg_list[1].cmd = HW_REG_WRITE;
+  regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+  regAccess.rw_reg_list[1].reg = QD_REG_SMI_PHY_CMD;
+  regAccess.rw_reg_list[1].data = smiReg;
+
+  regAccess.rw_reg_list[2].cmd = HW_REG_WAIT_TILL_0;
+  regAccess.rw_reg_list[2].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+  regAccess.rw_reg_list[2].reg = QD_REG_SMI_PHY_CMD;
+  regAccess.rw_reg_list[2].data = 15;
+
+  regAccess.rw_reg_list[3].cmd = HW_REG_READ;
+  regAccess.rw_reg_list[3].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+  regAccess.rw_reg_list[3].reg = QD_REG_SMI_PHY_DATA;
+  regAccess.rw_reg_list[3].data = 0;
+  retVal = hwAccessMultiRegs(dev, &regAccess);
+  if(retVal != GT_OK)
+  {
+    return retVal;
+  }
+  *value = (unsigned short)regAccess.rw_reg_list[3].data;
+
+  return GT_OK;
+}
+#else
+{
+    volatile unsigned int timeOut; /* in 100MS units */
+    volatile int i;
+    GT_U16 smiReg;
+
+    DBG_INFO(("Read Phy register while PPU Enabled\n"));
+
+    /* first check that it is not busy */
+    if(phyReadGlobal2Reg(dev,QD_REG_SMI_PHY_CMD, &smiReg) != GT_OK)
+    {
+        DBG_INFO(("Reading Phy register Failed\n"));
+        return GT_FAIL;
+    }
+    timeOut = QD_SMI_ACCESS_LOOP; /* initialize the loop count */
+
+    if(smiReg & QD_SMI_BUSY)
+    {
+        for(i = 0 ; i < QD_SMI_TIMEOUT ; i++);
+        do
+        {
+            if(timeOut-- < 1 )
+            {
+                DBG_INFO(("Reading Phy register Timed Out\n"));
+                return GT_FAIL;
+            }
+            if(phyReadGlobal2Reg(dev,QD_REG_SMI_PHY_CMD, &smiReg) != GT_OK)
+            {
+                DBG_INFO(("Reading Phy register Failed\n"));
+                return GT_FAIL;
+            }
+        } while (smiReg & QD_SMI_BUSY);
+    }
+
+    smiReg =  QD_SMI_BUSY | (phyAddr << QD_SMI_DEV_ADDR_BIT) | (QD_SMI_READ << QD_SMI_OP_BIT) |
+            (regAddr << QD_SMI_REG_ADDR_BIT) | (QD_SMI_CLAUSE22 << QD_SMI_MODE_BIT);
+
+    if(phyWriteGlobal2Reg(dev,QD_REG_SMI_PHY_CMD, smiReg) != GT_OK)
+    {
+        return GT_FAIL;
+    }
+    timeOut = QD_SMI_ACCESS_LOOP; /* initialize the loop count */
+    if(phyReadGlobal2Reg(dev,QD_REG_SMI_PHY_CMD, &smiReg) != GT_OK)
+    {
+        return GT_FAIL;
+    }
+
+    if(smiReg & QD_SMI_BUSY)
+    {
+        for(i = 0 ; i < QD_SMI_TIMEOUT ; i++);
+        do
+        {
+            if(timeOut-- < 1 )
+            {
+                DBG_INFO(("Reading Phy register Timed Out\n"));
+                return GT_FALSE;
+            }
+            if(phyReadGlobal2Reg(dev,QD_REG_SMI_PHY_CMD, &smiReg) != GT_OK)
+            {
+                DBG_INFO(("Reading Phy register Failed\n"));
+                return GT_FAIL;
+            }
+        } while (smiReg & QD_SMI_BUSY);
+    }
+    if(phyReadGlobal2Reg(dev,QD_REG_SMI_PHY_DATA, &smiReg) != GT_OK)
+    {
+        DBG_INFO(("Reading Phy register Failed\n"));
+        return GT_FAIL;
+    }
+    *value = (unsigned short)smiReg;
+
+    return GT_OK;
+}
+#endif
+
+/*****************************************************************************
+* phyRegWritePPUEn
+*
+* DESCRIPTION:
+*       This function writes data to a phy register when PPU is enabled.
+*
+* INPUTS:
+*       phyAddr     - The PHY address to be read.
+*       regAddr     - The register address to read.
+*       value       - The data to be written into the register.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_TRUE   - on success
+*       GT_FALSE  - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+
+GT_STATUS phyRegWritePPUEn (GT_QD_DEV* dev, unsigned int phyAddr , unsigned int regAddr,
+                       unsigned short value)
+#ifdef GT_RMGMT_ACCESS
+{
+  GT_U16 smiReg;
+  GT_STATUS   retVal;
+
+  HW_DEV_REG_ACCESS regAccess;
+
+  DBG_INFO(("Write Phy register while PPU Enabled\n"));
+
+  regAccess.entries = 3;
+
+  regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+  regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+  regAccess.rw_reg_list[0].reg = QD_REG_SMI_PHY_CMD;
+  regAccess.rw_reg_list[0].data = 15;
+
+  regAccess.rw_reg_list[1].cmd = HW_REG_WRITE;
+  regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+  regAccess.rw_reg_list[1].reg = QD_REG_SMI_PHY_DATA;
+  regAccess.rw_reg_list[1].data = value;
+
+  smiReg = QD_SMI_BUSY | (phyAddr << QD_SMI_DEV_ADDR_BIT) | (QD_SMI_WRITE << QD_SMI_OP_BIT) | (regAddr << QD_SMI_REG_ADDR_BIT) | (QD_SMI_CLAUSE22 << QD_SMI_MODE_BIT);
+
+  regAccess.rw_reg_list[2].cmd = HW_REG_WRITE;
+  regAccess.rw_reg_list[2].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+  regAccess.rw_reg_list[2].reg = QD_REG_SMI_PHY_CMD;
+  regAccess.rw_reg_list[2].data = smiReg;
+
+  retVal = hwAccessMultiRegs(dev, &regAccess);
+  if(retVal != GT_OK)
+  {
+    return retVal;
+  }
+
+  return GT_OK;
+}
+#else
+{
+    volatile unsigned int timeOut; /* in 100MS units */
+    volatile int i;
+    GT_U16 smiReg;
+
+    DBG_INFO(("Writing Phy register while PPU Enabled\n"));
+
+    /* first check that it is not busy */
+    if(phyReadGlobal2Reg(dev,QD_REG_SMI_PHY_CMD, &smiReg) != GT_OK)
+    {
+        DBG_INFO(("Reading Phy register Failed\n"));
+        return GT_FAIL;
+    }
+    timeOut = QD_SMI_ACCESS_LOOP; /* initialize the loop count */
+
+
+    if(smiReg & QD_SMI_BUSY)
+    {
+        for(i = 0 ; i < QD_SMI_TIMEOUT ; i++);
+        do
+        {
+            if(timeOut-- < 1 )
+            {
+                DBG_INFO(("Writing Phy register Timed Out\n"));
+                return GT_FALSE;
+            }
+            if(phyReadGlobal2Reg(dev,QD_REG_SMI_PHY_CMD, &smiReg) != GT_OK)
+            {
+                DBG_INFO(("Writing Phy register Failed\n"));
+                return GT_FAIL;
+            }
+        } while (smiReg & QD_SMI_BUSY);
+    }
+
+    if(phyWriteGlobal2Reg(dev,QD_REG_SMI_PHY_DATA, value) != GT_OK)
+    {
+        DBG_INFO(("Writing Phy Data register Failed\n"));
+        return GT_FAIL;
+    }
+    smiReg = QD_SMI_BUSY | (phyAddr << QD_SMI_DEV_ADDR_BIT) | (QD_SMI_WRITE << QD_SMI_OP_BIT) |
+            (regAddr << QD_SMI_REG_ADDR_BIT) | (QD_SMI_CLAUSE22 << QD_SMI_MODE_BIT);
+
+    if(phyWriteGlobal2Reg(dev,QD_REG_SMI_PHY_CMD, smiReg) != GT_OK)
+    {
+        DBG_INFO(("Writing Phy Command register Failed\n"));
+        return GT_FAIL;
+    }
+
+    return GT_OK;
+}
+
+static GT_STATUS phyReadGlobal2Reg
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_U8    regAddr,
+    OUT GT_U16   *data
+)
+{
+    GT_U8       phyAddr;
+    GT_STATUS   retVal;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    retVal = miiSmiIfReadRegister(dev,phyAddr,regAddr,data);
+
+    DBG_INFO(("read from global 2 register: phyAddr 0x%x, regAddr 0x%x, ",
+              phyAddr,regAddr));
+    DBG_INFO(("data 0x%x.\n",*data));
+    return retVal;
+}
+
+
+static GT_STATUS phyWriteGlobal2Reg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8    regAddr,
+    IN  GT_U16   data
+)
+{
+    GT_U8   phyAddr;
+    GT_STATUS   retVal;
+
+    phyAddr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+    if (phyAddr == 0xFF)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    retVal = miiSmiIfWriteRegister(dev,phyAddr,regAddr,data);
+
+    return retVal;
+}
+#endif
+
+#ifdef GT_RMGMT_ACCESS
+/*******************************************************************************
+* hwAccessMultiRegs
+*
+* DESCRIPTION:
+*       This function accesses switch's registers.
+*
+* INPUTS:
+*   regList     - list of HW_DEV_RW_REG.
+*     HW_DEV_RW_REG:
+*     cmd - HW_REG_READ, HW_REG_WRITE, HW_REG_WAIT_TILL_0 or HW_REG_WAIT_TILL_1
+*     addr - SMI Address
+*     reg  - Register offset
+*     data - INPUT,OUTPUT:Value in the Register or Bit number
+*
+* OUTPUTS:
+*   regList
+*
+* RETURNS:
+*       GT_OK on success, or
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS hwAccessMultiRegs
+(
+    IN GT_QD_DEV *dev,
+    INOUT HW_DEV_REG_ACCESS *regList
+)
+{
+  GT_STATUS   retVal;
+
+  gtSemTake(dev,dev->hwAccessRegsSem,OS_WAIT_FOREVER);
+
+  retVal = qdAccessRegs(dev, regList);
+
+  gtSemGive(dev,dev->hwAccessRegsSem);
+
+  return retVal;
+}
+#endif
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/driver/makefile b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/driver/makefile
new file mode 100644
index 000000000000..0bbdad0412b9
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/driver/makefile
@@ -0,0 +1,33 @@
+# Source files in this directory
+TARGET =
+CSOURCES    = gtDrvConfig.c gtDrvEvents.c gtHwCntl.c
+ASOURCES    =
+
+# Include common variable definitions
+ifeq ($(OS_RUN),VXWORKS)
+include $(DSDT_TOOL_DIR)\make.defs
+endif
+ifeq ($(OS_RUN),WIN32)
+include $(DSDT_TOOL_DIR)\makewce.defs
+endif
+ifeq ($(OS_RUN),LINUX)
+include $(DSDT_TOOL_DIR)/makelnx.defs
+endif
+
+# Add in extra stuffs
+EXTRA_INCLUDE    +=
+EXTRA_DEFINE    +=
+ADDED_CFLAGS    +=
+
+# Include common build rules
+ifeq ($(OS_RUN),VXWORKS)
+include $(DSDT_TOOL_DIR)\make.rules
+endif
+ifeq ($(OS_RUN),WIN32)
+include $(DSDT_TOOL_DIR)\makewce.rules
+endif
+ifeq ($(OS_RUN),LINUX)
+include $(DSDT_TOOL_DIR)/makelnx.rules
+endif
+
+# end of file
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/makefile b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/makefile
new file mode 100644
index 000000000000..6776b017e612
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/makefile
@@ -0,0 +1,106 @@
+# makefile - build main object file
+#
+# modification history
+# --------------------
+# 04-15-02,mj	created
+#
+#######################################################################
+
+TARGET = $(DSDT_PROJ_NAME)
+
+ifeq ($(OS_RUN),VXWORKS)
+WORK_TO_DO = $(DSDT_PROJ_NAME).o
+
+# Include common variable definitions
+include $(DSDT_TOOL_DIR)\make.defs
+endif
+
+ifeq ($(OS_RUN),LINUX)
+WORK_TO_DO = $(DSDT_PROJ_NAME).o
+
+# Include common variable definitions
+include $(DSDT_TOOL_DIR)/makelnx.defs
+endif
+
+exe : $(WORK_TO_DO)
+
+.PHONY : exe
+
+vpath %.o    $(OBJDIR)
+vpath %.lib  $(OBJDIR)
+
+MAINDRV_PATH = $(SRC_BASE_PATH)
+MAD_ROOT = $(SRC_BASE_PATH)/../../phy
+
+SRC_OBJ_DIR = $(MAINDRV_PATH)/driver \
+               $(MAINDRV_PATH)/msapi \
+               $(MAINDRV_PATH)/platform
+
+OBJECTS_LIST = $(MAINDRV_PATH)/driver/$(OBJDIR)/driver.o \
+               $(MAINDRV_PATH)/msapi/$(OBJDIR)/msapi.o \
+               $(MAINDRV_PATH)/platform/$(OBJDIR)/platform.o
+
+OBJECTS = $(OBJECTS_LIST)
+
+OBJDIRS	= $(subst /,\,$(dir $(OBJECTS)))
+LIBDIRS	= $(subst /,\,$(LIB_DIR))
+
+OBJDIRS_/ = $(subst \,/,$(OBJDIRS))
+LIBDIRS_/ = $(subst \,/,$(LIBDIRS))
+
+ifeq ($(OS_RUN),VXWORKS)
+$(DSDT_PROJ_NAME).o : $(OBJECTS) $(MAD_OBJ)
+	@ $(ECHO) '------------'
+	@ $(ECHO) 'Building $@'
+	$(LD) $(LDFLAGS) -Map $(LIB_DIR)/$(DSDT_PROJ_NAME).map -o $(LIB_DIR)/$(DSDT_PROJ_NAME).o $(OBJECTS) $(MAD_OBJ)
+
+$(OBJECTS) : FORCE
+	@($(CD) $(subst /,\,$(dir $(@D)))) && $(MAKE)
+
+$(MAD_OBJ) : FORCE
+	$(MAKE) -C $(MAD_ROOT)/src
+
+endif
+
+ifeq ($(OS_RUN),LINUX)
+$(DSDT_PROJ_NAME).o : $(OBJECTS) $(MAD_OBJ)
+	@ $(ECHO) '------------'
+	@ $(ECHO) 'Building $@'
+	$(LD) $(LDFLAGS) -Map $(LIB_DIR)/$(DSDT_PROJ_NAME).map -o $(LIB_DIR)/$(DSDT_PROJ_NAME).o $(OBJECTS) $(MAD_OBJ)
+
+$(OBJECTS) : FORCE
+	@cd $(dir $(@D)); $(MAKE)
+
+$(MAD_OBJ) : FORCE
+	$(MAKE) -C $(MAD_ROOT)/src
+
+endif
+
+FORCE :
+
+.PHONY : clean
+ifeq ($(OS_RUN),VXWORKS)
+clean :
+	@for %x in ($(OBJDIRS)) do \
+	    @($(CD) %x..) && $(MAKE) clean
+	- ($(CD) $(LIBDIRS)) && $(RM) $(DSDT_PROJ_NAME).o
+	- ($(CD) $(LIBDIRS)) && $(RM) $(DSDT_PROJ_NAME).map
+	@if exist $(MAD_ROOT) $(MAKE) -C $(MAD_ROOT)/src clean
+endif
+
+ifeq ($(OS_RUN),LINUX)
+clean :
+	@for i in $(SRC_OBJ_DIR); do	\
+		cd $$i; $(RM) -r -f *.o *.map makedeps *obj; 	\
+	done
+#		cd $$i; $(RM) -f *.o *.map;
+	@for i in $(OBJDIRS_/); do	\
+		$(RM) -r -f  $$i;	\
+	done
+#		cd $$i..; $(MAKE) clean;
+	@cd $(LIB_DIR); $(RM) *.o
+	@cd $(LIB_DIR); $(RM) *.map
+	$(MAKE) -C $(MAD_ROOT)/src clean
+endif
+
+#end of file
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtAVB.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtAVB.c
new file mode 100644
index 000000000000..ee3ef334ec1f
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtAVB.c
@@ -0,0 +1,4018 @@
+#include <Copyright.h>
+
+/*******************************************************************************
+* gtAVB.c
+*
+* DESCRIPTION:
+*       API definitions for Precise Time Protocol logic
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtSem.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+
+#ifdef CONFIG_AVB_FPGA
+
+#undef USE_SINGLE_READ
+
+#define AVB_SMI_ADDR        0xC
+
+#define QD_REG_PTP_INT_OFFSET        0
+#define QD_REG_PTP_INTEN_OFFSET        1
+#define QD_REG_PTP_FREQ_OFFSET        4
+#define QD_REG_PTP_PHASE_OFFSET        6
+#define QD_REG_PTP_CLK_CTRL_OFFSET    4
+#define QD_REG_PTP_CYCLE_INTERVAL_OFFSET        5
+#define QD_REG_PTP_CYCLE_ADJ_OFFSET                6
+#define QD_REG_PTP_PLL_CTRL_OFFSET    7
+#define QD_REG_PTP_CLK_SRC_OFFSET    0x9
+#define QD_REG_PTP_P9_MODE_OFFSET    0xA
+#define QD_REG_PTP_RESET_OFFSET        0xB
+
+#define GT_PTP_MERGE_32BIT(_high16,_low16)    (((_high16)<<16)|(_low16))
+#define GT_PTP_GET_HIGH16(_data)    ((_data) >> 16) & 0xFFFF
+#define GT_PTP_GET_LOW16(_data)        (_data) & 0xFFFF
+
+#if 0
+
+#define AVB_FPGA_READ_REG       gprtGetPhyReg
+#define AVB_FPGA_WRITE_REG      gprtSetPhyReg
+unsigned int (*avbFpgaReadReg)(void* unused, unsigned int port, unsigned int reg, unsigned int* data);
+unsigned int (*avbFpgaWriteReg)(void* unused, unsigned int port, unsigned int reg, unsigned int data);
+#else
+
+/* for RMGMT access  and can be controlled by <sw_apps -rmgmt 0/1> */
+unsigned int (*avbFpgaReadReg)(void* unused, unsigned int port, unsigned int reg, GT_U32* data)=gprtGetPhyReg;
+unsigned int (*avbFpgaWriteReg)(void* unused, unsigned int port, unsigned int reg, GT_U32 data)=gprtSetPhyReg;
+#define AVB_FPGA_READ_REG       avbFpgaReadReg
+#define AVB_FPGA_WRITE_REG      avbFpgaWriteReg
+
+#endif /* 0 */
+
+#endif
+
+#if 0
+#define GT_PTP_BUILD_TIME(_time1, _time2)    (((_time1) << 16) | (_time2))
+#define GT_PTP_L16_TIME(_time1)    ((_time1) & 0xFFFF)
+#define GT_PTP_H16_TIME(_time1)    (((_time1) >> 16) & 0xFFFF)
+#endif
+
+
+/****************************************************************************/
+/* PTP operation function declaration.                                    */
+/****************************************************************************/
+extern GT_STATUS ptpOperationPerform
+(
+    IN   GT_QD_DEV             *dev,
+    IN   GT_PTP_OPERATION    ptpOp,
+    INOUT GT_PTP_OP_DATA     *opData
+);
+
+
+/*******************************************************************************
+* gavbGetPriority
+*
+* DESCRIPTION:
+*        Priority overwrite.
+*        Supported priority type is defined as GT_AVB_PRI_TYPE.
+*        Priority is either 3 bits or 2 bits depending on priority type.
+*            GT_AVB_HI_FPRI        - priority is 0 ~ 7
+*            GT_AVB_HI_QPRI        - priority is 0 ~ 3
+*            GT_AVB_LO_FPRI        - priority is 0 ~ 7
+*            GT_AVB_LO_QPRI        - priority is 0 ~ 3
+*            GT_LEGACY_HI_FPRI    - priority is 0 ~ 7
+*            GT_LEGACY_HI_QPRI    - priority is 0 ~ 3
+*            GT_LEGACY_LO_FPRI    - priority is 0 ~ 7
+*            GT_LEGACY_LO_QPRI    - priority is 0 ~ 3
+*
+* INPUTS:
+*         priType    - GT_AVB_PRI_TYPE
+*
+* OUTPUTS:
+*        pri    - priority
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbGetPriority
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_AVB_PRI_TYPE        priType,
+    OUT GT_U32        *pri
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U16        mask, reg, bitPos;
+
+    DBG_INFO(("gavbGetPriority Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_POLICY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    switch (priType)
+    {
+        case GT_AVB_HI_FPRI:
+            mask = 0x7;
+            reg = 0;
+            bitPos = 12;
+            break;
+        case GT_AVB_HI_QPRI:
+            mask = 0x3;
+            reg = 0;
+            bitPos = 8;
+            break;
+        case GT_AVB_LO_FPRI:
+            mask = 0x7;
+            reg = 0;
+            bitPos = 4;
+            break;
+        case GT_AVB_LO_QPRI:
+            mask = 0x3;
+            reg = 0;
+            bitPos = 0;
+            break;
+        case GT_LEGACY_HI_FPRI:
+            mask = 0x7;
+            reg = 4;
+            bitPos = 12;
+            break;
+        case GT_LEGACY_HI_QPRI:
+            mask = 0x3;
+            reg = 4;
+            bitPos = 8;
+            break;
+        case GT_LEGACY_LO_FPRI:
+            mask = 0x7;
+            reg = 4;
+            bitPos = 4;
+            break;
+        case GT_LEGACY_LO_QPRI:
+            mask = 0x3;
+            reg = 4;
+            bitPos = 0;
+            break;
+        default:
+            return GT_BAD_PARAM;
+    }
+
+    opData.ptpBlock = 0x1;    /* AVB Policy register space */
+
+    opData.ptpPort = 0xF;    /* Global register */
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = reg;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading TAI register.\n"));
+        return GT_FAIL;
+    }
+
+    *pri = (GT_U32)(opData.ptpData >> bitPos) & mask;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gavbSetPriority
+*
+* DESCRIPTION:
+*        Priority overwrite.
+*        Supported priority type is defined as GT_AVB_PRI_TYPE.
+*        Priority is either 3 bits or 2 bits depending on priority type.
+*            GT_AVB_HI_FPRI        - priority is 0 ~ 7
+*            GT_AVB_HI_QPRI        - priority is 0 ~ 3
+*            GT_AVB_LO_FPRI        - priority is 0 ~ 7
+*            GT_AVB_LO_QPRI        - priority is 0 ~ 3
+*            GT_LEGACY_HI_FPRI    - priority is 0 ~ 7
+*            GT_LEGACY_HI_QPRI    - priority is 0 ~ 3
+*            GT_LEGACY_LO_FPRI    - priority is 0 ~ 7
+*            GT_LEGACY_LO_QPRI    - priority is 0 ~ 3
+*
+* INPUTS:
+*         priType    - GT_AVB_PRI_TYPE
+*        pri    - priority
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbSetPriority
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_AVB_PRI_TYPE        priType,
+    IN  GT_U32        pri
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U16        mask, reg, bitPos;
+
+    DBG_INFO(("gavbSetPriority Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_POLICY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    switch (priType)
+    {
+        case GT_AVB_HI_FPRI:
+            mask = 0x7;
+            reg = 0;
+            bitPos = 12;
+            break;
+        case GT_AVB_HI_QPRI:
+            mask = 0x3;
+            reg = 0;
+            bitPos = 8;
+            break;
+        case GT_AVB_LO_FPRI:
+            mask = 0x7;
+            reg = 0;
+            bitPos = 4;
+            break;
+        case GT_AVB_LO_QPRI:
+            mask = 0x3;
+            reg = 0;
+            bitPos = 0;
+            break;
+        case GT_LEGACY_HI_FPRI:
+            mask = 0x7;
+            reg = 4;
+            bitPos = 12;
+            break;
+        case GT_LEGACY_HI_QPRI:
+            mask = 0x3;
+            reg = 4;
+            bitPos = 8;
+            break;
+        case GT_LEGACY_LO_FPRI:
+            mask = 0x7;
+            reg = 4;
+            bitPos = 4;
+            break;
+        case GT_LEGACY_LO_QPRI:
+            mask = 0x3;
+            reg = 4;
+            bitPos = 0;
+            break;
+        default:
+            return GT_BAD_PARAM;
+    }
+
+    if (pri & (~mask))
+    {
+        return GT_BAD_PARAM;
+    }
+
+    opData.ptpBlock = 0x1;    /* AVB Policy register space */
+
+    opData.ptpPort = 0xF;    /* Global register */
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = reg;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading TAI register.\n"));
+        return GT_FAIL;
+    }
+
+    opData.ptpData &= ~(mask << bitPos);
+    opData.ptpData |= (pri << bitPos);
+
+    op = PTP_WRITE_DATA;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing TAI register.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gavbGetAVBHiLimit
+*
+* DESCRIPTION:
+*        AVB Hi Frame Limit.
+*        When these bits are zero, normal frame processing occurs.
+*        When it's non-zero, they are used to define the maximum frame size allowed
+*        for AVB frames that can be placed into the GT_AVB_HI_QPRI queue. Frames
+*        that are over this size limit are filtered. The only exception to this
+*        is non-AVB frames that get their QPriAvb assigned by the Priority Override
+*        Table
+*
+* INPUTS:
+*         None
+*
+* OUTPUTS:
+*        limit    - Hi Frame Limit
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbGetAVBHiLimit
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U32        *limit
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gavbGetAVBHiLimit Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_POLICY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x1;    /* AVB Policy register space */
+
+    opData.ptpPort = 0xF;    /* Global register */
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 8;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    *limit = (GT_U32)(opData.ptpData & 0x7FF);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gavbSetAVBHiLimit
+*
+* DESCRIPTION:
+*        AVB Hi Frame Limit.
+*        When these bits are zero, normal frame processing occurs.
+*        When it's non-zero, they are used to define the maximum frame size allowed
+*        for AVB frames that can be placed into the GT_AVB_HI_QPRI queue. Frames
+*        that are over this size limit are filtered. The only exception to this
+*        is non-AVB frames that get their QPriAvb assigned by the Priority Override
+*        Table
+*
+* INPUTS:
+*        limit    - Hi Frame Limit
+*
+* OUTPUTS:
+*         None
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbSetAVBHiLimit
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        limit
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gavbSetAVBHiLimit Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_POLICY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x1;    /* AVB Policy register space */
+
+    opData.ptpPort = 0xF;    /* Global register */
+    op = PTP_WRITE_DATA;
+
+    opData.ptpAddr = 8;
+    opData.ptpData = (GT_U16)limit;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gavbGetPtpExtClk
+*
+* DESCRIPTION:
+*        PTP external clock select.
+*        When this bit is cleared to a zero, the PTP core gets its clock from
+*        an internal 125MHz clock based on the device's XTAL_IN input.
+*        When this bit is set to a one, the PTP core gets its clock from the device's
+*        PTP_EXTCLK pin.
+*
+* INPUTS:
+*         None
+*
+* OUTPUTS:
+*        extClk    - GT_TRUE if external clock is selected, GT_FALSE otherwise
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbGetPtpExtClk
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *extClk
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gavbGetPtpExtClk Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_POLICY_RECOVER_CLK))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x1;    /* AVB Policy register space */
+
+    opData.ptpPort = 0xF;    /* Global register */
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 0xB;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    *extClk = (GT_U32)(opData.ptpData >> 15) & 0x1;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gavbSetPtpExtClk
+*
+* DESCRIPTION:
+*        PTP external clock select.
+*        When this bit is cleared to a zero, the PTP core gets its clock from
+*        an internal 125MHz clock based on the device's XTAL_IN input.
+*        When this bit is set to a one, the PTP core gets its clock from the device's
+*        PTP_EXTCLK pin.
+*
+* INPUTS:
+*        extClk    - GT_TRUE if external clock is selected, GT_FALSE otherwise
+*
+* OUTPUTS:
+*         None
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbSetPtpExtClk
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL        extClk
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gavbSetPtpExtClk Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_POLICY_RECOVER_CLK))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x1;    /* AVB Policy register space */
+
+    opData.ptpPort = 0xF;    /* Global register */
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 0xB;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    if(extClk)
+        opData.ptpData |= 0x8000;
+    else
+        opData.ptpData &= ~0x8000;
+
+    op = PTP_WRITE_DATA;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gavbGetRecClkSel
+*
+* DESCRIPTION:
+*        Synchronous Ethernet Recovered Clock Select.
+*        This field indicate the internal PHY number whose recovered clock will
+*        be presented on the SE_RCLK0 or SE_RCLK1 pin depending on the recClk selection.
+*
+* INPUTS:
+*        recClk    - GT_AVB_RECOVERED_CLOCK type
+*
+* OUTPUTS:
+*        clkSel    - recovered clock selection
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbGetRecClkSel
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_AVB_RECOVERED_CLOCK    recClk,
+    OUT GT_U32        *clkSel
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U16        bitPos;
+
+    DBG_INFO(("gavbGetRecClkSel Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_POLICY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_POLICY_RECOVER_CLK))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    switch (recClk)
+    {
+        case GT_PRIMARY_RECOVERED_CLOCK:
+            bitPos = 0;
+            break;
+        case GT_SECONDARY_RECOVERED_CLOCK:
+            bitPos = 4;
+            break;
+        default:
+            return GT_BAD_PARAM;
+    }
+
+    opData.ptpBlock = 0x1;    /* AVB Policy register space */
+
+    opData.ptpPort = 0xF;    /* Global register */
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 0xB;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    *clkSel = (GT_U32)(opData.ptpData >> bitPos) & 0x7;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gavbSetRecClkSel
+*
+* DESCRIPTION:
+*        Synchronous Ethernet Recovered Clock Select.
+*        This field indicate the internal PHY number whose recovered clock will
+*        be presented on the SE_RCLK0 or SE_RCLK1 pin depending on the recClk selection.
+*
+* INPUTS:
+*        recClk    - GT_AVB_RECOVERED_CLOCK type
+*        clkSel    - recovered clock selection (should be less than 8)
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbSetRecClkSel
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_AVB_RECOVERED_CLOCK    recClk,
+    IN  GT_U32        clkSel
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U16        bitPos;
+
+    DBG_INFO(("gavbSetRecClkSel Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_POLICY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_POLICY_RECOVER_CLK))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    if (clkSel > 0x7)
+        return GT_BAD_PARAM;
+
+    switch (recClk)
+    {
+        case GT_PRIMARY_RECOVERED_CLOCK:
+            bitPos = 0;
+            break;
+        case GT_SECONDARY_RECOVERED_CLOCK:
+            bitPos = 4;
+            break;
+        default:
+            return GT_BAD_PARAM;
+    }
+
+    opData.ptpBlock = 0x1;    /* AVB Policy register space */
+
+    opData.ptpPort = 0xF;    /* Global register */
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 0xB;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    opData.ptpData &= ~(0x7 << bitPos);
+    opData.ptpData |= clkSel << bitPos;
+
+    op = PTP_WRITE_DATA;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gavbGetAvbOuiBytes
+*
+* DESCRIPTION:
+*        AVB OUI Limit Filter bytes(0 ~ 2).
+*        When all three of the AvbOui Bytes are zero, normal frame processing occurs.
+*        When any of the three AvbOui Bytes are non-zero, all AVB frames must have a
+*        destination address whose 1st three bytes of the DA match these three
+*        AvbOui Bytes or the frame will be filtered.
+*
+* INPUTS:
+*         None
+*
+* OUTPUTS:
+*        ouiBytes    - 3 bytes of OUI field in Ethernet address format
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbGetAvbOuiBytes
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U8        *obiBytes
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gavbGetAvbOuiBytes Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_POLICY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x1;    /* AVB Policy register space */
+
+    opData.ptpPort = 0xF;    /* Global register */
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 0xC;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    obiBytes[0] = (GT_U8)((opData.ptpData >> 8) & 0xFF);
+    obiBytes[1] = (GT_U8)(opData.ptpData & 0xFF);
+
+    opData.ptpAddr = 0xD;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    obiBytes[2] = (GT_U8)((opData.ptpData >> 8) & 0xFF);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gavbSetAvbOuiBytes
+*
+* DESCRIPTION:
+*        AVB OUI Limit Filter bytes(0 ~ 2).
+*        When all three of the AvbOui Bytes are zero, normal frame processing occurs.
+*        When any of the three AvbOui Bytes are non-zero, all AVB frames must have a
+*        destination address whose 1st three bytes of the DA match these three
+*        AvbOui Bytes or the frame will be filtered.
+*
+* INPUTS:
+*        ouiBytes    - 3 bytes of OUI field in Ethernet address format
+*
+* OUTPUTS:
+*         None
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbSetAvbOuiBytes
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U8        *obiBytes
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gavbSetAvbOuiBytes Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_POLICY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x1;    /* AVB Policy register space */
+
+    opData.ptpPort = 0xF;    /* Global register */
+    op = PTP_WRITE_DATA;
+
+    opData.ptpAddr = 0xC;
+
+    opData.ptpData = (obiBytes[0] << 8) | obiBytes[1];
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    opData.ptpAddr = 0xD;
+    opData.ptpData = (obiBytes[2] << 8);
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gavbGetAvbMode
+*
+* DESCRIPTION:
+*        Port's AVB Mode.
+*
+* INPUTS:
+*        port    - the logical port number
+*
+* OUTPUTS:
+*        mode    - GT_AVB_MODE type
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbGetAvbMode
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    OUT GT_AVB_MODE    *mode
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U8          hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gavbGetAvbMode Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_POLICY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+    if (hwPort == GT_INVALID_PORT)
+        return GT_BAD_PARAM;
+
+    opData.ptpBlock = 0x1;    /* AVB Policy register space */
+
+    opData.ptpPort = (GT_U16)hwPort;
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    *mode = (GT_AVB_MODE)((opData.ptpData >> 14) & 0x3);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gavbSetAvbMode
+*
+* DESCRIPTION:
+*        Port's AVB Mode.
+*
+* INPUTS:
+*        port    - the logical port number
+*        mode    - GT_AVB_MODE type
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbSetAvbMode
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_AVB_MODE    mode
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U8          hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gavbSetAvbMode Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_POLICY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+    if (hwPort == GT_INVALID_PORT)
+        return GT_BAD_PARAM;
+
+    opData.ptpBlock = 0x1;    /* AVB Policy register space */
+
+    opData.ptpPort = (GT_U16)hwPort;
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    opData.ptpData &= ~(0x3 << 14);
+    opData.ptpData |= (mode << 14);
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gavbGetAvbOverride
+*
+* DESCRIPTION:
+*        AVB Override.
+*        When disabled, normal frame processing occurs.
+*        When enabled, the egress portion of this port is considered AVB even if
+*        the ingress portion is not.
+*
+* INPUTS:
+*        port    - the logical port number
+*
+* OUTPUTS:
+*        en        - GT_TRUE if enabled, GT_FALSE otherwise
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbGetAvbOverride
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    OUT GT_BOOL        *en
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U8          hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gavbGetAvbOverride Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_POLICY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+    if (hwPort == GT_INVALID_PORT)
+        return GT_BAD_PARAM;
+
+    opData.ptpBlock = 0x1;    /* AVB Policy register space */
+
+    opData.ptpPort = (GT_U16)hwPort;
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed AVB operation.\n"));
+        return GT_FAIL;
+    }
+
+    *en = (opData.ptpData >> 13) & 0x1;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gavbSetAvbOverride
+*
+* DESCRIPTION:
+*        AVB Override.
+*        When disabled, normal frame processing occurs.
+*        When enabled, the egress portion of this port is considered AVB even if
+*        the ingress portion is not.
+*
+* INPUTS:
+*        port    - the logical port number
+*        en        - GT_TRUE to enable, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbSetAvbOverride
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_BOOL        en
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U8          hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gavbSetAvbOverride Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_POLICY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+    if (hwPort == GT_INVALID_PORT)
+        return GT_BAD_PARAM;
+
+    opData.ptpBlock = 0x1;    /* AVB Policy register space */
+
+    opData.ptpPort = (GT_U16)hwPort;
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed AVB operation.\n"));
+        return GT_FAIL;
+    }
+
+    if (en)
+        opData.ptpData |= (0x1 << 13);
+    else
+        opData.ptpData &= ~(0x1 << 13);
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed AVB operation.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gavbGetFilterBadAvb
+*
+* DESCRIPTION:
+*        Filter Bad AVB frames.
+*        When disabled, normal frame processing occurs.
+*        When enabled, frames that are considered Bad AVB frames are filtered.
+*
+* INPUTS:
+*        port    - the logical port number
+*
+* OUTPUTS:
+*        en        - GT_TRUE if enabled, GT_FALSE otherwise
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbGetFilterBadAvb
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    OUT GT_BOOL        *en
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U8          hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gavbGetFilterBadAvb Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_POLICY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+    if (hwPort == GT_INVALID_PORT)
+        return GT_BAD_PARAM;
+
+    opData.ptpBlock = 0x1;    /* AVB Policy register space */
+
+    opData.ptpPort = (GT_U16)hwPort;
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed AVB operation.\n"));
+        return GT_FAIL;
+    }
+
+    *en = (opData.ptpData >> 12) & 0x1;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gavbSetFilterBadAvb
+*
+* DESCRIPTION:
+*        Filter Bad AVB frames.
+*        When disabled, normal frame processing occurs.
+*        When enabled, frames that are considered Bad AVB frames are filtered.
+*
+* INPUTS:
+*        port    - the logical port number
+*        en        - GT_TRUE to enable, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbSetFilterBadAvb
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_BOOL        en
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U8          hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gavbSetFilterBadAvb Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_POLICY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+    if (hwPort == GT_INVALID_PORT)
+        return GT_BAD_PARAM;
+
+    opData.ptpBlock = 0x1;    /* AVB Policy register space */
+
+    opData.ptpPort = (GT_U16)hwPort;
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed AVB operation.\n"));
+        return GT_FAIL;
+    }
+
+    if (en)
+        opData.ptpData |= (0x1 << 12);
+    else
+        opData.ptpData &= ~(0x1 << 12);
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed AVB operation.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gavbGetAvbTunnel
+*
+* DESCRIPTION:
+*        AVB Tunnel.
+*        When disabled, normal frame processing occurs.
+*        When enabled, the port based VLAN Table masking, 802.1Q VLAN membership
+*        masking and the Trunk Masking is bypassed for any frame entering this port
+*        that is considered AVB by DA. This includes unicast as well as multicast
+*        frame
+*
+* INPUTS:
+*        port    - the logical port number
+*
+* OUTPUTS:
+*        en        - GT_TRUE if enabled, GT_FALSE otherwise
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbGetAvbTunnel
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    OUT GT_BOOL        *en
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U8          hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gavbGetAvbTunnel Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_POLICY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+    if (hwPort == GT_INVALID_PORT)
+        return GT_BAD_PARAM;
+
+    opData.ptpBlock = 0x1;    /* AVB Policy register space */
+
+    opData.ptpPort = (GT_U16)hwPort;
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed AVB operation.\n"));
+        return GT_FAIL;
+    }
+
+    *en = (opData.ptpData >> 11) & 0x1;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gavbSetAvbTunnel
+*
+* DESCRIPTION:
+*        AVB Tunnel.
+*        When disabled, normal frame processing occurs.
+*        When enabled, the port based VLAN Table masking, 802.1Q VLAN membership
+*        masking and the Trunk Masking is bypassed for any frame entering this port
+*        that is considered AVB by DA. This includes unicast as well as multicast
+*        frame
+*
+* INPUTS:
+*        port    - the logical port number
+*        en        - GT_TRUE to enable, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbSetAvbTunnel
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_BOOL        en
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U8          hwPort;         /* the physical port number     */
+
+    DBG_INFO(("GT_STATUS gavbGetAvbTunnel Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_POLICY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+    if (hwPort == GT_INVALID_PORT)
+        return GT_BAD_PARAM;
+
+    opData.ptpBlock = 0x1;    /* AVB Policy register space */
+
+    opData.ptpPort = (GT_U16)hwPort;
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed AVB operation.\n"));
+        return GT_FAIL;
+    }
+
+    if (en)
+        opData.ptpData |= (0x1 << 11);
+    else
+        opData.ptpData &= ~(0x1 << 11);
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed AVB operation.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gavbGetAvbFramePolicy
+*
+* DESCRIPTION:
+*        AVB Hi or Lo frame policy mapping.
+*        Supported policies are defined in GT_AVB_FRAME_POLICY.
+*
+* INPUTS:
+*        port    - the logical port number
+*        fType    - GT_AVB_FRAME_TYPE
+*
+* OUTPUTS:
+*        policy    - GT_AVB_FRAME_POLICY
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbGetAvbFramePolicy
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN    GT_AVB_FRAME_TYPE    fType,
+    OUT GT_AVB_FRAME_POLICY        *policy
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U8         hwPort;         /* the physical port number     */
+    GT_U16        bitPos;
+
+    DBG_INFO(("gavbGetAvbFramePolicy Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_POLICY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+    if (hwPort == GT_INVALID_PORT)
+        return GT_BAD_PARAM;
+
+    switch (fType)
+    {
+        case AVB_HI_FRAME:
+            bitPos = 2;
+            break;
+        case AVB_LO_FRAME:
+            bitPos = 0;
+            break;
+        default:
+            return GT_BAD_PARAM;
+    }
+
+    opData.ptpBlock = 0x1;    /* AVB Policy register space */
+
+    opData.ptpPort = (GT_U16)hwPort;
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed AVB operation.\n"));
+        return GT_FAIL;
+    }
+
+    *policy = (opData.ptpData >> bitPos) & 0x3;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gavbSetAvbFramePolicy
+*
+* DESCRIPTION:
+*        AVB Hi or Lo frame policy mapping.
+*        Supported policies are defined in GT_AVB_FRAME_POLICY.
+*
+* INPUTS:
+*        port    - the logical port number
+*        fType    - GT_AVB_FRAME_TYPE
+*        policy    - GT_AVB_FRAME_POLICY
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gavbSetAvbFramePolicy
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN    GT_AVB_FRAME_TYPE    fType,
+    IN  GT_AVB_FRAME_POLICY        policy
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U8         hwPort;         /* the physical port number     */
+    GT_U16        bitPos;
+
+    DBG_INFO(("gavbSetAvbFramePolicy Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_POLICY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+    if (hwPort == GT_INVALID_PORT)
+        return GT_BAD_PARAM;
+
+    switch (fType)
+    {
+        case AVB_HI_FRAME:
+            bitPos = 2;
+            break;
+        case AVB_LO_FRAME:
+            bitPos = 0;
+            break;
+        default:
+            return GT_BAD_PARAM;
+    }
+
+    opData.ptpBlock = 0x1;    /* AVB Policy register space */
+
+    opData.ptpPort = (GT_U16)hwPort;
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed AVB operation.\n"));
+        return GT_FAIL;
+    }
+
+    opData.ptpData &= ~(0x3 << bitPos);
+    opData.ptpData |= (policy & 0x3) << bitPos;
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed AVB operation.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/******************************************************************************
+*
+*
+*******************************************************************************/
+/* Amber QAV API */
+/*******************************************************************************
+* gqavSetPortQpriXQTSToken
+*
+* DESCRIPTION:
+*        This routine set Priority Queue 0-3 time slot tokens on a port.
+*        The setting value is number of tokens that need to be subtracted at each
+*        QTS interval boundary.
+*
+* INPUTS:
+*        port    - the logical port number
+*        queue     - 0 - 3
+*        qtsToken - number of tokens.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gqavSetPortQpriXQTSToken
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_U8        queue,
+    IN  GT_U16        qtsToken
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U32            hwPort;
+
+    DBG_INFO(("gqavSetPortQpriXQTSToken Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = (GT_U32)GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV_QPRI_QTS_TOKEN))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if queue is beyond range */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY))
+    {
+      if (queue>0x3)
+      {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+      }
+    }
+    else
+    {
+      if ((queue>0x3)||(queue<0x2))
+      {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+      }
+    }
+
+    /* check if qtsToken is beyond range */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY))
+    {
+      if (qtsToken>0x7fff)
+      {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+      }
+    }
+    else
+    {
+      if (qtsToken>0x3fff)
+      {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+      }
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = queue*2;
+
+    opData.ptpPort = hwPort;
+
+    op = PTP_WRITE_DATA;
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY))
+      opData.ptpData = qtsToken&0x7fff;
+    else
+      opData.ptpData = qtsToken&0x3fff;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing QTS token for port %d queue %d.\n", port, queue));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gqavGetPortQpriXQTSToken
+*
+* DESCRIPTION:
+*        This routine get Priority Queue 0-3 time slot tokens on a port.
+*        The setting value is number of tokens that need to be subtracted at each
+*        QTS interval boundary.
+*
+* INPUTS:
+*        port    - the logical port number
+*        queue - 0 - 3
+*
+* OUTPUTS:
+*        qtsToken - number of tokens
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gqavGetPortQpriXQTSToken
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_U8        queue,
+    OUT GT_U16        *qtsToken
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U32            hwPort;
+
+    DBG_INFO(("gqavGetPortQpriXQTSToken Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = (GT_U32)GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV_QPRI_QTS_TOKEN))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if queue is beyond range */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY))
+    {
+      if (queue>0x3)
+      {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+      }
+    }
+    else
+    {
+      if ((queue>0x3)||(queue<0x2))
+      {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+      }
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+
+    opData.ptpAddr = queue*2;
+    opData.ptpPort = hwPort;
+
+    op = PTP_READ_DATA;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading QTS token for port %d queue %d.\n", port, queue));
+        return GT_FAIL;
+    }
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY))
+      *qtsToken =(GT_U16)opData.ptpData&0x7fff;
+    else
+      *qtsToken =(GT_U16)opData.ptpData&0x3fff;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gqavSetPortQpriXBurstBytes
+*
+* DESCRIPTION:
+*        This routine set Priority Queue 0-3 Burst Bytes on a port.
+*        This value specifies the number of credits in bytes that can be
+*        accumulated when the queue is blocked from sending out a frame due to
+*        higher priority queue frames being sent out.
+*
+* INPUTS:
+*        port    - the logical port number
+*        queue - 0 - 3
+*        burst - number of credits in bytes .
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetPortQpriXBurstBytes
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_U8        queue,
+    IN  GT_U16        burst
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U32            hwPort;
+
+    DBG_INFO(("gqavSetPortQpriXBurstBytes Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = (GT_U32)GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV_QPRI_QTS_TOKEN))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if queue is beyond range */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY))
+    {
+      if (queue>0x3)
+      {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+      }
+    }
+    else
+    {
+      if ((queue>0x3)||(queue<0x2))
+      {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+      }
+    }
+
+    /* check if burst is beyond range */
+    if (burst>0x7fff)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = queue*2+1;
+
+    opData.ptpPort = hwPort;
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpData = burst&0x7fff;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing Burst bytes for port %d queue %d.\n", port, queue));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+/*******************************************************************************
+* gqavGetPortQpriXBurstBytes
+*
+* DESCRIPTION:
+*        This routine get Priority Queue 0-3 Burst Bytes on a port.
+*        This value specifies the number of credits in bytes that can be
+*        accumulated when the queue is blocked from sending out a frame due to
+*        higher priority queue frames being sent out.
+*
+* INPUTS:
+*        port    - the logical port number
+*        queue    - 0 - 3
+*
+* OUTPUTS:
+*        burst - number of credits in bytes .
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetPortQpriXBurstBytes
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_U8        queue,
+    OUT GT_U16        *burst
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U32            hwPort;
+
+    DBG_INFO(("gqavgetPortQpriXBurstBytes Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = (GT_U32)GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV_QPRI_QTS_TOKEN))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+
+    }
+    /* check if queue is beyond range */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY))
+    {
+      if (queue>0x3)
+      {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+      }
+    }
+    else
+    {
+      if ((queue>0x3)||(queue<0x2))
+      {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+      }
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+
+    opData.ptpAddr = queue*2+1;
+    opData.ptpPort = hwPort;
+
+    op = PTP_READ_DATA;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading Burst bytes for port %d queue %d.\n", port, queue));
+        return GT_FAIL;
+    }
+
+    *burst = (GT_U16)opData.ptpData&0x7fff;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gqavSetPortQpriXRate
+*
+* DESCRIPTION:
+*        This routine set Priority Queue 2-3 rate on a port.
+*
+* INPUTS:
+*        port    - the logical port number
+*        queue - 2 - 3
+*        rate - number of credits in bytes .
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetPortQpriXRate
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_U8        queue,
+    IN  GT_U16        rate
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U32            hwPort;
+
+    DBG_INFO(("gqavSetPortQpriXRate Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = (GT_U32)GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    if (!IS_IN_DEV_GROUP(dev, DEV_QAV_QPRI_RATE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if queue is beyond range */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY))
+    {
+      if (queue>0x3)
+      {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+      }
+    }
+    else
+    {
+      if ((queue>0x3)||(queue<0x2))
+      {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+      }
+    }
+
+    /* check if rate  is beyond range */
+    if (rate>0x0fff)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = queue*2+1;
+
+    opData.ptpPort = hwPort;
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpData = rate&0x0fff;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing rate bytes for port %d queue %d.\n", port, queue));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+/*******************************************************************************
+* gqavGetPortQpriXRate
+*
+* DESCRIPTION:
+*        This routine get Priority Queue 2-3 rate Bytes on a port.
+*
+* INPUTS:
+*        port    - the logical port number
+*        queue    - 2 - 3
+*
+* OUTPUTS:
+*        rate - number of credits in bytes .
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetPortQpriXRate
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_U8        queue,
+    OUT GT_U16        *rate
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U32            hwPort;
+
+    DBG_INFO(("gqavgetPortQpriXRate Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = (GT_U32)GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV_QPRI_RATE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+
+    }
+    /* check if queue is beyond range */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY))
+    {
+      if (queue>0x3)
+      {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+      }
+    }
+    else
+    {
+      if ((queue>0x3)||(queue<0x2))
+      {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+      }
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+
+    opData.ptpAddr = queue*2+1;
+    opData.ptpPort = hwPort;
+
+    op = PTP_READ_DATA;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading Rate bytes for port %d queue %d.\n", port, queue));
+        return GT_FAIL;
+    }
+
+    *rate = (GT_U16)opData.ptpData&0x0fff;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+/*******************************************************************************
+* gqavSetPortQpriXHiLimit
+*
+* DESCRIPTION:
+*        This routine set Priority Queue 2-3 HiLimit on a port.
+*
+* INPUTS:
+*        port    - the logical port number
+*        queue - 2 - 3
+*        hiLimit - number of credits in bytes .
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetPortQpriXHiLimit
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_U8        queue,
+    IN  GT_U16        hiLimit
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U32            hwPort;
+
+    DBG_INFO(("gqavSetPortQpriXHiLimit Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = (GT_U32)GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    if (!IS_IN_DEV_GROUP(dev, DEV_QAV_QPRI_RATE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if queue is beyond range */
+    if ((queue>0x3)||(queue<2))
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* check if rate  is beyond range */
+    if (hiLimit>0x0fff)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = queue*2+1;
+
+    opData.ptpPort = hwPort;
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpData = hiLimit&0x0fff;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing Burst bytes for port %d queue %d.\n", port, queue));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+/*******************************************************************************
+* gqavGetPortQpriXHiLimit
+*
+* DESCRIPTION:
+*        This routine get Priority Queue 2-3 HiLimit Bytes on a port.
+*
+* INPUTS:
+*        port    - the logical port number
+*        queue    - 2 - 3
+*
+* OUTPUTS:
+*        hiLimit - number of credits in bytes .
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetPortQpriXHiLimit
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_U8        queue,
+    OUT GT_U16        *hiLimit
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U32            hwPort;
+
+    DBG_INFO(("gqavgetPortQpriXHiLimit Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = (GT_U32)GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV_QPRI_RATE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+
+    }
+    /* check if queue is beyond range */
+    if ((queue>0x3)||(queue<2))
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+
+    opData.ptpAddr = queue*2+1;
+    opData.ptpPort = hwPort;
+
+    op = PTP_READ_DATA;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading Hi Limit bytes for port %d queue %d.\n", port, queue));
+        return GT_FAIL;
+    }
+
+    *hiLimit = (GT_U16)opData.ptpData&0x7fff;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gqavSetPortQavEnable
+*
+* DESCRIPTION:
+*        This routine set QAV enable status on a port.
+*
+* INPUTS:
+*        port    - the logical port number
+*        en        - GT_TRUE: QAV enable, GT_FALSE: QAV disable
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetPortQavEnable
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_BOOL        en
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U32            hwPort;
+
+    DBG_INFO(("gqavSetPortQavEnable Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = (GT_U32)GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = 8;
+
+    opData.ptpPort = hwPort;
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpData = (en==GT_TRUE)?0x8000:0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing QAV enable for port %d.\n", port));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gqavGetPortQavEnable
+*
+* DESCRIPTION:
+*        This routine get QAV enable status on a port.
+*
+* INPUTS:
+*        port    - the logical port number
+*
+* OUTPUTS:
+*        en        - GT_TRUE: QAV enable, GT_FALSE: QAV disable
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetPortQavEnable
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    OUT GT_BOOL        *en
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U32            hwPort;
+
+    DBG_INFO(("gqavGetPortQavEnable Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = (GT_U32)GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = 8;
+
+    opData.ptpPort = hwPort;
+
+    op = PTP_READ_DATA;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading QAV enable for port %d.\n", port));
+        return GT_FAIL;
+    }
+
+    *en = ((opData.ptpData&0x8000)==0)?GT_FALSE:GT_TRUE;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************/
+/* QAV Global resters processing */
+/*******************************************************************************
+* gqavSetGlobalAdminMGMT
+*
+* DESCRIPTION:
+*        This routine set to accept Admit Management Frames always.
+*
+* INPUTS:
+*        en - GT_TRUE to set MGMT frame accepted always,
+*             GT_FALSE do not set MGMT frame accepted always
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetGlobalAdminMGMT
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL        en
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gqavSetGlobalAdminMGMT Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = 0;
+
+    opData.ptpPort = 0xF;
+
+    op = PTP_READ_DATA;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading QAV global config admin MGMT.\n"));
+        return GT_FAIL;
+    }
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpData &= ~0x8000;
+    if (en)
+        opData.ptpData |= 0x8000;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing QAV global config admin MGMT.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gqavGetGlobalAdminMGMT
+*
+* DESCRIPTION:
+*        This routine get setting of Admit Management Frames always.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE to set MGMT frame accepted always,
+*             GT_FALSE do not set MGMT frame accepted always
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetGlobalAdminMGMT
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *en
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gqavGetGlobalAdminMGMT Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = 0;
+
+    opData.ptpPort = 0xF;
+
+    op = PTP_READ_DATA;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading QAV global config admin MGMT.\n"));
+        return GT_FAIL;
+    }
+
+    if (opData.ptpData&0x8000)
+      *en = GT_TRUE;
+    else
+      *en = GT_FALSE;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gqavSetGlobalIsoPtrThreshold
+*
+* DESCRIPTION:
+*        This routine set Global Isochronous Queue Pointer Threshold.
+*        This field indicates the total number of isochronous pointers
+*        that are reserved for isochronous streams. The value is expected to be
+*        computed in SRP software and programmed into hardware based on the total
+*        aggregate isochronous streams configured to go through this device..
+*
+* INPUTS:
+*        isoPtrs -  total number of isochronous pointers
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetGlobalIsoPtrThreshold
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U16        isoPtrs
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gqavSetGlobalIsoPtrThreshold Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if isoPtrs is beyond range */
+    if (!(IS_IN_DEV_GROUP(dev,DEV_88ESPANNAK_FAMILY)))
+    {
+      if (isoPtrs>0x3ff)
+      {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+      }
+    }
+    else
+    {
+      if (isoPtrs>0x1ff)
+      {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+      }
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = 0;
+
+    opData.ptpPort = 0xF;
+
+    op = PTP_READ_DATA;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading QAV global config Isochronous Queue Pointer Threshold.\n"));
+        return GT_FAIL;
+    }
+
+    op = PTP_WRITE_DATA;
+
+    if (!(IS_IN_DEV_GROUP(dev,DEV_88ESPANNAK_FAMILY)))
+      opData.ptpData &= ~0x3ff;
+    else
+      opData.ptpData &= ~0x1ff;
+    opData.ptpData |= isoPtrs;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing QAV global config Isochronous Queue Pointer Threshold.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gqavGetGlobalIsoPtrThreshold
+*
+* DESCRIPTION:
+*        This routine get Global Isochronous Queue Pointer Threshold.
+*        This field indicates the total number of isochronous pointers
+*        that are reserved for isochronous streams. The value is expected to be
+*        computed in SRP software and programmed into hardware based on the total
+*        aggregate isochronous streams configured to go through this device..
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        isoPtrs -  total number of isochronous pointers
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetGlobalIsoPtrThreshold
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U16        *isoPtrs
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gqavGetGlobalIsoPtrThreshold Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = 0;
+
+    opData.ptpPort = 0xF;
+
+    op = PTP_READ_DATA;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading Isochronous Queue Pointer Threshold.\n"));
+        return GT_FAIL;
+    }
+
+    if (!(IS_IN_DEV_GROUP(dev,DEV_88ESPANNAK_FAMILY)))
+      *isoPtrs = (GT_U16)opData.ptpData&0x3ff;
+    else
+      *isoPtrs = (GT_U16)opData.ptpData&0x1ff;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gqavSetGlobalDisQSD4MGMT
+*
+* DESCRIPTION:
+*        This routine set Disable Queue Scheduler Delays for Management frames..
+*
+* INPUTS:
+*        en - GT_TRUE, it indicates to the Queue Controller to disable applying Queue
+*            Scheduler Delays and the corresponding rate regulator does not account
+*            for MGMT frames through this queue.
+*            GT_FALSE, the MGMT frames follow similar rate regulation and delay
+*            regulation envelope as specified for the isochronous queue that the
+*            MGMT frames are sharing with.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetGlobalDisQSD4MGMT
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL        en
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gqavSetGlobalDisQSD4MGMT Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = 3;
+
+    opData.ptpPort = 0xF;
+
+    op = PTP_READ_DATA;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading Disable Queue Scheduler Delay for MGMT frames.\n"));
+        return GT_FAIL;
+    }
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpData &= ~0x4000;
+    if (en==GT_TRUE)
+        opData.ptpData |= 0x4000;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing Disable Queue Scheduler Delay for MGMT frames.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gqavGetGlobalDisQSD4MGMT
+*
+* DESCRIPTION:
+*        This routine Get Disable Queue Scheduler Delays for Management frames..
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE, it indicates to the Queue Controller to disable applying Queue
+*            Scheduler Delays and the corresponding rate regulator does not account
+*            for MGMT frames through this queue.
+*            GT_FALSE, the MGMT frames follow similar rate regulation and delay
+*            regulation envelope as specified for the isochronous queue that the
+*            MGMT frames are sharing with.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetGlobalDisQSD4MGMT
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *en
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gqavGetGlobalDisQSD4MGMT Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = 3;
+
+    opData.ptpPort = 0xF;
+
+    op = PTP_READ_DATA;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading Disable Queue Scheduler Delay for MGMT frames.\n"));
+        return GT_FAIL;
+    }
+
+    if (opData.ptpData&0x4000)
+      *en = GT_TRUE;
+    else
+      *en = GT_FALSE;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gqavSetGlobalInterrupt
+*
+* DESCRIPTION:
+*        This routine set QAV interrupt enable,
+*        The QAV interrypts include:
+*        [GT_QAV_INT_ENABLE_ENQ_LMT_BIT]      # EnQ Limit Interrupt Enable
+*        [GT_QAV_INT_ENABLE_ISO_DEL_BIT]      # Iso Delay Interrupt Enable
+*        [GT_QAV_INT_ENABLE_ISO_DIS_BIT]      # Iso Discard Interrupt Enable
+*        [GT_QAV_INT_ENABLE_ISO_LIMIT_EX_BIT] # Iso Packet Memory Exceeded
+*                                              Interrupt Enable
+*
+* INPUTS:
+*        intEn - [GT_QAV_INT_ENABLE_ENQ_LMT_BIT] OR
+*                [GT_QAV_INT_ENABLE_ISO_DEL_BIT] OR
+*                [GT_QAV_INT_ENABLE_ISO_DIS_BIT] OR
+*                [GT_QAV_INT_ENABLE_ISO_LIMIT_EX_BIT]
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetGlobalInterrupt
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U16        intEn
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gqavSetGlobalInterrupt Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = 8;
+
+    opData.ptpPort = 0xF;
+
+    op = PTP_READ_DATA;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading Interrupt enable status.\n"));
+        return GT_FAIL;
+    }
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpData &= ~0xffff;
+    if (IS_IN_DEV_GROUP(dev,DEV_88E6351_AVB_FAMILY))
+      opData.ptpData |= (intEn&0x87);
+    else
+      opData.ptpData |= (intEn&0x03);
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing Interrupt enable status.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gqavGetGlobalInterrupt
+*
+* DESCRIPTION:
+*       This routine get QAV interrupt status and enable status,
+*        The QAV interrypt status include:
+*         [GT_QAV_INT_STATUS_ENQ_LMT_BIT]      # Enqueue Delay Limit exceeded
+*         [GT_QAV_INT_STATUS_ISO_DEL_BIT]      # Iso Delay Interrupt Status
+*         [GT_QAV_INT_STATUS_ISO_DIS_BIT]      # Iso Discard Interrupt Status
+*         [GT_QAV_INT_STATUS_ISO_LIMIT_EX_BIT] # Iso Packet Memory Exceeded
+*                                                Interrupt Status
+*        The QAV interrypt enable status include:
+*         [GT_QAV_INT_ENABLE_ENQ_LMT_BIT]      # EnQ Limit Interrupt Enable
+*         [GT_QAV_INT_ENABLE_ISO_DEL_BIT]      # Iso Delay Interrupt Enable
+*         [GT_QAV_INT_ENABLE_ISO_DIS_BIT]      # Iso Discard Interrupt Enable
+*         [GT_QAV_INT_ENABLE_ISO_LIMIT_EX_BIT] # Iso Packet Memory Exceeded
+*                                                  Interrupt Enable
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        intEnSt - [GT_QAV_INT_STATUS_ENQ_LMT_BIT] OR
+*                [GT_QAV_INT_STATUS_ISO_DEL_BIT] OR
+*                [GT_QAV_INT_STATUS_ISO_DIS_BIT] OR
+*                [GT_QAV_INT_STATUS_ISO_LIMIT_EX_BIT] OR
+*                [GT_QAV_INT_ENABLE_ENQ_LMT_BIT] OR
+*                [GT_QAV_INT_ENABLE_ISO_DEL_BIT] OR
+*                [GT_QAV_INT_ENABLE_ISO_DIS_BIT] OR
+*                [GT_QAV_INT_ENABLE_ISO_LIMIT_EX_BIT]
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetGlobalInterrupt
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U16        *intEnSt
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gqavGetGlobalInterrupt Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = 8;
+
+    opData.ptpPort = 0xF;
+
+    op = PTP_READ_DATA;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading Interrupt status.\n"));
+        return GT_FAIL;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_88E6351_AVB_FAMILY))
+      *intEnSt = (GT_U16)opData.ptpData & 0x8787;
+    else
+      *intEnSt = (GT_U16)opData.ptpData & 0x0303;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gqavGetGlobalIsoInterruptPort
+*
+* DESCRIPTION:
+*        This routine get Isochronous interrupt port.
+*        This field indicates the port number for IsoDisInt or IsoLimitExInt
+*        bits. Only one such interrupt condition can be detected by hardware at one
+*        time. Once an interrupt bit has been set along with the IsoIntPort, the
+*        software would have to come and clear the bits before hardware records
+*        another interrupt event.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        port - port number for IsoDisInt or IsoLimitExInt bits.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetGlobalIsoInterruptPort
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U8        *port
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gqavGetGlobalIsoInterruptPort Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = 9;
+
+    opData.ptpPort = 0xF;
+
+    op = PTP_READ_DATA;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading Isochronous interrupt port..\n"));
+        return GT_FAIL;
+    }
+
+    *port = (GT_U8)opData.ptpData&0xf;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gqavSetGlobalIsoDelayLmt
+*
+* DESCRIPTION:
+*        This routine set Isochronous queue delay Limit
+*        This field represents a per-port isochronous delay limit that
+*        will be checked by the queue controller logic to ensure no isochronous
+*        packets suffer more than this delay w.r.t to their eligibility time slot.
+*        This represents the number of Queue Time Slots. The interval for the QTS
+*        can be configured using the register in Qav Global Configuration, Offset 0x2.
+*
+* INPUTS:
+*        limit - per-port isochronous delay limit.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetGlobalIsoDelayLmt
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U8        limit
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gqavSetGlobalIsoDelayLmt Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV_ISO_DELAY_LIMIT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = 10;
+
+    opData.ptpPort = 0xF;
+
+    op = PTP_READ_DATA;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading Isochronous queue delay Limit.\n"));
+        return GT_FAIL;
+    }
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpData &= ~0xff;
+    opData.ptpData |= (limit&0xff);
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing Isochronous queue delay Limit.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gqavGetGlobalIsoDelayLmt
+*
+* DESCRIPTION:
+*        This routine get Isochronous queue delay Limit
+*        This field represents a per-port isochronous delay limit that
+*        will be checked by the queue controller logic to ensure no isochronous
+*        packets suffer more than this delay w.r.t to their eligibility time slot.
+*        This represents the number of Queue Time Slots. The interval for the QTS
+*        can be configured using the register in Qav Global Configuration, Offset 0x2.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        limit - per-port isochronous delay limit.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetGlobalIsoDelayLmt
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U8        *limit
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gqavGetGlobalIsoDelayLmt Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV_ISO_DELAY_LIMIT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = 10;
+
+    opData.ptpPort = 0xF;
+
+    op = PTP_READ_DATA;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading Isochronous queue delay Limit.\n"));
+        return GT_FAIL;
+    }
+
+    *limit = (GT_U8)(opData.ptpData)&0xff;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gqavSetGlobalIsoMonEn
+*
+* DESCRIPTION:
+*       This routine set Isochronous monitor enable
+*        Set GT_TRUE: this bit enables the statistics gathering capabilities stated
+*        in PTP Global Status Registers Offset 0xD, 0xE and 0xF. Once enabled, the
+*        software is expected to program the IsoMonPort (PTP Global Status Offset
+*        0xD) indicating which port of the device does the software wants to monitor.
+*        Upon setting this bit, the hardware collects IsoHiDisCtr, IsoLoDisCtr and
+*        IsoSchMissCtr values for the port indicated by IsoMonPort till this bit is
+*        set to a zero.
+*        Set GT_FALSE: this bit disables the statistics gathering capabilities.
+*
+* INPUTS:
+*        en - GT_TRUE / GT_FALSE.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetGlobalIsoMonEn
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL        en
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gqavSetGlobalIsoMonEn Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = 12;
+
+    opData.ptpPort = 0xF;
+
+    op = PTP_READ_DATA;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading Isochronous monitor enable.\n"));
+        return GT_FAIL;
+    }
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpData &= ~0x8000;
+    if (en)
+        opData.ptpData |= 0x8000;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing Isochronous monitor enable.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gqavGetGlobalIsoMonEn
+*
+* DESCRIPTION:
+*        This routine get Isochronous monitor enable
+*        Set GT_TRUE: this bit enables the statistics gathering capabilities stated
+*        in PTP Global Status Registers Offset 0xD, 0xE and 0xF. Once enabled, the
+*        software is expected to program the IsoMonPort (PTP Global Status Offset
+*        0xD) indicating which port of the device does the software wants to monitor.
+*        Upon setting this bit, the hardware collects IsoHiDisCtr, IsoLoDisCtr and
+*        IsoSchMissCtr values for the port indicated by IsoMonPort till this bit is
+*        set to a zero.
+*        Set GT_FALSE: this bit disables the statistics gathering capabilities.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE / GT_FALSE.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetGlobalIsoMonEn
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *en
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gqavGetGlobalIsoMonEn Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = 12;
+
+    opData.ptpPort = 0xF;
+
+    op = PTP_READ_DATA;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading Isochronous monitor enable.\n"));
+        return GT_FAIL;
+    }
+
+    if (opData.ptpData&0x8000)
+      *en = 1;
+    else
+      *en = 0;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gqavSetGlobalIsoMonPort
+*
+* DESCRIPTION:
+*        This routine set Isochronous monitoring port.
+*        This field is updated by software along with Iso Mon En bit
+*        (Qav Global Status, offset 0xD) and it indicates the port number that
+*        the software wants the hardware to start monitoring i.e., start updating
+*        IsoHiDisCtr, IsoLoDisCtr and IsoSchMissCtr. The queue controller clears
+*        the above stats when IsoMonPort is changed..
+*
+* INPUTS:
+*        port -  port number .
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetGlobalIsoMonPort
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U16        port
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gqavSetGlobalIsoMonPort Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if port is beyond range */
+    if (port>0xf)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = 12;
+
+    opData.ptpPort = 0xF;
+
+    op = PTP_READ_DATA;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading Isochronous monitoring port.\n"));
+        return GT_FAIL;
+    }
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpData &= ~0xf;
+    opData.ptpData |= port;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing Isochronous monitoring port.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gqavGetGlobalIsoMonPort
+*
+* DESCRIPTION:
+*        This routine get Isochronous monitoring port.
+*        This field is updated by software along with Iso Mon En bit
+*        (Qav Global Status, offset 0xD) and it indicates the port number that
+*        the software wants the hardware to start monitoring i.e., start updating
+*        IsoHiDisCtr, IsoLoDisCtr and IsoSchMissCtr. The queue controller clears
+*        the above stats when IsoMonPort is changed..
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        port -  port number.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetGlobalIsoMonPort
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U16        *port
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gqavGetGlobalIsoMonPort Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = 12;
+
+    opData.ptpPort = 0xF;
+
+    op = PTP_READ_DATA;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading Isochronous monitoring port.\n"));
+        return GT_FAIL;
+    }
+
+    *port = (GT_U16)opData.ptpData&0xf;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gqavSetGlobalIsoHiDisCtr
+*
+* DESCRIPTION:
+*        This routine set Isochronous hi queue discard counter.
+*        This field is updated by hardware when instructed to do so by
+*        enabling the IsoMonEn bit in Qav Global Status Register Offset 0xD.
+*        This is an upcounter of number of isochronous hi packets discarded
+*        by Queue Controller.
+*
+* INPUTS:
+*        disCtr - upcounter of number of isochronous hi packets discarded
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetGlobalIsoHiDisCtr
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U8        disCtr
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gqavSetGlobalIsoHiDisCtr Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = 13;
+
+    opData.ptpPort = 0xF;
+
+    op = PTP_READ_DATA;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading Isochronous hi queue discard counter..\n"));
+        return GT_FAIL;
+    }
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpData &= ~0xff00;
+    if (disCtr)
+        opData.ptpData |= (disCtr<<8);
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing Isochronous hi queue discard counter..\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gqavGetGlobalIsoHiDisCtr
+*
+* DESCRIPTION:
+*        This routine get Isochronous hi queue discard counter.
+*        This field is updated by hardware when instructed to do so by
+*        enabling the IsoMonEn bit in Qav Global Status Register Offset 0xD.
+*        This is an upcounter of number of isochronous hi packets discarded
+*        by Queue Controller.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        disCtr - upcounter of number of isochronous hi packets discarded
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetGlobalIsoHiDisCtr
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U8        *disCtr
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gqavGetGlobalIsoHiDisCtr Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = 13;
+
+    opData.ptpPort = 0xF;
+
+    op = PTP_READ_DATA;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading Isochronous hi queue discard counter.\n"));
+        return GT_FAIL;
+    }
+
+    *disCtr = (GT_U8)(opData.ptpData>>8)&0xff;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gqavSetGlobalIsoLoDisCtr
+*
+* DESCRIPTION:
+*        This routine set Isochronous Lo queue discard counter.
+*        This field is updated by hardware when instructed to do so by
+*        enabling the IsoMonEn bit in Qav Global Status Register Offset 0xD.
+*        This is an upcounter of number of isochronous lo packets discarded
+*        by Queue Controller.
+*
+* INPUTS:
+*        disCtr - upcounter of number of isochronous lo packets discarded
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavSetGlobalIsoLoDisCtr
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U8        disCtr
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gqavSetGlobalIsoLoDisCtr Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = 13;
+
+    opData.ptpPort = 0xF;
+
+    op = PTP_READ_DATA;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading Isochronous lo queue discard counter.\n"));
+        return GT_FAIL;
+    }
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpData &= ~0xff;
+    opData.ptpData |= (disCtr&0xff);
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing Isochronous lo queue discard counter.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gqavGetGlobalIsoLoDisCtr
+*
+* DESCRIPTION:
+*        This routine set Isochronous Lo queue discard counter.
+*        This field is updated by hardware when instructed to do so by
+*        enabling the IsoMonEn bit in Qav Global Status Register Offset 0xD.
+*        This is an upcounter of number of isochronous lo packets discarded
+*        by Queue Controller.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        disCtr - upcounter of number of isochronous lo packets discarded
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gqavGetGlobalIsoLoDisCtr
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U8        *disCtr
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gqavGetGlobalIsoLoDisCtr Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAV))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x2;    /* QAV register space */
+    opData.ptpAddr = 13;
+
+    opData.ptpPort = 0xF;
+
+    op = PTP_READ_DATA;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading Isochronous lo queue discard counter.\n"));
+        return GT_FAIL;
+    }
+
+    *disCtr = (GT_U8)(opData.ptpData)&0xff;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtAdvVct.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtAdvVct.c
new file mode 100644
index 000000000000..63af010106eb
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtAdvVct.c
@@ -0,0 +1,1876 @@
+#include <Copyright.h>
+/*******************************************************************************
+* gtAdvVct.c
+*
+* DESCRIPTION:
+*       API for Marvell Virtual Cable Tester.
+*
+* DEPENDENCIES:
+*       None.
+*
+* FILE REVISION NUMBER:
+*       $Revision: 1 $
+*******************************************************************************/
+#include <msApi.h>
+#include <gtVct.h>
+#include <gtDrvConfig.h>
+#include <gtDrvSwRegs.h>
+#include <gtHwCntl.h>
+#include <gtSem.h>
+#ifdef GT_USE_MAD
+#include <gtMad.h>
+#endif
+
+#ifdef GT_USE_MAD
+#include "gtAdvVct_mad.c"
+#endif
+
+#define GT_LOOKUP_TABLE_ENTRY  128  /* 73 */
+
+#define GT_ADV_VCT_ACCEPTABLE_SHORT_CABLE  11
+
+static  GT_U8 tbl_1181[GT_LOOKUP_TABLE_ENTRY] =
+                    {  2,  4,  8, 14, 18, 20, 25, 30, 33, 36,
+                      39, 42, 46, 48, 51, 54, 57, 59, 62, 64,
+                      66, 69, 71, 73, 75, 77, 80, 81, 83, 85,
+                      87, 88, 90, 93, 95, 97, 98,100,101,103,
+                     104,106,106,107,109,110,111,113,114,115,
+                     116,118,119,120,121,122,124,125,126,127,
+                     128,129,130,131,132,133,134,135,136,137,
+                     138,139,140};
+
+static  GT_U8 tbl_1111[GT_LOOKUP_TABLE_ENTRY] =
+                    {  0,  2,  4, 5, 6, 9, 13, 17, 20, 23,
+                      27, 30, 33, 35, 38, 41, 43, 46, 48, 51,
+                      53, 55, 58, 60, 62, 64, 66, 68, 70, 72,
+                      73, 75, 77, 79, 80, 82, 84, 85, 87, 88,
+                      90, 91, 93, 94, 96, 97, 98,100,101,102,
+                     104,105,106,107,109,110,111,112,113,114,
+                     116,117,118,119,120,121,122,123,124,125,
+                     126,127,128,129,130,131,132,133,134,134};
+
+static  GT_U8 tbl_1112[GT_LOOKUP_TABLE_ENTRY] =   /* from 17*/
+                    {  0,  4,  8, 11, 14, 18, 21, 24, 28, 31,
+                      34, 37, 39, 42, 44, 47, 49, 52, 54, 56,
+                      58, 60, 62, 64, 66, 68, 70, 72, 74, 75,
+                      77, 79, 80, 82, 83, 85, 87, 88, 89, 91,
+                      92, 94, 95, 96, 98, 99,100,101,103,104,
+                      105,106,107,108,109,111,112,113,114,115,
+                      116,117,118,119,120,121,122,123,124,124,
+                      125,126,127,128,129,130,131,131,132,133,
+                      134,135,135,136,137,138,139,139,140,141,
+                      142,142,143,144,144,145,146,147,147,148};
+
+static  GT_U8 tbl_1116[GT_LOOKUP_TABLE_ENTRY] =   /* from 16*/
+                    {  2,  4,  8, 14, 18, 20, 25, 30, 33, 36,
+                      39, 42, 46, 48, 51, 54, 57, 59, 62, 64,
+                      66, 69, 71, 73, 75, 77, 80, 81, 83, 85,
+                      87, 88, 90, 93, 95, 97, 98, 100, 101, 103,
+                      104,106,106,107,109,110,111,113,114,115,
+                      116,118,119,120,121,122,124,125,126,127,
+                      128,129,130,131,132,133,134,135,136,137,
+                      138,139,140};
+
+static  GT_U8 tbl_1240[GT_LOOKUP_TABLE_ENTRY] =
+                    {  1,  2,  5, 10, 13, 15, 18, 22, 26, 30,
+                      33, 35, 38, 40, 43, 45, 48, 51, 53, 55,
+                      58, 60, 63, 65, 68, 69, 70, 71, 73, 75,
+                      77, 79, 80, 81, 82, 83, 85, 87, 88, 90,
+                      91, 92, 93, 95, 97, 98,100,101,102,103,
+                     105,106,107,108,109,110,111,112,113,114,
+                     115,116,117,118,119,120,121,122,123,124,
+                     125,126,127,128,129,130};
+
+/*******************************************************************************
+* getDetailedAdvVCTResult
+*
+* DESCRIPTION:
+*        This routine differenciate Open/Short from Impedance mismatch.
+*
+* INPUTS:
+*        amp - amplitude
+*        len - distance to fault
+*        vctResult - test result
+*                    (Impedance mismatch, either > 115 ohms, or < 85 ohms)
+*
+* OUTPUTS:
+*
+* RETURNS:
+*       GT_ADV_VCT_STATUS
+*
+* COMMENTS:
+*       This routine assumes test result is not normal nor cross pair short.
+*
+*******************************************************************************/
+static
+GT_ADV_VCT_STATUS getDetailedAdvVCTResult
+(
+    IN  GT_U32  devType,
+    IN  GT_U32  amp,
+    IN  GT_U32  len,
+    IN  GT_ADV_VCT_STATUS result
+)
+{
+    GT_ADV_VCT_STATUS vctResult;
+    GT_BOOL    update = GT_FALSE;
+
+    DBG_INFO(("getDetailedAdvVCTResult Called.\n"));
+
+    if (devType == GT_PHY_ADV_VCT_TYPE2)
+    {
+        if(len < 10)
+        {
+            if(amp > 54)  /* 90 x 0.6 */
+                update = GT_TRUE;
+        }
+        else if(len < 50)
+        {
+            if(amp > 42) /* 70 x 0.6 */
+                update = GT_TRUE;
+        }
+        else if(len < 110)
+        {
+            if(amp > 30)  /* 50 x 0.6 */
+                update = GT_TRUE;
+        }
+        else if(len < 140)
+        {
+            if(amp > 24)  /* 40 x 0.6 */
+                update = GT_TRUE;
+        }
+        else
+        {
+            if(amp > 18) /* 30 x 0.6 */
+                update = GT_TRUE;
+        }
+    }
+    else
+    {
+        if(len < 10)
+        {
+            if(amp > 90)
+                update = GT_TRUE;
+        }
+        else if(len < 50)
+        {
+            if(amp > 70)
+                update = GT_TRUE;
+        }
+        else if(len < 110)
+        {
+            if(amp > 50)
+                update = GT_TRUE;
+        }
+        else if(len < 140)
+        {
+            if(amp > 40)
+                update = GT_TRUE;
+        }
+        else
+        {
+            if(amp > 30)
+                update = GT_TRUE;
+        }
+    }
+
+
+    switch (result)
+    {
+        case GT_ADV_VCT_IMP_GREATER_THAN_115:
+                if(update)
+                    vctResult = GT_ADV_VCT_OPEN;
+                else
+                    vctResult = result;
+                break;
+        case GT_ADV_VCT_IMP_LESS_THAN_85:
+                if(update)
+                    vctResult = GT_ADV_VCT_SHORT;
+                else
+                    vctResult = result;
+                break;
+        default:
+                vctResult = result;
+                break;
+    }
+
+    return vctResult;
+}
+
+/*******************************************************************************
+* analizeAdvVCTResult
+*
+* DESCRIPTION:
+*        This routine analize the Advanced VCT result.
+*
+* INPUTS:
+*        channel - channel number where test was run
+*        crossChannelReg - register values after the test is completed
+*        mode    - use formula for normal cable case
+*
+* OUTPUTS:
+*        cableStatus - analized test result.
+*
+* RETURNS:
+*        -1, or distance to fault
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+static
+GT_16 analizeAdvVCTNoCrosspairResult
+(
+    IN  GT_U32  devType,
+    IN  int     channel,
+    IN  GT_U16 *crossChannelReg,
+    IN  GT_BOOL isShort,
+    OUT GT_ADV_CABLE_STATUS *cableStatus
+)
+{
+    int len;
+    GT_16 dist2fault;
+    GT_ADV_VCT_STATUS vctResult = GT_ADV_VCT_NORMAL;
+
+    DBG_INFO(("analizeAdvVCTNoCrosspairResult Called.\n"));
+    DBG_INFO(("analizeAdvVCTNoCrosspairResult chan %d reg data %x\n", channel, crossChannelReg[channel]));
+
+    dist2fault = -1;
+
+    /* check if test is failed */
+    if(IS_VCT_FAILED(crossChannelReg[channel]))
+    {
+        cableStatus->cableStatus[channel] = GT_ADV_VCT_FAIL;
+        return dist2fault;
+    }
+
+    /* Check if fault detected */
+    if(IS_ZERO_AMPLITUDE(crossChannelReg[channel]))
+    {
+        cableStatus->cableStatus[channel] = GT_ADV_VCT_NORMAL;
+        return dist2fault;
+    }
+
+    /* find out test result by reading Amplitude */
+    if(IS_POSITIVE_AMPLITUDE(crossChannelReg[channel]))
+    {
+        vctResult = GT_ADV_VCT_IMP_GREATER_THAN_115;
+    }
+    else
+    {
+        vctResult = GT_ADV_VCT_IMP_LESS_THAN_85;
+    }
+
+    /*
+     * now, calculate the distance for GT_ADV_VCT_IMP_GREATER_THAN_115 and
+     * GT_ADV_VCT_IMP_LESS_THAN_85
+     */
+    switch (vctResult)
+    {
+        case GT_ADV_VCT_IMP_GREATER_THAN_115:
+        case GT_ADV_VCT_IMP_LESS_THAN_85:
+            if(!isShort)
+            {
+                len = (int)GT_ADV_VCT_CALC(crossChannelReg[channel] & 0xFF);
+            }
+            else
+            {
+                len = (int)GT_ADV_VCT_CALC_SHORT(crossChannelReg[channel] & 0xFF);
+            }
+            DBG_INFO(("@@@@ no cross len %d\n", len));
+
+            if (len < 0)
+                len = 0;
+            cableStatus->u[channel].dist2fault = (GT_16)len;
+            vctResult = getDetailedAdvVCTResult(
+                                    devType,
+                                    GET_AMPLITUDE(crossChannelReg[channel]),
+                                    len,
+                                    vctResult);
+            dist2fault = (GT_16)len;
+            break;
+        default:
+            break;
+    }
+
+    cableStatus->cableStatus[channel] = vctResult;
+
+    return dist2fault;
+}
+
+
+static
+GT_16 analizeAdvVCTResult
+(
+    IN  GT_U32  devType,
+    IN  int     channel,
+    IN  GT_U16 *crossChannelReg,
+    IN  GT_BOOL isShort,
+    OUT GT_ADV_CABLE_STATUS *cableStatus
+)
+{
+    int i, len;
+    GT_16 dist2fault;
+    GT_ADV_VCT_STATUS vctResult = GT_ADV_VCT_NORMAL;
+
+    DBG_INFO(("analizeAdvVCTResult(Crosspair) chan %d reg data %x\n", channel, crossChannelReg[channel]));
+    DBG_INFO(("analizeAdvVCTResult Called.\n"));
+
+    dist2fault = -1;
+
+    /* check if test is failed */
+    for (i=0; i<GT_MDI_PAIR_NUM; i++)
+    {
+        if(IS_VCT_FAILED(crossChannelReg[i]))
+        {
+            cableStatus->cableStatus[channel] = GT_ADV_VCT_FAIL;
+            return dist2fault;
+        }
+    }
+
+    /* find out test result by reading Amplitude */
+    for (i=0; i<GT_MDI_PAIR_NUM; i++)
+    {
+        if (i == channel)
+        {
+            if(!IS_ZERO_AMPLITUDE(crossChannelReg[i]))
+            {
+                if(IS_POSITIVE_AMPLITUDE(crossChannelReg[i]))
+                {
+                    vctResult = GT_ADV_VCT_IMP_GREATER_THAN_115;
+                }
+                else
+                {
+                    vctResult = GT_ADV_VCT_IMP_LESS_THAN_85;
+                }
+            }
+            continue;
+        }
+
+        if(IS_ZERO_AMPLITUDE(crossChannelReg[i]))
+            continue;
+
+        vctResult = GT_ADV_VCT_CROSS_PAIR_SHORT;
+        break;
+    }
+
+    /* if it is cross pair short, check the distance for each channel */
+    if(vctResult == GT_ADV_VCT_CROSS_PAIR_SHORT)
+    {
+        cableStatus->cableStatus[channel] = GT_ADV_VCT_CROSS_PAIR_SHORT;
+        for (i=0; i<GT_MDI_PAIR_NUM; i++)
+        {
+            if(IS_ZERO_AMPLITUDE(crossChannelReg[i]))
+            {
+                cableStatus->u[channel].crossShort.channel[i] = GT_FALSE;
+                cableStatus->u[channel].crossShort.dist2fault[i] = 0;
+                continue;
+            }
+
+            cableStatus->u[channel].crossShort.channel[i] = GT_TRUE;
+            if(!isShort)
+                len = (int)GT_ADV_VCT_CALC(crossChannelReg[i] & 0xFF);
+            else
+                len = (int)GT_ADV_VCT_CALC_SHORT(crossChannelReg[i] & 0xFF);
+            DBG_INFO(("@@@@ len %d\n", len));
+
+            if (len < 0)
+                len = 0;
+            cableStatus->u[channel].crossShort.dist2fault[i] = (GT_16)len;
+            dist2fault = (GT_16)len;
+        }
+
+        return dist2fault;
+    }
+
+    /*
+     * now, calculate the distance for GT_ADV_VCT_IMP_GREATER_THAN_115 and
+     * GT_ADV_VCT_IMP_LESS_THAN_85
+     */
+    switch (vctResult)
+    {
+        case GT_ADV_VCT_IMP_GREATER_THAN_115:
+        case GT_ADV_VCT_IMP_LESS_THAN_85:
+            if(isShort)
+                len = (int)GT_ADV_VCT_CALC(crossChannelReg[channel] & 0xFF);
+            else
+                len = (int)GT_ADV_VCT_CALC_SHORT(crossChannelReg[channel] & 0xFF);
+            if (len < 0)
+                len = 0;
+            cableStatus->u[channel].dist2fault = (GT_16)len;
+            vctResult = getDetailedAdvVCTResult(
+                                    devType,
+                                    GET_AMPLITUDE(crossChannelReg[channel]),
+                                    len,
+                                    vctResult);
+            dist2fault = (GT_16)len;
+            break;
+        default:
+            break;
+    }
+
+    cableStatus->cableStatus[channel] = vctResult;
+
+    return dist2fault;
+}
+
+
+/*******************************************************************************
+* runAdvCableTest_1181
+*
+* DESCRIPTION:
+*        This routine performs the advanced virtual cable test for the PHY with
+*        multiple page mode and returns the the status per MDIP/N.
+*
+* INPUTS:
+*        port - logical port number.
+*        mode - GT_TRUE, if short cable detect is required
+*               GT_FALSE, otherwise
+*
+* OUTPUTS:
+*        cableStatus - the port copper cable status.
+*        tooShort    - if known distance to fault is too short
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+static
+GT_STATUS runAdvCableTest_1181
+(
+    IN  GT_QD_DEV       *dev,
+    IN  GT_U8           hwPort,
+    IN    GT_PHY_INFO        *phyInfo,
+    IN  GT_BOOL         mode,
+    OUT GT_ADV_CABLE_STATUS *cableStatus,
+    OUT GT_BOOL         *tooShort
+)
+{
+    GT_STATUS retVal;
+    GT_U16 u16Data;
+    GT_U16 crossChannelReg[GT_MDI_PAIR_NUM];
+    int i,j;
+    GT_16  dist2fault;
+
+    VCT_REGISTER regList[GT_MDI_PAIR_NUM][GT_MDI_PAIR_NUM] = {
+                            {{8,16},{8,17},{8,18},{8,19}},  /* channel 0 */
+                            {{8,24},{8,25},{8,26},{8,27}},  /* channel 1 */
+                            {{9,16},{9,17},{9,18},{9,19}},  /* channel 2 */
+                            {{9,24},{9,25},{9,26},{9,27}}   /* channel 3 */
+                            };
+
+    DBG_INFO(("runAdvCableTest_1181 Called.\n"));
+
+    if (mode)
+        *tooShort = GT_FALSE;
+
+    /*
+     * start Advanced Virtual Cable Tester
+     */
+    if((retVal = hwSetPagedPhyRegField(
+                        dev,hwPort,8,QD_REG_ADV_VCT_CONTROL_8,15,1,phyInfo->anyPage,1)) != GT_OK)
+    {
+        DBG_INFO(("Writing to paged phy reg failed.\n"));
+        return retVal;
+    }
+
+    /*
+     * loop until test completion and result is valid
+     */
+    do
+    {
+        if((retVal = hwReadPagedPhyReg(
+                            dev,hwPort,8,QD_REG_ADV_VCT_CONTROL_8,phyInfo->anyPage,&u16Data)) != GT_OK)
+        {
+            DBG_INFO(("Reading from paged phy reg failed.\n"));
+            return retVal;
+        }
+    } while(u16Data & 0x8000);
+
+    DBG_INFO(("Page 8 of Reg20 after test : %0#x.\n", u16Data));
+
+    for (i=0; i<GT_MDI_PAIR_NUM; i++)
+    {
+        /*
+         * read the test result for the cross pair against selected MDI Pair
+         */
+        for (j=0; j<GT_MDI_PAIR_NUM; j++)
+        {
+            if((retVal = hwReadPagedPhyReg(
+                                dev,hwPort,
+                                regList[i][j].page,
+                                regList[i][j].regOffset,
+                                phyInfo->anyPage,
+                                &crossChannelReg[j])) != GT_OK)
+            {
+                DBG_INFO(("Reading from paged phy reg failed.\n"));
+                return retVal;
+            }
+        }
+
+        /*
+         * analyze the test result for RX Pair
+         */
+        dist2fault = analizeAdvVCTResult(phyInfo->vctType, i, crossChannelReg, mode, cableStatus);
+
+        if(mode)
+        {
+            if ((dist2fault>=0) && (dist2fault<GT_ADV_VCT_ACCEPTABLE_SHORT_CABLE))
+            {
+                DBG_INFO(("Distance to Fault is too Short. So, rerun after changing pulse width\n"));
+                *tooShort = GT_TRUE;
+                break;
+            }
+        }
+    }
+
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* getAdvCableStatus_1181
+*
+* DESCRIPTION:
+*        This routine performs the virtual cable test for the PHY with
+*        multiple page mode and returns the the status per MDIP/N.
+*
+* INPUTS:
+*        port - logical port number.
+*        mode - advance VCT mode (either First Peak or Maximum Peak)
+*
+* OUTPUTS:
+*        cableStatus - the port copper cable status.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+static
+GT_STATUS getAdvCableStatus_1181
+(
+    IN  GT_QD_DEV          *dev,
+    IN  GT_U8           hwPort,
+    IN    GT_PHY_INFO        *phyInfo,
+    IN  GT_ADV_VCT_MODE mode,
+    OUT GT_ADV_CABLE_STATUS *cableStatus
+)
+{
+    GT_STATUS retVal;
+    GT_U16 orgPulse, u16Data;
+    GT_BOOL flag, tooShort;
+
+    flag = GT_TRUE;
+
+    /*
+     * set Adv VCT Mode
+     */
+    switch (mode.mode)
+    {
+        case GT_ADV_VCT_FIRST_PEAK:
+            break;
+        case GT_ADV_VCT_MAX_PEAK:
+            break;
+        default:
+            DBG_INFO(("Unknown Advanced VCT Mode.\n"));
+            return GT_BAD_PARAM;
+    }
+
+    u16Data = (mode.mode<<6) | (mode.peakDetHyst) | (mode.sampleAvg<<8);
+    if((retVal = hwSetPagedPhyRegField(
+                        dev,hwPort,8,QD_REG_ADV_VCT_CONTROL_8,0,11,phyInfo->anyPage,u16Data)) != GT_OK)
+    {
+        DBG_INFO(("Writing to paged phy reg failed.\n"));
+        return retVal;
+    }
+
+    if (flag)
+    {
+        /* save original Pulse Width */
+        if((retVal = hwGetPagedPhyRegField(
+                            dev,hwPort,9,23,10,2,phyInfo->anyPage,&orgPulse)) != GT_OK)
+        {
+            DBG_INFO(("Reading paged phy reg failed.\n"));
+            return retVal;
+        }
+
+        /* set the Pulse Width with default value */
+        if (orgPulse != 0)
+        {
+            if((retVal = hwSetPagedPhyRegField(
+                                dev,hwPort,9,23,10,2,phyInfo->anyPage,0)) != GT_OK)
+            {
+                DBG_INFO(("Writing to paged phy reg failed.\n"));
+                return retVal;
+            }
+        }
+    }
+
+    if((retVal=runAdvCableTest_1181(dev,hwPort,phyInfo,flag,cableStatus,&tooShort)) != GT_OK)
+    {
+        DBG_INFO(("Running advanced VCT failed.\n"));
+        return retVal;
+    }
+
+    if (flag)
+    {
+        if(tooShort)
+        {
+            /* set the Pulse Width with minimum width */
+            if((retVal = hwSetPagedPhyRegField(
+                                dev,hwPort,9,23,10,2,phyInfo->anyPage,3)) != GT_OK)
+            {
+                DBG_INFO(("Writing to paged phy reg failed.\n"));
+                return retVal;
+            }
+
+            /* run the Adv VCT again */
+            if((retVal=runAdvCableTest_1181(dev,hwPort,phyInfo,GT_FALSE,cableStatus,&tooShort)) != GT_OK)
+            {
+                DBG_INFO(("Running advanced VCT failed.\n"));
+                return retVal;
+            }
+
+        }
+
+        /* set the Pulse Width back to the original value */
+        if((retVal = hwSetPagedPhyRegField(
+                            dev,hwPort,9,23,10,2,phyInfo->anyPage,orgPulse)) != GT_OK)
+        {
+            DBG_INFO(("Writing to paged phy reg failed.\n"));
+            return retVal;
+        }
+
+    }
+
+    return GT_OK;
+}
+
+
+static
+GT_STATUS runAdvCableTest_1116_set
+(
+    IN  GT_QD_DEV          *dev,
+    IN  GT_U8           hwPort,
+    IN    GT_PHY_INFO        *phyInfo,
+    IN  GT_32           channel,
+    IN  GT_ADV_VCT_TRANS_CHAN_SEL        crosspair
+)
+{
+    GT_STATUS retVal;
+
+    DBG_INFO(("runAdvCableTest_1116_set Called.\n"));
+
+    /*
+     * start Advanced Virtual Cable Tester
+     */
+    if((retVal = hwSetPagedPhyRegField(
+                        dev,hwPort,5,QD_REG_ADV_VCT_CONTROL_5,15,1,phyInfo->anyPage,1)) != GT_OK)
+    {
+        DBG_INFO(("Writing to paged phy reg failed.\n"));
+        return retVal;
+    }
+
+    return GT_OK;
+}
+
+static
+GT_STATUS runAdvCableTest_1116_check
+(
+    IN  GT_QD_DEV       *dev,
+    IN  GT_U8           hwPort,
+    IN    GT_PHY_INFO        *phyInfo
+)
+{
+    GT_STATUS retVal;
+    GT_U16 u16Data;
+
+    /*
+     * loop until test completion and result is valid
+     */
+    do {
+        if((retVal = hwReadPagedPhyReg(
+                            dev,hwPort,5,QD_REG_ADV_VCT_CONTROL_5,phyInfo->anyPage,&u16Data)) != GT_OK)
+        {
+            DBG_INFO(("Reading from paged phy reg failed.\n"));
+            return retVal;
+        }
+    } while (u16Data & 0x8000);
+
+    return GT_OK;
+}
+
+static
+GT_STATUS runAdvCableTest_1116_get
+(
+    IN  GT_QD_DEV          *dev,
+    IN  GT_U8           hwPort,
+    IN    GT_PHY_INFO        *phyInfo,
+    IN  GT_ADV_VCT_TRANS_CHAN_SEL    crosspair,
+    IN  GT_32            channel,
+    OUT GT_ADV_CABLE_STATUS *cableStatus,
+    OUT GT_BOOL         *tooShort
+)
+{
+    GT_STATUS retVal;
+    GT_U16 u16Data;
+    GT_U16 crossChannelReg[GT_MDI_PAIR_NUM];
+    int j;
+    GT_16  dist2fault;
+    GT_BOOL         mode;
+    GT_BOOL         localTooShort[GT_MDI_PAIR_NUM];
+
+    VCT_REGISTER regList[GT_MDI_PAIR_NUM] = { {5,16},{5,17},{5,18},{5,19} };
+
+    mode = GT_TRUE;
+
+    DBG_INFO(("runAdvCableTest_1116_get Called.\n"));
+
+    if ((retVal = hwReadPagedPhyReg(
+                        dev,hwPort,5,QD_REG_ADV_VCT_CONTROL_5,phyInfo->anyPage,&u16Data)) != GT_OK)
+    {
+        DBG_INFO(("Reading from paged phy reg failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("Page 5 of Reg23 after test : %0#x.\n", u16Data));
+
+    /*
+     * read the test result for the cross pair against selected MDI Pair
+     */
+    for (j=0; j<GT_MDI_PAIR_NUM; j++)
+    {
+        if((retVal = hwReadPagedPhyReg(
+                                dev,hwPort,
+                                regList[j].page,
+                                regList[j].regOffset,
+                                phyInfo->anyPage,
+                                &crossChannelReg[j])) != GT_OK)
+        {
+            DBG_INFO(("Reading from paged phy reg failed.\n"));
+            return retVal;
+        }
+        DBG_INFO(("@@@@@ reg channel %d is %x \n", j, crossChannelReg[j]));
+    }
+
+    /*
+     * analyze the test result for RX Pair
+     */
+    for (j=0; j<GT_MDI_PAIR_NUM; j++)
+    {
+        if (crosspair!=GT_ADV_VCT_TCS_NO_CROSSPAIR)
+            dist2fault = analizeAdvVCTResult(phyInfo->vctType, j, crossChannelReg, mode&(*tooShort), cableStatus);
+        else
+            dist2fault = analizeAdvVCTNoCrosspairResult(phyInfo->vctType, j, crossChannelReg, mode&(*tooShort), cableStatus);
+
+        localTooShort[j]=GT_FALSE;
+        if((mode)&&(*tooShort==GT_FALSE))
+        {
+            if ((dist2fault>=0) && (dist2fault<GT_ADV_VCT_ACCEPTABLE_SHORT_CABLE))
+            {
+                DBG_INFO(("@@@#@@@@ it is too short dist2fault %d\n", dist2fault));
+                DBG_INFO(("Distance to Fault is too Short. So, rerun after changing pulse width\n"));
+                localTooShort[j]=GT_TRUE;
+            }
+        }
+    }
+
+    /* check and decide if length is too short */
+    for (j=0; j<GT_MDI_PAIR_NUM; j++)
+    {
+        if (localTooShort[j]==GT_FALSE) break;
+    }
+
+    if (j==GT_MDI_PAIR_NUM)
+        *tooShort = GT_TRUE;
+
+    return GT_OK;
+}
+
+static
+GT_STATUS runAdvCableTest_1116
+(
+    IN  GT_QD_DEV          *dev,
+    IN  GT_U8           hwPort,
+    IN    GT_PHY_INFO        *phyInfo,
+    IN  GT_BOOL         mode,
+    IN  GT_ADV_VCT_TRANS_CHAN_SEL   crosspair,
+    OUT GT_ADV_CABLE_STATUS *cableStatus,
+    OUT GT_BOOL         *tooShort
+)
+{
+    GT_STATUS retVal;
+    GT_32  channel;
+
+    DBG_INFO(("runAdvCableTest_1116 Called.\n"));
+
+    if (crosspair!=GT_ADV_VCT_TCS_NO_CROSSPAIR)
+    {
+        channel = crosspair - GT_ADV_VCT_TCS_CROSSPAIR_0;
+    }
+    else
+    {
+        channel = 0;
+    }
+
+    /* Set transmit channel */
+    if((retVal=runAdvCableTest_1116_set(dev,hwPort, phyInfo,channel, crosspair)) != GT_OK)
+    {
+        DBG_INFO(("Running advanced VCT failed.\n"));
+        return retVal;
+    }
+
+    /*
+     * check test completion
+     */
+    retVal = runAdvCableTest_1116_check(dev,hwPort,phyInfo);
+    if (retVal != GT_OK)
+    {
+        DBG_INFO(("Running advanced VCT failed.\n"));
+        return retVal;
+    }
+
+    /*
+     * read the test result for the cross pair against selected MDI Pair
+     */
+    retVal = runAdvCableTest_1116_get(dev, hwPort, phyInfo, crosspair,
+                                    channel,cableStatus,(GT_BOOL *)tooShort);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Running advanced VCT get failed.\n"));
+    }
+
+    return retVal;
+}
+
+static
+GT_STATUS getAdvCableStatus_1116
+(
+    IN  GT_QD_DEV       *dev,
+    IN  GT_U8           hwPort,
+    IN    GT_PHY_INFO        *phyInfo,
+    IN  GT_ADV_VCT_MODE mode,
+    OUT GT_ADV_CABLE_STATUS *cableStatus
+)
+{
+    GT_STATUS retVal;
+    GT_U16 orgPulse, u16Data;
+    GT_BOOL flag, tooShort;
+    GT_ADV_VCT_TRANS_CHAN_SEL crosspair;
+
+    flag = GT_TRUE;
+    crosspair = mode.transChanSel;
+
+    /*
+     * Check Adv VCT Mode
+     */
+    switch (mode.mode)
+    {
+        case GT_ADV_VCT_FIRST_PEAK:
+        case GT_ADV_VCT_MAX_PEAK:
+                break;
+
+        default:
+                DBG_INFO(("Unknown ADV VCT Mode.\n"));
+                return GT_NOT_SUPPORTED;
+    }
+
+    if((retVal = hwGetPagedPhyRegField(
+                            dev,hwPort,5,QD_REG_ADV_VCT_CONTROL_5,0,13,phyInfo->anyPage,&u16Data)) != GT_OK)
+    {
+        DBG_INFO(("Reading paged phy reg failed.\n"));
+        return retVal;
+    }
+
+    u16Data |= ((mode.mode<<6) | (mode.transChanSel<<11));
+    if (mode.peakDetHyst) u16Data |= (mode.peakDetHyst);
+    if (mode.sampleAvg) u16Data |= (mode.sampleAvg<<8) ;
+
+    if((retVal = hwSetPagedPhyRegField(
+                        dev,hwPort,5,QD_REG_ADV_VCT_CONTROL_5,0,13,phyInfo->anyPage,u16Data)) != GT_OK)
+    {
+        DBG_INFO(("Writing to paged phy reg failed.\n"));
+        return retVal;
+    }
+
+    if (flag)
+    {
+        /* save original Pulse Width */
+        if((retVal = hwGetPagedPhyRegField(dev,hwPort,5,28,10,2,phyInfo->anyPage,&orgPulse)) != GT_OK)
+        {
+            DBG_INFO(("Reading paged phy reg failed.\n"));
+            return retVal;
+        }
+
+        /* set the Pulse Width with default value */
+        if (orgPulse != 0)
+        {
+            if((retVal = hwSetPagedPhyRegField(dev,hwPort,5,28,10,2,phyInfo->anyPage,0)) != GT_OK)
+            {
+                DBG_INFO(("Writing to paged phy reg failed.\n"));
+                return retVal;
+            }
+        }
+        tooShort=GT_FALSE;
+    }
+
+    if((retVal=runAdvCableTest_1116(dev,hwPort,phyInfo,flag,crosspair,
+                                    cableStatus,&tooShort)) != GT_OK)
+    {
+        DBG_INFO(("Running advanced VCT failed.\n"));
+        return retVal;
+    }
+
+    if (flag)
+    {
+        if(tooShort)
+        {
+            /* set the Pulse Width with minimum width */
+            if((retVal = hwSetPagedPhyRegField(
+                                        dev,hwPort,5,28,10,2,phyInfo->anyPage,3)) != GT_OK)
+            {
+                DBG_INFO(("Writing to paged phy reg failed.\n"));
+                return retVal;
+            }
+
+            /* run the Adv VCT again */
+            if((retVal=runAdvCableTest_1116(dev,hwPort,phyInfo,GT_FALSE,crosspair,
+                                        cableStatus,&tooShort)) != GT_OK)
+            {
+                DBG_INFO(("Running advanced VCT failed.\n"));
+                return retVal;
+            }
+
+        }
+
+        /* set the Pulse Width back to the original value */
+        if((retVal = hwSetPagedPhyRegField(
+                                dev,hwPort,5,28,10,2,phyInfo->anyPage,orgPulse)) != GT_OK)
+        {
+            DBG_INFO(("Writing to paged phy reg failed.\n"));
+            return retVal;
+        }
+
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gvctGetAdvCableStatus
+*
+* DESCRIPTION:
+*       This routine perform the advanced virtual cable test for the requested
+*       port and returns the the status per MDI pair.
+*
+* INPUTS:
+*       port - logical port number.
+*       mode - advance VCT mode (either First Peak or Maximum Peak)
+*
+* OUTPUTS:
+*       cableStatus - the port copper cable status.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       Internal Gigabit Phys in 88E6165 family and 88E6351 family devices
+*        are supporting this API.
+*
+*******************************************************************************/
+GT_STATUS gvctGetAdvCableDiag
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT        port,
+    IN  GT_ADV_VCT_MODE mode,
+    OUT GT_ADV_CABLE_STATUS *cableStatus
+)
+{
+    GT_STATUS status;
+    GT_U8 hwPort;
+    GT_U16 u16Data, org0;
+    GT_BOOL ppuEn;
+    GT_PHY_INFO    phyInfo;
+    GT_BOOL            autoOn, autoNeg;
+    GT_U16            pageReg;
+    int i;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gvctGetAdvCableDiag_mad(dev, port, mode, cableStatus);
+#endif
+
+    DBG_INFO(("gvctGetCableDiag Called.\n"));
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the port supports VCT */
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (!(phyInfo.flag & GT_PHY_ADV_VCT_CAPABLE))
+    {
+        DBG_INFO(("Not Supported\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Need to disable PPUEn for safe. */
+    if(gsysGetPPUEn(dev,&ppuEn) != GT_OK)
+    {
+        ppuEn = GT_FALSE;
+    }
+
+    if(ppuEn != GT_FALSE)
+    {
+        if((status= gsysSetPPUEn(dev,GT_FALSE)) != GT_OK)
+        {
+            DBG_INFO(("Not able to disable PPUEn.\n"));
+            gtSemGive(dev,dev->phyRegsSem);
+            return status;
+        }
+        gtDelay(250);
+    }
+
+    if(driverPagedAccessStart(dev,hwPort,phyInfo.pageType,&autoOn,&pageReg) != GT_OK)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    /*
+     * If Fiber is used, simply return with test fail.
+     */
+    if(phyInfo.flag & GT_PHY_FIBER)
+    {
+        if((status= hwReadPagedPhyReg(dev,hwPort,1,17,phyInfo.anyPage,&u16Data)) != GT_OK)
+        {
+        gtSemGive(dev,dev->phyRegsSem);
+            return status;
+        }
+
+        if(u16Data & 0x400)
+        {
+            for (i=0; i<GT_MDI_PAIR_NUM; i++)
+            {
+                cableStatus->cableStatus[i] = GT_ADV_VCT_FAIL;
+            }
+        gtSemGive(dev,dev->phyRegsSem);
+            return GT_OK;
+        }
+    }
+
+    /*
+     * Check the link
+     */
+    if((status= hwReadPagedPhyReg(dev,hwPort,0,17,phyInfo.anyPage,&u16Data)) != GT_OK)
+    {
+        DBG_INFO(("Not able to reset the Phy.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return status;
+    }
+
+    autoNeg = GT_FALSE;
+    org0 = 0;
+    if (!(u16Data & 0x400))
+    {
+        /* link is down, so disable auto-neg if enabled */
+        if((status= hwReadPagedPhyReg(dev,hwPort,0,0,phyInfo.anyPage,&u16Data)) != GT_OK)
+        {
+            DBG_INFO(("Not able to reset the Phy.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+            return status;
+        }
+
+        org0 = u16Data;
+
+        if (u16Data & 0x1000)
+        {
+            u16Data = 0x140;
+
+            /* link is down, so disable auto-neg if enabled */
+            if((status= hwWritePagedPhyReg(dev,hwPort,0,0,phyInfo.anyPage,u16Data)) != GT_OK)
+            {
+                DBG_INFO(("Not able to reset the Phy.\n"));
+                return status;
+            }
+
+            if((status= hwPhyReset(dev,hwPort,0xFF)) != GT_OK)
+            {
+                DBG_INFO(("Not able to reset the Phy.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+                return status;
+            }
+            autoNeg = GT_TRUE;
+        }
+    }
+
+    switch(phyInfo.vctType)
+    {
+        case GT_PHY_ADV_VCT_TYPE1:
+            status = getAdvCableStatus_1181(dev,hwPort,&phyInfo,mode,cableStatus);
+            break;
+        case GT_PHY_ADV_VCT_TYPE2:
+            status = getAdvCableStatus_1116(dev,hwPort,&phyInfo,mode,cableStatus);
+            break;
+        default:
+            status = GT_FAIL;
+            break;
+    }
+
+    if (autoNeg)
+    {
+        if((status= hwPhyReset(dev,hwPort,org0)) != GT_OK)
+        {
+            DBG_INFO(("Not able to reset the Phy.\n"));
+            goto cableDiagCleanup;
+        gtSemGive(dev,dev->phyRegsSem);
+            return status;
+        }
+    }
+cableDiagCleanup:
+
+    if(driverPagedAccessStop(dev,hwPort,phyInfo.pageType,autoOn,pageReg) != GT_OK)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if(ppuEn != GT_FALSE)
+    {
+        if(gsysSetPPUEn(dev,ppuEn) != GT_OK)
+        {
+            DBG_INFO(("Not able to enable PPUEn.\n"));
+            status = GT_FAIL;
+        }
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return status;
+}
+
+
+/*******************************************************************************
+* dspLookup
+*
+* DESCRIPTION:
+*       This routine returns cable length (meters) by reading DSP Lookup table.
+*
+* INPUTS:
+*       regValue - register 21
+*
+* OUTPUTS:
+*       cableLen - cable length (unit of meters).
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+static
+GT_STATUS dspLookup
+(
+    IN    GT_PHY_INFO        *phyInfo,
+    IN  GT_U16 regValue,
+    OUT GT_32  *cableLen
+)
+{
+    GT_U16 startEntry,tableEntry;
+    GT_U8* tbl;
+    switch(phyInfo->exStatusType)
+    {
+        case GT_PHY_EX_STATUS_TYPE1:    /* 88E1111/88E1141/E1145 */
+            startEntry = 18-1;
+            tableEntry = 80;
+            tbl = tbl_1111;
+            break;
+
+        case GT_PHY_EX_STATUS_TYPE2:    /* 88E1112 */
+            startEntry = 17;
+            tableEntry = 100;
+            tbl = tbl_1112;
+            break;
+
+        case GT_PHY_EX_STATUS_TYPE3:   /* 88E1149 has no reference constans*/
+            startEntry = 16;
+            tableEntry = 73;
+            tbl = tbl_1181;
+            break;
+
+        case GT_PHY_EX_STATUS_TYPE4:   /* 88E1181 */
+            startEntry = 16;
+            tableEntry = 73;
+            tbl = tbl_1181;
+            break;
+
+        case GT_PHY_EX_STATUS_TYPE5:   /* 88E1116 88E1121 */
+            startEntry = 16;
+            tableEntry = 73;
+            tbl = tbl_1116;
+            break;
+
+        case GT_PHY_EX_STATUS_TYPE6:   /* 88E6165 Internal Phy */
+            if ((phyInfo->phyId & PHY_MODEL_MASK) == DEV_G65G)
+                startEntry = 18;
+            else
+                startEntry = 21;
+            tableEntry = 76;
+            tbl = tbl_1240;
+            break;
+
+        default:
+            return GT_NOT_SUPPORTED;
+    }
+
+    if (tbl == NULL)
+    {
+        *cableLen = -1;
+        return GT_OK;
+    }
+
+    if (regValue < startEntry)
+    {
+        *cableLen = 0;
+        return GT_OK;
+    }
+
+    if (regValue >= (tableEntry+startEntry-1))
+    {
+        regValue = tableEntry-1;
+    }
+    else
+    {
+        regValue -= startEntry;
+    }
+
+    *cableLen = (GT_32)tbl[regValue];
+    return GT_OK;
+}
+
+/*******************************************************************************
+* getDSPDistance_1111
+*
+* DESCRIPTION:
+*       This routine returns cable length (meters) from DSP method.
+*       This routine is for the 88E1111 like devices.
+*
+* INPUTS:
+*       mdi - pair of each MDI (0..3).
+*
+* OUTPUTS:
+*       cableLen - cable length (unit of meters).
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+static
+GT_STATUS getDSPDistance_1111
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8  hwPort,
+    IN    GT_PHY_INFO        *phyInfo,
+    IN  GT_U32 mdi,
+    OUT GT_32 *cableLen
+)
+{
+    GT_U16     data, pageNum;
+    GT_STATUS  retVal;
+
+    DBG_INFO(("getDSPDistance Called.\n"));
+
+    pageNum = 0x8754 + (GT_U16)((mdi << 12)&0xf000);
+
+    if((retVal = hwReadPagedPhyReg(dev,hwPort,(GT_U8)pageNum,31,phyInfo->anyPage,&data)) != GT_OK)
+    {
+        DBG_INFO(("Reading length of MDI pair failed.\n"));
+        return retVal;
+    }
+
+    return dspLookup(phyInfo,data,cableLen);
+}
+
+
+/*******************************************************************************
+* getDSPDistance_1181
+*
+* DESCRIPTION:
+*       This routine returns cable length (meters) from DSP method.
+*       This routine is for the 88E1181 like devices.
+*
+* INPUTS:
+*       mdi - pair of each MDI (0..3).
+*
+* OUTPUTS:
+*       cableLen - cable length (unit of meters).
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+static
+GT_STATUS getDSPDistance_1181
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8  hwPort,
+    IN    GT_PHY_INFO        *phyInfo,
+    IN  GT_U32 mdi,
+    OUT GT_32 *cableLen
+)
+{
+    GT_U16     data, retryCount;
+    GT_STATUS  retVal;
+
+    DBG_INFO(("getDSPDistance Called.\n"));
+
+    /* Set the required bits for Cable length register */
+    if((retVal = hwWritePagedPhyReg(dev,hwPort,0xff,19,phyInfo->anyPage,(GT_U16)(0x1018+(0xff&mdi)))) != GT_OK)
+    {
+        DBG_INFO(("Writing to paged phy reg failed.\n"));
+        return retVal;
+    }
+
+    retryCount = 1000;
+
+    do
+    {
+        if(retryCount == 0)
+        {
+            DBG_INFO(("Ready bit of Cable length resiter is not set.\n"));
+            return GT_FAIL;
+        }
+
+        /* Check the ready bit of Cable length register */
+        if((retVal = hwGetPagedPhyRegField(dev,hwPort,0xff,19,15,1,phyInfo->anyPage,&data)) != GT_OK)
+        {
+            DBG_INFO(("Writing to paged phy reg failed.\n"));
+            return retVal;
+        }
+
+        retryCount--;
+
+    } while(!data);
+
+    /* read length of MDI pair */
+    if((retVal = hwReadPagedPhyReg(dev,hwPort,0xff,21,phyInfo->anyPage,&data)) != GT_OK)
+    {
+        DBG_INFO(("Reading length of MDI pair failed.\n"));
+        return retVal;
+    }
+
+    return dspLookup(phyInfo,data,cableLen);
+}
+
+
+/*******************************************************************************
+* getDSPDistance_1240
+*
+* DESCRIPTION:
+*       This routine returns cable length (meters) from DSP method.
+*       This routine is for the 88E1181 like devices.
+*
+* INPUTS:
+*       mdi - pair of each MDI (0..3).
+*
+* OUTPUTS:
+*       cableLen - cable length (unit of meters).
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+static
+GT_STATUS getDSPDistance_1240
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8  hwPort,
+    IN    GT_PHY_INFO        *phyInfo,
+    IN  GT_U32 mdi,
+    OUT GT_32 *cableLen
+)
+{
+    GT_U16     data, retryCount;
+    GT_STATUS  retVal;
+
+    DBG_INFO(("getDSPDistance Called.\n"));
+
+    /* Set the required bits for Cable length register */
+    if((retVal = hwWritePagedPhyReg(dev,hwPort,0xff,16,phyInfo->anyPage,(GT_U16)(0x1118+(0xff&mdi)))) != GT_OK)
+    {
+        DBG_INFO(("Writing to paged phy reg failed.\n"));
+        return retVal;
+    }
+
+    retryCount = 1000;
+
+    do
+    {
+        if(retryCount == 0)
+        {
+            DBG_INFO(("Ready bit of Cable length resiter is not set.\n"));
+            return GT_FAIL;
+        }
+
+        /* Check the ready bit of Cable length register */
+        if((retVal = hwGetPagedPhyRegField(dev,hwPort,0xff,16,15,1,phyInfo->anyPage,&data)) != GT_OK)
+        {
+            DBG_INFO(("Writing to paged phy reg failed.\n"));
+            return retVal;
+        }
+
+        retryCount--;
+
+    } while(!data);
+
+    /* read length of MDI pair */
+    if((retVal = hwReadPagedPhyReg(dev,hwPort,0xff,18,phyInfo->anyPage,&data)) != GT_OK)
+    {
+        DBG_INFO(("Reading length of MDI pair failed.\n"));
+        return retVal;
+    }
+
+    return dspLookup(phyInfo,data,cableLen);
+}
+
+
+
+/*******************************************************************************
+* getExStatus_28
+*
+* DESCRIPTION:
+*       This routine retrieves Pair Skew, Pair Swap, and Pair Polarity
+*        for 1000M phy with multiple page mode
+*
+* INPUTS:
+*       dev - device context.
+*       port - logical port number.
+*
+* OUTPUTS:
+*       extendedStatus - extended cable status.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+static GT_STATUS getExStatus_28
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U8            hwPort,
+    IN    GT_PHY_INFO        *phyInfo,
+    OUT GT_ADV_EXTENDED_STATUS *extendedStatus
+)
+{
+    GT_STATUS retVal;
+    GT_U16 u16Data, i;
+
+    extendedStatus->isValid = GT_FALSE;
+    /* DSP based cable length */
+    for (i=0; i<GT_MDI_PAIR_NUM; i++)
+    {
+        if((retVal = getDSPDistance_1111(dev,hwPort,phyInfo,i,(GT_32 *)&(extendedStatus->cableLen[i]))) != GT_OK)
+        {
+            DBG_INFO(("getDSPDistance failed.\n"));
+            return retVal;
+        }
+    }
+
+
+    /*
+     * get data from 28_5 register for pair swap
+     */
+    if((retVal = hwReadPagedPhyReg(
+                    dev,hwPort,5,28,phyInfo->anyPage,&u16Data)) != GT_OK)
+    {
+        DBG_INFO(("Reading from paged phy reg failed.\n"));
+        return retVal;
+    }
+
+    /* if bit 6 is not set, it's not valid. */
+    if (!(u16Data & 0x40))
+    {
+        DBG_INFO(("Valid Bit is not set (%0#x).\n", u16Data));
+        return GT_OK;
+    }
+
+    extendedStatus->isValid = GT_TRUE;
+
+    /* get Pair Polarity */
+    for(i=0; i<GT_MDI_PAIR_NUM; i++)
+    {
+        switch((u16Data >> i) & 0x1)
+        {
+            case 0:
+                extendedStatus->pairPolarity[i] = GT_POSITIVE;
+                break;
+            default:
+                extendedStatus->pairPolarity[i] = GT_NEGATIVE;
+            break;
+        }
+    }
+
+    /* get Pair Swap for Channel A and B */
+    if (u16Data & 0x10)
+    {
+        extendedStatus->pairSwap[0] = GT_CHANNEL_A;
+        extendedStatus->pairSwap[1] = GT_CHANNEL_B;
+    }
+    else
+    {
+        extendedStatus->pairSwap[0] = GT_CHANNEL_B;
+        extendedStatus->pairSwap[1] = GT_CHANNEL_A;
+    }
+
+    /* get Pair Swap for Channel C and D */
+    if (u16Data & 0x20)
+    {
+        extendedStatus->pairSwap[2] = GT_CHANNEL_C;
+        extendedStatus->pairSwap[3] = GT_CHANNEL_D;
+    }
+    else
+    {
+        extendedStatus->pairSwap[2] = GT_CHANNEL_D;
+        extendedStatus->pairSwap[3] = GT_CHANNEL_C;
+    }
+
+    /*
+     * get data from 28_4 register for pair skew
+     */
+    if((retVal = hwReadPagedPhyReg(
+                    dev,hwPort,4,28,phyInfo->anyPage,&u16Data)) != GT_OK)
+    {
+        DBG_INFO(("Reading from paged phy reg failed.\n"));
+        return retVal;
+    }
+
+    /* get Pair Skew */
+    for(i=0; i<GT_MDI_PAIR_NUM; i++)
+    {
+        extendedStatus->pairSkew[i] = ((u16Data >> i*4) & 0xF) * 8;
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* getExStatus
+*
+* DESCRIPTION:
+*       This routine retrieves Pair Skew, Pair Swap, and Pair Polarity
+*        for 1000M phy with multiple page mode
+*
+* INPUTS:
+*       dev - device context.
+*       port - logical port number.
+*
+* OUTPUTS:
+*       extendedStatus - extended cable status.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+static GT_STATUS getExStatus
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U8            hwPort,
+    IN    GT_PHY_INFO        *phyInfo,
+    OUT GT_ADV_EXTENDED_STATUS *extendedStatus
+)
+{
+    GT_STATUS retVal;
+    GT_U16 u16Data, i;
+
+    extendedStatus->isValid = GT_FALSE;
+    /* DSP based cable length */
+    switch(phyInfo->exStatusType)
+    {
+        case GT_PHY_EX_STATUS_TYPE1:
+        case GT_PHY_EX_STATUS_TYPE2:
+            for (i=0; i<GT_MDI_PAIR_NUM; i++)
+            {
+                if((retVal = getDSPDistance_1111(dev,hwPort,phyInfo,i,(GT_32 *)&extendedStatus->cableLen[i])) != GT_OK)
+                {
+                    DBG_INFO(("getDSPDistance failed.\n"));
+                    return retVal;
+                }
+            }
+            break;
+        case GT_PHY_EX_STATUS_TYPE3:
+        case GT_PHY_EX_STATUS_TYPE4:
+        case GT_PHY_EX_STATUS_TYPE5:
+            for (i=0; i<GT_MDI_PAIR_NUM; i++)
+            {
+                if((retVal = getDSPDistance_1181(dev,hwPort,phyInfo,i,(GT_32 *)&extendedStatus->cableLen[i])) != GT_OK)
+                {
+                    DBG_INFO(("getDSPDistance failed.\n"));
+                    return retVal;
+                }
+            }
+            break;
+
+        case GT_PHY_EX_STATUS_TYPE6:
+            for (i=0; i<GT_MDI_PAIR_NUM; i++)
+            {
+                if((retVal = getDSPDistance_1240(dev,hwPort,phyInfo,i,(GT_32 *)&extendedStatus->cableLen[i])) != GT_OK)
+                {
+                    DBG_INFO(("getDSPDistance failed.\n"));
+                    return retVal;
+                }
+            }
+            break;
+
+        default:
+            return GT_NOT_SUPPORTED;
+    }
+
+    /*
+     * get data from 21_5 register for pair swap
+     */
+    if((retVal = hwReadPagedPhyReg(
+                    dev,hwPort,5,QD_REG_PAIR_SWAP_STATUS,phyInfo->anyPage,&u16Data)) != GT_OK)
+    {
+        DBG_INFO(("Reading from paged phy reg failed.\n"));
+        return retVal;
+    }
+
+    /* if bit 6 is not set, it's not valid. */
+    if (!(u16Data & 0x40))
+    {
+        DBG_INFO(("Valid Bit is not set (%0#x).\n", u16Data));
+        return GT_OK;
+    }
+
+    extendedStatus->isValid = GT_TRUE;
+
+    /* get Pair Polarity */
+    for(i=0; i<GT_MDI_PAIR_NUM; i++)
+    {
+        switch((u16Data >> i) & 0x1)
+        {
+            case 0:
+                extendedStatus->pairPolarity[i] = GT_POSITIVE;
+                break;
+            default:
+                extendedStatus->pairPolarity[i] = GT_NEGATIVE;
+            break;
+        }
+    }
+
+    /* get Pair Swap for Channel A and B */
+    if (u16Data & 0x10)
+    {
+        extendedStatus->pairSwap[0] = GT_CHANNEL_A;
+        extendedStatus->pairSwap[1] = GT_CHANNEL_B;
+    }
+    else
+    {
+        extendedStatus->pairSwap[0] = GT_CHANNEL_B;
+        extendedStatus->pairSwap[1] = GT_CHANNEL_A;
+    }
+
+    /* get Pair Swap for Channel C and D */
+    if (u16Data & 0x20)
+    {
+        extendedStatus->pairSwap[2] = GT_CHANNEL_C;
+        extendedStatus->pairSwap[3] = GT_CHANNEL_D;
+    }
+    else
+    {
+        extendedStatus->pairSwap[2] = GT_CHANNEL_D;
+        extendedStatus->pairSwap[3] = GT_CHANNEL_C;
+    }
+
+    /*
+     * get data from 20_5 register for pair skew
+     */
+    if((retVal = hwReadPagedPhyReg(
+                    dev,hwPort,5,QD_REG_PAIR_SKEW_STATUS,phyInfo->anyPage,&u16Data)) != GT_OK)
+    {
+        DBG_INFO(("Reading from paged phy reg failed.\n"));
+        return retVal;
+    }
+
+    /* get Pair Skew */
+    for(i=0; i<GT_MDI_PAIR_NUM; i++)
+    {
+        extendedStatus->pairSkew[i] = ((u16Data >> i*4) & 0xF) * 8;
+    }
+
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gvctGetAdvExtendedStatus
+*
+* DESCRIPTION:
+*       This routine retrieves extended cable status, such as Pair Poloarity,
+*        Pair Swap, and Pair Skew. Note that this routine will be success only
+*        if 1000Base-T Link is up.
+*        Note: Since DSP based cable length in extended status is based on
+*             constants from test results. At present, only E1181, E1111, and
+*             E1112 are available.
+*
+* INPUTS:
+*       dev  - pointer to GT driver structure returned from qdLoadDriver
+*       port - logical port number.
+*
+* OUTPUTS:
+*       extendedStatus - the extended cable status.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*        Supporting Device list:
+*           88E1111, 88E1112, 88E1141~6, 88E1149, and Internal Gigabit Phys
+*            in 88E6165 family and 88E6351 family devices
+*
+*******************************************************************************/
+GT_STATUS gvctGetAdvExtendedStatus
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT   port,
+    OUT GT_ADV_EXTENDED_STATUS *extendedStatus
+)
+{
+    GT_STATUS retVal;
+    GT_U8 hwPort;
+    GT_BOOL ppuEn;
+    GT_PHY_INFO    phyInfo;
+    GT_BOOL            autoOn;
+    GT_U16            pageReg;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gvctGetAdvExtendedStatus_mad(dev, port, extendedStatus);
+#endif
+
+    DBG_INFO(("gvctGetAdvExtendedStatus Called.\n"));
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the port supports VCT */
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (!(phyInfo.flag & GT_PHY_EX_CABLE_STATUS))
+    {
+        DBG_INFO(("Not Supported\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Need to disable PPUEn for safe. */
+    if(gsysGetPPUEn(dev,&ppuEn) != GT_OK)
+    {
+        ppuEn = GT_FALSE;
+    }
+
+    if(ppuEn != GT_FALSE)
+    {
+        if((retVal = gsysSetPPUEn(dev,GT_FALSE)) != GT_OK)
+        {
+            DBG_INFO(("Not able to disable PPUEn.\n"));
+            gtSemGive(dev,dev->phyRegsSem);
+            return retVal;
+        }
+        gtDelay(250);
+    }
+
+    if(driverPagedAccessStart(dev,hwPort,phyInfo.pageType,&autoOn,&pageReg) != GT_OK)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    switch(phyInfo.exStatusType)
+    {
+        case GT_PHY_EX_STATUS_TYPE1:
+            if((retVal = getExStatus_28(dev,hwPort,&phyInfo,extendedStatus)) != GT_OK)
+            {
+                DBG_INFO(("Getting Extanded Cable Status failed.\n"));
+                break;
+            }
+            break;
+
+        case GT_PHY_EX_STATUS_TYPE2:
+        case GT_PHY_EX_STATUS_TYPE3:
+        case GT_PHY_EX_STATUS_TYPE4:
+        case GT_PHY_EX_STATUS_TYPE5:
+        case GT_PHY_EX_STATUS_TYPE6:
+            if((retVal = getExStatus(dev,hwPort,&phyInfo,extendedStatus)) != GT_OK)
+            {
+                DBG_INFO(("Getting Extanded Cable Status failed.\n"));
+                break;
+            }
+
+            break;
+        default:
+            retVal = GT_NOT_SUPPORTED;
+    }
+
+        gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtAdvVct_mad.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtAdvVct_mad.c
new file mode 100644
index 000000000000..1984086d5ad4
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtAdvVct_mad.c
@@ -0,0 +1,227 @@
+#include <Copyright.h>
+/*******************************************************************************
+* gtAdvVct.c
+*
+* DESCRIPTION:
+*       API for Marvell Virtual Cable Tester.
+*
+* DEPENDENCIES:
+*       None.
+*
+* FILE REVISION NUMBER:
+*       $Revision: 1 $
+*******************************************************************************/
+#include <msApi.h>
+#include <gtVct.h>
+#include <gtDrvConfig.h>
+#include <gtDrvSwRegs.h>
+#include <gtHwCntl.h>
+#include <gtSem.h>
+
+#include <madApi.h>
+
+/*******************************************************************************
+* gvctGetAdvCableStatus_mad
+*
+* DESCRIPTION:
+*       This routine perform the advanced virtual cable test for the requested
+*       port and returns the the status per MDI pair.
+*
+* INPUTS:
+*       port - logical port number.
+*       mode - advance VCT mode (either First Peak or Maximum Peak)
+*
+* OUTPUTS:
+*       cableStatus - the port copper cable status.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       Internal Gigabit Phys in 88E6165 family and 88E6351 family devices
+*        are supporting this API.
+*
+*******************************************************************************/
+GT_STATUS gvctGetAdvCableDiag_mad
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT        port,
+    IN  GT_ADV_VCT_MODE mode,
+    OUT GT_ADV_CABLE_STATUS *cableStatus
+)
+{
+    GT_STATUS status=GT_OK;
+    GT_BOOL ppuEn;
+    GT_U8 hwPort;
+    GT_PHY_INFO    phyInfo;
+
+    DBG_INFO(("gvctGetCableDiag_mad Called.\n"));
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the port supports VCT */
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (!(phyInfo.flag & GT_PHY_ADV_VCT_CAPABLE))
+    {
+        DBG_INFO(("Not Supported\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Need to disable PPUEn for safe. */
+    if(gsysGetPPUEn(dev,&ppuEn) != GT_OK)
+    {
+        ppuEn = GT_FALSE;
+    }
+
+    if(ppuEn != GT_FALSE)
+    {
+        if((status= gsysSetPPUEn(dev,GT_FALSE)) != GT_OK)
+        {
+            DBG_INFO(("Not able to disable PPUEn.\n"));
+            gtSemGive(dev,dev->phyRegsSem);
+            return status;
+        }
+        gtDelay(250);
+    }
+
+    if ( mdDiagGetAdvCableStatus(&(dev->mad_dev),port,*((MAD_ADV_VCT_MODE *)&mode),(MAD_ADV_CABLE_STATUS*)cableStatus) != MAD_OK)
+    {
+      DBG_INFO(("Failed to run mdDiagGetAdvCableStatus.\n"));
+      gtSemGive(dev,dev->phyRegsSem);
+      return GT_FALSE;
+    }
+
+
+    if(ppuEn != GT_FALSE)
+    {
+        if(gsysSetPPUEn(dev,ppuEn) != GT_OK)
+        {
+            DBG_INFO(("Not able to enable PPUEn.\n"));
+            status = GT_FAIL;
+        }
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return status;
+}
+
+
+/*******************************************************************************
+* gvctGetAdvExtendedStatus_mad
+*
+* DESCRIPTION:
+*       This routine retrieves extended cable status, such as Pair Poloarity,
+*        Pair Swap, and Pair Skew. Note that this routine will be success only
+*        if 1000Base-T Link is up.
+*        Note: Since DSP based cable length in extended status is based on
+*             constants from test results. At present, only E1181, E1111, and
+*             E1112 are available.
+*
+* INPUTS:
+*       dev  - pointer to GT driver structure returned from qdLoadDriver
+*       port - logical port number.
+*
+* OUTPUTS:
+*       extendedStatus - the extended cable status.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*        Supporting Device list:
+*           88E1111, 88E1112, 88E1141~6, 88E1149, and Internal Gigabit Phys
+*            in 88E6165 family and 88E6351 family devices
+*
+*******************************************************************************/
+GT_STATUS gvctGetAdvExtendedStatus_mad
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT   port,
+    OUT GT_ADV_EXTENDED_STATUS *extendedStatus
+)
+{
+    GT_STATUS retVal=GT_OK;
+    GT_U8 hwPort;
+    GT_BOOL ppuEn;
+    GT_PHY_INFO    phyInfo;
+
+    DBG_INFO(("gvctGetAdvExtendedStatus_mad Called.\n"));
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the port supports VCT */
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (!(phyInfo.flag & GT_PHY_EX_CABLE_STATUS))
+    {
+        DBG_INFO(("Not Supported\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Need to disable PPUEn for safe. */
+    if(gsysGetPPUEn(dev,&ppuEn) != GT_OK)
+    {
+        ppuEn = GT_FALSE;
+    }
+
+    if(ppuEn != GT_FALSE)
+    {
+        if((retVal = gsysSetPPUEn(dev,GT_FALSE)) != GT_OK)
+        {
+            DBG_INFO(("Not able to disable PPUEn.\n"));
+            gtSemGive(dev,dev->phyRegsSem);
+            return retVal;
+        }
+        gtDelay(250);
+    }
+
+    if ( mdDiagGet1000BTExtendedStatus(&(dev->mad_dev),port,(MAD_1000BT_EXTENDED_STATUS*)extendedStatus) != MAD_OK)
+    {
+      DBG_INFO(("Failed to run mdDiagGet1000BTExtendedStatus.\n"));
+      gtSemGive(dev,dev->phyRegsSem);
+      return GT_FALSE;
+    }
+
+    if(ppuEn != GT_FALSE)
+    {
+        if(gsysSetPPUEn(dev,ppuEn) != GT_OK)
+        {
+           DBG_INFO(("Not able to enable PPUEn.\n"));
+           return GT_FALSE;
+        }
+    }
+
+        gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtBrgFdb.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtBrgFdb.c
new file mode 100644
index 000000000000..773a11fff14a
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtBrgFdb.c
@@ -0,0 +1,3289 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtBrgFdb.c
+*
+* DESCRIPTION:
+*       API definitions for Multiple Forwarding Databases
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*       $Revision: 9 $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtSem.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+/****************************************************************************/
+/* Forward function declaration.                                            */
+/****************************************************************************/
+static GT_STATUS atuOperationPerform
+(
+    IN      GT_QD_DEV           *dev,
+    IN      GT_ATU_OPERATION    atuOp,
+    INOUT    GT_EXTRA_OP_DATA    *opData,
+    INOUT     GT_ATU_ENTRY        *atuEntry
+);
+
+static GT_STATUS atuStateAppToDev
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_BOOL        unicast,
+    IN  GT_U32        state,
+    OUT GT_U32        *newOne
+);
+
+static GT_STATUS atuStateDevToApp
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_BOOL        unicast,
+    IN  GT_U32        state,
+    OUT GT_U32        *newOne
+);
+
+static GT_STATUS atuGetStats
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_ATU_STAT    *atuStat,
+    OUT GT_U32        *count
+);
+
+
+/*******************************************************************************
+* gfdbSetPortAtuLearnLimit
+*
+* DESCRIPTION:
+*       Port's auto learning limit. When the limit is non-zero value, the number
+*        of MAC addresses that can be learned on this port are limited to the value
+*        specified in this API. When the learn limit has been reached any frame
+*        that ingresses this port with a source MAC address not already in the
+*        address database that is associated with this port will be discarded.
+*        Normal auto-learning will resume on the port as soon as the number of
+*        active unicast MAC addresses associated to this port is less than the
+*        learn limit.
+*        CPU directed ATU Load, Purge, or Move will not have any effect on the
+*        learn limit.
+*        This feature is disabled when the limit is zero.
+*        The following care is needed when enabling this feature:
+*            1) dsable learning on the ports
+*            2) flush all non-static addresses in the ATU
+*            3) define the desired limit for the ports
+*            4) re-enable learing on the ports
+*
+* INPUTS:
+*       port  - logical port number
+*       limit - auto learning limit ( 0 ~ 255 )
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_BAD_PARAM - if limit > 0xFF
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbSetPortAtuLearnLimit
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT      port,
+    IN  GT_U32       limit
+)
+{
+    GT_U16          data, mask;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gfdbSetPortAtuLearnLimit Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Check device if it has fixed ATU Size. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ATU_LIMIT))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (limit > 0xFF)
+    {
+        DBG_INFO(("Bad Parameter\n"));
+        return GT_BAD_PARAM;
+    }
+
+    mask = 0x80FF;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_ATU_LIMIT_READ))
+        mask |= 0x1000;
+
+    data = (GT_U16) limit;
+
+    /* Set the learn limit bits.                  */
+    retVal = hwSetPortRegBits(dev,hwPort, QD_REG_PORT_ATU_CONTROL, mask, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gfdbGetPortAtuLearnLimit
+*
+* DESCRIPTION:
+*      Port's auto learning limit. When the limit is non-zero value, the number
+*        of MAC addresses that can be learned on this port are limited to the value
+*        specified in this API. When the learn limit has been reached any frame
+*        that ingresses this port with a source MAC address not already in the
+*        address database that is associated with this port will be discarded.
+*        Normal auto-learning will resume on the port as soon as the number of
+*        active unicast MAC addresses associated to this port is less than the
+*        learn limit.
+*        CPU directed ATU Load, Purge, or Move will not have any effect on the
+*        learn limit.
+*        This feature is disabled when the limit is zero.
+*        The following care is needed when enabling this feature:
+*            1) dsable learning on the ports
+*            2) flush all non-static addresses in the ATU
+*            3) define the desired limit for the ports
+*            4) re-enable learing on the ports
+*
+* INPUTS:
+*        port  - logical port number
+*
+* OUTPUTS:
+*        limit - auto learning limit ( 0 ~ 255 )
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if limit > 0xFF
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gfdbGetPortAtuLearnLimit
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT      port,
+    OUT GT_U32       *limit
+)
+{
+    GT_U16          data, mask;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gfdbGetPortAtuLearnLimit Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Check device if it has fixed ATU Size. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ATU_LIMIT_READ))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    mask = 0x9000;
+    data = (GT_U16) 0x1000;
+
+    /* Set the learn limit bits.                  */
+    retVal = hwSetPortRegBits(dev,hwPort, QD_REG_PORT_ATU_CONTROL, mask, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    /* Get the ReadLearnLimit bit. */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_ATU_CONTROL, 0, 8, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    *limit = (GT_U32)data;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gfdbGetPortAtuLearnCnt
+*
+* DESCRIPTION:
+*       Read the current number of active unicast MAC addresses associated with
+*        the given port. This counter (LearnCnt) is held at zero if learn limit
+*        (gfdbSetPortAtuLearnLimit API) is set to zero.
+*
+* INPUTS:
+*       port  - logical port number
+*
+* OUTPUTS:
+*       count - current auto learning count
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbGetPortAtuLearnCnt
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT      port,
+    IN  GT_U32       *count
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gfdbGetPortAtuLearnCnt Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Check device if this feature is supported. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ATU_LIMIT))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the ReadLearnCnt bit. */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_ATU_CONTROL, 15, 1, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    if(data == 0)
+    {
+        /* Set the ReadLearnCnt bit. */
+        retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_ATU_CONTROL, 15, 1, 1);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+    }
+
+    /* Get the LearnCnt bits. */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_ATU_CONTROL, 0, 8, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    *count = (GT_U32)data;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gfdbGetAtuAllCount
+*
+* DESCRIPTION:
+*       Counts all entries in the Address Translation Unit.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       count - number of valid entries.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gfdbGetAtuAllCount
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U32         *count
+)
+{
+    GT_U32          dbNum, maxDbNum, numOfEntries;
+    GT_STATUS       retVal;
+    GT_ATU_ENTRY    entry;
+    GT_ATU_STAT        atuStat;
+
+    DBG_INFO(("gfdbGetAtuAllCount Called.\n"));
+
+    if(IS_IN_DEV_GROUP(dev,DEV_ATU_STATS))
+    {
+        atuStat.op = GT_ATU_STATS_ALL;
+        return atuGetStats(dev,&atuStat,count);
+    }
+
+    numOfEntries = 0;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_DBNUM_FULL))
+        maxDbNum = 16;
+    else if(IS_IN_DEV_GROUP(dev,DEV_DBNUM_64))
+        maxDbNum = 64;
+    else if(IS_IN_DEV_GROUP(dev,DEV_DBNUM_256))
+        maxDbNum = 256;
+    else if(IS_IN_DEV_GROUP(dev,DEV_DBNUM_4096))
+        maxDbNum = 4096;
+    else
+        maxDbNum = 1;
+
+    for(dbNum=0; dbNum<maxDbNum; dbNum++)
+    {
+        entry.DBNum = (GT_U16)dbNum;
+
+        if(IS_IN_DEV_GROUP(dev,DEV_BROADCAST_INVALID))
+            gtMemSet(entry.macAddr.arEther,0,sizeof(GT_ETHERADDR));
+        else
+            gtMemSet(entry.macAddr.arEther,0xFF,sizeof(GT_ETHERADDR));
+
+        while(1)
+        {
+            retVal = atuOperationPerform(dev,GET_NEXT_ENTRY,NULL,&entry);
+            if(retVal != GT_OK)
+            {
+                DBG_INFO(("Failed.\n"));
+                return retVal;
+            }
+
+            if(IS_BROADCAST_MAC(entry.macAddr))
+            {
+                if(IS_IN_DEV_GROUP(dev,DEV_BROADCAST_INVALID))
+                    break;
+                else if(entry.entryState.ucEntryState == 0)
+                    break;
+                numOfEntries++;
+                break;
+            }
+
+            numOfEntries++;
+        }
+    }
+
+    *count = numOfEntries;
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gfdbGetAtuAllCountInDBNum
+*
+* DESCRIPTION:
+*       Counts all entries in the defined FID (or DBNum).
+*
+* INPUTS:
+*       dbNum -
+*
+* OUTPUTS:
+*       count - number of valid entries in FID (or DBNum).
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gfdbGetAtuAllCountInDBNum
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32         dbNum,
+    OUT GT_U32         *count
+)
+{
+    GT_U32          numOfEntries;
+    GT_STATUS       retVal;
+    GT_ATU_ENTRY    entry;
+    GT_ATU_STAT        atuStat;
+
+    DBG_INFO(("gfdbGetAtuAllCountInDBNum Called.\n"));
+
+    if(IS_IN_DEV_GROUP(dev,DEV_ATU_STATS))
+    {
+        atuStat.op = GT_ATU_STATS_ALL_FID;
+        atuStat.DBNum = dbNum;
+        return atuGetStats(dev,&atuStat,count);
+    }
+
+    numOfEntries = 0;
+
+    entry.DBNum = (GT_U16)dbNum;
+
+    if(IS_IN_DEV_GROUP(dev,DEV_BROADCAST_INVALID))
+        gtMemSet(entry.macAddr.arEther,0,sizeof(GT_ETHERADDR));
+    else
+        gtMemSet(entry.macAddr.arEther,0xFF,sizeof(GT_ETHERADDR));
+
+    while(1)
+    {
+        retVal = atuOperationPerform(dev,GET_NEXT_ENTRY,NULL,&entry);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+
+        if(IS_BROADCAST_MAC(entry.macAddr))
+        {
+            if(IS_IN_DEV_GROUP(dev,DEV_BROADCAST_INVALID))
+                break;
+            else if(entry.entryState.ucEntryState == 0)
+                break;
+            numOfEntries++;
+            break;
+        }
+
+        numOfEntries++;
+    }
+
+    *count = numOfEntries;
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gfdbGetAtuDynamicCountInDBNum
+*
+* DESCRIPTION:
+*       Counts all non-static entries in the defined FID (or DBNum).
+*
+* INPUTS:
+*       dbNum -
+*
+* OUTPUTS:
+*       count - number of valid non-static entries in FID (or DBNum).
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gfdbGetAtuDynamicCountInDBNum
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32         dbNum,
+    OUT GT_U32         *count
+)
+{
+    GT_U32          numOfEntries, tmpState;
+    GT_STATUS       retVal;
+    GT_ATU_ENTRY    entry;
+    GT_ATU_UC_STATE    state;
+    GT_ATU_STAT        atuStat;
+
+    DBG_INFO(("gfdbGetAtuDynamicCountInDBNum Called.\n"));
+
+    if(IS_IN_DEV_GROUP(dev,DEV_ATU_STATS))
+    {
+        atuStat.op = GT_ATU_STATS_NON_STATIC_FID;
+        atuStat.DBNum = dbNum;
+        return atuGetStats(dev,&atuStat,count);
+    }
+
+    numOfEntries = 0;
+
+    entry.DBNum = (GT_U16)dbNum;
+
+    if(IS_IN_DEV_GROUP(dev,DEV_BROADCAST_INVALID))
+        gtMemSet(entry.macAddr.arEther,0,sizeof(GT_ETHERADDR));
+    else
+        gtMemSet(entry.macAddr.arEther,0xFF,sizeof(GT_ETHERADDR));
+
+    while(1)
+    {
+        retVal = atuOperationPerform(dev,GET_NEXT_ENTRY,NULL,&entry);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+
+        if(IS_BROADCAST_MAC(entry.macAddr))
+            break;
+
+        if(IS_MULTICAST_MAC(entry.macAddr))
+        {
+            continue;
+        }
+
+        atuStateDevToApp(dev,GT_TRUE,entry.entryState.ucEntryState,&tmpState);
+        state = (GT_ATU_UC_STATE)tmpState;
+        if (state == GT_UC_DYNAMIC)
+        {
+            numOfEntries++;
+        }
+    }
+
+    *count = numOfEntries;
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gfdbSetAtuSize
+*
+* DESCRIPTION:
+*       Sets the Mac address table size.
+*
+* INPUTS:
+*       size    - Mac address table size.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbSetAtuSize
+(
+    IN GT_QD_DEV    *dev,
+    IN ATU_SIZE     size
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gfdbSetAtuSize Called.\n"));
+
+    switch(size)
+    {
+        case ATU_SIZE_256:
+            if (IS_IN_DEV_GROUP(dev,DEV_ATU_256_2048))
+                data = 0;
+            else
+                return GT_NOT_SUPPORTED;
+            break;
+        case ATU_SIZE_512:
+        case ATU_SIZE_1024:
+        case ATU_SIZE_2048:
+            if (IS_IN_DEV_GROUP(dev,DEV_ATU_256_2048))
+                data = (GT_U16)size;
+            else
+                data = (GT_U16)size - 1;
+            break;
+
+        case ATU_SIZE_4096:
+            if ((IS_IN_DEV_GROUP(dev,DEV_ATU_256_2048))||(IS_IN_DEV_GROUP(dev,DEV_ATU_562_2048)))
+                return GT_NOT_SUPPORTED;
+            else
+                data = 3;
+            break;
+        default:
+            return GT_NOT_SUPPORTED;
+    }
+
+    /* Check device if it has fixed ATU Size. */
+    if (IS_IN_DEV_GROUP(dev,DEV_ATU_SIZE_FIXED))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Set the Software reset bit.                  */
+    retVal = hwSetGlobalRegField(dev,QD_REG_ATU_CONTROL,12,2,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    /* Make sure the reset operation is completed.  */
+#ifdef GT_RMGMT_ACCESS
+    /*    if (IS_IN_DEV_GROUP(dev,DEV_RMGMT)) */
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_1;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_GLOBAL_STATUS;
+      regAccess.rw_reg_list[0].data = 11;
+
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->atuRegsSem);
+        return retVal;
+      }
+    }
+#else
+    {
+    data = 0;
+    while(data == 0)
+    {
+        retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_STATUS,11,1,&data);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+    }
+    }
+#endif
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gfdbGetAgingTimeRange
+*
+* DESCRIPTION:
+*       Gets the maximal and minimum age times that the hardware can support.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       maxTimeout - max aging time in secounds.
+*       minTimeout - min aging time in secounds.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbGetAgingTimeRange
+(
+    IN GT_QD_DEV    *dev,
+    OUT GT_U32 *maxTimeout,
+    OUT GT_U32 *minTimeout
+)
+{
+    DBG_INFO(("gfdbGetAgingTimeRange Called.\n"));
+    if((maxTimeout == NULL) || (minTimeout == NULL))
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_ATU_15SEC_AGING))
+    {
+        *minTimeout = 15;
+        *maxTimeout = 3825;
+    }
+    else
+    {
+        *minTimeout = 16;
+        *maxTimeout = 4080;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gfdbGetAgingTimeout
+*
+* DESCRIPTION:
+*       Gets the timeout period in seconds for aging out dynamically learned
+*       forwarding information. The returned value may not be the same as the value
+*        programmed with <gfdbSetAgingTimeout>. Please refer to the description of
+*        <gfdbSetAgingTimeout>.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       timeout - aging time in seconds.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbGetAgingTimeout
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U32       *timeout
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U16            timeBase;
+
+    DBG_INFO(("gfdbGetAgingTimeout Called.\n"));
+
+    if (IS_IN_DEV_GROUP(dev,DEV_ATU_15SEC_AGING))
+        timeBase = 15;
+    else
+        timeBase = 16;
+
+    /* Get the Time Out value.              */
+    retVal = hwGetGlobalRegField(dev,QD_REG_ATU_CONTROL,4,8,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    *timeout = data*timeBase;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gfdbSetAgingTimeout
+*
+* DESCRIPTION:
+*       Sets the timeout period in seconds for aging out dynamically learned
+*       forwarding information. The standard recommends 300 sec.
+*        Supported aging timeout values are multiple of time-base, where time-base
+*        is either 15 or 16 seconds, depending on the Switch device. For example,
+*        88E6063 uses time-base 16, and so supported aging timeouts are 0,16,32,
+*        48,..., and 4080. If unsupported timeout value (bigger than 16) is used,
+*        the value will be rounded to the nearest supported value smaller than the
+*        given timeout. If the given timeout is less than 16, minimum timeout value
+*        16 will be used instead. E.g.) 35 becomes 32 and 5 becomes 16.
+*        <gfdbGetAgingTimeRange> function can be used to find the time-base.
+*
+* INPUTS:
+*       timeout - aging time in seconds.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbSetAgingTimeout
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U32 timeout
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U16            timeBase;
+
+    DBG_INFO(("gfdbSetAgingTimeout Called.\n"));
+
+    if (IS_IN_DEV_GROUP(dev,DEV_ATU_15SEC_AGING))
+        timeBase = 15;
+    else
+        timeBase = 16;
+
+    if((timeout < timeBase) && (timeout != 0))
+    {
+        data = 1;
+    }
+    else
+    {
+        data = (GT_U16)(timeout/timeBase);
+       if (data & 0xFF00)
+            data = 0xFF;
+    }
+
+    /* Set the Time Out value.              */
+    retVal = hwSetGlobalRegField(dev,QD_REG_ATU_CONTROL,4,8,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gfdbGetLearn2All
+*
+* DESCRIPTION:
+*        When more than one Marvell device is used to form a single 'switch', it
+*        may be desirable for all devices in the 'switch' to learn any address this
+*        device learns. When this bit is set to a one all other devices in the
+*        'switch' learn the same addresses this device learns. When this bit is
+*        cleared to a zero, only the devices that actually receive frames will learn
+*        from those frames. This mode typically supports more active MAC addresses
+*        at one time as each device in the switch does not need to learn addresses
+*        it may nerver use.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        mode  - GT_TRUE if Learn2All is enabled, GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK           - on success
+*        GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*
+*******************************************************************************/
+GT_STATUS gfdbGetLearn2All
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL     *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetLearn2All Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if ((!IS_IN_DEV_GROUP(dev,DEV_88E6093_FAMILY)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_88EC000_FAMILY)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_88ESPANNAK_FAMILY)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the Learn2All. */
+    retVal = hwGetGlobalRegField(dev,QD_REG_ATU_CONTROL, 3, 1, &data);
+
+    BIT_2_BOOL(data, *mode);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+/*******************************************************************************
+* gfdbSetLearn2All
+*
+* DESCRIPTION:
+*        Enable or disable Learn2All mode.
+*
+* INPUTS:
+*        mode - GT_TRUE to set Learn2All, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbSetLearn2All
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        mode
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gprtSetLearn2All Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if ((!IS_IN_DEV_GROUP(dev,DEV_88E6093_FAMILY)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_88EC000_FAMILY)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_88ESPANNAK_FAMILY)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set Learn2All. */
+    retVal = hwSetGlobalRegField(dev,QD_REG_ATU_CONTROL, 3, 1, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gfdbGetMacAvb
+*
+* DESCRIPTION:
+*        ATU MAC entry in AVB mode.
+*        When enabled, ATU entries operate in AVB mode:
+*
+*        GT_ATU_UC_STATE - support
+*            GT_UC_NO_PRI_STATIC_AVB_ENTRY, and
+*            GT_UC_STATIC_AVB_ENTRY
+*
+*        GT_ATU_MC_STATE - support
+*            GT_MC_STATIC_AVB_ENTRY, and
+*            GT_MC_PRIO_STATIC_AVB_ENTRY
+*
+*        When disabled, ATU entries operate in non-AVB mode:
+*
+*        GT_ATU_UC_STATE - support
+*            GT_UC_NO_PRI_STATIC_NRL, and
+*            GT_UC_STATIC_NRL
+*
+*        GT_ATU_MC_STATE - support
+*            GT_MC_STATIC_UNLIMITED_RATE, and
+*            GT_MC_PRIO_STATIC_UNLIMITED_RATE
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        mode  - GT_TRUE if MacAvb is enabled, GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK           - on success
+*        GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*
+*******************************************************************************/
+GT_STATUS gfdbGetMacAvb
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL     *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gfdbGetMacAvb Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_MAC_AVB))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the bit. */
+    retVal = hwGetGlobalRegField(dev,QD_REG_ATU_CONTROL, 15, 1, &data);
+
+    BIT_2_BOOL(data, *mode);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+/*******************************************************************************
+* gfdbSetMacAvb
+*
+* DESCRIPTION:
+*        ATU MAC entry in AVB mode.
+*        When enabled, ATU entries operate in AVB mode:
+*
+*        GT_ATU_UC_STATE - support
+*            GT_UC_NO_PRI_STATIC_AVB_ENTRY, and
+*            GT_UC_STATIC_AVB_ENTRY
+*
+*        GT_ATU_MC_STATE - support
+*            GT_MC_STATIC_AVB_ENTRY, and
+*            GT_MC_PRIO_STATIC_AVB_ENTRY
+*
+*        When disabled, ATU entries operate in non-AVB mode:
+*
+*        GT_ATU_UC_STATE - support
+*            GT_UC_NO_PRI_STATIC_NRL, and
+*            GT_UC_STATIC_NRL
+*
+*        GT_ATU_MC_STATE - support
+*            GT_MC_STATIC_UNLIMITED_RATE, and
+*            GT_MC_PRIO_STATIC_UNLIMITED_RATE
+*
+* INPUTS:
+*        mode - GT_TRUE to enable MacAvb, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gfdbSetMacAvb
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        mode
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gprtSetMacAvb Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_MAC_AVB))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set the bit */
+    retVal = hwSetGlobalRegField(dev,QD_REG_ATU_CONTROL, 15, 1, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gfdbGetAtuDynamicCount
+*
+* DESCRIPTION:
+*       Gets the current number of dynamic unicast (non-static) entries in this
+*       Filtering Database.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       numDynEntries - number of dynamic entries.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NO_SUCH - vlan does not exist.
+*
+* COMMENTS:
+*       None
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbGetAtuDynamicCount
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U32         *numDynEntries
+)
+{
+    GT_U32          dbNum, maxDbNum, numOfEntries, tmpState;
+    GT_STATUS       retVal;
+    GT_ATU_ENTRY    entry;
+    GT_ATU_UC_STATE    state;
+    GT_ATU_STAT        atuStat;
+
+    DBG_INFO(("gfdbGetAtuDynamicCount Called.\n"));
+
+    if(IS_IN_DEV_GROUP(dev,DEV_ATU_STATS))
+    {
+        atuStat.op = GT_ATU_STATS_NON_STATIC;
+        return atuGetStats(dev,&atuStat,numDynEntries);
+    }
+
+    numOfEntries = 0;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_DBNUM_FULL))
+        maxDbNum = 16;
+    else if(IS_IN_DEV_GROUP(dev,DEV_DBNUM_64))
+        maxDbNum = 64;
+    else if(IS_IN_DEV_GROUP(dev,DEV_DBNUM_256))
+        maxDbNum = 256;
+    else if(IS_IN_DEV_GROUP(dev,DEV_DBNUM_4096))
+        maxDbNum = 4096;
+    else
+        maxDbNum = 1;
+
+    for(dbNum=0; dbNum<maxDbNum; dbNum++)
+    {
+        entry.DBNum = (GT_U16)dbNum;
+
+        if(IS_IN_DEV_GROUP(dev,DEV_BROADCAST_INVALID))
+            gtMemSet(entry.macAddr.arEther,0,sizeof(GT_ETHERADDR));
+        else
+            gtMemSet(entry.macAddr.arEther,0xFF,sizeof(GT_ETHERADDR));
+
+        while(1)
+        {
+            retVal = atuOperationPerform(dev,GET_NEXT_ENTRY,NULL,&entry);
+            if(retVal != GT_OK)
+            {
+                DBG_INFO(("Failed.\n"));
+                return retVal;
+            }
+
+            if(IS_BROADCAST_MAC(entry.macAddr))
+                break;
+
+            if(IS_MULTICAST_MAC(entry.macAddr))
+            {
+                continue;
+            }
+
+            atuStateDevToApp(dev,GT_TRUE,(GT_U32)entry.entryState.ucEntryState,&tmpState);
+            state = (GT_ATU_UC_STATE)tmpState;
+            if (state == GT_UC_DYNAMIC)
+            {
+                numOfEntries++;
+            }
+        }
+    }
+
+    *numDynEntries = numOfEntries;
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gfdbGetAtuEntryFirst
+*
+* DESCRIPTION:
+*       Gets first lexicographic MAC address entry from the ATU.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       atuEntry - match Address translate unit entry.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NO_SUCH - table is empty.
+*
+* COMMENTS:
+*       Search starts from Mac[00:00:00:00:00:00]
+*
+*        DBNum in atuEntry -
+*            ATU MAC Address Database number. If multiple address
+*            databases are not being used, DBNum should be zero.
+*            If multiple address databases are being used, this value
+*            should be set to the desired address database number.
+*
+*******************************************************************************/
+GT_STATUS gfdbGetAtuEntryFirst
+(
+    IN GT_QD_DEV    *dev,
+    OUT GT_ATU_ENTRY    *atuEntry
+)
+{
+    GT_STATUS       retVal;
+    GT_ATU_ENTRY    entry;
+    GT_U32 data;
+
+    DBG_INFO(("gfdbGetAtuEntryFirst Called.\n"));
+
+    if(IS_IN_DEV_GROUP(dev,DEV_BROADCAST_INVALID))
+        gtMemSet(entry.macAddr.arEther,0,sizeof(GT_ETHERADDR));
+    else
+        gtMemSet(entry.macAddr.arEther,0xFF,sizeof(GT_ETHERADDR));
+
+    entry.DBNum = atuEntry->DBNum;
+
+    DBG_INFO(("DBNum : %i\n",entry.DBNum));
+
+    retVal = atuOperationPerform(dev,GET_NEXT_ENTRY,NULL,&entry);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (atuOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    if(IS_BROADCAST_MAC(entry.macAddr))
+    {
+        if(IS_IN_DEV_GROUP(dev,DEV_BROADCAST_INVALID))
+        {
+            DBG_INFO(("Failed (Invalid Mac).\n"));
+            return GT_NO_SUCH;
+        }
+        else if(entry.entryState.ucEntryState == 0)
+        {
+            DBG_INFO(("Failed (Invalid Mac).\n"));
+            return GT_NO_SUCH;
+        }
+    }
+
+    gtMemCpy(atuEntry->macAddr.arEther,entry.macAddr.arEther,6);
+    atuEntry->portVec   = GT_PORTVEC_2_LPORTVEC(entry.portVec);
+    atuEntry->prio      = entry.prio;
+    atuEntry->trunkMember = entry.trunkMember;
+    atuEntry->exPrio.useMacFPri = entry.exPrio.useMacFPri;
+    atuEntry->exPrio.macFPri = entry.exPrio.macFPri;
+    atuEntry->exPrio.macQPri = entry.exPrio.macQPri;
+
+    if(IS_MULTICAST_MAC(entry.macAddr))
+    {
+        if(dev->deviceId == GT_88E6051)
+        {
+            DBG_INFO(("Failed.\n"));
+            return GT_FAIL;
+        }
+
+        atuStateDevToApp(dev,GT_FALSE,entry.entryState.ucEntryState,
+                        &data);
+        atuEntry->entryState.mcEntryState = data;
+    }
+    else
+    {
+        atuStateDevToApp(dev,GT_TRUE,entry.entryState.ucEntryState,
+                        &data);
+        atuEntry->entryState.ucEntryState = data;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gfdbGetAtuEntryNext
+*
+* DESCRIPTION:
+*       Gets next lexicographic MAC address from the specified Mac Addr.
+*
+* INPUTS:
+*       atuEntry - the Mac Address to start the search.
+*
+* OUTPUTS:
+*       atuEntry - match Address translate unit entry.
+*
+* RETURNS:
+*       GT_OK      - on success.
+*       GT_FAIL    - on error or entry does not exist.
+*       GT_NO_SUCH - no more entries.
+*
+* COMMENTS:
+*       Search starts from atu.macAddr[xx:xx:xx:xx:xx:xx] specified by the
+*       user.
+*
+*        DBNum in atuEntry -
+*            ATU MAC Address Database number. If multiple address
+*            databases are not being used, DBNum should be zero.
+*            If multiple address databases are being used, this value
+*            should be set to the desired address database number.
+*
+*******************************************************************************/
+GT_STATUS gfdbGetAtuEntryNext
+(
+    IN GT_QD_DEV    *dev,
+    INOUT GT_ATU_ENTRY  *atuEntry
+)
+{
+    GT_STATUS       retVal;
+    GT_ATU_ENTRY    entry;
+    GT_U32 data;
+
+    DBG_INFO(("gfdbGetAtuEntryNext Called.\n"));
+
+    if(IS_BROADCAST_MAC(atuEntry->macAddr))
+    {
+           return GT_NO_SUCH;
+    }
+
+    gtMemCpy(entry.macAddr.arEther,atuEntry->macAddr.arEther,6);
+
+    entry.DBNum = atuEntry->DBNum;
+    DBG_INFO(("DBNum : %i\n",entry.DBNum));
+
+    retVal = atuOperationPerform(dev,GET_NEXT_ENTRY,NULL,&entry);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (atuOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    if(IS_BROADCAST_MAC(entry.macAddr))
+    {
+        if(IS_IN_DEV_GROUP(dev,DEV_BROADCAST_INVALID))
+        {
+            DBG_INFO(("Failed (Invalid Mac).\n"));
+            return GT_NO_SUCH;
+        }
+        else if(entry.entryState.ucEntryState == 0)
+        {
+            DBG_INFO(("Failed (Invalid Mac).\n"));
+            return GT_NO_SUCH;
+        }
+    }
+
+    gtMemCpy(atuEntry->macAddr.arEther,entry.macAddr.arEther,6);
+    atuEntry->portVec   = GT_PORTVEC_2_LPORTVEC(entry.portVec);
+    atuEntry->prio      = entry.prio;
+    atuEntry->trunkMember = entry.trunkMember;
+    atuEntry->exPrio.useMacFPri = entry.exPrio.useMacFPri;
+    atuEntry->exPrio.macFPri = entry.exPrio.macFPri;
+    atuEntry->exPrio.macQPri = entry.exPrio.macQPri;
+
+    if(IS_MULTICAST_MAC(entry.macAddr))
+    {
+        if(dev->deviceId == GT_88E6051)
+        {
+            DBG_INFO(("Failed.\n"));
+            return GT_FAIL;
+        }
+
+        atuStateDevToApp(dev,GT_FALSE,(GT_U32)entry.entryState.ucEntryState,
+                        &data);
+        atuEntry->entryState.mcEntryState = data;
+    }
+    else
+    {
+        atuStateDevToApp(dev,GT_TRUE,(GT_U32)entry.entryState.ucEntryState,
+                        &data);
+        atuEntry->entryState.ucEntryState = data;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gfdbFindAtuMacEntry
+*
+* DESCRIPTION:
+*       Find FDB entry for specific MAC address from the ATU.
+*
+* INPUTS:
+*       atuEntry - the Mac address to search.
+*
+* OUTPUTS:
+*       found    - GT_TRUE, if the appropriate entry exists.
+*       atuEntry - the entry parameters.
+*
+* RETURNS:
+*       GT_OK      - on success.
+*       GT_FAIL    - on error or entry does not exist.
+*       GT_NO_SUCH - no more entries.
+*       GT_BAD_PARAM    - on bad parameter
+*
+* COMMENTS:
+*        DBNum in atuEntry -
+*            ATU MAC Address Database number. If multiple address
+*            databases are not being used, DBNum should be zero.
+*            If multiple address databases are being used, this value
+*            should be set to the desired address database number.
+*
+*******************************************************************************/
+GT_STATUS gfdbFindAtuMacEntry
+(
+    IN GT_QD_DEV    *dev,
+    INOUT GT_ATU_ENTRY  *atuEntry,
+    OUT GT_BOOL         *found
+)
+{
+    GT_STATUS       retVal;
+    GT_ATU_ENTRY    entry;
+    int           i;
+    GT_U32 data;
+
+    DBG_INFO(("gfdbFindAtuMacEntry Called.\n"));
+    *found = GT_FALSE;
+    gtMemCpy(entry.macAddr.arEther,atuEntry->macAddr.arEther,6);
+    entry.DBNum = atuEntry->DBNum;
+    /* Decrement 1 from mac address.    */
+    for(i=5; i >= 0; i--)
+    {
+        if(entry.macAddr.arEther[i] != 0)
+        {
+            entry.macAddr.arEther[i] -= 1;
+            break;
+        }
+        else
+            entry.macAddr.arEther[i] = 0xFF;
+    }
+
+    /* Check if the given mac equals zero   */
+    if((i == -1) && IS_IN_DEV_GROUP(dev,DEV_BROADCAST_INVALID))
+    {
+        DBG_INFO(("Address should not be all zeros.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    retVal = atuOperationPerform(dev,GET_NEXT_ENTRY,NULL,&entry);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    if(IS_BROADCAST_MAC(entry.macAddr))
+    {
+        if(IS_IN_DEV_GROUP(dev,DEV_BROADCAST_INVALID))
+        {
+            DBG_INFO(("Failed (Broadcast addr is not valid).\n"));
+            return GT_NO_SUCH;
+        }
+        else if(entry.entryState.ucEntryState == 0)
+        {
+            DBG_INFO(("Failed (Invalid Mac).\n"));
+            return GT_NO_SUCH;
+        }
+    }
+    if(gtMemCmp((char*)atuEntry->macAddr.arEther,(char*)entry.macAddr.arEther,ETHERNET_HEADER_SIZE))
+    {
+#ifdef __KERNELL__
+      DBG_INFO(("@@@@@@@@@@@@@  error gfdbFindAtuMacEntry:check: %02x:%02x:%02x:%02x:%02x:%02x get: %02x:%02x:%02x:%02x:%02x:%02x\n",
+      atuEntry->macAddr.arEther[0],
+      atuEntry->macAddr.arEther[1],
+      atuEntry->macAddr.arEther[2],
+      atuEntry->macAddr.arEther[3],
+      atuEntry->macAddr.arEther[4],
+      atuEntry->macAddr.arEther[5],
+      entry.macAddr.arEther[0],
+      entry.macAddr.arEther[1],
+      entry.macAddr.arEther[2],
+      entry.macAddr.arEther[3],
+      entry.macAddr.arEther[4],
+      entry.macAddr.arEther[5]
+      ));
+#endif
+        DBG_INFO(("Failed.\n"));
+        return GT_NO_SUCH;
+    }
+
+    atuEntry->portVec   = GT_PORTVEC_2_LPORTVEC(entry.portVec);
+    atuEntry->prio      = entry.prio;
+    atuEntry->trunkMember = entry.trunkMember;
+    atuEntry->exPrio.useMacFPri = entry.exPrio.useMacFPri;
+    atuEntry->exPrio.macFPri = entry.exPrio.macFPri;
+    atuEntry->exPrio.macQPri = entry.exPrio.macQPri;
+
+    if(IS_MULTICAST_MAC(entry.macAddr))
+    {
+        if(dev->deviceId == GT_88E6051)
+        {
+            DBG_INFO(("Failed.\n"));
+            return GT_FAIL;
+        }
+
+        atuStateDevToApp(dev,GT_FALSE,(GT_U32)entry.entryState.ucEntryState,
+                        &data);
+        atuEntry->entryState.mcEntryState = data;
+    }
+    else
+    {
+        atuStateDevToApp(dev,GT_TRUE,(GT_U32)entry.entryState.ucEntryState,
+                        &data);
+        atuEntry->entryState.ucEntryState = data;
+    }
+
+    *found = GT_TRUE;
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gfdbFlush
+*
+* DESCRIPTION:
+*       This routine flush all or unblocked addresses from the MAC Address
+*       Table.
+*
+* INPUTS:
+*       flushCmd - the flush operation type.
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NO_RESOURCE  - failed to allocate a t2c struct
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbFlush
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_FLUSH_CMD flushCmd
+)
+{
+    GT_STATUS       retVal;
+    GT_ATU_ENTRY    entry;
+
+    DBG_INFO(("gfdbFlush Called.\n"));
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_STATIC_ADDR))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    entry.DBNum = 0;
+    entry.entryState.ucEntryState = 0;
+
+    if(flushCmd == GT_FLUSH_ALL)
+        retVal = atuOperationPerform(dev,FLUSH_ALL,NULL,&entry);
+    else
+        retVal = atuOperationPerform(dev,FLUSH_UNLOCKED,NULL,&entry);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gfdbFlushInDB
+*
+* DESCRIPTION:
+*       This routine flush all or unblocked addresses from the particular
+*       ATU Database (DBNum). If multiple address databases are being used, this
+*        API can be used to flush entries in a particular DBNum database.
+*
+* INPUTS:
+*       flushCmd - the flush operation type.
+*        DBNum     - ATU MAC Address Database Number.
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NOT_SUPPORTED- if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbFlushInDB
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_FLUSH_CMD flushCmd,
+    IN GT_U32 DBNum
+)
+{
+    GT_STATUS       retVal;
+    GT_ATU_ENTRY    entry;
+
+    DBG_INFO(("gfdbFlush Called.\n"));
+    DBG_INFO(("gfdbFush: dev=%x, dev->atuRegsSem=%d \n",dev, dev->atuRegsSem));
+
+    /* check if device supports this feature */
+    if ((!IS_IN_DEV_GROUP(dev,DEV_DBNUM_FULL)) &&
+        (!IS_IN_DEV_GROUP(dev,DEV_DBNUM_64)) &&
+        (!IS_IN_DEV_GROUP(dev,DEV_DBNUM_4096)) &&
+        (!IS_IN_DEV_GROUP(dev,DEV_DBNUM_256)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    entry.DBNum = (GT_U16)DBNum;
+    entry.entryState.ucEntryState = 0;
+
+    if(flushCmd == GT_FLUSH_ALL)
+        retVal = atuOperationPerform(dev,FLUSH_ALL_IN_DB,NULL,&entry);
+    else
+        retVal = atuOperationPerform(dev,FLUSH_UNLOCKED_IN_DB,NULL,&entry);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gfdbMove
+*
+* DESCRIPTION:
+*       This routine moves all or unblocked addresses from a port to another.
+*
+* INPUTS:
+*         moveCmd  - the move operation type.
+*        moveFrom - port where moving from
+*        moveTo   - port where moving to
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbMove
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_MOVE_CMD  moveCmd,
+    IN GT_LPORT        moveFrom,
+    IN GT_LPORT        moveTo
+)
+{
+    GT_STATUS       retVal;
+    GT_ATU_ENTRY    entry;
+    GT_EXTRA_OP_DATA    opData;
+
+    DBG_INFO(("gfdbMove Called.\n"));
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_802_1W))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    entry.DBNum = 0;
+    entry.entryState.ucEntryState = 0xF;
+    if (moveTo == 0xF)
+        opData.moveTo = moveTo;
+    else
+        opData.moveTo = (GT_U32)GT_LPORT_2_PORT(moveTo);
+    opData.moveFrom = (GT_U32)GT_LPORT_2_PORT(moveFrom);
+
+    if((opData.moveTo == 0xFF) || (opData.moveFrom == 0xFF))
+        return GT_BAD_PARAM;
+
+    if(moveCmd == GT_MOVE_ALL)
+        retVal = atuOperationPerform(dev,FLUSH_ALL,&opData,&entry);
+    else
+        retVal = atuOperationPerform(dev,FLUSH_UNLOCKED,&opData,&entry);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gfdbMoveInDB
+*
+* DESCRIPTION:
+*       This routine move all or unblocked addresses which are in the particular
+*       ATU Database (DBNum) from a port to another.
+*
+* INPUTS:
+*       moveCmd  - the move operation type.
+*        DBNum     - ATU MAC Address Database Number.
+*        moveFrom - port where moving from
+*        moveTo   - port where moving to
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NOT_SUPPORTED- if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbMoveInDB
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_MOVE_CMD     moveCmd,
+    IN GT_U32         DBNum,
+    IN GT_LPORT        moveFrom,
+    IN GT_LPORT        moveTo
+)
+{
+    GT_STATUS       retVal;
+    GT_ATU_ENTRY    entry;
+    GT_EXTRA_OP_DATA    opData;
+
+    DBG_INFO(("gfdbMoveInDB Called.\n"));
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_802_1W))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    entry.DBNum = (GT_U16)DBNum;
+    entry.entryState.ucEntryState = 0xF;
+
+    if (moveTo == 0xF)
+        opData.moveTo = moveTo;
+    else
+        opData.moveTo = (GT_U32)GT_LPORT_2_PORT(moveTo);
+    opData.moveFrom = (GT_U32)GT_LPORT_2_PORT(moveFrom);
+
+    if((opData.moveTo == 0xFF) || (opData.moveFrom == 0xFF))
+        return GT_BAD_PARAM;
+
+    if(moveCmd == GT_MOVE_ALL)
+        retVal = atuOperationPerform(dev,FLUSH_ALL_IN_DB,&opData,&entry);
+    else
+        retVal = atuOperationPerform(dev,FLUSH_UNLOCKED_IN_DB,&opData,&entry);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gfdbRemovePort
+*
+* DESCRIPTION:
+*       This routine deassociages all or unblocked addresses from a port.
+*
+* INPUTS:
+*       moveCmd - the move operation type.
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbRemovePort
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_MOVE_CMD     moveCmd,
+    IN GT_LPORT        port
+)
+{
+    DBG_INFO(("gfdbRemovePort Called.\n"));
+
+    return gfdbMove(dev,moveCmd,port,(GT_LPORT)0xF);
+}
+
+
+/*******************************************************************************
+* gfdbRemovePortInDB
+*
+* DESCRIPTION:
+*       This routine deassociages all or unblocked addresses from a port in the
+*       particular ATU Database (DBNum).
+*
+* INPUTS:
+*       moveCmd  - the move operation type.
+*       port - the logical port number.
+*        DBNum     - ATU MAC Address Database Number.
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NOT_SUPPORTED- if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbRemovePortInDB
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_MOVE_CMD     moveCmd,
+    IN GT_LPORT        port,
+    IN GT_U32         DBNum
+)
+{
+    DBG_INFO(("gfdbRemovePortInDB Called.\n"));
+
+    return gfdbMoveInDB(dev,moveCmd,DBNum,port,(GT_LPORT)0xF);
+}
+
+
+/*******************************************************************************
+* gfdbAddMacEntry
+*
+* DESCRIPTION:
+*       Creates the new entry in MAC address table.
+*
+* INPUTS:
+*       macEntry    - mac address entry to insert to the ATU.
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK          - on success
+*       GT_FAIL        - on error
+*       GT_BAD_PARAM   - on invalid port vector
+*
+* COMMENTS:
+*        DBNum in atuEntry -
+*            ATU MAC Address Database number. If multiple address
+*            databases are not being used, DBNum should be zero.
+*            If multiple address databases are being used, this value
+*            should be set to the desired address database number.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbAddMacEntry
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_ATU_ENTRY *macEntry
+)
+{
+    GT_STATUS       retVal;
+    GT_ATU_ENTRY    entry;
+    GT_U32 data;
+
+    DBG_INFO(("gfdbAddMacEntry Called.\n"));
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_STATIC_ADDR))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    gtMemCpy(entry.macAddr.arEther,macEntry->macAddr.arEther,6);
+    entry.DBNum        = macEntry->DBNum;
+    entry.portVec     = GT_LPORTVEC_2_PORTVEC(macEntry->portVec);
+    if(entry.portVec == GT_INVALID_PORT_VEC)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    if(IS_IN_DEV_GROUP(dev,DEV_ATU_EXT_PRI))
+    {
+        if(IS_IN_DEV_GROUP(dev,DEV_FQPRI_IN_TABLE))
+        {
+            entry.exPrio.useMacFPri = macEntry->exPrio.useMacFPri;
+            entry.exPrio.macFPri = macEntry->exPrio.macFPri;
+            entry.exPrio.macQPri = macEntry->exPrio.macQPri;
+        }
+        else
+        {
+            entry.exPrio.useMacFPri = 0;
+            entry.exPrio.macFPri = macEntry->exPrio.macQPri;
+            entry.exPrio.macQPri = macEntry->exPrio.macQPri;
+        }
+        entry.prio        = 0;
+    }
+    else
+    {
+        entry.exPrio.useMacFPri = 0;
+        entry.exPrio.macFPri = 0;
+        entry.exPrio.macQPri = 0;
+        entry.prio        = macEntry->prio;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_TRUNK))
+    {
+        entry.trunkMember = macEntry->trunkMember;
+    }
+    else
+    {
+        entry.trunkMember = GT_FALSE;
+    }
+
+    if(IS_MULTICAST_MAC(entry.macAddr))
+    {
+        atuStateAppToDev(dev,GT_FALSE,(GT_U32)macEntry->entryState.mcEntryState,
+                        &data);
+        entry.entryState.ucEntryState = data;
+    }
+    else
+    {
+        atuStateAppToDev(dev,GT_TRUE,(GT_U32)macEntry->entryState.ucEntryState,
+                        &data);
+        entry.entryState.ucEntryState = data;
+    }
+
+    if (entry.entryState.ucEntryState == 0)
+    {
+        DBG_INFO(("Entry State should not be ZERO.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    retVal = atuOperationPerform(dev,LOAD_PURGE_ENTRY,NULL,&entry);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gfdbDelMacEntry
+*
+* DESCRIPTION:
+*       Deletes MAC address entry. If DBNum or FID is used, gfdbDelAtuEntry API
+*        would be the better choice to delete an entry in ATU.
+*
+* INPUTS:
+*       macAddress - mac address.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NO_RESOURCE  - failed to allocate a t2c struct
+*       GT_NO_SUCH      - if specified address entry does not exist
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gfdbDelMacEntry
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_ETHERADDR  *macAddress
+)
+{
+    GT_STATUS retVal;
+    GT_ATU_ENTRY    entry;
+
+    DBG_INFO(("gfdbDelMacEntry Called.\n"));
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_STATIC_ADDR))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    gtMemCpy(entry.macAddr.arEther,macAddress->arEther,6);
+    entry.DBNum = 0;
+    entry.prio = 0;
+    entry.portVec = 0;
+    entry.entryState.ucEntryState = 0;
+    entry.trunkMember = GT_FALSE;
+    entry.exPrio.useMacFPri = GT_FALSE;
+    entry.exPrio.macFPri = 0;
+    entry.exPrio.macQPri = 0;
+
+    retVal = atuOperationPerform(dev,LOAD_PURGE_ENTRY,NULL,&entry);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gfdbDelAtuEntry
+*
+* DESCRIPTION:
+*       Deletes ATU entry.
+*
+* INPUTS:
+*       atuEntry - the ATU entry to be deleted.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NO_RESOURCE  - failed to allocate a t2c struct
+*       GT_NO_SUCH      - if specified address entry does not exist
+*
+* COMMENTS:
+*        DBNum in atuEntry -
+*            ATU MAC Address Database number. If multiple address
+*            databases are not being used, DBNum should be zero.
+*            If multiple address databases are being used, this value
+*            should be set to the desired address database number.
+*
+*******************************************************************************/
+GT_STATUS gfdbDelAtuEntry
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_ATU_ENTRY  *atuEntry
+)
+{
+    GT_ATU_ENTRY    entry;
+    GT_STATUS retVal;
+
+    DBG_INFO(("gfdbDelMacEntry Called.\n"));
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_STATIC_ADDR))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    gtMemCpy(entry.macAddr.arEther,atuEntry->macAddr.arEther,6);
+    entry.DBNum = atuEntry->DBNum;
+    entry.prio = 0;
+    entry.portVec = 0;
+    entry.entryState.ucEntryState = 0;
+    entry.trunkMember = GT_FALSE;
+    entry.exPrio.useMacFPri = GT_FALSE;
+    entry.exPrio.macFPri = 0;
+    entry.exPrio.macQPri = 0;
+
+    retVal = atuOperationPerform(dev,LOAD_PURGE_ENTRY,NULL,&entry);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gfdbLearnEnable
+*
+* DESCRIPTION:
+*       Enable/disable automatic learning of new source MAC addresses on port
+*       ingress.
+*
+* INPUTS:
+*       en - GT_TRUE for enable  or GT_FALSE otherwise
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbLearnEnable
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL  en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    GT_LPORT    port;
+    GT_BOOL        mode;
+
+    DBG_INFO(("gfdbLearnEnable Called.\n"));
+    BOOL_2_BIT(en,data);
+    data = 1 - data;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        mode = (en)?GT_FALSE:GT_TRUE;
+
+        for (port=0; port<dev->numOfPorts; port++)
+        {
+            retVal = gprtSetLearnDisable(dev,port,mode);
+            if(retVal != GT_OK)
+            {
+                DBG_INFO(("Failed.\n"));
+                return retVal;
+            }
+        }
+    }
+    else
+    {
+        /* Set the Learn Enable bit.            */
+        retVal = hwSetGlobalRegField(dev,QD_REG_ATU_CONTROL,14,1,data);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gfdbGetLearnEnable
+*
+* DESCRIPTION:
+*       Get automatic learning status of new source MAC addresses on port ingress.
+*
+* INPUTS:
+*       None
+*
+* OUTPUTS:
+*       en - GT_TRUE if enabled  or GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gfdbGetLearnEnable
+(
+    IN GT_QD_DEV    *dev,
+    OUT GT_BOOL  *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    DBG_INFO(("gfdbGetLearnEnable Called.\n"));
+
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+    else
+    {
+        /* Get the Learn Enable bit.            */
+        retVal = hwGetGlobalRegField(dev,QD_REG_ATU_CONTROL,14,1,&data);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+    }
+
+    data = 1 - data;
+    BOOL_2_BIT(data, *en);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/****************************************************************************/
+/* Internal use functions.                                                  */
+/****************************************************************************/
+
+/*******************************************************************************
+* gatuGetViolation
+*
+* DESCRIPTION:
+*       Get ATU Violation data
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       atuIntStatus - interrupt cause, source portID, and vid.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NOT_SUPPORT  - if current device does not support this feature.
+*
+* COMMENTS:
+*        This is an internal function. No user should call this function.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gatuGetViolation
+(
+    IN  GT_QD_DEV         *dev,
+    OUT GT_ATU_INT_STATUS *atuIntStatus
+)
+{
+    GT_U16              intCause;
+    GT_STATUS           retVal;
+    GT_ATU_ENTRY        entry;
+    GT_EXTRA_OP_DATA    opData;
+    GT_BOOL                found, ageInt;
+
+    DBG_INFO(("gatuGetViolation Called.\n"));
+
+    /* check which Violation occurred */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_STATUS,3,1,&intCause);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("ERROR to read ATU OPERATION Register.\n"));
+        return retVal;
+    }
+
+    if (!intCause)
+    {
+        /* No Violation occurred. */
+        atuIntStatus->atuIntCause = 0;
+        return GT_OK;
+    }
+
+    entry.DBNum = 0;
+
+    retVal = atuOperationPerform(dev,SERVICE_VIOLATIONS,&opData,&entry);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (atuOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    gtMemCpy(atuIntStatus->macAddr.arEther,entry.macAddr.arEther,6);
+
+    atuIntStatus->atuIntCause = (GT_U16)opData.intCause;
+    atuIntStatus->spid = entry.entryState.ucEntryState;
+    atuIntStatus->dbNum = qdShort2Char(entry.DBNum);
+
+    if(atuIntStatus->spid != 0xF)
+        atuIntStatus->spid = (GT_U8)GT_PORT_2_LPORT(atuIntStatus->spid);
+
+    if (IS_IN_DEV_GROUP(dev,DEV_AGE_OUT_INT))
+    {
+        if (opData.intCause == GT_AGE_VIOLATION)
+        {
+            atuIntStatus->atuIntCause = GT_AGE_OUT_VIOLATION;
+        }
+        else if (opData.intCause == GT_MISS_VIOLATION)
+        {
+            /* check if it's AGE Violation */
+            if((retVal = gsysGetAgeInt(dev, &ageInt)) != GT_OK)
+                return retVal;
+
+            if(ageInt)
+            {
+                gfdbFindAtuMacEntry(dev, &entry, &found);
+                if ((found) && (entry.entryState.ucEntryState <= 4))
+                    atuIntStatus->atuIntCause = GT_AGE_VIOLATION;
+            }
+
+        }
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* atuOperationPerform
+*
+* DESCRIPTION:
+*       This function is used by all ATU control functions, and is responsible
+*       to write the required operation into the ATU registers.
+*
+* INPUTS:
+*       atuOp       - The ATU operation bits to be written into the ATU
+*                     operation register.
+*       DBNum       - ATU Database Number for CPU accesses
+*       entryPri    - The EntryPri field in the ATU Data register.
+*       portVec     - The portVec field in the ATU Data register.
+*       entryState  - The EntryState field in the ATU Data register.
+*       atuMac      - The Mac address to be written to the ATU Mac registers.
+*
+* OUTPUTS:
+*       entryPri    - The EntryPri field in case the atuOp is GetNext.
+*       portVec     - The portVec field in case the atuOp is GetNext.
+*       entryState  - The EntryState field in case the atuOp is GetNext.
+*       atuMac      - The returned Mac address in case the atuOp is GetNext.
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*       1.  if atuMac == NULL, nothing needs to be written to ATU Mac registers.
+*
+*******************************************************************************/
+static GT_STATUS atuOperationPerform
+(
+    IN      GT_QD_DEV           *dev,
+    IN      GT_ATU_OPERATION    atuOp,
+    INOUT    GT_EXTRA_OP_DATA    *opData,
+    INOUT     GT_ATU_ENTRY        *entry
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    GT_U16          opcodeData;           /* Data to be set into the      */
+                                    /* register.                    */
+    GT_U8           i;
+    GT_U16            portMask;
+
+    gtSemTake(dev,dev->atuRegsSem,OS_WAIT_FOREVER);
+
+    portMask = (1 << dev->maxPorts) - 1;
+
+    /* Wait until the ATU in ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_ATU_OPERATION;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->atuRegsSem);
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+    while(data == 1)
+    {
+        retVal = hwGetGlobalRegField(dev,QD_REG_ATU_OPERATION,15,1,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->atuRegsSem);
+            return retVal;
+        }
+    }
+#endif
+
+    opcodeData = 0;
+
+    switch (atuOp)
+    {
+        case LOAD_PURGE_ENTRY:
+                if ((IS_IN_DEV_GROUP(dev,DEV_88E6093_FAMILY) &&
+                    (!((IS_IN_DEV_GROUP(dev,DEV_88EC000_FAMILY))||
+                       (IS_IN_DEV_GROUP(dev,DEV_88ESPANNAK_FAMILY))))) ||
+                    IS_IN_DEV_GROUP(dev,DEV_TRUNK))
+                {
+                    if (IS_IN_DEV_GROUP(dev,DEV_TRUNK) && entry->trunkMember)
+                    {
+                        /* portVec represents trunk ID */
+                        data = (GT_U16)( 0x8000 | (((entry->portVec) & 0xF) << 4) |
+                             (((entry->entryState.ucEntryState) & 0xF)) );
+                    }
+                    else
+                    {
+                        data = (GT_U16)( (((entry->portVec) & portMask) << 4) |
+                             (((entry->entryState.ucEntryState) & 0xF)) );
+                    }
+                    opcodeData |= (entry->prio & 0x7) << 8;
+                }
+                else if(IS_IN_DEV_GROUP(dev,DEV_ATU_EXT_PRI))
+                {
+                  if(IS_IN_DEV_GROUP(dev,DEV_MACPRI_IN_TABLE))
+                  {
+                    data = (GT_U16)( (((entry->portVec) & portMask) << 4) |
+                             ((entry->entryState.ucEntryState) & 0xF) );
+                    data |= ((entry->prio & 0x7) << 13);
+                  }
+                  else
+                  {
+                    data = (GT_U16)( (((entry->portVec) & portMask) << 4) |
+                             (((entry->entryState.ucEntryState) & 0xF)) |
+                             (((entry->exPrio.macQPri) & 0x3) << 14) );
+                    if(entry->exPrio.useMacFPri == GT_TRUE)
+                        data |= ((1 << 13) | ((entry->exPrio.macFPri & 0x7) << 10));
+                  }
+                }
+                else
+                {
+                    data = (GT_U16)( (((entry->prio) & 0x3) << 14) |
+                            (((entry->portVec) & portMask) << 4) |
+                            (((entry->entryState.ucEntryState) & 0xF)) );
+                }
+                retVal = hwWriteGlobalReg(dev,QD_REG_ATU_DATA_REG,data);
+                if(retVal != GT_OK)
+                {
+                    gtSemGive(dev,dev->atuRegsSem);
+                    return retVal;
+                }
+                /* pass thru */
+
+        case GET_NEXT_ENTRY:
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 3;
+
+      for(i = 0; i < 3; i++)
+      {
+        data=(entry->macAddr.arEther[2*i] << 8)|(entry->macAddr.arEther[1 + 2*i]);
+        regAccess.rw_reg_list[i].cmd = HW_REG_WRITE;
+        regAccess.rw_reg_list[i].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+        regAccess.rw_reg_list[i].reg = QD_REG_ATU_MAC_BASE+i;
+        regAccess.rw_reg_list[i].data = data;
+      }
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->atuRegsSem);
+        return retVal;
+      }
+    }
+#else
+                for(i = 0; i < 3; i++)
+                {
+                    data=(entry->macAddr.arEther[2*i] << 8)|(entry->macAddr.arEther[1 + 2*i]);
+                    retVal = hwWriteGlobalReg(dev,(GT_U8)(QD_REG_ATU_MAC_BASE+i),data);
+                    if(retVal != GT_OK)
+                    {
+                        gtSemGive(dev,dev->atuRegsSem);
+                        return retVal;
+                    }
+                }
+#endif
+                break;
+
+        case FLUSH_ALL:
+        case FLUSH_UNLOCKED:
+        case FLUSH_ALL_IN_DB:
+        case FLUSH_UNLOCKED_IN_DB:
+                if (entry->entryState.ucEntryState == 0xF)
+                {
+                    data = (GT_U16)(0xF | ((opData->moveFrom & 0xF) << 4) | ((opData->moveTo & 0xF) << 8));
+                }
+                else
+                {
+                    data = 0;
+                }
+                retVal = hwWriteGlobalReg(dev,QD_REG_ATU_DATA_REG,data);
+                   if(retVal != GT_OK)
+                {
+                       gtSemGive(dev,dev->atuRegsSem);
+                    return retVal;
+                   }
+                break;
+
+        case SERVICE_VIOLATIONS:
+
+                break;
+
+        default :
+                return GT_FAIL;
+    }
+
+    /* Set DBNum */
+    if(IS_IN_DEV_GROUP(dev,DEV_FID_REG))
+    {
+        retVal = hwSetGlobalRegField(dev,QD_REG_ATU_FID_REG,0,12,(GT_U16)(entry->DBNum & 0xFFF));
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->atuRegsSem);
+            return retVal;
+        }
+    }
+    else if (IS_IN_DEV_GROUP(dev,DEV_DBNUM_256))
+    {
+        retVal = hwSetGlobalRegField(dev,QD_REG_ATU_CONTROL,12,4,(GT_U16)((entry->DBNum & 0xF0) >> 4));
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->atuRegsSem);
+            return retVal;
+        }
+    }
+    else if (IS_IN_DEV_GROUP(dev,DEV_DBNUM_64))
+    {
+        opcodeData |= ((entry->DBNum & 0x30) << 4);    /* Op Reg bit 9:8 */
+    }
+
+    /* Set the ATU Operation register in addtion to DBNum setup  */
+
+    if(IS_IN_DEV_GROUP(dev,DEV_FID_REG))
+        opcodeData |= ((1 << 15) | (atuOp << 12));
+    else
+    {
+        opcodeData |= ((1 << 15) | (atuOp << 12) | (entry->DBNum & 0xF));
+    }
+
+    retVal = hwWriteGlobalReg(dev,QD_REG_ATU_OPERATION,opcodeData);
+    if(retVal != GT_OK)
+    {
+        gtSemGive(dev,dev->atuRegsSem);
+        return retVal;
+    }
+
+    /* If the operation is to service violation operation wait for the response   */
+    if(atuOp == SERVICE_VIOLATIONS)
+    {
+        /* Wait until the VTU in ready. */
+#ifdef GT_RMGMT_ACCESS
+        {
+          HW_DEV_REG_ACCESS regAccess;
+
+          regAccess.entries = 1;
+
+          regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+          regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+          regAccess.rw_reg_list[0].reg = QD_REG_ATU_OPERATION;
+          regAccess.rw_reg_list[0].data = 15;
+          retVal = hwAccessMultiRegs(dev, &regAccess);
+          if(retVal != GT_OK)
+          {
+            gtSemGive(dev,dev->atuRegsSem);
+          return retVal;
+          }
+        }
+#else
+        data = 1;
+        while(data == 1)
+        {
+            retVal = hwGetGlobalRegField(dev,QD_REG_ATU_OPERATION,15,1,&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->atuRegsSem);
+                return retVal;
+            }
+        }
+#endif
+
+        /* get the Interrupt Cause */
+        retVal = hwGetGlobalRegField(dev,QD_REG_ATU_OPERATION,4,4,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->atuRegsSem);
+            return retVal;
+        }
+        if (!IS_IN_DEV_GROUP(dev,DEV_AGE_OUT_INT))
+        {
+            data &= 0x7;    /* only 3 bits are valid for non age_out_int group */
+        }
+
+        switch (data)
+        {
+            case 8:    /* Age Interrupt */
+                opData->intCause = GT_AGE_VIOLATION;
+                break;
+            case 4:    /* Member Violation */
+                opData->intCause = GT_MEMBER_VIOLATION;
+                break;
+            case 2:    /* Miss Violation */
+                opData->intCause = GT_MISS_VIOLATION;
+                break;
+            case 1:    /* Full Violation */
+                opData->intCause = GT_FULL_VIOLATION;
+                break;
+            default:
+                opData->intCause = 0;
+                gtSemGive(dev,dev->atuRegsSem);
+                return GT_OK;
+        }
+
+        /* get the DBNum that was involved in the violation */
+
+        entry->DBNum = 0;
+
+        if(IS_IN_DEV_GROUP(dev,DEV_FID_REG))
+        {
+            retVal = hwGetGlobalRegField(dev,QD_REG_ATU_FID_REG,0,12,&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->atuRegsSem);
+                return retVal;
+            }
+            entry->DBNum = (GT_U16)data;
+        }
+        else if (IS_IN_DEV_GROUP(dev,DEV_DBNUM_256))
+        {
+            retVal = hwGetGlobalRegField(dev,QD_REG_ATU_CONTROL,12,4,&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->atuRegsSem);
+                return retVal;
+            }
+            entry->DBNum = (GT_U16)data << 4;
+        }
+        else if (IS_IN_DEV_GROUP(dev,DEV_DBNUM_64))
+        {
+            retVal = hwGetGlobalRegField(dev,QD_REG_ATU_OPERATION,8,2,&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->atuRegsSem);
+                return retVal;
+            }
+            entry->DBNum = (GT_U16)data << 4;
+        }
+
+        if(!IS_IN_DEV_GROUP(dev,DEV_FID_REG))
+        {
+            retVal = hwGetGlobalRegField(dev,QD_REG_ATU_OPERATION,0,4,&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->atuRegsSem);
+                return retVal;
+            }
+
+            entry->DBNum |= (GT_U8)(data & 0xF);
+        }
+
+        /* get the Source Port ID that was involved in the violation */
+
+        retVal = hwReadGlobalReg(dev,QD_REG_ATU_DATA_REG,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->atuRegsSem);
+            return retVal;
+        }
+
+        entry->entryState.ucEntryState = data & 0xF;
+
+        /* Get the Mac address  */
+#ifdef GT_RMGMT_ACCESS
+        {
+          HW_DEV_REG_ACCESS regAccess;
+
+          regAccess.entries = 3;
+
+          for(i = 0; i < 3; i++)
+          {
+            regAccess.rw_reg_list[i].cmd = HW_REG_READ;
+            regAccess.rw_reg_list[i].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+            regAccess.rw_reg_list[i].reg = QD_REG_ATU_MAC_BASE+i;
+            regAccess.rw_reg_list[i].data = 0;
+          }
+          retVal = hwAccessMultiRegs(dev, &regAccess);
+          if(retVal != GT_OK)
+          {
+            gtSemGive(dev,dev->atuRegsSem);
+            return retVal;
+          }
+          for(i = 0; i < 3; i++)
+          {
+            entry->macAddr.arEther[2*i] = qdLong2Char(regAccess.rw_reg_list[i].data >> 8);
+            entry->macAddr.arEther[1 + 2*i] = qdLong2Char(regAccess.rw_reg_list[i].data & 0xFF);
+          }
+        }
+#else
+        for(i = 0; i < 3; i++)
+        {
+            retVal = hwReadGlobalReg(dev,(GT_U8)(QD_REG_ATU_MAC_BASE+i),&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->atuRegsSem);
+                return retVal;
+            }
+            entry->macAddr.arEther[2*i] = data >> 8;
+            entry->macAddr.arEther[1 + 2*i] = data & 0xFF;
+        }
+#endif
+
+
+    } /* end of service violations */
+    /* If the operation is a gen next operation wait for the response   */
+    if(atuOp == GET_NEXT_ENTRY)
+    {
+        entry->trunkMember = GT_FALSE;
+        entry->exPrio.useMacFPri = GT_FALSE;
+        entry->exPrio.macFPri = 0;
+        entry->exPrio.macQPri = 0;
+
+        /* Wait until the ATU in ready. */
+#ifdef GT_RMGMT_ACCESS
+        {
+          HW_DEV_REG_ACCESS regAccess;
+
+          regAccess.entries = 5;
+
+          regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+          regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+          regAccess.rw_reg_list[0].reg = QD_REG_ATU_OPERATION;
+          regAccess.rw_reg_list[0].data = 15;
+
+          for(i = 1; i < 4; i++)
+          {
+            regAccess.rw_reg_list[i].cmd = HW_REG_READ;
+            regAccess.rw_reg_list[i].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+            regAccess.rw_reg_list[i].reg = QD_REG_ATU_MAC_BASE+i-1;
+            regAccess.rw_reg_list[i].data = 0;
+          }
+
+          regAccess.rw_reg_list[4].cmd = HW_REG_READ;
+          regAccess.rw_reg_list[4].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+          regAccess.rw_reg_list[4].reg = QD_REG_ATU_DATA_REG;
+          regAccess.rw_reg_list[4].data = 0;
+
+          retVal = hwAccessMultiRegs(dev, &regAccess);
+          if(retVal != GT_OK)
+          {
+            gtSemGive(dev,dev->atuRegsSem);
+            return retVal;
+          }
+          for(i = 0; i < 3; i++)
+          {
+            entry->macAddr.arEther[2*i] = qdLong2Char(regAccess.rw_reg_list[i+1].data >> 8);
+            entry->macAddr.arEther[1 + 2*i] = qdLong2Char(regAccess.rw_reg_list[i+1].data & 0xFF);
+          }
+          data = qdLong2Short(regAccess.rw_reg_list[4].data);
+        }
+#else
+        data = 1;
+        while(data == 1)
+        {
+            retVal = hwGetGlobalRegField(dev,QD_REG_ATU_OPERATION,15,1,&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->atuRegsSem);
+                return retVal;
+            }
+        }
+
+        /* Get the Mac address  */
+        for(i = 0; i < 3; i++)
+        {
+            retVal = hwReadGlobalReg(dev,(GT_U8)(QD_REG_ATU_MAC_BASE+i),&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->atuRegsSem);
+                return retVal;
+            }
+            entry->macAddr.arEther[2*i] = data >> 8;
+            entry->macAddr.arEther[1 + 2*i] = data & 0xFF;
+        }
+
+        retVal = hwReadGlobalReg(dev,QD_REG_ATU_DATA_REG,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->atuRegsSem);
+            return retVal;
+        }
+#endif
+
+        /* Get the Atu data register fields */
+        if ((IS_IN_DEV_GROUP(dev,DEV_88E6093_FAMILY) &&
+            (!((IS_IN_DEV_GROUP(dev,DEV_88EC000_FAMILY))||
+               (IS_IN_DEV_GROUP(dev,DEV_88ESPANNAK_FAMILY))))) ||
+            IS_IN_DEV_GROUP(dev,DEV_TRUNK))
+        {
+            if (IS_IN_DEV_GROUP(dev,DEV_TRUNK))
+            {
+                entry->trunkMember = (data & 0x8000)?GT_TRUE:GT_FALSE;
+            }
+
+            entry->portVec = (data >> 4) & portMask;
+            entry->entryState.ucEntryState = data & 0xF;
+            retVal = hwGetGlobalRegField(dev,QD_REG_ATU_OPERATION,8,3,&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->atuRegsSem);
+                return retVal;
+            }
+            entry->prio = (GT_U8)data;
+        }
+        else if(IS_IN_DEV_GROUP(dev,DEV_ATU_EXT_PRI))
+        {
+          if(IS_IN_DEV_GROUP(dev,DEV_MACPRI_IN_TABLE))
+          {
+            entry->portVec = (data >> 4) & portMask;
+            entry->entryState.ucEntryState = data & 0xF;
+            entry->prio = (data >> 13) & 0x7;
+            entry->exPrio.macFPri = entry->prio;
+            entry->exPrio.macQPri = entry->prio;
+            entry->prio = 0;
+            entry->exPrio.useMacFPri = GT_FALSE; /* doesn't care */
+          }
+          else
+          {
+            entry->prio = 0;
+            entry->portVec = (data >> 4) & portMask;
+            entry->entryState.ucEntryState = data & 0xF;
+            entry->exPrio.useMacFPri = (data & 0x2000)?GT_TRUE:GT_FALSE;
+            entry->exPrio.macFPri = (data >> 10) & 0x7;
+            entry->exPrio.macQPri = data >> 14;
+          }
+        }
+        else
+        {
+            entry->prio = data >> 14;
+            entry->portVec = (data >> 4) & portMask;
+            entry->entryState.ucEntryState = data & 0xF;
+        }
+    }
+
+    gtSemGive(dev,dev->atuRegsSem);
+    return GT_OK;
+}
+
+static GT_STATUS atuStateAppToDev
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_BOOL        unicast,
+    IN  GT_U32        state,
+    OUT GT_U32        *newOne
+)
+{
+    GT_U32    newState;
+    GT_STATUS    retVal = GT_OK;
+
+    if(unicast)
+    {
+        switch ((GT_ATU_UC_STATE)state)
+        {
+            case GT_UC_INVALID:
+                newState = state;
+                break;
+
+            case GT_UC_DYNAMIC:
+                if (IS_IN_DEV_GROUP(dev,DEV_UC_7_DYNAMIC))
+                {
+                    newState = 7;
+                }
+                else
+                {
+                    newState = 0xE;
+                }
+                break;
+
+            case GT_UC_NO_PRI_TO_CPU_STATIC_NRL:
+                if (IS_IN_DEV_GROUP(dev,DEV_UC_NO_PRI_TO_CPU_STATIC_NRL))
+                {
+                    newState = state;
+                }
+                else
+                {
+                    newState = (GT_U32)GT_UC_STATIC;
+                    retVal = GT_BAD_PARAM;
+                }
+                break;
+
+            case GT_UC_TO_CPU_STATIC_NRL:
+                if (IS_IN_DEV_GROUP(dev,DEV_UC_TO_CPU_STATIC_NRL))
+                {
+                    newState = state;
+                }
+                else
+                {
+                    newState = (GT_U32)GT_UC_STATIC;
+                    retVal = GT_BAD_PARAM;
+                }
+                break;
+
+            case GT_UC_NO_PRI_STATIC_NRL:
+                if (IS_IN_DEV_GROUP(dev,DEV_UC_NO_PRI_STATIC_NRL))
+                {
+                    newState = state;
+                }
+                else
+                {
+                    newState = (GT_U32)GT_UC_STATIC;
+                    retVal = GT_BAD_PARAM;
+                }
+                break;
+
+            case GT_UC_STATIC_NRL:
+                if (IS_IN_DEV_GROUP(dev,DEV_UC_STATIC_NRL))
+                {
+                    newState = state;
+                }
+                else
+                {
+                    newState = (GT_U32)GT_UC_STATIC;
+                    retVal = GT_BAD_PARAM;
+                }
+                break;
+
+            case GT_UC_NO_PRI_TO_CPU_STATIC:
+                if (IS_IN_DEV_GROUP(dev,DEV_UC_NO_PRI_TO_CPU_STATIC))
+                {
+                    newState = state;
+                }
+                else
+                {
+                    newState = (GT_U32)GT_UC_STATIC;
+                    retVal = GT_BAD_PARAM;
+                }
+                break;
+
+            case GT_UC_TO_CPU_STATIC:
+                if (IS_IN_DEV_GROUP(dev,DEV_UC_TO_CPU_STATIC))
+                {
+                    newState = state;
+                }
+                else
+                {
+                    newState = (GT_U32)GT_UC_STATIC;
+                    retVal = GT_BAD_PARAM;
+                }
+                break;
+
+            case GT_UC_NO_PRI_STATIC:
+                if (IS_IN_DEV_GROUP(dev,DEV_UC_NO_PRI_STATIC))
+                {
+                    newState = state;
+                }
+                else
+                {
+                    newState = (GT_U32)GT_UC_STATIC;
+                    retVal = GT_BAD_PARAM;
+                }
+                break;
+
+            case GT_UC_STATIC:
+                if (IS_IN_DEV_GROUP(dev,DEV_UC_STATIC))
+                {
+                    newState = state;
+                }
+                else
+                {
+                    newState = (GT_U32)GT_UC_STATIC;
+                    retVal = GT_BAD_PARAM;
+                }
+                break;
+
+            default:
+                if (IS_IN_DEV_GROUP(dev,DEV_UC_7_DYNAMIC))
+                {
+                    newState = 7;
+                }
+                else
+                {
+                    newState = 0xE;
+                }
+                retVal = GT_BAD_PARAM;
+                break;
+
+        }
+    }
+    else
+    {
+        switch ((GT_ATU_UC_STATE)state)
+        {
+            case GT_MC_INVALID:
+                newState = state;
+                break;
+
+            case GT_MC_MGM_STATIC_UNLIMITED_RATE:
+                if (IS_IN_DEV_GROUP(dev,DEV_MC_MGM_STATIC_UNLIMITED_RATE))
+                {
+                    newState = state;
+                }
+                else
+                {
+                    newState = (GT_U32)GT_MC_STATIC;
+                    retVal = GT_BAD_PARAM;
+                }
+                break;
+
+            case GT_MC_STATIC_UNLIMITED_RATE:
+                if (IS_IN_DEV_GROUP(dev,DEV_MC_STATIC_UNLIMITED_RATE))
+                {
+                    newState = state;
+                }
+                else
+                {
+                    newState = (GT_U32)GT_MC_STATIC;
+                    retVal = GT_BAD_PARAM;
+                }
+                break;
+
+            case GT_MC_MGM_STATIC:
+                if (IS_IN_DEV_GROUP(dev,DEV_MC_MGM_STATIC))
+                {
+                    newState = state;
+                }
+                else
+                {
+                    newState = (GT_U32)GT_MC_STATIC;
+                    retVal = GT_BAD_PARAM;
+                }
+                break;
+
+            case GT_MC_STATIC:
+                if (IS_IN_DEV_GROUP(dev,DEV_MC_STATIC))
+                {
+                    newState = state;
+                }
+                else
+                {
+                    newState = (GT_U32)GT_MC_STATIC;
+                    retVal = GT_BAD_PARAM;
+                }
+                break;
+
+            case GT_MC_PRIO_MGM_STATIC_UNLIMITED_RATE:
+                if (IS_IN_DEV_GROUP(dev,DEV_MC_PRIO_MGM_STATIC_UNLIMITED_RATE))
+                {
+                    newState = state;
+                }
+                else
+                {
+                    newState = (GT_U32)GT_MC_STATIC;
+                    retVal = GT_BAD_PARAM;
+                }
+                break;
+
+            case GT_MC_PRIO_STATIC_UNLIMITED_RATE:
+                if (IS_IN_DEV_GROUP(dev,DEV_MC_PRIO_STATIC_UNLIMITED_RATE))
+                {
+                    newState = state;
+                }
+                else
+                {
+                    newState = (GT_U32)GT_MC_STATIC;
+                    retVal = GT_BAD_PARAM;
+                }
+                break;
+
+            case GT_MC_PRIO_MGM_STATIC:
+                if (IS_IN_DEV_GROUP(dev,DEV_MC_PRIO_MGM_STATIC))
+                {
+                    newState = state;
+                }
+                else
+                {
+                    newState = (GT_U32)GT_MC_STATIC;
+                    retVal = GT_BAD_PARAM;
+                }
+                break;
+
+            case GT_MC_PRIO_STATIC:
+                if (IS_IN_DEV_GROUP(dev,DEV_MC_PRIO_STATIC))
+                {
+                    newState = state;
+                }
+                else
+                {
+                    newState = (GT_U32)GT_MC_STATIC;
+                    retVal = GT_BAD_PARAM;
+                }
+                break;
+
+            default:
+                newState = (GT_U32)GT_MC_STATIC;
+                retVal = GT_BAD_PARAM;
+                break;
+
+        }
+    }
+
+    *newOne = newState;
+    return retVal;
+}
+
+static GT_STATUS atuStateDevToApp
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_BOOL        unicast,
+    IN  GT_U32        state,
+    OUT GT_U32        *newOne
+)
+{
+    GT_U32    newState;
+    GT_STATUS    retVal = GT_OK;
+
+    if(unicast)
+    {
+        if (state == 0)
+        {
+            newState = (GT_U32)GT_UC_INVALID;
+        }
+        else if (state <= 7)
+        {
+            newState = (GT_U32)GT_UC_DYNAMIC;
+        }
+        else if ((state <= 0xE) && (!IS_IN_DEV_GROUP(dev,DEV_UC_7_DYNAMIC)))
+        {
+            newState = (GT_U32)GT_UC_DYNAMIC;
+        }
+        else
+        {
+            newState = state;
+        }
+    }
+    else
+    {
+        newState = state;
+    }
+
+    *newOne = newState;
+    return retVal;
+}
+
+
+static GT_STATUS atuGetStats
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_ATU_STAT    *atuStat,
+    OUT GT_U32        *count
+)
+{
+    GT_U32          numOfEntries, dbNum;
+    GT_ATU_ENTRY    entry;
+    GT_U16            data,mode,bin;
+    GT_STATUS       retVal;
+
+    DBG_INFO(("atuGetStats Called.\n"));
+
+    switch (atuStat->op)
+    {
+        case GT_ATU_STATS_ALL:
+        case GT_ATU_STATS_NON_STATIC:
+            dbNum = 0;
+            break;
+        case GT_ATU_STATS_ALL_FID:
+        case GT_ATU_STATS_NON_STATIC_FID:
+            dbNum = atuStat->DBNum;
+            break;
+        default:
+            return GT_FALSE;
+    }
+
+    numOfEntries = 0;
+    mode = atuStat->op;
+
+    for(bin=0; bin<4; bin++)
+    {
+        data = (bin << 14) | (mode << 12);
+
+        retVal = hwWriteGlobal2Reg(dev, QD_REG_ATU_STATS, data);
+           if(retVal != GT_OK)
+        {
+               DBG_INFO(("Failed.\n"));
+               return retVal;
+        }
+
+        entry.DBNum = (GT_U16)dbNum;
+        gtMemSet(entry.macAddr.arEther,0,sizeof(GT_ETHERADDR));
+
+        retVal = atuOperationPerform(dev,GET_NEXT_ENTRY,NULL,&entry);
+           if(retVal == GT_FAIL)
+        {
+               DBG_INFO(("Failed.\n"));
+               return retVal;
+        }
+
+        retVal = hwReadGlobal2Reg(dev, QD_REG_ATU_STATS, &data);
+           if(retVal != GT_OK)
+        {
+               DBG_INFO(("Failed.\n"));
+               return retVal;
+        }
+
+        numOfEntries += (data & 0xFFF);
+    }
+
+    *count = numOfEntries;
+
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtBrgStp.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtBrgStp.c
new file mode 100644
index 000000000000..b876dcd1534d
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtBrgStp.c
@@ -0,0 +1,333 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtBrgStp.c
+*
+* DESCRIPTION:
+*       API definitions to handle port spanning tree state.
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*       $Revision: 5 $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+static GT_STATUS enhancedBPDUSet(GT_QD_DEV *dev,GT_BOOL en)
+{
+    GT_STATUS       retVal = GT_OK; /* Functions return value.      */
+    GT_U16            enBits;
+
+    /* If disable, reset the BPDU bit(bit0) from Rsvd2CpuEnables register */
+       if(en == GT_FALSE)
+    {
+        if((retVal = gsysGetRsvd2CpuEnables(dev,&enBits)) != GT_OK)
+        {
+            DBG_INFO(("gsysGetRsvd2CpuEnables failed.\n"));
+            return retVal;
+        }
+        enBits &= ~0x1;
+
+        if((retVal = gsysSetRsvd2CpuEnables(dev,enBits)) != GT_OK)
+        {
+            DBG_INFO(("gsysSetRsvd2CpuEnables failed.\n"));
+            return retVal;
+        }
+
+        return retVal;
+    }
+
+    /*
+        If enable,
+        1) Set MGMT Pri bits,
+        2) Set BPDU bit(bit0) from Rsvd2CpuEnables register,
+        3) Enable Rsvd2Cpu
+    */
+    if((retVal = gsysSetMGMTPri(dev,7)) != GT_OK)
+    {
+        DBG_INFO(("gsysSetMGMTPri failed.\n"));
+        return retVal;
+    }
+
+    if((retVal = gsysGetRsvd2CpuEnables(dev,&enBits)) != GT_OK)
+    {
+        DBG_INFO(("gsysGetRsvd2CpuEnables failed.\n"));
+        return retVal;
+    }
+    enBits |= 0x1;
+    if((retVal = gsysSetRsvd2CpuEnables(dev,enBits)) != GT_OK)
+    {
+        DBG_INFO(("gsysSetRsvd2CpuEnables failed.\n"));
+        return retVal;
+    }
+
+    if((retVal = gsysSetRsvd2Cpu(dev,GT_TRUE)) != GT_OK)
+    {
+        DBG_INFO(("gsysSetRsvd2Cpu failed.\n"));
+        return retVal;
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gstpSetMode
+*
+* DESCRIPTION:
+*       This routine Enable the Spanning tree.
+*
+* INPUTS:
+*       en - GT_TRUE for enable, GT_FALSE for disable.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       when enabled, this function sets all port to blocking state, and inserts
+*       the BPDU MAC into the ATU to be captured to CPU, on disable all port are
+*       being modified to be in forwarding state.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstpSetMode
+(
+    IN GT_QD_DEV *dev,
+    IN GT_BOOL  en
+)
+{
+    GT_STATUS       retVal = GT_OK; /* Functions return value.      */
+    GT_ATU_ENTRY        atuEntry;   /* The ATU entry data to be set */
+    GT_U32          i, dbNum;
+
+    DBG_INFO(("gstpSetMode Called.\n"));
+    if(dev->deviceId == GT_88E6051)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+
+    if((en == GT_TRUE) && (dev->stpMode == 1))
+    {
+        DBG_INFO(("OK.\n"));
+        return GT_OK;
+    }
+
+    switch(dev->deviceId)
+    {
+        case GT_88E6051:
+        case GT_88E6052:
+            dbNum = 1;
+            break;
+        case GT_FF_HG:
+        case GT_FF_EG:
+        case GT_88E6021:
+        case GT_88E6060:
+        case GT_88E6031:
+        case GT_88E6061:
+        case GT_88E6063:
+        case GT_FH_VPN:
+        case GT_88E6083:
+        case GT_88E6153:
+        case GT_88E6181:
+        case GT_88E6183:
+        case GT_88E6093:
+            dbNum = 16;
+            break;
+        case GT_88E6035:
+        case GT_88E6055:
+        case GT_88E6065:
+            dbNum = 64;
+            break;
+        default:
+            if (!IS_IN_DEV_GROUP(dev,DEV_ENHANCED_MULTICAST))
+            {
+                dbNum = 64;
+            }
+            else
+            {
+                dbNum = 0;
+                retVal = enhancedBPDUSet(dev,en);
+            }
+            break;
+    }
+
+    for (i=0; i<dbNum; i++)
+    {
+        /* Set the Atu entry parameters.    */
+        atuEntry.macAddr.arEther[0] = 0x01;
+        atuEntry.macAddr.arEther[1] = 0x80;
+        atuEntry.macAddr.arEther[2] = 0xC2;
+        atuEntry.macAddr.arEther[3] = 0x00;
+        atuEntry.macAddr.arEther[4] = 0x00;
+        atuEntry.macAddr.arEther[5] = 0x00;
+        atuEntry.portVec = GT_LPORTVEC_2_PORTVEC((1<<dev->cpuPortNum));
+        if(IS_IN_DEV_GROUP(dev,DEV_ATU_EXT_PRI))
+        {
+            if(IS_IN_DEV_GROUP(dev,DEV_FQPRI_IN_TABLE))
+            {
+                atuEntry.exPrio.useMacFPri = GT_TRUE;
+                atuEntry.exPrio.macFPri = 7;
+            }
+            else
+            {
+                atuEntry.exPrio.useMacFPri = 0;
+                atuEntry.exPrio.macFPri = 0;
+            }
+            atuEntry.exPrio.macQPri = 3;
+            atuEntry.prio    = 0;
+        }
+        else
+        {
+            atuEntry.prio    = 3;
+            atuEntry.exPrio.useMacFPri = 0;
+            atuEntry.exPrio.macFPri = 0;
+            atuEntry.exPrio.macQPri = 0;
+        }
+        atuEntry.DBNum = (GT_U8)i;
+        atuEntry.entryState.mcEntryState = GT_MC_PRIO_MGM_STATIC;
+
+        if(en == GT_TRUE)
+        {
+            retVal = gfdbAddMacEntry(dev,&atuEntry);
+        }
+        else
+        {
+            if(dev->stpMode == 0)
+                break;
+            retVal = gfdbDelAtuEntry(dev,&atuEntry);
+        }
+
+        if (retVal != GT_OK)
+            break;
+    }
+
+    if(retVal == GT_OK)
+    {
+        if(en == GT_TRUE)
+            dev->stpMode = 1;
+        else
+            dev->stpMode = 2;
+        DBG_INFO(("OK.\n"));
+    }
+    else
+    {
+           dev->stpMode = 0;
+        DBG_INFO(("Failed.\n"));
+    }
+
+
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gstpSetPortState
+*
+* DESCRIPTION:
+*       This routine set the port state.
+*
+* INPUTS:
+*       port  - the logical port number.
+*       state - the port state to set.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstpSetPortState
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT           port,
+    IN GT_PORT_STP_STATE  state
+)
+{
+    GT_U8           phyPort;        /* Physical port                */
+    GT_U16          data;           /* Data to write to register.   */
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gstpSetPortState Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+    data    = state;
+
+    /* Set the port state bits.             */
+    retVal= hwSetPortRegField(dev,phyPort, QD_REG_PORT_CONTROL,0,2,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gstpGetPortState
+*
+* DESCRIPTION:
+*       This routine returns the port state.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       state - the current port state.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstpGetPortState
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT           port,
+    OUT GT_PORT_STP_STATE  *state
+)
+{
+    GT_U8           phyPort;        /* Physical port                */
+    GT_U16          data;           /* Data read from register.     */
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gstpGetPortState Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* Get the port state bits.             */
+    retVal = hwGetPortRegField(dev,phyPort, QD_REG_PORT_CONTROL,0,2,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    *state = data & 0x3;
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtBrgStu.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtBrgStu.c
new file mode 100644
index 000000000000..7433edc071e8
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtBrgStu.c
@@ -0,0 +1,848 @@
+#include <Copyright.h>
+
+/*******************************************************************************
+* gtBrgStu.c
+*
+* DESCRIPTION:
+*       API definitions for SID (VTU 802.1s Port State Information Database)
+*        Translation Unit.
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*       $Revision: $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtSem.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+/****************************************************************************/
+/* Forward function declaration.                                            */
+/****************************************************************************/
+
+static GT_STATUS stuOperationPerform
+(
+    IN        GT_QD_DEV           *dev,
+    IN      GT_STU_OPERATION    stuOp,
+    INOUT   GT_U8               *valid,
+    INOUT     GT_STU_ENTRY        *stuEntry
+);
+
+/*******************************************************************************
+* gstuGetEntryCount
+*
+* DESCRIPTION:
+*       Gets the current number of valid entries in the STU table
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       numEntries - number of STU entries.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gstuGetEntryCount
+(
+    IN  GT_QD_DEV *dev,
+    OUT GT_U32    *numEntries
+)
+{
+    GT_U8               valid;
+    GT_U32                numOfEntries;
+    GT_STATUS           retVal;
+    GT_STU_ENTRY        entry;
+
+    DBG_INFO(("gstuGetEntryCount Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_802_1S_STU))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    entry.sid = 0;
+    numOfEntries = 0;
+
+    while(1)
+    {
+        retVal = stuOperationPerform(dev,GET_NEXT_STU_ENTRY,&valid,&entry);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed (stuOperationPerform returned GT_FAIL).\n"));
+            return retVal;
+        }
+
+        if( entry.sid==0x3F )
+        {
+            if (valid==1) numOfEntries++;
+            break;
+        }
+
+        numOfEntries++;
+    }
+
+    *numEntries = numOfEntries;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gstuGetEntryFirst
+*
+* DESCRIPTION:
+*       Gets first lexicographic entry from the STU.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       stuEntry - find the first valid STU entry.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NO_SUCH - table is empty.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gstuGetEntryFirst
+(
+    IN  GT_QD_DEV       *dev,
+    OUT GT_STU_ENTRY    *stuEntry
+)
+{
+    GT_U8               valid;
+    GT_STATUS           retVal;
+    GT_U8               port;
+    GT_LPORT               lport;
+    GT_STU_ENTRY        entry;
+
+    DBG_INFO(("gstuGetEntryFirst Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_802_1S_STU))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    entry.sid = 0;
+    valid = 0;
+
+    retVal = stuOperationPerform(dev,GET_NEXT_STU_ENTRY,&valid, &entry);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (stuOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    /* retrieve the value from the operation */
+
+    if((entry.sid == 0x3F) && (valid == 0))
+        return GT_NO_SUCH;
+
+    stuEntry->sid = entry.sid;
+
+    for(lport=0; lport<dev->numOfPorts; lport++)
+    {
+        port = GT_LPORT_2_PORT(lport);
+        stuEntry->portState[lport]=entry.portState[port];
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gstuGetEntryNext
+*
+* DESCRIPTION:
+*       Gets next lexicographic STU entry from the specified SID.
+*
+* INPUTS:
+*       stuEntry - the SID to start the search.
+*
+* OUTPUTS:
+*       stuEntry - next STU entry.
+*
+* RETURNS:
+*       GT_OK      - on success.
+*       GT_FAIL    - on error or entry does not exist.
+*       GT_NO_SUCH - no more entries.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gstuGetEntryNext
+(
+    IN  GT_QD_DEV       *dev,
+    INOUT GT_STU_ENTRY  *stuEntry
+)
+{
+    GT_U8               valid;
+    GT_STATUS           retVal;
+    GT_U8               port;
+    GT_LPORT               lport;
+    GT_STU_ENTRY        entry;
+
+    DBG_INFO(("gstuGetEntryNext Called.\n"));
+
+    /* check if device supports this feature */
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_802_1S_STU))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(stuEntry->sid >= 0x3F)
+    {
+        return GT_NO_SUCH;
+    }
+    else
+    {
+        entry.sid = stuEntry->sid;
+    }
+    valid = 0;
+
+    retVal = stuOperationPerform(dev,GET_NEXT_STU_ENTRY,&valid, &entry);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (stuOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    /* retrieve the value from the operation */
+
+    if((entry.sid == 0x3F) && (valid == 0))
+        return GT_NO_SUCH;
+
+    stuEntry->sid = entry.sid;
+
+    for(lport=0; lport<dev->numOfPorts; lport++)
+    {
+        port = GT_LPORT_2_PORT(lport);
+        stuEntry->portState[lport]=entry.portState[port];
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gstuFindSidEntry
+*
+* DESCRIPTION:
+*       Find STU entry for a specific SID, it will return the entry, if found,
+*       along with its associated data
+*
+* INPUTS:
+*       stuEntry - contains the SID to searche for
+*
+* OUTPUTS:
+*       found    - GT_TRUE, if the appropriate entry exists.
+*       stuEntry - the entry parameters.
+*
+* RETURNS:
+*       GT_OK      - on success.
+*       GT_FAIL    - on error or entry does not exist.
+*       GT_NO_SUCH - no such entry.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        Valid SID is 1 ~ 63.
+*
+*******************************************************************************/
+GT_STATUS gstuFindSidEntry
+(
+    IN  GT_QD_DEV       *dev,
+    INOUT GT_STU_ENTRY  *stuEntry,
+    OUT GT_BOOL         *found
+)
+{
+    GT_U8               valid;
+    GT_STATUS           retVal;
+    GT_U8               port;
+    GT_LPORT            lport;
+    GT_STU_ENTRY        entry;
+
+    DBG_INFO(("gstuFindSidEntry Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_802_1S_STU))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if((stuEntry->sid == 0) || (stuEntry->sid > 0x3F))
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    *found = GT_FALSE;
+
+    /* Decrement 1 from sid */
+    entry.sid   = stuEntry->sid-1;
+    valid = 0; /* valid is not used as input in this operation */
+
+    retVal = stuOperationPerform(dev,GET_NEXT_STU_ENTRY,&valid, &entry);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (stuOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    /* retrive the value from the operation */
+    if ((entry.sid != stuEntry->sid) | (valid == 0))
+        return GT_NO_SUCH;
+
+    for(lport=0; lport<dev->numOfPorts; lport++)
+    {
+        port = GT_LPORT_2_PORT(lport);
+        stuEntry->portState[lport]=entry.portState[port];
+    }
+
+    *found = GT_TRUE;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gstuAddEntry
+*
+* DESCRIPTION:
+*       Creates or update the entry in STU table based on user input.
+*
+* INPUTS:
+*       stuEntry    - stu entry to insert to the STU.
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK             - on success
+*       GT_FAIL           - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        Valid SID is 1 ~ 63.
+*
+*******************************************************************************/
+GT_STATUS gstuAddEntry
+(
+    IN  GT_QD_DEV       *dev,
+    IN  GT_STU_ENTRY    *stuEntry
+)
+{
+    GT_U8               valid;
+    GT_STATUS           retVal;
+    GT_U8           port;
+    GT_LPORT           lport;
+    GT_STU_ENTRY     tmpStuEntry;
+    GT_BOOL             found;
+    int                count = 50000;
+    GT_STU_ENTRY        entry;
+
+    DBG_INFO(("gstuAddEntry Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_802_1S_STU))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if((stuEntry->sid == 0) || (stuEntry->sid > 0x3F))
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    entry.sid = stuEntry->sid;
+
+    valid = 1; /* for load operation */
+
+    for(port=0; port<dev->maxPorts; port++)
+    {
+        lport = GT_PORT_2_LPORT(port);
+        if (lport == GT_INVALID_PORT)
+            entry.portState[port] = 0;
+        else
+            entry.portState[port] = stuEntry->portState[lport];
+    }
+
+    retVal = stuOperationPerform(dev,LOAD_PURGE_STU_ENTRY,&valid, &entry);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (stuOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    /* verify that the given entry has been added */
+    tmpStuEntry.sid = stuEntry->sid;
+
+    if((retVal = gstuFindSidEntry(dev,&tmpStuEntry,&found)) != GT_OK)
+    {
+        while(count--);
+        if((retVal = gstuFindSidEntry(dev,&tmpStuEntry,&found)) != GT_OK)
+        {
+            DBG_INFO(("Added entry cannot be found\n"));
+            return retVal;
+        }
+    }
+    if(found == GT_FALSE)
+    {
+        DBG_INFO(("Added entry cannot be found\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gstuDelEntry
+*
+* DESCRIPTION:
+*       Deletes STU entry specified by user.
+*
+* INPUTS:
+*       stuEntry - the STU entry to be deleted
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        Valid SID is 1 ~ 63.
+*
+*******************************************************************************/
+GT_STATUS gstuDelEntry
+(
+    IN  GT_QD_DEV       *dev,
+    IN  GT_STU_ENTRY     *stuEntry
+)
+{
+    GT_U8               valid;
+    GT_STATUS           retVal;
+    GT_STU_ENTRY        entry;
+
+    DBG_INFO(("gstuDelEntry Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_802_1S_STU))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if((stuEntry->sid == 0) || (stuEntry->sid > 0x3F))
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    entry.sid = stuEntry->sid;
+    valid = 0; /* for delete operation */
+
+    retVal = stuOperationPerform(dev,LOAD_PURGE_STU_ENTRY,&valid, &entry);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (stuOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/****************************************************************************/
+/* Internal use functions.                                                  */
+/****************************************************************************/
+
+static GT_STATUS stuSetSTUData
+(
+    IN    GT_QD_DEV           *dev,
+    IN    GT_STU_ENTRY        *entry
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data1,data2,data3;           /* Data to be set into the      */
+    GT_U16            nStuData = 0;
+
+    data1 = data2 = data3 = 0;
+
+    switch (dev->maxPorts)
+    {
+        case 11:
+            data3 |= (entry->portState[10] & 3) << 10;
+            /* pass through */
+        case 10:
+            data3 |= (entry->portState[9] & 3) << 6;
+            /* pass through */
+        case 9:
+            data3 |= (entry->portState[8] & 3) << 2;
+            nStuData++;
+
+            /* pass through */
+        case 8:
+            data2 |= (entry->portState[7] & 3) << 14;
+            /* pass through */
+        case 7:
+            data2 |= (entry->portState[6] & 3) << 10;
+            /* pass through */
+        case 6:
+            data2 |= (entry->portState[5] & 3) << 6;
+            /* pass through */
+        case 5:
+            data2 |= (entry->portState[4] & 3) << 2;
+            nStuData++;
+
+            /* pass through */
+        case 4:
+            data1 |= (entry->portState[3] & 3) << 14;
+            /* pass through */
+        case 3:
+            data1 |= (entry->portState[2] & 3) << 10;
+            /* pass through */
+        case 2:
+            data1 |= (entry->portState[1] & 3) << 6;
+            /* pass through */
+        case 1:
+            data1 |= (entry->portState[0] & 3) << 2;
+            nStuData++;
+            break;
+
+        default:
+            return GT_FAIL;
+    }
+
+    switch(nStuData)
+    {
+        case 3:
+            retVal = hwWriteGlobalReg(dev,QD_REG_VTU_DATA3_REG,data3);
+            if(retVal != GT_OK)
+            {
+                return retVal;
+            }
+            /* pass through */
+        case 2:
+            retVal = hwWriteGlobalReg(dev,QD_REG_VTU_DATA2_REG,data2);
+            if(retVal != GT_OK)
+            {
+                return retVal;
+            }
+            /* pass through */
+        case 1:
+            retVal = hwWriteGlobalReg(dev,QD_REG_VTU_DATA1_REG,data1);
+            if(retVal != GT_OK)
+            {
+                return retVal;
+            }
+            break;
+        default:
+            return GT_FAIL;
+    }
+
+    return retVal;
+}
+
+static GT_STATUS stuGetSTUData
+(
+    IN    GT_QD_DEV           *dev,
+    OUT    GT_STU_ENTRY        *entry
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data1,data2,data3;           /* Data to be set into the      */
+    GT_U16            nStuData = 0;
+
+    data1 = data2 = data3 = 0;
+
+    gtMemSet((void*)entry->portState,0,sizeof(entry->portState));
+
+    switch (dev->maxPorts)
+    {
+        case 11:
+        case 10:
+        case 9:
+            nStuData = 3;
+            break;
+
+        case 8:
+        case 7:
+        case 6:
+        case 5:
+            nStuData = 2;
+            break;
+
+        case 4:
+        case 3:
+        case 2:
+        case 1:
+            nStuData = 1;
+            break;
+
+        default:
+            return GT_FAIL;
+    }
+
+    switch(nStuData)
+    {
+        case 3:
+            retVal = hwReadGlobalReg(dev,QD_REG_VTU_DATA3_REG,&data3);
+            if(retVal != GT_OK)
+            {
+                return retVal;
+            }
+            /* pass through */
+        case 2:
+            retVal = hwReadGlobalReg(dev,QD_REG_VTU_DATA2_REG,&data2);
+            if(retVal != GT_OK)
+            {
+                return retVal;
+            }
+            /* pass through */
+        case 1:
+            retVal = hwReadGlobalReg(dev,QD_REG_VTU_DATA1_REG,&data1);
+            if(retVal != GT_OK)
+            {
+                return retVal;
+            }
+            break;
+        default:
+            return GT_FAIL;
+    }
+
+    switch (dev->maxPorts)
+    {
+        case 11:
+            entry->portState[10]  = (data3 >> 10) & 3 ;
+            /* pass through */
+        case 10:
+            entry->portState[9]  = (data3 >> 6) & 3 ;
+            /* pass through */
+        case 9:
+            entry->portState[8]  = (data3 >> 2) & 3 ;
+            /* pass through */
+        case 8:
+            entry->portState[7]  = (data2 >> 14) & 3 ;
+            /* pass through */
+        case 7:
+            entry->portState[6]  = (data2 >> 10) & 3 ;
+            /* pass through */
+        case 6:
+            entry->portState[5]  = (data2 >> 6) & 3 ;
+            /* pass through */
+        case 5:
+            entry->portState[4]  = (data2 >> 2) & 3 ;
+            /* pass through */
+        case 4:
+            entry->portState[3]  = (data1 >> 14) & 3 ;
+            /* pass through */
+        case 3:
+            entry->portState[2]  = (data1 >> 10) & 3 ;
+            /* pass through */
+        case 2:
+            entry->portState[1]  = (data1 >> 6) & 3 ;
+            /* pass through */
+        case 1:
+            entry->portState[0]  = (data1 >> 2) & 3 ;
+            break;
+
+        default:
+            return GT_FAIL;
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* stuOperationPerform
+*
+* DESCRIPTION:
+*       This function is used by all STU control functions, and is responsible
+*       to write the required operation into the STU registers.
+*
+* INPUTS:
+*       stuOp       - The STU operation bits to be written into the STU
+*                     operation register.
+*       sid         - sid
+*       valid       - valid bit
+*       stuData     - STU Data with port state information
+*
+* OUTPUTS:
+*       sid         - sid
+*       valid       - valid bit
+*       stuData     - STU Data with port state information
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+static GT_STATUS stuOperationPerform
+(
+    IN        GT_QD_DEV           *dev,
+    IN      GT_STU_OPERATION    stuOp,
+    INOUT   GT_U8               *valid,
+    INOUT    GT_STU_ENTRY        *entry
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                /* register.                    */
+
+    gtSemTake(dev,dev->vtuRegsSem,OS_WAIT_FOREVER);
+
+    /* Wait until the VTU in ready. */
+    data = 1;
+    while(data == 1)
+    {
+        retVal = hwGetGlobalRegField(dev,QD_REG_VTU_OPERATION,15,1,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->vtuRegsSem);
+            return retVal;
+        }
+    }
+
+    /* Set the VTU data register if Load operation is required. */
+    if (stuOp == LOAD_PURGE_STU_ENTRY)
+    {
+        if (*valid == 1)
+        {
+            /* set the Port State for all the ports */
+            retVal = stuSetSTUData(dev,entry);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->vtuRegsSem);
+                return retVal;
+            }
+
+            /* Set the valid bit (QD_REG_VTU_VID_REG) */
+               data= *valid << 12 ;
+            retVal = hwWriteGlobalReg(dev,(GT_U8)(QD_REG_VTU_VID_REG),data);
+               if(retVal != GT_OK)
+            {
+                   gtSemGive(dev,dev->vtuRegsSem);
+                return retVal;
+                  }
+        }
+        else
+        {
+            /* Clear the valid bit (QD_REG_VTU_VID_REG) */
+               data= 0 ;
+            retVal = hwWriteGlobalReg(dev,(GT_U8)(QD_REG_VTU_VID_REG),data);
+               if(retVal != GT_OK)
+            {
+                   gtSemGive(dev,dev->vtuRegsSem);
+                return retVal;
+                  }
+        }
+    }
+
+    /* Set the SID register (QD_REG_STU_SID_REG) */
+       data= (entry->sid) & 0x3F;
+    retVal = hwWriteGlobalReg(dev,(GT_U8)(QD_REG_STU_SID_REG),data);
+       if(retVal != GT_OK)
+    {
+           gtSemGive(dev,dev->vtuRegsSem);
+        return retVal;
+       }
+
+    /* Start the STU Operation by defining the stuOp and VTUBusy */
+    data = (1 << 15) | (stuOp << 12);
+
+    retVal = hwWriteGlobalReg(dev,QD_REG_VTU_OPERATION,data);
+    if(retVal != GT_OK)
+    {
+        gtSemGive(dev,dev->vtuRegsSem);
+        return retVal;
+    }
+
+    /* If the operation is a get next operation wait for the response   */
+    if(stuOp == GET_NEXT_STU_ENTRY)
+    {
+        /* Wait until the STU in ready. */
+        data = 1;
+        while(data == 1)
+        {
+            retVal = hwGetGlobalRegField(dev,QD_REG_VTU_OPERATION,15,1,&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->vtuRegsSem);
+                return retVal;
+            }
+        }
+
+        /****************** get the valid bit *******************/
+        retVal = hwGetGlobalRegField(dev,QD_REG_VTU_VID_REG,12,1,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->vtuRegsSem);
+            return retVal;
+        }
+
+        *valid = (GT_U8)data;
+
+        /****************** get the sid *******************/
+
+        retVal = hwReadGlobalReg(dev,QD_REG_STU_SID_REG,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->vtuRegsSem);
+            return retVal;
+        }
+
+        /* the sid is bits 0-5 */
+        entry->sid   = data & 0x3F;
+
+        if (*valid)
+        {
+            /* get the Port State for all the ports */
+            retVal = stuGetSTUData(dev,entry);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->vtuRegsSem);
+                return retVal;
+            }
+
+        } /* entry is valid */
+
+    } /* end of get next entry */
+
+    gtSemGive(dev,dev->vtuRegsSem);
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtBrgVlan.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtBrgVlan.c
new file mode 100644
index 000000000000..76b6dd6c1803
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtBrgVlan.c
@@ -0,0 +1,1312 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtBrgVlan.c
+*
+* DESCRIPTION:
+*       API definitions to handle port-based vlan configuration.
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*       $Revision: 5 $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+/*******************************************************************************
+* gprtSetEgressMode
+*
+* DESCRIPTION:
+*       This routine set the egress mode.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - the egress mode.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetEgressMode
+(
+    IN GT_QD_DEV       *dev,
+    IN GT_LPORT        port,
+    IN GT_EGRESS_MODE  mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("gprtSetEgressMode Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,phyPort, DEV_TAGGING)) != GT_OK )
+      return retVal;
+
+    switch (mode)
+    {
+        case (GT_UNMODIFY_EGRESS):
+            data = 0;
+            break;
+
+        case (GT_TAGGED_EGRESS):
+            data = 2;
+            break;
+
+        case (GT_UNTAGGED_EGRESS):
+            data = 1;
+            break;
+
+        case (GT_ADD_TAG):
+            if(!IS_IN_DEV_GROUP(dev,DEV_EGRESS_DOUBLE_TAGGING))
+            {
+                DBG_INFO(("GT_NOT_SUPPORTED\n"));
+                return GT_NOT_SUPPORTED;
+            }
+            data = 3;
+            break;
+        default:
+            DBG_INFO(("Failed.\n"));
+            return GT_FAIL;
+    }
+
+    retVal = hwSetPortRegField(dev,phyPort,QD_REG_PORT_CONTROL,12,2,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gprtGetEgressMode
+*
+* DESCRIPTION:
+*       This routine get the egress mode.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       mode - the egress mode.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetEgressMode
+(
+    IN GT_QD_DEV       *dev,
+    IN  GT_LPORT        port,
+    OUT GT_EGRESS_MODE  *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    if(mode == NULL)
+        return GT_BAD_PARAM;
+
+    DBG_INFO(("gprtGetEgressMode Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    retVal = hwGetPortRegField(dev,phyPort,QD_REG_PORT_CONTROL,12,2,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    switch (data)
+    {
+        case (0):
+            *mode = GT_UNMODIFY_EGRESS;
+            break;
+
+        case (2):
+            *mode = GT_TAGGED_EGRESS;
+            break;
+
+        case (1):
+            *mode = GT_UNTAGGED_EGRESS;
+            break;
+
+        case (3):
+            *mode = GT_ADD_TAG;
+            break;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gprtSetVlanTunnel
+*
+* DESCRIPTION:
+*       This routine sets the vlan tunnel mode.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - the vlan tunnel mode.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetVlanTunnel
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL   mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("gprtSetVlanTunnel Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+    BOOL_2_BIT(mode,data);
+
+    retVal = hwSetPortRegField(dev,phyPort,QD_REG_PORT_CONTROL,7,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gprtGetVlanTunnel
+*
+* DESCRIPTION:
+*       This routine get the vlan tunnel mode.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       mode - the vlan tunnel mode..
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetVlanTunnel
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("gprtGetVlanTunnel Called.\n"));
+    if(mode == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    retVal = hwGetPortRegField(dev,phyPort,QD_REG_PORT_CONTROL,7,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*mode);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gvlnSetPortVlanPorts
+*
+* DESCRIPTION:
+*       This routine sets the port VLAN group port membership list.
+*
+* INPUTS:
+*       port        - logical port number to set.
+*       memPorts    - array of logical ports in the same vlan.
+*       memPortsLen - number of members in memPorts array
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnSetPortVlanPorts
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_LPORT  memPorts[],
+    IN GT_U8     memPortsLen
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    GT_U8           phyPort;        /* Physical port.               */
+    GT_U8           i;
+
+    DBG_INFO(("gvlnSetPortVlanPorts Called.\n"));
+    if(memPorts == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    phyPort = GT_LPORT_2_PORT(port);
+    data = 0;
+
+    if(memPortsLen > dev->numOfPorts)
+    {
+        DBG_INFO(("Failed (PortsLen Too Big).\n"));
+        return GT_BAD_PARAM;
+    }
+
+    for(i = 0; i < memPortsLen; i++)
+        data |= (1 << GT_LPORT_2_PORT(memPorts[i]));
+
+    /* numOfPorts = 3 for fullsail, = 10 for octane, = 7 for others */
+    retVal = hwSetPortRegField(dev,phyPort,QD_REG_PORT_VLAN_MAP,0,dev->maxPorts,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gvlnGetPortVlanPorts
+*
+* DESCRIPTION:
+*       This routine gets the port VLAN group port membership list.
+*
+* INPUTS:
+*       port        - logical port number to set.
+*
+* OUTPUTS:
+*       memPorts    - array of logical ports in the same vlan.
+*       memPortsLen - number of members in memPorts array
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnGetPortVlanPorts
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_LPORT memPorts[],
+    OUT GT_U8    *memPortsLen
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U8           phyPort;        /* Physical port.               */
+    GT_U8           i;
+
+    DBG_INFO(("gvlnGetPortVlanPorts Called.\n"));
+    if((memPorts == NULL) || (memPortsLen == NULL))
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* memPortsLen = 3 for fullsail, =7 for others */
+    retVal = hwGetPortRegField(dev,phyPort,QD_REG_PORT_VLAN_MAP,0,dev->maxPorts,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    i = 0;
+    for(phyPort = 0; phyPort < dev->maxPorts; phyPort++)
+    {
+        if(!GT_IS_PORT_SET(dev->validPortVec, phyPort))
+            continue;
+
+        if(((1 << phyPort) & data) != 0)
+        {
+            memPorts[i] = GT_PORT_2_LPORT(phyPort);
+            i++;
+        }
+    }
+    *memPortsLen = i;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gvlnSetPortUserPriLsb
+*
+* DESCRIPTION:
+*       This routine Set the user priority (VPT) LSB bit, to be added to the
+*       user priority on the egress.
+*
+* INPUTS:
+*       port       - logical port number to set.
+*       userPriLsb - GT_TRUE for 1, GT_FALSE for 0.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnSetPortUserPriLsb
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL   userPriLsb
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("gvlnSetPortUserPriLsb Called.\n"));
+
+    /* Gigabit Switch does not support this status. */
+    if ((IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_ENHANCED_FE_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    phyPort = GT_LPORT_2_PORT(port);
+    BOOL_2_BIT(userPriLsb,data);
+
+    retVal = hwSetPortRegField(dev,phyPort,QD_REG_PVID,13,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gvlnGetPortUserPriLsb
+*
+* DESCRIPTION:
+*       This routine gets the user priority (VPT) LSB bit.
+*
+* INPUTS:
+*       port       - logical port number to set.
+*
+* OUTPUTS:
+*       userPriLsb - GT_TRUE for 1, GT_FALSE for 0.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnGetPortUserPriLsb
+(
+    IN GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *userPriLsb
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("gvlnGetPortUserPriLsb Called.\n"));
+
+    /* Gigabit Switch does not support this status. */
+    if ((IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_ENHANCED_FE_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(userPriLsb == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    retVal = hwGetPortRegField(dev,phyPort,QD_REG_PVID,13,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*userPriLsb);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gvlnSetPortVid
+*
+* DESCRIPTION:
+*       This routine Set the port default vlan id.
+*
+* INPUTS:
+*       port - logical port number to set.
+*       vid  - the port vlan id.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnSetPortVid
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_U16       vid
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("gvlnSetPortVid Called.\n"));
+    phyPort = GT_LPORT_2_PORT(port);
+
+    retVal = hwSetPortRegField(dev,phyPort,QD_REG_PVID,0,12, vid);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gvlnGetPortVid
+*
+* DESCRIPTION:
+*       This routine Get the port default vlan id.
+*
+* INPUTS:
+*       port - logical port number to set.
+*
+* OUTPUTS:
+*       vid  - the port vlan id.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnGetPortVid
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_U16   *vid
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("gvlnGetPortVid Called.\n"));
+    if(vid == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    retVal = hwGetPortRegField(dev,phyPort,QD_REG_PVID,0,12, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    *vid = data;
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gvlnSetPortVlanDBNum
+*
+* DESCRIPTION:
+*       This routine sets the port's default VLAN database number (DBNum or
+*        FID, Forwarding Information Database).
+*
+* INPUTS:
+*       port    - logical port number to set.
+*       DBNum     - database number for this port (or FID)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:IN GT_INGRESS_MODE mode
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnSetPortVlanDBNum
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_U32    DBNum
+)
+{
+
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("gvlnSetPortVlanDBNum Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    if(IS_IN_DEV_GROUP(dev,DEV_DBNUM_4096))
+    {
+        if(DBNum > 4095)
+        {
+            return GT_BAD_PARAM;
+        }
+        retVal = hwSetPortRegField(dev,phyPort,QD_REG_PORT_CONTROL1,0,8,(GT_U16)((DBNum & 0xFF0) >> 4));
+        retVal = hwSetPortRegField(dev,phyPort,QD_REG_PORT_VLAN_MAP,12,4,(GT_U16)(DBNum & 0x000F));
+    }
+    else if(IS_IN_DEV_GROUP(dev,DEV_DBNUM_256))
+    {
+        if(DBNum > 255)
+        {
+            return GT_BAD_PARAM;
+        }
+        retVal = hwSetPortRegField(dev,phyPort,QD_REG_PORT_CONTROL1,0,4,(GT_U16)((DBNum & 0xF0) >> 4));
+        retVal = hwSetPortRegField(dev,phyPort,QD_REG_PORT_VLAN_MAP,12,4,(GT_U16)(DBNum & 0x0F));
+    }
+    else if(IS_IN_DEV_GROUP(dev,DEV_DBNUM_64))
+    {
+        if(DBNum > 63)
+        {
+            return GT_BAD_PARAM;
+        }
+        if(IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY))
+        {
+          retVal = hwSetPortRegField(dev,phyPort,QD_REG_PORT_CONTROL1,0,2,(GT_U16)((DBNum & 0x30) >> 4));
+          retVal = hwSetPortRegField(dev,phyPort,QD_REG_PORT_VLAN_MAP,12,4,(GT_U16)(DBNum & 0x0F));
+        }
+        else
+        {
+          retVal = hwSetPortRegField(dev,phyPort,QD_REG_PORT_VLAN_MAP,6,2,(GT_U16)((DBNum & 0x30) >> 4));
+          retVal = hwSetPortRegField(dev,phyPort,QD_REG_PORT_VLAN_MAP,12,4,(GT_U16)(DBNum & 0x0F));
+        }
+    }
+    else
+    {
+        if(DBNum > 15)
+        {
+            return GT_BAD_PARAM;
+        }
+        retVal = hwSetPortRegField(dev,phyPort,QD_REG_PORT_VLAN_MAP,12,4,(GT_U16)(DBNum & 0x0F));
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gvlnGetPortVlanDBNum
+*
+* DESCRIPTION:
+*       This routine gets the port's default VLAN database number (DBNum or
+*        FID, Forwarding Information Database).
+*
+* INPUTS:
+*       port     - logical port number to get.
+*
+* OUTPUTS:
+*       DBNum     - database number for this port (or FID)
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnGetPortVlanDBNum
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_U32   *DBNum
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data,dataH;           /* The register's read data.    */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("gvlnGetPortVlanDBNum Called.\n"));
+
+    if(DBNum == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    if(IS_IN_DEV_GROUP(dev,DEV_DBNUM_4096))
+    {
+        retVal = hwGetPortRegField(dev,phyPort,QD_REG_PORT_CONTROL1,0,8, &dataH);
+        retVal = hwGetPortRegField(dev,phyPort,QD_REG_PORT_VLAN_MAP,12,4, &data);
+    }
+    else if(IS_IN_DEV_GROUP(dev,DEV_DBNUM_256))
+    {
+        retVal = hwGetPortRegField(dev,phyPort,QD_REG_PORT_CONTROL1,0,4, &dataH);
+        retVal = hwGetPortRegField(dev,phyPort,QD_REG_PORT_VLAN_MAP,12,4, &data);
+    }
+    else if(IS_IN_DEV_GROUP(dev,DEV_DBNUM_64))
+    {
+        if(IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY))
+        {
+          retVal = hwGetPortRegField(dev,phyPort,QD_REG_PORT_CONTROL1,0,2, &dataH);
+          retVal = hwGetPortRegField(dev,phyPort,QD_REG_PORT_VLAN_MAP,12,4, &data);
+        }
+        else
+        {
+          retVal = hwGetPortRegField(dev,phyPort,QD_REG_PORT_VLAN_MAP,6,2, &dataH);
+          retVal = hwGetPortRegField(dev,phyPort,QD_REG_PORT_VLAN_MAP,12,4, &data);
+        }
+    }
+    else
+    {
+        dataH = 0;
+        retVal = hwGetPortRegField(dev,phyPort,QD_REG_PORT_VLAN_MAP,12,4, &data);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    *DBNum = (GT_U32)(data | (dataH << 4));
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/********************************************************************
+* gvlnSetPortVlanDot1qMode
+*
+* DESCRIPTION:
+*       This routine sets the IEEE 802.1q mode for this port (11:10)
+*
+* INPUTS:
+*       port    - logical port number to set.
+*       mode     - 802.1q mode for this port
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:IN GT_INGRESS_MODE mode
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnSetPortVlanDot1qMode
+(
+    IN GT_QD_DEV        *dev,
+    IN GT_LPORT     port,
+    IN GT_DOT1Q_MODE    mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("gvlnSetPortVlanDot1qMode Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,phyPort, DEV_802_1Q)) != GT_OK )
+      return retVal;
+
+    if ((IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_ENHANCED_FE_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY)))
+    {
+        retVal = hwSetPortRegField(dev,phyPort,QD_REG_PORT_CONTROL2,10,2,(GT_U16)mode );
+    }
+    else
+    {
+        retVal = hwSetPortRegField(dev,phyPort,QD_REG_PORT_VLAN_MAP,10,2,(GT_U16)mode );
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gvlnGetPortVlanDot1qMode
+*
+* DESCRIPTION:
+*       This routine gets the IEEE 802.1q mode for this (bit 11:10).
+*
+* INPUTS:
+*       port     - logical port number to get.
+*
+* OUTPUTS:
+*       mode     - 802.1q mode for this port
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnGetPortVlanDot1qMode
+(
+    IN GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    OUT GT_DOT1Q_MODE   *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("gvlnGetPortVlanDot1qMode Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,phyPort, DEV_802_1Q)) != GT_OK )
+      return retVal;
+
+    if(mode == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    if ((IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_ENHANCED_FE_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY)))
+    {
+        retVal = hwGetPortRegField(dev,phyPort,QD_REG_PORT_CONTROL2,10,2, &data);
+    }
+    else
+    {
+        retVal = hwGetPortRegField(dev,phyPort,QD_REG_PORT_VLAN_MAP,10,2, &data);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    *mode = data;
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/********************************************************************
+* gvlnSetPortVlanForceDefaultVID
+*
+* DESCRIPTION:
+*       This routine sets the mode for forcing to use default VID
+*
+* INPUTS:
+*       port    - logical port number to set.
+*       mode    - GT_TRUE, force to use default VID
+*                 GT_FAULSE, otherwise
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnSetPortVlanForceDefaultVID
+(
+    IN GT_QD_DEV        *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL      mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           phyPort;        /* Physical port.               */
+    GT_U16          data;           /* Data to be set into the      */
+
+    DBG_INFO(("gvlnSetPortForceDefaultVID Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,phyPort, DEV_802_1Q)) != GT_OK )
+      return retVal;
+
+    BOOL_2_BIT(mode,data);
+
+    retVal = hwSetPortRegField(dev,phyPort,QD_REG_PVID,12,1,data );
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gvlnGetPortVlanForceDefaultVID
+*
+* DESCRIPTION:
+*       This routine gets the port mode for ForceDefaultVID (bit 12).
+*
+* INPUTS:
+*       port     - logical port number to get.
+*
+* OUTPUTS:
+*       mode     - ForceDefaultVID mode for this port
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvlnGetPortVlanForceDefaultVID
+(
+    IN GT_QD_DEV        *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL        *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U8           phyPort;        /* Physical port.               */
+
+
+    DBG_INFO(("gvlnGetPortVlanDot1qMode Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,phyPort, DEV_802_1Q)) != GT_OK )
+      return retVal;
+
+    if(mode == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    retVal = hwGetPortRegField(dev,phyPort,QD_REG_PVID,12,1, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*mode);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/********************************************************************
+* gvlnSetForceMap
+*
+* DESCRIPTION:
+*       This routine enables/disables Force Map feature.
+*        When Force Map feature is enabled, all received frames will be
+*        considered MGMT and they are mapped to the port or ports defined
+*        in the VLAN Table overriding the mapping from the address database.
+*
+* INPUTS:
+*       port    - logical port number to set.
+*       mode    - GT_TRUE, to enable force map feature
+*                 GT_FAULSE, otherwise
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gvlnSetForceMap
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_LPORT     port,
+    IN  GT_BOOL      mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           phyPort;        /* Physical port.               */
+    GT_U16          data;           /* Data to be set into the reg  */
+
+    DBG_INFO(("gvlnSetForceMap Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FORCE_MAP))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(mode,data);
+
+    retVal = hwSetPortRegField(dev,phyPort, QD_REG_PORT_VLAN_MAP, 8, 1, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+
+    return GT_OK;
+}
+
+
+/********************************************************************
+* gvlnGetForceMap
+*
+* DESCRIPTION:
+*       This routine checks if Force Map feature is enabled.
+*        When Force Map feature is enabled, all received frames will be
+*        considered MGMT and they are mapped to the port or ports defined
+*        in the VLAN Table overriding the mapping from the address database.
+*
+* INPUTS:
+*       port    - logical port number to set.
+*
+* OUTPUTS:
+*       mode    - GT_TRUE, to enable force map feature
+*                 GT_FAULSE, otherwise
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gvlnGetForceMap
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           phyPort;        /* Physical port.               */
+    GT_U16          data;           /* Data to be set into the reg  */
+
+    DBG_INFO(("gvlnGetForceMap Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FORCE_MAP))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = hwGetPortRegField(dev,phyPort, QD_REG_PORT_VLAN_MAP, 8, 1, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data, *mode);
+
+    DBG_INFO(("OK.\n"));
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gvlnSetNoEgrPolicy
+*
+* DESCRIPTION:
+*        No Egress Policy. When this bit is set to a one Egress 802.1Q Secure and
+*        Check discards are not performed. This mode allowsa non-802.1Q enabled
+*        port to send a frame to an 802.1Q enabled port that is configured in the
+*        Secure or Check 802.1Q mode. In this situation the frames will egress
+*        even if the VID assigned to the frame is not found in the VTU.
+*
+* INPUTS:
+*        mode - no egress policy mode
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gvlnSetNoEgrPolicy
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+
+    DBG_INFO(("gvlnSetNoEgrPolicy Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_NO_EGRESS_POLICY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(mode,data);
+
+    /* Set related register */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_SDET_POLARITY,13,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gvlnGetNoEgrPolicy
+*
+* DESCRIPTION:
+*        No Egress Policy. When this bit is set to a one Egress 802.1Q Secure and
+*        Check discards are not performed. This mode allowsa non-802.1Q enabled
+*        port to send a frame to an 802.1Q enabled port that is configured in the
+*        Secure or Check 802.1Q mode. In this situation the frames will egress
+*        even if the VID assigned to the frame is not found in the VTU.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        mode - no egress policy mode
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gvlnGetNoEgrPolicy
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL        *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gvlnGetNoEgrPolicy Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_NO_EGRESS_POLICY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related register */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_SDET_POLARITY,13,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*mode);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtBrgVtu.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtBrgVtu.c
new file mode 100644
index 000000000000..bdc6774d15bb
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtBrgVtu.c
@@ -0,0 +1,1592 @@
+#include <Copyright.h>
+
+/*******************************************************************************
+* gtBrgVtu.c
+*
+* DESCRIPTION:
+*       API definitions for Vlan Translation Unit for 802.1Q.
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*       $Revision: 9 $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtSem.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+/****************************************************************************/
+/* Forward function declaration.                                            */
+/****************************************************************************/
+#define MEMBER_TAG_CONV_FOR_APP(_dev,_tag)    memberTagConversionForApp(_dev,_tag)
+#define MEMBER_TAG_CONV_FOR_DEV(_dev,_tag)    memberTagConversionForDev(_dev,_tag)
+
+static GT_U8 memberTagConversionForApp
+(
+    IN    GT_QD_DEV           *dev,
+    IN    GT_U8               tag
+)
+{
+    GT_U8 convTag;
+
+    /* check if memberTag needs to be converted */
+    if (!((IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_ENHANCED_FE_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY))))
+        return tag;
+
+    switch(tag)
+    {
+        case 0:
+                convTag = MEMBER_EGRESS_UNMODIFIED;
+                break;
+        case 1:
+                convTag = MEMBER_EGRESS_UNTAGGED;
+                break;
+        case 2:
+                convTag = MEMBER_EGRESS_TAGGED;
+                break;
+        case 3:
+                convTag = NOT_A_MEMBER;
+                break;
+        default:
+                DBG_INFO(("Unknown Tag (%#x) from Device !!!.\n",tag));
+                convTag = 0xFF;
+                break;
+
+    }
+
+    return convTag;
+}
+
+static GT_U8 memberTagConversionForDev
+(
+    IN    GT_QD_DEV           *dev,
+    IN    GT_U8               tag
+)
+{
+    GT_U8 convTag;
+
+    /* check if memberTag needs to be converted */
+    if (!((IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_ENHANCED_FE_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY))))
+        return tag;
+
+    switch(tag)
+    {
+        case MEMBER_EGRESS_UNMODIFIED:
+                convTag = 0;
+                break;
+        case NOT_A_MEMBER:
+                convTag = 3;
+                break;
+        case MEMBER_EGRESS_UNTAGGED:
+                convTag = 1;
+                break;
+        case MEMBER_EGRESS_TAGGED:
+                convTag = 2;
+                break;
+        default:
+                DBG_INFO(("Unknown Tag (%#x) from App. !!!.\n",tag));
+                convTag = 0xFF;
+                break;
+
+    }
+
+    return convTag;
+}
+
+static GT_STATUS vtuOperationPerform
+(
+    IN        GT_QD_DEV           *dev,
+    IN      GT_VTU_OPERATION    vtuOp,
+    INOUT   GT_U8               *valid,
+    INOUT     GT_VTU_ENTRY        *vtuEntry
+);
+
+/*******************************************************************************
+* gvtuGetEntryCount
+*
+* DESCRIPTION:
+*       Gets the current number of valid entries in the VTU table
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       numEntries - number of VTU entries.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NO_SUCH - vlan does not exist.
+*
+* COMMENTS:
+*       None
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvtuGetEntryCount
+(
+    IN  GT_QD_DEV *dev,
+    OUT GT_U32    *numEntries
+)
+{
+    GT_U8               valid;
+    GT_U32        numOfEntries;
+    GT_STATUS           retVal;
+    GT_VTU_ENTRY        entry;
+
+    DBG_INFO(("gvtuGetEntryCount Called.\n"));
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,1, DEV_802_1Q)) != GT_OK)
+      return retVal;
+
+    entry.vid = 0xFFF;
+    entry.DBNum = 0;
+
+    numOfEntries = 0;
+    while(1)
+    {
+        retVal = vtuOperationPerform(dev,GET_NEXT_ENTRY,&valid,&entry);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed (vtuOperationPerform returned GT_FAIL).\n"));
+            return retVal;
+        }
+
+        if( entry.vid==0xFFF )
+        {
+            if (valid==1) numOfEntries++;
+            break;
+        }
+
+        numOfEntries++;
+    }
+
+    *numEntries = numOfEntries;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gvtuGetEntryFirst
+*
+* DESCRIPTION:
+*       Gets first lexicographic entry from the VTU.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       vtuEntry - match VTU entry.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NO_SUCH - table is empty.
+*
+* COMMENTS:
+*       Search starts from vid of all one's
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvtuGetEntryFirst
+(
+    IN  GT_QD_DEV       *dev,
+    OUT GT_VTU_ENTRY    *vtuEntry
+)
+{
+    GT_U8               valid;
+    GT_STATUS           retVal;
+    GT_U8               port;
+    GT_LPORT               lport;
+    GT_VTU_ENTRY        entry;
+
+    DBG_INFO(("gvtuGetEntryFirst Called.\n"));
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,1, DEV_802_1Q)) != GT_OK)
+      return retVal;
+
+    entry.vid = 0xFFF;
+    entry.DBNum = 0;
+
+    retVal = vtuOperationPerform(dev,GET_NEXT_ENTRY,&valid, &entry);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (vtuOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    /* retrive the value from the operation */
+
+    if((entry.vid == 0xFFF) && (valid == 0))
+        return GT_NO_SUCH;
+
+    vtuEntry->DBNum = entry.DBNum;
+    vtuEntry->vid   = entry.vid;
+
+    vtuEntry->vidPriOverride = entry.vidPriOverride;
+    vtuEntry->vidPriority = entry.vidPriority;
+
+    vtuEntry->vidPolicy = entry.vidPolicy;
+    vtuEntry->sid = entry.sid;
+
+    vtuEntry->vidExInfo.useVIDFPri = entry.vidExInfo.useVIDFPri;
+    vtuEntry->vidExInfo.vidFPri = entry.vidExInfo.vidFPri;
+    vtuEntry->vidExInfo.useVIDQPri = entry.vidExInfo.useVIDQPri;
+    vtuEntry->vidExInfo.vidQPri = entry.vidExInfo.vidQPri;
+    vtuEntry->vidExInfo.vidNRateLimit = entry.vidExInfo.vidNRateLimit;
+
+    for(lport=0; lport<dev->numOfPorts; lport++)
+    {
+        port = GT_LPORT_2_PORT(lport);
+        vtuEntry->vtuData.memberTagP[lport]=MEMBER_TAG_CONV_FOR_APP(dev,entry.vtuData.memberTagP[port]);
+        vtuEntry->vtuData.portStateP[lport]=entry.vtuData.portStateP[port];
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gvtuGetEntryNext
+*
+* DESCRIPTION:
+*       Gets next lexicographic VTU entry from the specified VID.
+*
+* INPUTS:
+*       vtuEntry - the VID to start the search.
+*
+* OUTPUTS:
+*       vtuEntry - match VTU  entry.
+*
+* RETURNS:
+*       GT_OK      - on success.
+*       GT_FAIL    - on error or entry does not exist.
+*       GT_NO_SUCH - no more entries.
+*
+* COMMENTS:
+*       Search starts from the VID specified by the user.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvtuGetEntryNext
+(
+    IN  GT_QD_DEV       *dev,
+    INOUT GT_VTU_ENTRY  *vtuEntry
+)
+{
+    GT_U8               valid;
+    GT_STATUS           retVal;
+    GT_U8               port;
+    GT_LPORT               lport;
+    GT_VTU_ENTRY        entry;
+
+    DBG_INFO(("gvtuGetEntryNext Called.\n"));
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,1, DEV_802_1Q)) != GT_OK)
+      return retVal;
+
+    entry.DBNum = vtuEntry->DBNum;
+    entry.vid   = vtuEntry->vid;
+    valid = 0;
+
+    retVal = vtuOperationPerform(dev,GET_NEXT_ENTRY,&valid, &entry);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (vtuOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    /* retrieve the value from the operation */
+
+    if((entry.vid == 0xFFF) && (valid == 0))
+        return GT_NO_SUCH;
+
+    vtuEntry->DBNum = entry.DBNum;
+    vtuEntry->vid   = entry.vid;
+
+    vtuEntry->vidPriOverride = entry.vidPriOverride;
+    vtuEntry->vidPriority = entry.vidPriority;
+
+    vtuEntry->vidPolicy = entry.vidPolicy;
+    vtuEntry->sid = entry.sid;
+
+    vtuEntry->vidExInfo.useVIDFPri = entry.vidExInfo.useVIDFPri;
+    vtuEntry->vidExInfo.vidFPri = entry.vidExInfo.vidFPri;
+    vtuEntry->vidExInfo.useVIDQPri = entry.vidExInfo.useVIDQPri;
+    vtuEntry->vidExInfo.vidQPri = entry.vidExInfo.vidQPri;
+    vtuEntry->vidExInfo.vidNRateLimit = entry.vidExInfo.vidNRateLimit;
+
+    for(lport=0; lport<dev->numOfPorts; lport++)
+    {
+        port = GT_LPORT_2_PORT(lport);
+        vtuEntry->vtuData.memberTagP[lport]=MEMBER_TAG_CONV_FOR_APP(dev,entry.vtuData.memberTagP[port]);
+        vtuEntry->vtuData.portStateP[lport]=entry.vtuData.portStateP[port];
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gvtuFindVidEntry
+*
+* DESCRIPTION:
+*       Find VTU entry for a specific VID, it will return the entry, if found,
+*       along with its associated data
+*
+* INPUTS:
+*       vtuEntry - contains the VID to searche for
+*
+* OUTPUTS:
+*       found    - GT_TRUE, if the appropriate entry exists.
+*       vtuEntry - the entry parameters.
+*
+* RETURNS:
+*       GT_OK      - on success.
+*       GT_FAIL    - on error or entry does not exist.
+*       GT_NO_SUCH - no more entries.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvtuFindVidEntry
+(
+    IN  GT_QD_DEV       *dev,
+    INOUT GT_VTU_ENTRY  *vtuEntry,
+    OUT GT_BOOL         *found
+)
+{
+    GT_U8               valid;
+    GT_STATUS           retVal;
+    GT_U8               port;
+    GT_LPORT            lport;
+    GT_VTU_ENTRY        entry;
+
+    DBG_INFO(("gvtuFindVidEntry Called.\n"));
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,1, DEV_802_1Q)) != GT_OK)
+      return retVal;
+
+    *found = GT_FALSE;
+
+    /* Decrement 1 from vid    */
+    entry.vid   = vtuEntry->vid-1;
+    valid = 0; /* valid is not used as input in this operation */
+    entry.DBNum = vtuEntry->DBNum;
+
+    retVal = vtuOperationPerform(dev,GET_NEXT_ENTRY,&valid, &entry);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (vtuOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    /* retrive the value from the operation */
+
+    if( (entry.vid !=vtuEntry->vid) | (valid !=1) )
+    {
+          DBG_INFO(("Failed.\n"));
+          return GT_NO_SUCH;
+    }
+
+    vtuEntry->DBNum = entry.DBNum;
+
+    vtuEntry->vidPriOverride = entry.vidPriOverride;
+    vtuEntry->vidPriority = entry.vidPriority;
+
+    vtuEntry->vidPolicy = entry.vidPolicy;
+    vtuEntry->sid = entry.sid;
+
+    vtuEntry->vidExInfo.useVIDFPri = entry.vidExInfo.useVIDFPri;
+    vtuEntry->vidExInfo.vidFPri = entry.vidExInfo.vidFPri;
+    vtuEntry->vidExInfo.useVIDQPri = entry.vidExInfo.useVIDQPri;
+    vtuEntry->vidExInfo.vidQPri = entry.vidExInfo.vidQPri;
+    vtuEntry->vidExInfo.vidNRateLimit = entry.vidExInfo.vidNRateLimit;
+
+    for(lport=0; lport<dev->numOfPorts; lport++)
+    {
+        port = GT_LPORT_2_PORT(lport);
+        vtuEntry->vtuData.memberTagP[lport]=MEMBER_TAG_CONV_FOR_APP(dev,entry.vtuData.memberTagP[port]);
+        vtuEntry->vtuData.portStateP[lport]=entry.vtuData.portStateP[port];
+    }
+
+    *found = GT_TRUE;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gvtuFlush
+*
+* DESCRIPTION:
+*       This routine removes all entries from VTU Table.
+*
+* INPUTS:
+*       None
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvtuFlush
+(
+    IN  GT_QD_DEV       *dev
+)
+{
+    GT_STATUS       retVal;
+
+    DBG_INFO(("gvtuFlush Called.\n"));
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,1, DEV_802_1Q)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    retVal = vtuOperationPerform(dev,FLUSH_ALL,NULL,NULL);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gvtuAddEntry
+*
+* DESCRIPTION:
+*       Creates the new entry in VTU table based on user input.
+*
+* INPUTS:
+*       vtuEntry    - vtu entry to insert to the VTU.
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK             - on success
+*       GT_FAIL           - on error
+*       GT_FULL              - vtu table is full
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvtuAddEntry
+(
+    IN  GT_QD_DEV   *dev,
+    IN GT_VTU_ENTRY *vtuEntry
+)
+{
+    GT_U8               valid;
+    GT_STATUS           retVal;
+    GT_U8           port;
+    GT_LPORT           lport;
+    GT_VTU_ENTRY     tmpVtuEntry;
+    GT_BOOL             found;
+    int                count = 5000;
+    GT_VTU_ENTRY        entry;
+
+    DBG_INFO(("gvtuAddEntry Called.\n"));
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,1, DEV_802_1Q)) != GT_OK)
+      return retVal;
+
+    entry.DBNum = vtuEntry->DBNum;
+    entry.vid   = vtuEntry->vid;
+
+    if(IS_IN_DEV_GROUP(dev,DEV_VTU_EXT_INFO))
+    {
+        entry.vidPriOverride = 0;
+        entry.vidPriority = 0;
+
+        entry.vidPolicy = GT_FALSE;
+        entry.sid = 0;
+
+        if(IS_IN_DEV_GROUP(dev,DEV_FQPRI_IN_TABLE))
+        {
+            entry.vidExInfo.useVIDFPri = vtuEntry->vidExInfo.useVIDFPri;
+            entry.vidExInfo.vidFPri = vtuEntry->vidExInfo.vidFPri;
+            entry.vidExInfo.useVIDQPri = vtuEntry->vidExInfo.useVIDQPri;
+            entry.vidExInfo.vidQPri = vtuEntry->vidExInfo.vidQPri;
+            entry.vidExInfo.vidNRateLimit = vtuEntry->vidExInfo.vidNRateLimit;
+        }
+        else
+        {
+            entry.vidExInfo.useVIDFPri = 0;
+            entry.vidExInfo.vidFPri = 0;
+            entry.vidExInfo.useVIDQPri = 0;
+            entry.vidExInfo.vidQPri = 0;
+            entry.vidExInfo.vidNRateLimit = vtuEntry->vidExInfo.vidNRateLimit;
+        }
+    }
+    else
+    {
+        entry.vidPriOverride = vtuEntry->vidPriOverride;
+        entry.vidPriority = vtuEntry->vidPriority;
+
+        if(IS_IN_DEV_GROUP(dev,DEV_POLICY))
+        {
+            entry.vidPolicy = vtuEntry->vidPolicy;
+        }
+        else
+        {
+            entry.vidPolicy = GT_FALSE;
+        }
+
+        if(IS_IN_DEV_GROUP(dev,DEV_802_1S_STU))
+        {
+            entry.sid = vtuEntry->sid;
+        }
+        else
+        {
+            entry.sid = 0;
+        }
+
+        entry.vidExInfo.useVIDFPri = 0;
+        entry.vidExInfo.vidFPri = 0;
+        entry.vidExInfo.useVIDQPri = 0;
+        entry.vidExInfo.vidQPri = 0;
+        entry.vidExInfo.vidNRateLimit = 0;
+    }
+
+    valid = 1; /* for load operation */
+
+    for(port=0; port<dev->maxPorts; port++)
+    {
+        lport = GT_PORT_2_LPORT(port);
+        if(lport == GT_INVALID_PORT)
+        {
+            entry.vtuData.memberTagP[port] = MEMBER_TAG_CONV_FOR_DEV(dev,NOT_A_MEMBER);
+            entry.vtuData.portStateP[port] = 0;
+        }
+        else
+        {
+            entry.vtuData.memberTagP[port] = MEMBER_TAG_CONV_FOR_DEV(dev,vtuEntry->vtuData.memberTagP[lport]);
+            if (IS_IN_DEV_GROUP(dev,DEV_802_1S))
+                entry.vtuData.portStateP[port] = vtuEntry->vtuData.portStateP[lport];
+            else
+                entry.vtuData.portStateP[port] = 0;
+        }
+    }
+
+    retVal = vtuOperationPerform(dev,LOAD_PURGE_ENTRY,&valid, &entry);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (vtuOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    /* verify that the given entry has been added */
+    tmpVtuEntry.vid = vtuEntry->vid;
+    tmpVtuEntry.DBNum = vtuEntry->DBNum;
+
+    if((retVal = gvtuFindVidEntry(dev,&tmpVtuEntry,&found)) != GT_OK)
+    {
+        while(count--);
+        if((retVal = gvtuFindVidEntry(dev,&tmpVtuEntry,&found)) != GT_OK)
+        {
+            DBG_INFO(("Added entry cannot be found\n"));
+            return retVal;
+        }
+    }
+    if(found == GT_FALSE)
+    {
+        DBG_INFO(("Added entry cannot be found\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gvtuDelEntry
+*
+* DESCRIPTION:
+*       Deletes VTU entry specified by user.
+*
+* INPUTS:
+*       vtuEntry - the VTU entry to be deleted
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NO_SUCH      - if specified address entry does not exist
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvtuDelEntry
+(
+    IN  GT_QD_DEV   *dev,
+    IN GT_VTU_ENTRY *vtuEntry
+)
+{
+    GT_U8               valid;
+    GT_STATUS           retVal;
+    GT_VTU_ENTRY        entry;
+
+    DBG_INFO(("gvtuDelEntry Called.\n"));
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,1, DEV_802_1Q)) != GT_OK)
+      return retVal;
+
+    entry.DBNum = vtuEntry->DBNum;
+    entry.vid   = vtuEntry->vid;
+    valid = 0; /* for delete operation */
+
+    retVal = vtuOperationPerform(dev,LOAD_PURGE_ENTRY,&valid, &entry);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (vtuOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/****************************************************************************/
+/* Internal use functions.                                                  */
+/****************************************************************************/
+
+/*******************************************************************************
+* gvtuGetViolation
+*
+* DESCRIPTION:
+*       Get VTU Violation data
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       vtuIntStatus - interrupt cause, source portID, and vid.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NOT_SUPPORT  - if current device does not support this feature.
+*
+* COMMENTS:
+*        This is an internal function. No user should call this function.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvtuGetViolation
+(
+    IN  GT_QD_DEV         *dev,
+    OUT GT_VTU_INT_STATUS *vtuIntStatus
+)
+{
+    GT_U8               spid;
+    GT_U16               vid;
+    GT_U16               intCause;
+    GT_STATUS           retVal;
+    GT_VTU_ENTRY        entry;
+
+    DBG_INFO(("gvtuGetViolation Called.\n"));
+
+    /* check which Violation occurred */
+    retVal = hwGetGlobalRegField(dev,QD_REG_VTU_OPERATION,4,3,&intCause);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("ERROR to read VTU OPERATION Register.\n"));
+        return retVal;
+    }
+
+    if (intCause == 0)
+    {
+        /* No Violation occurred. */
+        vtuIntStatus->vtuIntCause = 0;
+        return GT_OK;
+    }
+
+    entry.DBNum = 0;
+
+    retVal = vtuOperationPerform(dev,SERVICE_VIOLATIONS,NULL, &entry);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (vtuOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    spid = entry.DBNum & 0xF;
+    vid = entry.vid;
+
+    if(spid == 0xF)
+    {
+        vtuIntStatus->vtuIntCause = GT_VTU_FULL_VIOLATION;
+        vtuIntStatus->spid = spid;
+        vtuIntStatus->vid = 0;
+    }
+    else
+    {
+        vtuIntStatus->vtuIntCause = intCause & (GT_MEMBER_VIOLATION | GT_MISS_VIOLATION);
+        vtuIntStatus->spid = spid;
+        vtuIntStatus->vid = vid;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gvtuGetViolation2
+*
+* DESCRIPTION:
+*       Get VTU Violation data (for Gigabit Device)
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       vtuIntStatus - interrupt cause, source portID, and vid.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NOT_SUPPORT  - if current device does not support this feature.
+*
+* COMMENTS:
+*        This is an internal function. No user should call this function.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvtuGetViolation2
+(
+    IN  GT_QD_DEV         *dev,
+    OUT GT_VTU_INT_STATUS *vtuIntStatus
+)
+{
+    GT_U16               intCause;
+    GT_STATUS           retVal;
+    GT_VTU_ENTRY        entry;
+
+    DBG_INFO(("gvtuGetViolation2 Called.\n"));
+
+    /* check if Violation occurred */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_STATUS,5,1,&intCause);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("ERROR to read VTU OPERATION Register.\n"));
+        return retVal;
+    }
+
+    if (intCause == 0)
+    {
+        /* No Violation occurred. */
+        vtuIntStatus->vtuIntCause = 0;
+        return GT_OK;
+    }
+
+    entry.DBNum = 0;
+
+    retVal = vtuOperationPerform(dev,SERVICE_VIOLATIONS,NULL, &entry);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (vtuOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    /* check which Violation occurred */
+    retVal = hwGetGlobalRegField(dev,QD_REG_VTU_OPERATION,5,2,&intCause);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("ERROR to read VTU OPERATION Register.\n"));
+        return retVal;
+    }
+
+    switch (intCause)
+    {
+        case 0:
+            /* No Violation occurred. */
+            vtuIntStatus->vtuIntCause = 0;
+            return GT_OK;
+        case 1:
+            /* Miss Violation */
+            vtuIntStatus->vtuIntCause = GT_MISS_VIOLATION;
+            break;
+        case 2:
+            /* Member Violation */
+            vtuIntStatus->vtuIntCause = GT_MEMBER_VIOLATION;
+            break;
+        default :
+            return GT_FAIL;
+    }
+
+    vtuIntStatus->spid = entry.DBNum & 0xF;
+    vtuIntStatus->vid = entry.vid;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gvtuGetViolation3
+*
+* DESCRIPTION:
+*       Get VTU Violation data
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       vtuIntStatus - interrupt cause, source portID, and vid.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NOT_SUPPORT  - if current device does not support this feature.
+*
+* COMMENTS:
+*        This is an internal function. No user should call this function.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gvtuGetViolation3
+(
+    IN  GT_QD_DEV         *dev,
+    OUT GT_VTU_INT_STATUS *vtuIntStatus
+)
+{
+    GT_U16               intCause;
+    GT_STATUS           retVal;
+    GT_VTU_ENTRY        entry;
+
+    DBG_INFO(("gvtuGetViolation3 Called.\n"));
+
+    /* check if Violation occurred */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_STATUS,5,1,&intCause);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("ERROR to read VTU OPERATION Register.\n"));
+        return retVal;
+    }
+
+    if (intCause == 0)
+    {
+        /* No Violation occurred. */
+        vtuIntStatus->vtuIntCause = 0;
+        return GT_OK;
+    }
+
+    entry.DBNum = 0;
+
+    retVal = vtuOperationPerform(dev,SERVICE_VIOLATIONS,NULL, &entry);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (vtuOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    /* check which Violation occurred */
+    retVal = hwGetGlobalRegField(dev,QD_REG_VTU_OPERATION,4,3,&intCause);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("ERROR to read VTU OPERATION Register.\n"));
+        return retVal;
+    }
+
+    vtuIntStatus->vtuIntCause = 0;
+
+    if(intCause & 0x1)
+    {
+        vtuIntStatus->vtuIntCause |= GT_VTU_FULL_VIOLATION;
+    }
+
+    if(intCause & 0x2)
+    {
+        vtuIntStatus->vtuIntCause |= GT_MISS_VIOLATION;
+    }
+
+    if(intCause & 0x4)
+    {
+        vtuIntStatus->vtuIntCause |= GT_MEMBER_VIOLATION;
+    }
+
+    vtuIntStatus->spid = entry.DBNum & 0xF;
+    vtuIntStatus->vid = entry.vid;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* vtuOperationPerform
+*
+* DESCRIPTION:
+*       This function is used by all VTU control functions, and is responsible
+*       to write the required operation into the VTU registers.
+*
+* INPUTS:
+*       vtuOp       - The VTU operation bits to be written into the VTU
+*                     operation register.
+*       DBNum       - DBNum where the given vid belongs to
+*       vid         - vlan id
+*       valid       - valid bit
+*       vtuData     - VTU Data with memberTag information
+*
+* OUTPUTS:
+*       DBNum       - DBNum where the given vid belongs to
+*       vid         - vlan id
+*       valid       - valid bit
+*       vtuData     - VTU Data with memberTag information
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+static GT_STATUS vtuOperationPerform
+(
+    IN        GT_QD_DEV           *dev,
+    IN      GT_VTU_OPERATION    vtuOp,
+    INOUT   GT_U8               *valid,
+    INOUT    GT_VTU_ENTRY        *entry
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                /* register.                    */
+
+    gtSemTake(dev,dev->vtuRegsSem,OS_WAIT_FOREVER);
+
+    /* Wait until the VTU in ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_VTU_OPERATION;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->vtuRegsSem);
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+    while(data == 1)
+    {
+        retVal = hwGetGlobalRegField(dev,QD_REG_VTU_OPERATION,15,1,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->vtuRegsSem);
+            return retVal;
+        }
+    }
+#endif
+
+    /* Set the VTU data register    */
+    /* There is no need to setup data reg. on flush, get next, or service violation */
+    if((vtuOp != FLUSH_ALL) && (vtuOp != GET_NEXT_ENTRY) && (vtuOp != SERVICE_VIOLATIONS))
+    {
+
+        /****************** VTU DATA 1 REG *******************/
+
+        /* get data and wirte to QD_REG_VTU_DATA1_REG (ports 0 to 3) */
+
+        data =  (entry->vtuData.memberTagP[0] & 3)     |
+                ((entry->vtuData.memberTagP[1] & 3)<<4) |
+                ((entry->vtuData.memberTagP[2] & 3)<<8);
+
+        if (IS_IN_DEV_GROUP(dev,DEV_802_1S))
+            data |= ((entry->vtuData.portStateP[0] & 3)<<2)    |
+                    ((entry->vtuData.portStateP[1] & 3)<<6) |
+                    ((entry->vtuData.portStateP[2] & 3)<<10);
+
+        if(dev->maxPorts > 3)
+        {
+            data |= ((entry->vtuData.memberTagP[3] & 3)<<12) ;
+            if (IS_IN_DEV_GROUP(dev,DEV_802_1S))
+                data |= ((entry->vtuData.portStateP[3] & 3)<<14) ;
+        }
+
+        retVal = hwWriteGlobalReg(dev,QD_REG_VTU_DATA1_REG,data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->vtuRegsSem);
+            return retVal;
+        }
+
+        /****************** VTU DATA 2 REG *******************/
+
+        /* get data and wirte to QD_REG_VTU_DATA2_REG (ports 4 to 7) */
+
+        if(dev->maxPorts > 4)
+        {
+            /* also need to set data register  ports 4 to 6 */
+
+            data =  (entry->vtuData.memberTagP[4] & 3)   |
+                    ((entry->vtuData.memberTagP[5] & 3) << 4);
+
+            if (IS_IN_DEV_GROUP(dev,DEV_802_1S))
+                data |= ((entry->vtuData.portStateP[4] & 3) << 2) |
+                        ((entry->vtuData.portStateP[5] & 3) << 6);
+
+            if(dev->maxPorts > 6)
+            {
+                data |= ((entry->vtuData.memberTagP[6] & 3)<<8) ;
+                if (IS_IN_DEV_GROUP(dev,DEV_802_1S))
+                    data |= ((entry->vtuData.portStateP[6] & 3)<<10) ;
+            }
+
+            if(dev->maxPorts > 7)
+            {
+                data |= ((entry->vtuData.memberTagP[7] & 3)<<12) ;
+                if (IS_IN_DEV_GROUP(dev,DEV_802_1S))
+                    data |= ((entry->vtuData.portStateP[7] & 3)<<14) ;
+            }
+
+            if (IS_IN_DEV_GROUP(dev,DEV_VTU_EXT_INFO))
+            {
+                if(entry->vidExInfo.useVIDFPri == GT_TRUE)
+                    data |= ((1 << 15) | ((entry->vidExInfo.vidFPri & 0x7) << 12));
+                if(entry->vidExInfo.useVIDQPri == GT_TRUE)
+                    data |= ((1 << 11) | ((entry->vidExInfo.vidQPri & 0x3) << 9));
+                if(entry->vidExInfo.vidNRateLimit == GT_TRUE)
+                    data |= (1 << 8);
+            }
+
+            retVal = hwWriteGlobalReg(dev,QD_REG_VTU_DATA2_REG,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->vtuRegsSem);
+                return retVal;
+            }
+        }
+
+
+        /****************** VTU DATA 3 REG *******************/
+
+        /* get data and wirte to QD_REG_VTU_DATA3_REG (ports 8 to 10) */
+
+        if(dev->maxPorts > 7)
+        {
+            /* also need to set data register  ports 8 to 9 */
+
+            data =  (entry->vtuData.memberTagP[8] & 3)   |
+                    ((entry->vtuData.memberTagP[9] & 3) << 4);
+
+            if (IS_IN_DEV_GROUP(dev,DEV_802_1S))
+                data |= ((entry->vtuData.portStateP[8] & 3) << 2)    |
+                        ((entry->vtuData.portStateP[9] & 3) << 6);
+
+            if(dev->maxPorts > 10)
+            {
+                data |= (entry->vtuData.memberTagP[10] & 3) << 8;
+
+                if (IS_IN_DEV_GROUP(dev,DEV_802_1S))
+                    data |= (entry->vtuData.portStateP[10] & 3) << 10;
+            }
+
+            if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+            {
+                if(entry->vidPriOverride == GT_TRUE)
+                    data |= ((1 << 15) | ((entry->vidPriority & 0x7) << 12));
+            }
+
+            retVal = hwWriteGlobalReg(dev,QD_REG_VTU_DATA3_REG,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->vtuRegsSem);
+                return retVal;
+            }
+        }
+        else if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+        {
+            if(entry->vidPriOverride == GT_TRUE)
+                data = ((1 << 15) | ((entry->vidPriority & 0x7) << 12));
+            else
+                data = 0;
+
+            retVal = hwWriteGlobalReg(dev,QD_REG_VTU_DATA3_REG,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->vtuRegsSem);
+                return retVal;
+            }
+        }
+    }
+
+    /* Set the VID register (QD_REG_VTU_VID_REG) */
+    /* There is no need to setup VID reg. on flush and service violation */
+    if((vtuOp != FLUSH_ALL) && (vtuOp != SERVICE_VIOLATIONS) )
+    {
+        data= ( (entry->vid) & 0xFFF ) | ( (*valid) << 12 );
+        retVal = hwWriteGlobalReg(dev,(GT_U8)(QD_REG_VTU_VID_REG),data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->vtuRegsSem);
+            return retVal;
+        }
+    }
+
+    /* Set SID, FID, VIDPolicy, if it's Load operation */
+    if((vtuOp == LOAD_PURGE_ENTRY) && (*valid == 1))
+    {
+        if(IS_IN_DEV_GROUP(dev,DEV_802_1S_STU))
+        {
+            data= (entry->sid) & 0x3F;
+            retVal = hwWriteGlobalReg(dev,(GT_U8)(QD_REG_STU_SID_REG),data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->vtuRegsSem);
+                return retVal;
+            }
+        }
+
+        data = 0;
+
+        if(IS_IN_DEV_GROUP(dev,DEV_FID_REG))
+        {
+            if(IS_IN_DEV_GROUP(dev,DEV_POLICY))
+            {
+                data= entry->vidPolicy << 12;
+            }
+
+            data |= (entry->DBNum & 0xFFF);
+
+            retVal = hwWriteGlobalReg(dev,(GT_U8)(QD_REG_VTU_FID_REG),data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->vtuRegsSem);
+                return retVal;
+            }
+        }
+
+
+    }
+
+    /* Start the VTU Operation by defining the DBNum, vtuOp and VTUBusy    */
+    /*
+     * Flush operation will skip the above two setup (for data and vid), and
+     * come to here directly
+     */
+
+    if(vtuOp == FLUSH_ALL)
+        data = (1 << 15) | (vtuOp << 12);
+    else
+    {
+        if(IS_IN_DEV_GROUP(dev,DEV_FID_REG))
+        {
+            data = (1 << 15) | (vtuOp << 12);
+        }
+        else if (IS_IN_DEV_GROUP(dev,DEV_DBNUM_256))
+        {
+            /* Since DBNum is defined as GT_U8, it cannot be >= 256. */
+            #if 0
+            if(entry->DBNum >= 256)
+            {
+                gtSemGive(dev,dev->vtuRegsSem);
+                return GT_BAD_PARAM;
+            }
+            #endif
+            data = (1 << 15) | (vtuOp << 12) | ((entry->DBNum & 0xF0) << 4) | (entry->DBNum & 0x0F);
+        }
+        else if (IS_IN_DEV_GROUP(dev,DEV_DBNUM_64))
+        {
+            if(entry->DBNum >= 64)
+            {
+                gtSemGive(dev,dev->vtuRegsSem);
+                return GT_BAD_PARAM;
+            }
+            data = (1 << 15) | (vtuOp << 12) | ((entry->DBNum & 0x30) << 4) | (entry->DBNum & 0x0F);
+        }
+        else
+        {
+            if(entry->DBNum >= 16)
+            {
+                gtSemGive(dev,dev->vtuRegsSem);
+                return GT_BAD_PARAM;
+            }
+            data = (1 << 15) | (vtuOp << 12) | entry->DBNum;
+        }
+    }
+
+    retVal = hwWriteGlobalReg(dev,QD_REG_VTU_OPERATION,data);
+    if(retVal != GT_OK)
+    {
+        gtSemGive(dev,dev->vtuRegsSem);
+        return retVal;
+    }
+
+    /* only two operations need to go through the mess below to get some data
+     * after the operations -  service violation and get next entry
+     */
+
+    /* If the operation is to service violation operation wait for the response   */
+    if(vtuOp == SERVICE_VIOLATIONS)
+    {
+#ifdef GT_RMGMT_ACCESS
+        {
+          HW_DEV_REG_ACCESS regAccess;
+
+          regAccess.entries = 1;
+
+          regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+          regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+          regAccess.rw_reg_list[0].reg = QD_REG_VTU_OPERATION;
+          regAccess.rw_reg_list[0].data = 15;
+          retVal = hwAccessMultiRegs(dev, &regAccess);
+          if(retVal != GT_OK)
+          {
+            gtSemGive(dev,dev->vtuRegsSem);
+            return retVal;
+          }
+        }
+#else
+        /* Wait until the VTU in ready. */
+        data = 1;
+        while(data == 1)
+        {
+            retVal = hwGetGlobalRegField(dev,QD_REG_VTU_OPERATION,15,1,&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->vtuRegsSem);
+                return retVal;
+            }
+        }
+#endif
+
+        /* get the Source Port ID that was involved in the violation */
+        retVal = hwGetGlobalRegField(dev,QD_REG_VTU_OPERATION,0,4,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->vtuRegsSem);
+            return retVal;
+        }
+
+        entry->DBNum = (GT_U8)(data & 0xF);
+
+        /* get the VID that was involved in the violation */
+
+        retVal = hwReadGlobalReg(dev,QD_REG_VTU_VID_REG,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->vtuRegsSem);
+            return retVal;
+        }
+
+        /* Get the vid - bits 0-11 */
+        entry->vid   = data & 0xFFF;
+
+
+    } /* end of service violations */
+
+    /* If the operation is a get next operation wait for the response   */
+    if(vtuOp == GET_NEXT_ENTRY)
+    {
+        entry->vidExInfo.useVIDFPri = GT_FALSE;
+        entry->vidExInfo.vidFPri = 0;
+
+        entry->vidExInfo.useVIDQPri = GT_FALSE;
+        entry->vidExInfo.vidQPri = 0;
+
+        entry->vidExInfo.vidNRateLimit = GT_FALSE;
+
+        entry->sid = 0;
+           entry->vidPolicy = GT_FALSE;
+
+        /* Wait until the VTU in ready. */
+#ifdef GT_RMGMT_ACCESS
+        {
+          HW_DEV_REG_ACCESS regAccess;
+
+          regAccess.entries = 1;
+
+          regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+          regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+          regAccess.rw_reg_list[0].reg = QD_REG_VTU_OPERATION;
+          regAccess.rw_reg_list[0].data = 15;
+          retVal = hwAccessMultiRegs(dev, &regAccess);
+          if(retVal != GT_OK)
+          {
+            gtSemGive(dev,dev->vtuRegsSem);
+            return retVal;
+          }
+        }
+#else
+        data = 1;
+        while(data == 1)
+        {
+            retVal = hwGetGlobalRegField(dev,QD_REG_VTU_OPERATION,15,1,&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->vtuRegsSem);
+                return retVal;
+            }
+        }
+#endif
+
+        /****************** get the vid *******************/
+
+        retVal = hwReadGlobalReg(dev,QD_REG_VTU_VID_REG,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->vtuRegsSem);
+            return retVal;
+        }
+
+        /* the vid is bits 0-11 */
+        entry->vid   = data & 0xFFF;
+
+        /* the vid valid is bits 12 */
+        *valid   = (data >> 12) & 1;
+
+        if (*valid == 0)
+        {
+            gtSemGive(dev,dev->vtuRegsSem);
+            return GT_OK;
+        }
+
+        /****************** get the SID *******************/
+        if(IS_IN_DEV_GROUP(dev,DEV_802_1S_STU))
+        {
+            retVal = hwReadGlobalReg(dev,(GT_U8)(QD_REG_STU_SID_REG),&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->vtuRegsSem);
+                return retVal;
+            }
+            entry->sid = data & 0x3F;
+        }
+
+        /****************** get the DBNum *******************/
+        if(IS_IN_DEV_GROUP(dev,DEV_FID_REG))
+        {
+            retVal = hwReadGlobalReg(dev,(GT_U8)(QD_REG_VTU_FID_REG),&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->vtuRegsSem);
+                return retVal;
+            }
+
+            if(IS_IN_DEV_GROUP(dev,DEV_POLICY))
+            {
+                entry->vidPolicy = (data >> 12) & 0x1;
+            }
+
+            entry->DBNum = data & 0xFFF;
+
+        }
+        else
+        {
+            retVal = hwGetGlobalRegField(dev,QD_REG_VTU_OPERATION,0,4,&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->vtuRegsSem);
+                return retVal;
+            }
+
+            entry->DBNum = data & 0xF;
+
+            if (IS_IN_DEV_GROUP(dev,DEV_DBNUM_256))
+            {
+                retVal = hwGetGlobalRegField(dev,QD_REG_VTU_OPERATION,8,4,&data);
+                if(retVal != GT_OK)
+                {
+                    gtSemGive(dev,dev->vtuRegsSem);
+                    return retVal;
+                }
+
+                entry->DBNum |= ((data & 0xF) << 4);
+            }
+            else if (IS_IN_DEV_GROUP(dev,DEV_DBNUM_64))
+            {
+                retVal = hwGetGlobalRegField(dev,QD_REG_VTU_OPERATION,8,2,&data);
+                if(retVal != GT_OK)
+                {
+                    gtSemGive(dev,dev->vtuRegsSem);
+                    return retVal;
+                }
+
+                entry->DBNum |= ((data & 0x3) << 4);
+            }
+        }
+
+
+        /****************** get the MemberTagP *******************/
+        retVal = hwReadGlobalReg(dev,QD_REG_VTU_DATA1_REG,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->vtuRegsSem);
+            return retVal;
+        }
+
+        /* get data from data register for ports 0 to 2 */
+        entry->vtuData.memberTagP[0]  =  data & 3 ;
+        entry->vtuData.memberTagP[1]  = (data >> 4) & 3 ;
+        entry->vtuData.memberTagP[2]  = (data >> 8) & 3 ;
+        entry->vtuData.portStateP[0]  = (data >> 2) & 3 ;
+        entry->vtuData.portStateP[1]  = (data >> 6) & 3 ;
+        entry->vtuData.portStateP[2]  = (data >> 10) & 3 ;
+
+        /****************** for the switch more than 3 ports *****************/
+
+        if(dev->maxPorts > 3)
+        {
+            /* fullsail has 3 ports, clippership has 7 prots */
+            entry->vtuData.memberTagP[3]  = (data >>12) & 3 ;
+            entry->vtuData.portStateP[3]  = (data >>14) & 3 ;
+
+            /* get data from data register for ports 4 to 6 */
+            retVal = hwReadGlobalReg(dev,QD_REG_VTU_DATA2_REG,&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->vtuRegsSem);
+                return retVal;
+            }
+            entry->vtuData.memberTagP[4]  = data & 3 ;
+            entry->vtuData.memberTagP[5]  = (data >> 4) & 3 ;
+            entry->vtuData.portStateP[4]  = (data >> 2) & 3 ;
+            entry->vtuData.portStateP[5]  = (data >> 6) & 3 ;
+
+            if(dev->maxPorts > 6)
+            {
+                entry->vtuData.memberTagP[6]  = (data >> 8) & 3 ;
+                entry->vtuData.portStateP[6]  = (data >> 10) & 3 ;
+            }
+
+            if (IS_IN_DEV_GROUP(dev,DEV_VTU_EXT_INFO))
+            {
+                entry->vidPriOverride = 0;
+                entry->vidPriority = 0;
+
+                entry->vidExInfo.useVIDFPri = (data & 0x8000)?GT_TRUE:GT_FALSE;
+                entry->vidExInfo.vidFPri = (data >> 12) & 0x7;
+
+                entry->vidExInfo.useVIDQPri = (data & 0x0800)?GT_TRUE:GT_FALSE;
+                entry->vidExInfo.vidQPri = (data >> 9) & 0x3;
+
+                entry->vidExInfo.vidNRateLimit = (data & 0x0100)?GT_TRUE:GT_FALSE;
+            }
+        }
+        /****************** upto 7 port switch *******************/
+
+        /****************** for the switch more than 7 ports *****************/
+
+        if(dev->maxPorts > 7)
+        {
+            /* fullsail has 3 ports, clippership has 7 prots */
+            entry->vtuData.memberTagP[7]  = (data >>12) & 3 ;
+            entry->vtuData.portStateP[7]  = (data >>14) & 3 ;
+
+            /* get data from data register for ports 4 to 6 */
+            retVal = hwReadGlobalReg(dev,QD_REG_VTU_DATA3_REG,&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->vtuRegsSem);
+                return retVal;
+            }
+            entry->vtuData.memberTagP[8]  = data & 3 ;
+            entry->vtuData.memberTagP[9]  = (data >> 4) & 3 ;
+            entry->vtuData.portStateP[8]  = (data >> 2) & 3 ;
+            entry->vtuData.portStateP[9]  = (data >> 6) & 3 ;
+
+            if(dev->maxPorts > 10)
+            {
+                entry->vtuData.memberTagP[10]  = (data >> 8) & 3 ;
+                entry->vtuData.portStateP[10]  = (data >> 10) & 3 ;
+            }
+
+            if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+            {
+                if (data & 0x8000)
+                {
+                    entry->vidPriOverride = GT_TRUE;
+                    entry->vidPriority = (data >> 12) & 0x7;
+                }
+                else
+                {
+                    entry->vidPriOverride = GT_FALSE;
+                    entry->vidPriority = 0;
+                }
+            }
+
+        }
+        else if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+        {
+            /* get data from data register for ports 4 to 6 */
+            retVal = hwReadGlobalReg(dev,QD_REG_VTU_DATA3_REG,&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->vtuRegsSem);
+                return retVal;
+            }
+
+            if (data & 0x8000)
+            {
+                entry->vidPriOverride = GT_TRUE;
+                entry->vidPriority = (data >> 12) & 0x7;
+            }
+            else
+            {
+                entry->vidPriOverride = GT_FALSE;
+                entry->vidPriority = 0;
+            }
+        }
+
+        /****************** upto 11 ports switch *******************/
+
+    } /* end of get next entry */
+
+    gtSemGive(dev,dev->vtuRegsSem);
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtCCPVT.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtCCPVT.c
new file mode 100644
index 000000000000..98388a11c700
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtCCPVT.c
@@ -0,0 +1,438 @@
+#include <Copyright.h>
+
+/*******************************************************************************
+* gtCCPVT.c
+*
+* DESCRIPTION:
+*       API definitions for Cross Chip Port Vlan Data Table
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtSem.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+/****************************************************************************/
+/* Cross Chip Port Vlan operation function declaration.                                    */
+/****************************************************************************/
+static GT_STATUS pvtOperationPerform
+(
+    IN   GT_QD_DEV             *dev,
+    IN   GT_PVT_OPERATION    pvtOp,
+    INOUT GT_PVT_OP_DATA    *opData
+);
+
+
+/*******************************************************************************
+* gpvtInitialize
+*
+* DESCRIPTION:
+*       This routine initializes the PVT Table to all one's (initial state)
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpvtInitialize
+(
+    IN  GT_QD_DEV     *dev
+)
+{
+    GT_STATUS           retVal;
+    GT_PVT_OPERATION    op;
+
+    DBG_INFO(("gpvtInitialize Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_CROSS_CHIP_PORT_VLAN))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Program Tuning register */
+    op = PVT_INITIALIZE;
+    retVal = pvtOperationPerform(dev,op,NULL);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (pvtOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gpvtWritePVTData
+*
+* DESCRIPTION:
+*       This routine write Cross Chip Port Vlan Data.
+*        Cross chip Port VLAN Data used as a bit mask to limit where cross chip
+*        frames can egress (in chip Port VLANs are masked using gvlnSetPortVlanPorts
+*        API). Cross chip frames are Forward frames that ingress a DSA or Ether
+*        Type DSA port (see gprtSetFrameMode API). Bit 0 is a mask for port 0,
+*        bit 1 for port 1, etc. When a port's mask bit is one, frames are allowed
+*        to egress that port on this device. When a port's mask bit is zero,
+*        frames are not allowed to egress that port on this device.
+*
+*        The Cross Chip Port VLAN Table is accessed by ingressing frames based
+*        upon the original source port of the frame using the Forward frame's DSA tag
+*        fields Src_Dev, Src_Port/Src_Trunk and Src_Is_Trunk. The 1 entry of the 512
+*        that is accessed by the frame is:
+*            If 5 Bit Port (in Global 2, offset 0x1D) = 0:
+*                If Src_Is_Trunk = 0   Src_Dev[4:0], Src_Port[3:0]119
+*                If Src_Is_Trunk = 1   Device Number (global offset 0x1C), Src_Trunk[3:0]
+*            If 5 Bit Port (in Global 2, offset 0x1D) = 1:
+*                If Src_Is_Trunk = 0   Src_Dev[3:0], Src_Port[4:0]120
+*                If Src_Is_Trunk = 1   Device Number[3:0], Src_Trunk[4:0]
+*
+*        Cross chip port VLANs with Trunks are supported in the table where this
+*        device's entries would be stored (defined by this device's Device Number).
+*        This portion of the table is available for Trunk entries because this device's
+*        port VLAN mappings to ports inside this device are masked by the port's
+*        VLAN Table (see gvlnSetPortVlanPorts API).
+*
+*
+* INPUTS:
+*        pvtPointer - pointer to the desired entry of PVT (0 ~ 511)
+*        pvtData    - Cross Chip Port Vlan Data
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpvtWritePVTData
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        pvtPointer,
+    IN  GT_U32        pvtData
+)
+{
+    GT_STATUS           retVal;
+    GT_PVT_OPERATION    op;
+    GT_PVT_OP_DATA        opData;
+
+    DBG_INFO(("gpvtWritePVTData Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_CROSS_CHIP_PORT_VLAN))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the given pointer is valid */
+    if (pvtPointer > 0x1FF)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* check if the given pvtData is valid */
+    if (pvtData >= (GT_U32)(1 << dev->maxPorts))
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Program Tuning register */
+    op = PVT_WRITE;
+    opData.pvtAddr = pvtPointer;
+
+    if((opData.pvtData = GT_LPORTVEC_2_PORTVEC(pvtData)) == GT_INVALID_PORT_VEC)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+
+    retVal = pvtOperationPerform(dev,op,&opData);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (pvtOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gpvtReadPVTData
+*
+* DESCRIPTION:
+*       This routine reads Cross Chip Port Vlan Data.
+*        Cross chip Port VLAN Data used as a bit mask to limit where cross chip
+*        frames can egress (in chip Port VLANs are masked using gvlnSetPortVlanPorts
+*        API). Cross chip frames are Forward frames that ingress a DSA or Ether
+*        Type DSA port (see gprtSetFrameMode API). Bit 0 is a mask for port 0,
+*        bit 1 for port 1, etc. When a port's mask bit is one, frames are allowed
+*        to egress that port on this device. When a port's mask bit is zero,
+*        frames are not allowed to egress that port on this device.
+*
+*        The Cross Chip Port VLAN Table is accessed by ingressing frames based
+*        upon the original source port of the frame using the Forward frame's DSA tag
+*        fields Src_Dev, Src_Port/Src_Trunk and Src_Is_Trunk. The 1 entry of the 512
+*        that is accessed by the frame is:
+*            If 5 Bit Port (in Global 2, offset 0x1D) = 0:
+*                If Src_Is_Trunk = 0   Src_Dev[4:0], Src_Port[3:0]119
+*                If Src_Is_Trunk = 1   Device Number (global offset 0x1C), Src_Trunk[3:0]
+*            If 5 Bit Port (in Global 2, offset 0x1D) = 1:
+*                If Src_Is_Trunk = 0   Src_Dev[3:0], Src_Port[4:0]120
+*                If Src_Is_Trunk = 1   Device Number[3:0], Src_Trunk[4:0]
+*
+*        Cross chip port VLANs with Trunks are supported in the table where this
+*        device's entries would be stored (defined by this device's Device Number).
+*        This portion of the table is available for Trunk entries because this device's
+*        port VLAN mappings to ports inside this device are masked by the port's
+*        VLAN Table (see gvlnSetPortVlanPorts API).
+*
+*
+* INPUTS:
+*        pvtPointer - pointer to the desired entry of PVT (0 ~ 511)
+*
+* OUTPUTS:
+*        pvtData    - Cross Chip Port Vlan Data
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpvtReadPVTData
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        pvtPointer,
+    OUT GT_U32        *pvtData
+)
+{
+    GT_STATUS           retVal;
+    GT_PVT_OPERATION    op;
+    GT_PVT_OP_DATA        opData;
+
+    DBG_INFO(("gpvtReadPVTData Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_CROSS_CHIP_PORT_VLAN))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the given pointer is valid */
+    if (pvtPointer > 0x1FF)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Program Tuning register */
+    op = PVT_READ;
+    opData.pvtAddr = pvtPointer;
+    retVal = pvtOperationPerform(dev,op,&opData);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (pvtOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    opData.pvtData &= (1 << dev->maxPorts) - 1;
+    *pvtData = GT_PORTVEC_2_LPORTVEC(opData.pvtData);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/****************************************************************************/
+/* Internal functions.                                                  */
+/****************************************************************************/
+
+
+/*******************************************************************************
+* pvtOperationPerform
+*
+* DESCRIPTION:
+*       This function accesses PVT Table
+*
+* INPUTS:
+*       pvtOp   - The pvt operation
+*       pvtData - address and data to be written into PVT
+*
+* OUTPUTS:
+*       pvtData - data read from PVT pointed by address
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+static GT_STATUS pvtOperationPerform
+(
+    IN    GT_QD_DEV           *dev,
+    IN    GT_PVT_OPERATION   pvtOp,
+    INOUT GT_PVT_OP_DATA     *opData
+)
+{
+    GT_STATUS       retVal;    /* Functions return value */
+    GT_U16          data;     /* temporary Data storage */
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    /* Wait until the pvt in ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_PVT_ADDR;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+    while(data == 1)
+    {
+        retVal = hwGetGlobal2RegField(dev,QD_REG_PVT_ADDR,15,1,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+    }
+#endif
+
+    /* Set the PVT Operation register */
+    switch (pvtOp)
+    {
+        case PVT_INITIALIZE:
+            data = (1 << 15) | (pvtOp << 12);
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_PVT_ADDR,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->tblRegsSem);
+                return retVal;
+            }
+            break;
+
+        case PVT_WRITE:
+            data = (GT_U16)opData->pvtData;
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_PVT_DATA,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->tblRegsSem);
+                return retVal;
+            }
+
+            data = (GT_U16)((1 << 15) | (pvtOp << 12) | opData->pvtAddr);
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_PVT_ADDR,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->tblRegsSem);
+                return retVal;
+            }
+            break;
+
+        case PVT_READ:
+            data = (GT_U16)((1 << 15) | (pvtOp << 12) | opData->pvtAddr);
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_PVT_ADDR,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->tblRegsSem);
+                return retVal;
+            }
+
+#ifdef GT_RMGMT_ACCESS
+            {
+              HW_DEV_REG_ACCESS regAccess;
+
+              regAccess.entries = 1;
+
+              regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+              regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+              regAccess.rw_reg_list[0].reg = QD_REG_PVT_ADDR;
+              regAccess.rw_reg_list[0].data = 15;
+              retVal = hwAccessMultiRegs(dev, &regAccess);
+              if(retVal != GT_OK)
+              {
+                gtSemGive(dev,dev->tblRegsSem);
+                return retVal;
+              }
+            }
+#else
+            data = 1;
+            while(data == 1)
+            {
+                retVal = hwGetGlobal2RegField(dev,QD_REG_PVT_ADDR,15,1,&data);
+                if(retVal != GT_OK)
+                {
+                    gtSemGive(dev,dev->tblRegsSem);
+                    return retVal;
+                }
+            }
+#endif
+
+            retVal = hwReadGlobal2Reg(dev,QD_REG_PVT_DATA,&data);
+            opData->pvtData = (GT_U32)data;
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->tblRegsSem);
+                return retVal;
+            }
+
+            break;
+
+        default:
+
+            gtSemGive(dev,dev->tblRegsSem);
+            return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->tblRegsSem);
+    return retVal;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtEvents.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtEvents.c
new file mode 100644
index 000000000000..49fc7d46825c
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtEvents.c
@@ -0,0 +1,1583 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtEvents.c
+*
+* DESCRIPTION:
+*       API definitions for system interrupt events handling.
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*       $Revision: 3 $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+/*******************************************************************************
+* eventSetActive
+*
+* DESCRIPTION:
+*       This routine enables/disables the receive of an hardware driven event.
+*
+* INPUTS:
+*       eventType - the event type. any combination of the folowing:
+*           GT_STATS_DONE, GT_VTU_PROB, GT_VTU_DONE, GT_ATU_FULL(or GT_ATU_PROB),
+*           GT_ATU_DONE, GT_PHY_INTERRUPT, GT_EE_INTERRUPT, GT_DEVICE_INT,
+*            and GT_AVB_INTERRUPT
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       Each switch device has its own set of event Types. Please refer to the
+*        device datasheet for the list of event types that the device supports.
+*
+*******************************************************************************/
+GT_STATUS eventSetActive
+(
+    IN GT_QD_DEV *dev,
+    IN GT_U32      eventType
+)
+{
+    GT_STATUS   retVal;
+    GT_U16     data;
+    GT_U16    intMask;
+    GT_U8     len;
+
+    DBG_INFO(("eventSetActive Called.\n"));
+
+    data = (GT_U16) eventType;
+    len = 9;
+
+    if ((IS_IN_DEV_GROUP(dev,DEV_EXTERNAL_PHY_ONLY)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_DEV_PHY_INTERRUPT)))
+    {
+        intMask = GT_NO_INTERNAL_PHY_INT_MASK;
+    }
+    else
+    {
+        intMask = GT_INT_MASK;
+    }
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_AVB_INTERRUPT))
+    {
+        intMask &= ~GT_AVB_INT;
+        len = 8;
+    }
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_DEVICE_INTERRUPT))
+    {
+        intMask &= ~GT_DEVICE_INT;
+        len = 7;
+    }
+
+
+    if(data & ~intMask)
+    {
+        DBG_INFO(("Invalid event type.\n"));
+        return GT_FAIL;
+    }
+
+    /* Set the IntEn bit.               */
+    retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,0,len,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* eventgetActive
+*
+* DESCRIPTION:
+*       This routine gets the enable/disable status of the receive of an hardware driven event.
+*
+* OUTPUTS:
+*       eventType - the event type. any combination of the folowing:
+*           GT_STATS_DONE, GT_VTU_PROB, GT_VTU_DONE, GT_ATU_FULL(or GT_ATU_PROB),
+*           GT_ATU_DONE, GT_PHY_INTERRUPT, GT_EE_INTERRUPT, GT_DEVICE_INT,
+*            and GT_AVB_INTERRUPT
+*
+* INPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       Each switch device has its own set of event Types. Please refer to the
+*        device datasheet for the list of event types that the device supports.
+*
+*******************************************************************************/
+GT_STATUS eventGetActive(IN GT_QD_DEV * dev, IN GT_U32 * eventType)
+{
+	GT_STATUS   retVal;
+	GT_U16      data;
+	GT_U16      intMask;
+	GT_U8       len;
+
+	DBG_INFO(("eventGetActive Called.\n"));
+
+	len = 9;
+
+	if ((IS_IN_DEV_GROUP(dev, DEV_EXTERNAL_PHY_ONLY)) ||
+		(IS_IN_DEV_GROUP(dev, DEV_DEV_PHY_INTERRUPT))) {
+		intMask = GT_NO_INTERNAL_PHY_INT_MASK;
+	} else {
+		intMask = GT_INT_MASK;
+	}
+
+	if (!IS_IN_DEV_GROUP(dev, DEV_AVB_INTERRUPT)) {
+		intMask &= ~GT_AVB_INT;
+		len = 8;
+	}
+
+	if (!IS_IN_DEV_GROUP(dev, DEV_DEVICE_INTERRUPT)) {
+		intMask &= ~GT_DEVICE_INT;
+		len = 7;
+	}
+
+	/* Get the IntEn bit.               */
+	retVal = hwGetGlobalRegField(dev, QD_REG_GLOBAL_CONTROL, 0, len, &data);
+	if (retVal != GT_OK) {
+		DBG_INFO(("Failed.\n"));
+		return retVal;
+	}
+
+	*eventType = data;
+
+	DBG_INFO(("OK.\n"));
+	return GT_OK;
+}
+
+/*******************************************************************************
+* eventGetIntStatus
+*
+* DESCRIPTION:
+*       This routine reads an hardware driven event status.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       intCause - It provides the source of interrupt of the following:
+*               GT_STATS_DONE, GT_VTU_PROB, GT_VTU_DONE, GT_ATU_FULL,
+*               GT_ATU_DONE, GT_PHY_INTERRUPT, GT_EE_INTERRUPT, GT_DEVICE_INT,
+*                and GT_AVB_INTERRUPT
+*                For Gigabit Switch, GT_ATU_FULL is replaced with GT_ATU_PROB and
+*                if there is no internal phy, GT_PHY_INTERRUPT is not supported.
+*
+* RETURNS:
+*       GT_OK   - read success.
+*       GT_FAIL - otherwise
+*
+* COMMENTS:
+*       Each switch device has its own set of event Types. Please refer to the
+*        device datasheet for the list of event types that the device supports.
+*
+*******************************************************************************/
+GT_STATUS eventGetIntStatus
+(
+    IN GT_QD_DEV *dev,
+    OUT GT_U16   *intCause
+)
+{
+    GT_STATUS     retVal;         /* Function calls return value.     */
+    GT_U8         len;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_AVB_INTERRUPT))
+        len = 9;
+    else if (IS_IN_DEV_GROUP(dev,DEV_DEVICE_INTERRUPT))
+        len = 8;
+    else
+        len = 7;
+
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_STATUS,0,len,intCause);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gvtuGetIntStatus
+*
+* DESCRIPTION:
+* Check to see if a specific type of VTU interrupt occured
+*
+* INPUTS:
+*       intType - the type of interrupt which causes an interrupt.
+*            any combination of
+*            GT_MEMEBER_VIOLATION,
+*            GT_MISS_VIOLATION,
+*            GT_FULL_VIOLATION
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK - on success
+*         GT_FAIL - on error
+*
+* COMMENTS:
+*     FULL_VIOLATION is not supported by all switch devices.
+*    Please refer to the device datasheet.
+*
+*******************************************************************************/
+GT_STATUS gvtuGetIntStatus
+(
+    IN GT_QD_DEV          *dev,
+    OUT GT_VTU_INT_STATUS *vtuIntStatus
+)
+{
+    GT_STATUS       retVal;
+
+    DBG_INFO(("gvtuGetIntStatus Called.\n"));
+
+    /* check if device supports this feature */
+    if((IS_VALID_API_CALL(dev,1, DEV_802_1Q)) != GT_OK )
+      return GT_FAIL;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        retVal = gvtuGetViolation2(dev,vtuIntStatus);
+    }
+    else if ((IS_IN_DEV_GROUP(dev,DEV_ENHANCED_FE_SWITCH)) ||
+              (IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY)))
+    {
+        retVal = gvtuGetViolation3(dev,vtuIntStatus);
+    }
+    else
+    {
+        retVal = gvtuGetViolation(dev,vtuIntStatus);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+/*******************************************************************************
+* gatuGetIntStatus
+*
+* DESCRIPTION:
+* Check to see if a specific type of ATU interrupt occured
+*
+* INPUTS:
+*          intType - the type of interrupt which causes an interrupt.
+*            any combination of
+*            GT_AGE_OUT_VIOLATION,
+*            GT_AGE_VIOLATION,
+*            GT_MEMEBER_VIOLATION,
+*            GT_MISS_VIOLATION,
+*            GT_FULL_VIOLATION
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK - on success
+*         GT_FAIL - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+GT_STATUS gatuGetIntStatus
+(
+    IN GT_QD_DEV          *dev,
+    OUT GT_ATU_INT_STATUS *atuIntStatus
+)
+{
+    GT_STATUS       retVal;
+
+    DBG_INFO(("gatuGetIntStatus Called.\n"));
+
+    /* check if device supports this feature */
+    if (!((IS_IN_DEV_GROUP(dev,DEV_GIGABIT_MANAGED_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_ENHANCED_FE_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY))))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = gatuGetViolation(dev,atuIntStatus);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* geventGetDevIntStatus
+*
+* DESCRIPTION:
+*         Check to see which device interrupts (WatchDog, JamLimit, Duplex Mismatch,
+*        SERDES Link Int, and Phy Int) have occurred.
+*
+* INPUTS:
+*       intType - the type of interrupt which causes an interrupt.
+*                  any combination of
+*                    GT_DEV_INT_WATCHDOG,
+*                    GT_DEV_INT_JAMLIMIT,
+*                    GT_DEV_INT_DUPLEX_MISMATCH,
+*                    GT_DEV_INT_SERDES_LINK
+*                    GT_DEV_INT_WAKE_EVENT
+*                    GT_DEV_INT_PHY
+*        port    - logical port where GT_DEV_INT_DUPLEX_MISMATCH occurred.
+*                  valid only if GT_DEV_INT_DUPLEX_MISMATCH is set in intType.
+*        linkInt - SERDES port list where GT_DEV_INT_SERDES_LINK interrupt is
+*                  asserted. It's in vector format, Bit 10 is for port 10,
+*                  Bit 9 is for port 9, etc.
+*                  valid only if GT_DEV_INT_SERDES_LINK bit is set in intType.
+*                  These bits are only valid of the port that is in 1000Base-X mode.
+*        phyInt  - port list where GT_DEV_INT_PHY interrupt is asserted.
+*                  It's in vector format, Bit 0 is for port 0, Bit 1 is for port 1, etc.
+*                  valid only if GT_DEV_INT_PHY bit is set in intType.
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK - on success
+*         GT_FAIL - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+GT_STATUS geventGetDevIntStatus
+(
+    IN  GT_QD_DEV             *dev,
+    OUT GT_DEV_INT_STATUS    *devIntStatus
+)
+{
+    GT_STATUS       retVal;
+    GT_U16            data, hwPort;
+
+    DBG_INFO(("geventGetDevIntStatus Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_DEVICE_INTERRUPT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    devIntStatus->devIntCause = 0;
+
+    retVal = hwReadGlobal2Reg(dev,QD_REG_DEVINT_SOURCE,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    /* check SERDES Link Int and Phy Int, if applicable */
+    if (IS_IN_DEV_GROUP(dev,DEV_DEVICE_INT_TYPE1))
+    {
+        /* check SERDES Link Int */
+        if (data & (0x7 << 8))
+        {
+            devIntStatus->devIntCause |= GT_DEV_INT_SERDES_LINK;
+            devIntStatus->linkInt = GT_PORTVEC_2_LPORTVEC((data & (7<<8)));
+        }
+    }
+    else  if (IS_IN_DEV_GROUP(dev,DEV_DEVICE_INT_TYPE2))  /* DEV_DEVICE_INT_TYPE2 */
+    {
+        if (data & (0x3 << 11))
+        {
+            devIntStatus->devIntCause |= GT_DEV_INT_SERDES_LINK;
+            devIntStatus->linkInt = GT_PORTVEC_2_LPORTVEC((data & (0x3 << 11)) >> 7);
+        }
+
+        if (data & 0x1F)
+        {
+            devIntStatus->devIntCause |= GT_DEV_INT_PHY;
+            devIntStatus->phyInt = GT_PORTVEC_2_LPORTVEC((data & 0x1F));
+        }
+    }
+    else /* DEV_DEVICE_INT_TYPE3 */
+    {
+      if (IS_IN_DEV_GROUP(dev,DEV_DEVICE_INT_TYPE3))  /* DEV_DEVICE_INT_TYPE3 */
+      {
+        if (data & QD_DEV_INT_WAKE_EVENT)
+        {
+            devIntStatus->devIntCause |= GT_DEV_INT_WAKE_EVENT;
+        }
+      }
+
+        if (data & 0x1F)
+        {
+            devIntStatus->devIntCause |= GT_DEV_INT_PHY;
+            devIntStatus->phyInt = GT_PORTVEC_2_LPORTVEC((data & 0x1F));
+        }
+    }
+
+    if (data & QD_DEV_INT_DUPLEX_MISMATCH)
+    {
+        devIntStatus->devIntCause |= GT_DEV_INT_DUPLEX_MISMATCH;
+
+        /* read port that causes the interrupt */
+        retVal = hwGetGlobal2RegField(dev, QD_REG_WD_CONTROL, 12, 4, &hwPort);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+
+        /* re-arm the interrupt event */
+        retVal = hwSetGlobal2RegField(dev, QD_REG_WD_CONTROL, 12, 4, 0xF);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+
+        devIntStatus->port = GT_PORT_2_LPORT((GT_U8)hwPort);
+    }
+
+    if (data & QD_DEV_INT_WATCHDOG)
+    {
+        devIntStatus->devIntCause |= GT_DEV_INT_WATCHDOG;
+    }
+
+    if (data & QD_DEV_INT_JAMLIMIT)
+    {
+        devIntStatus->devIntCause |= GT_DEV_INT_JAMLIMIT;
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* geventSetAgeIntEn
+*
+* DESCRIPTION:
+*        This routine enables/disables Age Interrupt for a port.
+*        When it's enabled, ATU Age Violation interrupts from this port are enabled.
+*        An Age Violation will occur anytime a port is Locked(gprtSetLockedPort)
+*        and the ingressing frame's SA is contained in the ATU as a non-Static
+*        entry with a EntryState less than 0x4.
+*
+* INPUTS:
+*        port - the logical port number
+*        mode - GT_TRUE to enable Age Interrupt,
+*               GT_FALUSE to disable
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS geventSetAgeIntEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("geventSetAgeIntEn Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_PORT_BASED_AGE_INT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set Age Interrupt Enable Mode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_ASSOCIATION,11,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+/*******************************************************************************
+* geventGetAgeIntEn
+*
+* DESCRIPTION:
+*        This routine gets Age Interrupt Enable for the port.
+*        When it's enabled, ATU Age Violation interrupts from this port are enabled.
+*        An Age Violation will occur anytime a port is Locked(gprtSetLockedPort)
+*        and the ingressing frame's SA is contained in the ATU as a non-Static
+*        entry with a EntryState less than 0x4.
+*
+* INPUTS:
+*        port - the logical port number
+*        mode - GT_TRUE to enable Age Interrupt,
+*               GT_FALUSE to disable
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS geventGetAgeIntEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("geventGetAgeIntEn Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_PORT_BASED_AGE_INT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get Age Interrupt Enable Mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_ASSOCIATION,11,1,&data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    BIT_2_BOOL(data, *mode);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* geventSetAgeOutIntEn
+*
+* DESCRIPTION:
+*        Interrupt on Age Out. When aging is enabled, all non-static address
+*        entries in the ATU's address database are periodically aged.
+*        When this feature is set to GT_TRUE and an entry associated with this
+*        port is aged out, an AgeOutViolation will be captured for that entry.
+*
+* INPUTS:
+*        port - the logical port number
+*        mode - GT_TRUE to enable Age Out Interrupt,
+*               GT_FALUSE to disable
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS geventSetAgeOutIntEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("geventSetAgeOutIntEn Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_AGE_OUT_INT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set Age Out Interrupt Enable Mode. */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_ASSOCIATION,14,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+/*******************************************************************************
+* geventGetAgeOutIntEn
+*
+* DESCRIPTION:
+*        Interrupt on Age Out. When aging is enabled, all non-static address
+*        entries in the ATU's address database are periodically aged.
+*        When this feature is set to GT_TRUE and an entry associated with this
+*        port is aged out, an AgeOutViolation will be captured for that entry.
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        mode - GT_TRUE, if Age Out Interrupt is enabled
+*               GT_FALUSE, otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS geventGetAgeOutIntEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("geventGetAgeOutIntEn Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_AGE_OUT_INT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get Age Out Interrupt Enable Mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_ASSOCIATION,14,1,&data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    BIT_2_BOOL(data, *mode);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* geventSetOverLimitInt
+*
+* DESCRIPTION:
+*        This routine enables/disables Over Limit Interrupt for a port.
+*        If it's enabled, an ATU Miss violation will be generated when port auto
+*        learn reached the limit(refer to gfdbGetPortAtuLimitReached API).
+*
+* INPUTS:
+*        port - the logical port number
+*        mode - GT_TRUE to enable Over Limit Interrupt,
+*               GT_FALUSE to disable
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS geventSetOverLimitInt
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("geventSetOverLimitInt Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_ATU_LIMIT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set Over Limit Interrupt Enable Mode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_ATU_CONTROL, 13, 1, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+/*******************************************************************************
+* geventGetOverLimitInt
+*
+* DESCRIPTION:
+*        This routine enables/disables Over Limit Interrupt for a port.
+*        If it's enabled, an ATU Miss violation will be generated when port auto
+*        learn reached the limit(refer to gfdbSetPortAtuLearnLimit API).
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        mode - GT_TRUE to enable Over Limit Interrupt,
+*               GT_FALUSE to disable
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS geventGetOverLimitInt
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("geventGetOverLimitInt Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_ATU_LIMIT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Set Over Limit Interrupt Enable Mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_ATU_CONTROL, 13, 1, &data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    BIT_2_BOOL(data, *mode);
+
+    return retVal;
+}
+
+/*******************************************************************************
+* geventGetPortAtuLimitReached
+*
+* DESCRIPTION:
+*       This routine checks if learn limit has been reached.
+*        When it reached, the port can no longer auto learn any more MAC addresses
+*        because the address learn limit set on this port has been reached.
+*
+* INPUTS:
+*       port  - logical port number
+*
+* OUTPUTS:
+*       limit - GT_TRUE, if limit has been reached
+*                GT_FALSE, otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*
+*******************************************************************************/
+GT_STATUS geventGetPortAtuLimitReached
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT      port,
+    IN  GT_BOOL       *limit
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("geventGetPortAtuLimitReached Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Check device if this feature is supported. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ATU_LIMIT))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the LimitReached bit. */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_ATU_CONTROL, 14, 1, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data, *limit);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* eventSetDevInt
+*
+* DESCRIPTION:
+*        Device Interrupt.
+*        The following device interrupts are supported:
+*            GT_DEV_INT_WATCHDOG    -
+*                WatchDog event interrupt (WatchDog event can be configured with
+*                gwdSetEvent API)
+*            GT_DEV_INT_JAMLIMIT    -
+*                any of the ports detect an Ingress Jam Limit violation
+*                (see gprtSetPauseLimitIn API)
+*            GT_DEV_INT_DUPLEX_MISMATCH -
+*                any of the ports detect a duplex mismatch (i.e., the local port is
+*                in half duplex mode while the link partner is in full duplex mode)
+*            GT_DEV_INT_WAKE_EVENT -
+*                any of the ports detect a Wake event interrupt
+*            GT_DEV_INT_SERDES_LINK -
+*                SERDES link change interrupt.
+*                An interrupt occurs when a SERDES port changes link status
+*                (link up or link down)
+*            GT_DEV_INT_PHY - Phy interrupt.
+*
+*        If any of the above events is enabled, GT_DEVICE_INT interrupt will
+*        be asserted by the enabled event when GT_DEV_INT is enabled with
+*        eventSetActive API.
+*
+* INPUTS:
+*        devInt - GT_DEV_INT
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS eventSetDevInt
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_DEV_EVENT    *devInt
+)
+{
+    GT_U16          data, event;
+    GT_U16            serdesMask=0, phyMask=0, mask=0;
+    GT_U32            pList;
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("eventSetDevInt Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_DEVICE_INTERRUPT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    event = (GT_U16)devInt->event;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_DEVICE_INT_TYPE1))
+    {
+        serdesMask = mask = 7 << 8;    /* SERDES Port List */
+        phyMask = 0;
+    }
+    else if (IS_IN_DEV_GROUP(dev,DEV_DEVICE_INT_TYPE2))
+    {
+        serdesMask = mask = 3 << 11;    /* SERDES Port List */
+        mask |= 0x1F;    /* Phy list */
+        phyMask = 0x1F;
+    }
+    else
+    {
+        mask |= 0x1F;    /* Phy list */
+        phyMask = 0x1F;
+    }
+    mask |= QD_DEV_INT_WATCHDOG | QD_DEV_INT_JAMLIMIT | QD_DEV_INT_DUPLEX_MISMATCH | QD_DEV_INT_WAKE_EVENT ;
+
+    data = 0;
+
+     if (event & GT_DEV_INT_SERDES_LINK)
+     {
+        /* check for valid SERDES Port List */
+        if (IS_IN_DEV_GROUP(dev,DEV_DEVICE_INT_TYPE1))
+        {
+            pList = GT_LPORTVEC_2_PORTVEC(devInt->portList);
+            if ((GT_U16)pList & (~serdesMask))
+            {
+                DBG_INFO(("GT_BAD_PARAM portList\n"));
+                return GT_BAD_PARAM;
+            }
+            data = (GT_U16)pList;
+        }
+        else
+        {
+            pList = GT_LPORTVEC_2_PORTVEC(devInt->portList);
+            pList <<= 7;
+            if ((GT_U16)pList & (~serdesMask))
+            {
+                DBG_INFO(("GT_BAD_PARAM portList\n"));
+                return GT_BAD_PARAM;
+            }
+            data = (GT_U16)pList;
+        }
+    }
+
+    if (event & GT_DEV_INT_PHY)
+    {
+        /* check for valid Phy List */
+        if (IS_IN_DEV_GROUP(dev,DEV_DEVICE_INT_TYPE1))
+         {
+            DBG_INFO(("GT_BAD_PARAM: PHY Int not supported.\n"));
+             return GT_BAD_PARAM;
+         }
+        else
+        {
+            pList = GT_LPORTVEC_2_PORTVEC(devInt->phyList);
+            if ((GT_U16)pList & (~phyMask))
+            {
+                DBG_INFO(("GT_BAD_PARAM phyList\n"));
+                return GT_BAD_PARAM;
+            }
+
+            data |= (GT_U16)pList;
+        }
+     }
+
+    if (event & GT_DEV_INT_WATCHDOG)
+    {
+        data |= QD_DEV_INT_WATCHDOG;
+    }
+
+    if (event & GT_DEV_INT_JAMLIMIT)
+    {
+        data |= QD_DEV_INT_JAMLIMIT;
+    }
+
+    if (event & GT_DEV_INT_DUPLEX_MISMATCH)
+    {
+        data |= QD_DEV_INT_DUPLEX_MISMATCH;
+    }
+
+    if (event & GT_DEV_INT_WAKE_EVENT)
+    {
+        data |= QD_DEV_INT_WAKE_EVENT;
+    }
+
+    if (data & (~mask))
+    {
+        DBG_INFO(("GT_BAD_PARAM portList\n"));
+        return GT_BAD_PARAM;
+    }
+
+    if (data & GT_DEV_INT_DUPLEX_MISMATCH)
+    {
+        retVal = hwSetGlobal2RegField(dev, QD_REG_WD_CONTROL, 12, 4, 0xF);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+    }
+
+    /* Set the related bit. */
+    retVal = hwSetGlobal2RegBits(dev,QD_REG_DEVINT_MASK, mask, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gwdSetEvent
+*
+* DESCRIPTION:
+*        Watch Dog Event.
+*        The following Watch Dog events are supported:
+*            GT_WD_QC  - Queue Controller Watch Dog enable.
+*                        When enabled, the QC's watch dog circuit checks for link
+*                        list errors and any errors found in the QC.
+*            GT_WD_EGRESS - Egress Watch Dog enable.
+*                        When enabled, each port's egress circuit checks for problems
+*                        between the port and the Queue Controller.
+*            GT_WD_FORCE - Force a Watch Dog event.
+*
+*        If any of the above events is enabled, GT_DEVICE_INT interrupt will
+*        be asserted by the enabled WatchDog event when GT_DEV_INT_WATCHDOG is
+*        enabled with eventSetDevActive API and GT_DEV_INT is enabled with
+*        eventSetActive API.
+*
+* INPUTS:
+*        wdEvent - Watch Dog Events
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gwdSetEvent
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32        wdEvent
+)
+{
+    GT_U16          data, mask;
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gwdSetEvent Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_WATCHDOG_EVENT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    mask = (1 << 5) | (1 << 3) | (1 << 2);
+    data = 0;
+
+    if (wdEvent & GT_WD_QC)
+    {
+        data |= (1 << 5);
+    }
+
+    if (wdEvent & GT_WD_EGRESS)
+    {
+        data |= (1 << 3);
+    }
+
+    if (wdEvent & GT_WD_FORCE)
+    {
+        data |= (1 << 2);
+    }
+
+    /* Set the related bit. */
+    retVal = hwSetGlobal2RegBits(dev,QD_REG_WD_CONTROL, mask, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gwdSetSWResetOnWD
+*
+* DESCRIPTION:
+*        SWReset on Watch Dog Event.
+*        When this feature is enabled, any enabled watch dog event (gwdSetEvent API)
+*        will automatically reset the switch core's datapath just as if gsysSwReset
+*        API is called.
+*
+*        The Watch Dog History (gwdGetHistory API) won't be cleared by this
+*        automatic SWReset. This allows the user to know if any watch dog event
+*        ever occurred even if the swich is configured to automatically recover
+*        from a watch dog.
+*
+*        When this feature is disabled, enabled watch dog events will not cause a
+*        SWReset.
+*
+* INPUTS:
+*        en   - GT_TRUE to enable SWReset on WD
+*               GT_FALUSE to disable
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gwdSetSWResetOnWD
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_BOOL        en
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gwdSetSWResetOnWD Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_WATCHDOG_EVENT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set the related bit. */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_WD_CONTROL, 0, 1, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gwdGetSWResetOnWD
+*
+* DESCRIPTION:
+*        SWReset on Watch Dog Event.
+*        When this feature is enabled, any enabled watch dog event (gwdSetEvent API)
+*        will automatically reset the switch core's datapath just as if gsysSwReset
+*        API is called.
+*
+*        The Watch Dog History (gwdGetHistory API) won't be cleared by this
+*        automatic SWReset. This allows the user to know if any watch dog event
+*        ever occurred even if the swich is configured to automatically recover
+*        from a watch dog.
+*
+*        When this feature is disabled, enabled watch dog events will not cause a
+*        SWReset.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en   - GT_TRUE, if SWReset on WD is enabled
+*               GT_FALUSE, otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gwdGetSWResetOnWD
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL        *en
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gwdSetSWResetOnWD Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_WATCHDOG_EVENT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the related bit. */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_WD_CONTROL, 0, 1, &data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    BIT_2_BOOL(data, *en);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gwdGetHistory
+*
+* DESCRIPTION:
+*        This routine retrieves Watch Dog history. They are
+*
+*        wdEvent -
+*            When it's set to GT_TRUE, some enabled Watch Dog event occurred.
+*            The following events are possible:
+*                QC WatchDog Event (GT_WD_QC)
+*                Egress WatchDog Event (GT_WD_EGRESS)
+*                Forced WatchDog Event (GT_WD_FORCE)
+*        egressEvent -
+*            If any port's egress logic detects an egress watch dog issue,
+*            this field is set to GT_TRUE, regardless of the enabling GT_WD_EGRESS
+*            event.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        history - GT_WD_EVENT_HISTORY structure
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gwdGetHistory
+(
+    IN  GT_QD_DEV            *dev,
+    OUT GT_WD_EVENT_HISTORY    *history
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gwdSetSWResetOnWD Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_WATCHDOG_EVENT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the related bit. */
+    retVal = hwReadGlobal2Reg(dev,QD_REG_WD_CONTROL,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    if (data & (1 << 4))
+    {
+        history->egressEvent = GT_TRUE;
+    }
+    else
+    {
+        history->egressEvent = GT_FALSE;
+    }
+
+    if (data & (1 << 1))
+    {
+        history->wdEvent = GT_TRUE;
+    }
+    else
+    {
+        history->wdEvent = GT_FALSE;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gwdSetRMUTimeOut
+*
+* DESCRIPTION:
+*        Remote Management Timeout. When this bit is set to a one the Remote
+*        Management Unit(RMU) will timeout on Wait on Bit commands. If the bit that
+*        is being tested has not gone to the specified value after 1 sec. has elapsed
+*        the Wait on Bit command will be terminated and the Response frame will be
+*        sent without any further processing.
+*
+*        When this bit is cleared to a zero the Wait on Bit command will wait
+*        until the bit that is being tested has changed to the specified value.
+*
+* INPUTS:
+*        en   - GT_TRUE to enable RMU Timeout
+*               GT_FALUSE to disable
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gwdSetRMUTimeOut
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_BOOL        en
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gwdSetRMUTimeOut Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_WATCHDOG_EVENT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set the related bit. */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_WD_CONTROL, 6, 1, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gwdGetRMUTimeOut
+*
+* DESCRIPTION:
+*        Remote Management Timeout. When this bit is set to a one the Remote
+*        Management Unit(RMU) will timeout on Wait on Bit commands. If the bit that
+*        is being tested has not gone to the specified value after 1 sec. has elapsed
+*        the Wait on Bit command will be terminated and the Response frame will be
+*        sent without any further processing.
+*
+*        When this bit is cleared to a zero the Wait on Bit command will wait
+*        until the bit that is being tested has changed to the specified value.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en   - GT_TRUE to enable RMU Timeout
+*               GT_FALUSE, otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gwdGetRMUTimeOut
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL        *en
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gwdGetRMUTimeOut Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_WATCHDOG_EVENT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the related bit. */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_WD_CONTROL, 6, 1, &data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    BIT_2_BOOL(data, *en);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gwdGetEgressWDEvent
+*
+* DESCRIPTION:
+*        If any port's egress logic detects an egress watch dog issue, this bit
+*        will be set to a one, regardless of the setting of the GT_WD_EGRESS in
+*        gwdSetEvent function.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        event - GT_TRUE, if egress logic has detected any egress watch dog issue
+*                GT_FALUSE, otherwise
+*
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gwdGetEgressWDEvent
+(
+    IN  GT_QD_DEV        *dev,
+    OUT GT_BOOL            *event
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gwdGetEgressWDEvent Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_WATCHDOG_EVENT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the related bit. */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_WD_CONTROL, 7, 1, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    BIT_2_BOOL(data, *event);
+
+    return retVal;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtMisc.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtMisc.c
new file mode 100644
index 000000000000..d52f905e5599
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtMisc.c
@@ -0,0 +1,2188 @@
+#include <Copyright.h>
+
+/*******************************************************************************
+* gtMisc.c
+*
+* DESCRIPTION:
+*       API definitions for Ip Mapping Table
+*                            EEPROM access
+*                            Scratch and Misc Control
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtSem.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+
+/*******************************************************************************
+* gsysSetUseIpMappingTable
+*
+* DESCRIPTION:
+*        This API set to use IP Frame Priorities from this table.
+*        Set GT_TRUE:  The IP_FPRI data in this table is used as the frame’s
+*            initial IP_FPRI.
+*        Set GT_FALSE: The IP_FPRI data in this table is ignored. Instead the
+*            frame’s initial IP_FPRI is generated by using the frame’s IP_QPRI
+*            as the IP_FPRI’s upper two bits, and the IP_FPRI’s lowest bit comes
+*            from bit 0 of the frame’s source port’s Default PRI (Port offset 0x07).
+*
+* INPUTS:
+*        en    - [GT_TRUE] / [GT_FALSE]
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysSetUseIpMappingTable
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_BOOL            en
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U16        data;
+
+    DBG_INFO(("gsysSetUseIpMappingTable Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_IP_MAPPING_TABLE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    data = (en==GT_TRUE)?1:0;
+
+    retVal = hwSetGlobalRegField(dev,QD_REG_IP_MAPPING_TABLE,14,1,data);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysGetUseIpMappingTable
+*
+* DESCRIPTION:
+*        This API get to use IP Frame Priorities from this table.
+*        Set GT_TRUE:  The IP_FPRI data in this table is used as the frame’s
+*            initial IP_FPRI.
+*        Set GT_FALSE: The IP_FPRI data in this table is ignored. Instead the
+*            frame’s initial IP_FPRI is generated by using the frame’s IP_QPRI
+*            as the IP_FPRI’s upper two bits, and the IP_FPRI’s lowest bit comes
+*            from bit 0 of the frame’s source port’s Default PRI (Port offset 0x07).
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en    - [GT_TRUE] / [GT_FALSE]
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetUseIpMappingTable
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_BOOL            *en
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U16        data;
+
+    DBG_INFO(("gsysGetUseIpMappingTable Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_IP_MAPPING_TABLE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+
+    retVal = hwGetGlobalRegField(dev,QD_REG_IP_MAPPING_TABLE,14,1,&data);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    *en= (data==1)?GT_TRUE:GT_FALSE;
+
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gsysSetIpMappingPrio
+*
+* DESCRIPTION:
+*        Set IPv4 and IPv6 Frame Priority Mapping, and
+*        IPv4 and IPv6 Queue Priority Mapping.
+*       The ipFpri value is used as the frame's initial FPRI when the frame is
+*        an IPv4 or an IPv6 frame, and the port’s InitialPri (Port offset 0x04)
+*        is configured to use IP FPri’s.
+*       The ipQpri value is used as the frame’s initial QPRI when the frame is
+*        an IPv4 or an IPv6 frame, and the port’s InitialPri and TagIfBoth
+*        registers (Port offset 0x04) are configured to use IP QPri’s.
+*
+* INPUTS:
+*        point - Pointer to the Ip Mapping Table.
+*                0 - 0x3f;
+*        ipFpri -  The value is 0 - 7
+*        ipQpri -  The value is 0 - 3.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysSetIpMappingPrio
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            point,
+    IN  GT_U8            ipFpri,
+    IN  GT_U8            ipQpri
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U16        data;
+
+    DBG_INFO(("gsysSetIpMappingPrio Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_IP_MAPPING_TABLE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if ((point>0x3f)||(ipFpri>7)||(ipQpri>3))
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    /* Wait until the Scratch and Misc control is ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_IP_MAPPING_TABLE;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->vtuRegsSem);
+        return retVal;
+      }
+    }
+#else
+       data = 1;
+    while(data == 1)
+       {
+        retVal = hwGetGlobalRegField(dev,QD_REG_IP_MAPPING_TABLE,15,1,&data);
+           if(retVal != GT_OK)
+           {
+            gtSemGive(dev,dev->tblRegsSem);
+               return retVal;
+           }
+    }
+#endif
+
+    data = (((ipFpri&7)<<4) | (ipQpri&3));
+    data |=  ((GT_U16)((1 << 15) | (point << 8)));
+
+    retVal = hwWriteGlobalReg(dev, QD_REG_IP_MAPPING_TABLE, data);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+           return retVal;
+    }
+
+
+
+    gtSemGive(dev,dev->tblRegsSem);
+
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysGetIpMappingPrio
+*
+* DESCRIPTION:
+*        Get IPv4 and IPv6 Frame Priority Mapping, and
+*        IPv4 and IPv6 Queue Priority Mapping.
+*       The ipFpri value is used as the frame's initial FPRI when the frame is
+*        an IPv4 or an IPv6 frame, and the port’s InitialPri (Port offset 0x04)
+*        is configured to use IP FPri’s.
+*        The ipQpri value is used as the frame’s initial QPRI when the frame is
+*        an IPv4 or an IPv6 frame, and the port’s InitialPri and TagIfBoth
+*        registers (Port offset 0x04) are configured to use IP QPri’s.
+*
+* INPUTS:
+*        point - Pointer to the Ip Mapping Table.
+*                0 - 0x3f;
+*
+* OUTPUTS:
+*        ipFpri -  The value is 0 - 7
+*        ipQpri -  The value is 0 - 3.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetIpMappingPrio
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            point,
+    OUT  GT_U8            *ipFpri,
+    OUT  GT_U8            *ipQpri
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U16        data;
+
+    DBG_INFO(("gsysGetIpMappingPrio Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_IP_MAPPING_TABLE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (point > 0x3f)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 2;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_IP_MAPPING_TABLE;
+      regAccess.rw_reg_list[0].data = 15;
+      regAccess.rw_reg_list[1].cmd = HW_REG_READ;
+      regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[1].reg = QD_REG_IP_MAPPING_TABLE;
+      regAccess.rw_reg_list[1].data = 0;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->vtuRegsSem);
+        return retVal;
+      }
+      data = qdLong2Short(regAccess.rw_reg_list[1].data);
+    }
+#else
+    do {
+        retVal = hwReadGlobalReg(dev, QD_REG_IP_MAPPING_TABLE, &data);
+        if(retVal != GT_OK)
+           {
+               DBG_INFO(("Failed.\n"));
+            gtSemGive(dev,dev->tblRegsSem);
+               return retVal;
+        }
+    } while (data&0x8000);
+#endif
+
+
+    *ipFpri = (data >> 4) & 7;
+    *ipQpri = (data) & 3;
+
+    gtSemGive(dev,dev->tblRegsSem);
+
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* eepromOperationPerform
+*
+* DESCRIPTION:
+*       This function accesses EEPROM Command Register and Data Register.
+*       The device supports the following EEPROM
+*            operations
+*            GT_EEPROM_NO_OP = No Operation
+*            GT_EEPROM_WRITE_DATA = Write EEPROM at Addr.
+*            GT_EEPROM_READ_DATA = Read EEPROM from Addr.
+*            GT_EEPROM_RESTART = Restart Register Loader execution at Addr
+*                (eepromData = don’t care in this case)
+*            GT_EEPROM_HALT = Halt (stop executing the EEPROM if its not already
+*                stopped)
+*
+* INPUTS:
+*       eepromOp      - EEPROM Opcode.
+*       eepromData    - Data to be written to the EEPROM
+*
+* OUTPUTS:
+*       eepromData    - Data that was read back from the EEPROM.
+*
+Command register above.
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*       GT_BAD_PARAM - if input parameters are beyond range.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+static GT_STATUS eepromOperationPerform
+(
+    IN    GT_QD_DEV             *dev,
+    IN    GT_EEPROM_OPERATION    eepromOp,
+    INOUT GT_EEPROM_OP_DATA        *opData
+)
+{
+    GT_STATUS       retVal;    /* Functions return value */
+    GT_U16          data;     /* temporary Data storage */
+    DBG_INFO(("eepromOperationPerform Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_EEPROM))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (eepromOp>GT_EEPROM_HALT)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->eepromRegsSem,OS_WAIT_FOREVER);
+
+
+    /* Wait until the eeprom in ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_EEPROM_COMMAND;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->vtuRegsSem);
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+    while(data == 1)
+    {
+        retVal = hwGetGlobal2RegField(dev,QD_REG_EEPROM_COMMAND,15,1,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->eepromRegsSem);
+            return retVal;
+        }
+    }
+#endif
+
+
+    /* Set the EEPROM Operation register */
+    switch (eepromOp)
+    {
+        case GT_EEPROM_WRITE_DATA:
+            retVal = hwGetGlobal2RegField(dev,QD_REG_EEPROM_COMMAND,10,1,&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->eepromRegsSem);
+                return retVal;
+            }
+            if (data==0)
+            {
+                gtSemGive(dev,dev->eepromRegsSem);
+                DBG_INFO(("EEPROM is not writablen"));
+                return GT_FAIL;
+            }
+
+            retVal = hwGetGlobal2RegField(dev,QD_REG_EEPROM_COMMAND,11,1,&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->eepromRegsSem);
+                return retVal;
+            }
+            if (data==1)
+            {
+                gtSemGive(dev,dev->eepromRegsSem);
+                DBG_INFO(("EEPROM Loader is running"));
+                return GT_FAIL;
+            }
+
+            data = (GT_U16)opData->eepromData;
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_EEPROM_DATA,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->eepromRegsSem);
+                return retVal;
+            }
+
+            data = (GT_U16)((1 << 15) | (GT_EEPROM_WRITE_DATA << 12) |
+                    (opData->eepromAddr & 0xFF));
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_EEPROM_COMMAND,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->eepromRegsSem);
+                return retVal;
+            }
+            break;
+
+        case GT_EEPROM_READ_DATA:
+            retVal = hwGetGlobal2RegField(dev,QD_REG_EEPROM_COMMAND,11,1,&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->eepromRegsSem);
+                return retVal;
+            }
+            if (data==1)
+            {
+                gtSemGive(dev,dev->eepromRegsSem);
+                DBG_INFO(("EEPROM Loader is running"));
+                return GT_FAIL;
+            }
+
+            data = (GT_U16)((1 << 15) | (GT_EEPROM_READ_DATA << 12) |
+                    (opData->eepromAddr & 0xFF));
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_EEPROM_COMMAND,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->eepromRegsSem);
+                return retVal;
+            }
+
+
+#ifdef GT_RMGMT_ACCESS
+            {
+              HW_DEV_REG_ACCESS regAccess;
+
+              regAccess.entries = 1;
+
+              regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+              regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+              regAccess.rw_reg_list[0].reg = QD_REG_EEPROM_COMMAND;
+              regAccess.rw_reg_list[0].data = 15;
+              retVal = hwAccessMultiRegs(dev, &regAccess);
+              if(retVal != GT_OK)
+              {
+                gtSemGive(dev,dev->vtuRegsSem);
+                return retVal;
+              }
+            }
+#else
+            data = 1;
+            while(data == 1)
+            {
+                retVal = hwGetGlobal2RegField(dev,QD_REG_EEPROM_COMMAND,15,1,&data);
+                if(retVal != GT_OK)
+                {
+                    gtSemGive(dev,dev->eepromRegsSem);
+                    return retVal;
+                }
+            }
+#endif
+
+
+            retVal = hwReadGlobal2Reg(dev,QD_REG_EEPROM_DATA,&data);
+            opData->eepromData = (GT_U32)data;
+
+            break;
+
+        case GT_EEPROM_RESTART:
+            data = (GT_U16)((1 << 15) | (GT_EEPROM_RESTART << 12) |
+                    (opData->eepromAddr & 0xFF));
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_EEPROM_COMMAND,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->eepromRegsSem);
+                return retVal;
+            }
+
+
+            break;
+
+        case GT_EEPROM_HALT:
+            data = (GT_U16)((1 << 15) | (GT_EEPROM_HALT << 12) |
+                    (opData->eepromAddr & 0xFF));
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_EEPROM_COMMAND,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->eepromRegsSem);
+                return retVal;
+            }
+
+            break;
+        default:
+            gtSemGive(dev,dev->eepromRegsSem);
+            return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->eepromRegsSem);
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysReadEeprom
+*
+* DESCRIPTION:
+*        Read EEPROM from EEPROM’s address where the EEOp is performed.
+*
+* INPUTS:
+*        addr - EEPROM Address.
+*
+* OUTPUTS:
+*        data -  Data that was read back from the EEPROM.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysReadEeprom
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            addr,
+    OUT  GT_U8            *data
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_EEPROM_OPERATION    eepromOp;
+    GT_EEPROM_OP_DATA    opData;
+
+    eepromOp = GT_EEPROM_READ_DATA;
+    opData.eepromAddr = addr;
+
+    retVal = eepromOperationPerform(dev,eepromOp,&opData);
+    if(retVal != GT_OK)
+    {
+      return retVal;
+    }
+    *data = (GT_U8)opData.eepromData;
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gsysWriteEeprom
+*
+* DESCRIPTION:
+*        Write EEPROM at the EEPROM’s address where the EEOp is performed.
+*
+* INPUTS:
+*        addr - EEPROM Address.
+*        data - Data to be written to the EEPROM
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysWriteEeprom
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            addr,
+    IN  GT_U8            data
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_EEPROM_OPERATION    eepromOp;
+    GT_EEPROM_OP_DATA    opData;
+
+    eepromOp = GT_EEPROM_WRITE_DATA;
+    opData.eepromAddr = addr;
+    opData.eepromData = data;
+
+    retVal = eepromOperationPerform(dev,eepromOp,&opData);
+    if(retVal != GT_OK)
+    {
+      return retVal;
+    }
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gsysRestartEeprom
+*
+* DESCRIPTION:
+*        Restart Register Loader execution at the EEPROM’s address where the EEOp
+*        is performed
+*
+* INPUTS:
+*        addr - EEPROM Address. .
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysRestartEeprom
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            addr
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_EEPROM_OPERATION    eepromOp;
+    GT_EEPROM_OP_DATA    opData;
+
+    eepromOp = GT_EEPROM_RESTART;
+    opData.eepromAddr = addr;
+
+    retVal = eepromOperationPerform(dev,eepromOp,&opData);
+    if(retVal != GT_OK)
+    {
+      return retVal;
+    }
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gsysHaltEeprom
+*
+* DESCRIPTION:
+*        Halt (stop executing the EEPROM if its not already stopped)
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysHaltEeprom
+(
+    IN  GT_QD_DEV         *dev
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_EEPROM_OPERATION    eepromOp;
+    GT_EEPROM_OP_DATA    opData;
+
+    eepromOp = GT_EEPROM_HALT;
+
+    retVal = eepromOperationPerform(dev,eepromOp,  &opData);
+    if(retVal != GT_OK)
+    {
+      return retVal;
+    }
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gsysGetStEeprom
+*
+* DESCRIPTION:
+*        Get EEPROM status. They are Register Loader Running status and EEPROM
+*        Write Enable status
+*        runSt is GT_TRUE: Register Loader Running, whenever the register loader
+*            is busy executing the instructions contained in the EEPROM.
+*        writeEn is GT_TRUE: EEPROM Write Enable, that indicates that writing to
+*            the EEPROM is possible.
+*        writeEn is GT_FALSE: the Write EEPROM EEOp above will not do anything.
+*            This reflects the value of the EE_WE configuration pin after Reset.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        runSt   -   [GT_TRUE] / [GT_FALSE)
+*        writeEn -   [GT_TRUE] / [GT_FALSE)
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetStEeprom
+(
+    IN  GT_QD_DEV         *dev,
+    OUT GT_BOOL            *writeEn,
+    OUT GT_BOOL            *runSt
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U16      data;             /* temporary Data storage */
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_EEPROM))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = hwReadGlobal2Reg(dev,QD_REG_EEPROM_COMMAND, &data);
+    if(retVal != GT_OK)
+    {
+           return retVal;
+    }
+
+
+    *runSt   = (data&GT_EEPROM_OP_ST_RUNNING_MASK)?GT_TRUE:GT_FALSE;
+    *writeEn = (data&GT_EEPROM_OP_ST_WRITE_EN_MASK)?GT_TRUE:GT_FALSE;
+
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gsysSetScratchMiscCtrl
+*
+* DESCRIPTION:
+*        Set Scratch and Misc control data to the Scratch and Misc Control register.
+*        The registers of Scratch and Misc control are.
+*                Scratch Byte 0
+*                Scratch Byte 1
+*                GPIO Configuration
+*                Reserved for future use
+*                GPIO Direction
+*                GPIO Data
+*                CONFIG Data 0
+*                CONFIG Data 1
+*                CONFIG Data 2
+*                CONFIG Data 3
+*                SyncE & TAICLK125’s Drive
+*                P5’s & CLK125’s Clock Drive
+*                P6’s Clock Drive
+*                EEPROM Pad drive
+*
+* INPUTS:
+*        point - Pointer to the Scratch and Misc. Control register.
+*        data  - Scratch and Misc. Control data written to the register
+*                pointed to by the point above.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysSetScratchMiscCtrl
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            point,
+    IN  GT_U8            data
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    IN  GT_U16            tmpData;
+
+    if (point > GT_SCRAT_MISC_REG_MAX)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    if (data &0xffffff00)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    /* program QoS Weight Table, 4 sequences at a time */
+
+    /* Wait until the Scratch and Misc control is ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_SCRATCH_MISC;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->vtuRegsSem);
+        return retVal;
+      }
+    }
+#else
+       tmpData = 1;
+    while(tmpData == 1)
+       {
+        retVal = hwGetGlobal2RegField(dev,QD_REG_SCRATCH_MISC,15,1,&tmpData);
+           if(retVal != GT_OK)
+           {
+            gtSemGive(dev,dev->tblRegsSem);
+               return retVal;
+           }
+    }
+#endif
+
+    tmpData =  (GT_U16)((1 << 15) | (point << 8) | data);
+
+    retVal = hwWriteGlobal2Reg(dev, QD_REG_SCRATCH_MISC, tmpData);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           gtSemGive(dev,dev->tblRegsSem);
+           return retVal;
+    }
+
+
+
+    gtSemGive(dev,dev->tblRegsSem);
+
+       return retVal;
+
+}
+
+
+
+/*******************************************************************************
+* gsysGetScratchMiscCtrl
+*
+* DESCRIPTION:
+*        Get Scratch and Misc control data from the Scratch and Misc Control register.
+*        The register of Scratch and Misc control are.
+*                Scratch Byte 0
+*                Scratch Byte 1
+*                GPIO Configuration
+*                Reserved for future use
+*                GPIO Direction
+*                GPIO Data
+*                CONFIG Data 0
+*                CONFIG Data 1
+*                CONFIG Data 2
+*                CONFIG Data 3
+*                SyncE & TAICLK125’s Drive
+*                P5’s & CLK125’s Clock Drive
+*                P6’s Clock Drive
+*                EEPROM Pad drive
+
+*
+* INPUTS:
+*        point - Pointer to the Scratch and Misc. Control register.
+*
+* OUTPUTS:
+*        data - Scratch and Misc. Control data read from the register
+*                pointed to by the point above.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetScratchMiscCtrl
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            point,
+    OUT  GT_U8            *data
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    OUT  GT_U16            tmpData;
+
+    if (point > GT_SCRAT_MISC_REG_MAX)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    if (point>0x7f)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    /* program QoS Weight Table, 4 sequences at a time */
+
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 2;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_SCRATCH_MISC;
+      regAccess.rw_reg_list[0].data = 15;
+      regAccess.rw_reg_list[1].cmd = HW_REG_READ;
+      regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[1].reg = QD_REG_SCRATCH_MISC;
+      regAccess.rw_reg_list[1].data = 0;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->vtuRegsSem);
+        return retVal;
+     }
+     tmpData = qdLong2Short(regAccess.rw_reg_list[1].data);
+    }
+#else
+    do {
+        retVal = hwReadGlobal2Reg(dev, QD_REG_SCRATCH_MISC, &tmpData);
+        if(retVal != GT_OK)
+           {
+               DBG_INFO(("Failed.\n"));
+            gtSemGive(dev,dev->tblRegsSem);
+               return retVal;
+        }
+    } while (tmpData&0x8000);
+#endif
+
+    *data = tmpData&0xff;
+
+    gtSemGive(dev,dev->tblRegsSem);
+
+
+    return retVal;
+}
+
+
+
+
+/*******************************************************************************
+* gsysSetScratchBits
+*
+* DESCRIPTION:
+*        Set bits to the Scratch and Misc Control register <scratch byte 0 and 1>.
+*        These bits are 100% available to software for whatever purpose desired.
+*        These bits do not connect to any hardware function.
+*
+* INPUTS:
+*        scritch - written bits.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysSetScratchBits
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U16            scratch
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gsysSetScratchBits Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SCRATCH_MISC_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = gsysSetScratchMiscCtrl(dev, GT_SCRAT_MISC_REG_SCRAT_0, (GT_U8)(scratch&0xff));
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    retVal = gsysSetScratchMiscCtrl(dev, GT_SCRAT_MISC_REG_SCRAT_1, (GT_U8)((scratch>>8)&0xff));
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gsysGetScratchBits
+*
+* DESCRIPTION:
+*        Get bits from the Scratch and Misc Control register <scratch byte 0 and 1>.
+*        These bits are 100% available to software for whatever purpose desired.
+*        These bits do not connect to any hardware function.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        scritch - read bits.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetScratchBits
+(
+    IN  GT_QD_DEV         *dev,
+    OUT  GT_U16            *scratch
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U8        data;
+
+    DBG_INFO(("gsysGetScratchBits Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SCRATCH_MISC_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = gsysGetScratchMiscCtrl(dev, GT_SCRAT_MISC_REG_SCRAT_1, &data);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+    *scratch = data;
+    *scratch = *scratch<<8;
+    retVal = gsysGetScratchMiscCtrl(dev, GT_SCRAT_MISC_REG_SCRAT_0, &data);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    *scratch |= data;
+
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gsysSetGpioConfigMod
+*
+* DESCRIPTION:
+*        Set bits to the Scratch and Misc Control register <GPIO Configuration>
+*        to configure GPIO mode.
+*        The bits are shared General Purpose Input Output mode Bits:
+*        Bit 6 - GT_GPIO_BIT_6:    1:GPIO[6]    0:SE_RCLK1
+*        Bit 5 - GT_GPIO_BIT_5:    1:GPIO[5]    0:SE_RCLK0
+*        Now, folloing bits are read only.
+*        Bit 4 - GT_GPIO_BIT_4:    1:GPIO[4]    0:
+*        Bit 3 - GT_GPIO_BIT_3:    1:GPIO[3]    0:
+*        Bit 2 - GT_GPIO_BIT_2:    1:GPIO[2]    0:
+*        Bit 1 - GT_GPIO_BIT_1:    1:GPIO[1]    0:P6_COL
+*        Bit 0 - GT_GPIO_BIT_0:    1:GPIO[0]    0:P6_CRS
+*
+* INPUTS:
+*        mode - OR [GT_GPIO_BIT_x]
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysSetGpioConfigMod
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            mode
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gsysSetGpioConfigMod Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SCRATCH_MISC_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = gsysSetScratchMiscCtrl(dev, GT_SCRAT_MISC_REG_GPIO_CFG, (GT_U8)(mode&0x7f));
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gsysGetGpioConfigMod
+*
+* DESCRIPTION:
+*        Get mode from the Scratch and Misc Control register <GPIO Configuration>.
+*        The bits are shared General Purpose Input Output mode Bits:
+*        Bit 6 - GT_GPIO_BIT_6:    1:GPIO[6]    0:SE_RCLK1
+*        Bit 5 - GT_GPIO_BIT_5:    1:GPIO[5]    0:SE_RCLK0
+*        Now, folloing bits are read only.
+*        Bit 4 - GT_GPIO_BIT_4:    1:GPIO[4]    0:
+*        Bit 3 - GT_GPIO_BIT_3:    1:GPIO[3]    0:
+*        Bit 2 - GT_GPIO_BIT_2:    1:GPIO[2]    0:
+*        Bit 1 - GT_GPIO_BIT_1:    1:GPIO[1]    0:P6_COL
+*        Bit 0 - GT_GPIO_BIT_0:    1:GPIO[0]    0:P6_CRS
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        mode - OR [GT_GPIO_BIT_x]
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetGpioConfigMod
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            *mode
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U8        data;
+
+    DBG_INFO(("gsysGetGpioConfigMod Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SCRATCH_MISC_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = gsysGetScratchMiscCtrl(dev, GT_SCRAT_MISC_REG_GPIO_CFG, &data);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    *mode = 0x7f&data;
+
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gsysSetGpioDirection
+*
+* DESCRIPTION:
+*        Set Gpio direction to the Scratch and Misc Control register <GPIO Direction>.
+*        The bits are used to control the direction of GPIO[6:0].
+*        When a GPIO’s bit is set to a one that GPIO will become an input. When a
+*        GPIO’s bit is cleared to a zero that GPIO will become an output
+*        General Purpose Input Output direction bits are:
+*        Bit 6 - GT_GPIO_BIT_6
+*        Bit 5 - GT_GPIO_BIT_5
+*        Bit 4 - GT_GPIO_BIT_4
+*        Bit 3 - GT_GPIO_BIT_3
+*        Bit 2 - GT_GPIO_BIT_2
+*        Bit 1 - GT_GPIO_BIT_1
+*        Bit 0 - GT_GPIO_BIT_0
+*
+* INPUTS:
+*        dir - OR [GT_GPIO_BIT_x]
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysSetGpioDirection
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            dir
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gsysSetGpioDirection Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SCRATCH_MISC_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = gsysSetScratchMiscCtrl(dev, GT_SCRAT_MISC_REG_GPIO_DIR, (GT_U8)(dir&0x7f));
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gsysGetGpioDirection
+*
+* DESCRIPTION:
+*        get Gpio direction from the Scratch and Misc Control register <GPIO Direction>.
+*        The bits are used to control the direction of GPIO[6:0].
+*        When a GPIO’s bit is set to a one that GPIO will become an input. When a
+*        GPIO’s bit is cleared to a zero that GPIO will become an output
+*        General Purpose Input Output direction bits are:
+*        Bit 6 - GT_GPIO_BIT_6
+*        Bit 5 - GT_GPIO_BIT_5
+*        Bit 4 - GT_GPIO_BIT_4
+*        Bit 3 - GT_GPIO_BIT_3
+*        Bit 2 - GT_GPIO_BIT_2
+*        Bit 1 - GT_GPIO_BIT_1
+*        Bit 0 - GT_GPIO_BIT_0
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        dir - OR [GT_GPIO_BIT_x]
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetGpioDirection
+(
+    IN  GT_QD_DEV         *dev,
+    OUT  GT_U32            *dir
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U8        data;
+
+    DBG_INFO(("gsysGetGpioDirection Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SCRATCH_MISC_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = gsysGetScratchMiscCtrl(dev, GT_SCRAT_MISC_REG_GPIO_DIR, &data);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    *dir = data;
+
+    return GT_OK;
+
+}
+
+
+
+/*******************************************************************************
+* gsysSetGpioData
+*
+* DESCRIPTION:
+*        Set Gpio data to the Scratch and Misc Control register <GPIO data>.
+*        When a GPIO’s bit is set to be an input, data written to this bit will go
+*        to a holding register but will not appear on the pin nor in this register.
+*        Reads of this register will return the actual, real-time, data that is
+*        appearing on the GPIO’s pin.
+*        When a GPIO’s bit is set to be an output, data written to this bit will go
+*        to a holding register and will appear on the GPIO’s pin. Reads of this register
+*        will return the actual, real-time, data that is appearing on the GPIO’s pin
+*        (which in this case should be the data written, but if its isn’t that would
+*        be an indication of a conflict).
+*        When a pin’s direction changes from input to output, the data last written
+*        to the holding register appears on the GPIO’s pin
+*        General Purpose Input Output data bits are:
+*        Bit 6 - GT_GPIO_BIT_6
+*        Bit 5 - GT_GPIO_BIT_5
+*        Bit 4 - GT_GPIO_BIT_4
+*        Bit 3 - GT_GPIO_BIT_3
+*        Bit 2 - GT_GPIO_BIT_2
+*        Bit 1 - GT_GPIO_BIT_1
+*        Bit 0 - GT_GPIO_BIT_0
+*
+* INPUTS:
+*        data - OR [GT_GPIO_BIT_x]
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysSetGpioData
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            data
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gsysSetGpioData Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SCRATCH_MISC_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = gsysSetScratchMiscCtrl(dev, GT_SCRAT_MISC_REG_GPIO_DAT, (GT_U8)(data&0x7f));
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gsysGetGpioData
+*
+* DESCRIPTION:
+*        get Gpio data to the Scratch and Misc Control register <GPIO data>.
+*        When a GPIO’s bit is set to be an input, data written to this bit will go
+*        to a holding register but will not appear on the pin nor in this register.
+*        Reads of this register will return the actual, real-time, data that is
+*        appearing on the GPIO’s pin.
+*        When a GPIO’s bit is set to be an output, data written to this bit will go
+*        to a holding register and will appear on the GPIO’s pin. Reads of this register
+*        will return the actual, real-time, data that is appearing on the GPIO’s pin
+*        (which in this case should be the data written, but if its isn’t that would
+*        be an indication of a conflict).
+*        When a pin’s direction changes from input to output, the data last written
+*        to the holding register appears on the GPIO’s pin
+*        General Purpose Input Output data bits are:
+*        Bit 6 - GT_GPIO_BIT_6
+*        Bit 5 - GT_GPIO_BIT_5
+*        Bit 4 - GT_GPIO_BIT_4
+*        Bit 3 - GT_GPIO_BIT_3
+*        Bit 2 - GT_GPIO_BIT_2
+*        Bit 1 - GT_GPIO_BIT_1
+*        Bit 0 - GT_GPIO_BIT_0
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        data - OR [GT_GPIO_BIT_x]
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetGpioData
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U32            *data
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U8        tmpData;
+
+    DBG_INFO(("gsysGetGpioData Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SCRATCH_MISC_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = gsysGetScratchMiscCtrl(dev, GT_SCRAT_MISC_REG_GPIO_DAT, &tmpData);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    *data = tmpData;
+
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gsysGetConfigData
+*
+* DESCRIPTION:
+*        Get Reset Configuration Pin Data 0-3.
+*        These registers return the values observed after a hardware Reset on the
+*        listed CONFIG data listed below.
+*            Config data 0:
+*              Bit    Config    Pin's Primary Name
+*                0    USER[0]        P6_OUTD[5]
+*                1    USER[1]        P6_OUTD[6]
+*                2    USER[2]        P6_OUTD[7]
+*                3    ADDR[0]        P5_OUTD[0]
+*                4    ADDR[1]        P5_OUTD[5]
+*                5    ADDR[2]        P5_OUTD[6]
+*                6    ADDR]3]        P5_OUTD[7]
+*                7    ADDR[4]        P5_OUTD[1]
+*            Config data 1:
+*                0    LED_SEL[0]    P1_LED
+*                1    LED_SEL[1]    P2_LED
+*                2    4COL P3_LED
+*                3    NormCx        P4_LED
+*                4    Jumbo        P0_LED
+*                5    EE_WE        EE_CS/C2_LED
+*                6    FD_FLOW        EE_CLK/C1_LED
+*                7    HD_FLOW        EE_DIN/C0_LED
+*            Config data 2:
+*                0    P5_MODE[0]    P5_OUTD[2]
+*                1    P5_MODE[1]    P5_OUTD[3]
+*                2    P5_MODE[2]    P5_OUTD[4]
+*                3    Reserved for future use
+*                4    P6_MODE[0]    P6_OUTD[2]
+*                5    P6_MODE[1]    P6_OUTD[3]
+*                6    P6_MODE[2]    P6_OUTD[4]
+*                7    Reserved for future use
+*            Config data 3:
+*                0    RMU_MODE[0] P6_OUTD[0]
+*                1    RMU_MODE[1] P6_OUTD[1]
+*                2    S_VDDOS[0]    PTP_TRIG
+*                3    CLK125EN    CLK125
+*                4    P5_VDDOS[0] P5_GTXCLK
+*                5    P5_VDDOS[1] P5_OUTEN
+*                6    P6_VDDOS[0] P5_GTXCLK
+*                7    P6_VDDOS[1] P6_OUTEN
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*       cfgDat - GT_CONFIG_DTTA
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetConfigData
+(
+    IN  GT_QD_DEV             *dev,
+    OUT  GT_CONFIG_DATA        *cfgData
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U8        tmpData;
+
+    DBG_INFO(("gsysGetConfigData Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SCRATCH_MISC_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = gsysGetScratchMiscCtrl(dev, GT_SCRAT_MISC_REG_CFG_DAT0, &tmpData);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    cfgData->cfgData0.Byte = tmpData;
+
+    retVal = gsysGetScratchMiscCtrl(dev, GT_SCRAT_MISC_REG_CFG_DAT1, &tmpData);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    cfgData->cfgData1.Byte = tmpData;
+
+    retVal = gsysGetScratchMiscCtrl(dev, GT_SCRAT_MISC_REG_CFG_DAT2, &tmpData);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    cfgData->cfgData2.Byte = tmpData;
+
+    retVal = gsysGetScratchMiscCtrl(dev, GT_SCRAT_MISC_REG_CFG_DAT3, &tmpData);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    cfgData->cfgData3.Byte = tmpData;
+
+    return GT_OK;
+
+}
+
+
+
+/*******************************************************************************
+* gsysSetSyncETai
+*
+* DESCRIPTION:
+*        Set SyncE and Tai to the Scratch and Misc. Control register <SyncE and TAI pad>.
+*
+* INPUTS:
+*        zpr - ZPR for SyncE and TAI
+*        znr - ZNR for SyncE and TAI
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysSetSyncETai
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U8            zpr,
+    IN  GT_U8            znr
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U8        data;
+
+    DBG_INFO(("gsysSetSyncETai Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SCRATCH_MISC_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if ((zpr>0x7) || (znr>0x7))
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    data = ((zpr&0x7)<<3) | (znr&0x7);
+
+    retVal = gsysSetScratchMiscCtrl(dev, GT_SCRAT_MISC_REG_SYNCE, data);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    return GT_OK;
+
+}
+
+
+
+/*******************************************************************************
+* gsysGetSyncETai
+*
+* DESCRIPTION:
+*        Get SyncE and Tai from the Scratch and Misc Control register <SyncE and TAI pad>.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        zpr - ZPR for SyncE and TAI
+*        znr - ZNR for SyncE and TAI*
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetSyncETai
+(
+    IN  GT_QD_DEV         *dev,
+    OUT  GT_U8            *zpr,
+    OUT  GT_U8            *znr
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U8        data;
+
+    DBG_INFO(("gsysGetSyncETai Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SCRATCH_MISC_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = gsysGetScratchMiscCtrl(dev, GT_SCRAT_MISC_REG_SYNCE, &data);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    *zpr = 0x7 &(data>>3);
+    *znr = 0x7 &(data);
+
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gsysSetP6_Clock
+*
+* DESCRIPTION:
+*        Set P6_clock to the Scratch and Misc Control register <P6_Clock pad>.
+*
+* INPUTS:
+*        zpr - ZPR for P6_Clock
+*        znr - ZNR for P6_Clock
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysSetP6_Clock
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U8            zpr,
+    IN  GT_U8            znr
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U8        data;
+
+    DBG_INFO(("gsysSetP6_Clock Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SCRATCH_MISC_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if ((zpr>0x7) || (znr>0x7))
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    data = ((zpr&0x7)<<3) | (znr&0x7);
+
+    retVal = gsysSetScratchMiscCtrl(dev, GT_SCRAT_MISC_REG_P6_CLK, data);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gsysGetP6_Clock
+*
+* DESCRIPTION:
+*       Get P6_clock from the Scratch and Misc Control register <P6_Clock pad>.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        zpr - ZPR for P6_Clock
+*        znr - ZNR for P6_Clock
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetP6_Clock
+(
+    IN  GT_QD_DEV         *dev,
+    OUT  GT_U8            *zpr,
+    OUT  GT_U8            *znr
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U8        data;
+
+    DBG_INFO(("gsysGetP6_Clock Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SCRATCH_MISC_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = gsysGetScratchMiscCtrl(dev, GT_SCRAT_MISC_REG_P6_CLK, &data);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    *zpr = 0x7 &(data>>3);
+    *znr = 0x7 &(data);
+
+    return GT_OK;
+
+}
+
+
+
+
+/*******************************************************************************
+* gsysSetP5_Clock
+*
+* DESCRIPTION:
+*       Set P5_clock to the Scratch and Misc Control register <P5_Clock pad>.
+*
+* INPUTS:
+*        zpr - ZPR for P5_Clock
+*        znr - ZNR for P5_Clock
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysSetP5_Clock
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U8            zpr,
+    IN  GT_U8            znr
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U8        data;
+
+    DBG_INFO(("gsysSetP5_Clock Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SCRATCH_MISC_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if ((zpr>0x7) || (znr>0x7))
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    data = ((zpr&0x7)<<3) | (znr&0x7);
+
+    retVal = gsysSetScratchMiscCtrl(dev, GT_SCRAT_MISC_REG_P5_CLK, data);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gsysGetP5_Clock
+*
+* DESCRIPTION:
+*       Get P5_clock from the Scratch and Misc Control register <P5_Clock pad>.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        zpr - ZPR for P5_Clock
+*        znr - ZNR for P5_Clock
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetP5_Clock
+(
+    IN  GT_QD_DEV         *dev,
+    OUT  GT_U8            *zpr,
+    OUT  GT_U8            *znr
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U8        data;
+
+    DBG_INFO(("gsysGetP6_Clock Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SCRATCH_MISC_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = gsysGetScratchMiscCtrl(dev, GT_SCRAT_MISC_REG_P5_CLK, &data);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    *zpr = 0x7 &(data>>3);
+    *znr = 0x7 &(data);
+
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gsysSetEEPROM
+*
+* DESCRIPTION:
+*       Set EEPROM cycle to the Scratch and Misc Control register <EEPROM pad>.
+*
+* INPUTS:
+*        dsm - DSM for EEPROM cycle
+*        zpr - ZPR for EEPROM cycle
+*        znr - ZNR for EEPROM cycle
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysSetEEPROM
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U8            dsm,
+    IN  GT_U8            zpr,
+    IN  GT_U8            znr
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U8        data;
+
+    DBG_INFO(("gsysSetEEPROM Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SCRATCH_MISC_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if ((zpr>0x7) || (znr>0x7))
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    data = ((dsm&0x3)<<6) | ((zpr&0x7)<<3) | (znr&0x7);
+
+    retVal = gsysSetScratchMiscCtrl(dev, GT_SCRAT_MISC_REG_EEPROM, data);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gsysGetEEPROM
+*
+* DESCRIPTION:
+*       Get EEPROM cycle to the Scratch and Misc Control register <EEPROM pad>.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        dsm - DSM for EEPROM cycle
+*        zpr - ZPR for EEPROM cycle
+*        znr - ZNR for EEPROM cycle
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gsysGetEEPROM
+(
+    IN  GT_QD_DEV         *dev,
+    OUT  GT_U8            *dsm,
+    OUT  GT_U8            *zpr,
+    OUT  GT_U8            *znr
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U8        data;
+
+    DBG_INFO(("gsysGetEEPROM Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SCRATCH_MISC_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = gsysGetScratchMiscCtrl(dev, GT_SCRAT_MISC_REG_EEPROM, &data);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    *dsm = 0x3 &(data>>6);
+    *zpr = 0x7 &(data>>3);
+    *znr = 0x7 &(data);
+
+    return GT_OK;
+
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPCSCtrl.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPCSCtrl.c
new file mode 100644
index 000000000000..460ce7118b50
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPCSCtrl.c
@@ -0,0 +1,1937 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtPCSCtrl.c
+*
+* DESCRIPTION:
+*       API implementation for 1000BASE-X PCS block register access.
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*       $Revision: 3 $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+
+/*******************************************************************************
+* gpcsGetCommaDet
+*
+* DESCRIPTION:
+*        This routine retrieves Comma Detection status in PCS
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE for Comma Detected or GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetCommaDet
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsGetCommaDet Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PCS))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_PCS_LINK))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the CommaDet bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,15,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gpcsGetPCSLink
+*
+* DESCRIPTION:
+*        This routine retrieves Link up status in PCS
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE for Comma Detected or GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetPCSLink
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsGetPCSLink Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PCS))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PCS_LINK))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!DOES_DEVPORT_SUPPORT_PCS(dev,hwPort))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the PCS Link bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,15,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gpcsGetSyncOK
+*
+* DESCRIPTION:
+*        This routine retrieves SynOK bit. It is set to a one when the PCS has
+*        detected a few comma patterns and is synchronized with its peer PCS
+*        layer.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if synchronized or GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetSyncOK
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsGetSyncOK Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PCS))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given port supports PCS */
+    if (!DOES_DEVPORT_SUPPORT_PCS(dev,hwPort))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the SyncOK bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,14,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+/*******************************************************************************
+* gpcsGetSyncFail
+*
+* DESCRIPTION:
+*        This routine retrieves SynFail bit.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if synchronizaion failed or GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetSyncFail
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsGetSyncFail Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PCS))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given port supports PCS */
+    if (!DOES_DEVPORT_SUPPORT_PCS(dev,hwPort))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the SyncFail bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,13,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+/*******************************************************************************
+* gpcsGetAnBypassed
+*
+* DESCRIPTION:
+*        This routine retrieves Inband Auto-Negotiation bypass status.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if AN is bypassed or GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetAnBypassed
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsGetAnBypassed Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PCS))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given port supports PCS */
+    if (!DOES_DEVPORT_SUPPORT_PCS(dev,hwPort))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the AnBypassed bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,12,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gpcsGetAnBypassMode
+*
+* DESCRIPTION:
+*        This routine retrieves Enable mode of Inband Auto-Negotiation bypass.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE if AN bypass is enabled or GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetAnBypassMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *mode
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsGetAnBypassMode Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PCS))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given port supports PCS */
+    if (!DOES_DEVPORT_SUPPORT_PCS(dev,hwPort))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the AnBypass bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,11,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *mode);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+/*******************************************************************************
+* gpcsSetAnBypassMode
+*
+* DESCRIPTION:
+*        This routine retrieves Enable mode of Inband Auto-Negotiation bypass.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to enable AN bypass mode or GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsSetAnBypassMode
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL      mode
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsSetAnBypassMode Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PCS))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given port supports PCS */
+    if (!DOES_DEVPORT_SUPPORT_PCS(dev,hwPort))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the AnBypass bit.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,11,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gpcsGetPCSAnEn
+*
+* DESCRIPTION:
+*        This routine retrieves Enable mode of PCS Inband Auto-Negotiation.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE if PCS AN is enabled or GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetPCSAnEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *mode
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsGetPCSAnEn Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PCS))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given port supports PCS */
+    if (!DOES_DEVPORT_SUPPORT_PCS(dev,hwPort))
+    {
+        if (!IS_IN_DEV_GROUP(dev, DEV_INTERNAL_GPHY))
+        {
+            DBG_INFO(("GT_NOT_SUPPORTED\n"));
+            return GT_NOT_SUPPORTED;
+        }
+
+        if ((hwPort < 4) || (hwPort > 7))
+        {
+            DBG_INFO(("GT_NOT_SUPPORTED\n"));
+            return GT_NOT_SUPPORTED;
+        }
+    }
+
+    /* Get the PCSAnEn bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,10,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *mode);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+/*******************************************************************************
+* gpcsSetPCSAnEn
+*
+* DESCRIPTION:
+*        This routine sets Enable mode of PCS Inband Auto-Negotiation.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to enable PCS AN mode or GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsSetPCSAnEn
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL      mode
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsSetPCSAnEn Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PCS))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given port supports PCS */
+    if (!DOES_DEVPORT_SUPPORT_PCS(dev,hwPort))
+    {
+        if (!IS_IN_DEV_GROUP(dev, DEV_INTERNAL_GPHY))
+        {
+            DBG_INFO(("GT_NOT_SUPPORTED\n"));
+            return GT_NOT_SUPPORTED;
+        }
+
+        if ((hwPort < 4) || (hwPort > 7))
+        {
+            DBG_INFO(("GT_NOT_SUPPORTED\n"));
+            return GT_NOT_SUPPORTED;
+        }
+    }
+
+    /* Get the PCSAnEn bit.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,10,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+/*******************************************************************************
+* gpcsSetRestartPCSAn
+*
+* DESCRIPTION:
+*        This routine restarts PCS Inband Auto-Negotiation.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsSetRestartPCSAn
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsSetRestartPCSAn Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PCS))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    data = 1;    /* to set RestartPCSAn bit */
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given port supports PCS */
+    if (!DOES_DEVPORT_SUPPORT_PCS(dev,hwPort))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the RestartPCSAn bit.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,9,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gpcsGetPCSAnDone
+*
+* DESCRIPTION:
+*        This routine retrieves completion information of PCS Auto-Negotiation.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE if PCS AN is done or never done
+*                GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetPCSAnDone
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *mode
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsGetPCSAnDone Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PCS))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given port supports PCS */
+    if (!DOES_DEVPORT_SUPPORT_PCS(dev,hwPort))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the PCSAnDone bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,8,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *mode);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gpcsSetFCValue
+*
+* DESCRIPTION:
+*        This routine sets Flow Control's force value
+*
+* INPUTS:
+*        port - the logical port number.
+*        state - GT_TRUE to force flow control enabled, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsSetFCValue
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN    GT_BOOL        state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsSetFCValue Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FC_WITH_VALUE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(state, data);
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Set the FCValue bit.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,7,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gpcsGetFCValue
+*
+* DESCRIPTION:
+*        This routine retrieves Flow Control Value which will be used for Forcing
+*        Flow Control enabled or disabled.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if FC Force value is one (flow control enabled)
+*                 GT_FALSE otherwise (flow control disabled)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetFCValue
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsGetFCValue Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FC_WITH_VALUE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the FCValue bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,7,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gpcsSetForcedFC
+*
+* DESCRIPTION:
+*        This routine forces Flow Control. If FCValue is set to one, calling this
+*        routine with GT_TRUE will force Flow Control to be enabled.
+*
+* INPUTS:
+*        port - the logical port number.
+*        state - GT_TRUE to force flow control (enable or disable), GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsSetForcedFC
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN    GT_BOOL        state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsSetForcedFC Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FC_WITH_VALUE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(state, data);
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the ForcedFC bit.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,6,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gpcsGetForcedFC
+*
+* DESCRIPTION:
+*        This routine retrieves Forced Flow Control bit
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if ForcedFC bit is one,
+*                 GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetForcedFC
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsGetForcedLink Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FC_WITH_VALUE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the ForcedLink bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,6,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gpcsSetLinkValue
+*
+* DESCRIPTION:
+*        This routine sets Link's force value
+*
+* INPUTS:
+*        port - the logical port number.
+*        state - GT_TRUE to force link up, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsSetLinkValue
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN    GT_BOOL        state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsSetLinkValue Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FORCE_WITH_VALUE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(state, data);
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the LinkValue bit.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,5,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gpcsGetLinkValue
+*
+* DESCRIPTION:
+*        This routine retrieves Link Value which will be used for Forcing Link
+*        up or down.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if Link Force value is one (link up)
+*                 GT_FALSE otherwise (link down)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetLinkValue
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsGetLinkValue Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FORCE_WITH_VALUE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the LinkValue bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,5,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gpcsSetForcedLink
+*
+* DESCRIPTION:
+*        This routine forces Link. If LinkValue is set to one, calling this
+*        routine with GT_TRUE will force Link to be up.
+*
+* INPUTS:
+*        port - the logical port number.
+*        state - GT_TRUE to force link (up or down), GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsSetForcedLink
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN    GT_BOOL        state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsSetForcedLink Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FORCE_WITH_VALUE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(state, data);
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the ForcedLink bit.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,4,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gpcsGetForcedLink
+*
+* DESCRIPTION:
+*        This routine retrieves Forced Link bit
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if ForcedLink bit is one,
+*                 GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetForcedLink
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsGetForcedLink Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FORCE_WITH_VALUE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the ForcedLink bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,4,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gpcsSetDpxValue
+*
+* DESCRIPTION:
+*        This routine sets Duplex's Forced value. This function needs to be
+*        called prior to gpcsSetForcedDpx.
+*
+* INPUTS:
+*        port - the logical port number.
+*        state - GT_TRUE to force full duplex, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsSetDpxValue
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN    GT_BOOL        state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsSetDpxValue Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FORCE_WITH_VALUE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(state, data);
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the DpxValue bit.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,3,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gpcsGetDpxValue
+*
+* DESCRIPTION:
+*        This routine retrieves Duplex's Forced value
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if Duplex's Forced value is set to Full duplex,
+*                 GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetDpxValue
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsGetForcedLink Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FORCE_WITH_VALUE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the DpxValue bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,3,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gpcsSetForcedDpx
+*
+* DESCRIPTION:
+*        This routine forces duplex mode. If DpxValue is set to one, calling this
+*        routine with GT_TRUE will force duplex mode to be full duplex.
+*
+* INPUTS:
+*        port - the logical port number.
+*        state - GT_TRUE to force duplex mode, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsSetForcedDpx
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN    GT_BOOL        state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsSetForcedDpx Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FORCE_WITH_VALUE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(state, data);
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the ForcedDpx bit.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,2,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gpcsGetForcedDpx
+*
+* DESCRIPTION:
+*        This routine retrieves Forced Duplex.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if ForcedDpx bit is one,
+*                 GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetForcedDpx
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsGetForcedDpx Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FORCE_WITH_VALUE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the ForcedDpx bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,2,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gpcsSetForceSpeed
+*
+* DESCRIPTION:
+*        This routine forces Speed.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_PORT_FORCED_SPEED_MODE (10, 100, 1000, or no force speed)
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsSetForceSpeed
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_PORT_FORCED_SPEED_MODE  mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsSetForceSpeed Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FORCE_WITH_VALUE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Set the Force Speed bits.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,0,2,(GT_U16)mode);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gpcsGetForceSpeed
+*
+* DESCRIPTION:
+*        This routine retrieves Force Speed value
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_PORT_FORCED_SPEED_MODE (10, 100, 1000, or no force speed)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetForceSpeed
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_PORT_FORCED_SPEED_MODE   *mode
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsGetForceSpeed Called.\n"));
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FORCE_WITH_VALUE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the ForceSpeed bits.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,0,2,&data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    *mode = data;
+
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gpcsGetRGMIITimingDelay
+*
+* DESCRIPTION:
+*        RGMII receive/transmit Timing Control. This api adds delay to RXCLK for
+*        IND inputs and GTXCLK for OUTD outputs when port is in RGMII mode.
+*        Change to this bit are disruptive to normal operation. Hence any changes
+*        to this register must be done only while the port's link is down.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        rxmode - GT_FALSE for default setup, GT_TRUE for adding delay to rxclk
+*        txmode - GT_FALSE for default setup, GT_TRUE for adding delay to txclk
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gpcsGetRGMIITimingDelay
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *rxmode,
+    OUT GT_BOOL      *txmode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsGetRGMIITimingDelay Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_RGMII_TIMING))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (hwPort < (dev->maxPorts - 2))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+  if(((dev->devName==DEV_88E6165)||(dev->devName==DEV_88E6161))&&
+	  ((hwPort==4)||((hwPort==5)&&(dev->revision==2)))) /* 88E6123 revision A2 */
+  {
+    if(hwWritePortReg(dev,4,0x1A,0x81E7) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+    if(hwReadPortReg(dev,5,0x1A,&data) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+	if((hwPort==5)&&(dev->revision==2))  /* 88E6123 revision A2 */
+	{
+      *rxmode = (data & 0x2)?GT_TRUE:GT_FALSE;
+      *txmode = (data & 0x1)?GT_TRUE:GT_FALSE;
+	}
+	else
+	{
+      *rxmode = (data & 0x10)?GT_TRUE:GT_FALSE;
+      *txmode = (data & 0x8)?GT_TRUE:GT_FALSE;
+	}
+
+  }
+  else
+  {
+    /* Get the register bit(s).  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,14,2,&data);
+
+    *rxmode = (data & 0x2)?GT_TRUE:GT_FALSE;
+    *txmode = (data & 0x1)?GT_TRUE:GT_FALSE;
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+  }
+    /* return */
+    return retVal;
+}
+
+/*******************************************************************************
+* gpcsSetRGMIITimingDelay
+*
+* DESCRIPTION:
+*        RGMII receive/transmit Timing Control. This api adds delay to RXCLK for
+*        IND inputs and GTXCLK for OUTD outputs when port is in RGMII mode.
+*        Change to this bit are disruptive to normal operation. Hence any changes
+*        to this register must be done only while the port's link is down.
+*
+* INPUTS:
+*        port - the logical port number.
+*        rxmode - GT_FALSE for default setup, GT_TRUE for adding delay to rxclk
+*        txmode - GT_FALSE for default setup, GT_TRUE for adding delay to txclk
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gpcsSetRGMIITimingDelay
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_BOOL      rxmode,
+    IN  GT_BOOL      txmode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gpcsSetRGMIITimingDelay Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_RGMII_TIMING))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (hwPort < (dev->maxPorts - 2))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+  if(((dev->devName==DEV_88E6165)||(dev->devName==DEV_88E6161))&&
+	  ((hwPort==4)||((hwPort==5)&&(dev->revision==2)))) /* 88E6123 revision A2 */
+  {
+    if(hwWritePortReg(dev,4,0x1A,0x81E7) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+    if(hwReadPortReg(dev,5,0x1A,&data) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+	if((hwPort==5)&&(dev->revision==2))  /* 88E6123 revision A2 */
+	{
+	  data &= 0xfff9;
+      data |= (rxmode) ? 0x2 : 0;
+      data |= (txmode) ? 0x1: 0;
+	}
+	else
+	{
+	  data &= 0xffe7;
+      data |= (rxmode) ? 0x10 : 0;
+      data |= (txmode) ? 0x8: 0;
+	}
+    if(hwWritePortReg(dev,5,0x1A, data) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+     if(hwWritePortReg(dev,4,0x1A,0xC1E7) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+ }
+  else
+  {
+    data = (rxmode) ? 2 : 0;
+    data |= (txmode) ? 1 : 0;
+
+    /* Set the register bit(s).  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,14,2,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+  }
+
+    /* return */
+    return retVal;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPIRL.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPIRL.c
new file mode 100644
index 000000000000..09a3b0085cfb
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPIRL.c
@@ -0,0 +1,1827 @@
+#include <Copyright.h>
+
+/*******************************************************************************
+* gtPIRL.c
+*
+* DESCRIPTION:
+*       API definitions for PIRL Resources
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtSem.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+/****************************************************************************/
+/* STATS operation function declaration.                                    */
+/****************************************************************************/
+static GT_STATUS pirlOperationPerform
+(
+    IN   GT_QD_DEV            *dev,
+    IN   GT_PIRL_OPERATION    pirlOp,
+    INOUT GT_PIRL_OP_DATA     *opData
+);
+
+static GT_STATUS pirlInitialize
+(
+    IN  GT_QD_DEV              *dev
+);
+
+static GT_STATUS pirlInitIRLUnit
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                irlUnit
+);
+
+static GT_STATUS pirlDataToResource
+(
+    IN  GT_QD_DEV              *dev,
+    IN  GT_PIRL_DATA        *pirlData,
+    OUT GT_PIRL_RESOURCE    *res
+);
+
+static GT_STATUS pirlResourceToData
+(
+    IN  GT_QD_DEV              *dev,
+    IN  GT_PIRL_RESOURCE    *res,
+    OUT GT_PIRL_DATA        *pirlData
+);
+
+static GT_STATUS pirlWriteResource
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                irlUnit,
+    IN  GT_PIRL_RESOURCE    *res
+);
+
+static GT_STATUS pirlReadResource
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                irlUnit,
+    OUT GT_PIRL_RESOURCE    *res
+);
+
+static GT_STATUS pirlSetPortVec
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32        irlUnit,
+    IN  GT_U32        portVec
+);
+
+static GT_STATUS pirlGetPortVec
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32        irlUnit,
+    OUT GT_U32        *portVec
+);
+
+static GT_STATUS pirlSetFcMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_PIRL_FC_DEASSERT        mode
+);
+
+/*******************************************************************************
+* gpirlActivate
+*
+* DESCRIPTION:
+*       This routine activates Ingress Rate Limiting for the given ports by
+*        initializing a resource bucket, assigning ports, and configuring
+*        Bucket Parameters.
+*
+* INPUTS:
+*        irlUnit  - bucket to be used (0 ~ 11).
+*       portVec  - the list of ports that share the bucket.
+*        pirlData - PIRL resource parameters.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpirlActivate
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        irlUnit,
+    IN  GT_U32        portVec,
+    IN  GT_PIRL_DATA    *pirlData
+)
+{
+    GT_STATUS           retVal;
+    GT_PORT_STP_STATE    pState[MAX_SWITCH_PORTS];
+    GT_LPORT            port;
+    GT_PIRL_OPERATION    op;
+    GT_PIRL_OP_DATA        opData;
+    GT_PIRL_RESOURCE    pirlRes;
+
+    DBG_INFO(("gpirlActivate Called.\n"));
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PIRL_RESOURCE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the given bucket number is valid */
+    if (!GT_IS_IRLUNIT_VALID(dev,irlUnit))
+    {
+        DBG_INFO(("GT_BAD_PARAM irlUnit\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* check if the given portVec is valid */
+    if ((!portVec) || (portVec >= (GT_U32)(1<<dev->numOfPorts)))
+    {
+        DBG_INFO(("GT_BAD_PARAM portVec\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* set or reset port's ingress resource bit based on the portVec */
+    retVal = pirlSetPortVec(dev, irlUnit, portVec);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Getting Port State failed\n"));
+        return retVal;
+    }
+
+    /* Disable ports that share the bucket */
+    for(port=0; port<dev->numOfPorts; port++)
+    {
+        if(!GT_IS_PORT_SET(portVec,port))
+            continue;
+
+        retVal = gstpGetPortState(dev, port, &pState[port]);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Getting Port State failed\n"));
+            return retVal;
+        }
+
+        retVal = gstpSetPortState(dev, port, GT_PORT_DISABLE);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Getting Port State failed\n"));
+            return retVal;
+        }
+    }
+
+    /* Program Tuning register */
+    op = PIRL_WRITE_RESOURCE;
+    opData.irlUnit = irlUnit;
+    opData.irlReg = 0xF;
+    opData.irlData = 0x7;
+    retVal = pirlOperationPerform(dev,op,&opData);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (statsOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    /* Program the Ingress Rate Resource Parameters */
+    retVal = pirlDataToResource(dev,pirlData,&pirlRes);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("PIRL Data to PIRL Resource conversion failed.\n"));
+        return retVal;
+    }
+
+    retVal = pirlWriteResource(dev,irlUnit,&pirlRes);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("PIRL Write Resource failed.\n"));
+        return retVal;
+    }
+
+    /* Initialize internal counters */
+    retVal = pirlInitIRLUnit(dev,irlUnit);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("PIRL Write Resource failed.\n"));
+        return retVal;
+    }
+
+    /* Program PirlFCMode for each port that shares Bucket */
+    if (pirlRes.ebsLimitAction == ESB_LIMIT_ACTION_FC)
+    {
+        for(port=0; port<dev->numOfPorts; port++)
+        {
+            if(!GT_IS_PORT_SET(portVec,port))
+                continue;
+
+            retVal = pirlSetFcMode(dev,port,pirlData->fcDeassertMode[port]);
+            if(retVal != GT_OK)
+            {
+                DBG_INFO(("PIRL FC Mode set failed.\n"));
+                return retVal;
+            }
+        }
+    }
+
+    /* Set the ports in their original state */
+    for(port=0; port<dev->numOfPorts; port++)
+    {
+        if(!GT_IS_PORT_SET(portVec,port))
+            continue;
+
+        retVal = gstpSetPortState(dev, port, pState[port]);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Getting Port State failed\n"));
+            return retVal;
+        }
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gpirlDeactivate
+*
+* DESCRIPTION:
+*       This routine deactivates Ingress Rate Limiting for the given bucket.
+*        It simply removes every ports from the Ingress Rate Resource.
+*        It is assumed that gpirlActivate has been successfully called with
+*        the irlUnit before this function is called.
+*
+* INPUTS:
+*        irlUnit  - bucket to be used (0 ~ 11).
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpirlDeactivate
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        irlUnit
+)
+{
+    GT_STATUS           retVal;
+
+    DBG_INFO(("gpirlDectivate Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PIRL_RESOURCE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the given bucket number is valid */
+    if (!GT_IS_IRLUNIT_VALID(dev,irlUnit))
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* reset port's ingress resource bit */
+    retVal = pirlSetPortVec(dev, irlUnit, 0);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Getting Port State failed\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gpirlUpdateParam
+*
+* DESCRIPTION:
+*       This routine updates IRL Parameter.
+*        It is assumed that gpirlActivate has been successfully called with
+*        the given irlUnit before this function is called.
+*
+* INPUTS:
+*        irlUnit  - bucket to be used (0 ~ 11).
+*        pirlData - PIRL resource parameters.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpirlUpdateParam
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        irlUnit,
+    IN  GT_PIRL_DATA    *pirlData
+)
+{
+    GT_STATUS           retVal;
+    GT_PORT_STP_STATE    pState[MAX_SWITCH_PORTS];
+    GT_LPORT            port;
+    GT_PIRL_RESOURCE    pirlRes;
+    GT_U32                portVec;
+
+    DBG_INFO(("gpirlUpdateParam Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PIRL_RESOURCE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the given bucket number is valid */
+    if (!GT_IS_IRLUNIT_VALID(dev,irlUnit))
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* get port list that share ingress resource */
+    retVal = pirlGetPortVec(dev, irlUnit, &portVec);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Getting Port State failed\n"));
+        return retVal;
+    }
+
+    /* check if the given portVec is valid */
+    if (!portVec)
+    {
+        DBG_INFO(("IRL Unit not Activated\n"));
+        return GT_FAIL;
+    }
+
+    /* Disable ports that share the bucket */
+    for(port=0; port<dev->numOfPorts; port++)
+    {
+        if(!GT_IS_PORT_SET(portVec,port))
+            continue;
+
+        retVal = gstpGetPortState(dev, port, &pState[port]);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Getting Port State failed\n"));
+            return retVal;
+        }
+
+        retVal = gstpSetPortState(dev, port, GT_PORT_DISABLE);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Getting Port State failed\n"));
+            return retVal;
+        }
+    }
+
+    /* Program the Ingress Rate Resource Parameters */
+    retVal = pirlDataToResource(dev,pirlData,&pirlRes);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("PIRL Data to PIRL Resource conversion failed.\n"));
+        return retVal;
+    }
+
+    retVal = pirlWriteResource(dev,irlUnit,&pirlRes);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("PIRL Write Resource failed.\n"));
+        return retVal;
+    }
+
+    /* Initialize internal counrters for the bucket */
+    retVal = pirlInitIRLUnit(dev,irlUnit);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("PIRL Write Resource failed.\n"));
+        return retVal;
+    }
+
+    /* Program PirlFCMode for each port that shares Bucket */
+    if (pirlRes.ebsLimitAction == ESB_LIMIT_ACTION_FC)
+    {
+        for(port=0; port<dev->numOfPorts; port++)
+        {
+            if(!GT_IS_PORT_SET(portVec,port))
+                continue;
+
+            retVal = pirlSetFcMode(dev,port,pirlData->fcDeassertMode[port]);
+            if(retVal != GT_OK)
+            {
+                DBG_INFO(("PIRL FC Mode set failed.\n"));
+                return retVal;
+            }
+        }
+    }
+
+    /* Set the ports in their original state */
+    for(port=0; port<dev->numOfPorts; port++)
+    {
+        if(!GT_IS_PORT_SET(portVec,port))
+            continue;
+
+        retVal = gstpSetPortState(dev, port, pState[port]);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Getting Port State failed\n"));
+            return retVal;
+        }
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gpirlReadParam
+*
+* DESCRIPTION:
+*       This routine retrieves IRL Parameter.
+*        It is assumed that gpirlActivate has been successfully called with
+*        the given irlUnit before this function is called.
+*
+* INPUTS:
+*        irlUnit  - bucket to be used (0 ~ 11).
+*
+* OUTPUTS:
+*        pirlData - PIRL resource parameters.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpirlReadParam
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        irlUnit,
+    OUT GT_PIRL_DATA    *pirlData
+)
+{
+    GT_STATUS           retVal;
+    GT_LPORT            port;
+    GT_PIRL_RESOURCE    pirlRes;
+    GT_U32                portVec;
+
+    DBG_INFO(("gpirlReadParam Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PIRL_RESOURCE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the given bucket number is valid */
+    if (!GT_IS_IRLUNIT_VALID(dev,irlUnit))
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* get port list that share ingress resource */
+    retVal = pirlGetPortVec(dev, irlUnit, &portVec);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Getting Port State failed\n"));
+        return retVal;
+    }
+
+    /* check if the given portVec is valid */
+    if (!portVec)
+    {
+        DBG_INFO(("IRL Unit not Activated\n"));
+        return GT_FAIL;
+    }
+
+    /* Read the Ingress Rate Resource Parameters */
+    retVal = pirlReadResource(dev,irlUnit,&pirlRes);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("PIRL Read Resource failed.\n"));
+        return retVal;
+    }
+
+    retVal = pirlResourceToData(dev,&pirlRes,pirlData);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("PIRL Resource to PIRL Data conversion failed.\n"));
+        return retVal;
+    }
+
+    /* Program PirlFCMode for each port that shares Bucket */
+    if (pirlRes.ebsLimitAction == ESB_LIMIT_ACTION_FC)
+    {
+        for(port=0; port<dev->numOfPorts; port++)
+        {
+            if(!GT_IS_PORT_SET(portVec,port))
+                continue;
+
+            retVal = grcGetPirlFcMode(dev,port,&pirlData->fcDeassertMode[port]);
+            if(retVal != GT_OK)
+            {
+                DBG_INFO(("PIRL FC Mode get failed.\n"));
+                return retVal;
+            }
+        }
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+
+/*******************************************************************************
+* gpirlUpdatePortVec
+*
+* DESCRIPTION:
+*       This routine updates port list that share the bucket.
+*        It is assumed that gpirlActivate has been successfully called with
+*        the given irlUnit before this function is called.
+*
+* INPUTS:
+*        irlUnit  - bucket to be used (0 ~ 11).
+*       portVec  - the list of ports that share the bucket.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpirlUpdatePortVec
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        irlUnit,
+    IN  GT_U32        portVec
+)
+{
+    GT_STATUS       retVal;
+    GT_U32            tmpVec;
+
+    DBG_INFO(("gpirlActivate Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PIRL_RESOURCE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the given bucket number is valid */
+    if (!GT_IS_IRLUNIT_VALID(dev,irlUnit))
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* check if the given portVec is valid */
+    if ((!portVec) || (portVec > (GT_U32)(1<<dev->numOfPorts)))
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* get port list that share ingress resource */
+    retVal = pirlGetPortVec(dev, irlUnit, &tmpVec);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Getting Port State failed\n"));
+        return retVal;
+    }
+
+    /* check if the given portVec is valid */
+    if (!tmpVec)
+    {
+        DBG_INFO(("IRL Unit not Activated\n"));
+        return GT_FAIL;
+    }
+
+    /* set or reset port's ingress resource bit based on the portVec */
+    retVal = pirlSetPortVec(dev, irlUnit, portVec);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Getting Port State failed\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gpirlReadPortVec
+*
+* DESCRIPTION:
+*       This routine retrieves port list that share the bucket.
+*        It is assumed that gpirlActivate has been successfully called with
+*        the given irlUnit before this function is called.
+*
+* INPUTS:
+*        irlUnit  - bucket to be used (0 ~ 11).
+*
+* OUTPUTS:
+*       portVec  - the list of ports that share the bucket.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpirlReadPortVec
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        irlUnit,
+    OUT GT_U32        *portVec
+)
+{
+    GT_STATUS       retVal;
+
+    DBG_INFO(("gpirlReadPortVec Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PIRL_RESOURCE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the given bucket number is valid */
+    if (!GT_IS_IRLUNIT_VALID(dev,irlUnit))
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* get port list that share ingress resource */
+    retVal = pirlGetPortVec(dev, irlUnit, portVec);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Getting Port State failed\n"));
+        return retVal;
+    }
+
+    /* check if the given portVec is valid */
+    if (!*portVec)
+    {
+        DBG_INFO(("IRL Unit not Activated\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+
+/*******************************************************************************
+* grcGetPirlFcMode
+*
+* DESCRIPTION:
+*       This routine gets Port Ingress Rate Limit Flow Control mode.
+*        When EBSLimitAction is programmed to generate a flow control message,
+*        the deassertion of flow control is controlled by this mode.
+*            GT_PIRL_FC_DEASSERT_EMPTY:
+*                De-assert when the ingress rate resource has become empty
+*            GT_PIRL_FC_DEASSERT_CBS_LIMIT
+*                De-assert when the ingress rate resource has enough room as
+*                specified by the CBSLimit.
+*        Please refer to GT_PIRL_RESOURCE structure for EBSLimitAction and
+*        CBSLimit.
+*
+* INPUTS:
+*       port - logical port number
+*
+* OUTPUTS:
+*        mode - GT_PIRL_FC_DEASSERT enum type
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS grcGetPirlFcMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_PIRL_FC_DEASSERT        *mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;        /* Physical port.               */
+
+    DBG_INFO(("grcSetDaNrlEn Called.\n"));
+
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PIRL_RESOURCE))
+    {
+           DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the PirlFcMode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_INGRESS_RATE_CTRL,12,1,&data);
+    if(retVal != GT_OK)
+       {
+        DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    *mode = (GT_PIRL_FC_DEASSERT)data;
+    DBG_INFO(("OK.\n"));
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gpirlGetIngressRateResource
+*
+* DESCRIPTION:
+*       This routine gets Ingress Rate Limiting Resources assigned to the port.
+*        This vector is used to attach specific counter resources to the physical
+*        port. And the same counter resource can be attached to more than one port.
+*
+* INPUTS:
+*       port   - logical port number
+*
+* OUTPUTS:
+*        resVec - resource vector (bit 11:0, since there is 12 resources)
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gpirlGetIngressRateResource
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_U32        *resVec
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;        /* Physical port.               */
+    GT_U16            data;
+
+    DBG_INFO(("grcGetIngressRateResource Called.\n"));
+
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PIRL_RESOURCE))
+    {
+           DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the resource vector.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_INGRESS_RATE_CTRL,0,12,&data);
+    if(retVal != GT_OK)
+       {
+        DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    *resVec = (GT_U32)data;
+
+    DBG_INFO(("OK.\n"));
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gpirlSetCurTimeUpInt
+*
+* DESCRIPTION:
+*       This function sets the current time update interval.
+*        Please contact FAE for detailed information.
+*
+* INPUTS:
+*       upInt - updata interval (0 ~ 7)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gpirlSetCurTimeUpInt
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                upInt
+)
+{
+    GT_STATUS       retVal;        /* Functions return value */
+    GT_PIRL_OPERATION    op;
+    GT_PIRL_OP_DATA        opData;
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PIRL_RESOURCE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (upInt > 0x7)
+        return GT_BAD_PARAM;
+
+    op = PIRL_READ_RESOURCE;
+
+    opData.irlUnit = 0xF;
+    opData.irlReg = 1;
+    opData.irlData = 0;
+
+    retVal = pirlOperationPerform(dev, op, &opData);
+    if (retVal != GT_OK)
+    {
+           DBG_INFO(("PIRL OP Failed.\n"));
+           return retVal;
+    }
+
+    op = PIRL_WRITE_RESOURCE;
+    opData.irlData = (opData.irlData & 0xFFF8) | (GT_U16)upInt;
+
+    retVal = pirlOperationPerform(dev, op, &opData);
+    if (retVal != GT_OK)
+    {
+           DBG_INFO(("PIRL OP Failed.\n"));
+           return retVal;
+    }
+
+    return GT_OK;
+}
+
+/****************************************************************************/
+/* Internal functions.                                                  */
+/****************************************************************************/
+
+/*******************************************************************************
+* gpirlInitialize
+*
+* DESCRIPTION:
+*       This routine initializes PIRL Resources.
+*
+* INPUTS:
+*       None
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       None
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpirlInitialize
+(
+    IN  GT_QD_DEV              *dev
+)
+{
+    GT_STATUS           retVal;
+    GT_LPORT        port;
+    GT_U8           hwPort;        /* Physical port.               */
+
+    /* reset port's ingress resource bit */
+    for(port=0; port<dev->numOfPorts; port++)
+    {
+        hwPort = GT_LPORT_2_PORT(port);
+
+        /* Set the resource vector.            */
+        retVal = hwSetPortRegField(dev,hwPort, QD_REG_INGRESS_RATE_CTRL,0,12,0);
+        if(retVal != GT_OK)
+           {
+            DBG_INFO(("Failed.\n"));
+               return retVal;
+        }
+    }
+
+    retVal = pirlInitialize(dev);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* statsOperationPerform
+*
+* DESCRIPTION:
+*       This function accesses Ingress Rate Command Register and Data Register.
+*
+* INPUTS:
+*       pirlOp       - The stats operation bits to be written into the stats
+*                     operation register.
+*       port        - port number
+*       counter     - counter to be read if it's read operation
+*
+* OUTPUTS:
+*       pirlData   - points to the data storage where the MIB counter will be saved.
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+static GT_STATUS pirlOperationPerform
+(
+    IN    GT_QD_DEV           *dev,
+    IN    GT_PIRL_OPERATION   pirlOp,
+    INOUT GT_PIRL_OP_DATA     *opData
+)
+{
+    GT_STATUS       retVal;    /* Functions return value */
+    GT_U16          data;     /* temporary Data storage */
+
+    gtSemTake(dev,dev->pirlRegsSem,OS_WAIT_FOREVER);
+
+    /* Wait until the pirl in ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_INGRESS_RATE_COMMAND;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->pirlRegsSem);
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+    while(data == 1)
+    {
+        retVal = hwGetGlobal2RegField(dev,QD_REG_INGRESS_RATE_COMMAND,15,1,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->pirlRegsSem);
+            return retVal;
+        }
+    }
+#endif
+
+    /* Set the PIRL Operation register */
+    switch (pirlOp)
+    {
+        case PIRL_INIT_ALL_RESOURCE:
+            data = (1 << 15) | (PIRL_INIT_ALL_RESOURCE << 12);
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_INGRESS_RATE_COMMAND,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->pirlRegsSem);
+                return retVal;
+            }
+            break;
+        case PIRL_INIT_RESOURCE:
+            data = (GT_U16)((1 << 15) | (PIRL_INIT_RESOURCE << 12) |
+                    ((opData->irlUnit&0xF)<< 4));
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_INGRESS_RATE_COMMAND,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->pirlRegsSem);
+                return retVal;
+            }
+            break;
+
+        case PIRL_WRITE_RESOURCE:
+            data = (GT_U16)opData->irlData;
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_INGRESS_RATE_DATA,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->pirlRegsSem);
+                return retVal;
+            }
+
+            data = (GT_U16)((1 << 15) | (PIRL_WRITE_RESOURCE << 12) |
+                    ((opData->irlUnit&0xF) << 4) | (opData->irlReg & 0xF));
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_INGRESS_RATE_COMMAND,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->pirlRegsSem);
+                return retVal;
+            }
+            break;
+
+        case PIRL_READ_RESOURCE:
+            data = (GT_U16)((1 << 15) | (PIRL_READ_RESOURCE << 12) |
+                    ((opData->irlUnit&0xF) << 4) | (opData->irlReg & 0xF));
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_INGRESS_RATE_COMMAND,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->pirlRegsSem);
+                return retVal;
+            }
+
+#ifdef GT_RMGMT_ACCESS
+            {
+              HW_DEV_REG_ACCESS regAccess;
+
+              regAccess.entries = 1;
+
+              regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+              regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+              regAccess.rw_reg_list[0].reg = QD_REG_INGRESS_RATE_COMMAND;
+              regAccess.rw_reg_list[0].data = 15;
+              retVal = hwAccessMultiRegs(dev, &regAccess);
+              if(retVal != GT_OK)
+              {
+                gtSemGive(dev,dev->pirlRegsSem);
+                return retVal;
+              }
+            }
+#else
+            data = 1;
+            while(data == 1)
+            {
+                retVal = hwGetGlobal2RegField(dev,QD_REG_INGRESS_RATE_COMMAND,15,1,&data);
+                if(retVal != GT_OK)
+                {
+                    gtSemGive(dev,dev->pirlRegsSem);
+                    return retVal;
+                }
+            }
+#endif
+
+            retVal = hwReadGlobal2Reg(dev,QD_REG_INGRESS_RATE_DATA,&data);
+            opData->irlData = (GT_U32)data;
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->pirlRegsSem);
+                return retVal;
+            }
+            gtSemGive(dev,dev->pirlRegsSem);
+            return retVal;
+
+        default:
+
+            gtSemGive(dev,dev->pirlRegsSem);
+            return GT_FAIL;
+    }
+
+    /* Wait until the pirl in ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_INGRESS_RATE_COMMAND;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->pirlRegsSem);
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+    while(data == 1)
+    {
+        retVal = hwGetGlobal2RegField(dev,QD_REG_INGRESS_RATE_COMMAND,15,1,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->pirlRegsSem);
+            return retVal;
+        }
+    }
+#endif
+
+    gtSemGive(dev,dev->pirlRegsSem);
+    return retVal;
+}
+
+/*
+ * Initialize all PIRL resources to the inital state.
+*/
+static GT_STATUS pirlInitialize
+(
+    IN  GT_QD_DEV              *dev
+)
+{
+    GT_STATUS       retVal;    /* Functions return value */
+    GT_PIRL_OPERATION    op;
+
+    op = PIRL_INIT_ALL_RESOURCE;
+
+    retVal = pirlOperationPerform(dev, op, NULL);
+    if (retVal != GT_OK)
+    {
+           DBG_INFO(("PIRL OP Failed.\n"));
+           return retVal;
+    }
+
+    retVal = gpirlSetCurTimeUpInt(dev,7);
+    if (retVal != GT_OK)
+    {
+           DBG_INFO(("PIRL OP Failed.\n"));
+    }
+
+    return retVal;
+}
+
+/*
+ * Initialize the selected PIRL resource to the inital state.
+ * This function initializes only the BSM structure for the IRL Unit.
+*/
+static GT_STATUS pirlInitIRLUnit
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                irlUnit
+)
+{
+    GT_STATUS       retVal;    /* Functions return value */
+    GT_PIRL_OPERATION    op;
+    GT_PIRL_OP_DATA        opData;
+
+    op = PIRL_INIT_RESOURCE;
+    opData.irlUnit = irlUnit;
+
+    retVal = pirlOperationPerform(dev, op, &opData);
+    if (retVal != GT_OK)
+    {
+           DBG_INFO(("PIRL OP Failed.\n"));
+           return retVal;
+    }
+
+    return retVal;
+}
+
+/*
+ * convert PIRL Data structure to PIRL Resource structure.
+ * if PIRL Data is not valid, return GT_BAD_PARARM;
+*/
+static GT_STATUS pirlDataToResource
+(
+    IN  GT_QD_DEV              *dev,
+    IN  GT_PIRL_DATA        *pirlData,
+    OUT GT_PIRL_RESOURCE    *res
+)
+{
+    GT_U16 typeMask;
+
+    switch(pirlData->accountQConf)
+    {
+        case GT_FALSE:
+        case GT_TRUE:
+            res->accountQConf = pirlData->accountQConf;
+            break;
+        default:
+            return GT_BAD_PARAM;
+    }
+
+    switch(pirlData->accountFiltered)
+    {
+        case GT_FALSE:
+        case GT_TRUE:
+            res->accountFiltered = pirlData->accountFiltered;
+            break;
+        default:
+            return GT_BAD_PARAM;
+    }
+
+    switch(pirlData->ebsLimitAction)
+    {
+        case ESB_LIMIT_ACTION_DROP:
+        case ESB_LIMIT_ACTION_FC:
+            res->ebsLimitAction = pirlData->ebsLimitAction;
+            break;
+        default:
+            return GT_BAD_PARAM;
+    }
+
+    if(pirlData->customSetup.isValid == GT_TRUE)
+    {
+        res->ebsLimit = pirlData->customSetup.ebsLimit;
+        res->cbsLimit = pirlData->customSetup.cbsLimit;
+        res->bktIncrement = pirlData->customSetup.bktIncrement;
+        res->bktRateFactor = pirlData->customSetup.bktRateFactor;
+    }
+    else
+    {
+        if(pirlData->ingressRate == 0)
+            return GT_BAD_PARAM;
+
+        if(pirlData->ingressRate < 1000)    /* less than 1Mbps */
+        {
+            /* it should be divided by 64 */
+            if(pirlData->ingressRate % 64)
+                return GT_BAD_PARAM;
+            res->bktRateFactor = pirlData->ingressRate/64;
+        }
+        else if(pirlData->ingressRate <= 100000)    /* less than or equal to 100Mbps */
+        {
+            /* it should be divided by 1000 */
+            if(pirlData->ingressRate % 1000)
+                return GT_BAD_PARAM;
+            res->bktRateFactor = pirlData->ingressRate/64 + ((pirlData->ingressRate % 64)?1:0);
+        }
+        else if(pirlData->ingressRate <= 200000)    /* less than or equal to 200Mbps */
+        {
+            /* it should be divided by 10000 */
+            if(pirlData->ingressRate % 10000)
+                return GT_BAD_PARAM;
+            res->bktRateFactor = pirlData->ingressRate/64 + ((pirlData->ingressRate % 64)?1:0);
+        }
+        else
+            return GT_BAD_PARAM;
+
+        res->ebsLimit = RECOMMENDED_ESB_LIMIT(dev, pirlData->ingressRate);
+        res->cbsLimit = RECOMMENDED_CBS_LIMIT(dev, pirlData->ingressRate);
+        res->bktIncrement = RECOMMENDED_BUCKET_INCREMENT(dev, pirlData->ingressRate);
+    }
+
+    switch(pirlData->bktRateType)
+    {
+        case BUCKET_TYPE_TRAFFIC_BASED:
+            res->bktRateType = pirlData->bktRateType;
+
+            if (IS_IN_DEV_GROUP(dev,DEV_RESTRICTED_PIRL_RESOURCE))
+            {
+                typeMask = 0xF;
+            }
+            else
+            {
+                typeMask = 0x7F;
+            }
+
+            if (pirlData->bktTypeMask > typeMask)
+            {
+                return GT_BAD_PARAM;
+            }
+            else
+            {
+                res->bktTypeMask = pirlData->bktTypeMask;
+            }
+
+            break;
+
+        case BUCKET_TYPE_RATE_BASED:
+            if (IS_IN_DEV_GROUP(dev,DEV_RESTRICTED_PIRL_RESOURCE))
+                return GT_BAD_PARAM;
+            res->bktRateType = pirlData->bktRateType;
+            res->bktTypeMask = 0;
+            break;
+
+        default:
+            return GT_BAD_PARAM;
+    }
+
+    switch(pirlData->byteTobeCounted)
+    {
+        case GT_PIRL_COUNT_ALL_LAYER1:
+            res->byteTobeCounted = 1;
+            break;
+        case GT_PIRL_COUNT_ALL_LAYER2:
+            res->byteTobeCounted = 2;
+            break;
+        case GT_PIRL_COUNT_ALL_LAYER3:
+            res->byteTobeCounted = 6;
+            break;
+        default:
+            return GT_BAD_PARAM;
+    }
+
+    return GT_OK;
+}
+
+/*
+ * convert PIRL Resource structure to PIRL Data structure.
+*/
+static GT_STATUS pirlResourceToData
+(
+    IN  GT_QD_DEV              *dev,
+    IN  GT_PIRL_RESOURCE    *res,
+    OUT GT_PIRL_DATA        *pirlData
+)
+{
+    GT_U32    rate;
+    GT_U32    factor;
+
+    pirlData->accountQConf = res->accountQConf;
+    pirlData->accountFiltered = res->accountFiltered;
+    pirlData->ebsLimitAction = res->ebsLimitAction;
+
+    pirlData->customSetup.isValid = GT_FALSE;
+
+    FACTOR_FROM_BUCKET_INCREMENT(dev,res->bktIncrement,factor);
+
+    rate = res->bktRateFactor * factor;
+    if(rate == 0)
+    {
+        pirlData->ingressRate = 0;
+        pirlData->customSetup.isValid = GT_TRUE;
+        pirlData->customSetup.ebsLimit = res->ebsLimit;
+        pirlData->customSetup.cbsLimit = res->cbsLimit;
+        pirlData->customSetup.bktIncrement = res->bktIncrement;
+        pirlData->customSetup.bktRateFactor = res->bktRateFactor;
+    }
+    else if(rate < 1000)
+    {
+        pirlData->ingressRate = rate;
+    }
+    else if(rate < 100000)
+    {
+        pirlData->ingressRate = rate - (rate % 1000);
+    }
+    else
+    {
+        pirlData->ingressRate = rate - (rate % 10000);
+    }
+
+    pirlData->bktRateType = res->bktRateType;
+    pirlData->bktTypeMask = res->bktTypeMask;
+
+    switch(res->byteTobeCounted)
+    {
+        case 1:
+            pirlData->byteTobeCounted = GT_PIRL_COUNT_ALL_LAYER1;
+            break;
+        case 2:
+            pirlData->byteTobeCounted = GT_PIRL_COUNT_ALL_LAYER2;
+            break;
+        case 6:
+            pirlData->byteTobeCounted = GT_PIRL_COUNT_ALL_LAYER3;
+            break;
+        default:
+            return GT_BAD_PARAM;
+    }
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* pirlWriteResource
+*
+* DESCRIPTION:
+*       This function writes IRL Resource to BCM (Bucket Configuration Memory)
+*
+* INPUTS:
+*        irlUnit - resource unit to be accessed
+*       res     - IRL Resource data
+*
+* OUTPUTS:
+*       Nont.
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+static GT_STATUS pirlWriteResource
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                irlUnit,
+    IN  GT_PIRL_RESOURCE    *res
+)
+{
+    GT_STATUS       retVal;            /* Functions return value */
+    GT_U16          data[8];     /* temporary Data storage */
+    GT_PIRL_OPERATION    op;
+    GT_PIRL_OP_DATA        opData;
+    int                i;
+
+    op = PIRL_WRITE_RESOURCE;
+
+    /* reg0 data */
+    data[0] = (GT_U16)((res->bktRateType << 15) |    /* Bit[15] : Bucket Rate Type */
+                      (res->bktTypeMask << 4 ) |        /* Bit[14:4] : Traffic Type   */
+                      res->byteTobeCounted );            /* Bit[3:0] : Bytes to be counted */
+
+    /* reg1 data */
+    data[1] = (GT_U16)res->bktIncrement;    /* Bit[11:0] : Bucket Increment */
+
+    /* reg2 data */
+    data[2] = (GT_U16)res->bktRateFactor;    /* Bit[15:0] : Bucket Rate Factor */
+
+    /* reg3 data */
+    data[3] = (GT_U16)(res->cbsLimit & 0xFFF) << 4;    /* Bit[15:4] : CBS Limit[11:0] */
+
+    /* reg4 data */
+    data[4] = (GT_U16)(res->cbsLimit >> 12);        /* Bit[11:0] : CBS Limit[23:12] */
+
+    /* reg5 data */
+    data[5] = (GT_U16)(res->ebsLimit & 0xFFFF);        /* Bit[15:0] : EBS Limit[15:0] */
+
+    /* reg6 data */
+    data[6] = (GT_U16)((res->ebsLimit >> 16)    |    /* Bit[7:0] : EBS Limit[23:16] */
+                    (res->ebsLimitAction << 12)    |    /* Bit[12] : EBS Limit Action */
+                    (res->accountFiltered << 14)|    /* Bit[14] : Account Filtered */
+                    (res->accountQConf << 15));        /* Bit[15] : Account QConf */
+    /* reg7 data */
+    data[7] = 0;    /* Reserved */
+
+    for(i=0; i<8; i++)
+    {
+        opData.irlUnit = irlUnit;
+        opData.irlReg = i;
+        opData.irlData = data[i];
+
+        retVal = pirlOperationPerform(dev, op, &opData);
+        if (retVal != GT_OK)
+        {
+            DBG_INFO(("PIRL OP Failed.\n"));
+            return retVal;
+        }
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* pirlReadResource
+*
+* DESCRIPTION:
+*       This function reads IRL Resource from BCM (Bucket Configuration Memory)
+*
+* INPUTS:
+*        irlUnit - resource unit to be accessed
+*
+* OUTPUTS:
+*       res - IRL Resource data
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+static GT_STATUS pirlReadResource
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                irlUnit,
+    OUT GT_PIRL_RESOURCE    *res
+)
+{
+    GT_STATUS       retVal;            /* Functions return value */
+    GT_U16          data[8];     /* temporary Data storage */
+    GT_PIRL_OPERATION    op;
+    GT_PIRL_OP_DATA        opData;
+    int                i;
+
+    op = PIRL_READ_RESOURCE;
+
+    for(i=0; i<8; i++)
+    {
+        opData.irlUnit = irlUnit;
+        opData.irlReg = i;
+        opData.irlData = 0;
+
+        retVal = pirlOperationPerform(dev, op, &opData);
+        if (retVal != GT_OK)
+        {
+            DBG_INFO(("PIRL OP Failed.\n"));
+            return retVal;
+        }
+
+        data[i] = (GT_U16)opData.irlData;
+    }
+
+
+    /* reg0 data */
+    res->bktRateType = (data[0] >> 15) & 0x1;
+    res->bktTypeMask = (data[0] >> 4) & 0x7F;
+
+    res->byteTobeCounted = data[0] & 0xF;
+
+    /* reg1 data */
+    res->bktIncrement = data[1] & 0xFFF;
+
+    /* reg2 data */
+    res->bktRateFactor = data[2] & 0xFFFF;
+
+    /* reg3,4 data */
+    res->cbsLimit = ((data[3] >> 4) & 0xFFF) | ((data[4] & 0xFFF) << 12);
+
+    /* reg5,6 data */
+    res->ebsLimit = data[5] | ((data[6] & 0xFF) << 16);
+
+    /* reg6 data */
+    res->ebsLimitAction = (data[6] >> 12) & 0x1;
+    res->accountFiltered = (data[6] >> 14) & 0x1;
+    res->accountQConf = (data[6] >> 15) & 0x1;
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* pirlSetPortVec
+*
+* DESCRIPTION:
+*       This routine sets port list that share the bucket and resets ports that
+*        do not share the bucket.
+*
+* INPUTS:
+*        irlUnit  - bucket to be used.
+*       portVec  - the list of ports that share the bucket.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+static GT_STATUS pirlSetPortVec
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32        irlUnit,
+    IN  GT_U32        portVec
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_LPORT        port;
+    GT_U8           hwPort;        /* Physical port.               */
+    GT_U16            data;
+
+    for(port=0; port<dev->numOfPorts; port++)
+    {
+        if(GT_IS_PORT_SET(portVec,port))
+            data = 1;
+        else
+            data = 0;
+
+        hwPort = GT_LPORT_2_PORT(port);
+
+        /* Set the resource vector.            */
+        retVal = hwSetPortRegField(dev,hwPort, QD_REG_INGRESS_RATE_CTRL,(GT_U8)irlUnit,1,data);
+        if(retVal != GT_OK)
+           {
+            DBG_INFO(("Failed.\n"));
+               return retVal;
+        }
+    }
+
+    DBG_INFO(("OK.\n"));
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* pirlGetPortVec
+*
+* DESCRIPTION:
+*       This routine gets port list that share the bucket.
+*
+* INPUTS:
+*        irlUnit  - bucket to be used.
+*
+* OUTPUTS:
+*       portVec  - the list of ports that share the bucket.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+static GT_STATUS pirlGetPortVec
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32        irlUnit,
+    OUT GT_U32        *portVec
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_LPORT        port;
+    GT_U8           hwPort;        /* Physical port.               */
+    GT_U16            data;
+
+    *portVec = 0;
+
+    for(port=0; port<dev->numOfPorts; port++)
+    {
+        hwPort = GT_LPORT_2_PORT(port);
+
+        /* Set the resource vector.            */
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_INGRESS_RATE_CTRL,(GT_U8)irlUnit,1,&data);
+        if(retVal != GT_OK)
+           {
+            DBG_INFO(("Failed.\n"));
+               return retVal;
+        }
+
+        if(data == 1)
+            *portVec |= (1 << port);
+    }
+
+    DBG_INFO(("OK.\n"));
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* pirlSetFcMode
+*
+* DESCRIPTION:
+*       This routine gets Port Ingress Rate Limit Flow Control mode.
+*        When EBSLimitAction is programmed to generate a flow control message,
+*        the deassertion of flow control is controlled by this mode.
+*            GT_PIRL_FC_DEASSERT_EMPTY:
+*                De-assert when the ingress rate resource has become empty
+*            GT_PIRL_FC_DEASSERT_CBS_LIMIT
+*                De-assert when the ingress rate resource has enough room as
+*                specified by the CBSLimit.
+*        Please refer to GT_PIRL_RESOURCE structure for EBSLimitAction and
+*        CBSLimit.
+*
+* INPUTS:
+*       port - logical port number
+*        mode - GT_PIRL_FC_DEASSERT enum type
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+static GT_STATUS pirlSetFcMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_PIRL_FC_DEASSERT        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;        /* Physical port.               */
+
+    DBG_INFO(("pirlSetFcMode Called.\n"));
+
+    hwPort = GT_LPORT_2_PORT(port);
+
+    data = (GT_U16) mode;
+
+    /* Set the PirlFcMode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_INGRESS_RATE_CTRL,12,1,data);
+    if(retVal != GT_OK)
+       {
+        DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+
+    return GT_OK;
+}
+
+#define PIRL_DEBUG
+#ifdef PIRL_DEBUG
+/*******************************************************************************
+* pirlDumpResource
+*
+* DESCRIPTION:
+*       This function dumps IRL Resource register values.
+*
+* INPUTS:
+*        irlUnit  - resource unit to be accessed
+*        dataLen  - data size.
+*
+* OUTPUTS:
+*       data - IRL Resource data
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS pirlDumpResource
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                irlUnit,
+    IN    GT_U32                dataLen,
+    OUT GT_U16                *data
+)
+{
+    GT_STATUS       retVal;        /* Functions return value */
+    GT_PIRL_OPERATION    op;
+    GT_PIRL_OP_DATA        opData;
+    GT_U32                i;
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PIRL_RESOURCE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    op = PIRL_READ_RESOURCE;
+
+    for(i=0; i<dataLen; i++)
+    {
+        opData.irlUnit = irlUnit;
+        opData.irlReg = i;
+        opData.irlData = 0;
+
+        retVal = pirlOperationPerform(dev, op, &opData);
+        if (retVal != GT_OK)
+        {
+            DBG_INFO(("PIRL OP Failed.\n"));
+            return retVal;
+        }
+
+        data[i] = (GT_U16)opData.irlData;
+    }
+
+    return GT_OK;
+}
+#endif /* PIRL_DEBUG */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPIRL2.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPIRL2.c
new file mode 100644
index 000000000000..3a0de73d8b7d
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPIRL2.c
@@ -0,0 +1,1833 @@
+#include <Copyright.h>
+
+/*******************************************************************************
+* gtPIRL2.c
+*
+* DESCRIPTION:
+*       API definitions for Port based PIRL Resources
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtSem.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+/* special ingress rate limit para */
+static struct PIRL_PARA_TBL_T pirl2RateLimitParaTbl[] = {
+	/* BI----BRF-----CBS-------EBS--------------------*/
+	{0x000, 0x00, 0x000000, 0x000000},/*No rate limit*/
+	{0x186, 0x03, 0xD06470, 0xFFFFF0},/*PIRL_RATE_64K*/
+	{0x30D, 0x0D, 0x415370, 0xFFFFF0},/*PIRL_RATE_128K*/
+	{0x186, 0x0A, 0x712D70, 0xFFFFF0},/*PIRL_RATE_192K*/
+	{0x186, 0x0D, 0xA0C8F0, 0xFFFFF0},/*PIRL_RATE_256K*/
+	{0x138, 0x0D, 0x4191F0, 0xFFFFF0},/*PIRL_RATE_320K*/
+	{0x0C3, 0x0A, 0x712D70, 0xFFFFF0},/*PIRL_RATE_384K*/
+	{0x0C3, 0x0C, 0x595FB0, 0xFFFFF0},/*PIRL_RATE_448K*/
+	{0x0C3, 0x0D, 0x4191F0, 0xFFFFF0},/*PIRL_RATE_512K*/
+	{0x0C3, 0x0F, 0x29C430, 0xFFFFF0},/*PIRL_RATE_576K*/
+	{0x09C, 0x0D, 0x4191F0, 0xFFFFF0},/*PIRL_RATE_640K*/
+	{0x07C, 0x0C, 0x597EF0, 0xFFFFF0},/*PIRL_RATE_704K*/
+	{0x082, 0x0D, 0x71E8F0, 0xFFFFF0},/*PIRL_RATE_768K*/
+	{0x078, 0x0D, 0x4191F0, 0xFFFFF0},/*PIRL_RATE_832K*/
+	{0x084, 0x10, 0x1E69F0, 0xFFFFF0},/*PIRL_RATE_896K*/
+	{0x027, 0x05, 0xB896B0, 0xFFFFF0},/*PIRL_RATE_960K*/
+	{0x031, 0x07, 0xA28A28, 0xFFFFF0},/*PIRL_RATE_1M*/
+	{0x031, 0x0D, 0x451460, 0xFFFFF0},/*PIRL_RATE_2M*/
+	{0x021, 0x0D, 0x432C18, 0xFFFFF0},/*PIRL_RATE_3M*/
+	{0x01C, 0x0F, 0x2A6070, 0xFFFFF0},/*PIRL_RATE_4M*/
+	{0x065, 0x43, 0x029810, 0xFFFFF0},/*PIRL_RATE_5M*/
+	{0x037, 0x2C, 0x0186A0, 0xFFFFF0},/*PIRL_RATE_6M*/
+	{0x051, 0x4B, 0x0222E0, 0xFFFFF0},/*PIRL_RATE_7M*/
+	{0x035, 0x38, 0x0186A0, 0xFFFFF0},/*PIRL_RATE_8M*/
+	{0x03B, 0x46, 0x0186A0, 0xFFFFF0},/*PIRL_RATE_9M*/
+	{0x030, 0x40, 0x015F90, 0xFFFFF0},/*PIRL_RATE_10M*/
+	{0x033, 0x4B, 0x0186A0, 0xFFFFF0},/*PIRL_RATE_11M*/
+	{0x033, 0x51, 0x015F90, 0xFFFFF0},/*PIRL_RATE_12M*/
+	{0x02F, 0x51, 0x015F90, 0xFFFFF0},/*PIRL_RATE_13M*/
+	{0x02D, 0x54, 0x013880, 0xFFFFF0},/*PIRL_RATE_14M*/
+	{0x02A, 0x54, 0x013880, 0xFFFFF0},/*PIRL_RATE_15M*/
+	{0x030, 0x66, 0x015F90, 0xFFFFF0},/*PIRL_RATE_16M*/
+	{0x02F, 0x6A, 0x015F90, 0xFFFFF0},/*PIRL_RATE_17M*/
+	{0x02A, 0x64, 0x013880, 0xFFFFF0},/*PIRL_RATE_18M*/
+	{0x02D, 0x72, 0x013880, 0xFFFFF0},/*PIRL_RATE_19M*/
+	{0x02C, 0x75, 0x013880, 0xFFFFF0},/*PIRL_RATE_20M*/
+};
+
+
+/****************************************************************************/
+/* PIRL operation function declaration.                                    */
+/****************************************************************************/
+static GT_STATUS pirl2OperationPerform
+(
+    IN   GT_QD_DEV            *dev,
+    IN   GT_PIRL2_OPERATION    pirlOp,
+    INOUT GT_PIRL2_OP_DATA     *opData
+);
+
+static GT_STATUS pirl2Initialize
+(
+    IN  GT_QD_DEV              *dev
+);
+
+static GT_STATUS pirl2InitIRLResource
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                irlPort,
+    IN    GT_U32                irlRes
+);
+
+static GT_STATUS pirl2DisableIRLResource
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                irlPort,
+    IN    GT_U32                irlRes
+);
+
+static GT_STATUS pirl2DataToResource
+(
+    IN  GT_QD_DEV              *dev,
+    IN  GT_PIRL2_DATA        *pirlData,
+    OUT GT_PIRL2_RESOURCE    *res
+);
+
+static GT_STATUS pirl2ResourceToData
+(
+    IN  GT_QD_DEV              *dev,
+    IN  GT_PIRL2_RESOURCE    *res,
+    OUT GT_PIRL2_DATA        *pirlData
+);
+
+static GT_STATUS pirl2WriteResource
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                irlPort,
+    IN    GT_U32                irlRes,
+    IN  GT_PIRL2_RESOURCE    *res
+);
+
+static GT_STATUS pirl2ReadResource
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                irlPort,
+    IN    GT_U32                irlRes,
+    OUT GT_PIRL2_RESOURCE    *res
+);
+
+static GT_STATUS pirl2WriteTSMResource
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                irlPort,
+    IN    GT_U32                irlRes,
+    IN  GT_PIRL2_TSM_RESOURCE    *res
+);
+
+static GT_STATUS pirl2ReadTSMResource
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                irlPort,
+    IN    GT_U32                irlRes,
+    OUT GT_PIRL2_TSM_RESOURCE    *res
+);
+
+/*******************************************************************************
+* gpirl2WriteResource
+*
+* DESCRIPTION:
+*        This routine writes resource bucket parameters to the given resource
+*        of the port.
+*
+* INPUTS:
+*        port     - logical port number.
+*        irlRes   - bucket to be used (0 ~ 4).
+*        pirlData - PIRL resource parameters.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gpirl2WriteResource
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN  GT_U32        irlRes,
+    IN  GT_PIRL2_DATA    *pirlData
+)
+{
+    GT_STATUS           retVal;
+    GT_PIRL2_RESOURCE    pirlRes;
+    GT_U32               irlPort;         /* the physical port number     */
+    GT_U32                maxRes;
+
+    DBG_INFO(("gpirl2WriteResource Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PIRL2_RESOURCE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the given bucket number is valid */
+    if (IS_IN_DEV_GROUP(dev,DEV_RESTRICTED_PIRL2_RESOURCE))
+    {
+        maxRes = 2;
+    }
+    else
+    {
+        maxRes = 5;
+    }
+
+    if (irlRes >= maxRes)
+    {
+        DBG_INFO(("GT_BAD_PARAM irlRes\n"));
+        return GT_BAD_PARAM;
+    }
+
+    irlPort = (GT_U32)GT_LPORT_2_PORT(port);
+    if (irlPort == GT_INVALID_PORT)
+    {
+        DBG_INFO(("GT_BAD_PARAM port\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Initialize internal counters */
+    retVal = pirl2InitIRLResource(dev,irlPort,irlRes);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("PIRL Write Resource failed.\n"));
+        return retVal;
+    }
+
+    /* Program the Ingress Rate Resource Parameters */
+    retVal = pirl2DataToResource(dev,pirlData,&pirlRes);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("PIRL Data to PIRL Resource conversion failed.\n"));
+        return retVal;
+    }
+
+    retVal = pirl2WriteResource(dev,irlPort,irlRes,&pirlRes);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("PIRL Write Resource failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gpirl2ReadResource
+*
+* DESCRIPTION:
+*        This routine retrieves IRL Parameter.
+*
+* INPUTS:
+*        port     - logical port number.
+*        irlRes   - bucket to be used (0 ~ 4).
+*
+* OUTPUTS:
+*        pirlData - PIRL resource parameters.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gpirl2ReadResource
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN  GT_U32        irlRes,
+    OUT GT_PIRL2_DATA    *pirlData
+)
+{
+    GT_STATUS           retVal;
+    GT_U32                irlPort;
+    GT_PIRL2_RESOURCE    pirlRes;
+    GT_U32                maxRes;
+
+    DBG_INFO(("gpirl2ReadResource Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PIRL2_RESOURCE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the given bucket number is valid */
+    if (IS_IN_DEV_GROUP(dev,DEV_RESTRICTED_PIRL2_RESOURCE))
+    {
+        maxRes = 2;
+    }
+    else
+    {
+        maxRes = 5;
+    }
+
+    if (irlRes >= maxRes)
+    {
+        DBG_INFO(("GT_BAD_PARAM irlRes\n"));
+        return GT_BAD_PARAM;
+    }
+
+    irlPort = (GT_U32)GT_LPORT_2_PORT(port);
+    if (irlPort == GT_INVALID_PORT)
+    {
+        DBG_INFO(("GT_BAD_PARAM port\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Read the Ingress Rate Resource Parameters */
+    retVal = pirl2ReadResource(dev,irlPort,irlRes,&pirlRes);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("PIRL Read Resource failed.\n"));
+        return retVal;
+    }
+
+    retVal = pirl2ResourceToData(dev,&pirlRes,pirlData);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("PIRL Resource to PIRL Data conversion failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gpirl2DisableResource
+*
+* DESCRIPTION:
+*       This routine disables Ingress Rate Limiting for the given bucket.
+*
+* INPUTS:
+*       port     - logical port number.
+*        irlRes   - bucket to be used (0 ~ 4).
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpirl2DisableResource
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN  GT_U32        irlRes
+)
+{
+    GT_STATUS           retVal;
+    GT_U32                irlPort;
+    GT_U32                maxRes;
+
+    DBG_INFO(("gpirl2Dectivate Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PIRL2_RESOURCE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the given bucket number is valid */
+    if (IS_IN_DEV_GROUP(dev,DEV_RESTRICTED_PIRL2_RESOURCE))
+    {
+        maxRes = 2;
+    }
+    else
+    {
+        maxRes = 5;
+    }
+
+    if (irlRes >= maxRes)
+    {
+        DBG_INFO(("GT_BAD_PARAM irlRes\n"));
+        return GT_BAD_PARAM;
+    }
+
+    irlPort = (GT_U32)GT_LPORT_2_PORT(port);
+    if (irlPort == GT_INVALID_PORT)
+    {
+        DBG_INFO(("GT_BAD_PARAM port\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* disable irl resource */
+    retVal = pirl2DisableIRLResource(dev, irlPort, irlRes);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Getting Port State failed\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gpirl2SetCurTimeUpInt
+*
+* DESCRIPTION:
+*       This function sets the current time update interval.
+*        Please contact FAE for detailed information.
+*
+* INPUTS:
+*       upInt - updata interval (0 ~ 7)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gpirl2SetCurTimeUpInt
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                upInt
+)
+{
+    GT_STATUS       retVal;        /* Functions return value */
+    GT_PIRL2_OPERATION    op;
+    GT_PIRL2_OP_DATA    opData;
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PIRL2_RESOURCE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (upInt > 0x7)
+        return GT_BAD_PARAM;
+
+    op = PIRL_READ_RESOURCE;
+
+    opData.irlPort = 0xF;
+    opData.irlRes = 0;
+    opData.irlReg = 1;
+    opData.irlData = 0;
+
+    retVal = pirl2OperationPerform(dev, op, &opData);
+    if (retVal != GT_OK)
+    {
+           DBG_INFO(("PIRL OP Failed.\n"));
+           return retVal;
+    }
+
+    op = PIRL_WRITE_RESOURCE;
+    opData.irlData = (opData.irlData & 0xFFF8) | (GT_U16)upInt;
+
+    retVal = pirl2OperationPerform(dev, op, &opData);
+    if (retVal != GT_OK)
+    {
+           DBG_INFO(("PIRL OP Failed.\n"));
+           return retVal;
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gpirl2WriteTSMResource
+*
+* DESCRIPTION:
+*        This routine writes rate resource bucket parameters in Time Slot Metering
+*        mode to the given resource of the port.
+*
+* INPUTS:
+*        port     - logical port number.
+*        irlRes   - bucket to be used (0 ~ 1).
+*        pirlData - PIRL TSM resource parameters.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        Only Resource 0 and 1 can be supported for TSM Mode.
+*
+*******************************************************************************/
+GT_STATUS gpirl2WriteTSMResource
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN  GT_U32        irlRes,
+    IN  GT_PIRL2_TSM_DATA    *pirlData
+)
+{
+    GT_STATUS           retVal;
+    GT_PIRL2_TSM_RESOURCE    pirlRes;
+    GT_U32               irlPort;         /* the physical port number     */
+    GT_U32                maxRes;
+    GT_U32                cbs, cts, i, rate;
+
+    DBG_INFO(("gpirl2WriteTSMResource Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TSM_RESOURCE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the given bucket number is valid */
+    maxRes = 2;
+
+    if (irlRes >= maxRes)
+    {
+        DBG_INFO(("GT_BAD_PARAM irlRes\n"));
+        return GT_BAD_PARAM;
+    }
+
+    irlPort = (GT_U32)GT_LPORT_2_PORT(port);
+    if (irlPort == GT_INVALID_PORT)
+    {
+        DBG_INFO(("GT_BAD_PARAM port\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Initialize internal counters */
+    retVal = pirl2InitIRLResource(dev,irlPort,irlRes);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("PIRL Write Resource failed.\n"));
+        return retVal;
+    }
+
+    if (pirlData->customSetup.isValid == GT_TRUE)
+    {
+        pirlRes.cbsLimit = pirlData->customSetup.cbsLimit;
+        pirlRes.ctsIntv = pirlData->customSetup.ctsIntv;
+        pirlRes.ebsLimit = pirlData->customSetup.ebsLimit;
+        pirlRes.actionMode = pirlData->customSetup.actionMode;
+    }
+    else
+    {
+        /* convert ingressRate to cbsLimit and ctsIntv */
+        cts = 1;
+        cbs = 0;
+        i = 3;
+        rate = pirlData->ingressRate;
+        while(cts < 16)
+        {
+            cbs = TSM_GET_CBS(rate, cts);
+            if ((cbs == 0) || (cbs <= 0xFFFF))
+                break;
+            cts += i;
+            i = cts;
+        }
+
+        if (cts > 16)
+        {
+            return GT_BAD_PARAM;
+        }
+
+        switch (cts)
+        {
+            case 1:
+                pirlRes.ctsIntv = 3;
+                break;
+            case 4:
+                pirlRes.ctsIntv = 2;
+                break;
+            case 8:
+                pirlRes.ctsIntv = 1;
+                break;
+            case 16:
+                pirlRes.ctsIntv = 0;
+                break;
+            default:
+                return GT_FAIL;
+        }
+
+        pirlRes.cbsLimit = cbs;
+        pirlRes.ebsLimit = 0xFFFF;
+        pirlRes.actionMode = 1;
+    }
+
+    pirlRes.mgmtNrlEn = pirlData->mgmtNrlEn;
+    pirlRes.priMask = pirlData->priMask;
+    pirlRes.tsmMode = GT_TRUE;
+
+    if (pirlData->tsmMode == GT_FALSE)
+    {
+        pirlRes.tsmMode = 0;
+        pirlRes.cbsLimit = 0;
+        pirlRes.ctsIntv = 0;
+        pirlRes.ebsLimit = 0;
+        pirlRes.actionMode = 0;
+        pirlRes.mgmtNrlEn = 0;
+        pirlRes.priMask = 0;
+    }
+
+    retVal = pirl2WriteTSMResource(dev,irlPort,irlRes,&pirlRes);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("PIRL Write Resource failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gpirl2ReadTSMResource
+*
+* DESCRIPTION:
+*        This routine retrieves IRL Parameter.
+*        Returned ingressRate would be rough number. Instead, customSetup will
+*        have the exact configured value.
+*
+* INPUTS:
+*        port     - logical port number.
+*        irlRes   - bucket to be used (0 ~ 1).
+*
+* OUTPUTS:
+*        pirlData - PIRL resource parameters.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        Only Resource 0 and 1 can be supported for TSM Mode.
+*
+*******************************************************************************/
+GT_STATUS gpirl2ReadTSMResource
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN  GT_U32        irlRes,
+    OUT GT_PIRL2_TSM_DATA    *pirlData
+)
+{
+    GT_STATUS           retVal;
+    GT_U32                irlPort;
+    GT_PIRL2_TSM_RESOURCE    pirlRes;
+    GT_U32                maxRes, cbs, cts;
+
+    DBG_INFO(("gpirl2ReadTSMResource Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TSM_RESOURCE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the given bucket number is valid */
+    maxRes = 2;
+
+    if (irlRes >= maxRes)
+    {
+        DBG_INFO(("GT_BAD_PARAM irlRes\n"));
+        return GT_BAD_PARAM;
+    }
+
+    irlPort = (GT_U32)GT_LPORT_2_PORT(port);
+    if (irlPort == GT_INVALID_PORT)
+    {
+        DBG_INFO(("GT_BAD_PARAM port\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Read the Ingress Rate Resource Parameters */
+    retVal = pirl2ReadTSMResource(dev,irlPort,irlRes,&pirlRes);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("PIRL Read Resource failed.\n"));
+        return retVal;
+    }
+
+    if (pirlRes.tsmMode == 0)
+    {
+        /* TMS Mode is not enabled */
+        pirlData->tsmMode = GT_FALSE;
+        pirlData->ingressRate = 0;
+        pirlData->mgmtNrlEn = 0;
+        pirlData->priMask = 0;
+        pirlData->customSetup.isValid = 0;
+        pirlData->customSetup.cbsLimit = 0;
+        pirlData->customSetup.ctsIntv = 0;
+        pirlData->customSetup.ebsLimit = 0;
+        pirlData->customSetup.actionMode = 0;
+        return GT_OK;
+    }
+
+    cbs = pirlRes.cbsLimit;
+    switch (pirlRes.ctsIntv)
+    {
+        case 0:
+            cts = 16;
+            break;
+        case 1:
+            cts = 8;
+            break;
+        case 2:
+            cts = 4;
+            break;
+        case 3:
+            cts = 1;
+            break;
+        default:
+            return GT_FAIL;
+    }
+
+    pirlData->ingressRate = TSM_GET_RATE(cbs,cts);
+
+    pirlData->mgmtNrlEn = pirlRes.mgmtNrlEn;
+    pirlData->priMask = pirlRes.priMask;
+
+    pirlData->customSetup.isValid = GT_TRUE;
+    pirlData->customSetup.cbsLimit = pirlRes.cbsLimit;
+    pirlData->customSetup.ctsIntv = pirlRes.ctsIntv;
+    pirlData->customSetup.ebsLimit = pirlRes.ebsLimit;
+    pirlData->customSetup.actionMode = pirlRes.actionMode;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+
+/****************************************************************************/
+/* Internal functions.                                                  */
+/****************************************************************************/
+
+/*******************************************************************************
+* gpirl2Initialize
+*
+* DESCRIPTION:
+*       This routine initializes PIRL Resources.
+*
+* INPUTS:
+*       None
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       None
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gpirl2Initialize
+(
+    IN  GT_QD_DEV              *dev
+)
+{
+    GT_STATUS           retVal;
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PIRL2_RESOURCE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = pirl2Initialize(dev);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* pirl2OperationPerform
+*
+* DESCRIPTION:
+*       This function accesses Ingress Rate Command Register and Data Register.
+*
+* INPUTS:
+*       pirlOp     - The stats operation bits to be written into the stats
+*                    operation register.
+*
+* OUTPUTS:
+*       pirlData   - points to the data storage where the MIB counter will be saved.
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+static GT_STATUS pirl2OperationPerform
+(
+    IN    GT_QD_DEV             *dev,
+    IN    GT_PIRL2_OPERATION    pirlOp,
+    INOUT GT_PIRL2_OP_DATA        *opData
+)
+{
+    GT_STATUS       retVal;    /* Functions return value */
+    GT_U16          data;     /* temporary Data storage */
+
+    gtSemTake(dev,dev->pirlRegsSem,OS_WAIT_FOREVER);
+
+    /* Wait until the pirl in ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_INGRESS_RATE_COMMAND;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->pirlRegsSem);
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+    while(data == 1)
+    {
+        retVal = hwGetGlobal2RegField(dev,QD_REG_INGRESS_RATE_COMMAND,15,1,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->pirlRegsSem);
+            return retVal;
+        }
+    }
+#endif
+
+    /* Set the PIRL Operation register */
+    switch (pirlOp)
+    {
+        case PIRL_INIT_ALL_RESOURCE:
+            data = (1 << 15) | (PIRL_INIT_ALL_RESOURCE << 12);
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_INGRESS_RATE_COMMAND,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->pirlRegsSem);
+                return retVal;
+            }
+            break;
+        case PIRL_INIT_RESOURCE:
+            data = (GT_U16)((1 << 15) | (PIRL_INIT_RESOURCE << 12) |
+                    (opData->irlPort << 8) |
+                    (opData->irlRes << 5));
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_INGRESS_RATE_COMMAND,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->pirlRegsSem);
+                return retVal;
+            }
+            break;
+
+        case PIRL_WRITE_RESOURCE:
+            data = (GT_U16)opData->irlData;
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_INGRESS_RATE_DATA,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->pirlRegsSem);
+                return retVal;
+            }
+
+            data = (GT_U16)((1 << 15) | (PIRL_WRITE_RESOURCE << 12) |
+                    (opData->irlPort << 8)    |
+                    (opData->irlRes << 5)    |
+                    (opData->irlReg & 0xF));
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_INGRESS_RATE_COMMAND,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->pirlRegsSem);
+                return retVal;
+            }
+            break;
+
+        case PIRL_READ_RESOURCE:
+            data = (GT_U16)((1 << 15) | (PIRL_READ_RESOURCE << 12) |
+                    (opData->irlPort << 8)    |
+                    (opData->irlRes << 5)    |
+                    (opData->irlReg & 0xF));
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_INGRESS_RATE_COMMAND,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->pirlRegsSem);
+                return retVal;
+            }
+
+#ifdef GT_RMGMT_ACCESS
+            {
+              HW_DEV_REG_ACCESS regAccess;
+
+              regAccess.entries = 1;
+
+              regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+              regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+              regAccess.rw_reg_list[0].reg = QD_REG_INGRESS_RATE_COMMAND;
+              regAccess.rw_reg_list[0].data = 15;
+              retVal = hwAccessMultiRegs(dev, &regAccess);
+              if(retVal != GT_OK)
+              {
+                gtSemGive(dev,dev->pirlRegsSem);
+                return retVal;
+              }
+            }
+#else
+            data = 1;
+            while(data == 1)
+            {
+                retVal = hwGetGlobal2RegField(dev,QD_REG_INGRESS_RATE_COMMAND,15,1,&data);
+                if(retVal != GT_OK)
+                {
+                    gtSemGive(dev,dev->pirlRegsSem);
+                    return retVal;
+                }
+            }
+#endif
+
+            retVal = hwReadGlobal2Reg(dev,QD_REG_INGRESS_RATE_DATA,&data);
+            opData->irlData = (GT_U32)data;
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->pirlRegsSem);
+                return retVal;
+            }
+            gtSemGive(dev,dev->pirlRegsSem);
+            return retVal;
+
+        default:
+
+            gtSemGive(dev,dev->pirlRegsSem);
+            return GT_FAIL;
+    }
+
+    /* Wait until the pirl in ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_INGRESS_RATE_COMMAND;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->pirlRegsSem);
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+    while(data == 1)
+    {
+        retVal = hwGetGlobal2RegField(dev,QD_REG_INGRESS_RATE_COMMAND,15,1,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->pirlRegsSem);
+            return retVal;
+        }
+    }
+#endif
+
+    gtSemGive(dev,dev->pirlRegsSem);
+    return retVal;
+}
+
+/*
+ * Initialize all PIRL resources to the inital state.
+*/
+static GT_STATUS pirl2Initialize
+(
+    IN  GT_QD_DEV              *dev
+)
+{
+    GT_STATUS       retVal;    /* Functions return value */
+    GT_PIRL2_OPERATION    op;
+
+    op = PIRL_INIT_ALL_RESOURCE;
+
+
+    retVal = pirl2OperationPerform(dev, op, NULL);
+    if (retVal != GT_OK)
+    {
+        DBG_INFO(("PIRL OP Failed.\n"));
+        return retVal;
+    }
+
+
+    retVal = gpirl2SetCurTimeUpInt(dev,4);
+    if (retVal != GT_OK)
+    {
+        DBG_INFO(("PIRL OP Failed.\n"));
+    }
+
+    return retVal;
+}
+
+/*
+ * Initialize the selected PIRL resource to the inital state.
+ * This function initializes only the BSM structure for the IRL Unit.
+*/
+static GT_STATUS pirl2InitIRLResource
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                irlPort,
+    IN    GT_U32                irlRes
+)
+{
+    GT_STATUS       retVal;    /* Functions return value */
+    GT_PIRL2_OPERATION    op;
+    GT_PIRL2_OP_DATA     opData;
+
+    op = PIRL_INIT_RESOURCE;
+    opData.irlPort = irlPort;
+    opData.irlRes = irlRes;
+
+    retVal = pirl2OperationPerform(dev, op, &opData);
+    if (retVal != GT_OK)
+    {
+        DBG_INFO(("PIRL OP Failed.\n"));
+        return retVal;
+    }
+
+    return retVal;
+}
+
+/*
+ * Disable the selected PIRL resource.
+*/
+static GT_STATUS pirl2DisableIRLResource
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                irlPort,
+    IN    GT_U32                irlRes
+)
+{
+    GT_STATUS       retVal;            /* Functions return value */
+    GT_PIRL2_OPERATION    op;
+    GT_PIRL2_OP_DATA    opData;
+    int                i;
+
+    op = PIRL_WRITE_RESOURCE;
+
+    for(i=0; i<8; i++)
+    {
+        opData.irlPort = irlPort;
+        opData.irlRes = irlRes;
+        opData.irlReg = i;
+        opData.irlData = 0;
+
+        retVal = pirl2OperationPerform(dev, op, &opData);
+        if (retVal != GT_OK)
+        {
+            DBG_INFO(("PIRL OP Failed.\n"));
+            return retVal;
+        }
+    }
+
+    return GT_OK;
+}
+
+/*find the greatest common divisor for two interger
+*used to calculate BRF and BI
+*/
+static GT_U32 pirl2GetGCD(IN GT_U32 data1,
+		IN GT_U32 data2)
+{
+	GT_U32 temp1, temp2, temp;
+
+	if (data1 == 0 || data2 == 0)
+		return 1;
+
+	temp1 = data1;
+	temp2 = data2;
+
+	if (temp1 < temp2) {
+		temp = temp1;
+		temp1 = temp2;
+		temp2 = temp;
+	}
+	temp = temp1 % temp2;
+	while (temp) {
+		temp1 = temp2;
+		temp2 = temp;
+		temp = temp1 % temp2;
+	}
+
+	return temp2;
+}
+
+/*
+ * convert PIRL Data structure to PIRL Resource structure.
+ * if PIRL Data is not valid, return GT_BAD_PARARM;
+*/
+static GT_STATUS pirl2DataToResource
+(
+    IN  GT_QD_DEV              *dev,
+    IN  GT_PIRL2_DATA        *pirlData,
+    OUT GT_PIRL2_RESOURCE    *res
+)
+{
+    GT_U32 typeMask;
+    GT_U32 data;
+	GT_U32 burst_allocation;
+	GT_U32 pirl2_cir;
+	GT_U32 pirl_gcd = 1;
+
+    gtMemSet((void*)res,0,sizeof(GT_PIRL2_RESOURCE));
+
+    data = (GT_U32)(pirlData->accountQConf|pirlData->accountFiltered|
+                    pirlData->mgmtNrlEn|pirlData->saNrlEn|pirlData->daNrlEn|
+                    pirlData->samplingMode);
+
+    if (data > 1)
+    {
+        DBG_INFO(("GT_BAD_PARAM (Boolean)\n"));
+        return GT_BAD_PARAM;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_RESTRICTED_PIRL2_RESOURCE))
+    {
+        if (pirlData->samplingMode != GT_FALSE)
+        {
+            DBG_INFO(("GT_BAD_PARAM (sampling mode)\n"));
+            return GT_BAD_PARAM;
+        }
+    }
+
+    res->accountQConf = pirlData->accountQConf;
+    res->accountFiltered = pirlData->accountFiltered;
+    res->mgmtNrlEn = pirlData->mgmtNrlEn;
+    res->saNrlEn = pirlData->saNrlEn;
+    res->daNrlEn = pirlData->daNrlEn;
+    res->samplingMode = pirlData->samplingMode;
+
+    switch(pirlData->actionMode)
+    {
+        case PIRL_ACTION_ACCEPT:
+        case PIRL_ACTION_USE_LIMIT_ACTION:
+            res->actionMode = pirlData->actionMode;
+            break;
+        default:
+            DBG_INFO(("GT_BAD_PARAM actionMode\n"));
+            return GT_BAD_PARAM;
+    }
+
+    switch(pirlData->ebsLimitAction)
+    {
+        case ESB_LIMIT_ACTION_DROP:
+        case ESB_LIMIT_ACTION_FC:
+            res->ebsLimitAction = pirlData->ebsLimitAction;
+            break;
+        default:
+            DBG_INFO(("GT_BAD_PARAM ebsLimitAction\n"));
+            return GT_BAD_PARAM;
+    }
+
+    switch(pirlData->fcDeassertMode)
+    {
+        case GT_PIRL_FC_DEASSERT_EMPTY:
+        case GT_PIRL_FC_DEASSERT_CBS_LIMIT:
+            res->fcDeassertMode = pirlData->fcDeassertMode;
+            break;
+        default:
+            if(res->ebsLimitAction != ESB_LIMIT_ACTION_FC)
+            {
+                res->fcDeassertMode    = GT_PIRL_FC_DEASSERT_EMPTY;
+                break;
+            }
+            DBG_INFO(("GT_BAD_PARAM fcDeassertMode\n"));
+            return GT_BAD_PARAM;
+    }
+
+    if(pirlData->customSetup.isValid == GT_TRUE)
+    {
+        res->ebsLimit = pirlData->customSetup.ebsLimit;
+        res->cbsLimit = pirlData->customSetup.cbsLimit;
+        res->bktIncrement = pirlData->customSetup.bktIncrement;
+        res->bktRateFactor = pirlData->customSetup.bktRateFactor;
+    }
+    else
+    {
+        if(pirlData->ingressRate == 0)
+        {
+            DBG_INFO(("GT_BAD_PARAM ingressRate(%i)\n",pirlData->ingressRate));
+            return GT_BAD_PARAM;
+        }
+
+	if (pirlData->ingressRate < 1000) { /* less than 1Mbps */
+		/* it should be divided by 64 */
+		if (pirlData->ingressRate % 64) {
+			DBG_INFO(("GT_BAD_PARAM ingressRate(%i)\n", pirlData->ingressRate));
+			return GT_BAD_PARAM;
+		}
+		/* Less than 1Mbps, use special value */
+		res->bktIncrement = pirl2RateLimitParaTbl[pirlData->ingressRate / 64].BI;
+		res->bktRateFactor = pirl2RateLimitParaTbl[pirlData->ingressRate / 64].BRF;
+		res->cbsLimit = pirl2RateLimitParaTbl[pirlData->ingressRate / 64].CBS;
+		res->ebsLimit = pirl2RateLimitParaTbl[pirlData->ingressRate / 64].EBS;
+	} else if (pirlData->ingressRate <= 20000) {
+		/* greater or equal to 1Mbps, and less than or equal to 20Mbps, it should be divided by 1000 */
+		if (pirlData->ingressRate % 1000) {
+			DBG_INFO(("GT_BAD_PARAM ingressRate(%i)\n", pirlData->ingressRate));
+			return GT_BAD_PARAM;
+		}
+		res->bktIncrement = pirl2RateLimitParaTbl[PIRL_RATE_960K + pirlData->ingressRate / 1000].BI;
+		res->bktRateFactor = pirl2RateLimitParaTbl[PIRL_RATE_960K + pirlData->ingressRate / 1000].BRF;
+		res->cbsLimit = pirl2RateLimitParaTbl[PIRL_RATE_960K + pirlData->ingressRate / 1000].CBS;
+		res->ebsLimit = pirl2RateLimitParaTbl[PIRL_RATE_960K + pirlData->ingressRate / 1000].EBS;
+	} else {/* greater than 20Mbps */
+		if (pirlData->ingressRate < 100000) {
+			/* it should be divided by 1000, if less than 100Mbps*/
+			if (pirlData->ingressRate % 1000) {
+				DBG_INFO(("GT_BAD_PARAM ingressRate(%i)\n", pirlData->ingressRate));
+				return GT_BAD_PARAM;
+			}
+		} else {
+			/* it should be divided by 10000, if more or equal than 100Mbps */
+			if (pirlData->ingressRate % 10000) {
+				DBG_INFO(("GT_BAD_PARAM ingressRate(%i)\n", pirlData->ingressRate));
+				return GT_BAD_PARAM;
+			}
+		}
+
+		pirl2_cir = pirlData->ingressRate * 1000;
+
+		burst_allocation = pirl2_cir;
+
+		pirl_gcd = pirl2GetGCD(pirl2_cir, PIRL_ALPHA);
+		res->bktRateFactor = pirl2_cir / pirl_gcd;
+		/* Correct Rate Factor because the actuall rate  will be 5/4 of the setting rate,
+		so we should decrease the configuration rate to 4/5,
+		if we use res->bktRateFactor = res->bktRateFactor * 4 / 5, then bktRateFactor might be a decimal,
+		it will cause inaccurate,
+		so here I amplify bktRateFactor by 4 and amplify bktIncrement by 5.
+		And if we want to hold random size packets, we can plus 1 more to bktRateFactor, here I do not do it.
+		Since in all cases, res->bktRateFactor * 4 will not be larger than 2^16,
+		res->bktIncrement *5 will not be larger than 2^12,  so it's safe*/
+		res->bktRateFactor = res->bktRateFactor * 4;
+		res->bktIncrement = (PIRL_ALPHA / pirl_gcd) * 5;
+
+		res->ebsLimit = RECOMMENDED_ESB_LIMIT(dev, pirlData->ingressRate);
+		/* cbs = ebs - ba*bi/8, and we should avoid the counting number > ULONG_MAX*/
+		if ((burst_allocation / 8) >
+			(((~0UL) - RECOMMENDED_CBS_LIMIT(dev, pirlData->ingressRate)) / res->bktIncrement))
+			res->cbsLimit = RECOMMENDED_CBS_LIMIT(dev, pirlData->ingressRate);
+		else if (res->ebsLimit > (res->bktIncrement * (burst_allocation/8)
+					+ RECOMMENDED_CBS_LIMIT(dev, pirlData->ingressRate)))
+			res->cbsLimit = res->ebsLimit - res->bktIncrement * (burst_allocation/8);
+		else
+			res->cbsLimit = RECOMMENDED_CBS_LIMIT(dev, pirlData->ingressRate);
+
+		DBG_INFO(("ingressRate %u pirl_gcd %u bktIncrement from 0x%x increased to 0x%x ",
+			pirlData->ingressRate, pirl_gcd, res->bktIncrement / 5, res->bktIncrement));
+		DBG_INFO(("bktRateFactor from 0x%x increased to 0x%x cbsLimit 0x%x\r\n",
+			res->bktRateFactor / 4,	res->bktRateFactor, res->cbsLimit));
+	}
+    }
+
+    switch(pirlData->bktRateType)
+    {
+        case BUCKET_TYPE_TRAFFIC_BASED:
+            res->bktRateType = pirlData->bktRateType;
+
+            typeMask = 0x7FFF;
+
+            if (pirlData->bktTypeMask > typeMask)
+            {
+                DBG_INFO(("GT_BAD_PARAM bktTypeMask(%#x)\n",pirlData->bktTypeMask));
+                return GT_BAD_PARAM;
+            }
+
+               res->bktTypeMask = pirlData->bktTypeMask;
+
+            if (pirlData->bktTypeMask & BUCKET_TRAFFIC_ARP)
+            {
+                res->bktTypeMask &= ~BUCKET_TRAFFIC_ARP;
+                res->bktTypeMask |= 0x80;
+            }
+
+            if (pirlData->priORpt > 1)
+            {
+                DBG_INFO(("GT_BAD_PARAM rpiORpt\n"));
+                return GT_BAD_PARAM;
+            }
+
+            res->priORpt = pirlData->priORpt;
+
+            if (pirlData->priMask >= (1 << 4))
+            {
+                DBG_INFO(("GT_BAD_PARAM priMask(%#x)\n",pirlData->priMask));
+                return GT_BAD_PARAM;
+            }
+
+            res->priMask = pirlData->priMask;
+
+            break;
+
+        case BUCKET_TYPE_RATE_BASED:
+            res->bktRateType = pirlData->bktRateType;
+               res->bktTypeMask = pirlData->bktTypeMask;
+            res->priORpt = pirlData->priORpt;
+            res->priMask = pirlData->priMask;
+            break;
+
+        default:
+            DBG_INFO(("GT_BAD_PARAM bktRateType(%#x)\n",pirlData->bktRateType));
+            return GT_BAD_PARAM;
+    }
+
+    switch(pirlData->byteTobeCounted)
+    {
+        case GT_PIRL2_COUNT_FRAME:
+        case GT_PIRL2_COUNT_ALL_LAYER1:
+        case GT_PIRL2_COUNT_ALL_LAYER2:
+        case GT_PIRL2_COUNT_ALL_LAYER3:
+            res->byteTobeCounted = pirlData->byteTobeCounted;
+            break;
+        default:
+            DBG_INFO(("GT_BAD_PARAM byteTobeCounted(%#x)\n",pirlData->byteTobeCounted));
+            return GT_BAD_PARAM;
+    }
+
+    return GT_OK;
+}
+
+/*
+ * convert PIRL Resource structure to PIRL Data structure.
+*/
+static GT_STATUS pirl2ResourceToData
+(
+    IN  GT_QD_DEV              *dev,
+    IN  GT_PIRL2_RESOURCE    *res,
+    OUT GT_PIRL2_DATA        *pirlData
+)
+{
+    GT_U32    rate;
+    GT_U32    factor;
+
+    pirlData->accountQConf = res->accountQConf;
+    pirlData->accountFiltered = res->accountFiltered;
+    pirlData->mgmtNrlEn = res->mgmtNrlEn;
+    pirlData->saNrlEn = res->saNrlEn;
+    pirlData->daNrlEn = res->daNrlEn;
+    pirlData->samplingMode = res->samplingMode;
+    pirlData->ebsLimitAction = res->ebsLimitAction;
+    pirlData->actionMode = res->actionMode;
+    pirlData->fcDeassertMode = res->fcDeassertMode;
+
+    pirlData->customSetup.isValid = GT_FALSE;
+
+    FACTOR_FROM_BUCKET_INCREMENT(dev,res->bktIncrement,factor);
+
+    rate = res->bktRateFactor * factor;
+    if(factor == 128)
+    {
+        pirlData->ingressRate = rate - (rate % 1000);
+    }
+    else if (factor == 0)
+    {
+        pirlData->ingressRate = 0;
+        pirlData->customSetup.isValid = GT_TRUE;
+        pirlData->customSetup.ebsLimit = res->ebsLimit;
+        pirlData->customSetup.cbsLimit = res->cbsLimit;
+        pirlData->customSetup.bktIncrement = res->bktIncrement;
+        pirlData->customSetup.bktRateFactor = res->bktRateFactor;
+    }
+    else
+    {
+        pirlData->ingressRate = rate;
+    }
+
+    pirlData->bktRateType = res->bktRateType;
+    pirlData->bktTypeMask = res->bktTypeMask;
+
+    if (pirlData->bktTypeMask & 0x80)
+    {
+        res->bktTypeMask &= ~0x80;
+        res->bktTypeMask |= BUCKET_TRAFFIC_ARP;
+    }
+
+    pirlData->priORpt = res->priORpt;
+    pirlData->priMask = res->priMask;
+
+    pirlData->byteTobeCounted = res->byteTobeCounted;
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* pirl2WriteResource
+*
+* DESCRIPTION:
+*       This function writes IRL Resource to BCM (Bucket Configuration Memory)
+*
+* INPUTS:
+*       irlPort - physical port number.
+*        irlRes  - bucket to be used (0 ~ 4).
+*       res     - IRL Resource data
+*
+* OUTPUTS:
+*       Nont.
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+static GT_STATUS pirl2WriteResource
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                irlPort,
+    IN    GT_U32                irlRes,
+    IN  GT_PIRL2_RESOURCE    *res
+)
+{
+    GT_STATUS       retVal;            /* Functions return value */
+    GT_U16          data[8];     /* temporary Data storage */
+    GT_PIRL2_OPERATION    op;
+    GT_PIRL2_OP_DATA     opData;
+    int                i;
+
+    op = PIRL_WRITE_RESOURCE;
+
+    /* reg0 data */
+    data[0] = (GT_U16)((res->bktRateType << 15) |    /* Bit[15] : Bucket Rate Type */
+                      (res->bktTypeMask << 0 ));         /* Bit[14:0] : Traffic Type   */
+
+    /* reg1 data */
+    data[1] = (GT_U16)res->bktIncrement;    /* Bit[11:0] : Bucket Increment */
+
+    /* reg2 data */
+    data[2] = (GT_U16)res->bktRateFactor;    /* Bit[15:0] : Bucket Rate Factor */
+
+    /* reg3 data */
+    data[3] = (GT_U16)((res->cbsLimit & 0xFFF) << 4)|    /* Bit[15:4] : CBS Limit[11:0] */
+                    (res->byteTobeCounted << 2);        /* Bit[3:0] : Bytes to be counted */
+
+    /* reg4 data */
+    data[4] = (GT_U16)(res->cbsLimit >> 12);        /* Bit[11:0] : CBS Limit[23:12] */
+
+    /* reg5 data */
+    data[5] = (GT_U16)(res->ebsLimit & 0xFFFF);        /* Bit[15:0] : EBS Limit[15:0] */
+
+    /* reg6 data */
+    data[6] = (GT_U16)((res->ebsLimit >> 16)    |    /* Bit[7:0] : EBS Limit[23:16] */
+                    (res->samplingMode << 11)    |    /* Bit[11] : Sampling Mode */
+                    (res->ebsLimitAction << 12)    |    /* Bit[12] : EBS Limit Action */
+                    (res->actionMode << 13)        |    /* Bit[13] : Action Mode */
+                    (res->fcDeassertMode << 14));    /* Bit[14] : Flow control mode */
+
+    /* reg7 data */
+    data[7] = (GT_U16)((res->daNrlEn)            |    /* Bit[0]  : DA Nrl En */
+                    (res->saNrlEn << 1)            |    /* Bit[1]  : SA Nrl En */
+                    (res->mgmtNrlEn << 2)         |    /* Bit[2]  : MGMT Nrl En */
+                    (res->priMask << 8)         |    /* Bit[11:8] : Priority Queue Mask */
+                    (res->priORpt << 12)         |    /* Bit[12] : Priority OR PacketType */
+                    (res->accountFiltered << 14)|    /* Bit[14] : Account Filtered */
+                    (res->accountQConf << 15));        /* Bit[15] : Account QConf */
+
+    for(i=0; i<8; i++)
+    {
+        opData.irlPort = irlPort;
+        opData.irlRes = irlRes;
+        opData.irlReg = i;
+        opData.irlData = data[i];
+
+        retVal = pirl2OperationPerform(dev, op, &opData);
+        if (retVal != GT_OK)
+        {
+            DBG_INFO(("PIRL OP Failed.\n"));
+            return retVal;
+        }
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* pirl2ReadResource
+*
+* DESCRIPTION:
+*       This function reads IRL Resource from BCM (Bucket Configuration Memory)
+*
+* INPUTS:
+*       irlPort  - physical port number.
+*        irlRes   - bucket to be used (0 ~ 4).
+*
+* OUTPUTS:
+*       res - IRL Resource data
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+static GT_STATUS pirl2ReadResource
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                irlPort,
+    IN    GT_U32                irlRes,
+    OUT GT_PIRL2_RESOURCE    *res
+)
+{
+    GT_STATUS       retVal;        /* Functions return value */
+    GT_U16          data[8];     /* temporary Data storage */
+    GT_PIRL2_OPERATION    op;
+    GT_PIRL2_OP_DATA    opData;
+    int                i;
+
+    op = PIRL_READ_RESOURCE;
+
+    for(i=0; i<8; i++)
+    {
+        opData.irlPort = irlPort;
+        opData.irlRes = irlRes;
+        opData.irlReg = i;
+        opData.irlData = 0;
+
+        retVal = pirl2OperationPerform(dev, op, &opData);
+        if (retVal != GT_OK)
+        {
+            DBG_INFO(("PIRL OP Failed.\n"));
+            return retVal;
+        }
+
+        data[i] = (GT_U16)opData.irlData;
+    }
+
+
+    /* reg0 data */
+    res->bktRateType = (data[0] >> 15) & 0x1;
+    res->bktTypeMask = (data[0] >> 0) & 0x7FFF;
+
+    /* reg1 data */
+    res->bktIncrement = data[1] & 0xFFF;
+
+    /* reg2 data */
+    res->bktRateFactor = data[2] & 0xFFFF;
+
+    /* reg3,4 data */
+    res->byteTobeCounted = (data[3] >> 2) & 0x3;
+    res->cbsLimit = ((data[3] >> 4) & 0xFFF) | ((data[4] & 0xFFF) << 12);
+
+    /* reg5,6 data */
+    res->ebsLimit = data[5] | ((data[6] & 0xFF) << 16);
+
+    /* reg6 data */
+    res->samplingMode = (data[6] >> 11) & 0x1;
+    res->ebsLimitAction = (data[6] >> 12) & 0x1;
+    res->actionMode = (data[6] >> 13) & 0x1;
+    res->fcDeassertMode = (data[6] >> 14) & 0x1;
+
+    /* reg7 data */
+    res->daNrlEn = (data[7] >> 0) & 0x1;
+    res->saNrlEn = (data[7] >> 1) & 0x1;
+    res->mgmtNrlEn = (data[7] >> 2) & 0x1;
+    res->priMask = (data[7] >> 8) & 0xF;
+    res->priORpt = (data[7] >> 12) & 0x1;
+    res->accountFiltered = (data[7] >> 14) & 0x1;
+    res->accountQConf = (data[7] >> 15) & 0x1;
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* pirl2WriteTSMResource
+*
+* DESCRIPTION:
+*         This function writes IRL Resource to BCM (Bucket Configuration Memory)
+*        in Time Slot Metering Mode.
+*
+* INPUTS:
+*        irlPort - physical port number.
+*        irlRes  - bucket to be used (0 ~ 1).
+*        res     - IRL Resource data
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK on success,
+*        GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+static GT_STATUS pirl2WriteTSMResource
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                irlPort,
+    IN    GT_U32                irlRes,
+    IN  GT_PIRL2_TSM_RESOURCE    *res
+)
+{
+    GT_STATUS       retVal;            /* Functions return value */
+    GT_U16          data[8];     /* temporary Data storage */
+    GT_PIRL2_OPERATION    op;
+    GT_PIRL2_OP_DATA     opData;
+    int                i;
+
+    op = PIRL_WRITE_RESOURCE;
+
+    /* reg0 data */
+    data[0] = 0;
+
+    /* reg1 data */
+    data[1] = 0;
+
+    /* reg2 data */
+    data[2] = 0;
+
+    /* reg3 data */
+    data[3] = (GT_U16)(((res->cbsLimit & 0xFFF) << 4)|    /* Bit[15:4] : CBS Limit[11:0] */
+                    (0x2 << 2));                            /* Bit[3:0] : Bytes to be counted */
+
+    /* reg4 data */
+    data[4] = (GT_U16)(res->cbsLimit >> 12);        /* Bit[11:0] : CBS Limit[23:12] */
+
+    /* reg5 data */
+    data[5] = (GT_U16)(res->ebsLimit & 0xFFFF);        /* Bit[15:0] : EBS Limit[15:0] */
+
+    /* reg6 data */
+    data[6] = (GT_U16)(res->actionMode << 13);        /* Bit[13] : Action Mode */
+
+    /* reg7 data */
+    data[7] = (GT_U16)((res->tsmMode << 7)        |    /* Bit[7]  : TSM Mode */
+                    (res->mgmtNrlEn << 2)         |    /* Bit[2]  : MGMT Nrl En */
+                    (res->priMask << 8)         |    /* Bit[11:8] : Priority Queue Mask */
+                    (res->ctsIntv << 4));            /* Bit[5:4] : Class Timer Slot Interval */
+
+    for(i=0; i<8; i++)
+    {
+        opData.irlPort = irlPort;
+        opData.irlRes = irlRes;
+        opData.irlReg = i;
+        opData.irlData = data[i];
+
+        retVal = pirl2OperationPerform(dev, op, &opData);
+        if (retVal != GT_OK)
+        {
+            DBG_INFO(("PIRL OP Failed.\n"));
+            return retVal;
+        }
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* pirl2ReadTSMResource
+*
+* DESCRIPTION:
+*        This function reads IRL Resource from BCM (Bucket Configuration Memory)
+*        in Time Slot Metering Mode.
+*
+* INPUTS:
+*        irlPort  - physical port number.
+*        irlRes   - bucket to be used (0 ~ 1).
+*
+* OUTPUTS:
+*        res - IRL Resource data
+*
+* RETURNS:
+*         GT_OK on success,
+*         GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+static GT_STATUS pirl2ReadTSMResource
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                irlPort,
+    IN    GT_U32                irlRes,
+    OUT GT_PIRL2_TSM_RESOURCE    *res
+)
+{
+    GT_STATUS       retVal;        /* Functions return value */
+    GT_U16          data[8];     /* temporary Data storage */
+    GT_PIRL2_OPERATION    op;
+    GT_PIRL2_OP_DATA    opData;
+    int                i;
+
+    op = PIRL_READ_RESOURCE;
+
+    for(i=0; i<8; i++)
+    {
+        opData.irlPort = irlPort;
+        opData.irlRes = irlRes;
+        opData.irlReg = i;
+        opData.irlData = 0;
+
+        retVal = pirl2OperationPerform(dev, op, &opData);
+        if (retVal != GT_OK)
+        {
+            DBG_INFO(("PIRL OP Failed.\n"));
+            return retVal;
+        }
+
+        data[i] = (GT_U16)opData.irlData;
+    }
+
+    res->tsmMode = data[7] & (1<<7);
+
+    if(res->tsmMode == GT_FALSE)
+    {
+        /* TMS mode is not set */
+        res->cbsLimit = 0;
+        res->ebsLimit = 0;
+        res->actionMode = 0;
+        res->mgmtNrlEn = 0;
+        res->priMask = 0;
+        res->ctsIntv = 0;
+
+        return GT_OK;
+    }
+
+    /* reg3,4 data */
+    res->cbsLimit = ((data[3] >> 4) & 0xFFF) | ((data[4] & 0xF) << 12);
+
+    /* reg5,6 data */
+    res->ebsLimit = data[5];
+
+    /* reg6 data */
+    res->actionMode = (data[6] >> 13) & 0x1;
+
+    /* reg7 data */
+    res->mgmtNrlEn = (data[7] >> 2) & 0x1;
+    res->priMask = (data[7] >> 8) & 0xF;
+    res->ctsIntv = (data[7] >> 4) & 0x3;
+
+    return GT_OK;
+}
+
+#define PIRL2_DEBUG
+#ifdef PIRL2_DEBUG
+/*******************************************************************************
+* pirl2DumpResource
+*
+* DESCRIPTION:
+*       This function dumps IRL Resource register values.
+*
+* INPUTS:
+*       irlPort  - physical port number.
+*        irlRes   - bucket to be used (0 ~ 4).
+*        dataLen  - data size.
+*
+* OUTPUTS:
+*       data - IRL Resource data
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS pirl2DumpResource
+(
+    IN  GT_QD_DEV              *dev,
+    IN    GT_U32                irlPort,
+    IN    GT_U32                irlRes,
+    IN    GT_U32                dataLen,
+    OUT GT_U16                *data
+)
+{
+    GT_STATUS       retVal;        /* Functions return value */
+    GT_PIRL2_OPERATION    op;
+    GT_PIRL2_OP_DATA    opData;
+    GT_U32                i;
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PIRL2_RESOURCE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    op = PIRL_READ_RESOURCE;
+
+    for(i=0; i<dataLen; i++)
+    {
+        opData.irlPort = irlPort;
+        opData.irlRes = irlRes;
+        opData.irlReg = i;
+        opData.irlData = 0;
+
+        retVal = pirl2OperationPerform(dev, op, &opData);
+        if (retVal != GT_OK)
+        {
+            DBG_INFO(("PIRL OP Failed.\n"));
+            return retVal;
+        }
+
+        data[i] = (GT_U16)opData.irlData;
+    }
+
+    return GT_OK;
+}
+#endif /* PIRL2_DEBUG */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPTP.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPTP.c
new file mode 100644
index 000000000000..11832f00d73b
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPTP.c
@@ -0,0 +1,5681 @@
+#include <Copyright.h>
+
+/*******************************************************************************
+* gtPTP.c
+*
+* DESCRIPTION:
+*       API definitions for Precise Time Protocol logic
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtSem.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+
+#ifdef CONFIG_AVB_FPGA
+
+#undef USE_SINGLE_READ
+
+#define AVB_SMI_ADDR        0xC
+
+#define QD_REG_PTP_INT_OFFSET        0
+#define QD_REG_PTP_INTEN_OFFSET        1
+#define QD_REG_PTP_FREQ_OFFSET        4
+#define QD_REG_PTP_PHASE_OFFSET        6
+#define QD_REG_PTP_CLK_CTRL_OFFSET    4
+#define QD_REG_PTP_CYCLE_INTERVAL_OFFSET        5
+#define QD_REG_PTP_CYCLE_ADJ_OFFSET                6
+#define QD_REG_PTP_PLL_CTRL_OFFSET    7
+#define QD_REG_PTP_CLK_SRC_OFFSET    0x9
+#define QD_REG_PTP_P9_MODE_OFFSET    0xA
+#define QD_REG_PTP_RESET_OFFSET        0xB
+
+#define GT_PTP_MERGE_32BIT(_high16,_low16)    (((_high16)<<16)|(_low16))
+#define GT_PTP_GET_HIGH16(_data)    ((_data) >> 16) & 0xFFFF
+#define GT_PTP_GET_LOW16(_data)        (_data) & 0xFFFF
+
+#if 0
+
+#define AVB_FPGA_READ_REG       gprtGetPhyReg
+#define AVB_FPGA_WRITE_REG      gprtSetPhyReg
+unsigned int (*avbFpgaReadReg)(void* unused, unsigned int port, unsigned int reg, unsigned int* data);
+unsigned int (*avbFpgaWriteReg)(void* unused, unsigned int port, unsigned int reg, unsigned int data);
+#else
+
+/* for RMGMT access  and can be controlled by <sw_apps -rmgmt 0/1> */
+unsigned int (*avbFpgaReadReg)(void* unused, unsigned int port, unsigned int reg, GT_U32* data)=gprtGetPhyReg;
+unsigned int (*avbFpgaWriteReg)(void* unused, unsigned int port, unsigned int reg, GT_U32 data)=gprtSetPhyReg;
+#define AVB_FPGA_READ_REG       avbFpgaReadReg
+#define AVB_FPGA_WRITE_REG      avbFpgaWriteReg
+
+#endif /* 0 */
+
+#endif
+
+#if 0
+#define GT_PTP_BUILD_TIME(_time1, _time2)    (((_time1) << 16) | (_time2))
+#define GT_PTP_L16_TIME(_time1)    ((_time1) & 0xFFFF)
+#define GT_PTP_H16_TIME(_time1)    (((_time1) >> 16) & 0xFFFF)
+#endif
+
+
+/****************************************************************************/
+/* PTP operation function declaration.                                    */
+/****************************************************************************/
+extern GT_STATUS ptpOperationPerform
+(
+    IN   GT_QD_DEV             *dev,
+    IN   GT_PTP_OPERATION    ptpOp,
+    INOUT GT_PTP_OP_DATA     *opData
+);
+
+
+/*******************************************************************************
+* gptpSetConfig
+*
+* DESCRIPTION:
+*       This routine writes PTP configuration parameters.
+*
+* INPUTS:
+*        ptpData  - PTP configuration parameters.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetConfig
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PTP_CONFIG    *ptpData
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_LPORT            port;
+    GT_PTP_PORT_CONFIG    ptpPortData;
+
+    DBG_INFO(("gptpSetConfig Called.\n"));
+
+    /* check if device supports this feature */
+#ifndef CONFIG_AVB_FPGA
+    if (!IS_IN_DEV_GROUP(dev,DEV_PTP))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xF;    /* Global register */
+    op = PTP_WRITE_DATA;
+
+    /* setting PTPEType, offset 0 */
+    opData.ptpAddr = 0;
+    opData.ptpData = ptpData->ptpEType;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    /* setting MsgIDTSEn, offset 1 */
+    opData.ptpAddr = 1;
+    opData.ptpData = ptpData->msgIdTSEn;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing MsgIDTSEn.\n"));
+        return GT_FAIL;
+    }
+
+    /* setting TSArrPtr, offset 2 */
+    opData.ptpAddr = 2;
+    opData.ptpData = ptpData->tsArrPtr;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing TSArrPtr.\n"));
+        return GT_FAIL;
+    }
+
+#ifndef CONFIG_AVB_FPGA
+    if (IS_IN_DEV_GROUP(dev,DEV_PTP_2))
+#endif
+    {
+        ptpPortData.transSpec = ptpData->transSpec;
+        ptpPortData.disTSpec = 1;    /* default value */
+        ptpPortData.disTSOverwrite = ptpData->disTSOverwrite;
+        ptpPortData.ipJump = 2;        /* default value */
+        ptpPortData.etJump = 12;    /* default value */
+
+        /* per port configuration */
+        for(port=0; port<dev->numOfPorts; port++)
+        {
+            ptpPortData.ptpArrIntEn = (ptpData->ptpArrIntEn & (1 << port))? GT_TRUE : GT_FALSE;
+            ptpPortData.ptpDepIntEn = (ptpData->ptpDepIntEn & (1 << port))? GT_TRUE : GT_FALSE;
+          if((retVal = gptpSetPortConfig(dev, port, &ptpPortData)) != GT_OK)
+          {
+                DBG_INFO(("Failed gptpSetPortConfig.\n"));
+                return GT_FAIL;
+          }
+          if (IS_IN_DEV_GROUP(dev, DEV_ARRV_TS_MODE))
+      {
+            if(!((ptpData->ptpPortConfig[port].arrTSMode==GT_PTP_TS_MODE_IN_REG)||(ptpData->ptpPortConfig[port].arrTSMode==GT_PTP_TS_MODE_IN_RESERVED_2)||(ptpData->ptpPortConfig[port].arrTSMode==GT_PTP_TS_MODE_IN_FRAME_END)))
+              ptpData->ptpPortConfig[port].arrTSMode=GT_PTP_TS_MODE_IN_REG;
+            if((retVal = gptpSetPortTsMode(dev, port, ptpData->ptpPortConfig[port].arrTSMode)) != GT_OK)
+            {
+                DBG_INFO(("Failed gptpSetPortConfig.\n"));
+                return GT_FAIL;
+            }
+      }
+        }
+
+        return GT_OK;
+    }
+
+    /* old PTP block */
+    /* setting PTPArrIntEn, offset 3 */
+    opData.ptpAddr = 3;
+    opData.ptpData = GT_LPORTVEC_2_PORTVEC(ptpData->ptpArrIntEn);
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing PTPArrIntEn.\n"));
+        return GT_FAIL;
+    }
+
+    /* setting PTPDepIntEn, offset 4 */
+    opData.ptpAddr = 4;
+    opData.ptpData = GT_LPORTVEC_2_PORTVEC(ptpData->ptpDepIntEn);
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing PTPDepIntEn.\n"));
+        return GT_FAIL;
+    }
+
+    /* TransSpec, MsgIDStartBit, DisTSOverwrite bit, offset 5 */
+    op = PTP_READ_DATA;
+    opData.ptpAddr = 5;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    op = PTP_WRITE_DATA;
+
+#ifdef CONFIG_AVB_FPGA
+    opData.ptpData = ((ptpData->transSpec&0xF) << 12) | ((ptpData->msgIdStartBit&0x7) << 9) |
+                    (opData.ptpData & 0x1) | ((ptpData->disTSOverwrite?1:0) << 1);
+#else
+    opData.ptpData = ((ptpData->transSpec&0xF) << 12) | (opData.ptpData & 0x1) | ((ptpData->disTSOverwrite?1:0) << 1);
+#endif
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing MsgIDStartBit & DisTSOverwrite.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gptpGetConfig
+*
+* DESCRIPTION:
+*       This routine reads PTP configuration parameters.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        ptpData  - PTP configuration parameters.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetConfig
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_PTP_CONFIG    *ptpData
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_LPORT            port;
+    GT_PTP_PORT_CONFIG    ptpPortData;
+
+    DBG_INFO(("gptpGetConfig Called.\n"));
+
+    /* check if device supports this feature */
+#ifndef CONFIG_AVB_FPGA
+    if (!IS_IN_DEV_GROUP(dev,DEV_PTP))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xF;    /* Global register */
+    op = PTP_READ_DATA;
+
+    /* getting PTPEType, offset 0 */
+    opData.ptpAddr = 0;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    ptpData->ptpEType = opData.ptpData;
+
+    /* getting MsgIDTSEn, offset 1 */
+    opData.ptpAddr = 1;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading MsgIDTSEn.\n"));
+        return GT_FAIL;
+    }
+
+    ptpData->msgIdTSEn = opData.ptpData;
+
+    /* getting TSArrPtr, offset 2 */
+    opData.ptpAddr = 2;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading TSArrPtr.\n"));
+        return GT_FAIL;
+    }
+
+    ptpData->tsArrPtr = opData.ptpData;
+
+#ifndef CONFIG_AVB_FPGA
+    if (IS_IN_DEV_GROUP(dev,DEV_PTP_2))
+#endif
+    {
+        ptpData->ptpArrIntEn = 0;
+        ptpData->ptpDepIntEn = 0;
+
+        /* per port configuration */
+        for(port=0; port<dev->numOfPorts; port++)
+        {
+            if((retVal = gptpGetPortConfig(dev, port, &ptpPortData)) != GT_OK)
+            {
+                DBG_INFO(("Failed gptpGetPortConfig.\n"));
+                return GT_FAIL;
+            }
+
+            ptpData->ptpArrIntEn |= (ptpPortData.ptpArrIntEn ? (1 << port) : 0);
+            ptpData->ptpDepIntEn |= (ptpPortData.ptpDepIntEn ? (1 << port) : 0);
+            ptpData->ptpPortConfig[port].ptpArrIntEn = ptpPortData.ptpArrIntEn;
+            ptpData->ptpPortConfig[port].ptpDepIntEn = ptpPortData.ptpDepIntEn;
+
+
+          if (IS_IN_DEV_GROUP(dev, DEV_ARRV_TS_MODE))
+      {
+            ptpData->ptpPortConfig[port].transSpec = ptpPortData.transSpec;
+        ptpData->ptpPortConfig[port].disTSOverwrite = ptpPortData.disTSOverwrite;
+            if((retVal = gptpGetPortTsMode(dev, port, &ptpData->ptpPortConfig[port].arrTSMode)) != GT_OK)
+            {
+                DBG_INFO(("Failed gptpGetPortConfig.\n"));
+                return GT_FAIL;
+            }
+      }
+        }
+
+        ptpData->transSpec = ptpPortData.transSpec;
+        ptpData->disTSOverwrite = ptpPortData.disTSOverwrite;
+
+        ptpData->msgIdStartBit = 4;
+
+        return GT_OK;
+    }
+
+    /* old PTP block */
+    /* getting PTPArrIntEn, offset 3 */
+    opData.ptpAddr = 3;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading PTPArrIntEn.\n"));
+        return GT_FAIL;
+    }
+    opData.ptpData &= dev->validPortVec;
+    ptpData->ptpArrIntEn = GT_PORTVEC_2_LPORTVEC(opData.ptpData);
+
+
+    /* getting PTPDepIntEn, offset 4 */
+    opData.ptpAddr = 4;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading PTPDepIntEn.\n"));
+        return GT_FAIL;
+    }
+
+    opData.ptpData &= dev->validPortVec;
+    ptpData->ptpDepIntEn = GT_PORTVEC_2_LPORTVEC(opData.ptpData);
+
+    /* MsgIDStartBit, DisTSOverwrite bit, offset 5 */
+    opData.ptpAddr = 5;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    ptpData->transSpec = (opData.ptpData >> 12) & 0xF;
+#ifdef CONFIG_AVB_FPGA
+    ptpData->msgIdStartBit = (opData.ptpData >> 9) & 0x7;
+#else
+    ptpData->msgIdStartBit = 0;
+#endif
+    ptpData->disTSOverwrite = ((opData.ptpData >> 1) & 0x1) ? GT_TRUE : GT_FALSE;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gptpSetGlobalConfig
+*
+* DESCRIPTION:
+*       This routine writes PTP global configuration parameters.
+*
+* INPUTS:
+*        ptpData  - PTP global configuration parameters.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetGlobalConfig
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PTP_GLOBAL_CONFIG    *ptpData
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gptpSetGlobalConfig Called.\n"));
+
+    /* check if device supports this feature */
+#ifndef CONFIG_AVB_FPGA
+    if (!IS_IN_DEV_GROUP(dev,DEV_PTP))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xF;    /* Global register */
+    op = PTP_WRITE_DATA;
+
+    /* setting PTPEType, offset 0 */
+    opData.ptpAddr = 0;
+    opData.ptpData = ptpData->ptpEType;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    /* setting MsgIDTSEn, offset 1 */
+    opData.ptpAddr = 1;
+    opData.ptpData = ptpData->msgIdTSEn;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing MsgIDTSEn.\n"));
+        return GT_FAIL;
+    }
+
+    /* setting TSArrPtr, offset 2 */
+    opData.ptpAddr = 2;
+    opData.ptpData = ptpData->tsArrPtr;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing TSArrPtr.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gptpGlobalGetConfig
+*
+* DESCRIPTION:
+*       This routine reads PTP global configuration parameters.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        ptpData  - PTP global configuration parameters.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetGlobalConfig
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_PTP_GLOBAL_CONFIG    *ptpData
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gptpGetGlobalConfig Called.\n"));
+
+    /* check if device supports this feature */
+#ifndef CONFIG_AVB_FPGA
+    if (!IS_IN_DEV_GROUP(dev,DEV_PTP))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xF;    /* Global register */
+    op = PTP_READ_DATA;
+
+    /* getting PTPEType, offset 0 */
+    opData.ptpAddr = 0;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    ptpData->ptpEType = opData.ptpData;
+
+    /* getting MsgIDTSEn, offset 1 */
+    opData.ptpAddr = 1;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading MsgIDTSEn.\n"));
+        return GT_FAIL;
+    }
+
+    ptpData->msgIdTSEn = opData.ptpData;
+
+    /* getting TSArrPtr, offset 2 */
+    opData.ptpAddr = 2;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading TSArrPtr.\n"));
+        return GT_FAIL;
+    }
+
+    ptpData->tsArrPtr = opData.ptpData;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gptpSetPortConfig
+*
+* DESCRIPTION:
+*       This routine writes PTP port configuration parameters.
+*
+* INPUTS:
+*        ptpData  - PTP port configuration parameters.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetPortConfig
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_PTP_PORT_CONFIG    *ptpData
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U32          hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gptpSetPortConfig Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = (GT_U32)GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+#ifndef CONFIG_AVB_FPGA
+    if (!IS_IN_DEV_GROUP(dev,DEV_PTP_2))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    if (ptpData->transSpec > 0xF)    /* 4 bits */
+        return GT_BAD_PARAM;
+
+    if (ptpData->etJump > 0x1F)    /* 5 bits */
+        return GT_BAD_PARAM;
+
+    if (ptpData->ipJump > 0x3F)    /* 6 bits */
+        return GT_BAD_PARAM;
+
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = hwPort;
+
+    /* TransSpec, DisTSpecCheck, DisTSOverwrite bit, offset 0 */
+    op = PTP_READ_DATA;
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    op = PTP_WRITE_DATA;
+    opData.ptpData = (ptpData->transSpec << 12) | (opData.ptpData & 0x1) |
+                    ((ptpData->disTSpec?1:0) << 11) |
+                    ((ptpData->disTSOverwrite?1:0) << 1);
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing TransSpec,DisTSpecCheck,DisTSOverwrite.\n"));
+        return GT_FAIL;
+    }
+
+    /* setting etJump and ipJump, offset 1 */
+    opData.ptpAddr = 1;
+    opData.ptpData = (ptpData->ipJump << 8) | ptpData->etJump;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing MsgIDTSEn.\n"));
+        return GT_FAIL;
+    }
+
+    /* setting Int, offset 2 */
+    opData.ptpAddr = 2;
+    opData.ptpData = (ptpData->ptpArrIntEn?1:0) |
+                    ((ptpData->ptpDepIntEn?1:0) << 1);
+    if (IS_IN_DEV_GROUP(dev, DEV_ARRV_TS_MODE))
+    {
+       opData.ptpData |= ((ptpData->arrTSMode&0xff) << 8);  /* from Agate to set ArrTSMode */
+    }
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing TSArrPtr.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gptpGetPortConfig
+*
+* DESCRIPTION:
+*       This routine reads PTP configuration parameters for a port.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        ptpData  - PTP port configuration parameters.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetPortConfig
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    OUT GT_PTP_PORT_CONFIG    *ptpData
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U32          hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gptpGetPortConfig Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = (GT_U32)GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+#ifndef CONFIG_AVB_FPGA
+    if (!IS_IN_DEV_GROUP(dev,DEV_PTP_2))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = hwPort;
+    op = PTP_READ_DATA;
+
+    /* TransSpec, DisTSpecCheck, DisTSOverwrite bit, offset 0 */
+    opData.ptpAddr = 0;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    ptpData->transSpec = opData.ptpData >> 12;
+    ptpData->disTSpec = ((opData.ptpData >> 11) & 0x1) ? GT_TRUE : GT_FALSE;
+    ptpData->disTSOverwrite = ((opData.ptpData >> 1) & 0x1) ? GT_TRUE : GT_FALSE;
+
+    /* getting MsgIDTSEn, offset 1 */
+    opData.ptpAddr = 1;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading MsgIDTSEn.\n"));
+        return GT_FAIL;
+    }
+
+    ptpData->ipJump = (opData.ptpData >> 8) & 0x3F;
+    ptpData->etJump = opData.ptpData & 0x1F;
+
+    /* getting TSArrPtr, offset 2 */
+    opData.ptpAddr = 2;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading TSArrPtr.\n"));
+        return GT_FAIL;
+    }
+
+    ptpData->ptpDepIntEn = ((opData.ptpData >> 1) & 0x1) ? GT_TRUE : GT_FALSE;
+    ptpData->ptpArrIntEn = (opData.ptpData & 0x1) ? GT_TRUE : GT_FALSE;
+    if (IS_IN_DEV_GROUP(dev, DEV_ARRV_TS_MODE))
+    {
+      ptpData->arrTSMode = (opData.ptpData &0xff00) >> 8;  /* from Agate to get ArrTSMode */
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gptpSetPTPEn
+*
+* DESCRIPTION:
+*       This routine enables or disables PTP.
+*
+* INPUTS:
+*        en - GT_TRUE to enable PTP, GT_FALSE to disable PTP
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetPTPEn
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL        en
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_LPORT            port;
+
+    DBG_INFO(("gptpSetPTPEn Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PTP))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+#ifndef CONFIG_AVB_FPGA
+    if (IS_IN_DEV_GROUP(dev,DEV_PTP_2))
+#endif
+    {
+        /* per port configuration */
+        for(port=0; port<dev->numOfPorts; port++)
+        {
+            if((retVal = gptpSetPortPTPEn(dev, port, en)) != GT_OK)
+            {
+                DBG_INFO(("Failed gptpSetPortPTPEn.\n"));
+                return GT_FAIL;
+            }
+        }
+
+        return GT_OK;
+    }
+
+    /* old PTP block */
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xF;    /* Global register */
+    op = PTP_READ_DATA;
+    opData.ptpAddr = 5;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    op = PTP_WRITE_DATA;
+    opData.ptpData &= ~0x1;
+    opData.ptpData |= (en ? 0 : 1);
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing MsgIDStartBit & DisTSOverwrite.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gptpGetPTPEn
+*
+* DESCRIPTION:
+*       This routine checks if PTP is enabled.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if enabled, GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetPTPEn
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *en
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gptpGetPTPEn Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PTP))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+#ifndef CONFIG_AVB_FPGA
+    if (IS_IN_DEV_GROUP(dev,DEV_PTP_2))
+#endif
+    {
+        if((retVal = gptpGetPortPTPEn(dev, 0, en)) != GT_OK)
+        {
+               DBG_INFO(("Failed gptpGetPortPTPEn.\n"));
+            return GT_FAIL;
+        }
+
+        return GT_OK;
+    }
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xF;    /* Global register */
+    op = PTP_READ_DATA;
+    opData.ptpAddr = 5;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    *en = (opData.ptpData & 0x1) ? GT_FALSE : GT_TRUE;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gptpSetPortPTPEn
+*
+* DESCRIPTION:
+*       This routine enables or disables PTP on a port.
+*
+* INPUTS:
+*        en - GT_TRUE to enable PTP, GT_FALSE to disable PTP
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetPortPTPEn
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_BOOL        en
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U32            hwPort;
+
+    DBG_INFO(("gptpSetPortPTPEn Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = (GT_U32)GT_LPORT_2_PORT(port);
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PTP_2))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+    opData.ptpAddr = 0;
+
+    opData.ptpPort = hwPort;
+
+    op = PTP_READ_DATA;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    op = PTP_WRITE_DATA;
+
+    if (en)
+        opData.ptpData &= ~0x1;
+    else
+        opData.ptpData |= 0x1;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing TransSpec,DisTSpecCheck,DisTSOverwrite.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gptpGetPortPTPEn
+*
+* DESCRIPTION:
+*       This routine checks if PTP is enabled on a port.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if enabled, GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetPortPTPEn
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    OUT GT_BOOL        *en
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U32            hwPort;
+
+    DBG_INFO(("gptpGetPortPTPEn Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = (GT_U32)GT_LPORT_2_PORT(port);
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PTP_2))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpAddr = 0;
+    opData.ptpPort = hwPort;
+
+    op = PTP_READ_DATA;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    *en = (opData.ptpData & 0x1) ? GT_FALSE : GT_TRUE;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gptpSetPortTsMode
+*
+* DESCRIPTION:
+*       This routine set PTP arrive 0 TS mode on a port.
+*
+* INPUTS:
+*        tsMod - GT_PTP_TS_MODE_IN_REG
+*                GT_PTP_TS_MODE_IN_RESERVED_2
+*                GT_PTP_TS_MODE_IN_FRAME_END
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetPortTsMode
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    IN  GT_PTP_TS_MODE  tsMode
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U32            hwPort;
+
+    DBG_INFO(("gptpSetPortTsMode Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = (GT_U32)GT_LPORT_2_PORT(port);
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PTP_2))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    if (!(IS_IN_DEV_GROUP(dev, DEV_ARRV_TS_MODE)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    opData.ptpBlock = 0x0;    /* PTP register space */
+    opData.ptpAddr = 2;
+
+    opData.ptpPort = hwPort;
+
+    op = PTP_READ_DATA;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading TsMode.\n"));
+        return GT_FAIL;
+    }
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpData &= 0xff;
+    switch (tsMode)
+    {
+      case GT_PTP_TS_MODE_IN_REG:
+        break;
+      case GT_PTP_TS_MODE_IN_RESERVED_2:
+        opData.ptpData |= (PTP_TS_LOC_RESERVED_2<<8); /* set TS in reserved 1 */
+        break;
+      case GT_PTP_TS_MODE_IN_FRAME_END:
+        opData.ptpData |= (PTP_FRAME_SIZE<<8); /* set TS in end of PTP frame */
+        break;
+      default:
+        DBG_INFO(("GT_NOT_SUPPORTED the TS mode\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing Ts Mode.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gptpGetPortTsMode
+*
+* DESCRIPTION:
+*       This routine get PTP arrive 0 TS mode on a port.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        tsMod - GT_PTP_TS_MODE_IN_REG
+*                GT_PTP_TS_MODE_IN_RESERVED_2
+*                GT_PTP_TS_MODE_IN_FRAME_END
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetPortTsMode
+(
+    IN  GT_QD_DEV     *dev,
+    IN    GT_LPORT    port,
+    OUT GT_PTP_TS_MODE  *tsMode
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U32            hwPort;
+    GT_U16            tmpData;
+
+    DBG_INFO(("gptpGetPortTsMode Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = (GT_U32)GT_LPORT_2_PORT(port);
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PTP_2))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    if (!(IS_IN_DEV_GROUP(dev, DEV_ARRV_TS_MODE)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    opData.ptpBlock = 0x0;    /* PTP register space */
+    opData.ptpAddr = 2;
+
+    opData.ptpPort = hwPort;
+
+    op = PTP_READ_DATA;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading TsMode.\n"));
+        return GT_FAIL;
+    }
+
+    tmpData = qdLong2Short(opData.ptpData >>8);
+    if (tmpData>=PTP_FRAME_SIZE)
+      *tsMode = GT_PTP_TS_MODE_IN_FRAME_END;
+    else if (tmpData == PTP_TS_LOC_RESERVED_2)
+      *tsMode = GT_PTP_TS_MODE_IN_RESERVED_2;
+    else
+      *tsMode = GT_PTP_TS_MODE_IN_REG;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gptpGetPTPInt
+*
+* DESCRIPTION:
+*       This routine gets PTP interrupt status for each port.
+*        The PTP Interrupt bit gets set for a given port when an incoming PTP
+*        frame is time stamped and PTPArrIntEn for that port is set to 0x1.
+*        Similary PTP Interrupt bit gets set for a given port when an outgoing
+*        PTP frame is time stamped and PTPDepIntEn for that port is set to 0x1.
+*        This bit gets cleared upon software reading and clearing the corresponding
+*        time counter valid bits that are valid for that port.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        ptpInt     - interrupt status for each port (bit 0 for port 0, bit 1 for port 1, etc.)
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetPTPInt
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U32        *ptpInt
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gptpGetPTPInt Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PTP))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+    opData.ptpPort = 0xF;    /* Global register */
+    op = PTP_READ_DATA;
+    opData.ptpAddr = 8;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    opData.ptpData &= (1 << dev->maxPorts) - 1;
+
+    *ptpInt = GT_PORTVEC_2_LPORTVEC(opData.ptpData);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gptpGetPTPGlobalTime
+*
+* DESCRIPTION:
+*       This routine gets the global timer value that is running off of the free
+*        running switch core clock.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        ptpTime    - PTP global time
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetPTPGlobalTime
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U32        *ptpTime
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gptpGetPTPGlobalTime Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PTP))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+#ifndef USE_SINGLE_READ
+    opData.ptpBlock = 0x0;    /* PTP register space */
+    opData.ptpPort = IS_IN_DEV_GROUP(dev,DEV_TAI)?0xE:0xF;    /* Global register */
+    op = PTP_READ_MULTIPLE_DATA;
+    opData.ptpAddr = IS_IN_DEV_GROUP(dev,DEV_TAI)?0xE:9;
+    opData.nData = 2;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    *ptpTime = GT_PTP_BUILD_TIME(opData.ptpMultiData[1],opData.ptpMultiData[0]);
+#else
+    {
+    GT_U32 data[2];
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+    opData.ptpPort = IS_IN_DEV_GROUP(dev,DEV_TAI)?0xE:0xF;    /* Global register */
+    op = PTP_READ_DATA;
+    opData.ptpAddr = IS_IN_DEV_GROUP(dev,DEV_TAI)?0xE:9;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    data[0] = opData.ptpData;
+
+    op = PTP_READ_DATA;
+    opData.ptpAddr++;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    data[1] = opData.ptpData;
+
+    *ptpTime = GT_PTP_BUILD_TIME(data[1],data[0]);
+
+    }
+#endif
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gptpGetTimeStamped
+*
+* DESCRIPTION:
+*        This routine retrieves the PTP port status that includes time stamp value
+*        and sequce Id that are captured by PTP logic for a PTP frame that needs
+*        to be time stamped.
+*
+* INPUTS:
+*       port         - logical port number.
+*       timeToRead    - Arr0, Arr1, or Dep time (GT_PTP_TIME enum type)
+*
+* OUTPUTS:
+*        ptpStatus    - PTP port status
+*
+* RETURNS:
+*       GT_OK         - on success
+*       GT_FAIL     - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetTimeStamped
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN    GT_PTP_TIME    timeToRead,
+    OUT GT_PTP_TS_STATUS    *ptpStatus
+)
+{
+    GT_STATUS           retVal;
+    GT_U32                hwPort;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U32                baseReg;
+
+    DBG_INFO(("gptpGetTimeStamped Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PTP))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    hwPort = (GT_U32)GT_LPORT_2_PORT(port);
+    if (hwPort == GT_INVALID_PORT)
+    {
+        DBG_INFO(("Invalid port number\n"));
+        return GT_BAD_PARAM;
+    }
+
+#ifndef CONFIG_AVB_FPGA
+    if (IS_IN_DEV_GROUP(dev,DEV_PTP_2))
+        baseReg = 8;
+    else
+        baseReg = 0;
+#else
+    baseReg = 8;
+#endif
+
+    switch (timeToRead)
+    {
+        case PTP_ARR0_TIME:
+            opData.ptpAddr = baseReg + 0;
+            break;
+        case PTP_ARR1_TIME:
+            opData.ptpAddr = baseReg + 4;
+            break;
+        case PTP_DEP_TIME:
+            opData.ptpAddr = baseReg + 8;
+            break;
+        default:
+            DBG_INFO(("Invalid time to be read\n"));
+            return GT_BAD_PARAM;
+    }
+
+    opData.ptpPort = hwPort;
+    opData.ptpBlock = 0;
+
+#ifndef USE_SINGLE_READ
+    op = PTP_READ_TIMESTAMP_DATA;
+    opData.nData = 4;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    ptpStatus->isValid = (opData.ptpMultiData[0] & 0x1) ? GT_TRUE : GT_FALSE;
+    ptpStatus->status = (GT_PTP_INT_STATUS)((opData.ptpMultiData[0] >> 1) & 0x3);
+    ptpStatus->timeStamped = GT_PTP_BUILD_TIME(opData.ptpMultiData[2],opData.ptpMultiData[1]);
+    ptpStatus->ptpSeqId = opData.ptpMultiData[3];
+#else
+    {
+    GT_U32 data[4], i;
+
+    op = PTP_READ_DATA;
+
+    for (i=0; i<4; i++)
+    {
+        if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+        {
+            DBG_INFO(("Failed reading DisPTP.\n"));
+            return GT_FAIL;
+        }
+
+        data[i] = opData.ptpData;
+        opData.ptpAddr++;
+    }
+
+    ptpStatus->isValid = (data[0] & 0x1) ? GT_TRUE : GT_FALSE;
+    ptpStatus->status = (GT_PTP_INT_STATUS)((data[0] >> 1) & 0x3);
+    ptpStatus->timeStamped = GT_PTP_BUILD_TIME(data[2],data[1]);
+    ptpStatus->ptpSeqId = data[3];
+
+    }
+#endif
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gptpResetTimeStamp
+*
+* DESCRIPTION:
+*        This routine resets PTP Time valid bit so that PTP logic can time stamp
+*        a next PTP frame that needs to be time stamped.
+*
+* INPUTS:
+*       port         - logical port number.
+*       timeToReset    - Arr0, Arr1, or Dep time (GT_PTP_TIME enum type)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK         - on success
+*       GT_FAIL     - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpResetTimeStamp
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN    GT_PTP_TIME    timeToReset
+)
+{
+    GT_STATUS           retVal;
+    GT_U32                hwPort;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U32                baseReg;
+
+    DBG_INFO(("gptpResetTimeStamp Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PTP))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    hwPort = (GT_U32)GT_LPORT_2_PORT(port);
+    if (hwPort == GT_INVALID_PORT)
+    {
+        DBG_INFO(("Invalid port number\n"));
+        return GT_BAD_PARAM;
+    }
+
+#ifndef CONFIG_AVB_FPGA
+    if (IS_IN_DEV_GROUP(dev,DEV_PTP_2))
+        baseReg = 8;
+    else
+        baseReg = 0;
+#else
+    baseReg = 8;
+#endif
+
+    switch (timeToReset)
+    {
+        case PTP_ARR0_TIME:
+            opData.ptpAddr = baseReg + 0;
+            break;
+        case PTP_ARR1_TIME:
+            opData.ptpAddr = baseReg + 4;
+            break;
+        case PTP_DEP_TIME:
+            opData.ptpAddr = baseReg + 8;
+            break;
+        default:
+            DBG_INFO(("Invalid time to reset\n"));
+            return GT_BAD_PARAM;
+    }
+
+    opData.ptpPort = hwPort;
+    opData.ptpData = 0;
+    opData.ptpBlock = 0;
+    op = PTP_WRITE_DATA;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing Port Status.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gptpGetReg
+*
+* DESCRIPTION:
+*       This routine reads PTP register.
+*
+* INPUTS:
+*       port         - logical port number.
+*       regOffset    - register to read
+*
+* OUTPUTS:
+*        data        - register data
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetReg
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN  GT_U32        regOffset,
+    OUT GT_U32        *data
+)
+{
+    GT_STATUS           retVal;
+    GT_U32                hwPort;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gptpGetReg Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PTP))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    hwPort = (GT_U32)port;
+
+    if (regOffset > 0x1F)
+    {
+        DBG_INFO(("Invalid reg offset\n"));
+        return GT_BAD_PARAM;
+    }
+
+    op = PTP_READ_DATA;
+    opData.ptpPort = hwPort;
+    opData.ptpAddr = regOffset;
+    opData.ptpBlock = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    *data = opData.ptpData;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gptpSetReg
+*
+* DESCRIPTION:
+*       This routine writes data to PTP register.
+*
+* INPUTS:
+*       port         - logical port number
+*       regOffset    - register to be written
+*        data        - data to be written
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetReg
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN  GT_U32        regOffset,
+    IN  GT_U32        data
+)
+{
+    GT_STATUS           retVal;
+    GT_U32                hwPort;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gptpSetReg Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PTP))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    hwPort = (GT_U32)port;
+
+    if ((regOffset > 0x1F) || (data > 0xFFFF))
+    {
+        DBG_INFO(("Invalid reg offset/data\n"));
+        return GT_BAD_PARAM;
+    }
+
+    op = PTP_WRITE_DATA;
+    opData.ptpPort = hwPort;
+    opData.ptpAddr = regOffset;
+    opData.ptpData = data;
+    opData.ptpBlock = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/* TAI functions */
+/*******************************************************************************
+* gtaiSetEventConfig
+*
+* DESCRIPTION:
+*       This routine sets TAI Event Capture configuration parameters.
+*
+* INPUTS:
+*        eventData  - TAI Event Capture configuration parameters.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiSetEventConfig
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_TAI_EVENT_CONFIG    *eventData
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gtaiSetEventConfig Called.\n"));
+
+    /* check if device supports this feature */
+#ifndef CONFIG_AVB_FPGA
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xE;    /* TAI register */
+    op = PTP_READ_DATA;
+
+    /* getting PTPEType, offset 0 */
+    opData.ptpAddr = 0;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    opData.ptpData &= ~((3<<14)|(1<<8));
+    if (eventData->intEn)
+        opData.ptpData |= (1 << 8);
+    if (eventData->eventOverwrite)
+        opData.ptpData |= (1 << 15);
+    if (eventData->eventCtrStart)
+        opData.ptpData |= (1 << 14);
+    if (IS_IN_DEV_GROUP(dev,DEV_TAI_EXT_CLK))
+    {
+      opData.ptpData &= ~(1<<13);
+      if (eventData->eventPhase)
+        opData.ptpData |= (1 << 13);
+    }
+    if (IS_IN_DEV_GROUP(dev,DEV_TAI_TRIG_GEN)) /* after 6320 */
+    {
+	}
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+	if(IS_IN_DEV_GROUP(dev,DEV_TAI_TRIG_GEN))
+	{
+     /* getting Capture trigger, offset 9 */
+      op = PTP_READ_DATA;
+      opData.ptpAddr = 9;
+      if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+	  {
+        DBG_INFO(("Failed reading PTPEType.\n"));
+        return GT_FAIL;
+	  }
+      opData.ptpData &= ~(1<<14);
+      opData.ptpData |= (eventData->captTrigEvent==GT_TRUE)?0x4000:0x0;
+
+      op = PTP_WRITE_DATA;
+      if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+	  {
+        DBG_INFO(("Failed reading PTPEType.\n"));
+        return GT_FAIL;
+	  }
+	}
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gtaiGetEventConfig
+*
+* DESCRIPTION:
+*       This routine gets TAI Event Capture configuration parameters.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        eventData  - TAI Event Capture configuration parameters.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiGetEventConfig
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_TAI_EVENT_CONFIG    *eventData
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gtaiGetEventConfig Called.\n"));
+
+    /* check if device supports this feature */
+#ifndef CONFIG_AVB_FPGA
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xE;    /* TAI register */
+    op = PTP_READ_DATA;
+
+    /* getting PTPEType, offset 0 */
+    opData.ptpAddr = 0;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    eventData->intEn = (opData.ptpData & (1<<8))?GT_TRUE:GT_FALSE;
+    eventData->eventOverwrite = (opData.ptpData & (1<<15))?GT_TRUE:GT_FALSE;
+    eventData->eventCtrStart = (opData.ptpData & (1<<14))?GT_TRUE:GT_FALSE;
+    if (IS_IN_DEV_GROUP(dev,DEV_TAI_EXT_CLK))
+    {
+      eventData->eventPhase = (opData.ptpData & (1<<13))?GT_TRUE:GT_FALSE;
+    }
+
+	if(IS_IN_DEV_GROUP(dev,DEV_TAI_TRIG_GEN))
+	{
+     /* getting Capture trigger, offset 9 */
+      op = PTP_READ_DATA;
+      opData.ptpAddr = 9;
+      if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+	  {
+        DBG_INFO(("Failed reading PTPEType.\n"));
+        return GT_FAIL;
+	  }
+      eventData->captTrigEvent = (opData.ptpData & (1<<14))?GT_TRUE:GT_FALSE;
+	}
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gtaiGetEventStatus
+*
+* DESCRIPTION:
+*       This routine gets TAI Event Capture status.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        eventData  - TAI Event Capture configuration parameters.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiGetEventStatus
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_TAI_EVENT_STATUS    *status
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U32                 data[2];
+
+    DBG_INFO(("gtaiGetEventStatus Called.\n"));
+
+    /* check if device supports this feature */
+#ifndef CONFIG_AVB_FPGA
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xE;    /* TAI register */
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 9;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    status->isValid = (opData.ptpData & (1<<8))?GT_TRUE:GT_FALSE;
+    status->eventCtr = opData.ptpData & 0xFF;
+    status->eventErr = (opData.ptpData & (1<<9))?GT_TRUE:GT_FALSE;
+
+    if (status->isValid == GT_FALSE)
+    {
+        return GT_OK;
+    }
+
+    opData.ptpAddr = 10;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading PTPEType.\n"));
+        return GT_FAIL;
+    }
+    data[0] = opData.ptpData;
+
+    opData.ptpAddr = 11;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading PTPEType.\n"));
+        return GT_FAIL;
+    }
+    data[1] = opData.ptpData;
+
+    status->eventTime = GT_PTP_BUILD_TIME(data[1],data[0]);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gtaiGetEventInt
+*
+* DESCRIPTION:
+*       This routine gets TAI Event Capture Interrupt status.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        intStatus     - interrupt status for TAI Event capture
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiGetEventInt
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *intStatus
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gtaiGetEventInt Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpPort = 0xE;    /* Global register */
+    op = PTP_READ_DATA;
+    opData.ptpAddr = 9;
+    opData.ptpBlock = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    *intStatus = (opData.ptpData & 0x8000)?GT_TRUE:GT_FALSE;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gtaiClearEventInt
+*
+* DESCRIPTION:
+*       This routine clear TAI Event Capture Interrupt status.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiClearEventInt
+(
+    IN  GT_QD_DEV     *dev
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gtaiClearEventInt Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpPort = 0xE;    /* Global register */
+    op = PTP_READ_DATA;
+    opData.ptpAddr = 9;
+    opData.ptpBlock = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading eventInt.\n"));
+        return GT_FAIL;
+    }
+
+    opData.ptpData &= 0x7EFF;
+    op = PTP_WRITE_DATA;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing eventInt.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gtaiSetClockSelect
+*
+* DESCRIPTION:
+*       This routine sets several clock select in TAI.
+*
+* INPUTS:
+*        clkSelect  - TAI clock select configuration parameters.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiSetClockSelect
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_TAI_CLOCK_SELECT    *clkSelect
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gtaiSetClockSelect Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI_EXT_CLK))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xE;    /* TAI register */
+    op = PTP_READ_DATA;
+
+    /* getting Tai clock select, offset 8 */
+    opData.ptpAddr = 8;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    opData.ptpData &= ~(0x4077);
+    opData.ptpData |= (((clkSelect->priRecClkSel)&7) << 0);
+    opData.ptpData |= (((clkSelect->syncRecClkSel)&7) << 4);
+    opData.ptpData |= (((clkSelect->ptpExtClk)&1) << 14);
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpAddr = 8;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gtaiGetClockSelect
+*
+* DESCRIPTION:
+*       This routine gets several clock select in TAI.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       clkSelect  - TAI clock select configuration parameters.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiGetClockSelect
+(
+    IN  GT_QD_DEV     *dev,
+    OUT  GT_TAI_CLOCK_SELECT    *clkSelect
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA      opData;
+
+    DBG_INFO(("gtaiGetClockSelect Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI_EXT_CLK))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xE;    /* TAI register */
+    op = PTP_READ_DATA;
+
+    /* getting Tai clock select, offset 8 */
+    opData.ptpAddr = 8;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    clkSelect->priRecClkSel = qdLong2Char(opData.ptpData&7);
+    clkSelect->syncRecClkSel = qdLong2Char((opData.ptpData >> 4) & 7);
+    clkSelect->ptpExtClk = (opData.ptpData>> 14) & 1;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gtaiGetTrigInt
+*
+* DESCRIPTION:
+*       This routine gets TAI Trigger Interrupt status.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        intStatus     - interrupt status for TAI Trigger
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiGetTrigInt
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *intStatus
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gtaiGetTrigInt Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpPort = 0xE;    /* Global register */
+    op = PTP_READ_DATA;
+    opData.ptpAddr = 8;
+    opData.ptpBlock = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    *intStatus = (opData.ptpData & 0x8000)?GT_TRUE:GT_FALSE;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gtaiClearTrigInt
+*
+* DESCRIPTION:
+*       This routine clear TAI Trigger Interrupt status.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiClearTrigInt
+(
+    IN  GT_QD_DEV     *dev
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gtaiClearTrigInt Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpPort = 0xE;    /* Global register */
+    op = PTP_READ_DATA;
+    opData.ptpAddr = 8;
+    opData.ptpBlock = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    opData.ptpData &= 0x7fff;
+    op = PTP_WRITE_DATA;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gtaiSetTrigConfig
+*
+* DESCRIPTION:
+*       This routine sets TAI Trigger configuration parameters.
+*
+* INPUTS:
+*        trigEn    - enable/disable TAI Trigger.
+*        trigData  - TAI Trigger configuration parameters (valid only if trigEn is GT_TRUE).
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiSetTrigConfig
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL     trigEn,
+    IN  GT_TAI_TRIGGER_CONFIG    *trigData
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gtaiSetTrigConfig Called.\n"));
+
+    /* check if device supports this feature */
+#ifndef CONFIG_AVB_FPGA
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xE;    /* TAI register */
+    op = PTP_READ_DATA;
+
+    /* getting PTPEType, offset 0 */
+    opData.ptpAddr = 0;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_TAI_EXT_CLK))
+    {
+      if (trigData->trigPhase)
+        opData.ptpData |= (1 << 12);
+    }
+
+    opData.ptpData &= ~(3|(1<<9));
+
+    if (trigEn == GT_FALSE)
+    {
+        op = PTP_WRITE_DATA;
+
+        if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+        {
+            DBG_INFO(("Failed writing PTPEType.\n"));
+            return GT_FAIL;
+        }
+
+        return GT_OK;
+    }
+
+    opData.ptpData |= 1;
+
+    if (trigData->intEn)
+        opData.ptpData |= (1 << 9);
+
+    if (trigData->mode == GT_TAI_TRIG_ON_GIVEN_TIME)
+        opData.ptpData |= (1 << 1);
+
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    opData.ptpAddr = 2;
+    opData.ptpData = GT_PTP_L16_TIME(trigData->trigGenAmt);
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    opData.ptpAddr = 3;
+    opData.ptpData = GT_PTP_H16_TIME(trigData->trigGenAmt);
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    if (trigData->mode == GT_TAI_TRIG_ON_GIVEN_TIME)
+    {
+        if ((trigData->pulseWidth >= 0) && (trigData->pulseWidth <= 0xF))
+        {
+            op = PTP_READ_DATA;
+            opData.ptpAddr = 5;        /* PulseWidth */
+
+            if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+            {
+                DBG_INFO(("Failed writing PTPEType.\n"));
+                return GT_FAIL;
+            }
+
+            op = PTP_WRITE_DATA;
+            opData.ptpAddr = 5;        /* PulseWidth */
+            opData.ptpData &= (~0xF000);
+            opData.ptpData |= (GT_U16)(trigData->pulseWidth << 12);
+
+            if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+            {
+                DBG_INFO(("Failed writing PTPEType.\n"));
+                return GT_FAIL;
+            }
+        }
+    }
+    else
+    {
+        op = PTP_WRITE_DATA;
+        opData.ptpAddr = 4;        /* TrigClkComp */
+        opData.ptpData = (GT_U16)trigData->trigClkComp;
+
+        if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+        {
+            DBG_INFO(("Failed writing PTPEType.\n"));
+            return GT_FAIL;
+        }
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_TAI_TRIG_GEN)) /* after 6320 */
+    {
+      op = PTP_WRITE_DATA;
+      opData.ptpAddr = 0x10;
+      opData.ptpData = GT_PTP_L16_TIME(trigData->trigGenTime);
+
+      if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+	  {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+	  }
+
+      opData.ptpAddr = 0x11;
+      opData.ptpData = GT_PTP_H16_TIME(trigData->trigGenTime);
+
+      if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+	  {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+	  }
+
+      opData.ptpAddr = 0x13;
+      opData.ptpData = GT_PTP_L16_TIME(trigData->trigGenDelay);
+
+      if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+	  {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+	  }
+
+      opData.ptpAddr = 0x12;
+      opData.ptpData = 0xF & GT_PTP_L16_TIME(trigData->lockCorrect);
+
+      if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+	  {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+	  }
+
+      opData.ptpAddr = 0x14;
+      opData.ptpData = GT_PTP_L16_TIME(trigData->trigGen2Time);
+
+      if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+	  {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+	  }
+
+      opData.ptpAddr = 0x15;
+      opData.ptpData = GT_PTP_H16_TIME(trigData->trigGen2Time);
+
+      if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+	  {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+	  }
+
+      opData.ptpAddr = 0x17;
+      opData.ptpData = GT_PTP_L16_TIME(trigData->trigGen2Delay);
+
+      if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+	  {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+	  }
+
+      opData.ptpAddr = 0x16;
+      opData.ptpData = 0xF & GT_PTP_L16_TIME(trigData->lockCorrect2);
+
+      if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+	  {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+	  }
+
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gtaiGetTrigConfig
+*
+* DESCRIPTION:
+*       This routine gets TAI Trigger configuration parameters.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        trigEn    - enable/disable TAI Trigger.
+*        trigData  - TAI Trigger configuration parameters (valid only if trigEn is GT_TRUE).
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiGetTrigConfig
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL     *trigEn,
+    OUT GT_TAI_TRIGGER_CONFIG    *trigData
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U32                 data[2];
+
+    DBG_INFO(("gtaiGetTrigConfig Called.\n"));
+
+    /* check if device supports this feature */
+#ifndef CONFIG_AVB_FPGA
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xE;    /* TAI register */
+    op = PTP_READ_DATA;
+
+    /* getting PTPEType, offset 0 */
+    opData.ptpAddr = 0;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    if (!(opData.ptpData & 1))
+    {
+        *trigEn = GT_FALSE;
+        return GT_OK;
+    }
+
+    if (trigData == NULL)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    *trigEn = GT_TRUE;
+    trigData->mode = (opData.ptpData >> 1) & 1;
+    trigData->intEn = (opData.ptpData >> 9) & 1;
+    if (IS_IN_DEV_GROUP(dev,DEV_TAI_EXT_CLK))
+    {
+      trigData->trigPhase = (opData.ptpData >>12) & 1;
+    }
+
+
+    opData.ptpAddr = 2;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading PTPEType.\n"));
+        return GT_FAIL;
+    }
+    data[0] = opData.ptpData;
+
+    opData.ptpAddr = 3;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading PTPEType.\n"));
+        return GT_FAIL;
+    }
+    data[1] = opData.ptpData;
+
+    trigData->trigGenAmt = GT_PTP_BUILD_TIME(data[1],data[0]);
+
+    opData.ptpAddr = 5;        /* PulseWidth */
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    trigData->pulseWidth = (GT_U32)((opData.ptpData >> 12) & 0xF);
+
+    /* getting TrigClkComp, offset 4 */
+    opData.ptpAddr = 4;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    trigData->trigClkComp = (GT_U32)opData.ptpData;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_TAI_TRIG_GEN)) /* after 6320 */
+    {
+      op = PTP_READ_DATA;
+      opData.ptpAddr = 0x10;
+      if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+	  {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+	  }
+      data[0] = opData.ptpData;
+
+      opData.ptpAddr = 0x11;
+      if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+	  {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+	  }
+      data[1] = opData.ptpData;
+      trigData->trigGenTime = GT_PTP_BUILD_TIME(data[1],data[0]);
+
+
+	  opData.ptpAddr = 0x13;
+      if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+	  {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+	  }
+      trigData->trigGenDelay = opData.ptpData;
+
+      opData.ptpAddr = 0x12;
+      if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+	  {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+	  }
+      trigData->lockCorrect = 0xF & opData.ptpData;
+
+      opData.ptpAddr = 0x14;
+      if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+	  {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+	  }
+      data[0] = opData.ptpData;
+
+      opData.ptpAddr = 0x15;
+      if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+	  {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+	  }
+      data[1] = opData.ptpData;
+      trigData->trigGen2Time = GT_PTP_BUILD_TIME(data[1],data[0]);
+
+
+	  opData.ptpAddr = 0x17;
+      if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+	  {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+	  }
+      trigData->trigGen2Delay = opData.ptpData;
+
+      opData.ptpAddr = 0x16;
+      if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+	  {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+	  }
+      trigData->lockCorrect2 = 0xF & opData.ptpData;
+
+
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gtaiSetTrigLock
+*
+* DESCRIPTION:
+*       This routine sets TAI Trigger lock.
+*
+* INPUTS:
+*        trigLock       - trigger lock set.
+*        trigLockRange  - trigger lock range.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiSetTrigLock
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL    trigLock,
+    IN  GT_U8      trigLockRange
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gtaiSetTrigLock Called.\n"));
+
+    /* check if device supports this feature */
+#ifndef CONFIG_AVB_FPGA
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI_TRIG_GEN))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+    opData.ptpPort = 0xE;    /* TAI register */
+
+    op = PTP_READ_DATA;
+    /* getting PTPEType, offset 0 */
+    opData.ptpAddr = 0;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    opData.ptpData &= ~(0xf<<4);
+
+    opData.ptpData |= (trigLock==GT_TRUE) ?0x80:0;
+    opData.ptpData |= ((trigLockRange&0x7)<<4);
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gtaiGetTrigLock
+*
+* DESCRIPTION:
+*       This routine gets TAI Trigger lock and trigger lock range.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        trigLock       - trigger lock set.
+*        trigLockRange  - trigger lock range.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtaiGetTrigLock
+(
+    IN  GT_QD_DEV     *dev,
+    OUT  GT_BOOL    *trigLock,
+    OUT  GT_U8      *trigLockRange
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gtaiGetTrigLock Called.\n"));
+
+    /* check if device supports this feature */
+#ifndef CONFIG_AVB_FPGA
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI_TRIG_GEN))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+    opData.ptpPort = 0xE;    /* TAI register */
+
+    op = PTP_READ_DATA;
+    /* getting PTPEType, offset 0 */
+    opData.ptpAddr = 0;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading PTPEType.\n"));
+        return GT_FAIL;
+    }
+
+    trigLock = (opData.ptpData&0x80)?GT_TRUE:GT_FALSE;
+    trigLockRange = (opData.ptpData&0x70)>>4;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gtaiGetTSClkPer
+*
+* DESCRIPTION:
+*         Time Stamping Clock Period in pico seconds.
+*        This routine specifies the clock period for the time stamping clock supplied
+*        to the PTP hardware logic.
+*        This is the clock that is used by the hardware logic to update the PTP
+*        Global Time Counter.
+*
+* INPUTS:
+*         None.
+*
+* OUTPUTS:
+*        clk        - time stamping clock period
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gtaiGetTSClkPer
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U32        *clk
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gtaiGetTSClkPer Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xE;    /* TAI register */
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 1;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    *clk = (GT_U32)opData.ptpData;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gtaiSetTSClkPer
+*
+* DESCRIPTION:
+*         Time Stamping Clock Period in pico seconds.
+*        This routine specifies the clock period for the time stamping clock supplied
+*        to the PTP hardware logic.
+*        This is the clock that is used by the hardware logic to update the PTP
+*        Global Time Counter.
+*
+* INPUTS:
+*        clk        - time stamping clock period
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gtaiSetTSClkPer
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        clk
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gtaiSetTSClkPer Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xE;    /* TAI register */
+    op = PTP_WRITE_DATA;
+
+    opData.ptpAddr = 1;
+
+    opData.ptpData = (GT_U16)clk;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gtaiSetMultiPTPSync
+*
+* DESCRIPTION:
+*         This routine sets Multiple PTP device sync mode and sync time (TrigGenAmt).
+*        When enabled, the hardware logic detects a low to high transition on the
+*        EventRequest(GPIO) and transfers the sync time into the PTP Global Time
+*        register. The EventCapTime is also updated at that instant.
+*
+* INPUTS:
+*        multiEn        - enable/disable Multiple PTP device sync mode
+*        syncTime    - sync time (valid only if multiEn is GT_TRUE)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         When enabled, gtaiSetTrigConfig, gtaiSetEventConfig, gtaiSetTimeInc,
+*        and gtaiSetTimeDec APIs are not operational.
+*
+*******************************************************************************/
+GT_STATUS gtaiSetMultiPTPSync
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL     multiEn,
+    IN  GT_32        syncTime
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gtaiSetMultiPTPSync Called.\n"));
+
+    /* check if device supports this feature */
+#ifndef CONFIG_AVB_FPGA
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI_MULTI_PTP_SYNC))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xE;    /* TAI register */
+    op = PTP_READ_DATA;
+
+    /* getting PTPEType, offset 0 */
+    opData.ptpAddr = 0;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading TAI register.\n"));
+        return GT_FAIL;
+    }
+
+    opData.ptpData &= ~(1 << 2);
+
+    if (multiEn == GT_FALSE)
+    {
+        op = PTP_WRITE_DATA;
+
+        if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+        {
+            DBG_INFO(("Failed writing TAI register.\n"));
+            return GT_FAIL;
+        }
+
+        return GT_OK;
+    }
+
+    opData.ptpData |= (1 << 2);
+
+    op = PTP_WRITE_DATA;
+
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing TAI register.\n"));
+        return GT_FAIL;
+    }
+
+    opData.ptpAddr = 2;
+    opData.ptpData = GT_PTP_L16_TIME(syncTime);
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing TAI register.\n"));
+        return GT_FAIL;
+    }
+
+    opData.ptpAddr = 3;
+    opData.ptpData = GT_PTP_H16_TIME(syncTime);
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing TAI register.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gtaiGetMultiPTPSync
+*
+* DESCRIPTION:
+*         This routine sets Multiple PTP device sync mode and sync time (TrigGenAmt).
+*        When enabled, the hardware logic detects a low to high transition on the
+*        EventRequest(GPIO) and transfers the sync time into the PTP Global Time
+*        register. The EventCapTime is also updated at that instant.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        multiEn        - enable/disable Multiple PTP device sync mode
+*        syncTime    - sync time (valid only if multiEn is GT_TRUE)
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         When enabled, gtaiSetTrigConfig, gtaiSetEventConfig, gtaiSetTimeInc,
+*        and gtaiSetTimeDec APIs are not operational.
+*
+*******************************************************************************/
+GT_STATUS gtaiGetMultiPTPSync
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL     *multiEn,
+    OUT GT_32        *syncTime
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+    GT_U32                 data[2];
+
+    DBG_INFO(("gtaiGetMultiPTPSync Called.\n"));
+
+    /* check if device supports this feature */
+#ifndef CONFIG_AVB_FPGA
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI_MULTI_PTP_SYNC))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xE;    /* TAI register */
+    op = PTP_READ_DATA;
+
+    /* getting PTPEType, offset 0 */
+    opData.ptpAddr = 0;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading TAI register.\n"));
+        return GT_FAIL;
+    }
+
+    if(!(opData.ptpData & (1 << 2)))
+    {
+        *multiEn = GT_FALSE;
+        *syncTime = 0;
+        return GT_OK;
+    }
+
+    opData.ptpAddr = 2;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading TAI register.\n"));
+        return GT_FAIL;
+    }
+    data[0] = opData.ptpData;
+
+    opData.ptpAddr = 3;
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading TAI register.\n"));
+        return GT_FAIL;
+    }
+    data[1] = opData.ptpData;
+
+    *syncTime = GT_PTP_BUILD_TIME(data[1],data[0]);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gtaiGetTimeIncDec
+*
+* DESCRIPTION:
+*         This routine retrieves Time increment/decrement setup.
+*        This amount specifies the number of units of PTP Global Time that need to be
+*        incremented or decremented. This is used for adjusting the PTP Global Time
+*        counter value by a certain amount.
+*
+* INPUTS:
+*         None.
+*
+* OUTPUTS:
+*        expired    - GT_TRUE if inc/dec occurred, GT_FALSE otherwise
+*        inc        - GT_TRUE if increment, GT_FALSE if decrement
+*        amount    - increment/decrement amount
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         Time increment or decrement will be excuted once.
+*
+*******************************************************************************/
+GT_STATUS gtaiGetTimeIncDec
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *expired,
+    OUT GT_BOOL        *inc,
+    OUT GT_U32        *amount
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gtaiGetTimeIncDec Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xE;    /* TAI register */
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 5;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    *inc = (opData.ptpData & 0x800)?GT_FALSE:GT_TRUE;
+    *amount = (GT_U32)(opData.ptpData & 0x7FF);
+
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    *expired = (opData.ptpData & 0x8)?GT_FALSE:GT_TRUE;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gtaiSetTimeInc
+*
+* DESCRIPTION:
+*         This routine enables time increment by the specifed time increment amount.
+*        The amount specifies the number of units of PTP Global Time that need to be
+*        incremented. This is used for adjusting the PTP Global Time counter value by
+*        a certain amount.
+*        Increment occurs just once.
+*
+* INPUTS:
+*        amount    - time increment amount (0 ~ 0x7FF)
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gtaiSetTimeInc
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        amount
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gtaiSetTimeInc Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    /* set TimeIncAmt */
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xE;    /* TAI register */
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 5;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    opData.ptpData &= 0xF000;
+    opData.ptpData |= amount;
+
+    op = PTP_WRITE_DATA;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    /* set TimeIncEn */
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xE;    /* TAI register */
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    opData.ptpData |= 0x8;
+
+    op = PTP_WRITE_DATA;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading DisPTP.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gtaiSetTimeDec
+*
+* DESCRIPTION:
+*         This routine enables time decrement by the specifed time decrement amount.
+*        The amount specifies the number of units of PTP Global Time that need to be
+*        decremented. This is used for adjusting the PTP Global Time counter value by
+*        a certain amount.
+*        Decrement occurs just once.
+*
+* INPUTS:
+*        amount    - time decrement amount (0 ~ 0x7FF)
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gtaiSetTimeDec
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        amount
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gtaiSetTimeInc Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    /* set TimeIncAmt */
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xE;    /* TAI register */
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 5;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading TAI register.\n"));
+        return GT_FAIL;
+    }
+
+    opData.ptpData &= 0xF000;
+    opData.ptpData |= amount;
+    opData.ptpData |= 0x800;    /* decrement */
+
+    op = PTP_WRITE_DATA;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing TAI register.\n"));
+        return GT_FAIL;
+    }
+
+    /* set TimeIncEn */
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xE;    /* TAI register */
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 0;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading TAI register.\n"));
+        return GT_FAIL;
+    }
+
+    opData.ptpData |= 0x8;
+
+    op = PTP_WRITE_DATA;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing TAI register.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gtaiSetTimeIncDecAmt
+*
+* DESCRIPTION:
+*         This routine sets time decrement or increment amount.
+*        The amount specifies the number of units of PTP Global Time that need to be
+*        decremented or increased. This is used for adjusting the PTP Global Time counter value by
+*        a certain amount.
+*
+* INPUTS:
+*        amount    - time decrement amount (0 ~ 0x7FF)
+*        dec       - 0: increase, 1: descrease
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gtaiSetTimeIncDecAmt(IN GT_QD_DEV * dev, IN GT_BOOL dec, IN GT_U32 amount)
+{
+	GT_STATUS           retVal;
+	GT_PTP_OPERATION    op;
+	GT_PTP_OP_DATA      opData;
+
+	DBG_INFO(("gtaiSetTimeIncDecAmt Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+	/* check if device supports this feature */
+	if (!IS_IN_DEV_GROUP(dev, DEV_TAI)) {
+		DBG_INFO(("GT_NOT_SUPPORTED\n"));
+		return GT_NOT_SUPPORTED;
+	}
+#endif
+
+	/* set TimeIncAmt */
+	opData.ptpBlock = 0x0;    /* PTP register space */
+
+	opData.ptpPort = 0xE;    /* TAI register */
+	op = PTP_READ_DATA;
+
+	opData.ptpAddr = 5;
+
+	retVal = ptpOperationPerform(dev, op, &opData);
+	if (retVal != GT_OK) {
+		DBG_INFO(("Failed reading TAI register.\n"));
+		return GT_FAIL;
+	}
+
+	opData.ptpData &= 0xF000;
+	opData.ptpData |= amount;
+	opData.ptpData |= (dec & 0x1) << 11;    /* decrement */
+
+	op = PTP_WRITE_DATA;
+
+	retVal = ptpOperationPerform(dev, op, &opData);
+	if (retVal != GT_OK) {
+		DBG_INFO(("Failed writing TAI register.\n"));
+		return GT_FAIL;
+	}
+
+	DBG_INFO(("OK.\n"));
+	return GT_OK;
+}
+
+/*******************************************************************************
+* gtaiIncDecTimeEnable
+*
+* DESCRIPTION:
+*         This routine enables time decrement or increment by the specifed time decrement amount.
+*        The amount specifies the number of units of PTP Global Time that need to be
+*        decremented. This is used for adjusting the PTP Global Time counter value by
+*        a certain amount.
+*        Decrement occurs just once.
+*
+* INPUTS:
+*         None.
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gtaiIncDecTimeEnable(IN GT_QD_DEV * dev)
+{
+	GT_STATUS           retVal;
+	GT_PTP_OPERATION    op;
+	GT_PTP_OP_DATA      opData;
+
+	DBG_INFO(("gtaiIncDecTimeEnable Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+	/* check if device supports this feature */
+	if (!IS_IN_DEV_GROUP(dev, DEV_TAI)) {
+		DBG_INFO(("GT_NOT_SUPPORTED\n"));
+		return GT_NOT_SUPPORTED;
+	}
+#endif
+
+    /* set TimeIncEn */
+	opData.ptpBlock = 0x0;    /* PTP register space */
+
+	opData.ptpPort = 0xE;    /* TAI register */
+	op = PTP_READ_DATA;
+
+	opData.ptpAddr = 0;
+
+	retVal = ptpOperationPerform(dev, op, &opData);
+	if (retVal != GT_OK) {
+		DBG_INFO(("Failed reading TAI register.\n"));
+		return GT_FAIL;
+	}
+
+	opData.ptpData |= 0x8;
+
+	op = PTP_WRITE_DATA;
+
+	retVal = ptpOperationPerform(dev, op, &opData);
+	if (retVal != GT_OK) {
+		DBG_INFO(("Failed writing TAI register.\n"));
+		return GT_FAIL;
+	}
+
+	DBG_INFO(("OK.\n"));
+	return GT_OK;
+}
+
+/*******************************************************************************
+* gtaiSetTrigGenAmt
+*
+* DESCRIPTION:
+*         This routine sets the TrigGenAmt
+*
+* INPUTS:
+*        amount    - Trigger Generation Time Amount (U32)
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gtaiSetTrigGenAmt(IN GT_QD_DEV * dev, IN GT_U32 amount)
+{
+	GT_STATUS           retVal;
+	GT_PTP_OPERATION    op;
+	GT_PTP_OP_DATA      opData;
+
+	DBG_INFO(("gtaiSetTrigGenAmt Called.\n"));
+
+    /* check if device supports this feature */
+#ifndef CONFIG_AVB_FPGA
+	if (!IS_IN_DEV_GROUP(dev, DEV_TAI)) {
+		DBG_INFO(("GT_NOT_SUPPORTED\n"));
+		return GT_NOT_SUPPORTED;
+	}
+#endif
+
+	opData.ptpBlock = 0x0;    /* PTP register space */
+	opData.ptpPort = 0xE;    /* TAI register */
+	op = PTP_WRITE_DATA;
+	opData.ptpAddr = 2;
+	opData.ptpData = GT_PTP_L16_TIME(amount);
+	retVal = ptpOperationPerform(dev, op, &opData);
+	if (retVal != GT_OK) {
+		DBG_INFO(("Failed writing TrigGenAmt.\n"));
+		return GT_FAIL;
+	}
+
+	opData.ptpAddr = 3;
+	opData.ptpData = GT_PTP_H16_TIME(amount);
+	retVal = ptpOperationPerform(dev, op, &opData);
+	if (retVal != GT_OK) {
+		DBG_INFO(("Failed writing TrigGenAmt.\n"));
+		return GT_FAIL;
+	}
+
+	DBG_INFO(("OK.\n"));
+	return GT_OK;
+}
+
+
+/*******************************************************************************
+* gtaiGetTrigGenAmt
+*
+* DESCRIPTION:
+*         This routine gets the TrigGenAmt
+*
+* OUTPUTS:
+*        amount    - Trigger Generation Time Amount (U32)
+*
+* INPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gtaiGetTrigGenAmt(IN GT_QD_DEV * dev,	IN GT_U32 * amount)
+{
+	GT_STATUS           retVal;
+	GT_PTP_OPERATION    op;
+	GT_PTP_OP_DATA      opData;
+	GT_U32              data[2];
+
+	DBG_INFO(("gtaiGetTrigGenAmt Called.\n"));
+
+	/* check if device supports this feature */
+#ifndef CONFIG_AVB_FPGA
+	if (!IS_IN_DEV_GROUP(dev, DEV_TAI)) {
+		DBG_INFO(("GT_NOT_SUPPORTED\n"));
+		return GT_NOT_SUPPORTED;
+	}
+#endif
+
+	opData.ptpBlock = 0x0;    /* PTP register space */
+	opData.ptpPort = 0xE;    /* TAI register */
+	op = PTP_READ_DATA;
+	opData.ptpAddr = 2;
+	retVal = ptpOperationPerform(dev, op, &opData);
+	if (retVal != GT_OK) {
+		DBG_INFO(("Failed reading PTPEType.\n"));
+		return GT_FAIL;
+	}
+	data[0] = opData.ptpData;
+
+	opData.ptpAddr = 3;
+	retVal = ptpOperationPerform(dev, op, &opData);
+	if (retVal != GT_OK) {
+		DBG_INFO(("Failed reading PTPEType.\n"));
+		return GT_FAIL;
+	}
+	data[1] = opData.ptpData;
+
+	*amount = GT_PTP_BUILD_TIME(data[1], data[0]);
+
+	DBG_INFO(("OK.\n"));
+	return GT_OK;
+}
+
+/*******************************************************************************
+* gtaiTrigGenRequest
+*
+* DESCRIPTION:
+*         This routine requests TrigGen
+*
+* OUTPUTS:
+*         None.
+*
+* INPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gtaiTrigGenRequest(IN GT_QD_DEV * dev, IN GT_BOOL enable)
+{
+	GT_STATUS           retVal;
+	GT_PTP_OPERATION    op;
+	GT_PTP_OP_DATA      opData;
+
+	DBG_INFO(("gtaiTrigGenRequest Called.\n"));
+
+	/* check if device supports this feature */
+#ifndef CONFIG_AVB_FPGA
+	if (!IS_IN_DEV_GROUP(dev, DEV_TAI)) {
+		DBG_INFO(("GT_NOT_SUPPORTED\n"));
+		return GT_NOT_SUPPORTED;
+	}
+#endif
+
+	opData.ptpBlock = 0x0;    /* PTP register space */
+	opData.ptpPort = 0xE;    /* TAI register */
+	op = PTP_READ_DATA;
+	opData.ptpAddr = 0;
+	retVal = ptpOperationPerform(dev, op, &opData);
+	if (retVal != GT_OK) {
+		DBG_INFO(("Failed reading PTPEType.\n"));
+		return GT_FAIL;
+	}
+
+	opData.ptpData &= ~0x1;
+	opData.ptpData |= enable & 0x1;
+	op = PTP_WRITE_DATA;
+	retVal = ptpOperationPerform(dev, op, &opData);
+	if (retVal != GT_OK) {
+		DBG_INFO(("Failed reading PTPEType.\n"));
+		return GT_FAIL;
+	}
+
+	DBG_INFO(("OK.\n"));
+	return GT_OK;
+}
+
+/****************************************************************************/
+/* Internal functions.                                                  */
+/****************************************************************************/
+
+/*******************************************************************************
+* ptpOperationPerform
+*
+* DESCRIPTION:
+*       This function accesses PTP Command Register and Data Register.
+*
+* INPUTS:
+*       ptpOp      - The stats operation bits to be written into the stats
+*                    operation register.
+*
+* OUTPUTS:
+*       ptpData    - points to the data storage that the operation requires.
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS ptpOperationPerform
+(
+    IN    GT_QD_DEV             *dev,
+    IN    GT_PTP_OPERATION        ptpOp,
+    INOUT GT_PTP_OP_DATA        *opData
+)
+{
+    GT_STATUS       retVal;    /* Functions return value */
+    GT_U32             i;
+
+#ifdef CONFIG_AVB_FPGA
+    GT_U32             tmpData;
+#endif
+
+
+    gtSemTake(dev,dev->ptpRegsSem,OS_WAIT_FOREVER);
+
+    /* Wait until the ptp in ready. */
+#ifndef CONFIG_AVB_FPGA
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_PTP_COMMAND;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+    }
+#else
+    {
+    GT_U16 data = 1;
+    while(data == 1)
+    {
+        retVal = hwGetGlobal2RegField(dev,QD_REG_PTP_COMMAND,15,1,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->ptpRegsSem);
+            return retVal;
+        }
+    }
+    }
+#endif
+#else /* CONFIG_AVB_FPGA */
+    {
+    GT_U16 data = 1;
+    while(data == 1)
+    {
+        retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_COMMAND,&tmpData);
+        data = (GT_U16)tmpData;
+        data = (data >> 15) & 0x1;
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->ptpRegsSem);
+            return retVal;
+        }
+    }
+    }
+#endif
+
+    /* Set the PTP Operation register */
+    switch (ptpOp)
+    {
+        case PTP_WRITE_DATA:
+#ifndef CONFIG_AVB_FPGA
+#ifdef GT_RMGMT_ACCESS
+            {
+              HW_DEV_REG_ACCESS regAccess;
+
+              regAccess.entries = 2;
+
+              regAccess.rw_reg_list[0].cmd = HW_REG_WRITE;
+              regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+              regAccess.rw_reg_list[0].reg = QD_REG_PTP_DATA;
+              regAccess.rw_reg_list[0].data = (GT_U16)opData->ptpData;
+              regAccess.rw_reg_list[1].cmd = HW_REG_WRITE;
+              regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+              regAccess.rw_reg_list[1].reg = QD_REG_PTP_COMMAND;
+              regAccess.rw_reg_list[1].data = (GT_U16)((1 << 15) | (PTP_WRITE_DATA << 12) |
+                                (opData->ptpPort << 8)  |
+                                (opData->ptpBlock << 5) |
+                                (opData->ptpAddr & 0x1F));
+              retVal = hwAccessMultiRegs(dev, &regAccess);
+              if(retVal != GT_OK)
+              {
+                gtSemGive(dev,dev->tblRegsSem);
+                return retVal;
+              }
+            }
+#else
+    {
+    GT_U16 data;
+            data = (GT_U16)opData->ptpData;
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_PTP_DATA,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->ptpRegsSem);
+                return retVal;
+            }
+
+            data = (GT_U16)((1 << 15) | (PTP_WRITE_DATA << 12) |
+                    (opData->ptpPort << 8)    |
+                    (opData->ptpBlock << 5)    |
+                    (opData->ptpAddr & 0x1F));
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_PTP_COMMAND,data);
+            {
+                gtSemGive(dev,dev->ptpRegsSem);
+                return retVal;
+            }
+    }
+#endif
+#else /* CONFIG_AVB_FPGA */
+    {
+    GT_U16 data;
+            data = (GT_U16)opData->ptpData;
+            tmpData = (GT_U32)data;
+            retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_DATA,tmpData);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->ptpRegsSem);
+                return retVal;
+            }
+
+            data = (GT_U16)((1 << 15) | (PTP_WRITE_DATA << 12) |
+                    (opData->ptpPort << 8)    |
+                    (opData->ptpBlock << 5)    |
+                    (opData->ptpAddr & 0x1F));
+            tmpData = (GT_U32)data;
+            retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_COMMAND,tmpData);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->ptpRegsSem);
+                return retVal;
+            }
+    }
+#endif
+            break;
+
+        case PTP_READ_DATA:
+#ifndef CONFIG_AVB_FPGA
+#ifdef GT_RMGMT_ACCESS
+            {
+              HW_DEV_REG_ACCESS regAccess;
+
+              regAccess.entries = 3;
+
+              regAccess.rw_reg_list[0].cmd = HW_REG_WRITE;
+#ifndef CONFIG_AVB_FPGA
+              regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+#else
+              regAccess.rw_reg_list[0].addr = AVB_SMI_ADDR;
+#endif
+              regAccess.rw_reg_list[0].reg = QD_REG_PTP_COMMAND;
+              regAccess.rw_reg_list[0].data = (GT_U16)((1 << 15) | (PTP_READ_DATA << 12) |
+                                (opData->ptpPort << 8)  |
+                                (opData->ptpBlock << 5) |
+                                (opData->ptpAddr & 0x1F));
+              regAccess.rw_reg_list[1].cmd = HW_REG_WAIT_TILL_0;
+#ifndef CONFIG_AVB_FPGA
+              regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+#else
+              regAccess.rw_reg_list[1].addr = AVB_SMI_ADDR;
+#endif
+              regAccess.rw_reg_list[1].reg = QD_REG_PTP_COMMAND;
+              regAccess.rw_reg_list[1].data = 15;
+              regAccess.rw_reg_list[2].cmd = HW_REG_READ;
+#ifndef CONFIG_AVB_FPGA
+              regAccess.rw_reg_list[2].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+#else
+              regAccess.rw_reg_list[2].addr = AVB_SMI_ADDR;
+#endif
+              regAccess.rw_reg_list[2].reg = QD_REG_PTP_DATA;
+              regAccess.rw_reg_list[2].data = 0;
+              retVal = hwAccessMultiRegs(dev, &regAccess);
+              if(retVal != GT_OK)
+              {
+                gtSemGive(dev,dev->tblRegsSem);
+                return retVal;
+              }
+              opData->ptpData = (GT_U32)    regAccess.rw_reg_list[2].data;
+            }
+#else
+    {
+    GT_U16 data;
+            data = (GT_U16)((1 << 15) | (PTP_READ_DATA << 12) |
+                    (opData->ptpPort << 8)    |
+                    (opData->ptpBlock << 5)    |
+                    (opData->ptpAddr & 0x1F));
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_PTP_COMMAND,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->ptpRegsSem);
+                return retVal;
+            }
+
+            data = 1;
+            while(data == 1)
+            {
+                retVal = hwGetGlobal2RegField(dev,QD_REG_PTP_COMMAND,15,1,&data);
+                if(retVal != GT_OK)
+                {
+                    gtSemGive(dev,dev->ptpRegsSem);
+                    return retVal;
+                }
+            }
+
+            retVal = hwReadGlobal2Reg(dev,QD_REG_PTP_DATA,&data);
+            opData->ptpData = (GT_U32)data;
+    }
+#endif
+#else /*CONFIG_AVB_FPGA */
+    {
+    GT_U16 data;
+            data = (GT_U16)((1 << 15) | (PTP_READ_DATA << 12) |
+                    (opData->ptpPort << 8)    |
+                    (opData->ptpBlock << 5)    |
+                    (opData->ptpAddr & 0x1F));
+            tmpData = (GT_U32)data;
+            retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_COMMAND,tmpData);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->ptpRegsSem);
+                return retVal;
+            }
+
+            data = 1;
+            while(data == 1)
+            {
+                retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_COMMAND,&tmpData);
+                data = (GT_U32)tmpData;
+                data = (data >> 15) & 0x1;
+                if(retVal != GT_OK)
+                {
+                    gtSemGive(dev,dev->ptpRegsSem);
+                    return retVal;
+                }
+            }
+
+            retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_DATA,&tmpData);
+            data = (GT_U32)tmpData;
+            opData->ptpData = (GT_U32)data;
+    }
+#endif
+            gtSemGive(dev,dev->ptpRegsSem);
+            return retVal;
+
+        case PTP_READ_MULTIPLE_DATA:
+#ifndef CONFIG_AVB_FPGA
+#ifdef GT_RMGMT_ACCESS
+            {
+              HW_DEV_REG_ACCESS regAccess;
+
+              regAccess.rw_reg_list[0].cmd = HW_REG_WRITE;
+              regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+              regAccess.rw_reg_list[0].reg = QD_REG_PTP_COMMAND;
+              regAccess.rw_reg_list[0].data = (GT_U16)((1 << 15) | (PTP_READ_MULTIPLE_DATA << 12) |
+                                (opData->ptpPort << 8)  |
+                                (opData->ptpBlock << 5) |
+                                (opData->ptpAddr & 0x1F));
+              regAccess.rw_reg_list[1].cmd = HW_REG_WAIT_TILL_0;
+              regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+              regAccess.rw_reg_list[1].reg = QD_REG_PTP_COMMAND;
+              regAccess.rw_reg_list[1].data = 15;
+              for(i=0; i<opData->nData; i++)
+              {
+                regAccess.rw_reg_list[2+i].cmd = HW_REG_READ;
+                regAccess.rw_reg_list[2+i].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+                regAccess.rw_reg_list[2+i].reg = QD_REG_PTP_DATA;
+                regAccess.rw_reg_list[2+i].data = 0;
+              }
+              regAccess.entries = 2+i;
+              retVal = hwAccessMultiRegs(dev, &regAccess);
+              if(retVal != GT_OK)
+              {
+                gtSemGive(dev,dev->tblRegsSem);
+                return retVal;
+              }
+              for(i=0; i<opData->nData; i++)
+              {
+                opData->ptpMultiData[i] = (GT_U32)    regAccess.rw_reg_list[2+i].data;
+              }
+            }
+#else
+    {
+    GT_U16 data;
+            data = (GT_U16)((1 << 15) | (PTP_READ_MULTIPLE_DATA << 12) |
+                    (opData->ptpPort << 8)    |
+                    (opData->ptpBlock << 5)    |
+                    (opData->ptpAddr & 0x1F));
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_PTP_COMMAND,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->ptpRegsSem);
+                return retVal;
+            }
+
+            data = 1;
+            while(data == 1)
+            {
+                retVal = hwGetGlobal2RegField(dev,QD_REG_PTP_COMMAND,15,1,&data);
+                if(retVal != GT_OK)
+                {
+                    gtSemGive(dev,dev->ptpRegsSem);
+                    return retVal;
+                }
+            }
+
+            for(i=0; i<opData->nData; i++)
+            {
+                retVal = hwReadGlobal2Reg(dev,QD_REG_PTP_DATA,&data);
+                opData->ptpMultiData[i] = (GT_U32)data;
+                if(retVal != GT_OK)
+                {
+                    gtSemGive(dev,dev->ptpRegsSem);
+                    return retVal;
+                }
+            }
+    }
+#endif
+#else /* CONFIG_AVB_FPGA */
+    {
+    GT_U16 data;
+            data = (GT_U16)((1 << 15) | (PTP_READ_MULTIPLE_DATA << 12) |
+                    (opData->ptpPort << 8)    |
+                    (opData->ptpBlock << 5)    |
+                    (opData->ptpAddr & 0x1F));
+            tmpData = (GT_U32)data;
+            retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_COMMAND,tmpData);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->ptpRegsSem);
+                return retVal;
+            }
+
+            data = 1;
+            while(data == 1)
+            {
+                retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_COMMAND,&tmpData);
+                data = (GT_U32)tmpData;
+                data = (data >> 15) & 0x1;
+                if(retVal != GT_OK)
+                {
+                    gtSemGive(dev,dev->ptpRegsSem);
+                    return retVal;
+                }
+            }
+
+            for(i=0; i<opData->nData; i++)
+            {
+                retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_DATA,&tmpData);
+                data = (GT_U32)tmpData;
+                opData->ptpMultiData[i] = (GT_U32)data;
+                if(retVal != GT_OK)
+                {
+                    gtSemGive(dev,dev->ptpRegsSem);
+                    return retVal;
+                }
+            }
+    }
+#endif
+
+            gtSemGive(dev,dev->ptpRegsSem);
+            return retVal;
+
+        case PTP_READ_TIMESTAMP_DATA:
+#ifndef CONFIG_AVB_FPGA
+#ifdef GT_RMGMT_ACCESS
+            {
+              HW_DEV_REG_ACCESS regAccess;
+
+              regAccess.entries = 3;
+
+              regAccess.rw_reg_list[0].cmd = HW_REG_WRITE;
+              regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+              regAccess.rw_reg_list[0].reg = QD_REG_PTP_COMMAND;
+              regAccess.rw_reg_list[0].data = (GT_U16)((1 << 15) | (PTP_READ_MULTIPLE_DATA << 12) |
+                                (opData->ptpPort << 8)  |
+                                (opData->ptpBlock << 5) |
+                                (opData->ptpAddr & 0x1F));
+              regAccess.rw_reg_list[1].cmd = HW_REG_WAIT_TILL_0;
+              regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+              regAccess.rw_reg_list[1].reg = QD_REG_PTP_COMMAND;
+              regAccess.rw_reg_list[1].data = 15;
+              regAccess.rw_reg_list[2].cmd = HW_REG_READ;
+              regAccess.rw_reg_list[2].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+              regAccess.rw_reg_list[2].reg = QD_REG_PTP_DATA;
+              regAccess.rw_reg_list[2].data = 0;
+              retVal = hwAccessMultiRegs(dev, &regAccess);
+              if(retVal != GT_OK)
+              {
+                gtSemGive(dev,dev->tblRegsSem);
+                return retVal;
+              }
+              opData->ptpMultiData[0] = (GT_U32)    regAccess.rw_reg_list[2].data;
+
+              if (!(opData->ptpMultiData[0] & 0x1))
+              {
+                /* valid bit is not set */
+                gtSemGive(dev,dev->ptpRegsSem);
+                return retVal;
+              }
+
+              for(i=0; i<(opData->nData-1); i++)
+              {
+                regAccess.rw_reg_list[i].cmd = HW_REG_READ;
+                regAccess.rw_reg_list[i].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+                regAccess.rw_reg_list[i].reg = QD_REG_PTP_DATA;
+                regAccess.rw_reg_list[i].data = 0;
+              }
+              regAccess.entries = i;
+              retVal = hwAccessMultiRegs(dev, &regAccess);
+              if(retVal != GT_OK)
+              {
+                gtSemGive(dev,dev->tblRegsSem);
+                return retVal;
+              }
+              for(i=0; i<(opData->nData-1); i++)
+              {
+                opData->ptpMultiData[i+1] = (GT_U32)    regAccess.rw_reg_list[i].data;
+              }
+
+
+              regAccess.entries = 2;
+
+              regAccess.rw_reg_list[0].cmd = HW_REG_WRITE;
+              regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+              regAccess.rw_reg_list[0].reg = QD_REG_PTP_DATA;
+              regAccess.rw_reg_list[0].data = (GT_U16)0;
+              regAccess.rw_reg_list[1].cmd = HW_REG_WRITE;
+              regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+              regAccess.rw_reg_list[1].reg = QD_REG_PTP_COMMAND;
+              regAccess.rw_reg_list[1].data = (GT_U16)((1 << 15) | (PTP_WRITE_DATA << 12) |
+                                (opData->ptpPort << 8)  |
+                                (opData->ptpBlock << 5) |
+                                (opData->ptpAddr & 0x1F));
+              retVal = hwAccessMultiRegs(dev, &regAccess);
+              if(retVal != GT_OK)
+              {
+                gtSemGive(dev,dev->tblRegsSem);
+                return retVal;
+              }
+            }
+#else
+    {
+    GT_U16 data;
+            data = (GT_U16)((1 << 15) | (PTP_READ_MULTIPLE_DATA << 12) |
+                    (opData->ptpPort << 8)    |
+                    (opData->ptpBlock << 5)    |
+                    (opData->ptpAddr & 0x1F));
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_PTP_COMMAND,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->ptpRegsSem);
+                return retVal;
+            }
+
+            data = 1;
+            while(data == 1)
+            {
+                retVal = hwGetGlobal2RegField(dev,QD_REG_PTP_COMMAND,15,1,&data);
+                if(retVal != GT_OK)
+                {
+                    gtSemGive(dev,dev->ptpRegsSem);
+                    return retVal;
+                }
+            }
+
+            retVal = hwReadGlobal2Reg(dev,QD_REG_PTP_DATA,&data);
+            opData->ptpMultiData[0] = (GT_U32)data;
+            if(retVal != GT_OK)
+               {
+                   gtSemGive(dev,dev->ptpRegsSem);
+                return retVal;
+            }
+
+            if (!(data & 0x1))
+            {
+                /* valid bit is not set */
+                   gtSemGive(dev,dev->ptpRegsSem);
+                return retVal;
+            }
+
+            for(i=1; i<opData->nData; i++)
+            {
+                retVal = hwReadGlobal2Reg(dev,QD_REG_PTP_DATA,&data);
+                opData->ptpMultiData[i] = (GT_U32)data;
+                if(retVal != GT_OK)
+                {
+                    gtSemGive(dev,dev->ptpRegsSem);
+                    return retVal;
+                }
+            }
+
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_PTP_DATA,0);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->ptpRegsSem);
+                return retVal;
+            }
+
+            data = (GT_U16)((1 << 15) | (PTP_WRITE_DATA << 12) |
+                    (opData->ptpPort << 8)    |
+                    (opData->ptpBlock << 5)    |
+                    (opData->ptpAddr & 0x1F));
+            retVal = hwWriteGlobal2Reg(dev,QD_REG_PTP_COMMAND,data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->ptpRegsSem);
+                return retVal;
+            }
+    }
+#endif
+#else /* CONFIG_AVB_FPGA */
+    {
+    GT_U16 data;
+            data = (GT_U16)((1 << 15) | (PTP_READ_MULTIPLE_DATA << 12) |
+                    (opData->ptpPort << 8)    |
+                    (opData->ptpBlock << 5)    |
+                    (opData->ptpAddr & 0x1F));
+            tmpData = (GT_U32)data;
+            retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_COMMAND,tmpData);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->ptpRegsSem);
+                return retVal;
+            }
+
+            data = 1;
+            while(data == 1)
+            {
+                retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_COMMAND,&tmpData);
+                data = (GT_U32)tmpData;
+                data = (data >> 15) & 0x1;
+                if(retVal != GT_OK)
+                {
+                    gtSemGive(dev,dev->ptpRegsSem);
+                    return retVal;
+                }
+            }
+
+            retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_DATA,&tmpData);
+            data = (GT_U32)tmpData;
+            opData->ptpMultiData[0] = (GT_U32)data;
+            if(retVal != GT_OK)
+               {
+                   gtSemGive(dev,dev->ptpRegsSem);
+                return retVal;
+            }
+
+            if (!(data & 0x1))
+            {
+                /* valid bit is not set */
+                   gtSemGive(dev,dev->ptpRegsSem);
+                return retVal;
+            }
+
+            for(i=1; i<opData->nData; i++)
+            {
+                retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_DATA,&tmpData);
+                data = (GT_U32)tmpData;
+                opData->ptpMultiData[i] = (GT_U32)data;
+                if(retVal != GT_OK)
+                {
+                    gtSemGive(dev,dev->ptpRegsSem);
+                    return retVal;
+                }
+            }
+
+            retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_DATA,0);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->ptpRegsSem);
+                return retVal;
+            }
+
+            data = (GT_U16)((1 << 15) | (PTP_WRITE_DATA << 12) |
+                    (opData->ptpPort << 8)    |
+                    (opData->ptpBlock << 5)    |
+                    (opData->ptpAddr & 0x1F));
+            tmpData = (GT_U32)data;
+            retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_COMMAND,tmpData);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->ptpRegsSem);
+                return retVal;
+            }
+    }
+#endif
+            gtSemGive(dev,dev->ptpRegsSem);
+            break;
+
+        default:
+
+            gtSemGive(dev,dev->ptpRegsSem);
+            return GT_FAIL;
+    }
+
+    /* Wait until the ptp is ready. */
+#ifndef CONFIG_AVB_FPGA
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_PTP_COMMAND;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+    }
+#else
+    {
+    GT_U16 data;
+    data = 1;
+    while(data == 1)
+    {
+        retVal = hwGetGlobal2RegField(dev,QD_REG_PTP_COMMAND,15,1,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->ptpRegsSem);
+            return retVal;
+        }
+    }
+    }
+#endif
+#else /* CONFIG_AVB_FPGA */
+    {
+    GT_U16 data;
+    data = 1;
+    while(data == 1)
+    {
+        retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_COMMAND,&tmpData);
+        data = (GT_U16)tmpData;
+        data = (data >> 15) & 0x1;
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->ptpRegsSem);
+            return retVal;
+        }
+    }
+    }
+#endif
+
+    gtSemGive(dev,dev->ptpRegsSem);
+    return retVal;
+}
+
+
+#ifdef CONFIG_AVB_FPGA
+
+/*******************************************************************************
+* gptpGetFPGAIntStatus
+*
+* DESCRIPTION:
+*       This routine gets interrupt status of PTP logic.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        ptpInt    - PTP Int Status
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetFPGAIntStatus
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U32        *ptpInt
+)
+{
+    GT_STATUS           retVal;
+    GT_U32                data;
+
+    DBG_INFO(("gptpGetPTPIntStatus Called.\n"));
+
+    retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_INT_OFFSET,&data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    *ptpInt = (GT_U32)data & 0x1;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gptpSetFPGAIntStatus
+*
+* DESCRIPTION:
+*       This routine sets interrupt status of PTP logic.
+*
+* INPUTS:
+*    ptpInt    - PTP Int Status
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetFPGAIntStatus
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U32    ptpInt
+)
+{
+    GT_STATUS           retVal;
+    GT_U32                data;
+
+    DBG_INFO(("gptpSetPTPIntStatus Called.\n"));
+
+    data = ptpInt?1:0;
+
+    retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_INT_OFFSET,ptpInt);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gptpSetFPGAIntEn
+*
+* DESCRIPTION:
+*       This routine enables PTP interrupt.
+*
+* INPUTS:
+*        ptpInt    - PTP Int Status (1 to enable, 0 to disable)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetFPGAIntEn
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        ptpInt
+)
+{
+    GT_STATUS           retVal;
+    GT_U32                data;
+
+    DBG_INFO(("gptpGetPTPIntEn Called.\n"));
+
+    data = (ptpInt == 0)?0:1;
+
+    retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_INTEN_OFFSET,data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gptpGetClockSource
+*
+* DESCRIPTION:
+*       This routine gets PTP Clock source setup.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        clkSrc    - PTP clock source (A/D Device or FPGA)
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetClockSource
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_PTP_CLOCK_SRC     *clkSrc
+)
+{
+    GT_STATUS           retVal;
+    GT_U32                data;
+
+    DBG_INFO(("gptpGetClockSource Called.\n"));
+
+    retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_CLK_SRC_OFFSET,&data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    *clkSrc = (GT_PTP_CLOCK_SRC)(data & 0x1);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gptpSetClockSource
+*
+* DESCRIPTION:
+*       This routine sets PTP Clock source setup.
+*
+* INPUTS:
+*        clkSrc    - PTP clock source (A/D Device or FPGA)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetClockSource
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PTP_CLOCK_SRC     clkSrc
+)
+{
+    GT_STATUS           retVal;
+    GT_U32                data;
+
+    DBG_INFO(("gptpSetClockSource Called.\n"));
+
+    data = (GT_U32)clkSrc;
+
+    retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_CLK_SRC_OFFSET,data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gptpGetP9Mode
+*
+* DESCRIPTION:
+*       This routine gets Port 9 Mode.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        mode - Port 9 mode (GT_PTP_P9_MODE enum type)
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetP9Mode
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_PTP_P9_MODE     *mode
+)
+{
+    GT_STATUS           retVal;
+    GT_U32                data;
+
+    DBG_INFO(("gptpGetP9Mode Called.\n"));
+
+    retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_P9_MODE_OFFSET,&data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    if (data & 0x1)
+    {
+        switch (data & 0x6)
+        {
+            case 0:
+                *mode = PTP_P9_MODE_GMII;
+                break;
+            case 2:
+                *mode = PTP_P9_MODE_MII;
+                break;
+            case 4:
+                *mode = PTP_P9_MODE_MII_CONNECTOR;
+                break;
+            default:
+                return GT_BAD_PARAM;
+        }
+    }
+    else
+    {
+        *mode = PTP_P9_MODE_JUMPER;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gptpSetP9Mode
+*
+* DESCRIPTION:
+*       This routine sets Port 9 Mode.
+*
+* INPUTS:
+*        mode - Port 9 mode (GT_PTP_P9_MODE enum type)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetP9Mode
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PTP_P9_MODE     mode
+)
+{
+    GT_STATUS           retVal;
+    GT_U32                data;
+
+    DBG_INFO(("gptpSetP9Mode Called.\n"));
+
+    switch (mode)
+    {
+        case PTP_P9_MODE_GMII:
+            data = 1;
+            break;
+        case PTP_P9_MODE_MII:
+            data = 3;
+            break;
+        case PTP_P9_MODE_MII_CONNECTOR:
+            data = 5;
+            break;
+        case PTP_P9_MODE_JUMPER:
+            data = 0;
+            break;
+        default:
+            return GT_BAD_PARAM;
+    }
+
+    retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_P9_MODE_OFFSET,data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gptpReset
+*
+* DESCRIPTION:
+*       This routine performs software reset for PTP logic.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpReset
+(
+    IN  GT_QD_DEV     *dev
+)
+{
+    GT_STATUS           retVal;
+    GT_U32                data;
+
+    DBG_INFO(("gptpReset Called.\n"));
+
+    data = 1;
+
+    retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_RESET_OFFSET,data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gptpGetCycleAdjustEn
+*
+* DESCRIPTION:
+*       This routine checks if PTP Duty Cycle Adjustment is enabled.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        adjEn    - GT_TRUE if enabled, GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetCycleAdjustEn
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *adjEn
+)
+{
+    GT_STATUS           retVal;
+    GT_U32                data;
+
+    DBG_INFO(("gptpGetCycleAdjustEn Called.\n"));
+
+    retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_CLK_CTRL_OFFSET,&data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    *adjEn = (data & 0x2)?GT_TRUE:GT_FALSE;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gptpSetCycleAdjustEn
+*
+* DESCRIPTION:
+*       This routine enables/disables PTP Duty Cycle Adjustment.
+*
+* INPUTS:
+*        adjEn    - GT_TRUE to enable, GT_FALSE to disable
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetCycleAdjustEn
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL        adjEn
+)
+{
+    GT_STATUS           retVal;
+    GT_U32                data;
+
+    DBG_INFO(("gptpGetCycleAdjustEn Called.\n"));
+
+    retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_CLK_CTRL_OFFSET,&data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    if (adjEn == GT_FALSE)
+        data &= ~0x3;    /* clear both Enable bit and Valid bit */
+    else
+        data |= 0x2;
+
+    retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_CLK_CTRL_OFFSET,data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gptpGetCycleAdjust
+*
+* DESCRIPTION:
+*       This routine gets clock duty cycle adjustment value.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        adj    - adjustment value (GT_PTP_CLOCK_ADJUSTMENT structure)
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetCycleAdjust
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_PTP_CLOCK_ADJUSTMENT    *adj
+)
+{
+    GT_STATUS           retVal;
+    GT_U32                data;
+
+    DBG_INFO(("gptpGetCycleAdjust Called.\n"));
+
+    retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_CLK_CTRL_OFFSET,&data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    adj->adjSign = (data & 0x4)?GT_PTP_SIGN_PLUS:GT_PTP_SIGN_NEGATIVE;
+    adj->cycleStep = (data >> 3) & 0x7;
+
+    retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_CYCLE_INTERVAL_OFFSET,&data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    adj->cycleInterval = data;
+
+    retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_CYCLE_ADJ_OFFSET,&data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    adj->cycleAdjust = data;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gptpSetCycleAdjust
+*
+* DESCRIPTION:
+*       This routine sets clock duty cycle adjustment value.
+*
+* INPUTS:
+*        adj    - adjustment value (GT_PTP_CLOCK_ADJUSTMENT structure)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetCycleAdjust
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PTP_CLOCK_ADJUSTMENT    *adj
+)
+{
+    GT_STATUS           retVal;
+    GT_U32                data;
+    GT_U32                data1;
+
+    DBG_INFO(("gptpSetCycleAdjust Called.\n"));
+
+    retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_CLK_CTRL_OFFSET,&data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    data &= ~0x1;    /* clear Valid bit */
+
+    retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_CLK_CTRL_OFFSET,data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    /* Setup the Cycle Interval */
+    data1 = adj->cycleInterval & 0xFFFF;
+
+    retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_CYCLE_INTERVAL_OFFSET,data1);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    /* Setup the Cycle Adjustment */
+    data1 = adj->cycleAdjust & 0xFFFF;
+
+    retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_CYCLE_ADJ_OFFSET,data1);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    /* clear Sign bit and Cycle Step bits on QD_REG_PTP_CLK_CTRL_OFFSET value */
+    data &= ~0x3C;
+
+    switch (adj->adjSign)
+    {
+        case GT_PTP_SIGN_PLUS:
+            data |= 0x4;
+            break;
+
+        case GT_PTP_SIGN_NEGATIVE:
+            break;
+
+        default:
+            return GT_BAD_PARAM;
+    }
+
+    data |= ((adj->cycleStep & 0x7) << 3);    /* setup Step bits */
+    data |= 0x1;                            /* set Valid bit */
+
+    retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_CLK_CTRL_OFFSET,data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gptpGetPLLEn
+*
+* DESCRIPTION:
+*       This routine checks if PLL is enabled.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*        en        - GT_TRUE if enabled, GT_FALSE otherwise
+*        freqSel    - PLL Frequency Selection (default 0x3 - 22.368MHz)
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       PLL Frequence selection is based on the Clock Recovery PLL device.
+*        IDT MK1575-01 is the default PLL device.
+*
+*******************************************************************************/
+GT_STATUS gptpGetPLLEn
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_BOOL        *en,
+    OUT GT_U32        *freqSel
+)
+{
+    GT_STATUS           retVal;
+    GT_U32                data;
+
+    DBG_INFO(("gptpGetPLLEn Called.\n"));
+
+    retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_PLL_CTRL_OFFSET,&data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    *en = (data & 0x1)?GT_TRUE:GT_FALSE;
+
+    *freqSel = (data >> 1) & 0x7;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gptpSetPLLEn
+*
+* DESCRIPTION:
+*       This routine enables/disables PLL device.
+*
+* INPUTS:
+*        en        - GT_TRUE to enable, GT_FALSE to disable
+*        freqSel    - PLL Frequency Selection (default 0x3 - 22.368MHz)
+*                  Meaningful only when enabling PLL device
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       PLL Frequence selection is based on the Clock Recovery PLL device.
+*        IDT MK1575-01 is the default PLL device.
+*
+*******************************************************************************/
+GT_STATUS gptpSetPLLEn
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL        en,
+    IN  GT_U32        freqSel
+)
+{
+    GT_STATUS           retVal;
+    GT_U32                data;
+
+    DBG_INFO(("gptpSetPPLEn Called.\n"));
+
+    retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_PLL_CTRL_OFFSET,&data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    if(en == GT_FALSE)
+    {
+        data |= 0x1;
+    }
+    else
+    {
+        data &= ~0x1;
+        data |= (freqSel & 0x7) << 1;
+    }
+
+    retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,QD_REG_PTP_PLL_CTRL_OFFSET,data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gptpGetDDSReg
+*
+* DESCRIPTION:
+*       This routine gets DDS register data.
+*
+* INPUTS:
+*    ddsReg    - DDS Register
+*
+* OUTPUTS:
+*    ddsData    - register data
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpGetDDSReg
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32    ddsReg,
+    OUT GT_U32    *ddsData
+)
+{
+    GT_STATUS           retVal;
+        GT_U32                  data;
+        GT_U32                  timeout = 0x100000;
+
+    DBG_INFO(("gptpGetDDSReg Called.\n"));
+
+    if (ddsReg > 0x3f)
+        return GT_BAD_PARAM;
+    do
+    {
+        retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,12,&data);
+        if (retVal != GT_OK)
+            return retVal;
+        timeout--;
+        if (timeout == 0)
+            return GT_FAIL;
+    } while (data & 0x8000);
+
+    data = 0x8000 | 0x4000 | (ddsReg << 8);
+    retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,12,data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,12,&data);
+    if (retVal != GT_OK)
+        return retVal;
+
+    *ddsData = data & 0xFF;
+
+    retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,12,0);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gptpSetDDSReg
+*
+* DESCRIPTION:
+*       This routine sets DDS register data.
+*    DDS register data written by this API are not affected until gptpUpdateDDSReg API is called.
+*
+* INPUTS:
+*    ddsReg    - DDS Register
+*    ddsData    - register data
+*
+* OUTPUTS:
+*    none
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetDDSReg
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32    ddsReg,
+    IN  GT_U32    ddsData
+)
+{
+    GT_STATUS           retVal;
+        GT_U32                  data;
+        GT_U32                  timeout = 0x100000;
+
+    DBG_INFO(("gptpSetDDSReg Called.\n"));
+
+    if ((ddsReg > 0x3f) || (ddsData > 0xff))
+        return GT_BAD_PARAM;
+
+    do
+    {
+        retVal = AVB_FPGA_READ_REG(dev,AVB_SMI_ADDR,12,&data);
+        if (retVal != GT_OK)
+            return retVal;
+        timeout--;
+        if (timeout == 0)
+            return GT_FAIL;
+    } while (data & 0x8000);
+
+    data = 0x8000 | (ddsReg << 8) | (ddsData);
+    retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,12,data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,12,0);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gptpUpdateDDSReg
+*
+* DESCRIPTION:
+*       This routine updates DDS register data.
+*    DDS register data written by gptpSetDDSReg are not affected until this API is called.
+*
+* INPUTS:
+*    none
+*
+* OUTPUTS:
+*    none
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpUpdateDDSReg
+(
+    IN  GT_QD_DEV     *dev
+)
+{
+    GT_STATUS           retVal;
+
+    DBG_INFO(("gptpUpdateDDSReg Called.\n"));
+
+    retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,13,0x0);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,13,0x1);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gptpSetADFReg
+*
+* DESCRIPTION:
+*       This routine sets ADF4156 register data.
+*
+* INPUTS:
+*    adfData    - register data
+*
+* OUTPUTS:
+*    none
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gptpSetADFReg
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32    adfData
+)
+{
+    GT_STATUS           retVal;
+
+    DBG_INFO(("gptpSetADFReg Called.\n"));
+
+    retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,14,(adfData & 0xFFFF));
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    retVal = AVB_FPGA_WRITE_REG(dev,AVB_SMI_ADDR,15,((adfData>>16) & 0xFFFF));
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+#endif  /*  CONFIG_AVB_FPGA */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPTPHidden.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPTPHidden.c
new file mode 100644
index 000000000000..f997d335ff52
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPTPHidden.c
@@ -0,0 +1,269 @@
+/* Hidden APIs */
+
+
+/*******************************************************************************
+* gtaiGetSocClkPer
+*
+* DESCRIPTION:
+*         SoC clock period
+*        This specifies clock period for the clock that gets generated from the
+*        PTP block to the reset of the SoC. The period is specified in TSClkPer
+*        increments
+*
+* INPUTS:
+*         None.
+*
+* OUTPUTS:
+*        clkPer    - clock period
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gtaiGetSocClkPer
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U32        *clkPer
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gtaiGetTimeIncAmt Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xE;    /* TAI register */
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 6;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading TAI register.\n"));
+        return GT_FAIL;
+    }
+
+    *clkPer = (GT_U32)(opData.ptpData & 0x1FF);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gtaiSetSocClkPer
+*
+* DESCRIPTION:
+*         SoC clock period
+*        This specifies clock period for the clock that gets generated from the
+*        PTP block to the reset of the SoC. The period is specified in TSClkPer
+*        increments
+*
+* INPUTS:
+*        clkPer    - clock period
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gtaiSetSocClkPer
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        clkPer
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gtaiSetSocClkPer Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xE;    /* TAI register */
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 6;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading TAI register.\n"));
+        return GT_FAIL;
+    }
+
+    op = PTP_WRITE_DATA;
+    opData.ptpData &= ~0x1FF;
+    opData.ptpData |= (clkPer & 0x1FF);
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing TAI register.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gtaiGetSocClkComp
+*
+* DESCRIPTION:
+*        Soc clock compensation amount in pico seconds.
+*        This field specifies the remainder amount for when the clock is being
+*        generated with a period specifed by the clkPer. The hardware logic keeps
+*        track of the remainder for every clock tick generation and compensates for it.
+*
+* INPUTS:
+*         None.
+*
+* OUTPUTS:
+*        amount    - clock compensation amount in pico seconds
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gtaiGetSocClkComp
+(
+    IN  GT_QD_DEV     *dev,
+    OUT GT_U32        *amount
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gtaiGetSocClkComp Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xE;    /* TAI register */
+    op = PTP_READ_DATA;
+
+    opData.ptpAddr = 7;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed reading TAI register.\n"));
+        return GT_FAIL;
+    }
+
+    *amount = (GT_U32)opData.ptpData;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gtaiSetSocClkComp
+*
+* DESCRIPTION:
+*        Soc clock compensation amount in pico seconds.
+*        This field specifies the remainder amount for when the clock is being
+*        generated with a period specifed by the clkPer. The hardware logic keeps
+*        track of the remainder for every clock tick generation and compensates for it.
+*
+* INPUTS:
+*        amount    - clock compensation amount in pico seconds
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK      - on success
+*         GT_FAIL    - on error
+*         GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*         None
+*
+*******************************************************************************/
+GT_STATUS gtaiSetSocClkComp
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        amount
+)
+{
+    GT_STATUS           retVal;
+    GT_PTP_OPERATION    op;
+    GT_PTP_OP_DATA        opData;
+
+    DBG_INFO(("gtaiSetSocClkComp Called.\n"));
+
+#ifndef CONFIG_AVB_FPGA
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+#endif
+
+    opData.ptpBlock = 0x0;    /* PTP register space */
+
+    opData.ptpPort = 0xE;    /* TAI register */
+    op = PTP_WRITE_DATA;
+
+    opData.ptpAddr = 7;
+
+    opData.ptpData = (GT_U16)amount;
+
+    if((retVal = ptpOperationPerform(dev, op, &opData)) != GT_OK)
+    {
+        DBG_INFO(("Failed writing TAI register.\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPage2Access.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPage2Access.c
new file mode 100644
index 000000000000..d402d946d17a
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPage2Access.c
@@ -0,0 +1,1175 @@
+#include <Copyright.h>
+
+/*******************************************************************************
+* gtPage2Access.c
+*
+* DESCRIPTION:
+*       API definitions for Page 2 access
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtSem.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+/* Set to enable/disable Page 2 rester list */
+/*******************************************************************************
+* gtP2SetAccessRMUPage2
+*
+* DESCRIPTION:
+*        This routine sets to access registers of page 2(RMU).
+*
+* INPUTS:
+*        access  - TRUE or FALSE
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gtP2SetAccessRMUPage2
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL      access
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gtP2SetAccessRMUPage2 Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_RMU_PAGE2))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    data = (access==GT_TRUE) ? 1 : 0; /* bit location? */
+
+    /* Set Page 2 access.            */
+    retVal = hwWritePortReg(dev,0x17, 0x1a,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+/*******************************************************************************
+* gtP2GetAccessRMUPage2
+*
+* DESCRIPTION:
+*        This routine gets to access registers of page 2(RMU).
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        access  - TRUE or FALSE
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gtP2GetAccessRMUPage2
+(
+    IN GT_QD_DEV    *dev,
+    OUT GT_BOOL     *access
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gtP2GetAccessRMUPage2 Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_RMU_PAGE2))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+
+    /* Get Page 2 access state.            */
+    retVal = hwReadPortReg(dev,0x17, 0x1a,&data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    *access = (data==1) ? GT_TRUE : GT_FALSE; /* bit location? */
+    return retVal;
+}
+
+/* Page 2 stats APIs */
+
+/****************************************************************************/
+/* STATS operation function declaration.                                    */
+/****************************************************************************/
+static GT_STATUS statsOperationPerform
+(
+    IN   GT_QD_DEV            *dev,
+    IN   GT_STATS_OPERATION   statsOp,
+    IN   GT_U8                port,
+    IN   GT_STATS_COUNTERS_PAGE2    counter,
+    OUT  GT_VOID              *statsData
+);
+
+static GT_STATUS statsCapture
+(
+    IN GT_QD_DEV  *dev,
+    IN GT_U8             bank,
+    IN GT_U8      port
+);
+
+static GT_STATUS statsCaptureClear
+(
+    IN GT_QD_DEV  *dev,
+    IN GT_U8             bank,
+    IN GT_U8      port
+);
+
+static GT_STATUS statsReadCounter
+(
+    IN   GT_QD_DEV        *dev,
+    IN   GT_U32            counter,
+    OUT  GT_U32            *statsData
+);
+
+static GT_STATUS statsReadRealtimeCounter
+(
+    IN   GT_QD_DEV      *dev,
+    IN   GT_U8             port,
+    IN   GT_U32            counter,
+    OUT  GT_U32            *statsData
+);
+
+
+
+/*******************************************************************************
+* gstatsPg2GetPortCounter
+*
+* DESCRIPTION:
+*        This routine gets a specific counter of the given port
+*
+* INPUTS:
+*        port - the logical port number.
+*        counter - the counter which will be read
+*
+* OUTPUTS:
+*        statsData - points to 32bit data storage for the MIB counter
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*
+* COMMENTS:
+*        None
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstatsPg2GetPortCounter
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    IN  GT_STATS_COUNTERS_PAGE2    counter,
+    OUT GT_U32            *statsData
+)
+{
+    GT_STATUS    retVal;
+    GT_U8        hwPort;         /* physical port number         */
+
+    DBG_INFO(("gstatsPg2GetPortCounter Called.\n"));
+
+    /* translate logical port to physical port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_RMON)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    /* Gigabit Switch does not support this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_RMON_TYPE_3))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+	/* Enter Page2 access */
+    retVal = gtP2SetAccessRMUPage2(dev, GT_TRUE);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (gtP2SetAccessRMUPage2 returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    retVal = statsOperationPerform(dev,STATS_READ_COUNTER,hwPort,counter,(GT_VOID*)statsData);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (statsOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+	/* Exit Page2 access */
+    retVal = gtP2SetAccessRMUPage2(dev, GT_FALSE);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (gtP2SetAccessRMUPage2 returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gstatsPg2GetPortCounterClear
+*
+* DESCRIPTION:
+*        This routine gets a specific counter of the given port and clear.
+*
+* INPUTS:
+*        port - the logical port number.
+*        counter - the counter which will be read
+*
+* OUTPUTS:
+*        statsData - points to 32bit data storage for the MIB counter
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*
+* COMMENTS:
+*        None
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstatsPg2GetPortCounterClear
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    IN  GT_STATS_COUNTERS_PAGE2    counter,
+    OUT GT_U32            *statsData
+)
+{
+    GT_STATUS    retVal;
+    GT_U8        hwPort;         /* physical port number         */
+
+    DBG_INFO(("gstatsPg2GetPortCounterClear Called.\n"));
+
+    /* translate logical port to physical port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_RMON)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    /* Gigabit Switch does not support this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_RMON_TYPE_3))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+	/* Enter Page2 access */
+    retVal = gtP2SetAccessRMUPage2(dev, GT_TRUE);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (gtP2SetAccessRMUPage2 returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    retVal = statsOperationPerform(dev,STATS_READ_COUNTER_CLEAR,hwPort,counter,(GT_VOID*)statsData);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (statsOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+	/* Exit Page2 access */
+    retVal = gtP2SetAccessRMUPage2(dev, GT_FALSE);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (gtP2SetAccessRMUPage2 returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gstatsPg2GetPortAllCounters
+*
+* DESCRIPTION:
+*       This routine gets all counters of the given port
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       statsCounterSet - points to GT_STATS_COUNTER_SET_PAGE2 for the MIB counters
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*
+* COMMENTS:
+*       None
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstatsPg2GetPortAllCounters
+(
+    IN  GT_QD_DEV               *dev,
+    IN  GT_LPORT        port,
+    OUT GT_STATS_COUNTER_SET_PAGE2    *statsCounterSet
+)
+{
+    GT_STATUS    retVal;
+    GT_U8        hwPort;         /* physical port number         */
+
+    DBG_INFO(("gstatsPg2GetPortAllCounters Called.\n"));
+
+    /* translate logical port to physical port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_RMON)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    /* Gigabit Switch does not support this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_RMON_TYPE_3))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+	/* Enter Page2 access */
+    retVal = gtP2SetAccessRMUPage2(dev, GT_TRUE);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (gtP2SetAccessRMUPage2 returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    retVal = statsOperationPerform(dev,STATS_READ_ALL,hwPort,0,(GT_VOID*)statsCounterSet);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (statsOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+	/* Exit Page2 access */
+    retVal = gtP2SetAccessRMUPage2(dev, GT_FALSE);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (gtP2SetAccessRMUPage2 returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gstatsPg2GetPortAllCountersClear
+*
+* DESCRIPTION:
+*       This routine gets all counters of the given port and clear
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       statsCounterSet - points to GT_STATS_COUNTER_SET_PAGE2 for the MIB counters
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*
+* COMMENTS:
+*       None
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstatsPg2GetPortAllCountersClear
+(
+    IN  GT_QD_DEV               *dev,
+    IN  GT_LPORT        port,
+    OUT GT_STATS_COUNTER_SET_PAGE2    *statsCounterSet
+)
+{
+    GT_STATUS    retVal;
+    GT_U8        hwPort;         /* physical port number         */
+
+    DBG_INFO(("gstatsPg2GetPortAllCountersClear Called.\n"));
+
+    /* translate logical port to physical port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_RMON)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    /* Gigabit Switch does not support this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_RMON_TYPE_3))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+	/* Enter Page2 access */
+    retVal = gtP2SetAccessRMUPage2(dev, GT_TRUE);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (gtP2SetAccessRMUPage2 returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    retVal = statsOperationPerform(dev,STATS_READ_ALL_CLEAR,hwPort,0,(GT_VOID*)statsCounterSet);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (statsOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+	/* Exit Page2 access */
+    retVal = gtP2SetAccessRMUPage2(dev, GT_FALSE);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (gtP2SetAccessRMUPage2 returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gstatsPg2GetRealtimePortCounter
+*
+* DESCRIPTION:
+*        This routine gets a specific realtime counter of the given port
+*
+* INPUTS:
+*        port - the logical port number.
+*        counter - the counter which will be read
+*
+* OUTPUTS:
+*        statsData - points to 32bit data storage for the MIB counter
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gstatsPg2GetRealtimePortCounter
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    IN  GT_STATS_COUNTERS_PAGE2   counter,
+    OUT GT_U32            *statsData
+)
+{
+    GT_STATUS    retVal;
+    GT_U8        hwPort;         /* physical port number         */
+
+    DBG_INFO(("gstatsPg2GetRealtimePortCounter Called.\n"));
+
+    /* translate logical port to physical port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_RMON)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_RMON_REALTIME_SUPPORT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+	/* Enter Page2 access */
+    retVal = gtP2SetAccessRMUPage2(dev, GT_TRUE);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (gtP2SetAccessRMUPage2 returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    retVal = statsOperationPerform(dev,STATS_READ_REALTIME_COUNTER,hwPort,counter,(GT_VOID*)statsData);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (statsOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+	/* Exit Page2 access */
+    retVal = gtP2SetAccessRMUPage2(dev, GT_FALSE);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (gtP2SetAccessRMUPage2 returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/****************************************************************************/
+/* Internal use functions.                                                  */
+/****************************************************************************/
+
+/*******************************************************************************
+* statsOperationPerform
+*
+* DESCRIPTION:
+*       This function is used by all stats control functions, and is responsible
+*       to write the required operation into the stats registers.
+*
+* INPUTS:
+*       statsOp       - The stats operation bits to be written into the stats
+*                     operation register.
+*       port        - port number
+*       counter     - counter to be read if it's read operation
+*
+* OUTPUTS:
+*       statsData   - points to the data storage where the MIB counter will be saved.
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+static GT_STATUS statsOperationPerform
+(
+    IN   GT_QD_DEV            *dev,
+    IN   GT_STATS_OPERATION   statsOp,
+    IN   GT_U8                port,
+    IN   GT_STATS_COUNTERS_PAGE2    counter,
+    OUT  GT_VOID              *statsData
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data; /* Data to be set into the      */
+                                    /* register.                    */
+    GT_U32 statsCounter;
+    GT_U32 lastCounter;
+    GT_U16            portNum;
+	GT_U8 bank;
+
+    gtSemTake(dev,dev->statsRegsSem,OS_WAIT_FOREVER);
+
+    if (IS_IN_DEV_GROUP(dev,DEV_RMON_PORT_BITS))
+    {
+        portNum = (port + 1) << 5;
+    }
+    else
+    {
+        portNum = (GT_U16)port;
+    }
+
+    /* Wait until the stats in ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_STATS_OPERATION;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->statsRegsSem);
+        return retVal;
+      }
+      histoData = qdLong2Short(regAccess.rw_reg_list[1].data);
+    }
+#else
+    data = 1;
+    while(data == 1)
+    {
+        retVal = hwGetGlobalRegField(dev,QD_REG_STATS_OPERATION,15,1,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->statsRegsSem);
+            return retVal;
+        }
+    }
+
+#endif
+
+    /* Set the STAT Operation register */
+    switch (statsOp)
+    {
+        case STATS_READ_COUNTER:
+			bank = (counter&GT_PAGE2_BANK1)?1:0;
+            retVal = statsCapture(dev, bank, port);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->statsRegsSem);
+                return retVal;
+            }
+
+            retVal = statsReadCounter(dev,counter,(GT_U32*)statsData);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->statsRegsSem);
+                return retVal;
+            }
+            break;
+
+        case STATS_READ_COUNTER_CLEAR:
+			bank = (counter&GT_PAGE2_BANK1)?1:0;
+            retVal = statsCaptureClear(dev, bank, port);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->statsRegsSem);
+                return retVal;
+            }
+
+            retVal = statsReadCounter(dev,counter,(GT_U32*)statsData);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->statsRegsSem);
+                return retVal;
+            }
+            break;
+
+        case STATS_READ_REALTIME_COUNTER:
+            retVal = statsReadRealtimeCounter(dev,port,counter,(GT_U32*)statsData);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->statsRegsSem);
+                return retVal;
+            }
+
+            break;
+
+        case STATS_READ_ALL:
+          for(bank=0; bank<2; bank++)
+		  {
+            retVal = statsCapture(dev, bank, port);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->statsRegsSem);
+                return retVal;
+            }
+
+            lastCounter = (bank==0)?(GT_U32)STATS_PG2_Late : (GT_U32)STATS_PG2_OutMGMT;
+
+            for(statsCounter=0; statsCounter<=lastCounter; statsCounter++)
+            {
+                retVal = statsReadCounter(dev,statsCounter,((GT_U32*)statsData + statsCounter));
+                if(retVal != GT_OK)
+                {
+                    gtSemGive(dev,dev->statsRegsSem);
+                    return retVal;
+                }
+            }
+		  }
+            break;
+
+        case STATS_READ_ALL_CLEAR:
+          for(bank=0; bank<2; bank++)
+		  {
+            retVal = statsCaptureClear(dev,bank, port);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->statsRegsSem);
+                return retVal;
+            }
+
+            lastCounter = (bank==0)?(GT_U32)STATS_PG2_Late : (GT_U32)STATS_PG2_OutMGMT;
+
+            for(statsCounter=0; statsCounter<=lastCounter; statsCounter++)
+            {
+                retVal = statsReadCounter(dev,statsCounter,((GT_U32*)statsData + statsCounter));
+                if(retVal != GT_OK)
+                {
+                    gtSemGive(dev,dev->statsRegsSem);
+                    return retVal;
+                }
+            }
+		  }
+            break;
+
+        default:
+
+            gtSemGive(dev,dev->statsRegsSem);
+            return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->statsRegsSem);
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* statsCapture
+*
+* DESCRIPTION:
+*       This function is used to capture all counters of a port.
+*
+* INPUTS:
+*       port        - port number
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*        If Semaphore is used, Semaphore should be acquired before this function call.
+*******************************************************************************/
+static GT_STATUS statsCapture
+(
+    IN GT_QD_DEV            *dev,
+    IN GT_U8             bank,
+    IN GT_U8             port
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    GT_U16            portNum;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_RMON_PORT_BITS))
+    {
+        portNum = (port + 1) << 5;
+    }
+    else
+    {
+        portNum = (GT_U16)port;
+    }
+
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_STATS_OPERATION;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+       while(data == 1)
+    {
+        retVal = hwGetGlobalRegField(dev,QD_REG_STATS_OPERATION,15,1,&data);
+        if(retVal != GT_OK)
+           {
+               return retVal;
+        }
+       }
+#endif
+
+    data = (1 << 15) | (GT_STATS_CAPTURE_PORT << 12) | portNum | (bank<<9);
+    retVal = hwWriteGlobalReg(dev,QD_REG_STATS_OPERATION,data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* statsCaptureClear
+*
+* DESCRIPTION:
+*       This function is used to capture all counters of a port and clear.
+*
+* INPUTS:
+*       port        - port number
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*        If Semaphore is used, Semaphore should be acquired before this function call.
+*******************************************************************************/
+static GT_STATUS statsCaptureClear
+(
+    IN GT_QD_DEV            *dev,
+    IN GT_U8             bank,
+    IN GT_U8             port
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;/* Data to be set into the      */
+                                    /* register.                    */
+    GT_U16            portNum;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_RMON_PORT_BITS))
+    {
+        portNum = (port + 1) << 5;
+    }
+    else
+    {
+        portNum = (GT_U16)port;
+    }
+
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_STATS_OPERATION;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+       while(data == 1)
+    {
+        retVal = hwGetGlobalRegField(dev,QD_REG_STATS_OPERATION,15,1,&data);
+        if(retVal != GT_OK)
+           {
+               return retVal;
+        }
+       }
+#endif
+
+    data = (1 << 15) | (GT_STATS_CAPTURE_PORT_CLEAR << 12) | portNum | (bank<<9);
+    retVal = hwWriteGlobalReg(dev,QD_REG_STATS_OPERATION,data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* statsReadCounter
+*
+* DESCRIPTION:
+*       This function is used to read a captured counter.
+*
+* INPUTS:
+*       counter     - counter to be read if it's read operation
+*
+* OUTPUTS:
+*       statsData   - points to the data storage where the MIB counter will be saved.
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*        If Semaphore is used, Semaphore should be acquired before this function call.
+*******************************************************************************/
+static GT_STATUS statsReadCounter
+(
+    IN   GT_QD_DEV      *dev,
+    IN   GT_U32            counter,
+    OUT  GT_U32            *statsData
+)
+{
+    GT_STATUS   retVal;         /* Functions return value.            */
+    GT_U16      data;/* Data to be set into the  register. */
+	GT_U8       bank;
+#ifndef GT_RMGMT_ACCESS
+    GT_U16    counter3_2;     /* Counter Register Bytes 3 & 2       */
+    GT_U16    counter1_0;     /* Counter Register Bytes 1 & 0       */
+#endif
+
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_STATS_OPERATION;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+       while(data == 1)
+    {
+        retVal = hwGetGlobalRegField(dev,QD_REG_STATS_OPERATION,15,1,&data);
+        if(retVal != GT_OK)
+           {
+               return retVal;
+        }
+       }
+#endif
+
+	bank = (counter&GT_PAGE2_BANK1)?1:0;
+    data = (GT_U16)((1 << 15) | (GT_STATS_READ_COUNTER << 12) | counter | (bank<<9));
+    retVal = hwWriteGlobalReg(dev,QD_REG_STATS_OPERATION,data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 3;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_STATS_OPERATION;
+      regAccess.rw_reg_list[0].data = 15;
+      regAccess.rw_reg_list[1].cmd = HW_REG_READ;
+      regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[1].reg = QD_REG_STATS_COUNTER3_2;
+      regAccess.rw_reg_list[1].data = 0;
+      regAccess.rw_reg_list[2].cmd = HW_REG_READ;
+      regAccess.rw_reg_list[2].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[2].reg = QD_REG_STATS_COUNTER1_0;
+      regAccess.rw_reg_list[2].data = 0;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        return retVal;
+      }
+      *statsData = (regAccess.rw_reg_list[1].data << 16) | regAccess.rw_reg_list[2].data;
+    }
+#else
+    data = 1;
+       while(data == 1)
+    {
+        retVal = hwGetGlobalRegField(dev,QD_REG_STATS_OPERATION,15,1,&data);
+        if(retVal != GT_OK)
+           {
+               return retVal;
+        }
+       }
+
+    retVal = hwReadGlobalReg(dev,QD_REG_STATS_COUNTER3_2,&counter3_2);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    retVal = hwReadGlobalReg(dev,QD_REG_STATS_COUNTER1_0,&counter1_0);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    *statsData = (counter3_2 << 16) | counter1_0;
+#endif
+
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* statsReadRealtimeCounter
+*
+* DESCRIPTION:
+*       This function is used to read a realtime counter.
+*
+* INPUTS:
+*       port     - port to be accessed
+*       counter  - counter to be read if it's read operation
+*
+* OUTPUTS:
+*       statsData   - points to the data storage where the MIB counter will be saved.
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*        If Semaphore is used, Semaphore should be acquired before this function call.
+*******************************************************************************/
+static GT_STATUS statsReadRealtimeCounter
+(
+    IN   GT_QD_DEV      *dev,
+    IN   GT_U8             port,
+    IN   GT_U32            counter,
+    OUT  GT_U32            *statsData
+)
+{
+    GT_STATUS   retVal;         /* Functions return value.            */
+    GT_U16      data, histoData;/* Data to be set into the  register. */
+    GT_U16    counter3_2;     /* Counter Register Bytes 3 & 2       */
+    GT_U16    counter1_0;     /* Counter Register Bytes 1 & 0       */
+    GT_U8  bank;
+
+    /* Get the Histogram mode bit.                */
+    retVal = hwReadGlobalReg(dev,QD_REG_STATS_OPERATION,&histoData);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    histoData &= 0xC00;
+
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_STATS_OPERATION;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+       while(data == 1)
+    {
+        retVal = hwGetGlobalRegField(dev,QD_REG_STATS_OPERATION,15,1,&data);
+        if(retVal != GT_OK)
+           {
+               return retVal;
+        }
+       }
+#endif
+
+	bank = (counter&GT_PAGE2_BANK1)?1:0;
+    data = (GT_U16)((1 << 15) | (GT_STATS_READ_COUNTER << 12) | ((port+1) << 5) | counter | (bank<<9));
+    retVal = hwWriteGlobalReg(dev,QD_REG_STATS_OPERATION,data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_STATS_OPERATION;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+       while(data == 1)
+    {
+        retVal = hwGetGlobalRegField(dev,QD_REG_STATS_OPERATION,15,1,&data);
+        if(retVal != GT_OK)
+           {
+               return retVal;
+        }
+       }
+#endif
+
+    retVal = hwReadGlobalReg(dev,QD_REG_STATS_COUNTER3_2,&counter3_2);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    retVal = hwReadGlobalReg(dev,QD_REG_STATS_COUNTER1_0,&counter1_0);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    *statsData = (counter3_2 << 16) | counter1_0;
+
+    return GT_OK;
+
+}
+
+/* Page 2 ATU APIs */
+
+
+/* Page 2 SMI APIs */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPhyCtrl.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPhyCtrl.c
new file mode 100644
index 000000000000..9ce3efd62c38
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPhyCtrl.c
@@ -0,0 +1,3752 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtPhyCtrl.h
+*
+* DESCRIPTION:
+* API definitions for PHY control facility.
+*
+* DEPENDENCIES:
+* None.
+*
+* FILE REVISION NUMBER:
+* $Revision: 10 $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtHwCntl.h>
+#include <gtDrvConfig.h>
+#include <gtDrvSwRegs.h>
+#include <gtVct.h>
+#include <gtSem.h>
+#ifdef GT_USE_MAD
+#include <gtMad.h>
+#endif
+
+#ifdef GT_USE_MAD
+#include "gtPhyCtrl_mad.c"
+#endif
+/*
+ * This routine set Auto-Negotiation Ad Register for Fast Ethernet Phy
+*/
+static
+GT_STATUS feSetAutoMode
+(
+    IN GT_QD_DEV *dev,
+    IN GT_U8      hwPort,
+    IN GT_PHY_INFO     *phyInfo,
+    IN GT_PHY_AUTO_MODE mode
+)
+{
+    GT_U16             u16Data;
+
+    GT_UNUSED_PARAM(phyInfo);
+
+    DBG_INFO(("feSetAutoMode Called.\n"));
+
+    if(hwReadPhyReg(dev,hwPort,QD_PHY_AUTONEGO_AD_REG,&u16Data) != GT_OK)
+    {
+        DBG_INFO(("Not able to read Phy Reg(port:%d,offset:%d).\n",hwPort,QD_PHY_AUTONEGO_AD_REG));
+           return GT_FAIL;
+    }
+
+    /* Mask out all auto mode related bits. */
+    u16Data &= ~QD_PHY_MODE_AUTO_AUTO;
+
+    switch(mode)
+    {
+        case SPEED_AUTO_DUPLEX_AUTO:
+                u16Data |= QD_PHY_MODE_AUTO_AUTO;
+                break;
+        case SPEED_100_DUPLEX_AUTO:
+                u16Data |= QD_PHY_MODE_100_AUTO;
+                break;
+        case SPEED_10_DUPLEX_AUTO:
+                u16Data |= QD_PHY_MODE_10_AUTO;
+                break;
+        case SPEED_AUTO_DUPLEX_FULL:
+                u16Data |= QD_PHY_MODE_AUTO_FULL;
+                break;
+        case SPEED_AUTO_DUPLEX_HALF:
+                u16Data |= QD_PHY_MODE_AUTO_HALF;
+                break;
+        case SPEED_100_DUPLEX_FULL:
+                u16Data |= QD_PHY_100_FULL;
+                break;
+        case SPEED_100_DUPLEX_HALF:
+                u16Data |= QD_PHY_100_HALF;
+                break;
+        case SPEED_10_DUPLEX_FULL:
+                u16Data |= QD_PHY_10_FULL;
+                break;
+        case SPEED_10_DUPLEX_HALF:
+                u16Data |= QD_PHY_10_HALF;
+                break;
+        default:
+                 DBG_INFO(("Unknown Auto Mode (%d)\n",mode));
+                return GT_BAD_PARAM;
+    }
+
+    /* Write to Phy AutoNegotiation Advertisement Register.  */
+    if(hwWritePhyReg(dev,hwPort,QD_PHY_AUTONEGO_AD_REG,u16Data) != GT_OK)
+    {
+        DBG_INFO(("Not able to write Phy Reg(port:%d,offset:%d,data:%#x).\n",hwPort,QD_PHY_AUTONEGO_AD_REG,u16Data));
+           return GT_FAIL;
+    }
+
+    return GT_OK;
+}
+
+/*
+ * This routine get Auto-Negotiation Ad Register for Fast Ethernet Phy
+*/
+static
+GT_STATUS feGetAutoMode
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_U8            hwPort,
+    IN  GT_PHY_INFO      *phyInfo,
+    OUT GT_PHY_AUTO_MODE *mode
+)
+{
+    GT_U16  u16Data;
+
+    GT_UNUSED_PARAM(phyInfo);
+
+    DBG_INFO(("feGetAutoMode Called.\n"));
+
+    if(hwReadPhyReg(dev,hwPort,QD_PHY_AUTONEGO_AD_REG,&u16Data) != GT_OK)
+    {
+        DBG_INFO(("Not able to read Phy Reg(port:%d,offset:%d).\n",hwPort,QD_PHY_AUTONEGO_AD_REG));
+           return GT_FAIL;
+    }
+
+    /* Pick out all auto mode related bits. */
+    u16Data &= QD_PHY_MODE_AUTO_AUTO;
+
+    switch(u16Data)
+    {
+        case QD_PHY_MODE_10_HALF:
+                *mode = SPEED_10_DUPLEX_HALF;
+                break;
+        case QD_PHY_MODE_10_FULL:
+                *mode = SPEED_10_DUPLEX_FULL;
+                break;
+        case QD_PHY_MODE_100_HALF:
+                *mode = SPEED_100_DUPLEX_HALF;
+                break;
+        case QD_PHY_MODE_100_FULL:
+                *mode = SPEED_100_DUPLEX_FULL;
+                break;
+        case QD_PHY_MODE_AUTO_HALF:
+                *mode = SPEED_AUTO_DUPLEX_HALF;
+                break;
+        case QD_PHY_MODE_AUTO_FULL:
+                *mode = SPEED_AUTO_DUPLEX_FULL;
+                break;
+        case QD_PHY_MODE_10_AUTO:
+                *mode = SPEED_10_DUPLEX_AUTO;
+                break;
+        case QD_PHY_MODE_100_AUTO:
+                *mode = SPEED_100_DUPLEX_AUTO;
+                break;
+        case QD_PHY_MODE_AUTO_AUTO:
+                *mode = SPEED_AUTO_DUPLEX_AUTO;
+                break;
+        default:
+                DBG_INFO(("Unknown Auto Mode (%d)\n", u16Data));
+                *mode = SPEED_AUTO_DUPLEX_AUTO;
+                break;
+    }
+
+    return GT_OK;
+}
+
+/*
+ * This routine set Auto-Negotiation Advertisement Register for Copper
+*/
+static
+GT_STATUS gigCopperSetAutoMode
+(
+    IN GT_QD_DEV *dev,
+    IN GT_U8 hwPort,
+    IN GT_PHY_INFO     *phyInfo,
+    IN GT_PHY_AUTO_MODE mode
+)
+{
+    GT_U16             u16Data,u16Data1;
+
+    DBG_INFO(("gigCopperSetAutoMode Called.\n"));
+
+    if(hwReadPagedPhyReg(dev,hwPort,0,QD_PHY_AUTONEGO_AD_REG,phyInfo->anyPage,&u16Data) != GT_OK)
+    {
+        DBG_INFO(("Not able to read Phy Reg(port:%d,offset:%d).\n",hwPort,QD_PHY_AUTONEGO_AD_REG));
+           return GT_FAIL;
+    }
+
+    /* Mask out all auto mode related bits. */
+    u16Data &= ~QD_PHY_MODE_AUTO_AUTO;
+
+    if(hwReadPagedPhyReg(dev,hwPort,0,QD_PHY_AUTONEGO_1000AD_REG,phyInfo->anyPage,&u16Data1) != GT_OK)
+    {
+        DBG_INFO(("Not able to read Phy Reg(port:%d,offset:%d).\n",hwPort,QD_PHY_AUTONEGO_AD_REG));
+           return GT_FAIL;
+    }
+
+    /* Mask out all auto mode related bits. */
+    u16Data1 &= ~(QD_GIGPHY_1000T_FULL|QD_GIGPHY_1000T_HALF);
+
+    switch(mode)
+    {
+        case SPEED_AUTO_DUPLEX_AUTO:
+                u16Data |= QD_PHY_MODE_AUTO_AUTO;
+        case SPEED_1000_DUPLEX_AUTO:
+                u16Data1 |= QD_GIGPHY_1000T_FULL|QD_GIGPHY_1000T_HALF;
+                break;
+        case SPEED_AUTO_DUPLEX_FULL:
+                u16Data  |= QD_PHY_MODE_AUTO_FULL;
+                u16Data1 |= QD_GIGPHY_1000T_FULL;
+                break;
+        case SPEED_1000_DUPLEX_FULL:
+                u16Data1 |= QD_GIGPHY_1000T_FULL;
+                break;
+        case SPEED_1000_DUPLEX_HALF:
+                u16Data1 |= QD_GIGPHY_1000T_HALF;
+                break;
+        case SPEED_AUTO_DUPLEX_HALF:
+                u16Data  |= QD_PHY_MODE_AUTO_HALF;
+                u16Data1 |= QD_GIGPHY_1000T_HALF;
+                break;
+        case SPEED_100_DUPLEX_AUTO:
+                u16Data |= QD_PHY_MODE_100_AUTO;
+                break;
+        case SPEED_10_DUPLEX_AUTO:
+                u16Data |= QD_PHY_MODE_10_AUTO;
+                break;
+        case SPEED_100_DUPLEX_FULL:
+                u16Data |= QD_PHY_100_FULL;
+                break;
+        case SPEED_100_DUPLEX_HALF:
+                u16Data |= QD_PHY_100_HALF;
+                break;
+        case SPEED_10_DUPLEX_FULL:
+                u16Data |= QD_PHY_10_FULL;
+                break;
+        case SPEED_10_DUPLEX_HALF:
+                u16Data |= QD_PHY_10_HALF;
+                break;
+        default:
+                DBG_INFO(("Unknown Auto Mode (%d)\n",mode));
+                return GT_BAD_PARAM;
+    }
+
+    /* Write to Phy AutoNegotiation Advertisement Register.  */
+    if(hwWritePagedPhyReg(dev,hwPort,0,QD_PHY_AUTONEGO_AD_REG,phyInfo->anyPage,u16Data) != GT_OK)
+    {
+        DBG_INFO(("Not able to write Phy Reg(port:%d,offset:%d,data:%#x).\n",hwPort,QD_PHY_AUTONEGO_AD_REG,u16Data));
+           return GT_FAIL;
+    }
+
+    /* Write to Phy AutoNegotiation 1000B Advertisement Register.  */
+    if(hwWritePagedPhyReg(dev,hwPort,0,QD_PHY_AUTONEGO_1000AD_REG,phyInfo->anyPage,u16Data1) != GT_OK)
+    {
+        DBG_INFO(("Not able to read Phy Reg(port:%d,offset:%d).\n",hwPort,QD_PHY_AUTONEGO_AD_REG));
+           return GT_FAIL;
+    }
+
+    return GT_OK;
+}
+
+/*
+ * This routine get Auto-Negotiation Ad Register for Copper
+*/
+static
+GT_STATUS gigCopperGetAutoMode
+(
+    IN GT_QD_DEV        *dev,
+    IN GT_U8            hwPort,
+    IN GT_PHY_INFO      *phyInfo,
+    IN GT_PHY_AUTO_MODE *mode
+)
+{
+    GT_U16 u16Data, u16Data1;
+    GT_U32 u32Data;
+
+    DBG_INFO(("gigCopperGetAutoMode Called.\n"));
+
+    if(hwReadPagedPhyReg(dev,hwPort,0,QD_PHY_AUTONEGO_AD_REG,phyInfo->anyPage,&u16Data) != GT_OK)
+    {
+        DBG_INFO(("Not able to read Phy Reg(port:%d,offset:%d).\n",hwPort,QD_PHY_AUTONEGO_AD_REG));
+           return GT_FAIL;
+    }
+
+    /* Pick out all auto mode related bits. */
+    u16Data &= QD_PHY_MODE_AUTO_AUTO;
+
+    if(hwReadPagedPhyReg(dev,hwPort,0,QD_PHY_AUTONEGO_1000AD_REG,phyInfo->anyPage,&u16Data1) != GT_OK)
+    {
+        DBG_INFO(("Not able to read Phy Reg(port:%d,offset:%d).\n",hwPort,QD_PHY_AUTONEGO_AD_REG));
+           return GT_FAIL;
+    }
+
+    /* Pick out all auto mode related bits. */
+    u16Data1 &= (QD_GIGPHY_1000T_FULL|QD_GIGPHY_1000T_HALF);
+
+    u32Data  = (u16Data&0xffff)|((u16Data1&0xffff)<<16);
+
+    switch(u32Data)
+    {
+        case QD_PHY_MODE_10_HALF:
+                *mode = SPEED_10_DUPLEX_HALF;
+                break;
+        case QD_PHY_MODE_10_FULL:
+                *mode = SPEED_10_DUPLEX_FULL;
+                break;
+        case QD_PHY_MODE_100_HALF:
+                *mode = SPEED_100_DUPLEX_HALF;
+                break;
+        case QD_PHY_MODE_100_FULL:
+                *mode = SPEED_100_DUPLEX_FULL;
+                break;
+        case (QD_GIGPHY_1000T_HALF<<16):
+                *mode = SPEED_1000_DUPLEX_HALF;
+                break;
+        case (QD_GIGPHY_1000T_FULL<<16):
+                *mode = SPEED_1000_DUPLEX_FULL;
+                break;
+        case QD_PHY_MODE_AUTO_HALF|(QD_GIGPHY_1000T_HALF<<16):
+                *mode = SPEED_AUTO_DUPLEX_HALF;
+                break;
+        case QD_PHY_MODE_AUTO_FULL|(QD_GIGPHY_1000T_FULL<<16):
+                *mode = SPEED_AUTO_DUPLEX_FULL;
+                break;
+        case QD_PHY_MODE_10_AUTO:
+                *mode = SPEED_10_DUPLEX_AUTO;
+                break;
+        case QD_PHY_MODE_100_AUTO:
+                *mode = SPEED_100_DUPLEX_AUTO;
+                break;
+        case ((QD_GIGPHY_1000T_FULL|QD_GIGPHY_1000T_HALF)<<16):
+                *mode = SPEED_1000_DUPLEX_AUTO;
+                break;
+        case QD_PHY_MODE_AUTO_AUTO|((QD_GIGPHY_1000T_FULL|QD_GIGPHY_1000T_HALF)<<16):
+                *mode = SPEED_AUTO_DUPLEX_AUTO;
+                break;
+        default:
+                *mode = SPEED_AUTO_DUPLEX_AUTO;
+                DBG_INFO(("Unknown Auto Mode (%08x)\n", u32Data));
+                break;
+    }
+
+    return GT_OK;
+}
+
+/*
+ * This routine set Auto-Negotiation Ad Register for Fiber
+*/
+static
+GT_STATUS gigFiberSetAutoMode
+(
+    IN GT_QD_DEV *dev,
+    IN GT_U8 hwPort,
+    IN GT_PHY_INFO     *phyInfo,
+    IN GT_PHY_AUTO_MODE mode
+)
+{
+    GT_U16             u16Data;
+
+    DBG_INFO(("gigPhySetAutoMode Called.\n"));
+
+    if(hwReadPagedPhyReg(dev,hwPort,1,QD_PHY_AUTONEGO_AD_REG,phyInfo->anyPage,&u16Data) != GT_OK)
+    {
+        DBG_INFO(("Not able to read Phy Reg(port:%d,offset:%d).\n",hwPort,QD_PHY_AUTONEGO_AD_REG));
+           return GT_FAIL;
+    }
+
+    /* Mask out all auto mode related bits. */
+    u16Data &= ~(QD_GIGPHY_1000X_FULL|QD_GIGPHY_1000X_HALF);
+
+    switch(mode)
+    {
+        case SPEED_AUTO_DUPLEX_AUTO:
+        case SPEED_1000_DUPLEX_AUTO:
+                u16Data |= QD_GIGPHY_1000X_FULL|QD_GIGPHY_1000X_HALF;
+                break;
+        case SPEED_AUTO_DUPLEX_FULL:
+        case SPEED_1000_DUPLEX_FULL:
+                u16Data |= QD_GIGPHY_1000X_FULL;
+                break;
+        case SPEED_AUTO_DUPLEX_HALF:
+        case SPEED_1000_DUPLEX_HALF:
+                u16Data |= QD_GIGPHY_1000X_HALF;
+                break;
+        default:
+                    DBG_INFO(("Unknown Auto Mode (%d)\n",mode));
+                return GT_BAD_PARAM;
+    }
+
+    /* Write to Phy AutoNegotiation Advertisement Register.  */
+    if(hwWritePagedPhyReg(dev,hwPort,1,QD_PHY_AUTONEGO_AD_REG,phyInfo->anyPage,u16Data) != GT_OK)
+    {
+        DBG_INFO(("Not able to write Phy Reg(port:%d,offset:%d,data:%#x).\n",hwPort,QD_PHY_AUTONEGO_AD_REG,u16Data));
+           return GT_FAIL;
+    }
+
+    return GT_OK;
+}
+
+/*
+ * This routine sets Auto Mode and Reset the phy
+*/
+static
+GT_STATUS phySetAutoMode
+(
+    IN GT_QD_DEV *dev,
+    IN GT_U8 hwPort,
+    IN GT_PHY_INFO *phyInfo,
+    IN GT_PHY_AUTO_MODE mode
+)
+{
+    GT_U16         u16Data;
+    GT_STATUS    status;
+    GT_BOOL            autoOn;
+    GT_U16            pageReg;
+
+    DBG_INFO(("phySetAutoMode Called.\n"));
+
+    if (!(phyInfo->flag & GT_PHY_GIGABIT))
+    {
+        if((status=feSetAutoMode(dev,hwPort,phyInfo,mode)) != GT_OK)
+        {
+               return status;
+        }
+
+        u16Data = QD_PHY_SPEED | QD_PHY_DUPLEX | QD_PHY_AUTONEGO;
+
+        DBG_INFO(("Write to phy(%d) register: regAddr 0x%x, data %#x",
+                  hwPort,QD_PHY_CONTROL_REG,u16Data));
+
+        /* soft reset */
+        return hwPhyReset(dev,hwPort,u16Data);
+    }
+
+    if(driverPagedAccessStart(dev,hwPort,phyInfo->pageType,&autoOn,&pageReg) != GT_OK)
+    {
+        return GT_FAIL;
+    }
+
+    if(phyInfo->flag & GT_PHY_COPPER)
+    {
+        if((status=gigCopperSetAutoMode(dev,hwPort,phyInfo,mode)) != GT_OK)
+        {
+               return status;
+        }
+
+        u16Data = QD_PHY_AUTONEGO;
+
+        DBG_INFO(("Write to phy(%d) register: regAddr 0x%x, data %#x",
+                  hwPort,QD_PHY_CONTROL_REG,u16Data));
+
+        /* Write to Phy Control Register.  */
+        if(hwWritePagedPhyReg(dev,hwPort,0,QD_PHY_CONTROL_REG,phyInfo->anyPage,u16Data) != GT_OK)
+            return GT_FAIL;
+    }
+    else if(phyInfo->flag & GT_PHY_FIBER)
+    {
+        if((status=gigFiberSetAutoMode(dev,hwPort,phyInfo,mode)) != GT_OK)
+        {
+               return status;
+        }
+        u16Data = QD_PHY_AUTONEGO;
+
+        DBG_INFO(("Write to phy(%d) register: regAddr 0x%x, data %#x",
+                  hwPort,QD_PHY_CONTROL_REG,u16Data));
+
+        /* Write to Phy Control Register.  */
+        if(hwWritePagedPhyReg(dev,hwPort,1,QD_PHY_CONTROL_REG,phyInfo->anyPage,u16Data) != GT_OK)
+            return GT_FAIL;
+    }
+
+    if(driverPagedAccessStop(dev,hwPort,phyInfo->pageType,autoOn,pageReg) != GT_OK)
+    {
+        return GT_FAIL;
+    }
+
+    return hwPhyReset(dev,hwPort,0xFF);
+}
+
+/*
+ * This routine gets Auto Mode
+*/
+static
+GT_STATUS phyGetAutoMode
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_U8            hwPort,
+    IN  GT_PHY_INFO      *phyInfo,
+    OUT GT_PHY_AUTO_MODE *mode
+)
+{
+    GT_STATUS    status;
+    GT_BOOL      autoOn;
+    GT_U16       pageReg;
+
+    DBG_INFO(("phyGetAutoMode Called.\n"));
+
+    if (!(phyInfo->flag & GT_PHY_GIGABIT))
+    {
+        if((status=feGetAutoMode(dev,hwPort,phyInfo,mode)) != GT_OK)
+        {
+               return status;
+        }
+        return status;
+    }
+
+    if(driverPagedAccessStart(dev,hwPort,phyInfo->pageType,&autoOn,&pageReg) != GT_OK)
+    {
+        return GT_FAIL;
+    }
+
+    if(phyInfo->flag & GT_PHY_COPPER)
+    {
+        if((status=gigCopperGetAutoMode(dev,hwPort,phyInfo,mode)) != GT_OK)
+        {
+               return status;
+        }
+    }
+    else if(phyInfo->flag & GT_PHY_FIBER)
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverPagedAccessStop(dev,hwPort,phyInfo->pageType,autoOn,pageReg) != GT_OK)
+    {
+        return GT_FAIL;
+    }
+
+    return status;
+}
+
+/*******************************************************************************
+* gprtPhyReset
+*
+* DESCRIPTION:
+*       This routine preforms PHY reset.
+*        After reset, phy will be in Autonegotiation mode.
+*
+* INPUTS:
+* port - The logical port number, unless SERDES device is accessed
+*        The physical address, if SERDES device is accessed
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+* COMMENTS:
+* data sheet register 0.15 - Reset
+* data sheet register 0.13 - Speed
+* data sheet register 0.12 - Autonegotiation
+* data sheet register 0.8  - Duplex Mode
+*******************************************************************************/
+
+GT_STATUS gprtPhyReset
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port
+)
+{
+
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_PHY_INFO        phyInfo;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtPhyReset_mad(dev, port);
+#endif
+
+    DBG_INFO(("gprtPhyReset Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    /* set Auto Negotiation AD Register */
+    retVal = phySetAutoMode(dev,hwPort,&phyInfo,SPEED_AUTO_DUPLEX_AUTO);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetPortLoopback
+*
+* DESCRIPTION:
+* Enable/Disable Internal Port Loopback.
+* For 10/100 Fast Ethernet PHY, speed of Loopback is determined as follows:
+*   If Auto-Negotiation is enabled, this routine disables Auto-Negotiation and
+*   forces speed to be 10Mbps.
+*   If Auto-Negotiation is disabled, the forced speed is used.
+*   Disabling Loopback simply clears bit 14 of control register(0.14). Therefore,
+*   it is recommended to call gprtSetPortAutoMode for PHY configuration after
+*   Loopback test.
+* For 10/100/1000 Gigagbit Ethernet PHY, speed of Loopback is determined as follows:
+*   If Auto-Negotiation is enabled and Link is active, the current speed is used.
+*   If Auto-Negotiation is disabled, the forced speed is used.
+*   All other cases, default MAC Interface speed is used. Please refer to the data
+*   sheet for the information of the default MAC Interface speed.
+*
+*
+* INPUTS:
+* port - The logical port number, unless SERDES device is accessed
+*        The physical address, if SERDES device is accessed
+* enable - If GT_TRUE, enable loopback mode
+* If GT_FALSE, disable loopback mode
+*
+* OUTPUTS:
+* None.
+*
+* RETURNS:
+* GT_OK - on success
+* GT_FAIL - on error
+*
+* COMMENTS:
+* data sheet register 0.14 - Loop_back
+*
+*******************************************************************************/
+
+GT_STATUS gprtSetPortLoopback
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL   enable
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16             u16Data;
+    GT_PHY_INFO        phyInfo;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtSetPortLoopback_mad(dev, port, enable);
+#endif
+
+    DBG_INFO(("gprtSetPortLoopback Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if(hwReadPhyReg(dev,hwPort,QD_PHY_CONTROL_REG,&u16Data) != GT_OK)
+    {
+        DBG_INFO(("Not able to read Phy Reg(port:%d,offset:%d).\n",hwPort,QD_PHY_CONTROL_REG));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    /* is this Fast Ethernet Phy? */
+    if (!(phyInfo.flag & GT_PHY_GIGABIT))
+    {
+        if(enable)
+        {
+            if(u16Data & QD_PHY_AUTONEGO)
+            {
+                /* disable Auto-Neg and force speed to be 10Mbps */
+                u16Data = u16Data & QD_PHY_DUPLEX;
+
+                if((retVal=hwPhyReset(dev,hwPort,u16Data)) != GT_OK)
+                {
+                    DBG_INFO(("Softreset failed.\n"));
+                    gtSemGive(dev,dev->phyRegsSem);
+                    return retVal;
+                }
+            }
+        }
+    }
+
+    BOOL_2_BIT(enable,u16Data);
+
+    /* Write to Phy Control Register.  */
+    retVal = hwSetPhyRegField(dev,hwPort,QD_PHY_CONTROL_REG,14,1,u16Data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetPortLoopback
+*
+* DESCRIPTION:
+* Get Internal Port Loopback state.
+* For 10/100 Fast Ethernet PHY, speed of Loopback is determined as follows:
+*   If Auto-Negotiation is enabled, this routine disables Auto-Negotiation and
+*   forces speed to be 10Mbps.
+*   If Auto-Negotiation is disabled, the forced speed is used.
+*   Disabling Loopback simply clears bit 14 of control register(0.14). Therefore,
+*   it is recommended to call gprtSetPortAutoMode for PHY configuration after
+*   Loopback test.
+* For 10/100/1000 Gigagbit Ethernet PHY, speed of Loopback is determined as follows:
+*   If Auto-Negotiation is enabled and Link is active, the current speed is used.
+*   If Auto-Negotiation is disabled, the forced speed is used.
+*   All other cases, default MAC Interface speed is used. Please refer to the data
+*   sheet for the information of the default MAC Interface speed.
+*
+*
+* INPUTS:
+*       port - The logical port number, unless SERDES device is accessed
+*              The physical address, if SERDES device is accessed
+*
+* OUTPUTS:
+*       enable - If GT_TRUE,  loopback mode is enabled
+*       If GT_FALSE,  loopback mode is disabled
+*
+* RETURNS:
+*       GT_OK - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       data sheet register 0.14 - Loop_back
+*
+*******************************************************************************/
+GT_STATUS gprtGetPortLoopback
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    OUT GT_BOOL  *enable
+)
+{
+    GT_STATUS      retVal;         /* Functions return value.      */
+    GT_U8          hwPort;         /* the physical port number     */
+    GT_U16         u16Data;
+    GT_PHY_INFO    phyInfo;
+
+    DBG_INFO(("gprtGetPortLoopback Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    /*get loopback state*/
+    retVal = hwGetPhyRegField(dev,hwPort,QD_PHY_CONTROL_REG,14,1,&u16Data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+        BIT_2_BOOL(u16Data, *enable);
+    }
+
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtSetPortLineLoopback
+*
+* DESCRIPTION:
+*        Enable/Disable Port Line Loopback.
+*
+* INPUTS:
+*        port   - The logical port number, unless SERDES device is accessed
+*                 The physical address, if SERDES device is accessed
+*        enable - If GT_TRUE, enable loopback mode
+*                 If GT_FALSE, disable loopback mode
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*        data sheet register FE:28.4, GE:21_2.14  - Loop_back
+*
+*******************************************************************************/
+GT_STATUS gprtSetPortLineLoopback
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL   enable
+)
+{
+    GT_STATUS      retVal;         /* Functions return value.      */
+    GT_U8          hwPort;         /* the physical port number     */
+    GT_U16         u16Data;
+    GT_PHY_INFO    phyInfo;
+    GT_U16         pageReg;
+
+    DBG_INFO(("gprtSetPortLineLoopback Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    BOOL_2_BIT(enable,u16Data);
+
+    /* GE Phy */
+    if ((phyInfo.flag & GT_PHY_GIGABIT))
+    {
+        if(driverPagedAccessStart(dev,hwPort,phyInfo.pageType,0,&pageReg) != GT_OK)
+        {
+            return GT_FAIL;
+        }
+        /* Write to GE PHY MAC specific control register.  */
+        retVal = hwSetPagedPhyRegField(dev,hwPort, 2, QD_PHY_GE_LINE_LOOPBACK_REG,14,1,phyInfo.anyPage, u16Data);
+
+        if(driverPagedAccessStop(dev,hwPort,phyInfo.pageType,0,pageReg) != GT_OK)
+        {
+            return GT_FAIL;
+        }
+    }
+    else /* FE Phy */
+    {
+        /* Write to FE PHY specific control register.  */
+        retVal = hwSetPhyRegField(dev,hwPort,QD_PHY_FE_LINE_LOOPBACK_REG,4,1,u16Data);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetPortLineLoopback
+*
+* DESCRIPTION:
+*       Get Port Line Loopback status.
+*
+*
+* INPUTS:
+*       port - The logical port number, unless SERDES device is accessed
+*              The physical address, if SERDES device is accessed
+*
+* OUTPUTS:
+*       enable - If GT_TRUE, enable loopback mode
+*                If GT_FALSE, disable loopback mode* enable - If GT_TRUE, enable loopback mode
+*                If GT_FALSE, disable loopback mode
+*
+* RETURNS:
+*      GT_OK - on success
+*      GT_FAIL - on error
+*
+* COMMENTS:
+*      data sheet register FE:28.4, GE:21_2.14  - Loop_back
+*
+*******************************************************************************/
+GT_STATUS gprtGetPortLineLoopback
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    OUT GT_BOOL  *enable
+)
+{
+    GT_STATUS      retVal;         /* Functions return value.      */
+    GT_U8          hwPort;         /* the physical port number     */
+    GT_U16         u16Data;
+    GT_PHY_INFO    phyInfo;
+    GT_U16         pageReg;
+
+    DBG_INFO(("gprtGetPortLineLoopback Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+     /* GE Phy */
+    if ((phyInfo.flag & GT_PHY_GIGABIT))
+    {
+        if(driverPagedAccessStart(dev,hwPort,phyInfo.pageType,0,&pageReg) != GT_OK)
+        {
+            return GT_FAIL;
+        }
+        /* Read to GE PHY MAC specific control register.  */
+        retVal = hwGetPagedPhyRegField(dev,hwPort, 2, QD_PHY_GE_LINE_LOOPBACK_REG,14,1,phyInfo.anyPage,&u16Data);
+
+        if(driverPagedAccessStop(dev,hwPort,phyInfo.pageType,0,pageReg) != GT_OK)
+        {
+            return GT_FAIL;
+        }
+    }
+    else /* FE Phy */
+    {
+        /* Read to FE PHY specific control register.  */
+        retVal = hwGetPhyRegField(dev,hwPort,QD_PHY_FE_LINE_LOOPBACK_REG,4,1,&u16Data);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    BIT_2_BOOL(u16Data, *enable);
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtSetPortSpeed
+*
+* DESCRIPTION:
+*         Sets speed for a specific logical port. This function will keep the duplex
+*        mode and loopback mode to the previous value, but disable others, such as
+*        Autonegotiation.
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*        speed - port speed.
+*                PHY_SPEED_10_MBPS for 10Mbps
+*                PHY_SPEED_100_MBPS for 100Mbps
+*                PHY_SPEED_1000_MBPS for 1000Mbps
+*
+* OUTPUTS:
+* None.
+*
+* RETURNS:
+* GT_OK - on success
+* GT_FAIL - on error
+*
+* COMMENTS:
+* data sheet register 0.13 - Speed Selection (LSB)
+* data sheet register 0.6  - Speed Selection (MSB)
+*
+*******************************************************************************/
+
+GT_STATUS gprtSetPortSpeed
+(
+IN GT_QD_DEV *dev,
+IN GT_LPORT  port,
+IN GT_PHY_SPEED speed
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16             u16Data;
+    GT_PHY_INFO        phyInfo;
+    GT_STATUS        retVal;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtSetPortSpeed_mad(dev, port, speed);
+#endif
+
+    DBG_INFO(("gprtSetPortSpeed Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if(hwReadPhyReg(dev,hwPort,QD_PHY_CONTROL_REG,&u16Data) != GT_OK)
+    {
+        DBG_INFO(("Not able to read Phy Reg(port:%d,offset:%d).\n",hwPort,QD_PHY_CONTROL_REG));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    switch(speed)
+    {
+        case PHY_SPEED_10_MBPS:
+            if ((phyInfo.flag & GT_PHY_GIGABIT) && !(phyInfo.flag & GT_PHY_COPPER))
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return GT_BAD_PARAM;
+            }
+            u16Data = u16Data & (QD_PHY_LOOPBACK | QD_PHY_DUPLEX);
+            break;
+        case PHY_SPEED_100_MBPS:
+            u16Data = (u16Data & (QD_PHY_LOOPBACK | QD_PHY_DUPLEX)) | QD_PHY_SPEED;
+            break;
+        case PHY_SPEED_1000_MBPS:
+            if (!(phyInfo.flag & GT_PHY_GIGABIT))
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return GT_BAD_PARAM;
+            }
+            u16Data = (u16Data & (QD_PHY_LOOPBACK | QD_PHY_DUPLEX)) | QD_PHY_SPEED_MSB;
+            break;
+        default:
+            gtSemGive(dev,dev->phyRegsSem);
+            return GT_FAIL;
+    }
+
+    DBG_INFO(("Write to phy(%d) register: regAddr 0x%x, data %#x",
+              hwPort,QD_PHY_CONTROL_REG,u16Data));
+
+    retVal = hwPhyReset(dev,hwPort,u16Data);
+      gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtPortAutoNegEnable
+*
+* DESCRIPTION:
+*         Enable/disable an Auto-Negotiation.
+*        This routine simply sets Auto Negotiation bit (bit 12) of Control
+*        Register and reset the phy.
+*        For Speed and Duplex selection, please use gprtSetPortAutoMode.
+*
+* INPUTS:
+*        port -  The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*         state - GT_TRUE for enable Auto-Negotiation,
+*                GT_FALSE otherwise
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK     - on success
+*         GT_FAIL     - on error
+*
+* COMMENTS:
+*         data sheet register 0.12 - Auto-Negotiation Enable
+*         data sheet register 4.8, 4.7, 4.6, 4.5 - Auto-Negotiation Advertisement
+*
+*******************************************************************************/
+GT_STATUS gprtPortAutoNegEnable
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL   state
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16             u16Data;
+    GT_STATUS        retVal;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtPortAutoNegEnable_mad(dev, port, state);
+#endif
+
+    DBG_INFO(("gprtPortAutoNegEnable Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if(!IS_CONFIGURABLE_PHY(dev,hwPort))
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(hwReadPhyReg(dev,hwPort,QD_PHY_CONTROL_REG,&u16Data) != GT_OK)
+    {
+        DBG_INFO(("Not able to read Phy Reg(port:%d,offset:%d).\n",hwPort,QD_PHY_CONTROL_REG));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if(state)
+    {
+        u16Data = (u16Data & (QD_PHY_SPEED | QD_PHY_DUPLEX)) | QD_PHY_AUTONEGO;
+    }
+    else
+    {
+        u16Data = u16Data & (QD_PHY_SPEED | QD_PHY_DUPLEX);
+    }
+
+
+    DBG_INFO(("Write to phy(%d) register: regAddr 0x%x, data %#x",
+              hwPort,QD_PHY_CONTROL_REG,u16Data));
+
+    retVal = hwPhyReset(dev,hwPort,u16Data);
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetPortAutoNegState
+*
+* DESCRIPTION:
+*         Read the auto negotiation state of specific logical port.
+*         This routine simply reads Auto Negotiation bit (bit 12) of Control
+*         Register.
+*
+* INPUTS:
+*         port    - The logical port number, unless SERDES device is accessed
+*                   The physical address, if SERDES device is accessed
+*
+* OUTPUTS:
+*         state   - GT_TRUE for enable Auto-Negotiation,
+*                   GT_FALSE otherwise
+*
+* RETURNS:
+*         GT_OK   - on success
+*         GT_FAIL - on error
+*
+* COMMENTS:
+*         data sheet register 0.12 - Auto-Negotiation Enable
+*         data sheet register 4.8, 4.7, 4.6, 4.5 - Auto-Negotiation Advertisement
+*******************************************************************************/
+GT_STATUS gprtGetPortAutoNegState
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_BOOL   *state
+)
+{
+    GT_U8        hwPort;  /* the physical port number     */
+    GT_U16       u16Data;
+    GT_STATUS    retVal;
+
+    DBG_INFO(("gprtGetPortAutoNegState Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if(!IS_CONFIGURABLE_PHY(dev,hwPort))
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* read the auto negotiation state from bit 12 of PHY control register */
+    if((retVal=hwGetPhyRegField(dev,hwPort,QD_PHY_CONTROL_REG,12,1,&u16Data)) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return retVal;
+    }
+
+    BIT_2_BOOL(u16Data, *state);
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtPortPowerDown
+*
+* DESCRIPTION:
+*         Enable/disable (power down) on specific logical port.
+*        Phy configuration remains unchanged after Power down.
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*         state -    GT_TRUE: power down
+*                 GT_FALSE: normal operation
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK     - on success
+*         GT_FAIL     - on error
+*
+* COMMENTS:
+*         data sheet register 0.11 - Power Down
+*
+*******************************************************************************/
+
+GT_STATUS gprtPortPowerDown
+(
+IN GT_QD_DEV *dev,
+IN GT_LPORT  port,
+IN GT_BOOL   state
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16             u16Data;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtPortPowerDown_mad(dev, port, state);
+#endif
+
+    DBG_INFO(("gprtPortPowerDown Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if(!IS_CONFIGURABLE_PHY(dev,hwPort))
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(state,u16Data);
+
+    if((retVal=hwSetPhyRegField(dev,hwPort,QD_PHY_CONTROL_REG,11,1,u16Data)) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return retVal;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gprtGetPortPowerDown
+*
+* DESCRIPTION:
+*         Read Port state (power down/normal operation) on specific logical port.
+*
+* INPUTS:
+*         port    - The logical port number, unless SERDES device is accessed
+*                   The physical address, if SERDES device is accessed
+*
+* OUTPUTS:
+*         state   - GT_TRUE: power down
+*                   GT_FALSE: normal operation
+*
+* RETURNS:
+*         GT_OK   - on success
+*         GT_FAIL - on error
+*
+* COMMENTS:
+*         data sheet register 0.11 - Power Down
+*
+*******************************************************************************/
+GT_STATUS gprtGetPortPowerDown
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_BOOL   *state
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U8        hwPort;         /* the physical port number     */
+    GT_U16       u16Data;
+
+#ifdef GT_USE_MAD
+
+#endif
+
+    DBG_INFO(("gprtGetPortPowerDown Called.\n"));
+
+    if(state == NULL)
+    {
+        DBG_INFO(("input pointer is NULL, return\n"));
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if(!IS_CONFIGURABLE_PHY(dev,hwPort))
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if((retVal=hwGetPhyRegField(dev,hwPort,QD_PHY_CONTROL_REG,11,1,&u16Data)) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return retVal;
+    }
+
+    BIT_2_BOOL(u16Data, *state);
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gprtPortRestartAutoNeg
+*
+* DESCRIPTION:
+*         Restart AutoNegotiation. If AutoNegotiation is not enabled, it'll enable
+*        it. Loopback and Power Down will be disabled by this routine.
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK     - on success
+*         GT_FAIL     - on error
+*
+* COMMENTS:
+*         data sheet register 0.9 - Restart Auto-Negotiation
+*
+*******************************************************************************/
+
+GT_STATUS gprtPortRestartAutoNeg
+(
+IN GT_QD_DEV *dev,
+IN GT_LPORT  port
+)
+{
+    GT_STATUS       retVal;
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16             u16Data;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtPortRestartAutoNeg_mad(dev, port);
+#endif
+
+    DBG_INFO(("gprtPortRestartAutoNeg Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if(!IS_CONFIGURABLE_PHY(dev,hwPort))
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(hwReadPhyReg(dev,hwPort,QD_PHY_CONTROL_REG,&u16Data) != GT_OK)
+    {
+        DBG_INFO(("Not able to read Phy Reg(port:%d,offset:%d).\n",hwPort,QD_PHY_CONTROL_REG));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    u16Data &= (QD_PHY_DUPLEX | QD_PHY_SPEED);
+    u16Data |= (QD_PHY_RESTART_AUTONEGO | QD_PHY_AUTONEGO);
+
+    DBG_INFO(("Write to phy(%d) register: regAddr 0x%x, data %#x",
+              hwPort,QD_PHY_CONTROL_REG,u16Data));
+
+    /* Write to Phy Control Register.  */
+    retVal = hwWritePhyReg(dev,hwPort,QD_PHY_CONTROL_REG,u16Data);
+    gtSemGive(dev,dev->phyRegsSem);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtSetPortDuplexMode
+*
+* DESCRIPTION:
+*         Sets duplex mode for a specific logical port. This function will keep
+*        the speed and loopback mode to the previous value, but disable others,
+*        such as Autonegotiation.
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*         dMode    - dulpex mode
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK     - on success
+*         GT_FAIL     - on error
+*
+* COMMENTS:
+*         data sheet register 0.8 - Duplex Mode
+*
+*******************************************************************************/
+GT_STATUS gprtSetPortDuplexMode
+(
+IN GT_QD_DEV *dev,
+IN GT_LPORT  port,
+IN GT_BOOL   dMode
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16             u16Data;
+    GT_STATUS        retVal;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtSetPortDuplexMode_mad(dev, port, dMode);
+#endif
+
+    DBG_INFO(("gprtSetPortDuplexMode Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if(!IS_CONFIGURABLE_PHY(dev,hwPort))
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(hwReadPhyReg(dev,hwPort,QD_PHY_CONTROL_REG,&u16Data) != GT_OK)
+    {
+        DBG_INFO(("Not able to read Phy Reg(port:%d,offset:%d).\n",hwPort,QD_PHY_CONTROL_REG));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if(dMode)
+    {
+        u16Data = (u16Data & (QD_PHY_LOOPBACK | QD_PHY_SPEED | QD_PHY_SPEED_MSB)) | QD_PHY_DUPLEX;
+    }
+    else
+    {
+        u16Data = u16Data & (QD_PHY_LOOPBACK | QD_PHY_SPEED | QD_PHY_SPEED_MSB);
+    }
+
+
+    DBG_INFO(("Write to phy(%d) register: regAddr 0x%x, data %#x",
+              hwPort,QD_PHY_CONTROL_REG,u16Data));
+
+    /* Write to Phy Control Register.  */
+    retVal = hwPhyReset(dev,hwPort,u16Data);
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetPortAutoMode
+*
+* DESCRIPTION:
+*         This routine sets up the port with given Auto Mode.
+*        Supported mode is as follows:
+*        - Auto for both speed and duplex.
+*        - Auto for speed only and Full duplex.
+*        - Auto for speed only and Half duplex.
+*        - Auto for duplex only and speed 1000Mbps.
+*        - Auto for duplex only and speed 100Mbps.
+*        - Auto for duplex only and speed 10Mbps.
+*        - Speed 1000Mbps and Full duplex.
+*        - Speed 1000Mbps and Half duplex.
+*        - Speed 100Mbps and Full duplex.
+*        - Speed 100Mbps and Half duplex.
+*        - Speed 10Mbps and Full duplex.
+*        - Speed 10Mbps and Half duplex.
+*
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*         mode - Auto Mode to be written
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - on device without copper
+*
+* COMMENTS:
+*         data sheet register 4.8, 4.7, 4.6, and 4.5 Autonegotiation Advertisement
+*         data sheet register 4.6, 4.5 Autonegotiation Advertisement for 1000BX
+*         data sheet register 9.9, 9.8 Autonegotiation Advertisement for 1000BT
+*******************************************************************************/
+
+GT_STATUS gprtSetPortAutoMode
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_PHY_AUTO_MODE mode
+)
+{
+
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_PHY_INFO        phyInfo;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtSetPortAutoMode_mad(dev, port, mode);
+#endif
+
+    DBG_INFO(("gprtSetPortAutoMode Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    retVal = GT_NOT_SUPPORTED;
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    retVal = phySetAutoMode(dev,hwPort,&phyInfo,mode);
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+
+}
+
+/*******************************************************************************
+* gprtGetPortAutoMode
+*
+* DESCRIPTION:
+*        This routine get Auto Mode of specific port.
+*        Supported mode is as follows:
+*        - Auto for both speed and duplex.
+*        - Auto for speed only and Full duplex.
+*        - Auto for speed only and Half duplex.
+*        - Auto for duplex only and speed 1000Mbps.
+*        - Auto for duplex only and speed 100Mbps.
+*        - Auto for duplex only and speed 10Mbps.
+*        - Speed 1000Mbps and Full duplex.
+*        - Speed 1000Mbps and Half duplex.
+*        - Speed 100Mbps and Full duplex.
+*        - Speed 100Mbps and Half duplex.
+*        - Speed 10Mbps and Full duplex.
+*        - Speed 10Mbps and Half duplex.
+*
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                  The physical address, if SERDES device is accessed
+*
+* OUTPUTS:
+*        mode -    Auto Mode to be written
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - on device without copper
+*
+* COMMENTS:
+*         data sheet register 4.8, 4.7, 4.6, and 4.5 Autonegotiation Advertisement
+*         data sheet register 4.6, 4.5 Autonegotiation Advertisement for 1000BX
+*         data sheet register 9.9, 9.8 Autonegotiation Advertisement for 1000BT
+*******************************************************************************/
+
+GT_STATUS gprtGetPortAutoMode
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT         port,
+    OUT GT_PHY_AUTO_MODE *mode
+)
+{
+    GT_STATUS       retVal; /* Functions return value.  */
+    GT_U8           hwPort; /* the physical port number */
+    GT_PHY_INFO     phyInfo;
+
+    DBG_INFO(("gprtGetPortAutoMode Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    retVal = GT_NOT_SUPPORTED;
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        DBG_INFO(("PHY device is not configurable.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    retVal = phyGetAutoMode(dev,hwPort,&phyInfo,mode);
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+
+}
+
+/*******************************************************************************
+* gprtSetPause
+*
+* DESCRIPTION:
+*       This routine will set the pause bit in Autonegotiation Advertisement
+*        Register. And restart the autonegotiation.
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*        state - GT_PHY_PAUSE_MODE enum value.
+*                GT_PHY_NO_PAUSE        - disable pause
+*                 GT_PHY_PAUSE        - support pause
+*                GT_PHY_ASYMMETRIC_PAUSE    - support asymmetric pause
+*                GT_PHY_BOTH_PAUSE    - support both pause and asymmetric pause
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+* COMMENTS:
+* data sheet register 4.10 Autonegotiation Advertisement Register
+*******************************************************************************/
+
+GT_STATUS gprtSetPause
+(
+IN GT_QD_DEV *dev,
+IN GT_LPORT  port,
+IN GT_PHY_PAUSE_MODE state
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16             u16Data,regStart;
+    GT_STATUS        retVal = GT_OK;
+    GT_PHY_INFO        phyInfo;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtSetPause_mad(dev, port, state);
+#endif
+
+    DBG_INFO(("phySetPause Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    regStart = 10;
+
+    if(state & GT_PHY_ASYMMETRIC_PAUSE)
+    {
+        if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+        {
+            DBG_INFO(("Unknown PHY device.\n"));
+            gtSemGive(dev,dev->phyRegsSem);
+            return GT_FAIL;
+        }
+
+        if (!(phyInfo.flag & GT_PHY_GIGABIT))
+        {
+            DBG_INFO(("Not Supported\n"));
+            gtSemGive(dev,dev->phyRegsSem);
+            return GT_BAD_PARAM;
+        }
+
+        if(!(phyInfo.flag & GT_PHY_COPPER))
+        {
+            regStart = 7;
+        }
+
+    }
+
+    u16Data = state;
+
+    /* Write to Phy AutoNegotiation Advertisement Register.  */
+    if((retVal=hwSetPhyRegField(dev,hwPort,QD_PHY_AUTONEGO_AD_REG,(GT_U8)regStart,2,u16Data)) != GT_OK)
+    {
+        DBG_INFO(("Not able to write Phy Reg(port:%d,offset:%d).\n",hwPort,QD_PHY_AUTONEGO_AD_REG));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    /* Restart Auto Negotiation */
+    if((retVal=hwSetPhyRegField(dev,hwPort,QD_PHY_CONTROL_REG,9,1,1)) != GT_OK)
+    {
+        DBG_INFO(("Not able to write Phy Reg(port:%d,offset:%d,data:%#x).\n",hwPort,QD_PHY_AUTONEGO_AD_REG,u16Data));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetPause
+*
+* DESCRIPTION:
+*       This routine will get the pause bit in Autonegotiation Advertisement
+*       Register.
+*
+* INPUTS:
+*        port -  The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*
+*
+* OUTPUTS:
+*        state - GT_PHY_PAUSE_MODE enum value.
+*                GT_PHY_NO_PAUSE         - disable pause
+*                GT_PHY_PAUSE            - support pause
+*                GT_PHY_ASYMMETRIC_PAUSE - support asymmetric pause
+*                GT_PHY_BOTH_PAUSE       - support both pause and asymmetric pause
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+* COMMENTS:
+*       data sheet register 4.10 Autonegotiation Advertisement Register
+*******************************************************************************/
+GT_STATUS gprtGetPause
+(
+    IN GT_QD_DEV           *dev,
+    IN GT_LPORT            port,
+    OUT  GT_PHY_PAUSE_MODE *state
+)
+{
+    GT_U8          hwPort;  /* the physical port number */
+    GT_U16         u16Data;
+    GT_U16         regStart;
+    GT_STATUS      retVal = GT_OK;
+    GT_PHY_INFO    phyInfo;
+
+    DBG_INFO(("gprtGetPause Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    regStart = 10;
+
+    /* Read Phy AutoNegotiation Advertisement Register.  */
+    if((retVal=hwGetPhyRegField(dev,hwPort,QD_PHY_AUTONEGO_AD_REG,(GT_U8)regStart,2,&u16Data)) != GT_OK)
+    {
+        DBG_INFO(("Not able to write Phy Reg(port:%d,offset:%d).\n",hwPort,QD_PHY_AUTONEGO_AD_REG));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+
+    *state = u16Data;
+
+    return retVal;
+}
+
+static
+GT_STATUS dteWorkAround_Phy100M
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8            hwPort
+)
+{
+    GT_STATUS status = GT_OK;
+    GT_U32 threshold[] = {0x000B,0x0000,0x8780,0x0000,0x8F80,0x0000,
+                          0x9780,0x0000,0x9F80,0x0000,0xA780,0x0000,
+                          0xAF80,0x0000,0xB780,0x0000,0xBF80,0x0000,
+                          0xC780,0x0000,0xCF80,0x0000,0xD780,0x0000,
+                          0xDF80,0x0000,0xE780,0x0000,0xEF80,0x0000,
+                          0xF780,0x0000,0xFF80,0x0000};
+    int i, thresholdSize;
+
+    /* force r125 clock */
+    if((status= hwWritePhyReg(dev,hwPort,0x1D,0x0003)) != GT_OK)
+    {
+        return status;
+    }
+    if((status= hwWritePhyReg(dev,hwPort,0x1E,0x807f)) != GT_OK)
+    {
+        return status;
+    }
+
+    /* write thresholds */
+    if((status= hwWritePhyReg(dev,hwPort,0x1D,0x000B)) != GT_OK)
+    {
+        return status;
+    }
+
+    thresholdSize = sizeof(threshold)/sizeof(GT_U32);
+
+    for(i=0; i<thresholdSize; i++)
+    {
+        if((status= hwWritePhyReg(dev,hwPort,0x1E,(GT_U16)threshold[i])) != GT_OK)
+        {
+            return status;
+        }
+    }
+
+    /* setting adc Masking */
+    if((status= hwWritePhyReg(dev,hwPort,0x1D,0x0001)) != GT_OK)
+    {
+        return status;
+    }
+    if((status= hwWritePhyReg(dev,hwPort,0x1E,0x4000)) != GT_OK)
+    {
+        return status;
+    }
+
+    /* setting noise level */
+    if((status= hwWritePhyReg(dev,hwPort,0x1D,0x0005)) != GT_OK)
+    {
+        return status;
+    }
+    if((status= hwWritePhyReg(dev,hwPort,0x1E,0xA000)) != GT_OK)
+    {
+        return status;
+    }
+
+    /*
+        offseting cable length measurement by 6.72m(2*4*0.84m)
+        set 30_10.14:11 to 0x1001 for cable length measure.
+    */
+    if((status= hwWritePhyReg(dev,hwPort,0x1D,0x000a)) != GT_OK)
+    {
+        return status;
+    }
+    if((status= hwWritePhyReg(dev,hwPort,0x1E,0x4840)) != GT_OK)
+    {
+        return status;
+    }
+
+    /* release force r125 clock */
+    if((status= hwWritePhyReg(dev,hwPort,0x1D,0x0003)) != GT_OK)
+    {
+        return status;
+    }
+    if((status= hwWritePhyReg(dev,hwPort,0x1E,0x0000)) != GT_OK)
+    {
+        return status;
+    }
+
+
+    return status;
+}
+
+static
+GT_STATUS dteWorkAround_Phy1000M
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8            hwPort
+)
+{
+    GT_STATUS status = GT_OK;
+    GT_U32 threshold[] = {0x0000,0x8780,0x0000,0x8F80,0x0000,0x9780,
+                          0x0000,0x9F80,0x0000,0xA780,0x0000,0xAF80,
+                          0x0000,0xB780,0x0000,0xBF80,0x0000,0xC780,
+                          0x0000,0xCF80,0x0000,0xD780,0x0000,0xDF80,
+                          0x0000,0xE780,0x0000,0xEF80,0x0000,0xF780,
+                          0x0000,0xFF80,0x0000};
+    int i, thresholdSize;
+
+    /*  */
+    if((status= hwWritePhyReg(dev,hwPort,0x1D,0x001B)) != GT_OK)
+    {
+        return status;
+    }
+    if((status= hwWritePhyReg(dev,hwPort,0x1E,0x43FF)) != GT_OK)
+    {
+        return status;
+    }
+
+    /*  */
+    if((status= hwWritePhyReg(dev,hwPort,0x1D,0x001C)) != GT_OK)
+    {
+        return status;
+    }
+    if((status= hwWritePhyReg(dev,hwPort,0x1E,0x9999)) != GT_OK)
+    {
+        return status;
+    }
+
+    /*  */
+    if((status= hwWritePhyReg(dev,hwPort,0x1D,0x001F)) != GT_OK)
+    {
+        return status;
+    }
+    if((status= hwWritePhyReg(dev,hwPort,0x1E,0xE00C)) != GT_OK)
+    {
+        return status;
+    }
+
+    /*  */
+    if((status= hwWritePhyReg(dev,hwPort,0x1D,0x0018)) != GT_OK)
+    {
+        return status;
+    }
+    if((status= hwWritePhyReg(dev,hwPort,0x1E,0xFFA1)) != GT_OK)
+    {
+        return status;
+    }
+
+    /* write thresholds */
+    if((status= hwWritePhyReg(dev,hwPort,0x1D,0x0010)) != GT_OK)
+    {
+        return status;
+    }
+
+    thresholdSize = sizeof(threshold)/sizeof(GT_U32);
+
+    for(i=0; i<thresholdSize; i++)
+    {
+        if((status= hwWritePhyReg(dev,hwPort,0x1E,(GT_U16)threshold[i])) != GT_OK)
+        {
+            return status;
+        }
+    }
+
+    return status;
+}
+
+static
+GT_STATUS feSetDTE
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     hwPort,
+    IN  GT_BOOL   state
+)
+{
+    GT_U16             u16Data;
+    GT_STATUS        retVal = GT_OK;
+
+    if((retVal = hwReadPhyReg(dev,hwPort,0x10,&u16Data)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    u16Data = state?(u16Data|0x8000):(u16Data&(~0x8000));
+
+    if((retVal = hwWritePhyReg(dev,hwPort,0x10,u16Data)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    /* soft reset */
+    if((retVal = hwPhyReset(dev,hwPort,0xFF)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    return retVal;
+}
+
+static
+GT_STATUS gigSetDTE
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     hwPort,
+    IN  GT_BOOL   state
+)
+{
+    GT_U16             u16Data;
+    GT_STATUS        retVal = GT_OK;
+
+    if((retVal = hwReadPhyReg(dev,hwPort,20,&u16Data)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    u16Data = state?(u16Data|0x4):(u16Data&(~0x4));
+
+    if((retVal = hwWritePhyReg(dev,hwPort,20,u16Data)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    /* soft reset */
+    if((retVal = hwPhyReset(dev,hwPort,0xFF)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    return retVal;
+}
+
+static
+GT_STATUS gigMPSetDTE
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     hwPort,
+    IN  GT_BOOL   state
+)
+{
+    GT_U16             u16Data;
+    GT_STATUS        retVal = GT_OK;
+
+    if((retVal = hwReadPagedPhyReg(dev,hwPort,0,26,0,&u16Data)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    u16Data = state?(u16Data|0x100):(u16Data&(~0x100));
+
+    if((retVal = hwWritePagedPhyReg(dev,hwPort,0,26,0,u16Data)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    /* soft reset */
+    if((retVal = hwPhyReset(dev,hwPort,0xFF)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtSetDTEDetect
+*
+* DESCRIPTION:
+*       This routine enables/disables DTE.
+*
+* INPUTS:
+*         port - The logical port number
+*         mode - either GT_TRUE(for enable) or GT_FALSE(for disable)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+GT_STATUS gprtSetDTEDetect
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL   state
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_STATUS        retVal = GT_OK;
+    GT_PHY_INFO    phyInfo;
+    GT_BOOL            autoOn;
+    GT_U16            pageReg;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtSetDTEDetect_mad(dev, port, state);
+#endif
+
+    DBG_INFO(("phySetDTE Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the port supports DTE */
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (!(phyInfo.flag & GT_PHY_DTE_CAPABLE))
+    {
+        DBG_INFO(("Not Supported\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    switch(phyInfo.dteType)
+    {
+        case GT_PHY_DTE_TYPE1:
+            /* FE Phy needs work-around */
+            if((retVal = feSetDTE(dev,hwPort,state)) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return retVal;
+            }
+
+            if(state == GT_FALSE)
+                break;
+
+            if((retVal = dteWorkAround_Phy100M(dev,hwPort)) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return retVal;
+            }
+            break;
+        case GT_PHY_DTE_TYPE3:
+            /* Gigabit Phy with work-around required */
+            if((retVal = gigSetDTE(dev,hwPort,state)) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return retVal;
+            }
+
+            if(state == GT_FALSE)
+                break;
+
+            if((retVal = dteWorkAround_Phy1000M(dev,hwPort)) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return retVal;
+            }
+            break;
+
+        case GT_PHY_DTE_TYPE2:
+            /* no workaround required */
+            if((retVal = gigSetDTE(dev,hwPort,state)) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return retVal;
+            }
+
+            break;
+        case GT_PHY_DTE_TYPE4:
+            /* no workaround required */
+            if(driverPagedAccessStart(dev,hwPort,phyInfo.pageType,&autoOn,&pageReg) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return GT_FAIL;
+            }
+
+            if((retVal = gigMPSetDTE(dev,hwPort,state)) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return retVal;
+            }
+
+            if(driverPagedAccessStop(dev,hwPort,phyInfo.pageType,autoOn,pageReg) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return GT_FAIL;
+            }
+            break;
+        case GT_PHY_DTE_TYPE5:
+            /* FE Phy */
+            if((retVal = feSetDTE(dev,hwPort,state)) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return retVal;
+            }
+            break;
+
+        default:
+            gtSemGive(dev,dev->phyRegsSem);
+            return GT_NOT_SUPPORTED;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetDTEDetectStatus
+*
+* DESCRIPTION:
+*       This routine gets DTE status.
+*
+* INPUTS:
+*         port - The logical port number
+*
+* OUTPUTS:
+*       status - GT_TRUE, if link partner needs DTE power.
+*                 GT_FALSE, otherwise.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+GT_STATUS gprtGetDTEDetectStatus
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_BOOL   *state
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16             u16Data,pageReg;
+    GT_STATUS        retVal = GT_OK;
+    GT_PHY_INFO    phyInfo;
+    GT_BOOL            autoOn;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtGetDTEDetectStatus_mad(dev, port, state);
+#endif
+
+    DBG_INFO(("gprtGetDTEStatus Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the port supports DTE */
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (!(phyInfo.flag & GT_PHY_DTE_CAPABLE))
+    {
+        DBG_INFO(("Not Supported\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    switch(phyInfo.dteType)
+    {
+        case GT_PHY_DTE_TYPE1:
+            /* FE Phy needs work-around */
+            if((retVal = hwReadPhyReg(dev,hwPort,17,&u16Data)) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return retVal;
+            }
+            *state = (u16Data & 0x8000)?GT_TRUE:GT_FALSE;
+
+            break;
+        case GT_PHY_DTE_TYPE2:
+        case GT_PHY_DTE_TYPE3:
+            if((retVal = hwReadPhyReg(dev,hwPort,27,&u16Data)) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return retVal;
+            }
+            *state = (u16Data & 0x10)?GT_TRUE:GT_FALSE;
+
+            break;
+        case GT_PHY_DTE_TYPE4:
+            if(driverPagedAccessStart(dev,hwPort,phyInfo.pageType,&autoOn,&pageReg) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return GT_FAIL;
+            }
+
+            if((retVal = hwReadPagedPhyReg(dev,hwPort,0,17,phyInfo.anyPage,&u16Data)) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return retVal;
+            }
+            *state = (u16Data & 0x4)?GT_TRUE:GT_FALSE;
+
+            if(driverPagedAccessStop(dev,hwPort,phyInfo.pageType,autoOn,pageReg) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return GT_FAIL;
+            }
+
+            break;
+        default:
+            gtSemGive(dev,dev->phyRegsSem);
+            return GT_NOT_SUPPORTED;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetDTEDetectDropWait
+*
+* DESCRIPTION:
+*       Once the PHY no longer detects that the link partner filter, the PHY
+*        will wait a period of time before clearing the power over Ethernet
+*        detection status bit. The wait time is 5 seconds multiplied by the
+*        given value.
+*
+* INPUTS:
+*         port - The logical port number
+*       waitTime - 0 ~ 15 (unit of 4 sec.)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+GT_STATUS gprtSetDTEDetectDropWait
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    IN  GT_U16    waitTime
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16             u16Data;
+    GT_STATUS        retVal = GT_OK;
+    GT_PHY_INFO    phyInfo;
+    GT_BOOL            autoOn;
+    GT_U16            pageReg;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtSetDTEDetectDropWait_mad(dev, port, waitTime);
+#endif
+
+    DBG_INFO(("gprtSetDTEDropWait Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the port supports DTE */
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (!(phyInfo.flag & GT_PHY_DTE_CAPABLE))
+    {
+        DBG_INFO(("Not Supported\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    switch(phyInfo.dteType)
+    {
+        case GT_PHY_DTE_TYPE1:
+            if((retVal = hwReadPhyReg(dev,hwPort,22,&u16Data)) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return retVal;
+            }
+            u16Data = (u16Data & ~(0xF<<12)) | ((waitTime & 0xF) << 12);
+
+            if((retVal = hwWritePhyReg(dev,hwPort,22,u16Data)) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return retVal;
+            }
+            break;
+        case GT_PHY_DTE_TYPE2:
+        case GT_PHY_DTE_TYPE3:
+            if((retVal = hwReadPhyReg(dev,hwPort,27,&u16Data)) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return retVal;
+            }
+            u16Data = (u16Data & ~(0xF<<5)) | ((waitTime & 0xF) << 5);
+
+            if((retVal = hwWritePhyReg(dev,hwPort,27,u16Data)) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return retVal;
+            }
+
+            break;
+        case GT_PHY_DTE_TYPE4:
+            if(driverPagedAccessStart(dev,hwPort,phyInfo.pageType,&autoOn,&pageReg) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return GT_FAIL;
+            }
+
+            if((retVal = hwReadPagedPhyReg(dev,hwPort,0,26,phyInfo.anyPage,&u16Data)) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return retVal;
+            }
+            u16Data = (u16Data & ~(0xF<<4)) | ((waitTime & 0xF) << 4);
+            if((retVal = hwWritePagedPhyReg(dev,hwPort,0,26,phyInfo.anyPage,u16Data)) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return retVal;
+            }
+
+            if(driverPagedAccessStop(dev,hwPort,phyInfo.pageType,autoOn,pageReg) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return GT_FAIL;
+            }
+
+            break;
+        default:
+            gtSemGive(dev,dev->phyRegsSem);
+            return GT_NOT_SUPPORTED;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetDTEDetectDropWait
+*
+* DESCRIPTION:
+*       Once the PHY no longer detects that the link partner filter, the PHY
+*        will wait a period of time before clearing the power over Ethernet
+*        detection status bit. The wait time is 5 seconds multiplied by the
+*        returned value.
+*
+* INPUTS:
+*         port - The logical port number
+*
+* OUTPUTS:
+*       waitTime - 0 ~ 15 (unit of 4 sec.)
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+GT_STATUS gprtGetDTEDetectDropWait
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_U16    *waitTime
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16             u16Data;
+    GT_STATUS        retVal = GT_OK;
+    GT_PHY_INFO    phyInfo;
+    GT_BOOL            autoOn;
+    GT_U16            pageReg;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtGetDTEDetectDropWait_mad(dev, port, waitTime);
+#endif
+
+    DBG_INFO(("gprtSetDTEDropWait Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (!(phyInfo.flag & GT_PHY_DTE_CAPABLE))
+    {
+        DBG_INFO(("Not Supported\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    switch(phyInfo.dteType)
+    {
+        case GT_PHY_DTE_TYPE1:
+            if((retVal = hwReadPhyReg(dev,hwPort,22,&u16Data)) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return retVal;
+            }
+            u16Data = (u16Data >> 12) & 0xF;
+
+            break;
+        case GT_PHY_DTE_TYPE2:
+        case GT_PHY_DTE_TYPE3:
+            if((retVal = hwReadPhyReg(dev,hwPort,27,&u16Data)) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return retVal;
+            }
+            u16Data = (u16Data >> 5) & 0xF;
+
+            break;
+        case GT_PHY_DTE_TYPE4:
+            if(driverPagedAccessStart(dev,hwPort,phyInfo.pageType,&autoOn,&pageReg) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return GT_FAIL;
+            }
+
+            if((retVal = hwReadPagedPhyReg(dev,hwPort,0,26,phyInfo.anyPage,&u16Data)) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return retVal;
+            }
+            u16Data = (u16Data >> 4) & 0xF;
+
+            if(driverPagedAccessStop(dev,hwPort,phyInfo.pageType,autoOn,pageReg) != GT_OK)
+            {
+                gtSemGive(dev,dev->phyRegsSem);
+                return GT_FAIL;
+            }
+            break;
+        default:
+            gtSemGive(dev,dev->phyRegsSem);
+            return GT_NOT_SUPPORTED;
+    }
+
+    *waitTime = u16Data;
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetEnergyDetect
+*
+* DESCRIPTION:
+*       Energy Detect power down mode enables or disables the PHY to wake up on
+*        its own by detecting activity on the CAT 5 cable.
+*
+* INPUTS:
+*         port - The logical port number
+*         mode - GT_EDETECT_MODE type
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+GT_STATUS gprtSetEnergyDetect
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    IN  GT_EDETECT_MODE   mode
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16             u16Data;
+    GT_STATUS        retVal = GT_OK;
+    GT_PHY_INFO    phyInfo;
+    GT_BOOL            autoOn;
+    GT_U16            pageReg;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtSetEnergyDetect_mad(dev, port, mode);
+#endif
+
+    DBG_INFO(("gprtSetEnergyDetect Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (phyInfo.flag & GT_PHY_SERDES_CORE)
+    {
+        DBG_INFO(("Not Supported.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+    else if (phyInfo.flag & GT_PHY_GIGABIT)
+    {
+        /* check if the mode is valid */
+        switch (mode)
+        {
+            case GT_EDETECT_OFF:
+                u16Data = 0;
+                break;
+            case GT_EDETECT_SENSE_PULSE:
+                u16Data = 3;
+                break;
+            case GT_EDETECT_SENSE:
+                u16Data = 2;
+                break;
+            default:
+                DBG_INFO(("Invalid paramerter.\n"));
+                gtSemGive(dev,dev->phyRegsSem);
+                return GT_BAD_PARAM;
+        }
+
+        if(driverPagedAccessStart(dev,hwPort,phyInfo.pageType,&autoOn,&pageReg) != GT_OK)
+        {
+            gtSemGive(dev,dev->phyRegsSem);
+            return GT_FAIL;
+        }
+
+        if((retVal = hwSetPagedPhyRegField(dev,hwPort,0,0x10,8,2,phyInfo.anyPage,u16Data)) != GT_OK)
+        {
+            gtSemGive(dev,dev->phyRegsSem);
+            return retVal;
+        }
+
+        if((retVal = hwPhyReset(dev,hwPort,0xFF)) != GT_OK)
+        {
+            gtSemGive(dev,dev->phyRegsSem);
+            return retVal;
+        }
+
+        if(driverPagedAccessStop(dev,hwPort,phyInfo.pageType,autoOn,pageReg) != GT_OK)
+        {
+            gtSemGive(dev,dev->phyRegsSem);
+            return GT_FAIL;
+        }
+    }
+    else    /* it's a Fast Ethernet device */
+    {
+        /* check if the mode is valid */
+        switch (mode)
+        {
+            case GT_EDETECT_OFF:
+                u16Data = 0;
+                break;
+            case GT_EDETECT_SENSE_PULSE:
+                u16Data = 1;
+                break;
+            case GT_EDETECT_SENSE:
+            default:
+                DBG_INFO(("Invalid paramerter.\n"));
+                gtSemGive(dev,dev->phyRegsSem);
+                return GT_BAD_PARAM;
+        }
+
+        if((retVal = hwSetPhyRegField(dev,hwPort,0x10,14,1,u16Data)) != GT_OK)
+        {
+            gtSemGive(dev,dev->phyRegsSem);
+            return retVal;
+        }
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetEnergyDetect
+*
+* DESCRIPTION:
+*       Energy Detect power down mode enables or disables the PHY to wake up on
+*        its own by detecting activity on the CAT 5 cable.
+*
+* INPUTS:
+*         port - The logical port number
+*
+* OUTPUTS:
+*       mode - GT_EDETECT_MODE type
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+GT_STATUS gprtGetEnergyDetect
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_EDETECT_MODE   *mode
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16             u16Data;
+    GT_STATUS        retVal = GT_OK;
+    GT_PHY_INFO    phyInfo;
+    GT_BOOL            autoOn;
+    GT_U16            pageReg;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtGetEnergyDetect_mad(dev, port, mode);
+#endif
+
+    DBG_INFO(("gprtGetEnergyDetect Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (phyInfo.flag & GT_PHY_SERDES_CORE)
+    {
+        DBG_INFO(("Not Supported.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+    else if (phyInfo.flag & GT_PHY_GIGABIT)
+    {
+        /* read the mode */
+
+        if(driverPagedAccessStart(dev,hwPort,phyInfo.pageType,&autoOn,&pageReg) != GT_OK)
+        {
+            gtSemGive(dev,dev->phyRegsSem);
+            return GT_FAIL;
+        }
+
+        if((retVal = hwGetPagedPhyRegField(dev,hwPort,0,0x10,8,2,phyInfo.anyPage,&u16Data)) != GT_OK)
+        {
+            gtSemGive(dev,dev->phyRegsSem);
+            return retVal;
+        }
+
+        if(driverPagedAccessStop(dev,hwPort,phyInfo.pageType,autoOn,pageReg) != GT_OK)
+        {
+            gtSemGive(dev,dev->phyRegsSem);
+            return GT_FAIL;
+        }
+
+        switch (u16Data)
+        {
+            case 0:
+            case 1:
+                *mode = GT_EDETECT_OFF;
+                break;
+            case 2:
+                *mode = GT_EDETECT_SENSE;
+                break;
+            case 3:
+                *mode = GT_EDETECT_SENSE_PULSE;
+                break;
+            default:
+                DBG_INFO(("Unknown value (shouldn't happen).\n"));
+                gtSemGive(dev,dev->phyRegsSem);
+                return GT_FAIL;
+        }
+
+    }
+    else    /* it's a Fast Ethernet device */
+    {
+        /* read the mode */
+        if((retVal = hwGetPhyRegField(dev,hwPort,0x10,14,1,&u16Data)) != GT_OK)
+        {
+            gtSemGive(dev,dev->phyRegsSem);
+            return retVal;
+        }
+
+        switch (u16Data)
+        {
+            case 0:
+                *mode = GT_EDETECT_OFF;
+                break;
+            case 1:
+                *mode = GT_EDETECT_SENSE_PULSE;
+                break;
+            default:
+                DBG_INFO(("Unknown value (shouldn't happen).\n"));
+                gtSemGive(dev,dev->phyRegsSem);
+                return GT_FAIL;
+        }
+
+    }
+
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSet1000TMasterMode
+*
+* DESCRIPTION:
+*       This routine sets the ports 1000Base-T Master mode and restart the Auto
+*        negotiation.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - GT_1000T_MASTER_SLAVE structure
+*                autoConfig   - GT_TRUE for auto, GT_FALSE for manual setup.
+*                masterPrefer - GT_TRUE if Master configuration is preferred.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSet1000TMasterMode
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_LPORT     port,
+    IN  GT_1000T_MASTER_SLAVE   *mode
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U8            hwPort;         /* the physical port number     */
+    GT_U16        data;
+    GT_PHY_INFO    phyInfo;
+    GT_BOOL            autoOn;
+    GT_U16            pageReg;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtSet1000TMasterMode_mad(dev, port, mode);
+#endif
+
+    DBG_INFO(("gprtSet1000TMasterMode Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (!(phyInfo.flag & GT_PHY_GIGABIT) || !(phyInfo.flag & GT_PHY_COPPER))
+    {
+        DBG_INFO(("Not Supported\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(mode->autoConfig == GT_TRUE)
+    {
+        if(mode->masterPrefer == GT_TRUE)
+        {
+            data = 0x1;
+        }
+        else
+        {
+            data = 0x0;
+        }
+    }
+    else
+    {
+        if(mode->masterPrefer == GT_TRUE)
+        {
+            data = 0x6;
+        }
+        else
+        {
+            data = 0x4;
+        }
+    }
+
+    if(driverPagedAccessStart(dev,hwPort,phyInfo.pageType,&autoOn,&pageReg) != GT_OK)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    /* Set the Master Mode.    */
+    retVal = hwSetPagedPhyRegField(dev,hwPort,0,9,10,3,phyInfo.anyPage,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return retVal;
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* Restart Auto Negotiation */
+    if((retVal=hwSetPhyRegField(dev,hwPort,QD_PHY_CONTROL_REG,9,1,1)) != GT_OK)
+    {
+        DBG_INFO(("Not able to write Phy Reg(port:%d,offset:%d,data:%#x).\n",hwPort,QD_PHY_CONTROL_REG,1));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if(driverPagedAccessStop(dev,hwPort,phyInfo.pageType,autoOn,pageReg) != GT_OK)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGet1000TMasterMode
+*
+* DESCRIPTION:
+*       This routine retrieves 1000Base-T Master Mode
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_1000T_MASTER_SLAVE structure
+*                autoConfig   - GT_TRUE for auto, GT_FALSE for manual setup.
+*                masterPrefer - GT_TRUE if Master configuration is preferred.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGet1000TMasterMode
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_LPORT     port,
+    OUT GT_1000T_MASTER_SLAVE   *mode
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U8            hwPort;         /* the physical port number     */
+    GT_U16        data;
+    GT_PHY_INFO    phyInfo;
+    GT_BOOL            autoOn;
+    GT_U16            pageReg;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtGet1000TMasterMode_mad(dev, port, mode);
+#endif
+
+    DBG_INFO(("gprtGet1000TMasterMode Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (!(phyInfo.flag & GT_PHY_GIGABIT) || !(phyInfo.flag & GT_PHY_COPPER))
+    {
+        DBG_INFO(("Not Supported\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverPagedAccessStart(dev,hwPort,phyInfo.pageType,&autoOn,&pageReg) != GT_OK)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    /* Set the Master Mode.    */
+    retVal = hwGetPagedPhyRegField(dev,hwPort,0,9,10,3,phyInfo.anyPage,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return retVal;
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    if(driverPagedAccessStop(dev,hwPort,phyInfo.pageType,autoOn,pageReg) != GT_OK)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if(data & 0x4)    /* Manual Mode */
+    {
+        mode->autoConfig = GT_FALSE;
+
+        if(data & 0x2)
+        {
+            mode->masterPrefer = GT_TRUE;
+        }
+        else
+        {
+            mode->masterPrefer = GT_FALSE;
+        }
+    }
+    else    /* Auto Mode */
+    {
+        mode->autoConfig = GT_TRUE;
+
+        if(data & 0x1)
+        {
+            mode->masterPrefer = GT_TRUE;
+        }
+        else
+        {
+            mode->masterPrefer = GT_FALSE;
+        }
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetPhyLinkStatus
+*
+* DESCRIPTION:
+*       This routine retrieves the Link status.
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*
+* OUTPUTS:
+*       linkStatus - GT_FALSE if link is not established,
+*                     GT_TRUE if link is established.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPhyLinkStatus
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL      *linkStatus
+)
+{
+
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16             u16Data;
+    GT_PHY_INFO        phyInfo;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtGetPhyLinkStatus_mad(dev, port, linkStatus);
+#endif
+
+    DBG_INFO(("gprtGetPhyLinkStatus Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+         return GT_NOT_SUPPORTED;
+    }
+
+    if((retVal=hwGetPhyRegField(dev,hwPort,17,10,1,&u16Data)) != GT_OK)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return retVal;
+    }
+
+    BIT_2_BOOL(u16Data,*linkStatus);
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetPktGenEnable
+*
+* DESCRIPTION:
+*       This routine enables or disables Packet Generator.
+*       Link should be established first prior to enabling the packet generator,
+*       and generator will generate packets at the speed of the established link.
+*        When enables packet generator, the following information should be
+*       provided:
+*           Payload Type:  either Random or 5AA55AA5
+*           Packet Length: either 64 or 1514 bytes
+*           Error Packet:  either Error packet or normal packet
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*       en      - GT_TRUE to enable, GT_FALSE to disable
+*       pktInfo - packet information(GT_PG structure pointer), if en is GT_TRUE.
+*                 ignored, if en is GT_FALSE
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetPktGenEnable
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL   en,
+    IN GT_PG     *pktInfo
+)
+{
+
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16             data;
+    GT_BOOL            link;
+    GT_PHY_INFO        phyInfo;
+    GT_U8            page,reg, offset, len;
+    GT_BOOL            autoOn;
+    GT_U16            pageReg;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtSetPktGenEnable_mad(dev, port, en, pktInfo);
+#endif
+
+    DBG_INFO(("gprtSetPktGenEnable Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if(!(phyInfo.flag & GT_PHY_PKT_GENERATOR))
+    {
+        DBG_INFO(("Not Supported.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    switch (phyInfo.pktGenType)
+    {
+        case GT_PHY_PKTGEN_TYPE1:    /* 30_18.5:2 */
+                page = 18;
+                reg = 30;
+                offset = 2;
+                break;
+        case GT_PHY_PKTGEN_TYPE2:    /* 16_6.3:0 */
+                page = 6;
+                reg = 16;
+                offset = 0;
+                break;
+        case GT_PHY_PKTGEN_TYPE3:    /* 25.3:0 */
+                page = 0;
+                reg = 25;
+                offset = 0;
+                break;
+        default:
+                DBG_INFO(("Unknown PKTGEN Type.\n"));
+                gtSemGive(dev,dev->phyRegsSem);
+                return GT_FAIL;
+    }
+
+    if (en)
+    {
+        if((retVal = gprtGetPhyLinkStatus(dev,port,&link)) != GT_OK)
+        {
+            gtSemGive(dev,dev->phyRegsSem);
+            return GT_FAIL;
+        }
+
+        if (link == GT_FALSE)
+        {
+            DBG_INFO(("Link should be on to run Packet Generator.\n"));
+            gtSemGive(dev,dev->phyRegsSem);
+            return GT_FAIL;
+        }
+
+        data = 0x8;
+
+        if (pktInfo->payload == GT_PG_PAYLOAD_5AA5)
+            data |= 0x4;
+
+        if (pktInfo->length == GT_PG_LENGTH_1514)
+            data |= 0x2;
+
+        if (pktInfo->tx == GT_PG_TX_ERROR)
+            data |= 0x1;
+
+        len = 4;
+    }
+    else
+    {
+        data = 0;
+        len = 1;
+        offset += 3;
+    }
+
+    if(driverPagedAccessStart(dev,hwPort,phyInfo.pageType,&autoOn,&pageReg) != GT_OK)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if((retVal=hwSetPagedPhyRegField(dev,hwPort,
+                page,reg,offset,len,phyInfo.anyPage,data)) != GT_OK)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return retVal;
+    }
+
+    if(driverPagedAccessStop(dev,hwPort,phyInfo.pageType,autoOn,pageReg) != GT_OK)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetPhyReg
+*
+* DESCRIPTION:
+*       This routine reads Phy Registers.
+*
+* INPUTS:
+*       port -    The logical port number,
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPhyReg
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_U32         regAddr,
+    OUT GT_U16         *data
+)
+{
+    GT_U16          u16Data;           /* The register's read data.    */
+    GT_U8           hwPort;         /* the physical port number     */
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtGetPhyReg_mad(dev, port, regAddr, data);
+#endif
+
+    DBG_INFO(("gprtGetPhyReg Called.\n"));
+
+/*    hwPort = GT_LPORT_2_PHY(port); */
+    hwPort = qdLong2Char(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* Get Phy Register. */
+    if(hwReadPhyReg(dev,hwPort,(GT_U8)regAddr,&u16Data) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    *data = u16Data;
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gprtSetPhyReg
+*
+* DESCRIPTION:
+*       This routine writes Phy Registers.
+*
+* INPUTS:
+*       port -    The logical port number,
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetPhyReg
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT         port,
+    IN  GT_U32           regAddr,
+    IN  GT_U16           data
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtSetPhyReg_mad(dev, port, regAddr, data);
+#endif
+
+    DBG_INFO(("gprtSetPhyReg Called.\n"));
+
+/*    hwPort = GT_LPORT_2_PHY(port); */
+    hwPort = qdLong2Char(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* Write to Phy Register */
+    if(hwWritePhyReg(dev,hwPort,(GT_U8)regAddr,data) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gprtGetPagedPhyReg
+*
+* DESCRIPTION:
+*       This routine reads phy register of the given page
+*
+* INPUTS:
+*        port     - logical port to be read
+*        regAddr    - register offset to be read
+*        page    - page number to be read
+*
+* OUTPUTS:
+*        data    - value of the read register
+*
+* RETURNS:
+*       GT_OK               - if read successed
+*       GT_FAIL               - if read failed
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gprtGetPagedPhyReg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U32  port,
+    IN    GT_U32  regAddr,
+    IN    GT_U32  page,
+    OUT GT_U16* data
+)
+{
+    GT_PHY_INFO        phyInfo;
+    GT_BOOL            autoOn;
+    GT_U16            pageReg;
+    GT_U8            hwPort;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtGetPagedPhyReg_mad(dev, port, regAddr, page, data);
+#endif
+
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if(driverPagedAccessStart(dev,hwPort,phyInfo.pageType,&autoOn,&pageReg) != GT_OK)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if(hwReadPagedPhyReg(dev,hwPort,(GT_U8)page,
+                                (GT_U8)regAddr,0,data) != GT_OK)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if(driverPagedAccessStop(dev,hwPort,phyInfo.pageType,autoOn,pageReg) != GT_OK)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gprtSetPagedPhyReg
+*
+* DESCRIPTION:
+*       This routine writes a value to phy register of the given page
+*
+* INPUTS:
+*        port     - logical port to be read
+*        regAddr    - register offset to be read
+*        page    - page number to be read
+*        data    - value of the read register
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*       GT_OK               - if read successed
+*       GT_FAIL               - if read failed
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gprtSetPagedPhyReg
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U32 port,
+    IN    GT_U32 regAddr,
+    IN    GT_U32 page,
+    IN  GT_U16 data
+)
+{
+    GT_PHY_INFO        phyInfo;
+    GT_BOOL            autoOn;
+    GT_U16            pageReg;
+    GT_U8            hwPort;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtSetPagedPhyReg_mad(dev, port, regAddr, page, data);
+#endif
+
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if(driverPagedAccessStart(dev,hwPort,phyInfo.pageType,&autoOn,&pageReg) != GT_OK)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if(hwWritePagedPhyReg(dev,hwPort,(GT_U8)page,
+                                (GT_U8)regAddr,0,data) != GT_OK)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if(driverPagedAccessStop(dev,hwPort,phyInfo.pageType,autoOn,pageReg) != GT_OK)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPhyCtrl_mad.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPhyCtrl_mad.c
new file mode 100644
index 000000000000..42145deb8b4e
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPhyCtrl_mad.c
@@ -0,0 +1,2045 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtPhyCtrl.h
+*
+* DESCRIPTION:
+* API definitions for PHY control facility.
+*
+* DEPENDENCIES:
+* None.
+*
+* FILE REVISION NUMBER:
+* $Revision: 10 $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtHwCntl.h>
+#include <gtDrvConfig.h>
+#include <gtDrvSwRegs.h>
+#include <gtVct.h>
+#include <gtSem.h>
+
+
+#ifdef GT_USE_MAD
+#include "madApi.h"
+#include "madHwCntl.h"
+#endif
+
+
+/*
+ * This routine set Auto-Negotiation Ad Register for Copper
+*/
+static
+GT_STATUS translateAutoMode
+(
+    IN    GT_PHY_INFO  *phyInfo,
+    IN    GT_PHY_AUTO_MODE mode,
+    OUT      MAD_BOOL       *autoEn,
+    OUT   MAD_U32      *autoMode
+)
+{
+  MAD_BOOL        autoNegoEn;
+  MAD_SPEED_MODE    speedMode;
+  MAD_DUPLEX_MODE duplexMode;
+
+    switch(mode)
+    {
+        case SPEED_AUTO_DUPLEX_AUTO:
+          autoNegoEn = MAD_TRUE;
+          speedMode  = MAD_SPEED_AUTO;
+          duplexMode = MAD_AUTO_DUPLEX;
+        case SPEED_1000_DUPLEX_AUTO:
+          autoNegoEn = MAD_TRUE;
+          speedMode  = MAD_SPEED_1000M;
+          duplexMode = MAD_AUTO_DUPLEX;
+          break;
+        case SPEED_AUTO_DUPLEX_FULL:
+          autoNegoEn = MAD_TRUE;
+          speedMode  = MAD_SPEED_AUTO;
+          duplexMode = MAD_FULL_DUPLEX;
+          break;
+        case SPEED_1000_DUPLEX_FULL:
+          autoNegoEn = MAD_FALSE;
+          speedMode  = MAD_SPEED_1000M;
+          duplexMode = MAD_FULL_DUPLEX;
+          break;
+        case SPEED_1000_DUPLEX_HALF:
+          autoNegoEn = MAD_FALSE;
+          speedMode  = MAD_SPEED_1000M;
+          duplexMode = MAD_HALF_DUPLEX;
+          break;
+        case SPEED_AUTO_DUPLEX_HALF:
+          autoNegoEn = MAD_TRUE;
+          speedMode  = MAD_SPEED_AUTO;
+          duplexMode = MAD_HALF_DUPLEX;
+          break;
+        case SPEED_100_DUPLEX_AUTO:
+          autoNegoEn = MAD_FALSE;
+          speedMode  = MAD_SPEED_100M;
+          duplexMode = MAD_FULL_DUPLEX;
+          break;
+        case SPEED_10_DUPLEX_AUTO:
+          autoNegoEn = MAD_TRUE;
+          speedMode  = MAD_SPEED_10M;
+          duplexMode = MAD_AUTO_DUPLEX;
+          break;
+        case SPEED_100_DUPLEX_FULL:
+          autoNegoEn = MAD_FALSE;
+          speedMode  = MAD_SPEED_100M;
+          duplexMode = MAD_FULL_DUPLEX;
+          break;
+        case SPEED_100_DUPLEX_HALF:
+          autoNegoEn = MAD_FALSE;
+          speedMode  = MAD_SPEED_100M;
+          duplexMode = MAD_HALF_DUPLEX;
+          break;
+        case SPEED_10_DUPLEX_FULL:
+          autoNegoEn = MAD_FALSE;
+          speedMode  = MAD_SPEED_10M;
+          duplexMode = MAD_FULL_DUPLEX;
+          break;
+        case SPEED_10_DUPLEX_HALF:
+          autoNegoEn = MAD_FALSE;
+          speedMode  = MAD_SPEED_10M;
+          duplexMode = MAD_HALF_DUPLEX;
+          break;
+        default:
+          DBG_INFO(("Unknown Auto Mode (%d)\n",mode));
+          return GT_BAD_PARAM;
+    }
+
+    *autoEn = autoNegoEn;
+    if (mdGetAutoNegoMode(autoNegoEn, speedMode, duplexMode, autoMode) != MAD_OK)
+      return GT_FAIL;
+
+
+    return GT_OK;
+}
+
+/*
+ * This routine sets Auto Mode and Reset the phy
+*/
+static
+GT_STATUS phySetAutoMode_mad
+(
+    IN GT_QD_DEV *dev,
+    IN GT_U8 hwPort,
+    IN GT_PHY_INFO *phyInfo,
+    IN GT_PHY_AUTO_MODE mode
+)
+{
+    MAD_STATUS    status;
+    MAD_U32 autoMode;
+    MAD_BOOL  autoEn;
+
+    DBG_INFO(("phySetAutoMode_mad Called.\n"));
+
+    status = translateAutoMode(phyInfo,mode, &autoEn, &autoMode);
+    if(status != GT_OK)
+    {
+       return status;
+    }
+
+
+    if(phyInfo->flag & GT_PHY_COPPER)
+    {
+        if((mdCopperSetAutoNeg(&(dev->mad_dev),hwPort,autoEn, autoMode)) != MAD_OK)
+        {
+               return GT_FAIL;
+        }
+
+    }
+    else if(phyInfo->flag & GT_PHY_FIBER)
+    {
+        if((mdFiberSetAutoNeg(&(dev->mad_dev),hwPort,autoEn, autoMode)) != MAD_OK)
+        {
+               return GT_FAIL;
+        }
+
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gprtPhyReset_mad
+*
+* DESCRIPTION:
+*       This routine preforms PHY reset.
+*        After reset, phy will be in Autonegotiation mode.
+*
+* INPUTS:
+* port - The logical port number, unless SERDES device is accessed
+*        The physical address, if SERDES device is accessed
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+* COMMENTS:
+* data sheet register 0.15 - Reset
+* data sheet register 0.13 - Speed
+* data sheet register 0.12 - Autonegotiation
+* data sheet register 0.8  - Duplex Mode
+*******************************************************************************/
+
+GT_STATUS gprtPhyReset_mad
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port
+)
+{
+
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_PHY_INFO        phyInfo;
+
+    DBG_INFO(("gprtPhyReset Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    /* set Auto Negotiation AD Register */
+    retVal = phySetAutoMode_mad(dev,hwPort,&phyInfo,SPEED_AUTO_DUPLEX_AUTO);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetPortLoopback_mad
+*
+* DESCRIPTION:
+* Enable/Disable Internal Port Loopback.
+* For 10/100 Fast Ethernet PHY, speed of Loopback is determined as follows:
+*   If Auto-Negotiation is enabled, this routine disables Auto-Negotiation and
+*   forces speed to be 10Mbps.
+*   If Auto-Negotiation is disabled, the forced speed is used.
+*   Disabling Loopback simply clears bit 14 of control register(0.14). Therefore,
+*   it is recommended to call gprtSetPortAutoMode for PHY configuration after
+*   Loopback test.
+* For 10/100/1000 Gigagbit Ethernet PHY, speed of Loopback is determined as follows:
+*   If Auto-Negotiation is enabled and Link is active, the current speed is used.
+*   If Auto-Negotiation is disabled, the forced speed is used.
+*   All other cases, default MAC Interface speed is used. Please refer to the data
+*   sheet for the information of the default MAC Interface speed.
+*
+*
+* INPUTS:
+* port - The logical port number, unless SERDES device is accessed
+*        The physical address, if SERDES device is accessed
+* enable - If GT_TRUE, enable loopback mode
+* If GT_FALSE, disable loopback mode
+*
+* OUTPUTS:
+* None.
+*
+* RETURNS:
+* GT_OK - on success
+* GT_FAIL - on error
+*
+* COMMENTS:
+* data sheet register 0.14 - Loop_back
+*
+*******************************************************************************/
+
+GT_STATUS gprtSetPortLoopback_mad
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL   enable
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_PHY_INFO        phyInfo;
+    MAD_STATUS status;
+
+    DBG_INFO(("gprtSetPortLoopback_mad Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if((status = mdDiagSetLineLoopback(&(dev->mad_dev),port,enable)) != MAD_OK)
+    {
+		if(status==MAD_API_LINK_DOWN)
+          return GT_OK;
+        DBG_INFO(("mdDiagSetLineLoopback failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gprtSetPortSpeed_mad
+*
+* DESCRIPTION:
+*         Sets speed for a specific logical port. This function will keep the duplex
+*        mode and loopback mode to the previous value, but disable others, such as
+*        Autonegotiation.
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*        speed - port speed.
+*                PHY_SPEED_10_MBPS for 10Mbps
+*                PHY_SPEED_100_MBPS for 100Mbps
+*                PHY_SPEED_1000_MBPS for 1000Mbps
+*
+* OUTPUTS:
+* None.
+*
+* RETURNS:
+* GT_OK - on success
+* GT_FAIL - on error
+*
+* COMMENTS:
+* data sheet register 0.13 - Speed Selection (LSB)
+* data sheet register 0.6  - Speed Selection (MSB)
+*
+*******************************************************************************/
+
+GT_STATUS gprtSetPortSpeed_mad
+(
+IN GT_QD_DEV *dev,
+IN GT_LPORT  port,
+IN GT_PHY_SPEED speed
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_PHY_INFO        phyInfo;
+    MAD_STATUS status;
+    MAD_U32 mspeed;
+    MAD_DUPLEX_MODE mDuplexmod;
+
+    DBG_INFO(("gprtSetPortSpeed_mad Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+    switch (speed)
+    {
+      case PHY_SPEED_10_MBPS:
+        mspeed = 10;
+        break;
+      case PHY_SPEED_100_MBPS:
+        mspeed = 100;
+        break;
+      case PHY_SPEED_1000_MBPS:
+      default:
+        mspeed = 1000;
+        break;
+    }
+
+    if((status = mdGetDuplexStatus(&(dev->mad_dev),port,&mDuplexmod)) != MAD_OK)
+    {
+        DBG_INFO(("mdGetDuplexStatus failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+
+    if((status = mdCopperSetSpeedDuplex(&(dev->mad_dev),port,mspeed,((mDuplexmod)?MAD_TRUE:MAD_FALSE))) != MAD_OK)
+    {
+        DBG_INFO(("mdCopperSetSpeedDuplex failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gprtPortAutoNegEnable_mad
+*
+* DESCRIPTION:
+*         Enable/disable an Auto-Negotiation.
+*        This routine simply sets Auto Negotiation bit (bit 12) of Control
+*        Register and reset the phy.
+*        For Speed and Duplex selection, please use gprtSetPortAutoMode.
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*         state - GT_TRUE for enable Auto-Negotiation,
+*                GT_FALSE otherwise
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK     - on success
+*         GT_FAIL     - on error
+*
+* COMMENTS:
+*         data sheet register 0.12 - Auto-Negotiation Enable
+*         data sheet register 4.8, 4.7, 4.6, 4.5 - Auto-Negotiation Advertisement
+*
+*******************************************************************************/
+GT_STATUS gprtPortAutoNegEnable_mad
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL   state
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    MAD_STATUS retVal;
+    MAD_SPEED_MODE speedMode;
+    MAD_DUPLEX_MODE duplexMode;
+    MAD_BOOL autoNegoEn;
+    MAD_U32            autoMode;
+
+    DBG_INFO(("gprtPortAutoNegEnable_mad Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if(!IS_CONFIGURABLE_PHY(dev,hwPort))
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if ((retVal=mdGetSpeedStatus(&(dev->mad_dev), hwPort, &speedMode))!=MAD_OK)
+    {
+        DBG_INFO(("mdGetSpeedStatus Failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+     if ((retVal=mdGetDuplexStatus(&(dev->mad_dev), hwPort, &duplexMode))!=MAD_OK)
+    {
+       DBG_INFO(("mdGetDuplexStatus Failed.\n"));
+       gtSemGive(dev,dev->phyRegsSem);
+       return GT_FAIL;
+    }
+
+    autoNegoEn = (state==GT_TRUE)?MAD_TRUE:MAD_FALSE;
+
+    if ((mdGetAutoNegoMode(autoNegoEn, speedMode, duplexMode, &autoMode)) != MAD_OK)
+    {
+        DBG_INFO(("mdGetAutoNegoMode Failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+     if ((retVal=mdCopperSetAutoNeg(&(dev->mad_dev), hwPort, autoNegoEn, autoMode))!=MAD_OK)
+    {
+        DBG_INFO(("mdCopperSetAutoNeg Failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtPortPowerDown_mad
+*
+* DESCRIPTION:
+*         Enable/disable (power down) on specific logical port.
+*        Phy configuration remains unchanged after Power down.
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*         state -    GT_TRUE: power down
+*                 GT_FALSE: normal operation
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK     - on success
+*         GT_FAIL     - on error
+*
+* COMMENTS:
+*         data sheet register 0.11 - Power Down
+*
+*******************************************************************************/
+
+GT_STATUS gprtPortPowerDown_mad
+(
+IN GT_QD_DEV *dev,
+IN GT_LPORT  port,
+IN GT_BOOL   state
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    MAD_STATUS retVal;
+    MAD_BOOL pwMode
+
+    DBG_INFO(("gprtPortPowerDown_mad Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if(!IS_CONFIGURABLE_PHY(dev,hwPort))
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (state==GT_TRUE)
+      pwMode = MAD_TRUE;
+    else
+      pwMode = MAD_FALSE;
+
+     if ((retVal=mdSysSetPhyEnable(&(dev->mad_dev), hwPort, pwMode))!=MAD_OK)
+    {
+        DBG_INFO(("mdSysSetPhyEnable Failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gprtPortRestartAutoNeg_mad
+*
+* DESCRIPTION:
+*         Restart AutoNegotiation. If AutoNegotiation is not enabled, it'll enable
+*        it. Loopback and Power Down will be disabled by this routine.
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK     - on success
+*         GT_FAIL     - on error
+*
+* COMMENTS:
+*         data sheet register 0.9 - Restart Auto-Negotiation
+*
+*******************************************************************************/
+
+GT_STATUS gprtPortRestartAutoNeg_mad
+(
+IN GT_QD_DEV *dev,
+IN GT_LPORT  port
+)
+{
+    GT_STATUS       retVal;
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          u16Data;
+    MAD_U32         u32Data;
+
+    DBG_INFO(("gprtPortRestartAutoNeg_mad Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if(!IS_CONFIGURABLE_PHY(dev,hwPort))
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if((retVal=mdSysGetPhyReg(&(dev->mad_dev),hwPort,QD_PHY_CONTROL_REG,&u32Data)) != MAD_OK)
+    {
+        DBG_INFO(("Not able to read Phy Reg(port:%d,offset:%d).\n",hwPort,QD_PHY_CONTROL_REG));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    u16Data = u32Data;
+    u16Data &= (QD_PHY_DUPLEX | QD_PHY_SPEED);
+    u16Data |= (QD_PHY_RESTART_AUTONEGO | QD_PHY_AUTONEGO);
+
+    DBG_INFO(("Write to phy(%d) register: regAddr 0x%x, data %#x",
+              hwPort,QD_PHY_CONTROL_REG,u16Data));
+
+    /* Write to Phy Control Register.  */
+    if((retVal=madHwPagedSetCtrlPara(&(dev->mad_dev),hwPort,0,u16Data)) != MAD_OK)
+    {
+        DBG_INFO(("CallmadHwPagedSetCtrlPara failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+
+    if(retVal != MAD_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gprtSetPortDuplexMode_mad
+*
+* DESCRIPTION:
+*         Sets duplex mode for a specific logical port. This function will keep
+*        the speed and loopback mode to the previous value, but disable others,
+*        such as Autonegotiation.
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*         dMode    - dulpex mode
+*
+* OUTPUTS:
+*         None.
+*
+* RETURNS:
+*         GT_OK     - on success
+*         GT_FAIL     - on error
+*
+* COMMENTS:
+*         data sheet register 0.8 - Duplex Mode
+*
+*******************************************************************************/
+GT_STATUS gprtSetPortDuplexMode_mad
+(
+IN GT_QD_DEV *dev,
+IN GT_LPORT  port,
+IN GT_BOOL   dMode
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16             u16Data;
+    GT_STATUS        retVal;
+    MAD_U32         u32Data;
+
+    DBG_INFO(("gprtSetPortDuplexMode_mad Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if(!IS_CONFIGURABLE_PHY(dev,hwPort))
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if((retVal=mdSysGetPhyReg(&(dev->mad_dev),hwPort,QD_PHY_CONTROL_REG,&u32Data)) != MAD_OK)
+    {
+        DBG_INFO(("Not able to read Phy Reg(port:%d,offset:%d).\n",hwPort,QD_PHY_CONTROL_REG));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    u16Data = u32Data;
+
+    if(dMode)
+    {
+        u16Data = (u16Data & (QD_PHY_LOOPBACK | QD_PHY_SPEED | QD_PHY_SPEED_MSB)) | QD_PHY_DUPLEX;
+    }
+    else
+    {
+        u16Data = u16Data & (QD_PHY_LOOPBACK | QD_PHY_SPEED | QD_PHY_SPEED_MSB);
+    }
+
+
+    DBG_INFO(("Write to phy(%d) register: regAddr 0x%x, data %#x",
+              hwPort,QD_PHY_CONTROL_REG,u16Data));
+
+    /* Write to Phy Control Register.  */
+    if((retVal=madHwPagedSetCtrlPara(&(dev->mad_dev),hwPort,0,u16Data)) != MAD_OK)
+    {
+        DBG_INFO(("CallmadHwPagedSetCtrlPara failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gprtSetPortAutoMode_mad
+*
+* DESCRIPTION:
+*         This routine sets up the port with given Auto Mode.
+*        Supported mode is as follows:
+*        - Auto for both speed and duplex.
+*        - Auto for speed only and Full duplex.
+*        - Auto for speed only and Half duplex.
+*        - Auto for duplex only and speed 1000Mbps.
+*        - Auto for duplex only and speed 100Mbps.
+*        - Auto for duplex only and speed 10Mbps.
+*        - Speed 1000Mbps and Full duplex.
+*        - Speed 1000Mbps and Half duplex.
+*        - Speed 100Mbps and Full duplex.
+*        - Speed 100Mbps and Half duplex.
+*        - Speed 10Mbps and Full duplex.
+*        - Speed 10Mbps and Half duplex.
+*
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*         mode - Auto Mode to be written
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - on device without copper
+*
+* COMMENTS:
+*         data sheet register 4.8, 4.7, 4.6, and 4.5 Autonegotiation Advertisement
+*         data sheet register 4.6, 4.5 Autonegotiation Advertisement for 1000BX
+*         data sheet register 9.9, 9.8 Autonegotiation Advertisement for 1000BT
+*******************************************************************************/
+
+GT_STATUS gprtSetPortAutoMode_mad
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_PHY_AUTO_MODE mode
+)
+{
+
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_PHY_INFO        phyInfo;
+
+    DBG_INFO(("gprtSetPortAutoMode_mad Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    retVal = GT_NOT_SUPPORTED;
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    retVal = phySetAutoMode_mad(dev,hwPort,&phyInfo,mode);
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+
+}
+
+
+/*******************************************************************************
+* gprtSetPause_mad
+*
+* DESCRIPTION:
+*       This routine will set the pause bit in Autonegotiation Advertisement
+*        Register. And restart the autonegotiation.
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*        state - GT_PHY_PAUSE_MODE enum value.
+*                GT_PHY_NO_PAUSE        - disable pause
+*                 GT_PHY_PAUSE        - support pause
+*                GT_PHY_ASYMMETRIC_PAUSE    - support asymmetric pause
+*                GT_PHY_BOTH_PAUSE    - support both pause and asymmetric pause
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+* COMMENTS:
+* data sheet register 4.10 Autonegotiation Advertisement Register
+*******************************************************************************/
+
+GT_STATUS gprtSetPause_mad
+(
+IN GT_QD_DEV *dev,
+IN GT_LPORT  port,
+IN GT_PHY_PAUSE_MODE state
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_PHY_INFO        phyInfo;
+    MAD_STATUS retVal;
+    MAD_BOOL autoNegoEn;
+    MAD_U32            autoMode
+
+    DBG_INFO(("phySetPause_mad Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(state & GT_PHY_ASYMMETRIC_PAUSE)
+    {
+        if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+        {
+            DBG_INFO(("Unknown PHY device.\n"));
+            gtSemGive(dev,dev->phyRegsSem);
+            return GT_FAIL;
+        }
+
+        if (!(phyInfo.flag & GT_PHY_GIGABIT))
+        {
+            DBG_INFO(("Not Supported\n"));
+            gtSemGive(dev,dev->phyRegsSem);
+            return GT_BAD_PARAM;
+        }
+
+    }
+
+     if ((retVal=mdCopperGetAutoNeg(&(dev->mad_dev), hwPort, &autoNegoEn, &autoMode))!=MAD_OK)
+    {
+        DBG_INFO(("mdCopperSetAutoNeg Failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (state==GT_TRUE)
+      autoMode |= MAD_AUTO_AD_ASYM_PAUSE;
+    else
+      autoMode &= ~MAD_AUTO_AD_ASYM_PAUSE;
+
+
+     if ((retVal=mdCopperSetAutoNeg(&(dev->mad_dev), hwPort, autoNegoEn, autoMode))!=MAD_OK)
+    {
+        DBG_INFO(("mdCopperSetAutoNeg Failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtSetDTEDetect_mad
+*
+* DESCRIPTION:
+*       This routine enables/disables DTE.
+*
+* INPUTS:
+*         port - The logical port number
+*         mode - either GT_TRUE(for enable) or GT_FALSE(for disable)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+GT_STATUS gprtSetDTEDetect_mad
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL   state
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_STATUS        retVal = GT_OK;
+    GT_PHY_INFO    phyInfo;
+    MAD_BOOL            en;
+
+    DBG_INFO(("phySetDTE_mad Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the port supports DTE */
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (!(phyInfo.flag & GT_PHY_DTE_CAPABLE))
+    {
+        DBG_INFO(("Not Supported\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (state==GT_TRUE)
+      en = MAD_TRUE;
+    else
+      en = MAD_FALSE;
+
+    if(mdCopperSetDTEDetectEnable(&(dev->mad_dev),hwPort,en,0) != MAD_OK)
+    {
+        DBG_INFO(("Call mdCopperSetDTEDetectEnable failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetDTEDetectStatus_mad
+*
+* DESCRIPTION:
+*       This routine gets DTE status.
+*
+* INPUTS:
+*         port - The logical port number
+*
+* OUTPUTS:
+*       status - GT_TRUE, if link partner needs DTE power.
+*                 GT_FALSE, otherwise.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+GT_STATUS gprtGetDTEDetectStatus_mad
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_BOOL   *state
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_STATUS        retVal = GT_OK;
+    GT_PHY_INFO    phyInfo;
+    MAD_BOOL    en;
+    MAD_U16     dropHys;
+
+    DBG_INFO(("gprtGetDTEStatus_mad Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the port supports DTE */
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (!(phyInfo.flag & GT_PHY_DTE_CAPABLE))
+    {
+        DBG_INFO(("Not Supported\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(mdCopperGetDTEDetectEnable(&(dev->mad_dev),hwPort,&en,&dropHys) != MAD_OK)
+    {
+        DBG_INFO(("Call mdCopperSetDTEDetectEnable failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+
+     if (en==MAD_TRUE)
+      *state = GT_TRUE;
+    else
+      *state = GT_FALSE;
+
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetDTEDetectDropWait_mad
+*
+* DESCRIPTION:
+*       Once the PHY no longer detects that the link partner filter, the PHY
+*        will wait a period of time before clearing the power over Ethernet
+*        detection status bit. The wait time is 5 seconds multiplied by the
+*        given value.
+*
+* INPUTS:
+*         port - The logical port number
+*       waitTime - 0 ~ 15 (unit of 4 sec.)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+GT_STATUS gprtSetDTEDetectDropWait_mad
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    IN  GT_U16    waitTime
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_STATUS        retVal = GT_OK;
+    GT_PHY_INFO    phyInfo;
+    MAD_BOOL    en;
+    MAD_U16     dropHys;
+
+
+    DBG_INFO(("gprtSetDTEDropWait_mad Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the port supports DTE */
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (!(phyInfo.flag & GT_PHY_DTE_CAPABLE))
+    {
+        DBG_INFO(("Not Supported\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(mdCopperGetDTEDetectEnable(&(dev->mad_dev),hwPort,&en,&dropHys) != MAD_OK)
+    {
+        DBG_INFO(("Call mdCopperSetDTEDetectEnable failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    dropHys = waitTime;
+    if(mdCopperSetDTEDetectEnable(&(dev->mad_dev),hwPort,en,dropHys) != MAD_OK)
+    {
+        DBG_INFO(("Call mdCopperSetDTEDetectEnable failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetDTEDetectDropWait_mad
+*
+* DESCRIPTION:
+*       Once the PHY no longer detects that the link partner filter, the PHY
+*        will wait a period of time before clearing the power over Ethernet
+*        detection status bit. The wait time is 5 seconds multiplied by the
+*        returned value.
+*
+* INPUTS:
+*         port - The logical port number
+*
+* OUTPUTS:
+*       waitTime - 0 ~ 15 (unit of 4 sec.)
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+GT_STATUS gprtGetDTEDetectDropWait_mad
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_U16    *waitTime
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_STATUS        retVal = GT_OK;
+    GT_PHY_INFO    phyInfo;
+    MAD_BOOL    en;
+    MAD_U16     dropHys;
+
+    DBG_INFO(("gprtSetDTEDropWait_mad Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (!(phyInfo.flag & GT_PHY_DTE_CAPABLE))
+    {
+        DBG_INFO(("Not Supported\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(mdCopperGetDTEDetectEnable(&(dev->mad_dev),hwPort,&en,&dropHys) != MAD_OK)
+    {
+        DBG_INFO(("Call mdCopperSetDTEDetectEnable failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    *waitTime = dropHys;
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetEnergyDetect_mad
+*
+* DESCRIPTION:
+*       Energy Detect power down mode enables or disables the PHY to wake up on
+*        its own by detecting activity on the CAT 5 cable.
+*
+* INPUTS:
+*         port - The logical port number
+*       mode - GT_EDETECT_MODE type
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+GT_STATUS gprtSetEnergyDetect_mad
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    IN  GT_EDETECT_MODE   mode
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16             u16Data;
+    GT_STATUS        retVal = GT_OK;
+    GT_PHY_INFO    phyInfo;
+
+    DBG_INFO(("gprtSetEnergyDetect_mad Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (phyInfo.flag & GT_PHY_SERDES_CORE)
+    {
+        DBG_INFO(("Not Supported.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+    else if (phyInfo.flag & GT_PHY_GIGABIT)
+    {
+        /* check if the mode is valid */
+        switch (mode)
+        {
+            case GT_EDETECT_OFF:
+                u16Data = 0;
+                break;
+            case GT_EDETECT_SENSE_PULSE:
+                u16Data = 3;
+                break;
+            case GT_EDETECT_SENSE:
+                u16Data = 2;
+                break;
+            default:
+                DBG_INFO(("Invalid paramerter.\n"));
+                gtSemGive(dev,dev->phyRegsSem);
+                return GT_BAD_PARAM;
+        }
+    }
+    else    /* it's a Fast Ethernet device */
+    {
+        /* check if the mode is valid */
+        switch (mode)
+        {
+            case GT_EDETECT_OFF:
+                u16Data = 0;
+                break;
+            case GT_EDETECT_SENSE_PULSE:
+                u16Data = 1;
+                break;
+            case GT_EDETECT_SENSE:
+            default:
+                DBG_INFO(("Invalid paramerter.\n"));
+                gtSemGive(dev,dev->phyRegsSem);
+                return GT_BAD_PARAM;
+        }
+
+    }
+
+    if(mdSysSetDetectPowerDownMode(&(dev->mad_dev),hwPort,u16Data) != MAD_OK)
+    {
+        DBG_INFO(("Call mdSysSetDetectPowerDownMode failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetEnergyDetect_mad
+*
+* DESCRIPTION:
+*       Energy Detect power down mode enables or disables the PHY to wake up on
+*        its own by detecting activity on the CAT 5 cable.
+*
+* INPUTS:
+*         port - The logical port number
+*
+* OUTPUTS:
+*       mode - GT_EDETECT_MODE type
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+GT_STATUS gprtGetEnergyDetect_mad
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_EDETECT_MODE   *mode
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16             u16Data;
+    GT_STATUS        retVal = GT_OK;
+    GT_PHY_INFO    phyInfo;
+
+    DBG_INFO(("gprtGetEnergyDetect_mad Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if(mdSysGetDetectPowerDownMode(&(dev->mad_dev),hwPort,&u16Data) != MAD_OK)
+    {
+        DBG_INFO(("Call mdSysSetDetectPowerDownMode failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (phyInfo.flag & GT_PHY_SERDES_CORE)
+    {
+        DBG_INFO(("Not Supported.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+    else if (phyInfo.flag & GT_PHY_GIGABIT)
+    {
+        /* read the mode */
+
+        switch (u16Data)
+        {
+            case 0:
+            case 1:
+                *mode = GT_EDETECT_OFF;
+                break;
+            case 2:
+                *mode = GT_EDETECT_SENSE;
+                break;
+            case 3:
+                *mode = GT_EDETECT_SENSE_PULSE;
+                break;
+            default:
+                DBG_INFO(("Unknown value (should not happen).\n"));
+                gtSemGive(dev,dev->phyRegsSem);
+                return GT_FAIL;
+        }
+
+    }
+    else    /* it's a Fast Ethernet device */
+    {
+        switch (u16Data)
+        {
+            case 0:
+                *mode = GT_EDETECT_OFF;
+                break;
+            case 1:
+                *mode = GT_EDETECT_SENSE_PULSE;
+                break;
+            default:
+                DBG_INFO(("Unknown value (shouldn not happen).\n"));
+                gtSemGive(dev,dev->phyRegsSem);
+                return GT_FAIL;
+        }
+
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSet1000TMasterMode_mad
+*
+* DESCRIPTION:
+*       This routine sets the ports 1000Base-T Master mode and restart the Auto
+*        negotiation.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - GT_1000T_MASTER_SLAVE structure
+*                autoConfig   - GT_TRUE for auto, GT_FALSE for manual setup.
+*                masterPrefer - GT_TRUE if Master configuration is preferred.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSet1000TMasterMode_mad
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_LPORT     port,
+    IN  GT_1000T_MASTER_SLAVE   *mode
+)
+{
+    GT_U8            hwPort;         /* the physical port number     */
+    GT_PHY_INFO    phyInfo;
+    MAD_1000T_MASTER_SLAVE    msmode
+
+    DBG_INFO(("gprtSet1000TMasterMode_mad Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (!(phyInfo.flag & GT_PHY_GIGABIT) || !(phyInfo.flag & GT_PHY_COPPER))
+    {
+        DBG_INFO(("Not Supported\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    msmode.autoConfig = mode->autoConfig;
+    msmode.masterPrefer = mode->masterPrefer;
+
+    if(mdCopperSet1000TMasterMode(&(dev->mad_dev),hwPort,&msmode) != MAD_OK)
+    {
+        DBG_INFO(("Call mdSysSetDetectPowerDownMode failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gprtGet1000TMasterMode_mad
+*
+* DESCRIPTION:
+*       This routine retrieves 1000Base-T Master Mode
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_1000T_MASTER_SLAVE structure
+*                autoConfig   - GT_TRUE for auto, GT_FALSE for manual setup.
+*                masterPrefer - GT_TRUE if Master configuration is preferred.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGet1000TMasterMode_mad
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_LPORT     port,
+    OUT GT_1000T_MASTER_SLAVE   *mode
+)
+{
+    GT_U8            hwPort;         /* the physical port number     */
+    GT_PHY_INFO    phyInfo;
+    MAD_1000T_MASTER_SLAVE    msmode
+
+    DBG_INFO(("gprtGet1000TMasterMode_mad Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (!(phyInfo.flag & GT_PHY_GIGABIT) || !(phyInfo.flag & GT_PHY_COPPER))
+    {
+        DBG_INFO(("Not Supported\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(mdCopperGet1000TMasterMode(&(dev->mad_dev),hwPort,&msmode) != MAD_OK)
+    {
+        DBG_INFO(("Call mdCopperGet1000TMasterMode failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+    mode->autoConfig = msmode.autoConfig;
+    mode->masterPrefer = msmode.masterPrefer;
+
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gprtGetPhyLinkStatus_mad
+*
+* DESCRIPTION:
+*       This routine retrieves the Link status.
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*
+* OUTPUTS:
+*       linkStatus - GT_FALSE if link is not established,
+*                     GT_TRUE if link is established.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPhyLinkStatus_mad
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL      *linkStatus
+)
+{
+
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_PHY_INFO        phyInfo;
+    OUT MAD_BOOL    linkOn;
+
+    DBG_INFO(("gprtGetPhyLinkStatus_mad Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+         return GT_NOT_SUPPORTED;
+    }
+
+    if(mdCopperGetLinkStatus(&(dev->mad_dev),hwPort,&linkOn) != MAD_OK)
+    {
+        DBG_INFO(("Call mdCopperGetLinkStatus failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    *linkStatus = (linkOn==MAD_TRUE)?GT_TRUE:GT_FAIL;
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gprtSetPktGenEnable_mad
+*
+* DESCRIPTION:
+*       This routine enables or disables Packet Generator.
+*       Link should be established first prior to enabling the packet generator,
+*       and generator will generate packets at the speed of the established link.
+*        When enables packet generator, the following information should be
+*       provided:
+*           Payload Type:  either Random or 5AA55AA5
+*           Packet Length: either 64 or 1514 bytes
+*           Error Packet:  either Error packet or normal packet
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*       en      - GT_TRUE to enable, GT_FALSE to disable
+*       pktInfo - packet information(GT_PG structure pointer), if en is GT_TRUE.
+*                 ignored, if en is GT_FALSE
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetPktGenEnable_mad
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL   en,
+    IN GT_PG     *pktInfo
+)
+{
+
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_PHY_INFO        phyInfo;
+    MAD_U32   men;
+    MAD_PG    mpktInfo;
+
+    DBG_INFO(("gprtSetPktGenEnable_mad Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if(!(phyInfo.flag & GT_PHY_PKT_GENERATOR))
+    {
+        DBG_INFO(("Not Supported.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    mpktInfo.payload = pktInfo->payload;
+    mpktInfo.length = pktInfo->length;
+    mpktInfo.tx = pktInfo->tx;
+    if (en==GT_TRUE)
+    {
+      men =1;
+      mpktInfo.en_type = MAD_PG_EN_COPPER;
+    }
+    else
+    {
+      men =0;
+      mpktInfo.en_type = MAD_PG_DISABLE;
+    }
+
+    if(mdDiagSetPktGenEnable(&(dev->mad_dev),hwPort, men, &mpktInfo) != MAD_OK)
+    {
+        DBG_INFO(("Call mdSysSetDetectPowerDownMode failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gprtGetPhyReg_mad
+*
+* DESCRIPTION:
+*       This routine reads Phy Registers.
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPhyReg_mad
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_U32         regAddr,
+    OUT GT_U16         *data
+)
+{
+    MAD_U32          u32Data;           /* The register's read data.    */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetPhyReg_mad Called.\n"));
+
+    hwPort = GT_LPORT_2_PHY(port);
+    /* hwPort = port; */
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* Get Phy Register. */
+    if(mdSysGetPhyReg(&(dev->mad_dev),hwPort,regAddr,&u32Data) != MAD_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    *data = u32Data;
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gprtSetPhyReg_mad
+*
+* DESCRIPTION:
+*       This routine writes Phy Registers.
+*
+* INPUTS:
+*        port -    The logical port number, unless SERDES device is accessed
+*                The physical address, if SERDES device is accessed
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetPhyReg_mad
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    IN  GT_U32            regAddr,
+    IN  GT_U16            inData
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    MAD_U32        data = inData;
+
+    DBG_INFO(("gprtSetPhyReg_mad Called.\n"));
+
+    hwPort = GT_LPORT_2_PHY(port);
+    /* hwPort = port;  */
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* Write to Phy Register */
+    if(mdSysSetPhyReg(&(dev->mad_dev),hwPort,regAddr,data) != MAD_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gprtGetPagedPhyReg_mad
+*
+* DESCRIPTION:
+*       This routine reads phy register of the given page
+*
+* INPUTS:
+*        port     - logical port to be read
+*        regAddr    - register offset to be read
+*        page    - page number to be read
+*
+* OUTPUTS:
+*        data    - value of the read register
+*
+* RETURNS:
+*       GT_OK               - if read successed
+*       GT_FAIL               - if read failed
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gprtGetPagedPhyReg_mad
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U32  port,
+    IN    GT_U32  regAddr,
+    IN    GT_U32  page,
+    OUT GT_U16* data
+)
+{
+    GT_PHY_INFO        phyInfo;
+    GT_U8            hwPort;
+    MAD_U32        u32Data;
+
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if(mdSysGetPagedPhyReg(&(dev->mad_dev),hwPort,page, regAddr,&u32Data) != MAD_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    *data = u32Data;
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gprtSetPagedPhyReg_mad
+*
+* DESCRIPTION:
+*       This routine writes a value to phy register of the given page
+*
+* INPUTS:
+*        port     - logical port to be read
+*        regAddr    - register offset to be read
+*        page    - page number to be read
+*        data    - value of the read register
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*       GT_OK               - if read successed
+*       GT_FAIL               - if read failed
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gprtSetPagedPhyReg_mad
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U32 port,
+    IN    GT_U32 regAddr,
+    IN    GT_U32 page,
+    IN  GT_U16 inData
+)
+{
+    GT_PHY_INFO        phyInfo;
+    GT_U8            hwPort;
+    MAD_U32 data = inData;
+
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if(mdSysSetPagedPhyReg(&(dev->mad_dev),hwPort,page, regAddr, data) != MAD_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPhyInt.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPhyInt.c
new file mode 100644
index 000000000000..b5e4d7ee2e92
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPhyInt.c
@@ -0,0 +1,301 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtPhyInt.h
+*
+* DESCRIPTION:
+* API definitions for PHY interrupt handling
+*
+* DEPENDENCIES:
+* None.
+*
+* FILE REVISION NUMBER:
+* $Revision: 10 $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+#include <gtDrvConfig.h>
+
+#ifdef GT_USE_MAD
+#include <gtMad.h>
+#endif
+
+#ifdef GT_USE_MAD
+#include "gtPhyInt_mad.c"
+#endif
+/*******************************************************************************
+* gprtPhyIntEnable
+*
+* DESCRIPTION:
+* Enable/Disable one PHY Interrupt
+* This register determines whether the INT# pin is asserted when an interrupt
+* event occurs. When an interrupt occurs, the corresponding bit is set and
+* remains set until register 19 is read via the SMI. When interrupt enable
+* bits are not set in register 18, interrupt status bits in register 19 are
+* still set when the corresponding interrupt events occur. However, the INT#
+* is not asserted.
+*
+* INPUTS:
+* port -   The logical port number, unless SERDES device is accessed
+*          The physical address, if SERDES device is accessed
+* intType - the type of interrupt to enable/disable. any combination of
+*            GT_SPEED_CHANGED,
+*            GT_DUPLEX_CHANGED,
+*            GT_PAGE_RECEIVED,
+*            GT_AUTO_NEG_COMPLETED,
+*            GT_LINK_STATUS_CHANGED,
+*            GT_SYMBOL_ERROR,
+*            GT_FALSE_CARRIER,
+*            GT_FIFO_FLOW,
+*            GT_CROSSOVER_CHANGED,    ( Copper only )
+*            GT_DOWNSHIFT_DETECT,    ( for 1000M Copper only )
+*            GT_ENERGY_DETECT,        ( for 1000M Copper only )
+*            GT_POLARITY_CHANGED, and ( Copper only )
+*            GT_JABBER                (Copper only )
+*
+*
+* OUTPUTS:
+* None.
+*
+* RETURNS:
+* GT_OK - on success
+* GT_FAIL - on error
+*
+* COMMENTS:
+* For 88E6131, 88E6122, and 88E6108 devices, Serdes port can be accessed using
+* logical port number.
+* For 88E6161 88E6165 and 88E6352 devices, Serdes port 5 (address 0xD/0xF) can be accessed
+* using logical port number, but not port 4 (since port 4 could be an internal
+* PHY.)
+*******************************************************************************/
+
+
+GT_STATUS gprtPhyIntEnable
+(
+IN GT_QD_DEV    *dev,
+IN GT_LPORT    port,
+IN GT_U16    intType
+)
+{
+    GT_STATUS       retVal;
+    GT_U8           hwPort;         /* the physical port number     */
+
+/*
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtPhyIntEnable_mad(dev, port, intType);
+#endif
+*/
+    DBG_INFO(("gprtPhyIntEnable Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    if((IS_IN_DEV_GROUP(dev,DEV_SERDES_CORE)) && (hwPort > 3))
+    {
+        if(!(dev->validSerdesVec & (1 << hwPort)))
+        {
+            if(!((IS_IN_DEV_GROUP(dev,DEV_SERDES_ACCESS_CONFIG)) && (hwPort == 4)))
+                GT_GET_SERDES_PORT(dev,&hwPort);
+        }
+        if(hwPort >= dev->maxPhyNum)
+        {
+            return GT_NOT_SUPPORTED;
+        }
+    }
+
+    /* check if the port is configurable */
+    if(!IS_CONFIGURABLE_PHY(dev,hwPort))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = hwWritePhyReg(dev,hwPort, QD_PHY_INT_ENABLE_REG, intType);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+
+}
+
+/*******************************************************************************
+* gprtGetPhyIntStatus
+*
+* DESCRIPTION:
+* Check to see if a specific type of interrupt occured
+*
+* INPUTS:
+* port -   The logical port number, unless SERDES device is accessed
+*          The physical address, if SERDES device is accessed
+* intType - the type of interrupt which causes an interrupt.
+*            any combination of
+*            GT_SPEED_CHANGED,
+*            GT_DUPLEX_CHANGED,
+*            GT_PAGE_RECEIVED,
+*            GT_AUTO_NEG_COMPLETED,
+*            GT_LINK_STATUS_CHANGED,
+*            GT_SYMBOL_ERROR,
+*            GT_FALSE_CARRIER,
+*            GT_FIFO_FLOW,
+*            GT_CROSSOVER_CHANGED,    ( Copper only )
+*            GT_DOWNSHIFT_DETECT,    ( for 1000M Copper only )
+*            GT_ENERGY_DETECT,        ( for 1000M Copper only )
+*            GT_POLARITY_CHANGED, and ( Copper only )
+*            GT_JABBER                (Copper only )
+*
+* OUTPUTS:
+* None.
+*
+* RETURNS:
+* GT_OK - on success
+* GT_FAIL - on error
+*
+* COMMENTS:
+* For 88E6131, 88E6122, and 88E6108 devices, Serdes port can be accessed using
+* logical port number.
+* For 88E6161 88E6165 and 88E6352 devices, Serdes port 5 (address 0xD/0xF) can be accessed
+* using logical port number, but not port 4 (since port 4 could be an internal
+* PHY.)
+*
+*******************************************************************************/
+
+GT_STATUS gprtGetPhyIntStatus
+(
+IN   GT_QD_DEV  *dev,
+IN   GT_LPORT   port,
+OUT  GT_U16*    intType
+)
+{
+    GT_STATUS       retVal;
+    GT_U8           hwPort;         /* the physical port number     */
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtGetPhyIntStatus_mad(dev, port, intType);
+#endif
+
+    DBG_INFO(("gprtGetPhyIntStatus Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+    if((IS_IN_DEV_GROUP(dev,DEV_SERDES_CORE)) && (hwPort > 3))
+    {
+        if(!(dev->validSerdesVec & (1 << hwPort)))
+        {
+            if(!((IS_IN_DEV_GROUP(dev,DEV_SERDES_ACCESS_CONFIG)) && (hwPort == 4)))
+                GT_GET_SERDES_PORT(dev,&hwPort);
+        }
+        if(hwPort >= dev->maxPhyNum)
+        {
+            return GT_NOT_SUPPORTED;
+        }
+    }
+
+    /* check if the port is configurable */
+    if(!IS_CONFIGURABLE_PHY(dev,hwPort))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = hwReadPhyReg(dev,hwPort, QD_PHY_INT_STATUS_REG, intType);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetPhyIntPortSummary
+*
+* DESCRIPTION:
+* Lists the ports that have active interrupts. It provides a quick way to
+* isolate the interrupt so that the MAC or switch does not have to poll the
+* interrupt status register (19) for all ports. Reading this register does not
+* de-assert the INT# pin
+*
+* INPUTS:
+* none
+*
+* OUTPUTS:
+* GT_U8 *intPortMask - bit Mask with the bits set for the corresponding
+* phys with active interrupt. E.g., the bit number 0 and 2 are set when
+* port number 0 and 2 have active interrupt
+*
+* RETURNS:
+* GT_OK - on success
+* GT_FAIL - on error
+*
+* COMMENTS:
+* 88E3081 data sheet register 20
+* For 88E6165, 88E6375 devices, geventGetDevIntStatus should be used instead.
+*
+*******************************************************************************/
+
+GT_STATUS gprtGetPhyIntPortSummary
+(
+IN  GT_QD_DEV  *dev,
+OUT GT_U16     *intPortMask
+)
+{
+    GT_STATUS       retVal;
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16            portVec;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtGetPhyIntPortSummary_mad(dev, intPortMask);
+#endif
+
+    DBG_INFO(("gprtGetPhyIntPortSummary Called.\n"));
+
+    /* translate LPORT 0 to hardware port */
+    hwPort = GT_LPORT_2_PORT(0);
+
+    *intPortMask=0;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_DEV_PHY_INTERRUPT))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_INTERNAL_GPHY))
+    {
+        /* get the interrupt port summary from global register */
+        retVal = hwGetGlobal2RegField(dev,QD_REG_PHYINT_SOURCE,0,dev->maxPorts,&portVec);
+        GT_GIG_PHY_INT_MASK(dev,portVec);
+        *intPortMask = (GT_U16)GT_PORTVEC_2_LPORTVEC(portVec);
+    }
+    else
+    {
+        /* get the interrupt port summary from phy */
+        retVal = hwReadPhyReg(dev,hwPort, QD_PHY_INT_PORT_SUMMARY_REG, &portVec);
+        *intPortMask = (GT_U16)GT_PORTVEC_2_LPORTVEC(portVec);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPhyInt_mad.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPhyInt_mad.c
new file mode 100644
index 000000000000..267cf417c7e2
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPhyInt_mad.c
@@ -0,0 +1,272 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtPhyInt.h
+*
+* DESCRIPTION:
+* API definitions for PHY interrupt handling
+*
+* DEPENDENCIES:
+* None.
+*
+* FILE REVISION NUMBER:
+* $Revision: 10 $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+#include <gtDrvConfig.h>
+#include "madApi.h"
+
+/*******************************************************************************
+* gprtPhyIntEnable_mad
+*
+* DESCRIPTION:
+* Enable/Disable one PHY Interrupt
+* This register determines whether the INT# pin is asserted when an interrupt
+* event occurs. When an interrupt occurs, the corresponding bit is set and
+* remains set until register 19 is read via the SMI. When interrupt enable
+* bits are not set in register 18, interrupt status bits in register 19 are
+* still set when the corresponding interrupt events occur. However, the INT#
+* is not asserted.
+*
+* INPUTS:
+* port -   The logical port number, unless SERDES device is accessed
+*          The physical address, if SERDES device is accessed
+* intType - the type of interrupt to enable/disable. any combination of
+*            GT_SPEED_CHANGED,
+*            GT_DUPLEX_CHANGED,
+*            GT_PAGE_RECEIVED,
+*            GT_AUTO_NEG_COMPLETED,
+*            GT_LINK_STATUS_CHANGED,
+*            GT_SYMBOL_ERROR,
+*            GT_FALSE_CARRIER,
+*            GT_FIFO_FLOW,
+*            GT_CROSSOVER_CHANGED,    ( Copper only )
+*            GT_DOWNSHIFT_DETECT,    ( for 1000M Copper only )
+*            GT_ENERGY_DETECT,        ( for 1000M Copper only )
+*            GT_POLARITY_CHANGED, and ( Copper only )
+*            GT_JABBER                (Copper only )
+*
+*
+* OUTPUTS:
+* None.
+*
+* RETURNS:
+* GT_OK - on success
+* GT_FAIL - on error
+*
+* COMMENTS:
+* For 88E6131, 88E6122, and 88E6108 devices, Serdes port can be accessed using
+* logical port number.
+* For 88E6161 88E6165 and 88E6352 devices, Serdes port 5 (address 0xD/0xF) can be accessed
+* using logical port number, but not port 4 (since port 4 could be an internal
+* PHY.)
+*******************************************************************************/
+
+
+GT_STATUS gprtPhyIntEnable_mad
+(
+IN GT_QD_DEV    *dev,
+IN GT_LPORT    port,
+IN GT_U16    intType
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    MAD_INT_TYPE    mintType;
+
+    DBG_INFO(("gprtPhyIntEnable_mad Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+
+    if((IS_IN_DEV_GROUP(dev,DEV_SERDES_CORE)) && (hwPort > 3))
+    {
+        if(!(dev->validSerdesVec & (1 << hwPort)))
+        {
+            if(!((IS_IN_DEV_GROUP(dev,DEV_SERDES_ACCESS_CONFIG)) && (hwPort == 4)))
+                GT_GET_SERDES_PORT(dev,&hwPort);
+        }
+        if(hwPort >= dev->maxPhyNum)
+        {
+            return GT_NOT_SUPPORTED;
+        }
+    }
+
+    /* check if the port is configurable */
+    if(!IS_CONFIGURABLE_PHY(dev,hwPort))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    mintType.intGroup0 = 0;
+    mintType.intGroup1 = 0;
+    mintType.intGroup0 = intType;
+    if(mdIntSetEnable(&(dev->mad_dev),hwPort,&mintType) != MAD_OK)
+    {
+        DBG_INFO(("Call mdIntSetEnable failed.\n"));
+        return GT_FAIL;
+    }
+
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gprtGetPhyIntStatus_mad
+*
+* DESCRIPTION:
+* Check to see if a specific type of interrupt occured
+*
+* INPUTS:
+* port -   The logical port number, unless SERDES device is accessed
+*          The physical address, if SERDES device is accessed
+* intType - the type of interrupt which causes an interrupt.
+*            any combination of
+*            GT_SPEED_CHANGED,
+*            GT_DUPLEX_CHANGED,
+*            GT_PAGE_RECEIVED,
+*            GT_AUTO_NEG_COMPLETED,
+*            GT_LINK_STATUS_CHANGED,
+*            GT_SYMBOL_ERROR,
+*            GT_FALSE_CARRIER,
+*            GT_FIFO_FLOW,
+*            GT_CROSSOVER_CHANGED,    ( Copper only )
+*            GT_DOWNSHIFT_DETECT,    ( for 1000M Copper only )
+*            GT_ENERGY_DETECT,        ( for 1000M Copper only )
+*            GT_POLARITY_CHANGED, and ( Copper only )
+*            GT_JABBER                (Copper only )
+*
+* OUTPUTS:
+* None.
+*
+* RETURNS:
+* GT_OK - on success
+* GT_FAIL - on error
+*
+* COMMENTS:
+* For 88E6131, 88E6122, and 88E6108 devices, Serdes port can be accessed using
+* logical port number.
+* For 88E6161 88E6165 and 88E6352 devices, Serdes port 5 (address 0xD/0xF) can be accessed
+* using logical port number, but not port 4 (since port 4 could be an internal
+* PHY.)
+*
+*******************************************************************************/
+
+GT_STATUS gprtGetPhyIntStatus_mad
+(
+IN   GT_QD_DEV  *dev,
+IN   GT_LPORT   port,
+OUT  GT_U16*    intType
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+    MAD_INT_TYPE    mintType;
+
+    DBG_INFO(("gprtGetPhyIntStatus_mad Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PHY(port);
+    if((IS_IN_DEV_GROUP(dev,DEV_SERDES_CORE)) && (hwPort > 3))
+    {
+        if(!(dev->validSerdesVec & (1 << hwPort)))
+        {
+            if(!((IS_IN_DEV_GROUP(dev,DEV_SERDES_ACCESS_CONFIG)) && (hwPort == 4)))
+                GT_GET_SERDES_PORT(dev,&hwPort);
+        }
+        if(hwPort >= dev->maxPhyNum)
+        {
+            return GT_NOT_SUPPORTED;
+        }
+    }
+
+    /* check if the port is configurable */
+    if(!IS_CONFIGURABLE_PHY(dev,hwPort))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(mdIntGetStatus(&(dev->mad_dev),hwPort,&mintType) != MAD_OK)
+    {
+        DBG_INFO(("Call mdIntGetStatus failed.\n"));
+        return GT_FAIL;
+    }
+
+    *intType = mintType.intGroup0;
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gprtGetPhyIntPortSummary_mad
+*
+* DESCRIPTION:
+* Lists the ports that have active interrupts. It provides a quick way to
+* isolate the interrupt so that the MAC or switch does not have to poll the
+* interrupt status register (19) for all ports. Reading this register does not
+* de-assert the INT# pin
+*
+* INPUTS:
+* none
+*
+* OUTPUTS:
+* GT_U8 *intPortMask - bit Mask with the bits set for the corresponding
+* phys with active interrupt. E.g., the bit number 0 and 2 are set when
+* port number 0 and 2 have active interrupt
+*
+* RETURNS:
+* GT_OK - on success
+* GT_FAIL - on error
+*
+* COMMENTS:
+* 88E3081 data sheet register 20
+* For 88E6165, 88E6375 devices, geventGetDevIntStatus should be used instead.
+*
+*******************************************************************************/
+
+GT_STATUS gprtGetPhyIntPortSummary_mad
+(
+IN  GT_QD_DEV  *dev,
+OUT GT_U16     *intPortMask
+)
+{
+    GT_STATUS       retVal;
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          portVec;
+    MAD_U32         mportVec;
+
+    DBG_INFO(("gprtGetPhyIntPortSummary_mad Called.\n"));
+
+    /* translate LPORT 0 to hardware port */
+    hwPort = GT_LPORT_2_PORT(0);
+
+    *intPortMask=0;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_DEV_PHY_INTERRUPT))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_INTERNAL_GPHY))
+    {
+        /* get the interrupt port summary from global register */
+        retVal = hwGetGlobal2RegField(dev,QD_REG_PHYINT_SOURCE,0,dev->maxPorts,&portVec);
+        GT_GIG_PHY_INT_MASK(dev,portVec);
+        *intPortMask = (GT_U16)GT_PORTVEC_2_LPORTVEC(portVec);
+    }
+    else
+    {
+        /* get the interrupt port summary from phy */
+      if(mdIntGetPortSummary(&(dev->mad_dev), &mportVec) != MAD_OK)
+      {
+        DBG_INFO(("Call mdIntGetPortSummary failed.\n"));
+        return GT_FAIL;
+      }
+      portVec = mportVec;
+      *intPortMask = (GT_U16)GT_PORTVEC_2_LPORTVEC(portVec);
+    }
+
+    return GT_OK;
+
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPolicy.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPolicy.c
new file mode 100644
index 000000000000..7c8ee4179aac
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPolicy.c
@@ -0,0 +1,279 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtPolicy.c
+*
+* DESCRIPTION:
+*       API definitions to handle Policy Mapping
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*       $Revision: 5 $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+/*******************************************************************************
+* gprtSetPolicy
+*
+* DESCRIPTION:
+*       This routine sets the Policy for ports.
+*        Supported Policies are defined as GT_FRAME_POLICY as follows:
+*            FRAME_POLICY_NONE    - normal frame switching
+*            FRAME_POLICY_MIRROR  - mirror (copy) frame to MirrorDest port
+*            FRAME_POLICY_TRAP    - trap(re-direct) frame to the CPUDest port
+*            FRAME_POLICY_DISCARD - discard(filter) the frame
+*        Supported Policy types are defined as GT_POLICY_TYPE:
+*            POLICY_TYPE_DA - DA Policy Mapping
+*                DA Policy Mapping occurs when the DA of a frame is contained in
+*                the ATU address database with an Entry State that indicates Policy.
+*            POLICY_TYPE_SA - SA Policy Mapping
+*                SA Policy Mapping occurs when the SA of a frame is contained in
+*                the ATU address database with an Entry State that indicates Policy.
+*            POLICY_TYPE_VTU - VTU Policy Mapping
+*                VTU Policy Mapping occurs when the VID of a frame is contained in
+*                the VTU database with the VidPolicy is enabled.
+*            POLICY_TYPE_ETYPE - EtherType Policy Mapping
+*                EType Policy Mapping occurs when the EtherType of a frame matches
+*                the PortEType (see gprtSetPortEType API)
+*            POLICY_TYPE_PPPoE - PPPoE Policy Mapping
+*                PPPoE Policy Mapping occurs when the EtherType of a frame matches 0x8863
+*            POLICY_TYPE_VBAS - VBAS Policy Mapping
+*                VBAS Policy Mapping occurs when the EtherType of a frame matches 0x8200
+*            POLICY_TYPE_OPT82 - DHCP Option 82 Policy Mapping
+*                DHCP Option 82 Policy Mapping occurs when the ingressing frame is an
+*                IPv4 UDP with a UDP Destination port = 0x0043 or 0x0044, or an
+*                IPv6 UDP with a UDP Destination port = 0x0223 or 0x0222
+*            POLICY_TYPE_UDP - UDP Policy Mapping
+*                UDP Policy Mapping occurs when the ingressing frame is
+*                a Broadcast IPv4 UDP or a Multicast IPv6 UDP.
+*
+* INPUTS:
+*       port    - logical port number.
+*       type     - policy type (GT_POLICY_TYPE)
+*       policy     - policy (GT_FRAME_POLICY)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK            - on success
+*       GT_FAIL          - on error
+*       GT_BAD_PARAM     - on bad parameters
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetPolicy
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT     port,
+    IN  GT_POLICY_TYPE    type,
+    IN    GT_FRAME_POLICY    policy
+)
+{
+
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* Physical port.               */
+    GT_U8            offset;
+
+    DBG_INFO(("gprtSetPolicy Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_POLICY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    switch (policy)
+    {
+        case FRAME_POLICY_NONE:
+        case FRAME_POLICY_MIRROR:
+        case FRAME_POLICY_TRAP:
+        case FRAME_POLICY_DISCARD:
+            break;
+        default:
+            DBG_INFO(("Bad Policy\n"));
+            return GT_BAD_PARAM;
+    }
+
+    switch (type)
+    {
+        case POLICY_TYPE_DA:
+            offset = 14;
+            break;
+        case POLICY_TYPE_SA:
+            offset = 12;
+            break;
+        case POLICY_TYPE_VTU:
+            offset = 10;
+            break;
+        case POLICY_TYPE_ETYPE:
+            offset = 8;
+            break;
+        case POLICY_TYPE_PPPoE:
+            offset = 6;
+            break;
+        case POLICY_TYPE_VBAS:
+            offset = 4;
+            break;
+        case POLICY_TYPE_OPT82:
+            if (IS_IN_DEV_GROUP(dev,DEV_MELODY_SWITCH))
+            {
+              DBG_INFO(("GT_NOT_SUPPORTED\n"));
+              return GT_NOT_SUPPORTED;
+            }
+            else
+              offset = 2;
+            break;
+        case POLICY_TYPE_UDP:
+            offset = 0;
+            break;
+        default:
+            DBG_INFO(("Bad Parameter\n"));
+            return GT_BAD_PARAM;
+    }
+
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_POLICY_CONTROL, offset, 2, (GT_U16)policy);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gprtGetPolicy
+*
+* DESCRIPTION:
+*       This routine gets the Policy of the given policy type.
+*        Supported Policies are defined as GT_FRAME_POLICY as follows:
+*            FRAME_POLICY_NONE    - normal frame switching
+*            FRAME_POLICY_MIRROR  - mirror (copy) frame to MirrorDest port
+*            FRAME_POLICY_TRAP    - trap(re-direct) frame to the CPUDest port
+*            FRAME_POLICY_DISCARD - discard(filter) the frame
+*        Supported Policy types are defined as GT_POLICY_TYPE:
+*            POLICY_TYPE_DA - DA Policy Mapping
+*                DA Policy Mapping occurs when the DA of a frame is contained in
+*                the ATU address database with an Entry State that indicates Policy.
+*            POLICY_TYPE_SA - SA Policy Mapping
+*                SA Policy Mapping occurs when the SA of a frame is contained in
+*                the ATU address database with an Entry State that indicates Policy.
+*            POLICY_TYPE_VTU - VTU Policy Mapping
+*                VTU Policy Mapping occurs when the VID of a frame is contained in
+*                the VTU database with the VidPolicy is enabled.
+*            POLICY_TYPE_ETYPE - EtherType Policy Mapping
+*                EType Policy Mapping occurs when the EtherType of a frame matches
+*                the PortEType (see gprtSetPortEType API)
+*            POLICY_TYPE_PPPoE - PPPoE Policy Mapping
+*                PPPoE Policy Mapping occurs when the EtherType of a frame matches 0x8863
+*            POLICY_TYPE_VBAS - VBAS Policy Mapping
+*                VBAS Policy Mapping occurs when the EtherType of a frame matches 0x8200
+*            POLICY_TYPE_OPT82 - DHCP Option 82 Policy Mapping
+*                DHCP Option 82 Policy Mapping occurs when the ingressing frame is an
+*                IPv4 UDP with a UDP Destination port = 0x0043 or 0x0044, or an
+*                IPv6 UDP with a UDP Destination port = 0x0223 or 0x0222
+*            POLICY_TYPE_UDP - UDP Policy Mapping
+*                UDP Policy Mapping occurs when the ingressing frame is
+*                a Broadcast IPv4 UDP or a Multicast IPv6 UDP.
+*
+* INPUTS:
+*       port    - logical port number.
+*       type     - policy type (GT_POLICY_TYPE)
+*
+* OUTPUTS:
+*       policy     - policy (GT_FRAME_POLICY)
+*
+* RETURNS:
+*       GT_OK            - on success
+*       GT_FAIL          - on error
+*       GT_BAD_PARAM     - on bad parameters
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPolicy
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT     port,
+    IN  GT_POLICY_TYPE    type,
+    OUT GT_FRAME_POLICY    *policy
+)
+{
+
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* Physical port.               */
+    GT_U8            offset;
+    GT_U16            data;
+
+    DBG_INFO(("gprtGetPolicy Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_POLICY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    switch (type)
+    {
+        case POLICY_TYPE_DA:
+            offset = 14;
+            break;
+        case POLICY_TYPE_SA:
+            offset = 12;
+            break;
+        case POLICY_TYPE_VTU:
+            offset = 10;
+            break;
+        case POLICY_TYPE_ETYPE:
+            offset = 8;
+            break;
+        case POLICY_TYPE_PPPoE:
+            offset = 6;
+            break;
+        case POLICY_TYPE_VBAS:
+            offset = 4;
+            break;
+        case POLICY_TYPE_OPT82:
+            offset = 2;
+            break;
+        case POLICY_TYPE_UDP:
+            offset = 0;
+            break;
+        default:
+            DBG_INFO(("Bad Parameter\n"));
+            return GT_BAD_PARAM;
+    }
+
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_POLICY_CONTROL, offset, 2, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    *policy = (GT_FRAME_POLICY)data;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortCtrl.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortCtrl.c
new file mode 100644
index 000000000000..2fd2cf0e1e6a
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortCtrl.c
@@ -0,0 +1,7290 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtPortCtrl.c
+*
+* DESCRIPTION:
+*       API implementation for switch port control.
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*       $Revision: 3 $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+#include <gtSem.h>
+
+/*******************************************************************************
+* gprtSetForceFc
+*
+* DESCRIPTION:
+*       This routine set the force flow control state.
+*
+* INPUTS:
+*       port  - the logical port number.
+*       force - GT_TRUE for force flow control  or GT_FALSE otherwise
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetForceFc
+(
+    IN GT_QD_DEV  *dev,
+    IN GT_LPORT   port,
+    IN GT_BOOL    force
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_PORT_STP_STATE  state;
+
+    DBG_INFO(("gprtSetForceFc Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device allows to force a flowcontrol disabled */
+    if (IS_IN_DEV_GROUP(dev,DEV_FC_WITH_VALUE))
+    {
+        if(force)
+            data = 3;
+        else
+            data = 0;
+
+        retVal = hwSetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,6,2,data);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+        }
+        else
+        {
+            DBG_INFO(("OK.\n"));
+        }
+        return retVal;
+    }
+
+    /* Port should be disabled before Set Force Flow Control bit */
+    retVal = gstpGetPortState(dev,port, &state);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("gstpGetPortState failed.\n"));
+        return retVal;
+    }
+
+    retVal = gstpSetPortState(dev,port, GT_PORT_DISABLE);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("gstpSetPortState failed.\n"));
+        return retVal;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(force, data);
+
+    /* Set the force flow control bit.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,15,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* Restore original stp state. */
+    if(gstpSetPortState(dev,port, state) != GT_OK)
+    {
+        DBG_INFO(("gstpSetPortState failed.\n"));
+        return GT_FAIL;
+    }
+
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gprtGetForceFc
+*
+* DESCRIPTION:
+*       This routine get the force flow control state.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       force - GT_TRUE for force flow control  or GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetForceFc
+(
+    IN  GT_QD_DEV  *dev,
+    IN  GT_LPORT   port,
+    OUT GT_BOOL    *force
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetForceFc Called.\n"));
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device allows to force a flowcontrol disabled */
+    if (IS_IN_DEV_GROUP(dev,DEV_FC_WITH_VALUE))
+    {
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_PCS_CONTROL,6,2,&data);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+        }
+        else
+        {
+            DBG_INFO(("OK.\n"));
+        }
+
+        if(data & 0x1)
+            *force = GT_TRUE;
+        else
+            *force = GT_FALSE;
+
+        return retVal;
+    }
+
+    /* Get the force flow control bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,15,1,&data);
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *force);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetUseCoreTag
+*
+* DESCRIPTION:
+*        This routine set the UseCoreTag bit in Port Control Register.
+*        When this bit is cleared to a zero, ingressing frames are considered
+*        Tagged if the 16-bits following the frame's Source Address is 0x8100.
+*        When this bit is set to a one, ingressing frames are considered Tagged
+*        if the 16-bits following the frame's Source Address is equal to the
+*        CoreTag register value.
+*
+* INPUTS:
+*       port  - the logical port number.
+*       force - GT_TRUE for force flow control  or GT_FALSE otherwise
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetUseCoreTag
+(
+    IN GT_QD_DEV  *dev,
+    IN GT_LPORT   port,
+    IN GT_BOOL    force
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetUseCoreTag Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device allows to force a flowcontrol disabled */
+    if (!IS_IN_DEV_GROUP(dev,DEV_CORE_TAG))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(force, data);
+
+    /* Set the UseCoreTag bit.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,15,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gprtGetUseCoreTag
+*
+* DESCRIPTION:
+*       This routine get the Use Core Tag state.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       force - GT_TRUE for using core tag register  or GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetUseCoreTag
+(
+    IN  GT_QD_DEV  *dev,
+    IN  GT_LPORT   port,
+    OUT GT_BOOL    *force
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetUseCoreTag Called.\n"));
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_CORE_TAG))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the UseCoreTag bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,15,1,&data);
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *force);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetTrailerMode
+*
+* DESCRIPTION:
+*       This routine set the egress trailer mode.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - GT_TRUE for add trailer or GT_FALSE otherwise
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetTrailerMode
+(
+    IN GT_QD_DEV  *dev,
+    IN GT_LPORT   port,
+    IN GT_BOOL    mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetTrailerMode Called.\n"));
+
+    /* check if device supports this feature */
+    if (!((IS_IN_DEV_GROUP(dev,DEV_TRAILER)) ||
+          (IS_IN_DEV_GROUP(dev,DEV_TRAILER_P5)) ||
+          (IS_IN_DEV_GROUP(dev,DEV_TRAILER_P4P5))))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if(hwPort < 4)
+    {
+        /* check if device supports this feature for this port */
+        if ((IS_IN_DEV_GROUP(dev,DEV_TRAILER_P5)) ||
+            (IS_IN_DEV_GROUP(dev,DEV_TRAILER_P4P5)))
+        {
+            DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+            return GT_NOT_SUPPORTED;
+        }
+    }
+    else if(hwPort == 4)
+    {
+        /* check if device supports this feature for this port*/
+        if (IS_IN_DEV_GROUP(dev,DEV_TRAILER_P5))
+        {
+            DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+            return GT_NOT_SUPPORTED;
+        }
+    }
+
+    /* Set the trailer mode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,14,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gprtGetTrailerMode
+*
+* DESCRIPTION:
+*       This routine get the egress trailer mode.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_TRUE for add trailer or GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetTrailerMode
+(
+    IN  GT_QD_DEV  *dev,
+    IN  GT_LPORT   port,
+    OUT GT_BOOL    *mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetTrailerMode Called.\n"));
+
+    /* check if device supports this feature */
+    if (!((IS_IN_DEV_GROUP(dev,DEV_TRAILER)) ||
+          (IS_IN_DEV_GROUP(dev,DEV_TRAILER_P5)) ||
+          (IS_IN_DEV_GROUP(dev,DEV_TRAILER_P4P5))))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if(hwPort < 4)
+    {
+        /* check if device supports this feature for this port */
+        if ((IS_IN_DEV_GROUP(dev,DEV_TRAILER_P5)) ||
+            (IS_IN_DEV_GROUP(dev,DEV_TRAILER_P4P5)))
+        {
+            DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+            return GT_NOT_SUPPORTED;
+        }
+    }
+    else if(hwPort == 4)
+    {
+        /* check if device supports this feature for this port */
+        if (IS_IN_DEV_GROUP(dev,DEV_TRAILER_P5))
+        {
+            DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+            return GT_NOT_SUPPORTED;
+        }
+    }
+
+    /* Get the Trailer mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,14,1,&data);
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *mode);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+
+
+/*******************************************************************************
+* gprtSetIngressMode
+*
+* DESCRIPTION:
+*       This routine set the ingress mode.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - the ingress mode.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetIngressMode
+(
+    IN  GT_QD_DEV      *dev,
+    IN GT_LPORT        port,
+    IN GT_INGRESS_MODE mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetIngressMode Called.\n"));
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Gigabit Switch does not support this status. */
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if device supports this feature */
+    switch (mode)
+    {
+        case (GT_UNMODIFY_INGRESS):
+            break;
+
+        case (GT_TRAILER_INGRESS):
+            if (!((IS_IN_DEV_GROUP(dev,DEV_TRAILER)) ||
+                  (IS_IN_DEV_GROUP(dev,DEV_TRAILER_P5)) ||
+                  (IS_IN_DEV_GROUP(dev,DEV_TRAILER_P4P5))))
+     {
+                DBG_INFO(("Given ingress mode is not supported by this device\n"));
+                return GT_NOT_SUPPORTED;
+            }
+            break;
+
+        case (GT_UNTAGGED_INGRESS):
+            if(!(IS_IN_DEV_GROUP(dev,DEV_TAGGING)))
+            {
+                DBG_INFO(("Given ingress mode is not supported by this device\n"));
+                return GT_NOT_SUPPORTED;
+            }
+            break;
+
+        case (GT_CPUPORT_INGRESS):
+            if(!(IS_IN_DEV_GROUP(dev,DEV_IGMP_SNOOPING)))
+            {
+                DBG_INFO(("Given ingress mode is not supported by this device\n"));
+                return GT_NOT_SUPPORTED;
+            }
+
+            if(hwPort != GT_LPORT_2_PORT(dev->cpuPortNum))
+            {
+                DBG_INFO(("Given ingress mode is supported by CPU port only\n"));
+                return GT_NOT_SUPPORTED;
+            }
+
+            break;
+
+        default:
+            DBG_INFO(("Failed.\n"));
+            return GT_FAIL;
+    }
+
+    /* Set the Ingress Mode.        */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,8,2,(GT_U16)mode);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gprtGetIngressMode
+*
+* DESCRIPTION:
+*       This routine get the ingress mode.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       mode - the ingress mode.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetIngressMode
+(
+    IN  GT_QD_DEV      *dev,
+    IN  GT_LPORT        port,
+    OUT GT_INGRESS_MODE *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetIngressMode Called.\n"));
+
+    /* Gigabit Switch does not support this status. */
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+    /* Get the Ingress Mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL, 8, 2,&data);
+    *mode = data;
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetMcRateLimit
+*
+* DESCRIPTION:
+*       This routine set the port multicast rate limit.
+*
+* INPUTS:
+*       port - the logical port number.
+*       rate - GT_TRUE to Enable, GT_FALSE for otherwise.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetMcRateLimit
+(
+    IN  GT_QD_DEV   *dev,
+    IN GT_LPORT     port,
+    IN GT_MC_RATE   rate
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetMcRateLimit Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* this feature only exits in 6051, 6052, and 6012. It is replace with
+     * Rate Cotrol Register in the future products, starting from clippership
+     */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_MC_RATE_PERCENT)) != GT_OK)
+        return retVal;
+
+    /* Set the multicast rate limit.    */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,2,2,(GT_U16)rate);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gprtGetMcRateLimit
+*
+* DESCRIPTION:
+*       This routine Get the port multicast rate limit.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       rate - GT_TRUE to Enable, GT_FALSE for otherwise.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetMcRateLimit
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_LPORT    port,
+    OUT GT_MC_RATE  *rate
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read data        */
+
+    DBG_INFO(("gprtGetMcRateLimit Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* this feature only exits in 6051, 6052, and 6012. It is replace with
+     * Rate Cotrol Register in the future products, starting from clippership
+     */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_MC_RATE_PERCENT)) != GT_OK)
+        return retVal;
+
+    /* Get the multicast rate limit.    */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL, 2, 2,&data);
+    *rate = data;
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/* the following two APIs are added to support fullsail and clippership */
+
+/*******************************************************************************
+* gprtSetIGMPSnoop
+*
+* DESCRIPTION:
+*         This routine set the IGMP Snoop. When set to one and this port receives
+*        IGMP frame, the frame is switched to the CPU port, overriding all other
+*        switching decisions, with exception for CPU's Trailer.
+*        CPU port is determined by the Ingress Mode bits. A port is considered
+*        the CPU port if its Ingress Mode are either GT_TRAILER_INGRESS or
+*        GT_CPUPORT_INGRESS.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for IGMP Snoop or GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetIGMPSnoop
+(
+    IN  GT_QD_DEV   *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL      mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetIGMPSnoop Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_IGMP_SNOOPING)) != GT_OK)
+      return retVal;
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set the IGMP Snooping mode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,10,1, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gprtGetIGMPSnoop
+*
+* DESCRIPTION:
+*       This routine get the IGMP Snoop mode.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_TRUE: IGMP Snoop enabled
+*           GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetIGMPSnoop
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetIGMPSnoop Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_IGMP_SNOOPING)) != GT_OK)
+      return retVal;
+
+    /* Get the Ingress Mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL, 10, 1, &data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *mode);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+/* the following two APIs are added to support clippership */
+
+/*******************************************************************************
+* gprtSetHeaderMode
+*
+* DESCRIPTION:
+*       This routine set ingress and egress header mode of a switch port.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - GT_TRUE for header mode  or GT_FALSE otherwise
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetHeaderMode
+(
+    IN  GT_QD_DEV   *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL      mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetHeaderMode Called.\n"));
+
+    /* only devices beyond quarterdeck (6052) has this feature */
+    /* Fullsail (DEV_QD_88E6502) is an exception, and does not support this feature */
+    if(IS_VALID_API_CALL(dev,port, DEV_HEADER|DEV_HEADER_P5|DEV_HEADER_P4P5) != GT_OK)
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if(hwPort < 4)
+    {
+        if ((IS_IN_DEV_GROUP(dev,DEV_TRAILER_P5)) ||
+            (IS_IN_DEV_GROUP(dev,DEV_TRAILER_P4P5)))
+        {
+            DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+            return GT_NOT_SUPPORTED;
+        }
+    }
+    else if(hwPort == 4)
+    {
+        if (IS_IN_DEV_GROUP(dev,DEV_HEADER_P5))
+        {
+            DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+            return GT_NOT_SUPPORTED;
+        }
+    }
+
+    /* Set the header mode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,11,1, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gprtGetHeaderMode
+*
+* DESCRIPTION:
+*       This routine gets ingress and egress header mode of a switch port.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_TRUE: header mode enabled
+*           GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetHeaderMode
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetHeaderMode Called.\n"));
+
+    /* only devices beyond quarterdeck (6052) has this feature */
+    /* Fullsail (DEV_QD_88E602) is an exception, and does not support this feature */
+    if(IS_VALID_API_CALL(dev,port, DEV_HEADER|DEV_HEADER_P5|DEV_HEADER_P4P5) != GT_OK)
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if(hwPort < 4)
+    {
+        if ((IS_IN_DEV_GROUP(dev,DEV_TRAILER_P5)) ||
+            (IS_IN_DEV_GROUP(dev,DEV_TRAILER_P4P5)))
+        {
+            DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+            return GT_NOT_SUPPORTED;
+        }
+    }
+    else if(hwPort == 4)
+    {
+        if (IS_IN_DEV_GROUP(dev,DEV_HEADER_P5))
+        {
+            DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+            return GT_NOT_SUPPORTED;
+        }
+    }
+
+    /* Get the Header Mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL, 11, 1, &data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *mode);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+/* the following four APIs are added to support Octane */
+
+/*******************************************************************************
+* gprtSetProtectedMode
+*
+* DESCRIPTION:
+*       This routine set protected mode of a switch port.
+*        When this mode is set to GT_TRUE, frames are allowed to egress port
+*        defined by the 802.1Q VLAN membership for the frame's VID 'AND'
+*        by the port's VLANTable if 802.1Q is enabled on the port. Both must
+*        allow the frame to Egress.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - GT_TRUE for protected mode or GT_FALSE otherwise
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetProtectedMode
+(
+    IN  GT_QD_DEV   *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL      mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetProtectedMode Called.\n"));
+
+    /* Check if this feature is supported */
+    if(IS_VALID_API_CALL(dev,port, DEV_PORT_SECURITY) != GT_OK)
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_CROSS_CHIP_VLAN))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Set the protected mode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,3,1, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetProtectedMode
+*
+* DESCRIPTION:
+*       This routine gets protected mode of a switch port.
+*        When this mode is set to GT_TRUE, frames are allowed to egress port
+*        defined by the 802.1Q VLAN membership for the frame's VID 'AND'
+*        by the port's VLANTable if 802.1Q is enabled on the port. Both must
+*        allow the frame to Egress.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_TRUE: header mode enabled
+*           GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetProtectedMode
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetProtectedMode Called.\n"));
+
+    if(IS_VALID_API_CALL(dev,port, DEV_PORT_SECURITY) != GT_OK)
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_CROSS_CHIP_VLAN))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the protected Mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL, 3, 1, &data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *mode);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtSetForwardUnknown
+*
+* DESCRIPTION:
+*       This routine set Forward Unknown mode of a switch port.
+*        When this mode is set to GT_TRUE, normal switch operation occurs.
+*        When this mode is set to GT_FALSE, unicast frame with unknown DA addresses
+*        will not egress out this port.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - GT_TRUE for protected mode or GT_FALSE otherwise
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetForwardUnknown
+(
+    IN  GT_QD_DEV   *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL      mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetForwardUnknown Called.\n"));
+
+    if(IS_VALID_API_CALL(dev,port, DEV_PORT_SECURITY|DEV_EGRESS_FLOOD) != GT_OK)
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Set the forward unknown mode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,2,1, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetForwardUnknown
+*
+* DESCRIPTION:
+*       This routine gets Forward Unknown mode of a switch port.
+*        When this mode is set to GT_TRUE, normal switch operation occurs.
+*        When this mode is set to GT_FALSE, unicast frame with unknown DA addresses
+*        will not egress out this port.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_TRUE: header mode enabled
+*           GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetForwardUnknown
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetForwardUnknown Called.\n"));
+
+    if(IS_VALID_API_CALL(dev,port, DEV_PORT_SECURITY|DEV_EGRESS_FLOOD) != GT_OK)
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the forward unknown Mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL, 2, 1, &data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *mode);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetDropOnLock
+*
+* DESCRIPTION:
+*        This routine set the Drop on Lock. When set to one, Ingress frames will
+*        be discarded if their SA field is not in the ATU's address database.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for Unknown SA drop or GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetDropOnLock
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetDropOnLock Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_CASCADE_PORT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set the DropOnLock mode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,14,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gprtGetDropOnLock
+*
+* DESCRIPTION:
+*        This routine gets DropOnLock mode.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE: DropOnLock enabled,
+*                 GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetDropOnLock
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetDropOnLock Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_CASCADE_PORT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the DropOnLock Mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL, 14, 1, &data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *mode);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtSetDoubleTag
+*
+* DESCRIPTION:
+*        This routine set the Ingress Double Tag Mode. When set to one,
+*        ingressing frames are examined to see if they contain an 802.3ac tag.
+*        If they do, the tag is removed and then the frame is processed from
+*        there (i.e., removed tag is ignored). Essentially, untagged frames
+*        remain untagged, single tagged frames become untagged and double tagged
+*        frames become single tagged.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for DoulbeTag mode or GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetDoubleTag
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetDoubleTag Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_INGRESS_DOUBLE_TAGGING))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set the DoubleTag mode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,9,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gprtGetDoubleTag
+*
+* DESCRIPTION:
+*        This routine gets DoubleTag mode.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE: DoubleTag enabled,
+*                 GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetDoubleTag
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetDoubleTag Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_INGRESS_DOUBLE_TAGGING))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the DoubleTag Mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL, 9, 1, &data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *mode);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetInterswitchPort
+*
+* DESCRIPTION:
+*        This routine set Interswitch Port. When set to one,
+*        it indicates this port is a interswitch port used to communicated with
+*        CPU or to cascade with another switch device.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for Interswitch port or GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetInterswitchPort
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetInterswitchPort Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_CASCADE_PORT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set the InterswitchPort.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,8,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gprtGetInterswithPort
+*
+* DESCRIPTION:
+*        This routine gets InterswitchPort.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE: This port is interswitch port,
+*                 GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetInterswitchPort
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetInterswitchPort Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_CASCADE_PORT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the InterswitchPort Mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL, 8, 1, &data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *mode);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtSetLearnDisable
+*
+* DESCRIPTION:
+*        This routine enables/disables automatic learning of new source MAC
+*        addresses on the given port ingress
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for disable or GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetLearnDisable
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetLearnDisable Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set the LearnDisable mode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_VLAN_MAP,11,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetLearnDisable
+*
+* DESCRIPTION:
+*        This routine gets LearnDisable setup
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE: Learning disabled on the given port ingress frames,
+*                 GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetLearnDisable
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetLearnDisable Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the LearnDisable Mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_VLAN_MAP, 11, 1, &data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *mode);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtSetIgnoreFCS
+*
+* DESCRIPTION:
+*        This routine sets FCS Ignore mode. When this bit is set to a one,
+*        the last four bytes of frames received on this port are overwritten with
+*        a good CRC and the frames will be accepted by the switch.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for ignore FCS or GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetIgnoreFCS
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_BOOL         mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetIgnoreFCS Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Only Gigabit Switch supports this status. */
+    if (!(IS_IN_DEV_GROUP(dev, DEV_IGNORE_FCS)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set the IgnoreFCS mode.            */
+    if ((IS_IN_DEV_GROUP(dev,DEV_88E6093_FAMILY)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_ENHANCED_FE_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY)))
+    {
+        retVal = hwSetPortRegField(dev,hwPort,QD_REG_PORT_CONTROL2,15,1,data );
+    }
+    else
+    {
+        retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_VLAN_MAP,10,1,data);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetIgnoreFCS
+*
+* DESCRIPTION:
+*        This routine gets Ignore FCS setup
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE: Ignore FCS on the given port's ingress frames,
+*                 GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetIgnoreFCS
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetIgnoreFCS Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Only Gigabit Switch supports this status. */
+    if (!(IS_IN_DEV_GROUP(dev,DEV_IGNORE_FCS)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the IgnoreFCS Mode.            */
+    if ((IS_IN_DEV_GROUP(dev,DEV_88E6093_FAMILY)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_ENHANCED_FE_SWITCH) ||
+        (IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY))))
+    {
+        retVal = hwGetPortRegField(dev,hwPort,QD_REG_PORT_CONTROL2,15,1,&data );
+    }
+    else
+    {
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_VLAN_MAP, 10, 1, &data);
+    }
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *mode);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetVTUPriOverride
+*
+* DESCRIPTION:
+*        VTU Priority Override. The following modes are supported:
+*            PRI_OVERRIDE_NONE -
+*                Normal frame priority processing occurs.
+*            PRI_OVERRIDE_FRAME_QUEUE -
+*                Both frame and queue overrides take place on the frame.
+*            PRI_OVERRIDE_FRAME -
+*                Priority assigned to the frame's VID (in the VTU table) is used
+*                to overwite the frame's FPri (frame priority).
+*                If the frame egresses tagged, the priority in the frame will be
+*                this new priority value.
+*            PRI_OVERRIDE_QUEUE -
+*                Priority assigned to the frame's VID (in the VTU table) is used
+*                to overwite the frame's QPri (queue priority).
+*                QPri is used internally to map the frame to one of the egress
+*                queues inside the switch.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_PRI_OVERRIDE type
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if mode is invalid
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        PRI_OVERRIDE_FRAME and PRI_OVERRIDE_QUEUE modes are supported only on
+*        certain switch device. Please refer to the device datasheet.
+*
+*******************************************************************************/
+GT_STATUS gprtSetVTUPriOverride
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_PRI_OVERRIDE        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetVTUPriOverride Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_PRIORITY_OVERRIDE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    switch (mode)
+    {
+        case PRI_OVERRIDE_NONE:
+            data = 0;
+            break;
+        case PRI_OVERRIDE_FRAME_QUEUE:
+            if (IS_IN_DEV_GROUP(dev,DEV_EXT_PRIORITY_OVERRIDE))
+            {
+                data = 3;
+            }
+            else
+            {
+                data = 1;
+            }
+            break;
+        case PRI_OVERRIDE_FRAME:
+            if (!IS_IN_DEV_GROUP(dev,DEV_EXT_PRIORITY_OVERRIDE))
+            {
+                DBG_INFO(("Bad Parameter\n"));
+                return GT_BAD_PARAM;
+            }
+            data = 1;
+            break;
+        case PRI_OVERRIDE_QUEUE:
+            if (!IS_IN_DEV_GROUP(dev,DEV_EXT_PRIORITY_OVERRIDE))
+            {
+                DBG_INFO(("Bad Parameter\n"));
+                return GT_BAD_PARAM;
+            }
+            data = 2;
+            break;
+        default:
+            DBG_INFO(("Bad Parameter\n"));
+            return GT_BAD_PARAM;
+    }
+
+    /* Set the VTUPri Override mode.            */
+    if (IS_IN_DEV_GROUP(dev,DEV_EXT_PRIORITY_OVERRIDE))
+    {
+        retVal = hwSetPortRegField(dev,hwPort, QD_REG_PRI_OVERRIDE,10,2,data);
+    }
+    else
+    {
+        retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2,14,1,data);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetVTUPriOverride
+*
+* DESCRIPTION:
+*        VTU Priority Override. The following modes are supported:
+*            PRI_OVERRIDE_NONE -
+*                Normal frame priority processing occurs.
+*            PRI_OVERRIDE_FRAME_QUEUE -
+*                Both frame and queue overrides take place on the frame.
+*            PRI_OVERRIDE_FRAME -
+*                Priority assigned to the frame's VID (in the VTU table) is used
+*                to overwite the frame's FPri (frame priority).
+*                If the frame egresses tagged, the priority in the frame will be
+*                this new priority value.
+*            PRI_OVERRIDE_QUEUE -
+*                Priority assigned to the frame's VID (in the VTU table) is used
+*                to overwite the frame's QPri (queue priority).
+*                QPri is used internally to map the frame to one of the egress
+*                queues inside the switch.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_PRI_OVERRIDE type
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        PRI_OVERRIDE_FRAME and PRI_OVERRIDE_QUEUE modes are supported only on
+*        certain switch device. Please refer to the device datasheet.
+*
+*******************************************************************************/
+GT_STATUS gprtGetVTUPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_PRI_OVERRIDE        *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetVTUPriOverride Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_PRIORITY_OVERRIDE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the VTUPriOverride Mode.            */
+    if (IS_IN_DEV_GROUP(dev,DEV_EXT_PRIORITY_OVERRIDE))
+    {
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_PRI_OVERRIDE,10,2,&data);
+    }
+    else
+    {
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2, 14, 1, &data);
+        if(data == 1)
+            data = 3;
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    switch (data)
+    {
+        case 0:
+            *mode = PRI_OVERRIDE_NONE;
+            break;
+        case 3:
+            *mode = PRI_OVERRIDE_FRAME_QUEUE;
+            break;
+        case 1:
+            *mode = PRI_OVERRIDE_FRAME;
+            break;
+        case 2:
+            *mode = PRI_OVERRIDE_QUEUE;
+            break;
+        default:
+            DBG_INFO(("Bad Parameter\n"));
+            return GT_BAD_PARAM;
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gprtSetSAPriOverride
+*
+* DESCRIPTION:
+*        SA Priority Override. The following mode is supported:
+*            PRI_OVERRIDE_NONE -
+*                Normal frame priority processing occurs.
+*            PRI_OVERRIDE_FRAME_QUEUE -
+*                Both frame and queue overrides take place on the frame.
+*            PRI_OVERRIDE_FRAME -
+*                Priority assigned to the frame's SA (in the ATU table) is used
+*                to overwite the frame's FPri (frame priority).
+*                If the frame egresses tagged, the priority in the frame will be
+*                this new priority value.
+*            PRI_OVERRIDE_QUEUE -
+*                Priority assigned to the frame's SA (in the ATU table) is used
+*                to overwite the frame's QPri (queue priority).
+*                QPri is used internally to map the frame to one of the egress
+*                queues inside the switch.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_PRI_OVERRIDE type
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if mode is invalid
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        PRI_OVERRIDE_FRAME and PRI_OVERRIDE_QUEUE modes are supported only on
+*        certain switch device. Please refer to the device datasheet.
+*
+*******************************************************************************/
+GT_STATUS gprtSetSAPriOverride
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetSAPriOverride Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_PRIORITY_OVERRIDE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    switch (mode)
+    {
+        case PRI_OVERRIDE_NONE:
+            data = 0;
+            break;
+        case PRI_OVERRIDE_FRAME_QUEUE:
+            if (IS_IN_DEV_GROUP(dev,DEV_EXT_PRIORITY_OVERRIDE))
+            {
+                data = 3;
+            }
+            else
+            {
+                data = 1;
+            }
+            break;
+        case PRI_OVERRIDE_FRAME:
+            if (!IS_IN_DEV_GROUP(dev,DEV_EXT_PRIORITY_OVERRIDE))
+            {
+                DBG_INFO(("Bad Parameter\n"));
+                return GT_BAD_PARAM;
+            }
+            data = 1;
+            break;
+        case PRI_OVERRIDE_QUEUE:
+            if (!IS_IN_DEV_GROUP(dev,DEV_EXT_PRIORITY_OVERRIDE))
+            {
+                DBG_INFO(("Bad Parameter\n"));
+                return GT_BAD_PARAM;
+            }
+            data = 2;
+            break;
+        default:
+            DBG_INFO(("Bad Parameter\n"));
+            return GT_BAD_PARAM;
+    }
+
+    /* Set the SAPriOverride mode.            */
+    if (IS_IN_DEV_GROUP(dev,DEV_EXT_PRIORITY_OVERRIDE))
+    {
+        retVal = hwSetPortRegField(dev,hwPort, QD_REG_PRI_OVERRIDE,12,2,data);
+    }
+    else
+    {
+        retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2,13,1,data);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetSAPriOverride
+*
+* DESCRIPTION:
+*        SA Priority Override. The following mode is supported:
+*            PRI_OVERRIDE_NONE -
+*                Normal frame priority processing occurs.
+*            PRI_OVERRIDE_FRAME_QUEUE -
+*                Both frame and queue overrides take place on the frame.
+*            PRI_OVERRIDE_FRAME -
+*                Priority assigned to the frame's SA (in the ATU table) is used
+*                to overwite the frame's FPri (frame priority).
+*                If the frame egresses tagged, the priority in the frame will be
+*                this new priority value.
+*            PRI_OVERRIDE_QUEUE -
+*                Priority assigned to the frame's SA (in the ATU table) is used
+*                to overwite the frame's QPri (queue priority).
+*                QPri is used internally to map the frame to one of the egress
+*                queues inside the switch.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_PRI_OVERRIDE type
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        PRI_OVERRIDE_FRAME and PRI_OVERRIDE_QUEUE modes are supported only on
+*        certain switch device. Please refer to the device datasheet.
+*
+*******************************************************************************/
+GT_STATUS gprtGetSAPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_PRI_OVERRIDE        *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetSAPriOverride Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_PRIORITY_OVERRIDE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the SAPriOverride Mode.            */
+    if (IS_IN_DEV_GROUP(dev,DEV_EXT_PRIORITY_OVERRIDE))
+    {
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_PRI_OVERRIDE,12,2,&data);
+    }
+    else
+    {
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2, 13, 1, &data);
+        if(data == 1)
+            data = 3;
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    switch (data)
+    {
+        case 0:
+            *mode = PRI_OVERRIDE_NONE;
+            break;
+        case 3:
+            *mode = PRI_OVERRIDE_FRAME_QUEUE;
+            break;
+        case 1:
+            *mode = PRI_OVERRIDE_FRAME;
+            break;
+        case 2:
+            *mode = PRI_OVERRIDE_QUEUE;
+            break;
+        default:
+            DBG_INFO(("Bad Parameter\n"));
+            return GT_BAD_PARAM;
+    }
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gprtSetDAPriOverride
+*
+* DESCRIPTION:
+*        DA Priority Override. The following mode is supported:
+*            PRI_OVERRIDE_NONE -
+*                Normal frame priority processing occurs.
+*            PRI_OVERRIDE_FRAME -
+*                Priority assigned to the frame's DA (in the ATU table) is used
+*                to overwite the frame's FPri (frame priority).
+*                If the frame egresses tagged, the priority in the frame will be
+*                this new priority value.
+*            PRI_OVERRIDE_QUEUE -
+*                Priority assigned to the frame's DA (in the ATU table) is used
+*                to overwite the frame's QPri (queue priority).
+*                QPri is used internally to map the frame to one of the egress
+*                queues inside the switch.
+*            PRI_OVERRIDE_FRAME_QUEUE -
+*                Both frame and queue overrides take place on the frame.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_PRI_OVERRIDE type
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if mode is invalid
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        PRI_OVERRIDE_FRAME and PRI_OVERRIDE_QUEUE modes are supported only on
+*        certain switch device. Please refer to the device datasheet.
+*
+*******************************************************************************/
+GT_STATUS gprtSetDAPriOverride
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_PRI_OVERRIDE        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetDAPriOverride Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_PRIORITY_OVERRIDE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    switch (mode)
+    {
+        case PRI_OVERRIDE_NONE:
+            data = 0;
+            break;
+        case PRI_OVERRIDE_FRAME_QUEUE:
+            if (IS_IN_DEV_GROUP(dev,DEV_EXT_PRIORITY_OVERRIDE))
+            {
+                data = 3;
+            }
+            else
+            {
+                data = 1;
+            }
+            break;
+        case PRI_OVERRIDE_FRAME:
+            if (!IS_IN_DEV_GROUP(dev,DEV_EXT_PRIORITY_OVERRIDE))
+            {
+                DBG_INFO(("Bad Parameter\n"));
+                return GT_BAD_PARAM;
+            }
+            data = 1;
+            break;
+        case PRI_OVERRIDE_QUEUE:
+            if (!IS_IN_DEV_GROUP(dev,DEV_EXT_PRIORITY_OVERRIDE))
+            {
+                DBG_INFO(("Bad Parameter\n"));
+                return GT_BAD_PARAM;
+            }
+            data = 2;
+            break;
+        default:
+            DBG_INFO(("Bad Parameter\n"));
+            return GT_BAD_PARAM;
+    }
+
+    /* Set the DAPriOverride mode.            */
+    if (IS_IN_DEV_GROUP(dev,DEV_EXT_PRIORITY_OVERRIDE))
+    {
+        retVal = hwSetPortRegField(dev,hwPort, QD_REG_PRI_OVERRIDE,14,2,data);
+    }
+    else
+    {
+        retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2,12,1,data);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetDAPriOverride
+*
+* DESCRIPTION:
+*        DA Priority Override. The following mode is supported:
+*            PRI_OVERRIDE_NONE -
+*                Normal frame priority processing occurs.
+*            PRI_OVERRIDE_FRAME -
+*                Priority assigned to the frame's DA (in the ATU table) is used
+*                to overwite the frame's FPri (frame priority).
+*                If the frame egresses tagged, the priority in the frame will be
+*                this new priority value.
+*            PRI_OVERRIDE_QUEUE -
+*                Priority assigned to the frame's DA (in the ATU table) is used
+*                to overwite the frame's QPri (queue priority).
+*                QPri is used internally to map the frame to one of the egress
+*                queues inside the switch.
+*            PRI_OVERRIDE_FRAME_QUEUE -
+*                Both frame and queue overrides take place on the frame.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_PRI_OVERRIDE type
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        PRI_OVERRIDE_FRAME and PRI_OVERRIDE_QUEUE modes are supported only on
+*        certain switch device. Please refer to the device datasheet.
+*
+*******************************************************************************/
+GT_STATUS gprtGetDAPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_PRI_OVERRIDE        *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetDAPriOverride Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_PRIORITY_OVERRIDE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the DAPriOverride Mode.            */
+    if (IS_IN_DEV_GROUP(dev,DEV_EXT_PRIORITY_OVERRIDE))
+    {
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_PRI_OVERRIDE,14,2,&data);
+    }
+    else
+    {
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2, 12, 1, &data);
+        if(data == 1)
+            data = 3;
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    switch (data)
+    {
+        case 0:
+            *mode = PRI_OVERRIDE_NONE;
+            break;
+        case 3:
+            *mode = PRI_OVERRIDE_FRAME_QUEUE;
+            break;
+        case 1:
+            *mode = PRI_OVERRIDE_FRAME;
+            break;
+        case 2:
+            *mode = PRI_OVERRIDE_QUEUE;
+            break;
+        default:
+            DBG_INFO(("Bad Parameter\n"));
+            return GT_BAD_PARAM;
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gprtSetCPUPort
+*
+* DESCRIPTION:
+*        This routine sets CPU Port number. When Snooping is enabled on this port
+*        or when this port is configured as an Interswitch Port and it receives a
+*        To_CPU frame, the switch needs to know what port on this device the frame
+*        should egress.
+*
+* INPUTS:
+*        port - the logical port number.
+*        cpuPort - CPU Port number or interswitch port where CPU Port is connected
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetCPUPort
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_LPORT     cpuPort
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetCPUPort Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+    data = (GT_U16)GT_LPORT_2_PORT(cpuPort);
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_CPU_DEST_PER_PORT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Set the CPU Port.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2,0,4,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetCPUPort
+*
+* DESCRIPTION:
+*        This routine gets CPU Logical Port
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        cpuPort - CPU Port's logical number
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetCPUPort
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_LPORT     *cpuLPort
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetCPUPort Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_CPU_DEST_PER_PORT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the CPUPort.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2, 0, 4, &data);
+
+    *cpuLPort = GT_PORT_2_LPORT((GT_U8)data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetLockedPort
+*
+* DESCRIPTION:
+*        This routine sets LockedPort. When it's set to one, CPU directed
+*        learning for 802.1x MAC authentication is enabled on this port. In this
+*        mode, an ATU Miss Violation interrupt will occur when a new SA address
+*        is received in a frame on this port. Automatically SA learning and
+*        refreshing is disabled in this mode.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for Locked Port, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetLockedPort
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetLockedPort Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Only Gigabit Switch supports this status. */
+    if (!(IS_IN_DEV_GROUP(dev,DEV_LOCKED_PORT)) )
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set Locked Port.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_ASSOCIATION, 13, 1, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetLockedPort
+*
+* DESCRIPTION:
+*        This routine gets Locked Port mode for the given port
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode  - GT_TRUE if LockedPort, GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetLockedPort
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL      *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetLockedPort Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Only Gigabit Switch supports this status. */
+    if (!(IS_IN_DEV_GROUP(dev,DEV_LOCKED_PORT)) )
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the LockedPort. */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_ASSOCIATION, 13, 1, &data);
+
+    BIT_2_BOOL(data, *mode);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetIgnoreWrongData
+*
+* DESCRIPTION:
+*        This routine sets Ignore Wrong Data. If the frame's SA address is found
+*        in the database and if the entry is 'static' or if the port is 'locked'
+*        the source port's bit is checked to insure the SA has been assigned to
+*        this port. If the SA is NOT assigned to this port, it is considered an
+*        ATU Member Violation. If the IgnoreWrongData is set to GT_FALSE, an ATU
+*        Member Violation interrupt will be generated. If it's set to GT_TRUE,
+*        the ATU Member Violation error will be masked and ignored.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for IgnoreWrongData, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetIgnoreWrongData
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetIgnoreWrongData Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Only Gigabit Switch supports this status. */
+    if (!(IS_IN_DEV_GROUP(dev,DEV_IGNORE_WRONG_DAT)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set IgnoreWrongData.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_ASSOCIATION, 12, 1, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetIgnoreWrongData
+*
+* DESCRIPTION:
+*        This routine gets Ignore Wrong Data mode for the given port
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode  - GT_TRUE if IgnoreWrongData, GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetIgnoreWrongData
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetIgnoreWrongData Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Only Gigabit Switch supports this status. */
+    if (!(IS_IN_DEV_GROUP(dev,DEV_IGNORE_WRONG_DAT)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the IgnoreWrongData. */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_ASSOCIATION, 12, 1, &data);
+
+    BIT_2_BOOL(data, *mode);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetDiscardTagged
+*
+* DESCRIPTION:
+*        When this bit is set to a one, all non-MGMT frames that are processed as
+*        Tagged will be discarded as they enter this switch port. Priority only
+*        tagged frames (with a VID of 0x000) are considered tagged.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to discard tagged frame, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetDiscardTagged
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetDiscardTagged Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!(IS_IN_DEV_GROUP(dev,DEV_DISCARD_TAGGED)) )
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set DiscardTagged. */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2, 9, 1, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetDiscardTagged
+*
+* DESCRIPTION:
+*        This routine gets DiscardTagged bit for the given port
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode  - GT_TRUE if DiscardTagged bit is set, GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetDiscardTagged
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetDiscardTagged Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!(IS_IN_DEV_GROUP(dev,DEV_DISCARD_TAGGED)) )
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the DiscardTagged. */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2, 9, 1, &data);
+
+    BIT_2_BOOL(data, *mode);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetDiscardUntagged
+*
+* DESCRIPTION:
+*        When this bit is set to a one, all non-MGMT frames that are processed as
+*        Untagged will be discarded as they enter this switch port. Priority only
+*        tagged frames (with a VID of 0x000) are considered tagged.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to discard untagged frame, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetDiscardUntagged
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetDiscardUntagged Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!(IS_IN_DEV_GROUP(dev,DEV_DISCARD_TAGGED)) )
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set DiscardUnTagged. */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2, 8, 1, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetDiscardUntagged
+*
+* DESCRIPTION:
+*        This routine gets DiscardUntagged bit for the given port
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode  - GT_TRUE if DiscardUntagged bit is set, GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetDiscardUntagged
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetDiscardUnTagged Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!(IS_IN_DEV_GROUP(dev,DEV_DISCARD_TAGGED)) )
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the DiscardUnTagged. */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2, 8, 1, &data);
+
+    BIT_2_BOOL(data, *mode);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtSetMapDA
+*
+* DESCRIPTION:
+*        When this bit is set to a one, normal switch operation will occur where a
+*        frame's DA address is used to direct the frame out the correct port.
+*        When this be is cleared to a zero, the frame will be sent out the port(s)
+*        defined by ForwardUnknown bits or the DefaultForward bits even if the DA
+*        is ound in the address database.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to use MapDA, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetMapDA
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetMapDA Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!(IS_IN_DEV_GROUP(dev,DEV_MAP_DA)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set MapDA. */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2, 7, 1, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetMapDA
+*
+* DESCRIPTION:
+*        This routine gets MapDA bit for the given port
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode  - GT_TRUE if MapDA bit is set, GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetMapDA
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetMapDA Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!(IS_IN_DEV_GROUP(dev,DEV_MAP_DA)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the MapDA. */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2, 7, 1, &data);
+
+    BIT_2_BOOL(data, *mode);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetDefaultForward
+*
+* DESCRIPTION:
+*        When this bit is set to a one, normal switch operation will occurs and
+*        multicast frames with unknown DA addresses are allowed to egress out this
+*        port (assuming the VLAN settings allow the frame to egress this port too).
+*        When this bit is cleared to a zero, multicast frames with unknown DA
+*        addresses will not egress out this port.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to use DefaultForward, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetDefaultForward
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetDefaultForward Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_88E6093_FAMILY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set DefaultForward. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_EGRESS_FLOOD))
+    {
+        retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2, 6, 1, data);
+    }
+    else
+    {
+        retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL, 3, 1, data);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetDefaultForward
+*
+* DESCRIPTION:
+*        This routine gets DefaultForward bit for the given port
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode  - GT_TRUE if DefaultForward bit is set, GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetDefaultForward
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetDefaultForward Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_88E6093_FAMILY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the DefaultForward. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_EGRESS_FLOOD))
+    {
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2, 6, 1, &data);
+    }
+    else
+    {
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL, 3, 1, &data);
+    }
+
+    BIT_2_BOOL(data, *mode);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtSetEgressMonitorSource
+*
+* DESCRIPTION:
+*        When this be is cleared to a zero, normal network switching occurs.
+*        When this bit is set to a one, any frame that egresses out this port will
+*        also be sent to the EgressMonitorDest Port
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to set EgressMonitorSource, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetEgressMonitorSource
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetEgressMonitorSource Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_88E6093_FAMILY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set EgressMonitorSource. */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2, 5, 1, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetEgressMonitorSource
+*
+* DESCRIPTION:
+*        This routine gets EgressMonitorSource bit for the given port
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode  - GT_TRUE if EgressMonitorSource bit is set, GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetEgressMonitorSource
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetEgressMonitorSource Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_88E6093_FAMILY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the EgressMonitorSource. */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2, 5, 1, &data);
+
+    BIT_2_BOOL(data, *mode);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetIngressMonitorSource
+*
+* DESCRIPTION:
+*        When this be is cleared to a zero, normal network switching occurs.
+*        When this bit is set to a one, any frame that egresses out this port will
+*        also be sent to the EgressMonitorDest Port
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to set EgressMonitorSource, GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetIngressMonitorSource
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetIngressMonitorSource Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_88E6093_FAMILY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set IngressMonitorSource. */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2, 4, 1, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetIngressMonitorSource
+*
+* DESCRIPTION:
+*        This routine gets IngressMonitorSource bit for the given port
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode  - GT_TRUE if IngressMonitorSource bit is set, GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetIngressMonitorSource
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetIngressMonitorSource Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_88E6093_FAMILY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the IngressMonitorSource. */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2, 4, 1, &data);
+
+    BIT_2_BOOL(data, *mode);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetMessagePort
+*
+* DESCRIPTION:
+*        When the Learn2All bit is set to one, learning message frames are
+*        generated. These frames will be sent out all ports whose Message Port is
+*        set to one.
+*         If this feature is used, it is recommended that all Marvell Tag ports,
+*        except for the CPU's port, have their MessagePort bit set to one.
+*        Ports that are not Marvell Tag ports should not have their Message Port
+*        bit set to one.
+*
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to make this port a Message Port. GT_FALSE, otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetMessagePort
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetMessagePort Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TRUNK))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set IngressMonitorSource. */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL1, 15, 1, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetMessagePort
+*
+* DESCRIPTION:
+*        When the Learn2All bit is set to one, learning message frames are
+*        generated. These frames will be sent out all ports whose Message Port is
+*        set to one.
+*         If this feature is used, it is recommended that all Marvell Tag ports,
+*        except for the CPU's port, have their MessagePort bit set to one.
+*        Ports that are not Marvell Tag ports should not have their Message Port
+*        bit set to one.
+*
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE to make this port a Message Port. GT_FALSE, otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetMessagePort
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetMessagePort Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TRUNK))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the IngressMonitorSource. */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL1, 15, 1, &data);
+
+    BIT_2_BOOL(data, *mode);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetTrunkPort
+*
+* DESCRIPTION:
+*        This function enables/disables and sets the trunk ID.
+*
+* INPUTS:
+*        port - the logical port number.
+*        en - GT_TRUE to make the port be a member of a trunk with the given trunkId.
+*             GT_FALSE, otherwise.
+*        trunkId - valid ID is 0 ~ 15.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if trunkId is neither valid nor INVALID_TRUNK_ID
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetTrunkPort
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL         en,
+    IN GT_U32        trunkId
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetTrunkPort Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TRUNK))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(en, data);
+
+    if(en == GT_TRUE)
+    {
+        /* need to enable trunk. so check the trunkId */
+        if (!IS_TRUNK_ID_VALID(dev, trunkId))
+        {
+            DBG_INFO(("GT_BAD_PARAM\n"));
+            return GT_BAD_PARAM;
+        }
+
+        /* Set TrunkId. */
+        if (IS_IN_DEV_GROUP(dev,DEV_TRUNK_NEW_ID_LOCATION))
+        {
+            retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL1, 8, 4, (GT_U16)trunkId);
+        }
+        else
+        {
+            retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL1, 4, 4, (GT_U16)trunkId);
+        }
+
+        if(retVal != GT_OK)
+        {
+               DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+
+    }
+    else
+    {
+        /*
+           Need to reset trunkId for 88E6095 rev0.
+        */
+        if (IS_IN_DEV_GROUP(dev,DEV_NEW_FEATURE_IN_REV) &&
+            ((GT_DEVICE_REV)dev->revision < GT_REV_1))
+        {
+            trunkId = 0;
+
+            /* Set TrunkId. */
+            retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL1, 4, 4, (GT_U16)trunkId);
+            if(retVal != GT_OK)
+            {
+                   DBG_INFO(("Failed.\n"));
+                return retVal;
+            }
+        }
+    }
+
+    /* Set TrunkPort bit. */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL1, 14, 1, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetTrunkPort
+*
+* DESCRIPTION:
+*        This function returns trunk state of the port.
+*        When trunk is disabled, trunkId field won't have valid value.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        en - GT_TRUE, if the port is a member of a trunk,
+*             GT_FALSE, otherwise.
+*        trunkId - 0 ~ 15, valid only if en is GT_TRUE
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetTrunkPort
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    OUT GT_BOOL     *en,
+    OUT GT_U32        *trunkId
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetTrunkPort Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TRUNK))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    data = 0;
+
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL1, 14, 1, &data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data, *en);
+
+    if (IS_IN_DEV_GROUP(dev,DEV_TRUNK_NEW_ID_LOCATION))
+    {
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL1, 8, 4, &data);
+    }
+    else
+    {
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL1, 4, 4, &data);
+    }
+
+    *trunkId = (GT_U32)data;
+
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetDiscardBCastMode
+*
+* DESCRIPTION:
+*       This routine gets the Discard Broadcast Mode. If the mode is enabled,
+*        all the broadcast frames to the given port will be discarded.
+*
+* INPUTS:
+*       port - logical port number
+*
+* OUTPUTS:
+*        en - GT_TRUE, if enabled,
+*             GT_FALSE, otherwise.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetDiscardBCastMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *en
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetDiscardBCastMode Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_DROP_BCAST))
+    {
+        if (!IS_IN_DEV_GROUP(dev,DEV_NEW_FEATURE_IN_REV) ||
+            ((GT_DEVICE_REV)dev->revision < GT_REV_1))
+        {
+            DBG_INFO(("GT_NOT_SUPPORTED\n"));
+            return GT_NOT_SUPPORTED;
+        }
+    }
+
+    data = 0;
+
+    retVal = hwGetPortRegField(dev,hwPort, 0x15, 6, 1, &data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data, *en);
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gprtSetDiscardBCastMode
+*
+* DESCRIPTION:
+*       This routine sets the Discard Broadcast mode.
+*        If the mode is enabled, all the broadcast frames to the given port will
+*        be discarded.
+*
+* INPUTS:
+*       port - logical port number
+*        en - GT_TRUE, to enable the mode,
+*             GT_FALSE, otherwise.
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetDiscardBCastMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_BOOL      en
+)
+{
+    GT_U16          data;           /* Used to poll the data */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetDiscardBCastMode Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_DROP_BCAST))
+    {
+        if (!IS_IN_DEV_GROUP(dev,DEV_NEW_FEATURE_IN_REV) ||
+            ((GT_DEVICE_REV)dev->revision < GT_REV_1))
+        {
+            DBG_INFO(("GT_NOT_SUPPORTED\n"));
+            return GT_NOT_SUPPORTED;
+        }
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(en, data);
+
+    retVal = hwSetPortRegField(dev,hwPort, 0x15, 6, 1, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gprtGetFCOnRateLimitMode
+*
+* DESCRIPTION:
+*       This routine returns mode that tells if ingress rate limiting uses Flow
+*        Control. When this mode is enabled and the port receives frames over the
+*        limit, Ingress Rate Limiting will be performed by stalling the
+*        link partner using flow control, instead of discarding frames.
+*
+* INPUTS:
+*       port - logical port number
+*
+* OUTPUTS:
+*        en - GT_TRUE, if the mode is enabled,
+*             GT_FALSE, otherwise.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        In order for this mode to work, Flow Control and Rate Limiting
+*        should be configured properly.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetFCOnRateLimitMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *en
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetFCOnRateLimitMode Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_BURST_RATE))
+    {
+            DBG_INFO(("GT_NOT_SUPPORTED\n"));
+            return GT_NOT_SUPPORTED;
+    }
+
+    data = 0;
+
+    retVal = hwGetPortRegField(dev,hwPort, 0x15, 4, 2, &data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    if (data == 0x3)
+        *en = GT_TRUE;
+    else
+        *en = GT_FALSE;
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gprtSetFCOnRateLimitMode
+*
+* DESCRIPTION:
+*       This routine sets the mode that tells if ingress rate limiting uses Flow
+*        Control. When this mode is enabled and the port receives frames over the
+*        limit, Ingress Rate Limiting will be performed by stalling the
+*        link partner using flow control, instead of discarding frames.
+*
+* INPUTS:
+*       port - logical port number
+*        en - GT_TRUE, to enable the mode,
+*             GT_FALSE, otherwise.
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       This routine won't configure Flow Control or Rate Limiting.
+*        In order for this mode to work, Flow Control and Rate Limiting
+*        should be configured properly.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetFCOnRateLimitMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_BOOL      en
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetFCOnRateLimitMode Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_BURST_RATE))
+    {
+            DBG_INFO(("GT_NOT_SUPPORTED\n"));
+            return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    if (en)
+        data = 0x3;
+    else
+        data = 0;
+
+    retVal = hwSetPortRegField(dev,hwPort, 0x15, 4, 2, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gprtSetSAFiltering
+*
+* DESCRIPTION:
+*        This routine set the Source Address(SA) fitering method.
+*            GT_SA_FILTERING_DISABLE :
+*                no frame will be filtered.
+*            GT_SA_DROP_ON_LOCK :
+*                discard if SA field is not in the ATU's address database.
+*            GT_SA_DROP_ON_UNLOCK :
+*                discard if SA field is in the ATU's address database as Static
+*                entry with a PortVec of all zeros.
+*            GT_SA_DROP_TO_CPU :
+*                Ingressing frames will be mapped to the CPU Port if their SA
+*                field is in the ATU's address database as Static entry with a
+*                PortVec of all zeros. Otherwise, the frames will be discarded
+*                if their SA field is not in the ATU's address database or if this
+*                port's bit is not set in the PortVec bits for the frame's SA.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - GT_SA_FILTERING structure
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+*******************************************************************************/
+GT_STATUS gprtSetSAFiltering
+(
+    IN GT_QD_DEV  *dev,
+    IN GT_LPORT   port,
+    IN GT_SA_FILTERING    mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16            data;
+
+    DBG_INFO(("gprtSetSAFiltering Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device allows to force a flowcontrol disabled */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SA_FILTERING))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    data = (GT_U16) mode;
+
+    /* Set the SA Filtering bits.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,14,2,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetSAFiltering
+*
+* DESCRIPTION:
+*        This routine gets the Source Address(SA) fitering method.
+*            GT_SA_FILTERING_DISABLE :
+*                no frame will be filtered.
+*            GT_SA_DROP_ON_LOCK :
+*                discard if SA field is not in the ATU's address database.
+*            GT_SA_DROP_ON_UNLOCK :
+*                discard if SA field is in the ATU's address database as Static
+*                entry with a PortVec of all zeros.
+*            GT_SA_DROP_TO_CPU :
+*                Ingressing frames will be mapped to the CPU Port if their SA
+*                field is in the ATU's address database as Static entry with a
+*                PortVec of all zeros. Otherwise, the frames will be discarded
+*                if their SA field is not in the ATU's address database or if this
+*                port's bit is not set in the PortVec bits for the frame's SA.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_SA_FILTERING structure
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+*******************************************************************************/
+GT_STATUS gprtGetSAFiltering
+(
+    IN  GT_QD_DEV  *dev,
+    IN  GT_LPORT   port,
+    OUT GT_SA_FILTERING    *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16            data;
+
+    DBG_INFO(("gprtSetSAFiltering Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device allows to force a flowcontrol disabled */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SA_FILTERING))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the SA Filtering bits.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,14,2,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    *mode = (GT_SA_FILTERING)data;
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetARPtoCPU
+*
+* DESCRIPTION:
+*        When ARPtoCPU (or ARP Mirror) is set to GT_TRUE, ARP frames are mirrored
+*        to the CPU port.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - GT_TRUE, to map ARP frames to CPU Port,
+*               GT_FALSE, otherwise.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+*******************************************************************************/
+GT_STATUS gprtSetARPtoCPU
+(
+    IN GT_QD_DEV  *dev,
+    IN GT_LPORT   port,
+    IN GT_BOOL    mode
+)
+{
+    GT_U16            data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetARPtoCPU Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device allows to force a flowcontrol disabled */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ARP_TO_CPU))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set the ARPtoCPU bits.  */
+    if (IS_IN_DEV_GROUP(dev,DEV_FASTETH_SWITCH))
+        retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,8,1,data);
+    else
+        retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2,6,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetARPtoCPU
+*
+* DESCRIPTION:
+*        When ARPtoCPU (or ARP Mirror) is set to GT_TRUE, ARP frames are mirrored
+*        to the CPU port.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_TRUE, to map ARP frames to CPU Port,
+*               GT_FALSE, otherwise.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+*******************************************************************************/
+GT_STATUS gprtGetARPtoCPU
+(
+    IN  GT_QD_DEV  *dev,
+    IN  GT_LPORT   port,
+    OUT GT_BOOL    *mode
+)
+{
+    GT_U16            data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetARPtoCPU Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports the feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ARP_TO_CPU))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the ARPtoCPU bits.  */
+    if (IS_IN_DEV_GROUP(dev,DEV_FASTETH_SWITCH))
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,8,1,&data);
+    else
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2,6,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    BIT_2_BOOL(data, *mode);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gsysSetLearn2All
+*
+* DESCRIPTION:
+*       enable the Learn to All devices in a Switch, this must be enabled for
+*       hardware learn limiting is enabled on any port on any device
+*
+* INPUTS:
+*        en - GT_TRUE if Learn2All is set. GT_FALSE, otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetLearn2All
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysSetLearn2All Called.\n"));
+
+    BOOL_2_BIT(en, data);
+
+    /* Set related bit */
+    retVal = hwSetGlobalRegField(dev,QD_REG_AGETIME_LA_CONTROL,3,1, data);
+
+    if (retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetLearn2All
+*
+* DESCRIPTION:
+*       returns the state of Learn to All devices in a Switch flag
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if Learn2All is set. GT_FALSE, otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetLearn2All
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL       *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysGetLearn2All Called.\n"));
+
+    /* Set related bit */
+    retVal = hwGetGlobalRegField(dev, QD_REG_AGETIME_LA_CONTROL, 3, 1, &data);
+
+    if (retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data, *en);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gprtSetEgressFlood
+*
+* DESCRIPTION:
+*       This routine set Egress Flooding Mode.
+*        Frames with unknown DA (Destination Address that is not in ATU database)
+*        generally flood out all the ports. This mode can be used to prevent
+*        those frames from egressing this port as follows:
+*            GT_BLOCK_EGRESS_UNKNOWN
+*                do not egress frame with unknown DA (both unicast and multicast)
+*            GT_BLOCK_EGRESS_UNKNOWN_MULTICAST
+*                do not egress frame with unknown multicast DA
+*            GT_BLOCK_EGRESS_UNKNOWN_UNICAST
+*                do not egress frame with unknown unicast DA
+*            GT_BLOCK_EGRESS_NONE
+*                egress all frames with unknown DA
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - GT_EGRESS_FLOOD type
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetEgressFlood
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_LPORT     port,
+    IN  GT_EGRESS_FLOOD      mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16            data;
+
+    DBG_INFO(("gprtSetEgressFlood Called.\n"));
+
+    /* check if device supports the feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_EGRESS_FLOOD))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    data = (GT_U16) mode;
+
+    /* Set the Egress Flood mode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,2,2,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetEgressFlood
+*
+* DESCRIPTION:
+*       This routine gets Egress Flooding Mode.
+*        Frames with unknown DA (Destination Address that is not in ATU database)
+*        generally flood out all the ports. This mode can be used to prevent
+*        those frames from egressing this port as follows:
+*            GT_BLOCK_EGRESS_UNKNOWN
+*                do not egress frame with unknown DA (both unicast and multicast)
+*            GT_BLOCK_EGRESS_UNKNOWN_MULTICAST
+*                do not egress frame with unknown multicast DA
+*            GT_BLOCK_EGRESS_UNKNOWN_UNICAST
+*                do not egress frame with unknown unicast DA
+*            GT_BLOCK_EGRESS_NONE
+*                egress all frames with unknown DA
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_EGRESS_FLOOD type
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetEgressFlood
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_LPORT     port,
+    OUT GT_EGRESS_FLOOD      *mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetEgressFlood Called.\n"));
+
+    /* check if device supports the feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_EGRESS_FLOOD))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED.\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* get the Egress Flood mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,2,2,&data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    *mode = (GT_EGRESS_FLOOD) data;
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetPortSched
+*
+* DESCRIPTION:
+*        This routine sets Port Scheduling Mode.
+*        When usePortSched is enablied, this mode is used to select the Queue
+*        controller's scheduling on the port as follows:
+*            GT_PORT_SCHED_WEIGHTED_RRB - use 8,4,2,1 weighted fair scheduling
+*            GT_PORT_SCHED_STRICT_PRI - use a strict priority scheme
+*
+* INPUTS:
+*        port - the logical port number
+*        mode - GT_PORT_SCHED_MODE enum type
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetPortSched
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_PORT_SCHED_MODE        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetPortSched Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!((IS_IN_DEV_GROUP(dev,DEV_PORT_SCHEDULE)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_PORT_MIXED_SCHEDULE))))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_PORT_MIXED_SCHEDULE))
+    {
+        switch(mode)
+        {
+            case GT_PORT_SCHED_WEIGHTED_RRB:
+                data = 0;
+                break;
+            case GT_PORT_SCHED_STRICT_PRI:
+                data = 3;
+                break;
+            case GT_PORT_SCHED_STRICT_PRI3:
+                data = 1;
+                break;
+            case GT_PORT_SCHED_STRICT_PRI2_3:
+                data = 2;
+                break;
+            default:
+                return GT_BAD_PARAM;
+        }
+
+        retVal = hwSetPortRegField(dev,hwPort, QD_REG_RATE_CTRL, 12,2,data);
+
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+        }
+        else
+        {
+            DBG_INFO(("OK.\n"));
+        }
+        return retVal;
+
+    }
+
+
+    data = mode;
+
+    /* Set the gprtSetPortSched mode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_ASSOCIATION,14,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetPortSched
+*
+* DESCRIPTION:
+*        This routine gets Port Scheduling Mode.
+*        When usePortSched is enablied, this mode is used to select the Queue
+*        controller's scheduling on the port as follows:
+*            GT_PORT_SCHED_WEIGHTED_RRB - use 8,4,2,1 weighted fair scheduling
+*            GT_PORT_SCHED_STRICT_PRI - use a strict priority scheme
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        mode - GT_PORT_SCHED_MODE enum type
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPortSched
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_PORT_SCHED_MODE        *mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetPortSched Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!((IS_IN_DEV_GROUP(dev,DEV_PORT_SCHEDULE)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_PORT_MIXED_SCHEDULE))))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_PORT_MIXED_SCHEDULE))
+    {
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_RATE_CTRL, 12,2,&data);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+
+        switch(data)
+        {
+            case 0:
+                *mode = GT_PORT_SCHED_WEIGHTED_RRB;
+                break;
+            case 1:
+                *mode = GT_PORT_SCHED_STRICT_PRI3;
+                break;
+            case 2:
+                *mode = GT_PORT_SCHED_STRICT_PRI2_3;
+                break;
+            case 3:
+                *mode = GT_PORT_SCHED_STRICT_PRI;
+                break;
+            default:
+                return GT_BAD_PARAM;
+        }
+
+        return GT_OK;
+
+    }
+
+    /* Get the gprtGetPortSched mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_ASSOCIATION,14,1,&data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    *mode = (GT_PORT_SCHED_MODE)data;
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetProviderTag
+*
+* DESCRIPTION:
+*        This routine sets Provider Tag which indicates the provider tag (Ether
+*        Type) value that needs to be matched to in ingress to determine if a
+*        frame is Provider tagged or not.
+*
+* INPUTS:
+*        port - the logical port number
+*        tag  - Provider Tag (Ether Type)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetProviderTag
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_U16        tag
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetProviderTag Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_PROVIDER_TAG))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Set Provider Tag.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PROVIDER_TAG, 0, 16, tag);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetProviderTag
+*
+* DESCRIPTION:
+*        This routine gets Provider Tag which indicates the provider tag (Ether
+*        Type) value that needs to be matched to in ingress to determine if a
+*        frame is Provider tagged or not.
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        tag  - Provider Tag (Ether Type)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetProviderTag
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_U16        *tag
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetProviderTag Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_PROVIDER_TAG))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get Provider Tag.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PROVIDER_TAG, 0, 16, tag);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetPauseLimitOut
+*
+* DESCRIPTION:
+*        Limit the number of continuous Pause refresh frames that can be transmitted
+*        from this port. When full duplex Flow Control is enabled on this port,
+*        these bits are used to limit the number of Pause refresh frames that can
+*        be generated from this port to keep this port's link partner from sending
+*        any data.
+*        Setting this value to 0 will allow continuous Pause frame refreshes to
+*        egress this port as long as this port remains congested.
+*        Setting this value to 1 will allow 1 Pause frame to egress from this port
+*        for each congestion situation.
+*        Setting this value to 2 will allow 2 Pause frames to egress from this port
+*        for each congestion situation, etc.
+*
+* INPUTS:
+*        port - the logical port number
+*        limit - the max number of Pause refresh frames for each congestion situation
+*                ( 0 ~ 0xFF)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if limit > 0xFF
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetPauseLimitOut
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_U16        limit
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetPauseLimitOut Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_PAUSE_LIMIT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (limit > 0xFF)
+    {
+        DBG_INFO(("Bad Parameter\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Set Pause Limit.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_LIMIT_PAUSE_CONTROL, 8, 8, limit);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetPauseLimitOut
+*
+* DESCRIPTION:
+*        Limit the number of continuous Pause refresh frames that can be transmitted
+*        from this port. When full duplex Flow Control is enabled on this port,
+*        these bits are used to limit the number of Pause refresh frames that can
+*        be generated from this port to keep this port's link partner from sending
+*        any data.
+*        Setting this value to 0 will allow continuous Pause frame refreshes to
+*        egress this port as long as this port remains congested.
+*        Setting this value to 1 will allow 1 Pause frame to egress from this port
+*        for each congestion situation.
+*        Setting this value to 2 will allow 2 Pause frames to egress from this port
+*        for each congestion situation, etc.
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        limit - the max number of Pause refresh frames for each congestion situation
+*                ( 0 ~ 0xFF)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPauseLimitOut
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_U16        *limit
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetPauseLimitOut Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_PAUSE_LIMIT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get Pause Limit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_LIMIT_PAUSE_CONTROL, 8, 8, limit);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetPauseLimitIn
+*
+* DESCRIPTION:
+*        Limit the number of continuous Pause refresh frames that can be received
+*        on this port. When a port has flow Control enabled, this value can be
+*        used to limit how long this port can be Paused off to prevent a port stall
+*        through jamming.
+*        When this value is in the range of 0x01 to 0xFF, and a frame is ready to
+*        be transmitted out this port, but it cannot be transmitted due to the port
+*        being jammed, this limit mechanism starts. The limit mechanism starts
+*        counting new Pause refresh frames or counts of 16 consecutive collisions.
+*        If the counter reaches the value set through this API, the following event
+*        will occur:
+*            1) Port's ForceFC is enabled,
+*            2) Port's FCValue is cleared to a zero, and
+*            3) Jam Limit Interrupt is asserted.
+*        This effectively disables Flow Control on the port once the Pause timer
+*        expires. If a frame gets transmitted out this port before the counter
+*        reaches this limit, then this limit mechanism counter resets back to zero.
+*
+*        Setting this value to 0 will allow continuous jamming to be received on
+*        this port without the Port's ForceFC and FCValue getting modified.
+*
+*        The modification of Port's ForceFC and FCValue is the only indication that
+*        the limit was reached on this port.
+*
+* INPUTS:
+*        port - the logical port number
+*        limit - the max number of continuous Pause refresh frames for each trasmition
+*                ( 0 ~ 0xFF)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if limit > 0xFF
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetPauseLimitIn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_U16        limit
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetPauseLimitIn Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_PAUSE_LIMIT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (limit > 0xFF)
+    {
+        DBG_INFO(("Bad Parameter\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Set Pause Limit.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_LIMIT_PAUSE_CONTROL, 0, 8, limit);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetPauseLimitIn
+*
+* DESCRIPTION:
+*        Limit the number of continuous Pause refresh frames that can be received
+*        on this port. When a port has flow Control enabled, this value can be
+*        used to limit how long this port can be Paused off to prevent a port stall
+*        through jamming.
+*        When this value is in the range of 0x01 to 0xFF, and a frame is ready to
+*        be transmitted out this port, but it cannot be transmitted due to the port
+*        being jammed, this limit mechanism starts. The limit mechanism starts
+*        counting new Pause refresh frames or counts of 16 consecutive collisions.
+*        If the counter reaches the value set through this API, the following event
+*        will occur:
+*            1) Port's ForceFC is enabled,
+*            2) Port's FCValue is cleared to a zero, and
+*            3) Jam Limit Interrupt is asserted.
+*        This effectively disables Flow Control on the port once the Pause timer
+*        expires. If a frame gets transmitted out this port before the counter
+*        reaches this limit, then this limit mechanism counter resets back to zero.
+*
+*        Setting this value to 0 will allow continuous jamming to be received on
+*        this port without the Port's ForceFC and FCValue getting modified.
+*
+*        The modification of Port's ForceFC and FCValue is the only indication that
+*        the limit was reached on this port.
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        limit - the max number of continuous Pause refresh frames for each trasmition
+*                ( 0 ~ 0xFF)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPauseLimitIn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_U16        *limit
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetPauseLimitIn Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_PAUSE_LIMIT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get Pause Limit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_LIMIT_PAUSE_CONTROL, 0, 8, limit);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetFrameMode
+*
+* DESCRIPTION:
+*        Frmae Mode is used to define the expected Ingress and the generated Egress
+*        tagging frame format for this port as follows:
+*            GT_FRAME_MODE_NORMAL -
+*                Normal Network mode uses industry standard IEEE 802.3ac Tagged or
+*                Untagged frames. Tagged frames use an Ether Type of 0x8100.
+*            GT_FRAME_MODE_DSA -
+*                DSA mode uses a Marvell defined tagged frame format for
+*                Chip-to-Chip and Chip-to-CPU connections.
+*            GT_FRAME_MODE_PROVIDER -
+*                Provider mode uses user definable Ether Types per port
+*                (see gprtSetPortEType/gprtGetPortEType API).
+*            GT_FRAME_MODE_ETHER_TYPE_DSA -
+*                Ether Type DSA mode uses standard Marvell DSA Tagged frame info
+*                flowing a user definable Ether Type. This mode allows the mixture
+*                of Normal Network frames with DSA Tagged frames and is useful to
+*                be used on ports that connect to a CPU.
+*
+* INPUTS:
+*        port - the logical port number
+*        mode - GT_FRAME_MODE type
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if mode is unknown
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetFrameMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_FRAME_MODE    mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetFrameMode Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FRAME_MODE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    switch (mode)
+    {
+        case GT_FRAME_MODE_NORMAL:
+        case GT_FRAME_MODE_DSA:
+        case GT_FRAME_MODE_PROVIDER:
+        case GT_FRAME_MODE_ETHER_TYPE_DSA:
+            break;
+        default:
+            DBG_INFO(("Bad Parameter\n"));
+            return GT_BAD_PARAM;
+    }
+
+    /* Set Frame Mode.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL, 8, 2, (GT_U16)mode);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetFrameMode
+*
+* DESCRIPTION:
+*        Frmae Mode is used to define the expected Ingress and the generated Egress
+*        tagging frame format for this port as follows:
+*            GT_FRAME_MODE_NORMAL -
+*                Normal Network mode uses industry standard IEEE 802.3ac Tagged or
+*                Untagged frames. Tagged frames use an Ether Type of 0x8100.
+*            GT_FRAME_MODE_DSA -
+*                DSA mode uses a Marvell defined tagged frame format for
+*                Chip-to-Chip and Chip-to-CPU connections.
+*            GT_FRAME_MODE_PROVIDER -
+*                Provider mode uses user definable Ether Types per port
+*                (see gprtSetPortEType/gprtGetPortEType API).
+*            GT_FRAME_MODE_ETHER_TYPE_DSA -
+*                Ether Type DSA mode uses standard Marvell DSA Tagged frame info
+*                flowing a user definable Ether Type. This mode allows the mixture
+*                of Normal Network frames with DSA Tagged frames and is useful to
+*                be used on ports that connect to a CPU.
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        mode - GT_FRAME_MODE type
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetFrameMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_FRAME_MODE    *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetFrameMode Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FRAME_MODE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get Pause Limit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL, 8, 2, &data);
+    *mode = data;
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetHoldAt1
+*
+* DESCRIPTION:
+*        Hold Aging ATU Entries at an Entry State value of 1. When this feature
+*        is set to GT_TRUE, ATU entries associated with this port will age down
+*        to an Entry State of 0x1, but will not go to 0x0 (0x0 would purge the
+*        entry)
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to hold aging ATU entry with Entry State of 1,
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetHoldAt1
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetHoldAt1 Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device allows to force a flowcontrol disabled */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AGE_HOLD))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set HoldAt1 */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_ASSOCIATION, 15, 1, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetHoldAt1
+*
+* DESCRIPTION:
+*        Hold Aging ATU Entries at an Entry State value of 1. When this feature
+*        is set to GT_TRUE, ATU entries associated with this port will age down
+*        to an Entry State of 0x1, but will not go to 0x0 (0x0 would purge the
+*        entry)
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE to hold aging ATU entry with Entry State of 1,
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetHoldAt1
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL      *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetHoldAt1 Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device allows to force a flowcontrol disabled */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AGE_HOLD))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get HoldAt1 */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_ASSOCIATION, 15, 1, &data);
+
+    BIT_2_BOOL(data, *mode);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetIntOnAgeOut
+*
+* DESCRIPTION:
+*        Interrupt on Age Out. When aging is enabled, all non-static address
+*        entries in the ATU's address database are periodically aged.
+*        When this feature is set to GT_TRUE and an entry associated with this
+*        port is aged out, an AgeOutViolation will be captured for that entry.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to enable AgeOutViloation interrupt
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetIntOnAgeOut
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetIntOnAgeOut Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device allows to force a flowcontrol disabled */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AGE_OUT_INT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set Age Out Interrupt Enable Mode. */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_ASSOCIATION, 14, 1, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetIntOnAgeOut
+*
+* DESCRIPTION:
+*        Interrupt on Age Out. When aging is enabled, all non-static address
+*        entries in the ATU's address database are periodically aged.
+*        When this feature is set to GT_TRUE and an entry associated with this
+*        port is aged out, an AgeOutViolation will be captured for that entry.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE to enable AgeOutViloation interrupt
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetIntOnAgeOut
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL      *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetIntOnAgeOut Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device allows to force a flowcontrol disabled */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AGE_OUT_INT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get IntOnAgeOut */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_ASSOCIATION, 14, 1, &data);
+
+    BIT_2_BOOL(data, *mode);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetRefreshLocked
+*
+* DESCRIPTION:
+*        Auto Refresh known addresses when port is Locked. Already known addresses
+*        will be auto refreshed when this feature is enabled. When this feature
+*        is disabled, auto refreshing will not occur on Locked ports.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE to enable Auto Refresh known addresses on locked port
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetRefreshLocked
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetRefreshLocked Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device allows to force a flowcontrol disabled */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AUTO_REFRESH_LOCKED))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set RefreshLocked */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_ASSOCIATION, 11, 1, data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetRefreshLocked
+*
+* DESCRIPTION:
+*        Auto Refresh known addresses when port is Locked. Already known addresses
+*        will be auto refreshed when this feature is enabled. When this feature
+*        is disabled, auto refreshing will not occur on Locked ports.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE to enable Auto Refresh known addresses on locked port
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetRefreshLocked
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL      *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gprtGetRefreshLocked Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device allows to force a flowcontrol disabled */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AUTO_REFRESH_LOCKED))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get RefreshLocked */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_ASSOCIATION, 11, 1, &data);
+
+    BIT_2_BOOL(data, *mode);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetPortEType
+*
+* DESCRIPTION:
+*        This routine sets the port's special Ether Type. This Ether Type is used
+*        for Policy (see gprtSetPolicy API) and FrameMode (see gprtSetFrameMode API).
+*
+* INPUTS:
+*        port  - the logical port number
+*        etype - port's special ether type
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtSetPortEType
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_ETYPE        etype
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetPortEType Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_PORT_ETYPE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    data = (GT_U16)etype;
+
+    /* Set the EtherType.            */
+    retVal = hwWritePortReg(dev,hwPort, QD_REG_PORT_ETH_TYPE,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetPortEType
+*
+* DESCRIPTION:
+*        This routine retrieves the port's special Ether Type. This Ether Type is used
+*        for Policy (see gprtSetPolicy API) and FrameMode (see gprtSetFrameMode API).
+*
+* INPUTS:
+*        port  - the logical port number
+*
+* OUTPUTS:
+*        etype - port's special ether type
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPortEType
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_ETYPE    *etype
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetPortEType Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_PORT_ETYPE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the EtherType.            */
+    retVal = hwReadPortReg(dev,hwPort, QD_REG_PORT_ETH_TYPE,&data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    *etype = (GT_ETYPE) data;
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gsysSetJumboMode
+*
+* DESCRIPTION:
+*       This routine Set the max frame size allowed to be received and transmitted
+*        from or to a given port.
+*
+* INPUTS:
+*        port - the logical port number
+*       mode - GT_JUMBO_MODE (1522, 2048, or 10240)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gsysSetJumboMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_JUMBO_MODE   mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gsysSetJumboMode Called.\n"));
+
+    if (mode > GT_JUMBO_MODE_10240)
+    {
+        DBG_INFO(("Bad Parameter\n"));
+        return GT_BAD_PARAM;
+    }
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_JUMBO_MODE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Set the Jumbo Fram Size bit.               */
+    retVal = hwSetPortRegField(dev,hwPort,QD_REG_PORT_CONTROL2,12,2,(GT_U16)mode);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetJumboMode
+*
+* DESCRIPTION:
+*       This routine gets the max frame size allowed to be received and transmitted
+*        from or to a given port.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_JUMBO_MODE (1522, 2048, or 10240)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gsysGetJumboMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_JUMBO_MODE   *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* to keep the read valve       */
+
+    DBG_INFO(("gsysGetJumboMode Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_JUMBO_MODE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get Jumbo Frame Mode.            */
+    retVal = hwGetPortRegField(dev,hwPort,QD_REG_PORT_CONTROL2,12,2,&data );
+
+    *mode = (GT_JUMBO_MODE)data;
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGet200Base
+*
+* DESCRIPTION:
+*        200 Base mode. This bit can be used to change the port's Px_GTXCLK
+*        frequency to 50MHz to support 200 BASE mode as follows:
+*        0 = 25MHz Px_GTXCLK
+*        1 = 50MHz Px_GTXCLK
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - 0 for 100Mbps, 1 for 200Mbps
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        C_Mode should be set to 0x2 in order for this API to work
+*
+*******************************************************************************/
+GT_STATUS gprtGet200Base
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_U32      *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16        data;
+
+    DBG_INFO(("gprtGet200Base Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_200BASE_CFG))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (hwPort < (dev->maxPorts - 2))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the high error rate bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,6,1, &data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    *mode = data;
+
+    /* return */
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtSet200Base
+*
+* DESCRIPTION:
+*        200 Base mode. This bit can be used to change the port's Px_GTXCLK
+*        frequency to 50MHz to support 200 BASE mode as follows:
+*        0 = 25MHz Px_GTXCLK
+*        1 = 50MHz Px_GTXCLK
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - 0 for 100Mbps, 1 for 200Mbps
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        C_Mode should be set to 0x2 in order for this API to work
+*
+*******************************************************************************/
+GT_STATUS gprtSet200Base
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_U32      mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSet200Base Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_200BASE_CFG))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (hwPort < (dev->maxPorts - 2))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_BAD_PARAM;
+    }
+
+	/* Get the high error rate bit.  */
+	retVal = hwSetPortRegField(dev, hwPort, QD_REG_PCS_CONTROL, 12, 1, (GT_U16)mode & 0x1);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetSwitchReg
+*
+* DESCRIPTION:
+*       This routine reads Switch Port Registers.
+*
+* INPUTS:
+*       port    - logical port number
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetSwitchReg
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_U32         regAddr,
+    OUT GT_U16         *data
+)
+{
+    GT_U16          u16Data;           /* The register's read data.    */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetSwitchReg Called.\n"));
+
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get Phy Register. */
+    if(hwReadPortReg(dev,hwPort,(GT_U8)regAddr,&u16Data) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+
+    *data = u16Data;
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gprtSetSwitchReg
+*
+* DESCRIPTION:
+*       This routine writes Switch Port Registers.
+*
+* INPUTS:
+*       port    - logical port number
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetSwitchReg
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    IN  GT_U32            regAddr,
+    IN  GT_U16            data
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetSwitchReg Called.\n"));
+
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the Scheduling bit.              */
+    if(hwWritePortReg(dev,hwPort,(GT_U8)regAddr,data) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gprtGetGlobalReg
+*
+* DESCRIPTION:
+*       This routine reads Switch Global Registers.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetGlobalReg
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32         regAddr,
+    OUT GT_U16         *data
+)
+{
+    GT_U16          u16Data;           /* The register's read data.    */
+
+    DBG_INFO(("gprtGetGlobalReg Called.\n"));
+
+    /* Get Phy Register. */
+    if(hwReadGlobalReg(dev,(GT_U8)regAddr,&u16Data) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+
+    *data = u16Data;
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gprtSetGlobalReg
+*
+* DESCRIPTION:
+*       This routine writes Switch Global Registers.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetGlobalReg
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_U32            regAddr,
+    IN  GT_U16            data
+)
+{
+    DBG_INFO(("gprtSetGlobalReg Called.\n"));
+
+    /* Get the Scheduling bit.              */
+    if(hwWriteGlobalReg(dev,(GT_U8)regAddr,data) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gprtGetGlobal2Reg
+*
+* DESCRIPTION:
+*       This routine reads Switch Global 2 Registers.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetGlobal2Reg
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32         regAddr,
+    OUT GT_U16         *data
+)
+{
+    GT_U16          u16Data;           /* The register's read data.    */
+
+    DBG_INFO(("gprtGetGlobal2Reg Called.\n"));
+
+    /* Get Phy Register. */
+    if(hwReadGlobal2Reg(dev,(GT_U8)regAddr,&u16Data) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+
+    *data = u16Data;
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gprtSetGlobal2Reg
+*
+* DESCRIPTION:
+*       This routine writes Switch Global2 Registers.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetGlobal2Reg
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_U32            regAddr,
+    IN  GT_U16            data
+)
+{
+    DBG_INFO(("gprtSetGlobal2Reg Called.\n"));
+
+    /* Get the Scheduling bit.              */
+    if(hwWriteGlobal2Reg(dev,(GT_U8)regAddr,data) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+
+    return GT_OK;
+}
+/*******************************************************************************
+* gprtGetGlobal3Reg
+*
+* DESCRIPTION:
+*       This routine reads Switch Global 3 Registers.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetGlobal3Reg
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32         regAddr,
+    OUT GT_U16         *data
+)
+{
+    GT_U16          u16Data;           /* The register's read data.    */
+
+    DBG_INFO(("gprtGetGlobal3Reg Called.\n"));
+
+    /* Get Phy Register. */
+    if(hwReadGlobal3Reg(dev,(GT_U8)regAddr,&u16Data) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+
+    *data = u16Data;
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gprtSetGlobal3Reg
+*
+* DESCRIPTION:
+*       This routine writes Switch Global3 Registers.
+*
+* INPUTS:
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetGlobal3Reg
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_U32            regAddr,
+    IN  GT_U16            data
+)
+{
+    DBG_INFO(("gprtSetGlobal3Reg Called.\n"));
+
+    /* Get the Scheduling bit.              */
+    if(hwWriteGlobal3Reg(dev,(GT_U8)regAddr,data) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+
+    return GT_OK;
+}
+
+/* New functions in 88E6320 or later */
+/*******************************************************************************
+* gprtSetQueueCtrl
+*
+* DESCRIPTION:
+*        Set port queue control data to the Port Queue Control register.
+*        The registers of Port Queue control are.
+*         Hard Queue Limits register space
+*         Reserved for future Hard Queue Limits use
+*
+* INPUTS:
+*        port  - logical port number
+*        point - Pointer to the Port Queue Control register.
+*        data  - Port Queue Control data written to the register
+*                pointed to by the point above.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gprtSetQueueCtrl
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT         port,
+    IN  GT_U32           point,
+    IN  GT_U8            data
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    IN  GT_U16            tmpData;
+    GT_U8           hwPort;         /* the physical port number     */
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_QUEUE_CONTROL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (hwPort < (dev->maxPorts - 2))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_BAD_PARAM;
+    }
+
+    if (point > 0x80)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    if (data &0xffffff00)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+
+    /* Wait until the Port Queue Control is ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, PORT_ACCESS);
+      regAccess.rw_reg_list[0].reg = 0x1d;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->vtuRegsSem);
+        return retVal;
+      }
+    }
+#else
+       tmpData = 1;
+    while(tmpData == 1)
+       {
+        retVal = hwGetPortRegField(dev,hwPort, 0x1d,15,1,&tmpData);
+           if(retVal != GT_OK)
+           {
+            gtSemGive(dev,dev->tblRegsSem);
+               return retVal;
+           }
+    }
+#endif
+
+    tmpData =  (GT_U16)((1 << 15) | (point << 8) | data);
+
+    retVal = hwWritePortReg(dev,hwPort, 0x1d, tmpData);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           gtSemGive(dev,dev->tblRegsSem);
+           return retVal;
+    }
+
+    gtSemGive(dev,dev->tblRegsSem);
+
+       return retVal;
+
+}
+
+
+/*******************************************************************************
+* gprtGetQueueCtrl
+*
+* DESCRIPTION:
+*        Get port queue control data from the Port Queue Control register.
+*        The registers of Port Queue control are.
+*         Hard Queue Limits register space
+*         Reserved for future Hard Queue Limits use
+*
+* INPUTS:
+*        port  - logical port number
+*        point - Pointer to the Port Queue Control register.
+*
+* OUTPUTS:
+*        data  - Port Queue Control data written to the register
+*                pointed to by the point above.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gprtGetQueueCtrl
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT         port,
+    IN  GT_U32           point,
+    OUT GT_U8            *data
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    OUT  GT_U16            tmpData;
+    GT_U8           hwPort;         /* the physical port number     */
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_QUEUE_CONTROL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (hwPort < (dev->maxPorts - 2))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_BAD_PARAM;
+    }
+
+    if (point > 0x80)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    /* Wait until the Port Queue Control is ready. */
+
+    do {
+        retVal = hwGetPortRegField(dev,hwPort, 0x1d,15,1,&tmpData);
+        if(retVal != GT_OK)
+           {
+               DBG_INFO(("Failed.\n"));
+            gtSemGive(dev,dev->tblRegsSem);
+               return retVal;
+        }
+    } while (tmpData&0x8000);
+
+    *data = tmpData&0xff;
+
+    gtSemGive(dev,dev->tblRegsSem);
+
+
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetDebugCounter
+*
+* DESCRIPTION:
+*        Get Port Debug Counter, bad counter and good counter.
+*
+* INPUTS:
+*        port  - logical port number
+*
+* OUTPUTS:
+*        badCounter  - When CtrMode is cleared to a zero (Global 1 offset 0x1C) this
+*  counter increments each time a frame enters this port that was an error on the
+*  wire. It does not matter if the frame’s CRC is fixed by ForceGoodFCS (Port
+*  offset 0x08) being set to a one, this counter will still increment. A CRC error
+*  frame is one that is 64 bytes to MaxFrameSize (Global 1, offset 0x04) with a
+*  bad CRC (including alignment errors but not dribbles). Fragments and
+*  properly formed frames are not counted. The RxBadFrames counter counts
+*  frames that are counted in the MIB counters as InUndersize, InOversize,
+*  InJabber, InRxErr and InFCSErr.
+*  When CtrMode is set to a one this counter increments each time a transmit
+*  collision occurs on this port.
+*        goodCounter  - When CtrMode is cleared to a zero (Global 1 offset 0x1C) this
+*  counter increments each time a frame enters this port that was not an error
+*  frame on the wire. It does not matter if the frame was filtered or discarded,
+*  only that the frame was received as good on the wire (i.e., its wire size is in the
+*  range of 64 bytes to MaxFrameSize (Global 1, offset 0x04) and its CRC was
+*  good. The RxGoodFrames counter counts frames that are not counted
+*  above as long as they are not being counted in the MIB counters as
+*  InFragments.
+*  When CtrMode is set to a one this counter increments each time a frame is
+*  transmitted out this port.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gprtGetDebugCounter
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT         port,
+    OUT  GT_U8            *badCounter,
+    OUT  GT_U8            *goodCounter
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    OUT  GT_U16            tmpData;
+    GT_U8           hwPort;         /* the physical port number     */
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_DEBUG_COUNTER))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (hwPort < (dev->maxPorts - 2))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_BAD_PARAM;
+    }
+
+    retVal = hwReadPortReg(dev,hwPort, 0x1e, &tmpData);
+    if(retVal != GT_OK)
+    {
+      DBG_INFO(("Failed.\n"));
+      return retVal;
+    }
+
+    *goodCounter = tmpData&0xff;
+    *badCounter = (tmpData>>8)&0xff;
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetCutThrough
+*
+* DESCRIPTION:
+*        Set port Cut Through configuration.
+*
+* INPUTS:
+*        port  - logical port number
+*        cutThru - Cut through configuration.
+*                    enableSelect;     Port Enable Select.
+*                    enable;           Cut Through enable.
+*                    cutThruQueue;     Cut Through Queues.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gprtSetCutThrough
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT         port,
+    IN  GT_CUT_THROUGH   *cutThru
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    OUT  GT_U16            tmpData;
+    GT_U8           hwPort;         /* the physical port number     */
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_CUT_THROUGH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (hwPort < (dev->maxPorts - 2))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_BAD_PARAM;
+    }
+
+    tmpData = (cutThru->enableSelect<<12)|(cutThru->enable<<8)|cutThru->cutThruQueue;
+    retVal = hwWritePortReg(dev,hwPort, 0x1f, tmpData);
+    if(retVal != GT_OK)
+    {
+      DBG_INFO(("Failed.\n"));
+      return retVal;
+    }
+
+
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetCutThrough
+*
+* DESCRIPTION:
+*        Get port Cut Through configuration.
+*
+* INPUTS:
+*        port  - logical port number
+*
+* OUTPUTS:
+*        cutThru - Cut through configuration.
+*                    enableSelect;     Port Enable Select.
+*                    enable;           Cut Through enable.
+*                    cutThruQueue;     Cut Through Queues.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM - if input parameters are beyond range.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gprtGetCutThrough
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT         port,
+    OUT GT_CUT_THROUGH   *cutThru
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    OUT  GT_U16            tmpData;
+    GT_U8           hwPort;         /* the physical port number     */
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_CUT_THROUGH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (hwPort < (dev->maxPorts - 2))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_BAD_PARAM;
+    }
+
+    retVal = hwReadPortReg(dev,hwPort, 0x1f, &tmpData);
+    if(retVal != GT_OK)
+    {
+      DBG_INFO(("Failed.\n"));
+      return retVal;
+    }
+
+    cutThru->enableSelect = (tmpData>>12)&0xf;
+    cutThru->enable = (tmpData&0x100)?1:0;
+    cutThru->cutThruQueue =tmpData&0xff;
+
+    return retVal;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortLed.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortLed.c
new file mode 100644
index 000000000000..595346b6e450
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortLed.c
@@ -0,0 +1,840 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtPortLed.c
+*
+* DESCRIPTION:
+*       API definitions for LED Control
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*       $Revision: $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtSem.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+
+static GT_STATUS convertLED2APP
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN  GT_LED_CFG    cfg,
+    IN  GT_U32        value,
+    OUT GT_U32        *data
+);
+
+
+static GT_STATUS convertAPP2LED
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN  GT_LED_CFG    cfg,
+    IN  GT_U32        value,
+    OUT GT_U32        *data
+);
+
+
+/*******************************************************************************
+* gprtSetLED
+*
+* DESCRIPTION:
+*        This API allows to configure 4 LED sections, Pulse stretch, Blink rate,
+*        and special controls.
+*
+* INPUTS:
+*        port    - the logical port number
+*        cfg     - GT_LED_CFG value
+*        value     - value to be configured
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gprtSetLED
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN  GT_LED_CFG    cfg,
+    IN  GT_U32        value
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U16        data;
+    GT_U32        ptr, conv, mask;
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetLED Called.\n"));
+
+    hwPort = GT_LPORT_2_PORT(port);
+    if (hwPort >= 5)
+        return GT_BAD_PARAM;
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_LED_CFG))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    switch (cfg)
+    {
+        case GT_LED_CFG_LED0:
+            ptr = 0;
+            mask = 0xF;
+            break;
+        case GT_LED_CFG_LED1:
+            ptr = 0;
+            mask = 0xF0;
+            break;
+        case GT_LED_CFG_LED2:
+            ptr = 1;
+            mask = 0xF;
+            break;
+        case GT_LED_CFG_LED3:
+            ptr = 1;
+            mask = 0xF0;
+            break;
+        case GT_LED_CFG_PULSE_STRETCH:
+            ptr = 6;
+            mask = 0x70;
+            break;
+        case GT_LED_CFG_BLINK_RATE:
+            ptr = 6;
+            mask = 0x7;
+            break;
+        case GT_LED_CFG_SPECIAL_CONTROL:
+            ptr = 7;
+            mask = (1 << dev->maxPorts) - 1;
+            break;
+        default:
+            return GT_BAD_PARAM;
+    }
+    conv = 0;
+    retVal = convertAPP2LED(dev,port,cfg,value,&conv);
+    if (retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    /* Wait until the Table is ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, hwPort, PORT_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_LED_CONTROL;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+    }
+#else
+    do
+    {
+        retVal = hwGetPortRegField(dev,hwPort,QD_REG_LED_CONTROL,15,1,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+
+    } while(data == 1);
+#endif
+
+    /* read the current data */
+    data = (GT_U16)(ptr << 12);
+
+    retVal = hwWritePortReg(dev, hwPort, QD_REG_LED_CONTROL, data);
+    if(retVal != GT_OK)
+      {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+    }
+
+    retVal = hwGetPortRegField(dev, hwPort, QD_REG_LED_CONTROL,0,11,&data);
+    if(retVal != GT_OK)
+    {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+    }
+
+    /* overwrite the data */
+    data = (GT_U16)((1 << 15) | (ptr << 12) | (conv | (data & ~mask)));
+
+    retVal = hwWritePortReg(dev, hwPort, QD_REG_LED_CONTROL, data);
+    if(retVal != GT_OK)
+      {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+    }
+
+    gtSemGive(dev,dev->tblRegsSem);
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gprtGetLED
+*
+* DESCRIPTION:
+*        This API allows to retrieve 4 LED sections, Pulse stretch, Blink rate,
+*        and special controls.
+*
+* INPUTS:
+*        port    - the logical port number
+*        cfg     - GT_LED_CFG value
+*
+* OUTPUTS:
+*        value     - value to be configured
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None
+*
+*******************************************************************************/
+GT_STATUS gprtGetLED
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN  GT_LED_CFG    cfg,
+    OUT GT_U32        *value
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U16        data;
+    GT_U32        ptr;
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetLED Called.\n"));
+
+    hwPort = GT_LPORT_2_PORT(port);
+    if (hwPort >= 5)
+        return GT_BAD_PARAM;
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_LED_CFG))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    switch (cfg)
+    {
+        case GT_LED_CFG_LED0:
+            ptr = 0;
+            break;
+        case GT_LED_CFG_LED1:
+            ptr = 0;
+            break;
+        case GT_LED_CFG_LED2:
+            ptr = 1;
+            break;
+        case GT_LED_CFG_LED3:
+            ptr = 1;
+            break;
+        case GT_LED_CFG_PULSE_STRETCH:
+            ptr = 6;
+            break;
+        case GT_LED_CFG_BLINK_RATE:
+            ptr = 6;
+            break;
+        case GT_LED_CFG_SPECIAL_CONTROL:
+            ptr = 7;
+            break;
+        default:
+            return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    /* Wait until the Table is ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, hwPort, PORT_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_LED_CONTROL;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+    }
+#else
+    do
+    {
+        retVal = hwGetPortRegField(dev,hwPort,QD_REG_LED_CONTROL,15,1,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+
+    } while(data == 1);
+#endif
+
+    /* read the current data */
+    data = (GT_U16)(ptr << 12);
+
+    retVal = hwWritePortReg(dev, hwPort, QD_REG_LED_CONTROL, data);
+    if(retVal != GT_OK)
+      {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+    }
+
+    retVal = hwGetPortRegField(dev, hwPort, QD_REG_LED_CONTROL,0,11,&data);
+    if(retVal != GT_OK)
+    {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+    }
+
+    retVal = convertLED2APP(dev,port,cfg,data,value);
+    if (retVal != GT_OK)
+    {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+    }
+
+
+    gtSemGive(dev,dev->tblRegsSem);
+
+    return GT_OK;
+}
+
+
+static GT_STATUS convertAPP2LED
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN  GT_LED_CFG    cfg,
+    IN  GT_U32        value,
+    OUT GT_U32        *data
+)
+{
+    GT_STATUS    retVal = GT_OK;
+
+    switch (cfg)
+    {
+        case GT_LED_CFG_LED0:
+            switch (value)
+            {
+                case GT_LED_LINK_ACT:
+                    *data = 3;
+                    break;
+                case GT_LED_LINK:
+                    *data = 8;
+                    break;
+                case GT_LED_10_LINK_ACT:
+                    *data = 10;
+                    break;
+                case GT_LED_10_LINK:
+                    *data = 9;
+                    break;
+                case GT_LED_1000_LINK_ACT:
+                    *data = 2;
+                    break;
+                case GT_LED_100_1000_LINK_ACT:
+                    *data = 1;
+                    break;
+                case GT_LED_100_1000_LINK:
+                    *data = 11;
+                    break;
+                case GT_LED_SPECIAL:
+                    *data = 7;
+                    break;
+                case GT_LED_DUPLEX_COL:
+                    *data = 6;
+                    break;
+                case GT_LED_PTP_ACT:
+                    *data = 0;
+                    break;
+                case GT_LED_FORCE_BLINK:
+                    *data = 13;
+                    break;
+                case GT_LED_FORCE_OFF:
+                    *data = 14;
+                    break;
+                case GT_LED_FORCE_ON:
+                    *data = 15;
+                    break;
+                default:
+                    retVal = GT_BAD_PARAM;
+                    break;
+            }
+            break;
+
+        case GT_LED_CFG_LED1:
+            switch (value)
+            {
+                case GT_LED_LINK_ACT_SPEED:
+                    *data = 0;
+                    break;
+                case GT_LED_100_LINK_ACT:
+                    *data = 10;
+                    break;
+                case GT_LED_100_LINK:
+                    *data = 9;
+                    break;
+                case GT_LED_1000_LINK:
+                    *data = 3;
+                    break;
+                case GT_LED_10_100_LINK_ACT:
+                    *data = 1;
+                    break;
+                case GT_LED_10_100_LINK:
+                    *data = 11;
+                    break;
+                case GT_LED_SPECIAL:
+                    *data = 6;
+                    break;
+                case GT_LED_DUPLEX_COL:
+                    *data = 7;
+                    break;
+                case GT_LED_ACTIVITY:
+                    *data = 8;
+                    break;
+                case GT_LED_PTP_ACT:
+                    *data = 12;
+                    break;
+                case GT_LED_FORCE_BLINK:
+                    *data = 13;
+                    break;
+                case GT_LED_FORCE_OFF:
+                    *data = 14;
+                    break;
+                case GT_LED_FORCE_ON:
+                    *data = 15;
+                    break;
+                default:
+                    retVal = GT_BAD_PARAM;
+                    break;
+            }
+            *data <<= 4;
+            break;
+
+        case GT_LED_CFG_LED2:
+            switch (value)
+            {
+                case GT_LED_10_LINK_ACT:
+                    *data = 6;
+                    break;
+                case GT_LED_100_LINK:
+                    *data = 8;
+                    break;
+                case GT_LED_1000_LINK_ACT:
+                    *data = 10;
+                    break;
+                case GT_LED_1000_LINK:
+                    *data = 9;
+                    break;
+                case GT_LED_10_1000_LINK_ACT:
+                    *data = 1;
+                    break;
+                case GT_LED_10_1000_LINK:
+                    *data = 11;
+                    break;
+                case GT_LED_100_1000_LINK_ACT:
+                    *data = 7;
+                    break;
+                case GT_LED_100_1000_LINK:
+                    *data = 3;
+                    break;
+                case GT_LED_SPECIAL:
+                    *data = 2;
+                    break;
+                case GT_LED_DUPLEX_COL:
+                    *data = 0;
+                    break;
+                case GT_LED_PTP_ACT:
+                    *data = 12;
+                    break;
+                case GT_LED_FORCE_BLINK:
+                    *data = 13;
+                    break;
+                case GT_LED_FORCE_OFF:
+                    *data = 14;
+                    break;
+                case GT_LED_FORCE_ON:
+                    *data = 15;
+                    break;
+                default:
+                    retVal = GT_BAD_PARAM;
+                    break;
+            }
+            break;
+
+        case GT_LED_CFG_LED3:
+            switch (value)
+            {
+                case GT_LED_LINK_ACT:
+                    *data = 10;
+                    break;
+                case GT_LED_LINK:
+                    *data = 9;
+                    break;
+                case GT_LED_10_LINK:
+                    *data = 8;
+                    break;
+                case GT_LED_100_LINK_ACT:
+                    *data = 6;
+                    break;
+                case GT_LED_10_1000_LINK_ACT:
+                    *data = 7;
+                    break;
+                case GT_LED_SPECIAL:
+                    *data = 0;
+                    break;
+                case GT_LED_DUPLEX_COL:
+                    *data = 1;
+                    break;
+                case GT_LED_ACTIVITY:
+                    *data = 11;
+                    break;
+                case GT_LED_PTP_ACT:
+                    *data = 12;
+                    break;
+                case GT_LED_FORCE_BLINK:
+                    *data = 13;
+                    break;
+                case GT_LED_FORCE_OFF:
+                    *data = 14;
+                    break;
+                case GT_LED_FORCE_ON:
+                    *data = 15;
+                    break;
+                default:
+                    retVal = GT_BAD_PARAM;
+                    break;
+            }
+            *data <<= 4;
+            break;
+
+        case GT_LED_CFG_PULSE_STRETCH:
+            if (value > 0x4)
+                retVal = GT_BAD_PARAM;
+            *data = value << 4;
+            break;
+        case GT_LED_CFG_BLINK_RATE:
+            if (value > 0x5)
+                retVal = GT_BAD_PARAM;
+            *data = value;
+            break;
+
+        case GT_LED_CFG_SPECIAL_CONTROL:
+            if (value >= (GT_U32)(1 << dev->maxPorts))
+                retVal = GT_BAD_PARAM;
+            *data = value;
+            break;
+
+        default:
+            retVal = GT_BAD_PARAM;
+            break;
+    }
+
+    return retVal;
+
+}
+
+
+static GT_STATUS convertLED2APP
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_LPORT    port,
+    IN  GT_LED_CFG    cfg,
+    IN  GT_U32        value,
+    OUT GT_U32        *data
+)
+{
+    GT_STATUS retVal = GT_OK;
+
+    switch (cfg)
+    {
+        case GT_LED_CFG_LED0:
+            value &= 0xF;
+            switch (value)
+            {
+                case 0:
+                    *data = GT_LED_PTP_ACT;
+                    break;
+                case 1:
+                    *data = GT_LED_100_1000_LINK_ACT;
+                    break;
+                case 2:
+                    *data = GT_LED_1000_LINK_ACT;
+                    break;
+                case 3:
+                    *data = GT_LED_LINK_ACT;
+                    break;
+                case 4:
+                    *data = GT_LED_RESERVE;
+                    break;
+                case 5:
+                    *data = GT_LED_RESERVE;
+                    break;
+                case 6:
+                    *data = GT_LED_DUPLEX_COL;
+                    break;
+                case 7:
+                    *data = GT_LED_SPECIAL;
+                    break;
+                case 8:
+                    *data = GT_LED_LINK;
+                    break;
+                case 9:
+                    *data = GT_LED_10_LINK;
+                    break;
+                case 10:
+                    *data = GT_LED_10_LINK_ACT;
+                    break;
+                case 11:
+                    *data = GT_LED_100_1000_LINK;
+                    break;
+                case 12:
+                    *data = GT_LED_PTP_ACT;
+                    break;
+                case 13:
+                    *data = GT_LED_FORCE_BLINK;
+                    break;
+                case 14:
+                    *data = GT_LED_FORCE_OFF;
+                    break;
+                case 15:
+                    *data = GT_LED_FORCE_ON;
+                    break;
+                default:
+                    retVal = GT_FAIL;
+                    break;
+            }
+            break;
+
+        case GT_LED_CFG_LED1:
+            value >>= 4;
+            value &= 0xF;
+            switch (value)
+            {
+                case 0:
+                    *data = GT_LED_LINK_ACT_SPEED;
+                    break;
+                case 1:
+                    *data = GT_LED_10_100_LINK_ACT;
+                    break;
+                case 2:
+                    *data = GT_LED_10_100_LINK_ACT;
+                    break;
+                case 3:
+                    *data = GT_LED_1000_LINK;
+                    break;
+                case 4:
+                    *data = GT_LED_RESERVE;
+                    break;
+                case 5:
+                    *data = GT_LED_RESERVE;
+                    break;
+                case 6:
+                    *data = GT_LED_SPECIAL;
+                    break;
+                case 7:
+                    *data = GT_LED_DUPLEX_COL;
+                    break;
+                case 8:
+                    *data = GT_LED_ACTIVITY;
+                    break;
+                case 9:
+                    *data = GT_LED_100_LINK;
+                    break;
+                case 10:
+                    *data = GT_LED_100_LINK_ACT;
+                    break;
+                case 11:
+                    *data = GT_LED_10_100_LINK;
+                    break;
+                case 12:
+                    *data = GT_LED_PTP_ACT;
+                    break;
+                case 13:
+                    *data = GT_LED_FORCE_BLINK;
+                    break;
+                case 14:
+                    *data = GT_LED_FORCE_OFF;
+                    break;
+                case 15:
+                    *data = GT_LED_FORCE_ON;
+                    break;
+                default:
+                    retVal = GT_FAIL;
+                    break;
+            }
+            break;
+
+        case GT_LED_CFG_LED2:
+            value &= 0xF;
+            switch (value)
+            {
+                case 0:
+                    *data = GT_LED_DUPLEX_COL;
+                    break;
+                case 1:
+                    *data = GT_LED_10_1000_LINK_ACT;
+                    break;
+                case 2:
+                    *data = GT_LED_SPECIAL;
+                    break;
+                case 3:
+                    *data = GT_LED_100_1000_LINK;
+                    break;
+                case 4:
+                    *data = GT_LED_RESERVE;
+                    break;
+                case 5:
+                    *data = GT_LED_RESERVE;
+                    break;
+                case 6:
+                    *data = GT_LED_10_LINK_ACT;
+                    break;
+                case 7:
+                    *data = GT_LED_100_1000_LINK_ACT;
+                    break;
+                case 8:
+                    *data = GT_LED_100_LINK;
+                    break;
+                case 9:
+                    *data = GT_LED_1000_LINK;
+                    break;
+                case 10:
+                    *data = GT_LED_1000_LINK_ACT;
+                    break;
+                case 11:
+                    *data = GT_LED_10_1000_LINK;
+                    break;
+                case 12:
+                    *data = GT_LED_PTP_ACT;
+                    break;
+                case 13:
+                    *data = GT_LED_FORCE_BLINK;
+                    break;
+                case 14:
+                    *data = GT_LED_FORCE_OFF;
+                    break;
+                case 15:
+                    *data = GT_LED_FORCE_ON;
+                    break;
+                default:
+                    retVal = GT_FAIL;
+                    break;
+            }
+            break;
+
+        case GT_LED_CFG_LED3:
+            value >>= 4;
+            value &= 0xF;
+            switch (value)
+            {
+                case 0:
+                    *data = GT_LED_SPECIAL;
+                    break;
+                case 1:
+                    *data = GT_LED_DUPLEX_COL;
+                    break;
+                case 2:
+                    *data = GT_LED_DUPLEX_COL;
+                    break;
+                case 3:
+                    *data = GT_LED_SPECIAL;
+                    break;
+                case 4:
+                    *data = GT_LED_RESERVE;
+                    break;
+                case 5:
+                    *data = GT_LED_RESERVE;
+                    break;
+                case 6:
+                    *data = GT_LED_100_LINK_ACT;
+                    break;
+                case 7:
+                    *data = GT_LED_10_1000_LINK_ACT;
+                    break;
+                case 8:
+                    *data = GT_LED_10_LINK;
+                    break;
+                case 9:
+                    *data = GT_LED_LINK;
+                    break;
+                case 10:
+                    *data = GT_LED_LINK_ACT;
+                    break;
+                case 11:
+                    *data = GT_LED_ACTIVITY;
+                    break;
+                case 12:
+                    *data = GT_LED_PTP_ACT;
+                    break;
+                case 13:
+                    *data = GT_LED_FORCE_BLINK;
+                    break;
+                case 14:
+                    *data = GT_LED_FORCE_OFF;
+                    break;
+                case 15:
+                    *data = GT_LED_FORCE_ON;
+                    break;
+                default:
+                    retVal = GT_FAIL;
+                    break;
+            }
+            break;
+
+        case GT_LED_CFG_PULSE_STRETCH:
+            *data = (value >> 4) & 0x7;
+            break;
+
+        case GT_LED_CFG_BLINK_RATE:
+            *data = value & 0x7;
+            break;
+
+        case GT_LED_CFG_SPECIAL_CONTROL:
+            *data = value & ((1 << dev->maxPorts) - 1);
+            break;
+
+        default:
+            retVal = GT_BAD_PARAM;
+            break;
+    }
+
+    return retVal;
+
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortPav.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortPav.c
new file mode 100644
index 000000000000..5b89c1e5b855
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortPav.c
@@ -0,0 +1,269 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtPortPav.c
+*
+* DESCRIPTION:
+*       API definitions to handle Port Association Vector (0xB).
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*       $Revision: 5 $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+/*******************************************************************************
+* gpavSetPAV
+*
+* DESCRIPTION:
+*       This routine sets the Port Association Vector
+*
+* INPUTS:
+*       port    - logical port number.
+*       pav     - Port Association Vector
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpavSetPAV
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT     port,
+    IN GT_U16     pav
+)
+{
+
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           phyPort;        /* Physical port.               */
+    GT_U16            hwPav;
+
+    DBG_INFO(("gpavSetPAV Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,phyPort, DEV_PORT_MONITORING)) != GT_OK )
+      return retVal;
+
+    /*
+     * translate Logical Port Vector to Physical Port Vector.
+     */
+    hwPav = (GT_U16)GT_LPORTVEC_2_PORTVEC(pav);
+
+    if(hwPav == (GT_U16)GT_INVALID_PORT_VEC)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    /* there are 7 ports in the switch */
+    retVal = hwSetPortRegField(dev,phyPort,QD_REG_PORT_ASSOCIATION,0,dev->maxPorts,hwPav);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gpavGetPAV
+*
+* DESCRIPTION:
+*       This routine gets the Port Association Vector
+*
+* INPUTS:
+*       port    - logical port number.
+*
+* OUTPUTS:
+*       pav     - Port Association Vector
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpavGetPAV
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_U16   *pav
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("gpavGetPAV Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,phyPort, DEV_PORT_MONITORING)) != GT_OK )
+      return retVal;
+
+    if(pav == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    retVal = hwGetPortRegField(dev,phyPort,(GT_U8)QD_REG_PORT_ASSOCIATION,0,dev->maxPorts,&data );
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    /*
+     * translate Physical Port Vector to Logical Port Vector.
+     */
+    *pav = (GT_U16)GT_PORTVEC_2_LPORTVEC(data);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gpavSetIngressMonitor
+*
+* DESCRIPTION:
+*       This routine sets the Ingress Monitor bit in the PAV.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - the ingress monitor bit in the PAV
+*              GT_FALSE: Ingress Monitor enabled
+*              GT_TRUE:  Ingress Monitor disabled
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpavSetIngressMonitor
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL   mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("gpavSetIngressMonitorCalled.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+    BOOL_2_BIT(mode,data);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ENABLE_MONITORING))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = hwSetPortRegField(dev,phyPort,QD_REG_PORT_ASSOCIATION,15,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gpavGetIngressMonitor
+*
+* DESCRIPTION:
+*       This routine gets the Ingress Monitor bit in the PAV.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - the ingress monitor bit in the PAV
+*              GT_FALSE: Ingress Monitor enabled
+*              GT_TRUE:  Ingress Monitor disabled
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gpavGetIngressMonitor
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("grcGetIngressMonitor Called.\n"));
+    if(mode == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ENABLE_MONITORING))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = hwGetPortRegField(dev,phyPort,QD_REG_PORT_ASSOCIATION,15,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*mode);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortRateCtrl.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortRateCtrl.c
new file mode 100644
index 000000000000..5cac79c627d4
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortRateCtrl.c
@@ -0,0 +1,2818 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtPortRateCtrl.c
+*
+* DESCRIPTION:
+*       API definitions to handle port rate control registers (0xA).
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*       $Revision: 5 $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+/*
+ Convert given hw Rate Limit to sw defined Rate Limit.
+ This routine is only for Gigabit Managed Switch Device.
+ If the given device is not an accepted device, it'll simply copy the hw limit
+ to sw limit.
+*/
+static GT_STATUS cRateLimit(GT_QD_DEV *dev, GT_U32 hwLimit, GT_U32* swLimit)
+{
+    GT_U32 sLimit, hLimit, startLimit, endLimit, i;
+
+    if (!((IS_IN_DEV_GROUP(dev,DEV_GIGABIT_MANAGED_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_ENHANCED_FE_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY))))
+    {
+        *swLimit = hwLimit;
+        return GT_OK;
+    }
+
+    if(hwLimit == 0)
+    {
+        *swLimit = GT_NO_LIMIT;
+        return GT_OK;
+    }
+
+    sLimit = 1000;
+
+    if ((IS_IN_DEV_GROUP(dev,DEV_ENHANCED_FE_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY)))
+        hLimit = GT_GET_RATE_LIMIT3(sLimit);
+    else if (!IS_IN_DEV_GROUP(dev,DEV_88E6183_FAMILY))
+        hLimit = GT_GET_RATE_LIMIT2(sLimit);
+    else
+        hLimit = GT_GET_RATE_LIMIT(sLimit);
+    if(hLimit == hwLimit)
+    {
+        *swLimit = GT_1M;
+        return GT_OK;
+    }
+
+    if(hLimit > hwLimit)
+    {
+        startLimit = 2000;
+        endLimit = 256000;
+        *swLimit = GT_2M;
+    }
+    else
+    {
+        startLimit = 128;
+        endLimit = 512;
+        *swLimit = GT_128K;
+    }
+
+    i = 0;
+    for(sLimit=startLimit;sLimit<=endLimit;sLimit *= 2, i++)
+    {
+        if ((IS_IN_DEV_GROUP(dev,DEV_ENHANCED_FE_SWITCH)) ||
+            (IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY)))
+            hLimit = GT_GET_RATE_LIMIT3(sLimit);
+        else if (!IS_IN_DEV_GROUP(dev,DEV_88E6183_FAMILY))
+            hLimit = GT_GET_RATE_LIMIT2(sLimit);
+        else
+            hLimit = GT_GET_RATE_LIMIT(sLimit);
+
+        if(hLimit == 0)
+            hLimit = 1;
+
+        if(hLimit == hwLimit)
+        {
+            *swLimit += i;
+            return GT_OK;
+        }
+
+        if(hLimit < hwLimit)
+            break;
+    }
+
+    *swLimit = hwLimit;
+    return GT_OK;
+}
+
+
+/*
+ Convert given sw defined Burst Rate to meaningful number.
+*/
+static GT_STATUS cBurstEnum2Number(GT_QD_DEV *dev, GT_BURST_RATE rate, GT_U32 *rLimit)
+{
+    GT_U32 rateLimit;
+
+    GT_UNUSED_PARAM(dev);
+
+    switch(rate)
+    {
+        case GT_BURST_NO_LIMIT :
+                rateLimit = 0; /* MAX_RATE_LIMIT; */
+                break;
+        case GT_BURST_64K :
+                rateLimit = 64;
+                break;
+        case GT_BURST_128K :
+                rateLimit = 128;
+                break;
+        case GT_BURST_256K :
+                rateLimit = 256;
+                break;
+        case GT_BURST_384K :
+                rateLimit = 384;
+                break;
+        case GT_BURST_512K :
+                rateLimit = 512;
+                break;
+        case GT_BURST_640K :
+                rateLimit = 640;
+                break;
+        case GT_BURST_768K :
+                rateLimit = 768;
+                break;
+        case GT_BURST_896K :
+                rateLimit = 896;
+                break;
+        case GT_BURST_1M :
+                rateLimit = 1000;
+                break;
+        case GT_BURST_1500K :
+                rateLimit = 1500;
+                break;
+        case GT_BURST_2M :
+                rateLimit = 2000;
+                break;
+        case GT_BURST_4M :
+                rateLimit = 4000;
+                break;
+        case GT_BURST_8M :
+                rateLimit = 8000;
+                break;
+        case GT_BURST_16M :
+                rateLimit = 16000;
+                break;
+        case GT_BURST_32M :
+                rateLimit = 32000;
+                break;
+        case GT_BURST_64M :
+                rateLimit = 64000;
+                break;
+        case GT_BURST_128M :
+                rateLimit = 128000;
+                break;
+        case GT_BURST_256M :
+                rateLimit = 256000;
+                break;
+        default :
+                return GT_BAD_PARAM;
+    }
+
+    *rLimit = rateLimit;
+    return GT_OK;
+}
+
+
+/*
+ Convert given hw Burst Rate Limit to sw defined Burst Rate Limit.
+*/
+static GT_STATUS cBurstRateLimit(GT_QD_DEV *dev, GT_U32 burstSize, GT_U32 hwLimit, GT_BURST_RATE* swLimit)
+{
+    GT_BURST_RATE sLimit, startLimit, endLimit;
+    GT_U32 rLimit, tmpLimit;
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    if(hwLimit == 0)
+    {
+        *swLimit = GT_BURST_NO_LIMIT;
+        return GT_OK;
+    }
+
+    startLimit = GT_BURST_64K;
+    endLimit = GT_BURST_256M;
+
+    for(sLimit=startLimit;sLimit<=endLimit;sLimit++)
+    {
+        if((retVal = cBurstEnum2Number(dev, sLimit, &rLimit)) != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+               return retVal;
+        }
+
+        tmpLimit = GT_GET_BURST_RATE_LIMIT(burstSize,rLimit);
+
+        if(hwLimit == tmpLimit)
+        {
+            *swLimit = sLimit;
+            return GT_OK;
+        }
+    }
+
+    return GT_FAIL;
+}
+
+
+/*
+ Convert given sw defined Burst Rate to meaningful number.
+*/
+static GT_STATUS cTCPBurstRate(GT_QD_DEV *dev, GT_BURST_RATE rate, GT_U32 *data)
+{
+    GT_UNUSED_PARAM(dev);
+
+    switch(rate)
+    {
+        case GT_BURST_NO_LIMIT :
+                *data = 0; /* MAX_RATE_LIMIT; */
+                break;
+        case GT_BURST_64K :
+                *data = 0x1D00;
+                break;
+        case GT_BURST_128K :
+                *data = 0x3FFF;
+                break;
+        case GT_BURST_256K :
+                *data = 0x7FFF;
+                break;
+        case GT_BURST_384K :
+                *data = 0x7DE0;
+                break;
+        case GT_BURST_512K :
+                *data = 0x76F0;
+                break;
+        case GT_BURST_640K :
+                *data = 0x7660;
+                break;
+        case GT_BURST_768K :
+                *data = 0x7600;
+                break;
+        case GT_BURST_896K :
+                *data = 0x74EF;
+                break;
+        case GT_BURST_1M :
+                *data = 0x7340;
+                break;
+        case GT_BURST_1500K :
+                *data = 0x7300;
+                break;
+        default :
+                return GT_BAD_PARAM;
+    }
+
+    return GT_OK;
+}
+
+static GT_STATUS setEnhancedERate(GT_QD_DEV *dev, GT_LPORT port, GT_ERATE_TYPE *rateType)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U16        data;
+    GT_U32        rate, eDec;
+    GT_PIRL_ELIMIT_MODE        mode;
+    GT_U8        phyPort;        /* Physical port.               */
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    if((retVal = grcGetELimitMode(dev,port,&mode)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    if (mode == GT_PIRL_ELIMIT_FRAME)
+    {
+        /* Count Per Frame */
+        rate = rateType->fRate;
+
+        if (rate == 0) /* disable egress rate limit */
+        {
+            eDec = 0;
+            data = 0;
+        }
+        else if((rate < 7600)  || (rate > 1488000))
+        {
+            return GT_BAD_PARAM;
+        }
+        else
+        {
+            eDec = 1;
+            data = (GT_U16)GT_GET_RATE_LIMIT_PER_FRAME(rate,eDec);
+        }
+    }
+    else
+    {
+        /* Count Per Byte */
+        rate = rateType->kbRate;
+
+        if(rate == 0)
+        {
+            eDec = 0;
+        }
+        else if(rate < 1000)    /* less than 1Mbps */
+        {
+            /* it should be divided by 64 */
+            if(rate % 64)
+                return GT_BAD_PARAM;
+            eDec = rate/64;
+        }
+        else if(rate <= 100000)    /* less than or equal to 100Mbps */
+        {
+            /* it should be divided by 1000 */
+            if(rate % 1000)
+                return GT_BAD_PARAM;
+            eDec = rate/1000;
+        }
+        else if(rate <= 1000000)    /* less than or equal to 1000Mbps */
+        {
+            /* it should be divided by 10000 */
+            if(rate % 10000)
+                return GT_BAD_PARAM;
+            eDec = rate/10000;
+        }
+        else
+            return GT_BAD_PARAM;
+
+        if(rate == 0)
+        {
+            data = 0;
+        }
+        else
+        {
+            data = (GT_U16)GT_GET_RATE_LIMIT_PER_BYTE(rate,eDec);
+        }
+    }
+
+    retVal = hwSetPortRegField(dev,phyPort,QD_REG_RATE_CTRL0,0,7,(GT_U16)eDec);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    retVal = hwSetPortRegField(dev,phyPort,QD_REG_EGRESS_RATE_CTRL,0,12,(GT_U16)data );
+    if(retVal != GT_OK)
+       {
+        DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+static GT_STATUS getEnhancedERate(GT_QD_DEV *dev, GT_LPORT port, GT_ERATE_TYPE *rateType)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U16        rate, eDec;
+    GT_PIRL_ELIMIT_MODE        mode;
+    GT_U8        phyPort;        /* Physical port.               */
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    if((retVal = grcGetELimitMode(dev,port,&mode)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    retVal = hwGetPortRegField(dev,phyPort,QD_REG_RATE_CTRL0,0,7,&eDec);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    retVal = hwGetPortRegField(dev,phyPort,QD_REG_EGRESS_RATE_CTRL,0,12,&rate );
+    if(retVal != GT_OK)
+       {
+        DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    if (mode == GT_PIRL_ELIMIT_FRAME)
+    {
+        rateType->fRate = GT_GET_RATE_LIMIT_PER_FRAME(rate,eDec);
+    }
+    else
+    {
+        /* Count Per Byte */
+        rateType->kbRate = GT_GET_RATE_LIMIT_PER_BYTE(rate,eDec);
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* grcSetLimitMode
+*
+* DESCRIPTION:
+*       This routine sets the port's rate control ingress limit mode.
+*
+* INPUTS:
+*       port    - logical port number.
+*       mode     - rate control ingress limit mode.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS grcSetLimitMode
+(
+    IN GT_QD_DEV             *dev,
+    IN GT_LPORT          port,
+    IN GT_RATE_LIMIT_MODE    mode
+)
+{
+
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("grcSetLimitMode Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,phyPort, DEV_INGRESS_RATE_KBPS)) != GT_OK )
+      return retVal;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_MANAGED_SWITCH))
+    {
+        retVal = hwSetPortRegField(dev,phyPort,QD_REG_EGRESS_RATE_CTRL,14,2,(GT_U16)mode );
+    }
+    else
+    {
+        retVal = hwSetPortRegField(dev,phyPort,QD_REG_RATE_CTRL,14,2,(GT_U16)mode );
+    }
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* grcGetLimitMode
+*
+* DESCRIPTION:
+*       This routine gets the port's rate control ingress limit mode.
+*
+* INPUTS:
+*       port    - logical port number.
+*
+* OUTPUTS:
+*       mode     - rate control ingress limit mode.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS grcGetLimitMode
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_RATE_LIMIT_MODE    *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("grcGetLimitMode Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,phyPort, DEV_INGRESS_RATE_KBPS)) != GT_OK )
+      return retVal;
+
+    if(mode == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_MANAGED_SWITCH))
+    {
+        retVal = hwGetPortRegField(dev,phyPort,QD_REG_EGRESS_RATE_CTRL,14,2,&data );
+    }
+    else
+    {
+        retVal = hwGetPortRegField(dev,phyPort,QD_REG_RATE_CTRL,14,2,&data );
+    }
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    *mode = data;
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* grcSetPri3Rate
+*
+* DESCRIPTION:
+*       This routine sets the ingress data rate limit for priority 3 frames.
+*       Priority 3 frames will be discarded after the ingress rate selection
+*       is reached or exceeded.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - the priority 3 frame rate limit mode
+*              GT_FALSE: use the same rate as Pri2Rate
+*              GT_TRUE:  use twice the rate as Pri2Rate
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS grcSetPri3Rate
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT port,
+    IN GT_BOOL  mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("grcSetPri3Rate Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,phyPort, DEV_INGRESS_RATE_KBPS)) != GT_OK )
+      return retVal;
+
+    BOOL_2_BIT(mode,data);
+
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_MANAGED_SWITCH))
+    {
+        retVal = hwSetPortRegField(dev,phyPort,QD_REG_INGRESS_RATE_CTRL,14,1,data );
+    }
+    else
+    {
+        retVal = hwSetPortRegField(dev,phyPort,QD_REG_RATE_CTRL,13,1,data);
+    }
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* grcGetPri3Rate
+*
+* DESCRIPTION:
+*       This routine gets the ingress data rate limit for priority 3 frames.
+*       Priority 3 frames will be discarded after the ingress rate selection
+*       is reached or exceeded.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - the priority 3 frame rate limit mode
+*              GT_FALSE: use the same rate as Pri2Rate
+*              GT_TRUE:  use twice the rate as Pri2Rate
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS grcGetPri3Rate
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("grcGetPri3Rate Called.\n"));
+    if(mode == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,phyPort, DEV_INGRESS_RATE_KBPS)) != GT_OK )
+      return retVal;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_MANAGED_SWITCH))
+    {
+        retVal = hwGetPortRegField(dev,phyPort,QD_REG_INGRESS_RATE_CTRL,14,1,&data );
+    }
+    else
+    {
+        retVal = hwGetPortRegField(dev,phyPort,QD_REG_RATE_CTRL,13,1,&data);
+    }
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*mode);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* grcSetPri2Rate
+*
+* DESCRIPTION:
+*       This routine sets the ingress data rate limit for priority 2 frames.
+*       Priority 2 frames will be discarded after the ingress rate selection
+*       is reached or exceeded.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - the priority 2 frame rate limit mode
+*              GT_FALSE: use the same rate as Pri1Rate
+*              GT_TRUE:  use twice the rate as Pri1Rate
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS grcSetPri2Rate
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT port,
+    IN GT_BOOL  mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("grcSetPri2Rate Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,phyPort, DEV_INGRESS_RATE_KBPS)) != GT_OK )
+      return retVal;
+
+    BOOL_2_BIT(mode,data);
+
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_MANAGED_SWITCH))
+    {
+        retVal = hwSetPortRegField(dev,phyPort,QD_REG_INGRESS_RATE_CTRL,13,1,data );
+    }
+    else
+    {
+        retVal = hwSetPortRegField(dev,phyPort,QD_REG_RATE_CTRL,12,1,data);
+    }
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* grcGetPri2Rate
+*
+* DESCRIPTION:
+*       This routine gets the ingress data rate limit for priority 2 frames.
+*       Priority 2 frames will be discarded after the ingress rate selection
+*       is reached or exceeded.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - the priority 2 frame rate limit mode
+*              GT_FALSE: use the same rate as Pri1Rate
+*              GT_TRUE:  use twice the rate as Pri1Rate
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS grcGetPri2Rate
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("grcGetPri2Rate Called.\n"));
+    if(mode == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,phyPort, DEV_INGRESS_RATE_KBPS)) != GT_OK )
+      return retVal;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_MANAGED_SWITCH))
+    {
+        retVal = hwGetPortRegField(dev,phyPort,QD_REG_INGRESS_RATE_CTRL,13,1,&data );
+    }
+    else
+    {
+        retVal = hwGetPortRegField(dev,phyPort,QD_REG_RATE_CTRL,12,1,&data);
+    }
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*mode);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* grcSetPri1Rate
+*
+* DESCRIPTION:
+*       This routine sets the ingress data rate limit for priority 1 frames.
+*       Priority 1 frames will be discarded after the ingress rate selection
+*       is reached or exceeded.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - the priority 1 frame rate limit mode
+*              GT_FALSE: use the same rate as Pri0Rate
+*              GT_TRUE:  use twice the rate as Pri0Rate
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS grcSetPri1Rate
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL   mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("grcSetPri1Rate Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,phyPort, DEV_INGRESS_RATE_KBPS)) != GT_OK )
+      return retVal;
+
+    BOOL_2_BIT(mode,data);
+
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_MANAGED_SWITCH))
+    {
+        retVal = hwSetPortRegField(dev,phyPort,QD_REG_INGRESS_RATE_CTRL,12,1,data );
+    }
+    else
+    {
+        retVal = hwSetPortRegField(dev,phyPort,QD_REG_RATE_CTRL,11,1,data);
+    }
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* grcGetPri1Rate
+*
+* DESCRIPTION:
+*       This routine gets the ingress data rate limit for priority 1 frames.
+*       Priority 1 frames will be discarded after the ingress rate selection
+*       is reached or exceeded.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - the priority 1 frame rate limit mode
+*              GT_FALSE: use the same rate as Pri0Rate
+*              GT_TRUE:  use twice the rate as Pri0Rate
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS grcGetPri1Rate
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_BOOL  *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("grcGetPri1Rate Called.\n"));
+    if(mode == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    phyPort = GT_LPORT_2_PORT(port);
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,phyPort, DEV_INGRESS_RATE_KBPS)) != GT_OK )
+      return retVal;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_MANAGED_SWITCH))
+    {
+        retVal = hwGetPortRegField(dev,phyPort,QD_REG_INGRESS_RATE_CTRL,12,1,&data );
+    }
+    else
+    {
+        retVal = hwGetPortRegField(dev,phyPort,QD_REG_RATE_CTRL,11,1,&data);
+    }
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*mode);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* grcSetPri0Rate
+*
+* DESCRIPTION:
+*       This routine sets the port's ingress data limit for priority 0 frames.
+*
+* INPUTS:
+*       port    - logical port number.
+*       rate    - ingress data rate limit for priority 0 frames. These frames
+*             will be discarded after the ingress rate selected is reached
+*             or exceeded.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*            GT_16M, GT_32M, GT_64M, GT_128M, and GT_256M in GT_PRI0_RATE enum
+*            are supported only by Gigabit Ethernet Switch.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS grcSetPri0Rate
+(
+    IN GT_QD_DEV       *dev,
+    IN GT_LPORT        port,
+    IN GT_PRI0_RATE    rate
+)
+{
+
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           phyPort;        /* Physical port.               */
+    GT_U32            rateLimit, tmpLimit;
+
+    DBG_INFO(("grcSetPri0Rate Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,phyPort, DEV_INGRESS_RATE_KBPS|DEV_UNMANAGED_SWITCH)) != GT_OK )
+      return retVal;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_MANAGED_SWITCH))
+    {
+        dev->devStorage &= ~(GT_RATE_ENUM_NOT_USED);
+        switch(rate)
+        {
+            case GT_NO_LIMIT :
+                    rateLimit = 0; /* MAX_RATE_LIMIT; */
+                    break;
+            case GT_128K :
+                    rateLimit = 128;
+                    break;
+            case GT_256K :
+                    rateLimit = 256;
+                    break;
+            case GT_512K :
+                    rateLimit = 512;
+                    break;
+            case GT_1M :
+                    rateLimit = 1000;
+                    break;
+            case GT_2M :
+                    rateLimit = 2000;
+                    break;
+            case GT_4M :
+                    rateLimit = 4000;
+                    break;
+            case GT_8M :
+                    rateLimit = 8000;
+                    break;
+            case GT_16M :
+                    rateLimit = 16000;
+                    break;
+            case GT_32M :
+                    rateLimit = 32000;
+                    break;
+            case GT_64M :
+                    rateLimit = 64000;
+                    break;
+            case GT_128M :
+                    rateLimit = 128000;
+                    break;
+            case GT_256M :
+                    rateLimit = 256000;
+                    break;
+            default :
+                    rateLimit = (GT_U32)rate;
+                    dev->devStorage |= GT_RATE_ENUM_NOT_USED;
+                    break;
+        }
+
+        if (!IS_IN_DEV_GROUP(dev,DEV_88E6183_FAMILY))
+            tmpLimit = GT_GET_RATE_LIMIT2(rateLimit);
+        else
+            tmpLimit = GT_GET_RATE_LIMIT(rateLimit);
+
+        if((tmpLimit == 0) && (rateLimit != 0))
+            rateLimit = 1;
+        else
+            rateLimit = tmpLimit;
+
+        retVal = hwSetPortRegField(dev,phyPort,QD_REG_INGRESS_RATE_CTRL,0,12,(GT_U16)rateLimit );
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+    }
+    else
+    {
+        switch(rate)
+        {
+            case GT_NO_LIMIT :
+            case GT_128K :
+            case GT_256K :
+            case GT_512K :
+            case GT_1M :
+            case GT_2M :
+            case GT_4M :
+            case GT_8M :
+                    break;
+            default :
+                    return GT_BAD_PARAM;
+        }
+        retVal = hwSetPortRegField(dev,phyPort,QD_REG_RATE_CTRL,8,3,(GT_U16)rate );
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* grcGetPri0Rate
+*
+* DESCRIPTION:
+*       This routine gets the port's ingress data limit for priority 0 frames.
+*
+* INPUTS:
+*       port    - logical port number to set.
+*
+* OUTPUTS:
+*       rate    - ingress data rate limit for priority 0 frames. These frames
+*             will be discarded after the ingress rate selected is reached
+*             or exceeded.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*            GT_16M, GT_32M, GT_64M, GT_128M, and GT_256M in GT_PRI0_RATE enum
+*            are supported only by Gigabit Ethernet Switch.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS grcGetPri0Rate
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_PRI0_RATE    *rate
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U8           phyPort;        /* Physical port.               */
+    GT_U32            tmpLimit;
+
+    DBG_INFO(("grcGetPri0Rate Called.\n"));
+
+    if(rate == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,phyPort, DEV_INGRESS_RATE_KBPS|DEV_UNMANAGED_SWITCH)) != GT_OK )
+      return retVal;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        retVal = hwGetPortRegField(dev,phyPort,QD_REG_INGRESS_RATE_CTRL,0,12,&data);
+        tmpLimit = (GT_U32)data;
+
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+
+        if(dev->devStorage & GT_RATE_ENUM_NOT_USED)
+        {
+            if (!IS_IN_DEV_GROUP(dev,DEV_88E6183_FAMILY))
+                *rate = GT_GET_RATE_LIMIT2(tmpLimit);
+            else
+                *rate = GT_GET_RATE_LIMIT(tmpLimit);
+        }
+        else
+        {
+            cRateLimit(dev, tmpLimit, (GT_U32*)rate);
+        }
+    }
+    else
+    {
+        retVal = hwGetPortRegField(dev,phyPort,QD_REG_RATE_CTRL,8,3,&data );
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+        *rate = data;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* grcSetBytesCount
+*
+* DESCRIPTION:
+*       This routine sets the byets to count for limiting needs to be determined
+*
+* INPUTS:
+*       port      - logical port number to set.
+*        limitMGMT - GT_TRUE: To limit and count MGMT frame bytes
+*                GT_FALSE: otherwise
+*        countIFG  - GT_TRUE: To count IFG bytes
+*                GT_FALSE: otherwise
+*        countPre  - GT_TRUE: To count Preamble bytes
+*                GT_FALSE: otherwise
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS grcSetBytesCount
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL      limitMGMT,
+    IN GT_BOOL      countIFG,
+    IN GT_BOOL      countPre
+)
+{
+
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           phyPort;        /* Physical port.               */
+    GT_U16          data;           /* data for bytes count         */
+
+    DBG_INFO(("grcSetBytesCount Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,phyPort, DEV_INGRESS_RATE_KBPS|DEV_UNMANAGED_SWITCH)) != GT_OK )
+      return retVal;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        BOOL_2_BIT(limitMGMT,data);
+        retVal = hwSetPortRegField(dev,phyPort,QD_REG_INGRESS_RATE_CTRL,15,1,data );
+        if (retVal != GT_OK)
+            return retVal;
+
+        data = 0;
+        if( countIFG == GT_TRUE ) data |= 2;
+        if( countPre == GT_TRUE ) data |= 1;
+
+        retVal = hwSetPortRegField(dev,phyPort,QD_REG_EGRESS_RATE_CTRL,12,2,data );
+    }
+    else
+    {
+        data = 0;
+        if(    limitMGMT == GT_TRUE ) data |=4;
+        if(     countIFG == GT_TRUE ) data |=2;
+        if(     countPre == GT_TRUE ) data |=1;
+
+        retVal = hwSetPortRegField(dev,phyPort,QD_REG_RATE_CTRL,4,3,data );
+    }
+
+       if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* grcGetBytesCount
+*
+* DESCRIPTION:
+*       This routine gets the byets to count for limiting needs to be determined
+*
+* INPUTS:
+*       port    - logical port number
+*
+* OUTPUTS:
+*        limitMGMT - GT_TRUE: To limit and count MGMT frame bytes
+*                GT_FALSE: otherwise
+*        countIFG  - GT_TRUE: To count IFG bytes
+*                GT_FALSE: otherwise
+*        countPre  - GT_TRUE: To count Preamble bytes
+*                GT_FALSE: otherwise
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS grcGetBytesCount
+(
+    IN GT_QD_DEV *dev,
+    IN GT_LPORT  port,
+    IN GT_BOOL      *limitMGMT,
+    IN GT_BOOL      *countIFG,
+    IN GT_BOOL      *countPre
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("grcGetBytesCount Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,phyPort, DEV_INGRESS_RATE_KBPS|DEV_UNMANAGED_SWITCH)) != GT_OK )
+      return retVal;
+
+    if (limitMGMT == NULL || countIFG == NULL || countPre == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+       *limitMGMT = *countIFG = *countPre = GT_FALSE;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        retVal = hwGetPortRegField(dev,phyPort,QD_REG_INGRESS_RATE_CTRL,15,1,&data );
+        if (retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+
+        BIT_2_BOOL(data,*limitMGMT);
+        retVal = hwGetPortRegField(dev,phyPort,QD_REG_EGRESS_RATE_CTRL,12,2,&data );
+        if (retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+
+        if( data & 0x2 ) *countIFG = GT_TRUE;
+        if( data & 0x1 ) *countPre = GT_TRUE;
+
+    }
+    else
+    {
+
+        retVal = hwGetPortRegField(dev,phyPort,QD_REG_RATE_CTRL,4,3,&data );
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+
+        if ( data & 4 ) *limitMGMT = GT_TRUE;
+        if ( data & 2 ) *countIFG  = GT_TRUE;
+        if ( data & 1 ) *countPre  = GT_TRUE;
+
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* grcSetEgressRate
+*
+* DESCRIPTION:
+*       This routine sets the port's egress data limit.
+*
+*
+* INPUTS:
+*       port      - logical port number.
+*       rateType  - egress data rate limit (GT_ERATE_TYPE union type).
+*                    union type is used to support multiple devices with the
+*                    different formats of egress rate.
+*                    GT_ERATE_TYPE has the following fields:
+*                        definedRate - GT_EGRESS_RATE enum type should used for the
+*                            following devices:
+*                            88E6218, 88E6318, 88E6063, 88E6083, 88E6181, 88E6183,
+*                            88E6093, 88E6095, 88E6185, 88E6108, 88E6065, 88E6061,
+*                            and their variations
+*                        kbRate - rate in kbps that should used for the following
+*                            devices:
+*                            88E6097, 88E6096 with the GT_PIRL_ELIMIT_MODE of
+*                                GT_PIRL_ELIMIT_LAYER1,
+*                                GT_PIRL_ELIMIT_LAYER2, or
+*                                GT_PIRL_ELIMIT_LAYER3 (see grcSetELimitMode)
+*                            64kbps ~ 1Mbps    : increments of 64kbps,
+*                            1Mbps ~ 100Mbps   : increments of 1Mbps, and
+*                            100Mbps ~ 1000Mbps: increments of 10Mbps
+*                            Therefore, the valid values are:
+*                                64, 128, 192, 256, 320, 384,..., 960,
+*                                1000, 2000, 3000, 4000, ..., 100000,
+*                                110000, 120000, 130000, ..., 1000000.
+*                        fRate - frame per second that should used for the following
+*                            devices:
+*                            88E6097, 88E6096 with GT_PIRL_ELIMIT_MODE of
+*                                GT_PIRL_ELIMIT_FRAME
+*                            Valid values are between 7600 and 1488000
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*            GT_16M, GT_32M, GT_64M, GT_128M, and GT_256M in GT_EGRESS_RATE enum
+*            are supported only by Gigabit Ethernet Switch.
+*
+*******************************************************************************/
+GT_STATUS grcSetEgressRate
+(
+    IN GT_QD_DEV       *dev,
+    IN GT_LPORT        port,
+    IN GT_ERATE_TYPE   *rateType
+)
+{
+
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           phyPort;        /* Physical port.               */
+    GT_U32            rateLimit, tmpLimit;
+    GT_EGRESS_RATE  rate;
+
+    DBG_INFO(("grcSetEgressRate Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,phyPort, DEV_EGRESS_RATE_KBPS|DEV_UNMANAGED_SWITCH)) != GT_OK )
+      return retVal;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_ELIMIT_FRAME_BASED))
+    {
+        return setEnhancedERate(dev,port,rateType);
+    }
+
+    rate = rateType->definedRate;
+
+    if ((IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_ENHANCED_FE_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY)))
+    {
+        dev->devStorage &= ~(GT_RATE_ENUM_NOT_USED);
+        switch(rate)
+        {
+            case GT_NO_LIMIT :
+                    rateLimit = 0; /* MAX_RATE_LIMIT; */
+                    break;
+            case GT_128K :
+                    rateLimit = 128;
+                    break;
+            case GT_256K :
+                    rateLimit = 256;
+                    break;
+            case GT_512K :
+                    rateLimit = 512;
+                    break;
+            case GT_1M :
+                    rateLimit = 1000;
+                    break;
+            case GT_2M :
+                    rateLimit = 2000;
+                    break;
+            case GT_4M :
+                    rateLimit = 4000;
+                    break;
+            case GT_8M :
+                    rateLimit = 8000;
+                    break;
+            case GT_16M :
+                    rateLimit = 16000;
+                    break;
+            case GT_32M :
+                    rateLimit = 32000;
+                    break;
+            case GT_64M :
+                    rateLimit = 64000;
+                    break;
+            case GT_128M :
+                    rateLimit = 128000;
+                    break;
+            case GT_256M :
+                    rateLimit = 256000;
+                    break;
+            default :
+                    rateLimit = (GT_U32)rate;
+                    dev->devStorage |= GT_RATE_ENUM_NOT_USED;
+                    break;
+        }
+
+        if ((IS_IN_DEV_GROUP(dev,DEV_ENHANCED_FE_SWITCH)) ||
+            (IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY)))
+            tmpLimit = GT_GET_RATE_LIMIT3(rateLimit);
+        else if (!IS_IN_DEV_GROUP(dev,DEV_88E6183_FAMILY))
+            tmpLimit = GT_GET_RATE_LIMIT2(rateLimit);
+        else
+            tmpLimit = GT_GET_RATE_LIMIT(rateLimit);
+
+        if((tmpLimit == 0) && (rateLimit != 0))
+            rateLimit = 1;
+        else
+            rateLimit = tmpLimit;
+
+        retVal = hwSetPortRegField(dev,phyPort,QD_REG_EGRESS_RATE_CTRL,0,12,(GT_U16)rateLimit );
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+    }
+    else
+    {
+        switch(rate)
+        {
+            case GT_NO_LIMIT :
+            case GT_128K :
+            case GT_256K :
+            case GT_512K :
+            case GT_1M :
+            case GT_2M :
+            case GT_4M :
+            case GT_8M :
+                    break;
+            default :
+                    return GT_BAD_PARAM;
+        }
+        retVal = hwSetPortRegField(dev,phyPort,QD_REG_RATE_CTRL,0,3,(GT_U16)rate );
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* grcGetEgressRate
+*
+* DESCRIPTION:
+*       This routine gets the port's egress data limit.
+*
+* INPUTS:
+*       port    - logical port number.
+*
+* OUTPUTS:
+*       rateType  - egress data rate limit (GT_ERATE_TYPE union type).
+*                    union type is used to support multiple devices with the
+*                    different formats of egress rate.
+*                    GT_ERATE_TYPE has the following fields:
+*                        definedRate - GT_EGRESS_RATE enum type should used for the
+*                            following devices:
+*                            88E6218, 88E6318, 88E6063, 88E6083, 88E6181, 88E6183,
+*                            88E6093, 88E6095, 88E6185, 88E6108, 88E6065, 88E6061,
+*                            and their variations
+*                        kbRate - rate in kbps that should used for the following
+*                            devices:
+*                            88E6097, 88E6096 with the GT_PIRL_ELIMIT_MODE of
+*                                GT_PIRL_ELIMIT_LAYER1,
+*                                GT_PIRL_ELIMIT_LAYER2, or
+*                                GT_PIRL_ELIMIT_LAYER3 (see grcSetELimitMode)
+*                            64kbps ~ 1Mbps    : increments of 64kbps,
+*                            1Mbps ~ 100Mbps   : increments of 1Mbps, and
+*                            100Mbps ~ 1000Mbps: increments of 10Mbps
+*                            Therefore, the valid values are:
+*                                64, 128, 192, 256, 320, 384,..., 960,
+*                                1000, 2000, 3000, 4000, ..., 100000,
+*                                110000, 120000, 130000, ..., 1000000.
+*                        fRate - frame per second that should used for the following
+*                            devices:
+*                            88E6097, 88E6096 with GT_PIRL_ELIMIT_MODE of
+*                                GT_PIRL_ELIMIT_FRAME
+*                            Valid values are between 7600 and 1488000
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*            GT_16M, GT_32M, GT_64M, GT_128M, and GT_256M in GT_EGRESS_RATE enum
+*            are supported only by Gigabit Ethernet Switch.
+*
+*******************************************************************************/
+GT_STATUS grcGetEgressRate
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_ERATE_TYPE  *rateType
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U8           phyPort;        /* Physical port.               */
+    GT_U32            tmpLimit,tmpRate;
+
+    DBG_INFO(("grcGetEgressRate Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,phyPort, DEV_EGRESS_RATE_KBPS|DEV_UNMANAGED_SWITCH)) != GT_OK )
+      return retVal;
+
+    if(rateType == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_ELIMIT_FRAME_BASED))
+    {
+        return getEnhancedERate(dev,port,rateType);
+    }
+
+    if ((IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_ENHANCED_FE_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY)))
+    {
+        retVal = hwGetPortRegField(dev,phyPort,QD_REG_EGRESS_RATE_CTRL,0,12,&data );
+        tmpLimit = (GT_U32)data;
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+
+        if(dev->devStorage & GT_RATE_ENUM_NOT_USED)
+        {
+            if ((IS_IN_DEV_GROUP(dev,DEV_ENHANCED_FE_SWITCH)) ||
+                (IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY)))
+                tmpRate = GT_GET_RATE_LIMIT3(tmpLimit);
+            else if (!IS_IN_DEV_GROUP(dev,DEV_88E6183_FAMILY))
+                tmpRate = GT_GET_RATE_LIMIT2(tmpLimit);
+            else
+                tmpRate = GT_GET_RATE_LIMIT(tmpLimit);
+            rateType->kbRate = tmpRate;
+        }
+        else
+        {
+            cRateLimit(dev, tmpLimit, &tmpRate);
+            rateType->definedRate = (GT_EGRESS_RATE)tmpRate;
+        }
+    }
+    else
+    {
+        retVal = hwGetPortRegField(dev,phyPort,QD_REG_RATE_CTRL,0,3,&data );
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+
+        rateType->definedRate = (GT_EGRESS_RATE)data;
+    }
+
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* grcSetBurstRate
+*
+* DESCRIPTION:
+*       This routine sets the port's ingress data limit based on burst size.
+*
+* INPUTS:
+*       port    - logical port number.
+*       bsize    - burst size.
+*       rate    - ingress data rate limit. These frames will be discarded after
+*                the ingress rate selected is reached or exceeded.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*                                Minimum rate for Burst Size 24K byte is 128Kbps
+*                                Minimum rate for Burst Size 48K byte is 256Kbps
+*                                Minimum rate for Burst Size 96K byte is 512Kbps
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*        If the device supports both priority based Rate Limiting and burst size
+*        based Rate limiting, user has to manually change the mode to burst size
+*        based Rate limiting by calling gsysSetRateLimitMode.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS grcSetBurstRate
+(
+    IN GT_QD_DEV       *dev,
+    IN GT_LPORT        port,
+    IN GT_BURST_SIZE   bsize,
+    IN GT_BURST_RATE   rate
+)
+{
+
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           phyPort;        /* Physical port.               */
+    GT_U32            rateLimit;
+    GT_U32            burstSize =0;
+
+    DBG_INFO(("grcSetBurstRate Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_BURST_RATE))
+    {
+        if (!IS_IN_DEV_GROUP(dev,DEV_NEW_FEATURE_IN_REV) ||
+            ((GT_DEVICE_REV)dev->revision < GT_REV_1))
+        {
+            DBG_INFO(("GT_NOT_SUPPORTED\n"));
+            return GT_NOT_SUPPORTED;
+        }
+    }
+
+    switch (bsize)
+    {
+        case GT_BURST_SIZE_12K:
+            burstSize = 0;
+            break;
+        case GT_BURST_SIZE_24K:
+            if ((rate < GT_BURST_128K) && (rate != GT_BURST_NO_LIMIT))
+                return GT_BAD_PARAM;
+            burstSize = 1;
+            break;
+        case GT_BURST_SIZE_48K:
+            if ((rate < GT_BURST_256K) && (rate != GT_BURST_NO_LIMIT))
+                return GT_BAD_PARAM;
+            burstSize = 3;
+            break;
+        case GT_BURST_SIZE_96K:
+            if ((rate < GT_BURST_512K) && (rate != GT_BURST_NO_LIMIT))
+                return GT_BAD_PARAM;
+            burstSize = 7;
+            break;
+        default:
+            return GT_BAD_PARAM;
+    }
+
+    if((retVal = cBurstEnum2Number(dev, rate, &rateLimit)) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    rateLimit = GT_GET_BURST_RATE_LIMIT(burstSize,rateLimit);
+
+    rateLimit |= (GT_U32)(burstSize << 12);
+
+    retVal = hwSetPortRegField(dev,phyPort,QD_REG_INGRESS_RATE_CTRL,0,15,(GT_U16)rateLimit );
+    if(retVal != GT_OK)
+       {
+        DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* grcGetBurstRate
+*
+* DESCRIPTION:
+*       This routine retrieves the port's ingress data limit based on burst size.
+*
+* INPUTS:
+*       port    - logical port number.
+*
+* OUTPUTS:
+*       bsize    - burst size.
+*       rate    - ingress data rate limit. These frames will be discarded after
+*                the ingress rate selected is reached or exceeded.
+*
+* RETURNS:
+*       GT_OK            - on success
+*       GT_FAIL          - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS grcGetBurstRate
+(
+    IN  GT_QD_DEV       *dev,
+    IN  GT_LPORT        port,
+    OUT GT_BURST_SIZE   *bsize,
+    OUT GT_BURST_RATE   *rate
+)
+{
+
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           phyPort;        /* Physical port.               */
+    GT_U32            rateLimit, burstSize;
+    GT_U16            data;
+
+    DBG_INFO(("grcGetBurstRate Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_BURST_RATE))
+    {
+        if (!IS_IN_DEV_GROUP(dev,DEV_NEW_FEATURE_IN_REV) ||
+            ((GT_DEVICE_REV)dev->revision < GT_REV_1))
+        {
+            DBG_INFO(("GT_NOT_SUPPORTED\n"));
+            return GT_NOT_SUPPORTED;
+        }
+    }
+
+    retVal = hwGetPortRegField(dev,phyPort,QD_REG_INGRESS_RATE_CTRL,0,15,&data);
+    rateLimit = (GT_U32)data;
+    if(retVal != GT_OK)
+       {
+        DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    burstSize = rateLimit >> 12;
+    rateLimit &= 0x0FFF;
+
+    retVal = cBurstRateLimit(dev, burstSize, rateLimit, rate);
+    if(retVal != GT_OK)
+       {
+        DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    switch (burstSize)
+    {
+        case 0:
+            *bsize = GT_BURST_SIZE_12K;
+            break;
+        case 1:
+            *bsize = GT_BURST_SIZE_24K;
+            break;
+        case 3:
+            *bsize = GT_BURST_SIZE_48K;
+            break;
+        case 7:
+            *bsize = GT_BURST_SIZE_96K;
+            break;
+        default:
+            return GT_BAD_VALUE;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* grcSetTCPBurstRate
+*
+* DESCRIPTION:
+*       This routine sets the port's TCP/IP ingress data limit based on burst size.
+*
+* INPUTS:
+*       port    - logical port number.
+*       rate    - ingress data rate limit for TCP/IP packets. These frames will
+*                be discarded after the ingress rate selected is reached or exceeded.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*                                Valid rate is GT_BURST_NO_LIMIT, or between
+*                                64Kbps and 1500Kbps.
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*        If the device supports both priority based Rate Limiting and burst size
+*        based Rate limiting, user has to manually change the mode to burst size
+*        based Rate limiting by calling gsysSetRateLimitMode.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS grcSetTCPBurstRate
+(
+    IN GT_QD_DEV       *dev,
+    IN GT_LPORT        port,
+    IN GT_BURST_RATE   rate
+)
+{
+
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           phyPort;        /* Physical port.               */
+    GT_U32            rateLimit;
+
+    DBG_INFO(("grcSetTCPBurstRate Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_BURST_RATE))
+    {
+        if (!IS_IN_DEV_GROUP(dev,DEV_NEW_FEATURE_IN_REV) ||
+            ((GT_DEVICE_REV)dev->revision < GT_REV_1))
+        {
+            DBG_INFO(("GT_NOT_SUPPORTED\n"));
+            return GT_NOT_SUPPORTED;
+        }
+    }
+
+    if((retVal = cTCPBurstRate(dev, rate, &rateLimit)) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    retVal = hwSetPortRegField(dev,phyPort,QD_REG_INGRESS_RATE_CTRL,0,15,(GT_U16)rateLimit );
+    if(retVal != GT_OK)
+       {
+        DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* grcGetTCPBurstRate
+*
+* DESCRIPTION:
+*       This routine sets the port's TCP/IP ingress data limit based on burst size.
+*
+* INPUTS:
+*       port    - logical port number.
+*
+* OUTPUTS:
+*       rate    - ingress data rate limit for TCP/IP packets. These frames will
+*                be discarded after the ingress rate selected is reached or exceeded.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_VALUE        - register value is not known
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*        If the device supports both priority based Rate Limiting and burst size
+*        based Rate limiting, user has to manually change the mode to burst size
+*        based Rate limiting by calling gsysSetRateLimitMode.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS grcGetTCPBurstRate
+(
+    IN  GT_QD_DEV       *dev,
+    IN  GT_LPORT        port,
+    OUT GT_BURST_RATE   *rate
+)
+{
+
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           phyPort;        /* Physical port.               */
+    GT_U32            rateLimit;
+    GT_U32            data;
+    GT_U16            u16Data;
+    GT_BURST_RATE sLimit, startLimit, endLimit;
+
+    DBG_INFO(("grcGetTCPBurstRate Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_BURST_RATE))
+    {
+        if (!IS_IN_DEV_GROUP(dev,DEV_NEW_FEATURE_IN_REV) ||
+            ((GT_DEVICE_REV)dev->revision < GT_REV_1))
+        {
+            DBG_INFO(("GT_NOT_SUPPORTED\n"));
+            return GT_NOT_SUPPORTED;
+        }
+    }
+
+    retVal = hwGetPortRegField(dev,phyPort,QD_REG_INGRESS_RATE_CTRL,0,15,&u16Data);
+    data = (GT_U32)u16Data;
+    if(retVal != GT_OK)
+       {
+        DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    if ((data & 0xFFF) == 0)
+    {
+        *rate = GT_BURST_NO_LIMIT;
+        return GT_OK;
+    }
+
+    startLimit = GT_BURST_64K;
+    endLimit = GT_BURST_1500K;
+
+    for(sLimit=startLimit;sLimit<=endLimit;sLimit++)
+    {
+        if((retVal = cTCPBurstRate(dev, sLimit, &rateLimit)) != GT_OK)
+        {
+            break;
+        }
+
+        if(rateLimit == data)
+        {
+            *rate = sLimit;
+            return GT_OK;
+        }
+    }
+
+    DBG_INFO(("Fail to find TCP Rate.\n"));
+    return GT_BAD_VALUE;
+}
+
+
+/*******************************************************************************
+* grcSetVidNrlEn
+*
+* DESCRIPTION:
+*       This routine enables/disables VID None Rate Limit (NRL).
+*        When VID NRL is enabled and the determined VID of a frame results in a VID
+*        whose VIDNonRateLimit in the VTU Table is set to GT_TURE, then the frame
+*        will not be ingress nor egress rate limited.
+*
+* INPUTS:
+*       port - logical port number.
+*        mode - GT_TRUE to enable VID None Rate Limit
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS grcSetVidNrlEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;        /* Physical port.               */
+
+    DBG_INFO(("grcSetVidNrlEn Called.\n"));
+
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_NONE_RATE_LIMIT))
+    {
+           DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set the VidNrlEn mode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_INGRESS_RATE_CTRL,15,1,data);
+    if(retVal != GT_OK)
+       {
+        DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* grcGetVidNrlEn
+*
+* DESCRIPTION:
+*       This routine gets VID None Rate Limit (NRL) mode.
+*        When VID NRL is enabled and the determined VID of a frame results in a VID
+*        whose VIDNonRateLimit in the VTU Table is set to GT_TURE, then the frame
+*        will not be ingress nor egress rate limited.
+*
+* INPUTS:
+*       port - logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE to enable VID None Rate Limit
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS grcGetVidNrlEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;        /* Physical port.               */
+
+    DBG_INFO(("grcGetVidNrlEn Called.\n"));
+
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_NONE_RATE_LIMIT))
+    {
+           DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the VidNrlEn mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_INGRESS_RATE_CTRL,15,1,&data);
+    if(retVal != GT_OK)
+       {
+        DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    BIT_2_BOOL(data, *mode);
+
+    DBG_INFO(("OK.\n"));
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* grcSetSaNrlEn
+*
+* DESCRIPTION:
+*       This routine enables/disables SA None Rate Limit (NRL).
+*        When SA NRL is enabled and the source address of a frame results in a ATU
+*        hit where the SA's MAC address returns an EntryState that indicates Non
+*        Rate Limited, then the frame will not be ingress nor egress rate limited.
+*
+* INPUTS:
+*       port - logical port number.
+*        mode - GT_TRUE to enable SA None Rate Limit
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS grcSetSaNrlEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;        /* Physical port.               */
+
+    DBG_INFO(("grcSetSaNrlEn Called.\n"));
+
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_NONE_RATE_LIMIT))
+    {
+           DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set the SaNrlEn mode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_INGRESS_RATE_CTRL,14,1,data);
+    if(retVal != GT_OK)
+       {
+        DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* grcGetSaNrlEn
+*
+* DESCRIPTION:
+*       This routine gets SA None Rate Limit (NRL) mode.
+*        When SA NRL is enabled and the source address of a frame results in a ATU
+*        hit where the SA's MAC address returns an EntryState that indicates Non
+*        Rate Limited, then the frame will not be ingress nor egress rate limited.
+*
+* INPUTS:
+*       port - logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE to enable SA None Rate Limit
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS grcGetSaNrlEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;        /* Physical port.               */
+
+    DBG_INFO(("grcGetSaNrlEn Called.\n"));
+
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_NONE_RATE_LIMIT))
+    {
+           DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the SaNrlEn mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_INGRESS_RATE_CTRL,14,1,&data);
+    if(retVal != GT_OK)
+       {
+        DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    BIT_2_BOOL(data, *mode);
+
+    DBG_INFO(("OK.\n"));
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* grcSetDaNrlEn
+*
+* DESCRIPTION:
+*       This routine enables/disables DA None Rate Limit (NRL).
+*        When DA NRL is enabled and the destination address of a frame results in
+*        a ATU hit where the DA's MAC address returns an EntryState that indicates
+*        Non Rate Limited, then the frame will not be ingress nor egress rate
+*        limited.
+*
+* INPUTS:
+*       port - logical port number.
+*        mode - GT_TRUE to enable DA None Rate Limit
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS grcSetDaNrlEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;        /* Physical port.               */
+
+    DBG_INFO(("grcSetDaNrlEn Called.\n"));
+
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_NONE_RATE_LIMIT))
+    {
+           DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set the DaNrlEn mode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_INGRESS_RATE_CTRL,13,1,data);
+    if(retVal != GT_OK)
+       {
+        DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* grcGetDaNrlEn
+*
+* DESCRIPTION:
+*       This routine gets SA None Rate Limit (NRL) mode.
+*        When DA NRL is enabled and the destination address of a frame results in
+*        a ATU hit where the DA's MAC address returns an EntryState that indicates
+*        Non Rate Limited, then the frame will not be ingress nor egress rate
+*        limited.
+*
+* INPUTS:
+*       port - logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE to enable DA None Rate Limit
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS grcGetDaNrlEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;        /* Physical port.               */
+
+    DBG_INFO(("grcGetDaNrlEn Called.\n"));
+
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_NONE_RATE_LIMIT))
+    {
+           DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the DaNrlEn mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_INGRESS_RATE_CTRL,13,1,&data);
+    if(retVal != GT_OK)
+       {
+        DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    BIT_2_BOOL(data, *mode);
+
+    DBG_INFO(("OK.\n"));
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* grcSetELimitMode
+*
+* DESCRIPTION:
+*       This routine sets Egress Rate Limit counting mode.
+*        The supported modes are as follows:
+*            GT_PIRL_ELIMIT_FRAME -
+*                Count the number of frames
+*            GT_PIRL_ELIMIT_LAYER1 -
+*                Count all Layer 1 bytes:
+*                Preamble (8bytes) + Frame's DA to CRC + IFG (12bytes)
+*            GT_PIRL_ELIMIT_LAYER2 -
+*                Count all Layer 2 bytes: Frame's DA to CRC
+*            GT_PIRL_ELIMIT_LAYER1 -
+*                Count all Layer 1 bytes:
+*                Frame's DA to CRC - 18 - 4 (if frame is tagged)
+*
+* INPUTS:
+*       port - logical port number
+*        mode - GT_PIRL_ELIMIT_MODE enum type
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS grcSetELimitMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_PIRL_ELIMIT_MODE        mode
+)
+{
+    GT_U16            data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;        /* Physical port.               */
+
+    DBG_INFO(("grcSetELimitMode Called.\n"));
+
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!((IS_IN_DEV_GROUP(dev,DEV_PIRL_RESOURCE)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_ELIMIT_FRAME_BASED))))
+    {
+           DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_ELIMIT_FRAME_BASED))
+    {
+        if(mode == GT_PIRL_ELIMIT_FRAME)
+            return GT_NOT_SUPPORTED;
+    }
+
+    data = (GT_U16)mode & 0x3;
+
+    /* Set the Elimit mode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_EGRESS_RATE_CTRL,14,2,data);
+    if(retVal != GT_OK)
+       {
+        DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* grcGetELimitMode
+*
+* DESCRIPTION:
+*       This routine gets Egress Rate Limit counting mode.
+*        The supported modes are as follows:
+*            GT_PIRL_ELIMIT_FRAME -
+*                Count the number of frames
+*            GT_PIRL_ELIMIT_LAYER1 -
+*                Count all Layer 1 bytes:
+*                Preamble (8bytes) + Frame's DA to CRC + IFG (12bytes)
+*            GT_PIRL_ELIMIT_LAYER2 -
+*                Count all Layer 2 bytes: Frame's DA to CRC
+*            GT_PIRL_ELIMIT_LAYER1 -
+*                Count all Layer 1 bytes:
+*                Frame's DA to CRC - 18 - 4 (if frame is tagged)
+*
+* INPUTS:
+*       port - logical port number
+*
+* OUTPUTS:
+*        mode - GT_PIRL_ELIMIT_MODE enum type
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*        GT_NOT_SUPPORTED    - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS grcGetELimitMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_PIRL_ELIMIT_MODE        *mode
+)
+{
+    GT_U16            data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;        /* Physical port.               */
+
+    DBG_INFO(("grcGetELimitMode Called.\n"));
+
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if the given Switch supports this feature. */
+    if (!((IS_IN_DEV_GROUP(dev,DEV_PIRL_RESOURCE)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_ELIMIT_FRAME_BASED))))
+    {
+           DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the Elimit mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_EGRESS_RATE_CTRL,14,2,&data);
+    if(retVal != GT_OK)
+       {
+        DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    *mode = data;
+
+    DBG_INFO(("OK.\n"));
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* grcSetRsvdNrlEn
+*
+* DESCRIPTION:
+*       This routine sets Reserved Non Rate Limit.
+*        When this feature is enabled, frames that match the requirements of the
+*        Rsvd2Cpu bit below will also be considered to be ingress and egress non
+*        rate limited.
+*
+* INPUTS:
+*       en - GT_TRUE to enable Reserved Non Rate Limit,
+*             GT_FALSE to disable
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS grcSetRsvdNrlEn
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_BOOL   en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("grcSetRsvdNrlEn Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_NONE_RATE_LIMIT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set the RsvdNrl bit.            */
+    retVal = hwSetGlobalRegField(dev,QD_REG_MANGEMENT_CONTROL,4,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* grcGetRsvdNrlEn
+*
+* DESCRIPTION:
+*       This routine gets Reserved Non Rate Limit.
+*        When this feature is enabled, frames that match the requirements of the
+*        Rsvd2Cpu bit below will also be considered to be ingress and egress non
+*        rate limited.
+*
+* INPUTS:
+*       en - GT_TRUE to enable Reserved Non Rate Limit,
+*             GT_FALSE to disable
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS grcGetRsvdNrlEn
+(
+    IN  GT_QD_DEV *dev,
+    OUT GT_BOOL   *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("grcGetRsvdNrlEn Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_NONE_RATE_LIMIT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the RsvdNrl bit.            */
+    retVal = hwGetGlobalRegField(dev,QD_REG_MANGEMENT_CONTROL,4,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data, *en);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* grcSetFrameOverhead
+*
+* DESCRIPTION:
+*       Egress rate frame overhead adjustment.
+*        This field is used to adjust the number of bytes that need to be added to a
+*        frame's IFG on a per frame basis.
+*
+*        The egress rate limiter multiplies the value programmed in this field by four
+*        for computing the frame byte offset adjustment value (i.e., the amount the
+*        IPG is increased for every frame). This adjustment, if enabled, is made to
+*        every egressing frame's IPG and it is made in addition to any other IPG
+*        adjustments due to other Egress Rate Control settings.
+*
+*        The egress overhead adjustment can add the following number of byte times
+*        to each frame's IPG: 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52,
+*        56 and 60.
+*
+*        Example:
+*        If FrameOverhead = 11, the egress rate limiter would increase the IPG
+*        between every frame by an additional 44 bytes.
+*
+*        Note: When the Count Mode (port offset 0x0A) is in Frame based egress rate
+*        shaping mode, these Frame Overhead bits must be 0x0.
+*
+* INPUTS:
+*       port     - logical port number.
+*       overhead - Frame overhead (0 ~ 15)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_BAD_PARAM        - on bad parameters
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS grcSetFrameOverhead
+(
+    IN GT_QD_DEV        *dev,
+    IN GT_LPORT            port,
+    IN GT_32            overhead
+)
+{
+
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("grcSetFrameOverhead Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ELIMIT_FRAME_BASED))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (overhead > 15)
+    {
+        DBG_INFO(("GT_BAD_PARAM \n"));
+        return GT_BAD_PARAM;
+    }
+
+    retVal = hwSetPortRegField(dev,phyPort,QD_REG_RATE_CTRL0,8,4,(GT_U16)overhead );
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* grcGetFrameOverhead
+*
+* DESCRIPTION:
+*       Egress rate frame overhead adjustment.
+*        This field is used to adjust the number of bytes that need to be added to a
+*        frame's IFG on a per frame basis.
+*
+*        The egress rate limiter multiplies the value programmed in this field by four
+*        for computing the frame byte offset adjustment value (i.e., the amount the
+*        IPG is increased for every frame). This adjustment, if enabled, is made to
+*        every egressing frame's IPG and it is made in addition to any other IPG
+*        adjustments due to other Egress Rate Control settings.
+*
+*        The egress overhead adjustment can add the following number of byte times
+*        to each frame's IPG: 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52,
+*        56 and 60.
+*
+*        Example:
+*        If FrameOverhead = 11, the egress rate limiter would increase the IPG
+*        between every frame by an additional 44 bytes.
+*
+*        Note: When the Count Mode (port offset 0x0A) is in Frame based egress rate
+*        shaping mode, these Frame Overhead bits must be 0x0.
+*
+* INPUTS:
+*       port    - logical port number.
+*
+* OUTPUTS:
+*       overhead - Frame overhead (0 ~ 15)
+*
+* RETURNS:
+*       GT_OK            - on success
+*       GT_FAIL          - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+*******************************************************************************/
+GT_STATUS grcGetFrameOverhead
+(
+    IN GT_QD_DEV *dev,
+    IN  GT_LPORT port,
+    OUT GT_32    *overhead
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U8           phyPort;        /* Physical port.               */
+
+    DBG_INFO(("grcGetFrameOverhead Called.\n"));
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ELIMIT_FRAME_BASED))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = hwGetPortRegField(dev,phyPort,QD_REG_RATE_CTRL0,8,4,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    *overhead = (GT_U32)data;
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortRmon.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortRmon.c
new file mode 100644
index 000000000000..809cdaf64f0e
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortRmon.c
@@ -0,0 +1,1244 @@
+#include <Copyright.h>
+
+/*******************************************************************************
+* gtPortCounter.c
+*
+* DESCRIPTION:
+*       API definitions for RMON counters
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtSem.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+/****************************************************************************/
+/* STATS operation function declaration.                                    */
+/****************************************************************************/
+static GT_STATUS statsOperationPerform
+(
+    IN   GT_QD_DEV            *dev,
+    IN   GT_STATS_OPERATION   statsOp,
+    IN   GT_U8                port,
+    IN   GT_STATS_COUNTERS    counter,
+    OUT  GT_VOID              *statsData
+);
+
+static GT_STATUS statsCapture
+(
+    IN GT_QD_DEV  *dev,
+    IN GT_U8      port
+);
+
+static GT_STATUS statsReadCounter
+(
+    IN   GT_QD_DEV        *dev,
+    IN   GT_U32            counter,
+    OUT  GT_U32            *statsData
+);
+
+static GT_STATUS statsReadRealtimeCounter
+(
+    IN   GT_QD_DEV      *dev,
+    IN   GT_U8             port,
+    IN   GT_U32            counter,
+    OUT  GT_U32            *statsData
+);
+
+
+/*******************************************************************************
+* gstatsFlushAll
+*
+* DESCRIPTION:
+*       Flush All counters for all ports.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*
+* COMMENTS:
+*       None
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstatsFlushAll
+(
+        IN GT_QD_DEV  *dev
+)
+{
+    GT_STATUS           retVal;
+
+    DBG_INFO(("gstatsFlushAll Called.\n"));
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,1, DEV_RMON)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    retVal = statsOperationPerform(dev,STATS_FLUSH_ALL,0,0,NULL);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (statsOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gstatsFlushPort
+*
+* DESCRIPTION:
+*       Flush All counters for a given port.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*
+* COMMENTS:
+*       None
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstatsFlushPort
+(
+    IN GT_QD_DEV  *dev,
+    IN GT_LPORT      port
+)
+{
+    GT_STATUS    retVal;
+    GT_U8        hwPort;         /* physical port number         */
+
+    DBG_INFO(("gstatsFlushPort Called.\n"));
+
+    /* translate logical port to physical port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_RMON)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    retVal = statsOperationPerform(dev,STATS_FLUSH_PORT,hwPort,0,NULL);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (statsOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gstatsGetPortCounter
+*
+* DESCRIPTION:
+*        This routine gets a specific counter of the given port
+*
+* INPUTS:
+*        port - the logical port number.
+*        counter - the counter which will be read
+*
+* OUTPUTS:
+*        statsData - points to 32bit data storage for the MIB counter
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*
+* COMMENTS:
+*        None
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstatsGetPortCounter
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    IN  GT_STATS_COUNTERS    counter,
+    OUT GT_U32            *statsData
+)
+{
+    GT_STATUS    retVal;
+    GT_U8        hwPort;         /* physical port number         */
+
+    DBG_INFO(("gstatsGetPortCounter Called.\n"));
+
+    /* translate logical port to physical port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_RMON)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    /* Gigabit Switch does not support this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_RMON_TYPE_1))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = statsOperationPerform(dev,STATS_READ_COUNTER,hwPort,counter,(GT_VOID*)statsData);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (statsOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gstatsGetPortAllCounters
+*
+* DESCRIPTION:
+*       This routine gets all counters of the given port
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       statsCounterSet - points to GT_STATS_COUNTER_SET for the MIB counters
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*
+* COMMENTS:
+*       None
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gstatsGetPortAllCounters
+(
+    IN  GT_QD_DEV               *dev,
+    IN  GT_LPORT        port,
+    OUT GT_STATS_COUNTER_SET    *statsCounterSet
+)
+{
+    GT_STATUS    retVal;
+    GT_U8        hwPort;         /* physical port number         */
+
+    DBG_INFO(("gstatsGetPortAllCounters Called.\n"));
+
+    /* translate logical port to physical port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_RMON)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    /* Gigabit Switch does not support this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_RMON_TYPE_1))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = statsOperationPerform(dev,STATS_READ_ALL,hwPort,0,(GT_VOID*)statsCounterSet);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (statsOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gstatsGetPortCounter2
+*
+* DESCRIPTION:
+*        This routine gets a specific counter of the given port
+*
+* INPUTS:
+*        port - the logical port number.
+*        counter - the counter which will be read
+*
+* OUTPUTS:
+*        statsData - points to 32bit data storage for the MIB counter
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gstatsGetPortCounter2
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    IN  GT_STATS_COUNTERS2    counter,
+    OUT GT_U32            *statsData
+)
+{
+    GT_STATUS    retVal;
+    GT_U8        hwPort;         /* physical port number         */
+
+    DBG_INFO(("gstatsGetPortCounters2 Called.\n"));
+
+    /* translate logical port to physical port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_RMON)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_RMON_TYPE_2))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = statsOperationPerform(dev,STATS_READ_COUNTER,hwPort,counter,(GT_VOID*)statsData);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (statsOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gstatsGetPortAllCounters2
+*
+* DESCRIPTION:
+*        This routine gets all counters of the given port
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        statsCounterSet - points to GT_STATS_COUNTER_SET for the MIB counters
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gstatsGetPortAllCounters2
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    OUT GT_STATS_COUNTER_SET2    *statsCounterSet
+)
+{
+    GT_STATUS    retVal;
+    GT_U8        hwPort;         /* physical port number         */
+
+    DBG_INFO(("gstatsGetPortAllCounters2 Called.\n"));
+
+    /* translate logical port to physical port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_RMON)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_RMON_TYPE_2))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = statsOperationPerform(dev,STATS_READ_ALL,hwPort,0,(GT_VOID*)statsCounterSet);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (statsOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gstatsGetPortCounter3
+*
+* DESCRIPTION:
+*        This routine gets a specific counter of the given port
+*
+* INPUTS:
+*        port - the logical port number.
+*        counter - the counter which will be read
+*
+* OUTPUTS:
+*        statsData - points to 32bit data storage for the MIB counter
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*
+* COMMENTS:
+*        This function supports Gigabit Switch and Spinnaker family
+*
+*******************************************************************************/
+GT_STATUS gstatsGetPortCounter3
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    IN  GT_STATS_COUNTERS3    counter,
+    OUT GT_U32            *statsData
+)
+{
+    GT_STATUS    retVal;
+    GT_U8        hwPort;         /* physical port number         */
+
+    DBG_INFO(("gstatsGetPortCounters3 Called.\n"));
+
+    /* translate logical port to physical port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_RMON)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    /* Only 88E6093 Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_RMON_TYPE_3))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = statsOperationPerform(dev,STATS_READ_COUNTER,hwPort,counter,(GT_VOID*)statsData);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (statsOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gstatsGetPortAllCounters3
+*
+* DESCRIPTION:
+*        This routine gets all counters of the given port
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        statsCounterSet - points to GT_STATS_COUNTER_SET for the MIB counters
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*
+* COMMENTS:
+*        This function supports Gigabit Switch and Spinnaker family
+*
+*******************************************************************************/
+GT_STATUS gstatsGetPortAllCounters3
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    OUT GT_STATS_COUNTER_SET3    *statsCounterSet
+)
+{
+    GT_STATUS    retVal;
+    GT_U8        hwPort;         /* physical port number         */
+
+    DBG_INFO(("gstatsGetPortAllCounters3 Called.\n"));
+
+    /* translate logical port to physical port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_RMON)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_RMON_TYPE_3))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = statsOperationPerform(dev,STATS_READ_ALL,hwPort,0,(GT_VOID*)statsCounterSet);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (statsOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gstatsGetHistogramMode
+*
+* DESCRIPTION:
+*        This routine gets the Histogram Counters Mode.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        mode - Histogram Mode (GT_COUNT_RX_ONLY, GT_COUNT_TX_ONLY,
+*                    and GT_COUNT_RX_TX)
+*
+* RETURNS:
+*        GT_OK           - on success
+*        GT_BAD_PARAM    - on bad parameter
+*        GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gstatsGetHistogramMode
+(
+    IN  GT_QD_DEV                *dev,
+    OUT GT_HISTOGRAM_MODE    *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gstatsGetHistogramMode Called.\n"));
+    /* Only Gigabit Switch supports this status. */
+    if (!((IS_IN_DEV_GROUP(dev,DEV_GIGABIT_MANAGED_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_RMON_REALTIME_SUPPORT))))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(mode == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Get the Histogram mode bit.                */
+    retVal = hwGetGlobalRegField(dev,QD_REG_STATS_OPERATION,10,2,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    *mode = data - 1; /* Software definition starts from 0 ~ 2,
+                        while hardware supports the values from 1 to 3 */
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gstatsSetHistogramMode
+*
+* DESCRIPTION:
+*        This routine sets the Histogram Counters Mode.
+*
+* INPUTS:
+*        mode - Histogram Mode (GT_COUNT_RX_ONLY, GT_COUNT_TX_ONLY,
+*                    and GT_COUNT_RX_TX)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK           - on success
+*        GT_BAD_PARAM    - on bad parameter
+*        GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gstatsSetHistogramMode
+(
+    IN GT_QD_DEV                 *dev,
+    IN GT_HISTOGRAM_MODE        mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gstatsSetHistogramMode Called.\n"));
+    /* Only Gigabit Switch supports this status. */
+    if (!((IS_IN_DEV_GROUP(dev,DEV_GIGABIT_MANAGED_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_RMON_REALTIME_SUPPORT))))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    switch (mode)
+    {
+        case GT_COUNT_RX_ONLY:
+        case GT_COUNT_TX_ONLY:
+        case GT_COUNT_RX_TX:
+            break;
+        default:
+            DBG_INFO(("Failed.\n"));
+            return GT_BAD_PARAM;
+    }
+
+    data = (GT_U16)mode + 1;
+
+    /* Set the Histogram mode bit.                */
+    retVal = hwSetGlobalRegField(dev,QD_REG_STATS_OPERATION,10,2,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gstatsGetRealtimePortCounter
+*
+* DESCRIPTION:
+*        This routine gets a specific realtime counter of the given port
+*
+* INPUTS:
+*        port - the logical port number.
+*        counter - the counter which will be read
+*
+* OUTPUTS:
+*        statsData - points to 32bit data storage for the MIB counter
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gstatsGetRealtimePortCounter
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    IN  GT_STATS_COUNTERS3    counter,
+    OUT GT_U32            *statsData
+)
+{
+    GT_STATUS    retVal;
+    GT_U8        hwPort;         /* physical port number         */
+
+    DBG_INFO(("gstatsGetRealtimePortCounter Called.\n"));
+
+    /* translate logical port to physical port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_RMON)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_RMON_REALTIME_SUPPORT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = statsOperationPerform(dev,STATS_READ_REALTIME_COUNTER,hwPort,counter,(GT_VOID*)statsData);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (statsOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/****************************************************************************/
+/* Internal use functions.                                                  */
+/****************************************************************************/
+
+/*******************************************************************************
+* statsOperationPerform
+*
+* DESCRIPTION:
+*       This function is used by all stats control functions, and is responsible
+*       to write the required operation into the stats registers.
+*
+* INPUTS:
+*       statsOp       - The stats operation bits to be written into the stats
+*                     operation register.
+*       port        - port number
+*       counter     - counter to be read if it's read operation
+*
+* OUTPUTS:
+*       statsData   - points to the data storage where the MIB counter will be saved.
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+
+static GT_STATUS statsOperationPerform
+(
+    IN   GT_QD_DEV            *dev,
+    IN   GT_STATS_OPERATION   statsOp,
+    IN   GT_U8                port,
+    IN   GT_STATS_COUNTERS    counter,
+    OUT  GT_VOID              *statsData
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data,histoData; /* Data to be set into the      */
+                                    /* register.                    */
+    GT_U32 statsCounter;
+    GT_U32 lastCounter;
+    GT_U16            portNum;
+
+    gtSemTake(dev,dev->statsRegsSem,OS_WAIT_FOREVER);
+
+    if (!((IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_RMON_REALTIME_SUPPORT))))
+    {
+      if (IS_IN_DEV_GROUP(dev,DEV_MELODY_SWITCH))
+        lastCounter = (GT_U32)STATS2_Late;
+      else
+        lastCounter = (GT_U32)STATS_OutDiscards;
+    }
+    else
+    {
+        lastCounter = (GT_U32)STATS3_Late;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_RMON_PORT_BITS))
+    {
+        portNum = (port + 1) << 5;
+    }
+    else
+    {
+        portNum = (GT_U16)port;
+    }
+
+    /* Wait until the stats in ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 2;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_STATS_OPERATION;
+      regAccess.rw_reg_list[0].data = 15;
+      regAccess.rw_reg_list[1].cmd = HW_REG_READ;
+      regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[1].reg = QD_REG_STATS_OPERATION;
+      regAccess.rw_reg_list[1].data = 0;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->statsRegsSem);
+        return retVal;
+      }
+      histoData = qdLong2Short(regAccess.rw_reg_list[1].data);
+    }
+#else
+    data = 1;
+    while(data == 1)
+    {
+        retVal = hwGetGlobalRegField(dev,QD_REG_STATS_OPERATION,15,1,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->statsRegsSem);
+            return retVal;
+        }
+    }
+
+    /* Get the Histogram mode bit.                */
+    retVal = hwReadGlobalReg(dev,QD_REG_STATS_OPERATION,&histoData);
+    if(retVal != GT_OK)
+    {
+        gtSemGive(dev,dev->statsRegsSem);
+        return retVal;
+    }
+
+#endif
+    histoData &= 0xC00;
+
+    /* Set the STAT Operation register */
+    switch (statsOp)
+    {
+        case STATS_FLUSH_ALL:
+            data = (1 << 15) | (GT_STATS_FLUSH_ALL << 12) | histoData;
+            retVal = hwWriteGlobalReg(dev,QD_REG_STATS_OPERATION,data);
+            gtSemGive(dev,dev->statsRegsSem);
+            return retVal;
+
+        case STATS_FLUSH_PORT:
+            data = (1 << 15) | (GT_STATS_FLUSH_PORT << 12) | portNum | histoData;
+            retVal = hwWriteGlobalReg(dev,QD_REG_STATS_OPERATION,data);
+            gtSemGive(dev,dev->statsRegsSem);
+            return retVal;
+
+        case STATS_READ_COUNTER:
+            retVal = statsCapture(dev,port);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->statsRegsSem);
+                return retVal;
+            }
+
+            retVal = statsReadCounter(dev,counter,(GT_U32*)statsData);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->statsRegsSem);
+                return retVal;
+            }
+            break;
+
+        case STATS_READ_REALTIME_COUNTER:
+            retVal = statsReadRealtimeCounter(dev,port,counter,(GT_U32*)statsData);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->statsRegsSem);
+                return retVal;
+            }
+
+            break;
+
+        case STATS_READ_ALL:
+            retVal = statsCapture(dev,port);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->statsRegsSem);
+                return retVal;
+            }
+
+            for(statsCounter=0; statsCounter<=lastCounter; statsCounter++)
+            {
+                retVal = statsReadCounter(dev,statsCounter,((GT_U32*)statsData + statsCounter));
+                if(retVal != GT_OK)
+                {
+                    gtSemGive(dev,dev->statsRegsSem);
+                    return retVal;
+                }
+            }
+            break;
+
+        default:
+
+            gtSemGive(dev,dev->statsRegsSem);
+            return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->statsRegsSem);
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* statsCapture
+*
+* DESCRIPTION:
+*       This function is used to capture all counters of a port.
+*
+* INPUTS:
+*       port        - port number
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*        If Semaphore is used, Semaphore should be acquired before this function call.
+*******************************************************************************/
+static GT_STATUS statsCapture
+(
+    IN GT_QD_DEV            *dev,
+    IN GT_U8             port
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data, histoData;/* Data to be set into the      */
+                                    /* register.                    */
+    GT_U16            portNum;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_RMON_PORT_BITS))
+    {
+        portNum = (port + 1) << 5;
+    }
+    else
+    {
+        portNum = (GT_U16)port;
+    }
+
+    /* Get the Histogram mode bit.                */
+    retVal = hwReadGlobalReg(dev,QD_REG_STATS_OPERATION,&histoData);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    histoData &= 0xC00;
+
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_STATS_OPERATION;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+       while(data == 1)
+    {
+        retVal = hwGetGlobalRegField(dev,QD_REG_STATS_OPERATION,15,1,&data);
+        if(retVal != GT_OK)
+           {
+               return retVal;
+        }
+       }
+#endif
+
+    data = (1 << 15) | (GT_STATS_CAPTURE_PORT << 12) | portNum | histoData;
+    retVal = hwWriteGlobalReg(dev,QD_REG_STATS_OPERATION,data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* statsReadCounter
+*
+* DESCRIPTION:
+*       This function is used to read a captured counter.
+*
+* INPUTS:
+*       counter     - counter to be read if it's read operation
+*
+* OUTPUTS:
+*       statsData   - points to the data storage where the MIB counter will be saved.
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*        If Semaphore is used, Semaphore should be acquired before this function call.
+*******************************************************************************/
+static GT_STATUS statsReadCounter
+(
+    IN   GT_QD_DEV      *dev,
+    IN   GT_U32            counter,
+    OUT  GT_U32            *statsData
+)
+{
+    GT_STATUS   retVal;         /* Functions return value.            */
+    GT_U16      data, histoData;/* Data to be set into the  register. */
+#ifndef GT_RMGMT_ACCESS
+    GT_U16    counter3_2;     /* Counter Register Bytes 3 & 2       */
+    GT_U16    counter1_0;     /* Counter Register Bytes 1 & 0       */
+#endif
+
+    /* Get the Histogram mode bit.                */
+    retVal = hwReadGlobalReg(dev,QD_REG_STATS_OPERATION,&histoData);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    histoData &= 0xC00;
+
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_STATS_OPERATION;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+       while(data == 1)
+    {
+        retVal = hwGetGlobalRegField(dev,QD_REG_STATS_OPERATION,15,1,&data);
+        if(retVal != GT_OK)
+           {
+               return retVal;
+        }
+       }
+#endif
+
+    data = (GT_U16)((1 << 15) | (GT_STATS_READ_COUNTER << 12) | counter | histoData);
+    retVal = hwWriteGlobalReg(dev,QD_REG_STATS_OPERATION,data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 3;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_STATS_OPERATION;
+      regAccess.rw_reg_list[0].data = 15;
+      regAccess.rw_reg_list[1].cmd = HW_REG_READ;
+      regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[1].reg = QD_REG_STATS_COUNTER3_2;
+      regAccess.rw_reg_list[1].data = 0;
+      regAccess.rw_reg_list[2].cmd = HW_REG_READ;
+      regAccess.rw_reg_list[2].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[2].reg = QD_REG_STATS_COUNTER1_0;
+      regAccess.rw_reg_list[2].data = 0;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        return retVal;
+      }
+      *statsData = (regAccess.rw_reg_list[1].data << 16) | regAccess.rw_reg_list[2].data;
+    }
+#else
+    data = 1;
+       while(data == 1)
+    {
+        retVal = hwGetGlobalRegField(dev,QD_REG_STATS_OPERATION,15,1,&data);
+        if(retVal != GT_OK)
+           {
+               return retVal;
+        }
+       }
+
+    retVal = hwReadGlobalReg(dev,QD_REG_STATS_COUNTER3_2,&counter3_2);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    retVal = hwReadGlobalReg(dev,QD_REG_STATS_COUNTER1_0,&counter1_0);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    *statsData = (counter3_2 << 16) | counter1_0;
+#endif
+
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* statsReadRealtimeCounter
+*
+* DESCRIPTION:
+*       This function is used to read a realtime counter.
+*
+* INPUTS:
+*       port     - port to be accessed
+*       counter  - counter to be read if it's read operation
+*
+* OUTPUTS:
+*       statsData   - points to the data storage where the MIB counter will be saved.
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*        If Semaphore is used, Semaphore should be acquired before this function call.
+*******************************************************************************/
+static GT_STATUS statsReadRealtimeCounter
+(
+    IN   GT_QD_DEV      *dev,
+    IN   GT_U8             port,
+    IN   GT_U32            counter,
+    OUT  GT_U32            *statsData
+)
+{
+    GT_STATUS   retVal;         /* Functions return value.            */
+    GT_U16      data, histoData;/* Data to be set into the  register. */
+    GT_U16    counter3_2;     /* Counter Register Bytes 3 & 2       */
+    GT_U16    counter1_0;     /* Counter Register Bytes 1 & 0       */
+
+    /* Get the Histogram mode bit.                */
+    retVal = hwReadGlobalReg(dev,QD_REG_STATS_OPERATION,&histoData);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    histoData &= 0xC00;
+
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_STATS_OPERATION;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+       while(data == 1)
+    {
+        retVal = hwGetGlobalRegField(dev,QD_REG_STATS_OPERATION,15,1,&data);
+        if(retVal != GT_OK)
+           {
+               return retVal;
+        }
+       }
+#endif
+
+    data = (GT_U16)((1 << 15) | (GT_STATS_READ_COUNTER << 12) | ((port+1) << 5) | counter | histoData);
+    retVal = hwWriteGlobalReg(dev,QD_REG_STATS_OPERATION,data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_STATS_OPERATION;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+       while(data == 1)
+    {
+        retVal = hwGetGlobalRegField(dev,QD_REG_STATS_OPERATION,15,1,&data);
+        if(retVal != GT_OK)
+           {
+               return retVal;
+        }
+       }
+#endif
+
+    retVal = hwReadGlobalReg(dev,QD_REG_STATS_COUNTER3_2,&counter3_2);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    retVal = hwReadGlobalReg(dev,QD_REG_STATS_COUNTER1_0,&counter1_0);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    *statsData = (counter3_2 << 16) | counter1_0;
+
+    return GT_OK;
+
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortStat.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortStat.c
new file mode 100644
index 000000000000..22a304cfe9e0
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortStat.c
@@ -0,0 +1,391 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtPortStat.c
+*
+* DESCRIPTION:
+*       API implementation for switch port rx/tx counters.
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+
+/*******************************************************************************
+* gprtSetCtrMode
+*
+* DESCRIPTION:
+*       This routine sets the port rx/tx counters mode of operation.
+*
+* INPUTS:
+*       mode  - the counter mode.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetCtrMode
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_CTR_MODE  mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gprtSetCtrMode Called.\n"));
+
+    if (IS_IN_DEV_GROUP(dev,DEV_88E6093_FAMILY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,8,1,(GT_U16)mode);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("gprtSetCtrMode Failed .\n"));
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("gprtSetCtrMode OK .\n"));
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gprtClearAllCtr
+*
+* DESCRIPTION:
+*       This routine clears all port rx/tx counters.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtClearAllCtr
+(
+    IN GT_QD_DEV    *dev
+)
+{
+    IN GT_STATUS     retVal;         /* Functions return value.      */
+    IN GT_U16        mode;           /* hold counters current mode   */
+
+    DBG_INFO(("gprtClearAllCtr Called.\n"));
+
+    if (IS_IN_DEV_GROUP(dev,DEV_88E6093_FAMILY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* get counter current mode  */
+    if(hwGetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,8,1,&mode) != GT_OK)
+    {
+        DBG_INFO(("Failed (Get field).\n"));
+        return GT_FAIL;
+    }
+    /* write opposite value to reset counter */
+    if(hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,8,1,(GT_U16)(1 - mode)) != GT_OK)
+    {
+        DBG_INFO(("Failed (Get field).\n"));
+        return GT_FAIL;
+    }
+    /* restore counters mode */
+    retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,8,1,mode);
+
+    DBG_INFO(("OK.\n"));
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gprtGetPortCtr
+*
+* DESCRIPTION:
+*       This routine gets the port rx/tx counters.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       ctr - the counters value.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPortCtr
+(
+    IN  GT_QD_DEV       *dev,
+    IN  GT_LPORT        port,
+    OUT GT_PORT_STAT    *ctr
+)
+{
+    GT_U16          count;          /* counters current value       */
+    GT_U8           hwPort;         /* physical port number         */
+
+    DBG_INFO(("gprtGetPortCtr Called.\n"));
+
+    if (IS_IN_DEV_GROUP(dev,DEV_88E6093_FAMILY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(ctr  == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+    /* translate logical port to physical port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* get rx counter value  */
+    if(hwReadPortReg(dev,hwPort, QD_REG_RX_COUNTER, &count) != GT_OK)
+    {
+        DBG_INFO(("Failed (Read Rx).\n"));
+        return GT_FAIL;
+    }
+    ctr->rxCtr = count;
+    /* get tx counter value  */
+    if(hwReadPortReg(dev,hwPort, QD_REG_TX_COUNTER, &count) != GT_OK)
+    {
+        DBG_INFO(("Failed (Read Tx).\n"));
+        return GT_FAIL;
+    }
+    ctr->txCtr = count;
+
+    if ((IS_IN_DEV_GROUP(dev,DEV_ENHANCED_FE_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY)))
+    {
+        /* get dropped counter value  */
+        if(hwReadPortReg(dev,hwPort, QD_REG_DROPPED_COUNTER, &count) != GT_OK)
+        {
+            DBG_INFO(("Failed (Read Tx).\n"));
+            return GT_FAIL;
+        }
+        ctr->dropped = count;
+    }
+    else
+        ctr->dropped = 0;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gprtGetPortCtr2
+*
+* DESCRIPTION:
+*       This routine gets the port InDiscards, InFiltered, and OutFiltered counters.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       ctr - the counters value.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPortCtr2
+(
+    IN  GT_QD_DEV       *dev,
+    IN  GT_LPORT        port,
+    OUT GT_PORT_STAT2   *ctr
+)
+{
+#ifndef GT_RMGMT_ACCESS
+    GT_U16          count;          /* counters current value       */
+#endif
+    GT_U8           hwPort;         /* physical port number         */
+
+    DBG_INFO(("gprtGetPortCtr2 Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_88E6093_FAMILY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(ctr  == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* translate logical port to physical port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 4;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_READ;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, hwPort, PORT_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_INDISCARD_LO_COUNTER;
+      regAccess.rw_reg_list[0].data = 0;
+      regAccess.rw_reg_list[1].cmd = HW_REG_READ;
+      regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, hwPort, PORT_ACCESS);
+      regAccess.rw_reg_list[1].reg = QD_REG_INDISCARD_HI_COUNTER;
+      regAccess.rw_reg_list[1].data = 0;
+      regAccess.rw_reg_list[2].cmd = HW_REG_READ;
+      regAccess.rw_reg_list[2].addr = CALC_SMI_DEV_ADDR(dev, hwPort, PORT_ACCESS);
+      regAccess.rw_reg_list[2].reg = QD_REG_INFILTERED_COUNTER;
+      regAccess.rw_reg_list[2].data = 0;
+      regAccess.rw_reg_list[3].cmd = HW_REG_READ;
+      regAccess.rw_reg_list[3].addr = CALC_SMI_DEV_ADDR(dev, hwPort, PORT_ACCESS);
+      regAccess.rw_reg_list[3].reg = QD_REG_OUTFILTERED_COUNTER;
+      regAccess.rw_reg_list[3].data = 0;
+      if(hwAccessMultiRegs(dev, &regAccess) != GT_OK)
+      {
+        return GT_FAIL;
+      }
+        ctr->inDiscardLo = qdLong2Short(regAccess.rw_reg_list[0].data);
+        ctr->inDiscardHi = qdLong2Short(regAccess.rw_reg_list[1].data);
+        ctr->inFiltered = qdLong2Short(regAccess.rw_reg_list[2].data);
+        ctr->outFiltered = qdLong2Short(regAccess.rw_reg_list[3].data);
+    }
+#else
+    /* get InDiscard Low counter value  */
+    if(hwReadPortReg(dev,hwPort, QD_REG_INDISCARD_LO_COUNTER, &count) != GT_OK)
+    {
+        DBG_INFO(("Failed (Read inDiscardLo).\n"));
+        return GT_FAIL;
+    }
+    ctr->inDiscardLo = count;
+    /* get InDiscard High counter value  */
+    if(hwReadPortReg(dev,hwPort, QD_REG_INDISCARD_HI_COUNTER, &count) != GT_OK)
+    {
+        DBG_INFO(("Failed (Read inDiscardHi).\n"));
+        return GT_FAIL;
+    }
+    ctr->inDiscardHi = count;
+
+    /* get InFiltered counter value  */
+    if(hwReadPortReg(dev,hwPort, QD_REG_INFILTERED_COUNTER, &count) != GT_OK)
+    {
+        DBG_INFO(("Failed (Read inFiltered).\n"));
+        return GT_FAIL;
+    }
+    ctr->inFiltered = count;
+
+    /* get OutFiltered counter value  */
+    if(hwReadPortReg(dev,hwPort, QD_REG_OUTFILTERED_COUNTER, &count) != GT_OK)
+    {
+        DBG_INFO(("Failed (Read outFiltered).\n"));
+        return GT_FAIL;
+    }
+    ctr->outFiltered = count;
+#endif
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+#ifdef DEBUG_FEATURE /* this is a debug feature*/
+/*******************************************************************************
+* gprtGetPortQueueCtr
+*
+* DESCRIPTION:
+*       This routine gets the port queue counters.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       ctr - the counters value.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPortQueueCtr
+(
+    IN  GT_QD_DEV       *dev,
+    IN  GT_LPORT        port,
+    OUT GT_PORT_Q_STAT  *ctr
+)
+{
+    GT_U16          count;          /* counters current value       */
+    GT_U8           hwPort;         /* physical port number         */
+
+    DBG_INFO(("gprtGetPortQueueCtr Called.\n"));
+
+    if(ctr  == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* translate logical port to physical port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* get queue counter value  */
+    if(hwReadPortReg(dev,hwPort, QD_REG_Q_COUNTER, &count) != GT_OK)
+    {
+        DBG_INFO(("Failed (Read Rx).\n"));
+        return GT_FAIL;
+    }
+
+    /* the fist 5 bits(4:0) are OutQ_Size */
+    ctr->OutQ_Size = count & 0x1F;
+
+    /* the Rsv_Size are bits 15:8 */
+    ctr->Rsv_Size  = count >> 8;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+#endif
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortStatus.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortStatus.c
new file mode 100644
index 000000000000..bf2786ce445f
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPortStatus.c
@@ -0,0 +1,2235 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtPortCtrl.c
+*
+* DESCRIPTION:
+*       API implementation for switch port status.
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*       $Revision: 3 $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+#include <gtDrvConfig.h>
+
+typedef struct _GT_Px_MODE
+{
+    GT_BOOL miiEn;
+    GT_BOOL portMode;
+    GT_BOOL phyMode;
+    GT_PORT_SPEED_MODE speed;
+    GT_BOOL duplex;
+} GT_Px_MODE;
+
+/*******************************************************************************
+* procPx_Mode
+*
+* DESCRIPTION:
+*       This routine retrieves Px_MODE and analize it.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - Px_MODE structure
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+*******************************************************************************/
+GT_STATUS procPx_Mode
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_Px_MODE   *mode
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("procPx_Mode Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the force flow control bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,7,5,&data);
+    if (retVal != GT_OK)
+        return retVal;
+
+    if(data & 0x1)
+    {
+        /* MII Interface Enabled. */
+        mode->miiEn = GT_TRUE;        /* Mii Interface Enabled */
+
+        switch(data >> 1)
+        {
+            case 0:
+            case 1:
+            case 4:
+            case 5:
+                mode->speed = PORT_SPEED_10_MBPS;
+                mode->duplex = GT_FALSE;    /* half duplex */
+                mode->portMode = GT_FALSE;    /* not standard Mii, either SNI or 200 Mii */
+                mode->phyMode = GT_TRUE;    /* PHY Mode */
+                break;
+            case 2:
+            case 6:
+                mode->speed = PORT_SPEED_10_MBPS;
+                mode->duplex = GT_TRUE;        /* full duplex */
+                mode->portMode = GT_FALSE;    /* not standard Mii, either SNI or 200 Mii */
+                mode->phyMode = GT_TRUE;    /* PHY Mode */
+                break;
+            case 3:
+                mode->speed = PORT_SPEED_200_MBPS;
+                mode->duplex = GT_TRUE;        /* full duplex */
+                mode->portMode = GT_FALSE;    /* not standard Mii, either SNI or 200 Mii */
+                mode->phyMode = GT_FALSE;    /* MAC Mode */
+                break;
+            case 7:
+                mode->speed = PORT_SPEED_200_MBPS;
+                mode->duplex = GT_TRUE;        /* full duplex */
+                mode->portMode = GT_FALSE;    /* not standard Mii, either SNI or 200 Mii */
+                mode->phyMode = GT_TRUE;    /* PHY Mode */
+                break;
+            case 8:
+                mode->speed = PORT_SPEED_UNKNOWN;
+                mode->duplex = GT_FALSE;    /* half duplex */
+                mode->portMode = GT_TRUE;    /* Mii Mode */
+                mode->phyMode = GT_FALSE;    /* MAC Mode */
+                break;
+            case 9:
+                mode->speed = PORT_SPEED_UNKNOWN;
+                mode->duplex = GT_FALSE;    /* half duplex */
+                mode->portMode = GT_TRUE;    /* RMii Mode */
+                mode->phyMode = GT_TRUE;    /* PHY Mode */
+                break;
+            case 10:
+                mode->speed = PORT_SPEED_UNKNOWN;
+                mode->duplex = GT_TRUE;        /* full duplex */
+                mode->portMode = GT_TRUE;    /* Mii Mode */
+                mode->phyMode = GT_FALSE;    /* MAC Mode */
+                break;
+            case 11:
+                mode->speed = PORT_SPEED_UNKNOWN;
+                mode->duplex = GT_TRUE;        /* full duplex */
+                mode->portMode = GT_TRUE;    /* RMii Mode */
+                mode->phyMode = GT_TRUE;    /* PHY Mode */
+                break;
+            case 12:
+                mode->speed = PORT_SPEED_10_MBPS;
+                mode->duplex = GT_FALSE;    /* half duplex */
+                mode->portMode = GT_TRUE;    /* Mii Mode */
+                mode->phyMode = GT_TRUE;    /* PHY Mode */
+                break;
+            case 13:
+                mode->speed = PORT_SPEED_100_MBPS;
+                mode->duplex = GT_FALSE;    /* half duplex */
+                mode->portMode = GT_TRUE;    /* Mii Mode */
+                mode->phyMode = GT_TRUE;    /* PHY Mode */
+                break;
+            case 14:
+                mode->speed = PORT_SPEED_10_MBPS;
+                mode->duplex = GT_TRUE;        /* full duplex */
+                mode->portMode = GT_TRUE;    /* Mii Mode */
+                mode->phyMode = GT_TRUE;    /* PHY Mode */
+                break;
+            case 15:
+                mode->speed = PORT_SPEED_100_MBPS;
+                mode->duplex = GT_TRUE;        /* full duplex */
+                mode->portMode = GT_TRUE;    /* Mii Mode */
+                mode->phyMode = GT_TRUE;    /* PHY Mode */
+                break;
+            default:
+                return GT_FAIL;
+        }
+    }
+    else
+    {
+        /* MII Interface Disabled. */
+        mode->miiEn = GT_FALSE;
+
+        switch((data >> 1) & 0x3)
+        {
+            case 0:
+                mode->speed = PORT_SPEED_10_MBPS;
+                mode->duplex = GT_FALSE;    /* half duplex */
+                mode->portMode = GT_TRUE;    /* MII Mode */
+                mode->phyMode = GT_TRUE;    /* PHY Mode */
+                break;
+            case 1:
+                mode->speed = PORT_SPEED_100_MBPS;
+                mode->duplex = GT_FALSE;    /* half duplex */
+                mode->portMode = GT_TRUE;    /* MII Mode */
+                mode->phyMode = GT_TRUE;    /* PHY Mode */
+                break;
+            case 2:
+                mode->speed = PORT_SPEED_10_MBPS;
+                mode->duplex = GT_TRUE;        /* full duplex */
+                mode->portMode = GT_TRUE;    /* MII Mode */
+                mode->phyMode = GT_TRUE;    /* PHY Mode */
+                break;
+            case 3:
+                mode->speed = PORT_SPEED_100_MBPS;
+                mode->duplex = GT_TRUE;        /* full duplex */
+                mode->portMode = GT_TRUE;    /* MII Mode */
+                mode->phyMode = GT_TRUE;    /* PHY Mode */
+                break;
+            default:
+                return GT_FAIL;
+        }
+    }
+
+    /* return */
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gprtGetPartnerLinkPause
+*
+* DESCRIPTION:
+*       This routine retrives the link partner pause state.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       state - GT_TRUE for enable  or GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPartnerLinkPause
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_BOOL   *state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetPartnerLinkPause Called.\n"));
+
+    /* Gigabit Switch does not support this status. gprtGetPauseEn is supported instead. */
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the force flow control bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,15,1,&data);
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetPauseEn
+*
+* DESCRIPTION:
+*        This routine retrives the link pause state.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE for enable or GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        If set MAC Pause (for Full Duplex flow control) is implemented in the
+*        link partner and in MyPause
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPauseEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL     *state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetPauseEn Called.\n"));
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+    /* Get the force flow control bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,15,1,&data);
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetSelfLinkPause
+*
+* DESCRIPTION:
+*       This routine retrives the link pause state.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       state - GT_TRUE for enable  or GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetSelfLinkPause
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_BOOL   *state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetSelfLinkPause Called.\n"));
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+    /* Get the force flow control bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,14,1,&data);
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetResolve
+*
+* DESCRIPTION:
+*       This routine retrives the resolve state.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       state - GT_TRUE for Done  or GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetResolve
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_BOOL   *state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetResolve Called.\n"));
+
+    /* Gigabit Switch does not support this status. */
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+    /* Get the force flow control bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,13,1,&data);
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* return */
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetHdFlow
+*
+* DESCRIPTION:
+*        This routine retrives the half duplex flow control value.
+*        If set, Half Duplex back pressure will be used on this port if this port
+*        is in a half duplex mode.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE for enable or GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetHdFlow
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL     *state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetHdFlow Called.\n"));
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+    /* Get the force flow control bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,13,1,&data);
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetPHYDetect
+*
+* DESCRIPTION:
+*        This routine retrives the information regarding PHY detection.
+*        If set, An 802.3 PHY is attached to this port.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if connected or GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPHYDetect
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL     *state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetPHYDetect Called.\n"));
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+    /* Get the force flow control bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,12,1,&data);
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    /* return */
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtSetPHYDetect
+*
+* DESCRIPTION:
+*        This routine sets PHYDetect bit which make PPU change its polling.
+*        PPU's pool routine uses these bits to determine which port's to poll
+*        PHYs on for Link, Duplex, Speed, and Flow Control.
+*
+* INPUTS:
+*        port - the logical port number.
+*        state - GT_TRUE or GT_FALSE
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        This function should not be called if gsysGetPPUState returns
+*        PPU_STATE_ACTIVE.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetPHYDetect
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_BOOL      state
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetPHYDetect Called.\n"));
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Set the PHY Detect bit.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,12,1,(GT_U16)state);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* return */
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gprtGetLinkState
+*
+* DESCRIPTION:
+*       This routine retrives the link state.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       state - GT_TRUE for Up  or GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetLinkState
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_BOOL   *state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U8            bitNumber;
+
+    DBG_INFO(("gprtGetLinkState Called.\n"));
+
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        bitNumber = 11;
+    }
+    else
+    {
+        bitNumber = 12;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the force flow control bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,bitNumber,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetPortMode
+*
+* DESCRIPTION:
+*       This routine retrives the port mode.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_TRUE for MII 10/100 or RMII 100,
+*               GT_FALSE for SNI 10 or MII 200
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPortMode
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_BOOL   *mode
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_Px_MODE        pxMode;
+
+    DBG_INFO(("gprtGetPortMode Called.\n"));
+
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_Px_MODE))
+    {
+        retVal = procPx_Mode(dev,port,&pxMode);
+        if (retVal != GT_OK)
+        {
+            DBG_INFO(("procPx_Mode return Fail\n"));
+            return retVal;
+        }
+        *mode = pxMode.portMode;
+        return GT_OK;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the force flow control bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,11,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *mode);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* return */
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gprtGetPhyMode
+*
+* DESCRIPTION:
+*       This routine retrives the PHY mode.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_TRUE for MII PHY Mode,
+*               GT_FALSE for MII MAC Mode
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPhyMode
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_BOOL   *mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_Px_MODE        pxMode;
+
+    DBG_INFO(("gprtGetPhyMode Called.\n"));
+
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_Px_MODE))
+    {
+        retVal = procPx_Mode(dev,port,&pxMode);
+        if (retVal != GT_OK)
+        {
+            DBG_INFO(("procPx_Mode return Fail\n"));
+            return retVal;
+        }
+        *mode = pxMode.phyMode;
+        return GT_OK;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the force flow control bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,10,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *mode);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* return */
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gprtGetDuplex
+*
+* DESCRIPTION:
+*       This routine retrives the port duplex mode.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_TRUE for Full  or GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetDuplex
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_BOOL   *mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U8            bitNumber;
+    GT_Px_MODE        pxMode;
+
+    DBG_INFO(("gprtGetDuplex Called.\n"));
+
+    if (IS_IN_DEV_GROUP(dev,DEV_Px_MODE))
+    {
+        retVal = procPx_Mode(dev,port,&pxMode);
+        if (retVal != GT_OK)
+        {
+            DBG_INFO(("procPx_Mode return Fail\n"));
+            return retVal;
+        }
+        *mode = pxMode.duplex;
+        return GT_OK;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        bitNumber = 10;
+    }
+    else
+    {
+        bitNumber = 9;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the force flow control bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,bitNumber,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *mode);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* return */
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gprtGetSpeed
+*
+* DESCRIPTION:
+*       This routine retrives the port speed.
+*
+* INPUTS:
+*       speed - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_TRUE for 100Mb/s  or GT_FALSE otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetSpeed
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_BOOL   *speed
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_Px_MODE        pxMode;
+
+    DBG_INFO(("gprtGetSpeed Called.\n"));
+
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_Px_MODE))
+    {
+        retVal = procPx_Mode(dev,port,&pxMode);
+        if (retVal != GT_OK)
+        {
+            DBG_INFO(("procPx_Mode return Fail\n"));
+            return retVal;
+        }
+        *speed = (pxMode.speed==PORT_SPEED_100_MBPS)?GT_TRUE:GT_FALSE;
+        return GT_OK;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+    /* Get the force flow control bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,8,1,&data);
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *speed);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetSpeedMode
+*
+* DESCRIPTION:
+*       This routine retrives the port speed.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_PORT_SPEED_MODE type.
+*                (PORT_SPEED_1000_MBPS,PORT_SPEED_100_MBPS, PORT_SPEED_10_MBPS,
+*                etc.)
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetSpeedMode
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_PORT_SPEED_MODE   *speed
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_Px_MODE        pxMode;
+
+    DBG_INFO(("gprtGetSpeed Called.\n"));
+
+    if (IS_IN_DEV_GROUP(dev,DEV_Px_MODE))
+    {
+        retVal = procPx_Mode(dev,port,&pxMode);
+        if (retVal != GT_OK)
+        {
+            DBG_INFO(("procPx_Mode return Fail\n"));
+            return retVal;
+        }
+        *speed = pxMode.speed;
+        return GT_OK;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        /* Get the force flow control bit.  */
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,8,2,&data);
+    }
+    else
+    {
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,8,1,&data);
+    }
+
+    *speed = (GT_PORT_SPEED_MODE)data;
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* return */
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gprtSetDuplex
+*
+* DESCRIPTION:
+*       This routine sets the duplex mode of MII/SNI/RMII ports.
+*
+* INPUTS:
+*       port -     the logical port number.
+*                (for FullSail, it will be port 2, and for ClipperShip,
+*                it could be either port 5 or port 6.)
+*       mode -  GT_TRUE for Full Duplex,
+*                GT_FALSE for Half Duplex.
+*
+* OUTPUTS: None
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetDuplex
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    IN  GT_BOOL   mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetDuplex Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_MII_DUPLEX_CONFIG)) != GT_OK)
+    {
+        return retVal;
+    }
+
+    /* check if phy is not configurable. */
+    if(IS_CONFIGURABLE_PHY(dev, hwPort))
+    {
+        /*
+         * phy is configurable. this function is not for the port where phy
+         * can be configured.
+         */
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Set the duplex mode. */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,9,1,(GT_U16)mode);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetHighErrorRate
+*
+* DESCRIPTION:
+*        This routine retrives the PCS High Error Rate.
+*        This routine returns GT_TRUE if the rate of invalid code groups seen by
+*        PCS has exceeded 10 to the power of -11.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE or GT_FALSE
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetHighErrorRate
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetHighErrorRate Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_MGMII_STATUS) || IS_IN_DEV_GROUP(dev,DEV_200BASE_CFG))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the high error rate bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,6,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* return */
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetMGMII
+*
+* DESCRIPTION:
+*        SERDES Interface mode. When this bit is cleared to a zero and a PHY is
+*        detected connected to this port, the SERDES interface between this port
+*        and the PHY will be SGMII.  When this bit is set to a one and a PHY is
+*        detected connected to this port, the SERDES interface between this port
+*        and the PHY will be MGMII. When no PHY is detected on this port and the
+*        SERDES interface is being used, it will be configured in 1000Base-X mode.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE or GT_FALSE
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetMGMII
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetMGMII Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_MGMII_STATUS))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the high error rate bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,6,1,&data);
+
+    /* translate binary to BOOL  */
+    if (IS_IN_DEV_GROUP(dev,DEV_MGMII_REVERSE_STATUS))
+    {
+        BIT_2_BOOL_R(data, *state);
+    }
+    else
+    {
+        BIT_2_BOOL(data, *state);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtSetMGMII
+*
+* DESCRIPTION:
+*        SERDES Interface mode. When this bit is cleared to a zero and a PHY is
+*        detected connected to this port, the SERDES interface between this port
+*        and the PHY will be SGMII.  When this bit is set toa one and a PHY is
+*        detected connected to this port, the SERDES interface between this port
+*        and the PHY will be MGMII. When no PHY is detected on this port and the
+*        SERDES interface is being used, it will be configured in 1000Base-X mode.
+*
+* INPUTS:
+*        port - the logical port number.
+*        state - GT_TRUE or GT_FALSE
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetMGMII
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_BOOL      state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtSetMGMII Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_MGMII_STATUS))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (IS_IN_DEV_GROUP(dev,DEV_MGMII_REVERSE_STATUS))
+    {
+        BOOL_2_BIT_R(state,data);
+    }
+    else
+    {
+        BOOL_2_BIT(state,data);
+    }
+
+    /* Get the high error rate bit.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,6,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* return */
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gprtGetTxPaused
+*
+* DESCRIPTION:
+*        This routine retrives Transmit Pause state.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if Rx MAC receives a PAUSE frame with none-zero Puase Time
+*                  GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetTxPaused
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetTxPaused Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FC_STATUS))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the TxPaused bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,5,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetFlowCtrl
+*
+* DESCRIPTION:
+*        This routine retrives Flow control state.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if Rx MAC determines that no more data should be
+*                    entering this port.
+*                  GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetFlowCtrl
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetFlowCtrl Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FC_STATUS))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the FlowCtrl bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,4,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* return */
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetFdFlowDis
+*
+* DESCRIPTION:
+*        This routine retrives the read time value of the Full Duplex Flow Disable.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if Full Duplex Flow Disable.
+*                   GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetFdFlowDis
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetFdFlowDis Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FC_DIS_STATUS))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the FdFlowDis bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,3,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* return */
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetHdFlowDis
+*
+* DESCRIPTION:
+*        This routine retrives the read time value of the Half Duplex Flow Disable.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if Half Duplex Flow Disable.
+*                   GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetHdFlowDis
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetHdFlowDis Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FC_DIS_STATUS))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the HdFlowDis bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,2,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* return */
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetPxMode
+*
+* DESCRIPTION:
+*        This routine retrives 4 bits of Px_MODE Configuration value.
+*        If speed and duplex modes are forced, the returned mode value would be
+*        different from the configuration pin values.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - Px_MODE configuration value
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetPxMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_U32      *mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetPxMode Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_Px_MODE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the Px_Mode bits.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,8,4,&data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    *mode = (GT_U32) data;
+
+    /* return */
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetMiiInterface
+*
+* DESCRIPTION:
+*        This routine retrives Mii Interface Mode.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if Mii Interface is enabled,
+*                  GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetMiiInterface
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetMiiInterface Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_Px_MODE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the Mii bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,7,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* return */
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetOutQSize
+*
+* DESCRIPTION:
+*        This routine gets egress queue size counter value.
+*        This counter reflects the current number of Egress buffers switched to
+*        this port. This is the total number of buffers across all four priority
+*        queues.
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        count - egress queue size counter value
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetOutQSize
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_U16        *count
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetOutQSize Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_OUT_Q_SIZE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get OutQ_Size.            */
+    if (IS_IN_DEV_GROUP(dev,DEV_Q_COUNTER_TABLE))
+    {
+        if((retVal = hwWritePortReg(dev,hwPort, QD_REG_Q_COUNTER, 0x8000)) != GT_OK)
+            return retVal;
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_Q_COUNTER,0,9,count);
+    }
+    else if (IS_IN_DEV_GROUP(dev,DEV_OUT_Q_512))
+    {
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_Q_COUNTER,7,9,count);
+    }
+    else
+    {
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_Q_COUNTER,8,8,count);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetBufHigh
+*
+* DESCRIPTION:
+*        Output from QC telling the MAC that it should perform Flow Control.
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        bufHigh - GT_TRUE, if Flow control required
+*                  GT_FALSE, otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetBufHigh
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *bufHigh
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16            data;
+
+    DBG_INFO(("gprtGetBufHigh Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FULL_Q_COUNTER))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get BufHigh.            */
+    if (IS_IN_DEV_GROUP(dev,DEV_Q_COUNTER_TABLE))
+    {
+        if((retVal = hwWritePortReg(dev,hwPort, QD_REG_Q_COUNTER, 0x9000)) != GT_OK)
+            return retVal;
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_Q_COUNTER,1,1,&data);
+    }
+    else
+    {
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_Q_COUNTER,6,1,&data);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *bufHigh);
+
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetFcEn
+*
+* DESCRIPTION:
+*        Input into the QC telling it that Flow Control is enabled on this port.
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        fcEn - GT_TRUE, if Flow control is enabled
+*               GT_FALSE, otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetFcEn
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *fcEn
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16            data;
+
+    DBG_INFO(("gprtGetFcEn Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FULL_Q_COUNTER))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get FcEn.            */
+    if (IS_IN_DEV_GROUP(dev,DEV_Q_COUNTER_TABLE))
+    {
+        if((retVal = hwWritePortReg(dev,hwPort, QD_REG_Q_COUNTER, 0xa000)) != GT_OK)
+            return retVal;
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_Q_COUNTER,0,1,&data);
+    }
+    else
+    {
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_Q_COUNTER,5,1,&data);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *fcEn);
+
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetRsvSize
+*
+* DESCRIPTION:
+*        This routine gets Ingress reserved queue size counter.
+*        This counter reflects the current number of reserved ingress buffers
+*        assigned to this port.
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        count - reserved ingress queue size counter value
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetRsvSize
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_U16        *count
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetRsvSize Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_OUT_Q_SIZE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get Rsv_Size.            */
+    if (IS_IN_DEV_GROUP(dev,DEV_Q_COUNTER_TABLE))
+    {
+        if((retVal = hwWritePortReg(dev,hwPort, QD_REG_Q_COUNTER, 0x9000)) != GT_OK)
+            return retVal;
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_Q_COUNTER,0,9,count);
+    }
+    else
+    {
+        retVal = hwGetPortRegField(dev,hwPort, QD_REG_Q_COUNTER,0,5,count);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetQSizePerQPri
+*
+* DESCRIPTION:
+*        This routine gets egress queue size for port's each QPri (0 ~ 3).
+*
+* INPUTS:
+*        port - the logical port number
+*
+* OUTPUTS:
+*        counts - egress queue size per QPri (should be 4 * 16bytes)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gprtGetQSizePerQPri
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_U16        *counts
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetQSizePerQPri Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_Q_COUNTER_TABLE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if((retVal = hwWritePortReg(dev,hwPort, QD_REG_Q_COUNTER, 0x800)) != GT_OK)
+        return retVal;
+
+    if((retVal = hwGetPortRegField(dev,hwPort, QD_REG_Q_COUNTER,0,9,&counts[0])) != GT_OK)
+        return retVal;
+
+    if((retVal = hwGetPortRegField(dev,hwPort, QD_REG_Q_COUNTER,0,9,&counts[1])) != GT_OK)
+        return retVal;
+
+    if((retVal = hwGetPortRegField(dev,hwPort, QD_REG_Q_COUNTER,0,9,&counts[2])) != GT_OK)
+        return retVal;
+
+    if((retVal = hwGetPortRegField(dev,hwPort, QD_REG_Q_COUNTER,0,9,&counts[3])) != GT_OK)
+        return retVal;
+
+    DBG_INFO(("OK.\n"));
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gprtGetC_Duplex
+*
+* DESCRIPTION:
+*        This routine retrives Port 9's duplex configuration mode determined
+*        at reset.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - GT_TRUE if configured as Full duplex operation
+*                  GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        Return value is valid only if the given port is 9.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetC_Duplex
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_BOOL      *state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetC_Duplex Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the C_Duplex bit.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,3,1,&data);
+
+    /* translate binary to BOOL  */
+    BIT_2_BOOL(data, *state);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* return */
+    return retVal;
+}
+
+/*******************************************************************************
+* gprtGetC_Mode
+*
+* DESCRIPTION:
+*        This routine retrives port's interface type configuration mode
+*        determined at reset.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        state - one of value in GT_PORT_CONFIG_MODE enum type
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        Return value is valid only if the given port is 9.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetC_Mode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    OUT GT_PORT_CONFIG_MODE   *state
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gprtGetC_Mode Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Get the C_Mode bits.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_STATUS,0,3,&data);
+
+    /* translate binary to BOOL  */
+    *state = (GT_PORT_CONFIG_MODE)data;
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    /* return */
+    return retVal;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPriTable.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPriTable.c
new file mode 100644
index 000000000000..8b1a8a197492
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtPriTable.c
@@ -0,0 +1,1110 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtPriTable.c
+*
+* DESCRIPTION:
+*       API definitions for Priority Override Table
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*       $Revision: $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtSem.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+
+/*******************************************************************************
+* gsysSetQPriOverrideTable
+*
+* DESCRIPTION:
+*       Queue Priority Override.
+*        When a frame enters a port, its type is determined and the type is used
+*        to access the Queue Priority Table. If the type's qPriEn (in GT_QPRI_TBL_ENTRY
+*        structure) is enabled, then the frame's Queue Priority will be overridden
+*        with the value written in qPriority (in GT_QPRI_TBL_ENTRY structure).
+*        Frame Types supported are:
+*            FTYPE_DSA_TO_CPU_BPDU -
+*                Used on multicast DSA To_CPU frames with a Code of 0x0 (BPDU/MGMT).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_F2R -
+*                Used on DSA To_CPU frames with a Code of 0x1 (Frame to Register
+*                Reply). Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_IGMP -
+*                Used on DSA To_CPU frames with a Code of 0x2 (IGMP/MLD Trap)
+*                and on non-DSA Control frames that are IGMP or MLD trapped
+*            FTYPE_DSA_TO_CPU_TRAP -
+*                Used on DSA To_CPU frames with a Code of 0x3 (Policy Trap) and
+*                on non-DSA Control frames that are Policy Trapped
+*            FTYPE_DSA_TO_CPU_ARP -
+*                Used on DSA To_CPU frames with a Code of 0x4 (ARP Mirror) and
+*                on non-DSA Control frames that are ARP Mirrored (see gprtSetARPtoCPU API).
+*            FTYPE_DSA_TO_CPU_MIRROR -
+*                Used on DSA To_CPU frames with a Code of 0x5 (Policy Mirror) and
+*                on non-DSA Control frames that are Policy Mirrored (see gprtSetPolicy API).
+*            FTYPE_DSA_TO_CPU_RESERVED -
+*                Used on DSA To_CPU frames with a Code of 0x6 (Reserved). Not
+*                used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_UCAST_MGMT -
+*                Used on unicast DSA To_CPU frames with a Code of 0x0 (unicast
+*                MGMT). Not used on non-DSA Control frames.
+*            FTYPE_DSA_FROM_CPU -
+*                Used on DSA From_CPU frames. Not used on non-DSA Control frame
+*            FTYPE_DSA_CROSS_CHIP_FC -
+*                Used on DSA Cross Chip Flow Control frames (To_Sniffer Flow
+*                Control). Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_EGRESS_MON -
+*                Used on DSA Cross Chip Egress Monitor frames (To_Sniffer Tx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_INGRESS_MON -
+*                Used on DSA Cross Chip Ingress Monitor frames (To_Sniffer Rx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_PORT_ETYPE_MATCH -
+*                Used on normal network ports (see gprtSetFrameMode API)
+*                on frames whose Ethertype matches the port's PortEType register.
+*                Not used on non-DSA Control frames.
+*            FTYPE_BCAST_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain a Broadcast
+*                destination address. Not used on DSA Control frames.
+*            FTYPE_PPPoE_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain an Ether Type 0x8863
+*                (i.e., PPPoE frames). Not used on DSA Control frames.
+*            FTYPE_IP_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain an IPv4 or IPv6 Ether
+*                Type. Not used on DSA Control frames.
+*
+* INPUTS:
+*       fType - frame type (GT_PRI_OVERRIDE_FTYPE)
+*       entry - Q Priority Override Table entry (GT_QPRI_TBL_ENTRY)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM     - on unknown frame type
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gsysSetQPriOverrideTable
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PRI_OVERRIDE_FTYPE    fType,
+    IN  GT_QPRI_TBL_ENTRY    *entry
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data,qPri;
+
+    DBG_INFO(("gsysSetQPriOverrideTable Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PRIORITY_OVERRIDE_TABLE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    switch (fType)
+    {
+        case FTYPE_DSA_TO_CPU_BPDU:
+        case FTYPE_DSA_TO_CPU_F2R:
+        case FTYPE_DSA_TO_CPU_IGMP:
+        case FTYPE_DSA_TO_CPU_TRAP:
+        case FTYPE_DSA_TO_CPU_ARP:
+        case FTYPE_DSA_TO_CPU_MIRROR:
+        case FTYPE_DSA_TO_CPU_RESERVED:
+        case FTYPE_DSA_TO_CPU_UCAST_MGMT:
+        case FTYPE_DSA_FROM_CPU:
+        case FTYPE_DSA_CROSS_CHIP_FC:
+        case FTYPE_DSA_CROSS_CHIP_EGRESS_MON:
+        case FTYPE_DSA_CROSS_CHIP_INGRESS_MON:
+        case FTYPE_PORT_ETYPE_MATCH:
+        case FTYPE_BCAST_NON_DSA_CONTROL:
+        case FTYPE_PPPoE_NON_DSA_CONTROL:
+        case FTYPE_IP_NON_DSA_CONTROL:
+            break;
+        default:
+            DBG_INFO(("GT_BAD_PARAM\n"));
+            return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    /* Wait until the Priority Override Table is ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_PRIORITY_OVERRIDE;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+    while(data == 1)
+    {
+        retVal = hwGetGlobal2RegField(dev,QD_REG_PRIORITY_OVERRIDE,15,1,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+    }
+#endif
+
+    if (IS_IN_DEV_GROUP(dev,DEV_QAVB_PRIORITY_OVERRIDE_TABLE))
+    {
+        data = fType << 8;
+
+        retVal = hwWriteGlobal2Reg(dev, QD_REG_PRIORITY_OVERRIDE, data);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+
+        retVal = hwReadGlobal2Reg(dev, QD_REG_PRIORITY_OVERRIDE, &data);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+
+        data &= ((1 << 7) | (3 << 4));    /* keep QPriAvb information */
+    }
+    else
+        data = 0;
+
+    if (entry->qPriEn)
+        qPri = (GT_U16)((1 << 3) | (entry->qPriority & 0x3));
+    else
+        qPri = 0;
+
+    data |= (GT_U16)((1 << 15) | (fType << 8) | qPri);
+
+    retVal = hwWriteGlobal2Reg(dev, QD_REG_PRIORITY_OVERRIDE, data);
+
+    gtSemGive(dev,dev->tblRegsSem);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysGetQPriOverrideTable
+*
+* DESCRIPTION:
+*       Queue Priority Override.
+*        When a frame enters a port, its type is determined and the type is used
+*        to access the Queue Priority Table. If the type's qPriEn (in GT_QPRI_TBL_ENTRY
+*        structure) is enabled, then the frame's Queue Priority will be overridden
+*        with the value written in qPriority (in GT_QPRI_TBL_ENTRY structure).
+*        Frame Types supported are:
+*            FTYPE_DSA_TO_CPU_BPDU -
+*                Used on multicast DSA To_CPU frames with a Code of 0x0 (BPDU/MGMT).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_F2R -
+*                Used on DSA To_CPU frames with a Code of 0x1 (Frame to Register
+*                Reply). Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_IGMP -
+*                Used on DSA To_CPU frames with a Code of 0x2 (IGMP/MLD Trap)
+*                and on non-DSA Control frames that are IGMP or MLD trapped
+*            FTYPE_DSA_TO_CPU_TRAP -
+*                Used on DSA To_CPU frames with a Code of 0x3 (Policy Trap) and
+*                on non-DSA Control frames that are Policy Trapped
+*            FTYPE_DSA_TO_CPU_ARP -
+*                Used on DSA To_CPU frames with a Code of 0x4 (ARP Mirror) and
+*                on non-DSA Control frames that are ARP Mirrored (see gprtSetARPtoCPU API).
+*            FTYPE_DSA_TO_CPU_MIRROR -
+*                Used on DSA To_CPU frames with a Code of 0x5 (Policy Mirror) and
+*                on non-DSA Control frames that are Policy Mirrored (see gprtSetPolicy API).
+*            FTYPE_DSA_TO_CPU_RESERVED -
+*                Used on DSA To_CPU frames with a Code of 0x6 (Reserved). Not
+*                used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_UCAST_MGMT -
+*                Used on unicast DSA To_CPU frames with a Code of 0x0 (unicast
+*                MGMT). Not used on non-DSA Control frames.
+*            FTYPE_DSA_FROM_CPU -
+*                Used on DSA From_CPU frames. Not used on non-DSA Control frame
+*            FTYPE_DSA_CROSS_CHIP_FC -
+*                Used on DSA Cross Chip Flow Control frames (To_Sniffer Flow
+*                Control). Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_EGRESS_MON -
+*                Used on DSA Cross Chip Egress Monitor frames (To_Sniffer Tx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_INGRESS_MON -
+*                Used on DSA Cross Chip Ingress Monitor frames (To_Sniffer Rx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_PORT_ETYPE_MATCH -
+*                Used on normal network ports (see gprtSetFrameMode API)
+*                on frames whose Ethertype matches the port's PortEType register.
+*                Not used on non-DSA Control frames.
+*            FTYPE_BCAST_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain a Broadcast
+*                destination address. Not used on DSA Control frames.
+*            FTYPE_PPPoE_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain an Ether Type 0x8863
+*                (i.e., PPPoE frames). Not used on DSA Control frames.
+*            FTYPE_IP_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain an IPv4 or IPv6 Ether
+*                Type. Not used on DSA Control frames.
+*
+* INPUTS:
+*       fType - frame type (GT_PRI_OVERRIDE_FTYPE)
+*
+* OUTPUTS:
+*       entry - Q Priority Override Table entry (GT_QPRI_TBL_ENTRY)
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM     - on unknown frame type
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gsysGetQPriOverrideTable
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PRI_OVERRIDE_FTYPE    fType,
+    OUT GT_QPRI_TBL_ENTRY    *entry
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysGetQPriOverrideTable Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PRIORITY_OVERRIDE_TABLE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    switch (fType)
+    {
+        case FTYPE_DSA_TO_CPU_BPDU:
+        case FTYPE_DSA_TO_CPU_F2R:
+        case FTYPE_DSA_TO_CPU_IGMP:
+        case FTYPE_DSA_TO_CPU_TRAP:
+        case FTYPE_DSA_TO_CPU_ARP:
+        case FTYPE_DSA_TO_CPU_MIRROR:
+        case FTYPE_DSA_TO_CPU_RESERVED:
+        case FTYPE_DSA_TO_CPU_UCAST_MGMT:
+        case FTYPE_DSA_FROM_CPU:
+        case FTYPE_DSA_CROSS_CHIP_FC:
+        case FTYPE_DSA_CROSS_CHIP_EGRESS_MON:
+        case FTYPE_DSA_CROSS_CHIP_INGRESS_MON:
+        case FTYPE_PORT_ETYPE_MATCH:
+        case FTYPE_BCAST_NON_DSA_CONTROL:
+        case FTYPE_PPPoE_NON_DSA_CONTROL:
+        case FTYPE_IP_NON_DSA_CONTROL:
+            break;
+        default:
+            DBG_INFO(("GT_BAD_PARAM\n"));
+            return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    /* Wait until the Priority Override Table is ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_PRIORITY_OVERRIDE;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+    while(data == 1)
+    {
+        retVal = hwGetGlobal2RegField(dev,QD_REG_PRIORITY_OVERRIDE,15,1,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+    }
+#endif
+
+    data = fType << 8;
+
+    retVal = hwWriteGlobal2Reg(dev, QD_REG_PRIORITY_OVERRIDE, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+    }
+
+    retVal = hwReadGlobal2Reg(dev, QD_REG_PRIORITY_OVERRIDE, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+    }
+
+    if (data & (1 << 3))
+    {
+        entry->qPriEn = GT_TRUE;
+        entry->qPriority = data & 0x3;
+    }
+    else
+    {
+        entry->qPriEn = GT_FALSE;
+        entry->qPriority = data & 0x3; /* no meaning, but just in case */
+    }
+
+    gtSemGive(dev,dev->tblRegsSem);
+
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gsysSetFPriOverrideTable
+*
+* DESCRIPTION:
+*         Frame Priority Override.
+*        When a frame enters a port, its type is determined and the type is used
+*        to access the Frame Priority Table. If the type's fPriEn (in GT_FPRI_TBL_ENTRY
+*        structure) is enabled, then the frame's Frame Priority will be overridden
+*        with the value written in fPriority (in GT_FPRI_TBL_ENTRY structure).
+*        Frame Types supported are:
+*            FTYPE_DSA_TO_CPU_BPDU -
+*                Used on multicast DSA To_CPU frames with a Code of 0x0 (BPDU/MGMT).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_F2R -
+*                Used on DSA To_CPU frames with a Code of 0x1 (Frame to Register
+*                Reply). Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_IGMP -
+*                Used on DSA To_CPU frames with a Code of 0x2 (IGMP/MLD Trap)
+*                and on non-DSA Control frames that are IGMP or MLD trapped
+*            FTYPE_DSA_TO_CPU_TRAP -
+*                Used on DSA To_CPU frames with a Code of 0x3 (Policy Trap) and
+*                on non-DSA Control frames that are Policy Trapped
+*            FTYPE_DSA_TO_CPU_ARP -
+*                Used on DSA To_CPU frames with a Code of 0x4 (ARP Mirror) and
+*                on non-DSA Control frames that are ARP Mirrored (see gprtSetARPtoCPU API).
+*            FTYPE_DSA_TO_CPU_MIRROR -
+*                Used on DSA To_CPU frames with a Code of 0x5 (Policy Mirror) and
+*                on non-DSA Control frames that are Policy Mirrored (see gprtSetPolicy API).
+*            FTYPE_DSA_TO_CPU_RESERVED -
+*                Used on DSA To_CPU frames with a Code of 0x6 (Reserved). Not
+*                used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_UCAST_MGMT -
+*                Used on unicast DSA To_CPU frames with a Code of 0x0 (unicast
+*                MGMT). Not used on non-DSA Control frames.
+*            FTYPE_DSA_FROM_CPU -
+*                Used on DSA From_CPU frames. Not used on non-DSA Control frame
+*            FTYPE_DSA_CROSS_CHIP_FC -
+*                Used on DSA Cross Chip Flow Control frames (To_Sniffer Flow
+*                Control). Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_EGRESS_MON -
+*                Used on DSA Cross Chip Egress Monitor frames (To_Sniffer Tx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_INGRESS_MON -
+*                Used on DSA Cross Chip Ingress Monitor frames (To_Sniffer Rx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_PORT_ETYPE_MATCH -
+*                Used on normal network ports (see gprtSetFrameMode API)
+*                on frames whose Ethertype matches the port's PortEType register.
+*                Not used on non-DSA Control frames.
+*            FTYPE_BCAST_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain a Broadcast
+*                destination address. Not used on DSA Control frames.
+*            FTYPE_PPPoE_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain an Ether Type 0x8863
+*                (i.e., PPPoE frames). Not used on DSA Control frames.
+*            FTYPE_IP_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain an IPv4 or IPv6 Ether
+*                Type. Not used on DSA Control frames.
+*
+* INPUTS:
+*       fType - frame type (GT_PRI_OVERRIDE_FTYPE)
+*       entry - Frame Priority Override Table entry (GT_FPRI_TBL_ENTRY)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM     - on unknown frame type
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gsysSetFPriOverrideTable
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PRI_OVERRIDE_FTYPE    fType,
+    IN  GT_FPRI_TBL_ENTRY    *entry
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data,fPri;
+
+    DBG_INFO(("gsysSetFPriOverrideTable Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FRAME_PRIORITY_OVERRIDE_TABLE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    switch (fType)
+    {
+        case FTYPE_DSA_TO_CPU_BPDU:
+        case FTYPE_DSA_TO_CPU_F2R:
+        case FTYPE_DSA_TO_CPU_IGMP:
+        case FTYPE_DSA_TO_CPU_TRAP:
+        case FTYPE_DSA_TO_CPU_ARP:
+        case FTYPE_DSA_TO_CPU_MIRROR:
+        case FTYPE_DSA_TO_CPU_RESERVED:
+        case FTYPE_DSA_TO_CPU_UCAST_MGMT:
+        case FTYPE_DSA_FROM_CPU:
+        case FTYPE_DSA_CROSS_CHIP_FC:
+        case FTYPE_DSA_CROSS_CHIP_EGRESS_MON:
+        case FTYPE_DSA_CROSS_CHIP_INGRESS_MON:
+        case FTYPE_PORT_ETYPE_MATCH:
+        case FTYPE_BCAST_NON_DSA_CONTROL:
+        case FTYPE_PPPoE_NON_DSA_CONTROL:
+        case FTYPE_IP_NON_DSA_CONTROL:
+            break;
+        default:
+            DBG_INFO(("GT_BAD_PARAM\n"));
+            return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    /* Wait until the Priority Override Table is ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_PRIORITY_OVERRIDE;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+    while(data == 1)
+    {
+        retVal = hwGetGlobal2RegField(dev,QD_REG_PRIORITY_OVERRIDE,15,1,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+    }
+#endif
+
+    if (entry->fPriEn)
+        fPri = (GT_U16)((1 << 3) | (entry->fPriority & 0x7));
+    else
+        fPri = 0;
+
+    data = (GT_U16)((1 << 15) | (fType << 8) | fPri | 0x1000);
+
+    retVal = hwWriteGlobal2Reg(dev, QD_REG_PRIORITY_OVERRIDE, data);
+
+    gtSemGive(dev,dev->tblRegsSem);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysGetQPriOverrideTable
+*
+* DESCRIPTION:
+*         Frame Priority Override.
+*        When a frame enters a port, its type is determined and the type is used
+*        to access the Frame Priority Table. If the type's fPriEn (in GT_FPRI_TBL_ENTRY
+*        structure) is enabled, then the frame's Frame Priority will be overridden
+*        with the value written in fPriority (in GT_FPRI_TBL_ENTRY structure).
+*        Frame Types supported are:
+*            FTYPE_DSA_TO_CPU_BPDU -
+*                Used on multicast DSA To_CPU frames with a Code of 0x0 (BPDU/MGMT).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_F2R -
+*                Used on DSA To_CPU frames with a Code of 0x1 (Frame to Register
+*                Reply). Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_IGMP -
+*                Used on DSA To_CPU frames with a Code of 0x2 (IGMP/MLD Trap)
+*                and on non-DSA Control frames that are IGMP or MLD trapped
+*            FTYPE_DSA_TO_CPU_TRAP -
+*                Used on DSA To_CPU frames with a Code of 0x3 (Policy Trap) and
+*                on non-DSA Control frames that are Policy Trapped
+*            FTYPE_DSA_TO_CPU_ARP -
+*                Used on DSA To_CPU frames with a Code of 0x4 (ARP Mirror) and
+*                on non-DSA Control frames that are ARP Mirrored (see gprtSetARPtoCPU API).
+*            FTYPE_DSA_TO_CPU_MIRROR -
+*                Used on DSA To_CPU frames with a Code of 0x5 (Policy Mirror) and
+*                on non-DSA Control frames that are Policy Mirrored (see gprtSetPolicy API).
+*            FTYPE_DSA_TO_CPU_RESERVED -
+*                Used on DSA To_CPU frames with a Code of 0x6 (Reserved). Not
+*                used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_UCAST_MGMT -
+*                Used on unicast DSA To_CPU frames with a Code of 0x0 (unicast
+*                MGMT). Not used on non-DSA Control frames.
+*            FTYPE_DSA_FROM_CPU -
+*                Used on DSA From_CPU frames. Not used on non-DSA Control frame
+*            FTYPE_DSA_CROSS_CHIP_FC -
+*                Used on DSA Cross Chip Flow Control frames (To_Sniffer Flow
+*                Control). Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_EGRESS_MON -
+*                Used on DSA Cross Chip Egress Monitor frames (To_Sniffer Tx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_INGRESS_MON -
+*                Used on DSA Cross Chip Ingress Monitor frames (To_Sniffer Rx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_PORT_ETYPE_MATCH -
+*                Used on normal network ports (see gprtSetFrameMode API)
+*                on frames whose Ethertype matches the port's PortEType register.
+*                Not used on non-DSA Control frames.
+*            FTYPE_BCAST_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain a Broadcast
+*                destination address. Not used on DSA Control frames.
+*            FTYPE_PPPoE_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain an Ether Type 0x8863
+*                (i.e., PPPoE frames). Not used on DSA Control frames.
+*            FTYPE_IP_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain an IPv4 or IPv6 Ether
+*                Type. Not used on DSA Control frames.
+*
+* INPUTS:
+*       fType - frame type (GT_PRI_OVERRIDE_FTYPE)
+*
+* OUTPUTS:
+*       entry - Frame Priority Override Table entry (GT_FPRI_TBL_ENTRY)
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_BAD_PARAM     - on unknown frame type
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gsysGetFPriOverrideTable
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PRI_OVERRIDE_FTYPE    fType,
+    OUT GT_FPRI_TBL_ENTRY    *entry
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysGetFPriOverrideTable Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FRAME_PRIORITY_OVERRIDE_TABLE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    switch (fType)
+    {
+        case FTYPE_DSA_TO_CPU_BPDU:
+        case FTYPE_DSA_TO_CPU_F2R:
+        case FTYPE_DSA_TO_CPU_IGMP:
+        case FTYPE_DSA_TO_CPU_TRAP:
+        case FTYPE_DSA_TO_CPU_ARP:
+        case FTYPE_DSA_TO_CPU_MIRROR:
+        case FTYPE_DSA_TO_CPU_RESERVED:
+        case FTYPE_DSA_TO_CPU_UCAST_MGMT:
+        case FTYPE_DSA_FROM_CPU:
+        case FTYPE_DSA_CROSS_CHIP_FC:
+        case FTYPE_DSA_CROSS_CHIP_EGRESS_MON:
+        case FTYPE_DSA_CROSS_CHIP_INGRESS_MON:
+        case FTYPE_PORT_ETYPE_MATCH:
+        case FTYPE_BCAST_NON_DSA_CONTROL:
+        case FTYPE_PPPoE_NON_DSA_CONTROL:
+        case FTYPE_IP_NON_DSA_CONTROL:
+            break;
+        default:
+            DBG_INFO(("GT_BAD_PARAM\n"));
+            return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    /* Wait until the Priority Override Table is ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_PRIORITY_OVERRIDE;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+    while(data == 1)
+    {
+        retVal = hwGetGlobal2RegField(dev,QD_REG_PRIORITY_OVERRIDE,15,1,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+    }
+#endif
+
+    data = (fType << 8) | 0x1000;
+
+    retVal = hwWriteGlobal2Reg(dev, QD_REG_PRIORITY_OVERRIDE, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+    }
+
+    retVal = hwReadGlobal2Reg(dev, QD_REG_PRIORITY_OVERRIDE, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+    }
+
+    if (data & (1 << 3))
+    {
+        entry->fPriEn = GT_TRUE;
+        entry->fPriority = data & 0x7;
+    }
+    else
+    {
+        entry->fPriEn = GT_FALSE;
+        entry->fPriority = data & 0x7; /* no meaning, but just in case */
+    }
+
+    gtSemGive(dev,dev->tblRegsSem);
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetQPriAvbOverrideTable
+*
+* DESCRIPTION:
+*         Queue Priority Override for AVB enabled ports or AvbOverride enabled ports.
+*        When a frame enters a AVB port, its type is determined and the type is used
+*        to access the Queue Priority Table. If the type's qPriEn (in GT_QPRI_TBL_ENTRY
+*        structure) is enabled, then the frame's Queue Priority will be overridden
+*        with the value written in qPriority (in GT_QPRI_TBL_ENTRY structure).
+*        Frame Types supported are:
+*            FTYPE_DSA_TO_CPU_BPDU -
+*                Used on multicast DSA To_CPU frames with a Code of 0x0 (BPDU/MGMT).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_F2R -
+*                Used on DSA To_CPU frames with a Code of 0x1 (Frame to Register
+*                Reply). Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_IGMP -
+*                Used on DSA To_CPU frames with a Code of 0x2 (IGMP/MLD Trap)
+*                and on non-DSA Control frames that are IGMP or MLD trapped
+*            FTYPE_DSA_TO_CPU_TRAP -
+*                Used on DSA To_CPU frames with a Code of 0x3 (Policy Trap) and
+*                on non-DSA Control frames that are Policy Trapped
+*            FTYPE_DSA_TO_CPU_ARP -
+*                Used on DSA To_CPU frames with a Code of 0x4 (ARP Mirror) and
+*                on non-DSA Control frames that are ARP Mirrored (see gprtSetARPtoCPU API).
+*            FTYPE_DSA_TO_CPU_MIRROR -
+*                Used on DSA To_CPU frames with a Code of 0x5 (Policy Mirror) and
+*                on non-DSA Control frames that are Policy Mirrored (see gprtSetPolicy API).
+*            FTYPE_DSA_TO_CPU_RESERVED -
+*                Used on DSA To_CPU frames with a Code of 0x6 (Reserved). Not
+*                used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_UCAST_MGMT -
+*                Used on unicast DSA To_CPU frames with a Code of 0x0 (unicast
+*                MGMT). Not used on non-DSA Control frames.
+*            FTYPE_DSA_FROM_CPU -
+*                Used on DSA From_CPU frames. Not used on non-DSA Control frame
+*            FTYPE_DSA_CROSS_CHIP_FC -
+*                Used on DSA Cross Chip Flow Control frames (To_Sniffer Flow
+*                Control). Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_EGRESS_MON -
+*                Used on DSA Cross Chip Egress Monitor frames (To_Sniffer Tx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_INGRESS_MON -
+*                Used on DSA Cross Chip Ingress Monitor frames (To_Sniffer Rx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_PORT_ETYPE_MATCH -
+*                Used on normal network ports (see gprtSetFrameMode API)
+*                on frames whose Ethertype matches the port's PortEType register.
+*                Not used on non-DSA Control frames.
+*            FTYPE_BCAST_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain a Broadcast
+*                destination address. Not used on DSA Control frames.
+*            FTYPE_PPPoE_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain an Ether Type 0x8863
+*                (i.e., PPPoE frames). Not used on DSA Control frames.
+*            FTYPE_IP_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain an IPv4 or IPv6 Ether
+*                Type. Not used on DSA Control frames.
+*
+* INPUTS:
+*       fType - frame type (GT_PRI_OVERRIDE_FTYPE)
+*       entry - Q Priority Override Table entry (GT_QPRI_TBL_ENTRY)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM     - on unknown frame type
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gsysSetQPriAvbOverrideTable
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PRI_OVERRIDE_FTYPE    fType,
+    IN  GT_QPRI_TBL_ENTRY    *entry
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data,qPri;
+
+    DBG_INFO(("gsysSetQPriAvbOverrideTable Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAVB_PRIORITY_OVERRIDE_TABLE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    switch (fType)
+    {
+        case FTYPE_DSA_TO_CPU_BPDU:
+        case FTYPE_DSA_TO_CPU_F2R:
+        case FTYPE_DSA_TO_CPU_IGMP:
+        case FTYPE_DSA_TO_CPU_TRAP:
+        case FTYPE_DSA_TO_CPU_ARP:
+        case FTYPE_DSA_TO_CPU_MIRROR:
+        case FTYPE_DSA_TO_CPU_RESERVED:
+        case FTYPE_DSA_TO_CPU_UCAST_MGMT:
+        case FTYPE_DSA_FROM_CPU:
+        case FTYPE_DSA_CROSS_CHIP_FC:
+        case FTYPE_DSA_CROSS_CHIP_EGRESS_MON:
+        case FTYPE_DSA_CROSS_CHIP_INGRESS_MON:
+        case FTYPE_PORT_ETYPE_MATCH:
+        case FTYPE_BCAST_NON_DSA_CONTROL:
+        case FTYPE_PPPoE_NON_DSA_CONTROL:
+        case FTYPE_IP_NON_DSA_CONTROL:
+            break;
+        default:
+            DBG_INFO(("GT_BAD_PARAM\n"));
+            return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    data = fType << 8;
+
+    retVal = hwWriteGlobal2Reg(dev, QD_REG_PRIORITY_OVERRIDE, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+    }
+
+    retVal = hwReadGlobal2Reg(dev, QD_REG_PRIORITY_OVERRIDE, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+    }
+
+    data &= ((1 << 3) | (3 << 0));    /* keep QPri information */
+
+    if (entry->qPriEn)
+        qPri = (GT_U16)((1 << 7) | ((entry->qPriority & 0x3) << 4));
+    else
+        qPri = 0;
+
+    data |= (GT_U16)((1 << 15) | (fType << 8) | qPri);
+
+    retVal = hwWriteGlobal2Reg(dev, QD_REG_PRIORITY_OVERRIDE, data);
+
+    gtSemGive(dev,dev->tblRegsSem);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysGetQPriOverrideTable
+*
+* DESCRIPTION:
+*         Queue Priority Override for AVB enabled ports or AvbOverride enabled ports.
+*        When a frame enters a AVB port, its type is determined and the type is used
+*        to access the Queue Priority Table. If the type's qPriEn (in GT_QPRI_TBL_ENTRY
+*        structure) is enabled, then the frame's Queue Priority will be overridden
+*        with the value written in qPriority (in GT_QPRI_TBL_ENTRY structure).
+*        Frame Types supported are:
+*            FTYPE_DSA_TO_CPU_BPDU -
+*                Used on multicast DSA To_CPU frames with a Code of 0x0 (BPDU/MGMT).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_F2R -
+*                Used on DSA To_CPU frames with a Code of 0x1 (Frame to Register
+*                Reply). Not used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_IGMP -
+*                Used on DSA To_CPU frames with a Code of 0x2 (IGMP/MLD Trap)
+*                and on non-DSA Control frames that are IGMP or MLD trapped
+*            FTYPE_DSA_TO_CPU_TRAP -
+*                Used on DSA To_CPU frames with a Code of 0x3 (Policy Trap) and
+*                on non-DSA Control frames that are Policy Trapped
+*            FTYPE_DSA_TO_CPU_ARP -
+*                Used on DSA To_CPU frames with a Code of 0x4 (ARP Mirror) and
+*                on non-DSA Control frames that are ARP Mirrored (see gprtSetARPtoCPU API).
+*            FTYPE_DSA_TO_CPU_MIRROR -
+*                Used on DSA To_CPU frames with a Code of 0x5 (Policy Mirror) and
+*                on non-DSA Control frames that are Policy Mirrored (see gprtSetPolicy API).
+*            FTYPE_DSA_TO_CPU_RESERVED -
+*                Used on DSA To_CPU frames with a Code of 0x6 (Reserved). Not
+*                used on non-DSA Control frames.
+*            FTYPE_DSA_TO_CPU_UCAST_MGMT -
+*                Used on unicast DSA To_CPU frames with a Code of 0x0 (unicast
+*                MGMT). Not used on non-DSA Control frames.
+*            FTYPE_DSA_FROM_CPU -
+*                Used on DSA From_CPU frames. Not used on non-DSA Control frame
+*            FTYPE_DSA_CROSS_CHIP_FC -
+*                Used on DSA Cross Chip Flow Control frames (To_Sniffer Flow
+*                Control). Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_EGRESS_MON -
+*                Used on DSA Cross Chip Egress Monitor frames (To_Sniffer Tx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_DSA_CROSS_CHIP_INGRESS_MON -
+*                Used on DSA Cross Chip Ingress Monitor frames (To_Sniffer Rx).
+*                Not used on non-DSA Control frames.
+*            FTYPE_PORT_ETYPE_MATCH -
+*                Used on normal network ports (see gprtSetFrameMode API)
+*                on frames whose Ethertype matches the port's PortEType register.
+*                Not used on non-DSA Control frames.
+*            FTYPE_BCAST_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain a Broadcast
+*                destination address. Not used on DSA Control frames.
+*            FTYPE_PPPoE_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain an Ether Type 0x8863
+*                (i.e., PPPoE frames). Not used on DSA Control frames.
+*            FTYPE_IP_NON_DSA_CONTROL -
+*                Used on Non-DSA Control frames that contain an IPv4 or IPv6 Ether
+*                Type. Not used on DSA Control frames.
+*
+* INPUTS:
+*       fType - frame type (GT_PRI_OVERRIDE_FTYPE)
+*
+* OUTPUTS:
+*       entry - Q Priority Override Table entry (GT_QPRI_TBL_ENTRY)
+*
+* RETURNS:
+*        GT_OK      - on success
+*        GT_FAIL    - on error
+*        GT_BAD_PARAM     - on unknown frame type
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gsysGetQPriAvbOverrideTable
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_PRI_OVERRIDE_FTYPE    fType,
+    OUT GT_QPRI_TBL_ENTRY    *entry
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysGetQPriAvbOverrideTable Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QAVB_PRIORITY_OVERRIDE_TABLE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    switch (fType)
+    {
+        case FTYPE_DSA_TO_CPU_BPDU:
+        case FTYPE_DSA_TO_CPU_F2R:
+        case FTYPE_DSA_TO_CPU_IGMP:
+        case FTYPE_DSA_TO_CPU_TRAP:
+        case FTYPE_DSA_TO_CPU_ARP:
+        case FTYPE_DSA_TO_CPU_MIRROR:
+        case FTYPE_DSA_TO_CPU_RESERVED:
+        case FTYPE_DSA_TO_CPU_UCAST_MGMT:
+        case FTYPE_DSA_FROM_CPU:
+        case FTYPE_DSA_CROSS_CHIP_FC:
+        case FTYPE_DSA_CROSS_CHIP_EGRESS_MON:
+        case FTYPE_DSA_CROSS_CHIP_INGRESS_MON:
+        case FTYPE_PORT_ETYPE_MATCH:
+        case FTYPE_BCAST_NON_DSA_CONTROL:
+        case FTYPE_PPPoE_NON_DSA_CONTROL:
+        case FTYPE_IP_NON_DSA_CONTROL:
+            break;
+        default:
+            DBG_INFO(("GT_BAD_PARAM\n"));
+            return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    /* Wait until the Priority Override Table is ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_PRIORITY_OVERRIDE;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+    while(data == 1)
+    {
+        retVal = hwGetGlobal2RegField(dev,QD_REG_PRIORITY_OVERRIDE,15,1,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+    }
+#endif
+
+    data = fType << 8;
+
+    retVal = hwWriteGlobal2Reg(dev, QD_REG_PRIORITY_OVERRIDE, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+    }
+
+    retVal = hwReadGlobal2Reg(dev, QD_REG_PRIORITY_OVERRIDE, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+    }
+
+    if (data & (1 << 7))
+    {
+        entry->qPriEn = GT_TRUE;
+        entry->qPriority = (data >> 4) & 0x3;
+    }
+    else
+    {
+        entry->qPriEn = GT_FALSE;
+        entry->qPriority = (data >> 4) & 0x3; /* no meaning, but just in case */
+    }
+
+    gtSemGive(dev,dev->tblRegsSem);
+
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtQosMap.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtQosMap.c
new file mode 100644
index 000000000000..8ea9a1da6c94
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtQosMap.c
@@ -0,0 +1,2423 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtQosMap.c
+*
+* DESCRIPTION:
+*       API implementation for qos mapping.
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*       $Revision: 3 $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+/*******************************************************************************
+* gcosSetPortDefaultTc
+*
+* DESCRIPTION:
+*       Sets the default traffic class for a specific port.
+*
+* INPUTS:
+*       port      - logical port number
+*       trafClass - default traffic class of a port.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       Fast Ethernet switch family supports 2 bits (0 ~ 3) while Gigabit Switch
+*        family supports 3 bits (0 ~ 7)
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gcosSetPortDefaultTc
+(
+    IN  GT_QD_DEV *dev,
+    IN GT_LPORT   port,
+    IN GT_U8      trafClass
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gcosSetPortDefaultTc Called.\n"));
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_QoS)) != GT_OK )
+      return retVal;
+
+    /* Only Gigabit Switch supports this status. */
+    if ((IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_ENHANCED_FE_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY)))
+    {
+        /* Set the default port pri.  */
+        retVal = hwSetPortRegField(dev,hwPort,QD_REG_PVID,13,3,trafClass);
+    }
+    else
+    {
+        /* Set the default port pri.  */
+        retVal = hwSetPortRegField(dev,hwPort,QD_REG_PVID,14,2,trafClass);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gcosGetPortDefaultTc
+*
+* DESCRIPTION:
+*       Gets the default traffic class for a specific port.
+*
+* INPUTS:
+*       port      - logical port number
+*
+* OUTPUTS:
+*       trafClass - default traffic class of a port.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       Fast Ethernet switch family supports 2 bits (0 ~ 3) while Gigabit Switch
+*        family supports 3 bits (0 ~ 7)
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gcosGetPortDefaultTc
+(
+    IN  GT_QD_DEV *dev,
+    IN GT_LPORT   port,
+    OUT GT_U8     *trafClass
+)
+{
+    GT_U16            data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gcosSetPortDefaultTc Called.\n"));
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_QoS)) != GT_OK )
+      return retVal;
+
+    /* Only Gigabit Switch supports this status. */
+    if ((IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_ENHANCED_FE_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_FE_AVB_FAMILY)))
+    {
+        /* Get the default port pri.  */
+        retVal = hwGetPortRegField(dev,hwPort,QD_REG_PVID,13,3,&data);
+    }
+    else
+    {
+        /* Get the default port pri.  */
+        retVal = hwGetPortRegField(dev,hwPort,QD_REG_PVID,14,2,&data);
+    }
+
+    *trafClass = (GT_U8)data;
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gqosSetPrioMapRule
+*
+* DESCRIPTION:
+*       This routine sets priority mapping rule.
+*        If the current frame is both IEEE 802.3ac tagged and an IPv4 or IPv6,
+*        and UserPrioMap (for IEEE 802.3ac) and IPPrioMap (for IP frame) are
+*        enabled, then priority selection is made based on this setup.
+*        If PrioMapRule is set to GT_TRUE, UserPrioMap is used.
+*        If PrioMapRule is reset to GT_FALSE, IPPrioMap is used.
+*
+* INPUTS:
+*       port - the logical port number.
+*       mode - GT_TRUE for user prio rule, GT_FALSE for otherwise.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosSetPrioMapRule
+(
+    IN  GT_QD_DEV *dev,
+    IN GT_LPORT   port,
+    IN GT_BOOL    mode
+)
+{
+    GT_U16          data;           /* temporary data buffer */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosSetPrioMapRule Called.\n"));
+    /* translate bool to binary */
+    BOOL_2_BIT(mode, data);
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_QoS)) != GT_OK )
+      return retVal;
+
+    /* Set the TagIfBoth.  */
+    retVal = hwSetPortRegField(dev,hwPort,QD_REG_PORT_CONTROL,6,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gqosGetPrioMapRule
+*
+* DESCRIPTION:
+*       This routine gets priority mapping rule.
+*        If the current frame is both IEEE 802.3ac tagged and an IPv4 or IPv6,
+*        and UserPrioMap (for IEEE 802.3ac) and IPPrioMap (for IP frame) are
+*        enabled, then priority selection is made based on this setup.
+*        If PrioMapRule is set to GT_TRUE, UserPrioMap is used.
+*        If PrioMapRule is reset to GT_FALSE, IPPrioMap is used.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       mode - GT_TRUE for user prio rule, GT_FALSE for otherwise.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosGetPrioMapRule
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_BOOL   *mode
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosGetPrioMapRule Called.\n"));
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_QoS)) != GT_OK )
+      return retVal;
+
+    /* get the TagIfBoth.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,6,1,&data);
+    /* translate bool to binary */
+    BIT_2_BOOL(data, *mode);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gqosIpPrioMapEn
+*
+* DESCRIPTION:
+*       This routine enables the IP priority mapping.
+*
+* INPUTS:
+*       port - the logical port number.
+*       en   - GT_TRUE to Enable, GT_FALSE for otherwise.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosIpPrioMapEn
+(
+    IN  GT_QD_DEV *dev,
+    IN GT_LPORT   port,
+    IN GT_BOOL    en
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosIpPrioMapEn Called.\n"));
+    /* translate bool to binary */
+    BOOL_2_BIT(en, data);
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_QoS)) != GT_OK )
+      return retVal;
+
+    /* Set the useIp.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,5,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+
+
+/*******************************************************************************
+* gqosGetIpPrioMapEn
+*
+* DESCRIPTION:
+*       This routine return the IP priority mapping state.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       en    - GT_TRUE for user prio rule, GT_FALSE for otherwise.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosGetIpPrioMapEn
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_BOOL   *en
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosGetIpPrioMapEn Called.\n"));
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_QoS)) != GT_OK )
+      return retVal;
+
+    /* Get the UseIp.  */
+    retVal = hwGetPortRegField(dev,hwPort,QD_REG_PORT_CONTROL,5,1,&data);
+    /* translate bool to binary */
+    BIT_2_BOOL(data, *en);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+
+
+/*******************************************************************************
+* gqosUserPrioMapEn
+*
+* DESCRIPTION:
+*       This routine enables the user priority mapping.
+*
+* INPUTS:
+*       port - the logical port number.
+*       en   - GT_TRUE to Enable, GT_FALSE for otherwise.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosUserPrioMapEn
+(
+    IN  GT_QD_DEV *dev,
+    IN GT_LPORT   port,
+    IN GT_BOOL    en
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosUserPrioMapEn Called.\n"));
+    /* translate bool to binary */
+    BOOL_2_BIT(en, data);
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_QoS)) != GT_OK )
+      return retVal;
+
+    /* Set the useTag.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL,4,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+
+
+/*******************************************************************************
+* gqosGetUserPrioMapEn
+*
+* DESCRIPTION:
+*       This routine return the user priority mapping state.
+*
+* INPUTS:
+*       port  - the logical port number.
+*
+* OUTPUTS:
+*       en    - GT_TRUE for user prio rule, GT_FALSE for otherwise.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosGetUserPrioMapEn
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  port,
+    OUT GT_BOOL   *en
+)
+{
+    GT_U16          data;           /* Used to poll the SWReset bit */
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosGetUserPrioMapEn Called.\n"));
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if((retVal = IS_VALID_API_CALL(dev,hwPort, DEV_QoS)) != GT_OK )
+      return retVal;
+
+    /* Get the UseTag.  */
+    retVal = hwGetPortRegField(dev,hwPort,QD_REG_PORT_CONTROL,4,1,&data);
+    /* translate bool to binary */
+    BIT_2_BOOL(data, *en);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gcosGetUserPrio2Tc
+*
+* DESCRIPTION:
+*       Gets the traffic class number for a specific 802.1p user priority.
+*
+* INPUTS:
+*       userPrior - user priority
+*
+* OUTPUTS:
+*       trClass - The Traffic Class the received frame is assigned.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gcosGetUserPrio2Tc
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     userPrior,
+    OUT GT_U8     *trClass
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           bitOffset;      /* the bit offset in the reg    */
+    GT_U16          data;           /* store the read data          */
+
+    DBG_INFO(("gcosGetUserPrio2Tc Called.\n"));
+
+    /* check if device supports this feature */
+    if(!IS_IN_DEV_GROUP(dev,DEV_QoS))
+        return GT_NOT_SUPPORTED;
+
+    /* calc the bit offset */
+    bitOffset = ((userPrior & 0x7) * 2);
+    /* Get the traffic class for the VPT.  */
+    retVal = hwGetGlobalRegField(dev,QD_REG_IEEE_PRI,bitOffset,2,&data);
+    *trClass = (GT_U8)data;
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gcosSetUserPrio2Tc
+*
+* DESCRIPTION:
+*       Sets the traffic class number for a specific 802.1p user priority.
+*
+* INPUTS:
+*       userPrior - user priority of a port.
+*       trClass   - the Traffic Class the received frame is assigned.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gcosSetUserPrio2Tc
+(
+    IN  GT_QD_DEV *dev,
+    IN GT_U8      userPrior,
+    IN GT_U8      trClass
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           bitOffset;      /* the bit offset in the reg    */
+
+    DBG_INFO(("gcosSetUserPrio2Tc Called.\n"));
+    /* check if device supports this feature */
+    if(!IS_IN_DEV_GROUP(dev,DEV_QoS))
+        return GT_NOT_SUPPORTED;
+
+    /* calc the bit offset */
+    bitOffset = ((userPrior & 0x7) * 2);
+    /* Set the traffic class for the VPT.  */
+    retVal = hwSetGlobalRegField(dev,QD_REG_IEEE_PRI, bitOffset,2,trClass);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gcosGetDscp2Tc
+*
+* DESCRIPTION:
+*       This routine retrieves the traffic class assigned for a specific
+*       IPv4 Dscp.
+*
+* INPUTS:
+*       dscp    - the IPv4 frame dscp to query.
+*
+* OUTPUTS:
+*       trClass - The Traffic Class the received frame is assigned.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gcosGetDscp2Tc
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     dscp,
+    OUT GT_U8     *trClass
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           bitOffset;      /* the bit offset in the reg    */
+    GT_U8           regOffset;      /* the reg offset in the IP tbl */
+    GT_U16          data;           /* store the read data          */
+
+    DBG_INFO(("gcosGetDscp2Tc Called.\n"));
+    /* check if device supports this feature */
+    if(!IS_IN_DEV_GROUP(dev,DEV_QoS))
+        return GT_NOT_SUPPORTED;
+
+    /* calc the bit offset */
+    bitOffset = (((dscp & 0x3f) % 8) * 2);
+    regOffset = ((dscp & 0x3f) / 8);
+    /* Get the traffic class for the IP dscp.  */
+    retVal = hwGetGlobalRegField(dev,(GT_U8)(QD_REG_IP_PRI_BASE+regOffset),
+                                 bitOffset, 2, &data);
+    *trClass = (GT_U8)data;
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gcosSetDscp2Tc
+*
+* DESCRIPTION:
+*       This routine sets the traffic class assigned for a specific
+*       IPv4 Dscp.
+*
+* INPUTS:
+*       dscp    - the IPv4 frame dscp to map.
+*       trClass - the Traffic Class the received frame is assigned.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gcosSetDscp2Tc
+(
+    IN  GT_QD_DEV *dev,
+    IN GT_U8      dscp,
+    IN GT_U8      trClass
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           bitOffset;      /* the bit offset in the reg    */
+    GT_U8           regOffset;      /* the reg offset in the IP tbl */
+
+    DBG_INFO(("gcosSetDscp2Tc Called.\n"));
+    /* check if device supports this feature */
+    if(!IS_IN_DEV_GROUP(dev,DEV_QoS))
+        return GT_NOT_SUPPORTED;
+
+    /* calc the bit offset */
+    bitOffset = (((dscp & 0x3f) % 8) * 2);
+    regOffset = ((dscp & 0x3f) / 8);
+    /* Set the traffic class for the IP dscp.  */
+    retVal = hwSetGlobalRegField(dev,(GT_U8)(QD_REG_IP_PRI_BASE+regOffset),
+                                 bitOffset, 2, trClass);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+
+/*******************************************************************************
+* gqosGetTagRemap
+*
+* DESCRIPTION:
+*        Gets the remapped priority value for a specific 802.1p priority on a
+*        given port.
+*
+* INPUTS:
+*        port  - the logical port number.
+*        pri   - 802.1p priority
+*
+* OUTPUTS:
+*        remappedPri - remapped Priority
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosGetTagRemap
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_U8        pri,
+    OUT GT_U8       *remappedPri
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* store the read data          */
+    GT_U8           phyPort;        /* Physical port.               */
+    GT_U8           regAddr;        /* register address.            */
+    GT_U8           bitOffset;      /* the bit offset in the reg    */
+
+    DBG_INFO(("gqosGetTagRemap Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PRIORITY_REMAPPING))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    if (pri <= 3)
+    {
+        regAddr = QD_REG_IEEE_PRI_REMAP_3_0;
+    }
+    else
+    {
+        regAddr = QD_REG_IEEE_PRI_REMAP_7_4;
+    }
+
+    /* calc the bit offset */
+    bitOffset = 4 * (pri % 4);
+
+    retVal = hwGetPortRegField(dev,phyPort,regAddr,bitOffset,3,&data );
+
+    *remappedPri = (GT_U8)data;
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gqosSetTagRemap
+*
+* DESCRIPTION:
+*        Sets the remapped priority value for a specific 802.1p priority on a
+*        given port.
+*
+* INPUTS:
+*        port  - the logical port number.
+*        pri   - 802.1p priority
+*        remappedPri - remapped Priority
+*
+* OUTPUTS:
+*        None
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosSetTagRemap
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port,
+    IN GT_U8        pri,
+    IN GT_U8        remappedPri
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           phyPort;        /* Physical port.               */
+    GT_U8           regAddr;        /* register address.            */
+    GT_U8           bitOffset;      /* the bit offset in the reg    */
+
+    DBG_INFO(("gqosSetTagRemap Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_PRIORITY_REMAPPING))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    phyPort = GT_LPORT_2_PORT(port);
+
+    if (pri <= 3)
+    {
+        regAddr = QD_REG_IEEE_PRI_REMAP_3_0;
+    }
+    else
+    {
+        regAddr = QD_REG_IEEE_PRI_REMAP_7_4;
+    }
+
+    /* calc the bit offset */
+    bitOffset = 4 * (pri % 4);
+
+    retVal = hwSetPortRegField(dev,phyPort,regAddr,bitOffset,3,remappedPri);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gqosSetQPriValue
+*
+* DESCRIPTION:
+*       This routine sets Queue priority value to used when forced.
+*        When ForceQPri is enabled (gqosSetForceQPri), all frames entering this port
+*        are mapped to the priority queue defined in this value, unless a VTU, SA,
+*        DA or ARP priority override occurs. The Frame's priority (FPri) is not
+*        effected by this value.
+*
+* INPUTS:
+*       port - the logical port number.
+*       pri  - Queue priority value
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_BAD_PARAM - if pri > 3
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gqosSetQPriValue
+(
+    IN  GT_QD_DEV  *dev,
+    IN  GT_LPORT   port,
+    IN  GT_U8      pri
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosSetQPriValue Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QoS_FPRI_QPRI))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (pri > 3)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    /* Set the QPriValue.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_VLAN_MAP, 10, 2, (GT_U16)pri);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+/*******************************************************************************
+* gqosGetQPriValue
+*
+* DESCRIPTION:
+*       This routine gets Queue priority value to used when forced.
+*        When ForceQPri is enabled (gqosSetForceQPri), all frames entering this port
+*        are mapped to the priority queue defined in this value, unless a VTU, SA,
+*        DA or ARP priority override occurs. The Frame's priority (FPri) is not
+*        effected by this value.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       pri  - Queue priority value
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gqosGetQPriValue
+(
+    IN  GT_QD_DEV  *dev,
+    IN  GT_LPORT   port,
+    OUT GT_U8      *pri
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16            data;
+
+    DBG_INFO(("gqosGetQPriValue Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QoS_FPRI_QPRI))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the QPriValue.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_VLAN_MAP, 10, 2, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    *pri = (GT_U8)data;
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gqosSetForceQPri
+*
+* DESCRIPTION:
+*       This routine enables/disables forcing Queue priority.
+*        When ForceQPri is disabled, normal priority queue mapping is used on all
+*        ingressing frames entering this port. When it's enabled, all frames
+*        entering this port are mapped to the QPriValue (gqosSetQPriValue), unless
+*        a VTU, SA, DA or ARP priority override occurs. The frame's priorty (FPri)
+*        is not effected by this feature.
+*
+* INPUTS:
+*       port - the logical port number.
+*       en   - GT_TRUE, to force Queue Priority,
+*               GT_FALSE, otherwise.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gqosSetForceQPri
+(
+    IN  GT_QD_DEV  *dev,
+    IN  GT_LPORT   port,
+    IN  GT_BOOL    en
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosSetQPriValue Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(en, data);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QoS_FPRI_QPRI))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Set the ForceQPri.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_VLAN_MAP, 9, 1, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+/*******************************************************************************
+* gqosGetForceQPri
+*
+* DESCRIPTION:
+*       This routine checks if forcing Queue priority is enabled.
+*        When ForceQPri is disabled, normal priority queue mapping is used on all
+*        ingressing frames entering this port. When it's enabled, all frames
+*        entering this port are mapped to the QPriValue (gqosSetQPriValue), unless
+*        a VTU, SA, DA or ARP priority override occurs. The frame's priorty (FPri)
+*        is not effected by this feature.
+*
+* INPUTS:
+*       port - the logical port number.
+*
+* OUTPUTS:
+*       en   - GT_TRUE, to force Queue Priority,
+*               GT_FALSE, otherwise.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gqosGetForceQPri
+(
+    IN  GT_QD_DEV  *dev,
+    IN  GT_LPORT   port,
+    OUT GT_BOOL    *en
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosGetQPriValue Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QoS_FPRI_QPRI))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the ForceQPri.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_VLAN_MAP, 9, 1, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    BIT_2_BOOL(data, *en);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gqosSetDefFPri
+*
+* DESCRIPTION:
+*       This routine sets the default frame priority (0 ~ 7).
+*        This priority is used as the default frame priority (FPri) to use when
+*        no other priority information is available.
+*
+* INPUTS:
+*       port - the logical port number
+*       pri  - default frame priority
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_BAD_PARAM - if pri > 7
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gqosSetDefFPri
+(
+    IN  GT_QD_DEV  *dev,
+    IN  GT_LPORT   port,
+    IN  GT_U8      pri
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosSetDefFPri Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QoS_FPRI_QPRI))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (pri > 7)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    /* Set the DefFPri.  */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PVID, 13, 3, (GT_U16)pri);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gqosGetDefFPri
+*
+* DESCRIPTION:
+*       This routine gets the default frame priority (0 ~ 7).
+*        This priority is used as the default frame priority (FPri) to use when
+*        no other priority information is available.
+*
+* INPUTS:
+*       port - the logical port number
+*
+* OUTPUTS:
+*       pri  - default frame priority
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gqosGetDefFPri
+(
+    IN  GT_QD_DEV  *dev,
+    IN  GT_LPORT   port,
+    OUT GT_U8      *pri
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16            data;
+
+    DBG_INFO(("gqosGetDefFPri Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QoS_FPRI_QPRI))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the DefFPri.  */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PVID, 13, 3, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    *pri = (GT_U8)data;
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gqosSetVIDFPriOverride
+*
+* DESCRIPTION:
+*        This routine sets VID Frame Priority Override. When this feature is enabled,
+*        VID Frame priority overrides can occur on this port.
+*        VID Frame priority override occurs when the determined VID of a frame
+*        results in a VTU entry whose useVIDFPri override field is set to GT_TRUE.
+*        When this occurs the VIDFPri value assigned to the frame's VID (in the
+*        VTU Table) is used to overwrite the frame's previously determined frame
+*        priority. If the frame egresses tagged the priority in the frame will be
+*        this new VIDFPri value. This function does not affect the egress queue
+*        priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for VID Frame Priority Override,
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosSetVIDFPriOverride
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosSetVIDFPriOverride Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FQPRI_OVERRIDE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set the VIDFPriOverride mode.            */
+       retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2,14,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gqosGetVIDFPriOverride
+*
+* DESCRIPTION:
+*        This routine gets VID Frame Priority Override. When this feature is enabled,
+*        VID Frame priority overrides can occur on this port.
+*        VID Frame priority override occurs when the determined VID of a frame
+*        results in a VTU entry whose useVIDFPri override field is set to GT_TRUE.
+*        When this occurs the VIDFPri value assigned to the frame's VID (in the
+*        VTU Table) is used to overwrite the frame's previously determined frame
+*        priority. If the frame egresses tagged the priority in the frame will be
+*        this new VIDFPri value. This function does not affect the egress queue
+*        priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE for VID Frame Priority Override,
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosGetVIDFPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosGetVIDFPriOverride Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FQPRI_OVERRIDE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the VIDFPriOverride mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2,14,1,&data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    BIT_2_BOOL(data, *mode);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gqosSetSAFPriOverride
+*
+* DESCRIPTION:
+*        This routine sets Source Address(SA) Frame Priority Override.
+*        When this feature is enabled, SA Frame priority overrides can occur on
+*        this port.
+*        SA ATU Frame priority override occurs when the determined source address
+*        of a frame results in an ATU hit where the SA's MAC address entry contains
+*        the useATUFPri field set to GT_TRUE.
+*        When this occurs the ATUFPri value assigned to the frame's SA (in the
+*        ATU Table) is used to overwrite the frame's previously determined frame
+*        priority. If the frame egresses tagged the priority in the frame will be
+*        this new ATUFPri value. This function does not affect the egress queue
+*        priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for SA Frame Priority Override,
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosSetSAFPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosSetSAFPriOverride Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FQPRI_OVERRIDE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set the SAFPriOverride mode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2,13,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+/*******************************************************************************
+* gqosGetSAFPriOverride
+*
+* DESCRIPTION:
+*        This routine gets Source Address(SA) Frame Priority Override.
+*        When this feature is enabled, SA Frame priority overrides can occur on
+*        this port.
+*        SA ATU Frame priority override occurs when the determined source address
+*        of a frame results in an ATU hit where the SA's MAC address entry contains
+*        the useATUFPri field set to GT_TRUE.
+*        When this occurs the ATUFPri value assigned to the frame's SA (in the
+*        ATU Table) is used to overwrite the frame's previously determined frame
+*        priority. If the frame egresses tagged the priority in the frame will be
+*        this new ATUFPri value. This function does not affect the egress queue
+*        priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE for SA Frame Priority Override,
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosGetSAFPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosGetSAFPriOverride Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FQPRI_OVERRIDE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the SAFPriOverride mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2,13,1,&data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    BIT_2_BOOL(data, *mode);
+
+    return retVal;
+}
+
+/*******************************************************************************
+* gqosSetDAFPriOverride
+*
+* DESCRIPTION:
+*        This routine sets Destination Address(DA) Frame Priority Override.
+*        When this feature is enabled, DA Frame priority overrides can occur on
+*        this port.
+*        DA ATU Frame priority override occurs when the determined destination address
+*        of a frame results in an ATU hit where the DA's MAC address entry contains
+*        the useATUFPri field set to GT_TRUE.
+*        When this occurs the ATUFPri value assigned to the frame's DA (in the
+*        ATU Table) is used to overwrite the frame's previously determined frame
+*        priority. If the frame egresses tagged the priority in the frame will be
+*        this new ATUFPri value. This function does not affect the egress queue
+*        priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for DA Frame Priority Override,
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosSetDAFPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosSetDAFPriOverride Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FQPRI_OVERRIDE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set the DAFPriOverride mode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2,12,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+/*******************************************************************************
+* gqosGetDAFPriOverride
+*
+* DESCRIPTION:
+*        This routine gets Destination Address(DA) Frame Priority Override.
+*        When this feature is enabled, DA Frame priority overrides can occur on
+*        this port.
+*        DA ATU Frame priority override occurs when the determined destination address
+*        of a frame results in an ATU hit where the DA's MAC address entry contains
+*        the useATUFPri field set to GT_TRUE.
+*        When this occurs the ATUFPri value assigned to the frame's DA (in the
+*        ATU Table) is used to overwrite the frame's previously determined frame
+*        priority. If the frame egresses tagged the priority in the frame will be
+*        this new ATUFPri value. This function does not affect the egress queue
+*        priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE for DA Frame Priority Override,
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosGetDAFPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosGetDAFPriOverride Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FQPRI_OVERRIDE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the DAFPriOverride mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2,12,1,&data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    BIT_2_BOOL(data, *mode);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gqosSetVIDQPriOverride
+*
+* DESCRIPTION:
+*        This routine sets VID Queue Priority Override. When this feature is enabled,
+*        VID Queue priority overrides can occur on this port.
+*        VID Queue priority override occurs when the determined VID of a frame
+*        results in a VTU entry whose useVIDQPri override field is set to GT_TRUE.
+*        When this occurs the VIDQPri value assigned to the frame's VID (in the
+*        VTU Table) is used to overwrite the frame's previously determined queue
+*        priority. If the frame egresses tagged the priority in the frame will not
+*        be modified by this new VIDQPri value. This function affects the egress
+*        queue priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for VID Queue Priority Override,
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosSetVIDQPriOverride
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port,
+    IN GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosSetVIDQPriOverride Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FQPRI_OVERRIDE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set the VIDQPriOverride mode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2,3,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gqosGetVIDQPriOverride
+*
+* DESCRIPTION:
+*        This routine gets VID Queue Priority Override. When this feature is enabled,
+*        VID Queue priority overrides can occur on this port.
+*        VID Queue priority override occurs when the determined VID of a frame
+*        results in a VTU entry whose useVIDQPri override field is set to GT_TRUE.
+*        When this occurs the VIDQPri value assigned to the frame's VID (in the
+*        VTU Table) is used to overwrite the frame's previously determined queue
+*        priority. If the frame egresses tagged the priority in the frame will not
+*        be modified by this new VIDQPri value. This function affects the egress
+*        queue priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE for VID Queue Priority Override,
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosGetVIDQPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosGetVIDQPriOverride Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FQPRI_OVERRIDE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the VIDQPriOverride mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2,3,1,&data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    BIT_2_BOOL(data, *mode);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gqosSetSAQPriOverride
+*
+* DESCRIPTION:
+*        This routine sets Source Address(SA) Queue Priority Override.
+*        When this feature is enabled, SA Queue priority overrides can occur on
+*        this port.
+*        SA ATU Queue priority override occurs when the determined source address
+*        of a frame results in an ATU hit where the SA's MAC address entry contains
+*        the useATUQPri field set to GT_TRUE.
+*        When this occurs the ATUQPri value assigned to the frame's SA (in the
+*        ATU Table) is used to overwrite the frame's previously determined queue
+*        priority. If the frame egresses tagged the priority in the frame will not
+*        be modified by this new ATUQPri value. This function affects the egress
+*        queue priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for SA Queue Priority Override,
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosSetSAQPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosSetSAQPriOverride Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FQPRI_OVERRIDE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set the SAQPriOverride mode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2,2,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+/*******************************************************************************
+* gqosGetSAQPriOverride
+*
+* DESCRIPTION:
+*        This routine gets Source Address(SA) Queue Priority Override.
+*        When this feature is enabled, SA Queue priority overrides can occur on
+*        this port.
+*        SA ATU Queue priority override occurs when the determined source address
+*        of a frame results in an ATU hit where the SA's MAC address entry contains
+*        the useATUQPri field set to GT_TRUE.
+*        When this occurs the ATUQPri value assigned to the frame's SA (in the
+*        ATU Table) is used to overwrite the frame's previously determined queue
+*        priority. If the frame egresses tagged the priority in the frame will not
+*        be modified by this new ATUQPri value. This function affects the egress
+*        queue priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE for SA Queue Priority Override,
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosGetSAQPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosGetSAQPriOverride Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FQPRI_OVERRIDE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the SAQPriOverride mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2,2,1,&data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    BIT_2_BOOL(data, *mode);
+
+    return retVal;
+}
+
+/*******************************************************************************
+* gqosSetDAQPriOverride
+*
+* DESCRIPTION:
+*        This routine sets Destination Address(DA) Queue Priority Override.
+*        When this feature is enabled, DA Queue priority overrides can occur on
+*        this port.
+*        DA ATU Queue priority override occurs when the determined destination address
+*        of a frame results in an ATU hit where the DA's MAC address entry contains
+*        the useATUQPri field set to GT_TRUE.
+*        When this occurs the ATUQPri value assigned to the frame's DA (in the
+*        ATU Table) is used to overwrite the frame's previously determined queue
+*        priority. If the frame egresses tagged the priority in the frame will not
+*        be modified by this new ATUQPri value. This function affects the egress
+*        queue priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for DA Queue Priority Override,
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosSetDAQPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosSetDAQPriOverride Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FQPRI_OVERRIDE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set the DAQPriOverride mode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2,1,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+/*******************************************************************************
+* gqosGetDAQPriOverride
+*
+* DESCRIPTION:
+*        This routine sets Destination Address(DA) Queue Priority Override.
+*        When this feature is enabled, DA Queue priority overrides can occur on
+*        this port.
+*        DA ATU Queue priority override occurs when the determined destination address
+*        of a frame results in an ATU hit where the DA's MAC address entry contains
+*        the useATUQPri field set to GT_TRUE.
+*        When this occurs the ATUQPri value assigned to the frame's DA (in the
+*        ATU Table) is used to overwrite the frame's previously determined queue
+*        priority. If the frame egresses tagged the priority in the frame will not
+*        be modified by this new ATUQPri value. This function affects the egress
+*        queue priority (QPri) the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE for DA Queue Priority Override,
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosGetDAQPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosGetDAQPriOverride Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FQPRI_OVERRIDE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the DAQPriOverride mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2,1,1,&data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    BIT_2_BOOL(data, *mode);
+
+    return retVal;
+}
+
+/*******************************************************************************
+* gqosSetARPQPriOverride
+*
+* DESCRIPTION:
+*        This routine sets ARP Queue Priority Override.
+*        When this feature is enabled, ARP Queue priority overrides can occur on
+*        this port.
+*        ARP Queue priority override occurs for all ARP frames.
+*        When this occurs, the frame's previously determined egress queue priority
+*        will be overwritten with ArpQPri.
+*        If the frame egresses tagged the priority in the frame will not
+*        be modified. When used, the two bits of the ArpQPri priority determine the
+*        egress queue the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*        mode - GT_TRUE for ARP Queue Priority Override,
+*               GT_FALSE otherwise
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosSetARPQPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    IN  GT_BOOL        mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosSetARPQPriOverride Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FQPRI_OVERRIDE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate BOOL to binary */
+    BOOL_2_BIT(mode, data);
+
+    /* Set the ARPQPriOverride mode.            */
+    retVal = hwSetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2,0,1,data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+    return retVal;
+}
+
+/*******************************************************************************
+* gqosGetARPQPriOverride
+*
+* DESCRIPTION:
+*        This routine sets ARP Queue Priority Override.
+*        When this feature is enabled, ARP Queue priority overrides can occur on
+*        this port.
+*        ARP Queue priority override occurs for all ARP frames.
+*        When this occurs, the frame's previously determined egress queue priority
+*        will be overwritten with ArpQPri.
+*        If the frame egresses tagged the priority in the frame will not
+*        be modified. When used, the two bits of the ArpQPri priority determine the
+*        egress queue the frame is switched into.
+*
+* INPUTS:
+*        port - the logical port number.
+*
+* OUTPUTS:
+*        mode - GT_TRUE for ARP Queue Priority Override,
+*               GT_FALSE otherwise
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gqosGetARPQPriOverride
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    port,
+    OUT GT_BOOL        *mode
+)
+{
+    GT_U16          data;
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gqosGetARPQPriOverride Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FQPRI_OVERRIDE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the ARPQPriOverride mode.            */
+    retVal = hwGetPortRegField(dev,hwPort, QD_REG_PORT_CONTROL2,0,1,&data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+    }
+    else
+    {
+        DBG_INFO(("OK.\n"));
+    }
+
+    BIT_2_BOOL(data, *mode);
+
+    return retVal;
+}
+
+
+/*******************************************************************************
+* gqosSetArpQPri
+*
+* DESCRIPTION:
+*       This routine sets ARP queue Priority to use for ARP QPri Overridden
+*        frames. When a ARP frame is received on a por tthat has its ARP
+*        QPriOVerride is enabled, the QPri assigned to the frame comes from
+*        this value
+*
+* INPUTS:
+*       pri - ARP Queue Priority (0 ~ 3)
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_BAD_PARAM - if pri > 3
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gqosSetArpQPri
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     pri
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gqosSetArpQPri Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FQPRI_OVERRIDE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (pri > 3)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    data = (GT_U16)pri;
+
+    /* Set the ArpQPri bit.            */
+    retVal = hwSetGlobalRegField(dev,QD_REG_MANGEMENT_CONTROL,6,2,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gqosGetArpQPri
+*
+* DESCRIPTION:
+*       This routine gets ARP queue Priority to use for ARP QPri Overridden
+*        frames. When a ARP frame is received on a por tthat has its ARP
+*        QPriOVerride is enabled, the QPri assigned to the frame comes from
+*        this value
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       pri - ARP Queue Priority (0 ~ 3)
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gqosGetArpQPri
+(
+    IN  GT_QD_DEV *dev,
+    OUT GT_U8     *pri
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gqosGetArpQPri Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_FQPRI_OVERRIDE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the ArpQPri bit.            */
+    retVal = hwGetGlobalRegField(dev,QD_REG_MANGEMENT_CONTROL,6,2,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    *pri = (GT_U8)data;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtSerdesCtrl.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtSerdesCtrl.c
new file mode 100644
index 000000000000..b3b6c22a5655
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtSerdesCtrl.c
@@ -0,0 +1,287 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtSerdesCtrl.h
+*
+* DESCRIPTION:
+* API definitions for Phy Serdes control facility.
+*
+* DEPENDENCIES:
+* None.
+*
+* FILE REVISION NUMBER:
+* $Revision: 10 $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtHwCntl.h>
+#include <gtDrvConfig.h>
+#include <gtDrvSwRegs.h>
+#include <gtVct.h>
+#include <gtSem.h>
+
+
+/*******************************************************************************
+* gprtGetSerdesMode
+*
+* DESCRIPTION:
+*       This routine reads Serdes Interface Mode.
+*
+* INPUTS:
+*        port -  The physical SERDES device address(4/5)
+*
+* OUTPUTS:
+*       mode    - Serdes Interface Mode
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       logical port number is supported only for the devices made production
+*       before 2009.
+*  (Serdes devices: 88E6131, 88E6122, 88E6108, 88E6161, 88E6165 and 88E352 family)
+*
+*******************************************************************************/
+GT_STATUS gprtGetSerdesMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_SERDES_MODE *mode
+)
+{
+    GT_U16          u16Data;           /* The register's read data.    */
+    GT_U8           hwPort;         /* the physical port number     */
+
+
+    DBG_INFO(("gprtGetSerdesMode Called.\n"));
+
+    if(!IS_IN_DEV_GROUP(dev,DEV_SERDES_CORE))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if input is logical port number */
+    hwPort = GT_LPORT_2_PORT(port);
+    GT_GET_SERDES_PORT(dev,&hwPort);
+
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* Get Phy Register. */
+    if(hwGetPhyRegField(dev,hwPort,16,0,2,&u16Data) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    *mode = u16Data;
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gprtSetSerdesMode
+*
+* DESCRIPTION:
+*       This routine sets Serdes Interface Mode.
+*
+* INPUTS:
+*       port -  The physical SERDES device address(4/5)
+*       mode    - Serdes Interface Mode
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       logical port number is supported only for the devices made production
+*       before 2009.
+*  (Serdes devices: 88E6131, 88E6122, 88E6108, 88E6161, 88E6165 and 88E352 family)
+*
+*******************************************************************************/
+GT_STATUS gprtSetSerdesMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_SERDES_MODE mode
+)
+{
+    GT_U16          u16Data;
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_STATUS    retVal;
+
+    DBG_INFO(("gprtSetSerdesMode Called.\n"));
+
+    if(!IS_IN_DEV_GROUP(dev,DEV_SERDES_CORE))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if input is logical port number */
+    hwPort = GT_LPORT_2_PORT(port);
+    GT_GET_SERDES_PORT(dev,&hwPort);
+
+    u16Data = mode;
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* Set Phy Register. */
+    if(hwSetPhyRegField(dev,hwPort,16,0,2,u16Data) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    retVal = hwPhyReset(dev,hwPort,0xFF);
+    gtSemGive(dev,dev->phyRegsSem);
+    return retVal;
+}
+
+
+
+#if 0
+/*******************************************************************************
+* gprtGetSerdesReg
+*
+* DESCRIPTION:
+*       This routine reads Phy Serdes Registers.
+*
+* INPUTS:
+*       port -    The logical port number.
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtGetSerdesReg
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT     port,
+    IN  GT_U32         regAddr,
+    OUT GT_U16         *data
+)
+{
+    GT_U16          u16Data;           /* The register's read data.    */
+    GT_U8           hwPort;         /* the physical port number     */
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtGetSerdesReg_mad(dev, port, regAddr, data);
+#endif
+
+    DBG_INFO(("gprtGetSerdesReg Called.\n"));
+
+    if(!IS_IN_DEV_GROUP(dev,DEV_SERDES_CORE))
+    {
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if input is logical port number */
+    hwPort = GT_LPORT_2_PORT(port);
+    GT_GET_SERDES_PORT(dev,&hwPort);
+
+    if(hwPort > dev->maxPhyNum)
+    {
+        /* check if input is physical serdes address */
+        if(dev->validSerdesVec & (1<<port))
+        {
+            hwPort = (GT_U8)port;
+        }
+        else
+            return GT_NOT_SUPPORTED;
+    }
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* Get Phy Register. */
+    if(hwGetPhyRegField(dev,hwPort,16,0,2,&u16Data) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    *mode = u16Data;
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gprtSetSerdesReg
+*
+* DESCRIPTION:
+*       This routine writes Phy Serdes Registers.
+*
+* INPUTS:
+*       port -    The logical port number.
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gprtSetSerdesReg
+(
+    IN  GT_QD_DEV        *dev,
+    IN  GT_LPORT        port,
+    IN  GT_U32            regAddr,
+    IN  GT_U16            data
+)
+{
+    GT_U8           hwPort;         /* the physical port number     */
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+        return gprtSetSerdesReg_mad(dev, port, regAddr, data);
+#endif
+
+    DBG_INFO(("gprtSetSerdesReg Called.\n"));
+
+/*    hwPort = GT_LPORT_2_PHY(port); */
+    hwPort = qdLong2Char(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* Write to Phy Register */
+    if(hwWritePhyReg(dev,hwPort,(GT_U8)regAddr,data) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return GT_OK;
+}
+
+
+#endif
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtSysConfig.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtSysConfig.c
new file mode 100644
index 000000000000..bff020f32b84
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtSysConfig.c
@@ -0,0 +1,1304 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtSysConfig.c
+*
+* DESCRIPTION:
+*       API definitions for system configuration, and enabling.
+*
+* DEPENDENCIES:
+*       None.
+*
+* FILE REVISION NUMBER:
+*       $Revision: 4 $
+*
+*******************************************************************************/
+
+#include <msApi.h>
+#include <msApiPrototype.h>
+#include <gtDrvConfig.h>
+#include <gtSem.h>
+#include <platformDeps.h>
+#ifdef GT_USE_MAD
+#include "madApi.h"
+#include "madApiDefs.h"
+#endif
+#include <gtHwCntl.h>
+
+extern GT_U8 lport2port(IN GT_U16 portVec, IN GT_LPORT  port);
+extern GT_LPORT port2lport(IN GT_U16 portVec, IN GT_U8  hwPort);
+extern GT_U32 lportvec2portvec(IN GT_U16 portVec, IN GT_U32  lVec);
+extern GT_U32 portvec2lportvec(IN GT_U16 portVec, IN GT_U32  pVec);
+static GT_BOOL gtRegister(GT_QD_DEV *qd_dev, BSP_FUNCTIONS* pBSPFunctions);
+
+#ifdef GT_USE_MAD
+static MAD_BOOL madSMIRead(MAD_DEV* dev, unsigned int smiAddr,
+              unsigned int reg, unsigned int* value)
+{
+  GT_STATUS  status;
+  GT_U16 data;
+  status =     hwReadPhyReg((GT_QD_DEV *)(dev->swDev), smiAddr, reg, &data);
+
+  if(status == GT_OK)
+  {
+    *value = data;
+    return MAD_TRUE;
+  }
+  else
+    return MAD_FALSE;
+}
+
+static MAD_BOOL madSMIWrite(MAD_DEV* dev, unsigned int smiAddr,
+              unsigned int reg, unsigned int value)
+{
+  GT_STATUS  status;
+  GT_U16 data;
+
+  data = value;
+  status =     hwWritePhyReg((GT_QD_DEV *)(dev->swDev), smiAddr, reg, data);
+
+  if(status == GT_OK)
+    return MAD_TRUE;
+  else
+    return MAD_FALSE;
+}
+
+static char * madGetDeviceName ( MAD_DEVICE_ID deviceId)
+{
+
+    switch (deviceId)
+    {
+        case MAD_88E10X0: return ("MAD_88E10X0 ");
+        case MAD_88E10X0S: return ("MAD_88E10X0S ");
+        case MAD_88E1011: return ("MAD_88E1011 ");
+        case MAD_88E104X: return ("MAD_88E104X ");
+        case MAD_88E1111: return ("MAD_88E1111/MAD_88E1115 ");
+        case MAD_88E1112: return ("MAD_88E1112 ");
+        case MAD_88E1116: return ("MAD_88E1116/MAD_88E1116R ");
+        case MAD_88E114X: return ("MAD_88E114X ");
+        case MAD_88E1149: return ("MAD_88E1149 ");
+        case MAD_88E1149R: return ("MAD_88E1149R ");
+        case MAD_SWG65G : return ("MAD_SWG65G ");
+        case MAD_88E1181: return ("MAD_88E1181 ");
+        case MAD_88E3016: return ("MAD_88E3015/MAD_88E3016/MAD_88E3018/MAD_88E3019 ");
+/*        case MAD_88E3019: return ("MAD_88E3019 "); */
+        case MAD_88E1121: return ("MAD_88E1121/MAD_88E1121R ");
+        case MAD_88E3082: return ("MAD_88E3082/MAD_88E3083 ");
+        case MAD_88E1240: return ("MAD_88E1240 ");
+        case MAD_88E1340S: return ("MAD_88E1340S ");
+        case MAD_88E1340: return ("MAD_88E1340 ");
+        case MAD_88E1340M: return ("MAD_88E1340M ");
+        case MAD_88E1119R: return ("MAD_88E1119R ");
+        case MAD_88E1310:  return ("MAD_88E1310 ");
+        case MAD_MELODY:  return ("MAD_MELODY_PHY ");
+        case MAD_88E1540:  return ("MAD_88E1540 ");
+        case MAD_88E3183:  return ("MAD_88E3183 ");
+        case MAD_88E3061:  return ("MAD_88E3061 ");
+        case MAD_88E1510:  return ("MAD_88E1510 ");
+        case MAD_88E1548:  return ("MAD_88E1548 ");
+        default : return (" No-name ");
+    }
+} ;
+
+
+static MAD_STATUS madStart(GT_QD_DEV* qd_dev,  int smiPair)
+{
+	int port;
+    MAD_STATUS status = MAD_FAIL;
+    MAD_DEV* dev = (MAD_DEV*)&(qd_dev->mad_dev);
+    MAD_SYS_CONFIG   cfg;
+    cfg.BSPFunctions.readMii   = (FMAD_READ_MII )madSMIRead;
+    cfg.BSPFunctions.writeMii  = (FMAD_WRITE_MII )madSMIWrite;
+    cfg.BSPFunctions.semCreate = NULL;
+    cfg.BSPFunctions.semDelete = NULL;
+    cfg.BSPFunctions.semTake   = NULL;
+    cfg.BSPFunctions.semGive   = NULL;
+
+    dev->swDev = (void *)qd_dev;
+	cfg.smiBaseAddr = smiPair;  /* Set SMI Address */
+	cfg.switchType = MAD_SYS_SW_TYPE_NO;
+	if((qd_dev->deviceId==GT_88E6320)||
+	   (qd_dev->deviceId==GT_88E6310)||
+	   (qd_dev->deviceId==GT_88E6310)||
+	   (qd_dev->deviceId==GT_88E6310))
+	{
+	  cfg.switchType = MAD_SYS_SW_TYPE_1;
+	}
+
+    if((status=mdLoadDriver(&cfg, dev)) != MAD_OK)
+    {
+        return status;
+    }
+    dev->phyInfo.swPhyType = 1;  /* The Phy is part of switch*/
+
+	/* to set parameters to ports of phy ports added in switch*/
+	for (port=dev->numOfPorts; port < qd_dev->maxPhyNum; port++)
+      dev->phyInfo.hwMode[port] = dev->phyInfo.hwMode[0];
+    dev->numOfPorts = qd_dev->maxPhyNum;
+/*    dev->numOfPorts = qd_dev->numOfPorts; */
+
+    DBG_INFO(("Device Name   : %s\n", madGetDeviceName(dev->deviceId)));
+    DBG_INFO(("Device ID     : 0x%x\n",dev->deviceId));
+    DBG_INFO(("Revision      : 0x%x\n",dev->revision));
+    DBG_INFO(("Base Reg Addr : 0x%x\n",dev->baseRegAddr));
+/*    DBG_INFO(("No of Ports   : %d\n",dev->numOfPorts)); */
+    DBG_INFO(("No of Ports   : %d\n",qd_dev->maxPhyNum));
+    DBG_INFO(("QD dev        : %x\n",dev->swDev));
+
+    DBG_INFO(("MAD has been started.\n"));
+
+    qd_dev->use_mad = GT_TRUE;
+    return MAD_OK;
+}
+
+/*
+static void madClose(MAD_DEV* dev)
+{
+    if (dev->devEnabled)
+        mdUnloadDriver(dev);
+}
+*/
+
+ GT_STATUS qd_madInit(GT_QD_DEV    *dev, int phyAddr)
+{
+  MAD_STATUS    status;
+
+
+  status = madStart(dev, phyAddr);
+  if (MAD_OK != status)
+  {
+        DBG_INFO(("sMAD Initialization Failed.\n"));
+        qdUnloadDriver(dev);
+        return GT_FAIL;
+  }
+
+  return GT_OK;
+}
+
+#endif /* GT_USE_MAD */
+
+/*******************************************************************************
+* qdLoadDriver
+*
+* DESCRIPTION:
+*       QuarterDeck Driver Initialization Routine.
+*       This is the first routine that needs be called by system software.
+*       It takes *cfg from system software, and retures a pointer (*dev)
+*       to a data structure which includes infomation related to this QuarterDeck
+*       device. This pointer (*dev) is then used for all the API functions.
+*
+* INPUTS:
+*       cfg  - Holds device configuration parameters provided by system software.
+*
+* OUTPUTS:
+*       dev  - Holds device information to be used for each API call.
+*
+* RETURNS:
+*       GT_OK               - on success
+*       GT_FAIL             - on error
+*       GT_ALREADY_EXIST    - if device already started
+*       GT_BAD_PARAM        - on bad parameters
+*
+* COMMENTS:
+*     qdUnloadDriver is also provided to do driver cleanup.
+*
+*******************************************************************************/
+GT_STATUS qdLoadDriver
+(
+    IN  GT_SYS_CONFIG   *cfg,
+    OUT GT_QD_DEV    *dev
+)
+{
+    GT_STATUS   retVal;
+    GT_LPORT    port;
+
+    DBG_INFO(("qdLoadDriver Called.\n"));
+
+    /* Check for parameters validity        */
+    if(dev == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Check for parameters validity        */
+    if(cfg == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* The initialization was already done. */
+    if(dev->devEnabled)
+    {
+        DBG_INFO(("QuarterDeck already started.\n"));
+        return GT_ALREADY_EXIST;
+    }
+
+#ifdef GT_PORT_MAP_IN_DEV
+    /* Modified to add port mapping functions into device ssystem configuration. */
+
+    if (dev->lport2port == NULL) {
+      dev->lport2port = lport2port;
+    }
+
+    if (dev->port2lport == NULL) {
+      dev->port2lport = port2lport;
+    }
+
+    if (dev->lportvec2portvec == NULL) {
+      dev->lportvec2portvec = lportvec2portvec;
+    }
+
+    if (dev->portvec2lportvec == NULL) {
+      dev->portvec2lportvec = portvec2lportvec;
+    }
+#endif
+
+    if(gtRegister(dev,&(cfg->BSPFunctions)) != GT_TRUE)
+    {
+       DBG_INFO(("gtRegister Failed.\n"));
+       return GT_FAIL;
+    }
+    dev->accessMode = (GT_U8)cfg->mode.scanMode;
+    if (dev->accessMode == SMI_MULTI_ADDR_MODE)
+    {
+        dev->baseRegAddr = 0;
+        dev->phyAddr = (GT_U8)cfg->mode.baseAddr;
+    }
+    else
+    {
+        dev->baseRegAddr = (GT_U8)cfg->mode.baseAddr;
+        dev->phyAddr = 0;
+    }
+
+
+    /* Initialize the driver    */
+    retVal = driverConfig(dev);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("driverConfig Failed.\n"));
+        return retVal;
+    }
+
+    /* Initialize dev fields.         */
+    dev->cpuPortNum = cfg->cpuPortNum;
+    dev->maxPhyNum = 5;
+    dev->devGroup = 0;
+    dev->devStorage = 0;
+    /* Assign Device Name */
+    dev->devName = 0;
+    dev->devName1 = 0;
+
+    dev->validSerdesVec = 0;
+
+    if((dev->deviceId&0xfff8)==GT_88EC000) /* device id 0xc00 - 0xc07 are GT_88EC0XX */
+      dev->deviceId=GT_88EC000;
+
+	if (dev->deviceId == 0xc10)
+		dev->deviceId = GT_88E6352;
+
+    switch(dev->deviceId)
+    {
+        case GT_88E6021:
+                dev->numOfPorts = 3;
+                dev->maxPorts = 3;
+                dev->maxPhyNum = 2;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = (1 << dev->maxPhyNum) - 1;
+                dev->devName = DEV_88E6021;
+                break;
+
+        case GT_88E6051:
+                dev->numOfPorts = 5;
+                dev->maxPorts = 5;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = (1 << dev->maxPhyNum) - 1;
+                dev->devName = DEV_88E6051;
+                break;
+
+        case GT_88E6052:
+                dev->numOfPorts = 7;
+                dev->maxPorts = 7;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = (1 << dev->maxPhyNum) - 1;
+                dev->devName = DEV_88E6052;
+                break;
+
+        case GT_88E6060:
+                if((dev->cpuPortNum != 4)&&(dev->cpuPortNum != 5))
+                {
+                    return GT_FAIL;
+                }
+                dev->numOfPorts = 6;
+                dev->maxPorts = 6;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = (1 << dev->maxPhyNum) - 1;
+                dev->devName = DEV_88E6060;
+                break;
+
+        case GT_88E6031:
+                dev->numOfPorts = 3;
+                dev->maxPorts = 6;
+                dev->maxPhyNum = 3;
+                dev->validPortVec = 0x31;    /* port 0, 4, and 5 */
+                dev->validPhyVec = 0x31;    /* port 0, 4, and 5 */
+                dev->devName = DEV_88E6061;
+                break;
+
+        case GT_88E6061:
+                dev->numOfPorts = 6;
+                dev->maxPorts = 6;
+                dev->maxPhyNum = 6;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = (1 << dev->maxPhyNum) - 1;
+                dev->devName = DEV_88E6061;
+                break;
+
+        case GT_88E6035:
+                dev->numOfPorts = 3;
+                dev->maxPorts = 6;
+                dev->maxPhyNum = 3;
+                dev->validPortVec = 0x31;    /* port 0, 4, and 5 */
+                dev->validPhyVec = 0x31;    /* port 0, 4, and 5 */
+                dev->devName = DEV_88E6065;
+                break;
+
+        case GT_88E6055:
+                dev->numOfPorts = 5;
+                dev->maxPorts = 6;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = 0x2F;    /* port 0,1,2,3, and 5 */
+                dev->validPhyVec = 0x2F;    /* port 0,1,2,3, and 5 */
+                dev->devName = DEV_88E6065;
+                break;
+
+        case GT_88E6065:
+                dev->numOfPorts = 6;
+                dev->maxPorts = 6;
+                dev->maxPhyNum = 6;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = (1 << dev->maxPhyNum) - 1;
+                dev->devName = DEV_88E6065;
+                break;
+
+        case GT_88E6063:
+                dev->numOfPorts = 7;
+                dev->maxPorts = 7;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = (1 << dev->maxPhyNum) - 1;
+                dev->devName = DEV_88E6063;
+                break;
+
+        case GT_FH_VPN:
+                dev->numOfPorts = 7;
+                dev->maxPorts = 7;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = (1 << dev->maxPhyNum) - 1;
+                dev->devName = DEV_FH_VPN;
+                break;
+
+        case GT_FF_EG:
+                if(dev->cpuPortNum != 5)
+                {
+                    return GT_FAIL;
+                }
+                dev->numOfPorts = 6;
+                dev->maxPorts = 6;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = (1 << dev->maxPhyNum) - 1;
+                dev->devName = DEV_FF_EG;
+                break;
+
+        case GT_FF_HG:
+                dev->numOfPorts = 7;
+                dev->maxPorts = 7;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = (1 << dev->maxPhyNum) - 1;
+                dev->devName = DEV_FF_HG;
+                break;
+
+        case GT_88E6083:
+                dev->numOfPorts = 10;
+                dev->maxPorts = 10;
+                dev->maxPhyNum = 8;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = (1 << dev->maxPhyNum) - 1;
+                dev->devName = DEV_88E6083;
+                break;
+
+        case GT_88E6153:
+                dev->numOfPorts = 6;
+                dev->maxPorts = 6;
+                dev->maxPhyNum = 6;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = (1 << dev->maxPhyNum) - 1;
+                dev->devName = DEV_88E6183;
+                break;
+
+        case GT_88E6181:
+                dev->numOfPorts = 8;
+                dev->maxPorts = 8;
+                dev->maxPhyNum = 8;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = (1 << dev->maxPhyNum) - 1;
+                dev->devName = DEV_88E6181;
+                break;
+
+        case GT_88E6183:
+                dev->numOfPorts = 10;
+                dev->maxPorts = 10;
+                dev->maxPhyNum = 10;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = (1 << dev->maxPhyNum) - 1;
+                dev->devName = DEV_88E6183;
+                break;
+
+        case GT_88E6093:
+                dev->numOfPorts = 11;
+                dev->maxPorts = 11;
+                dev->maxPhyNum = 11;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = (1 << dev->maxPhyNum) - 1;
+                dev->devName = DEV_88E6093;
+                break;
+
+        case GT_88E6092:
+                dev->numOfPorts = 11;
+                dev->maxPorts = 11;
+                dev->maxPhyNum = 11;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = (1 << dev->maxPhyNum) - 1;
+                dev->devName = DEV_88E6092;
+                break;
+
+        case GT_88E6095:
+                dev->numOfPorts = 11;
+                dev->maxPorts = 11;
+                dev->maxPhyNum = 11;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = (1 << dev->maxPhyNum) - 1;
+                dev->devName = DEV_88E6095;
+                break;
+
+        case GT_88E6045:
+                dev->numOfPorts = 6;
+                dev->maxPorts = 11;
+                dev->maxPhyNum = 11;
+                dev->validPortVec = 0x60F;
+                dev->validPhyVec = 0x60F;
+                dev->devName = DEV_88E6095;
+                break;
+
+        case GT_88E6097:
+                dev->numOfPorts = 11;
+                dev->maxPorts = 11;
+                dev->maxPhyNum = 11;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = (1 << dev->maxPhyNum) - 1;
+                dev->devName = DEV_88E6097;
+                break;
+
+        case GT_88E6096:
+                dev->numOfPorts = 11;
+                dev->maxPorts = 11;
+                dev->maxPhyNum = 11;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = (1 << dev->maxPhyNum) - 1;
+                dev->devName = DEV_88E6096;
+                break;
+
+        case GT_88E6047:
+                dev->numOfPorts = 6;
+                dev->maxPorts = 11;
+                dev->maxPhyNum = 11;
+                dev->validPortVec = 0x60F;
+                dev->validPhyVec = 0x60F;
+                dev->devName = DEV_88E6097;
+                break;
+
+        case GT_88E6046:
+                dev->numOfPorts = 6;
+                dev->maxPorts = 11;
+                dev->maxPhyNum = 11;
+                dev->validPortVec = 0x60F;
+                dev->validPhyVec = 0x60F;
+                dev->devName = DEV_88E6096;
+                break;
+
+        case GT_88E6085:
+                dev->numOfPorts = 10;
+                dev->maxPorts = 11;
+                dev->maxPhyNum = 11;
+                dev->validPortVec = 0x6FF;
+                dev->validPhyVec = 0x6FF;
+                dev->devName = DEV_88E6096;
+                break;
+
+        case GT_88E6152:
+                dev->numOfPorts = 6;
+                dev->maxPorts = 6;
+                dev->maxPhyNum = 6;
+                dev->validPortVec = 0x28F;
+                dev->validPhyVec = 0x28F;
+                dev->devName = DEV_88E6182;
+                break;
+
+        case GT_88E6155:
+                dev->numOfPorts = 6;
+                dev->maxPorts = 6;
+                dev->maxPhyNum = 6;
+                dev->validPortVec = 0x28F;
+                dev->validPhyVec = 0x28F;
+                dev->devName = DEV_88E6185;
+                break;
+
+        case GT_88E6182:
+                dev->numOfPorts = 10;
+                dev->maxPorts = 10;
+                dev->maxPhyNum = 10;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = (1 << dev->maxPhyNum) - 1;
+                dev->devName = DEV_88E6182;
+                break;
+
+        case GT_88E6185:
+                dev->numOfPorts = 10;
+                dev->maxPorts = 10;
+                dev->maxPhyNum = 10;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = (1 << dev->maxPhyNum) - 1;
+                dev->devName = DEV_88E6185;
+                break;
+
+        case GT_88E6121:
+                dev->numOfPorts = 3;
+                dev->maxPorts = 8;
+                dev->maxPhyNum = 3;
+                dev->validPortVec = 0xE;    /* port 1, 2, and 3 */
+                dev->validPhyVec = 0xE;        /* port 1, 2, and 3 */
+                dev->devName = DEV_88E6108;
+                break;
+
+        case GT_88E6122:
+                dev->numOfPorts = 6;
+                dev->maxPorts = 8;
+                dev->maxPhyNum = 16;
+                dev->validPortVec = 0x7E;    /* port 1 ~ 6 */
+                dev->validPhyVec = 0xF07E;    /* port 1 ~ 6, 12 ~ 15 (serdes) */
+                dev->validSerdesVec = 0xF000;
+                dev->devName = DEV_88E6108;
+                break;
+
+        case GT_88E6131:
+        case GT_88E6108:
+                dev->numOfPorts = 8;
+                dev->maxPorts = 8;
+                dev->maxPhyNum = 16;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = (1 << dev->maxPhyNum) - 1;
+                dev->validSerdesVec = 0xF000;
+                dev->devName = DEV_88E6108;
+                break;
+
+        case GT_88E6123:
+                dev->numOfPorts = 3;
+                dev->maxPorts = 6;
+                dev->maxPhyNum = 14;
+                dev->validPortVec = 0x23;
+                dev->validPhyVec = 0x303F;
+                dev->validSerdesVec = 0x3000;
+                dev->devName = DEV_88E6161;
+                break;
+
+        case GT_88E6140:
+                dev->numOfPorts = 6;
+                dev->maxPorts = 6;
+                dev->maxPhyNum = 14;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = 0x303F;
+                dev->validSerdesVec = 0x3000;
+                dev->devName = DEV_88E6165;
+                break;
+
+        case GT_88E6161:
+                dev->numOfPorts = 6;
+                dev->maxPorts = 6;
+                dev->maxPhyNum = 14;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = 0x303F;
+                dev->validSerdesVec = 0x3000;
+                dev->devName = DEV_88E6161;
+                break;
+
+        case GT_88E6165:
+                dev->numOfPorts = 6;
+                dev->maxPorts = 6;
+                dev->maxPhyNum = 14;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = 0x303F;
+                dev->validSerdesVec = 0x3000;
+                dev->devName = DEV_88E6165;
+                break;
+
+        case GT_88E6351:
+                dev->numOfPorts = 7;
+                dev->maxPorts = 7;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = 0x1F;
+                dev->devName = DEV_88E6351;
+                break;
+
+        case GT_88E6175:
+                dev->numOfPorts = 7;
+                dev->maxPorts = 7;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = 0x1F;
+                dev->devName1 = DEV_88E6175; /* test device group 1 */
+                break;
+
+        case GT_88E6124 :
+                dev->numOfPorts = 4;
+                dev->maxPorts = 7;
+                dev->maxPhyNum = 7;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPortVec &= ~(0x7);
+                dev->validPhyVec = 0x78;
+                dev->devName = DEV_88E6171;
+                break;
+
+        case GT_88E6171 :
+                dev->numOfPorts = 7;
+                dev->maxPorts = 7;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = 0x1F;
+                dev->devName = DEV_88E6171;
+                break;
+
+        case GT_88E6321 :
+                dev->numOfPorts = 7;
+                dev->maxPorts = 7;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPortVec &= ~(0x7);
+                dev->validPhyVec = 0x1F;
+                dev->devName = DEV_88E6371;
+                break;
+
+        case GT_88E6350 :
+                dev->numOfPorts = 7;
+                dev->maxPorts = 7;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = 0x1F;
+                dev->devName = DEV_88E6371;
+                break;
+
+        case GT_88EC000 :
+                dev->numOfPorts = 7;
+                dev->maxPorts = 7;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = 0x1F;
+                dev->devName1 = DEV_88EC000;
+                break;
+        case GT_88E3020:
+                dev->numOfPorts = 7;
+                dev->maxPorts = 7;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = 0x1F;
+                dev->devName1 = DEV_88E3020;
+                break;
+        case GT_88E6020:
+                dev->numOfPorts = 7;
+                dev->maxPorts = 7;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = 0x1F;
+                dev->devName1 = DEV_88E3020;
+                break;
+        case GT_88E6070:
+                dev->numOfPorts = 7;
+                dev->maxPorts = 7;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = 0x1F;
+                dev->devName1 = DEV_88E3020;
+                break;
+        case GT_88E6071:
+                dev->numOfPorts = 7;
+                dev->maxPorts = 7;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = 0x1F;
+                dev->devName1 = DEV_88E3020;
+                break;
+        case GT_88E6220:
+                dev->numOfPorts = 7;
+                dev->maxPorts = 7;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = 0x1F;
+                dev->devName1 = DEV_88E3020;
+                break;
+        case GT_88E6250:
+                dev->numOfPorts = 7;
+                dev->maxPorts = 7;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = 0x1F;
+                dev->devName1 = DEV_88E3020;
+                break;
+        case GT_88E6172:
+                dev->numOfPorts = 7;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = 0x1F;
+                dev->validSerdesVec = 0x8000;
+                dev->devName = DEV_88E6172;
+                break;
+
+        case GT_88E6176:
+                dev->numOfPorts = 7;
+                dev->maxPorts = 7;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = 0x5F;
+                dev->validSerdesVec = 0x8000;
+                dev->devName = DEV_88E6176;
+                break;
+
+        case GT_88E6240:
+                dev->numOfPorts = 7;
+                dev->maxPorts = 7;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = 0x1F;
+                dev->validSerdesVec = 0x8000;
+                dev->devName = DEV_88E6240;
+                break;
+
+        case GT_88E6352:
+                dev->numOfPorts = 7;
+                dev->maxPorts = 7;
+                dev->maxPhyNum = 5;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = 0x1F;
+                dev->validSerdesVec = 0x8000;
+                dev->devName = DEV_88E6352;
+                break;
+
+        case GT_88E6115:
+                dev->numOfPorts = 5;
+                dev->maxPorts = 5;
+                dev->maxPhyNum = 4;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = 0x18;
+                dev->validSerdesVec = 0x0003;
+                dev->devName1 = DEV_88E6115;
+                break;
+
+        case GT_88E6125:
+                dev->numOfPorts = 5;
+                dev->maxPorts = 5;
+                dev->maxPhyNum = 4;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = 0x18;
+                dev->validSerdesVec = 0x0003;
+                dev->devName1 = DEV_88E6125;
+                break;
+
+        case GT_88E6310:
+                dev->numOfPorts = 5;
+                dev->maxPorts = 5;
+                dev->maxPhyNum = 4;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = 0x18;
+                dev->validSerdesVec = 0x0003;
+                dev->devName1 = DEV_88E6310;
+                break;
+
+        case GT_88E6320:
+                dev->numOfPorts = 5;
+                dev->maxPorts = 5;
+                dev->maxPhyNum = 4;
+                dev->validPortVec = (1 << dev->numOfPorts) - 1;
+                dev->validPhyVec = 0x18;
+                dev->validSerdesVec = 0x0003;
+                dev->devName1 = DEV_88E6320;
+                break;
+
+
+        default:
+                DBG_INFO(("Unknown Device. Initialization failed\n"));
+                return GT_FAIL;
+    }
+
+    dev->cpuPortNum = GT_PORT_2_LPORT(cfg->cpuPortNum);
+
+    if(dev->cpuPortNum == GT_INVALID_PORT)
+    {
+        if(GT_LPORT_2_PORT((GT_LPORT)cfg->cpuPortNum) != GT_INVALID_PORT)
+        {
+            dev->cpuPortNum = cfg->cpuPortNum;
+        }
+        else
+        {
+            return GT_BAD_CPU_PORT;
+        }
+    }
+
+    /* Initialize the MultiAddress Register Access semaphore.    */
+    if((dev->multiAddrSem = gtSemCreate(dev,GT_SEM_FULL)) == 0)
+    {
+        DBG_INFO(("semCreate Failed.\n"));
+        qdUnloadDriver(dev);
+        return GT_FAIL;
+    }
+
+    /* Initialize the ATU semaphore.    */
+    if((dev->atuRegsSem = gtSemCreate(dev,GT_SEM_FULL)) == 0)
+    {
+        DBG_INFO(("semCreate Failed.\n"));
+        qdUnloadDriver(dev);
+        return GT_FAIL;
+    }
+
+    /* Initialize the VTU semaphore.    */
+    if((dev->vtuRegsSem = gtSemCreate(dev,GT_SEM_FULL)) == 0)
+    {
+        DBG_INFO(("semCreate Failed.\n"));
+        qdUnloadDriver(dev);
+        return GT_FAIL;
+    }
+
+    /* Initialize the STATS semaphore.    */
+    if((dev->statsRegsSem = gtSemCreate(dev,GT_SEM_FULL)) == 0)
+    {
+        DBG_INFO(("semCreate Failed.\n"));
+        qdUnloadDriver(dev);
+        return GT_FAIL;
+    }
+
+    /* Initialize the PIRL semaphore.    */
+    if((dev->pirlRegsSem = gtSemCreate(dev,GT_SEM_FULL)) == 0)
+    {
+        DBG_INFO(("semCreate Failed.\n"));
+        qdUnloadDriver(dev);
+        return GT_FAIL;
+    }
+
+    /* Initialize the PTP semaphore.    */
+    if((dev->ptpRegsSem = gtSemCreate(dev,GT_SEM_FULL)) == 0)
+    {
+        DBG_INFO(("semCreate Failed.\n"));
+        qdUnloadDriver(dev);
+        return GT_FAIL;
+    }
+
+    /* Initialize the Table semaphore.    */
+    if((dev->tblRegsSem = gtSemCreate(dev,GT_SEM_FULL)) == 0)
+    {
+        DBG_INFO(("semCreate Failed.\n"));
+        qdUnloadDriver(dev);
+        return GT_FAIL;
+    }
+
+    /* Initialize the EEPROM Configuration semaphore.    */
+    if((dev->eepromRegsSem = gtSemCreate(dev,GT_SEM_FULL)) == 0)
+    {
+        DBG_INFO(("semCreate Failed.\n"));
+        qdUnloadDriver(dev);
+        return GT_FAIL;
+    }
+
+    /* Initialize the PHY Device Register Access semaphore.    */
+    if((dev->phyRegsSem = gtSemCreate(dev,GT_SEM_FULL)) == 0)
+    {
+        DBG_INFO(("semCreate Failed.\n"));
+        qdUnloadDriver(dev);
+        return GT_FAIL;
+    }
+
+    /* Initialize the Remote management Register Access semaphore.    */
+    if((dev->hwAccessRegsSem = gtSemCreate(dev,GT_SEM_FULL)) == 0)
+    {
+        DBG_INFO(("semCreate Failed.\n"));
+        qdUnloadDriver(dev);
+        return GT_FAIL;
+    }
+
+    /* Initialize the ports states to forwarding mode. */
+    if(cfg->initPorts == GT_TRUE)
+    {
+        for (port=0; port<dev->numOfPorts; port++)
+        {
+            if((retVal = gstpSetPortState(dev,port,GT_PORT_FORWARDING)) != GT_OK)
+               {
+                DBG_INFO(("Failed.\n"));
+                qdUnloadDriver(dev);
+                   return retVal;
+            }
+        }
+    }
+
+    dev->use_mad = GT_FALSE;
+#ifdef GT_USE_MAD
+	{
+	  int portPhyAddr=0;
+	  unsigned int validPhyVec = dev->validPhyVec;
+	  while((validPhyVec&1)==0)
+	  {
+        validPhyVec >>= 1;
+	    portPhyAddr++;
+	  }
+      DBG_INFO(("@@@@@@@@@@ qd_madInit\n"));
+      if((retVal = qd_madInit(dev, portPhyAddr)) != GT_OK)
+      {
+        DBG_INFO(("Initialize MAD failed.\n"));
+        qdUnloadDriver(dev);
+        return retVal;
+      }
+	}
+#endif
+
+    if(cfg->skipInitSetup == GT_SKIP_INIT_SETUP)
+    {
+        dev->devEnabled = 1;
+        dev->devNum = cfg->devNum;
+
+        DBG_INFO(("OK.\n"));
+        return GT_OK;
+    }
+
+    if(IS_IN_DEV_GROUP(dev,DEV_ENHANCED_CPU_PORT))
+    {
+        if((retVal = gsysSetRsvd2CpuEnables(dev,0)) != GT_OK)
+        {
+            DBG_INFO(("gsysGetRsvd2CpuEnables failed.\n"));
+            qdUnloadDriver(dev);
+            return retVal;
+        }
+
+        if((retVal = gsysSetRsvd2Cpu(dev,GT_FALSE)) != GT_OK)
+        {
+            DBG_INFO(("gsysSetRsvd2Cpu failed.\n"));
+            qdUnloadDriver(dev);
+            return retVal;
+        }
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_CPU_DEST_PER_PORT))
+    {
+        for (port=0; port<dev->numOfPorts; port++)
+        {
+            retVal = gprtSetCPUPort(dev,port,dev->cpuPortNum);
+            if(retVal != GT_OK)
+            {
+                DBG_INFO(("Failed.\n"));
+                qdUnloadDriver(dev);
+                   return retVal;
+            }
+        }
+    }
+
+    if(IS_IN_DEV_GROUP(dev,DEV_CPU_PORT))
+    {
+        retVal = gsysSetCPUPort(dev,dev->cpuPortNum);
+        if(retVal != GT_OK)
+           {
+            DBG_INFO(("Failed.\n"));
+            qdUnloadDriver(dev);
+               return retVal;
+        }
+    }
+
+    if(IS_IN_DEV_GROUP(dev,DEV_CPU_DEST))
+    {
+        retVal = gsysSetCPUDest(dev,dev->cpuPortNum);
+        if(retVal != GT_OK)
+           {
+            DBG_INFO(("Failed.\n"));
+            qdUnloadDriver(dev);
+               return retVal;
+        }
+    }
+
+    if(IS_IN_DEV_GROUP(dev,DEV_MULTICAST))
+    {
+        if((retVal = gsysSetRsvd2Cpu(dev,GT_FALSE)) != GT_OK)
+        {
+            DBG_INFO(("gsysSetRsvd2Cpu failed.\n"));
+            qdUnloadDriver(dev);
+            return retVal;
+        }
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_PIRL_RESOURCE))
+    {
+        retVal = gpirlInitialize(dev);
+        if(retVal != GT_OK)
+           {
+            DBG_INFO(("Failed.\n"));
+            qdUnloadDriver(dev);
+               return retVal;
+        }
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_PIRL2_RESOURCE))
+    {
+        retVal = gpirl2Initialize(dev);
+        if(retVal != GT_OK)
+           {
+            DBG_INFO(("Failed.\n"));
+            qdUnloadDriver(dev);
+               return retVal;
+        }
+    }
+
+    dev->devEnabled = 1;
+    dev->devNum = cfg->devNum;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* sysEnable
+*
+* DESCRIPTION:
+*       This function enables the system for full operation.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       1.  This function should be called only after successful execution of
+*           qdLoadDriver().
+*
+*******************************************************************************/
+GT_STATUS sysEnable( GT_QD_DEV *dev)
+{
+    DBG_INFO(("sysEnable Called.\n"));
+    DBG_INFO(("OK.\n"));
+    return driverEnable(dev);
+}
+
+
+/*******************************************************************************
+* qdUnloadDriver
+*
+* DESCRIPTION:
+*       This function unloads the QuaterDeck Driver.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       1.  This function should be called only after successful execution of
+*           qdLoadDriver().
+*
+*******************************************************************************/
+GT_STATUS qdUnloadDriver
+(
+    IN GT_QD_DEV* dev
+)
+{
+    DBG_INFO(("qdUnloadDriver Called.\n"));
+
+    /* Delete the MultiAddress mode reagister access semaphore.    */
+    if(gtSemDelete(dev,dev->multiAddrSem) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+
+    /* Delete the ATU semaphore.    */
+    if(gtSemDelete(dev,dev->atuRegsSem) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+
+    /* Delete the VTU semaphore.    */
+    if(gtSemDelete(dev,dev->vtuRegsSem) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+
+    /* Delete the STATS semaphore.    */
+    if(gtSemDelete(dev,dev->statsRegsSem) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+
+    /* Delete the PIRL semaphore.    */
+    if(gtSemDelete(dev,dev->pirlRegsSem) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+
+    /* Delete the PTP semaphore.    */
+    if(gtSemDelete(dev,dev->ptpRegsSem) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+
+    /* Delete the Table semaphore.    */
+    if(gtSemDelete(dev,dev->tblRegsSem) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+
+    /* Delete the EEPROM Configuration semaphore.    */
+    if(gtSemDelete(dev,dev->eepromRegsSem) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+
+    /* Delete the PHY Device semaphore.    */
+    if(gtSemDelete(dev,dev->phyRegsSem) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+    /* Delete the Remote management Register Access semaphore.    */
+    if(gtSemDelete(dev,dev->hwAccessRegsSem) != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_FAIL;
+    }
+
+    gtMemSet(dev,0,sizeof(GT_QD_DEV));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gtRegister
+*
+* DESCRIPTION:
+*       BSP should register the following functions:
+*        1) MII Read - (Input, must provide)
+*            allows QuarterDeck driver to read QuarterDeck device registers.
+*        2) MII Write - (Input, must provice)
+*            allows QuarterDeck driver to write QuarterDeck device registers.
+*        3) Semaphore Create - (Input, optional)
+*            OS specific Semaphore Creat function.
+*        4) Semaphore Delete - (Input, optional)
+*            OS specific Semaphore Delete function.
+*        5) Semaphore Take - (Input, optional)
+*            OS specific Semaphore Take function.
+*        6) Semaphore Give - (Input, optional)
+*            OS specific Semaphore Give function.
+*        Notes: 3) ~ 6) should be provided all or should not be provided at all.
+*
+* INPUTS:
+*        pBSPFunctions - pointer to the structure for above functions.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_TRUE, if input is valid. GT_FALSE, otherwise.
+*
+* COMMENTS:
+*       This function should be called only once.
+*
+*******************************************************************************/
+static GT_BOOL gtRegister(GT_QD_DEV *dev, BSP_FUNCTIONS* pBSPFunctions)
+{
+    dev->fgtReadMii =  pBSPFunctions->readMii;
+    dev->fgtWriteMii = pBSPFunctions->writeMii;
+#ifdef GT_RMGMT_ACCESS
+    dev->fgtHwAccessMod =  pBSPFunctions->hwAccessMod;
+    dev->fgtHwAccess = pBSPFunctions->hwAccess;
+#endif
+
+    dev->semCreate = pBSPFunctions->semCreate;
+    dev->semDelete = pBSPFunctions->semDelete;
+    dev->semTake   = pBSPFunctions->semTake  ;
+    dev->semGive   = pBSPFunctions->semGive  ;
+
+    return GT_TRUE;
+}
+
+static GT_U8 qd32_2_8[256] = {
+0,1,2,3,4,5,6,7,8,9,
+10,11,12,13,14,15,16,17,18,19,
+20,21,22,23,24,25,26,27,28,29,
+30,31,32,33,34,35,36,37,38,39,
+40,41,42,43,44,45,46,47,48,49,
+50,51,52,53,54,55,56,57,58,59,
+60,61,62,63,64,65,66,67,68,69,
+70,71,72,73,74,75,76,77,78,79,
+80,81,82,83,84,85,86,87,88,89,
+90,91,92,93,94,95,96,97,98,99,
+100,101,102,103,104,105,106,107,108,109,
+110,111,112,113,114,115,116,117,118,119,
+120,121,122,123,124,125,126,127,128,129,
+130,131,132,133,134,135,136,137,138,139,
+140,141,142,143,144,145,146,147,148,149,
+150,151,152,153,154,155,156,157,158,159,
+160,161,162,163,164,165,166,167,168,169,
+170,171,172,173,174,175,176,177,178,179,
+180,181,182,183,184,185,186,187,188,189,
+190,191,192,193,194,195,196,197,198,199,
+200,201,202,203,204,205,206,207,208,209,
+210,211,212,213,214,215,216,217,218,219,
+220,221,222,223,224,225,226,227,228,229,
+230,231,232,233,234,235,236,237,238,239,
+240,241,242,243,244,245,246,247,248,249,
+250,251,252,253,254,255};
+
+
+
+GT_U8 qdLong2Char(GT_U32 data)
+{
+    return qd32_2_8[data&0xff];
+}
+
+GT_U8 qdShort2Char(GT_U16 data)
+{
+    GT_U32 dataL = data;
+    return qd32_2_8[dataL&0xff];
+}
+
+GT_U16 qdLong2Short(GT_U32 data)
+{
+  GT_U32 data1= 1;
+  if( *((GT_U16 *)&data1) )
+    return *((GT_U16 *)&data);
+  else
+    return *((GT_U16 *)&data + 1);
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtSysCtrl.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtSysCtrl.c
new file mode 100644
index 000000000000..078a3414ec8b
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtSysCtrl.c
@@ -0,0 +1,9236 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtSysCtrl.c
+*
+* DESCRIPTION:
+*       API definitions for system global control.
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*       $Revision: 5 $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+#include <gtSem.h>
+
+static GT_STATUS writeSwitchMacWolReg
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U8        baseid,
+    IN GT_ETHERADDR *mac
+);
+static GT_STATUS readSwitchMacWolReg
+(
+    IN  GT_QD_DEV    *dev,
+    IN GT_U8        baseid,
+    OUT GT_ETHERADDR *mac
+);
+static GT_STATUS writeDiffMACWoL
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U8        baseid,
+    IN GT_U16       diffAddr
+);
+static GT_STATUS readDiffMACWoL
+(
+    IN  GT_QD_DEV    *dev,
+    IN GT_U8        baseid,
+    OUT GT_U16        *diffAddr
+);
+static GT_STATUS writeSwMacWolWofReg
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U8        pointer,
+    IN GT_U8        data
+);
+static GT_STATUS readSwMacWolWofReg
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U8        pointer,
+    OUT GT_U8       *data
+);
+
+static GT_STATUS writeSwitchMacReg
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_ETHERADDR *mac
+);
+
+static GT_STATUS readSwitchMacReg
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_ETHERADDR *mac
+);
+
+static GT_STATUS writeDiffMAC
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U16        diffAddr
+);
+
+static GT_STATUS readDiffMAC
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U16        *diffAddr
+);
+/*******************************************************************************
+* gsysSwReset
+*
+* DESCRIPTION:
+*       This routine preforms switch software reset.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSwReset
+(
+    IN  GT_QD_DEV *dev
+)
+{
+#ifndef GT_RMGMT_ACCESS
+    GT_U16          data;           /* Used to poll the SWReset bit */
+#endif
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8            regOffset;
+
+    DBG_INFO(("gsysSwReset Called.\n"));
+
+    /* Set the Software reset bit.                  */
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        regOffset = QD_REG_GLOBAL_CONTROL;
+    }
+    else
+    {
+      if (IS_IN_DEV_GROUP(dev,DEV_MELODY_SWITCH))
+        regOffset = QD_REG_GLOBAL_CONTROL;
+      else
+        regOffset = QD_REG_ATU_CONTROL;
+    }
+
+    retVal = hwSetGlobalRegField(dev,regOffset,15,1,1);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = regOffset;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+    }
+#else
+    /* Make sure the reset operation is completed.  */
+    data = 1;
+    while(data != 0)
+    {
+           retVal = hwGetGlobalRegField(dev,regOffset,15,1,&data);
+
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            return retVal;
+        }
+    }
+#endif
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysSetPPUEn
+*
+* DESCRIPTION:
+*        This routine enables/disables Phy Polling Unit.
+*
+* INPUTS:
+*        en - GT_TRUE to enable PPU, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetPPUEn
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL         en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                /* register.                    */
+    DBG_INFO(("gsysSetPPUEn Called.\n"));
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_PPU_READ_ONLY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set the PPUEn bit.                */
+    retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,14,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetPPUEn
+*
+* DESCRIPTION:
+*        This routine get the PPU state.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if PPU is enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK           - on success
+*        GT_BAD_PARAM    - on bad parameter
+*        GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetPPUEn
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGetPPUEn Called.\n"));
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_PPU_READ_ONLY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(en == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Get the GetPPUEn bit.                */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,14,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysSetDiscardExcessive
+*
+* DESCRIPTION:
+*       This routine set the Discard Excessive state.
+*
+* INPUTS:
+*       en - GT_TRUE Discard is enabled, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetDiscardExcessive
+(
+    IN  GT_QD_DEV *dev,
+    IN GT_BOOL en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    DBG_INFO(("gsysSetDiscardExcessive Called.\n"));
+    BOOL_2_BIT(en,data);
+
+    /* Set the Discard Exissive bit.                */
+    retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,13,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gsysGetDiscardExcessive
+*
+* DESCRIPTION:
+*       This routine get the Discard Excessive state.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       en - GT_TRUE Discard is enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetDiscardExcessive
+(
+    IN  GT_QD_DEV *dev,
+    IN GT_BOOL    *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGetDiscardExcessive Called.\n"));
+    if(en == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Get the Discard Exissive bit.                */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,13,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gsysSetSchedulingMode
+*
+* DESCRIPTION:
+*       This routine set the Scheduling Mode.
+*
+* INPUTS:
+*       mode - GT_TRUE wrr, GT_FALSE strict.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetSchedulingMode
+(
+    IN  GT_QD_DEV *dev,
+    IN GT_BOOL    mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    DBG_INFO(("gsysSetSchedulingMode Called.\n"));
+
+    if (IS_IN_DEV_GROUP(dev,DEV_PORT_MIXED_SCHEDULE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(mode,data);
+    data = 1 - data;
+
+    /* Set the Schecduling bit.             */
+    retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,11,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gsysGetSchedulingMode
+*
+* DESCRIPTION:
+*       This routine get the Scheduling Mode.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       mode - GT_TRUE wrr, GT_FALSE strict.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetSchedulingMode
+(
+    IN  GT_QD_DEV *dev,
+    OUT GT_BOOL   *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGetSchedulingMode Called.\n"));
+    if (IS_IN_DEV_GROUP(dev, DEV_PORT_MIXED_SCHEDULE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(mode == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+    /* Get the Scheduling bit.              */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,11,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(1 - data,*mode);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gsysSetMaxFrameSize
+*
+* DESCRIPTION:
+*       This routine Set the max frame size allowed.
+*
+* INPUTS:
+*       mode - GT_TRUE max size 1522,
+*               GT_FALSE max size 1535, 1632, or 2048.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        Please refer to the device spec. to get the max frame size.
+*       88E6095 device supports upto 1632.
+*       88E6065/88E6061 devices support upto 2048.
+*
+*******************************************************************************/
+GT_STATUS gsysSetMaxFrameSize
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_BOOL   mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    DBG_INFO(("gsysSetMaxFrameSize Called.\n"));
+
+    if (IS_IN_DEV_GROUP(dev,DEV_JUMBO_MODE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(mode,data);
+    data = 1 - data;
+
+    /* Set the Max Fram Size bit.               */
+    retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,10,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gsysGetMaxFrameSize
+*
+* DESCRIPTION:
+*       This routine Get the max frame size allowed.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       mode - GT_TRUE max size 1522,
+*               GT_FALSE max size 1535, 1632, or 2048.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        Please refer to the device spec. to get the max frame size.
+*       88E6095 device supports upto 1632.
+*       88E6065/88E6061 devices support upto 2048.
+*
+*******************************************************************************/
+GT_STATUS gsysGetMaxFrameSize
+(
+    IN  GT_QD_DEV *dev,
+    OUT GT_BOOL   *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGetMaxFrameSize Called.\n"));
+    if(mode == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_JUMBO_MODE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the Max Frame Size bit.          */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,10,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(1 - data,*mode);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gsysReLoad
+*
+* DESCRIPTION:
+*       This routine cause to the switch to reload the EEPROM.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysReLoad
+(
+    IN  GT_QD_DEV *dev
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gsysReLoad Called.\n"));
+    /* Set the Reload bit.                  */
+    retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,9,1,1);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    /* Should a check for reload completion. */
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gsysSetWatchDog
+*
+* DESCRIPTION:
+*       This routine Set the the watch dog mode.
+*
+* INPUTS:
+*       en - GT_TRUE enables, GT_FALSE disable.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetWatchDog
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_BOOL   en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    DBG_INFO(("gsysSetWatchDog Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if ((IS_IN_DEV_GROUP(dev,DEV_WATCHDOG_EVENT)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_ENHANCED_FE_SWITCH)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set the WatchDog bit.            */
+    retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,7,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gsysGetWatchDog
+*
+* DESCRIPTION:
+*       This routine Get the the watch dog mode.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       en - GT_TRUE enables, GT_FALSE disable.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetWatchDog
+(
+    IN  GT_QD_DEV *dev,
+    OUT GT_BOOL   *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGetWatchDog Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (IS_IN_DEV_GROUP(dev,DEV_ENHANCED_FE_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(en == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Get the WatchDog bit.            */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,7,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetDuplexPauseMac
+*
+* DESCRIPTION:
+*       This routine sets the full duplex pause src Mac Address.
+*        MAC address should be an Unicast address.
+*        For different MAC Addresses per port operation,
+*        use gsysSetPerPortDuplexPauseMac API.
+*
+* INPUTS:
+*       mac - The Mac address to be set.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetDuplexPauseMac
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_ETHERADDR *mac
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    DBG_INFO(("gsysSetDuplexPauseMac Called.\n"));
+    if(mac == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* if the device has Switch MAC Register, we need the special operation */
+    if (IS_IN_DEV_GROUP(dev,DEV_SWITCH_MAC_REG))
+    {
+        return writeSwitchMacReg(dev,mac);
+    }
+
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 3;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WRITE;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_MACADDR_01;
+  /* Set the first Mac register with diffAddr bit reset.  */
+      data = (((*mac).arEther[0] & 0xFE) << 8) | (*mac).arEther[1];
+      regAccess.rw_reg_list[0].data = data;
+      regAccess.rw_reg_list[1].cmd = HW_REG_WRITE;
+      regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[1].reg = QD_REG_MACADDR_23;
+  /* Set the Mac23 address register.   */
+      data = ((*mac).arEther[2] << 8) | (*mac).arEther[3];
+      regAccess.rw_reg_list[1].data = data;
+      regAccess.rw_reg_list[2].cmd = HW_REG_WRITE;
+      regAccess.rw_reg_list[2].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[2].reg = QD_REG_MACADDR_45;
+  /* Set the Mac45 address register.   */
+      data = ((*mac).arEther[4] << 8) | (*mac).arEther[5];
+      regAccess.rw_reg_list[2].data = data;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+    }
+#else
+    /* Set the first Mac register with diffAddr bit reset.  */
+    data = (((*mac).arEther[0] & 0xFE) << 8) | (*mac).arEther[1];
+    retVal = hwWriteGlobalReg(dev,QD_REG_MACADDR_01,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    /* Set the Mac23 address register.   */
+    data = ((*mac).arEther[2] << 8) | (*mac).arEther[3];
+    retVal = hwWriteGlobalReg(dev,QD_REG_MACADDR_23,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    /* Set the Mac45 address register.   */
+    data = ((*mac).arEther[4] << 8) | (*mac).arEther[5];
+    retVal = hwWriteGlobalReg(dev,QD_REG_MACADDR_45,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+#endif
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysGetDuplexPauseMac
+*
+* DESCRIPTION:
+*       This routine Gets the full duplex pause src Mac Address.
+*        For different MAC Addresses per port operation,
+*        use gsysGetPerPortDuplexPauseMac API.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       mac - the Mac address.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetDuplexPauseMac
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_ETHERADDR *mac
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+#ifndef GT_RMGMT_ACCESS
+    GT_U16          data;           /* Data to read from register.  */
+#endif
+
+    DBG_INFO(("gsysGetDuplexPauseMac Called.\n"));
+    if(mac == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* if the device has Switch MAC Register, we need the special operation */
+    if (IS_IN_DEV_GROUP(dev,DEV_SWITCH_MAC_REG))
+    {
+        return readSwitchMacReg(dev,mac);
+    }
+
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 3;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_READ;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_MACADDR_01;
+      regAccess.rw_reg_list[0].data = 0;
+      regAccess.rw_reg_list[1].cmd = HW_REG_READ;
+      regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[1].reg = QD_REG_MACADDR_23;
+      regAccess.rw_reg_list[1].data = 0;
+      regAccess.rw_reg_list[2].cmd = HW_REG_READ;
+      regAccess.rw_reg_list[2].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL_REG_ACCESS);
+      regAccess.rw_reg_list[2].reg = QD_REG_MACADDR_45;
+      regAccess.rw_reg_list[2].data = 0;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+      (*mac).arEther[0] = qdLong2Char((regAccess.rw_reg_list[0].data >> 8)) & ~0x01;
+      (*mac).arEther[1] = qdLong2Char(regAccess.rw_reg_list[0].data & 0xFF);
+      (*mac).arEther[2] = qdLong2Char(regAccess.rw_reg_list[1].data >> 8);
+      (*mac).arEther[3] = qdLong2Char(regAccess.rw_reg_list[1].data & 0xFF);
+      (*mac).arEther[4] = qdLong2Char(regAccess.rw_reg_list[2].data >> 8);
+      (*mac).arEther[5] = qdLong2Char(regAccess.rw_reg_list[2].data & 0xFF);
+    }
+#else
+    /* Get the Mac01 register.      */
+    retVal = hwReadGlobalReg(dev,QD_REG_MACADDR_01,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    (*mac).arEther[0] = (data >> 8) & ~0x01;
+    (*mac).arEther[1] = data & 0xFF;
+    /* Get the Mac23 register.      */
+    retVal = hwReadGlobalReg(dev,QD_REG_MACADDR_23,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    (*mac).arEther[2] = data >> 8;
+    (*mac).arEther[3] = data & 0xFF;
+
+    /* Get the Mac45 register.      */
+    retVal = hwReadGlobalReg(dev,QD_REG_MACADDR_45,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    (*mac).arEther[4] = data >> 8;
+    (*mac).arEther[5] = data & 0xFF;
+#endif
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gsysSetPerPortDuplexPauseMac
+*
+* DESCRIPTION:
+*       This routine sets whether the full duplex pause src Mac Address is per
+*       port or per device.
+*
+* INPUTS:
+*       en - GT_TURE per port mac, GT_FALSE global mac.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetPerPortDuplexPauseMac
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL      en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    DBG_INFO(("gsysSetPerPortDuplexPauseMac Called.\n"));
+    BOOL_2_BIT(en,data);
+
+    /* if the device has Switch MAC Register, we need the special operation */
+    if (IS_IN_DEV_GROUP(dev,DEV_SWITCH_MAC_REG))
+    {
+        retVal = writeDiffMAC(dev,data);
+    }
+    else
+    {
+      if (IS_IN_DEV_GROUP(dev,DEV_MELODY_SWITCH))
+        retVal = writeDiffMAC(dev,data);
+      else
+        retVal = hwSetGlobalRegField(dev,QD_REG_MACADDR_01,8,1,data);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetPerPortDuplexPauseMac
+*
+* DESCRIPTION:
+*       This routine Gets whether the full duplex pause src Mac Address is per
+*       port or per device.
+*
+* INPUTS:
+*       en - GT_TURE per port mac, GT_FALSE global mac.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetPerPortDuplexPauseMac
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGetPerPortDuplexPauseMac Called.\n"));
+    if(en == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* if the device has Switch MAC Register, we need the special operation */
+    if (IS_IN_DEV_GROUP(dev,DEV_SWITCH_MAC_REG))
+    {
+        retVal = readDiffMAC(dev,&data);
+    }
+    else
+    {
+      if (IS_IN_DEV_GROUP(dev,DEV_MELODY_SWITCH))
+        retVal = readDiffMAC(dev,&data);
+      else
+        retVal = hwGetGlobalRegField(dev,QD_REG_MACADDR_01,8,1,&data);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysSetPortWakeonFrameEn
+*
+* DESCRIPTION:
+*       This routine sets port interrupt for wake on frame.
+*
+* INPUTS:
+*       portVec - combine port interrupt enable=1 disable=0:
+*                 port 0: bit0, port 1: bit1, port 2: bit2, ...
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetPortWakeonFrameEn
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U8        portVec
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gsysSetPortWakeonFrame Called.\n"));
+
+    if (!(IS_IN_DEV_GROUP(dev,DEV_SWITCH_MAC_REG)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    if (!(IS_IN_DEV_GROUP(dev,DEV_SWITCH_WOL_WOF_REG)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    retVal = writeSwMacWolWofReg(dev, 0x0e, portVec);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetPortWakeonFrameEn
+*
+* DESCRIPTION:
+*       This routine gets port interrupt enable for wake on frame.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       portVec - combine port interrupt enable=1 disable=0:
+*                 port 0: bit0, port 1: bit1, port 2: bit2, ...
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetPortWakeonFrameEn
+(
+    IN GT_QD_DEV    *dev,
+    OUT GT_U8       *portVec
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gsysGetPortWakeonFrameEn Called.\n"));
+
+    if (!(IS_IN_DEV_GROUP(dev,DEV_SWITCH_MAC_REG)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    if (!(IS_IN_DEV_GROUP(dev,DEV_SWITCH_WOL_WOF_REG)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    retVal = readSwMacWolWofReg(dev, 0x0e, portVec);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetPortWakeonFrameEnSt
+*
+* DESCRIPTION:
+*       This routine gets port interrupt status for wake on frame.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       portVec - combine port interrupt enable=1 disable=0:
+*                 port 0: bit0, port 1: bit1, port 2: bit2, ...
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetPortWakeonFrameEnSt
+(
+    IN GT_QD_DEV    *dev,
+    OUT GT_U8       *portVec
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gsysGetPortWakeonFrameEnSt Called.\n"));
+
+    if (!(IS_IN_DEV_GROUP(dev,DEV_SWITCH_MAC_REG)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    if (!(IS_IN_DEV_GROUP(dev,DEV_SWITCH_WOL_WOF_REG)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    retVal = readSwMacWolWofReg(dev, 0x0c, portVec);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysSetWoLMac
+*
+* DESCRIPTION:
+*       This routine sets the Wake on Lan Mac Address.
+*        MAC address should be an Unicast address.
+*        For different MAC Addresses per port operation,
+*        use gsysSetPerPortDuplexPauseMac API.
+*
+* INPUTS:
+*       mac - The Mac address to be set.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetWoLMac
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_ETHERADDR *mac
+)
+{
+    DBG_INFO(("gsysSetWoLMac Called.\n"));
+    if(mac == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* if the device has Switch MAC Register, we need the special operation */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SWITCH_WOL_WOF_REG))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    return writeSwitchMacWolReg(dev, 0x10, mac);
+
+}
+
+/*******************************************************************************
+* gsysGetWoLMac
+*
+* DESCRIPTION:
+*       This routine Gets the Wake on Lan Mac Address.
+*        For different MAC Addresses per port operation,
+*        use gsysGetPerPortDuplexPauseMac API.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       mac - the Mac address.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetWoLMac
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_ETHERADDR *mac
+)
+{
+    DBG_INFO(("gsysGetWoLMac Called.\n"));
+    if(mac == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_SWITCH_WOL_WOF_REG))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    return readSwitchMacWolReg(dev, 0x10, mac);
+}
+
+/*******************************************************************************
+* gsysSetPerPortWoLMac
+*
+* DESCRIPTION:
+*       This routine sets whether the Wake on Lan Mac Address is per
+*       port or per device.
+*
+* INPUTS:
+*       en - GT_TURE per port mac, GT_FALSE global mac.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetPerPortWoLMac
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL      en
+)
+{
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    DBG_INFO(("gsysSetPerPortWoLMac Called.\n"));
+    BOOL_2_BIT(en,data);
+
+    /* if the device has Switch MAC Register, we need the special operation */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SWITCH_MAC_REG))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+
+    }
+    if (!IS_IN_DEV_GROUP(dev,DEV_SWITCH_WOL_WOF_REG))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    return writeDiffMACWoL(dev, 0x10, data);
+}
+
+/*******************************************************************************
+* gsysGetPerPortWoLMac
+*
+* DESCRIPTION:
+*       This routine Gets whether the Wake on Lanc Mac Address is per
+*       port or per device.
+*
+* INPUTS:
+*       en - GT_TURE per port mac, GT_FALSE global mac.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetPerPortWoLMac
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGetPerPortWoLMac Called.\n"));
+    if(en == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* if the device has Switch MAC Register, we need the special operation */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SWITCH_MAC_REG))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+
+    }
+    if (!IS_IN_DEV_GROUP(dev,DEV_SWITCH_WOL_WOF_REG))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+
+    retVal = readDiffMACWoL(dev, 0x10, &data);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysSetWoLPass
+*
+* DESCRIPTION:
+*       This routine sets the Wake on Lan Password Mac Address.
+*
+* INPUTS:
+*       mac - The Mac address to be set.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetWoLPass
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_ETHERADDR *mac
+)
+{
+    DBG_INFO(("gsysSetWoLPass Called.\n"));
+    if(mac == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* if the device has Switch MAC Register, we need the special operation */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SWITCH_WOL_WOF_REG))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    return writeSwitchMacWolReg(dev, 0x16, mac);
+
+}
+
+/*******************************************************************************
+* gsysGetWoLPass
+*
+* DESCRIPTION:
+*       This routine Gets the Wake on Lan password Mac Address.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       mac - the Mac address.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetWoLPass
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_ETHERADDR *mac
+)
+{
+    DBG_INFO(("gsysGetWoLPass Called.\n"));
+    if(mac == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_SWITCH_WOL_WOF_REG))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    return readSwitchMacWolReg(dev, 0x16, mac);
+}
+
+
+/*******************************************************************************
+* gsysReadMiiReg
+*
+* DESCRIPTION:
+*       This routine reads QuarterDeck Registers. Since this routine is only for
+*        Diagnostic Purpose, no error checking will be performed.
+*        User has to know which phy address(0 ~ 0x1F) will be read.
+*
+* INPUTS:
+*       phyAddr - Phy Address to read the register for.( 0 ~ 0x1F )
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysReadMiiReg
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32         phyAddr,
+    IN  GT_U32         regAddr,
+    OUT GT_U32         *data
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          u16Data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysReadMiiRegister Called.\n"));
+
+    /* Get the register data */
+    retVal = hwReadMiiReg(dev,(GT_U8)phyAddr,(GT_U8)regAddr,&u16Data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    *data = (GT_U32)u16Data;
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysWriteMiiReg
+*
+* DESCRIPTION:
+*       This routine writes QuarterDeck Registers. Since this routine is only for
+*        Diagnostic Purpose, no error checking will be performed.
+*        User has to know which phy address(0 ~ 0x1F) will be read.
+*
+* INPUTS:
+*       phyAddr - Phy Address to read the register for.( 0 ~ 0x1F )
+*       regAddr - The register's address.
+*
+* OUTPUTS:
+*       data    - The read register's data.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysWriteMiiReg
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32         phyAddr,
+    IN  GT_U32         regAddr,
+    IN  GT_U16         data
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gsysWriteMiiRegister Called.\n"));
+
+    /* Set the register data */
+    retVal = hwWriteMiiReg(dev,(GT_U8)phyAddr,(GT_U8)regAddr,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    return GT_OK;
+}
+#ifdef GT_RMGMT_ACCESS
+/*******************************************************************************
+* gsysAccessMultiRegs
+*
+* DESCRIPTION:
+*       This function accesses switch's registers.
+*
+* INPUTS:
+*   regList     - list of HW_DEV_RW_REG.
+*     HW_DEV_RW_REG:
+*     cmd - HW_REG_READ, HW_REG_WRITE, HW_REG_WAIT_TILL_0 or HW_REG_WAIT_TILL_1
+*     addr - SMI Address
+*     reg  - Register offset
+*     data - INPUT,OUTPUT:Value in the Register or Bit number
+*
+* OUTPUTS:
+*   regList
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysAccessMultiRegs
+(
+    IN  GT_QD_DEV    *dev,
+    INOUT HW_DEV_REG_ACCESS *regList
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gsysAccessMultiReg Called.\n"));
+    /* Set the register data */
+    retVal = hwAccessMultiRegs(dev,regList);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+    return GT_OK;
+}
+#endif
+
+/*******************************************************************************
+* gsysSetRetransmitMode
+*
+* DESCRIPTION:
+*       This routine set the Retransmit Mode.
+*
+* INPUTS:
+*       en - GT_TRUE Retransimt Mode is enabled, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetRetransmitMode
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_BOOL      en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    DBG_INFO(("gsysSetRetransmitMode Called.\n"));
+    /* Only Gigabit Switch supports this status. */
+    if ((IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_MELODY_SWITCH)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    BOOL_2_BIT(en,data);
+
+    /* Set the Retransmit Mode bit.                */
+    retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,15,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gsysGetRetransmitMode
+*
+* DESCRIPTION:
+*       This routine get the Retransmit Mode.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       en - GT_TRUE Retransmit Mode is enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetRetransmitMode
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGetRetransmitMode Called.\n"));
+    if(en == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Only Gigabit Switch supports this status. */
+    if ((IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_MELODY_SWITCH)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    /* Get the bit.                */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,15,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetLimitBackoff
+*
+* DESCRIPTION:
+*       This routine set the Limit Backoff bit.
+*
+* INPUTS:
+*       en - GT_TRUE:  uses QoS half duplex backoff operation
+*            GT_FALSE: uses normal half duplex backoff operation
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetLimitBackoff
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_BOOL      en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    DBG_INFO(("gsysSetLimitBackoff Called.\n"));
+    /* Only Gigabit Switch supports this status. */
+    if ((IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_MELODY_SWITCH)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    BOOL_2_BIT(en,data);
+
+    /* Set the bit.                */
+    retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,14,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gsysGetLimitBackoff
+*
+* DESCRIPTION:
+*       This routine set the Limit Backoff bit.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       en - GT_TRUE:  uses QoS half duplex backoff operation
+*            GT_FALSE: uses normal half duplex backoff operation
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetLimitBackoff
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGetLimitBackoff Called.\n"));
+    if(en == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+    /* Only Gigabit Switch supports this status. */
+    if ((IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_MELODY_SWITCH)))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the bit.                */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,14,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetRsvReqPri
+*
+* DESCRIPTION:
+*       This routine set the Reserved Queue's Requesting Priority
+*
+* INPUTS:
+*       en - GT_TRUE: use the last received frome's priority
+*            GT_FALSE:use the last switched frame's priority
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetRsvReqPri
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_BOOL      en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    DBG_INFO(("gsysSetRsvReqPri Called.\n"));
+    /* Only Gigabit Switch supports this status. */
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    BOOL_2_BIT(en,data);
+
+    /* Set the bit.                */
+    retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,12,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gsysGetRsvReqPri
+*
+* DESCRIPTION:
+*       This routine get the Reserved Queue's Requesting Priority
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       en - GT_TRUE: use the last received frome's priority
+*            GT_FALSE:use the last switched frame's priority
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetRsvReqPri
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGetRsvReqPri Called.\n"));
+    if(en == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+    /* Only Gigabit Switch supports this status. */
+    if (IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the bit.                */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,12,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysSetCascadePort
+*
+* DESCRIPTION:
+*        This routine sets Cascade Port number.
+*        In multichip systems frames coming from a CPU need to know when they
+*        have reached their destination chip.
+*
+*        Use Cascade Port = 0xE to indicate this chip has no Cascade port.
+*        Use Cascade Port = 0xF to use Routing table (gsysGetDevRoutingTable).
+*
+* INPUTS:
+*        port - Cascade Port
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetCascadePort
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysSetCascadePort Called.\n"));
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_CASCADE_PORT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    if((port == 0xE) || (port == 0xF))
+        data = (GT_U16)port;
+    else
+    {
+        data = (GT_U16)(GT_LPORT_2_PORT(port));
+        if (data == GT_INVALID_PORT)
+            return GT_BAD_PARAM;
+    }
+
+    /* Set the Cascade port.                */
+    retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL2,12,4,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetCascadePort
+*
+* DESCRIPTION:
+*        This routine gets Cascade Port number.
+*        In multichip systems frames coming from a CPU need to know when they
+*        have reached their destination chip.
+*
+*        Use Cascade Port = 0xE to indicate this chip has no Cascade port.
+*        Use Cascade Port = 0xF to use Routing table (gsysGetDevRoutingTable).
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        port - Cascade Port
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetCascadePort
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_LPORT     *port
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysSetCascadePort Called.\n"));
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_CASCADE_PORT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the Cascade port.                */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL2,12,4,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    if((data == 0xE) || (data == 0xF))
+    {
+        *port = (GT_LPORT)data;
+    }
+    else
+    {
+        *port = GT_PORT_2_LPORT((GT_U8)data);
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysSetDeviceNumber
+*
+* DESCRIPTION:
+*        This routine sets Device Number.
+*        In multichip systems frames coming from a CPU need to know when they
+*        have reached their destination chip. From CPU frames whose Dev_Num
+*        fieldmatches these bits have reachedtheir destination chip and are sent
+*        out this chip using the port number indicated in the frame's Trg_Port
+*        field.
+*
+* INPUTS:
+*        devNum - Device Number (0 ~ 31)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetDeviceNumber
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U32          devNum
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysSetDeviceNumber Called.\n"));
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    data = ((GT_U16)devNum) & 0x1F; /* only 5 bits are valid */
+
+    /* Set the Device Number.                */
+    retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL2,0,5,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetDeviceNumber
+*
+* DESCRIPTION:
+*        This routine gets Device Number.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        devNum - Device Number (0 ~ 31)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetDeviceNumber
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U32      *devNum
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysGetDeviceNumber Called.\n"));
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the Device Number.                */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL2,0,5,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    *devNum = (GT_U32)data;
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysSetCoreTagType
+*
+* DESCRIPTION:
+*        This routine sets Ether Core Tag Type.
+*        This Ether Type is added to frames that egress the switch as Double Tagged
+*        frames. It is also the Ether Type expected during Ingress to determine if
+*        a frame is Tagged or not on ports configured as UseCoreTag mode.
+*
+* INPUTS:
+*        etherType - Core Tag Type (2 bytes)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetCoreTagType
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U16          etherType
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gsysSetCoreTagType Called.\n"));
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_CORE_TAG))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Set the Ether Type */
+    retVal = hwWriteGlobalReg(dev,QD_REG_CORETAG_TYPE,etherType);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetCoreTagType
+*
+* DESCRIPTION:
+*        This routine gets CoreTagType
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        etherType - Core Tag Type (2 bytes)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetCoreTagType
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U16      *etherType
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysGetCoreTagType Called.\n"));
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_CORE_TAG))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the Device Number.                */
+    retVal = hwReadGlobalReg(dev,QD_REG_CORETAG_TYPE,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    *etherType = data;
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysSetIngressMonitorDest
+*
+* DESCRIPTION:
+*        This routine sets Ingress Monitor Destination Port. Frames that are
+*        targeted toward an Ingress Monitor Destination go out the port number
+*        indicated in these bits. This includes frames received on a Marvell Tag port
+*        with the Ingress Monitor type, and frames received on a Network port that
+*        is enabled to be the Ingress Monitor Source Port.
+*        If the Ingress Monitor Destination Port resides in this device these bits
+*        should point to the Network port where these frames are to egress. If the
+*        Ingress Monitor Destination Port resides in another device these bits
+*        should point to the Marvell Tag port in this device that is used to get
+*        to the device that contains the Ingress Monitor Destination Port.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetIngressMonitorDest
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gsysSetIngressMonitorDest Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_88E6093_FAMILY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    if(port == 0xF)
+        hwPort = (GT_U8)port;
+    else
+    {
+        hwPort = (GT_U8)GT_LPORT_2_PORT(port);
+        if (hwPort == GT_INVALID_PORT)
+            return GT_BAD_PARAM;
+    }
+
+    /* Set the Ether Type */
+    retVal = hwSetGlobalRegField(dev,QD_REG_MONITOR_CONTROL, 12, 4, (GT_U16)hwPort);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetIngressMonitorDest
+*
+* DESCRIPTION:
+*        This routine gets Ingress Monitor Destination Port.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        port  - the logical port number.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetIngressMonitorDest
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_LPORT      *port
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysGetIngressMonitorDest Called.\n"));
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_88E6093_FAMILY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the IngressMonitorDest. */
+    retVal = hwGetGlobalRegField(dev,QD_REG_MONITOR_CONTROL, 12, 4, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    if(data == 0xF)
+    {
+        *port = (GT_LPORT)data;
+    }
+    else
+    {
+        *port = GT_PORT_2_LPORT((GT_U8)data);
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysSetEgressMonitorDest
+*
+* DESCRIPTION:
+*        This routine sets Egress Monitor Destination Port. Frames that are
+*        targeted toward an Egress Monitor Destination go out the port number
+*        indicated in these bits. This includes frames received on a Marvell Tag port
+*        with the Egress Monitor type, and frames transmitted on a Network port that
+*        is enabled to be the Egress Monitor Source Port.
+*        If the Egress Monitor Destination Port resides in this device these bits
+*        should point to the Network port where these frames are to egress. If the
+*        Egress Monitor Destination Port resides in another device these bits
+*        should point to the Marvell Tag port in this device that is used to get
+*        to the device that contains the Egress Monitor Destination Port.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetEgressMonitorDest
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gsysSetEgressMonitorDest Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_88E6093_FAMILY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(port == 0xF)
+        hwPort = (GT_U8)port;
+    else
+    {
+        hwPort = (GT_U8)GT_LPORT_2_PORT(port);
+        if (hwPort == GT_INVALID_PORT)
+            return GT_BAD_PARAM;
+    }
+
+    /* Set EgressMonitorDest */
+    retVal = hwSetGlobalRegField(dev,QD_REG_MONITOR_CONTROL, 8, 4, (GT_U16)hwPort);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetEgressMonitorDest
+*
+* DESCRIPTION:
+*        This routine gets Egress Monitor Destination Port.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        port  - the logical port number.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetEgressMonitorDest
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_LPORT      *port
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysGetEgressMonitorDest Called.\n"));
+
+    /* Only Gigabit Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_88E6093_FAMILY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the EgressMonitorDest. */
+    retVal = hwGetGlobalRegField(dev,QD_REG_MONITOR_CONTROL, 8, 4, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    if(data == 0xF)
+    {
+        *port = (GT_LPORT)data;
+    }
+    else
+    {
+        *port = GT_PORT_2_LPORT((GT_U8)data);
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetARPDest
+*
+* DESCRIPTION:
+*        This routine sets ARP Monitor Destination Port. Tagged or untagged
+*        frames ingress Network ports that have the Broadcast Destination Address
+*        with an Ethertype of 0x0806 are mirrored to this port. The ARPDest
+*        should point to the port that directs these frames to the switch's CPU
+*        that will process ARPs. This target port should be a Marvell Tag port so
+*        that frames will egress with a To CPU Marvell Tag with a CPU Code of ARP.
+*        To CPU Marvell Tag frames with a CPU Code off ARP that ingress a Marvell
+*        Tag port will be sent to the port number defineded in ARPDest.
+*
+*        If ARPDest =  0xF, ARP Monitoring is disabled and ingressing To CPU ARP
+*        frames will be discarded.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetARPDest
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gsysSetARPDest Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ARP_DEST_SUPPORT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    if(port == 0xF)
+        hwPort = (GT_U8)port;
+    else
+    {
+        hwPort = (GT_U8)(GT_LPORT_2_PORT(port));
+        if (hwPort == GT_INVALID_PORT)
+            return GT_BAD_PARAM;
+    }
+
+    /* Set related bit */
+    retVal = hwSetGlobalRegField(dev,QD_REG_MONITOR_CONTROL, 4, 4, (GT_U16)hwPort);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetARPDest
+*
+* DESCRIPTION:
+*        This routine gets ARP Monitor Destination Port. Tagged or untagged
+*        frames ingress Network ports that have the Broadcast Destination Address
+*        with an Ethertype of 0x0806 are mirrored to this port. The ARPDest
+*        should point to the port that directs these frames to the switch's CPU
+*        that will process ARPs. This target port should be a Marvell Tag port so
+*        that frames will egress with a To CPU Marvell Tag with a CPU Code of ARP.
+*        To CPU Marvell Tag frames with a CPU Code off ARP that ingress a Marvell
+*        Tag port will be sent to the port number defineded in ARPDest.
+*
+*        If ARPDest =  0xF, ARP Monitoring is disabled and ingressing To CPU ARP
+*        frames will be discarded.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        port  - the logical port number.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetARPDest
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_LPORT      *port
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysGetARPDest Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ARP_DEST_SUPPORT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    retVal = hwGetGlobalRegField(dev,QD_REG_MONITOR_CONTROL, 4, 4, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    if(data == 0xF)
+    {
+        *port = (GT_LPORT)data;
+    }
+    else
+    {
+        *port = GT_PORT_2_LPORT((GT_U8)data);
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetRsvd2CpuEnables
+*
+* DESCRIPTION:
+*        Reserved DA Enables. When the Rsvd2Cpu(gsysSetRsvd2Cpu) is set to a one,
+*        the 16 reserved multicast DA addresses, whose bit in this register are
+*        also set to a one, are treadted as MGMT frames. All the reserved DA's
+*        take the form 01:80:C2:00:00:0x. When x = 0x0, bit 0 of this register is
+*        tested. When x = 0x2, bit 2 of this field is tested and so on.
+*        If the tested bit in this register is cleared to a zero, the frame will
+*        be treated as a normal (non-MGMT) frame.
+*
+* INPUTS:
+*        enBits - bit vector of enabled Reserved Multicast.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetRsvd2CpuEnables
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U16        enBits
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gsysSetRsvd2CpuEnables Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ENHANCED_MULTICAST))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Set related register */
+    retVal = hwWriteGlobal2Reg(dev,QD_REG_MGMT_ENABLE, (GT_U16)enBits);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetRsvd2CpuEnables
+*
+* DESCRIPTION:
+*        Reserved DA Enables. When the Rsvd2Cpu(gsysSetRsvd2Cpu) is set to a one,
+*        the 16 reserved multicast DA addresses, whose bit in this register are
+*        also set to a one, are treadted as MGMT frames. All the reserved DA's
+*        take the form 01:80:C2:00:00:0x. When x = 0x0, bit 0 of this register is
+*        tested. When x = 0x2, bit 2 of this field is tested and so on.
+*        If the tested bit in this register is cleared to a zero, the frame will
+*        be treated as a normal (non-MGMT) frame.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        enBits - bit vector of enabled Reserved Multicast.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetRsvd2CpuEnables
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U16      *enBits
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gsysGetRsvd2CpuEnables Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ENHANCED_MULTICAST))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related register */
+    retVal = hwReadGlobal2Reg(dev, QD_REG_MGMT_ENABLE, enBits);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetRsvd2Cpu
+*
+* DESCRIPTION:
+*        When the Rsvd2Cpu is set to a one(GT_TRUE), frames with a Destination
+*        Address in the range 01:80:C2:00:00:0x, regardless of their VLAN
+*        membership, will be considered MGMT frames and sent to the CPU Port.
+*        If device supports Rsvd2CpuEnable (gsysSetRsvd2CpuEnable function),
+*        the frame will be considered MGMT frame when the associated Rsvd2CpuEnable
+*        bit for the frames's DA is also set to a one.
+*
+* INPUTS:
+*        en - GT_TRUE if Rsvd2Cpu is set. GT_FALSE, otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetRsvd2Cpu
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysSetRsvd2Cpu Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!((IS_IN_DEV_GROUP(dev,DEV_ENHANCED_MULTICAST)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_MULTICAST))))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set related bit */
+    if (IS_IN_DEV_GROUP(dev,DEV_MULTICAST))
+    {
+        retVal = hwSetGlobalRegField(dev,QD_REG_MANGEMENT_CONTROL,3,1, data);
+    }
+    else
+    {
+        retVal = hwSetGlobal2RegField(dev,QD_REG_MANAGEMENT, 3, 1, data);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetRsvd2Cpu
+*
+* DESCRIPTION:
+*        When the Rsvd2Cpu is set to a one(GT_TRUE), frames with a Destination
+*        Address in the range 01:80:C2:00:00:0x, regardless of their VLAN
+*        membership, will be considered MGMT frames and sent to the CPU Port.
+*        If device supports Rsvd2CpuEnable (gsysSetRsvd2CpuEnable function),
+*        the frame will be considered MGMT frame when the associated Rsvd2CpuEnable
+*        bit for the frames's DA is also set to a one.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if Rsvd2Cpu is set. GT_FALSE, otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetRsvd2Cpu
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysGetRsvd2Cpu Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!((IS_IN_DEV_GROUP(dev,DEV_ENHANCED_MULTICAST)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_MULTICAST))))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    if (IS_IN_DEV_GROUP(dev,DEV_MULTICAST))
+    {
+        retVal = hwGetGlobalRegField(dev,QD_REG_MANGEMENT_CONTROL,3,1,&data);
+    }
+    else
+    {
+        retVal = hwGetGlobal2RegField(dev,QD_REG_MANAGEMENT,3,1,&data);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysSetMGMTPri
+*
+* DESCRIPTION:
+*        These bits are used as the PRI[2:0] bits on Rsvd2CPU frames.
+*
+* INPUTS:
+*        pri - PRI[2:0] bits (should be less than 8)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - If pri is not less than 8.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetMGMTPri
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U16        pri
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gsysSetMGMTPri Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ENHANCED_MULTICAST))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (pri > 0x7)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Set related bit */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_MANAGEMENT, 0, 3, pri);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetMGMTPri
+*
+* DESCRIPTION:
+*        These bits are used as the PRI[2:0] bits on Rsvd2CPU frames.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        pri - PRI[2:0] bits (should be less than 8)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetMGMTPri
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U16      *pri
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    DBG_INFO(("gsysGetMGMTPri Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ENHANCED_MULTICAST))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_MANAGEMENT,0,3,pri);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetUseDoubleTagData
+*
+* DESCRIPTION:
+*        This bit is used to determine if Double Tag data that is removed from a
+*        Double Tag frame is used or ignored when making switching decisions on
+*        the frame.
+*
+* INPUTS:
+*        en - GT_TRUE to use removed tag data, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetUseDoubleTagData
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysSetUseDoubleTagData Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_MARVELL_TAG_LOOP_BLOCK))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set related bit */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_MANAGEMENT, 15, 1, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetUseDoubleTagData
+*
+* DESCRIPTION:
+*        This bit is used to determine if Double Tag data that is removed from a
+*        Double Tag frame is used or ignored when making switching decisions on
+*        the frame.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if removed tag data is used, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetUseDoubleTagData
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysGetUseDoubleTagData Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_MARVELL_TAG_LOOP_BLOCK))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_MANAGEMENT,15,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetPreventLoops
+*
+* DESCRIPTION:
+*        When a Marvell Tag port receives a Forward Marvell Tag whose Src_Dev
+*        field equals this device's Device Number, the following action will be
+*        taken depending upon the value of this bit.
+*        GT_TRUE (1) - The frame will be discarded.
+*        GT_FALSE(0) - The frame will be prevented from going out its original
+*                        source port as defined by the frame's Src_Port field.
+*
+* INPUTS:
+*        en - GT_TRUE to discard the frame as described above, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetPreventLoops
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysSetPreventLoops Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_MARVELL_TAG_LOOP_BLOCK))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set related bit */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_MANAGEMENT, 14, 1, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetPreventLoops
+*
+* DESCRIPTION:
+*        When a Marvell Tag port receives a Forward Marvell Tag whose Src_Dev
+*        field equals this device's Device Number, the following action will be
+*        taken depending upon the value of this bit.
+*        GT_TRUE (1) - The frame will be discarded.
+*        GT_FALSE(0) - The frame will be prevented from going out its original
+*                        source port as defined by the frame's Src_Port field.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE to discard the frame as described above, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetPreventLoops
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysGetPreventLoops Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_MARVELL_TAG_LOOP_BLOCK))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_MANAGEMENT,14,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysSetFlowControlMessage
+*
+* DESCRIPTION:
+*        When this bit is set to one, Marvell Tag Flow Control messages will be
+*        generated when an output queue becomes congested and received Marvell Tag
+*        Flow Control messages will pause MACs inside this device. When this bit
+*        is cleared to a zero Marvell Tag Flow Control messages will not be
+*        generated and any received will be ignored at the target MAC.
+*
+* INPUTS:
+*        en - GT_TRUE to use Marvell Tag Flow Control message, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetFlowControlMessage
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysSetFlowControlMessage Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_MARVELL_TAG_FLOW_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set related bit */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_MANAGEMENT, 13, 1, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetFlowControlMessage
+*
+* DESCRIPTION:
+*        When this bit is set to one, Marvell Tag Flow Control messages will be
+*        generated when an output queue becomes congested and received Marvell Tag
+*        Flow Control messages will pause MACs inside this device. When this bit
+*        is cleared to a zero Marvell Tag Flow Control messages will not be
+*        generated and any received will be ignored at the target MAC.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE to use Marvell Tag Flow Control message, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetFlowControlMessage
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysGetFlowControlMessage Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_MARVELL_TAG_FLOW_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_MANAGEMENT,13,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysSetForceFlowControlPri
+*
+* DESCRIPTION:
+*        When this bit is set to a one the PRI[2:0] bits of generated Marvell Tag
+*        Flow Control frames will be set to the value of the FC Pri bits (set by
+*        gsysSetFCPri function call). When this bit is cleared to a zero generated
+*        Marvell Tag Flow Control frames will retain the PRI[2:0] bits from the
+*        frames that caused the congestion. This bit will have no effect if the
+*        FlowControlMessage bit(gsysSetFlowControlMessage function call) is
+*        cleared to a zero.
+*
+* INPUTS:
+*        en - GT_TRUE to use defined PRI bits, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetForceFlowControlPri
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysSetForceFlowControlPri Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_MARVELL_TAG_FLOW_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set related bit */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_MANAGEMENT, 7, 1, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetForceFlowControlPri
+*
+* DESCRIPTION:
+*        When this bit is set to a one the PRI[2:0] bits of generated Marvell Tag
+*        Flow Control frames will be set to the value of the FC Pri bits (set by
+*        gsysSetFCPri function call). When this bit is cleared to a zero generated
+*        Marvell Tag Flow Control frames will retain the PRI[2:0] bits from the
+*        frames that caused the congestion. This bit will have no effect if the
+*        FlowControlMessage bit(gsysSetFlowControlMessage function call) is
+*        cleared to a zero.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE to use defined PRI bits, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetForceFlowControlPri
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysGetForceFlowControlPri Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_MARVELL_TAG_FLOW_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_MANAGEMENT,7,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysSetFCPri
+*
+* DESCRIPTION:
+*        These bits are used as the PRI[2:0] bits on generated Marvell Tag Flow
+*        Control frames if the ForceFlowControlPri bit(gsysSetForceFlowControlPri)
+*        is set to a one.
+*
+* INPUTS:
+*        pri - PRI[2:0] bits (should be less than 8)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - If pri is not less than 8.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetFCPri
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U16        pri
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gsysSetFCPri Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_MARVELL_TAG_FLOW_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (pri > 0x7)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Set related bit */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_MANAGEMENT, 4, 3, pri);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetFCPri
+*
+* DESCRIPTION:
+*        These bits are used as the PRI[2:0] bits on generated Marvell Tag Flow
+*        Control frames if the ForceFlowControlPri bit(gsysSetForceFlowControlPri)
+*        is set to a one.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        pri - PRI[2:0] bits (should be less than 8)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetFCPri
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U16      *pri
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    DBG_INFO(("gsysGetFCPri Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_MARVELL_TAG_FLOW_CTRL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_MANAGEMENT,4,3,pri);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetFlowCtrlDelay
+*
+* DESCRIPTION:
+*        This function sets Flow control delay time for 10Mbps, 100Mbps, and
+*        1000Mbps.
+*
+* INPUTS:
+*        sp - PORT_SPEED_10_MBPS, PORT_SPEED_100_MBPS, or PORT_SPEED_1000_MBPS
+*        delayTime - delay time.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if sp is not valid or delayTime is > 0x1FFF.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*    Actual delay time will be delayTime x 2.048uS (or x 8.192uS) depending on
+*    switch device. Please refer to the device datasheet for detailed information.
+*
+*******************************************************************************/
+GT_STATUS gsysSetFlowCtrlDelay
+(
+    IN GT_QD_DEV            *dev,
+    IN GT_PORT_SPEED_MODE    sp,
+    IN GT_U32                delayTime
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysSetFlowCtrlDelay Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FLOW_CTRL_DELAY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    /* Check if the register can be accessed. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_FLOWCTRL_DELAY;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+    }
+#else
+    do
+    {
+        retVal = hwReadGlobal2Reg(dev,QD_REG_FLOWCTRL_DELAY,&data);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+    } while (data & 0x8000);
+#endif
+
+    switch(sp)
+    {
+        case PORT_SPEED_10_MBPS:
+                data = 0;
+                break;
+        case PORT_SPEED_100_MBPS:
+                data = 1 << 13;
+                break;
+        case PORT_SPEED_1000_MBPS:
+                data = 2 << 13;
+                break;
+        default:
+                DBG_INFO(("GT_BAD_PARAM (sp)\n"));
+                gtSemGive(dev,dev->tblRegsSem);
+                return GT_BAD_PARAM;
+    }
+
+    if (delayTime > 0x1FFF)
+    {
+        DBG_INFO(("GT_BAD_PARAM (delayTime)\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return GT_BAD_PARAM;
+    }
+
+    data |= (GT_U16)(0x8000 | delayTime);
+
+    /* Set related register */
+    retVal = hwWriteGlobal2Reg(dev,QD_REG_FLOWCTRL_DELAY,data);
+
+    gtSemGive(dev,dev->tblRegsSem);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetFlowCtrlDelay
+*
+* DESCRIPTION:
+*        This function retrieves Flow control delay time for 10Mbps, 100Mbps, and
+*        1000Mbps.
+*
+* INPUTS:
+*        sp - PORT_SPEED_10_MBPS, PORT_SPEED_100_MBPS, or PORT_SPEED_1000_MBPS
+*
+* OUTPUTS:
+*        delayTime - delay time
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if sp is not valid or delayTime is > 0x1FFF.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*    Actual delay time will be delayTime x 2.048uS (or x 8.192uS) depending on
+*    switch device. Please refer to the device datasheet for detailed information.
+*
+*******************************************************************************/
+GT_STATUS gsysGetFlowCtrlDelay
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_PORT_SPEED_MODE    sp,
+    OUT GT_U32        *delayTime
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysGetFlowCtrlDelay Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FLOW_CTRL_DELAY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    /* Check if the register can be accessed. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_FLOWCTRL_DELAY;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+    }
+#else
+    do
+    {
+        retVal = hwReadGlobal2Reg(dev,QD_REG_FLOWCTRL_DELAY,&data);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+    } while (data & 0x8000);
+#endif
+
+    switch(sp)
+    {
+        case PORT_SPEED_10_MBPS:
+                data = 0;
+                break;
+        case PORT_SPEED_100_MBPS:
+                data = 1 << 13;
+                break;
+        case PORT_SPEED_1000_MBPS:
+                data = 2 << 13;
+                break;
+        default:
+                DBG_INFO(("GT_BAD_PARAM (sp)\n"));
+                gtSemGive(dev,dev->tblRegsSem);
+                return GT_BAD_PARAM;
+    }
+
+    retVal = hwWriteGlobal2Reg(dev,QD_REG_FLOWCTRL_DELAY,data);
+       if(retVal != GT_OK)
+    {
+           DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+           return retVal;
+    }
+
+    retVal = hwReadGlobal2Reg(dev,QD_REG_FLOWCTRL_DELAY,&data);
+
+    gtSemGive(dev,dev->tblRegsSem);
+
+       if(retVal != GT_OK)
+    {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    *delayTime = (GT_U32)(data & 0x1FFF);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetDevRoutingTable
+*
+* DESCRIPTION:
+*        This function sets Device to Port mapping (which device is connected to
+*        which port of this device).
+*
+* INPUTS:
+*        devNum - target device number.
+*        portNum - the logical port number.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if sp is not valid or delayTime is > 0x1FFF.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetDevRoutingTable
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U32          devNum,
+    IN GT_LPORT     port
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysSetDevRoutingTable Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_STACKING))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    if(devNum > 0x1F)
+    {
+        DBG_INFO(("GT_BAD_PARAM (devNum)\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return GT_BAD_PARAM;
+    }
+
+    /* Check if the register can be accessed. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 2;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_ROUTING_TBL;
+      regAccess.rw_reg_list[0].data = 15;
+      regAccess.rw_reg_list[1].cmd = HW_REG_WRITE;
+      regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[1].reg = QD_REG_ROUTING_TBL;
+      /* translate LPORT to hardware port */
+      if(port >= dev->numOfPorts)
+      {
+        hwPort = 0xF;
+      }
+      else
+      {
+        hwPort = GT_LPORT_2_PORT(port);
+      }
+      data = (GT_U16)(0x8000 | (devNum << 8) | hwPort);
+      regAccess.rw_reg_list[1].data = data;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+    }
+#else
+    do
+    {
+        retVal = hwReadGlobal2Reg(dev,QD_REG_ROUTING_TBL,&data);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+    } while (data & 0x8000);
+
+    /* translate LPORT to hardware port */
+    if(port >= dev->numOfPorts)
+    {
+        hwPort = 0xF;
+    }
+    else
+    {
+        hwPort = GT_LPORT_2_PORT(port);
+    }
+
+    data = (GT_U16)(0x8000 | (devNum << 8) | hwPort);
+
+    /* Set related register */
+    retVal = hwWriteGlobal2Reg(dev,QD_REG_ROUTING_TBL,data);
+#endif
+
+    gtSemGive(dev,dev->tblRegsSem);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetDevRoutingTable
+*
+* DESCRIPTION:
+*        This function gets Device to Port mapping (which device is connected to
+*        which port of this device).
+*
+* INPUTS:
+*        devNum - target device number.
+*
+* OUTPUTS:
+*        portNum - the logical port number.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if sp is not valid or delayTime is > 0x1FFF.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetDevRoutingTable
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32         devNum,
+    OUT GT_LPORT     *port
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGetDevRoutingTable Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_STACKING))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    if(devNum > 0x1F)
+    {
+        DBG_INFO(("GT_BAD_PARAM (devNum)\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return GT_BAD_PARAM;
+    }
+
+    /* Check if the register can be accessed. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 3;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_ROUTING_TBL;
+      regAccess.rw_reg_list[0].data = 15;
+      regAccess.rw_reg_list[1].cmd = HW_REG_WRITE;
+      regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[1].reg = QD_REG_ROUTING_TBL;
+      data = (GT_U16)(devNum << 8);
+      regAccess.rw_reg_list[1].data = data;
+      regAccess.rw_reg_list[2].cmd = HW_REG_READ;
+      regAccess.rw_reg_list[2].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[2].reg = QD_REG_ROUTING_TBL;
+      regAccess.rw_reg_list[2].data = 0;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+      data = qdLong2Short(regAccess.rw_reg_list[2].data);
+    }
+#else
+    do
+    {
+        retVal = hwReadGlobal2Reg(dev,QD_REG_ROUTING_TBL,&data);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+    } while (data & 0x8000);
+
+    data = (GT_U16)(devNum << 8);
+
+    retVal = hwWriteGlobal2Reg(dev,QD_REG_ROUTING_TBL,data);
+       if(retVal != GT_OK)
+    {
+           DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+           return retVal;
+    }
+
+    retVal = hwReadGlobal2Reg(dev,QD_REG_ROUTING_TBL,&data);
+
+#endif
+    gtSemGive(dev,dev->tblRegsSem);
+
+       if(retVal != GT_OK)
+    {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    *port = GT_PORT_2_LPORT((GT_U8)(data & 0xF));
+    if(*port == GT_INVALID_PORT)
+    {
+        *port = 0xF;
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysSetTrunkMaskTable
+*
+* DESCRIPTION:
+*        This function sets Trunk Mask for the given Trunk Number.
+*
+* INPUTS:
+*        trunkNum - Trunk Number.
+*        trunkMask - Trunk mask bits. Bit 0 controls trunk masking for port 0,
+*                    bit 1 for port 1 , etc.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if trunkNum > 0x7 for 88E6095 and 88E6183 family and
+*                       if trunkNum > 0x3 for 88E6065 family.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetTrunkMaskTable
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U32          trunkNum,
+    IN GT_U32        trunkMask
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U32            mask;
+
+    DBG_INFO(("gsysSetTrunkMaskTable Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!((IS_IN_DEV_GROUP(dev,DEV_TRUNK)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_REDUCED_TRUNK))))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    /* Check if the register can be accessed. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 2;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_TRUNK_MASK_TBL;
+      regAccess.rw_reg_list[0].data = 15;
+      regAccess.rw_reg_list[1].cmd = HW_REG_READ;
+      regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[1].reg = QD_REG_TRUNK_MASK_TBL;
+      regAccess.rw_reg_list[1].data = 0;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+           DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+      data = qdLong2Short(regAccess.rw_reg_list[1].data);
+    }
+#else
+    do
+    {
+        retVal = hwReadGlobal2Reg(dev,QD_REG_TRUNK_MASK_TBL,&data);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+    } while (data & 0x8000);
+#endif
+
+    data &= 0x0800;
+
+    if(trunkNum > 0x7)
+    {
+        DBG_INFO(("GT_BAD_PARAM (trunkNum)\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return GT_BAD_PARAM;
+    }
+
+    if((trunkNum > 0x3) && IS_IN_DEV_GROUP(dev,DEV_REDUCED_TRUNK))
+    {
+        DBG_INFO(("GT_BAD_PARAM (trunkNum)\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return GT_BAD_PARAM;
+    }
+
+    mask = (1 << dev->numOfPorts) - 1;
+
+    if(trunkMask > mask)
+    {
+        DBG_INFO(("GT_BAD_PARAM (trunkMask)\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return GT_BAD_PARAM;
+    }
+
+    mask = GT_LPORTVEC_2_PORTVEC(trunkMask);
+
+    data = (GT_U16)(0x8000 | data | (trunkNum << 12) | mask);
+
+    /* Set related register */
+    retVal = hwWriteGlobal2Reg(dev,QD_REG_TRUNK_MASK_TBL,data);
+
+    gtSemGive(dev,dev->tblRegsSem);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetTrunkMaskTable
+*
+* DESCRIPTION:
+*        This function gets Trunk Mask for the given Trunk Number.
+*
+* INPUTS:
+*        trunkNum - Trunk Number.
+*
+* OUTPUTS:
+*        trunkMask - Trunk mask bits. Bit 0 controls trunk masking for port 0,
+*                    bit 1 for port 1 , etc.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if trunkNum > 0x7 for 88E6095 and 88E6183 family and
+*                       if trunkNum > 0x3 for 88E6065 family.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetTrunkMaskTable
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32         trunkNum,
+    OUT GT_U32        *trunkMask
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U32            mask;
+
+    DBG_INFO(("gsysGetTrunkMaskTable Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!((IS_IN_DEV_GROUP(dev,DEV_TRUNK)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_REDUCED_TRUNK))))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    /* Check if the register can be accessed. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 2;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_TRUNK_MASK_TBL;
+      regAccess.rw_reg_list[0].data = 15;
+      regAccess.rw_reg_list[1].cmd = HW_REG_READ;
+      regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[1].reg = QD_REG_TRUNK_MASK_TBL;
+      regAccess.rw_reg_list[1].data = 0;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+           DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+    }
+#else
+    do
+    {
+        retVal = hwReadGlobal2Reg(dev,QD_REG_TRUNK_MASK_TBL,&data);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+    } while (data & 0x8000);
+#endif
+
+    data &= 0x0800;
+
+    if(trunkNum > 0x7)
+    {
+        DBG_INFO(("GT_BAD_PARAM (trunkId)\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return GT_BAD_PARAM;
+    }
+
+    if((trunkNum > 0x3) && IS_IN_DEV_GROUP(dev,DEV_REDUCED_TRUNK))
+    {
+        DBG_INFO(("GT_BAD_PARAM (trunkNum)\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return GT_BAD_PARAM;
+    }
+
+    data = (GT_U16)(data | (trunkNum << 12));
+
+    retVal = hwWriteGlobal2Reg(dev,QD_REG_TRUNK_MASK_TBL,data);
+       if(retVal != GT_OK)
+    {
+           DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+           return retVal;
+    }
+
+    retVal = hwReadGlobal2Reg(dev,QD_REG_TRUNK_MASK_TBL,&data);
+
+    gtSemGive(dev,dev->tblRegsSem);
+
+       if(retVal != GT_OK)
+    {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    mask = (1 << dev->maxPorts) - 1;
+
+    *trunkMask = GT_PORTVEC_2_LPORTVEC(data & mask);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysSetHashTrunk
+*
+* DESCRIPTION:
+*        Hash DA & SA for TrunkMask selection. Trunk load balancing is accomplished
+*        by using the frame's DA and SA fields to access one of eight Trunk Masks.
+*        When this bit is set to a one the hashed computed for address table
+*        lookups is used for the TrunkMask selection. When this bit is cleared to
+*        a zero the lower 3 bits of the frame's DA and SA are XOR'ed together to
+*        select the TrunkMask to use.
+*
+* INPUTS:
+*        en - GT_TRUE to use lookup table, GT_FALSE to use XOR.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetHashTrunk
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysSetHashTrunk Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!((IS_IN_DEV_GROUP(dev,DEV_TRUNK)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_REDUCED_TRUNK))))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set related bit */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_TRUNK_MASK_TBL, 11, 1, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetHashTrunk
+*
+* DESCRIPTION:
+*        Hash DA & SA for TrunkMask selection. Trunk load balancing is accomplished
+*        by using the frame's DA and SA fields to access one of eight Trunk Masks.
+*        When this bit is set to a one the hashed computed for address table
+*        lookups is used for the TrunkMask selection. When this bit is cleared to
+*        a zero the lower 3 bits of the frame's DA and SA are XOR'ed together to
+*        select the TrunkMask to use.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE to use lookup table, GT_FALSE to use XOR.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetHashTrunk
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysGetHashTrunk Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!((IS_IN_DEV_GROUP(dev,DEV_TRUNK)) ||
+        (IS_IN_DEV_GROUP(dev,DEV_REDUCED_TRUNK))))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_TRUNK_MASK_TBL,11,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysSetTrunkRouting
+*
+* DESCRIPTION:
+*        This function sets routing information for the given Trunk ID.
+*
+* INPUTS:
+*        trunkId - Trunk ID.
+*        trunkRoute - Trunk route bits. Bit 0 controls trunk routing for port 0,
+*                    bit 1 for port 1 , etc.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if trunkId > 0xF.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysSetTrunkRouting
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U32          trunkId,
+    IN GT_U32        trunkRoute
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U32            mask;
+    GT_U32            maxTrunk;
+
+    DBG_INFO(("gsysSetTrunkRouting Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TRUNK))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    /* Check if the register can be accessed. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_TRUNK_ROUTING;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+           DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+    }
+#else
+    do
+    {
+        retVal = hwReadGlobal2Reg(dev,QD_REG_TRUNK_ROUTING,&data);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+    } while (data & 0x8000);
+#endif
+
+    if (IS_IN_DEV_GROUP(dev,DEV_8_TRUNKING))
+        maxTrunk = 8;
+    else
+        maxTrunk = 16;
+
+    if(trunkId >= maxTrunk)
+    {
+        DBG_INFO(("GT_BAD_PARAM (trunkId)\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return GT_BAD_PARAM;
+    }
+
+    mask = (1 << dev->numOfPorts) - 1;
+
+    if(trunkRoute > mask)
+    {
+        DBG_INFO(("GT_BAD_PARAM (trunkRoute)\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return GT_BAD_PARAM;
+    }
+
+    mask = GT_LPORTVEC_2_PORTVEC(trunkRoute);
+
+    data = (GT_U16)(0x8000 | (trunkId << 11) | mask);
+
+    /* Set related register */
+    retVal = hwWriteGlobal2Reg(dev,QD_REG_TRUNK_ROUTING,data);
+
+    gtSemGive(dev,dev->tblRegsSem);
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetTrunkRouting
+*
+* DESCRIPTION:
+*        This function retrieves routing information for the given Trunk ID.
+*
+* INPUTS:
+*        trunkId - Trunk ID.
+*
+* OUTPUTS:
+*        trunkRoute - Trunk route bits. Bit 0 controls trunk routing for port 0,
+*                    bit 1 for port 1 , etc.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if trunkId > 0xF.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetTrunkRouting
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32         trunkId,
+    OUT GT_U32        *trunkRoute
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U32            mask;
+    GT_U32            maxTrunk;
+
+    DBG_INFO(("gsysGetTrunkRouting Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TRUNK))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    if (IS_IN_DEV_GROUP(dev,DEV_8_TRUNKING))
+        maxTrunk = 8;
+    else
+        maxTrunk = 16;
+
+    if(trunkId >= maxTrunk)
+    {
+        DBG_INFO(("GT_BAD_PARAM (trunkId)\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return GT_BAD_PARAM;
+    }
+
+    /* Check if the register can be accessed. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 3;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_TRUNK_ROUTING;
+      regAccess.rw_reg_list[0].data = 15;
+      regAccess.rw_reg_list[1].cmd = HW_REG_WRITE;
+      regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[1].reg = QD_REG_TRUNK_ROUTING;
+      data = (GT_U16)(trunkId << 11);
+      regAccess.rw_reg_list[1].data = data;
+      regAccess.rw_reg_list[2].cmd = HW_REG_READ;
+      regAccess.rw_reg_list[2].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[2].reg = QD_REG_TRUNK_ROUTING;
+      regAccess.rw_reg_list[2].data = 0;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+           DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+      data = qdLong2Short(regAccess.rw_reg_list[2].data);
+    }
+#else
+    do
+    {
+        retVal = hwReadGlobal2Reg(dev,QD_REG_TRUNK_ROUTING,&data);
+        if(retVal != GT_OK)
+        {
+            DBG_INFO(("Failed.\n"));
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+    } while (data & 0x8000);
+
+    data = (GT_U16)(trunkId << 11);
+
+    retVal = hwWriteGlobal2Reg(dev,QD_REG_TRUNK_ROUTING,data);
+       if(retVal != GT_OK)
+    {
+           DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+           return retVal;
+    }
+
+    retVal = hwReadGlobal2Reg(dev,QD_REG_TRUNK_ROUTING,&data);
+#endif
+    gtSemGive(dev,dev->tblRegsSem);
+       if(retVal != GT_OK)
+    {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    mask = (1 << dev->maxPorts) - 1;
+
+    *trunkRoute = GT_PORTVEC_2_LPORTVEC(data & mask);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetRateLimitMode
+*
+* DESCRIPTION:
+*        Ingress Rate Limiting can be either Priority based or Burst Size based.
+*        This routine sets which mode to use.
+*
+* INPUTS:
+*        mode - either GT_RATE_PRI_BASE or GT_RATE_BURST_BASE
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if invalid mode is used.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetRateLimitMode
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_INGRESS_RATE_MODE mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gsysSetRateLimitMode Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_BURST_RATE))
+    {
+        if (!IS_IN_DEV_GROUP(dev,DEV_NEW_FEATURE_IN_REV) ||
+            ((GT_DEVICE_REV)dev->revision < GT_REV_2))
+        {
+            DBG_INFO(("GT_NOT_SUPPORTED\n"));
+            return GT_NOT_SUPPORTED;
+        }
+    }
+
+    switch (mode)
+    {
+        case GT_RATE_PRI_BASE:
+            data = 0;
+            break;
+        case GT_RATE_BURST_BASE:
+            data = 1;
+            break;
+        default:
+            DBG_INFO(("Not supported mode %i\n",mode));
+            return GT_BAD_PARAM;
+    }
+
+    hwPort = 7;
+
+    /* Set related bit */
+    retVal = hwSetPortRegField(dev,hwPort, 0x1A, 15, 1, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetRateLimitMode
+*
+* DESCRIPTION:
+*        Ingress Rate Limiting can be either Priority based or Burst Size based.
+*        This routine gets which mode is being used.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        mode - either GT_RATE_PRI_BASE or GT_RATE_BURST_BASE
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetRateLimitMode
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_INGRESS_RATE_MODE *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gsysGetRateLimitMode Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_BURST_RATE))
+    {
+        if (!IS_IN_DEV_GROUP(dev,DEV_NEW_FEATURE_IN_REV) ||
+            ((GT_DEVICE_REV)dev->revision < GT_REV_2))
+        {
+            DBG_INFO(("GT_NOT_SUPPORTED\n"));
+            return GT_NOT_SUPPORTED;
+        }
+    }
+
+    hwPort = 7;
+    data = 0;
+
+    /* Get related bit */
+    retVal = hwGetPortRegField(dev,hwPort, 0x1A, 15, 1, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    if (data == 0)
+        *mode = GT_RATE_PRI_BASE;
+    else
+        *mode = GT_RATE_BURST_BASE;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetAgeInt
+*
+* DESCRIPTION:
+*        Enable/Disable Age Refresh Interrupt. If CPU Directed Learning is being
+*        used (gprtSetLockedPort), it may be desirable to know when an address is
+*        still being used before it totally ages out of the switch. This can be
+*        accomplished by enabling Age Refresh Interrupt (or ATU Age Violation Int).
+*        An ATU Age Violation looks identical to and reported the same as an ATU
+*        Miss Violation. The only difference is when this reported. Normal ATU Miss
+*        Violation only occur if a new SA arrives at a LockedPort. The Age version
+*        of the ATU Miss Violation occurs if an SA arrives at a LockedPort, where
+*        the address is contained in the ATU's database, but where its EntryState
+*        is less than 0x4 (i.e., it has aged more than 1/2 way).
+*        GT_ATU_PROB Interrupt should be enabled for this interrupt to occur.
+*        Refer to eventSetActive routine to enable GT_ATU_PROB.
+*
+*        If the device supports Refresh Locked feature (gprtSetRefreshLocked API),
+*        the feature must not be enabled for this Miss Violation to occur.
+*
+* INPUTS:
+*        en - GT_TRUE, to enable,
+*             GT_FALSE, otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetAgeInt
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gsysSetAgeInt Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AGE_INTERRUPT))
+    {
+        if (!IS_IN_DEV_GROUP(dev,DEV_NEW_FEATURE_IN_REV) ||
+            ((GT_DEVICE_REV)dev->revision < GT_REV_2))
+        {
+            DBG_INFO(("GT_NOT_SUPPORTED\n"));
+            return GT_NOT_SUPPORTED;
+        }
+    }
+
+    BOOL_2_BIT(en, data);
+
+    if (IS_IN_DEV_GROUP(dev,DEV_AGE_INT_GLOBAL2))
+    {
+        retVal = hwSetGlobal2RegField(dev,QD_REG_MANAGEMENT, 10, 1, data);
+    }
+    else
+    {
+        hwPort = 7;
+        /* Set related bit */
+        retVal = hwSetPortRegField(dev,hwPort, 0x1A, 14, 1, data);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetAgeInt
+*
+* DESCRIPTION:
+*        Get state of Age Refresh Interrupt mode. If CPU Directed Learning is being
+*        used (gprtSetLockedPort), it may be desirable to know when an address is
+*        still being used before it totally ages out of the switch. This can be
+*        accomplished by enabling Age Refresh Interrupt (or ATU Age Violation Int).
+*        An ATU Age Violation looks identical to and reported the same as an ATU
+*        Miss Violation. The only difference is when this reported. Normal ATU Miss
+*        Violation only occur if a new SA arrives at a LockedPort. The Age version
+*        of the ATU Miss Violation occurs if an SA arrives at a LockedPort, where
+*        the address is contained in the ATU's database, but where its EntryState
+*        is less than 0x4 (i.e., it has aged more than 1/2 way).
+*        GT_ATU_PROB Interrupt should be enabled for this interrupt to occur.
+*        Refer to eventSetActive routine to enable GT_ATU_PROB.
+*
+*        If the device supports Refresh Locked feature (gprtSetRefreshLocked API),
+*        the feature must not be enabled for this Miss Violation to occur.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE, if enabled,
+*             GT_FALSE, otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetAgeInt
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL        *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gsysGetAgeInt Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_AGE_INTERRUPT))
+    {
+        if (!IS_IN_DEV_GROUP(dev,DEV_NEW_FEATURE_IN_REV) ||
+            ((GT_DEVICE_REV)dev->revision < GT_REV_2))
+        {
+            DBG_INFO(("GT_NOT_SUPPORTED\n"));
+            return GT_NOT_SUPPORTED;
+        }
+    }
+
+    data = 0;
+
+    if (IS_IN_DEV_GROUP(dev,DEV_AGE_INT_GLOBAL2))
+    {
+        retVal = hwGetGlobal2RegField(dev,QD_REG_MANAGEMENT, 10, 1, &data);
+    }
+    else
+    {
+        hwPort = 7;
+        /* Get related bit */
+        retVal = hwGetPortRegField(dev,hwPort, 0x1A, 14, 1, &data);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data, *en);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetForceSnoopPri
+*
+* DESCRIPTION:
+*        Force Snooping Priority. The priority on IGMP or MLD Snoop frames are
+*        set to the SnoopPri value (gsysSetSnoopPri API) when Force Snooping
+*       Priority is enabled. When it's disabled, the priority on these frames
+*        is not modified.
+*
+* INPUTS:
+*        en - GT_TRUE to use defined PRI bits, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetForceSnoopPri
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysSetForceSnoopPri Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SNOOP_PRI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set related bit */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_PRIORITY_OVERRIDE, 7, 1, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetForceSnoopPri
+*
+* DESCRIPTION:
+*        Force Snooping Priority. The priority on IGMP or MLD Snoop frames are
+*        set to the SnoopPri value (gsysSetSnoopPri API) when Force Snooping
+*       Priority is enabled. When it's disabled, the priority on these frames
+*        is not modified.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE to use defined PRI bits, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetForceSnoopPri
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysGetForceSnoopPri Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SNOOP_PRI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_PRIORITY_OVERRIDE,7,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysSetSnoopPri
+*
+* DESCRIPTION:
+*        Snoop Priority. When ForceSnoopPri (gsysSetForceSnoopPri API) is enabled,
+*       this priority is used as the egressing frame's PRI[2:0] bits on generated
+*       Marvell Tag To_CPU Snoop frames and higher 2 bits of the priority are
+*       used as the internal Queue Priority to use on IGMP/MLD snoop frames.
+*
+* INPUTS:
+*        pri - PRI[2:0] bits (should be less than 8)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - If pri is not less than 8.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetSnoopPri
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U16        pri
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gsysSetSnoopPri Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SNOOP_PRI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (pri > 0x7)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Set related bit */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_PRIORITY_OVERRIDE, 4, 3, pri);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetSnoopPri
+*
+* DESCRIPTION:
+*        Snoop Priority. When ForceSnoopPri (gsysSetForceSnoopPri API) is enabled,
+*       this priority is used as the egressing frame's PRI[2:0] bits on generated
+*       Marvell Tag To_CPU Snoop frames and higher 2 bits of the priority are
+*       used as the internal Queue Priority to use on IGMP/MLD snoop frames.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        pri - PRI[2:0] bits (should be less than 8)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetSnoopPri
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U16      *pri
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    DBG_INFO(("gsysGetSnoopPri Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SNOOP_PRI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_PRIORITY_OVERRIDE,4,3,pri);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetForceARPPri
+*
+* DESCRIPTION:
+*        Force ARP Priority. The priority on ARP frames are set to the ARPPri
+*       value (gsysSetARPPri API) when Force ARP Priority is enabled. When it's
+*       disabled, the priority on these frames is not modified.
+*
+* INPUTS:
+*        en - GT_TRUE to use defined PRI bits, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetForceARPPri
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysSetForceARPPri Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ARP_PRI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set related bit */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_PRIORITY_OVERRIDE, 3, 1, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetForceARPPri
+*
+* DESCRIPTION:
+*        Force ARP Priority. The priority on ARP frames are set to the ARPPri
+*       value (gsysSetARPPri API) when Force ARP Priority is enabled. When it's
+*       disabled, the priority on these frames is not modified.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE to use defined PRI bits, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetForceARPPri
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysGetForceARPPri Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ARP_PRI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_PRIORITY_OVERRIDE,3,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysSetARPPri
+*
+* DESCRIPTION:
+*        ARP Priority. When ForceARPPri (gsysSetForceARPPri API) is enabled,
+*       this priority is used as the egressing frame's PRI[2:0] bits on generated
+*       Marvell Tag To_CPU ARP frames and higher 2 bits of the priority are
+*       used as the internal Queue Priority to use on ARP frames.
+*
+* INPUTS:
+*        pri - PRI[2:0] bits (should be less than 8)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - If pri is not less than 8.
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetARPPri
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U16        pri
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gsysSetARPPri Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ARP_PRI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (pri > 0x7)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Set related bit */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_PRIORITY_OVERRIDE, 0, 3, pri);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetARPPri
+*
+* DESCRIPTION:
+*        ARP Priority. When ForceARPPri (gsysSetForceARPPri API) is enabled,
+*       this priority is used as the egressing frame's PRI[2:0] bits on generated
+*       Marvell Tag To_CPU ARP frames and higher 2 bits of the priority are
+*       used as the internal Queue Priority to use on ARP frames.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        pri - PRI[2:0] bits (should be less than 8)
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetARPPri
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U16      *pri
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    DBG_INFO(("gsysGetARPPri Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ARP_PRI))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_PRIORITY_OVERRIDE,0,3,pri);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetUsePortSchedule
+*
+* DESCRIPTION:
+*       This routine sets per port scheduling mode
+*
+* INPUTS:
+*       en - GT_TRUE enables per port scheduling,
+*             GT_FALSE disable.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetUsePortSchedule
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_BOOL   en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    DBG_INFO(("gsysSetWatchDog Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_PORT_SCHEDULE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set the UsePortSchedule bit.            */
+    retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,12,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysGetUsePortSchedule
+*
+* DESCRIPTION:
+*       This routine gets per port scheduling mode
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       en - GT_TRUE enables per port scheduling,
+*             GT_FALSE disable.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetUsePortSchedule
+(
+    IN  GT_QD_DEV *dev,
+    OUT GT_BOOL   *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                    /* register.                    */
+    DBG_INFO(("gsysSetWatchDog Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_PORT_SCHEDULE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the UsePortSchedule bit.            */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,12,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data, *en);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetOldHader
+*
+* DESCRIPTION:
+*       This routine sets Egress Old Header.
+*        When this feature is enabled and frames are egressed with a Marvell Header,
+*        the format of the Header is slightly modified to be backwards compatible
+*        with previous devices that used the original Header. Specifically, bit 3
+*        of the Header's 2nd octet is cleared to a zero such that only FPri[2:1]
+*        is available in the Header.
+*
+* INPUTS:
+*       en - GT_TRUE to enable Old Header Mode,
+*             GT_FALSE to disable
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetOldHader
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_BOOL   en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysSetArpQPri Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_OLD_HEADER))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set the OldHader bit.            */
+    retVal = hwSetGlobalRegField(dev,QD_REG_MANGEMENT_CONTROL,5,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysGetOldHader
+*
+* DESCRIPTION:
+*       This routine gets Egress Old Header.
+*        When this feature is enabled and frames are egressed with a Marvell Header,
+*        the format of the Header is slightly modified to be backwards compatible
+*        with previous devices that used the original Header. Specifically, bit 3
+*        of the Header's 2nd octet is cleared to a zero such that only FPri[2:1]
+*        is available in the Header.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       en - GT_TRUE to enable Old Header Mode,
+*             GT_FALSE to disable
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetOldHader
+(
+    IN  GT_QD_DEV *dev,
+    OUT GT_BOOL   *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysGetArpQPri Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_OLD_HEADER))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the OldHader bit.            */
+    retVal = hwGetGlobalRegField(dev,QD_REG_MANGEMENT_CONTROL,5,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data, *en);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetRecursiveStrippingDisable
+*
+* DESCRIPTION:
+*       This routine determines if recursive tag stripping feature needs to be
+*        disabled.
+*
+* INPUTS:
+*       en - GT_TRUE to disable Recursive Tag Stripping,
+*             GT_FALSE to enable
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetRecursiveStrippingDisable
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_BOOL   en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysSetRecursiveStrippingDisable Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_RECURSIVE_TAG_STRIP))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set the RecursiveStrippingDisable bit.            */
+    retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL2,15,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysGetRecursiveStrippingDisable
+*
+* DESCRIPTION:
+*       This routine checks if recursive tag stripping feature is disabled.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       en - GT_TRUE, if Recursive Tag Stripping is disabled,
+*             GT_FALSE, otherwise
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetRecursiveStrippingDisable
+(
+    IN  GT_QD_DEV *dev,
+    OUT GT_BOOL   *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysGetRecursiveStrippingDisable Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_RECURSIVE_TAG_STRIP))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the RecursiveStrippingDisable bit.            */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL2,15,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data, *en);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetCPUPort
+*
+* DESCRIPTION:
+*       This routine sets CPU Port where Rsvd2Cpu frames and IGMP/MLD Snooped
+*        frames are destined.
+*
+* INPUTS:
+*       cpuPort - CPU Port
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetCPUPort
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT  cpuPort
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gsysSetCPUPort Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(cpuPort);
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_CPU_PORT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (cpuPort >= dev->numOfPorts)
+    {
+        return GT_BAD_PARAM;
+    }
+
+    /* Set the CPU Port.            */
+    retVal = hwSetGlobalRegField(dev,QD_REG_MANGEMENT_CONTROL,0,3,(GT_U16)hwPort);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetCPUPort
+*
+* DESCRIPTION:
+*       This routine gets CPU Port where Rsvd2Cpu frames and IGMP/MLD Snooped
+*        frames are destined.
+*
+* INPUTS:
+*       cpuPort - CPU Port
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetCPUPort
+(
+    IN  GT_QD_DEV *dev,
+    OUT GT_LPORT  *cpuPort
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gsysGetCPUPort Called.\n"));
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_CPU_PORT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the CPU Port.            */
+    retVal = hwGetGlobalRegField(dev,QD_REG_MANGEMENT_CONTROL,0,3,&hwPort);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    /* translate hardware port to LPORT */
+    *cpuPort = (GT_LPORT)GT_PORT_2_LPORT((GT_U8)hwPort);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetCPUDest
+*
+* DESCRIPTION:
+*        This routine sets CPU Destination Port. CPU Destination port indicates the
+*        port number on this device where the CPU is connected (either directly or
+*        indirectly through another Marvell switch device).
+*
+*        Many modes of frame processing need to know where the CPU is located.
+*        These modes are:
+*        1. When IGMP/MLD frame is received and Snooping is enabled
+*        2. When the port is configured as a DSA port and it receives a To_CPU frame
+*        3. When a Rsvd2CPU frame enters the port
+*        4. When the port's SA Filtering mode is Drop to CPU
+*        5. When any of the port's Policy Options trap the frame to the CPU
+*        6. When the ingressing frame is an ARP and ARP mirroring is enabled in the
+*           device
+*
+*        In all cases, except for ARP, the frames that meet the enabled criteria
+*        are mapped to the CPU Destination port, overriding where the frame would
+*        normally go. In the case of ARP, the frame will be mapped normally and it
+*        will also get copied to this port.
+*        Frames that filtered or discarded will not be mapped to the CPU Destination
+*        port with the exception of the Rsvd2CPU and DSA Tag cases.
+*
+*        If CPUDest = 0xF, the remapped frames will be discarded, no ARP mirroring
+*        will occur and ingressing To_CPU frames will be discarded.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetCPUDest
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gsysSetCPUDest Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_CPU_DEST))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    if(port == 0xF)
+        hwPort = (GT_U8)port;
+    else
+    {
+        hwPort = (GT_U8)(GT_LPORT_2_PORT(port));
+        if (hwPort == GT_INVALID_PORT)
+            return GT_BAD_PARAM;
+    }
+
+    /* Set related bit */
+    retVal = hwSetGlobalRegField(dev,QD_REG_MONITOR_CONTROL, 4, 4, (GT_U16)hwPort);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetCPUDest
+*
+* DESCRIPTION:
+*        This routine gets CPU Destination Port. CPU Destination port indicates the
+*        port number on this device where the CPU is connected (either directly or
+*        indirectly through another Marvell switch device).
+*
+*        Many modes of frame processing need to know where the CPU is located.
+*        These modes are:
+*        1. When IGMP/MLD frame is received and Snooping is enabled
+*        2. When the port is configured as a DSA port and it receives a To_CPU frame
+*        3. When a Rsvd2CPU frame enters the port
+*        4. When the port's SA Filtering mode is Drop to CPU
+*        5. When any of the port's Policy Options trap the frame to the CPU
+*        6. When the ingressing frame is an ARP and ARP mirroring is enabled in the
+*           device
+*
+*        In all cases, except for ARP, the frames that meet the enabled criteria
+*        are mapped to the CPU Destination port, overriding where the frame would
+*        normally go. In the case of ARP, the frame will be mapped normally and it
+*        will also get copied to this port.
+*        Frames that filtered or discarded will not be mapped to the CPU Destination
+*        port with the exception of the Rsvd2CPU and DSA Tag cases.
+*
+*        If CPUDest = 0xF, the remapped frames will be discarded, no ARP mirroring
+*        will occur and ingressing To_CPU frames will be discarded.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        port  - the logical port number.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetCPUDest
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_LPORT      *port
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysGetCPUDest Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_CPU_DEST))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    retVal = hwGetGlobalRegField(dev,QD_REG_MONITOR_CONTROL, 4, 4, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    if(data == 0xF)
+    {
+        *port = (GT_LPORT)data;
+    }
+    else
+    {
+        *port = GT_PORT_2_LPORT((GT_U8)data);
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetMirrorDest
+*
+* DESCRIPTION:
+*        This routine sets Mirror Destination Port. Frames that ingress a port
+*        that trigger a policy mirror are mapped (copied) to this port as long as
+*        the frame is not filtered or discarded.
+*        The Mirror Destination port should point to the port that directs these
+*        frames to the CPU that will process these frames. This target port should
+*        be a DSA Tag port so the frames will egress with a To_CPU DSA Tag with a
+*        CPU Code of Policy Mirror.
+*        To_CPU DSA Tag frames with a CPU Code of Policy Mirror that ingress a DSA
+*        Tag port will be sent to the port number defined in MirrorDest.
+*
+*        If MirrorDest = 0xF, Policy Mirroring is disabled and ingressing To_CPU
+*        Policy Mirror frames will be discarded.
+*
+* INPUTS:
+*        port  - the logical port number.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetMirrorDest
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT        port
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U8           hwPort;         /* the physical port number     */
+
+    DBG_INFO(("gsysSetMirrorDest Called.\n"));
+
+    /* translate LPORT to hardware port */
+    hwPort = GT_LPORT_2_PORT(port);
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_MIRROR_DEST))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    if(port == 0xF)
+        hwPort = (GT_U8)port;
+    else
+    {
+        hwPort = (GT_U8)(GT_LPORT_2_PORT(port));
+        if (hwPort == GT_INVALID_PORT)
+            return GT_BAD_PARAM;
+    }
+
+    /* Set related bit */
+    retVal = hwSetGlobalRegField(dev,QD_REG_MONITOR_CONTROL, 0, 4, (GT_U16)hwPort);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetMirrorDest
+*
+* DESCRIPTION:
+*        This routine gets Mirror Destination Port. Frames that ingress a port
+*        that trigger a policy mirror are mapped (copied) to this port as long as
+*        the frame is not filtered or discarded.
+*        The Mirror Destination port should point to the port that directs these
+*        frames to the CPU that will process these frames. This target port should
+*        be a DSA Tag port so the frames will egress with a To_CPU DSA Tag with a
+*        CPU Code of Policy Mirror.
+*        To_CPU DSA Tag frames with a CPU Code of Policy Mirror that ingress a DSA
+*        Tag port will be sent to the port number defined in MirrorDest.
+*
+*        If MirrorDest = 0xF, Policy Mirroring is disabled and ingressing To_CPU
+*        Policy Mirror frames will be discarded.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        port  - the logical port number.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetMirrorDest
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_LPORT      *port
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysGetMirrorDest Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_CPU_DEST))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    retVal = hwGetGlobalRegField(dev,QD_REG_MONITOR_CONTROL, 0, 4, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    if(data == 0xF)
+    {
+        *port = (GT_LPORT)data;
+    }
+    else
+    {
+        *port = GT_PORT_2_LPORT((GT_U8)data);
+    }
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysSetRMPort
+*
+* DESCRIPTION:
+*        Remote Management feature is enabled only on one port. Since not all ports
+*        can be enabled for Remote Management feature, please refer to the device
+*        datasheet for detailed information.
+*        For example, 88E6097 device allows logical port 9 or 10, and 88E6047
+*        device allows logical port 4 and 5.
+*
+* INPUTS:
+*        port - Remote Management Port
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM     - on unallowable port
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        Obsolete. Please uses gsysSetRMUMode API, instead.
+*
+*******************************************************************************/
+GT_STATUS gsysSetRMPort
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_LPORT     port
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysSetRMPort Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FRAME_TO_REGISTER))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    if (IS_IN_DEV_GROUP(dev,DEV_RMU_MODE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* translate LPORT to hardware port */
+    switch(GT_LPORT_2_PORT(port))
+    {
+        case 9:
+                data = 0;
+                break;
+        case 10:
+                data = 1;
+                break;
+        default:
+                DBG_INFO(("Not Allowed Port.\n"));
+                return GT_BAD_PARAM;
+    }
+
+    /* Set the F2R port. */
+    retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL2,13,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetRMPort
+*
+* DESCRIPTION:
+*        Remote Management feature is enabled only on one port. Since not all ports
+*        can be enabled for Remote Management feature, please refer to the device
+*        datasheet for detailed information.
+*        For example, 88E6097 device allows logical port 9 or 10, and 88E6047
+*        device allows logical port 4 and 5.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        port - Remote Management Port
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        Obsolete. Please uses gsysGetRMUMode API, instead.
+*
+*******************************************************************************/
+GT_STATUS gsysGetRMPort
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_LPORT     *port
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGetRMPort Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FRAME_TO_REGISTER))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    if (IS_IN_DEV_GROUP(dev,DEV_RMU_MODE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the F2R port.                */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL2,13,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    if(data == 1)
+    {
+        *port = GT_PORT_2_LPORT(10);
+    }
+    else
+    {
+        *port = GT_PORT_2_LPORT(9);
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetRMDACheck
+*
+* DESCRIPTION:
+*        Check the DA on Remote Management frames.
+*        When DA Check is enabled, the DA of Remote Management frames must be
+*        contained in this device's address database (ATU) as a Static entry
+*        (either unicast or multicast). If the DA of the frame is not contained
+*        in this device's address database, the frame will be not be processed as
+*        a Remote Management frame.
+*        When DA Check is disabled, the DA of Remote Management frames is not
+*        validated before processing the frame.
+*
+* INPUTS:
+*        en - GT_TRUE to enable DA Check,
+*             GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetRMDACheck
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL         en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysSetRMDACheck Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FRAME_TO_REGISTER))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_DA_CHECK))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    if (IS_IN_DEV_GROUP(dev,DEV_DA_CHECK_1))
+    {
+      /* Set the DA Check bit. */
+      retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL2,11,1,data);
+    }
+    else
+    {
+      /* Set the DA Check bit. */
+      retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL2,14,1,data);
+    }
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetRMDACheck
+*
+* DESCRIPTION:
+*        Check the DA on Remote Management frames.
+*        When DA Check is enabled, the DA of Remote Management frames must be
+*        contained in this device's address database (ATU) as a Static entry
+*        (either unicast or multicast). If the DA of the frame is not contained
+*        in this device's address database, the frame will be not be processed as
+*        a Frame-to-Regter frame.
+*        When DA Check is disabled, the DA of Remote Management frames is not
+*        validated before processing the frame.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if DA Check is enabled,
+*             GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetRMDACheck
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL     *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGetRMDACheck Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FRAME_TO_REGISTER))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (!IS_IN_DEV_GROUP(dev,DEV_DA_CHECK))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the DA Check bit.                */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL2,14,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetHeaderType
+*
+* DESCRIPTION:
+*   To set Header Type. These bits are used to configure the bits that are placed
+*   into the Egress Header when it is enabled on a port (Port offset 0x04)
+*   as follows:
+*     00 = Original Header – for backwards compatibility to UniMAC’s that look at
+*          Header byte 1 bits[4:2] and byte 2 bits [3:0]
+*     01 = Single chip MGMT Header – for compatibility to Marvell Fast Ethernet
+*          switches that support Spanning Tree without DSA Tags
+*     10 = Trunk Header – used together with the DSA Tags to perform Remote Switching
+*     11 = Reserved for future use.
+*
+* INPUTS:
+*        hdType
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetHeaderType
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U16       hdType
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysSetHeaderType Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_HEADER_TYPE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    data = hdType&0x3;
+    /* Set the DA Check bit. */
+    retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL2,14,2,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetHeaderType
+*
+* DESCRIPTION:
+*   To get Header Type. These bits are used to configure the bits that are placed
+*   into the Egress Header when it is enabled on a port (Port offset 0x04)
+*   as follows:
+*     00 = Original Header – for backwards compatibility to UniMAC’s that look at
+*          Header byte 1 bits[4:2] and byte 2 bits [3:0]
+*     01 = Single chip MGMT Header – for compatibility to Marvell Fast Ethernet
+*          switches that support Spanning Tree without DSA Tags
+*     10 = Trunk Header – used together with the DSA Tags to perform Remote Switching
+*     11 = Reserved for future use.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        hdType
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetHeaderType
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U16     *hdType
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGetHeaderType Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_HEADER_TYPE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the DA Check bit.                */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL2,14,2,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    *hdType = data;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetRMEnable
+*
+* DESCRIPTION:
+*        Enable or disable Remote Management feature. This feature can be enabled
+*        only on one port (see gsysSetRMPort API).
+*
+* INPUTS:
+*        en - GT_TRUE to enable Remote Management feature,
+*             GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        Obsolete. Please uses gsysSetRMUMode API, instead.
+*
+*******************************************************************************/
+GT_STATUS gsysSetRMEnable
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL         en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysSetRMEnable Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FRAME_TO_REGISTER))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set the F2R En bit. */
+    retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL2,12,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetRMEnable
+*
+* DESCRIPTION:
+*        Enable or disable Remote Management feature. This feature can be enabled
+*        only on one port (see gsysSetRMPort API).
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if Remote Management feature is enabled,
+*             GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        Obsolete. Please uses gsysGetRMUMode API, instead.
+*
+*******************************************************************************/
+GT_STATUS gsysGetRMEnable
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL     *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGetRMEnable Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FRAME_TO_REGISTER))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the F2R En bit.                */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL2,12,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetRMUMode
+*
+* DESCRIPTION:
+*        Set Rmote Management Unit Mode: disable, enable on port 4, 5 or 6, or enable
+*        on port 9 or 10. Devices, such as 88E6097, support RMU on port 9 and 10,
+*        while other devices, such as 88E6165, support RMU on port 4, 5 and 6. So,
+*        please refer to the device datasheet for detail.
+*        When RMU is enabled and this device receives a Remote Management Request
+*        frame directed to this device, the frame will be processed and a Remote
+*        Management Response frame will be generated and sent out.
+*
+*        Note: enabling RMU has no effect if the Remote Management port is in half
+*        duplex mode. The port's FrameMode must be DSA or EtherType DSA as well.
+*
+* INPUTS:
+*        rmu - GT_RMU structure
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM     - on bad parameter
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetRMUMode
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_RMU        *rmu
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    GT_U16          port;
+
+    DBG_INFO(("gsysSetRMUMode Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FRAME_TO_REGISTER))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_RMU_MODE))
+    {
+        if (rmu->rmuEn == GT_TRUE)
+        {
+            port = GT_LPORT_2_PORT(rmu->port);
+            if (port == GT_INVALID_PORT)
+                return GT_BAD_PARAM;
+
+            switch(port)
+            {
+                case 4:
+                    data = 1;
+                    break;
+                case 5:
+                    data = 2;
+                    break;
+                 case 6:
+                    if (IS_IN_DEV_GROUP(dev,DEV_88E6352_FAMILY))
+                    {
+                      data = 3;
+                      break;
+                    }
+                    return GT_BAD_PARAM;
+               default:
+                    return GT_BAD_PARAM;
+            }
+        }
+        else
+        {
+            data = 0;
+        }
+    }
+    else
+    {
+        if (rmu->rmuEn)
+        {
+            port = GT_LPORT_2_PORT(rmu->port);
+            if (port == GT_INVALID_PORT)
+                return GT_BAD_PARAM;
+
+            switch(port)
+            {
+                case 9:
+                    data = 1;
+                    break;
+                case 10:
+                    data = 3;
+                    break;
+                default:
+                    return GT_BAD_PARAM;
+            }
+        }
+        else
+        {
+            data = 0;
+        }
+    }
+
+    /* Set the RMUMode bit. */
+    retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL2,12,2,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetRMUMode
+*
+* DESCRIPTION:
+*        Get Rmote Management Unit Mode: disable, enable on port 4, 5 or 6, or enable
+*        on port 9 or 10. Devices, such as 88E6097, support RMU on port 9 and 10,
+*        while other devices, such as 88E6165, support RMU on port 4, 5 and 6. So,
+*        please refer to the device datasheet for detail.
+*        When RMU is enabled and this device receives a Remote Management Request
+*        frame directed to this device, the frame will be processed and a Remote
+*        Management Response frame will be generated and sent out.
+*
+*        Note: enabling RMU has no effect if the Remote Management port is in half
+*        duplex mode. The port's FrameMode must be DSA or EtherType DSA as well.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        rmu - GT_RMU structure
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetRMUMode
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_RMU        *rmu
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGetRMUMode Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FRAME_TO_REGISTER))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the RMUMode bit. */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL2,12,2,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_RMU_MODE))
+    {
+        switch (data)
+        {
+            case 0:
+                    rmu->rmuEn = GT_FALSE;
+                    break;
+
+            case 1:
+                    rmu->rmuEn = GT_TRUE;
+                    rmu->port = GT_PORT_2_LPORT(4);
+                    break;
+
+            case 2:
+                    rmu->rmuEn = GT_TRUE;
+                    rmu->port = GT_PORT_2_LPORT(5);
+                    break;
+            case 3:
+                    if (IS_IN_DEV_GROUP(dev,DEV_88E6352_FAMILY))
+                    {
+                      rmu->rmuEn = GT_TRUE;
+                      rmu->port = GT_PORT_2_LPORT(6);
+                      break;
+                    }
+                    return GT_FAIL;
+            default:
+                    return GT_FAIL;
+        }
+    }
+    else
+    {
+        switch (data)
+        {
+            case 0:
+                    rmu->rmuEn = GT_FALSE;
+                    break;
+
+            case 1:
+                    rmu->rmuEn = GT_TRUE;
+                    rmu->port = GT_PORT_2_LPORT(9);
+                    break;
+
+            case 3:
+                    rmu->rmuEn = GT_TRUE;
+                    rmu->port = GT_PORT_2_LPORT(10);
+                    break;
+
+            default:
+                    rmu->rmuEn = GT_FALSE;
+                    break;
+        }
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysSetCtrMode
+*
+* DESCRIPTION:
+*        Set Counter Modes. These bits control the operating modes of the two of
+*        the Port’s MIB counters.
+*
+* INPUTS:
+*        ctrMode - Counter mode
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM     - on bad parameter
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetCtrMode
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U16       ctrMode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysSetCtrMode Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_COUNTER_MODE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_88E6352_FAMILY))
+    {
+      data = ctrMode&0x3;
+      /* Set the Counter Mode. */
+      retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL2,5,2,data);
+    }
+    else
+    {
+      data = ctrMode&0x1;
+      /* Set the Counter Mode. */
+      retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL2,0,1,data);
+    }
+
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetCtrMode
+*
+* DESCRIPTION:
+*        Get Counter Modes. These bits control the operating modes of the two of
+*        the Port’s MIB counters.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        ctrMode - Counter mode
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetCtrMode
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U16       *ctrMode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGetCtrMode Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_COUNTER_MODE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_COUNTER_MODE))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_88E6352_FAMILY))
+    {
+      /* Set the Counter Mode. */
+      retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL2,5,2, &data);
+    }
+    else
+    {
+      /* Set the Counter Mode. */
+      retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL2,0,1, &data);
+    }
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    *ctrMode = data;
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetRsvd2CpuEnables2X
+*
+* DESCRIPTION:
+*        Reserved DA Enables for the form of 01:80:C2:00:00:2x.
+*        When the Rsvd2Cpu(gsysSetRsvd2Cpu) is set to a one, the 16 reserved
+*        multicast DA addresses, whose bit in this register are also set to a one,
+*        are treadted as MGMT frames. All the reserved DA's take the form
+*        01:80:C2:00:00:2x. When x = 0x0, bit 0 of this register is tested.
+*        When x = 0x2, bit 2 of this field is tested and so on.
+*        If the tested bit in this register is cleared to a zero, the frame will
+*        be treated as a normal (non-MGMT) frame.
+*
+* INPUTS:
+*        enBits - bit vector of enabled Reserved Multicast.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetRsvd2CpuEnables2X
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U16        enBits
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gsysSetRsvd2CpuEnables2X Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ENHANCED_MULTICAST_2X))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Set related register */
+    retVal = hwWriteGlobal2Reg(dev,QD_REG_MGMT_ENABLE_2X, (GT_U16)enBits);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetRsvd2CpuEnables2X
+*
+* DESCRIPTION:
+*        Reserved DA Enables for the form of 01:80:C2:00:00:2x.
+*        When the Rsvd2Cpu(gsysSetRsvd2Cpu) is set to a one, the 16 reserved
+*        multicast DA addresses, whose bit in this register are also set to a one,
+*        are treadted as MGMT frames. All the reserved DA's take the form
+*        01:80:C2:00:00:2x. When x = 0x0, bit 0 of this register is tested.
+*        When x = 0x2, bit 2 of this field is tested and so on.
+*        If the tested bit in this register is cleared to a zero, the frame will
+*        be treated as a normal (non-MGMT) frame.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        enBits - bit vector of enabled Reserved Multicast.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetRsvd2CpuEnables2X
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U16      *enBits
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gsysGetRsvd2CpuEnables2X Called.\n"));
+
+    /* Check if Switch supports this status. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ENHANCED_MULTICAST_2X))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related register */
+    retVal = hwReadGlobal2Reg(dev, QD_REG_MGMT_ENABLE_2X, enBits);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetLoopbackFilter
+*
+* DESCRIPTION:
+*        Loopback Filter.
+*        When Loopback Filter is enabled,Forward DSA frames that ingress a DSA port
+*        that came from the same Src_Dev will be filtered to the same Src_Port,
+*        i.e., the frame will not be allowed to egress the source port on the
+*        source device as indicated in the DSA Forward's Tag.
+*
+* INPUTS:
+*        en - GT_TRUE to enable LoopbackFilter, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetLoopbackFilter
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysSetLoopbackFilter Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_LOOPBACK_FILTER))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set related bit */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_MANAGEMENT, 15, 1, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetLoopbackFilter
+*
+* DESCRIPTION:
+*        Loopback Filter.
+*        When Loopback Filter is enabled,Forward DSA frames that ingress a DSA port
+*        that came from the same Src_Dev will be filtered to the same Src_Port,
+*        i.e., the frame will not be allowed to egress the source port on the
+*        source device as indicated in the DSA Forward's Tag.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if LoopbackFilter is enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetLoopbackFilter
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysGetLoopbackFilter Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_LOOPBACK_FILTER))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_MANAGEMENT,15,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetFloodBC
+*
+* DESCRIPTION:
+*        Flood Broadcast.
+*        When Flood Broadcast is enabled, frames with the Broadcast destination
+*        address will flood out all the ports regardless of the setting of the
+*        port's Egress Floods mode (see gprtSetEgressFlood API). VLAN rules and
+*        other switch policy still applies to these Broadcast frames.
+*        When this feature is disabled, frames with the Broadcast destination
+*        address are considered Multicast frames and will be affected by port's
+*        Egress Floods mode.
+*
+* INPUTS:
+*        en - GT_TRUE to enable Flood Broadcast, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetFloodBC
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysSetFloodBC Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FLOOD_BROADCAST))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set related bit */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_MANAGEMENT, 12, 1, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetFloodBC
+*
+* DESCRIPTION:
+*        Flood Broadcast.
+*        When Flood Broadcast is enabled, frames with the Broadcast destination
+*        address will flood out all the ports regardless of the setting of the
+*        port's Egress Floods mode (see gprtSetEgressFlood API). VLAN rules and
+*        other switch policy still applies to these Broadcast frames.
+*        When this feature is disabled, frames with the Broadcast destination
+*        address are considered Multicast frames and will be affected by port's
+*        Egress Floods mode.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if Flood Broadcast is enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetFloodBC
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysGetFloodBC Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_FLOOD_BROADCAST))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_MANAGEMENT,12,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetRemove1PTag
+*
+* DESCRIPTION:
+*        Remove One Provider Tag.
+*        When this feature is enabled and a port is configured as a Provider Port
+*        (see gprtSetFrameMode API), recursive Provider Tag stripping will NOT be
+*        performed. Only the first Provider Tag found on the frame will be
+*        extracted and removed. Its extracted data will be used for switching.
+*        When it's disabled and a port is configured as a Provider Port, recursive
+*        Provider Tag stripping will be performed. The first Provider Tag's data
+*        will be extracted and used for switching, and then all subsequent Provider
+*        Tags found in the frame will also be removed. This will only occur if the
+*        port's PortEType (see gprtSetPortEType API) is not 0x8100.
+*
+* INPUTS:
+*        en - GT_TRUE to enable Remove One Provider Tag, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetRemove1PTag
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysSetRemove1PTag Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_RM_ONE_PTAG))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set related bit */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_MANAGEMENT, 11, 1, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysGetRemove1PTag
+*
+* DESCRIPTION:
+*        Remove One Provider Tag.
+*        When this feature is enabled and a port is configured as a Provider Port
+*        (see gprtSetFrameMode API), recursive Provider Tag stripping will NOT be
+*        performed. Only the first Provider Tag found on the frame will be
+*        extracted and removed. Its extracted data will be used for switching.
+*        When it's disabled and a port is configured as a Provider Port, recursive
+*        Provider Tag stripping will be performed. The first Provider Tag's data
+*        will be extracted and used for switching, and then all subsequent Provider
+*        Tags found in the frame will also be removed. This will only occur if the
+*        port's PortEType (see gprtSetPortEType API) is not 0x8100.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if Remove One Provider Tag is enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetRemove1PTag
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL        *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysGetRemove1PTag Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_RM_ONE_PTAG))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_MANAGEMENT, 11, 1, &data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetTagFlowControl
+*
+* DESCRIPTION:
+*        Use and generate source port Flow Control status for Cross-Chip Flow
+*        Control.
+*        When this feature is enabled, bit 17 of the DSA Tag Forward frames is
+*        defined to be Src_FC and it is added to these frames when generated and
+*        it is inspected on these frames when received. The QC will use the Src_FC
+*        bit on DSA ports instead of the DSA port's Flow Control mode bit for the
+*        QC Flow Control algorithm.
+*        When it is disabled, bit 17 of the DSA Tag Forward frames is defined to
+*        be Reserved and it will be zero on these frames when generated and it
+*        will not be used on these frames when received (this is a backwards
+*        compatibility mode).
+*
+* INPUTS:
+*        en - GT_TRUE to enable Tag Flow Control, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetTagFlowControl
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysSetTagFlowControl Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAG_FLOW_CONTROL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set related bit */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_MANAGEMENT, 9, 1, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetTagFlowControl
+*
+* DESCRIPTION:
+*        Use and generate source port Flow Control status for Cross-Chip Flow
+*        Control.
+*        When this feature is enabled, bit 17 of the DSA Tag Forward frames is
+*        defined to be Src_FC and it is added to these frames when generated and
+*        it is inspected on these frames when received. The QC will use the Src_FC
+*        bit on DSA ports instead of the DSA port's Flow Control mode bit for the
+*        QC Flow Control algorithm.
+*        When it is disabled, bit 17 of the DSA Tag Forward frames is defined to
+*        be Reserved and it will be zero on these frames when generated and it
+*        will not be used on these frames when received (this is a backwards
+*        compatibility mode).
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if Tag Flow Control is enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetTagFlowControl
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysGetTagFlowControl Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TAG_FLOW_CONTROL))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_MANAGEMENT,9,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetAlwaysUseVTU
+*
+* DESCRIPTION:
+*        Always use VTU.
+*        When this feature is enabled, VTU hit data will be used to map frames
+*        even if 802.1Q is Disabled on the port.
+*        When it's disabled, data will be ignored when mapping frames on ports
+*        where 802.1Q is Disabled.
+*
+* INPUTS:
+*        en - GT_TRUE to use VTU always, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetAlwaysUseVTU
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysSetAlwaysUseVTU Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ALWAYS_USE_VTU))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set related bit */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_MANAGEMENT, 8, 1, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetAlwaysUseVTU
+*
+* DESCRIPTION:
+*        Always use VTU.
+*        When this feature is enabled, VTU hit data will be used to map frames
+*        even if 802.1Q is Disabled on the port.
+*        When it's disabled, data will be ignored when mapping frames on ports
+*        where 802.1Q is Disabled.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if VTU is always used, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetAlwaysUseVTU
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysGetAlwaysUseVTU Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_ALWAYS_USE_VTU))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_MANAGEMENT,8,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetQVlansOnly
+*
+* DESCRIPTION:
+*        802.1Q VLANs Only.
+*        When this feature is disabled, the egress mapping of the frame is
+*        limited by the frame's VID (using the MemberTag data found in the VTU)
+*        together with the port based VLANs (using the source port's PortVLANTable,
+*        gvlnSetPortVlanPorts API). The two methods are always used together in
+*        this mode.
+*        When this feature is enabled, the egress mapping of the frame is limitied
+*        by the frame's VID only, if the VID was found in the VTU. If the frame's
+*        VID was not found in the VTU the egress mapping of the frame is limited
+*        by the source port's PortVLANTable only. The two methods are never
+*        used together in this mode.
+*
+* INPUTS:
+*        en - GT_TRUE to use 802.1Q Vlan Only feature, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetQVlansOnly
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysSetQVlansOnly Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QVLAN_ONLY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set related bit */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_SDET_POLARITY, 15, 1, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetQVlansOnly
+*
+* DESCRIPTION:
+*        802.1Q VLANs Only.
+*        When this feature is disabled, the egress mapping of the frame is
+*        limited by the frame's VID (using the MemberTag data found in the VTU)
+*        together with the port based VLANs (using the source port's PortVLANTable,
+*        gvlnSetPortVlanPorts API). The two methods are always used together in
+*        this mode.
+*        When this feature is enabled, the egress mapping of the frame is limitied
+*        by the frame's VID only, if the VID was found in the VTU. If the frame's
+*        VID was not found in the VTU the egress mapping of the frame is limited
+*        by the source port's PortVLANTable only. The two methods are never
+*        used together in this mode.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if 802.1Q Vlan Only feature is enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetQVlansOnly
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysGetQVlansOnly Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QVLAN_ONLY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_SDET_POLARITY,15,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSet5BitPort
+*
+* DESCRIPTION:
+*        Use 5 bits for Port data in the Port VLAN Table (PVT).
+*        When this feature is enabled, the 9 bits used to access the PVT memory is:
+*            Addr[8:5] = Source Device[3:0] or Device Number[3:0]
+*            Addr[4:0] = Source Port/Trunk[4:0]
+*        When it's disabled, the 9 bits used to access the PVT memory is:
+*            Addr[8:4] = Source Device[4:0] or Device Number[4:0]
+*            Addr[3:0] = Source Port/Trunk[3:0]
+*
+* INPUTS:
+*        en - GT_TRUE to use 5 bit as a Source port in PVT, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSet5BitPort
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysSet5BitPort Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_5BIT_PORT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set related bit */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_SDET_POLARITY, 14, 1, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGet5BitPort
+*
+* DESCRIPTION:
+*        Use 5 bits for Port data in the Port VLAN Table (PVT).
+*        When this feature is enabled, the 9 bits used to access the PVT memory is:
+*            Addr[8:5] = Source Device[3:0] or Device Number[3:0]
+*            Addr[4:0] = Source Port/Trunk[4:0]
+*        When it's disabled, the 9 bits used to access the PVT memory is:
+*            Addr[8:4] = Source Device[4:0] or Device Number[4:0]
+*            Addr[3:0] = Source Port/Trunk[3:0]
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if 5 bit is used as a Source Port in PVT, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGet5BitPort
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGet5BitPort Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_5BIT_PORT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_SDET_POLARITY,14,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetSDETPolarity
+*
+* DESCRIPTION:
+*        SDET (Signal Detect) Polarity select bits for each port.
+*        Bit 10 is for Port 10, bit 9 is for Port 9, etc. SDET is used to help
+*        determine link on fiber ports. This bit affects the active level of a
+*        port's SDET pins as follows:
+*            0 = SDET is active low. A low level on the port's SDET pin is
+*                required for link to occur.
+*            1 = SDET is active high. A high level on the port’s SDET pin is
+*                required for link to occur.
+*        SDET is used when the port is configured as a fiber port. In all other
+*        port modes the SDET pins are ignored and these bits have no effect.
+*
+* INPUTS:
+*        sdetVec - SDET Polarity for each port in Vector format
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_BAD_PARAM - if sdetVec is invalid
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetSDETPolarity
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U32          sdetVec
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysSetSDETPolarity Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SDET_POLARITY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if ((GT_U16)sdetVec > ((1 << dev->numOfPorts) - 1))
+    {
+        DBG_INFO(("GT_BAD_PARAM \n"));
+        return GT_BAD_PARAM;
+    }
+
+    data = (GT_U16)GT_LPORTVEC_2_PORTVEC(sdetVec);
+
+    if (IS_IN_DEV_GROUP(dev,DEV_LIMITED_SDET))
+    {
+        if (data & (~0x30))    /* only port 4 and 5 of this device support SDET */
+        {
+            DBG_INFO(("GT_BAD_PARAM \n"));
+            return GT_BAD_PARAM;
+        }
+    }
+
+    /* Set the related bits. */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_SDET_POLARITY,0,dev->maxPorts,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetSDETPolarity
+*
+* DESCRIPTION:
+*        SDET (Signal Detect) Polarity select bits for each port.
+*        Bit 10 is for Port 10, bit 9 is for Port 9, etc. SDET is used to help
+*        determine link on fiber ports. This bit affects the active level of a
+*        port's SDET pins as follows:
+*            0 = SDET is active low. A low level on the port's SDET pin is
+*                required for link to occur.
+*            1 = SDET is active high. A high level on the port’s SDET pin is
+*                required for link to occur.
+*        SDET is used when the port is configured as a fiber port. In all other
+*        port modes the SDET pins are ignored and these bits have no effect.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        sdetVec - SDET Polarity for each port in Vector format
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetSDETPolarity
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U32      *sdetVec
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+    DBG_INFO(("gsysGetSDETPolarity Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_SDET_POLARITY))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get the related bits. */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_SDET_POLARITY,0,dev->maxPorts,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_LIMITED_SDET))
+    {
+        data &= 0x30;
+    }
+
+    *sdetVec = GT_PORTVEC_2_LPORTVEC(data);
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetARPwoBC
+*
+* DESCRIPTION:
+*       ARP detection without Broadcast checking. When enabled the switch core
+*       does not check for a Btoadcast MAC address as part of the ARP frame
+*       detection. It only checkes the Ether Type (0x0806).
+*
+* INPUTS:
+*       en - GT_TRUE to enable, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetARPwoBC
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_BOOL     en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* Data to be set into the      */
+                                /* register.                    */
+    DBG_INFO(("sysSetARPwoBC Called.\n"));
+    BOOL_2_BIT(en,data);
+
+    /* Set the Discard Exissive bit.                */
+    retVal = hwSetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,12,1,data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gsysGetARPwoBC
+*
+* DESCRIPTION:
+*       ARP detection without Broadcast checking. When enabled the switch core
+*       does not check for a Btoadcast MAC address as part of the ARP frame
+*       detection. It only checkes the Ether Type (0x0806).
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       en - GT_TRUE if enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetARPwoBC
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_BOOL        *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGetARPwoBC Called.\n"));
+    if(en == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Get the bit. */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_CONTROL,12,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysSetCLK125En
+*
+* DESCRIPTION:
+*        Clock 125MHz Enable.
+*        When this feature is enabled, the CLK125 pin has a free running 125 MHz
+*        clock output.
+*        When it's disabled, the CLK125 pin will be in tri-state.
+*
+* INPUTS:
+*        en - GT_TRUE to enable 125 MHz clock, GT_FALSE otherwise.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysSetCLK125En
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_BOOL        en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16            data;
+
+    DBG_INFO(("gsysSetCLK125En Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_CLK_125))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    BOOL_2_BIT(en,data);
+
+    /* Set related bit */
+    retVal = hwSetGlobal2RegField(dev,QD_REG_SDET_POLARITY, 11, 1, data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetCLK125En
+*
+* DESCRIPTION:
+*        Clock 125MHz Enable.
+*        When this feature is enabled, the CLK125 pin has a free running 125 MHz
+*        clock output.
+*        When it's disabled, the CLK125 pin will be in tri-state.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        en - GT_TRUE if 125MHz clock is enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysGetCLK125En
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL      *en
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGetCLK125En Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_CLK_125))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Get related bit */
+    retVal = hwGetGlobal2RegField(dev,QD_REG_SDET_POLARITY,11,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*en);
+    DBG_INFO(("OK.\n"));
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysPort2Lport
+*
+* DESCRIPTION:
+*        This routine converts physical port number to logical port number.
+*
+* INPUTS:
+*        port - physical port number
+*
+* OUTPUTS:
+*        lport - logical port number
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysPort2Lport
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32         port,
+    OUT GT_LPORT    *lport
+)
+{
+    DBG_INFO(("gsysPort2Lport Called.\n"));
+
+    if (port > 0xFF)
+    {
+        return GT_FAIL;
+    }
+
+    *lport = GT_PORT_2_LPORT((GT_U8)port);
+
+    if (*lport == GT_INVALID_PORT)
+    {
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysLport2Port
+*
+* DESCRIPTION:
+*        This routine converts logical port number to physical port number.
+*
+* INPUTS:
+*        lport - logical port number
+*
+* OUTPUTS:
+*        port - physical port number
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysLport2Port
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_LPORT    lport,
+    OUT GT_U32         *port
+)
+{
+    DBG_INFO(("gsysLport2Port Called.\n"));
+
+    *port = (GT_U32)GT_LPORT_2_PORT(lport);
+
+    if (*port == GT_INVALID_PORT)
+    {
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysPortvec2Lportvec
+*
+* DESCRIPTION:
+*        This routine converts physical port vector to logical port vector.
+*
+* INPUTS:
+*        portvec - physical port vector
+*
+* OUTPUTS:
+*        lportvec - logical port vector
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysPortvec2Lportvec
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32        portvec,
+    OUT GT_U32         *lportvec
+)
+{
+    DBG_INFO(("gsysPortvec2Lportvec Called.\n"));
+
+    if (portvec & (~((GT_U32)dev->validPortVec)))
+    {
+        return GT_FAIL;
+    }
+
+    *lportvec = GT_PORTVEC_2_LPORTVEC(portvec);
+
+    if (*lportvec == GT_INVALID_PORT_VEC)
+    {
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysLportvec2Portvec
+*
+* DESCRIPTION:
+*        This routine converts logical port vector to physical port vector.
+*
+* INPUTS:
+*        lportvec - logical port vector
+*
+* OUTPUTS:
+*        portvec - physical port vector
+*
+* RETURNS:
+*        GT_OK   - on success
+*        GT_FAIL - on error
+*
+* COMMENTS:
+*        None.
+*
+*******************************************************************************/
+GT_STATUS gsysLportvec2Portvec
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U32        lportvec,
+    OUT GT_U32         *portvec
+)
+{
+    DBG_INFO(("gsysLportvec2Portvec Called.\n"));
+
+    *portvec = GT_LPORTVEC_2_PORTVEC(lportvec);
+
+    if (*portvec == GT_INVALID_PORT_VEC)
+    {
+        return GT_FAIL;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+
+/****************************************************************************/
+/* Internal functions.                                                  */
+/****************************************************************************/
+
+/*
+ * Write to Switch MAC/Wol MAC Register
+ */
+static GT_STATUS writeSwitchMacWolReg
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U8        baseid,
+    IN GT_ETHERADDR *mac
+)
+{
+    GT_STATUS       retVal;    /* Functions return value */
+    GT_U16          data;     /* temporary Data storage */
+    GT_U16            i;
+
+    for (i=0; i<GT_ETHERNET_HEADER_SIZE; i++)
+    {
+        /* Wait until the device is ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_SWITCH_MAC;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        return retVal;
+      }
+    }
+#else
+        data = 1;
+        while(data == 1)
+        {
+            retVal = hwGetGlobal2RegField(dev,QD_REG_SWITCH_MAC,15,1,&data);
+            if(retVal != GT_OK)
+            {
+                return retVal;
+            }
+        }
+#endif
+
+        data = (1 << 15) | ((i+baseid) << 8) | mac->arEther[i];
+
+        retVal = hwWriteGlobal2Reg(dev,QD_REG_SWITCH_MAC,data);
+        if(retVal != GT_OK)
+           {
+               return retVal;
+        }
+    }
+
+    return GT_OK;
+}
+
+/*
+ * Read from Switch MAC/WoL MAC Register
+ */
+static GT_STATUS readSwitchMacWolReg
+(
+    IN  GT_QD_DEV    *dev,
+    IN GT_U8        baseid,
+    OUT GT_ETHERADDR *mac
+)
+{
+    GT_STATUS       retVal;    /* Functions return value */
+    GT_U16          data;     /* temporary Data storage */
+    GT_U16            i;
+
+    /* Wait until the device is ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_SWITCH_MAC;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        return retVal;
+      }
+    }
+#else
+    data = 1;
+       while(data == 1)
+    {
+           retVal = hwGetGlobal2RegField(dev,QD_REG_SWITCH_MAC,15,1,&data);
+           if(retVal != GT_OK)
+        {
+               return retVal;
+        }
+       }
+#endif
+
+    for (i=0; i<GT_ETHERNET_HEADER_SIZE; i++)
+    {
+        data = (i+baseid) << 8;
+
+        retVal = hwWriteGlobal2Reg(dev,QD_REG_SWITCH_MAC,data);
+        if(retVal != GT_OK)
+           {
+               return retVal;
+        }
+
+        retVal = hwReadGlobal2Reg(dev,QD_REG_SWITCH_MAC,&data);
+        if(retVal != GT_OK)
+           {
+               return retVal;
+        }
+
+        if ((baseid != 0x16) && (i == 0))  /* base id = 0x16 is Wol Password,and it has nor diffAddr */
+            mac->arEther[i] = data & 0xFE;    /* bit 0 is for diffAddr */
+        else
+            mac->arEther[i] = data & 0xFF;
+    }
+
+    return GT_OK;
+}
+
+
+/*
+ * Write to Different MAC Address per port bit in Switch MAC/WoL Register
+ */
+static GT_STATUS writeDiffMACWoL
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U8        baseid,
+    IN GT_U16       diffAddr
+)
+{
+    GT_STATUS       retVal;    /* Functions return value */
+    GT_U16          data;     /* temporary Data storage */
+
+    /* Wait until the device is ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 3;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_SWITCH_MAC;
+      regAccess.rw_reg_list[0].data = 15;
+      regAccess.rw_reg_list[1].cmd = HW_REG_WRITE;
+      regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[1].reg = QD_REG_SWITCH_MAC;
+      data = baseid<<8;
+      regAccess.rw_reg_list[1].data = data;
+      regAccess.rw_reg_list[2].cmd = HW_REG_READ;
+      regAccess.rw_reg_list[2].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[2].reg = QD_REG_SWITCH_MAC;
+      regAccess.rw_reg_list[2].data = 0;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        return retVal;
+      }
+      data = qdLong2Short(regAccess.rw_reg_list[2].data);
+    }
+#else
+    data = 1;
+    while(data == 1)
+    {
+       retVal = hwGetGlobal2RegField(dev,QD_REG_SWITCH_MAC,15,1,&data);
+          if(retVal != GT_OK)
+        {
+               return retVal;
+        }
+    }
+    data = baseid<<8;
+    retVal = hwWriteGlobal2Reg(dev,QD_REG_SWITCH_MAC,data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    /* Read Swith MAC Reg */
+    retVal = hwReadGlobal2Reg(dev,QD_REG_SWITCH_MAC,&data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+#endif
+
+    data = (1 << 15) | (baseid<<8) | (data & 0xFE) | (diffAddr & 0x1);
+
+    /* Write back to Swith MAC Reg with updated diffAddr */
+    retVal = hwWriteGlobal2Reg(dev,QD_REG_SWITCH_MAC,data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    return GT_OK;
+}
+
+/*
+ * Read Different MAC Address per port bit in Switch MAC/WoL Register
+ */
+static GT_STATUS readDiffMACWoL
+(
+    IN  GT_QD_DEV    *dev,
+    IN GT_U8        baseid,
+    OUT GT_U16        *diffAddr
+)
+{
+    GT_STATUS       retVal;    /* Functions return value */
+    GT_U16          data;     /* temporary Data storage */
+
+    /* Wait until the device is ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 3;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_SWITCH_MAC;
+      regAccess.rw_reg_list[0].data = 15;
+      regAccess.rw_reg_list[1].cmd = HW_REG_WRITE;
+      regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[1].reg = QD_REG_SWITCH_MAC;
+      data = baseid<<8;
+      regAccess.rw_reg_list[1].data = data;
+      regAccess.rw_reg_list[2].cmd = HW_REG_READ;
+      regAccess.rw_reg_list[2].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[2].reg = QD_REG_SWITCH_MAC;
+      regAccess.rw_reg_list[2].data = 0;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        return retVal;
+      }
+      data = qdLong2Short(regAccess.rw_reg_list[2].data);
+    }
+#else
+    data = 1;
+       while(data == 1)
+    {
+           retVal = hwGetGlobal2RegField(dev,QD_REG_SWITCH_MAC,15,1,&data);
+           if(retVal != GT_OK)
+        {
+               return retVal;
+        }
+       }
+
+    /* Write to Swith MAC Reg for reading operation */
+    data = baseid<<8;
+    retVal = hwWriteGlobal2Reg(dev,QD_REG_SWITCH_MAC,data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+
+    retVal = hwReadGlobal2Reg(dev,QD_REG_SWITCH_MAC,&data);
+    if(retVal != GT_OK)
+    {
+        return retVal;
+    }
+#endif
+    *diffAddr = data & 0x1;
+
+    return GT_OK;
+}
+
+/*
+ * Write to Switch MAC/WoL/WoF Register
+ */
+static GT_STATUS writeSwMacWolWofReg
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U8        pointer,
+    IN GT_U8        data
+)
+{
+    GT_STATUS       retVal;    /* Functions return value */
+    GT_U16          tmpData;     /* temporary Data storage */
+    {
+        /* Wait until the device is ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_SWITCH_MAC;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        return retVal;
+      }
+    }
+#else
+        tmpData = 1;
+        while(tmpData == 1)
+        {
+            retVal = hwGetGlobal2RegField(dev,QD_REG_SWITCH_MAC,15,1,&tmpData);
+            if(retVal != GT_OK)
+            {
+                return retVal;
+            }
+        }
+#endif
+
+        tmpData = (1 << 15) | (pointer << 8) | data;
+
+        retVal = hwWriteGlobal2Reg(dev,QD_REG_SWITCH_MAC, tmpData);
+        if(retVal != GT_OK)
+           {
+               return retVal;
+        }
+    }
+
+    return GT_OK;
+}
+
+/*
+ * Read from Switch MAC/WoL/WoF Register
+ */
+static GT_STATUS readSwMacWolWofReg
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U8        pointer,
+    OUT GT_U8       *data
+)
+{
+    GT_STATUS       retVal;    /* Functions return value */
+    GT_U16          tmpData;     /* temporary Data storage */
+
+    /* Wait until the device is ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_SWITCH_MAC;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        return retVal;
+      }
+    }
+#else
+    tmpData = 1;
+       while(tmpData == 1)
+    {
+           retVal = hwGetGlobal2RegField(dev,QD_REG_SWITCH_MAC,15,1,&tmpData);
+           if(retVal != GT_OK)
+        {
+               return retVal;
+        }
+       }
+#endif
+
+    {
+        tmpData = pointer << 8;
+
+        retVal = hwWriteGlobal2Reg(dev,QD_REG_SWITCH_MAC,tmpData);
+        if(retVal != GT_OK)
+           {
+               return retVal;
+        }
+
+        retVal = hwReadGlobal2Reg(dev,QD_REG_SWITCH_MAC,&tmpData);
+        if(retVal != GT_OK)
+           {
+               return retVal;
+        }
+
+        *data = tmpData & 0xFF;
+    }
+
+    return GT_OK;
+}
+
+
+/*
+ * Write to Switch MAC Register
+ */
+static GT_STATUS writeSwitchMacReg
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_ETHERADDR *mac
+)
+{
+    return writeSwitchMacWolReg(dev, 0, mac);
+}
+
+/*
+ * Read from Switch MAC Register
+ */
+static GT_STATUS readSwitchMacReg
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_ETHERADDR *mac
+)
+{
+    return readSwitchMacWolReg(dev, 0, mac);
+}
+
+
+/*
+ * Write to Different MAC Address per port bit in Switch MAC Register
+ */
+static GT_STATUS writeDiffMAC
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_U16       diffAddr
+)
+{
+    return writeDiffMACWoL(dev, 0, diffAddr);
+}
+
+/*
+ * Read Different MAC Address per port bit in Switch MAC Register
+ */
+static GT_STATUS readDiffMAC
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U16        *diffAddr
+)
+{
+    return readDiffMACWoL(dev, 0, diffAddr);
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtSysStatus.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtSysStatus.c
new file mode 100644
index 000000000000..a585d0fe828a
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtSysStatus.c
@@ -0,0 +1,262 @@
+#include <Copyright.h>
+
+/*******************************************************************************
+* gtSysStatus.c
+*
+* DESCRIPTION:
+*       API definitions for system global status.
+*     Added for fullsail
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*       $Revision: 5 $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+
+/*******************************************************************************
+* gsysGetPPUState
+*
+* DESCRIPTION:
+*        This routine get the PPU State. These two bits return
+*        the current value of the PPU.
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        mode - GT_PPU_STATE
+*
+* RETURNS:
+*        GT_OK           - on success
+*        GT_BAD_PARAM    - on bad parameter
+*        GT_FAIL         - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*        None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetPPUState
+(
+    IN  GT_QD_DEV       *dev,
+    OUT GT_PPU_STATE    *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGetPPUState Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_GIGABIT_SWITCH))
+    {
+        DBG_INFO(("Not Supported.\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(mode == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* get the bits from hardware */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_STATUS,14,2,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    if (IS_IN_DEV_GROUP(dev,DEV_PPU_READ_ONLY))
+    {
+        data |= 0x4000;
+    }
+
+    *mode = data;
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetSW_Mode
+*
+* DESCRIPTION:
+*       This routine get the Switch mode. These two bits returen
+*       the current value of the SW_MODE[1:0] pins.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       mode - GT_TRUE Discard is enabled, GT_FALSE otherwise.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetSW_Mode
+(
+    IN  GT_QD_DEV  *dev,
+    OUT GT_SW_MODE *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGetSW_Mode Called.\n"));
+
+    /* check if device supports this feature */
+    if (!(IS_IN_DEV_GROUP(dev,DEV_SWITCH_MODE)) )
+    {
+        DBG_INFO(("Not Supported.\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(mode == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* get the bits from hardware */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_STATUS,12,2,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    *mode = data;
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetInitReady
+*
+* DESCRIPTION:
+*       This routine get the InitReady bit. This bit is set to a one when the ATU,
+*       the Queue Controller and the Statistics Controller are done with their
+*       initialization and are ready to accept frames.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       mode - GT_TRUE: switch is ready, GT_FALSE otherwise.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_BAD_PARAM    - on bad parameter
+*       GT_FAIL         - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetInitReady
+(
+    IN  GT_QD_DEV  *dev,
+    OUT GT_BOOL    *mode
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+    GT_U16          data;           /* The register's read data.    */
+
+    DBG_INFO(("gsysGetInitReady Called.\n"));
+
+    /* check if device supports this feature */
+    if (!(IS_IN_DEV_GROUP(dev,DEV_INIT_READY)) )
+    {
+        DBG_INFO(("Not Supported.\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if(mode == NULL)
+    {
+        DBG_INFO(("Failed.\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* get the bits from hardware */
+    retVal = hwGetGlobalRegField(dev,QD_REG_GLOBAL_STATUS,11,1,&data);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    BIT_2_BOOL(data,*mode);
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gsysGetFreeQSize
+*
+* DESCRIPTION:
+*       This routine gets Free Queue Counter. This counter reflects the
+*        current number of unalllocated buffers available for all the ports.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       count - Free Queue Counter
+*
+* RETURNS:
+*       GT_OK            - on success
+*       GT_FAIL          - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gsysGetFreeQSize
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_U16         *count
+)
+{
+    GT_STATUS       retVal;         /* Functions return value.      */
+
+    DBG_INFO(("gsysGetFreeQSize Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_OUT_Q_SIZE))
+    {
+        DBG_INFO(("Not Supported.\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* get the counter */
+    retVal = hwGetGlobalRegField(dev,QD_REG_TOTAL_FREE_COUNTER,0,9,count);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed.\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtTCAM.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtTCAM.c
new file mode 100644
index 000000000000..2043da60020d
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtTCAM.c
@@ -0,0 +1,1226 @@
+#include <Copyright.h>
+
+/*******************************************************************************
+* gtTCAM.c
+*
+* DESCRIPTION:
+*       API definitions for control of Ternary Content Addressable Memory
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtSem.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+
+GT_STATUS tcamReadGlobal3Reg
+(
+  IN GT_QD_DEV *dev,
+  IN  GT_U8    regAddr,
+  OUT GT_U16   *data
+)
+{
+  GT_STATUS           retVal;
+  retVal = hwReadGlobal3Reg(dev, regAddr, data);
+  if(retVal != GT_OK)
+  {
+    return retVal;
+  }
+
+  return GT_OK;
+}
+
+GT_STATUS tcamWriteGlobal3Reg
+(
+  IN  GT_QD_DEV *dev,
+  IN  GT_U8    regAddr,
+  IN  GT_U16   data
+)
+{
+  GT_STATUS           retVal;
+
+  retVal = hwWriteGlobal3Reg(dev, regAddr, data);
+  if(retVal != GT_OK)
+  {
+    return retVal;
+  }
+  return GT_OK;
+}
+
+/****************************************************************************/
+/* TCAM operation function declaration.                                    */
+/****************************************************************************/
+static GT_STATUS tcamOperationPerform
+(
+    IN   GT_QD_DEV             *dev,
+    IN   GT_TCAM_OPERATION    tcamOp,
+    INOUT GT_TCAM_OP_DATA    *opData
+);
+
+/*******************************************************************************
+* gtcamFlushAll
+*
+* DESCRIPTION:
+*       This routine is to flush all entries. A Flush All command will initialize
+*       TCAM Pages 0 and 1, offsets 0x02 to 0x1B to 0x0000, and TCAM Page 2 offset
+*       0x02 to 0x05 to 0x0000 for all TCAM entries with the exception that TCAM
+*       Page 0 offset 0x02 will be initialized to 0x00FF.
+*
+*
+* INPUTS:
+*        None.
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtcamFlushAll
+(
+    IN  GT_QD_DEV     *dev
+)
+{
+    GT_STATUS           retVal;
+    GT_TCAM_OPERATION    op;
+    GT_TCAM_OP_DATA     tcamOpData;
+
+    DBG_INFO(("gtcamFlushAll Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TCAM))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Program Tuning register */
+    op = TCAM_FLUSH_ALL;
+    tcamOpData.tcamEntry = 0xFF;
+    retVal = tcamOperationPerform(dev,op, &tcamOpData);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (tcamOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gtcamFlushEntry
+*
+* DESCRIPTION:
+*       This routine is to flush a single entry. A Flush a single TCAM entry command
+*       will write the same values to a TCAM entry as a Flush All command, but it is
+*       done to the selected single TCAM entry only.
+*
+*
+* INPUTS:
+*        tcamPointer - pointer to the desired entry of TCAM (0 ~ 254)
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtcamFlushEntry
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        tcamPointer
+)
+{
+    GT_STATUS           retVal;
+    GT_TCAM_OPERATION    op;
+    GT_TCAM_OP_DATA     tcamOpData;
+
+    DBG_INFO(("gtcamFlushEntry Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TCAM))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the given pointer is valid */
+    if (tcamPointer > 0xFE)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Program Tuning register */
+    op = TCAM_FLUSH_ALL;
+    tcamOpData.tcamEntry = tcamPointer;
+    retVal = tcamOperationPerform(dev,op, &tcamOpData);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (tcamOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gtcamLoadEntry
+*
+* DESCRIPTION:
+*       This routine loads a TCAM entry.
+*       The load sequence of TCAM entry is critical. Each TCAM entry is made up of
+*       3 pages of data. All 3 pages need to loaded in a particular order for the TCAM
+*       to operate correctly while frames are flowing through the switch.
+*       If the entry is currently valid, it must first be flushed. Then page 2 needs
+*       to be loaded first, followed by page 1 and then finally page 0.
+*       Each page load requires its own write TCAMOp with these TCAM page bits set
+*       accordingly.
+*
+* INPUTS:
+*        tcamPointer - pointer to the desired entry of TCAM (0 ~ 254)
+*        tcamData    - Tcam entry Data
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtcamLoadEntry
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        tcamPointer,
+    IN  GT_TCAM_DATA        *tcamData
+)
+{
+    GT_STATUS           retVal;
+    GT_TCAM_OPERATION    op;
+    GT_TCAM_OP_DATA     tcamOpData;
+
+    DBG_INFO(("gtcamLoadEntry Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TCAM))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the given pointer is valid */
+    if ((tcamPointer > 0xFE)||(tcamData==NULL))
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Program Tuning register */
+    op = TCAM_LOAD_ENTRY;
+    tcamOpData.tcamPage = 0; /* useless */
+    tcamOpData.tcamEntry = tcamPointer;
+    tcamOpData.tcamDataP = tcamData;
+    retVal = tcamOperationPerform(dev,op, &tcamOpData);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (tcamOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gtcamPurgyEntry
+*
+* DESCRIPTION:
+*       This routine Purgy a TCAM entry.
+*
+* INPUTS:
+*        tcamPointer - pointer to the desired entry of TCAM (0 ~ 254)
+*        tcamData    - Tcam entry Data
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtcamPurgyEntry
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        tcamPointer,
+    IN  GT_TCAM_DATA        *tcamData
+)
+{
+    GT_STATUS           retVal;
+    GT_TCAM_OPERATION    op;
+    GT_TCAM_OP_DATA     tcamOpData;
+
+    DBG_INFO(("gtcamPurgyEntry Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TCAM))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the given pointer is valid */
+    if ((tcamPointer > 0xFE)||(tcamData==NULL))
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Program Tuning register */
+    op = TCAM_PURGE_ENTRY;
+    tcamOpData.tcamPage = 0; /* useless */
+    tcamOpData.tcamEntry = tcamPointer;
+    tcamOpData.tcamDataP = tcamData;
+    retVal = tcamOperationPerform(dev,op, &tcamOpData);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (tcamOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+
+/*******************************************************************************
+* gtcamReadTCAMData
+*
+* DESCRIPTION:
+*       This routine loads the global 3 offsets 0x02 to 0x1B registers with
+*       the data found in the TCAM entry and its TCAM page pointed to by the TCAM
+*       entry and TCAM page bits of this register (bits 7:0 and 11:10 respectively.
+*
+*
+* INPUTS:
+*        tcamPointer - pointer to the desired entry of TCAM (0 ~ 254)
+*
+* OUTPUTS:
+*        tcamData    - Tcam entry Data
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtcamReadTCAMData
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        tcamPointer,
+    OUT GT_TCAM_DATA        *tcamData
+)
+{
+    GT_STATUS           retVal;
+    GT_TCAM_OPERATION    op;
+    GT_TCAM_OP_DATA     tcamOpData;
+
+    DBG_INFO(("gtcamReadTCAMData Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TCAM))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the given pointer is valid */
+    if ((tcamPointer > 0xFE)||(tcamData==NULL))
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Program Tuning register */
+    op = TCAM_READ_ENTRY;
+    tcamOpData.tcamPage = 0; /* useless */
+    tcamOpData.tcamEntry = tcamPointer;
+    tcamOpData.tcamDataP = tcamData;
+    retVal = tcamOperationPerform(dev,op, &tcamOpData);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (tcamOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gtcamGetNextTCAMData
+*
+* DESCRIPTION:
+*       This routine  finds the next higher TCAM Entry number that is valid (i.e.,
+*       any entry whose Page 0 offset 0x02 is not equal to 0x00FF). The TCAM Entry
+*       register (bits 7:0) is used as the TCAM entry to start from. To find
+*       the lowest number TCAM Entry that is valid, start the Get Next operation
+*       with TCAM Entry set to 0xFF.
+*
+*
+* INPUTS:
+*        tcamPointer - start pointer entry of TCAM (0 ~ 255)
+*
+* OUTPUTS:
+*        tcamPointer - next pointer entry of TCAM (0 ~ 255)
+*        tcamData    - Tcam entry Data
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*       GT_BAD_PARAM - if invalid parameter is given
+*       GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtcamGetNextTCAMData
+(
+    IN  GT_QD_DEV     *dev,
+    IN  GT_U32        *tcamPointer,
+    OUT GT_TCAM_DATA        *tcamData
+)
+{
+    GT_STATUS           retVal;
+    GT_TCAM_OPERATION    op;
+    GT_TCAM_OP_DATA     tcamOpData;
+
+    DBG_INFO(("gtcamGetNextTCAMData Called.\n"));
+
+    /* check if device supports this feature */
+    if (!IS_IN_DEV_GROUP(dev,DEV_TCAM))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the given pointer is valid */
+    if ((*tcamPointer > 0xFF)||(tcamData==NULL))
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    /* Program Tuning register */
+    op = TCAM_GET_NEXT_ENTRY;
+    tcamOpData.tcamPage = 0; /* useless */
+    tcamOpData.tcamEntry = *tcamPointer;
+    tcamOpData.tcamDataP = tcamData;
+    retVal = tcamOperationPerform(dev,op, &tcamOpData);
+    if(retVal != GT_OK)
+    {
+        DBG_INFO(("Failed (tcamOperationPerform returned GT_FAIL).\n"));
+        return retVal;
+    }
+
+    *tcamPointer = tcamOpData.tcamEntry;
+    tcamData->rawFrmData[0].frame0.paraFrm.pg0Op = qdLong2Short(tcamOpData.tcamEntry);
+    tcamData->rawFrmData[0].frame0.paraFrm.pg0Op &= 0xff;
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+
+}
+
+#if 0
+/*******************************************************************************
+* gtcamAddEntry
+*
+* DESCRIPTION:
+*       Creates the new entry in TCAM.
+*
+* INPUTS:
+*       tcamEntry    - TCAM entry to insert to the TCAM.
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK          - on success
+*       GT_FAIL        - on error
+*       GT_BAD_PARAM   - on invalid port vector
+*
+* COMMENTS:
+*
+* GalTis:
+*
+*******************************************************************************/
+GT_STATUS gtcamAddEntry
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_ATU_ENTRY *tcamEntry
+)
+{
+    GT_STATUS       retVal;
+    GT_ATU_ENTRY    entry;
+
+    DBG_INFO(("gtcamAddEntry Called.\n"));
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+/*******************************************************************************
+* gtcamDelEntry
+*
+* DESCRIPTION:
+*       Deletes TCAM entry.
+*
+* INPUTS:
+*       tcamEntry - TCAM entry.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK           - on success
+*       GT_FAIL         - on error
+*       GT_NO_RESOURCE  - failed to allocate a t2c struct
+*       GT_NO_SUCH      - if specified address entry does not exist
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gtcamDelEntry
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_ETHERADDR  *tcamEntry
+)
+{
+    GT_STATUS retVal;
+    GT_ATU_ENTRY    entry;
+
+    DBG_INFO(("gtcamDelEntry Called.\n"));
+
+    DBG_INFO(("OK.\n"));
+    return GT_OK;
+}
+
+
+#endif
+
+/****************************************************************************/
+/* Internal functions.                                                  */
+/****************************************************************************/
+static GT_STATUS tcamSetPage0Data(GT_QD_DEV *dev, GT_TCAM_DATA *tcamDataP, GT_U8 extFrame)
+{
+  GT_STATUS       retVal;    /* Functions return value */
+  int i, startLoc, endReg;
+
+
+  tcamDataP->rawFrmData[extFrame].frame0.paraFrm.maskType=tcamDataP->frameTypeMask;
+  if(extFrame==1)
+  {
+    tcamDataP->rawFrmData[extFrame].frame0.paraFrm.frame0Type=0;
+  }
+  else
+  {
+    tcamDataP->rawFrmData[extFrame].frame0.paraFrm.frame0Type=tcamDataP->frameType;
+  }
+  {
+    tcamDataP->rawFrmData[extFrame].frame0.paraFrm.type0Res=0;
+    tcamDataP->rawFrmData[extFrame].frame0.paraFrm.spvRes=0;
+    tcamDataP->rawFrmData[extFrame].frame0.paraFrm.spvMask=tcamDataP->spvMask;
+    tcamDataP->rawFrmData[extFrame].frame0.paraFrm.spv=tcamDataP->spv;
+    tcamDataP->rawFrmData[extFrame].frame0.paraFrm.ppri0Mask=tcamDataP->ppriMask;
+    tcamDataP->rawFrmData[extFrame].frame0.paraFrm.ppri0=tcamDataP->ppri;
+    tcamDataP->rawFrmData[extFrame].frame0.paraFrm.pvid0MaskHi=(tcamDataP->pvidMask>>8);
+    tcamDataP->rawFrmData[extFrame].frame0.paraFrm.pvid0Hi=(tcamDataP->pvid>>8);
+    tcamDataP->rawFrmData[extFrame].frame0.paraFrm.pvidMask0Low=tcamDataP->pvidMask&0xff;
+    tcamDataP->rawFrmData[extFrame].frame0.paraFrm.pvid0Low=tcamDataP->pvid&0xff;
+  }
+
+  if(extFrame==1)
+    startLoc =48;
+  else
+    startLoc = 0;
+  for(i=0; i<(22); i++)
+  {
+    tcamDataP->rawFrmData[extFrame].frame0.paraFrm.frame0[i].struc.mask=tcamDataP->frameOctetMask[i+startLoc];
+    tcamDataP->rawFrmData[extFrame].frame0.paraFrm.frame0[i].struc.oct=tcamDataP->frameOctet[i+startLoc];
+  }
+
+  if(extFrame==0)
+    endReg =0x1c;
+  else
+    endReg = 3;
+
+  for(i=2; i<endReg; i++)
+  {
+    retVal = tcamWriteGlobal3Reg(dev,i, tcamDataP->rawFrmData[extFrame].frame0.frame[i]);
+    if(retVal != GT_OK)
+    {
+      return retVal;
+    }
+  }
+
+  return GT_OK;
+}
+static GT_STATUS tcamSetPage1Data(GT_QD_DEV *dev, GT_TCAM_DATA *tcamDataP, GT_U8 extFrame)
+{
+  GT_STATUS       retVal;    /* Functions return value */
+  int i, startLoc;
+
+  if(extFrame==1)
+    startLoc =48;
+  else
+    startLoc = 0;
+  for(i=0; i<(26); i++)
+  {
+    tcamDataP->rawFrmData[extFrame].frame1.paraFrm.frame1[i].struc.mask=tcamDataP->frameOctetMask[i+23+startLoc];
+    tcamDataP->rawFrmData[extFrame].frame1.paraFrm.frame1[i].struc.mask=tcamDataP->frameOctet[i+23+startLoc];
+  }
+  for(i=2; i<0x1c; i++)
+  {
+    retVal = tcamWriteGlobal3Reg(dev, i,tcamDataP->rawFrmData[extFrame].frame1.frame[i]);
+    if(retVal != GT_OK)
+    {
+     return retVal;
+    }
+  }
+
+  return GT_OK;
+}
+
+static GT_STATUS tcamSetPage2Data(GT_QD_DEV *dev, GT_TCAM_DATA *tcamDataP, GT_U8 extFrame)
+{
+  GT_STATUS       retVal;    /* Functions return value */
+  GT_U16          data;     /* temporary Data storage */
+  int i, endReg;
+
+  tcamDataP->continu = 0;
+  if((extFrame!=1)&&(tcamDataP->is96Frame==1))
+      tcamDataP->continu = 1;
+
+  tcamDataP->rawFrmData[extFrame].frame2.paraFrm.continu=tcamDataP->continu;
+  if(extFrame==0)
+  {
+    tcamDataP->rawFrmData[extFrame].frame2.paraFrm.interrupt=tcamDataP->interrupt;
+    tcamDataP->rawFrmData[extFrame].frame2.paraFrm.IncTcamCtr=tcamDataP->IncTcamCtr;
+    tcamDataP->rawFrmData[extFrame].frame2.paraFrm. pg2res1=0;
+    tcamDataP->rawFrmData[extFrame].frame2.paraFrm.vidData=tcamDataP->vidData&0x07ff;
+
+    tcamDataP->rawFrmData[extFrame].frame2.paraFrm.nextId=tcamDataP->nextId;
+    tcamDataP->rawFrmData[extFrame].frame2.paraFrm.pg2res2=0;
+    tcamDataP->rawFrmData[extFrame].frame2.paraFrm.qpriData=tcamDataP->qpriData;
+    tcamDataP->rawFrmData[extFrame].frame2.paraFrm.pg2res3=0;
+    tcamDataP->rawFrmData[extFrame].frame2.paraFrm.fpriData=tcamDataP->fpriData;
+
+    tcamDataP->rawFrmData[extFrame].frame2.paraFrm.pg2res4=0;
+    tcamDataP->rawFrmData[extFrame].frame2.paraFrm.qpriAvbData=tcamDataP->qpriAvbData;
+    tcamDataP->rawFrmData[extFrame].frame2.paraFrm.pg2res5=0;
+    tcamDataP->rawFrmData[extFrame].frame2.paraFrm.dpvData=tcamDataP->dpvData;
+
+    tcamDataP->rawFrmData[extFrame].frame2.paraFrm.pg2res6=0;
+    tcamDataP->rawFrmData[extFrame].frame2.paraFrm.factionData=tcamDataP->factionData;
+    tcamDataP->rawFrmData[extFrame].frame2.paraFrm.pg2res7=0;
+    tcamDataP->rawFrmData[extFrame].frame2.paraFrm.ldBalanceData=tcamDataP->ldBalanceData;
+  }
+
+  if(extFrame==0)
+    endReg = 6;
+  else
+    endReg = 3;
+
+  for(i=2; i<endReg; i++)
+  {
+    retVal = tcamWriteGlobal3Reg(dev, i,tcamDataP->rawFrmData[extFrame].frame2.frame[i]);
+    if(retVal != GT_OK)
+    {
+     return retVal;
+    }
+  }
+
+  if(extFrame==0)
+  {
+    data = (tcamDataP->debugPort );
+    retVal = tcamWriteGlobal3Reg(dev,QD_REG_TCAM_P2_DEBUG_PORT,data);
+    if(retVal != GT_OK)
+    {
+       return retVal;
+    }
+    data = ((tcamDataP->highHit<<8) | (tcamDataP->lowHit<<0)  );
+    retVal = tcamWriteGlobal3Reg(dev,QD_REG_TCAM_P2_ALL_HIT,data);
+    if(retVal != GT_OK)
+    {
+      return retVal;
+    }
+  }
+  return GT_OK;
+}
+
+
+static GT_STATUS tcamGetPage0Data(GT_QD_DEV *dev, GT_TCAM_DATA *tcamDataP, GT_U8 extFrame)
+{
+  GT_STATUS       retVal;    /* Functions return value */
+  int i, startLoc, endReg;
+
+  if(extFrame==0)
+    endReg =0x1c;
+  else
+    endReg = 3;
+
+  for(i=2; i<endReg; i++)
+  {
+    retVal = tcamReadGlobal3Reg(dev, i, &tcamDataP->rawFrmData[extFrame].frame0.frame[i]);
+    if(retVal != GT_OK)
+    {
+      return retVal;
+    }
+  }
+
+  if(extFrame==0)
+  {
+  tcamDataP->frameTypeMask=qdShort2Char(tcamDataP->rawFrmData[extFrame].frame0.paraFrm.maskType);
+  tcamDataP->frameType=qdShort2Char(tcamDataP->rawFrmData[extFrame].frame0.paraFrm.frame0Type);
+
+  tcamDataP->spvMask=qdShort2Char(tcamDataP->rawFrmData[extFrame].frame0.paraFrm.spvMask);
+  tcamDataP->spv=qdShort2Char(tcamDataP->rawFrmData[extFrame].frame0.paraFrm.spv);
+  tcamDataP->ppriMask=qdShort2Char(tcamDataP->rawFrmData[extFrame].frame0.paraFrm.ppri0Mask);
+  tcamDataP->pvidMask=tcamDataP->rawFrmData[extFrame].frame0.paraFrm.pvid0MaskHi;
+  tcamDataP->pvidMask <<=8;
+  tcamDataP->ppri=qdShort2Char(tcamDataP->rawFrmData[extFrame].frame0.paraFrm.ppri0);
+  tcamDataP->pvid=tcamDataP->rawFrmData[extFrame].frame0.paraFrm.pvid0Hi;
+  tcamDataP->pvid <<=8;
+  tcamDataP->pvidMask |= tcamDataP->rawFrmData[extFrame].frame0.paraFrm.pvidMask0Low;
+  tcamDataP->pvid |= tcamDataP->rawFrmData[extFrame].frame0.paraFrm.pvid0Low;
+  }
+
+  if(extFrame==1)
+    startLoc =48;
+  else
+    startLoc = 0;
+  for(i=0; i<(22); i++)
+  {
+    tcamDataP->frameOctetMask[i+startLoc]=qdShort2Char(tcamDataP->rawFrmData[extFrame].frame0.paraFrm.frame0[i].struc.mask);
+    tcamDataP->frameOctet[i+startLoc]=qdShort2Char(tcamDataP->rawFrmData[extFrame].frame0.paraFrm.frame0[i].struc.oct);
+  }
+
+  return GT_OK;
+}
+static GT_STATUS tcamGetPage1Data(GT_QD_DEV *dev, GT_TCAM_DATA *tcamDataP, GT_U8 extFrame)
+{
+  GT_STATUS       retVal;    /* Functions return value */
+  int i, startLoc;
+
+  for(i=2; i<0x1c; i++)
+  {
+    retVal = tcamReadGlobal3Reg(dev,i,&tcamDataP->rawFrmData[extFrame].frame1.frame[i]);
+    if(retVal != GT_OK)
+    {
+     return retVal;
+    }
+  }
+  if(extFrame==1)
+    startLoc =48;
+  else
+    startLoc = 0;
+  for(i=0; i<(26); i++)
+  {
+    tcamDataP->frameOctetMask[i+23+startLoc]=qdShort2Char(tcamDataP->rawFrmData[extFrame].frame1.paraFrm.frame1[i].struc.mask);
+    tcamDataP->frameOctet[i+23+startLoc]=qdShort2Char(tcamDataP->rawFrmData[extFrame].frame1.paraFrm.frame1[i].struc.mask);
+  }
+
+  return GT_OK;
+}
+
+static GT_STATUS tcamGetPage2Data(GT_QD_DEV *dev, GT_TCAM_DATA *tcamDataP, GT_U8 extFrame)
+{
+  GT_STATUS       retVal;    /* Functions return value */
+  GT_U16          data;     /* temporary Data storage */
+  int i, endReg;
+
+/*  if(extFrame!=0)
+    return GT_OK;
+*/
+
+  if(extFrame==0)
+    endReg = 6;
+  else
+    endReg = 3;
+
+  for(i=2; i<endReg; i++)
+  {
+    retVal = tcamReadGlobal3Reg(dev, i, &tcamDataP->rawFrmData[extFrame].frame2.frame[i]);
+    if(retVal != GT_OK)
+    {
+     return retVal;
+    }
+  }
+  if(extFrame==0)
+  {
+
+    tcamDataP->continu=qdShort2Char(tcamDataP->rawFrmData[extFrame].frame2.paraFrm.continu);
+    tcamDataP->interrupt=qdShort2Char(tcamDataP->rawFrmData[extFrame].frame2.paraFrm.IncTcamCtr);
+    tcamDataP->vidData=tcamDataP->rawFrmData[extFrame].frame2.paraFrm.vidData;
+    tcamDataP->nextId=qdShort2Char(tcamDataP->rawFrmData[extFrame].frame2.paraFrm.nextId);
+    tcamDataP->qpriData=qdShort2Char(tcamDataP->rawFrmData[extFrame].frame2.paraFrm.qpriData);
+    tcamDataP->fpriData=qdShort2Char(tcamDataP->rawFrmData[extFrame].frame2.paraFrm.fpriData);
+    tcamDataP->qpriAvbData=qdShort2Char(tcamDataP->rawFrmData[extFrame].frame2.paraFrm.qpriAvbData);
+    tcamDataP->dpvData=qdShort2Char(tcamDataP->rawFrmData[extFrame].frame2.paraFrm.dpvData);
+    tcamDataP->factionData=tcamDataP->rawFrmData[extFrame].frame2.paraFrm.factionData;
+    tcamDataP->ldBalanceData=qdShort2Char(tcamDataP->rawFrmData[extFrame].frame2.paraFrm.ldBalanceData);
+
+    i = QD_REG_TCAM_P2_DEBUG_PORT;
+    retVal = tcamReadGlobal3Reg(dev,QD_REG_TCAM_P2_DEBUG_PORT,&data);
+    if(retVal != GT_OK)
+    {
+       return retVal;
+    }
+    tcamDataP->rawFrmData[extFrame].frame2.frame[i] = data;
+    tcamDataP->debugPort = (data)&0xf;
+    i = QD_REG_TCAM_P2_ALL_HIT;
+    retVal = tcamReadGlobal3Reg(dev,QD_REG_TCAM_P2_ALL_HIT,&data);
+    if(retVal != GT_OK)
+    {
+       return retVal;
+    }
+    tcamDataP->rawFrmData[extFrame].frame2.frame[i] = data;
+    tcamDataP->highHit = (data>>8)&0xff;
+    tcamDataP->lowHit = data&0xff;
+  }
+  return GT_OK;
+}
+
+static GT_STATUS waitTcamReady(GT_QD_DEV           *dev)
+{
+    GT_STATUS       retVal;    /* Functions return value */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 1;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL3_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_TCAM_OPERATION;
+      regAccess.rw_reg_list[0].data = 15;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+    }
+#else
+    GT_U16          data;     /* temporary Data storage */
+    data = 1;
+    while(data == 1)
+    {
+        retVal = hwGetGlobal3RegField(dev,QD_REG_TCAM_OPERATION,15,1,&data);
+        if(retVal != GT_OK)
+        {
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+    }
+#endif
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* tcamOperationPerform
+*
+* DESCRIPTION:
+*       This function accesses TCAM Table
+*
+* INPUTS:
+*       tcamOp   - The tcam operation
+*       tcamData - address and data to be written into TCAM
+*
+* OUTPUTS:
+*       tcamData - data read from TCAM pointed by address
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+static GT_STATUS tcamOperationPerform
+(
+    IN    GT_QD_DEV           *dev,
+    IN    GT_TCAM_OPERATION   tcamOp,
+    INOUT GT_TCAM_OP_DATA     *opData
+)
+{
+  GT_STATUS       retVal;    /* Functions return value */
+  GT_U16          data;     /* temporary Data storage */
+
+  gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+  /* Wait until the tcam in ready. */
+  retVal = waitTcamReady(dev);
+  if(retVal != GT_OK)
+  {
+    gtSemGive(dev,dev->tblRegsSem);
+    return retVal;
+  }
+
+  /* Set the TCAM Operation register */
+  switch (tcamOp)
+  {
+    case TCAM_FLUSH_ALL:
+      data = 0;
+      data = (1 << 15) | (tcamOp << 12);
+      retVal = tcamWriteGlobal3Reg(dev,QD_REG_TCAM_OPERATION,data);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+      break;
+
+    case TCAM_FLUSH_ENTRY:
+    {
+      int i, extFrame;
+      if(opData->tcamDataP->is96Frame==1)
+        extFrame = 2;
+      else
+        extFrame = 1;
+      for(i=0; i<extFrame; i++)
+      {
+        data = 0;
+        data = qdLong2Short((1 << 15) | (tcamOp << 12) | (opData->tcamEntry+i)) ;
+        retVal = tcamWriteGlobal3Reg(dev,QD_REG_TCAM_OPERATION,data);
+        if(retVal != GT_OK)
+        {
+          gtSemGive(dev,dev->tblRegsSem);
+          return retVal;
+        }
+      }
+    }
+      break;
+
+    case TCAM_LOAD_ENTRY:
+    case TCAM_PURGE_ENTRY:
+    {
+      int i, extFrame;
+      if((opData->tcamDataP->is96Frame==1)&&(tcamOp!=TCAM_PURGE_ENTRY))
+        extFrame = 2;
+      else
+        extFrame = 1;
+
+      for(i=0; i<extFrame; i++)
+      {
+        if(tcamOp!=TCAM_PURGE_ENTRY)
+        {
+          retVal = tcamSetPage2Data(dev, opData->tcamDataP, i);
+          if(retVal != GT_OK)
+          {
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+          }
+
+          data = 0;
+          data = qdLong2Short((1 << 15) | (TCAM_LOAD_ENTRY << 12) | (2 << 10) | (opData->tcamEntry+i));
+          retVal = tcamWriteGlobal3Reg(dev,QD_REG_TCAM_OPERATION,data);
+          if(retVal != GT_OK)
+          {
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+          }
+          retVal = waitTcamReady(dev);
+          if(retVal != GT_OK)
+          {
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+          }
+
+          retVal = tcamSetPage1Data(dev, opData->tcamDataP, i);
+          if(retVal != GT_OK)
+          {
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+          }
+          data = 0;
+          data = qdLong2Short((1 << 15) | (TCAM_LOAD_ENTRY << 12) | (1 << 10) | (opData->tcamEntry+i)) ;
+          retVal = tcamWriteGlobal3Reg(dev,QD_REG_TCAM_OPERATION,data);
+          if(retVal != GT_OK)
+          {
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+          }
+
+          retVal = waitTcamReady(dev);
+          if(retVal != GT_OK)
+          {
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+          }
+
+        }
+        retVal = tcamSetPage0Data(dev,  opData->tcamDataP, i);
+        if(retVal != GT_OK)
+        {
+          gtSemGive(dev,dev->tblRegsSem);
+          return retVal;
+        }
+        if(tcamOp==TCAM_PURGE_ENTRY)
+        {
+          data = 0xffff ;
+          retVal = tcamWriteGlobal3Reg(dev,QD_REG_TCAM_P0_KEYS_1,data);
+          if(retVal != GT_OK)
+          {
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+          }
+#if 0
+          retVal = tcamReadGlobal3Reg(dev, QD_REG_TCAM_P0_KEYS_1, &data);
+          if(retVal != GT_OK)
+          {
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+          }
+#endif
+        }
+        data = 0;
+        data = qdLong2Short((1 << 15) | (TCAM_LOAD_ENTRY << 12) | (0 << 10) | (opData->tcamEntry+i));
+        retVal = tcamWriteGlobal3Reg(dev,QD_REG_TCAM_OPERATION,data);
+        if(retVal != GT_OK)
+        {
+          gtSemGive(dev,dev->tblRegsSem);
+          return retVal;
+        }
+        retVal = waitTcamReady(dev);
+        if(retVal != GT_OK)
+        {
+          gtSemGive(dev,dev->tblRegsSem);
+          return retVal;
+        }
+
+#if 0 /* Test read back */
+          retVal = tcamReadGlobal3Reg(dev, QD_REG_TCAM_P0_KEYS_1, &data);
+          if(retVal != GT_OK)
+          {
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+          }
+
+        data = 0;
+        data = (1 << 15) | (TCAM_READ_ENTRY << 12) | (0 << 10) | (opData->tcamEntry+i);
+        retVal = tcamWriteGlobal3Reg(dev,QD_REG_TCAM_OPERATION,data);
+        if(retVal != GT_OK)
+        {
+          gtSemGive(dev,dev->tblRegsSem);
+          return retVal;
+        }
+        retVal = waitTcamReady(dev);
+        if(retVal != GT_OK)
+        {
+          gtSemGive(dev,dev->tblRegsSem);
+          return retVal;
+        }
+{
+  int j;
+  for(j=0; j<0x1c; j++)
+  {
+/*          retVal = tcamReadGlobal3Reg(dev, QD_REG_TCAM_P0_KEYS_1, &data); */
+          retVal = tcamReadGlobal3Reg(dev, j, &data);
+          if(retVal != GT_OK)
+          {
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+          }
+  }
+}
+
+#endif
+      }
+  }
+    break;
+
+    case TCAM_GET_NEXT_ENTRY:
+    {
+      data = 0;
+      data = qdLong2Short((1 << 15) | (tcamOp << 12) | (opData->tcamEntry)) ;
+      retVal = tcamWriteGlobal3Reg(dev,QD_REG_TCAM_OPERATION,data);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+      /* Wait until the tcam in ready. */
+      retVal = waitTcamReady(dev);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+
+      retVal = tcamReadGlobal3Reg(dev,QD_REG_TCAM_OPERATION, &data);
+      if(retVal != GT_OK)
+      {
+        gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+
+/*      if(opData->tcamEntry == 0xff)   If ask to find the lowest entry*/
+      {
+        if ((data&0xff)==0xff)
+        {
+          retVal = tcamReadGlobal3Reg(dev,QD_REG_TCAM_P0_KEYS_1, &data);
+          if(retVal != GT_OK)
+          {
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+          }
+          if(data==0x00ff)
+          {
+            /* No higher valid TCAM entry */
+            return GT_OK;
+          }
+          else
+          {
+            /* The highest valid TCAM entry found*/
+          }
+        }
+      }
+
+      /* Get next entry and read the entry */
+      opData->tcamEntry = data&0xff;
+    }
+    case TCAM_READ_ENTRY:
+    {
+      int i, extFrame;
+      if(opData->tcamDataP->is96Frame==1)
+        extFrame = 2;
+      else
+        extFrame = 1;
+      for(i=0; i<extFrame; i++)
+      {
+        data = 0;
+        /* Read page 0 */
+        data = qdLong2Short((1 << 15) | (tcamOp << 12) | (0 << 10) | (opData->tcamEntry+i)) ;
+        opData->tcamDataP->rawFrmData[i].frame0.frame[0] = data;
+        retVal = tcamWriteGlobal3Reg(dev,QD_REG_TCAM_OPERATION,data);
+        if(retVal != GT_OK)
+        {
+          gtSemGive(dev,dev->tblRegsSem);
+          return retVal;
+        }
+        /* Wait until the tcam in ready. */
+        retVal = waitTcamReady(dev);
+        if(retVal != GT_OK)
+        {
+          gtSemGive(dev,dev->tblRegsSem);
+          return retVal;
+        }
+
+        retVal = tcamGetPage0Data(dev, opData->tcamDataP, i);
+        if(retVal != GT_OK)
+        {
+          gtSemGive(dev,dev->tblRegsSem);
+          return retVal;
+        }
+
+        data = 0;
+        /* Read page 1 */
+        data = qdLong2Short((1 << 15) | (tcamOp << 12) | (1 << 10) | (opData->tcamEntry+i)) ;
+        retVal = tcamWriteGlobal3Reg(dev,QD_REG_TCAM_OPERATION,data);
+        if(retVal != GT_OK)
+        {
+          gtSemGive(dev,dev->tblRegsSem);
+          return retVal;
+        }
+        /* Wait until the tcam in ready. */
+        retVal = waitTcamReady(dev);
+        if(retVal != GT_OK)
+        {
+          gtSemGive(dev,dev->tblRegsSem);
+          return retVal;
+        }
+
+        retVal = tcamGetPage1Data(dev, opData->tcamDataP, i);
+        if(retVal != GT_OK)
+        {
+          gtSemGive(dev,dev->tblRegsSem);
+          return retVal;
+        }
+
+        data = 0;
+        /* Read page 2 */
+        data = qdLong2Short((1 << 15) | (tcamOp << 12) | (2 << 10) | (opData->tcamEntry+i)) ;
+        retVal = tcamWriteGlobal3Reg(dev,QD_REG_TCAM_OPERATION,data);
+        if(retVal != GT_OK)
+        {
+          gtSemGive(dev,dev->tblRegsSem);
+          return retVal;
+        }
+        /* Wait until the tcam in ready. */
+        retVal = waitTcamReady(dev);
+        if(retVal != GT_OK)
+        {
+          gtSemGive(dev,dev->tblRegsSem);
+          return retVal;
+        }
+
+        retVal = tcamGetPage2Data(dev, opData->tcamDataP, i);
+        if(retVal != GT_OK)
+        {
+          gtSemGive(dev,dev->tblRegsSem);
+          return retVal;
+        }
+      }
+    }
+    break;
+
+    default:
+      gtSemGive(dev,dev->tblRegsSem);
+      return GT_FAIL;
+}
+
+  gtSemGive(dev,dev->tblRegsSem);
+  return retVal;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtUtils.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtUtils.c
new file mode 100644
index 000000000000..d47f1addc411
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtUtils.c
@@ -0,0 +1,209 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtUtils.c
+*
+* DESCRIPTION:
+*       Collection of Utility functions
+*
+* DEPENDENCIES:
+*       None
+*
+* FILE REVISION NUMBER:
+*       $Revision: 3 $
+*******************************************************************************/
+
+#include <msApi.h>
+
+/*******************************************************************************
+* gtMemSet
+*
+* DESCRIPTION:
+*       Set a block of memory
+*
+* INPUTS:
+*       start  - start address of memory block for setting
+*       simbol - character to store, converted to an unsigned char
+*       size   - size of block to be set
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       Pointer to set memory block
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+void * gtMemSet
+(
+    IN void * start,
+    IN int    symbol,
+    IN GT_U32 size
+)
+{
+    GT_U32 i;
+    char* buf;
+
+    buf = (char*)start;
+
+    for(i=0; i<size; i++)
+    {
+        *buf++ = (char)symbol;
+    }
+
+    return start;
+}
+
+/*******************************************************************************
+* gtMemCpy
+*
+* DESCRIPTION:
+*       Copies 'size' characters from the object pointed to by 'source' into
+*       the object pointed to by 'destination'. If copying takes place between
+*       objects that overlap, the behavior is undefined.
+*
+* INPUTS:
+*       destination - destination of copy
+*       source      - source of copy
+*       size        - size of memory to copy
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       Pointer to destination
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+void * gtMemCpy
+(
+    IN void *       destination,
+    IN const void * source,
+    IN GT_U32       size
+)
+{
+    GT_U32 i;
+    char* buf;
+    char* src;
+
+    buf = (char*)destination;
+    src = (char*)source;
+
+    for(i=0; i<size; i++)
+    {
+        *buf++ = *src++;
+    }
+
+    return destination;
+}
+
+/*******************************************************************************
+* gtMemCmp
+*
+* DESCRIPTION:
+*       Compares given memories.
+*
+* INPUTS:
+*       src1 - source 1
+*       src2 - source 2
+*       size - size of memory to copy
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       0, if equal.
+*        negative number, if src1 < src2.
+*        positive number, if src1 > src2.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+int gtMemCmp
+(
+    IN char src1[],
+    IN char src2[],
+    IN GT_U32 size
+)
+{
+    GT_U32 i;
+    int value;
+
+    for(i=0; i<size; i++)
+    {
+        if((value = (int)(src1[i] - src2[i])) != 0)
+            return value;
+    }
+
+    return 0;
+}
+
+/*******************************************************************************
+* gtStrlen
+*
+* DESCRIPTION:
+*       Determine the length of a string
+* INPUTS:
+*       source  - string
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       size    - number of characters in string, not including EOS.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_U32 gtStrlen
+(
+    IN const void * source
+)
+{
+    GT_U32 i = 0;
+    char* src;
+
+    src = (char*)source;
+
+    while(*src++) i++;
+
+    return i;
+}
+
+
+/*******************************************************************************
+* gtDelay
+*
+* DESCRIPTION:
+*       Wait for the given uSec and return.
+*        Current Switch devices with Gigabit Ethernet Support require 250 uSec
+*        of delay time for PPU to be disabled.
+*        Since this function is System and/or OS dependent, it should be provided
+*        by each DSDT user.
+*
+* INPUTS:
+*       delayTime - delay in uSec.
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       None
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+void gtDelay
+(
+    IN const unsigned int delayTime
+)
+{
+
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtVct.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtVct.c
new file mode 100644
index 000000000000..2f915fb7fd25
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtVct.c
@@ -0,0 +1,1248 @@
+#include <Copyright.h>
+/*******************************************************************************
+* gtVct.c
+*
+* DESCRIPTION:
+*       API for Marvell Virtual Cable Tester.
+*
+* DEPENDENCIES:
+*       None.
+*
+* FILE REVISION NUMBER:
+*       $Revision: 1 $
+*******************************************************************************/
+#include <msApi.h>
+#include <gtVct.h>
+#include <gtDrvConfig.h>
+#include <gtDrvSwRegs.h>
+#include <gtHwCntl.h>
+#include <gtSem.h>
+
+#ifdef GT_USE_MAD
+#include <gtMad.h>
+#endif
+
+#ifdef GT_USE_MAD
+#include "gtVct_mad.c"
+#endif
+
+/*******************************************************************************
+* analizePhy100MVCTResult
+*
+* DESCRIPTION:
+*       This routine analize the virtual cable test result for 10/100M Phy
+*
+* INPUTS:
+*       regValue - test result
+*
+* OUTPUTS:
+*       cableStatus - analized test result.
+*       cableLen    - cable length or the distance where problem occurs.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+static
+GT_STATUS analizePhy100MVCTResult
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U16 regValue,
+    OUT GT_TEST_STATUS *cableStatus,
+    OUT GT_CABLE_LEN *cableLen
+)
+{
+    int len;
+
+    GT_UNUSED_PARAM(dev);
+
+    switch((regValue & 0x6000) >> 13)
+    {
+        case 0:
+            /* test passed. No problem found. */
+            /* check if there is impedance mismatch */
+            if ((regValue & 0xFF) == 0xFF)
+            {
+                *cableStatus = GT_NORMAL_CABLE;
+                cableLen->normCableLen = GT_UNKNOWN_LEN;
+            }
+            else
+            {
+                *cableStatus = GT_IMPEDANCE_MISMATCH;
+                len = (int)FORMULA_PHY100M(regValue & 0xFF);
+                if(len <= 0)
+                    cableLen->errCableLen = 0;
+                else
+                    cableLen->errCableLen = (GT_U8)len;
+            }
+
+            break;
+        case 1:
+            /* test passed. Cable is short. */
+            *cableStatus = GT_SHORT_CABLE;
+            len = (int)FORMULA_PHY100M(regValue & 0xFF);
+            if(len <= 0)
+                cableLen->errCableLen = 0;
+            else
+                cableLen->errCableLen = (GT_U8)len;
+            break;
+        case 2:
+            /* test passed. Cable is open. */
+            *cableStatus = GT_OPEN_CABLE;
+            len = (int)FORMULA_PHY100M(regValue & 0xFF);
+            if(len <= 0)
+                cableLen->errCableLen = 0;
+            else
+                cableLen->errCableLen = (GT_U8)len;
+            break;
+        case 3:
+        default:
+            /* test failed. No result is valid. */
+            *cableStatus = GT_TEST_FAIL;
+            break;
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* getCableStatus_Phy100M
+*
+* DESCRIPTION:
+*       This routine perform the virtual cable test for the 10/100Mbps phy,
+*       and returns the the status per Rx/Tx pair.
+*
+* INPUTS:
+*       port - logical port number.
+*
+* OUTPUTS:
+*       cableStatus - the port copper cable status.
+*       cableLen    - the port copper cable length.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+static
+GT_STATUS getCableStatus_Phy100M
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8            hwPort,
+    OUT GT_CABLE_STATUS *cableStatus
+)
+{
+    GT_STATUS status;
+    GT_U16 reg26, reg27;
+
+    DBG_INFO(("getCableStatus_100Phy Called.\n"));
+
+
+    /* Wait until the Table is ready. */
+    /*
+     *     phy should be in 100 Full Duplex.
+     */
+    if((status= hwWritePhyReg(dev,hwPort,0,QD_PHY_RESET | QD_PHY_SPEED | QD_PHY_DUPLEX)) != GT_OK)
+    {
+        return status;
+    }
+
+    /*
+     * start Virtual Cable Tester
+     */
+    if((status= hwWritePhyReg(dev,hwPort,26,0x8000)) != GT_OK)
+    {
+        return status;
+    }
+
+    do
+    {
+        if((status= hwReadPhyReg(dev,hwPort,26,&reg26)) != GT_OK)
+        {
+            return status;
+        }
+
+    } while(reg26 & 0x8000);
+
+    /*
+     * read the test result for RX Pair
+     */
+    if((status= hwReadPhyReg(dev,hwPort,26,&reg26)) != GT_OK)
+    {
+        return status;
+    }
+
+    /*
+     * read the test result for TX Pair
+     */
+    if((status= hwReadPhyReg(dev,hwPort,27,&reg27)) != GT_OK)
+    {
+        return status;
+    }
+
+    cableStatus->phyType = PHY_100M;
+
+    /*
+     * analyze the test result for RX Pair
+     */
+    analizePhy100MVCTResult(dev, reg26, &cableStatus->cableStatus[MDI_RX_PAIR],
+                            &cableStatus->cableLen[MDI_RX_PAIR]);
+
+    /*
+     * analyze the test result for TX Pair
+     */
+    analizePhy100MVCTResult(dev, reg27, &cableStatus->cableStatus[MDI_TX_PAIR],
+                            &cableStatus->cableLen[MDI_TX_PAIR]);
+
+    return status;
+}
+
+static
+GT_STATUS  enable1stWorkAround_Phy100M
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     hwPort
+)
+{
+    GT_U16      value;
+    GT_STATUS   status;
+
+    /* enable 1st work-around */
+    if ((status = hwWritePhyReg(dev, hwPort, 29, 3)) != GT_OK)
+       return status;
+
+    value = 0x6440;
+    if ((status = hwWritePhyReg(dev, hwPort, 30, value)) != GT_OK)
+       return status;
+
+    return GT_OK;
+}
+
+static
+GT_STATUS  disable1stWorkAround_Phy100M
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     hwPort
+)
+{
+    GT_STATUS status;
+
+    /* disable 1st work-around */
+    if ((status = hwWritePhyReg(dev, hwPort, 29, 3)) != GT_OK)
+       return status;
+
+    if ((status = hwWritePhyReg(dev, hwPort, 30, 0)) != GT_OK)
+       return status;
+
+    return GT_OK;
+}
+
+static
+GT_STATUS workAround_Phy100M
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8            hwPort,
+    OUT GT_CABLE_STATUS *cableStatus
+)
+{
+    GT_STATUS status = GT_OK;
+
+    /*
+     * If Cable Status is OPEN and the length is less than 15m,
+     * then apply Work Around.
+     */
+
+    if((cableStatus->cableStatus[MDI_RX_PAIR] == GT_OPEN_CABLE) ||
+        (cableStatus->cableStatus[MDI_TX_PAIR] == GT_OPEN_CABLE))
+    {
+        /* must be disabled first and then enable again */
+        disable1stWorkAround_Phy100M(dev,hwPort);
+
+        enable1stWorkAround_Phy100M(dev,hwPort);
+
+        if((status= hwWritePhyReg(dev,hwPort,29,0x000A)) != GT_OK)
+        {
+            return status;
+        }
+        if((status= hwWritePhyReg(dev,hwPort,30,0x0002)) != GT_OK)
+        {
+            return status;
+        }
+
+        if((status = getCableStatus_Phy100M(dev,hwPort,cableStatus)) != GT_OK)
+        {
+            return status;
+        }
+
+        if((status= hwWritePhyReg(dev,hwPort,29,0x000A)) != GT_OK)
+        {
+            return status;
+        }
+        if((status= hwWritePhyReg(dev,hwPort,30,0x0000)) != GT_OK)
+        {
+            return status;
+        }
+    }
+
+    return status;
+}
+
+
+static
+GT_STATUS  enable1stWorkAround_Phy1000M
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     hwPort
+)
+{
+    GT_STATUS   status;
+
+    /* enable 1st work-around */
+    if ((status = hwWritePhyReg(dev, hwPort, 29, 0x0018)) != GT_OK)
+       return status;
+
+    if ((status = hwWritePhyReg(dev, hwPort, 30, 0x00c2)) != GT_OK)
+       return status;
+
+    if ((status = hwWritePhyReg(dev, hwPort, 30, 0x00ca)) != GT_OK)
+       return status;
+
+    if ((status = hwWritePhyReg(dev, hwPort, 30, 0x00c2)) != GT_OK)
+       return status;
+    return GT_OK;
+}
+
+static
+GT_STATUS  disable1stWorkAround_Phy1000M
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8     hwPort
+)
+{
+    GT_STATUS status;
+
+    /* disable 1st work-around */
+    if ((status = hwWritePhyReg(dev, hwPort, 29, 0x0018)) != GT_OK)
+       return status;
+
+    if ((status = hwWritePhyReg(dev, hwPort, 30, 0x0042)) != GT_OK)
+       return status;
+
+    return GT_OK;
+}
+
+/*******************************************************************************
+* analizePhy1000MVCTResult
+*
+* DESCRIPTION:
+*       This routine analize the virtual cable test result for a Gigabit Phy
+*
+* INPUTS:
+*       reg17 - original value of register 17
+*       regValue - test result
+*
+* OUTPUTS:
+*       cableStatus - analized test result.
+*       cableLen    - cable length or the distance where problem occurs.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+static
+GT_STATUS analizePhy1000MVCTResult
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U16 reg17,
+    IN  GT_U16 regValue,
+    OUT GT_TEST_STATUS *cableStatus,
+    OUT GT_CABLE_LEN *cableLen
+)
+{
+    GT_U16 u16Data;
+    int len;
+
+    GT_UNUSED_PARAM(dev);
+
+    switch((regValue & 0x6000) >> 13)
+    {
+        case 0:
+
+            /* Check Impedance Mismatch */
+            if ((regValue & 0xFF) < 0xFF)
+            {
+                /*  if the reflected amplitude is low it is good cable too.
+                    for this registers values it is a good cable:
+                    0xE23, 0xE24, 0xE25, 0xE26, 0xE27 */
+                if ((regValue < 0xE23) || (regValue > 0xE27))
+                {
+                    *cableStatus = GT_IMPEDANCE_MISMATCH;
+                    len = (int)FORMULA_PHY1000M(regValue & 0xFF);
+                    if(len <= 0)
+                        cableLen->errCableLen = 0;
+                    else
+                        cableLen->errCableLen = (GT_U8)len;
+                    break;
+                }
+            }
+
+            /* test passed. No problem found. */
+            *cableStatus = GT_NORMAL_CABLE;
+
+            u16Data = reg17;
+
+            /* To get Cable Length, Link should be on and Speed should be 100M or 1000M */
+            if(!(u16Data & 0x0400))
+            {
+                cableLen->normCableLen = GT_UNKNOWN_LEN;
+                break;
+            }
+
+            if((u16Data & 0xC000) != 0x8000)
+            {
+                cableLen->normCableLen = GT_UNKNOWN_LEN;
+                break;
+            }
+
+            /*
+             * read the test result for the selected MDI Pair
+             */
+
+            u16Data = ((u16Data >> 7) & 0x7);
+
+            switch(u16Data)
+            {
+                case 0:
+                    cableLen->normCableLen = GT_LESS_THAN_50M;
+                    break;
+                case 1:
+                    cableLen->normCableLen = GT_50M_80M;
+                    break;
+                case 2:
+                    cableLen->normCableLen = GT_80M_110M;
+                    break;
+                case 3:
+                    cableLen->normCableLen = GT_110M_140M;
+                    break;
+                case 4:
+                    cableLen->normCableLen = GT_MORE_THAN_140;
+                    break;
+                default:
+                    cableLen->normCableLen = GT_UNKNOWN_LEN;
+                    break;
+            }
+            break;
+        case 1:
+            /* test passed. Cable is short. */
+            *cableStatus = GT_SHORT_CABLE;
+            len = (int)FORMULA_PHY1000M(regValue & 0xFF);
+            if(len <= 0)
+                cableLen->errCableLen = 0;
+            else
+                cableLen->errCableLen = (GT_U8)len;
+            break;
+        case 2:
+            /* test passed. Cable is open. */
+            *cableStatus = GT_OPEN_CABLE;
+            len = (int)FORMULA_PHY1000M(regValue & 0xFF);
+            if(len <= 0)
+                cableLen->errCableLen = 0;
+            else
+                cableLen->errCableLen = (GT_U8)len;
+            break;
+        case 3:
+        default:
+            /* test failed. No result is valid. */
+            *cableStatus = GT_TEST_FAIL;
+            break;
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* getCableStatus_Phy1000M
+*
+* DESCRIPTION:
+*       This routine perform the virtual cable test for the 10/100Mbps phy,
+*       and returns the the status per Rx/Tx pair.
+*
+* INPUTS:
+*       port - logical port number.
+*        reg17 - original value of reg17.
+*
+* OUTPUTS:
+*       cableStatus - the port copper cable status.
+*       cableLen    - the port copper cable length.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+static
+GT_STATUS getCableStatus_Phy1000M
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8            hwPort,
+    IN  GT_U16             reg17,
+    OUT GT_CABLE_STATUS *cableStatus
+)
+{
+    GT_STATUS status;
+    GT_U16 reg28;
+    int i;
+
+    DBG_INFO(("getCableStatus_Phy1000M Called.\n"));
+
+    /*
+     * start Virtual Cable Tester
+     */
+    if((status= hwWritePagedPhyReg(dev,hwPort,0,28,0,0x8000)) != GT_OK)
+    {
+        return status;
+    }
+
+    do
+    {
+        if((status= hwReadPhyReg(dev,hwPort,28,&reg28)) != GT_OK)
+        {
+            return status;
+        }
+
+    } while(reg28 & 0x8000);
+
+    cableStatus->phyType = PHY_1000M;
+
+    DBG_INFO(("Reg28 after test : %0#x.\n", reg28));
+
+    for (i=0; i<GT_MDI_PAIR_NUM; i++)
+    {
+        /*
+         * read the test result for the selected MDI Pair
+         */
+        if((status= hwReadPagedPhyReg(dev,hwPort,(GT_U8)i,28,0,&reg28)) != GT_OK)
+        {
+            return status;
+        }
+
+        /*
+         * analyze the test result for RX Pair
+         */
+        if((status = analizePhy1000MVCTResult(dev, reg17, reg28,
+                                &cableStatus->cableStatus[i],
+                                &cableStatus->cableLen[i])) != GT_OK)
+        {
+            return status;
+        }
+    }
+
+    return GT_OK;
+}
+
+static
+GT_STATUS workAround_Phy1000M
+(
+  GT_QD_DEV *dev,
+  GT_U8 hwPort
+)
+{
+    GT_STATUS status;
+
+    DBG_INFO(("workAround for Gigabit Phy Called.\n"));
+
+    /* enable 1st work-around */
+    if ((status = hwWritePhyReg(dev, hwPort, 29, 0x0018)) != GT_OK)
+       return status;
+
+    if ((status = hwWritePhyReg(dev, hwPort, 30, 0x00c2)) != GT_OK)
+       return status;
+
+    if((status = hwWritePhyReg(dev,hwPort,29,0x1e)) != GT_OK)
+    {
+        return status;
+    }
+
+    if((status = hwWritePhyReg(dev,hwPort,30,0xcc00)) != GT_OK)
+    {
+        return status;
+    }
+
+    if((status = hwWritePhyReg(dev,hwPort,30,0xc800)) != GT_OK)
+    {
+        return status;
+    }
+    if((status = hwWritePhyReg(dev,hwPort,30,0xc400)) != GT_OK)
+    {
+        return status;
+    }
+    if((status = hwWritePhyReg(dev,hwPort,30,0xc000)) != GT_OK)
+    {
+        return status;
+    }
+    if((status = hwWritePhyReg(dev,hwPort,30,0xc100)) != GT_OK)
+    {
+        return status;
+    }
+
+    DBG_INFO(("workAround for Gigabit Phy completed.\n"));
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* getCableStatus_Phy1000MPage
+*
+* DESCRIPTION:
+*       This routine perform the virtual cable test for the 10/100Mbps phy with
+*       multiple page mode and returns the the status per MDIP/N.
+*
+* INPUTS:
+*       port - logical port number.
+*
+* OUTPUTS:
+*       cableStatus - the port copper cable status.
+*       cableLen    - the port copper cable length.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+* GalTis:
+*
+*******************************************************************************/
+static
+GT_STATUS getCableStatus_Phy1000MPage
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_U8            hwPort,
+    IN  GT_PHY_INFO        *phyInfo,
+    OUT GT_CABLE_STATUS *cableStatus
+)
+{
+    GT_STATUS status;
+    GT_U16 u16Data;
+    GT_U16 reg17 = 0;
+    int i;
+
+    DBG_INFO(("getCableStatus_Phy1000M Called.\n"));
+
+    /*
+     * If Fiber is used, simply return with test fail.
+     */
+    if(phyInfo->flag & GT_PHY_FIBER)
+    {
+        if((status= hwReadPagedPhyReg(dev,hwPort,1,17,phyInfo->anyPage,&u16Data)) != GT_OK)
+        {
+            return status;
+        }
+
+        if(u16Data & 0x400)
+        {
+            for (i=0; i<GT_MDI_PAIR_NUM; i++)
+            {
+                cableStatus->cableStatus[i] = GT_TEST_FAIL;
+            }
+            return GT_OK;
+        }
+    }
+
+    /*
+     * If Copper is used and Link is on, get DSP Distance and put it in the
+     * old reg17 format.(bit9:7 with DSP Distance)
+     */
+    if((status= hwReadPagedPhyReg(dev,hwPort,0,17,phyInfo->anyPage,&u16Data)) != GT_OK)
+    {
+        return status;
+    }
+
+    if(u16Data & 0x400)
+    {
+        reg17 = (u16Data & 0xC000) | 0x400;
+
+        if((status= hwReadPagedPhyReg(dev,hwPort,5,26,phyInfo->anyPage,&u16Data)) != GT_OK)
+        {
+            return status;
+        }
+        reg17 |= ((u16Data & 0x7) << 7);
+    }
+
+    /*
+     * start Virtual Cable Tester
+     */
+    if((status= hwWritePagedPhyReg(dev,hwPort,5,16,phyInfo->anyPage,0x8000)) != GT_OK)
+    {
+        return status;
+    }
+
+    do
+    {
+        if((status= hwReadPagedPhyReg(dev,hwPort,5,16,phyInfo->anyPage,&u16Data)) != GT_OK)
+        {
+            return status;
+        }
+
+    } while(u16Data & 0x8000);
+
+    cableStatus->phyType = PHY_1000M;
+
+    DBG_INFO(("Page 5 of Reg16 after test : %0#x.\n", u16Data));
+
+    for (i=0; i<GT_MDI_PAIR_NUM; i++)
+    {
+        /*
+         * read the test result for the selected MDI Pair
+         */
+        if((status= hwReadPagedPhyReg(dev,hwPort,5,(GT_U8)(16+i),phyInfo->anyPage,&u16Data)) != GT_OK)
+        {
+            return status;
+        }
+
+        /*
+         * analyze the test result for RX Pair
+         */
+        if((status = analizePhy1000MVCTResult(dev, reg17, u16Data,
+                                &cableStatus->cableStatus[i],
+                                &cableStatus->cableLen[i])) != GT_OK)
+        {
+            return status;
+        }
+    }
+
+    return GT_OK;
+}
+
+
+
+/*******************************************************************************
+* gvctGetCableStatus
+*
+* DESCRIPTION:
+*       This routine perform the virtual cable test for the requested port,
+*       and returns the the status per MDI pair.
+*
+* INPUTS:
+*       port - logical port number.
+*
+* OUTPUTS:
+*       cableStatus - the port copper cable status.
+*       cableLen    - the port copper cable length.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       Internal Gigabit Phys in 88E6165 family and 88E6351 family devices
+*        are not supported by this API. For those devices, gvctGetAdvCableDiag
+*        API can be used, instead.
+*
+*******************************************************************************/
+GT_STATUS gvctGetCableDiag
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT        port,
+    OUT GT_CABLE_STATUS *cableStatus
+)
+{
+    GT_STATUS status;
+    GT_U8 hwPort;
+    GT_U16 orgReg0, orgReg17;
+    GT_BOOL ppuEn;
+    GT_PHY_INFO    phyInfo;
+    GT_BOOL            autoOn;
+    GT_U16            pageReg;
+
+#ifdef GT_USE_MAD
+    if (dev->use_mad==GT_TRUE)
+    {
+        return gvctGetCableDiag_mad(dev, port, cableStatus);
+    }
+#endif
+
+    DBG_INFO(("gvctGetCableDiag Called.\n"));
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the port supports VCT */
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (!(phyInfo.flag & GT_PHY_VCT_CAPABLE))
+    {
+        DBG_INFO(("Not Supported\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Need to disable PPUEn for safe. */
+    if(gsysGetPPUEn(dev,&ppuEn) != GT_OK)
+    {
+        ppuEn = GT_FALSE;
+    }
+
+    if(ppuEn != GT_FALSE)
+    {
+        if((status= gsysSetPPUEn(dev,GT_FALSE)) != GT_OK)
+        {
+            DBG_INFO(("Not able to disable PPUEn.\n"));
+            gtSemGive(dev,dev->phyRegsSem);
+            return status;
+        }
+        gtDelay(250);
+    }
+
+    if(driverPagedAccessStart(dev,hwPort,phyInfo.pageType,&autoOn,&pageReg) != GT_OK)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    /*
+     * save original register 17 value, which will be used later depending on
+     * test result.
+     */
+    if((status= hwReadPagedPhyReg(dev,hwPort,0,17,phyInfo.anyPage,&orgReg17)) != GT_OK)
+    {
+        DBG_INFO(("Not able to reset the Phy.\n"));
+        goto cableDiagCleanup;
+    }
+
+    /*
+     * save Config Register data
+     */
+    if((status= hwReadPagedPhyReg(dev,hwPort,0,0,phyInfo.anyPage,&orgReg0)) != GT_OK)
+    {
+        DBG_INFO(("Not able to reset the Phy.\n"));
+        goto cableDiagCleanup;
+    }
+    switch(phyInfo.vctType)
+    {
+        case GT_PHY_VCT_TYPE1:
+            enable1stWorkAround_Phy100M(dev,hwPort);
+            status = getCableStatus_Phy100M(dev,hwPort,cableStatus);
+            /* every fast ethernet phy requires this work-around */
+            workAround_Phy100M(dev,hwPort,cableStatus);
+            disable1stWorkAround_Phy100M(dev,hwPort);
+            break;
+        case GT_PHY_VCT_TYPE2:
+            enable1stWorkAround_Phy1000M(dev,hwPort);
+            status = getCableStatus_Phy1000M(dev,hwPort,orgReg17,cableStatus);
+            disable1stWorkAround_Phy1000M(dev,hwPort);
+            break;
+        case GT_PHY_VCT_TYPE3:
+            enable1stWorkAround_Phy1000M(dev,hwPort);
+            workAround_Phy1000M(dev,hwPort);
+            status = getCableStatus_Phy1000M(dev,hwPort,orgReg17,cableStatus);
+            disable1stWorkAround_Phy1000M(dev,hwPort);
+            break;
+        case GT_PHY_VCT_TYPE4:
+            status = getCableStatus_Phy1000MPage(dev,hwPort,&phyInfo,cableStatus);
+            break;
+        default:
+            status = GT_FAIL;
+            break;
+    }
+
+    if (!(phyInfo.flag & GT_PHY_GIGABIT))
+    {
+        if((status = hwPhyReset(dev,hwPort,orgReg0)) != GT_OK)
+        {
+            gtSemGive(dev,dev->phyRegsSem);
+            return status;
+        }
+    }
+    else
+    {
+        /*
+         * restore Config Register Data
+         */
+        if((status= hwWritePagedPhyReg(dev,hwPort,0,0,phyInfo.anyPage,orgReg0)) != GT_OK)
+        {
+            gtSemGive(dev,dev->phyRegsSem);
+            return status;
+        }
+
+        /* soft reset */
+        if((status = hwPhyReset(dev,hwPort,0xFF)) != GT_OK)
+        {
+            gtSemGive(dev,dev->phyRegsSem);
+            return status;
+        }
+    }
+
+cableDiagCleanup:
+
+    if(driverPagedAccessStop(dev,hwPort,phyInfo.pageType,autoOn,pageReg) != GT_OK)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if(ppuEn != GT_FALSE)
+    {
+        if(gsysSetPPUEn(dev,ppuEn) != GT_OK)
+        {
+            DBG_INFO(("Not able to enable PPUEn.\n"));
+            status = GT_FAIL;
+        }
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return status;
+}
+
+
+/*******************************************************************************
+* getExStatus1000M
+*
+* DESCRIPTION:
+*       This routine retrieves Pair Skew, Pair Swap, and Pair Polarity
+*        for 1000M phy
+*
+* INPUTS:
+*       dev - device context.
+*       port - logical port number.
+*
+* OUTPUTS:
+*       extendedStatus - extended cable status.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+static GT_STATUS getExStatus1000M
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U8           hwPort,
+    OUT GT_1000BT_EXTENDED_STATUS *extendedStatus
+)
+{
+    GT_STATUS status;
+    GT_U16 u16Data, i;
+
+    /*
+     * get data from 28_5 register
+     */
+    if((status= hwReadPagedPhyReg(dev,hwPort,5,28,0,&u16Data)) != GT_OK)
+    {
+        DBG_INFO(("Not able to read a Phy register.\n"));
+        return status;
+    }
+
+    /* if bit 6 is not set, it's not valid. */
+    if (!(u16Data & 0x0040))
+    {
+        DBG_INFO(("Valid Bit is not set (%0#x).\n", u16Data));
+        extendedStatus->isValid = GT_FALSE;
+        return GT_OK;
+    }
+
+    extendedStatus->isValid = GT_TRUE;
+
+    /* get Pair Polarity */
+    for(i=0; i<GT_MDI_PAIR_NUM; i++)
+    {
+        switch((u16Data >> i) & 0x1)
+        {
+            case 0:
+                extendedStatus->pairPolarity[i] = GT_POSITIVE;
+                break;
+            default:
+                extendedStatus->pairPolarity[i] = GT_NEGATIVE;
+                break;
+        }
+    }
+
+    /* get Pair Swap */
+    for(i=0; i<GT_CHANNEL_PAIR_NUM; i++)
+    {
+        switch((u16Data >> (i+4)) & 0x1)
+        {
+            case 0:
+                extendedStatus->pairSwap[i] = GT_STRAIGHT_CABLE;
+                break;
+            default:
+                extendedStatus->pairSwap[i] = GT_CROSSOVER_CABLE;
+                break;
+        }
+    }
+
+    /*
+     * get data from 28_4 register
+     */
+    if((status= hwReadPagedPhyReg(dev,hwPort,4,28,0,&u16Data)) != GT_OK)
+    {
+        DBG_INFO(("Not able to read a Phy register.\n"));
+        return status;
+    }
+
+    /* get Pair Skew */
+    for(i=0; i<GT_MDI_PAIR_NUM; i++)
+    {
+        extendedStatus->pairSkew[i] = ((u16Data >> i*4) & 0xF) * 8;
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* getExStatus1000MPage
+*
+* DESCRIPTION:
+*       This routine retrieves Pair Skew, Pair Swap, and Pair Polarity
+*        for 1000M phy with multiple page mode
+*
+* INPUTS:
+*       dev - device context.
+*       port - logical port number.
+*
+* OUTPUTS:
+*       extendedStatus - extended cable status.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+static GT_STATUS getExStatus1000MPage
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_U8            hwPort,
+    OUT GT_1000BT_EXTENDED_STATUS *extendedStatus
+)
+{
+    GT_STATUS status;
+    GT_U16 u16Data, i;
+
+    /*
+     * get data from 21_5 register for pair swap
+     */
+    if((status= hwReadPagedPhyReg(dev,hwPort,5,21,0,&u16Data)) != GT_OK)
+    {
+        DBG_INFO(("Not able to read a paged Phy register.\n"));
+        return status;
+    }
+
+    /* if bit 6 is not set, it's not valid. */
+    if (!(u16Data & 0x0040))
+    {
+        DBG_INFO(("Valid Bit is not set (%0#x).\n", u16Data));
+        extendedStatus->isValid = GT_FALSE;
+        return GT_OK;
+    }
+
+    extendedStatus->isValid = GT_TRUE;
+
+    /* get Pair Polarity */
+    for(i=0; i<GT_MDI_PAIR_NUM; i++)
+    {
+        switch((u16Data >> i) & 0x1)
+        {
+            case 0:
+                extendedStatus->pairPolarity[i] = GT_POSITIVE;
+                break;
+            default:
+                extendedStatus->pairPolarity[i] = GT_NEGATIVE;
+                break;
+        }
+    }
+
+    /* get Pair Swap */
+    for(i=0; i<GT_CHANNEL_PAIR_NUM; i++)
+    {
+        switch((u16Data >> (i+4)) & 0x1)
+        {
+            case 0:
+                extendedStatus->pairSwap[i] = GT_STRAIGHT_CABLE;
+                break;
+            default:
+                extendedStatus->pairSwap[i] = GT_CROSSOVER_CABLE;
+                break;
+        }
+    }
+
+    /*
+     * get data from 20_5 register for pair skew
+     */
+    if((status= hwReadPagedPhyReg(dev,hwPort,5,20,0,&u16Data)) != GT_OK)
+    {
+        DBG_INFO(("Not able to read a paged Phy register.\n"));
+        return status;
+    }
+
+    /* get Pair Skew */
+    for(i=0; i<GT_MDI_PAIR_NUM; i++)
+    {
+        extendedStatus->pairSkew[i] = ((u16Data >> i*4) & 0xF) * 8;
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gvctGet1000BTExtendedStatus
+*
+* DESCRIPTION:
+*       This routine retrieves Pair Skew, Pair Swap, and Pair Polarity
+*
+* INPUTS:
+*       dev - device context.
+*       port - logical port number.
+*
+* OUTPUTS:
+*       extendedStatus - extended cable status.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       Internal Gigabit Phys in 88E6165 family and 88E6351 family devices
+*        are not supported by this API. For those devices, gvctGetAdvExtendedStatus
+*        API can be used, instead.
+*
+*******************************************************************************/
+GT_STATUS gvctGet1000BTExtendedStatus
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_LPORT        port,
+    OUT GT_1000BT_EXTENDED_STATUS *extendedStatus
+)
+{
+    GT_STATUS status;
+    GT_U8 hwPort;
+    GT_BOOL ppuEn;
+    GT_PHY_INFO    phyInfo;
+    GT_BOOL            autoOn;
+    GT_U16            pageReg;
+
+    DBG_INFO(("gvctGetCableDiag Called.\n"));
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the port supports VCT */
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (!(phyInfo.flag & GT_PHY_EX_CABLE_STATUS))
+    {
+        DBG_INFO(("Not Supported\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Need to disable PPUEn for safe. */
+    if(gsysGetPPUEn(dev,&ppuEn) != GT_OK)
+    {
+        ppuEn = GT_FALSE;
+    }
+
+    if(ppuEn != GT_FALSE)
+    {
+        if((status= gsysSetPPUEn(dev,GT_FALSE)) != GT_OK)
+        {
+            DBG_INFO(("Not able to disable PPUEn.\n"));
+            gtSemGive(dev,dev->phyRegsSem);
+            return status;
+        }
+        gtDelay(250);
+    }
+
+    if(driverPagedAccessStart(dev,hwPort,phyInfo.pageType,&autoOn,&pageReg) != GT_OK)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    switch(phyInfo.vctType)
+    {
+        case GT_PHY_VCT_TYPE2:
+            status = getExStatus1000M(dev,hwPort,extendedStatus);
+            break;
+        case GT_PHY_VCT_TYPE4:
+            status = getExStatus1000MPage(dev,hwPort,extendedStatus);
+            break;
+        default:
+               DBG_INFO(("Device is not supporting Extended Cable Status.\n"));
+            status = GT_NOT_SUPPORTED;
+    }
+
+    if(driverPagedAccessStop(dev,hwPort,phyInfo.pageType,autoOn,pageReg) != GT_OK)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if(ppuEn != GT_FALSE)
+    {
+        if(gsysSetPPUEn(dev,ppuEn) != GT_OK)
+        {
+            DBG_INFO(("Not able to enable PPUEn.\n"));
+            status = GT_FAIL;
+        }
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return status;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtVct_mad.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtVct_mad.c
new file mode 100644
index 000000000000..37ab926f277c
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtVct_mad.c
@@ -0,0 +1,224 @@
+#include <Copyright.h>
+/*******************************************************************************
+* gtVct.c
+*
+* DESCRIPTION:
+*       API for Marvell Virtual Cable Tester.
+*
+* DEPENDENCIES:
+*       None.
+*
+* FILE REVISION NUMBER:
+*       $Revision: 1 $
+*******************************************************************************/
+#include <msApi.h>
+#include <gtVct.h>
+#include <gtDrvConfig.h>
+#include <gtDrvSwRegs.h>
+#include <gtHwCntl.h>
+#include <gtSem.h>
+
+#include <madApi.h>
+
+
+/*******************************************************************************
+* gvctGetCableStatus_mad
+*
+* DESCRIPTION:
+*       This routine perform the virtual cable test for the requested port,
+*       and returns the the status per MDI pair.
+*
+* INPUTS:
+*       port - logical port number.
+*
+* OUTPUTS:
+*       cableStatus - the port copper cable status.
+*       cableLen    - the port copper cable length.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       Internal Gigabit Phys in 88E6165 family and 88E6351 family devices
+*        are not supported by this API. For those devices, gvctGetAdvCableDiag
+*        API can be used, instead.
+*
+*******************************************************************************/
+GT_STATUS gvctGetCableDiag_mad
+(
+    IN  GT_QD_DEV *dev,
+    IN  GT_LPORT        port,
+    OUT GT_CABLE_STATUS *cableStatus
+)
+{
+    GT_STATUS status=GT_OK;
+    GT_U8 hwPort;
+    GT_BOOL ppuEn;
+    GT_PHY_INFO    phyInfo;
+
+    DBG_INFO(("gvctGetCableDiag_mad Called.\n"));
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the port supports VCT */
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (!(phyInfo.flag & GT_PHY_VCT_CAPABLE))
+    {
+        DBG_INFO(("Not Supported\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Need to disable PPUEn for safe. */
+    if(gsysGetPPUEn(dev,&ppuEn) != GT_OK)
+    {
+        ppuEn = GT_FALSE;
+    }
+
+    if(ppuEn != GT_FALSE)
+    {
+        if((status= gsysSetPPUEn(dev,GT_FALSE)) != GT_OK)
+        {
+            DBG_INFO(("Not able to disable PPUEn.\n"));
+            gtSemGive(dev,dev->phyRegsSem);
+            return status;
+        }
+        gtDelay(250);
+    }
+
+    if ( mdDiagGetCableStatus(&(dev->mad_dev),port, (MAD_CABLE_STATUS*)cableStatus) != MAD_OK)
+    {
+      DBG_INFO(("Failed to run mdDiagGetCableStatus.\n"));
+      gtSemGive(dev,dev->phyRegsSem);
+      return GT_FALSE;
+    }
+
+    if(ppuEn != GT_FALSE)
+    {
+        if(gsysSetPPUEn(dev,ppuEn) != GT_OK)
+        {
+            DBG_INFO(("Not able to enable PPUEn.\n"));
+            status = GT_FAIL;
+        }
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return status;
+}
+
+
+
+/*******************************************************************************
+* gvctGet1000BTExtendedStatus_mad
+*
+* DESCRIPTION:
+*       This routine retrieves Pair Skew, Pair Swap, and Pair Polarity
+*
+* INPUTS:
+*       dev - device context.
+*       port - logical port number.
+*
+* OUTPUTS:
+*       extendedStatus - extended cable status.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       Internal Gigabit Phys in 88E6165 family and 88E6351 family devices
+*        are not supported by this API. For those devices, gvctGetAdvExtendedStatus
+*        API can be used, instead.
+*
+*******************************************************************************/
+GT_STATUS gvctGet1000BTExtendedStatus_mad
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_LPORT        port,
+    OUT GT_1000BT_EXTENDED_STATUS *extendedStatus
+)
+{
+    GT_STATUS status=GT_OK;
+    GT_U8 hwPort;
+    GT_BOOL ppuEn;
+    GT_PHY_INFO    phyInfo;
+
+    DBG_INFO(("gvctGetCableDiag_mad Called.\n"));
+    hwPort = GT_LPORT_2_PHY(port);
+
+    gtSemTake(dev,dev->phyRegsSem,OS_WAIT_FOREVER);
+
+    /* check if the port is configurable */
+    if((phyInfo.phyId=GT_GET_PHY_ID(dev,hwPort)) == GT_INVALID_PHY)
+    {
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* check if the port supports VCT */
+    if(driverFindPhyInformation(dev,hwPort,&phyInfo) != GT_OK)
+    {
+        DBG_INFO(("Unknown PHY device.\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_FAIL;
+    }
+
+    if (!(phyInfo.flag & GT_PHY_EX_CABLE_STATUS))
+    {
+        DBG_INFO(("Not Supported\n"));
+        gtSemGive(dev,dev->phyRegsSem);
+        return GT_NOT_SUPPORTED;
+    }
+
+    /* Need to disable PPUEn for safe. */
+    if(gsysGetPPUEn(dev,&ppuEn) != GT_OK)
+    {
+        ppuEn = GT_FALSE;
+    }
+
+    if(ppuEn != GT_FALSE)
+    {
+        if((status= gsysSetPPUEn(dev,GT_FALSE)) != GT_OK)
+        {
+            DBG_INFO(("Not able to disable PPUEn.\n"));
+            gtSemGive(dev,dev->phyRegsSem);
+            return status;
+        }
+        gtDelay(250);
+    }
+
+    if ( mdDiagGet1000BTExtendedStatus(&(dev->mad_dev),port,(MAD_1000BT_EXTENDED_STATUS*)extendedStatus) != MAD_OK)
+    {
+      DBG_INFO(("Failed to run mdDiagGet1000BTExtendedStatus.\n"));
+      gtSemGive(dev,dev->phyRegsSem);
+      return GT_FALSE;
+    }
+
+
+    if(ppuEn != GT_FALSE)
+    {
+        if(gsysSetPPUEn(dev,ppuEn) != GT_OK)
+        {
+            DBG_INFO(("Not able to enable PPUEn.\n"));
+            status = GT_FAIL;
+        }
+    }
+
+    gtSemGive(dev,dev->phyRegsSem);
+    return status;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtVersion.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtVersion.c
new file mode 100644
index 000000000000..b24dc4339452
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtVersion.c
@@ -0,0 +1,59 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtVersion.h
+*
+* DESCRIPTION:
+*       Includes software version information for the QuarterDeck software
+*       suite.
+*
+* DEPENDENCIES:
+*       None.
+*
+* FILE REVISION NUMBER:
+*       $Revision: 1 $
+*
+*******************************************************************************/
+
+#include <msApi.h>
+
+char msApiCopyright[] = MSAPI_COPYRIGHT;
+char msApiVersion[] = MSAPI_VERSION;
+
+/*******************************************************************************
+* gtVersion
+*
+* DESCRIPTION:
+*       This function returns the version of the QuarterDeck SW suite.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       version     - QuarterDeck software version.
+*
+* RETURNS:
+*       GT_OK on success,
+*       GT_BAD_PARAM on bad parameters,
+*       GT_FAIL otherwise.
+*
+* COMMENTS:
+*
+*******************************************************************************/
+GT_STATUS gtVersion
+(
+    OUT GT_VERSION   *version
+)
+{
+    if(version == NULL)
+        return GT_BAD_PARAM;
+
+    if(gtStrlen(msApiVersion) > VERSION_MAX_LEN)
+    {
+        return GT_FAIL;
+    }
+
+    gtMemCpy(version->version,msApiVersion,gtStrlen(msApiVersion));
+    version->version[gtStrlen(msApiVersion)] = '\0';
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtWeight.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtWeight.c
new file mode 100644
index 000000000000..55fee5dabfe8
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/gtWeight.c
@@ -0,0 +1,479 @@
+#include <Copyright.h>
+
+/********************************************************************************
+* gtWeight.c
+*
+* DESCRIPTION:
+*       API definitions for Round Robin Weight table access
+*
+* DEPENDENCIES:
+*
+* FILE REVISION NUMBER:
+*       $Revision: $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtSem.h>
+#include <gtHwCntl.h>
+#include <gtDrvSwRegs.h>
+
+
+/*******************************************************************************
+* gsysSetQoSWeight
+*
+* DESCRIPTION:
+*       Programmable Round Robin Weights.
+*        Each port has 4 output Queues. Queue 3 has the highest priority and
+*        Queue 0 has the lowest priority. When a scheduling mode of port is
+*        configured as Weighted Round Robin queuing mode, the access sequece of the
+*        Queue is 3,2,3,1,3,2,3,0,3,2,3,1,3,2,3 by default.
+*        This sequence can be configured with this API.
+*
+* INPUTS:
+*       weight - access sequence of the queue
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gsysSetQoSWeight
+(
+    IN  GT_QD_DEV         *dev,
+    IN  GT_QoS_WEIGHT    *weight
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U16        data;
+    GT_U32        len, i;
+
+    DBG_INFO(("gsysSetQoSWeight Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QoS_WEIGHT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    if (weight->len > 128)
+    {
+        DBG_INFO(("GT_BAD_PARAM\n"));
+        return GT_BAD_PARAM;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    len = weight->len/4;
+
+    /* program QoS Weight Table, 4 sequences at a time */
+
+    for(i=0; i<len; i++)
+    {
+        /* Wait until the QoS Weight Table is ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 2;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_QOS_WEIGHT;
+      regAccess.rw_reg_list[0].data = 15;
+      regAccess.rw_reg_list[1].cmd = HW_REG_WRITE;
+      regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[1].reg = QD_REG_QOS_WEIGHT;
+      data =  (GT_U16)((1 << 15) | (i << 8) |
+            (weight->queue[i*4] & 0x3) |
+            ((weight->queue[i*4+1] & 0x3) << 2) |
+            ((weight->queue[i*4+2] & 0x3) << 4) |
+            ((weight->queue[i*4+3] & 0x3) << 6));
+      regAccess.rw_reg_list[1].data = data;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      if(retVal != GT_OK)
+      {
+            gtSemGive(dev,dev->tblRegsSem);
+        return retVal;
+      }
+    }
+#else
+        data = 1;
+        while(data == 1)
+        {
+            retVal = hwGetGlobal2RegField(dev,QD_REG_QOS_WEIGHT,15,1,&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->tblRegsSem);
+                return retVal;
+            }
+        }
+
+        data =  (GT_U16)((1 << 15) | (i << 8) |
+                (weight->queue[i*4] & 0x3) |
+                ((weight->queue[i*4+1] & 0x3) << 2) |
+                ((weight->queue[i*4+2] & 0x3) << 4) |
+                ((weight->queue[i*4+3] & 0x3) << 6));
+
+        retVal = hwWriteGlobal2Reg(dev, QD_REG_QOS_WEIGHT, data);
+        if(retVal != GT_OK)
+        {
+               DBG_INFO(("Failed.\n"));
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+#endif
+    }
+
+    /* program remaining sequences if any */
+    i = weight->len % 4;
+    if (i)
+    {
+        /* Wait until the QoS Weight Table is ready. */
+#ifdef GT_RMGMT_ACCESS
+        {
+          HW_DEV_REG_ACCESS regAccess;
+
+          regAccess.entries = 2;
+
+          regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+          regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+          regAccess.rw_reg_list[0].reg = QD_REG_QOS_WEIGHT;
+          regAccess.rw_reg_list[0].data = 15;
+          regAccess.rw_reg_list[1].cmd = HW_REG_WRITE;
+          regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+          regAccess.rw_reg_list[1].reg = QD_REG_QOS_WEIGHT;
+          data =  (GT_U16)((1 << 15) | (len << 8));
+          switch (i)
+          {
+            case 3:
+                data |= ((weight->queue[len*4+2] & 0x3) << 4);
+            case 2:
+                data |= ((weight->queue[len*4+1] & 0x3) << 2);
+            case 1:
+                data |= ((weight->queue[len*4+0] & 0x3) << 0);
+            break;
+            default:
+                DBG_INFO(("Should not come to this point.\n"));
+                gtSemGive(dev,dev->tblRegsSem);
+            return GT_FALSE;
+          }
+          regAccess.rw_reg_list[1].data = data;
+          retVal = hwAccessMultiRegs(dev, &regAccess);
+          if(retVal != GT_OK)
+          {
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+          }
+        }
+#else
+        data = 1;
+        while(data == 1)
+        {
+            retVal = hwGetGlobal2RegField(dev,QD_REG_QOS_WEIGHT,15,1,&data);
+            if(retVal != GT_OK)
+            {
+                gtSemGive(dev,dev->tblRegsSem);
+                return retVal;
+            }
+        }
+
+        data =  (GT_U16)((1 << 15) | (len << 8));
+
+        switch (i)
+        {
+            case 3:
+                data |= ((weight->queue[len*4+2] & 0x3) << 4);
+            case 2:
+                data |= ((weight->queue[len*4+1] & 0x3) << 2);
+            case 1:
+                data |= ((weight->queue[len*4+0] & 0x3) << 0);
+                break;
+            default:
+                   DBG_INFO(("Should not come to this point.\n"));
+                gtSemGive(dev,dev->tblRegsSem);
+                return GT_FALSE;
+        }
+
+        retVal = hwWriteGlobal2Reg(dev, QD_REG_QOS_WEIGHT, data);
+        if(retVal != GT_OK)
+        {
+               DBG_INFO(("Failed.\n"));
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+#endif
+    }
+
+    /* Write the lengh of the sequence */
+
+    /* Wait until the QoS Weight Table is ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 2;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_QOS_WEIGHT;
+      regAccess.rw_reg_list[0].data = 15;
+      regAccess.rw_reg_list[1].cmd = HW_REG_WRITE;
+      regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[1].reg = QD_REG_QOS_WEIGHT;
+      data =  (GT_U16)((1 << 15) | (0x20 << 8) | weight->len);
+      regAccess.rw_reg_list[1].data = data;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+    }
+#else
+       data = 1;
+    while(data == 1)
+       {
+        retVal = hwGetGlobal2RegField(dev,QD_REG_QOS_WEIGHT,15,1,&data);
+           if(retVal != GT_OK)
+           {
+            gtSemGive(dev,dev->tblRegsSem);
+               return retVal;
+           }
+    }
+
+    data =  (GT_U16)((1 << 15) | (0x20 << 8) | weight->len);
+
+    retVal = hwWriteGlobal2Reg(dev, QD_REG_QOS_WEIGHT, data);
+#endif
+
+    gtSemGive(dev,dev->tblRegsSem);
+
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+           return retVal;
+    }
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gsysGetQoSWeight
+*
+* DESCRIPTION:
+*       Programmable Round Robin Weights.
+*        Each port has 4 output Queues. Queue 3 has the highest priority and
+*        Queue 0 has the lowest priority. When a scheduling mode of port is
+*        configured as Weighted Round Robin queuing mode, the access sequece of the
+*        Queue is 3,2,3,1,3,2,3,0,3,2,3,1,3,2,3 by default.
+*        This routine retrieves the access sequence of the Queue.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       weight - access sequence of the queue
+*
+* RETURNS:
+*       GT_OK      - on success
+*       GT_FAIL    - on error
+*        GT_NOT_SUPPORTED - if current device does not support this feature.
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gsysGetQoSWeight
+(
+    IN  GT_QD_DEV         *dev,
+    OUT GT_QoS_WEIGHT    *weight
+)
+{
+    GT_STATUS    retVal;         /* Functions return value.      */
+    GT_U16        data, i;
+    GT_U32        len;
+
+    DBG_INFO(("gsysGetQoSWeight Called.\n"));
+
+    /* Check if Switch supports this feature. */
+    if (!IS_IN_DEV_GROUP(dev,DEV_QoS_WEIGHT))
+    {
+        DBG_INFO(("GT_NOT_SUPPORTED\n"));
+        return GT_NOT_SUPPORTED;
+    }
+
+    gtSemTake(dev,dev->tblRegsSem,OS_WAIT_FOREVER);
+
+    /* Read the lengh of the sequence */
+
+    /* Wait until the QoS Weight Table is ready. */
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 3;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_QOS_WEIGHT;
+      regAccess.rw_reg_list[0].data = 15;
+      regAccess.rw_reg_list[1].cmd = HW_REG_WRITE;
+      regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[1].reg = QD_REG_QOS_WEIGHT;
+      data = (0x20 << 8);
+      regAccess.rw_reg_list[1].data = data;
+      regAccess.rw_reg_list[2].cmd = HW_REG_READ;
+      regAccess.rw_reg_list[2].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[2].reg = QD_REG_QOS_WEIGHT;
+      regAccess.rw_reg_list[2].data = 0;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      data = qdLong2Short(regAccess.rw_reg_list[2].data);
+    }
+#else
+       data = 1;
+    while(data == 1)
+       {
+        retVal = hwGetGlobal2RegField(dev,QD_REG_QOS_WEIGHT,15,1,&data);
+           if(retVal != GT_OK)
+           {
+            gtSemGive(dev,dev->tblRegsSem);
+               return retVal;
+           }
+    }
+
+    data = (0x20 << 8);
+
+    retVal = hwWriteGlobal2Reg(dev, QD_REG_QOS_WEIGHT, data);
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+           return retVal;
+    }
+
+    retVal = hwReadGlobal2Reg(dev, QD_REG_QOS_WEIGHT, &data);
+#endif
+    if(retVal != GT_OK)
+       {
+           DBG_INFO(("Failed.\n"));
+        gtSemGive(dev,dev->tblRegsSem);
+           return retVal;
+    }
+
+    weight->len = data & 0xFF;
+
+    len = weight->len/4;
+
+    /* read QoS Weight Table, 4 sequences at a time */
+
+    for(i=0; i<len; i++)
+    {
+        data = i << 8;
+
+#ifdef GT_RMGMT_ACCESS
+    {
+      HW_DEV_REG_ACCESS regAccess;
+
+      regAccess.entries = 2;
+
+      regAccess.rw_reg_list[0].cmd = HW_REG_WRITE;
+      regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[0].reg = QD_REG_QOS_WEIGHT;
+      regAccess.rw_reg_list[0].data = data;
+      regAccess.rw_reg_list[1].cmd = HW_REG_READ;
+      regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+      regAccess.rw_reg_list[1].reg = QD_REG_QOS_WEIGHT;
+      regAccess.rw_reg_list[1].data = 0;
+      retVal = hwAccessMultiRegs(dev, &regAccess);
+      data = qdLong2Short(regAccess.rw_reg_list[1].data);
+    }
+#else
+        retVal = hwWriteGlobal2Reg(dev, QD_REG_QOS_WEIGHT, data);
+        if(retVal != GT_OK)
+        {
+               DBG_INFO(("Failed.\n"));
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+
+        retVal = hwReadGlobal2Reg(dev, QD_REG_QOS_WEIGHT, &data);
+#endif
+        if(retVal != GT_OK)
+           {
+               DBG_INFO(("Failed.\n"));
+            gtSemGive(dev,dev->tblRegsSem);
+               return retVal;
+        }
+
+        weight->queue[i*4] = data & 0x3;
+        weight->queue[i*4+1] = (data >> 2) & 0x3;
+        weight->queue[i*4+2] = (data >> 4) & 0x3;
+        weight->queue[i*4+3] = (data >> 6) & 0x3;
+
+    }
+
+    /* read remaining sequences if any */
+    i = (GT_U16) (weight->len % 4);
+    if (i)
+    {
+        data = (GT_U16)(len << 8);
+
+#ifdef GT_RMGMT_ACCESS
+        {
+          HW_DEV_REG_ACCESS regAccess;
+
+          regAccess.entries = 2;
+
+          regAccess.rw_reg_list[0].cmd = HW_REG_WRITE;
+          regAccess.rw_reg_list[0].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+          regAccess.rw_reg_list[0].reg = QD_REG_QOS_WEIGHT;
+          regAccess.rw_reg_list[0].data = data;
+          regAccess.rw_reg_list[1].cmd = HW_REG_READ;
+          regAccess.rw_reg_list[1].addr = CALC_SMI_DEV_ADDR(dev, 0, GLOBAL2_REG_ACCESS);
+          regAccess.rw_reg_list[1].reg = QD_REG_QOS_WEIGHT;
+          regAccess.rw_reg_list[1].data = 0;
+          retVal = hwAccessMultiRegs(dev, &regAccess);
+          data = qdLong2Short(regAccess.rw_reg_list[1].data);
+        }
+#else
+        retVal = hwWriteGlobal2Reg(dev, QD_REG_QOS_WEIGHT, data);
+        if(retVal != GT_OK)
+        {
+               DBG_INFO(("Failed.\n"));
+            gtSemGive(dev,dev->tblRegsSem);
+            return retVal;
+        }
+
+        retVal = hwReadGlobal2Reg(dev, QD_REG_QOS_WEIGHT, &data);
+#endif
+        if(retVal != GT_OK)
+           {
+               DBG_INFO(("Failed.\n"));
+            gtSemGive(dev,dev->tblRegsSem);
+               return retVal;
+        }
+
+        switch (i)
+        {
+            case 3:
+                weight->queue[len*4+2] = (data >> 4) & 0x3;
+            case 2:
+                weight->queue[len*4+1] = (data >> 2) & 0x3;
+            case 1:
+                weight->queue[len*4] = data & 0x3;
+                break;
+            default:
+                   DBG_INFO(("Should not come to this point.\n"));
+                gtSemGive(dev,dev->tblRegsSem);
+                return GT_FALSE;
+        }
+    }
+
+    gtSemGive(dev,dev->tblRegsSem);
+
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/makefile b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/makefile
new file mode 100644
index 000000000000..b03983201084
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/msapi/makefile
@@ -0,0 +1,49 @@
+# Source files in this directory
+TARGET =
+CSOURCES	= gtBrgFdb.c gtBrgStp.c gtBrgVlan.c \
+			gtEvents.c \
+			gtPortCtrl.c gtPortStat.c gtPortStatus.c \
+			gtQosMap.c gtPIRL.c	\
+			gtPhyCtrl.c gtPhyInt.c \
+			gtSysConfig.c gtSysCtrl.c gtVersion.c gtUtils.c \
+			gtBrgVtu.c gtPortRmon.c gtSysStatus.c \
+			gtPortRateCtrl.c gtPortPav.c gtVct.c gtAdvVct.c gtPCSCtrl.c \
+			gtBrgStu.c gtCCPVT.c gtPIRL2.c gtPolicy.c \
+			gtTCAM.c \
+			gtPriTable.c gtWeight.c gtAVB.c gtPTP.c gtPortLed.c gtMisc.c
+
+ASOURCES	=
+
+# Include common variable definitions
+ifeq ($(OS_RUN),VXWORKS)
+include $(DSDT_TOOL_DIR)\make.defs
+endif
+ifeq ($(OS_RUN),WIN32)
+include $(DSDT_TOOL_DIR)\makewce.defs
+endif
+ifeq ($(OS_RUN),LINUX)
+include $(DSDT_TOOL_DIR)/makelnx.defs
+endif
+
+# Add in extra stuffs
+EXTRA_INCLUDE	+=
+EXTRA_DEFINE	+=
+ADDED_CFLAGS	+=
+
+ifeq ($(DSDT_USE_MAD),TRUE)
+#CSOURCES	+= gtPhyCtrl_mad.c gtPhyInt_mad.c gtVct_mad.c gtAdvVct_mad.c
+ADDED_CFLAGS	+= -DGT_USE_MAD
+endif
+
+# Include common build rules
+ifeq ($(OS_RUN),VXWORKS)
+include $(DSDT_TOOL_DIR)\make.rules
+endif
+ifeq ($(OS_RUN),WIN32)
+include $(DSDT_TOOL_DIR)\makewce.rules
+endif
+ifeq ($(OS_RUN),LINUX)
+include $(DSDT_TOOL_DIR)/makelnx.rules
+endif
+
+# end of file
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/platform/gtDebug.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/platform/gtDebug.c
new file mode 100644
index 000000000000..a53b521e1d97
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/platform/gtDebug.c
@@ -0,0 +1,78 @@
+#include <Copyright.h>
+/********************************************************************************
+* debug.c
+*
+* DESCRIPTION:
+*       Debug message display routine
+*
+* DEPENDENCIES:
+*       OS Dependent
+*
+* FILE REVISION NUMBER:
+*       $Revision: 3 $
+*******************************************************************************/
+#include <msApi.h>
+
+#ifdef DEBUG_QD
+#ifdef _VXWORKS
+#include "vxWorks.h"
+#include "logLib.h"
+#include "stdarg.h"
+#elif defined(WIN32)
+#include "windows.h"
+/* #include "wdm.h" */
+#elif defined(LINUX)
+#include "stdarg.h"
+#endif
+
+/*******************************************************************************
+* gtDbgPrint
+*
+* DESCRIPTION:
+*       .
+*
+* INPUTS:
+*       None
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+#if defined(_VXWORKS) || defined(WIN32) || defined(LINUX)
+void gtDbgPrint(char* format, ...)
+{
+    va_list argP;
+    char dbgStr[1000] = "";
+
+    va_start(argP, format);
+
+    vsprintf(dbgStr, format, argP);
+
+#ifdef _VXWORKS
+    printf("%s",dbgStr);
+/*    logMsg(dbgStr,0,1,2,3,4,5); */
+#elif defined(WIN32)
+    printf("%s",dbgStr);
+/*    DbgPrint(dbgStr);*/
+#elif defined(LINUX)
+    printk("%s",dbgStr);
+#endif
+    return;
+}
+#else
+void gtDbgPrint(char* format, ...)
+{
+    GT_UNUSED_PARAM(format);
+}
+#endif
+#else /* DEBUG_QD not defined */
+void gtDbgPrint(char* format, ...)
+{
+    GT_UNUSED_PARAM(format);
+}
+#endif /* DEBUG_QD */
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/platform/gtMiiSmiIf.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/platform/gtMiiSmiIf.c
new file mode 100644
index 000000000000..8f2fbaa17cb5
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/platform/gtMiiSmiIf.c
@@ -0,0 +1,835 @@
+#include <Copyright.h>
+/********************************************************************************
+* gtMiiSmiIf.c
+*
+* DESCRIPTION:
+*       Includes functions prototypes for initializing and accessing the
+*       MII / SMI interface.
+*       This is the only file to be included from upper layers.
+*
+* DEPENDENCIES:
+*       None.
+*
+* FILE REVISION NUMBER:
+*       $Revision: 3 $
+*
+*******************************************************************************/
+
+#include <gtDrvSwRegs.h>
+#include <gtHwCntl.h>
+#include <gtMiiSmiIf.h>
+#include <platformDeps.h>
+#include <gtSem.h>
+
+/* Local sub-functions */
+GT_BOOL qdMultiAddrRead (GT_QD_DEV* dev, unsigned int phyAddr , unsigned int MIIReg,
+                        unsigned int* value);
+GT_BOOL qdMultiAddrWrite (GT_QD_DEV* dev, unsigned int phyAddr , unsigned int MIIReg,
+                       unsigned int value);
+static GT_BOOL fgtReadMii(GT_QD_DEV*dev, unsigned int phyAddr, unsigned int miiReg, unsigned int* value);
+static GT_BOOL fgtWriteMii(GT_QD_DEV*dev, unsigned int phyAddr, unsigned int miiReg, unsigned int value);
+
+#ifdef GT_RMGMT_ACCESS
+static GT_BOOL fgtAccessRegs(GT_QD_DEV* dev, HW_DEV_REG_ACCESS *regList);
+static GT_BOOL qdMultiAddrAccess(GT_QD_DEV* dev, HW_DEV_REG_ACCESS *regList);
+#endif
+
+/*******************************************************************************
+* miiSmiIfInit
+*
+* DESCRIPTION:
+*       This function initializes the MII / SMI interface.
+*
+* INPUTS:
+*       None.
+*
+* OUTPUTS:
+*       highSmiDevAddr - Indicates whether to use the high device register
+*                     addresses when accessing switch's registers (of all kinds)
+*                     i.e, the devices registers range is 0x10 to 0x1F, or to
+*                     use the low device register addresses (range 0x0 to 0xF).
+*                       GT_TRUE     - use high addresses (0x10 to 0x1F).
+*                       GT_FALSE    - use low addresses (0x0 to 0xF).
+*
+* RETURNS:
+*       DEVICE_ID       - on success
+*       0    - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_U16 miiSmiIfInit
+(
+    IN  GT_QD_DEV    *dev,
+    OUT GT_BOOL * highSmiDevAddr
+)
+{
+    GT_STATUS status;
+    GT_U16 data, data1;
+
+    if((status = miiSmiIfReadRegister(dev,PORT_REGS_START_ADDR,QD_REG_SWITCH_ID,&data)) != GT_OK)
+    {
+        return 0;
+    }
+
+    if((status = miiSmiIfReadRegister(dev,PORT_REGS_START_ADDR+1,QD_REG_SWITCH_ID,&data1)) != GT_OK)
+    {
+        return 0;
+    }
+
+    switch(data & 0xFF00)
+    {
+        case 0x0200:
+        case 0x0300:
+        case 0x0500:
+        case 0x0600:
+        case 0x1500:
+        case 0xC000:        /* Melody */
+        case 0x0700:        /* Spinnaker */
+        case 0x2200:        /* Spinnaker */
+        case 0x2500:        /* Spinnaker */
+        case 0xF500:
+        case 0xF900:
+            if (data == data1)
+            {
+                *highSmiDevAddr = GT_FALSE;
+                return data;
+            }
+            break;
+        default:
+            break;
+    }
+
+    if((status = miiSmiIfReadRegister(dev,PORT_REGS_START_ADDR+0x10,QD_REG_SWITCH_ID,&data)) != GT_OK)
+    {
+        return 0;
+    }
+
+    if((status = miiSmiIfReadRegister(dev,PORT_REGS_START_ADDR+0x11,QD_REG_SWITCH_ID,&data1)) != GT_OK)
+    {
+        return 0;
+    }
+
+    switch(data & 0xFF00)
+    {
+        case 0x0200:
+        case 0x0300:
+        case 0x0500:
+        case 0x0600:
+        case 0x1500:
+        case 0xC000:        /* Melody */
+        case 0x0700:        /* Spinnaker */
+        case 0x2200:        /* Spinnaker */
+        case 0x2500:        /* Spinnaker */
+        case 0xF500:
+        case 0xF900:
+            if (data == data1)
+            {
+                *highSmiDevAddr = GT_TRUE;
+                return data;
+            }
+            break;
+        default:
+            break;
+    }
+
+    if((status = miiSmiIfReadRegister(dev,PORT_REGS_START_ADDR_8PORT,QD_REG_SWITCH_ID,&data)) != GT_OK)
+    {
+        return 0;
+    }
+
+    if((status = miiSmiIfReadRegister(dev,PORT_REGS_START_ADDR_8PORT+1,QD_REG_SWITCH_ID,&data1)) != GT_OK)
+    {
+        return 0;
+    }
+
+    switch(data & 0xFF00)
+    {
+        case 0x0800:
+        case 0x1A00:
+        case 0x1000:
+        case 0x0900:
+        case 0x0400:
+        case 0x1200:
+        case 0x1400:
+        case 0x1600:
+        case 0x1700:
+        case 0x3200:
+        case 0x3700:
+        case 0x2400:    /* Agate */
+        case 0x3500:    /* Agate */
+        case 0x1100:    /* Pearl */
+        case 0x3100:    /* Pearl */
+            if (data == data1)
+            {
+                *highSmiDevAddr = GT_FALSE;
+                return data;
+            }
+            break;
+        default:
+            break;
+    }
+
+    return 0;
+}
+
+
+/*******************************************************************************
+* miiSmiManualIfInit
+*
+* DESCRIPTION:
+*       This function returns Device ID from the given base address
+*
+* INPUTS:
+*       baseAddr - either 0x0 or 0x10. Indicates whether to use the low device
+*                    register address or high device register address.
+*                    The device register range is from 0x0 to 0xF or from 0x10
+*                    to 0x1F for 5 port switchs and from 0x0 to 0x1B for 8 port
+*                    switchs.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       DEVICE_ID       - on success
+*       0    - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_U16 miiSmiManualIfInit
+(
+    IN  GT_QD_DEV   *dev,
+    IN  GT_U32        baseAddr
+)
+{
+    GT_STATUS status;
+    GT_U16 data;
+
+
+    if((status = miiSmiIfReadRegister(dev,(GT_U8)(PORT_REGS_START_ADDR+baseAddr),QD_REG_SWITCH_ID,&data)) != GT_OK)
+    {
+        return 0;
+    }
+
+    switch(data & 0xFF00)
+    {
+        case 0x0200:
+        case 0x0300:
+        case 0x0500:
+        case 0x0600:
+        case 0x1500:
+        case 0xF500:
+        case 0xF900:
+        case 0x0700:        /* Spinnaker */
+        case 0x2200:        /* Spinnaker */
+        case 0x2500:        /* Spinnaker */
+            return data;
+        case 0xC000:        /* Melody, Now it could be 0xc00 - 0xc07 */
+            return data&0xFF0F;
+        default:
+            break;
+    }
+    if(baseAddr != 0)
+        return 0;
+
+    if((status = miiSmiIfReadRegister(dev,(GT_U8)(PORT_REGS_START_ADDR_8PORT+baseAddr),QD_REG_SWITCH_ID,&data)) != GT_OK)
+    {
+        return 0;
+    }
+
+    switch(data & 0xFF00)
+    {
+        case 0x0800:
+        case 0x1A00:
+        case 0x1000:
+        case 0x0900:
+        case 0x0400:
+        case 0x1200:
+        case 0x1400:
+        case 0x1600:
+        case 0x3200:
+        case 0x1700:
+        case 0x3700:
+        case 0x2400:    /* Agate */
+        case 0x3500:    /* Agate */
+        case 0x1100:    /* Pearl */
+        case 0x3100:    /* Pearl */
+	case 0xc100:	/* ALP Fix */
+            return data;
+        default:
+            break;
+    }
+
+    return 0;
+}
+
+/*******************************************************************************
+* Following functions are internal APIs between Driver layer and Platform layer
+********************************************************************************/
+
+/*******************************************************************************
+* miiSmiIfReadRegister
+*
+* DESCRIPTION:
+*       This function reads a register throw the SMI / MII interface, to be used
+*       by upper layers.
+*
+* INPUTS:
+*       phyAddr     - The PHY address to be read.
+*       regAddr     - The register address to read.
+*
+* OUTPUTS:
+*       data        - The register's data.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS miiSmiIfReadRegister
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U8        phyAddr,
+    IN  GT_U8        regAddr,
+    OUT GT_U16       *data
+)
+{
+    unsigned int tmpData;
+
+#ifdef GT_RMGMT_ACCESS
+    if((dev->accessMode == SMI_MULTI_ADDR_MODE) &&
+       (dev->fgtHwAccessMod == HW_ACCESS_MODE_SMI))
+#else
+    if(dev->accessMode == SMI_MULTI_ADDR_MODE)
+#endif
+    {
+         if(qdMultiAddrRead(dev,(GT_U32)phyAddr,(GT_U32)regAddr,&tmpData) != GT_TRUE)
+        {
+            return GT_FAIL;
+        }
+    }
+    else
+    {
+         if(fgtReadMii(dev,(GT_U32)phyAddr,(GT_U32)regAddr,&tmpData) != GT_TRUE)
+        {
+            return GT_FAIL;
+        }
+    }
+    *data = (GT_U16)tmpData;
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* miiSmiIfWriteRegister
+*
+* DESCRIPTION:
+*       This function writes to a register throw the SMI / MII interface, to be
+*       used by upper layers.
+*
+* INPUTS:
+*       phyAddr     - The PHY address to be read.
+*       regAddr     - The register address to read.
+*       data        - The data to be written to the register.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS miiSmiIfWriteRegister
+(
+    IN  GT_QD_DEV    *dev,
+    IN  GT_U8        phyAddr,
+    IN  GT_U8        regAddr,
+    IN  GT_U16       data
+)
+{
+#ifdef GT_RMGMT_ACCESS
+    if((dev->accessMode == SMI_MULTI_ADDR_MODE) &&
+       (dev->fgtHwAccessMod == HW_ACCESS_MODE_SMI))
+#else
+    if(dev->accessMode == SMI_MULTI_ADDR_MODE)
+#endif
+    {
+         if(qdMultiAddrWrite(dev,(GT_U32)phyAddr,(GT_U32)regAddr,(GT_U32)data) != GT_TRUE)
+        {
+            return GT_FAIL;
+        }
+    }
+    else
+    {
+        if(fgtWriteMii(dev,(GT_U32)phyAddr,(GT_U32)regAddr,(GT_U32)data) != GT_TRUE)
+        {
+            return GT_FAIL;
+        }
+    }
+    return GT_OK;
+}
+
+#ifdef GT_RMGMT_ACCESS
+
+/*******************************************************************************
+* qdAccessRegs
+*
+* DESCRIPTION:
+*       This function access registers through device interface
+*        (like Marvell F2R on ethernet) by user, to be used by upper layers.
+*
+* INPUTS:
+*   regList     - list of HW_DEV_RW_REG.
+*     HW_DEV_RW_REG includes:
+*     cmd - HW_REG_READ, HW_REG_WRITE, HW_REG_WAIT_TILL_0 or HW_REG_WAIT_TILL_1
+*     addr - SMI Address
+*     reg  - Register offset
+*     data - INPUT,OUTPUT:Value in the Register or Bit number
+*
+* OUTPUTS:
+*   regList     - list of HW_DEV_RW_REG.
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_STATUS qdAccessRegs
+(
+    IN  GT_QD_DEV    *dev,
+    INOUT HW_DEV_REG_ACCESS *regList
+)
+{
+  if((dev->accessMode == SMI_MULTI_ADDR_MODE) &&
+     (dev->fgtHwAccessMod == HW_ACCESS_MODE_SMI))
+  {
+      if(qdMultiAddrAccess(dev, regList) != GT_TRUE)
+      {
+        return GT_FAIL;
+      }
+  }
+  else
+  {
+    if ((IS_IN_DEV_GROUP(dev,DEV_RMGMT)) &&
+        (dev->fgtHwAccess != NULL) &&
+        (dev->fgtHwAccessMod == HW_ACCESS_MODE_F2R) )
+    {
+      if(dev->fgtHwAccess(dev, regList) != GT_TRUE)
+      {
+        if(fgtAccessRegs(dev, regList) != GT_TRUE)
+        {
+          return GT_FAIL;
+        }
+      }
+    }
+    else
+    {
+      if(fgtAccessRegs(dev, regList) != GT_TRUE)
+      {
+        return GT_FAIL;
+      }
+    }
+  }
+  return GT_OK;
+
+}
+#endif
+
+
+/* Local sub-functions */
+
+/*****************************************************************************
+* qdMultiAddrRead
+*
+* DESCRIPTION:
+*       This function reads data from a device in the secondary MII bus.
+*
+* INPUTS:
+*       phyAddr     - The PHY address to be read.
+*       regAddr     - The register address to read.
+*       value       - The storage where register date to be saved.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_TRUE   - on success
+*       GT_FALSE  - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_BOOL qdMultiAddrRead (GT_QD_DEV* dev, unsigned int phyAddr , unsigned int regAddr, unsigned int* value)
+{
+    unsigned int smiReg;
+    volatile unsigned int timeOut; /* in 100MS units */
+    volatile int i;
+
+    /* first check that it is not busy */
+    if(fgtReadMii(dev,(GT_U32)dev->phyAddr,(GT_U32)QD_REG_SMI_COMMAND, &smiReg) != GT_TRUE)
+    {
+        return GT_FALSE;
+    }
+    timeOut = QD_SMI_ACCESS_LOOP; /* initialize the loop count */
+
+    if(smiReg & QD_SMI_BUSY)
+    {
+        for(i = 0 ; i < QD_SMI_TIMEOUT ; i++);
+        do
+        {
+            if(timeOut-- < 1 )
+            {
+                return GT_FALSE;
+            }
+            if(fgtReadMii(dev,(GT_U32)dev->phyAddr,(GT_U32)QD_REG_SMI_COMMAND, &smiReg) != GT_TRUE)
+            {
+                return GT_FALSE;
+            }
+        } while (smiReg & QD_SMI_BUSY);
+    }
+
+    smiReg =  QD_SMI_BUSY | (phyAddr << QD_SMI_DEV_ADDR_BIT) | (QD_SMI_READ << QD_SMI_OP_BIT) |
+            (regAddr << QD_SMI_REG_ADDR_BIT) | (QD_SMI_CLAUSE22 << QD_SMI_MODE_BIT);
+
+    if(fgtWriteMii(dev,(GT_U32)dev->phyAddr,(GT_U32)QD_REG_SMI_COMMAND, smiReg) != GT_TRUE)
+    {
+        return GT_FALSE;
+    }
+    timeOut = QD_SMI_ACCESS_LOOP; /* initialize the loop count */
+    if(fgtReadMii(dev,(GT_U32)dev->phyAddr,(GT_U32)QD_REG_SMI_COMMAND, &smiReg) != GT_TRUE)
+    {
+        return GT_FALSE;
+    }
+
+    if(smiReg & QD_SMI_BUSY)
+    {
+        for(i = 0 ; i < QD_SMI_TIMEOUT ; i++);
+        do
+        {
+            if(timeOut-- < 1 )
+            {
+                return GT_FALSE;
+            }
+            if(fgtReadMii(dev,(GT_U32)dev->phyAddr,(GT_U32)QD_REG_SMI_COMMAND, &smiReg) != GT_TRUE)
+            {
+                return GT_FALSE;
+            }
+        } while (smiReg & QD_SMI_BUSY);
+    }
+    if(fgtReadMii(dev,(GT_U32)dev->phyAddr,(GT_U32)QD_REG_SMI_DATA, &smiReg) != GT_TRUE)
+    {
+        return GT_FALSE;
+    }
+    *value = smiReg;
+
+    return GT_TRUE;
+}
+
+/*****************************************************************************
+* qdMultiAddrWrite
+*
+* DESCRIPTION:
+*       This function writes data to the device in the secondary MII bus.
+*
+* INPUTS:
+*       phyAddr     - The PHY address to be read.
+*       regAddr     - The register address to read.
+*       value       - The data to be written into the register.
+*
+* OUTPUTS:
+*       None.
+*
+* RETURNS:
+*       GT_TRUE   - on success
+*       GT_FALSE  - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+
+GT_BOOL qdMultiAddrWrite (GT_QD_DEV* dev, unsigned int phyAddr , unsigned int regAddr, unsigned int value)
+{
+    unsigned int smiReg;
+    volatile unsigned int timeOut; /* in 100MS units */
+    volatile int i;
+
+    /* first check that it is not busy */
+    if(fgtReadMii(dev,(GT_U32)dev->phyAddr,(GT_U32)QD_REG_SMI_COMMAND, &smiReg) != GT_TRUE)
+    {
+        return GT_FALSE;
+    }
+    timeOut = QD_SMI_ACCESS_LOOP; /* initialize the loop count */
+
+    if(smiReg & QD_SMI_BUSY)
+    {
+        for(i = 0 ; i < QD_SMI_TIMEOUT ; i++);
+        do
+        {
+            if(timeOut-- < 1 )
+            {
+                return GT_FALSE;
+            }
+            if(fgtReadMii(dev,(GT_U32)dev->phyAddr,(GT_U32)QD_REG_SMI_COMMAND, &smiReg) != GT_TRUE)
+            {
+                return GT_FALSE;
+            }
+        } while (smiReg & QD_SMI_BUSY);
+    }
+
+    if(fgtWriteMii(dev,(GT_U32)dev->phyAddr,(GT_U32)QD_REG_SMI_DATA, value) != GT_TRUE)
+    {
+        return GT_FALSE;
+    }
+    smiReg = QD_SMI_BUSY | (phyAddr << QD_SMI_DEV_ADDR_BIT) | (QD_SMI_WRITE << QD_SMI_OP_BIT) |
+            (regAddr << QD_SMI_REG_ADDR_BIT) | (QD_SMI_CLAUSE22 << QD_SMI_MODE_BIT);
+
+    if(fgtWriteMii(dev,(GT_U32)dev->phyAddr,(GT_U32)QD_REG_SMI_COMMAND, smiReg) != GT_TRUE)
+    {
+        return GT_FALSE;
+    }
+
+    return GT_TRUE;
+}
+
+
+static GT_BOOL fgtReadMii(GT_QD_DEV*dev, unsigned int phyAddr, unsigned int reg, unsigned int* value)
+{
+  GT_BOOL retVal;
+  retVal =  dev->fgtReadMii(dev, phyAddr, reg, value);
+  return  retVal;
+}
+
+
+static GT_BOOL fgtWriteMii(GT_QD_DEV*dev, unsigned int phyAddr, unsigned int reg, unsigned int value)
+{
+  GT_BOOL retVal;
+  retVal = dev->fgtWriteMii(dev, phyAddr, reg, value);
+  return  retVal;
+}
+
+#ifdef GT_RMGMT_ACCESS
+
+static GT_BOOL fgtAccessRegs(GT_QD_DEV* dev, HW_DEV_REG_ACCESS *regList)
+{
+  int i;
+  GT_BOOL retVal = GT_TRUE;
+  unsigned short data, mask;
+  unsigned int tmpData;
+  for (i=0; i<regList->entries; i++)
+  {
+    switch (regList->rw_reg_list[i].cmd)
+    {
+      case HW_REG_READ:
+        retVal=dev->fgtReadMii(dev, regList->rw_reg_list[i].addr,
+                regList->rw_reg_list[i].reg, (unsigned int *)&(regList->rw_reg_list[i].data));
+        break;
+      case HW_REG_WRITE:
+        retVal=dev->fgtWriteMii(dev, regList->rw_reg_list[i].addr,
+                regList->rw_reg_list[i].reg, regList->rw_reg_list[i].data);
+        break;
+      case HW_REG_WAIT_TILL_0:
+      {
+        mask = (1<<regList->rw_reg_list[i].data);
+        do
+        {
+          retVal=dev->fgtReadMii(dev, regList->rw_reg_list[i].addr,
+                regList->rw_reg_list[i].reg, (unsigned int *)&tmpData);
+          if(retVal != GT_TRUE)
+          {
+            return retVal;
+          }
+          data = tmpData;
+        } while(data & mask);
+      }
+        break;
+      case HW_REG_WAIT_TILL_1:
+      {
+        mask = (1<<regList->rw_reg_list[i].data);
+        do
+        {
+          retVal=dev->fgtReadMii(dev, regList->rw_reg_list[i].addr,
+                regList->rw_reg_list[i].reg, (unsigned int *)&tmpData);
+          if(retVal != GT_TRUE)
+          {
+            return retVal;
+          }
+          data = tmpData;
+        } while(!(data & mask));
+      }
+        break;
+
+      default:
+        return GT_FALSE;
+    }
+  }
+    return retVal;
+}
+
+GT_BOOL qdAccessMultiAddrRead (GT_QD_DEV* dev, unsigned int phyAddr , unsigned int regAddr, unsigned int* value)
+{
+  GT_U16 smiReg;
+
+  HW_DEV_REG_ACCESS regAccess;
+
+  regAccess.entries = 4;
+
+  regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+  regAccess.rw_reg_list[0].addr = (GT_U32)dev->phyAddr;
+  regAccess.rw_reg_list[0].reg = QD_REG_SMI_COMMAND;
+  regAccess.rw_reg_list[0].data = 15;
+  smiReg =  QD_SMI_BUSY | (phyAddr << QD_SMI_DEV_ADDR_BIT) | (QD_SMI_READ << QD_SMI_OP_BIT) | (regAddr << QD_SMI_REG_ADDR_BIT) | (QD_SMI_CLAUSE22 << QD_SMI_MODE_BIT);
+
+  regAccess.rw_reg_list[1].cmd = HW_REG_WRITE;
+  regAccess.rw_reg_list[1].addr = (GT_U32)dev->phyAddr;
+  regAccess.rw_reg_list[1].reg = QD_REG_SMI_COMMAND;
+  regAccess.rw_reg_list[1].data = smiReg;
+
+  regAccess.rw_reg_list[2].cmd = HW_REG_WAIT_TILL_0;
+  regAccess.rw_reg_list[2].addr = (GT_U32)dev->phyAddr;
+  regAccess.rw_reg_list[2].reg = QD_REG_SMI_COMMAND;
+  regAccess.rw_reg_list[2].data = 15;
+
+  regAccess.rw_reg_list[3].cmd = HW_REG_READ;
+  regAccess.rw_reg_list[3].addr = (GT_U32)dev->phyAddr;
+  regAccess.rw_reg_list[3].reg = QD_REG_SMI_DATA;
+  regAccess.rw_reg_list[3].data = 0;
+  if(dev->fgtHwAccess(dev, &regAccess) != GT_TRUE)
+  {
+    return GT_FALSE;
+  }
+  *value = (unsigned short)regAccess.rw_reg_list[3].data;
+
+  return GT_TRUE;
+}
+GT_BOOL qdAccessMultiAddrWrite (GT_QD_DEV* dev, unsigned int phyAddr , unsigned int regAddr, unsigned int value)
+{
+  GT_U16 smiReg;
+
+  HW_DEV_REG_ACCESS regAccess;
+
+  regAccess.entries = 3;
+
+  regAccess.rw_reg_list[0].cmd = HW_REG_WAIT_TILL_0;
+  regAccess.rw_reg_list[0].addr = (GT_U32)dev->phyAddr;
+  regAccess.rw_reg_list[0].reg = QD_REG_SMI_COMMAND;
+  regAccess.rw_reg_list[0].data = 15;
+
+  regAccess.rw_reg_list[1].cmd = HW_REG_WRITE;
+  regAccess.rw_reg_list[1].addr = (GT_U32)dev->phyAddr;
+  regAccess.rw_reg_list[1].reg = QD_REG_SMI_DATA;
+  regAccess.rw_reg_list[1].data = value;
+
+  smiReg = QD_SMI_BUSY | (phyAddr << QD_SMI_DEV_ADDR_BIT) | (QD_SMI_WRITE << QD_SMI_OP_BIT) | (regAddr << QD_SMI_REG_ADDR_BIT) | (QD_SMI_CLAUSE22 << QD_SMI_MODE_BIT);
+
+  regAccess.rw_reg_list[2].cmd = HW_REG_WRITE;
+  regAccess.rw_reg_list[2].addr = (GT_U32)dev->phyAddr;
+  regAccess.rw_reg_list[2].reg = QD_REG_SMI_COMMAND;
+  regAccess.rw_reg_list[2].data = smiReg;
+
+  if(dev->fgtHwAccess(dev, &regAccess) != GT_TRUE)
+  {
+    return GT_FALSE;
+  }
+
+  return GT_TRUE;
+}
+
+/*****************************************************************************
+* qdMultiAddrAccess
+*
+* DESCRIPTION:
+*       This function access data to the device in the secondary MII bus.
+*
+* INPUTS:
+*   regList     - list of HW_DEV_RW_REG.
+*     HW_DEV_RW_REG includes:
+*     cmd - HW_REG_READ, HW_REG_WRITE, HW_REG_WAIT_TILL_0 or HW_REG_WAIT_TILL_1
+*     addr - SMI Address
+*     reg  - Register offset
+*     data - INPUT,OUTPUT:Value in the Register or Bit number
+*
+* OUTPUTS:
+*   regList     - list of HW_DEV_RW_REG.
+*
+* RETURNS:
+*       GT_TRUE   - on success
+*       GT_FALSE  - on error
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+
+static GT_BOOL qdMultiAddrAccess(GT_QD_DEV* dev, HW_DEV_REG_ACCESS *regList)
+{
+  int i;
+  GT_BOOL retVal = GT_TRUE;
+  unsigned short data, mask;
+  unsigned int tmpData;
+
+  for (i=0; i<regList->entries; i++)
+  {
+    switch (regList->rw_reg_list[i].cmd)
+    {
+      case HW_REG_READ:
+        retVal=qdAccessMultiAddrRead(dev, regList->rw_reg_list[i].addr,
+                regList->rw_reg_list[i].reg, (unsigned int *)&(regList->rw_reg_list[i].data));
+        break;
+      case HW_REG_WRITE:
+        retVal=qdAccessMultiAddrWrite(dev, regList->rw_reg_list[i].addr,
+                regList->rw_reg_list[i].reg, regList->rw_reg_list[i].data);
+        break;
+      case HW_REG_WAIT_TILL_0:
+      {
+        mask = (1<<regList->rw_reg_list[i].data);
+        do
+        {
+          retVal=qdAccessMultiAddrRead(dev, regList->rw_reg_list[i].addr,
+                regList->rw_reg_list[i].reg, (unsigned int *)&tmpData);
+          if(retVal != GT_TRUE)
+          {
+            return retVal;
+          }
+          data = tmpData;
+        } while(data & mask);
+      }
+        break;
+      case HW_REG_WAIT_TILL_1:
+      {
+        mask = (1<<regList->rw_reg_list[i].data);
+        do
+        {
+          retVal=qdAccessMultiAddrRead(dev, regList->rw_reg_list[i].addr,
+                regList->rw_reg_list[i].reg, (unsigned int *)&tmpData);
+          if(retVal != GT_TRUE)
+          {
+            return retVal;
+          }
+          data = tmpData;
+        } while(!(data & mask));
+      }
+        break;
+
+      default:
+        return GT_FALSE;
+    }
+  }
+    return retVal;
+}
+
+#endif
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/platform/gtSem.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/platform/gtSem.c
new file mode 100644
index 000000000000..d92337e680b7
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/platform/gtSem.c
@@ -0,0 +1,148 @@
+#include <Copyright.h>
+/********************************************************************************
+* gtOs.c
+*
+* DESCRIPTION:
+*       Semaphore related routines
+*
+* DEPENDENCIES:
+*       OS Dependent.
+*
+* FILE REVISION NUMBER:
+*       $Revision: 3 $
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtSem.h>
+
+
+/*******************************************************************************
+* gtSemCreate
+*
+* DESCRIPTION:
+*       Create semaphore.
+*
+* INPUTS:
+*        state - beginning state of the semaphore, either GT_SEM_EMPTY or GT_SEM_FULL
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_SEM if success. Otherwise, NULL
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_SEM gtSemCreate
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_SEM_BEGIN_STATE state
+)
+{
+    if(dev->semCreate)
+        return dev->semCreate(state);
+
+    return 1; /* should return any value other than 0 to let it keep going */
+}
+
+/*******************************************************************************
+* gtSemDelete
+*
+* DESCRIPTION:
+*       Delete semaphore.
+*
+* INPUTS:
+*       smid - semaphore Id
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtSemDelete
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_SEM smid
+)
+{
+    if((dev->semDelete) && (smid))
+        return dev->semDelete(smid);
+
+    return GT_OK;
+}
+
+
+/*******************************************************************************
+* gtSemTake
+*
+* DESCRIPTION:
+*       Wait for semaphore.
+*
+* INPUTS:
+*       smid    - semaphore Id
+*       timeOut - time out in miliseconds or 0 to wait forever
+*
+* OUTPUTS:
+*       None
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*       OS_TIMEOUT - on time out
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtSemTake
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_SEM smid,
+    IN GT_U32 timeOut
+)
+{
+    if(dev->semTake)
+        return dev->semTake(smid, timeOut);
+
+    return GT_OK;
+
+}
+
+/*******************************************************************************
+* gtSemGive
+*
+* DESCRIPTION:
+*       release the semaphore which was taken previously.
+*
+* INPUTS:
+*       smid    - semaphore Id
+*
+* OUTPUTS:
+*       None
+*
+* RETURNS:
+*       GT_OK   - on success
+*       GT_FAIL - on error
+*
+* COMMENTS:
+*       None
+*
+*******************************************************************************/
+GT_STATUS gtSemGive
+(
+    IN GT_QD_DEV    *dev,
+    IN GT_SEM       smid
+)
+{
+    if(dev->semGive)
+        return dev->semGive(smid);
+
+    return GT_OK;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/platform/makefile b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/platform/makefile
new file mode 100644
index 000000000000..429d58ef634f
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/platform/makefile
@@ -0,0 +1,33 @@
+# Source files in this directory
+TARGET =
+CSOURCES	= gtMiiSmiIf.c platformDeps.c gtSem.c gtDebug.c
+ASOURCES	=
+
+# Include common variable definitions
+ifeq ($(OS_RUN),VXWORKS)
+include $(DSDT_TOOL_DIR)\make.defs
+endif
+ifeq ($(OS_RUN),WIN32)
+include $(DSDT_TOOL_DIR)\makewce.defs
+endif
+ifeq ($(OS_RUN),LINUX)
+include $(DSDT_TOOL_DIR)/makelnx.defs
+endif
+
+# Add in extra stuffs
+EXTRA_INCLUDE	+=
+EXTRA_DEFINE	+=
+ADDED_CFLAGS	+=
+
+# Include common build rules
+ifeq ($(OS_RUN),VXWORKS)
+include $(DSDT_TOOL_DIR)\make.rules
+endif
+ifeq ($(OS_RUN),WIN32)
+include $(DSDT_TOOL_DIR)\makewce.rules
+endif
+ifeq ($(OS_RUN),LINUX)
+include $(DSDT_TOOL_DIR)/makelnx.rules
+endif
+
+# end of file
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/platform/platformDeps.c b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/platform/platformDeps.c
new file mode 100644
index 000000000000..58782105bb5e
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/src/platform/platformDeps.c
@@ -0,0 +1,90 @@
+#include <Copyright.h>
+/********************************************************************************
+* platformDeps.c
+*
+* DESCRIPTION:
+*       platform dependent functions
+*
+* DEPENDENCIES:   Platform.
+*
+* FILE REVISION NUMBER:
+*
+*******************************************************************************/
+
+#include <msApi.h>
+#include <gtDrvEvents.h>
+#include <gtHwCntl.h>
+#include <platformDeps.h>
+
+#if 0
+/*******************************************************************************
+* gtAssignFunctions
+*
+* DESCRIPTION:
+*       Exchange MII access functions and QuarterDeck Int Handler.
+*        MII access functions will be called by QuarterDeck driver and
+*        QD Int Handler should be called by BSP when BSP sees an interrupt which is related to
+*        QD (This interrupt has to be initialized by BSP, since QD has no idea which
+*        interrupt is assigned to QD)
+*
+* INPUTS:
+*       fReadMii     - function to read MII registers
+*        fWriteMii    - functino to write MII registers
+*
+* OUTPUTS:
+*        None.
+*
+* RETURNS:
+*       GT_TRUE, if input is valid. GT_FALSE, otherwise.
+*
+* COMMENTS:
+*       None.
+*
+*******************************************************************************/
+GT_BOOL gtAssignFunctions
+(
+   GT_QD_DEV      *dev,
+   FGT_READ_MII   fReadMii,
+   FGT_WRITE_MII fWriteMii
+)
+{
+    if((fReadMii == NULL) || (fWriteMii == NULL))
+        return GT_FALSE;
+
+    dev->fgtReadMii = fReadMii;
+    dev->fgtWriteMii = fWriteMii;
+
+    return GT_TRUE;
+}
+
+#endif
+
+GT_BOOL defaultMiiRead (unsigned int portNumber , unsigned int miiReg, unsigned int* value)
+{
+    GT_UNUSED_PARAM(miiReg);
+
+    if (portNumber > GLOBAL_REGS_START_ADDR)
+        portNumber -= GLOBAL_REGS_START_ADDR;
+
+    if (portNumber > GLOBAL_REGS_START_ADDR)
+        return GT_FALSE;
+
+    *value = 0;
+
+    return GT_TRUE;
+}
+
+GT_BOOL defaultMiiWrite (unsigned int portNumber , unsigned int miiReg, unsigned int value)
+{
+    GT_UNUSED_PARAM(miiReg);
+
+    if (portNumber > GLOBAL_REGS_START_ADDR)
+        portNumber -= GLOBAL_REGS_START_ADDR;
+
+    if (portNumber > GLOBAL_REGS_START_ADDR)
+        return GT_FALSE;
+
+    value = value;
+
+    return GT_TRUE;
+}
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/make.defs b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/make.defs
new file mode 100644
index 000000000000..7470c2fbf711
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/make.defs
@@ -0,0 +1,105 @@
+# make.defs - common variable definitions
+#
+# modification history
+# --------------------
+# 04-12-02,mj	created
+#
+
+DEPENDENCIES=makedeps
+
+ifeq ($(TARGET_CPU),MIPS)
+
+CPU_TOOL=mips
+
+ifeq ($(TOR_VER),2.1)
+CPU=MIPS64
+CC_ARCH_SPEC1= -mips4 -G 0 -B$(VX_ROOT)/host/x86-win32/lib/gcc-lib/ -mno-branch-likely -fno-builtin -funroll-loops -Wall
+else
+CPU=R4000
+CC_ARCH_SPEC1= -mcpu=r4000 -mips3 -mgp32 -mfp32 -G 0 -B$(VX_ROOT)/host/x86-win32/lib/gcc-lib/
+endif
+
+ifeq ($(ENDIAN),EL)
+CC_ARCH_SPEC = -EL $(CC_ARCH_SPEC1)
+else
+CC_ARCH_SPEC = -EB $(CC_ARCH_SPEC1)
+endif
+
+# ARM CPU
+else
+
+CPU_TOOL=arm
+CPU		= ARMARCH4
+CC_ARCH_SPEC1 = -march=armv4 -mapcs-32
+
+ifeq ($(ENDIAN),LE)
+CC_ARCH_SPEC = $(CC_ARCH_SPEC1) -mlittle-endian
+else
+CC_ARCH_SPEC = $(CC_ARCH_SPEC1)
+endif
+
+endif
+
+TOOL       = gnu
+
+SW_ROOT = $(subst \,/,$(DSDT_USER_BASE))
+SRC_BASE_PATH = $(SW_ROOT)/src
+QD_INCLUDE_PATH = $(SW_ROOT)/Include/h
+
+VX_ROOT = $(subst \,/,$(WIND_BASE))
+OS_INCLUDE_PATH = $(VX_ROOT)/target/h
+
+LIB_DIR = $(SW_ROOT)/library
+OBJDIR = obj
+
+CC      =cc$(CPU_TOOL)
+LD      =ld$(CPU_TOOL)
+MAKE    =make
+CD		=cd
+RM      =del
+ECHO	=echo
+
+CFLAGS = $(CFLAGS1) $(CFLAGS2) $(INCLUDE_PATH) $(CDEBUG_DEFINES)
+
+CFLAGS1 = -DCPU=$(CPU) -D_$(OS_RUN) $(CC_ARCH_SPEC) -g $(DEBUG_TYPE)
+CFLAGS2 = $(CC_OPTIM) $(CC_COMPILER) $(CC_WARNINGS_ALL)
+
+CC_OPTIM = $(CC_OPTIM_TARGET)
+
+ifeq ($(TARGET_CPU),MIPS)
+#CC_COMPILER	=  -ansi -nostdinc
+CC_COMPILER	=  -ansi
+else
+CC_COMPILER	=  -ansi -fdollars-in-identifiers -mno-sched-prolog -fvolatile
+endif
+
+CC_OPTIM_TARGET	= -O2 -funroll-loops -fno-for-scope -fno-builtin
+CC_WARNINGS_ALL	= -Wall
+
+ifeq ($(RELEASE),NO)
+CDEBUG_DEFINES=-DDEBUG=1 -DDEBUG_QD
+else
+CDEBUG_DEFINES=
+endif
+
+ifeq ($(TARGET_CPU),MIPS)
+LDFLAGS1 = -nostdlib -r -X -N
+ARFLAGS =  crus
+else
+#LDFLAGS	= -nostdlib -X -EL -r
+LDFLAGS1	= -nostdlib -X -N -r
+endif
+
+ifeq ($(ENDIAN),EL)
+LDFLAGS = $(LDFLAGS1) -EL
+else
+LDFLAGS = $(LDFLAGS1)
+endif
+
+
+INCLUDE_PATH = -I. \
+               -I$(QD_INCLUDE_PATH)/driver \
+               -I$(QD_INCLUDE_PATH)/msApi \
+               -I$(QD_INCLUDE_PATH)/platform \
+               -I$(SW_ROOT)/include \
+               -I$(OS_INCLUDE_PATH)
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/make.rules b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/make.rules
new file mode 100644
index 000000000000..9208b04d2c70
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/make.rules
@@ -0,0 +1,54 @@
+# make.rules - common build rules
+#
+# modification history
+# --------------------
+# 04-15-02,mj	created
+#
+########################################################################
+ifeq ($(TARGET),)
+TARGET	= $(notdir $(subst \,/,$(shell cd)))
+endif
+default : $(TARGET).o
+
+# Set searching directories for target and dependencies
+vpath %.o    $(OBJDIR)
+
+# Include RULES file if it exists
+ifeq ($(shell if exist $(DEPENDENCIES) $(ECHO) 1),1)
+include $(DEPENDENCIES)
+endif
+
+# Accumulate source files specified in each directory makefile
+COBJECTS  	= $(CSOURCES:.c=.o)
+AOBJECTS  	= $(ASOURCES:.s=.o)
+ifeq ($(OBJECTS),)
+OBJECTS  	= $(COBJECTS) $(AOBJECTS)
+endif
+
+$(TARGET).o : $(OBJECTS) deps
+	@ $(ECHO) 'Building $@'
+	$(LD) $(LDFLAGS) -Map $(OBJDIR)/$(TARGET).map -o $(OBJDIR)/$(TARGET).o $(addprefix $(OBJDIR)/,$(notdir $(OBJECTS)))
+
+$(OBJECTS) : %.o : %.c
+.c.o :
+	@if not exist $(OBJDIR) mkdir $(OBJDIR)
+	$(CC) $(CFLAGS) $(EXTRA_DEFINE) $(EXTRA_INCLUDE) $(ADDED_CFLAGS) -c $< -o $(OBJDIR)/$(notdir $@)
+
+deps : $(CSOURCES)
+	@$(ECHO) '##' > $(DEPENDENCIES)
+	@$(ECHO) '## This file is generated by "make"!' >> $(DEPENDENCIES)
+	@$(ECHO) '##' >> $(DEPENDENCIES)
+	@for %x in ($^) do \
+	    @$(ECHO) '  %x' & \
+	     $(CC) -M -MG $(CFLAGS) %x >> $(DEPENDENCIES) & \
+	     @$(ECHO) '##' >> $(DEPENDENCIES)
+
+.PHONY: clean
+clean:
+	$(RM) $(DEPENDENCIES)
+	- ($(CD) $(OBJDIR)) && $(RM) *.o
+	- ($(CD) $(OBJDIR)) && $(RM) *.map
+
+FORCE :
+
+# end of file
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/makelnx.defs b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/makelnx.defs
new file mode 100644
index 000000000000..eeb7a2e1bc6b
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/makelnx.defs
@@ -0,0 +1,62 @@
+# make.defs - common variable definitions
+#
+# modification history
+# --------------------
+# 04-12-02,mj	created
+#
+SHELL=/bin/sh -e
+
+#CC_ARCH_SPEC=  -march=i386 -mcpu=i686
+
+SW_ROOT = $(DSDT_USER_BASE)
+SRC_BASE_PATH = $(SW_ROOT)/src
+QD_INCLUDE_PATH = $(SW_ROOT)/Include/h
+
+LIB_DIR = $(SW_ROOT)/Library
+OBJDIR = $(DSDT_PROJ_NAME)_obj
+
+ifeq ($(CC),)
+CC      =cc
+LD      =ld
+endif
+
+MAKE    =make
+CD      =pwd
+RM      =rm -f
+ECHO    =echo
+
+DEPENDENCIES= makedeps
+
+#CFLAGS = $(CFLAGS1) $(CFLAGS2) $(INCLUDE_PATH) $(CDEBUG_DEFINES)
+CFLAGS = -DLINUX $(CFLAGS1) $(CFLAGS2) $(INCLUDE_PATH) $(CDEBUG_DEFINES)
+
+CFLAGS1 = $(CC_ARCH_SPEC) $(DEBUG_TYPE)
+CFLAGS2 = $(CC_OPTIM) $(CC_COMPILER) $(CC_WARNINGS_ALL)
+
+CC_OPTIM = $(CC_OPTIM_TARGET)
+CC_OPTIM_DRIVER	= -O
+CC_OPTIM_TARGET	= -O2
+CC_COMPILER = -ansi -nostdinc
+CC_WARNINGS_ALL	= -Wall
+
+ifeq ($(RELEASE),NO)
+CDEBUG_DEFINES=-DDEBUG=1 -DDEBUG_QD
+else
+CDEBUG_DEFINES=
+endif
+
+LDFLAGS = -nostdlib -r -X -N
+ARFLAGS = crus
+
+INCLUDE_PATH = -I. \
+               -I$(QD_INCLUDE_PATH)/driver \
+               -I$(QD_INCLUDE_PATH)/msApi \
+               -I$(QD_INCLUDE_PATH)/platform \
+               -I$(SW_ROOT)/Include
+ifeq ($(DSDT_USE_MAD),TRUE)
+MAD_INCLUDE_PATH = $(SW_ROOT)/../phy/Include
+INCLUDE_PATH += -I$(MAD_INCLUDE_PATH)
+INCLUDE_PATH += -I$(MAD_INCLUDE_PATH)/h/driver
+INCLUDE_PATH += -I$(MAD_INCLUDE_PATH)/h/madApi
+INCLUDE_PATH += -I$(MAD_INCLUDE_PATH)/h/platform
+endif
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/makelnx.rules b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/makelnx.rules
new file mode 100644
index 000000000000..4c1b92681bdd
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/makelnx.rules
@@ -0,0 +1,57 @@
+# make.rules - common build rules
+#
+# modification history
+# --------------------
+# 04-15-02,mj	created
+#
+########################################################################
+
+ifeq ($(TARGET),)
+TARGET	= $(notdir $(shell $(CD)))
+endif
+default : $(TARGET).o
+
+# Set searching directories for target and dependencies
+vpath %.o $(OBJDIR)
+
+# Include DEPENDENCIES file if it exists
+ifeq ($(shell if [ -a $(DEPENDENCIES) ]; then $(ECHO) 1; fi),1)
+include $(DEPENDENCIES)
+endif
+
+# Accumulate source files specified in each directory makefile
+COBJECTS  = $(CSOURCES:.c=.o)
+AOBJECTS  = $(ASOURCES:.s=.o)
+ifeq ($(OBJECTS),)
+OBJECTS  = $(COBJECTS) $(AOBJECTS)
+endif
+
+$(TARGET).o : $(OBJECTS) deps
+	@ $(ECHO) 'Building $@'
+	$(LD) $(LDFLAGS) -Map $(OBJDIR)/$(TARGET).map -o $(OBJDIR)/$(TARGET).o $(addprefix $(OBJDIR)/,$(notdir $(OBJECTS)))
+
+$(OBJECTS): %.o: %.c
+.c.o :
+	@if ! [ -a $(OBJDIR) ]; then mkdir $(OBJDIR); fi
+	$(CC) $(CFLAGS) $(EXTRA_DEFINE) $(EXTRA_INCLUDE) $(ADDED_CFLAGS) -c $< -o $(OBJDIR)/$(notdir $@)
+
+deps : $(CSOURCES)
+	@ $(ECHO) '##' > $(DEPENDENCIES)
+	@ $(ECHO) '## This file is generated by "make"!' >> $(DEPENDENCIES)
+	@ $(ECHO) '##' >> $(DEPENDENCIES)
+	@for i in $^ ; do \
+		$(ECHO) "  $$i"; \
+		$(CC) -M -MG $(CFLAGS) $$i >> $(DEPENDENCIES);  \
+		$(ECHO) '##' >> $(DEPENDENCIES);	\
+	done
+
+.PHONY: clean
+clean:
+#	cd $(OBJDIR); $(RM) *.o
+#	cd $(OBJDIR); $(RM) *.map
+	$(RM) -f -r $(OBJDIR)
+	$(RM) $(DEPENDENCIES)
+
+FORCE :
+
+# end of file
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/makewce.defs b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/makewce.defs
new file mode 100644
index 000000000000..52db4e49bf3a
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/makewce.defs
@@ -0,0 +1,94 @@
+# makewce.defs - common variable definitions for wce
+#
+# modification history
+# --------------------
+# 04-24-02,mj	created
+#
+#######################################################################
+
+ifeq ($(TARGET),)
+TARGET	= $(notdir $(subst \,/,$(shell cd)))
+endif
+default : $(TARGET).lib
+
+SW_ROOT = $(subst \,/,$(DSDT_USER_BASE))
+SRC_BASE_PATH = $(SW_ROOT)/src
+#INCLUDE_PATH = $(subst ;, -I,$(INCLUDE))
+
+LIB_DIR = $(SW_ROOT)/library
+OBJDIR = obj
+
+QD_INCLUDE_PATH = -I$(DSDT_USER_BASE)\Include\h\driver \
+                  -I$(DSDT_USER_BASE)\Include\h\msApi \
+                  -I$(DSDT_USER_BASE)\Include\h\platform \
+                  -I$(DSDT_USER_BASE)\Include
+
+MAKE    =make
+CD		=cd
+RM      =del
+ECHO	=echo
+LD      =lib
+
+ifeq ($(TARGETCPU),MIPSIV)
+CC      =clmips
+
+TGTCPUFAMILY=MIPS
+TGTCPUDEFINES=-D$(TGTCPUFAMILY) -D_MIPS_ -DR4000
+
+ifeq ($(RELEASE),NO)
+CDEBUG_DEFINES=-DDEBUG=1 -DDEBUG_QD
+CDEBUG_FLAGS=-Od
+else
+CDEBUG_DEFINES=
+CDEBUG_FLAGS=
+endif
+
+
+CDEFINES1=-DUNDER_CE -D_WIN32_WCE=400
+CDEFINES=-DWIN32 -DSTRICT $(TGTCPUDEFINES) $(CDEBUG_DEFINES) $(CDEFINES1)
+
+#CINCLUDES=-I. -I$(INCLUDE_PATH) -I$(QD_INCLUDE_PATH)
+CINCLUDES=-I. $(QD_INCLUDE_PATH)
+
+CFLAGS1= -W3 -c -QMFPE -nologo -QMmips2 -Gy
+#CFLAGS1= -W3 -Og -Oi -Os -Ob1 -YX -Gs8192 -c
+#CFLAGS2= -FAsc -Fa$(@R).cod -WX
+
+CFLAGS= $(CFLAGS1) $(CINCLUDES) $(CDEBUG_FLAGS) $(CDEFINES)
+
+LINKER_SUBSYSTEM=-subsystem:windowsce,4.00
+MACHINEOPTION=-machine:MIPS
+
+LDFLAGS = -nologo -nodefaultlib $(LINKER_SUBSYSTEM) $(TARGETOBJFILES) $(SOURCELIBS) $(MACHINEOPTION)
+
+else
+CC      =cl
+
+TGTCPUFAMILY=x86
+TGTCPUDEFINES=-D$(TGTCPUFAMILY) -D_X86_
+
+ifeq ($(RELEASE),NO)
+CDEBUG_DEFINES=-DDEBUG=1 -DDEBUG_QD
+CDEBUG_FLAGS=-Od
+else
+CDEBUG_DEFINES=
+CDEBUG_FLAGS=-Og
+endif
+
+CDEFINES1=-DUNDER_CE -D_WIN32_WCE=400
+CDEFINES=-DWIN32 -DSTRICT $(TGTCPUDEFINES) $(CDEBUG_DEFINES) $(CDEFINES1)
+
+#CINCLUDES=-I. -I$(INCLUDE_PATH) -I$(QD_INCLUDE_PATH)
+CINCLUDES=-I. $(QD_INCLUDE_PATH)
+
+CFLAGS1= -W3 -Oi -Os -Ob1 -YX -Gs8192 -c
+#CFLAGS2= -FAsc -Fa$(@R).cod -WX
+
+CFLAGS= $(CFLAGS1) $(CINCLUDES) $(CDEBUG_FLAGS) $(CDEFINES)
+
+LINKER_SUBSYSTEM=-subsystem:windowsce,4.00
+MACHINEOPTION=-machine:IX86
+
+LDFLAGS = -nologo -nodefaultlib $(LINKER_SUBSYSTEM) $(TARGETOBJFILES) $(SOURCELIBS) $(MACHINEOPTION)
+
+endif
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/makewce.rules b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/makewce.rules
new file mode 100644
index 000000000000..7dd5bccf18ec
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/makewce.rules
@@ -0,0 +1,49 @@
+# makewce.rules - common build rules for wce
+#
+# modification history
+# --------------------
+# 04-25-02,mj	created
+#
+########################################################################
+
+# Set searching directories for target and dependencies
+vpath %.obj    $(OBJDIR)
+
+# Include RULES file if it exists
+#ifeq ($(shell if exist RULES $(ECHO) 1),1)
+#include RULES
+#endif
+
+# Accumulate source files specified in each directory makefile
+COBJECTS  	= $(CSOURCES:.c=.obj)
+AOBJECTS  	= $(ASOURCES:.s=.obj)
+ifeq ($(OBJECTS),)
+OBJECTS  	= $(COBJECTS) $(AOBJECTS)
+endif
+
+$(TARGET).lib : $(OBJECTS) $(CSOURCES)
+	@ $(ECHO) 'Building $@'
+	$(LD) $(LDFLAGS) -ignore:4001 -out:$(OBJDIR)\$(TARGET).lib $(addprefix $(OBJDIR)\,$(notdir $(OBJECTS)))
+
+$(OBJECTS) : %.obj : %.c
+	@if not exist $(OBJDIR) mkdir $(OBJDIR)
+	$(CC) $(CFLAGS) $(EXTRA_DEFINE) $(EXTRA_INCLUDE) $(ADDED_CFLAGS) -c $< -Fo$(OBJDIR)\$(notdir $@)
+
+#deps : $(CSOURCES)
+#	@$(ECHO) '##' >> RULES
+#	@$(ECHO) '## This file is generated by \"make\" - DO NOT EDIT !' >> RULES
+#	@$(ECHO) '## $(shell $(DATE))' >> RULES
+#	@$(ECHO) '##' >> RULES
+#	@for %x in ($^) do \
+#	    @$(ECHO) "  %x" & \
+#	     $(CC) -M -MG $(CFLAGS) %x >> RULES & \
+#	     $(ECHO) >> RULES
+
+.PHONY: clean
+clean:
+	- ($(CD) $(OBJDIR)) && $(RM) *.obj
+	- ($(CD) $(OBJDIR)) && $(RM) *.lib
+
+FORCE :
+
+# end of file
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/setenv b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/setenv
new file mode 100644
index 000000000000..a5e5a98dd498
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/setenv
@@ -0,0 +1,14 @@
+declare -x DSDT_USER_BASE=/home/xudy/dsdt/DSDT_3.2_internal/switch
+declare -x DSDT_PROJ_NAME=qdDrv
+declare -x DSDT_TOOL_DIR=$DSDT_USER_BASE/tools
+declare -x RELEASE=YES
+declare -x OS_RUN=LINUX
+declare -x MAD_USER_BASE=$DSDT_USER_BASE/../phy
+declare -x MAD_PROJ_NAME=madDrv
+declare -x MAD_TOOL_DIR=$MAD_USER_BASE/tools
+declare -x USER_API_NAME=
+declare -x USER_NAME=
+declare -x DSP_VCT=FALSE
+declare -x VCT_PENDING=FALSE
+#declare -x DSDT_USE_MAD=FALSE
+declare -x DSDT_USE_MAD=TRUE
diff --git a/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/setenv.bat b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/setenv.bat
new file mode 100644
index 000000000000..e140d89b8eec
--- /dev/null
+++ b/drivers/net/ethernet/mvebu_net/switch/qd-dsdt-3.3/tools/setenv.bat
@@ -0,0 +1,55 @@
+@echo off
+set REQ_OS=%1
+
+if "%REQ_OS%"==""        set OS_RUN=VXWORKS
+if "%REQ_OS%"=="VXWORKS" set OS_RUN=VXWORKS
+if "%REQ_OS%"=="vxworks" set OS_RUN=VXWORKS
+if "%REQ_OS%"=="vxWorks" set OS_RUN=VXWORKS
+if "%REQ_OS%"=="win32"   set OS_RUN=WIN32
+if "%REQ_OS%"=="WIN32"   set OS_RUN=WIN32
+if "%REQ_OS%"=="wince"   set OS_RUN=WIN32
+if "%REQ_OS%"=="WINCE"   set OS_RUN=WIN32
+
+set DSDT_USER_BASE=C:\DSDT_2.8pre_4
+set DSDT_PROJ_NAME=qdDrv
+set DSDT_USE_MAD=FALSE
+set DSDT_TOOL_DIR=%DSDT_USER_BASE%\tools
+set RELEASE=YES
+set TARGET_CPU=ARM
+rem set TARGET_CPU=MIPS
+
+if "%OS_RUN%"=="VXWORKS" goto VXWORKS_ENV
+if "%OS_RUN%"=="WIN32" goto WIN32_ENV
+
+echo Unknown Target OS!
+echo Supported Target OS is vxworks and wince.
+echo Assumes VxWorks as a Target OS.
+
+:VXWORKS_ENV
+
+set TOR_VER=2.1
+set ENDIAN=EL
+set WIND_BASE=C:\Tornado.arm
+rem set WIND_BASE=C:\Tornado2.1
+set WIND_HOST_TYPE=x86-win32
+
+set PATH=%WIND_BASE%\host\%WIND_HOST_TYPE%\bin;%PATH%
+
+echo Environment Variable for VxWorks has been set.
+
+goto DONE
+
+:WIN32_ENV
+
+set TARGETCPU=MIPSIV
+rem set TARGETCPU=x86
+set WCEROOT=C:\WINCE400
+
+set INCLUDE=%WCEROOT%\public\common\oak\inc;%WCEROOT%\public\common\sdk\inc;%WCEROOT%\public\common\ddk\inc;%INCLUDE%
+set PATH=%WCEROOT%\sdk\bin\i386;%WCEROOT%\public\common\oak\bin\i386;%DSDT_USER_BASE%\tools;%path%
+
+echo Environment Variable for WinCE has been set.
+goto DONE
+
+:DONE
+cd %DSDT_USER_BASE%\src
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index ba55adfc7aae..bd1e67a73774 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -195,6 +195,8 @@ int fixed_phy_add(unsigned int irq, int phy_id,
 
 	list_add_tail(&fp->node, &fmb->phys);
 
+	mdiobus_scan(fmb->mii_bus, phy_id);
+
 	return 0;
 
 err_regs:
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 00bc55cc821c..db9fd278acb4 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -224,6 +224,73 @@ int of_pci_address_to_resource(struct device_node *dev, int bar,
 	return __of_address_to_resource(dev, addrp, size, flags, NULL, r);
 }
 EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
+
+int of_pci_range_parser_init(struct of_pci_range_parser *parser,
+				struct device_node *node)
+{
+	const int na = 3, ns = 2;
+	int rlen;
+
+	parser->node = node;
+	parser->pna = of_n_addr_cells(node);
+	parser->np = parser->pna + na + ns;
+
+	parser->range = of_get_property(node, "ranges", &rlen);
+	if (parser->range == NULL)
+		return -ENOENT;
+
+	parser->end = parser->range + rlen / sizeof(__be32);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(of_pci_range_parser_init);
+
+struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
+						struct of_pci_range *range)
+{
+	const int na = 3, ns = 2;
+
+	if (!range)
+		return NULL;
+
+	if (!parser->range || parser->range + parser->np > parser->end)
+		return NULL;
+
+	range->pci_space = parser->range[0];
+	range->flags = of_bus_pci_get_flags(parser->range);
+	range->pci_addr = of_read_number(parser->range + 1, ns);
+	range->cpu_addr = of_translate_address(parser->node,
+				parser->range + na);
+	range->size = of_read_number(parser->range + parser->pna + na, ns);
+
+	parser->range += parser->np;
+
+	/* Now consume following elements while they are contiguous */
+	while (parser->range + parser->np <= parser->end) {
+		u32 flags, pci_space;
+		u64 pci_addr, cpu_addr, size;
+
+		pci_space = be32_to_cpup(parser->range);
+		flags = of_bus_pci_get_flags(parser->range);
+		pci_addr = of_read_number(parser->range + 1, ns);
+		cpu_addr = of_translate_address(parser->node,
+				parser->range + na);
+		size = of_read_number(parser->range + parser->pna + na, ns);
+
+		if (flags != range->flags)
+			break;
+		if (pci_addr != range->pci_addr + range->size ||
+		    cpu_addr != range->cpu_addr + range->size)
+			break;
+
+		range->size += size;
+		parser->range += parser->np;
+	}
+
+	return range;
+}
+EXPORT_SYMBOL_GPL(of_pci_range_parser_one);
+
 #endif /* CONFIG_PCI */
 
 /*
@@ -415,7 +482,7 @@ static u64 __of_translate_address(struct device_node *dev,
 	int na, ns, pna, pns;
 	u64 result = OF_BAD_ADDR;
 
-	pr_debug("OF: ** translation for device %s **\n", dev->full_name);
+	pr_debug("OF: ** translation for device %s **\n", of_node_full_name(dev));
 
 	/* Increase refcount at current level */
 	of_node_get(dev);
@@ -430,13 +497,13 @@ static u64 __of_translate_address(struct device_node *dev,
 	bus->count_cells(dev, &na, &ns);
 	if (!OF_CHECK_COUNTS(na, ns)) {
 		printk(KERN_ERR "prom_parse: Bad cell count for %s\n",
-		       dev->full_name);
+		       of_node_full_name(dev));
 		goto bail;
 	}
 	memcpy(addr, in_addr, na * 4);
 
 	pr_debug("OF: bus is %s (na=%d, ns=%d) on %s\n",
-	    bus->name, na, ns, parent->full_name);
+	    bus->name, na, ns, of_node_full_name(parent));
 	of_dump_addr("OF: translating address:", addr, na);
 
 	/* Translate */
@@ -458,12 +525,12 @@ static u64 __of_translate_address(struct device_node *dev,
 		pbus->count_cells(dev, &pna, &pns);
 		if (!OF_CHECK_COUNTS(pna, pns)) {
 			printk(KERN_ERR "prom_parse: Bad cell count for %s\n",
-			       dev->full_name);
+			       of_node_full_name(dev));
 			break;
 		}
 
 		pr_debug("OF: parent bus is %s (na=%d, ns=%d) on %s\n",
-		    pbus->name, pna, pns, parent->full_name);
+		    pbus->name, pna, pns, of_node_full_name(parent));
 
 		/* Apply bus translation */
 		if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop))
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 1d10b4ec6814..b0d75e3ab995 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1136,9 +1136,19 @@ EXPORT_SYMBOL(of_parse_phandle);
  * To get a device_node of the `node2' node you may call this:
  * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
  */
+void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
+{
+	int i;
+	printk("%s %s", msg, of_node_full_name(args->np));
+	for (i = 0; i < args->args_count; i++)
+		printk(i ? ",%08x" : ":%08x", args->args[i]);
+	printk("\n");
+}
+
 static int __of_parse_phandle_with_args(const struct device_node *np,
 					const char *list_name,
-					const char *cells_name, int index,
+					const char *cells_name,
+					int cell_count, int index,
 					struct of_phandle_args *out_args)
 {
 	const __be32 *list, *list_end;
@@ -1174,11 +1184,17 @@ static int __of_parse_phandle_with_args(const struct device_node *np,
 					 np->full_name);
 				goto err;
 			}
-			if (of_property_read_u32(node, cells_name, &count)) {
-				pr_err("%s: could not get %s for %s\n",
-					 np->full_name, cells_name,
-					 node->full_name);
-				goto err;
+
+			if (cells_name) {
+				if (of_property_read_u32(node, cells_name,
+							 &count)) {
+					pr_err("%s: could not get %s for %s\n",
+						np->full_name, cells_name,
+						node->full_name);
+					goto err;
+				}
+			} else {
+				count = cell_count;
 			}
 
 			/*
@@ -1244,10 +1260,52 @@ int of_parse_phandle_with_args(const struct device_node *np, const char *list_na
 {
 	if (index < 0)
 		return -EINVAL;
-	return __of_parse_phandle_with_args(np, list_name, cells_name, index, out_args);
+	return __of_parse_phandle_with_args(np, list_name, cells_name, 0,
+					    index, out_args);
 }
 EXPORT_SYMBOL(of_parse_phandle_with_args);
 
+/**
+ * of_parse_phandle_with_fixed_args() - Find a node pointed by phandle in a list
+ * @np:		pointer to a device tree node containing a list
+ * @list_name:	property name that contains a list
+ * @cell_count: number of argument cells following the phandle
+ * @index:	index of a phandle to parse out
+ * @out_args:	optional pointer to output arguments structure (will be filled)
+ *
+ * This function is useful to parse lists of phandles and their arguments.
+ * Returns 0 on success and fills out_args, on error returns appropriate
+ * errno value.
+ *
+ * Caller is responsible to call of_node_put() on the returned out_args->node
+ * pointer.
+ *
+ * Example:
+ *
+ * phandle1: node1 {
+ * }
+ *
+ * phandle2: node2 {
+ * }
+ *
+ * node3 {
+ * 	list = <&phandle1 0 2 &phandle2 2 3>;
+ * }
+ *
+ * To get a device_node of the `node2' node you may call this:
+ * of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args);
+ */
+int of_parse_phandle_with_fixed_args(const struct device_node *np,
+				const char *list_name, int cell_count,
+				int index, struct of_phandle_args *out_args)
+{
+	if (index < 0)
+		return -EINVAL;
+	return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
+					   index, out_args);
+}
+EXPORT_SYMBOL(of_parse_phandle_with_fixed_args);
+
 /**
  * of_count_phandle_with_args() - Find the number of phandles references in a property
  * @np:		pointer to a device tree node containing a list
@@ -1266,7 +1324,8 @@ EXPORT_SYMBOL(of_parse_phandle_with_args);
 int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
 				const char *cells_name)
 {
-	return __of_parse_phandle_with_args(np, list_name, cells_name, -1, NULL);
+	return __of_parse_phandle_with_args(np, list_name, cells_name, 0, -1,
+					    NULL);
 }
 EXPORT_SYMBOL(of_count_phandle_with_args);
 
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index a3c1c5aae6a9..786b0b47fae4 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -28,21 +28,20 @@
 
 /**
  * irq_of_parse_and_map - Parse and map an interrupt into linux virq space
- * @device: Device node of the device whose interrupt is to be mapped
+ * @dev: Device node of the device whose interrupt is to be mapped
  * @index: Index of the interrupt to map
  *
- * This function is a wrapper that chains of_irq_map_one() and
+ * This function is a wrapper that chains of_irq_parse_one() and
  * irq_create_of_mapping() to make things easier to callers
  */
 unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
 {
-	struct of_irq oirq;
+	struct of_phandle_args oirq;
 
-	if (of_irq_map_one(dev, index, &oirq))
+	if (of_irq_parse_one(dev, index, &oirq))
 		return 0;
 
-	return irq_create_of_mapping(oirq.controller, oirq.specifier,
-				     oirq.size);
+	return irq_create_of_mapping(&oirq);
 }
 EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
 
@@ -79,33 +78,34 @@ struct device_node *of_irq_find_parent(struct device_node *child)
 }
 
 /**
- * of_irq_map_raw - Low level interrupt tree parsing
+ * of_irq_parse_raw - Low level interrupt tree parsing
  * @parent:	the device interrupt parent
- * @intspec:	interrupt specifier ("interrupts" property of the device)
- * @ointsize:   size of the passed in interrupt specifier
- * @addr:	address specifier (start of "reg" property of the device)
- * @out_irq:	structure of_irq filled by this function
+ * @addr:	address specifier (start of "reg" property of the device) in be32 format
+ * @out_irq:	structure of_irq updated by this function
  *
  * Returns 0 on success and a negative number on error
  *
  * This function is a low-level interrupt tree walking function. It
  * can be used to do a partial walk with synthetized reg and interrupts
  * properties, for example when resolving PCI interrupts when no device
- * node exist for the parent.
+ * node exist for the parent. It takes an interrupt specifier structure as
+ * input, walks the tree looking for any interrupt-map properties, translates
+ * the specifier for each map, and then returns the translated map.
  */
-int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
-		   u32 ointsize, const __be32 *addr, struct of_irq *out_irq)
+int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
 {
 	struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL;
-	const __be32 *tmp, *imap, *imask;
+	__be32 initial_match_array[MAX_PHANDLE_ARGS];
+	const __be32 *match_array = initial_match_array;
+	const __be32 *tmp, *imap, *imask, dummy_imask[] = { [0 ... MAX_PHANDLE_ARGS] = ~0 };
 	u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0;
 	int imaplen, match, i;
 
-	pr_debug("of_irq_map_raw: par=%s,intspec=[0x%08x 0x%08x...],ointsize=%d\n",
-		 parent->full_name, be32_to_cpup(intspec),
-		 be32_to_cpup(intspec + 1), ointsize);
+#ifdef DEBUG
+	of_print_phandle_args("of_irq_parse_raw: ", out_irq);
+#endif
 
-	ipar = of_node_get(parent);
+	ipar = of_node_get(out_irq->np);
 
 	/* First get the #interrupt-cells property of the current cursor
 	 * that tells us how to interpret the passed-in intspec. If there
@@ -126,9 +126,9 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
 		goto fail;
 	}
 
-	pr_debug("of_irq_map_raw: ipar=%s, size=%d\n", ipar->full_name, intsize);
+	pr_debug("of_irq_parse_raw: ipar=%s, size=%d\n", of_node_full_name(ipar), intsize);
 
-	if (ointsize != intsize)
+	if (out_irq->args_count != intsize)
 		return -EINVAL;
 
 	/* Look for this #address-cells. We have to implement the old linux
@@ -147,6 +147,16 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
 
 	pr_debug(" -> addrsize=%d\n", addrsize);
 
+	/* Range check so that the temporary buffer doesn't overflow */
+	if (WARN_ON(addrsize + intsize > MAX_PHANDLE_ARGS))
+		goto fail;
+
+	/* Precalculate the match array - this simplifies match loop */
+	for (i = 0; i < addrsize; i++)
+		initial_match_array[i] = addr ? addr[i] : 0;
+	for (i = 0; i < intsize; i++)
+		initial_match_array[addrsize + i] = cpu_to_be32(out_irq->args[i]);
+
 	/* Now start the actual "proper" walk of the interrupt tree */
 	while (ipar != NULL) {
 		/* Now check if cursor is an interrupt-controller and if it is
@@ -155,15 +165,19 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
 		if (of_get_property(ipar, "interrupt-controller", NULL) !=
 				NULL) {
 			pr_debug(" -> got it !\n");
-			for (i = 0; i < intsize; i++)
-				out_irq->specifier[i] =
-						of_read_number(intspec +i, 1);
-			out_irq->size = intsize;
-			out_irq->controller = ipar;
 			of_node_put(old);
 			return 0;
 		}
 
+		/*
+		 * interrupt-map parsing does not work without a reg
+		 * property when #address-cells != 0
+		 */
+		if (addrsize && !addr) {
+			pr_debug(" -> no reg passed in when needed !\n");
+			goto fail;
+		}
+
 		/* Now look for an interrupt-map */
 		imap = of_get_property(ipar, "interrupt-map", &imaplen);
 		/* No interrupt map, check for an interrupt parent */
@@ -176,34 +190,16 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
 
 		/* Look for a mask */
 		imask = of_get_property(ipar, "interrupt-map-mask", NULL);
-
-		/* If we were passed no "reg" property and we attempt to parse
-		 * an interrupt-map, then #address-cells must be 0.
-		 * Fail if it's not.
-		 */
-		if (addr == NULL && addrsize != 0) {
-			pr_debug(" -> no reg passed in when needed !\n");
-			goto fail;
-		}
+		if (!imask)
+			imask = dummy_imask;
 
 		/* Parse interrupt-map */
 		match = 0;
 		while (imaplen > (addrsize + intsize + 1) && !match) {
 			/* Compare specifiers */
 			match = 1;
-			for (i = 0; i < addrsize && match; ++i) {
-				__be32 mask = imask ? imask[i]
-						    : cpu_to_be32(0xffffffffu);
-				match = ((addr[i] ^ imap[i]) & mask) == 0;
-			}
-			for (; i < (addrsize + intsize) && match; ++i) {
-				__be32 mask = imask ? imask[i]
-						    : cpu_to_be32(0xffffffffu);
-				match =
-				   ((intspec[i-addrsize] ^ imap[i]) & mask) == 0;
-			}
-			imap += addrsize + intsize;
-			imaplen -= addrsize + intsize;
+			for (i = 0; i < (addrsize + intsize); i++, imaplen--)
+				match &= !((match_array[i] ^ *imap++) & imask[i]);
 
 			pr_debug(" -> match=%d (imaplen=%d)\n", match, imaplen);
 
@@ -237,6 +233,8 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
 			    newintsize, newaddrsize);
 
 			/* Check for malformed properties */
+			if (WARN_ON(newaddrsize + newintsize > MAX_PHANDLE_ARGS))
+				goto fail;
 			if (imaplen < (newaddrsize + newintsize))
 				goto fail;
 
@@ -248,12 +246,18 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
 		if (!match)
 			goto fail;
 
-		of_node_put(old);
-		old = of_node_get(newpar);
+		/*
+		 * Successfully parsed an interrrupt-map translation; copy new
+		 * interrupt specifier into the out_irq structure
+		 */
+		of_node_put(out_irq->np);
+		out_irq->np = of_node_get(newpar);
+
+		match_array = imap - newaddrsize - newintsize;
+		for (i = 0; i < newintsize; i++)
+			out_irq->args[i] = be32_to_cpup(imap - newintsize + i);
+		out_irq->args_count = intsize = newintsize;
 		addrsize = newaddrsize;
-		intsize = newintsize;
-		intspec = imap - intsize;
-		addr = intspec - addrsize;
 
 	skiplevel:
 		/* Iterate again with new parent */
@@ -264,46 +268,53 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
 	}
  fail:
 	of_node_put(ipar);
-	of_node_put(old);
+	of_node_put(out_irq->np);
 	of_node_put(newpar);
 
 	return -EINVAL;
 }
-EXPORT_SYMBOL_GPL(of_irq_map_raw);
+EXPORT_SYMBOL_GPL(of_irq_parse_raw);
 
 /**
- * of_irq_map_one - Resolve an interrupt for a device
+ * of_irq_parse_one - Resolve an interrupt for a device
  * @device: the device whose interrupt is to be resolved
  * @index: index of the interrupt to resolve
  * @out_irq: structure of_irq filled by this function
  *
- * This function resolves an interrupt, walking the tree, for a given
- * device-tree node. It's the high level pendant to of_irq_map_raw().
+ * This function resolves an interrupt for a node by walking the interrupt tree,
+ * finding which interrupt controller node it is attached to, and returning the
+ * interrupt specifier that can be used to retrieve a Linux IRQ number.
  */
-int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq)
+int of_irq_parse_one(struct device_node *device, int index, struct of_phandle_args *out_irq)
 {
 	struct device_node *p;
 	const __be32 *intspec, *tmp, *addr;
 	u32 intsize, intlen;
-	int res = -EINVAL;
+	int i, res = -EINVAL;
 
-	pr_debug("of_irq_map_one: dev=%s, index=%d\n", device->full_name, index);
+	pr_debug("of_irq_parse_one: dev=%s, index=%d\n", of_node_full_name(device), index);
 
 	/* OldWorld mac stuff is "special", handle out of line */
 	if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
-		return of_irq_map_oldworld(device, index, out_irq);
+		return of_irq_parse_oldworld(device, index, out_irq);
+
+	/* Get the reg property (if any) */
+	addr = of_get_property(device, "reg", NULL);
 
 	/* Get the interrupts property */
 	intspec = of_get_property(device, "interrupts", &intlen);
-	if (intspec == NULL)
-		return -EINVAL;
+	if (intspec == NULL) {
+		/* Try the new-style interrupts-extended */
+		res = of_parse_phandle_with_args(device, "interrupts-extended",
+						"#interrupt-cells", index, out_irq);
+		if (res)
+			return -EINVAL;
+		return of_irq_parse_raw(addr, out_irq);
+	}
 	intlen /= sizeof(*intspec);
 
 	pr_debug(" intspec=%d intlen=%d\n", be32_to_cpup(intspec), intlen);
 
-	/* Get the reg property (if any) */
-	addr = of_get_property(device, "reg", NULL);
-
 	/* Look for the interrupt parent. */
 	p = of_irq_find_parent(device);
 	if (p == NULL)
@@ -321,14 +332,20 @@ int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq
 	if ((index + 1) * intsize > intlen)
 		goto out;
 
-	/* Get new specifier and map it */
-	res = of_irq_map_raw(p, intspec + index * intsize, intsize,
-			     addr, out_irq);
+	/* Copy intspec into irq structure */
+	intspec += index * intsize;
+	out_irq->np = p;
+	out_irq->args_count = intsize;
+	for (i = 0; i < intsize; i++)
+		out_irq->args[i] = be32_to_cpup(intspec++);
+
+	/* Check if there are any interrupt-map translations to process */
+	res = of_irq_parse_raw(addr, out_irq);
  out:
 	of_node_put(p);
 	return res;
 }
-EXPORT_SYMBOL_GPL(of_irq_map_one);
+EXPORT_SYMBOL_GPL(of_irq_parse_one);
 
 /**
  * of_irq_to_resource - Decode a node's IRQ and return it as a resource
@@ -345,6 +362,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
 	if (r && irq) {
 		const char *name = NULL;
 
+		memset(r, 0, sizeof(*r));
 		/*
 		 * Get optional "interrupts-names" property to add a name
 		 * to the resource.
@@ -353,8 +371,8 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
 					      &name);
 
 		r->start = r->end = irq;
-		r->flags = IORESOURCE_IRQ;
-		r->name = name ? name : dev->full_name;
+		r->flags = IORESOURCE_IRQ | irqd_get_trigger_type(irq_get_irq_data(irq));
+		r->name = name ? name : of_node_full_name(dev);
 	}
 
 	return irq;
@@ -367,9 +385,10 @@ EXPORT_SYMBOL_GPL(of_irq_to_resource);
  */
 int of_irq_count(struct device_node *dev)
 {
+	struct of_phandle_args irq;
 	int nr = 0;
 
-	while (of_irq_to_resource(dev, nr, NULL))
+	while (of_irq_parse_one(dev, nr, &irq) == 0)
 		nr++;
 
 	return nr;
@@ -482,8 +501,9 @@ void __init of_irq_init(const struct of_device_id *matches)
 		}
 
 		/* Get the next pending parent that might have children */
-		desc = list_first_entry(&intc_parent_list, typeof(*desc), list);
-		if (list_empty(&intc_parent_list) || !desc) {
+		desc = list_first_entry_or_null(&intc_parent_list,
+						typeof(*desc), list);
+		if (!desc) {
 			pr_err("of_irq_init: children remain, but no parents\n");
 			break;
 		}
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index d5a57a9e329c..66d55917dc92 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -14,6 +14,7 @@
 #include <linux/netdevice.h>
 #include <linux/err.h>
 #include <linux/phy.h>
+#include <linux/phy_fixed.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/of_mdio.h>
@@ -215,10 +216,6 @@ EXPORT_SYMBOL(of_phy_connect);
  * @dev: pointer to net_device claiming the phy
  * @hndlr: Link state callback for the network device
  * @iface: PHY data interface type
- *
- * This function is a temporary stop-gap and will be removed soon.  It is
- * only to support the fs_enet, ucc_geth and gianfar Ethernet drivers.  Do
- * not call this function from new drivers.
  */
 struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
 					     void (*hndlr)(struct net_device *),
@@ -247,3 +244,34 @@ struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
 	return IS_ERR(phy) ? NULL : phy;
 }
 EXPORT_SYMBOL(of_phy_connect_fixed_link);
+
+#if defined(CONFIG_FIXED_PHY)
+/**
+ * of_phy_register_fixed_link - Parse fixed-link property and register a dummy phy
+ * @np: pointer to the OF device node that contains the "fixed-link"
+ * property for which a dummy phy should be registered.
+ */
+#define FIXED_LINK_PROPERTIES_COUNT 5
+int of_phy_register_fixed_link(struct device_node *np)
+{
+	struct fixed_phy_status status = {};
+	u32 fixed_link_props[FIXED_LINK_PROPERTIES_COUNT];
+	int ret;
+
+	ret = of_property_read_u32_array(np, "fixed-link",
+					 fixed_link_props,
+					 FIXED_LINK_PROPERTIES_COUNT);
+	if (ret < 0)
+		return ret;
+
+	status.link       = 1;
+	status.duplex     = fixed_link_props[1];
+	status.speed      = fixed_link_props[2];
+	status.pause      = fixed_link_props[3];
+	status.asym_pause = fixed_link_props[4];
+
+	return fixed_phy_add(PHY_POLL, fixed_link_props[0],
+			     &status);
+}
+EXPORT_SYMBOL(of_phy_register_fixed_link);
+#endif
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index 13e37e2d8ec1..e5ca00893c0c 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -5,14 +5,15 @@
 #include <asm/prom.h>
 
 static inline int __of_pci_pci_compare(struct device_node *node,
-				       unsigned int devfn)
+				       unsigned int data)
 {
-	unsigned int size;
-	const __be32 *reg = of_get_property(node, "reg", &size);
+	int devfn;
 
-	if (!reg || size < 5 * sizeof(__be32))
+	devfn = of_pci_get_devfn(node);
+	if (devfn < 0)
 		return 0;
-	return ((be32_to_cpup(&reg[0]) >> 8) & 0xff) == devfn;
+
+	return devfn == data;
 }
 
 struct device_node *of_pci_find_child_device(struct device_node *parent,
@@ -40,3 +41,96 @@ struct device_node *of_pci_find_child_device(struct device_node *parent,
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(of_pci_find_child_device);
+
+/**
+ * of_pci_get_devfn() - Get device and function numbers for a device node
+ * @np: device node
+ *
+ * Parses a standard 5-cell PCI resource and returns an 8-bit value that can
+ * be passed to the PCI_SLOT() and PCI_FUNC() macros to extract the device
+ * and function numbers respectively. On error a negative error code is
+ * returned.
+ */
+int of_pci_get_devfn(struct device_node *np)
+{
+	unsigned int size;
+	const __be32 *reg;
+
+	reg = of_get_property(np, "reg", &size);
+
+	if (!reg || size < 5 * sizeof(__be32))
+		return -EINVAL;
+
+	return (be32_to_cpup(reg) >> 8) & 0xff;
+}
+EXPORT_SYMBOL_GPL(of_pci_get_devfn);
+
+/**
+ * of_pci_parse_bus_range() - parse the bus-range property of a PCI device
+ * @node: device node
+ * @res: address to a struct resource to return the bus-range
+ *
+ * Returns 0 on success or a negative error-code on failure.
+ */
+int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
+{
+	const __be32 *values;
+	int len;
+
+	values = of_get_property(node, "bus-range", &len);
+	if (!values || len < sizeof(*values) * 2)
+		return -EINVAL;
+
+	res->name = node->name;
+	res->start = be32_to_cpup(values++);
+	res->end = be32_to_cpup(values);
+	res->flags = IORESOURCE_BUS;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(of_pci_parse_bus_range);
+
+#ifdef CONFIG_PCI_MSI
+
+static LIST_HEAD(of_pci_msi_chip_list);
+static DEFINE_MUTEX(of_pci_msi_chip_mutex);
+
+int of_pci_msi_chip_add(struct msi_chip *chip)
+{
+	if (!of_property_read_bool(chip->of_node, "msi-controller"))
+		return -EINVAL;
+
+	mutex_lock(&of_pci_msi_chip_mutex);
+	list_add(&chip->list, &of_pci_msi_chip_list);
+	mutex_unlock(&of_pci_msi_chip_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(of_pci_msi_chip_add);
+
+void of_pci_msi_chip_remove(struct msi_chip *chip)
+{
+	mutex_lock(&of_pci_msi_chip_mutex);
+	list_del(&chip->list);
+	mutex_unlock(&of_pci_msi_chip_mutex);
+}
+EXPORT_SYMBOL_GPL(of_pci_msi_chip_remove);
+
+struct msi_chip *of_pci_find_msi_chip_by_node(struct device_node *of_node)
+{
+	struct msi_chip *c;
+
+	mutex_lock(&of_pci_msi_chip_mutex);
+	list_for_each_entry(c, &of_pci_msi_chip_list, list) {
+		if (c->of_node == of_node) {
+			mutex_unlock(&of_pci_msi_chip_mutex);
+			return c;
+		}
+	}
+	mutex_unlock(&of_pci_msi_chip_mutex);
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(of_pci_find_msi_chip_by_node);
+
+#endif /* CONFIG_PCI_MSI */
diff --git a/drivers/of/of_pci_irq.c b/drivers/of/of_pci_irq.c
index 677053813211..303afebc247a 100644
--- a/drivers/of/of_pci_irq.c
+++ b/drivers/of/of_pci_irq.c
@@ -5,7 +5,7 @@
 #include <asm/prom.h>
 
 /**
- * of_irq_map_pci - Resolve the interrupt for a PCI device
+ * of_irq_parse_pci - Resolve the interrupt for a PCI device
  * @pdev:       the device whose interrupt is to be resolved
  * @out_irq:    structure of_irq filled by this function
  *
@@ -15,7 +15,7 @@
  * PCI tree until an device-node is found, at which point it will finish
  * resolving using the OF tree walking.
  */
-int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq)
+int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
 {
 	struct device_node *dn, *ppnode;
 	struct pci_dev *ppdev;
@@ -30,7 +30,7 @@ int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq)
 	 */
 	dn = pci_device_to_OF_node(pdev);
 	if (dn) {
-		rc = of_irq_map_one(dn, 0, out_irq);
+		rc = of_irq_parse_one(dn, 0, out_irq);
 		if (!rc)
 			return rc;
 	}
@@ -85,9 +85,12 @@ int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq)
 		pdev = ppdev;
 	}
 
+	out_irq->np = ppnode;
+	out_irq->args_count = 1;
+	out_irq->args[0] = lspec;
 	lspec_be = cpu_to_be32(lspec);
 	laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
-	laddr[1]  = laddr[2] = cpu_to_be32(0);
-	return of_irq_map_raw(ppnode, &lspec_be, 1, laddr, out_irq);
+	laddr[1] = laddr[2] = cpu_to_be32(0);
+	return of_irq_parse_raw(laddr, out_irq);
 }
-EXPORT_SYMBOL_GPL(of_irq_map_pci);
+EXPORT_SYMBOL_GPL(of_irq_parse_pci);
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
index 0eb5c38b4e07..e21012bde639 100644
--- a/drivers/of/selftest.c
+++ b/drivers/of/selftest.c
@@ -9,18 +9,24 @@
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_irq.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/device.h>
 
-static bool selftest_passed = true;
+static struct selftest_results {
+	int passed;
+	int failed;
+} selftest_results;
+
 #define selftest(result, fmt, ...) { \
 	if (!(result)) { \
-		pr_err("FAIL %s:%i " fmt, __FILE__, __LINE__, ##__VA_ARGS__); \
-		selftest_passed = false; \
+		selftest_results.failed++; \
+		pr_err("FAIL %s():%i " fmt, __func__, __LINE__, ##__VA_ARGS__); \
 	} else { \
-		pr_info("pass %s:%i\n", __FILE__, __LINE__); \
+		selftest_results.passed++; \
+		pr_debug("pass %s():%i\n", __func__, __LINE__); \
 	} \
 }
 
@@ -131,7 +137,6 @@ static void __init of_selftest_property_match_string(void)
 	struct device_node *np;
 	int rc;
 
-	pr_info("start\n");
 	np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
 	if (!np) {
 		pr_err("No testcase data in device tree\n");
@@ -154,6 +159,147 @@ static void __init of_selftest_property_match_string(void)
 	selftest(rc == -EILSEQ, "unterminated string; rc=%i", rc);
 }
 
+static void __init of_selftest_parse_interrupts(void)
+{
+	struct device_node *np;
+	struct of_phandle_args args;
+	int i, rc;
+
+	np = of_find_node_by_path("/testcase-data/interrupts/interrupts0");
+	if (!np) {
+		pr_err("missing testcase data\n");
+		return;
+	}
+
+	for (i = 0; i < 4; i++) {
+		bool passed = true;
+		args.args_count = 0;
+		rc = of_irq_parse_one(np, i, &args);
+
+		passed &= !rc;
+		passed &= (args.args_count == 1);
+		passed &= (args.args[0] == (i + 1));
+
+		selftest(passed, "index %i - data error on node %s rc=%i\n",
+			 i, args.np->full_name, rc);
+	}
+	of_node_put(np);
+
+	np = of_find_node_by_path("/testcase-data/interrupts/interrupts1");
+	if (!np) {
+		pr_err("missing testcase data\n");
+		return;
+	}
+
+	for (i = 0; i < 4; i++) {
+		bool passed = true;
+		args.args_count = 0;
+		rc = of_irq_parse_one(np, i, &args);
+
+		/* Test the values from tests-phandle.dtsi */
+		switch (i) {
+		case 0:
+			passed &= !rc;
+			passed &= (args.args_count == 1);
+			passed &= (args.args[0] == 9);
+			break;
+		case 1:
+			passed &= !rc;
+			passed &= (args.args_count == 3);
+			passed &= (args.args[0] == 10);
+			passed &= (args.args[1] == 11);
+			passed &= (args.args[2] == 12);
+			break;
+		case 2:
+			passed &= !rc;
+			passed &= (args.args_count == 2);
+			passed &= (args.args[0] == 13);
+			passed &= (args.args[1] == 14);
+			break;
+		case 3:
+			passed &= !rc;
+			passed &= (args.args_count == 2);
+			passed &= (args.args[0] == 15);
+			passed &= (args.args[1] == 16);
+			break;
+		default:
+			passed = false;
+		}
+		selftest(passed, "index %i - data error on node %s rc=%i\n",
+			 i, args.np->full_name, rc);
+	}
+	of_node_put(np);
+}
+
+static void __init of_selftest_parse_interrupts_extended(void)
+{
+	struct device_node *np;
+	struct of_phandle_args args;
+	int i, rc;
+
+	np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
+	if (!np) {
+		pr_err("missing testcase data\n");
+		return;
+	}
+
+	for (i = 0; i < 7; i++) {
+		bool passed = true;
+		rc = of_irq_parse_one(np, i, &args);
+
+		/* Test the values from tests-phandle.dtsi */
+		switch (i) {
+		case 0:
+			passed &= !rc;
+			passed &= (args.args_count == 1);
+			passed &= (args.args[0] == 1);
+			break;
+		case 1:
+			passed &= !rc;
+			passed &= (args.args_count == 3);
+			passed &= (args.args[0] == 2);
+			passed &= (args.args[1] == 3);
+			passed &= (args.args[2] == 4);
+			break;
+		case 2:
+			passed &= !rc;
+			passed &= (args.args_count == 2);
+			passed &= (args.args[0] == 5);
+			passed &= (args.args[1] == 6);
+			break;
+		case 3:
+			passed &= !rc;
+			passed &= (args.args_count == 1);
+			passed &= (args.args[0] == 9);
+			break;
+		case 4:
+			passed &= !rc;
+			passed &= (args.args_count == 3);
+			passed &= (args.args[0] == 10);
+			passed &= (args.args[1] == 11);
+			passed &= (args.args[2] == 12);
+			break;
+		case 5:
+			passed &= !rc;
+			passed &= (args.args_count == 2);
+			passed &= (args.args[0] == 13);
+			passed &= (args.args[1] == 14);
+			break;
+		case 6:
+			passed &= !rc;
+			passed &= (args.args_count == 1);
+			passed &= (args.args[0] == 15);
+			break;
+		default:
+			passed = false;
+		}
+
+		selftest(passed, "index %i - data error on node %s rc=%i\n",
+			 i, args.np->full_name, rc);
+	}
+	of_node_put(np);
+}
+
 static int __init of_selftest(void)
 {
 	struct device_node *np;
@@ -168,7 +314,10 @@ static int __init of_selftest(void)
 	pr_info("start of selftest - you will see error messages\n");
 	of_selftest_parse_phandle_with_args();
 	of_selftest_property_match_string();
-	pr_info("end of selftest - %s\n", selftest_passed ? "PASS" : "FAIL");
+	of_selftest_parse_interrupts();
+	of_selftest_parse_interrupts_extended();
+	pr_info("end of selftest - %i passed, %i failed\n",
+		selftest_results.passed, selftest_results.failed);
 	return 0;
 }
 late_initcall(of_selftest);
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 6d51aa68ec7a..3d4c061347c7 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -1,13 +1,9 @@
 #
 # PCI configuration
 #
-config ARCH_SUPPORTS_MSI
-	bool
-
 config PCI_MSI
 	bool "Message Signaled Interrupts (MSI and MSI-X)"
 	depends on PCI
-	depends on ARCH_SUPPORTS_MSI
 	help
 	   This allows device drivers to enable MSI (Message Signaled
 	   Interrupts).  Message Signaled Interrupts enable a device to
@@ -119,3 +115,5 @@ config PCI_IOAPIC
 config PCI_LABEL
 	def_bool y if (DMI || ACPI)
 	select NLS
+
+source "drivers/pci/host/Kconfig"
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 0c3efcffa83b..6ebf5bf8e7a7 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -67,3 +67,6 @@ obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
 obj-$(CONFIG_OF) += of.o
 
 ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
+
+# PCI host controller drivers
+obj-y += host/
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
new file mode 100644
index 000000000000..6918fbc92c02
--- /dev/null
+++ b/drivers/pci/host/Kconfig
@@ -0,0 +1,8 @@
+menu "PCI host controller drivers"
+	depends on PCI
+
+config PCI_MVEBU
+	bool "Marvell EBU PCIe controller"
+	depends on ARCH_MVEBU
+
+endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
new file mode 100644
index 000000000000..5ea2d8bf013a
--- /dev/null
+++ b/drivers/pci/host/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
new file mode 100644
index 000000000000..252418cee730
--- /dev/null
+++ b/drivers/pci/host/pci-mvebu.c
@@ -0,0 +1,1006 @@
+/*
+ * PCIe driver for Marvell Armada 370 and Armada XP SoCs
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/mbus.h>
+#include <linux/msi.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+/*
+ * PCIe unit register offsets.
+ */
+#define PCIE_DEV_ID_OFF		0x0000
+#define PCIE_CMD_OFF		0x0004
+#define PCIE_DEV_REV_OFF	0x0008
+#define PCIE_BAR_LO_OFF(n)	(0x0010 + ((n) << 3))
+#define PCIE_BAR_HI_OFF(n)	(0x0014 + ((n) << 3))
+#define PCIE_HEADER_LOG_4_OFF	0x0128
+#define PCIE_BAR_CTRL_OFF(n)	(0x1804 + (((n) - 1) * 4))
+#define PCIE_WIN04_CTRL_OFF(n)	(0x1820 + ((n) << 4))
+#define PCIE_WIN04_BASE_OFF(n)	(0x1824 + ((n) << 4))
+#define PCIE_WIN04_REMAP_OFF(n)	(0x182c + ((n) << 4))
+#define PCIE_WIN5_CTRL_OFF	0x1880
+#define PCIE_WIN5_BASE_OFF	0x1884
+#define PCIE_WIN5_REMAP_OFF	0x188c
+#define PCIE_CONF_ADDR_OFF	0x18f8
+#define  PCIE_CONF_ADDR_EN		0x80000000
+#define  PCIE_CONF_REG(r)		((((r) & 0xf00) << 16) | ((r) & 0xfc))
+#define  PCIE_CONF_BUS(b)		(((b) & 0xff) << 16)
+#define  PCIE_CONF_DEV(d)		(((d) & 0x1f) << 11)
+#define  PCIE_CONF_FUNC(f)		(((f) & 0x7) << 8)
+#define  PCIE_CONF_ADDR(bus, devfn, where) \
+	(PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn))    | \
+	 PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \
+	 PCIE_CONF_ADDR_EN)
+#define PCIE_CONF_DATA_OFF	0x18fc
+#define PCIE_MASK_OFF		0x1910
+#define  PCIE_MASK_ENABLE_INTS          0x0f000000
+#define PCIE_CTRL_OFF		0x1a00
+#define  PCIE_CTRL_X1_MODE		0x0001
+#define PCIE_STAT_OFF		0x1a04
+#define  PCIE_STAT_BUS                  0xff00
+#define  PCIE_STAT_DEV                  0x1f0000
+#define  PCIE_STAT_LINK_DOWN		BIT(0)
+#define PCIE_DEBUG_CTRL         0x1a60
+#define  PCIE_DEBUG_SOFT_RESET		BIT(20)
+
+/*
+ * This product ID is registered by Marvell, and used when the Marvell
+ * SoC is not the root complex, but an endpoint on the PCIe bus. It is
+ * therefore safe to re-use this PCI ID for our emulated PCI-to-PCI
+ * bridge.
+ */
+#define MARVELL_EMULATED_PCI_PCI_BRIDGE_ID 0x7846
+#define MVEBU_MAX_PCIE_PORTS	0x4
+
+static u32 nports;
+static u32 mbus_pcie_save[MVEBU_MAX_PCIE_PORTS];
+struct mvebu_pcie_port *port_bak[MVEBU_MAX_PCIE_PORTS];
+
+/* PCI configuration space of a PCI-to-PCI bridge */
+struct mvebu_sw_pci_bridge {
+	u16 vendor;
+	u16 device;
+	u16 command;
+	u16 class;
+	u8 interface;
+	u8 revision;
+	u8 bist;
+	u8 header_type;
+	u8 latency_timer;
+	u8 cache_line_size;
+	u32 bar[2];
+	u8 primary_bus;
+	u8 secondary_bus;
+	u8 subordinate_bus;
+	u8 secondary_latency_timer;
+	u8 iobase;
+	u8 iolimit;
+	u16 secondary_status;
+	u16 membase;
+	u16 memlimit;
+	u16 iobaseupper;
+	u16 iolimitupper;
+	u8 cappointer;
+	u8 reserved1;
+	u16 reserved2;
+	u32 romaddr;
+	u8 intline;
+	u8 intpin;
+	u16 bridgectrl;
+};
+
+struct mvebu_pcie_port;
+
+/* Structure representing all PCIe interfaces */
+struct mvebu_pcie {
+	struct platform_device *pdev;
+	struct mvebu_pcie_port *ports;
+	struct msi_chip *msi;
+	struct resource io;
+	struct resource realio;
+	struct resource mem;
+	struct resource busn;
+	int nports;
+};
+
+/* Structure representing one PCIe interface */
+struct mvebu_pcie_port {
+	char *name;
+	void __iomem *base;
+	spinlock_t conf_lock;
+	int haslink;
+	u32 port;
+	u32 lane;
+	int devfn;
+	unsigned int mem_target;
+	unsigned int mem_attr;
+	unsigned int io_target;
+	unsigned int io_attr;
+	struct clk *clk;
+	struct mvebu_sw_pci_bridge bridge;
+	struct device_node *dn;
+	struct mvebu_pcie *pcie;
+	phys_addr_t memwin_base;
+	size_t memwin_size;
+	phys_addr_t iowin_base;
+	size_t iowin_size;
+};
+
+static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
+{
+	return !(readl(port->base + PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
+}
+
+static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
+{
+	u32 stat;
+
+	stat = readl(port->base + PCIE_STAT_OFF);
+	stat &= ~PCIE_STAT_BUS;
+	stat |= nr << 8;
+	writel(stat, port->base + PCIE_STAT_OFF);
+}
+
+static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
+{
+	u32 stat;
+
+	stat = readl(port->base + PCIE_STAT_OFF);
+	stat &= ~PCIE_STAT_DEV;
+	stat |= nr << 16;
+	writel(stat, port->base + PCIE_STAT_OFF);
+}
+
+/*
+ * Setup PCIE BARs and Address Decode Wins:
+ * BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks
+ * WIN[0-3] -> DRAM bank[0-3]
+ */
+static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
+{
+	const struct mbus_dram_target_info *dram;
+	u32 size;
+	int i;
+
+	dram = mv_mbus_dram_info();
+
+	/* First, disable and clear BARs and windows. */
+	for (i = 1; i < 3; i++) {
+		writel(0, port->base + PCIE_BAR_CTRL_OFF(i));
+		writel(0, port->base + PCIE_BAR_LO_OFF(i));
+		writel(0, port->base + PCIE_BAR_HI_OFF(i));
+	}
+
+	for (i = 0; i < 5; i++) {
+		writel(0, port->base + PCIE_WIN04_CTRL_OFF(i));
+		writel(0, port->base + PCIE_WIN04_BASE_OFF(i));
+		writel(0, port->base + PCIE_WIN04_REMAP_OFF(i));
+	}
+
+	writel(0, port->base + PCIE_WIN5_CTRL_OFF);
+	writel(0, port->base + PCIE_WIN5_BASE_OFF);
+	writel(0, port->base + PCIE_WIN5_REMAP_OFF);
+
+	/* Setup windows for DDR banks.  Count total DDR size on the fly. */
+	size = 0;
+	for (i = 0; i < dram->num_cs; i++) {
+		const struct mbus_dram_window *cs = dram->cs + i;
+
+		writel(cs->base & 0xffff0000,
+		       port->base + PCIE_WIN04_BASE_OFF(i));
+		writel(0, port->base + PCIE_WIN04_REMAP_OFF(i));
+		writel(((cs->size - 1) & 0xffff0000) |
+			(cs->mbus_attr << 8) |
+			(dram->mbus_dram_target_id << 4) | 1,
+		       port->base + PCIE_WIN04_CTRL_OFF(i));
+
+		size += cs->size;
+	}
+
+	/* Round up 'size' to the nearest power of two. */
+	if ((size & (size - 1)) != 0)
+		size = 1 << fls(size);
+
+	/* Setup BAR[1] to all DRAM banks. */
+	writel(dram->cs[0].base, port->base + PCIE_BAR_LO_OFF(1));
+	writel(0, port->base + PCIE_BAR_HI_OFF(1));
+	writel(((size - 1) & 0xffff0000) | 1,
+	       port->base + PCIE_BAR_CTRL_OFF(1));
+}
+
+static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
+{
+	u16 cmd;
+	u32 mask;
+
+	/* Point PCIe unit MBUS decode windows to DRAM space. */
+	mvebu_pcie_setup_wins(port);
+
+	/* Master + slave enable. */
+	cmd = readw(port->base + PCIE_CMD_OFF);
+	cmd |= PCI_COMMAND_IO;
+	cmd |= PCI_COMMAND_MEMORY;
+	cmd |= PCI_COMMAND_MASTER;
+	writew(cmd, port->base + PCIE_CMD_OFF);
+
+	/* Enable interrupt lines A-D. */
+	mask = readl(port->base + PCIE_MASK_OFF);
+	mask |= PCIE_MASK_ENABLE_INTS;
+	writel(mask, port->base + PCIE_MASK_OFF);
+}
+
+static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port,
+				 struct pci_bus *bus,
+				 u32 devfn, int where, int size, u32 *val)
+{
+	writel(PCIE_CONF_ADDR(bus->number, devfn, where),
+	       port->base + PCIE_CONF_ADDR_OFF);
+
+	*val = readl(port->base + PCIE_CONF_DATA_OFF);
+
+	if (size == 1)
+		*val = (*val >> (8 * (where & 3))) & 0xff;
+	else if (size == 2)
+		*val = (*val >> (8 * (where & 3))) & 0xffff;
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
+				 struct pci_bus *bus,
+				 u32 devfn, int where, int size, u32 val)
+{
+	int ret = PCIBIOS_SUCCESSFUL;
+
+	writel(PCIE_CONF_ADDR(bus->number, devfn, where),
+	       port->base + PCIE_CONF_ADDR_OFF);
+
+	if (size == 4)
+		writel(val, port->base + PCIE_CONF_DATA_OFF);
+	else if (size == 2)
+		writew(val, port->base + PCIE_CONF_DATA_OFF + (where & 3));
+	else if (size == 1)
+		writeb(val, port->base + PCIE_CONF_DATA_OFF + (where & 3));
+	else
+		ret = PCIBIOS_BAD_REGISTER_NUMBER;
+
+	return ret;
+}
+
+static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
+{
+	phys_addr_t iobase;
+
+	/* Are the new iobase/iolimit values invalid? */
+	if (port->bridge.iolimit < port->bridge.iobase ||
+	    port->bridge.iolimitupper < port->bridge.iobaseupper) {
+
+		/* If a window was configured, remove it */
+		if (port->iowin_base) {
+			mvebu_mbus_del_window(port->iowin_base,
+					      port->iowin_size);
+			port->iowin_base = 0;
+			port->iowin_size = 0;
+		}
+
+		return;
+	}
+
+	/*
+	 * We read the PCI-to-PCI bridge emulated registers, and
+	 * calculate the base address and size of the address decoding
+	 * window to setup, according to the PCI-to-PCI bridge
+	 * specifications. iobase is the bus address, port->iowin_base
+	 * is the CPU address.
+	 */
+	iobase = ((port->bridge.iobase & 0xF0) << 8) |
+		(port->bridge.iobaseupper << 16);
+	port->iowin_base = port->pcie->io.start + iobase;
+	port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
+			    (port->bridge.iolimitupper << 16)) -
+			    iobase);
+
+	mvebu_mbus_add_window_remap_by_id(port->io_target, port->io_attr,
+					  port->iowin_base, port->iowin_size,
+					  iobase);
+
+	pci_ioremap_io(iobase, port->iowin_base);
+}
+
+static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
+{
+	/* Are the new membase/memlimit values invalid? */
+	if (port->bridge.memlimit < port->bridge.membase) {
+
+		/* If a window was configured, remove it */
+		if (port->memwin_base) {
+			mvebu_mbus_del_window(port->memwin_base,
+					      port->memwin_size);
+			port->memwin_base = 0;
+			port->memwin_size = 0;
+		}
+
+		return;
+	}
+
+	/*
+	 * We read the PCI-to-PCI bridge emulated registers, and
+	 * calculate the base address and size of the address decoding
+	 * window to setup, according to the PCI-to-PCI bridge
+	 * specifications.
+	 */
+	port->memwin_base  = ((port->bridge.membase & 0xFFF0) << 16);
+	port->memwin_size  =
+		(((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
+		port->memwin_base;
+
+	mvebu_mbus_add_window_by_id(port->mem_target, port->mem_attr,
+				    port->memwin_base, port->memwin_size);
+}
+
+/*
+ * Initialize the configuration space of the PCI-to-PCI bridge
+ * associated with the given PCIe interface.
+ */
+static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port)
+{
+	struct mvebu_sw_pci_bridge *bridge = &port->bridge;
+
+	memset(bridge, 0, sizeof(struct mvebu_sw_pci_bridge));
+
+	bridge->class = PCI_CLASS_BRIDGE_PCI;
+	bridge->vendor = PCI_VENDOR_ID_MARVELL;
+	bridge->device = MARVELL_EMULATED_PCI_PCI_BRIDGE_ID;
+	bridge->header_type = PCI_HEADER_TYPE_BRIDGE;
+	bridge->cache_line_size = 0x10;
+
+	/* We support 32 bits I/O addressing */
+	bridge->iobase = PCI_IO_RANGE_TYPE_32;
+	bridge->iolimit = PCI_IO_RANGE_TYPE_32;
+}
+
+/*
+ * Read the configuration space of the PCI-to-PCI bridge associated to
+ * the given PCIe interface.
+ */
+static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
+				  unsigned int where, int size, u32 *value)
+{
+	struct mvebu_sw_pci_bridge *bridge = &port->bridge;
+
+	switch (where & ~3) {
+	case PCI_VENDOR_ID:
+		*value = bridge->device << 16 | bridge->vendor;
+		break;
+
+	case PCI_COMMAND:
+		*value = bridge->command;
+		break;
+
+	case PCI_CLASS_REVISION:
+		*value = bridge->class << 16 | bridge->interface << 8 |
+			 bridge->revision;
+		break;
+
+	case PCI_CACHE_LINE_SIZE:
+		*value = bridge->bist << 24 | bridge->header_type << 16 |
+			 bridge->latency_timer << 8 | bridge->cache_line_size;
+		break;
+
+	case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1:
+		*value = bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4];
+		break;
+
+	case PCI_PRIMARY_BUS:
+		*value = (bridge->secondary_latency_timer << 24 |
+			  bridge->subordinate_bus         << 16 |
+			  bridge->secondary_bus           <<  8 |
+			  bridge->primary_bus);
+		break;
+
+	case PCI_IO_BASE:
+		*value = (bridge->secondary_status << 16 |
+			  bridge->iolimit          <<  8 |
+			  bridge->iobase);
+		break;
+
+	case PCI_MEMORY_BASE:
+		*value = (bridge->memlimit << 16 | bridge->membase);
+		break;
+
+	case PCI_PREF_MEMORY_BASE:
+		*value = 0;
+		break;
+
+	case PCI_IO_BASE_UPPER16:
+		*value = (bridge->iolimitupper << 16 | bridge->iobaseupper);
+		break;
+
+	case PCI_ROM_ADDRESS1:
+		*value = 0;
+		break;
+
+	default:
+		*value = 0xffffffff;
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+	}
+
+	if (size == 2)
+		*value = (*value >> (8 * (where & 3))) & 0xffff;
+	else if (size == 1)
+		*value = (*value >> (8 * (where & 3))) & 0xff;
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/* Write to the PCI-to-PCI bridge configuration space */
+static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
+				     unsigned int where, int size, u32 value)
+{
+	struct mvebu_sw_pci_bridge *bridge = &port->bridge;
+	u32 mask, reg;
+	int err;
+
+	if (size == 4)
+		mask = 0x0;
+	else if (size == 2)
+		mask = ~(0xffff << ((where & 3) * 8));
+	else if (size == 1)
+		mask = ~(0xff << ((where & 3) * 8));
+	else
+		return PCIBIOS_BAD_REGISTER_NUMBER;
+
+	err = mvebu_sw_pci_bridge_read(port, where & ~3, 4, &reg);
+	if (err)
+		return err;
+
+	value = (reg & mask) | value << ((where & 3) * 8);
+
+	switch (where & ~3) {
+	case PCI_COMMAND:
+		bridge->command = value & 0xffff;
+		break;
+
+	case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1:
+		bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4] = value;
+		break;
+
+	case PCI_IO_BASE:
+		/*
+		 * We also keep bit 1 set, it is a read-only bit that
+		 * indicates we support 32 bits addressing for the
+		 * I/O
+		 */
+		bridge->iobase = (value & 0xff) | PCI_IO_RANGE_TYPE_32;
+		bridge->iolimit = ((value >> 8) & 0xff) | PCI_IO_RANGE_TYPE_32;
+		bridge->secondary_status = value >> 16;
+		mvebu_pcie_handle_iobase_change(port);
+		break;
+
+	case PCI_MEMORY_BASE:
+		bridge->membase = value & 0xffff;
+		bridge->memlimit = value >> 16;
+		mvebu_pcie_handle_membase_change(port);
+		break;
+
+	case PCI_IO_BASE_UPPER16:
+		bridge->iobaseupper = value & 0xffff;
+		bridge->iolimitupper = value >> 16;
+		mvebu_pcie_handle_iobase_change(port);
+		break;
+
+	case PCI_PRIMARY_BUS:
+		bridge->primary_bus             = value & 0xff;
+		bridge->secondary_bus           = (value >> 8) & 0xff;
+		bridge->subordinate_bus         = (value >> 16) & 0xff;
+		bridge->secondary_latency_timer = (value >> 24) & 0xff;
+		mvebu_pcie_set_local_bus_nr(port, bridge->secondary_bus);
+		break;
+
+	default:
+		break;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
+{
+	return sys->private_data;
+}
+
+static struct mvebu_pcie_port *
+mvebu_pcie_find_port(struct mvebu_pcie *pcie, struct pci_bus *bus,
+		     int devfn)
+{
+	int i;
+
+	for (i = 0; i < pcie->nports; i++) {
+		struct mvebu_pcie_port *port = &pcie->ports[i];
+		if (bus->number == 0 && port->devfn == devfn)
+			return port;
+		if (bus->number != 0 &&
+		    bus->number >= port->bridge.secondary_bus &&
+		    bus->number <= port->bridge.subordinate_bus)
+			return port;
+	}
+
+	return NULL;
+}
+
+/* PCI configuration space write function */
+static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+			      int where, int size, u32 val)
+{
+	struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
+	struct mvebu_pcie_port *port;
+	unsigned long flags;
+	int ret;
+
+	port = mvebu_pcie_find_port(pcie, bus, devfn);
+	if (!port)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	/* Access the emulated PCI-to-PCI bridge */
+	if (bus->number == 0)
+		return mvebu_sw_pci_bridge_write(port, where, size, val);
+
+	if (!port->haslink)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	/*
+	 * On the secondary bus, we don't want to expose any other
+	 * device than the device physically connected in the PCIe
+	 * slot, visible in slot 0. In slot 1, there's a special
+	 * Marvell device that only makes sense when the Armada is
+	 * used as a PCIe endpoint.
+	 */
+	if (bus->number == port->bridge.secondary_bus &&
+	    PCI_SLOT(devfn) != 0)
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	/* Access the real PCIe interface */
+	spin_lock_irqsave(&port->conf_lock, flags);
+	ret = mvebu_pcie_hw_wr_conf(port, bus, devfn,
+				    where, size, val);
+	spin_unlock_irqrestore(&port->conf_lock, flags);
+
+	return ret;
+}
+
+/* PCI configuration space read function */
+static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+			      int size, u32 *val)
+{
+	struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
+	struct mvebu_pcie_port *port;
+	unsigned long flags;
+	int ret;
+
+	port = mvebu_pcie_find_port(pcie, bus, devfn);
+	if (!port) {
+		*val = 0xffffffff;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	/* Access the emulated PCI-to-PCI bridge */
+	if (bus->number == 0)
+		return mvebu_sw_pci_bridge_read(port, where, size, val);
+
+	if (!port->haslink) {
+		*val = 0xffffffff;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	/*
+	 * On the secondary bus, we don't want to expose any other
+	 * device than the device physically connected in the PCIe
+	 * slot, visible in slot 0. In slot 1, there's a special
+	 * Marvell device that only makes sense when the Armada is
+	 * used as a PCIe endpoint.
+	 */
+	if (bus->number == port->bridge.secondary_bus &&
+	    PCI_SLOT(devfn) != 0) {
+		*val = 0xffffffff;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	/* Access the real PCIe interface */
+	spin_lock_irqsave(&port->conf_lock, flags);
+	ret = mvebu_pcie_hw_rd_conf(port, bus, devfn,
+				    where, size, val);
+	spin_unlock_irqrestore(&port->conf_lock, flags);
+
+	return ret;
+}
+
+static struct pci_ops mvebu_pcie_ops = {
+	.read = mvebu_pcie_rd_conf,
+	.write = mvebu_pcie_wr_conf,
+};
+
+static int __init mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+	struct mvebu_pcie *pcie = sys_to_pcie(sys);
+	int i;
+
+	pci_add_resource_offset(&sys->resources, &pcie->realio, sys->io_offset);
+	pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
+	pci_add_resource(&sys->resources, &pcie->busn);
+
+	for (i = 0; i < pcie->nports; i++) {
+		struct mvebu_pcie_port *port = &pcie->ports[i];
+		if (!port->base)
+			continue;
+		mvebu_pcie_setup_hw(port);
+	}
+
+	return 1;
+}
+
+static int __init mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+	struct of_phandle_args oirq;
+	int ret;
+
+	ret = of_irq_parse_pci(dev, &oirq);
+	if (ret)
+		return ret;
+
+	return irq_create_of_mapping(&oirq);
+}
+
+static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys)
+{
+	struct mvebu_pcie *pcie = sys_to_pcie(sys);
+	struct pci_bus *bus;
+
+	bus = pci_create_root_bus(&pcie->pdev->dev, sys->busnr,
+				  &mvebu_pcie_ops, sys, &sys->resources);
+	if (!bus)
+		return NULL;
+
+	pci_scan_child_bus(bus);
+
+	return bus;
+}
+
+void mvebu_pcie_add_bus(struct pci_bus *bus)
+{
+	struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
+	bus->msi = pcie->msi;
+}
+
+resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
+					  const struct resource *res,
+					  resource_size_t start,
+					  resource_size_t size,
+					  resource_size_t align)
+{
+	if (dev->bus->number != 0)
+		return start;
+
+	/*
+	 * On the PCI-to-PCI bridge side, the I/O windows must have at
+	 * least a 64 KB size and be aligned on their size, and the
+	 * memory windows must have at least a 1 MB size and be
+	 * aligned on their size
+	 */
+	if (res->flags & IORESOURCE_IO)
+		return round_up(start, max((resource_size_t)SZ_64K, size));
+	else if (res->flags & IORESOURCE_MEM)
+		return round_up(start, max((resource_size_t)SZ_1M, size));
+	else
+		return start;
+}
+
+static void __init mvebu_pcie_enable(struct mvebu_pcie *pcie)
+{
+	struct hw_pci hw;
+
+	memset(&hw, 0, sizeof(hw));
+
+	hw.nr_controllers = 1;
+	hw.private_data   = (void **)&pcie;
+	hw.setup          = mvebu_pcie_setup;
+	hw.scan           = mvebu_pcie_scan_bus;
+	hw.map_irq        = mvebu_pcie_map_irq;
+	hw.ops            = &mvebu_pcie_ops;
+	hw.align_resource = mvebu_pcie_align_resource;
+	hw.add_bus        = mvebu_pcie_add_bus;
+
+	pci_common_init(&hw);
+}
+
+/*
+ * Looks up the list of register addresses encoded into the reg =
+ * <...> property for one that matches the given port/lane. Once
+ * found, maps it.
+ */
+static void __iomem * __init
+mvebu_pcie_map_registers(struct platform_device *pdev,
+			 struct device_node *np,
+			 struct mvebu_pcie_port *port)
+{
+	struct resource regs;
+	int ret = 0;
+
+	ret = of_address_to_resource(np, 0, &regs);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return devm_ioremap_resource(&pdev->dev, &regs);
+}
+
+static void __init mvebu_pcie_msi_enable(struct mvebu_pcie *pcie)
+{
+	struct device_node *msi_node;
+
+	msi_node = of_parse_phandle(pcie->pdev->dev.of_node,
+				    "msi-parent", 0);
+	if (!msi_node)
+		return;
+
+	pcie->msi = of_pci_find_msi_chip_by_node(msi_node);
+
+	if (pcie->msi)
+		pcie->msi->dev = &pcie->pdev->dev;
+}
+
+#define DT_FLAGS_TO_TYPE(flags)       (((flags) >> 24) & 0x03)
+#define    DT_TYPE_IO                 0x1
+#define    DT_TYPE_MEM32              0x2
+#define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF)
+#define DT_CPUADDR_TO_ATTR(cpuaddr)   (((cpuaddr) >> 48) & 0xFF)
+
+static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
+			      unsigned long type, int *tgt, int *attr)
+{
+	const int na = 3, ns = 2;
+	const __be32 *range;
+	int rlen, nranges, rangesz, pna, i;
+
+	range = of_get_property(np, "ranges", &rlen);
+	if (!range)
+		return -EINVAL;
+
+	pna = of_n_addr_cells(np);
+	rangesz = pna + na + ns;
+	nranges = rlen / sizeof(__be32) / rangesz;
+
+	for (i = 0; i < nranges; i++) {
+		u32 flags = of_read_number(range, 1);
+		u32 slot = of_read_number(range, 2);
+		u64 cpuaddr = of_read_number(range + na, pna);
+		unsigned long rtype;
+
+		if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
+			rtype = IORESOURCE_IO;
+		else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
+			rtype = IORESOURCE_MEM;
+
+		if (slot == PCI_SLOT(devfn) && type == rtype) {
+			*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
+			*attr = DT_CPUADDR_TO_ATTR(cpuaddr);
+			return 0;
+		}
+
+		range += rangesz;
+	}
+
+	return -ENOENT;
+}
+
+static int mvebu_pcie_suspend(struct platform_device *pdev, pm_message_t message)
+{
+	int i;
+
+	for (i = 0; i < nports; i++)
+		mbus_pcie_save[i] = (readl(port_bak[i]->base + PCIE_STAT_OFF));
+
+	return 0;
+}
+
+static int mvebu_pcie_resume(struct platform_device *pdev)
+{
+	int i;
+
+	for (i = 0; i < nports; i++) {
+		writel_relaxed(mbus_pcie_save[i], port_bak[i]->base + PCIE_STAT_OFF);
+		mvebu_pcie_setup_hw(port_bak[i]);
+	}
+
+	return 0;
+}
+
+static int __init mvebu_pcie_probe(struct platform_device *pdev)
+{
+	struct mvebu_pcie *pcie;
+	struct device_node *np = pdev->dev.of_node;
+	struct device_node *child;
+	int i, ret;
+
+	pcie = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pcie),
+			    GFP_KERNEL);
+	if (!pcie)
+		return -ENOMEM;
+
+	pcie->pdev = pdev;
+
+	/* Get the PCIe memory and I/O aperture */
+	mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
+	if (resource_size(&pcie->mem) == 0) {
+		dev_err(&pdev->dev, "invalid memory aperture size\n");
+		return -EINVAL;
+	}
+
+	mvebu_mbus_get_pcie_io_aperture(&pcie->io);
+	if (resource_size(&pcie->io) == 0) {
+		dev_err(&pdev->dev, "invalid I/O aperture size\n");
+		return -EINVAL;
+	}
+
+	pcie->realio.flags = pcie->io.flags;
+	pcie->realio.start = PCIBIOS_MIN_IO;
+	pcie->realio.end = min_t(resource_size_t,
+				  IO_SPACE_LIMIT,
+				  resource_size(&pcie->io));
+
+	/* Get the bus range */
+	ret = of_pci_parse_bus_range(np, &pcie->busn);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to parse bus-range property: %d\n",
+			ret);
+		return ret;
+	}
+
+	i = 0;
+	for_each_child_of_node(pdev->dev.of_node, child) {
+		if (!of_device_is_available(child))
+			continue;
+		i++;
+	}
+
+	pcie->ports = devm_kzalloc(&pdev->dev, i *
+				   sizeof(struct mvebu_pcie_port),
+				   GFP_KERNEL);
+	if (!pcie->ports)
+		return -ENOMEM;
+
+	i = 0;
+	for_each_child_of_node(pdev->dev.of_node, child) {
+		struct mvebu_pcie_port *port = &pcie->ports[i];
+
+		if (!of_device_is_available(child))
+			continue;
+
+		port->pcie = pcie;
+
+		if (of_property_read_u32(child, "marvell,pcie-port",
+					 &port->port)) {
+			dev_warn(&pdev->dev,
+				 "ignoring PCIe DT node, missing pcie-port property\n");
+			continue;
+		}
+
+		if (of_property_read_u32(child, "marvell,pcie-lane",
+					 &port->lane))
+			port->lane = 0;
+
+		port->name = kasprintf(GFP_KERNEL, "pcie%d.%d",
+				       port->port, port->lane);
+
+		port->devfn = of_pci_get_devfn(child);
+		if (port->devfn < 0)
+			continue;
+
+		ret = mvebu_get_tgt_attr(np, port->devfn, IORESOURCE_MEM,
+					 &port->mem_target, &port->mem_attr);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "PCIe%d.%d: cannot get tgt/attr for mem window\n",
+				port->port, port->lane);
+			continue;
+		}
+
+		ret = mvebu_get_tgt_attr(np, port->devfn, IORESOURCE_IO,
+					 &port->io_target, &port->io_attr);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "PCIe%d.%d: cannot get tgt/attr for io window\n",
+				port->port, port->lane);
+			continue;
+		}
+
+		port->clk = of_clk_get_by_name(child, NULL);
+		if (IS_ERR(port->clk)) {
+			dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n",
+			       port->port, port->lane);
+			continue;
+		}
+
+		ret = clk_prepare_enable(port->clk);
+		if (ret)
+			continue;
+
+		port->base = mvebu_pcie_map_registers(pdev, child, port);
+		if (IS_ERR(port->base)) {
+			dev_err(&pdev->dev, "PCIe%d.%d: cannot map registers\n",
+				port->port, port->lane);
+			port->base = NULL;
+			clk_disable_unprepare(port->clk);
+			continue;
+		}
+
+		mvebu_pcie_set_local_dev_nr(port, 1);
+
+		if (mvebu_pcie_link_up(port)) {
+			port->haslink = 1;
+			dev_info(&pdev->dev, "PCIe%d.%d: link up\n",
+				 port->port, port->lane);
+		} else {
+			port->haslink = 0;
+			dev_info(&pdev->dev, "PCIe%d.%d: link down\n",
+				 port->port, port->lane);
+		}
+
+		port->dn = child;
+		spin_lock_init(&port->conf_lock);
+		mvebu_sw_pci_bridge_init(port);
+		port_bak[i] = port;
+		i++;
+	}
+
+	nports = i;
+	pcie->nports = i;
+	mvebu_pcie_msi_enable(pcie);
+	mvebu_pcie_enable(pcie);
+
+	return 0;
+}
+
+static const struct of_device_id mvebu_pcie_of_match_table[] = {
+	{ .compatible = "marvell,armada-xp-pcie", },
+	{ .compatible = "marvell,armada-370-pcie", },
+	{ .compatible = "marvell,armada-375-pcie", },
+	{ .compatible = "marvell,armada-38x-pcie", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mvebu_pcie_of_match_table);
+
+static struct platform_driver mvebu_pcie_driver = {
+#ifdef CONFIG_PM
+	.suspend        = mvebu_pcie_suspend,
+	.resume         = mvebu_pcie_resume,
+#endif
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "mvebu-pcie",
+		.of_match_table =
+		   of_match_ptr(mvebu_pcie_of_match_table),
+	},
+};
+
+static int __init mvebu_pcie_init(void)
+{
+	return platform_driver_probe(&mvebu_pcie_driver,
+				     mvebu_pcie_probe);
+}
+
+subsys_initcall(mvebu_pcie_init);
+
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_DESCRIPTION("Marvell EBU PCIe driver");
+MODULE_LICENSE("GPLv2");
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 2c1075213bec..78aa3146491a 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -30,20 +30,44 @@ static int pci_msi_enable = 1;
 
 /* Arch hooks */
 
-#ifndef arch_msi_check_device
-int arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
+int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
 {
+	struct msi_chip *chip = dev->bus->msi;
+	int err;
+
+	if (!chip || !chip->setup_irq)
+		return -EINVAL;
+
+	err = chip->setup_irq(chip, dev, desc);
+	if (err < 0)
+		return err;
+
+	irq_set_chip_data(desc->irq, chip);
+
 	return 0;
 }
-#endif
 
-#ifndef arch_setup_msi_irqs
-# define arch_setup_msi_irqs default_setup_msi_irqs
-# define HAVE_DEFAULT_MSI_SETUP_IRQS
-#endif
+void __weak arch_teardown_msi_irq(unsigned int irq)
+{
+	struct msi_chip *chip = irq_get_chip_data(irq);
 
-#ifdef HAVE_DEFAULT_MSI_SETUP_IRQS
-int default_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+	if (!chip || !chip->teardown_irq)
+		return;
+
+	chip->teardown_irq(chip, irq);
+}
+
+int __weak arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
+{
+	struct msi_chip *chip = dev->bus->msi;
+
+	if (!chip || !chip->check_device)
+		return 0;
+
+	return chip->check_device(chip, dev, nvec, type);
+}
+
+int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 {
 	struct msi_desc *entry;
 	int ret;
@@ -65,14 +89,11 @@ int default_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 
 	return 0;
 }
-#endif
-
-#ifndef arch_teardown_msi_irqs
-# define arch_teardown_msi_irqs default_teardown_msi_irqs
-# define HAVE_DEFAULT_MSI_TEARDOWN_IRQS
-#endif
 
-#ifdef HAVE_DEFAULT_MSI_TEARDOWN_IRQS
+/*
+ * We have a default implementation available as a separate non-weak
+ * function, as it is used by the Xen x86 PCI code
+ */
 void default_teardown_msi_irqs(struct pci_dev *dev)
 {
 	struct msi_desc *entry;
@@ -86,14 +107,12 @@ void default_teardown_msi_irqs(struct pci_dev *dev)
 			arch_teardown_msi_irq(entry->irq + i);
 	}
 }
-#endif
 
-#ifndef arch_restore_msi_irqs
-# define arch_restore_msi_irqs default_restore_msi_irqs
-# define HAVE_DEFAULT_MSI_RESTORE_IRQS
-#endif
+void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
+{
+	return default_teardown_msi_irqs(dev);
+}
 
-#ifdef HAVE_DEFAULT_MSI_RESTORE_IRQS
 void default_restore_msi_irqs(struct pci_dev *dev, int irq)
 {
 	struct msi_desc *entry;
@@ -111,7 +130,11 @@ void default_restore_msi_irqs(struct pci_dev *dev, int irq)
 	if (entry)
 		write_msi_msg(irq, &entry->msg);
 }
-#endif
+
+void __weak arch_restore_msi_irqs(struct pci_dev *dev, int irq)
+{
+	return default_restore_msi_irqs(dev, irq);
+}
 
 static void msi_set_enable(struct pci_dev *dev, int enable)
 {
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ea37072e8bf2..6ebe6abb6790 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -634,6 +634,7 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
 
 	child->parent = parent;
 	child->ops = parent->ops;
+	child->msi = parent->msi;
 	child->sysdata = parent->sysdata;
 	child->bus_flags = parent->bus_flags;
 
diff --git a/drivers/pinctrl/mvebu/Kconfig b/drivers/pinctrl/mvebu/Kconfig
index 366fa541ee91..a22b0302922d 100644
--- a/drivers/pinctrl/mvebu/Kconfig
+++ b/drivers/pinctrl/mvebu/Kconfig
@@ -17,6 +17,14 @@ config PINCTRL_ARMADA_370
 	bool
 	select PINCTRL_MVEBU
 
+config PINCTRL_ARMADA_375
+	bool
+	select PINCTRL_MVEBU
+
+config PINCTRL_ARMADA_38X
+	bool
+	select PINCTRL_MVEBU
+
 config PINCTRL_ARMADA_XP
 	bool
 	select PINCTRL_MVEBU
diff --git a/drivers/pinctrl/mvebu/Makefile b/drivers/pinctrl/mvebu/Makefile
index 37c253297af0..bc1b9f14f539 100644
--- a/drivers/pinctrl/mvebu/Makefile
+++ b/drivers/pinctrl/mvebu/Makefile
@@ -2,4 +2,6 @@ obj-$(CONFIG_PINCTRL_MVEBU)	+= pinctrl-mvebu.o
 obj-$(CONFIG_PINCTRL_DOVE)	+= pinctrl-dove.o
 obj-$(CONFIG_PINCTRL_KIRKWOOD)	+= pinctrl-kirkwood.o
 obj-$(CONFIG_PINCTRL_ARMADA_370) += pinctrl-armada-370.o
+obj-$(CONFIG_PINCTRL_ARMADA_375) += pinctrl-armada-375.o
+obj-$(CONFIG_PINCTRL_ARMADA_38X) += pinctrl-armada-38x.o
 obj-$(CONFIG_PINCTRL_ARMADA_XP)  += pinctrl-armada-xp.o
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-375.c b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
new file mode 100644
index 000000000000..001224642295
--- /dev/null
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-375.c
@@ -0,0 +1,441 @@
+/*
+ * Marvell Armada 375 pinctrl driver based on mvebu pinctrl core
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-mvebu.h"
+
+static struct mvebu_mpp_mode mv88f6720_mpp_modes[] = {
+	MPP_MODE(0,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "dev", "ad2"),
+		 MPP_FUNCTION(0x2, "spi0", "cs1"),
+		 MPP_FUNCTION(0x3, "spi1", "cs1"),
+		 MPP_FUNCTION(0x5, "nand", "io2")),
+	MPP_MODE(1,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "dev", "ad3"),
+		 MPP_FUNCTION(0x2, "spi0", "mosi"),
+		 MPP_FUNCTION(0x3, "spi1", "mosi"),
+		 MPP_FUNCTION(0x5, "nand", "io3")),
+	MPP_MODE(2,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "dev", "ad4"),
+		 MPP_FUNCTION(0x2, "ptp", "eventreq"),
+		 MPP_FUNCTION(0x3, "led", "c0"),
+		 MPP_FUNCTION(0x4, "audio", "sdi"),
+		 MPP_FUNCTION(0x5, "nand", "io4"),
+		 MPP_FUNCTION(0x6, "spi1", "mosi")),
+	MPP_MODE(3,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "dev", "ad5"),
+		 MPP_FUNCTION(0x2, "ptp", "triggen"),
+		 MPP_FUNCTION(0x3, "led", "p3"),
+		 MPP_FUNCTION(0x4, "audio", "mclk"),
+		 MPP_FUNCTION(0x5, "nand", "io5"),
+		 MPP_FUNCTION(0x6, "spi1", "miso")),
+	MPP_MODE(4,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "dev", "ad6"),
+		 MPP_FUNCTION(0x2, "spi0", "miso"),
+		 MPP_FUNCTION(0x3, "spi1", "miso"),
+		 MPP_FUNCTION(0x5, "nand", "io6")),
+	MPP_MODE(5,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "dev", "ad7"),
+		 MPP_FUNCTION(0x2, "spi0", "cs2"),
+		 MPP_FUNCTION(0x3, "spi1", "cs2"),
+		 MPP_FUNCTION(0x5, "nand", "io7"),
+		 MPP_FUNCTION(0x6, "spi1", "miso")),
+	MPP_MODE(6,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "dev", "ad0"),
+		 MPP_FUNCTION(0x3, "led", "p1"),
+		 MPP_FUNCTION(0x4, "audio", "rclk"),
+		 MPP_FUNCTION(0x5, "nand", "io0")),
+	MPP_MODE(7,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "dev", "ad1"),
+		 MPP_FUNCTION(0x2, "ptp", "clk"),
+		 MPP_FUNCTION(0x3, "led", "p2"),
+		 MPP_FUNCTION(0x4, "audio", "extclk"),
+		 MPP_FUNCTION(0x5, "nand", "io1")),
+	MPP_MODE(8,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "dev ", "bootcs"),
+		 MPP_FUNCTION(0x2, "spi0", "cs0"),
+		 MPP_FUNCTION(0x3, "spi1", "cs0"),
+		 MPP_FUNCTION(0x5, "nand", "ce")),
+	MPP_MODE(9,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "nf", "wen"),
+		 MPP_FUNCTION(0x2, "spi0", "sck"),
+		 MPP_FUNCTION(0x3, "spi1", "sck"),
+		 MPP_FUNCTION(0x5, "nand", "we")),
+	MPP_MODE(10,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "nf", "ren"),
+		 MPP_FUNCTION(0x2, "dram", "vttctrl"),
+		 MPP_FUNCTION(0x3, "led", "c1"),
+		 MPP_FUNCTION(0x5, "nand", "re"),
+		 MPP_FUNCTION(0x6, "spi1", "sck")),
+	MPP_MODE(11,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "dev", "a0"),
+		 MPP_FUNCTION(0x3, "led", "c2"),
+		 MPP_FUNCTION(0x4, "audio", "sdo"),
+		 MPP_FUNCTION(0x5, "nand", "cle")),
+	MPP_MODE(12,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "dev", "a1"),
+		 MPP_FUNCTION(0x4, "audio", "bclk"),
+		 MPP_FUNCTION(0x5, "nand", "ale")),
+	MPP_MODE(13,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "dev", "readyn"),
+		 MPP_FUNCTION(0x2, "pcie0", "rstoutn"),
+		 MPP_FUNCTION(0x3, "pcie1", "rstoutn"),
+		 MPP_FUNCTION(0x5, "nand", "rb"),
+		 MPP_FUNCTION(0x6, "spi1", "mosi")),
+	MPP_MODE(14,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "i2c0", "sda"),
+		 MPP_FUNCTION(0x3, "uart1", "txd")),
+	MPP_MODE(15,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "i2c0", "sck"),
+		 MPP_FUNCTION(0x3, "uart1", "rxd")),
+	MPP_MODE(16,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "uart0", "txd")),
+	MPP_MODE(17,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "uart0", "rxd")),
+	MPP_MODE(18,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "tdm", "intn")),
+	MPP_MODE(19,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "tdm", "rstn")),
+	MPP_MODE(20,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "tdm", "pclk")),
+	MPP_MODE(21,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "tdm", "fsync")),
+	MPP_MODE(22,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "tdm", "drx")),
+	MPP_MODE(23,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "tdm", "dtx")),
+	MPP_MODE(24,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "led", "p0"),
+		 MPP_FUNCTION(0x2, "ge1", "rxd0"),
+		 MPP_FUNCTION(0x3, "sd", "cmd"),
+		 MPP_FUNCTION(0x4, "uart0", "rts"),
+		 MPP_FUNCTION(0x5, "spi0", "cs0"),
+		 MPP_FUNCTION(0x6, "dev", "cs1")),
+	MPP_MODE(25,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "led", "p2"),
+		 MPP_FUNCTION(0x2, "ge1", "rxd1"),
+		 MPP_FUNCTION(0x3, "sd", "d0"),
+		 MPP_FUNCTION(0x4, "uart0", "cts"),
+		 MPP_FUNCTION(0x5, "spi0", "mosi"),
+		 MPP_FUNCTION(0x6, "dev", "cs2")),
+	MPP_MODE(26,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "pcie0", "clkreq"),
+		 MPP_FUNCTION(0x2, "ge1", "rxd2"),
+		 MPP_FUNCTION(0x3, "sd", "d2"),
+		 MPP_FUNCTION(0x4, "uart1", "rts"),
+		 MPP_FUNCTION(0x5, "spi0", "cs1"),
+		 MPP_FUNCTION(0x6, "led", "c1")),
+	MPP_MODE(27,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "pcie1", "clkreq"),
+		 MPP_FUNCTION(0x2, "ge1", "rxd3"),
+		 MPP_FUNCTION(0x3, "sd", "d1"),
+		 MPP_FUNCTION(0x4, "uart1", "cts"),
+		 MPP_FUNCTION(0x5, "spi0", "miso"),
+		 MPP_FUNCTION(0x6, "led", "c2")),
+	MPP_MODE(28,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "led", "p3"),
+		 MPP_FUNCTION(0x2, "ge1", "txctl"),
+		 MPP_FUNCTION(0x3, "sd", "clk"),
+		 MPP_FUNCTION(0x5, "dram", "vttctrl")),
+	MPP_MODE(29,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "pcie1", "clkreq"),
+		 MPP_FUNCTION(0x2, "ge1", "rxclk"),
+		 MPP_FUNCTION(0x3, "sd", "d3"),
+		 MPP_FUNCTION(0x5, "spi0", "sck"),
+		 MPP_FUNCTION(0x6, "pcie0", "rstoutn")),
+	MPP_MODE(30,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "ge1", "txd0"),
+		 MPP_FUNCTION(0x3, "spi1", "cs0"),
+		 MPP_FUNCTION(0x5, "led", "p3"),
+		 MPP_FUNCTION(0x6, "ptp", "eventreq")),
+	MPP_MODE(31,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "ge1", "txd1"),
+		 MPP_FUNCTION(0x3, "spi1", "mosi"),
+		 MPP_FUNCTION(0x5, "led", "p0")),
+	MPP_MODE(32,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "ge1", "txd2"),
+		 MPP_FUNCTION(0x3, "spi1", "sck"),
+		 MPP_FUNCTION(0x4, "ptp", "triggen"),
+		 MPP_FUNCTION(0x5, "led", "c0")),
+	MPP_MODE(33,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "ge1", "txd3"),
+		 MPP_FUNCTION(0x3, "spi1", "miso"),
+		 MPP_FUNCTION(0x5, "led", "p2")),
+	MPP_MODE(34,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "ge1", "txclkout"),
+		 MPP_FUNCTION(0x3, "spi1", "sck"),
+		 MPP_FUNCTION(0x5, "led", "c1")),
+	MPP_MODE(35,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "ge1", "rxctl"),
+		 MPP_FUNCTION(0x3, "spi1", "cs1"),
+		 MPP_FUNCTION(0x4, "spi0", "cs2"),
+		 MPP_FUNCTION(0x5, "led", "p1")),
+	MPP_MODE(36,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "pcie0", "clkreq"),
+		 MPP_FUNCTION(0x5, "led", "c2")),
+	MPP_MODE(37,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "pcie0", "clkreq"),
+		 MPP_FUNCTION(0x2, "tdm", "intn"),
+		 MPP_FUNCTION(0x4, "ge", "mdc")),
+	MPP_MODE(38,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "pcie1", "clkreq"),
+		 MPP_FUNCTION(0x4, "ge", "mdio")),
+	MPP_MODE(39,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x4, "ref", "clkout"),
+		 MPP_FUNCTION(0x5, "led", "p3")),
+	MPP_MODE(40,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x4, "uart1", "txd"),
+		 MPP_FUNCTION(0x5, "led", "p0")),
+	MPP_MODE(41,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x4, "uart1", "rxd"),
+		 MPP_FUNCTION(0x5, "led", "p1")),
+	MPP_MODE(42,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x3, "spi1", "cs2"),
+		 MPP_FUNCTION(0x4, "led", "c0"),
+		 MPP_FUNCTION(0x6, "ptp", "clk")),
+	MPP_MODE(43,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "sata0", "prsnt"),
+		 MPP_FUNCTION(0x4, "dram", "vttctrl"),
+		 MPP_FUNCTION(0x5, "led", "c1")),
+	MPP_MODE(44,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x4, "sata0", "prsnt")),
+	MPP_MODE(45,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "spi0", "cs2"),
+		 MPP_FUNCTION(0x4, "pcie0", "rstoutn"),
+		 MPP_FUNCTION(0x5, "led", "c2"),
+		 MPP_FUNCTION(0x6, "spi1", "cs2")),
+	MPP_MODE(46,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "led", "p0"),
+		 MPP_FUNCTION(0x2, "ge0", "txd0"),
+		 MPP_FUNCTION(0x3, "ge1", "txd0"),
+		 MPP_FUNCTION(0x6, "dev", "wen1")),
+	MPP_MODE(47,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "led", "p1"),
+		 MPP_FUNCTION(0x2, "ge0", "txd1"),
+		 MPP_FUNCTION(0x3, "ge1", "txd1"),
+		 MPP_FUNCTION(0x5, "ptp", "triggen"),
+		 MPP_FUNCTION(0x6, "dev", "ale0")),
+	MPP_MODE(48,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "led", "p2"),
+		 MPP_FUNCTION(0x2, "ge0", "txd2"),
+		 MPP_FUNCTION(0x3, "ge1", "txd2"),
+		 MPP_FUNCTION(0x6, "dev", "ale1")),
+	MPP_MODE(49,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "led", "p3"),
+		 MPP_FUNCTION(0x2, "ge0", "txd3"),
+		 MPP_FUNCTION(0x3, "ge1", "txd3"),
+		 MPP_FUNCTION(0x6, "dev", "a2")),
+	MPP_MODE(50,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "led", "c0"),
+		 MPP_FUNCTION(0x2, "ge0", "rxd0"),
+		 MPP_FUNCTION(0x3, "ge1", "rxd0"),
+		 MPP_FUNCTION(0x5, "ptp", "eventreq"),
+		 MPP_FUNCTION(0x6, "dev", "ad12")),
+	MPP_MODE(51,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "led", "c1"),
+		 MPP_FUNCTION(0x2, "ge0", "rxd1"),
+		 MPP_FUNCTION(0x3, "ge1", "rxd1"),
+		 MPP_FUNCTION(0x6, "dev", "ad8")),
+	MPP_MODE(52,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "led", "c2"),
+		 MPP_FUNCTION(0x2, "ge0", "rxd2"),
+		 MPP_FUNCTION(0x3, "ge1", "rxd2"),
+		 MPP_FUNCTION(0x5, "i2c0", "sda"),
+		 MPP_FUNCTION(0x6, "dev", "ad9")),
+	MPP_MODE(53,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "pcie1", "rstoutn"),
+		 MPP_FUNCTION(0x2, "ge0", "rxd3"),
+		 MPP_FUNCTION(0x3, "ge1", "rxd3"),
+		 MPP_FUNCTION(0x5, "i2c0", "sck"),
+		 MPP_FUNCTION(0x6, "dev", "ad10")),
+	MPP_MODE(54,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "pcie0", "rstoutn"),
+		 MPP_FUNCTION(0x2, "ge0", "rxctl"),
+		 MPP_FUNCTION(0x3, "ge1", "rxctl"),
+		 MPP_FUNCTION(0x6, "dev", "ad11")),
+	MPP_MODE(55,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "ge0", "rxclk"),
+		 MPP_FUNCTION(0x3, "ge1", "rxclk"),
+		 MPP_FUNCTION(0x6, "dev", "cs0")),
+	MPP_MODE(56,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "ge0", "txclkout"),
+		 MPP_FUNCTION(0x3, "ge1", "txclkout"),
+		 MPP_FUNCTION(0x6, "dev", "oe")),
+	MPP_MODE(57,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "ge0", "txctl"),
+		 MPP_FUNCTION(0x3, "ge1", "txctl"),
+		 MPP_FUNCTION(0x6, "dev", "wen0")),
+	MPP_MODE(58,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x4, "led", "c0")),
+	MPP_MODE(59,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x4, "led", "c1")),
+	MPP_MODE(60,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "uart1", "txd"),
+		 MPP_FUNCTION(0x4, "led", "c2"),
+		 MPP_FUNCTION(0x6, "dev", "ad13")),
+	MPP_MODE(61,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "i2c1", "sda"),
+		 MPP_FUNCTION(0x2, "uart1", "rxd"),
+		 MPP_FUNCTION(0x3, "spi1", "cs2"),
+		 MPP_FUNCTION(0x4, "led", "p0"),
+		 MPP_FUNCTION(0x6, "dev", "ad14")),
+	MPP_MODE(62,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "i2c1", "sck"),
+		 MPP_FUNCTION(0x4, "led", "p1"),
+		 MPP_FUNCTION(0x6, "dev", "ad15")),
+	MPP_MODE(63,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "ptp", "triggen"),
+		 MPP_FUNCTION(0x4, "led", "p2"),
+		 MPP_FUNCTION(0x6, "dev", "burst")),
+	MPP_MODE(64,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "dram", "vttctrl"),
+		 MPP_FUNCTION(0x4, "led", "p3")),
+	MPP_MODE(65,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x1, "sata1", "prsnt")),
+	MPP_MODE(66,
+		 MPP_FUNCTION(0x0, "gpio", NULL),
+		 MPP_FUNCTION(0x2, "ptp", "eventreq"),
+		 MPP_FUNCTION(0x4, "spi1", "cs3"),
+		 MPP_FUNCTION(0x5, "pcie0", "rstoutn"),
+		 MPP_FUNCTION(0x6, "dev", "cs3")),
+};
+
+static struct mvebu_pinctrl_soc_info armada_375_pinctrl_info;
+
+static struct of_device_id armada_375_pinctrl_of_match[] = {
+	{ .compatible = "marvell,mv88f6720-pinctrl" },
+	{ },
+};
+
+static struct mvebu_mpp_ctrl mv88f6720_mpp_controls[] = {
+	MPP_REG_CTRL(0, 69),
+};
+
+static struct pinctrl_gpio_range mv88f6720_mpp_gpio_ranges[] = {
+	MPP_GPIO_RANGE(0,   0,  0, 32),
+	MPP_GPIO_RANGE(1,  32, 32, 32),
+	MPP_GPIO_RANGE(2,  64, 64,  3),
+};
+
+static int armada_375_pinctrl_probe(struct platform_device *pdev)
+{
+	struct mvebu_pinctrl_soc_info *soc = &armada_375_pinctrl_info;
+
+	soc->variant = 0; /* no variants for Armada 370 */
+	soc->controls = mv88f6720_mpp_controls;
+	soc->ncontrols = ARRAY_SIZE(mv88f6720_mpp_controls);
+	soc->modes = mv88f6720_mpp_modes;
+	soc->nmodes = ARRAY_SIZE(mv88f6720_mpp_modes);
+	soc->gpioranges = mv88f6720_mpp_gpio_ranges;
+	soc->ngpioranges = ARRAY_SIZE(mv88f6720_mpp_gpio_ranges);
+
+	pdev->dev.platform_data = soc;
+
+	return mvebu_pinctrl_probe(pdev);
+}
+
+static int armada_375_pinctrl_remove(struct platform_device *pdev)
+{
+	return mvebu_pinctrl_remove(pdev);
+}
+
+static struct platform_driver armada_375_pinctrl_driver = {
+	.driver = {
+		.name = "armada-375-pinctrl",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(armada_375_pinctrl_of_match),
+	},
+	.probe = armada_375_pinctrl_probe,
+	.remove = armada_375_pinctrl_remove,
+};
+
+module_platform_driver(armada_375_pinctrl_driver);
+
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_DESCRIPTION("Marvell Armada 375 pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-38x.c b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
new file mode 100644
index 000000000000..247ee2a93513
--- /dev/null
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-38x.c
@@ -0,0 +1,449 @@
+/*
+ * Marvell Armada 380/385 pinctrl driver based on mvebu pinctrl core
+ *
+ * Copyright (C) 2013 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-mvebu.h"
+
+enum {
+	V_88F6810 = BIT(0),
+	V_88F6820 = BIT(1),
+	V_88F6828 = BIT(2),
+	V_88F6810_PLUS = (V_88F6810 | V_88F6820 | V_88F6828),
+	V_88F6820_PLUS = (V_88F6820 | V_88F6828),
+};
+
+static struct mvebu_mpp_mode armada_38x_mpp_modes[] = {
+	MPP_MODE(0,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ua0",   "rxd",        V_88F6810_PLUS)),
+	MPP_MODE(1,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ua0",   "txd",        V_88F6810_PLUS)),
+	MPP_MODE(2,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "i2c0",  "sck",        V_88F6810_PLUS)),
+	MPP_MODE(3,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "i2c0",  "sda",        V_88F6810_PLUS)),
+	MPP_MODE(4,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ge",    "mdc",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ua1",   "txd",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "ua0",   "rts",        V_88F6810_PLUS)),
+	MPP_MODE(5,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ge",    "mdio",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ua1",   "rxd",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "ua0",   "cts",        V_88F6810_PLUS)),
+	MPP_MODE(6,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ge0",   "txclkout",   V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ge0",   "crs",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "cs3",        V_88F6810_PLUS)),
+	MPP_MODE(7,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ge0",   "txd0",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "ad9",        V_88F6810_PLUS)),
+	MPP_MODE(8,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ge0",   "txd1",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "ad10",       V_88F6810_PLUS)),
+	MPP_MODE(9,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ge0",   "txd2",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "ad11",       V_88F6810_PLUS)),
+	MPP_MODE(10,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ge0",   "txd3",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "ad12",       V_88F6810_PLUS)),
+	MPP_MODE(11,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ge0",   "txctl",      V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "ad13",       V_88F6810_PLUS)),
+	MPP_MODE(12,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ge0",   "rxd0",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "pcie0", "rstout",     V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "pcie1", "rstout",     V_88F6820_PLUS),
+		 MPP_VAR_FUNCTION(4, "spi0",  "cs1",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "ad14",       V_88F6810_PLUS)),
+	MPP_MODE(13,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ge0",   "rxd1",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "pcie0", "clkreq",     V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "pcie1", "clkreq",     V_88F6820_PLUS),
+		 MPP_VAR_FUNCTION(4, "spi0",  "cs2",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "ad15",       V_88F6810_PLUS)),
+	MPP_MODE(14,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ge0",   "rxd2",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ptp",   "clk",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "m",     "vtt_ctrl",   V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "spi0",  "cs3",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "wen1",       V_88F6810_PLUS)),
+	MPP_MODE(15,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ge0",   "rxd3",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ge",    "mdc slave",  V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "pcie0", "rstout",     V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "spi0",  "mosi",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "pcie1", "rstout",     V_88F6820_PLUS)),
+	MPP_MODE(16,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ge0",   "rxctl",      V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ge",    "mdio slave", V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "m",     "decc_err",   V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "spi0",  "miso",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "pcie0", "clkreq",     V_88F6810_PLUS)),
+	MPP_MODE(17,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ge0",   "rxclk",      V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ptp",   "clk",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "ua1",   "rxd",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "spi0",  "sck",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "sata1", "prsnt",      V_88F6810_PLUS)),
+	MPP_MODE(18,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ge0",   "rxerr",      V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ptp",   "trig_gen",   V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "ua1",   "txd",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "spi0",  "cs0",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "pcie1", "rstout",     V_88F6820_PLUS)),
+	MPP_MODE(19,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ge0",   "col",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ptp",   "event_req ", V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "pcie0", "clkreq",     V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "sata1", "prsnt",      V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "ua0",   "cts",        V_88F6810_PLUS)),
+	MPP_MODE(20,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ge0",   "txclk ",     V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ptp",   "clk",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "pcie1", "rstout",     V_88F6820_PLUS),
+		 MPP_VAR_FUNCTION(4, "sata0", "prsnt",      V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "ua0",   "rts",        V_88F6810_PLUS)),
+	MPP_MODE(21,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "spi0",  "cs1",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ge1",   "rxd0",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "sata0", "prsnt",      V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "sd0",   "cmd",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "bootcs",     V_88F6810_PLUS)),
+	MPP_MODE(22,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "spi0",  "mosi",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "ad0",        V_88F6810_PLUS)),
+	MPP_MODE(23,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "spi0",  "sck",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "ad2",        V_88F6810_PLUS)),
+	MPP_MODE(24,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "spi0",  "miso",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ua0",   "cts",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "ua1",   "rxd",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "sd0",   "d4",         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "ready",      V_88F6810_PLUS)),
+	MPP_MODE(25,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "spi0",  "cs0",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ua0",   "rts",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "ua1",   "txd",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "sd0",   "d5",         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "cs0",        V_88F6810_PLUS)),
+	MPP_MODE(26,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "spi0",  "cs2",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "i2c1",  "sck",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "sd0",   "d6",         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "cs1",        V_88F6810_PLUS)),
+	MPP_MODE(27,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "spi0",  "cs3",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ge1",   "txclkout",   V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "i2c1",  "sda",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "sd0",   "d7",         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "cs2",        V_88F6810_PLUS)),
+	MPP_MODE(28,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ge1",   "txd0",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "sd0",   "clk",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "ad5",        V_88F6810_PLUS)),
+	MPP_MODE(29,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ge1",   "txd1",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "ale0",       V_88F6810_PLUS)),
+	MPP_MODE(30,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ge1",   "txd2",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "oen",        V_88F6810_PLUS)),
+	MPP_MODE(31,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ge1",   "txd3",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "ale1",       V_88F6810_PLUS)),
+	MPP_MODE(32,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ge1",   "txctl",      V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "wen0",       V_88F6810_PLUS)),
+	MPP_MODE(33,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "m",     "decc_err",   V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "ad3",        V_88F6810_PLUS)),
+	MPP_MODE(34,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "ad1",        V_88F6810_PLUS)),
+	MPP_MODE(35,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ref",   "clk_out1",   V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "a1",         V_88F6810_PLUS)),
+	MPP_MODE(36,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ptp",   "trig_gen",   V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "a0",         V_88F6810_PLUS)),
+	MPP_MODE(37,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ptp",   "clk",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ge1",   "rxclk",      V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "sd0",   "d3",         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "ad8",        V_88F6810_PLUS)),
+	MPP_MODE(38,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ptp",   "event_req",  V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ge1",   "rxd1",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "ref",   "clk_out0",   V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "sd0",   "d0",         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "ad4",        V_88F6810_PLUS)),
+	MPP_MODE(39,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "i2c1",  "sck",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ge1",   "rxd2",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "ua0",   "cts",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "sd0",   "d1",         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "a2",         V_88F6810_PLUS)),
+	MPP_MODE(40,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "i2c1",  "sda",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ge1",   "rxd3",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "ua0",   "rts",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "sd0",   "d2",         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "ad6",        V_88F6810_PLUS)),
+	MPP_MODE(41,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ua1",   "rxd",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ge1",   "rxctl",      V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "ua0",   "cts",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "spi1",  "cs3",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "burst/last", V_88F6810_PLUS)),
+	MPP_MODE(42,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ua1",   "txd",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "ua0",   "rts",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "ad7",        V_88F6810_PLUS)),
+	MPP_MODE(43,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "pcie0", "clkreq",     V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "m",     "vtt_ctrl",   V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "m",     "decc_err",   V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "pcie0", "rstout",     V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "dev",   "clkout",     V_88F6810_PLUS)),
+	MPP_MODE(44,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "sata0", "prsnt",      V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "sata1", "prsnt",      V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "sata2", "prsnt",      V_88F6828),
+		 MPP_VAR_FUNCTION(4, "sata3", "prsnt",      V_88F6828),
+		 MPP_VAR_FUNCTION(5, "pcie0", "rstout",     V_88F6810_PLUS)),
+	MPP_MODE(45,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ref",   "clk_out0",   V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "pcie0", "rstout",     V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "pcie1", "rstout",     V_88F6820_PLUS),
+		 MPP_VAR_FUNCTION(4, "pcie2", "rstout",     V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "pcie3", "rstout",     V_88F6810_PLUS)),
+	MPP_MODE(46,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ref",   "clk_out1",   V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "pcie0", "rstout",     V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "pcie1", "rstout",     V_88F6820_PLUS),
+		 MPP_VAR_FUNCTION(4, "pcie2", "rstout",     V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "pcie3", "rstout",     V_88F6810_PLUS)),
+	MPP_MODE(47,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "sata0", "prsnt",      V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "sata1", "prsnt",      V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "sata2", "prsnt",      V_88F6828),
+		 MPP_VAR_FUNCTION(4, "spi1",  "cs2",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "sata3", "prsnt",      V_88F6828)),
+	MPP_MODE(48,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "sata0", "prsnt",      V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "m",     "vtt_ctrl",   V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "tdm2c", "pclk",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "audio", "mclk",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "sd0",   "d4",         V_88F6810_PLUS)),
+	MPP_MODE(49,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "sata2", "prsnt",      V_88F6828),
+		 MPP_VAR_FUNCTION(2, "sata3", "prsnt",      V_88F6828),
+		 MPP_VAR_FUNCTION(3, "tdm2c", "fsync",      V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "audio", "lrclk",      V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "sd0",   "d5",         V_88F6810_PLUS)),
+	MPP_MODE(50,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "pcie0", "rstout",     V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "pcie1", "rstout",     V_88F6820_PLUS),
+		 MPP_VAR_FUNCTION(3, "tdm2c", "drx",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "audio", "extclk",     V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "sd0",   "cmd",        V_88F6810_PLUS)),
+	MPP_MODE(51,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "tdm2c", "dtx",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "audio", "sdo",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "m",     "decc_err",   V_88F6810_PLUS)),
+	MPP_MODE(52,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "pcie0", "rstout",     V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "pcie1", "rstout",     V_88F6820_PLUS),
+		 MPP_VAR_FUNCTION(3, "tdm2c", "intn",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "audio", "sdi",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "sd0",   "d6",         V_88F6810_PLUS)),
+	MPP_MODE(53,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "sata1", "prsnt",      V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "sata0", "prsnt",      V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "tdm2c", "rstn",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "audio", "bclk",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "sd0",   "d7",         V_88F6810_PLUS)),
+	MPP_MODE(54,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "sata0", "prsnt",      V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "sata1", "prsnt",      V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "pcie0", "rstout",     V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "pcie1", "rstout",     V_88F6820_PLUS),
+		 MPP_VAR_FUNCTION(5, "sd0",   "d3",         V_88F6810_PLUS)),
+	MPP_MODE(55,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ua1",   "cts",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ge",    "mdio",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "pcie1", "clkreq",     V_88F6820_PLUS),
+		 MPP_VAR_FUNCTION(4, "spi1",  "cs1",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "sd0",   "d0",         V_88F6810_PLUS)),
+	MPP_MODE(56,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "ua1",   "rts",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "ge",    "mdc",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "m",     "decc_err",   V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "spi1",  "mosi",       V_88F6810_PLUS)),
+	MPP_MODE(57,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "spi1",  "sck",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "sd0",   "clk",        V_88F6810_PLUS)),
+	MPP_MODE(58,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "pcie1", "clkreq",     V_88F6820_PLUS),
+		 MPP_VAR_FUNCTION(2, "i2c1",  "sck",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "pcie2", "clkreq",     V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(4, "spi1",  "miso",       V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "sd0",   "d1",         V_88F6810_PLUS)),
+	MPP_MODE(59,
+		 MPP_VAR_FUNCTION(0, "gpio",  NULL,         V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(1, "pcie0", "rstout",     V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(2, "i2c1",  "sda",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(3, "pcie1", "rstout",     V_88F6820_PLUS),
+		 MPP_VAR_FUNCTION(4, "spi1",  "cs0",        V_88F6810_PLUS),
+		 MPP_VAR_FUNCTION(5, "sd0",   "d2",         V_88F6810_PLUS)),
+};
+
+static struct mvebu_pinctrl_soc_info armada_38x_pinctrl_info;
+
+static struct of_device_id armada_38x_pinctrl_of_match[] = {
+	{
+		.compatible = "marvell,mv88f6810-pinctrl",
+		.data       = (void *) V_88F6810,
+	},
+	{
+		.compatible = "marvell,mv88f6820-pinctrl",
+		.data       = (void *) V_88F6820,
+	},
+	{
+		.compatible = "marvell,mv88f6828-pinctrl",
+		.data       = (void *) V_88F6828,
+	},
+	{ },
+};
+
+static struct mvebu_mpp_ctrl armada_38x_mpp_controls[] = {
+	MPP_REG_CTRL(0, 59),
+};
+
+static struct pinctrl_gpio_range armada_38x_mpp_gpio_ranges[] = {
+	MPP_GPIO_RANGE(0,   0,  0, 32),
+	MPP_GPIO_RANGE(1,  32, 32, 27),
+};
+
+static int armada_38x_pinctrl_probe(struct platform_device *pdev)
+{
+	struct mvebu_pinctrl_soc_info *soc = &armada_38x_pinctrl_info;
+	const struct of_device_id *match =
+		of_match_device(armada_38x_pinctrl_of_match, &pdev->dev);
+
+	if (!match)
+		return -ENODEV;
+
+	soc->variant = (unsigned) match->data & 0xff;
+
+	soc->controls = armada_38x_mpp_controls;
+	soc->ncontrols = ARRAY_SIZE(armada_38x_mpp_controls);
+	soc->gpioranges = armada_38x_mpp_gpio_ranges;
+	soc->ngpioranges = ARRAY_SIZE(armada_38x_mpp_gpio_ranges);
+	soc->modes = armada_38x_mpp_modes;
+	soc->nmodes = armada_38x_mpp_controls[0].npins;
+
+	pdev->dev.platform_data = soc;
+
+	return mvebu_pinctrl_probe(pdev);
+}
+
+static int armada_38x_pinctrl_remove(struct platform_device *pdev)
+{
+	return mvebu_pinctrl_remove(pdev);
+}
+
+static struct platform_driver armada_38x_pinctrl_driver = {
+	.driver = {
+		.name = "armada-38x-pinctrl",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(armada_38x_pinctrl_of_match),
+	},
+	.probe = armada_38x_pinctrl_probe,
+	.remove = armada_38x_pinctrl_remove,
+#ifdef CONFIG_PM
+	.suspend = mvebu_pinctrl_suspend,
+	.resume = mvebu_pinctrl_resume,
+#endif
+};
+
+module_platform_driver(armada_38x_pinctrl_driver);
+
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
+MODULE_DESCRIPTION("Marvell Armada 38x pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index bb7ddb1bc89f..a338dccb5707 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -30,6 +30,10 @@
 #define MPP_BITS	4
 #define MPP_MASK	0xf
 
+#define PINCTRL_REGS_SAVE_NUM	10
+
+static u32 pinctrl_save[PINCTRL_REGS_SAVE_NUM];
+
 struct mvebu_pinctrl_function {
 	const char *name;
 	const char **groups;
@@ -759,3 +763,33 @@ int mvebu_pinctrl_remove(struct platform_device *pdev)
 	pinctrl_unregister(pctl->pctldev);
 	return 0;
 }
+
+#ifdef CONFIG_PM
+int mvebu_pinctrl_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct mvebu_pinctrl *pctl = platform_get_drvdata(pdev);
+	int reg;
+
+	if (!pctl)
+		return -EINVAL;
+
+	for (reg = 0; reg < PINCTRL_REGS_SAVE_NUM; reg++)
+		pinctrl_save[reg] = readl_relaxed(pctl->base + reg * 0x4);
+
+	return 0;
+}
+
+int mvebu_pinctrl_resume(struct platform_device *pdev)
+{
+	struct mvebu_pinctrl *pctl = platform_get_drvdata(pdev);
+	int reg;
+
+	if (!pctl)
+		return -EINVAL;
+
+	for (reg = 0; reg < PINCTRL_REGS_SAVE_NUM; reg++)
+		writel_relaxed(pinctrl_save[reg], pctl->base + reg * 0x4);
+
+	return 0;
+}
+#endif
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.h b/drivers/pinctrl/mvebu/pinctrl-mvebu.h
index 90bd3beee860..65a739b9860d 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.h
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.h
@@ -188,5 +188,7 @@ struct mvebu_pinctrl_soc_info {
 
 int mvebu_pinctrl_probe(struct platform_device *pdev);
 int mvebu_pinctrl_remove(struct platform_device *pdev);
+int mvebu_pinctrl_suspend(struct platform_device *pdev, pm_message_t state);
+int mvebu_pinctrl_resume(struct platform_device *pdev);
 
 #endif
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index d1b6089a0ef8..3cfb7fc5c334 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -127,15 +127,11 @@ int reset_control_deassert(struct reset_control *rstc)
 EXPORT_SYMBOL_GPL(reset_control_deassert);
 
 /**
- * reset_control_get - Lookup and obtain a reference to a reset controller.
- * @dev: device to be reset by the controller
- * @id: reset line name
- *
- * Returns a struct reset_control or IS_ERR() condition containing errno.
- *
- * Use of id names is optional.
+ * of_reset_control_get - Lookup and obtain a reference to a reset
+ * controller by OF node.
  */
-struct reset_control *reset_control_get(struct device *dev, const char *id)
+struct reset_control *of_reset_control_get(struct device_node *np,
+					   const char *id)
 {
 	struct reset_control *rstc = ERR_PTR(-EPROBE_DEFER);
 	struct reset_controller_dev *r, *rcdev;
@@ -144,13 +140,10 @@ struct reset_control *reset_control_get(struct device *dev, const char *id)
 	int rstc_id;
 	int ret;
 
-	if (!dev)
-		return ERR_PTR(-EINVAL);
-
 	if (id)
-		index = of_property_match_string(dev->of_node,
+		index = of_property_match_string(np,
 						 "reset-names", id);
-	ret = of_parse_phandle_with_args(dev->of_node, "resets", "#reset-cells",
+	ret = of_parse_phandle_with_args(np, "resets", "#reset-cells",
 					 index, &args);
 	if (ret)
 		return ERR_PTR(ret);
@@ -185,12 +178,38 @@ struct reset_control *reset_control_get(struct device *dev, const char *id)
 		return ERR_PTR(-ENOMEM);
 	}
 
-	rstc->dev = dev;
+	rstc->dev = NULL;
 	rstc->rcdev = rcdev;
 	rstc->id = rstc_id;
 
 	return rstc;
 }
+EXPORT_SYMBOL(of_reset_control_get);
+
+/**
+ * reset_control_get - Lookup and obtain a reference to a reset controller.
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ *
+ * Use of id names is optional.
+ */
+struct reset_control *reset_control_get(struct device *dev, const char *id)
+{
+	struct reset_control *rstc;
+
+	if (!dev)
+		return ERR_PTR(-EINVAL);
+
+	rstc = of_reset_control_get(dev->of_node, id);
+	if (IS_ERR(rstc))
+		return rstc;
+
+	rstc->dev = dev;
+
+	return rstc;
+}
 EXPORT_SYMBOL_GPL(reset_control_get);
 
 /**
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index c33f86f1a69b..7c7c50350c8e 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -79,6 +79,7 @@ obj-$(CONFIG_RTC_DRV_MC13XXX)	+= rtc-mc13xxx.o
 obj-$(CONFIG_RTC_DRV_MSM6242)	+= rtc-msm6242.o
 obj-$(CONFIG_RTC_DRV_MPC5121)	+= rtc-mpc5121.o
 obj-$(CONFIG_RTC_DRV_MV)	+= rtc-mv.o
+obj-$(CONFIG_RTC_DRV_MV)	+= rtc-mvebu.o
 obj-$(CONFIG_RTC_DRV_NUC900)	+= rtc-nuc900.o
 obj-$(CONFIG_RTC_DRV_OMAP)	+= rtc-omap.o
 obj-$(CONFIG_RTC_DRV_PALMAS)	+= rtc-palmas.o
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index baab802f2153..2b8ed561be66 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -223,6 +223,7 @@ static int __init mv_rtc_probe(struct platform_device *pdev)
 	struct rtc_plat_data *pdata;
 	resource_size_t size;
 	u32 rtc_time;
+	u32 rtc_date;
 	int ret = 0;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -266,6 +267,17 @@ static int __init mv_rtc_probe(struct platform_device *pdev)
 		}
 	}
 
+	/*
+	 * A date after January 19th, 2038 does not fit on 32 bits and
+	 * will confuse the kernel and userspace. Reset to a sane date
+	 * (January 1st, 2013) if we're after 2038.
+	 */
+	rtc_date = readl(pdata->ioaddr + RTC_DATE_REG_OFFS);
+	if (bcd2bin((rtc_date >> RTC_YEAR_OFFS) & 0xff) >= 38) {
+		dev_info(&pdev->dev, "invalid RTC date, resetting to January, 1st 2013\n");
+		writel(0x130101, pdata->ioaddr + RTC_DATE_REG_OFFS);
+	}
+
 	pdata->irq = platform_get_irq(pdev, 0);
 
 	platform_set_drvdata(pdev, pdata);
diff --git a/drivers/rtc/rtc-mvebu.c b/drivers/rtc/rtc-mvebu.c
new file mode 100755
index 000000000000..f8d1920f227b
--- /dev/null
+++ b/drivers/rtc/rtc-mvebu.c
@@ -0,0 +1,489 @@
+/*
+ * Marvell EBU SoC Realtime Clock Driver (RTC)
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Based on a driver made by Brian Mahaffy <bmahaffy@marvell.com>
+ */
+
+#include <linux/module.h>
+#include <linux/rtc.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+/* The RTC DRS, revision 1.2, indicates that firmware should wait 5us after every register write */
+/* to the RTC hard macro so that the required update can occur without holding off the system bus */
+#define RTC_READ_REG(reg)		ioread32(rtc->regbase_rtc + reg)
+#define RTC_WRITE_REG(val, reg)		{ iowrite32(val, rtc->regbase_rtc + reg); udelay(5); }
+
+#define RTC_0HZ 0x00
+#define RTC_1HZ 0x04
+#define RTC_2HZ 0x08
+#define RTC_4HZ 0x10
+#define RTC_8HZ 0x20
+
+#define RTC_NOMINAL_TIMING 0x0
+
+#define RTC_STATUS_REG_OFFS		0x0
+#define RTC_IRQ_1_CONFIG_REG_OFFS	0x4
+#define RTC_IRQ_2_CONFIG_REG_OFFS	0x8
+#define RTC_TIME_REG_OFFS		0xC
+#define RTC_ALARM_1_REG_OFFS		0x10
+#define RTC_ALARM_2_REG_OFFS		0x14
+#define RTC_CLOCK_CORR_REG_OFFS		0x18
+#define RTC_TEST_CONFIG_REG_OFFS	0x1C
+
+#define RTC_SZ_STATUS_ALARM1_MASK	0x1
+#define RTC_SZ_STATUS_ALARM2_MASK	0x2
+#define RTC_SZ_TIMING_RESERVED1_MASK	0xFFFF0000
+#define RTC_SZ_INTERRUPT1_INT1AE_MASK	0x1
+#define RTC_SZ_INTERRUPT1_RESERVED1_MASK	0xFFFFFFC0
+#define RTC_SZ_INTERRUPT2_INT2FE_MASK	0x2
+#define RTC_SZ_INTERRUPT2_RESERVED1_MASK	0xFFFFFFC0
+
+typedef struct mvebu_rtc_s {
+	struct rtc_device *rtc_dev;
+	void __iomem      *regbase_rtc;
+	void __iomem      *regbase_soc;
+	spinlock_t         lock;
+	uint32_t           irq;
+	uint32_t           periodic_freq;
+} mvebu_rtc_t;
+
+static bool rtc_init_state(mvebu_rtc_t *rtc)
+{
+	/* Update RTC-MBUS bridge timing parameters */
+	writel(0xFD4D4CFA, rtc->regbase_soc);
+
+	/* Make sure we are not in any test mode */
+	RTC_WRITE_REG(0, RTC_TEST_CONFIG_REG_OFFS);
+	msleep_interruptible(500);
+
+	/* Setup nominal register access timing */
+	RTC_WRITE_REG(RTC_NOMINAL_TIMING, RTC_CLOCK_CORR_REG_OFFS);
+
+	/* Turn off Int1 sources & clear the Alarm count */
+	RTC_WRITE_REG(0, RTC_IRQ_1_CONFIG_REG_OFFS);
+	RTC_WRITE_REG(0, RTC_ALARM_1_REG_OFFS);
+
+	/* Turn off Int2 sources & clear the Periodic count */
+	RTC_WRITE_REG(0, RTC_IRQ_2_CONFIG_REG_OFFS);
+	RTC_WRITE_REG(0, RTC_ALARM_2_REG_OFFS);
+
+	/* Clear any pending Status bits */
+	RTC_WRITE_REG((RTC_SZ_STATUS_ALARM1_MASK | RTC_SZ_STATUS_ALARM2_MASK), RTC_STATUS_REG_OFFS);
+	{
+		uint32_t stat, alrm1, alrm2, int1, int2, tstcfg;
+
+		stat   = RTC_READ_REG(RTC_STATUS_REG_OFFS) & 0xFF;
+		alrm1  = RTC_READ_REG(RTC_ALARM_1_REG_OFFS);
+		int1   = RTC_READ_REG(RTC_IRQ_1_CONFIG_REG_OFFS) & 0xFF;
+		alrm2  = RTC_READ_REG(RTC_ALARM_2_REG_OFFS);
+		int2   = RTC_READ_REG(RTC_IRQ_2_CONFIG_REG_OFFS) & 0xFF;
+		tstcfg = RTC_READ_REG(RTC_TEST_CONFIG_REG_OFFS) & 0xFF;
+
+		if ((0xFC == stat)  &&
+			(0 == alrm1) && (0xC0 == int1) &&
+			(0 == alrm2) && (0xC0 == int2) &&
+			(0 == tstcfg)) {
+			return true;
+		} else {
+			return false;
+		}
+	}
+}
+
+/*
+ * Calculate the next alarm time given the
+ * requested alarm time and the current time...
+ */
+static void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, struct rtc_time *alrm)
+{
+	unsigned long now_time;
+	unsigned long next_time;
+
+/* ALPHA_CUSTOMIZE 20160428 modify
+  Because schedule power on incorrect */
+//	next->tm_year = now->tm_year;
+//	next->tm_mon  = now->tm_mon;
+//	next->tm_mday = now->tm_mday;
+  next->tm_year = alrm->tm_year;
+ 	next->tm_mon  = alrm->tm_mon;
+	next->tm_mday = alrm->tm_mday;
+	
+	next->tm_hour = alrm->tm_hour;
+	next->tm_min  = alrm->tm_min;
+	next->tm_sec  = alrm->tm_sec;
+
+	rtc_tm_to_time(now, &now_time);
+	rtc_tm_to_time(next, &next_time);
+
+/* ALPHA_CUSTOMIZE 20160428 modify
+  Because schedule power on incorrect */
+	/* Do we need to advance a day? */
+//	if (next_time < now_time) {
+//		next_time += 60 * 60 * 24;
+//		rtc_time_to_tm(next_time, next);
+//	}
+}
+
+static int rtc_setup_alarm(mvebu_rtc_t *rtc, struct rtc_time *alrm)
+{
+	struct rtc_time alarm_tm, now_tm;
+	unsigned long now, time;
+	int ret;
+
+	do {
+		now = RTC_READ_REG(RTC_TIME_REG_OFFS);
+		rtc_time_to_tm(now, &now_tm);
+		rtc_next_alarm_time(&alarm_tm, &now_tm, alrm);
+		ret = rtc_tm_to_time(&alarm_tm, &time);
+
+		if (ret != 0)
+			continue;
+		else
+			RTC_WRITE_REG(time, RTC_ALARM_1_REG_OFFS);
+
+	} while (now != RTC_READ_REG(RTC_TIME_REG_OFFS));
+
+	return ret;
+}
+
+static irqreturn_t mvebu_rtc_irq_handler(int irq, void *rtc_ptr)
+{
+	mvebu_rtc_t *rtc = (mvebu_rtc_t *)rtc_ptr;
+	irqreturn_t irq_status = IRQ_NONE;
+	uint32_t rtc_status_reg, val;
+
+	val = readl(rtc->regbase_soc + 0x8);
+	writel(0, rtc->regbase_soc + 0x8);
+
+	rtc_status_reg = RTC_READ_REG(RTC_STATUS_REG_OFFS);
+
+	/* Has Alarm 1 triggered? */
+	/* TBD - properly check IRQ status */
+	/* if (rtc_status_reg & RTC_SZ_STATUS_ALARM1_MASK) { */
+		/* Disable the interrupt.  This alarm is complete... */
+		RTC_WRITE_REG(RTC_SZ_INTERRUPT1_RESERVED1_MASK, RTC_IRQ_1_CONFIG_REG_OFFS);
+		RTC_WRITE_REG(RTC_SZ_STATUS_ALARM1_MASK, RTC_STATUS_REG_OFFS);
+
+		rtc_update_irq(rtc->rtc_dev, 1, (RTC_IRQF | RTC_AF));
+		irq_status = IRQ_HANDLED;
+	/* } */
+#if 0
+	/* Has either the 1Hz or user periodic frequency triggered?
+	 NOTE: This is a periodic interrupt.  It keeps on trucking... */
+	if (rtc_status_reg & RTC_SZ_STATUS_ALARM2_MASK) {
+		RTC_WRITE_REG(RTC_SZ_STATUS_ALARM2_MASK, RTC_STATUS_REG_OFFS);
+
+		if (rtc->periodic_freq == RTC_1HZ)
+			rtc_update_irq(rtc->rtc_dev, 1, (RTC_IRQF | RTC_UF));
+		else
+			rtc_update_irq(rtc->rtc_dev, 1, (RTC_IRQF | RTC_PF));
+
+		irq_status = IRQ_HANDLED;
+	}
+#endif
+
+	return irq_status;
+}
+
+static int mvebu_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+	mvebu_rtc_t *rtc = dev_get_drvdata(dev);
+	unsigned long time, time_check;
+
+	spin_lock_irq(&rtc->lock);
+
+	time = RTC_READ_REG(RTC_TIME_REG_OFFS);
+
+	/* WA for failing time read attempts. The HW ERRATA information should be added here */
+	/* if detected more than one second between two time reads, read once again */
+	time_check = RTC_READ_REG(RTC_TIME_REG_OFFS);
+	if ((time_check - time) > 1)
+		time_check = RTC_READ_REG(RTC_TIME_REG_OFFS);
+	/* End of WA */
+
+	rtc_time_to_tm(time_check, tm);
+	spin_unlock_irq(&rtc->lock);
+
+	return 0;
+}
+
+static int mvebu_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+	mvebu_rtc_t *rtc = dev_get_drvdata(dev);
+	unsigned long time;
+
+	if (rtc_tm_to_time(tm, &time) == 0) {
+		spin_lock_irq(&rtc->lock);
+		/* WA for failing time set attempts. The HW ERRATA information should be added here */
+		RTC_WRITE_REG(0, RTC_STATUS_REG_OFFS);
+		mdelay(100);
+		/* End of SW WA */
+		RTC_WRITE_REG(time, RTC_TIME_REG_OFFS);
+		spin_unlock_irq(&rtc->lock);
+	}
+
+	return 0;
+}
+
+static int mvebu_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	mvebu_rtc_t *rtc = dev_get_drvdata(dev);
+	unsigned long time;
+
+	spin_lock_irq(&rtc->lock);
+
+	rtc_time_to_tm((time = RTC_READ_REG(RTC_ALARM_1_REG_OFFS)), &alrm->time);
+	alrm->enabled = (RTC_READ_REG(RTC_IRQ_1_CONFIG_REG_OFFS) & RTC_SZ_INTERRUPT1_INT1AE_MASK) ? 1 : 0;
+
+	spin_unlock_irq(&rtc->lock);
+
+	return 0;
+}
+
+static int mvebu_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+	mvebu_rtc_t *rtc = dev_get_drvdata(dev);
+	unsigned long time;
+	uint32_t val;
+
+	if (rtc_tm_to_time(&alrm->time, &time) == 0) {
+		spin_lock_irq(&rtc->lock);
+
+		if (rtc_setup_alarm(rtc, &alrm->time) == 0) {
+			if (alrm->enabled) {
+				RTC_WRITE_REG(RTC_SZ_INTERRUPT1_INT1AE_MASK, RTC_IRQ_1_CONFIG_REG_OFFS);
+				val = readl(rtc->regbase_soc + 0x8);
+				writel(val | (0x1 << 2), rtc->regbase_soc + 0x8);
+			}
+		}
+		spin_unlock_irq(&rtc->lock);
+	}
+
+	return 0;
+}
+
+static int mvebu_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
+{
+	mvebu_rtc_t *rtc = dev_get_drvdata(dev);
+	spin_lock_irq(&rtc->lock);
+
+	if (enable) {
+		RTC_WRITE_REG(RTC_SZ_INTERRUPT1_INT1AE_MASK, RTC_IRQ_1_CONFIG_REG_OFFS);
+	} else {
+		RTC_WRITE_REG(RTC_SZ_INTERRUPT1_RESERVED1_MASK, RTC_IRQ_1_CONFIG_REG_OFFS);
+	}
+
+	spin_unlock_irq(&rtc->lock);
+
+	return 0;
+}
+
+
+static const struct rtc_class_ops mvebu_rtc_ops = {
+	.read_time         = mvebu_rtc_read_time,
+	.set_time          = mvebu_rtc_set_time,
+	.read_alarm        = mvebu_rtc_read_alarm,
+	.set_alarm         = mvebu_rtc_set_alarm,
+	.alarm_irq_enable  = mvebu_rtc_alarm_irq_enable,
+};
+
+static int mvebu_rtc_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct resource *res;
+	mvebu_rtc_t *rtc = kzalloc(sizeof(struct mvebu_rtc_s), GFP_KERNEL);
+
+	if (unlikely(!rtc)) {
+		ret = -ENOMEM;
+		goto exit;
+	} else
+		platform_set_drvdata(pdev, rtc);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (unlikely(res == NULL)) {
+		dev_err(&pdev->dev, "No IO resource\n");
+		ret = -ENOENT;
+		goto errExit1;
+	}
+
+	if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+		dev_err(&pdev->dev, "No MEM resource\n");
+		ret = -EBUSY;
+		goto errExit1;
+	}
+
+	rtc->regbase_rtc = ioremap(res->start, resource_size(res));
+	if (unlikely(!rtc->regbase_rtc)) {
+		dev_err(&pdev->dev, "No REMAP resource\n");
+		ret = -EINVAL;
+		goto errExit2;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (unlikely(res == NULL)) {
+		dev_err(&pdev->dev, "No IO resource\n");
+		ret = -ENOENT;
+		goto errExit1;
+	}
+
+	if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+		dev_err(&pdev->dev, "No MEM resource\n");
+		ret = -EBUSY;
+		goto errExit1;
+	}
+
+	rtc->regbase_soc = ioremap(res->start, resource_size(res));
+	if (unlikely(!rtc->regbase_soc)) {
+		dev_err(&pdev->dev, "No REMAP resource\n");
+		ret = -EINVAL;
+		goto errExit2;
+	}
+
+	rtc->irq = platform_get_irq(pdev, 0);
+	if (unlikely(rtc->irq <= 0)) {
+		dev_err(&pdev->dev, "No IRQ resource\n");
+		ret = -ENOENT;
+		goto errExit1;
+	}
+
+	/* No need to re-init the RTC as it was already initialized by the boot loader at start of battery life */
+#if 1
+	/* Init the state of the RTC, failure indicates there is probably no battery */
+	{
+		uint32_t count = 3;
+
+		while (!rtc_init_state(rtc) && --count)
+			;
+
+		if (count == 0) {
+				dev_err(&pdev->dev, "%probe: Error in rtc_init_state\n", __func__);
+			ret = -ENODEV;
+			goto errExit3;
+		}
+	}
+#endif
+	spin_lock_init(&rtc->lock);
+
+	/* register shared periodic/carry/alarm irq */
+	ret = request_irq(rtc->irq, mvebu_rtc_irq_handler, 0, "mvebu-rtc", rtc);
+	if (unlikely(ret)) {
+		dev_err(&pdev->dev, "Request IRQ failed with %d, IRQ %d\n", ret, rtc->irq);
+		ret = ret;
+		goto errExit3;
+	}
+
+	device_init_wakeup(&pdev->dev, 1);
+	rtc->rtc_dev = rtc_device_register(pdev->name, &pdev->dev, &mvebu_rtc_ops, THIS_MODULE);
+	if (IS_ERR(rtc->rtc_dev)) {
+		dev_err(&pdev->dev, "%s: error in rtc_device_register\n", __func__);
+		ret = PTR_ERR(rtc);
+		goto errExit4;
+	}
+
+	rtc->periodic_freq = RTC_0HZ;
+	device_init_wakeup(&pdev->dev, 1);
+	goto exit;
+
+errExit4:
+	free_irq(rtc->irq, rtc);
+
+errExit3:
+	iounmap(rtc->regbase_rtc);
+
+errExit2:
+	release_mem_region(res->start, resource_size(res));
+
+errExit1:
+	kfree(rtc);
+
+exit:
+	return ret;
+}
+
+static int __exit mvebu_rtc_remove(struct platform_device *pdev)
+{
+	struct resource *res;
+	mvebu_rtc_t *rtc = platform_get_drvdata(pdev);
+
+	if (rtc) {
+		rtc_device_unregister(rtc->rtc_dev);
+		free_irq(rtc->irq, rtc);
+		iounmap(rtc->regbase_rtc);
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		release_mem_region(res->start, resource_size(res));
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+		release_mem_region(res->start, resource_size(res));
+		platform_set_drvdata(pdev, NULL);
+		kfree(rtc);
+	}
+
+	return 0;
+}
+
+static int mvebu_rtc_resume(struct platform_device *pdev)
+{
+	mvebu_rtc_t *rtc = platform_get_drvdata(pdev);
+
+	/* Update RTC-MBUS bridge timing parameters */
+	writel(0xFD4D4CFA, rtc->regbase_soc);
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id rtc_mvebu_of_match_table[] = {
+	{ .compatible = "marvell,mvebu-rtc", },
+	{}
+};
+#endif
+
+static struct platform_driver mvebu_rtc_driver = {
+	.probe      = mvebu_rtc_probe,
+	.remove     = __exit_p(mvebu_rtc_remove),
+#ifdef CONFIG_PM
+	.resume = mvebu_rtc_resume,
+#endif
+	.driver     = {
+		.name   = "mvebu-rtc",
+		.owner  = THIS_MODULE,
+		.of_match_table = of_match_ptr(rtc_mvebu_of_match_table),
+	},
+};
+
+static int __init mvebu_rtc_init(void)
+{
+	return platform_driver_register(&mvebu_rtc_driver);
+}
+
+static void __exit mvebu_rtc_exit(void)
+{
+	platform_driver_unregister(&mvebu_rtc_driver);
+}
+
+module_init(mvebu_rtc_init);
+module_exit(mvebu_rtc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Nadav Haklai <nadavh@marvell.com>");
+MODULE_DESCRIPTION("Marvell EBU SoC Realtime Clock Driver (RTC)");
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
old mode 100644
new mode 100755
index f43de1e56420..4bb93c8bad9d
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -360,7 +360,6 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
 	case VOLUME_OVERFLOW:
 	case MISCOMPARE:
 	case BLANK_CHECK:
-	case DATA_PROTECT:
 		return TARGET_ERROR;
 
 	case MEDIUM_ERROR:
@@ -385,7 +384,8 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
 			return TARGET_ERROR;
 		}
 		return SUCCESS;
-
+  
+  case DATA_PROTECT:
 	default:
 		return SUCCESS;
 	}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
old mode 100644
new mode 100755
index 26b543bc4f53..db6663295b87
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -64,6 +64,11 @@
 #include <scsi/scsi_ioctl.h>
 #include <scsi/scsicam.h>
 
+#ifdef ALPHA_CUSTOMIZE// 2012-01-12 - Tim Tsay - iscsi dev node index start from sdka
+#include <linux/string.h>
+#define ISCSI_DEV_START_INDEX	(26*11)
+#endif
+
 #include "sd.h"
 #include "scsi_priv.h"
 #include "scsi_logging.h"
@@ -2891,6 +2896,11 @@ static int sd_probe(struct device *dev)
 	int index;
 	int error;
 
+#ifdef ALPHA_CUSTOMIZE //2011-01-12 - Tim Tsay - let iscsi device node start from sdaa
+	int start_index = 0;
+	struct scsi_host_template *iscsi_hostt = sdp->host->hostt;
+#endif
+
 	error = -ENODEV;
 	if (sdp->type != TYPE_DISK && sdp->type != TYPE_MOD && sdp->type != TYPE_RBC)
 		goto out;
@@ -2907,6 +2917,21 @@ static int sd_probe(struct device *dev)
 	if (!gd)
 		goto out_free;
 
+#ifdef ALPHA_CUSTOMIZE //2011-01-12 - Tim Tsay - let iscsi device node start from sdaa
+	/* printk("\nCheck proc_name = %s \n", iscsi_hostt->proc_name); */
+	/* some scsi_host_template do not set proc_name, e.g. uas */
+	if (iscsi_hostt->proc_name && !strcmp(iscsi_hostt->proc_name, "iscsi_tcp")) {
+		start_index = ISCSI_DEV_START_INDEX;
+		do {
+			if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
+				goto out_put;
+			
+			spin_lock(&sd_index_lock);
+			error = ida_get_new_above(&sd_index_ida,start_index , &index);
+			spin_unlock(&sd_index_lock);
+		} while (error == -EAGAIN);
+	}else
+#endif
 	do {
 		if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
 			goto out_put;
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 66a5f82cf138..0119a9e01245 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -8,7 +8,6 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
@@ -23,7 +22,9 @@
 
 #define DRIVER_NAME			"orion_spi"
 
-#define ORION_NUM_CHIPSELECTS		1 /* only one slave is supported*/
+#define ORION_NUM_CHIPSELECTS		4
+#define ORION_CHIPSELECTS_OFFS		2
+#define ORION_CHIPSELECTS_MASK		(0x3 << ORION_CHIPSELECTS_OFFS)
 #define ORION_SPI_WAIT_RDY_MAX_LOOP	2000 /* in usec */
 
 #define ORION_SPI_IF_CTRL_REG		0x00
@@ -32,11 +33,11 @@
 #define ORION_SPI_DATA_IN_REG		0x0c
 #define ORION_SPI_INT_CAUSE_REG		0x10
 
-#define ORION_SPI_MODE_CPOL		(1 << 11)
-#define ORION_SPI_MODE_CPHA		(1 << 12)
+#define ORION_SPI_MODE_CPOL			(1 << 11)
+#define ORION_SPI_MODE_CPHA			(1 << 12)
 #define ORION_SPI_IF_8_16_BIT_MODE	(1 << 5)
 #define ORION_SPI_CLK_PRESCALE_MASK	0x1F
-#define ORION_SPI_MODE_MASK		(ORION_SPI_MODE_CPOL | \
+#define ORION_SPI_MODE_MASK			(ORION_SPI_MODE_CPOL | \
 					 ORION_SPI_MODE_CPHA)
 
 struct orion_spi {
@@ -44,7 +45,8 @@ struct orion_spi {
 	void __iomem		*base;
 	unsigned int		max_speed;
 	unsigned int		min_speed;
-	struct clk              *clk;
+	struct clk			*clk;
+	struct spi_device	*cur_spi;
 };
 
 static inline void __iomem *spi_reg(struct orion_spi *orion_spi, u32 reg)
@@ -174,10 +176,11 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
 
 static void orion_spi_set_cs(struct orion_spi *orion_spi, int enable)
 {
+	orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG,
+					  0x1 | ORION_CHIPSELECTS_MASK);
 	if (enable)
-		orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
-	else
-		orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
+		orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG,
+			0x1 | (orion_spi->cur_spi->chip_select << ORION_CHIPSELECTS_OFFS));
 }
 
 static inline int orion_spi_wait_till_ready(struct orion_spi *orion_spi)
@@ -308,6 +311,8 @@ static int orion_spi_transfer_one_message(struct spi_master *master,
 	if (status < 0)
 		goto msg_done;
 
+	orion_spi->cur_spi = spi;
+
 	list_for_each_entry(t, &m->transfers, transfer_list) {
 		/* make sure buffer length is even when working in 16
 		 * bit mode*/
@@ -405,6 +410,8 @@ static int orion_spi_probe(struct platform_device *pdev)
 	int status = 0;
 	const u32 *iprop;
 	int size;
+	u32 ret;
+	unsigned int num_cs;
 
 	master = spi_alloc_master(&pdev->dev, sizeof *spi);
 	if (master == NULL) {
@@ -419,14 +426,16 @@ static int orion_spi_probe(struct platform_device *pdev)
 					&size);
 		if (iprop && size == sizeof(*iprop))
 			master->bus_num = *iprop;
+
+		ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
+		if (ret < 0)
+			num_cs = ORION_NUM_CHIPSELECTS;
 	}
 
-	/* we support only mode 0, and no options */
 	master->mode_bits = SPI_CPHA | SPI_CPOL;
-
 	master->setup = orion_spi_setup;
 	master->transfer_one_message = orion_spi_transfer_one_message;
-	master->num_chipselect = ORION_NUM_CHIPSELECTS;
+	master->num_chipselect = num_cs;
 
 	dev_set_drvdata(&pdev->dev, master);
 
@@ -456,7 +465,7 @@ static int orion_spi_probe(struct platform_device *pdev)
 		status = -EBUSY;
 		goto out_rel_clk;
 	}
-	spi->base = ioremap(r->start, SZ_1K);
+	spi->base = ioremap(r->start, SZ_128);
 
 	if (orion_spi_reset(spi) < 0)
 		goto out_rel_mem;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
old mode 100644
new mode 100755
index 58c479d13b57..464480bcc941
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -519,7 +519,11 @@ static int __init iscsi_target_init_module(void)
 {
 	int ret = 0;
 
+#ifdef ALPHA_CUSTOMIZE
+	printk("iSCSI-Target "ISCSIT_VERSION".\n\t1. Support Default Policy\n\t2. Support Session Info\n");
+#else
 	pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
+#endif
 
 	iscsit_global = kzalloc(sizeof(struct iscsit_global), GFP_KERNEL);
 	if (!iscsit_global) {
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
old mode 100644
new mode 100755
index 130a1e4f96a1..9aa80b9a5e98
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -28,6 +28,98 @@
 #include "iscsi_target_nego.h"
 #include "iscsi_target_auth.h"
 
+
+#ifdef ALPHA_CUSTOMIZE //2012-03-06 Tim Tsay - Support Base64 decoding
+/* Ref to UNH-iSCSI 2.0 */
+static unsigned char base64_to_number(unsigned char base64)
+{
+	switch (base64) {
+	case '=':
+		return 64;
+	case '/':
+		return 63;
+	case '+':
+		return 62;
+	default:
+		if ((base64 >= 'A') && (base64 <= 'Z'))
+			return base64 - 'A';
+		else if ((base64 >= 'a') && (base64 <= 'z'))
+			return 26 + (base64 - 'a');
+		else if ((base64 >= '0') && (base64 <= '9'))
+			return 52 + (base64 - '0');
+		else
+			return 65;
+	}
+}
+
+static void decode_base64_str_to_hex(unsigned char *string, unsigned char *intnum, int str_len)
+{
+	int len;
+	int count;
+	int intptr;
+	unsigned char num[4];
+	unsigned int octets;
+
+	if ((string == NULL) || (intnum == NULL))
+		return;
+	len = strlen(string);
+	if(len > str_len) {
+		len = str_len;
+	}
+	if (len == 0)
+		return;
+	if ((len % 4) != 0)
+		return;
+	count = 0;
+	intptr = 0;
+	while (count < len - 4) {
+		num[0] = base64_to_number(string[count]);
+		num[1] = base64_to_number(string[count + 1]);
+		num[2] = base64_to_number(string[count + 2]);
+		num[3] = base64_to_number(string[count + 3]);
+		if ((num[0] == 65) || (num[1] == 65) || (num[2] == 65) || (num[3] == 65))
+			return;
+		count += 4;
+		octets =
+		    (num[0] << 18) | (num[1] << 12) | (num[2] << 6) |
+		    num[3];
+		intnum[intptr] = (octets & 0xFF0000) >> 16;
+		intnum[intptr + 1] = (octets & 0x00FF00) >> 8;
+		intnum[intptr + 2] = octets & 0x0000FF;
+		intptr += 3;
+	}
+	num[0] = base64_to_number(string[count]);
+	num[1] = base64_to_number(string[count + 1]);
+	num[2] = base64_to_number(string[count + 2]);
+	num[3] = base64_to_number(string[count + 3]);
+	if ((num[0] == 65) || (num[1] == 65) ||
+	    (num[2] == 65) || (num[3] == 65) || (num[0] == 64)
+	    || (num[1] == 64))
+		return;
+	if (num[2] == 64) {
+		if (num[3] != 64)
+			return;
+		intnum[intptr] = (num[0] << 2) | (num[1] >> 4);
+	} else if (num[3] == 64) {
+		intnum[intptr] = (num[0] << 2) | (num[1] >> 4);
+		intnum[intptr + 1] = (num[1] << 4) | (num[2] >> 2);
+	} else {
+		octets =
+		    (num[0] << 18) | (num[1] << 12) | (num[2] << 6) |
+		    num[3];
+		intnum[intptr] = (octets & 0xFF0000) >> 16;
+		intnum[intptr + 1] = (octets & 0x00FF00) >> 8;
+		intnum[intptr + 2] = octets & 0x0000FF;
+	}
+}
+
+void chap_base64_str_to_hex(unsigned char *dst, unsigned char *src, int len)
+{
+	decode_base64_str_to_hex(src, dst, len);
+}
+
+#endif /* endof ALPHA_CUSTOMIZE */
+
 static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
 {
 	int j = DIV_ROUND_UP(len, 2), rc;
@@ -182,7 +274,11 @@ static int chap_server_compute_md5(
 		pr_err("Could not find CHAP_N.\n");
 		goto out;
 	}
-	if (type == HEX) {
+	if (type == HEX
+#ifdef ALPHA_CUSTOMIZE /* 2012-03-06 Tim Tsay - Support Base64 decoding */
+		|| type == BASE64
+#endif
+	) {
 		pr_err("Could not find CHAP_N.\n");
 		goto out;
 	}
@@ -202,12 +298,21 @@ static int chap_server_compute_md5(
 		pr_err("Could not find CHAP_R.\n");
 		goto out;
 	}
-	if (type != HEX) {
+	if (type != HEX
+#ifdef ALPHA_CUSTOMIZE /* 2012-03-06 Tim Tsay - Support Base64 decoding */
+		&& type != BASE64
+#endif
+	) {
 		pr_err("Could not find CHAP_R.\n");
 		goto out;
 	}
 
 	pr_debug("[server] Got CHAP_R=%s\n", chap_r);
+#ifdef ALPHA_CUSTOMIZE /* 2012-03-06 Tim Tsay - Support Base64 decoding */
+	if(type == BASE64) {
+		chap_base64_str_to_hex(client_digest, chap_r, strlen(chap_r));
+	} else
+#endif
 	chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
 
 	tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
old mode 100644
new mode 100755
index c45b3365d63d..87e15e58e7df
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1054,7 +1054,66 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
 /* End items for lio_target_tpg_attrib_cit */
 
 /* Start items for lio_target_tpg_param_cit */
-
+#ifdef ALPHA_CUSTOMIZE //2013-09 Odie Lee - fix parameters "newline" character issue
+#define DEF_TPG_PARAM(name)						\
+static ssize_t iscsi_tpg_param_show_##name(				\
+	struct se_portal_group *se_tpg,					\
+	char *page)							\
+{									\
+	struct iscsi_portal_group *tpg = container_of(se_tpg,		\
+			struct iscsi_portal_group, tpg_se_tpg);		\
+	struct iscsi_param *param;					\
+	ssize_t rb;							\
+									\
+	if (iscsit_get_tpg(tpg) < 0)					\
+		return -EINVAL;						\
+									\
+	param = iscsi_find_param_from_key(__stringify(name),		\
+				tpg->param_list);			\
+	if (!param) {							\
+		iscsit_put_tpg(tpg);					\
+		return -EINVAL;						\
+	}								\
+	rb = snprintf(page, PAGE_SIZE, "%s\n", param->value);		\
+									\
+	iscsit_put_tpg(tpg);						\
+	return rb;							\
+}									\
+static ssize_t iscsi_tpg_param_store_##name(				\
+	struct se_portal_group *se_tpg,				\
+	const char *page,						\
+	size_t count)							\
+{									\
+	struct iscsi_portal_group *tpg = container_of(se_tpg,		\
+			struct iscsi_portal_group, tpg_se_tpg);		\
+	char *buf;							\
+	int ret;							\
+									\
+	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);				\
+	if (!buf)							\
+		return -ENOMEM;						\
+	snprintf(buf, PAGE_SIZE, "%s=%s", __stringify(name), page);	\
+	if( buf[strlen(buf)-1]=='\n' )	\
+		buf[strlen(buf)-1] = '\0'; /* Kill newline */			\
+									\
+	if (iscsit_get_tpg(tpg) < 0) {					\
+		kfree(buf);						\
+		return -EINVAL;						\
+	}								\
+									\
+	ret = iscsi_change_param_value(buf, tpg->param_list, 1);	\
+	if (ret < 0)							\
+		goto out;						\
+									\
+	kfree(buf);							\
+	iscsit_put_tpg(tpg);						\
+	return count;							\
+out:									\
+	kfree(buf);							\
+	iscsit_put_tpg(tpg);						\
+	return -EINVAL;						\
+}
+#else
 #define DEF_TPG_PARAM(name)						\
 static ssize_t iscsi_tpg_param_show_##name(				\
 	struct se_portal_group *se_tpg,					\
@@ -1112,6 +1171,7 @@ out:									\
 	iscsit_put_tpg(tpg);						\
 	return -EINVAL;						\
 }
+#endif
 
 #define TPG_PARAM_ATTR(_name, _mode) TF_TPG_PARAM_ATTR(iscsi, _name, _mode);
 
@@ -1266,10 +1326,187 @@ out:
 
 TF_TPG_BASE_ATTR(lio_target, enable, S_IRUGO | S_IWUSR);
 
+#ifdef ALPHA_CUSTOMIZE //2011-10 tim tsay - show session info
+// show session info for a tpg
+
+static ssize_t lio_target_show_tgp_sess_info(
+		struct iscsi_portal_group *tpg, 
+		char *page,
+		const ssize_t max_pagelen)
+{
+	ssize_t rb = 0;
+	struct se_portal_group *se_tpg = NULL;
+	struct iscsi_session *sess = NULL;
+	struct se_session *se_sess = NULL, *se_sess_tmp = NULL;
+	struct iscsi_conn *conn = NULL;
+	int is_empty = 0;
+
+	if(tpg == NULL || page == NULL)	
+		return 0;
+
+	se_tpg = &tpg->tpg_se_tpg;
+	if(se_tpg == NULL)
+		return 0;
+	
+	spin_lock_bh(&se_tpg->session_lock);
+	/*
+		Warning :uncomment below statement will cause kernel panic: message:
+			Aiee, killing interrupt handler 
+	*/
+	//is_empty = (se_tpg->tpg_sess_list.next == &se_tpg->tpg_sess_list?1:0);
+	//printk("after check se_tpg->tpg_sess_list\n");
+	if(is_empty == 0) {
+		//printk("before sprintf(TPGT)\n");
+		
+		rb += sprintf(page, "----------------[iSCSI Session Info "
+		"for TPGT: %u]-----------------\n", (unsigned int)tpg->tpgt);
+		
+		list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
+				sess_list) {
+
+			sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+			if(sess->sess_ops == NULL)
+				break;
+			
+			if (sess->sess_ops->InitiatorName)
+				rb += snprintf(page+rb,(max_pagelen-rb), "InitiatorName: %s\n",
+					sess->sess_ops->InitiatorName);
+			if (sess->sess_ops->InitiatorAlias)
+				rb += snprintf(page+rb,(max_pagelen-rb), "InitiatorAlias: %s\n",
+					sess->sess_ops->InitiatorAlias);
+			
+			rb += snprintf(page+rb,(max_pagelen-rb), "LIO Session ID: %u   "
+				"ISID: 0x%02x %02x %02x %02x %02x %02x  "
+				"TSIH: %hu  ", sess->sid,
+				sess->isid[0], sess->isid[1], sess->isid[2],
+				sess->isid[3], sess->isid[4], sess->isid[5],
+				sess->tsih);
+			rb += snprintf(page+rb,(max_pagelen-rb), "SessionType: %s\n",
+					(sess->sess_ops->SessionType) ?
+					"Discovery" : "Normal");
+			rb += snprintf(page+rb,(max_pagelen-rb),"Session State: ");
+			switch (sess->session_state) {
+			case TARG_SESS_STATE_FREE:
+				rb += snprintf(page+rb,(max_pagelen-rb), "TARG_SESS_FREE\n");
+				break;
+			case TARG_SESS_STATE_ACTIVE:
+				rb += snprintf(page+rb,(max_pagelen-rb), "TARG_SESS_STATE_ACTIVE\n");
+				break;
+			case TARG_SESS_STATE_LOGGED_IN:
+				rb += snprintf(page+rb,(max_pagelen-rb), "TARG_SESS_STATE_LOGGED_IN\n");
+				break;
+			case TARG_SESS_STATE_FAILED:
+				rb += snprintf(page+rb,(max_pagelen-rb), "TARG_SESS_STATE_FAILED\n");
+				break;
+			case TARG_SESS_STATE_IN_CONTINUE:
+				rb += snprintf(page+rb,(max_pagelen-rb), "TARG_SESS_STATE_IN_CONTINUE\n");
+				break;
+			default:
+				rb += snprintf(page+rb,(max_pagelen-rb), "ERROR: Unknown Session State!\n");
+				break;
+			}
+			/*	remove session values for tpgt
+			rb += snprintf(page+rb,(max_pagelen-rb), "---------------------[iSCSI Session Values]-----------------------\n");
+			rb += snprintf(page+rb,(max_pagelen-rb), "  CmdSN/WR  :  CmdSN/WC  :  ExpCmdSN  :  MaxCmdSN  :     ITT    :     TTT\n");
+			rb += snprintf(page+rb,(max_pagelen-rb), " 0x%08x   0x%08x   0x%08x   0x%08x   0x%08x   0x%08x\n",
+				sess->cmdsn_window, (sess->max_cmd_sn - sess->exp_cmd_sn) + 1,
+				sess->exp_cmd_sn, sess->max_cmd_sn,
+				sess->init_task_tag, sess->targ_xfer_tag);
+			*/
+			rb += snprintf(page+rb,(max_pagelen-rb), "----------------------[iSCSI Connections]-------------------------\n");
+							
+			spin_lock(&sess->conn_lock);
+			list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
+				rb += sprintf(page+rb, "CID: %hu  Connection"
+						" State: ", conn->cid);
+				switch (conn->conn_state) {
+				case TARG_CONN_STATE_FREE:
+					rb += sprintf(page+rb,
+						"TARG_CONN_STATE_FREE\n");
+					break;
+				case TARG_CONN_STATE_XPT_UP:
+					rb += sprintf(page+rb,
+						"TARG_CONN_STATE_XPT_UP\n");
+					break;
+				case TARG_CONN_STATE_IN_LOGIN:
+					rb += sprintf(page+rb,
+						"TARG_CONN_STATE_IN_LOGIN\n");
+					break;
+				case TARG_CONN_STATE_LOGGED_IN:
+					rb += sprintf(page+rb,
+						"TARG_CONN_STATE_LOGGED_IN\n");
+					break;
+				case TARG_CONN_STATE_IN_LOGOUT:
+					rb += sprintf(page+rb,
+						"TARG_CONN_STATE_IN_LOGOUT\n");
+					break;
+				case TARG_CONN_STATE_LOGOUT_REQUESTED:
+					rb += sprintf(page+rb,
+						"TARG_CONN_STATE_LOGOUT_REQUESTED\n");
+					break;
+				case TARG_CONN_STATE_CLEANUP_WAIT:
+					rb += sprintf(page+rb,
+						"TARG_CONN_STATE_CLEANUP_WAIT\n");
+					break;
+				default:
+					rb += sprintf(page+rb,
+						"ERROR: Unknown Connection State!\n");
+					break;
+				}
+	
+				rb += sprintf(page+rb, "   Address %s %s\n", conn->login_ip,
+					(conn->network_transport == ISCSI_TCP) ?
+					"TCP" : "SCTP");
+				/*
+				rb += sprintf(page+rb, "  StatSN: 0x%08x\n",
+					conn->stat_sn);
+				*/
+			}
+			spin_unlock(&sess->conn_lock);
+		}
+		spin_unlock_bh(&se_tpg->session_lock);
+	}
+	return rb;
+}
+
+static ssize_t lio_target_tpg_show_sessions(
+	struct se_portal_group *se_tpg,
+	char *page)
+{
+	struct iscsi_portal_group *tpg = container_of(se_tpg,
+			struct iscsi_portal_group, tpg_se_tpg);
+
+	ssize_t len = 0;
+	
+	len = lio_target_show_tgp_sess_info(tpg, page, PAGE_SIZE);
+	return(len);
+}
+
+static ssize_t lio_target_tpg_store_sessions(
+	struct se_portal_group *se_tpg,
+	const char *page,
+	size_t count)
+{
+#if 0
+	struct iscsi_portal_group *tpg = container_of(se_tpg,
+			struct iscsi_portal_group, tpg_se_tpg);
+#endif
+
+	return -EINVAL;
+}
+TF_TPG_BASE_ATTR(lio_target, sessions, S_IRUGO);
+
+static struct configfs_attribute *lio_target_tpg_attrs[] = {
+	&lio_target_tpg_enable.attr,
+	&lio_target_tpg_sessions.attr,
+	NULL,
+};
+#else
 static struct configfs_attribute *lio_target_tpg_attrs[] = {
 	&lio_target_tpg_enable.attr,
 	NULL,
 };
+#endif
 
 /* End items for lio_target_tpg_cit */
 
@@ -1850,6 +2087,9 @@ int iscsi_target_register_configfs(void)
 	fabric->tf_ops.queue_data_in = &lio_queue_data_in;
 	fabric->tf_ops.queue_status = &lio_queue_status;
 	fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
+#ifdef ALPHA_CUSTOMIZE //2011-10 Tim Tsay - support default policy
+	fabric->tf_ops.copy_node_attributes = &lio_copy_node_attributes;
+#endif
 	/*
 	 * Setup function pointers for generic logic in target_core_fabric_configfs.c
 	 */
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
old mode 100644
new mode 100755
index bc788c52b6cc..616df4738b89
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1116,6 +1116,10 @@ iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t)
 	return 0;
 }
 
+#ifdef ALPHA_CUSTOMIZE	/* 2011-10 Tim Tsay - support default policy */
+int core_free_device_list_for_node(struct se_node_acl *,
+		        struct se_portal_group *);
+#endif
 static int __iscsi_target_login_thread(struct iscsi_np *np)
 {
 	u8 *buffer, zero_tsih = 0;
@@ -1124,6 +1128,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 	struct iscsi_login *login;
 	struct iscsi_portal_group *tpg = NULL;
 	struct iscsi_login_req *pdu;
+#ifdef ALPHA_CUSTOMIZE /* 2011-10 Tim Tsay - support default policy */
+	struct se_node_acl *se_nacl = NULL;
+#endif
 
 	flush_signals(current);
 
@@ -1308,6 +1315,27 @@ new_sess_out:
 				  ISCSI_LOGIN_STATUS_INIT_ERR);
 	if (!zero_tsih || !conn->sess)
 		goto old_sess_out;
+
+#ifdef ALPHA_CUSTOMIZE	/* 2011-10 Tim Tsay - support default policy */	
+	/* release device list from auth failed dynamic acls */
+	se_nacl = conn->sess->se_sess->se_node_acl;
+	if(tpg && se_nacl) {
+		struct se_portal_group *tpg_se_tpg = &tpg->tpg_se_tpg;
+		spin_lock_bh(&tpg_se_tpg->acl_node_lock);
+		if (se_nacl->dynamic_node_acl) {
+			if (!(ISCSI_TPG_ATTRIB(tpg)->cache_dynamic_acls)) {
+				list_del(&se_nacl->acl_list);
+				tpg_se_tpg->num_node_acls--;
+				spin_unlock_bh(&tpg_se_tpg->acl_node_lock);
+				core_free_device_list_for_node(se_nacl, tpg_se_tpg);
+				tpg_se_tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg_se_tpg, se_nacl);
+				spin_lock_bh(&tpg_se_tpg->acl_node_lock);
+			}
+		}
+		spin_unlock_bh(&tpg_se_tpg->acl_node_lock);
+	}
+#endif
+
 	if (conn->sess->se_sess)
 		transport_free_session(conn->sess->se_sess);
 	if (conn->sess->session_index != 0) {
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
old mode 100644
new mode 100755
index 72d9dec991c0..c1fba559c747
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -83,6 +83,12 @@ int extract_param(
 	if (*ptr == '0' && (*(ptr+1) == 'x' || *(ptr+1) == 'X')) {
 		ptr += 2; /* skip 0x */
 		*type = HEX;
+#ifdef ALPHA_CUSTOMIZE //2012-03-06 Tim Tsay - Support Base64 decoding
+	} 
+	else if (*ptr== '0' && (*(ptr+1) == 'b' || *(ptr+1) == 'B')) {
+		ptr += 2; /* skip 0b */
+		*type = BASE64;
+#endif //endof ALPHA_CUSTOMIZE
 	} else
 		*type = DECIMAL;
 
@@ -113,6 +119,9 @@ static u32 iscsi_handle_authentication(
 	struct iscsi_node_auth *auth;
 	struct iscsi_node_acl *iscsi_nacl;
 	struct se_node_acl *se_nacl;
+#ifdef ALPHA_CUSTOMIZE /* 2011-10 Tim Tsay - Read chap settings from default policy */
+	struct se_portal_group *se_tpg = NULL;
+#endif
 
 	if (!sess->sess_ops->SessionType) {
 		/*
@@ -124,6 +133,25 @@ static u32 iscsi_handle_authentication(
 					" CHAP auth\n");
 			return -1;
 		}
+#ifdef ALPHA_CUSTOMIZE /* 2011-10 Tim Tsay - Read chap settings from default policy */
+		/*
+		 * Read auth from default ACL for initiators that apply the default policy
+		 */
+		se_tpg = se_nacl->se_tpg;
+		if( (se_tpg->se_tpg_default_acl != NULL) &&
+			se_nacl->dynamic_node_acl) {
+			
+			iscsi_nacl = container_of(se_tpg->se_tpg_default_acl,
+				struct iscsi_node_acl, se_node_acl);
+			if (!(iscsi_nacl)) {
+				printk(KERN_ERR "Unable to locate iscsi_node_acl_t for"
+						" CHAP auth\n");
+				return -1;
+			}
+		}
+		else
+#endif
+		{
 		iscsi_nacl = container_of(se_nacl, struct iscsi_node_acl,
 				se_node_acl);
 		if (!iscsi_nacl) {
@@ -131,7 +159,7 @@ static u32 iscsi_handle_authentication(
 					" CHAP auth\n");
 			return -1;
 		}
-
+		}
 		auth = ISCSI_NODE_AUTH(iscsi_nacl);
 	} else {
 		/*
diff --git a/drivers/target/iscsi/iscsi_target_nego.h b/drivers/target/iscsi/iscsi_target_nego.h
old mode 100644
new mode 100755
index f021cbd330e5..e88d30cdb04f
--- a/drivers/target/iscsi/iscsi_target_nego.h
+++ b/drivers/target/iscsi/iscsi_target_nego.h
@@ -3,6 +3,9 @@
 
 #define DECIMAL         0
 #define HEX             1
+#ifdef ALPHA_CUSTOMIZE //2012-03-06 Tim Tsay - Support Base64 decoding
+#define BASE64 		2
+#endif
 
 extern void convert_null_to_semi(char *, int);
 extern int extract_param(const char *, const char *, unsigned int, char *,
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
old mode 100644
new mode 100755
index 11dc2936af76..89ef53d5d8a2
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.c
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -49,6 +49,27 @@ void iscsit_set_default_node_attribues(
 	a->default_erl = NA_DEFAULT_ERL;
 }
 
+#ifdef ALPHA_CUSTOMIZE //2011-10 Tim Tsay - support default policy
+void iscsi_copy_node_attribues (
+	struct iscsi_node_acl *dst_acl,
+	struct iscsi_node_acl *src_acl)
+{
+	struct iscsi_node_attrib *dst_a = &dst_acl->node_attrib;
+	struct iscsi_node_attrib *src_a = &src_acl->node_attrib;
+
+	dst_a->dataout_timeout = src_a->dataout_timeout;
+	dst_a->dataout_timeout_retries = src_a->dataout_timeout_retries;
+	dst_a->default_erl = src_a->default_erl;
+	dst_a->nopin_timeout = src_a->nopin_timeout;
+	dst_a->nopin_response_timeout = src_a->nopin_response_timeout;
+	dst_a->random_datain_pdu_offsets = src_a->random_datain_pdu_offsets;
+	dst_a->random_datain_seq_offsets = src_a->random_datain_seq_offsets;
+	dst_a->random_r2t_offsets = src_a->random_r2t_offsets;
+		
+	return;
+}
+#endif
+
 int iscsit_na_dataout_timeout(
 	struct iscsi_node_acl *acl,
 	u32 dataout_timeout)
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.h b/drivers/target/iscsi/iscsi_target_nodeattrib.h
old mode 100644
new mode 100755
index c970b326ef23..05c1b76cba40
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.h
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.h
@@ -11,4 +11,8 @@ extern int iscsit_na_random_datain_seq_offsets(struct iscsi_node_acl *, u32);
 extern int iscsit_na_random_r2t_offsets(struct iscsi_node_acl *, u32);
 extern int iscsit_na_default_erl(struct iscsi_node_acl *, u32);
 
+#ifdef ALPHA_CUSTOMIZE //2011-10 Tim Tsay - support default policy
+extern void iscsi_copy_node_attribues (struct iscsi_node_acl *, struct iscsi_node_acl *);
+#endif
+
 #endif /* ISCSI_TARGET_NODEATTRIB_H */
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
old mode 100644
new mode 100755
index a47046a752aa..a459dba2f09f
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -116,7 +116,11 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
 #define INITIAL_SENDTARGETS			ALL
 #define INITIAL_TARGETNAME			"LIO.Target"
 #define INITIAL_INITIATORNAME			"LIO.Initiator"
+#ifdef ALPHA_CUSTOMIZE //2011-10 Tim Tsay - change vendor info
+#define INITIAL_TARGETALIAS			"Western Digital Corporation Target"
+#else
 #define INITIAL_TARGETALIAS			"LIO Target"
+#endif
 #define INITIAL_INITIATORALIAS			"LIO Initiator"
 #define INITIAL_TARGETADDRESS			"0.0.0.0:0000,0"
 #define INITIAL_TARGETPORTALGROUPTAG		"1"
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
old mode 100644
new mode 100755
index f31b4c5cdf3f..51ec6a9c4d60
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -56,6 +56,22 @@ struct iscsi_portal_group *iscsit_alloc_portal_group(struct iscsi_tiqn *tiqn, u1
 	return tpg;
 }
 
+#ifdef ALPHA_CUSTOMIZE //2011-10 Tim Tsay - support default policy	
+void lio_copy_node_attributes (
+	struct se_node_acl *dst_se_acl,
+	struct se_node_acl *src_se_acl)
+{
+	struct iscsi_node_acl *dst_acl = container_of(dst_se_acl, struct iscsi_node_acl,
+					se_node_acl);
+	struct iscsi_node_acl *src_acl = container_of(src_se_acl, struct iscsi_node_acl,
+					se_node_acl);
+
+	ISCSI_NODE_ATTRIB(dst_acl)->nacl = dst_acl;
+	iscsi_copy_node_attribues(dst_acl,src_acl);
+	return;
+}
+#endif
+
 static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *);
 
 int iscsit_load_discovery_tpg(void)
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
old mode 100644
new mode 100755
index dda48c141a8c..8e035d607018
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -38,4 +38,8 @@ extern int iscsit_ta_cache_dynamic_acls(struct iscsi_portal_group *, u32);
 extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32);
 extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
 
+#ifdef ALPHA_CUSTOMIZE //2011-10 Tim Tsay - support default policy
+extern void lio_copy_node_attributes (struct se_node_acl *, struct se_node_acl *);
+#endif //end of ALPHA_CUSTOMIZE
+
 #endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
old mode 100644
new mode 100755
index 660b109487ae..741041e29849
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -271,6 +271,9 @@ int core_free_device_list_for_node(
 
 	return 0;
 }
+#ifdef ALPHA_CUSTOMIZE /* 2011-10 Tim Tsay - support default policy */
+EXPORT_SYMBOL(core_free_device_list_for_node);
+#endif
 
 void core_update_device_list_access(
 	u32 mapped_lun,
@@ -1253,6 +1256,9 @@ int core_dev_add_initiator_node_lun_acl(
 {
 	struct se_lun *lun;
 	struct se_node_acl *nacl;
+#ifdef ALPHA_CUSTOMIZE	/* 2012-07-04 Tim Tsay - set default policy to the existing sessions*/
+	struct se_node_acl *acl = NULL;
+#endif
 
 	lun = core_dev_get_lun(tpg, unpacked_lun);
 	if (!lun) {
@@ -1293,6 +1299,25 @@ int core_dev_add_initiator_node_lun_acl(
 	 * pre-registrations that need to be enabled for this LUN ACL..
 	 */
 	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
+
+#ifdef ALPHA_CUSTOMIZE	/* 2012-07-04 Tim Tsay - set default policy to the existing sessions*/
+	/*
+	 * apply new LUN ACL to default policies
+	 */
+	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg) && tpg->se_tpg_default_acl != NULL) { 
+		if(!strcmp(tpg->se_tpg_default_acl->initiatorname, nacl->initiatorname)) {
+			spin_lock_bh(&tpg->acl_node_lock);
+			list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+				if (acl->dynamic_node_acl && nacl != acl) {
+					spin_unlock_bh(&tpg->acl_node_lock);
+					core_tpg_add_node_to_devs(acl, tpg);
+					spin_lock_bh(&tpg->acl_node_lock);
+				}
+			}
+			spin_unlock_bh(&tpg->acl_node_lock);
+		}
+	}
+#endif /* end of ALPHA_CUSTOMIZE */
 	return 0;
 }
 
@@ -1306,6 +1331,9 @@ int core_dev_del_initiator_node_lun_acl(
 	struct se_lun_acl *lacl)
 {
 	struct se_node_acl *nacl;
+#ifdef ALPHA_CUSTOMIZE	/* 2012-07-04 Tim Tsay - set default policy to the existing sessions*/
+	struct se_node_acl *acl = NULL;
+#endif
 
 	nacl = lacl->se_lun_nacl;
 	if (!nacl)
@@ -1328,6 +1356,28 @@ int core_dev_del_initiator_node_lun_acl(
 		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
 		lacl->initiatorname, lacl->mapped_lun);
 
+#ifdef ALPHA_CUSTOMIZE	/* 2012-07-04 Tim Tsay - set default policy to the existing sessions*/
+	/*
+	 * apply new LUN ACL to default policies
+	 */
+	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg) && tpg->se_tpg_default_acl != NULL)
+	{
+		if(!strcmp(tpg->se_tpg_default_acl->initiatorname, nacl->initiatorname)) {
+			spin_lock_bh(&tpg->acl_node_lock);
+			list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
+				if (acl->dynamic_node_acl && nacl != acl) {
+					spin_unlock_bh(&tpg->acl_node_lock);
+					/* dynamic acl copy acl from default policy, so does mapped lun */
+					core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
+								TRANSPORT_LUNFLAGS_NO_ACCESS, acl, tpg);
+					spin_lock_bh(&tpg->acl_node_lock);
+				}
+			}
+			spin_unlock_bh(&tpg->acl_node_lock);
+		}
+	}
+#endif /* end of ALPHA_CUSTOMIZE */
+
 	return 0;
 }
 
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
old mode 100644
new mode 100755
index 04c775cb3e65..376ac6619252
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -540,7 +540,37 @@ static struct configfs_group_operations target_fabric_nacl_group_ops = {
 	.drop_item	= target_fabric_drop_nodeacl,
 };
 
+#ifdef ALPHA_CUSTOMIZE //2011-10 Tim Tsay - for default policy
+static void target_fabric_nacl_release(struct config_item *acl_ci) 
+{
+	struct config_group *acl_cg = NULL;
+	struct config_item *df_item = NULL;
+	int i = 0;
+	
+	if(!acl_ci)
+		return ;
+	
+	if(!(acl_cg = to_config_group(acl_ci)))
+		return ;
+
+	//put default acl
+	for (i = 0; acl_cg->default_groups[i]; i++) {
+		df_item = &acl_cg->default_groups[i]->cg_item;
+		acl_cg->default_groups[i] = NULL;
+		config_item_put(df_item);
+	}
+
+	//free default groups
+	kfree(acl_cg->default_groups);
+}
+
+static struct configfs_item_operations target_fabric_nacl_item_ops = {
+	.release = target_fabric_nacl_release,
+};
+TF_CIT_SETUP(tpg_nacl, &target_fabric_nacl_item_ops, &target_fabric_nacl_group_ops, NULL);
+#else
 TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);
+#endif
 
 /* End of tfc_tpg_nacl_cit */
 
@@ -1046,6 +1076,32 @@ static struct config_group *target_fabric_make_tpg(
 	config_group_init_type_name(&se_tpg->tpg_param_group, "param",
 			&TF_CIT_TMPL(tf)->tfc_tpg_param_cit);
 
+#ifdef ALPHA_CUSTOMIZE // support default policy
+	do{
+		struct config_group *nacl_cg = NULL;
+		struct se_node_acl *se_nacl = NULL;
+		struct config_group *acl_cg = NULL;
+
+		nacl_cg = target_fabric_make_nodeacl(&se_tpg->tpg_acl_group, "default_policy");
+		if (!nacl_cg || IS_ERR(nacl_cg)){
+			tf->tf_ops.fabric_drop_tpg(se_tpg);
+			return ERR_PTR(-EINVAL);
+		}
+		se_nacl = container_of(nacl_cg, struct se_node_acl, acl_group);
+
+		se_tpg->se_tpg_default_acl = se_nacl;
+		
+		acl_cg = &se_tpg->tpg_acl_group;
+		if (!(acl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
+				GFP_KERNEL))){
+			//target_fabric_drop_nodeacl(group, nacl_cg);
+			tf->tf_ops.fabric_drop_tpg(se_tpg);
+			return ERR_PTR(-EINVAL);
+		}
+		acl_cg->default_groups[0] = &se_nacl->acl_group;
+		acl_cg->default_groups[1] = NULL;
+	}while(0);
+#endif
 	return &se_tpg->tpg_group;
 }
 
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
old mode 100644
new mode 100755
index 18d49df4d0ac..4fb4c9eec449
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -83,6 +83,10 @@ int	core_tpg_post_addlun(struct se_portal_group *, struct se_lun *,
 struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun);
 int	core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
 
+#ifdef ALPHA_CUSTOMIZE //2011-10 Tim Tsay - support default policy
+void core_tpg_copy_node_to_devs( struct se_node_acl *, struct se_node_acl *, struct se_portal_group *);
+#endif
+
 /* target_core_transport.c */
 extern struct kmem_cache *se_tmr_req_cache;
 
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
old mode 100644
new mode 100755
index 04a74938bb43..8df089a26f7d
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -2252,6 +2252,16 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
 		}
 		spin_unlock(&pr_tmpl->registration_lock);
 
+#ifdef ALPHA_CUSTOMIZE /* 2012-03-02 Tim Tsay - increment the PRgeneration 
+									upon a successful REGISTER*/
+			/*
+			 * Increment PRgeneration counter for se_device_t"
+			 * upon a successful REGISTER, see spc4r17 section 6.3.2
+			 * READ_KEYS service action.
+			 */
+			pr_reg->pr_res_generation = core_scsi3_pr_generation(
+							cmd->se_dev);
+#endif
 		if (!aptpl) {
 			pr_tmpl->pr_aptpl_active = 0;
 			core_scsi3_update_and_write_aptpl(dev, NULL, 0);
@@ -3825,9 +3835,19 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd)
 		 * Check for overflow of 8byte PRI READ_KEYS payload and
 		 * next reservation key list descriptor.
 		 */
+
+#ifdef ALPHA_CUSTOMIZE /*2012-03-02 Tim Tsay - set the additional length 
+							according to the length of all reservation key list
+							, see SPC2 section 7.10
+							Note: remove this if we do not need to fully support SPC-2,
+							because it do not meet the SPC-3 above.
+							e.g. if the MS WLP SCSI Compliance does not require this anymore*/
+		if ((add_len + 8) <= (cmd->data_length - 8))
+#else
 		if ((add_len + 8) > (cmd->data_length - 8))
 			break;
-
+#endif
+		{
 		buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff);
 		buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff);
 		buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff);
@@ -3836,7 +3856,7 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd)
 		buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff);
 		buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff);
 		buf[off++] = (pr_reg->pr_res_key & 0xff);
-
+		}
 		add_len += 8;
 	}
 	spin_unlock(&dev->t10_pr.registration_lock);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
old mode 100644
new mode 100755
index bbc5b0ee2bdc..c601a3928030
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -33,6 +33,12 @@
 #include "target_core_internal.h"
 #include "target_core_ua.h"
 
+#ifdef ALPHA_CUSTOMIZE /* 2012-03-01 Tim Tsay - return error when PMI = 0, LBA != 0 
+ 	 	 	 	 	 	 	 return check condition , sense key = illegal request, 
+ 	 	 	 	 	 	 	 sense code = invalid cdb field reference to 
+ 	 	 	 	 	 	 	 SBC2r16 section 5.10 */
+static inline u32 transport_lba_32(unsigned char *cdb);
+#endif
 
 static sense_reason_t
 sbc_emulate_readcapacity(struct se_cmd *cmd)
@@ -42,6 +48,22 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)
 	unsigned char *rbuf;
 	unsigned char buf[8];
 	u32 blocks;
+#ifdef ALPHA_CUSTOMIZE /* 2012-03-01 Tim Tsay - return error when PMI = 0, LBA != 0 
+ 	 	 	 	 	 	 	 return check condition , sense key = illegal request, 
+ 	 	 	 	 	 	 	 sense code = invalid cdb field reference to 
+ 	 	 	 	 	 	 	 SBC2r16 section 5.10 */
+	unsigned char *cdb = cmd->t_task_cdb;
+	u32 lba = 0;
+	
+	if(!(cdb[8] & 0x1)) {
+		lba = transport_lba_32(&cdb[2]);
+		if(lba != 0) {
+			printk(KERN_ERR "READ CAPACITY(10) with PMI==0 but LBA=%u\n",
+			       (unsigned int)lba);
+			return TCM_INVALID_CDB_FIELD;
+		}
+	}
+#endif
 
 	if (blocks_long >= 0x00000000ffffffff)
 		blocks = 0xffffffff;
@@ -62,8 +84,12 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)
 		memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
 		transport_kunmap_data_sg(cmd);
 	}
-
+	
+#ifdef ALPHA_CUSTOMIZE /* 2014-09-12 Tim Tsay - apply transfer length patch from linux-3.10.54 */
+	target_complete_cmd_with_length(cmd, GOOD, 8);
+#else
 	target_complete_cmd(cmd, GOOD);
+#endif
 	return 0;
 }
 
@@ -101,7 +127,12 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
 		transport_kunmap_data_sg(cmd);
 	}
 
+#ifdef ALPHA_CUSTOMIZE /* 2014-09-12 Tim Tsay - apply transfer length patch from linux-3.10.54 */
+	target_complete_cmd_with_length(cmd, GOOD, 32);
+#else
 	target_complete_cmd(cmd, GOOD);
+#endif
+	
 	return 0;
 }
 
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
old mode 100644
new mode 100755
index 9fabbf7214cd..8b49fa5a802d
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -71,6 +71,10 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
 {
 	struct se_lun *lun = cmd->se_lun;
 	struct se_device *dev = cmd->se_dev;
+#ifdef ALPHA_CUSTOMIZE    /* 2012-02-29 Tim Tsay - customize vendor info and fixs to left-aligned ASCII data
+                              - SPC3r23 Section 4.4.1*/ 
+	int i;
+#endif
 
 	/* Set RMB (removable media) for tape devices */
 	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
@@ -96,15 +100,58 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
 	spc_fill_alua_data(lun->lun_sep, buf);
 
 	buf[7] = 0x2; /* CmdQue=1 */
-
+#ifdef ALPHA_CUSTOMIZE    /* 2012-02-29 Tim Tsay - customize vendor info and fixs to left-aligned ASCII data
+                              - SPC3r23 Section 4.4.1 and 6.4.2 */
+	memset(&buf[8], 0x20, 28);
+	memcpy(&buf[8], "WD      ", 8);
+#else
 	memcpy(&buf[8], "LIO-ORG ", 8);
+#endif
 	memset(&buf[16], 0x20, 16);
 	memcpy(&buf[16], dev->t10_wwn.model,
 	       min_t(size_t, strlen(dev->t10_wwn.model), 16));
 	memcpy(&buf[32], dev->t10_wwn.revision,
 	       min_t(size_t, strlen(dev->t10_wwn.revision), 4));
+#ifdef ALPHA_CUSTOMIZE    /* 2012-02-29 Tim Tsay - customize vendor info and fixs to left-aligned ASCII data
+                              - SPC3r23 Section 4.4.1 and 6.4.2 */ 
+	//replace null terminator ('\0') with space (' ')
+	for(i = 8;i < 36;i++) {
+		if(buf[i] == '\0') {
+			buf[i] = ' ';
+		}
+	}
+#endif
 	buf[4] = 31; /* Set additional length to 31 */
 
+#ifdef ALPHA_CUSTOMIZE /* 2012-02-29 Tim Tsay - add a version descriptor ref: SPC3r23  */
+	if(cmd->data_length >= 95) {
+		u32 off = 58;
+		/* SCSI architecture standard: SAM-3 (no version claimed) */
+		buf[off] = 0x00;
+		buf[off+1] = 0x60;
+		off += 2;
+
+		/* physical transport standard: ignored */
+
+		/* SCSI transport protocol standard : iSCSI */
+		buf[off] = 0x09;
+		buf[off+1] = 0x60;
+		off += 2;
+
+		/* SPC version : SPC-3r23 */
+		buf[off] = 0x03;
+		buf[off+1] = 0x12;
+		off += 2;
+
+		/* device type command set: SBC-2 */
+		buf[off] = 0x03;
+		buf[off+1] = 0x20;
+		off += 2;
+
+		buf[4] = off - 1 - 4;
+	}
+#endif /* endof ALPHA_CUSTOMIZE */
+
 	return 0;
 }
 EXPORT_SYMBOL(spc_emulate_inquiry_std);
@@ -210,12 +257,19 @@ spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
 	 */
 	buf[off++] = (0x6 << 4);
 
+#ifdef ALPHA_CUSTOMIZE	//2012-01-10 - change IEEE Company ID
+	// WESTERN DIGITAL CORPORATION IEEE Company ID: 00 00 C0
+	buf[off++] = 0x00;
+	buf[off++] = 0x0C;
+	buf[off] = (0x0 << 4);
+#else
 	/*
 	 * Use OpenFabrics IEEE Company ID: 00 14 05
 	 */
 	buf[off++] = 0x01;
 	buf[off++] = 0x40;
 	buf[off] = (0x5 << 4);
+#endif
 
 	/*
 	 * Return ConfigFS Unit Serial Number information for
@@ -247,7 +301,13 @@ check_t10_vend_desc:
 	buf[off] = 0x2; /* ASCII */
 	buf[off+1] = 0x1; /* T10 Vendor ID */
 	buf[off+2] = 0x0;
+#ifdef ALPHA_CUSTOMIZE    /* 2012-02-29 Tim Tsay - customize vendor info and fixs to left-aligned ASCII data
+                              - SPC4r17 Section 7.3.4 and Section 4.4.1*/ 
+	memcpy(&buf[off+4], "WD      ", 8);
+#else
 	memcpy(&buf[off+4], "LIO-ORG", 8);
+#endif
+
 	/* Extra Byte for NULL Terminator */
 	id_len++;
 	/* Identifier Length */
@@ -628,6 +688,9 @@ spc_emulate_inquiry(struct se_cmd *cmd)
 	unsigned char buf[SE_INQUIRY_BUF];
 	sense_reason_t ret;
 	int p;
+#ifdef ALPHA_CUSTOMIZE /* 2014-09-12 Tim Tsay - apply transfer length patch from linux-3.10.54 */
+	int len = 0;
+#endif
 
 	memset(buf, 0, SE_INQUIRY_BUF);
 
@@ -645,6 +708,9 @@ spc_emulate_inquiry(struct se_cmd *cmd)
 		}
 
 		ret = spc_emulate_inquiry_std(cmd, buf);
+#ifdef ALPHA_CUSTOMIZE /* 2014-09-12 Tim Tsay - apply transfer length patch from linux-3.10.54 */
+		len = buf[4] + 5;
+#endif
 		goto out;
 	}
 
@@ -652,6 +718,9 @@ spc_emulate_inquiry(struct se_cmd *cmd)
 		if (cdb[2] == evpd_handlers[p].page) {
 			buf[1] = cdb[2];
 			ret = evpd_handlers[p].emulate(cmd, buf);
+#ifdef ALPHA_CUSTOMIZE /* 2014-09-12 Tim Tsay - apply transfer length patch from linux-3.10.54 */
+			len = get_unaligned_be16(&buf[2]) + 4;
+#endif
 			goto out;
 		}
 	}
@@ -666,8 +735,14 @@ out:
 		transport_kunmap_data_sg(cmd);
 	}
 
+#ifdef ALPHA_CUSTOMIZE /* 2014-09-12 Tim Tsay - apply transfer length patch from linux-3.10.54 */
+	if (!ret)
+		target_complete_cmd_with_length(cmd, GOOD, len);
+#else
 	if (!ret)
 		target_complete_cmd(cmd, GOOD);
+#endif
+
 	return ret;
 }
 
@@ -984,8 +1059,12 @@ set_length:
 		memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length));
 		transport_kunmap_data_sg(cmd);
 	}
-
+	
+#ifdef ALPHA_CUSTOMIZE /* 2014-09-12 Tim Tsay - apply transfer length patch from linux-3.10.54 */
+	target_complete_cmd_with_length(cmd, GOOD, length);
+#else
 	target_complete_cmd(cmd, GOOD);
+#endif
 	return 0;
 }
 
@@ -1161,8 +1240,12 @@ done:
 	buf[2] = ((lun_count >> 8) & 0xff);
 	buf[3] = (lun_count & 0xff);
 	transport_kunmap_data_sg(cmd);
-
+	
+#ifdef ALPHA_CUSTOMIZE /* 2014-09-12 Tim Tsay - apply transfer length patch from linux-3.10.54 */
+	target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
+#else
 	target_complete_cmd(cmd, GOOD);
+#endif
 	return 0;
 }
 EXPORT_SYMBOL(spc_emulate_report_luns);
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
old mode 100644
new mode 100755
index aac9d2727e3c..4ba8cd54eab8
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -130,6 +130,17 @@ void core_tpg_add_node_to_devs(
 	struct se_lun *lun;
 	struct se_device *dev;
 
+#ifdef ALPHA_CUSTOMIZE //2011-10 Tim Tsay - support default policy
+/*	core_tpg_add_node_to_devs():
+ *
+ *	apply default policy instead of set from demo mode values
+ */
+	struct se_node_acl *default_acl = tpg->se_tpg_default_acl;
+	if(default_acl){
+		core_tpg_copy_node_to_devs(acl,default_acl, tpg);
+	}else
+#endif
+	{
 	spin_lock(&tpg->tpg_lun_lock);
 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
 		lun = tpg->tpg_lun_list[i];
@@ -168,6 +179,7 @@ void core_tpg_add_node_to_devs(
 		spin_lock(&tpg->tpg_lun_lock);
 	}
 	spin_unlock(&tpg->tpg_lun_lock);
+	}
 }
 
 /*      core_set_queue_depth_for_node():
@@ -245,6 +257,92 @@ static int core_create_device_list_for_node(struct se_node_acl *nacl)
 	return 0;
 }
 
+#ifdef ALPHA_CUSTOMIZE //2011-10 Tim Tsay - support default policy
+void core_tpg_copy_node_to_devs(
+	struct se_node_acl *dst_acl,
+	struct se_node_acl *src_acl,
+	struct se_portal_group *tpg)
+{
+	int i = 0, enable = 1;
+	u32 lun_access = 0;
+	u32 mapped_lun = 0;
+	struct se_lun *lun;
+	struct se_dev_entry *deve = NULL;
+
+	if(dst_acl == NULL || src_acl == NULL || tpg == NULL)
+		return;
+	
+	spin_lock(&tpg->tpg_lun_lock);
+	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
+		lun = tpg->tpg_lun_list[i];
+		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
+			continue;
+
+		spin_unlock(&tpg->tpg_lun_lock);
+
+		spin_lock_bh(&src_acl->device_list_lock);
+		for(mapped_lun = 0; mapped_lun < TRANSPORT_MAX_LUNS_PER_TPG; ++mapped_lun) {
+			deve = src_acl->device_list[mapped_lun];
+			if(!deve)
+				continue;
+			if(deve->se_lun == lun)
+				break;
+		}
+		if(mapped_lun < TRANSPORT_MAX_LUNS_PER_TPG) {
+			deve = src_acl->device_list[mapped_lun];
+			if(!deve) {
+				spin_unlock_bh(&src_acl->device_list_lock);
+				spin_lock(&tpg->tpg_lun_lock);
+				continue;
+			}
+		}
+		else {
+			spin_unlock_bh(&src_acl->device_list_lock);
+			spin_lock(&tpg->tpg_lun_lock);
+			continue;
+		}
+		mapped_lun = deve->mapped_lun;
+		
+		if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+			enable = 1;
+		} 
+		else {
+			enable = 0;
+		}
+		
+		lun_access = 0;
+		if (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_WRITE) {
+			lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
+		}
+		else if (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) {
+			lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+		}
+		else {
+			lun_access = TRANSPORT_LUNFLAGS_NO_ACCESS;
+		}
+		
+		spin_unlock_bh(&src_acl->device_list_lock);
+
+		printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u->%u] - Adding %s"
+			" access for LUN in Default Policy\n",
+			tpg->se_tpg_tfo->get_fabric_name(),
+			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,mapped_lun,
+			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
+			"READ-WRITE" : "READ-ONLY");
+
+
+		if( enable )
+			core_enable_device_list_for_node(lun, NULL, mapped_lun,
+					lun_access, dst_acl, tpg);
+		else
+			core_disable_device_list_for_node(lun, NULL, mapped_lun,
+					lun_access, dst_acl, tpg);
+		spin_lock(&tpg->tpg_lun_lock);
+	}
+	spin_unlock(&tpg->tpg_lun_lock);
+}
+#endif
+
 /*	core_tpg_check_initiator_node_acl()
  *
  *
@@ -254,6 +352,9 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
 	unsigned char *initiatorname)
 {
 	struct se_node_acl *acl;
+#ifdef ALPHA_CUSTOMIZE //2011-10 Tim Tsay - support default policy
+	struct se_node_acl *default_acl;
+#endif
 
 	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
 	if (acl)
@@ -262,6 +363,11 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
 	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
 		return NULL;
 
+#ifdef ALPHA_CUSTOMIZE //2011-10 Tim Tsay - support default policy
+	if(!(default_acl = tpg->se_tpg_default_acl))
+		return NULL;
+#endif
+
 	acl =  tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
 	if (!acl)
 		return NULL;
@@ -273,13 +379,22 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
 	spin_lock_init(&acl->device_list_lock);
 	spin_lock_init(&acl->nacl_sess_lock);
 	atomic_set(&acl->acl_pr_ref_count, 0);
+#ifdef ALPHA_CUSTOMIZE //2011-10 Tim Tsay - support default policy
+	acl->queue_depth = default_acl->queue_depth;
+#else
 	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
+#endif
 	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
 	acl->se_tpg = tpg;
 	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
 	spin_lock_init(&acl->stats_lock);
 	acl->dynamic_node_acl = 1;
 
+#ifdef ALPHA_CUSTOMIZE //2011-10 Tim Tsay - support default policy
+	if(tpg->se_tpg_tfo->copy_node_attributes) {
+		tpg->se_tpg_tfo->copy_node_attributes(acl,default_acl);
+	} else
+#endif
 	tpg->se_tpg_tfo->set_default_node_attributes(acl);
 
 	if (core_create_device_list_for_node(acl) < 0) {
@@ -299,14 +414,22 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
 	 */
 	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
 	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
+#ifdef ALPHA_CUSTOMIZE //2011-10 Tim Tsay - support default policy
+		core_tpg_copy_node_to_devs(acl,default_acl, tpg);
+#else
 		core_tpg_add_node_to_devs(acl, tpg);
+#endif
 
 	spin_lock_irq(&tpg->acl_node_lock);
 	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
 	tpg->num_node_acls++;
 	spin_unlock_irq(&tpg->acl_node_lock);
 
+#ifdef ALPHA_CUSTOMIZE //2011-10 Tim Tsay - support default policy
+	pr_debug("%s_TPG[%u] - Added Default Policy ACL with TCQ Depth: %d for %s"
+#else
 	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
+#endif
 		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
 		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
 		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
old mode 100644
new mode 100755
index 21e315874a54..37dc05051d65
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -633,6 +633,25 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 }
 EXPORT_SYMBOL(target_complete_cmd);
 
+#ifdef ALPHA_CUSTOMIZE /* 2014-09-12 Tim Tsay - apply transfer length patch from linux-3.10.54 */
+void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
+{
+	if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
+		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+			cmd->residual_count += cmd->data_length - length;
+		} else {
+			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+			cmd->residual_count = cmd->data_length - length;
+		}
+
+		cmd->data_length = length;
+	}
+
+	target_complete_cmd(cmd, scsi_status);
+}
+EXPORT_SYMBOL(target_complete_cmd_with_length);
+#endif
+
 static void target_add_to_state_list(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 54ffd64ca3f7..f7e6a8faf302 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -24,10 +24,7 @@
 #include <linux/of_device.h>
 #include <linux/thermal.h>
 
-#define THERMAL_VALID_OFFSET		9
 #define THERMAL_VALID_MASK		0x1
-#define THERMAL_TEMP_OFFSET		10
-#define THERMAL_TEMP_MASK		0x1ff
 
 /* Thermal Manager Control and Status Register */
 #define PMU_TDC0_SW_RST_MASK		(0x1 << 1)
@@ -38,24 +35,47 @@
 #define PMU_TDC0_OTF_CAL_MASK		(0x1 << 30)
 #define PMU_TDC0_START_CAL_MASK		(0x1 << 25)
 
-struct armada_thermal_ops;
+#define A375_Z1_CAL_RESET_LSB		0x8011e214
+#define A375_Z1_CAL_RESET_MSB		0x30a88019
+#define A375_Z1_WORKAROUND_BIT		BIT(9)
+
+#define A375_UNIT_CONTROL_SHIFT		27
+#define A375_UNIT_CONTROL_MASK		0x7
+#define A375_READOUT_INVERT		BIT(15)
+#define A375_HW_RESETn			BIT(8)
+#define A380_HW_RESET			BIT(8)
+
+struct armada_thermal_data;
 
 /* Marvell EBU Thermal Sensor Dev Structure */
 struct armada_thermal_priv {
 	void __iomem *sensor;
 	void __iomem *control;
-	struct armada_thermal_ops *ops;
+	struct armada_thermal_data *data;
 };
 
-struct armada_thermal_ops {
+struct armada_thermal_data {
 	/* Initialize the sensor */
-	void (*init_sensor)(struct armada_thermal_priv *);
+	void (*init_sensor)(struct platform_device *pdev,
+			    struct armada_thermal_priv *);
 
 	/* Test for a valid sensor value (optional) */
 	bool (*is_valid)(struct armada_thermal_priv *);
+
+	/* Formula coeficients: temp = (b + m * reg) / div */
+	unsigned long coef_b;
+	unsigned long coef_m;
+	unsigned long coef_div;
+	bool inverted;
+
+	/* Register shift and mask to access the sensor temperature */
+	unsigned int temp_shift;
+	unsigned int temp_mask;
+	unsigned int is_valid_shift;
 };
 
-static void armadaxp_init_sensor(struct armada_thermal_priv *priv)
+static void armadaxp_init_sensor(struct platform_device *pdev,
+				 struct armada_thermal_priv *priv)
 {
 	unsigned long reg;
 
@@ -80,7 +100,8 @@ static void armadaxp_init_sensor(struct armada_thermal_priv *priv)
 	writel(reg, priv->sensor);
 }
 
-static void armada370_init_sensor(struct armada_thermal_priv *priv)
+static void armada370_init_sensor(struct platform_device *pdev,
+				  struct armada_thermal_priv *priv)
 {
 	unsigned long reg;
 
@@ -99,11 +120,54 @@ static void armada370_init_sensor(struct armada_thermal_priv *priv)
 	mdelay(10);
 }
 
+static void armada375_init_sensor(struct platform_device *pdev,
+				  struct armada_thermal_priv *priv)
+{
+	unsigned long reg;
+	bool quirk_needed =
+		!!of_device_is_compatible(pdev->dev.of_node,
+					  "marvell,armada375-z1-thermal");
+
+	if (quirk_needed) {
+		/* Ensure these registers have the default (reset) values */
+		writel(A375_Z1_CAL_RESET_LSB, priv->control);
+		writel(A375_Z1_CAL_RESET_MSB, priv->control + 0x4);
+	}
+
+	reg = readl(priv->control + 4);
+	reg &= ~(A375_UNIT_CONTROL_MASK << A375_UNIT_CONTROL_SHIFT);
+	reg &= ~A375_READOUT_INVERT;
+	reg &= ~A375_HW_RESETn;
+
+	if (quirk_needed)
+		reg |= A375_Z1_WORKAROUND_BIT;
+
+	writel(reg, priv->control + 4);
+	mdelay(20);
+
+	reg |= A375_HW_RESETn;
+	writel(reg, priv->control + 4);
+	mdelay(50);
+}
+
+static void armada380_init_sensor(struct platform_device *pdev,
+				  struct armada_thermal_priv *priv)
+{
+	unsigned long reg = readl_relaxed(priv->control);
+
+	/* Reset hardware once */
+	if (!(reg & A380_HW_RESET)) {
+		reg |= A380_HW_RESET;
+		writel(reg, priv->control);
+		mdelay(10);
+	}
+}
+
 static bool armada_is_valid(struct armada_thermal_priv *priv)
 {
 	unsigned long reg = readl_relaxed(priv->sensor);
 
-	return (reg >> THERMAL_VALID_OFFSET) & THERMAL_VALID_MASK;
+	return (reg >> priv->data->is_valid_shift) & THERMAL_VALID_MASK;
 }
 
 static int armada_get_temp(struct thermal_zone_device *thermal,
@@ -111,17 +175,27 @@ static int armada_get_temp(struct thermal_zone_device *thermal,
 {
 	struct armada_thermal_priv *priv = thermal->devdata;
 	unsigned long reg;
+	unsigned long m, b, div;
 
 	/* Valid check */
-	if (priv->ops->is_valid && !priv->ops->is_valid(priv)) {
+	if (priv->data->is_valid && !priv->data->is_valid(priv)) {
 		dev_err(&thermal->device,
 			"Temperature sensor reading not valid\n");
 		return -EIO;
 	}
 
 	reg = readl_relaxed(priv->sensor);
-	reg = (reg >> THERMAL_TEMP_OFFSET) & THERMAL_TEMP_MASK;
-	*temp = (3153000000UL - (10000000UL*reg)) / 13825;
+	reg = (reg >> priv->data->temp_shift) & priv->data->temp_mask;
+
+	/* Get formula coeficients */
+	b = priv->data->coef_b;
+	m = priv->data->coef_m;
+	div = priv->data->coef_div;
+
+	if (priv->data->inverted)
+		*temp = ((m * reg) - b) / div;
+	else
+		*temp = (b - (m * reg)) / div;
 	return 0;
 }
 
@@ -129,23 +203,69 @@ static struct thermal_zone_device_ops ops = {
 	.get_temp = armada_get_temp,
 };
 
-static const struct armada_thermal_ops armadaxp_ops = {
+static const struct armada_thermal_data armadaxp_data = {
 	.init_sensor = armadaxp_init_sensor,
+	.temp_shift = 10,
+	.temp_mask = 0x1ff,
+	.coef_b = 3153000000UL,
+	.coef_m = 10000000UL,
+	.coef_div = 13825,
 };
 
-static const struct armada_thermal_ops armada370_ops = {
+static const struct armada_thermal_data armada370_data = {
 	.is_valid = armada_is_valid,
 	.init_sensor = armada370_init_sensor,
+	.is_valid_shift = 9,
+	.temp_shift = 10,
+	.temp_mask = 0x1ff,
+	.coef_b = 3153000000UL,
+	.coef_m = 10000000UL,
+	.coef_div = 13825,
+};
+
+static const struct armada_thermal_data armada375_data = {
+	.is_valid = armada_is_valid,
+	.init_sensor = armada375_init_sensor,
+	.is_valid_shift = 10,
+	.temp_shift = 0,
+	.temp_mask = 0x1ff,
+	.coef_b = 3171900000UL,
+	.coef_m = 10000000UL,
+	.coef_div = 13616,
+};
+
+static const struct armada_thermal_data armada380_data = {
+	.is_valid = armada_is_valid,
+	.init_sensor = armada380_init_sensor,
+	.is_valid_shift = 10,
+	.temp_shift = 0,
+	.temp_mask = 0x3ff,
+	.coef_b = 1169498786UL,
+	.coef_m = 2000000UL,
+	.coef_div = 4289,
+	.inverted = true,
 };
 
 static const struct of_device_id armada_thermal_id_table[] = {
 	{
 		.compatible = "marvell,armadaxp-thermal",
-		.data       = &armadaxp_ops,
+		.data       = &armadaxp_data,
 	},
 	{
 		.compatible = "marvell,armada370-thermal",
-		.data       = &armada370_ops,
+		.data       = &armada370_data,
+	},
+	{
+		.compatible = "marvell,armada375-thermal",
+		.data       = &armada375_data,
+	},
+	{
+		.compatible = "marvell,armada375-z1-thermal",
+		.data       = &armada375_data,
+	},
+	{
+		.compatible = "marvell,armada380-thermal",
+		.data       = &armada380_data,
 	},
 	{
 		/* sentinel */
@@ -178,8 +298,8 @@ static int armada_thermal_probe(struct platform_device *pdev)
 	if (IS_ERR(priv->control))
 		return PTR_ERR(priv->control);
 
-	priv->ops = (struct armada_thermal_ops *)match->data;
-	priv->ops->init_sensor(priv);
+	priv->data = (struct armada_thermal_data *)match->data;
+	priv->data->init_sensor(pdev, priv);
 
 	thermal = thermal_zone_device_register("armada_thermal", 0, 0,
 					       priv, &ops, NULL, 0, 0);
@@ -200,7 +320,17 @@ static int armada_thermal_exit(struct platform_device *pdev)
 		platform_get_drvdata(pdev);
 
 	thermal_zone_device_unregister(armada_thermal);
-	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static int armada_thermal_resume(struct platform_device *pdev)
+{
+	struct thermal_zone_device *thermal =
+		platform_get_drvdata(pdev);
+	struct armada_thermal_priv *priv = thermal->devdata;
+
+	priv->data->init_sensor(pdev, priv);
 
 	return 0;
 }
@@ -208,10 +338,13 @@ static int armada_thermal_exit(struct platform_device *pdev)
 static struct platform_driver armada_thermal_driver = {
 	.probe = armada_thermal_probe,
 	.remove = armada_thermal_exit,
+#ifdef CONFIG_PM
+	.resume = armada_thermal_resume,
+#endif
 	.driver = {
 		.name = "armada_thermal",
 		.owner = THIS_MODULE,
-		.of_match_table = of_match_ptr(armada_thermal_id_table),
+		.of_match_table = armada_thermal_id_table,
 	},
 };
 
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index a036e03ae1b3..15a397a6bc59 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -859,56 +859,12 @@ static int hub_hub_status(struct usb_hub *hub,
 	mutex_unlock(&hub->status_mutex);
 	return ret;
 }
-
 static int hub_set_port_link_state(struct usb_hub *hub, int port1,
-			unsigned int link_status)
+				unsigned int link_status)
 {
 	return set_port_feature(hub->hdev,
-			port1 | (link_status << 3),
-			USB_PORT_FEAT_LINK_STATE);
-}
-
-/*
- * If USB 3.0 ports are placed into the Disabled state, they will no longer
- * detect any device connects or disconnects.  This is generally not what the
- * USB core wants, since it expects a disabled port to produce a port status
- * change event when a new device connects.
- *
- * Instead, set the link state to Disabled, wait for the link to settle into
- * that state, clear any change bits, and then put the port into the RxDetect
- * state.
- */
-static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
-{
-	int ret;
-	int total_time;
-	u16 portchange, portstatus;
-
-	if (!hub_is_superspeed(hub->hdev))
-		return -EINVAL;
-
-	ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
-	if (ret)
-		return ret;
-
-	/* Wait for the link to enter the disabled state. */
-	for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
-		ret = hub_port_status(hub, port1, &portstatus, &portchange);
-		if (ret < 0)
-			return ret;
-
-		if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
-				USB_SS_PORT_LS_SS_DISABLED)
-			break;
-		if (total_time >= HUB_DEBOUNCE_TIMEOUT)
-			break;
-		msleep(HUB_DEBOUNCE_STEP);
-	}
-	if (total_time >= HUB_DEBOUNCE_TIMEOUT)
-		dev_warn(hub->intfdev, "Could not disable port %d after %d ms\n",
-				port1, total_time);
-
-	return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
+				port1 | (link_status << 3),
+				USB_PORT_FEAT_LINK_STATE);
 }
 
 static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
@@ -919,13 +875,8 @@ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
 	if (hub->ports[port1 - 1]->child && set_state)
 		usb_set_device_state(hub->ports[port1 - 1]->child,
 				USB_STATE_NOTATTACHED);
-	if (!hub->error) {
-		if (hub_is_superspeed(hub->hdev))
-			ret = hub_usb3_port_disable(hub, port1);
-		else
-			ret = usb_clear_port_feature(hdev, port1,
-					USB_PORT_FEAT_ENABLE);
-	}
+	if (!hub->error && !hub_is_superspeed(hub->hdev))
+		ret = usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_ENABLE);
 	if (ret && ret != -ENODEV)
 		dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n",
 				port1, ret);
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 344d5e2f87d7..6de64671261e 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -30,6 +30,13 @@ if USB_XHCI_HCD
 config USB_XHCI_PLATFORM
 	tristate
 
+config USB_XHCI_MVEBU
+	tristate "xHCI support for Marvell Armada 375/38x"
+	select USB_XHCI_PLATFORM
+	---help---
+	  Say 'Y' to enable the support for the xHCI host controller
+	  found in Marvell Armada 375 and 38x ARM SOCs.
+
 config USB_XHCI_HCD_DEBUGGING
 	bool "Debugging for the xHCI host controller"
 	---help---
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 4fb73c156d72..0875b6c0e890 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -17,6 +17,7 @@ xhci-hcd-$(CONFIG_PCI)	+= xhci-pci.o
 
 ifneq ($(CONFIG_USB_XHCI_PLATFORM), )
 	xhci-hcd-y		+= xhci-plat.o
+	xhci-hcd-$(CONFIG_USB_XHCI_MVEBU)	+= xhci-mvebu.o
 endif
 
 obj-$(CONFIG_USB_WHCI_HCD)	+= whci/
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index efbc588b48c5..4776f6efd1c5 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -24,8 +24,8 @@
 
 #include "ehci.h"
 
-#define rdl(off)	__raw_readl(hcd->regs + (off))
-#define wrl(off, val)	__raw_writel((val), hcd->regs + (off))
+#define rdl(off)	readl_relaxed(hcd->regs + (off))
+#define wrl(off, val)	writel_relaxed((val), hcd->regs + (off))
 
 #define USB_CMD			0x140
 #define USB_MODE		0x1a8
@@ -46,6 +46,8 @@ static const char hcd_name[] = "ehci-orion";
 
 static struct hc_driver __read_mostly ehci_orion_hc_driver;
 
+static u32 usb_save[(USB_IPG - USB_CAUSE) + (USB_PHY_TST_GRP_CTRL - USB_PHY_PWR_CTRL)];
+
 /*
  * Implement Orion USB controller specification guidelines
  */
@@ -287,9 +289,99 @@ static int ehci_orion_drv_remove(struct platform_device *pdev)
 		clk_disable_unprepare(clk);
 		clk_put(clk);
 	}
+
+	return 0;
+}
+
+static int ehci_orion_drv_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+
+	int addr, i;
+
+	for (addr = USB_CAUSE, i = 0; addr <= USB_IPG; addr += 0x4, i++)
+		usb_save[i] = readl_relaxed(hcd->regs + addr);
+
+	for (addr = USB_PHY_PWR_CTRL; addr <= USB_PHY_TST_GRP_CTRL; addr += 0x4, i++)
+		usb_save[i] = readl_relaxed(hcd->regs + addr);
+
+	return 0;
+}
+
+#define MV_USB_CORE_CMD_RESET_BIT           1
+#define MV_USB_CORE_CMD_RESET_MASK          (1 << MV_USB_CORE_CMD_RESET_BIT)
+#define MV_USB_CORE_MODE_OFFSET                 0
+#define MV_USB_CORE_MODE_MASK                   (3 << MV_USB_CORE_MODE_OFFSET)
+#define MV_USB_CORE_MODE_HOST                   (3 << MV_USB_CORE_MODE_OFFSET)
+#define MV_USB_CORE_MODE_DEVICE                 (2 << MV_USB_CORE_MODE_OFFSET)
+#define MV_USB_CORE_CMD_RUN_BIT             0
+#define MV_USB_CORE_CMD_RUN_MASK            (1 << MV_USB_CORE_CMD_RUN_BIT)
+
+static int ehci_orion_drv_resume(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	int addr, regVal, i;
+
+	for (addr = USB_CAUSE, i = 0; addr <= USB_IPG; addr += 0x4, i++)
+		writel_relaxed(usb_save[i], hcd->regs + addr);
+
+	for (addr = USB_PHY_PWR_CTRL; addr <= USB_PHY_TST_GRP_CTRL; addr += 0x4, i++)
+		writel_relaxed(usb_save[i], hcd->regs + addr);
+
+	/* Clear Interrupt Cause and Mask registers */
+	writel_relaxed(0, hcd->regs + 0x310);
+	writel_relaxed(0, hcd->regs + 0x314);
+
+	/* Reset controller */
+	regVal = readl_relaxed(hcd->regs + 0x140);
+	writel_relaxed(regVal | MV_USB_CORE_CMD_RESET_MASK, hcd->regs + 0x140);
+	while (readl_relaxed(hcd->regs + 0x140) & MV_USB_CORE_CMD_RESET_MASK)
+		;
+
+	/* Set Mode register (Stop and Reset USB Core before) */
+	/* Stop the controller */
+	regVal = readl_relaxed(hcd->regs + 0x140);
+	regVal &= ~MV_USB_CORE_CMD_RUN_MASK;
+	writel_relaxed(regVal, hcd->regs + 0x140);
+
+	/* Reset the controller to get default values */
+	regVal = readl_relaxed(hcd->regs + 0x140);
+	regVal |= MV_USB_CORE_CMD_RESET_MASK;
+	writel_relaxed(regVal, hcd->regs + 0x140);
+
+	/* Wait for the controller reset to complete */
+	do {
+		regVal = readl_relaxed(hcd->regs + 0x140);
+	} while (regVal & MV_USB_CORE_CMD_RESET_MASK);
+
+	/* Set USB_MODE register */
+	regVal = MV_USB_CORE_MODE_HOST;
+	writel_relaxed(regVal, hcd->regs + 0x1A8);
+
 	return 0;
 }
 
+static int ehci_orion_drv_shutdown(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	static void __iomem *usb_pwr_ctrl_base;
+	struct clk *clk;
+
+	usb_hcd_platform_shutdown(pdev);
+
+	usb_pwr_ctrl_base = hcd->regs + USB_PHY_PWR_CTRL;
+	BUG_ON(!usb_pwr_ctrl_base);
+	/* Power Down & PLL Power down */
+	writel((readl(usb_pwr_ctrl_base) & ~(BIT(0) | BIT(1))), usb_pwr_ctrl_base);
+
+	clk = clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(clk)) {
+		clk_disable_unprepare(clk);
+		clk_put(clk);
+	}
+
+}
+
 static const struct of_device_id ehci_orion_dt_ids[] = {
 	{ .compatible = "marvell,orion-ehci", },
 	{},
@@ -299,7 +391,11 @@ MODULE_DEVICE_TABLE(of, ehci_orion_dt_ids);
 static struct platform_driver ehci_orion_driver = {
 	.probe		= ehci_orion_drv_probe,
 	.remove		= ehci_orion_drv_remove,
-	.shutdown	= usb_hcd_platform_shutdown,
+#ifdef CONFIG_PM
+	.suspend        = ehci_orion_drv_suspend,
+	.resume         = ehci_orion_drv_resume,
+#endif
+	.shutdown	= ehci_orion_drv_shutdown,
 	.driver = {
 		.name	= "orion-ehci",
 		.owner  = THIS_MODULE,
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 7cdcfd024744..1a84bcd174ee 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -761,39 +761,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 			break;
 		case USB_PORT_FEAT_LINK_STATE:
 			temp = xhci_readl(xhci, port_array[wIndex]);
-
-			/* Disable port */
-			if (link_state == USB_SS_PORT_LS_SS_DISABLED) {
-				xhci_dbg(xhci, "Disable port %d\n", wIndex);
-				temp = xhci_port_state_to_neutral(temp);
-				/*
-				 * Clear all change bits, so that we get a new
-				 * connection event.
-				 */
-				temp |= PORT_CSC | PORT_PEC | PORT_WRC |
-					PORT_OCC | PORT_RC | PORT_PLC |
-					PORT_CEC;
-				xhci_writel(xhci, temp | PORT_PE,
-					port_array[wIndex]);
-				temp = xhci_readl(xhci, port_array[wIndex]);
-				break;
-			}
-
-			/* Put link in RxDetect (enable port) */
-			if (link_state == USB_SS_PORT_LS_RX_DETECT) {
-				xhci_dbg(xhci, "Enable port %d\n", wIndex);
-				xhci_set_link_state(xhci, port_array, wIndex,
-						link_state);
-				temp = xhci_readl(xhci, port_array[wIndex]);
-				break;
-			}
-
 			/* Software should not attempt to set
-			 * port link state above '3' (U3) and the port
+			 * port link state above '5' (Rx.Detect) and the port
 			 * must be enabled.
 			 */
 			if ((temp & PORT_PE) == 0 ||
-				(link_state > USB_SS_PORT_LS_U3)) {
+				(link_state > USB_SS_PORT_LS_RX_DETECT)) {
 				xhci_warn(xhci, "Cannot set link state.\n");
 				goto error;
 			}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index f2e57a1112c9..fbf5b9b2d53a 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -995,6 +995,32 @@ fail:
 	return 0;
 }
 
+void mrvl_refresh_ring_seg(struct xhci_segment *seg, unsigned int cycle_state)
+{
+	union xhci_trb *current_trb = seg->trbs;
+	do {
+		if (cycle_state != 0)
+			current_trb->link.control &= ~TRB_CYCLE;
+		else
+			current_trb->link.control |= TRB_CYCLE;
+		if (TRB_TYPE_LINK_LE32(current_trb->link.control))
+			break;
+	} while (current_trb++);
+}
+
+void mrvl_refresh_ring(struct xhci_ring *ring,unsigned int cycle_state)
+{
+	struct xhci_segment *current_seg = ring->first_seg;
+
+	while(current_seg != ring->last_seg) {
+		mrvl_refresh_ring_seg(current_seg, cycle_state);
+		current_seg = current_seg->next;
+	};
+
+	mrvl_refresh_ring_seg(current_seg, cycle_state);
+	xhci_initialize_ring_info(ring, cycle_state);
+}
+
 void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
 		struct usb_device *udev)
 {
@@ -1012,9 +1038,17 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
 	 * configured device has reset, so all control transfers should have
 	 * been completed or cancelled before the reset.
 	 */
+#define MRVL_XHCI_RELEASE_3_0		1
+#if defined(MRVL_XHCI_RELEASE_3_0) && MRVL_XHCI_RELEASE_3_0
+	/*walk around for address dev fail*/
+	mrvl_refresh_ring(ep_ring, 1);
+	ep0_ctx->deq = cpu_to_le64(ep_ring->first_seg->dma |
+			ep_ring->cycle_state);
+#else
 	ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg,
 							ep_ring->enqueue)
 				   | ep_ring->cycle_state);
+#endif
 }
 
 /*
diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
new file mode 100644
index 000000000000..a1271edc34cc
--- /dev/null
+++ b/drivers/usb/host/xhci-mvebu.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2013 Marvell
+ * Author: Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mbus.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "xhci.h"
+
+#define USB3_MAX_WINDOWS	4
+#define USB3_WIN_CTRL(w)	(0x0 + ((w) * 8))
+#define USB3_WIN_BASE(w)	(0x4 + ((w) * 8))
+
+struct xhci_mvebu_priv {
+	void __iomem *base;
+	struct clk *clk;
+};
+
+static void mv_usb3_conf_mbus_windows(void __iomem *base,
+				      const struct mbus_dram_target_info *dram)
+{
+	int win;
+
+	/* Clear all existing windows */
+	for (win = 0; win < USB3_MAX_WINDOWS; win++) {
+		writel(0, base + USB3_WIN_CTRL(win));
+		writel(0, base + USB3_WIN_BASE(win));
+	}
+
+	/* Program each DRAM CS in a seperate window */
+	for (win = 0; win < dram->num_cs; win++) {
+		const struct mbus_dram_window *cs = dram->cs + win;
+
+		writel(((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) |
+		       (dram->mbus_dram_target_id << 4) | 1,
+		       base + USB3_WIN_CTRL(win));
+
+		writel((cs->base & 0xffff0000), base + USB3_WIN_BASE(win));
+	}
+}
+
+int xhci_mvebu_probe(struct platform_device *pdev)
+{
+	struct resource	*res;
+	struct xhci_mvebu_priv *priv;
+	void __iomem	*base;
+	const struct mbus_dram_target_info *dram;
+	int ret;
+	struct clk *clk;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(struct xhci_mvebu_priv),
+			    GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!res)
+		return -ENODEV;
+
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (!base)
+		return -ENOMEM;
+
+	clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(clk)) {
+		return PTR_ERR(clk);
+	}
+
+	ret = clk_prepare_enable(clk);
+	if (ret < 0) {
+		return ret;
+	}
+
+	dram = mv_mbus_dram_info();
+	mv_usb3_conf_mbus_windows(base, dram);
+
+	priv->base = base;
+	priv->clk = clk;
+
+	/* Change default value of internal XHCI MAC register - USB3.0 disconnect WA - ERRATA ## TBD */
+	set_bit(7, base + 0x380c);
+
+	ret = common_xhci_plat_probe(pdev, priv);
+	if (ret < 0) {
+		clk_disable_unprepare(clk);
+		return ret;
+	}
+
+	return ret;
+}
+
+int xhci_mvebu_remove(struct platform_device *pdev)
+{
+	struct usb_hcd *hcd = platform_get_drvdata(pdev);
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	struct xhci_mvebu_priv *priv = (struct xhci_mvebu_priv *)xhci->priv;
+	struct clk *clk = priv->clk;
+
+	common_xhci_plat_remove(pdev);
+	clk_disable_unprepare(clk);
+
+	return 0;
+}
+
+void xhci_mvebu_resume(struct device *dev)
+{
+	const struct mbus_dram_target_info *dram;
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	struct xhci_mvebu_priv *priv = (struct xhci_mvebu_priv *)xhci->priv;
+	void __iomem *base = priv->base;
+
+	dram = mv_mbus_dram_info();
+	mv_usb3_conf_mbus_windows(base, dram);
+}
diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h
new file mode 100644
index 000000000000..d5ca7261c972
--- /dev/null
+++ b/drivers/usb/host/xhci-mvebu.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2013 Marvell
+ *
+ * Gregory Clement <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __LINUX_XHCI_MVEBU_H
+#define __LINUX_XHCI_MVEBU_H
+
+#ifdef CONFIG_USB_XHCI_MVEBU
+int xhci_mvebu_probe(struct platform_device *pdev);
+int xhci_mvebu_remove(struct platform_device *pdev);
+void xhci_mvebu_resume(struct device *dev);
+#else
+#define xhci_mvebu_probe NULL
+#define xhci_mvebu_remove NULL
+#define xhci_mvebu_resume NULL
+#endif
+#endif /* __LINUX_XHCI_MVEBU_H */
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 6e70ce976769..9b5ba6c7819b 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -14,8 +14,12 @@
 #include <linux/platform_device.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
 
 #include "xhci.h"
+#include "xhci-mvebu.h"
 
 static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
 {
@@ -82,7 +86,8 @@ static const struct hc_driver xhci_plat_xhci_driver = {
 	.bus_resume =		xhci_bus_resume,
 };
 
-static int xhci_plat_probe(struct platform_device *pdev)
+int common_xhci_plat_probe(struct platform_device *pdev,
+			   void *priv)
 {
 	const struct hc_driver	*driver;
 	struct xhci_hcd		*xhci;
@@ -104,6 +109,15 @@ static int xhci_plat_probe(struct platform_device *pdev)
 	if (!res)
 		return -ENODEV;
 
+	/* Initialize dma_mask and coherent_dma_mask to 32-bits */
+	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+	if (!pdev->dev.dma_mask)
+		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+	else
+		dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+
 	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
 	if (!hcd)
 		return -ENOMEM;
@@ -132,6 +146,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
 	/* USB 2.0 roothub is stored in the platform_device now. */
 	hcd = dev_get_drvdata(&pdev->dev);
 	xhci = hcd_to_xhci(hcd);
+	xhci->priv = priv;
 	xhci->shared_hcd = usb_create_shared_hcd(driver, &pdev->dev,
 			dev_name(&pdev->dev), hcd);
 	if (!xhci->shared_hcd) {
@@ -169,7 +184,7 @@ put_hcd:
 	return ret;
 }
 
-static int xhci_plat_remove(struct platform_device *dev)
+int common_xhci_plat_remove(struct platform_device *dev)
 {
 	struct usb_hcd	*hcd = platform_get_drvdata(dev);
 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
@@ -186,11 +201,136 @@ static int xhci_plat_remove(struct platform_device *dev)
 	return 0;
 }
 
+static int default_xhci_plat_probe(struct platform_device *pdev)
+{
+	return common_xhci_plat_probe(pdev, NULL);
+}
+
+static int default_xhci_plat_remove(struct platform_device *pdev)
+{
+	return common_xhci_plat_remove(pdev);
+}
+
+struct xhci_plat_ops {
+	int (*probe)(struct platform_device *);
+	int (*remove)(struct platform_device *);
+	void (*resume)(struct device *);
+};
+
+static struct xhci_plat_ops xhci_plat_default = {
+	.probe = default_xhci_plat_probe,
+	.remove =  default_xhci_plat_remove,
+};
+
+#ifdef CONFIG_OF
+struct xhci_plat_ops xhci_plat_mvebu = {
+	.probe =  xhci_mvebu_probe,
+	.remove =  xhci_mvebu_remove,
+	.resume =  xhci_mvebu_resume,
+};
+
+static const struct of_device_id usb_xhci_of_match[] = {
+	{
+		.compatible = "xhci-platform",
+		.data = (void *) &xhci_plat_default,
+	},
+	{
+		.compatible = "marvell,xhci-armada-375",
+		.data = (void *) &xhci_plat_mvebu,
+	},
+	{
+		.compatible = "marvell,xhci-armada-380",
+		.data = (void *) &xhci_plat_mvebu,
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
+#endif
+
+static int xhci_plat_probe(struct platform_device *pdev)
+{
+	const struct xhci_plat_ops *plat_of = &xhci_plat_default;
+
+	if (pdev->dev.of_node) {
+		const struct of_device_id *match =
+			of_match_device(usb_xhci_of_match, &pdev->dev);
+		if (!match)
+			return -ENODEV;
+		plat_of = match->data;
+	}
+
+	if (!plat_of || !plat_of->probe)
+		return  -ENODEV;
+
+	return plat_of->probe(pdev);
+}
+
+static int xhci_plat_remove(struct platform_device *pdev)
+{
+	const struct xhci_plat_ops *plat_of = &xhci_plat_default;
+
+	if (pdev->dev.of_node) {
+		const struct of_device_id *match =
+			of_match_device(usb_xhci_of_match, &pdev->dev);
+		if (!match)
+			return -ENODEV;
+		plat_of = match->data;
+	}
+
+	if (!plat_of || !plat_of->remove)
+		return  -ENODEV;
+
+	return plat_of->remove(pdev);
+}
+
+#ifdef CONFIG_PM
+static int xhci_plat_suspend(struct device *dev)
+{
+	struct usb_hcd	*hcd = dev_get_drvdata(dev);
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+
+	return xhci_suspend(xhci);
+}
+
+static int xhci_plat_resume(struct device *dev)
+{
+	const struct xhci_plat_ops *plat_of = &xhci_plat_default;
+	struct usb_hcd	*hcd = dev_get_drvdata(dev);
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+
+	if (dev->of_node) {
+		const struct of_device_id *match =
+			of_match_device(usb_xhci_of_match, dev);
+		if (!match)
+			return -ENODEV;
+		plat_of = match->data;
+	}
+
+	if (!plat_of)
+		return -ENODEV;
+
+	if (plat_of->resume)
+		plat_of->resume(dev);
+
+	return xhci_resume(xhci, 0);
+}
+
+static const struct dev_pm_ops xhci_plat_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(xhci_plat_suspend, xhci_plat_resume)
+};
+#define DEV_PM_OPS	(&xhci_plat_pm_ops)
+#else
+#define DEV_PM_OPS	NULL
+#endif /* CONFIG_PM */
+
 static struct platform_driver usb_xhci_driver = {
-	.probe	= xhci_plat_probe,
-	.remove	= xhci_plat_remove,
+	.probe		= xhci_plat_probe,
+	.remove		= xhci_plat_remove,
+	.shutdown	= xhci_plat_remove,
 	.driver	= {
 		.name = "xhci-hcd",
+		.pm = DEV_PM_OPS,
+		.of_match_table = of_match_ptr(usb_xhci_of_match),
 	},
 };
 MODULE_ALIAS("platform:xhci-hcd");
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index bcfb08e41eb6..90a6025a04ec 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1665,6 +1665,12 @@ static void handle_port_status(struct xhci_hcd *xhci,
 			port_id);
 
 	temp = xhci_readl(xhci, port_array[faked_port_index]);
+	if (temp & PORT_CEC) {
+		xhci_dbg(xhci, "port failed to configure its link partner.\n");
+		xhci_test_and_clear_bit(xhci, port_array,
+				faked_port_index, PORT_CEC);
+	}
+
 	if (hcd->state == HC_STATE_SUSPENDED) {
 		xhci_dbg(xhci, "resume root hub\n");
 		usb_hcd_resume_root_hub(hcd);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 4b46de842175..901133984cbd 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3499,7 +3499,6 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 	struct xhci_virt_device *virt_dev;
-	struct device *dev = hcd->self.controller;
 	unsigned long flags;
 	u32 state;
 	int i, ret;
@@ -3511,7 +3510,7 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
 	 * if no devices remain.
 	 */
 	if (xhci->quirks & XHCI_RESET_ON_RESUME)
-		pm_runtime_put_noidle(dev);
+		pm_runtime_put_noidle(hcd->self.controller);
 #endif
 
 	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
@@ -3585,7 +3584,6 @@ static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 {
 	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-	struct device *dev = hcd->self.controller;
 	unsigned long flags;
 	int timeleft;
 	int ret;
@@ -3645,7 +3643,7 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 	 * suspend if there is a device attached.
 	 */
 	if (xhci->quirks & XHCI_RESET_ON_RESUME)
-		pm_runtime_get_noresume(dev);
+		pm_runtime_get_noresume(hcd->self.controller);
 #endif
 
 	/* Is this a LS or FS device under a HS hub? */
@@ -4657,7 +4655,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
 	struct xhci_hcd		*xhci;
 	struct device		*dev = hcd->self.controller;
 	int			retval;
-	u32			temp;
 
 	/* Accept arbitrarily long scatter-gather lists */
 	hcd->self.sg_tablesize = ~0;
@@ -4685,14 +4682,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
 		/* xHCI private pointer was set in xhci_pci_probe for the second
 		 * registered roothub.
 		 */
-		xhci = hcd_to_xhci(hcd);
-		temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
-		if (HCC_64BIT_ADDR(temp)) {
-			xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
-			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
-		} else {
-			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
-		}
 		return 0;
 	}
 
@@ -4731,12 +4720,12 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
 		goto error;
 	xhci_dbg(xhci, "Reset complete\n");
 
-	temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
-	if (HCC_64BIT_ADDR(temp)) {
+	/* Set dma_mask and coherent_dma_mask to 64-bits,
+	 * if xHC supports 64-bit addressing */
+	if (HCC_64BIT_ADDR(xhci->hcc_params) &&
+			!dma_set_mask(dev, DMA_BIT_MASK(64))) {
 		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
-		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
-	} else {
-		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
+		dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
 	}
 
 	xhci_dbg(xhci, "Calling HCD init\n");
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 627fcd9388ca..277b9007efc0 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1415,6 +1415,7 @@ struct xhci_hcd {
 	__u32		hcc_params;
 
 	spinlock_t	lock;
+	void           *priv;
 
 	/* packed release number */
 	u8		sbrn;
@@ -1551,6 +1552,9 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
 	return xhci->main_hcd;
 }
 
+int common_xhci_plat_probe(struct platform_device *pdev, void *priv);
+int common_xhci_plat_remove(struct platform_device *dev);
+
 #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
 #define XHCI_DEBUG	1
 #else
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 4354b9127713..c2f0d24d3100 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -32,6 +32,9 @@
 #include <linux/ratelimit.h>
 #include <linux/uuid.h>
 #include <asm/unaligned.h>
+#include <linux/async_tx.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
 #include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -251,6 +254,47 @@ void btrfs_csum_final(u32 crc, char *result)
 	put_unaligned_le32(~crc, result);
 }
 
+struct dma_async_tx_descriptor *
+btrfs_csum_data_dma_offload(const u8 *data, u32 *crc, unsigned int len)
+{
+	struct async_submit_ctl submit;
+	struct dma_chan *chan;
+	struct dma_device *device;
+	struct dma_async_tx_descriptor *tx;
+	dma_addr_t src;
+
+	/* perform basic sanity checks */
+	if (unlikely((unsigned long)data < PAGE_OFFSET)	||
+	    unlikely(high_memory <= ((void *)data)))
+		return NULL;
+
+	len = min(len, (unsigned int)(high_memory - (void *)data));
+
+	/* offload the crc calc for page size */
+	if (len != PAGE_SIZE)
+		return NULL;
+
+	init_async_submit(&submit, 0, NULL, NULL, NULL, NULL);
+
+	chan = async_tx_find_channel(&submit, DMA_CRC32C, NULL, 0, NULL, 0, 0);
+	if (!chan)
+		return NULL;
+
+	device = chan->device;
+	src = dma_map_single(device->dev, (void *)data, len, DMA_TO_DEVICE);
+
+	tx = device->device_prep_dma_crc32c(chan, src, len, crc,
+					    DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT);
+	if (unlikely(!tx)) {
+		/* HW is unable to handle this context */
+		dma_unmap_page(device->dev, src, len, DMA_TO_DEVICE);
+		return NULL;
+	}
+
+	async_tx_submit(chan, tx, &submit);
+
+	return tx;
+}
 /*
  * compute the csum for a btree block, and either verify it or write it
  * into the csum field of the block.
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index be69ce1b07a2..8eb1cbeaad94 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -77,6 +77,8 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
 int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
 u32 btrfs_csum_data(char *data, u32 seed, size_t len);
+struct dma_async_tx_descriptor *
+btrfs_csum_data_dma_offload(const u8 *data, u32 *crc, unsigned int len);
 void btrfs_csum_final(u32 crc, char *result);
 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
 			int metadata);
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index b193bf324a41..d73f7ac610f7 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -20,6 +20,8 @@
 #include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <linux/highmem.h>
+#include <linux/dmaengine.h>
+#include <linux/async_tx.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -413,19 +415,25 @@ fail:
 	return ret;
 }
 
+struct sum_offload {
+	u32				*sum;	/* ptr to sum	*/
+	struct dma_async_tx_descriptor	*tx;	/* tx desc	*/
+	char				*data;	/* bvec kmapped	*/
+};
+
 int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 		       struct bio *bio, u64 file_start, int contig)
 {
 	struct btrfs_ordered_sum *sums;
 	struct btrfs_sector_sum *sector_sum;
 	struct btrfs_ordered_extent *ordered;
-	char *data;
 	struct bio_vec *bvec = bio->bi_io_vec;
-	int bio_index = 0;
+	int bio_index = 0, bio_idx2;
 	unsigned long total_bytes = 0;
 	unsigned long this_sum_bytes = 0;
 	u64 offset;
 	u64 disk_bytenr;
+	struct sum_offload *sum_off;
 
 	WARN_ON(bio->bi_vcnt <= 0);
 	sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS);
@@ -446,6 +454,9 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 	BUG_ON(!ordered); /* Logic error */
 	sums->bytenr = ordered->start;
 
+	sum_off = kzalloc(bio->bi_vcnt * (sizeof(struct sum_offload)), GFP_KERNEL);
+	BUG_ON(!sum_off);
+
 	while (bio_index < bio->bi_vcnt) {
 		if (!contig)
 			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
@@ -455,6 +466,17 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 			unsigned long bytes_left;
 			sums->len = this_sum_bytes;
 			this_sum_bytes = 0;
+			bio_idx2 = 0;
+
+			while (bio_idx2 < bio_index) {
+				if (sum_off[bio_idx2].tx) {
+					async_tx_quiesce(&sum_off[bio_idx2].tx);
+					kunmap_atomic(sum_off[bio_idx2].data);
+					btrfs_csum_final(*sum_off[bio_idx2].sum,
+							(char *)sum_off[bio_idx2].sum);
+				}
+				bio_idx2++;
+			}
 			btrfs_add_ordered_sum(inode, ordered, sums);
 			btrfs_put_ordered_extent(ordered);
 
@@ -470,14 +492,22 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 			sums->bytenr = ordered->start;
 		}
 
-		data = kmap_atomic(bvec->bv_page);
+		sum_off[bio_index].data = kmap_atomic(bvec->bv_page);
 		sector_sum->sum = ~(u32)0;
-		sector_sum->sum = btrfs_csum_data(data + bvec->bv_offset,
+		sum_off[bio_index].tx =
+			btrfs_csum_data_dma_offload(sum_off[bio_index].data + bvec->bv_offset,
+						&sector_sum->sum,
+						bvec->bv_len);
+
+		if (sum_off[bio_index].tx)
+			sum_off[bio_index].sum = &sector_sum->sum;
+		else {
+			sector_sum->sum = btrfs_csum_data(sum_off[bio_index].data + bvec->bv_offset,
 						  sector_sum->sum,
 						  bvec->bv_len);
-		kunmap_atomic(data);
-		btrfs_csum_final(sector_sum->sum,
-				 (char *)&sector_sum->sum);
+			kunmap_atomic(sum_off[bio_index].data);
+			btrfs_csum_final(sector_sum->sum, (char *)&sector_sum->sum);
+		}
 		sector_sum->bytenr = disk_bytenr;
 
 		sector_sum++;
@@ -488,6 +518,18 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 		offset += bvec->bv_len;
 		bvec++;
 	}
+
+	bio_idx2 = 0;
+	while (bio_idx2 < bio->bi_vcnt) {
+		if (sum_off[bio_idx2].tx) {
+			async_tx_quiesce(&sum_off[bio_idx2].tx);
+			kunmap_atomic(sum_off[bio_idx2].data);
+			btrfs_csum_final(*sum_off[bio_idx2].sum, (char *)sum_off[bio_idx2].sum);
+		}
+		bio_idx2++;
+	}
+
+	kfree(sum_off);
 	this_sum_bytes = 0;
 	btrfs_add_ordered_sum(inode, ordered, sums);
 	btrfs_put_ordered_extent(ordered);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
old mode 100644
new mode 100755
index e53009657f0e..bb2e30e00ae0
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1133,7 +1133,7 @@ static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
 	u64 bitmap_start;
 	u64 bytes_per_bitmap;
 
-	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
+	bytes_per_bitmap = ((u64)BITS_PER_BITMAP) * ctl->unit;
 	bitmap_start = offset - ctl->start;
 	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
 	bitmap_start *= bytes_per_bitmap;
@@ -1290,7 +1290,7 @@ tree_search_offset(struct btrfs_free_space_ctl *ctl,
 			    prev->offset + prev->bytes > offset)
 				return prev;
 		}
-		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
+		if (entry->offset + ((u64)BITS_PER_BITMAP) * ctl->unit > offset)
 			return entry;
 	} else if (entry->offset + entry->bytes > offset)
 		return entry;
@@ -1300,7 +1300,7 @@ tree_search_offset(struct btrfs_free_space_ctl *ctl,
 
 	while (1) {
 		if (entry->bitmap) {
-			if (entry->offset + BITS_PER_BITMAP *
+			if (entry->offset + ((u64)BITS_PER_BITMAP) *
 			    ctl->unit > offset)
 				break;
 		} else {
@@ -1354,7 +1354,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
 	u64 bitmap_bytes;
 	u64 extent_bytes;
 	u64 size = block_group->key.offset;
-	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
+	u64 bytes_per_bg = ((u64)BITS_PER_BITMAP) * ctl->unit;
 	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
 
 	max_bitmaps = max(max_bitmaps, 1);
@@ -1553,7 +1553,7 @@ static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
 	int ret;
 
 again:
-	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
+	end = bitmap_info->offset + (((u64)BITS_PER_BITMAP) * ctl->unit) - 1;
 
 	/*
 	 * We need to search for bits in this bitmap.  We could only cover some
@@ -1627,7 +1627,7 @@ static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
 	u64 bytes_to_set = 0;
 	u64 end;
 
-	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
+	end = info->offset + (((u64)BITS_PER_BITMAP) * ctl->unit);
 
 	bytes_to_set = min(end - offset, bytes);
 
@@ -1670,7 +1670,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 	 * so allow those block groups to still be allowed to have a bitmap
 	 * entry.
 	 */
-	if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
+	if (((((u64)BITS_PER_BITMAP) * ctl->unit) >> 1) > block_group->key.offset)
 		return false;
 
 	return true;
@@ -2784,11 +2784,11 @@ static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
 			break;
 next:
 		if (next_bitmap) {
-			offset += BITS_PER_BITMAP * ctl->unit;
+			offset += ((u64)BITS_PER_BITMAP) * ctl->unit;
 		} else {
 			start += bytes;
-			if (start >= offset + BITS_PER_BITMAP * ctl->unit)
-				offset += BITS_PER_BITMAP * ctl->unit;
+			if (start >= offset + ((u64)BITS_PER_BITMAP) * ctl->unit)
+				offset += ((u64)BITS_PER_BITMAP) * ctl->unit;
 		}
 
 		if (fatal_signal_pending(current)) {
@@ -3265,7 +3265,7 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache)
 	 * The first bitmap we have starts at offset 0 so the next one is just
 	 * at the end of the first bitmap.
 	 */
-	next_bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
+	next_bitmap_offset = ((u64)BITS_PER_BITMAP) * 4096;
 
 	/* Test a bit straddling two bitmaps */
 	ret = add_free_space_entry(cache, next_bitmap_offset -
@@ -3297,7 +3297,7 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache)
 /* This is the high grade jackassery */
 static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
 {
-	u64 bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
+	u64 bitmap_offset = ((u64)BITS_PER_BITMAP) * 4096;
 	int ret;
 
 	printk(KERN_ERR "Running bitmap and extent tests\n");
diff --git a/fs/buffer.c b/fs/buffer.c
index 75964d734444..860b02a2989d 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -985,7 +985,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
 		bh = page_buffers(page);
 		if (bh->b_size == size) {
 			end_block = init_page_buffers(page, bdev,
-						index << sizebits, size);
+				(sector_t)((sector_t)index <<
+					(sector_t)sizebits), size);
 			goto done;
 		}
 		if (!try_to_free_buffers(page))
@@ -1006,7 +1007,9 @@ grow_dev_page(struct block_device *bdev, sector_t block,
 	 */
 	spin_lock(&inode->i_mapping->private_lock);
 	link_dev_buffers(page, bh);
-	end_block = init_page_buffers(page, bdev, index << sizebits, size);
+	end_block = init_page_buffers(page, bdev,
+			(sector_t)((sector_t)index << (sector_t)sizebits),
+			size);
 	spin_unlock(&inode->i_mapping->private_lock);
 done:
 	ret = (block < end_block) ? 1 : -ENXIO;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 8b0c656f2ab2..d9f0cedb34b9 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3173,7 +3173,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
 	/* determine the eof that the server (probably) has */
 	eof = CIFS_I(rdata->mapping->host)->server_eof;
 	eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
-	cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
+	cifs_dbg(FYI, "eof=%llu eof_index=%llu\n", eof, eof_index);
 
 	rdata->tailsz = PAGE_CACHE_SIZE;
 	for (i = 0; i < nr_pages; i++) {
@@ -3183,14 +3183,14 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
 			/* enough data to fill the page */
 			iov.iov_base = kmap(page);
 			iov.iov_len = PAGE_CACHE_SIZE;
-			cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
+			cifs_dbg(FYI, "%u: idx=%llu iov_base=%p iov_len=%zu\n",
 				 i, page->index, iov.iov_base, iov.iov_len);
 			len -= PAGE_CACHE_SIZE;
 		} else if (len > 0) {
 			/* enough for partial page, fill and zero the rest */
 			iov.iov_base = kmap(page);
 			iov.iov_len = len;
-			cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n",
+			cifs_dbg(FYI, "%u: idx=%llu iov_base=%p iov_len=%zu\n",
 				 i, page->index, iov.iov_base, iov.iov_len);
 			memset(iov.iov_base + len,
 				'\0', PAGE_CACHE_SIZE - len);
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 4237722bfd27..0c5c9cd1faaa 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -180,7 +180,7 @@ Einumber:
 bad_entry:
 	if (!quiet)
 		ext2_error(sb, __func__, "bad entry in directory #%lu: : %s - "
-			"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
+			"offset=%llu, inode=%lu, rec_len=%d, name_len=%d",
 			dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
 			(unsigned long) le32_to_cpu(p->inode),
 			rec_len, p->name_len);
@@ -190,7 +190,7 @@ Eend:
 		p = (ext2_dirent *)(kaddr + offs);
 		ext2_error(sb, "ext2_check_page",
 			"entry in directory #%lu spans the page boundary"
-			"offset=%lu, inode=%lu",
+			"offset=%llu, inode=%lu",
 			dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
 			(unsigned long) le32_to_cpu(p->inode));
 	}
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 4635788e14bf..4133cd21d277 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -639,6 +639,7 @@ const struct file_operations ext4_file_operations = {
 	.fsync		= ext4_sync_file,
 	.splice_read	= generic_file_splice_read,
 	.splice_write	= generic_file_splice_write,
+	.splice_from_socket = generic_splice_from_socket,
 	.fallocate	= ext4_fallocate,
 };
 
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
old mode 100644
new mode 100755
index fba960ee26de..eca29aa860cd
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -27,6 +27,9 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <trace/events/ext4.h>
+#ifdef ALPHA_CUSTOMIZE		//alpha.Hwalock++ 2015/08/02 
+ #include <linux/random.h>
+#endif
 
 #ifdef CONFIG_EXT4_DEBUG
 ushort ext4_mballoc_debug __read_mostly;
@@ -2096,7 +2099,14 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
 	 * cr == 3  try to get anything
 	 */
 repeat:
+#ifdef ALPHA_CUSTOMIZE		//alpha.Hwalock++ 2015/08/02 
+	#define MBALLOC_RANDOM_THRES	1024
+	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
+		ext4_group_t random_interval;
+		random_interval = ngroups / ( MBALLOC_RANDOM_THRES / 2 );
+#else
 	for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
+#endif
 		ac->ac_criteria = cr;
 		/*
 		 * searching for the right group start
@@ -2105,13 +2115,33 @@ repeat:
 		group = ac->ac_g_ex.fe_group;
 
 		for (i = 0; i < ngroups; group++, i++) {
+#ifdef ALPHA_CUSTOMIZE		//alpha.Hwalock++ 2015/08/02 
+			if ( 0 == cr ) { // only do it on cr==0 for safety
+				if ( i >= MBALLOC_RANDOM_THRES &&
+						ngroups > 2 * MBALLOC_RANDOM_THRES ) {
+					ext4_group_t step;
+					step = get_random_int() % random_interval;
+					if ( 2 > step ) {
+						step = 0;
+					} else {
+						step -= 2;
+					}
+					group += step;
+					i += step;
+				}	
+			}
+
+			if ( group >= ngroups ) {
+				group -= ngroups;
+			}
+#else
 			/*
 			 * Artificially restricted ngroups for non-extent
 			 * files makes group > ngroups possible on first loop.
 			 */
 			if (group >= ngroups)
 				group = 0;
-
+#endif
 			/* This now checks without needing the buddy page */
 			if (!ext4_mb_good_group(ac, group, cr))
 				continue;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 23bf1a52a5da..b4d79dfa2d45 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -785,7 +785,7 @@ static int fuse_check_page(struct page *page)
 	       1 << PG_active |
 	       1 << PG_reclaim))) {
 		printk(KERN_WARNING "fuse: trying to steal weird page\n");
-		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
+		printk(KERN_WARNING "  page=%p index=%lli flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
 		return 1;
 	}
 	return 0;
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index 74825be65b7b..fbb9dfb7b1d2 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -288,20 +288,25 @@ void fsnotify_unmount_inodes(struct list_head *list)
 		spin_unlock(&inode->i_lock);
 
 		/* In case the dropping of a reference would nuke next_i. */
-		if ((&next_i->i_sb_list != list) &&
-		    atomic_read(&next_i->i_count)) {
+		while (&next_i->i_sb_list != list) {
 			spin_lock(&next_i->i_lock);
-			if (!(next_i->i_state & (I_FREEING | I_WILL_FREE))) {
+			if (!(next_i->i_state & (I_FREEING | I_WILL_FREE)) &&
+						atomic_read(&next_i->i_count)) {
 				__iget(next_i);
 				need_iput = next_i;
+				spin_unlock(&next_i->i_lock);
+				break;
 			}
 			spin_unlock(&next_i->i_lock);
+			next_i = list_entry(next_i->i_sb_list.next,
+						struct inode, i_sb_list);
 		}
 
 		/*
-		 * We can safely drop inode_sb_list_lock here because we hold
-		 * references on both inode and next_i.  Also no new inodes
-		 * will be added since the umount has begun.
+		 * We can safely drop inode_sb_list_lock here because either
+		 * we actually hold references on both inode and next_i or
+		 * end of list.  Also no new inodes will be added since the
+		 * umount has begun.
 		 */
 		spin_unlock(&inode_sb_list_lock);
 
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index fc6b49bf7360..5d70e8a1abfb 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -299,16 +299,22 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
 					 unsigned int flags)
 {
 	struct fsnotify_mark *lmark, *mark;
-
+	LIST_HEAD(free_list);
+	
 	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
 	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
 		if (mark->flags & flags) {
+		    list_add(&mark->free_g_list, &free_list);
+		    list_del_init(&mark->g_list);
 			fsnotify_get_mark(mark);
-			fsnotify_destroy_mark_locked(mark, group);
-			fsnotify_put_mark(mark);
 		}
 	}
 	mutex_unlock(&group->mark_mutex);
+	
+	list_for_each_entry_safe(mark, lmark, &free_list, free_g_list) {
+	    fsnotify_destroy_mark(mark, group);
+	    fsnotify_put_mark(mark);
+	}
 }
 
 /*
diff --git a/fs/read_write.c b/fs/read_write.c
index f6b7c600eb7f..3560ab2893c1 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1100,6 +1100,8 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
 	if (!(out.file->f_mode & FMODE_WRITE))
 		goto fput_out;
 	retval = -EINVAL;
+	if (!out.file->f_op || !out.file->f_op->sendpage)
+		goto fput_out;
 	in_inode = file_inode(in.file);
 	out_inode = file_inode(out.file);
 	out_pos = out.file->f_pos;
diff --git a/fs/splice.c b/fs/splice.c
index 4b5a5fac3383..1a07c1328500 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -19,6 +19,7 @@
  */
 #include <linux/fs.h>
 #include <linux/file.h>
+#include <linux/fsnotify.h>
 #include <linux/pagemap.h>
 #include <linux/splice.h>
 #include <linux/memcontrol.h>
@@ -32,6 +33,8 @@
 #include <linux/gfp.h>
 #include <linux/socket.h>
 #include <linux/compat.h>
+#include <linux/net.h>
+#include <net/sock.h>
 #include "internal.h"
 
 /*
@@ -536,10 +539,8 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
 		len = left;
 
 	ret = __generic_file_splice_read(in, ppos, pipe, len, flags);
-	if (ret > 0) {
+	if (ret > 0)
 		*ppos += ret;
-		file_accessed(in);
-	}
 
 	return ret;
 }
@@ -711,18 +712,19 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe,
 {
 	struct file *file = sd->u.file;
 	loff_t pos = sd->pos;
-	int more;
+	int ret, more;
 
-	if (!likely(file->f_op && file->f_op->sendpage))
-		return -EINVAL;
-
-	more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;
+	ret = buf->ops->confirm(pipe, buf);
+	if (!ret) {
+		more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;
+		if (sd->len < sd->total_len && pipe->nrbufs > 1)
+			more |= MSG_SENDPAGE_NOTLAST;
 
-	if (sd->len < sd->total_len && pipe->nrbufs > 1)
-		more |= MSG_SENDPAGE_NOTLAST;
+		ret = file->f_op->sendpage(file, buf->page, buf->offset,
+					   sd->len, &pos, more);
+	}
 
-	return file->f_op->sendpage(file, buf->page, buf->offset,
-				    sd->len, &pos, more);
+	return ret;
 }
 
 /*
@@ -1029,12 +1031,9 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
 
 		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
 		ret = file_remove_suid(out);
-		if (!ret) {
-			ret = file_update_time(out);
-			if (!ret)
-				ret = splice_from_pipe_feed(pipe, &sd,
-							    pipe_to_file);
-		}
+		if (!ret)
+			ret = splice_from_pipe_feed(pipe, &sd,
+						    pipe_to_file);
 		mutex_unlock(&inode->i_mutex);
 	} while (ret > 0);
 	splice_from_pipe_end(pipe, &sd);
@@ -1415,6 +1414,229 @@ static long do_splice(struct file *in, loff_t __user *off_in,
 	return -EINVAL;
 }
 
+ssize_t generic_splice_from_socket(struct file *file, struct socket *sock,
+				   loff_t __user *ppos, size_t count_req)
+{
+	struct address_space *mapping = file->f_mapping;
+	const struct address_space_operations *a_ops = mapping->a_ops;
+	struct inode *inode = mapping->host;
+	loff_t pos;
+	int err = 0, remaining;
+	struct kvec iov;
+	struct msghdr msg = { 0 };
+	size_t written = 0, verified_sz;
+
+	if (copy_from_user(&pos, ppos, sizeof(loff_t)))
+		return -EFAULT;
+
+	err = rw_verify_area(WRITE, file, &file->f_pos, count_req);
+	if (err < 0) {
+		pr_debug("%s: rw_verify_area, err %d\n", __func__, err);
+		return err;
+	}
+
+	verified_sz = err;
+
+	file_start_write(file);
+
+	mutex_lock(&inode->i_mutex);
+
+	/* We can write back this queue in page reclaim */
+	current->backing_dev_info = mapping->backing_dev_info;
+
+	err = generic_write_checks(file, &pos, &verified_sz, S_ISBLK(inode->i_mode));
+	if (err) {
+		pr_debug("%s: generic_write_checks err %d\n", __func__, err);
+		goto cleanup;
+	}
+
+	if (verified_sz == 0) {
+		pr_debug("%s: generic_write_checks err, verified_sz %d\n", __func__, verified_sz);
+		goto cleanup;
+	}
+
+	err = file_remove_suid(file);
+	if (err) {
+		pr_debug("%s: file_remove_suid, err %d\n", __func__, err);
+		goto cleanup;
+	}
+
+	err = file_update_time(file);
+	if (err) {
+		pr_debug("%s: file_update_time, err %d\n", __func__, err);
+		goto cleanup;
+	}
+
+	remaining = verified_sz;
+
+	while (remaining > 0) {
+		unsigned long offset;	/* Offset into pagecache page */
+		unsigned long bytes;	/* Bytes to write to page */
+		int copied;		/* Bytes copied from net */
+		struct page *page;
+		void *fsdata;
+		long rcvtimeo;
+		char *paddr;
+
+		offset = (pos & (PAGE_CACHE_SIZE - 1));
+		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, remaining);
+
+		err = a_ops->write_begin(file, mapping, pos, bytes,
+					 AOP_FLAG_UNINTERRUPTIBLE,
+					 &page, &fsdata);
+		if (unlikely(err)) {
+			pr_debug("%s: write_begin err %d\n", __func__, err);
+			break;
+		}
+
+		if (mapping_writably_mapped(mapping))
+			flush_dcache_page(page);
+
+		/* save page address for partial recvmsg case */
+		paddr = kmap(page) + offset;
+		iov.iov_base = paddr;
+		iov.iov_len = bytes;
+
+		rcvtimeo = sock->sk->sk_rcvtimeo;
+		sock->sk->sk_rcvtimeo = 5 * HZ;
+
+		/* IOV is ready, receive the data from socket now */
+		copied = kernel_recvmsg(sock, &msg, &iov, 1, bytes, MSG_WAITALL);
+
+		sock->sk->sk_rcvtimeo = rcvtimeo;
+
+		/* kernel_recvmsg returned an error or no data */
+		if (unlikely(copied <= 0)) {
+			kunmap(page);
+
+			/* update error and quit */
+			err = copied;
+
+			pr_debug("%s: kernel_recvmsg err %d, bytes %ld, written %zu, remaining %d\n",
+				 __func__, err, bytes, written, remaining);
+
+			/* release pagecache */
+			a_ops->write_end(file, mapping, pos,
+					 bytes, 0, page, fsdata);
+			break;
+		}
+
+		if (unlikely(copied != bytes)) {
+			char *kaddr;
+			char *buff;
+
+			/* recvmsg failed to write the requested bytes, this can happen from
+			 * NEED_RESCHED signal or socket timeout. Partial writes are not allowed
+			 * so we write the recvmsg portion and finish splice, this will force the
+			 * caller to redo the remaining.
+			 */
+
+			pr_debug("%s: do partial bytes %ld copied %d  offset %lu pos %llu\n",
+				 __func__, bytes, copied, offset, pos);
+
+			/* alloc buffer for recvmsg data */
+			buff = kmalloc(copied, GFP_KERNEL);
+			if (unlikely(!buff)) {
+				err = -ENOMEM;
+				break;
+			}
+			/* copy recvmsg bytes to buffer */
+			memcpy(buff, paddr, copied);
+
+			/* and free the partial page */
+			kunmap(page);
+			err = a_ops->write_end(file, mapping, pos,
+					       bytes, 0, page, fsdata);
+			if (unlikely(err < 0)) {
+				kfree(buff);
+				pr_debug("%s: write_end of partial recvmsg, err %d\n", __func__, err);
+				break;
+			}
+
+			/* allocate a new page with recvmsg size */
+			err = a_ops->write_begin(file, mapping, pos, copied,
+						 AOP_FLAG_UNINTERRUPTIBLE,
+						 &page, &fsdata);
+			if (unlikely(err)) {
+				kfree(buff);
+				pr_debug("%s: write_begin of copied recvmsg, err %d\n", __func__, err);
+				break;
+			}
+
+			if (mapping_writably_mapped(mapping))
+				flush_dcache_page(page);
+
+			/* copy the buffer to new page */
+			kaddr = kmap_atomic(page) + offset;
+			memcpy(kaddr, buff, copied);
+
+			kfree(buff);
+			kunmap_atomic(kaddr);
+
+			/* and write it */
+			mark_page_accessed(page);
+			err = a_ops->write_end(file, mapping, pos,
+					       copied, copied, page, fsdata);
+			if (unlikely(err < 0)) {
+				pr_debug("%s: write_end recvmsg, err %d\n", __func__, err);
+				break;
+			}
+
+			BUG_ON(copied != err);
+
+			/* update written counters */
+			pos += copied;
+			written += copied;
+
+			break;
+		} else {
+			kunmap(page);
+
+			/* page written w/o recvmsg error */
+			mark_page_accessed(page);
+			err = a_ops->write_end(file, mapping, pos,
+					       bytes, copied, page, fsdata);
+
+			if (unlikely(err < 0)) {
+				pr_debug("%s: write_end, err %d\n", __func__, err);
+				break;
+			}
+
+			BUG_ON(copied != err);
+		}
+		/* write success, update counters */
+		remaining -= copied;
+		pos += copied;
+		written += copied;
+	}
+
+	if (written > 0)
+		balance_dirty_pages_ratelimited(mapping);
+
+cleanup:
+	current->backing_dev_info = NULL;
+
+	mutex_unlock(&inode->i_mutex);
+
+	if (written > 0) {
+		err = generic_write_sync(file, pos - written, written);
+		if (err < 0) {
+			written = 0;
+			goto done;
+		}
+		fsnotify_modify(file);
+
+		if (copy_to_user(ppos, &pos, sizeof(loff_t))) {
+			written = 0;
+			err = -EFAULT;
+		}
+	}
+done:
+	file_end_write(file);
+
+	return written ? written : err;
+}
+
 /*
  * Map an iov into an array of pages and offset/length tupples. With the
  * partial_page structure, we can map several non-contiguous ranges into
@@ -1741,12 +1963,33 @@ SYSCALL_DEFINE6(splice, int, fd_in, loff_t __user *, off_in,
 		size_t, len, unsigned int, flags)
 {
 	struct fd in, out;
-	long error;
+	int error;
+	struct socket *sock = NULL;
 
 	if (unlikely(!len))
 		return 0;
 
 	error = -EBADF;
+	/* check if fd_in is a socket */
+	sock = sockfd_lookup(fd_in, &error);
+	if (sock) {
+		if (!sock->sk)
+			goto nosock;
+		out = fdget(fd_out);
+		if (out.file) {
+			if (!(out.file->f_mode & FMODE_WRITE))
+				goto done;
+			if (!out.file->f_op->splice_from_socket)
+				goto done;
+			error = out.file->f_op->splice_from_socket(out.file, sock, off_out, len);
+		}
+done:
+		fdput(out);
+nosock:
+		fput(sock->file);
+		return error;
+	}
+
 	in = fdget(fd_in);
 	if (in.file) {
 		if (in.file->f_mode & FMODE_READ) {
@@ -2094,3 +2337,4 @@ SYSCALL_DEFINE4(tee, int, fdin, int, fdout, size_t, len, unsigned int, flags)
 
 	return error;
 }
+
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index 8ca62c28fe12..920cc3b08828 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -385,7 +385,7 @@ static int squashfs_readpage(struct file *file, struct page *page)
 	int end_index = start_index | mask;
 	int file_end = i_size_read(inode) >> msblk->block_log;
 
-	TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n",
+	TRACE("Entered squashfs_readpage, page index %llx, start block %llx\n",
 				page->index, squashfs_i(inode)->start);
 
 	if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c
index 12806dffb345..7e3fddfa8e55 100644
--- a/fs/squashfs/symlink.c
+++ b/fs/squashfs/symlink.c
@@ -56,7 +56,7 @@ static int squashfs_symlink_readpage(struct file *file, struct page *page)
 	void *pageaddr;
 	struct squashfs_cache_entry *entry;
 
-	TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
+	TRACE("Entered squashfs_symlink_readpage, page index %lld, start block "
 			"%llx, offset %x\n", page->index, block, offset);
 
 	/*
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 7f60e900edff..0115651e285a 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -267,8 +267,8 @@ void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode)
 	       (unsigned long long)ui->ui_size);
 	pr_err("\tflags          %d\n", ui->flags);
 	pr_err("\tcompr_type     %d\n", ui->compr_type);
-	pr_err("\tlast_page_read %lu\n", ui->last_page_read);
-	pr_err("\tread_in_a_row  %lu\n", ui->read_in_a_row);
+	pr_err("\tlast_page_read %llu\n", ui->last_page_read);
+	pr_err("\tread_in_a_row  %llu\n", ui->read_in_a_row);
 	pr_err("\tdata_len       %d\n", ui->data_len);
 
 	if (!S_ISDIR(inode->i_mode))
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 14374530784c..aceaee2e92fe 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -111,7 +111,7 @@ static int do_readpage(struct page *page)
 	struct inode *inode = page->mapping->host;
 	loff_t i_size = i_size_read(inode);
 
-	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
+	dbg_gen("ino %lu, pg %llu, i_size %lld, flags %#lx",
 		inode->i_ino, page->index, i_size, page->flags);
 	ubifs_assert(!PageChecked(page));
 	ubifs_assert(!PagePrivate(page));
@@ -167,7 +167,7 @@ static int do_readpage(struct page *page)
 			dbg_gen("hole");
 			goto out_free;
 		}
-		ubifs_err("cannot read page %lu of inode %lu, error %d",
+		ubifs_err("cannot read page %llu of inode %lu, error %d",
 			  page->index, inode->i_ino, err);
 		goto error;
 	}
@@ -546,7 +546,7 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
 	loff_t end_pos = pos + len;
 	int appending = !!(end_pos > inode->i_size);
 
-	dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld",
+	dbg_gen("ino %lu, pos %llu, pg %llu, len %u, copied %d, i_size %lld",
 		inode->i_ino, pos, page->index, len, copied, inode->i_size);
 
 	if (unlikely(copied < len && len == PAGE_CACHE_SIZE)) {
@@ -616,7 +616,7 @@ static int populate_page(struct ubifs_info *c, struct page *page,
 	void *addr, *zaddr;
 	pgoff_t end_index;
 
-	dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
+	dbg_gen("ino %lu, pg %llu, i_size %lld, flags %#lx",
 		inode->i_ino, page->index, i_size, page->flags);
 
 	addr = zaddr = kmap(page);
@@ -928,7 +928,7 @@ static int do_writepage(struct page *page, int len)
 	}
 	if (err) {
 		SetPageError(page);
-		ubifs_err("cannot write page %lu of inode %lu, error %d",
+		ubifs_err("cannot write page %llu of inode %lu, error %d",
 			  page->index, inode->i_ino, err);
 		ubifs_ro_mode(c, err);
 	}
@@ -1004,7 +1004,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
 	int err, len = i_size & (PAGE_CACHE_SIZE - 1);
 	void *kaddr;
 
-	dbg_gen("ino %lu, pg %lu, pg flags %#lx",
+	dbg_gen("ino %lu, pg %llu, pg flags %#lx",
 		inode->i_ino, page->index, page->flags);
 	ubifs_assert(PagePrivate(page));
 
@@ -1451,7 +1451,7 @@ static int ubifs_vm_page_mkwrite(struct vm_area_struct *vma,
 	struct ubifs_budget_req req = { .new_page = 1 };
 	int err, update_time;
 
-	dbg_gen("ino %lu, pg %lu, i_size %lld",	inode->i_ino, page->index,
+	dbg_gen("ino %lu, pg %llu, i_size %lld",	inode->i_ino, page->index,
 		i_size_read(inode));
 	ubifs_assert(!c->ro_media && !c->ro_mount);
 
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 96d3e4ab11a9..78fc35823739 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -74,6 +74,7 @@ enum dma_transaction_type {
 	DMA_SLAVE,
 	DMA_CYCLIC,
 	DMA_INTERLEAVE,
+	DMA_CRC32C,
 /* last transaction type for creation of the capabilities mask */
 	DMA_TX_TYPE_END,
 };
@@ -583,6 +584,9 @@ struct dma_device {
 		struct scatterlist *dst_sg, unsigned int dst_nents,
 		struct scatterlist *src_sg, unsigned int src_nents,
 		unsigned long flags);
+	struct dma_async_tx_descriptor *(*device_prep_dma_crc32c)(
+		struct dma_chan *chan, dma_addr_t src,
+		size_t len, u32 *seed, unsigned long flags);
 
 	struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
 		struct dma_chan *chan, struct scatterlist *sgl,
@@ -1035,6 +1039,11 @@ struct dma_pinned_list {
 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
 
+#ifdef CONFIG_SPLICE_NET_DMA_SUPPORT
+struct dma_pinned_list *dma_pin_kernel_iovec_pages(struct iovec *iov, size_t len);
+void dma_unpin_kernel_iovec_pages(struct dma_pinned_list* pinned_list);
+#endif
+
 dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
 	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
 dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 65c2be22b601..f24474b4f63e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -45,6 +45,7 @@ struct vfsmount;
 struct cred;
 struct swap_info_struct;
 struct seq_file;
+struct socket;
 
 extern void __init inode_init(void);
 extern void __init inode_init_early(void);
@@ -1539,6 +1540,8 @@ struct file_operations {
 	int (*flock) (struct file *, int, struct file_lock *);
 	ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
 	ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
+	ssize_t (*splice_from_socket)(struct file *file, struct socket *sock,
+				     loff_t __user *ppos, size_t count);
 	int (*setlease)(struct file *, long, struct file_lock **);
 	long (*fallocate)(struct file *file, int mode, loff_t offset,
 			  loff_t len);
@@ -2414,6 +2417,8 @@ extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
 		struct file *, loff_t *, size_t, unsigned int);
 extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
 		struct file *out, loff_t *, size_t len, unsigned int flags);
+extern ssize_t generic_splice_from_socket(struct file *file, struct socket *sock,
+				     loff_t __user *ppos, size_t count);
 
 extern void
 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 4b2ee8d12f5e..ff272e66e0ea 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -283,6 +283,7 @@ struct fsnotify_mark {
 	atomic_t refcnt;		/* active things looking at this mark */
 	struct fsnotify_group *group;	/* group this mark is for */
 	struct list_head g_list;	/* list of marks by group->i_fsnotify_marks */
+	struct list_head free_g_list;
 	spinlock_t lock;		/* protect group and inode */
 	union {
 		struct fsnotify_inode_mark i;
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 623325e2ff97..f4101f01286b 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -154,6 +154,14 @@ static inline int irq_balancing_disabled(unsigned int irq)
 	return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
 }
 
+static inline int irq_is_percpu(unsigned int irq)
+{
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+	return desc->status_use_accessors & IRQ_PER_CPU;
+}
+
 static inline void
 irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
 {
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index dba482e31a13..4a1b079de6de 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -11,6 +11,8 @@
 #ifndef __LINUX_MBUS_H
 #define __LINUX_MBUS_H
 
+struct resource;
+
 struct mbus_dram_target_info
 {
 	/*
@@ -59,14 +61,21 @@ static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void)
 }
 #endif
 
-int mvebu_mbus_add_window_remap_flags(const char *devname, phys_addr_t base,
-				      size_t size, phys_addr_t remap,
-				      unsigned int flags);
-int mvebu_mbus_add_window(const char *devname, phys_addr_t base,
-			  size_t size);
+int mvebu_mbus_save_cpu_target(u32 *store_addr);
+void mvebu_mbus_get_pcie_mem_aperture(struct resource *res);
+void mvebu_mbus_get_pcie_io_aperture(struct resource *res);
+int mvebu_mbus_add_window_remap_by_id(unsigned int target,
+				      unsigned int attribute,
+				      phys_addr_t base, size_t size,
+				      phys_addr_t remap);
+int mvebu_mbus_add_window_by_id(unsigned int target, unsigned int attribute,
+				phys_addr_t base, size_t size);
 int mvebu_mbus_del_window(phys_addr_t base, size_t size);
 int mvebu_mbus_init(const char *soc, phys_addr_t mbus_phys_base,
 		    size_t mbus_size, phys_addr_t sdram_phys_base,
 		    size_t sdram_size);
+int mvebu_mbus_dt_init(bool is_coherent);
+int mvebu_mbus_get_addr_win_info(phys_addr_t phyaddr, u8 *trg_id, u8 *attr);
+int mvebu_mbus_win_addr_get(u8 target_id, u8 attribute, u32 *phy_base, u32 *size);
 
 #endif /* __LINUX_MBUS_H */
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index cb358355ef43..28b0dc37a8c4 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -19,6 +19,8 @@
 #define APOLLO_MOUSE_MINOR	7
 #define PC110PAD_MINOR		9
 /*#define ADB_MOUSE_MINOR	10	FIXME OBSOLETE */
+#define CRYPTODEV_MINOR		70	/* OCF async crypto */
+#define CESADEV_MINOR		71	/* marvell CESA */
 #define WATCHDOG_MINOR		130	/* Watchdog timer     */
 #define TEMP_MINOR		131	/* Temperature Sensor */
 #define RTC_MINOR		135
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a9a48309f045..3f3b016ab326 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1712,6 +1712,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
 #define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
 #define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
 #define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
+#define FOLL_COW	0x4000	/* internal GUP flag */
 
 typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
 			void *data);
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 20c2d6dd5d25..f27decb4980f 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -50,12 +50,31 @@ struct msi_desc {
 };
 
 /*
- * The arch hook for setup up msi irqs
+ * The arch hooks to setup up msi irqs. Those functions are
+ * implemented as weak symbols so that they /can/ be overriden by
+ * architecture specific code if needed.
  */
 int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
 void arch_teardown_msi_irq(unsigned int irq);
 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
 void arch_teardown_msi_irqs(struct pci_dev *dev);
 int arch_msi_check_device(struct pci_dev* dev, int nvec, int type);
+void arch_restore_msi_irqs(struct pci_dev *dev, int irq);
+
+void default_teardown_msi_irqs(struct pci_dev *dev);
+void default_restore_msi_irqs(struct pci_dev *dev, int irq);
+
+struct msi_chip {
+	struct module *owner;
+	struct device *dev;
+	struct device_node *of_node;
+	struct list_head list;
+
+	int (*setup_irq)(struct msi_chip *chip, struct pci_dev *dev,
+			 struct msi_desc *desc);
+	void (*teardown_irq)(struct msi_chip *chip, unsigned int irq);
+	int (*check_device)(struct msi_chip *chip, struct pci_dev *dev,
+			    int nvec, int type);
+};
 
 #endif /* LINUX_MSI_H */
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 211ff67e8b0d..0cfd721f76a5 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -126,6 +126,11 @@ struct nand_bbt_descr {
  */
 #define NAND_BBT_DYNAMICSTRUCT	0x80000000
 
+#ifdef CONFIG_MTD_NAND_NFC_MLC_SUPPORT
+/* Search the bad block indicators according to Marvell's Naked symantics */
+#define NAND_BBT_SCANMVCUSTOM	0x10000000
+#endif
+
 /* The maximum number of blocks to scan for a bbt */
 #define NAND_BBT_SCAN_MAXBLOCKS	4
 
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index ab6363443ce8..327dee79e094 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -202,6 +202,10 @@ typedef enum {
 /* Keep gcc happy */
 struct nand_chip;
 
+/* ONFI features */
+#define ONFI_FEATURE_16_BIT_BUS		(1 << 0)
+#define ONFI_FEATURE_EXT_PARAM_PAGE	(1 << 7)
+
 /* ONFI timing mode, used in both asynchronous and synchronous mode */
 #define ONFI_TIMING_MODE_0		(1 << 0)
 #define ONFI_TIMING_MODE_1		(1 << 1)
@@ -224,7 +228,10 @@ struct nand_onfi_params {
 	__le16 revision;
 	__le16 features;
 	__le16 opt_cmd;
-	u8 reserved[22];
+	u8 reserved0[2];
+	__le16 ext_param_page_length; /* since ONFI 2.1 */
+	u8 num_of_param_pages;        /* since ONFI 2.1 */
+	u8 reserved1[17];
 
 	/* manufacturer information block */
 	char manufacturer[12];
@@ -281,6 +288,40 @@ struct nand_onfi_params {
 
 #define ONFI_CRC_BASE	0x4F4E
 
+/* Extended ECC information Block Definition (since ONFI 2.1) */
+struct onfi_ext_ecc_info {
+	u8 ecc_bits;
+	u8 codeword_size;
+	__le16 bb_per_lun;
+	__le16 block_endurance;
+	u8 reserved[2];
+} __packed;
+
+#define ONFI_SECTION_TYPE_0	0	/* Unused section. */
+#define ONFI_SECTION_TYPE_1	1	/* for additional sections. */
+#define ONFI_SECTION_TYPE_2	2	/* for ECC information. */
+struct onfi_ext_section {
+	u8 type;
+	u8 length;
+} __packed;
+
+#define ONFI_EXT_SECTION_MAX 8
+
+/* Extended Parameter Page Definition (since ONFI 2.1) */
+struct onfi_ext_param_page {
+	__le16 crc;
+	u8 sig[4];             /* 'E' 'P' 'P' 'S' */
+	u8 reserved0[10];
+	struct onfi_ext_section sections[ONFI_EXT_SECTION_MAX];
+
+	/*
+	 * The actual size of the Extended Parameter Page is in
+	 * @ext_param_page_length of nand_onfi_params{}.
+	 * The following are the variable length sections.
+	 * So we do not add any fields below. Please see the ONFI spec.
+	 */
+} __packed;
+
 /**
  * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices
  * @lock:               protection lock
@@ -434,6 +475,12 @@ struct nand_buffers {
  *			bad block marker position; i.e., BBM == 11110111b is
  *			not bad when badblockbits == 7
  * @cellinfo:		[INTERN] MLC/multichip data from chip ident
+ * @ecc_strength_ds:	[INTERN] ECC correctability from the datasheet.
+ *			Minimum amount of bit errors per @ecc_step_ds guaranteed
+ *			to be correctable. If unknown, set to zero.
+ * @ecc_step_ds:	[INTERN] ECC step required by the @ecc_strength_ds,
+ *                      also from the datasheet. It is the recommended ECC step
+ *			size, if known; if unknown, set to zero.
  * @numchips:		[INTERN] number of physical chips
  * @chipsize:		[INTERN] the size of one chip for multichip arrays
  * @pagemask:		[INTERN] page number mask = number of (pages / chip) - 1
@@ -497,6 +544,11 @@ struct nand_chip {
 
 	int chip_delay;
 	unsigned int options;
+#ifdef CONFIG_MTD_NAND_NFC_MLC_SUPPORT
+	unsigned int	oobsize_ovrd;
+	unsigned int	bb_location;
+	unsigned int	bb_page;
+#endif
 	unsigned int bbt_options;
 
 	int page_shift;
@@ -510,6 +562,8 @@ struct nand_chip {
 	unsigned int pagebuf_bitflips;
 	int subpagesize;
 	uint8_t cellinfo;
+	uint16_t ecc_strength_ds;
+	uint16_t ecc_step_ds;
 	int badblockpos;
 	int badblockbits;
 
@@ -708,6 +762,12 @@ struct platform_nand_chip *get_platform_nandchip(struct mtd_info *mtd)
 	return chip->priv;
 }
 
+/* return the supported features. */
+static inline int onfi_feature(struct nand_chip *chip)
+{
+	return chip->onfi_version ? le16_to_cpu(chip->onfi_params.features) : 0;
+}
+
 /* return the supported asynchronous timing mode. */
 static inline int onfi_get_async_timing_mode(struct nand_chip *chip)
 {
diff --git a/include/linux/mvebu-v7-cpuidle.h b/include/linux/mvebu-v7-cpuidle.h
new file mode 100644
index 000000000000..73f6d32c7618
--- /dev/null
+++ b/include/linux/mvebu-v7-cpuidle.h
@@ -0,0 +1,28 @@
+/*
+ * Marvell EBU cpuidle defintion
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ */
+
+#ifndef __LINUX_MVEBU_V7_CPUIDLE_H__
+#define __LINUX_MVEBU_V7_CPUIDLE_H__
+
+enum mvebu_v7_cpuidle_types {
+	CPUIDLE_ARMADA_XP,
+	CPUIDLE_ARMADA_370,
+	CPUIDLE_ARMADA_38X,
+};
+
+struct mvebu_v7_cpuidle {
+	int type;
+	int (*cpu_suspend)(unsigned long deepidle);
+};
+
+#endif
diff --git a/include/linux/of.h b/include/linux/of.h
index 1fd08ca23106..73ba597cd324 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -226,6 +226,17 @@ static inline int of_get_child_count(const struct device_node *np)
 	return num;
 }
 
+static inline int of_get_available_child_count(const struct device_node *np)
+{
+	struct device_node *child;
+	int num = 0;
+
+	for_each_available_child_of_node(np, child)
+		num++;
+
+	return num;
+}
+
 extern struct device_node *of_find_node_with_property(
 	struct device_node *from, const char *prop_name);
 #define for_each_node_with_property(dn, prop_name) \
@@ -274,12 +285,16 @@ extern int of_n_size_cells(struct device_node *np);
 extern const struct of_device_id *of_match_node(
 	const struct of_device_id *matches, const struct device_node *node);
 extern int of_modalias_node(struct device_node *node, char *modalias, int len);
+extern void of_print_phandle_args(const char *msg, const struct of_phandle_args *args);
 extern struct device_node *of_parse_phandle(const struct device_node *np,
 					    const char *phandle_name,
 					    int index);
 extern int of_parse_phandle_with_args(const struct device_node *np,
 	const char *list_name, const char *cells_name, int index,
 	struct of_phandle_args *out_args);
+extern int of_parse_phandle_with_fixed_args(const struct device_node *np,
+	const char *list_name, int cells_count, int index,
+	struct of_phandle_args *out_args);
 extern int of_count_phandle_with_args(const struct device_node *np,
 	const char *list_name, const char *cells_name);
 
@@ -381,6 +396,11 @@ static inline int of_get_child_count(const struct device_node *np)
 	return 0;
 }
 
+static inline int of_get_available_child_count(const struct device_node *np)
+{
+	return 0;
+}
+
 static inline int of_device_is_compatible(const struct device_node *device,
 					  const char *name)
 {
@@ -488,6 +508,13 @@ static inline int of_parse_phandle_with_args(struct device_node *np,
 	return -ENOSYS;
 }
 
+static inline int of_parse_phandle_with_fixed_args(const struct device_node *np,
+	const char *list_name, int cells_count, int index,
+	struct of_phandle_args *out_args)
+{
+	return -ENOSYS;
+}
+
 static inline int of_count_phandle_with_args(struct device_node *np,
 					     const char *list_name,
 					     const char *cells_name)
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 0506eb53519b..4c2e6f26432c 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -4,6 +4,36 @@
 #include <linux/errno.h>
 #include <linux/of.h>
 
+struct of_pci_range_parser {
+	struct device_node *node;
+	const __be32 *range;
+	const __be32 *end;
+	int np;
+	int pna;
+};
+
+struct of_pci_range {
+	u32 pci_space;
+	u64 pci_addr;
+	u64 cpu_addr;
+	u64 size;
+	u32 flags;
+};
+
+#define for_each_of_pci_range(parser, range) \
+	for (; of_pci_range_parser_one(parser, range);)
+
+static inline void of_pci_range_to_resource(struct of_pci_range *range,
+					    struct device_node *np,
+					    struct resource *res)
+{
+	res->flags = range->flags;
+	res->start = range->cpu_addr;
+	res->end = range->cpu_addr + range->size - 1;
+	res->parent = res->child = res->sibling = NULL;
+	res->name = np->full_name;
+}
+
 #ifdef CONFIG_OF_ADDRESS
 extern u64 of_translate_address(struct device_node *np, const __be32 *addr);
 extern bool of_can_translate_address(struct device_node *dev);
@@ -27,6 +57,11 @@ static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
 #define pci_address_to_pio pci_address_to_pio
 #endif
 
+extern int of_pci_range_parser_init(struct of_pci_range_parser *parser,
+			struct device_node *node);
+extern struct of_pci_range *of_pci_range_parser_one(
+					struct of_pci_range_parser *parser,
+					struct of_pci_range *range);
 #else /* CONFIG_OF_ADDRESS */
 #ifndef of_address_to_resource
 static inline int of_address_to_resource(struct device_node *dev, int index,
@@ -53,6 +88,19 @@ static inline const __be32 *of_get_address(struct device_node *dev, int index,
 {
 	return NULL;
 }
+
+static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser,
+			struct device_node *node)
+{
+	return -1;
+}
+
+static inline struct of_pci_range *of_pci_range_parser_one(
+					struct of_pci_range_parser *parser,
+					struct of_pci_range *range)
+{
+	return NULL;
+}
 #endif /* CONFIG_OF_ADDRESS */
 
 
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 535cecf1e02f..c0d6dfe80895 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -1,8 +1,6 @@
 #ifndef __OF_IRQ_H
 #define __OF_IRQ_H
 
-#if defined(CONFIG_OF)
-struct of_irq;
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/irq.h>
@@ -10,30 +8,6 @@ struct of_irq;
 #include <linux/ioport.h>
 #include <linux/of.h>
 
-/*
- * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
- * implements it differently.  However, the prototype is the same for all,
- * so declare it here regardless of the CONFIG_OF_IRQ setting.
- */
-extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
-
-#if defined(CONFIG_OF_IRQ)
-/**
- * of_irq - container for device_node/irq_specifier pair for an irq controller
- * @controller: pointer to interrupt controller device tree node
- * @size: size of interrupt specifier
- * @specifier: array of cells @size long specifing the specific interrupt
- *
- * This structure is returned when an interrupt is mapped. The controller
- * field needs to be put() after use
- */
-#define OF_MAX_IRQ_SPEC		4 /* We handle specifiers of at most 4 cells */
-struct of_irq {
-	struct device_node *controller; /* Interrupt controller node */
-	u32 size; /* Specifier size */
-	u32 specifier[OF_MAX_IRQ_SPEC]; /* Specifier copy */
-};
-
 typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
 
 /*
@@ -45,37 +19,38 @@ typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
 #if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
 extern unsigned int of_irq_workarounds;
 extern struct device_node *of_irq_dflt_pic;
-extern int of_irq_map_oldworld(struct device_node *device, int index,
-			       struct of_irq *out_irq);
+extern int of_irq_parse_oldworld(struct device_node *device, int index,
+			       struct of_phandle_args *out_irq);
 #else /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
 #define of_irq_workarounds (0)
 #define of_irq_dflt_pic (NULL)
-static inline int of_irq_map_oldworld(struct device_node *device, int index,
-				      struct of_irq *out_irq)
+static inline int of_irq_parse_oldworld(struct device_node *device, int index,
+				      struct of_phandle_args *out_irq)
 {
 	return -EINVAL;
 }
 #endif /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
 
-
-extern int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
-			  u32 ointsize, const __be32 *addr,
-			  struct of_irq *out_irq);
-extern int of_irq_map_one(struct device_node *device, int index,
-			  struct of_irq *out_irq);
-extern unsigned int irq_create_of_mapping(struct device_node *controller,
-					  const u32 *intspec,
-					  unsigned int intsize);
+extern int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq);
+extern int of_irq_parse_one(struct device_node *device, int index,
+			  struct of_phandle_args *out_irq);
+extern unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data);
 extern int of_irq_to_resource(struct device_node *dev, int index,
 			      struct resource *r);
 extern int of_irq_count(struct device_node *dev);
 extern int of_irq_to_resource_table(struct device_node *dev,
 		struct resource *res, int nr_irqs);
-extern struct device_node *of_irq_find_parent(struct device_node *child);
 
 extern void of_irq_init(const struct of_device_id *matches);
 
-#endif /* CONFIG_OF_IRQ */
+#if defined(CONFIG_OF)
+/*
+ * irq_of_parse_and_map() is used by all OF enabled platforms; but SPARC
+ * implements it differently.  However, the prototype is the same for all,
+ * so declare it here regardless of the CONFIG_OF_IRQ setting.
+ */
+extern unsigned int irq_of_parse_and_map(struct device_node *node, int index);
+extern struct device_node *of_irq_find_parent(struct device_node *child);
 
 #else /* !CONFIG_OF */
 static inline unsigned int irq_of_parse_and_map(struct device_node *dev,
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 8163107b94b4..bf6efea38a0c 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -57,4 +57,14 @@ static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
 }
 #endif /* CONFIG_OF */
 
+#if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)
+extern int of_phy_register_fixed_link(struct device_node *np);
+#else
+static inline int of_phy_register_fixed_link(struct device_node *np)
+{
+	return -ENOSYS;
+}
+#endif
+
+
 #endif /* __LINUX_OF_MDIO_H */
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index bb115deb7612..f297237349e8 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -2,13 +2,27 @@
 #define __OF_PCI_H
 
 #include <linux/pci.h>
+#include <linux/msi.h>
 
 struct pci_dev;
-struct of_irq;
-int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq);
+struct of_phandle_args;
+int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq);
 
 struct device_node;
 struct device_node *of_pci_find_child_device(struct device_node *parent,
 					     unsigned int devfn);
+int of_pci_get_devfn(struct device_node *np);
+int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
+
+#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI)
+int of_pci_msi_chip_add(struct msi_chip *chip);
+void of_pci_msi_chip_remove(struct msi_chip *chip);
+struct msi_chip *of_pci_find_msi_chip_by_node(struct device_node *of_node);
+#else
+static inline int of_pci_msi_chip_add(struct msi_chip *chip) { return -EINVAL; }
+static inline void of_pci_msi_chip_remove(struct msi_chip *chip) { }
+static inline struct msi_chip *
+of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; }
+#endif
 
 #endif
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 3a24e4ff3248..7ffc012a0d6b 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -432,6 +432,7 @@ struct pci_bus {
 	struct resource busn_res;	/* bus numbers routed to this bus */
 
 	struct pci_ops	*ops;		/* configuration access functions */
+	struct msi_chip	*msi;		/* MSI controller */
 	void		*sysdata;	/* hook for sys-specific extension */
 	struct proc_dir_entry *procdir;	/* directory entry in /proc/bus/pci */
 
diff --git a/include/linux/platform_data/mtd-nand-pxa3xx.h b/include/linux/platform_data/mtd-nand-pxa3xx.h
index c42f39f20195..a94147124929 100644
--- a/include/linux/platform_data/mtd-nand-pxa3xx.h
+++ b/include/linux/platform_data/mtd-nand-pxa3xx.h
@@ -16,19 +16,6 @@ struct pxa3xx_nand_timing {
 	unsigned int	tAR;  /* ND_ALE low to ND_nRE low delay */
 };
 
-struct pxa3xx_nand_cmdset {
-	uint16_t	read1;
-	uint16_t	read2;
-	uint16_t	program;
-	uint16_t	read_status;
-	uint16_t	read_id;
-	uint16_t	erase;
-	uint16_t	reset;
-	uint16_t	lock;
-	uint16_t	unlock;
-	uint16_t	lock_status;
-};
-
 struct pxa3xx_nand_flash {
 	char		*name;
 	uint32_t	chip_id;
@@ -68,6 +55,9 @@ struct pxa3xx_nand_platform_data {
 	/* indicate how many chip selects will be used */
 	int	num_cs;
 
+	/* use an flash-based bad block table */
+	bool	flash_bbt;
+
 	const struct mtd_partition		*parts[NUM_CHIP_SELECT];
 	unsigned int				nr_parts[NUM_CHIP_SELECT];
 
diff --git a/include/linux/platform_data/pxa_sdhci.h b/include/linux/platform_data/pxa_sdhci.h
index 27d3156d093a..fbaedfdb18d2 100644
--- a/include/linux/platform_data/pxa_sdhci.h
+++ b/include/linux/platform_data/pxa_sdhci.h
@@ -59,5 +59,7 @@ struct sdhci_pxa_platdata {
 struct sdhci_pxa {
 	u8	clk_enable;
 	u8	power_mode;
+	void	__iomem *sdio3_conf_reg;
+	void	__iomem *mbus_win_regs;
 };
 #endif /* _PXA_SDHCI_H_ */
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index ffc444c38b0a..b481b8857c50 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -56,6 +56,8 @@ static inline int radix_tree_is_indirect_ptr(void *ptr)
 	return (int)((unsigned long)ptr & RADIX_TREE_INDIRECT_PTR);
 }
 
+#define radixkey_t unsigned long long
+
 /*** radix-tree API starts here ***/
 
 #define RADIX_TREE_MAX_TAGS 3
@@ -216,42 +218,42 @@ static inline void radix_tree_replace_slot(void **pslot, void *item)
 	rcu_assign_pointer(*pslot, item);
 }
 
-int radix_tree_insert(struct radix_tree_root *, unsigned long, void *);
-void *radix_tree_lookup(struct radix_tree_root *, unsigned long);
-void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long);
-void *radix_tree_delete(struct radix_tree_root *, unsigned long);
+int radix_tree_insert(struct radix_tree_root *, radixkey_t, void *);
+void *radix_tree_lookup(struct radix_tree_root *, radixkey_t);
+void **radix_tree_lookup_slot(struct radix_tree_root *, radixkey_t);
+void *radix_tree_delete(struct radix_tree_root *, radixkey_t);
 unsigned int
 radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
-			unsigned long first_index, unsigned int max_items);
+			radixkey_t first_index, unsigned int max_items);
 unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
-			void ***results, unsigned long *indices,
-			unsigned long first_index, unsigned int max_items);
-unsigned long radix_tree_next_hole(struct radix_tree_root *root,
-				unsigned long index, unsigned long max_scan);
-unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
-				unsigned long index, unsigned long max_scan);
+			void ***results, radixkey_t *indices,
+			radixkey_t first_index, unsigned int max_items);
+radixkey_t radix_tree_next_hole(struct radix_tree_root *root,
+				radixkey_t index, unsigned long max_scan);
+radixkey_t radix_tree_prev_hole(struct radix_tree_root *root,
+				radixkey_t index, unsigned long max_scan);
 int radix_tree_preload(gfp_t gfp_mask);
 void radix_tree_init(void);
 void *radix_tree_tag_set(struct radix_tree_root *root,
-			unsigned long index, unsigned int tag);
+			radixkey_t index, unsigned int tag);
 void *radix_tree_tag_clear(struct radix_tree_root *root,
-			unsigned long index, unsigned int tag);
+			radixkey_t index, unsigned int tag);
 int radix_tree_tag_get(struct radix_tree_root *root,
-			unsigned long index, unsigned int tag);
+			radixkey_t index, unsigned int tag);
 unsigned int
 radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
-		unsigned long first_index, unsigned int max_items,
+		radixkey_t first_index, unsigned int max_items,
 		unsigned int tag);
 unsigned int
 radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
-		unsigned long first_index, unsigned int max_items,
+		radixkey_t first_index, unsigned int max_items,
 		unsigned int tag);
 unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
-		unsigned long *first_indexp, unsigned long last_index,
+		radixkey_t *first_indexp, radixkey_t last_index,
 		unsigned long nr_to_tag,
 		unsigned int fromtag, unsigned int totag);
 int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
-unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
+radixkey_t radix_tree_locate_item(struct radix_tree_root *root, void *item);
 
 static inline void radix_tree_preload_end(void)
 {
@@ -273,8 +275,8 @@ static inline void radix_tree_preload_end(void)
  * radix tree tag.
  */
 struct radix_tree_iter {
-	unsigned long	index;
-	unsigned long	next_index;
+	radixkey_t	index;
+	radixkey_t	next_index;
 	unsigned long	tags;
 };
 
@@ -290,7 +292,7 @@ struct radix_tree_iter {
  * Returns:	NULL
  */
 static __always_inline void **
-radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
+radix_tree_iter_init(struct radix_tree_iter *iter, radixkey_t start)
 {
 	/*
 	 * Leave iter->tags uninitialized. radix_tree_next_chunk() will fill it
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 6082247feab1..d595faf584df 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -3,11 +3,13 @@
 
 struct device;
 struct reset_control;
+struct device_node;
 
 int reset_control_reset(struct reset_control *rstc);
 int reset_control_assert(struct reset_control *rstc);
 int reset_control_deassert(struct reset_control *rstc);
 
+struct reset_control *of_reset_control_get(struct device_node *np, const char *id);
 struct reset_control *reset_control_get(struct device *dev, const char *id);
 void reset_control_put(struct reset_control *rstc);
 struct reset_control *devm_reset_control_get(struct device *dev, const char *id);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 478120ae34e5..dbd4a5e48c34 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -446,6 +446,12 @@ struct sk_buff {
 	__be16			protocol;
 
 	void			(*destructor)(struct sk_buff *skb);
+#ifdef CONFIG_NET_SKB_RECYCLE
+	int				(*skb_recycle) (struct sk_buff *skb);
+	__u32			hw_cookie;
+#endif /* CONFIG_NET_SKB_RECYCLE */
+
+
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	struct nf_conntrack	*nfct;
 #endif
@@ -650,6 +656,10 @@ static inline struct sk_buff *alloc_skb_head(gfp_t priority)
 }
 
 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
+#ifdef CONFIG_NET_SKB_RECYCLE
+extern void skb_recycle(struct sk_buff *skb);
+extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);
+#endif
 extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
 extern struct sk_buff *skb_clone(struct sk_buff *skb,
 				 gfp_t priority);
@@ -2447,6 +2457,9 @@ extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
 extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
 					       int offset, struct iovec *to,
 					       int size);
+extern int	       skb_copy_datagram_to_kernel_iovec(const struct sk_buff *from,
+					       int offset, struct iovec *to,
+					       int size);
 extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
 							int hlen,
 							struct iovec *iov);
@@ -2930,5 +2943,30 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
 			       skb_network_header(skb);
 	return hdr_len + skb_gso_transport_seglen(skb);
 }
+#ifdef CONFIG_NET_SKB_RECYCLE
+static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
+{
+	if (irqs_disabled())
+		return false;
+
+	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
+		return false;
+
+	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
+		return false;
+
+	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
+	if (skb_end_pointer(skb) - skb->head < skb_size)
+		return false;
+
+	if (skb_shared(skb) || skb_cloned(skb) || skb_has_frag_list(skb))
+		return false;
+
+	if (skb->head_frag)
+		return false;
+
+	return true;
+}
+#endif	/* CONFIG_NET_SKB_RECYCLE */
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SKBUFF_H */
diff --git a/include/linux/socket.h b/include/linux/socket.h
index b10ce4b341ea..cc1321d1e715 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -317,6 +317,7 @@ extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
 			     int offset, int len);
 extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
 extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
+extern void memcpy_tokerneliovec(struct iovec *iov, unsigned char *kdata, int len);
 
 struct timespec;
 
diff --git a/include/linux/types.h b/include/linux/types.h
index 4d118ba11349..6acaf685d035 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -139,7 +139,7 @@ typedef unsigned long blkcnt_t;
  * can override it.
  */
 #ifndef pgoff_t
-#define pgoff_t unsigned long
+#define pgoff_t unsigned long long
 #endif
 
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
old mode 100644
new mode 100755
index ffa2696d64dc..057ee48cd3b4
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -50,6 +50,9 @@ int	transport_subsystem_register(struct se_subsystem_api *);
 void	transport_subsystem_release(struct se_subsystem_api *);
 
 void	target_complete_cmd(struct se_cmd *, u8);
+#ifdef ALPHA_CUSTOMIZE /* 2014-09-12 Tim Tsay - apply transfer length patch from linux-3.10.54 */
+void	target_complete_cmd_with_length(struct se_cmd *, u8, int);
+#endif
 
 sense_reason_t	spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
 sense_reason_t	spc_emulate_report_luns(struct se_cmd *cmd);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
old mode 100644
new mode 100755
index 7d99c0b5b789..d4642acd6b2b
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -809,6 +809,9 @@ struct se_portal_group {
 	struct config_group	tpg_acl_group;
 	struct config_group	tpg_attrib_group;
 	struct config_group	tpg_param_group;
+#ifdef ALPHA_CUSTOMIZE //2011-10 Tim Tsay - support default policy
+	struct se_node_acl	*se_tpg_default_acl;
+#endif
 };
 
 struct se_wwn {
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
old mode 100644
new mode 100755
index 1dcce9cc99b9..b092fcb65313
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -81,6 +81,9 @@ struct target_core_fabric_ops {
 	struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *,
 				struct config_group *, const char *);
 	void (*fabric_drop_nodeacl)(struct se_node_acl *);
+#ifdef ALPHA_CUSTOMIZE //2011-10 Tim Tsay - support default policy
+	void (*copy_node_attributes)(struct se_node_acl *, struct se_node_acl *);
+#endif
 };
 
 struct se_session *transport_init_session(void);
diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h
old mode 100644
new mode 100755
index e0cecd2eabdc..d20eb6fd6cb9
--- a/include/uapi/linux/loop.h
+++ b/include/uapi/linux/loop.h
@@ -9,8 +9,11 @@
 #ifndef _UAPI_LINUX_LOOP_H
 #define _UAPI_LINUX_LOOP_H
 
-
+#ifdef ALPHA_CUSTOMIZE
+#define LO_NAME_SIZE    128
+#else
 #define LO_NAME_SIZE	64
+#endif
 #define LO_KEY_SIZE	32
 
 
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 54a4d5223238..920fe9f4101f 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -655,15 +655,14 @@ int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
 }
 EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
 
-unsigned int irq_create_of_mapping(struct device_node *controller,
-				   const u32 *intspec, unsigned int intsize)
+unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
 {
 	struct irq_domain *domain;
 	irq_hw_number_t hwirq;
 	unsigned int type = IRQ_TYPE_NONE;
 	unsigned int virq;
 
-	domain = controller ? irq_find_host(controller) : irq_default_domain;
+	domain = irq_data->np ? irq_find_host(irq_data->np) : irq_default_domain;
 	if (!domain) {
 #ifdef CONFIG_MIPS
 		/*
@@ -677,17 +676,17 @@ unsigned int irq_create_of_mapping(struct device_node *controller,
 		if (intsize > 0)
 			return intspec[0];
 #endif
-		pr_warning("no irq domain found for %s !\n",
-			   of_node_full_name(controller));
+		pr_warn("no irq domain found for %s !\n",
+			of_node_full_name(irq_data->np));
 		return 0;
 	}
 
 	/* If domain has no translation, then we assume interrupt line */
 	if (domain->ops->xlate == NULL)
-		hwirq = intspec[0];
+		hwirq = irq_data->args[0];
 	else {
-		if (domain->ops->xlate(domain, controller, intspec, intsize,
-				     &hwirq, &type))
+		if (domain->ops->xlate(domain, irq_data->np, irq_data->args,
+					irq_data->args_count, &hwirq, &type))
 			return 0;
 	}
 
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index e7964296fd50..6ef6ce4e4ea1 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -33,7 +33,6 @@
 #include <linux/bitops.h>
 #include <linux/rcupdate.h>
 
-
 #ifdef __KERNEL__
 #define RADIX_TREE_MAP_SHIFT	(CONFIG_BASE_SMALL ? 4 : 6)
 #else
@@ -57,7 +56,7 @@ struct radix_tree_node {
 	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
 
-#define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(unsigned long))
+#define RADIX_TREE_INDEX_BITS  (8 /* CHAR_BIT */ * sizeof(radixkey_t))
 #define RADIX_TREE_MAX_PATH (DIV_ROUND_UP(RADIX_TREE_INDEX_BITS, \
 					  RADIX_TREE_MAP_SHIFT))
 
@@ -65,7 +64,7 @@ struct radix_tree_node {
  * The height_to_maxindex array needs to be one deeper than the maximum
  * path as height 0 holds only 1 entry.
  */
-static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1] __read_mostly;
+static radixkey_t height_to_maxindex[RADIX_TREE_MAX_PATH + 1] __read_mostly;
 
 /*
  * Radix tree node cache.
@@ -294,7 +293,7 @@ EXPORT_SYMBOL(radix_tree_preload);
  *	Return the maximum key which can be store into a
  *	radix tree with height HEIGHT.
  */
-static inline unsigned long radix_tree_maxindex(unsigned int height)
+static inline radixkey_t radix_tree_maxindex(unsigned int height)
 {
 	return height_to_maxindex[height];
 }
@@ -302,7 +301,7 @@ static inline unsigned long radix_tree_maxindex(unsigned int height)
 /*
  *	Extend a radix tree so it can store key @index.
  */
-static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
+static int radix_tree_extend(struct radix_tree_root *root, radixkey_t index)
 {
 	struct radix_tree_node *node;
 	struct radix_tree_node *slot;
@@ -345,6 +344,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index)
 		rcu_assign_pointer(root->rnode, node);
 		root->height = newheight;
 	} while (height > root->height);
+
 out:
 	return 0;
 }
@@ -358,7 +358,7 @@ out:
  *	Insert an item into the radix tree at position @index.
  */
 int radix_tree_insert(struct radix_tree_root *root,
-			unsigned long index, void *item)
+			radixkey_t index, void *item)
 {
 	struct radix_tree_node *node = NULL, *slot;
 	unsigned int height, shift;
@@ -384,7 +384,9 @@ int radix_tree_insert(struct radix_tree_root *root,
 		if (slot == NULL) {
 			/* Have to add a child node.  */
 			if (!(slot = radix_tree_node_alloc(root)))
+			{
 				return -ENOMEM;
+			}
 			slot->height = height;
 			slot->parent = node;
 			if (node) {
@@ -403,7 +405,9 @@ int radix_tree_insert(struct radix_tree_root *root,
 	}
 
 	if (slot != NULL)
+	{
 		return -EEXIST;
+	}	
 
 	if (node) {
 		node->count++;
@@ -425,7 +429,7 @@ EXPORT_SYMBOL(radix_tree_insert);
  * is_slot == 0 : search for the node.
  */
 static void *radix_tree_lookup_element(struct radix_tree_root *root,
-				unsigned long index, int is_slot)
+				radixkey_t index, int is_slot)
 {
 	unsigned int height, shift;
 	struct radix_tree_node *node, **slot;
@@ -474,7 +478,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root,
  *	exclusive from other writers. Any dereference of the slot must be done
  *	using radix_tree_deref_slot.
  */
-void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
+void **radix_tree_lookup_slot(struct radix_tree_root *root, radixkey_t index)
 {
 	return (void **)radix_tree_lookup_element(root, index, 1);
 }
@@ -492,7 +496,7 @@ EXPORT_SYMBOL(radix_tree_lookup_slot);
  *	them safely). No RCU barriers are required to access or modify the
  *	returned item, however.
  */
-void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
+void *radix_tree_lookup(struct radix_tree_root *root, radixkey_t index)
 {
 	return radix_tree_lookup_element(root, index, 0);
 }
@@ -512,7 +516,7 @@ EXPORT_SYMBOL(radix_tree_lookup);
  *	item is a bug.
  */
 void *radix_tree_tag_set(struct radix_tree_root *root,
-			unsigned long index, unsigned int tag)
+			radixkey_t index, unsigned int tag)
 {
 	unsigned int height, shift;
 	struct radix_tree_node *slot;
@@ -558,7 +562,7 @@ EXPORT_SYMBOL(radix_tree_tag_set);
  *	has the same return value and semantics as radix_tree_lookup().
  */
 void *radix_tree_tag_clear(struct radix_tree_root *root,
-			unsigned long index, unsigned int tag)
+			radixkey_t index, unsigned int tag)
 {
 	struct radix_tree_node *node = NULL;
 	struct radix_tree_node *slot = NULL;
@@ -622,7 +626,7 @@ EXPORT_SYMBOL(radix_tree_tag_clear);
  * from concurrency.
  */
 int radix_tree_tag_get(struct radix_tree_root *root,
-			unsigned long index, unsigned int tag)
+			radixkey_t index, unsigned int tag)
 {
 	unsigned int height, shift;
 	struct radix_tree_node *node;
@@ -676,7 +680,8 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
 {
 	unsigned shift, tag = flags & RADIX_TREE_ITER_TAG_MASK;
 	struct radix_tree_node *rnode, *node;
-	unsigned long index, offset;
+        radixkey_t index;
+	unsigned long offset;
 
 	if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
 		return NULL;
@@ -807,7 +812,7 @@ EXPORT_SYMBOL(radix_tree_next_chunk);
  * be prepared to handle that.
  */
 unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
-		unsigned long *first_indexp, unsigned long last_index,
+		radixkey_t *first_indexp, radixkey_t last_index,
 		unsigned long nr_to_tag,
 		unsigned int iftag, unsigned int settag)
 {
@@ -816,7 +821,7 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
 	struct radix_tree_node *slot;
 	unsigned int shift;
 	unsigned long tagged = 0;
-	unsigned long index = *first_indexp;
+	radixkey_t index = *first_indexp;
 
 	last_index = min(last_index, radix_tree_maxindex(height));
 	if (index > last_index)
@@ -837,7 +842,7 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
 	slot = indirect_to_ptr(root->rnode);
 
 	for (;;) {
-		unsigned long upindex;
+		radixkey_t upindex;
 		int offset;
 
 		offset = (index >> shift) & RADIX_TREE_MAP_MASK;
@@ -930,8 +935,8 @@ EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);
  *	radix_tree_next_hole covering both indexes may return 10 if called
  *	under rcu_read_lock.
  */
-unsigned long radix_tree_next_hole(struct radix_tree_root *root,
-				unsigned long index, unsigned long max_scan)
+radixkey_t radix_tree_next_hole(struct radix_tree_root *root,
+				radixkey_t index, unsigned long max_scan)
 {
 	unsigned long i;
 
@@ -967,8 +972,8 @@ EXPORT_SYMBOL(radix_tree_next_hole);
  *	radix_tree_prev_hole covering both indexes may return 5 if called under
  *	rcu_read_lock.
  */
-unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
-				   unsigned long index, unsigned long max_scan)
+radixkey_t radix_tree_prev_hole(struct radix_tree_root *root,
+				   radixkey_t index, unsigned long max_scan)
 {
 	unsigned long i;
 
@@ -976,7 +981,7 @@ unsigned long radix_tree_prev_hole(struct radix_tree_root *root,
 		if (!radix_tree_lookup(root, index))
 			break;
 		index--;
-		if (index == ULONG_MAX)
+		if (index == (radixkey_t)(-1))
 			break;
 	}
 
@@ -1005,7 +1010,7 @@ EXPORT_SYMBOL(radix_tree_prev_hole);
  */
 unsigned int
 radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
-			unsigned long first_index, unsigned int max_items)
+			radixkey_t first_index, unsigned int max_items)
 {
 	struct radix_tree_iter iter;
 	void **slot;
@@ -1046,8 +1051,8 @@ EXPORT_SYMBOL(radix_tree_gang_lookup);
  */
 unsigned int
 radix_tree_gang_lookup_slot(struct radix_tree_root *root,
-			void ***results, unsigned long *indices,
-			unsigned long first_index, unsigned int max_items)
+			void ***results, radixkey_t *indices,
+			radixkey_t first_index, unsigned int max_items)
 {
 	struct radix_tree_iter iter;
 	void **slot;
@@ -1083,7 +1088,7 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_slot);
  */
 unsigned int
 radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
-		unsigned long first_index, unsigned int max_items,
+		radixkey_t first_index, unsigned int max_items,
 		unsigned int tag)
 {
 	struct radix_tree_iter iter;
@@ -1120,7 +1125,7 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
  */
 unsigned int
 radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
-		unsigned long first_index, unsigned int max_items,
+		radixkey_t first_index, unsigned int max_items,
 		unsigned int tag)
 {
 	struct radix_tree_iter iter;
@@ -1146,8 +1151,8 @@ EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
 /*
  * This linear search is at present only useful to shmem_unuse_inode().
  */
-static unsigned long __locate(struct radix_tree_node *slot, void *item,
-			      unsigned long index, unsigned long *found_index)
+static radixkey_t __locate(struct radix_tree_node *slot, void *item,
+			      radixkey_t index, radixkey_t *found_index)
 {
 	unsigned int shift, height;
 	unsigned long i;
@@ -1160,10 +1165,10 @@ static unsigned long __locate(struct radix_tree_node *slot, void *item,
 		for (;;) {
 			if (slot->slots[i] != NULL)
 				break;
-			index &= ~((1UL << shift) - 1);
-			index += 1UL << shift;
+			index &= ~(((radixkey_t)1 << shift) - 1);
+			index += (radixkey_t)1 << shift;
 			if (index == 0)
-				goto out;	/* 32-bit wraparound */
+				goto out;	/* key type wraparound */
 			i++;
 			if (i == RADIX_TREE_MAP_SIZE)
 				goto out;
@@ -1197,12 +1202,12 @@ out:
  *	Caller must hold no lock (since this time-consuming function needs
  *	to be preemptible), and must check afterwards if item is still there.
  */
-unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
+radixkey_t radix_tree_locate_item(struct radix_tree_root *root, void *item)
 {
 	struct radix_tree_node *node;
-	unsigned long max_index;
-	unsigned long cur_index = 0;
-	unsigned long found_index = -1;
+	radixkey_t max_index;
+	radixkey_t cur_index = 0;
+	radixkey_t found_index = -1;
 
 	do {
 		rcu_read_lock();
@@ -1227,7 +1232,7 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
 	return found_index;
 }
 #else
-unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
+radixkey_t radix_tree_locate_item(struct radix_tree_root *root, void *item)
 {
 	return -1;
 }
@@ -1306,7 +1311,7 @@ static inline void radix_tree_shrink(struct radix_tree_root *root)
  *
  *	Returns the address of the deleted item, or NULL if it was not present.
  */
-void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
+void *radix_tree_delete(struct radix_tree_root *root, radixkey_t index)
 {
 	struct radix_tree_node *node = NULL;
 	struct radix_tree_node *slot = NULL;
@@ -1404,16 +1409,16 @@ radix_tree_node_ctor(void *node)
 	memset(node, 0, sizeof(struct radix_tree_node));
 }
 
-static __init unsigned long __maxindex(unsigned int height)
+static __init radixkey_t __maxindex(unsigned int height)
 {
 	unsigned int width = height * RADIX_TREE_MAP_SHIFT;
 	int shift = RADIX_TREE_INDEX_BITS - width;
 
 	if (shift < 0)
-		return ~0UL;
-	if (shift >= BITS_PER_LONG)
-		return 0UL;
-	return ~0UL >> shift;
+		return (radixkey_t)(-1);
+	if (shift >= RADIX_TREE_INDEX_BITS)
+		return 0;
+	return (radixkey_t)(-1) >> shift;
 }
 
 static __init void radix_tree_init_maxindex(void)
@@ -1421,7 +1426,9 @@ static __init void radix_tree_init_maxindex(void)
 	unsigned int i;
 
 	for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++)
+	{
 		height_to_maxindex[i] = __maxindex(i);
+	}
 }
 
 static int radix_tree_callback(struct notifier_block *nfb,
diff --git a/mkimage b/mkimage
new file mode 100755
index 0000000000000000000000000000000000000000..4f054e4afafeb22dcc085f6799cb6d140cc6b76f
GIT binary patch
literal 12796
zcmbVT2|!fk_WuS399bMJHBGczZ%|wyH1df`u?Ql$px}~AILr*p=qxmIN6|keHelQ?
zn3a`nK23f3UcH`ssHi9=YQ98sU&*D0u=JUkmM{Llb7$^2ki7T)|BGkN@}2LT^PTT}
z-(8)bHY#1AP%!xjV1Z2HURJ8&A$&i^(-O}(ree`7l67an5K&uv33LEb2XqR8ObH02
zHt_Ka&<=PIJOQ<lZ35|_zJQWw#t5iCi}18dvMRIzl;~IiAivZe0JH@9OVCEJ4?F?2
zlMjLnILUx-g5lr^<hHCUyuP6~V+|iERRkKe5y<W1&<^%@hiw0GvnlU!vp&XbvO0@m
z9k$qkp1uf88rz656QsTrC-va~WV;i9azpK}B{zJY_~N&R-(K`k_)p6eU-hfV3MKun
z0LqV)3ohU>kdo|^XD-zw+z=y)_dhcIm<K>FYsC+3#cyrJuW!Zs<_2;Ou(DNqY%Bit
zR(x$MKB*O7(2D=K6+g8VZ)?RbZN<OVivMdXUfYTv)QT@}#V=^Z_i4pH41UkMs7c_C
zOwHy2FRzyQZj9yT7FcZ7TnDeU^SQYUrJ|z}8N_l&j)hcjvgSG+20b$rnRw>l?N(jk
zTvq7h9n50U7P5T3me)GwT6HjFu-mP+T(eE5<xMs#)A1&Yf#pM2M;&>a(@a>Wbr^J(
zLXdiNU>2v9?BwR=7a<0o<<GI3cmva!Z4Lvo6&kEkAEO<H>?SLp4^59EOrgKc4gm%2
z9f3t_va%7QMy90Z4vdYXWCZyB{ONnrmXre83jER9AfO|{WD2Lg$2=^XwneE5#zLVy
z1|fxJ@a<5Dg@myvc*0mXMTC*~V!~J;C4|whlyE!7$_cBmR}gN`*kZyV*e3{wGPaa(
z81^*6h+sM44veiLtY&NtVFdXJ;f{>0C5%P2kuWx}D#BRA)r31URztW8>Vfb>j7<=(
zWV@~^-88JJ3u<Zq6yLbp+}vDtgb#9CA$9k_aNKU#m^^iVZS#|Q^lXkY;+Og~N72ZQ
zL^d{{TXPg;z({S4^%75+Fj5*DYb2gBVx%pjajnEtW{e#1%O##NWQ-<$vBXoRjB&)5
zNIYfCIGFfCiKomNlZc-o@svSh2Ju-EPnk4k5uYUSlu_ek;^QQqGHaYcJSXv#VIvKw
zF+$=g)5b#LnZ#4ZjYY&?{|$mD%Dl0J_y&om88DU;UoY`A6UN2F*GN3gh;b?LYbBm$
z#<-mL<q}UbWL!i1Vu`1jGOi`Q1UxVs@cnbb*RIRgvvS58v#_p>*WXd9#!WQ-QmItA
ze}&S`#Vrn+<4V2eZb{-QR4a2nwXFhYpcQMtENChRx$D;eSCb;)q@%~;O1>AXOj4|*
z#^z3C)G8?^oV2_522p<nOev>MudSTTDywlul4=Mt_lm_(F00`ms3IYV_H{}P$(rl;
z*ZK!U@rYf^&J=Y`_`(_5q{MHKn5AlJ@2UwS`P?6bdN_dEq=_ib{zBSiVM2p&NQ^7H
zRn$wU6;qY4-bpCdC<Ud){VYaYR--O`1zugXVd4{Ix8|M|(;9@~8dcqJjq)H;w1U<t
zDNlp4n0DDUm=&c8wN+TLOw3lI*kfF`72RrF4L4jhLxdbnn6LLTzurC3TN)kwxQ4R1
zdpvgG1VhEDW$rL&?4UXkuDka1pZZ*`G!EC*t}EDfL@@lG;p_fkeg3cJX4zI4f>td>
zWE4CF2-{2E!_+Q@5@-^+FN5|ZXu;L@ArR6^g%iT)lDf1CaLkohg#k0Z8-^k!;fVh*
zCXpc+Crx3P#ytWHUf5e!GdJ1YAEd}AQ{zcEToZvbhsnW9rY`xL3iFF83v^}LV#=Jr
zODUl%2X{A!CENAC%C>KTy07hD{B5sA1oC)q5<Of)mm>_uBS42qxq&mnVp=r}(ln5U
zyNn#vx^6c+likny+|F;yZO1?PO>%;t+ZRB)600ef>Qc(A>xJs4XFpJxl$21bcD)2a
z$<StA*Yw2ivK04$pUH3FSRP4Vm@FCG%>Ta2H2*Fe{JQ)Mx-vUSIO(cUB-A^1RVBf3
zQ<zkVO+gYTzEIt#4ne3Ym4TE95%qvcJoig;bHZ&t-JJu%Gq3YNQSMhLy#?7~a-;n-
z@wi_M4$!U_N*G@x?B4auBfGxqsi=1~1oo-t3wBZ=dA+Aq_<7GI?{)Bg{EqDXE~x*~
z`#SWNllBvZ@0nsDQsX{OV~E$dt-uTRQ2om*P1-R=346sPm3kjG4)M7#^%s@t9DGW}
z%Rv`LQ!z|8L+?uLEyz<48T8c8Bx<^XFrj87`V?sO<}r_Kodd2a3ANxJf@BDF5cb?E
zlCWkQgi$0+I|ymy)^{Z7R!G&7bRVQWwyq*6Bk3LTSFDK;Cmf{BMx3yZ7OFRbEl_jo
z;B_m4Wy17&VZzZ1mikjQC&Q~(DplvEX(CRB*LWzEFiooZ3e=AKar6<>BQS8!Q1+3b
zhb3VrpYS{hgQPK~_Z+6=Rl|bSW$(e!HuC0rF^TaVJ$qb@0!yfPVG?uRL@sVpzeLGc
z61HD+W#k7*=pal#xRW{xwdyZ~le^rV;5&kd5D;OAPK9yOx~!rID{rItU+fv|e5kDX
z#RtW-gWgDgBjbf=bk!-u;XBAdsH-;6b?L5R`oX4^u05mV3C4L3bG+c{68IFIDsg(4
z7`T%5R_q8|9n1hnQ$SKwjgRBv#pO;O_DA*0VIb9;YnnA>XFr5m&_>K8Z5@rb@$33Q
zQFiOaM_si_F?6eOxjaur81G6Ria5atm!Rj4#mqIHYTaW#9D7P5oq7~|d#s}#RPv1<
z$vtq|l--1`pwgjo=t!Lky`74wQyq1(%bnyNccmUwY!{HLl||kjnbhM=>X9V(m>=XW
zg#hD0K2#AHX60~AJY`!9q=LVBiz--ztYk2@HcA8<Fk>xtgN{gq@6`F|&-d1&0ko7d
z_=)uR!8E978iGDSR4~UHH{eHB`kCg|FPIzzlOqRHK*3<zyy5V1_X#D|Ka{u70VPFV
z+^2~STI+S%lq`EJ^LUg@aZP<)L3Bn`G<9$V(RQwA3Uce9AqG}vDTD{ro0Al!HR`g%
zI9GW}A%aRFuw-xye-;I-X;RX!>o<k7gV>oyn9!N(#~I}AL4Fd+KXX0|rD*T@f}eol
zXk0Z(G%{S<c+MjH(_ZONNcn-7oUkN}qD9s(W2N}A{_#)uhI$P&(o7q-E0NqYz6&gP
z&f}QW`8TCJ_HT-$N#Bh0P-_b6UO=nX+v+OT^k;L^-O1oRr-TAeOB<(z7U%m&zi?)N
z_Qs(ueH&g-VuLYSm6)UHPqT|DlCA(Af+DT}(%oPFD7(A~3aV|mY;hUz7epsd_V*O?
zaE;z|`{_AP-2V*5`Cmd<{Q9mV+ItD%jqrDu5eUh54Uy~#F`lj>(%ldGLQnX&p|`k{
z_yMYN=mRPA&Lx;NQ6+?k8A>5jDQ2jIOqG}sCS-<*84*HegqYDu$m}F$bQLnYiW!kY
zW~7+G37MRjp%F4QVn(!(87*e?7c%>c8F4~poR|?WWX6jbgN4k&V$v`nWti7TfO=oz
z#BM>o`8Lv&;h&~-zc^o^flg7Xzo<)*3X4jKO4K`{ps)$zP8JtHr9VMKo(1XjTI6r_
z`VH@7xoHikFmLgm<={*4H|Xz;cYIg%frQ<B&lcuZ`I}qeF&FRdOzRd!K$AsA1r+xl
zPh)?gCxb@TrAVtuE@gcSZGzH?O*rx6$MUFF(1b{asAfB9+#mlFMSVL77cGy1@~n>$
zprF%6v9`Emx(ii##*^TC1?|zU+e*H@>o(4_vuK~~LHlH7u_mtUjJotCL`AD~2rymR
zgpw~@i}Qhd?=2|rNnTn^2d;ZBrx$?m#P<;>iVtcn_wN?#Nb48cYS5HI>8c>EDI5{R
zQNDK259NlYo=)7-LP1)Vzy%E>-5ug{8|QT^PgzUfEz)uYs&ZQTz<1dhzN>V-QFaSe
zgnOpF!9yyaQ#5`DQhbu~BVw2G<pq~|KVHKaYmt|3$jcVm`Gp(8ZTD^DcyHj4N{6E9
z@D>VQxPfu?cQ-)Z)Jc+voh5rDJwVj}f~w>q47Rml<s)2a!|DMgPmWW`f15SXBGI>@
zDi3xT4P4%I$TLiU_uaeDl+r{%?pqc6evn*wH=cLFc|xf1$lEBxO#c(4(=UWApjS%!
zP7XHC?~>?Bv(-9$4*3*zNo%2w^$zMbQ6e$-Md|}W7VYQMk?1hc8X_V<L=(|Ty<f=a
zDt4yJm-4Jgu`^vRm1p%3J5%YGXY~|2)8$Zk)+1C`9E~?e!_jWys^MrN`e0bpl!PYV
za+KHa9tefql9-$+-CezB07@7VCoTRuN|AS3Ly7jT<*}eqG3oA!-^)2H2G#a7wy4<w
z6qolR!|9<B40D^v1D%7~+}7}1?%uZ6sKFh;V$Z$0=nNCnR6p0dUWjO?Ug##Z(GkK8
z_2!2M)Cw0Cd`pjacDYqgD7QD@@?O1pilPm9KpXPFHss1S<Uwu7gWHg|yPw>(Tjgq0
zx$d-6zdR66KE&w}t|pamde?V>mHcs)>ry)bk{2Qv&q+5l>dj$Y2JBLA-Y<=`l1A#<
ztw@*<;k@#5y>yxH|F8(OCwJ~CNX~*<!y~w;yc+3FZNl>1Mvv?&UG&?hZdY?aSu?L0
za7#>!z!a;*G<t}GJ7Ie8)4XH@X}R_)5`MFvckPLk993?HO?n18y;h9W$PcN&OV7(z
z(2E?L{rLyB5w*B->fPpgF_5kYZdU50hhP+&|MSy*Jy9SPSDTxk#j{efeZPa&JUwHk
zBV?p?w$^OYb5Ra1XKtZ^i{U)HRckRgIIZ1)_#D~-!w}MlF>@vhm=Ml)K$MvH6lXR$
zcuyN=G-&ZuoikbUZFY<F7+zK<3(2slGjcIwIUBefj+dOKaypx(kc-iBTD{)R#TYmP
zI4eg{a4~w0p77`9o6Lr(LwsaxZ0vMzIE?w?ArBOg!@x_P@E~8uJM9LJw{gA6b#FNu
zbU_rdE*|sS5v;X<bIf({28*O`bF|RT@o7t0@Wf7bX%H<mQ+=8=F1x|uz}pCqes7Yr
zP}cgC%{HxG$^bfe0!B7^bJ0daQ6K-X4Za=*D{r66725DxLh4{BypOuor|yYc$|}W8
zM2k@Lk|tiZc;}Lm&E?x|7OppuGy^U4i+q+RkIdqt4Mhf>lSgMOS7_Gi41MUCB#Uwk
z;ZiU#hiM+l=&>lrW1NYGXX6|et=Y_>PB?ULp-Y~;x<;)720c}o=giMXZQbiJoC3_2
z3}Zf{^jykZ9(9|@Ma2%x=VqnI)aaBDs*#aYy--6n5BAfI*Lzj<94UHZPWQyspW}_F
zN54c{oGA7@loaH?DdYNaQD!G~zW;cwHt)*vtRO$V(b@u&jvM2&<QeSu^7DKBzJ2?0
zleBg#iqtPPdC07H$?1#+-7JUG@@NRVzpl<^w>t}YgJ(fOjYa9Ou354k@7N0D>^MTC
zA*5=py?Ksae?f(ozrUE?lif#b$J%cpo<wibIO!!BhZP_j^31~+l;04Vw%EQfo{G*0
zjRYifFpm=LN+e?wth21PIaV(9-iq?cBvl_bHrq!gW5vr`0K@*{`4=7KKxvMm)rO(d
z@*Kj&6-C7jo*cr`q>0X=6$Q7lG}UJ1Q38%3jDj1RpHKS>s!ZxaqmnDsR}Zv=<($cl
z{zW9WdD4vckI?pHS>BRUvnDXDxzMO($<nXZZehveN3)R##mp5A9>h%ALGf&4GJY(k
zLI)c?GAmo6ARw9>6wjsLm1$w2j%C^A80=Z8%<)7*92=Q3n#(4_VPx4(?0gx_QHafv
zWoIRihYr!OA#t)1%b>xt*yz+jgX7|0z`GuiX)2IBhn|fEPRSn5tOlOgF$SKve7nIw
zY&tOU@wxHD0ud*D4ZudxHv!Bs+YXNt$Hy~+!&Jo5=*M9*Ywacn%l2ZE9m0|H<Ivey
zcIsHBH<-<AIQ>|4b~Yw;JTuR=7TFwZ6cD1DT{OpLpXFc^izZ39(1EtXY_vRYEn?%w
zOlHoQJe!SY6Jk;b*|GII*!Y~Z(b)`dqoJHUJ_p+JfIWqV>bL5(W}DT(vVA0%Ww#aB
zwH7wZV7D5~>=_Ap78QeBZ*t6H<C8}Z2lh%WPBU*}qv<y$9Xp;^oNuyQ=Fq7(T_zkl
zyQz?8*&aljqvtFI4Vm=zwNg+{t9;tQTl0c>rb6jQI>%D6LzoBQK+mtgnB^=|szxkU
zsup@N(f<up-ADiX`^f&2tw&zH=kI0EpZM{u`=)vA_xukrKIv_L3K%a;o&?a{?!P_W
zifcEgzqZ2<TgSD3r`sM&yH{_{EPQgcu5sm?`eg&l&vaN-b#`vg#$yfTr;fbcZ`9`Y
z+l$w|*e<5(+t+&RT&$1ytj7&*?aYklxBPfMc}M9BM^8W8borcR+@_;9>&m`zO<Zbz
zux8CH{k%JQ|GK*0@XFX}icY7@!8%={`j?-=Rh9cV>(J*SOrtI*&p2j(zTm^dmjj25
zII4W+UuEG(W^D>t{mYt7PnwsmPdUDS+nJ<0)t_84pB(6%b|LLh`0=yPCLaFylekm8
zhOqPFO2(bo*F{^ubJNX@kN#oLhKcXIx%H5G_4cY|RRPhe@}S)Hr^5DX8bjB;Tx@&1
z<0#|gk9%l$CC21$UYvY(cJT9O6xH03rF|ogT@%XIJ^#q2&B3cL@BGsfN2}Z)?r&EV
ze}}0Tt!e*O*QFi4-WEP{eoUf8^|o36nqr#no15ci&C1i7BTvQ|%Eqv~8{fYfZ2q6s
zitf8^s$QD3NBz^@Q)>-bjh{X6UDXb;xO~gq3q8)|jf^?n`N!g~Dh#8J{yJH^e)N~)
zHVxrf^~#HJ+m0r$zL0t1&6A0iJ%?BQdh_^KGaF5*cTZX4)5{C~R$n#av?E42{7R3&
zCvr!H{B?hE__<-v2W9;=IiPn$MCgu%T-g4oO~y$Z%4}TwqxoC@bXoglhdbw=Te|;L
zpU0Nguid!jMBPJ)8=hSdzA<*_wC$h$&AhdSb5(u6dBe_k!!B+PeD}yYK4HYM@BTjL
z$OoN!ol&~FoOL`9s=HJ=RKMh@nU?T1Q)bQ!J=Edy2hHu@==~`RS+}g+3&Y<~Uq7-^
z^|JAXqUUeBgH6*X<^B9cfuZF2LFR7X2G6q2?tb*QhJIfOQ|6tHIGA~^z;<EFojccd
zEK1$+*+a(`t)1asdHnc`Zya8+XZOj$<8NFT`_F=HhYKcE|M^_-rl+47v_9i>zwncz
zx`+JzR%YO!8S|9Mwby1`m~f$B&9z0=0dsel(knJ-zj)Y{U%mW@Eo#t3<M=n{gdGeV
z5xQ+lmw?9}?-ev*$Iz{xM~7}-|N4}T{gg8|O#7($#O?uy>NknYPWA8c>G`Q^ckAEl
zbwhXc{gpGFA#Yd${u12&{ig?YXeujcXMb=K3wkMY=bN48RbBkF-@5-9+<kM{dy9^|
z9=_w)mCvr7Eq?UEnb4z0zFL@a@#sHqZ#ZYPx=ugxk1jhFOzpMh*4J~^+UAV-tm`*Z
zRON*;)y)Tn2Io%;Rdj4#W_Z=~Y2NLxnrCU#4w<_YT?|?N!;$b4IWFax-5UZEw7m*G
z`nk)D`q3jyBhSpSK9xE1@TX^|96yyGdLilh(35}6{<QippD){Xe*U5LBO04G#YNuO
zzWtTmTMxv&u_61<D>rI_2GwtUBlyHW`b;{Xv$NpTA7ba_Z>Y-D?g{8_oW7)=t?wUq
zgl=51D6F>og`g>w*8*Y`-2;bm*d6FTDFRPyh5;7>8-bm`<-pR5U|_|(@k~*yQgjbf
zD$DT{BO0<uz(G7@q8mn*+(A_qkP;r0>Izz@d^s>6t2TLWa$WN0&?mhN(t%Cs+cjBN
zYV~~kLq4y-7CjG>58Ox6jk@$Xh%BQAQuKfkpX#ttWl9x3)nOHQ!;fcMwhDaDNA|N^
z_+|)rEqJ<4y#T-`K2kq?nj;-F<{9|ngYFLLVI_?dpY<?&s>7C)Dpf?60@HVM<d42l
zQ~~hZ%%+DNq1h-FJa)3hfd~12KIr>Ixzp<y%ZGsVfZc#2fQx_|fPjUF1MnDNARq-W
z9*_sH16~5W4)_qT9<Up51aJ{>10ehO|Kb^V;zeUM1KR<Vvm)SP0OfHZa0!6s?iJuy
z0W=RwfZqYoTr2}#4Zx>ZY#s0>00p)kcsBr_WU()RzXZ^n9RofCpn3ZmxDh~eehc_l
z0M&-{77{N6A^x{VAl~Lt{2hS104RPu|5il-DE|Jy@c@c{7;q|p;?D%m0#N)Dfu{l}
z{u#gq06CrsYzI*M^ML0ADE?C5asb6&0sI<(;(rVHT>#bXGT_w!d}hWz0p19pM_Sdu
zy8u*odw~xCs2;xrJ_ewgJPUjgKy~Q`{sGWdPHv)&#vg#!vEcw3KL^|!K;w@G9tNQC
zX98ydX#85>nE)EU19%>Q#=ii#0??K}sQ8W#p!Xqw{Qn(zBY^zx0X_imw?RJ1KDCp6
z4IXEXNKGBWMdPd3K3sh4ld%K1fpG)k;t~eNbJ62*dzXQ`Gf5s3--oeShmk&h)aJ2R
z+|?NDg)G);;|;ONDI;TeZGo37usUP&oF;svYtpk=;&A6}WU>0WR>xe62lIAM%WQ+4
zo@lgSa?xftm`UG56q<Q*h#Qbt-cW?!d@yLT(W3+wYcS^K<FeS0Yt*Bs%&}M<Z?ik#
z&Vy#+vYt$8EhZh@;S!yGVkO%iJ9!Rh<9gSEyDMbpzdNY@q`3!{zPScNH6tI?MhD5{
z+)Lj^qk*Fa?;-EYkAj!=j0^Bh9P~7@60ocn4;n9wZ8Vp(FIRZ=XpZS1J%XnI6eH=;
ze!UiY%K=n#K#~rvgLDAZD(TVwo&|>Xb@?D&I>rD{Q<7iWm*e1sVv+q)e**fJnD%m_
zX+PIPj|1)R7c83zptVhUrO+#d9>pWuqcPAv-~h;abpBX^20E{h9<6&>uNXANN`C14
zH3|xGpyh+~=$HeL^@^ca480P0kZ2=4f|vaC7{1G=$*J(0V2YKFa)4})&X*Mrd(Pid
zBYtGRi@bU?-WurDK#!x%%6o8L{1!0TBfSRtR=)TB>tL0io^lQLm}q>X3z>Y#em{kP
z?9m+4Lty&GpYkdnvfd^LWIg(5vb+twYCk>tHgYlaWE*5trgwp+xgfvN{~3VZKCcW&
z(o+E+0LXr);kscOwj0@otVjGIKRvp>m;pT_DR>XkAvg(;?KMEJ0eY(_ncjo*;`6}d
upW<7N{jjt@&cog|LY$Yr1WfkiS|UCC^BvSC1KE;~wNSb$Gr*)n$NvJ|VY+?*

literal 0
HcmV?d00001

diff --git a/mm/memory.c b/mm/memory.c
index 4b60011907d7..31cb8cf4cc83 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -699,7 +699,7 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
 	if (page)
 		dump_page(page);
 	printk(KERN_ALERT
-		"addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
+		"addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%llx\n",
 		(void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
 	/*
 	 * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
@@ -1462,6 +1462,16 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
 
+/*
+ * FOLL_FORCE can write to even unwritable pte's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+{
+	return pte_write(pte) ||
+		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+}
+
 /**
  * follow_page_mask - look up a page descriptor from a user-virtual address
  * @vma: vm_area_struct mapping @address
@@ -1569,7 +1579,7 @@ split_fallthrough:
 	}
 	if ((flags & FOLL_NUMA) && pte_numa(pte))
 		goto no_page;
-	if ((flags & FOLL_WRITE) && !pte_write(pte))
+	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags))
 		goto unlock;
 
 	page = vm_normal_page(vma, address, pte);
@@ -1876,7 +1886,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				 */
 				if ((ret & VM_FAULT_WRITE) &&
 				    !(vma->vm_flags & VM_WRITE))
-					foll_flags &= ~FOLL_WRITE;
+					foll_flags |= FOLL_COW;
 
 				cond_resched();
 			}
diff --git a/mm/mmap.c b/mm/mmap.c
index 8f87b14c7968..f4059529cb6a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2008,7 +2008,15 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
 			vma_tmp = rb_entry(rb_node,
 					   struct vm_area_struct, vm_rb);
 
+#ifdef CONFIG_MV_LARGE_PAGE_SUPPORT
+			/* Take into account a wrap-around of the
+			** vm_end field to 0x0. e.g. vm_start =
+			** 0xFFFF0000 size PAGE_SIZE.
+			*/
+			if ((vma_tmp->vm_end - 1) >= addr) {
+#else
 			if (vma_tmp->vm_end > addr) {
+#endif
 				vma = vma_tmp;
 				if (vma_tmp->vm_start <= addr)
 					break;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0ab02fb8e9b1..eaaeb235ba98 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6251,7 +6251,7 @@ static void dump_page_flags(unsigned long flags)
 void dump_page(struct page *page)
 {
 	printk(KERN_ALERT
-	       "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
+	       "page:%p count:%d mapcount:%d mapping:%p index:%#llx\n",
 		page, atomic_read(&page->_count), page_mapcount(page),
 		page->mapping, page->index);
 	dump_page_flags(page->flags);
diff --git a/mm/percpu.c b/mm/percpu.c
index 8c8e08f3a692..9750cebe34de 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -228,7 +228,7 @@ static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
 /* obtain pointer to a chunk from a page struct */
 static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
 {
-	return (struct pcpu_chunk *)page->index;
+	return (struct pcpu_chunk *)(unsigned long)page->index;
 }
 
 static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b71423db7785..db90ce6e2ce7 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -128,6 +128,65 @@ out_noerr:
 	goto out;
 }
 
+/*
+ *	skb_copy_datagram_to_kernel_iovec - Copy a datagram to a kernel iovec structure.
+ *	@skb: buffer to copy
+ *	@offset: offset in the buffer to start copying from
+ *	@to: io vector to copy to
+ *	@len: amount of data to copy from buffer to iovec
+ *
+ *	Note: the iovec is modified during the copy.
+ */
+int skb_copy_datagram_to_kernel_iovec(const struct sk_buff *skb, int offset,
+				      struct iovec *to, int len)
+{
+	int i, fraglen, end = 0;
+	struct sk_buff *next = skb_shinfo(skb)->frag_list;
+
+	if (!len)
+		return 0;
+
+next_skb:
+	fraglen = skb_headlen(skb);
+	i = -1;
+
+	while (1) {
+		int start = end;
+
+		if ((end += fraglen) > offset) {
+			int copy = end - offset;
+			int o = offset - start;
+
+			if (copy > len)
+				copy = len;
+			if (i == -1)
+				memcpy_tokerneliovec(to, skb->data + o, copy);
+			else {
+				skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+				struct page *page = frag->page.p;
+				void *p = kmap(page) + frag->page_offset + o;
+				memcpy_tokerneliovec(to, p, copy);
+				kunmap(page);
+			}
+
+			if (!(len -= copy))
+				return 0;
+			offset += copy;
+		}
+		if (++i >= skb_shinfo(skb)->nr_frags)
+			break;
+		fraglen = skb_shinfo(skb)->frags[i].size;
+	}
+	if (next) {
+		skb = next;
+		BUG_ON(skb_shinfo(skb)->frag_list);
+		next = skb->next;
+		goto next_skb;
+	}
+
+	return -EFAULT;
+}
+
 /**
  *	__skb_recv_datagram - Receive a datagram skbuff
  *	@sk: socket
diff --git a/net/core/dev.c b/net/core/dev.c
index a0e55ffc03c9..e6694a4d297c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3851,8 +3851,17 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
 		break;
 
 	case GRO_MERGED_FREE:
-		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
+#ifdef CONFIG_NET_SKB_RECYCLE
+			/* Workaround for the cases when recycle callback was not called */
+			if (skb->skb_recycle) {
+				/* Sign that skb is not available for recycle */
+				skb->hw_cookie |= BIT(0);
+				skb->skb_recycle(skb);
+			}
+#endif /* CONFIG_NET_SKB_RECYCLE */
 			kmem_cache_free(skbuff_head_cache, skb);
+		}
 		else
 			__kfree_skb(skb);
 		break;
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 9a31515fb8e3..6948b7f8a4ea 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -100,6 +100,26 @@ int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
 }
 EXPORT_SYMBOL(memcpy_toiovecend);
 
+/*
+ *	In kernel copy to iovec. Returns -EFAULT on error.
+ *
+ *	Note: this modifies the original iovec.
+ */
+void memcpy_tokerneliovec(struct iovec *iov, unsigned char *kdata, int len)
+{
+	while (len > 0) {
+		if (iov->iov_len) {
+			int copy = min_t(unsigned int, iov->iov_len, len);
+			memcpy(iov->iov_base, kdata, copy);
+			len -= copy;
+			kdata += copy;
+			iov->iov_base += copy;
+			iov->iov_len -= copy;
+		}
+		iov++;
+	}
+}
+
 /*
  *	Copy iovec from kernel. Returns -EFAULT on error.
  */
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 79143b7af7e5..aa0a9af550c6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -507,6 +507,14 @@ static void skb_release_data(struct sk_buff *skb)
 			skb_drop_fraglist(skb);
 
 		skb_free_head(skb);
+#ifdef CONFIG_NET_SKB_RECYCLE
+		/* Workaround for the cases when recycle callback was not called */
+		if (skb->skb_recycle) {
+			/* Sign that skb is not available for recycle */
+			skb->hw_cookie |= BIT(0);
+			skb->skb_recycle(skb);
+		}
+#endif /* CONFIG_NET_SKB_RECYCLE */
 	}
 }
 
@@ -588,6 +596,10 @@ static void skb_release_all(struct sk_buff *skb)
 
 void __kfree_skb(struct sk_buff *skb)
 {
+#ifdef CONFIG_NET_SKB_RECYCLE
+	if (skb->skb_recycle && !skb->skb_recycle(skb))
+		return;
+#endif /* CONFIG_NET_SKB_RECYCLE */
 	skb_release_all(skb);
 	kfree_skbmem(skb);
 }
@@ -665,6 +677,55 @@ void consume_skb(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(consume_skb);
 
+#ifdef CONFIG_NET_SKB_RECYCLE
+/**
+ *	skb_recycle - clean up an skb for reuse
+ *	@skb: buffer
+ *
+ *	Recycles the skb to be reused as a receive buffer. This
+ *	function does any necessary reference count dropping, and
+ *	cleans up the skbuff as if it just came from __alloc_skb().
+ */
+void skb_recycle(struct sk_buff *skb)
+{
+	struct skb_shared_info *shinfo;
+
+	skb_release_head_state(skb);
+
+	shinfo = skb_shinfo(skb);
+	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+	atomic_set(&shinfo->dataref, 1);
+
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+	skb->data = skb->head + NET_SKB_PAD;
+	skb_reset_tail_pointer(skb);
+}
+EXPORT_SYMBOL(skb_recycle);
+
+/**
+ *	skb_recycle_check - check if skb can be reused for receive
+ *	@skb: buffer
+ *	@skb_size: minimum receive buffer size
+ *
+ *	Checks that the skb passed in is not shared or cloned, and
+ *	that it is linear and its head portion at least as large as
+ *	skb_size so that it can be recycled as a receive buffer.
+ *	If these conditions are met, this function does any necessary
+ *	reference count dropping and cleans up the skbuff as if it
+ *	just came from __alloc_skb().
+ */
+bool skb_recycle_check(struct sk_buff *skb, int skb_size)
+{
+	if (!skb_is_recycleable(skb, skb_size))
+		return false;
+
+	skb_recycle(skb);
+
+	return true;
+}
+EXPORT_SYMBOL(skb_recycle_check);
+#endif /* CONFIG_NET_SKB_RECYCLE */
+
 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 {
 	new->tstamp		= old->tstamp;
@@ -733,6 +794,11 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 	n->cloned = 1;
 	n->nohdr = 0;
 	n->destructor = NULL;
+#ifdef CONFIG_NET_SKB_RECYCLE
+	n->skb_recycle = NULL;
+	n->hw_cookie = 0;
+#endif /* CONFIG_NET_SKB_RECYCLE */
+
 	C(tail);
 	C(end);
 	C(head);
@@ -3388,6 +3454,15 @@ EXPORT_SYMBOL(__skb_warn_lro_forwarding);
 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
 {
 	if (head_stolen) {
+#ifdef CONFIG_NET_SKB_RECYCLE
+		/* Workaround for the cases when recycle callback was not called */
+		if (skb->skb_recycle) {
+			/* Sign that skb is not available for recycle */
+			skb->hw_cookie |= BIT(0);
+			skb->skb_recycle(skb);
+		}
+#endif /* CONFIG_NET_SKB_RECYCLE */
+
 		skb_release_head_state(skb);
 		kmem_cache_free(skbuff_head_cache, skb);
 	} else {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 39bdb14b3214..220bd644c4fd 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -282,7 +282,7 @@
 
 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
 
-int sysctl_tcp_min_tso_segs __read_mostly = 2;
+int sysctl_tcp_min_tso_segs __read_mostly = 22;
 
 struct percpu_counter tcp_orphan_count;
 EXPORT_SYMBOL_GPL(tcp_orphan_count);
@@ -1616,6 +1616,20 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
 		if (skb)
 			available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
+#ifdef CONFIG_SPLICE_NET_DMA_SUPPORT
+		if (msg->msg_flags & MSG_KERNSPACE) {
+			if ((available >= target) &&
+			    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
+			    !sysctl_tcp_low_latency &&
+			    dma_find_channel(DMA_MEMCPY)) {
+				preempt_enable_no_resched();
+				tp->ucopy.pinned_list =
+						dma_pin_kernel_iovec_pages(msg->msg_iov, len);
+			} else {
+				preempt_enable_no_resched();
+			}
+		}
+#else
 		if ((available < target) &&
 		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
 		    !sysctl_tcp_low_latency &&
@@ -1626,6 +1640,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		} else {
 			preempt_enable_no_resched();
 		}
+#endif
 	}
 #endif
 
@@ -1867,8 +1882,14 @@ do_prequeue:
 			} else
 #endif
 			{
-				err = skb_copy_datagram_iovec(skb, offset,
-						msg->msg_iov, used);
+#ifdef CONFIG_SPLICE_NET_DMA_SUPPORT
+				if (msg->msg_flags & MSG_KERNSPACE)
+					err = skb_copy_datagram_to_kernel_iovec(skb,
+							offset, msg->msg_iov, used);
+				else
+#endif
+					err = skb_copy_datagram_iovec(skb, offset,
+							msg->msg_iov, used);
 				if (err) {
 					/* Exception. Bailout! */
 					if (!copied)
@@ -1934,7 +1955,12 @@ skip_copy:
 	tp->ucopy.dma_chan = NULL;
 
 	if (tp->ucopy.pinned_list) {
-		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
+#ifdef CONFIG_SPLICE_NET_DMA_SUPPORT
+		if(msg->msg_flags & MSG_KERNSPACE)
+			dma_unpin_kernel_iovec_pages(tp->ucopy.pinned_list);
+		else
+#endif
+			dma_unpin_iovec_pages(tp->ucopy.pinned_list);
 		tp->ucopy.pinned_list = NULL;
 	}
 #endif
diff --git a/net/socket.c b/net/socket.c
index fc90b4f0da3c..39358556580e 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1831,6 +1831,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
 	msg.msg_controllen = 0;
 	msg.msg_iovlen = 1;
 	msg.msg_iov = &iov;
+	msg.msg_flags = 0;
 	iov.iov_len = size;
 	iov.iov_base = ubuf;
 	/* Save some cycles and don't copy the address if not needed */
diff --git a/patch/ITR-99092.diff b/patch/ITR-99092.diff
new file mode 100755
index 000000000000..5164b2f5417e
--- /dev/null
+++ b/patch/ITR-99092.diff
@@ -0,0 +1,78 @@
+Index: linux/include/linux/fsnotify_backend.h
+===================================================================
+--- linux/include/linux/fsnotify_backend.h	(revision 16115)
++++ linux/include/linux/fsnotify_backend.h	(working copy)
+@@ -283,6 +283,7 @@
+ 	atomic_t refcnt;		/* active things looking at this mark */
+ 	struct fsnotify_group *group;	/* group this mark is for */
+ 	struct list_head g_list;	/* list of marks by group->i_fsnotify_marks */
++	struct list_head free_g_list;
+ 	spinlock_t lock;		/* protect group and inode */
+ 	union {
+ 		struct fsnotify_inode_mark i;
+Index: linux/fs/notify/inode_mark.c
+===================================================================
+--- linux/fs/notify/inode_mark.c	(revision 16115)
++++ linux/fs/notify/inode_mark.c	(working copy)
+@@ -288,20 +288,25 @@
+ 		spin_unlock(&inode->i_lock);
+ 
+ 		/* In case the dropping of a reference would nuke next_i. */
+-		if ((&next_i->i_sb_list != list) &&
+-		    atomic_read(&next_i->i_count)) {
++		while (&next_i->i_sb_list != list) {
+ 			spin_lock(&next_i->i_lock);
+-			if (!(next_i->i_state & (I_FREEING | I_WILL_FREE))) {
++			if (!(next_i->i_state & (I_FREEING | I_WILL_FREE)) &&
++						atomic_read(&next_i->i_count)) {
+ 				__iget(next_i);
+ 				need_iput = next_i;
++				spin_unlock(&next_i->i_lock);
++				break;
+ 			}
+ 			spin_unlock(&next_i->i_lock);
++			next_i = list_entry(next_i->i_sb_list.next,
++						struct inode, i_sb_list);
+ 		}
+ 
+ 		/*
+-		 * We can safely drop inode_sb_list_lock here because we hold
+-		 * references on both inode and next_i.  Also no new inodes
+-		 * will be added since the umount has begun.
++		 * We can safely drop inode_sb_list_lock here because either
++		 * we actually hold references on both inode and next_i or
++		 * end of list.  Also no new inodes will be added since the
++		 * umount has begun.
+ 		 */
+ 		spin_unlock(&inode_sb_list_lock);
+ 
+Index: linux/fs/notify/mark.c
+===================================================================
+--- linux/fs/notify/mark.c	(revision 16115)
++++ linux/fs/notify/mark.c	(working copy)
+@@ -299,16 +299,22 @@
+ 					 unsigned int flags)
+ {
+ 	struct fsnotify_mark *lmark, *mark;
+-
++	LIST_HEAD(free_list);
++	
+ 	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
+ 	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
+ 		if (mark->flags & flags) {
++		    list_add(&mark->free_g_list, &free_list);
++		    list_del_init(&mark->g_list);
+ 			fsnotify_get_mark(mark);
+-			fsnotify_destroy_mark_locked(mark, group);
+-			fsnotify_put_mark(mark);
+ 		}
+ 	}
+ 	mutex_unlock(&group->mark_mutex);
++	
++	list_for_each_entry_safe(mark, lmark, &free_list, free_g_list) {
++	    fsnotify_destroy_mark(mark, group);
++	    fsnotify_put_mark(mark);
++	}
+ }
+ 
+ /*
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index b954de58304f..84a373feb372 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -33,7 +33,7 @@ my %ignore_type = ();
 my @ignore = ();
 my $help = 0;
 my $configuration_file = ".checkpatch.conf";
-my $max_line_length = 80;
+my $max_line_length = 120;
 
 sub help {
 	my ($exitcode) = @_;
@@ -1783,7 +1783,8 @@ sub process {
 		    $rawline !~ /^.\s*\*\s*\@$Ident\s/ &&
 		    !($line =~ /^\+\s*$logFunctions\s*\(\s*(?:(KERN_\S+\s*|[^"]*))?"[X\t]*"\s*(?:|,|\)\s*;)\s*$/ ||
 		    $line =~ /^\+\s*"[^"]*"\s*(?:\s*|,|\)\s*;)\s*$/) &&
-		    $length > $max_line_length)
+		    $length > $max_line_length &&
+		    !($realfile =~ /sysfs/))
 		{
 			WARN("LONG_LINE",
 			     "line over $max_line_length characters\n" . $herecurr);
@@ -2395,10 +2396,10 @@ sub process {
 # 			$clean = 0;
 # 		}
 
-		if ($line =~ /\bLINUX_VERSION_CODE\b/) {
-			WARN("LINUX_VERSION_CODE",
-			     "LINUX_VERSION_CODE should be avoided, code should be for the version to which it is merged\n" . $herecurr);
-		}
+#		if ($line =~ /\bLINUX_VERSION_CODE\b/) {
+#			WARN("LINUX_VERSION_CODE",
+#			     "LINUX_VERSION_CODE should be avoided, code should be for the version to which it is merged\n" . $herecurr);
+#		}
 
 # check for uses of printk_ratelimit
 		if ($line =~ /\bprintk_ratelimit\s*\(/) {
@@ -2934,19 +2935,6 @@ sub process {
 			}
 		}
 
-#CamelCase
-		while ($line =~ m{($Constant|$Lval)}g) {
-			my $var = $1;
-			if ($var !~ /$Constant/ &&
-			    $var =~ /[A-Z]\w*[a-z]|[a-z]\w*[A-Z]/ &&
-			    $var !~ /"^(?:Clear|Set|TestClear|TestSet|)Page[A-Z]/ &&
-			    !defined $camelcase{$var}) {
-				$camelcase{$var} = 1;
-				WARN("CAMELCASE",
-				     "Avoid CamelCase: <$var>\n" . $herecurr);
-			}
-		}
-
 #no spaces allowed after \ in define
 		if ($line=~/\#\s*define.*\\\s$/) {
 			WARN("WHITESPACE_AFTER_LINE_CONTINUATION",
diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
index f221ddf69080..a6e4ad6ddd2a 100755
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
@@ -58,6 +58,9 @@ CONFIG_FLAGS=""
 if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
 if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
 UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
+if [ -n "$BASEVERSION" ] ; then UTS_VERSION="$UTS_VERSION $BASEVERSION"; fi
+if [ -n "$BUILDNO" ] ; then UTS_VERSION="$UTS_VERSION Build-$BUILDNO"; fi
+
 
 # Truncate to maximum length
 
diff --git a/scripts/mkuboot.sh b/scripts/mkuboot.sh
index 446739c7843a..640dfc5c98da 100755
--- a/scripts/mkuboot.sh
+++ b/scripts/mkuboot.sh
@@ -4,7 +4,8 @@
 # Build U-Boot image when `mkimage' tool is available.
 #
 
-MKIMAGE=$(type -path "${CROSS_COMPILE}mkimage")
+#MKIMAGE=$(type -path "${CROSS_COMPILE}mkimage")
+MKIMAGE=mkimage
 
 if [ -z "${MKIMAGE}" ]; then
 	MKIMAGE=$(type -path mkimage)
@@ -16,4 +17,5 @@ if [ -z "${MKIMAGE}" ]; then
 fi
 
 # Call "mkimage" to create U-Boot image
-${MKIMAGE} "$@"
+#${MKIMAGE} "$@"
+./${MKIMAGE} "$@"
diff --git a/tools/Makefile b/tools/Makefile
index 41067f304215..ae71c34a4b45 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -14,6 +14,7 @@ help:
 	@echo '  virtio     - vhost test module'
 	@echo '  net        - misc networking tools'
 	@echo '  vm         - misc vm tools'
+	@echo '  cesa       - cesa tools'
 	@echo '  x86_energy_perf_policy - Intel energy policy tool'
 	@echo ''
 	@echo 'You can do:'
@@ -35,7 +36,7 @@ help:
 cpupower: FORCE
 	$(call descend,power/$@)
 
-cgroup firewire guest usb virtio vm net: FORCE
+cgroup cesa firewire guest usb virtio vm net: FORCE
 	$(call descend,$@)
 
 liblk: FORCE
@@ -53,7 +54,7 @@ turbostat x86_energy_perf_policy: FORCE
 cpupower_install:
 	$(call descend,power/$(@:_install=),install)
 
-cgroup_install firewire_install lguest_install perf_install usb_install virtio_install vm_install net_install:
+cgroup_install cesa_install firewire_install lguest_install perf_install usb_install virtio_install vm_install net_install:
 	$(call descend,$(@:_install=),install)
 
 selftests_install:
@@ -62,14 +63,14 @@ selftests_install:
 turbostat_install x86_energy_perf_policy_install:
 	$(call descend,power/x86/$(@:_install=),install)
 
-install: cgroup_install cpupower_install firewire_install lguest_install \
+install: cgroup_install cesa_install cpupower_install firewire_install lguest_install \
 		perf_install selftests_install turbostat_install usb_install \
 		virtio_install vm_install net_install x86_energy_perf_policy_install
 
 cpupower_clean:
 	$(call descend,power/cpupower,clean)
 
-cgroup_clean firewire_clean lguest_clean usb_clean virtio_clean vm_clean net_clean:
+cgroup_clean cesa_clean firewire_clean lguest_clean usb_clean virtio_clean vm_clean net_clean:
 	$(call descend,$(@:_clean=),clean)
 
 liblk_clean:
@@ -84,7 +85,7 @@ selftests_clean:
 turbostat_clean x86_energy_perf_policy_clean:
 	$(call descend,power/x86/$(@:_clean=),clean)
 
-clean: cgroup_clean cpupower_clean firewire_clean lguest_clean perf_clean \
+clean: cgroup_clean cesa_clean cpupower_clean firewire_clean lguest_clean perf_clean \
 		selftests_clean turbostat_clean usb_clean virtio_clean \
 		vm_clean net_clean x86_energy_perf_policy_clean
 
diff --git a/tools/cesa/Makefile b/tools/cesa/Makefile
new file mode 100644
index 000000000000..5be03de95d83
--- /dev/null
+++ b/tools/cesa/Makefile
@@ -0,0 +1,19 @@
+# Makefile for cesa tools
+
+srctree  := $(shell /bin/pwd)
+
+ifndef KSRC
+KSRC  := $(srctree)/../..
+endif
+
+CC = $(CROSS_COMPILE)gcc
+LD = $(CROSS_COMPILE)ld
+CFLAGS = -Wall -Wextra
+CFLAGS += -I$(KSRC)/drivers/crypto/mvebu_cesa/
+
+all: mv_cesa_tool
+%: %.c
+	$(CC) $(CFLAGS) -static mv_cesa_tool.c -o mv_cesa_tool
+
+clean:
+	$(RM) mv_cesa_tool
diff --git a/tools/cesa/mv_cesa_tool.c b/tools/cesa/mv_cesa_tool.c
new file mode 100644
index 000000000000..db3375a46246
--- /dev/null
+++ b/tools/cesa/mv_cesa_tool.c
@@ -0,0 +1,307 @@
+/*******************************************************************************
+Copyright (C) Marvell International Ltd. and its affiliates
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms.  Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the two
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the General
+Public License Version 2, June 1991 (the "GPL License"), a copy of which is
+available along with the File in the license.txt file or by writing to the Free
+Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
+on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
+
+THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
+WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
+DISCLAIMED.  The GPL License provides additional details about this warranty
+disclaimer.
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    *   Redistributions of source code must retain the above copyright notice,
+	this list of conditions and the following disclaimer.
+
+    *   Redistributions in binary form must reproduce the above copyright
+	notice, this list of conditions and the following disclaimer in the
+	documentation and/or other materials provided with the distribution.
+
+    *   Neither the name of Marvell nor the names of its contributors may be
+	used to endorse or promote products derived from this software without
+	specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/poll.h>
+
+#include "cesa_dev.h"
+
+
+
+void show_usage(int badarg)
+{
+        fprintf(stderr,
+                "Usage:                                                                                 \n"
+                " mv_cesa_tool -h                                                                       \n"
+                "   Display this help.                                                                  \n"
+                "                                                                                       \n"
+		" mv_cesa_tool -test  <test_mode> <iter> <req_size> <checkmode>				\n"
+		"	(can be used only in Test Mode)							\n"
+		"	test_mode: Multi, Size AES, DES, 3DES MD5 SHA1     				\n"
+		"	iter	 : number of iteration to run						\n"
+		"	req_size : size of the buffer to be encrypt/decrypt/HASING			\n"
+		"	checkmode: 0 - Fast verify 							\n"
+		"		   1 - Full verify						        \n"
+		"		   2 - without verify (for performence)					\n"
+		"											\n"
+		" mv_cesa_tool -test -s <iter> <req_size> <session_id> <data_id>			\n"
+		"       (can be used only in Test Mode)                                                 \n"
+		"	+++ for debug usage only +++							\n"
+		"	req_size  : size of the buffer to be encrypt/decrypt/HASING                     \n"
+		"	iter      : number of iteration to run 						\n"
+		"	session_id: which session to open out of the cesa_test session see in kernel.	\n"
+		"	data_id   : which data pattern to use out of the cesa_test session see in kernel\n"
+		"       checkmode : 0 - Fast verify                                                     \n"
+                "                   1 - Full verify                                                     \n"
+                "                   2 - without verify (for performence)                                \n"
+		"											\n"
+		" mv_cesa_tool -debug <debug_mode> [-i index] [-v] [-s size]				\n"
+		"	(Tst_req, Tst_stats and Tst_ses can be used only in Test Mode)			\n"
+		"	debug_mode: Sts       - display general status 					\n"
+		"		    Chan      - display channel index status 				\n"
+		"		    Q         - display SW Q 						\n"
+		"		    SA        - display SA index 					\n"
+		"		    SAD       - display entire SAD					\n"
+		"		    Sram - display SRAM contents					\n"
+		"		    Tst_req - display request index with buffer of size size		\n"
+		"                   Tst_stats - display test statistics                                 \n"
+		"		    Tst_ses - display session index					\n"
+		"	-i index  : index (only relevant for: Chan, SA, Tst_req and Tst_ses  \n"
+		"	-v        : verbose mode							\n"
+		"	-s size	  : buffer size (only relevant for Tst_req)				\n\n"
+	);
+
+	exit(badarg);
+}
+
+static void get_index(int argc, char **argv, CESA_DEBUG *cesa_debug)
+{
+	int j;
+	for(j = 3; j < argc; j++) {
+		if(!strcmp(argv[j], "-i")) {
+			j++;
+			break;
+		}
+	}
+	if(!(j < argc)) {
+		fprintf(stderr,"missing/ illegal index. \n");
+		exit(1);
+	}
+	cesa_debug->index = atoi(argv[j]);
+}
+
+static void parse_debug_cmdline(int argc, char **argv, CESA_DEBUG *cesa_debug)
+{
+	unsigned int i = 2,j;
+
+        cesa_debug->mode = 0;
+	for(j = i; j < argc; j++) {
+		if(!strcmp(argv[j], "-v"))
+			cesa_debug->mode++;
+	}
+
+	if(argc < 3) {
+                fprintf(stderr,"missing arguments\n");
+		exit(1);
+	}
+
+	if(!strcmp(argv[i], "Sts"))
+		cesa_debug->debug = STATUS;
+	else if(!strcmp(argv[i], "Chan")) {
+                cesa_debug->debug = CHAN;
+		get_index(argc, argv, cesa_debug);
+	}
+        else if(!strcmp(argv[i], "Q"))
+                cesa_debug->debug = QUEUE;
+        else if(!strcmp(argv[i], "SA")) {
+                cesa_debug->debug = SA;
+		get_index(argc, argv, cesa_debug);
+	}
+        else if(!strcmp(argv[i], "Cache_idx")) {
+                cesa_debug->debug = CACHE_IDX;
+		get_index(argc, argv, cesa_debug);
+	}
+        else if(!strcmp(argv[i], "Sram"))
+                cesa_debug->debug = SRAM;
+        else if(!strcmp(argv[i], "SAD"))
+                cesa_debug->debug = SAD;
+        else if(!strcmp(argv[i], "Tst_req")) {
+                cesa_debug->debug = TST_REQ;
+		for(j = i; j < argc; j++) {
+			if(!strcmp(argv[j], "-s")) {
+				j++;
+				break;
+			}
+		}
+		if(!(j < argc)) {
+			fprintf(stderr,"missing/illegal size\n");
+			exit(1);
+		}
+		cesa_debug->size = atoi(argv[j]);
+		get_index(argc, argv, cesa_debug);
+	}
+        else if(!strcmp(argv[i], "Tst_ses")) {
+                cesa_debug->debug = TST_SES;
+		get_index(argc, argv, cesa_debug);
+	}
+	else if(!strcmp(argv[i], "Tst_stats")) {
+		cesa_debug->debug = TST_STATS;
+        }
+	else{
+		fprintf(stderr,"illegal debug option\n");
+		exit(1);
+	}
+
+}
+
+static void parse_test_cmdline(int argc, char **argv, CESA_TEST *cesa_test)
+{
+        unsigned int i = 2;
+
+	if(argc < 6) {
+		fprintf(stderr,"missing arguments\n");
+		exit(1);
+	}
+
+	if(!strcmp(argv[i], "-s")) { /* single test */
+		i++;
+		if(argc != 8)
+			show_usage(1);
+		cesa_test->test = SINGLE;
+		cesa_test->iter = atoi(argv[i++]);
+		cesa_test->req_size = atoi(argv[i++]);
+		cesa_test->session_id = atoi(argv[i++]);
+		cesa_test->data_id = atoi(argv[i++]);
+                cesa_test->checkmode = atoi(argv[i++]);
+	}
+        else {
+		if(argc != 6)
+                        show_usage(1);
+		if (!strcmp(argv[i], "Multi"))
+			cesa_test->test = MULTI;
+                else if (!strcmp(argv[i], "Size"))
+                        cesa_test->test = SIZE;
+		else if(!strcmp(argv[i], "AES"))
+                        cesa_test->test = AES;
+                else if(!strcmp(argv[i], "DES"))
+                        cesa_test->test = DES;
+                else if(!strcmp(argv[i], "3DES"))
+                        cesa_test->test = TRI_DES;
+                else if(!strcmp(argv[i], "MD5"))
+                        cesa_test->test = MD5;
+                else if(!strcmp(argv[i], "SHA1"))
+                        cesa_test->test = SHA1;
+		else {
+			fprintf(stderr,"illegal test option\n");
+			exit(1);
+		}
+		i++;
+		cesa_test->iter = atoi(argv[i++]);
+		cesa_test->req_size = atoi(argv[i++]);
+		cesa_test->checkmode = atoi(argv[i++]);
+        }
+}
+
+
+
+int main(int argc, char *argv[])
+{
+        char *name = "/dev/cesa";
+        int fd, t, i, fdflags;
+        int rc = 0;
+	CESA_TEST	cesa_test;
+	CESA_DEBUG	cesa_debug;
+
+	memset(&cesa_test, 0, sizeof(CESA_TEST));
+	memset(&cesa_debug, 0, sizeof(CESA_DEBUG));
+
+        /* open the device */
+        fd = open(name, O_RDWR);
+        if (fd <= 0) {
+                printf("## Cannot open %s device.##\n",name);
+                exit(2);
+        }
+
+        /* set some flags */
+        fdflags = fcntl(fd,F_GETFL,0);
+        fdflags |= O_NONBLOCK;
+        fcntl(fd,F_SETFL,fdflags);
+
+        if(argc < 2) {
+                fprintf(stderr,"missing arguments\n");
+		exit(1);
+        }
+
+	i = 1;
+
+        if (!strcmp(argv[i], "-h")) {
+                show_usage(0);
+        }
+
+        else if (!strcmp(argv[i], "-test")) { /* test */
+		parse_test_cmdline(argc, argv, &cesa_test);
+		printf("test %d iter %d req_size %d checkmode %d sess_id %d data_id %d \n",
+			cesa_test.test, cesa_test.iter, cesa_test.req_size, cesa_test.checkmode,
+			cesa_test.session_id, cesa_test.data_id );
+
+		rc = ioctl(fd, CIOCTEST, &cesa_test);
+	}
+	else { /* debug */
+                parse_debug_cmdline(argc, argv, &cesa_debug);
+		printf("debug %d index %d mode %d size %d \n",
+			cesa_debug.debug, cesa_debug.index, cesa_debug.mode, cesa_debug.size);
+                rc = ioctl(fd, CIOCDEBUG, &cesa_debug);
+	}
+	if(rc < 0) printf("Cesa Tool failed to perform action!\n");
+
+	close(fd);
+
+	return 0;
+}
diff --git a/tools/nas/nas_init.sh b/tools/nas/nas_init.sh
new file mode 100755
index 000000000000..0c33468cb1f2
--- /dev/null
+++ b/tools/nas/nas_init.sh
@@ -0,0 +1,873 @@
+#!/bin/bash
+
+echo " * Version: 5.3"
+
+# LOG:
+# 5.3:
+#   1. enable adaptive coalecing for a338 & a375.
+# 5.2:
+#   1. fix -j option when not using the -p option.
+# 5.1:
+#   1. add mkfs.btrfs the -f flag
+# 5.0:
+#   1. remove btrfs disabling use of strict allocate in smb.conf
+# 4.9:
+#   1. set fdisk alignment to better support SSDs and new 4K sectors HDDS.
+#   2. remove default HDD_NUM set to 4. (each RAID has it's own default)
+# 4.8:
+#   1. setting tcp_limit_output_bytes on a38x.
+# 4.7:
+#   1. adding option none to -s parameters.
+# 4.6:
+#   1. a388 interrupt affinity.
+#   2. a375 optimizations (network apadtive coalescing).
+#   3. a375 interrupt affinity.
+#   4. RAID stripe cache size changed to 4K.
+# 4.4:
+#   1. initial support for a380, a385, a388 configurations.
+#   2. added support for 3 HDDs RD0 and RD5 setups.
+# 4.3:
+#   1. fixed wrong partnum usage.
+#   2. min receivefile size set to 16k.
+# 4.2:
+#   1. removed strip_cache_size on RD1 configuration (no such entity in RD1).
+#   2. fixing mdadm -S command.
+# 4.1:
+#   1. added support to encrypted single disk configuration (e.g. -t crypt_sd)
+#   2. added new option -j that create offset partition in disk (e.g. -j 150)
+# 4.0:
+#   1. complete redesign of the nas_init script.
+#   2. added new option -s that transfer SoC used, please see help.
+#   3. new functions to consolidate mkfs and mount utilities.
+#   4. removed old -b option, no support for rootfs copy.
+#   5. removed old -s option, SSD support was broken anyway.
+#   6. adding support for a370 adaptive coaleaing.
+# 3.6:
+#   1. fixing option u: will not start samba processes.
+#   2. remove the NONE option for FS.
+#   3. removing reboot command.
+# 3.5:
+#   1. moving raid-5 to 128 chunk size
+# 3.4:
+#   1. adding support for ubuntu systems by using -u param.
+#   2. when using ubuntu, users will have to stop and start samba manually,
+#      will be done automaticlly in later versions.
+#   3. fixing bonding setup flow as needed by newer kernels.(both 2 and 4 links)
+#   4. switching bond configuration to balance-xor (from balance-alb).
+#   5. checking if smbd and nmbd exist on the filesystem.
+# 3.3:
+#   1. adding support for 5 HDDs RAID5.
+#   2. reverting gso, tso to default state.
+#   3. RD5 changes:
+#      3.1. disk scheduler revert to deafult.
+#      3.2. dirty_ratio, dirty_background_ratio reverted to default.
+#      3.3. md read_ahead changed to 2048.
+#      3.4. disks read_ahead changed to 192.
+#   4. minor cosmetics changes.
+# 3.2:
+#   1. adding support for -l 0 to avoid network setup.
+#   2. updating smb.conf to improve single link performance.
+# 3.1:
+#   1. mount point updated.
+#   2. XOR affinity update.
+#   3. RAID-6 support.
+# 3.0:
+#   1. first all-in-one script to include: affinity, bonding, nas_init.
+#   2. PAGE_SIZE automatic set.
+#   3. LINK_NUM support to identify number of links.
+#   4. prints cleanup and better flow status report.
+#   5. Error prints added.
+#   6. Exit on error.
+# 2.5:
+#   1. XFS mount options updated.
+#   2. RAID1 chunk size updated.
+#   3. splice enabled for XFS.
+# 2.4:
+#   1. support RAID-1.
+#   2. splice enabled by default.
+# 2.3:
+#   1. fixing the mkdir share for loop.
+# 2.2:
+#   1. automatic size settings for PARTSIZE.
+# 2.1:
+#   1. setting coal to 100 in net queue 0 and fixing for in NETQ.
+
+PREPARE_HDD="no"
+MKFS="no"
+TOPOLOGY="sd"
+HDD_NUM=""
+PLATFORM=""
+FS="ext4"
+SYSDISKEXIST="no"
+CPU_COUNT=`grep -c ^processor /proc/cpuinfo`
+LARGE_PAGE=`/usr/bin/getconf PAGESIZE`
+PARTNUM="1"
+LINK_NUM=`dmesg |grep "Giga ports" |awk '{print $2}'`
+PARTSIZE="55GB"
+JUMPPARTSIZE="0"
+NETQ="0"
+SAMBASTATUS="enabled"
+MNT_DIR="/mnt/public"
+# Encryption variables
+CRYPTO_NAME="cryptovol1"
+KEY_SIZE="192"
+ALGORITHIM="aes"
+
+
+STAMP="[$(date +%H:%M-%d%b%Y)]:"
+echo -e "Script parameters has been updated! please read help!"
+
+function do_error_hold {
+    echo -e "\n*************************** Error ******************************\n"
+    echo -e "${STAMP} Error: ${1}"
+    echo -ne "Press Enter continue..."
+    echo -e "\n****************************************************************\n"
+    read TEMP
+    exit 2
+}
+
+function do_error {
+    echo -e "\n*************************** Error ******************************\n"
+    echo -e " ${STAMP}: ${1}"
+    echo -e " Please see help\n"
+    echo -e "\n****************************************************************\n"
+    exit 1
+}
+
+if [ "$CPU_COUNT" == "0" ]; then
+    CPU_COUNT="1"
+fi
+
+while getopts "l:ps:mzun:f:t:h:j:" flag; do
+    case "$flag" in
+	f)
+	    FS=$OPTARG
+	    case "$OPTARG" in
+		ext4|btrfs|xfs)	echo "Filesystem: ${OPTARG}" ;;
+		*)   do_error "-f: wrong option" ;;
+	    esac
+	    ;;
+	m)	MKFS="yes"
+	    ;;
+	z)	SYSDISKEXIST="yes"
+	    ;;
+	n)	PARTNUM="$OPTARG"
+	    ;;
+	p)	PREPARE_HDD="yes"
+	    ;;
+	l)	LINK_NUM=$OPTARG
+	    case "$OPTARG" in
+		0|1|2|4) ;;
+		*)	do_error "-l: wrong option" ;;
+	    esac
+	    ;;
+	t)	TOPOLOGY=$OPTARG
+	    case "$OPTARG" in
+		sd|rd0|rd1|rd5|rd6|crypt_sd) ;;
+		*)	do_error "-t: wrong option" ;;
+	    esac
+	    ;;
+	s)	PLATFORM=$OPTARG
+	    case "$OPTARG" in
+		a310|a310|axp|a370|a375|a380|a385|a388|none) ;;
+		*)	do_error "-s: wrong option" ;;
+	    esac
+	    ;;
+	h)	HDD_NUM=$OPTARG
+	    case "$OPTARG" in
+		2|3|4|5|8) ;;
+		*)	do_error "-h: wrong option" ;;
+	    esac
+	    ;;
+	u)      SAMBASTATUS="disabled"
+	    ;;
+	j)	JUMPPARTSIZE=$OPTARG
+		if [ $OPTARG -lt 1 ]; then
+			do_error "-j: wrong input"
+		fi
+		;;
+	*)	echo "Usage: $0"
+	    echo "           -s <a300|a310|axp|a370|a375|a380|a385|a388|none>: platform used with nas_init"
+	    echo "           -f <ext4|xfs|btrfs>: file system type ext4, xfs, btrfs or fat32"
+	    echo "           -t <sd|rd0|rd1|rd5|rd6>: drive topology"
+	    echo "           -n <num>: partition number to be mounted"
+	    echo "           -m create RAID and FD (mkfs/mdadm)"
+	    echo "           -p prepare drives (fdisk)"
+	    echo "           -h <num>: number of HDDs to use"
+	    echo "           -l <num>: number of links to use"
+	    echo "           -u adding this flag will disable SAMBA support"
+	    echo "           -j <num>: offset in disk for the new partition to be created (in GB)"
+	    exit 1
+	    ;;
+    esac
+done
+
+# verify supporting arch
+case "$PLATFORM" in
+    a300|a310|axp|a370|a375|a380|a385|a388|none)	;;
+    *)	do_error "Platform ${PLATFORM} unsupported, please pass valid -s option" ;;
+esac
+
+echo -ne "************** System info ***************\n"
+echo -ne "    Topology:      "
+case "$TOPOLOGY" in
+    sd)		echo -ne "Single drive\n" ;;
+    rd0)	echo -ne "RAID0\n" ;;
+    rd1)	echo -ne "RAID1\n" ;;
+    rd5)	echo -ne "RAID5\n" ;;
+    rd6)	echo -ne "RAID6\n" ;;
+    crypt_sd) 	echo -ne "Encrypted single drive\n" ;;
+    *)	do_error "Invalid drive topology" ;;
+esac
+
+echo -ne "    Filesystem:    $FS\n"
+echo -ne "    Platform:      $PLATFORM\n"
+echo -ne "    Cores:         $CPU_COUNT\n"
+echo -ne "    Links:         $LINK_NUM\n"
+echo -ne "    Page Size:     $LARGE_PAGE\n"
+echo -ne "    Disk mount:    $SYSDISKEXIST\n"
+echo -ne "    * Option above will used for the system configuration\n"
+echo -ne "      if you like to change them, please pass different parameters to the nas_init.sh\n"
+echo -ne "******************************************\n"
+
+if [ "$SAMBASTATUS" == "enabled" ]; then
+
+    [ ! -e "$(which smbd)" ] && do_error "SAMBA in not installed on your filesystem (aptitude install samba)"
+    [ ! -e "$(which nmbd)" ] && do_error "SAMBA in not installed on your filesystem (aptitude install samba)"
+
+    echo -ne " * Stopping SAMBA processes:   "
+    if [ "$(pidof smbd)" ]; then
+	killall smbd
+    fi
+
+    if [ "$(pidof nmbd)" ]; then
+	killall nmbd
+    fi
+    echo -ne "[Done]\n"
+    sleep 2
+
+    echo -ne " * Checking SAMBA is not running:  "
+    if [ `ps -ef |grep smb |grep -v grep |wc -l` != 0 ]; then
+	do_error "Unable to stop Samba processes, to stop them manually use -u flag"
+    fi
+    echo -ne "[Done]\n"
+fi
+
+mkdir -p $MNT_DIR
+chmod 777 $MNT_DIR
+
+echo -ne " * Unmounting $MNT_DIR:            "
+if [ `mount | grep $MNT_DIR | grep -v grep | wc -l` != 0 ]; then
+    umount $MNT_DIR
+fi
+
+sleep 2
+
+if [ `mount | grep mnt | grep -v grep | wc -l` != 0 ]; then
+    do_error "Unable to unmount $MNT_DIR"
+fi
+echo -ne "[Done]\n"
+
+# examine disk topology
+if [ "$TOPOLOGY" == "sd" -o "$TOPOLOGY" == "crypt_sd" ]; then
+    if [ "$SYSDISKEXIST" == "yes" ]; then
+	DRIVES="b"
+    else
+	DRIVES="a"
+    fi
+    PARTSIZE="55GB"
+elif [ "$TOPOLOGY" == "rd0" ]; then
+    if [ "$HDD_NUM" == "5" ]; then
+	if [ "$SYSDISKEXIST" == "yes" ]; then
+	    DRIVES="b c d e f"
+	else
+	    DRIVES="a b c d e"
+	fi
+	PARTSIZE="20GB"
+    elif [ "$HDD_NUM" == "4" ]; then
+	if [ "$SYSDISKEXIST" == "yes" ]; then
+	    DRIVES="b c d e"
+	else
+	    DRIVES="a b c d"
+	fi
+	PARTSIZE="20GB"
+    elif [ "$HDD_NUM" == "3" ]; then
+	if [ "$SYSDISKEXIST" == "yes" ]; then
+	    DRIVES="b c d"
+	else
+	    DRIVES="a b c"
+	fi
+	PARTSIZE="25GB"
+    else # [ "$HDD_NUM" == "2" ] default
+	if [ "$SYSDISKEXIST" == "yes" ]; then
+	    DRIVES="b c"
+	else
+	    DRIVES="a b"
+	fi
+	HDD_NUM=2
+	PARTSIZE="50GB"
+    fi
+    LEVEL=0
+elif [ "$TOPOLOGY" == "rd1" ]; then
+    if [ "$SYSDISKEXIST" == "yes" ]; then
+	DRIVES="b c"
+    else
+	DRIVES="a b"
+    fi
+    PARTSIZE="55GB"
+    HDD_NUM=2
+    LEVEL=1
+elif [ "$TOPOLOGY" == "rd5" ]; then
+    if [ "$HDD_NUM" == "8" ]; then
+	if [ "$SYSDISKEXIST" == "yes" ]; then
+	    DRIVES="b c d e f g h i"
+	else
+	    DRIVES="a b c d e f g h"
+	fi
+	PARTSIZE="10GB"
+    elif [ "$HDD_NUM" == "5" ]; then
+	if [ "$SYSDISKEXIST" == "yes" ]; then
+	    DRIVES="b c d e f"
+	else
+	    DRIVES="a b c d e"
+	fi
+	PARTSIZE="20GB"
+    elif [ "$HDD_NUM" == "3" ]; then
+	if [ "$SYSDISKEXIST" == "yes" ]; then
+	    DRIVES="b c d"
+	else
+	    DRIVES="a b c"
+	fi
+	PARTSIZE="25GB"
+    else # [ "$HDD_NUM" == "4" ] default HDD_NUM
+	if [ "$SYSDISKEXIST" == "yes" ]; then
+	    DRIVES="b c d e"
+	else
+	    DRIVES="a b c d"
+	fi
+	PARTSIZE="20GB"
+	HDD_NUM=4
+    fi
+    LEVEL=5
+elif [ "$TOPOLOGY" == "rd6" ]; then
+    if [ "$HDD_NUM" == "8" ]; then
+	if [ "$SYSDISKEXIST" == "yes" ]; then
+	    DRIVES="b c d e f g h i"
+	else
+	    DRIVES="a b c d e f g h"
+	fi
+	PARTSIZE="10GB"
+    elif [ "$HDD_NUM" == "5" ]; then
+	if [ "$SYSDISKEXIST" == "yes" ]; then
+	    DRIVES="b c d e f"
+	else
+	    DRIVES="a b c d e"
+	fi
+	PARTSIZE="20GB"
+    else #[ "$HDD_NUM" == "4" ] default HDD_NUM
+	if [ "$SYSDISKEXIST" == "yes" ]; then
+	    DRIVES="b c d e"
+	else
+	    DRIVES="a b c d"
+	fi
+	PARTSIZE="20GB"
+	HDD_NUM=4
+    fi
+    LEVEL=6
+fi
+
+# create_fs function variables:
+# $1 filesystem type
+# $2 RAID topology
+# $3 device to use
+# $4 number of HDDs (optional, for RAID only)
+function create_fs {
+    STRIDE=32
+    STRIPE=0
+    HDD_NUM=$4
+    case "$1" in
+	ext4)	[ ! -e "$(which mkfs.ext4)" ] \
+	    && do_error "missing mkfs.ext4 in rootfs (aptitude install e2fsprogs)" ;;
+	xfs)	[ ! -e "$(which mkfs.xfs)" ] \
+	    && do_error "missing mkfs.xfs in rootfs (aptitude install xfsprogs)" ;;
+	btrfs)	[ ! -e "$(which mkfs.btrfs)" ] \
+	    && do_error "missing mkfs.btrfs in rootfs (aptitude install btrfs-tools)" ;;
+	*) do_error "no valid filesystem specified" ;;
+    esac
+
+    case "$1" in
+	ext4)
+	    case "$2" in
+		sd) STRIPE=0
+		    ;;
+		rd1) STRIPE=$STRIDE
+		    ;;
+		rd0) STRIPE=$((STRIDE * HDD_NUM))
+	            ;;
+		rd5) HDD_NUM=$((HDD_NUM - 1))
+		    STRIPE=$((STRIDE * HDD_NUM))
+		    ;;
+		rd6) HDD_NUM=$(($HDD_NUM - 2))
+		    STRIPE=$((STRIDE * HDD_NUM))
+		    ;;
+		*) do_error "unsupported topology $2\n"
+		    ;;
+	    esac
+	    if [ $STRIPE -gt 0 ]; then
+		E_OPTION="-E stride=$STRIDE,stripe-width=$STRIPE"
+	    else
+		E_OPTION=""
+	    fi
+	    # create the filesystem
+	    mkfs.ext4 -j -m 0 -O large_file,extent -b 4096 $E_OPTION -F $3
+	    ;;
+        xfs) mkfs.xfs -f $3
+	    ;;
+        btrfs) mkfs.btrfs -f $3
+	    ;;
+	*) do_error "unsupported filesystem $1\n"
+	    ;;
+    esac
+}
+
+# mount_fs function variables:
+# $1 filesystem type
+# $2 device to use
+# $3 mount dir
+function mount_fs {
+    if [ "$1" == "ext4" ]; then
+	mount -t ext4 $2 $3 -o noatime,data=writeback,barrier=0,nobh
+    elif [ "$1" == "xfs" ]; then
+	mount -t xfs $2 $3 -o noatime,nodirspread
+    elif [ "$1" == "btrfs" ]; then
+	mount -t btrfs $2 $3 -o noatime,thread_pool=$CPU_COUNT
+    else
+	do_error "unsupported filesystem $1\n"
+    fi
+
+    if [ `mount | grep $3 | grep -v grep | wc -l` == 0 ]; then
+	do_error "Failed to mount FS $1 from $2 to dir $3"
+    fi
+}
+
+# Checking drives existence
+#for drive in `echo $DRIVES`; do
+ #   if [ `fdisk -l |grep "Disk /dev/sd$drive" |grep -v grep |wc -l` == 0 ]; then
+#	do_error "Drives assigned (/dev/sd$drive) is not persent!"
+ #   fi
+#done
+
+[ ! -e "$(which mdadm)" ] && do_error "missing mdadm in rootfs (aptitude install mdadm)"
+
+if [ "$JUMPPARTSIZE" -ne "0" ]; then
+	PARTNUM="2"
+fi
+
+if [ "$PREPARE_HDD" == "yes" ]; then
+    echo -ne " * Preparing disks partitions: "
+    [ ! -e "$(which fdisk)" ] && do_error "missing fdisk in rootfs"
+
+    mdadm -S /dev/md*
+    sleep 2
+
+    for partition in `echo $DRIVES`; do mdadm --zero-superblock /dev/sd${partition}${PARTNUM}; done
+    sleep 2
+
+    set -o verbose
+
+    FDISK_LINE="o\nn\np\n1\n\n+$PARTSIZE\nt\n83\nw\n"
+    if [ "$JUMPPARTSIZE" -ne "0" ]; then
+	FDISK_LINE="o\nn\np\n1\n\n+${JUMPPARTSIZE}GB\nt\n83\nn\np\n2\n\n+$PARTSIZE\nt\n2\n83\nw\n"
+    fi
+
+    for drive in `echo $DRIVES`; \
+		do echo -e "${FDISK_LINE}" | fdisk -c -u /dev/sd${drive}; done
+    sleep 3
+    mdadm -S /dev/md*
+    sleep 2
+    set +o verbose
+
+    blockdev --rereadpt /dev/sd${drive} \
+    || do_error "The partition table has been altered, please reboot device and run nas_init again"
+    echo -ne "[Done]\n"
+fi
+
+echo -ne " * Stopping all MD devices:    "
+mdadm -S /dev/md*
+sleep 2
+echo -ne "[Done]\n"
+
+if [ "$TOPOLOGY" == "sd" ]; then
+    PARTITIONS="/dev/sd${DRIVES}${PARTNUM}"
+    echo -ne " * Starting single disk:       "
+    set -o verbose
+
+    echo -e 1024 > /sys/block/sd${DRIVES}/queue/read_ahead_kb
+
+    if [ "$MKFS" == "yes" ]; then
+	for partition in `echo $DRIVES`; do mdadm --zero-superblock /dev/sd${partition}${PARTNUM}; done
+	sleep 2
+	create_fs $FS $TOPOLOGY $PARTITIONS
+    fi
+
+    mount_fs $FS $PARTITIONS $MNT_DIR
+
+    set +o verbose
+    echo -ne "[Done]\n"
+elif [ "$TOPOLOGY" == "crypt_sd" ]; then
+    PARTITIONS="/dev/sd${DRIVES}${PARTNUM}"
+    CRYPTO_PARTITIONS="/dev/mapper/$CRYPTO_NAME"
+    echo -ne " * Starting encrypted single disk:       "
+    set -o verbose
+
+    echo -e 1024 > /sys/block/sd${DRIVES}/queue/read_ahead_kb
+
+    # create encryption key
+    dd if=/dev/urandom of=key bs=$KEY_SIZE count=1
+    cryptsetup -c $ALGORITHIM -d key -s $KEY_SIZE create $CRYPTO_NAME $PARTITIONS
+
+    if [ "$MKFS" == "yes" ]; then
+	create_fs $FS "sd" $CRYPTO_PARTITIONS
+    fi
+
+    mount_fs $FS $CRYPTO_PARTITIONS $MNT_DIR
+
+    set +o verbose
+    echo -ne "[Done]\n"
+else # RAID TOPOLOGY
+    [ ! -e "$(which mdadm)" ] && do_error "missing mdadm in rootfs (aptitude install mdadm)"
+
+    echo -ne " * Starting $TOPOLOGY build:       "
+    for drive in `echo $DRIVES`; do PARTITIONS="${PARTITIONS} /dev/sd${drive}${PARTNUM}"; done
+
+    set -o verbose
+
+    if [ "$MKFS" == "yes" ]; then
+	mdadm -S /dev/md*
+	sleep 2
+	for partition in `echo $DRIVES`; do mdadm --zero-superblock /dev/sd${partition}${PARTNUM}; done
+	sleep 2
+
+	echo "y" | mdadm --create -c 128 /dev/md0 --level=$LEVEL -n $HDD_NUM --force $PARTITIONS
+	sleep 2
+
+	if [ `cat /proc/mdstat  |grep md0 |wc -l` == 0 ]; then
+	    do_error "Unable to create RAID device"
+	fi
+
+	create_fs $FS $TOPOLOGY /dev/md0 $HDD_NUM
+
+    else
+	# need to reassemble the raid
+	mdadm --assemble /dev/md0 --force $PARTITIONS
+
+	if [ `cat /proc/mdstat  |grep md0 |wc -l` == 0 ]; then
+	    do_error "Unable to assemble RAID device"
+	fi
+    fi
+
+    mount_fs $FS /dev/md0 $MNT_DIR
+
+    if [ "$TOPOLOGY" != "rd1" ]; then
+       # no stripe_cache_size support in rd1
+	if [ "$LARGE_PAGE" == "65536" ]; then
+            echo 256 > /sys/block/md0/md/stripe_cache_size
+	else
+            echo 4096 > /sys/block/md0/md/stripe_cache_size
+	fi
+    fi
+
+    for drive in `echo $DRIVES`; do echo -e 192 > /sys/block/sd${drive}/queue/read_ahead_kb; done
+
+    echo -e 2048 > /sys/block/md0/queue/read_ahead_kb
+    echo 100000 > /sys/block/md0/md/sync_speed_min
+
+    set +o verbose
+    echo -ne "[Done]\n"
+
+fi
+
+sleep 2
+echo -ne " * Network setup:              "
+if [ "$LINK_NUM" == "2" ]; then
+    [ ! -e "$(which ifenslave)" ] && do_error "missing ifenslave in rootfs (aptitude install ifenslave)"
+    set -o verbose
+    ifconfig eth0 0.0.0.0 down
+    ifconfig eth1 0.0.0.0 down
+
+    ifconfig bond0 192.168.0.5 netmask 255.255.255.0 down
+    echo balance-xor > /sys/class/net/bond0/bonding/mode
+    ifconfig bond0 up
+    ifenslave bond0 eth0 eth1
+
+    set +o verbose
+    echo -ne "[Done]\n"
+elif [ "$LINK_NUM" == "4" ]; then
+    [ ! -e "$(which ifenslave)" ] && do_error "missing ifenslave in rootfs (aptitude install ifenslave)"
+    set -o verbose
+    ifconfig eth0 0.0.0.0 down
+    ifconfig eth1 0.0.0.0 down
+    ifconfig eth2 0.0.0.0 down
+    ifconfig eth3 0.0.0.0 down
+
+    ifconfig bond0 192.168.0.5 netmask 255.255.255.0 down
+    echo balance-xor > /sys/class/net/bond0/bonding/mode
+    ifconfig bond0 up
+    ifenslave bond0 eth0 eth1 eth2 eth3
+
+    set +o verbose
+    echo -ne "[Done]\n"
+elif [ "$LINK_NUM" == "1" ]; then
+    set -o verbose
+    ifconfig eth0 192.168.0.5 netmask 255.255.255.0 up
+    set +o verbose
+    echo -ne "[Done]\n"
+elif [ "$LINK_NUM" == "0" ]; then
+    echo -ne "[Skip]\n"
+fi
+
+echo -ne " * Network optimization:       "
+
+if [ "$PLATFORM" == "a300" ] || [ "$PLATFORM" == "a310" ]; then
+# KW section, LSP_5.1.3
+    [ ! -e "$(which mv_eth_tool)" ] && do_error "missing mv_eth_tool in rootfs"
+    set -o verbose
+    mv_eth_tool -txdone 8
+    mv_eth_tool -txen 0 1
+    mv_eth_tool -reuse 1
+    mv_eth_tool -recycle 1
+    mv_eth_tool -lro 0 1
+    mv_eth_tool -rxcoal 0 160
+    set +o verbose
+elif [ "$PLATFORM" == "axp" ]; then
+    [ ! -e "$(which ethtool)" ] && do_error "missing ethtool in rootfs"
+    set -o verbose
+    echo 1 > /sys/devices/platform/neta/gbe/skb
+    for (( j=0; j<$LINK_NUM; j++ )); do
+        # set RX coalecing to zero on all port 0 queues
+	for (( i=0; i<=$NETQ; i++ )); do
+	    echo "$j $i 100" > /sys/devices/platform/neta/gbe/rxq_time_coal
+	done
+
+	ethtool -k eth$j > /dev/null
+    done
+    set +o verbose
+elif [ "$PLATFORM" == "a370" ]; then
+    [ ! -e "$(which ethtool)" ] && do_error "missing ethtool in rootfs"
+    set -o verbose
+    ethtool -C eth0 pkt-rate-low 20000 pkt-rate-high 3000000 rx-frames 100 \
+	rx-usecs 1500 rx-usecs-high 1500 adaptive-rx on
+    set +o verbose
+elif [ "$PLATFORM" == "a375" ]; then
+    [ ! -e "$(which ethtool)" ] && do_error "missing ethtool in rootfs"
+    set -o verbose
+    for i in 0 1 ; do
+	ethtool -C  eth$i pkt-rate-low 20000 pkt-rate-high 3000000	\
+		    rx-usecs-low  60       rx-frames-low  32		\
+		    rx-usecs      80       rx-frames      32		\
+		    rx-usecs-high 1150     rx-frames-high 32  	adaptive-rx on
+    done
+    set +o verbose
+elif [ "$PLATFORM" == "a380" ]; then
+    [ ! -e "$(which ethtool)" ] && do_error "missing ethtool in rootfs"
+#    set -o verbose
+#    ethtool -C eth0 pkt-rate-low 20000 pkt-rate-high 3000000 rx-frames 100 \
+#	rx-usecs 1500 rx-usecs-high 1500 adaptive-rx on
+#    set +o verbose
+    echo 250000 > /proc/sys/net/ipv4/tcp_limit_output_bytes
+elif [ "$PLATFORM" == "a385" ]; then
+    [ ! -e "$(which ethtool)" ] && do_error "missing ethtool in rootfs"
+#    set -o verbose
+#    ethtool -C eth0 pkt-rate-low 20000 pkt-rate-high 3000000 rx-frames 100 \
+#	rx-usecs 1500 rx-usecs-high 1500 adaptive-rx on
+#    set +o verbose
+    echo 250000 > /proc/sys/net/ipv4/tcp_limit_output_bytes
+elif [ "$PLATFORM" == "a388" ]; then
+    [ ! -e "$(which ethtool)" ] && do_error "missing ethtool in rootfs"
+    set -o verbose
+    for i in 0 1 ; do
+	ethtool -C  eth$i pkt-rate-low 20000 pkt-rate-high 3000000	\
+		    rx-usecs-low  60       rx-frames-low  32		\
+		    rx-usecs      80       rx-frames      32		\
+		    rx-usecs-high 1150     rx-frames-high 32  	adaptive-rx on
+    done
+    set +o verbose
+    echo 250000 > /proc/sys/net/ipv4/tcp_limit_output_bytes
+fi
+
+echo -ne "[Done]\n"
+sleep 2
+
+chmod 777 /mnt/
+mkdir -p /mnt/usb
+chmod 777 /mnt/usb
+
+for (( i=0; i<4; i++ )); do
+    mkdir -p /mnt/public/share$i
+    chmod 777 /mnt/public/share$i
+done
+
+# Samba
+if [ "$SAMBASTATUS" == "enabled" ]; then
+
+    echo -ne " * Starting Samba daemons:     "
+    if [[ -e "$(which smbd)" && -e "$(which nmbd)" ]]; then
+	chmod 0755 /var/lock
+	rm -rf /etc/smb.conf
+	touch  /etc/smb.conf
+	echo '[global]' 				>>  /etc/smb.conf
+	echo ' netbios name = debian-armada'		>>  /etc/smb.conf
+	echo ' workgroup = WORKGROUP'			>>  /etc/smb.conf
+	echo ' server string = debian-armada'		>>  /etc/smb.conf
+	echo ' encrypt passwords = yes'			>>  /etc/smb.conf
+	echo ' security = user'				>>  /etc/smb.conf
+	echo ' map to guest = bad password'		>>  /etc/smb.conf
+	echo ' use mmap = yes'				>>  /etc/smb.conf
+	echo ' use sendfile = yes'			>>  /etc/smb.conf
+	echo ' dns proxy = no'				>>  /etc/smb.conf
+	echo ' max log size = 200'			>>  /etc/smb.conf
+	echo ' log level = 0'				>>  /etc/smb.conf
+	echo ' socket options = IPTOS_LOWDELAY TCP_NODELAY' >>  /etc/smb.conf
+	echo ' local master = no'			>>  /etc/smb.conf
+	echo ' dns proxy = no'				>>  /etc/smb.conf
+	echo ' ldap ssl = no'				>>  /etc/smb.conf
+	echo ' create mask = 0666'			>>  /etc/smb.conf
+	echo ' directory mask = 0777'			>>  /etc/smb.conf
+	echo ' show add printer wizard = No'		>>  /etc/smb.conf
+	echo ' printcap name = /dev/null'		>>  /etc/smb.conf
+	echo ' load printers = no'			>>  /etc/smb.conf
+	echo ' disable spoolss = Yes'			>>  /etc/smb.conf
+	echo ' max xmit = 131072'			>>  /etc/smb.conf
+	echo ' disable netbios = yes'			>>  /etc/smb.conf
+	echo ' csc policy = disable'			>>  /etc/smb.conf
+	echo ' strict allocate = yes'			>>  /etc/smb.conf
+	if [ "$FS" == "btrfs" ]; then
+		# crash identified with btrfs
+		echo '# min receivefile size = 16k'	>>  /etc/smb.conf
+	else
+		echo ' min receivefile size = 16k'	>>  /etc/smb.conf
+	fi
+	echo ''						>>  /etc/smb.conf
+	echo '[public]'					>>  /etc/smb.conf
+	echo ' comment = my public share'		>>  /etc/smb.conf
+	echo ' path = /mnt/public'			>>  /etc/smb.conf
+	echo ' writeable = yes'				>>  /etc/smb.conf
+	echo ' printable = no'				>>  /etc/smb.conf
+	echo ' public = yes'				>>  /etc/smb.conf
+	echo ''						>>  /etc/smb.conf
+	echo ''						>>  /etc/smb.conf
+	echo '[usb]'					>>  /etc/smb.conf
+	echo ' comment = usb share'			>>  /etc/smb.conf
+	echo ' path = /mnt/usb'				>>  /etc/smb.conf
+	echo ' writeable = yes'				>>  /etc/smb.conf
+	echo ' printable = no'				>>  /etc/smb.conf
+	echo ' public = yes'				>>  /etc/smb.conf
+	echo ''						>>  /etc/smb.conf
+
+	rm -rf /var/log/log.smbd
+	rm -rf /var/log/log.nmbd
+
+	$(which nmbd) -D -s /etc/smb.conf
+	$(which smbd) -D -s /etc/smb.conf
+
+	sleep 1
+	echo -ne "[Done]\n"
+    fi
+fi
+
+echo -ne " * Setting up affinity:        "
+if [ "$PLATFORM" == "axp" ]; then
+    if [ "$CPU_COUNT" = "4" ]; then
+	set -o verbose
+
+	# XOR Engines
+	echo 1 > /proc/irq/51/smp_affinity
+	echo 2 > /proc/irq/52/smp_affinity
+	echo 4 > /proc/irq/94/smp_affinity
+	echo 8 > /proc/irq/95/smp_affinity
+	# ETH
+	echo 1 > /proc/irq/8/smp_affinity
+	echo 2 > /proc/irq/10/smp_affinity
+	echo 4 > /proc/irq/12/smp_affinity
+	echo 8 > /proc/irq/14/smp_affinity
+	# SATA
+	echo 2 > /proc/irq/55/smp_affinity
+	# PCI-E SATA controller
+	echo 4 > /proc/irq/99/smp_affinity
+	echo 8 > /proc/irq/103/smp_affinity
+
+	set +o verbose
+    elif [ "$CPU_COUNT" == "2" ]; then
+	set -o verbose
+
+	# XOR Engines
+	echo 1 > /proc/irq/51/smp_affinity
+	echo 2 > /proc/irq/52/smp_affinity
+	echo 1 > /proc/irq/94/smp_affinity
+	echo 2 > /proc/irq/95/smp_affinity
+	# ETH
+	echo 1 > /proc/irq/8/smp_affinity
+	echo 2 > /proc/irq/10/smp_affinity
+	# SATA
+	echo 2 > /proc/irq/55/smp_affinity
+	# PCI-E SATA controller
+	echo 2  > /proc/irq/99/smp_affinity
+
+	set +o verbose
+    fi
+elif [ "$PLATFORM" == "a375" ]; then
+        set -o verbose
+
+        # XOR Engines
+        #echo 1 > /proc/irq/54/smp_affinity
+        #echo 2 > /proc/irq/55/smp_affinity
+        echo 2 > /proc/irq/97/smp_affinity
+        echo 2 > /proc/irq/98/smp_affinity
+        # SATA
+	echo 2 > /proc/irq/58/smp_affinity
+
+        set +o verbose
+elif [ "$PLATFORM" == "a385" ]; then
+	set -o verbose
+
+	# XOR Engines
+	echo 1 > /proc/irq/54/smp_affinity
+	echo 2 > /proc/irq/55/smp_affinity
+	echo 1 > /proc/irq/97/smp_affinity
+	echo 2 > /proc/irq/98/smp_affinity
+	# ETH
+	echo 1 > /proc/irq/363/smp_affinity
+	echo 2 > /proc/irq/365/smp_affinity
+	# SATA
+#	echo 2 > /proc/irq/61/smp_affinity
+	# PCI-E SATA controller
+	echo 2  > /proc/irq/61/smp_affinity
+
+	set +o verbose
+elif [ "$PLATFORM" == "a388" ]; then
+	set -o verbose
+
+	# XOR Engines
+	echo 1 > /proc/irq/54/smp_affinity
+	echo 1 > /proc/irq/55/smp_affinity
+	echo 2 > /proc/irq/97/smp_affinity
+	echo 2 > /proc/irq/98/smp_affinity
+	# ETH
+#	echo 1 > /proc/irq/363/smp_affinity
+#	echo 2 > /proc/irq/365/smp_affinity
+	# SATA
+	echo 2 > /proc/irq/60/smp_affinity
+	# PCI-E SATA controller
+	echo 2  > /proc/irq/61/smp_affinity
+
+	set +o verbose
+fi
+echo -ne "[Done]\n"
+
+case "$TOPOLOGY" in
+    rd1 | rd5 | rd6)
+		#watch "cat /proc/mdstat|grep finish"
+	;;
+esac
+
+echo -ne "\n==============================================\n"
diff --git a/xbuild.sh b/xbuild.sh
new file mode 100755
index 000000000000..e6b4626283b2
--- /dev/null
+++ b/xbuild.sh
@@ -0,0 +1,172 @@
+#!/bin/sh
+
+build()
+{
+  if [ -z "$ARCH" ] ; then
+    echo "do \"export ARCH=arm\" first."
+    exit 1
+  fi
+  RET=1
+  PREV_CROSS_COMPILE=$CROSS_COMPILE
+  PREV_PATH=$PATH
+  
+  export CROSS_COMPILE=arm-marvell-linux-gnueabi-
+  export PATH=/opt_gccarm/armv7-marvell-linux-gnueabi-softfp_i686_64K_Dev_20131002/bin:/bin/:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+  export KBUILD_BUILD_USER=kman
+  export KBUILD_BUILD_HOST=kmachine
+  export BASEVERSION=2014T30p5
+  export BUILDNO=git$(git rev-parse --verify --short HEAD)
+  if [ "$PROJECT_NAME" = "Yellowstone" ]; then
+    echo -e "\033[32m ***********************\033[0m"
+    echo -e "\033[32m *  build Yellowstone  *\033[0m"
+    echo -e "\033[32m ***********************\033[0m"
+    cp arch/arm/boot/dts/Yellowstone/* arch/arm/boot/dts/
+  elif [ "$PROJECT_NAME" = "Yosemite" ]; then
+    echo -e "\033[32m ***********************\033[0m"
+    echo -e "\033[32m *    build Yosemite   *\033[0m"
+    echo -e "\033[32m ***********************\033[0m"
+    cp arch/arm/boot/dts/Yosemite/* arch/arm/boot/dts/
+  else
+    echo "Not support"
+    exit 0
+  fi
+  
+  #make mrproper
+  #make mvebu_lsp_defconfig
+  
+  rm arch/arm/boot/zImage
+  
+	KCFLAGS="-DALPHA_CUSTOMIZE -D$PROJECT_NAME" make zImage
+
+	#make armada-388-rd.dtb
+	make dtbs
+	#cat arch/arm/boot/zImage arch/arm/boot/dts/armada-388-rd.dtb > arch/arm/boot/zImage_dtb
+	cat arch/arm/boot/zImage arch/arm/boot/dts/armada-385-db.dtb > arch/arm/boot/zImage_dtb
+	rm arch/arm/boot/zImage
+	mv arch/arm/boot/zImage_dtb arch/arm/boot/zImage
+	./scripts/mkuboot.sh -A arm -O linux -T kernel -C none -a 0x00008000 -e 0x00008000 -n 'Linux-388' -d arch/arm/boot/zImage arch/arm/boot/uImage
+	if [ $? = 0 ] ; then
+		KCFLAGS="-DALPHA_CUSTOMIZE" make modules
+		if [ $? = 0 ] ; then
+			RET=0
+		fi
+	fi
+	
+	# netatop
+	export KERNELDIR=`pwd`
+	make clean -C ../netatop-0.5
+	make all -C ../netatop-0.5 || exit 1
+
+	export CROSS_COMPILE=$PREV_CROSS_COMPILE
+	export PATH=$PREV_PATH
+  
+  rm arch/arm/boot/dts/armada-385-db.dts  
+  rm arch/arm/boot/dts/armada-385-rd.dts 
+  rm arch/arm/boot/dts/armada-388-rd.dts  
+  rm arch/arm/boot/dts/armada-38x.dtsi
+	exit $RET
+}
+
+install()
+{
+	cp -avf arch/arm/boot/uImage ${ROOTDIR}/merge/${PROJECT_NAME}/
+
+	if [ -n "${ROOT_FS}" ] ; then
+		# for iSCSI Target
+		cp -avf \
+			./drivers/target/iscsi/iscsi_target_mod.ko \
+			./drivers/target/target_core_mod.ko \
+			./drivers/target/target_core_file.ko \
+			./drivers/target/target_core_iblock.ko \
+			${ROOT_FS}/driver/
+
+		# for Virtual Volume
+		cp -avf \
+			./drivers/scsi/scsi_transport_iscsi.ko \
+			./drivers/scsi/iscsi_tcp.ko \
+			./drivers/scsi/libiscsi_tcp.ko \
+			./drivers/scsi/libiscsi.ko \
+			${ROOT_FS}/driver/
+
+		cp -avf drivers/net/bonding/bonding.ko   ${ROOT_FS}/driver/
+		cp -avf net/ipv4/tunnel4.ko              ${ROOT_FS}/driver/
+		cp -avf net/ipv6/ipv6.ko                 ${ROOT_FS}/driver/
+		cp -avf net/ipv6/sit.ko                  ${ROOT_FS}/driver/
+		cp -avf net/ipv6/xfrm6_mode_beet.ko      ${ROOT_FS}/driver/
+		cp -avf net/ipv6/xfrm6_mode_transport.ko ${ROOT_FS}/driver/
+		cp -avf net/ipv6/xfrm6_mode_tunnel.ko    ${ROOT_FS}/driver/
+		cp -avf net/ipv4/ipip.ko                 ${ROOT_FS}/driver/
+		cp -avf net/ipv6/tunnel6.ko              ${ROOT_FS}/driver/
+		cp -avf net/ipv6/ip6_tunnel.ko           ${ROOT_FS}/driver/
+		cp -avf drivers/net/tun.ko               ${ROOT_FS}/driver/
+		
+		                
+		mkdir -p ${MODULE_DIR}/apkg/addons/${PROJECT_NAME}/VPN/lib/modules
+		cp -avf drivers/net/ppp/bsd_comp.ko			${MODULE_DIR}/apkg/addons/${PROJECT_NAME}/VPN/lib/modules
+		cp -avf drivers/net/ppp/ppp_async.ko        ${MODULE_DIR}/apkg/addons/${PROJECT_NAME}/VPN/lib/modules
+		cp -avf drivers/net/ppp/ppp_deflate.ko      ${MODULE_DIR}/apkg/addons/${PROJECT_NAME}/VPN/lib/modules
+		cp -avf drivers/net/ppp/ppp_generic.ko      ${MODULE_DIR}/apkg/addons/${PROJECT_NAME}/VPN/lib/modules
+		cp -avf drivers/net/ppp/ppp_mppe.ko         ${MODULE_DIR}/apkg/addons/${PROJECT_NAME}/VPN/lib/modules
+		cp -avf drivers/net/ppp/ppp_synctty.ko      ${MODULE_DIR}/apkg/addons/${PROJECT_NAME}/VPN/lib/modules
+		cp -avf drivers/net/ppp/pppoe.ko            ${MODULE_DIR}/apkg/addons/${PROJECT_NAME}/VPN/lib/modules
+		cp -avf drivers/net/ppp/pppox.ko            ${MODULE_DIR}/apkg/addons/${PROJECT_NAME}/VPN/lib/modules
+		cp -avf drivers/net/slip/slhc.ko			${MODULE_DIR}/apkg/addons/${PROJECT_NAME}/VPN/lib/modules
+		cp -avf lib/crc-ccitt.ko					${MODULE_DIR}/apkg/addons/${PROJECT_NAME}/VPN/lib/modules
+	
+        # Docker dependencies
+        cp -avf \
+            net/netfilter/nf_conntrack.ko \
+            net/netfilter/nf_nat.ko \
+            net/netfilter/x_tables.ko \
+            net/netfilter/xt_addrtype.ko \
+            net/netfilter/xt_conntrack.ko \
+            net/netfilter/xt_nat.ko \
+            net/netfilter/xt_tcpudp.ko \
+            net/ipv4/netfilter/ip_tables.ko \
+            net/ipv4/netfilter/nf_nat_ipv4.ko \
+            net/ipv4/netfilter/nf_defrag_ipv4.ko \
+            net/ipv4/netfilter/nf_conntrack_ipv4.ko \
+            net/ipv4/netfilter/iptable_nat.ko \
+            net/ipv4/netfilter/ipt_MASQUERADE.ko \
+            net/ipv4/netfilter/iptable_filter.ko \
+            net/llc/llc.ko \
+            net/802/stp.ko \
+            net/bridge/bridge.ko \
+            ${ROOT_FS}/driver/
+
+		# netatop
+		cp -avf ../netatop-0.5/module/netatop.ko ${ROOT_FS}/driver/
+			
+		# btrfs
+		cp -avf fs/btrfs/btrfs.ko ${ROOT_FS}/driver/
+
+		# iptable - ipv6 netfilter, recent and log module - for ssh brute force protection
+		cp -avf \
+			net/netfilter/xt_LOG.ko \
+			net/netfilter/xt_limit.ko \
+			net/netfilter/xt_recent.ko \
+			net/netfilter/xt_state.ko \
+			net/netfilter/xt_tcpmss.ko \
+			net/ipv6/netfilter/ip6_tables.ko \
+			net/ipv6/netfilter/ip6table_filter.ko \
+			net/ipv6/netfilter/nf_defrag_ipv6.ko \
+			net/ipv6/netfilter/nf_conntrack_ipv6.ko \
+			${ROOT_FS}/driver/
+
+	fi
+}
+
+clean()
+{
+	make clean
+}
+
+if [ "$1" = "build" ]; then
+	build
+elif [ "$1" = "install" ]; then
+	install
+elif [ "$1" = "clean" ]; then
+	clean
+else
+	echo "Usage : $0 build or $0 install or $0 clean"
+fi
diff --git a/xbuild_forGPL.sh b/xbuild_forGPL.sh
new file mode 100755
index 000000000000..b3d76e8656d8
--- /dev/null
+++ b/xbuild_forGPL.sh
@@ -0,0 +1,77 @@
+#!/bin/sh
+
+build()
+{
+  if [ -z "$ARCH" ] ; then
+    echo "do \"export ARCH=arm\" first."
+    exit 1
+  fi
+  RET=1
+  PREV_CROSS_COMPILE=$CROSS_COMPILE
+  PREV_PATH=$PATH
+  
+  export KBUILD_BUILD_USER=kman
+  export KBUILD_BUILD_HOST=kmachine
+  export BASEVERSION=2014T30p5
+  export BUILDNO=git$(git rev-parse --verify --short HEAD)
+  if [ "$PROJECT_NAME" = "WDMyCloudEX4100" ]; then
+    echo -e "\033[32m ***************************\033[0m"
+    echo -e "\033[32m *  build WDMyCloudEX4100  *\033[0m"
+    echo -e "\033[32m ***************************\033[0m"
+    cp arch/arm/boot/dts/Yellowstone/* arch/arm/boot/dts/
+  elif [ "$PROJECT_NAME" = "WDMyCloudEX2100" ]; then
+    echo -e "\033[32m ******************************\033[0m"
+    echo -e "\033[32m *    build WDMyCloudEX2100   *\033[0m"
+    echo -e "\033[32m ******************************\033[0m"
+    cp arch/arm/boot/dts/Yosemite/* arch/arm/boot/dts/
+  else
+    echo "Not support"
+    exit 0
+  fi
+  
+  #make mrproper
+  #make mvebu_lsp_defconfig
+  
+  rm arch/arm/boot/zImage
+	KCFLAGS="-DALPHA_CUSTOMIZE" make zImage
+	#make armada-388-rd.dtb
+	make dtbs
+	#cat arch/arm/boot/zImage arch/arm/boot/dts/armada-388-rd.dtb > arch/arm/boot/zImage_dtb
+	cat arch/arm/boot/zImage arch/arm/boot/dts/armada-385-db.dtb > arch/arm/boot/zImage_dtb
+	rm arch/arm/boot/zImage
+	mv arch/arm/boot/zImage_dtb arch/arm/boot/zImage
+	./scripts/mkuboot.sh -A arm -O linux -T kernel -C none -a 0x00008000 -e 0x00008000 -n 'Linux-388' -d arch/arm/boot/zImage arch/arm/boot/uImage
+	if [ $? = 0 ] ; then
+		KCFLAGS="-DALPHA_CUSTOMIZE" make modules
+		if [ $? = 0 ] ; then
+			RET=0
+		fi
+	fi
+
+	# netatop
+	export KERNELDIR=`pwd`
+	make clean -C ../netatop-0.5
+	make all -C ../netatop-0.5 || exit 1
+
+	export CROSS_COMPILE=$PREV_CROSS_COMPILE
+	export PATH=$PREV_PATH
+  
+  rm arch/arm/boot/dts/armada-385-db.dts  
+  rm arch/arm/boot/dts/armada-385-rd.dts 
+  rm arch/arm/boot/dts/armada-388-rd.dts  
+  rm arch/arm/boot/dts/armada-38x.dtsi
+	exit $RET
+}
+
+clean()
+{
+	make clean
+}
+
+if [ "$1" = "build" ]; then
+	build
+elif [ "$1" = "clean" ]; then
+	clean
+else
+	echo "Usage : $0 build or $0 clean"
+fi
-- 
2.23.0

